From efb89583f33abd208b3e0bc1d27b93af49d7cbb5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 4 Oct 2019 14:47:12 -0700 Subject: [PATCH 0001/2111] PYTHON-1995 Resync encryption spec tests to 71518175e8bd5ee36fc35a7529b65f9d248b1c83 --- test/client-side-encryption/spec/bulk.json | 12 ++---------- test/client-side-encryption/spec/getMore.json | 2 +- .../spec/malformedCiphertext.json | 2 +- test/client-side-encryption/spec/missingKey.json | 2 +- 4 files changed, 5 insertions(+), 13 deletions(-) diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/bulk.json index 232635609e..5e71b593bb 100644 --- a/test/client-side-encryption/spec/bulk.json +++ b/test/client-side-encryption/spec/bulk.json @@ -148,16 +148,8 @@ "name": "deleteOne", "arguments": { "filter": { - "$and": [ - { - "encrypted_string": "string1" - }, - { - "_id": { - "$eq": 2 - } - } - ] + "encrypted_string": "string1", + "_id": 2 } } } diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/getMore.json index 353aa6d1be..543fcc81ab 100644 --- a/test/client-side-encryption/spec/getMore.json +++ b/test/client-side-encryption/spec/getMore.json @@ -142,7 +142,7 @@ "batchSize": 2, "filter": {} }, - "results": [ + "result": [ { "_id": 1, "encrypted_string": "string0" diff --git a/test/client-side-encryption/spec/malformedCiphertext.json b/test/client-side-encryption/spec/malformedCiphertext.json index 2f9794e336..c81330ce83 100644 --- a/test/client-side-encryption/spec/malformedCiphertext.json +++ b/test/client-side-encryption/spec/malformedCiphertext.json @@ -312,7 +312,7 @@ } }, "result": { - "errorContains": "did not provide all keys" + "errorContains": "not all keys requested were satisfied" } } ] diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/missingKey.json index 0ce6fc57df..4a4b1fd4d7 100644 --- a/test/client-side-encryption/spec/missingKey.json +++ b/test/client-side-encryption/spec/missingKey.json @@ -119,7 +119,7 @@ } }, "result": { - "errorContains": "did not provide all keys" + "errorContains": "not all keys requested were satisfied" } } ], From bce43939c959d337158ecda41c3e13d9938d4462 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 4 Oct 2019 14:59:04 -0700 Subject: [PATCH 0002/2111] PYTHON-1996 Add require_test_commands tests that use failpoints --- test/test_connections_survive_primary_stepdown_spec.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 8c8b6adf74..4a63e0e239 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -117,19 +117,23 @@ def run_scenario(self, error_code, retry, pool_status_checker): self.coll.insert_one({"test": 1}) @client_context.require_version_min(4, 2, -1) + @client_context.require_test_commands def test_not_master_keep_connection_pool(self): self.run_scenario(10107, True, self.verify_pool_not_cleared) @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 1, 0, -1) + @client_context.require_test_commands def test_not_master_reset_connection_pool(self): self.run_scenario(10107, False, self.verify_pool_cleared) @client_context.require_version_min(4, 0, 0) + @client_context.require_test_commands def test_shutdown_in_progress(self): self.run_scenario(91, False, self.verify_pool_cleared) @client_context.require_version_min(4, 0, 0) + @client_context.require_test_commands def test_interrupted_at_shutdown(self): self.run_scenario(11600, False, self.verify_pool_cleared) From 3236994c817431559deb549cc0a89fb92b8c40f4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 4 Oct 2019 16:22:39 -0700 Subject: [PATCH 0003/2111] SPEC-1464 Workaround for unordered JSON parsing in FLE test --- test/client-side-encryption/spec/bulk.json | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/bulk.json index 5e71b593bb..b3d5cdba9a 100644 --- a/test/client-side-encryption/spec/bulk.json +++ b/test/client-side-encryption/spec/bulk.json @@ -148,8 +148,14 @@ "name": "deleteOne", "arguments": { "filter": { - "encrypted_string": "string1", - "_id": 2 + "$and": [ + { + "encrypted_string": "string1" + }, + { + "_id": 2 + } + ] } } } From 502b59898e46e721ee7afaf512f2e95eb56a4c2a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Oct 2019 15:52:52 -0700 Subject: [PATCH 0004/2111] PYTHON-2006 Fix DuplicateKeyError in custom types test --- test/test_custom_types.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index d5c75d99dc..b0a190d0f9 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -863,8 +863,9 @@ def create_targets(self, *args, **kwargs): self.watched_target = self.db.get_collection( 'test', *args, **kwargs) self.input_target = self.watched_target - # Insert a record to ensure db, coll are created. - self.input_target.insert_one({'data': 'dummy'}) + # Ensure the collection exists and is empty. + self.input_target.insert_one({}) + self.input_target.delete_many({}) class TestDatabaseChangeStreamsWCustomTypes( From bb18da769cbc8b4b31a5b3d3e61363ad323a0672 Mon Sep 17 00:00:00 2001 From: Anders Kaseorg Date: Thu, 24 Oct 2019 12:08:08 -0700 Subject: [PATCH 0005/2111] PYTHON-2001 Fix Python 3.8 SyntaxWarning: "is not" with a literal (#425) Fixes this warning from Python 3.8: bson/json_util.py:702: SyntaxWarning: "is not" with a literal. Did you mean "!="? if doc['$minKey'] is not 1: bson/json_util.py:711: SyntaxWarning: "is not" with a literal. Did you mean "!="? if doc['$maxKey'] is not 1: --- bson/json_util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index 86c5c41c93..35bdc3070d 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -699,7 +699,7 @@ def _parse_canonical_decimal128(doc): def _parse_canonical_minkey(doc): """Decode a JSON MinKey to bson.min_key.MinKey.""" - if doc['$minKey'] is not 1: + if type(doc['$minKey']) is not int or doc['$minKey'] != 1: raise TypeError('$minKey value must be 1: %s' % (doc,)) if len(doc) != 1: raise TypeError('Bad $minKey, extra field(s): %s' % (doc,)) @@ -708,7 +708,7 @@ def _parse_canonical_minkey(doc): def _parse_canonical_maxkey(doc): """Decode a JSON MaxKey to bson.max_key.MaxKey.""" - if doc['$maxKey'] is not 1: + if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1: raise TypeError('$maxKey value must be 1: %s', (doc,)) if len(doc) != 1: raise TypeError('Bad $minKey, extra field(s): %s' % (doc,)) From 560415666a821fd2d0c2b20335fc651995a25ece Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 25 Oct 2019 15:56:47 -0700 Subject: [PATCH 0006/2111] PYTHON-2023 Use $merge to non-admin db to fix db.aggregate write concern test --- test/test_database.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/test/test_database.py b/test/test_database.py index 538e074f75..983bcecec5 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -1036,20 +1036,28 @@ def test_database_aggregation(self): @client_context.require_version_min(3, 6, 0) @client_context.require_no_mongos def test_database_aggregation_fake_cursor(self): - admin = self.admin.with_options(write_concern=WriteConcern(w=0)) - test_collection_name = "test_output" - admin.drop_collection(test_collection_name) - self.addCleanup(admin.drop_collection, test_collection_name) + coll_name = "test_output" + if client_context.version < (4, 3): + db_name = "admin" + write_stage = {"$out": coll_name} + else: + # SERVER-43287 disallows writing with $out to the admin db, use + # $merge instead. + db_name = "pymongo_test" + write_stage = { + "$merge": {"into": {"db": db_name, "coll": coll_name}}} + output_coll = self.client[db_name][coll_name] + output_coll.drop() + self.addCleanup(output_coll.drop) + admin = self.admin.with_options(write_concern=WriteConcern(w=0)) pipeline = self.pipeline[:] - pipeline.append({"$out": "test_output"}) + pipeline.append(write_stage) with admin.aggregate(pipeline) as cursor: with self.assertRaises(StopIteration): next(cursor) - result = wait_until( - admin[test_collection_name].find_one, - "read unacknowledged write") + result = wait_until(output_coll.find_one, "read unacknowledged write") self.assertEqual(result["dummy"], self.result["dummy"]) @client_context.require_version_max(3, 6, 0, -1) From ad0e87c0e0bbe180376e0d7d4ad44ad8a1760f1d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 24 Oct 2019 18:05:50 -0700 Subject: [PATCH 0007/2111] PYTHON-2022 Fix potential UnboundLocalError in gridfs test --- test/test_gridfs_spec.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 1469feb4d5..bda6f52816 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -146,6 +146,8 @@ def run_scenario(self): converted_args = dict((camel_to_snake(c), v) for c, v in args.items()) + expect_error = test['assert'].get("error", False) + result = None error = None try: result = operation(**converted_args) @@ -153,6 +155,8 @@ def run_scenario(self): if 'download' in test['act']['operation']: result = Binary(result.read()) except Exception as exc: + if not expect_error: + raise error = exc self.init_expected_db(test, result) @@ -164,7 +168,7 @@ def run_scenario(self): "ChunkIsWrongSize": CorruptGridFile, "RevisionNotFound": NoFile} - if test['assert'].get("error", False): + if expect_error: self.assertIsNotNone(error) self.assertIsInstance(error, errors[test['assert']['error']], test['description']) From c25a83aee4523c86f0dc3d39fb0eed3fa3c05a0c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 31 Oct 2019 16:14:57 -0700 Subject: [PATCH 0008/2111] PYTHON-2007 Fix pymongocrypt install error message --- pymongo/encryption.py | 2 +- pymongo/encryption_options.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1986f5f502..d9afe048b6 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -387,7 +387,7 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, raise ConfigurationError( "client side encryption requires the pymongocrypt library: " "install a compatible version with: " - "python -m pip install pymongo['encryption']") + "python -m pip install 'pymongo[encryption]'") if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of " diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index bc469f0ea1..60bb41e334 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -115,7 +115,7 @@ def __init__(self, kms_providers, key_vault_namespace, raise ConfigurationError( "client side encryption requires the pymongocrypt library: " "install a compatible version with: " - "python -m pip install pymongo['encryption']") + "python -m pip install 'pymongo[encryption]'") self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace From 08e839070b062b2e35fe57b5f729dcd62f54df4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=22RooTer=22=20Urba=C5=84ski?= Date: Fri, 1 Nov 2019 19:38:25 +0100 Subject: [PATCH 0009/2111] Test against Python 3.7 on Travis (#426) --- .travis.yml | 1 + appveyor.yml | 2 ++ tox.ini | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 647a95f0bd..6e9bd3944d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,6 +5,7 @@ python: - 3.4 - 3.5 - 3.6 + - 3.7 - pypy - pypy3.5 diff --git a/appveyor.yml b/appveyor.yml index e45c6a1b45..ab259fbadd 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -15,9 +15,11 @@ environment: - PYTHON: "C:\\Python27" - PYTHON: "C:\\Python35" - PYTHON: "C:\\Python36" + - PYTHON: "C:\\Python37" - PYTHON: "C:\\Python27-x64" - PYTHON: "C:\\Python35-x64" - PYTHON: "C:\\Python36-x64" + - PYTHON: "C:\\Python37-x64" build: off diff --git a/tox.ini b/tox.ini index 340ba5131a..c6d6d2dc8c 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. [tox] -envlist = py27, py34, py35, py36, pypy, pypy3 +envlist = py27, py34, py35, py36, py37, pypy, pypy3 skip_missing_interpreters = True [testenv] From 9a882245b14a29048b65566d880c6748d6adf8a7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 1 Nov 2019 13:09:34 -0700 Subject: [PATCH 0010/2111] PYTHON-2025 Remove unused tox and appveyor configs --- appveyor.yml | 29 ----------------------------- tox.ini | 12 ------------ 2 files changed, 41 deletions(-) delete mode 100644 appveyor.yml delete mode 100644 tox.ini diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index ab259fbadd..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,29 +0,0 @@ -environment: - - matrix: - - # For Python versions available on Appveyor, see - # http://www.appveyor.com/docs/installed-software#python - - # Though we support Python 3.4 we're not testing it here. - # Testing it requires extra work that doesn't seem necessary - # since we already test it in Evergreen. - # See https://packaging.python.org/guides/supporting-windows-using-appveyor/#support-script - - # Test both 32 and 64bit builds. - - - PYTHON: "C:\\Python27" - - PYTHON: "C:\\Python35" - - PYTHON: "C:\\Python36" - - PYTHON: "C:\\Python37" - - PYTHON: "C:\\Python27-x64" - - PYTHON: "C:\\Python35-x64" - - PYTHON: "C:\\Python36-x64" - - PYTHON: "C:\\Python37-x64" - -build: off - -test_script: - # We just want to test C extension builds for pull requests. - # No need for a running MongoDB instance. - - "%PYTHON%\\python.exe setup.py test" diff --git a/tox.ini b/tox.ini deleted file mode 100644 index c6d6d2dc8c..0000000000 --- a/tox.ini +++ /dev/null @@ -1,12 +0,0 @@ -# Tox (http://tox.testrun.org/) is a tool for running tests -# in multiple virtualenvs. This configuration file will run the -# test suite on all supported python versions. To use it, "pip install tox" -# and then run "tox" from this directory. - -[tox] -envlist = py27, py34, py35, py36, py37, pypy, pypy3 -skip_missing_interpreters = True - -[testenv] -commands = - {envpython} setup.py --no_ext test From f62c53f4727b658ffa40a80ed46cbc0f5c96afa0 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 31 Oct 2019 18:05:51 -0700 Subject: [PATCH 0011/2111] PYTHON-2020 Make ClientSession._in_transaction a public property --- pymongo/client_session.py | 28 +++++++++++++++------------- pymongo/common.py | 2 +- pymongo/message.py | 4 ++-- pymongo/mongo_client.py | 6 +++--- pymongo/network.py | 2 +- test/test_transactions.py | 36 ++++++++++++++++++++++++++++++++++++ 6 files changed, 58 insertions(+), 20 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 78bc0a8c9c..245afdef57 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -267,7 +267,7 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if self.__session._in_transaction: + if self.__session.in_transaction: if exc_val is None: self.__session.commit_transaction() else: @@ -356,7 +356,7 @@ def end_session(self): def _end_session(self, lock): if self._server_session is not None: try: - if self._in_transaction: + if self.in_transaction: self.abort_transaction() finally: self._client._return_server_session(self._server_session, lock) @@ -505,7 +505,7 @@ def callback(session, custom_arg, custom_kwarg=None): try: ret = callback(self) except Exception as exc: - if self._in_transaction: + if self.in_transaction: self.abort_transaction() if (isinstance(exc, PyMongoError) and exc.has_error_label("TransientTransactionError") and @@ -514,8 +514,7 @@ def callback(session, custom_arg, custom_kwarg=None): continue raise - if self._transaction.state in ( - _TxnState.NONE, _TxnState.COMMITTED, _TxnState.ABORTED): + if not self.in_transaction: # Assume callback intentionally ended the transaction. return ret @@ -551,7 +550,7 @@ def start_transaction(self, read_concern=None, write_concern=None, """ self._check_ended() - if self._in_transaction: + if self.in_transaction: raise InvalidOperation("Transaction already in progress") read_concern = self._inherit_option("read_concern", read_concern) @@ -589,7 +588,7 @@ def commit_transaction(self): "Cannot call commitTransaction after calling abortTransaction") elif state is _TxnState.COMMITTED: # We're explicitly retrying the commit, move the state back to - # "in progress" so that _in_transaction returns true. + # "in progress" so that in_transaction returns true. self._transaction.state = _TxnState.IN_PROGRESS retry = True @@ -750,7 +749,7 @@ def _process_response(self, reply): """Process a response to a command that was run with this session.""" self._advance_cluster_time(reply.get('$clusterTime')) self._advance_operation_time(reply.get('operationTime')) - if self._in_transaction and self._transaction.sharded: + if self.in_transaction and self._transaction.sharded: recovery_token = reply.get('recoveryToken') if recovery_token: self._transaction.recovery_token = recovery_token @@ -761,8 +760,11 @@ def has_ended(self): return self._server_session is None @property - def _in_transaction(self): - """True if this session has an active multi-statement transaction.""" + def in_transaction(self): + """True if this session has an active multi-statement transaction. + + .. versionadded:: 3.10 + """ return self._transaction.active() @property @@ -783,7 +785,7 @@ def _unpin_mongos(self): def _txn_read_preference(self): """Return read preference of this transaction or None.""" - if self._in_transaction: + if self.in_transaction: return self._transaction.opts.read_preference return None @@ -793,14 +795,14 @@ def _apply_to(self, command, is_retryable, read_preference): self._server_session.last_use = monotonic.time() command['lsid'] = self._server_session.session_id - if not self._in_transaction: + if not self.in_transaction: self._transaction.reset() if is_retryable: command['txnNumber'] = self._server_session.transaction_id return - if self._in_transaction: + if self.in_transaction: if read_preference != ReadPreference.PRIMARY: raise InvalidOperation( 'read preference in a transaction must be primary, not: ' diff --git a/pymongo/common.py b/pymongo/common.py index 76c14e08f1..f208e83d08 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -824,7 +824,7 @@ def _write_concern_for(self, session): """Read only access to the write concern of this instance or session. """ # Override this operation's write concern with the transaction's. - if session and session._in_transaction: + if session and session.in_transaction: return DEFAULT_WRITE_CONCERN return self.write_concern diff --git a/pymongo/message.py b/pymongo/message.py index 56f8636955..2b8ff10422 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -205,7 +205,7 @@ def _gen_find_command(coll, spec, projection, skip, limit, batch_size, options, cmd['singleBatch'] = True if batch_size: cmd['batchSize'] = batch_size - if read_concern.level and not (session and session._in_transaction): + if read_concern.level and not (session and session.in_transaction): cmd['readConcern'] = read_concern.document if collation: cmd['collation'] = collation @@ -304,7 +304,7 @@ def as_command(self, sock_info): # Explain does not support readConcern. if (not explain and session.options.causal_consistency and session.operation_time is not None - and not session._in_transaction): + and not session.in_transaction): cmd.setdefault( 'readConcern', {})[ 'afterClusterTime'] = session.operation_time diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 79ceea2458..8220a5e443 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1255,7 +1255,7 @@ def _select_server(self, server_selector, session, address=None): # Pin this session to the selected server if it's performing a # sharded transaction. if server.description.mongos and (session and - session._in_transaction): + session.in_transaction): session._pin_mongos(server) return server except PyMongoError as exc: @@ -1355,7 +1355,7 @@ def _retry_with_session(self, retryable, func, session, bulk): Re-raises any exception thrown by func(). """ retryable = (retryable and self.retry_writes - and session and not session._in_transaction) + and session and not session.in_transaction) last_error = None retrying = False @@ -1445,7 +1445,7 @@ def _retryable_read(self, func, read_pref, session, address=None, """ retryable = (retryable and self.retry_reads - and not (session and session._in_transaction)) + and not (session and session.in_transaction)) last_error = None retrying = False diff --git a/pymongo/network.py b/pymongo/network.py index cf88a814a3..0996180f5f 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -98,7 +98,7 @@ def command(sock, dbname, spec, slave_ok, is_mongos, orig = spec if is_mongos and not use_op_msg: spec = message._maybe_add_read_preference(spec, read_preference) - if read_concern and not (session and session._in_transaction): + if read_concern and not (session and session.in_transaction): if read_concern.level: spec['readConcern'] = read_concern.document if (session and session.options.causal_consistency diff --git a/test/test_transactions.py b/test/test_transactions.py index 5b575adbf8..88e6dae5ab 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -334,6 +334,42 @@ def callback(session): self.assertEqual(listener.started_command_names(), ['insert', 'commitTransaction', 'commitTransaction']) + # Tested here because this supports Motor's convenient transactions API. + @client_context.require_transactions + def test_in_transaction_property(self): + client = client_context.client + coll = client.test.testcollection + coll.insert_one({}) + self.addCleanup(coll.drop) + + with client.start_session() as s: + self.assertFalse(s.in_transaction) + s.start_transaction() + self.assertTrue(s.in_transaction) + coll.insert_one({}, session=s) + self.assertTrue(s.in_transaction) + s.commit_transaction() + self.assertFalse(s.in_transaction) + + with client.start_session() as s: + s.start_transaction() + # commit empty transaction + s.commit_transaction() + self.assertFalse(s.in_transaction) + + with client.start_session() as s: + s.start_transaction() + s.abort_transaction() + self.assertFalse(s.in_transaction) + + # Using a callback + def callback(session): + self.assertTrue(session.in_transaction) + with client.start_session() as s: + self.assertFalse(s.in_transaction) + s.with_transaction(callback) + self.assertFalse(s.in_transaction) + def create_test(scenario_def, test, name): @client_context.require_test_commands From b57260082109bd6156805c426b490956f0c01b43 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 Nov 2019 14:45:30 -0800 Subject: [PATCH 0012/2111] PYTHON-2012 FLE GA changes (#427) Silence mongocryptd output, users should provide logging options via mongocryptd_spawn_args instead. Document 'endpoint' support in create_data_key. Document that create_data_key returns a Binary with UUID subtype. Add custom endpoint prose test. Resync encryption spec tests. Check command started events in Data key and double encryption prose test. --- pymongo/encryption.py | 33 +++--- .../spec/aggregate.json | 14 +-- test/client-side-encryption/spec/basic.json | 14 +-- test/client-side-encryption/spec/bulk.json | 7 +- .../spec/bypassAutoEncryption.json | 10 +- test/client-side-encryption/spec/count.json | 7 +- .../spec/countDocuments.json | 7 +- test/client-side-encryption/spec/delete.json | 14 +-- .../client-side-encryption/spec/distinct.json | 7 +- test/client-side-encryption/spec/explain.json | 7 +- test/client-side-encryption/spec/find.json | 14 +-- .../spec/findOneAndDelete.json | 7 +- .../spec/findOneAndReplace.json | 7 +- .../spec/findOneAndUpdate.json | 7 +- test/client-side-encryption/spec/getMore.json | 7 +- test/client-side-encryption/spec/insert.json | 14 +-- .../spec/keyAltName.json | 7 +- .../client-side-encryption/spec/localKMS.json | 2 - .../spec/localSchema.json | 6 +- .../spec/missingKey.json | 7 +- .../spec/replaceOne.json | 7 +- test/client-side-encryption/spec/types.json | 48 ++++++--- .../spec/updateMany.json | 7 +- .../spec/updateOne.json | 9 +- test/test_encryption.py | 100 +++++++++++++++++- 25 files changed, 257 insertions(+), 112 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index d9afe048b6..b61937cb66 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -19,6 +19,7 @@ """ import contextlib +import os import subprocess import uuid import weakref @@ -56,6 +57,7 @@ from pymongo.pool import _configured_socket, PoolOptions from pymongo.read_concern import ReadConcern from pymongo.ssl_support import get_ssl_context +from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -110,11 +112,12 @@ def kms_request(self, kms_context): """ endpoint = kms_context.endpoint message = kms_context.message + host, port = parse_host(endpoint, _HTTPS_PORT) ctx = get_ssl_context(None, None, None, None, None, None, True) opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, socket_timeout=_KMS_CONNECT_TIMEOUT, ssl_context=ctx) - conn = _configured_socket((endpoint, _HTTPS_PORT), opts) + conn = _configured_socket((host, port), opts) try: conn.sendall(message) while kms_context.bytes_needed > 0: @@ -150,7 +153,9 @@ def spawn(self): self._spawned = True args = [self.opts._mongocryptd_spawn_path or 'mongocryptd'] args.extend(self.opts._mongocryptd_spawn_args) - subprocess.Popen(args) + # Silence mongocryptd output, users should pass --logpath. + with open(os.devnull, 'wb') as devnull: + subprocess.Popen(args, stdout=devnull, stderr=devnull) def mark_command(self, database, cmd): """Mark a command for encryption. @@ -412,15 +417,17 @@ def create_data_key(self, kms_provider, master_key=None, :Parameters: - `kms_provider`: The KMS provider to use. Supported values are "aws" and "local". - - `master_key`: The `master_key` identifies a KMS-specific key used - to encrypt the new data key. If the kmsProvider is "local" the - `master_key` is not applicable and may be omitted. - If the `kms_provider` is "aws", `master_key` is required and must - have the following fields: - - - `region` (string): The AWS region as a string. - - `key` (string): The Amazon Resource Name (ARN) to the AWS - customer master key (CMK). + - `master_key`: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. If the `kms_provider` is "aws" + it is required and has the following fields:: + + - `region` (string): Required. The AWS region, e.g. "us-east-1". + - `key` (string): Required. The Amazon Resource Name (ARN) to + the AWS customer. + - `endpoint` (string): Optional. An alternate host to send KMS + requests to. May include port number, e.g. + "kms.us-east-1.amazonaws.com:443". - `key_alt_names` (optional): An optional list of string alternate names used to reference a key. If a key is created with alternate @@ -434,7 +441,9 @@ def create_data_key(self, kms_provider, master_key=None, algorithm=Algorithm.Random) :Returns: - The ``_id`` of the created data key document. + The ``_id`` of the created data key document as a + :class:`~bson.binary.Binary` with subtype + :data:`~bson.binary.UUID_SUBTYPE`. """ self._check_closed() with _wrap_encryption_errors(): diff --git a/test/client-side-encryption/spec/aggregate.json b/test/client-side-encryption/spec/aggregate.json index f409bfd1f0..6bc9242717 100644 --- a/test/client-side-encryption/spec/aggregate.json +++ b/test/client-side-encryption/spec/aggregate.json @@ -143,7 +143,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -155,7 +154,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -189,7 +187,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -255,7 +256,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -277,7 +277,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -311,7 +310,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/basic.json b/test/client-side-encryption/spec/basic.json index 99408ae066..371894e8ca 100644 --- a/test/client-side-encryption/spec/basic.json +++ b/test/client-side-encryption/spec/basic.json @@ -137,7 +137,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -149,7 +148,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -183,7 +181,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -275,7 +276,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -287,7 +287,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -321,7 +320,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/bulk.json index b3d5cdba9a..7a401d5e8e 100644 --- a/test/client-side-encryption/spec/bulk.json +++ b/test/client-side-encryption/spec/bulk.json @@ -171,7 +171,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -183,7 +182,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -217,7 +215,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/bypassAutoEncryption.json b/test/client-side-encryption/spec/bypassAutoEncryption.json index 97a61651a4..42f4473223 100644 --- a/test/client-side-encryption/spec/bypassAutoEncryption.json +++ b/test/client-side-encryption/spec/bypassAutoEncryption.json @@ -196,7 +196,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -366,7 +369,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/count.json b/test/client-side-encryption/spec/count.json index 38ece606d7..9ac5104a09 100644 --- a/test/client-side-encryption/spec/count.json +++ b/test/client-side-encryption/spec/count.json @@ -142,7 +142,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -154,7 +153,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -188,7 +186,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/countDocuments.json b/test/client-side-encryption/spec/countDocuments.json index 9f495bafc0..d4ae0aeb46 100644 --- a/test/client-side-encryption/spec/countDocuments.json +++ b/test/client-side-encryption/spec/countDocuments.json @@ -143,7 +143,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -155,7 +154,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -189,7 +187,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/delete.json b/test/client-side-encryption/spec/delete.json index aadd1e3019..bb9c061556 100644 --- a/test/client-side-encryption/spec/delete.json +++ b/test/client-side-encryption/spec/delete.json @@ -144,7 +144,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -156,7 +155,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -190,7 +188,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -268,7 +269,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -280,7 +280,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -314,7 +313,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/distinct.json b/test/client-side-encryption/spec/distinct.json index 4a24aa7797..c473030580 100644 --- a/test/client-side-encryption/spec/distinct.json +++ b/test/client-side-encryption/spec/distinct.json @@ -154,7 +154,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -166,7 +165,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -200,7 +198,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/explain.json b/test/client-side-encryption/spec/explain.json index 1253b7f581..6872cedf2b 100644 --- a/test/client-side-encryption/spec/explain.json +++ b/test/client-side-encryption/spec/explain.json @@ -148,7 +148,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -160,7 +159,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -194,7 +192,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/find.json b/test/client-side-encryption/spec/find.json index e754fa1709..93cef311c0 100644 --- a/test/client-side-encryption/spec/find.json +++ b/test/client-side-encryption/spec/find.json @@ -153,7 +153,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -165,7 +164,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -199,7 +197,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -294,7 +295,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -306,7 +306,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -340,7 +339,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/findOneAndDelete.json b/test/client-side-encryption/spec/findOneAndDelete.json index 5459b8c21e..2d9f963f23 100644 --- a/test/client-side-encryption/spec/findOneAndDelete.json +++ b/test/client-side-encryption/spec/findOneAndDelete.json @@ -141,7 +141,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -153,7 +152,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -187,7 +185,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/findOneAndReplace.json b/test/client-side-encryption/spec/findOneAndReplace.json index fbc1632e4c..1512fb9552 100644 --- a/test/client-side-encryption/spec/findOneAndReplace.json +++ b/test/client-side-encryption/spec/findOneAndReplace.json @@ -140,7 +140,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -152,7 +151,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -186,7 +184,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/findOneAndUpdate.json b/test/client-side-encryption/spec/findOneAndUpdate.json index d342472be0..a5b41f8455 100644 --- a/test/client-side-encryption/spec/findOneAndUpdate.json +++ b/test/client-side-encryption/spec/findOneAndUpdate.json @@ -142,7 +142,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -154,7 +153,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -188,7 +186,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/getMore.json index 543fcc81ab..637f69d509 100644 --- a/test/client-side-encryption/spec/getMore.json +++ b/test/client-side-encryption/spec/getMore.json @@ -163,7 +163,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -184,7 +183,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -218,7 +216,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/insert.json b/test/client-side-encryption/spec/insert.json index 12471eb691..beb98c5eb0 100644 --- a/test/client-side-encryption/spec/insert.json +++ b/test/client-side-encryption/spec/insert.json @@ -124,7 +124,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -136,7 +135,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -170,7 +168,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -250,7 +251,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -262,7 +262,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -296,7 +295,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/keyAltName.json b/test/client-side-encryption/spec/keyAltName.json index ae753baac6..7088d0b0be 100644 --- a/test/client-side-encryption/spec/keyAltName.json +++ b/test/client-side-encryption/spec/keyAltName.json @@ -124,7 +124,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -136,7 +135,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -165,7 +163,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/localKMS.json b/test/client-side-encryption/spec/localKMS.json index 1506076b0c..febc1ccfc8 100644 --- a/test/client-side-encryption/spec/localKMS.json +++ b/test/client-side-encryption/spec/localKMS.json @@ -107,7 +107,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -119,7 +118,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, diff --git a/test/client-side-encryption/spec/localSchema.json b/test/client-side-encryption/spec/localSchema.json index 9de47f066c..f939dbc123 100644 --- a/test/client-side-encryption/spec/localSchema.json +++ b/test/client-side-encryption/spec/localSchema.json @@ -140,7 +140,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -174,7 +173,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/missingKey.json index 4a4b1fd4d7..a7237f1792 100644 --- a/test/client-side-encryption/spec/missingKey.json +++ b/test/client-side-encryption/spec/missingKey.json @@ -133,7 +133,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -145,7 +144,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "different" }, @@ -179,7 +177,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/replaceOne.json b/test/client-side-encryption/spec/replaceOne.json index b8c2ffc190..1287fdea14 100644 --- a/test/client-side-encryption/spec/replaceOne.json +++ b/test/client-side-encryption/spec/replaceOne.json @@ -141,7 +141,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -153,7 +152,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -187,7 +185,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/types.json index 26ec8ea82f..08928381e1 100644 --- a/test/client-side-encryption/spec/types.json +++ b/test/client-side-encryption/spec/types.json @@ -107,7 +107,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -141,7 +140,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -256,7 +258,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -290,7 +291,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -405,7 +409,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -439,7 +442,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -654,7 +660,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -688,7 +693,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -803,7 +811,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -837,7 +844,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -1051,7 +1061,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -1085,7 +1094,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -1206,7 +1218,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -1240,7 +1251,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -1359,7 +1373,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -1393,7 +1406,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/updateMany.json b/test/client-side-encryption/spec/updateMany.json index 7af4c133d2..43c6dd717c 100644 --- a/test/client-side-encryption/spec/updateMany.json +++ b/test/client-side-encryption/spec/updateMany.json @@ -157,7 +157,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -169,7 +168,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -203,7 +201,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/updateOne.json b/test/client-side-encryption/spec/updateOne.json index 9352471c7c..d6a6de79e2 100644 --- a/test/client-side-encryption/spec/updateOne.json +++ b/test/client-side-encryption/spec/updateOne.json @@ -143,7 +143,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -155,7 +154,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "datakeys" }, @@ -189,7 +187,10 @@ } ] }, - "$db": "admin" + "$db": "admin", + "readConcern": { + "level": "majority" + } }, "command_name": "find" } @@ -310,7 +311,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } @@ -380,7 +380,6 @@ "command_started_event": { "command": { "listCollections": 1, - "cursor": {}, "filter": { "name": "default" } diff --git a/test/test_encryption.py b/test/test_encryption.py index 1fc7cd2012..58a392d699 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -18,6 +18,7 @@ import copy import os import traceback +import socket import sys import uuid @@ -585,8 +586,11 @@ def kms_providers(): return {'aws': AWS_CREDS, 'local': {'key': LOCAL_MASTER_KEY}} def test_data_key(self): - self.client.db.coll.drop() - vault = create_key_vault(self.client.admin.datakeys) + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + client.db.coll.drop() + vault = create_key_vault(client.admin.datakeys) self.addCleanup(vault.drop) # Configure the encrypted field via the local schema_map option. @@ -611,14 +615,17 @@ def test_data_key(self): self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', client_context.client, - OPTS) + self.kms_providers(), 'admin.datakeys', client, OPTS) self.addCleanup(client_encryption.close) # Local create data key. + listener.reset() local_datakey_id = client_encryption.create_data_key( 'local', key_alt_names=['local_altname']) self.assertBinaryUUID(local_datakey_id) + cmd = listener.results['started'][-1] + self.assertEqual('insert', cmd.command_name) + self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) docs = list(vault.find({'_id': local_datakey_id})) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]['masterKey']['provider'], 'local') @@ -642,6 +649,7 @@ def test_data_key(self): self.assertEqual(local_encrypted_altname, local_encrypted) # AWS create data key. + listener.reset() master_key = { 'region': 'us-east-1', 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-' @@ -650,6 +658,9 @@ def test_data_key(self): aws_datakey_id = client_encryption.create_data_key( 'aws', master_key=master_key, key_alt_names=['aws_altname']) self.assertBinaryUUID(aws_datakey_id) + cmd = listener.results['started'][-1] + self.assertEqual('insert', cmd.command_name) + self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) docs = list(vault.find({'_id': aws_datakey_id})) self.assertEqual(len(docs), 1) self.assertEqual(docs[0]['masterKey']['provider'], 'aws') @@ -998,5 +1009,86 @@ def test_05_bulk_batch_split(self): self.listener.started_command_names(), ['insert', 'insert']) +class TestCustomEndpoint(EncryptionIntegrationTest): + """Prose tests for creating data keys with a custom endpoint.""" + + @classmethod + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def setUpClass(cls): + super(TestCustomEndpoint, cls).setUpClass() + cls.client_encryption = ClientEncryption( + {'aws': AWS_CREDS}, 'admin.datakeys', client_context.client, OPTS) + + def _test_create_data_key(self, master_key): + data_key_id = self.client_encryption.create_data_key( + 'aws', master_key=master_key) + encrypted = self.client_encryption.encrypt( + 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=data_key_id) + self.assertEqual('test', self.client_encryption.decrypt(encrypted)) + + def test_02_aws_region_key(self): + self._test_create_data_key({ + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0") + }) + + def test_03_aws_region_key_endpoint(self): + self._test_create_data_key({ + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com" + }) + + def test_04_aws_region_key_endpoint_port(self): + self._test_create_data_key({ + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com:443" + }) + + def test_05_endpoint_invalid_port(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com:12345" + } + with self.assertRaises(EncryptionError) as ctx: + self.client_encryption.create_data_key( + 'aws', master_key=master_key) + self.assertIsInstance(ctx.exception.cause, socket.error) + + def test_05_endpoint_wrong_region(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-2.amazonaws.com" + } + # The full error should be something like: + # "Credential should be scoped to a valid region, not 'us-east-1'" + # but we only check for "us-east-1" to avoid breaking on slight + # changes to AWS' error message. + with self.assertRaisesRegex(EncryptionError, 'us-east-1'): + self.client_encryption.create_data_key( + 'aws', master_key=master_key) + + def test_05_endpoint_invalid_host(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "example.com" + } + with self.assertRaisesRegex(EncryptionError, 'parse error'): + self.client_encryption.create_data_key( + 'aws', master_key=master_key) + + if __name__ == "__main__": unittest.main() From 13d559b6d46d0d3c778f546f8b622e0a087ef520 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 Nov 2019 16:14:15 -0800 Subject: [PATCH 0013/2111] PYTHON-2002 Skip failing dnspython SRV polling tests on Python 2 --- test/test_srv_polling.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 1908103476..ff12e98a91 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -88,6 +88,7 @@ class TestSrvPolling(unittest.TestCase): CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" + @unittest.skipIf(sys.version_info[0] < 3, "PYTHON-2002 fails on python 2") def setUp(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("SRV polling tests require the dnspython " From 55f8df214180c9c74ad784152512a0b07d8516ba Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 Nov 2019 16:54:00 -0800 Subject: [PATCH 0014/2111] PYTHON-2002 Skip failing dnspython seedlist tests on Python 2 --- test/test_dns.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_dns.py b/test/test_dns.py index 3d75b2f06d..600ddcbcd1 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -39,6 +39,7 @@ class TestDNS(unittest.TestCase): def create_test(test_case): + @unittest.skipIf(sys.version_info[0] < 3, "PYTHON-2002 fails on python 2") @client_context.require_replica_set @client_context.require_ssl def run_test(self): From bbf55d6da554735ff3d4bf052261f6e4e91b028a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Oct 2019 17:11:43 -0700 Subject: [PATCH 0015/2111] PYTHON-2039 Avoid shadowing dns module in srv tests --- test/{dns => srv_seedlist}/longer-parent-in-return.json | 0 test/{dns => srv_seedlist}/misformatted-option.json | 0 test/{dns => srv_seedlist}/no-results.json | 0 test/{dns => srv_seedlist}/not-enough-parts.json | 0 test/{dns => srv_seedlist}/one-result-default-port.json | 0 test/{dns => srv_seedlist}/one-txt-record-multiple-strings.json | 0 test/{dns => srv_seedlist}/one-txt-record.json | 0 test/{dns => srv_seedlist}/parent-part-mismatch1.json | 0 test/{dns => srv_seedlist}/parent-part-mismatch2.json | 0 test/{dns => srv_seedlist}/parent-part-mismatch3.json | 0 test/{dns => srv_seedlist}/parent-part-mismatch4.json | 0 test/{dns => srv_seedlist}/parent-part-mismatch5.json | 0 test/{dns => srv_seedlist}/returned-parent-too-short.json | 0 test/{dns => srv_seedlist}/returned-parent-wrong.json | 0 test/{dns => srv_seedlist}/two-results-default-port.json | 0 test/{dns => srv_seedlist}/two-results-nonstandard-port.json | 0 test/{dns => srv_seedlist}/two-txt-records.json | 0 .../txt-record-with-overridden-uri-option.json | 0 .../{dns => srv_seedlist}/txt-record-with-unallowed-option.json | 0 test/{dns => srv_seedlist}/uri-with-port.json | 0 test/{dns => srv_seedlist}/uri-with-two-hosts.json | 0 test/test_dns.py | 2 +- 22 files changed, 1 insertion(+), 1 deletion(-) rename test/{dns => srv_seedlist}/longer-parent-in-return.json (100%) rename test/{dns => srv_seedlist}/misformatted-option.json (100%) rename test/{dns => srv_seedlist}/no-results.json (100%) rename test/{dns => srv_seedlist}/not-enough-parts.json (100%) rename test/{dns => srv_seedlist}/one-result-default-port.json (100%) rename test/{dns => srv_seedlist}/one-txt-record-multiple-strings.json (100%) rename test/{dns => srv_seedlist}/one-txt-record.json (100%) rename test/{dns => srv_seedlist}/parent-part-mismatch1.json (100%) rename test/{dns => srv_seedlist}/parent-part-mismatch2.json (100%) rename test/{dns => srv_seedlist}/parent-part-mismatch3.json (100%) rename test/{dns => srv_seedlist}/parent-part-mismatch4.json (100%) rename test/{dns => srv_seedlist}/parent-part-mismatch5.json (100%) rename test/{dns => srv_seedlist}/returned-parent-too-short.json (100%) rename test/{dns => srv_seedlist}/returned-parent-wrong.json (100%) rename test/{dns => srv_seedlist}/two-results-default-port.json (100%) rename test/{dns => srv_seedlist}/two-results-nonstandard-port.json (100%) rename test/{dns => srv_seedlist}/two-txt-records.json (100%) rename test/{dns => srv_seedlist}/txt-record-with-overridden-uri-option.json (100%) rename test/{dns => srv_seedlist}/txt-record-with-unallowed-option.json (100%) rename test/{dns => srv_seedlist}/uri-with-port.json (100%) rename test/{dns => srv_seedlist}/uri-with-two-hosts.json (100%) diff --git a/test/dns/longer-parent-in-return.json b/test/srv_seedlist/longer-parent-in-return.json similarity index 100% rename from test/dns/longer-parent-in-return.json rename to test/srv_seedlist/longer-parent-in-return.json diff --git a/test/dns/misformatted-option.json b/test/srv_seedlist/misformatted-option.json similarity index 100% rename from test/dns/misformatted-option.json rename to test/srv_seedlist/misformatted-option.json diff --git a/test/dns/no-results.json b/test/srv_seedlist/no-results.json similarity index 100% rename from test/dns/no-results.json rename to test/srv_seedlist/no-results.json diff --git a/test/dns/not-enough-parts.json b/test/srv_seedlist/not-enough-parts.json similarity index 100% rename from test/dns/not-enough-parts.json rename to test/srv_seedlist/not-enough-parts.json diff --git a/test/dns/one-result-default-port.json b/test/srv_seedlist/one-result-default-port.json similarity index 100% rename from test/dns/one-result-default-port.json rename to test/srv_seedlist/one-result-default-port.json diff --git a/test/dns/one-txt-record-multiple-strings.json b/test/srv_seedlist/one-txt-record-multiple-strings.json similarity index 100% rename from test/dns/one-txt-record-multiple-strings.json rename to test/srv_seedlist/one-txt-record-multiple-strings.json diff --git a/test/dns/one-txt-record.json b/test/srv_seedlist/one-txt-record.json similarity index 100% rename from test/dns/one-txt-record.json rename to test/srv_seedlist/one-txt-record.json diff --git a/test/dns/parent-part-mismatch1.json b/test/srv_seedlist/parent-part-mismatch1.json similarity index 100% rename from test/dns/parent-part-mismatch1.json rename to test/srv_seedlist/parent-part-mismatch1.json diff --git a/test/dns/parent-part-mismatch2.json b/test/srv_seedlist/parent-part-mismatch2.json similarity index 100% rename from test/dns/parent-part-mismatch2.json rename to test/srv_seedlist/parent-part-mismatch2.json diff --git a/test/dns/parent-part-mismatch3.json b/test/srv_seedlist/parent-part-mismatch3.json similarity index 100% rename from test/dns/parent-part-mismatch3.json rename to test/srv_seedlist/parent-part-mismatch3.json diff --git a/test/dns/parent-part-mismatch4.json b/test/srv_seedlist/parent-part-mismatch4.json similarity index 100% rename from test/dns/parent-part-mismatch4.json rename to test/srv_seedlist/parent-part-mismatch4.json diff --git a/test/dns/parent-part-mismatch5.json b/test/srv_seedlist/parent-part-mismatch5.json similarity index 100% rename from test/dns/parent-part-mismatch5.json rename to test/srv_seedlist/parent-part-mismatch5.json diff --git a/test/dns/returned-parent-too-short.json b/test/srv_seedlist/returned-parent-too-short.json similarity index 100% rename from test/dns/returned-parent-too-short.json rename to test/srv_seedlist/returned-parent-too-short.json diff --git a/test/dns/returned-parent-wrong.json b/test/srv_seedlist/returned-parent-wrong.json similarity index 100% rename from test/dns/returned-parent-wrong.json rename to test/srv_seedlist/returned-parent-wrong.json diff --git a/test/dns/two-results-default-port.json b/test/srv_seedlist/two-results-default-port.json similarity index 100% rename from test/dns/two-results-default-port.json rename to test/srv_seedlist/two-results-default-port.json diff --git a/test/dns/two-results-nonstandard-port.json b/test/srv_seedlist/two-results-nonstandard-port.json similarity index 100% rename from test/dns/two-results-nonstandard-port.json rename to test/srv_seedlist/two-results-nonstandard-port.json diff --git a/test/dns/two-txt-records.json b/test/srv_seedlist/two-txt-records.json similarity index 100% rename from test/dns/two-txt-records.json rename to test/srv_seedlist/two-txt-records.json diff --git a/test/dns/txt-record-with-overridden-uri-option.json b/test/srv_seedlist/txt-record-with-overridden-uri-option.json similarity index 100% rename from test/dns/txt-record-with-overridden-uri-option.json rename to test/srv_seedlist/txt-record-with-overridden-uri-option.json diff --git a/test/dns/txt-record-with-unallowed-option.json b/test/srv_seedlist/txt-record-with-unallowed-option.json similarity index 100% rename from test/dns/txt-record-with-unallowed-option.json rename to test/srv_seedlist/txt-record-with-unallowed-option.json diff --git a/test/dns/uri-with-port.json b/test/srv_seedlist/uri-with-port.json similarity index 100% rename from test/dns/uri-with-port.json rename to test/srv_seedlist/uri-with-port.json diff --git a/test/dns/uri-with-two-hosts.json b/test/srv_seedlist/uri-with-two-hosts.json similarity index 100% rename from test/dns/uri-with-two-hosts.json rename to test/srv_seedlist/uri-with-two-hosts.json diff --git a/test/test_dns.py b/test/test_dns.py index 600ddcbcd1..be972fa352 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -31,7 +31,7 @@ _TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'dns') + os.path.dirname(os.path.realpath(__file__)), 'srv_seedlist') class TestDNS(unittest.TestCase): pass From 9cf0fbd785086ef1702bbafd2ad92a279fe406e4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 8 Nov 2019 11:48:15 -0800 Subject: [PATCH 0016/2111] PYTHON-2001 Fix warnings emitted by Python 3.8 (#428) Fix DeprecationWarning: PY_SSIZE_T_CLEAN will be required for '#' formats Fix DeprecationWarning: isAlive() is deprecated, use is_alive() instead Fix SyntaxWarning: invalid escape sequence Test Python 3.8 on Travis --- .travis.yml | 1 + bson/_cbsonmodule.c | 39 ++++++----------- bson/_cbsonmodule.h | 10 ++++- bson/time64.c | 1 + pymongo/_cmessagemodule.c | 92 ++++++++++++++++++++------------------- pymongo/database.py | 2 +- test/test_custom_types.py | 4 +- test/test_database.py | 2 +- test/test_encryption.py | 4 +- test/test_pooling.py | 2 +- test/utils.py | 4 +- 11 files changed, 81 insertions(+), 80 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6e9bd3944d..5dd72f6da5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ python: - 3.5 - 3.6 - 3.7 + - 3.8 - pypy - pypy3.5 diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index e82e1bb347..1fbb48cc96 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -20,6 +20,7 @@ * should be used to speed up BSON encoding and decoding. */ +#define PY_SSIZE_T_CLEAN #include "Python.h" #include "datetime.h" @@ -1818,13 +1819,8 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { } /* objectify buffer */ -#if PY_MAJOR_VERSION >= 3 - result = Py_BuildValue("y#", buffer_get_buffer(buffer), - buffer_get_position(buffer)); -#else - result = Py_BuildValue("s#", buffer_get_buffer(buffer), - buffer_get_position(buffer)); -#endif + result = Py_BuildValue(BYTES_FORMAT_STRING, buffer_get_buffer(buffer), + (Py_ssize_t)buffer_get_position(buffer)); destroy_codec_options(&options); buffer_free(buffer); return result; @@ -1896,7 +1892,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (options->is_raw_bson) { value = PyObject_CallFunction( options->document_class, BYTES_FORMAT_STRING "O", - buffer + *position, size, options->options_obj); + buffer + *position, (Py_ssize_t)size, options->options_obj); if (!value) { goto invalid; } @@ -2175,11 +2171,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { -#if PY_MAJOR_VERSION >= 3 - value = PyObject_CallFunction(objectid_type, "y#", buffer + *position, 12); -#else - value = PyObject_CallFunction(objectid_type, "s#", buffer + *position, 12); -#endif + value = PyObject_CallFunction(objectid_type, BYTES_FORMAT_STRING, + buffer + *position, (Py_ssize_t)12); Py_DECREF(objectid_type); } *position += 12; @@ -2365,11 +2358,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, *position += coll_length; if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { -#if PY_MAJOR_VERSION >= 3 - id = PyObject_CallFunction(objectid_type, "y#", buffer + *position, 12); -#else - id = PyObject_CallFunction(objectid_type, "s#", buffer + *position, 12); -#endif + id = PyObject_CallFunction(objectid_type, BYTES_FORMAT_STRING, + buffer + *position, (Py_ssize_t)12); Py_DECREF(objectid_type); } if (!id) { @@ -2556,13 +2546,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, "Decimal128"))) { value = PyObject_CallMethod(dec128, "from_bid", -#if PY_MAJOR_VERSION >= 3 - "y#", -#else - "s#", -#endif + BYTES_FORMAT_STRING, buffer + *position, - 16); + (Py_ssize_t)16); Py_DECREF(dec128); } *position += 16; @@ -2939,7 +2925,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { /* No need to decode fields if using RawBSONDocument */ if (options.is_raw_bson) { result = PyObject_CallFunction( - options.document_class, BYTES_FORMAT_STRING "O", string, size, + options.document_class, BYTES_FORMAT_STRING "O", string, (Py_ssize_t)size, options_obj); } else { @@ -3031,7 +3017,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { /* No need to decode fields if using RawBSONDocument. */ if (options.is_raw_bson) { dict = PyObject_CallFunction( - options.document_class, BYTES_FORMAT_STRING "O", string, size, + options.document_class, BYTES_FORMAT_STRING "O", string, (Py_ssize_t)size, options_obj); } else { dict = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); @@ -3143,6 +3129,7 @@ init_cbson(void) _cbson_API[_cbson_buffer_write_int64_INDEX] = (void *) buffer_write_int64; _cbson_API[_cbson_buffer_write_int32_at_position_INDEX] = (void *) buffer_write_int32_at_position; + _cbson_API[_cbson_downcast_and_check_INDEX] = (void *) _downcast_and_check; #if PY_VERSION_HEX >= 0x03010000 /* PyCapsule is new in python 3.1 */ diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 237eea0374..69590d5647 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -113,8 +113,12 @@ typedef struct codec_options_t { #define _cbson_buffer_write_int32_at_position_RETURN void #define _cbson_buffer_write_int32_at_position_PROTO (buffer_t buffer, int position, int32_t data) +#define _cbson_downcast_and_check_INDEX 10 +#define _cbson_downcast_and_check_RETURN int +#define _cbson_downcast_and_check_PROTO (Py_ssize_t size, uint8_t extra) + /* Total number of C API pointers */ -#define _cbson_API_POINTER_COUNT 10 +#define _cbson_API_POINTER_COUNT 11 #ifdef _CBSON_MODULE /* This section is used when compiling _cbsonmodule */ @@ -139,6 +143,8 @@ static _cbson_buffer_write_int64_RETURN buffer_write_int64 _cbson_buffer_write_i static _cbson_buffer_write_int32_at_position_RETURN buffer_write_int32_at_position _cbson_buffer_write_int32_at_position_PROTO; +static _cbson_downcast_and_check_RETURN _downcast_and_check _cbson_downcast_and_check_PROTO; + #else /* This section is used in modules that use _cbsonmodule's API */ @@ -164,6 +170,8 @@ static void **_cbson_API; #define buffer_write_int32_at_position (*(_cbson_buffer_write_int32_at_position_RETURN (*)_cbson_buffer_write_int32_at_position_PROTO) _cbson_API[_cbson_buffer_write_int32_at_position_INDEX]) +#define _downcast_and_check (*(_cbson_downcast_and_check_RETURN (*)_cbson_downcast_and_check_PROTO) _cbson_API[_cbson_downcast_and_check_INDEX]) + #define _cbson_IMPORT _cbson_API = (void **)PyCapsule_Import("_cbson._C_API", 0) #endif diff --git a/bson/time64.c b/bson/time64.c index d9173e7f7e..bad6b51dc1 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -44,6 +44,7 @@ gmtime64_r() is a 64-bit equivalent of gmtime_r(). #endif /* Including Python.h fixes issues with interpreters built with -std=c99. */ +#define PY_SSIZE_T_CLEAN #include "Python.h" #include diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 2ed23dd2c2..b3a82d6312 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -20,6 +20,7 @@ * should be used to speed up message creation. */ +#define PY_SSIZE_T_CLEAN #include "Python.h" #include "_cbsonmodule.h" @@ -37,12 +38,6 @@ struct module_state { static struct module_state _state; #endif -#if PY_MAJOR_VERSION >= 3 -#define BYTES_FORMAT_STRING "y#" -#else -#define BYTES_FORMAT_STRING "s#" -#endif - #define DOC_TOO_LARGE_FMT "BSON document too large (%d bytes)" \ " - the connected server supports" \ " BSON document sizes up to %ld bytes." @@ -61,10 +56,21 @@ static PyObject* _error(char* name) { return error; } +/* The same as buffer_write_bytes except that it also validates + * "size" will fit in an int. + * Returns 0 on failure */ +static int buffer_write_bytes_ssize_t(buffer_t buffer, const char* data, Py_ssize_t size) { + int downsize = _downcast_and_check(size, 0); + if (size == -1) { + return 0; + } + return buffer_write_bytes(buffer, data, downsize); +} + /* add a lastError message on the end of the buffer. * returns 0 on failure */ static int add_last_error(PyObject* self, buffer_t buffer, - int request_id, char* ns, int nslen, + int request_id, char* ns, Py_ssize_t nslen, codec_options_t* options, PyObject* args) { struct module_state *state = GETSTATE(self); @@ -91,8 +97,7 @@ static int add_last_error(PyObject* self, buffer_t buffer, "\xd4\x07\x00\x00" /* opcode */ "\x00\x00\x00\x00", /* options */ 12) || - !buffer_write_bytes(buffer, - ns, nslen) || /* database */ + !buffer_write_bytes_ssize_t(buffer, ns, nslen) || /* database */ !buffer_write_bytes(buffer, ".$cmd\x00" /* collection name */ "\x00\x00\x00\x00" /* skip */ @@ -142,7 +147,7 @@ static int add_last_error(PyObject* self, buffer_t buffer, } static int init_insert_buffer(buffer_t buffer, int request_id, int options, - const char* coll_name, int coll_name_len, + const char* coll_name, Py_ssize_t coll_name_len, int compress) { int length_location = 0; if (!compress) { @@ -161,9 +166,9 @@ static int init_insert_buffer(buffer_t buffer, int request_id, int options, } } if (!buffer_write_int32(buffer, (int32_t)options) || - !buffer_write_bytes(buffer, - coll_name, - coll_name_len + 1)) { + !buffer_write_bytes_ssize_t(buffer, + coll_name, + coll_name_len + 1)) { return -1; } return length_location; @@ -177,7 +182,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ int request_id = rand(); char* collection_name = NULL; - int collection_name_length; + Py_ssize_t collection_name_length; PyObject* docs; PyObject* doc; PyObject* iterator; @@ -293,7 +298,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), max_size); destroy_codec_options(&options); buffer_free(buffer); @@ -306,7 +311,7 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { int request_id = rand(); char* collection_name = NULL; - int collection_name_length; + Py_ssize_t collection_name_length; int before, cur_size, max_size = 0; PyObject* doc; PyObject* spec; @@ -360,9 +365,9 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { "\xd1\x07\x00\x00" "\x00\x00\x00\x00", 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1) || + !buffer_write_bytes_ssize_t(buffer, + collection_name, + collection_name_length + 1) || !buffer_write_int32(buffer, (int32_t)flags)) { destroy_codec_options(&options); buffer_free(buffer); @@ -409,7 +414,7 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), max_size); destroy_codec_options(&options); buffer_free(buffer); @@ -424,7 +429,7 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { PyObject* cluster_time = NULL; unsigned int flags; char* collection_name = NULL; - int collection_name_length; + Py_ssize_t collection_name_length; int begin, cur_size, max_size = 0; int num_to_skip; int num_to_return; @@ -490,8 +495,8 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || !buffer_write_int32(buffer, (int32_t)flags) || - !buffer_write_bytes(buffer, collection_name, - collection_name_length + 1) || + !buffer_write_bytes_ssize_t(buffer, collection_name, + collection_name_length + 1) || !buffer_write_int32(buffer, (int32_t)num_to_skip) || !buffer_write_int32(buffer, (int32_t)num_to_return)) { goto fail; @@ -548,7 +553,7 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), max_size); fail: @@ -563,7 +568,7 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ int request_id = rand(); char* collection_name = NULL; - int collection_name_length; + Py_ssize_t collection_name_length; int num_to_return; long long cursor_id; buffer_t buffer; @@ -597,9 +602,9 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { "\x00\x00\x00\x00" "\xd5\x07\x00\x00" "\x00\x00\x00\x00", 12) || - !buffer_write_bytes(buffer, - collection_name, - collection_name_length + 1) || + !buffer_write_bytes_ssize_t(buffer, + collection_name, + collection_name_length + 1) || !buffer_write_int32(buffer, (int32_t)num_to_return) || !buffer_write_int64(buffer, (int64_t)cursor_id)) { buffer_free(buffer); @@ -616,7 +621,7 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer)); + (Py_ssize_t)buffer_get_position(buffer)); buffer_free(buffer); return result; } @@ -634,7 +639,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { unsigned int flags; PyObject* command; char* identifier = NULL; - int identifier_length = 0; + Py_ssize_t identifier_length = 0; PyObject* docs; PyObject* doc; unsigned char check_keys = 0; @@ -696,7 +701,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { /* save space for payload 0 length */ payload_one_length_location = buffer_save_space(buffer, 4); /* C string identifier */ - if (!buffer_write_bytes(buffer, identifier, identifier_length + 1)) { + if (!buffer_write_bytes_ssize_t(buffer, identifier, identifier_length + 1)) { goto encodefail; } iterator = PyObject_GetIter(docs); @@ -730,7 +735,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING "ii", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), total_size, max_doc_size); encodefail: @@ -763,7 +768,7 @@ _set_document_too_large(int size, long max) { static PyObject* _send_insert(PyObject* self, PyObject* ctx, PyObject* gle_args, buffer_t buffer, - char* coll_name, int coll_len, int request_id, int safe, + char* coll_name, Py_ssize_t coll_len, int request_id, int safe, codec_options_t* options, PyObject* to_publish, int compress) { if (safe) { @@ -779,7 +784,7 @@ _send_insert(PyObject* self, PyObject* ctx, "i" BYTES_FORMAT_STRING "iNOi", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), 0, PyBool_FromLong((long)safe), to_publish, compress); @@ -792,7 +797,7 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { int request_id = rand(); int send_safe, flags = 0; int length_location, message_length; - int collection_name_length; + Py_ssize_t collection_name_length; int compress; char* collection_name = NULL; PyObject* docs; @@ -1344,7 +1349,7 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { result = Py_BuildValue(BYTES_FORMAT_STRING "O", buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); @@ -1415,7 +1420,7 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); result = Py_BuildValue("i" BYTES_FORMAT_STRING "O", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); @@ -1428,7 +1433,7 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { static int _batched_write_command( - char* ns, int ns_len, unsigned char op, int check_keys, + char* ns, Py_ssize_t ns_len, unsigned char op, int check_keys, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t options, buffer_t buffer, struct module_state *state) { @@ -1476,8 +1481,7 @@ _batched_write_command( if (!buffer_write_bytes(buffer, "\x00\x00\x00\x00", /* flags */ 4) || - !buffer_write_bytes(buffer, - ns, ns_len + 1) || /* namespace */ + !buffer_write_bytes_ssize_t(buffer, ns, ns_len + 1) || /* namespace */ !buffer_write_bytes(buffer, "\x00\x00\x00\x00" /* skip */ "\xFF\xFF\xFF\xFF", /* limit (-1) */ @@ -1634,7 +1638,7 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; unsigned char check_keys; - int ns_len; + Py_ssize_t ns_len; PyObject* command; PyObject* docs; PyObject* ctx = NULL; @@ -1677,7 +1681,7 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { result = Py_BuildValue(BYTES_FORMAT_STRING "O", buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), to_publish); fail: PyMem_Free(ns); @@ -1692,7 +1696,7 @@ _cbson_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; unsigned char check_keys; - int ns_len; + Py_ssize_t ns_len; int request_id; int position; PyObject* command; @@ -1752,7 +1756,7 @@ _cbson_batched_write_command(PyObject* self, PyObject* args) { buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); result = Py_BuildValue("i" BYTES_FORMAT_STRING "O", request_id, buffer_get_buffer(buffer), - buffer_get_position(buffer), + (Py_ssize_t)buffer_get_position(buffer), to_publish); fail: PyMem_Free(ns); diff --git a/pymongo/database.py b/pymongo/database.py index c6900fed11..701e55221e 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -823,7 +823,7 @@ def list_collection_names(self, session=None, filter=None, **kwargs): For example, to list all non-system collections:: - filter = {"name": {"$regex": r"^(?!system\.)"}} + filter = {"name": {"$regex": r"^(?!system\\.)"}} db.list_collection_names(filter=filter) :Parameters: diff --git a/test/test_custom_types.py b/test/test_custom_types.py index b0a190d0f9..3d937082fd 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -512,8 +512,8 @@ def test_type_registry_eq(self): def test_builtin_types_override_fails(self): def run_test(base, attrs): - msg = ("TypeEncoders cannot change how built-in types " - "are encoded \(encoder .* transforms type .*\)") + msg = (r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)") for pytype in _BUILT_IN_TYPES: attrs.update({'python_type': pytype, 'transform_python': lambda x: x}) diff --git a/test/test_database.py b/test/test_database.py index 983bcecec5..76a549c95f 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -1062,7 +1062,7 @@ def test_database_aggregation_fake_cursor(self): @client_context.require_version_max(3, 6, 0, -1) def test_database_aggregation_unsupported(self): - err_msg = "Database.aggregate\(\) is only supported on MongoDB 3.6\+." + err_msg = r"Database.aggregate\(\) is only supported on MongoDB 3.6\+." with self.assertRaisesRegex(ConfigurationError, err_msg): with self.admin.aggregate(self.pipeline) as _: pass diff --git a/test/test_encryption.py b/test/test_encryption.py index 58a392d699..fbd6c7010e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -684,8 +684,8 @@ def test_data_key(self): self.assertEqual(aws_encrypted_altname, aws_encrypted) # Explicitly encrypting an auto encrypted field. - msg = ('Cannot encrypt element of type binData because schema ' - 'requires that type is one of: \[ string \]') + msg = (r'Cannot encrypt element of type binData because schema ' + r'requires that type is one of: \[ string \]') with self.assertRaisesRegex(EncryptionError, msg): client_encrypted.db.coll.insert_one( {'encrypted_placeholder': local_encrypted}) diff --git a/test/test_pooling.py b/test/test_pooling.py index f5945a43d5..922deecdcf 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -53,7 +53,7 @@ def gc_collect_until_done(threads, timeout=60): assert (time.time() - start) < timeout, "Threads timed out" for t in running: t.join(0.1) - if not t.isAlive(): + if not t.is_alive(): running.remove(t) gc.collect() diff --git a/test/utils.py b/test/utils.py index e308a53c6f..d26b961380 100644 --- a/test/utils.py +++ b/test/utils.py @@ -579,7 +579,7 @@ def joinall(threads): """Join threads with a 5-minute timeout, assert joins succeeded""" for t in threads: t.join(300) - assert not t.isAlive(), "Thread %s hung" % t + assert not t.is_alive(), "Thread %s hung" % t def connected(client): @@ -708,7 +708,7 @@ def run_threads(collection, target): for t in threads: t.join(60) - assert not t.isAlive() + assert not t.is_alive() @contextlib.contextmanager From c65367b8f05605b4e52055a09f9cb9570aa2215a Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 7 Nov 2019 17:04:27 -0800 Subject: [PATCH 0017/2111] PYTHON-1972 Add example usage for withTransaction API --- test/test_examples.py | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/test/test_examples.py b/test/test_examples.py index efc6307aa6..cadd15d959 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -27,8 +27,7 @@ from pymongo.write_concern import WriteConcern from test import client_context, unittest, IntegrationTest -from test.utils import rs_or_single_client - +from test.utils import rs_client, rs_or_single_client class TestSampleShellCommands(unittest.TestCase): @@ -1037,6 +1036,44 @@ def update_employee_info(session): self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') + MongoClient = lambda _: rs_client() + uriString = None + + # Start Transactions withTxn API Example 1 + + # For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g. + # uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl' + # For a sharded cluster, connect to the mongos instances; e.g. + # uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/' + + client = MongoClient(uriString) + wc_majority = WriteConcern("majority", wtimeout=1000) + + # Prereq: Create collections. CRUD operations in transactions must be on existing collections. + client.get_database( + "mydb1", write_concern=wc_majority).foo.insert_one({'abc': 0}) + client.get_database( + "mydb2", write_concern=wc_majority).bar.insert_one({'xyz': 0}) + + # Step 1: Define the callback that specifies the sequence of operations to perform inside the transactions. + def callback(session): + collection_one = session.client.mydb1.foo + collection_two = session.client.mydb2.bar + + # Important:: You must pass the session to the operations. + collection_one.insert_one({'abc': 1}, session=session) + collection_two.insert_one({'xyz': 999}, session=session) + + # Step 2: Start a client session. + with client.start_session() as session: + # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). + session.with_transaction( + callback, read_concern=ReadConcern('local'), + write_concern=wc_majority, + read_preference=ReadPreference.PRIMARY) + + # End Transactions withTxn API Example 1 + @client_context.require_transactions def test_transactions_beta(self): # Transaction beta examples From a3556c44472b25c213fc0d29122e80fea1989860 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 31 Oct 2019 15:45:59 -0700 Subject: [PATCH 0018/2111] PYTHON-2012 Update FLE to support commands larger than 6MiB Bulk write command are batched at 2MiB when auto encryption is enabled. --- pymongo/_cmessagemodule.c | 19 +++++++++- pymongo/encryption.py | 7 ---- pymongo/message.py | 31 ++++++++-------- test/test_encryption.py | 75 +++++++++++++++++++++++---------------- 4 files changed, 80 insertions(+), 52 deletions(-) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index b3a82d6312..7c4a517c5c 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -1441,6 +1441,7 @@ _batched_write_command( long max_bson_size; long max_cmd_size; long max_write_batch_size; + long max_split_size; int idx = 0; int cmd_len_loc; int lst_len_loc; @@ -1448,6 +1449,7 @@ _batched_write_command( int length; PyObject* max_bson_size_obj = NULL; PyObject* max_write_batch_size_obj = NULL; + PyObject* max_split_size_obj = NULL; PyObject* doc = NULL; PyObject* iterator = NULL; @@ -1478,6 +1480,20 @@ _batched_write_command( return 0; } + // max_split_size is the size at which to perform a batch split. + // Normally this this value is equal to max_bson_size (16MiB). However, + // when auto encryption is enabled max_split_size is reduced to 2MiB. + max_split_size_obj = PyObject_GetAttrString(ctx, "max_split_size"); +#if PY_MAJOR_VERSION >= 3 + max_split_size = PyLong_AsLong(max_split_size_obj); +#else + max_split_size = PyInt_AsLong(max_split_size_obj); +#endif + Py_XDECREF(max_split_size_obj); + if (max_split_size == -1) { + return 0; + } + if (!buffer_write_bytes(buffer, "\x00\x00\x00\x00", /* flags */ 4) || @@ -1570,7 +1586,6 @@ _batched_write_command( * max_cmd_size accounts for the two trailing null bytes. */ cur_size = buffer_get_position(buffer) - cur_doc_begin; - enough_data = (buffer_get_position(buffer) > max_cmd_size); /* This single document is too large for the command. */ if (cur_size > max_cmd_size) { if (op == _INSERT) { @@ -1591,6 +1606,8 @@ _batched_write_command( } goto fail; } + enough_data = (idx >= 1 && + (buffer_get_position(buffer) > max_split_size)); if (enough_data) { /* * Roll the existing buffer back to the beginning diff --git a/pymongo/encryption.py b/pymongo/encryption.py index b61937cb66..bdbd4fc723 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -50,9 +50,6 @@ EncryptionError, InvalidOperation, ServerSelectionTimeoutError) -from pymongo.message import (_COMMAND_OVERHEAD, - _MAX_ENC_BSON_SIZE, - _raise_document_too_large) from pymongo.mongo_client import MongoClient from pymongo.pool import _configured_socket, PoolOptions from pymongo.read_concern import ReadConcern @@ -277,10 +274,6 @@ def encrypt(self, database, cmd, check_keys, codec_options): # check_keys. cluster_time = check_keys and cmd.pop('$clusterTime', None) encoded_cmd = _dict_to_bson(cmd, check_keys, codec_options) - max_cmd_size = _MAX_ENC_BSON_SIZE + _COMMAND_OVERHEAD - if len(encoded_cmd) > max_cmd_size: - raise _raise_document_too_large( - next(iter(cmd)), len(encoded_cmd), max_cmd_size) with _wrap_encryption_errors(): encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. diff --git a/pymongo/message.py b/pymongo/message.py index 2b8ff10422..6e8c596953 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -929,6 +929,11 @@ def max_write_batch_size(self): """A proxy for SockInfo.max_write_batch_size.""" return self.sock_info.max_write_batch_size + @property + def max_split_size(self): + """The maximum size of a BSON command before batch splitting.""" + return self.max_bson_size + def legacy_bulk_insert( self, request_id, msg, max_doc_size, acknowledged, docs, compress): if compress: @@ -1011,10 +1016,11 @@ def _fail(self, request_id, failure, duration): request_id, self.sock_info.address, self.op_id) -# 2MiB -_MAX_ENC_BSON_SIZE = 2 * (1024 * 1024) -# 6MB -_MAX_ENC_MESSAGE_SIZE = 6 * (1000 * 1000) +# From the Client Side Encryption spec: +# Because automatic encryption increases the size of commands, the driver +# MUST split bulk writes at a reduced size limit before undergoing automatic +# encryption. The write payload MUST be split at 2MiB (2097152). +_MAX_SPLIT_SIZE_ENC = 2097152 class _EncryptedBulkWriteContext(_BulkWriteContext): @@ -1049,14 +1055,9 @@ def execute_unack(self, docs, client): return to_send @property - def max_bson_size(self): - """A proxy for SockInfo.max_bson_size.""" - return min(self.sock_info.max_bson_size, _MAX_ENC_BSON_SIZE) - - @property - def max_message_size(self): - """A proxy for SockInfo.max_message_size.""" - return min(self.sock_info.max_message_size, _MAX_ENC_MESSAGE_SIZE) + def max_split_size(self): + """Reduce the batch splitting size.""" + return _MAX_SPLIT_SIZE_ENC def _raise_document_too_large(operation, doc_size, max_size): @@ -1388,6 +1389,7 @@ def _batched_write_command_impl( # Max BSON object size + 16k - 2 bytes for ending NUL bytes. # Server guarantees there is enough room: SERVER-10643. max_cmd_size = max_bson_size + _COMMAND_OVERHEAD + max_split_size = ctx.max_split_size # No options buf.write(_ZERO_32) @@ -1424,12 +1426,13 @@ def _batched_write_command_impl( # Is there enough room to add this document? max_cmd_size accounts for # the two trailing null bytes. doc_too_large = len(value) > max_cmd_size - enough_data = (buf.tell() + len(key) + len(value)) >= max_cmd_size - enough_documents = (idx >= max_write_batch_size) if doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] _raise_document_too_large( write_op, len(value), max_bson_size) + enough_data = (idx >= 1 and + (buf.tell() + len(key) + len(value)) >= max_split_size) + enough_documents = (idx >= max_write_batch_size) if enough_data or enough_documents: break buf.write(_BSONOBJ) diff --git a/test/test_encryption.py b/test/test_encryption.py index fbd6c7010e..91018d74bf 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -35,15 +35,15 @@ from bson.son import SON from pymongo.cursor import CursorType -from pymongo.errors import (ConfigurationError, - EncryptionError, - InvalidOperation, - OperationFailure) from pymongo.encryption import (Algorithm, ClientEncryption) -from pymongo.errors import ConfigurationError, DocumentTooLarge from pymongo.encryption_options import AutoEncryptionOpts, _HAVE_PYMONGOCRYPT -from pymongo.message import _COMMAND_OVERHEAD +from pymongo.errors import (BulkWriteError, + ConfigurationError, + EncryptionError, + InvalidOperation, + OperationFailure, + WriteError) from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne from pymongo.write_concern import WriteConcern @@ -918,6 +918,10 @@ def test_corpus_local_schema(self): self._test_corpus(opts) +_2_MiB = 2097152 +_16_MiB = 16777216 + + class TestBsonSizeBatches(EncryptionIntegrationTest): """Prose tests for BSON size limits and batch splitting.""" @@ -955,27 +959,14 @@ def tearDownClass(cls): super(TestBsonSizeBatches, cls).tearDownClass() def test_01_insert_succeeds_under_2MiB(self): - doc = {'_id': 'no_encryption_under_2mib', - 'unencrypted': 'a' * ((2**21) - 1000)} + doc = {'_id': 'over_2mib_under_16mib', 'unencrypted': 'a' * _2_MiB} self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc = {'_id': 'no_encryption_under_2mib_bulk', - 'unencrypted': 'a' * ((2**21) - 1000)} + doc['_id'] = 'over_2mib_under_16mib_bulk' self.coll_encrypted.bulk_write([InsertOne(doc)]) - def test_02_insert_fails_over_2MiB(self): - doc = {'_id': 'no_encryption_over_2mib', - 'unencrypted': 'a' * (2**21 + _COMMAND_OVERHEAD)} - - with self.assertRaises(DocumentTooLarge): - self.coll_encrypted.insert_one(doc) - with self.assertRaises(DocumentTooLarge): - self.coll_encrypted.insert_many([doc]) - with self.assertRaises(DocumentTooLarge): - self.coll_encrypted.bulk_write([InsertOne(doc)]) - - def test_03_insert_succeeds_over_2MiB_post_encryption(self): + def test_02_insert_succeeds_over_2MiB_post_encryption(self): doc = {'_id': 'encryption_exceeds_2mib', 'unencrypted': 'a' * ((2**21) - 2000)} doc.update(json_data('limits', 'limits-doc.json')) @@ -985,29 +976,53 @@ def test_03_insert_succeeds_over_2MiB_post_encryption(self): doc['_id'] = 'encryption_exceeds_2mib_bulk' self.coll_encrypted.bulk_write([InsertOne(doc)]) - def test_04_bulk_batch_split(self): - doc1 = {'_id': 'no_encryption_under_2mib_1', - 'unencrypted': 'a' * ((2**21) - 1000)} - doc2 = {'_id': 'no_encryption_under_2mib_2', - 'unencrypted': 'a' * ((2**21) - 1000)} + def test_03_bulk_batch_split(self): + doc1 = {'_id': 'over_2mib_1', 'unencrypted': 'a' * _2_MiB} + doc2 = {'_id': 'over_2mib_2', 'unencrypted': 'a' * _2_MiB} self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) self.assertEqual( self.listener.started_command_names(), ['insert', 'insert']) - def test_05_bulk_batch_split(self): + def test_04_bulk_batch_split(self): limits_doc = json_data('limits', 'limits-doc.json') doc1 = {'_id': 'encryption_exceeds_2mib_1', - 'unencrypted': 'a' * ((2**21) - 2000)} + 'unencrypted': 'a' * (_2_MiB - 2000)} doc1.update(limits_doc) doc2 = {'_id': 'encryption_exceeds_2mib_2', - 'unencrypted': 'a' * ((2**21) - 2000)} + 'unencrypted': 'a' * (_2_MiB - 2000)} doc2.update(limits_doc) self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) self.assertEqual( self.listener.started_command_names(), ['insert', 'insert']) + def test_05_insert_succeeds_just_under_16MiB(self): + doc = {'_id': 'under_16mib', 'unencrypted': 'a' * (_16_MiB - 2000)} + self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc['_id'] = 'under_16mib_bulk' + self.coll_encrypted.bulk_write([InsertOne(doc)]) + + def test_06_insert_fails_over_16MiB(self): + limits_doc = json_data('limits', 'limits-doc.json') + doc = {'_id': 'encryption_exceeds_16mib', + 'unencrypted': 'a' * (_16_MiB - 2000)} + doc.update(limits_doc) + + with self.assertRaisesRegex(WriteError, 'object to insert too large'): + self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc['_id'] = 'encryption_exceeds_16mib_bulk' + with self.assertRaises(BulkWriteError) as ctx: + self.coll_encrypted.bulk_write([InsertOne(doc)]) + err = ctx.exception.details['writeErrors'][0] + self.assertEqual(2, err['code']) + self.assertIn('object to insert too large', err['errmsg']) + + class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" From 4cbbd85c4c1fdba47fae3914723f600527443437 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 12 Nov 2019 12:59:36 -0800 Subject: [PATCH 0019/2111] PYTHON-2009 Test with Python 3.8 in Evergreen --- .evergreen/config.yml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index fda4413027..c9db500c2b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -969,6 +969,10 @@ axes: display_name: "Python 3.7" variables: PYTHON_BINARY: "/opt/python/3.7/bin/python3" + - id: "3.8" + display_name: "Python 3.8" + variables: + PYTHON_BINARY: "/opt/python/3.8/bin/python3" - id: "pypy" display_name: "PyPy" variables: @@ -1333,7 +1337,7 @@ buildvariants: - matrix_name: "tests-python-version-ubuntu1604-without-c-extensions" matrix_spec: platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7"] + python-version: &openssl-102-plus-pythons ["3.7", "3.8"] c-extensions: without-c-extensions auth-ssl: noauth-nossl display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" @@ -1349,7 +1353,7 @@ buildvariants: matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "pypy", "pypy3.5", "jython2.7"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "pypy", "pypy3.5", "jython2.7"] c-extensions: "*" compression: "*" exclude_spec: @@ -1363,6 +1367,11 @@ buildvariants: python-version: ["jython2.7"] c-extensions: "*" compression: ["snappy", "zstd"] + # Some tests fail with CPython 3.8 and python-snappy + - platform: ubuntu-16.04 + python-version: ["3.8"] + c-extensions: "*" + compression: ["snappy"] display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" @@ -1416,7 +1425,7 @@ buildvariants: - matrix_name: "tests-python-version-requires-openssl-102-plus-test-ssl" matrix_spec: platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7"] + python-version: &openssl-102-plus-pythons ["3.7", "3.8"] auth-ssl: "*" display_name: "${python-version} OpenSSL 1.0.2 ${platform} ${auth-ssl}" tasks: From bfdf48edfa65edc8027aaf2d4472f411a17be4b1 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 13 Nov 2019 08:21:45 -0800 Subject: [PATCH 0020/2111] PYTHON-2009 Add Python 3.8 to trove classifiers --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index d116b02ec1..68d9c3e898 100755 --- a/setup.py +++ b/setup.py @@ -384,6 +384,7 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], From d37540ae15321a74536e1a932474b41bd643f507 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 13 Nov 2019 14:17:50 -0800 Subject: [PATCH 0021/2111] PYTHON-2053 Test with pypy3.6 --- .evergreen/config.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c9db500c2b..27a593aaca 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -981,6 +981,10 @@ axes: display_name: "PyPy 3.5" variables: PYTHON_BINARY: "/opt/python/pypy3.5/bin/pypy3" + - id: "pypy3.6" + display_name: "PyPy 3.6" + variables: + PYTHON_BINARY: "/opt/python/pypy3.6/bin/pypy3" - id: "jython2.7" display_name: "Jython 2.7" batchtime: 10080 # 7 days @@ -1279,7 +1283,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: &rhel62-pythons ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "jython2.7"] + python-version: &rhel62-pythons ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] auth: "*" ssl: "*" coverage: "*" @@ -1308,7 +1312,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "pypy3.6"] auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -1327,7 +1331,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: rhel62 - python-version: ["pypy", "pypy3.5", "jython2.7"] + python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -1353,13 +1357,13 @@ buildvariants: matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "pypy", "pypy3.5", "jython2.7"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-16.04 - python-version: ["pypy", "pypy3.5", "jython2.7"] + python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] c-extensions: "with-c-extensions" compression: "*" # Jython doesn't support some compression types. @@ -1396,7 +1400,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: rhel62 - python-version: ["pypy", "pypy3.5", "jython2.7"] + python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" From 1fdfb9864b3c0614f0f46add0bb7a25ebb8d2e77 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 14 Nov 2019 13:29:45 -0800 Subject: [PATCH 0022/2111] PYTHON-2054 Remove unused example TestTransactionExamples.test_transactions_beta --- test/test_examples.py | 77 ------------------------------------------- 1 file changed, 77 deletions(-) diff --git a/test/test_examples.py b/test/test_examples.py index cadd15d959..4db7cf020d 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1074,83 +1074,6 @@ def callback(session): # End Transactions withTxn API Example 1 - @client_context.require_transactions - def test_transactions_beta(self): - # Transaction beta examples - client = self.client - self.addCleanup(client.drop_database, "test") - - db = client.test - shipment = db.create_collection("shipment") - inventory = db.create_collection("inventory") - inventory.insert_one({"sku": "abc123", "qty": 500}) - - # Start Beta Transaction Example 1 - db = client.test - with client.start_session() as s: - with s.start_transaction(): - db.inventory.update_one({'sku': 'abc123'}, - {'$inc': {'qty': -100}}, - session=s) - db.shipment.insert_one({'sku': 'abc123', 'qty': 100}, - session=s) - # End Beta Transaction Example 1 - - # Beta Transaction Example 1 with explicit start, commit, and abort. - with client.start_session() as s: - s.start_transaction() - try: - db.inventory.update_one({'sku': 'abc123'}, - {'$inc': {'qty': -100}}, - session=s) - db.shipment.insert_one({'sku': 'abc123', 'qty': 100}, - session=s) - except Exception: - s.abort_transaction() - raise - s.commit_transaction() - - # Start Beta Transaction Example 2 - db = client.test - shipment = db.get_collection('shipment', - write_concern=WriteConcern(w='majority')) - - # In the following block, the following write concerns are used: - # the update_one and insert_one operations uses w = 1, - # the transaction commit/abort uses w = 'majority'. - with client.start_session() as s: - with s.start_transaction(write_concern=WriteConcern(w='majority')): - inventory.update_one({'sku': 'abc123'}, - {'$inc': {'qty': -100}}, - session=s) - shipment.insert_one({'sku': 'abc123', 'qty': 100}, session=s) - # End Beta Transaction Example 2 - - # Start Beta Transaction Example 3 - def run_transaction(session, txn_callback): - with session.start_transaction(): - txn_callback(session) - - def run_transaction_with_retry(session, txn_callback): - try: - run_transaction(session, txn_callback) - except (OperationFailure, ConnectionFailure) as exc: - if exc.has_error_label("TransientTransactionError"): - # Retry the entire transaction on temporary transaction - # failures. - run_transaction(session, txn_callback) - else: - raise - - def shipment_transaction(session): - inventory.update_one({'sku': 'abc123'}, {'$inc': {'qty': -100}}, - session=session) - shipment.insert_one({'sku': 'abc123', 'qty': 100}, session=session) - - with client.start_session() as session: - run_transaction_with_retry(session, shipment_transaction) - # End Beta Transaction Example 3 - class TestCausalConsistencyExamples(IntegrationTest): @client_context.require_version_min(3, 6, 0) From 083c2474dad0e2ff2130edb5d77e668bb5f15f1e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 14 Nov 2019 16:18:12 -0800 Subject: [PATCH 0023/2111] PYTHON-2055 Subtract message header from compressed bulk OP_MSG This change prevents pymongo from generating a bulk OP_COMPRESSED/OP_MSG with an uncompressed message larger than the server's limit of maxMessageSizeBytes - 16. --- pymongo/message.py | 3 +++ test/test_bulk.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/pymongo/message.py b/pymongo/message.py index 6e8c596953..de0a6ea9fc 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -922,6 +922,9 @@ def max_bson_size(self): @property def max_message_size(self): """A proxy for SockInfo.max_message_size.""" + if self.compress: + # Subtract 16 bytes for the message header. + return self.sock_info.max_message_size - 16 return self.sock_info.max_message_size @property diff --git a/test/test_bulk.py b/test/test_bulk.py index 81bb35c33d..ec148a4ac5 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -308,6 +308,24 @@ def test_numerous_inserts(self): self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) + @client_context.require_version_min(3, 6) + def test_bulk_max_message_size(self): + self.coll.delete_many({}) + self.addCleanup(self.coll.delete_many, {}) + _16_MB = 16 * 1000 * 1000 + # Generate a list of documents such that the first batched OP_MSG is + # as close as possible to the 48MB limit. + docs = [ + {'_id': 1, 'l': 's' * _16_MB}, + {'_id': 2, 'l': 's' * _16_MB}, + {'_id': 3, 'l': 's' * (_16_MB - 10000)}, + ] + # Fill in the remaining ~10000 bytes with small documents. + for i in range(4, 10000): + docs.append({'_id': i}) + result = self.coll.insert_many(docs) + self.assertEqual(len(docs), len(result.inserted_ids)) + def test_generator_insert(self): def gen(): yield {'a': 1, 'b': 1} From 849a4153565ec27681667859e5223f2b7ba3e8eb Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 18 Nov 2019 17:41:42 -0800 Subject: [PATCH 0024/2111] PYTHON-2002 Re-enable dns/srv tests --- test/test_dns.py | 1 - test/test_srv_polling.py | 1 - 2 files changed, 2 deletions(-) diff --git a/test/test_dns.py b/test/test_dns.py index be972fa352..58c9005d3e 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -39,7 +39,6 @@ class TestDNS(unittest.TestCase): def create_test(test_case): - @unittest.skipIf(sys.version_info[0] < 3, "PYTHON-2002 fails on python 2") @client_context.require_replica_set @client_context.require_ssl def run_test(self): diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index ff12e98a91..1908103476 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -88,7 +88,6 @@ class TestSrvPolling(unittest.TestCase): CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" - @unittest.skipIf(sys.version_info[0] < 3, "PYTHON-2002 fails on python 2") def setUp(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("SRV polling tests require the dnspython " From d0423d2d53b11963a758dfc543035c82492b0423 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 14 Nov 2019 14:43:36 -0800 Subject: [PATCH 0025/2111] PYTHON-1993 Add client-side field level encryption documentation examples Specify pymongocrypt<2.0.0 in setup.py for compatibility. --- doc/api/pymongo/encryption.rst | 4 +- doc/api/pymongo/encryption_options.rst | 4 +- doc/examples/encryption.rst | 498 +++++++++++++++++++++++++ doc/examples/index.rst | 1 + doc/index.rst | 3 + pymongo/encryption.py | 38 +- pymongo/encryption_options.py | 30 +- pymongo/mongo_client.py | 5 +- setup.py | 2 +- 9 files changed, 540 insertions(+), 45 deletions(-) create mode 100644 doc/examples/encryption.rst diff --git a/doc/api/pymongo/encryption.rst b/doc/api/pymongo/encryption.rst index 1501ef98a6..3a8c3c5cc4 100644 --- a/doc/api/pymongo/encryption.rst +++ b/doc/api/pymongo/encryption.rst @@ -1,5 +1,5 @@ -:mod:`encryption` -- Client side encryption -=========================================== +:mod:`encryption` -- Client-Side Field Level Encryption +======================================================= .. automodule:: pymongo.encryption :members: diff --git a/doc/api/pymongo/encryption_options.rst b/doc/api/pymongo/encryption_options.rst index 492139faaf..08bfc157a9 100644 --- a/doc/api/pymongo/encryption_options.rst +++ b/doc/api/pymongo/encryption_options.rst @@ -1,8 +1,8 @@ -:mod:`encryption_options` -- Support for automatic client side encryption +:mod:`encryption_options` -- Automatic Client-Side Field Level Encryption ========================================================================= .. automodule:: pymongo.encryption_options - :synopsis: Support for automatic client side encryption + :synopsis: Support for automatic client-side field level encryption .. autoclass:: pymongo.encryption_options.AutoEncryptionOpts :members: diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst new file mode 100644 index 0000000000..540a6f8065 --- /dev/null +++ b/doc/examples/encryption.rst @@ -0,0 +1,498 @@ +Client-Side Field Level Encryption +================================== + +New in MongoDB 4.2, client-side field level encryption allows an application +to encrypt specific data fields in addition to pre-existing MongoDB +encryption features such as `Encryption at Rest +`_ and +`TLS/SSL (Transport Encryption) +`_. + +With field level encryption, applications can encrypt fields in documents +*prior* to transmitting data over the wire to the server. Client-side field +level encryption supports workloads where applications must guarantee that +unauthorized parties, including server administrators, cannot read the +encrypted data. + +.. seealso:: The MongoDB documentation for `Client-Side Field Level Encryption + `_. + +Dependencies +------------ + +To get started using client-side field level encryption in your project, +you will need to install the +`pymongocrypt `_ library +as well as the driver itself. Install both the driver and a compatible +version of pymongocrypt like this:: + + $ python -m pip install 'pymongo[encryption]' + +Note that installing on Linux requires pip 19 or later for manylinux2010 wheel +support. For more information about installing pymongocrypt see +`the installation instructions on the project's PyPI page +`_. + +mongocryptd +----------- + +The ``mongocryptd`` binary is required for automatic client-side encryption +and is included as a component in the `MongoDB Enterprise Server package +`_. +For detailed installation instructions see +`the MongoDB documentation on mongocryptd +`_. + +``mongocryptd`` performs the following: + +- Parses the automatic encryption rules specified to the database connection. + If the JSON schema contains invalid automatic encryption syntax or any + document validation syntax, ``mongocryptd`` returns an error. +- Uses the specified automatic encryption rules to mark fields in read and + write operations for encryption. +- Rejects read/write operations that may return unexpected or incorrect results + when applied to an encrypted field. For supported and unsupported operations, + see `Read/Write Support with Automatic Field Level Encryption + `_. + +A MongoClient configured with auto encryption will automatically spawn the +``mongocryptd`` process from the application's ``PATH``. Applications can +control the spawning behavior as part of the automatic encryption options. +For example to set the path to the ``mongocryptd`` process:: + + auto_encryption_opts = AutoEncryptionOpts( + ..., + mongocryptd_spawn_path='/path/to/mongocryptd') + +To control the logging output of ``mongocryptd`` pass options using +``mongocryptd_spawn_args``:: + + auto_encryption_opts = AutoEncryptionOpts( + ..., + mongocryptd_spawn_args=['--logpath=/path/to/mongocryptd.log', '--logappend']) + +If your application wishes to manage the ``mongocryptd`` process manually, +it is possible to disable spawning ``mongocryptd``:: + + auto_encryption_opts = AutoEncryptionOpts( + ..., + mongocryptd_bypass_spawn=True, + # URI of the local ``mongocryptd`` process. + mongocryptd_uri='mongodb://localhost:27020') + +``mongocryptd`` is only responsible for supporting automatic client-side field +level encryption and does not itself perform any encryption or decryption. + +.. _automatic-client-side-encryption: + +Automatic Client-Side Field Level Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Automatic client-side field level encryption is enabled by creating a +:class:`~pymongo.mongo_client.MongoClient` with the ``auto_encryption_opts`` +option set to an instance of +:class:`~pymongo.encryption_options.AutoEncryptionOpts`. The following +examples show how to setup automatic client-side field level encryption +using :class:`~pymongo.encryption.ClientEncryption` to create a new +encryption data key. + +.. note:: Automatic client-side field level encryption requires MongoDB 4.2 + enterprise or a MongoDB 4.2 Atlas cluster. The community version of the + server supports automatic decryption as well as + :ref:`explicit-client-side-encryption`. + +Providing Local Automatic Encryption Rules +`````````````````````````````````````````` + +The following example shows how to specify automatic encryption rules via the +``schema_map`` option. The automatic encryption rules are expressed using a +`strict subset of the JSON Schema syntax +`_. + +Supplying a ``schema_map`` provides more security than relying on +JSON Schemas obtained from the server. It protects against a +malicious server advertising a false JSON Schema, which could trick +the client into sending unencrypted data that should be encrypted. + +JSON Schemas supplied in the ``schema_map`` only apply to configuring +automatic client-side field level encryption. Other validation +rules in the JSON schema will not be enforced by the driver and +will result in an error.:: + + import os + + from bson.codec_options import CodecOptions + from bson import json_util + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, + ClientEncryption) + from pymongo.encryption_options import AutoEncryptionOpts + + + def create_json_schema_file(kms_providers, key_vault_namespace, + key_vault_client): + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + key_vault_client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. We will not be calling + # encrypt() or decrypt() in this example so we can use any + # CodecOptions. + CodecOptions()) + + # Create a new data key and json schema for the encryptedField. + # https://docs.mongodb.com/manual/reference/security-client-side-automatic-json-schema/ + data_key_id = client_encryption.create_data_key( + 'local', key_alt_names=['pymongo_encryption_example_1']) + schema = { + "properties": { + "encryptedField": { + "encrypt": { + "keyId": [data_key_id], + "bsonType": "string", + "algorithm": + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + } + } + }, + "bsonType": "object" + } + # Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be + # able to parse the MongoDB extended JSON file. + json_schema_string = json_util.dumps( + schema, json_options=json_util.CANONICAL_JSON_OPTIONS) + + with open('jsonSchema.json', 'w') as file: + file.write(json_schema_string) + + + def main(): + # The MongoDB namespace (db.collection) used to store the + # encrypted documents in this example. + encrypted_namespace = "test.coll" + + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # The MongoClient used to access the key vault (key_vault_namespace). + key_vault_client = MongoClient() + key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name] + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + create_json_schema_file( + kms_providers, key_vault_namespace, key_vault_client) + + # Load the JSON Schema and construct the local schema_map option. + with open('jsonSchema.json', 'r') as file: + json_schema_string = file.read() + json_schema = json_util.loads(json_schema_string) + schema_map = {encrypted_namespace: json_schema} + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, schema_map=schema_map) + + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + db_name, coll_name = encrypted_namespace.split(".", 1) + coll = client[db_name][coll_name] + # Clear old data + coll.drop() + + coll.insert_one({"encryptedField": "123456789"}) + print('Decrypted document: %s' % (coll.find_one(),)) + unencrypted_coll = MongoClient()[db_name][coll_name] + print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + + + if __name__ == "__main__": + main() + +Server-Side Field Level Encryption Enforcement +`````````````````````````````````````````````` + +The MongoDB 4.2 server supports using schema validation to enforce encryption +of specific fields in a collection. This schema validation will prevent an +application from inserting unencrypted values for any fields marked with the +``"encrypt"`` JSON schema keyword. + +The following example shows how to setup automatic client-side field level +encryption using +:class:`~pymongo.encryption.ClientEncryption` to create a new encryption +data key and create a collection with the +`Automatic Encryption JSON Schema Syntax +`_:: + + import os + + from bson.codec_options import CodecOptions + from bson.binary import STANDARD + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, + ClientEncryption) + from pymongo.encryption_options import AutoEncryptionOpts + from pymongo.errors import OperationFailure + from pymongo.write_concern import WriteConcern + + + def main(): + # The MongoDB namespace (db.collection) used to store the + # encrypted documents in this example. + encrypted_namespace = "test.coll" + + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # The MongoClient used to access the key vault (key_vault_namespace). + key_vault_client = MongoClient() + key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name] + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + key_vault_client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. We will not be calling + # encrypt() or decrypt() in this example so we can use any + # CodecOptions. + CodecOptions()) + + # Create a new data key and json schema for the encryptedField. + data_key_id = client_encryption.create_data_key( + 'local', key_alt_names=['pymongo_encryption_example_2']) + json_schema = { + "properties": { + "encryptedField": { + "encrypt": { + "keyId": [data_key_id], + "bsonType": "string", + "algorithm": + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + } + } + }, + "bsonType": "object" + } + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + db_name, coll_name = encrypted_namespace.split(".", 1) + db = client[db_name] + # Clear old data + db.drop_collection(coll_name) + # Create the collection with the encryption JSON Schema. + db.create_collection( + coll_name, + # uuid_representation=STANDARD is required to ensure that any + # UUIDs in the $jsonSchema document are encoded to BSON Binary + # with the standard UUID subtype 4. This is only needed when + # running the "create" collection command with an encryption + # JSON Schema. + codec_options=CodecOptions(uuid_representation=STANDARD), + write_concern=WriteConcern(w="majority"), + validator={"$jsonSchema": json_schema}) + coll = client[db_name][coll_name] + + coll.insert_one({"encryptedField": "123456789"}) + print('Decrypted document: %s' % (coll.find_one(),)) + unencrypted_coll = MongoClient()[db_name][coll_name] + print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + try: + unencrypted_coll.insert_one({"encryptedField": "123456789"}) + except OperationFailure as exc: + print('Unencrypted insert failed: %s' % (exc.details,)) + + + if __name__ == "__main__": + main() + +.. _explicit-client-side-encryption: + +Explicit Encryption +~~~~~~~~~~~~~~~~~~~ + +Explicit encryption is a MongoDB community feature and does not use the +``mongocryptd`` process. Explicit encryption is provided by the +:class:`~pymongo.encryption.ClientEncryption` class, for example:: + + import os + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, + ClientEncryption) + + + def main(): + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # The MongoClient used to read/write application data. + client = MongoClient() + coll = client.test.coll + # Clear old data + coll.drop() + + # Set up the key vault (key_vault_namespace) for this example. + key_vault = client[key_vault_db_name][key_vault_coll_name] + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + coll.codec_options) + + # Create a new data key and json schema for the encryptedField. + data_key_id = client_encryption.create_data_key( + 'local', key_alt_names=['pymongo_encryption_example_3']) + + # Explicitly encrypt a field: + encrypted_field = client_encryption.encrypt( + "123456789", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=data_key_id) + coll.insert_one({"encryptedField": encrypted_field}) + doc = coll.find_one() + print('Encrypted document: %s' % (doc,)) + + # Explicitly decrypt the field: + doc["encryptedField"] = client_encryption.decrypt(doc["encryptedField"]) + print('Decrypted document: %s' % (doc,)) + + # Cleanup resources. + client_encryption.close() + client.close() + + + if __name__ == "__main__": + main() + + +Explicit Encryption with Automatic Decryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Although automatic encryption requires MongoDB 4.2 enterprise or a +MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all users. +To configure automatic *decryption* without automatic *encryption* set +``bypass_auto_encryption=True`` in +:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: + + import os + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, + ClientEncryption) + from pymongo.encryption_options import AutoEncryptionOpts + + + def main(): + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # bypass_auto_encryption=True disable automatic encryption but keeps + # the automatic _decryption_ behavior. bypass_auto_encryption will + # also disable spawning mongocryptd. + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, bypass_auto_encryption=True) + + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + coll = client.test.coll + # Clear old data + coll.drop() + + # Set up the key vault (key_vault_namespace) for this example. + key_vault = client[key_vault_db_name][key_vault_coll_name] + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + coll.codec_options) + + # Create a new data key and json schema for the encryptedField. + data_key_id = client_encryption.create_data_key( + 'local', key_alt_names=['pymongo_encryption_example_4']) + + # Explicitly encrypt a field: + encrypted_field = client_encryption.encrypt( + "123456789", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_alt_name='pymongo_encryption_example_4') + coll.insert_one({"encryptedField": encrypted_field}) + # Automatically decrypts any encrypted fields. + doc = coll.find_one() + print('Decrypted document: %s' % (doc,)) + unencrypted_coll = MongoClient().test.coll + print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + + # Cleanup resources. + client_encryption.close() + client.close() + + + if __name__ == "__main__": + main() diff --git a/doc/examples/index.rst b/doc/examples/index.rst index 7431acd9e1..baadd74464 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -31,3 +31,4 @@ MongoDB, you can start it like so: server_selection tailable tls + encryption diff --git a/doc/index.rst b/doc/index.rst index 051b68d1a2..3a4aa316b2 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -25,6 +25,9 @@ everything you need to know to use **PyMongo**. :doc:`examples/tls` Using PyMongo with TLS / SSL. +:doc:`examples/encryption` + Using PyMongo with client side encryption. + :doc:`faq` Some questions that come up often. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index bdbd4fc723..e782a9f130 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -12,11 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Support for explicit client side encryption. - -**Support for client side encryption is in beta. Backwards-breaking changes -may be made before the final release.** -""" +"""Support for explicit client-side field level encryption.""" import contextlib import os @@ -35,7 +31,7 @@ _HAVE_PYMONGOCRYPT = False MongoCryptCallback = object -from bson import _bson_to_dict, _dict_to_bson, decode, encode +from bson import _dict_to_bson, decode, encode from bson.codec_options import CodecOptions from bson.binary import (Binary, STANDARD, @@ -204,13 +200,13 @@ def insert_data_key(self, data_key): :Returns: The _id of the inserted data key document. """ - # insert does not return the inserted _id when given a RawBSONDocument. - doc = _bson_to_dict(data_key, _DATA_KEY_OPTS) - if not isinstance(doc.get('_id'), uuid.UUID): - raise TypeError( - 'data_key _id must be a bson.binary.Binary with subtype 4') - res = self.key_vault_coll.insert_one(doc) - return Binary(res.inserted_id.bytes, subtype=UUID_SUBTYPE) + raw_doc = RawBSONDocument(data_key) + data_key_id = raw_doc.get('_id') + if not isinstance(data_key_id, uuid.UUID): + raise TypeError('data_key _id must be a UUID') + + self.key_vault_coll.insert_one(raw_doc) + return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE) def bson_encode(self, doc): """Encode a document to BSON. @@ -338,11 +334,11 @@ class Algorithm(object): class ClientEncryption(object): - """Explicit client side encryption.""" + """Explicit client-side field level encryption.""" def __init__(self, kms_providers, key_vault_namespace, key_vault_client, codec_options): - """Explicit client side encryption. + """Explicit client-side field level encryption. The ClientEncryption class encapsulates explicit operations on a key vault collection that cannot be done directly on a MongoClient. Similar @@ -353,8 +349,7 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, creating data keys. It does not provide an API to query keys from the key vault collection, as this can be done directly on the MongoClient. - .. note:: Support for client side encryption is in beta. - Backwards-breaking changes may be made before the final release. + See :ref:`explicit-client-side-encryption` for an example. :Parameters: - `kms_providers`: Map of KMS provider options. Two KMS providers @@ -377,14 +372,17 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, containing the `key_vault_namespace` collection. - `codec_options`: An instance of :class:`~bson.codec_options.CodecOptions` to use when encoding a - value for encryption and decoding the decrypted BSON value. + value for encryption and decoding the decrypted BSON value. This + should be the same CodecOptions instance configured on the + MongoClient, Database, or Collection used to access application + data. .. versionadded:: 3.9 """ if not _HAVE_PYMONGOCRYPT: raise ConfigurationError( - "client side encryption requires the pymongocrypt library: " - "install a compatible version with: " + "client-side field level encryption requires the pymongocrypt " + "library: install a compatible version with: " "python -m pip install 'pymongo[encryption]'") if not isinstance(codec_options, CodecOptions): diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 60bb41e334..3158b3e84d 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -12,11 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Support for automatic client side encryption. - -**Support for client side encryption is in beta. Backwards-breaking changes -may be made before the final release.** -""" +"""Support for automatic client-side field level encryption.""" import copy @@ -30,7 +26,7 @@ class AutoEncryptionOpts(object): - """Options to configure automatic encryption.""" + """Options to configure automatic client-side field level encryption.""" def __init__(self, kms_providers, key_vault_namespace, key_vault_client=None, schema_map=None, @@ -39,21 +35,21 @@ def __init__(self, kms_providers, key_vault_namespace, mongocryptd_bypass_spawn=False, mongocryptd_spawn_path='mongocryptd', mongocryptd_spawn_args=None): - """Options to configure automatic encryption. + """Options to configure automatic client-side field level encryption. - Automatic encryption is an enterprise only feature that only - applies to operations on a collection. Automatic encryption is not + Automatic client-side field level encryption requires MongoDB 4.2 + enterprise or a MongoDB 4.2 Atlas cluster. Automatic encryption is not supported for operations on a database or view and will result in - error. To bypass automatic encryption (but enable automatic - decryption), set ``bypass_auto_encryption=True`` in - AutoEncryptionOpts. + error. - Explicit encryption/decryption and automatic decryption is a - community feature. A MongoClient configured with - bypassAutoEncryption=true will still automatically decrypt. + Although automatic encryption requires MongoDB 4.2 enterprise or a + MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all + users. To configure automatic *decryption* without automatic + *encryption* set ``bypass_auto_encryption=True``. Explicit + encryption and explicit decryption is also supported for all users + with the :class:`~pymongo.encryption.ClientEncryption` class. - .. note:: Support for client side encryption is in beta. - Backwards-breaking changes may be made before the final release. + See :ref:`automatic-client-side-encryption` for an example. :Parameters: - `kms_providers`: Map of KMS provider options. Two KMS providers diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 8220a5e443..08a381f539 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -481,9 +481,8 @@ def __init__( - `auto_encryption_opts`: A :class:`~pymongo.encryption_options.AutoEncryptionOpts` which configures this client to automatically encrypt collection commands - and automatically decrypt results. **Support for client side - encryption is in beta. Backwards-breaking changes may be made - before the final release.** + and automatically decrypt results. See + :ref:`automatic-client-side-encryption` for an example. .. mongodoc:: connections diff --git a/setup.py b/setup.py index 68d9c3e898..4167c3c7ad 100755 --- a/setup.py +++ b/setup.py @@ -318,7 +318,7 @@ def build_extension(self, ext): 'bson/buffer.c'])] extras_require = { - 'encryption': ['pymongocrypt'], # For client side field level encryption. + 'encryption': ['pymongocrypt<2.0.0'], 'snappy': ['python-snappy'], 'zstd': ['zstandard'], } From e31a0ef95f8cc397eb7d0b4bc83916817acc0a53 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 18 Nov 2019 14:47:28 -0800 Subject: [PATCH 0026/2111] PYTHON-1911 Implement missing changeStream prose tests --- test/__init__.py | 30 ++++++++++++++++++++ test/test_change_stream.py | 57 +++++++++++++++++++++++++++++++++----- 2 files changed, 80 insertions(+), 7 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 463a05b757..603f158ff5 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -29,6 +29,7 @@ except ImportError: HAVE_IPADDRESS = False +from contextlib import contextmanager from functools import wraps from unittest import SkipTest @@ -585,6 +586,13 @@ def require_test_commands(self, func): "Test commands must be enabled", func=func) + def require_failCommand_fail_point(self, func): + """Run a test only if the server supports the failCommand fail + point.""" + return self._require(lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func) + def require_ssl(self, func): """Run a test only if the client can connect over SSL.""" return self._require(lambda: self.ssl, @@ -656,6 +664,17 @@ def supports_getpreverror(self): """Does the connected server support getpreverror?""" return not (self.version.at_least(4, 1, 0) or self.is_mongos) + @property + def supports_failCommand_fail_point(self): + """Does the server support the failCommand fail point?""" + if self.is_mongos: + return (self.version.at_least(4, 1, 5) and + self.test_commands_enabled) + else: + return (self.version.at_least(4, 0) and + self.test_commands_enabled) + + @property def requires_hint_with_min_max_queries(self): """Does the server require a hint with min/max queries.""" @@ -713,6 +732,17 @@ def setUpClass(cls): else: cls.credentials = {} + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on.update(command_args) + self.client.admin.command(cmd_on) + try: + yield + finally: + cmd_off = {'configureFailPoint': cmd_on['configureFailPoint'], + 'mode': 'off'} + self.client.admin.command(cmd_off) # Use assertRaisesRegex if available, otherwise use Python 2.7's # deprecated assertRaisesRegexp, with a 'p'. diff --git a/test/test_change_stream.py b/test/test_change_stream.py index ced8af8ccf..97b67fb69d 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -23,6 +23,7 @@ import time import uuid +from contextlib import contextmanager from itertools import product sys.path[0:0] = [''] @@ -535,26 +536,65 @@ def test_resume_on_error(self): self.kill_change_stream_cursor(change_stream) self.insert_one_and_check(change_stream, {'_id': 2}) + # Prose test no. 4 + @client_context.require_failCommand_fail_point + def test_no_resume_attempt_if_aggregate_command_fails(self): + # Set non-retryable error on aggregate command. + fail_point = {'mode': {'times': 1}, + 'data': {'errorCode': 2, 'failCommands': ['aggregate']}} + client, listener = self._client_with_listener("aggregate", "getMore") + with self.fail_point(fail_point): + try: + _ = self.change_stream_with_client(client) + except OperationFailure: + pass + + # Driver should have attempted aggregate command only once. + self.assertEqual(len(listener.results['started']), 1) + self.assertEqual(listener.results['started'][0].command_name, + 'aggregate') + # Prose test no. 5 def test_does_not_resume_fatal_errors(self): """ChangeStream will not attempt to resume fatal server errors.""" - for code in _NON_RESUMABLE_GETMORE_ERRORS: - with self.change_stream() as change_stream: - self.watched_collection().insert_one({}) - + if client_context.supports_failCommand_fail_point: + # failCommand does not support returning no errorCode. + TEST_ERROR_CODES = _NON_RESUMABLE_GETMORE_ERRORS - {None} + @contextmanager + def generate_error(change_stream, code): + fail_point = {'mode': {'times': 1}, 'data': { + 'errorCode': code, 'failCommands': ['getMore']}} + with self.fail_point(fail_point): + yield + else: + TEST_ERROR_CODES = _NON_RESUMABLE_GETMORE_ERRORS + @contextmanager + def generate_error(change_stream, code): def mock_try_next(*args, **kwargs): change_stream._cursor.close() raise OperationFailure('Mock server error', code=code) original_try_next = change_stream._cursor._try_next change_stream._cursor._try_next = mock_try_next + try: + yield + finally: + change_stream._cursor._try_next = original_try_next - with self.assertRaises(OperationFailure): - next(change_stream) - change_stream._cursor._try_next = original_try_next + for code in TEST_ERROR_CODES: + with self.change_stream() as change_stream: + self.watched_collection().insert_one({}) + with generate_error(change_stream, code): + with self.assertRaises(OperationFailure): + next(change_stream) with self.assertRaises(StopIteration): next(change_stream) + # Prose test no. 6 - SKIPPED + # readPreference is not configurable using the watch() helpers so we can + # skip this test. Also, PyMongo performs server selection for each + # operation which ensure compliance with this prose test. + # Prose test no. 7 def test_initial_empty_batch(self): with self.change_stream() as change_stream: @@ -603,6 +643,9 @@ def test_start_at_operation_time_caching(self): "startAtOperationTime"), optime, str([k.command for k in listener.results['started']])) + # Prose test no. 10 - SKIPPED + # This test is identical to prose test no. 3. + # Prose test no. 11 @client_context.require_version_min(4, 0, 7) def test_resumetoken_empty_batch(self): From ec4b02052d790b18df832194eeb6cddc66f9047d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 21 Nov 2019 12:14:06 -0800 Subject: [PATCH 0027/2111] PYTHON-1993 Use dochub for stable CSFLE documentation links --- doc/examples/encryption.rst | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 540a6f8065..75aad30dfc 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -4,9 +4,9 @@ Client-Side Field Level Encryption New in MongoDB 4.2, client-side field level encryption allows an application to encrypt specific data fields in addition to pre-existing MongoDB encryption features such as `Encryption at Rest -`_ and +`_ and `TLS/SSL (Transport Encryption) -`_. +`_. With field level encryption, applications can encrypt fields in documents *prior* to transmitting data over the wire to the server. Client-side field @@ -14,8 +14,7 @@ level encryption supports workloads where applications must guarantee that unauthorized parties, including server administrators, cannot read the encrypted data. -.. seealso:: The MongoDB documentation for `Client-Side Field Level Encryption - `_. +.. mongodoc:: client-side-field-level-encryption Dependencies ------------ @@ -38,10 +37,10 @@ mongocryptd The ``mongocryptd`` binary is required for automatic client-side encryption and is included as a component in the `MongoDB Enterprise Server package -`_. +`_. For detailed installation instructions see `the MongoDB documentation on mongocryptd -`_. +`_. ``mongocryptd`` performs the following: @@ -53,7 +52,7 @@ For detailed installation instructions see - Rejects read/write operations that may return unexpected or incorrect results when applied to an encrypted field. For supported and unsupported operations, see `Read/Write Support with Automatic Field Level Encryption - `_. + `_. A MongoClient configured with auto encryption will automatically spawn the ``mongocryptd`` process from the application's ``PATH``. Applications can @@ -107,7 +106,7 @@ Providing Local Automatic Encryption Rules The following example shows how to specify automatic encryption rules via the ``schema_map`` option. The automatic encryption rules are expressed using a `strict subset of the JSON Schema syntax -`_. +`_. Supplying a ``schema_map`` provides more security than relying on JSON Schemas obtained from the server. It protects against a @@ -144,7 +143,7 @@ will result in an error.:: CodecOptions()) # Create a new data key and json schema for the encryptedField. - # https://docs.mongodb.com/manual/reference/security-client-side-automatic-json-schema/ + # https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules data_key_id = client_encryption.create_data_key( 'local', key_alt_names=['pymongo_encryption_example_1']) schema = { @@ -234,7 +233,7 @@ encryption using :class:`~pymongo.encryption.ClientEncryption` to create a new encryption data key and create a collection with the `Automatic Encryption JSON Schema Syntax -`_:: +`_:: import os From b8ce14dfd2f8c9eb20edbe5e420798f0d9b997f2 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 21 Nov 2019 16:24:00 -0800 Subject: [PATCH 0028/2111] PYTHON-2063 Fix faulty command construction in failpoint context manager --- test/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 603f158ff5..6f585f27eb 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -740,9 +740,8 @@ def fail_point(self, command_args): try: yield finally: - cmd_off = {'configureFailPoint': cmd_on['configureFailPoint'], - 'mode': 'off'} - self.client.admin.command(cmd_off) + self.client.admin.command( + 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') # Use assertRaisesRegex if available, otherwise use Python 2.7's # deprecated assertRaisesRegexp, with a 'p'. From 6c4e1c93716687cbe970354054c2d1ffd599f6c7 Mon Sep 17 00:00:00 2001 From: paul fisher Date: Fri, 22 Nov 2019 17:57:00 -0500 Subject: [PATCH 0029/2111] PYTHON-2061 bson: check for negative entry size in decode_file_iter (#429) Raise InvalidBSON instead of ValueError when decode_file_iter reads an invalid BSON object size. --- bson/__init__.py | 2 +- doc/contributors.rst | 1 + test/test_bson.py | 72 +++++++++++++++++++++++--------------------- 3 files changed, 39 insertions(+), 36 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 97d38b37a9..c8ac12e46e 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1166,7 +1166,7 @@ def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): elif len(size_data) != 4: raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 - elements = size_data + file_obj.read(obj_size) + elements = size_data + file_obj.read(max(0, obj_size)) yield _bson_to_dict(elements, codec_options) diff --git a/doc/contributors.rst b/doc/contributors.rst index 26717f76b8..9773b38224 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -86,3 +86,4 @@ The following is a list of people who have contributed to - Shrey Batra(shreybatra) - Felipe Rodrigues(fbidu) - Terence Honles (terencehonles) +- Paul Fisher (thetorpedodog) diff --git a/test/test_bson.py b/test/test_bson.py index 0e1b8e1b7b..dd604c7389 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -18,8 +18,10 @@ import collections import datetime +import os import re import sys +import tempfile import uuid sys.path[0:0] = [""] @@ -335,41 +337,41 @@ def test_invalid_decodes(self): self.assertRaises(InvalidBSON, list, decode_file_iter(StringIO(b"\x1B"))) - # An object size that's too small to even include the object size, - # but is correctly encoded, along with a correct EOO (and no data). - data = b"\x01\x00\x00\x00\x00" - self.assertRaises(InvalidBSON, decode_all, data) - self.assertRaises(InvalidBSON, list, decode_iter(data)) - self.assertRaises(InvalidBSON, list, decode_file_iter(StringIO(data))) - - # One object, but with object size listed smaller than it is in the - # data. - data = (b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00") - self.assertRaises(InvalidBSON, decode_all, data) - self.assertRaises(InvalidBSON, list, decode_iter(data)) - self.assertRaises(InvalidBSON, list, decode_file_iter(StringIO(data))) - - # One object, missing the EOO at the end. - data = (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00") - self.assertRaises(InvalidBSON, decode_all, data) - self.assertRaises(InvalidBSON, list, decode_iter(data)) - self.assertRaises(InvalidBSON, list, decode_file_iter(StringIO(data))) - - # One object, sized correctly, with a spot for an EOO, but the EOO - # isn't 0x00. - data = (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\xFF") - self.assertRaises(InvalidBSON, decode_all, data) - self.assertRaises(InvalidBSON, list, decode_iter(data)) - self.assertRaises(InvalidBSON, list, decode_file_iter(StringIO(data))) + bad_bsons = [ + # An object size that's too small to even include the object size, + # but is correctly encoded, along with a correct EOO (and no data). + b"\x01\x00\x00\x00\x00", + # One object, but with object size listed smaller than it is in the + # data. + (b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00"), + # One object, missing the EOO at the end. + (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00"), + # One object, sized correctly, with a spot for an EOO, but the EOO + # isn't 0x00. + (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\xFF"), + ] + for i, data in enumerate(bad_bsons): + msg = "bad_bson[{}]".format(i) + with self.assertRaises(InvalidBSON, msg=msg): + decode_all(data) + with self.assertRaises(InvalidBSON, msg=msg): + list(decode_iter(data)) + with self.assertRaises(InvalidBSON, msg=msg): + list(decode_file_iter(StringIO(data))) + with tempfile.TemporaryFile() as scratch: + scratch.write(data) + scratch.seek(0, os.SEEK_SET) + with self.assertRaises(InvalidBSON, msg=msg): + list(decode_file_iter(scratch)) def test_data_timestamp(self): self.assertEqual({"test": Timestamp(4, 20)}, From e627321c2e541848f34f496383e324366012cf55 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 21 Nov 2019 13:38:25 -0800 Subject: [PATCH 0030/2111] PYTHON-1966 Fix unicode(PyMongoError) on Python 2 --- pymongo/errors.py | 14 ++++++++++---- test/test_collection.py | 6 ++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/pymongo/errors.py b/pymongo/errors.py index 1b5bcbdb03..2d9fd05029 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -46,10 +46,16 @@ def _remove_error_label(self, label): """Remove the given label from this error.""" self._error_labels.remove(label) - def __str__(self): - if sys.version_info[0] == 2 and isinstance(self._message, unicode): - return self._message.encode('utf-8', errors='replace') - return str(self._message) + if sys.version_info[0] == 2: + def __str__(self): + if isinstance(self._message, unicode): + return self._message.encode('utf-8', errors='replace') + return str(self._message) + + def __unicode__(self): + if isinstance(self._message, unicode): + return self._message + return unicode(self._message, 'utf-8', errors='replace') class ProtocolError(PyMongoError): diff --git a/test/test_collection.py b/test/test_collection.py index f21720480c..3f275aba10 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1335,6 +1335,12 @@ def test_write_error_unicode(self): self.assertIn('E11000 duplicate key error', str(ctx.exception)) + if sys.version_info[0] == 2: + # Test unicode(error) conversion. + self.assertIn('E11000 duplicate key error', + unicode(ctx.exception)) + + def test_wtimeout(self): # Ensure setting wtimeout doesn't disable write concern altogether. # See SERVER-12596. From eda4fbb1591bd88d58d5bd3452f82ed656e95b1c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Nov 2019 18:27:01 -0800 Subject: [PATCH 0031/2111] PYTHON-2043 Spawn mongocryptd as a daemon process and silence resource warnings --- pymongo/daemon.py | 144 ++++++++++++++++++++++++++++++++++++++++++ pymongo/encryption.py | 5 +- 2 files changed, 146 insertions(+), 3 deletions(-) create mode 100644 pymongo/daemon.py diff --git a/pymongo/daemon.py b/pymongo/daemon.py new file mode 100644 index 0000000000..f066a02c23 --- /dev/null +++ b/pymongo/daemon.py @@ -0,0 +1,144 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for spawning a daemon process. + +PyMongo only attempts to spawn the mongocryptd daemon process when automatic +client-side field level encryption is enabled. See +:ref:`automatic-client-side-encryption` for more info. +""" + +import os +import subprocess +import sys +import time + +# The maximum amount of time to wait for the intermediate subprocess. +_WAIT_TIMEOUT = 10 +_THIS_FILE = os.path.realpath(__file__) + +if sys.version_info[0] < 3: + def _popen_wait(popen, timeout): + """Implement wait timeout support for Python 2.""" + from pymongo.monotonic import time as _time + deadline = _time() + timeout + # Initial delay of 1ms + delay = .0005 + while True: + returncode = popen.poll() + if returncode is not None: + return returncode + + remaining = deadline - _time() + if remaining <= 0: + # Just return None instead of raising an error. + return None + delay = min(delay * 2, remaining, .5) + time.sleep(delay) + +else: + def _popen_wait(popen, timeout): + """Implement wait timeout support for Python 3.""" + try: + return popen.wait(timeout=timeout) + except subprocess.TimeoutExpired: + # Silence TimeoutExpired errors. + return None + + +def _silence_resource_warning(popen): + """Silence Popen's ResourceWarning. + + Note this should only be used if the process was created as a daemon. + """ + # Set the returncode to avoid this warning when popen is garbage collected: + # "ResourceWarning: subprocess XXX is still running". + # See https://bugs.python.org/issue38890 and + # https://bugs.python.org/issue26741. + popen.returncode = 0 + + +if sys.platform == 'win32': + # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. + _DETACHED_PROCESS = getattr(subprocess, 'DETACHED_PROCESS', 0x00000008) + + def _spawn_daemon(args): + """Spawn a daemon process (Windows).""" + with open(os.devnull, 'r+b') as devnull: + popen = subprocess.Popen( + args, + creationflags=_DETACHED_PROCESS, + stdin=devnull, stderr=devnull, stdout=devnull) + _silence_resource_warning(popen) +else: + # On Unix we spawn the daemon process with a double Popen. + # 1) The first Popen runs this file as a Python script using the current + # interpreter. + # 2) The script then decouples itself and performs the second Popen to + # spawn the daemon process. + # 3) The original process waits up to 10 seconds for the script to exit. + # + # Note that we do not call fork() directly because we want this procedure + # to be safe to call from any thread. Using Popen instead of fork also + # avoids triggering the application's os.register_at_fork() callbacks when + # we spawn the mongocryptd daemon process. + def _spawn(args): + """Spawn the process and silence stdout/stderr.""" + with open(os.devnull, 'r+b') as devnull: + return subprocess.Popen( + args, + close_fds=True, + stdin=devnull, stderr=devnull, stdout=devnull) + + + def _spawn_daemon_double_popen(args): + """Spawn a daemon process using a double subprocess.Popen.""" + spawner_args = [sys.executable, _THIS_FILE] + spawner_args.extend(args) + temp_proc = subprocess.Popen(spawner_args, close_fds=True) + # Reap the intermediate child process to avoid creating zombie + # processes. + _popen_wait(temp_proc, _WAIT_TIMEOUT) + + + def _spawn_daemon(args): + """Spawn a daemon process (Unix).""" + # "If Python is unable to retrieve the real path to its executable, + # sys.executable will be an empty string or None". + if sys.executable: + _spawn_daemon_double_popen(args) + else: + # Fallback to spawn a non-daemon process without silencing the + # resource warning. We do not use fork here because it is not + # safe to call from a thread on all systems. + # Unfortunately, this means that: + # 1) If the parent application is killed via Ctrl-C, the + # non-daemon process will also be killed. + # 2) Each non-daemon process will hang around as a zombie process + # until the main application exits. + _spawn(args) + + + if __name__ == '__main__': + # Attempt to start a new session to decouple from the parent. + if hasattr(os, 'setsid'): + try: + os.setsid() + except OSError: + pass + + # We are performing a double fork (Popen) to spawn the process as a + # daemon so it is safe to ignore the resource warning. + _silence_resource_warning(_spawn(sys.argv[1:])) + os._exit(0) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index e782a9f130..f71cd48b6d 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -52,6 +52,7 @@ from pymongo.ssl_support import get_ssl_context from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern +from pymongo.daemon import _spawn_daemon _HTTPS_PORT = 443 @@ -146,9 +147,7 @@ def spawn(self): self._spawned = True args = [self.opts._mongocryptd_spawn_path or 'mongocryptd'] args.extend(self.opts._mongocryptd_spawn_args) - # Silence mongocryptd output, users should pass --logpath. - with open(os.devnull, 'wb') as devnull: - subprocess.Popen(args, stdout=devnull, stderr=devnull) + _spawn_daemon(args) def mark_command(self, database, cmd): """Mark a command for encryption. From 94cb6acc09da11d34888e55284c162c7192f707f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 22 Nov 2019 15:56:02 -0800 Subject: [PATCH 0032/2111] PYTHON-1966 Test with custom default encoding on Python 2 --- .evergreen/config.yml | 23 +++++++++++++++++++++++ .evergreen/run-tests.sh | 31 ++++++++++++++++++++++--------- test/test_client_context.py | 7 +++++++ 3 files changed, 52 insertions(+), 9 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 27a593aaca..2ef9191ae4 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -339,6 +339,9 @@ functions: export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi + if [ -n "${SETDEFAULTENCODING}" ]; then + export SETDEFAULTENCODING="${SETDEFAULTENCODING}" + fi PYTHON_BINARY=${PYTHON_BINARY} \ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ @@ -1097,6 +1100,16 @@ axes: variables: test_encryption: true + # Run setdefaultencoding before running the test suite? + - id: setdefaultencoding + display_name: "setdefaultencoding" + values: + - id: "setdefaultencoding" + display_name: "setdefaultencoding" + tags: ["setdefaultencoding_tag"] + variables: + SETDEFAULTENCODING: "cp1251" + buildvariants: - matrix_name: "tests-all" matrix_spec: @@ -1529,6 +1542,16 @@ buildvariants: tasks: - ".latest" +# setdefaultencoding tests on RHEL 6.2 (x86_64) with Python 2.7. +- matrix_name: "test-setdefaultencoding" + matrix_spec: + platform: rhel62 + setdefaultencoding: "*" + python-version: "2.7" + display_name: "setdefaultencoding ${python-version} ${platform}" + tasks: + - "test-latest-standalone" + - matrix_name: "test-linux-enterprise-auth" matrix_spec: platform: rhel62 diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9a27efe544..8b27e0ec84 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -2,15 +2,16 @@ set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: -# SET_XTRACE_ON Set to non-empty to write all commands first to stderr. -# AUTH Set to enable authentication. Defaults to "noauth" -# SSL Set to enable SSL. Defaults to "nossl" -# PYTHON_BINARY The Python version to use. Defaults to whatever is available -# GREEN_FRAMEWORK The green framework to test with, if any. -# C_EXTENSIONS Pass --no_ext to setup.py, or not. -# COVERAGE If non-empty, run the test suite with coverage. -# TEST_ENCRYPTION If non-empty, install pymongocrypt. -# LIBMONGOCRYPT_URL The URL to download libmongocrypt. +# SET_XTRACE_ON Set to non-empty to write all commands first to stderr. +# AUTH Set to enable authentication. Defaults to "noauth" +# SSL Set to enable SSL. Defaults to "nossl" +# PYTHON_BINARY The Python version to use. Defaults to whatever is available +# GREEN_FRAMEWORK The green framework to test with, if any. +# C_EXTENSIONS Pass --no_ext to setup.py, or not. +# COVERAGE If non-empty, run the test suite with coverage. +# TEST_ENCRYPTION If non-empty, install pymongocrypt. +# LIBMONGOCRYPT_URL The URL to download libmongocrypt. +# SETDEFAULTENCODING The encoding to set via sys.setdefaultencoding. if [ -n "${SET_XTRACE_ON}" ]; then set -o xtrace @@ -28,6 +29,7 @@ COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} +SETDEFAULTENCODING=${SETDEFAULTENCODING:-} if [ -n "$COMPRESSORS" ]; then export COMPRESSORS=$COMPRESSORS @@ -72,6 +74,17 @@ elif [ "$COMPRESSORS" = "zstd" ]; then trap "deactivate; rm -rf zstdtest" EXIT HUP pip install zstandard PYTHON=python +elif [ -n "$SETDEFAULTENCODING" ]; then + $PYTHON_BINARY -m virtualenv --system-site-packages --never-download encodingtest + . encodingtest/bin/activate + trap "deactivate; rm -rf encodingtest" EXIT HUP + mkdir test-sitecustomize + cat < test-sitecustomize/sitecustomize.py +import sys +sys.setdefaultencoding("$SETDEFAULTENCODING") +EOT + export PYTHONPATH="$(pwd)/test-sitecustomize" + PYTHON=python else PYTHON="$PYTHON_BINARY" fi diff --git a/test/test_client_context.py b/test/test_client_context.py index 55f9b10e10..512347daa6 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -38,6 +38,13 @@ def test_enableTestCommands_is_disabled(self): 'enableTestCommands must be disabled when ' 'PYMONGO_DISABLE_TEST_COMMANDS is set.') + def test_setdefaultencoding_worked(self): + if 'SETDEFAULTENCODING' not in os.environ: + raise SkipTest('SETDEFAULTENCODING is not set') + + self.assertEqual( + sys.getdefaultencoding(), os.environ['SETDEFAULTENCODING']) + if __name__ == "__main__": unittest.main() From 23a62433b6de244dba55095a948578530e44209e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 2 Dec 2019 12:31:27 -0800 Subject: [PATCH 0033/2111] PYTHON-1660 Clear MongoClient session pool after a fork Note that a MongoClient instance is still not fork-safe. This change avoids "Cannot start transaction X on session because a newer transaction Y has already started" errors and other incorrect command results caused by duplicate sessions in the child process. --- pymongo/client_session.py | 18 +++++++++++++++--- pymongo/topology.py | 4 ++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 245afdef57..0c0e7c436d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -95,6 +95,7 @@ """ import collections +import os import sys import uuid @@ -834,12 +835,13 @@ def _start_retryable_write(self): class _ServerSession(object): - def __init__(self): + def __init__(self, pool_id): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)} self.last_use = monotonic.time() self._transaction_id = 0 self.dirty = False + self.pool_id = pool_id def mark_dirty(self): """Mark this session as dirty. @@ -869,6 +871,14 @@ class _ServerSessionPool(collections.deque): This class is not thread-safe, access it while holding the Topology lock. """ + def __init__(self, *args, **kwargs): + super(_ServerSessionPool, self).__init__(*args, **kwargs) + self.pool_id = 0 + + def reset(self): + self.pool_id += 1 + self.clear() + def pop_all(self): ids = [] while self: @@ -889,7 +899,7 @@ def get_server_session(self, session_timeout_minutes): if not s.timed_out(session_timeout_minutes): return s - return _ServerSession() + return _ServerSession(self.pool_id) def return_server_session(self, server_session, session_timeout_minutes): self._clear_stale(session_timeout_minutes) @@ -897,7 +907,9 @@ def return_server_session(self, server_session, session_timeout_minutes): self.return_server_session_no_lock(server_session) def return_server_session_no_lock(self, server_session): - if not server_session.dirty: + # Discard sessions from an old pool to avoid duplicate sessions in the + # child process after a fork. + if server_session.pool_id == self.pool_id and not server_session.dirty: self.appendleft(server_session) def _clear_stale(self, session_timeout_minutes): diff --git a/pymongo/topology.py b/pymongo/topology.py index e205e2f60c..b00bc3306a 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -156,6 +156,10 @@ def open(self): "after forking. See PyMongo's documentation for details: " "http://api.mongodb.org/python/current/faq.html#" "is-pymongo-fork-safe") + with self._lock: + # Reset the session pool to avoid duplicate sessions in + # the child process. + self._session_pool.reset() with self._lock: self._ensure_opened() From 895b66272eecf8518d755f71c2c72f4955e8c001 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 2 Dec 2019 16:10:27 -0800 Subject: [PATCH 0034/2111] PYTHON-2070 Migrate MongoDB 4.3+ Windows testing to Windows 2016+ MongoDB 4.4 requires a minimum of Windows 10 / Server 2016. --- .evergreen/config.yml | 39 ++++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2ef9191ae4..3e4143fd9f 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -886,6 +886,12 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz + - id: windows-vs2017 + display_name: "Windows 64 Visual Studio 2017" + run_on: windows-64-vs2017-test + batchtime: 10080 # 7 days + variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz # Test with authentication? - id: auth @@ -1419,15 +1425,21 @@ buildvariants: display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions -# Test CPython 3.4 against all versions on MongoDB >= 2.6 -# on Windows with Visual Studio 2010. +# Test CPython 3.4 against MongoDB 2.6-4.2 on Windows with Visual Studio 2010. - matrix_name: "tests-windows-vs2010-python-version" matrix_spec: platform: windows-vs2010 python-version: &win-vs2010-pythons ["win-vs2010-3.4"] auth-ssl: "*" display_name: "${platform} ${python-version} ${auth-ssl}" - tasks: *all-server-versions + tasks: + - ".4.2" + - ".4.0" + - ".3.6" + - ".3.4" + - ".3.2" + - ".3.0" + - ".2.6" # windows-vs2010 3.4 is unable to dlopen the libmongocrypt ddl built on 2016 #- matrix_name: "tests-windows-vs2010-python-version-encryption" @@ -1471,7 +1483,7 @@ buildvariants: tasks: - ".latest" -# Test CPython 2.7, 3.5 and 3.6 against all versions on MongoDB >= 2.6 +# Test CPython 2.7, 3.5 and 3.6 against MongoDB 2.6-4.2 # on Windows with the Microsoft Visual C++ Compiler for Python 2.7 or Visual Studio 2015. - matrix_name: "tests-windows-vs2015-python-version-27plus" matrix_spec: @@ -1479,7 +1491,14 @@ buildvariants: python-version: &win-vs2015-pythons ["win-vs2015-2.7", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7"] auth-ssl: "*" display_name: "${platform} ${python-version} ${auth-ssl}" - tasks: *all-server-versions + tasks: + - ".4.2" + - ".4.0" + - ".3.6" + - ".3.4" + - ".3.2" + - ".3.0" + - ".2.6" - matrix_name: "tests-windows-vs2015-python-version-encryption" matrix_spec: @@ -1490,6 +1509,16 @@ buildvariants: display_name: "Encryption ${platform} ${python-version} ${auth-ssl}" tasks: *encryption-server-versions +# Test CPython 3.7 against MongoDB >= 4.3 on Windows 2017+. +- matrix_name: "tests-windows-vs2017" + matrix_spec: + platform: windows-vs2017 + python-version: ["win-vs2015-3.7"] + auth-ssl: "*" + display_name: "${platform} ${python-version} ${auth-ssl}" + tasks: + - .latest + # Storage engine tests on RHEL 6.2 (x86_64) with Python 2.7. - matrix_name: "tests-storage-engines" matrix_spec: From 228f7165703526b1b0da94f30769c2e3b29c0961 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 27 Nov 2019 14:38:54 -0800 Subject: [PATCH 0035/2111] PYTHON-2059 Do not send readPreference with OP_MSG getMore commands --- pymongo/message.py | 5 +++-- test/test_cursor.py | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index de0a6ea9fc..1f34efa952 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -422,7 +422,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): spec = self.as_command(sock_info)[0] if sock_info.op_msg_enabled: request_id, msg, size, _ = _op_msg( - 0, spec, self.db, ReadPreference.PRIMARY, + 0, spec, self.db, None, False, False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -685,7 +685,8 @@ def _op_msg(flags, command, dbname, read_preference, slave_ok, check_keys, opts, ctx=None): """Get a OP_MSG message.""" command['$db'] = dbname - if "$readPreference" not in command: + # getMore commands do not send $readPreference. + if read_preference is not None and "$readPreference" not in command: if slave_ok and not read_preference.mode: command["$readPreference"] = ( ReadPreference.PRIMARY_PREFERRED.document) diff --git a/test/test_cursor.py b/test/test_cursor.py index 73caa9f884..ca19ebb28b 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -40,6 +40,7 @@ InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference from test import (client_context, unittest, IntegrationTest) @@ -1407,6 +1408,26 @@ def test_delete_not_initialized(self): cursor = Cursor.__new__(Cursor) # Skip calling __init__ cursor.__del__() # no error + @client_context.require_version_min(3, 6) + def test_getMore_does_not_send_readPreference(self): + listener = WhiteListEventListener('find', 'getMore') + client = rs_or_single_client( + event_listeners=[listener]) + self.addCleanup(client.close) + coll = client[self.db.name].test + + coll.delete_many({}) + coll.insert_many([{} for _ in range(5)]) + self.addCleanup(coll.drop) + + list(coll.find(batch_size=3)) + started = listener.results['started'] + self.assertEqual(2, len(started)) + self.assertEqual('find', started[0].command_name) + self.assertIn('$readPreference', started[0].command) + self.assertEqual('getMore', started[1].command_name) + self.assertNotIn('$readPreference', started[1].command) + class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): From ed4204f237b6c781e94dbe18454681b2e28a804f Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 25 Nov 2019 16:45:53 -0800 Subject: [PATCH 0036/2111] PYTHON-1954 Stop holding the topology lock while creating new connections --- pymongo/pool.py | 14 ++++++- pymongo/topology.py | 6 ++- test/test_command_monitoring_spec.py | 3 +- test/test_discovery_and_monitoring.py | 33 +--------------- test/test_heartbeat_monitoring.py | 44 +-------------------- test/test_topology.py | 42 +------------------- test/utils.py | 56 +++++++++++++++++++++++++-- test/utils_selection_tests.py | 41 +------------------- test/utils_spec_runner.py | 3 +- 9 files changed, 78 insertions(+), 164 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 1ccaf700eb..f114c40e4d 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1033,14 +1033,19 @@ def reset(self): def close(self): self._reset(close=True) - def remove_stale_sockets(self): - """Removes stale sockets then adds new ones if pool is too small.""" + def remove_stale_sockets(self, reference_pool_id): + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_pool_id` argument specifies the + `pool_id` at the point in time this operation was requested on the + pool. + """ if self.opts.max_idle_time_seconds is not None: with self.lock: while (self.sockets and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds): sock_info = self.sockets.pop() sock_info.close_socket(ConnectionClosedReason.IDLE) + while True: with self.lock: if (len(self.sockets) + self.active_sockets >= @@ -1054,6 +1059,11 @@ def remove_stale_sockets(self): try: sock_info = self.connect() with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.pool_id != reference_pool_id: + sock_info.close_socket() + break self.sockets.appendleft(sock_info) finally: self._socket_semaphore.release() diff --git a/pymongo/topology.py b/pymongo/topology.py index b00bc3306a..a3cfe1e79e 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -429,9 +429,13 @@ def mark_server_unknown_and_request_check(self, address): def update_pool(self): # Remove any stale sockets and add new sockets if pool is too small. + servers = [] with self._lock: for server in self._servers.values(): - server._pool.remove_stale_sockets() + servers.append((server, server._pool.pool_id)) + + for server, pool_id in servers: + server._pool.remove_stale_sockets(pool_id) def close(self): """Clear pools and terminate monitors. Topology reopens on demand.""" diff --git a/test/test_command_monitoring_spec.py b/test/test_command_monitoring_spec.py index 2579889117..3d41d4b487 100644 --- a/test/test_command_monitoring_spec.py +++ b/test/test_command_monitoring_spec.py @@ -26,8 +26,7 @@ from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern from test import unittest, client_context -from test.utils import single_client, wait_until, EventListener -from test.utils_selection_tests import parse_read_preference +from test.utils import single_client, wait_until, EventListener, parse_read_preference # Location of JSON test specifications. _TEST_PATH = os.path.join( diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index fe9ae58771..51340fce64 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -30,6 +30,7 @@ from pymongo.settings import TopologySettings from pymongo.uri_parser import parse_uri from test import unittest +from test.utils import MockPool # Location of JSON test specifications. @@ -37,36 +38,6 @@ os.path.dirname(os.path.realpath(__file__)), 'discovery_and_monitoring') -class MockSocketInfo(object): - def close(self): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class MockPool(object): - def __init__(self, *args, **kwargs): - self.pool_id = 0 - self._lock = threading.Lock() - - def _reset(self): - with self._lock: - self.pool_id += 1 - - def reset(self): - self._reset() - - def close(self): - self._reset() - - def update_is_writable(self, is_writable): - pass - - class MockMonitor(object): def __init__(self, server_description, topology, pool, topology_settings): self._server_description = server_description @@ -81,7 +52,7 @@ def request_check(self): def close(self): pass - def remove_stale_sockets(self): + def remove_stale_sockets(self, reference_pool_id): pass diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index b0fdd2a197..61a0afc15c 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -22,49 +22,9 @@ from pymongo.errors import ConnectionFailure from pymongo.ismaster import IsMaster from pymongo.monitor import Monitor -from pymongo.pool import PoolOptions from test import unittest, client_knobs -from test.utils import HeartbeatEventListener, single_client, wait_until - - -class MockSocketInfo(object): - def close(self): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class MockPool(object): - def __init__(self, *args, **kwargs): - self.pool_id = 0 - self._lock = threading.Lock() - self.opts = PoolOptions() - - def get_socket(self, all_credentials): - return MockSocketInfo() - - def return_socket(self, *args, **kwargs): - pass - - def _reset(self): - with self._lock: - self.pool_id += 1 - - def reset(self): - self._reset() - - def close(self): - self._reset() - - def update_is_writable(self, is_writable): - pass - - def remove_stale_sockets(self): - pass +from test.utils import (HeartbeatEventListener, MockPool, single_client, + wait_until) class TestHeartbeatMonitoring(unittest.TestCase): diff --git a/test/test_topology.py b/test/test_topology.py index 182b5e19e7..31c7b0ce1c 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -37,47 +37,7 @@ writable_server_selector) from pymongo.settings import TopologySettings from test import client_knobs, unittest -from test.utils import wait_until - - -class MockSocketInfo(object): - def close(self): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class MockPool(object): - def __init__(self, *args, **kwargs): - self.pool_id = 0 - self._lock = threading.Lock() - self.opts = PoolOptions() - - def get_socket(self, all_credentials): - return MockSocketInfo() - - def return_socket(self, *args, **kwargs): - pass - - def _reset(self): - with self._lock: - self.pool_id += 1 - - def reset(self): - self._reset() - - def close(self): - self._reset() - - def update_is_writable(self, is_writable): - pass - - def remove_stale_sockets(self): - pass +from test.utils import MockPool, wait_until class MockMonitor(object): diff --git a/test/utils.py b/test/utils.py index d26b961380..81c57b5aab 100644 --- a/test/utils.py +++ b/test/utils.py @@ -32,9 +32,10 @@ from bson.objectid import ObjectId from pymongo import (MongoClient, - monitoring) + monitoring, read_preferences) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener +from pymongo.pool import PoolOptions from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, @@ -44,8 +45,6 @@ from test import (client_context, db_user, db_pwd) -from test.utils_selection_tests import parse_read_preference - IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000) @@ -185,6 +184,46 @@ def failed(self, event): self.results.append(event) +class MockSocketInfo(object): + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + +class MockPool(object): + def __init__(self, *args, **kwargs): + self.pool_id = 0 + self._lock = threading.Lock() + self.opts = PoolOptions() + + def get_socket(self, all_credentials): + return MockSocketInfo() + + def return_socket(self, *args, **kwargs): + pass + + def _reset(self): + with self._lock: + self.pool_id += 1 + + def reset(self): + self._reset() + + def close(self): + self._reset() + + def update_is_writable(self, is_writable): + pass + + def remove_stale_sockets(self, reference_pool_id): + pass + + class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" def __init__(self, data): @@ -811,3 +850,14 @@ def run(self): except BaseException as exc: self.exc = exc raise + + +def parse_read_preference(pref): + # Make first letter lowercase to match read_pref's modes. + mode_string = pref.get('mode', 'primary') + mode_string = mode_string[:1].lower() + mode_string[1:] + mode = read_preferences.read_pref_mode_from_name(mode_string) + max_staleness = pref.get('maxStalenessSeconds', -1) + tag_sets = pref.get('tag_sets') + return read_preferences.make_read_preference( + mode, tag_sets=tag_sets, max_staleness=max_staleness) \ No newline at end of file diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 950defcf01..ad17166807 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -21,7 +21,6 @@ sys.path[0:0] = [""] from bson import json_util -from pymongo import read_preferences from pymongo.common import clean_node, HEARTBEAT_FREQUENCY from pymongo.errors import AutoReconnect, ConfigurationError from pymongo.ismaster import IsMaster @@ -30,34 +29,7 @@ from pymongo.server_selectors import writable_server_selector from pymongo.topology import Topology from test import unittest - - -class MockSocketInfo(object): - def close(self): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class MockPool(object): - def __init__(self, *args, **kwargs): - pass - - def reset(self): - pass - - def close(self): - pass - - def update_is_writable(self, is_writable): - pass - - def remove_stale_sockets(self): - pass +from test.utils import MockPool, parse_read_preference class MockMonitor(object): @@ -288,14 +260,3 @@ class TestAllScenarios(unittest.TestCase): setattr(TestAllScenarios, new_test.__name__, new_test) return TestAllScenarios - - -def parse_read_preference(pref): - # Make first letter lowercase to match read_pref's modes. - mode_string = pref.get('mode', 'primary') - mode_string = mode_string[:1].lower() + mode_string[1:] - mode = read_preferences.read_pref_mode_from_name(mode_string) - max_staleness = pref.get('maxStalenessSeconds', -1) - tag_sets = pref.get('tag_sets') - return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index c258d35489..a200b41f99 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -46,8 +46,7 @@ camel_to_upper_camel, CompareType, OvertCommandListener, - rs_client) -from test.utils_selection_tests import parse_read_preference + rs_client, parse_read_preference) class SpecRunner(IntegrationTest): From 04a51ed57aa273dcf784b71ffbbe34dd8ab8f683 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 4 Dec 2019 21:20:33 +0530 Subject: [PATCH 0037/2111] PYTHON-2074 Correctly mock the MonitorBase API --- test/test_discovery_and_monitoring.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 51340fce64..05bef0de29 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -46,13 +46,13 @@ def __init__(self, server_description, topology, pool, topology_settings): def open(self): pass - def request_check(self): + def close(self): pass - def close(self): + def join(self): pass - def remove_stale_sockets(self, reference_pool_id): + def request_check(self): pass From a7c37387852c5d754a42df794d1f48acec0c0212 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 Dec 2019 13:58:46 -0800 Subject: [PATCH 0038/2111] Update changelog for 3.10 release --- doc/changelog.rst | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 77b2f22da8..b80dad9e61 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,33 @@ Changelog ========= +Changes in Version 3.10.0 +------------------------- + +Version 3.10 includes a number of improvements and bug fixes. Highlights +include: + +- Support for Client-Side Field Level Encryption with MongoDB 4.2. See + :doc:`examples/encryption` for examples. +- Support for Python 3.8. +- Added :attr:`pymongo.client_session.ClientSession.in_transaction`. +- Do not hold the Topology lock while creating connections in a MongoClient's + background thread. This change fixes a bug where application operations would + block while the background thread ensures that all server pools have + minPoolSize connections. +- Fix a UnicodeDecodeError bug when coercing a PyMongoError with a non-ascii + error message to unicode on Python 2. +- Fix an edge case bug where PyMongo could exceed the server's + maxMessageSizeBytes when generating a compressed bulk write command. + +Issues Resolved +............... + +See the `PyMongo 3.10 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=23944 + Changes in Version 3.9.0 ------------------------ @@ -11,7 +38,7 @@ Version 3.9 adds support for MongoDB 4.2. Highlights include: - New method :meth:`pymongo.client_session.ClientSession.with_transaction` to support conveniently running a transaction in a session with automatic retries and at-most-once semantics. -- Initial support for client side field level encyption. See the docstring for +- Initial support for client side field level encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`, :class:`~pymongo.encryption_options.AutoEncryptionOpts`, and :mod:`~pymongo.encryption` for details. **Note: Support for client side From 1d71968c76c1c029c4296f7315d4fb17e006048e Mon Sep 17 00:00:00 2001 From: Mark Benvenuto Date: Tue, 10 Dec 2019 13:05:51 -0500 Subject: [PATCH 0039/2111] PYTHON-2064 Collect crash dumps from mongo-orchestration (#431) --- .evergreen/config.yml | 46 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3e4143fd9f..4220e5d736 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -165,6 +165,24 @@ functions: set -o xtrace ${PREPARE_SHELL} find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz + - command: archive.targz_pack + params: + target: "mongo-coredumps.tgz" + source_dir: "./" + include: + - "./**.core" + - "./**.mdmp" # Windows: minidumps + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: mongo-coredumps.tgz + remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Core Dumps - Execution + optional: true - command: s3.put params: aws_key: ${aws_key} @@ -236,6 +254,34 @@ functions: params: script: | set -o xtrace + + # Enable core dumps if enabled on the machine + # Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml + if [ -f /proc/self/coredump_filter ]; then + # Set the shell process (and its children processes) to dump ELF headers (bit 4), + # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). + echo 0x13 > /proc/self/coredump_filter + + if [ -f /sbin/sysctl ]; then + # Check that the core pattern is set explicitly on our distro image instead + # of being the OS's default value. This ensures that coredump names are consistent + # across distros and can be picked up by Evergreen. + core_pattern=$(/sbin/sysctl -n "kernel.core_pattern") + if [ "$core_pattern" = "dump_%e.%p.core" ]; then + echo "Enabling coredumps" + ulimit -c unlimited + fi + fi + fi + + if [ $(uname -s) == "Darwin" ]; then + core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") + if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then + echo "Enabling coredumps" + ulimit -c unlimited + fi + fi + ${PREPARE_SHELL} MONGODB_VERSION=${VERSION} \ TOPOLOGY=${TOPOLOGY} \ From 2e36161e38c65b1c01588da928b46f393395cea8 Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Wed, 11 Dec 2019 05:08:54 +1100 Subject: [PATCH 0040/2111] Fix simple typo: overidden -> overridden (#432) --- doc/examples/authentication.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 4a0b352554..dabf06957a 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -32,7 +32,7 @@ SCRAM-SHA-256 (RFC 7677) SCRAM-SHA-256 is the default authentication mechanism supported by a cluster configured for authentication with MongoDB 4.0 or later. Authentication requires a username, a password, and a database name. The default database -name is "admin", this can be overidden with the ``authSource`` option. +name is "admin", this can be overridden with the ``authSource`` option. Credentials can be specified as arguments to :class:`~pymongo.mongo_client.MongoClient`:: @@ -55,7 +55,7 @@ SCRAM-SHA-1 (RFC 5802) SCRAM-SHA-1 is the default authentication mechanism supported by a cluster configured for authentication with MongoDB 3.0 or later. Authentication requires a username, a password, and a database name. The default database -name is "admin", this can be overidden with the ``authSource`` option. +name is "admin", this can be overridden with the ``authSource`` option. Credentials can be specified as arguments to :class:`~pymongo.mongo_client.MongoClient`:: From 25a9b98cb31fad17ceb338161e5555793e6b3641 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 Dec 2019 10:51:31 -0800 Subject: [PATCH 0041/2111] BUMP 3.10.0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index d7298ad028..f8545b1127 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -64,7 +64,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 10, 0, '.dev0') +version_tuple = (3, 10, 0) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 4167c3c7ad..612a36eca5 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.10.0.dev0" +version = "3.10.0" f = open("README.rst") try: From 4c18d09eff634d06d33f6ceae55c9613e62102ef Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 Dec 2019 10:52:38 -0800 Subject: [PATCH 0042/2111] BUMP 3.11.0.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index f8545b1127..0fa71c9f5b 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -64,7 +64,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 10, 0) +version_tuple = (3, 11, 0, '.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 612a36eca5..659fcb1ea4 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.10.0" +version = "3.11.0.dev0" f = open("README.rst") try: From eed8e29bbb46df0f246b9dbb8d2259b963bb230d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 18 Dec 2019 15:24:02 -0800 Subject: [PATCH 0043/2111] PYTHON-2089 Call close_socket with a reason Add tests for PYTHON-1954. --- pymongo/pool.py | 2 +- test/test_client.py | 71 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index f114c40e4d..6407d53ab9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1062,7 +1062,7 @@ def remove_stale_sockets(self, reference_pool_id): # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. if self.pool_id != reference_pool_id: - sock_info.close_socket() + sock_info.close_socket(ConnectionClosedReason.STALE) break self.sockets.appendleft(sock_info) finally: diff --git a/test/test_client.py b/test/test_client.py index 775402f597..05e94b3d9b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -24,6 +24,7 @@ import struct import sys import time +import threading import warnings sys.path[0:0] = [""] @@ -51,6 +52,7 @@ from pymongo.monitoring import (ServerHeartbeatListener, ServerHeartbeatStartedEvent) from pymongo.mongo_client import MongoClient +from pymongo.monotonic import time as monotonic_time from pymongo.driver_info import DriverInfo from pymongo.pool import SocketInfo, _METADATA from pymongo.read_preferences import ReadPreference @@ -1455,6 +1457,75 @@ def compression_settings(client): # No error client.pymongo_test.test.find_one() + def test_reset_during_update_pool(self): + client = rs_or_single_client(minPoolSize=10) + self.addCleanup(client.close) + client.admin.command('ping') + pool = get_pool(client) + pool_id = pool.pool_id + + # Continuously reset the pool. + class ResetPoolThread(threading.Thread): + def __init__(self, pool): + super(ResetPoolThread, self).__init__() + self.running = True + self.pool = pool + + def stop(self): + self.running = False + + def run(self): + while self.running: + self.pool.reset() + time.sleep(0.001) + + t = ResetPoolThread(pool) + t.start() + + # Ensure that update_pool completes without error even when the pool + # is reset concurrently. + try: + while True: + for _ in range(10): + client._topology.update_pool() + if pool_id != pool.pool_id: + break + finally: + t.stop() + t.join() + client.admin.command('ping') + + def test_background_connections_do_not_hold_locks(self): + min_pool_size = 10 + client = rs_or_single_client( + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, + connect=False) + self.addCleanup(client.close) + + # Create a single connection in the pool. + client.admin.command('ping') + + # Cause new connections stall for a few seconds. + pool = get_pool(client) + original_connect = pool.connect + + def stall_connect(*args, **kwargs): + time.sleep(2) + return original_connect(*args, **kwargs) + + pool.connect = stall_connect + + # Wait for the background thread to start creating connections + wait_until(lambda: len(pool.sockets) > 1, 'start creating connections') + + # Assert that application operations do not block. + for _ in range(10): + start = monotonic_time() + client.admin.command('ping') + total = monotonic_time() - start + # Each ping command should not take more than 2 seconds + self.assertLess(total, 2) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From fc26881f01354be04c5181fb50225945cb2b4839 Mon Sep 17 00:00:00 2001 From: Kevin Albertson Date: Mon, 23 Dec 2019 16:38:33 -0500 Subject: [PATCH 0044/2111] Fix typo in client side encryption examples (#435) --- doc/examples/encryption.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 75aad30dfc..2f8e2c7a93 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -387,7 +387,7 @@ Explicit encryption is a MongoDB community feature and does not use the # on MongoClient, Database, or Collection. coll.codec_options) - # Create a new data key and json schema for the encryptedField. + # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( 'local', key_alt_names=['pymongo_encryption_example_3']) @@ -472,7 +472,7 @@ To configure automatic *decryption* without automatic *encryption* set # on MongoClient, Database, or Collection. coll.codec_options) - # Create a new data key and json schema for the encryptedField. + # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( 'local', key_alt_names=['pymongo_encryption_example_4']) From 5c02f8bec401e7376d6f16287b671c8089fbf44c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 12 Dec 2019 15:01:08 -0800 Subject: [PATCH 0045/2111] PYTHON-2072 Fix tests now that "counts" is not reported in map_reduce --- test/test_collection.py | 10 +++++++-- test/test_custom_types.py | 5 ++++- test/test_read_concern.py | 46 +++++++++++++++------------------------ test/test_session.py | 19 +++++++++------- 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/test/test_collection.py b/test/test_collection.py index 3f275aba10..acd2c2c1a1 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2079,6 +2079,8 @@ def test_map_reduce(self): self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) + # Create the output database. + db.client.mrtestdb.mrunittests.insert_one({}) result = db.test.map_reduce(map, reduce, out=SON([('replace', 'mrunittests'), ('db', 'mrtestdb') @@ -2090,7 +2092,9 @@ def test_map_reduce(self): full_result = db.test.map_reduce(map, reduce, out='mrunittests', full_response=True) - self.assertEqual(6, full_result["counts"]["emit"]) + self.assertEqual('mrunittests', full_result["result"]) + if client_context.version < (4, 3): + self.assertEqual(6, full_result["counts"]["emit"]) result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2) self.assertEqual(2, result.find_one({"_id": "cat"})["value"]) @@ -2111,7 +2115,9 @@ def test_map_reduce(self): full_result = db.test.inline_map_reduce(map, reduce, full_response=True) - self.assertEqual(6, full_result["counts"]["emit"]) + self.assertEqual(3, len(full_result["results"])) + if client_context.version < (4, 3): + self.assertEqual(6, full_result["counts"]["emit"]) with self.write_concern_collection() as coll: coll.map_reduce(map, reduce, 'output') diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 3d937082fd..ba0bb0ca69 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -663,7 +663,10 @@ def test_map_reduce_w_custom_type(self): full_result = test.inline_map_reduce(map, reduce, full_response=True) - self.assertEqual(3, full_result["counts"]["emit"]) + result = full_result['results'] + self.assertTrue(isinstance(result, list)) + self.assertEqual(1, len(result)) + self.assertEqual(result[0]["_id"], 'ABCD') def test_find_one_and__w_custom_type_decoder(self): db = self.db diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 4d7fcaf949..abd69309a9 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -30,9 +30,13 @@ def setUpClass(cls): cls.listener = OvertCommandListener() cls.client = single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test + client_context.client.pymongo_test.create_collection('coll') + + @classmethod + def tearDownClass(cls): + client_context.client.pymongo_test.drop_collection('coll') def tearDown(self): - self.db.coll.drop() self.listener.results.clear() def test_read_concern(self): @@ -104,12 +108,8 @@ def test_command_cursor(self): def test_aggregate_out(self): coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - try: - tuple(coll.aggregate([{'$match': {'field': 'value'}}, - {'$out': 'output_collection'}])) - except OperationFailure: - # "ns doesn't exist" - pass + tuple(coll.aggregate([{'$match': {'field': 'value'}}, + {'$out': 'output_collection'}])) # Aggregate with $out supports readConcern MongoDB 4.2 onwards. if client_context.version >= (4, 1): @@ -121,26 +121,18 @@ def test_aggregate_out(self): def test_map_reduce_out(self): coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - try: - tuple(coll.map_reduce('function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }', - out='output_collection')) - except OperationFailure: - # "ns doesn't exist" - pass + coll.map_reduce('function() { emit(this._id, this.value); }', + 'function(key, values) { return 42; }', + out='output_collection') self.assertNotIn('readConcern', self.listener.results['started'][0].command) if client_context.version.at_least(3, 1, 9, -1): self.listener.results.clear() - try: - tuple(coll.map_reduce( - 'function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }', - out={'inline': 1})) - except OperationFailure: - # "ns doesn't exist" - pass + coll.map_reduce( + 'function() { emit(this._id, this.value); }', + 'function(key, values) { return 42; }', + out={'inline': 1}) self.assertEqual( {'level': 'local'}, self.listener.results['started'][0].command['readConcern']) @@ -148,13 +140,9 @@ def test_map_reduce_out(self): @client_context.require_version_min(3, 1, 9, -1) def test_inline_map_reduce(self): coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - try: - tuple(coll.inline_map_reduce( - 'function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }')) - except OperationFailure: - # "ns doesn't exist" - pass + tuple(coll.inline_map_reduce( + 'function() { emit(this._id, this.value); }', + 'function(key, values) { return 42; }')) self.assertEqual( {'level': 'local'}, self.listener.results['started'][0].command['readConcern']) diff --git a/test/test_session.py b/test/test_session.py index 461a26a03e..02352022ee 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -857,14 +857,17 @@ def test_reads(self): map_reduce_exc = None if client_context.version.at_least(4, 1, 12): map_reduce_exc = OperationFailure - self._test_reads( - lambda coll, session: coll.map_reduce( - 'function() {}', 'function() {}', 'inline', session=session), - exception=map_reduce_exc) - self._test_reads( - lambda coll, session: coll.inline_map_reduce( - 'function() {}', 'function() {}', session=session), - exception=map_reduce_exc) + # SERVER-44635 The mapReduce in aggregation project added back + # support for casually consistent mapReduce. + if client_context.version < (4, 3): + self._test_reads( + lambda coll, session: coll.map_reduce( + 'function() {}', 'function() {}', 'inline', session=session), + exception=map_reduce_exc) + self._test_reads( + lambda coll, session: coll.inline_map_reduce( + 'function() {}', 'function() {}', session=session), + exception=map_reduce_exc) if (not client_context.is_mongos and not client_context.version.at_least(4, 1, 0)): def scan(coll, session): From 30667d1e005cee54de78d2fba1b15bc1e4fa813a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Jan 2020 16:30:26 -0800 Subject: [PATCH 0046/2111] PYTHON-2092 Avoid creating new connections during MongoClient.close --- doc/changelog.rst | 19 +++++++++++++++++++ pymongo/mongo_client.py | 18 +++++++++++------- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index b80dad9e61..f8ed9f702f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,25 @@ Changelog ========= +Changes in Version 3.10.1 +------------------------- + +Version 3.10.1 fixes the following issues discovered since the release of +3.10.0: + +- Fix a TypeError logged to stderr that could be triggered during server + maintenance or during :meth:`pymongo.mongo_client.MongoClient.close`. +- Avoid creating new connections during + :meth:`pymongo.mongo_client.MongoClient.close`. + +Issues Resolved +............... + +See the `PyMongo 3.10.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=25039 + Changes in Version 3.10.0 ------------------------- diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 08a381f539..edee1afb68 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1165,10 +1165,10 @@ def close(self): session_ids = self._topology.pop_all_sessions() if session_ids: self._end_sessions(session_ids) - # Stop the periodic task thread and then run _process_periodic_tasks - # to send pending killCursor requests before closing the topology. + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. self._kill_cursors_executor.close() - self._process_periodic_tasks() + self._process_kill_cursors() self._topology.close() if self._encrypter: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. @@ -1717,10 +1717,8 @@ def _kill_cursors(self, cursor_ids, address, topology, session): duration, reply, 'killCursors', request_id, tuple(address)) - # This method is run periodically by a background thread. - def _process_periodic_tasks(self): - """Process any pending kill cursors requests and - maintain connection pool parameters.""" + def _process_kill_cursors(self): + """Process any pending kill cursors requests.""" address_to_cursor_ids = defaultdict(list) # Other threads or the GC may append to the queue concurrently. @@ -1741,6 +1739,12 @@ def _process_periodic_tasks(self): cursor_ids, address, topology, session=None) except Exception: helpers._handle_exception() + + # This method is run periodically by a background thread. + def _process_periodic_tasks(self): + """Process any pending kill cursors requests and + maintain connection pool parameters.""" + self._process_kill_cursors() try: self._topology.update_pool() except Exception: From d7d94b2776098dba32686ddf3ada1f201172daaf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jan 2020 16:36:17 -0800 Subject: [PATCH 0047/2111] BUMP 3.10.1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 0fa71c9f5b..92ae333175 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -64,7 +64,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, '.dev0') +version_tuple = (3, 10, 1) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 659fcb1ea4..7c0110fae8 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0.dev0" +version = "3.10.1" f = open("README.rst") try: From 2d836ecb519635913dab0117c08533ed66d7629d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jan 2020 16:39:06 -0800 Subject: [PATCH 0048/2111] BUMP 3.11.0.dev1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 92ae333175..a54ba8bac3 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -64,7 +64,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 10, 1) +version_tuple = (3, 11, 0, '.dev1') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 7c0110fae8..39204a4690 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.10.1" +version = "3.11.0.dev1" f = open("README.rst") try: From 6c27e3e9d79f544db032f71a7c0c74d8b8ae2891 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 15 Jan 2020 13:34:06 -0800 Subject: [PATCH 0049/2111] PYTHON-2094 Migrate macOS evergreen builds to macOS-1014 --- .evergreen/config.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4220e5d736..3729d3d56e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -861,9 +861,9 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/debian92/master/latest/libmongocrypt.tar.gz - - id: macos-1012 - display_name: "macOS 10.12" - run_on: macos-1012 + - id: macos-1014 + display_name: "macOS 10.14" + run_on: macos-1014 variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 @@ -1267,12 +1267,12 @@ buildvariants: platform: # MacOS introduced SSL support with MongoDB >= 3.2. # Older server versions (2.6, 3.0) are supported without SSL. - - macos-1012 + - macos-1014 auth: "*" ssl: "*" exclude_spec: # No point testing with SSL without auth. - - platform: macos-1012 + - platform: macos-1014 auth: "noauth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" @@ -1285,7 +1285,7 @@ buildvariants: - ".3.2" rules: - if: - platform: macos-1012 + platform: macos-1014 auth: "*" ssl: "nossl" then: @@ -1296,7 +1296,7 @@ buildvariants: - matrix_name: "test-macos-encryption" matrix_spec: platform: - - macos-1012 + - macos-1014 auth: "auth" ssl: "nossl" encryption: "*" From d5b0790da18030e20a69e5ded520ab2690364ea3 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 23 Jan 2020 18:00:03 -0800 Subject: [PATCH 0050/2111] PYTHON-2101 Fix SSL failures on MacOS-10.14 --- test/test_ssl.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/test_ssl.py b/test/test_ssl.py index c5531f9eb6..909d38f28c 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -428,6 +428,12 @@ def test_validation_with_system_ca_certs(self): if sys.version_info < (2, 7, 9): raise SkipTest("Can't load system CA certificates.") + if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and + sys.platform == 'darwin'): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable.") + # Tell OpenSSL where CA certificates live. os.environ['SSL_CERT_FILE'] = CA_PEM try: From e08ad6e8f7ffdc5d492674cb0a84240b2bbeb252 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Dec 2019 10:50:15 -0800 Subject: [PATCH 0051/2111] PYTHON-2068 Wait for w=0 operations to complete when dropping test database --- test/test_session.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/test/test_session.py b/test/test_session.py index 02352022ee..3c90fa5fa1 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -32,7 +32,8 @@ from test.utils import (ignore_deprecations, rs_or_single_client, EventListener, - TestCreator) + TestCreator, + wait_until) from test.utils_spec_runner import SpecRunner # Ignore auth commands like saslStart, so we can assert lsid is in all commands. @@ -761,6 +762,19 @@ def test_unacknowledged_writes(self): ops.extend(self.collection_write_ops(coll)) self._test_unacknowledged_ops(client, *ops) + def drop_db(): + try: + self.client.drop_database(db.name) + return True + except OperationFailure as exc: + # Try again on BackgroundOperationInProgressForDatabase and + # BackgroundOperationInProgressForNamespace. + if exc.code in (12586, 12587): + return False + raise + + wait_until(drop_db, 'dropped database after w=0 writes') + class TestCausalConsistency(unittest.TestCase): From 5df94d08f025b4c82c4ba318bd6b9304b5185fcd Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 1 Nov 2019 20:05:26 -0700 Subject: [PATCH 0052/2111] PYTHON-2040 PyOpenSSL support --- .evergreen/config.yml | 49 ++++++ .evergreen/run-atlas-tests.sh | 23 ++- .evergreen/run-pyopenssl-tests.sh | 27 +++ pymongo/errors.py | 10 +- pymongo/network.py | 72 +------- pymongo/pool.py | 37 ++-- pymongo/pyopenssl_context.py | 269 ++++++++++++++++++++++++++++++ pymongo/socket_checker.py | 91 ++++++++++ pymongo/ssl_context.py | 168 +++++++++++-------- pymongo/ssl_match_hostname.py | 5 +- pymongo/ssl_support.py | 107 +++++------- setup.py | 21 ++- test/atlas/test_connection.py | 4 +- test/test_pooling.py | 2 +- test/test_ssl.py | 17 +- test/test_uri_parser.py | 16 +- 16 files changed, 664 insertions(+), 254 deletions(-) create mode 100644 .evergreen/run-pyopenssl-tests.sh create mode 100644 pymongo/pyopenssl_context.py create mode 100644 pymongo/socket_checker.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3729d3d56e..0d79109080 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -338,6 +338,16 @@ functions: ${PREPARE_SHELL} PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-cdecimal-tests.sh + "run pyopenssl tests": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + set -o xtrace + ${PREPARE_SHELL} + PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-pyopenssl-tests.sh + "run doctests": - command: shell.exec type: test @@ -796,6 +806,17 @@ tasks: TOPOLOGY: "server" - func: "run cdecimal tests" + # Use latest for this, since we're + # adding this to support OCSP stapling + - name: "pyopenssl" + tags: ["pyopenssl"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + - func: "run pyopenssl tests" + - name: "no-server" tags: ["no-server"] commands: @@ -1373,6 +1394,27 @@ buildvariants: display_name: "${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions +- matrix_name: "tests-pyopenssl" + matrix_spec: + platform: ubuntu-16.04 + python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "pypy", "pypy3.5"] + auth: "*" + ssl: "ssl" + display_name: "PyOpenSSL ${python-version} ${platform} ${auth}" + tasks: + - "pyopenssl" + +- matrix_name: "test-pyopenssl-old-py27" + matrix_spec: + platform: + # Supported OSes with pre-2.7.9 CPython versions. + - rhel70 # CPython 2.7.5 + auth: "*" + ssl: "ssl" + display_name: "PyOpenSSL Pre-2.7.9 Python ${platform} ${auth}" + tasks: + - "pyopenssl" + - matrix_name: "tests-python-version-rhel62-test-encryption" matrix_spec: platform: rhel62 @@ -1706,6 +1748,13 @@ buildvariants: expansions: set_xtrace_on: on +- matrix_name: "atlas-connect-pre-279" + matrix_spec: + platform: rhel70 + display_name: "Atlas connect Pre-2.7.9 Python ${platform}" + tasks: + - name: "atlas-connect" + - matrix_name: "atlas-connect" matrix_spec: platform: rhel62 diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index c06d3b45b5..af861ce350 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -5,15 +5,36 @@ set -o errexit export JAVA_HOME=/opt/java/jdk8 +if [ -z "$PYTHON_BINARY" ]; then + echo "No python binary specified" + PYTHON_BINARY=$(command -v python || command -v python3) || true + if [ -z "$PYTHON_BINARY" ]; then + echo "Cannot test without python or python3 installed!" + exit 1 + fi +fi + IMPL=$(${PYTHON_BINARY} -c "import platform, sys; sys.stdout.write(platform.python_implementation())") if [ $IMPL = "Jython" -o $IMPL = "PyPy" ]; then + echo "Using Jython or PyPy" $PYTHON_BINARY -m virtualenv --never-download --no-wheel atlastest . atlastest/bin/activate trap "deactivate; rm -rf atlastest" EXIT HUP pip install certifi PYTHON=python else - PYTHON=$PYTHON_BINARY + IS_PRE_279=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('1' if sys.version_info < (2, 7, 9) else '0')") + if [ $IS_PRE_279 = "1" ]; then + echo "Using a Pre-2.7.9 CPython" + $PYTHON_BINARY -m virtualenv --never-download --no-wheel atlastest + . atlastest/bin/activate + trap "deactivate; rm -rf atlastest" EXIT HUP + pip install pyopenssl>=17.2.0 service_identity>18.1.0 + PYTHON=python + else + echo "Using CPython 2.7.9+" + PYTHON=$PYTHON_BINARY + fi fi echo "Running tests" diff --git a/.evergreen/run-pyopenssl-tests.sh b/.evergreen/run-pyopenssl-tests.sh new file mode 100644 index 0000000000..8418947410 --- /dev/null +++ b/.evergreen/run-pyopenssl-tests.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -o xtrace +set -o errexit + +export DB_USER="bob" +export DB_PASSWORD="pwd123" +export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" +export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" + +if [ -z "$PYTHON_BINARY" ]; then + PYTHON=$(command -v python || command -v python3) || true + if [ -z "$PYTHON" ]; then + echo "Cannot test without python or python3 installed!" + exit 1 + fi +else + PYTHON="$PYTHON_BINARY" +fi + +$PYTHON -m virtualenv pyopenssltest +trap "deactivate; rm -rf pyopenssltest" EXIT HUP +. pyopenssltest/bin/activate +pip install pyopenssl>=17.2.0 service_identity>=18.1.0 +pip list +python -c 'import sys; print(sys.version)' +python setup.py test diff --git a/pymongo/errors.py b/pymongo/errors.py index 2d9fd05029..fb4b45628d 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -19,10 +19,14 @@ from bson.errors import * try: - from ssl import CertificateError + # CPython 3.7+ + from ssl import SSLCertVerificationError as CertificateError except ImportError: - from pymongo.ssl_match_hostname import CertificateError - + try: + from ssl import CertificateError + except ImportError: + class CertificateError(ValueError): + pass class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" diff --git a/pymongo/network.py b/pymongo/network.py index 0996180f5f..cf714a4208 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -16,23 +16,7 @@ import datetime import errno -import select import struct -import threading - -_HAS_POLL = True -_EVENT_MASK = 0 -try: - from select import poll - _EVENT_MASK = ( - select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP) -except ImportError: - _HAS_POLL = False - -try: - from select import error as _SELECT_ERROR -except ImportError: - _SELECT_ERROR = OSError from bson import _decode_all_selective from bson.py3compat import PY3 @@ -45,6 +29,7 @@ OperationFailure, ProtocolError) from pymongo.message import _UNPACK_REPLY +from pymongo.socket_checker import _errno_from_exception _UNPACK_HEADER = struct.Struct(" 1023). - return True - except (_SELECT_ERROR, IOError) as exc: - if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): - continue - return True - except Exception: - # Any other exceptions should be attributed to a closed - # or invalid socket. - return True - return len(rd) > 0 diff --git a/pymongo/pool.py b/pymongo/pool.py index 6407d53ab9..ad8e0ba993 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -21,21 +21,11 @@ import threading import collections -try: - import ssl - from ssl import SSLError - _HAVE_SNI = getattr(ssl, 'HAS_SNI', False) -except ImportError: - _HAVE_SNI = False - class SSLError(socket.error): - pass - -try: - from ssl import CertificateError as _SSLCertificateError -except ImportError: - class _SSLCertificateError(ValueError): - pass +from pymongo.ssl_support import ( + SSLError as _SSLError, + HAS_SNI as _HAVE_SNI, + IPADDR_SAFE as _IPADDR_SAFE) from bson import DEFAULT_CODEC_OPTIONS from bson.py3compat import imap, itervalues, _unicode, integer_types @@ -52,6 +42,7 @@ class _SSLCertificateError(ValueError): ORDERED_TYPES, WAIT_QUEUE_TIMEOUT) from pymongo.errors import (AutoReconnect, + CertificateError, ConnectionFailure, ConfigurationError, InvalidOperation, @@ -65,12 +56,12 @@ class _SSLCertificateError(ValueError): from pymongo.monitoring import (ConnectionCheckOutFailedReason, ConnectionClosedReason) from pymongo.network import (command, - receive_message, - SocketChecker) + receive_message) from pymongo.read_preferences import ReadPreference from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker # Always use our backport so we always have support for IP address matching -from pymongo.ssl_match_hostname import match_hostname, CertificateError +from pymongo.ssl_match_hostname import match_hostname # For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are # not permitted for SNI hostname. @@ -279,7 +270,7 @@ def _raise_connection_failure(address, error, msg_prefix=None): msg = msg_prefix + msg if isinstance(error, socket.timeout): raise NetworkTimeout(msg) - elif isinstance(error, SSLError) and 'timed out' in str(error): + elif isinstance(error, _SSLError) and 'timed out' in str(error): # CPython 2.7 and PyPy 2.x do not distinguish network # timeouts from other SSLErrors (https://bugs.python.org/issue10272). # Luckily, we can work around this limitation because the phrase @@ -791,7 +782,8 @@ def _raise_connection_failure(self, error): # KeyboardInterrupt from the start, rather than as an initial # socket.error, so we catch that, close the socket, and reraise it. self.close_socket(ConnectionClosedReason.ERROR) - if isinstance(error, socket.error): + # SSLError from PyOpenSSL inherits directly from Exception. + if isinstance(error, (IOError, OSError, _SSLError)): _raise_connection_failure(self.address, error) else: raise @@ -882,9 +874,6 @@ def _create_connection(address, options): raise socket.error('getaddrinfo failed') -_PY37PLUS = sys.version_info[:2] >= (3, 7) - - def _configured_socket(address, options): """Given (host, port) and PoolOptions, return a configured socket. @@ -905,11 +894,11 @@ def _configured_socket(address, options): # https://bugs.python.org/issue32185 # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. - if _HAVE_SNI and (not is_ip_address(host) or _PY37PLUS): + if _HAVE_SNI and (not is_ip_address(host) or _IPADDR_SAFE): sock = ssl_context.wrap_socket(sock, server_hostname=host) else: sock = ssl_context.wrap_socket(sock) - except _SSLCertificateError: + except CertificateError: sock.close() # Raise CertificateError directly like we do after match_hostname # below. diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py new file mode 100644 index 0000000000..4cb2ec003f --- /dev/null +++ b/pymongo/pyopenssl_context.py @@ -0,0 +1,269 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""A CPython compatible SSLContext implementation wrapping PyOpenSSL's +context. +""" + +import socket as _socket +import ssl as _stdlibssl + +from errno import EINTR as _EINTR + +# service_identity requires this for py27, so it should always be available +from ipaddress import ip_address as _ip_address + +from OpenSSL import SSL as _SSL +from service_identity.pyopenssl import ( + verify_hostname as _verify_hostname, + verify_ip_address as _verify_ip_address) +from service_identity import ( + CertificateError as _SICertificateError, + VerificationError as _SIVerificationError) + +from bson.py3compat import _unicode +from pymongo.errors import CertificateError as _CertificateError +from pymongo.monotonic import time as _time +from pymongo.socket_checker import ( + _errno_from_exception, SocketChecker as _SocketChecker) + +PROTOCOL_SSLv23 = _SSL.SSLv23_METHOD +# Always available +OP_NO_SSLv2 = _SSL.OP_NO_SSLv2 +OP_NO_SSLv3 = _SSL.OP_NO_SSLv3 +OP_NO_COMPRESSION = _SSL.OP_NO_COMPRESSION +# This isn't currently documented for PyOpenSSL +OP_NO_RENEGOTIATION = getattr(_SSL, "OP_NO_RENEGOTIATION", 0) + +# Always available +HAS_SNI = True +CHECK_HOSTNAME_SAFE = True +IS_PYOPENSSL = True + +# Base Exception class +SSLError = _SSL.Error + +# https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L2995-L3002 +_VERIFY_MAP = { + _stdlibssl.CERT_NONE: _SSL.VERIFY_NONE, + _stdlibssl.CERT_OPTIONAL: _SSL.VERIFY_PEER, + _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT +} + +_REVERSE_VERIFY_MAP = dict( + (value, key) for key, value in _VERIFY_MAP.items()) + +def _is_ip_address(address): + try: + _ip_address(_unicode(address)) + return True + except (ValueError, UnicodeError): + return False + +# According to the docs for Connection.send it can raise +# WantX509LookupError and should be retried. +_RETRY_ERRORS = ( + _SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) + + +# https://github.com/pyca/pyopenssl/issues/168 +# https://github.com/pyca/pyopenssl/issues/176 +# https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets +class _sslConn(_SSL.Connection): + + def __init__(self, *args, **kwargs): + self.socket_checker = _SocketChecker() + super(_sslConn, self).__init__(*args, **kwargs) + + def _call(self, call, *args, **kwargs): + timeout = self.gettimeout() + if timeout: + start = _time() + while True: + try: + return call(*args, **kwargs) + except _RETRY_ERRORS: + self.socket_checker.select( + self, True, True, timeout) + if timeout and _time() - start > timeout: + raise _socket.timeout("timed out") + continue + + def do_handshake(self, *args, **kwargs): + return self._call(super(_sslConn, self).do_handshake, *args, **kwargs) + + def recv(self, *args, **kwargs): + return self._call(super(_sslConn, self).recv, *args, **kwargs) + + def recv_into(self, *args, **kwargs): + return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + + def sendall(self, buf, flags=0): + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + sent = 0 + while total_sent < total_length: + try: + sent = self._call( + super(_sslConn, self).send, view[total_sent:], flags) + # XXX: It's not clear if this can actually happen. PyOpenSSL + # doesn't appear to have any interrupt handling, nor any interrupt + # errors for OpenSSL connections. + except (IOError, OSError) as exc: + if _errno_from_exception(exc) == _EINTR: + continue + raise + # https://github.com/pyca/pyopenssl/blob/19.1.0/src/OpenSSL/SSL.py#L1756 + # https://www.openssl.org/docs/man1.0.2/man3/SSL_write.html + if sent <= 0: + raise Exception("Connection closed") + total_sent += sent + + +class SSLContext(object): + """A CPython compatible SSLContext implementation wrapping PyOpenSSL's + context. + """ + + __slots__ = ('_protocol', '_ctx', '_check_hostname') + + def __init__(self, protocol): + self._protocol = protocol + self._ctx = _SSL.Context(self._protocol) + self._check_hostname = True + + @property + def protocol(self): + """The protocol version chosen when constructing the context. + This attribute is read-only. + """ + return self._protocol + + def __get_verify_mode(self): + """Whether to try to verify other peers' certificates and how to + behave if verification fails. This attribute must be one of + ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. + """ + return _REVERSE_VERIFY_MAP[self._ctx.get_verify_mode()] + + def __set_verify_mode(self, value): + """Setter for verify_mode.""" + def _cb(connobj, x509obj, errnum, errdepth, retcode): + # It seems we don't need to do anything here. Twisted doesn't, + # and OpenSSL's SSL_CTX_set_verify let's you pass NULL + # for the callback option. It's weird that PyOpenSSL requires + # this. + return retcode + self._ctx.set_verify(_VERIFY_MAP[value], _cb) + + verify_mode = property(__get_verify_mode, __set_verify_mode) + + def __get_check_hostname(self): + return self._check_hostname + + def __set_check_hostname(self, value): + if not isinstance(value, bool): + raise TypeError("check_hostname must be True or False") + self._check_hostname = value + + check_hostname = property(__get_check_hostname, __set_check_hostname) + + def __get_options(self): + # Calling set_options adds the option to the existing bitmask and + # returns the new bitmask. + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_options + return self._ctx.set_options(0) + + def __set_options(self, value): + # Explcitly convert to int, since newer CPython versions + # use enum.IntFlag for options. The values are the same + # regardless of implementation. + self._ctx.set_options(int(value)) + + options = property(__get_options, __set_options) + + def load_cert_chain(self, certfile, keyfile=None, password=None): + """Load a private key and the corresponding certificate. The certfile + string must be the path to a single file in PEM format containing the + certificate as well as any number of CA certificates needed to + establish the certificate's authenticity. The keyfile string, if + present, must point to a file containing the private key. Otherwise + the private key will be taken from certfile as well. + """ + # Match CPython behavior + # https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L3930-L3971 + # Password callback MUST be set first or it will be ignored. + if password: + def _pwcb(max_length, prompt_twice, user_data): + # XXX:We could check the password length against what OpenSSL + # tells us is the max, but we can't raise an exception, so... + # warn? + return password.encode('utf-8') + self._ctx.set_passwd_cb(_pwcb) + self._ctx.use_certificate_chain_file(certfile) + self._ctx.use_privatekey_file(keyfile or certfile) + self._ctx.check_privatekey() + + def load_verify_locations(self, cafile=None, capath=None): + """Load a set of "certification authority"(CA) certificates used to + validate other peers' certificates when `~verify_mode` is other than + ssl.CERT_NONE. + """ + self._ctx.load_verify_locations(cafile, capath) + + def set_default_verify_paths(self): + """Specify that the platform provided CA certificates are to be used + for verification purposes.""" + # Note: See PyOpenSSL's docs for limitations, which are similar + # but not that same as CPython's. + self._ctx.set_default_verify_paths() + + def wrap_socket(self, sock, server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, # TODO: Add support to _sslConn. + server_hostname=None, session=None): + """Wrap an existing Python socket sock and return a TLS socket + object. + """ + ssl_conn = _sslConn(self._ctx, sock) + if session: + ssl_conn.set_session(session) + if server_side is True: + ssl_conn.set_accept_state() + else: + # SNI + if server_hostname and not _is_ip_address(server_hostname): + # XXX: Do this in a callback registered with + # SSLContext.set_info_callback? See Twisted for an example. + ssl_conn.set_tlsext_host_name(server_hostname.encode('idna')) + ssl_conn.set_connect_state() + # If this wasn't true the caller of wrap_socket would call + # do_handshake() + if do_handshake_on_connect: + # XXX: If we do hostname checking in a callback we can get rid + # of this call to do_handshake() since the handshake + # will happen automatically later. + ssl_conn.do_handshake() + # XXX: Do this in a callback registered with + # SSLContext.set_info_callback? See Twisted for an example. + if self.check_hostname and server_hostname is not None: + try: + if _is_ip_address(server_hostname): + _verify_ip_address(ssl_conn, _unicode(server_hostname)) + else: + _verify_hostname(ssl_conn, _unicode(server_hostname)) + except (_SICertificateError, _SIVerificationError) as exc: + raise _CertificateError(str(exc)) + return ssl_conn diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py new file mode 100644 index 0000000000..9b21e69d2d --- /dev/null +++ b/pymongo/socket_checker.py @@ -0,0 +1,91 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Select / poll helper""" + +import errno +import select +import threading + +_HAVE_POLL = hasattr(select, "poll") +_SelectError = getattr(select, "error", OSError) + + +def _errno_from_exception(exc): + if hasattr(exc, 'errno'): + return exc.errno + if exc.args: + return exc.args[0] + return None + + +class SocketChecker(object): + + def __init__(self): + if _HAVE_POLL: + self._lock = threading.Lock() + self._poller = select.poll() + else: + self._lock = None + self._poller = None + + def select(self, sock, read=False, write=False, timeout=0): + """Select for reads or writes with a timeout in seconds.""" + while True: + try: + if self._poller: + mask = select.POLLERR | select.POLLHUP + if read: + mask = mask | select.POLLIN | select.POLLPRI + if write: + mask = mask | select.POLLOUT + with self._lock: + self._poller.register(sock, mask) + try: + # poll() timeout is in milliseconds. select() + # timeout is in seconds. + res = self._poller.poll(timeout * 1000) + finally: + self._poller.unregister(sock) + else: + rlist = [sock] if read else [] + wlist = [sock] if write else [] + res = select.select(rlist, wlist, [sock], timeout) + except (_SelectError, IOError) as exc: + if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): + continue + raise + return res + + def socket_closed(self, sock): + """Return True if we know socket has been closed, False otherwise. + """ + try: + res = self.select(sock, read=True) + except (RuntimeError, KeyError): + # RuntimeError is raised during a concurrent poll. KeyError + # is raised by unregister if the socket is not in the poller. + # These errors should not be possible since we protect the + # poller with a mutex. + raise + except ValueError: + # ValueError is raised by register/unregister/select if the + # socket file descriptor is negative or outside the range for + # select (> 1023). + return True + except Exception: + # Any other exceptions should be attributed to a closed + # or invalid socket. + return True + return any(res) diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 6afb2d2d56..6ca28a58bf 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -14,83 +14,119 @@ """A fake SSLContext implementation.""" -try: - import ssl -except ImportError: - pass +import ssl as _ssl +import sys as _sys +# PROTOCOL_TLS_CLIENT is Python 3.6+ +PROTOCOL_SSLv23 = getattr(_ssl, "PROTOCOL_TLS_CLIENT", _ssl.PROTOCOL_SSLv23) +# Python 2.7.9+ +OP_NO_SSLv2 = getattr(_ssl, "OP_NO_SSLv2", 0) +# Python 2.7.9+ +OP_NO_SSLv3 = getattr(_ssl, "OP_NO_SSLv3", 0) +# Python 2.7.9+, OpenSSL 1.0.0+ +OP_NO_COMPRESSION = getattr(_ssl, "OP_NO_COMPRESSION", 0) +# Python 3.7+, OpenSSL 1.1.0h+ +OP_NO_RENEGOTIATION = getattr(_ssl, "OP_NO_RENEGOTIATION", 0) -class SSLContext(object): - """A fake SSLContext. +# Python 2.7.9+ +HAS_SNI = getattr(_ssl, "HAS_SNI", False) +IS_PYOPENSSL = False - This implements an API similar to ssl.SSLContext from python 3.2 - but does not implement methods or properties that would be - incompatible with ssl.wrap_socket from python 2.7 < 2.7.9. +# Base Exception class +SSLError = _ssl.SSLError - You must pass protocol which must be one of the PROTOCOL_* constants - defined in the ssl module. ssl.PROTOCOL_SSLv23 is recommended for maximum - interoperability. - """ +try: + # CPython 2.7.9+ + from ssl import SSLContext + if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): + from ssl import VERIFY_CRL_CHECK_LEAF + # Python 3.7 uses OpenSSL's hostname matching implementation + # making it the obvious version to start using SSLConext.check_hostname. + # Python 3.6 might have been a good version, but it suffers + # from https://bugs.python.org/issue32185. + # We'll use our bundled match_hostname for older Python + # versions, which also supports IP address matching + # with Python < 3.5. + CHECK_HOSTNAME_SAFE = _sys.version_info[:2] >= (3, 7) +except ImportError: + from pymongo.errors import ConfigurationError - __slots__ = ('_cafile', '_certfile', - '_keyfile', '_protocol', '_verify_mode') + class SSLContext(object): + """A fake SSLContext. - def __init__(self, protocol): - self._cafile = None - self._certfile = None - self._keyfile = None - self._protocol = protocol - self._verify_mode = ssl.CERT_NONE + This implements an API similar to ssl.SSLContext from python 3.2 + but does not implement methods or properties that would be + incompatible with ssl.wrap_socket from python 2.7 < 2.7.9. - @property - def protocol(self): - """The protocol version chosen when constructing the context. - This attribute is read-only. + You must pass protocol which must be one of the PROTOCOL_* constants + defined in the ssl module. ssl.PROTOCOL_SSLv23 is recommended for maximum + interoperability. """ - return self._protocol - def __get_verify_mode(self): - """Whether to try to verify other peers' certificates and how to - behave if verification fails. This attribute must be one of - ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. - """ - return self._verify_mode + __slots__ = ('_cafile', '_certfile', + '_keyfile', '_protocol', '_verify_mode') - def __set_verify_mode(self, value): - """Setter for verify_mode.""" - self._verify_mode = value + def __init__(self, protocol): + self._cafile = None + self._certfile = None + self._keyfile = None + self._protocol = protocol + self._verify_mode = _ssl.CERT_NONE - verify_mode = property(__get_verify_mode, __set_verify_mode) + @property + def protocol(self): + """The protocol version chosen when constructing the context. + This attribute is read-only. + """ + return self._protocol - def load_cert_chain(self, certfile, keyfile=None): - """Load a private key and the corresponding certificate. The certfile - string must be the path to a single file in PEM format containing the - certificate as well as any number of CA certificates needed to - establish the certificate's authenticity. The keyfile string, if - present, must point to a file containing the private key. Otherwise - the private key will be taken from certfile as well. - """ - self._certfile = certfile - self._keyfile = keyfile + def __get_verify_mode(self): + """Whether to try to verify other peers' certificates and how to + behave if verification fails. This attribute must be one of + ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. + """ + return self._verify_mode - def load_verify_locations(self, cafile=None, dummy=None): - """Load a set of "certification authority"(CA) certificates used to - validate other peers' certificates when `~verify_mode` is other than - ssl.CERT_NONE. - """ - self._cafile = cafile + def __set_verify_mode(self, value): + """Setter for verify_mode.""" + self._verify_mode = value - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, dummy=None): - """Wrap an existing Python socket sock and return an ssl.SSLSocket - object. - """ - return ssl.wrap_socket(sock, keyfile=self._keyfile, - certfile=self._certfile, - server_side=server_side, - cert_reqs=self._verify_mode, - ssl_version=self._protocol, - ca_certs=self._cafile, - do_handshake_on_connect=do_handshake_on_connect, - suppress_ragged_eofs=suppress_ragged_eofs) + verify_mode = property(__get_verify_mode, __set_verify_mode) + + def load_cert_chain(self, certfile, keyfile=None, password=None): + """Load a private key and the corresponding certificate. The certfile + string must be the path to a single file in PEM format containing the + certificate as well as any number of CA certificates needed to + establish the certificate's authenticity. The keyfile string, if + present, must point to a file containing the private key. Otherwise + the private key will be taken from certfile as well. + """ + if password is not None: + raise ConfigurationError( + "Support for ssl_pem_passphrase requires " + "python 2.7.9+ (pypy 2.5.1+), python 3 or " + "PyOpenSSL") + self._certfile = certfile + self._keyfile = keyfile + + def load_verify_locations(self, cafile=None, dummy=None): + """Load a set of "certification authority"(CA) certificates used to + validate other peers' certificates when `~verify_mode` is other than + ssl.CERT_NONE. + """ + self._cafile = cafile + + def wrap_socket(self, sock, server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, dummy=None): + """Wrap an existing Python socket sock and return an ssl.SSLSocket + object. + """ + return _ssl.wrap_socket(sock, keyfile=self._keyfile, + certfile=self._certfile, + server_side=server_side, + cert_reqs=self._verify_mode, + ssl_version=self._protocol, + ca_certs=self._cafile, + do_handshake_on_connect=do_handshake_on_connect, + suppress_ragged_eofs=suppress_ragged_eofs) diff --git a/pymongo/ssl_match_hostname.py b/pymongo/ssl_match_hostname.py index 49e3dd6576..8da3aa5d3a 100644 --- a/pymongo/ssl_match_hostname.py +++ b/pymongo/ssl_match_hostname.py @@ -16,10 +16,7 @@ else: _unicode = lambda value: value - -class CertificateError(ValueError): - pass - +from pymongo.errors import CertificateError def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 4976017daa..4307d97744 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -18,11 +18,18 @@ import sys import threading +from bson.py3compat import string_type +from pymongo.errors import ConfigurationError + HAVE_SSL = True + try: - import ssl + import pymongo.pyopenssl_context as _ssl except ImportError: - HAVE_SSL = False + try: + import pymongo.ssl_context as _ssl + except ImportError: + HAVE_SSL = False HAVE_CERTIFI = False try: @@ -38,21 +45,19 @@ except ImportError: pass -from bson.py3compat import string_type -from pymongo.errors import ConfigurationError - _WINCERTSLOCK = threading.Lock() _WINCERTS = None -_PY37PLUS = sys.version_info[:2] >= (3, 7) - if HAVE_SSL: - try: - # Python 2.7.9+, PyPy 2.5.1+, etc. - from ssl import SSLContext - except ImportError: - from pymongo.ssl_context import SSLContext - + # Note: The validate* functions below deal with users passing + # CPython ssl module constants to configure certificate verification + # at a high level. This is legacy behavior, but requires us to + # import the ssl module even if we're only using it for this purpose. + import ssl as _stdlibssl + from ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED + HAS_SNI = _ssl.HAS_SNI + IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) + SSLError = _ssl.SSLError def validate_cert_reqs(option, value): """Validate the cert reqs are valid. It must be None or one of the three values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or @@ -60,10 +65,10 @@ def validate_cert_reqs(option, value): """ if value is None: return value - elif isinstance(value, string_type) and hasattr(ssl, value): - value = getattr(ssl, value) + if isinstance(value, string_type) and hasattr(_stdlibssl, value): + value = getattr(_stdlibssl, value) - if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED): + if value in (CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED): return value raise ValueError("The value of %s must be one of: " "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or " @@ -75,8 +80,8 @@ def validate_allow_invalid_certs(option, value): from pymongo.common import validate_boolean_or_string boolean_cert_reqs = validate_boolean_or_string(option, value) if boolean_cert_reqs: - return ssl.CERT_NONE - return ssl.CERT_REQUIRED + return CERT_NONE + return CERT_REQUIRED def _load_wincerts(): """Set _WINCERTS to an instance of wincertstore.Certfile.""" @@ -89,13 +94,6 @@ def _load_wincerts(): _WINCERTS = certfile - # XXX: Possible future work. - # - OCSP? Not supported by python at all. - # http://bugs.python.org/issue17123 - # - Adding an ssl_context keyword argument to MongoClient? This might - # be useful for sites that have unusual requirements rather than - # trying to expose every SSLContext option through a keyword/uri - # parameter. def get_ssl_context(*args): """Create and return an SSLContext object.""" (certfile, @@ -105,25 +103,11 @@ def get_ssl_context(*args): cert_reqs, crlfile, match_hostname) = args - verify_mode = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs - # Note PROTOCOL_SSLv23 is about the most misleading name imaginable. - # This configures the server and client to negotiate the - # highest protocol version they both support. A very good thing. - # PROTOCOL_TLS_CLIENT was added in CPython 3.6, deprecating - # PROTOCOL_SSLv23. - ctx = SSLContext( - getattr(ssl, "PROTOCOL_TLS_CLIENT", ssl.PROTOCOL_SSLv23)) + verify_mode = CERT_REQUIRED if cert_reqs is None else cert_reqs + ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) # SSLContext.check_hostname was added in CPython 2.7.9 and 3.4. - # PROTOCOL_TLS_CLIENT (added in Python 3.6) enables it by default. if hasattr(ctx, "check_hostname"): - if _PY37PLUS and verify_mode != ssl.CERT_NONE: - # Python 3.7 uses OpenSSL's hostname matching implementation - # making it the obvious version to start using this with. - # Python 3.6 might have been a good version, but it suffers - # from https://bugs.python.org/issue32185. - # We'll use our bundled match_hostname for older Python - # versions, which also supports IP address matching - # with Python < 3.5. + if _ssl.CHECK_HOSTNAME_SAFE and verify_mode != CERT_NONE: ctx.check_hostname = match_hostname else: ctx.check_hostname = False @@ -131,42 +115,31 @@ def get_ssl_context(*args): # Explicitly disable SSLv2, SSLv3 and TLS compression. Note that # up to date versions of MongoDB 2.4 and above already disable # SSLv2 and SSLv3, python disables SSLv2 by default in >= 2.7.7 - # and >= 3.3.4 and SSLv3 in >= 3.4.3. There is no way for us to do - # any of this explicitly for python 2.7 before 2.7.9. - ctx.options |= getattr(ssl, "OP_NO_SSLv2", 0) - ctx.options |= getattr(ssl, "OP_NO_SSLv3", 0) - # OpenSSL >= 1.0.0 - ctx.options |= getattr(ssl, "OP_NO_COMPRESSION", 0) - # Python 3.7+ with OpenSSL >= 1.1.0h - ctx.options |= getattr(ssl, "OP_NO_RENEGOTIATION", 0) + # and >= 3.3.4 and SSLv3 in >= 3.4.3. + ctx.options |= _ssl.OP_NO_SSLv2 + ctx.options |= _ssl.OP_NO_SSLv3 + ctx.options |= _ssl.OP_NO_COMPRESSION + ctx.options |= _ssl.OP_NO_RENEGOTIATION if certfile is not None: try: - if passphrase is not None: - vi = sys.version_info - # Since python just added a new parameter to an existing method - # this seems to be about the best we can do. - if (vi[0] == 2 and vi < (2, 7, 9) or - vi[0] == 3 and vi < (3, 3)): - raise ConfigurationError( - "Support for ssl_pem_passphrase requires " - "python 2.7.9+ (pypy 2.5.1+) or 3.3+") - ctx.load_cert_chain(certfile, keyfile, passphrase) - else: - ctx.load_cert_chain(certfile, keyfile) - except ssl.SSLError as exc: + ctx.load_cert_chain(certfile, keyfile, passphrase) + except _ssl.SSLError as exc: raise ConfigurationError( "Private key doesn't match certificate: %s" % (exc,)) if crlfile is not None: + if _ssl.IS_PYOPENSSL: + raise ConfigurationError( + "ssl_crlfile cannot be used with PyOpenSSL") if not hasattr(ctx, "verify_flags"): raise ConfigurationError( "Support for ssl_crlfile requires " "python 2.7.9+ (pypy 2.5.1+) or 3.4+") # Match the server's behavior. - ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF + ctx.verify_flags = getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) - elif cert_reqs != ssl.CERT_NONE: + elif cert_reqs != CERT_NONE: # CPython >= 2.7.9 or >= 3.4.0, pypy >= 2.5.1 if hasattr(ctx, "load_default_certs"): ctx.load_default_certs() @@ -189,6 +162,10 @@ def get_ssl_context(*args): ctx.verify_mode = verify_mode return ctx else: + class SSLError(Exception): + pass + HAS_SNI = False + IPADDR_SAFE = False def validate_cert_reqs(option, dummy): """No ssl module, raise ConfigurationError.""" raise ConfigurationError("The value of %s is set but can't be " diff --git a/setup.py b/setup.py index 39204a4690..7450caeee5 100755 --- a/setup.py +++ b/setup.py @@ -317,26 +317,31 @@ def build_extension(self, ext): sources=['pymongo/_cmessagemodule.c', 'bson/buffer.c'])] +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. +pyopenssl_reqs = ["pyopenssl>=17.2.0", "service_identity>=18.1.0"] + extras_require = { 'encryption': ['pymongocrypt<2.0.0'], 'snappy': ['python-snappy'], 'zstd': ['zstandard'], } -vi = sys.version_info -if vi[0] == 2: - extras_require.update({'tls': ["ipaddress"]}) + +if sys.version_info[0] == 2: extras_require.update({'srv': ["dnspython>=1.16.0,<1.17.0"]}) + extras_require.update({'tls': ["ipaddress"]}) + for req in pyopenssl_reqs: + extras_require['tls'].append("%s ; python_full_version < '2.7.9'" % (req,)) else: - extras_require.update({'tls': []}) extras_require.update({'srv': ["dnspython>=1.16.0,<2.0.0"]}) + extras_require.update({'tls': []}) if sys.platform == 'win32': extras_require['gssapi'] = ["winkerberos>=0.5.0"] - if vi < (2, 7, 9): - extras_require['tls'].append("wincertstore>=0.2") + extras_require['tls'].append("wincertstore>=0.2 ; python_full_version < '2.7.9'") else: extras_require['gssapi'] = ["pykerberos"] - if vi < (2, 7, 9): - extras_require['tls'].append("certifi") + extras_require['tls'].append("certifi ; python_full_version < '2.7.9'") extra_opts = { "packages": ["bson", "pymongo", "gridfs"] diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 4241f59232..813295977a 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -15,13 +15,13 @@ """Test connections to various Atlas cluster types.""" import os -import ssl import sys import unittest sys.path[0:0] = [""] import pymongo +from pymongo.ssl_support import HAS_SNI _REPL = os.environ.get("ATLAS_REPL") @@ -54,7 +54,7 @@ def test_sharded_cluster(self): _connect(_SHRD) def test_free_tier(self): - if not getattr(ssl, 'HAS_SNI', False): + if not HAS_SNI: raise unittest.SkipTest("Free tier requires SNI support.") _connect(_FREE) diff --git a/test/test_pooling.py b/test/test_pooling.py index 922deecdcf..bfe5abc110 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -29,8 +29,8 @@ sys.path[0:0] = [""] -from pymongo.network import SocketChecker from pymongo.pool import Pool, PoolOptions +from pymongo.socket_checker import SocketChecker from test import client_context, unittest from test.utils import (get_pool, joinall, diff --git a/test/test_ssl.py b/test/test_ssl.py index 909d38f28c..352ef1f4b8 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -30,7 +30,7 @@ from pymongo.errors import (ConfigurationError, ConnectionFailure, OperationFailure) -from pymongo.ssl_support import HAVE_SSL, get_ssl_context, validate_cert_reqs +from pymongo.ssl_support import HAVE_SSL, get_ssl_context, validate_cert_reqs, _ssl from pymongo.write_concern import WriteConcern from test import (IntegrationTest, client_context, @@ -41,6 +41,13 @@ HAVE_IPADDRESS) from test.utils import remove_all_users, connected +_HAVE_PYOPENSSL = False +try: + import OpenSSL + _HAVE_PYOPENSSL = True +except ImportError: + pass + if HAVE_SSL: import ssl @@ -143,6 +150,10 @@ def test_config_ssl(self): validate_cert_reqs('ssl_cert_reqs', 'CERT_REQUIRED'), ssl.CERT_REQUIRED) + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + def test_use_openssl_when_available(self): + self.assertTrue(_ssl.IS_PYOPENSSL) + class TestSSL(IntegrationTest): @@ -180,7 +191,7 @@ def test_ssl_pem_passphrase(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem vi = sys.version_info - if vi[0] == 2 and vi < (2, 7, 9) or vi[0] == 3 and vi < (3, 3): + if vi[0] == 2 and vi < (2, 7, 9) and not _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, @@ -378,7 +389,7 @@ def test_cert_ssl_validation_hostname_matching(self): @client_context.require_ssl_certfile def test_ssl_crlfile_support(self): - if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF'): + if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF') or _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index d218e2be7f..4fb60c8433 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -18,17 +18,21 @@ import sys import warnings +try: + from ssl import CERT_NONE +except ImportError: + CERT_NONE = 0 + sys.path[0:0] = [""] +from bson.binary import JAVA_LEGACY +from bson.py3compat import string_type, _unicode +from pymongo import ReadPreference +from pymongo.errors import ConfigurationError, InvalidURI from pymongo.uri_parser import (parse_userinfo, split_hosts, split_options, parse_uri) -from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.ssl_support import ssl -from pymongo import ReadPreference -from bson.binary import JAVA_LEGACY -from bson.py3compat import string_type, _unicode from test import unittest @@ -465,7 +469,7 @@ def test_tlsinsecure_simple(self): # check that tlsInsecure is expanded correctly. uri = "mongodb://example.com/?tlsInsecure=true" res = { - "ssl_match_hostname": False, "ssl_cert_reqs": ssl.CERT_NONE, + "ssl_match_hostname": False, "ssl_cert_reqs": CERT_NONE, "tlsinsecure": True} self.assertEqual(res, parse_uri(uri)["options"]) From c69ea64220b5e26a330c9e5803cdae138d02e61b Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 7 Feb 2020 14:43:39 -0800 Subject: [PATCH 0053/2111] PYTHON-2117 Only use env markers when setuptools is new enough --- setup.py | 42 +++++++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/setup.py b/setup.py index 7450caeee5..6a5520b835 100755 --- a/setup.py +++ b/setup.py @@ -18,11 +18,11 @@ # Don't force people to install setuptools unless # we have to. try: - from setuptools import setup + from setuptools import setup, __version__ as _setuptools_version except ImportError: from ez_setup import use_setuptools use_setuptools() - from setuptools import setup + from setuptools import setup, __version__ as _setuptools_version from distutils.cmd import Command from distutils.command.build_ext import build_ext @@ -317,6 +317,7 @@ def build_extension(self, ext): sources=['pymongo/_cmessagemodule.c', 'bson/buffer.c'])] + # PyOpenSSL 17.0.0 introduced support for OCSP. 17.2.0 fixes a bug # in set_default_verify_paths we should really avoid. # service_identity 18.1.0 introduced support for IP addr matching. @@ -325,23 +326,50 @@ def build_extension(self, ext): extras_require = { 'encryption': ['pymongocrypt<2.0.0'], 'snappy': ['python-snappy'], + 'tls': [], 'zstd': ['zstandard'], } +# https://jira.mongodb.org/browse/PYTHON-2117 +# Environment marker support didn't settle down until version 20.10 +# https://setuptools.readthedocs.io/en/latest/history.html#v20-10-0 +_use_env_markers = tuple(map(int, _setuptools_version.split('.')[:2])) > (20, 9) + +# TLS and DNS extras +# We install PyOpenSSL and service_identity for Python < 2.7.9 to +# get support for SNI, which is required to connection to Altas +# free and shared tier. if sys.version_info[0] == 2: + if _use_env_markers: + # For building wheels on Python versions >= 2.7.9 + for req in pyopenssl_reqs: + extras_require['tls'].append( + "%s ; python_full_version < '2.7.9'" % (req,)) + if sys.platform == 'win32': + extras_require['tls'].append( + "wincertstore>=0.2 ; python_full_version < '2.7.9'") + else: + extras_require['tls'].append( + "certifi ; python_full_version < '2.7.9'") + elif sys.version_info < (2, 7, 9): + # For installing from source or egg files on Python versions + # older than 2.7.9, or systems that have setuptools versions + # older than 20.10. + extras_require['tls'].extend(pyopenssl_reqs) + if sys.platform == 'win32': + extras_require['tls'].append("wincertstore>=0.2") + else: + extras_require['tls'].append("certifi") extras_require.update({'srv': ["dnspython>=1.16.0,<1.17.0"]}) extras_require.update({'tls': ["ipaddress"]}) - for req in pyopenssl_reqs: - extras_require['tls'].append("%s ; python_full_version < '2.7.9'" % (req,)) else: extras_require.update({'srv': ["dnspython>=1.16.0,<2.0.0"]}) - extras_require.update({'tls': []}) + +# GSSAPI extras if sys.platform == 'win32': extras_require['gssapi'] = ["winkerberos>=0.5.0"] - extras_require['tls'].append("wincertstore>=0.2 ; python_full_version < '2.7.9'") else: extras_require['gssapi'] = ["pykerberos"] - extras_require['tls'].append("certifi ; python_full_version < '2.7.9'") extra_opts = { "packages": ["bson", "pymongo", "gridfs"] From 5ccdf1af44ebafe50183431f4262c85e3fa23909 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Feb 2020 10:41:41 -0800 Subject: [PATCH 0054/2111] PYTHON-2118 Adjust impossible writeConcern tests for 4.4 --- test/test_read_write_concern_spec.py | 3 +-- test/utils.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 568eb5c951..510792dacc 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -31,8 +31,7 @@ from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern from test import client_context, unittest -from test.utils import (IMPOSSIBLE_WRITE_CONCERN, - EventListener, +from test.utils import (EventListener, disable_replication, enable_replication, rs_or_single_client) diff --git a/test/utils.py b/test/utils.py index 81c57b5aab..768a77996c 100644 --- a/test/utils.py +++ b/test/utils.py @@ -46,7 +46,7 @@ db_user, db_pwd) -IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000) +IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) class WhiteListEventListener(monitoring.CommandListener): From 9079596bf0f3de71e9ce3a102c8c4cd85a27b030 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Feb 2020 11:32:30 -0800 Subject: [PATCH 0055/2111] PYTHON-2120 Skip failing ChangeStream tests on 4.4 --- test/change_streams/change-streams-errors.json | 1 + test/test_change_stream.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/change_streams/change-streams-errors.json b/test/change_streams/change-streams-errors.json index 00f51eb47e..5ebbd28f46 100644 --- a/test/change_streams/change-streams-errors.json +++ b/test/change_streams/change-streams-errors.json @@ -75,6 +75,7 @@ { "description": "Change Stream should error when _id is projected out", "minServerVersion": "4.1.11", + "maxServerVersion": "4.3.3", "target": "collection", "topology": [ "replicaset", diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 97b67fb69d..a4421f265b 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -517,6 +517,7 @@ def test_update_resume_token_legacy(self): self._test_update_resume_token(self._get_expected_resume_token_legacy) # Prose test no. 2 + @client_context.require_version_max(4, 3, 3) # PYTHON-2120 @client_context.require_version_min(4, 1, 8) def test_raises_error_on_missing_id_418plus(self): # Server returns an error on 4.1.8+ From 5a1cbd8f20b1f9255d2c9338e3a198525aaab026 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Feb 2020 14:02:59 -0800 Subject: [PATCH 0056/2111] PYTHON-2073 Add NPS Survey to documentation --- doc/conf.py | 7 +++++++ doc/static/delighted.js | 22 ++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 doc/static/delighted.js diff --git a/doc/conf.py b/doc/conf.py index ad4c42b9e4..c545ab093f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -91,6 +91,13 @@ # Additional static files. html_static_path = ['static'] +# These paths are either relative to html_static_path +# or fully qualified paths (eg. https://...) +# Note: html_js_files was added in Sphinx 1.8. +html_js_files = [ + 'delighted.js', +] + # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None diff --git a/doc/static/delighted.js b/doc/static/delighted.js new file mode 100644 index 0000000000..d373bd0acf --- /dev/null +++ b/doc/static/delighted.js @@ -0,0 +1,22 @@ +/* eslint-disable */ +// Delighted +!function(e,t,r,n,a){if(!e[a]){for(var i=e[a]=[],s=0;s Date: Mon, 10 Feb 2020 15:30:07 -0800 Subject: [PATCH 0057/2111] PYTHON-2029 Support shorter SCRAM conversation --- pymongo/auth.py | 7 ++++--- test/test_auth.py | 25 +++++++++++++++++++++++++ test/utils.py | 38 +++++++++++++++++++------------------- 3 files changed, 48 insertions(+), 22 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index fef4386f19..455717a6af 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -259,7 +259,8 @@ def _authenticate_scram(credentials, sock_info, mechanism): cmd = SON([('saslStart', 1), ('mechanism', mechanism), ('payload', Binary(b"n,," + first_bare)), - ('autoAuthorize', 1)]) + ('autoAuthorize', 1), + ('options', {'skipEmptyExchange': True})]) res = sock_info.command(source, cmd) server_first = res['payload'] @@ -304,8 +305,8 @@ def _authenticate_scram(credentials, sock_info, mechanism): if not compare_digest(parsed[b'v'], server_sig): raise OperationFailure("Server returned an invalid signature.") - # Depending on how it's configured, Cyrus SASL (which the server uses) - # requires a third empty challenge. + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. if not res['done']: cmd = SON([('saslContinue', 1), ('conversationId', res['conversationId']), diff --git a/test/test_auth.py b/test/test_auth.py index 8e41e100fd..14b2f94394 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -409,6 +409,31 @@ def tearDown(self): client_context.client.testscram.command("dropAllUsersFromDatabase") client_context.client.drop_database("testscram") + def test_scram_skip_empty_exchange(self): + listener = WhiteListEventListener("saslStart", "saslContinue") + client_context.create_user( + 'testscram', 'sha256', 'pwd', roles=['dbOwner'], + mechanisms=['SCRAM-SHA-256']) + + client = rs_or_single_client_noauth( + username='sha256', password='pwd', authSource='testscram', + event_listeners=[listener]) + client.admin.command('isMaster') + + # Assert we sent the skipEmptyExchange option. + first_event = listener.results['started'][0] + self.assertEqual(first_event.command_name, 'saslStart') + self.assertEqual( + first_event.command['options'], {'skipEmptyExchange': True}) + + # Assert the third exchange was skipped on servers that support it. + started = listener.started_command_names() + if client_context.version.at_least(4, 3, 3): + self.assertEqual(started, ['saslStart', 'saslContinue']) + else: + self.assertEqual( + started, ['saslStart', 'saslContinue', 'saslContinue']) + @ignore_deprecations def test_scram(self): host, port = client_context.host, client_context.port diff --git a/test/utils.py b/test/utils.py index 768a77996c..95c0885851 100644 --- a/test/utils.py +++ b/test/utils.py @@ -49,25 +49,6 @@ IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) -class WhiteListEventListener(monitoring.CommandListener): - - def __init__(self, *commands): - self.commands = set(commands) - self.results = defaultdict(list) - - def started(self, event): - if event.command_name in self.commands: - self.results['started'].append(event) - - def succeeded(self, event): - if event.command_name in self.commands: - self.results['succeeded'].append(event) - - def failed(self, event): - if event.command_name in self.commands: - self.results['failed'].append(event) - - class CMAPListener(ConnectionPoolListener): def __init__(self): self.events = [] @@ -136,6 +117,25 @@ def reset(self): self.results.clear() +class WhiteListEventListener(EventListener): + + def __init__(self, *commands): + self.commands = set(commands) + super(WhiteListEventListener, self).__init__() + + def started(self, event): + if event.command_name in self.commands: + super(WhiteListEventListener, self).started(event) + + def succeeded(self, event): + if event.command_name in self.commands: + super(WhiteListEventListener, self).succeeded(event) + + def failed(self, event): + if event.command_name in self.commands: + super(WhiteListEventListener, self).failed(event) + + class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" def started(self, event): From c4b8aef1e80dddc8c7202c527f6025d32aeaf4b0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Feb 2020 10:03:21 -0800 Subject: [PATCH 0058/2111] PYTHON-2119 Fix doc tests caused by 4.4 mapReduce change --- doc/examples/aggregation.rst | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index fd58479b35..f816eed6e5 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -120,7 +120,7 @@ iterate over the result collection: .. doctest:: >>> result = db.things.map_reduce(mapper, reducer, "myresults") - >>> for doc in result.find(): + >>> for doc in result.find().sort("_id"): ... pprint.pprint(doc) ... {u'_id': u'cat', u'value': 3.0} @@ -140,10 +140,7 @@ response to the map/reduce command, rather than just the result collection: >>> pprint.pprint( ... db.things.map_reduce(mapper, reducer, "myresults", full_response=True)) - {...u'counts': {u'emit': 6, u'input': 4, u'output': 3, u'reduce': 2}, - u'ok': ..., - u'result': u'...', - u'timeMillis': ...} + {...u'ok': 1.0,... u'result': u'myresults'...} All of the optional map/reduce parameters are also supported, simply pass them as keyword arguments. In this example we use the `query` parameter to limit the @@ -153,7 +150,7 @@ documents that will be mapped over: >>> results = db.things.map_reduce( ... mapper, reducer, "myresults", query={"x": {"$lt": 2}}) - >>> for doc in results.find(): + >>> for doc in results.find().sort("_id"): ... pprint.pprint(doc) ... {u'_id': u'cat', u'value': 1.0} @@ -171,9 +168,6 @@ specify a different database to store the result collection: ... reducer, ... out=SON([("replace", "results"), ("db", "outdb")]), ... full_response=True)) - {...u'counts': {u'emit': 6, u'input': 4, u'output': 3, u'reduce': 2}, - u'ok': ..., - u'result': {u'collection': ..., u'db': ...}, - u'timeMillis': ...} + {...u'ok': 1.0,... u'result': {u'collection': u'results', u'db': u'outdb'}...} .. seealso:: The full list of options for MongoDB's `map reduce engine `_ From 6609cc571476f9be33da3a6d5dd703bfcb26a377 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 15 Jan 2020 17:17:19 -0800 Subject: [PATCH 0059/2111] PYTHON-2098 Publish server/topology events when reseting a server due to an application error --- pymongo/database.py | 4 +- pymongo/mongo_client.py | 22 ++++---- pymongo/topology.py | 18 +++---- test/test_sdam_monitoring_spec.py | 89 ++++++++++++++++++++++++++++++- test/test_topology.py | 6 +-- test/utils.py | 8 +++ 6 files changed, 120 insertions(+), 27 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 701e55221e..50a5d1fa36 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -29,6 +29,7 @@ from pymongo.errors import (CollectionInvalid, ConfigurationError, InvalidName, + NotMasterError, OperationFailure) from pymongo.message import _first_batch from pymongo.read_preferences import ReadPreference @@ -1135,7 +1136,8 @@ def error(self): # doing so already. primary = self.__client.primary if primary: - self.__client._reset_server_and_request_check(primary) + self.__client._reset_server_and_request_check( + primary, NotMasterError(error_msg, error)) return error def last_status(self): diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index edee1afb68..8294af14e4 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1490,13 +1490,9 @@ def _retryable_write(self, retryable, func, session): with self._tmp_session(session) as s: return self._retry_with_session(retryable, func, s, None) - def _reset_server(self, address): - """Clear our connection pool for a server and mark it Unknown.""" - self._topology.reset_server(address) - - def _reset_server_and_request_check(self, address): + def _reset_server_and_request_check(self, address, error): """Clear our pool for a server, mark it Unknown, and check it soon.""" - self._topology.reset_server_and_request_check(address) + self._topology.reset_server_and_request_check(address, error) def __eq__(self, other): if isinstance(other, self.__class__): @@ -2168,7 +2164,7 @@ def __init__(self, client, server_address, session): self._client = client self._server_address = server_address self._session = session - self._max_wire_version = None + self._max_wire_version = common.MIN_WIRE_VERSION def contribute_socket(self, sock_info): """Provide socket information to the error handler.""" @@ -2205,22 +2201,22 @@ def __exit__(self, exc_type, exc_val, exc_tb): # Unknown and request an immediate check of the server. err_code = exc_val.details.get('code', -1) is_shutting_down = err_code in helpers._SHUTDOWN_CODES - if (is_shutting_down or (self._max_wire_version is None) or - (self._max_wire_version <= 7)): + if is_shutting_down or (self._max_wire_version <= 7): # Clear the pool, mark server Unknown and request check. self._client._reset_server_and_request_check( - self._server_address) + self._server_address, exc_val) else: self._client._topology.mark_server_unknown_and_request_check( - self._server_address) + self._server_address, exc_val) elif issubclass(exc_type, ConnectionFailure): # "Client MUST replace the server's description with type Unknown # ... MUST NOT request an immediate check of the server." - self._client._reset_server(self._server_address) + self._client._topology.reset_server(self._server_address, exc_val) if self._session: self._session._server_session.mark_dirty() elif issubclass(exc_type, OperationFailure): # Do not request an immediate check since the server is likely # shutting down. if exc_val.code in helpers._RETRYABLE_ERROR_CODES: - self._client._reset_server(self._server_address) + self._client._topology.reset_server( + self._server_address, exc_val) diff --git a/pymongo/topology.py b/pymongo/topology.py index a3cfe1e79e..7cad22cd99 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -37,6 +37,7 @@ from pymongo.monitor import SrvMonitor from pymongo.monotonic import time as _time from pymongo.server import Server +from pymongo.server_description import ServerDescription from pymongo.server_selectors import (any_server_selector, arbiter_server_selector, secondary_server_selector, @@ -407,24 +408,24 @@ def reset_pool(self, address): if server: server.pool.reset() - def reset_server(self, address): + def reset_server(self, address, error): """Clear our pool for a server and mark it Unknown. Do *not* request an immediate check. """ with self._lock: - self._reset_server(address, reset_pool=True) + self._reset_server(address, reset_pool=True, error=error) - def reset_server_and_request_check(self, address): + def reset_server_and_request_check(self, address, error): """Clear our pool for a server, mark it Unknown, and check it soon.""" with self._lock: - self._reset_server(address, reset_pool=True) + self._reset_server(address, reset_pool=True, error=error) self._request_check(address) - def mark_server_unknown_and_request_check(self, address): + def mark_server_unknown_and_request_check(self, address, error): """Mark a server Unknown, and check it soon.""" with self._lock: - self._reset_server(address, reset_pool=False) + self._reset_server(address, reset_pool=False, error=error) self._request_check(address) def update_pool(self): @@ -537,7 +538,7 @@ def _ensure_opened(self): for server in itervalues(self._servers): server.open() - def _reset_server(self, address, reset_pool): + def _reset_server(self, address, reset_pool, error): """Mark a server Unknown and optionally reset it's pool. Hold the lock when calling this. Does *not* request an immediate check. @@ -550,8 +551,7 @@ def _reset_server(self, address, reset_pool): server.reset() # Mark this server Unknown. - self._description = self._description.reset_server(address) - self._update_servers() + self._process_change(ServerDescription(address, error=error)) def _request_check(self, address): """Wake one monitor. Hold the lock when calling this.""" diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index cb5ebcd5e3..a3a8435706 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -24,15 +24,18 @@ from bson.json_util import object_hook from pymongo import monitoring from pymongo import periodic_executor +from pymongo.errors import (ConnectionFailure, + NotMasterError) from pymongo.ismaster import IsMaster from pymongo.monitor import Monitor from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, client_knobs +from test import unittest, client_context, client_knobs, IntegrationTest from test.utils import (ServerAndTopologyEventListener, single_client, + rs_or_single_client, wait_until) # Location of JSON test specifications. @@ -278,5 +281,89 @@ def create_tests(): create_tests() + +class TestSdamMonitoring(IntegrationTest): + + @classmethod + @client_context.require_failCommand_fail_point + def setUpClass(cls): + super(TestSdamMonitoring, cls).setUpClass() + # Speed up the tests by decreasing the event publish frequency. + cls.knobs = client_knobs(events_queue_frequency=0.1) + cls.knobs.enable() + cls.listener = ServerAndTopologyEventListener() + retry_writes = client_context.supports_transactions() + cls.test_client = rs_or_single_client( + event_listeners=[cls.listener], retryWrites=retry_writes) + cls.coll = cls.test_client[cls.client.db.name].test + cls.coll.insert_one({}) + + @classmethod + def tearDownClass(cls): + cls.test_client.close() + cls.knobs.disable() + super(TestSdamMonitoring, cls).tearDownClass() + + def setUp(self): + self.listener.reset() + + def _test_app_error(self, fail_command_opts, expected_error): + address = self.test_client.address + + # Test that an application error causes a ServerDescriptionChangedEvent + # to be published. + data = {'failCommands': ['insert']} + data.update(fail_command_opts) + fail_insert = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 1}, + 'data': data, + } + with self.fail_point(fail_insert): + if self.test_client.retry_writes: + self.coll.insert_one({}) + else: + with self.assertRaises(expected_error): + self.coll.insert_one({}) + self.coll.insert_one({}) + + def marked_unknown(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.new_description.is_server_type_known) + + def discovered_node(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known) + + def marked_unknown_and_rediscovered(): + return (len(self.listener.matching(marked_unknown)) >= 1 and + len(self.listener.matching(discovered_node)) >= 1) + + # Topology events are published asynchronously + wait_until(marked_unknown_and_rediscovered, 'rediscover node') + + # Expect a single ServerDescriptionChangedEvent for the network error. + marked_unknown_events = self.listener.matching(marked_unknown) + self.assertEqual(len(marked_unknown_events), 1) + self.assertIsInstance( + marked_unknown_events[0].new_description.error, expected_error) + + def test_network_error_publishes_events(self): + self._test_app_error({'closeConnection': True}, ConnectionFailure) + + def test_not_master_error_publishes_events(self): + self._test_app_error({'errorCode': 10107, 'closeConnection': False}, + NotMasterError) + + def test_shutdown_error_publishes_events(self): + self._test_app_error({'errorCode': 91, 'closeConnection': False}, + NotMasterError) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index 31c7b0ce1c..017951b575 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -423,7 +423,7 @@ def test_reset_server(self): 'setName': 'rs', 'hosts': ['a', 'b']}) - t.reset_server(('a', 27017)) + t.reset_server(('a', 27017), None) self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) self.assertEqual('rs', t.description.replica_set_name) @@ -440,7 +440,7 @@ def test_reset_server(self): self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) - t.reset_server(('b', 27017)) + t.reset_server(('b', 27017), None) self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) self.assertEqual('rs', t.description.replica_set_name) @@ -451,7 +451,7 @@ def test_reset_removed_server(self): t = create_mock_topology(replica_set_name='rs') # No error resetting a server not in the TopologyDescription. - t.reset_server(('b', 27017)) + t.reset_server(('b', 27017), None) # Server was *not* added as type Unknown. self.assertFalse(t.has_server(('b', 27017))) diff --git a/test/utils.py b/test/utils.py index 95c0885851..77e08fc4ed 100644 --- a/test/utils.py +++ b/test/utils.py @@ -167,6 +167,14 @@ def description_changed(self, event): def closed(self, event): self.results.append(event) + def matching(self, matcher): + """Return the matching events.""" + results = self.results[:] + return [event for event in results if matcher(event)] + + def reset(self): + self.results = [] + class HeartbeatEventListener(monitoring.ServerHeartbeatListener): """Listens to only server heartbeat events.""" From 0b24e1e0b79a8cf6426c02df7d4dca67825bff89 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 13 Feb 2020 15:52:07 -0800 Subject: [PATCH 0060/2111] PYTHON-2126 Refactor monitor to create PeriodicExecutor in one place --- pymongo/monitor.py | 81 ++++++++++++++----------------- test/test_sdam_monitoring_spec.py | 36 ++------------ 2 files changed, 40 insertions(+), 77 deletions(-) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 23af967ffe..9aaa95c6fb 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -26,9 +26,32 @@ class MonitorBase(object): - def __init__(self, *args, **kwargs): - """Override this method to create an executor.""" - raise NotImplementedError + def __init__(self, topology, name, interval, min_interval): + """Base class to do periodic work on a background thread. + + The the background thread is signaled to stop when the Topology or + this instance is freed. + """ + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. + def target(): + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + monitor._run() + return True + + executor = periodic_executor.PeriodicExecutor( + interval=interval, + min_interval=min_interval, + target=target, + name=name) + + self._executor = executor + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, executor.close) def open(self): """Start monitoring, or restart after a fork. @@ -68,6 +91,11 @@ def __init__( The Topology is weakly referenced. The Pool must be exclusive to this Monitor. """ + super(Monitor, self).__init__( + topology, + "pymongo_server_monitor_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL) self._server_description = server_description self._pool = pool self._settings = topology_settings @@ -76,27 +104,6 @@ def __init__( pub = self._listeners is not None self._publish = pub and self._listeners.enabled_for_server_heartbeat - # We strongly reference the executor and it weakly references us via - # this closure. When the monitor is freed, stop the executor soon. - def target(): - monitor = self_ref() - if monitor is None: - return False # Stop the executor. - Monitor._run(monitor) - return True - - executor = periodic_executor.PeriodicExecutor( - interval=self._settings.heartbeat_frequency, - min_interval=common.MIN_HEARTBEAT_INTERVAL, - target=target, - name="pymongo_server_monitor_thread") - - self._executor = executor - - # Avoid cycles. When self or topology is freed, stop executor soon. - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) - def close(self): super(Monitor, self).close() @@ -203,31 +210,15 @@ def __init__(self, topology, topology_settings): The Topology is weakly referenced. """ + super(SrvMonitor, self).__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency) self._settings = topology_settings self._seedlist = self._settings._seeds self._fqdn = self._settings.fqdn - # We strongly reference the executor and it weakly references us via - # this closure. When the monitor is freed, stop the executor soon. - def target(): - monitor = self_ref() - if monitor is None: - return False # Stop the executor. - SrvMonitor._run(monitor) - return True - - executor = periodic_executor.PeriodicExecutor( - interval=common.MIN_SRV_RESCAN_INTERVAL, - min_interval=self._settings.heartbeat_frequency, - target=target, - name="pymongo_srv_polling_thread") - - self._executor = executor - - # Avoid cycles. When self or topology is freed, stop executor soon. - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) - def _run(self): seedlist = self._get_seedlist() if seedlist: diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index a3a8435706..5741110973 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -181,39 +181,11 @@ def create_test(scenario_def): def run_scenario(self): responses = (r for r in scenario_def['phases'][0]['responses']) - with client_knobs(events_queue_frequency=0.1): + with client_knobs(events_queue_frequency=0.1, + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1): class MockMonitor(Monitor): - def __init__(self, server_description, topology, pool, - topology_settings): - """Have to copy entire constructor from Monitor so that we - can override _run and change the periodic executor's - interval.""" - - self._server_description = server_description - self._pool = pool - self._settings = topology_settings - self._avg_round_trip_time = MovingAverage() - options = self._settings._pool_options - self._listeners = options.event_listeners - self._publish = self._listeners is not None - - def target(): - monitor = self_ref() - if monitor is None: - return False - MockMonitor._run(monitor) # Change target to subclass - return True - - # Shorten interval - executor = periodic_executor.PeriodicExecutor( - interval=0.1, - min_interval=0.1, - target=target, - name="pymongo_server_monitor_thread") - self._executor = executor - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) - + """Override the _run method""" def _run(self): try: if self._server_description.address != ('a', 27017): From 90cb16059533a3fb81f504cf50b59de2bf43e17b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 18 Feb 2020 16:16:11 -0800 Subject: [PATCH 0061/2111] PYTHON-2129 Use error code to check for NamespaceNotFound errors --- pymongo/collection.py | 15 ++++----------- pymongo/database.py | 2 +- pymongo/helpers.py | 4 +++- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 8bedb02243..894d1a48a8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -54,7 +54,6 @@ UpdateResult) from pymongo.write_concern import WriteConcern -_NO_OBJ_ERROR = "No matching object found" _UJOIN = u"%s.%s" _FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} @@ -1582,6 +1581,8 @@ def parallel_scan(self, num_cursors, session=None, **kwargs): def _count(self, cmd, collation=None, session=None): """Internal count helper.""" + # XXX: "ns missing" checks can be removed when we drop support for + # MongoDB 3.0, see SERVER-17051. def _cmd(session, server, sock_info, slave_ok): res = self._command( sock_info, @@ -2111,7 +2112,7 @@ def drop_index(self, index_or_name, session=None, **kwargs): self._command(sock_info, cmd, read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found"], + allowable_errors=["ns not found", 26], write_concern=self._write_concern_for(session), session=session) @@ -2873,7 +2874,6 @@ def _find_and_modify(session, sock_info, retryable_write): out = self._command(sock_info, cmd, read_preference=ReadPreference.PRIMARY, write_concern=write_concern, - allowable_errors=[_NO_OBJ_ERROR], collation=collation, session=session, retryable_write=retryable_write, user_fields=_FIND_AND_MODIFY_DOC_FIELDS) @@ -3295,7 +3295,7 @@ def _find_and_modify(session, sock_info, retryable_write): cmd['writeConcern'] = write_concern.document result = self._command( sock_info, cmd, read_preference=ReadPreference.PRIMARY, - allowable_errors=[_NO_OBJ_ERROR], collation=collation, + collation=collation, session=session, retryable_write=retryable_write, user_fields=_FIND_AND_MODIFY_DOC_FIELDS) @@ -3305,13 +3305,6 @@ def _find_and_modify(session, sock_info, retryable_write): out = self.__database.client._retryable_write( write_concern.acknowledged, _find_and_modify, None) - if not out['ok']: - if out["errmsg"] == _NO_OBJ_ERROR: - return None - else: - # Should never get here b/c of allowable_errors - raise ValueError("Unexpected Error: %s" % (out,)) - if full_response: return out else: diff --git a/pymongo/database.py b/pymongo/database.py index 50a5d1fa36..2af099c289 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -915,7 +915,7 @@ def drop_collection(self, name_or_collection, session=None): with self.__client._socket_for_writes(session) as sock_info: return self._command( sock_info, 'drop', value=_unicode(name), - allowable_errors=['ns not found'], + allowable_errors=['ns not found', 26], write_concern=self._write_concern_for(session), parse_write_concern_error=True, session=session) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 8bfc62c6be..309470a284 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -128,7 +128,9 @@ def _check_command_response(response, msg=None, allowable_errors=None, break errmsg = details["errmsg"] - if allowable_errors is None or errmsg not in allowable_errors: + if (allowable_errors is None + or (errmsg not in allowable_errors + and details.get("code") not in allowable_errors)): code = details.get("code") # Server is "not master" or "recovering" From dc692e4d8206647015221238f3f22dd9a8f81834 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 18 Feb 2020 16:43:50 -0800 Subject: [PATCH 0062/2111] PYTHON-2130 Skip failing $where test on 4.4 --- test/test_cursor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_cursor.py b/test/test_cursor.py index ca19ebb28b..6c5f34afad 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -817,6 +817,7 @@ def test_count_with_hint(self): self.assertEqual(2, collection.find().hint("x_1").count()) self.assertEqual(2, collection.find().hint([("x", 1)]).count()) + @client_context.require_version_max(4, 3, 2) # PYTHON-2130 @ignore_deprecations def test_where(self): db = self.db From a06a0e7aa66b4226b6c5ce97b39dd10d7f7f7121 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 6 Feb 2020 18:24:15 -0800 Subject: [PATCH 0063/2111] PYTHON-2093 OCSP Support --- .evergreen/config.yml | 149 +++++++++++++++++ .evergreen/run-ocsp-tests.sh | 21 +++ pymongo/ocsp_support.py | 306 +++++++++++++++++++++++++++++++++++ pymongo/pyopenssl_context.py | 10 ++ setup.py | 7 +- test/ocsp/test_ocsp.py | 70 ++++++++ tools/ocsptest.py | 54 +++++++ 7 files changed, 614 insertions(+), 3 deletions(-) create mode 100644 .evergreen/run-ocsp-tests.sh create mode 100644 pymongo/ocsp_support.py create mode 100644 test/ocsp/test_ocsp.py create mode 100644 tools/ocsptest.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0d79109080..757a574803 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -289,6 +289,7 @@ functions: SSL=${SSL} \ STORAGE_ENGINE=${STORAGE_ENGINE} \ DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ + ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update @@ -488,6 +489,55 @@ functions: # Don't use ${file} syntax here because evergreen treats it as an empty expansion. [ -f "$file" ] && sh $file || echo "$file not available, skipping" + "run-ocsp-test": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/rsa/ca.pem" \ + OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ + sh ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-tests.sh + + "run-valid-ocsp-server": + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + /opt/mongodbtoolchain/v3/bin/python3 -m venv ./venv + ./venv/bin/pip3 install asn1crypto oscrypto bottle + - command: shell.exec + params: + background: true + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + nohup ./venv/bin/python3 ocsp_mock.py \ + --ca_file rsa/ca.pem \ + --ocsp_responder_cert rsa/ca.crt \ + --ocsp_responder_key rsa/ca.key \ + -p 8100 -v + + "run-revoked-ocsp-server": + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + /opt/mongodbtoolchain/v3/bin/python3 -m venv ./venv + ./venv/bin/pip3 install asn1crypto oscrypto bottle + - command: shell.exec + params: + background: true + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + nohup ./venv/bin/python3 ocsp_mock.py \ + --ca_file rsa/ca.pem \ + --ocsp_responder_cert rsa/ca.crt \ + --ocsp_responder_key rsa/ca.key \ + -p 8100 \ + -v \ + --fault revoked + pre: - func: "fetch source" - func: "prepare resources" @@ -829,6 +879,95 @@ tasks: commands: - func: "run atlas tests" + - name: "test-ocsp-valid-cert-server-staples" + tags: ["ocsp"] + commands: + - func: "run-valid-ocsp-server" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-invalid-cert-server-staples" + tags: ["ocsp"] + commands: + - func: "run-revoked-ocsp-server" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-valid-cert-server-does-not-staple" + tags: ["ocsp"] + commands: + - func: "run-valid-ocsp-server" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-invalid-cert-server-does-not-staple" + tags: ["ocsp"] + commands: + - func: "run-revoked-ocsp-server" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-soft-fail" + tags: ["ocsp"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-malicious-invalid-cert-mustStaple-server-does-not-staple" + tags: ["ocsp"] + commands: + - func: "run-revoked-ocsp-server" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-malicious-no-responder-mustStaple-server-does-not-staple" + tags: ["ocsp"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_TLS_SHOULD_SUCCEED: "0" + # }}} - name: "coverage-report" tags: ["coverage"] @@ -1771,6 +1910,16 @@ buildvariants: tasks: - name: "atlas-connect" +- matrix_name: "ocsp-test" + matrix_spec: + platform: ubuntu-16.04 + python-version: ["2.7", "3.4", "3.8", "pypy", "pypy3.5"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${python-version} ${platform}" + tasks: + - name: ".ocsp" + # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh new file mode 100644 index 0000000000..d3532352cf --- /dev/null +++ b/.evergreen/run-ocsp-tests.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -o xtrace +set -o errexit + +if [ -z "$PYTHON_BINARY" ]; then + echo "No python binary specified" + PYTHON_BINARY=$(command -v python || command -v python3) || true + if [ -z "$PYTHON_BINARY" ]; then + echo "Cannot test without python or python3 installed!" + exit 1 + fi +fi + +$PYTHON_BINARY -m virtualenv --never-download --no-wheel ocsptest + . ocsptest/bin/activate + trap "deactivate; rm -rf ocsptest" EXIT HUP + pip install pyopenssl requests service_identity + PYTHON=python + +OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED} CA_FILE=${CA_FILE} $PYTHON test/ocsp/test_ocsp.py diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py new file mode 100644 index 0000000000..d3ec194645 --- /dev/null +++ b/pymongo/ocsp_support.py @@ -0,0 +1,306 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for requesting and verifying OCSP responses.""" + +import logging as _logging + +from datetime import datetime as _datetime + +from cryptography.exceptions import InvalidSignature as _InvalidSignature +from cryptography.hazmat.backends import default_backend as _default_backend +from cryptography.hazmat.primitives.asymmetric.dsa import ( + DSAPublicKey as _DSAPublicKey) +from cryptography.hazmat.primitives.asymmetric.ec import ( + ECDSA as _ECDSA, + EllipticCurvePublicKey as _EllipticCurvePublicKey) +from cryptography.hazmat.primitives.asymmetric.padding import ( + PKCS1v15 as _PKCS1v15) +from cryptography.hazmat.primitives.asymmetric.rsa import ( + RSAPublicKey as _RSAPublicKey) +from cryptography.hazmat.primitives.hashes import ( + Hash as _Hash, + SHA1 as _SHA1) +from cryptography.hazmat.primitives.serialization import ( + Encoding as _Encoding, + PublicFormat as _PublicFormat) +from cryptography.x509 import ( + AuthorityInformationAccess as _AuthorityInformationAccess, + ExtendedKeyUsage as _ExtendedKeyUsage, + ExtensionNotFound as _ExtensionNotFound, + TLSFeature as _TLSFeature, + TLSFeatureType as _TLSFeatureType) +from cryptography.x509.oid import ( + AuthorityInformationAccessOID as _AuthorityInformationAccessOID, + ExtendedKeyUsageOID as _ExtendedKeyUsageOID) +from cryptography.x509.ocsp import ( + load_der_ocsp_response as _load_der_ocsp_response, + OCSPCertStatus as _OCSPCertStatus, + OCSPRequestBuilder as _OCSPRequestBuilder, + OCSPResponseStatus as _OCSPResponseStatus) +from requests import post as _post +from requests.exceptions import RequestException as _RequestException + +# Note: the functions in this module generally return 1 or 0. The reason +# is simple. The entry point, ocsp_callback, is registered as a callback +# with OpenSSL through PyOpenSSL. The callback must return 1 (success) or +# 0 (failure). + +_LOGGER = _logging.getLogger(__name__) + + +def _get_issuer_cert(cert, chain): + issuer_name = cert.issuer + for candidate in chain: + if candidate.subject == issuer_name: + return candidate + return None + + +def _verify_signature(key, signature, algorithm, data): + # See cryptography.x509.Certificate.public_key + # for the public key types. + try: + if isinstance(key, _RSAPublicKey): + key.verify(signature, data, _PKCS1v15(), algorithm) + elif isinstance(key, _DSAPublicKey): + key.verify(signature, data, algorithm) + elif isinstance(key, _EllipticCurvePublicKey): + key.verify(signature, data, _ECDSA(algorithm)) + else: + key.verify(signature, data) + except _InvalidSignature: + return 0 + return 1 + + +def _get_extension(cert, klass): + try: + return cert.extensions.get_extension_for_class(klass) + except _ExtensionNotFound: + return None + + +def _public_key_hash(cert): + public_key = cert.public_key() + # https://tools.ietf.org/html/rfc2560#section-4.2.1 + # "KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key + # (excluding the tag and length fields)" + # https://stackoverflow.com/a/46309453/600498 + if isinstance(public_key, _RSAPublicKey): + pbytes = public_key.public_bytes( + _Encoding.DER, _PublicFormat.PKCS1) + elif isinstance(public_key, _EllipticCurvePublicKey): + pbytes = public_key.public_bytes( + _Encoding.X962, _PublicFormat.UncompressedPoint) + else: + pbytes = public_key.public_bytes( + _Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) + digest = _Hash(_SHA1(), backend=_default_backend()) + digest.update(pbytes) + return digest.finalize() + + +def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): + return [ + cert for cert in certificates + if _public_key_hash(cert) == responder_key_hash and + cert.issuer == issuer.subject] + + +def _get_certs_by_name(certificates, issuer, responder_name): + return [ + cert for cert in certificates + if cert.subject == responder_name and + cert.issuer == issuer.subject] + + +def _verify_response_signature(issuer, response): + # Response object will have a responder_name or responder_key_hash + # not both. + name = response.responder_name + rkey_hash = response.responder_key_hash + ikey_hash = response.issuer_key_hash + if name is not None and name == issuer.subject or rkey_hash == ikey_hash: + _LOGGER.debug("Responder is issuer") + # Responder is the issuer + responder_cert = issuer + else: + _LOGGER.debug("Responder is a delegate") + # Responder is a delegate + # https://tools.ietf.org/html/rfc6960#section-2.6 + # RFC6960, Section 3.2, Number 3 + certs = response.certificates + if response.responder_name is not None: + responder_certs = _get_certs_by_name(certs, issuer, name) + _LOGGER.debug("Using responder name") + else: + responder_certs = _get_certs_by_key_hash(certs, issuer, rkey_hash) + _LOGGER.debug("Using key hash") + if not responder_certs: + _LOGGER.debug("No matching or valid responder certs.") + return 0 + # XXX: Can there be more than one? If so, should we try each one + # until we find one that passes signature verification? + responder_cert = responder_certs[0] + + # RFC6960, Section 3.2, Number 4 + ext = _get_extension(responder_cert, _ExtendedKeyUsage) + if not ext or _ExtendedKeyUsageOID.OCSP_SIGNING not in ext.value: + _LOGGER.debug("Delegate not authorized for OCSP signing") + return 0 + if not _verify_signature( + issuer.public_key(), + responder_cert.signature, + responder_cert.signature_hash_algorithm, + responder_cert.tbs_certificate_bytes): + _LOGGER.debug("Delegate signature verification failed") + return 0 + # RFC6960, Section 3.2, Number 2 + ret = _verify_signature( + responder_cert.public_key(), + response.signature, + response.signature_hash_algorithm, + response.tbs_response_bytes) + if not ret: + _LOGGER.debug("Response signature verification failed") + return ret + + +def _request_ocsp(cert, issuer, uri): + # https://cryptography.io/en/latest/x509/ocsp/#creating-requests + builder = _OCSPRequestBuilder() + # add_certificate returns a new instance + builder = builder.add_certificate(cert, issuer, _SHA1()) + ocsp_request = builder.build() + try: + response = _post( + uri, + data=ocsp_request.public_bytes(_Encoding.DER), + headers={'Content-Type': 'application/ocsp-request'}, + timeout=5) + except _RequestException: + _LOGGER.debug("HTTP request failed") + return None + if response.status_code != 200: + _LOGGER.debug("HTTP request returned %d", response.status_code) + return None + ocsp_response = _load_der_ocsp_response(response.content) + _LOGGER.debug( + "OCSP response status: %r", ocsp_response.response_status) + if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return None + # RFC6960, Section 3.2, Number 1. Only relevant if we need to + # talk to the responder directly. + # Accessing response.serial_number raises if response status is not + # SUCCESSFUL. + if ocsp_response.serial_number != ocsp_request.serial_number: + _LOGGER.debug("Response serial number does not match request") + return None + return ocsp_response + + +def _verify_response(issuer, response): + _LOGGER.debug("Verifying response") + # RFC6960, Section 3.2, Number 2, 3 and 4 happen here. + res = _verify_response_signature(issuer, response) + if not res: + return 0 + + # Note that we are not using a "tolerence period" as discussed in + # https://tools.ietf.org/rfc/rfc5019.txt? + now = _datetime.utcnow() + # RFC6960, Section 3.2, Number 5 + if response.this_update > now: + _LOGGER.debug("thisUpdate is in the future") + return 0 + # RFC6960, Section 3.2, Number 6 + if response.next_update and response.next_update < now: + _LOGGER.debug("nextUpdate is in the past") + return 0 + return 1 + + +def ocsp_callback(conn, ocsp_bytes, user_data): + """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" + cert = conn.get_peer_certificate().to_cryptography() + chain = [cer.to_cryptography() for cer in conn.get_peer_cert_chain()] + issuer = _get_issuer_cert(cert, chain) + if issuer is None: + _LOGGER.debug("No issuer cert?") + return 0 + must_staple = False + # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 + ext = _get_extension(cert, _TLSFeature) + if ext is not None: + for feature in ext.value: + if feature == _TLSFeatureType.status_request: + _LOGGER.debug("Peer presented a must-staple cert") + must_staple = True + break + # No stapled OCSP response + if ocsp_bytes == b'': + _LOGGER.debug("Peer did not staple an OCSP response") + if must_staple: + _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") + return 0 + # https://tools.ietf.org/html/rfc6960#section-3.1 + ext = _get_extension(cert, _AuthorityInformationAccess) + if ext is None: + _LOGGER.debug("No authority access information, soft fail") + # No stapled OCSP response, no responder URI, soft fail. + return 1 + uris = [desc.access_location.value + for desc in ext.value + if desc.access_method == _AuthorityInformationAccessOID.OCSP] + if not uris: + _LOGGER.debug("No OCSP URI, soft fail") + # No responder URI, soft fail. + return 1 + _LOGGER.debug("Requesting OCSP data") + # When requesting data from an OCSP endpoint we only fail on + # successful, valid responses with a certificate status of REVOKED. + for uri in uris: + _LOGGER.debug("Trying %s", uri) + response = _request_ocsp(cert, issuer, uri) + if response is None: + # The endpoint didn't respond in time, or the response was + # unsuccessful or didn't match the request. + continue + if not _verify_response(issuer, response): + # The response failed verification. + continue + _LOGGER.debug("OCSP cert status: %r", response.certificate_status) + if response.certificate_status == _OCSPCertStatus.GOOD: + return 1 + if response.certificate_status == _OCSPCertStatus.REVOKED: + return 0 + # Soft fail if we couldn't get a definitive status. + _LOGGER.debug("No definitive OCSP cert status, soft fail") + return 1 + + _LOGGER.debug("Peer stapled an OCSP response") + response = _load_der_ocsp_response(ocsp_bytes) + _LOGGER.debug( + "OCSP response status: %r", response.response_status) + # This happens in _request_ocsp when there is no stapled response so + # we know if we can compare serial numbers for the request and response. + if response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return 0 + if not _verify_response(issuer, response): + return 0 + _LOGGER.debug("OCSP cert status: %r", response.certificate_status) + if response.certificate_status == _OCSPCertStatus.REVOKED: + return 0 + return 1 diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 4cb2ec003f..aaf0e8562f 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -35,6 +35,7 @@ from bson.py3compat import _unicode from pymongo.errors import CertificateError as _CertificateError from pymongo.monotonic import time as _time +from pymongo.ocsp_support import ocsp_callback as _ocsp_callback from pymongo.socket_checker import ( _errno_from_exception, SocketChecker as _SocketChecker) @@ -143,6 +144,12 @@ def __init__(self, protocol): self._protocol = protocol self._ctx = _SSL.Context(self._protocol) self._check_hostname = True + # OCSP + # XXX: Find a better place to do this someday, since this is client + # side configuration and wrap_socket tries to support both client and + # server side sockets. + self._ctx.set_ocsp_client_callback( + callback=_ocsp_callback, data=None) @property def protocol(self): @@ -248,6 +255,9 @@ def wrap_socket(self, sock, server_side=False, # XXX: Do this in a callback registered with # SSLContext.set_info_callback? See Twisted for an example. ssl_conn.set_tlsext_host_name(server_hostname.encode('idna')) + if self.verify_mode != _stdlibssl.CERT_NONE: + # Request a stapled OCSP response. + ssl_conn.request_ocsp() ssl_conn.set_connect_state() # If this wasn't true the caller of wrap_socket would call # do_handshake() diff --git a/setup.py b/setup.py index 6a5520b835..881bf2c614 100755 --- a/setup.py +++ b/setup.py @@ -317,14 +317,15 @@ def build_extension(self, ext): sources=['pymongo/_cmessagemodule.c', 'bson/buffer.c'])] - -# PyOpenSSL 17.0.0 introduced support for OCSP. 17.2.0 fixes a bug +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 17.2.0 fixes a bug # in set_default_verify_paths we should really avoid. # service_identity 18.1.0 introduced support for IP addr matching. -pyopenssl_reqs = ["pyopenssl>=17.2.0", "service_identity>=18.1.0"] +pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] extras_require = { 'encryption': ['pymongocrypt<2.0.0'], + 'ocsp': pyopenssl_reqs, 'snappy': ['python-snappy'], 'tls': [], 'zstd': ['zstandard'], diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py new file mode 100644 index 0000000000..c65ad269cd --- /dev/null +++ b/test/ocsp/test_ocsp.py @@ -0,0 +1,70 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test OCSP.""" + +import logging +import os +import sys +import unittest + +sys.path[0:0] = [""] + +import pymongo + +from pymongo.errors import ServerSelectionTimeoutError + + +CA_FILE = os.environ.get("CA_FILE") +OCSP_TLS_SHOULD_SUCCEED = bool(int(os.environ.get('OCSP_TLS_SHOULD_SUCCEED', 0))) + +logging.basicConfig(level=logging.DEBUG) + +def _connect(options): + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=500" + "&tlsCAFile=%s&%s" % (CA_FILE, options)) + print(uri) + client = pymongo.MongoClient(uri) + client.admin.command('ismaster') + + +if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +class TestOCSP(unittest.TestCase): + + def test_tls_insecure(self): + # Should always succeed + options = "tls=true&tlsInsecure=true" + _connect(options) + + def test_allow_invalid_certificates(self): + # Should always succeed + options = "tls=true&tlsAllowInvalidCertificates=true" + _connect(options) + + def test_tls(self): + options = "tls=true" + if not OCSP_TLS_SHOULD_SUCCEED: + self.assertRaisesRegex( + ServerSelectionTimeoutError, + "invalid status response", + _connect, options) + else: + _connect(options) + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/ocsptest.py b/tools/ocsptest.py new file mode 100644 index 0000000000..1504169e2d --- /dev/null +++ b/tools/ocsptest.py @@ -0,0 +1,54 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +import argparse +import logging +import socket + +from ssl import CERT_REQUIRED + +from pymongo.pyopenssl_context import SSLContext, PROTOCOL_SSLv23 + +logging.basicConfig(level=logging.DEBUG) + +def check_ocsp(host, port, capath): + ctx = SSLContext(PROTOCOL_SSLv23) + ctx.verify_mode = CERT_REQUIRED + if capath is not None: + ctx.load_verify_locations(capath) + else: + ctx.set_default_verify_paths() + + s = socket.socket() + s.connect((host, port)) + try: + s = ctx.wrap_socket(s, server_hostname=host) + finally: + s.close() + +def main(): + parser = argparse.ArgumentParser( + description='Debug OCSP') + parser.add_argument( + '--host', type=str, required=True, help="Host to connect to") + parser.add_argument( + '-p', '--port', type=int, default=443, help="Port to connect to") + parser.add_argument( + '--ca_file', type=str, default=None, help="CA file for host") + args = parser.parse_args() + check_ocsp(args.host, args.port, args.ca_file) + +if __name__ == '__main__': + main() + From 6ee80cecfafff8f3741c12d93e21d156560dd312 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 12 Feb 2020 13:26:59 -0800 Subject: [PATCH 0064/2111] PYTHON-2124 Assert no gc.garbage at test suite teardown and enable debug output --- setup.py | 6 ++--- test/__init__.py | 46 +++++++++++++++++++++++++++++++------- test/test_change_stream.py | 5 +++-- test/test_client.py | 2 ++ test/test_cmap.py | 2 ++ 5 files changed, 48 insertions(+), 13 deletions(-) diff --git a/setup.py b/setup.py index 881bf2c614..4c84915059 100755 --- a/setup.py +++ b/setup.py @@ -122,9 +122,9 @@ def run(self): suite = unittest.defaultTestLoader.loadTestsFromName( self.test_suite) if self.xunit_output: - from xmlrunner import XMLTestRunner - runner = XMLTestRunner(verbosity=2, failfast=self.failfast, - output=self.xunit_output) + from test import PymongoXMLTestRunner + runner = PymongoXMLTestRunner(verbosity=2, failfast=self.failfast, + output=self.xunit_output) else: runner = PymongoTestRunner(verbosity=2, failfast=self.failfast) result = runner.run(suite) diff --git a/test/__init__.py b/test/__init__.py index 6f585f27eb..5943d968f1 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -15,6 +15,7 @@ """Test suite for pymongo, bson, and gridfs. """ +import gc import os import socket import sys @@ -23,6 +24,12 @@ import unittest import warnings +try: + from xmlrunner import XMLTestRunner + HAVE_XML = True +except ImportError: + HAVE_XML = False + try: import ipaddress HAVE_IPADDRESS = True @@ -53,6 +60,13 @@ except ImportError: pass +# Enable debug output for uncollectable objects. PyPy does not have set_debug. +if hasattr(gc, 'set_debug'): + gc.set_debug( + gc.DEBUG_UNCOLLECTABLE | + getattr(gc, 'DEBUG_OBJECTS', 0) | + getattr(gc, 'DEBUG_INSTANCES', 0)) + # The host and port of a single mongod or mongos, or the seed host # for a replica set. host = os.environ.get("DB_IP", 'localhost') @@ -779,22 +793,38 @@ def setup(): def teardown(): + garbage = [] + for g in gc.garbage: + garbage.append('GARBAGE: %r' % (g,)) + garbage.append(' gc.get_referents: %r' % (gc.get_referents(g),)) + garbage.append(' gc.get_referrers: %r' % (gc.get_referrers(g),)) + if garbage: + assert False, '\n'.join(garbage) c = client_context.client - c.drop_database("pymongo-pooling-tests") - c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - c.drop_database("pymongo_test_mike") - c.drop_database("pymongo_test_bernie") + if c: + c.drop_database("pymongo-pooling-tests") + c.drop_database("pymongo_test") + c.drop_database("pymongo_test1") + c.drop_database("pymongo_test2") + c.drop_database("pymongo_test_mike") + c.drop_database("pymongo_test_bernie") + c.close() class PymongoTestRunner(unittest.TextTestRunner): def run(self, test): setup() result = super(PymongoTestRunner, self).run(test) - try: + teardown() + return result + + +if HAVE_XML: + class PymongoXMLTestRunner(XMLTestRunner): + def run(self, test): + setup() + result = super(PymongoXMLTestRunner, self).run(test) teardown() - finally: return result diff --git a/test/test_change_stream.py b/test/test_change_stream.py index a4421f265b..669c774a3c 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -575,12 +575,13 @@ def mock_try_next(*args, **kwargs): change_stream._cursor.close() raise OperationFailure('Mock server error', code=code) - original_try_next = change_stream._cursor._try_next + original_cursor = change_stream._cursor change_stream._cursor._try_next = mock_try_next try: yield finally: - change_stream._cursor._try_next = original_try_next + # Un patch the instance. + del original_cursor._try_next for code in TEST_ERROR_CODES: with self.change_stream() as change_stream: diff --git a/test/test_client.py b/test/test_client.py index 05e94b3d9b..c547c98cb5 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1514,6 +1514,8 @@ def stall_connect(*args, **kwargs): return original_connect(*args, **kwargs) pool.connect = stall_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, 'connect') # Wait for the background thread to start creating connections wait_until(lambda: len(pool.sockets) > 1, 'start creating connections') diff --git a/test/test_cmap.py b/test/test_cmap.py index 9a021f6b45..28de566338 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -376,6 +376,8 @@ def mock_connect(*args, **kwargs): sock_info.check_auth = functools.partial(mock_check_auth, sock_info) return sock_info pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, 'connect') # Attempt to create a new connection. with self.assertRaisesRegex(ConnectionFailure, 'auth failed'): From 8e5dbc9f0ab2426ee362f8b80fe60640b3349781 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 20 Feb 2020 19:47:20 -0800 Subject: [PATCH 0065/2111] PYTHON-2128 Fix test_ssl_pem_passphrase --- .evergreen/run-pyopenssl-tests.sh | 2 +- test/test_ssl.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.evergreen/run-pyopenssl-tests.sh b/.evergreen/run-pyopenssl-tests.sh index 8418947410..0ef5d2c485 100644 --- a/.evergreen/run-pyopenssl-tests.sh +++ b/.evergreen/run-pyopenssl-tests.sh @@ -21,7 +21,7 @@ fi $PYTHON -m virtualenv pyopenssltest trap "deactivate; rm -rf pyopenssltest" EXIT HUP . pyopenssltest/bin/activate -pip install pyopenssl>=17.2.0 service_identity>=18.1.0 +pip install pyopenssl>=17.2.0 "requests<3.0.0" service_identity>=18.1.0 pip list python -c 'import sys; print(sys.version)' python setup.py test diff --git a/test/test_ssl.py b/test/test_ssl.py index 352ef1f4b8..d0aa9cbf82 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -43,7 +43,10 @@ _HAVE_PYOPENSSL = False try: + # All of these must be available to use PyOpenSSL import OpenSSL + import requests + import service_identity _HAVE_PYOPENSSL = True except ImportError: pass @@ -190,8 +193,7 @@ def test_ssl_pem_passphrase(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - vi = sys.version_info - if vi[0] == 2 and vi < (2, 7, 9) and not _ssl.IS_PYOPENSSL: + if not hasattr(ssl, 'SSLContext') and not _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, From d46bd1671c3fbc246e3a174468eba66826aa1234 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 21 Feb 2020 09:32:45 -0800 Subject: [PATCH 0066/2111] PYTHON-2093 Documentation for OCSP --- README.rst | 12 +++++++++- doc/atlas.rst | 12 ++++++++-- doc/changelog.rst | 19 ++++++++++++++++ doc/examples/tls.rst | 52 +++++++++++++++++++++++++++++++++++++++----- doc/installation.rst | 12 +++++++++- 5 files changed, 98 insertions(+), 9 deletions(-) diff --git a/README.rst b/README.rst index a1f3664e44..7e4d16b19d 100644 --- a/README.rst +++ b/README.rst @@ -113,6 +113,16 @@ PyMongo:: $ python -m pip install pymongo[tls] +.. note:: Users of Python versions older than 2.7.9 will also + receive the dependencies for OCSP when using the tls extra. + +:ref:`OCSP` requires `PyOpenSSL +`_, `requests +`_ and `service_identity +`_:: + + $ python -m pip install pymongo[ocsp] + Wire protocol compression with snappy requires `python-snappy `_:: @@ -126,7 +136,7 @@ Wire protocol compression with zstandard requires `zstandard You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[snappy,gssapi,srv,tls,zstd] + $ python -m pip install pymongo[gssapi,ocsp,snappy,srv,tls,zstd] Other optional packages: diff --git a/doc/atlas.rst b/doc/atlas.rst index bb661e8592..59605b58e7 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -14,13 +14,21 @@ dependencies using the following pip command:: $ python -m pip install pymongo[tls] +Starting with PyMongo 3.11 this installs `PyOpenSSL +`_, `requests`_ +and `service_identity +`_ +for users of Python versions older than 2.7.9. PyOpenSSL supports SNI for these +old Python versions, allowing applictions to connect to Altas free and shared +tier instances. + Earlier versions of PyMongo require you to manually install the dependencies. For a list of TLS/SSL-related dependencies, see :doc:`examples/tls`. .. note:: Connecting to Atlas "Free Tier" or "Shared Cluster" instances requires Server Name Indication (SNI) support. SNI support requires CPython - 2.7.9 / PyPy 2.5.1 or newer. To check if your version of Python supports - SNI run the following command:: + 2.7.9 / PyPy 2.5.1 or newer or PyMongo 3.11+ with PyOpenSSL. + To check if your version of Python supports SNI run the following command:: $ python -c "import ssl; print(getattr(ssl, 'HAS_SNI', False))" diff --git a/doc/changelog.rst b/doc/changelog.rst index f8ed9f702f..8a742e0056 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,25 @@ Changelog ========= +Changes in Version 3.11.0 +------------------------- + +Version 3.11 adds support for MongoDB 4.4. Highlights include: + +- Support for :ref:`OCSP` (Online Certificate Status Protocol) +- Support for `PyOpenSSL `_ as an + alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` + support. It will also be installed when using the "tls" extra if the + version of Python in use is older than 2.7.9. + +Issues Resolved +............... + +See the `PyMongo 3.11.0 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.11.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=24799 + Changes in Version 3.10.1 ------------------------- diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 4454a1e4b7..133c4125de 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -16,6 +16,14 @@ command:: $ python -m pip install pymongo[tls] +Starting with PyMongo 3.11 this installs `PyOpenSSL +`_, `requests`_ +and `service_identity +`_ +for users of Python versions older than 2.7.9. PyOpenSSL supports SNI for these +old Python versions allowing applictions to connect to Altas free and shared +tier instances. + Earlier versions of PyMongo require you to manually install the dependencies listed below. @@ -103,8 +111,9 @@ Specifying a CA file .................... In some cases you may want to configure PyMongo to use a specific set of CA -certificates. This is most often the case when using "self-signed" server -certificates. The `ssl_ca_certs` option takes a path to a CA file. It can be +certificates. This is most often the case when you are acting as your own +certificate authority rather than using server certificates signed by a well +known authority. The `ssl_ca_certs` option takes a path to a CA file. It can be passed as a keyword argument:: >>> client = pymongo.MongoClient('example.com', @@ -132,6 +141,8 @@ Or, in the URI:: >>> uri = 'mongodb://example.com/?ssl=true&ssl_crlfile=/path/to/crl.pem' >>> client = pymongo.MongoClient(uri) +.. note:: Certificate revocation lists and :ref:`OCSP` cannot be used together. + Client certificates ................... @@ -162,12 +173,37 @@ to decrypt encrypted private keys. Use the `ssl_pem_passphrase` option:: These options can also be passed as part of the MongoDB URI. +.. _OCSP: + +OCSP +.... + +Starting with PyMongo 3.11, if PyMongo was installed with the "ocsp" extra:: + + python -m pip install pymongo[ocsp] + +certificate revocation checking is enabled by way of `OCSP (Online Certification +Status Protocol) `_. +MongoDB 4.4+ `staples OCSP responses `_ +to the TLS handshake which PyMongo will verify, failing the TLS handshake if +the stapled OCSP response is invalid or indicates that the peer certificate is +revoked. + +When connecting to a server version older than 4.4, or when a 4.4+ version of +MongoDB does not staple an OCSP response, PyMongo will attempt to connect +directly to an OCSP endpoint if the peer certificate specified one. The TLS +handshake will only fail in this case if the response indicates that the +certificate is revoked. Invalid or malformed responses will be ignored, +favoring availability over maximum security. + + Troubleshooting TLS Errors .......................... -TLS errors often fall into two categories, certificate verification failure or -protocol version mismatch. An error message similar to the following means that -OpenSSL was not able to verify the server's certificate:: +TLS errors often fall into three categories - certificate verification failure, +protocol version mismatch or certificate revocation checking failure. An error +message similar to the following means that OpenSSL was not able to verify the +server's certificate:: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed @@ -200,3 +236,9 @@ TLS protocols be disabled in some MongoDB deployments. Some deployments may disable TLS 1.0, others may disable TLS 1.0 and TLS 1.1. See the warning earlier in this document for troubleshooting steps and solutions. +An error message similar to the following message means that certificate +revocation checking failed:: + + [('SSL routines', 'tls_process_initial_server_flight', 'invalid status response')] + +See :ref:`OCSP` for more details. diff --git a/doc/installation.rst b/doc/installation.rst index ca702b6fb7..4875d72aad 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -70,6 +70,16 @@ PyMongo:: $ python -m pip install pymongo[tls] +.. note:: Users of Python versions older than 2.7.9 will also + receive the dependencies for OCSP when using the tls extra. + +:ref:`OCSP` requires `PyOpenSSL +`_, `requests +`_ and `service_identity +`_:: + + $ python -m pip install pymongo[ocsp] + Wire protocol compression with snappy requires `python-snappy `_:: @@ -83,7 +93,7 @@ Wire protocol compression with zstandard requires `zstandard You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[snappy,gssapi,srv,tls,zstd] + $ python -m pip install pymongo[gssapi,ocsp,snappy,srv,tls,zstd] Other optional packages: From e989be53c1e51e9200ac9d6cff3f3e71163213c8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 18 Feb 2020 18:50:12 -0800 Subject: [PATCH 0067/2111] PYTHON-2088 Define how multiple phases should be handled in SDAM spec runner --- .../rs/compatible_unknown.json | 39 +++ .../rs/equal_electionids.json | 6 +- .../rs/incompatible_arbiter.json | 54 ++++ .../rs/incompatible_ghost.json | 49 +++ .../rs/incompatible_other.json | 54 ++++ .../rs/new_primary_new_electionid.json | 18 +- .../rs/new_primary_new_setversion.json | 18 +- .../rs/null_election_id.json | 21 +- .../rs/primary_becomes_ghost.json | 59 ++++ .../rs/primary_becomes_mongos.json | 54 ++++ .../rs/primary_disconnect_electionid.json | 30 +- .../rs/primary_disconnect_setversion.json | 30 +- .../rs/primary_mismatched_me_not_removed.json | 77 +++++ .../discovery_and_monitoring/rs/repeated.json | 140 +++++++++ .../rs/secondary_ignore_ok_0.json | 81 +++++ .../rs/setversion_without_electionid.json | 6 +- .../rs/use_setversion_without_electionid.json | 18 +- .../replica_set_with_no_primary.json | 11 +- .../replica_set_with_primary.json | 6 +- .../replica_set_with_removal.json | 57 ++-- .../sdam_monitoring/required_replica_set.json | 292 +++++++++--------- test/sdam_monitoring/standalone.json | 200 ++++++------ ...ne_suppress_equal_description_changes.json | 113 +++++++ test/test_discovery_and_monitoring.py | 13 +- test/test_sdam_monitoring_spec.py | 107 +++---- test/utils.py | 13 +- 26 files changed, 1196 insertions(+), 370 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/compatible_unknown.json create mode 100644 test/discovery_and_monitoring/rs/incompatible_arbiter.json create mode 100644 test/discovery_and_monitoring/rs/incompatible_ghost.json create mode 100644 test/discovery_and_monitoring/rs/incompatible_other.json create mode 100644 test/discovery_and_monitoring/rs/primary_becomes_ghost.json create mode 100644 test/discovery_and_monitoring/rs/primary_becomes_mongos.json create mode 100644 test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json create mode 100644 test/discovery_and_monitoring/rs/repeated.json create mode 100644 test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json create mode 100644 test/sdam_monitoring/standalone_suppress_equal_description_changes.json diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json new file mode 100644 index 0000000000..1105da8764 --- /dev/null +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -0,0 +1,39 @@ +{ + "description": "Replica set member and an unknown server", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json index 8a5aa8cd67..f8d20b350d 100644 --- a/test/discovery_and_monitoring/rs/equal_electionids.json +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -60,7 +60,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } } ] diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json new file mode 100644 index 0000000000..aa582208d6 --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -0,0 +1,54 @@ +{ + "description": "Incompatible arbiter", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "arbiterOnly": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSArbiter", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json new file mode 100644 index 0000000000..088159c3ab --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -0,0 +1,49 @@ +{ + "description": "Incompatible ghost", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json new file mode 100644 index 0000000000..b65d674b42 --- /dev/null +++ b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -0,0 +1,54 @@ +{ + "description": "Incompatible other", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "hidden": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 1 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSOther", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": false + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index cd6c37cef7..67f314b1ed 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -41,7 +41,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -83,7 +87,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -125,7 +133,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index c5828171d4..c1ec50c845 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -41,7 +41,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -83,7 +87,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -125,7 +133,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } } ] diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index d4348df442..3de0a74e41 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -42,7 +42,8 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1 } }, { @@ -90,7 +91,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -133,7 +138,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -179,7 +188,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json new file mode 100644 index 0000000000..897120f1fb --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -0,0 +1,59 @@ +{ + "description": "Primary becomes ghost", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json new file mode 100644 index 0000000000..8d4967b7dd --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -0,0 +1,54 @@ +{ + "description": "Primary becomes mongos", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index e81f299086..59c8faf180 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -59,7 +59,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -84,7 +88,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -123,7 +131,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -165,7 +177,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000003" + } } }, { @@ -203,7 +219,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000003" + } } } ] diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index d0e55c545a..beb023e4f4 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -59,7 +59,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -84,7 +88,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -123,7 +131,11 @@ }, "topologyType": "ReplicaSetNoPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -165,7 +177,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } }, { @@ -203,7 +219,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000002" + } } } ] diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json new file mode 100644 index 0000000000..a9e01987c8 --- /dev/null +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -0,0 +1,77 @@ +{ + "description": "Primary mismatched me is not removed", + "uri": "mongodb://localhost:27017,localhost:27018/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "localhost:27017", + { + "ok": 1, + "hosts": [ + "localhost:27017", + "localhost:27018" + ], + "ismaster": true, + "setName": "rs", + "primary": "localhost:27017", + "me": "a:27017", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "localhost:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "localhost:27018": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "localhost:27018", + { + "ok": 1, + "hosts": [ + "localhost:27017", + "localhost:27018" + ], + "ismaster": false, + "secondary": true, + "setName": "rs", + "primary": "localhost:27017", + "me": "localhost:27018", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "localhost:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "localhost:27018": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json new file mode 100644 index 0000000000..392d485794 --- /dev/null +++ b/test/discovery_and_monitoring/rs/repeated.json @@ -0,0 +1,140 @@ +{ + "description": "Repeated ismaster response must be processed", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown" + }, + "c:27017": { + "type": "Unknown" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json new file mode 100644 index 0000000000..6d3033eeee --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -0,0 +1,81 @@ +{ + "description": "New primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index dbd9765d2f..0500c6d157 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -36,7 +36,8 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2 } }, { @@ -73,7 +74,8 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2 } } ] diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 19e1727bf3..16225d6b83 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -41,7 +41,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -77,7 +81,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } }, { @@ -116,7 +124,11 @@ }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, - "setName": "rs" + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } } } ] diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json index 2f398a85f0..33010d49fb 100644 --- a/test/sdam_monitoring/replica_set_with_no_primary.json +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -14,7 +14,8 @@ "setVersion": 1, "primary": "b:27017", "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "minWireVersion": 0, "maxWireVersion": 4 @@ -83,7 +84,8 @@ "address": "a:27017", "arbiters": [], "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "passives": [], "primary": "b:27017", @@ -122,7 +124,8 @@ "address": "a:27017", "arbiters": [], "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "passives": [], "primary": "b:27017", @@ -134,7 +137,7 @@ "arbiters": [], "hosts": [], "passives": [], - "type": "Unknown" + "type": "PossiblePrimary" } ] } diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json index 6c0d8819d0..04caeba652 100644 --- a/test/sdam_monitoring/replica_set_with_primary.json +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -13,7 +13,8 @@ "setVersion": 1, "primary": "a:27017", "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "minWireVersion": 0, "maxWireVersion": 4 @@ -82,7 +83,8 @@ "address": "a:27017", "arbiters": [], "hosts": [ - "a:27017", "b:27017" + "a:27017", + "b:27017" ], "passives": [], "primary": "a:27017", diff --git a/test/sdam_monitoring/replica_set_with_removal.json b/test/sdam_monitoring/replica_set_with_removal.json index a14456cdba..3cad92d6b8 100644 --- a/test/sdam_monitoring/replica_set_with_removal.json +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -3,30 +3,7 @@ "uri": "mongodb://a,b/", "phases": [ { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "ismaster": true, - "setName": "rs", - "setVersion": 1, - "primary": "a:27017", - "hosts": [ - "a:27017" - ], - "minWireVersion": 0, - "maxWireVersion": 4 - } - ], - [ - "b:27017", - { - "ok": 1, - "ismaster": true - } - ] - ], + "responses": [], "outcome": { "events": [ { @@ -73,7 +50,37 @@ "topologyId": "42", "address": "b:27017" } - }, + } + ] + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 4 + } + ], + [ + "b:27017", + { + "ok": 1, + "ismaster": true + } + ] + ], + "outcome": { + "events": [ { "server_description_changed_event": { "topologyId": "42", diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json index 0afe0d1a42..0f64bde118 100644 --- a/test/sdam_monitoring/required_replica_set.json +++ b/test/sdam_monitoring/required_replica_set.json @@ -1,149 +1,151 @@ { - "description": "Monitoring a topology that is required to be a replica set", - "phases": [ - { - "outcome": { - "events": [ - { - "topology_opening_event": { - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - { - "address": "b:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "ReplicaSetNoPrimary" - }, - "previousDescription": { - "servers": [], - "topologyType": "Unknown" - }, - "topologyId": "42" - } - }, - { - "server_opening_event": { - "address": "a:27017", - "topologyId": "42" - } - }, - { - "server_opening_event": { - "address": "b:27017", - "topologyId": "42" - } - }, - { - "server_description_changed_event": { - "address": "a:27017", - "newDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [ - "a:27017", - "b:27017" - ], - "passives": [], - "primary": "a:27017", - "setName": "rs", - "type": "RSPrimary" - }, - "previousDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [ - "a:27017", - "b:27017" - ], - "passives": [], - "primary": "a:27017", - "setName": "rs", - "type": "RSPrimary" - }, - { - "address": "b:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "setName": "rs", - "topologyType": "ReplicaSetWithPrimary" - }, - "previousDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - { - "address": "b:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "ReplicaSetNoPrimary" - }, - "topologyId": "42" - } - } + "description": "Monitoring a topology that is required to be a replica set", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "setVersion": 1, + "primary": "a:27017", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 4 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - }, - "responses": [ - [ - "a:27017", - { - "hosts": [ - "a:27017", - "b:27017" - ], - "ismaster": true, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1, - "primary": "a:27017", - "setName": "rs", - "setVersion": 1.0 - } + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "b:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - ] - } - ], - "uri": "mongodb://a,b/?replicaSet=rs" + }, + "newDescription": { + "topologyType": "ReplicaSetWithPrimary", + "setName": "rs", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [ + "a:27017", + "b:27017" + ], + "passives": [], + "primary": "a:27017", + "setName": "rs", + "type": "RSPrimary" + }, + { + "address": "b:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + } + ] + } + } + ] } diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 1ca3c3c24d..5d40286c97 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -1,104 +1,104 @@ { - "description": "Monitoring a standalone connection", - "phases": [ - { - "outcome": { - "events": [ - { - "topology_opening_event": { - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "Single" - }, - "previousDescription": { - "servers": [], - "topologyType": "Unknown" - }, - "topologyId": "42" - } - }, - { - "server_opening_event": { - "address": "a:27017", - "topologyId": "42" - } - }, - { - "server_description_changed_event": { - "address": "a:27017", - "newDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Standalone" - }, - "previousDescription": { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - }, - "topologyId": "42" - } - }, - { - "topology_description_changed_event": { - "newDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Standalone" - } - ], - "topologyType": "Single" - }, - "previousDescription": { - "servers": [ - { - "address": "a:27017", - "arbiters": [], - "hosts": [], - "passives": [], - "type": "Unknown" - } - ], - "topologyType": "Single" - }, - "topologyId": "42" - } - } + "description": "Monitoring a standalone connection", + "uri": "mongodb://a:27017", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 4 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - }, - "responses": [ - [ - "a:27017", - { - "ismaster": true, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1 - } + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } ] - ] - } - ], - "uri": "mongodb://a:27017" + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] } diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json new file mode 100644 index 0000000000..a4b2d10da8 --- /dev/null +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -0,0 +1,113 @@ +{ + "description": "Monitoring a standalone connection - suppress update events for equal server descriptions", + "uri": "mongodb://a:27017", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 4 + } + ], + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 4 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 05bef0de29..cb17104cd4 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -30,7 +30,8 @@ from pymongo.settings import TopologySettings from pymongo.uri_parser import parse_uri from test import unittest -from test.utils import MockPool +from test.utils import (MockPool, + server_name_to_type) # Location of JSON test specifications. @@ -121,15 +122,7 @@ def check_outcome(self, topology, outcome): self.assertTrue(topology.has_server(node)) actual_server = topology.get_server_by_address(node) actual_server_description = actual_server.description - - if expected_server['type'] == 'PossiblePrimary': - # Special case, some tests in the spec include the PossiblePrimary - # type, but only single-threaded drivers need that type. We call - # possible primaries Unknown. - expected_server_type = SERVER_TYPE.Unknown - else: - expected_server_type = getattr( - SERVER_TYPE, expected_server['type']) + expected_server_type = server_name_to_type(expected_server['type']) self.assertEqual( server_type_name(expected_server_type), diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 5741110973..0d181bb75c 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -17,24 +17,23 @@ import json import os import sys -import weakref +import time sys.path[0:0] = [""] from bson.json_util import object_hook from pymongo import monitoring -from pymongo import periodic_executor +from pymongo.common import clean_node from pymongo.errors import (ConnectionFailure, NotMasterError) from pymongo.ismaster import IsMaster from pymongo.monitor import Monitor -from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription -from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TOPOLOGY_TYPE from test import unittest, client_context, client_knobs, IntegrationTest from test.utils import (ServerAndTopologyEventListener, single_client, + server_name_to_type, rs_or_single_client, wait_until) @@ -46,7 +45,7 @@ def compare_server_descriptions(expected, actual): if ((not expected['address'] == "%s:%s" % actual.address) or - (not SERVER_TYPE.__getattribute__(expected['type']) == + (not server_name_to_type(expected['type']) == actual.server_type)): return False expected_hosts = set( @@ -179,62 +178,56 @@ def setUp(cls): def create_test(scenario_def): def run_scenario(self): - responses = (r for r in scenario_def['phases'][0]['responses']) - - with client_knobs(events_queue_frequency=0.1, - heartbeat_frequency=0.1, - min_heartbeat_interval=0.1): - class MockMonitor(Monitor): - """Override the _run method""" - def _run(self): - try: - if self._server_description.address != ('a', 27017): - # Because PyMongo doesn't keep information about - # the order of addresses, we might accidentally - # start a MockMonitor on the wrong server first, - # so we need to only mock responses for the server - # the test's response is supposed to come from. - return - response = next(responses)[1] - isMaster = IsMaster(response) - self._server_description = ServerDescription( - address=self._server_description.address, - ismaster=isMaster) - self._topology.on_change(self._server_description) - except (ReferenceError, StopIteration): - # Topology was garbage-collected. - self.close() - - m = single_client(h=scenario_def['uri'], p=27017, - event_listeners=(self.all_listener,), - _monitor_class=MockMonitor) - - expected_results = scenario_def['phases'][0]['outcome']['events'] - - expected_len = len(expected_results) - wait_until(lambda: len(self.all_listener.results) >= expected_len, - "publish all events", timeout=15) + with client_knobs(events_queue_frequency=0.1): + _run_scenario(self) - try: - i = 0 - while i < expected_len: - result = self.all_listener.results[i] if len( - self.all_listener.results) > i else None - # The order of ServerOpening/ClosedEvents doesn't matter - if (isinstance(result, - monitoring.ServerOpeningEvent) or - isinstance(result, - monitoring.ServerClosedEvent)): - i, passed, message = compare_multiple_events( - i, expected_results, self.all_listener.results) - self.assertTrue(passed, message) - else: - self.assertTrue( - *compare_events(expected_results[i], result)) - i += 1 + def _run_scenario(self): + class NoopMonitor(Monitor): + """Override the _run method to do nothing.""" + def _run(self): + time.sleep(0.05) + + m = single_client(h=scenario_def['uri'], p=27017, + event_listeners=[self.all_listener], + _monitor_class=NoopMonitor) + topology = m._get_topology() + try: + for phase in scenario_def['phases']: + for (source, response) in phase['responses']: + source_address = clean_node(source) + topology.on_change(ServerDescription( + address=source_address, + ismaster=IsMaster(response), + round_trip_time=0)) + + expected_results = phase['outcome']['events'] + expected_len = len(expected_results) + wait_until( + lambda: len(self.all_listener.results) >= expected_len, + "publish all events", timeout=15) + + i = 0 + while i < expected_len: + result = self.all_listener.results[i] if len( + self.all_listener.results) > i else None + # The order of ServerOpening/ClosedEvents doesn't matter + if (isinstance(result, + monitoring.ServerOpeningEvent) or + isinstance(result, + monitoring.ServerClosedEvent)): + i, passed, message = compare_multiple_events( + i, expected_results, self.all_listener.results) + self.assertTrue(passed, message) + else: + self.assertTrue( + *compare_events(expected_results[i], result)) + i += 1 + + self.all_listener.reset() finally: m.close() + return run_scenario diff --git a/test/utils.py b/test/utils.py index 77e08fc4ed..c1f7b9a570 100644 --- a/test/utils.py +++ b/test/utils.py @@ -40,6 +40,7 @@ from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, writable_server_selector) +from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern from test import (client_context, @@ -868,4 +869,14 @@ def parse_read_preference(pref): max_staleness = pref.get('maxStalenessSeconds', -1) tag_sets = pref.get('tag_sets') return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness) \ No newline at end of file + mode, tag_sets=tag_sets, max_staleness=max_staleness) + + +def server_name_to_type(name): + """Convert a ServerType name to the corresponding value. For SDAM tests.""" + # Special case, some tests in the spec include the PossiblePrimary + # type, but only single-threaded drivers need that type. We call + # possible primaries Unknown. + if name == 'PossiblePrimary': + return SERVER_TYPE.Unknown + return getattr(SERVER_TYPE, name) From d7128c130c1fc20ea8178bc8adb62949e61af4fd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 18 Feb 2020 23:39:57 -0800 Subject: [PATCH 0068/2111] PYTHON-2024 Skip publishing SDAM events for "equivalent" ServerDescriptions --- pymongo/server_description.py | 21 +++++++++++++++++++++ pymongo/topology.py | 10 ++++++---- test/test_sdam_monitoring_spec.py | 14 ++++++++++---- 3 files changed, 37 insertions(+), 8 deletions(-) diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 04e9dbfe77..4f4dcdae8f 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -207,5 +207,26 @@ def retryable_reads_supported(self): """Checks if this server supports retryable writes.""" return self._max_wire_version >= 6 + def __eq__(self, other): + if isinstance(other, ServerDescription): + return ((self._address == other.address) and + (self._server_type == other.server_type) and + (self._min_wire_version == other.min_wire_version) and + (self._max_wire_version == other.max_wire_version) and + (self._me == other.me) and + (self._all_hosts == other.all_hosts) and + (self._tags == other.tags) and + (self._replica_set_name == other.replica_set_name) and + (self._set_version == other.set_version) and + (self._election_id == other.election_id) and + (self._primary == other.primary) and + (self._ls_timeout_minutes == + other.logical_session_timeout_minutes)) + + return NotImplemented + + def __ne__(self, other): + return not self == other + # For unittesting only. Use under no circumstances! _host_to_round_trip_time = {} diff --git a/pymongo/topology.py b/pymongo/topology.py index 7cad22cd99..7f65a36dfe 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -264,9 +264,11 @@ def _process_change(self, server_description): Hold the lock when calling this. """ td_old = self._description - if self._publish_server: - old_server_description = td_old._server_descriptions[ - server_description.address] + old_server_description = td_old._server_descriptions[ + server_description.address] + suppress_event = ((self._publish_server or self._publish_tp) + and old_server_description == server_description) + if self._publish_server and not suppress_event: self._events.put(( self._listeners.publish_server_description_changed, (old_server_description, server_description, @@ -278,7 +280,7 @@ def _process_change(self, server_description): self._update_servers() self._receive_cluster_time_no_lock(server_description.cluster_time) - if self._publish_tp: + if self._publish_tp and not suppress_event: self._events.put(( self._listeners.publish_topology_description_changed, (td_old, self._description, self._topology_id))) diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 0d181bb75c..616cce35b4 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -207,15 +207,16 @@ def _run(self): lambda: len(self.all_listener.results) >= expected_len, "publish all events", timeout=15) + # Wait some time to catch possible lagging extra events. + time.sleep(0.5) + i = 0 while i < expected_len: result = self.all_listener.results[i] if len( self.all_listener.results) > i else None # The order of ServerOpening/ClosedEvents doesn't matter - if (isinstance(result, - monitoring.ServerOpeningEvent) or - isinstance(result, - monitoring.ServerClosedEvent)): + if isinstance(result, (monitoring.ServerOpeningEvent, + monitoring.ServerClosedEvent)): i, passed, message = compare_multiple_events( i, expected_results, self.all_listener.results) self.assertTrue(passed, message) @@ -224,6 +225,11 @@ def _run(self): *compare_events(expected_results[i], result)) i += 1 + # Assert no extra events. + extra_events = self.all_listener.results[expected_len:] + if extra_events: + self.fail('Extra events %r' % (extra_events,)) + self.all_listener.reset() finally: m.close() From a460725f6b91ba14a1353b78e827574b234a3083 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 24 Feb 2020 17:13:12 -0800 Subject: [PATCH 0069/2111] PYTHON-2019 Add support for validate command "background" option --- doc/changelog.rst | 5 +++++ pymongo/database.py | 21 ++++++++++++++++++--- test/test_database.py | 10 ++++++++++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8a742e0056..e9d57304bb 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -11,6 +11,11 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` support. It will also be installed when using the "tls" extra if the version of Python in use is older than 2.7.9. +- Added the ``background`` parameter to + :meth:`pymongo.database.Database.validate_collection`. For a description + of this parameter see the MongoDB documentation for the `validate command`_. + +.. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ Issues Resolved ............... diff --git a/pymongo/database.py b/pymongo/database.py index 2af099c289..d363d3b90a 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -921,12 +921,15 @@ def drop_collection(self, name_or_collection, session=None): session=session) def validate_collection(self, name_or_collection, - scandata=False, full=False, session=None): + scandata=False, full=False, session=None, + background=None): """Validate a collection. Returns a dict of validation info. Raises CollectionInvalid if validation fails. + See also the MongoDB documentation on the `validate command`_. + :Parameters: - `name_or_collection`: A Collection object or the name of a collection to validate. @@ -938,9 +941,16 @@ def validate_collection(self, name_or_collection, documents. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `background` (optional): A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + + .. versionchanged:: 3.11 + Added ``background`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. + + .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ """ name = name_or_collection if isinstance(name, Collection): @@ -950,8 +960,13 @@ def validate_collection(self, name_or_collection, raise TypeError("name_or_collection must be an instance of " "%s or Collection" % (string_type.__name__,)) - result = self.command("validate", _unicode(name), - scandata=scandata, full=full, session=session) + cmd = SON([("validate", _unicode(name)), + ("scandata", scandata), + ("full", full)]) + if background is not None: + cmd["background"] = background + + result = self.command(cmd, session=session) valid = True # Pre 1.9 results diff --git a/test/test_database.py b/test/test_database.py index 76a549c95f..9ee889f073 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -367,6 +367,16 @@ def test_validate_collection(self): self.assertTrue(db.validate_collection(db.test, scandata=True)) self.assertTrue(db.validate_collection(db.test, scandata=True, full=True)) self.assertTrue(db.validate_collection(db.test, True, True)) + if client_context.version.at_least(4, 3, 3): + self.assertTrue(db.validate_collection(db.test, background=True)) + self.assertTrue(db.validate_collection(db.test, background=False)) + self.assertTrue( + db.validate_collection(db.test, scandata=True, background=True)) + # The server does not support background=True with full=True. + # Assert that we actually send the background option by checking + # that this combination fails. + with self.assertRaises(OperationFailure): + db.validate_collection(db.test, full=True, background=True) @client_context.require_no_mongos def test_profiling_levels(self): From 1323ef15cb5d2e2fe10b26feb31ea80df1050752 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Tue, 25 Feb 2020 16:49:26 -0800 Subject: [PATCH 0070/2111] PYTHON-2036 Expand CRUD API support for index hinting (also PYTHON-2015, PYTHON-2104, PYTHON-2134) --- pymongo/bulk.py | 24 +- pymongo/collection.py | 143 ++++++--- pymongo/operations.py | 89 ++++-- test/crud/v2/bulkWrite-update-hint.json | 366 +++++++++++++++++++++++ test/crud/v2/findOneAndReplace-hint.json | 128 ++++++++ test/crud/v2/findOneAndUpdate-hint.json | 136 +++++++++ test/crud/v2/replaceOne-hint.json | 146 +++++++++ test/crud/v2/updateMany-hint.json | 168 +++++++++++ test/crud/v2/updateOne-hint.json | 154 ++++++++++ test/utils_spec_runner.py | 22 +- 10 files changed, 1300 insertions(+), 76 deletions(-) create mode 100644 test/crud/v2/bulkWrite-update-hint.json create mode 100644 test/crud/v2/findOneAndReplace-hint.json create mode 100644 test/crud/v2/findOneAndUpdate-hint.json create mode 100644 test/crud/v2/replaceOne-hint.json create mode 100644 test/crud/v2/updateMany-hint.json create mode 100644 test/crud/v2/updateOne-hint.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index cd942a4884..41b2eedcd2 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -156,6 +156,7 @@ def __init__(self, collection, ordered, bypass_document_validation): self.bypass_doc_val = bypass_document_validation self.uses_collation = False self.uses_array_filters = False + self.uses_hint = False self.is_retryable = True self.retrying = False self.started_retryable_write = False @@ -180,7 +181,7 @@ def add_insert(self, document): self.ops.append((_INSERT, document)) def add_update(self, selector, update, multi=False, upsert=False, - collation=None, array_filters=None): + collation=None, array_filters=None, hint=None): """Create an update document and add it to the list of ops. """ validate_ok_for_update(update) @@ -193,13 +194,16 @@ def add_update(self, selector, update, multi=False, upsert=False, if array_filters is not None: self.uses_array_filters = True cmd['arrayFilters'] = array_filters + if hint is not None: + self.uses_hint = True + cmd['hint'] = hint if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append((_UPDATE, cmd)) def add_replace(self, selector, replacement, upsert=False, - collation=None): + collation=None, hint=None): """Create a replace document and add it to the list of ops. """ validate_ok_for_replace(replacement) @@ -209,6 +213,9 @@ def add_replace(self, selector, replacement, upsert=False, if collation is not None: self.uses_collation = True cmd['collation'] = collation + if hint is not None: + self.uses_hint = True + cmd['hint'] = hint self.ops.append((_UPDATE, cmd)) def add_delete(self, selector, limit, collation=None): @@ -252,9 +259,13 @@ def gen_unordered(self): def _execute_command(self, generator, write_concern, session, sock_info, op_id, retryable, full_result): - if sock_info.max_wire_version < 5 and self.uses_collation: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use a collation.') + if sock_info.max_wire_version < 5: + if self.uses_collation: + raise ConfigurationError( + 'Must be connected to MongoDB 3.4+ to use a collation.') + if self.uses_hint: + raise ConfigurationError( + 'Must be connected to MongoDB 3.4+ to use hint.') if sock_info.max_wire_version < 6 and self.uses_array_filters: raise ConfigurationError( 'Must be connected to MongoDB 3.6+ to use arrayFilters.') @@ -428,6 +439,9 @@ def execute_no_results(self, sock_info, generator): if self.uses_array_filters: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged writes.') + if self.uses_hint: + raise ConfigurationError( + 'hint is unsupported for unacknowledged writes.') # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val and sock_info.max_wire_version >= 4: raise OperationFailure("Cannot set bypass_document_validation with" diff --git a/pymongo/collection.py b/pymongo/collection.py index 894d1a48a8..502c9533d7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -761,7 +761,7 @@ def _update(self, sock_info, criteria, document, upsert=False, check_keys=True, multi=False, manipulate=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, - session=None, retryable_write=False): + hint=None, session=None, retryable_write=False): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) if manipulate: @@ -791,6 +791,17 @@ def _update(self, sock_info, criteria, document, upsert=False, 'arrayFilters is unsupported for unacknowledged writes.') else: update_doc['arrayFilters'] = array_filters + if hint is not None: + if sock_info.max_wire_version < 5: + raise ConfigurationError( + 'Must be connected to MongoDB 3.4+ to use hint.') + elif not acknowledged: + raise ConfigurationError( + 'hint is unsupported for unacknowledged writes.') + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) + update_doc['hint'] = hint + command = SON([('update', self.name), ('ordered', ordered), ('updates', [update_doc])]) @@ -839,7 +850,7 @@ def _update_retryable( check_keys=True, multi=False, manipulate=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, - session=None): + hint=None, session=None): """Internal update / replace helper.""" def _update(session, sock_info, retryable_write): return self._update( @@ -847,7 +858,7 @@ def _update(session, sock_info, retryable_write): check_keys=check_keys, multi=multi, manipulate=manipulate, write_concern=write_concern, op_id=op_id, ordered=ordered, bypass_doc_val=bypass_doc_val, collation=collation, - array_filters=array_filters, session=session, + array_filters=array_filters, hint=hint, session=session, retryable_write=retryable_write) return self.__database.client._retryable_write( @@ -856,7 +867,7 @@ def _update(session, sock_info, retryable_write): def replace_one(self, filter, replacement, upsert=False, bypass_document_validation=False, collation=None, - session=None): + hint=None, session=None): """Replace a single document matching the filter. >>> for doc in db.test.find({}): @@ -893,27 +904,30 @@ def replace_one(self, filter, replacement, upsert=False, match the filter. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. + ``False``. This option is only supported on MongoDB 3.2 and above. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support + Added bypass_document_validation support. .. versionadded:: 3.0 """ @@ -926,12 +940,13 @@ def replace_one(self, filter, replacement, upsert=False, filter, replacement, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, session=session), + collation=collation, hint=hint, session=session), write_concern.acknowledged) def update_one(self, filter, update, upsert=False, bypass_document_validation=False, - collation=None, array_filters=None, session=None): + collation=None, array_filters=None, hint=None, + session=None): """Update a single document matching the filter. >>> for doc in db.test.find(): @@ -959,32 +974,35 @@ def update_one(self, filter, update, upsert=False, match the filter. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. + ``False``. This option is only supported on MongoDB 3.2 and above. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. This option is only + supported on MongoDB 3.6 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. - + Added the ability to accept a pipeline as the ``update``. .. versionchanged:: 3.6 - Added the `array_filters` and ``session`` parameters. - + Added the ``array_filters`` and ``session`` parameters. .. versionchanged:: 3.4 - Added the `collation` option. - + Added the ``collation`` option. .. versionchanged:: 3.2 - Added bypass_document_validation support + Added ``bypass_document_validation`` support. .. versionadded:: 3.0 """ @@ -999,12 +1017,12 @@ def update_one(self, filter, update, upsert=False, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, - session=session), + hint=hint, session=session), write_concern.acknowledged) def update_many(self, filter, update, upsert=False, array_filters=None, bypass_document_validation=False, collation=None, - session=None): + hint=None, session=None): """Update one or more documents that match the filter. >>> for doc in db.test.find(): @@ -1032,32 +1050,35 @@ def update_many(self, filter, update, upsert=False, array_filters=None, match the filter. - `bypass_document_validation` (optional): If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. + ``False``. This option is only supported on MongoDB 3.2 and above. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. This option is only + supported on MongoDB 3.6 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.9 Added the ability to accept a pipeline as the `update`. - .. versionchanged:: 3.6 Added ``array_filters`` and ``session`` parameters. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support + Added bypass_document_validation support. .. versionadded:: 3.0 """ @@ -1072,7 +1093,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, - session=session), + hint=hint, session=session), write_concern.acknowledged) def drop(self, session=None): @@ -2834,7 +2855,8 @@ def _write_concern_for_cmd(self, cmd, session): def __find_and_modify(self, filter, projection, sort, upsert=None, return_document=ReturnDocument.BEFORE, - array_filters=None, session=None, **kwargs): + array_filters=None, hint=None, session=None, + **kwargs): """Internal findAndModify helper.""" common.validate_is_mapping("filter", filter) @@ -2854,6 +2876,9 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, if upsert is not None: common.validate_boolean("upsert", upsert) cmd["upsert"] = upsert + if hint is not None: + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) write_concern = self._write_concern_for_cmd(cmd, session) @@ -2868,6 +2893,11 @@ def _find_and_modify(session, sock_info, retryable_write): 'arrayFilters is unsupported for unacknowledged ' 'writes.') cmd["arrayFilters"] = array_filters + if hint is not None: + if sock_info.max_wire_version < 8: + raise ConfigurationError( + 'Must be connected to MongoDB 4.2+ to use hint.') + cmd['hint'] = hint if (sock_info.max_wire_version >= 4 and not write_concern.is_server_default): cmd['writeConcern'] = write_concern.document @@ -2952,7 +2982,7 @@ def find_one_and_delete(self, filter, def find_one_and_replace(self, filter, replacement, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, - session=None, **kwargs): + hint=None, session=None, **kwargs): """Finds a single document and replaces it, returning either the original or the replaced document. @@ -2994,16 +3024,24 @@ def find_one_and_replace(self, filter, replacement, if no document matches. If :attr:`ReturnDocument.AFTER`, returns the replaced or inserted document. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.4 - Added the `collation` option. + Added the ``collation`` option. .. versionchanged:: 3.2 Respects write concern. @@ -3019,12 +3057,13 @@ def find_one_and_replace(self, filter, replacement, kwargs['update'] = replacement return self.__find_and_modify(filter, projection, sort, upsert, return_document, - session=session, **kwargs) + hint=hint, session=session, **kwargs) def find_one_and_update(self, filter, update, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, - array_filters=None, session=None, **kwargs): + array_filters=None, hint=None, session=None, + **kwargs): """Finds a single document and updates it, returning either the original or the updated document. @@ -3104,19 +3143,28 @@ def find_one_and_update(self, filter, update, :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. This option is only + supported on MongoDB 3.6 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. + Added the ability to accept a pipeline as the ``update``. .. versionchanged:: 3.6 - Added the `array_filters` and `session` options. + Added the ``array_filters`` and ``session`` options. .. versionchanged:: 3.4 - Added the `collation` option. + Added the ``collation`` option. .. versionchanged:: 3.2 Respects write concern. @@ -3133,7 +3181,8 @@ def find_one_and_update(self, filter, update, kwargs['update'] = update return self.__find_and_modify(filter, projection, sort, upsert, return_document, - array_filters, session=session, **kwargs) + array_filters, hint=hint, + session=session, **kwargs) def save(self, to_save, manipulate=True, check_keys=True, **kwargs): """Save a document in this collection. diff --git a/pymongo/operations.py b/pymongo/operations.py index 76974e75a0..987a2cdfcf 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -14,6 +14,9 @@ """Operation class definitions.""" +from bson.py3compat import string_type + +from pymongo import helpers from pymongo.common import validate_boolean, validate_is_mapping, validate_list from pymongo.collation import validate_collation_or_none from pymongo.helpers import _gen_index_name, _index_document, _index_list @@ -136,9 +139,10 @@ def __ne__(self, other): class ReplaceOne(object): """Represents a replace_one operation.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation") + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - def __init__(self, filter, replacement, upsert=False, collation=None): + def __init__(self, filter, replacement, upsert=False, collation=None, + hint=None): """Create a ReplaceOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -151,74 +155,95 @@ def __init__(self, filter, replacement, upsert=False, collation=None): - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. - + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.5 - Added the `collation` option. + Added the ``collation`` option. """ if filter is not None: validate_is_mapping("filter", filter) if upsert is not None: validate_boolean("upsert", upsert) + if hint is not None: + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) + self._filter = filter self._doc = replacement self._upsert = upsert self._collation = collation + self._hint = hint def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" bulkobj.add_replace(self._filter, self._doc, self._upsert, - collation=self._collation) + collation=self._collation, hint=self._hint) def __eq__(self, other): if type(other) == type(self): return ( - (other._filter, other._doc, other._upsert, other._collation) == - (self._filter, self._doc, self._upsert, self._collation)) + (other._filter, other._doc, other._upsert, other._collation, + other._hint) == (self._filter, self._doc, self._upsert, + self._collation, other._hint)) return NotImplemented def __ne__(self, other): return not self == other def __repr__(self): - return "%s(%r, %r, %r, %r)" % ( + return "%s(%r, %r, %r, %r, %r)" % ( self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation) + self._collation, self._hint) class _UpdateOp(object): """Private base class for update operations.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters") + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", + "_hint") - def __init__(self, filter, doc, upsert, collation, array_filters): + def __init__(self, filter, doc, upsert, collation, array_filters, hint): if filter is not None: validate_is_mapping("filter", filter) if upsert is not None: validate_boolean("upsert", upsert) if array_filters is not None: validate_list("array_filters", array_filters) + if hint is not None: + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) + + self._filter = filter self._doc = doc self._upsert = upsert self._collation = collation self._array_filters = array_filters + self._hint = hint def __eq__(self, other): if type(other) == type(self): return ( (other._filter, other._doc, other._upsert, other._collation, - other._array_filters) == + other._array_filters, other._hint) == (self._filter, self._doc, self._upsert, self._collation, - self._array_filters)) + self._array_filters, self._hint)) return NotImplemented def __ne__(self, other): return not self == other def __repr__(self): - return "%s(%r, %r, %r, %r, %r)" % ( + return "%s(%r, %r, %r, %r, %r, %r)" % ( self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation, self._array_filters) + self._collation, self._array_filters, self._hint) class UpdateOne(_UpdateOp): @@ -227,7 +252,7 @@ class UpdateOne(_UpdateOp): __slots__ = () def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None): + array_filters=None, hint=None): """Represents an update_one operation. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -242,7 +267,15 @@ def __init__(self, filter, update, upsert=False, collation=None, supported on MongoDB 3.4 and above. - `array_filters` (optional): A list of filters specifying which array elements an update should apply. Requires MongoDB 3.6+. - + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. .. versionchanged:: 3.9 Added the ability to accept a pipeline as the `update`. .. versionchanged:: 3.6 @@ -251,13 +284,14 @@ def __init__(self, filter, update, upsert=False, collation=None, Added the `collation` option. """ super(UpdateOne, self).__init__(filter, update, upsert, collation, - array_filters) + array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" bulkobj.add_update(self._filter, self._doc, False, self._upsert, collation=self._collation, - array_filters=self._array_filters) + array_filters=self._array_filters, + hint=self._hint) class UpdateMany(_UpdateOp): @@ -266,7 +300,7 @@ class UpdateMany(_UpdateOp): __slots__ = () def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None): + array_filters=None, hint=None): """Create an UpdateMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -281,7 +315,15 @@ def __init__(self, filter, update, upsert=False, collation=None, supported on MongoDB 3.4 and above. - `array_filters` (optional): A list of filters specifying which array elements an update should apply. Requires MongoDB 3.6+. - + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. .. versionchanged:: 3.9 Added the ability to accept a pipeline as the `update`. .. versionchanged:: 3.6 @@ -290,13 +332,14 @@ def __init__(self, filter, update, upsert=False, collation=None, Added the `collation` option. """ super(UpdateMany, self).__init__(filter, update, upsert, collation, - array_filters) + array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" bulkobj.add_update(self._filter, self._doc, True, self._upsert, collation=self._collation, - array_filters=self._array_filters) + array_filters=self._array_filters, + hint=self._hint) class IndexModel(object): diff --git a/test/crud/v2/bulkWrite-update-hint.json b/test/crud/v2/bulkWrite-update-hint.json new file mode 100644 index 0000000000..15e169f76c --- /dev/null +++ b/test/crud/v2/bulkWrite-update-hint.json @@ -0,0 +1,366 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "test_bulkwrite_update_hint", + "tests": [ + { + "description": "BulkWrite updateOne with update hints", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "result": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": {}, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite updateMany with update hints", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "result": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": {}, + "matchedCount": 4, + "modifiedCount": 4, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_" + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite replaceOne with update hints", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "result": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": {}, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "hint": "_id_" + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 444 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndReplace-hint.json b/test/crud/v2/findOneAndReplace-hint.json new file mode 100644 index 0000000000..263fdf9623 --- /dev/null +++ b/test/crud/v2/findOneAndReplace-hint.json @@ -0,0 +1,128 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndReplace_hint", + "tests": [ + { + "description": "FindOneAndReplace with hint string", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndUpdate-hint.json b/test/crud/v2/findOneAndUpdate-hint.json new file mode 100644 index 0000000000..451eecc013 --- /dev/null +++ b/test/crud/v2/findOneAndUpdate-hint.json @@ -0,0 +1,136 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndUpdate_hint", + "tests": [ + { + "description": "FindOneAndUpdate with hint string", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/replaceOne-hint.json b/test/crud/v2/replaceOne-hint.json new file mode 100644 index 0000000000..de4aa4d02f --- /dev/null +++ b/test/crud/v2/replaceOne-hint.json @@ -0,0 +1,146 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "test_replaceone_hint", + "tests": [ + { + "description": "ReplaceOne with hint string", + "operations": [ + { + "object": "collection", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": "_id_" + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + } + }, + { + "description": "ReplaceOne with hint document", + "operations": [ + { + "object": "collection", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateMany-hint.json b/test/crud/v2/updateMany-hint.json new file mode 100644 index 0000000000..489348917f --- /dev/null +++ b/test/crud/v2/updateMany-hint.json @@ -0,0 +1,168 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "test_updatemany_hint", + "tests": [ + { + "description": "UpdateMany with hint string", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "result": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_" + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + } + }, + { + "description": "UpdateMany with hint document", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "result": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + } + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateOne-hint.json b/test/crud/v2/updateOne-hint.json new file mode 100644 index 0000000000..43f76da498 --- /dev/null +++ b/test/crud/v2/updateOne-hint.json @@ -0,0 +1,154 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "test_updateone_hint", + "tests": [ + { + "description": "UpdateOne with hint string", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + } + }, + { + "description": "UpdateOne with hint document", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + } + } + ] +} diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index a200b41f99..4357b29d2f 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -22,12 +22,13 @@ from bson.binary import Binary, STANDARD from bson.codec_options import CodecOptions from bson.int64 import Int64 -from bson.py3compat import iteritems, abc, text_type +from bson.py3compat import iteritems, abc, string_type, text_type from bson.son import SON from gridfs import GridFSBucket from pymongo import (client_session, + helpers, operations) from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor @@ -204,6 +205,25 @@ def parse_options(opts): if 'maxCommitTimeMS' in opts: opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') + if 'hint' in opts: + hint = opts.pop('hint') + if not isinstance(hint, string_type): + hint = list(iteritems(hint)) + opts['hint'] = hint + + # Properly format 'hint' arguments for the Bulk API tests. + if 'requests' in opts: + reqs = opts.pop('requests') + for req in reqs: + args = req.pop('arguments') + if 'hint' in args: + hint = args.pop('hint') + if not isinstance(hint, string_type): + hint = list(iteritems(hint)) + args['hint'] = hint + req['arguments'] = args + opts['requests'] = reqs + return dict(opts) def run_operation(self, sessions, collection, operation): From 651aa6aa98641c49c4dbbff5ab83f28904ee3baf Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 24 Feb 2020 13:35:28 -0800 Subject: [PATCH 0071/2111] PYTHON-2035: support for allowDiskUse in find() commands --- doc/changelog.rst | 2 + pymongo/collection.py | 64 +++++++++++++---------- pymongo/cursor.py | 30 ++++++++++- pymongo/message.py | 20 ++++++-- test/crud/v2/find-allowdiskuse.json | 78 +++++++++++++++++++++++++++++ test/test_cursor.py | 12 +++++ 6 files changed, 173 insertions(+), 33 deletions(-) create mode 100644 test/crud/v2/find-allowdiskuse.json diff --git a/doc/changelog.rst b/doc/changelog.rst index e9d57304bb..9aedb72fcb 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,8 @@ Changes in Version 3.11.0 Version 3.11 adds support for MongoDB 4.4. Highlights include: +- Added the ``allow_disk_use`` parameters to + :meth:`pymongo.collection.Collection.find`. - Support for :ref:`OCSP` (Online Certificate Status Protocol) - Support for `PyOpenSSL `_ as an alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` diff --git a/pymongo/collection.py b/pymongo/collection.py index 502c9533d7..60889fd9a0 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1412,6 +1412,11 @@ def find(self, *args, **kwargs): - `modifiers` (optional): **DEPRECATED** - A dict specifying additional MongoDB query modifiers. Use the keyword arguments listed above instead. + - `allow_disk_use` (optional): if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. The option has no effect if + MongoDB can satisfy the specified sort using an index, or if the + blocking sort requires less memory than the 100 MiB limit. .. note:: There are a number of caveats to using :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: @@ -1429,48 +1434,55 @@ def find(self, *args, **kwargs): connection will be closed and discarded without being returned to the connection pool. + .. versionchanged:: 3.11 + Added the ``allow_disk_use`` option. + .. versionchanged:: 3.7 - Deprecated the `snapshot` option, which is deprecated in MongoDB + Deprecated the ``snapshot`` option, which is deprecated in MongoDB 3.6 and removed in MongoDB 4.0. - Deprecated the `max_scan` option. Support for this option is - deprecated in MongoDB 4.0. Use `max_time_ms` instead to limit server - side execution time. - + Deprecated the ``max_scan`` option. Support for this option is + deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit + server-side execution time. .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.5 - Added the options `return_key`, `show_record_id`, `snapshot`, - `hint`, `max_time_ms`, `max_scan`, `min`, `max`, and `comment`. - Deprecated the option `modifiers`. + Added the options ``return_key``, ``show_record_id``, ``snapshot``, + ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and + ``comment``. + Deprecated the ``modifiers`` option. .. versionchanged:: 3.4 - Support the `collation` option. + Added support for the ``collation`` option. .. versionchanged:: 3.0 - Changed the parameter names `spec`, `fields`, `timeout`, and - `partial` to `filter`, `projection`, `no_cursor_timeout`, and - `allow_partial_results` respectively. - Added the `cursor_type`, `oplog_replay`, and `modifiers` options. - Removed the `network_timeout`, `read_preference`, `tag_sets`, - `secondary_acceptable_latency_ms`, `max_scan`, `snapshot`, - `tailable`, `await_data`, `exhaust`, `as_class`, and slave_okay - parameters. Removed `compile_re` option: PyMongo now always + Changed the parameter names ``spec``, ``fields``, ``timeout``, and + ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, + and ``allow_partial_results`` respectively. + Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` + options. + Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, + ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, + ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and + slave_okay parameters. + Removed ``compile_re`` option: PyMongo now always represents BSON regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular - expression object. Soft deprecated the `manipulate` option. + expression object. + Soft deprecated the ``manipulate`` option. .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. - - .. versionadded:: 2.3 - The `tag_sets` and `secondary_acceptable_latency_ms` parameters. + Added ``compile_re`` option. If set to False, PyMongo represented + BSON regular expressions as :class:`~bson.regex.Regex` objects + instead of attempting to compile BSON regular expressions as Python + native regular expressions, thus preventing errors for some + incompatible patterns, see `PYTHON-500`_. + + .. versionchanged:: 2.3 + Added the ``tag_sets`` and ``secondary_acceptable_latency_ms`` + parameters. .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 5de40518c5..52ed1c0c5e 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -114,7 +114,8 @@ def __init__(self, collection, filter=None, projection=None, skip=0, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, - snapshot=False, comment=None, session=None): + snapshot=False, comment=None, session=None, + allow_disk_use=None): """Create a new cursor. Should not be called directly by application developers - see @@ -159,6 +160,9 @@ def __init__(self, collection, filter=None, projection=None, skip=0, raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. + if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) if projection is not None: if not projection: @@ -184,6 +188,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__collation = validate_collation_or_none(collation) self.__return_key = return_key self.__show_record_id = show_record_id + self.__allow_disk_use = allow_disk_use self.__snapshot = snapshot self.__set_hint(hint) @@ -426,6 +431,26 @@ def remove_option(self, mask): self.__query_flags &= ~mask return self + def allow_disk_use(self, allow_disk_use): + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` is `allow_disk_use` is not a boolean. + + :Parameters: + - `allow_disk_use`: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError('allow_disk_use must be a bool') + self.__check_okay_to_chain() + + self.__allow_disk_use = allow_disk_use + return self + def limit(self, limit): """Limits the number of results to be returned by this cursor. @@ -1069,7 +1094,8 @@ def _refresh(self): self.__read_concern, self.__collation, self.__session, - self.__collection.database.client) + self.__collection.database.client, + self.__allow_disk_use) self.__send_message(q) elif self.__id: # Get More if self.__limit: diff --git a/pymongo/message.py b/pymongo/message.py index 1f34efa952..9efb835c12 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -182,7 +182,8 @@ def _convert_write_result(operation, command, result): def _gen_find_command(coll, spec, projection, skip, limit, batch_size, options, - read_concern, collation=None, session=None): + read_concern, collation=None, session=None, + allow_disk_use=None): """Generate a find command document.""" cmd = SON([('find', coll)]) if '$query' in spec: @@ -209,10 +210,13 @@ def _gen_find_command(coll, spec, projection, skip, limit, batch_size, options, cmd['readConcern'] = read_concern.document if collation: cmd['collation'] = collation + if allow_disk_use is not None: + cmd['allowDiskUse'] = allow_disk_use if options: cmd.update([(opt, True) for opt, val in _OPTIONS.items() if options & val]) + return cmd @@ -233,7 +237,7 @@ class _Query(object): __slots__ = ('flags', 'db', 'coll', 'ntoskip', 'spec', 'fields', 'codec_options', 'read_preference', 'limit', 'batch_size', 'name', 'read_concern', 'collation', - 'session', 'client', '_as_command') + 'session', 'client', 'allow_disk_use', '_as_command') # For compatibility with the _GetMore class. exhaust_mgr = None @@ -241,7 +245,8 @@ class _Query(object): def __init__(self, flags, db, coll, ntoskip, spec, fields, codec_options, read_preference, limit, - batch_size, read_concern, collation, session, client): + batch_size, read_concern, collation, session, client, + allow_disk_use): self.flags = flags self.db = db self.coll = coll @@ -256,6 +261,7 @@ def __init__(self, flags, db, coll, ntoskip, spec, fields, self.collation = collation self.session = session self.client = client + self.allow_disk_use = allow_disk_use self.name = 'find' self._as_command = None @@ -279,6 +285,10 @@ def use_command(self, sock_info, exhaust): 'Specifying a collation is unsupported with a max wire ' 'version of %d.' % (sock_info.max_wire_version,)) + if sock_info.max_wire_version < 4 and self.allow_disk_use is not None: + # Ignore allowDiskUse for MongoDB < 3.2. + self.allow_disk_use = None + sock_info.validate_session(self.client, self.session) return use_find_cmd @@ -294,7 +304,7 @@ def as_command(self, sock_info): cmd = _gen_find_command( self.coll, self.spec, self.fields, self.ntoskip, self.limit, self.batch_size, self.flags, self.read_concern, - self.collation, self.session) + self.collation, self.session, self.allow_disk_use) if explain: self.name = 'explain' cmd = SON([('explain', cmd)]) @@ -1629,7 +1639,7 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, query = _Query( 0, db, coll, 0, query, None, codec_options, read_preference, ntoreturn, 0, DEFAULT_READ_CONCERN, None, None, - None) + None, None) name = next(iter(cmd)) publish = listeners.enabled_for_commands diff --git a/test/crud/v2/find-allowdiskuse.json b/test/crud/v2/find-allowdiskuse.json new file mode 100644 index 0000000000..2df4dbc98e --- /dev/null +++ b/test/crud/v2/find-allowdiskuse.json @@ -0,0 +1,78 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "collection_name": "test_find_allowdiskuse", + "tests": [ + { + "description": "Find does not send allowDiskuse when value is not specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": null + } + } + } + ] + }, + { + "description": "Find sends allowDiskuse false when false is specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": false + } + } + } + ] + }, + { + "description": "Find sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": true + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/test/test_cursor.py b/test/test_cursor.py index 6c5f34afad..c04c58def3 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -146,6 +146,18 @@ def test_add_remove_option_exhaust(self): self.assertEqual(0, cursor._Cursor__query_flags) self.assertFalse(cursor._Cursor__exhaust) + def test_allow_disk_use(self): + db = self.db + db.pymongo_test.drop() + coll = db.pymongo_test + + self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz') + + cursor = coll.find().allow_disk_use(True) + self.assertEqual(True, cursor._Cursor__allow_disk_use) + cursor = coll.find().allow_disk_use(False) + self.assertEqual(False, cursor._Cursor__allow_disk_use) + def test_max_time_ms(self): db = self.db db.pymongo_test.drop() From a43e73dd200e1dba2e2a89847a46fad1af09bbe3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 27 Feb 2020 12:05:08 -0800 Subject: [PATCH 0072/2111] PYTHON-2113 An empty authSource URI option is not valid Update spec test runner for PYTHON-1846. --- pymongo/uri_parser.py | 3 + test/auth/connection-string.json | 376 +++++++++++++------------------ test/test_auth_spec.py | 49 ++-- 3 files changed, 183 insertions(+), 245 deletions(-) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 82e69b163b..131f54d8f1 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -294,6 +294,9 @@ def split_options(opts, validate=True, warn=False, normalize=True): if validate: options = validate_options(options, warn) + if options.get('authsource') == '': + raise InvalidURI( + "the authSource database cannot be an empty string") if normalize: options = _normalize_options(options) diff --git a/test/auth/connection-string.json b/test/auth/connection-string.json index 820ad853c7..2005a090ab 100644 --- a/test/auth/connection-string.json +++ b/test/auth/connection-string.json @@ -3,101 +3,91 @@ { "description": "should use the default source and mechanism", "uri": "mongodb://user:password@localhost", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "admin" - }, - "options": null + "source": "admin", + "mechanism": null, + "mechanism_properties": null + } }, { "description": "should use the database when no authSource is specified", "uri": "mongodb://user:password@localhost/foo", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "foo" - }, - "options": null + "source": "foo", + "mechanism": null, + "mechanism_properties": null + } }, { "description": "should use the authSource when specified", "uri": "mongodb://user:password@localhost/foo?authSource=bar", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "bar" - }, - "options": null + "source": "bar", + "mechanism": null, + "mechanism_properties": null + } }, { "description": "should recognise the mechanism (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user@DOMAIN.COM", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } } }, { "description": "should ignore the database (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM@localhost/foo?authMechanism=GSSAPI", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user@DOMAIN.COM", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } } }, { "description": "should accept valid authSource (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=$external", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user@DOMAIN.COM", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } } }, { "description": "should accept generic mechanism property (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user@DOMAIN.COM", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI", - "authmechanismproperties": { + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { "SERVICE_NAME": "other", "CANONICALIZE_HOST_NAME": true } @@ -106,348 +96,286 @@ { "description": "should accept the password (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user@DOMAIN.COM", "password": "password", - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI" + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } } }, { - "description": "may support deprecated gssapiServiceName option (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&gssapiServiceName=other", - "hosts": null, - "valid": true, - "warning": false, - "optional": true, - "auth": { - "username": "user@DOMAIN.COM", - "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "GSSAPI", - "authmechanismproperties": { - "SERVICE_NAME": "other" - } - } + "description": "must raise an error when the authSource is empty", + "uri": "mongodb://user:password@localhost/foo?authSource=", + "valid": false + }, + { + "description": "must raise an error when the authSource is empty without credentials", + "uri": "mongodb://localhost/admin?authSource=", + "valid": false }, { "description": "should throw an exception if authSource is invalid (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=foo", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should throw an exception if no username (GSSAPI)", "uri": "mongodb://localhost/?authMechanism=GSSAPI", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should recognize the mechanism (MONGODB-CR)", "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-CR", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "admin" - }, - "options": { - "authmechanism": "MONGODB-CR" + "source": "admin", + "mechanism": "MONGODB-CR", + "mechanism_properties": null } }, { "description": "should use the database when no authSource is specified (MONGODB-CR)", "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "MONGODB-CR" + "source": "foo", + "mechanism": "MONGODB-CR", + "mechanism_properties": null } }, { "description": "should use the authSource when specified (MONGODB-CR)", "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "MONGODB-CR" + "source": "bar", + "mechanism": "MONGODB-CR", + "mechanism_properties": null } }, { "description": "should throw an exception if no username is supplied (MONGODB-CR)", "uri": "mongodb://localhost/?authMechanism=MONGODB-CR", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should recognize the mechanism (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null } }, { "description": "should ignore the database (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null } }, { "description": "should accept valid authSource (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509&authSource=$external", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null } }, { "description": "should recognize the mechanism with no username (MONGODB-X509)", "uri": "mongodb://localhost/?authMechanism=MONGODB-X509", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": null, "password": null, - "db": "$external" - }, - "options": { - "authmechanism": "MONGODB-X509" + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null } }, { "description": "should throw an exception if supplied a password (MONGODB-X509)", "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should throw an exception if authSource is invalid (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509&authSource=bar", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should recognize the mechanism (PLAIN)", "uri": "mongodb://user:password@localhost/?authMechanism=PLAIN", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "$external" - }, - "options": { - "authmechanism": "PLAIN" + "source": "$external", + "mechanism": "PLAIN", + "mechanism_properties": null } }, { "description": "should use the database when no authSource is specified (PLAIN)", "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "PLAIN" + "source": "foo", + "mechanism": "PLAIN", + "mechanism_properties": null } }, { "description": "should use the authSource when specified (PLAIN)", "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN&authSource=bar", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "PLAIN" + "source": "bar", + "mechanism": "PLAIN", + "mechanism_properties": null } }, { "description": "should throw an exception if no username (PLAIN)", "uri": "mongodb://localhost/?authMechanism=PLAIN", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should recognize the mechanism (SCRAM-SHA-1)", "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-1", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "admin" - }, - "options": { - "authmechanism": "SCRAM-SHA-1" + "source": "admin", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null } }, { "description": "should use the database when no authSource is specified (SCRAM-SHA-1)", "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "SCRAM-SHA-1" + "source": "foo", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null } }, { "description": "should accept valid authSource (SCRAM-SHA-1)", "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1&authSource=bar", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "SCRAM-SHA-1" + "source": "bar", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null } }, { "description": "should throw an exception if no username (SCRAM-SHA-1)", "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-1", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false }, { "description": "should recognize the mechanism (SCRAM-SHA-256)", "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "admin" - }, - "options": { - "authmechanism": "SCRAM-SHA-256" + "source": "admin", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null } }, { "description": "should use the database when no authSource is specified (SCRAM-SHA-256)", "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "foo" - }, - "options": { - "authmechanism": "SCRAM-SHA-256" + "source": "foo", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null } }, { "description": "should accept valid authSource (SCRAM-SHA-256)", "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256&authSource=bar", - "hosts": null, "valid": true, - "warning": false, - "auth": { + "credential": { "username": "user", "password": "password", - "db": "bar" - }, - "options": { - "authmechanism": "SCRAM-SHA-256" + "source": "bar", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null } }, { "description": "should throw an exception if no username (SCRAM-SHA-256)", "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-256", - "hosts": null, - "valid": false, - "warning": false, - "auth": null, - "options": null + "valid": false + }, + { + "description": "URI with no auth-related info doesn't create credential", + "uri": "mongodb://localhost/", + "valid": true, + "credential": null + }, + { + "description": "database in URI path doesn't create credentials", + "uri": "mongodb://localhost/foo", + "valid": true, + "credential": null + }, + { + "description": "authSource without username doesn't create credential", + "uri": "mongodb://localhost/?authSource=foo", + "valid": true, + "credential": null + }, + { + "description": "should throw an exception if no username provided (userinfo implies default mechanism)", + "uri": "mongodb://@localhost.com/", + "valid": false + }, + { + "description": "should throw an exception if no username/password provided (userinfo implies default mechanism)", + "uri": "mongodb://:@localhost.com/", + "valid": false } ] -} +} \ No newline at end of file diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 947bdbb987..4e76a97dd7 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -38,37 +38,44 @@ def create_test(test_case): def run_test(self): uri = test_case['uri'] valid = test_case['valid'] - auth = test_case['auth'] - options = test_case['options'] + credential = test_case.get('credential') if not valid: self.assertRaises(Exception, MongoClient, uri, connect=False) else: client = MongoClient(uri, connect=False) credentials = client._MongoClient__options.credentials - if auth is not None: - self.assertEqual(credentials.username, auth['username']) - self.assertEqual(credentials.password, auth['password']) - self.assertEqual(credentials.source, auth['db']) - if options is not None: - if 'authmechanism' in options: + if credential is None: + self.assertIsNone(credentials) + else: + self.assertIsNotNone(credentials) + self.assertEqual(credentials.username, credential['username']) + self.assertEqual(credentials.password, credential['password']) + self.assertEqual(credentials.source, credential['source']) + if credential['mechanism'] is not None: self.assertEqual( - credentials.mechanism, options['authmechanism']) + credentials.mechanism, credential['mechanism']) else: self.assertEqual(credentials.mechanism, 'DEFAULT') - if 'authmechanismproperties' in options: - expected = options['authmechanismproperties'] + expected = credential['mechanism_properties'] + if expected is not None: actual = credentials.mechanism_properties - if 'SERVICE_NAME' in expected: - self.assertEqual( - actual.service_name, expected['SERVICE_NAME']) - if 'CANONICALIZE_HOST_NAME' in expected: - self.assertEqual( - actual.canonicalize_host_name, - expected['CANONICALIZE_HOST_NAME']) - if 'SERVICE_REALM' in expected: - self.assertEqual( - actual.service_realm, expected['SERVICE_REALM']) + for key, val in expected.items(): + if 'SERVICE_NAME' in expected: + self.assertEqual( + actual.service_name, expected['SERVICE_NAME']) + elif 'CANONICALIZE_HOST_NAME' in expected: + self.assertEqual( + actual.canonicalize_host_name, + expected['CANONICALIZE_HOST_NAME']) + elif 'SERVICE_REALM' in expected: + self.assertEqual( + actual.service_realm, + expected['SERVICE_REALM']) + else: + self.fail('Unhandled property: %s' % (key,)) + else: + self.assertIsNone(credentials.mechanism_properties) return run_test From e26dc96e31789e7c6ea1991ff1be44c4065a3403 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 31 Jan 2020 16:24:31 -0800 Subject: [PATCH 0073/2111] PYTHON-2034 Support MONGODB-AWS authentication mechanism Use botocore to perform the manual Signature Version 4 Signing Process. Test MONGODB-AWS in Evergreen. Properly unquote URI option values in authMechanismProperties and readPreferenceTags. --- .evergreen/config.yml | 194 ++++++++++++++++++++++++ .evergreen/run-mongodb-aws-ecs-test.sh | 48 ++++++ .evergreen/run-mongodb-aws-test.sh | 45 ++++++ README.rst | 15 +- doc/changelog.rst | 5 +- doc/examples/authentication.rst | 119 +++++++++++++++ doc/examples/encryption.rst | 2 + doc/installation.rst | 13 +- pymongo/auth.py | 39 ++++- pymongo/auth_aws.py | 201 +++++++++++++++++++++++++ pymongo/common.py | 22 ++- pymongo/database.py | 4 +- pymongo/mongo_client.py | 6 +- pymongo/uri_parser.py | 21 ++- setup.py | 1 + test/auth/connection-string.json | 46 +++++- test/auth_aws/test_auth_aws.py | 60 ++++++++ test/test_auth_spec.py | 10 +- test/test_uri_parser.py | 37 +++++ 19 files changed, 860 insertions(+), 28 deletions(-) create mode 100755 .evergreen/run-mongodb-aws-ecs-test.sh create mode 100755 .evergreen/run-mongodb-aws-test.sh create mode 100644 pymongo/auth_aws.py create mode 100644 test/auth_aws/test_auth_aws.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 757a574803..443b305ac0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -429,6 +429,172 @@ functions: # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) PYTHON_BINARY=${PYTHON_BINARY} ATLAS_REPL='${atlas_repl}' ATLAS_SHRD='${atlas_shrd}' ATLAS_FREE='${atlas_free}' ATLAS_TLS11='${atlas_tls11}' ATLAS_TLS12='${atlas_tls12}' sh ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh + "add aws auth variables to file": + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + cat < ${DRIVERS_TOOLS}/.evergreen/auth_aws/aws_e2e_setup.json + { + "iam_auth_ecs_account" : "${iam_auth_ecs_account}", + "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}", + "iam_auth_ecs_account_arn": "arn:aws:iam::557821124784:user/authtest_fargate_user", + "iam_auth_ecs_cluster": "${iam_auth_ecs_cluster}", + "iam_auth_ecs_task_definition": "${iam_auth_ecs_task_definition}", + "iam_auth_ecs_subnet_a": "${iam_auth_ecs_subnet_a}", + "iam_auth_ecs_subnet_b": "${iam_auth_ecs_subnet_b}", + "iam_auth_ecs_security_group": "${iam_auth_ecs_security_group}", + + "iam_auth_assume_aws_account" : "${iam_auth_assume_aws_account}", + "iam_auth_assume_aws_secret_access_key" : "${iam_auth_assume_aws_secret_access_key}", + "iam_auth_assume_role_name" : "${iam_auth_assume_role_name}", + + "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}", + "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}", + "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}" + } + EOF + + "run aws auth test with regular aws credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + mongo aws_e2e_regular_aws.js + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' + USER=$(urlencode ${iam_auth_ecs_account}) + PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) + MONGODB_URI="mongodb://$USER:$PASS@localhost" + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh + + "run aws auth test with assume role credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + mongo aws_e2e_assume_role.js + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' + USER=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + USER=$(urlencode $USER) + PASS=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + PASS=$(urlencode $PASS) + SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + SESSION_TOKEN=$(urlencode $SESSION_TOKEN) + MONGODB_URI="mongodb://$USER:$PASS@localhost" + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh + + "run aws auth test with aws EC2 credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + mongo aws_e2e_ec2.js + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh + + "run aws auth test with aws credentials as environment variables": + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + export AWS_ACCESS_KEY_ID=${iam_auth_ecs_account} + export AWS_SECRET_ACCESS_KEY=${iam_auth_ecs_secret_access_key} + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + PROJECT_DIRECTORY=${PROJECT_DIRECTORY} .evergreen/run-mongodb-aws-test.sh + + "run aws auth test with aws credentials and session token as environment variables": + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + export AWS_ACCESS_KEY_ID=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + export AWS_SECRET_ACCESS_KEY=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + export AWS_SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + .evergreen/run-mongodb-aws-test.sh + + "run aws ECS auth test": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + + cat < setup.js + const mongo_binaries = "$MONGODB_BINARIES"; + const project_dir = "$PROJECT_DIRECTORY"; + EOF + + mongo --nodb setup.js aws_e2e_ecs.js + cd - + "cleanup": - command: shell.exec params: @@ -968,6 +1134,22 @@ tasks: vars: OCSP_TLS_SHOULD_SUCCEED: "0" + - name: "aws-auth-test" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + # TODO: SSL?? + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" + # }}} - name: "coverage-report" tags: ["coverage"] @@ -1064,6 +1246,10 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604/master/latest/libmongocrypt.tar.gz + - id: ubuntu-18.04 + display_name: "Ubuntu 18.04" + run_on: ubuntu1804-test + batchtime: 10080 # 7 days - id: ubuntu1604-arm64-small display_name: "Ubuntu 16.04 (ARM64)" run_on: ubuntu1604-arm64-small @@ -1920,6 +2106,14 @@ buildvariants: tasks: - name: ".ocsp" +- matrix_name: "aws-auth-test" + matrix_spec: + platform: ubuntu-18.04 + display_name: "MONGODB-AWS Auth test" + run_on: ubuntu1804-test + tasks: + - name: "aws-auth-test" + # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh new file mode 100755 index 0000000000..6be913eff3 --- /dev/null +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Don't trace since the URI contains a password that shouldn't show up in the logs +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +if [[ -z "$1" ]]; then + echo "usage: $0 " + exit 1 +fi +export MONGODB_URI="$1" + +if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in ECS test!"; + exit 1 +fi +# Now we can safely enable xtrace +set -o xtrace + +if command -v virtualenv ; then + VIRTUALENV=$(command -v virtualenv) +else + echo "Installing virtualenv..." + apt install python3-pip -y + pip3 install --user virtualenv + VIRTUALENV='python3 -m virtualenv' +fi + +authtest () { + echo "Running MONGODB-AWS ECS authentication tests with $PYTHON" + $PYTHON --version + + $VIRTUALENV -p $PYTHON --system-site-packages --never-download venvaws + . venvaws/bin/activate + pip install requests botocore + + cd src + python test/auth_aws/test_auth_aws.py + cd - + deactivate + rm -rf venvaws +} + +PYTHON=$(command -v python) authtest +PYTHON=$(command -v python3) authtest diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh new file mode 100755 index 0000000000..5febd1302c --- /dev/null +++ b/.evergreen/run-mongodb-aws-test.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +echo "Running MONGODB-AWS authentication tests" +# ensure no secrets are printed in log files +set +x + +# load the script +shopt -s expand_aliases # needed for `urlencode` alias +[ -s "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" ] && source "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + +MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} +MONGODB_URI="${MONGODB_URI}/aws?authMechanism=MONGODB-AWS" +if [[ -n ${SESSION_TOKEN} ]]; then + MONGODB_URI="${MONGODB_URI}&authMechanismProperties=AWS_SESSION_TOKEN:${SESSION_TOKEN}" +fi + +export MONGODB_URI="$MONGODB_URI" + +# show test output +set -x + +VIRTUALENV=$(command -v virtualenv) + +authtest () { + echo "Running MONGODB-AWS authentication tests with $PYTHON" + $PYTHON --version + + $VIRTUALENV -p $PYTHON --system-site-packages --never-download venvaws + . venvaws/bin/activate + pip install requests botocore + + python test/auth_aws/test_auth_aws.py + deactivate + rm -rf venvaws +} + +PYTHON=$(command -v python) authtest +PYTHON=$(command -v python3) authtest diff --git a/README.rst b/README.rst index 7e4d16b19d..585c51f1c8 100644 --- a/README.rst +++ b/README.rst @@ -99,6 +99,12 @@ dependency can be installed automatically along with PyMongo:: $ python -m pip install pymongo[gssapi] +MONGODB-AWS authentication requires `botocore +`_ and `requests +`_:: + + $ python -m pip install pymongo[aws] + Support for mongodb+srv:// URIs requires `dnspython `_:: @@ -116,7 +122,7 @@ PyMongo:: .. note:: Users of Python versions older than 2.7.9 will also receive the dependencies for OCSP when using the tls extra. -:ref:`OCSP` requires `PyOpenSSL +OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests `_ and `service_identity `_:: @@ -133,10 +139,15 @@ Wire protocol compression with zstandard requires `zstandard $ python -m pip install pymongo[zstd] +Client-Side Field Level Encryption requires `pymongocrypt +`_:: + + $ python -m pip install pymongo[encryption] + You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[gssapi,ocsp,snappy,srv,tls,zstd] + $ python -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption] Other optional packages: diff --git a/doc/changelog.rst b/doc/changelog.rst index 9aedb72fcb..9cbf8eebcb 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,16 +6,17 @@ Changes in Version 3.11.0 Version 3.11 adds support for MongoDB 4.4. Highlights include: -- Added the ``allow_disk_use`` parameters to - :meth:`pymongo.collection.Collection.find`. - Support for :ref:`OCSP` (Online Certificate Status Protocol) - Support for `PyOpenSSL `_ as an alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` support. It will also be installed when using the "tls" extra if the version of Python in use is older than 2.7.9. +- Support for the :ref:`MONGODB-AWS` authentication mechanism. - Added the ``background`` parameter to :meth:`pymongo.database.Database.validate_collection`. For a description of this parameter see the MongoDB documentation for the `validate command`_. +- Added the ``allow_disk_use`` parameters to + :meth:`pymongo.collection.Collection.find`. .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index dabf06957a..bf5fff1303 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -5,6 +5,8 @@ MongoDB supports several different authentication mechanisms. These examples cover all authentication methods currently supported by PyMongo, documenting Python module and MongoDB version dependencies. +.. _percent escaped: + Percent-Escaping Username and Password -------------------------------------- @@ -252,3 +254,120 @@ the SASL PLAIN mechanism:: ... ssl_cert_reqs=ssl.CERT_REQUIRED, ... ssl_ca_certs='/path/to/ca.pem') >>> + +.. _MONGODB-AWS: + +MONGODB-AWS +----------- +.. versionadded:: 3.11 + +The MONGODB-AWS authentication mechanism is available in MongoDB 4.4+ and +requires extra pymongo dependencies. To use it, install pymongo with the +``aws`` extra:: + + $ python -m pip install 'pymongo[aws]' + +The MONGODB-AWS mechanism authenticates using AWS IAM credentials (an access +key ID and a secret access key), `temporary AWS IAM credentials`_ obtained +from an `AWS Security Token Service (STS)`_ `Assume Role`_ request, +AWS Lambda `environment variables`_, or temporary AWS IAM credentials assigned +to an `EC2 instance`_ or ECS task. The use of temporary credentials, in +addition to an access key ID and a secret access key, also requires a +security (or session) token. + +Credentials can be configured through the MongoDB URI, environment variables, +or the local EC2 or ECS endpoint. The order in which the client searches for +credentials is: + +#. Credentials passed through the URI +#. Environment variables +#. ECS endpoint if and only if ``AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`` is set. +#. EC2 endpoint + +MONGODB-AWS authenticates against the "$external" virtual database, so none of +the URIs in this section need to include the ``authSource`` URI option. + +AWS IAM credentials +~~~~~~~~~~~~~~~~~~~ + +Applications can authenticate using AWS IAM credentials by providing a valid +access key id and secret access key pair as the username and password, +respectively, in the MongoDB URI. A sample URI would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb://:@localhost/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: The access_key_id and secret_access_key passed into the URI MUST + be `percent escaped`_. + +AssumeRole +~~~~~~~~~~ + +Applications can authenticate using temporary credentials returned from an +assume role request. These temporary credentials consist of an access key +ID, a secret access key, and a security token passed into the URI. +A sample URI would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb://:@example.com/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" + >>> client = MongoClient(uri) + +.. note:: The access_key_id, secret_access_key, and session_token passed into + the URI MUST be `percent escaped`_. + +AWS Lambda (Environment Variables) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When the username and password are not provided and the MONGODB-AWS mechanism +is set, the client will fallback to using the `environment variables`_ +``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` +for the access key ID, secret access key, and session token, respectively:: + + $ export AWS_ACCESS_KEY_ID= + $ export AWS_SECRET_ACCESS_KEY= + $ export AWS_SESSION_TOKEN= + $ python + >>> from pymongo import MongoClient + >>> uri = "mongodb://example.com/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: No username, password, or session token is passed into the URI. + PyMongo will use credentials set via the environment variables. + These environment variables MUST NOT be `percent escaped`_. + +ECS Container +~~~~~~~~~~~~~ + +Applications can authenticate from an ECS container via temporary +credentials assigned to the machine. A sample URI on an ECS container +would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: No username, password, or session token is passed into the URI. + PyMongo will query the ECS container endpoint to obtain these + credentials. + +EC2 Instance +~~~~~~~~~~~~ + +Applications can authenticate from an EC2 instance via temporary +credentials assigned to the machine. A sample URI on an EC2 machine +would be:: + + >>> from pymongo import MongoClient + >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" + >>> client = MongoClient(uri) + +.. note:: No username, password, or session token is passed into the URI. + PyMongo will query the EC2 instance endpoint to obtain these + credentials. + +.. _temporary AWS IAM credentials: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html +.. _AWS Security Token Service (STS): https://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html +.. _Assume Role: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html +.. _EC2 instance: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html +.. _environment variables: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 2f8e2c7a93..ba07b37223 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -1,3 +1,5 @@ +.. _Client-Side Field Level Encryption: + Client-Side Field Level Encryption ================================== diff --git a/doc/installation.rst b/doc/installation.rst index 4875d72aad..aa7ec6a41c 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -56,6 +56,12 @@ dependency can be installed automatically along with PyMongo:: $ python -m pip install pymongo[gssapi] +:ref:`MONGODB-AWS` authentication requires `botocore +`_ and `requests +`_:: + + $ python -m pip install pymongo[aws] + Support for mongodb+srv:// URIs requires `dnspython `_:: @@ -90,10 +96,15 @@ Wire protocol compression with zstandard requires `zstandard $ python -m pip install pymongo[zstd] +:ref:`Client-Side Field Level Encryption` requires `pymongocrypt +`_:: + + $ python -m pip install pymongo[encryption] + You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[gssapi,ocsp,snappy,srv,tls,zstd] + $ python -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption] Other optional packages: diff --git a/pymongo/auth.py b/pymongo/auth.py index 455717a6af..b52c6b0aff 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -43,6 +43,7 @@ from bson.binary import Binary from bson.py3compat import string_type, _unicode, PY3 from bson.son import SON +from pymongo.auth_aws import _HAVE_MONGODB_AWS, _auth_aws, _AWSCredential from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep @@ -51,6 +52,7 @@ ['GSSAPI', 'MONGODB-CR', 'MONGODB-X509', + 'MONGODB-AWS', 'PLAIN', 'SCRAM-SHA-1', 'SCRAM-SHA-256', @@ -100,10 +102,14 @@ def __hash__(self): """Mechanism properties for GSSAPI authentication.""" +_AWSProperties = namedtuple('AWSProperties', ['aws_session_token']) +"""Mechanism properties for MONGODB-AWS authentication.""" + + def _build_credentials_tuple(mech, source, user, passwd, extra, database): """Build and return a mechanism specific credentials tuple. """ - if mech != 'MONGODB-X509' and user is None: + if mech not in ('MONGODB-X509', 'MONGODB-AWS') and user is None: raise ConfigurationError("%s requires a username." % (mech,)) if mech == 'GSSAPI': if source is not None and source != '$external': @@ -126,8 +132,22 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): raise ValueError( "authentication source must be " "$external or None for MONGODB-X509") - # user can be None. + # Source is always $external, user can be None. return MongoCredential(mech, '$external', user, None, None, None) + elif mech == 'MONGODB-AWS': + if user is not None and passwd is None: + raise ConfigurationError( + "username without a password is not supported by MONGODB-AWS") + if source is not None and source != '$external': + raise ConfigurationError( + "authentication source must be " + "$external or None for MONGODB-AWS") + + properties = extra.get('authmechanismproperties', {}) + aws_session_token = properties.get('AWS_SESSION_TOKEN') + props = _AWSProperties(aws_session_token=aws_session_token) + # user can be None for temporary link-local EC2 credentials. + return MongoCredential(mech, '$external', user, passwd, props, None) elif mech == 'PLAIN': source_database = source or database or '$external' return MongoCredential(mech, source_database, user, passwd, None, None) @@ -507,6 +527,20 @@ def _authenticate_x509(credentials, sock_info): sock_info.command('$external', query) +def _authenticate_aws(credentials, sock_info): + """Authenticate using MONGODB-AWS. + """ + if not _HAVE_MONGODB_AWS: + raise ConfigurationError( + "MONGODB-AWS authentication requires botocore and requests: " + "install these libraries with: " + "python -m pip install 'pymongo[aws]'") + + _auth_aws(_AWSCredential( + credentials.username, credentials.password, + credentials.mechanism_properties.aws_session_token), sock_info) + + def _authenticate_mongo_cr(credentials, sock_info): """Authenticate using MONGODB-CR. """ @@ -549,6 +583,7 @@ def _authenticate_default(credentials, sock_info): 'GSSAPI': _authenticate_gssapi, 'MONGODB-CR': _authenticate_mongo_cr, 'MONGODB-X509': _authenticate_x509, + 'MONGODB-AWS': _authenticate_aws, 'PLAIN': _authenticate_plain, 'SCRAM-SHA-1': functools.partial( _authenticate_scram, mechanism='SCRAM-SHA-1'), diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py new file mode 100644 index 0000000000..fdb6ec88f8 --- /dev/null +++ b/pymongo/auth_aws.py @@ -0,0 +1,201 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-AWS Authentication helpers.""" + +import os + +try: + + from botocore.auth import SigV4Auth + from botocore.awsrequest import AWSRequest + from botocore.credentials import Credentials + + import requests + + _HAVE_MONGODB_AWS = True +except ImportError: + _HAVE_MONGODB_AWS = False + +import bson + + +from base64 import standard_b64encode +from collections import namedtuple + +from bson.binary import Binary +from bson.son import SON +from pymongo.errors import ConfigurationError, OperationFailure + + +_AWS_REL_URI = 'http://169.254.170.2/' +_AWS_EC2_URI = 'http://169.254.169.254/' +_AWS_EC2_PATH = 'latest/meta-data/iam/security-credentials/' +_AWS_HTTP_TIMEOUT = 10 + + +_AWSCredential = namedtuple('_AWSCredential', + ['username', 'password', 'token']) +"""MONGODB-AWS credentials.""" + + +def _aws_temp_credentials(): + """Construct temporary MONGODB-AWS credentials.""" + access_key = os.environ.get('AWS_ACCESS_KEY_ID') + secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY') + if access_key and secret_key: + return _AWSCredential( + access_key, secret_key, os.environ.get('AWS_SESSION_TOKEN')) + # If the environment variable + # AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set then drivers MUST + # assume that it was set by an AWS ECS agent and use the URI + # http://169.254.170.2/$AWS_CONTAINER_CREDENTIALS_RELATIVE_URI to + # obtain temporary credentials. + relative_uri = os.environ.get('AWS_CONTAINER_CREDENTIALS_RELATIVE_URI') + if relative_uri is not None: + try: + res = requests.get(_AWS_REL_URI+relative_uri, + timeout=_AWS_HTTP_TIMEOUT) + res_json = res.json() + except (ValueError, requests.exceptions.RequestException): + raise OperationFailure( + 'temporary MONGODB-AWS credentials could not be obtained') + else: + # If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is + # not set drivers MUST assume we are on an EC2 instance and use the + # endpoint + # http://169.254.169.254/latest/meta-data/iam/security-credentials + # / + # whereas role-name can be obtained from querying the URI + # http://169.254.169.254/latest/meta-data/iam/security-credentials/. + try: + # Get token + headers = {'X-aws-ec2-metadata-token-ttl-seconds': "30"} + res = requests.post(_AWS_EC2_URI+'latest/api/token', + headers=headers, timeout=_AWS_HTTP_TIMEOUT) + token = res.content + # Get role name + headers = {'X-aws-ec2-metadata-token': token} + res = requests.get(_AWS_EC2_URI+_AWS_EC2_PATH, headers=headers, + timeout=_AWS_HTTP_TIMEOUT) + role = res.text + # Get temp creds + res = requests.get(_AWS_EC2_URI+_AWS_EC2_PATH+role, + headers=headers, timeout=_AWS_HTTP_TIMEOUT) + res_json = res.json() + except (ValueError, requests.exceptions.RequestException): + raise OperationFailure( + 'temporary MONGODB-AWS credentials could not be obtained') + + try: + temp_user = res_json['AccessKeyId'] + temp_password = res_json['SecretAccessKey'] + token = res_json['Token'] + except KeyError: + # If temporary credentials cannot be obtained then drivers MUST + # fail authentication and raise an error. + raise OperationFailure( + 'temporary MONGODB-AWS credentials could not be obtained') + + return _AWSCredential(temp_user, temp_password, token) + + +_AWS4_HMAC_SHA256 = 'AWS4-HMAC-SHA256' +_AWS_SERVICE = 'sts' + + +def _get_region(sts_host): + """""" + parts = sts_host.split('.') + if len(parts) == 1 or sts_host == 'sts.amazonaws.com': + return 'us-east-1' # Default + + if len(parts) > 2 or not all(parts): + raise OperationFailure("Server returned an invalid sts host") + + return parts[1] + + +def _aws_auth_header(credentials, server_nonce, sts_host): + """Signature Version 4 Signing Process to construct the authorization header + """ + region = _get_region(sts_host) + + request_parameters = 'Action=GetCallerIdentity&Version=2011-06-15' + encoded_nonce = standard_b64encode(server_nonce).decode('utf8') + request_headers = { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Content-Length': str(len(request_parameters)), + 'Host': sts_host, + 'X-MongoDB-Server-Nonce': encoded_nonce, + 'X-MongoDB-GS2-CB-Flag': 'n', + } + request = AWSRequest(method="POST", url="/", data=request_parameters, + headers=request_headers) + boto_creds = Credentials(credentials.username, credentials.password, + token=credentials.token) + auth = SigV4Auth(boto_creds, "sts", region) + auth.add_auth(request) + final = { + 'a': request.headers['Authorization'], + 'd': request.headers['X-Amz-Date'] + } + if credentials.token: + final['t'] = credentials.token + return final + + +def _auth_aws(credentials, sock_info): + """Authenticate using MONGODB-AWS. + """ + if not _HAVE_MONGODB_AWS: + raise ConfigurationError( + "MONGODB-AWS authentication requires botocore and requests: " + "install these libraries with: " + "python -m pip install 'pymongo[aws]'") + + if sock_info.max_wire_version < 9: + raise ConfigurationError( + "MONGODB-AWS authentication requires MongoDB version 4.4 or later") + + # If a username and password are not provided, drivers MUST query + # a link-local AWS address for temporary credentials. + if credentials.username is None: + credentials = _aws_temp_credentials() + + # Client first. + client_nonce = os.urandom(32) + payload = {'r': Binary(client_nonce), 'p': 110} + client_first = SON([('saslStart', 1), + ('mechanism', 'MONGODB-AWS'), + ('payload', Binary(bson.encode(payload)))]) + server_first = sock_info.command('$external', client_first) + + server_payload = bson.decode(server_first['payload']) + server_nonce = server_payload['s'] + if len(server_nonce) != 64 or not server_nonce.startswith(client_nonce): + raise OperationFailure("Server returned an invalid nonce.") + sts_host = server_payload['h'] + if len(sts_host) < 1 or len(sts_host) > 255 or '..' in sts_host: + # Drivers must also validate that the host is greater than 0 and less + # than or equal to 255 bytes per RFC 1035. + raise OperationFailure("Server returned an invalid sts host.") + + payload = _aws_auth_header(credentials, server_nonce, sts_host) + client_second = SON([('saslContinue', 1), + ('conversationId', server_first['conversationId']), + ('payload', Binary(bson.encode(payload)))]) + res = sock_info.command('$external', client_second) + if not res['done']: + raise OperationFailure('MONGODB-AWS conversation failed to complete.') diff --git a/pymongo/common.py b/pymongo/common.py index f208e83d08..12777b7acf 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -22,7 +22,7 @@ from bson.binary import (STANDARD, PYTHON_LEGACY, JAVA_LEGACY, CSHARP_LEGACY) from bson.codec_options import CodecOptions, TypeRegistry -from bson.py3compat import abc, integer_types, iteritems, string_type +from bson.py3compat import abc, integer_types, iteritems, string_type, PY3 from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS from pymongo.compression_support import (validate_compressors, @@ -43,6 +43,10 @@ except ImportError: ORDERED_TYPES = (SON,) +if PY3: + from urllib.parse import unquote_plus +else: + from urllib import unquote_plus # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024 ** 2) @@ -391,8 +395,11 @@ def validate_read_preference_tags(name, value): tag_sets.append({}) continue try: - tag_sets.append(dict([tag.split(":") - for tag in tag_set.split(",")])) + tags = {} + for tag in tag_set.split(","): + key, val = tag.split(":") + tags[unquote_plus(key)] = unquote_plus(val) + tag_sets.append(tags) except Exception: raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) @@ -401,7 +408,8 @@ def validate_read_preference_tags(name, value): _MECHANISM_PROPS = frozenset(['SERVICE_NAME', 'CANONICALIZE_HOST_NAME', - 'SERVICE_REALM']) + 'SERVICE_REALM', + 'AWS_SESSION_TOKEN']) def validate_auth_mechanism_properties(option, value): @@ -412,6 +420,10 @@ def validate_auth_mechanism_properties(option, value): try: key, val = opt.split(':') except ValueError: + # Try not to leak the token. + if 'AWS_SESSION_TOKEN' in opt: + opt = ('AWS_SESSION_TOKEN:, did you forget ' + 'to percent-escape the token with quote_plus?') raise ValueError("auth mechanism properties must be " "key:value pairs like SERVICE_NAME:" "mongodb, not %s." % (opt,)) @@ -422,7 +434,7 @@ def validate_auth_mechanism_properties(option, value): if key == 'CANONICALIZE_HOST_NAME': props[key] = validate_boolean_or_string(key, val) else: - props[key] = val + props[key] = unquote_plus(val) return props diff --git a/pymongo/database.py b/pymongo/database.py index d363d3b90a..144a1f6b40 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1437,7 +1437,9 @@ def authenticate(self, name=None, password=None, - `authMechanismProperties` (optional): Used to specify authentication mechanism specific options. To specify the service name for GSSAPI authentication pass - authMechanismProperties='SERVICE_NAME:' + ``authMechanismProperties='SERVICE_NAME:'``. + To specify the session token for MONGODB-AWS authentication pass + ``authMechanismProperties='AWS_SESSION_TOKEN:'``. .. versionchanged:: 3.7 Added support for SCRAM-SHA-256 with MongoDB 4.0 and later. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 8294af14e4..9b33fa2cd3 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -411,7 +411,9 @@ def __init__( - `authMechanismProperties`: Used to specify authentication mechanism specific options. To specify the service name for GSSAPI authentication pass authMechanismProperties='SERVICE_NAME:' + name>'. + To specify the session token for MONGODB-AWS authentication pass + ``authMechanismProperties='AWS_SESSION_TOKEN:'``. .. seealso:: :doc:`/examples/authentication` @@ -520,7 +522,7 @@ def __init__( .. versionchanged:: 3.5 Add ``username`` and ``password`` options. Document the - ``authSource``, ``authMechanism``, and ``authMechanismProperties `` + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` options. Deprecated the ``socketKeepAlive`` keyword argument and URI option. ``socketKeepAlive`` now defaults to ``True``. diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 131f54d8f1..a6b0a46cda 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -146,7 +146,11 @@ def _parse_options(opts, delim): else: if key in options: warnings.warn("Duplicate URI option '%s'." % (key,)) - options[key] = unquote_plus(value) + if key.lower() == 'authmechanismproperties': + val = value + else: + val = unquote_plus(value) + options[key] = val return options @@ -417,24 +421,19 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, "the host list and any options.") if path_part: - if path_part[0] == '?': - opts = unquote_plus(path_part[1:]) - else: - dbase, _, opts = map(unquote_plus, path_part.partition('?')) + dbase, _, opts = path_part.partition('?') + if dbase: + dbase = unquote_plus(dbase) if '.' in dbase: dbase, collection = dbase.split('.', 1) - if _BAD_DB_CHARS.search(dbase): raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None if opts: options.update(split_options(opts, validate, warn, normalize)) - if dbase is not None: - dbase = unquote_plus(dbase) - if collection is not None: - collection = unquote_plus(collection) - if '@' in host_part: userinfo, _, hosts = host_part.rpartition('@') user, passwd = parse_userinfo(userinfo) diff --git a/setup.py b/setup.py index 4c84915059..ba051f73be 100755 --- a/setup.py +++ b/setup.py @@ -329,6 +329,7 @@ def build_extension(self, ext): 'snappy': ['python-snappy'], 'tls': [], 'zstd': ['zstandard'], + 'aws': ['requests<3.0.0', 'botocore'], } # https://jira.mongodb.org/browse/PYTHON-2117 diff --git a/test/auth/connection-string.json b/test/auth/connection-string.json index 2005a090ab..5452912e87 100644 --- a/test/auth/connection-string.json +++ b/test/auth/connection-string.json @@ -376,6 +376,50 @@ "description": "should throw an exception if no username/password provided (userinfo implies default mechanism)", "uri": "mongodb://:@localhost.com/", "valid": false + }, + { + "description": "should recognise the mechanism (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if username and no password (MONGODB-AWS)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", + "valid": false, + "credential": null + }, + { + "description": "should use username and password if specified (MONGODB-AWS)", + "uri": "mongodb://user%21%40%23%24%25%5E%26%2A%28%29_%2B:pass%21%40%23%24%25%5E%26%2A%28%29_%2B@localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": "user!@#$%^&*()_+", + "password": "pass!@#$%^&*()_+", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should use username, password and session token if specified (MONGODB-AWS)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:token%21%40%23%24%25%5E%26%2A%28%29_%2B", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": { + "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" + } + } } ] -} \ No newline at end of file +} diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py new file mode 100644 index 0000000000..d17ebb5aac --- /dev/null +++ b/test/auth_aws/test_auth_aws.py @@ -0,0 +1,60 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-AWS Authentication.""" + +import os +import sys +import unittest + +sys.path[0:0] = [""] + +from pymongo import MongoClient +from pymongo.errors import OperationFailure +from pymongo.uri_parser import parse_uri + + +if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +class TestAuthAWS(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.uri = os.environ['MONGODB_URI'] + + def test_should_fail_without_credentials(self): + if '@' not in self.uri: + self.skipTest('MONGODB_URI already has no credentials') + + hosts = ['%s:%s' % addr for addr in parse_uri(self.uri)['nodelist']] + self.assertTrue(hosts) + with MongoClient(hosts) as client: + with self.assertRaises(OperationFailure): + client.aws.test.find_one() + + def test_should_fail_incorrect_credentials(self): + with MongoClient(self.uri, username='fake', password='fake', + authMechanism='MONGODB-AWS') as client: + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + def test_connect_uri(self): + with MongoClient(self.uri) as client: + client.get_database().test.find_one() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 4e76a97dd7..1b6b919e80 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -72,10 +72,18 @@ def run_test(self): self.assertEqual( actual.service_realm, expected['SERVICE_REALM']) + elif 'AWS_SESSION_TOKEN' in expected: + self.assertEqual( + actual.aws_session_token, + expected['AWS_SESSION_TOKEN']) else: self.fail('Unhandled property: %s' % (key,)) else: - self.assertIsNone(credentials.mechanism_properties) + if credential['mechanism'] == 'MONGODB-AWS': + self.assertIsNone( + credentials.mechanism_properties.aws_session_token) + else: + self.assertIsNone(credentials.mechanism_properties) return run_test diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 4fb60c8433..a921c66563 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -498,6 +498,43 @@ def test_waitQueueMultiple_deprecated(self): self.assertEqual(len(ctx), 1) self.assertTrue(issubclass(ctx[0].category, DeprecationWarning)) + def test_unquote_after_parsing(self): + quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" + unquoted_val = "val!@#$%^&*()_+,: etc" + uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:"+quoted_val) + res = parse_uri(uri) + options = { + 'authmechanism': 'MONGODB-AWS', + 'authmechanismproperties': { + 'AWS_SESSION_TOKEN': unquoted_val}} + self.assertEqual(options, res['options']) + + uri = (("mongodb://localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,"+quoted_val+":"+quoted_val+"&" + "readpreferencetags=dc:east,use:"+quoted_val)) + res = parse_uri(uri) + options = { + 'readpreference': ReadPreference.SECONDARY.mongos_mode, + 'readpreferencetags': [ + {'dc': 'west', unquoted_val: unquoted_val}, + {'dc': 'east', 'use': unquoted_val} + ] + } + self.assertEqual(options, res['options']) + + def test_redact_AWS_SESSION_TOKEN(self): + unquoted_colon = "token:" + uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:"+unquoted_colon) + with self.assertRaisesRegex( + ValueError, + 'auth mechanism properties must be key:value pairs like ' + 'SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:' + ', did you forget to percent-escape the token with ' + 'quote_plus?'): + parse_uri(uri) + if __name__ == "__main__": unittest.main() From 043c8e822bb4baa542ad74fc3477bce759db2a42 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 27 Feb 2020 13:36:16 -0800 Subject: [PATCH 0074/2111] PYTHON-2140 Test PyOpenSSL on macOS Handle the case where the peer omits the self-signed issuer cert and OCSP is not requested by delaying issuer check. Properly set PYMONGO_MUST_CONNECT in PyOpenSSL tests. Properly set PYTHON_BINARY in OCSP test. --- .evergreen/config.yml | 13 +++++++++++++ .evergreen/run-ocsp-tests.sh | 16 +++++++++++----- .evergreen/run-pyopenssl-tests.sh | 12 +++++++++--- pymongo/ocsp_support.py | 9 ++++++--- test/test_ssl.py | 2 +- 5 files changed, 40 insertions(+), 12 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 443b305ac0..774a84beda 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -347,6 +347,9 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} + if [ -n "${MONGODB_STARTED}" ]; then + export PYMONGO_MUST_CONNECT=1 + fi PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-pyopenssl-tests.sh "run doctests": @@ -662,6 +665,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} + PYTHON_BINARY=${PYTHON_BINARY} \ CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/rsa/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ sh ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-tests.sh @@ -1740,6 +1744,15 @@ buildvariants: tasks: - "pyopenssl" +- matrix_name: "tests-pyopenssl-macOS" + matrix_spec: + platform: macos-1014 + auth: "*" + ssl: "ssl" + display_name: "PyOpenSSL ${platform} ${auth}" + tasks: + - "pyopenssl" + - matrix_name: "tests-python-version-rhel62-test-encryption" matrix_spec: platform: rhel62 diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh index d3532352cf..fdfea0641b 100644 --- a/.evergreen/run-ocsp-tests.sh +++ b/.evergreen/run-ocsp-tests.sh @@ -13,9 +13,15 @@ if [ -z "$PYTHON_BINARY" ]; then fi $PYTHON_BINARY -m virtualenv --never-download --no-wheel ocsptest - . ocsptest/bin/activate - trap "deactivate; rm -rf ocsptest" EXIT HUP - pip install pyopenssl requests service_identity - PYTHON=python +. ocsptest/bin/activate +trap "deactivate; rm -rf ocsptest" EXIT HUP -OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED} CA_FILE=${CA_FILE} $PYTHON test/ocsp/test_ocsp.py +IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") +if [ $IS_PYTHON_2 = "1" ]; then + echo "Using a Python 2" + pip install --upgrade 'setuptools<45' +fi + +pip install pyopenssl requests service_identity + +OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED} CA_FILE=${CA_FILE} python test/ocsp/test_ocsp.py diff --git a/.evergreen/run-pyopenssl-tests.sh b/.evergreen/run-pyopenssl-tests.sh index 0ef5d2c485..4d27ee3018 100644 --- a/.evergreen/run-pyopenssl-tests.sh +++ b/.evergreen/run-pyopenssl-tests.sh @@ -19,9 +19,15 @@ else fi $PYTHON -m virtualenv pyopenssltest -trap "deactivate; rm -rf pyopenssltest" EXIT HUP . pyopenssltest/bin/activate -pip install pyopenssl>=17.2.0 "requests<3.0.0" service_identity>=18.1.0 -pip list +trap "deactivate; rm -rf pyopenssltest" EXIT HUP + +IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") +if [ $IS_PYTHON_2 = "1" ]; then + echo "Using a Python 2" + pip install --upgrade 'setuptools<45' +fi + +pip install pyopenssl requests service_identity python -c 'import sys; print(sys.version)' python setup.py test diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index d3ec194645..24a437460d 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -237,9 +237,6 @@ def ocsp_callback(conn, ocsp_bytes, user_data): cert = conn.get_peer_certificate().to_cryptography() chain = [cer.to_cryptography() for cer in conn.get_peer_cert_chain()] issuer = _get_issuer_cert(cert, chain) - if issuer is None: - _LOGGER.debug("No issuer cert?") - return 0 must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 ext = _get_extension(cert, _TLSFeature) @@ -268,6 +265,9 @@ def ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No OCSP URI, soft fail") # No responder URI, soft fail. return 1 + if issuer is None: + _LOGGER.debug("No issuer cert?") + return 0 _LOGGER.debug("Requesting OCSP data") # When requesting data from an OCSP endpoint we only fail on # successful, valid responses with a certificate status of REVOKED. @@ -291,6 +291,9 @@ def ocsp_callback(conn, ocsp_bytes, user_data): return 1 _LOGGER.debug("Peer stapled an OCSP response") + if issuer is None: + _LOGGER.debug("No issuer cert?") + return 0 response = _load_der_ocsp_response(ocsp_bytes) _LOGGER.debug( "OCSP response status: %r", response.response_status) diff --git a/test/test_ssl.py b/test/test_ssl.py index d0aa9cbf82..c2b8deae35 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -442,7 +442,7 @@ def test_validation_with_system_ca_certs(self): raise SkipTest("Can't load system CA certificates.") if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and - sys.platform == 'darwin'): + sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL): raise SkipTest( "LibreSSL on OSX doesn't support setting CA certificates " "using SSL_CERT_FILE environment variable.") From e485c0710961acdfc21923f1417ad3fed9786ad3 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 2 Mar 2020 17:51:06 -0800 Subject: [PATCH 0075/2111] PYTHON-2036 Update documentation and changelog to reflect expanded support for index hinting --- doc/api/pymongo/collection.rst | 4 ++-- doc/changelog.rst | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index d7b1f0dd8b..d68653070a 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -51,8 +51,8 @@ .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) .. automethod:: find_one(filter=None, *args, **kwargs) .. automethod:: find_one_and_delete - .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, session=None, **kwargs) - .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, session=None, **kwargs) + .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) + .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, **kwargs) .. automethod:: count_documents .. automethod:: estimated_document_count .. automethod:: distinct diff --git a/doc/changelog.rst b/doc/changelog.rst index 9cbf8eebcb..cfee53ab88 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,16 @@ Changes in Version 3.11.0 Version 3.11 adds support for MongoDB 4.4. Highlights include: +- Added index hinting support to the + :meth:`pymongo.collection.Collection.replace_one`, + :meth:`pymongo.collection.Collection.update_one`, + :meth:`pymongo.collection.Collection.update_many`, + :meth:`pymongo.collection.Collection.find_one_and_replace`, + and :meth:`pymongo.collection.Collection.find_one_and_update` commands. +- Added index hinting support to the + :class:`pymongo.operations.ReplaceOne`, + :class:`pymongo.operations.UpdateOne`, + and :class:`pymongo.operations.UpdateMany` bulk operations. - Support for :ref:`OCSP` (Online Certificate Status Protocol) - Support for `PyOpenSSL `_ as an alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` From 2157dc58ea07e033760bcc81fee386f9f879edd2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 3 Mar 2020 11:55:43 -0800 Subject: [PATCH 0076/2111] PYTHON-2148 Test PyOpenSSL support on Windows --- .evergreen/config.yml | 34 ++++++++++++++++++++++++------- .evergreen/run-ocsp-tests.sh | 28 ++++++++++++++++++++++--- .evergreen/run-pyopenssl-tests.sh | 25 +++++++++++++++++++++-- 3 files changed, 75 insertions(+), 12 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 774a84beda..2bea80c427 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1288,6 +1288,12 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz + - id: windows-64-vsMulti-small + display_name: "Windows 64" + run_on: windows-64-vsMulti-small + batchtime: 10080 # 7 days + variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz # Test with authentication? - id: auth @@ -1399,23 +1405,27 @@ axes: - id: "win-vs2010-3.4" display_name: "Python 3.4" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python34/python.exe" + PYTHON_BINARY: "C:/python/Python34/python.exe" - id: "win-vs2015-2.7" display_name: "Python 2.7" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python27/python.exe" + PYTHON_BINARY: "C:/python/Python27/python.exe" - id: "win-vs2015-3.5" display_name: "Python 3.5" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python35/python.exe" + PYTHON_BINARY: "C:/python/Python35/python.exe" - id: "win-vs2015-3.6" display_name: "Python 3.6" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python36/python.exe" + PYTHON_BINARY: "C:/python/Python36/python.exe" - id: "win-vs2015-3.7" display_name: "Python 3.7" variables: - PYTHON_BINARY: "/cygdrive/c/python/Python37/python.exe" + PYTHON_BINARY: "C:/python/Python37/python.exe" + - id: "win-vs2015-3.8" + display_name: "Python 3.8" + variables: + PYTHON_BINARY: "C:/python/Python38/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version @@ -1729,7 +1739,7 @@ buildvariants: python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "pypy", "pypy3.5"] auth: "*" ssl: "ssl" - display_name: "PyOpenSSL ${python-version} ${platform} ${auth}" + display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - "pyopenssl" @@ -1740,7 +1750,7 @@ buildvariants: - rhel70 # CPython 2.7.5 auth: "*" ssl: "ssl" - display_name: "PyOpenSSL Pre-2.7.9 Python ${platform} ${auth}" + display_name: "PyOpenSSL ${platform} Pre-2.7.9 Python ${auth}" tasks: - "pyopenssl" @@ -1753,6 +1763,16 @@ buildvariants: tasks: - "pyopenssl" +- matrix_name: "tests-pyopenssl-windows" + matrix_spec: + platform: windows-64-vsMulti-small + python-version: ["win-vs2015-2.7", "win-vs2010-3.4", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7", "win-vs2015-3.8"] + auth: "*" + ssl: "ssl" + display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" + tasks: + - "pyopenssl" + - matrix_name: "tests-python-version-rhel62-test-encryption" matrix_spec: platform: rhel62 diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh index fdfea0641b..25006811dc 100644 --- a/.evergreen/run-ocsp-tests.sh +++ b/.evergreen/run-ocsp-tests.sh @@ -5,20 +5,42 @@ set -o errexit if [ -z "$PYTHON_BINARY" ]; then echo "No python binary specified" - PYTHON_BINARY=$(command -v python || command -v python3) || true + PYTHON=$(command -v python || command -v python3) || true if [ -z "$PYTHON_BINARY" ]; then echo "Cannot test without python or python3 installed!" exit 1 fi +else + PYTHON="$PYTHON_BINARY" fi -$PYTHON_BINARY -m virtualenv --never-download --no-wheel ocsptest -. ocsptest/bin/activate +if $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" +elif command -v virtualenv; then + # We can remove this fallback after: + # https://github.com/10gen/mongo-python-toolchain/issues/8 + VIRTUALENV="$(command -v virtualenv) -p $PYTHON" +else + echo "Cannot test without virtualenv" + exit 1 +fi + +$VIRTUALENV --never-download --no-wheel ocsptest +if [ "Windows_NT" = "$OS" ]; then + . ocsptest/Scripts/activate +else + . ocsptest/bin/activate +fi trap "deactivate; rm -rf ocsptest" EXIT HUP IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") if [ $IS_PYTHON_2 = "1" ]; then echo "Using a Python 2" + # Upgrade pip to install the cryptography wheel and not the tar. + # <20.1 because 20.0.2 says a future release may drop support for 2.7. + pip install --upgrade 'pip<20.1' + # Upgrade setuptools because cryptography requires 18.5+. + # <45 because 45.0 dropped support for 2.7. pip install --upgrade 'setuptools<45' fi diff --git a/.evergreen/run-pyopenssl-tests.sh b/.evergreen/run-pyopenssl-tests.sh index 4d27ee3018..ea4271572b 100644 --- a/.evergreen/run-pyopenssl-tests.sh +++ b/.evergreen/run-pyopenssl-tests.sh @@ -9,6 +9,7 @@ export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" if [ -z "$PYTHON_BINARY" ]; then + echo "No python binary specified" PYTHON=$(command -v python || command -v python3) || true if [ -z "$PYTHON" ]; then echo "Cannot test without python or python3 installed!" @@ -18,13 +19,33 @@ else PYTHON="$PYTHON_BINARY" fi -$PYTHON -m virtualenv pyopenssltest -. pyopenssltest/bin/activate +if $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" +elif command -v virtualenv; then + # We can remove this fallback after: + # https://github.com/10gen/mongo-python-toolchain/issues/8 + VIRTUALENV="$(command -v virtualenv) -p $PYTHON" +else + echo "Cannot test without virtualenv" + exit 1 +fi + +$VIRTUALENV pyopenssltest +if [ "Windows_NT" = "$OS" ]; then + . pyopenssltest/Scripts/activate +else + . pyopenssltest/bin/activate +fi trap "deactivate; rm -rf pyopenssltest" EXIT HUP IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") if [ $IS_PYTHON_2 = "1" ]; then echo "Using a Python 2" + # Upgrade pip to install the cryptography wheel and not the tar. + # <20.1 because 20.0.2 says a future release may drop support for 2.7. + pip install --upgrade 'pip<20.1' + # Upgrade setuptools because cryptography requires 18.5+. + # <45 because 45.0 dropped support for 2.7. pip install --upgrade 'setuptools<45' fi From ce601190cf8e73794a34e681bd860d55bb89a324 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 2 Mar 2020 17:25:54 -0800 Subject: [PATCH 0077/2111] PYTHON-2097 Deprecate oplogReplay find command option --- doc/changelog.rst | 4 ++++ doc/examples/tailable.rst | 6 ++++-- pymongo/collection.py | 12 ++++++++---- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index cfee53ab88..83f82b023f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,10 @@ Changes in Version 3.11.0 Version 3.11 adds support for MongoDB 4.4. Highlights include: +- Deprecated the ``oplog_replay`` parameter to + :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the + server optimizes queries against the oplog collection without requiring + the user to set this flag. - Added index hinting support to the :meth:`pymongo.collection.Collection.replace_one`, :meth:`pymongo.collection.Collection.update_one`, diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst index b9b6dcd74d..482b049c56 100644 --- a/doc/examples/tailable.rst +++ b/doc/examples/tailable.rst @@ -24,9 +24,11 @@ of a replica set member:: while True: # For a regular capped collection CursorType.TAILABLE_AWAIT is the # only option required to create a tailable cursor. When querying the - # oplog the oplog_replay option enables an optimization to quickly + # oplog, the oplog_replay option enables an optimization to quickly # find the 'ts' value we're looking for. The oplog_replay option - # can only be used when querying the oplog. + # can only be used when querying the oplog. Starting in MongoDB 4.4 + # this option is ignored by the server as queries against the oplog + # are optimized automatically by the MongoDB query engine. cursor = oplog.find({'ts': {'$gt': ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True) diff --git a/pymongo/collection.py b/pymongo/collection.py index 60889fd9a0..4e3d1d13f1 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1366,12 +1366,12 @@ def find(self, *args, **kwargs): - `allow_partial_results` (optional): if True, mongos will return partial results if some shards are down instead of returning an error. - - `oplog_replay` (optional): If True, set the oplogReplay query - flag. + - `oplog_replay` (optional): **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. - `batch_size` (optional): Limits the number of documents returned in a single batch. - - `manipulate` (optional): **DEPRECATED** - If True (the default), - apply any outgoing SON manipulators before returning. + - `manipulate` (optional): **DEPRECATED** - If True, apply any + outgoing SON manipulators before returning. Default: True. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. @@ -1436,6 +1436,10 @@ def find(self, *args, **kwargs): .. versionchanged:: 3.11 Added the ``allow_disk_use`` option. + Deprecated the ``oplog_replay`` option. Support for this option is + deprecated in MongoDB 4.4. The query engine now automatically + optimizes queries against the oplog without requiring this + option to be set. .. versionchanged:: 3.7 Deprecated the ``snapshot`` option, which is deprecated in MongoDB From bb272b1d544a6f69c905949b0708022421bbd89d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 3 Mar 2020 15:22:17 -0800 Subject: [PATCH 0078/2111] PYTHON-2139 Test MONGODB-AWS auth on macOS and Windows Replace jq with python as macOS does not have jq. Use sys.stdout.write instead of print to avoid trailing newlines. --- .evergreen/config.yml | 81 +++++++++++++++++++++++------- .evergreen/run-mongodb-aws-test.sh | 25 +++++++-- 2 files changed, 86 insertions(+), 20 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2bea80c427..b7b5ab39fc 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -476,7 +476,7 @@ functions: silent: true script: | cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' + alias urlencode='python -c "import sys, urllib as ul; sys.stdout.write(ul.quote_plus(sys.argv[1]))"' USER=$(urlencode ${iam_auth_ecs_account}) PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) MONGODB_URI="mongodb://$USER:$PASS@localhost" @@ -487,7 +487,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh "run aws auth test with assume role credentials": - command: shell.exec @@ -496,6 +496,14 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} + # The aws_e2e_assume_role script requires python3 with boto3. + virtualenv -p ${python3_binary} mongovenv + if [ "Windows_NT" = "$OS" ]; then + . mongovenv/Scripts/activate + else + . mongovenv/bin/activate + fi + pip install boto3 cd ${DRIVERS_TOOLS}/.evergreen/auth_aws mongo aws_e2e_assume_role.js - command: shell.exec @@ -506,12 +514,13 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' - USER=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + alias urlencode='python -c "import sys, urllib as ul; sys.stdout.write(ul.quote_plus(sys.argv[1]))"' + alias jsonkey='python -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' + USER=$(jsonkey AccessKeyId) USER=$(urlencode $USER) - PASS=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + PASS=$(jsonkey SecretAccessKey) PASS=$(urlencode $PASS) - SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + SESSION_TOKEN=$(jsonkey SessionToken) SESSION_TOKEN=$(urlencode $SESSION_TOKEN) MONGODB_URI="mongodb://$USER:$PASS@localhost" EOF @@ -521,7 +530,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws EC2 credentials": - command: shell.exec @@ -530,6 +539,16 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" == "true" ]; then + echo "This platform does not support the EC2 auth test, skipping..." + exit 0 + fi + # The mongovenv was created earlier in "run aws auth test with assume role credentials". + if [ "Windows_NT" = "$OS" ]; then + . mongovenv/Scripts/activate + else + . mongovenv/bin/activate + fi cd ${DRIVERS_TOOLS}/.evergreen/auth_aws mongo aws_e2e_ec2.js - command: shell.exec @@ -538,7 +557,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials as environment variables": - command: shell.exec @@ -558,7 +577,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials and session token as environment variables": - command: shell.exec @@ -569,9 +588,10 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - export AWS_ACCESS_KEY_ID=$(jq -r '.AccessKeyId' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - export AWS_SECRET_ACCESS_KEY=$(jq -r '.SecretAccessKey' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) - export AWS_SESSION_TOKEN=$(jq -r '.SessionToken' ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json) + alias jsonkey='python -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' + export AWS_ACCESS_KEY_ID=$(jsonkey AccessKeyId) + export AWS_SECRET_ACCESS_KEY=$(jsonkey SecretAccessKey) + export AWS_SESSION_TOKEN=$(jsonkey SessionToken) EOF - command: shell.exec type: test @@ -579,7 +599,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh "run aws ECS auth test": - command: shell.exec @@ -588,8 +608,11 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} + if [ "${skip_ECS_auth_test}" == "true" ]; then + echo "This platform does not support the ECS auth test, skipping..." + exit 0 + fi cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - cat < setup.js const mongo_binaries = "$MONGODB_BINARIES"; const project_dir = "$PROJECT_DIRECTORY"; @@ -1211,6 +1234,9 @@ axes: display_name: "macOS 10.14" run_on: macos-1014 variables: + skip_EC2_auth_test: true + skip_ECS_auth_test: true + python3_binary: python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 display_name: "RHEL 6.2 (x86_64)" @@ -1254,6 +1280,8 @@ axes: display_name: "Ubuntu 18.04" run_on: ubuntu1804-test batchtime: 10080 # 7 days + variables: + python3_binary: python3 - id: ubuntu1604-arm64-small display_name: "Ubuntu 16.04 (ARM64)" run_on: ubuntu1604-arm64-small @@ -1293,6 +1321,8 @@ axes: run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days variables: + skip_ECS_auth_test: true + python3_binary: "C:/python/Python38/python.exe" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz # Test with authentication? @@ -1426,6 +1456,15 @@ axes: display_name: "Python 3.8" variables: PYTHON_BINARY: "C:/python/Python38/python.exe" + # System python + - id: "system-python" + display_name: "Python" + variables: + PYTHON_BINARY: "python" + - id: "system-python3" + display_name: "Python3" + variables: + PYTHON_BINARY: "python3" # Choice of mod_wsgi version - id: mod-wsgi-version @@ -2141,9 +2180,17 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: ubuntu-18.04 - display_name: "MONGODB-AWS Auth test" - run_on: ubuntu1804-test + platform: [ubuntu-18.04, macos-1014] + python-version: ["system-python", "system-python3"] + display_name: "MONGODB-AWS Auth ${platform} ${python-version}" + tasks: + - name: "aws-auth-test" + +- matrix_name: "aws-auth-test-windows" + matrix_spec: + platform: [windows-64-vsMulti-small] + python-version: ["win-vs2015-2.7", "win-vs2010-3.4", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7", "win-vs2015-3.8"] + display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: - name: "aws-auth-test" diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index 5febd1302c..bc577808d2 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -7,6 +7,12 @@ set -o errexit # Exit the script with error if any of the commands fail # Main Program # ############################################ +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use +# to connect to the server via MONGODB-AWS authentication +# mechanism. +# PYTHON_BINARY The Python version to use. + echo "Running MONGODB-AWS authentication tests" # ensure no secrets are printed in log files set +x @@ -29,11 +35,19 @@ set -x VIRTUALENV=$(command -v virtualenv) authtest () { + if [ "Windows_NT" = "$OS" ]; then + PYTHON=$(cygpath -m $PYTHON) + fi + echo "Running MONGODB-AWS authentication tests with $PYTHON" $PYTHON --version $VIRTUALENV -p $PYTHON --system-site-packages --never-download venvaws - . venvaws/bin/activate + if [ "Windows_NT" = "$OS" ]; then + . venvaws/Scripts/activate + else + . venvaws/bin/activate + fi pip install requests botocore python test/auth_aws/test_auth_aws.py @@ -41,5 +55,10 @@ authtest () { rm -rf venvaws } -PYTHON=$(command -v python) authtest -PYTHON=$(command -v python3) authtest +PYTHON=${PYTHON_BINARY:-} +if [ -z "$PYTHON" ]; then + echo "Cannot test without specifying PYTHON_BINARY" + exit 1 +fi + +authtest From 04c3a48366c6b50be95110ff2513583212e25c98 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 Mar 2020 17:27:02 -0800 Subject: [PATCH 0079/2111] PYTHON-2153 Skip failing createIndexes wtimeout test on 4.3+ PYTHON-2154 Skip failing transactions test on 4.5+ --- test/test_examples.py | 1 + test/test_read_write_concern_spec.py | 22 ++++++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/test/test_examples.py b/test/test_examples.py index 4db7cf020d..e6fd1f3855 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -850,6 +850,7 @@ def setUpClass(cls): super(TestTransactionExamples, cls).setUpClass() cls.client = rs_or_single_client(w="majority") + @client_context.require_version_max(4, 4, 99) # PYTHON-2154 skip on 4.5+ @client_context.require_transactions def test_transactions(self): # Transaction examples diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 510792dacc..ad4f7b644e 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -97,7 +97,10 @@ def insert_command_default_write_concern(): name, event.command_name)) def assertWriteOpsRaise(self, write_concern, expected_exception): - client = rs_or_single_client(**write_concern.document) + wc = write_concern.document + # Set socket timeout to avoid indefinite stalls + client = rs_or_single_client( + w=wc['w'], wTimeoutMS=wc['wtimeout'], socketTimeoutMS=30000) db = client.get_database('pymongo_test') coll = db.test @@ -119,18 +122,25 @@ def insert_command(): ] ops_require_34 = [ ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])), - ('create_index', lambda: coll.create_index([('a', DESCENDING)])), - ('create_indexes', lambda: coll.create_indexes([IndexModel('b')])), - ('drop_index', lambda: coll.drop_index([('a', DESCENDING)])), ('create', lambda: db.create_collection('new')), ('rename', lambda: coll.rename('new')), ('drop', lambda: db.new.drop()), ] if client_context.version > (3, 4): ops.extend(ops_require_34) - # SERVER-34776: Drop database does not respect wtimeout in 4.0. - if client_context.version <= (3, 6): + # SERVER-34776: dropDatabase does not respect wtimeout in 3.6. + if client_context.version[:2] != (3, 6): ops.append(('drop_database', lambda: client.drop_database(db))) + # SERVER-46668: createIndexes does not respect wtimeout in 4.4+. + if client_context.version <= (4, 3): + ops.extend([ + ('create_index', + lambda: coll.create_index([('a', DESCENDING)])), + ('create_indexes', + lambda: coll.create_indexes([IndexModel('b')])), + ('drop_index', + lambda: coll.drop_index([('a', DESCENDING)])), + ]) for name, f in ops: # Ensure insert_many and bulk_write still raise BulkWriteError. From da778c501761f9c7fa0f2f1538e5d569d67ad044 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Mar 2020 10:35:41 -0800 Subject: [PATCH 0080/2111] PYTHON-2150 Fix ObjectId test on 32-bit platforms --- test/test_objectid.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/test_objectid.py b/test/test_objectid.py index 4e66b67fab..08398c4caa 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -189,9 +189,14 @@ def generate_objectid_with_timestamp(timestamp): for tstamp, exp_datetime_args in TEST_DATA.items(): oid = generate_objectid_with_timestamp(tstamp) - self.assertEqual( - oid.generation_time, - datetime.datetime(*exp_datetime_args, tzinfo=utc)) + if tstamp > 0x7FFFFFFF and sys.maxsize < 2**32: + # 32-bit platforms will overflow in datetime.fromtimestamp. + with self.assertRaises((OverflowError, ValueError)): + oid.generation_time + else: + self.assertEqual( + oid.generation_time, + datetime.datetime(*exp_datetime_args, tzinfo=utc)) def test_random_regenerated_on_pid_change(self): # Test that change of pid triggers new random number generation. From 5f45a69f703d295038e1ffe21855b0f5214d236d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 Mar 2020 16:35:29 -0700 Subject: [PATCH 0081/2111] PYTHON-2150 Fix test_timestamp_values on Jython --- test/test_objectid.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_objectid.py b/test/test_objectid.py index 08398c4caa..cb1f8bb49e 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -189,7 +189,8 @@ def generate_objectid_with_timestamp(timestamp): for tstamp, exp_datetime_args in TEST_DATA.items(): oid = generate_objectid_with_timestamp(tstamp) - if tstamp > 0x7FFFFFFF and sys.maxsize < 2**32: + if (not sys.platform.startswith("java") and + tstamp > 0x7FFFFFFF and sys.maxsize < 2**32): # 32-bit platforms will overflow in datetime.fromtimestamp. with self.assertRaises((OverflowError, ValueError)): oid.generation_time From c04a43396c1b94bc04343fbc21312b856f5f1c1b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 Mar 2020 16:14:58 -0700 Subject: [PATCH 0082/2111] PYTHON-2130 Note that $where does not support Code with scope in MongoDB 4.4+ --- pymongo/cursor.py | 14 ++++++++++++-- test/test_cursor.py | 17 ++++++++++++++--- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 52ed1c0c5e..f62f2f66f1 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -915,7 +915,7 @@ def comment(self, comment): return self def where(self, code): - """Adds a $where clause to this query. + """Adds a `$where`_ clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` @@ -923,7 +923,11 @@ def where(self, code): evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently - being scanned. + being scanned. For example:: + + # Find all documents where field "a" is less than "b" plus "c". + for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises @@ -931,8 +935,14 @@ def where(self, code): :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + :Parameters: - `code`: JavaScript expression to use as a filter + + .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ + .. _$where: https://docs.mongodb.com/manual/reference/operator/query/where/ """ self.__check_okay_to_chain() if not isinstance(code, Code): diff --git a/test/test_cursor.py b/test/test_cursor.py index c04c58def3..12720aa74d 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -829,7 +829,6 @@ def test_count_with_hint(self): self.assertEqual(2, collection.find().hint("x_1").count()) self.assertEqual(2, collection.find().hint([("x", 1)]).count()) - @client_context.require_version_max(4, 3, 2) # PYTHON-2130 @ignore_deprecations def test_where(self): db = self.db @@ -845,8 +844,20 @@ def test_where(self): self.assertEqual(3, len(list(db.test.find().where('this.x < 3')))) self.assertEqual(3, len(list(db.test.find().where(Code('this.x < 3'))))) - self.assertEqual(3, len(list(db.test.find().where(Code('this.x < i', - {"i": 3}))))) + + code_with_scope = Code('this.x < i', {"i": 3}) + if client_context.version.at_least(4, 3, 3): + # MongoDB 4.4 removed support for Code with scope. + with self.assertRaises(OperationFailure): + list(db.test.find().where(code_with_scope)) + + code_with_empty_scope = Code('this.x < 3', {}) + with self.assertRaises(OperationFailure): + list(db.test.find().where(code_with_empty_scope)) + else: + self.assertEqual( + 3, len(list(db.test.find().where(code_with_scope)))) + self.assertEqual(10, len(list(db.test.find()))) self.assertEqual(3, db.test.find().where('this.x < 3').count()) From 84f1a8c5f90c3d44ec2c8d6429bc8073ee9944a5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 27 Feb 2020 13:36:16 -0800 Subject: [PATCH 0083/2111] PYTHON-2144 Handle the case where the peer omits the self-signed issuer cert --- pymongo/ocsp_support.py | 47 ++++++++++++++++-- pymongo/pyopenssl_context.py | 19 ++++++-- test/certificates/trusted-ca.pem | 82 ++++++++++++++++++++++++++++++++ test/test_ssl.py | 32 ++++++++++++- test/utils.py | 9 ++++ 5 files changed, 180 insertions(+), 9 deletions(-) create mode 100644 test/certificates/trusted-ca.pem diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 24a437460d..daa83e39bd 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -15,6 +15,7 @@ """Support for requesting and verifying OCSP responses.""" import logging as _logging +import re as _re from datetime import datetime as _datetime @@ -39,6 +40,7 @@ AuthorityInformationAccess as _AuthorityInformationAccess, ExtendedKeyUsage as _ExtendedKeyUsage, ExtensionNotFound as _ExtensionNotFound, + load_pem_x509_certificate as _load_pem_x509_certificate, TLSFeature as _TLSFeature, TLSFeatureType as _TLSFeatureType) from cryptography.x509.oid import ( @@ -59,12 +61,39 @@ _LOGGER = _logging.getLogger(__name__) +_CERT_REGEX = _re.compile( + b'-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+', + _re.DOTALL) -def _get_issuer_cert(cert, chain): + +def _load_trusted_ca_certs(cafile): + """Parse the tlsCAFile into a list of certificates.""" + with open(cafile, 'rb') as f: + data = f.read() + + # Load all the certs in the file. + trusted_ca_certs = [] + backend = _default_backend() + for cert_data in _re.findall(_CERT_REGEX, data): + trusted_ca_certs.append( + _load_pem_x509_certificate(cert_data, backend)) + return trusted_ca_certs + + +def _get_issuer_cert(cert, chain, trusted_ca_certs): issuer_name = cert.issuer for candidate in chain: if candidate.subject == issuer_name: return candidate + + # Depending on the server's TLS library, the peer's cert chain may not + # include the self signed root CA. In this case we check the user + # provided tlsCAFile (ssl_ca_certs) for the issuer. + # Remove once we use the verified peer cert chain in PYTHON-2147. + if trusted_ca_certs: + for candidate in trusted_ca_certs: + if candidate.subject == issuer_name: + return candidate return None @@ -232,11 +261,19 @@ def _verify_response(issuer, response): return 1 -def ocsp_callback(conn, ocsp_bytes, user_data): +def _ocsp_callback(conn, ocsp_bytes, user_data): """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" - cert = conn.get_peer_certificate().to_cryptography() - chain = [cer.to_cryptography() for cer in conn.get_peer_cert_chain()] - issuer = _get_issuer_cert(cert, chain) + cert = conn.get_peer_certificate() + if cert is None: + _LOGGER.debug("No peer cert?") + return 0 + cert = cert.to_cryptography() + chain = conn.get_peer_cert_chain() + if not chain: + _LOGGER.debug("No peer cert chain?") + return 0 + chain = [cer.to_cryptography() for cer in chain] + issuer = _get_issuer_cert(cert, chain, user_data.trusted_ca_certs) must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 ext = _get_extension(cert, _TLSFeature) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index aaf0e8562f..1011893e23 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -32,10 +32,14 @@ CertificateError as _SICertificateError, VerificationError as _SIVerificationError) +from cryptography.hazmat.backends import default_backend as _default_backend + from bson.py3compat import _unicode from pymongo.errors import CertificateError as _CertificateError from pymongo.monotonic import time as _time -from pymongo.ocsp_support import ocsp_callback as _ocsp_callback +from pymongo.ocsp_support import ( + _load_trusted_ca_certs, + _ocsp_callback) from pymongo.socket_checker import ( _errno_from_exception, SocketChecker as _SocketChecker) @@ -133,23 +137,31 @@ def sendall(self, buf, flags=0): total_sent += sent +class _CallbackData(object): + """Data class which is passed to the OCSP callback.""" + def __init__(self): + self.trusted_ca_certs = None + + class SSLContext(object): """A CPython compatible SSLContext implementation wrapping PyOpenSSL's context. """ - __slots__ = ('_protocol', '_ctx', '_check_hostname') + __slots__ = ('_protocol', '_ctx', '_check_hostname', '_callback_data') def __init__(self, protocol): self._protocol = protocol self._ctx = _SSL.Context(self._protocol) self._check_hostname = True + self._callback_data = _CallbackData() # OCSP # XXX: Find a better place to do this someday, since this is client # side configuration and wrap_socket tries to support both client and # server side sockets. self._ctx.set_ocsp_client_callback( - callback=_ocsp_callback, data=None) + callback=_ocsp_callback, data=self._callback_data) + @property def protocol(self): @@ -229,6 +241,7 @@ def load_verify_locations(self, cafile=None, capath=None): ssl.CERT_NONE. """ self._ctx.load_verify_locations(cafile, capath) + self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) def set_default_verify_paths(self): """Specify that the platform provided CA certificates are to be used diff --git a/test/certificates/trusted-ca.pem b/test/certificates/trusted-ca.pem new file mode 100644 index 0000000000..a6f6f312d0 --- /dev/null +++ b/test/certificates/trusted-ca.pem @@ -0,0 +1,82 @@ +# CA bundle file used to test tlsCAFile loading for OCSP. +# Copied from the server: +# https://github.com/mongodb/mongo/blob/r4.3.4/jstests/libs/trusted-ca.pem + +# Autogenerated file, do not edit. +# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml trusted-ca.pem +# +# CA for alternate client/server certificate chain. +-----BEGIN CERTIFICATE----- +MIIDojCCAooCBG585gswDQYJKoZIhvcNAQELBQAwfDELMAkGA1UEBhMCVVMxETAP +BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK +DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxHzAdBgNVBAMMFlRydXN0ZWQgS2Vy +bmVsIFRlc3QgQ0EwHhcNMTkwOTI1MjMyNzQxWhcNMzkwOTI3MjMyNzQxWjB8MQsw +CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr +IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UE +AwwWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBANlRxtpMeCGhkotkjHQqgqvO6O6hoRoAGGJlDaTVtqrjmC8nwySz +1nAFndqUHttxS3A5j4enOabvffdOcV7+Z6vDQmREF6QZmQAk81pmazSc3wOnRiRs +AhXjld7i+rhB50CW01oYzQB50rlBFu+ONKYj32nBjD+1YN4AZ2tuRlbxfx2uf8Bo +Zowfr4n9nHVcWXBLFmaQLn+88WFO/wuwYUOn6Di1Bvtkvqum0or5QeAF0qkJxfhg +3a4vBnomPdwEXCgAGLvHlB41CWG09EuAjrnE3HPPi5vII8pjY2dKKMomOEYmA+KJ +AC1NlTWdN0TtsoaKnyhMMhLWs3eTyXL7kbkCAwEAAaMxMC8wDAYDVR0TBAUwAwEB +/zAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTANBgkqhkiG9w0BAQsF +AAOCAQEAQk56MO9xAhtO077COCqIYe6pYv3uzOplqjXpJ7Cph7GXwQqdFWfKls7B +cLfF/fhIUZIu5itStEkY+AIwht4mBr1F5+hZUp9KZOed30/ewoBXAUgobLipJV66 +FKg8NRtmJbiZrrC00BSO+pKfQThU8k0zZjBmNmpjxnbKZZSFWUKtbhHV1vujver6 +SXZC7R6692vLwRBMoZxhgy/FkYRdiN0U9wpluKd63eo/O02Nt6OEMyeiyl+Z3JWi +8g5iHNrBYGBbGSnDOnqV6tjEY3eq600JDWiodpA1OQheLi78pkc/VQZwof9dyBCm +6BoCskTjip/UB+vIhdPFT9sgUdgDTg== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDZUcbaTHghoZKL +ZIx0KoKrzujuoaEaABhiZQ2k1baq45gvJ8Mks9ZwBZ3alB7bcUtwOY+Hpzmm7333 +TnFe/merw0JkRBekGZkAJPNaZms0nN8Dp0YkbAIV45Xe4vq4QedAltNaGM0AedK5 +QRbvjjSmI99pwYw/tWDeAGdrbkZW8X8drn/AaGaMH6+J/Zx1XFlwSxZmkC5/vPFh +Tv8LsGFDp+g4tQb7ZL6rptKK+UHgBdKpCcX4YN2uLwZ6Jj3cBFwoABi7x5QeNQlh +tPRLgI65xNxzz4ubyCPKY2NnSijKJjhGJgPiiQAtTZU1nTdE7bKGip8oTDIS1rN3 +k8ly+5G5AgMBAAECggEAS7GjLKgT88reSzUTgubHquYf1fZwMak01RjTnsVdoboy +aMJVwzPsjgo2yEptUQvuNcGmz54cg5vJaVlmPaspGveg6WGaRmswEo/MP4GK98Fo +IFKkKM2CEHO74O14XLN/w8yFA02+IdtM3X/haEFE71VxXNmwawRXIBxN6Wp4j5Fb +mPLKIspnWQ/Y/Fn799sCFAzX5mKkbCt1IEgKssgQQEm1UkvmCkcZE+mdO/ErYP8A +COO0LpM+TK6WQY2LKiteeCCiosTZFb1GO7MkXrRP5uOBZKaW5kq1R0b6PcopJPCM +OcYF0Zli6KB7oiQLdXgU2jCaxYOnuRb6RYh2l7NvAQKBgQD6CZ9TKOn/EUQtukyw +pvYTyt1hoLXqYGcbRtLc1gcC+Z2BD28hd3eD/mEUv+g/8bq/OP4wYV9X+VRvR8xN +MmfAG/sJeOCOClz1A1TyNeA+G0GZ25qWHyHQ2W4WlSG1CXQgxGzU6wo/t6wiVW5R +O4jplFVEOXznf4vmVfBJK50R2QKBgQDegGxm23jF2N5sIYDZ14oxms8bbjPz8zH6 +tiIRYNGbSzI7J4KFGY2HiBwtf1yxS22HBL69Y1WrEzGm1vm4aZG/GUwBzI79QZAO ++YFIGaIrdlv12Zm6lpJMmAWlOs9XFirC17oQEwOQFweOdQSt7F/+HMZOigdikRBV +pK+8Kfay4QKBgQDarDevHwUmkg8yftA7Xomv3aenjkoK5KzH6jTX9kbDj1L0YG8s +sbLQuVRmNUAFTH+qZUnJPh+IbQIvIHfIu+CI3u+55QFeuCl8DqHoAr5PEr9Ys/qK +eEe2w7HIBj0oe1AYqDEWNUkNWLEuhdCpMowW3CeGN1DJlX7gvyAang4MYQKBgHwM +aWNnFQxo/oiWnTnWm2tQfgszA7AMdF7s0E2UBwhnghfMzU3bkzZuwhbznQATp3rR +QG5iRU7dop7717ni0akTN3cBTu8PcHuIy3UhJXLJyDdnG/gVHnepgew+v340E58R +muB/WUsqK8JWp0c4M8R+0mjTN47ShaLZ8EgdtTbBAoGBAKOcpuDfFEMI+YJgn8zX +h0nFT60LX6Lx+zcSDY9+6J6a4n5NhC+weYCDFOGlsLka1SwHcg1xanfrLVjpH7Ok +HDJGLrSh1FP2Rq/oFxZ/OKCjonHLa8IulqD/AA+sqYRbysKNsT3Pi0554F2xFEqQ +z/C84nlT1R2uTCWIxvrnpU2h +-----END PRIVATE KEY----- +# Pre Oct 2019 trusted-ca.pem +# Transitional pending BUILD update. +-----BEGIN CERTIFICATE----- +MIIDpjCCAo6gAwIBAgIDAghHMA0GCSqGSIb3DQEBBQUAMHwxHzAdBgNVBAMTFlRy +dXN0ZWQgS2VybmVsIFRlc3QgQ0ExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMH +TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv +cmsxCzAJBgNVBAYTAlVTMB4XDTE2MDMzMTE0NTY1NVoXDTM2MDMzMTE0NTY1NVow +fDEfMB0GA1UEAxMWVHJ1c3RlZCBLZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2Vy +bmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREw +DwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCePFHZTydC96SlSHSyu73vw//ddaE33kPllBB9DP2L7yRF +6D/blFmno9fSM+Dfg64VfGV+0pCXPIZbpH29nzJu0DkvHzKiWK7P1zUj8rAHaX++ +d6k0yeTLFM9v+7YE9rHoANVn22aOyDvTgAyMmA0CLn+SmUy6WObwMIf9cZn97Znd +lww7IeFNyK8sWtfsVN4yRBnjr7kKN2Qo0QmWeFa7jxVQptMJQrY8k1PcyVUOgOjQ +ocJLbWLlm9k0/OMEQSwQHJ+d9weUbKjlZ9ExOrm4QuuA2tJhb38baTdAYw3Jui4f +yD6iBAGD0Jkpc+3YaWv6CBmK8NEFkYJD/gn+lJ75AgMBAAGjMTAvMAwGA1UdEwQF +MAMBAf8wHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEwDQYJKoZIhvcN +AQEFBQADggEBADYikjB6iwAUs6sglwkE4rOkeMkJdRCNwK/5LpFJTWrDjBvBQCdA +Y5hlAVq8PfIYeh+wEuSvsEHXmx7W29X2+p4VuJ95/xBA6NLapwtzuiijRj2RBAOG +1EGuyFQUPTL27DR3+tfayNykDclsVDNN8+l7nt56j8HojP74P5OMHtn+6HX5+mtF +FfZMTy0mWguCsMOkZvjAskm6s4U5gEC8pYEoC0ZRbfUdyYsxZe/nrXIFguVlVPCB +XnfB/0iG9t+VH5cUVj1LP9skXTW4kXfhQmljUuo+EVBNR6n2nfTnpoC65WeAgHV4 +V+s9mJsUv2x72KtKYypqEVT0gaJ1WIN9N1s= +-----END CERTIFICATE----- diff --git a/test/test_ssl.py b/test/test_ssl.py index c2b8deae35..227d0db378 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -39,7 +39,9 @@ SkipTest, unittest, HAVE_IPADDRESS) -from test.utils import remove_all_users, connected +from test.utils import (remove_all_users, + cat_files, + connected) _HAVE_PYOPENSSL = False try: @@ -51,6 +53,11 @@ except ImportError: pass +if _HAVE_PYOPENSSL: + from pymongo.ocsp_support import _load_trusted_ca_certs +else: + _load_trusted_ca_certs = None + if HAVE_SSL: import ssl @@ -59,6 +66,7 @@ CLIENT_PEM = os.path.join(CERT_PATH, 'client.pem') CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, 'password_protected.pem') CA_PEM = os.path.join(CERT_PATH, 'ca.pem') +CA_BUNDLE_PEM = os.path.join(CERT_PATH, 'trusted-ca.pem') CRL_PEM = os.path.join(CERT_PATH, 'crl.pem') MONGODB_X509_USERNAME = ( "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client") @@ -157,6 +165,11 @@ def test_config_ssl(self): def test_use_openssl_when_available(self): self.assertTrue(_ssl.IS_PYOPENSSL) + @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") + def test_load_trusted_ca_certs(self): + trusted_ca_certs = _load_trusted_ca_certs(CA_BUNDLE_PEM) + self.assertEqual(2, len(trusted_ca_certs)) + class TestSSL(IntegrationTest): @@ -644,6 +657,23 @@ def test_mongodb_x509_auth(self): else: self.fail("Invalid certificate accepted.") + def test_connect_with_ca_bundle(self): + def remove(path): + try: + os.remove(path) + except OSError: + pass + + temp_ca_bundle = os.path.join(CERT_PATH, 'trusted-ca-bundle.pem') + self.addCleanup(remove, temp_ca_bundle) + # Add the CA cert file to the bundle. + cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) + with MongoClient('localhost', + tls=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsCAFile=temp_ca_bundle) as client: + self.assertTrue(client.admin.command('ismaster')) + if __name__ == "__main__": unittest.main() diff --git a/test/utils.py b/test/utils.py index c1f7b9a570..05e9be12cd 100644 --- a/test/utils.py +++ b/test/utils.py @@ -20,6 +20,7 @@ import functools import os import re +import shutil import sys import threading import time @@ -880,3 +881,11 @@ def server_name_to_type(name): if name == 'PossiblePrimary': return SERVER_TYPE.Unknown return getattr(SERVER_TYPE, name) + + +def cat_files(dest, *sources): + """Cat multiple files into dest.""" + with open(dest, 'wb') as fdst: + for src in sources: + with open(src, 'rb') as fsrc: + shutil.copyfileobj(fsrc, fdst) From af1dcc5143f87e44fb57c22fc4ebd452c9c201b5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 Mar 2020 18:25:15 -0700 Subject: [PATCH 0084/2111] PYTHON-2144 Properly require TLS for test_connect_with_ca_bundle --- test/test_ssl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_ssl.py b/test/test_ssl.py index 227d0db378..f663a2e19c 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -657,6 +657,7 @@ def test_mongodb_x509_auth(self): else: self.fail("Invalid certificate accepted.") + @client_context.require_ssl_certfile def test_connect_with_ca_bundle(self): def remove(path): try: From 47a67183526530bc8e9ac9ffe5325c5dfd3a0b2e Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 9 Mar 2020 16:06:50 -0700 Subject: [PATCH 0085/2111] PYTHON-2161 add support for tlsDisableOcspEndpointCheck URI option --- pymongo/client_options.py | 6 +- pymongo/common.py | 6 +- pymongo/database.py | 2 +- pymongo/encryption.py | 2 +- pymongo/mongo_client.py | 11 +- pymongo/ocsp_support.py | 4 + pymongo/pyopenssl_context.py | 18 +- pymongo/ssl_support.py | 5 +- pymongo/uri_parser.py | 43 ++- test/test_ssl.py | 18 +- test/test_uri_parser.py | 2 +- test/test_uri_spec.py | 28 +- test/uri_options/tls-options.json | 426 +++++++++++++++++++++++++++++- 13 files changed, 538 insertions(+), 33 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 7bb68a9105..4f40629e7d 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -80,10 +80,11 @@ def _parse_ssl_options(options): cert_reqs = options.get('ssl_cert_reqs') match_hostname = options.get('ssl_match_hostname', True) crlfile = options.get('ssl_crlfile') + check_ocsp_endpoint = options.get('ssl_check_ocsp_endpoint', True) ssl_kwarg_keys = [k for k in options if k.startswith('ssl_') and options[k]] - if use_ssl == False and ssl_kwarg_keys: + if use_ssl is False and ssl_kwarg_keys: raise ConfigurationError("ssl has not been enabled but the " "following ssl parameters have been set: " "%s. Please set `ssl=True` or remove." @@ -101,7 +102,8 @@ def _parse_ssl_options(options): ca_certs, cert_reqs, crlfile, - match_hostname) + match_hostname, + check_ocsp_endpoint) return ctx, match_hostname return None, match_hostname diff --git a/pymongo/common.py b/pymongo/common.py index 12777b7acf..8b586c4b42 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -617,6 +617,7 @@ def validate_tzinfo(dummy, value): 'tlscafile': validate_readable, 'tlscertificatekeyfile': validate_readable, 'tlscertificatekeyfilepassword': validate_string_or_none, + 'tlsdisableocspendpointcheck': validate_boolean_or_string, 'tlsinsecure': validate_boolean_or_string, 'w': validate_non_negative_int_or_basestring, 'wtimeoutms': validate_non_negative_integer, @@ -668,6 +669,7 @@ def validate_tzinfo(dummy, value): 'tlscafile': 'ssl_ca_certs', 'tlscertificatekeyfile': 'ssl_certfile', 'tlscertificatekeyfilepassword': 'ssl_pem_passphrase', + 'tlsdisableocspendpointcheck': 'ssl_check_ocsp_endpoint', } # Map from deprecated URI option names to a tuple indicating the method of @@ -726,7 +728,7 @@ def validate_auth_option(option, value): if lower not in _AUTH_OPTIONS: raise ConfigurationError('Unknown ' 'authentication option: %s' % (option,)) - return lower, value + return option, value def validate(option, value): @@ -735,7 +737,7 @@ def validate(option, value): lower = option.lower() validator = VALIDATORS.get(lower, raise_config_error) value = validator(option, value) - return lower, value + return option, value def get_validated_options(options, warn=True): diff --git a/pymongo/database.py b/pymongo/database.py index 144a1f6b40..7137c96bf6 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1471,7 +1471,7 @@ def authenticate(self, name=None, password=None, "instance of %s" % (string_type.__name__,)) common.validate_auth_mechanism('mechanism', mechanism) - validated_options = {} + validated_options = common._CaseInsensitiveDictionary() for option, value in iteritems(kwargs): normalized, val = common.validate_auth_option(option, value) validated_options[normalized] = val diff --git a/pymongo/encryption.py b/pymongo/encryption.py index f71cd48b6d..952470809d 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -107,7 +107,7 @@ def kms_request(self, kms_context): endpoint = kms_context.endpoint message = kms_context.message host, port = parse_host(endpoint, _HTTPS_PORT) - ctx = get_ssl_context(None, None, None, None, None, None, True) + ctx = get_ssl_context(None, None, None, None, None, None, True, True) opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, socket_timeout=_KMS_CONNECT_TIMEOUT, ssl_context=ctx) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 9b33fa2cd3..ac00a38e8c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -459,6 +459,9 @@ def __init__( ``ssl_keyfile``. Only necessary if the private key is encrypted. Only supported by python 2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults to ``None``. + - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables + certificate revocation status checking via the OCSP responder + specified on the server certificate. Defaults to ``False``. - `ssl`: (boolean) Alias for ``tls``. - `ssl_certfile`: The certificate file used to identify the local connection against mongod. Implies ``tls=True``. Defaults to @@ -488,6 +491,10 @@ def __init__( .. mongodoc:: connections + .. versionchanged:: 3.11 + Added the ``tlsDisableOCSPEndpointCheck`` keyword argument and + URI option. + .. versionchanged:: 3.9 Added the ``retryReads`` keyword argument and URI option. Added the ``tlsInsecure`` keyword argument and URI option. @@ -645,8 +652,8 @@ def __init__( # Handle deprecated options in kwarg options. keyword_opts = _handle_option_deprecations(keyword_opts) # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary( - dict(common.validate(k, v) for k, v in keyword_opts.items())) + keyword_opts = common._CaseInsensitiveDictionary(dict(common.validate( + keyword_opts.cased_key(k), v) for k, v in keyword_opts.items())) # Override connection string options with kwarg options. opts.update(keyword_opts) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index daa83e39bd..be09277999 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -289,6 +289,10 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): if must_staple: _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") return 0 + if not user_data.check_ocsp_endpoint: + _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") + # No stapled OCSP response, checking responder URI diabled, soft fail. + return 1 # https://tools.ietf.org/html/rfc6960#section-3.1 ext = _get_extension(cert, _AuthorityInformationAccess) if ext is None: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 1011893e23..c073427de3 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -141,6 +141,7 @@ class _CallbackData(object): """Data class which is passed to the OCSP callback.""" def __init__(self): self.trusted_ca_certs = None + self.check_ocsp_endpoint = None class SSLContext(object): @@ -148,21 +149,21 @@ class SSLContext(object): context. """ - __slots__ = ('_protocol', '_ctx', '_check_hostname', '_callback_data') + __slots__ = ('_protocol', '_ctx', '_callback_data', '_check_hostname') def __init__(self, protocol): self._protocol = protocol self._ctx = _SSL.Context(self._protocol) - self._check_hostname = True self._callback_data = _CallbackData() + self._check_hostname = True # OCSP # XXX: Find a better place to do this someday, since this is client # side configuration and wrap_socket tries to support both client and # server side sockets. + self._callback_data.check_ocsp_endpoint = True self._ctx.set_ocsp_client_callback( callback=_ocsp_callback, data=self._callback_data) - @property def protocol(self): """The protocol version chosen when constructing the context. @@ -199,6 +200,17 @@ def __set_check_hostname(self, value): check_hostname = property(__get_check_hostname, __set_check_hostname) + def __get_check_ocsp_endpoint(self): + return self._callback_data.check_ocsp_endpoint + + def __set_check_ocsp_endpoint(self, value): + if not isinstance(value, bool): + raise TypeError("check_ocsp must be True or False") + self._callback_data.check_ocsp_endpoint = value + + check_ocsp_endpoint = property(__get_check_ocsp_endpoint, + __set_check_ocsp_endpoint) + def __get_options(self): # Calling set_options adds the option to the existing bitmask and # returns the new bitmask. diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 4307d97744..b5847244c3 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -102,7 +102,8 @@ def get_ssl_context(*args): ca_certs, cert_reqs, crlfile, - match_hostname) = args + match_hostname, + check_ocsp_endpoint) = args verify_mode = CERT_REQUIRED if cert_reqs is None else cert_reqs ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) # SSLContext.check_hostname was added in CPython 2.7.9 and 3.4. @@ -111,6 +112,8 @@ def get_ssl_context(*args): ctx.check_hostname = match_hostname else: ctx.check_hostname = False + if hasattr(ctx, "check_ocsp_endpoint"): + ctx.check_ocsp_endpoint = check_ocsp_endpoint if hasattr(ctx, "options"): # Explicitly disable SSLv2, SSLv3 and TLS compression. Note that # up to date versions of MongoDB 2.4 and above already disable diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index a6b0a46cda..779a8c5ed9 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -126,12 +126,16 @@ def parse_host(entity, default_port=DEFAULT_PORT): return host.lower(), port -_IMPLICIT_TLSINSECURE_OPTS = {"tlsallowinvalidcertificates", - "tlsallowinvalidhostnames"} +# Options whose values are implicitly determined by tlsInsecure. +_IMPLICIT_TLSINSECURE_OPTS = { + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck",} -_TLSINSECURE_EXCLUDE_OPTS = (_IMPLICIT_TLSINSECURE_OPTS | - {INTERNAL_URI_OPTION_NAME_MAP[k] for k in - _IMPLICIT_TLSINSECURE_OPTS}) +# Options that cannot be specified when tlsInsecure is also specified. +_TLSINSECURE_EXCLUDE_OPTS = ( + {k for k in _IMPLICIT_TLSINSECURE_OPTS} | + {INTERNAL_URI_OPTION_NAME_MAP[k] for k in _IMPLICIT_TLSINSECURE_OPTS}) def _parse_options(opts, delim): @@ -172,6 +176,33 @@ def _handle_security_options(options): raise InvalidURI(err_msg % ( options.cased_key('tlsinsecure'), options.cased_key(opt))) + # Convenience function to retrieve option values based on public or private names. + def _getopt(opt): + return (options.get(opt) or + options.get(INTERNAL_URI_OPTION_NAME_MAP[opt])) + + # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. + tlsallowinvalidcerts = _getopt('tlsallowinvalidcertificates') + if tlsallowinvalidcerts is not None: + if 'tlsdisableocspendpointcheck' in options: + err_msg = ("URI options %s and %s cannot be specified " + "simultaneously.") + raise InvalidURI(err_msg % ( + 'tlsallowinvalidcertificates', options.cased_key( + 'tlsdisableocspendpointcheck'))) + if tlsallowinvalidcerts is True: + options['tlsdisableocspendpointcheck'] = True + + # Handle co-occurence of CRL and OCSP-related options. + tlscrlfile = _getopt('tlscrlfile') + if tlscrlfile is not None: + for opt in ('tlsinsecure', 'tlsallowinvalidcertificates', + 'tlsdisableocspendpointcheck'): + if options.get(opt) is True: + err_msg = ("URI option %s=True cannot be specified when " + "CRL checking is enabled.") + raise InvalidURI(err_msg % (opt,)) + if 'ssl' in options and 'tls' in options: def truth_value(val): if val in ('true', 'false'): @@ -235,7 +266,7 @@ def _normalize_options(options): tlsinsecure = options.get('tlsinsecure') if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: - intname = INTERNAL_URI_OPTION_NAME_MAP.get(opt, None) + intname = INTERNAL_URI_OPTION_NAME_MAP[opt] # Internal options are logical inverse of public options. options[intname] = not tlsinsecure diff --git a/test/test_ssl.py b/test/test_ssl.py index f663a2e19c..0987354c79 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -345,17 +345,17 @@ def test_cert_ssl_validation_hostname_matching(self): # Python > 2.7.9. If SSLContext doesn't have load_default_certs # it also doesn't have check_hostname. ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, False) + None, None, None, None, ssl.CERT_NONE, None, False, True) if hasattr(ctx, 'load_default_certs'): self.assertFalse(ctx.check_hostname) ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, True) + None, None, None, None, ssl.CERT_NONE, None, True, True) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, False) + None, None, None, None, ssl.CERT_REQUIRED, None, False, True) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, True) + None, None, None, None, ssl.CERT_REQUIRED, None, True, True) if _PY37PLUS: self.assertTrue(ctx.check_hostname) else: @@ -493,7 +493,7 @@ def test_validation_with_system_ca_certs(self): def test_system_certs_config_error(self): ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, False) + None, None, None, None, ssl.CERT_NONE, None, False, True) if ((sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr(ctx, "load_default_certs")): @@ -526,11 +526,11 @@ def test_certifi_support(self): ssl_support.HAVE_WINCERTSTORE = False try: ctx = get_ssl_context( - None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True) + None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True, True) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, None, None, True) + ctx = get_ssl_context(None, None, None, None, None, None, True, True) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) finally: @@ -548,11 +548,11 @@ def test_wincertstore(self): raise SkipTest("Need wincertstore to test wincertstore.") ctx = get_ssl_context( - None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True) + None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True, True) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, None, None, True) + ctx = get_ssl_context(None, None, None, None, None, None, True, True) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index a921c66563..8d1f55a0dd 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -470,7 +470,7 @@ def test_tlsinsecure_simple(self): uri = "mongodb://example.com/?tlsInsecure=true" res = { "ssl_match_hostname": False, "ssl_cert_reqs": CERT_NONE, - "tlsinsecure": True} + "tlsinsecure": True, 'ssl_check_ocsp_endpoint': False} self.assertEqual(res, parse_uri(uri)["options"]) def test_tlsinsecure_legacy_conflict(self): diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 9bc7cb178a..cfca633458 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -37,7 +37,33 @@ TEST_DESC_SKIP_LIST = [ "Valid options specific to single-threaded drivers are parsed correctly", - "Invalid serverSelectionTryOnce causes a warning"] + "Invalid serverSelectionTryOnce causes a warning", + "tlsDisableCertificateRevocationCheck can be set to true", + "tlsDisableCertificateRevocationCheck can be set to false", + "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", + "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", + "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", + "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", + "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", + "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error"] class TestAllScenarios(unittest.TestCase): diff --git a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json index 6db80ed623..edf6042943 100644 --- a/test/uri_options/tls-options.json +++ b/test/uri_options/tls-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid required tls options are parsed correctly", - "uri": "mongodb://example.com/?tls=true&tlsCAFile=ca.pem&tlsCertificateKeyFile=cert.pem&tlsCertificateKeyFilePassword=hunter2", + "uri": "mongodb://example.com/?tls=true&tlsCAFile=ca.pem&tlsCertificateKeyFile=cert.pem", "valid": true, "warning": false, "hosts": null, @@ -10,7 +10,17 @@ "options": { "tls": true, "tlsCAFile": "ca.pem", - "tlsCertificateKeyFile": "cert.pem", + "tlsCertificateKeyFile": "cert.pem" + } + }, + { + "description": "Valid tlsCertificateKeyFilePassword is parsed correctly", + "uri": "mongodb://example.com/?tlsCertificateKeyFilePassword=hunter2", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { "tlsCertificateKeyFilePassword": "hunter2" } }, @@ -75,8 +85,8 @@ } }, { - "description": "Invalid tlsAllowInsecure causes a warning", - "uri": "mongodb://example.com/?tlsAllowInsecure=invalid", + "description": "Invalid tlsInsecure causes a warning", + "uri": "mongodb://example.com/?tlsInsecure=invalid", "valid": true, "warning": true, "hosts": null, @@ -226,6 +236,414 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck can be set to true", + "uri": "mongodb://example.com/?tls=true&tlsDisableCertificateRevocationCheck=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableCertificateRevocationCheck": true + } + }, + { + "description": "tlsDisableCertificateRevocationCheck can be set to false", + "uri": "mongodb://example.com/?tls=true&tlsDisableCertificateRevocationCheck=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableCertificateRevocationCheck": false + } + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableCertificateRevocationCheck=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsDisableCertificateRevocationCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsDisableCertificateRevocationCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck can be set to true", + "uri": "mongodb://example.com/?tls=true&tlsDisableOCSPEndpointCheck=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableOCSPEndpointCheck": true + } + }, + { + "description": "tlsDisableOCSPEndpointCheck can be set to false", + "uri": "mongodb://example.com/?tls=true&tlsDisableOCSPEndpointCheck=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "tls": true, + "tlsDisableOCSPEndpointCheck": false + } + }, + { + "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsInsecure=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsInsecure=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsInsecure=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsInsecure=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsInsecure=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsInsecure=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=true and tlsDisableOCSPEndpointCheck=false raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=true&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates=false and tlsDisableOCSPEndpointCheck=true raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableOCSPEndpointCheck=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=false&tlsDisableOCSPEndpointCheck=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and true) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=true and tlsAllowInvalidCertificates=false raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck=false and tlsAllowInvalidCertificates=true raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsAllowInvalidCertificates=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and false) raises an error", + "uri": "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false&tlsAllowInvalidCertificates=false", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} } ] } From 0609fea0128b5fe9c8312feb424b1c2866b852fd Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 11 Mar 2020 18:33:58 -0700 Subject: [PATCH 0086/2111] PYTHON-2132 cache OCSP responses --- pymongo/ocsp_cache.py | 87 +++++++++++++++++++++++ pymongo/ocsp_support.py | 82 ++++++++++++--------- pymongo/pyopenssl_context.py | 2 + test/test_ocsp_cache.py | 133 +++++++++++++++++++++++++++++++++++ 4 files changed, 271 insertions(+), 33 deletions(-) create mode 100644 pymongo/ocsp_cache.py create mode 100644 test/test_ocsp_cache.py diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py new file mode 100644 index 0000000000..c2c24c4ab0 --- /dev/null +++ b/pymongo/ocsp_cache.py @@ -0,0 +1,87 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for caching OCSP responses.""" + +from collections import namedtuple +from datetime import datetime as _datetime +from threading import Lock + + +class _OCSPCache(object): + """A cache for OCSP responses.""" + CACHE_KEY_TYPE = namedtuple('OcspResponseCacheKey', + ['hash_algorithm', 'issuer_name_hash', + 'issuer_key_hash', 'serial_number']) + + def __init__(self): + self._data = {} + # Hold this lock when accessing _data. + self._lock = Lock() + + def _get_cache_key(self, ocsp_request): + return self.CACHE_KEY_TYPE( + hash_algorithm=ocsp_request.hash_algorithm.name.lower(), + issuer_name_hash=ocsp_request.issuer_name_hash, + issuer_key_hash=ocsp_request.issuer_key_hash, + serial_number=ocsp_request.serial_number) + + def __setitem__(self, key, value): + """Add/update a cache entry. + + 'key' is of type cryptography.x509.ocsp.OCSPRequest + 'value' is of type cryptography.x509.ocsp.OCSPResponse + + Validity of the OCSP response must be checked by caller. + """ + with self._lock: + cache_key = self._get_cache_key(key) + + # As per the OCSP protocol, if the response's nextUpdate field is + # not set, the responder is indicating that newer revocation + # information is available all the time. + if value.next_update is None: + self._data.pop(cache_key, None) + return + + # Do nothing if the response is invalid. + if not (value.this_update <= _datetime.utcnow() + < value.next_update): + return + + # Cache new response OR update cached response if new response + # has longer validity. + cached_value = self._data.get(cache_key, None) + if (cached_value is None or + cached_value.next_update < value.next_update): + self._data[cache_key] = value + + def __getitem__(self, item): + """Get a cache entry if it exists. + + 'item' is of type cryptography.x509.ocsp.OCSPRequest + + Raises KeyError if the item is not in the cache. + """ + with self._lock: + cache_key = self._get_cache_key(item) + value = self._data[cache_key] + + # Return cached response if it is still valid. + if (value.this_update <= _datetime.utcnow() < + value.next_update): + return value + + self._data.pop(cache_key, None) + raise KeyError(cache_key) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index be09277999..20e029037e 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -207,37 +207,11 @@ def _verify_response_signature(issuer, response): return ret -def _request_ocsp(cert, issuer, uri): +def _build_ocsp_request(cert, issuer): # https://cryptography.io/en/latest/x509/ocsp/#creating-requests builder = _OCSPRequestBuilder() - # add_certificate returns a new instance builder = builder.add_certificate(cert, issuer, _SHA1()) - ocsp_request = builder.build() - try: - response = _post( - uri, - data=ocsp_request.public_bytes(_Encoding.DER), - headers={'Content-Type': 'application/ocsp-request'}, - timeout=5) - except _RequestException: - _LOGGER.debug("HTTP request failed") - return None - if response.status_code != 200: - _LOGGER.debug("HTTP request returned %d", response.status_code) - return None - ocsp_response = _load_der_ocsp_response(response.content) - _LOGGER.debug( - "OCSP response status: %r", ocsp_response.response_status) - if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: - return None - # RFC6960, Section 3.2, Number 1. Only relevant if we need to - # talk to the responder directly. - # Accessing response.serial_number raises if response status is not - # SUCCESSFUL. - if ocsp_response.serial_number != ocsp_request.serial_number: - _LOGGER.debug("Response serial number does not match request") - return None - return ocsp_response + return builder.build() def _verify_response(issuer, response): @@ -261,6 +235,45 @@ def _verify_response(issuer, response): return 1 +def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): + ocsp_request = _build_ocsp_request(cert, issuer) + try: + ocsp_response = ocsp_response_cache[ocsp_request] + _LOGGER.debug("Using cached OCSP response.") + except KeyError: + try: + response = _post( + uri, + data=ocsp_request.public_bytes(_Encoding.DER), + headers={'Content-Type': 'application/ocsp-request'}, + timeout=5) + except _RequestException: + _LOGGER.debug("HTTP request failed") + return None + if response.status_code != 200: + _LOGGER.debug("HTTP request returned %d", response.status_code) + return None + ocsp_response = _load_der_ocsp_response(response.content) + _LOGGER.debug( + "OCSP response status: %r", ocsp_response.response_status) + if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: + return None + # RFC6960, Section 3.2, Number 1. Only relevant if we need to + # talk to the responder directly. + # Accessing response.serial_number raises if response status is not + # SUCCESSFUL. + if ocsp_response.serial_number != ocsp_request.serial_number: + _LOGGER.debug("Response serial number does not match request") + return None + if not _verify_response(issuer, ocsp_response): + # The response failed verification. + return None + _LOGGER.debug("Caching OCSP response.") + ocsp_response_cache[ocsp_request] = ocsp_response + + return ocsp_response + + def _ocsp_callback(conn, ocsp_bytes, user_data): """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" cert = conn.get_peer_certificate() @@ -283,6 +296,8 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("Peer presented a must-staple cert") must_staple = True break + ocsp_response_cache = user_data.ocsp_response_cache + # No stapled OCSP response if ocsp_bytes == b'': _LOGGER.debug("Peer did not staple an OCSP response") @@ -314,13 +329,12 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): # successful, valid responses with a certificate status of REVOKED. for uri in uris: _LOGGER.debug("Trying %s", uri) - response = _request_ocsp(cert, issuer, uri) + response = _get_ocsp_response( + cert, issuer, uri, ocsp_response_cache) if response is None: # The endpoint didn't respond in time, or the response was - # unsuccessful or didn't match the request. - continue - if not _verify_response(issuer, response): - # The response failed verification. + # unsuccessful or didn't match the request, or the response + # failed verification. continue _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.GOOD: @@ -344,6 +358,8 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): return 0 if not _verify_response(issuer, response): return 0 + # Cache the verified, stapled response. + ocsp_response_cache[_build_ocsp_request(cert, issuer)] = response _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.REVOKED: return 0 diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index c073427de3..bb10eb6a00 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -40,6 +40,7 @@ from pymongo.ocsp_support import ( _load_trusted_ca_certs, _ocsp_callback) +from pymongo.ocsp_cache import _OCSPCache from pymongo.socket_checker import ( _errno_from_exception, SocketChecker as _SocketChecker) @@ -142,6 +143,7 @@ class _CallbackData(object): def __init__(self): self.trusted_ca_certs = None self.check_ocsp_endpoint = None + self.ocsp_response_cache = _OCSPCache() class SSLContext(object): diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py new file mode 100644 index 0000000000..7562d0f74a --- /dev/null +++ b/test/test_ocsp_cache.py @@ -0,0 +1,133 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the pymongo ocsp_support module.""" + +from collections import namedtuple +from datetime import datetime, timedelta +from os import urandom +import random +import sys +from time import sleep + +sys.path[0:0] = [""] + +from pymongo.ocsp_cache import _OCSPCache +from test import unittest + + +class TestOcspCache(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.MockHashAlgorithm = namedtuple( + "MockHashAlgorithm", ['name']) + cls.MockOcspRequest = namedtuple( + "MockOcspRequest", ['hash_algorithm', 'issuer_name_hash', + 'issuer_key_hash', 'serial_number']) + cls.MockOcspResponse = namedtuple( + "MockOcspResponse", ["this_update", "next_update"]) + + def setUp(self): + self.cache = _OCSPCache() + + def _create_mock_request(self): + hash_algorithm = self.MockHashAlgorithm( + random.choice(['sha1', 'md5', 'sha256'])) + issuer_name_hash = urandom(8) + issuer_key_hash = urandom(8) + serial_number = random.randint(0, 10**10) + return self.MockOcspRequest( + hash_algorithm=hash_algorithm, + issuer_name_hash=issuer_name_hash, + issuer_key_hash=issuer_key_hash, + serial_number=serial_number) + + def _create_mock_response(self, this_update_delta_seconds, + next_update_delta_seconds): + now = datetime.utcnow() + this_update = now + timedelta(seconds=this_update_delta_seconds) + if next_update_delta_seconds is not None: + next_update = now + timedelta(seconds=next_update_delta_seconds) + else: + next_update = None + return self.MockOcspResponse( + this_update=this_update, + next_update=next_update) + + def _add_mock_cache_entry(self, mock_request, mock_response): + key = self.cache._get_cache_key(mock_request) + self.cache._data[key] = mock_response + + def test_simple(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +3600) + self._add_mock_cache_entry(request, response) + + # Ensure entry can be retrieved. + self.assertEqual(self.cache[request], response) + + # Valid entries with an earlier next_update have no effect. + response_1 = self._create_mock_response(-20, +1800) + self.cache[request] = response_1 + self.assertEqual(self.cache[request], response) + + # Invalid entries with a later this_update have no effect. + response_2 = self._create_mock_response(+20, +1800) + self.cache[request] = response_2 + self.assertEqual(self.cache[request], response) + + # Invalid entries with passed next_update have no effect. + response_3 = self._create_mock_response(-10, -5) + self.cache[request] = response_3 + self.assertEqual(self.cache[request], response) + + # Valid entries with a later next_update update the cache. + response_new = self._create_mock_response(-5, +7200) + self.cache[request] = response_new + self.assertEqual(self.cache[request], response_new) + + # Entries with an unset next_update purge the cache. + response_notset = self._create_mock_response(-5, None) + self.cache[request] = response_notset + with self.assertRaises(KeyError): + _ = self.cache[request] + + def test_invalidate(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +0.25) + self._add_mock_cache_entry(request, response) + + # Ensure entry can be retrieved. + self.assertEqual(self.cache[request], response) + + # Wait for entry to become invalid and ensure KeyError is raised. + sleep(0.5) + with self.assertRaises(KeyError): + _ = self.cache[request] + + def test_non_existent(self): + # Start with 1 valid entry in the cache. + request = self._create_mock_request() + response = self._create_mock_response(-10, +10) + self._add_mock_cache_entry(request, response) + + # Attempt to retrieve non-existent entry must raise KeyError. + with self.assertRaises(KeyError): + _ = self.cache[self._create_mock_request()] + + +if __name__ == "__main__": + unittest.main() From e5ef8f4dfa7f4bfa9b6830874140ed486bbf945a Mon Sep 17 00:00:00 2001 From: Chris Cho Date: Mon, 16 Mar 2020 19:13:36 -0400 Subject: [PATCH 0087/2111] DOCSP-9413 Replace references to google groups with community forums (#437) --- README.rst | 4 ++-- doc/index.rst | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 585c51f1c8..5735b2d84f 100644 --- a/README.rst +++ b/README.rst @@ -24,8 +24,8 @@ Support / Feedback For issues with, questions about, or feedback for PyMongo, please look into our `support channels `_. Please do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on the `mongodb-user -`_ list on Google Groups. +questions - you're more likely to get an answer on the `MongoDB Community +Forums `_. Bugs / Feature Requests ======================= diff --git a/doc/index.rst b/doc/index.rst index 3a4aa316b2..d649884c4b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -53,7 +53,10 @@ everything you need to know to use **PyMongo**. Getting Help ------------ -If you're having trouble or have questions about PyMongo, the best place to ask is the `MongoDB user group `_. Once you get an answer, it'd be great if you could work it back into this documentation and contribute! +If you're having trouble or have questions about PyMongo, ask your question in +our `MongoDB Community Forums `_. Once you +get an answer, it'd be great if you could work it back into this documentation +and contribute! Issues ------ From 93cf0dd17647f43e3e8c4e9ab5b034fe9ebbfe45 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 13 Mar 2020 12:47:30 -0700 Subject: [PATCH 0088/2111] PYTHON-2102 Migrate testing to Windows 10 Add 32-bit Python testing on Windows. --- .evergreen/config.yml | 176 +++++++++++++++++++----------------------- test/test_objectid.py | 16 ++-- 2 files changed, 88 insertions(+), 104 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index b7b5ab39fc..8041e06771 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1298,24 +1298,6 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz - - id: windows-vs2010 - display_name: "Windows 64 Visual Studio 2010" - run_on: windows-64-vs2010-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz - - id: windows-vs2015 - display_name: "Windows 64 Visual Studio 2015" - run_on: windows-64-vs2015-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz - - id: windows-vs2017 - display_name: "Windows 64 Visual Studio 2017" - run_on: windows-64-vs2017-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz - id: windows-64-vsMulti-small display_name: "Windows 64" run_on: windows-64-vsMulti-small @@ -1431,40 +1413,71 @@ axes: batchtime: 10080 # 7 days variables: PYTHON_BINARY: "/opt/python/jython2.7/bin/jython" - # Windows - - id: "win-vs2010-3.4" - display_name: "Python 3.4" + # System python + - id: "system-python" + display_name: "Python" variables: - PYTHON_BINARY: "C:/python/Python34/python.exe" - - id: "win-vs2015-2.7" + PYTHON_BINARY: "python" + - id: "system-python3" + display_name: "Python3" + variables: + PYTHON_BINARY: "python3" + + - id: python-version-windows + display_name: "Python" + values: + - id: "2.7" display_name: "Python 2.7" variables: PYTHON_BINARY: "C:/python/Python27/python.exe" - - id: "win-vs2015-3.5" + - id: "3.4" + display_name: "Python 3.4" + variables: + PYTHON_BINARY: "C:/python/Python34/python.exe" + - id: "3.5" display_name: "Python 3.5" variables: PYTHON_BINARY: "C:/python/Python35/python.exe" - - id: "win-vs2015-3.6" + - id: "3.6" display_name: "Python 3.6" variables: PYTHON_BINARY: "C:/python/Python36/python.exe" - - id: "win-vs2015-3.7" + - id: "3.7" display_name: "Python 3.7" variables: PYTHON_BINARY: "C:/python/Python37/python.exe" - - id: "win-vs2015-3.8" + - id: "3.8" display_name: "Python 3.8" variables: PYTHON_BINARY: "C:/python/Python38/python.exe" - # System python - - id: "system-python" - display_name: "Python" + + - id: python-version-windows-32 + display_name: "Python" + values: + - id: "2.7" + display_name: "32-bit Python 2.7" variables: - PYTHON_BINARY: "python" - - id: "system-python3" - display_name: "Python3" + PYTHON_BINARY: "C:/python/32/Python27/python.exe" + - id: "3.4" + display_name: "32-bit Python 3.4" variables: - PYTHON_BINARY: "python3" + PYTHON_BINARY: "C:/python/32/Python34/python.exe" + - id: "3.5" + display_name: "32-bit Python 3.5" + variables: + PYTHON_BINARY: "C:/python/32/Python35/python.exe" + - id: "3.6" + display_name: "32-bit Python 3.6" + variables: + PYTHON_BINARY: "C:/python/32/Python36/python.exe" + - id: "3.7" + display_name: "32-bit Python 3.7" + variables: + PYTHON_BINARY: "C:/python/32/Python37/python.exe" + - id: "3.8" + display_name: "32-bit Python 3.8" + variables: + PYTHON_BINARY: "C:/python/32/Python38/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version @@ -1590,6 +1603,7 @@ buildvariants: encryption: "*" display_name: "Encryption ${platform} ${auth-ssl}" tasks: &encryption-server-versions + - ".latest" - ".4.2" - ".4.0" - ".2.6" @@ -1805,10 +1819,10 @@ buildvariants: - matrix_name: "tests-pyopenssl-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version: ["win-vs2015-2.7", "win-vs2010-3.4", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7", "win-vs2015-3.8"] + python-version-windows: "*" auth: "*" ssl: "ssl" - display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" + display_name: "PyOpenSSL ${platform} ${python-version-windows} ${auth}" tasks: - "pyopenssl" @@ -1910,14 +1924,14 @@ buildvariants: display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions -# Test CPython 3.4 against MongoDB 2.6-4.2 on Windows with Visual Studio 2010. -- matrix_name: "tests-windows-vs2010-python-version" +- matrix_name: "tests-windows-python-version" matrix_spec: - platform: windows-vs2010 - python-version: &win-vs2010-pythons ["win-vs2010-3.4"] + platform: windows-64-vsMulti-small + python-version-windows: "*" auth-ssl: "*" - display_name: "${platform} ${python-version} ${auth-ssl}" + display_name: "${platform} ${python-version-windows} ${auth-ssl}" tasks: + - ".latest" - ".4.2" - ".4.0" - ".3.6" @@ -1926,15 +1940,21 @@ buildvariants: - ".3.0" - ".2.6" -# windows-vs2010 3.4 is unable to dlopen the libmongocrypt ddl built on 2016 -#- matrix_name: "tests-windows-vs2010-python-version-encryption" -# matrix_spec: -# platform: windows-vs2010 -# python-version: *win-vs2010-pythons -# auth-ssl: "*" -# encryption: "*" -# display_name: "Encryption ${platform} ${python-version} ${auth-ssl}" -# tasks: *encryption-server-versions +- matrix_name: "tests-windows-python-version-32-bit" + matrix_spec: + platform: windows-64-vsMulti-small + python-version-windows-32: "*" + auth-ssl: "*" + display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" + tasks: + - ".latest" + - ".4.2" + - ".4.0" + - ".3.6" + - ".3.4" + - ".3.2" + - ".3.0" + - ".2.6" - matrix_name: "tests-python-version-requires-openssl-102-plus-test-ssl" matrix_spec: @@ -1968,42 +1988,15 @@ buildvariants: tasks: - ".latest" -# Test CPython 2.7, 3.5 and 3.6 against MongoDB 2.6-4.2 -# on Windows with the Microsoft Visual C++ Compiler for Python 2.7 or Visual Studio 2015. -- matrix_name: "tests-windows-vs2015-python-version-27plus" - matrix_spec: - platform: windows-vs2015 - python-version: &win-vs2015-pythons ["win-vs2015-2.7", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7"] - auth-ssl: "*" - display_name: "${platform} ${python-version} ${auth-ssl}" - tasks: - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - -- matrix_name: "tests-windows-vs2015-python-version-encryption" +- matrix_name: "tests-windows-encryption" matrix_spec: - platform: windows-vs2015 - python-version: *win-vs2015-pythons + platform: windows-64-vsMulti-small + python-version-windows: "*" auth-ssl: "*" encryption: "*" - display_name: "Encryption ${platform} ${python-version} ${auth-ssl}" + display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Test CPython 3.7 against MongoDB >= 4.3 on Windows 2017+. -- matrix_name: "tests-windows-vs2017" - matrix_spec: - platform: windows-vs2017 - python-version: ["win-vs2015-3.7"] - auth-ssl: "*" - display_name: "${platform} ${python-version} ${auth-ssl}" - tasks: - - .latest - # Storage engine tests on RHEL 6.2 (x86_64) with Python 2.7. - matrix_name: "tests-storage-engines" matrix_spec: @@ -2075,21 +2068,12 @@ buildvariants: tasks: - name: "test-enterprise-auth" -- matrix_name: "tests-windows-vs2010-enterprise-auth" +- matrix_name: "tests-windows-enterprise-auth" matrix_spec: - platform: windows-vs2010 - python-version: *win-vs2010-pythons - auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version}" - tasks: - - name: "test-enterprise-auth" - -- matrix_name: "tests-windows-vs2015-enterprise-auth" - matrix_spec: - platform: windows-vs2015 - python-version: *win-vs2015-pythons + platform: windows-64-vsMulti-small + python-version-windows: "*" auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version}" + display_name: "Enterprise ${auth} ${platform} ${python-version-windows}" tasks: - name: "test-enterprise-auth" @@ -2189,8 +2173,8 @@ buildvariants: - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] - python-version: ["win-vs2015-2.7", "win-vs2010-3.4", "win-vs2015-3.5", "win-vs2015-3.6", "win-vs2015-3.7", "win-vs2015-3.8"] - display_name: "MONGODB-AWS Auth ${platform} ${python-version}" + python-version-windows: "*" + display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" tasks: - name: "aws-auth-test" diff --git a/test/test_objectid.py b/test/test_objectid.py index cb1f8bb49e..df80caf397 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -189,15 +189,15 @@ def generate_objectid_with_timestamp(timestamp): for tstamp, exp_datetime_args in TEST_DATA.items(): oid = generate_objectid_with_timestamp(tstamp) - if (not sys.platform.startswith("java") and - tstamp > 0x7FFFFFFF and sys.maxsize < 2**32): - # 32-bit platforms will overflow in datetime.fromtimestamp. - with self.assertRaises((OverflowError, ValueError)): + # 32-bit platforms may overflow in datetime.fromtimestamp. + if tstamp > 0x7FFFFFFF and sys.maxsize < 2**32: + try: oid.generation_time - else: - self.assertEqual( - oid.generation_time, - datetime.datetime(*exp_datetime_args, tzinfo=utc)) + except (OverflowError, ValueError): + continue + self.assertEqual( + oid.generation_time, + datetime.datetime(*exp_datetime_args, tzinfo=utc)) def test_random_regenerated_on_pid_change(self): # Test that change of pid triggers new random number generation. From 42aafd74d72fe72a7243c48c72c9dc4b82d92626 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 13 Mar 2020 12:17:24 -0700 Subject: [PATCH 0089/2111] PYTHON-2034 Validate EC2/Lambda auth tests do not contain URI credentials --- .evergreen/config.yml | 12 ++++++------ .evergreen/run-mongodb-aws-test.sh | 7 +++++++ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8041e06771..bfa2e83419 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -274,7 +274,7 @@ functions: fi fi - if [ $(uname -s) == "Darwin" ]; then + if [ $(uname -s) = "Darwin" ]; then core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then echo "Enabling coredumps" @@ -539,7 +539,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - if [ "${skip_EC2_auth_test}" == "true" ]; then + if [ "${skip_EC2_auth_test}" = "true" ]; then echo "This platform does not support the EC2 auth test, skipping..." exit 0 fi @@ -557,7 +557,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials as environment variables": - command: shell.exec @@ -577,7 +577,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials and session token as environment variables": - command: shell.exec @@ -599,7 +599,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws ECS auth test": - command: shell.exec @@ -608,7 +608,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - if [ "${skip_ECS_auth_test}" == "true" ]; then + if [ "${skip_ECS_auth_test}" = "true" ]; then echo "This platform does not support the ECS auth test, skipping..." exit 0 fi diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index bc577808d2..8765d4702c 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -29,6 +29,13 @@ fi export MONGODB_URI="$MONGODB_URI" +if [ "$ASSERT_NO_URI_CREDS" = "true" ]; then + if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials!"; + exit 1 + fi +fi + # show test output set -x From 016f8de9659143f8b8cdb6c4c9b1566ad548fc1d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 17 Mar 2020 14:33:21 -0700 Subject: [PATCH 0090/2111] PYTHON-2155 Add MongoDB 4.4 to testing matrix --- .evergreen/config.yml | 57 +++++++++++++++++++++++++++++-------------- test/test_database.py | 17 +++++++++---- 2 files changed, 51 insertions(+), 23 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index bfa2e83419..7554956dbb 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -986,6 +986,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-4.4-standalone" + tags: ["4.4", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "4.4" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-4.4-replica_set" + tags: ["4.4", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "4.4" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-4.4-sharded_cluster" + tags: ["4.4", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "4.4" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-latest-standalone" tags: ["latest", "standalone"] commands: @@ -1585,6 +1612,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: &all-server-versions - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" @@ -1604,6 +1632,7 @@ buildvariants: display_name: "Encryption ${platform} ${auth-ssl}" tasks: &encryption-server-versions - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".2.6" @@ -1669,6 +1698,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" @@ -1691,6 +1721,7 @@ buildvariants: display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" @@ -1739,6 +1770,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" @@ -1753,6 +1785,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: - ".latest" + - ".4.4" - ".4.2" variables: set_xtrace_on: on @@ -1865,6 +1898,7 @@ buildvariants: display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" @@ -1897,6 +1931,7 @@ buildvariants: display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" + - "test-4.4-standalone" - "test-4.2-standalone" rules: # Server versions 3.6 and 4.0 support snappy and zlib. @@ -1930,15 +1965,7 @@ buildvariants: python-version-windows: "*" auth-ssl: "*" display_name: "${platform} ${python-version-windows} ${auth-ssl}" - tasks: - - ".latest" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" + tasks: *all-server-versions - matrix_name: "tests-windows-python-version-32-bit" matrix_spec: @@ -1946,15 +1973,7 @@ buildvariants: python-version-windows-32: "*" auth-ssl: "*" display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" - tasks: - - ".latest" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" + tasks: *all-server-versions - matrix_name: "tests-python-version-requires-openssl-102-plus-test-ssl" matrix_spec: @@ -1964,6 +1983,7 @@ buildvariants: display_name: "${python-version} OpenSSL 1.0.2 ${platform} ${auth-ssl}" tasks: - ".latest" + - ".4.4" - ".4.2" - ".4.0" - ".3.6" @@ -2012,6 +2032,7 @@ buildvariants: then: add_tasks: - "test-latest-standalone" + - "test-4.4-standalone" - "test-4.2-standalone" - "test-4.0-standalone" - "test-3.6-standalone" diff --git a/test/test_database.py b/test/test_database.py index 9ee889f073..0dfdccdea1 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -367,16 +367,23 @@ def test_validate_collection(self): self.assertTrue(db.validate_collection(db.test, scandata=True)) self.assertTrue(db.validate_collection(db.test, scandata=True, full=True)) self.assertTrue(db.validate_collection(db.test, True, True)) - if client_context.version.at_least(4, 3, 3): - self.assertTrue(db.validate_collection(db.test, background=True)) - self.assertTrue(db.validate_collection(db.test, background=False)) + + @client_context.require_version_min(4, 3, 3) + def test_validate_collection_background(self): + db = self.client.pymongo_test + db.test.insert_one({"dummy": u"object"}) + coll = db.test + self.assertTrue(db.validate_collection(coll, background=False)) + # The inMemory storage engine does not support background=True. + if client_context.storage_engine != 'inMemory': + self.assertTrue(db.validate_collection(coll, background=True)) self.assertTrue( - db.validate_collection(db.test, scandata=True, background=True)) + db.validate_collection(coll, scandata=True, background=True)) # The server does not support background=True with full=True. # Assert that we actually send the background option by checking # that this combination fails. with self.assertRaises(OperationFailure): - db.validate_collection(db.test, full=True, background=True) + db.validate_collection(coll, full=True, background=True) @client_context.require_no_mongos def test_profiling_levels(self): From b65fdf3f51d98c202d5dd2c7f6532ac81dee486b Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Tue, 24 Mar 2020 17:19:07 -0700 Subject: [PATCH 0091/2111] PYTHON-2142 Add index hinting support to delete operations --- pymongo/bulk.py | 5 +- pymongo/collection.py | 60 ++- pymongo/operations.py | 38 +- test/crud/v2/aggregate-merge.json | 2 +- test/crud/v2/bulkWrite-arrayFilters.json | 102 +++++- .../v2/bulkWrite-delete-hint-clientError.json | 150 ++++++++ .../v2/bulkWrite-delete-hint-serverError.json | 209 +++++++++++ test/crud/v2/bulkWrite-delete-hint.json | 204 +++++++++++ .../v2/bulkWrite-update-hint-clientError.json | 235 ++++++++++++ .../v2/bulkWrite-update-hint-serverError.json | 343 ++++++++++++++++++ test/crud/v2/deleteMany-hint-clientError.json | 100 +++++ test/crud/v2/deleteMany-hint-serverError.json | 141 +++++++ test/crud/v2/deleteMany-hint.json | 128 +++++++ test/crud/v2/deleteOne-hint-clientError.json | 84 +++++ test/crud/v2/deleteOne-hint-serverError.json | 121 ++++++ test/crud/v2/deleteOne-hint.json | 116 ++++++ .../v2/findOneAndDelete-hint-clientError.json | 84 +++++ .../v2/findOneAndDelete-hint-serverError.json | 113 ++++++ test/crud/v2/findOneAndDelete-hint.json | 110 ++++++ .../findOneAndReplace-hint-clientError.json | 90 +++++ .../findOneAndReplace-hint-serverError.json | 123 +++++++ .../v2/findOneAndUpdate-hint-clientError.json | 94 +++++ .../v2/findOneAndUpdate-hint-serverError.json | 131 +++++++ test/crud/v2/updateMany-hint-clientError.json | 110 ++++++ test/crud/v2/updateMany-hint-serverError.json | 161 ++++++++ test/crud/v2/updateOne-hint-clientError.json | 98 +++++ test/crud/v2/updateOne-hint-serverError.json | 147 ++++++++ test/crud/v2/updateWithPipelines.json | 165 +++++++++ test/utils_spec_runner.py | 4 + 29 files changed, 3425 insertions(+), 43 deletions(-) create mode 100644 test/crud/v2/bulkWrite-delete-hint-clientError.json create mode 100644 test/crud/v2/bulkWrite-delete-hint-serverError.json create mode 100644 test/crud/v2/bulkWrite-delete-hint.json create mode 100644 test/crud/v2/bulkWrite-update-hint-clientError.json create mode 100644 test/crud/v2/bulkWrite-update-hint-serverError.json create mode 100644 test/crud/v2/deleteMany-hint-clientError.json create mode 100644 test/crud/v2/deleteMany-hint-serverError.json create mode 100644 test/crud/v2/deleteMany-hint.json create mode 100644 test/crud/v2/deleteOne-hint-clientError.json create mode 100644 test/crud/v2/deleteOne-hint-serverError.json create mode 100644 test/crud/v2/deleteOne-hint.json create mode 100644 test/crud/v2/findOneAndDelete-hint-clientError.json create mode 100644 test/crud/v2/findOneAndDelete-hint-serverError.json create mode 100644 test/crud/v2/findOneAndDelete-hint.json create mode 100644 test/crud/v2/findOneAndReplace-hint-clientError.json create mode 100644 test/crud/v2/findOneAndReplace-hint-serverError.json create mode 100644 test/crud/v2/findOneAndUpdate-hint-clientError.json create mode 100644 test/crud/v2/findOneAndUpdate-hint-serverError.json create mode 100644 test/crud/v2/updateMany-hint-clientError.json create mode 100644 test/crud/v2/updateMany-hint-serverError.json create mode 100644 test/crud/v2/updateOne-hint-clientError.json create mode 100644 test/crud/v2/updateOne-hint-serverError.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 41b2eedcd2..0189c2eac2 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -218,7 +218,7 @@ def add_replace(self, selector, replacement, upsert=False, cmd['hint'] = hint self.ops.append((_UPDATE, cmd)) - def add_delete(self, selector, limit, collation=None): + def add_delete(self, selector, limit, collation=None, hint=None): """Create a delete document and add it to the list of ops. """ cmd = SON([('q', selector), ('limit', limit)]) @@ -226,6 +226,9 @@ def add_delete(self, selector, limit, collation=None): if collation is not None: self.uses_collation = True cmd['collation'] = collation + if hint is not None: + self.uses_hint = True + cmd['hint'] = hint if limit == _DELETE_ALL: # A bulk_write containing a delete_many is not retryable. self.is_retryable = False diff --git a/pymongo/collection.py b/pymongo/collection.py index 4e3d1d13f1..a7b563bd93 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1125,7 +1125,7 @@ def drop(self, session=None): def _delete( self, sock_info, criteria, multi, write_concern=None, op_id=None, ordered=True, - collation=None, session=None, retryable_write=False): + collation=None, hint=None, session=None, retryable_write=False): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern @@ -1142,6 +1142,16 @@ def _delete( 'Collation is unsupported for unacknowledged writes.') else: delete_doc['collation'] = collation + if hint is not None: + if sock_info.max_wire_version < 5: + raise ConfigurationError( + 'Must be connected to MongoDB 3.4+ to use hint.') + elif not acknowledged: + raise ConfigurationError( + 'hint is unsupported for unacknowledged writes.') + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) + delete_doc['hint'] = hint command = SON([('delete', self.name), ('ordered', ordered), ('deletes', [delete_doc])]) @@ -1171,20 +1181,20 @@ def _delete( def _delete_retryable( self, criteria, multi, write_concern=None, op_id=None, ordered=True, - collation=None, session=None): + collation=None, hint=None, session=None): """Internal delete helper.""" def _delete(session, sock_info, retryable_write): return self._delete( sock_info, criteria, multi, write_concern=write_concern, op_id=op_id, ordered=ordered, - collation=collation, session=session, + collation=collation, hint=hint, session=session, retryable_write=retryable_write) return self.__database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, _delete, session) - def delete_one(self, filter, collation=None, session=None): + def delete_one(self, filter, collation=None, hint=None, session=None): """Delete a single document matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1200,18 +1210,24 @@ def delete_one(self, filter, collation=None, session=None): - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionadded:: 3.0 """ write_concern = self._write_concern_for(session) @@ -1219,10 +1235,10 @@ def delete_one(self, filter, collation=None, session=None): self._delete_retryable( filter, False, write_concern=write_concern, - collation=collation, session=session), + collation=collation, hint=hint, session=session), write_concern.acknowledged) - def delete_many(self, filter, collation=None, session=None): + def delete_many(self, filter, collation=None, hint=None, session=None): """Delete one or more documents matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1238,18 +1254,24 @@ def delete_many(self, filter, collation=None, session=None): - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - .. versionadded:: 3.0 """ write_concern = self._write_concern_for(session) @@ -1257,7 +1279,7 @@ def delete_many(self, filter, collation=None, session=None): self._delete_retryable( filter, True, write_concern=write_concern, - collation=collation, session=session), + collation=collation, hint=hint, session=session), write_concern.acknowledged) def find_one(self, filter=None, *args, **kwargs): @@ -2849,10 +2871,8 @@ def inline_map_reduce(self, map, reduce, full_response=False, session=None, .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.4 Added the `collation` option. - """ res = self._map_reduce(map, reduce, {"inline": 1}, session, self.read_preference, **kwargs) @@ -2931,7 +2951,8 @@ def _find_and_modify(session, sock_info, retryable_write): write_concern.acknowledged, _find_and_modify, session) def find_one_and_delete(self, filter, - projection=None, sort=None, session=None, **kwargs): + projection=None, sort=None, hint=None, + session=None, **kwargs): """Finds a single document and deletes it, returning the document. >>> db.test.count_documents({'x': 1}) @@ -2968,15 +2989,21 @@ def find_one_and_delete(self, filter, - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is deleted. + - `hint` (optional): An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 3.11 + Added ``hint`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. - .. versionchanged:: 3.2 Respects write concern. @@ -2989,11 +3016,10 @@ def find_one_and_delete(self, filter, .. versionchanged:: 3.4 Added the `collation` option. .. versionadded:: 3.0 - """ kwargs['remove'] = True return self.__find_and_modify(filter, projection, sort, - session=session, **kwargs) + hint=hint, session=session, **kwargs) def find_one_and_replace(self, filter, replacement, projection=None, sort=None, upsert=False, diff --git a/pymongo/operations.py b/pymongo/operations.py index 987a2cdfcf..f72ef82aa0 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -57,9 +57,9 @@ def __ne__(self, other): class DeleteOne(object): """Represents a delete_one operation.""" - __slots__ = ("_filter", "_collation") + __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None): + def __init__(self, filter, collation=None, hint=None): """Create a DeleteOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -69,18 +69,31 @@ def __init__(self, filter, collation=None): - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ if filter is not None: validate_is_mapping("filter", filter) + if hint is not None: + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) self._filter = filter self._collation = collation + self._hint = hint def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation) + bulkobj.add_delete(self._filter, 1, collation=self._collation, + hint=self._hint) def __repr__(self): return "DeleteOne(%r, %r)" % (self._filter, self._collation) @@ -98,9 +111,9 @@ def __ne__(self, other): class DeleteMany(object): """Represents a delete_many operation.""" - __slots__ = ("_filter", "_collation") + __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None): + def __init__(self, filter, collation=None, hint=None): """Create a DeleteMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -110,18 +123,31 @@ def __init__(self, filter, collation=None): - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. + - `hint` (optional): An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + .. versionchanged:: 3.11 + Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ if filter is not None: validate_is_mapping("filter", filter) + if hint is not None: + if not isinstance(hint, string_type): + hint = helpers._index_document(hint) self._filter = filter self._collation = collation + self._hint = hint def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation) + bulkobj.add_delete(self._filter, 0, collation=self._collation, + hint=self._hint) def __repr__(self): return "DeleteMany(%r, %r)" % (self._filter, self._collation) diff --git a/test/crud/v2/aggregate-merge.json b/test/crud/v2/aggregate-merge.json index 037ae25d24..c61736a0bb 100644 --- a/test/crud/v2/aggregate-merge.json +++ b/test/crud/v2/aggregate-merge.json @@ -1,7 +1,7 @@ { "runOn": [ { - "minServerVersion": "4.2.0" + "minServerVersion": "4.1.11" } ], "data": [ diff --git a/test/crud/v2/bulkWrite-arrayFilters.json b/test/crud/v2/bulkWrite-arrayFilters.json index be26a337a5..2d3ce96de1 100644 --- a/test/crud/v2/bulkWrite-arrayFilters.json +++ b/test/crud/v2/bulkWrite-arrayFilters.json @@ -32,7 +32,7 @@ "database_name": "crud-tests", "tests": [ { - "description": "BulkWrite with arrayFilters", + "description": "BulkWrite updateOne with arrayFilters", "operations": [ { "name": "bulkWrite", @@ -53,7 +53,86 @@ } ] } - }, + } + ], + "options": { + "ordered": true + } + }, + "result": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": {}, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + ], + "ordered": true + }, + "command_name": "update", + "database_name": "crud-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + } + }, + { + "description": "BulkWrite updateMany with arrayFilters", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ { "name": "updateMany", "arguments": { @@ -79,8 +158,8 @@ "deletedCount": 0, "insertedCount": 0, "insertedIds": {}, - "matchedCount": 3, - "modifiedCount": 3, + "matchedCount": 2, + "modifiedCount": 2, "upsertedCount": 0, "upsertedIds": {} } @@ -92,19 +171,6 @@ "command": { "update": "test", "updates": [ - { - "q": {}, - "u": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - }, { "q": {}, "u": { @@ -134,7 +200,7 @@ "_id": 1, "y": [ { - "b": 2 + "b": 3 }, { "b": 2 diff --git a/test/crud/v2/bulkWrite-delete-hint-clientError.json b/test/crud/v2/bulkWrite-delete-hint-clientError.json new file mode 100644 index 0000000000..cfeac904ca --- /dev/null +++ b/test/crud/v2/bulkWrite-delete-hint-clientError.json @@ -0,0 +1,150 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.3.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "BulkWrite_delete_hint", + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (client-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite deleteMany with hints unsupported (client-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/bulkWrite-delete-hint-serverError.json b/test/crud/v2/bulkWrite-delete-hint-serverError.json new file mode 100644 index 0000000000..c68973b0f6 --- /dev/null +++ b/test/crud/v2/bulkWrite-delete-hint-serverError.json @@ -0,0 +1,209 @@ +{ + "runOn": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "BulkWrite_delete_hint", + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (server-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite deleteMany with hints unsupported (server-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/bulkWrite-delete-hint.json b/test/crud/v2/bulkWrite-delete-hint.json new file mode 100644 index 0000000000..ece3238fc3 --- /dev/null +++ b/test/crud/v2/bulkWrite-delete-hint.json @@ -0,0 +1,204 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "BulkWrite_delete_hint", + "tests": [ + { + "description": "BulkWrite deleteOne with hints", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "result": { + "deletedCount": 2, + "insertedCount": 0, + "insertedIds": {}, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite deleteMany with hints", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "result": { + "deletedCount": 3, + "insertedCount": 0, + "insertedIds": {}, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/bulkWrite-update-hint-clientError.json b/test/crud/v2/bulkWrite-update-hint-clientError.json new file mode 100644 index 0000000000..fa919ec515 --- /dev/null +++ b/test/crud/v2/bulkWrite-update-hint-clientError.json @@ -0,0 +1,235 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.3.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "test_bulkwrite_update_hint", + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (client-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite updateMany with update hints unsupported (client-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite replaceOne with update hints unsupported (client-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/bulkWrite-update-hint-serverError.json b/test/crud/v2/bulkWrite-update-hint-serverError.json new file mode 100644 index 0000000000..e8b96fffeb --- /dev/null +++ b/test/crud/v2/bulkWrite-update-hint-serverError.json @@ -0,0 +1,343 @@ +{ + "runOn": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "test_bulkwrite_update_hint", + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (server-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite updateMany with update hints unsupported (server-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_" + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "BulkWrite replaceOne with update hints unsupported (server-side error)", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "hint": "_id_" + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/deleteMany-hint-clientError.json b/test/crud/v2/deleteMany-hint-clientError.json new file mode 100644 index 0000000000..3a0d02566b --- /dev/null +++ b/test/crud/v2/deleteMany-hint-clientError.json @@ -0,0 +1,100 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.3.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "DeleteMany_hint", + "tests": [ + { + "description": "DeleteMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "DeleteMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/deleteMany-hint-serverError.json b/test/crud/v2/deleteMany-hint-serverError.json new file mode 100644 index 0000000000..5829e86df8 --- /dev/null +++ b/test/crud/v2/deleteMany-hint-serverError.json @@ -0,0 +1,141 @@ +{ + "runOn": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "DeleteMany_hint", + "tests": [ + { + "description": "DeleteMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "DeleteMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/deleteMany-hint.json b/test/crud/v2/deleteMany-hint.json new file mode 100644 index 0000000000..51ee386066 --- /dev/null +++ b/test/crud/v2/deleteMany-hint.json @@ -0,0 +1,128 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "DeleteMany_hint", + "tests": [ + { + "description": "DeleteMany with hint string", + "operations": [ + { + "object": "collection", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "result": { + "deletedCount": 2 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "DeleteMany with hint document", + "operations": [ + { + "object": "collection", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "result": { + "deletedCount": 2 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/deleteOne-hint-clientError.json b/test/crud/v2/deleteOne-hint-clientError.json new file mode 100644 index 0000000000..97f8ec4924 --- /dev/null +++ b/test/crud/v2/deleteOne-hint-clientError.json @@ -0,0 +1,84 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.3.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "DeleteOne_hint", + "tests": [ + { + "description": "DeleteOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/deleteOne-hint-serverError.json b/test/crud/v2/deleteOne-hint-serverError.json new file mode 100644 index 0000000000..3cf9400a88 --- /dev/null +++ b/test/crud/v2/deleteOne-hint-serverError.json @@ -0,0 +1,121 @@ +{ + "runOn": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "DeleteOne_hint", + "tests": [ + { + "description": "DeleteOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/deleteOne-hint.json b/test/crud/v2/deleteOne-hint.json new file mode 100644 index 0000000000..ec8e7715a2 --- /dev/null +++ b/test/crud/v2/deleteOne-hint.json @@ -0,0 +1,116 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "DeleteOne_hint", + "tests": [ + { + "description": "DeleteOne with hint string", + "operations": [ + { + "object": "collection", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "deleteOne with hint document", + "operations": [ + { + "object": "collection", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndDelete-hint-clientError.json b/test/crud/v2/findOneAndDelete-hint-clientError.json new file mode 100644 index 0000000000..262e78ce75 --- /dev/null +++ b/test/crud/v2/findOneAndDelete-hint-clientError.json @@ -0,0 +1,84 @@ +{ + "runOn": [ + { + "maxServerVersion": "4.0.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndDelete_hint", + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndDelete-hint-serverError.json b/test/crud/v2/findOneAndDelete-hint-serverError.json new file mode 100644 index 0000000000..5d1dd8989f --- /dev/null +++ b/test/crud/v2/findOneAndDelete-hint-serverError.json @@ -0,0 +1,113 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.3" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndDelete_hint", + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndDelete-hint.json b/test/crud/v2/findOneAndDelete-hint.json new file mode 100644 index 0000000000..fe8dcfa4c5 --- /dev/null +++ b/test/crud/v2/findOneAndDelete-hint.json @@ -0,0 +1,110 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndDelete_hint", + "tests": [ + { + "description": "FindOneAndDelete with hint string", + "operations": [ + { + "object": "collection", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "result": { + "_id": 1, + "x": 11 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndReplace-hint-clientError.json b/test/crud/v2/findOneAndReplace-hint-clientError.json new file mode 100644 index 0000000000..08fd4b3ecc --- /dev/null +++ b/test/crud/v2/findOneAndReplace-hint-clientError.json @@ -0,0 +1,90 @@ +{ + "runOn": [ + { + "maxServerVersion": "4.0.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndReplace_hint", + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndReplace-hint-serverError.json b/test/crud/v2/findOneAndReplace-hint-serverError.json new file mode 100644 index 0000000000..6710e6a70e --- /dev/null +++ b/test/crud/v2/findOneAndReplace-hint-serverError.json @@ -0,0 +1,123 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndReplace_hint", + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndUpdate-hint-clientError.json b/test/crud/v2/findOneAndUpdate-hint-clientError.json new file mode 100644 index 0000000000..8cd5cddb51 --- /dev/null +++ b/test/crud/v2/findOneAndUpdate-hint-clientError.json @@ -0,0 +1,94 @@ +{ + "runOn": [ + { + "maxServerVersion": "4.0.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndUpdate_hint", + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/findOneAndUpdate-hint-serverError.json b/test/crud/v2/findOneAndUpdate-hint-serverError.json new file mode 100644 index 0000000000..1f4b2bda8b --- /dev/null +++ b/test/crud/v2/findOneAndUpdate-hint-serverError.json @@ -0,0 +1,131 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndUpdate_hint", + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateMany-hint-clientError.json b/test/crud/v2/updateMany-hint-clientError.json new file mode 100644 index 0000000000..44ebddc53d --- /dev/null +++ b/test/crud/v2/updateMany-hint-clientError.json @@ -0,0 +1,110 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.3.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "test_updatemany_hint", + "tests": [ + { + "description": "UpdateMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "UpdateMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateMany-hint-serverError.json b/test/crud/v2/updateMany-hint-serverError.json new file mode 100644 index 0000000000..86f21246e9 --- /dev/null +++ b/test/crud/v2/updateMany-hint-serverError.json @@ -0,0 +1,161 @@ +{ + "runOn": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "test_updatemany_hint", + "tests": [ + { + "description": "UpdateMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_" + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "UpdateMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + } + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateOne-hint-clientError.json b/test/crud/v2/updateOne-hint-clientError.json new file mode 100644 index 0000000000..82bfe368c7 --- /dev/null +++ b/test/crud/v2/updateOne-hint-clientError.json @@ -0,0 +1,98 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.3.99" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "test_updateone_hint", + "tests": [ + { + "description": "UpdateOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateOne-hint-serverError.json b/test/crud/v2/updateOne-hint-serverError.json new file mode 100644 index 0000000000..8e8037eb8c --- /dev/null +++ b/test/crud/v2/updateOne-hint-serverError.json @@ -0,0 +1,147 @@ +{ + "runOn": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "test_updateone_hint", + "tests": [ + { + "description": "UpdateOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + ] + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateWithPipelines.json b/test/crud/v2/updateWithPipelines.json index 7d20bffb30..a310f2825f 100644 --- a/test/crud/v2/updateWithPipelines.json +++ b/test/crud/v2/updateWithPipelines.json @@ -238,6 +238,171 @@ ] } } + }, + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + ] + }, + "command_name": "update", + "database_name": "crud-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + } + }, + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "result": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ] + }, + "command_name": "update", + "database_name": "crud-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + } } ] } diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4357b29d2f..def7dcc5b0 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -518,6 +518,10 @@ def run_scenario(self, scenario_def, test): sessions = {} session_ids = {} for i in range(2): + # Don't attempt to create sessions if they are not supported by + # the running server version. + if not client_context.sessions_enabled: + break session_name = 'session%d' % i opts = camel_to_snake_args(test['sessionOptions'][session_name]) if 'default_transaction_options' in opts: From bf6af9fd77bc13c0668018500071992e9e5ba05b Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 26 Mar 2020 18:35:20 -0700 Subject: [PATCH 0092/2111] PYTHON-1975 Bump max wire version to 9 (MongoDB 4.4) --- pymongo/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 8b586c4b42..945b1f6921 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -58,7 +58,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "2.6" MIN_SUPPORTED_WIRE_VERSION = 2 -MAX_SUPPORTED_WIRE_VERSION = 8 +MAX_SUPPORTED_WIRE_VERSION = 9 # Frequency to call ismaster on servers, in seconds. HEARTBEAT_FREQUENCY = 10 From c282cc18dd023e5eec1336a114dd1caf1a34ace8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 31 Mar 2020 13:34:05 -0700 Subject: [PATCH 0093/2111] PYTHON-2153 Final workaround for hanging createIndexes wtimeout test --- test/test_read_write_concern_spec.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index ad4f7b644e..e4e4430ad1 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -122,25 +122,21 @@ def insert_command(): ] ops_require_34 = [ ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])), + # SERVER-46668 Delete all the documents in the collection to + # workaround a hang in createIndexes. + ('delete_many', lambda: coll.delete_many({})), + ('create_index', lambda: coll.create_index([('a', DESCENDING)])), + ('create_indexes', lambda: coll.create_indexes([IndexModel('b')])), + ('drop_index', lambda: coll.drop_index([('a', DESCENDING)])), ('create', lambda: db.create_collection('new')), ('rename', lambda: coll.rename('new')), ('drop', lambda: db.new.drop()), ] if client_context.version > (3, 4): ops.extend(ops_require_34) - # SERVER-34776: dropDatabase does not respect wtimeout in 3.6. + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. if client_context.version[:2] != (3, 6): ops.append(('drop_database', lambda: client.drop_database(db))) - # SERVER-46668: createIndexes does not respect wtimeout in 4.4+. - if client_context.version <= (4, 3): - ops.extend([ - ('create_index', - lambda: coll.create_index([('a', DESCENDING)])), - ('create_indexes', - lambda: coll.create_indexes([IndexModel('b')])), - ('drop_index', - lambda: coll.drop_index([('a', DESCENDING)])), - ]) for name, f in ops: # Ensure insert_many and bulk_write still raise BulkWriteError. From fd64f4dd64d5ba75a909d088b73ecb02d65b21b7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Feb 2020 10:52:27 -0800 Subject: [PATCH 0094/2111] PYTHON-2030 Support collection and index creation in multi-doc transactions --- pymongo/client_session.py | 9 +- pymongo/database.py | 11 +- test/test_transactions.py | 35 +++- test/transactions/create-collection.json | 204 +++++++++++++++++++ test/transactions/create-index.json | 237 +++++++++++++++++++++++ test/utils_spec_runner.py | 26 +++ 6 files changed, 514 insertions(+), 8 deletions(-) create mode 100644 test/transactions/create-collection.json create mode 100644 test/transactions/create-index.json diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 0c0e7c436d..f701459795 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -65,10 +65,13 @@ If the block exits with an exception, the transaction automatically calls :meth:`ClientSession.abort_transaction`. -For multi-document transactions, you can only specify read/write (CRUD) -operations on existing collections. For example, a multi-document transaction -cannot include a create or drop collection/index operations, including an +In general, multi-document transactions only support read/write (CRUD) +operations on existing collections. However, MongoDB 4.4 adds support for +creating collections and indexes with some limitations, including an insert operation that would result in the creation of a new collection. +For a complete description of all the supported and unsupported operations +see the `MongoDB server's documentation for transactions +`_. A session may only have a single active transaction at a time, multiple transactions on the same session can be executed in sequence. diff --git a/pymongo/database.py b/pymongo/database.py index 7137c96bf6..60dfc3ca46 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -390,6 +390,10 @@ def create_collection(self, name, codec_options=None, - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command + .. versionchanged:: 3.11 + This method is now supported inside multi-document transactions + with MongoDB 4.4+. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -403,8 +407,11 @@ def create_collection(self, name, codec_options=None, Removed deprecated argument: options """ with self.__client._tmp_session(session) as s: - if name in self.list_collection_names( - filter={"name": name}, session=s): + # Skip this check in a transaction where listCollections is not + # supported. + if ((not s or not s.in_transaction) and + name in self.list_collection_names( + filter={"name": name}, session=s)): raise CollectionInvalid("collection %s already exists" % name) return Collection(self, name, True, codec_options, diff --git a/test/test_transactions.py b/test/test_transactions.py index 88e6dae5ab..40341cbb80 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -21,7 +21,8 @@ from pymongo import client_session, WriteConcern from pymongo.client_session import TransactionOptions -from pymongo.errors import (ConfigurationError, +from pymongo.errors import (CollectionInvalid, + ConfigurationError, ConnectionFailure, OperationFailure) from pymongo.operations import IndexModel, InsertOne @@ -91,7 +92,6 @@ def test_transaction_options_validation(self): TypeError, "max_commit_time_ms must be an integer or None"): TransactionOptions(max_commit_time_ms="10000") - @client_context.require_transactions def test_transaction_write_concern_override(self): """Test txn overrides Client/Database/Collection write_concern.""" @@ -121,7 +121,6 @@ def test_transaction_write_concern_override(self): unsupported_txn_writes = [ (client.drop_database, [db.name], {}), - (db.create_collection, ['collection'], {}), (db.drop_collection, ['collection'], {}), (coll.drop, [], {}), (coll.map_reduce, @@ -135,6 +134,12 @@ def test_transaction_write_concern_override(self): (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] + # Creating a collection in a transaction requires MongoDB 4.4+. + if client_context.version < (4, 3, 4): + unsupported_txn_writes.extend([ + (db.create_collection, ['collection'], {}), + ]) + for op in unsupported_txn_writes: op, args, kwargs = op with client.start_session() as s: @@ -201,6 +206,30 @@ def test_unpin_for_non_transaction_operation(self): self.assertGreater(len(addresses), 1) + @client_context.require_transactions + @client_context.require_version_min(4, 3, 4) + def test_create_collection(self): + client = rs_client() + self.addCleanup(client.close) + db = client.pymongo_test + coll = db.test_create_collection + self.addCleanup(coll.drop) + with client.start_session() as s, s.start_transaction(): + coll2 = db.create_collection(coll.name, session=s) + self.assertEqual(coll, coll2) + coll.insert_one({}, session=s) + + # Outside a transaction we raise CollectionInvalid on existing colls. + with self.assertRaises(CollectionInvalid): + db.create_collection(coll.name) + + # Inside a transaction we raise the OperationFailure from create. + with client.start_session() as s: + s.start_transaction() + with self.assertRaises(OperationFailure) as ctx: + db.create_collection(coll.name, session=s) + self.assertEqual(ctx.exception.code, 48) # NamespaceExists + class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" diff --git a/test/transactions/create-collection.json b/test/transactions/create-collection.json new file mode 100644 index 0000000000..9071c59c41 --- /dev/null +++ b/test/transactions/create-collection.json @@ -0,0 +1,204 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "explicitly create collection using create command", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "create": "test", + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "create", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + }, + { + "description": "implicitly create collection using insert", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + } + ] +} diff --git a/test/transactions/create-index.json b/test/transactions/create-index.json new file mode 100644 index 0000000000..2ff09c9288 --- /dev/null +++ b/test/transactions/create-index.json @@ -0,0 +1,237 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.4", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "create index on a non-existing collection", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "createIndexes", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + }, + { + "description": "create index on a collection created within the same transaction", + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "transaction-tests", + "collection": "test", + "index": "t_1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "test", + "writeConcern": null + }, + "command_name": "drop", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "create": "test", + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "create", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": "session0", + "writeConcern": null + }, + "command_name": "createIndexes", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ] + } + ] +} diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index def7dcc5b0..e75d928fc2 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -108,6 +108,26 @@ def assert_session_unpinned(self, session): self.assertIsNone(session._pinned_address) self.assertIsNone(session._transaction.pinned_address) + def assert_collection_exists(self, database, collection): + """Run the assertCollectionExists test operation.""" + db = self.client[database] + self.assertIn(collection, db.list_collection_names()) + + def assert_collection_not_exists(self, database, collection): + """Run the assertCollectionNotExists test operation.""" + db = self.client[database] + self.assertNotIn(collection, db.list_collection_names()) + + def assert_index_exists(self, database, collection, index): + """Run the assertIndexExists test operation.""" + coll = self.client[database][collection] + self.assertIn(index, [doc['name'] for doc in coll.list_indexes()]) + + def assert_index_not_exists(self, database, collection, index): + """Run the assertIndexNotExists test operation.""" + coll = self.client[database][collection] + self.assertNotIn(index, [doc['name'] for doc in coll.list_indexes()]) + def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] self.assertEqual(labels, expected_labels) @@ -310,6 +330,12 @@ def run_operation(self, sessions, collection, operation): arguments['callback'] = lambda _: self.run_operations( sessions, original_collection, copy.deepcopy(callback_ops), in_with_transaction=True) + elif name == 'drop_collection' and arg_name == 'collection': + arguments['name_or_collection'] = arguments.pop(arg_name) + elif name == 'create_collection' and arg_name == 'collection': + arguments['name'] = arguments.pop(arg_name) + elif name == 'create_index' and arg_name == 'keys': + arguments['keys'] = list(arguments.pop(arg_name).items()) else: arguments[c2s] = arguments.pop(arg_name) From d42c5105cd3fb321a1e059c4fec23314d17bcf93 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 6 Apr 2020 13:37:14 -0700 Subject: [PATCH 0095/2111] PYTHON-2183 Test that readPreferenceTags are always interpreted as an array Also resolves PYTHON-2085. --- test/uri_options/auth-options.json | 14 +++++++++++++- test/uri_options/concern-options.json | 9 --------- test/uri_options/read-preference-options.json | 15 +++++++++++++++ 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/test/uri_options/auth-options.json b/test/uri_options/auth-options.json index 65a168b334..fadbac35d2 100644 --- a/test/uri_options/auth-options.json +++ b/test/uri_options/auth-options.json @@ -1,7 +1,7 @@ { "tests": [ { - "description": "Valid auth options are parsed correctly", + "description": "Valid auth options are parsed correctly (GSSAPI)", "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authSource=$external", "valid": true, "warning": false, @@ -15,6 +15,18 @@ }, "authSource": "$external" } + }, + { + "description": "Valid auth options are parsed correctly (SCRAM-SHA-1)", + "uri": "mongodb://foo:bar@example.com/?authMechanism=SCRAM-SHA-1&authSource=authSourceDB", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "authMechanism": "SCRAM-SHA-1", + "authSource": "authSourceDB" + } } ] } diff --git a/test/uri_options/concern-options.json b/test/uri_options/concern-options.json index 2b3783746c..5a8ef6c272 100644 --- a/test/uri_options/concern-options.json +++ b/test/uri_options/concern-options.json @@ -36,15 +36,6 @@ "w": "arbitraryButStillValid" } }, - { - "description": "Too low w causes a warning", - "uri": "mongodb://example.com/?w=-2", - "valid": true, - "warning": true, - "hosts": null, - "auth": null, - "options": {} - }, { "description": "Non-numeric wTimeoutMS causes a warning", "uri": "mongodb://example.com/?wTimeoutMS=invalid", diff --git a/test/uri_options/read-preference-options.json b/test/uri_options/read-preference-options.json index e62ce4fa75..df8c0c0eb8 100644 --- a/test/uri_options/read-preference-options.json +++ b/test/uri_options/read-preference-options.json @@ -21,6 +21,21 @@ "maxStalenessSeconds": 120 } }, + { + "description": "Single readPreferenceTags is parsed as array of size one", + "uri": "mongodb://example.com/?readPreferenceTags=dc:ny", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreferenceTags": [ + { + "dc": "ny" + } + ] + } + }, { "description": "Invalid readPreferenceTags causes a warning", "uri": "mongodb://example.com/?readPreferenceTags=invalid", From c96220df0f09c0a468490af0d24f17587060d2cd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 6 Apr 2020 16:15:17 -0700 Subject: [PATCH 0096/2111] Update docs for 3.11.0b0 --- README.rst | 2 +- doc/changelog.rst | 41 ++++++++++++++++++++++++----------------- doc/installation.rst | 4 ++-- 3 files changed, 27 insertions(+), 20 deletions(-) diff --git a/README.rst b/README.rst index 5735b2d84f..decd4beacd 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 2.6, 3.0, 3.2, 3.4, 3.6, 4.0 and 4.2. +PyMongo supports MongoDB 2.6, 3.0, 3.2, 3.4, 3.6, 4.0, 4.2, and 4.4. Support / Feedback ================== diff --git a/doc/changelog.rst b/doc/changelog.rst index 83f82b023f..a4dc63742c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,36 +1,43 @@ Changelog ========= -Changes in Version 3.11.0 -------------------------- +Changes in Version 3.11.0b0 +--------------------------- Version 3.11 adds support for MongoDB 4.4. Highlights include: -- Deprecated the ``oplog_replay`` parameter to - :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the - server optimizes queries against the oplog collection without requiring - the user to set this flag. -- Added index hinting support to the - :meth:`pymongo.collection.Collection.replace_one`, - :meth:`pymongo.collection.Collection.update_one`, - :meth:`pymongo.collection.Collection.update_many`, - :meth:`pymongo.collection.Collection.find_one_and_replace`, - and :meth:`pymongo.collection.Collection.find_one_and_update` commands. -- Added index hinting support to the - :class:`pymongo.operations.ReplaceOne`, - :class:`pymongo.operations.UpdateOne`, - and :class:`pymongo.operations.UpdateMany` bulk operations. -- Support for :ref:`OCSP` (Online Certificate Status Protocol) +- Support for :ref:`OCSP` (Online Certificate Status Protocol). - Support for `PyOpenSSL `_ as an alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` support. It will also be installed when using the "tls" extra if the version of Python in use is older than 2.7.9. - Support for the :ref:`MONGODB-AWS` authentication mechanism. +- Added index hinting support to the + :meth:`~pymongo.collection.Collection.replace_one`, + :meth:`~pymongo.collection.Collection.update_one`, + :meth:`~pymongo.collection.Collection.update_many`, + :meth:`~pymongo.collection.Collection.find_one_and_replace`, + :meth:`~pymongo.collection.Collection.find_one_and_update`, + :meth:`~pymongo.collection.Collection.delete_one`, + :meth:`~pymongo.collection.Collection.delete_many`, and + :meth:`~pymongo.collection.Collection.find_one_and_delete` commands. +- Added index hinting support to the + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.DeleteOne`, and + :class:`~pymongo.operations.DeleteMany` bulk operations. - Added the ``background`` parameter to :meth:`pymongo.database.Database.validate_collection`. For a description of this parameter see the MongoDB documentation for the `validate command`_. - Added the ``allow_disk_use`` parameters to :meth:`pymongo.collection.Collection.find`. +- Support for creating collections in multi-document transactions with + :meth:`~pymongo.database.Database.create_collection` on MongoDB 4.4+. +- Deprecated the ``oplog_replay`` parameter to + :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the + server optimizes queries against the oplog collection without requiring + the user to set this flag. .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ diff --git a/doc/installation.rst b/doc/installation.rst index aa7ec6a41c..adcaad478b 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -276,8 +276,8 @@ but can be found on the `GitHub tags page `_. They can be installed by passing the full URL for the tag to pip:: - $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.9.0b1.tar.gz + $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b0.tar.gz or easy_install:: - $ python -m easy_install https://github.com/mongodb/mongo-python-driver/archive/3.9.0b1.tar.gz + $ python -m easy_install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b0.tar.gz From 24e8aebf1201bf67c1aaa898df136ed4c5cf0d13 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Apr 2020 11:31:49 -0700 Subject: [PATCH 0097/2111] PYTHON-2188 Always raise an error when bson encoding exceeds 2GiB Remove unused buffer_write_at_position. --- bson/buffer.c | 25 ++++++++----------------- bson/buffer.h | 5 ----- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/bson/buffer.c b/bson/buffer.c index c60fc44649..0507eb2388 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -89,10 +89,16 @@ static int buffer_grow(buffer_t buffer, int min_length) { /* Assure that `buffer` has at least `size` free bytes (and grow if needed). * Return non-zero on allocation failure. */ static int buffer_assure_space(buffer_t buffer, int size) { - if (buffer->position + size <= buffer->size) { + int new_size = buffer->position + size; + /* Check for overflow. */ + if (new_size < buffer->position) { + return 1; + } + + if (new_size <= buffer->size) { return 0; } - return buffer_grow(buffer, buffer->position + size); + return buffer_grow(buffer, new_size); } /* Save `size` bytes from the current position in `buffer` (and grow if needed). @@ -118,21 +124,6 @@ int buffer_write(buffer_t buffer, const char* data, int size) { return 0; } -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. */ -int buffer_write_at_position(buffer_t buffer, buffer_position position, - const char* data, int size) { - if (position + size > buffer->size) { - buffer_free(buffer); - return 1; - } - - memcpy(buffer->buffer + position, data, size); - return 0; -} - - int buffer_get_position(buffer_t buffer) { return buffer->position; } diff --git a/bson/buffer.h b/bson/buffer.h index 96b1d0f837..1485082d95 100644 --- a/bson/buffer.h +++ b/bson/buffer.h @@ -41,11 +41,6 @@ buffer_position buffer_save_space(buffer_t buffer, int size); * Return non-zero on allocation failure. */ int buffer_write(buffer_t buffer, const char* data, int size); -/* Write `size` bytes from `data` to `buffer` at position `position`. - * Does not change the internal position of `buffer`. - * Return non-zero if buffer isn't large enough for write. */ -int buffer_write_at_position(buffer_t buffer, buffer_position position, const char* data, int size); - /* Getters for the internals of a buffer_t. * Should try to avoid using these as much as possible * since they break the abstraction. */ From 021adc53e8e10adc252bec586bbd6ae3c1ae4f9b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Apr 2020 14:41:37 -0700 Subject: [PATCH 0098/2111] PYTHON-2191 Fix double free when realloc fails in buffer_grow --- bson/buffer.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bson/buffer.c b/bson/buffer.c index 0507eb2388..1d428ddf7b 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -53,7 +53,10 @@ int buffer_free(buffer_t buffer) { if (buffer == NULL) { return 1; } - free(buffer->buffer); + /* Buffer will be NULL when buffer_grow fails. */ + if (buffer->buffer != NULL) { + free(buffer->buffer); + } free(buffer); return 0; } @@ -79,7 +82,6 @@ static int buffer_grow(buffer_t buffer, int min_length) { buffer->buffer = (char*)realloc(buffer->buffer, sizeof(char) * size); if (buffer->buffer == NULL) { free(old_buffer); - free(buffer); return 1; } buffer->size = size; From 643e64880ec1c3ffa70d67c06dfd55755d362fc9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Apr 2020 14:35:38 -0700 Subject: [PATCH 0099/2111] PYTHON-2188 Raise ValueError instead of MemoryError when encoding exceeds 2GiB --- bson/_cbsonmodule.c | 7 ------- bson/buffer.c | 28 +++++++++++++++++++++++----- pymongo/_cmessagemodule.c | 22 ---------------------- 3 files changed, 23 insertions(+), 34 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 1fbb48cc96..ae28c1ba8a 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -193,7 +193,6 @@ static long long millis_from_datetime(PyObject* datetime) { /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { if (buffer_write(buffer, data, size)) { - PyErr_NoMemory(); return 0; } return 1; @@ -923,7 +922,6 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, /* save space for length */ length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); Py_DECREF(scope); return 0; } @@ -1121,7 +1119,6 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, /* save space for length */ length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return 0; } @@ -1140,7 +1137,6 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, PyObject* item_value; if (list_type_byte == -1) { - PyErr_NoMemory(); return 0; } INT2STRING(name, (int)i); @@ -1454,7 +1450,6 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt type_byte = buffer_save_space(buffer, 1); if (type_byte == -1) { - PyErr_NoMemory(); return 0; } if (check_keys && !check_key_name(name, name_length)) { @@ -1704,7 +1699,6 @@ int write_dict(PyObject* self, buffer_t buffer, length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return 0; } @@ -1808,7 +1802,6 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { buffer = buffer_new(); if (!buffer) { destroy_codec_options(&options); - PyErr_NoMemory(); return NULL; } diff --git a/bson/buffer.c b/bson/buffer.c index 1d428ddf7b..66672749fa 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -17,6 +17,10 @@ #include #include +/* Include Python.h so we can set Python's error indicator. */ +#define PY_SSIZE_T_CLEAN +#include "Python.h" + #include "buffer.h" #define INITIAL_BUFFER_SIZE 256 @@ -27,12 +31,19 @@ struct buffer { int position; }; +/* Set Python's error indicator to MemoryError. + * Called after allocation failures. */ +static void set_memory_error() { + PyErr_NoMemory(); +} + /* Allocate and return a new buffer. - * Return NULL on allocation failure. */ + * Return NULL and sets MemoryError on allocation failure. */ buffer_t buffer_new(void) { buffer_t buffer; buffer = (buffer_t)malloc(sizeof(struct buffer)); if (buffer == NULL) { + set_memory_error(); return NULL; } @@ -41,6 +52,7 @@ buffer_t buffer_new(void) { buffer->buffer = (char*)malloc(sizeof(char) * INITIAL_BUFFER_SIZE); if (buffer->buffer == NULL) { free(buffer); + set_memory_error(); return NULL; } @@ -62,7 +74,7 @@ int buffer_free(buffer_t buffer) { } /* Grow `buffer` to at least `min_length`. - * Return non-zero on allocation failure. */ + * Return non-zero and sets MemoryError on allocation failure. */ static int buffer_grow(buffer_t buffer, int min_length) { int old_size = 0; int size = buffer->size; @@ -82,6 +94,7 @@ static int buffer_grow(buffer_t buffer, int min_length) { buffer->buffer = (char*)realloc(buffer->buffer, sizeof(char) * size); if (buffer->buffer == NULL) { free(old_buffer); + set_memory_error(); return 1; } buffer->size = size; @@ -89,11 +102,14 @@ static int buffer_grow(buffer_t buffer, int min_length) { } /* Assure that `buffer` has at least `size` free bytes (and grow if needed). - * Return non-zero on allocation failure. */ + * Return non-zero and sets MemoryError on allocation failure. + * Return non-zero and sets ValueError if `size` would exceed 2GiB. */ static int buffer_assure_space(buffer_t buffer, int size) { int new_size = buffer->position + size; /* Check for overflow. */ if (new_size < buffer->position) { + PyErr_SetString(PyExc_ValueError, + "Document would overflow BSON size limit"); return 1; } @@ -104,7 +120,8 @@ static int buffer_assure_space(buffer_t buffer, int size) { } /* Save `size` bytes from the current position in `buffer` (and grow if needed). - * Return offset for writing, or -1 on allocation failure. */ + * Return offset for writing, or -1 on failure. + * Sets MemoryError or ValueError on failure. */ buffer_position buffer_save_space(buffer_t buffer, int size) { int position = buffer->position; if (buffer_assure_space(buffer, size) != 0) { @@ -115,7 +132,8 @@ buffer_position buffer_save_space(buffer_t buffer, int size) { } /* Write `size` bytes from `data` to `buffer` (and grow if needed). - * Return non-zero on allocation failure. */ + * Return non-zero on failure. + * Sets MemoryError or ValueError on failure. */ int buffer_write(buffer_t buffer, const char* data, int size) { if (buffer_assure_space(buffer, size) != 0) { return 1; diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 7c4a517c5c..4afc078093 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -88,7 +88,6 @@ static int add_last_error(PyObject* self, buffer_t buffer, message_start = buffer_save_space(buffer, 4); if (message_start == -1) { - PyErr_NoMemory(); return 0; } if (!buffer_write_int32(buffer, (int32_t)request_id) || @@ -109,7 +108,6 @@ static int add_last_error(PyObject* self, buffer_t buffer, /* save space for length */ document_start = buffer_save_space(buffer, 4); if (document_start == -1) { - PyErr_NoMemory(); return 0; } @@ -154,7 +152,6 @@ static int init_insert_buffer(buffer_t buffer, int request_id, int options, /* Save space for message length */ int length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); return length_location; } if (!buffer_write_int32(buffer, (int32_t)request_id) || @@ -212,7 +209,6 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - PyErr_NoMemory(); destroy_codec_options(&options); PyMem_Free(collection_name); return NULL; @@ -346,7 +342,6 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { buffer = buffer_new(); if (!buffer) { destroy_codec_options(&options); - PyErr_NoMemory(); PyMem_Free(collection_name); return NULL; } @@ -356,7 +351,6 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { if (length_location == -1) { destroy_codec_options(&options); PyMem_Free(collection_name); - PyErr_NoMemory(); return NULL; } if (!buffer_write_int32(buffer, (int32_t)request_id) || @@ -454,7 +448,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - PyErr_NoMemory(); destroy_codec_options(&options); PyMem_Free(collection_name); return NULL; @@ -465,7 +458,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { if (length_location == -1) { destroy_codec_options(&options); PyMem_Free(collection_name); - PyErr_NoMemory(); return NULL; } @@ -585,7 +577,6 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - PyErr_NoMemory(); PyMem_Free(collection_name); return NULL; } @@ -594,7 +585,6 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { length_location = buffer_save_space(buffer, 4); if (length_location == -1) { PyMem_Free(collection_name); - PyErr_NoMemory(); return NULL; } if (!buffer_write_int32(buffer, (int32_t)request_id) || @@ -665,14 +655,12 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - PyErr_NoMemory(); goto bufferfail; } // save space for message length length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - PyErr_NoMemory(); goto bufferfail; } if (!buffer_write_int32(buffer, (int32_t)request_id) || @@ -879,7 +867,6 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { buffer = buffer_new(); if (!buffer) { destroy_codec_options(&options); - PyErr_NoMemory(); PyMem_Free(collection_name); return NULL; } @@ -944,7 +931,6 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { int message_start; buffer_t new_buffer = buffer_new(); if (!new_buffer) { - PyErr_NoMemory(); goto iterfail; } message_start = init_insert_buffer(new_buffer, @@ -1181,7 +1167,6 @@ _batched_op_msg( /* Save space for size */ size_location = buffer_save_space(buffer, 4); if (size_location == -1) { - PyErr_NoMemory(); return 0; } @@ -1325,7 +1310,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { return NULL; } if (!(buffer = buffer_new())) { - PyErr_NoMemory(); destroy_codec_options(&options); return NULL; } @@ -1381,13 +1365,11 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { return NULL; } if (!(buffer = buffer_new())) { - PyErr_NoMemory(); destroy_codec_options(&options); return NULL; } /* Save space for message length and request id */ if ((buffer_save_space(buffer, 8)) == -1) { - PyErr_NoMemory(); goto fail; } if (!buffer_write_bytes(buffer, @@ -1552,7 +1534,6 @@ _batched_write_command( /* Save space for list document */ lst_len_loc = buffer_save_space(buffer, 4); if (lst_len_loc == -1) { - PyErr_NoMemory(); return 0; } @@ -1672,7 +1653,6 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { return NULL; } if (!(buffer = buffer_new())) { - PyErr_NoMemory(); PyMem_Free(ns); destroy_codec_options(&options); return NULL; @@ -1732,14 +1712,12 @@ _cbson_batched_write_command(PyObject* self, PyObject* args) { return NULL; } if (!(buffer = buffer_new())) { - PyErr_NoMemory(); PyMem_Free(ns); destroy_codec_options(&options); return NULL; } /* Save space for message length and request id */ if ((buffer_save_space(buffer, 8)) == -1) { - PyErr_NoMemory(); goto fail; } if (!buffer_write_bytes(buffer, From 3463f060e9bff6365832b9c5974fa64a7973855c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Apr 2020 10:42:17 -0700 Subject: [PATCH 0100/2111] PYTHON-2191 Fix buffer leak added in 021adc53e8e10adc252bec586bbd6ae3c1ae4f9b --- pymongo/_cmessagemodule.c | 150 +++++++++++++++----------------------- 1 file changed, 57 insertions(+), 93 deletions(-) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 4afc078093..52dbf08526 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -190,9 +190,9 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { unsigned char continue_on_error; codec_options_t options; PyObject* last_error_args; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + PyObject* result = NULL; if (!PyArg_ParseTuple(args, "et#ObbObO&", "utf-8", @@ -209,9 +209,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; + goto fail; } length_location = init_insert_buffer(buffer, @@ -221,10 +219,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { collection_name_length, 0); if (length_location == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - buffer_free(buffer); - return NULL; + goto fail; } iterator = PyObject_GetIter(docs); @@ -234,10 +229,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { PyErr_SetString(InvalidOperation, "input is not iterable"); Py_DECREF(InvalidOperation); } - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } while ((doc = PyIter_Next(iterator)) != NULL) { before = buffer_get_position(buffer); @@ -245,10 +237,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { &options, 1)) { Py_DECREF(doc); Py_DECREF(iterator); - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } Py_DECREF(doc); cur_size = buffer_get_position(buffer) - before; @@ -257,10 +246,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { Py_DECREF(iterator); if (PyErr_Occurred()) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } if (!max_size) { @@ -269,10 +255,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); Py_DECREF(InvalidOperation); } - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } message_length = buffer_get_position(buffer) - length_location; @@ -282,22 +265,21 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { if (safe) { if (!add_last_error(self, buffer, request_id, collection_name, collection_name_length, &options, last_error_args)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } } - PyMem_Free(collection_name); - /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), max_size); +fail: + PyMem_Free(collection_name); destroy_codec_options(&options); - buffer_free(buffer); + if (buffer) { + buffer_free(buffer); + } return result; } @@ -318,9 +300,9 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { codec_options_t options; PyObject* last_error_args; int flags; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + PyObject* result = NULL; if (!PyArg_ParseTuple(args, "et#bbOObObO&", "utf-8", @@ -341,17 +323,13 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, @@ -363,28 +341,19 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { collection_name, collection_name_length + 1) || !buffer_write_int32(buffer, (int32_t)flags)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } before = buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, spec, 0, &options, 1)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } max_size = buffer_get_position(buffer) - before; before = buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, doc, check_keys, &options, 1)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } cur_size = buffer_get_position(buffer) - before; max_size = (cur_size > max_size) ? cur_size : max_size; @@ -396,22 +365,21 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { if (safe) { if (!add_last_error(self, buffer, request_id, collection_name, collection_name_length, &options, last_error_args)) { - destroy_codec_options(&options); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } } - PyMem_Free(collection_name); - /* objectify buffer */ result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), max_size); +fail: + PyMem_Free(collection_name); destroy_codec_options(&options); - buffer_free(buffer); + if (buffer) { + buffer_free(buffer); + } return result; } @@ -430,7 +398,7 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { PyObject* query; PyObject* field_selector; codec_options_t options; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; unsigned char check_keys = 0; PyObject* result = NULL; @@ -448,17 +416,13 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; + goto fail; } /* Pop $clusterTime from dict and write it at the end, avoiding an error @@ -547,11 +511,12 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), max_size); - fail: PyMem_Free(collection_name); destroy_codec_options(&options); - buffer_free(buffer); + if (buffer) { + buffer_free(buffer); + } Py_XDECREF(cluster_time); return result; } @@ -563,9 +528,9 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { Py_ssize_t collection_name_length; int num_to_return; long long cursor_id; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; - PyObject* result; + PyObject* result = NULL; if (!PyArg_ParseTuple(args, "et#iL", "utf-8", @@ -577,15 +542,13 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - PyMem_Free(collection_name); - return NULL; + goto fail; } // save space for message length length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - PyMem_Free(collection_name); - return NULL; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, @@ -597,13 +560,9 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { collection_name_length + 1) || !buffer_write_int32(buffer, (int32_t)num_to_return) || !buffer_write_int64(buffer, (int64_t)cursor_id)) { - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; + goto fail; } - PyMem_Free(collection_name); - message_length = buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); @@ -612,7 +571,11 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { result = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer)); - buffer_free(buffer); +fail: + PyMem_Free(collection_name); + if (buffer) { + buffer_free(buffer); + } return result; } @@ -634,7 +597,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { PyObject* doc; unsigned char check_keys = 0; codec_options_t options; - buffer_t buffer; + buffer_t buffer = NULL; int length_location, message_length; int total_size = 0; int max_doc_size = 0; @@ -655,46 +618,46 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { } buffer = buffer_new(); if (!buffer) { - goto bufferfail; + goto fail; } // save space for message length length_location = buffer_save_space(buffer, 4); if (length_location == -1) { - goto bufferfail; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00" /* responseTo */ "\xdd\x07\x00\x00" /* 2013 */, 8)) { - goto encodefail; + goto fail; } if (!buffer_write_int32(buffer, (int32_t)flags) || !buffer_write_bytes(buffer, "\x00", 1) /* Payload type 0 */) { - goto encodefail; + goto fail; } total_size = write_dict(state->_cbson, buffer, command, 0, &options, 1); if (!total_size) { - goto encodefail; + goto fail; } if (identifier_length) { int payload_one_length_location, payload_length; /* Payload type 1 */ if (!buffer_write_bytes(buffer, "\x01", 1)) { - goto encodefail; + goto fail; } /* save space for payload 0 length */ payload_one_length_location = buffer_save_space(buffer, 4); /* C string identifier */ if (!buffer_write_bytes_ssize_t(buffer, identifier, identifier_length + 1)) { - goto encodefail; + goto fail; } iterator = PyObject_GetIter(docs); if (iterator == NULL) { - goto encodefail; + goto fail; } while ((doc = PyIter_Next(iterator)) != NULL) { int encoded_doc_size = write_dict( @@ -702,7 +665,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { &options, 1); if (!encoded_doc_size) { Py_CLEAR(doc); - goto encodefail; + goto fail; } if (encoded_doc_size > max_doc_size) { max_doc_size = encoded_doc_size; @@ -726,10 +689,11 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { (Py_ssize_t)buffer_get_position(buffer), total_size, max_doc_size); -encodefail: +fail: Py_XDECREF(iterator); - buffer_free(buffer); -bufferfail: + if (buffer) { + buffer_free(buffer); + } PyMem_Free(identifier); destroy_codec_options(&options); return result; From 914d206434f700819d3b6799185835b6ead8e4ec Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Apr 2020 13:03:01 -0700 Subject: [PATCH 0101/2111] PYTHON-2193 Update install dependencies for OCSP tests --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 7554956dbb..0f561c2ffa 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -699,7 +699,7 @@ functions: script: | cd ${DRIVERS_TOOLS}/.evergreen/ocsp /opt/mongodbtoolchain/v3/bin/python3 -m venv ./venv - ./venv/bin/pip3 install asn1crypto oscrypto bottle + ./venv/bin/pip3 install -r ${DRIVERS_TOOLS}/.evergreen/ocsp/mock-ocsp-responder-requirements.txt - command: shell.exec params: background: true @@ -717,7 +717,7 @@ functions: script: | cd ${DRIVERS_TOOLS}/.evergreen/ocsp /opt/mongodbtoolchain/v3/bin/python3 -m venv ./venv - ./venv/bin/pip3 install asn1crypto oscrypto bottle + ./venv/bin/pip3 install -r ${DRIVERS_TOOLS}/.evergreen/ocsp/mock-ocsp-responder-requirements.txt - command: shell.exec params: background: true From 0c5d24ce4889b38e5a7b89e980b26fe90341a4e9 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 9 Apr 2020 16:40:12 -0700 Subject: [PATCH 0102/2111] PYTHON-2181 Raise an informative error including the entire command response when operationTime is missing from aggregate command response --- pymongo/change_stream.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 00b56e4869..f026dd7f5b 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -145,7 +145,12 @@ def _process_result(self, result, session, server, sock_info, slave_ok): if (self._start_at_operation_time is None and self.resume_token is None and sock_info.max_wire_version >= 7): - self._start_at_operation_time = result["operationTime"] + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + "response : %r" % (result, )) def _run_aggregation_cmd(self, session, explicit_session): """Run the full aggregation pipeline for this ChangeStream and return From 5ec01ca6100140c779281f9fb7c8a4a1299ffc20 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Apr 2020 09:39:05 -0700 Subject: [PATCH 0103/2111] BUMP 3.11.0b0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a54ba8bac3..9a14ab1c64 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -64,7 +64,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, '.dev1') +version_tuple = (3, 11, 0, 'b0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index ba051f73be..65cac60711 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0.dev1" +version = "3.11.0b0" f = open("README.rst") try: From 923e83cd1a04674c8e4f6b8fe178dcf84fbe5a7d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Apr 2020 09:43:33 -0700 Subject: [PATCH 0104/2111] BUMP 3.11.0b1.dev0 --- doc/changelog.rst | 4 ++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index a4dc63742c..ea89761a82 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 3.11.0b0 ---------------------------- +Changes in Version 3.11.0b1.dev0 +-------------------------------- Version 3.11 adds support for MongoDB 4.4. Highlights include: diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 9a14ab1c64..cdef15ad3a 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -64,7 +64,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, 'b0') +version_tuple = (3, 11, 0, 'b1.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 65cac60711..e7e7f6e8f7 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0b0" +version = "3.11.0b1.dev0" f = open("README.rst") try: From 5efe0b10b3006bc10e4baa212bbfdac7e488af85 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Apr 2020 13:40:29 -0700 Subject: [PATCH 0105/2111] PYTHON-2112 Change api.mongodb.com to pymongo.readthedocs.io --- CONTRIBUTING.rst | 4 ++-- README.rst | 6 ++++-- pymongo/topology.py | 2 +- setup.py | 6 +++--- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 01857b6763..4c057af571 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -2,7 +2,7 @@ Contributing to PyMongo ======================= PyMongo has a large `community -`_ and +`_ and contributions are always encouraged. Contributions can be as simple as minor tweaks to the documentation. Please read these guidelines before sending a pull request. @@ -41,7 +41,7 @@ General Guidelines Documentation ------------- -To contribute to the `API documentation `_ +To contribute to the `API documentation `_ just make your changes to the inline documentation of the appropriate `source code `_ or `rst file `_ in a diff --git a/README.rst b/README.rst index decd4beacd..792861251c 100644 --- a/README.rst +++ b/README.rst @@ -204,8 +204,10 @@ Here's a basic example (for more see the *examples* section of the docs): Documentation ============= -You will need sphinx_ installed to generate the -documentation. Documentation can be generated by running **python +Documentation is available at [pymongo.readthedocs.io](https://pymongo.readthedocs.io/en/stable/). + +To build the documentation, you will need to install sphinx_. +Documentation can be generated by running **python setup.py doc**. Generated documentation can be found in the *doc/build/html/* directory. diff --git a/pymongo/topology.py b/pymongo/topology.py index 7f65a36dfe..233882bbc3 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -155,7 +155,7 @@ def open(self): warnings.warn( "MongoClient opened before fork. Create MongoClient only " "after forking. See PyMongo's documentation for details: " - "http://api.mongodb.org/python/current/faq.html#" + "https://pymongo.readthedocs.io/en/stable/faq.html#" "is-pymongo-fork-safe") with self._lock: # Reset the session pool to avoid duplicate sessions in diff --git a/setup.py b/setup.py index e7e7f6e8f7..8850e0e551 100755 --- a/setup.py +++ b/setup.py @@ -247,7 +247,7 @@ class custom_build_ext(build_ext): Please see the installation docs for solutions to build issues: -http://api.mongodb.org/python/current/installation.html +https://pymongo.readthedocs.io/en/stable/installation.html Here are some hints for popular operating systems: @@ -269,7 +269,7 @@ class custom_build_ext(build_ext): from binary wheels available on pypi. If you must install from source read the documentation here: -https://api.mongodb.com/python/current/installation.html#installing-from-source-on-windows +https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows If you are seeing this message on macOS / OSX please install PyMongo using pip. Modern versions of pip will install PyMongo from binary @@ -277,7 +277,7 @@ class custom_build_ext(build_ext): of macOS / OSX, or you must install from source read the documentation here: -http://api.mongodb.org/python/current/installation.html#osx +https://pymongo.readthedocs.io/en/stable/installation.html#osx ******************************************************************** """ From 350ada10d55782b9c603c27d0e7e5b5d90e339b0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Apr 2020 11:39:47 -0700 Subject: [PATCH 0106/2111] PYTHON-2206 Rename pool_id to generation to better match CMAP spec --- pymongo/client_session.py | 13 +++++++------ pymongo/monitor.py | 2 +- pymongo/pool.py | 20 ++++++++++---------- pymongo/topology.py | 6 +++--- test/test_client.py | 4 ++-- test/test_topology.py | 4 ++-- test/utils.py | 6 +++--- 7 files changed, 28 insertions(+), 27 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index f701459795..63021a3c0d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -838,13 +838,13 @@ def _start_retryable_write(self): class _ServerSession(object): - def __init__(self, pool_id): + def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)} self.last_use = monotonic.time() self._transaction_id = 0 self.dirty = False - self.pool_id = pool_id + self.generation = generation def mark_dirty(self): """Mark this session as dirty. @@ -876,10 +876,10 @@ class _ServerSessionPool(collections.deque): """ def __init__(self, *args, **kwargs): super(_ServerSessionPool, self).__init__(*args, **kwargs) - self.pool_id = 0 + self.generation = 0 def reset(self): - self.pool_id += 1 + self.generation += 1 self.clear() def pop_all(self): @@ -902,7 +902,7 @@ def get_server_session(self, session_timeout_minutes): if not s.timed_out(session_timeout_minutes): return s - return _ServerSession(self.pool_id) + return _ServerSession(self.generation) def return_server_session(self, server_session, session_timeout_minutes): self._clear_stale(session_timeout_minutes) @@ -912,7 +912,8 @@ def return_server_session(self, server_session, session_timeout_minutes): def return_server_session_no_lock(self, server_session): # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. - if server_session.pool_id == self.pool_id and not server_session.dirty: + if (server_session.generation == self.generation and + not server_session.dirty): self.appendleft(server_session) def _clear_stale(self, session_timeout_minutes): diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 9aaa95c6fb..99d33f2530 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -107,7 +107,7 @@ def __init__( def close(self): super(Monitor, self).close() - # Increment the pool_id and maybe close the socket. If the executor + # Increment the generation and maybe close the socket. If the executor # thread has the socket checked out, it will be closed when checked in. self._pool.reset() diff --git a/pymongo/pool.py b/pymongo/pool.py index ad8e0ba993..87b3602219 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -488,9 +488,9 @@ def __init__(self, sock, pool, address, id): self.compression_settings = pool.opts.compression_settings self.compression_context = None - # The pool's pool_id changes with each reset() so we can close sockets - # created before the last reset. - self.pool_id = pool.pool_id + # The pool's generation changes with each reset() so we can close + # sockets created before the last reset. + self.generation = pool.generation self.ready = False def ismaster(self, metadata, cluster_time): @@ -956,7 +956,7 @@ def __init__(self, address, options, handshake=True): # Keep track of resets, so we notice sockets created before the most # recent reset and close them. - self.pool_id = 0 + self.generation = 0 self.pid = os.getpid() self.address = address self.opts = options @@ -985,7 +985,7 @@ def _reset(self, close): with self.lock: if self.closed: return - self.pool_id += 1 + self.generation += 1 self.pid = os.getpid() sockets, self.sockets = self.sockets, collections.deque() self.active_sockets = 0 @@ -1022,10 +1022,10 @@ def reset(self): def close(self): self._reset(close=True) - def remove_stale_sockets(self, reference_pool_id): + def remove_stale_sockets(self, reference_generation): """Removes stale sockets then adds new ones if pool is too small and - has not been reset. The `reference_pool_id` argument specifies the - `pool_id` at the point in time this operation was requested on the + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the pool. """ if self.opts.max_idle_time_seconds is not None: @@ -1050,7 +1050,7 @@ def remove_stale_sockets(self, reference_pool_id): with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. - if self.pool_id != reference_pool_id: + if self.generation != reference_generation: sock_info.close_socket(ConnectionClosedReason.STALE) break self.sockets.appendleft(sock_info) @@ -1205,7 +1205,7 @@ def return_socket(self, sock_info, publish_checkin=True): else: if self.closed: sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) - elif sock_info.pool_id != self.pool_id: + elif sock_info.generation != self.generation: sock_info.close_socket(ConnectionClosedReason.STALE) elif not sock_info.closed: sock_info.update_last_checkin_time() diff --git a/pymongo/topology.py b/pymongo/topology.py index 233882bbc3..d43083b7ea 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -435,10 +435,10 @@ def update_pool(self): servers = [] with self._lock: for server in self._servers.values(): - servers.append((server, server._pool.pool_id)) + servers.append((server, server._pool.generation)) - for server, pool_id in servers: - server._pool.remove_stale_sockets(pool_id) + for server, generation in servers: + server._pool.remove_stale_sockets(generation) def close(self): """Clear pools and terminate monitors. Topology reopens on demand.""" diff --git a/test/test_client.py b/test/test_client.py index c547c98cb5..3ce5b9e41a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1462,7 +1462,7 @@ def test_reset_during_update_pool(self): self.addCleanup(client.close) client.admin.command('ping') pool = get_pool(client) - pool_id = pool.pool_id + generation = pool.generation # Continuously reset the pool. class ResetPoolThread(threading.Thread): @@ -1488,7 +1488,7 @@ def run(self): while True: for _ in range(10): client._topology.update_pool() - if pool_id != pool.pool_id: + if generation != pool.generation: break finally: t.stop() diff --git a/test/test_topology.py b/test/test_topology.py index 017951b575..99f4de9187 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -660,11 +660,11 @@ def _check_with_socket(self, *args, **kwargs): t = create_mock_topology(monitor_class=TestMonitor) server = wait_for_master(t) self.assertEqual(1, ismaster_count[0]) - pool_id = server.pool.pool_id + generation = server.pool.generation # Pool is reset by ismaster failure. t.request_check_all() - self.assertNotEqual(pool_id, server.pool.pool_id) + self.assertNotEqual(generation, server.pool.generation) def test_ismaster_retry(self): # ismaster succeeds at first, then raises socket error, then succeeds. diff --git a/test/utils.py b/test/utils.py index 05e9be12cd..bd5cea79ed 100644 --- a/test/utils.py +++ b/test/utils.py @@ -207,7 +207,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool(object): def __init__(self, *args, **kwargs): - self.pool_id = 0 + self.generation = 0 self._lock = threading.Lock() self.opts = PoolOptions() @@ -219,7 +219,7 @@ def return_socket(self, *args, **kwargs): def _reset(self): with self._lock: - self.pool_id += 1 + self.generation += 1 def reset(self): self._reset() @@ -230,7 +230,7 @@ def close(self): def update_is_writable(self, is_writable): pass - def remove_stale_sockets(self, reference_pool_id): + def remove_stale_sockets(self, reference_generation): pass From ced7d52d98fbc941fa79ee1bc715129424ad8ae6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Apr 2020 12:34:23 -0700 Subject: [PATCH 0107/2111] PYTHON-2205 Don't add stale connections to the pool --- pymongo/pool.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 87b3602219..8bd6602cc9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1205,22 +1205,27 @@ def return_socket(self, sock_info, publish_checkin=True): else: if self.closed: sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) - elif sock_info.generation != self.generation: - sock_info.close_socket(ConnectionClosedReason.STALE) elif not sock_info.closed: - sock_info.update_last_checkin_time() - sock_info.update_is_writable(self.is_writable) with self.lock: - self.sockets.appendleft(sock_info) + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if sock_info.generation != self.generation: + sock_info.close_socket(ConnectionClosedReason.STALE) + else: + sock_info.update_last_checkin_time() + sock_info.update_is_writable(self.is_writable) + self.sockets.appendleft(sock_info) self._socket_semaphore.release() with self.lock: self.active_sockets -= 1 def _perished(self, sock_info): - """This side-effecty function checks if this socket has been idle for + """Return True and close the connection if it is "perished". + + This side-effecty function checks if this socket has been idle for for longer than the max idle time, or if the socket has been closed by - some external network error. + some external network error, or if the socket's generation is outdated. Checking sockets lets us avoid seeing *some* :class:`~pymongo.errors.AutoReconnect` exceptions on server @@ -1243,6 +1248,10 @@ def _perished(self, sock_info): sock_info.close_socket(ConnectionClosedReason.ERROR) return True + if sock_info.generation != self.generation: + sock_info.close_socket(ConnectionClosedReason.STALE) + return True + return False def _raise_wait_queue_timeout(self): From 8d41a387f3cb6a9203c2ded728695611ad1d6b54 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 2 Apr 2020 15:34:38 -0700 Subject: [PATCH 0108/2111] PYTHON-2179 Raise client-side error if an index hint is specified for an unacknowledged operation --- pymongo/collection.py | 3 + ...ged-bulkWrite-delete-hint-clientError.json | 155 +++++++++++ ...ged-bulkWrite-update-hint-clientError.json | 245 ++++++++++++++++++ ...nowledged-deleteMany-hint-clientError.json | 105 ++++++++ ...knowledged-deleteOne-hint-clientError.json | 89 +++++++ ...ged-findOneAndDelete-hint-clientError.json | 89 +++++++ ...ed-findOneAndReplace-hint-clientError.json | 95 +++++++ ...ged-findOneAndUpdate-hint-clientError.json | 99 +++++++ ...nowledged-replaceOne-hint-clientError.json | 99 +++++++ ...nowledged-updateMany-hint-clientError.json | 115 ++++++++ ...knowledged-updateOne-hint-clientError.json | 103 ++++++++ 11 files changed, 1197 insertions(+) create mode 100644 test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-deleteMany-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-deleteOne-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-replaceOne-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-updateMany-hint-clientError.json create mode 100644 test/crud/v2/unacknowledged-updateOne-hint-clientError.json diff --git a/pymongo/collection.py b/pymongo/collection.py index a7b563bd93..eba872f95b 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2933,6 +2933,9 @@ def _find_and_modify(session, sock_info, retryable_write): if sock_info.max_wire_version < 8: raise ConfigurationError( 'Must be connected to MongoDB 4.2+ to use hint.') + if not write_concern.acknowledged: + raise ConfigurationError( + 'hint is unsupported for unacknowledged writes.') cmd['hint'] = hint if (sock_info.max_wire_version >= 4 and not write_concern.is_server_default): diff --git a/test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json b/test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json new file mode 100644 index 0000000000..46839db705 --- /dev/null +++ b/test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json @@ -0,0 +1,155 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "BulkWrite_delete_hint", + "tests": [ + { + "description": "Unacknowledged bulkWrite deleteOne with hints fails with client-side error", + "operations": [ + { + "name": "bulkWrite", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "Unacknowledged bulkWrite deleteMany with hints fails with client-side error", + "operations": [ + { + "name": "bulkWrite", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "arguments": { + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json b/test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json new file mode 100644 index 0000000000..4a41d76b35 --- /dev/null +++ b/test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json @@ -0,0 +1,245 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "collection_name": "Bulkwrite_update_hint", + "tests": [ + { + "description": "Unacknowledged bulkWrite updateOne with hints fails with client-side error", + "operations": [ + { + "name": "bulkWrite", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "Unacknowledged bulkWrite updateMany with hints fails with client-side error", + "operations": [ + { + "name": "bulkWrite", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + }, + { + "description": "Unacknowledged bulkWrite replaceOne with hints fails with client-side error", + "operations": [ + { + "name": "bulkWrite", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "arguments": { + "requests": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-deleteMany-hint-clientError.json b/test/crud/v2/unacknowledged-deleteMany-hint-clientError.json new file mode 100644 index 0000000000..532f4282a9 --- /dev/null +++ b/test/crud/v2/unacknowledged-deleteMany-hint-clientError.json @@ -0,0 +1,105 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "DeleteMany_hint", + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-deleteOne-hint-clientError.json b/test/crud/v2/unacknowledged-deleteOne-hint-clientError.json new file mode 100644 index 0000000000..ff3f05ea3e --- /dev/null +++ b/test/crud/v2/unacknowledged-deleteOne-hint-clientError.json @@ -0,0 +1,89 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "DeleteOne_hint", + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json b/test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json new file mode 100644 index 0000000000..076978874d --- /dev/null +++ b/test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json @@ -0,0 +1,89 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "findOneAndDelete_hint", + "tests": [ + { + "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json b/test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json new file mode 100644 index 0000000000..38fbc817be --- /dev/null +++ b/test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json @@ -0,0 +1,95 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "FindOneAndReplace_hint", + "tests": [ + { + "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json b/test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json new file mode 100644 index 0000000000..615b4c0e63 --- /dev/null +++ b/test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json @@ -0,0 +1,99 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "FindOneAndUpdate_hint", + "tests": [ + { + "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-replaceOne-hint-clientError.json b/test/crud/v2/unacknowledged-replaceOne-hint-clientError.json new file mode 100644 index 0000000000..c4add73c2d --- /dev/null +++ b/test/crud/v2/unacknowledged-replaceOne-hint-clientError.json @@ -0,0 +1,99 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "ReplaceOne_hint", + "tests": [ + { + "description": "Unacknowledged ReplaceOne with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "Unacknowledged ReplaceOne with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-updateMany-hint-clientError.json b/test/crud/v2/unacknowledged-updateMany-hint-clientError.json new file mode 100644 index 0000000000..eaf3efd1cf --- /dev/null +++ b/test/crud/v2/unacknowledged-updateMany-hint-clientError.json @@ -0,0 +1,115 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "collection_name": "Updatemany_hint", + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/unacknowledged-updateOne-hint-clientError.json b/test/crud/v2/unacknowledged-updateOne-hint-clientError.json new file mode 100644 index 0000000000..1f8f738012 --- /dev/null +++ b/test/crud/v2/unacknowledged-updateOne-hint-clientError.json @@ -0,0 +1,103 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "UpdateOne_hint", + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + }, + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} From b7f4faeeabd31d917f6d7c5971759165a3a8759a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 17 Apr 2020 10:03:58 -0700 Subject: [PATCH 0109/2111] PYTHON-2189 Ignore StaleConfig transaction errors on 4.5+ mongos --- test/test_transactions.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/test_transactions.py b/test/test_transactions.py index 40341cbb80..9621578420 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -404,7 +404,13 @@ def create_test(scenario_def, test, name): @client_context.require_test_commands @client_context.require_transactions def run_scenario(self): - self.run_scenario(scenario_def, test) + try: + self.run_scenario(scenario_def, test) + except OperationFailure as exc: + if (client_context.version.at_least(4, 5) and + client_context.is_mongos and exc.code == 13388): + self.skipTest('PYTHON-2189 Ignoring StaleConfig error: %r' % ( + exc.details)) return run_scenario From ef2ecc4eac233e0b3e0cd9e64301b362a7749915 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 17 Apr 2020 13:00:29 -0700 Subject: [PATCH 0110/2111] PYTHON-2207 Do not use the admin database for the key vault in FLE tests Admin collections cannot be dropped in sharded clusters in 4.5+. --- .../spec/aggregate.json | 8 +- test/client-side-encryption/spec/basic.json | 8 +- test/client-side-encryption/spec/bulk.json | 4 +- .../spec/bypassAutoEncryption.json | 4 +- test/client-side-encryption/spec/count.json | 4 +- .../spec/countDocuments.json | 4 +- test/client-side-encryption/spec/delete.json | 8 +- .../client-side-encryption/spec/distinct.json | 4 +- test/client-side-encryption/spec/explain.json | 4 +- test/client-side-encryption/spec/find.json | 8 +- .../spec/findOneAndDelete.json | 4 +- .../spec/findOneAndReplace.json | 4 +- .../spec/findOneAndUpdate.json | 4 +- test/client-side-encryption/spec/getMore.json | 4 +- test/client-side-encryption/spec/insert.json | 8 +- .../spec/keyAltName.json | 4 +- .../client-side-encryption/spec/localKMS.json | 4 +- .../spec/localSchema.json | 4 +- .../spec/missingKey.json | 6 +- .../spec/replaceOne.json | 4 +- test/client-side-encryption/spec/types.json | 32 ++++---- .../spec/updateMany.json | 4 +- .../spec/updateOne.json | 4 +- test/test_encryption.py | 76 +++++++++---------- 24 files changed, 109 insertions(+), 109 deletions(-) diff --git a/test/client-side-encryption/spec/aggregate.json b/test/client-side-encryption/spec/aggregate.json index 6bc9242717..a9e79f9edb 100644 --- a/test/client-side-encryption/spec/aggregate.json +++ b/test/client-side-encryption/spec/aggregate.json @@ -157,7 +157,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -187,7 +187,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -280,7 +280,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -310,7 +310,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/basic.json b/test/client-side-encryption/spec/basic.json index 371894e8ca..3f9895fd5d 100644 --- a/test/client-side-encryption/spec/basic.json +++ b/test/client-side-encryption/spec/basic.json @@ -151,7 +151,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -181,7 +181,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -290,7 +290,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -320,7 +320,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/bulk.json index 7a401d5e8e..ead90985a1 100644 --- a/test/client-side-encryption/spec/bulk.json +++ b/test/client-side-encryption/spec/bulk.json @@ -185,7 +185,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -215,7 +215,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/bypassAutoEncryption.json b/test/client-side-encryption/spec/bypassAutoEncryption.json index 42f4473223..9d09cb3fa9 100644 --- a/test/client-side-encryption/spec/bypassAutoEncryption.json +++ b/test/client-side-encryption/spec/bypassAutoEncryption.json @@ -196,7 +196,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -369,7 +369,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/count.json b/test/client-side-encryption/spec/count.json index 9ac5104a09..24f46a110a 100644 --- a/test/client-side-encryption/spec/count.json +++ b/test/client-side-encryption/spec/count.json @@ -156,7 +156,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -186,7 +186,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/countDocuments.json b/test/client-side-encryption/spec/countDocuments.json index d4ae0aeb46..3cf5fbca8b 100644 --- a/test/client-side-encryption/spec/countDocuments.json +++ b/test/client-side-encryption/spec/countDocuments.json @@ -157,7 +157,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -187,7 +187,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/delete.json b/test/client-side-encryption/spec/delete.json index bb9c061556..30fb453a93 100644 --- a/test/client-side-encryption/spec/delete.json +++ b/test/client-side-encryption/spec/delete.json @@ -158,7 +158,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -188,7 +188,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -283,7 +283,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -313,7 +313,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/distinct.json b/test/client-side-encryption/spec/distinct.json index c473030580..7a5f75c4a5 100644 --- a/test/client-side-encryption/spec/distinct.json +++ b/test/client-side-encryption/spec/distinct.json @@ -168,7 +168,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -198,7 +198,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/explain.json b/test/client-side-encryption/spec/explain.json index 6872cedf2b..5ad46bc238 100644 --- a/test/client-side-encryption/spec/explain.json +++ b/test/client-side-encryption/spec/explain.json @@ -162,7 +162,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -192,7 +192,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/find.json b/test/client-side-encryption/spec/find.json index 93cef311c0..b7c5258a13 100644 --- a/test/client-side-encryption/spec/find.json +++ b/test/client-side-encryption/spec/find.json @@ -167,7 +167,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -197,7 +197,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -309,7 +309,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -339,7 +339,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/findOneAndDelete.json b/test/client-side-encryption/spec/findOneAndDelete.json index 2d9f963f23..6261d8601b 100644 --- a/test/client-side-encryption/spec/findOneAndDelete.json +++ b/test/client-side-encryption/spec/findOneAndDelete.json @@ -155,7 +155,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -185,7 +185,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/findOneAndReplace.json b/test/client-side-encryption/spec/findOneAndReplace.json index 1512fb9552..d91bc05998 100644 --- a/test/client-side-encryption/spec/findOneAndReplace.json +++ b/test/client-side-encryption/spec/findOneAndReplace.json @@ -154,7 +154,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -184,7 +184,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/findOneAndUpdate.json b/test/client-side-encryption/spec/findOneAndUpdate.json index a5b41f8455..fad70609ad 100644 --- a/test/client-side-encryption/spec/findOneAndUpdate.json +++ b/test/client-side-encryption/spec/findOneAndUpdate.json @@ -156,7 +156,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -186,7 +186,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/getMore.json index 637f69d509..cf23442226 100644 --- a/test/client-side-encryption/spec/getMore.json +++ b/test/client-side-encryption/spec/getMore.json @@ -186,7 +186,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -216,7 +216,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/insert.json b/test/client-side-encryption/spec/insert.json index beb98c5eb0..78fa8feba0 100644 --- a/test/client-side-encryption/spec/insert.json +++ b/test/client-side-encryption/spec/insert.json @@ -138,7 +138,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -168,7 +168,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -265,7 +265,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -295,7 +295,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/keyAltName.json b/test/client-side-encryption/spec/keyAltName.json index 7088d0b0be..d062bed453 100644 --- a/test/client-side-encryption/spec/keyAltName.json +++ b/test/client-side-encryption/spec/keyAltName.json @@ -138,7 +138,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -163,7 +163,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/localKMS.json b/test/client-side-encryption/spec/localKMS.json index febc1ccfc8..e4d25309c4 100644 --- a/test/client-side-encryption/spec/localKMS.json +++ b/test/client-side-encryption/spec/localKMS.json @@ -121,7 +121,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -151,7 +151,7 @@ } ] }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "find" } diff --git a/test/client-side-encryption/spec/localSchema.json b/test/client-side-encryption/spec/localSchema.json index f939dbc123..7071d6fefd 100644 --- a/test/client-side-encryption/spec/localSchema.json +++ b/test/client-side-encryption/spec/localSchema.json @@ -143,7 +143,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -173,7 +173,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/missingKey.json index a7237f1792..ac8e8320b0 100644 --- a/test/client-side-encryption/spec/missingKey.json +++ b/test/client-side-encryption/spec/missingKey.json @@ -102,7 +102,7 @@ "description": "Insert with encryption on a missing key", "clientOptions": { "autoEncryptOpts": { - "keyVaultNamespace": "admin.different", + "keyVaultNamespace": "keyvault.different", "kmsProviders": { "aws": {} } @@ -147,7 +147,7 @@ "filter": { "name": "different" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -177,7 +177,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/replaceOne.json b/test/client-side-encryption/spec/replaceOne.json index 1287fdea14..5cdb3d40f0 100644 --- a/test/client-side-encryption/spec/replaceOne.json +++ b/test/client-side-encryption/spec/replaceOne.json @@ -155,7 +155,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -185,7 +185,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/types.json index 08928381e1..47e4c27a2e 100644 --- a/test/client-side-encryption/spec/types.json +++ b/test/client-side-encryption/spec/types.json @@ -110,7 +110,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -140,7 +140,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -261,7 +261,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -291,7 +291,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -412,7 +412,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -442,7 +442,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -663,7 +663,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -693,7 +693,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -814,7 +814,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -844,7 +844,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -1064,7 +1064,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -1094,7 +1094,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -1221,7 +1221,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -1251,7 +1251,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } @@ -1376,7 +1376,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -1406,7 +1406,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/updateMany.json b/test/client-side-encryption/spec/updateMany.json index 43c6dd717c..fd1f4d12bd 100644 --- a/test/client-side-encryption/spec/updateMany.json +++ b/test/client-side-encryption/spec/updateMany.json @@ -171,7 +171,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -201,7 +201,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/client-side-encryption/spec/updateOne.json b/test/client-side-encryption/spec/updateOne.json index d6a6de79e2..bed763d720 100644 --- a/test/client-side-encryption/spec/updateOne.json +++ b/test/client-side-encryption/spec/updateOne.json @@ -157,7 +157,7 @@ "filter": { "name": "datakeys" }, - "$db": "admin" + "$db": "keyvault" }, "command_name": "listCollections" } @@ -187,7 +187,7 @@ } ] }, - "$db": "admin", + "$db": "keyvault", "readConcern": { "level": "majority" } diff --git a/test/test_encryption.py b/test/test_encryption.py index 91018d74bf..0903eadca7 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -68,13 +68,13 @@ class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipIf(_HAVE_PYMONGOCRYPT, 'pymongocrypt is installed') def test_init_requires_pymongocrypt(self): with self.assertRaises(ConfigurationError): - AutoEncryptionOpts({}, 'admin.datakeys') + AutoEncryptionOpts({}, 'keyvault.datakeys') @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') def test_init(self): - opts = AutoEncryptionOpts({}, 'admin.datakeys') + opts = AutoEncryptionOpts({}, 'keyvault.datakeys') self.assertEqual(opts._kms_providers, {}) - self.assertEqual(opts._key_vault_namespace, 'admin.datakeys') + self.assertEqual(opts._key_vault_namespace, 'keyvault.datakeys') self.assertEqual(opts._key_vault_client, None) self.assertEqual(opts._schema_map, None) self.assertEqual(opts._bypass_auto_encryption, False) @@ -88,20 +88,20 @@ def test_init(self): def test_init_spawn_args(self): # User can override idleShutdownTimeoutSecs opts = AutoEncryptionOpts( - {}, 'admin.datakeys', + {}, 'keyvault.datakeys', mongocryptd_spawn_args=['--idleShutdownTimeoutSecs=88']) self.assertEqual( opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=88']) # idleShutdownTimeoutSecs is added by default opts = AutoEncryptionOpts( - {}, 'admin.datakeys', mongocryptd_spawn_args=[]) + {}, 'keyvault.datakeys', mongocryptd_spawn_args=[]) self.assertEqual( opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) # Also added when other options are given opts = AutoEncryptionOpts( - {}, 'admin.datakeys', + {}, 'keyvault.datakeys', mongocryptd_spawn_args=['--quiet', '--port=27020']) self.assertEqual( opts._mongocryptd_spawn_args, @@ -120,7 +120,7 @@ def test_default(self): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') def test_kwargs(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') client = MongoClient(auto_encryption_opts=opts, connect=False) self.addCleanup(client.close) self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) @@ -178,7 +178,7 @@ def _test_auto_encrypt(self, opts): # Create the encrypted field's data key. key_vault = create_key_vault( - self.client.admin.datakeys, + self.client.keyvault.datakeys, json_data('custom', 'key-document-local.json')) self.addCleanup(key_vault.drop) @@ -239,19 +239,19 @@ def test_auto_encrypt(self): create_with_schema(self.db.test, json_schema) self.addCleanup(self.db.test.drop) - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') self._test_auto_encrypt(opts) def test_auto_encrypt_local_schema_map(self): # Configure the encrypted field via the local schema_map option. schemas = {'pymongo_test.test': json_data('custom', 'schema.json')} opts = AutoEncryptionOpts( - KMS_PROVIDERS, 'admin.datakeys', schema_map=schemas) + KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas) self._test_auto_encrypt(opts) def test_use_after_close(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) @@ -271,7 +271,7 @@ def setUpClass(cls): @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) msg = 'Auto-encryption requires a minimum MongoDB version of 4.2' @@ -285,7 +285,7 @@ def test_raise_max_wire_version_error(self): client.test.test.bulk_write([InsertOne({})]) def test_raise_unsupported_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'admin.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) msg = 'find_raw_batches does not support auto encryption' @@ -308,10 +308,10 @@ class TestExplicitSimple(EncryptionIntegrationTest): def test_encrypt_decrypt(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) self.addCleanup(client_encryption.close) # Use standard UUID representation. - key_vault = client_context.client.admin.get_collection( + key_vault = client_context.client.keyvault.get_collection( 'datakeys', codec_options=OPTS) self.addCleanup(key_vault.drop) @@ -345,7 +345,7 @@ def test_encrypt_decrypt(self): def test_validation(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) self.addCleanup(client_encryption.close) msg = 'value to decrypt must be a bson.binary.Binary with subtype 6' @@ -363,7 +363,7 @@ def test_validation(self): def test_bson_errors(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) self.addCleanup(client_encryption.close) # Attempt to encrypt an unencodable object. @@ -377,11 +377,11 @@ def test_bson_errors(self): def test_codec_options(self): with self.assertRaisesRegex(TypeError, 'codec_options must be'): ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, None) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None) opts = CodecOptions(uuid_representation=JAVA_LEGACY) client_encryption_legacy = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, opts) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, opts) self.addCleanup(client_encryption_legacy.close) # Create the encrypted field's data key. @@ -398,7 +398,7 @@ def test_codec_options(self): # Encrypt the same UUID with STANDARD codec options. client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, @@ -416,7 +416,7 @@ def test_codec_options(self): def test_close(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) client_encryption.close() # Close can be called multiple times. client_encryption.close() @@ -431,7 +431,7 @@ def test_close(self): def test_with_statement(self): with ClientEncryption( - KMS_PROVIDERS, 'admin.datakeys', + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) as client_encryption: pass with self.assertRaisesRegex( @@ -464,7 +464,7 @@ def parse_auto_encrypt_opts(self, opts): if not any(AWS_CREDS.values()): self.skipTest('AWS environment credentials are not set') if 'key_vault_namespace' not in opts: - opts['key_vault_namespace'] = 'admin.datakeys' + opts['key_vault_namespace'] = 'keyvault.datakeys' opts = dict(opts) return AutoEncryptionOpts(**opts) @@ -497,7 +497,7 @@ def setup_scenario(self, scenario_def): key_vault_data = scenario_def['key_vault_data'] if key_vault_data: coll = client_context.client.get_database( - 'admin', + 'keyvault', write_concern=WriteConcern(w='majority'), codec_options=OPTS)['datakeys'] coll.drop() @@ -590,7 +590,7 @@ def test_data_key(self): client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) client.db.coll.drop() - vault = create_key_vault(client.admin.datakeys) + vault = create_key_vault(client.keyvault.datakeys) self.addCleanup(vault.drop) # Configure the encrypted field via the local schema_map option. @@ -609,13 +609,13 @@ def test_data_key(self): } } opts = AutoEncryptionOpts( - self.kms_providers(), 'admin.datakeys', schema_map=schemas) + self.kms_providers(), 'keyvault.datakeys', schema_map=schemas) client_encrypted = rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation='standard') self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', client, OPTS) + self.kms_providers(), 'keyvault.datakeys', client, OPTS) self.addCleanup(client_encryption.close) # Local create data key. @@ -700,7 +700,7 @@ def kms_providers(): def _test_external_key_vault(self, with_external_key_vault): self.client.db.coll.drop() vault = create_key_vault( - self.client.admin.datakeys, + self.client.keyvault.datakeys, json_data('corpus', 'corpus-key-local.json'), json_data('corpus', 'corpus-key-aws.json')) self.addCleanup(vault.drop) @@ -714,7 +714,7 @@ def _test_external_key_vault(self, with_external_key_vault): else: key_vault_client = client_context.client opts = AutoEncryptionOpts( - self.kms_providers(), 'admin.datakeys', schema_map=schemas, + self.kms_providers(), 'keyvault.datakeys', schema_map=schemas, key_vault_client=key_vault_client) client_encrypted = rs_or_single_client( @@ -722,7 +722,7 @@ def _test_external_key_vault(self, with_external_key_vault): self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', key_vault_client, OPTS) + self.kms_providers(), 'keyvault.datakeys', key_vault_client, OPTS) self.addCleanup(client_encryption.close) if with_external_key_vault: @@ -768,7 +768,7 @@ def test_views_are_prohibited(self): self.client.db.create_collection('view', viewOn='coll') self.addCleanup(self.client.db.view.drop) - opts = AutoEncryptionOpts(self.kms_providers(), 'admin.datakeys') + opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys') client_encrypted = rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation='standard') self.addCleanup(client_encrypted.close) @@ -822,7 +822,7 @@ def _test_corpus(self, opts): self.addCleanup(coll.drop) vault = create_key_vault( - self.client.admin.datakeys, + self.client.keyvault.datakeys, json_data('corpus', 'corpus-key-local.json'), json_data('corpus', 'corpus-key-aws.json')) self.addCleanup(vault.drop) @@ -832,7 +832,7 @@ def _test_corpus(self, opts): self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'admin.datakeys', client_context.client, + self.kms_providers(), 'keyvault.datakeys', client_context.client, OPTS) self.addCleanup(client_encryption.close) @@ -906,7 +906,7 @@ def _test_corpus(self, opts): self.assertEqual(value['value'], corpus[key]['value'], key) def test_corpus(self): - opts = AutoEncryptionOpts(self.kms_providers(), 'admin.datakeys') + opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys') self._test_corpus(opts) def test_corpus_local_schema(self): @@ -914,7 +914,7 @@ def test_corpus_local_schema(self): schemas = {'db.coll': self.fix_up_schema( json_data('corpus', 'corpus-schema.json'))} opts = AutoEncryptionOpts( - self.kms_providers(), 'admin.datakeys', schema_map=schemas) + self.kms_providers(), 'keyvault.datakeys', schema_map=schemas) self._test_corpus(opts) @@ -939,14 +939,14 @@ def setUpClass(cls): # Create the key vault. coll = client_context.client.get_database( - 'admin', + 'keyvault', write_concern=WriteConcern(w='majority'), codec_options=OPTS)['datakeys'] coll.drop() coll.insert_one(json_data('limits', 'limits-key.json')) opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, 'admin.datakeys') + {'local': {'key': LOCAL_MASTER_KEY}}, 'keyvault.datakeys') cls.listener = OvertCommandListener() cls.client_encrypted = rs_or_single_client( auto_encryption_opts=opts, event_listeners=[cls.listener]) @@ -1033,7 +1033,7 @@ class TestCustomEndpoint(EncryptionIntegrationTest): def setUpClass(cls): super(TestCustomEndpoint, cls).setUpClass() cls.client_encryption = ClientEncryption( - {'aws': AWS_CREDS}, 'admin.datakeys', client_context.client, OPTS) + {'aws': AWS_CREDS}, 'keyvault.datakeys', client_context.client, OPTS) def _test_create_data_key(self, master_key): data_key_id = self.client_encryption.create_data_key( From 8256af6ed92edba70cb5dd12138b12692f74c05c Mon Sep 17 00:00:00 2001 From: Pavel Alimpiev <7024354+VaultVulp@users.noreply.github.com> Date: Mon, 20 Apr 2020 19:48:47 +0300 Subject: [PATCH 0111/2111] Update Documentation link in the readme (#440) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 792861251c..1228e9d336 100644 --- a/README.rst +++ b/README.rst @@ -204,7 +204,7 @@ Here's a basic example (for more see the *examples* section of the docs): Documentation ============= -Documentation is available at [pymongo.readthedocs.io](https://pymongo.readthedocs.io/en/stable/). +Documentation is available at `pymongo.readthedocs.io `_. To build the documentation, you will need to install sphinx_. Documentation can be generated by running **python From 463d759ddcfaecb9095df6b4fc9923dff89e528b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 17 Apr 2020 17:00:02 -0700 Subject: [PATCH 0112/2111] PYTHON-2116 Add __repr__ to monitoring events and description classes --- pymongo/monitoring.py | 48 ++++++++++++++++++ pymongo/server.py | 7 +-- pymongo/server_description.py | 8 +++ pymongo/topology.py | 6 +++ pymongo/topology_description.py | 5 ++ test/test_monitoring.py | 86 +++++++++++++++++++++++++++++++++ test/test_server_description.py | 6 +++ test/test_topology.py | 22 ++++++--- 8 files changed, 177 insertions(+), 11 deletions(-) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 103d0d3ee0..25b0e7c53f 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -561,6 +561,11 @@ def database_name(self): """The name of the database this command was run against.""" return self.__db + def __repr__(self): + return "<%s %s db: %r, command: %r, operation_id: %s>" % ( + self.__class__.__name__, self.connection_id, self.database_name, + self.command_name, self.operation_id) + class CommandSucceededEvent(_CommandEvent): """Event published when a command succeeds. @@ -596,6 +601,11 @@ def reply(self): """The server failure document for this operation.""" return self.__reply + def __repr__(self): + return "<%s %s command: %r, operation_id: %s, duration_micros: %s>" % ( + self.__class__.__name__, self.connection_id, + self.command_name, self.operation_id, self.duration_micros) + class CommandFailedEvent(_CommandEvent): """Event published when a command fails. @@ -626,6 +636,13 @@ def failure(self): """The server failure document for this operation.""" return self.__failure + def __repr__(self): + return ( + "<%s %s command: %r, operation_id: %s, duration_micros: %s, " + "failure: %r>" % ( + self.__class__.__name__, self.connection_id, self.command_name, + self.operation_id, self.duration_micros, self.failure)) + class _PoolEvent(object): """Base class for pool events.""" @@ -928,6 +945,10 @@ def topology_id(self): """A unique identifier for the topology this server is a part of.""" return self.__topology_id + def __repr__(self): + return "<%s %s topology_id: %s>" % ( + self.__class__.__name__, self.server_address, self.topology_id) + class ServerDescriptionChangedEvent(_ServerEvent): """Published when server description changes. @@ -954,6 +975,11 @@ def new_description(self): :class:`~pymongo.server_description.ServerDescription`.""" return self.__new_description + def __repr__(self): + return "<%s %s changed from: %s, to: %s>" % ( + self.__class__.__name__, self.server_address, + self.previous_description, self.new_description) + class ServerOpeningEvent(_ServerEvent): """Published when server is initialized. @@ -986,6 +1012,10 @@ def topology_id(self): """A unique identifier for the topology this server is a part of.""" return self.__topology_id + def __repr__(self): + return "<%s topology_id: %s>" % ( + self.__class__.__name__, self.topology_id) + class TopologyDescriptionChangedEvent(TopologyEvent): """Published when the topology description changes. @@ -1012,6 +1042,11 @@ def new_description(self): :class:`~pymongo.topology_description.TopologyDescription`.""" return self.__new_description + def __repr__(self): + return "<%s topology_id: %s changed from: %s, to: %s>" % ( + self.__class__.__name__, self.topology_id, + self.previous_description, self.new_description) + class TopologyOpenedEvent(TopologyEvent): """Published when the topology is initialized. @@ -1045,6 +1080,9 @@ def connection_id(self): to.""" return self.__connection_id + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.connection_id) + class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): """Published when a heartbeat is started. @@ -1078,6 +1116,11 @@ def reply(self): """An instance of :class:`~pymongo.ismaster.IsMaster`.""" return self.__reply + def __repr__(self): + return "<%s %s duration: %s, reply: %s>" % ( + self.__class__.__name__, self.connection_id, + self.duration, self.reply) + class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): """Fired when the server heartbeat fails, either with an "ok: 0" @@ -1103,6 +1146,11 @@ def reply(self): """A subclass of :exc:`Exception`.""" return self.__reply + def __repr__(self): + return "<%s %s duration: %s, reply: %r>" % ( + self.__class__.__name__, self.connection_id, + self.duration, self.reply) + class _EventListeners(object): """Configure event listeners for a client instance. diff --git a/pymongo/server.py b/pymongo/server.py index b1473dfe90..18919b9e2e 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -225,8 +225,5 @@ def _split_message(self, message): request_id, data = message return request_id, data, 0 - def __str__(self): - d = self._description - return '' % ( - d.address[0], d.address[1], - SERVER_TYPE._fields[d.server_type]) + def __repr__(self): + return '<%s %r>' % (self.__class__.__name__, self._description) diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 4f4dcdae8f..2960a1622f 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -228,5 +228,13 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __repr__(self): + errmsg = '' + if self.error: + errmsg = ', error=%r' % (self.error,) + return "<%s %s server_type: %s, rtt: %s%s>" % ( + self.__class__.__name__, self.address, self.server_type_name, + self.round_trip_time, errmsg) + # For unittesting only. Use under no circumstances! _host_to_round_trip_time = {} diff --git a/pymongo/topology.py b/pymongo/topology.py index d43083b7ea..de446bdef1 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -686,3 +686,9 @@ def _error_message(self, selector): else: return ','.join(str(server.error) for server in servers if server.error) + + def __repr__(self): + msg = '' + if not self._opened: + msg = 'CLOSED ' + return '<%s %s%r>' % (self.__class__.__name__, msg, self._description) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b3b912f8de..0362881463 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -288,6 +288,11 @@ def has_writable_server(self): """ return self.has_readable_server(ReadPreference.PRIMARY) + def __repr__(self): + return "<%s id: %s, topology_type: %s, servers: %r>" % ( + self.__class__.__name__, self._topology_settings._topology_id, + self.topology_type_name, list(self._server_descriptions.values())) + # If topology type is Unknown and we receive an ismaster response, what should # the new topology type be? diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 2e16e1c9a4..44d924f798 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1347,5 +1347,91 @@ def test_simple(self): self.assertTrue(isinstance(started.request_id, int)) +class TestEventClasses(PyMongoTestCase): + + def test_command_event_repr(self): + request_id, connection_id, operation_id = 1, ('localhost', 27017), 2 + event = monitoring.CommandStartedEvent( + {'isMaster': 1}, 'admin', request_id, connection_id, operation_id) + self.assertEqual( + repr(event), + "") + delta = datetime.timedelta(milliseconds=100) + event = monitoring.CommandSucceededEvent( + delta, {'ok': 1}, 'isMaster', request_id, connection_id, + operation_id) + self.assertEqual( + repr(event), + "") + event = monitoring.CommandFailedEvent( + delta, {'ok': 0}, 'isMaster', request_id, connection_id, + operation_id) + self.assertEqual( + repr(event), + "") + + def test_server_heartbeat_event_repr(self): + connection_id = ('localhost', 27017) + event = monitoring.ServerHeartbeatStartedEvent(connection_id) + self.assertEqual( + repr(event), + "") + delta = 0.1 + event = monitoring.ServerHeartbeatSucceededEvent( + delta, {'ok': 1}, connection_id) + self.assertEqual( + repr(event), + "") + event = monitoring.ServerHeartbeatFailedEvent( + delta, 'ERROR', connection_id) + self.assertEqual( + repr(event), + "") + + def test_server_event_repr(self): + server_address = ('localhost', 27017) + topology_id = ObjectId('000000000000000000000001') + event = monitoring.ServerOpeningEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "") + event = monitoring.ServerDescriptionChangedEvent( + 'PREV', 'NEW', server_address, topology_id) + self.assertEqual( + repr(event), + "") + event = monitoring.ServerClosedEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "") + + def test_topology_event_repr(self): + topology_id = ObjectId('000000000000000000000001') + event = monitoring.TopologyOpenedEvent(topology_id) + self.assertEqual( + repr(event), + "") + event = monitoring.TopologyDescriptionChangedEvent( + 'PREV', 'NEW', topology_id) + self.assertEqual( + repr(event), + "") + event = monitoring.TopologyClosedEvent(topology_id) + self.assertEqual( + repr(event), + "") + + if __name__ == "__main__": unittest.main() diff --git a/test/test_server_description.py b/test/test_server_description.py index a79b9875e7..0aa89dd8f4 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -160,6 +160,12 @@ def test_all_hosts(self): [('a', 27017), ('b', 27018), ('c', 27017)], sorted(s.all_hosts)) + def test_repr(self): + s = parse_ismaster_response({'ok': 1, 'msg': 'isdbgrid'}) + self.assertEqual(repr(s), + "") + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index 99f4de9187..f8217be2c9 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -114,12 +114,6 @@ def setUp(self): self.addCleanup(self.client_knobs.disable) -# Use assertRaisesRegex if available, otherwise use Python 2.7's -# deprecated assertRaisesRegexp, with a 'p'. -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - TopologyTest.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - class TestTopologyConfiguration(TopologyTest): def test_timeout_configuration(self): pool_options = PoolOptions(connect_timeout=1, socket_timeout=2) @@ -623,6 +617,22 @@ def write_batch_size(): self.assertEqual(2, write_batch_size()) + def test_topology_repr(self): + t = create_mock_topology(replica_set_name='rs') + self.addCleanup(t.close) + got_ismaster(t, ('a', 27017), { + 'ok': 1, + 'ismaster': True, + 'setName': 'rs', + 'hosts': ['a', 'b']}) + self.assertEqual( + repr(t.description), + ", " + "]>" % (t._topology_id,)) + def wait_for_master(topology): """Wait for a Topology to discover a writable server. From 4398b4b7da05a08a53e51ab86587795cbe886d7e Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 20 Apr 2020 12:24:18 -0700 Subject: [PATCH 0113/2111] PYTHON-2201 Deprecate the Collection.reindex method --- doc/changelog.rst | 3 +++ pymongo/collection.py | 24 ++++++++++++++++++++---- test/__init__.py | 4 +++- test/test_legacy_api.py | 3 +++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index ea89761a82..a058e75178 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -38,6 +38,9 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the server optimizes queries against the oplog collection without requiring the user to set this flag. +- Deprecated :meth:`pymongo.collection.Collection.reindex`. Use + :meth:`~pymongo.database.Database.command` to run the ``reIndex`` command + instead. .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ diff --git a/pymongo/collection.py b/pymongo/collection.py index eba872f95b..0778d0086c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2178,6 +2178,15 @@ def drop_index(self, index_or_name, session=None, **kwargs): def reindex(self, session=None, **kwargs): """Rebuilds all indexes on this collection. + **DEPRECATED** - The :meth:`~reindex` method is deprecated and will be + removed in PyMongo 4.0. Use :meth:`~pymongo.database.Database.command` + to run the ``reIndex`` command directly instead:: + + db.command({"reIndex": ""}) + + .. note:: Starting in MongoDB 4.6, the `reIndex` command can only be + run when connected to a standalone mongod. + :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. @@ -2188,19 +2197,26 @@ def reindex(self, session=None, **kwargs): are built in the foreground) and will be slow for large collections. + .. versionchanged:: 3.11 + Deprecated. + .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword arguments. - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - .. versionchanged:: 3.5 We no longer apply this collection's write concern to this operation. MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns an error if we include the write concern. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. """ + warnings.warn("The reindex method is deprecated and will be removed in " + "PyMongo 4.0. Use the Database.command method to run the " + "reIndex command instead.", + DeprecationWarning, stacklevel=2) cmd = SON([("reIndex", self.__name)]) cmd.update(kwargs) with self._socket_for_writes(session) as sock_info: diff --git a/test/__init__.py b/test/__init__.py index 5943d968f1..54295ff633 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -671,7 +671,9 @@ def mongos_seeds(self): @property def supports_reindex(self): """Does the connected server support reindex?""" - return not (self.version.at_least(4, 1, 0) and self.is_mongos) + return not ((self.version.at_least(4, 1, 0) and self.is_mongos) or + (self.version.at_least(4, 5, 0) and ( + self.is_mongos or self.is_rs))) @property def supports_getpreverror(self): diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 01c4d4ae01..9a0b162c2f 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -105,6 +105,9 @@ def test_ensure_index_deprecation(self): finally: self.db.test.drop() + def test_reindex_deprecation(self): + self.assertRaises(DeprecationWarning, lambda: self.db.test.reindex()) + class TestLegacy(IntegrationTest): From 250364f6087654f2987fde18e5a2a770f89c6a4f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 23 Apr 2020 16:11:28 -0700 Subject: [PATCH 0114/2111] PYTHON-2116 Sort servers by address in topology repr --- pymongo/topology_description.py | 5 ++++- test/test_topology.py | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 0362881463..3529cfd093 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -289,9 +289,12 @@ def has_writable_server(self): return self.has_readable_server(ReadPreference.PRIMARY) def __repr__(self): + # Sort the servers by address. + servers = sorted(self._server_descriptions.values(), + key=lambda sd: sd.address) return "<%s id: %s, topology_type: %s, servers: %r>" % ( self.__class__.__name__, self._topology_settings._topology_id, - self.topology_type_name, list(self._server_descriptions.values())) + self.topology_type_name, servers) # If topology type is Unknown and we receive an ismaster response, what should diff --git a/test/test_topology.py b/test/test_topology.py index f8217be2c9..c4a785af6a 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -624,13 +624,15 @@ def test_topology_repr(self): 'ok': 1, 'ismaster': True, 'setName': 'rs', - 'hosts': ['a', 'b']}) + 'hosts': ['a', 'c', 'b']}) self.assertEqual( repr(t.description), ", " ", " + "]>" % (t._topology_id,)) From 7809376a46dcb88e83a7e10188756b28882fc505 Mon Sep 17 00:00:00 2001 From: Wan Bachtiar Date: Tue, 28 Apr 2020 04:54:02 +1000 Subject: [PATCH 0115/2111] PYTHON-2208 Update community links to be more specific (#441) --- README.rst | 4 ++-- doc/index.rst | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index 1228e9d336..0433b9c80e 100644 --- a/README.rst +++ b/README.rst @@ -22,10 +22,10 @@ Support / Feedback ================== For issues with, questions about, or feedback for PyMongo, please look into -our `support channels `_. Please +our `support channels `_. Please do not email any of the PyMongo developers directly with issues or questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. +Forums `_. Bugs / Feature Requests ======================= diff --git a/doc/index.rst b/doc/index.rst index d649884c4b..caa9ae8ced 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -54,9 +54,9 @@ everything you need to know to use **PyMongo**. Getting Help ------------ If you're having trouble or have questions about PyMongo, ask your question in -our `MongoDB Community Forums `_. Once you -get an answer, it'd be great if you could work it back into this documentation -and contribute! +our `MongoDB Community Forums `_. +Once you get an answer, it'd be great if you could work it back into this +documentation and contribute! Issues ------ From 58317a4c098d178628bdd620e01d1639b28718b6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 27 Apr 2020 15:47:26 -0700 Subject: [PATCH 0116/2111] PYTHON-2215 Fix failing partialFilterExpression test on 4.5-latest --- test/test_collection.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/test_collection.py b/test/test_collection.py index acd2c2c1a1..8acd63d7b3 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -619,11 +619,6 @@ def test_index_filter(self): partialFilterExpression={"x": {"$asdasd": 3}}) self.assertRaises(OperationFailure, db.test.create_index, "x", partialFilterExpression={"$and": 5}) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={ - "$and": [{"$and": [{"x": {"$lt": 2}}, - {"x": {"$gt": 0}}]}, - {"x": {"$exists": True}}]}) self.assertEqual("x_1", db.test.create_index( [('x', ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}})) From 9cc3652ec33d0c86b8858336bcb209ead78f6467 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 27 Apr 2020 18:04:38 -0700 Subject: [PATCH 0117/2111] PYTHON-2186 create_indexes raises an error with commitQuorum on <4.4 --- pymongo/collection.py | 6 ++++++ test/test_collection.py | 14 ++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/pymongo/collection.py b/pymongo/collection.py index 0778d0086c..2865bce091 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1900,6 +1900,7 @@ def create_indexes(self, indexes, session=None, **kwargs): names = [] with self._socket_for_writes(session) as sock_info: supports_collations = sock_info.max_wire_version >= 5 + supports_quorum = sock_info.max_wire_version >= 9 def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): @@ -1916,6 +1917,11 @@ def gen_indexes(): cmd = SON([('createIndexes', self.name), ('indexes', list(gen_indexes()))]) cmd.update(kwargs) + if 'commitQuorum' in kwargs and not supports_quorum: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes") + self._command( sock_info, cmd, read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, diff --git a/test/test_collection.py b/test/test_collection.py index 8acd63d7b3..0e2eb53d9c 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -234,6 +234,20 @@ def test_create_indexes(self): with self.write_concern_collection() as coll: coll.create_indexes([IndexModel('hello')]) + @client_context.require_version_max(4, 3, -1) + def test_create_indexes_commitQuorum_requires_44(self): + db = self.db + with self.assertRaisesRegex( + ConfigurationError, + 'Must be connected to MongoDB 4\.4\+ to use the commitQuorum ' + 'option for createIndexes'): + db.coll.create_indexes([IndexModel('a')], commitQuorum="majority") + + @client_context.require_no_standalone + @client_context.require_version_min(4, 4, -1) + def test_create_indexes_commitQuorum(self): + self.db.coll.create_indexes([IndexModel('a')], commitQuorum="majority") + def test_create_index(self): db = self.db From 71d1227932fba78f9e500e9f92c15255cb908b53 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 27 Apr 2020 13:29:42 -0700 Subject: [PATCH 0118/2111] PYTHON-2115 Remove threading.Lock() from SocketChecker --- pymongo/pool.py | 8 ++++++-- pymongo/socket_checker.py | 18 +++++++----------- test/test_pooling.py | 22 +--------------------- 3 files changed, 14 insertions(+), 34 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 8bd6602cc9..972a8c1db7 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -487,6 +487,7 @@ def __init__(self, sock, pool, address, id): self.enabled_for_cmap = pool.enabled_for_cmap self.compression_settings = pool.opts.compression_settings self.compression_context = None + self.socket_checker = SocketChecker() # The pool's generation changes with each reset() so we can close # sockets created before the last reset. @@ -752,6 +753,10 @@ def close_socket(self, reason): self.listeners.publish_connection_closed( self.address, self.id, reason) + def socket_closed(self): + """Return True if we know socket has been closed, False otherwise.""" + return self.socket_checker.socket_closed(self.sock) + def send_cluster_time(self, command, session, client): """Add cluster time for MongoDB >= 3.6.""" if self.max_wire_version >= 6 and client: @@ -976,7 +981,6 @@ def __init__(self, address, options, handshake=True): self._socket_semaphore = thread_util.create_semaphore( self.opts.max_pool_size, max_waiters) - self.socket_checker = SocketChecker() if self.enabled_for_cmap: self.opts.event_listeners.publish_pool_created( self.address, self.opts.non_default_options) @@ -1244,7 +1248,7 @@ def _perished(self, sock_info): if (self._check_interval_seconds is not None and ( 0 == self._check_interval_seconds or idle_time_seconds > self._check_interval_seconds)): - if self.socket_checker.socket_closed(sock_info.sock): + if sock_info.socket_closed(): sock_info.close_socket(ConnectionClosedReason.ERROR) return True diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 9b21e69d2d..93d6cb5a42 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -16,7 +16,6 @@ import errno import select -import threading _HAVE_POLL = hasattr(select, "poll") _SelectError = getattr(select, "error", OSError) @@ -34,10 +33,8 @@ class SocketChecker(object): def __init__(self): if _HAVE_POLL: - self._lock = threading.Lock() self._poller = select.poll() else: - self._lock = None self._poller = None def select(self, sock, read=False, write=False, timeout=0): @@ -50,14 +47,13 @@ def select(self, sock, read=False, write=False, timeout=0): mask = mask | select.POLLIN | select.POLLPRI if write: mask = mask | select.POLLOUT - with self._lock: - self._poller.register(sock, mask) - try: - # poll() timeout is in milliseconds. select() - # timeout is in seconds. - res = self._poller.poll(timeout * 1000) - finally: - self._poller.unregister(sock) + self._poller.register(sock, mask) + try: + # poll() timeout is in milliseconds. select() + # timeout is in seconds. + res = self._poller.poll(timeout * 1000) + finally: + self._poller.unregister(sock) else: rlist = [sock] if read else [] wlist = [sock] if write else [] diff --git a/test/test_pooling.py b/test/test_pooling.py index bfe5abc110..8ed4068a6f 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -236,8 +236,7 @@ def test_pool_removes_dead_socket(self): # Simulate a closed socket without telling the SocketInfo it's # closed. sock_info.sock.close() - self.assertTrue( - cx_pool.socket_checker.socket_closed(sock_info.sock)) + self.assertTrue(sock_info.socket_closed()) with cx_pool.get_socket({}) as new_sock_info: self.assertEqual(0, len(cx_pool.sockets)) @@ -257,25 +256,6 @@ def test_socket_closed(self): s.close() self.assertTrue(socket_checker.socket_closed(s)) - def test_socket_closed_thread_safe(self): - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.connect((client_context.host, client_context.port)) - self.addCleanup(s.close) - socket_checker = SocketChecker() - - def check_socket(): - for _ in range(1000): - self.assertFalse(socket_checker.socket_closed(s)) - - threads = [] - for i in range(3): - thread = threading.Thread(target=check_socket) - thread.start() - threads.append(thread) - - for thread in threads: - thread.join() - def test_return_socket_after_reset(self): pool = self.create_pool() with pool.get_socket({}) as sock: From 4c727fd9c0815583c2f827ec4393ba66cf40129a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 23 Apr 2020 10:11:26 -0700 Subject: [PATCH 0119/2111] PYTHON-2158 Support mechanism negotiation on the connection handshake --- pymongo/auth.py | 16 ++++++---- pymongo/ismaster.py | 12 +++++++ pymongo/mongo_client.py | 2 +- pymongo/pool.py | 70 ++++++++++++++++++++++++----------------- pymongo/topology.py | 4 +-- test/test_client.py | 3 +- test/utils.py | 2 +- 7 files changed, 69 insertions(+), 40 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index b52c6b0aff..f37a0b4e58 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -562,12 +562,16 @@ def _authenticate_mongo_cr(credentials, sock_info): def _authenticate_default(credentials, sock_info): if sock_info.max_wire_version >= 7: - source = credentials.source - cmd = SON([ - ('ismaster', 1), - ('saslSupportedMechs', source + '.' + credentials.username)]) - mechs = sock_info.command( - source, cmd, publish_events=False).get('saslSupportedMechs', []) + if credentials in sock_info.negotiated_mechanisms: + mechs = sock_info.negotiated_mechanisms[credentials] + else: + source = credentials.source + cmd = SON([ + ('ismaster', 1), + ('saslSupportedMechs', source + '.' + credentials.username)]) + mechs = sock_info.command( + source, cmd, publish_events=False).get( + 'saslSupportedMechs', []) if 'SCRAM-SHA-256' in mechs: return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-256') else: diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index e723ff0a93..e2afec307c 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -156,3 +156,15 @@ def last_write_date(self): @property def compressors(self): return self._doc.get('compression') + + @property + def sasl_supported_mechs(self): + """Supported authentication mechanisms for the current user. + + For example:: + + >>> ismaster.sasl_supported_mechs + ["SCRAM-SHA-1", "SCRAM-SHA-256"] + + """ + return self._doc.get('saslSupportedMechs', []) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ac00a38e8c..694178805a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1751,7 +1751,7 @@ def _process_periodic_tasks(self): maintain connection pool parameters.""" self._process_kill_cursors() try: - self._topology.update_pool() + self._topology.update_pool(self.__all_credentials) except Exception: helpers._handle_exception() diff --git a/pymongo/pool.py b/pymongo/pool.py index 972a8c1db7..b255af2203 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -458,6 +458,16 @@ def metadata(self): return self.__metadata.copy() +def _negotiate_creds(all_credentials): + """Return one credential that needs mechanism negotiation, if any. + """ + if all_credentials: + for creds in all_credentials.values(): + if creds.mechanism == 'DEFAULT' and creds.username: + return creds + return None + + class SocketInfo(object): """Store a socket with some metadata. @@ -488,13 +498,16 @@ def __init__(self, sock, pool, address, id): self.compression_settings = pool.opts.compression_settings self.compression_context = None self.socket_checker = SocketChecker() + # Support for mechanism negotiation on the initial handshake. + # Maps credential to saslSupportedMechs. + self.negotiated_mechanisms = {} # The pool's generation changes with each reset() so we can close # sockets created before the last reset. self.generation = pool.generation self.ready = False - def ismaster(self, metadata, cluster_time): + def ismaster(self, metadata, cluster_time, all_credentials=None): cmd = SON([('ismaster', 1)]) if not self.performed_handshake: cmd['client'] = metadata @@ -504,6 +517,12 @@ def ismaster(self, metadata, cluster_time): if self.max_wire_version >= 6 and cluster_time is not None: cmd['$clusterTime'] = cluster_time + # XXX: Simplify in PyMongo 4.0 when all_credentials is always a single + # unchangeable value per MongoClient. + creds = _negotiate_creds(all_credentials) + if creds: + cmd['saslSupportedMechs'] = creds.source + '.' + creds.username + ismaster = IsMaster(self.command('admin', cmd, publish_events=False)) self.is_writable = ismaster.is_writable self.max_wire_version = ismaster.max_wire_version @@ -520,6 +539,8 @@ def ismaster(self, metadata, cluster_time): self.performed_handshake = True self.op_msg_enabled = ismaster.max_wire_version >= 6 + if creds: + self.negotiated_mechanisms[creds] = ismaster.sasl_supported_mechs return ismaster def command(self, dbname, spec, slave_ok=False, @@ -701,8 +722,7 @@ def check_auth(self, all_credentials): self.authset.discard(credentials) for credentials in cached - authset: - auth.authenticate(credentials, self) - self.authset.add(credentials) + self.authenticate(credentials) # CMAP spec says to publish the ready event only after authenticating # the connection. @@ -721,6 +741,8 @@ def authenticate(self, credentials): """ auth.authenticate(credentials, self) self.authset.add(credentials) + # negotiated_mechanisms are no longer needed. + self.negotiated_mechanisms.pop(credentials, None) def validate_session(self, client, session): """Validate this session before use with client. @@ -1026,7 +1048,7 @@ def reset(self): def close(self): self._reset(close=True) - def remove_stale_sockets(self, reference_generation): + def remove_stale_sockets(self, reference_generation, all_credentials): """Removes stale sockets then adds new ones if pool is too small and has not been reset. The `reference_generation` argument specifies the `generation` at the point in time this operation was requested on the @@ -1050,7 +1072,7 @@ def remove_stale_sockets(self, reference_generation): if not self._socket_semaphore.acquire(False): break try: - sock_info = self.connect() + sock_info = self.connect(all_credentials) with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. @@ -1061,7 +1083,7 @@ def remove_stale_sockets(self, reference_generation): finally: self._socket_semaphore.release() - def connect(self): + def connect(self, all_credentials=None): """Connect to Mongo and return a new SocketInfo. Can raise ConnectionFailure or CertificateError. @@ -1081,9 +1103,6 @@ def connect(self): try: sock = _configured_socket(self.address, self.opts) except socket.error as error: - if sock is not None: - sock.close() - if self.enabled_for_cmap: listeners.publish_connection_closed( self.address, conn_id, ConnectionClosedReason.ERROR) @@ -1092,7 +1111,7 @@ def connect(self): sock_info = SocketInfo(sock, self, self.address, conn_id) if self.handshake: - sock_info.ismaster(self.opts.metadata, None) + sock_info.ismaster(self.opts.metadata, None, all_credentials) self.is_writable = sock_info.is_writable return sock_info @@ -1123,29 +1142,23 @@ def get_socket(self, all_credentials, checkout=False): listeners = self.opts.event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) - # First get a socket, then attempt authentication. Simplifies - # semaphore management in the face of network errors during auth. - sock_info = self._get_socket_no_auth() - checked_auth = False + + sock_info = self._get_socket(all_credentials) + + if self.enabled_for_cmap: + listeners.publish_connection_checked_out( + self.address, sock_info.id) try: - sock_info.check_auth(all_credentials) - checked_auth = True - if self.enabled_for_cmap: - listeners.publish_connection_checked_out( - self.address, sock_info.id) yield sock_info except: # Exception in caller. Decrement semaphore. - self.return_socket(sock_info, publish_checkin=checked_auth) - if self.enabled_for_cmap and not checked_auth: - self.opts.event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) + self.return_socket(sock_info) raise else: if not checkout: self.return_socket(sock_info) - def _get_socket_no_auth(self): + def _get_socket(self, all_credentials): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. # See test.test_client:TestClient.test_fork for an example of @@ -1177,10 +1190,11 @@ def _get_socket_no_auth(self): sock_info = self.sockets.popleft() except IndexError: # Can raise ConnectionFailure or CertificateError. - sock_info = self.connect() + sock_info = self.connect(all_credentials) else: if self._perished(sock_info): sock_info = None + sock_info.check_auth(all_credentials) except Exception: self._socket_semaphore.release() with self.lock: @@ -1193,16 +1207,14 @@ def _get_socket_no_auth(self): return sock_info - def return_socket(self, sock_info, publish_checkin=True): + def return_socket(self, sock_info): """Return the socket to the pool, or if it's closed discard it. :Parameters: - `sock_info`: The socket to check into the pool. - - `publish_checkin`: If False, a ConnectionCheckedInEvent will not - be published. """ listeners = self.opts.event_listeners - if self.enabled_for_cmap and publish_checkin: + if self.enabled_for_cmap: listeners.publish_connection_checked_in(self.address, sock_info.id) if self.pid != os.getpid(): self.reset() diff --git a/pymongo/topology.py b/pymongo/topology.py index de446bdef1..62ce0cbc2d 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -430,7 +430,7 @@ def mark_server_unknown_and_request_check(self, address, error): self._reset_server(address, reset_pool=False, error=error) self._request_check(address) - def update_pool(self): + def update_pool(self, all_credentials): # Remove any stale sockets and add new sockets if pool is too small. servers = [] with self._lock: @@ -438,7 +438,7 @@ def update_pool(self): servers.append((server, server._pool.generation)) for server, generation in servers: - server._pool.remove_stale_sockets(generation) + server._pool.remove_stale_sockets(generation, all_credentials) def close(self): """Clear pools and terminate monitors. Topology reopens on demand.""" diff --git a/test/test_client.py b/test/test_client.py index 3ce5b9e41a..0540786c54 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1487,7 +1487,8 @@ def run(self): try: while True: for _ in range(10): - client._topology.update_pool() + client._topology.update_pool( + client._MongoClient__all_credentials) if generation != pool.generation: break finally: diff --git a/test/utils.py b/test/utils.py index bd5cea79ed..7b5bb7fa5f 100644 --- a/test/utils.py +++ b/test/utils.py @@ -230,7 +230,7 @@ def close(self): def update_is_writable(self, is_writable): pass - def remove_stale_sockets(self, reference_generation): + def remove_stale_sockets(self, *args, **kwargs): pass From 7099e1be8bbbb9520bf58378f269657c2a1961ad Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 13 Apr 2020 11:18:45 -0700 Subject: [PATCH 0120/2111] PYTHON-2199 Reduce race conditions in SDAM error handling Use Pool.generation and topologyVersion to reduce race conditions SDAM error handling. Implement SDAM error handling spec tests. --- pymongo/database.py | 4 +- pymongo/ismaster.py | 4 + pymongo/mongo_client.py | 69 ++--- pymongo/server_description.py | 16 +- pymongo/topology.py | 158 ++++++++--- pymongo/topology_description.py | 8 +- test/barrier.py | 193 +++++++++++++ .../errors/error_handling_handshake.json | 112 ++++++++ .../errors/non-stale-network-error.json | 79 ++++++ .../non-stale-network-timeout-error.json | 87 ++++++ ...Version-greater-InterruptedAtShutdown.json | 99 +++++++ ...eater-InterruptedDueToReplStateChange.json | 99 +++++++ ...ale-topologyVersion-greater-NotMaster.json | 99 +++++++ ...ogyVersion-greater-NotMasterNoSlaveOk.json | 99 +++++++ ...yVersion-greater-NotMasterOrSecondary.json | 99 +++++++ ...ogyVersion-greater-PrimarySteppedDown.json | 99 +++++++ ...ogyVersion-greater-ShutdownInProgress.json | 99 +++++++ ...Version-missing-InterruptedAtShutdown.json | 84 ++++++ ...ssing-InterruptedDueToReplStateChange.json | 84 ++++++ ...ale-topologyVersion-missing-NotMaster.json | 84 ++++++ ...ogyVersion-missing-NotMasterNoSlaveOk.json | 84 ++++++ ...yVersion-missing-NotMasterOrSecondary.json | 84 ++++++ ...ogyVersion-missing-PrimarySteppedDown.json | 84 ++++++ ...ogyVersion-missing-ShutdownInProgress.json | 84 ++++++ ...ccessId-changed-InterruptedAtShutdown.json | 99 +++++++ ...anged-InterruptedDueToReplStateChange.json | 99 +++++++ ...yVersion-proccessId-changed-NotMaster.json | 99 +++++++ ...proccessId-changed-NotMasterNoSlaveOk.json | 99 +++++++ ...occessId-changed-NotMasterOrSecondary.json | 99 +++++++ ...proccessId-changed-PrimarySteppedDown.json | 99 +++++++ ...proccessId-changed-ShutdownInProgress.json | 99 +++++++ .../errors/pre-42-InterruptedAtShutdown.json | 69 +++++ ...re-42-InterruptedDueToReplStateChange.json | 69 +++++ .../errors/pre-42-NotMaster.json | 69 +++++ .../errors/pre-42-NotMasterNoSlaveOk.json | 69 +++++ .../errors/pre-42-NotMasterOrSecondary.json | 69 +++++ .../errors/pre-42-PrimarySteppedDown.json | 69 +++++ .../errors/pre-42-ShutdownInProgress.json | 69 +++++ ...tale-generation-InterruptedAtShutdown.json | 174 ++++++++++++ ...ation-InterruptedDueToReplStateChange.json | 174 ++++++++++++ .../errors/stale-generation-NotMaster.json | 174 ++++++++++++ .../stale-generation-NotMasterNoSlaveOk.json | 174 ++++++++++++ ...stale-generation-NotMasterOrSecondary.json | 174 ++++++++++++ .../stale-generation-PrimarySteppedDown.json | 174 ++++++++++++ .../stale-generation-ShutdownInProgress.json | 174 ++++++++++++ ...dshakeCompletes-InterruptedAtShutdown.json | 174 ++++++++++++ ...letes-InterruptedDueToReplStateChange.json | 174 ++++++++++++ ...ion-afterHandshakeCompletes-NotMaster.json | 174 ++++++++++++ ...HandshakeCompletes-NotMasterNoSlaveOk.json | 174 ++++++++++++ ...ndshakeCompletes-NotMasterOrSecondary.json | 174 ++++++++++++ ...HandshakeCompletes-PrimarySteppedDown.json | 174 ++++++++++++ ...HandshakeCompletes-ShutdownInProgress.json | 174 ++++++++++++ ...ation-afterHandshakeCompletes-network.json | 161 +++++++++++ ...ation-afterHandshakeCompletes-timeout.json | 161 +++++++++++ ...dshakeCompletes-InterruptedAtShutdown.json | 174 ++++++++++++ ...letes-InterruptedDueToReplStateChange.json | 174 ++++++++++++ ...on-beforeHandshakeCompletes-NotMaster.json | 174 ++++++++++++ ...HandshakeCompletes-NotMasterNoSlaveOk.json | 174 ++++++++++++ ...ndshakeCompletes-NotMasterOrSecondary.json | 174 ++++++++++++ ...HandshakeCompletes-PrimarySteppedDown.json | 174 ++++++++++++ ...HandshakeCompletes-ShutdownInProgress.json | 174 ++++++++++++ ...tion-beforeHandshakeCompletes-network.json | 161 +++++++++++ ...tion-beforeHandshakeCompletes-timeout.json | 161 +++++++++++ ...topologyVersion-InterruptedAtShutdown.json | 146 ++++++++++ ...rsion-InterruptedDueToReplStateChange.json | 146 ++++++++++ .../stale-topologyVersion-NotMaster.json | 146 ++++++++++ ...le-topologyVersion-NotMasterNoSlaveOk.json | 146 ++++++++++ ...-topologyVersion-NotMasterOrSecondary.json | 146 ++++++++++ ...le-topologyVersion-PrimarySteppedDown.json | 146 ++++++++++ ...le-topologyVersion-ShutdownInProgress.json | 146 ++++++++++ .../rs/topology_version_equal.json | 99 +++++++ .../rs/topology_version_greater.json | 255 ++++++++++++++++++ .../rs/topology_version_less.json | 95 +++++++ test/test_discovery_and_monitoring.py | 122 +++++++-- test/test_sdam_monitoring_spec.py | 3 + test/test_server_description.py | 24 ++ test/test_topology.py | 58 +++- test/utils.py | 17 ++ 78 files changed, 9060 insertions(+), 120 deletions(-) create mode 100644 test/barrier.py create mode 100644 test/discovery_and_monitoring/errors/error_handling_handshake.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-network-error.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json create mode 100644 test/discovery_and_monitoring/rs/topology_version_equal.json create mode 100644 test/discovery_and_monitoring/rs/topology_version_greater.json create mode 100644 test/discovery_and_monitoring/rs/topology_version_less.json diff --git a/pymongo/database.py b/pymongo/database.py index 60dfc3ca46..3f0a953632 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -29,7 +29,6 @@ from pymongo.errors import (CollectionInvalid, ConfigurationError, InvalidName, - NotMasterError, OperationFailure) from pymongo.message import _first_batch from pymongo.read_preferences import ReadPreference @@ -1158,8 +1157,7 @@ def error(self): # doing so already. primary = self.__client.primary if primary: - self.__client._reset_server_and_request_check( - primary, NotMasterError(error_msg, error)) + self.__client._handle_getlasterror(primary, error_msg) return error def last_status(self): diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index e2afec307c..5223a12766 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -168,3 +168,7 @@ def sasl_supported_mechs(self): """ return self._doc.get('saslSupportedMechs', []) + + @property + def topology_version(self): + return self._doc.get('topologyVersion') diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 694178805a..7b84e2e504 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -59,8 +59,6 @@ ConfigurationError, ConnectionFailure, InvalidOperation, - NetworkTimeout, - NotMasterError, OperationFailure, PyMongoError, ServerSelectionTimeoutError) @@ -68,7 +66,8 @@ from pymongo.server_selectors import (writable_preferred_server_selector, writable_server_selector) from pymongo.server_type import SERVER_TYPE -from pymongo.topology import Topology +from pymongo.topology import (Topology, + _ErrorContext) from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.settings import TopologySettings from pymongo.uri_parser import (_handle_option_deprecations, @@ -1225,8 +1224,7 @@ def _get_topology(self): @contextlib.contextmanager def _get_socket(self, server, session, exhaust=False): - with _MongoClientErrorHandler( - self, server.description.address, session) as err_handler: + with _MongoClientErrorHandler(self, server, session) as err_handler: with server.get_socket( self.__all_credentials, checkout=exhaust) as sock_info: err_handler.contribute_socket(sock_info) @@ -1328,8 +1326,7 @@ def _run_operation_with_response(self, operation, unpack_res, operation.read_preference, operation.session, address=address) with _MongoClientErrorHandler( - self, server.description.address, - operation.session) as err_handler: + self, server, operation.session) as err_handler: err_handler.contribute_socket(operation.exhaust_mgr.sock) return server.run_operation_with_response( operation.exhaust_mgr.sock, @@ -1499,9 +1496,9 @@ def _retryable_write(self, retryable, func, session): with self._tmp_session(session) as s: return self._retry_with_session(retryable, func, s, None) - def _reset_server_and_request_check(self, address, error): + def _handle_getlasterror(self, address, error_msg): """Clear our pool for a server, mark it Unknown, and check it soon.""" - self._topology.reset_server_and_request_check(address, error) + self._topology.handle_getlasterror(address, error_msg) def __eq__(self, other): if isinstance(other, self.__class__): @@ -2167,18 +2164,24 @@ def __next__(self): class _MongoClientErrorHandler(object): """Error handler for MongoClient.""" - __slots__ = ('_client', '_server_address', '_session', '_max_wire_version') + __slots__ = ('_client', '_server_address', '_session', + '_max_wire_version', '_sock_generation') - def __init__(self, client, server_address, session): + def __init__(self, client, server, session): self._client = client - self._server_address = server_address + self._server_address = server.description.address self._session = session self._max_wire_version = common.MIN_WIRE_VERSION + # XXX: When get_socket fails, this generation could be out of date: + # "Note that when a network error occurs before the handshake + # completes then the error's generation number is the generation + # of the pool at the time the connection attempt was started." + self._sock_generation = server.pool.generation def contribute_socket(self, sock_info): """Provide socket information to the error handler.""" - # Currently, we only extract the max_wire_version information. self._max_wire_version = sock_info.max_wire_version + self._sock_generation = sock_info.generation def __enter__(self): return self @@ -2187,45 +2190,15 @@ def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is None: return + err_ctx = _ErrorContext( + exc_val, self._max_wire_version, self._sock_generation) + self._client._topology.handle_error(self._server_address, err_ctx) + if issubclass(exc_type, PyMongoError): if self._session and exc_val.has_error_label( "TransientTransactionError"): self._session._unpin_mongos() - if issubclass(exc_type, NetworkTimeout): - # The socket has been closed. Don't reset the server. - # Server Discovery And Monitoring Spec: "When an application - # operation fails because of any network error besides a socket - # timeout...." - if self._session: - self._session._server_session.mark_dirty() - elif issubclass(exc_type, NotMasterError): - # As per the SDAM spec if: - # - the server sees a "not master" error, and - # - the server is not shutting down, and - # - the server version is >= 4.2, then - # we keep the existing connection pool, but mark the server type - # as Unknown and request an immediate check of the server. - # Otherwise, we clear the connection pool, mark the server as - # Unknown and request an immediate check of the server. - err_code = exc_val.details.get('code', -1) - is_shutting_down = err_code in helpers._SHUTDOWN_CODES - if is_shutting_down or (self._max_wire_version <= 7): - # Clear the pool, mark server Unknown and request check. - self._client._reset_server_and_request_check( - self._server_address, exc_val) - else: - self._client._topology.mark_server_unknown_and_request_check( - self._server_address, exc_val) - elif issubclass(exc_type, ConnectionFailure): - # "Client MUST replace the server's description with type Unknown - # ... MUST NOT request an immediate check of the server." - self._client._topology.reset_server(self._server_address, exc_val) + if issubclass(exc_type, ConnectionFailure): if self._session: self._session._server_session.mark_dirty() - elif issubclass(exc_type, OperationFailure): - # Do not request an immediate check since the server is likely - # shutting down. - if exc_val.code in helpers._RETRYABLE_ERROR_CODES: - self._client._topology.reset_server( - self._server_address, exc_val) diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 2960a1622f..f8d0012616 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -36,7 +36,8 @@ class ServerDescription(object): '_max_write_batch_size', '_min_wire_version', '_max_wire_version', '_round_trip_time', '_me', '_is_writable', '_is_readable', '_ls_timeout_minutes', '_error', '_set_version', '_election_id', - '_cluster_time', '_last_write_date', '_last_update_time') + '_cluster_time', '_last_write_date', '_last_update_time', + '_topology_version') def __init__( self, @@ -68,6 +69,10 @@ def __init__( self._me = ismaster.me self._last_update_time = _time() self._error = error + self._topology_version = ismaster.topology_version + if error: + if hasattr(error, 'details') and isinstance(error.details, dict): + self._topology_version = error.details.get('topologyVersion') if ismaster.last_write_date: # Convert from datetime to seconds. @@ -207,6 +212,15 @@ def retryable_reads_supported(self): """Checks if this server supports retryable writes.""" return self._max_wire_version >= 6 + @property + def topology_version(self): + return self._topology_version + + def to_unknown(self): + unknown = ServerDescription(self.address) + unknown._topology_version = self.topology_version + return unknown + def __eq__(self, other): if isinstance(other, ServerDescription): return ((self._address == other.address) and diff --git a/pymongo/topology.py b/pymongo/topology.py index 62ce0cbc2d..a62282d1c3 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -26,14 +26,20 @@ else: import Queue -from pymongo import common -from pymongo import periodic_executor +from pymongo import (common, + helpers, + periodic_executor) from pymongo.pool import PoolOptions from pymongo.topology_description import (updated_topology_description, _updated_topology_description_srv_polling, TopologyDescription, SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE) -from pymongo.errors import ServerSelectionTimeoutError, ConfigurationError +from pymongo.errors import (ConnectionFailure, + ConfigurationError, + NetworkTimeout, + NotMasterError, + OperationFailure, + ServerSelectionTimeoutError) from pymongo.monitor import SrvMonitor from pymongo.monotonic import time as _time from pymongo.server import Server @@ -264,14 +270,17 @@ def _process_change(self, server_description): Hold the lock when calling this. """ td_old = self._description - old_server_description = td_old._server_descriptions[ - server_description.address] + sd_old = td_old._server_descriptions[server_description.address] + if _is_stale_server_description(sd_old, server_description): + # This is a stale isMaster response. Ignore it. + return + suppress_event = ((self._publish_server or self._publish_tp) - and old_server_description == server_description) + and sd_old == server_description) if self._publish_server and not suppress_event: self._events.put(( self._listeners.publish_server_description_changed, - (old_server_description, server_description, + (sd_old, server_description, server_description.address, self._topology_id))) self._description = updated_topology_description( @@ -410,25 +419,15 @@ def reset_pool(self, address): if server: server.pool.reset() - def reset_server(self, address, error): - """Clear our pool for a server and mark it Unknown. - - Do *not* request an immediate check. - """ - with self._lock: - self._reset_server(address, reset_pool=True, error=error) - - def reset_server_and_request_check(self, address, error): + def handle_getlasterror(self, address, error_msg): """Clear our pool for a server, mark it Unknown, and check it soon.""" + error = NotMasterError(error_msg, {'code': 10107, 'errmsg': error_msg}) with self._lock: - self._reset_server(address, reset_pool=True, error=error) - self._request_check(address) - - def mark_server_unknown_and_request_check(self, address, error): - """Mark a server Unknown, and check it soon.""" - with self._lock: - self._reset_server(address, reset_pool=False, error=error) - self._request_check(address) + server = self._servers.get(address) + if server: + self._process_change(ServerDescription(address, error=error)) + server.pool.reset() + server.request_check() def update_pool(self, all_credentials): # Remove any stale sockets and add new sockets if pool is too small. @@ -540,28 +539,78 @@ def _ensure_opened(self): for server in itervalues(self._servers): server.open() - def _reset_server(self, address, reset_pool, error): - """Mark a server Unknown and optionally reset it's pool. - - Hold the lock when calling this. Does *not* request an immediate check. - """ + def _is_stale_error(self, address, err_ctx): server = self._servers.get(address) - - # "server" is None if another thread removed it from the topology. - if server: - if reset_pool: + if server is None: + # Another thread removed this server from the topology. + return True + + if err_ctx.sock_generation != server._pool.generation: + # This is an outdated error from a previous pool version. + return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, 'details'): + if isinstance(error.details, dict): + error_tv = error.details.get('topologyVersion') + + return _is_stale_error_topology_version(cur_tv, error_tv) + + def _handle_error(self, address, err_ctx): + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + exc_type = type(error) + if issubclass(exc_type, NetworkTimeout): + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif issubclass(exc_type, NotMasterError): + # As per the SDAM spec if: + # - the server sees a "not master" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + err_code = error.details.get('code', -1) + is_shutting_down = err_code in helpers._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. server.reset() - - # Mark this server Unknown. + server.request_check() + elif issubclass(exc_type, ConnectionFailure): + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset() + elif issubclass(exc_type, OperationFailure): + # Do not request an immediate check since the server is likely + # shutting down. + if error.code in helpers._NOT_MASTER_CODES: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset() - def _request_check(self, address): - """Wake one monitor. Hold the lock when calling this.""" - server = self._servers.get(address) + def handle_error(self, address, err_ctx): + """Handle an application error. - # "server" is None if another thread removed it from the topology. - if server: - server.request_check() + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. + """ + with self._lock: + self._handle_error(address, err_ctx) def _request_check_all(self): """Wake all monitors. Hold the lock when calling this.""" @@ -692,3 +741,30 @@ def __repr__(self): if not self._opened: msg = 'CLOSED ' return '<%s %s%r>' % (self.__class__.__name__, msg, self._description) + + +class _ErrorContext(object): + """An error with context for SDAM error handling.""" + def __init__(self, error, max_wire_version, sock_generation): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + + +def _is_stale_error_topology_version(current_tv, error_tv): + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv['processId'] != error_tv['processId']: + return False + return current_tv['counter'] >= error_tv['counter'] + + +def _is_stale_server_description(current_sd, new_sd): + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv['processId'] != new_tv['processId']: + return False + return current_tv['counter'] > new_tv['counter'] diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 3529cfd093..0090692dc6 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -129,7 +129,8 @@ def has_server(self, address): def reset_server(self, address): """A copy of this description, with one server marked Unknown.""" - return updated_topology_description(self, ServerDescription(address)) + unknown_sd = self._server_descriptions[address].to_unknown() + return updated_topology_description(self, unknown_sd) def reset(self): """A copy of this description, with all servers marked Unknown.""" @@ -479,8 +480,7 @@ def _update_rs_from_primary( max_election_tuple > server_description.election_tuple): # Stale primary, set to type Unknown. - address = server_description.address - sds[address] = ServerDescription(address) + sds[server_description.address] = server_description.to_unknown() return (_check_has_primary(sds), replica_set_name, max_set_version, @@ -500,7 +500,7 @@ def _update_rs_from_primary( and server.address != server_description.address): # Reset old primary's type to Unknown. - sds[server.address] = ServerDescription(server.address) + sds[server.address] = server.to_unknown() # There can be only one prior primary. break diff --git a/test/barrier.py b/test/barrier.py new file mode 100644 index 0000000000..7a614ca07c --- /dev/null +++ b/test/barrier.py @@ -0,0 +1,193 @@ +# Backport of the threading.Barrier class from python 3.8, with small +# changes to support python 2.7. +# https://github.com/python/cpython/blob/v3.8.2/Lib/threading.py#L562-L728 + +from threading import (Condition, + Lock) + +from pymongo.monotonic import time as _time + + +# Backport Condition.wait_for from 3.8.2 +# https://github.com/python/cpython/blob/v3.8.2/Lib/threading.py#L318-L339 +def wait_for(condition, predicate, timeout=None): + """Wait until a condition evaluates to True. + + predicate should be a callable which result will be interpreted as a + boolean value. A timeout may be provided giving the maximum time to + wait. + + """ + endtime = None + waittime = timeout + result = predicate() + while not result: + if waittime is not None: + if endtime is None: + endtime = _time() + waittime + else: + waittime = endtime - _time() + if waittime <= 0: + break + condition.wait(waittime) + result = predicate() + return result + + +# A barrier class. Inspired in part by the pthread_barrier_* api and +# the CyclicBarrier class from Java. See +# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and +# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ +# CyclicBarrier.html +# for information. +# We maintain two main states, 'filling' and 'draining' enabling the barrier +# to be cyclic. Threads are not allowed into it until it has fully drained +# since the previous cycle. In addition, a 'resetting' state exists which is +# similar to 'draining' except that threads leave with a BrokenBarrierError, +# and a 'broken' state in which all threads get the exception. +class Barrier(object): + """Implements a Barrier. + Useful for synchronizing a fixed number of threads at known synchronization + points. Threads block on 'wait()' and are simultaneously awoken once they + have all made that call. + """ + + def __init__(self, parties, action=None, timeout=None): + """Create a barrier, initialised to 'parties' threads. + 'action' is a callable which, when supplied, will be called by one of + the threads after they have all entered the barrier and just prior to + releasing them all. If a 'timeout' is provided, it is used as the + default for all subsequent 'wait()' calls. + """ + self._cond = Condition(Lock()) + self._action = action + self._timeout = timeout + self._parties = parties + self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken + self._count = 0 + + def wait(self, timeout=None): + """Wait for the barrier. + When the specified number of threads have started waiting, they are all + simultaneously awoken. If an 'action' was provided for the barrier, one + of the threads will have executed that callback prior to returning. + Returns an individual index number from 0 to 'parties-1'. + """ + if timeout is None: + timeout = self._timeout + with self._cond: + self._enter() # Block while the barrier drains. + index = self._count + self._count += 1 + try: + if index + 1 == self._parties: + # We release the barrier + self._release() + else: + # We wait until someone releases us + self._wait(timeout) + return index + finally: + self._count -= 1 + # Wake up any threads waiting for barrier to drain. + self._exit() + + # Block until the barrier is ready for us, or raise an exception + # if it is broken. + def _enter(self): + while self._state in (-1, 1): + # It is draining or resetting, wait until done + self._cond.wait() + #see if the barrier is in a broken state + if self._state < 0: + raise BrokenBarrierError + assert self._state == 0 + + # Optionally run the 'action' and release the threads waiting + # in the barrier. + def _release(self): + try: + if self._action: + self._action() + # enter draining state + self._state = 1 + self._cond.notify_all() + except: + #an exception during the _action handler. Break and reraise + self._break() + raise + + # Wait in the barrier until we are released. Raise an exception + # if the barrier is reset or broken. + def _wait(self, timeout): + if not wait_for(self._cond, lambda : self._state != 0, timeout): + #timed out. Break the barrier + self._break() + raise BrokenBarrierError + if self._state < 0: + raise BrokenBarrierError + assert self._state == 1 + + # If we are the last thread to exit the barrier, signal any threads + # waiting for the barrier to drain. + def _exit(self): + if self._count == 0: + if self._state in (-1, 1): + #resetting or draining + self._state = 0 + self._cond.notify_all() + + def reset(self): + """Reset the barrier to the initial state. + Any threads currently waiting will get the BrokenBarrier exception + raised. + """ + with self._cond: + if self._count > 0: + if self._state == 0: + #reset the barrier, waking up threads + self._state = -1 + elif self._state == -2: + #was broken, set it to reset state + #which clears when the last thread exits + self._state = -1 + else: + self._state = 0 + self._cond.notify_all() + + def abort(self): + """Place the barrier into a 'broken' state. + Useful in case of error. Any currently waiting threads and threads + attempting to 'wait()' will have BrokenBarrierError raised. + """ + with self._cond: + self._break() + + def _break(self): + # An internal error was detected. The barrier is set to + # a broken state all parties awakened. + self._state = -2 + self._cond.notify_all() + + @property + def parties(self): + """Return the number of threads required to trip the barrier.""" + return self._parties + + @property + def n_waiting(self): + """Return the number of threads currently waiting at the barrier.""" + # We don't need synchronization here since this is an ephemeral result + # anyway. It returns the correct value in the steady state. + if self._state == 0: + return self._count + return 0 + + @property + def broken(self): + """Return True if the barrier is in a broken state.""" + return self._state == -2 + +# exception raised by the Barrier class +class BrokenBarrierError(RuntimeError): + pass diff --git a/test/discovery_and_monitoring/errors/error_handling_handshake.json b/test/discovery_and_monitoring/errors/error_handling_handshake.json new file mode 100644 index 0000000000..cdd6df6247 --- /dev/null +++ b/test/discovery_and_monitoring/errors/error_handling_handshake.json @@ -0,0 +1,112 @@ +{ + "description": "Network timeouts before and after the handshake completes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore network timeout application error (afterHandshakeCompletes)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Mark server unknown on network timeout application error (beforeHandshakeCompletes)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-network-error.json b/test/discovery_and_monitoring/errors/non-stale-network-error.json new file mode 100644 index 0000000000..d0765dbb6d --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-network-error.json @@ -0,0 +1,79 @@ +{ + "description": "Non-stale network error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale network error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json new file mode 100644 index 0000000000..7c1a197a62 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json @@ -0,0 +1,87 @@ +{ + "description": "Non-stale network timeout error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale network timeout error does not mark server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json new file mode 100644 index 0000000000..68b7f455aa --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..d4a409d268 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json new file mode 100644 index 0000000000..dbd7154573 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotMaster error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..30d8698aac --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotMasterNoSlaveOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json new file mode 100644 index 0000000000..9d1c236565 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater NotMasterOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json new file mode 100644 index 0000000000..d189dd3fba --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json new file mode 100644 index 0000000000..9e88f5ce3f --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json new file mode 100644 index 0000000000..06c61a93da --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..ebf5a1a4a1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json new file mode 100644 index 0000000000..54ce115e68 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotMaster error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..502ebc549c --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotMasterNoSlaveOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json new file mode 100644 index 0000000000..8e84038e29 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing NotMasterOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json new file mode 100644 index 0000000000..f7e0932542 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json new file mode 100644 index 0000000000..5eceb1bcee --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json new file mode 100644 index 0000000000..2b77eb2087 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..584219e508 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json new file mode 100644 index 0000000000..b7bdfabd2d --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotMaster error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..812b973524 --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotMasterNoSlaveOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json new file mode 100644 index 0000000000..027f4bddee --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed NotMasterOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json new file mode 100644 index 0000000000..6a49618cfd --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json new file mode 100644 index 0000000000..3c3c934f8e --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json new file mode 100644 index 0000000000..9e171142d1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..52410f0b27 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMaster.json b/test/discovery_and_monitoring/errors/pre-42-NotMaster.json new file mode 100644 index 0000000000..6cbb23b89c --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-NotMaster.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotMaster error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..fa5c1f37d5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotMasterNoSlaveOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json new file mode 100644 index 0000000000..5023662723 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotMasterOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json new file mode 100644 index 0000000000..d58ff26e52 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json new file mode 100644 index 0000000000..a44ecc3824 --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json new file mode 100644 index 0000000000..f675f2651e --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..a4ae13ee78 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotMaster.json b/test/discovery_and_monitoring/errors/stale-generation-NotMaster.json new file mode 100644 index 0000000000..cfe779ee6f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotMaster.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMaster error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..d58349b622 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json new file mode 100644 index 0000000000..11ee062589 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json new file mode 100644 index 0000000000..2e80ba4949 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json new file mode 100644 index 0000000000..9b5656d48b --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json new file mode 100644 index 0000000000..9da8b60fbb --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation InterruptedAtShutdown error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..f0a7df6170 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json new file mode 100644 index 0000000000..dbdce1583a --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMaster error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMaster error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..707a58bcaf --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json new file mode 100644 index 0000000000..851bea0928 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMasterOrSecondary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json new file mode 100644 index 0000000000..7ac8d2db24 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation PrimarySteppedDown error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json new file mode 100644 index 0000000000..e250c448aa --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation ShutdownInProgress error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json new file mode 100644 index 0000000000..4e11c48eb2 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json @@ -0,0 +1,161 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json new file mode 100644 index 0000000000..27bac32443 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json @@ -0,0 +1,161 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json new file mode 100644 index 0000000000..8b10f5eb75 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation InterruptedAtShutdown error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..ec78d667c2 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation InterruptedDueToReplStateChange error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json new file mode 100644 index 0000000000..760f260d48 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMaster error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMaster error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..bb7946aca3 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json new file mode 100644 index 0000000000..eced40c59c --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation NotMasterOrSecondary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterOrSecondary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json new file mode 100644 index 0000000000..d33dc98db3 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation PrimarySteppedDown error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json new file mode 100644 index 0000000000..ee38cc8bbe --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation ShutdownInProgress error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json new file mode 100644 index 0000000000..9734776f22 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json @@ -0,0 +1,161 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json new file mode 100644 index 0000000000..af8730e5ca --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json @@ -0,0 +1,161 @@ +{ + "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "timeout" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json new file mode 100644 index 0000000000..8449ac63b2 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedAtShutdown error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..0cdd1727d3 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale InterruptedDueToReplStateChange error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json new file mode 100644 index 0000000000..5823d0446f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMaster error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMaster error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..e894dae6d8 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterNoSlaveOk error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json new file mode 100644 index 0000000000..17243c9022 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterOrSecondary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale NotMasterOrSecondary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json new file mode 100644 index 0000000000..93d9678419 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale PrimarySteppedDown error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json new file mode 100644 index 0000000000..563eb60d9f --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale ShutdownInProgress error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_equal.json b/test/discovery_and_monitoring/rs/topology_version_equal.json new file mode 100644 index 0000000000..ba84e059a0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_equal.json @@ -0,0 +1,99 @@ +{ + "description": "Primary with equal topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_greater.json b/test/discovery_and_monitoring/rs/topology_version_greater.json new file mode 100644 index 0000000000..2c80fa2a97 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_greater.json @@ -0,0 +1,255 @@ +{ + "description": "Primary with newer topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "c:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": null + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null + }, + "d:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "e:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/topology_version_less.json b/test/discovery_and_monitoring/rs/topology_version_less.json new file mode 100644 index 0000000000..ae45f803d4 --- /dev/null +++ b/test/discovery_and_monitoring/rs/topology_version_less.json @@ -0,0 +1,95 @@ +{ + "description": "Primary with older topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index cb17104cd4..81ac6de7c9 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -22,16 +22,26 @@ from bson import json_util, Timestamp from pymongo import common -from pymongo.errors import ConfigurationError -from pymongo.topology import Topology +from pymongo.errors import (AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotMasterError, + OperationFailure) +from pymongo.helpers import _check_command_response +from pymongo.topology import (Topology, + _ErrorContext) from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.ismaster import IsMaster from pymongo.server_description import ServerDescription, SERVER_TYPE from pymongo.settings import TopologySettings from pymongo.uri_parser import parse_uri -from test import unittest -from test.utils import (MockPool, - server_name_to_type) +from test import unittest, IntegrationTest +from test.utils import (assertion_context, + Barrier, + get_pool, + server_name_to_type, + rs_or_single_client, + wait_until) # Location of JSON test specifications. @@ -58,9 +68,7 @@ def request_check(self): def create_mock_topology(uri, monitor_class=MockMonitor): - # Some tests in the spec include URIs like mongodb://A/?connect=direct, - # but PyMongo considers any single-seed URI with no setName to be "direct". - parsed_uri = parse_uri(uri.replace('connect=direct', '')) + parsed_uri = parse_uri(uri) replica_set_name = None if 'replicaset' in parsed_uri['options']: replica_set_name = parsed_uri['options']['replicaset'] @@ -68,7 +76,6 @@ def create_mock_topology(uri, monitor_class=MockMonitor): topology_settings = TopologySettings( parsed_uri['nodelist'], replica_set_name=replica_set_name, - pool_class=MockPool, monitor_class=monitor_class) c = Topology(topology_settings) @@ -83,6 +90,33 @@ def got_ismaster(topology, server_address, ismaster_response): topology.on_change(server_description) +def got_app_error(topology, app_error): + server_address = common.partition_node(app_error['address']) + server = topology.get_server_by_address(server_address) + error_type = app_error['type'] + generation = app_error.get('generation', server.pool.generation) + when = app_error['when'] + max_wire_version = app_error['maxWireVersion'] + # XXX: We could get better test coverage by mocking the errors on the + # Pool/SocketInfo. + try: + if error_type == 'command': + _check_command_response(app_error['response']) + elif error_type == 'network': + raise AutoReconnect('mock non-timeout network error') + elif error_type == 'timeout': + raise NetworkTimeout('mock network timeout error') + else: + raise AssertionError('unknown error type: %s' % (error_type,)) + assert False + except (AutoReconnect, NotMasterError, OperationFailure) as e: + if when == 'beforeHandshakeCompletes' and error_type == 'timeout': + raise unittest.SkipTest('PYTHON-2211') + + topology.handle_error( + server_address, _ErrorContext(e, max_wire_version, generation)) + + def get_type(topology, hostname): description = topology.get_server_by_address((hostname, 27017)).description return description.server_type @@ -140,6 +174,16 @@ def check_outcome(self, topology, outcome): expected_server.get('electionId'), actual_server_description.election_id) + self.assertEqual( + expected_server.get('topologyVersion'), + actual_server_description.topology_version) + + expected_pool = expected_server.get('pool') + if expected_pool: + self.assertEqual( + expected_pool.get('generation'), + actual_server.pool.generation) + self.assertEqual(outcome['setName'], topology.description.replica_set_name) self.assertEqual(outcome['logicalSessionTimeoutMinutes'], topology.description.logical_session_timeout_minutes) @@ -152,13 +196,18 @@ def create_test(scenario_def): def run_scenario(self): c = create_mock_topology(scenario_def['uri']) - for phase in scenario_def['phases']: - for response in phase['responses']: - got_ismaster(c, - common.partition_node(response[0]), - response[1]) + for i, phase in enumerate(scenario_def['phases']): + # Including the phase description makes failures easier to debug. + description = phase.get('description', str(i)) + with assertion_context('phase: %s' % (description,)): + for response in phase.get('responses', []): + got_ismaster( + c, common.partition_node(response[0]), response[1]) + + for app_error in phase.get('applicationErrors', []): + got_app_error(c, app_error) - check_outcome(self, c, phase['outcome']) + check_outcome(self, c, phase['outcome']) return run_scenario @@ -210,5 +259,48 @@ def send_cluster_time(time, inc, should_update): send_cluster_time(2, 3, True) +class TestIgnoreStaleErrors(IntegrationTest): + + def test_ignore_stale_connection_errors(self): + N_THREADS = 5 + barrier = Barrier(N_THREADS, timeout=30) + client = rs_or_single_client(minPoolSize=N_THREADS) + self.addCleanup(client.close) + + # Wait for initial discovery. + client.admin.command('ping') + pool = get_pool(client) + starting_generation = pool.generation + wait_until(lambda: len(pool.sockets) == N_THREADS, 'created sockets') + + def mock_command(*args, **kwargs): + # Synchronize all threads to ensure they use the same generation. + barrier.wait() + raise AutoReconnect('mock SocketInfo.command error') + + for sock in pool.sockets: + sock.command = mock_command + + def insert_command(i): + try: + client.test.command('insert', 'test', documents=[{'i': i}]) + except AutoReconnect as exc: + pass + + threads = [] + for i in range(N_THREADS): + threads.append(threading.Thread(target=insert_command, args=(i,))) + for t in threads: + t.start() + for t in threads: + t.join() + + # Expect a single pool reset for the network error + self.assertEqual(starting_generation+1, pool.generation) + + # Server should be selectable. + client.admin.command('ping') + + if __name__ == "__main__": unittest.main() diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 616cce35b4..97412f41d6 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -327,6 +327,9 @@ def marked_unknown_and_rediscovered(): def test_network_error_publishes_events(self): self._test_app_error({'closeConnection': True}, ConnectionFailure) + # In 4.4+, NotMaster errors from failCommand don't cause SDAM state + # changes because topologyVersion is not incremented. + @client_context.require_version_max(4, 3) def test_not_master_error_publishes_events(self): self._test_app_error({'errorCode': 10107, 'closeConnection': False}, NotMasterError) diff --git a/test/test_server_description.py b/test/test_server_description.py index 0aa89dd8f4..11f134464f 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -18,6 +18,8 @@ sys.path[0:0] = [""] +from bson.objectid import ObjectId +from bson.int64 import Int64 from pymongo.server_type import SERVER_TYPE from pymongo.ismaster import IsMaster from pymongo.server_description import ServerDescription @@ -166,6 +168,28 @@ def test_repr(self): "") + def test_topology_version(self): + topology_version = {'processId': ObjectId(), 'counter': Int64('0')} + s = parse_ismaster_response( + {'ok': 1, 'ismaster': True, 'setName': 'rs', + 'topologyVersion': topology_version}) + + self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) + self.assertEqual(topology_version, s.topology_version) + + # Resetting a server to unknown preserves topology_version. + s_unknown = s.to_unknown() + self.assertEqual(SERVER_TYPE.Unknown, s_unknown.server_type) + self.assertEqual(topology_version, s_unknown.topology_version) + + def test_topology_version_not_present(self): + # No topologyVersion field. + s = parse_ismaster_response( + {'ok': 1, 'ismaster': True, 'setName': 'rs'}) + + self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) + self.assertEqual(None, s.topology_version) + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index c4a785af6a..4b9db0800f 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -18,13 +18,12 @@ sys.path[0:0] = [""] -import threading - from bson.py3compat import imap from pymongo import common from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_type import SERVER_TYPE -from pymongo.topology import Topology +from pymongo.topology import (_ErrorContext, + Topology) from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.errors import (AutoReconnect, ConfigurationError, @@ -402,7 +401,7 @@ def test_close(self): self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) - def test_reset_server(self): + def test_handle_error(self): t = create_mock_topology(replica_set_name='rs') got_ismaster(t, ('a', 27017), { 'ok': 1, @@ -417,7 +416,8 @@ def test_reset_server(self): 'setName': 'rs', 'hosts': ['a', 'b']}) - t.reset_server(('a', 27017), None) + errctx = _ErrorContext(AutoReconnect('mock'), 0, 0) + t.handle_error(('a', 27017), errctx) self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) self.assertEqual('rs', t.description.replica_set_name) @@ -434,18 +434,60 @@ def test_reset_server(self): self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) - t.reset_server(('b', 27017), None) + t.handle_error(('b', 27017), errctx) self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) self.assertEqual('rs', t.description.replica_set_name) self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) - def test_reset_removed_server(self): + def test_handle_getlasterror(self): + t = create_mock_topology(replica_set_name='rs') + got_ismaster(t, ('a', 27017), { + 'ok': 1, + 'ismaster': True, + 'setName': 'rs', + 'hosts': ['a', 'b']}) + + got_ismaster(t, ('b', 27017), { + 'ok': 1, + 'ismaster': False, + 'secondary': True, + 'setName': 'rs', + 'hosts': ['a', 'b']}) + + t.handle_getlasterror(('a', 27017), 'not master') + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) + self.assertEqual('rs', t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, + t.description.topology_type) + + got_ismaster(t, ('a', 27017), { + 'ok': 1, + 'ismaster': True, + 'setName': 'rs', + 'hosts': ['a', 'b']}) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, + t.description.topology_type) + + def test_handle_error_removed_server(self): + t = create_mock_topology(replica_set_name='rs') + + # No error resetting a server not in the TopologyDescription. + errctx = _ErrorContext(AutoReconnect('mock'), 0, 0) + t.handle_error(('b', 27017), errctx) + + # Server was *not* added as type Unknown. + self.assertFalse(t.has_server(('b', 27017))) + + def test_handle_getlasterror_removed_server(self): t = create_mock_topology(replica_set_name='rs') # No error resetting a server not in the TopologyDescription. - t.reset_server(('b', 27017), None) + t.handle_getlasterror(('b', 27017), 'not master') # Server was *not* added as type Unknown. self.assertFalse(t.has_server(('b', 27017))) diff --git a/test/utils.py b/test/utils.py index 7b5bb7fa5f..5f51b7f10f 100644 --- a/test/utils.py +++ b/test/utils.py @@ -48,6 +48,13 @@ db_user, db_pwd) +if sys.version_info[0] < 3: + # Python 2.7, use our backport. + from test.barrier import Barrier +else: + from threading import Barrier + + IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) @@ -889,3 +896,13 @@ def cat_files(dest, *sources): for src in sources: with open(src, 'rb') as fsrc: shutil.copyfileobj(fsrc, fdst) + + +@contextlib.contextmanager +def assertion_context(msg): + """A context manager that adds info to an assertion failure.""" + try: + yield + except AssertionError as exc: + msg = '%s (%s)' % (exc, msg) + py3compat.reraise(type(exc), msg, sys.exc_info()[2]) From 07c834ea3e39f7f5e4c55fea09b208d0164c2901 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 29 Apr 2020 10:21:30 -0700 Subject: [PATCH 0121/2111] PYTHON-2218 Fix race in test_last_write_date --- test/test_max_staleness.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index c313be227d..dd9169f284 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -122,15 +122,21 @@ def test_last_write_date(self): # From max-staleness-tests.rst, "Parse lastWriteDate". client = rs_or_single_client(heartbeatFrequencyMS=500) client.pymongo_test.test.insert_one({}) - time.sleep(2) + # Wait for the server description to be updated. + time.sleep(1) server = client._topology.select_server(writable_server_selector) - last_write = server.description.last_write_date - self.assertTrue(last_write) + first = server.description.last_write_date + self.assertTrue(first) + # The first last_write_date may correspond to a internal server write, + # sleep so that the next write does not occur within the same second. + time.sleep(1) client.pymongo_test.test.insert_one({}) - time.sleep(2) + # Wait for the server description to be updated. + time.sleep(1) server = client._topology.select_server(writable_server_selector) - self.assertGreater(server.description.last_write_date, last_write) - self.assertLess(server.description.last_write_date, last_write + 10) + second = server.description.last_write_date + self.assertGreater(second, first) + self.assertLess(second, first + 10) @client_context.require_version_max(3, 3) def test_last_write_date_absent(self): From d8342367a99af50a19ca6e9afd80ed005f83e724 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 29 Apr 2020 10:32:54 -0700 Subject: [PATCH 0122/2111] PYTHON-2214 Tolerate StaleConfig errors in test_create_collection --- test/test_transactions.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/test_transactions.py b/test/test_transactions.py index 9621578420..cfcc67e95e 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -214,10 +214,15 @@ def test_create_collection(self): db = client.pymongo_test coll = db.test_create_collection self.addCleanup(coll.drop) - with client.start_session() as s, s.start_transaction(): - coll2 = db.create_collection(coll.name, session=s) + + # Use with_transaction to avoid StaleConfig errors on sharded clusters. + def create_and_insert(session): + coll2 = db.create_collection(coll.name, session=session) self.assertEqual(coll, coll2) - coll.insert_one({}, session=s) + coll.insert_one({}, session=session) + + with client.start_session() as s: + s.with_transaction(create_and_insert) # Outside a transaction we raise CollectionInvalid on existing colls. with self.assertRaises(CollectionInvalid): From 1c2a5759509d74d21a8132a6eae473ef3de06ef2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 4 May 2020 15:03:16 -0700 Subject: [PATCH 0123/2111] PYTHON-2221 Resync SDAM error handling spec tests --- .../errors/post-42-InterruptedAtShutdown.json | 69 +++++++++++++++++++ ...st-42-InterruptedDueToReplStateChange.json | 69 +++++++++++++++++++ .../errors/post-42-NotMaster.json | 69 +++++++++++++++++++ .../errors/post-42-NotMasterNoSlaveOk.json | 69 +++++++++++++++++++ .../errors/post-42-NotMasterOrSecondary.json | 69 +++++++++++++++++++ .../errors/post-42-PrimarySteppedDown.json | 69 +++++++++++++++++++ .../errors/post-42-ShutdownInProgress.json | 69 +++++++++++++++++++ .../errors/pre-42-InterruptedAtShutdown.json | 8 +-- ...re-42-InterruptedDueToReplStateChange.json | 10 +-- .../errors/pre-42-NotMaster.json | 10 +-- .../errors/pre-42-NotMasterNoSlaveOk.json | 10 +-- .../errors/pre-42-NotMasterOrSecondary.json | 10 +-- .../errors/pre-42-PrimarySteppedDown.json | 10 +-- .../errors/pre-42-ShutdownInProgress.json | 8 +-- 14 files changed, 516 insertions(+), 33 deletions(-) create mode 100644 test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json create mode 100644 test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json create mode 100644 test/discovery_and_monitoring/errors/post-42-NotMaster.json create mode 100644 test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json create mode 100644 test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json create mode 100644 test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json create mode 100644 test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json new file mode 100644 index 0000000000..9e171142d1 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 InterruptedAtShutdown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedAtShutdown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedAtShutdown", + "code": 11600 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json new file mode 100644 index 0000000000..52410f0b27 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 InterruptedDueToReplStateChange error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 InterruptedDueToReplStateChange error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "InterruptedDueToReplStateChange", + "code": 11602 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotMaster.json b/test/discovery_and_monitoring/errors/post-42-NotMaster.json new file mode 100644 index 0000000000..6cbb23b89c --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotMaster.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 NotMaster error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotMaster error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMaster", + "code": 10107 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json new file mode 100644 index 0000000000..fa5c1f37d5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 NotMasterNoSlaveOk error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotMasterNoSlaveOk error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterNoSlaveOk", + "code": 13435 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json new file mode 100644 index 0000000000..5023662723 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 NotMasterOrSecondary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 NotMasterOrSecondary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "NotMasterOrSecondary", + "code": 13436 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json new file mode 100644 index 0000000000..d58ff26e52 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 PrimarySteppedDown error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 PrimarySteppedDown error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "PrimarySteppedDown", + "code": 189 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json new file mode 100644 index 0000000000..a44ecc3824 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 ShutdownInProgress error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 ShutdownInProgress error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "ShutdownInProgress", + "code": 91 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json index 9e171142d1..5944fe705c 100644 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 InterruptedAtShutdown error", + "description": "Pre-4.2 InterruptedAtShutdown error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 InterruptedAtShutdown error marks server Unknown", + "description": "Pre-4.2 InterruptedAtShutdown error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json index 52410f0b27..06ed118779 100644 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 InterruptedDueToReplStateChange error", + "description": "Pre-4.2 InterruptedDueToReplStateChange error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 InterruptedDueToReplStateChange error marks server Unknown", + "description": "Pre-4.2 InterruptedDueToReplStateChange error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, @@ -56,7 +56,7 @@ "type": "Unknown", "topologyVersion": null, "pool": { - "generation": 0 + "generation": 1 } } }, diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMaster.json b/test/discovery_and_monitoring/errors/pre-42-NotMaster.json index 6cbb23b89c..a6a6bba87a 100644 --- a/test/discovery_and_monitoring/errors/pre-42-NotMaster.json +++ b/test/discovery_and_monitoring/errors/pre-42-NotMaster.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 NotMaster error", + "description": "Pre-4.2 NotMaster error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 NotMaster error marks server Unknown", + "description": "Pre-4.2 NotMaster error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, @@ -56,7 +56,7 @@ "type": "Unknown", "topologyVersion": null, "pool": { - "generation": 0 + "generation": 1 } } }, diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json index fa5c1f37d5..1eb72bc033 100644 --- a/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 NotMasterNoSlaveOk error", + "description": "Pre-4.2 NotMasterNoSlaveOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 NotMasterNoSlaveOk error marks server Unknown", + "description": "Pre-4.2 NotMasterNoSlaveOk error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, @@ -56,7 +56,7 @@ "type": "Unknown", "topologyVersion": null, "pool": { - "generation": 0 + "generation": 1 } } }, diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json index 5023662723..f515898281 100644 --- a/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 NotMasterOrSecondary error", + "description": "Pre-4.2 NotMasterOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 NotMasterOrSecondary error marks server Unknown", + "description": "Pre-4.2 NotMasterOrSecondary error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, @@ -56,7 +56,7 @@ "type": "Unknown", "topologyVersion": null, "pool": { - "generation": 0 + "generation": 1 } } }, diff --git a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json index d58ff26e52..e4c3228afc 100644 --- a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 PrimarySteppedDown error", + "description": "Pre-4.2 PrimarySteppedDown error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 PrimarySteppedDown error marks server Unknown", + "description": "Pre-4.2 PrimarySteppedDown error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, @@ -56,7 +56,7 @@ "type": "Unknown", "topologyVersion": null, "pool": { - "generation": 0 + "generation": 1 } } }, diff --git a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json index a44ecc3824..00dc7c1b5b 100644 --- a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 ShutdownInProgress error", + "description": "Pre-4.2 ShutdownInProgress error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 8 + "maxWireVersion": 7 } ] ], @@ -36,12 +36,12 @@ } }, { - "description": "Post-4.2 ShutdownInProgress error marks server Unknown", + "description": "Pre-4.2 ShutdownInProgress error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", "when": "afterHandshakeCompletes", - "maxWireVersion": 8, + "maxWireVersion": 7, "type": "command", "response": { "ok": 0, From ae5c03df17023e5596b1a64129b9ad9fa7be4ad1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 May 2020 11:38:53 -0700 Subject: [PATCH 0124/2111] PYTHON-2173 Update comment in with_transaction example for the docs manual --- test/test_examples.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_examples.py b/test/test_examples.py index e6fd1f3855..f2747ff46e 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1050,7 +1050,7 @@ def update_employee_info(session): client = MongoClient(uriString) wc_majority = WriteConcern("majority", wtimeout=1000) - # Prereq: Create collections. CRUD operations in transactions must be on existing collections. + # Prereq: Create collections. client.get_database( "mydb1", write_concern=wc_majority).foo.insert_one({'abc': 0}) client.get_database( From 8747837dbd5c098f7e2e644fa99c1b87a903a658 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 May 2020 11:49:10 -0700 Subject: [PATCH 0125/2111] PYTHON-2032 listIndexes no longer includes "ns" as of MongoDB 4.4 --- pymongo/collection.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 2865bce091..c701f429d0 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2236,8 +2236,7 @@ def list_indexes(self, session=None): >>> for index in db.test.list_indexes(): ... print(index) ... - SON([(u'v', 1), (u'key', SON([(u'_id', 1)])), - (u'name', u'_id_'), (u'ns', u'test.test')]) + SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) :Parameters: - `session` (optional): a From 6e39ae04c3317dba47cc94083588830a9ff6435a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 May 2020 17:07:56 -0700 Subject: [PATCH 0126/2111] PYTHON-2170 Add support for 4.4 hedged reads --- pymongo/message.py | 11 ++-- pymongo/read_preferences.py | 94 ++++++++++++++++++++++++++++------- test/test_read_preferences.py | 68 +++++++++++++++++++++++++ test/test_topology.py | 5 +- 4 files changed, 151 insertions(+), 27 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 9efb835c12..f06e142fe7 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -95,21 +95,18 @@ def _randint(): def _maybe_add_read_preference(spec, read_preference): """Add $readPreference to spec when appropriate.""" mode = read_preference.mode - tag_sets = read_preference.tag_sets - max_staleness = read_preference.max_staleness + document = read_preference.document # Only add $readPreference if it's something other than primary to avoid # problems with mongos versions that don't support read preferences. Also, # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting # the slaveOkay bit has the same effect). if mode and ( - mode != ReadPreference.SECONDARY_PREFERRED.mode - or tag_sets != [{}] - or max_staleness != -1): - + mode != ReadPreference.SECONDARY_PREFERRED.mode or + len(document) > 1): if "$query" not in spec: spec = SON([("$query", spec)]) - spec["$readPreference"] = read_preference.document + spec["$readPreference"] = document return spec diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index f4425acaa6..53e8980174 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -81,17 +81,30 @@ def _validate_max_staleness(max_staleness): return max_staleness +def _validate_hedge(hedge): + """Validate hedge.""" + if hedge is None: + return None + + if not isinstance(hedge, dict): + raise TypeError("hedge must be a dictionary, not %r" % (hedge,)) + + return hedge + + class _ServerMode(object): """Base class for all read preferences. """ - __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness") + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", + "__hedge") - def __init__(self, mode, tag_sets=None, max_staleness=-1): + def __init__(self, mode, tag_sets=None, max_staleness=-1, hedge=None): self.__mongos_mode = _MONGOS_MODES[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) self.__max_staleness = _validate_max_staleness(max_staleness) + self.__hedge = _validate_hedge(hedge) @property def name(self): @@ -114,6 +127,8 @@ def document(self): doc['tags'] = self.__tag_sets if self.__max_staleness != -1: doc['maxStalenessSeconds'] = self.__max_staleness + if self.__hedge not in (None, {}): + doc['hedge'] = self.__hedge return doc @property @@ -144,6 +159,30 @@ def max_staleness(self): no longer be selected for operations, or -1 for no maximum.""" return self.__max_staleness + @property + def hedge(self): + """The read preference ``hedge`` parameter. + + A dictionary that configures how the server will perform hedged reads. + It consists of the following keys: + + - ``enabled``: Enables or disables hedged reads in sharded clusters. + + Hedged reads are automatically enabled in MongoDB 4.4+ when using a + ``nearest`` read preference. To explicitly enable hedged reads, set + the ``enabled`` key to ``true``:: + + >>> Nearest(hedge={'enabled': True}) + + To explicitly disable hedged reads, set the ``enabled`` key to + ``False``:: + + >>> Nearest(hedge={'enabled': False}) + + .. versionadded:: 3.11 + """ + return self.__hedge + @property def min_wire_version(self): """The wire protocol version the server must support. @@ -158,14 +197,15 @@ def min_wire_version(self): return 0 if self.__max_staleness == -1 else 5 def __repr__(self): - return "%s(tag_sets=%r, max_staleness=%r)" % ( - self.name, self.__tag_sets, self.__max_staleness) + return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( + self.name, self.__tag_sets, self.__max_staleness, self.__hedge) def __eq__(self, other): if isinstance(other, _ServerMode): return (self.mode == other.mode and self.tag_sets == other.tag_sets and - self.max_staleness == other.max_staleness) + self.max_staleness == other.max_staleness and + self.hedge == other.hedge) return NotImplemented def __ne__(self, other): @@ -178,7 +218,8 @@ def __getstate__(self): """ return {'mode': self.__mode, 'tag_sets': self.__tag_sets, - 'max_staleness': self.__max_staleness} + 'max_staleness': self.__max_staleness, + 'hedge': self.__hedge} def __setstate__(self, value): """Restore from pickling.""" @@ -186,6 +227,7 @@ def __setstate__(self, value): self.__mongos_mode = _MONGOS_MODES[self.__mode] self.__tag_sets = _validate_tag_sets(value['tag_sets']) self.__max_staleness = _validate_max_staleness(value['max_staleness']) + self.__hedge = _validate_hedge(value['hedge']) class Primary(_ServerMode): @@ -234,14 +276,17 @@ class PrimaryPreferred(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` to use if the primary is not available. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, - tag_sets, - max_staleness) + def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + super(PrimaryPreferred, self).__init__( + _PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection): """Apply this read preference to Selection.""" @@ -271,12 +316,17 @@ class Secondary(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness) + def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + super(Secondary, self).__init__( + _SECONDARY, tag_sets, max_staleness, hedge) def __call__(self, selection): """Apply this read preference to Selection.""" @@ -303,14 +353,17 @@ class SecondaryPreferred(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(SecondaryPreferred, self).__init__(_SECONDARY_PREFERRED, - tag_sets, - max_staleness) + def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + super(SecondaryPreferred, self).__init__( + _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection): """Apply this read preference to Selection.""" @@ -342,12 +395,17 @@ class Nearest(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. + - `hedge`: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. """ __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1): - super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness) + def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + super(Nearest, self).__init__( + _NEAREST, tag_sets, max_staleness, hedge) def __call__(self, selection): """Apply this read preference to Selection.""" diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 3a7bd69c89..ce79592bbe 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -46,6 +46,7 @@ from test.utils import (connected, ignore_deprecations, one, + OvertCommandListener, rs_client, single_client, wait_until) @@ -567,6 +568,73 @@ def test_read_preference_document(self): with self.assertRaises(ValueError): Nearest(max_staleness=-2) + def test_read_preference_document_hedge(self): + cases = { + 'primaryPreferred': PrimaryPreferred, + 'secondary': Secondary, + 'secondaryPreferred': SecondaryPreferred, + 'nearest': Nearest, + } + for mode, cls in cases.items(): + with self.assertRaises(TypeError): + cls(hedge=[]) + + pref = cls(hedge={}) + self.assertEqual(pref.document, {'mode': mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. + self.assertEqual(out, {}) + else: + self.assertEqual( + out, + SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {'enabled': True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual( + out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {'enabled': False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual( + out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {'enabled': False, 'extra': 'option'} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual( + out, SON([("$query", {}), ("$readPreference", pref.document)])) + + # Require OP_MSG so that $readPreference is visible in the command event. + @client_context.require_version_min(3, 6) + def test_send_hedge(self): + cases = { + 'primaryPreferred': PrimaryPreferred, + 'secondary': Secondary, + 'secondaryPreferred': SecondaryPreferred, + 'nearest': Nearest, + } + listener = OvertCommandListener() + client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) + client.admin.command('ping') + for mode, cls in cases.items(): + pref = cls(hedge={'enabled': True}) + coll = client.test.get_collection('test', read_preference=pref) + listener.reset() + coll.find_one() + started = listener.results['started'] + self.assertEqual(len(started), 1, started) + cmd = started[0].command + self.assertIn('$readPreference', cmd) + self.assertEqual(cmd['$readPreference'], pref.document) + def test_maybe_add_read_preference(self): # Primary doesn't add $readPreference diff --git a/test/test_topology.py b/test/test_topology.py index 4b9db0800f..dd89d1e535 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -788,12 +788,13 @@ def test_no_secondary(self): self.assertMessage( 'No replica set members match selector' - ' "Secondary(tag_sets=None, max_staleness=-1)"', + ' "Secondary(tag_sets=None, max_staleness=-1, hedge=None)"', t, ReadPreference.SECONDARY) self.assertMessage( "No replica set members match selector" - " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1)\"", + " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1, " + "hedge=None)\"", t, Secondary(tag_sets=[{'dc': 'ny'}])) def test_bad_replica_set_name(self): From 5efdcb88e2e02d0d096e4d0838f0815f18b994d0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 May 2020 09:29:29 -0700 Subject: [PATCH 0127/2111] PYTHON-2185 Deprecate geoHaystack in create_index(es) --- pymongo/__init__.py | 10 ++++-- pymongo/collection.py | 69 ++++++++++++++++++----------------------- pymongo/helpers.py | 3 +- test/test_legacy_api.py | 12 ++++++- 4 files changed, 50 insertions(+), 44 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index cdef15ad3a..74246e72c2 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -26,9 +26,15 @@ """ GEOHAYSTACK = "geoHaystack" -"""Index specifier for a 2-dimensional `haystack index`_. +"""**DEPRECATED** - Index specifier for a 2-dimensional `haystack index`_. -.. versionadded:: 2.1 +**DEPRECATED** - :attr:`GEOHAYSTACK` is deprecated and will be removed in +PyMongo 4.0. geoHaystack indexes (and the geoSearch command) were deprecated +in MongoDB 4.4. Instead, create a 2d index and use $geoNear or $geoWithin. +See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack. + +.. versionchanged:: 3.11 + Deprecated. .. _haystack index: http://docs.mongodb.org/manual/core/geohaystack/ """ diff --git a/pymongo/collection.py b/pymongo/collection.py index c701f429d0..567c8587c7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -56,6 +56,10 @@ _UJOIN = u"%s.%s" _FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} +_HAYSTACK_MSG = ( + "geoHaystack indexes are deprecated as of MongoDB 4.4." + " Instead, create a 2d index and use $geoNear or $geoWithin." + " See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack") class ReturnDocument(object): @@ -1897,10 +1901,24 @@ def create_indexes(self, indexes, session=None, **kwargs): .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ """ common.validate_list('indexes', indexes) + return self.__create_indexes(indexes, session, **kwargs) + + def __create_indexes(self, indexes, session, **kwargs): + """Internal createIndexes helper. + + :Parameters: + - `indexes`: A list of :class:`~pymongo.operations.IndexModel` + instances. + - `session` (optional): a + :class:`~pymongo.client_session.ClientSession`. + - `**kwargs` (optional): optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + """ names = [] with self._socket_for_writes(session) as sock_info: supports_collations = sock_info.max_wire_version >= 5 supports_quorum = sock_info.max_wire_version >= 9 + def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): @@ -1912,15 +1930,20 @@ def gen_indexes(): raise ConfigurationError( "Must be connected to MongoDB " "3.4+ to use collations.") + if 'bucketSize' in document: + # The bucketSize option is required by geoHaystack. + warnings.warn( + _HAYSTACK_MSG, DeprecationWarning, stacklevel=4) names.append(document["name"]) yield document + cmd = SON([('createIndexes', self.name), ('indexes', list(gen_indexes()))]) cmd.update(kwargs) if 'commitQuorum' in kwargs and not supports_quorum: raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use the " - "commitQuorum option for createIndexes") + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes") self._command( sock_info, cmd, read_preference=ReadPreference.PRIMARY, @@ -1929,36 +1952,6 @@ def gen_indexes(): session=session) return names - def __create_index(self, keys, index_options, session, **kwargs): - """Internal create index helper. - - :Parameters: - - `keys`: a list of tuples [(key, type), (key, type), ...] - - `index_options`: a dict of index options. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - """ - index_doc = helpers._index_document(keys) - index = {"key": index_doc} - collation = validate_collation_or_none( - index_options.pop('collation', None)) - index.update(index_options) - - with self._socket_for_writes(session) as sock_info: - if collation is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use collations.') - else: - index['collation'] = collation - cmd = SON([('createIndexes', self.name), ('indexes', [index])]) - cmd.update(kwargs) - self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - write_concern=self._write_concern_for(session), - session=session) - def create_index(self, keys, session=None, **kwargs): """Creates an index on this collection. @@ -2053,13 +2046,11 @@ def create_index(self, keys, session=None, **kwargs): .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core """ - keys = helpers._index_list(keys) - name = kwargs.setdefault("name", helpers._gen_index_name(keys)) cmd_options = {} if "maxTimeMS" in kwargs: cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") - self.__create_index(keys, kwargs, session, **cmd_options) - return name + index = IndexModel(keys, **kwargs) + return self.__create_indexes([index], session, **cmd_options)[0] def ensure_index(self, key_or_list, cache_for=300, **kwargs): """**DEPRECATED** - Ensures that an index exists on this collection. @@ -2080,8 +2071,8 @@ def ensure_index(self, key_or_list, cache_for=300, **kwargs): if "bucket_size" in kwargs: kwargs["bucketSize"] = kwargs.pop("bucket_size") - keys = helpers._index_list(key_or_list) - name = kwargs.setdefault("name", helpers._gen_index_name(keys)) + index = IndexModel(key_or_list, **kwargs) + name = index.document["name"] # Note that there is a race condition here. One thread could # check if the index is cached and be preempted before creating @@ -2091,7 +2082,7 @@ def ensure_index(self, key_or_list, cache_for=300, **kwargs): # other than wasted round trips. if not self.__database.client._cached(self.__database.name, self.__name, name): - self.__create_index(keys, kwargs, session=None) + self.__create_indexes([index], session=None) self.__database.client._cache_index(self.__database.name, self.__name, name, cache_for) return name diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 309470a284..366d0eca92 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -96,8 +96,7 @@ def _index_document(index_list): raise TypeError("first item in each key pair must be a string") if not isinstance(value, (string_type, int, abc.Mapping)): raise TypeError("second item in each key pair must be 1, -1, " - "'2d', 'geoHaystack', or another valid MongoDB " - "index specifier.") + "'2d', or another valid MongoDB index specifier.") index[key] = value return index diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 9a0b162c2f..9e63e59a82 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -29,7 +29,7 @@ from bson.objectid import ObjectId from bson.py3compat import string_type from bson.son import SON -from pymongo import ASCENDING, DESCENDING +from pymongo import ASCENDING, DESCENDING, GEOHAYSTACK from pymongo.database import Database from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, @@ -43,6 +43,7 @@ WriteConcernError, WTimeoutError) from pymongo.message import _CursorAddress +from pymongo.operations import IndexModel from pymongo.son_manipulator import (AutoReference, NamespaceInjector, ObjectIdShuffler, @@ -108,6 +109,15 @@ def test_ensure_index_deprecation(self): def test_reindex_deprecation(self): self.assertRaises(DeprecationWarning, lambda: self.db.test.reindex()) + def test_geoHaystack_deprecation(self): + self.addCleanup(self.db.test.drop) + keys = [("pos", GEOHAYSTACK), ("type", ASCENDING)] + self.assertRaises( + DeprecationWarning, self.db.test.create_index, keys, bucketSize=1) + indexes = [IndexModel(keys, bucketSize=1)] + self.assertRaises( + DeprecationWarning, self.db.test.create_indexes, indexes) + class TestLegacy(IntegrationTest): From 5d8e814e58dd0868cea96ef904e8e4f2753caf35 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 8 May 2020 14:24:20 -0700 Subject: [PATCH 0128/2111] PYTHON-2237 Use sort when verifying contents of outcome collections --- test/utils_spec_runner.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index e75d928fc2..67f028e89f 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -599,12 +599,11 @@ def run_scenario(self, scenario_def, test): outcome_coll_name, read_preference=ReadPreference.PRIMARY, read_concern=ReadConcern('local')) + actual_data = list(outcome_coll.find(sort=[('_id', 1)])) # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. - self.assertEqual( - wrap_types(expected_c['data']), list(outcome_coll.find())) - + self.assertEqual(wrap_types(expected_c['data']), actual_data) def expect_any_error(op): if isinstance(op, dict): From 9f8468f30957dcaaf8feb2a356108a0d97e0974d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 31 Jan 2020 12:42:43 -0800 Subject: [PATCH 0129/2111] PYTHON-2037 Test operations omit default write concern --- .../operation/default-write-concern-2.6.json | 544 ++++++++++++++++++ .../operation/default-write-concern-3.2.json | 125 ++++ .../operation/default-write-concern-3.4.json | 216 +++++++ .../operation/default-write-concern-4.2.json | 87 +++ test/test_read_write_concern_spec.py | 31 +- test/utils_spec_runner.py | 2 + 6 files changed, 1003 insertions(+), 2 deletions(-) create mode 100644 test/read_write_concern/operation/default-write-concern-2.6.json create mode 100644 test/read_write_concern/operation/default-write-concern-3.2.json create mode 100644 test/read_write_concern/operation/default-write-concern-3.4.json create mode 100644 test/read_write_concern/operation/default-write-concern-4.2.json diff --git a/test/read_write_concern/operation/default-write-concern-2.6.json b/test/read_write_concern/operation/default-write-concern-2.6.json new file mode 100644 index 0000000000..c623298cd7 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-2.6.json @@ -0,0 +1,544 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "2.6" + } + ], + "tests": [ + { + "description": "DeleteOne omits default write concern", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": {} + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "DeleteMany omits default write concern", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": {} + }, + "result": { + "deletedCount": 2 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "BulkWrite with all models omits default write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "ordered": true, + "requests": [ + { + "name": "deleteMany", + "arguments": { + "filter": {} + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 3 + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 3 + } + } + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 1 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 2 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 2 + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 3 + } + }, + "multi": true + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "InsertOne and InsertMany omit default write concern", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "insertMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "insert": "default_write_concern_coll", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "UpdateOne, UpdateMany, and ReplaceOne omit default write concern", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 3 + } + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 3 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "multi": true + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "update": "default_write_concern_coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "x": 3 + } + } + ], + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-3.2.json b/test/read_write_concern/operation/default-write-concern-3.2.json new file mode 100644 index 0000000000..04dd231f04 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-3.2.json @@ -0,0 +1,125 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "3.2" + } + ], + "tests": [ + { + "description": "findAndModify operations omit default write concern", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 2 + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "filter": { + "_id": 2 + } + } + } + ], + "outcome": { + "collection": { + "name": "default_write_concern_coll", + "data": [ + { + "_id": 1, + "x": 1 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + }, + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 2 + }, + "update": { + "x": 2 + }, + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default_write_concern_coll", + "query": { + "_id": 2 + }, + "remove": true, + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-3.4.json b/test/read_write_concern/operation/default-write-concern-3.4.json new file mode 100644 index 0000000000..6519f6f089 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-3.4.json @@ -0,0 +1,216 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "3.4" + } + ], + "tests": [ + { + "description": "Aggregate with $out omits default write concern", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ] + } + } + ], + "outcome": { + "collection": { + "name": "other_collection_name", + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + }, + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "default_write_concern_coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "RunCommand with a write command omits default write concern (runCommand should never inherit write concern)", + "operations": [ + { + "object": "database", + "databaseOptions": { + "writeConcern": {} + }, + "name": "runCommand", + "command_name": "delete", + "arguments": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ] + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "delete": "default_write_concern_coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": null + } + } + } + ] + }, + { + "description": "CreateIndex and dropIndex omits default write concern", + "operations": [ + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "createIndex", + "arguments": { + "keys": { + "x": 1 + } + } + }, + { + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "name": "dropIndex", + "arguments": { + "name": "x_1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "createIndexes": "default_write_concern_coll", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "writeConcern": null + } + } + }, + { + "command_started_event": { + "command": { + "dropIndexes": "default_write_concern_coll", + "index": "x_1", + "writeConcern": null + } + } + } + ] + }, + { + "description": "MapReduce omits default write concern", + "operations": [ + { + "name": "mapReduce", + "object": "collection", + "collectionOptions": { + "writeConcern": {} + }, + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "mapReduce": "default_write_concern_coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + }, + "writeConcern": null + } + } + } + ] + } + ] +} diff --git a/test/read_write_concern/operation/default-write-concern-4.2.json b/test/read_write_concern/operation/default-write-concern-4.2.json new file mode 100644 index 0000000000..fef192d1a3 --- /dev/null +++ b/test/read_write_concern/operation/default-write-concern-4.2.json @@ -0,0 +1,87 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "collection_name": "default_write_concern_coll", + "database_name": "default_write_concern_db", + "runOn": [ + { + "minServerVersion": "4.2" + } + ], + "tests": [ + { + "description": "Aggregate with $merge omits default write concern", + "operations": [ + { + "object": "collection", + "databaseOptions": { + "writeConcern": {} + }, + "collectionOptions": { + "writeConcern": {} + }, + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "default_write_concern_coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } + } + ], + "writeConcern": null + } + } + } + ], + "outcome": { + "collection": { + "name": "other_collection_name", + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index e4e4430ad1..d1d945e6e6 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -34,7 +34,9 @@ from test.utils import (EventListener, disable_replication, enable_replication, - rs_or_single_client) + rs_or_single_client, + TestCreator) +from test.utils_spec_runner import SpecRunner _TEST_PATH = os.path.join( @@ -252,7 +254,10 @@ def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): dirname = os.path.split(dirpath)[-1] - if dirname == 'connection-string': + if dirname == 'operation': + # This directory is tested by TestOperations. + continue + elif dirname == 'connection-string': create_test = create_connection_string_test else: create_test = create_document_test @@ -276,5 +281,27 @@ def create_tests(): create_tests() +class TestOperation(SpecRunner): + # Location of JSON test specifications. + TEST_PATH = os.path.join(_TEST_PATH, 'operation') + + def get_outcome_coll_name(self, outcome, collection): + """Spec says outcome has an optional 'collection.name'.""" + return outcome['collection'].get('name', collection.name) + + +def create_operation_test(scenario_def, test, name): + @client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = TestCreator( + create_operation_test, TestOperation, TestOperation.TEST_PATH) +test_creator.create_tests() + + if __name__ == '__main__': unittest.main() diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 67f028e89f..c4d525394c 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -336,6 +336,8 @@ def run_operation(self, sessions, collection, operation): arguments['name'] = arguments.pop(arg_name) elif name == 'create_index' and arg_name == 'keys': arguments['keys'] = list(arguments.pop(arg_name).items()) + elif name == 'drop_index' and arg_name == 'name': + arguments['index_or_name'] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name) From a4f38d7d6ec2b96a9a7d276c7faf9247e1816020 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 8 May 2020 14:48:59 -0700 Subject: [PATCH 0130/2111] PYTHON-2168 Ensure that the WriteConcernError "errInfo" object is propagated --- test/test_read_write_concern_spec.py | 45 ++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index d1d945e6e6..5e890188f3 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -30,7 +30,9 @@ from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import client_context, unittest +from test import (client_context, + IntegrationTest, + unittest) from test.utils import (EventListener, disable_replication, enable_replication, @@ -43,9 +45,8 @@ os.path.dirname(os.path.realpath(__file__)), 'read_write_concern') -class TestReadWriteConcernSpec(unittest.TestCase): +class TestReadWriteConcernSpec(IntegrationTest): - @client_context.require_connection def test_omit_default_read_write_concern(self): listener = EventListener() # Client with default readConcern and writeConcern @@ -171,6 +172,44 @@ def test_raise_wtimeout(self): self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) + @client_context.require_failCommand_fail_point + def test_error_includes_errInfo(self): + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + "errInfo": { + "writeConcern": { + "w": 2, + "wtimeout": 0, + "provenance": "clientSupplied" + } + } + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": expected_wce + }, + } + with self.fail_point(cause_wce): + # Write concern error on insert includes errInfo. + with self.assertRaises(WriteConcernError) as ctx: + self.db.test.insert_one({}) + self.assertEqual(ctx.exception.details, expected_wce) + + # Test bulk_write as well. + with self.assertRaises(BulkWriteError) as ctx: + self.db.test.bulk_write([InsertOne({})]) + expected_details = { + 'writeErrors': [], + 'writeConcernErrors': [expected_wce], + 'nInserted': 1, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, + 'nRemoved': 0, 'upserted': []} + self.assertEqual(ctx.exception.details, expected_details) + def normalize_write_concern(concern): result = {} From fb7533e88810a92ff56821ff4a4923831577e96e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 12 May 2020 11:49:41 -0700 Subject: [PATCH 0131/2111] PYTHON-2099 Make ExceededTimeLimit a retryable writes error Reinstate DuplicateKey code assertions. --- pymongo/helpers.py | 1 + .../insertOne-serverErrors.json | 48 +++++++++++++++++++ .../callback-retry.json | 3 +- test/transactions/abort.json | 3 +- test/transactions/error-labels.json | 3 +- test/utils_spec_runner.py | 10 +++- 6 files changed, 63 insertions(+), 5 deletions(-) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 366d0eca92..51215b4c40 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -50,6 +50,7 @@ 6, # HostUnreachable 89, # NetworkTimeout 9001, # SocketException + 262, # ExceededTimeLimit ]) _UUNDER = u"_" diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/insertOne-serverErrors.json index 3c3c5b1dc3..703ac1e155 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/insertOne-serverErrors.json @@ -575,6 +575,54 @@ } } }, + { + "description": "InsertOne succeeds after ExceededTimeLimit", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 262, + "closeConnection": false + } + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "result": { + "insertedId": 3 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, { "description": "InsertOne fails after Interrupted", "failPoint": { diff --git a/test/transactions-convenient-api/callback-retry.json b/test/transactions-convenient-api/callback-retry.json index ed36434452..a0391c1b5d 100644 --- a/test/transactions-convenient-api/callback-retry.json +++ b/test/transactions-convenient-api/callback-retry.json @@ -235,7 +235,8 @@ "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" - ] + ], + "errorContains": "E11000" } } ], diff --git a/test/transactions/abort.json b/test/transactions/abort.json index 821a15afbe..3729a98298 100644 --- a/test/transactions/abort.json +++ b/test/transactions/abort.json @@ -458,7 +458,8 @@ "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" - ] + ], + "errorContains": "E11000" } }, { diff --git a/test/transactions/error-labels.json b/test/transactions/error-labels.json index 3e2451ade8..8662b6d76d 100644 --- a/test/transactions/error-labels.json +++ b/test/transactions/error-labels.json @@ -42,7 +42,8 @@ "errorLabelsOmit": [ "TransientTransactionError", "UnknownTransactionCommitResult" - ] + ], + "errorContains": "E11000" } }, { diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index c4d525394c..68faa8c346 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -32,7 +32,9 @@ operations) from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor -from pymongo.errors import (OperationFailure, PyMongoError) +from pymongo.errors import (BulkWriteError, + OperationFailure, + PyMongoError) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import _WriteResult, BulkWriteResult @@ -375,8 +377,12 @@ def run_operations(self, sessions, collection, ops, self.run_operation(sessions, collection, op.copy()) if expect_error_message(expected_result): + if isinstance(context.exception, BulkWriteError): + errmsg = str(context.exception.details).lower() + else: + errmsg = str(context.exception).lower() self.assertIn(expected_result['errorContains'].lower(), - str(context.exception).lower()) + errmsg) if expect_error_code(expected_result): self.assertEqual(expected_result['errorCodeName'], context.exception.details.get('codeName')) From 33c93223b1f01ce5d0065a16ad3d38b98e39b8cf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 7 May 2020 20:15:06 -0700 Subject: [PATCH 0132/2111] PYTHON-1651 Publish CommandFailedEvent when bulk write fails with a network error --- pymongo/message.py | 25 ++++++++----- test/__init__.py | 21 +++++------ test/test_monitoring.py | 77 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 103 insertions(+), 20 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index f06e142fe7..c4b76fc1ee 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -117,7 +117,7 @@ def _convert_exception(exception): def _convert_write_result(operation, command, result): - """Convert a legacy write result to write commmand format.""" + """Convert a legacy write result to write command format.""" # Based on _merge_legacy from bulk.py affected = result.get("n", 0) @@ -971,14 +971,17 @@ def legacy_write(self, request_id, msg, max_doc_size, acknowledged, docs): # Comply with APM spec. reply = {'ok': 1} self._succeed(request_id, reply, duration) - except OperationFailure as exc: + except Exception as exc: if self.publish: duration = (datetime.datetime.now() - start) + duration - self._fail( - request_id, - _convert_write_result( - self.name, cmd, exc.details), - duration) + if isinstance(exc, OperationFailure): + failure = _convert_write_result( + self.name, cmd, exc.details) + elif isinstance(exc, NotMasterError): + failure = exc.details + else: + failure = _convert_exception(exc) + self._fail(request_id, failure, duration) raise finally: self.start_time = datetime.datetime.now() @@ -996,10 +999,14 @@ def write_command(self, request_id, msg, docs): if self.publish: duration = (datetime.datetime.now() - start) + duration self._succeed(request_id, reply, duration) - except OperationFailure as exc: + except Exception as exc: if self.publish: duration = (datetime.datetime.now() - start) + duration - self._fail(request_id, exc.details, duration) + if isinstance(exc, (NotMasterError, OperationFailure)): + failure = exc.details + else: + failure = _convert_exception(exc) + self._fail(request_id, failure, duration) raise finally: self.start_time = datetime.datetime.now() diff --git a/test/__init__.py b/test/__init__.py index 54295ff633..a82e62edf3 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -734,6 +734,17 @@ def assertEqualCommand(self, expected, actual, msg=None): def assertEqualReply(self, expected, actual, msg=None): self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on.update(command_args) + client_context.client.admin.command(cmd_on) + try: + yield + finally: + client_context.client.admin.command( + 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') + class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" @@ -748,16 +759,6 @@ def setUpClass(cls): else: cls.credentials = {} - @contextmanager - def fail_point(self, command_args): - cmd_on = SON([('configureFailPoint', 'failCommand')]) - cmd_on.update(command_args) - self.client.admin.command(cmd_on) - try: - yield - finally: - self.client.admin.command( - 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') # Use assertRaisesRegex if available, otherwise use Python 2.7's # deprecated assertRaisesRegexp, with a 'p'. diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 44d924f798..52f9d121c3 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -25,7 +25,9 @@ from bson.son import SON from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne from pymongo.command_cursor import CommandCursor -from pymongo.errors import NotMasterError, OperationFailure +from pymongo.errors import (AutoReconnect, + NotMasterError, + OperationFailure) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern from test import (client_context, @@ -34,6 +36,7 @@ sanitize_cmd, unittest) from test.utils import (EventListener, + get_pool, rs_or_single_client, single_client, wait_until) @@ -1172,6 +1175,78 @@ def test_bulk_write(self): ('limit', 1)])])]) self.assertEqualCommand(expected, started[2].command) + @client_context.require_failCommand_fail_point + def test_bulk_write_command_network_error(self): + coll = self.client.pymongo_test.test + self.listener.results.clear() + + insert_network_error = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['insert'], + 'closeConnection': True, + }, + } + with self.fail_point(insert_network_error): + with self.assertRaises(AutoReconnect): + coll.bulk_write([InsertOne({'_id': 1})]) + failed = self.listener.results['failed'] + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, 'insert') + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure['errtype'], 'AutoReconnect') + self.assertTrue(event.failure['errmsg']) + + @client_context.require_failCommand_fail_point + def test_bulk_write_command_error(self): + coll = self.client.pymongo_test.test + self.listener.results.clear() + + insert_command_error = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['insert'], + 'closeConnection': False, + 'errorCode': 10107, # NotMaster + }, + } + with self.fail_point(insert_command_error): + with self.assertRaises(NotMasterError): + coll.bulk_write([InsertOne({'_id': 1})]) + failed = self.listener.results['failed'] + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, 'insert') + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure['code'], 10107) + self.assertTrue(event.failure['errmsg']) + + @client_context.require_version_max(3, 4, 99) + def test_bulk_write_legacy_network_error(self): + self.listener.results.clear() + + # Make the delete operation run on a closed connection. + self.client.admin.command('ping') + pool = get_pool(self.client) + sock_info = pool.sockets[0] + sock_info.sock.close() + + # Test legacy unacknowledged write network error. + coll = self.client.pymongo_test.get_collection( + 'test', write_concern=WriteConcern(w=0)) + with self.assertRaises(AutoReconnect): + coll.bulk_write([InsertOne({'_id': 1})], ordered=False) + failed = self.listener.results['failed'] + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, 'insert') + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure['errtype'], 'AutoReconnect') + self.assertTrue(event.failure['errmsg']) + def test_write_errors(self): coll = self.client.pymongo_test.test coll.drop() From 2c631faa6cb0ff925be380523ac5b54d9713efd2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 8 May 2020 15:17:11 -0700 Subject: [PATCH 0133/2111] PYTHON-2239 Avoid 30 second stalls in TestMongoClientFailover --- test/pymongo_mocks.py | 4 ++-- test/test_client.py | 22 ++++++++++++++++++---- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 7ed2e8a303..bd3e1ae22b 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -51,7 +51,7 @@ def get_socket(self, all_credentials, checkout=False): + client.mock_members + client.mock_mongoses), "bad host: %s" % host_and_port - with Pool.get_socket(self, all_credentials) as sock_info: + with Pool.get_socket(self, all_credentials, checkout) as sock_info: sock_info.mock_host = self.mock_host sock_info.mock_port = self.mock_port yield sock_info @@ -74,7 +74,7 @@ def __init__( pool, topology_settings) - def _check_once(self, metadata=None, cluster_time=None): + def _check_once(self): address = self._server_description.address response, rtt = self.client.mock_is_master('%s:%d' % address) return ServerDescription(address, IsMaster(response), rtt) diff --git a/test/test_client.py b/test/test_client.py index 0540786c54..ef1b19485f 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -48,6 +48,7 @@ InvalidURI, NetworkTimeout, OperationFailure, + ServerSelectionTimeoutError, WriteConcernError) from pymongo.monitoring import (ServerHeartbeatListener, ServerHeartbeatStartedEvent) @@ -1731,6 +1732,7 @@ def test_discover_primary(self): mongoses=[], host='b:2', # Pass a secondary. replicaSet='rs') + self.addCleanup(c.close) wait_until(lambda: len(c.nodes) == 3, 'connect') self.assertEqual(c.address, ('a', 1)) @@ -1760,7 +1762,10 @@ def test_reconnect(self): mongoses=[], host='b:2', # Pass a secondary. replicaSet='rs', - retryReads=False) + retryReads=False, + serverSelectionTimeoutMS=100, + ) + self.addCleanup(c.close) wait_until(lambda: len(c.nodes) == 3, 'connect') @@ -1769,8 +1774,13 @@ def test_reconnect(self): c.kill_host('b:2') c.kill_host('c:3') - # MongoClient discovers it's alone. + # MongoClient discovers it's alone. The first attempt raises either + # ServerSelectionTimeoutError or AutoReconnect (from + # MockPool.get_socket). self.assertRaises(AutoReconnect, c.db.collection.find_one) + # The second attempt always raises ServerSelectionTimeoutError. + self.assertRaises(ServerSelectionTimeoutError, + c.db.collection.find_one) # But it can reconnect. c.revive_host('a:1') @@ -1789,7 +1799,9 @@ def _test_network_error(self, operation_callback): host='a:1', replicaSet='rs', connect=False, - retryReads=False) + retryReads=False, + serverSelectionTimeoutMS=100) + self.addCleanup(c.close) # Set host-specific information so we can test whether it is reset. c.set_wire_version_range('a:1', 2, 6) @@ -1799,7 +1811,9 @@ def _test_network_error(self, operation_callback): c.kill_host('a:1') - # MongoClient is disconnected from the primary. + # MongoClient is disconnected from the primary. This raises either + # ServerSelectionTimeoutError or AutoReconnect (from + # MockPool.get_socket). self.assertRaises(AutoReconnect, operation_callback, c) # The primary's description is reset. From 48df9b088f37ac2165f91caab824d60edd5427c9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 May 2020 17:36:35 -0700 Subject: [PATCH 0134/2111] PYTHON-2109 Avoid 30 second stalls in TestSocketError Reduce run time of test_timeout_does_not_mark_member_down. --- test/test_replica_set_client.py | 6 +++--- test/test_replica_set_reconfig.py | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py index 7c8d216b8d..e0456186ae 100644 --- a/test/test_replica_set_client.py +++ b/test/test_replica_set_client.py @@ -166,7 +166,7 @@ def test_timeout_does_not_mark_member_down(self): # Disable background refresh. with client_knobs(heartbeat_frequency=999999): - c = rs_client(socketTimeoutMS=3000, w=self.w) + c = rs_client(socketTimeoutMS=1000, w=self.w) collection = c.pymongo_test.test collection.insert_one({}) @@ -174,7 +174,7 @@ def test_timeout_does_not_mark_member_down(self): self.assertRaises( NetworkTimeout, collection.find_one, - {'$where': delay(5)}) + {'$where': delay(1.5)}) self.assertTrue(c.primary) collection.find_one() # No error. @@ -186,7 +186,7 @@ def test_timeout_does_not_mark_member_down(self): self.assertRaises( NetworkTimeout, coll.find_one, - {'$where': delay(5)}) + {'$where': delay(1.5)}) self.assertTrue(c.secondaries) diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 347bbab491..d9d39e3010 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -121,7 +121,8 @@ def test_socket_error_marks_member_down(self): members=['a:1', 'b:2'], mongoses=[], host='a:1', - replicaSet='rs') + replicaSet='rs', + serverSelectionTimeoutMS=100) self.addCleanup(c.close) wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') From 45a7963aac012665ebd44e9dfa891e3b89febb41 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 8 May 2020 18:12:20 -0700 Subject: [PATCH 0135/2111] PYTHON-2082 Retryable writes use the RetryableWriteError label Use retryable write logic for transaction commit/abort. Do not assign the TransientTransactionError label to errors outside a transaction. --- pymongo/client_session.py | 61 ++--- pymongo/errors.py | 13 +- pymongo/mongo_client.py | 132 ++++++----- .../bulkWrite-errorLabels.json | 182 ++++++++++++++ .../bulkWrite-serverErrors.json | 85 ++++++- .../deleteOne-errorLabels.json | 106 +++++++++ .../deleteOne-serverErrors.json | 53 ++++- .../findOneAndDelete-errorLabels.json | 117 +++++++++ .../findOneAndDelete-serverErrors.json | 58 ++++- .../findOneAndReplace-errorLabels.json | 121 ++++++++++ .../findOneAndReplace-serverErrors.json | 58 ++++- .../findOneAndUpdate-errorLabels.json | 123 ++++++++++ .../findOneAndUpdate-serverErrors.json | 59 ++++- .../insertMany-errorLabels.json | 129 ++++++++++ .../insertMany-serverErrors.json | 59 ++++- .../insertOne-errorLabels.json | 90 +++++++ .../insertOne-serverErrors.json | 173 +++++++++++++- .../replaceOne-errorLabels.json | 120 ++++++++++ .../replaceOne-serverErrors.json | 57 ++++- .../updateOne-errorLabels.json | 122 ++++++++++ .../updateOne-serverErrors.json | 58 ++++- test/test_sdam_monitoring_spec.py | 6 +- .../commit-retry.json | 3 + test/transactions/bulk.json | 16 +- test/transactions/causal-consistency.json | 21 +- test/transactions/error-labels.json | 147 +++++++++++- test/transactions/mongos-recovery-token.json | 5 +- test/transactions/pin-mongos.json | 4 +- .../retryable-abort-errorLabels.json | 204 ++++++++++++++++ test/transactions/retryable-abort.json | 45 ++++ .../retryable-commit-errorLabels.json | 223 ++++++++++++++++++ test/transactions/retryable-commit.json | 61 ++++- test/transactions/update.json | 16 +- test/transactions/write-concern.json | 1 - 34 files changed, 2536 insertions(+), 192 deletions(-) create mode 100644 test/retryable_writes/bulkWrite-errorLabels.json create mode 100644 test/retryable_writes/deleteOne-errorLabels.json create mode 100644 test/retryable_writes/findOneAndDelete-errorLabels.json create mode 100644 test/retryable_writes/findOneAndReplace-errorLabels.json create mode 100644 test/retryable_writes/findOneAndUpdate-errorLabels.json create mode 100644 test/retryable_writes/insertMany-errorLabels.json create mode 100644 test/retryable_writes/insertOne-errorLabels.json create mode 100644 test/retryable_writes/replaceOne-errorLabels.json create mode 100644 test/retryable_writes/updateOne-errorLabels.json create mode 100644 test/transactions/retryable-abort-errorLabels.json create mode 100644 test/transactions/retryable-commit-errorLabels.json diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 63021a3c0d..7c5db5a42d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -98,13 +98,11 @@ """ import collections -import os -import sys import uuid from bson.binary import Binary from bson.int64 import Int64 -from bson.py3compat import abc, integer_types, reraise_instance +from bson.py3compat import abc, integer_types from bson.son import SON from bson.timestamp import Timestamp @@ -114,7 +112,6 @@ InvalidOperation, OperationFailure, PyMongoError, - ServerSelectionTimeoutError, WTimeoutError) from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern @@ -295,6 +292,7 @@ def __init__(self, opts): self.sharded = False self.pinned_address = None self.recovery_token = None + self.attempt = 0 def active(self): return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) @@ -304,12 +302,13 @@ def reset(self): self.sharded = False self.pinned_address = None self.recovery_token = None + self.attempt = 0 def _reraise_with_unknown_commit(exc): """Re-raise an exception with the UnknownTransactionCommitResult label.""" exc._add_error_label("UnknownTransactionCommitResult") - reraise_instance(exc, trace=sys.exc_info()[2]) + raise def _max_time_expired_error(exc): @@ -579,7 +578,6 @@ def commit_transaction(self): .. versionadded:: 3.7 """ self._check_ended() - retry = False state = self._transaction.state if state is _TxnState.NONE: raise InvalidOperation("No transaction started") @@ -594,10 +592,9 @@ def commit_transaction(self): # We're explicitly retrying the commit, move the state back to # "in progress" so that in_transaction returns true. self._transaction.state = _TxnState.IN_PROGRESS - retry = True try: - self._finish_transaction_with_retry("commitTransaction", retry) + self._finish_transaction_with_retry("commitTransaction") except ConnectionFailure as exc: # We do not know if the commit was successfully applied on the # server or if it satisfied the provided write concern, set the @@ -640,44 +637,25 @@ def abort_transaction(self): "Cannot call abortTransaction after calling commitTransaction") try: - self._finish_transaction_with_retry("abortTransaction", False) + self._finish_transaction_with_retry("abortTransaction") except (OperationFailure, ConnectionFailure): # The transactions spec says to ignore abortTransaction errors. pass finally: self._transaction.state = _TxnState.ABORTED - def _finish_transaction_with_retry(self, command_name, explict_retry): + def _finish_transaction_with_retry(self, command_name): """Run commit or abort with one retry after any retryable error. :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". - - `explict_retry`: True when this is an explict commit retry attempt, - ie the application called session.commit_transaction() twice. """ - # This can be refactored with MongoClient._retry_with_session. - try: - return self._finish_transaction(command_name, explict_retry) - except ServerSelectionTimeoutError: - raise - except ConnectionFailure as exc: - try: - return self._finish_transaction(command_name, True) - except ServerSelectionTimeoutError: - # Raise the original error so the application can infer that - # an attempt was made. - raise exc - except OperationFailure as exc: - if exc.code not in _RETRYABLE_ERROR_CODES: - raise - try: - return self._finish_transaction(command_name, True) - except ServerSelectionTimeoutError: - # Raise the original error so the application can infer that - # an attempt was made. - raise exc + def func(session, sock_info, retryable): + return self._finish_transaction(sock_info, command_name) + return self._client._retry_internal(True, func, self, None) - def _finish_transaction(self, command_name, retrying): + def _finish_transaction(self, sock_info, command_name): + self._transaction.attempt += 1 opts = self._transaction.opts wc = opts.write_concern cmd = SON([(command_name, 1)]) @@ -688,7 +666,7 @@ def _finish_transaction(self, command_name, retrying): # Transaction spec says that after the initial commit attempt, # subsequent commitTransaction commands should be upgraded to use # w:"majority" and set a default value of 10 seconds for wtimeout. - if retrying: + if self._transaction.attempt > 1: wc_doc = wc.document wc_doc["w"] = "majority" wc_doc.setdefault("wtimeout", 10000) @@ -697,13 +675,12 @@ def _finish_transaction(self, command_name, retrying): if self._transaction.recovery_token: cmd['recoveryToken'] = self._transaction.recovery_token - with self._client._socket_for_writes(self) as sock_info: - return self._client.admin._command( - sock_info, - cmd, - session=self, - write_concern=wc, - parse_write_concern_error=True) + return self._client.admin._command( + sock_info, + cmd, + session=self, + write_concern=wc, + parse_write_concern_error=True) def _advance_cluster_time(self, cluster_time): """Internal cluster time helper.""" diff --git a/pymongo/errors.py b/pymongo/errors.py index fb4b45628d..c11053f475 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -48,7 +48,7 @@ def _add_error_label(self, label): def _remove_error_label(self, label): """Remove the given label from this error.""" - self._error_labels.remove(label) + self._error_labels.discard(label) if sys.version_info[0] == 2: def __str__(self): @@ -68,12 +68,6 @@ class ProtocolError(PyMongoError): class ConnectionFailure(PyMongoError): """Raised when a connection to the database cannot be made or is lost.""" - def __init__(self, message='', error_labels=None): - if error_labels is None: - # Connection errors are transient errors by default. - error_labels = ("TransientTransactionError",) - super(ConnectionFailure, self).__init__( - message, error_labels=error_labels) class AutoReconnect(ConnectionFailure): @@ -89,7 +83,10 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. """ def __init__(self, message='', errors=None): - super(AutoReconnect, self).__init__(message) + error_labels = None + if errors is not None and isinstance(errors, dict): + error_labels = errors.get('errorLabels') + super(AutoReconnect, self).__init__(message, error_labels) self.errors = self.details = errors or [] diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7b84e2e504..0c93604a32 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -59,6 +59,7 @@ ConfigurationError, ConnectionFailure, InvalidOperation, + NotMasterError, OperationFailure, PyMongoError, ServerSelectionTimeoutError) @@ -1265,7 +1266,9 @@ def _select_server(self, server_selector, session, address=None): session._pin_mongos(server) return server except PyMongoError as exc: - if session and exc.has_error_label("TransientTransactionError"): + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") session._unpin_mongos() raise @@ -1361,6 +1364,11 @@ def _retry_with_session(self, retryable, func, session, bulk): """ retryable = (retryable and self.retry_writes and session and not session.in_transaction) + return self._retry_internal(retryable, func, session, bulk) + + def _retry_internal(self, retryable, func, session, bulk): + """Internal retryable write helper.""" + max_wire_version = 0 last_error = None retrying = False @@ -1369,7 +1377,7 @@ def is_retrying(): # Increment the transaction id up front to ensure any retry attempt # will use the proper txnNumber, even if server or socket selection # fails before the command can be sent. - if retryable: + if retryable and session and not session.in_transaction: session._start_retryable_write() if bulk: bulk.started_retryable_write = True @@ -1381,6 +1389,7 @@ def is_retrying(): session is not None and server.description.retryable_writes_supported) with self._get_socket(server, session) as sock_info: + max_wire_version = sock_info.max_wire_version if retryable and not supports_session: if is_retrying(): # A retry is not possible because this server does @@ -1398,40 +1407,12 @@ def is_retrying(): # be a persistent outage. Attempting to retry in this case will # most likely be a waste of time. raise - except ConnectionFailure as exc: - if not retryable or is_retrying(): + except Exception as exc: + if not retryable: raise - if bulk: - bulk.retrying = True - else: - retrying = True - last_error = exc - except BulkWriteError as exc: - if not retryable or is_retrying(): - raise - # Check the last writeConcernError to determine if this - # BulkWriteError is retryable. - wces = exc.details['writeConcernErrors'] - wce = wces[-1] if wces else {} - if wce.get('code', 0) not in helpers._RETRYABLE_ERROR_CODES: - raise - if bulk: - bulk.retrying = True - else: - retrying = True - last_error = exc - except OperationFailure as exc: - # retryWrites on MMAPv1 should raise an actionable error. - if (exc.code == 20 and - str(exc).startswith("Transaction numbers")): - errmsg = ( - "This MongoDB deployment does not support " - "retryable writes. Please add retryWrites=false " - "to your connection string.") - raise OperationFailure(errmsg, exc.code, exc.details) - if not retryable or is_retrying(): - raise - if exc.code not in helpers._RETRYABLE_ERROR_CODES: + # Add the RetryableWriteError label. + if (not _retryable_writes_error(exc, max_wire_version) + or is_retrying()): raise if bulk: bulk.retrying = True @@ -2162,26 +2143,66 @@ def __next__(self): next = __next__ +def _retryable_error_doc(exc): + """Return the server response from PyMongo exception or None.""" + if isinstance(exc, BulkWriteError): + # Check the last writeConcernError to determine if this + # BulkWriteError is retryable. + wces = exc.details['writeConcernErrors'] + wce = wces[-1] if wces else None + return wce + if isinstance(exc, (NotMasterError, OperationFailure)): + return exc.details + return None + + +def _retryable_writes_error(exc, max_wire_version): + doc = _retryable_error_doc(exc) + if doc: + code = doc.get('code', 0) + # retryWrites on MMAPv1 should raise an actionable error. + if (code == 20 and + str(exc).startswith("Transaction numbers")): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string.") + raise OperationFailure(errmsg, code, exc.details) + if max_wire_version >= 9: + # MongoDB 4.4+ utilizes RetryableWriteError. + return 'RetryableWriteError' in doc.get('errorLabels', []) + else: + if code in helpers._RETRYABLE_ERROR_CODES: + exc._add_error_label("RetryableWriteError") + return True + return False + + if isinstance(exc, ConnectionFailure): + exc._add_error_label("RetryableWriteError") + return True + return False + + class _MongoClientErrorHandler(object): - """Error handler for MongoClient.""" - __slots__ = ('_client', '_server_address', '_session', - '_max_wire_version', '_sock_generation') + """Handle errors raised when executing an operation.""" + __slots__ = ('client', 'server_address', 'session', 'max_wire_version', + 'sock_generation') def __init__(self, client, server, session): - self._client = client - self._server_address = server.description.address - self._session = session - self._max_wire_version = common.MIN_WIRE_VERSION + self.client = client + self.server_address = server.description.address + self.session = session + self.max_wire_version = common.MIN_WIRE_VERSION # XXX: When get_socket fails, this generation could be out of date: # "Note that when a network error occurs before the handshake # completes then the error's generation number is the generation # of the pool at the time the connection attempt was started." - self._sock_generation = server.pool.generation + self.sock_generation = server.pool.generation def contribute_socket(self, sock_info): """Provide socket information to the error handler.""" - self._max_wire_version = sock_info.max_wire_version - self._sock_generation = sock_info.generation + self.max_wire_version = sock_info.max_wire_version + self.sock_generation = sock_info.generation def __enter__(self): return self @@ -2190,15 +2211,16 @@ def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is None: return - err_ctx = _ErrorContext( - exc_val, self._max_wire_version, self._sock_generation) - self._client._topology.handle_error(self._server_address, err_ctx) + if self.session: + if issubclass(exc_type, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() - if issubclass(exc_type, PyMongoError): - if self._session and exc_val.has_error_label( - "TransientTransactionError"): - self._session._unpin_mongos() + if issubclass(exc_type, PyMongoError): + if exc_val.has_error_label("TransientTransactionError"): + self.session._unpin_mongos() - if issubclass(exc_type, ConnectionFailure): - if self._session: - self._session._server_session.mark_dirty() + err_ctx = _ErrorContext( + exc_val, self.max_wire_version, self.sock_generation) + self.client._topology.handle_error(self.server_address, err_ctx) diff --git a/test/retryable_writes/bulkWrite-errorLabels.json b/test/retryable_writes/bulkWrite-errorLabels.json new file mode 100644 index 0000000000..94ea3ea989 --- /dev/null +++ b/test/retryable_writes/bulkWrite-errorLabels.json @@ -0,0 +1,182 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "BulkWrite succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "1": 3 + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/bulkWrite-serverErrors.json b/test/retryable_writes/bulkWrite-serverErrors.json index 79c81a583b..d9561d568c 100644 --- a/test/retryable_writes/bulkWrite-serverErrors.json +++ b/test/retryable_writes/bulkWrite-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -117,7 +120,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -186,6 +192,81 @@ ] } } + }, + { + "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } } ] } diff --git a/test/retryable_writes/deleteOne-errorLabels.json b/test/retryable_writes/deleteOne-errorLabels.json new file mode 100644 index 0000000000..bff02e1f94 --- /dev/null +++ b/test/retryable_writes/deleteOne-errorLabels.json @@ -0,0 +1,106 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "DeleteOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "result": { + "deletedCount": 1 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "DeleteOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/deleteOne-serverErrors.json b/test/retryable_writes/deleteOne-serverErrors.json index 9ef2bf2f29..69d225759c 100644 --- a/test/retryable_writes/deleteOne-serverErrors.json +++ b/test/retryable_writes/deleteOne-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "delete" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -73,7 +76,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -98,6 +104,49 @@ ] } } + }, + { + "description": "DeleteOne fails with RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + }, + "operation": { + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/findOneAndDelete-errorLabels.json b/test/retryable_writes/findOneAndDelete-errorLabels.json new file mode 100644 index 0000000000..efa62dba2e --- /dev/null +++ b/test/retryable_writes/findOneAndDelete-errorLabels.json @@ -0,0 +1,117 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndDelete succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndDelete fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/findOneAndDelete-serverErrors.json b/test/retryable_writes/findOneAndDelete-serverErrors.json index d72d1a05ba..0785e5d035 100644 --- a/test/retryable_writes/findOneAndDelete-serverErrors.json +++ b/test/retryable_writes/findOneAndDelete-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -79,7 +82,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -110,6 +116,54 @@ ] } } + }, + { + "description": "FindOneAndDelete fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/findOneAndReplace-errorLabels.json b/test/retryable_writes/findOneAndReplace-errorLabels.json new file mode 100644 index 0000000000..d9473d139a --- /dev/null +++ b/test/retryable_writes/findOneAndReplace-errorLabels.json @@ -0,0 +1,121 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndReplace succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndReplace fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/findOneAndReplace-serverErrors.json b/test/retryable_writes/findOneAndReplace-serverErrors.json index d5d25e1d78..6ebe057cfd 100644 --- a/test/retryable_writes/findOneAndReplace-serverErrors.json +++ b/test/retryable_writes/findOneAndReplace-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -83,7 +86,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -118,6 +124,54 @@ ] } } + }, + { + "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/findOneAndUpdate-errorLabels.json b/test/retryable_writes/findOneAndUpdate-errorLabels.json new file mode 100644 index 0000000000..1926d7fa5c --- /dev/null +++ b/test/retryable_writes/findOneAndUpdate-errorLabels.json @@ -0,0 +1,123 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "FindOneAndUpdate succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "result": { + "_id": 1, + "x": 11 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "FindOneAndUpdate fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/findOneAndUpdate-serverErrors.json b/test/retryable_writes/findOneAndUpdate-serverErrors.json index b9f57cd825..e6e369c139 100644 --- a/test/retryable_writes/findOneAndUpdate-serverErrors.json +++ b/test/retryable_writes/findOneAndUpdate-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "findAndModify" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -84,7 +87,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -120,6 +126,55 @@ ] } } + }, + { + "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operation": { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/insertMany-errorLabels.json b/test/retryable_writes/insertMany-errorLabels.json new file mode 100644 index 0000000000..c78946e90a --- /dev/null +++ b/test/retryable_writes/insertMany-errorLabels.json @@ -0,0 +1,129 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + } + ], + "tests": [ + { + "description": "InsertMany succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "result": { + "insertedIds": { + "0": 2, + "1": 3 + } + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "InsertMany fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/insertMany-serverErrors.json b/test/retryable_writes/insertMany-serverErrors.json index 773ad9307f..1c6ebafc28 100644 --- a/test/retryable_writes/insertMany-serverErrors.json +++ b/test/retryable_writes/insertMany-serverErrors.json @@ -31,7 +31,10 @@ "failCommands": [ "insert" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -90,7 +93,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -136,6 +142,55 @@ ] } } + }, + { + "description": "InsertMany fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "options": { + "ordered": true + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } } ] } diff --git a/test/retryable_writes/insertOne-errorLabels.json b/test/retryable_writes/insertOne-errorLabels.json new file mode 100644 index 0000000000..9b8d13d524 --- /dev/null +++ b/test/retryable_writes/insertOne-errorLabels.json @@ -0,0 +1,90 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [], + "tests": [ + { + "description": "InsertOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "result": { + "insertedId": 1 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + }, + { + "description": "InsertOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/insertOne-serverErrors.json index 703ac1e155..59f6d9b51a 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/insertOne-serverErrors.json @@ -69,6 +69,53 @@ } } }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "clientOptions": { + "retryWrites": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, { "description": "InsertOne succeeds after NotMaster", "failPoint": { @@ -81,6 +128,9 @@ "insert" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -127,6 +177,9 @@ "insert" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -173,6 +226,9 @@ "insert" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -219,6 +275,9 @@ "insert" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -265,6 +324,9 @@ "insert" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -311,6 +373,9 @@ "insert" ], "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -357,6 +422,9 @@ "insert" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -403,6 +471,9 @@ "insert" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -449,6 +520,9 @@ "insert" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -495,6 +569,9 @@ "insert" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -541,6 +618,9 @@ "insert" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -586,10 +666,11 @@ "failCommands": [ "insert" ], - "writeConcernError": { - "code": 262, - "closeConnection": false - } + "errorCode": 262, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false } }, "operation": { @@ -649,6 +730,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -676,7 +762,10 @@ ], "writeConcernError": { "code": 11600, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -724,7 +813,10 @@ ], "writeConcernError": { "code": 11602, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -772,7 +864,10 @@ ], "writeConcernError": { "code": 189, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -820,7 +915,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -883,6 +981,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -929,6 +1032,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -979,6 +1087,11 @@ }, "outcome": { "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, "collection": { "data": [ { @@ -996,6 +1109,50 @@ ] } } + }, + { + "description": "InsertOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "operation": { + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/replaceOne-errorLabels.json b/test/retryable_writes/replaceOne-errorLabels.json new file mode 100644 index 0000000000..06867e5159 --- /dev/null +++ b/test/retryable_writes/replaceOne-errorLabels.json @@ -0,0 +1,120 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "ReplaceOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "ReplaceOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/replaceOne-serverErrors.json b/test/retryable_writes/replaceOne-serverErrors.json index aac7b2f394..af18bcf1a2 100644 --- a/test/retryable_writes/replaceOne-serverErrors.json +++ b/test/retryable_writes/replaceOne-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -83,7 +86,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -118,6 +124,53 @@ ] } } + }, + { + "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/retryable_writes/updateOne-errorLabels.json b/test/retryable_writes/updateOne-errorLabels.json new file mode 100644 index 0000000000..4a6be3ffba --- /dev/null +++ b/test/retryable_writes/updateOne-errorLabels.json @@ -0,0 +1,122 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "UpdateOne succeeds with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + }, + { + "description": "UpdateOne fails if server does not return RetryableWriteError", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } + } + ] +} diff --git a/test/retryable_writes/updateOne-serverErrors.json b/test/retryable_writes/updateOne-serverErrors.json index 6f6c55dd51..bb442eb68a 100644 --- a/test/retryable_writes/updateOne-serverErrors.json +++ b/test/retryable_writes/updateOne-serverErrors.json @@ -35,7 +35,10 @@ "failCommands": [ "update" ], - "errorCode": 189 + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operation": { @@ -84,7 +87,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -120,6 +126,54 @@ ] } } + }, + { + "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + }, + "operation": { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + "outcome": { + "error": true, + "result": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + }, + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + } } ] } diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 97412f41d6..710bf8732c 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -331,11 +331,13 @@ def test_network_error_publishes_events(self): # changes because topologyVersion is not incremented. @client_context.require_version_max(4, 3) def test_not_master_error_publishes_events(self): - self._test_app_error({'errorCode': 10107, 'closeConnection': False}, + self._test_app_error({'errorCode': 10107, 'closeConnection': False, + 'errorLabels': ['RetryableWriteError']}, NotMasterError) def test_shutdown_error_publishes_events(self): - self._test_app_error({'errorCode': 91, 'closeConnection': False}, + self._test_app_error({'errorCode': 91, 'closeConnection': False, + 'errorLabels': ['RetryableWriteError']}, NotMasterError) diff --git a/test/transactions-convenient-api/commit-retry.json b/test/transactions-convenient-api/commit-retry.json index d4b948ce1a..312116253b 100644 --- a/test/transactions-convenient-api/commit-retry.json +++ b/test/transactions-convenient-api/commit-retry.json @@ -304,6 +304,9 @@ "commitTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, diff --git a/test/transactions/bulk.json b/test/transactions/bulk.json index ea4571c1d5..8a9793b8b3 100644 --- a/test/transactions/bulk.json +++ b/test/transactions/bulk.json @@ -304,9 +304,7 @@ "$set": { "x": 1 } - }, - "multi": false, - "upsert": false + } }, { "q": { @@ -317,7 +315,6 @@ "x": 2 } }, - "multi": false, "upsert": true } ], @@ -379,9 +376,7 @@ }, "u": { "y": 1 - }, - "multi": false, - "upsert": false + } }, { "q": { @@ -389,9 +384,7 @@ }, "u": { "y": 2 - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -454,8 +447,7 @@ "z": 1 } }, - "multi": true, - "upsert": false + "multi": true } ], "ordered": true, diff --git a/test/transactions/causal-consistency.json b/test/transactions/causal-consistency.json index f1ca3d83a8..0e81bf2ff2 100644 --- a/test/transactions/causal-consistency.json +++ b/test/transactions/causal-consistency.json @@ -40,8 +40,7 @@ "$inc": { "count": 1 } - }, - "upsert": false + } }, "result": { "matchedCount": 1, @@ -65,8 +64,7 @@ "$inc": { "count": 1 } - }, - "upsert": false + } }, "result": { "matchedCount": 1, @@ -93,9 +91,7 @@ "$inc": { "count": 1 } - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -123,9 +119,7 @@ "$inc": { "count": 1 } - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -212,8 +206,7 @@ "$inc": { "count": 1 } - }, - "upsert": false + } }, "result": { "matchedCount": 1, @@ -260,9 +253,7 @@ "$inc": { "count": 1 } - }, - "multi": false, - "upsert": false + } } ], "ordered": true, diff --git a/test/transactions/error-labels.json b/test/transactions/error-labels.json index 8662b6d76d..2d3eed3ccc 100644 --- a/test/transactions/error-labels.json +++ b/test/transactions/error-labels.json @@ -134,6 +134,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -223,6 +224,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -312,6 +314,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -408,6 +411,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -461,7 +465,7 @@ } }, { - "description": "add transient label to connection errors", + "description": "add TransientTransactionError label to connection errors, but do not add RetryableWriteError label", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -496,6 +500,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -511,6 +516,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -533,6 +539,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -549,6 +556,7 @@ "TransientTransactionError" ], "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ] } @@ -663,7 +671,7 @@ } }, { - "description": "add unknown commit label to connection errors", + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -699,6 +707,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -801,7 +810,7 @@ } }, { - "description": "add unknown commit label to retryable commit errors", + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -811,7 +820,10 @@ "failCommands": [ "commitTransaction" ], - "errorCode": 11602 + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ] } }, "operations": [ @@ -837,6 +849,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -939,7 +952,7 @@ } }, { - "description": "add unknown commit label to writeConcernError ShutdownInProgress", + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -951,7 +964,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, @@ -985,6 +1001,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -1089,7 +1106,104 @@ } }, { - "description": "add unknown commit label to writeConcernError WriteConcernFailed", + "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0", + "arguments": { + "options": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "errorLabelsContain": [], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1138,6 +1252,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } @@ -1220,7 +1335,7 @@ } }, { - "description": "add unknown commit label to writeConcernError WriteConcernFailed with wtimeout", + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed with wtimeout", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1273,6 +1388,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } @@ -1355,7 +1471,7 @@ } }, { - "description": "omit unknown commit label to writeConcernError UnsatisfiableWriteConcern", + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1401,6 +1517,7 @@ "object": "session0", "result": { "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError", "UnknownTransactionCommitResult" ] @@ -1461,7 +1578,7 @@ } }, { - "description": "omit unknown commit label to writeConcernError UnknownReplWriteConcern", + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1507,6 +1624,7 @@ "object": "session0", "result": { "errorLabelsOmit": [ + "RetryableWriteConcern", "TransientTransactionError", "UnknownTransactionCommitResult" ] @@ -1567,7 +1685,7 @@ } }, { - "description": "do not add unknown commit label to MaxTimeMSExpired inside transactions", + "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1614,6 +1732,7 @@ }, "result": { "errorLabelsOmit": [ + "RetryableWriteError", "UnknownTransactionCommitResult", "TransientTransactionError" ] @@ -1696,7 +1815,7 @@ } }, { - "description": "add unknown commit label to MaxTimeMSExpired", + "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1743,6 +1862,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } @@ -1827,7 +1947,7 @@ } }, { - "description": "add unknown commit label to writeConcernError MaxTimeMSExpired", + "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1877,6 +1997,7 @@ "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ + "RetryableWriteError", "TransientTransactionError" ] } diff --git a/test/transactions/mongos-recovery-token.json b/test/transactions/mongos-recovery-token.json index 50c7349c1e..35ef45a039 100644 --- a/test/transactions/mongos-recovery-token.json +++ b/test/transactions/mongos-recovery-token.json @@ -181,7 +181,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } } diff --git a/test/transactions/pin-mongos.json b/test/transactions/pin-mongos.json index 5eb4fc57d9..8e9d049d04 100644 --- a/test/transactions/pin-mongos.json +++ b/test/transactions/pin-mongos.json @@ -875,7 +875,7 @@ "failCommands": [ "commitTransaction" ], - "errorCode": 50 + "errorCode": 51 } } } @@ -887,7 +887,7 @@ "errorLabelsOmit": [ "TransientTransactionError" ], - "errorCode": 50 + "errorCode": 51 } }, { diff --git a/test/transactions/retryable-abort-errorLabels.json b/test/transactions/retryable-abort-errorLabels.json new file mode 100644 index 0000000000..1110ce2c32 --- /dev/null +++ b/test/transactions/retryable-abort-errorLabels.json @@ -0,0 +1,204 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "abortTransaction only retries once with RetryableWriteError from server", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "abortTransaction does not retry without RetryableWriteError label", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "abortTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "abortTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/transactions/retryable-abort.json b/test/transactions/retryable-abort.json index f6b7b0e49a..5a3aaa7bf8 100644 --- a/test/transactions/retryable-abort.json +++ b/test/transactions/retryable-abort.json @@ -413,6 +413,9 @@ "abortTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -514,6 +517,9 @@ "abortTransaction" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -615,6 +621,9 @@ "abortTransaction" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -716,6 +725,9 @@ "abortTransaction" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -817,6 +829,9 @@ "abortTransaction" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -918,6 +933,9 @@ "abortTransaction" ], "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1019,6 +1037,9 @@ "abortTransaction" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1120,6 +1141,9 @@ "abortTransaction" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1221,6 +1245,9 @@ "abortTransaction" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1322,6 +1349,9 @@ "abortTransaction" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1423,6 +1453,9 @@ "abortTransaction" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1525,6 +1558,9 @@ ], "writeConcernError": { "code": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } @@ -1639,6 +1675,9 @@ ], "writeConcernError": { "code": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } @@ -1753,6 +1792,9 @@ ], "writeConcernError": { "code": 189, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } @@ -1867,6 +1909,9 @@ ], "writeConcernError": { "code": 91, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } diff --git a/test/transactions/retryable-commit-errorLabels.json b/test/transactions/retryable-commit-errorLabels.json new file mode 100644 index 0000000000..e0818f237b --- /dev/null +++ b/test/transactions/retryable-commit-errorLabels.json @@ -0,0 +1,223 @@ +{ + "runOn": [ + { + "minServerVersion": "4.3.1", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "commitTransaction does not retry error without RetryableWriteError label", + "clientOptions": { + "retryWrites": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0", + "result": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + }, + { + "description": "commitTransaction retries once with RetryableWriteError from server", + "clientOptions": { + "retryWrites": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/transactions/retryable-commit.json b/test/transactions/retryable-commit.json index b17438b700..4895c6e0c2 100644 --- a/test/transactions/retryable-commit.json +++ b/test/transactions/retryable-commit.json @@ -57,6 +57,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -207,6 +208,7 @@ "object": "session0", "result": { "errorLabelsContain": [ + "RetryableWriteError", "UnknownTransactionCommitResult" ], "errorLabelsOmit": [ @@ -353,7 +355,9 @@ "result": { "errorCodeName": "Interrupted", "errorLabelsOmit": [ - "TransientTransactionError" + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" ] } } @@ -406,7 +410,7 @@ } }, { - "description": "commitTransaction fails after WriteConcernError Interrupted", + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -417,8 +421,8 @@ "commitTransaction" ], "writeConcernError": { - "code": 11601, - "errmsg": "operation was interrupted" + "code": 100, + "errmsg": "Not enough data-bearing nodes" } } }, @@ -452,7 +456,9 @@ "object": "session0", "result": { "errorLabelsOmit": [ - "TransientTransactionError" + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" ] } } @@ -629,6 +635,9 @@ "commitTransaction" ], "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -737,6 +746,9 @@ "commitTransaction" ], "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -845,6 +857,9 @@ "commitTransaction" ], "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -953,6 +968,9 @@ "commitTransaction" ], "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1061,6 +1079,9 @@ "commitTransaction" ], "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1169,6 +1190,9 @@ "commitTransaction" ], "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1277,6 +1301,9 @@ "commitTransaction" ], "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1385,6 +1412,9 @@ "commitTransaction" ], "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1493,6 +1523,9 @@ "commitTransaction" ], "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1601,6 +1634,9 @@ "commitTransaction" ], "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1709,6 +1745,9 @@ "commitTransaction" ], "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], "closeConnection": false } }, @@ -1818,6 +1857,9 @@ ], "writeConcernError": { "code": 11600, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } @@ -1937,6 +1979,9 @@ ], "writeConcernError": { "code": 11602, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } @@ -2056,6 +2101,9 @@ ], "writeConcernError": { "code": 189, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } @@ -2175,6 +2223,9 @@ ], "writeConcernError": { "code": 91, + "errorLabels": [ + "RetryableWriteError" + ], "errmsg": "Replication is being shut down" } } diff --git a/test/transactions/update.json b/test/transactions/update.json index 13cf2c9268..e33bf5b810 100644 --- a/test/transactions/update.json +++ b/test/transactions/update.json @@ -116,7 +116,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ], @@ -145,9 +144,7 @@ }, "u": { "y": 1 - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -179,8 +176,7 @@ "z": 1 } }, - "multi": true, - "upsert": false + "multi": true } ], "ordered": true, @@ -346,7 +342,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ], @@ -375,9 +370,7 @@ }, "u": { "y": 1 - }, - "multi": false, - "upsert": false + } } ], "ordered": true, @@ -409,8 +402,7 @@ "z": 1 } }, - "multi": true, - "upsert": false + "multi": true } ], "ordered": true, diff --git a/test/transactions/write-concern.json b/test/transactions/write-concern.json index 88d062635f..84b1ea3650 100644 --- a/test/transactions/write-concern.json +++ b/test/transactions/write-concern.json @@ -877,7 +877,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ], From 0eace78cf483ceecaf308cf7bdcec50e8e50ed7e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 May 2020 16:12:08 -0700 Subject: [PATCH 0136/2111] PYTHON-2158 Support speculative authentication attempts in connection handshake --- pymongo/auth.py | 103 +++++++++++++++++++++++++++++++++++------- pymongo/ismaster.py | 5 ++ pymongo/pool.py | 18 ++++++++ test/test_auth.py | 27 +++++++---- test/test_database.py | 47 +++++++++++++++---- test/test_ssl.py | 16 +++++-- 6 files changed, 177 insertions(+), 39 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index f37a0b4e58..9eca28c981 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -254,9 +254,22 @@ def _parse_scram_response(response): return dict(item.split(b"=", 1) for item in response.split(b",")) +def _authenticate_scram_start(credentials, mechanism): + username = credentials.username + user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") + nonce = standard_b64encode(os.urandom(32)) + first_bare = b"n=" + user + b",r=" + nonce + + cmd = SON([('saslStart', 1), + ('mechanism', mechanism), + ('payload', Binary(b"n,," + first_bare)), + ('autoAuthorize', 1), + ('options', {'skipEmptyExchange': True})]) + return nonce, first_bare, cmd + + def _authenticate_scram(credentials, sock_info, mechanism): """Authenticate using SCRAM.""" - username = credentials.username if mechanism == 'SCRAM-SHA-256': digest = "sha256" @@ -272,16 +285,14 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Make local _hmac = hmac.HMAC - user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") - nonce = standard_b64encode(os.urandom(32)) - first_bare = b"n=" + user + b",r=" + nonce - - cmd = SON([('saslStart', 1), - ('mechanism', mechanism), - ('payload', Binary(b"n,," + first_bare)), - ('autoAuthorize', 1), - ('options', {'skipEmptyExchange': True})]) - res = sock_info.command(source, cmd) + ctx = sock_info.auth_ctx.get(credentials) + if ctx and ctx.speculate_succeeded(): + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start( + credentials, mechanism) + res = sock_info.command(source, cmd) server_first = res['payload'] parsed = _parse_scram_response(server_first) @@ -516,15 +527,17 @@ def _authenticate_cram_md5(credentials, sock_info): def _authenticate_x509(credentials, sock_info): """Authenticate using MONGODB-X509. """ - query = SON([('authenticate', 1), - ('mechanism', 'MONGODB-X509')]) - if credentials.username is not None: - query['user'] = credentials.username - elif sock_info.max_wire_version < 5: + ctx = sock_info.auth_ctx.get(credentials) + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials).speculate_command() + if credentials.username is None and sock_info.max_wire_version < 5: raise ConfigurationError( "A username is required for MONGODB-X509 authentication " "when connected to MongoDB versions older than 3.4.") - sock_info.command('$external', query) + sock_info.command('$external', cmd) def _authenticate_aws(credentials, sock_info): @@ -597,6 +610,62 @@ def _authenticate_default(credentials, sock_info): } +class _AuthContext(object): + def __init__(self, credentials): + self.credentials = credentials + self.speculative_authenticate = None + + @staticmethod + def from_credentials(creds): + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return spec_cls(creds) + return None + + def speculate_command(self): + raise NotImplementedError + + def parse_response(self, ismaster): + self.speculative_authenticate = ismaster.speculative_authenticate + + def speculate_succeeded(self): + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__(self, credentials, mechanism): + super(_ScramContext, self).__init__(credentials) + self.scram_data = None + self.mechanism = mechanism + + def speculate_command(self): + nonce, first_bare, cmd = _authenticate_scram_start( + self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd['db'] = self.credentials.source + # Save for later use. + self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self): + cmd = SON([('authenticate', 1), + ('mechanism', 'MONGODB-X509')]) + if self.credentials.username is not None: + cmd['user'] = self.credentials.username + return cmd + + +_SPECULATIVE_AUTH_MAP = { + 'MONGODB-X509': _X509Context, + 'SCRAM-SHA-1': functools.partial(_ScramContext, mechanism='SCRAM-SHA-1'), + 'SCRAM-SHA-256': functools.partial(_ScramContext, + mechanism='SCRAM-SHA-256'), + 'DEFAULT': functools.partial(_ScramContext, mechanism='SCRAM-SHA-256'), +} + + def authenticate(credentials, sock_info): """Authenticate sock_info.""" mechanism = credentials.mechanism diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index 5223a12766..fb2d6a8682 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -169,6 +169,11 @@ def sasl_supported_mechs(self): """ return self._doc.get('saslSupportedMechs', []) + @property + def speculative_authenticate(self): + """The speculativeAuthenticate field.""" + return self._doc.get('speculativeAuthenticate') + @property def topology_version(self): return self._doc.get('topologyVersion') diff --git a/pymongo/pool.py b/pymongo/pool.py index b255af2203..de1d682e5d 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -468,6 +468,15 @@ def _negotiate_creds(all_credentials): return None +def _speculative_context(all_credentials): + """Return the _AuthContext to use for speculative auth, if any. + """ + if all_credentials and len(all_credentials) == 1: + creds = next(itervalues(all_credentials)) + return auth._AuthContext.from_credentials(creds) + return None + + class SocketInfo(object): """Store a socket with some metadata. @@ -501,6 +510,7 @@ def __init__(self, sock, pool, address, id): # Support for mechanism negotiation on the initial handshake. # Maps credential to saslSupportedMechs. self.negotiated_mechanisms = {} + self.auth_ctx = {} # The pool's generation changes with each reset() so we can close # sockets created before the last reset. @@ -522,6 +532,9 @@ def ismaster(self, metadata, cluster_time, all_credentials=None): creds = _negotiate_creds(all_credentials) if creds: cmd['saslSupportedMechs'] = creds.source + '.' + creds.username + auth_ctx = _speculative_context(all_credentials) + if auth_ctx: + cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() ismaster = IsMaster(self.command('admin', cmd, publish_events=False)) self.is_writable = ismaster.is_writable @@ -541,6 +554,10 @@ def ismaster(self, metadata, cluster_time, all_credentials=None): self.op_msg_enabled = ismaster.max_wire_version >= 6 if creds: self.negotiated_mechanisms[creds] = ismaster.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(ismaster) + if auth_ctx.speculate_succeeded(): + self.auth_ctx[auth_ctx.credentials] = auth_ctx return ismaster def command(self, dbname, spec, slave_ok=False, @@ -743,6 +760,7 @@ def authenticate(self, credentials): self.authset.add(credentials) # negotiated_mechanisms are no longer needed. self.negotiated_mechanisms.pop(credentials, None) + self.auth_ctx.pop(credentials, None) def validate_session(self, client, session): """Validate this session before use with client. diff --git a/test/test_auth.py b/test/test_auth.py index 14b2f94394..6dccc6ff11 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -418,18 +418,20 @@ def test_scram_skip_empty_exchange(self): client = rs_or_single_client_noauth( username='sha256', password='pwd', authSource='testscram', event_listeners=[listener]) - client.admin.command('isMaster') + client.testscram.command('dbstats') - # Assert we sent the skipEmptyExchange option. - first_event = listener.results['started'][0] - self.assertEqual(first_event.command_name, 'saslStart') - self.assertEqual( - first_event.command['options'], {'skipEmptyExchange': True}) + if client_context.version < (4, 4, -1): + # Assert we sent the skipEmptyExchange option. + first_event = listener.results['started'][0] + self.assertEqual(first_event.command_name, 'saslStart') + self.assertEqual( + first_event.command['options'], {'skipEmptyExchange': True}) # Assert the third exchange was skipped on servers that support it. + # Note that the first exchange occurs on the connection handshake. started = listener.started_command_names() - if client_context.version.at_least(4, 3, 3): - self.assertEqual(started, ['saslStart', 'saslContinue']) + if client_context.version.at_least(4, 4, -1): + self.assertEqual(started, ['saslContinue']) else: self.assertEqual( started, ['saslStart', 'saslContinue', 'saslContinue']) @@ -578,8 +580,13 @@ def test_scram(self): 'mongodb://both:pwd@%s:%d/testscram' % (host, port), event_listeners=[self.listener]) client.testscram.command('dbstats') - started = self.listener.results['started'][0] - self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') + if client_context.version.at_least(4, 4, -1): + # Speculative authentication in 4.4+ sends saslStart with the + # handshake. + self.assertEqual(self.listener.results['started'], []) + else: + started = self.listener.results['started'][0] + self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') client = rs_or_single_client_noauth( 'mongodb://both:pwd@%s:%d/testscram?authMechanism=SCRAM-SHA-1' diff --git a/test/test_database.py b/test/test_database.py index 0dfdccdea1..15f3be70fe 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -51,7 +51,8 @@ SkipTest, unittest, IntegrationTest) -from test.utils import (ignore_deprecations, +from test.utils import (EventListener, + ignore_deprecations, remove_all_users, rs_or_single_client_noauth, rs_or_single_client, @@ -677,14 +678,6 @@ def test_authenticate_multiple(self): admin_db_auth = self.client.admin users_db_auth = self.client.pymongo_test - # Non-root client. - client = rs_or_single_client_noauth() - admin_db = client.admin - users_db = client.pymongo_test - other_db = client.pymongo_test1 - - self.assertRaises(OperationFailure, users_db.test.find_one) - admin_db_auth.add_user( 'ro-admin', 'pass', @@ -695,15 +688,36 @@ def test_authenticate_multiple(self): 'user', 'pass', roles=["userAdmin", "readWrite"]) self.addCleanup(remove_all_users, users_db_auth) + # Non-root client. + listener = EventListener() + client = rs_or_single_client_noauth(event_listeners=[listener]) + admin_db = client.admin + users_db = client.pymongo_test + other_db = client.pymongo_test1 + + self.assertRaises(OperationFailure, users_db.test.find_one) + self.assertEqual(listener.started_command_names(), ['find']) + listener.reset() + # Regular user should be able to query its own db, but # no other. users_db.authenticate('user', 'pass') + if client_context.version.at_least(3, 0): + self.assertEqual(listener.started_command_names()[0], 'saslStart') + else: + self.assertEqual(listener.started_command_names()[0], 'getnonce') + self.assertEqual(0, users_db.test.count_documents({})) self.assertRaises(OperationFailure, other_db.test.find_one) + listener.reset() # Admin read-only user should be able to query any db, # but not write. admin_db.authenticate('ro-admin', 'pass') + if client_context.version.at_least(3, 0): + self.assertEqual(listener.started_command_names()[0], 'saslStart') + else: + self.assertEqual(listener.started_command_names()[0], 'getnonce') self.assertEqual(None, other_db.test.find_one()) self.assertRaises(OperationFailure, other_db.test.insert_one, {}) @@ -711,8 +725,23 @@ def test_authenticate_multiple(self): # Close all sockets. client.close() + listener.reset() # We should still be able to write to the regular user's db. self.assertTrue(users_db.test.delete_many({})) + names = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + # No speculation with multiple users (but we do skipEmptyExchange). + self.assertEqual( + names, ['saslStart', 'saslContinue', 'saslStart', + 'saslContinue', 'delete']) + elif client_context.version.at_least(3, 0): + self.assertEqual( + names, ['saslStart', 'saslContinue', 'saslContinue', + 'saslStart', 'saslContinue', 'saslContinue', 'delete']) + else: + self.assertEqual( + names, ['getnonce', 'authenticate', + 'getnonce', 'authenticate', 'delete']) # And read from other dbs... self.assertEqual(0, other_db.test.count_documents({})) diff --git a/test/test_ssl.py b/test/test_ssl.py index 0987354c79..006b62ec7a 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -39,9 +39,11 @@ SkipTest, unittest, HAVE_IPADDRESS) -from test.utils import (remove_all_users, +from test.utils import (EventListener, cat_files, - connected) + connected, + remove_all_users) + _HAVE_PYOPENSSL = False try: @@ -582,16 +584,24 @@ def test_mongodb_x509_auth(self): self.assertRaises(OperationFailure, noauth.pymongo_test.test.count) + listener = EventListener() auth = MongoClient( client_context.pair, authMechanism='MONGODB-X509', ssl=True, ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + ssl_certfile=CLIENT_PEM, + event_listeners=[listener]) if client_context.version.at_least(3, 3, 12): # No error auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. + self.assertEqual(names, ['find']) + else: + self.assertEqual(names, ['authenticate', 'find']) else: # Should require a username with self.assertRaises(ConfigurationError): From 29960237dcdd9d90402d6c76c4296aec3d01d3fc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 27 May 2020 14:28:49 -0700 Subject: [PATCH 0137/2111] PYTHON-2260 Include Python.h before any standard headers --- bson/buffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bson/buffer.c b/bson/buffer.c index 66672749fa..0b1941cb57 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -14,13 +14,13 @@ * limitations under the License. */ -#include -#include - /* Include Python.h so we can set Python's error indicator. */ #define PY_SSIZE_T_CLEAN #include "Python.h" +#include +#include + #include "buffer.h" #define INITIAL_BUFFER_SIZE 256 From 4760d0781505c0f13d5d1bc061ff89075927eb2f Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Tue, 12 May 2020 00:59:12 -0700 Subject: [PATCH 0138/2111] PYTHON-2152 Expand native UUID handling support; Implement UUID specification --- bson/__init__.py | 53 ++++----- bson/_cbsonmodule.c | 164 ++++++++-------------------- bson/binary.py | 236 ++++++++++++++++++++++++++++++++++------ bson/codec_options.py | 26 +++-- bson/json_util.py | 45 ++++---- doc/api/bson/binary.rst | 3 + pymongo/common.py | 12 +- test/test_binary.py | 229 +++++++++++++++++++++++++++++++++++++- test/test_bson.py | 4 +- test/test_common.py | 2 +- test/test_json_util.py | 34 +++++- 11 files changed, 575 insertions(+), 233 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index c8ac12e46e..37794c325c 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,9 +76,10 @@ from codecs import (utf_8_decode as _utf_8_decode, utf_8_encode as _utf_8_encode) -from bson.binary import (Binary, OLD_UUID_SUBTYPE, +from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, + OLD_UUID_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY, - UUIDLegacy) + UUIDLegacy, UUID_SUBTYPE) from bson.code import Code from bson.codec_options import ( CodecOptions, DEFAULT_CODEC_OPTIONS, _raw_document_class) @@ -303,26 +304,29 @@ def _get_binary(data, view, position, obj_end, opts, dummy1): end = position + length if length < 0 or end > obj_end: raise InvalidBSON('bad binary object length') - if subtype == 3: - # Java Legacy + + # Convert UUID subtypes to native UUIDs. + # TODO: PYTHON-2245 Decoding should follow UUID spec in PyMongo 4.0+ + if subtype in ALL_UUID_SUBTYPES: uuid_representation = opts.uuid_representation - if uuid_representation == JAVA_LEGACY: - java = data[position:end] - value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) - # C# legacy - elif uuid_representation == CSHARP_LEGACY: - value = uuid.UUID(bytes_le=data[position:end]) - # Python - else: - value = uuid.UUID(bytes=data[position:end]) - return value, end - if subtype == 4: - return uuid.UUID(bytes=data[position:end]), end + binary_value = Binary(data[position:end], subtype) + if uuid_representation == UuidRepresentation.UNSPECIFIED: + return binary_value, end + if subtype == UUID_SUBTYPE: + # Legacy behavior: use STANDARD with binary subtype 4. + uuid_representation = UuidRepresentation.STANDARD + elif uuid_representation == UuidRepresentation.STANDARD: + # subtype == OLD_UUID_SUBTYPE + # Legacy behavior: STANDARD is the same as PYTHON_LEGACY. + uuid_representation = UuidRepresentation.PYTHON_LEGACY + return binary_value.as_uuid(uuid_representation), end + # Python3 special case. Decode subtype 0 to 'bytes'. if PY3 and subtype == 0: value = data[position:end] else: value = Binary(data[position:end], subtype) + return value, end @@ -633,21 +637,8 @@ def _encode_binary(name, value, dummy0, dummy1): def _encode_uuid(name, value, dummy, opts): """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation - # Python Legacy Common Case - if uuid_representation == OLD_UUID_SUBTYPE: - return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes - # Java Legacy - elif uuid_representation == JAVA_LEGACY: - from_uuid = value.bytes - data = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] - return b"\x05" + name + b'\x10\x00\x00\x00\x03' + data - # C# legacy - elif uuid_representation == CSHARP_LEGACY: - # Microsoft GUID representation. - return b"\x05" + name + b'\x10\x00\x00\x00\x03' + value.bytes_le - # New - return b"\x05" + name + b'\x10\x00\x00\x00\x04' + value.bytes - + binval = Binary.from_uuid(value, uuid_representation=uuid_representation) + return _encode_binary(name, binval, dummy, opts) def _encode_objectid(name, value, dummy0, dummy1): """Encode bson.objectid.ObjectId.""" diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index ae28c1ba8a..f457f96b03 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -78,6 +78,7 @@ static struct module_state _state; #define STANDARD 4 #define JAVA_LEGACY 5 #define CSHARP_LEGACY 6 +#define UNSPECIFIED 0 #define BSON_MAX_SIZE 2147483647 /* The smallest possible BSON document, i.e. "{}" */ @@ -583,19 +584,6 @@ static int write_element_to_buffer(PyObject* self, buffer_t buffer, return result; } -static void -_fix_java(const char* in, char* out) { - int i, j; - for (i = 0, j = 7; i < j; i++, j--) { - out[i] = in[j]; - out[j] = in[i]; - } - for (i = 8, j = 15; i < j; i++, j--) { - out[i] = in[j]; - out[j] = in[i]; - } -} - static void _set_cannot_encode(PyObject* value) { PyObject* type = NULL; @@ -1276,14 +1264,9 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, uuid_type = _get_object(state->UUID, "uuid", "UUID"); if (uuid_type && PyObject_IsInstance(value, uuid_type)) { - /* Just a special case of Binary above, but - * simpler to do as a separate case. */ - PyObject* bytes; - /* Could be bytes, bytearray, str... */ - const char* data; - /* UUID is always 16 bytes */ - int size = 16; - char subtype; + PyObject* binary_type = NULL; + PyObject* binary_value = NULL; + int result; Py_DECREF(uuid_type); /* PyObject_IsInstance returns -1 on error */ @@ -1291,58 +1274,25 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - if (options->uuid_rep == JAVA_LEGACY - || options->uuid_rep == CSHARP_LEGACY) { - subtype = 3; - } - else { - subtype = options->uuid_rep; - } - - *(buffer_get_buffer(buffer) + type_byte) = 0x05; - if (!buffer_write_int32(buffer, (int32_t)size)) { - return 0; - } - if (!buffer_write_bytes(buffer, &subtype, 1)) { + binary_type = _get_object(state->Binary, "bson", "Binary"); + if (binary_type == NULL) { return 0; } - if (options->uuid_rep == CSHARP_LEGACY) { - /* Legacy C# byte order */ - bytes = PyObject_GetAttrString(value, "bytes_le"); - } - else { - bytes = PyObject_GetAttrString(value, "bytes"); - } - if (!bytes) { - return 0; - } -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_AsString(bytes); -#else - data = PyString_AsString(bytes); -#endif - if (data == NULL) { - Py_DECREF(bytes); + binary_value = PyObject_CallMethod(binary_type, "from_uuid", "(Oi)", value, options->uuid_rep); + if (binary_value == NULL) { + Py_DECREF(binary_type); return 0; } - if (options->uuid_rep == JAVA_LEGACY) { - /* Store in legacy java byte order. */ - char as_legacy_java[16]; - _fix_java(data, as_legacy_java); - if (!buffer_write_bytes(buffer, as_legacy_java, size)) { - Py_DECREF(bytes); - return 0; - } - } - else { - if (!buffer_write_bytes(buffer, data, size)) { - Py_DECREF(bytes); - return 0; - } - } - Py_DECREF(bytes); - return 1; + + result = _write_element_to_buffer(self, buffer, + type_byte, binary_value, + check_keys, options, + in_custom_call, + in_fallback_call); + Py_DECREF(binary_type); + Py_DECREF(binary_value); + return result; } Py_XDECREF(mapping_type); Py_XDECREF(uuid_type); @@ -1823,7 +1773,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned* position, unsigned char type, unsigned max, const codec_options_t* options) { struct module_state *state = GETSTATE(self); - PyObject* value = NULL; switch (type) { case 1: @@ -2063,70 +2012,49 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (!data) { goto invalid; } - /* Encode as UUID, not Binary */ + /* Encode as UUID or Binary based on options->uuid_rep + * TODO: PYTHON-2245 Decoding should follow UUID spec in PyMongo 4.0 */ if (subtype == 3 || subtype == 4) { - PyObject* kwargs; - PyObject* args = PyTuple_New(0); + PyObject* binary_type = NULL; + PyObject* binary_value = NULL; + char uuid_rep = options->uuid_rep; + /* UUID should always be 16 bytes */ - if (!args || length != 16) { - Py_DECREF(data); - goto invalid; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(data); - Py_DECREF(args); - goto invalid; + if (length != 16) { + goto uuiderror; } - /* - * From this point, we hold refs to args, kwargs, and data. - * If anything fails, goto uuiderror to clean them up. - */ - if (subtype == 3 && options->uuid_rep == CSHARP_LEGACY) { - /* Legacy C# byte order */ - if ((PyDict_SetItemString(kwargs, "bytes_le", data)) == -1) - goto uuiderror; + binary_type = _get_object(state->Binary, "bson", "Binary"); + if (binary_type == NULL) { + goto uuiderror; } - else { - if (subtype == 3 && options->uuid_rep == JAVA_LEGACY) { - /* Convert from legacy java byte order */ - char big_endian[16]; - _fix_java(buffer + *position, big_endian); - /* Free the previously created PyString object */ - Py_DECREF(data); -#if PY_MAJOR_VERSION >= 3 - data = PyBytes_FromStringAndSize(big_endian, length); -#else - data = PyString_FromStringAndSize(big_endian, length); -#endif - if (data == NULL) - goto uuiderror; - } - if ((PyDict_SetItemString(kwargs, "bytes", data)) == -1) - goto uuiderror; + binary_value = PyObject_CallFunction(binary_type, "(Oi)", data, subtype); + if (binary_value == NULL) { + goto uuiderror; } - if ((type_to_create = _get_object(state->UUID, "uuid", "UUID"))) { - value = PyObject_Call(type_to_create, args, kwargs); - Py_DECREF(type_to_create); + + if (uuid_rep == UNSPECIFIED) { + value = binary_value; + Py_INCREF(value); + } else { + if (subtype == 4) { + uuid_rep = STANDARD; + } else if (uuid_rep == STANDARD) { + uuid_rep = PYTHON_LEGACY; + } + value = PyObject_CallMethod(binary_value, "as_uuid", "(i)", uuid_rep); } - Py_DECREF(args); - Py_DECREF(kwargs); + uuiderror: + Py_XDECREF(binary_type); + Py_XDECREF(binary_value); Py_DECREF(data); if (!value) { goto invalid; } - *position += length; break; - - uuiderror: - Py_DECREF(args); - Py_DECREF(kwargs); - Py_XDECREF(data); - goto invalid; } #if PY_MAJOR_VERSION >= 3 diff --git a/bson/binary.py b/bson/binary.py index 1c833b5a56..cb89c69da2 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -13,6 +13,7 @@ # limitations under the License. from uuid import UUID +from warnings import warn from bson.py3compat import PY3 @@ -55,57 +56,104 @@ Changed to subtype 4. """ -STANDARD = UUID_SUBTYPE -"""The standard UUID representation. -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary, using RFC-4122 byte order with -binary subtype :data:`UUID_SUBTYPE`. +class UuidRepresentation: + UNSPECIFIED = 0 + """An unspecified UUID representation. -.. versionadded:: 3.0 -""" + When configured, :class:`uuid.UUID` instances will **not** be + automatically encoded to or decoded from :class:`~bson.binary.Binary`. + When encoding a :class:`uuid.UUID` instance, an error will be raised. + To encode a :class:`uuid.UUID` instance with this configuration, it must + be wrapped in the :class:`~bson.binary.Binary` class by the application + code. When decoding a BSON binary field with a UUID subtype, a + :class:`~bson.binary.Binary` instance will be returned instead of a + :class:`uuid.UUID` instance. + + .. versionadded:: 3.11 + """ + + STANDARD = UUID_SUBTYPE + """The standard UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary, using RFC-4122 byte order with + binary subtype :data:`UUID_SUBTYPE`. + + .. versionadded:: 3.11 + """ + + PYTHON_LEGACY = OLD_UUID_SUBTYPE + """The Python legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary, using RFC-4122 byte order with + binary subtype :data:`OLD_UUID_SUBTYPE`. + + .. versionadded:: 3.11 + """ + + JAVA_LEGACY = 5 + """The Java legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, + using the Java driver's legacy byte order. + + .. versionadded:: 3.11 + """ + + CSHARP_LEGACY = 6 + """The C#/.net legacy UUID representation. + + :class:`uuid.UUID` instances will automatically be encoded to + and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, + using the C# driver's legacy byte order. + + .. versionadded:: 3.11 + """ -PYTHON_LEGACY = OLD_UUID_SUBTYPE -"""The Python legacy UUID representation. -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary, using RFC-4122 byte order with -binary subtype :data:`OLD_UUID_SUBTYPE`. +STANDARD = UuidRepresentation.STANDARD +"""An alias for :data:`UuidRepresentation.STANDARD`. .. versionadded:: 3.0 """ -JAVA_LEGACY = 5 -"""The Java legacy UUID representation. +PYTHON_LEGACY = UuidRepresentation.PYTHON_LEGACY +"""An alias for :data:`UuidRepresentation.PYTHON_LEGACY`. + +.. versionadded:: 3.0 +""" -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, -using the Java driver's legacy byte order. +JAVA_LEGACY = UuidRepresentation.JAVA_LEGACY +"""An alias for :data:`UuidRepresentation.JAVA_LEGACY`. .. versionchanged:: 3.6 - BSON binary subtype 4 is decoded using RFC-4122 byte order. + BSON binary subtype 4 is decoded using RFC-4122 byte order. .. versionadded:: 2.3 """ -CSHARP_LEGACY = 6 -"""The C#/.net legacy UUID representation. - -:class:`uuid.UUID` instances will automatically be encoded to -and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, -using the C# driver's legacy byte order. +CSHARP_LEGACY = UuidRepresentation.CSHARP_LEGACY +"""An alias for :data:`UuidRepresentation.CSHARP_LEGACY`. .. versionchanged:: 3.6 - BSON binary subtype 4 is decoded using RFC-4122 byte order. + BSON binary subtype 4 is decoded using RFC-4122 byte order. .. versionadded:: 2.3 """ ALL_UUID_SUBTYPES = (OLD_UUID_SUBTYPE, UUID_SUBTYPE) -ALL_UUID_REPRESENTATIONS = (STANDARD, PYTHON_LEGACY, JAVA_LEGACY, CSHARP_LEGACY) +ALL_UUID_REPRESENTATIONS = (UuidRepresentation.UNSPECIFIED, + UuidRepresentation.STANDARD, + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY) UUID_REPRESENTATION_NAMES = { - PYTHON_LEGACY: 'PYTHON_LEGACY', - STANDARD: 'STANDARD', - JAVA_LEGACY: 'JAVA_LEGACY', - CSHARP_LEGACY: 'CSHARP_LEGACY'} + UuidRepresentation.UNSPECIFIED: 'UuidRepresentation.UNSPECIFIED', + UuidRepresentation.STANDARD: 'UuidRepresentation.STANDARD', + UuidRepresentation.PYTHON_LEGACY: 'UuidRepresentation.PYTHON_LEGACY', + UuidRepresentation.JAVA_LEGACY: 'UuidRepresentation.JAVA_LEGACY', + UuidRepresentation.CSHARP_LEGACY: 'UuidRepresentation.CSHARP_LEGACY'} MD5_SUBTYPE = 5 """BSON binary subtype for an MD5 hash. @@ -155,6 +203,99 @@ def __new__(cls, data, subtype=BINARY_SUBTYPE): self.__subtype = subtype return self + @classmethod + def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): + """Create a BSON Binary object from a Python UUID. + + Creates a :class:`~bson.binary.Binary` object from a + :class:`uuid.UUID` instance. Assumes that the native + :class:`uuid.UUID` instance uses the byte-order implied by the + provided ``uuid_representation``. + + Raises :exc:`TypeError` if `uuid` is not an instance of + :class:`~uuid.UUID`. + + :Parameters: + - `uuid`: A :class:`uuid.UUID` instance. + - `uuid_representation`: A member of + :class:`~bson.binary.UuidRepresentation`. Default: + :const:`~bson.binary.UuidRepresentation.STANDARD`. + + .. versionadded:: 3.11 + """ + if not isinstance(uuid, UUID): + raise TypeError("uuid must be an instance of uuid.UUID") + + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError("uuid_representation must be a value " + "from bson.binary.UuidRepresentation") + + if uuid_representation == UuidRepresentation.UNSPECIFIED: + raise ValueError( + "cannot encode native uuid.UUID with " + "UuidRepresentation.UNSPECIFIED. UUIDs can be manually " + "converted to bson.Binary instances using " + "bson.Binary.from_uuid() or a different UuidRepresentation " + "can be configured.") + + subtype = OLD_UUID_SUBTYPE + if uuid_representation == UuidRepresentation.PYTHON_LEGACY: + payload = uuid.bytes + elif uuid_representation == UuidRepresentation.JAVA_LEGACY: + from_uuid = uuid.bytes + payload = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] + elif uuid_representation == UuidRepresentation.CSHARP_LEGACY: + payload = uuid.bytes_le + else: + # uuid_representation == UuidRepresentation.STANDARD + subtype = UUID_SUBTYPE + payload = uuid.bytes + + return cls(payload, subtype) + + def as_uuid(self, uuid_representation=UuidRepresentation.STANDARD): + """Create a Python UUID from this BSON Binary object. + + Decodes this binary object as a native :class:`uuid.UUID` instance + with the provided ``uuid_representation``. + + Raises :exc:`ValueError` if this :class:`~bson.binary.Binary` instance + does not contain a UUID. + + :Parameters: + - `uuid_representation`: A member of + :class:`~bson.binary.UuidRepresentation`. Default: + :const:`~bson.binary.UuidRepresentation.STANDARD`. + + .. versionadded:: 3.11 + """ + if self.subtype not in ALL_UUID_SUBTYPES: + raise ValueError("cannot decode subtype %s as a uuid" % ( + self.subtype,)) + + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError("uuid_representation must be a value from " + "bson.binary.UuidRepresentation") + + if uuid_representation == UuidRepresentation.UNSPECIFIED: + raise ValueError("uuid_representation cannot be UNSPECIFIED") + elif uuid_representation == UuidRepresentation.PYTHON_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes=self) + elif uuid_representation == UuidRepresentation.JAVA_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes=self[0:8][::-1] + self[8:16][::-1]) + elif uuid_representation == UuidRepresentation.CSHARP_LEGACY: + if self.subtype == OLD_UUID_SUBTYPE: + return UUID(bytes_le=self) + else: + # uuid_representation == UuidRepresentation.STANDARD + if self.subtype == UUID_SUBTYPE: + return UUID(bytes=self) + + raise ValueError("cannot decode subtype %s to %s" % ( + self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation])) + @property def subtype(self): """Subtype of this binary data. @@ -188,7 +329,26 @@ def __repr__(self): class UUIDLegacy(Binary): - """UUID wrapper to support working with UUIDs stored as PYTHON_LEGACY. + """**DEPRECATED** - UUID wrapper to support working with UUIDs stored as + PYTHON_LEGACY. + + .. note:: This class has been deprecated and will be removed in + PyMongo 4.0. Use :meth:`~bson.binary.Binary.from_uuid` and + :meth:`~bson.binary.Binary.as_uuid` with the appropriate + :class:`~bson.binary.UuidRepresentation` to handle legacy-formatted + UUIDs instead.:: + + from bson import Binary, UUIDLegacy, UuidRepresentation + import uuid + + my_uuid = uuid.uuid4() + legacy_uuid = UUIDLegacy(my_uuid) + binary_uuid = Binary.from_uuid( + my_uuid, UuidRepresentation.PYTHON_LEGACY) + + assert legacy_uuid == binary_uuid + assert legacy_uuid.uuid == binary_uuid.as_uuid( + UuidRepresentation.PYTHON_LEGACY) .. doctest:: @@ -218,13 +378,25 @@ class UUIDLegacy(Binary): >>> coll.find_one({'uuid': my_uuid})['uuid'] UUID('...') - Raises TypeError if `obj` is not an instance of :class:`~uuid.UUID`. + Raises :exc:`TypeError` if `obj` is not an instance of :class:`~uuid.UUID`. :Parameters: - `obj`: An instance of :class:`~uuid.UUID`. + + .. versionchanged:: 3.11 + Deprecated. The same functionality can be replicated using the + :meth:`~Binary.from_uuid` and :meth:`~Binary.to_uuid` methods with + :data:`~UuidRepresentation.PYTHON_LEGACY`. + .. versionadded:: 2.1 """ def __new__(cls, obj): + warn( + "The UUIDLegacy class has been deprecated and will be removed " + "in PyMongo 4.0. Use the Binary.from_uuid() and Binary.to_uuid() " + "with the appropriate UuidRepresentation to handle " + "legacy-formatted UUIDs instead.", + DeprecationWarning, stacklevel=2) if not isinstance(obj, UUID): raise TypeError("obj must be an instance of uuid.UUID") self = Binary.__new__(cls, obj.bytes, OLD_UUID_SUBTYPE) diff --git a/bson/codec_options.py b/bson/codec_options.py index 471d695a98..a514cc92d0 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -15,14 +15,15 @@ """Tools for specifying BSON codec options.""" import datetime +import warnings from abc import abstractmethod from collections import namedtuple from bson.py3compat import ABC, abc, abstractproperty, string_type -from bson.binary import (ALL_UUID_REPRESENTATIONS, - PYTHON_LEGACY, +from bson.binary import (UuidRepresentation, + ALL_UUID_REPRESENTATIONS, UUID_REPRESENTATION_NAMES) @@ -239,7 +240,8 @@ class CodecOptions(_options_base): """ def __new__(cls, document_class=dict, - tz_aware=False, uuid_representation=PYTHON_LEGACY, + tz_aware=False, + uuid_representation=None, unicode_decode_error_handler="strict", tzinfo=None, type_registry=None): if not (issubclass(document_class, abc.MutableMapping) or @@ -249,9 +251,17 @@ def __new__(cls, document_class=dict, "sublass of collections.MutableMapping") if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") - if uuid_representation not in ALL_UUID_REPRESENTATIONS: + if uuid_representation is None: + warnings.warn( + "Starting in PyMongo 4.0, the default uuidRepresentation " + "will be changed to 'unspecified'. Applications will need to " + "explicitly set 'uuidRepresentation=pythonLegacy' in the " + "connection string to preserve current behavior.", + DeprecationWarning, stacklevel=2) + uuid_representation = UuidRepresentation.PYTHON_LEGACY + elif uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError("uuid_representation must be a value " - "from bson.binary.ALL_UUID_REPRESENTATIONS") + "from bson.binary.UuidRepresentation") if not isinstance(unicode_decode_error_handler, (string_type, None)): raise ValueError("unicode_decode_error_handler must be a string " "or None") @@ -314,7 +324,8 @@ def with_options(self, **kwargs): ) -DEFAULT_CODEC_OPTIONS = CodecOptions() +DEFAULT_CODEC_OPTIONS = CodecOptions( + uuid_representation=UuidRepresentation.PYTHON_LEGACY) def _parse_codec_options(options): @@ -324,8 +335,7 @@ def _parse_codec_options(options): 'document_class', DEFAULT_CODEC_OPTIONS.document_class), tz_aware=options.get( 'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware), - uuid_representation=options.get( - 'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation), + uuid_representation=options.get('uuidrepresentation'), unicode_decode_error_handler=options.get( 'unicode_decode_error_handler', DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler), diff --git a/bson/json_util.py b/bson/json_util.py index 35bdc3070d..14c364e7fb 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -110,14 +110,13 @@ import json import math import re -import sys import uuid from pymongo.errors import ConfigurationError import bson -from bson import EPOCH_AWARE, EPOCH_NAIVE, RE_TYPE, SON -from bson.binary import (Binary, JAVA_LEGACY, CSHARP_LEGACY, OLD_UUID_SUBTYPE, +from bson import EPOCH_AWARE, RE_TYPE, SON +from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, UUID_SUBTYPE) from bson.code import Code from bson.codec_options import CodecOptions @@ -245,9 +244,9 @@ class JSONOptions(CodecOptions): - `document_class`: BSON documents returned by :func:`loads` will be decoded to an instance of this class. Must be a subclass of :class:`collections.MutableMapping`. Defaults to :class:`dict`. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`uuid.UUID`. Defaults to - :const:`~bson.binary.PYTHON_LEGACY`. + - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` + to use when encoding and decoding instances of :class:`uuid.UUID`. + Defaults to :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY`. - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type `Date` will be decoded to timezone aware instances of :class:`datetime.datetime`. Otherwise they will be naive. Defaults @@ -494,14 +493,20 @@ def _parse_legacy_uuid(doc): def _binary_or_uuid(data, subtype, json_options): # special handling for UUID - if subtype == OLD_UUID_SUBTYPE: - if json_options.uuid_representation == CSHARP_LEGACY: - return uuid.UUID(bytes_le=data) - if json_options.uuid_representation == JAVA_LEGACY: - data = data[7::-1] + data[:7:-1] - return uuid.UUID(bytes=data) - if subtype == UUID_SUBTYPE: - return uuid.UUID(bytes=data) + if subtype in ALL_UUID_SUBTYPES: + uuid_representation = json_options.uuid_representation + binary_value = Binary(data, subtype) + if uuid_representation == UuidRepresentation.UNSPECIFIED: + return binary_value + if subtype == UUID_SUBTYPE: + # Legacy behavior: use STANDARD with binary subtype 4. + uuid_representation = UuidRepresentation.STANDARD + elif uuid_representation == UuidRepresentation.STANDARD: + # subtype == OLD_UUID_SUBTYPE + # Legacy behavior: STANDARD is the same as PYTHON_LEGACY. + uuid_representation = UuidRepresentation.PYTHON_LEGACY + return binary_value.as_uuid(uuid_representation) + if PY3 and subtype == 0: return data return Binary(data, subtype) @@ -795,15 +800,9 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): return _encode_binary(obj, 0, json_options) if isinstance(obj, uuid.UUID): if json_options.strict_uuid: - data = obj.bytes - subtype = OLD_UUID_SUBTYPE - if json_options.uuid_representation == CSHARP_LEGACY: - data = obj.bytes_le - elif json_options.uuid_representation == JAVA_LEGACY: - data = data[7::-1] + data[:7:-1] - elif json_options.uuid_representation == UUID_SUBTYPE: - subtype = UUID_SUBTYPE - return _encode_binary(data, subtype, json_options) + binval = Binary.from_uuid( + obj, uuid_representation=json_options.uuid_representation) + return _encode_binary(binval, binval.subtype, json_options) else: return {"$uuid": obj.hex} if isinstance(obj, Decimal128): diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index ab9d58f819..ab4d599f8c 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -16,6 +16,9 @@ .. autodata:: MD5_SUBTYPE .. autodata:: USER_DEFINED_SUBTYPE + .. autoclass:: UuidRepresentation + :members: + .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) :members: :show-inheritance: diff --git a/pymongo/common.py b/pymongo/common.py index 945b1f6921..d0177cea00 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -19,8 +19,7 @@ import warnings from bson import SON -from bson.binary import (STANDARD, PYTHON_LEGACY, - JAVA_LEGACY, CSHARP_LEGACY) +from bson.binary import UuidRepresentation from bson.codec_options import CodecOptions, TypeRegistry from bson.py3compat import abc, integer_types, iteritems, string_type, PY3 from bson.raw_bson import RawBSONDocument @@ -149,10 +148,11 @@ def raise_config_error(key, dummy): # Mapping of URI uuid representation options to valid subtypes. _UUID_REPRESENTATIONS = { - 'standard': STANDARD, - 'pythonLegacy': PYTHON_LEGACY, - 'javaLegacy': JAVA_LEGACY, - 'csharpLegacy': CSHARP_LEGACY + 'unspecified': UuidRepresentation.UNSPECIFIED, + 'standard': UuidRepresentation.STANDARD, + 'pythonLegacy': UuidRepresentation.PYTHON_LEGACY, + 'javaLegacy': UuidRepresentation.JAVA_LEGACY, + 'csharpLegacy': UuidRepresentation.CSHARP_LEGACY } diff --git a/test/test_binary.py b/test/test_binary.py index 392cd97c84..39de987c10 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -31,8 +31,10 @@ from bson.codec_options import CodecOptions from bson.py3compat import PY3 from bson.son import SON +from pymongo.common import validate_uuid_representation from pymongo.mongo_client import MongoClient -from test import client_context, unittest +from pymongo.write_concern import WriteConcern +from test import client_context, unittest, IntegrationTest from test.utils import ignore_deprecations @@ -144,11 +146,13 @@ def test_hash(self): self.assertEqual(hash(Binary(b"hello world", 42)), hash(two)) def test_uuid_subtype_4(self): - """uuid_representation should be ignored when decoding subtype 4.""" + """uuid_representation should be ignored when decoding subtype 4 for + all UuidRepresentation values except UNSPECIFIED.""" expected_uuid = uuid.uuid4() doc = {"uuid": Binary(expected_uuid.bytes, 4)} encoded = encode(doc) - for uuid_representation in ALL_UUID_REPRESENTATIONS: + for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - + {UuidRepresentation.UNSPECIFIED}): options = CodecOptions(uuid_representation=uuid_representation) self.assertEqual(expected_uuid, decode(encoded, options)["uuid"]) @@ -296,8 +300,9 @@ def test_uuid_queries(self): self.assertEqual(1, coll.count_documents({})) # Test UUIDLegacy queries. - coll = db.get_collection("test", - CodecOptions(uuid_representation=STANDARD)) + coll = db.get_collection( + "test", CodecOptions( + uuid_representation=UuidRepresentation.STANDARD)) self.assertEqual(0, coll.find({'uuid': uu}).count()) cur = coll.find({'uuid': UUIDLegacy(uu)}) self.assertEqual(1, cur.count()) @@ -364,5 +369,219 @@ def test_buffer_protocol(self): self.assertEqual(b0, Binary(array.array('B', b'123'), 2)) +class TestUuidSpecExplicitCoding(unittest.TestCase): + @classmethod + def setUpClass(cls): + super(TestUuidSpecExplicitCoding, cls).setUpClass() + cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") + + @staticmethod + def _hex_to_bytes(hexstring): + if PY3: + return bytes.fromhex(hexstring) + return hexstring.decode("hex") + + # Explicit encoding prose test #1 + def test_encoding_1(self): + obj = Binary.from_uuid(self.uuid) + expected_obj = Binary( + self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + self.assertEqual(obj, expected_obj) + + def _test_encoding_w_uuid_rep( + self, uuid_rep, expected_hexstring, expected_subtype): + obj = Binary.from_uuid(self.uuid, uuid_rep) + expected_obj = Binary( + self._hex_to_bytes(expected_hexstring), expected_subtype) + self.assertEqual(obj, expected_obj) + + # Explicit encoding prose test #2 + def test_encoding_2(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.STANDARD, + "00112233445566778899AABBCCDDEEFF", 4) + + # Explicit encoding prose test #3 + def test_encoding_3(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.JAVA_LEGACY, + "7766554433221100FFEEDDCCBBAA9988", 3) + + # Explicit encoding prose test #4 + def test_encoding_4(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.CSHARP_LEGACY, + "33221100554477668899AABBCCDDEEFF", 3) + + # Explicit encoding prose test #5 + def test_encoding_5(self): + self._test_encoding_w_uuid_rep( + UuidRepresentation.PYTHON_LEGACY, + "00112233445566778899AABBCCDDEEFF", 3) + + # Explicit encoding prose test #6 + def test_encoding_6(self): + with self.assertRaises(ValueError): + Binary.from_uuid(self.uuid, UuidRepresentation.UNSPECIFIED) + + # Explicit decoding prose test #1 + def test_decoding_1(self): + obj = Binary( + self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + + # Case i: + self.assertEqual(obj.as_uuid(), self.uuid) + # Case ii: + self.assertEqual(obj.as_uuid(UuidRepresentation.STANDARD), self.uuid) + # Cases iii-vi: + for uuid_rep in (UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.PYTHON_LEGACY): + with self.assertRaises(ValueError): + obj.as_uuid(uuid_rep) + + def _test_decoding_legacy(self, hexstring, uuid_rep): + obj = Binary(self._hex_to_bytes(hexstring), 3) + + # Case i: + with self.assertRaises(ValueError): + obj.as_uuid() + # Cases ii-iii: + for rep in (UuidRepresentation.STANDARD, + UuidRepresentation.UNSPECIFIED): + with self.assertRaises(ValueError): + obj.as_uuid(rep) + # Case iv: + self.assertEqual(obj.as_uuid(uuid_rep), + self.uuid) + + # Explicit decoding prose test #2 + def test_decoding_2(self): + self._test_decoding_legacy( + "7766554433221100FFEEDDCCBBAA9988", + UuidRepresentation.JAVA_LEGACY) + + # Explicit decoding prose test #3 + def test_decoding_3(self): + self._test_decoding_legacy( + "33221100554477668899AABBCCDDEEFF", + UuidRepresentation.CSHARP_LEGACY) + + # Explicit decoding prose test #4 + def test_decoding_4(self): + self._test_decoding_legacy( + "00112233445566778899AABBCCDDEEFF", + UuidRepresentation.PYTHON_LEGACY) + + +class TestUuidSpecImplicitCoding(IntegrationTest): + @classmethod + def setUpClass(cls): + super(TestUuidSpecImplicitCoding, cls).setUpClass() + cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") + + @staticmethod + def _hex_to_bytes(hexstring): + if PY3: + return bytes.fromhex(hexstring) + return hexstring.decode("hex") + + def _get_coll_w_uuid_rep(self, uuid_rep): + codec_options = self.client.codec_options.with_options( + uuid_representation=validate_uuid_representation(None, uuid_rep)) + coll = self.db.get_collection( + 'pymongo_test', codec_options=codec_options, + write_concern=WriteConcern("majority")) + return coll + + def _test_encoding(self, uuid_rep, expected_hexstring, expected_subtype): + coll = self._get_coll_w_uuid_rep(uuid_rep) + coll.delete_many({}) + coll.insert_one({'_id': self.uuid}) + self.assertTrue( + coll.find_one({"_id": Binary( + self._hex_to_bytes(expected_hexstring), expected_subtype)})) + + # Implicit encoding prose test #1 + def test_encoding_1(self): + self._test_encoding( + "javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3) + + # Implicit encoding prose test #2 + def test_encoding_2(self): + self._test_encoding( + "csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3) + + # Implicit encoding prose test #3 + def test_encoding_3(self): + self._test_encoding( + "pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3) + + # Implicit encoding prose test #4 + def test_encoding_4(self): + self._test_encoding( + "standard", "00112233445566778899AABBCCDDEEFF", 4) + + # Implicit encoding prose test #5 + def test_encoding_5(self): + with self.assertRaises(ValueError): + self._test_encoding( + "unspecifed", "dummy", -1) + + def _test_decoding(self, client_uuid_representation_string, + legacy_field_uuid_representation, + expected_standard_field_value, + expected_legacy_field_value): + coll = self._get_coll_w_uuid_rep(client_uuid_representation_string) + coll.drop() + + standard_val = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) + legacy_val = Binary.from_uuid(self.uuid, legacy_field_uuid_representation) + coll.insert_one({'standard': standard_val, 'legacy': legacy_val}) + + doc = coll.find_one() + self.assertEqual(doc['standard'], expected_standard_field_value) + self.assertEqual(doc['legacy'], expected_legacy_field_value) + + # Implicit decoding prose test #1 + def test_decoding_1(self): + # TODO: these assertions will change after PYTHON-2245. Specifically, + # the 'standard' field will be decoded as a Binary subtype 4. + binary_value = Binary.from_uuid( + self.uuid, UuidRepresentation.PYTHON_LEGACY) + self._test_decoding( + "javaLegacy", UuidRepresentation.JAVA_LEGACY, + self.uuid, self.uuid) + self._test_decoding( + "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, + self.uuid, self.uuid) + self._test_decoding( + "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, + self.uuid, self.uuid) + + # Implicit decoding pose test #2 + def test_decoding_2(self): + # TODO: these assertions will change after PYTHON-2245. Specifically, + # the 'legacy' field will be decoded as a Binary subtype 3. + binary_value = Binary.from_uuid( + self.uuid, UuidRepresentation.PYTHON_LEGACY) + self._test_decoding( + "standard", UuidRepresentation.PYTHON_LEGACY, + self.uuid, binary_value.as_uuid(UuidRepresentation.PYTHON_LEGACY)) + + # Implicit decoding pose test #3 + def test_decoding_3(self): + expected_standard_value = Binary.from_uuid( + self.uuid, UuidRepresentation.STANDARD) + for legacy_uuid_rep in (UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.JAVA_LEGACY): + expected_legacy_value = Binary.from_uuid( + self.uuid, legacy_uuid_rep) + self._test_decoding( + "unspecified", legacy_uuid_rep, + expected_standard_value, expected_legacy_value) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_bson.py b/test/test_bson.py index dd604c7389..ad726f71bb 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -649,7 +649,6 @@ def test_tuple(self): decode(encode({"tuple": (1, 2)}))) def test_uuid(self): - id = uuid.uuid4() transformed_id = decode(encode({"id": id}))["id"] @@ -991,7 +990,6 @@ def test_tz_aware(self): self.assertTrue(CodecOptions(tz_aware=True).tz_aware) def test_uuid_representation(self): - self.assertRaises(ValueError, CodecOptions, uuid_representation=None) self.assertRaises(ValueError, CodecOptions, uuid_representation=7) self.assertRaises(ValueError, CodecOptions, uuid_representation=2) @@ -1003,7 +1001,7 @@ def test_tzinfo(self): def test_codec_options_repr(self): r = ("CodecOptions(document_class=dict, tz_aware=False, " - "uuid_representation=PYTHON_LEGACY, " + "uuid_representation=UuidRepresentation.PYTHON_LEGACY, " "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " "fallback_encoder=None))") diff --git a/test/test_common.py b/test/test_common.py index 5175dd8bfd..5a35fd8bb2 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -25,7 +25,7 @@ from bson.objectid import ObjectId from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern -from test import client_context, IntegrationTest +from test import client_context, unittest, IntegrationTest from test.utils import connected, rs_or_single_client, single_client diff --git a/test/test_json_util.py b/test/test_json_util.py index 75b177e442..6499818b2f 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -22,14 +22,11 @@ sys.path[0:0] = [""] -from pymongo.errors import ConfigurationError - -from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON +from bson import json_util, EPOCH_AWARE, SON from bson.json_util import (DatetimeRepresentation, STRICT_JSON_OPTIONS) from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE, - USER_DEFINED_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY, - STANDARD) + USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD) from bson.code import Code from bson.dbref import DBRef from bson.int64 import Int64 @@ -271,7 +268,8 @@ def test_uuid(self): doc, json_util.loads( '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}')) - for uuid_representation in ALL_UUID_REPRESENTATIONS: + for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - + {UuidRepresentation.UNSPECIFIED}): options = json_util.JSONOptions( strict_uuid=True, uuid_representation=uuid_representation) self.round_trip(doc, json_options=options) @@ -281,6 +279,30 @@ def test_uuid(self): '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_options=options)) + def test_uuid_uuid_rep_unspecified(self): + _uuid = uuid.uuid4() + options = json_util.JSONOptions( + strict_uuid=True, + uuid_representation=UuidRepresentation.UNSPECIFIED) + + # Cannot directly encode native UUIDs with UNSPECIFIED. + doc = {'uuid': _uuid} + with self.assertRaises(ValueError): + json_util.dumps(doc, json_options=options) + + # All UUID subtypes are decoded as Binary with UNSPECIFIED. + # subtype 3 + doc = {'uuid': Binary(_uuid.bytes, subtype=3)} + ext_json_str = json_util.dumps(doc) + self.assertEqual( + doc, json_util.loads(ext_json_str, json_options=options)) + # subtype 4 + doc = {'uuid': Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps(doc) + self.assertEqual( + doc, json_util.loads(ext_json_str, json_options=options)) + + def test_binary(self): if PY3: bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} From 5b49557c591ba0ac18b6509f6cae4d5b510e52f8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 29 May 2020 17:28:09 -0700 Subject: [PATCH 0139/2111] PYTHON-2268 Close clients in test suite --- pymongo/periodic_executor.py | 4 ++ pymongo/settings.py | 4 ++ test/__init__.py | 42 ++++++++++++++++ test/pymongo_mocks.py | 8 ++-- test/test_auth.py | 2 - test/test_client.py | 4 ++ test/test_collation.py | 1 + test/test_command_monitoring_spec.py | 4 ++ ...nnections_survive_primary_stepdown_spec.py | 4 ++ test/test_custom_types.py | 1 + test/test_examples.py | 48 ++++++++----------- test/test_legacy_api.py | 2 + test/test_mongos_load_balancing.py | 1 + test/test_monitoring.py | 5 ++ test/test_pooling.py | 3 ++ test/test_read_concern.py | 1 + test/test_read_preferences.py | 1 + test/test_replica_set_client.py | 20 ++++---- test/test_retryable_writes.py | 2 + test/test_session.py | 6 +++ test/test_transactions.py | 6 +++ 21 files changed, 128 insertions(+), 41 deletions(-) diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index ba9664fa78..5777e5ab2c 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -50,6 +50,10 @@ def __init__(self, interval, min_interval, target, name=None): self._thread_will_exit = False self._lock = threading.Lock() + def __repr__(self): + return '<%s(name=%s) object at 0x%x>' % ( + self.__class__.__name__, self._name, id(self)) + def open(self): """Start. Multiple calls have no effect. diff --git a/pymongo/settings.py b/pymongo/settings.py index 2a02f05d5d..dd0ac3c1ac 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -15,6 +15,7 @@ """Represent MongoClient's configuration.""" import threading +import traceback from bson.objectid import ObjectId from pymongo import common, monitor, pool @@ -60,6 +61,9 @@ def __init__(self, self._heartbeat_frequency = heartbeat_frequency self._direct = (len(self._seeds) == 1 and not replica_set_name) self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. + self._stack = ''.join(traceback.format_stack()) @property def seeds(self): diff --git a/test/__init__.py b/test/__init__.py index a82e62edf3..73a91c0b2b 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -795,6 +795,44 @@ def setup(): warnings.simplefilter("always") +def _get_executors(topology): + executors = [] + for server in topology._servers.values(): + # Some MockMonitor do not have an _executor. + executors.append(getattr(server._monitor, '_executor', None)) + executors.append(topology._Topology__events_executor) + if topology._srv_monitor: + executors.append(topology._srv_monitor._executor) + return [e for e in executors if e is not None] + + +def all_executors_stopped(topology): + running = [e for e in _get_executors(topology) if not e._stopped] + if running: + print(' Topology %s has THREADS RUNNING: %s, created at: %s' % ( + topology, running, topology._settings._stack)) + return False + return True + + +def print_unclosed_clients(): + from pymongo.topology import Topology + processed = set() + # Call collect to manually cleanup any would-be gc'd clients to avoid + # false positives. + gc.collect() + for obj in gc.get_objects(): + try: + if isinstance(obj, Topology): + # Avoid printing the same Topology multiple times. + if obj._topology_id in processed: + continue + all_executors_stopped(obj) + processed.add(obj._topology_id) + except ReferenceError: + pass + + def teardown(): garbage = [] for g in gc.garbage: @@ -813,6 +851,10 @@ def teardown(): c.drop_database("pymongo_test_bernie") c.close() + # Jython does not support gc.get_objects. + if not sys.platform.startswith('java'): + print_unclosed_clients() + class PymongoTestRunner(unittest.TextTestRunner): def run(self, test): diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index bd3e1ae22b..388f89178f 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -65,8 +65,9 @@ def __init__( topology, pool, topology_settings): - # MockMonitor gets a 'client' arg, regular monitors don't. - self.client = client + # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it + # to avoid cycles. + self.client = weakref.proxy(client) Monitor.__init__( self, server_description, @@ -75,8 +76,9 @@ def __init__( topology_settings) def _check_once(self): + client = self.client address = self._server_description.address - response, rtt = self.client.mock_is_master('%s:%d' % address) + response, rtt = client.mock_is_master('%s:%d' % address) return ServerDescription(address, IsMaster(response), rtt) diff --git a/test/test_auth.py b/test/test_auth.py index 6dccc6ff11..c8e4ef1ab9 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -695,8 +695,6 @@ def setUp(self): client_context.create_user('admin', 'admin', 'pass') client_context.create_user( 'pymongo_test', 'user', 'pass', ['userAdmin', 'readWrite']) - self.client = rs_or_single_client_noauth( - username='admin', password='pass') def tearDown(self): client_context.drop_user('pymongo_test', 'user') diff --git a/test/test_client.py b/test/test_client.py index ef1b19485f..df2221eb31 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -101,6 +101,10 @@ def setUpClass(cls): cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) + @classmethod + def tearDownClass(cls): + cls.client.close() + def test_keyword_arg_defaults(self): client = MongoClient(socketTimeoutMS=None, connectTimeoutMS=20000, diff --git a/test/test_collation.py b/test/test_collation.py index 7cb4d8b5c1..d87a2a9aca 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -105,6 +105,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None + cls.client.close() def tearDown(self): self.listener.results.clear() diff --git a/test/test_command_monitoring_spec.py b/test/test_command_monitoring_spec.py index 3d41d4b487..3363a4256a 100644 --- a/test/test_command_monitoring_spec.py +++ b/test/test_command_monitoring_spec.py @@ -48,6 +48,10 @@ def setUpClass(cls): cls.listener = EventListener() cls.client = single_client(event_listeners=[cls.listener]) + @classmethod + def tearDownClass(cls): + cls.client.close() + def tearDown(self): self.listener.results.clear() diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 4a63e0e239..63cf127e32 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -51,6 +51,10 @@ def setUpClass(cls): cls.coll = cls.db.get_collection( "step-down", write_concern=WriteConcern("majority")) + @classmethod + def tearDownClass(cls): + cls.client.close() + def setUp(self): # Note that all ops use same write-concern as self.db (majority). self.db.drop_collection("step-down") diff --git a/test/test_custom_types.py b/test/test_custom_types.py index ba0bb0ca69..41b79a96d9 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -911,6 +911,7 @@ def create_targets(self, *args, **kwargs): kwargs['type_registry'] = codec_options.type_registry kwargs['document_class'] = codec_options.document_class self.watched_target = rs_client(*args, **kwargs) + self.addCleanup(self.watched_target.close) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. self.input_target.insert_one({'data': 'dummy'}) diff --git a/test/test_examples.py b/test/test_examples.py index f2747ff46e..16e1936d57 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -27,27 +27,26 @@ from pymongo.write_concern import WriteConcern from test import client_context, unittest, IntegrationTest -from test.utils import rs_client, rs_or_single_client +from test.utils import rs_client -class TestSampleShellCommands(unittest.TestCase): +class TestSampleShellCommands(IntegrationTest): @classmethod - @client_context.require_connection def setUpClass(cls): - cls.client = rs_or_single_client(w="majority") + super(TestSampleShellCommands, cls).setUpClass() # Run once before any tests run. - cls.client.pymongo_test.inventory.drop() + cls.db.inventory.drop() @classmethod def tearDownClass(cls): - client_context.client.drop_database("pymongo_test") + cls.client.drop_database("pymongo_test") def tearDown(self): # Run after every test. - self.client.pymongo_test.inventory.drop() + self.db.inventory.drop() def test_first_three_examples(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 1 db.inventory.insert_one( @@ -84,7 +83,7 @@ def test_first_three_examples(self): self.assertEqual(db.inventory.count_documents({}), 4) def test_query_top_level_fields(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 6 db.inventory.insert_many([ @@ -151,7 +150,7 @@ def test_query_top_level_fields(self): self.assertEqual(len(list(cursor)), 2) def test_query_embedded_documents(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 14 # Subdocument key order matters in a few of these examples so we have @@ -214,7 +213,7 @@ def test_query_embedded_documents(self): self.assertEqual(len(list(cursor)), 1) def test_query_arrays(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 20 db.inventory.insert_many([ @@ -290,7 +289,7 @@ def test_query_arrays(self): self.assertEqual(len(list(cursor)), 1) def test_query_array_of_documents(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 29 # Subdocument key order matters in a few of these examples so we have @@ -372,7 +371,7 @@ def test_query_array_of_documents(self): self.assertEqual(len(list(cursor)), 2) def test_query_null(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 38 db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}]) @@ -397,7 +396,7 @@ def test_query_null(self): self.assertEqual(len(list(cursor)), 1) def test_projection(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 42 db.inventory.insert_many([ @@ -528,7 +527,7 @@ def test_projection(self): self.assertEqual(len(doc["instock"]), 1) def test_update_and_replace(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 51 db.inventory.insert_many([ @@ -614,7 +613,7 @@ def test_update_and_replace(self): self.assertEqual(len(doc["instock"]), 2) def test_delete(self): - db = client_context.client.pymongo_test + db = self.db # Start Example 55 db.inventory.insert_many([ @@ -664,7 +663,7 @@ def test_delete(self): @client_context.require_replica_set @client_context.require_no_mmap def test_change_streams(self): - db = client_context.client.pymongo_test + db = self.db done = False def insert_docs(): @@ -706,7 +705,7 @@ def insert_docs(): t.join() def test_aggregate_examples(self): - db = client_context.client.pymongo_test + db = self.db # Start Aggregation Example 1 db.sales.aggregate([ @@ -792,7 +791,7 @@ def test_aggregate_examples(self): # End Aggregation Example 4 def test_commands(self): - db = client_context.client.pymongo_test + db = self.db db.restaurants.insert_one({}) # Start runCommand Example 1 @@ -804,7 +803,7 @@ def test_commands(self): # End runCommand Example 2 def test_index_management(self): - db = client_context.client.pymongo_test + db = self.db # Start Index Example 1 db.records.create_index("score") @@ -821,7 +820,7 @@ def test_index_management(self): @client_context.require_replica_set def test_misc(self): # Marketing examples - client = client_context.client + client = self.client self.addCleanup(client.drop_database, "test") self.addCleanup(client.drop_database, "my_database") @@ -843,13 +842,6 @@ def test_misc(self): class TestTransactionExamples(IntegrationTest): - - @classmethod - @client_context.require_connection - def setUpClass(cls): - super(TestTransactionExamples, cls).setUpClass() - cls.client = rs_or_single_client(w="majority") - @client_context.require_version_max(4, 4, 99) # PYTHON-2154 skip on 4.5+ @client_context.require_transactions def test_transactions(self): diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 9e63e59a82..fb1bd2b8ac 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -2306,6 +2306,8 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.deprecation_filter.stop() + if cls.secondary: + cls.secondary.close() def cause_wtimeout(self, batch): if self.need_replication_stopped: diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index 7df2239e7d..18e05125b2 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -71,6 +71,7 @@ def mock_client(self, **kwargs): host='a:1,b:2,c:3', connect=False, **kwargs) + self.addCleanup(mock_client.close) # Latencies in seconds. mock_client.mock_rtts['a:1'] = 0.020 diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 52f9d121c3..ede08c011d 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -52,6 +52,10 @@ def setUpClass(cls): event_listeners=[cls.listener], retryWrites=False) + @classmethod + def tearDownClass(cls): + cls.client.close() + def tearDown(self): self.listener.results.clear() @@ -1401,6 +1405,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): monitoring._LISTENERS = cls.saved_listeners + cls.client.close() def setUp(self): self.listener.results.clear() diff --git a/test/test_pooling.py b/test/test_pooling.py index 8ed4068a6f..b1728d791a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -161,6 +161,9 @@ def setUp(self): db.unique.insert_one({"_id": "jesse"}) db.test.insert_many([{} for _ in range(10)]) + def tearDown(self): + self.c.close() + def create_pool( self, pair=(client_context.host, client_context.port), diff --git a/test/test_read_concern.py b/test/test_read_concern.py index abd69309a9..2eef4cb1d9 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -34,6 +34,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + cls.client.close() client_context.client.pymongo_test.drop_collection('coll') def tearDown(self): diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index ce79592bbe..821c277e80 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -370,6 +370,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.c.drop_database('pymongo_test') + cls.c.close() def executed_on_which_server(self, client, fn, *args, **kwargs): """Execute fn(*args, **kwargs) and return the Server instance used.""" diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py index e0456186ae..c653784181 100644 --- a/test/test_replica_set_client.py +++ b/test/test_replica_set_client.py @@ -300,6 +300,7 @@ def test_wire_version(self): host='a:1', replicaSet='rs', connect=False) + self.addCleanup(c.close) c.set_wire_version_range('a:1', 3, 7) c.set_wire_version_range('b:2', 2, 3) @@ -330,15 +331,17 @@ class TestReplicaSetClientInternalIPs(MockClientTest): def test_connect_with_internal_ips(self): # Client is passed an IP it can reach, 'a:1', but the RS config # only contains unreachable IPs like 'internal-ip'. PYTHON-608. + client = MockClient( + standalones=[], + members=['a:1'], + mongoses=[], + ismaster_hosts=['internal-ip:27017'], + host='a:1', + replicaSet='rs', + serverSelectionTimeoutMS=100) + self.addCleanup(client.close) with self.assertRaises(AutoReconnect) as context: - connected(MockClient( - standalones=[], - members=['a:1'], - mongoses=[], - ismaster_hosts=['internal-ip:27017'], - host='a:1', - replicaSet='rs', - serverSelectionTimeoutMS=100)) + connected(client) self.assertEqual( "Could not reach any servers in [('internal-ip', 27017)]." @@ -356,6 +359,7 @@ def test_max_write_batch_size(self): host='a:1', replicaSet='rs', connect=False) + self.addCleanup(c.close) c.set_max_write_batch_size('a:1', 1) c.set_max_write_batch_size('b:2', 2) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 7f9a429b1e..88a122d513 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -192,6 +192,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.knobs.disable() + cls.client.close() @client_context.require_version_min(3, 5) @client_context.require_no_standalone @@ -226,6 +227,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.knobs.disable() + cls.client.close() super(TestRetryableWrites, cls).tearDownClass() def setUp(self): diff --git a/test/test_session.py b/test/test_session.py index 3c90fa5fa1..a7c8e54e25 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -78,6 +78,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) + cls.client2.close() super(TestSession, cls).tearDownClass() def setUp(self): @@ -85,6 +86,7 @@ def setUp(self): self.session_checker_listener = SessionTestListener() self.client = rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener]) + self.addCleanup(self.client.close) self.db = self.client.pymongo_test self.initial_lsids = set(s['id'] for s in session_ids(self.client)) @@ -783,6 +785,10 @@ def setUpClass(cls): cls.listener = SessionTestListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) + @classmethod + def tearDownClass(cls): + cls.client.close() + @client_context.require_sessions def setUp(self): super(TestCausalConsistency, self).setUp() diff --git a/test/test_transactions.py b/test/test_transactions.py index cfcc67e95e..ff707c5f92 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -56,6 +56,12 @@ def setUpClass(cls): for address in client_context.mongoses: cls.mongos_clients.append(single_client('%s:%s' % address)) + @classmethod + def tearDownClass(cls): + for client in cls.mongos_clients: + client.close() + super(TransactionsBase, cls).tearDownClass() + def maybe_skip_scenario(self, test): super(TransactionsBase, self).maybe_skip_scenario(test) if ('secondary' in self.id() and From 4966d934a53a45354ae7388db309d34fb1d82747 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 Jun 2020 17:04:28 -0700 Subject: [PATCH 0140/2111] PYTHON-2182 Use namespace returned from find command in getMore --- pymongo/cursor.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index f62f2f66f1..c93cf080ba 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -226,6 +226,10 @@ def __init__(self, collection, filter=None, projection=None, skip=0, if oplog_replay: self.__query_flags |= _QUERY_OPTIONS["oplog_replay"] + # The namespace to use for find/getMore commands. + self.__dbname = collection.database.name + self.__collname = collection.name + @property def collection(self): """The :class:`~pymongo.collection.Collection` that this @@ -1037,6 +1041,10 @@ def __send_message(self, operation): self.__id = cursor['id'] if cmd_name == 'find': documents = cursor['firstBatch'] + # Update the namespace used for future getMore commands. + ns = cursor.get('ns') + if ns: + self.__dbname, self.__collname = ns.split('.', 1) else: documents = cursor['nextBatch'] self.__data = deque(documents) @@ -1116,8 +1124,8 @@ def _refresh(self): limit = self.__batch_size # Exhaust cursors don't send getMore messages. - g = self._getmore_class(self.__collection.database.name, - self.__collection.name, + g = self._getmore_class(self.__dbname, + self.__collname, limit, self.__id, self.__codec_options, From 4be82828f9e3b1a7fc6a1011ae63fb96b90d22f4 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Fri, 29 May 2020 12:00:02 -0700 Subject: [PATCH 0141/2111] PYTHON-2208 Revise issues and help sections of documentation --- README.rst | 5 +++-- doc/index.rst | 11 +++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 0433b9c80e..e65f71a697 100644 --- a/README.rst +++ b/README.rst @@ -2,6 +2,7 @@ PyMongo ======= :Info: See `the mongo site `_ for more information. See `GitHub `_ for the latest source. +:Documentation: Available at `pymongo.readthedocs.io `_ :Author: Mike Dirolf :Maintainer: Bernie Hackett @@ -22,10 +23,10 @@ Support / Feedback ================== For issues with, questions about, or feedback for PyMongo, please look into -our `support channels `_. Please +our `support channels `_. Please do not email any of the PyMongo developers directly with issues or questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. +Forums `_. Bugs / Feature Requests ======================= diff --git a/doc/index.rst b/doc/index.rst index caa9ae8ced..05265578ae 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -53,8 +53,10 @@ everything you need to know to use **PyMongo**. Getting Help ------------ -If you're having trouble or have questions about PyMongo, ask your question in -our `MongoDB Community Forums `_. +If you're having trouble or have questions about PyMongo, ask your question on +our `MongoDB Community Forum `_. +You may also want to consider a +`commercial support subscription `_. Once you get an answer, it'd be great if you could work it back into this documentation and contribute! @@ -65,6 +67,11 @@ commented on) at the main `MongoDB JIRA bug tracker `_, in the "Python Driver" project. +Feature Requests / Feedback +--------------------------- +Use our `feedback engine `_ +to send us feature requests and general feedback about PyMongo. + Contributing ------------ **PyMongo** has a large :doc:`community ` and From 719b025d11855f55e287b8feae4db01de7fd0e17 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 Jun 2020 20:39:24 -0700 Subject: [PATCH 0142/2111] PYTHON-2254 Fix Cursor.clone with various options This change adds support for cloning cursors with: "empty", "show_record_id", "return_key", "allow_disk_use", "snapshot", and "exhaust". --- pymongo/cursor.py | 4 +++- test/test_cursor.py | 39 ++++++++++++++++++++++++--------------- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index c93cf080ba..522edf17d5 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -285,7 +285,9 @@ def _clone(self, deepcopy=True, base=None): "max_time_ms", "max_await_time_ms", "comment", "max", "min", "ordering", "explain", "hint", "batch_size", "max_scan", "manipulate", - "query_flags", "modifiers", "collation") + "query_flags", "modifiers", "collation", "empty", + "show_record_id", "return_key", "allow_disk_use", + "snapshot", "exhaust") data = dict((k, v) for k, v in iteritems(self.__dict__) if k.startswith('_Cursor__') and k[9:] in values_to_clone) if deepcopy: diff --git a/test/test_cursor.py b/test/test_cursor.py index 12720aa74d..1d8aae2af1 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -914,6 +914,8 @@ def test_rewind(self): self.assertEqual(cursor, cursor.rewind()) + # manipulate, oplog_reply, and snapshot are all deprecated. + @ignore_deprecations def test_clone(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -952,31 +954,30 @@ def test_clone(self): # Just test attributes cursor = self.db.test.find({"x": re.compile("^hello.*")}, + projection={'_id': False}, skip=1, no_cursor_timeout=True, cursor_type=CursorType.TAILABLE_AWAIT, + sort=[("x", 1)], allow_partial_results=True, + oplog_replay=True, + batch_size=123, manipulate=False, - projection={'_id': False}).limit(2) + collation={'locale': 'en_US'}, + hint=[("_id", 1)], + max_scan=100, + max_time_ms=1000, + return_key=True, + show_record_id=True, + snapshot=True, + allow_disk_use=True).limit(2) cursor.min([('a', 1)]).max([('b', 3)]) cursor.add_option(128) cursor.comment('hi!') + # Every attribute should be the same. cursor2 = cursor.clone() - self.assertEqual(cursor._Cursor__skip, cursor2._Cursor__skip) - self.assertEqual(cursor._Cursor__limit, cursor2._Cursor__limit) - self.assertEqual(type(cursor._Cursor__codec_options), - type(cursor2._Cursor__codec_options)) - self.assertEqual(cursor._Cursor__manipulate, - cursor2._Cursor__manipulate) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__comment, - cursor2._Cursor__comment) - self.assertEqual(cursor._Cursor__min, - cursor2._Cursor__min) - self.assertEqual(cursor._Cursor__max, - cursor2._Cursor__max) + self.assertEqual(cursor.__dict__, cursor2.__dict__) # Shallow copies can so can mutate cursor2 = copy.copy(cursor) @@ -1011,6 +1012,14 @@ def test_clone(self): self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) + def test_clone_empty(self): + self.db.test.delete_many({}) + self.db.test.insert_many([{"x": i} for i in range(1, 4)]) + cursor = self.db.test.find()[2:2] + cursor2 = cursor.clone() + self.assertRaises(StopIteration, cursor.next) + self.assertRaises(StopIteration, cursor2.next) + @ignore_deprecations def test_count_with_fields(self): self.db.test.drop() From 903643b3d053528646a9d645af45d15d637cc403 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 Jun 2020 15:42:18 -0700 Subject: [PATCH 0143/2111] PYTHON-2138 Use pymongo-auth-aws for MONGODB-AWS support --- .evergreen/run-mongodb-aws-ecs-test.sh | 2 +- .evergreen/run-mongodb-aws-test.sh | 3 +- README.rst | 5 +- doc/installation.rst | 5 +- pymongo/auth.py | 16 +- pymongo/auth_aws.py | 203 +++++-------------------- setup.py | 2 +- 7 files changed, 50 insertions(+), 186 deletions(-) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 6be913eff3..43954dedd2 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -35,9 +35,9 @@ authtest () { $VIRTUALENV -p $PYTHON --system-site-packages --never-download venvaws . venvaws/bin/activate - pip install requests botocore cd src + pip install '.[aws]' python test/auth_aws/test_auth_aws.py cd - deactivate diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index 8765d4702c..f0d59e960a 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -55,8 +55,7 @@ authtest () { else . venvaws/bin/activate fi - pip install requests botocore - + pip install '.[aws]' python test/auth_aws/test_auth_aws.py deactivate rm -rf venvaws diff --git a/README.rst b/README.rst index e65f71a697..4cb6658df0 100644 --- a/README.rst +++ b/README.rst @@ -100,9 +100,8 @@ dependency can be installed automatically along with PyMongo:: $ python -m pip install pymongo[gssapi] -MONGODB-AWS authentication requires `botocore -`_ and `requests -`_:: +MONGODB-AWS authentication requires `pymongo-auth-aws +`_:: $ python -m pip install pymongo[aws] diff --git a/doc/installation.rst b/doc/installation.rst index adcaad478b..0bda72e16f 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -56,9 +56,8 @@ dependency can be installed automatically along with PyMongo:: $ python -m pip install pymongo[gssapi] -:ref:`MONGODB-AWS` authentication requires `botocore -`_ and `requests -`_:: +:ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws +`_:: $ python -m pip install pymongo[aws] diff --git a/pymongo/auth.py b/pymongo/auth.py index 9eca28c981..89febe581f 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -43,7 +43,7 @@ from bson.binary import Binary from bson.py3compat import string_type, _unicode, PY3 from bson.son import SON -from pymongo.auth_aws import _HAVE_MONGODB_AWS, _auth_aws, _AWSCredential +from pymongo.auth_aws import _authenticate_aws from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep @@ -540,20 +540,6 @@ def _authenticate_x509(credentials, sock_info): sock_info.command('$external', cmd) -def _authenticate_aws(credentials, sock_info): - """Authenticate using MONGODB-AWS. - """ - if not _HAVE_MONGODB_AWS: - raise ConfigurationError( - "MONGODB-AWS authentication requires botocore and requests: " - "install these libraries with: " - "python -m pip install 'pymongo[aws]'") - - _auth_aws(_AWSCredential( - credentials.username, credentials.password, - credentials.mechanism_properties.aws_session_token), sock_info) - - def _authenticate_mongo_cr(credentials, sock_info): """Authenticate using MONGODB-CR. """ diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index fdb6ec88f8..e57bec324d 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -14,188 +14,69 @@ """MONGODB-AWS Authentication helpers.""" -import os - try: - - from botocore.auth import SigV4Auth - from botocore.awsrequest import AWSRequest - from botocore.credentials import Credentials - - import requests - + import pymongo_auth_aws + from pymongo_auth_aws import (AwsCredential, + AwsSaslContext, + PyMongoAuthAwsError) _HAVE_MONGODB_AWS = True except ImportError: _HAVE_MONGODB_AWS = False import bson - - -from base64 import standard_b64encode -from collections import namedtuple - from bson.binary import Binary from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure -_AWS_REL_URI = 'http://169.254.170.2/' -_AWS_EC2_URI = 'http://169.254.169.254/' -_AWS_EC2_PATH = 'latest/meta-data/iam/security-credentials/' -_AWS_HTTP_TIMEOUT = 10 - - -_AWSCredential = namedtuple('_AWSCredential', - ['username', 'password', 'token']) -"""MONGODB-AWS credentials.""" - - -def _aws_temp_credentials(): - """Construct temporary MONGODB-AWS credentials.""" - access_key = os.environ.get('AWS_ACCESS_KEY_ID') - secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY') - if access_key and secret_key: - return _AWSCredential( - access_key, secret_key, os.environ.get('AWS_SESSION_TOKEN')) - # If the environment variable - # AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set then drivers MUST - # assume that it was set by an AWS ECS agent and use the URI - # http://169.254.170.2/$AWS_CONTAINER_CREDENTIALS_RELATIVE_URI to - # obtain temporary credentials. - relative_uri = os.environ.get('AWS_CONTAINER_CREDENTIALS_RELATIVE_URI') - if relative_uri is not None: - try: - res = requests.get(_AWS_REL_URI+relative_uri, - timeout=_AWS_HTTP_TIMEOUT) - res_json = res.json() - except (ValueError, requests.exceptions.RequestException): - raise OperationFailure( - 'temporary MONGODB-AWS credentials could not be obtained') - else: - # If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is - # not set drivers MUST assume we are on an EC2 instance and use the - # endpoint - # http://169.254.169.254/latest/meta-data/iam/security-credentials - # / - # whereas role-name can be obtained from querying the URI - # http://169.254.169.254/latest/meta-data/iam/security-credentials/. - try: - # Get token - headers = {'X-aws-ec2-metadata-token-ttl-seconds': "30"} - res = requests.post(_AWS_EC2_URI+'latest/api/token', - headers=headers, timeout=_AWS_HTTP_TIMEOUT) - token = res.content - # Get role name - headers = {'X-aws-ec2-metadata-token': token} - res = requests.get(_AWS_EC2_URI+_AWS_EC2_PATH, headers=headers, - timeout=_AWS_HTTP_TIMEOUT) - role = res.text - # Get temp creds - res = requests.get(_AWS_EC2_URI+_AWS_EC2_PATH+role, - headers=headers, timeout=_AWS_HTTP_TIMEOUT) - res_json = res.json() - except (ValueError, requests.exceptions.RequestException): - raise OperationFailure( - 'temporary MONGODB-AWS credentials could not be obtained') - - try: - temp_user = res_json['AccessKeyId'] - temp_password = res_json['SecretAccessKey'] - token = res_json['Token'] - except KeyError: - # If temporary credentials cannot be obtained then drivers MUST - # fail authentication and raise an error. - raise OperationFailure( - 'temporary MONGODB-AWS credentials could not be obtained') - - return _AWSCredential(temp_user, temp_password, token) - - -_AWS4_HMAC_SHA256 = 'AWS4-HMAC-SHA256' -_AWS_SERVICE = 'sts' - +class _AwsSaslContext(AwsSaslContext): + # Dependency injection: + def binary_type(self): + """Return the bson.binary.Binary type.""" + return Binary -def _get_region(sts_host): - """""" - parts = sts_host.split('.') - if len(parts) == 1 or sts_host == 'sts.amazonaws.com': - return 'us-east-1' # Default + def bson_encode(self, doc): + """Encode a dictionary to BSON.""" + return bson.encode(doc) - if len(parts) > 2 or not all(parts): - raise OperationFailure("Server returned an invalid sts host") + def bson_decode(self, data): + """Decode BSON to a dictionary.""" + return bson.decode(data) - return parts[1] - - -def _aws_auth_header(credentials, server_nonce, sts_host): - """Signature Version 4 Signing Process to construct the authorization header - """ - region = _get_region(sts_host) - request_parameters = 'Action=GetCallerIdentity&Version=2011-06-15' - encoded_nonce = standard_b64encode(server_nonce).decode('utf8') - request_headers = { - 'Content-Type': 'application/x-www-form-urlencoded', - 'Content-Length': str(len(request_parameters)), - 'Host': sts_host, - 'X-MongoDB-Server-Nonce': encoded_nonce, - 'X-MongoDB-GS2-CB-Flag': 'n', - } - request = AWSRequest(method="POST", url="/", data=request_parameters, - headers=request_headers) - boto_creds = Credentials(credentials.username, credentials.password, - token=credentials.token) - auth = SigV4Auth(boto_creds, "sts", region) - auth.add_auth(request) - final = { - 'a': request.headers['Authorization'], - 'd': request.headers['X-Amz-Date'] - } - if credentials.token: - final['t'] = credentials.token - return final - - -def _auth_aws(credentials, sock_info): +def _authenticate_aws(credentials, sock_info): """Authenticate using MONGODB-AWS. """ if not _HAVE_MONGODB_AWS: raise ConfigurationError( - "MONGODB-AWS authentication requires botocore and requests: " - "install these libraries with: " - "python -m pip install 'pymongo[aws]'") + "MONGODB-AWS authentication requires pymongo-auth-aws: " + "install with: python -m pip install 'pymongo[aws]'") if sock_info.max_wire_version < 9: raise ConfigurationError( "MONGODB-AWS authentication requires MongoDB version 4.4 or later") - # If a username and password are not provided, drivers MUST query - # a link-local AWS address for temporary credentials. - if credentials.username is None: - credentials = _aws_temp_credentials() - - # Client first. - client_nonce = os.urandom(32) - payload = {'r': Binary(client_nonce), 'p': 110} - client_first = SON([('saslStart', 1), - ('mechanism', 'MONGODB-AWS'), - ('payload', Binary(bson.encode(payload)))]) - server_first = sock_info.command('$external', client_first) - - server_payload = bson.decode(server_first['payload']) - server_nonce = server_payload['s'] - if len(server_nonce) != 64 or not server_nonce.startswith(client_nonce): - raise OperationFailure("Server returned an invalid nonce.") - sts_host = server_payload['h'] - if len(sts_host) < 1 or len(sts_host) > 255 or '..' in sts_host: - # Drivers must also validate that the host is greater than 0 and less - # than or equal to 255 bytes per RFC 1035. - raise OperationFailure("Server returned an invalid sts host.") - - payload = _aws_auth_header(credentials, server_nonce, sts_host) - client_second = SON([('saslContinue', 1), - ('conversationId', server_first['conversationId']), - ('payload', Binary(bson.encode(payload)))]) - res = sock_info.command('$external', client_second) - if not res['done']: - raise OperationFailure('MONGODB-AWS conversation failed to complete.') + try: + ctx = _AwsSaslContext(AwsCredential( + credentials.username, credentials.password, + credentials.mechanism_properties.aws_session_token)) + client_payload = ctx.step(None) + client_first = SON([('saslStart', 1), + ('mechanism', 'MONGODB-AWS'), + ('payload', client_payload)]) + server_first = sock_info.command('$external', client_first) + res = server_first + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + client_payload = ctx.step(res['payload']) + cmd = SON([('saslContinue', 1), + ('conversationId', server_first['conversationId']), + ('payload', client_payload)]) + res = sock_info.command('$external', cmd) + if res['done']: + # SASL complete. + break + except PyMongoAuthAwsError as exc: + # Convert to OperationFailure and include pymongo-auth-aws version. + raise OperationFailure('%s (pymongo-auth-aws version %s)' % ( + exc, pymongo_auth_aws.__version__)) diff --git a/setup.py b/setup.py index 8850e0e551..fc1ee93f79 100755 --- a/setup.py +++ b/setup.py @@ -329,7 +329,7 @@ def build_extension(self, ext): 'snappy': ['python-snappy'], 'tls': [], 'zstd': ['zstandard'], - 'aws': ['requests<3.0.0', 'botocore'], + 'aws': ['pymongo-auth-aws<2.0.0'], } # https://jira.mongodb.org/browse/PYTHON-2117 From fbafa9c847ee24d1035ce972fcea09476d6d0795 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 5 Jun 2020 16:22:31 -0700 Subject: [PATCH 0144/2111] PYTHON-2138 Fix NameError: name 'AwsSaslContext' is not defined --- pymongo/auth_aws.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index e57bec324d..ff07a12e7f 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -21,6 +21,9 @@ PyMongoAuthAwsError) _HAVE_MONGODB_AWS = True except ImportError: + class AwsSaslContext(object): + def __init__(self, credentials): + pass _HAVE_MONGODB_AWS = False import bson From 9a9f42bb9960302ee047eaef5bf61cfc08e621d7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 8 May 2020 16:39:05 -0700 Subject: [PATCH 0145/2111] PYTHON-2236 Reset the server pool only after marking the server Unknown --- pymongo/monitor.py | 6 ++++-- test/test_client.py | 7 ++----- test/test_topology.py | 11 ++++++++--- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 99d33f2530..5400077b9d 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -142,10 +142,12 @@ def _check_with_retry(self): if self._publish: self._listeners.publish_server_heartbeat_failed( address, error_time, error) - self._topology.reset_pool(address) default = ServerDescription(address, error=error) + # Reset the server pool only after marking the server Unknown. + self._topology.on_change(default) + self._topology.reset_pool(address) + self._avg_round_trip_time.reset() if not retry: - self._avg_round_trip_time.reset() # Server type defaults to Unknown. return default diff --git a/test/test_client.py b/test/test_client.py index df2221eb31..084235a1b8 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1767,7 +1767,7 @@ def test_reconnect(self): host='b:2', # Pass a secondary. replicaSet='rs', retryReads=False, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) self.addCleanup(c.close) @@ -1782,9 +1782,6 @@ def test_reconnect(self): # ServerSelectionTimeoutError or AutoReconnect (from # MockPool.get_socket). self.assertRaises(AutoReconnect, c.db.collection.find_one) - # The second attempt always raises ServerSelectionTimeoutError. - self.assertRaises(ServerSelectionTimeoutError, - c.db.collection.find_one) # But it can reconnect. c.revive_host('a:1') @@ -1804,7 +1801,7 @@ def _test_network_error(self, operation_callback): replicaSet='rs', connect=False, retryReads=False, - serverSelectionTimeoutMS=100) + serverSelectionTimeoutMS=1000) self.addCleanup(c.close) # Set host-specific information so we can test whether it is reset. diff --git a/test/test_topology.py b/test/test_topology.py index dd89d1e535..cf89ae3bd3 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -730,7 +730,8 @@ def _check_with_socket(self, *args, **kwargs): if ismaster_count[0] in (1, 3): return IsMaster({'ok': 1, 'maxWireVersion': 6}), 0 else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect( + 'mock monitor error #%s' % (ismaster_count[0],)) t = create_mock_topology(monitor_class=TestMonitor) server = wait_for_master(t) @@ -738,10 +739,14 @@ def _check_with_socket(self, *args, **kwargs): self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) - # Second ismaster call, then immediately the third. + # Second ismaster call. t.request_check_all() - self.assertEqual(3, ismaster_count[0]) + # The third ismaster call (the immediate retry) happens sometime soon + # after the failed check triggered by request_check_all. Wait until + # the server becomes known again. + t.select_server(writable_server_selector, 0.250) self.assertEqual(SERVER_TYPE.Standalone, get_type(t, 'a')) + self.assertEqual(3, ismaster_count[0]) def test_internal_monitor_error(self): exception = AssertionError('internal error') From 0743c0b2222e5d562f3d54e83baef2ffbf6c10f3 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 26 Mar 2020 19:46:20 -0700 Subject: [PATCH 0146/2111] PYTHON-2121 add directConnection URI option --- doc/changelog.rst | 2 + pymongo/client_options.py | 6 ++ pymongo/common.py | 1 + pymongo/mongo_client.py | 15 +++- pymongo/server_description.py | 4 +- pymongo/settings.py | 10 ++- pymongo/topology_description.py | 15 +++- pymongo/uri_parser.py | 7 ++ .../rs/discover_arbiters.json | 4 +- .../rs/discover_arbiters_replicaset.json | 41 ++++++++++ .../rs/discover_ghost.json | 31 ++++++++ .../rs/discover_ghost_replicaset.json | 35 +++++++++ .../rs/discover_hidden.json | 45 +++++++++++ .../rs/discover_hidden_replicaset.json | 45 +++++++++++ .../rs/discover_passives.json | 4 +- .../rs/discover_passives_replicaset.json | 78 +++++++++++++++++++ .../rs/discover_primary.json | 4 +- .../rs/discover_primary_replicaset.json | 39 ++++++++++ .../rs/discover_rsother.json | 44 +++++++++++ .../rs/discover_rsother_replicaset.json | 64 +++++++++++++++ .../rs/discover_secondary.json | 4 +- .../rs/discover_secondary_replicaset.json | 40 ++++++++++ .../rs/replicaset_rsnp.json | 25 ++++++ .../rs/secondary_mismatched_me.json | 4 +- .../rs/topology_version_greater.json | 3 +- .../sharded/discover_single_mongos.json | 30 +++++++ .../single/direct_connection_external_ip.json | 2 +- .../single/direct_connection_mongos.json | 4 +- .../single/direct_connection_replicaset.json | 31 ++++++++ .../single/direct_connection_rsarbiter.json | 4 +- .../single/direct_connection_rsprimary.json | 4 +- .../single/direct_connection_rssecondary.json | 4 +- .../single/direct_connection_slave.json | 2 +- .../single/direct_connection_standalone.json | 4 +- .../direct_connection_unavailable_seed.json | 25 ++++++ .../direct_connection_wrong_set_name.json | 63 +++++++++++++++ .../single/discover_standalone.json | 30 +++++++ .../single/discover_unavailable_seed.json | 25 ++++++ .../single/too_old_then_upgraded.json | 54 +++++++++++++ .../srv_seedlist/direct-connection-false.json | 15 ++++ test/srv_seedlist/direct-connection-true.json | 7 ++ test/test_discovery_and_monitoring.py | 26 ++----- test/test_dns.py | 9 +-- test/uri_options/connection-options.json | 51 ++++++++++++ 44 files changed, 899 insertions(+), 61 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/discover_ghost.json create mode 100644 test/discovery_and_monitoring/rs/discover_ghost_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/discover_hidden.json create mode 100644 test/discovery_and_monitoring/rs/discover_hidden_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/discover_passives_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/discover_primary_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/discover_rsother.json create mode 100644 test/discovery_and_monitoring/rs/discover_rsother_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/discover_secondary_replicaset.json create mode 100644 test/discovery_and_monitoring/rs/replicaset_rsnp.json create mode 100644 test/discovery_and_monitoring/sharded/discover_single_mongos.json create mode 100644 test/discovery_and_monitoring/single/direct_connection_replicaset.json create mode 100644 test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json create mode 100644 test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json create mode 100644 test/discovery_and_monitoring/single/discover_standalone.json create mode 100644 test/discovery_and_monitoring/single/discover_unavailable_seed.json create mode 100644 test/discovery_and_monitoring/single/too_old_then_upgraded.json create mode 100644 test/srv_seedlist/direct-connection-false.json create mode 100644 test/srv_seedlist/direct-connection-true.json diff --git a/doc/changelog.rst b/doc/changelog.rst index a058e75178..219070356a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -12,6 +12,8 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: support. It will also be installed when using the "tls" extra if the version of Python in use is older than 2.7.9. - Support for the :ref:`MONGODB-AWS` authentication mechanism. +- Support for the ``directConnection`` URI option and kwarg to + :class:`~pymongo.mongo_client.MongoClient`. - Added index hinting support to the :meth:`~pymongo.collection.Collection.replace_one`, :meth:`~pymongo.collection.Collection.update_one`, diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 4f40629e7d..9611bb0bda 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -151,6 +151,7 @@ def __init__(self, username, password, database, options): self.__codec_options = _parse_codec_options(options) self.__credentials = _parse_credentials( username, password, database, options) + self.__direct_connection = options.get('directconnection') self.__local_threshold_ms = options.get( 'localthresholdms', common.LOCAL_THRESHOLD_MS) # self.__server_selection_timeout is in seconds. Must use full name for @@ -191,6 +192,11 @@ def credentials(self): """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" return self.__credentials + @property + def direct_connection(self): + """Whether to connect to the deployment in 'Single' topology.""" + return self.__direct_connection + @property def local_threshold_ms(self): """The local threshold for this instance.""" diff --git a/pymongo/common.py b/pymongo/common.py index d0177cea00..68e1af7a41 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -594,6 +594,7 @@ def validate_tzinfo(dummy, value): 'authsource': validate_string, 'compressors': validate_compressors, 'connecttimeoutms': validate_timeout_or_none, + 'directconnection': validate_boolean_or_string, 'heartbeatfrequencyms': validate_timeout_or_none, 'journal': validate_boolean_or_string, 'localthresholdms': validate_positive_float_or_zero, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 0c93604a32..2a6e9d180f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -205,6 +205,12 @@ def __init__( - `connect` (optional): if ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect on the first operation. + - `directConnection` (optional): if ``True``, forces this client to + connect directly to the specified MongoDB host as a standalone. + If ``false``, the client connects to the entire replica set of + which the given MongoDB host(s) is a part. If this is ``True`` + and a mongodb+srv:// URI or a URI containing multiple seeds is + provided, an exception will be raised. | **Other optional parameters can be passed as keyword arguments:** @@ -492,8 +498,10 @@ def __init__( .. mongodoc:: connections .. versionchanged:: 3.11 - Added the ``tlsDisableOCSPEndpointCheck`` keyword argument and - URI option. + Added the following keyword arguments and URI options: + + - ``tlsDisableOCSPEndpointCheck`` + - ``directConnection`` .. versionchanged:: 3.9 Added the ``retryReads`` keyword argument and URI option. @@ -707,7 +715,8 @@ def __init__( server_selection_timeout=options.server_selection_timeout, server_selector=options.server_selector, heartbeat_frequency=options.heartbeat_frequency, - fqdn=fqdn) + fqdn=fqdn, + direct_connection=options.direct_connection) self._topology = Topology(self._topology_settings) if connect: diff --git a/pymongo/server_description.py b/pymongo/server_description.py index f8d0012616..6372147f24 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -216,8 +216,8 @@ def retryable_reads_supported(self): def topology_version(self): return self._topology_version - def to_unknown(self): - unknown = ServerDescription(self.address) + def to_unknown(self, error=None): + unknown = ServerDescription(self.address, error=error) unknown._topology_version = self.topology_version return unknown diff --git a/pymongo/settings.py b/pymongo/settings.py index dd0ac3c1ac..05d15d0de5 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -38,7 +38,8 @@ def __init__(self, server_selection_timeout=SERVER_SELECTION_TIMEOUT, heartbeat_frequency=common.HEARTBEAT_FREQUENCY, server_selector=None, - fqdn=None): + fqdn=None, + direct_connection=None): """Represent MongoClient's configuration. Take a list of (host, port) pairs and optional replica set name. @@ -59,7 +60,12 @@ def __init__(self, self._server_selector = server_selector self._fqdn = fqdn self._heartbeat_frequency = heartbeat_frequency - self._direct = (len(self._seeds) == 1 and not replica_set_name) + + if direct_connection is None: + self._direct = (len(self._seeds) == 1 and not self.replica_set_name) + else: + self._direct = direct_connection + self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 0090692dc6..0b72b65f3d 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -337,6 +337,14 @@ def updated_topology_description(topology_description, server_description): sds[address] = server_description if topology_type == TOPOLOGY_TYPE.Single: + # Set server type to Unknown if replica set name does not match. + if (set_name is not None and + set_name != server_description.replica_set_name): + error = ConfigurationError( + "client is configured to connect to a replica set named " + "'%s' but this node belongs to a set named '%s'" % ( + set_name, server_description.replica_set_name)) + sds[address] = server_description.to_unknown(error=error) # Single type never changes. return TopologyDescription( TOPOLOGY_TYPE.Single, @@ -348,8 +356,11 @@ def updated_topology_description(topology_description, server_description): if topology_type == TOPOLOGY_TYPE.Unknown: if server_type == SERVER_TYPE.Standalone: - sds.pop(address) - + if len(topology_description._topology_settings.seeds) == 1: + topology_type = TOPOLOGY_TYPE.Single + else: + # Remove standalone from Topology when given multiple seeds. + sds.pop(address) elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost): topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type] diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 779a8c5ed9..9c782ac1a4 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -479,6 +479,10 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, fqdn = None if is_srv: + if options.get('directConnection'): + raise ConfigurationError( + "Cannot specify directConnection=true with " + "%s URIs" % (SRV_SCHEME,)) nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: raise InvalidURI( @@ -508,6 +512,9 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, options["ssl"] = True if validate else 'true' else: nodes = split_hosts(hosts, default_port=default_port) + if len(nodes) > 1 and options.get('directConnection'): + raise ConfigurationError( + "Cannot specify multiple hosts with directConnection=true") return { 'nodelist': nodes, diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json index ced7baeb65..ad337c127a 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -1,6 +1,6 @@ { - "description": "Discover arbiters", - "uri": "mongodb://a/?replicaSet=rs", + "description": "Discover arbiters with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json new file mode 100644 index 0000000000..dc00dca5f0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -0,0 +1,41 @@ +{ + "description": "Discover arbiters with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "arbiters": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json new file mode 100644 index 0000000000..1e2ca91bcb --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -0,0 +1,31 @@ +{ + "description": "Discover ghost with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json new file mode 100644 index 0000000000..df504b6ca4 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -0,0 +1,35 @@ +{ + "description": "Discover ghost with replicaSet URI option", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "isreplicaset": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "RSGhost", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json new file mode 100644 index 0000000000..cb68120eaf --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -0,0 +1,45 @@ +{ + "description": "Discover hidden with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json new file mode 100644 index 0000000000..216328dfa5 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -0,0 +1,45 @@ +{ + "description": "Discover hidden with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json index e46249d668..05922dc51c 100644 --- a/test/discovery_and_monitoring/rs/discover_passives.json +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -1,6 +1,6 @@ { - "description": "Discover passives", - "uri": "mongodb://a/?replicaSet=rs", + "description": "Discover passives with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json new file mode 100644 index 0000000000..f9d8c2e032 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -0,0 +1,78 @@ +{ + "description": "Discover passives with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "passive": true, + "hosts": [ + "a:27017" + ], + "passives": [ + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json index ea2cce9b72..b9032144d4 100644 --- a/test/discovery_and_monitoring/rs/discover_primary.json +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -1,6 +1,6 @@ { - "description": "Replica set discovery from primary", - "uri": "mongodb://a/?replicaSet=rs", + "description": "Discover primary with directConnection URI option", + "uri": "mongodb://a/?directConnection=false", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json new file mode 100644 index 0000000000..6f639b1c7e --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -0,0 +1,39 @@ +{ + "description": "Discover primary with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json new file mode 100644 index 0000000000..2cf5a5a6db --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -0,0 +1,44 @@ +{ + "description": "Discover RSOther with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "secondary": false, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "b:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json new file mode 100644 index 0000000000..d9420ca529 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -0,0 +1,64 @@ +{ + "description": "Discover RSOther with replicaSet URI option", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "hidden": true, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "secondary": false, + "hosts": [ + "c:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSOther", + "setName": "rs" + }, + "b:27017": { + "type": "RSOther", + "setName": "rs" + }, + "c:27017": { + "type": "Unknown", + "setName": null + }, + "d:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json index 7210b3845c..02123625a7 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary.json +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -1,6 +1,6 @@ { - "description": "Replica set discovery from secondary", - "uri": "mongodb://b/?replicaSet=rs", + "description": "Discover secondary with directConnection URI option", + "uri": "mongodb://b/?directConnection=false", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json new file mode 100644 index 0000000000..3dde3166b4 --- /dev/null +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -0,0 +1,40 @@ +{ + "description": "Discover secondary with replicaSet URI option", + "uri": "mongodb://b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "ismaster": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json new file mode 100644 index 0000000000..a0f69de486 --- /dev/null +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -0,0 +1,25 @@ +{ + "description": "replicaSet URI option causes starting topology to be RSNP", + "uri": "mongodb://a/?replicaSet=rs&directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": {}, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json index d2a70f6788..769e272a66 100644 --- a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -1,5 +1,6 @@ { "description": "Secondary mismatched me", + "uri": "mongodb://localhost:27017/?replicaSet=rs", "phases": [ { "outcome": { @@ -35,6 +36,5 @@ ] ] } - ], - "uri": "mongodb://localhost:27017/?replicaSet=rs" + ] } diff --git a/test/discovery_and_monitoring/rs/topology_version_greater.json b/test/discovery_and_monitoring/rs/topology_version_greater.json index 2c80fa2a97..afa8108ea2 100644 --- a/test/discovery_and_monitoring/rs/topology_version_greater.json +++ b/test/discovery_and_monitoring/rs/topology_version_greater.json @@ -157,8 +157,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 9, - "topologyVersion": null + "maxWireVersion": 9 } ] ], diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json new file mode 100644 index 0000000000..427889f8cc --- /dev/null +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -0,0 +1,30 @@ +{ + "description": "Discover single mongos", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "msg": "isdbgrid", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Mongos", + "setName": null + } + }, + "topologyType": "Sharded", + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json b/test/discovery_and_monitoring/single/direct_connection_external_ip.json index 4458150186..afd5edc1d2 100644 --- a/test/discovery_and_monitoring/single/direct_connection_external_ip.json +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -1,6 +1,6 @@ { "description": "Direct connection to RSPrimary via external IP", - "uri": "mongodb://a", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json index a7fa079490..9175049cc6 100644 --- a/test/discovery_and_monitoring/single/direct_connection_mongos.json +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -1,6 +1,6 @@ { - "description": "Connect to mongos", - "uri": "mongodb://a", + "description": "Direct connection to mongos", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json new file mode 100644 index 0000000000..c629a709be --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -0,0 +1,31 @@ +{ + "description": "Direct connection with replicaSet URI option", + "uri": "mongodb://a/?replicaSet=rs&directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json index 3ef374d6f1..b07beb31ed 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -1,6 +1,6 @@ { - "description": "Connect to RSArbiter", - "uri": "mongodb://a", + "description": "Direct connection to RSArbiter", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json index bd5aaf7f04..7216a13345 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json @@ -1,6 +1,6 @@ { - "description": "Connect to RSPrimary", - "uri": "mongodb://a", + "description": "Direct connection to RSPrimary", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json index 3b4f3c8c5a..573036f2aa 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -1,6 +1,6 @@ { - "description": "Connect to RSSecondary", - "uri": "mongodb://a", + "description": "Direct connection to RSSecondary", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_slave.json b/test/discovery_and_monitoring/single/direct_connection_slave.json index a40debd183..720ec3dd82 100644 --- a/test/discovery_and_monitoring/single/direct_connection_slave.json +++ b/test/discovery_and_monitoring/single/direct_connection_slave.json @@ -1,6 +1,6 @@ { "description": "Direct connection to slave", - "uri": "mongodb://a", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json index 2ecff9b9ae..c53d76e76e 100644 --- a/test/discovery_and_monitoring/single/direct_connection_standalone.json +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -1,6 +1,6 @@ { - "description": "Connect to standalone", - "uri": "mongodb://a", + "description": "Direct connection to standalone", + "uri": "mongodb://a/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json b/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json new file mode 100644 index 0000000000..16f2735da5 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_unavailable_seed.json @@ -0,0 +1,25 @@ +{ + "description": "Direct connection to unavailable seed", + "uri": "mongodb://a/?directConnection=true", + "phases": [ + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json new file mode 100644 index 0000000000..de0b4b2aa7 --- /dev/null +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -0,0 +1,63 @@ +{ + "description": "Direct connection to RSPrimary with wrong set name", + "uri": "mongodb://a/?directConnection=true&replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "wrong", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json new file mode 100644 index 0000000000..eb6c6ae746 --- /dev/null +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -0,0 +1,30 @@ +{ + "description": "Discover standalone", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/discover_unavailable_seed.json b/test/discovery_and_monitoring/single/discover_unavailable_seed.json new file mode 100644 index 0000000000..b1f306c2be --- /dev/null +++ b/test/discovery_and_monitoring/single/discover_unavailable_seed.json @@ -0,0 +1,25 @@ +{ + "description": "Discover unavailable seed", + "uri": "mongodb://a/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "Unknown", + "logicalSessionTimeoutMinutes": null, + "setName": null + } + } + ] +} diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json new file mode 100644 index 0000000000..7da46856fb --- /dev/null +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -0,0 +1,54 @@ +{ + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 6", + "uri": "mongodb://a", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": false + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Standalone", + "setName": null + } + }, + "topologyType": "Single", + "logicalSessionTimeoutMinutes": null, + "setName": null, + "compatible": true + } + } + ] +} diff --git a/test/srv_seedlist/direct-connection-false.json b/test/srv_seedlist/direct-connection-false.json new file mode 100644 index 0000000000..1d57bdcb3c --- /dev/null +++ b/test/srv_seedlist/direct-connection-false.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?directConnection=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true, + "directConnection": false + } +} diff --git a/test/srv_seedlist/direct-connection-true.json b/test/srv_seedlist/direct-connection-true.json new file mode 100644 index 0000000000..ace6700106 --- /dev/null +++ b/test/srv_seedlist/direct-connection-true.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?directConnection=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because directConnection=true is incompatible with SRV URIs." +} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 81ac6de7c9..9062d7db59 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -21,20 +21,17 @@ sys.path[0:0] = [""] from bson import json_util, Timestamp -from pymongo import common +from pymongo import common, MongoClient from pymongo.errors import (AutoReconnect, ConfigurationError, NetworkTimeout, NotMasterError, OperationFailure) from pymongo.helpers import _check_command_response -from pymongo.topology import (Topology, - _ErrorContext) +from pymongo.topology import _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.ismaster import IsMaster from pymongo.server_description import ServerDescription, SERVER_TYPE -from pymongo.settings import TopologySettings -from pymongo.uri_parser import parse_uri from test import unittest, IntegrationTest from test.utils import (assertion_context, Barrier, @@ -68,25 +65,13 @@ def request_check(self): def create_mock_topology(uri, monitor_class=MockMonitor): - parsed_uri = parse_uri(uri) - replica_set_name = None - if 'replicaset' in parsed_uri['options']: - replica_set_name = parsed_uri['options']['replicaset'] - - topology_settings = TopologySettings( - parsed_uri['nodelist'], - replica_set_name=replica_set_name, - monitor_class=monitor_class) - - c = Topology(topology_settings) - c.open() - return c + mc = MongoClient(uri, _monitor_class=monitor_class) + return mc._get_topology() def got_ismaster(topology, server_address, ismaster_response): server_description = ServerDescription( server_address, IsMaster(ismaster_response), 0) - topology.on_change(server_description) @@ -185,8 +170,9 @@ def check_outcome(self, topology, outcome): actual_server.pool.generation) self.assertEqual(outcome['setName'], topology.description.replica_set_name) - self.assertEqual(outcome['logicalSessionTimeoutMinutes'], + self.assertEqual(outcome.get('logicalSessionTimeoutMinutes'), topology.description.logical_session_timeout_minutes) + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType']) self.assertEqual(topology_type_name(expected_topology_type), topology_type_name(topology.description.topology_type)) diff --git a/test/test_dns.py b/test/test_dns.py index 58c9005d3e..ad50403213 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -52,16 +52,9 @@ def run_test(self): seeds = split_hosts(','.join(seeds)) if hosts: hosts = frozenset(split_hosts(','.join(hosts))) - if options: - for key, value in options.items(): - # Convert numbers / booleans to strings for comparison - if isinstance(value, bool): - options[key] = 'true' if value else 'false' - elif isinstance(value, (int, float)): - options[key] = str(value) if seeds: - result = parse_uri(uri, validate=False) + result = parse_uri(uri, validate=True) self.assertEqual(sorted(result['nodelist']), sorted(seeds)) if options: opts = result['options'] diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index 1e2dccd6e2..12b8c7e2bd 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -117,6 +117,57 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "directConnection=true", + "uri": "mongodb://example.com/?directConnection=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": true + } + }, + { + "description": "directConnection=true with multiple seeds", + "uri": "mongodb://example1.com,example2.com/?directConnection=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "directConnection=false", + "uri": "mongodb://example.com/?directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": false + } + }, + { + "description": "directConnection=false with multiple seeds", + "uri": "mongodb://example1.com,example2.com/?directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "directConnection": false + } + }, + { + "description": "Invalid directConnection value", + "uri": "mongodb://example.com/?directConnection=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } From 6932d25639e35a53a67a7960be0603720ce48b00 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 8 Jun 2020 15:08:44 -0700 Subject: [PATCH 0147/2111] PYTHON-2253 raise client-side error when allowDiskUse is specified with MongoDB <= 3.2 --- pymongo/collection.py | 3 +- pymongo/cursor.py | 4 +- pymongo/message.py | 5 +- .../v2/find-allowdiskuse-clientError.json | 40 ++++++++++++ .../v2/find-allowdiskuse-serverError.json | 61 +++++++++++++++++++ 5 files changed, 109 insertions(+), 4 deletions(-) create mode 100644 test/crud/v2/find-allowdiskuse-clientError.json create mode 100644 test/crud/v2/find-allowdiskuse-serverError.json diff --git a/pymongo/collection.py b/pymongo/collection.py index 567c8587c7..88ead4df04 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1442,7 +1442,8 @@ def find(self, *args, **kwargs): disk files to store data exceeding the system memory limit while processing a blocking sort operation. The option has no effect if MongoDB can satisfy the specified sort using an index, or if the - blocking sort requires less memory than the 100 MiB limit. + blocking sort requires less memory than the 100 MiB limit. This + option is only supported on MongoDB 4.4 and above. .. note:: There are a number of caveats to using :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 522edf17d5..2a994389ac 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -441,7 +441,9 @@ def allow_disk_use(self, allow_disk_use): """Specifies whether MongoDB can use temporary disk files while processing a blocking sort operation. - Raises :exc:`TypeError` is `allow_disk_use` is not a boolean. + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** :Parameters: - `allow_disk_use`: if True, MongoDB may use temporary diff --git a/pymongo/message.py b/pymongo/message.py index c4b76fc1ee..56e02979ce 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -283,8 +283,9 @@ def use_command(self, sock_info, exhaust): 'version of %d.' % (sock_info.max_wire_version,)) if sock_info.max_wire_version < 4 and self.allow_disk_use is not None: - # Ignore allowDiskUse for MongoDB < 3.2. - self.allow_disk_use = None + raise ConfigurationError( + 'Specifying allowDiskUse is unsupported with a max wire ' + 'version of %d.' % (sock_info.max_wire_version,)) sock_info.validate_session(self.client, self.session) diff --git a/test/crud/v2/find-allowdiskuse-clientError.json b/test/crud/v2/find-allowdiskuse-clientError.json new file mode 100644 index 0000000000..5ea013966a --- /dev/null +++ b/test/crud/v2/find-allowdiskuse-clientError.json @@ -0,0 +1,40 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.0.99" + } + ], + "collection_name": "test_find_allowdiskuse_clienterror", + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 3.2 server", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "error": true + } + ], + "expectations": [] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 3.2 server", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "error": true + } + ], + "expectations": [] + } + ] +} diff --git a/test/crud/v2/find-allowdiskuse-serverError.json b/test/crud/v2/find-allowdiskuse-serverError.json new file mode 100644 index 0000000000..31aa50e951 --- /dev/null +++ b/test/crud/v2/find-allowdiskuse-serverError.json @@ -0,0 +1,61 @@ +{ + "runOn": [ + { + "minServerVersion": "3.2", + "maxServerVersion": "4.3.0" + } + ], + "collection_name": "test_find_allowdiskuse_servererror", + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": true + } + } + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": false + } + } + } + ] + } + ] +} From f0585087518f0beb4a4d056841f00e797dde0e98 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 8 Jun 2020 12:24:37 -0700 Subject: [PATCH 0148/2111] PYTHON-2266 Test MONGODB-AWS and OCSP with 4.4 Remove debian71 and ubuntu-12.04 which no longer exist in Evergreen --- .evergreen/config.yml | 118 +++++++++++++++++++++++------------------- 1 file changed, 65 insertions(+), 53 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0f561c2ffa..41ebe8fc24 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1106,7 +1106,6 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: @@ -1119,7 +1118,6 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: @@ -1132,7 +1130,6 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: @@ -1145,7 +1142,6 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: @@ -1157,7 +1153,6 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: @@ -1170,7 +1165,6 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: @@ -1182,20 +1176,35 @@ tasks: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - VERSION: "latest" TOPOLOGY: "server" - func: "run-ocsp-test" vars: OCSP_TLS_SHOULD_SUCCEED: "0" - - name: "aws-auth-test" + - name: "aws-auth-test-4.4" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "4.4" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" + + - name: "aws-auth-test-latest" commands: - func: "bootstrap mongo-orchestration" vars: AUTH: "auth" - # TODO: SSL?? ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" + VERSION: "latest" - func: "add aws auth variables to file" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1243,10 +1252,6 @@ axes: display_name: "Archlinux" run_on: archlinux-test batchtime: 10080 # 7 days - - id: debian71 - display_name: "Debian 7.1" - run_on: debian71-test - batchtime: 10080 # 7 days - id: debian81 display_name: "Debian 8.1" run_on: debian81-test @@ -1293,10 +1298,6 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/suse12-64/master/latest/libmongocrypt.tar.gz - - id: ubuntu-12.04 - display_name: "Ubuntu 12.04" - run_on: ubuntu1204-test - batchtime: 10080 # 7 days - id: ubuntu-16.04 display_name: "Ubuntu 16.04" run_on: ubuntu1604-test @@ -1392,6 +1393,47 @@ axes: variables: COMPRESSORS: "zstd" + # Choice of MongoDB server version + - id: mongodb-version + display_name: "MongoDB" + values: + - id: "2.6" + display_name: "MongoDB 2.6" + variables: + VERSION: "2.6" + - id: "3.0" + display_name: "MongoDB 3.0" + variables: + VERSION: "3.0" + - id: "3.2" + display_name: "MongoDB 3.2" + variables: + VERSION: "3.2" + - id: "3.4" + display_name: "MongoDB 3.4" + variables: + VERSION: "3.4" + - id: "3.6" + display_name: "MongoDB 3.6" + variables: + VERSION: "3.6" + - id: "4.0" + display_name: "MongoDB 4.0" + variables: + VERSION: "4.0" + - id: "4.2" + display_name: "MongoDB 4.2" + variables: + VERSION: "4.2" + - id: "4.4" + display_name: "MongoDB 4.4" + variables: + VERSION: "4.4" + - id: "latest" + display_name: "MongoDB latest" + variables: + VERSION: "latest" + # Choice of Python runtime version - id: python-version display_name: "Python" @@ -1637,39 +1679,6 @@ buildvariants: - ".4.0" - ".2.6" -- matrix_name: "tests-no-36-plus" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 and <3.6 with SSL. - - ubuntu-12.04 - auth-ssl: "*" - # Ubuntu 12 ships Python 2.7.3. We want to test that version with - # and without C extensions - c-extensions: "*" - display_name: "${platform} ${auth-ssl} ${c-extensions}" - tasks: - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - -- matrix_name: "tests-no-40-plus" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 and <4.0 with SSL. - - debian71 - auth-ssl: "*" - # Debian 7 ships Python 2.7.3. We want to test that version with - # and without C extensions - c-extensions: "*" - display_name: "${platform} ${auth-ssl} ${c-extensions}" - tasks: - - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - - matrix_name: "tests-archlinux" matrix_spec: platform: @@ -2177,9 +2186,10 @@ buildvariants: matrix_spec: platform: ubuntu-16.04 python-version: ["2.7", "3.4", "3.8", "pypy", "pypy3.5"] + mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" - display_name: "OCSP test ${python-version} ${platform}" + display_name: "OCSP test ${python-version} ${mongodb-version} ${platform}" tasks: - name: ".ocsp" @@ -2189,7 +2199,8 @@ buildvariants: python-version: ["system-python", "system-python3"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: - - name: "aws-auth-test" + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-latest" - matrix_name: "aws-auth-test-windows" matrix_spec: @@ -2197,7 +2208,8 @@ buildvariants: python-version-windows: "*" display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" tasks: - - name: "aws-auth-test" + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-latest" # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available From 4c56ead95522cccc49ce09ae3bf025baaec0666e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 8 Jun 2020 15:18:51 -0700 Subject: [PATCH 0149/2111] PYTHON-2149 Test PyOpenSSL with all MongoDB versions --- .evergreen/config.yml | 65 ++++++++++++++++--------------- .evergreen/run-pyopenssl-tests.sh | 54 ------------------------- .evergreen/run-tests.sh | 35 +++++++++++++++++ 3 files changed, 69 insertions(+), 85 deletions(-) delete mode 100644 .evergreen/run-pyopenssl-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 41ebe8fc24..264c028dd8 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -339,19 +339,6 @@ functions: ${PREPARE_SHELL} PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-cdecimal-tests.sh - "run pyopenssl tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - if [ -n "${MONGODB_STARTED}" ]; then - export PYMONGO_MUST_CONNECT=1 - fi - PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-pyopenssl-tests.sh - "run doctests": - command: shell.exec type: test @@ -399,6 +386,9 @@ functions: export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi + if [ -n "${test_pyopenssl}" ]; then + export TEST_PYOPENSSL=1 + fi if [ -n "${SETDEFAULTENCODING}" ]; then export SETDEFAULTENCODING="${SETDEFAULTENCODING}" fi @@ -1076,17 +1066,6 @@ tasks: TOPOLOGY: "server" - func: "run cdecimal tests" - # Use latest for this, since we're - # adding this to support OCSP stapling - - name: "pyopenssl" - tags: ["pyopenssl"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run pyopenssl tests" - - name: "no-server" tags: ["no-server"] commands: @@ -1632,6 +1611,17 @@ axes: tags: ["encryption_tag"] variables: test_encryption: true + batchtime: 10080 # 7 days + + # Run pyopenssl tests? + - id: pyopenssl + display_name: "PyOpenSSL" + values: + - id: "enabled" + display_name: "PyOpenSSL" + variables: + test_pyopenssl: true + batchtime: 10080 # 7 days # Run setdefaultencoding before running the test suite? - id: setdefaultencoding @@ -1834,39 +1824,52 @@ buildvariants: python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "pypy", "pypy3.5"] auth: "*" ssl: "ssl" + pyopenssl: "*" + # Only test "noauth" with Python 3.7. + exclude_spec: + platform: ubuntu-16.04 + python-version: ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5"] + auth: "noauth" + ssl: "ssl" + pyopenssl: "*" display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - - "pyopenssl" + - '.replica_set !.2.6 !.3.0' + # Test standalone and sharded only on 4.4. + - '.4.4' - matrix_name: "test-pyopenssl-old-py27" matrix_spec: platform: # Supported OSes with pre-2.7.9 CPython versions. - rhel70 # CPython 2.7.5 - auth: "*" + auth: "auth" ssl: "ssl" + pyopenssl: "*" display_name: "PyOpenSSL ${platform} Pre-2.7.9 Python ${auth}" tasks: - - "pyopenssl" + - '.replica_set' - matrix_name: "tests-pyopenssl-macOS" matrix_spec: platform: macos-1014 - auth: "*" + auth: "auth" ssl: "ssl" + pyopenssl: "*" display_name: "PyOpenSSL ${platform} ${auth}" tasks: - - "pyopenssl" + - '.replica_set !.2.6 !.3.0' - matrix_name: "tests-pyopenssl-windows" matrix_spec: platform: windows-64-vsMulti-small python-version-windows: "*" - auth: "*" + auth: "auth" ssl: "ssl" + pyopenssl: "*" display_name: "PyOpenSSL ${platform} ${python-version-windows} ${auth}" tasks: - - "pyopenssl" + - '.replica_set' - matrix_name: "tests-python-version-rhel62-test-encryption" matrix_spec: diff --git a/.evergreen/run-pyopenssl-tests.sh b/.evergreen/run-pyopenssl-tests.sh deleted file mode 100644 index ea4271572b..0000000000 --- a/.evergreen/run-pyopenssl-tests.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -export DB_USER="bob" -export DB_PASSWORD="pwd123" -export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" -export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" - -if [ -z "$PYTHON_BINARY" ]; then - echo "No python binary specified" - PYTHON=$(command -v python || command -v python3) || true - if [ -z "$PYTHON" ]; then - echo "Cannot test without python or python3 installed!" - exit 1 - fi -else - PYTHON="$PYTHON_BINARY" -fi - -if $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv" -elif command -v virtualenv; then - # We can remove this fallback after: - # https://github.com/10gen/mongo-python-toolchain/issues/8 - VIRTUALENV="$(command -v virtualenv) -p $PYTHON" -else - echo "Cannot test without virtualenv" - exit 1 -fi - -$VIRTUALENV pyopenssltest -if [ "Windows_NT" = "$OS" ]; then - . pyopenssltest/Scripts/activate -else - . pyopenssltest/bin/activate -fi -trap "deactivate; rm -rf pyopenssltest" EXIT HUP - -IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") -if [ $IS_PYTHON_2 = "1" ]; then - echo "Using a Python 2" - # Upgrade pip to install the cryptography wheel and not the tar. - # <20.1 because 20.0.2 says a future release may drop support for 2.7. - pip install --upgrade 'pip<20.1' - # Upgrade setuptools because cryptography requires 18.5+. - # <45 because 45.0 dropped support for 2.7. - pip install --upgrade 'setuptools<45' -fi - -pip install pyopenssl requests service_identity -python -c 'import sys; print(sys.version)' -python setup.py test diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 8b27e0ec84..50902d0206 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -89,6 +89,41 @@ else PYTHON="$PYTHON_BINARY" fi +# PyOpenSSL test setup. +if [ -n "$TEST_PYOPENSSL" ]; then + if $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" + elif command -v virtualenv; then + # We can remove this fallback after: + # https://github.com/10gen/mongo-python-toolchain/issues/8 + VIRTUALENV="$(command -v virtualenv) -p $PYTHON" + else + echo "Cannot test without virtualenv" + exit 1 + fi + + $VIRTUALENV pyopenssltest + if [ "Windows_NT" = "$OS" ]; then + . pyopenssltest/Scripts/activate + else + . pyopenssltest/bin/activate + fi + trap "deactivate; rm -rf pyopenssltest" EXIT HUP + + IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") + if [ $IS_PYTHON_2 = "1" ]; then + echo "Using a Python 2" + # Upgrade pip to install the cryptography wheel and not the tar. + # <20.1 because 20.0.2 says a future release may drop support for 2.7. + python -m pip install --upgrade 'pip<20.1' + # Upgrade setuptools because cryptography requires 18.5+. + # <45 because 45.0 dropped support for 2.7. + python -m pip install --upgrade 'setuptools<45' + fi + + python -m pip install pyopenssl requests service_identity +fi + if [ -n "$TEST_ENCRYPTION" ]; then if [ -z "$LIBMONGOCRYPT_URL" ]; then echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" From 6d2f2b516e0b98e64f01266e16311643699457ee Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 8 Jun 2020 22:25:36 -0700 Subject: [PATCH 0150/2111] PYTHON-2144 Test OCSP on macOS and Windows Add ECDSA testing on Ubuntu. ECDSA certs are not supported on macOS/Windows, only test RSA. Log error message when OCSP HTTP request fails. Remove nohup which does not work on macOS. --- .evergreen/config.yml | 419 ++++++++++++++++++++++++++++++----- .evergreen/run-ocsp-tests.sh | 8 +- pymongo/ocsp_support.py | 4 +- test/ocsp/test_ocsp.py | 18 +- tools/ocsptest.py | 5 +- 5 files changed, 386 insertions(+), 68 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 264c028dd8..ac5e4147f9 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -679,44 +679,79 @@ functions: script: | ${PREPARE_SHELL} PYTHON_BINARY=${PYTHON_BINARY} \ - CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/rsa/ca.pem" \ + CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ sh ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-tests.sh - "run-valid-ocsp-server": + run-valid-ocsp-server: - command: shell.exec params: script: | cd ${DRIVERS_TOOLS}/.evergreen/ocsp - /opt/mongodbtoolchain/v3/bin/python3 -m venv ./venv - ./venv/bin/pip3 install -r ${DRIVERS_TOOLS}/.evergreen/ocsp/mock-ocsp-responder-requirements.txt + ${python3_binary} -m venv ./venv + ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt - command: shell.exec params: background: true script: | cd ${DRIVERS_TOOLS}/.evergreen/ocsp - nohup ./venv/bin/python3 ocsp_mock.py \ - --ca_file rsa/ca.pem \ - --ocsp_responder_cert rsa/ca.crt \ - --ocsp_responder_key rsa/ca.key \ + ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ -p 8100 -v - - "run-revoked-ocsp-server": + run-revoked-ocsp-server: - command: shell.exec params: script: | cd ${DRIVERS_TOOLS}/.evergreen/ocsp - /opt/mongodbtoolchain/v3/bin/python3 -m venv ./venv - ./venv/bin/pip3 install -r ${DRIVERS_TOOLS}/.evergreen/ocsp/mock-ocsp-responder-requirements.txt + ${python3_binary} -m venv ./venv + ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt - command: shell.exec params: background: true script: | cd ${DRIVERS_TOOLS}/.evergreen/ocsp - nohup ./venv/bin/python3 ocsp_mock.py \ - --ca_file rsa/ca.pem \ - --ocsp_responder_cert rsa/ca.crt \ - --ocsp_responder_key rsa/ca.key \ + ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ + -p 8100 \ + -v \ + --fault revoked + run-valid-delegate-ocsp-server: + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + ${python3_binary} -m venv ./venv + ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt + - command: shell.exec + params: + background: true + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ + -p 8100 -v + run-revoked-delegate-ocsp-server: + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + ${python3_binary} -m venv ./venv + ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt + - command: shell.exec + params: + background: true + script: | + cd ${DRIVERS_TOOLS}/.evergreen/ocsp + ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ + --ca_file ${OCSP_ALGORITHM}/ca.pem \ + --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ + --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ -p 8100 \ -v \ --fault revoked @@ -1078,87 +1113,329 @@ tasks: commands: - func: "run atlas tests" - - name: "test-ocsp-valid-cert-server-staples" - tags: ["ocsp"] + - name: test-ocsp-rsa-valid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: - - func: "run-valid-ocsp-server" + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "1" + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" - - name: "test-ocsp-invalid-cert-server-staples" - tags: ["ocsp"] + - name: test-ocsp-rsa-invalid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: - - func: "run-revoked-ocsp-server" + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "0" + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" - - name: "test-ocsp-valid-cert-server-does-not-staple" - tags: ["ocsp"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] commands: - - func: "run-valid-ocsp-server" + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "1" + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" - - name: "test-ocsp-invalid-cert-server-does-not-staple" - tags: ["ocsp"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] commands: - - func: "run-revoked-ocsp-server" + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "0" + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" - - name: "test-ocsp-soft-fail" - tags: ["ocsp"] + - name: test-ocsp-rsa-soft-fail + tags: ["ocsp", "ocsp-rsa"] commands: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "1" + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" - - name: "test-ocsp-malicious-invalid-cert-mustStaple-server-does-not-staple" - tags: ["ocsp"] + - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] commands: - - func: "run-revoked-ocsp-server" + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "0" + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" - - name: "test-ocsp-malicious-no-responder-mustStaple-server-does-not-staple" - tags: ["ocsp"] + - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] commands: - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - TOPOLOGY: "server" - - func: "run-ocsp-test" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-delegate-valid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples + tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-rsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "rsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "rsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-valid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-invalid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-valid-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-soft-fail + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test vars: - OCSP_TLS_SHOULD_SUCCEED: "0" + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples + tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-valid-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "true" + + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" + + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + tags: ["ocsp", "ocsp-ecdsa"] + commands: + - func: run-revoked-delegate-ocsp-server + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "bootstrap mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" + - func: run-ocsp-test + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_TLS_SHOULD_SUCCEED: "false" - name: "aws-auth-test-4.4" commands: @@ -1283,6 +1560,7 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604/master/latest/libmongocrypt.tar.gz + python3_binary: "/opt/python/3.8/bin/python3" - id: ubuntu-18.04 display_name: "Ubuntu 18.04" run_on: ubuntu1804-test @@ -1312,6 +1590,7 @@ axes: variables: skip_ECS_auth_test: true python3_binary: "C:/python/Python38/python.exe" + venv_bin_dir: "Scripts" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz # Test with authentication? @@ -2192,10 +2471,36 @@ buildvariants: mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" - display_name: "OCSP test ${python-version} ${mongodb-version} ${platform}" + display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" + batchtime: 20160 # 14 days tasks: - name: ".ocsp" +- matrix_name: "ocsp-test-windows" + matrix_spec: + platform: windows-64-vsMulti-small + python-version-windows: ["2.7", "3.4", "3.8"] + mongodb-version: ["4.4", "latest"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" + batchtime: 20160 # 14 days + tasks: + # Windows MongoDB servers do not staple OCSP responses and only support RSA. + - name: ".ocsp-rsa !.ocsp-staple" + +- matrix_name: "ocsp-test-macos" + matrix_spec: + platform: macos-1014 + mongodb-version: ["4.4", "latest"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${platform} ${mongodb-version}" + batchtime: 20160 # 14 days + tasks: + # macOS MongoDB servers do not staple OCSP responses and only support RSA. + - name: ".ocsp-rsa !.ocsp-staple" + - matrix_name: "aws-auth-test" matrix_spec: platform: [ubuntu-18.04, macos-1014] diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh index 25006811dc..17abb395ca 100644 --- a/.evergreen/run-ocsp-tests.sh +++ b/.evergreen/run-ocsp-tests.sh @@ -6,7 +6,7 @@ set -o errexit if [ -z "$PYTHON_BINARY" ]; then echo "No python binary specified" PYTHON=$(command -v python || command -v python3) || true - if [ -z "$PYTHON_BINARY" ]; then + if [ -z "$PYTHON" ]; then echo "Cannot test without python or python3 installed!" exit 1 fi @@ -38,12 +38,12 @@ if [ $IS_PYTHON_2 = "1" ]; then echo "Using a Python 2" # Upgrade pip to install the cryptography wheel and not the tar. # <20.1 because 20.0.2 says a future release may drop support for 2.7. - pip install --upgrade 'pip<20.1' + python -m pip install --upgrade 'pip<20.1' # Upgrade setuptools because cryptography requires 18.5+. # <45 because 45.0 dropped support for 2.7. - pip install --upgrade 'setuptools<45' + python -m pip install --upgrade 'setuptools<45' fi -pip install pyopenssl requests service_identity +python -m pip install pyopenssl requests service_identity OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED} CA_FILE=${CA_FILE} python test/ocsp/test_ocsp.py diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 20e029037e..f7f8975041 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -247,8 +247,8 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): data=ocsp_request.public_bytes(_Encoding.DER), headers={'Content-Type': 'application/ocsp-request'}, timeout=5) - except _RequestException: - _LOGGER.debug("HTTP request failed") + except _RequestException as exc: + _LOGGER.debug("HTTP request failed: %s", exc) return None if response.status_code != 200: _LOGGER.debug("HTTP request returned %d", response.status_code) diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index c65ad269cd..34270f872a 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -27,13 +27,23 @@ CA_FILE = os.environ.get("CA_FILE") -OCSP_TLS_SHOULD_SUCCEED = bool(int(os.environ.get('OCSP_TLS_SHOULD_SUCCEED', 0))) +OCSP_TLS_SHOULD_SUCCEED = (os.environ.get('OCSP_TLS_SHOULD_SUCCEED') == 'true') + +# Enable logs in this format: +# 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response +FORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s' +logging.basicConfig(format=FORMAT, level=logging.DEBUG) + +if sys.platform == 'win32': + # The non-stapled OCSP endpoint check is slow on Windows. + TIMEOUT_MS = 5000 +else: + TIMEOUT_MS = 500 -logging.basicConfig(level=logging.DEBUG) def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=500" - "&tlsCAFile=%s&%s" % (CA_FILE, options)) + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" + "&tlsCAFile=%s&%s") % (TIMEOUT_MS, CA_FILE, options) print(uri) client = pymongo.MongoClient(uri) client.admin.command('ismaster') diff --git a/tools/ocsptest.py b/tools/ocsptest.py index 1504169e2d..3476bf7a23 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -20,7 +20,10 @@ from pymongo.pyopenssl_context import SSLContext, PROTOCOL_SSLv23 -logging.basicConfig(level=logging.DEBUG) +# Enable logs in this format: +# 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response +FORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s' +logging.basicConfig(format=FORMAT, level=logging.DEBUG) def check_ocsp(host, port, capath): ctx = SSLContext(PROTOCOL_SSLv23) From 8fdda6857fb35cb9c8063ee9c3a76020e4811778 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 9 Jun 2020 09:28:16 -0700 Subject: [PATCH 0151/2111] BUMP 3.11.0b1 --- doc/changelog.rst | 17 +++++++++++++---- doc/installation.rst | 4 ++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 219070356a..32666bfff4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 3.11.0b1.dev0 --------------------------------- +Changes in Version 3.11.0b1 +--------------------------- Version 3.11 adds support for MongoDB 4.4. Highlights include: @@ -14,6 +14,11 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: - Support for the :ref:`MONGODB-AWS` authentication mechanism. - Support for the ``directConnection`` URI option and kwarg to :class:`~pymongo.mongo_client.MongoClient`. +- Support for speculative authentication attempts in connection handshakes + which reduces the number of network roundtrips needed to authenticate new + connections on MongoDB 4.4+. +- Support for creating collections in multi-document transactions with + :meth:`~pymongo.database.Database.create_collection` on MongoDB 4.4+. - Added index hinting support to the :meth:`~pymongo.collection.Collection.replace_one`, :meth:`~pymongo.collection.Collection.update_one`, @@ -34,8 +39,12 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: of this parameter see the MongoDB documentation for the `validate command`_. - Added the ``allow_disk_use`` parameters to :meth:`pymongo.collection.Collection.find`. -- Support for creating collections in multi-document transactions with - :meth:`~pymongo.database.Database.create_collection` on MongoDB 4.4+. +- Added the ``hedge`` parameter to + :class:`~pymongo.read_preferences.PrimaryPreferred`, + :class:`~pymongo.read_preferences.Secondary`, + :class:`~pymongo.read_preferences.SecondaryPreferred`, + :class:`~pymongo.read_preferences.Nearest` to support disabling + (or explicitly enabling) hedged reads in MongoDB 4.4+. - Deprecated the ``oplog_replay`` parameter to :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the server optimizes queries against the oplog collection without requiring diff --git a/doc/installation.rst b/doc/installation.rst index 0bda72e16f..f349917405 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -275,8 +275,8 @@ but can be found on the `GitHub tags page `_. They can be installed by passing the full URL for the tag to pip:: - $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b0.tar.gz + $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b1.tar.gz or easy_install:: - $ python -m easy_install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b0.tar.gz + $ python -m easy_install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b1.tar.gz diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 74246e72c2..dd02d9e720 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -70,7 +70,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, 'b1.dev0') +version_tuple = (3, 11, 0, 'b1') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index fc1ee93f79..c8f6b9668f 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0b1.dev0" +version = "3.11.0b1" f = open("README.rst") try: From aeb0bd74de17253ac3dd3efbf660f9d54eaab526 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 9 Jun 2020 10:04:03 -0700 Subject: [PATCH 0152/2111] BUMP 3.11.0b2.dev0 --- doc/changelog.rst | 4 ++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 32666bfff4..8a54c78f27 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 3.11.0b1 ---------------------------- +Changes in Version 3.11.0b2.dev0 +-------------------------------- Version 3.11 adds support for MongoDB 4.4. Highlights include: diff --git a/pymongo/__init__.py b/pymongo/__init__.py index dd02d9e720..ba6b17af72 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -70,7 +70,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, 'b1') +version_tuple = (3, 11, 0, 'b2.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index c8f6b9668f..7379b260ee 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0b1" +version = "3.11.0b2.dev0" f = open("README.rst") try: From 69dde9ebfeb0422c03df0cd7b6c2ec7e08e3e6a8 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 10 Jun 2020 16:38:29 -0700 Subject: [PATCH 0153/2111] PYTHON-2285 Only start kill cursors background thread if/when the client connects to the MongoDB deployment --- pymongo/mongo_client.py | 6 +++--- test/test_client.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 2a6e9d180f..33f627f2cc 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -719,8 +719,6 @@ def __init__( direct_connection=options.direct_connection) self._topology = Topology(self._topology_settings) - if connect: - self._topology.open() def target(): client = self_ref() @@ -739,7 +737,9 @@ def target(): # this closure. When the client is freed, stop the executor soon. self_ref = weakref.ref(self, executor.close) self._kill_cursors_executor = executor - executor.open() + + if connect: + self._get_topology() self._encrypter = None if self.__options.auto_encryption_opts: diff --git a/test/test_client.py b/test/test_client.py index 084235a1b8..be4bff0e1d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -790,6 +790,24 @@ def test_close_stops_kill_cursors_thread(self): client.close() self.assertTrue(client._kill_cursors_executor._stopped) + def test_uri_connect_option(self): + # Ensure that topology is not opened if connect=False. + client = rs_client(connect=False) + self.assertFalse(client._topology._opened) + + # Ensure kill cursors thread has not been started. + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + + # Using the client should open topology and start the thread. + client.admin.command('isMaster') + self.assertTrue(client._topology._opened) + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + + # Tear down. + client.close() + def test_close_does_not_open_servers(self): client = rs_client(connect=False) topology = client._topology From 70fb1cce952746e24bb23502e94cfffeeb80bf68 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Jun 2020 11:40:24 -0700 Subject: [PATCH 0154/2111] PYTHON-2293 Fix OCSP test script for Windows --- tools/ocsptest.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/tools/ocsptest.py b/tools/ocsptest.py index 3476bf7a23..b2d5c0a495 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -18,7 +18,8 @@ from ssl import CERT_REQUIRED -from pymongo.pyopenssl_context import SSLContext, PROTOCOL_SSLv23 +from pymongo.pyopenssl_context import SSLContext +from pymongo.ssl_support import get_ssl_context # Enable logs in this format: # 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response @@ -26,12 +27,18 @@ logging.basicConfig(format=FORMAT, level=logging.DEBUG) def check_ocsp(host, port, capath): - ctx = SSLContext(PROTOCOL_SSLv23) - ctx.verify_mode = CERT_REQUIRED - if capath is not None: - ctx.load_verify_locations(capath) - else: - ctx.set_default_verify_paths() + ctx = get_ssl_context( + None, # certfile + None, # keyfile + None, # passphrase + capath, + CERT_REQUIRED, + None, # crlfile + True, # match_hostname + True) # check_ocsp_endpoint + + # Ensure we're using pyOpenSSL. + assert isinstance(ctx, SSLContext) s = socket.socket() s.connect((host, port)) From 74202455aa33b762641caeeaf4b87b7c58675820 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Tue, 16 Jun 2020 11:53:45 -0700 Subject: [PATCH 0155/2111] PYTHON-2278 Fix Jython SDAM test failures --- .evergreen/run-tests.sh | 3 +-- test/__init__.py | 3 ++- test/test_client.py | 20 ++++++++++++++++++++ test/test_discovery_and_monitoring.py | 27 ++++++++++++++++++++++----- test/utils.py | 10 ++++++---- 5 files changed, 51 insertions(+), 12 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 50902d0206..9987bd6945 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -169,8 +169,7 @@ else fi # Don't download unittest-xml-reporting from pypi, which often fails. -HAVE_XMLRUNNER=$($PYTHON -c "import pkgutil, sys; sys.stdout.write('1' if pkgutil.find_loader('xmlrunner') else '0')") -if [ $HAVE_XMLRUNNER = "1" ]; then +if $PYTHON -c "import xmlrunner"; then # The xunit output dir must be a Python style absolute path. XUNIT_DIR="$(pwd)/xunit-results" if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin diff --git a/test/__init__.py b/test/__init__.py index 73a91c0b2b..2d7fdb6f88 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -27,7 +27,8 @@ try: from xmlrunner import XMLTestRunner HAVE_XML = True -except ImportError: +# ValueError is raised when version 3+ is installed on Jython 2.7. +except (ImportError, ValueError): HAVE_XML = False try: diff --git a/test/test_client.py b/test/test_client.py index be4bff0e1d..6b4f23165e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -60,6 +60,7 @@ from pymongo.server_selectors import (any_server_selector, writable_server_selector) from pymongo.server_type import SERVER_TYPE +from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.write_concern import WriteConcern from test import (client_context, @@ -1552,6 +1553,25 @@ def stall_connect(*args, **kwargs): # Each ping command should not take more than 2 seconds self.assertLess(total, 2) + @client_context.require_replica_set + def test_direct_connection(self): + # direct_connection=True should result in Single topology. + client = rs_or_single_client(directConnection=True) + client.admin.command('ping') + self.assertEqual(len(client.nodes), 1) + self.assertEqual(client._topology_settings.get_topology_type(), + TOPOLOGY_TYPE.Single) + client.close() + + # direct_connection=False should result in RS topology. + client = rs_or_single_client(directConnection=False) + client.admin.command('ping') + self.assertGreaterEqual(len(client.nodes), 1) + self.assertIn(client._topology_settings.get_topology_type(), + [TOPOLOGY_TYPE.ReplicaSetNoPrimary, + TOPOLOGY_TYPE.ReplicaSetWithPrimary]) + client.close() + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 9062d7db59..4b17fc8530 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -21,17 +21,19 @@ sys.path[0:0] = [""] from bson import json_util, Timestamp -from pymongo import common, MongoClient +from pymongo import common from pymongo.errors import (AutoReconnect, ConfigurationError, NetworkTimeout, NotMasterError, OperationFailure) from pymongo.helpers import _check_command_response -from pymongo.topology import _ErrorContext -from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.ismaster import IsMaster from pymongo.server_description import ServerDescription, SERVER_TYPE +from pymongo.settings import TopologySettings +from pymongo.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.uri_parser import parse_uri from test import unittest, IntegrationTest from test.utils import (assertion_context, Barrier, @@ -65,8 +67,23 @@ def request_check(self): def create_mock_topology(uri, monitor_class=MockMonitor): - mc = MongoClient(uri, _monitor_class=monitor_class) - return mc._get_topology() + parsed_uri = parse_uri(uri) + replica_set_name = None + direct_connection = None + if 'replicaset' in parsed_uri['options']: + replica_set_name = parsed_uri['options']['replicaset'] + if 'directConnection' in parsed_uri['options']: + direct_connection = parsed_uri['options']['directConnection'] + + topology_settings = TopologySettings( + parsed_uri['nodelist'], + replica_set_name=replica_set_name, + monitor_class=monitor_class, + direct_connection=direct_connection) + + c = Topology(topology_settings) + c.open() + return c def got_ismaster(topology, server_address, ismaster_response): diff --git a/test/utils.py b/test/utils.py index 5f51b7f10f..4e213d50b5 100644 --- a/test/utils.py +++ b/test/utils.py @@ -427,12 +427,13 @@ def _connection_string(h, authenticate): return "mongodb://%s" % (str(h),) -def _mongo_client(host, port, authenticate=True, direct=False, **kwargs): +def _mongo_client(host, port, authenticate=True, directConnection=False, + **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port client_options = client_context.default_client_options.copy() - if client_context.replica_set_name and not direct: + if client_context.replica_set_name and not directConnection: client_options['replicaSet'] = client_context.replica_set_name client_options.update(kwargs) @@ -444,12 +445,13 @@ def _mongo_client(host, port, authenticate=True, direct=False, **kwargs): def single_client_noauth(h=None, p=None, **kwargs): """Make a direct connection. Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, direct=True, **kwargs) + return _mongo_client(h, p, authenticate=False, + directConnection=True, **kwargs) def single_client(h=None, p=None, **kwargs): """Make a direct connection, and authenticate if necessary.""" - return _mongo_client(h, p, direct=True, **kwargs) + return _mongo_client(h, p, directConnection=True, **kwargs) def rs_client_noauth(h=None, p=None, **kwargs): From 84fd04ec6d3397847da1e4a6ba1b70d5d809f6b3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Jun 2020 18:24:08 -0700 Subject: [PATCH 0156/2111] PYTHON-1852 Use TLS option names in test suite ClientContext (#442) --- test/__init__.py | 42 ++++++++++++--------------------- test/test_client.py | 4 ++-- test/test_dns.py | 4 ++-- test/test_replica_set_client.py | 2 +- test/test_ssl.py | 2 +- 5 files changed, 21 insertions(+), 33 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 2d7fdb6f88..175103effc 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -81,15 +81,12 @@ CLIENT_PEM = os.environ.get('CLIENT_PEM', os.path.join(CERT_PATH, 'client.pem')) CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem')) -CERT_REQS = validate_cert_reqs('CERT_REQS', os.environ.get('CERT_REQS')) -_SSL_OPTIONS = dict(ssl=True) +TLS_OPTIONS = dict(tls=True) if CLIENT_PEM: - _SSL_OPTIONS['ssl_certfile'] = CLIENT_PEM + TLS_OPTIONS['tlsCertificateKeyFile'] = CLIENT_PEM if CA_PEM: - _SSL_OPTIONS['ssl_ca_certs'] = CA_PEM -if CERT_REQS is not None: - _SSL_OPTIONS['ssl_cert_reqs'] = CERT_REQS + TLS_OPTIONS['tlsCAFile'] = CA_PEM COMPRESSORS = os.environ.get("COMPRESSORS") @@ -187,8 +184,7 @@ def __init__(self): self.mongoses = [] self.is_rs = False self.has_ipv6 = False - self.ssl = False - self.ssl_cert_none = False + self.tls = False self.ssl_certfile = False self.server_is_resolvable = is_server_resolvable() self.default_client_options = {} @@ -235,13 +231,11 @@ def _init_client(self): self.client = self._connect(host, port) if HAVE_SSL and not self.client: # Is MongoDB configured for SSL? - self.client = self._connect(host, port, **_SSL_OPTIONS) + self.client = self._connect(host, port, **TLS_OPTIONS) if self.client: - self.ssl = True - self.default_client_options.update(_SSL_OPTIONS) + self.tls = True + self.default_client_options.update(TLS_OPTIONS) self.ssl_certfile = True - if _SSL_OPTIONS.get('ssl_cert_reqs') == ssl.CERT_NONE: - self.ssl_cert_none = True if self.client: self.connected = True @@ -608,22 +602,16 @@ def require_failCommand_fail_point(self, func): "failCommand fail point must be supported", func=func) - def require_ssl(self, func): - """Run a test only if the client can connect over SSL.""" - return self._require(lambda: self.ssl, - "Must be able to connect via SSL", + def require_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: self.tls, + "Must be able to connect via TLS", func=func) - def require_no_ssl(self, func): - """Run a test only if the client can connect over SSL.""" - return self._require(lambda: not self.ssl, - "Must be able to connect without SSL", - func=func) - - def require_ssl_cert_none(self, func): - """Run a test only if the client can connect with ssl.CERT_NONE.""" - return self._require(lambda: self.ssl_cert_none, - "Must be able to connect with ssl.CERT_NONE", + def require_no_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: not self.tls, + "Must be able to connect without TLS", func=func) def require_ssl_certfile(self, func): diff --git a/test/test_client.py b/test/test_client.py index 6b4f23165e..0b203b7eb6 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -919,7 +919,7 @@ def test_lazy_auth_raises_operation_failure(self): assertRaisesExactly( OperationFailure, lazy_client.test.collection.find_one) - @client_context.require_no_ssl + @client_context.require_no_tls def test_unix_socket(self): if not hasattr(socket, "AF_UNIX"): raise SkipTest("UNIX-sockets are not supported on this system") @@ -1086,7 +1086,7 @@ def test_tz_aware(self): @client_context.require_ipv6 def test_ipv6(self): - if client_context.ssl: + if client_context.tls: if not HAVE_IPADDRESS: raise SkipTest("Need the ipaddress module to test with SSL") diff --git a/test/test_dns.py b/test/test_dns.py index ad50403213..fc5f98e3b6 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -40,7 +40,7 @@ class TestDNS(unittest.TestCase): def create_test(test_case): @client_context.require_replica_set - @client_context.require_ssl + @client_context.require_tls def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") @@ -68,7 +68,7 @@ def run_test(self): # The replica set members must be configured as 'localhost'. if hostname == 'localhost': copts = client_context.default_client_options.copy() - if client_context.ssl is True: + if client_context.tls is True: # Our test certs don't support the SRV hosts used in these tests. copts['ssl_match_hostname'] = False diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py index c653784181..c0a577b1d4 100644 --- a/test/test_replica_set_client.py +++ b/test/test_replica_set_client.py @@ -195,7 +195,7 @@ def test_timeout_does_not_mark_member_down(self): @client_context.require_ipv6 def test_ipv6(self): - if client_context.ssl: + if client_context.tls: if not HAVE_IPADDRESS: raise SkipTest("Need the ipaddress module to test with SSL") diff --git a/test/test_ssl.py b/test/test_ssl.py index 006b62ec7a..7c76da6428 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -196,7 +196,7 @@ def tearDownClass(cls): MongoClient.PORT = cls.saved_port super(TestSSL, cls).tearDownClass() - @client_context.require_ssl + @client_context.require_tls def test_simple_ssl(self): # Expects the server to be running with ssl and with # no --sslPEMKeyFile or with --sslWeakCertificateValidation From 18de676657c5a01fb83f0adcae8d01ee2f8f9feb Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 24 Jun 2020 09:56:56 -0700 Subject: [PATCH 0157/2111] PYTHON-2297 Close connection to avoid resource warning when auth fails (#443) --- pymongo/pool.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index de1d682e5d..92e473473d 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1200,8 +1200,8 @@ def _get_socket(self, all_credentials): self.active_sockets += 1 # We've now acquired the semaphore and must release it on error. + sock_info = None try: - sock_info = None while sock_info is None: try: with self.lock: @@ -1214,6 +1214,9 @@ def _get_socket(self, all_credentials): sock_info = None sock_info.check_auth(all_credentials) except Exception: + if sock_info: + # We checked out a socket but authentication failed. + sock_info.close_socket(ConnectionClosedReason.ERROR) self._socket_semaphore.release() with self.lock: self.active_sockets -= 1 From e07366a4ada60469cada699e144552219528f222 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 9 Jun 2020 17:09:01 -0700 Subject: [PATCH 0158/2111] PYTHON-2255 Reduce default keepalive time to 120 seconds to align with Azure defaults --- pymongo/pool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 92e473473d..4f44e09929 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -122,7 +122,7 @@ def _set_non_inheritable_non_atomic(dummy): """Dummy function for platforms that don't provide fcntl.""" pass -_MAX_TCP_KEEPIDLE = 300 +_MAX_TCP_KEEPIDLE = 120 _MAX_TCP_KEEPINTVL = 10 _MAX_TCP_KEEPCNT = 9 From a90f80436c396c0b0025dbb89b7ecb6cae51f849 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 9 Jun 2020 17:57:24 -0700 Subject: [PATCH 0159/2111] PYTHON-2103 Test that GridFS supports indexes created in the shell --- test/test_gridfs_bucket.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 9addb84309..21feda719d 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -17,14 +17,17 @@ """Tests for the gridfs package. """ import datetime +import itertools import threading import time import gridfs from bson.binary import Binary +from bson.int64 import Int64 from bson.objectid import ObjectId from bson.py3compat import StringIO, string_type +from bson.son import SON from gridfs.errors import NoFile, CorruptGridFile from pymongo.errors import (ConfigurationError, ConnectionFailure, @@ -161,6 +164,25 @@ def test_upload_ensures_index(self): info.get('key') == [('filename', 1), ('uploadDate', 1)] for info in files.index_information().values())) + def test_ensure_index_shell_compat(self): + files = self.db.fs.files + for i, j in itertools.combinations_with_replacement( + [1, 1.0, Int64(1)], 2): + # Create the index with different numeric types (as might be done + # from the mongo shell). + shell_index = [('filename', i), ('uploadDate', j)] + self.db.command('createIndexes', files.name, + indexes=[{'key': SON(shell_index), + 'name': 'filename_1.0_uploadDate_1.0'}]) + + # No error. + self.fs.upload_from_stream("filename", b"data") + + self.assertTrue(any( + info.get('key') == [('filename', 1), ('uploadDate', 1)] + for info in files.index_information().values())) + files.drop() + def test_alt_collection(self): oid = self.alt.upload_from_stream("test_filename", b"hello world") From e608ff41662c694f7ba84e9d1a004463677886cf Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 24 Jun 2020 12:00:49 -0700 Subject: [PATCH 0160/2111] PYTHON-2192 - Use krb5's canonicalization algorithm --- .evergreen/run-enterprise-auth-tests.sh | 1 + pymongo/auth.py | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.evergreen/run-enterprise-auth-tests.sh b/.evergreen/run-enterprise-auth-tests.sh index cc2c8dce0c..030a0ab020 100644 --- a/.evergreen/run-enterprise-auth-tests.sh +++ b/.evergreen/run-enterprise-auth-tests.sh @@ -18,6 +18,7 @@ if [ ${PLATFORM} != "Java" ]; then if [ "Windows_NT" = "$OS" ]; then echo "Setting GSSAPI_PASS" export GSSAPI_PASS=${SASL_PASS} + export GSSAPI_CANONICALIZE="true" else # BUILD-3830 touch ${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty diff --git a/pymongo/auth.py b/pymongo/auth.py index 89febe581f..3052b73d9b 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -375,6 +375,20 @@ def _auth_key(nonce, username, password): return _unicode(md5hash.hexdigest()) +def _canonicalize_hostname(hostname): + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( + hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME)[0] + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + + return name[0].lower() + + def _authenticate_gssapi(credentials, sock_info): """Authenticate using GSSAPI. """ @@ -390,7 +404,7 @@ def _authenticate_gssapi(credentials, sock_info): # the security context. See RFC 4752, Section 3.1, first paragraph. host = sock_info.address[0] if props.canonicalize_host_name: - host = socket.getfqdn(host) + host = _canonicalize_hostname(host) service = props.service_name + '@' + host if props.service_realm is not None: service = service + '@' + props.service_realm From 815c924bc3821ef9336b09fff600e23fa9bb10ed Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 25 Jun 2020 17:04:04 -0400 Subject: [PATCH 0161/2111] PYTHON-2136: added more diagnostic information for ServerSelectionTimeoutError (#444) * PYTHON-2136: added more diagnostic information for ServerSelectionTimeoutError * made error message more human readable * fixed tests * fixed remaining testcase * fixed formatting * more formatting fixes --- pymongo/topology.py | 3 ++- test/test_replica_set_client.py | 3 +-- test/test_topology.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/topology.py b/pymongo/topology.py index a62282d1c3..0a49dbb174 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -213,7 +213,8 @@ def _select_servers_loop(self, selector, timeout, address): # No suitable servers. if timeout == 0 or now > end_time: raise ServerSelectionTimeoutError( - self._error_message(selector)) + "%s, Timeout: %ss, Topology Description: %r" % + (self._error_message(selector), timeout, self.description)) self._ensure_opened() self._request_check_all() diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py index c0a577b1d4..619499c392 100644 --- a/test/test_replica_set_client.py +++ b/test/test_replica_set_client.py @@ -343,8 +343,7 @@ def test_connect_with_internal_ips(self): with self.assertRaises(AutoReconnect) as context: connected(client) - self.assertEqual( - "Could not reach any servers in [('internal-ip', 27017)]." + self.assertIn("Could not reach any servers in [('internal-ip', 27017)]." " Replica set is configured with internal hostnames or IPs?", str(context.exception)) diff --git a/test/test_topology.py b/test/test_topology.py index cf89ae3bd3..50f4ecc74b 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -766,7 +766,7 @@ def assertMessage(self, message, topology, selector=any_server_selector): with self.assertRaises(ConnectionFailure) as context: topology.select_server(selector, server_selection_timeout=0) - self.assertEqual(message, str(context.exception)) + self.assertIn(message, str(context.exception)) def test_no_primary(self): t = create_mock_topology(replica_set_name='rs') From 58aaede0fe77e1f579b1a203cebffea91b9f66e8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Jun 2020 14:03:21 -0700 Subject: [PATCH 0162/2111] PYTHON-2281 Properly reduce keep alive time on Windows --- pymongo/pool.py | 49 +++++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 4f44e09929..21ce73c8d0 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -28,7 +28,7 @@ IPADDR_SAFE as _IPADDR_SAFE) from bson import DEFAULT_CODEC_OPTIONS -from bson.py3compat import imap, itervalues, _unicode, integer_types +from bson.py3compat import imap, itervalues, _unicode from bson.son import SON from pymongo import auth, helpers, thread_util, __version__ from pymongo.client_session import _validate_session_write_concern @@ -132,31 +132,36 @@ def _set_non_inheritable_non_atomic(dummy): except ImportError: import winreg + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). + return default + try: with winreg.OpenKey( winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters") as key: - _DEFAULT_TCP_IDLE_MS, _ = winreg.QueryValueEx(key, "KeepAliveTime") - _DEFAULT_TCP_INTERVAL_MS, _ = winreg.QueryValueEx( - key, "KeepAliveInterval") - # Make sure these are integers. - if not isinstance(_DEFAULT_TCP_IDLE_MS, integer_types): - raise ValueError - if not isinstance(_DEFAULT_TCP_INTERVAL_MS, integer_types): - raise ValueError - except (OSError, ValueError): - # We could not check the default values so do not attempt to override. - def _set_keepalive_times(dummy): - pass - else: - def _set_keepalive_times(sock): - idle_ms = min(_DEFAULT_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_DEFAULT_TCP_INTERVAL_MS, - _MAX_TCP_KEEPINTVL * 1000) - if (idle_ms < _DEFAULT_TCP_IDLE_MS or - interval_ms < _DEFAULT_TCP_INTERVAL_MS): - sock.ioctl(socket.SIO_KEEPALIVE_VALS, - (1, idle_ms, interval_ms)) + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, + _MAX_TCP_KEEPINTVL * 1000) + if (idle_ms < _WINDOWS_TCP_IDLE_MS or + interval_ms < _WINDOWS_TCP_INTERVAL_MS): + sock.ioctl(socket.SIO_KEEPALIVE_VALS, + (1, idle_ms, interval_ms)) else: def _set_tcp_option(sock, tcp_option, max_value): if hasattr(socket, tcp_option): From 67a23429bae8fdfa2db5d768a34060c132984886 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 30 Jun 2020 12:30:28 -0400 Subject: [PATCH 0163/2111] PYTHON-1787: add details to OperationFailure exception and NotMasterError (#448) PYTHON-1787-add details to OperationFailure and NotMasterError by adding a __repr__ function https://jira.mongodb.org/browse/PYTHON-1787 --- doc/contributors.rst | 1 + pymongo/errors.py | 11 ++++++++++- test/test_database.py | 6 +++--- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 9773b38224..4118d55586 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -87,3 +87,4 @@ The following is a list of people who have contributed to - Felipe Rodrigues(fbidu) - Terence Honles (terencehonles) - Paul Fisher (thetorpedodog) +- Julius Park (juliusgeo) diff --git a/pymongo/errors.py b/pymongo/errors.py index c11053f475..cf32546850 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -113,7 +113,11 @@ class NotMasterError(AutoReconnect): Subclass of :exc:`~pymongo.errors.AutoReconnect`. """ - + def __str__(self): + output_str = "%s, full error: %s" % (self._message, self.__details) + if sys.version_info[0] == 2 and isinstance(output_str, unicode): + return output_str.encode('utf-8', errors='replace') + return output_str class ServerSelectionTimeoutError(AutoReconnect): """Thrown when no MongoDB server is available for an operation @@ -167,6 +171,11 @@ def details(self): """ return self.__details + def __str__(self): + output_str = "%s, full error: %s" % (self._message, self.__details) + if sys.version_info[0] == 2 and isinstance(output_str, unicode): + return output_str.encode('utf-8', errors='replace') + return output_str class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is diff --git a/test/test_database.py b/test/test_database.py index 15f3be70fe..43fac30b0e 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -972,7 +972,7 @@ def test_mongos_response(self): with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document) - self.assertEqual('inner', str(context.exception)) + self.assertIn('inner', str(context.exception)) # If a shard has no primary and you run a command like dbstats, which # cannot be run on a secondary, mongos's response includes empty "raw" @@ -985,7 +985,7 @@ def test_mongos_response(self): with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document) - self.assertEqual('outer', str(context.exception)) + self.assertIn('outer', str(context.exception)) # Raw error has ok: 0 but no errmsg. Not a known case, but test it. error_document = { @@ -996,7 +996,7 @@ def test_mongos_response(self): with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document) - self.assertEqual('outer', str(context.exception)) + self.assertIn('outer', str(context.exception)) @client_context.require_test_commands @client_context.require_no_mongos From bfd297f1ea91e50d8133785a873a49a52571a53a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 1 Jul 2020 10:48:19 -0700 Subject: [PATCH 0164/2111] PYTHON-2271 Add MongoDB 4.0, 4.2, 4.4 to perf benchmark --- .evergreen/perf.yml | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index a077b8c067..2f8f54e58d 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -233,6 +233,39 @@ tasks: - func: "attach benchmark test results" - func: "send dashboard data" + - name: "perf-4.0-standalone" + tags: ["perf"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "4.0" + TOPOLOGY: "server" + - func: "run perf tests" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "perf-4.2-standalone" + tags: ["perf"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "4.2" + TOPOLOGY: "server" + - func: "run perf tests" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "perf-4.4-standalone" + tags: ["perf"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "4.4" + TOPOLOGY: "server" + - func: "run perf tests" + - func: "attach benchmark test results" + - func: "send dashboard data" + buildvariants: - name: "perf-tests" @@ -243,4 +276,6 @@ buildvariants: - name: "perf-3.0-standalone" - name: "perf-3.4-standalone" - name: "perf-3.6-standalone" - + - name: "perf-4.0-standalone" + - name: "perf-4.2-standalone" + - name: "perf-4.4-standalone" From 0b375a26049ef3c74feebd9d45bbfadcd813178a Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 1 Jul 2020 10:53:52 -0700 Subject: [PATCH 0165/2111] PYTHON-2295 MongoClient with multiple hosts directConnection=True should raise an error (#446) --- pymongo/mongo_client.py | 5 +++++ test/test_client.py | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 33f627f2cc..7920b6a530 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -670,6 +670,11 @@ def __init__( # Normalize combined options. opts = _normalize_options(opts) + # Ensure directConnection was not True if there are multiple seeds. + if len(seeds) > 1 and opts.get('directconnection'): + raise ConfigurationError( + "Cannot specify multiple hosts with directConnection=true") + # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) diff --git a/test/test_client.py b/test/test_client.py index 0b203b7eb6..8013dd7272 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1572,6 +1572,10 @@ def test_direct_connection(self): TOPOLOGY_TYPE.ReplicaSetWithPrimary]) client.close() + # directConnection=True, should error with multiple hosts as a list. + with self.assertRaises(ConfigurationError): + MongoClient(['host1', 'host2'], directConnection=True) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From 1f4123e4bf54f9ed689ce77ffb8dfbccc3e688f0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 2 Jun 2020 12:21:39 -0700 Subject: [PATCH 0166/2111] PYTHON-2123 Streaming heartbeat protocol MongoClient now requires 2 connections and 2 threads to each MongoDB 4.4+ server. With one connection, the server streams (or pushes) updated heartbeat info. With the other connection, the client periodically pings the server to establish an accurate round-trip time (RTT). This change optimizes the discovery of server state changes such as replica set elections. Additional changes: - Mark server Unknown before retrying isMaster check. - Always reset the pool _after_ marking the server unknown. - Configure fail point before creating the client in test SpecRunner. - Unfreeze with replSetFreeze:0 to ensure a speedy elections in test suite. --- pymongo/errors.py | 6 + pymongo/ismaster.py | 10 +- pymongo/message.py | 27 +- pymongo/monitor.py | 289 +++++++++++--- pymongo/network.py | 78 +++- pymongo/periodic_executor.py | 21 +- pymongo/pool.py | 89 ++++- pymongo/server_description.py | 3 +- pymongo/socket_checker.py | 18 +- pymongo/topology.py | 4 + test/__init__.py | 16 +- .../cancel-server-check.json | 130 +++++++ .../find-network-error.json | 144 +++++++ .../find-shutdown-error.json | 168 ++++++++ .../insert-network-error.json | 156 ++++++++ .../insert-shutdown-error.json | 167 ++++++++ .../isMaster-command-error.json | 245 ++++++++++++ .../isMaster-network-error.json | 225 +++++++++++ .../isMaster-timeout.json | 359 ++++++++++++++++++ .../rediscover-quickly-after-step-down.json | 165 ++++++++ test/test_client.py | 19 +- test/test_cmap.py | 41 +- ...nnections_survive_primary_stepdown_spec.py | 8 +- test/test_discovery_and_monitoring.py | 110 +++++- test/test_monitor.py | 5 +- test/test_sdam_monitoring_spec.py | 2 +- test/test_session.py | 1 + test/test_streaming_protocol.py | 183 +++++++++ test/test_topology.py | 16 +- test/utils.py | 40 +- test/utils_selection_tests.py | 3 + test/utils_spec_runner.py | 188 +++++---- 32 files changed, 2703 insertions(+), 233 deletions(-) create mode 100644 test/discovery_and_monitoring_integration/cancel-server-check.json create mode 100644 test/discovery_and_monitoring_integration/find-network-error.json create mode 100644 test/discovery_and_monitoring_integration/find-shutdown-error.json create mode 100644 test/discovery_and_monitoring_integration/insert-network-error.json create mode 100644 test/discovery_and_monitoring_integration/insert-shutdown-error.json create mode 100644 test/discovery_and_monitoring_integration/isMaster-command-error.json create mode 100644 test/discovery_and_monitoring_integration/isMaster-network-error.json create mode 100644 test/discovery_and_monitoring_integration/isMaster-timeout.json create mode 100644 test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json create mode 100644 test/test_streaming_protocol.py diff --git a/pymongo/errors.py b/pymongo/errors.py index cf32546850..a309a9e7a5 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -282,3 +282,9 @@ def __init__(self, cause): def cause(self): """The exception that caused this encryption or decryption error.""" return self.__cause + + +class _OperationCancelled(AutoReconnect): + """Internal error raised when a socket operation is cancelled. + """ + pass diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index fb2d6a8682..a273ffab8f 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -46,9 +46,10 @@ def _get_server_type(doc): class IsMaster(object): - __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable') + __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', + '_awaitable') - def __init__(self, doc): + def __init__(self, doc, awaitable=False): """Parse an ismaster response from the server.""" self._server_type = _get_server_type(doc) self._doc = doc @@ -60,6 +61,7 @@ def __init__(self, doc): self._is_readable = ( self.server_type == SERVER_TYPE.RSSecondary or self._is_writable) + self._awaitable = awaitable @property def document(self): @@ -177,3 +179,7 @@ def speculative_authenticate(self): @property def topology_version(self): return self._doc.get('topologyVersion') + + @property + def awaitable(self): + return self._awaitable diff --git a/pymongo/message.py b/pymongo/message.py index 56e02979ce..e04d4c3b94 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1563,6 +1563,11 @@ def raw_command_response(self): # This should never be called on _OpReply. raise NotImplementedError + @property + def more_to_come(self): + """Is the moreToCome bit set on this response?""" + return False + @classmethod def unpack(cls, msg): """Construct an _OpReply from raw bytes.""" @@ -1583,6 +1588,11 @@ class _OpMsg(object): UNPACK_FROM = struct.Struct("1 section") diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 5400077b9d..1c62e0112a 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -14,14 +14,19 @@ """Class to monitor a MongoDB server on a background thread.""" +import atexit +import threading import weakref from pymongo import common, periodic_executor -from pymongo.errors import OperationFailure +from pymongo.errors import (NotMasterError, + OperationFailure, + _OperationCancelled) +from pymongo.ismaster import IsMaster from pymongo.monotonic import time as _time +from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription -from pymongo.server_type import SERVER_TYPE from pymongo.srv_resolver import _SrvResolver @@ -49,9 +54,17 @@ def target(): self._executor = executor + def _on_topology_gc(dummy=None): + # This prevents GC from waiting 10 seconds for isMaster to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + # Avoid cycles. When self or topology is freed, stop executor soon. self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) def open(self): """Start monitoring, or restart after a fork. @@ -60,12 +73,16 @@ def open(self): """ self._executor.open() + def gc_safe_close(self): + """GC safe close.""" + self._executor.close() + def close(self): """Close and stop monitoring. open() restarts the monitor after closing. """ - self._executor.close() + self.gc_safe_close() def join(self, timeout=None): """Wait for the monitor to stop.""" @@ -99,72 +116,113 @@ def __init__( self._server_description = server_description self._pool = pool self._settings = topology_settings - self._avg_round_trip_time = MovingAverage() self._listeners = self._settings._pool_options.event_listeners pub = self._listeners is not None self._publish = pub and self._listeners.enabled_for_server_heartbeat + self._cancel_context = None + self._rtt_monitor = _RttMonitor( + topology, topology_settings, topology._create_pool_for_monitor( + server_description.address)) + self.heartbeater = None - def close(self): - super(Monitor, self).close() + def cancel_check(self): + """Cancel any concurrent isMaster check. + + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + def _start_rtt_monitor(self): + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. + self._rtt_monitor.open() + if self._executor._stopped: + self._rtt_monitor.close() + + def gc_safe_close(self): + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() + def close(self): + self.gc_safe_close() + self._rtt_monitor.close() # Increment the generation and maybe close the socket. If the executor # thread has the socket checked out, it will be closed when checked in. + self._reset_connection() + + def _reset_connection(self): + # Clear our pooled connection. self._pool.reset() def _run(self): try: - self._server_description = self._check_with_retry() + prev_sd = self._server_description + try: + self._server_description = self._check_server() + except _OperationCancelled as exc: + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return self._topology.on_change(self._server_description) + + if (self._server_description.is_server_type_known and + self._server_description.topology_version): + self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error: + # Reset the server pool only after marking the server Unknown. + self._topology.reset_pool(self._server_description.address) + if prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() except ReferenceError: # Topology was garbage-collected. self.close() - def _check_with_retry(self): - """Call ismaster once or twice. Reset server's pool on error. + def _check_server(self): + """Call isMaster or read the next streaming response. Returns a ServerDescription. """ - # According to the spec, if an ismaster call fails we reset the - # server's pool. If a server was once connected, change its type - # to Unknown only after retrying once. - address = self._server_description.address - retry = True - if self._server_description.server_type == SERVER_TYPE.Unknown: - retry = False - start = _time() try: - return self._check_once() + try: + return self._check_once() + except (OperationFailure, NotMasterError) as exc: + # Update max cluster time even when isMaster fails. + self._topology.receive_cluster_time( + exc.details.get('$clusterTime')) + raise except ReferenceError: raise except Exception as error: - error_time = _time() - start + address = self._server_description.address + duration = _time() - start if self._publish: self._listeners.publish_server_heartbeat_failed( - address, error_time, error) - default = ServerDescription(address, error=error) - # Reset the server pool only after marking the server Unknown. - self._topology.on_change(default) - self._topology.reset_pool(address) - self._avg_round_trip_time.reset() - if not retry: - # Server type defaults to Unknown. - return default - - # Try a second and final time. If it fails return original error. - # Always send metadata: this is a new connection. - start = _time() - try: - return self._check_once() - except ReferenceError: + address, duration, error) + self._reset_connection() + if isinstance(error, _OperationCancelled): raise - except Exception as error: - error_time = _time() - start - if self._publish: - self._listeners.publish_server_heartbeat_failed( - address, error_time, error) - self._avg_round_trip_time.reset() - return default + self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) def _check_once(self): """A single attempt to call ismaster. @@ -173,35 +231,46 @@ def _check_once(self): """ address = self._server_description.address if self._publish: + # PYTHON-2299: Add the "awaited" field to heartbeat events. self._listeners.publish_server_heartbeat_started(address) + + if self._cancel_context and self._cancel_context.cancelled: + self._reset_connection() with self._pool.get_socket({}) as sock_info: + self._cancel_context = sock_info.cancel_context response, round_trip_time = self._check_with_socket(sock_info) - self._avg_round_trip_time.add_sample(round_trip_time) - sd = ServerDescription( - address=address, - ismaster=response, - round_trip_time=self._avg_round_trip_time.get()) + if not response.awaitable: + self._rtt_monitor.add_sample(round_trip_time) + + sd = ServerDescription(address, response, + self._rtt_monitor.average()) if self._publish: self._listeners.publish_server_heartbeat_succeeded( address, round_trip_time, response) - return sd - def _check_with_socket(self, sock_info): + def _check_with_socket(self, conn): """Return (IsMaster, round_trip_time). Can raise ConnectionFailure or OperationFailure. """ + cluster_time = self._topology.max_cluster_time() start = _time() - try: - return (sock_info.ismaster(self._pool.opts.metadata, - self._topology.max_cluster_time()), - _time() - start) - except OperationFailure as exc: - # Update max cluster time even when isMaster fails. - self._topology.receive_cluster_time( - exc.details.get('$clusterTime')) - raise + if conn.more_to_come: + # Read the next streaming isMaster (MongoDB 4.4+). + response = IsMaster(conn._next_reply(), awaitable=True) + elif (conn.performed_handshake and + self._server_description.topology_version): + # Initiate streaming isMaster (MongoDB 4.4+). + response = conn._ismaster( + cluster_time, + self._server_description.topology_version, + self._settings.heartbeat_frequency, + None) + else: + # New connection handshake or polling isMaster (MongoDB <4.4). + response = conn._ismaster(cluster_time, None, None, None) + return response, _time() - start class SrvMonitor(MonitorBase): @@ -252,3 +321,105 @@ def _get_seedlist(self): self._executor.update_interval( max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) return seedlist + + +class _RttMonitor(MonitorBase): + def __init__(self, topology, topology_settings, pool): + """Maintain round trip times for a server. + + The Topology is weakly referenced. + """ + super(_RttMonitor, self).__init__( + topology, + "pymongo_server_rtt_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL) + + self._pool = pool + self._moving_average = MovingAverage() + self._lock = threading.Lock() + + def close(self): + self.gc_safe_close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + self._pool.reset() + + def add_sample(self, sample): + """Add a RTT sample.""" + with self._lock: + self._moving_average.add_sample(sample) + + def average(self): + """Get the calculated average, or None if no samples yet.""" + with self._lock: + return self._moving_average.get() + + def reset(self): + """Reset the average RTT.""" + with self._lock: + return self._moving_average.reset() + + def _run(self): + try: + # NOTE: This thread is only run when when using the streaming + # heartbeat protocol (MongoDB 4.4+). + # XXX: Skip check if the server is unknown? + rtt = self._ping() + self.add_sample(rtt) + except ReferenceError: + # Topology was garbage-collected. + self.close() + except Exception: + self._pool.reset() + + def _ping(self): + """Run an "isMaster" command and return the RTT.""" + with self._pool.get_socket({}) as sock_info: + start = _time() + sock_info.ismaster() + return _time() - start + + +# Close monitors to cancel any in progress streaming checks before joining +# executor threads. For an explanation of how this works see the comment +# about _EXECUTORS in periodic_executor.py. +_MONITORS = set() + + +def _register(monitor): + ref = weakref.ref(monitor, _unregister) + _MONITORS.add(ref) + + +def _unregister(monitor_ref): + _MONITORS.remove(monitor_ref) + + +def _shutdown_monitors(): + if _MONITORS is None: + return + + # Copy the set. Closing monitors removes them. + monitors = list(_MONITORS) + + # Close all monitors. + for ref in monitors: + monitor = ref() + if monitor: + monitor.gc_safe_close() + + monitor = None + + +def _shutdown_resources(): + # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. + shutdown = _shutdown_monitors + if shutdown: + shutdown() + shutdown = _shutdown_executors + if shutdown: + shutdown() + + +atexit.register(_shutdown_resources) diff --git a/pymongo/network.py b/pymongo/network.py index cf714a4208..3224cf6498 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -16,8 +16,10 @@ import datetime import errno +import socket import struct + from bson import _decode_all_selective from bson.py3compat import PY3 @@ -27,15 +29,18 @@ from pymongo.errors import (AutoReconnect, NotMasterError, OperationFailure, - ProtocolError) -from pymongo.message import _UNPACK_REPLY + ProtocolError, + NetworkTimeout, + _OperationCancelled) +from pymongo.message import _UNPACK_REPLY, _OpMsg +from pymongo.monotonic import time from pymongo.socket_checker import _errno_from_exception _UNPACK_HEADER = struct.Struct(" 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + timeout = max(min(deadline - time(), _POLL_TIMEOUT), 0.001) + else: + timeout = _POLL_TIMEOUT + readable = sock_info.socket_checker.select( + sock, read=True, timeout=timeout) + if context.cancelled: + raise _OperationCancelled('isMaster cancelled') + if readable: + return + if deadline and time() > deadline: + raise socket.timeout("timed out") + # memoryview was introduced in Python 2.7 but we only use it on Python 3 # because before 2.7.4 the struct module did not support memoryview: # https://bugs.python.org/issue10212. # In Jython, using slice assignment on a memoryview results in a # NullPointerException. if not PY3: - def _receive_data_on_socket(sock, length): + def _receive_data_on_socket(sock_info, length, deadline): buf = bytearray(length) i = 0 while length: try: - chunk = sock.recv(length) + wait_for_read(sock_info, deadline) + chunk = sock_info.sock.recv(length) except (IOError, OSError) as exc: if _errno_from_exception(exc) == errno.EINTR: continue @@ -231,13 +276,14 @@ def _receive_data_on_socket(sock, length): return bytes(buf) else: - def _receive_data_on_socket(sock, length): + def _receive_data_on_socket(sock_info, length, deadline): buf = bytearray(length) mv = memoryview(buf) bytes_read = 0 while bytes_read < length: try: - chunk_length = sock.recv_into(mv[bytes_read:]) + wait_for_read(sock_info, deadline) + chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) except (IOError, OSError) as exc: if _errno_from_exception(exc) == errno.EINTR: continue diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 5777e5ab2c..09ff411201 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -14,7 +14,6 @@ """Run a target function on a background thread.""" -import atexit import threading import time import weakref @@ -46,6 +45,7 @@ def __init__(self, interval, min_interval, target, name=None): self._stopped = False self._thread = None self._name = name + self._skip_sleep = False self._thread_will_exit = False self._lock = threading.Lock() @@ -109,6 +109,9 @@ def wake(self): def update_interval(self, new_interval): self._interval = new_interval + def skip_sleep(self): + self._skip_sleep = True + def __should_stop(self): with self._lock: if self._stopped: @@ -129,12 +132,14 @@ def _run(self): raise - deadline = _time() + self._interval - - while not self._stopped and _time() < deadline: - time.sleep(self._min_interval) - if self._event: - break # Early wake. + if self._skip_sleep: + self._skip_sleep = False + else: + deadline = _time() + self._interval + while not self._stopped and _time() < deadline: + time.sleep(self._min_interval) + if self._event: + break # Early wake. self._event = False @@ -177,5 +182,3 @@ def _shutdown_executors(): executor.join(1) executor = None - -atexit.register(_shutdown_executors) diff --git a/pymongo/pool.py b/pymongo/pool.py index 21ce73c8d0..6e848b7bba 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -482,6 +482,20 @@ def _speculative_context(all_credentials): return None +class _CancellationContext(object): + def __init__(self): + self._cancelled = False + + def cancel(self): + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self): + """Was cancel called?""" + return self._cancelled + + class SocketInfo(object): """Store a socket with some metadata. @@ -521,13 +535,34 @@ def __init__(self, sock, pool, address, id): # sockets created before the last reset. self.generation = pool.generation self.ready = False - - def ismaster(self, metadata, cluster_time, all_credentials=None): + self.cancel_context = None + if not pool.handshake: + # This is a Monitor connection. + self.cancel_context = _CancellationContext() + self.opts = pool.opts + self.more_to_come = False + + def ismaster(self, all_credentials=None): + return self._ismaster(None, None, None, all_credentials) + + def _ismaster(self, cluster_time, topology_version, + heartbeat_frequency, all_credentials): cmd = SON([('ismaster', 1)]) - if not self.performed_handshake: - cmd['client'] = metadata + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd['client'] = self.opts.metadata if self.compression_settings: cmd['compression'] = self.compression_settings.compressors + elif topology_version is not None: + cmd['topologyVersion'] = topology_version + cmd['maxAwaitTimeMS'] = int(heartbeat_frequency*1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.sock.settimeout( + self.opts.connect_timeout + heartbeat_frequency) if self.max_wire_version >= 6 and cluster_time is not None: cmd['$clusterTime'] = cluster_time @@ -541,7 +576,9 @@ def ismaster(self, metadata, cluster_time, all_credentials=None): if auth_ctx: cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() - ismaster = IsMaster(self.command('admin', cmd, publish_events=False)) + doc = self.command('admin', cmd, publish_events=False, + exhaust_allowed=awaitable) + ismaster = IsMaster(doc, awaitable=awaitable) self.is_writable = ismaster.is_writable self.max_wire_version = ismaster.max_wire_version self.max_bson_size = ismaster.max_bson_size @@ -550,12 +587,11 @@ def ismaster(self, metadata, cluster_time, all_credentials=None): self.supports_sessions = ( ismaster.logical_session_timeout_minutes is not None) self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos - if not self.performed_handshake and self.compression_settings: + if performing_handshake and self.compression_settings: ctx = self.compression_settings.get_compression_context( ismaster.compressors) self.compression_context = ctx - self.performed_handshake = True self.op_msg_enabled = ismaster.max_wire_version >= 6 if creds: self.negotiated_mechanisms[creds] = ismaster.sasl_supported_mechs @@ -565,6 +601,14 @@ def ismaster(self, metadata, cluster_time, all_credentials=None): self.auth_ctx[auth_ctx.credentials] = auth_ctx return ismaster + def _next_reply(self): + reply = self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers._check_command_response(response_doc) + return response_doc + def command(self, dbname, spec, slave_ok=False, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, @@ -577,7 +621,8 @@ def command(self, dbname, spec, slave_ok=False, client=None, retryable_write=False, publish_events=True, - user_fields=None): + user_fields=None, + exhaust_allowed=False): """Execute a command or raise an error. :Parameters: @@ -635,7 +680,7 @@ def command(self, dbname, spec, slave_ok=False, if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self.sock, dbname, spec, slave_ok, + return command(self, dbname, spec, slave_ok, self.is_mongos, read_preference, codec_options, session, client, check, allowable_errors, self.address, check_keys, listeners, @@ -645,7 +690,8 @@ def command(self, dbname, spec, slave_ok=False, compression_ctx=self.compression_context, use_op_msg=self.op_msg_enabled, unacknowledged=unacknowledged, - user_fields=user_fields) + user_fields=user_fields, + exhaust_allowed=exhaust_allowed) except OperationFailure: raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. @@ -675,8 +721,7 @@ def receive_message(self, request_id): If any exception is raised, the socket is closed. """ try: - return receive_message(self.sock, request_id, - self.max_message_size) + return receive_message(self, request_id, self.max_message_size) except BaseException as error: self._raise_connection_failure(error) @@ -785,19 +830,27 @@ def validate_session(self, client, session): def close_socket(self, reason): """Close this connection with a reason.""" + if self.closed: + return + self._close_socket() + if reason and self.enabled_for_cmap: + self.listeners.publish_connection_closed( + self.address, self.id, reason) + + def _close_socket(self): + """Close this connection.""" if self.closed: return self.closed = True - # Avoid exceptions on interpreter shutdown. + if self.cancel_context: + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. try: self.sock.close() except Exception: pass - if reason and self.enabled_for_cmap: - self.listeners.publish_connection_closed( - self.address, self.id, reason) - def socket_closed(self): """Return True if we know socket has been closed, False otherwise.""" return self.socket_checker.socket_closed(self.sock) @@ -1134,7 +1187,7 @@ def connect(self, all_credentials=None): sock_info = SocketInfo(sock, self, self.address, conn_id) if self.handshake: - sock_info.ismaster(self.opts.metadata, None, all_credentials) + sock_info.ismaster(all_credentials) self.is_writable = sock_info.is_writable return sock_info diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 6372147f24..4a1fe38604 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -235,7 +235,8 @@ def __eq__(self, other): (self._election_id == other.election_id) and (self._primary == other.primary) and (self._ls_timeout_minutes == - other.logical_session_timeout_minutes)) + other.logical_session_timeout_minutes) and + (self._error == other.error)) return NotImplemented diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 93d6cb5a42..672159c245 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -38,7 +38,10 @@ def __init__(self): self._poller = None def select(self, sock, read=False, write=False, timeout=0): - """Select for reads or writes with a timeout in seconds.""" + """Select for reads or writes with a timeout in seconds. + + Returns True if the socket is readable/writable, False on timeout. + """ while True: try: if self._poller: @@ -52,23 +55,31 @@ def select(self, sock, read=False, write=False, timeout=0): # poll() timeout is in milliseconds. select() # timeout is in seconds. res = self._poller.poll(timeout * 1000) + # poll returns a possibly-empty list containing + # (fd, event) 2-tuples for the descriptors that have + # events or errors to report. Return True if the list + # is not empty. + return bool(res) finally: self._poller.unregister(sock) else: rlist = [sock] if read else [] wlist = [sock] if write else [] res = select.select(rlist, wlist, [sock], timeout) + # select returns a 3-tuple of lists of objects that are + # ready: subsets of the first three arguments. Return + # True if any of the lists are not empty. + return any(res) except (_SelectError, IOError) as exc: if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue raise - return res def socket_closed(self, sock): """Return True if we know socket has been closed, False otherwise. """ try: - res = self.select(sock, read=True) + return self.select(sock, read=True) except (RuntimeError, KeyError): # RuntimeError is raised during a concurrent poll. KeyError # is raised by unregister if the socket is not in the poller. @@ -84,4 +95,3 @@ def socket_closed(self, sock): # Any other exceptions should be attributed to a closed # or invalid socket. return True - return any(res) diff --git a/pymongo/topology.py b/pymongo/topology.py index 0a49dbb174..252db05f38 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -596,6 +596,10 @@ def _handle_error(self, address, err_ctx): self._process_change(ServerDescription(address, error=error)) # Clear the pool. server.reset() + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the isMaster check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() elif issubclass(exc_type, OperationFailure): # Do not request an immediate check since the server is likely # shutting down. diff --git a/test/__init__.py b/test/__init__.py index 175103effc..a517e1a275 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -226,6 +226,8 @@ def _connect(self, host, port, **kwargs): self.connection_attempts.append( 'failed to connect client %r: %s' % (client, exc)) return None + finally: + client.close() def _init_client(self): self.client = self._connect(host, port) @@ -602,6 +604,14 @@ def require_failCommand_fail_point(self, func): "failCommand fail point must be supported", func=func) + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 + return self._require(lambda: (self.test_commands_enabled and + self.version >= (4, 4, -1)), + "failCommand appName must be supported", + func=func) + def require_tls(self, func): """Run a test only if the client can connect over TLS.""" return self._require(lambda: self.tls, @@ -788,10 +798,14 @@ def _get_executors(topology): executors = [] for server in topology._servers.values(): # Some MockMonitor do not have an _executor. - executors.append(getattr(server._monitor, '_executor', None)) + if hasattr(server._monitor, '_executor'): + executors.append(server._monitor._executor) + if hasattr(server._monitor, '_rtt_monitor'): + executors.append(server._monitor._rtt_monitor._executor) executors.append(topology._Topology__events_executor) if topology._srv_monitor: executors.append(topology._srv_monitor._executor) + return [e for e in executors if e is not None] diff --git a/test/discovery_and_monitoring_integration/cancel-server-check.json b/test/discovery_and_monitoring_integration/cancel-server-check.json new file mode 100644 index 0000000000..9586350959 --- /dev/null +++ b/test/discovery_and_monitoring_integration/cancel-server-check.json @@ -0,0 +1,130 @@ +{ + "runOn": [ + { + "minServerVersion": "4.0", + "topology": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topology": [ + "sharded" + ] + } + ], + "database_name": "sdam-tests", + "collection_name": "cancel-server-check", + "data": [], + "tests": [ + { + "description": "Cancel server check", + "clientOptions": { + "retryWrites": true, + "heartbeatFrequencyMS": 10000, + "serverSelectionTimeoutMS": 5000, + "appname": "cancelServerCheckTest" + }, + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "result": { + "insertedId": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "result": { + "insertedId": 3 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/find-network-error.json b/test/discovery_and_monitoring_integration/find-network-error.json new file mode 100644 index 0000000000..4db2634cd6 --- /dev/null +++ b/test/discovery_and_monitoring_integration/find-network-error.json @@ -0,0 +1,144 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "find-network-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on find", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + }, + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "find-network-error" + }, + "command_name": "find", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "find-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/find-shutdown-error.json b/test/discovery_and_monitoring_integration/find-shutdown-error.json new file mode 100644 index 0000000000..65de8398b1 --- /dev/null +++ b/test/discovery_and_monitoring_integration/find-shutdown-error.json @@ -0,0 +1,168 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "find-shutdown-error", + "data": [], + "tests": [ + { + "description": "Concurrent shutdown error on find", + "clientOptions": { + "retryWrites": false, + "retryReads": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorFindTest" + }, + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "appName": "shutdownErrorFindTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + } + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "error": true + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "error": true + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/insert-network-error.json b/test/discovery_and_monitoring_integration/insert-network-error.json new file mode 100644 index 0000000000..fa8bb253e1 --- /dev/null +++ b/test/discovery_and_monitoring_integration/insert-network-error.json @@ -0,0 +1,156 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "insert-network-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on insert", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true, + "appName": "insertNetworkErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "insertNetworkErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/insert-shutdown-error.json b/test/discovery_and_monitoring_integration/insert-shutdown-error.json new file mode 100644 index 0000000000..edde149a91 --- /dev/null +++ b/test/discovery_and_monitoring_integration/insert-shutdown-error.json @@ -0,0 +1,167 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "insert-shutdown-error", + "data": [], + "tests": [ + { + "description": "Concurrent shutdown error on insert", + "clientOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorInsertTest" + }, + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "appName": "shutdownErrorInsertTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + } + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "error": true + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "error": true + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/isMaster-command-error.json b/test/discovery_and_monitoring_integration/isMaster-command-error.json new file mode 100644 index 0000000000..4bdfd9adff --- /dev/null +++ b/test/discovery_and_monitoring_integration/isMaster-command-error.json @@ -0,0 +1,245 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "isMaster-command-error", + "data": [], + "tests": [ + { + "description": "Command error on Monitor handshake", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "commandErrorHandshakeTest", + "closeConnection": false, + "errorCode": 91 + } + }, + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorHandshakeTest" + }, + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + }, + { + "description": "Command error on Monitor check", + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 1000, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorCheckTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "commandErrorCheckTest", + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750, + "errorCode": 91 + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "isMaster-command-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/isMaster-network-error.json b/test/discovery_and_monitoring_integration/isMaster-network-error.json new file mode 100644 index 0000000000..eb1f3eac19 --- /dev/null +++ b/test/discovery_and_monitoring_integration/isMaster-network-error.json @@ -0,0 +1,225 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "isMaster-network-error", + "data": [], + "tests": [ + { + "description": "Network error on Monitor handshake", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "networkErrorHandshakeTest", + "closeConnection": true + } + }, + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorHandshakeTest" + }, + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + }, + { + "description": "Network error on Monitor check", + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorCheckTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "networkErrorCheckTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "isMaster-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/isMaster-timeout.json b/test/discovery_and_monitoring_integration/isMaster-timeout.json new file mode 100644 index 0000000000..eeee612be8 --- /dev/null +++ b/test/discovery_and_monitoring_integration/isMaster-timeout.json @@ -0,0 +1,359 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "isMaster-timeout", + "data": [], + "tests": [ + { + "description": "Network timeout on Monitor handshake", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "timeoutMonitorHandshakeTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorHandshakeTest" + }, + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + }, + { + "description": "Network timeout on Monitor check", + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 750, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorCheckTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "timeoutMonitorCheckTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "isMaster-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + }, + { + "description": "Driver extends timeout while streaming", + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "extendsTimeoutTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 2000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "isMaster-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "isMaster-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json new file mode 100644 index 0000000000..2d0a998a69 --- /dev/null +++ b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json @@ -0,0 +1,165 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "sdam-tests", + "collection_name": "test-replSetStepDown", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Rediscover quickly after replSetStepDown", + "clientOptions": { + "appname": "replSetStepDownTest", + "heartbeatFrequencyMS": 60000, + "serverSelectionTimeoutMS": 5000, + "w": "majority" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "recordPrimary", + "object": "testRunner" + }, + { + "name": "runAdminCommand", + "object": "testRunner", + "command_name": "replSetFreeze", + "arguments": { + "command": { + "replSetFreeze": 0 + }, + "readPreference": { + "mode": "Secondary" + } + } + }, + { + "name": "runAdminCommand", + "object": "testRunner", + "command_name": "replSetStepDown", + "arguments": { + "command": { + "replSetStepDown": 20, + "secondaryCatchUpPeriodSecs": 20, + "force": false + } + } + }, + { + "name": "waitForPrimaryChange", + "object": "testRunner", + "arguments": { + "timeoutMS": 5000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/test_client.py b/test/test_client.py index 8013dd7272..5667282896 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -816,6 +816,19 @@ def test_close_does_not_open_servers(self): client.close() self.assertEqual(topology._servers, {}) + def test_close_closes_sockets(self): + client = rs_client() + self.addCleanup(client.close) + client.test.test.find_one() + topology = client._topology + client.close() + for server in topology._servers.values(): + self.assertFalse(server._pool.sockets) + self.assertTrue(server._monitor._executor._stopped) + self.assertTrue(server._monitor._rtt_monitor._executor._stopped) + self.assertFalse(server._monitor._pool.sockets) + self.assertFalse(server._monitor._rtt_monitor._pool.sockets) + def test_bad_uri(self): with self.assertRaises(InvalidURI): MongoClient("http://localhost") @@ -1636,12 +1649,12 @@ def receive_message(request_id): msg += encode({'$err': 'mock err', 'code': 0}) return message._OpReply.unpack(msg) - saved = sock_info.receive_message sock_info.receive_message = receive_message self.assertRaises(OperationFailure, list, cursor) - sock_info.receive_message = saved + # Unpatch the instance. + del sock_info.receive_message - # The socket is returned the pool and it still works. + # The socket is returned to the pool and it still works. self.assertEqual(200, collection.count_documents({})) self.assertIn(sock_info, pool.sockets) diff --git a/test/test_cmap.py b/test/test_cmap.py index 28de566338..6cd0118d5d 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -50,6 +50,7 @@ single_client, TestCreator, wait_until) +from test.utils_spec_runner import SpecRunnerThread OBJECT_TYPES = { @@ -70,40 +71,6 @@ } -class CMAPThread(threading.Thread): - def __init__(self, name): - super(CMAPThread, self).__init__() - self.name = name - self.exc = None - self.setDaemon(True) - self.cond = threading.Condition() - self.ops = [] - self.stopped = False - - def schedule(self, work): - self.ops.append(work) - with self.cond: - self.cond.notify() - - def stop(self): - self.stopped = True - with self.cond: - self.cond.notify() - - def run(self): - while not self.stopped or self.ops: - if not self. ops: - with self.cond: - self.cond.wait(10) - if self.ops: - try: - work = self.ops.pop(0) - work() - except Exception as exc: - self.exc = exc - self.stop() - - class TestCMAP(IntegrationTest): # Location of JSON test specifications. TEST_PATH = os.path.join( @@ -114,7 +81,7 @@ class TestCMAP(IntegrationTest): def start(self, op): """Run the 'start' thread operation.""" target = op['target'] - thread = CMAPThread(target) + thread = SpecRunnerThread(target) thread.start() self.targets[target] = thread @@ -344,6 +311,8 @@ def test_5_check_out_fails_connection_error(self): def mock_connect(*args, **kwargs): raise ConnectionFailure('connect failed') pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, 'connect') # Attempt to create a new connection. with self.assertRaisesRegex(ConnectionFailure, 'connect failed'): @@ -374,6 +343,8 @@ def mock_check_auth(self, *args, **kwargs): def mock_connect(*args, **kwargs): sock_info = connect(*args, **kwargs) sock_info.check_auth = functools.partial(mock_check_auth, sock_info) + # Un-patch to break the cyclic reference. + self.addCleanup(delattr, sock_info, 'check_auth') return sock_info pool.connect = mock_connect # Un-patch Pool.connect to break the cyclic reference. diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 63cf127e32..8690962afa 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -28,6 +28,7 @@ IntegrationTest) from test.utils import (CMAPListener, ensure_all_connected, + repl_set_step_down, rs_or_single_client) @@ -38,7 +39,8 @@ def setUpClass(cls): super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() cls.listener = CMAPListener() cls.client = rs_or_single_client(event_listeners=[cls.listener], - retryWrites=False) + retryWrites=False, + heartbeatFrequencyMS=500) # Ensure connections to all servers in replica set. This is to test # that the is_writable flag is properly updated for sockets that @@ -84,9 +86,7 @@ def test_get_more_iteration(self): for _ in range(batch_size): cursor.next() # Force step-down the primary. - res = self.client.admin.command( - SON([("replSetStepDown", 5), ("force", True)])) - self.assertEqual(res["ok"], 1.0) + repl_set_step_down(self.client, replSetStepDown=5, force=True) # Get next batch of results. for _ in range(batch_size): cursor.next() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 4b17fc8530..5f4f6c076e 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -17,11 +17,13 @@ import os import sys import threading +import time sys.path[0:0] = [""] from bson import json_util, Timestamp -from pymongo import common +from pymongo import (common, + monitoring) from pymongo.errors import (AutoReconnect, ConfigurationError, NetworkTimeout, @@ -36,11 +38,14 @@ from pymongo.uri_parser import parse_uri from test import unittest, IntegrationTest from test.utils import (assertion_context, + client_context, Barrier, get_pool, server_name_to_type, rs_or_single_client, + TestCreator, wait_until) +from test.utils_spec_runner import SpecRunner, SpecRunnerThread # Location of JSON test specifications. @@ -51,7 +56,9 @@ class MockMonitor(object): def __init__(self, server_description, topology, pool, topology_settings): self._server_description = server_description - self._topology = topology + + def cancel_check(self): + pass def open(self): pass @@ -305,5 +312,104 @@ def insert_command(i): client.admin.command('ping') +class TestIntegration(SpecRunner): + # Location of JSON test specifications. + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 'discovery_and_monitoring_integration') + + def _event_count(self, event): + if event == 'ServerMarkedUnknownEvent': + def marked_unknown(e): + return (isinstance(e, monitoring.ServerDescriptionChangedEvent) + and not e.new_description.is_server_type_known) + return len(self.server_listener.matching(marked_unknown)) + # Only support CMAP events for now. + self.assertTrue(event.startswith('Pool') or event.startswith('Conn')) + event_type = getattr(monitoring, event) + return self.pool_listener.event_count(event_type) + + def assert_event_count(self, event, count): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + self.assertEqual(self._event_count(event), count) + + def wait_for_event(self, event, count): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. + """ + wait_until(lambda: self._event_count(event) >= count, + 'find %s %s event(s)' % (count, event)) + + def configure_fail_point(self, fail_point): + """Run the configureFailPoint test operation. + """ + self.set_fail_point(fail_point) + self.addCleanup(self.set_fail_point, { + 'configureFailPoint': fail_point['configureFailPoint'], + 'mode': 'off'}) + + def run_admin_command(self, command, **kwargs): + """Run the runAdminCommand test operation. + """ + self.client.admin.command(command, **kwargs) + + def record_primary(self): + """Run the recordPrimary test operation. + """ + self._previous_primary = self.scenario_client.primary + + def wait_for_primary_change(self, timeout_ms): + """Run the waitForPrimaryChange test operation. + """ + def primary_changed(): + primary = self.scenario_client.primary + if primary is None: + return False + return primary != self._previous_primary + timeout = timeout_ms/1000.0 + wait_until(primary_changed, 'change primary', timeout=timeout) + + def wait(self, ms): + """Run the "wait" test operation. + """ + time.sleep(ms/1000.0) + + def start_thread(self, name): + """Run the 'startThread' thread operation.""" + thread = SpecRunnerThread(name) + thread.start() + self.targets[name] = thread + + def run_on_thread(self, sessions, collection, name, operation): + """Run the 'runOnThread' operation.""" + thread = self.targets[name] + thread.schedule(lambda: self._run_op( + sessions, collection, operation, False)) + + def wait_for_thread(self, name): + """Run the 'waitForThread' operation.""" + thread = self.targets[name] + thread.stop() + thread.join() + if thread.exc: + raise thread.exc + + +def create_spec_test(scenario_def, test, name): + @client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = TestCreator(create_spec_test, TestIntegration, TestIntegration.TEST_PATH) +test_creator.create_tests() + + if __name__ == "__main__": unittest.main() diff --git a/test/test_monitor.py b/test/test_monitor.py index fe014e34a0..61e2057b52 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -38,6 +38,7 @@ def get_executors(client): executors = [] for server in client._topology._servers.values(): executors.append(server._monitor._executor) + executors.append(server._monitor._rtt_monitor._executor) executors.append(client._kill_cursors_executor) executors.append(client._topology._Topology__events_executor) return [e for e in executors if e is not None] @@ -54,7 +55,7 @@ class TestMonitor(IntegrationTest): def test_cleanup_executors_on_client_del(self): client = create_client() executors = get_executors(client) - self.assertEqual(len(executors), 3) + self.assertEqual(len(executors), 4) # Each executor stores a weakref to itself in _EXECUTORS. executor_refs = [ @@ -71,7 +72,7 @@ def test_cleanup_executors_on_client_del(self): def test_cleanup_executors_on_client_close(self): client = create_client() executors = get_executors(client) - self.assertEqual(len(executors), 3) + self.assertEqual(len(executors), 4) client.close() diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 710bf8732c..bcd2cbc4d8 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -320,7 +320,7 @@ def marked_unknown_and_rediscovered(): # Expect a single ServerDescriptionChangedEvent for the network error. marked_unknown_events = self.listener.matching(marked_unknown) - self.assertEqual(len(marked_unknown_events), 1) + self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) self.assertIsInstance( marked_unknown_events[0].new_description.error, expected_error) diff --git a/test/test_session.py b/test/test_session.py index a7c8e54e25..f00f58bd98 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -754,6 +754,7 @@ def test_unacknowledged_writes(self): # Ensure the collection exists. self.client.pymongo_test.test_unacked_writes.insert_one({}) client = rs_or_single_client(w=0, event_listeners=[self.listener]) + self.addCleanup(client.close) db = client.pymongo_test coll = db.test_unacked_writes ops = [ diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py new file mode 100644 index 0000000000..e6c64ffa9a --- /dev/null +++ b/test/test_streaming_protocol.py @@ -0,0 +1,183 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" + +import sys +import time + +sys.path[0:0] = [""] + +from pymongo import monitoring + +from test import (client_context, + IntegrationTest, + unittest) +from test.utils import (HeartbeatEventListener, + rs_or_single_client, + ServerEventListener, + wait_until) + + +class TestStreamingProtocol(IntegrationTest): + @client_context.require_failCommand_appName + def test_failCommand_streaming(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + client = rs_or_single_client( + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, + appName='failingIsMasterTest') + self.addCleanup(client.close) + # Force a connection. + client.admin.command('ping') + address = client.address + listener.reset() + + fail_ismaster = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 4}, + 'data': { + 'failCommands': ['isMaster'], + 'closeConnection': False, + 'errorCode': 10107, + 'appName': 'failingIsMasterTest', + }, + } + with self.fail_point(fail_ismaster): + def _marked_unknown(event): + return (event.server_address == address + and not event.new_description.is_server_type_known) + + def _discovered_node(event): + return (event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known) + + def marked_unknown(): + return len(listener.matching(_marked_unknown)) >= 1 + + def rediscovered(): + return len(listener.matching(_discovered_node)) >= 1 + + # Topology events are published asynchronously + wait_until(marked_unknown, 'mark node unknown') + wait_until(rediscovered, 'rediscover node') + + # Server should be selectable. + client.admin.command('ping') + + @client_context.require_failCommand_appName + def test_streaming_rtt(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + # On Windows, RTT can actually be 0.0 because time.time() only has + # 1-15 millisecond resolution. We need to delay the initial isMaster + # to ensure that RTT is never zero. + name = 'streamingRttTest' + delay_ismaster = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 1000}, + 'data': { + 'failCommands': ['isMaster'], + 'blockConnection': True, + 'blockTimeMS': 20, + # This can be uncommented after SERVER-49220 is fixed. + # 'appName': name, + }, + } + with self.fail_point(delay_ismaster): + client = rs_or_single_client( + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName=name) + self.addCleanup(client.close) + # Force a connection. + client.admin.command('ping') + address = client.address + + delay_ismaster['data']['blockTimeMS'] = 500 + delay_ismaster['data']['appName'] = name + with self.fail_point(delay_ismaster): + def rtt_exceeds_250_ms(): + # XXX: Add a public TopologyDescription getter to MongoClient? + topology = client._topology + sd = topology.description.server_descriptions()[address] + return sd.round_trip_time > 0.250 + + wait_until(rtt_exceeds_250_ms, 'exceed 250ms RTT') + + # Server should be selectable. + client.admin.command('ping') + + def changed_event(event): + return (event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent)) + + # There should only be one event published, for the initial discovery. + events = listener.matching(changed_event) + self.assertEqual(1, len(events)) + self.assertGreater(events[0].new_description.round_trip_time, 0) + + @client_context.require_failCommand_appName + def test_monitor_waits_after_server_check_error(self): + hb_listener = HeartbeatEventListener() + client = rs_or_single_client( + event_listeners=[hb_listener], heartbeatFrequencyMS=500, + appName='waitAfterErrorTest') + self.addCleanup(client.close) + # Force a connection. + client.admin.command('ping') + address = client.address + + fail_ismaster = { + 'mode': {'times': 50}, + 'data': { + 'failCommands': ['isMaster'], + 'closeConnection': False, + 'errorCode': 91, + # This can be uncommented after SERVER-49220 is fixed. + # 'appName': 'waitAfterErrorTest', + }, + } + with self.fail_point(fail_ismaster): + time.sleep(2) + + # Server should be selectable. + client.admin.command('ping') + + def hb_started(event): + return (isinstance(event, monitoring.ServerHeartbeatStartedEvent) + and event.connection_id == address) + + hb_started_events = hb_listener.matching(hb_started) + # Explanation of the expected heartbeat events: + # Time: event + # 0ms: create MongoClient + # 1ms: run monitor handshake, 1 + # 2ms: run awaitable isMaster, 2 + # 3ms: run configureFailPoint + # 502ms: isMaster fails for the first time with command error + # 1002ms: run monitor handshake, 3 + # 1502ms: run monitor handshake, 4 + # 2002ms: run monitor handshake, 5 + # 2003ms: disable configureFailPoint + # 2004ms: isMaster succeeds, 6 + # 2004ms: awaitable isMaster, 7 + self.assertGreater(len(hb_started_events), 7) + # This can be reduced to ~15 after SERVER-49220 is fixed. + self.assertLess(len(hb_started_events), 40) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index 50f4ecc74b..dba7eeef15 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -42,9 +42,11 @@ class MockMonitor(object): def __init__(self, server_description, topology, pool, topology_settings): self._server_description = server_description - self._topology = topology self.opened = False + def cancel_check(self): + pass + def open(self): self.opened = True @@ -232,6 +234,7 @@ def _check_with_socket(self, *args, **kwargs): raise AutoReconnect('mock monitor error') t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) s = t.select_server(writable_server_selector) self.assertEqual(125, s.description.round_trip_time) @@ -712,6 +715,7 @@ def _check_with_socket(self, *args, **kwargs): raise AutoReconnect('mock monitor error') t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) server = wait_for_master(t) self.assertEqual(1, ismaster_count[0]) generation = server.pool.generation @@ -734,18 +738,21 @@ def _check_with_socket(self, *args, **kwargs): 'mock monitor error #%s' % (ismaster_count[0],)) t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) server = wait_for_master(t) self.assertEqual(1, ismaster_count[0]) self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) - # Second ismaster call. + # Second ismaster call, server is marked Unknown, then the monitor + # immediately runs a retry (third ismaster). t.request_check_all() # The third ismaster call (the immediate retry) happens sometime soon # after the failed check triggered by request_check_all. Wait until # the server becomes known again. - t.select_server(writable_server_selector, 0.250) - self.assertEqual(SERVER_TYPE.Standalone, get_type(t, 'a')) + server = t.select_server(writable_server_selector, 0.250) + self.assertEqual(SERVER_TYPE.Standalone, + server.description.server_type) self.assertEqual(3, ismaster_count[0]) def test_internal_monitor_error(self): @@ -756,6 +763,7 @@ def _check_with_socket(self, *args, **kwargs): raise exception t = create_mock_topology(monitor_class=TestMonitor) + self.addCleanup(t.close) with self.assertRaisesRegex(ConnectionFailure, 'internal error'): t.select_server(any_server_selector, server_selection_timeout=0.5) diff --git a/test/utils.py b/test/utils.py index 4e213d50b5..34f62cc449 100644 --- a/test/utils.py +++ b/test/utils.py @@ -31,12 +31,14 @@ from bson import json_util, py3compat from bson.objectid import ObjectId +from bson.son import SON from pymongo import (MongoClient, monitoring, read_preferences) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener -from pymongo.pool import PoolOptions +from pymongo.pool import (_CancellationContext, + PoolOptions) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, @@ -160,8 +162,7 @@ def failed(self, event): super(OvertCommandListener, self).failed(event) -class ServerAndTopologyEventListener(monitoring.ServerListener, - monitoring.TopologyListener): +class _ServerEventListener(object): """Listens to all events.""" def __init__(self): @@ -185,6 +186,16 @@ def reset(self): self.results = [] +class ServerEventListener(_ServerEventListener, + monitoring.ServerListener): + """Listens to Server events.""" + + +class ServerAndTopologyEventListener(ServerEventListener, + monitoring.TopologyListener): + """Listens to Server and Topology events.""" + + class HeartbeatEventListener(monitoring.ServerHeartbeatListener): """Listens to only server heartbeat events.""" @@ -200,9 +211,18 @@ def succeeded(self, event): def failed(self, event): self.results.append(event) + def matching(self, matcher): + """Return the matching events.""" + results = self.results[:] + return [event for event in results if matcher(event)] + class MockSocketInfo(object): - def close(self): + def __init__(self): + self.cancel_context = _CancellationContext() + self.more_to_come = False + + def close_socket(self, reason): pass def __enter__(self): @@ -218,7 +238,7 @@ def __init__(self, *args, **kwargs): self._lock = threading.Lock() self.opts = PoolOptions() - def get_socket(self, all_credentials): + def get_socket(self, all_credentials, checkout=False): return MockSocketInfo() def return_socket(self, *args, **kwargs): @@ -677,6 +697,16 @@ def wait_until(predicate, success_description, timeout=10): time.sleep(interval) +def repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([('replSetStepDown', 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. + client.admin.command( + 'replSetFreeze', 0, read_preference=ReadPreference.SECONDARY) + client.admin.command(cmd) + def is_mongos(client): res = client.admin.command('ismaster') return res.get('msg', '') == 'isdbgrid' diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index ad17166807..8bea70ae37 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -36,6 +36,9 @@ class MockMonitor(object): def __init__(self, server_description, topology, pool, topology_settings): pass + def cancel_check(self): + pass + def open(self): pass diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 68faa8c346..4ab4d1d104 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -15,7 +15,7 @@ """Utilities for testing driver specs.""" import copy -import sys +import threading from bson import decode, encode @@ -48,8 +48,46 @@ camel_to_snake_args, camel_to_upper_camel, CompareType, + CMAPListener, OvertCommandListener, - rs_client, parse_read_preference) + parse_read_preference, + rs_client, + ServerAndTopologyEventListener, + HeartbeatEventListener) + + +class SpecRunnerThread(threading.Thread): + def __init__(self, name): + super(SpecRunnerThread, self).__init__() + self.name = name + self.exc = None + self.setDaemon(True) + self.cond = threading.Condition() + self.ops = [] + self.stopped = False + + def schedule(self, work): + self.ops.append(work) + with self.cond: + self.cond.notify() + + def stop(self): + self.stopped = True + with self.cond: + self.cond.notify() + + def run(self): + while not self.stopped or self.ops: + if not self. ops: + with self.cond: + self.cond.wait(10) + if self.ops: + try: + work = self.ops.pop(0) + work() + except Exception as exc: + self.exc = exc + self.stop() class SpecRunner(IntegrationTest): @@ -60,7 +98,8 @@ def setUpClass(cls): cls.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, + min_heartbeat_interval=0.1) cls.knobs.enable() @classmethod @@ -70,7 +109,10 @@ def tearDownClass(cls): def setUp(self): super(SpecRunner, self).setUp() + self.targets = {} self.listener = None + self.pool_listener = None + self.server_listener = None self.maxDiff = None def _set_fail_point(self, client, command_args): @@ -315,7 +357,8 @@ def run_operation(self, sessions, collection, operation): arguments["requests"] = requests elif arg_name == "session": arguments['session'] = sessions[arguments['session']] - elif name == 'command' and arg_name == 'command': + elif (name in ('command', 'run_admin_command') and + arg_name == 'command'): # Ensure the first key is the command name. ordered_command = SON([(operation['command_name'], 1)]) ordered_command.update(arguments['command']) @@ -343,6 +386,10 @@ def run_operation(self, sessions, collection, operation): else: arguments[c2s] = arguments.pop(arg_name) + if name == 'run_on_thread': + args = {'sessions': sessions, 'collection': collection} + args.update(arguments) + arguments = args result = cmd(**dict(arguments)) if name == "aggregate": @@ -367,45 +414,48 @@ def allowable_errors(self, op): """Allow encryption spec to override expected error classes.""" return (PyMongoError,) + def _run_op(self, sessions, collection, op, in_with_transaction): + expected_result = op.get('result') + if expect_error(op): + with self.assertRaises(self.allowable_errors(op), + msg=op['name']) as context: + self.run_operation(sessions, collection, op.copy()) + + if expect_error_message(expected_result): + if isinstance(context.exception, BulkWriteError): + errmsg = str(context.exception.details).lower() + else: + errmsg = str(context.exception).lower() + self.assertIn(expected_result['errorContains'].lower(), + errmsg) + if expect_error_code(expected_result): + self.assertEqual(expected_result['errorCodeName'], + context.exception.details.get('codeName')) + if expect_error_labels_contain(expected_result): + self.assertErrorLabelsContain( + context.exception, + expected_result['errorLabelsContain']) + if expect_error_labels_omit(expected_result): + self.assertErrorLabelsOmit( + context.exception, + expected_result['errorLabelsOmit']) + + # Reraise the exception if we're in the with_transaction + # callback. + if in_with_transaction: + raise context.exception + else: + result = self.run_operation(sessions, collection, op.copy()) + if 'result' in op: + if op['name'] == 'runCommand': + self.check_command_result(expected_result, result) + else: + self.check_result(expected_result, result) + def run_operations(self, sessions, collection, ops, in_with_transaction=False): for op in ops: - expected_result = op.get('result') - if expect_error(op): - with self.assertRaises(self.allowable_errors(op), - msg=op['name']) as context: - self.run_operation(sessions, collection, op.copy()) - - if expect_error_message(expected_result): - if isinstance(context.exception, BulkWriteError): - errmsg = str(context.exception.details).lower() - else: - errmsg = str(context.exception).lower() - self.assertIn(expected_result['errorContains'].lower(), - errmsg) - if expect_error_code(expected_result): - self.assertEqual(expected_result['errorCodeName'], - context.exception.details.get('codeName')) - if expect_error_labels_contain(expected_result): - self.assertErrorLabelsContain( - context.exception, - expected_result['errorLabelsContain']) - if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit( - context.exception, - expected_result['errorLabelsOmit']) - - # Reraise the exception if we're in the with_transaction - # callback. - if in_with_transaction: - raise context.exception - else: - result = self.run_operation(sessions, collection, op.copy()) - if 'result' in op: - if op['name'] == 'runCommand': - self.check_command_result(expected_result, result) - else: - self.check_result(expected_result, result) + self._run_op(sessions, collection, op, in_with_transaction) # TODO: factor with test_command_monitoring.py def check_events(self, test, listener, session_ids): @@ -517,7 +567,29 @@ def setup_scenario(self, scenario_def): def run_scenario(self, scenario_def, test): self.maybe_skip_scenario(test) + + # Kill all sessions before and after each test to prevent an open + # transaction (from a test failure) from blocking collection/database + # operations during test set up and tear down. + self.kill_all_sessions() + self.addCleanup(self.kill_all_sessions) + self.setup_scenario(scenario_def) + database_name = self.get_scenario_db_name(scenario_def) + collection_name = self.get_scenario_coll_name(scenario_def) + # SPEC-1245 workaround StaleDbVersion on distinct + for c in self.mongos_clients: + c[database_name][collection_name].distinct("x") + + # Configure the fail point before creating the client. + if 'failPoint' in test: + fp = test['failPoint'] + self.set_fail_point(fp) + self.addCleanup(self.set_fail_point, { + 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + listener = OvertCommandListener() + pool_listener = CMAPListener() + server_listener = ServerAndTopologyEventListener() # Create a new client, to avoid interference from pooled sessions. client_options = self.parse_client_options(test['clientOptions']) # MMAPv1 does not support retryable writes. @@ -526,28 +598,21 @@ def run_scenario(self, scenario_def, test): self.skipTest("MMAPv1 does not support retryWrites=True") use_multi_mongos = test['useMultipleMongoses'] if client_context.is_mongos and use_multi_mongos: - client = rs_client(client_context.mongos_seeds(), - event_listeners=[listener], **client_options) + client = rs_client( + client_context.mongos_seeds(), + event_listeners=[listener, pool_listener, server_listener], + **client_options) else: - client = rs_client(event_listeners=[listener], **client_options) + client = rs_client( + event_listeners=[listener, pool_listener, server_listener], + **client_options) + self.scenario_client = client self.listener = listener + self.pool_listener = pool_listener + self.server_listener = server_listener # Close the client explicitly to avoid having too many threads open. self.addCleanup(client.close) - # Kill all sessions before and after each test to prevent an open - # transaction (from a test failure) from blocking collection/database - # operations during test set up and tear down. - self.kill_all_sessions() - self.addCleanup(self.kill_all_sessions) - - database_name = self.get_scenario_db_name(scenario_def) - collection_name = self.get_scenario_coll_name(scenario_def) - self.setup_scenario(scenario_def) - - # SPEC-1245 workaround StaleDbVersion on distinct - for c in self.mongos_clients: - c[database_name][collection_name].distinct("x") - # Create session0 and session1. sessions = {} session_ids = {} @@ -572,14 +637,6 @@ def run_scenario(self, scenario_def, test): self.addCleanup(end_sessions, sessions) - if 'failPoint' in test: - fp = test['failPoint'] - self.set_fail_point(fp) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) - - listener.results.clear() - collection = client[database_name][collection_name] self.run_test_ops(sessions, collection, test) @@ -613,6 +670,7 @@ def run_scenario(self, scenario_def, test): # CompareType(Binary) doesn't work. self.assertEqual(wrap_types(expected_c['data']), actual_data) + def expect_any_error(op): if isinstance(op, dict): return op.get('error') From 9fc7ed1e11a8019595cc082f517328c8bec29b04 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Thu, 25 Jun 2020 10:38:12 -0700 Subject: [PATCH 0167/2111] PYTHON-2143 Use an allow-list to determine resumable change stream errors --- pymongo/change_stream.py | 34 ++++- pymongo/errors.py | 14 +- pymongo/helpers.py | 21 ++- pymongo/server.py | 3 +- .../change_streams/change-streams-errors.json | 53 ++++++- test/test_change_stream.py | 143 ++++++++---------- 6 files changed, 170 insertions(+), 98 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index f026dd7f5b..b86dca5415 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -25,6 +25,7 @@ from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor from pymongo.errors import (ConnectionFailure, + CursorNotFound, InvalidOperation, OperationFailure, PyMongoError) @@ -32,11 +33,25 @@ # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. -_NON_RESUMABLE_GETMORE_ERRORS = frozenset([ - 11601, # Interrupted - 136, # CappedPositionLost - 237, # CursorKilled - None, # No error code was returned. +_RESUMABLE_GETMORE_ERRORS = frozenset([ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotMaster + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotMasterNoSlaveOk + 13436, # NotMasterOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + 216, # ElectionInProgress ]) @@ -283,12 +298,15 @@ def try_next(self): # one resume attempt. try: change = self._cursor._try_next(True) - except ConnectionFailure: + except (ConnectionFailure, CursorNotFound): self._resume() change = self._cursor._try_next(False) except OperationFailure as exc: - if (exc.code in _NON_RESUMABLE_GETMORE_ERRORS or - exc.has_error_label("NonResumableChangeStreamError")): + is_resumable = ((exc.max_wire_version >= 9 and + exc.has_error_label("ResumableChangeStreamError")) or + (exc.max_wire_version < 9 and + exc.code in _RESUMABLE_GETMORE_ERRORS)) + if not is_resumable: raise self._resume() change = self._cursor._try_next(False) diff --git a/pymongo/errors.py b/pymongo/errors.py index a309a9e7a5..bf31aeba1f 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -140,11 +140,13 @@ class ConfigurationError(PyMongoError): class OperationFailure(PyMongoError): """Raised when a database operation fails. + .. versionadded:: 3.11 + The :attr:`max_wire_version` attribute. .. versionadded:: 2.7 The :attr:`details` attribute. """ - def __init__(self, error, code=None, details=None): + def __init__(self, error, code=None, details=None, max_wire_version=None): error_labels = None if details is not None: error_labels = details.get('errorLabels') @@ -152,6 +154,7 @@ def __init__(self, error, code=None, details=None): error, error_labels=error_labels) self.__code = code self.__details = details + self.__max_wire_version = max_wire_version @property def code(self): @@ -171,6 +174,15 @@ def details(self): """ return self.__details + @property + def max_wire_version(self): + """The latest version of the wire protocol supported by the socket + that was used to run the operation that raised this exception. + + PyMongo does not always record this value and it may be None. + """ + return self.__max_wire_version + def __str__(self): output_str = "%s, full error: %s" % (self._message, self.__details) if sys.version_info[0] == 2 and isinstance(output_str, unicode): diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 51215b4c40..0cd2b00b8a 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -103,14 +103,16 @@ def _index_document(index_list): def _check_command_response(response, msg=None, allowable_errors=None, - parse_write_concern_error=False): + parse_write_concern_error=False, + max_wire_version=None): """Check the response to a command for errors. """ if "ok" not in response: # Server didn't recognize our message as a command. raise OperationFailure(response.get("$err"), response.get("code"), - response) + response, + max_wire_version) if parse_write_concern_error and 'writeConcernError' in response: _raise_write_concern_error(response['writeConcernError']) @@ -146,19 +148,24 @@ def _check_command_response(response, msg=None, allowable_errors=None, details.get("assertion", "")) raise OperationFailure(errmsg, details.get("assertionCode"), - response) + response, + max_wire_version) # Other errors # findAndModify with upsert can raise duplicate key error if code in (11000, 11001, 12582): - raise DuplicateKeyError(errmsg, code, response) + raise DuplicateKeyError(errmsg, code, response, + max_wire_version) elif code == 50: - raise ExecutionTimeout(errmsg, code, response) + raise ExecutionTimeout(errmsg, code, response, + max_wire_version) elif code == 43: - raise CursorNotFound(errmsg, code, response) + raise CursorNotFound(errmsg, code, response, + max_wire_version) msg = msg or "%s" - raise OperationFailure(msg % errmsg, code, response) + raise OperationFailure(msg % errmsg, code, response, + max_wire_version) def _check_gle_response(result): diff --git a/pymongo/server.py b/pymongo/server.py index 18919b9e2e..a45b1eefdd 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -133,7 +133,8 @@ def run_operation_with_response( first = docs[0] operation.client._process_response( first, operation.session) - _check_command_response(first) + _check_command_response( + first, max_wire_version=sock_info.max_wire_version) except Exception as exc: if publish: duration = datetime.now() - start diff --git a/test/change_streams/change-streams-errors.json b/test/change_streams/change-streams-errors.json index 5ebbd28f46..7b7cea30a4 100644 --- a/test/change_streams/change-streams-errors.json +++ b/test/change_streams/change-streams-errors.json @@ -75,7 +75,6 @@ { "description": "Change Stream should error when _id is projected out", "minServerVersion": "4.1.11", - "maxServerVersion": "4.3.3", "target": "collection", "topology": [ "replicaset", @@ -103,10 +102,54 @@ ], "result": { "error": { - "code": 280, - "errorLabels": [ - "NonResumableChangeStreamError" - ] + "code": 280 + } + } + }, + { + "description": "change stream errors on MaxTimeMSExpired", + "minServerVersion": "4.2", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 50, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [ + { + "$project": { + "_id": 0 + } + } + ], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "z": 3 + } + } + } + ], + "result": { + "error": { + "code": 50 } } } diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 669c774a3c..f050d7c564 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -37,7 +37,7 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient -from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS +# from pymongo.change_stream import _NON_RESUMABLE_GETMORE_ERRORS from pymongo.command_cursor import CommandCursor from pymongo.errors import (InvalidOperation, OperationFailure, ServerSelectionTimeoutError) @@ -555,47 +555,11 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): self.assertEqual(listener.results['started'][0].command_name, 'aggregate') - # Prose test no. 5 - def test_does_not_resume_fatal_errors(self): - """ChangeStream will not attempt to resume fatal server errors.""" - if client_context.supports_failCommand_fail_point: - # failCommand does not support returning no errorCode. - TEST_ERROR_CODES = _NON_RESUMABLE_GETMORE_ERRORS - {None} - @contextmanager - def generate_error(change_stream, code): - fail_point = {'mode': {'times': 1}, 'data': { - 'errorCode': code, 'failCommands': ['getMore']}} - with self.fail_point(fail_point): - yield - else: - TEST_ERROR_CODES = _NON_RESUMABLE_GETMORE_ERRORS - @contextmanager - def generate_error(change_stream, code): - def mock_try_next(*args, **kwargs): - change_stream._cursor.close() - raise OperationFailure('Mock server error', code=code) - - original_cursor = change_stream._cursor - change_stream._cursor._try_next = mock_try_next - try: - yield - finally: - # Un patch the instance. - del original_cursor._try_next - - for code in TEST_ERROR_CODES: - with self.change_stream() as change_stream: - self.watched_collection().insert_one({}) - with generate_error(change_stream, code): - with self.assertRaises(OperationFailure): - next(change_stream) - with self.assertRaises(StopIteration): - next(change_stream) - + # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED - # readPreference is not configurable using the watch() helpers so we can - # skip this test. Also, PyMongo performs server selection for each - # operation which ensure compliance with this prose test. + # Reason: readPreference is not configurable using the watch() helpers + # so we can skip this test. Also, PyMongo performs server selection for + # each operation which ensure compliance with this prose test. # Prose test no. 7 def test_initial_empty_batch(self): @@ -1075,7 +1039,7 @@ class TestAllScenarios(unittest.TestCase): @classmethod @client_context.require_connection def setUpClass(cls): - cls.listener = WhiteListEventListener("aggregate") + cls.listener = WhiteListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod @@ -1086,14 +1050,66 @@ def setUp(self): self.listener.results.clear() def setUpCluster(self, scenario_dict): - assets = [ - (scenario_dict["database_name"], scenario_dict["collection_name"]), - (scenario_dict["database2_name"], scenario_dict["collection2_name"]), - ] + assets = [(scenario_dict["database_name"], + scenario_dict["collection_name"]), + (scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"))] for db, coll in assets: self.client.drop_database(db) self.client[db].create_collection(coll) + def setFailPoint(self, scenario_dict): + fail_point = scenario_dict.get("failPoint") + if fail_point is None: + return + + fail_cmd = SON([('configureFailPoint', 'failCommand')]) + fail_cmd.update(fail_point) + client_context.client.admin.command(fail_cmd) + self.addCleanup( + client_context.client.admin.command, + 'configureFailPoint', fail_cmd['configureFailPoint'], mode='off') + + def assert_list_contents_are_subset(self, superlist, sublist): + """Check that each element in sublist is a subset of the corresponding + element in superlist.""" + self.assertEqual(len(superlist), len(sublist)) + for sup, sub in zip(superlist, sublist): + if isinstance(sub, dict): + self.assert_dict_is_subset(sup, sub) + continue + if isinstance(sub, (list, tuple)): + self.assert_list_contents_are_subset(sup, sub) + continue + self.assertEqual(sup, sub) + + def assert_dict_is_subset(self, superdict, subdict): + """Check that subdict is a subset of superdict.""" + exempt_fields = ["documentKey", "_id", "getMore"] + for key, value in iteritems(subdict): + if key not in superdict: + self.fail('Key %s not found in %s' % (key, superdict)) + if isinstance(value, dict): + self.assert_dict_is_subset(superdict[key], value) + continue + if isinstance(value, (list, tuple)): + self.assert_list_contents_are_subset(superdict[key], value) + continue + if key in exempt_fields: + # Only check for presence of these exempt fields, but not value. + self.assertIn(key, superdict) + else: + self.assertEqual(superdict[key], value) + + def check_event(self, event, expectation_dict): + if event is None: + self.fail() + for key, value in iteritems(expectation_dict): + if isinstance(value, dict): + self.assert_dict_is_subset(getattr(event, key), value) + else: + self.assertEqual(getattr(event, key), value) + def tearDown(self): self.listener.results.clear() @@ -1147,36 +1163,11 @@ def run_operation(client, operation): return cmd(**arguments) -def assert_dict_is_subset(superdict, subdict): - """Check that subdict is a subset of superdict.""" - exempt_fields = ["documentKey", "_id"] - for key, value in iteritems(subdict): - if key not in superdict: - assert False - if isinstance(value, dict): - assert_dict_is_subset(superdict[key], value) - continue - if key in exempt_fields: - superdict[key] = "42" - assert superdict[key] == value - - -def check_event(event, expectation_dict): - if event is None: - raise AssertionError - for key, value in iteritems(expectation_dict): - if isinstance(value, dict): - assert_dict_is_subset( - getattr(event, key), value - ) - else: - assert getattr(event, key) == value - - def create_test(scenario_def, test): def run_scenario(self): # Set up self.setUpCluster(scenario_def) + self.setFailPoint(test) is_error = test["result"].get("error", False) try: with get_change_stream( @@ -1202,17 +1193,17 @@ def run_scenario(self): else: # Check for expected output from change streams for change, expected_changes in zip(changes, test["result"]["success"]): - assert_dict_is_subset(change, expected_changes) + self.assert_dict_is_subset(change, expected_changes) self.assertEqual(len(changes), len(test["result"]["success"])) finally: # Check for expected events results = self.listener.results - for expectation in test.get("expectations", []): - for idx, (event_type, event_desc) in enumerate(iteritems(expectation)): + for idx, expectation in enumerate(test.get("expectations", [])): + for event_type, event_desc in iteritems(expectation): results_key = event_type.split("_")[1] event = results[results_key][idx] if len(results[results_key]) > idx else None - check_event(event, event_desc) + self.check_event(event, event_desc) return run_scenario From 956ce3d4b0edfa9c1d946109db743f82ed0bfc0a Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Mon, 29 Jun 2020 19:53:21 -0700 Subject: [PATCH 0168/2111] Incorporate review changes --- pymongo/change_stream.py | 4 ++-- pymongo/errors.py | 5 +++++ pymongo/helpers.py | 10 +++++----- pymongo/network.py | 4 ++-- pymongo/pool.py | 15 ++++++++------- pymongo/server.py | 2 +- test/test_change_stream.py | 1 - test/test_database.py | 10 +++++----- test/test_discovery_and_monitoring.py | 2 +- 9 files changed, 29 insertions(+), 24 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index b86dca5415..7509315675 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -302,9 +302,9 @@ def try_next(self): self._resume() change = self._cursor._try_next(False) except OperationFailure as exc: - is_resumable = ((exc.max_wire_version >= 9 and + is_resumable = ((exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError")) or - (exc.max_wire_version < 9 and + (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS)) if not is_resumable: raise diff --git a/pymongo/errors.py b/pymongo/errors.py index bf31aeba1f..80e48692aa 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -156,6 +156,10 @@ def __init__(self, error, code=None, details=None, max_wire_version=None): self.__details = details self.__max_wire_version = max_wire_version + @property + def _max_wire_version(self): + return self.__max_wire_version + @property def code(self): """The error code returned by the server, if any. @@ -189,6 +193,7 @@ def __str__(self): return output_str.encode('utf-8', errors='replace') return output_str + class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is invalidated on the server. diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 0cd2b00b8a..67b2e15842 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -102,9 +102,9 @@ def _index_document(index_list): return index -def _check_command_response(response, msg=None, allowable_errors=None, - parse_write_concern_error=False, - max_wire_version=None): +def _check_command_response(response, max_wire_version, msg=None, + allowable_errors=None, + parse_write_concern_error=False): """Check the response to a command for errors. """ if "ok" not in response: @@ -168,10 +168,10 @@ def _check_command_response(response, msg=None, allowable_errors=None, max_wire_version) -def _check_gle_response(result): +def _check_gle_response(result, max_wire_version): """Return getlasterror response as a dict, or raise OperationFailure.""" # Did getlasterror itself fail? - _check_command_response(result) + _check_command_response(result, max_wire_version) if result.get("wtimeout", False): # MongoDB versions before 1.8.0 return the error message in an "errmsg" diff --git a/pymongo/network.py b/pymongo/network.py index 3224cf6498..759872ef16 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -40,7 +40,7 @@ _UNPACK_HEADER = struct.Struct(" Date: Wed, 1 Jul 2020 16:25:09 -0700 Subject: [PATCH 0169/2111] cleanup cruft --- pymongo/errors.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pymongo/errors.py b/pymongo/errors.py index 80e48692aa..aaf51e0bc4 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -140,8 +140,6 @@ class ConfigurationError(PyMongoError): class OperationFailure(PyMongoError): """Raised when a database operation fails. - .. versionadded:: 3.11 - The :attr:`max_wire_version` attribute. .. versionadded:: 2.7 The :attr:`details` attribute. """ From dd23624100842bafee1b62d4f59bdfae89803423 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 1 Jul 2020 16:37:39 -0700 Subject: [PATCH 0170/2111] handle None case --- pymongo/change_stream.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 7509315675..08b2043def 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -302,6 +302,8 @@ def try_next(self): self._resume() change = self._cursor._try_next(False) except OperationFailure as exc: + if exc._max_wire_version is None: + raise is_resumable = ((exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError")) or (exc._max_wire_version < 9 and From 8e3fd0040e63199315cc5578593e1248bdf5be50 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 1 Jul 2020 16:49:36 -0700 Subject: [PATCH 0171/2111] cruft removal 2 --- pymongo/errors.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/pymongo/errors.py b/pymongo/errors.py index aaf51e0bc4..7a6902be8b 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -154,6 +154,12 @@ def __init__(self, error, code=None, details=None, max_wire_version=None): self.__details = details self.__max_wire_version = max_wire_version + def __str__(self): + output_str = "%s, full error: %s" % (self._message, self.__details) + if sys.version_info[0] == 2 and isinstance(output_str, unicode): + return output_str.encode('utf-8', errors='replace') + return output_str + @property def _max_wire_version(self): return self.__max_wire_version @@ -176,21 +182,6 @@ def details(self): """ return self.__details - @property - def max_wire_version(self): - """The latest version of the wire protocol supported by the socket - that was used to run the operation that raised this exception. - - PyMongo does not always record this value and it may be None. - """ - return self.__max_wire_version - - def __str__(self): - output_str = "%s, full error: %s" % (self._message, self.__details) - if sys.version_info[0] == 2 and isinstance(output_str, unicode): - return output_str.encode('utf-8', errors='replace') - return output_str - class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is From d97a43ea9b14a8456051b1b22babf424378e5fef Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 1 Jul 2020 16:58:56 -0700 Subject: [PATCH 0172/2111] cleanup --- pymongo/network.py | 5 +++-- pymongo/pool.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pymongo/network.py b/pymongo/network.py index 759872ef16..d9d645fa91 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -40,7 +40,7 @@ _UNPACK_HEADER = struct.Struct(" Date: Wed, 1 Jul 2020 17:24:41 -0700 Subject: [PATCH 0173/2111] remove unnecessary changes --- pymongo/errors.py | 12 ++++++------ pymongo/pool.py | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pymongo/errors.py b/pymongo/errors.py index 7a6902be8b..e5d52bfe3b 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -154,12 +154,6 @@ def __init__(self, error, code=None, details=None, max_wire_version=None): self.__details = details self.__max_wire_version = max_wire_version - def __str__(self): - output_str = "%s, full error: %s" % (self._message, self.__details) - if sys.version_info[0] == 2 and isinstance(output_str, unicode): - return output_str.encode('utf-8', errors='replace') - return output_str - @property def _max_wire_version(self): return self.__max_wire_version @@ -182,6 +176,12 @@ def details(self): """ return self.__details + def __str__(self): + output_str = "%s, full error: %s" % (self._message, self.__details) + if sys.version_info[0] == 2 and isinstance(output_str, unicode): + return output_str.encode('utf-8', errors='replace') + return output_str + class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is diff --git a/pymongo/pool.py b/pymongo/pool.py index 1668008954..74965620e1 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -680,11 +680,11 @@ def command(self, dbname, spec, slave_ok=False, if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self, dbname, spec, - slave_ok, self.is_mongos, read_preference, - codec_options, session, client, check, - allowable_errors, self.address, check_keys, - listeners, self.max_bson_size, read_concern, + return command(self, dbname, spec, slave_ok, + self.is_mongos, read_preference, codec_options, + session, client, check, allowable_errors, + self.address, check_keys, listeners, + self.max_bson_size, read_concern, parse_write_concern_error=parse_write_concern_error, collation=collation, compression_ctx=self.compression_context, From 04926c6ccdd167a6498acaeeae3f8ce7bea5a7c0 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 1 Jul 2020 17:52:35 -0700 Subject: [PATCH 0174/2111] fix monitor failure --- pymongo/pool.py | 2 +- test/test_discovery_and_monitoring.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 74965620e1..b04e4bd33c 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -606,7 +606,7 @@ def _next_reply(self): self.more_to_come = reply.more_to_come unpacked_docs = reply.unpack_response() response_doc = unpacked_docs[0] - helpers._check_command_response(response_doc) + helpers._check_command_response(response_doc, self.max_wire_version) return response_doc def command(self, dbname, spec, slave_ok=False, diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 27f385d6a3..ef97bcc67a 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -334,7 +334,8 @@ def assert_event_count(self, event, count): Assert the given event was published exactly `count` times. """ - self.assertEqual(self._event_count(event), count) + self.assertEqual(self._event_count(event), count, + 'expected %s not %r' % (count, event)) def wait_for_event(self, event, count): """Run the waitForEvent test operation. From 4457714d1b1a9f2e0d3d8b73fb913d024e7512dc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 1 Jul 2020 18:32:58 -0700 Subject: [PATCH 0175/2111] PYTHON-2082 Unpin session after RetryableWriteErrors from commitTransaction (#451) PYTHON-2154 PYTHON-2189 Remove 4.5 transaction test workarounds --- pymongo/mongo_client.py | 30 +++++++++++++++++------------- test/test_examples.py | 1 - test/test_transactions.py | 8 +------- 3 files changed, 18 insertions(+), 21 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7920b6a530..5d4703c0ef 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1421,12 +1421,15 @@ def is_retrying(): # be a persistent outage. Attempting to retry in this case will # most likely be a waste of time. raise - except Exception as exc: + except PyMongoError as exc: if not retryable: raise - # Add the RetryableWriteError label. - if (not _retryable_writes_error(exc, max_wire_version) - or is_retrying()): + # Add the RetryableWriteError label, if applicable. + _add_retryable_write_error(exc, max_wire_version) + retryable_error = exc.has_error_label("RetryableWriteError") + if retryable_error: + session._unpin_mongos() + if is_retrying() or not retryable_error: raise if bulk: bulk.retrying = True @@ -2170,7 +2173,7 @@ def _retryable_error_doc(exc): return None -def _retryable_writes_error(exc, max_wire_version): +def _add_retryable_write_error(exc, max_wire_version): doc = _retryable_error_doc(exc) if doc: code = doc.get('code', 0) @@ -2183,18 +2186,18 @@ def _retryable_writes_error(exc, max_wire_version): "to your connection string.") raise OperationFailure(errmsg, code, exc.details) if max_wire_version >= 9: - # MongoDB 4.4+ utilizes RetryableWriteError. - return 'RetryableWriteError' in doc.get('errorLabels', []) + # In MongoDB 4.4+, the server reports the error labels. + for label in doc.get('errorLabels', []): + exc._add_error_label(label) else: if code in helpers._RETRYABLE_ERROR_CODES: exc._add_error_label("RetryableWriteError") - return True - return False - if isinstance(exc, ConnectionFailure): + # Connection errors are always retryable except NotMasterError which is + # handled above. + if (isinstance(exc, ConnectionFailure) and + not isinstance(exc, NotMasterError)): exc._add_error_label("RetryableWriteError") - return True - return False class _MongoClientErrorHandler(object): @@ -2232,7 +2235,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.session._server_session.mark_dirty() if issubclass(exc_type, PyMongoError): - if exc_val.has_error_label("TransientTransactionError"): + if (exc_val.has_error_label("TransientTransactionError") or + exc_val.has_error_label("RetryableWriteError")): self.session._unpin_mongos() err_ctx = _ErrorContext( diff --git a/test/test_examples.py b/test/test_examples.py index 16e1936d57..c0913b34c3 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -842,7 +842,6 @@ def test_misc(self): class TestTransactionExamples(IntegrationTest): - @client_context.require_version_max(4, 4, 99) # PYTHON-2154 skip on 4.5+ @client_context.require_transactions def test_transactions(self): # Transaction examples diff --git a/test/test_transactions.py b/test/test_transactions.py index ff707c5f92..85345e2748 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -415,13 +415,7 @@ def create_test(scenario_def, test, name): @client_context.require_test_commands @client_context.require_transactions def run_scenario(self): - try: - self.run_scenario(scenario_def, test) - except OperationFailure as exc: - if (client_context.version.at_least(4, 5) and - client_context.is_mongos and exc.code == 13388): - self.skipTest('PYTHON-2189 Ignoring StaleConfig error: %r' % ( - exc.details)) + self.run_scenario(scenario_def, test) return run_scenario From a075eb798f97379cec2d7d1cc971a774ccaeb9b7 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 2 Jul 2020 13:53:08 -0400 Subject: [PATCH 0176/2111] PYTHON-1787: fix NotMasterError no attribute error (#450) --- .gitignore | 1 + pymongo/errors.py | 24 +++++++------- test/test_database.py | 2 +- test/test_errors.py | 73 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 88 insertions(+), 12 deletions(-) create mode 100644 test/test_errors.py diff --git a/.gitignore b/.gitignore index 385160b014..de435d109e 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ pymongo.egg-info/ *.egg .tox mongocryptd.pid +.idea/ diff --git a/pymongo/errors.py b/pymongo/errors.py index e5d52bfe3b..0dfa1f237d 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -100,6 +100,14 @@ class NetworkTimeout(AutoReconnect): """ +def _format_detailed_error(message, details): + if details is not None: + message = "%s, full error: %s" % (message, details) + if sys.version_info[0] == 2 and isinstance(message, unicode): + message = message.encode('utf-8', errors='replace') + return message + + class NotMasterError(AutoReconnect): """The server responded "not master" or "node is recovering". @@ -113,11 +121,10 @@ class NotMasterError(AutoReconnect): Subclass of :exc:`~pymongo.errors.AutoReconnect`. """ - def __str__(self): - output_str = "%s, full error: %s" % (self._message, self.__details) - if sys.version_info[0] == 2 and isinstance(output_str, unicode): - return output_str.encode('utf-8', errors='replace') - return output_str + def __init__(self, message='', errors=None): + super(NotMasterError, self).__init__( + _format_detailed_error(message, errors), errors=errors) + class ServerSelectionTimeoutError(AutoReconnect): """Thrown when no MongoDB server is available for an operation @@ -149,7 +156,7 @@ def __init__(self, error, code=None, details=None, max_wire_version=None): if details is not None: error_labels = details.get('errorLabels') super(OperationFailure, self).__init__( - error, error_labels=error_labels) + _format_detailed_error(error, details), error_labels=error_labels) self.__code = code self.__details = details self.__max_wire_version = max_wire_version @@ -176,11 +183,6 @@ def details(self): """ return self.__details - def __str__(self): - output_str = "%s, full error: %s" % (self._message, self.__details) - if sys.version_info[0] == 2 and isinstance(output_str, unicode): - return output_str.encode('utf-8', errors='replace') - return output_str class CursorNotFound(OperationFailure): diff --git a/test/test_database.py b/test/test_database.py index 18eb322ea5..349f090054 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -959,7 +959,7 @@ def test_command_response_without_ok(self): try: helpers._check_command_response({'$err': 'foo'}, None) except OperationFailure as e: - self.assertEqual(e.args[0], 'foo') + self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") else: self.fail("_check_command_response didn't raise OperationFailure") diff --git a/test/test_errors.py b/test/test_errors.py new file mode 100644 index 0000000000..32d7af3284 --- /dev/null +++ b/test/test_errors.py @@ -0,0 +1,73 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import traceback + +sys.path[0:0] = [""] + +from pymongo.errors import (NotMasterError, + OperationFailure) +from test import (PyMongoTestCase, + unittest) + + +class TestErrors(PyMongoTestCase): + def test_not_master_error(self): + exc = NotMasterError("not master test", {"errmsg": "error"}) + self.assertIn("full error", str(exc)) + try: + raise exc + except NotMasterError: + self.assertIn("full error", traceback.format_exc()) + + def test_operation_failure(self): + exc = OperationFailure("operation failure test", 10, + {"errmsg": "error"}) + self.assertIn("full error", str(exc)) + try: + raise exc + except OperationFailure: + self.assertIn("full error", traceback.format_exc()) + + def _test_unicode_strs(self, exc): + if sys.version_info[0] == 2: + self.assertEqual("unicode \xf0\x9f\x90\x8d, full error: {" + "'errmsg': u'unicode \\U0001f40d'}", str(exc)) + elif 'PyPy' in sys.version: + # PyPy displays unicode in repr differently. + self.assertEqual("unicode \U0001f40d, full error: {" + "'errmsg': 'unicode \\U0001f40d'}", str(exc)) + else: + self.assertEqual("unicode \U0001f40d, full error: {" + "'errmsg': 'unicode \U0001f40d'}", str(exc)) + try: + raise exc + except Exception: + self.assertIn("full error", traceback.format_exc()) + + def test_unicode_strs_operation_failure(self): + exc = OperationFailure(u'unicode \U0001f40d', 10, + {"errmsg": u'unicode \U0001f40d'}) + self._test_unicode_strs(exc) + + def test_unicode_strs_not_master_error(self): + exc = NotMasterError(u'unicode \U0001f40d', + {"errmsg": u'unicode \U0001f40d'}) + self._test_unicode_strs(exc) + + + +if __name__ == "__main__": + unittest.main() From c2d6343110c99e48aaaf98929b8b69b5fa6f0d58 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Jul 2020 14:17:20 -0700 Subject: [PATCH 0177/2111] PYTHON-2163 Suppress ragged EOFs when using pyOpenSSL to match the stdlib (#453) Wrap pyOpenSSL connection errors with AutoReconnect. --- pymongo/pool.py | 10 ++++++---- pymongo/pyopenssl_context.py | 30 ++++++++++++++++++++++++------ 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index b04e4bd33c..a18e005ba5 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1007,7 +1007,7 @@ def _configured_socket(address, options): # Raise CertificateError directly like we do after match_hostname # below. raise - except IOError as exc: + except (IOError, OSError, _SSLError) as exc: sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -1176,15 +1176,17 @@ def connect(self, all_credentials=None): if self.enabled_for_cmap: listeners.publish_connection_created(self.address, conn_id) - sock = None try: sock = _configured_socket(self.address, self.opts) - except socket.error as error: + except Exception as error: if self.enabled_for_cmap: listeners.publish_connection_closed( self.address, conn_id, ConnectionClosedReason.ERROR) - _raise_connection_failure(self.address, error) + if isinstance(error, (IOError, OSError, _SSLError)): + _raise_connection_failure(self.address, error) + + raise sock_info = SocketInfo(sock, self, self.address, conn_id) if self.handshake: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index bb10eb6a00..10c35141fd 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -83,14 +83,20 @@ def _is_ip_address(address): _SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +def _ragged_eof(exc): + """Return True if the OpenSSL.SSL.SysCallError is a ragged EOF.""" + return exc.args == (-1, 'Unexpected EOF') + + # https://github.com/pyca/pyopenssl/issues/168 # https://github.com/pyca/pyopenssl/issues/176 # https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets class _sslConn(_SSL.Connection): - def __init__(self, *args, **kwargs): + def __init__(self, ctx, sock, suppress_ragged_eofs): self.socket_checker = _SocketChecker() - super(_sslConn, self).__init__(*args, **kwargs) + self.suppress_ragged_eofs = suppress_ragged_eofs + super(_sslConn, self).__init__(ctx, sock) def _call(self, call, *args, **kwargs): timeout = self.gettimeout() @@ -110,10 +116,22 @@ def do_handshake(self, *args, **kwargs): return self._call(super(_sslConn, self).do_handshake, *args, **kwargs) def recv(self, *args, **kwargs): - return self._call(super(_sslConn, self).recv, *args, **kwargs) + try: + return self._call(super(_sslConn, self).recv, *args, **kwargs) + except _SSL.SysCallError as exc: + # Suppress ragged EOFs to match the stdlib. + if self.suppress_ragged_eofs and _ragged_eof(exc): + return b"" + raise def recv_into(self, *args, **kwargs): - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + try: + return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + except _SSL.SysCallError as exc: + # Suppress ragged EOFs to match the stdlib. + if self.suppress_ragged_eofs and _ragged_eof(exc): + return 0 + raise def sendall(self, buf, flags=0): view = memoryview(buf) @@ -266,12 +284,12 @@ def set_default_verify_paths(self): def wrap_socket(self, sock, server_side=False, do_handshake_on_connect=True, - suppress_ragged_eofs=True, # TODO: Add support to _sslConn. + suppress_ragged_eofs=True, server_hostname=None, session=None): """Wrap an existing Python socket sock and return a TLS socket object. """ - ssl_conn = _sslConn(self._ctx, sock) + ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs) if session: ssl_conn.set_session(session) if server_side is True: From a1de506b7e61f98618eed87f38e00155ebb4cf94 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jul 2020 15:17:23 -0700 Subject: [PATCH 0178/2111] PYTHON-2282 Resync maxStalenessSeconds spec tests (#452) --- .../DefaultNoMaxStaleness.json | 136 ++++----- .../ReplicaSetNoPrimary/Incompatible.json | 68 ++--- .../ReplicaSetNoPrimary/LastUpdateTime.json | 164 +++++------ .../MaxStalenessTooSmall.json | 20 ++ .../ReplicaSetNoPrimary/Nearest.json | 164 +++++------ .../ReplicaSetNoPrimary/Nearest2.json | 164 +++++------ .../ReplicaSetNoPrimary/NoKnownServers.json | 37 +-- .../ReplicaSetNoPrimary/PrimaryPreferred.json | 118 ++++---- .../PrimaryPreferred_tags.json | 158 +++++------ .../ReplicaSetNoPrimary/Secondary.json | 212 +++++++------- .../SecondaryPreferred.json | 116 ++++---- .../SecondaryPreferred_tags.json | 212 +++++++------- .../ReplicaSetNoPrimary/ZeroMaxStaleness.json | 68 ++--- .../DefaultNoMaxStaleness.json | 136 ++++----- .../ReplicaSetWithPrimary/Incompatible.json | 68 ++--- .../ReplicaSetWithPrimary/LastUpdateTime.json | 164 +++++------ .../ReplicaSetWithPrimary/LongHeartbeat.json | 140 +++++----- .../ReplicaSetWithPrimary/LongHeartbeat2.json | 70 ++--- .../MaxStalenessTooSmall.json | 70 ++--- .../MaxStalenessWithModePrimary.json | 66 ++--- .../ReplicaSetWithPrimary/Nearest.json | 164 +++++------ .../ReplicaSetWithPrimary/Nearest2.json | 164 +++++------ .../ReplicaSetWithPrimary/Nearest_tags.json | 158 +++++------ .../PrimaryPreferred.json | 118 ++++---- .../PrimaryPreferred_incompatible.json | 68 ++--- .../SecondaryPreferred.json | 116 ++++---- .../SecondaryPreferred_tags.json | 264 +++++++++--------- .../SecondaryPreferred_tags2.json | 182 ++++++------ .../ReplicaSetWithPrimary/Secondary_tags.json | 264 +++++++++--------- .../Secondary_tags2.json | 182 ++++++------ .../ZeroMaxStaleness.json | 68 ++--- test/max_staleness/Sharded/Incompatible.json | 68 ++--- .../Sharded/SmallMaxStaleness.json | 140 +++++----- test/max_staleness/Single/Incompatible.json | 44 +-- .../Single/SmallMaxStaleness.json | 94 +++---- .../Unknown/SmallMaxStaleness.json | 33 +-- 36 files changed, 2250 insertions(+), 2228 deletions(-) create mode 100644 test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json diff --git a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json index bf15fe7345..1e3dd0bfd9 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -1,74 +1,74 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json b/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json index f0eceefc21..7f9fa764c7 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json index 24a5c21cab..e1abef2844 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 25002, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 25001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 5 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 25002, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 25002, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 25002, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 25001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + "maxWireVersion": 5 } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json new file mode 100644 index 0000000000..28e5e2aa4a --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/MaxStalenessTooSmall.json @@ -0,0 +1,20 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "Unknown" + }, + { + "address": "b:27017", + "type": "Unknown" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "error": true +} diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json index 8f047ee901..53549e6431 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 5 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + "maxWireVersion": 5 } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json index 18314cb6c7..e2768c7fb8 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 5 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + "maxWireVersion": 5 } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json b/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json index 847f2874c1..5905fcbc60 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json +++ b/test/max_staleness/ReplicaSetNoPrimary/NoKnownServers.json @@ -1,20 +1,21 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "type": "Unknown" - }, - { - "address": "b:27017", - "type": "Unknown" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "Unknown" + }, + { + "address": "b:27017", + "type": "Unknown" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 90 + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json index 72fff11454..8c6be6886a 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -1,64 +1,64 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "maxStalenessSeconds": 90, - "mode": "PrimaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 90 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json index 0f6865624d..26007c026e 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -1,84 +1,84 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "PrimaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json index 2ea70b1629..7d5eb58f4d 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json @@ -1,111 +1,111 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json index db45042330..df0bb5d77f 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -1,63 +1,63 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "SecondaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json index ab4e5a3b23..1ac3ea0aed 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -1,111 +1,111 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json index 23da14cad2..cb5dc5175a 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 0, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 0 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json index d8418c139e..ed18d5837e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -1,74 +1,74 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json b/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json index ec65af9cf8..d27ea11202 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json index 89725d9000..bbd8238e8a 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 5 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 125001, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 125001, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "maxWireVersion": 5 } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json index 3ac92f341a..cb05f52aa2 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json @@ -1,76 +1,76 @@ { - "heartbeatFrequencyMS": 120000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "heartbeatFrequencyMS": 120000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 130, - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 130 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json index 2ed9f75f9e..be169a3dcb 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -1,37 +1,37 @@ { - "error": true, - "heartbeatFrequencyMS": 120000, - "read_preference": { - "maxStalenessSeconds": 129, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "heartbeatFrequencyMS": 120000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 129 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json index 049cd7c4cd..173f5742a2 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -1,37 +1,37 @@ { - "error": true, - "heartbeatFrequencyMS": 500, - "read_preference": { - "maxStalenessSeconds": 89, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "heartbeatFrequencyMS": 500, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 89 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json index 54155d40fa..eee3462783 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -1,35 +1,35 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 120 - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "maxStalenessSeconds": 120 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json index ea8ba031d6..753fb82ca3 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 5 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "maxWireVersion": 5 } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json index 86bf3b988c..6233c0815a 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json @@ -1,88 +1,88 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "type": "RSSecondary", + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest" + }, + "maxWireVersion": 5 }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5 + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "maxWireVersion": 5 } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json index e147a57f67..9a1cd3bb12 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json @@ -1,84 +1,84 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSPrimary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json index b7dd197031..107ae2755e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -1,64 +1,64 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "PrimaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json index 77ba55276b..a6681f6a13 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "PrimaryPreferred" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "PrimaryPreferred", + "maxStalenessSeconds": 150 + }, + "error": true } diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json index 01a9aea7e9..5f8a21f15c 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -1,63 +1,63 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } } - ], - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "SecondaryPreferred" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json index 5ca04f4355..09ce6d6bd0 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -1,138 +1,138 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "e:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "e:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json index f512ff2609..3700c30453 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -1,96 +1,96 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "SecondaryPreferred", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json index 7df8eac73c..f117159f64 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json @@ -1,138 +1,138 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "d:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "e:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "tags": { + "data_center": "tokyo" + } + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 50, + "lastUpdateTime": 1, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1000001" + } + }, + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 1, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1000001" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "d:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "e:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json index d74234c6b1..b739c6141b 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json @@ -1,96 +1,96 @@ { - "heartbeatFrequencyMS": 25000, - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + "heartbeatFrequencyMS": 25000, + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "125002" + } + }, + "maxWireVersion": 5 + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" } - ], - "read_preference": { - "maxStalenessSeconds": 150, - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - }, - { - "data_center": "tokyo" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "125002" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "tokyo" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "read_preference": { + "mode": "Secondary", + "maxStalenessSeconds": 150, + "tag_sets": [ + { + "data_center": "nyc" + }, + { + "data_center": "tokyo" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + }, + "maxWireVersion": 5, + "tags": { + "data_center": "tokyo" + } } + ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json index 126c19a19e..f17aa93a3f 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 0, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - }, - "maxWireVersion": 5, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "type": "RSPrimary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "2" + } + } + }, + { + "address": "b:27017", + "type": "RSSecondary", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 0 + }, + "error": true } diff --git a/test/max_staleness/Sharded/Incompatible.json b/test/max_staleness/Sharded/Incompatible.json index 5e954166de..c261383f4a 100644 --- a/test/max_staleness/Sharded/Incompatible.json +++ b/test/max_staleness/Sharded/Incompatible.json @@ -1,36 +1,36 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "Mongos" - } - ], - "type": "Sharded" - } + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + }, + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "error": true } diff --git a/test/max_staleness/Sharded/SmallMaxStaleness.json b/test/max_staleness/Sharded/SmallMaxStaleness.json index 2656f66c63..27b9f1c12f 100644 --- a/test/max_staleness/Sharded/SmallMaxStaleness.json +++ b/test/max_staleness/Sharded/SmallMaxStaleness.json @@ -1,76 +1,76 @@ { - "heartbeatFrequencyMS": 10000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" + }, + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" + { + "address": "b:27017", + "type": "Mongos", + "avg_rtt_ms": 50, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "Mongos", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - }, - { - "address": "b:27017", - "avg_rtt_ms": 50, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Mongos" - } - ], - "type": "Sharded" + } } + ] } diff --git a/test/max_staleness/Single/Incompatible.json b/test/max_staleness/Single/Incompatible.json index 852202638f..b37fec7c1a 100644 --- a/test/max_staleness/Single/Incompatible.json +++ b/test/max_staleness/Single/Incompatible.json @@ -1,24 +1,24 @@ { - "error": true, - "read_preference": { - "maxStalenessSeconds": 120, - "mode": "Nearest" - }, - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 4, - "type": "Standalone" - } - ], - "type": "Single" - } + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 4, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "error": true } diff --git a/test/max_staleness/Single/SmallMaxStaleness.json b/test/max_staleness/Single/SmallMaxStaleness.json index 7c1792861c..c6b10231b8 100644 --- a/test/max_staleness/Single/SmallMaxStaleness.json +++ b/test/max_staleness/Single/SmallMaxStaleness.json @@ -1,52 +1,52 @@ { - "heartbeatFrequencyMS": 10000, - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Standalone" + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } } - ], - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Standalone" + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - }, - "maxWireVersion": 5, - "type": "Standalone" - } - ], - "type": "Single" + } } + ], + "in_latency_window": [ + { + "address": "a:27017", + "type": "Standalone", + "avg_rtt_ms": 5, + "lastUpdateTime": 0, + "maxWireVersion": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] } diff --git a/test/max_staleness/Unknown/SmallMaxStaleness.json b/test/max_staleness/Unknown/SmallMaxStaleness.json index fc196abc92..bf6174b8e4 100644 --- a/test/max_staleness/Unknown/SmallMaxStaleness.json +++ b/test/max_staleness/Unknown/SmallMaxStaleness.json @@ -1,18 +1,19 @@ { - "heartbeatFrequencyMS": 10000, - "in_latency_window": [], - "read_preference": { - "maxStalenessSeconds": 1, - "mode": "Nearest" - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "type": "Unknown" - } - ], - "type": "Unknown" - } + "heartbeatFrequencyMS": 10000, + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "type": "Unknown", + "maxWireVersion": 5 + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 1 + }, + "suitable_servers": [], + "in_latency_window": [] } From 7e2790cc446b5023410429e3fe4272a8ad532e73 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jul 2020 16:16:21 -0700 Subject: [PATCH 0179/2111] PYTHON-2220 Mention Atlas Search in documentation (#455) --- pymongo/__init__.py | 4 ++++ pymongo/cursor.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index ba6b17af72..b87668cd65 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -58,6 +58,10 @@ TEXT = "text" """Index specifier for a `text index`_. +.. seealso:: MongoDB's `Atlas Search + `_ which offers more advanced + text search functionality. + .. versionadded:: 2.7.1 .. _text index: http://docs.mongodb.org/manual/core/index-text/ diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 2a994389ac..ad116d4124 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -745,6 +745,9 @@ def sort(self, key_or_list, direction=None): for doc in cursor: print(doc) + For more advanced text search functionality, see MongoDB's + `Atlas Search `_. + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`sort` applied to this cursor has any effect. From 5d92b2f5523876653018a4427962ef8ee8f7e6ff Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jul 2020 18:55:17 -0700 Subject: [PATCH 0180/2111] PYTHON-2243 Raise informative error message when attempting a GridFS operation in a transaction (#454) --- doc/changelog.rst | 7 ++++++ gridfs/__init__.py | 25 ++++++++++++++------ gridfs/grid_file.py | 14 +++++++++++ test/test_transactions.py | 50 +++++++++++++++++++++++++++++++++++++-- 4 files changed, 87 insertions(+), 9 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8a54c78f27..9e1c65e108 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -53,6 +53,13 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: :meth:`~pymongo.database.Database.command` to run the ``reIndex`` command instead. +Unavoidable breaking changes: + +- :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS` do not support + multi-document transactions. Running a GridFS operation in a transaction + now always raises the following error: + ``InvalidOperation: GridFS does not support multi-document transactions`` + .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ Issues Resolved diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 6c56a605e6..f291e8c9ca 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -26,7 +26,8 @@ GridOut, GridOutCursor, DEFAULT_CHUNK_SIZE, - _clear_entity_type_registry) + _clear_entity_type_registry, + _disallow_transactions) from pymongo import (ASCENDING, DESCENDING) from pymongo.common import UNAUTHORIZED_CODES, validate_string @@ -50,6 +51,10 @@ def __init__(self, database, collection="fs", disable_md5=False): computed for uploaded files. Useful in environments where MD5 cannot be used for regulatory or other reasons. Defaults to False. + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + .. versionchanged:: 3.1 Indexes are only ensured on the first write to the DB. @@ -68,7 +73,6 @@ def __init__(self, database, collection="fs", disable_md5=False): raise ConfigurationError('database must use ' 'acknowledged write_concern') - self.__database = database self.__collection = database[collection] self.__files = self.__collection.files self.__chunks = self.__collection.chunks @@ -88,8 +92,6 @@ def new_file(self, **kwargs): :Parameters: - `**kwargs` (optional): keyword arguments for file creation """ - # No need for __ensure_index_files_id() here; GridIn ensures - # the (files_id, n) index when needed. return GridIn( self.__collection, disable_md5=self.__disable_md5, **kwargs) @@ -192,6 +194,7 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): if filename is not None: query["filename"] = filename + _disallow_transactions(session) cursor = self.__files.find(query, session=session) if version < 0: skip = abs(version) - 1 @@ -249,6 +252,7 @@ def delete(self, file_id, session=None): .. versionchanged:: 3.1 ``delete`` no longer ensures indexes. """ + _disallow_transactions(session) self.__files.delete_one({"_id": file_id}, session=session) self.__chunks.delete_many({"files_id": file_id}, session=session) @@ -266,6 +270,7 @@ def list(self, session=None): .. versionchanged:: 3.1 ``list`` no longer ensures indexes. """ + _disallow_transactions(session) # With an index, distinct includes documents with no filename # as None. return [ @@ -299,6 +304,7 @@ def find_one(self, filter=None, session=None, *args, **kwargs): if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} + _disallow_transactions(session) for f in self.find(filter, *args, session=session, **kwargs): return f @@ -403,6 +409,7 @@ def exists(self, document_or_id=None, session=None, **kwargs): .. versionchanged:: 3.6 Added ``session`` parameter. """ + _disallow_transactions(session) if kwargs: f = self.__files.find_one(kwargs, ["_id"], session=session) else: @@ -439,6 +446,10 @@ def __init__(self, db, bucket_name="fs", computed for uploaded files. Useful in environments where MD5 cannot be used for regulatory or other reasons. Defaults to False. + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFSBucket does not support multi-document transactions. + .. versionadded:: 3.1 .. mongodoc:: gridfs @@ -452,7 +463,6 @@ def __init__(self, db, bucket_name="fs", if not wtc.acknowledged: raise ConfigurationError('write concern must be acknowledged') - self._db = db self._bucket_name = bucket_name self._collection = db[bucket_name] self._disable_md5 = disable_md5 @@ -746,6 +756,7 @@ def delete(self, file_id, session=None): .. versionchanged:: 3.6 Added ``session`` parameter. """ + _disallow_transactions(session) res = self._files.delete_one({"_id": file_id}, session=session) self._chunks.delete_many({"files_id": file_id}, session=session) if not res.deleted_count: @@ -839,9 +850,8 @@ def open_download_stream_by_name(self, filename, revision=-1, session=None): Added ``session`` parameter. """ validate_string("filename", filename) - query = {"filename": filename} - + _disallow_transactions(session) cursor = self._files.find(query, session=session) if revision < 0: skip = abs(revision) - 1 @@ -922,6 +932,7 @@ def rename(self, file_id, new_filename, session=None): .. versionchanged:: 3.6 Added ``session`` parameter. """ + _disallow_transactions(session) result = self._files.update_one({"_id": file_id}, {"$set": {"filename": new_filename}}, session=session) diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 88b71ebb1f..23c629be68 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -31,6 +31,7 @@ from pymongo.errors import (ConfigurationError, CursorNotFound, DuplicateKeyError, + InvalidOperation, OperationFailure) from pymongo.read_preferences import ReadPreference @@ -105,6 +106,12 @@ def _clear_entity_type_registry(entity, **kwargs): return entity.with_options(codec_options=codecopts, **kwargs) +def _disallow_transactions(session): + if session and session.in_transaction: + raise InvalidOperation( + 'GridFS does not support multi-document transactions') + + class GridIn(object): """Class to write data to GridFS. """ @@ -168,6 +175,7 @@ def __init__( if not root_collection.write_concern.acknowledged: raise ConfigurationError('root_collection must use ' 'acknowledged write_concern') + _disallow_transactions(session) # Handle alternative naming if "content_type" in kwargs: @@ -207,6 +215,7 @@ def __create_index(self, collection, index_key, unique): def __ensure_indexes(self): if not object.__getattribute__(self, "_ensured_index"): + _disallow_transactions(self._session) self.__create_index(self._coll.files, _F_INDEX, False) self.__create_index(self._coll.chunks, _C_INDEX, True) object.__setattr__(self, "_ensured_index", True) @@ -456,6 +465,7 @@ def __init__(self, root_collection, file_id=None, file_document=None, if not isinstance(root_collection, Collection): raise TypeError("root_collection must be an " "instance of Collection") + _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) @@ -483,6 +493,7 @@ def __init__(self, root_collection, file_id=None, file_document=None, def _ensure_file(self): if not self._file: + _disallow_transactions(self._session) self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) if not self._file: @@ -718,6 +729,7 @@ def _create_cursor(self): filter = {"files_id": self._id} if self._next_chunk > 0: filter["n"] = {"$gte": self._next_chunk} + _disallow_transactions(self._session) self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) @@ -810,6 +822,7 @@ def __init__(self, collection, filter=None, skip=0, limit=0, .. mongodoc:: cursors """ + _disallow_transactions(session) collection = _clear_entity_type_registry(collection) # Hold on to the base "fs" collection to create GridOut objects later. @@ -823,6 +836,7 @@ def __init__(self, collection, filter=None, skip=0, limit=0, def next(self): """Get next GridOut object from cursor. """ + _disallow_transactions(self.session) # Work around "super is not iterable" issue in Python 3.x next_file = super(GridOutCursor, self).next() return GridOut(self.__root_collection, file_document=next_file, diff --git a/test/test_transactions.py b/test/test_transactions.py index 85345e2748..a114db6e25 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -19,16 +19,21 @@ sys.path[0:0] = [""] +from bson.py3compat import StringIO + from pymongo import client_session, WriteConcern from pymongo.client_session import TransactionOptions from pymongo.errors import (CollectionInvalid, ConfigurationError, ConnectionFailure, + InvalidOperation, OperationFailure) from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference +from gridfs import GridFS, GridFSBucket + from test import unittest, client_context from test.utils import (rs_client, single_client, wait_until, OvertCommandListener, @@ -215,8 +220,7 @@ def test_unpin_for_non_transaction_operation(self): @client_context.require_transactions @client_context.require_version_min(4, 3, 4) def test_create_collection(self): - client = rs_client() - self.addCleanup(client.close) + client = client_context.client db = client.pymongo_test coll = db.test_create_collection self.addCleanup(coll.drop) @@ -241,6 +245,48 @@ def create_and_insert(session): db.create_collection(coll.name, session=s) self.assertEqual(ctx.exception.code, 48) # NamespaceExists + @client_context.require_transactions + def test_gridfs_does_not_support_transactions(self): + client = client_context.client + db = client.pymongo_test + gfs = GridFS(db) + bucket = GridFSBucket(db) + + def gridfs_find(*args, **kwargs): + return gfs.find(*args, **kwargs).next() + + def gridfs_open_upload_stream(*args, **kwargs): + bucket.open_upload_stream(*args, **kwargs).write(b'1') + + gridfs_ops = [ + (gfs.put, (b'123',)), + (gfs.get, (1,)), + (gfs.get_version, ('name',)), + (gfs.get_last_version, ('name',)), + (gfs.delete, (1, )), + (gfs.list, ()), + (gfs.find_one, ()), + (gridfs_find, ()), + (gfs.exists, ()), + (gridfs_open_upload_stream, ('name',)), + (bucket.upload_from_stream, ('name', b'data',)), + (bucket.download_to_stream, (1, StringIO(),)), + (bucket.download_to_stream_by_name, ('name', StringIO(),)), + (bucket.delete, (1,)), + (bucket.find, ()), + (bucket.open_download_stream, (1,)), + (bucket.open_download_stream_by_name, ('name',)), + (bucket.rename, (1, 'new-name',)), + ] + + with client.start_session() as s, s.start_transaction(): + for op, args in gridfs_ops: + with self.assertRaisesRegex( + InvalidOperation, + 'GridFS does not support multi-document transactions', + ): + op(*args, session=s) + class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" From 1c29c1a65e358dcfba58e8582c0d0a51e6c7ce12 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 8 Jul 2020 14:02:34 -0700 Subject: [PATCH 0181/2111] PYTHON-2299 Add the "awaited" field to heartbeat events (#457) --- pymongo/monitor.py | 9 +++--- pymongo/monitoring.py | 55 ++++++++++++++++++++++++--------- test/test_monitoring.py | 4 +-- test/test_streaming_protocol.py | 16 ++++++++++ 4 files changed, 64 insertions(+), 20 deletions(-) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 1c62e0112a..3d8d167b96 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -212,11 +212,13 @@ def _check_server(self): except ReferenceError: raise except Exception as error: - address = self._server_description.address + sd = self._server_description + address = sd.address duration = _time() - start if self._publish: + awaited = sd.is_server_type_known and sd.topology_version self._listeners.publish_server_heartbeat_failed( - address, duration, error) + address, duration, error, awaited) self._reset_connection() if isinstance(error, _OperationCancelled): raise @@ -231,7 +233,6 @@ def _check_once(self): """ address = self._server_description.address if self._publish: - # PYTHON-2299: Add the "awaited" field to heartbeat events. self._listeners.publish_server_heartbeat_started(address) if self._cancel_context and self._cancel_context.cancelled: @@ -246,7 +247,7 @@ def _check_once(self): self._rtt_monitor.average()) if self._publish: self._listeners.publish_server_heartbeat_succeeded( - address, round_trip_time, response) + address, round_trip_time, response, response.awaitable) return sd def _check_with_socket(self, conn): diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 25b0e7c53f..7f90ae1b75 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1099,12 +1099,13 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply') + __slots__ = ('__duration', '__reply', '__awaited') - def __init__(self, duration, reply, *args): - super(ServerHeartbeatSucceededEvent, self).__init__(*args) + def __init__(self, duration, reply, connection_id, awaited=False): + super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply + self.__awaited = awaited @property def duration(self): @@ -1116,10 +1117,20 @@ def reply(self): """An instance of :class:`~pymongo.ismaster.IsMaster`.""" return self.__reply + @property + def awaited(self): + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + """ + return self.__awaited + def __repr__(self): - return "<%s %s duration: %s, reply: %s>" % ( + return "<%s %s duration: %s, awaited: %s, reply: %s>" % ( self.__class__.__name__, self.connection_id, - self.duration, self.reply) + self.duration, self.awaited, self.reply) class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): @@ -1129,12 +1140,13 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply') + __slots__ = ('__duration', '__reply', '__awaited') - def __init__(self, duration, reply, *args): - super(ServerHeartbeatFailedEvent, self).__init__(*args) + def __init__(self, duration, reply, connection_id, awaited=False): + super(ServerHeartbeatFailedEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply + self.__awaited = awaited @property def duration(self): @@ -1146,10 +1158,20 @@ def reply(self): """A subclass of :exc:`Exception`.""" return self.__reply + @property + def awaited(self): + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + """ + return self.__awaited + def __repr__(self): - return "<%s %s duration: %s, reply: %r>" % ( + return "<%s %s duration: %s, awaited: %s, reply: %r>" % ( self.__class__.__name__, self.connection_id, - self.duration, self.reply) + self.duration, self.awaited, self.reply) class _EventListeners(object): @@ -1303,7 +1325,7 @@ def publish_server_heartbeat_started(self, connection_id): _handle_exception() def publish_server_heartbeat_succeeded(self, connection_id, duration, - reply): + reply, awaited): """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. @@ -1312,15 +1334,18 @@ def publish_server_heartbeat_succeeded(self, connection_id, duration, - `duration`: The execution time of the event in the highest possible resolution for the platform. - `reply`: The command reply. + - `awaited`: True if the response was awaited. """ - event = ServerHeartbeatSucceededEvent(duration, reply, connection_id) + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, + awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_server_heartbeat_failed(self, connection_id, duration, reply): + def publish_server_heartbeat_failed(self, connection_id, duration, reply, + awaited): """Publish a ServerHeartbeatFailedEvent to all server heartbeat listeners. @@ -1329,8 +1354,10 @@ def publish_server_heartbeat_failed(self, connection_id, duration, reply): - `duration`: The execution time of the event in the highest possible resolution for the platform. - `reply`: The command reply. + - `awaited`: True if the response was awaited. """ - event = ServerHeartbeatFailedEvent(duration, reply, connection_id) + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, + awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.failed(event) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index ede08c011d..6cff8fb0dc 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1466,13 +1466,13 @@ def test_server_heartbeat_event_repr(self): self.assertEqual( repr(event), "") + "duration: 0.1, awaited: False, reply: {'ok': 1}>") event = monitoring.ServerHeartbeatFailedEvent( delta, 'ERROR', connection_id) self.assertEqual( repr(event), "") + "duration: 0.1, awaited: False, reply: 'ERROR'>") def test_server_event_repr(self): server_address = ('localhost', 27017) diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index e6c64ffa9a..982872f271 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -160,6 +160,14 @@ def hb_started(event): return (isinstance(event, monitoring.ServerHeartbeatStartedEvent) and event.connection_id == address) + def hb_succeeded(event): + return (isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + and event.connection_id == address) + + def hb_failed(event): + return (isinstance(event, monitoring.ServerHeartbeatFailedEvent) + and event.connection_id == address) + hb_started_events = hb_listener.matching(hb_started) # Explanation of the expected heartbeat events: # Time: event @@ -178,6 +186,14 @@ def hb_started(event): # This can be reduced to ~15 after SERVER-49220 is fixed. self.assertLess(len(hb_started_events), 40) + # Check the awaited flag. + hb_succeeded_events = hb_listener.matching(hb_succeeded) + hb_failed_events = hb_listener.matching(hb_failed) + self.assertFalse(hb_succeeded_events[0].awaited) + self.assertTrue(hb_succeeded_events[1].awaited) + self.assertTrue(hb_failed_events[0].awaited) + self.assertFalse(hb_failed_events[1].awaited) + if __name__ == "__main__": unittest.main() From 065001ef2e419cf53b95b91e9fdb5e4e8e75fc5a Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 8 Jul 2020 14:51:17 -0700 Subject: [PATCH 0182/2111] PYTHON-2305 Cache postBatchResumeToken when an aggregate command returns an empty firstBatch (#456) --- doc/changelog.rst | 6 +++++- pymongo/change_stream.py | 17 ++++++++++------- test/test_change_stream.py | 7 +++++-- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 9e1c65e108..b06bc8e111 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,7 +4,8 @@ Changelog Changes in Version 3.11.0b2.dev0 -------------------------------- -Version 3.11 adds support for MongoDB 4.4. Highlights include: +Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. +Highlights include: - Support for :ref:`OCSP` (Online Certificate Status Protocol). - Support for `PyOpenSSL `_ as an @@ -52,6 +53,9 @@ Version 3.11 adds support for MongoDB 4.4. Highlights include: - Deprecated :meth:`pymongo.collection.Collection.reindex`. Use :meth:`~pymongo.database.Database.command` to run the ``reIndex`` command instead. +- Fixed a bug in change streams that could cause PyMongo to miss some change + documents when resuming a stream that was started without a resume token and + whose first batch did not contain any change documents. Unavoidable breaking changes: diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 08b2043def..f742e126c6 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -125,7 +125,7 @@ def _change_stream_options(self): if resume_token is not None: if self._uses_start_after: options['startAfter'] = resume_token - if self._uses_resume_after: + else: options['resumeAfter'] = resume_token if self._start_at_operation_time is not None: @@ -149,17 +149,20 @@ def _aggregation_pipeline(self): return full_pipeline def _process_result(self, result, session, server, sock_info, slave_ok): - """Callback that caches the startAtOperationTime from a changeStream - aggregate command response containing an empty batch of change - documents. + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. This is implemented as a callback because we need access to the wire version in order to determine whether to cache this value. """ if not result['cursor']['firstBatch']: - if (self._start_at_operation_time is None and - self.resume_token is None and - sock_info.max_wire_version >= 7): + if 'postBatchResumeToken' in result['cursor']: + self._resume_token = result['cursor']['postBatchResumeToken'] + elif (self._start_at_operation_time is None and + self._uses_resume_after is False and + self._uses_start_after is False and + sock_info.max_wire_version >= 7): self._start_at_operation_time = result.get("operationTime") # PYTHON-2181: informative error on missing operationTime. if self._start_at_operation_time is None: diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 7c55c6e962..862143a5df 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -127,7 +127,6 @@ def test_watch(self): self.assertEqual([{'$project': {'foo': 0}}], change_stream._pipeline) self.assertEqual('updateLookup', change_stream._full_document) - self.assertIsNone(change_stream.resume_token) self.assertEqual(1000, change_stream._max_await_time_ms) self.assertEqual(100, change_stream._batch_size) self.assertIsInstance(change_stream._cursor, CommandCursor) @@ -472,8 +471,10 @@ def _get_expected_resume_token(self, stream, listener, listener is a WhiteListEventListener that listens for aggregate and getMore commands.""" if previous_change is None or stream._cursor._has_next(): - return self._get_expected_resume_token_legacy( + token = self._get_expected_resume_token_legacy( stream, listener, previous_change) + if token is not None: + return token response = listener.results['succeeded'][-1].reply return response['cursor']['postBatchResumeToken'] @@ -1061,6 +1062,8 @@ def setFailPoint(self, scenario_dict): fail_point = scenario_dict.get("failPoint") if fail_point is None: return + elif not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") fail_cmd = SON([('configureFailPoint', 'failCommand')]) fail_cmd.update(fail_point) From 426f5fdef7828de443efd82b627d2b3f7099bba5 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 8 Jul 2020 15:45:25 -0700 Subject: [PATCH 0183/2111] PYTHON-2292 Fix failing doctest due to UuidRepresentation (#458) PYTHON-2277 Remove UuidRepresentation DeprecationWarning --- bson/codec_options.py | 6 ------ doc/faq.rst | 2 +- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index a514cc92d0..82079b71a6 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -252,12 +252,6 @@ def __new__(cls, document_class=dict, if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") if uuid_representation is None: - warnings.warn( - "Starting in PyMongo 4.0, the default uuidRepresentation " - "will be changed to 'unspecified'. Applications will need to " - "explicitly set 'uuidRepresentation=pythonLegacy' in the " - "connection string to preserve current behavior.", - DeprecationWarning, stacklevel=2) uuid_representation = UuidRepresentation.PYTHON_LEGACY elif uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError("uuid_representation must be a value " diff --git a/doc/faq.rst b/doc/faq.rst index 2f72746b40..faa4d803ae 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -237,7 +237,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> opts CodecOptions(document_class=, tz_aware=False, - uuid_representation=PYTHON_LEGACY, + uuid_representation=UuidRepresentation.PYTHON_LEGACY, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) From e3f40c30ade252c06744aa7a0ff444f709a513e8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 8 Jul 2020 17:18:02 -0700 Subject: [PATCH 0184/2111] PYTHON-2165 Deprecate MongoClient is_locked, fsync, and unlock helpers (#459) --- doc/api/pymongo/mongo_client.rst | 4 +-- doc/changelog.rst | 18 ++++++++-- pymongo/mongo_client.py | 60 +++++++++++++++++++++++++++++--- test/test_client.py | 21 +++++++---- test/test_monitoring.py | 14 ++++---- test/test_session.py | 1 + 6 files changed, 96 insertions(+), 22 deletions(-) diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index ed110e8f93..ac774679c1 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -34,7 +34,6 @@ .. autoattribute:: read_preference .. autoattribute:: write_concern .. autoattribute:: read_concern - .. autoattribute:: is_locked .. automethod:: start_session .. automethod:: list_databases .. automethod:: list_database_names @@ -43,9 +42,10 @@ .. automethod:: get_default_database .. automethod:: get_database .. automethod:: server_info + .. automethod:: watch .. automethod:: close_cursor .. automethod:: kill_cursors .. automethod:: set_cursor_manager - .. automethod:: watch + .. autoattribute:: is_locked .. automethod:: fsync .. automethod:: unlock diff --git a/doc/changelog.rst b/doc/changelog.rst index b06bc8e111..20c30fd6ff 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -46,6 +46,12 @@ Highlights include: :class:`~pymongo.read_preferences.SecondaryPreferred`, :class:`~pymongo.read_preferences.Nearest` to support disabling (or explicitly enabling) hedged reads in MongoDB 4.4+. +- Fixed a bug in change streams that could cause PyMongo to miss some change + documents when resuming a stream that was started without a resume token and + whose first batch did not contain any change documents. + +Deprecations: + - Deprecated the ``oplog_replay`` parameter to :meth:`pymongo.collection.Collection.find`. Starting in MongoDB 4.4, the server optimizes queries against the oplog collection without requiring @@ -53,9 +59,15 @@ Highlights include: - Deprecated :meth:`pymongo.collection.Collection.reindex`. Use :meth:`~pymongo.database.Database.command` to run the ``reIndex`` command instead. -- Fixed a bug in change streams that could cause PyMongo to miss some change - documents when resuming a stream that was started without a resume token and - whose first batch did not contain any change documents. +- Deprecated :meth:`pymongo.mongo_client.MongoClient.fsync`. Use + :meth:`~pymongo.database.Database.command` to run the ``fsync`` command + instead. +- Deprecated :meth:`pymongo.mongo_client.MongoClient.unlock`. Use + :meth:`~pymongo.database.Database.command` to run the ``fsyncUnlock`` command + instead. See the documentation for more information. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.is_locked`. Use + :meth:`~pymongo.database.Database.command` to run the ``currentOp`` command + instead. See the documentation for more information. Unavoidable breaking changes: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5d4703c0ef..e5545fb798 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -2083,15 +2083,33 @@ def _database_default_options(self, name): @property def is_locked(self): - """Is this server locked? While locked, all write operations - are blocked, although read operations may still be allowed. + """**DEPRECATED**: Is this server locked? While locked, all write + operations are blocked, although read operations may still be allowed. Use :meth:`unlock` to unlock. + + Deprecated. Users of MongoDB version 3.2 or newer can run the + `currentOp command`_ directly with + :meth:`~pymongo.database.Database.command`:: + + is_locked = client.admin.command('currentOp').get('fsyncLock') + + Users of MongoDB version 2.6 and 3.0 can query the "inprog" virtual + collection:: + + is_locked = client.admin["$cmd.sys.inprog"].find_one().get('fsyncLock') + + .. versionchanged:: 3.11 + Deprecated. + + .. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ """ + warnings.warn("is_locked is deprecated. See the documentation for " + "more information.", DeprecationWarning, stacklevel=2) ops = self._database_default_options('admin')._current_op() return bool(ops.get('fsyncLock', 0)) def fsync(self, **kwargs): - """Flush all pending writes to datafiles. + """**DEPRECATED**: Flush all pending writes to datafiles. Optional parameters can be passed as keyword arguments: - `lock`: If True lock the server to disallow writes. @@ -2106,6 +2124,14 @@ def fsync(self, **kwargs): options = {'async': True} client.fsync(**options) + Deprecated. Run the `fsync command`_ directly with + :meth:`~pymongo.database.Database.command` instead. For example:: + + client.admin.command('fsync', lock=True) + + .. versionchanged:: 3.11 + Deprecated. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2114,20 +2140,46 @@ def fsync(self, **kwargs): .. warning:: MongoDB does not support the `async` option on Windows and will raise an exception on that platform. + + .. _fsync command: https://docs.mongodb.com/manual/reference/command/fsync/ """ + warnings.warn("fsync is deprecated. Use " + "client.admin.command('fsync') instead.", + DeprecationWarning, stacklevel=2) self.admin.command("fsync", read_preference=ReadPreference.PRIMARY, **kwargs) def unlock(self, session=None): - """Unlock a previously locked server. + """**DEPRECATED**: Unlock a previously locked server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + Deprecated. Users of MongoDB version 3.2 or newer can run the + `fsyncUnlock command`_ directly with + :meth:`~pymongo.database.Database.command`:: + + client.admin.command('fsyncUnlock') + + Users of MongoDB version 2.6 and 3.0 can query the "unlock" virtual + collection:: + + client.admin["$cmd.sys.unlock"].find_one() + + .. versionchanged:: 3.11 + Deprecated. + .. versionchanged:: 3.6 Added ``session`` parameter. + + .. _fsyncUnlock command: https://docs.mongodb.com/manual/reference/command/fsyncUnlock/ """ + warnings.warn("unlock is deprecated. Use " + "client.admin.command('fsyncUnlock') instead. For " + "MongoDB 2.6 and 3.0, see the documentation for " + "more information.", + DeprecationWarning, stacklevel=2) cmd = SON([("fsyncUnlock", 1)]) with self._socket_for_writes(session) as sock_info: if sock_info.max_wire_version >= 4: diff --git a/test/test_client.py b/test/test_client.py index 5667282896..39234cdc6d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1120,6 +1120,7 @@ def test_ipv6(self): self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_bernie" in dbs) + @ignore_deprecations @client_context.require_no_mongos def test_fsync_lock_unlock(self): if server_is_master_with_slave(client_context.client): @@ -1143,13 +1144,19 @@ def test_fsync_lock_unlock(self): time.sleep(1) self.assertFalse(locked) - def test_is_locked_does_not_raise_warning(self): - client = rs_or_single_client() - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter("always") - _ = client.is_locked - self.assertFalse( - any(issubclass(w.category, DeprecationWarning) for w in ctx)) + def test_deprecated_methods(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + with self.assertRaisesRegex(DeprecationWarning, + 'is_locked is deprecated'): + _ = self.client.is_locked + if not client_context.is_mongos: + with self.assertRaisesRegex(DeprecationWarning, + 'fsync is deprecated'): + self.client.fsync(lock=True) + with self.assertRaisesRegex(DeprecationWarning, + 'unlock is deprecated'): + self.client.unlock() def test_contextlib(self): client = rs_or_single_client() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 6cff8fb0dc..6b33211159 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -37,6 +37,7 @@ unittest) from test.utils import (EventListener, get_pool, + ignore_deprecations, rs_or_single_client, single_client, wait_until) @@ -1336,12 +1337,13 @@ def test_first_batch_helper(self): self.assertTrue('ok' in succeeded.reply) if not client_context.is_mongos: - self.client.fsync(lock=True) - self.listener.results.clear() - self.client.unlock() - # Wait for async unlock... - wait_until( - lambda: not self.client.is_locked, "unlock the database") + with ignore_deprecations(): + self.client.fsync(lock=True) + self.listener.results.clear() + self.client.unlock() + # Wait for async unlock... + wait_until( + lambda: not self.client.is_locked, "unlock the database") started = results['started'][0] succeeded = results['succeeded'][0] self.assertEqual(0, len(results['failed'])) diff --git a/test/test_session.py b/test/test_session.py index f00f58bd98..1705344da0 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -227,6 +227,7 @@ def test_end_sessions(self): client.close() self.assertEqual(len(listener.results['started']), 0) + @ignore_deprecations # fsync and unlock def test_client(self): client = self.client From 3c1dd61ae9a3e57fb1330c90104017dc09ddc7b3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 8 Jul 2020 17:40:02 -0700 Subject: [PATCH 0185/2111] PYTHON-2256 Document that a ClientSession cannot be used for multiple operations concurrently (#460) --- pymongo/client_session.py | 16 +++++++++++++++- pymongo/mongo_client.py | 5 ++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 7c5db5a42d..dec2f4f918 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -336,7 +336,17 @@ def _within_time_limit(start_time): class ClientSession(object): - """A session for ordering sequential operations.""" + """A session for ordering sequential operations. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + They can only be used by one thread or process at a time. A single + :class:`ClientSession` cannot be used to run multiple operations + concurrently. + + Should not be initialized directly by application developers - to create a + :class:`ClientSession`, call + :meth:`~pymongo.mongo_client.MongoClient.start_session`. + """ def __init__(self, client, server_session, options, authset, implicit): # A MongoClient, a _ServerSession, a SessionOptions, and a set. self._client = client @@ -461,6 +471,10 @@ def callback(session, custom_arg, custom_kwarg=None): however, ``with_transaction`` will return without taking further action. + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + Consequently, the ``callback`` must not attempt to execute multiple + operations concurrently. + When ``callback`` raises an exception, ``with_transaction`` automatically aborts the current transaction. When ``callback`` or :meth:`~ClientSession.commit_transaction` raises an exception that diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e5545fb798..d434eb8298 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1779,7 +1779,10 @@ def start_session(self, deprecated method :meth:`~pymongo.database.Database.authenticate`. A :class:`~pymongo.client_session.ClientSession` may only be used with - the MongoClient that started it. + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. :Returns: An instance of :class:`~pymongo.client_session.ClientSession`. From 64f77068e197d97943e6fb42bf2789e10c940098 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 8 Jul 2020 17:46:07 -0700 Subject: [PATCH 0186/2111] PYTHON-1438 Mark a server unknown when connection handshake fails with a network timeout error (#461) --- pymongo/mongo_client.py | 7 +++++-- pymongo/topology.py | 7 +++++-- test/test_discovery_and_monitoring.py | 11 ++++++++--- test/test_topology.py | 4 ++-- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index d434eb8298..ed8205b97d 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -2258,7 +2258,7 @@ def _add_retryable_write_error(exc, max_wire_version): class _MongoClientErrorHandler(object): """Handle errors raised when executing an operation.""" __slots__ = ('client', 'server_address', 'session', 'max_wire_version', - 'sock_generation') + 'sock_generation', 'completed_handshake') def __init__(self, client, server, session): self.client = client @@ -2270,11 +2270,13 @@ def __init__(self, client, server, session): # completes then the error's generation number is the generation # of the pool at the time the connection attempt was started." self.sock_generation = server.pool.generation + self.completed_handshake = False def contribute_socket(self, sock_info): """Provide socket information to the error handler.""" self.max_wire_version = sock_info.max_wire_version self.sock_generation = sock_info.generation + self.completed_handshake = True def __enter__(self): return self @@ -2295,5 +2297,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.session._unpin_mongos() err_ctx = _ErrorContext( - exc_val, self.max_wire_version, self.sock_generation) + exc_val, self.max_wire_version, self.sock_generation, + self.completed_handshake) self.client._topology.handle_error(self.server_address, err_ctx) diff --git a/pymongo/topology.py b/pymongo/topology.py index 252db05f38..749299bb74 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -567,7 +567,8 @@ def _handle_error(self, address, err_ctx): server = self._servers[address] error = err_ctx.error exc_type = type(error) - if issubclass(exc_type, NetworkTimeout): + if (issubclass(exc_type, NetworkTimeout) and + err_ctx.completed_handshake): # The socket has been closed. Don't reset the server. # Server Discovery And Monitoring Spec: "When an application # operation fails because of any network error besides a socket @@ -750,10 +751,12 @@ def __repr__(self): class _ErrorContext(object): """An error with context for SDAM error handling.""" - def __init__(self, error, max_wire_version, sock_generation): + def __init__(self, error, max_wire_version, sock_generation, + completed_handshake): self.error = error self.max_wire_version = max_wire_version self.sock_generation = sock_generation + self.completed_handshake = completed_handshake def _is_stale_error_topology_version(current_tv, error_tv): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index ef97bcc67a..bc05ca78bf 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -119,11 +119,16 @@ def got_app_error(topology, app_error): raise AssertionError('unknown error type: %s' % (error_type,)) assert False except (AutoReconnect, NotMasterError, OperationFailure) as e: - if when == 'beforeHandshakeCompletes' and error_type == 'timeout': - raise unittest.SkipTest('PYTHON-2211') + if when == 'beforeHandshakeCompletes': + completed_handshake = False + elif when == 'afterHandshakeCompletes': + completed_handshake = True + else: + assert False, 'Unknown when field %s' % (when,) topology.handle_error( - server_address, _ErrorContext(e, max_wire_version, generation)) + server_address, _ErrorContext(e, max_wire_version, generation, + completed_handshake)) def get_type(topology, hostname): diff --git a/test/test_topology.py b/test/test_topology.py index dba7eeef15..c593c06382 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -419,7 +419,7 @@ def test_handle_error(self): 'setName': 'rs', 'hosts': ['a', 'b']}) - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0) + errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True) t.handle_error(('a', 27017), errctx) self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) @@ -480,7 +480,7 @@ def test_handle_error_removed_server(self): t = create_mock_topology(replica_set_name='rs') # No error resetting a server not in the TopologyDescription. - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0) + errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True) t.handle_error(('b', 27017), errctx) # Server was *not* added as type Unknown. From f80c82453b51d45f2ce7f9798086809f489e53b9 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 9 Jul 2020 21:32:25 -0400 Subject: [PATCH 0187/2111] PYTHON-2076: Add example event loggers (#449) Added example event loggers and documentation for them to make it easier for users to start using listeners in their code. JIRA ticket: https://jira.mongodb.org/browse/PYTHON-2076 --- doc/api/pymongo/event_loggers.rst | 7 + doc/api/pymongo/index.rst | 1 + pymongo/event_loggers.py | 207 ++++++++++++++++++++++++++++++ pymongo/monitoring.py | 4 + 4 files changed, 219 insertions(+) create mode 100644 doc/api/pymongo/event_loggers.rst create mode 100644 pymongo/event_loggers.py diff --git a/doc/api/pymongo/event_loggers.rst b/doc/api/pymongo/event_loggers.rst new file mode 100644 index 0000000000..f79bfb2345 --- /dev/null +++ b/doc/api/pymongo/event_loggers.rst @@ -0,0 +1,7 @@ +:mod:`event_loggers` -- Example loggers +=========================================== + + +.. automodule:: pymongo.event_loggers + :synopsis: A collection of simple listeners for monitoring driver events. + :members: \ No newline at end of file diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 1b6dedfa8d..9fc90a1e1b 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -56,3 +56,4 @@ Sub-modules: son_manipulator uri_parser write_concern + event_loggers diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py new file mode 100644 index 0000000000..5019ea5489 --- /dev/null +++ b/pymongo/event_loggers.py @@ -0,0 +1,207 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Example event logger classes. + +.. versionadded:: 3.11 + +These loggers can be registered using :func:`register` or +:class:`~pymongo.mongo_client.MongoClient`. + +``monitoring.register(CommandLogger())`` + +or + +``MongoClient(event_listeners=[CommandLogger()])`` +""" + + +import logging + +from pymongo import monitoring + + +class CommandLogger(monitoring.CommandListener): + """A simple listener that logs command events. + + Listens for :class:`~pymongo.monitoring.CommandStartedEvent`, + :class:`~pymongo.monitoring.CommandSucceededEvent` and + :class:`~pymongo.monitoring.CommandFailedEvent` events and + logs them at the `INFO` severity level using :mod:`logging`. + .. versionadded:: 3.11 + """ + def started(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event)) + + def failed(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event)) + + +class ServerLogger(monitoring.ServerListener): + """A simple listener that logs server discovery events. + + Listens for :class:`~pymongo.monitoring.ServerOpeningEvent`, + :class:`~pymongo.monitoring.ServerDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.ServerClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + def opened(self, event): + logging.info("Server {0.server_address} added to topology " + "{0.topology_id}".format(event)) + + def description_changed(self, event): + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + "Server {0.server_address} changed type from " + "{0.previous_description.server_type_name} to " + "{0.new_description.server_type_name}".format(event)) + + def closed(self, event): + logging.warning("Server {0.server_address} removed from topology " + "{0.topology_id}".format(event)) + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """A simple listener that logs server heartbeat events. + + Listens for :class:`~pymongo.monitoring.ServerHeartbeatStartedEvent`, + :class:`~pymongo.monitoring.ServerHeartbeatSucceededEvent`, + and :class:`~pymongo.monitoring.ServerHeartbeatFailedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + def started(self, event): + logging.info("Heartbeat sent to server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + # The reply.document attribute was added in PyMongo 3.4. + logging.info("Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event)) + + def failed(self, event): + logging.warning("Heartbeat to server {0.connection_id} " + "failed with error {0.reply}".format(event)) + + +class TopologyLogger(monitoring.TopologyListener): + """A simple listener that logs server topology events. + + Listens for :class:`~pymongo.monitoring.TopologyOpenedEvent`, + :class:`~pymongo.monitoring.TopologyDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.TopologyClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + def opened(self, event): + logging.info("Topology with id {0.topology_id} " + "opened".format(event)) + + def description_changed(self, event): + logging.info("Topology description updated for " + "topology id {0.topology_id}".format(event)) + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + "Topology {0.topology_id} changed type from " + "{0.previous_description.topology_type_name} to " + "{0.new_description.topology_type_name}".format(event)) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event): + logging.info("Topology with id {0.topology_id} " + "closed".format(event)) + + +class ConnectionPoolLogger(monitoring.ConnectionPoolListener): + """A simple listener that logs server connection pool events. + + Listens for :class:`~pymongo.monitoring.PoolCreatedEvent`, + :class:`~pymongo.monitoring.PoolClearedEvent`, + :class:`~pymongo.monitoring.PoolClosedEvent`, + :~pymongo.monitoring.class:`ConnectionCreatedEvent`, + :class:`~pymongo.monitoring.ConnectionReadyEvent`, + :class:`~pymongo.monitoring.ConnectionClosedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutStartedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutFailedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckedOutEvent`, + and :class:`~pymongo.monitoring.ConnectionCheckedInEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + def pool_created(self, event): + logging.info("[pool {0.address}] pool created".format(event)) + + def pool_cleared(self, event): + logging.info("[pool {0.address}] pool cleared".format(event)) + + def pool_closed(self, event): + logging.info("[pool {0.address}] pool closed".format(event)) + + def connection_created(self, event): + logging.info("[pool {0.address}][conn #{0.connection_id}] " + "connection created".format(event)) + + def connection_ready(self, event): + logging.info("[pool {0.address}][conn #{0.connection_id}] " + "connection setup succeeded".format(event)) + + def connection_closed(self, event): + logging.info("[pool {0.address}][conn #{0.connection_id}] " + "connection closed, reason: " + "{0.reason}".format(event)) + + def connection_check_out_started(self, event): + logging.info("[pool {0.address}] connection check out " + "started".format(event)) + + def connection_check_out_failed(self, event): + logging.info("[pool {0.address}] connection check out " + "failed, reason: {0.reason}".format(event)) + + def connection_checked_out(self, event): + logging.info("[pool {0.address}][conn #{0.connection_id}] " + "connection checked out of pool".format(event)) + + def connection_checked_in(self, event): + logging.info("[pool {0.address}][conn #{0.connection_id}] " + "connection checked into pool".format(event)) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 7f90ae1b75..462b22b334 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -16,6 +16,10 @@ .. versionadded:: 3.1 +.. attention:: Starting in PyMongo 3.11, the monitoring classes outlined below + are included in the PyMongo distribution under the + :mod:`~pymongo.event_loggers` submodule. + Use :func:`register` to register global listeners for specific events. Listeners must inherit from one of the abstract classes below and implement the correct functions for that class. From f24e1653361576ddc23739715543e3784b5feddb Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 13 Jul 2020 11:03:02 -0700 Subject: [PATCH 0188/2111] PYTHON-2263 Respect UuidRepresentation.UNSPECIFIED when parsing $uuid fields in extended JSON (#464) --- bson/json_util.py | 9 ++++++--- test/test_json_util.py | 6 +++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index 14c364e7fb..7b789b0f30 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -449,7 +449,7 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): if "$code" in dct: return _parse_canonical_code(dct) if "$uuid" in dct: - return _parse_legacy_uuid(dct) + return _parse_legacy_uuid(dct, json_options) if "$undefined" in dct: return None if "$numberLong" in dct: @@ -484,11 +484,14 @@ def _parse_legacy_regex(doc): return Regex(pattern, flags) -def _parse_legacy_uuid(doc): +def _parse_legacy_uuid(doc, json_options): """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) - return uuid.UUID(doc["$uuid"]) + if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: + return Binary.from_uuid(uuid.UUID(doc["$uuid"])) + else: + return uuid.UUID(doc["$uuid"]) def _binary_or_uuid(data, subtype, json_options): diff --git a/test/test_json_util.py b/test/test_json_util.py index 6499818b2f..e8b64a16d1 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -301,7 +301,11 @@ def test_uuid_uuid_rep_unspecified(self): ext_json_str = json_util.dumps(doc) self.assertEqual( doc, json_util.loads(ext_json_str, json_options=options)) - + # $uuid-encoded fields + doc = {'uuid': Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps({'uuid': _uuid}) + self.assertEqual( + doc, json_util.loads(ext_json_str, json_options=options)) def test_binary(self): if PY3: From 05267fb46578640c6479928d13a07de4b83bcbf7 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 13 Jul 2020 11:03:29 -0700 Subject: [PATCH 0189/2111] PYTHON-2315 Special case resuming change streams from CursorNotFound errors (#463) --- .../change-streams-resume-whitelist.json | 1749 +++++++++++++++++ 1 file changed, 1749 insertions(+) create mode 100644 test/change_streams/change-streams-resume-whitelist.json diff --git a/test/change_streams/change-streams-resume-whitelist.json b/test/change_streams/change-streams-resume-whitelist.json new file mode 100644 index 0000000000..39f883ee5e --- /dev/null +++ b/test/change_streams/change-streams-resume-whitelist.json @@ -0,0 +1,1749 @@ +{ + "collection_name": "test", + "database_name": "change-stream-tests", + "tests": [ + { + "description": "change stream resumes after a network error", + "minServerVersion": "4.2", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after HostUnreachable", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after HostNotFound", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NetworkTimeout", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 89, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after ShutdownInProgress", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 91, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 189, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 262, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after SocketException", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 9001, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NotMaster", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 10107, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11600, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11602, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NotMasterNoSlaveOk", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13435, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NotMasterOrSecondary", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13436, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after StaleShardVersion", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 63, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after StaleEpoch", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 150, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after RetryChangeStream", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 234, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 133, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after CursorNotFound", + "minServerVersion": "4.2", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 43, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + } + ] +} From b6bf4f01576c087f187263357d5cb19b0ad03606 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 15 Jul 2020 08:55:42 -0700 Subject: [PATCH 0190/2111] PYTHON-2320 Use select instead of poll on Jython (#466) --- pymongo/socket_checker.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 672159c245..886782b78b 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -16,8 +16,11 @@ import errno import select +import sys -_HAVE_POLL = hasattr(select, "poll") +# PYTHON-2320: Jython does not fully support poll on SSL sockets, +# https://bugs.jython.org/issue2900 +_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith('java') _SelectError = getattr(select, "error", OSError) From 63574b93603cda2c87fd99f1b90033970380b72b Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 15 Jul 2020 11:03:59 -0700 Subject: [PATCH 0191/2111] BUMP 3.11.0rc0 --- doc/changelog.rst | 4 ++-- doc/installation.rst | 6 +----- pymongo/__init__.py | 2 +- setup.py | 2 +- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 20c30fd6ff..8698688473 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 3.11.0b2.dev0 --------------------------------- +Changes in Version 3.11.0rc0 +---------------------------- Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. Highlights include: diff --git a/doc/installation.rst b/doc/installation.rst index f349917405..f9f01a671b 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -275,8 +275,4 @@ but can be found on the `GitHub tags page `_. They can be installed by passing the full URL for the tag to pip:: - $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b1.tar.gz - -or easy_install:: - - $ python -m easy_install https://github.com/mongodb/mongo-python-driver/archive/3.11.0b1.tar.gz + $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0rc0.tar.gz diff --git a/pymongo/__init__.py b/pymongo/__init__.py index b87668cd65..c771a09c82 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, 'b2.dev0') +version_tuple = (3, 11, 0, 'rc0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 7379b260ee..53c8e43481 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0b2.dev0" +version = "3.11.0rc0" f = open("README.rst") try: From 9b69338ef42d0c80210ae751780ae720e64aa768 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 15 Jul 2020 11:07:23 -0700 Subject: [PATCH 0192/2111] BUMP 3.11.0rc1.dev0 --- doc/changelog.rst | 4 ++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8698688473..c78cefb7f6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 3.11.0rc0 ----------------------------- +Changes in Version 3.11.0rc1.dev0 +--------------------------------- Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. Highlights include: diff --git a/pymongo/__init__.py b/pymongo/__init__.py index c771a09c82..066eaea09a 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, 'rc0') +version_tuple = (3, 11, 0, 'rc1.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 53c8e43481..528f8183c2 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0rc0" +version = "3.11.0rc1.dev0" f = open("README.rst") try: From 98f8470b5b79049949b5636e0882a3a40c753f70 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 16 Jul 2020 18:18:19 -0700 Subject: [PATCH 0193/2111] PYTHON-2306 Test that change streams don't resume from ElectionInProgress (#465) --- .../change_streams/change-streams-errors.json | 14 +- .../change-streams-resume-errorLabels.json | 1634 +++++++++++++++++ test/test_change_stream.py | 7 +- 3 files changed, 1642 insertions(+), 13 deletions(-) create mode 100644 test/change_streams/change-streams-resume-errorLabels.json diff --git a/test/change_streams/change-streams-errors.json b/test/change_streams/change-streams-errors.json index 7b7cea30a4..19cbc74288 100644 --- a/test/change_streams/change-streams-errors.json +++ b/test/change_streams/change-streams-errors.json @@ -107,7 +107,7 @@ } }, { - "description": "change stream errors on MaxTimeMSExpired", + "description": "change stream errors on ElectionInProgress", "minServerVersion": "4.2", "failPoint": { "configureFailPoint": "failCommand", @@ -118,7 +118,7 @@ "failCommands": [ "getMore" ], - "errorCode": 50, + "errorCode": 216, "closeConnection": false } }, @@ -127,13 +127,7 @@ "replicaset", "sharded" ], - "changeStreamPipeline": [ - { - "$project": { - "_id": 0 - } - } - ], + "changeStreamPipeline": [], "changeStreamOptions": {}, "operations": [ { @@ -149,7 +143,7 @@ ], "result": { "error": { - "code": 50 + "code": 216 } } } diff --git a/test/change_streams/change-streams-resume-errorLabels.json b/test/change_streams/change-streams-resume-errorLabels.json new file mode 100644 index 0000000000..cf8957b21f --- /dev/null +++ b/test/change_streams/change-streams-resume-errorLabels.json @@ -0,0 +1,1634 @@ +{ + "collection_name": "test", + "database_name": "change-stream-tests", + "tests": [ + { + "description": "change stream resumes after HostUnreachable", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 6, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after HostNotFound", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 7, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NetworkTimeout", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 89, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after ShutdownInProgress", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 91, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 189, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 262, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after SocketException", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 9001, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NotMaster", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 10107, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11600, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11602, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NotMasterNoSlaveOk", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13435, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after NotMasterOrSecondary", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13436, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after StaleShardVersion", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 63, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after StaleEpoch", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 150, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after RetryChangeStream", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 234, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 133, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream resumes if error contains ResumableChangeStreamError", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 50, + "closeConnection": false, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "getMore": 42, + "collection": "test" + }, + "command_name": "getMore", + "database_name": "change-stream-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "test", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "command_name": "aggregate", + "database_name": "change-stream-tests" + } + } + ], + "result": { + "success": [ + { + "_id": "42", + "documentKey": "42", + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "x": { + "$numberInt": "1" + } + } + } + ] + } + }, + { + "description": "change stream does not resume if error does not contain ResumableChangeStreamError", + "minServerVersion": "4.3.1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + }, + "target": "collection", + "topology": [ + "replicaset", + "sharded" + ], + "changeStreamPipeline": [], + "changeStreamOptions": {}, + "operations": [ + { + "database": "change-stream-tests", + "collection": "test", + "name": "insertOne", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "result": { + "error": { + "code": 6 + } + } + } + ] +} diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 862143a5df..231b9101eb 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1194,9 +1194,10 @@ def run_scenario(self): else: # Check for expected output from change streams - for change, expected_changes in zip(changes, test["result"]["success"]): - self.assert_dict_is_subset(change, expected_changes) - self.assertEqual(len(changes), len(test["result"]["success"])) + if test["result"].get("success"): + for change, expected_changes in zip(changes, test["result"]["success"]): + self.assert_dict_is_subset(change, expected_changes) + self.assertEqual(len(changes), len(test["result"]["success"])) finally: # Check for expected events From 7a539f227a9524b27ef469826ef9ee5bd4533773 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 22 Jul 2020 11:49:57 -0700 Subject: [PATCH 0194/2111] PYTHON-2294 Resync SDAM spec tests to workaround slow elections Windows and macOS (#468) PYTHON-2296 Test behavior of connectTimeoutMS=0 with streaming protocol PYTHON-2311 Workaround inherent race in flaky streaming protocol test --- pymongo/common.py | 14 +- pymongo/mongo_client.py | 8 +- .../connectTimeoutMS.json | 148 ++++++++++++++++++ .../rediscover-quickly-after-step-down.json | 6 +- test/test_client.py | 22 ++- test/test_session.py | 1 + test/test_streaming_protocol.py | 52 ++++-- 7 files changed, 230 insertions(+), 21 deletions(-) create mode 100644 test/discovery_and_monitoring_integration/connectTimeoutMS.json diff --git a/pymongo/common.py b/pymongo/common.py index 68e1af7a41..cab9d526ce 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -333,6 +333,16 @@ def validate_timeout_or_zero(option, value): return validate_positive_float(option, value) / 1000.0 +def validate_timeout_or_none_or_zero(option, value): + """Validates a timeout specified in milliseconds returning + a value in floating point seconds. value=0 and value="0" are treated the + same as value=None which means unlimited timeout. + """ + if value is None or value == 0 or value == "0": + return None + return validate_positive_float(option, value) / 1000.0 + + def validate_max_staleness(option, value): """Validates maxStalenessSeconds according to the Max Staleness Spec.""" if value == -1 or value == "-1": @@ -593,7 +603,7 @@ def validate_tzinfo(dummy, value): 'authmechanismproperties': validate_auth_mechanism_properties, 'authsource': validate_string, 'compressors': validate_compressors, - 'connecttimeoutms': validate_timeout_or_none, + 'connecttimeoutms': validate_timeout_or_none_or_zero, 'directconnection': validate_boolean_or_string, 'heartbeatfrequencyms': validate_timeout_or_none, 'journal': validate_boolean_or_string, @@ -608,7 +618,7 @@ def validate_tzinfo(dummy, value): 'retryreads': validate_boolean_or_string, 'retrywrites': validate_boolean_or_string, 'serverselectiontimeoutms': validate_timeout_or_zero, - 'sockettimeoutms': validate_timeout_or_none, + 'sockettimeoutms': validate_timeout_or_none_or_zero, 'ssl_keyfile': validate_readable, 'tls': validate_boolean_or_string, 'tlsallowinvalidcertificates': validate_allow_invalid_certs, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ed8205b97d..ac9c73160c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -227,11 +227,13 @@ def __init__( - `socketTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait for a response after sending an ordinary (non-monitoring) database operation before concluding that - a network error has occurred. Defaults to ``None`` (no timeout). + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). - `connectTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait during server monitoring when connecting a new socket to a server before concluding the server - is unavailable. Defaults to ``20000`` (20 seconds). + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). - `server_selector`: (callable or None) Optional, user-provided function that augments server selection rules. The function should accept as an argument a list of @@ -631,7 +633,7 @@ def __init__( # Determine connection timeout from kwargs. timeout = keyword_opts.get("connecttimeoutms") if timeout is not None: - timeout = common.validate_timeout_or_none( + timeout = common.validate_timeout_or_none_or_zero( keyword_opts.cased_key("connecttimeoutms"), timeout) res = uri_parser.parse_uri( entity, port, validate=True, warn=True, normalize=False, diff --git a/test/discovery_and_monitoring_integration/connectTimeoutMS.json b/test/discovery_and_monitoring_integration/connectTimeoutMS.json new file mode 100644 index 0000000000..1192b6b9aa --- /dev/null +++ b/test/discovery_and_monitoring_integration/connectTimeoutMS.json @@ -0,0 +1,148 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "connectTimeoutMS", + "data": [], + "tests": [ + { + "description": "connectTimeoutMS=0", + "clientOptions": { + "retryWrites": false, + "connectTimeoutMS": 0, + "heartbeatFrequencyMS": 500, + "appname": "connectTimeoutMS=0" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "connectTimeoutMS=0", + "blockConnection": true, + "blockTimeMS": 550 + } + } + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 750 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json index 2d0a998a69..41fbdc695c 100644 --- a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json +++ b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json @@ -64,8 +64,8 @@ "command_name": "replSetStepDown", "arguments": { "command": { - "replSetStepDown": 20, - "secondaryCatchUpPeriodSecs": 20, + "replSetStepDown": 30, + "secondaryCatchUpPeriodSecs": 30, "force": false } } @@ -74,7 +74,7 @@ "name": "waitForPrimaryChange", "object": "testRunner", "arguments": { - "timeoutMS": 5000 + "timeoutMS": 15000 } }, { diff --git a/test/test_client.py b/test/test_client.py index 39234cdc6d..5333fc35ff 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -134,6 +134,24 @@ def test_keyword_arg_defaults(self): self.assertEqual(ReadPreference.PRIMARY, client.read_preference) self.assertAlmostEqual(12, client.server_selection_timeout) + def test_connect_timeout(self): + client = MongoClient(connect=False, connectTimeoutMS=None, + socketTimeoutMS=None) + pool_opts = client._MongoClient__options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + client = MongoClient(connect=False, connectTimeoutMS=0, + socketTimeoutMS=0) + pool_opts = client._MongoClient__options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + client = MongoClient( + 'mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0', + connect=False) + pool_opts = client._MongoClient__options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + def test_types(self): self.assertRaises(TypeError, MongoClient, 1) self.assertRaises(TypeError, MongoClient, 1.14) @@ -996,8 +1014,8 @@ def test_socket_timeout_ms_validation(self): c = connected(rs_or_single_client(socketTimeoutMS=None)) self.assertEqual(None, get_pool(c).opts.socket_timeout) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=0) + c = connected(rs_or_single_client(socketTimeoutMS=0)) + self.assertEqual(None, get_pool(c).opts.socket_timeout) self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=-1) diff --git a/test/test_session.py b/test/test_session.py index 1705344da0..50dfd8a060 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -1249,6 +1249,7 @@ def test_cluster_time(self): # Prevent heartbeats from updating $clusterTime between operations. client = rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) + self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 982872f271..c94179a456 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -26,6 +26,7 @@ unittest) from test.utils import (HeartbeatEventListener, rs_or_single_client, + single_client, ServerEventListener, wait_until) @@ -160,14 +161,6 @@ def hb_started(event): return (isinstance(event, monitoring.ServerHeartbeatStartedEvent) and event.connection_id == address) - def hb_succeeded(event): - return (isinstance(event, monitoring.ServerHeartbeatSucceededEvent) - and event.connection_id == address) - - def hb_failed(event): - return (isinstance(event, monitoring.ServerHeartbeatFailedEvent) - and event.connection_id == address) - hb_started_events = hb_listener.matching(hb_started) # Explanation of the expected heartbeat events: # Time: event @@ -186,13 +179,50 @@ def hb_failed(event): # This can be reduced to ~15 after SERVER-49220 is fixed. self.assertLess(len(hb_started_events), 40) - # Check the awaited flag. + @client_context.require_failCommand_appName + def test_heartbeat_awaited_flag(self): + hb_listener = HeartbeatEventListener() + client = single_client( + event_listeners=[hb_listener], heartbeatFrequencyMS=500, + appName='heartbeatEventAwaitedFlag') + self.addCleanup(client.close) + # Force a connection. + client.admin.command('ping') + + def hb_succeeded(event): + return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + + def hb_failed(event): + return isinstance(event, monitoring.ServerHeartbeatFailedEvent) + + fail_heartbeat = { + 'mode': {'times': 2}, + 'data': { + 'failCommands': ['isMaster'], + 'closeConnection': True, + 'appName': 'heartbeatEventAwaitedFlag', + }, + } + with self.fail_point(fail_heartbeat): + wait_until(lambda: hb_listener.matching(hb_failed), + "published failed event") + # Reconnect. + client.admin.command('ping') + hb_succeeded_events = hb_listener.matching(hb_succeeded) hb_failed_events = hb_listener.matching(hb_failed) self.assertFalse(hb_succeeded_events[0].awaited) - self.assertTrue(hb_succeeded_events[1].awaited) self.assertTrue(hb_failed_events[0].awaited) - self.assertFalse(hb_failed_events[1].awaited) + # Depending on thread scheduling, the failed heartbeat could occur on + # the second or third check. + events = [type(e) for e in hb_listener.results[:4]] + if events == [monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent]: + self.assertFalse(hb_succeeded_events[1].awaited) + else: + self.assertTrue(hb_succeeded_events[1].awaited) if __name__ == "__main__": From c16b5b95a15d04c23d2a5c253d83f0405e476070 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 27 Jul 2020 11:27:08 -0700 Subject: [PATCH 0195/2111] PYTHON-2331 Fix set_memory_error complier warning (#469) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bson/buffer.c:36:13: warning: function declaration isn’t a prototype [-Wstrict-prototypes] 36 | static void set_memory_error() { | ^~~~~~~~~~~~~~~~ --- bson/buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bson/buffer.c b/bson/buffer.c index 0b1941cb57..bb92ab3ee5 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -33,7 +33,7 @@ struct buffer { /* Set Python's error indicator to MemoryError. * Called after allocation failures. */ -static void set_memory_error() { +static void set_memory_error(void) { PyErr_NoMemory(); } From b04e3343cb4aa0cf20b4f9a3209b255dc5d47bbf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 27 Jul 2020 13:27:05 -0700 Subject: [PATCH 0196/2111] PYTHON-2328 Reset the connection pool in Topology.on_change (#470) PYTHON-2304 Ensure _RttMonitor closes socket on when the client is closed --- pymongo/monitor.py | 16 +++++++++------- pymongo/topology.py | 22 +++++++++++----------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 3d8d167b96..81502591bf 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -177,7 +177,10 @@ def _run(self): # discover that we've been cancelled. self._executor.skip_sleep() return - self._topology.on_change(self._server_description) + + # Update the Topology and clear the server pool on error. + self._topology.on_change(self._server_description, + reset_pool=self._server_description.error) if (self._server_description.is_server_type_known and self._server_description.topology_version): @@ -185,12 +188,9 @@ def _run(self): # Immediately check for the next streaming response. self._executor.skip_sleep() - if self._server_description.error: - # Reset the server pool only after marking the server Unknown. - self._topology.reset_pool(self._server_description.address) - if prev_sd.is_server_type_known: - # Immediately retry on network errors. - self._executor.skip_sleep() + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() except ReferenceError: # Topology was garbage-collected. self.close() @@ -377,6 +377,8 @@ def _run(self): def _ping(self): """Run an "isMaster" command and return the RTT.""" with self._pool.get_socket({}) as sock_info: + if self._executor._stopped: + raise Exception('_RttMonitor closed') start = _time() sock_info.ismaster() return _time() - start diff --git a/pymongo/topology.py b/pymongo/topology.py index 749299bb74..eb84a344e0 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -265,7 +265,7 @@ def select_server_by_address(self, address, server_selection_timeout, address) - def _process_change(self, server_description): + def _process_change(self, server_description, reset_pool=False): """Process a new ServerDescription on an opened topology. Hold the lock when calling this. @@ -303,10 +303,16 @@ def _process_change(self, server_description): SRV_POLLING_TOPOLOGIES): self._srv_monitor.close() + # Clear the pool from a failed heartbeat. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + server.pool.reset() + # Wake waiters in select_servers(). self._condition.notify_all() - def on_change(self, server_description): + def on_change(self, server_description, reset_pool=False): """Process a new ServerDescription after an ismaster call completes.""" # We do no I/O holding the lock. with self._lock: @@ -320,7 +326,7 @@ def on_change(self, server_description): # that didn't include this server. if (self._opened and self._description.has_server(server_description.address)): - self._process_change(server_description) + self._process_change(server_description, reset_pool) def _process_srv_update(self, seedlist): """Process a new seedlist on an opened topology. @@ -414,20 +420,14 @@ def request_check_all(self, wait_time=5): self._request_check_all() self._condition.wait(wait_time) - def reset_pool(self, address): - with self._lock: - server = self._servers.get(address) - if server: - server.pool.reset() - def handle_getlasterror(self, address, error_msg): """Clear our pool for a server, mark it Unknown, and check it soon.""" error = NotMasterError(error_msg, {'code': 10107, 'errmsg': error_msg}) with self._lock: server = self._servers.get(address) if server: - self._process_change(ServerDescription(address, error=error)) - server.pool.reset() + self._process_change( + ServerDescription(address, error=error), True) server.request_check() def update_pool(self, all_credentials): From c92150d1771ba0f9be4299db251e695b437ff0fd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 27 Jul 2020 17:20:43 -0700 Subject: [PATCH 0197/2111] PYTHON-1631 Automate release wheels for Windows and manylinux (#473) --- .evergreen/build-mac.sh | 12 +++++++ .evergreen/build-manylinux-internal.sh | 25 ++++++++++++++ .evergreen/build-manylinux.sh | 40 ++++++++++++++++++++++ .evergreen/build-windows.sh | 18 ++++++++++ .evergreen/config.yml | 47 ++++++++++++++++++++++++-- .evergreen/release.sh | 9 +++++ 6 files changed, 149 insertions(+), 2 deletions(-) create mode 100755 .evergreen/build-mac.sh create mode 100755 .evergreen/build-manylinux-internal.sh create mode 100755 .evergreen/build-manylinux.sh create mode 100755 .evergreen/build-windows.sh create mode 100755 .evergreen/release.sh diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh new file mode 100755 index 0000000000..95be59c340 --- /dev/null +++ b/.evergreen/build-mac.sh @@ -0,0 +1,12 @@ +#!/bin/bash -ex + +for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do + if [[ $VERSION == "2.7" ]]; then + rm -rf build + python$VERSION setup.py bdist_egg + fi + rm -rf build + python$VERSION setup.py bdist_wheel +done + +ls dist diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh new file mode 100755 index 0000000000..c810700a29 --- /dev/null +++ b/.evergreen/build-manylinux-internal.sh @@ -0,0 +1,25 @@ +#!/bin/bash -ex +cd /pymongo + +# Compile wheels +for PYBIN in /opt/python/*/bin; do + # Skip Python 3.3 and 3.9. + if [[ "$PYBIN" == *"cp33"* || "$PYBIN" == *"cp39"* ]]; then + continue + fi + # https://github.com/pypa/manylinux/issues/49 + rm -rf build + ${PYBIN}/python setup.py bdist_wheel +done + +# https://github.com/pypa/manylinux/issues/49 +rm -rf build + +# Audit wheels and write multilinux1 tag +for whl in dist/*.whl; do + # Skip already built manylinux1 wheels. + if [[ "$whl" != *"manylinux"* ]]; then + auditwheel repair $whl -w dist + rm $whl + fi +done diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh new file mode 100755 index 0000000000..16b05dbea4 --- /dev/null +++ b/.evergreen/build-manylinux.sh @@ -0,0 +1,40 @@ +#!/bin/bash -ex + +docker version + +# 2020-03-20-2fda31c Was the last release to include Python 3.4. +images=(quay.io/pypa/manylinux1_x86_64:2020-03-20-2fda31c \ + quay.io/pypa/manylinux1_i686:2020-03-20-2fda31c \ + quay.io/pypa/manylinux1_x86_64 \ + quay.io/pypa/manylinux1_i686 \ + quay.io/pypa/manylinux2014_x86_64 \ + quay.io/pypa/manylinux2014_i686) +# aarch64/ppc64le/s390x work on macOS locally but not on linux in evergreen: +# [2020/07/23 00:24:00.482] + docker run --rm -v /data/mci/cd100cec6341abda533450fb3f2fab99/src:/pymongo quay.io/pypa/manylinux2014_aarch64 /pymongo/.evergreen/build-manylinux-internal.sh +# [2020/07/23 00:24:01.186] standard_init_linux.go:211: exec user process caused "exec format error" +# +# Could be related to: +# https://github.com/pypa/manylinux/issues/410 +# quay.io/pypa/manylinux2014_aarch64 \ +# quay.io/pypa/manylinux2014_ppc64le \ +# quay.io/pypa/manylinux2014_s390x) + +for image in "${images[@]}"; do + docker pull $image + docker run --rm -v `pwd`:/pymongo $image /pymongo/.evergreen/build-manylinux-internal.sh +done + +ls dist + +# Check for any unexpected files. +unexpected=$(find dist \! \( -iname dist -or \ + -iname '*cp27*' -or \ + -iname '*cp34*' -or \ + -iname '*cp35*' -or \ + -iname '*cp36*' -or \ + -iname '*cp37*' -or \ + -iname '*cp38*' \)) +if [ -n "$unexpected" ]; then + echo "Unexpected files:" $unexpected + exit 1 +fi diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh new file mode 100755 index 0000000000..a34506f551 --- /dev/null +++ b/.evergreen/build-windows.sh @@ -0,0 +1,18 @@ +#!/bin/bash -ex + +for VERSION in 27 34 35 36 37 38; do + PYTHON=C:/Python/Python${VERSION}/python.exe + PYTHON32=C:/Python/32/Python${VERSION}/python.exe + if [[ $VERSION == "2.7" ]]; then + rm -rf build + $PYTHON setup.py bdist_egg + rm -rf build + $PYTHON32 setup.py bdist_egg + fi + rm -rf build + $PYTHON setup.py bdist_wheel + rm -rf build + $PYTHON32 setup.py bdist_wheel +done + +ls dist diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac5e4147f9..24cdd26238 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -756,6 +756,13 @@ functions: -v \ --fault revoked + "teardown_docker": + - command: shell.exec + params: + script: | + # Remove all Docker images + docker rmi -f $(docker images -a -q) &> /dev/null || true + pre: - func: "fetch source" - func: "prepare resources" @@ -773,6 +780,7 @@ post: - func: "upload test results" - func: "stop mongo-orchestration" - func: "cleanup" + - func: "teardown_docker" tasks: @@ -806,6 +814,36 @@ tasks: genhtml --version || true valgrind --version || true + + - name: "release" + tags: ["release"] + git_tag_only: true + commands: + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + set -o xtrace + ${PREPARE_SHELL} + .evergreen/release.sh + - command: archive.targz_pack + params: + target: "release-files.tgz" + source_dir: "src/dist" + include: + - "*" + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: release-files.tgz + remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/release/${task_id}-${execution}-release-files.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Release files + # Standard test tasks {{{ - name: "mockupdb" @@ -2065,8 +2103,6 @@ buildvariants: - ".latest" - ".4.4" - ".4.2" - variables: - set_xtrace_on: on - matrix_name: "tests-python-version-rhel62-test-ssl" matrix_spec: @@ -2519,6 +2555,13 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-latest" +- matrix_name: "Release" + matrix_spec: + platform: [ubuntu-18.04, windows-64-vsMulti-small] + display_name: "Release ${platform}" + tasks: + - name: "release" + # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ diff --git a/.evergreen/release.sh b/.evergreen/release.sh new file mode 100755 index 0000000000..759786b934 --- /dev/null +++ b/.evergreen/release.sh @@ -0,0 +1,9 @@ +#!/bin/bash -ex + +if [ $(uname -s) = "Darwin" ]; then + .evergreen/build-mac.sh +elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + .evergreen/build-windows.sh +else + .evergreen/build-manylinux.sh +fi From de1e29305c70a309aa52d30b780b3eb332265ab2 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 29 Jul 2020 11:26:50 -0700 Subject: [PATCH 0198/2111] PYTHON-2219 Document hidden index option (#474) --- pymongo/collection.py | 27 ++++++++++++++++----------- pymongo/operations.py | 16 +++++++++++----- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 88ead4df04..7a3d0bd99f 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1985,8 +1985,9 @@ def create_index(self, keys, session=None, **kwargs): - `name`: custom name to use for this index - if none is given, a name will be generated. - - `unique`: if ``True`` creates a uniqueness constraint on the index. - - `background`: if ``True`` this index should be created in the + - `unique`: if ``True``, creates a uniqueness constraint on the + index. + - `background`: if ``True``, this index should be created in the background. - `sparse`: if ``True``, omit from the index any documents that lack the indexed field. @@ -2002,13 +2003,15 @@ def create_index(self, keys, session=None, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires server version >=3.2. + a partial index. Requires MongoDB >=3.2. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. Requires MongoDB >= 3.4. - `wildcardProjection`: Allows users to include or exclude specific - field paths from a `wildcard index`_ using the { "$**" : 1} key - pattern. Requires server version >= 4.2. + field paths from a `wildcard index`_ using the {"$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. See the MongoDB documentation for a full list of supported options by server version. @@ -2030,6 +2033,8 @@ def create_index(self, keys, session=None, **kwargs): options (see the above list) should be passed as keyword arguments + .. versionchanged:: 3.11 + Added the ``hidden`` option. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for passing maxTimeMS in kwargs. @@ -2037,11 +2042,11 @@ def create_index(self, keys, session=None, **kwargs): Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. Support the `collation` option. .. versionchanged:: 3.2 - Added partialFilterExpression to support partial indexes. + Added partialFilterExpression to support partial indexes. .. versionchanged:: 3.0 - Renamed `key_or_list` to `keys`. Removed the `cache_for` option. - :meth:`create_index` no longer caches index names. Removed support - for the drop_dups and bucket_size aliases. + Renamed `key_or_list` to `keys`. Removed the `cache_for` option. + :meth:`create_index` no longer caches index names. Removed support + for the drop_dups and bucket_size aliases. .. mongodoc:: indexes diff --git a/pymongo/operations.py b/pymongo/operations.py index f72ef82aa0..ea6ee2fb2b 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -390,8 +390,8 @@ def __init__(self, keys, **kwargs): - `name`: custom name to use for this index - if none is given, a name will be generated. - - `unique`: if ``True`` creates a uniqueness constraint on the index. - - `background`: if ``True`` this index should be created in the + - `unique`: if ``True``, creates a uniqueness constraint on the index. + - `background`: if ``True``, this index should be created in the background. - `sparse`: if ``True``, omit from the index any documents that lack the indexed field. @@ -407,12 +407,15 @@ def __init__(self, keys, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires server version >= 3.2. + a partial index. Requires MongoDB >= 3.2. - `collation`: An instance of :class:`~pymongo.collation.Collation` that specifies the collation to use in MongoDB >= 3.4. - `wildcardProjection`: Allows users to include or exclude specific field paths from a `wildcard index`_ using the { "$**" : 1} key - pattern. Requires server version >= 4.2. + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. See the MongoDB documentation for a full list of supported options by server version. @@ -424,8 +427,11 @@ def __init__(self, keys, **kwargs): options (see the above list) should be passed as keyword arguments + .. versionchanged:: 3.11 + Added the ``hidden`` option. .. versionchanged:: 3.2 - Added partialFilterExpression to support partial indexes. + Added the ``partialFilterExpression`` option to support partial + indexes. .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core """ From ff327b3e310f5a04706f4c8edb1b9ad4f484c409 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 29 Jul 2020 14:46:48 -0700 Subject: [PATCH 0199/2111] PYTHON-2252 Add examples and documentation for new UUID behavior (#467) --- bson/binary.py | 15 +- bson/codec_options.py | 5 +- doc/examples/index.rst | 1 + doc/examples/uuid.rst | 509 ++++++++++++++++++++++++++++++++++++++++ pymongo/mongo_client.py | 5 +- 5 files changed, 531 insertions(+), 4 deletions(-) create mode 100644 doc/examples/uuid.rst diff --git a/bson/binary.py b/bson/binary.py index cb89c69da2..d1f5aae7d2 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -69,6 +69,8 @@ class UuidRepresentation: code. When decoding a BSON binary field with a UUID subtype, a :class:`~bson.binary.Binary` instance will be returned instead of a :class:`uuid.UUID` instance. + + See :ref:`unspecified-representation-details` for details. .. versionadded:: 3.11 """ @@ -79,6 +81,8 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`UUID_SUBTYPE`. + + See :ref:`standard-representation-details` for details. .. versionadded:: 3.11 """ @@ -89,6 +93,8 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`OLD_UUID_SUBTYPE`. + + See :ref:`python-legacy-representation-details` for details. .. versionadded:: 3.11 """ @@ -99,6 +105,8 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the Java driver's legacy byte order. + + See :ref:`java-legacy-representation-details` for details. .. versionadded:: 3.11 """ @@ -109,6 +117,8 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the C# driver's legacy byte order. + + See :ref:`csharp-legacy-representation-details` for details. .. versionadded:: 3.11 """ @@ -220,6 +230,7 @@ def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): - `uuid_representation`: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. + See :ref:`handling-uuid-data-example` for details. .. versionadded:: 3.11 """ @@ -236,7 +247,8 @@ def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): "UuidRepresentation.UNSPECIFIED. UUIDs can be manually " "converted to bson.Binary instances using " "bson.Binary.from_uuid() or a different UuidRepresentation " - "can be configured.") + "can be configured. See the documentation for " + "UuidRepresentation for more information.") subtype = OLD_UUID_SUBTYPE if uuid_representation == UuidRepresentation.PYTHON_LEGACY: @@ -266,6 +278,7 @@ def as_uuid(self, uuid_representation=UuidRepresentation.STANDARD): - `uuid_representation`: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. + See :ref:`handling-uuid-data-example` for details. .. versionadded:: 3.11 """ diff --git a/bson/codec_options.py b/bson/codec_options.py index 82079b71a6..833908fad0 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -218,7 +218,10 @@ class CodecOptions(_options_base): naive. Defaults to ``False``. - `uuid_representation`: The BSON representation to use when encoding and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.PYTHON_LEGACY`. + :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include diff --git a/doc/examples/index.rst b/doc/examples/index.rst index baadd74464..f8828cdfd7 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -32,3 +32,4 @@ MongoDB, you can start it like so: tailable tls encryption + uuid diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst new file mode 100644 index 0000000000..9b6762dc88 --- /dev/null +++ b/doc/examples/uuid.rst @@ -0,0 +1,509 @@ +.. _handling-uuid-data-example: + +Handling UUID Data +================== + +PyMongo ships with built-in support for dealing with UUID types. +It is straightforward to store native :class:`uuid.UUID` objects +to MongoDB and retrieve them as native :class:`uuid.UUID` objects:: + + from pymongo import MongoClient + from bson.binary import UuidRepresentation + from uuid import uuid4 + + # use the 'standard' representation for cross-language compatibility. + client = MongoClient(uuid_representation=UuidRepresentation.STANDARD) + collection = client.get_database('uuid_db').get_collection('uuid_coll') + + # remove all documents from collection + collection.delete_many({}) + + # create a native uuid object + uuid_obj = uuid4() + + # save the native uuid object to MongoDB + collection.insert_one({'uuid': uuid_obj}) + + # retrieve the stored uuid object from MongoDB + document = collection.find_one({}) + + # check that the retrieved UUID matches the inserted UUID + assert document['uuid'] == uuid_obj + +Native :class:`uuid.UUID` objects can also be used as part of MongoDB +queries:: + + document = collection.find({'uuid': uuid_obj}) + assert document['uuid'] == uuid_obj + +The above examples illustrate the simplest of use-cases - one where the +UUID is generated by, and used in the same application. However, +the situation can be significantly more complex when dealing with a MongoDB +deployment that contains UUIDs created by other drivers as the Java and CSharp +drivers have historically encoded UUIDs using a byte-order that is different +from the one used by PyMongo. Applications that require interoperability across +these drivers must specify the appropriate +:class:`~bson.binary.UuidRepresentation`. + +In the following sections, we describe how drivers have historically differed +in their encoding of UUIDs, and how applications can use the +:class:`~bson.binary.UuidRepresentation` configuration option to maintain +cross-language compatibility. + +.. attention:: New applications that do not share a MongoDB deployment with + any other application and that have never stored UUIDs in MongoDB + should use the ``standard`` UUID representation for cross-language + compatibility. See :ref:`configuring-uuid-representation` for details + on how to configure the :class:`~bson.binary.UuidRepresentation`. + +.. _example-legacy-uuid: + +Legacy Handling of UUID Data +---------------------------- + +Historically, MongoDB Drivers have used different byte-ordering +while serializing UUID types to :class:`~bson.binary.Binary`. +Consider, for instance, a UUID with the following canonical textual +representation:: + + 00112233-4455-6677-8899-aabbccddeeff + +This UUID would historically be serialized by the Python driver as:: + + 00112233-4455-6677-8899-aabbccddeeff + +The same UUID would historically be serialized by the C# driver as:: + + 33221100-5544-7766-8899-aabbccddeeff + +Finally, the same UUID would historically be serialized by the Java driver as:: + + 77665544-3322-1100-ffee-ddccbbaa9988 + +.. note:: For in-depth information about the the byte-order historically + used by different drivers, see the `Handling of Native UUID Types + Specification + `_. + +This difference in the byte-order of UUIDs encoded by different drivers can +result in highly unintuitive behavior in some scenarios. We detail two such +scenarios in the next sections. + +Scenario 1: Applications Share a MongoDB Deployment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Consider the following situation: + +* Application ``C`` written in C# generates a UUID and uses it as the ``_id`` + of a document that it proceeds to insert into the ``uuid_test`` collection of + the ``example_db`` database. Let's assume that the canonical textual + representation of the generated UUID is:: + + 00112233-4455-6677-8899-aabbccddeeff + +* Application ``P`` written in Python attempts to ``find`` the document + written by application ``C`` in the following manner:: + + from uuid import UUID + collection = client.example_db.uuid_test + result = collection.find_one({'_id': UUID('00112233-4455-6677-8899-aabbccddeeff')}) + + In this instance, ``result`` will never be the document that + was inserted by application ``C`` in the previous step. This is because of + the different byte-order used by the C# driver for representing UUIDs as + BSON Binary. The following query, on the other hand, will successfully find + this document:: + + result = collection.find_one({'_id': UUID('33221100-5544-7766-8899-aabbccddeeff')}) + +This example demonstrates how the differing byte-order used by different +drivers can hamper interoperability. To workaround this problem, users should +configure their ``MongoClient`` with the appropriate +:class:`~bson.binary.UuidRepresentation` (in this case, ``client`` in application +``P`` can be configured to use the +:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation to +avoid the unintuitive behavior) as described in +:ref:`configuring-uuid-representation`. + +Scenario 2: Round-Tripping UUIDs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the following examples, we see how using a misconfigured +:class:`~bson.binary.UuidRepresentation` can cause an application +to inadvertently change the :class:`~bson.binary.Binary` subtype, and in some +cases, the bytes of the :class:`~bson.binary.Binary` field itself when +round-tripping documents containing UUIDs. + +Consider the following situation:: + + from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS + from bson.binary import Binary, UuidRepresentation + from uuid import uuid4 + + # Using UuidRepresentation.PYTHON_LEGACY stores a Binary subtype-3 UUID + python_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY) + input_uuid = uuid4() + collection = client.testdb.get_collection('test', codec_options=python_opts) + collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) + assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})['_id'] == 'foo' + + # Retrieving this document using UuidRepresentation.STANDARD returns a native UUID + std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + std_collection = client.testdb.get_collection('test', codec_options=std_opts) + doc = std_collection.find_one({'_id': 'foo'}) + assert doc['uuid'] == input_uuid + + # Round-tripping the retrieved document silently changes the Binary subtype to 4 + std_collection.replace_one({'_id': 'foo'}, doc) + assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) is None + round_tripped_doc = collection.find_one({'uuid': Binary(input_uuid.bytes, 4)}) + assert doc == round_tripped_doc + + +In this example, round-tripping the document using the incorrect +:class:`~bson.binary.UuidRepresentation` (``STANDARD`` instead of +``PYTHON_LEGACY``) changes the :class:`~bson.binary.Binary` subtype as a +side-effect. **Note that this can also happen when the situation is reversed - +i.e. when the original document is written using ``STANDARD`` representation +and then round-tripped using the ``PYTHON_LEGACY`` representation.** + +In the next example, we see the consequences of incorrectly using a +representation that modifies byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``) +when round-tripping documents:: + + from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS + from bson.binary import Binary, UuidRepresentation + from uuid import uuid4 + + # Using UuidRepresentation.STANDARD stores a Binary subtype-4 UUID + std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + input_uuid = uuid4() + collection = client.testdb.get_collection('test', codec_options=std_opts) + collection.insert_one({'_id': 'baz', 'uuid': input_uuid}) + assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)})['_id'] == 'baz' + + # Retrieving this document using UuidRepresentation.JAVA_LEGACY returns a native UUID + # without modifying the UUID byte-order + java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) + java_collection = client.testdb.get_collection('test', codec_options=java_opts) + doc = java_collection.find_one({'_id': 'baz'}) + assert doc['uuid'] == input_uuid + + # Round-tripping the retrieved document silently changes the Binary bytes and subtype + java_collection.replace_one({'_id': 'baz'}, doc) + assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) is None + assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)}) is None + round_tripped_doc = collection.find_one({'_id': 'baz'}) + assert round_tripped_doc['uuid'] == Binary(input_uuid.bytes, 3).as_uuid(UuidRepresentation.JAVA_LEGACY) + + +In this case, using the incorrect :class:`~bson.binary.UuidRepresentation` +(``JAVA_LEGACY`` instead of ``STANDARD``) changes the +:class:`~bson.binary.Binary` bytes and subtype as a side-effect. +**Note that this happens when any representation that +manipulates byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``) is incorrectly +used to round-trip UUIDs written with ``STANDARD``. When the situation is +reversed - i.e. when the original document is written using ``CSHARP_LEGACY`` +or ``JAVA_LEGACY`` and then round-tripped using ``STANDARD`` - +only the :class:`~bson.binary.Binary` subtype is changed.** + +.. note:: Starting in PyMongo 4.0, these issue will be resolved as + the ``STANDARD`` representation will decode Binary subtype 3 fields as + :class:`~bson.binary.Binary` objects of subtype 3 (instead of + :class:`uuid.UUID`), and each of the ``LEGACY_*`` representations will + decode Binary subtype 4 fields to :class:`~bson.binary.Binary` objects of + subtype 4 (instead of :class:`uuid.UUID`). + +.. _configuring-uuid-representation: + +Configuring a UUID Representation +--------------------------------- + +Users can workaround the problems described above by configuring their +applications with the appropriate :class:`~bson.binary.UuidRepresentation`. +Configuring the representation modifies PyMongo's behavior while +encoding :class:`uuid.UUID` objects to BSON and decoding +Binary subtype 3 and 4 fields from BSON. + +Applications can set the UUID representation in one of the following ways: + +#. At the ``MongoClient`` level using the ``uuidRepresentation`` URI option, + e.g.:: + + client = MongoClient("mongodb://a:27107/?uuidRepresentation=javaLegacy") + + Valid values are: + + .. list-table:: + :header-rows: 1 + + * - Value + - UUID Representation + + * - ``pythonLegacy`` + - :ref:`python-legacy-representation-details` + + * - ``javaLegacy`` + - :ref:`java-legacy-representation-details` + + * - ``csharpLegacy`` + - :ref:`csharp-legacy-representation-details` + + * - ``standard`` + - :ref:`standard-representation-details` + + * - ``unspecified`` + - :ref:`unspecified-representation-details` + +#. Using the ``uuid_representation`` kwarg option, e.g.:: + + from bson.binary import UuidRepresentation + client = MongoClient(uuid_representation=UuidRepresentation.PYTHON_LEGACY) + +#. By supplying a suitable :class:`~bson.codec_options.CodecOptions` + instance, e.g.:: + + from bson.codec_options import CodecOptions + csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) + csharp_database = client.get_database('csharp_db', codec_options=csharp_opts) + csharp_collection = client.testdb.get_collection('csharp_coll', codec_options=csharp_opts) + +Supported UUID Representations +------------------------------ + +.. list-table:: + :header-rows: 1 + + * - UUID Representation + - Default? + - Encode :class:`uuid.UUID` to + - Decode :class:`~bson.binary.Binary` subtype 4 to + - Decode :class:`~bson.binary.Binary` subtype 3 to + + * - :ref:`python-legacy-representation-details` + - Yes, in PyMongo>=2.9,<4 + - :class:`~bson.binary.Binary` subtype 3 with standard byte-order + - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 4 in PyMongo>=4 + - :class:`uuid.UUID` + + * - :ref:`java-legacy-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 3 with Java legacy byte-order + - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 4 in PyMongo>=4 + - :class:`uuid.UUID` + + * - :ref:`csharp-legacy-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 3 with C# legacy byte-order + - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 4 in PyMongo>=4 + - :class:`uuid.UUID` + + * - :ref:`standard-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` + - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 3 in PyMongo>=4 + + * - :ref:`unspecified-representation-details` + - Yes, in PyMongo>=4 + - Raise :exc:`ValueError` + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 3 in PyMongo>=4 + +We now detail the behavior and use-case for each supported UUID +representation. + +.. _python-legacy-representation-details: + +``PYTHON_LEGACY`` +^^^^^^^^^^^^^^^^^ + +.. attention:: This uuid representation should be used when reading UUIDs + generated by existing applications that use the Python driver + but **don't** explicitly set a UUID representation. + +.. attention:: :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` + has been the default uuid representation since PyMongo 2.9. + +The :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` representation +corresponds to the legacy representation of UUIDs used by PyMongo. This +representation conforms with +`RFC 4122 Section 4.1.2 `_. + +The following example illustrates the use of this representation:: + + from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS + from bson.binary import UuidRepresentation + + # No configured UUID representation + collection = client.python_legacy.get_collection('test', codec_options=DEFAULT_CODEC_OPTIONS) + + # Using UuidRepresentation.PYTHON_LEGACY + pylegacy_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY) + pylegacy_collection = client.python_legacy.get_collection('test', codec_options=pylegacy_opts) + + # UUIDs written by PyMongo with no UuidRepresentation configured can be queried using PYTHON_LEGACY + uuid_1 = uuid4() + collection.insert_one({'uuid': uuid_1}) + document = pylegacy_collection.find_one({'uuid': uuid_1}) + + # UUIDs written using PYTHON_LEGACY can be read by PyMongo with no UuidRepresentation configured + uuid_2 = uuid4() + pylegacy_collection.insert_one({'uuid': uuid_2}) + document = collection.find_one({'uuid': uuid_2}) + +``PYTHON_LEGACY`` encodes native :class:`uuid.UUID` objects to +:class:`~bson.binary.Binary` subtype 3 objects, preserving the same +byte-order as :attr:`~uuid.UUID.bytes`:: + + from bson.binary import Binary + + document = collection.find_one({'uuid': Binary(uuid_2.bytes, subtype=3)}) + assert document['uuid'] == uuid_2 + +.. _java-legacy-representation-details: + +``JAVA_LEGACY`` +^^^^^^^^^^^^^^^ + +.. attention:: This UUID representation should be used when reading UUIDs + written to MongoDB by the legacy applications (i.e. applications that don't + use the ``STANDARD`` representation) using the Java driver. + +The :data:`~bson.binary.UuidRepresentation.JAVA_LEGACY` representation +corresponds to the legacy representation of UUIDs used by the MongoDB Java +Driver. + +.. note:: The ``JAVA_LEGACY`` representation reverses the order of bytes 0-7, + and bytes 8-15. + +As an example, consider the same UUID described in :ref:`example-legacy-uuid`. +Let us assume that an application used the Java driver without an explicitly +specified UUID representation to insert the example UUID +``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this +value using PyMongo with no UUID representation specified, we end up with an +entirely different UUID:: + + UUID('77665544-3322-1100-ffee-ddccbbaa9988') + +However, if we explicitly set the representation to +:data:`~bson.binary.UuidRepresentation.JAVA_LEGACY`, we get the correct result:: + + UUID('00112233-4455-6677-8899-aabbccddeeff') + +PyMongo uses the specified UUID representation to reorder the BSON bytes and +load them correctly. ``JAVA_LEGACY`` encodes native :class:`uuid.UUID` objects +to :class:`~bson.binary.Binary` subtype 3 objects, while performing the same +byte-reordering as the legacy Java driver's UUID to BSON encoder. + +.. _csharp-legacy-representation-details: + +``CSHARP_LEGACY`` +^^^^^^^^^^^^^^^^^ + +.. attention:: This UUID representation should be used when reading UUIDs + written to MongoDB by the legacy applications (i.e. applications that don't + use the ``STANDARD`` representation) using the C# driver. + +The :data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation +corresponds to the legacy representation of UUIDs used by the MongoDB Java +Driver. + +.. note:: The ``CSHARP_LEGACY`` representation reverses the order of bytes 0-3, + bytes 4-5, and bytes 6-7. + +As an example, consider the same UUID described in :ref:`example-legacy-uuid`. +Let us assume that an application used the C# driver without an explicitly +specified UUID representation to insert the example UUID +``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this +value using PyMongo with no UUID representation specified, we end up with an +entirely different UUID:: + + UUID('33221100-5544-7766-8899-aabbccddeeff') + +However, if we explicitly set the representation to +:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY`, we get the correct result:: + + UUID('00112233-4455-6677-8899-aabbccddeeff') + +PyMongo uses the specified UUID representation to reorder the BSON bytes and +load them correctly. ``CSHARP_LEGACY`` encodes native :class:`uuid.UUID` +objects to :class:`~bson.binary.Binary` subtype 3 objects, while performing +the same byte-reordering as the legacy C# driver's UUID to BSON encoder. + +.. _standard-representation-details: + +``STANDARD`` +^^^^^^^^^^^^ + +.. attention:: This UUID representation should be used by new applications + that have never stored UUIDs in MongoDB. + +The :data:`~bson.binary.UuidRepresentation.STANDARD` representation +enables cross-language compatibility by ensuring the same byte-ordering +when encoding UUIDs from all drivers. UUIDs written by a driver with this +representation configured will be handled correctly by every other provided +it is also configured with the ``STANDARD`` representation. + +``STANDARD`` encodes native :class:`uuid.UUID` objects to +:class:`~bson.binary.Binary` subtype 4 objects. + +.. _unspecified-representation-details: + +``UNSPECIFIED`` +^^^^^^^^^^^^^^^ + +.. attention:: Starting in PyMongo 4.0, + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` will be the default + UUID representation used by PyMongo. + +The :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` representation +prevents the incorrect interpretation of UUID bytes by stopping short of +automatically converting UUID fields in BSON to native UUID types. Loading +a UUID when using this representation returns a :class:`~bson.binary.Binary` +object instead. If required, users can coerce the decoded +:class:`~bson.binary.Binary` objects into native UUIDs using the +:meth:`~bson.binary.Binary.as_uuid` method and specifying the appropriate +representation format. The following example shows +what this might look like for a UUID stored by the C# driver:: + + from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS + from bson.binary import Binary, UuidRepresentation + from uuid import uuid4 + + # Using UuidRepresentation.CSHARP_LEGACY + csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) + + # Store a legacy C#-formatted UUID + input_uuid = uuid4() + collection = client.testdb.get_collection('test', codec_options=csharp_opts) + collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) + + # Using UuidRepresentation.UNSPECIFIED + unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + unspec_collection = client.testdb.get_collection('test', codec_options=unspec_opts) + + # UUID fields are decoded as Binary when UuidRepresentation.UNSPECIFIED is configured + document = unspec_collection.find_one({'_id': 'foo'}) + decoded_field = document['uuid'] + assert isinstance(decoded_field, Binary) + + # Binary.as_uuid() can be used to coerce the decoded value to a native UUID + decoded_uuid = decoded_field.as_uuid(UuidRepresentation.CSHARP_LEGACY) + assert decoded_uuid == input_uuid + +Native :class:`uuid.UUID` objects cannot directly be encoded to +:class:`~bson.binary.Binary` when the UUID representation is ``UNSPECIFIED`` +and attempting to do so will result in an exception:: + + unspec_collection.insert_one({'_id': 'bar', 'uuid': uuid4()}) + Traceback (most recent call last): + ... + ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. See the documentation for UuidRepresentation for more information. + +Instead, applications using :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` +must explicitly coerce a native UUID using the +:meth:`~bson.binary.Binary.from_uuid` method:: + + explicit_binary = Binary.from_uuid(uuid4(), UuidRepresentation.PYTHON_LEGACY) + unspec_collection.insert_one({'_id': 'bar', 'uuid': explicit_binary}) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ac9c73160c..ef39cfc65a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -341,8 +341,9 @@ def __init__( - `uuidRepresentation`: The BSON representation to use when encoding from and decoding to instances of :class:`~uuid.UUID`. Valid values are `pythonLegacy` (the default), `javaLegacy`, - `csharpLegacy` and `standard`. New applications should consider - setting this to `standard` for cross language compatibility. + `csharpLegacy`, `standard` and `unspecified`. New applications + should consider setting this to `standard` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. | **Write Concern options:** | (Only set if passed. No default values.) From 83578dc35fb97a94684bab36c5b2572e99642c65 Mon Sep 17 00:00:00 2001 From: TylerWilley <68749197+TylerWilley@users.noreply.github.com> Date: Wed, 29 Jul 2020 17:17:38 -0600 Subject: [PATCH 0200/2111] PYTHON-2334: Fix gevent.Timeout race condition (#472) If gevent raises a Timeout during self.lock acquisition, a _socket_semaphore count will be lost. Using "with" will release the condition even on exception being raised. --- pymongo/pool.py | 13 +++++++++---- pymongo/thread_util.py | 31 +++++++++++++++---------------- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index a18e005ba5..92814e8570 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1257,12 +1257,15 @@ def _get_socket(self, all_credentials): if not self._socket_semaphore.acquire( True, self.opts.wait_queue_timeout): self._raise_wait_queue_timeout() - with self.lock: - self.active_sockets += 1 # We've now acquired the semaphore and must release it on error. sock_info = None + incremented = False try: + with self.lock: + self.active_sockets += 1 + incremented = True + while sock_info is None: try: with self.lock: @@ -1279,8 +1282,10 @@ def _get_socket(self, all_credentials): # We checked out a socket but authentication failed. sock_info.close_socket(ConnectionClosedReason.ERROR) self._socket_semaphore.release() - with self.lock: - self.active_sockets -= 1 + + if incremented: + with self.lock: + self.active_sockets -= 1 if self.enabled_for_cmap: self.opts.event_listeners.publish_connection_check_out_failed( diff --git a/pymongo/thread_util.py b/pymongo/thread_util.py index 3869ec322f..0cf0a127f2 100644 --- a/pymongo/thread_util.py +++ b/pymongo/thread_util.py @@ -40,22 +40,21 @@ def acquire(self, blocking=True, timeout=None): raise ValueError("can't specify timeout for non-blocking acquire") rc = False endtime = None - self._cond.acquire() - while self._value == 0: - if not blocking: - break - if timeout is not None: - if endtime is None: - endtime = _time() + timeout - else: - timeout = endtime - _time() - if timeout <= 0: - break - self._cond.wait(timeout) - else: - self._value = self._value - 1 - rc = True - self._cond.release() + with self._cond: + while self._value == 0: + if not blocking: + break + if timeout is not None: + if endtime is None: + endtime = _time() + timeout + else: + timeout = endtime - _time() + if timeout <= 0: + break + self._cond.wait(timeout) + else: + self._value = self._value - 1 + rc = True return rc __enter__ = acquire From c99254fe974110f6749028e8693b9c820349ff09 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 29 Jul 2020 18:05:18 -0700 Subject: [PATCH 0201/2111] PYTHON-2334 Add regression test for gevent.Timeout compatibility (#475) Use with statement in Semaphore.release. --- pymongo/thread_util.py | 7 +++---- test/test_client.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/pymongo/thread_util.py b/pymongo/thread_util.py index 0cf0a127f2..3dac4e25fa 100644 --- a/pymongo/thread_util.py +++ b/pymongo/thread_util.py @@ -60,10 +60,9 @@ def acquire(self, blocking=True, timeout=None): __enter__ = acquire def release(self): - self._cond.acquire() - self._value = self._value + 1 - self._cond.notify() - self._cond.release() + with self._cond: + self._value = self._value + 1 + self._cond.notify() def __exit__(self, t, v, tb): self.release() diff --git a/test/test_client.py b/test/test_client.py index 5333fc35ff..19ea1375c2 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1944,6 +1944,36 @@ def poller(): task.kill() self.assertTrue(task.dead) + def test_gevent_timeout(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import spawn, Timeout + client = rs_or_single_client(maxPoolSize=1) + coll = client.pymongo_test.test + coll.insert_one({}) + + def contentious_task(): + # The 10 second timeout causes this test to fail without blocking + # forever if a bug like PYTHON-2334 is reintroduced. + with Timeout(10): + coll.find_one({'$where': delay(1)}) + + def timeout_task(): + with Timeout(.5): + try: + coll.find_one({}) + except Timeout: + pass + + ct = spawn(contentious_task) + tt = spawn(timeout_task) + tt.join(15) + ct.join(15) + self.assertTrue(tt.dead) + self.assertTrue(ct.dead) + self.assertIsNone(tt.get()) + self.assertIsNone(ct.get()) + if __name__ == "__main__": unittest.main() From 9fa94db01a52ea46269220db2f00eb1affcf08fa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 30 Jul 2020 13:24:44 -0700 Subject: [PATCH 0202/2111] BUMP 3.11 (#476) --- doc/changelog.rst | 6 ++++++ pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c78cefb7f6..3fed417d06 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -35,6 +35,10 @@ Highlights include: :class:`~pymongo.operations.UpdateMany`, :class:`~pymongo.operations.DeleteOne`, and :class:`~pymongo.operations.DeleteMany` bulk operations. +- Added support for :data:`bson.binary.UuidRepresentation.UNSPECIFIED` and + ``MongoClient(uuidRepresentation='unspecified')`` which will become the + default UUID representation starting in PyMongo 4.0. See + :ref:`handling-uuid-data-example` for details. - Added the ``background`` parameter to :meth:`pymongo.database.Database.validate_collection`. For a description of this parameter see the MongoDB documentation for the `validate command`_. @@ -49,6 +53,8 @@ Highlights include: - Fixed a bug in change streams that could cause PyMongo to miss some change documents when resuming a stream that was started without a resume token and whose first batch did not contain any change documents. +- Fixed an bug where using gevent.Timeout to timeout an operation could + lead to a deadlock. Deprecations: diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 066eaea09a..a373cb181d 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0, 'rc1.dev0') +version_tuple = (3, 11, 0) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 528f8183c2..79b938ddfb 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0rc1.dev0" +version = "3.11.0" f = open("README.rst") try: From 316830d7b5c45e27217ff6df17c0c75817674096 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 30 Jul 2020 13:27:09 -0700 Subject: [PATCH 0203/2111] BUMP 3.11.1.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a373cb181d..a0b7aa4dca 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0) +version_tuple = (3, 11, 1, ".dev0") def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 79b938ddfb..f73622fe14 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0" +version = "3.11.1.dev0" f = open("README.rst") try: From 31949fb7e5877938a52e36e1d3c5738671d5c095 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 30 Jul 2020 13:45:25 -0700 Subject: [PATCH 0204/2111] BUMP 3.11 (with proper changelog title) --- doc/changelog.rst | 4 ++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3fed417d06..19a4265c58 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 3.11.0rc1.dev0 ---------------------------------- +Changes in Version 3.11.0 +------------------------- Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. Highlights include: diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a0b7aa4dca..a373cb181d 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 1, ".dev0") +version_tuple = (3, 11, 0) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index f73622fe14..79b938ddfb 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.1.dev0" +version = "3.11.0" f = open("README.rst") try: From add995feb4793129b4971c4e29670f35a4f7619b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 30 Jul 2020 14:04:10 -0700 Subject: [PATCH 0205/2111] BUMP 3.11.1.dev1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a373cb181d..315ee2c5b6 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 0) +version_tuple = (3, 11, 1, '.dev1') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 79b938ddfb..5f7e306c18 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.0" +version = "3.11.1.dev1" f = open("README.rst") try: From 7903a1c4e1c6642adf26649d79773f44f6f5a149 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 12 Aug 2020 14:55:19 -0700 Subject: [PATCH 0206/2111] PYTHON-2332 Skip threaded SDAM tests when cdecimal is monkey patched (#477) Add 60 second timeout for joining threads in SDAM tests. --- test/test_discovery_and_monitoring.py | 14 +++++++++++++- test/utils.py | 10 ++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index bc05ca78bf..c676647e66 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -38,6 +38,7 @@ from pymongo.uri_parser import parse_uri from test import unittest, IntegrationTest from test.utils import (assertion_context, + cdecimal_patched, client_context, Barrier, get_pool, @@ -334,6 +335,15 @@ def marked_unknown(e): event_type = getattr(monitoring, event) return self.pool_listener.event_count(event_type) + def maybe_skip_scenario(self, test): + """Override to skip threaded tests when cdecimal is installed on 2.7 + """ + super(TestIntegration, self).maybe_skip_scenario(test) + # PYTHON-2332 + ops = [op['name'] for op in test['operations']] + if cdecimal_patched() and 'startThread' in ops: + raise unittest.SkipTest('PYTHON-2332 test fails with cdecimal') + def assert_event_count(self, event, count): """Run the assertEventCount test operation. @@ -400,9 +410,11 @@ def wait_for_thread(self, name): """Run the 'waitForThread' operation.""" thread = self.targets[name] thread.stop() - thread.join() + thread.join(60) if thread.exc: raise thread.exc + self.assertFalse( + thread.is_alive(), 'Thread %s is still running' % (name,)) def create_spec_test(scenario_def, test, name): diff --git a/test/utils.py b/test/utils.py index 34f62cc449..2f5b845544 100644 --- a/test/utils.py +++ b/test/utils.py @@ -871,6 +871,16 @@ def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() +def cdecimal_patched(): + """Check if Python 2.7 cdecimal patching is active.""" + try: + import decimal + import cdecimal + return decimal is cdecimal + except ImportError: + return False + + def disable_replication(client): """Disable replication on all secondaries, requires MongoDB 3.2.""" for host, port in client.secondaries: From 031492aefee205eacb6aff1644f38de4bc1fa223 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 14 Aug 2020 16:41:27 -0700 Subject: [PATCH 0207/2111] PYTHON-2339 Build manylinux2014_aarch64/ppc64le/s390x releases (#478) --- .evergreen/build-manylinux.sh | 14 ++++---------- .evergreen/config.yml | 8 +++++++- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 16b05dbea4..501b0297df 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -8,16 +8,10 @@ images=(quay.io/pypa/manylinux1_x86_64:2020-03-20-2fda31c \ quay.io/pypa/manylinux1_x86_64 \ quay.io/pypa/manylinux1_i686 \ quay.io/pypa/manylinux2014_x86_64 \ - quay.io/pypa/manylinux2014_i686) -# aarch64/ppc64le/s390x work on macOS locally but not on linux in evergreen: -# [2020/07/23 00:24:00.482] + docker run --rm -v /data/mci/cd100cec6341abda533450fb3f2fab99/src:/pymongo quay.io/pypa/manylinux2014_aarch64 /pymongo/.evergreen/build-manylinux-internal.sh -# [2020/07/23 00:24:01.186] standard_init_linux.go:211: exec user process caused "exec format error" -# -# Could be related to: -# https://github.com/pypa/manylinux/issues/410 -# quay.io/pypa/manylinux2014_aarch64 \ -# quay.io/pypa/manylinux2014_ppc64le \ -# quay.io/pypa/manylinux2014_s390x) + quay.io/pypa/manylinux2014_i686 \ + quay.io/pypa/manylinux2014_aarch64 \ + quay.io/pypa/manylinux2014_ppc64le \ + quay.io/pypa/manylinux2014_s390x) for image in "${images[@]}"; do docker pull $image diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 24cdd26238..9c599427d1 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1605,6 +1605,12 @@ axes: batchtime: 10080 # 7 days variables: python3_binary: python3 + - id: ubuntu-20.04 + display_name: "Ubuntu 20.04" + run_on: ubuntu2004-small + batchtime: 10080 # 7 days + variables: + python3_binary: python3 - id: ubuntu1604-arm64-small display_name: "Ubuntu 16.04 (ARM64)" run_on: ubuntu1604-arm64-small @@ -2557,7 +2563,7 @@ buildvariants: - matrix_name: "Release" matrix_spec: - platform: [ubuntu-18.04, windows-64-vsMulti-small] + platform: [ubuntu-20.04, windows-64-vsMulti-small] display_name: "Release ${platform}" tasks: - name: "release" From 959039b213ee90e09983475a7db20dd8c523e76d Mon Sep 17 00:00:00 2001 From: ishmum123 Date: Sat, 22 Aug 2020 00:10:40 +0600 Subject: [PATCH 0208/2111] PYTHON-1915: Prohibit copying ClientSession objects (#480) --- doc/contributors.rst | 1 + pymongo/client_session.py | 3 +++ test/test_session.py | 5 +++++ 3 files changed, 9 insertions(+) diff --git a/doc/contributors.rst b/doc/contributors.rst index 4118d55586..74164ffd59 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -88,3 +88,4 @@ The following is a list of people who have contributed to - Terence Honles (terencehonles) - Paul Fisher (thetorpedodog) - Julius Park (juliusgeo) +- Ishmum Jawad Khan (ishmum123) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index dec2f4f918..e2d7caca58 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -827,6 +827,9 @@ def _start_retryable_write(self): self._check_ended() self._server_session.inc_transaction_id() + def __copy__(self): + raise TypeError('A ClientSession cannot be copied, create a new session instead') + class _ServerSession(object): def __init__(self, generation): diff --git a/test/test_session.py b/test/test_session.py index 50dfd8a060..e1db9ccfd1 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -779,6 +779,11 @@ def drop_db(): wait_until(drop_db, 'dropped database after w=0 writes') + def test_session_not_copyable(self): + client = self.client + with client.start_session() as s: + self.assertRaises(TypeError, lambda: copy.copy(s)) + class TestCausalConsistency(unittest.TestCase): From 4a12caae0a5404311d438942dbf8bacb87c5ba5e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Aug 2020 10:56:15 -0700 Subject: [PATCH 0209/2111] PYTHON-2351 Update sdam monitoring tests with directConnection uri option (#481) --- .../discovered_standalone.json | 104 ++++++++++++++++++ test/sdam_monitoring/standalone.json | 4 +- ...ne_suppress_equal_description_changes.json | 4 +- 3 files changed, 108 insertions(+), 4 deletions(-) create mode 100644 test/sdam_monitoring/discovered_standalone.json diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json new file mode 100644 index 0000000000..c3ab59834f --- /dev/null +++ b/test/sdam_monitoring/discovered_standalone.json @@ -0,0 +1,104 @@ +{ + "description": "Monitoring a discovered standalone connection", + "uri": "mongodb://a:27017/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 4 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 5d40286c97..3ff10f820f 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -1,6 +1,6 @@ { - "description": "Monitoring a standalone connection", - "uri": "mongodb://a:27017", + "description": "Monitoring a direct connection", + "uri": "mongodb://a:27017/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json index a4b2d10da8..ceab1449cc 100644 --- a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -1,6 +1,6 @@ { - "description": "Monitoring a standalone connection - suppress update events for equal server descriptions", - "uri": "mongodb://a:27017", + "description": "Monitoring a direct connection - suppress update events for equal server descriptions", + "uri": "mongodb://a:27017/?directConnection=true", "phases": [ { "responses": [ From 963759af33a38a5d47ef946521f0a85bc5a1e2aa Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 9 Sep 2020 14:15:21 -0700 Subject: [PATCH 0210/2111] PYTHON-2354 Add support for JSONOptions.with_options (#482) --- bson/codec_options.py | 12 +++--------- bson/json_util.py | 20 ++++++++++++++++++++ test/test_json_util.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 9 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 833908fad0..0db900f59b 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -310,15 +310,9 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ - return CodecOptions( - kwargs.get('document_class', self.document_class), - kwargs.get('tz_aware', self.tz_aware), - kwargs.get('uuid_representation', self.uuid_representation), - kwargs.get('unicode_decode_error_handler', - self.unicode_decode_error_handler), - kwargs.get('tzinfo', self.tzinfo), - kwargs.get('type_registry', self.type_registry) - ) + opts = self._asdict() + opts.update(kwargs) + return CodecOptions(**opts) DEFAULT_CODEC_OPTIONS = CodecOptions( diff --git a/bson/json_util.py b/bson/json_util.py index 7b789b0f30..f4c1b498f6 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -311,6 +311,26 @@ def _arguments_repr(self): self.json_mode, super(JSONOptions, self)._arguments_repr())) + def with_options(self, **kwargs): + """ + Make a copy of this JSONOptions, overriding some options:: + + >>> from bson.json_util import CANONICAL_JSON_OPTIONS + >>> CANONICAL_JSON_OPTIONS.tz_aware + True + >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=False) + >>> json_options.tz_aware + False + + .. versionadded:: 3.12 + """ + opts = self._asdict() + for opt in ('strict_number_long', 'datetime_representation', + 'strict_uuid', 'json_mode'): + opts[opt] = kwargs.get(opt, getattr(self, opt)) + opts.update(kwargs) + return JSONOptions(**opts) + LEGACY_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.LEGACY) """:class:`JSONOptions` for encoding to PyMongo's legacy JSON format. diff --git a/test/test_json_util.py b/test/test_json_util.py index e8b64a16d1..7906b276f5 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -52,6 +52,34 @@ def round_trip(self, doc, **kwargs): def test_basic(self): self.round_trip({"hello": "world"}) + def test_json_options_with_options(self): + opts = json_util.JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG) + self.assertEqual( + opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) + opts2 = opts.with_options( + datetime_representation=DatetimeRepresentation.ISO8601) + self.assertEqual( + opts2.datetime_representation, DatetimeRepresentation.ISO8601) + + opts = json_util.JSONOptions(strict_number_long=True) + self.assertEqual(opts.strict_number_long, True) + opts2 = opts.with_options(strict_number_long=False) + self.assertEqual(opts2.strict_number_long, False) + + opts = json_util.CANONICAL_JSON_OPTIONS + self.assertNotEqual( + opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) + opts2 = opts.with_options( + uuid_representation=UuidRepresentation.JAVA_LEGACY) + self.assertEqual( + opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts2.document_class, dict) + opts3 = opts2.with_options(document_class=SON) + self.assertEqual( + opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts3.document_class, SON) + def test_objectid(self): self.round_trip({"id": ObjectId()}) From dc94ca628e10011f81310df351b604a0beb559a7 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 9 Sep 2020 17:24:29 -0700 Subject: [PATCH 0211/2111] PYTHON-2361 Support parsing as extended JSON representation for subtype 4 binary (#483) --- bson/json_util.py | 2 ++ test/bson_corpus/array.json | 10 ++++++++-- test/bson_corpus/binary.json | 16 ++++++++++++++++ test/bson_corpus/datetime.json | 6 ++++++ test/bson_corpus/decimal128-2.json | 1 + test/bson_corpus/decimal128-5.json | 1 + test/bson_corpus/double.json | 16 ++++++++-------- test/bson_corpus/multi-type-deprecated.json | 9 +++++---- test/bson_corpus/timestamp.json | 5 +++++ test/bson_corpus/top.json | 10 +++++++--- test/test_bson_corpus.py | 9 ++++++++- 11 files changed, 67 insertions(+), 18 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index f4c1b498f6..1eef9270ef 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -508,6 +508,8 @@ def _parse_legacy_uuid(doc, json_options): """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) + if not isinstance(doc["$uuid"], text_type): + raise TypeError('$uuid must be a string: %s' % (doc,)) if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: return Binary.from_uuid(uuid.UUID(doc["$uuid"])) else: diff --git a/test/bson_corpus/array.json b/test/bson_corpus/array.json index 1c654cf36b..9ff953e5ae 100644 --- a/test/bson_corpus/array.json +++ b/test/bson_corpus/array.json @@ -14,16 +14,22 @@ "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" }, { - "description": "Single Element Array with index set incorrectly", + "description": "Single Element Array with index set incorrectly to empty string", "degenerate_bson": "130000000461000B00000010000A0000000000", "canonical_bson": "140000000461000C0000001030000A0000000000", "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" }, { - "description": "Single Element Array with index set incorrectly", + "description": "Single Element Array with index set incorrectly to ab", "degenerate_bson": "150000000461000D000000106162000A0000000000", "canonical_bson": "140000000461000C0000001030000A0000000000", "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Multi Element Array with duplicate indexes", + "degenerate_bson": "1b000000046100130000001030000a000000103000140000000000", + "canonical_bson": "1b000000046100130000001030000a000000103100140000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}, {\"$numberInt\": \"20\"}]}" } ], "decodeErrors": [ diff --git a/test/bson_corpus/binary.json b/test/bson_corpus/binary.json index 90a15c1a1c..324c56abde 100644 --- a/test/bson_corpus/binary.json +++ b/test/bson_corpus/binary.json @@ -39,6 +39,12 @@ "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}" }, + { + "description": "subtype 0x04 UUID", + "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}", + "degenerate_extjson": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}" + }, { "description": "subtype 0x05", "canonical_bson": "1D000000057800100000000573FFD26444B34C6990E8E7D1DFC035D400", @@ -81,5 +87,15 @@ "description": "subtype 0x02 length negative one", "bson": "130000000578000600000002FFFFFFFFFFFF00" } + ], + "parseErrors": [ + { + "description": "$uuid wrong type", + "string": "{\"x\" : { \"$uuid\" : { \"data\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}}" + }, + { + "description": "$uuid invalid value", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-90e8-e7d1dfc035d4\"}}" + } ] } diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json index 60506ce174..f857afdc36 100644 --- a/test/bson_corpus/datetime.json +++ b/test/bson_corpus/datetime.json @@ -25,6 +25,12 @@ "description" : "Y10K", "canonical_bson" : "1000000009610000DC1FD277E6000000", "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" + }, + { + "description": "leading zero ms", + "canonical_bson": "10000000096100D1D6D6CC3B01000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : \"2012-12-24T12:15:30.001Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330001\"}}}" } ], "decodeErrors": [ diff --git a/test/bson_corpus/decimal128-2.json b/test/bson_corpus/decimal128-2.json index de73b86ffb..316d3b0e61 100644 --- a/test/bson_corpus/decimal128-2.json +++ b/test/bson_corpus/decimal128-2.json @@ -790,3 +790,4 @@ } ] } + diff --git a/test/bson_corpus/decimal128-5.json b/test/bson_corpus/decimal128-5.json index 778bf96c4b..e976eae407 100644 --- a/test/bson_corpus/decimal128-5.json +++ b/test/bson_corpus/decimal128-5.json @@ -399,3 +399,4 @@ } ] } + diff --git a/test/bson_corpus/double.json b/test/bson_corpus/double.json index d13fd5c471..7be4ff45e6 100644 --- a/test/bson_corpus/double.json +++ b/test/bson_corpus/double.json @@ -28,16 +28,16 @@ "relaxed_extjson": "{\"d\" : -1.0001220703125}" }, { - "description": "1.2345678901234568e+18", - "canonical_bson": "1000000001640081E97DF41022B14300", - "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678901234568e+18\"}}", - "relaxed_extjson": "{\"d\" : 1.2345678901234568E+18}" + "description": "1.2345678921232E+18", + "canonical_bson": "100000000164002a1bf5f41022b14300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678921232e+18\"}}", + "relaxed_extjson": "{\"d\" : 1.2345678921232E+18}" }, { - "description": "-1.2345678901234568e+18", - "canonical_bson": "1000000001640081E97DF41022B1C300", - "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678901234568e+18\"}}", - "relaxed_extjson": "{\"d\" : -1.2345678901234568e+18}" + "description": "-1.2345678921232E+18", + "canonical_bson": "100000000164002a1bf5f41022b1c300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678921232e+18\"}}", + "relaxed_extjson": "{\"d\" : -1.2345678921232E+18}" }, { "description": "0.0", diff --git a/test/bson_corpus/multi-type-deprecated.json b/test/bson_corpus/multi-type-deprecated.json index e804e23c8a..665f388cd4 100644 --- a/test/bson_corpus/multi-type-deprecated.json +++ b/test/bson_corpus/multi-type-deprecated.json @@ -5,10 +5,11 @@ "valid": [ { "description": "All BSON types", - "canonical_bson": "3B020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000E00000064622E636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000", - "converted_bson": "4b020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002e0000000224726566000e00000064622e636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000", - "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": {\"$symbol\": \"symbol\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$dbPointer\": {\"$ref\": \"db.collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": {\"$undefined\": true}}", - "converted_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": \"symbol\", \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$ref\": \"db.collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": null}" + "canonical_bson": "38020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000B000000636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000", + "converted_bson": "48020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002b0000000224726566000b000000636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000", + "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": {\"$symbol\": \"symbol\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$dbPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": {\"$undefined\": true}}", + "converted_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": \"symbol\", \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": null}" } ] } + diff --git a/test/bson_corpus/timestamp.json b/test/bson_corpus/timestamp.json index c76bc2998e..6f46564a32 100644 --- a/test/bson_corpus/timestamp.json +++ b/test/bson_corpus/timestamp.json @@ -18,6 +18,11 @@ "description": "Timestamp with high-order bit set on both seconds and increment", "canonical_bson": "10000000116100FFFFFFFFFFFFFFFF00", "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4294967295, \"i\" : 4294967295} } }" + }, + { + "description": "Timestamp with high-order bit set on both seconds and increment (not UINT32_MAX)", + "canonical_bson": "1000000011610000286BEE00286BEE00", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4000000000, \"i\" : 4000000000} } }" } ], "decodeErrors": [ diff --git a/test/bson_corpus/top.json b/test/bson_corpus/top.json index 68b51195ab..5352a3faf3 100644 --- a/test/bson_corpus/top.json +++ b/test/bson_corpus/top.json @@ -69,11 +69,11 @@ "parseErrors": [ { "description" : "Bad $regularExpression (extra field)", - "string" : "{\"a\" : \"$regularExpression\": {\"pattern\": \"abc\", \"options\": \"\", \"unrelated\": true}}}" + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\", \"options\": \"\", \"unrelated\": true}}}" }, { "description" : "Bad $regularExpression (missing options field)", - "string" : "{\"a\" : \"$regularExpression\": {\"pattern\": \"abc\"}}}" + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\"}}}" }, { "description": "Bad $regularExpression (pattern is number, not string)", @@ -85,7 +85,7 @@ }, { "description" : "Bad $regularExpression (missing pattern field)", - "string" : "{\"a\" : \"$regularExpression\": {\"options\":\"ix\"}}}" + "string" : "{\"a\" : {\"$regularExpression\": {\"options\":\"ix\"}}}" }, { "description": "Bad $oid (number, not string)", @@ -151,6 +151,10 @@ "description": "Bad $code (type is number, not string)", "string": "{\"a\" : {\"$code\" : 42}}" }, + { + "description": "Bad $code (type is number, not string) when $scope is also present", + "string": "{\"a\" : {\"$code\" : 42, \"$scope\" : {}}}" + }, { "description": "Bad $code (extra field)", "string": "{\"a\" : {\"$code\" : \"\", \"unrelated\": true}}" diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 0c461cf404..780ea49a3c 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -115,7 +115,7 @@ def run_test(self): continue # Special case for testing encoding UUID as binary subtype 0x04. - if description == 'subtype 0x04': + if description.startswith('subtype 0x04'): encode_extjson = to_extjson_uuid_04 encode_bson = to_bson_uuid_04 else: @@ -203,6 +203,13 @@ def run_test(self): 'case: ' + description) except (ValueError, KeyError, TypeError, InvalidId): pass + elif bson_type == '0x05': + try: + decode_extjson(parse_error_case['string']) + raise AssertionError('exception not raised for test ' + 'case: ' + description) + except (TypeError, ValueError): + pass else: raise AssertionError('cannot test parseErrors for type ' + bson_type) From c54974067772f268a9a9dc6dbba0a490e6b8e842 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 11 Sep 2020 11:35:31 -0700 Subject: [PATCH 0212/2111] PYTHON-2362 Use dnspython<2.0 to avoid timeouts (#484) --- .evergreen/run-tests.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9987bd6945..58e3f42ac2 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -201,6 +201,12 @@ if [ -n "$COVERAGE" -a $PYTHON_IMPL = "CPython" ]; then fi fi +if $PYTHON -c 'import dns'; then + # Trying with/without --user to avoid: + # ERROR: Can not perform a '--user' install. User site-packages are not visible in this virtualenv. + $PYTHON -m pip install --upgrade --user 'dnspython<2.0.0' || $PYTHON -m pip install --upgrade 'dnspython<2.0.0' +fi + $PYTHON setup.py clean if [ -z "$GREEN_FRAMEWORK" ]; then if [ -z "$C_EXTENSIONS" -a $PYTHON_IMPL = "CPython" ]; then From 1b97eddfbd4414bf28955d84a97cb93865e37d40 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 14 Sep 2020 11:45:36 -0700 Subject: [PATCH 0213/2111] PYTHON-2262 Test Python 3.9 in Evergreen (#485) --- .evergreen/config.yml | 34 ++++++++++++++++++++++++++-------- doc/changelog.rst | 17 +++++++++++++++++ setup.py | 1 + test/test_custom_types.py | 2 +- 4 files changed, 45 insertions(+), 9 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9c599427d1..0293f9a6a4 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1767,6 +1767,10 @@ axes: display_name: "Python 3.8" variables: PYTHON_BINARY: "/opt/python/3.8/bin/python3" + - id: "3.9" + display_name: "Python 3.9" + variables: + PYTHON_BINARY: "/opt/python/3.9/bin/python3" - id: "pypy" display_name: "PyPy" variables: @@ -1821,6 +1825,10 @@ axes: display_name: "Python 3.8" variables: PYTHON_BINARY: "C:/python/Python38/python.exe" + - id: "3.9" + display_name: "Python 3.9" + variables: + PYTHON_BINARY: "C:/python/Python39/python.exe" - id: python-version-windows-32 display_name: "Python" @@ -1849,6 +1857,10 @@ axes: display_name: "32-bit Python 3.8" variables: PYTHON_BINARY: "C:/python/32/Python38/python.exe" + - id: "3.9" + display_name: "32-bit Python 3.9" + variables: + PYTHON_BINARY: "C:/python/32/Python39/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version @@ -2142,14 +2154,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "pypy", "pypy3.5"] auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.8", "3.9", "pypy", "pypy3.5"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2225,7 +2237,7 @@ buildvariants: - matrix_name: "tests-python-version-ubuntu1604-without-c-extensions" matrix_spec: platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7", "3.8"] + python-version: &openssl-102-plus-pythons ["3.7", "3.8", "3.9"] c-extensions: without-c-extensions auth-ssl: noauth-nossl display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" @@ -2242,7 +2254,7 @@ buildvariants: matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] c-extensions: "*" compression: "*" exclude_spec: @@ -2256,9 +2268,9 @@ buildvariants: python-version: ["jython2.7"] c-extensions: "*" compression: ["snappy", "zstd"] - # Some tests fail with CPython 3.8 and python-snappy + # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy - platform: ubuntu-16.04 - python-version: ["3.8"] + python-version: ["3.8", "3.9"] c-extensions: "*" compression: ["snappy"] display_name: "${compression} ${c-extensions} ${python-version} ${platform}" @@ -2311,7 +2323,7 @@ buildvariants: - matrix_name: "tests-python-version-requires-openssl-102-plus-test-ssl" matrix_spec: platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7", "3.8"] + python-version: *openssl-102-plus-pythons auth-ssl: "*" display_name: "${python-version} OpenSSL 1.0.2 ${platform} ${auth-ssl}" tasks: @@ -2347,6 +2359,12 @@ buildvariants: python-version-windows: "*" auth-ssl: "*" encryption: "*" + exclude_spec: + # PYTHON-2366 Skip 3.9 due to cryptography install failures + - platform: "*" + python-version-windows: ["3.9"] + auth-ssl: "*" + encryption: "*" display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions @@ -2509,7 +2527,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.8", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.8", "3.9", "pypy", "pypy3.5"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" diff --git a/doc/changelog.rst b/doc/changelog.rst index 19a4265c58..2143c18eac 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,23 @@ Changelog ========= +Changes in Version 3.12.0 +------------------------- + +Version 3.12 adds support for Python 3.9 and includes a number of bug fixes. +Highlights include: + +- Support for Python 3.9. +- New method :class:`bson.json_util.JSONOptions.with_options`. + +Issues Resolved +............... + +See the `PyMongo 3.12.0 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594 + Changes in Version 3.11.0 ------------------------- diff --git a/setup.py b/setup.py index 5f7e306c18..28ab6f6a37 100755 --- a/setup.py +++ b/setup.py @@ -420,6 +420,7 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 41b79a96d9..3172ed2834 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -255,7 +255,7 @@ def fallback_encoder(value): class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): - msg = "Can't instantiate abstract class .* with abstract methods .*" + msg = "Can't instantiate abstract class" def run_test(base, attrs, fail): codec = type('testcodec', (base,), attrs) if fail: From e1915fc89ba01f4f22f2a3ad1074978ff0fdf5b4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Sep 2020 09:06:19 -0700 Subject: [PATCH 0214/2111] PYTHON-2372 Build macOS releases in Evergreen (#486) --- .evergreen/build-mac.sh | 7 +++++-- .evergreen/config.yml | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 95be59c340..d089d3947d 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -2,11 +2,14 @@ for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do if [[ $VERSION == "2.7" ]]; then + PYTHON=/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python rm -rf build - python$VERSION setup.py bdist_egg + $PYTHON setup.py bdist_egg + else + PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 fi rm -rf build - python$VERSION setup.py bdist_wheel + $PYTHON setup.py bdist_wheel done ls dist diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0293f9a6a4..a47c777121 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2581,7 +2581,7 @@ buildvariants: - matrix_name: "Release" matrix_spec: - platform: [ubuntu-20.04, windows-64-vsMulti-small] + platform: [ubuntu-20.04, windows-64-vsMulti-small, macos-1014] display_name: "Release ${platform}" tasks: - name: "release" From 8e7026a83fbda69ad50ffbd382f3e5334272c2dd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Sep 2020 14:00:05 -0700 Subject: [PATCH 0215/2111] PYTHON-2345 Ensure release files can be installed (#487) --- .evergreen/build-mac.sh | 30 +++++++++++++- .evergreen/build-manylinux-internal.sh | 45 ++++++++++++++------- .evergreen/build-manylinux.sh | 2 +- .evergreen/build-windows.sh | 36 +++++++++++------ .evergreen/utils.sh | 55 ++++++++++++++++++++++++++ 5 files changed, 141 insertions(+), 27 deletions(-) create mode 100755 .evergreen/utils.sh diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index d089d3947d..b50afc1976 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -1,5 +1,13 @@ #!/bin/bash -ex +# Get access to testinstall. +. .evergreen/utils.sh + +# Create temp directory for validated files. +rm -rf validdist +mkdir -p validdist +mv dist/* validdist || true + for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do if [[ $VERSION == "2.7" ]]; then PYTHON=/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python @@ -9,7 +17,27 @@ for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 fi rm -rf build - $PYTHON setup.py bdist_wheel + + # Install wheel if not already there. + if ! $PYTHON -m wheel version; then + createvirtualenv $PYTHON releasevenv + WHEELPYTHON=python + pip install --upgrade wheel + else + WHEELPYTHON=$PYTHON + fi + + $WHEELPYTHON setup.py bdist_wheel + deactivate || true + rm -rf releasevenv + + # Test that each wheel is installable. + for release in dist/*; do + testinstall $PYTHON $release + mv $release validdist/ + done done +mv validdist/* dist +rm -rf validdist ls dist diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index c810700a29..4d330e4418 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -1,25 +1,42 @@ #!/bin/bash -ex -cd /pymongo +cd /src + +# Get access to testinstall. +. .evergreen/utils.sh + +# Create temp directory for validated files. +rm -rf validdist +mkdir -p validdist +mv dist/* validdist || true # Compile wheels -for PYBIN in /opt/python/*/bin; do +for PYTHON in /opt/python/*/bin/python; do # Skip Python 3.3 and 3.9. - if [[ "$PYBIN" == *"cp33"* || "$PYBIN" == *"cp39"* ]]; then + if [[ "$PYTHON" == *"cp33"* || "$PYTHON" == *"cp39"* ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 rm -rf build - ${PYBIN}/python setup.py bdist_wheel -done + $PYTHON setup.py bdist_wheel + rm -rf build -# https://github.com/pypa/manylinux/issues/49 -rm -rf build + # Audit wheels and write multilinux tag + for whl in dist/*.whl; do + # Skip already built manylinux1 wheels. + if [[ "$whl" != *"manylinux"* ]]; then + auditwheel repair $whl -w dist + rm $whl + fi + done -# Audit wheels and write multilinux1 tag -for whl in dist/*.whl; do - # Skip already built manylinux1 wheels. - if [[ "$whl" != *"manylinux"* ]]; then - auditwheel repair $whl -w dist - rm $whl - fi + # Test that each wheel is installable. + # Test without virtualenv because it's not present on manylinux containers. + for release in dist/*; do + testinstall $PYTHON $release "without-virtualenv" + mv $release validdist/ + done done + +mv validdist/* dist +rm -rf validdist +ls dist diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 501b0297df..1889754ae0 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -15,7 +15,7 @@ images=(quay.io/pypa/manylinux1_x86_64:2020-03-20-2fda31c \ for image in "${images[@]}"; do docker pull $image - docker run --rm -v `pwd`:/pymongo $image /pymongo/.evergreen/build-manylinux-internal.sh + docker run --rm -v `pwd`:/src $image /src/.evergreen/build-manylinux-internal.sh done ls dist diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index a34506f551..b12b384d15 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -1,18 +1,32 @@ #!/bin/bash -ex +# Get access to testinstall. +. .evergreen/utils.sh + +# Create temp directory for validated files. +rm -rf validdist +mkdir -p validdist +mv dist/* validdist || true + for VERSION in 27 34 35 36 37 38; do - PYTHON=C:/Python/Python${VERSION}/python.exe - PYTHON32=C:/Python/32/Python${VERSION}/python.exe - if [[ $VERSION == "2.7" ]]; then + _pythons=(C:/Python/Python${VERSION}/python.exe \ + C:/Python/32/Python${VERSION}/python.exe) + for PYTHON in "${_pythons[@]}"; do + if [[ $VERSION == "2.7" ]]; then + rm -rf build + $PYTHON setup.py bdist_egg + fi rm -rf build - $PYTHON setup.py bdist_egg - rm -rf build - $PYTHON32 setup.py bdist_egg - fi - rm -rf build - $PYTHON setup.py bdist_wheel - rm -rf build - $PYTHON32 setup.py bdist_wheel + $PYTHON setup.py bdist_wheel + + # Test that each wheel is installable. + for release in dist/*; do + testinstall $PYTHON $release + mv $release validdist/ + done + done done +mv validdist/* dist +rm -rf validdist ls dist diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh new file mode 100755 index 0000000000..f8dd1a6184 --- /dev/null +++ b/.evergreen/utils.sh @@ -0,0 +1,55 @@ +#!/bin/bash -ex + +# Usage: +# createvirtualenv /path/to/python /output/path/for/venv +# * param1: Python binary to use for the virtualenv +# * param2: Path to the virtualenv to create +function createvirtualenv { + PYTHON=$1 + VENVPATH=$2 + if $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" + elif command -v virtualenv; then + VIRTUALENV="$(command -v virtualenv) -p $PYTHON" + else + echo "Cannot test without virtualenv" + exit 1 + fi + $VIRTUALENV --system-site-packages --never-download $VENVPATH + if [ "Windows_NT" = "$OS" ]; then + . $VENVPATH/Scripts/activate + else + . $VENVPATH/bin/activate + fi +} + +# Usage: +# testinstall /path/to/python /path/to/.whl/or/.egg ["no-virtualenv"] +# * param1: Python binary to test +# * param2: Path to the wheel or egg file to install +# * param3 (optional): If set to a non-empty string, don't create a virtualenv. Used in manylinux containers. +function testinstall { + PYTHON=$1 + RELEASE=$2 + NO_VIRTUALENV=$3 + + if [ -z "$NO_VIRTUALENV" ]; then + createvirtualenv $PYTHON venvtestinstall + PYTHON=python + fi + + if [[ $RELEASE == *.egg ]]; then + $PYTHON -m easy_install $RELEASE + else + $PYTHON -m pip install --upgrade $RELEASE + fi + cd tools + $PYTHON fail_if_no_c.py + $PYTHON -m pip uninstall -y pymongo + cd .. + + if [ -z "$NO_VIRTUALENV" ]; then + deactivate + rm -rf venvtestinstall + fi +} From cccf37f5563c0bf1b1cba9980cf3a877762f15d0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Sep 2020 10:39:01 -0700 Subject: [PATCH 0216/2111] PYTHON-1631 Document new release process (#488) --- RELEASE.rst | 83 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 59 insertions(+), 24 deletions(-) diff --git a/RELEASE.rst b/RELEASE.rst index c7a030d526..15f617e26e 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -31,39 +31,74 @@ that changes the major version number. Doing a Release --------------- -1. Test releases on Python 2.7 and 3.4+ on Windows, Linux and OSX, - with and without the C extensions. It's generally enough to just run the - tests on 2.7, 3.4 and the latest 3.x version with and without the - extensions on a single platform, and then just test any version on the - other platforms as a sanity check. `python setup.py test` will build the - extensions and test. `python tools/clean.py` will remove the extensions, - and then `python setup.py --no_ext test` will run the tests without - them. You can also run the doctests: `python setup.py doc -t`. - -2. Add release notes to doc/changelog.rst. Generally just summarize/clarify +1. PyMongo is tested on Evergreen. Ensure the latest commit are passing CI + as expected: https://evergreen.mongodb.com/waterfall/mongo-python-driver. + To test locally, ``python setup.py test`` will build the C extensions and + test. ``python tools/clean.py`` will remove the extensions, + and then ``python setup.py --no_ext test`` will run the tests without + them. You can also run the doctests: ``python setup.py doc -t``. + +2. Check Jira to ensure all the tickets in this version have been completed. + +3. Add release notes to doc/changelog.rst. Generally just summarize/clarify the git log, but you might add some more long form notes for big changes. -3. Search and replace the "devN" version number w/ the new version number (see - note above). +4. Search and replace the "devN" version number w/ the new version number (see + note above in `Versioning`_). + +5. Make sure version number is updated in setup.py and pymongo/__init__.py + +6. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. + +7. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m '3.11.0' ``. -4. Make sure version number is updated in setup.py and pymongo/__init__.py +8. Push commit / tag, eg ``git push && git push --tags``. -5. Commit with a BUMP version_number message. +9. Pushing a tag will trigger a release process in Evergreen which builds + wheels and eggs for manylinux, macOS, and Windows. Wait for these jobs to + complete and then download the "Release files" archive from each task. See: + https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release -6. Tag w/ version_number + Unpack each downloaded archive so that we can upload the included files. For + the next steps let's assume we unpacked these files into the following paths:: -7. Push commit / tag. + $ ls path/to/manylinux + pymongo--cp27-cp27m-manylinux1_i686.whl + ... + pymongo--cp38-cp38-manylinux2014_x86_64.whl + $ ls path/to/mac/ + pymongo--cp27-cp27m-macosx_10_14_intel.whl + ... + pymongo--py2.7-macosx-10.14-intel.egg + $ ls path/to/windows/ + pymongo--cp27-cp27m-win32.whl + ... + pymongo--cp38-cp38-win_amd64.whl -8. Push source to PyPI: `python setup.py sdist upload` +10. Build the source distribution:: -9. Push binaries to PyPI; for each version of python and platform do:`python - setup.py bdist_egg upload`. Probably best to do `python setup.py bdist_egg` - first, to make sure the egg builds properly. We also publish wheels. - `python setup.py bdist_wheel upload`. + $ git clone git@github.com:mongodb/mongo-python-driver.git + $ cd mongo-python-driver + $ git checkout "" + $ python3 setup.py sdist -10. Make sure to push a build of the new docs (see the apidocs repo). + This will create the following distribution:: -11. Bump the version number to .dev0 in setup.py/__init__.py, + $ ls dist + pymongo-.tar.gz + +11. Upload all the release packages to PyPI with twine:: + + $ python3 -m twine upload dist/*.tar.gz path/to/manylinux/* path/to/mac/* path/to/windows/* + +12. Make sure the new version appears on https://pymongo.readthedocs.io/. If the + new version does not show up automatically, trigger a rebuild of "latest": + https://readthedocs.org/projects/pymongo/builds/ + +13. Bump the version number to .dev0 in setup.py/__init__.py, commit, push. -12. Announce! +14. Publish the release version in Jira. + +15. Announce the release on: + https://developer.mongodb.com/community/forums/c/community/release-notes/ From 8afbc645a77120012f79fb802194de394ccd09f4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 28 Sep 2020 15:00:40 -0700 Subject: [PATCH 0217/2111] PYTHON-2375 Remove macos system python workaround for missing wheel package (#491) --- .evergreen/build-mac.sh | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index b50afc1976..d68b54ffaa 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -18,18 +18,7 @@ for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do fi rm -rf build - # Install wheel if not already there. - if ! $PYTHON -m wheel version; then - createvirtualenv $PYTHON releasevenv - WHEELPYTHON=python - pip install --upgrade wheel - else - WHEELPYTHON=$PYTHON - fi - - $WHEELPYTHON setup.py bdist_wheel - deactivate || true - rm -rf releasevenv + $PYTHON setup.py bdist_wheel # Test that each wheel is installable. for release in dist/*; do From 65699332c470955f395cb49c0f4b83d5c7e9e995 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 28 Sep 2020 15:55:07 -0700 Subject: [PATCH 0218/2111] PYTHON-2376 Fix change stream test failures due to new updateDescription.truncatedArrays field in changeEvent documents (#489) --- test/test_change_stream.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 231b9101eb..851b599f99 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -367,8 +367,13 @@ def test_change_operations(self): self.assertEqual(change['operationType'], 'update') self.assertEqual(change['ns'], expected_ns) self.assertNotIn('fullDocument', change) - self.assertEqual({'updatedFields': {'new': 1}, - 'removedFields': ['foo']}, + + expected_update_description = { + 'updatedFields': {'new': 1}, + 'removedFields': ['foo']} + if client_context.version.at_least(4, 5, 0): + expected_update_description['truncatedArrays'] = [] + self.assertEqual(expected_update_description, change['updateDescription']) # Replace. self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'}) From b2fba416e9442c4002023311ddb1e63e348c63c4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 2 Oct 2020 11:30:39 -0700 Subject: [PATCH 0219/2111] PYTHON-2342 Prefer checking error codes over error messages (#492) --- pymongo/helpers.py | 92 +++++++++++++++++++++------------------------- pymongo/network.py | 3 +- pymongo/server.py | 6 +-- 3 files changed, 45 insertions(+), 56 deletions(-) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 67b2e15842..d5590d516e 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -102,7 +102,7 @@ def _index_document(index_list): return index -def _check_command_response(response, max_wire_version, msg=None, +def _check_command_response(response, max_wire_version, allowable_errors=None, parse_write_concern_error=False): """Check the response to a command for errors. @@ -117,55 +117,47 @@ def _check_command_response(response, max_wire_version, msg=None, if parse_write_concern_error and 'writeConcernError' in response: _raise_write_concern_error(response['writeConcernError']) - if not response["ok"]: - - details = response - # Mongos returns the error details in a 'raw' object - # for some errors. - if "raw" in response: - for shard in itervalues(response["raw"]): - # Grab the first non-empty raw error from a shard. - if shard.get("errmsg") and not shard.get("ok"): - details = shard - break - - errmsg = details["errmsg"] - if (allowable_errors is None - or (errmsg not in allowable_errors - and details.get("code") not in allowable_errors)): - - code = details.get("code") - # Server is "not master" or "recovering" - if code in _NOT_MASTER_CODES: - raise NotMasterError(errmsg, response) - elif ("not master" in errmsg - or "node is recovering" in errmsg): - raise NotMasterError(errmsg, response) - - # Server assertion failures - if errmsg == "db assertion failure": - errmsg = ("db assertion failure, assertion: '%s'" % - details.get("assertion", "")) - raise OperationFailure(errmsg, - details.get("assertionCode"), - response, - max_wire_version) - - # Other errors - # findAndModify with upsert can raise duplicate key error - if code in (11000, 11001, 12582): - raise DuplicateKeyError(errmsg, code, response, - max_wire_version) - elif code == 50: - raise ExecutionTimeout(errmsg, code, response, - max_wire_version) - elif code == 43: - raise CursorNotFound(errmsg, code, response, - max_wire_version) - - msg = msg or "%s" - raise OperationFailure(msg % errmsg, code, response, - max_wire_version) + if response["ok"]: + return + + details = response + # Mongos returns the error details in a 'raw' object + # for some errors. + if "raw" in response: + for shard in itervalues(response["raw"]): + # Grab the first non-empty raw error from a shard. + if shard.get("errmsg") and not shard.get("ok"): + details = shard + break + + errmsg = details["errmsg"] + code = details.get("code") + + # For allowable errors, only check for error messages when the code is not + # included. + if allowable_errors: + if code is not None: + if code in allowable_errors: + return + elif errmsg in allowable_errors: + return + + # Server is "not master" or "recovering" + if code in _NOT_MASTER_CODES: + raise NotMasterError(errmsg, response) + elif "not master" in errmsg or "node is recovering" in errmsg: + raise NotMasterError(errmsg, response) + + # Other errors + # findAndModify with upsert can raise duplicate key error + if code in (11000, 11001, 12582): + raise DuplicateKeyError(errmsg, code, response, max_wire_version) + elif code == 50: + raise ExecutionTimeout(errmsg, code, response, max_wire_version) + elif code == 43: + raise CursorNotFound(errmsg, code, response, max_wire_version) + + raise OperationFailure(errmsg, code, response, max_wire_version) def _check_gle_response(result, max_wire_version): diff --git a/pymongo/network.py b/pymongo/network.py index d9d645fa91..6dbcd3a7ac 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -157,8 +157,7 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, client._process_response(response_doc, session) if check: helpers._check_command_response( - response_doc, sock_info.max_wire_version, None, - allowable_errors, + response_doc, sock_info.max_wire_version, allowable_errors, parse_write_concern_error=parse_write_concern_error) except Exception as exc: if publish: diff --git a/pymongo/server.py b/pymongo/server.py index 8ea361e100..eb145d409a 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -131,10 +131,8 @@ def run_operation_with_response( user_fields=user_fields) if use_cmd: first = docs[0] - operation.client._process_response( - first, operation.session) - _check_command_response( - first, sock_info.max_wire_version) + operation.client._process_response(first, operation.session) + _check_command_response(first, sock_info.max_wire_version) except Exception as exc: if publish: duration = datetime.now() - start From 1c2651be58f8541e30839c7ab7dfde13ce8af3ce Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 2 Oct 2020 12:56:33 -0700 Subject: [PATCH 0220/2111] PYTHON-2357 Specify error label in retryable writes test (#494) PYTHON-2356 Add errorLabelsContain/errorLabelsOmit support to retryable writes tests --- .../insertOne-serverErrors.json | 5 ++++- test/test_retryable_writes.py | 22 +++++++------------ 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/insertOne-serverErrors.json index 59f6d9b51a..cb1e6f826b 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/insertOne-serverErrors.json @@ -966,7 +966,10 @@ ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down" + "errmsg": "Replication is being shut down", + "errorLabels": [ + "RetryableWriteError" + ] } } }, diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 88a122d513..3060b641c8 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -38,7 +38,6 @@ from pymongo.write_concern import WriteConcern from test import unittest, client_context, IntegrationTest, SkipTest, client_knobs -from test.test_crud_v1 import check_result as crud_v1_check_result from test.utils import (rs_or_single_client, DeprecationFilter, OvertCommandListener, @@ -62,20 +61,15 @@ def get_scenario_coll_name(self, scenario_def): return scenario_def.get('collection_name', 'test') def run_test_ops(self, sessions, collection, test): + # Transform retryable writes spec format into transactions. + operation = test['operation'] outcome = test['outcome'] - should_fail = outcome.get('error') - result = None - error = None - try: - result = self.run_operation( - sessions, collection, test['operation']) - except (ConnectionFailure, OperationFailure) as exc: - error = exc - if should_fail: - self.assertIsNotNone(error, 'should have raised an error') - else: - self.assertIsNone(error) - crud_v1_check_result(self, outcome['result'], result) + if 'error' in outcome: + operation['error'] = outcome['error'] + if 'result' in outcome: + operation['result'] = outcome['result'] + test['operations'] = [operation] + super(TestAllScenarios, self).run_test_ops(sessions, collection, test) def create_test(scenario_def, test, name): From 594b211ff1d73e4c5c1f1631390c2bf605453a27 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 6 Oct 2020 11:01:11 -0700 Subject: [PATCH 0221/2111] PYTHON-2382 Destroy codec options struct in _cbson._element_to_dict (#496) --- bson/_cbsonmodule.c | 1 + 1 file changed, 1 insertion(+) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index f457f96b03..34f3ab6f67 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -2713,6 +2713,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { return NULL; } + destroy_codec_options(&options); return result_tuple; } From 337a08c43d832c6838f07a46d6fac8ebbdacb2b6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 6 Oct 2020 11:37:27 -0700 Subject: [PATCH 0222/2111] PYTHON-2360 Ensure ConnectionCreatedEvents are emitted before ConnectionReadyEvents (#493) Connections created in the background (for minPoolSize) are authenticated. --- pymongo/pool.py | 6 +++ test/cmap/connection-must-have-id.json | 18 ++++++--- test/cmap/connection-must-order-ids.json | 18 ++++++--- test/cmap/pool-checkin-destroy-closed.json | 9 +++-- test/cmap/pool-checkin-destroy-stale.json | 9 +++-- test/cmap/pool-checkin-make-available.json | 9 +++-- test/cmap/pool-checkin.json | 3 +- test/cmap/pool-checkout-connection.json | 20 +++++++--- test/cmap/pool-checkout-multiple.json | 9 +++-- test/cmap/pool-checkout-no-idle.json | 12 ++++-- test/cmap/pool-checkout-no-stale.json | 12 ++++-- test/cmap/pool-close-destroy-conns.json | 6 ++- test/cmap/pool-create-max-size.json | 45 ++++++++++++++-------- test/cmap/pool-create-min-size.json | 17 ++++++-- test/cmap/wait-queue-timeout.json | 15 +++++--- test/test_cmap.py | 23 ++--------- 16 files changed, 148 insertions(+), 83 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 92814e8570..9aed758456 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1193,6 +1193,12 @@ def connect(self, all_credentials=None): sock_info.ismaster(all_credentials) self.is_writable = sock_info.is_writable + try: + sock_info.check_auth(all_credentials) + except Exception: + sock_info.close_socket(ConnectionClosedReason.ERROR) + raise + return sock_info @contextlib.contextmanager diff --git a/test/cmap/connection-must-have-id.json b/test/cmap/connection-must-have-id.json index 487a5979d0..7ed6790228 100644 --- a/test/cmap/connection-must-have-id.json +++ b/test/cmap/connection-must-have-id.json @@ -12,26 +12,32 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/connection-must-order-ids.json b/test/cmap/connection-must-order-ids.json index dda515c1a9..9b839e8f06 100644 --- a/test/cmap/connection-must-order-ids.json +++ b/test/cmap/connection-must-order-ids.json @@ -12,26 +12,32 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 2 + "connectionId": 2, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkin-destroy-closed.json b/test/cmap/pool-checkin-destroy-closed.json index 3b6f1d2484..a73afbf752 100644 --- a/test/cmap/pool-checkin-destroy-closed.json +++ b/test/cmap/pool-checkin-destroy-closed.json @@ -18,7 +18,8 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolClosed", @@ -26,12 +27,14 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "poolClosed" + "reason": "poolClosed", + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkin-destroy-stale.json b/test/cmap/pool-checkin-destroy-stale.json index 7faa44d33c..600c052071 100644 --- a/test/cmap/pool-checkin-destroy-stale.json +++ b/test/cmap/pool-checkin-destroy-stale.json @@ -18,7 +18,8 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolCleared", @@ -26,12 +27,14 @@ }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale" + "reason": "stale", + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkin-make-available.json b/test/cmap/pool-checkin-make-available.json index 838194fe8e..015928c50d 100644 --- a/test/cmap/pool-checkin-make-available.json +++ b/test/cmap/pool-checkin-make-available.json @@ -18,15 +18,18 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkin.json b/test/cmap/pool-checkin.json index 5e93c207a9..7073895ad2 100644 --- a/test/cmap/pool-checkin.json +++ b/test/cmap/pool-checkin.json @@ -15,7 +15,8 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-connection.json b/test/cmap/pool-checkout-connection.json index e6e108ce58..4d39b15688 100644 --- a/test/cmap/pool-checkout-connection.json +++ b/test/cmap/pool-checkout-connection.json @@ -9,16 +9,26 @@ ], "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionReady", + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 } ], "ignore": [ - "ConnectionPoolCreated", - "ConnectionCreated", - "ConnectionReady" + "ConnectionPoolCreated" ] } diff --git a/test/cmap/pool-checkout-multiple.json b/test/cmap/pool-checkout-multiple.json index f3ecdb9be9..fee0d076cf 100644 --- a/test/cmap/pool-checkout-multiple.json +++ b/test/cmap/pool-checkout-multiple.json @@ -43,15 +43,18 @@ "events": [ { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-no-idle.json b/test/cmap/pool-checkout-no-idle.json index 77ce40deac..74325d655d 100644 --- a/test/cmap/pool-checkout-no-idle.json +++ b/test/cmap/pool-checkout-no-idle.json @@ -30,20 +30,24 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 1, - "reason": "idle" + "reason": "idle", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-no-stale.json b/test/cmap/pool-checkout-no-stale.json index e5ebedfbe5..67ee507fe8 100644 --- a/test/cmap/pool-checkout-no-stale.json +++ b/test/cmap/pool-checkout-no-stale.json @@ -26,11 +26,13 @@ }, { "type": "ConnectionCheckedOut", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 1 + "connectionId": 1, + "address": 42 }, { "type": "ConnectionPoolCleared", @@ -39,11 +41,13 @@ { "type": "ConnectionClosed", "connectionId": 1, - "reason": "stale" + "reason": "stale", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 2 + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-close-destroy-conns.json b/test/cmap/pool-close-destroy-conns.json index 2bc50419b4..e1fb9d0783 100644 --- a/test/cmap/pool-close-destroy-conns.json +++ b/test/cmap/pool-close-destroy-conns.json @@ -24,12 +24,14 @@ "events": [ { "type": "ConnectionCheckedIn", - "connectionId": 2 + "connectionId": 2, + "address": 42 }, { "type": "ConnectionClosed", "connectionId": 2, - "reason": "poolClosed" + "reason": "poolClosed", + "address": 42 }, { "type": "ConnectionPoolClosed", diff --git a/test/cmap/pool-create-max-size.json b/test/cmap/pool-create-max-size.json index 2ba7bdf62b..b585d0daec 100644 --- a/test/cmap/pool-create-max-size.json +++ b/test/cmap/pool-create-max-size.json @@ -53,59 +53,74 @@ "options": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-create-min-size.json b/test/cmap/pool-create-min-size.json index 470988043f..4fdc42f4eb 100644 --- a/test/cmap/pool-create-min-size.json +++ b/test/cmap/pool-create-min-size.json @@ -11,6 +11,11 @@ "event": "ConnectionCreated", "count": 3 }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 3 + }, { "name": "checkOut" } @@ -23,19 +28,23 @@ }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCreated", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/wait-queue-timeout.json b/test/cmap/wait-queue-timeout.json index 90ec2f62d9..ee7cf27955 100644 --- a/test/cmap/wait-queue-timeout.json +++ b/test/cmap/wait-queue-timeout.json @@ -39,22 +39,27 @@ }, "events": [ { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckedOut", - "connectionId": 42 + "connectionId": 42, + "address": 42 }, { - "type": "ConnectionCheckOutStarted" + "type": "ConnectionCheckOutStarted", + "address": 42 }, { "type": "ConnectionCheckOutFailed", - "reason": "timeout" + "reason": "timeout", + "address": 42 }, { "type": "ConnectionCheckedIn", - "connectionId": 42 + "connectionId": 42, + "address": 42 } ], "ignore": [ diff --git a/test/test_cmap.py b/test/test_cmap.py index 6cd0118d5d..bd22cdd729 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -23,6 +23,7 @@ sys.path[0:0] = [""] from pymongo.errors import (ConnectionFailure, + OperationFailure, PyMongoError) from pymongo.monitoring import (ConnectionCheckedInEvent, ConnectionCheckedOutEvent, @@ -331,27 +332,12 @@ def mock_connect(*args, **kwargs): def test_5_check_out_fails_auth_error(self): listener = CMAPListener() - client = single_client(event_listeners=[listener]) + client = single_client(username="notauser", password="fail", + event_listeners=[listener]) self.addCleanup(client.close) - pool = get_pool(client) - connect = pool.connect - - def mock_check_auth(self, *args, **kwargs): - self.close_socket(ConnectionClosedReason.ERROR) - raise ConnectionFailure('auth failed') - - def mock_connect(*args, **kwargs): - sock_info = connect(*args, **kwargs) - sock_info.check_auth = functools.partial(mock_check_auth, sock_info) - # Un-patch to break the cyclic reference. - self.addCleanup(delattr, sock_info, 'check_auth') - return sock_info - pool.connect = mock_connect - # Un-patch Pool.connect to break the cyclic reference. - self.addCleanup(delattr, pool, 'connect') # Attempt to create a new connection. - with self.assertRaisesRegex(ConnectionFailure, 'auth failed'): + with self.assertRaisesRegex(OperationFailure, 'failed'): client.admin.command('isMaster') self.assertIsInstance(listener.events[0], PoolCreatedEvent) @@ -362,7 +348,6 @@ def mock_connect(*args, **kwargs): self.assertIsInstance(listener.events[3], ConnectionClosedEvent) self.assertIsInstance(listener.events[4], ConnectionCheckOutFailedEvent) - self.assertIsInstance(listener.events[5], PoolClearedEvent) failed_event = listener.events[4] self.assertEqual( From d3e66a67021c3a587d125e46fb996c422164f2ba Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 8 Oct 2020 10:13:42 -0700 Subject: [PATCH 0223/2111] PYTHON-2308 Test that we exclusively depend on existence of logicalSessionsTimeoutMinutes for sessions support (#499) --- test/sessions/dirty-session-errors.json | 165 --------------------- test/sessions/server-support.json | 181 ++++++++++++++++++++++++ 2 files changed, 181 insertions(+), 165 deletions(-) create mode 100644 test/sessions/server-support.json diff --git a/test/sessions/dirty-session-errors.json b/test/sessions/dirty-session-errors.json index 9eccff0593..bdc67e457e 100644 --- a/test/sessions/dirty-session-errors.json +++ b/test/sessions/dirty-session-errors.json @@ -21,171 +21,6 @@ } ], "tests": [ - { - "description": "Clean explicit session is not discarded", - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0" - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - }, - "lsid": "session0" - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Clean implicit session is not discarded", - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, { "description": "Dirty explicit session is discarded", "clientOptions": { diff --git a/test/sessions/server-support.json b/test/sessions/server-support.json new file mode 100644 index 0000000000..967c9143fd --- /dev/null +++ b/test/sessions/server-support.json @@ -0,0 +1,181 @@ +{ + "runOn": [ + { + "minServerVersion": "3.6.0" + } + ], + "database_name": "session-tests", + "collection_name": "test", + "data": [ + { + "_id": 1 + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "result": { + "insertedId": 2 + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": -1 + } + }, + "result": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": "session0" + }, + "command_name": "insert", + "database_name": "session-tests" + } + }, + { + "command_started_event": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": "session0" + }, + "command_name": "find", + "database_name": "session-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "result": { + "insertedId": 2 + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": -1 + } + }, + "result": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true + }, + "command_name": "insert", + "database_name": "session-tests" + } + }, + { + "command_started_event": { + "command": { + "find": "test", + "filter": { + "_id": -1 + } + }, + "command_name": "find", + "database_name": "session-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + } + ] +} From 2818a32855a53799b58343bff0a46c5227057b19 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 12 Oct 2020 12:21:45 -0700 Subject: [PATCH 0224/2111] PYTHON-2392 Implicit sessions should always be discarded after connection errors (#498) PYTHON-2075 Add more sessions tests with more read and write commands --- pymongo/mongo_client.py | 17 +- test/sessions/dirty-session-errors.json | 315 +++++++++++++++++++++++- 2 files changed, 324 insertions(+), 8 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ef39cfc65a..4ed23855f3 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1827,17 +1827,20 @@ def _tmp_session(self, session, close=True): return s = self._ensure_session(session) - if s and close: - with s: - # Call end_session when we exit this scope. - yield s - elif s: + if s: try: - # Only call end_session on error. yield s - except Exception: + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. s.end_session() raise + finally: + # Call end_session when we exit this scope. + if close: + s.end_session() else: yield None diff --git a/test/sessions/dirty-session-errors.json b/test/sessions/dirty-session-errors.json index bdc67e457e..408904ac5f 100644 --- a/test/sessions/dirty-session-errors.json +++ b/test/sessions/dirty-session-errors.json @@ -192,6 +192,149 @@ } } }, + { + "description": "Dirty explicit session is discarded (non-bulk write)", + "clientOptions": { + "retryWrites": true + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1 + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": -1 + } + }, + "result": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "readConcern": null, + "writeConcern": null + }, + "command_name": "findAndModify", + "database_name": "session-tests" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "readConcern": null, + "writeConcern": null + }, + "command_name": "findAndModify", + "database_name": "session-tests" + } + }, + { + "command_started_event": { + "command": { + "find": "test", + "filter": { + "_id": -1 + } + }, + "command_name": "find", + "database_name": "session-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, { "description": "Dirty implicit session is discarded (write)", "clientOptions": { @@ -300,6 +443,128 @@ } } }, + { + "description": "Dirty implicit session is discarded (non-bulk write)", + "clientOptions": { + "retryWrites": true + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + }, + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1 + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": -1 + } + }, + "result": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "txnNumber": { + "$numberLong": "1" + }, + "readConcern": null, + "writeConcern": null + }, + "command_name": "findAndModify", + "database_name": "session-tests" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "txnNumber": { + "$numberLong": "1" + }, + "readConcern": null, + "writeConcern": null + }, + "command_name": "findAndModify", + "database_name": "session-tests" + } + }, + { + "command_started_event": { + "command": { + "find": "test", + "filter": { + "_id": -1 + } + }, + "command_name": "find", + "database_name": "session-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, { "description": "Dirty implicit session is discarded (read)", "failPoint": { @@ -353,6 +618,54 @@ ] } } + }, + { + "description": "Dirty implicit session is discarded (non-cursor returning read)", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + }, + "operations": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "error": true + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": -1 + } + }, + "result": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner" + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } } ] -} +} \ No newline at end of file From 1002938a8e9782e8a2f000aad6ca27079fcb49a8 Mon Sep 17 00:00:00 2001 From: Nick Loadholtes Date: Wed, 14 Oct 2020 12:48:06 -0400 Subject: [PATCH 0225/2111] Fix maxIdleTimeMS typo in docs (#503) --- doc/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/faq.rst b/doc/faq.rst index faa4d803ae..8e820229ab 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -77,7 +77,7 @@ network errors, causing the total number of sockets (both in use and idle) to drop below the minimum, more sockets are opened until the minimum is reached. The maximum number of milliseconds that a connection can remain idle in the -pool before being removed and replaced can be set with ``maxIdleTime``, which +pool before being removed and replaced can be set with ``maxIdleTimeMS``, which defaults to `None` (no limit). The default configuration for a :class:`~pymongo.mongo_client.MongoClient` From 87e1d4b967356f8240e8d5f0e95cb5da27980e96 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 14 Oct 2020 16:08:43 -0700 Subject: [PATCH 0226/2111] Revert "PYTHON-2362 Use dnspython<2.0 to avoid timeouts (#484)" (#501) This reverts commit c54974067772f268a9a9dc6dbba0a490e6b8e842. --- .evergreen/run-tests.sh | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 58e3f42ac2..9987bd6945 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -201,12 +201,6 @@ if [ -n "$COVERAGE" -a $PYTHON_IMPL = "CPython" ]; then fi fi -if $PYTHON -c 'import dns'; then - # Trying with/without --user to avoid: - # ERROR: Can not perform a '--user' install. User site-packages are not visible in this virtualenv. - $PYTHON -m pip install --upgrade --user 'dnspython<2.0.0' || $PYTHON -m pip install --upgrade 'dnspython<2.0.0' -fi - $PYTHON setup.py clean if [ -z "$GREEN_FRAMEWORK" ]; then if [ -z "$C_EXTENSIONS" -a $PYTHON_IMPL = "CPython" ]; then From 7f1644c6db15622f1038a2a3ae389264f3a2e7ac Mon Sep 17 00:00:00 2001 From: Martin Uhrin Date: Sat, 17 Oct 2020 00:16:52 +0200 Subject: [PATCH 0227/2111] Added the mincePy ODM library to documented tools (#502) --- doc/tools.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/tools.rst b/doc/tools.rst index 9633bdc195..65b38c16a8 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -47,6 +47,15 @@ Humongolus possible. The code is available for download `at GitHub `_. Tutorials and usage examples are also available at GitHub. + +MincePy + `MincePy `_ is an + object-document mapper (ODM) designed to make any Python object storable + and queryable in a MongoDB database. It is designed with machine learning + and big-data computational and experimental science applications in mind + but is entirely general and can be useful to anyone looking to organise, + share, or process large amounts data with as little change to their current + workflow as possible. Ming `Ming `_ (the Merciless) is a @@ -71,7 +80,7 @@ MotorEngine It implements the same modeling APIs to be data-portable, meaning that a model defined in MongoEngine can be read in MotorEngine. The source is `available on GitHub `_. - + uMongo `uMongo `_ is a Python MongoDB ODM. Its inception comes from two needs: the lack of async ODM and the From b210bffc750ab1848f1db6f3f752526d91ee3fd3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 19 Oct 2020 15:22:28 -0700 Subject: [PATCH 0228/2111] PYTHON-2344 Update TLS examples to use unified TLS URI options (#504) --- doc/examples/authentication.rst | 33 ++++++++---------- doc/examples/tls.rst | 59 +++++++++++++++++---------------- 2 files changed, 44 insertions(+), 48 deletions(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index bf5fff1303..4989e6a91e 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -128,29 +128,26 @@ MONGODB-X509 The MONGODB-X509 mechanism authenticates a username derived from the distinguished subject name of the X.509 certificate presented by the driver -during SSL negotiation. This authentication method requires the use of SSL -connections with certificate validation and is available in MongoDB 2.6 -and newer:: +during TLS/SSL negotiation. This authentication method requires the use of +TLS/SSL connections with certificate validation and is available in +MongoDB 2.6 and newer:: - >>> import ssl >>> from pymongo import MongoClient >>> client = MongoClient('example.com', ... username="" ... authMechanism="MONGODB-X509", - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', + ... tlsCAFile='/path/to/ca.pem') MONGODB-X509 authenticates against the $external virtual database, so you do not have to specify a database in the URI:: >>> uri = "mongodb://@example.com/?authMechanism=MONGODB-X509" >>> client = MongoClient(uri, - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', + ... tlsCAFile='/path/to/ca.pem') >>> .. versionchanged:: 3.4 @@ -242,17 +239,15 @@ These examples use the $external virtual database for LDAP support:: >>> SASL PLAIN is a clear-text authentication mechanism. We **strongly** recommend -that you connect to MongoDB using SSL with certificate validation when using -the SASL PLAIN mechanism:: +that you connect to MongoDB using TLS/SSL with certificate validation when +using the SASL PLAIN mechanism:: - >>> import ssl >>> from pymongo import MongoClient >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" >>> client = MongoClient(uri, - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', - ... ssl_cert_reqs=ssl.CERT_REQUIRED, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', + ... tlsCAFile='/path/to/ca.pem') >>> .. _MONGODB-AWS: diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 133c4125de..780dec3938 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -77,14 +77,14 @@ Basic configuration ................... In many cases connecting to MongoDB over TLS/SSL requires nothing more than -passing ``ssl=True`` as a keyword argument to +passing ``tls=True`` as a keyword argument to :class:`~pymongo.mongo_client.MongoClient`:: - >>> client = pymongo.MongoClient('example.com', ssl=True) + >>> client = pymongo.MongoClient('example.com', tls=True) -Or passing ``ssl=true`` in the URI:: +Or passing ``tls=true`` in the URI:: - >>> client = pymongo.MongoClient('mongodb://example.com/?ssl=true') + >>> client = pymongo.MongoClient('mongodb://example.com/?tls=true') This configures PyMongo to connect to the server using TLS, verify the server's certificate and verify that the host you are attempting to connect to is listed @@ -94,17 +94,17 @@ Certificate verification policy ............................... By default, PyMongo is configured to require a certificate from the server when -TLS is enabled. This is configurable using the `ssl_cert_reqs` option. To -disable this requirement pass ``ssl.CERT_NONE`` as a keyword parameter:: +TLS is enabled. This is configurable using the ``tlsAllowInvalidCertificates`` +option. To disable this requirement pass ``tlsAllowInvalidCertificates=True`` +as a keyword parameter:: - >>> import ssl >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_cert_reqs=ssl.CERT_NONE) + ... tls=True, + ... tlsAllowInvalidCertificates=True) Or, in the URI:: - >>> uri = 'mongodb://example.com/?ssl=true&ssl_cert_reqs=CERT_NONE' + >>> uri = 'mongodb://example.com/?tls=true&tlsAllowInvalidCertificates=true' >>> client = pymongo.MongoClient(uri) Specifying a CA file @@ -113,32 +113,32 @@ Specifying a CA file In some cases you may want to configure PyMongo to use a specific set of CA certificates. This is most often the case when you are acting as your own certificate authority rather than using server certificates signed by a well -known authority. The `ssl_ca_certs` option takes a path to a CA file. It can be +known authority. The ``tlsCAFile`` option takes a path to a CA file. It can be passed as a keyword argument:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_ca_certs='/path/to/ca.pem') + ... tls=True, + ... tlsCAFile='/path/to/ca.pem') Or, in the URI:: - >>> uri = 'mongodb://example.com/?ssl=true&ssl_ca_certs=/path/to/ca.pem' + >>> uri = 'mongodb://example.com/?tls=true&tlsCAFile=/path/to/ca.pem' >>> client = pymongo.MongoClient(uri) Specifying a certificate revocation list ........................................ Python 2.7.9+ (pypy 2.5.1+) and 3.4+ provide support for certificate revocation -lists. The `ssl_crlfile` option takes a path to a CRL file. It can be passed as -a keyword argument:: +lists. The ``tlsCRLFile`` option takes a path to a CRL file. It can be passed +as a keyword argument:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_crlfile='/path/to/crl.pem') + ... tls=True, + ... tlsCRLFile='/path/to/crl.pem') Or, in the URI:: - >>> uri = 'mongodb://example.com/?ssl=true&ssl_crlfile=/path/to/crl.pem' + >>> uri = 'mongodb://example.com/?tls=true&tlsCRLFile=/path/to/crl.pem' >>> client = pymongo.MongoClient(uri) .. note:: Certificate revocation lists and :ref:`OCSP` cannot be used together. @@ -147,28 +147,29 @@ Client certificates ................... PyMongo can be configured to present a client certificate using the -`ssl_certfile` option:: +``tlsCertificateKeyFile`` option:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem') + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem') If the private key for the client certificate is stored in a separate file use -the `ssl_keyfile` option:: +the ``ssl_keyfile`` option:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', ... ssl_keyfile='/path/to/key.pem') Python 2.7.9+ (pypy 2.5.1+) and 3.3+ support providing a password or passphrase -to decrypt encrypted private keys. Use the `ssl_pem_passphrase` option:: +to decrypt encrypted private keys. Use the ``tlsCertificateKeyFilePassword`` +option:: >>> client = pymongo.MongoClient('example.com', - ... ssl=True, - ... ssl_certfile='/path/to/client.pem', + ... tls=True, + ... tlsCertificateKeyFile='/path/to/client.pem', ... ssl_keyfile='/path/to/key.pem', - ... ssl_pem_passphrase=) + ... tlsCertificateKeyFilePassword=) These options can also be passed as part of the MongoDB URI. From e340428f24954263f1914fe1f341a8ea9a9061e5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 21 Oct 2020 12:29:12 -0700 Subject: [PATCH 0229/2111] PYTHON-2402 Update suse12-test to suse12-sp5-small --- .evergreen/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a47c777121..1d89a83f7d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1588,7 +1588,7 @@ axes: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel72-zseries-test/master/latest/libmongocrypt.tar.gz - id: suse12-x86-64-test display_name: "SUSE 12 (x86_64)" - run_on: suse12-test + run_on: suse12-sp5-small batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/suse12-64/master/latest/libmongocrypt.tar.gz From c8be79f4a88c2d6d96c1098d92fee20973d28daf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 21 Oct 2020 13:27:00 -0700 Subject: [PATCH 0230/2111] PYTHON-2367 Add release automation for Python 3.9 (#505) PYTHON-2375 Reinstate macos system python workaround for missing wheel package Increase task timeout because the manylinux build task takes >30 minutes. --- .evergreen/build-mac.sh | 13 ++++++++++++- .evergreen/build-manylinux-internal.sh | 3 +-- .evergreen/build-manylinux.sh | 3 ++- .evergreen/build-windows.sh | 2 +- .evergreen/config.yml | 1 + 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index d68b54ffaa..b50afc1976 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -18,7 +18,18 @@ for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do fi rm -rf build - $PYTHON setup.py bdist_wheel + # Install wheel if not already there. + if ! $PYTHON -m wheel version; then + createvirtualenv $PYTHON releasevenv + WHEELPYTHON=python + pip install --upgrade wheel + else + WHEELPYTHON=$PYTHON + fi + + $WHEELPYTHON setup.py bdist_wheel + deactivate || true + rm -rf releasevenv # Test that each wheel is installable. for release in dist/*; do diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 4d330e4418..38deddd194 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,8 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - # Skip Python 3.3 and 3.9. - if [[ "$PYTHON" == *"cp33"* || "$PYTHON" == *"cp39"* ]]; then + if [[ ! $PYTHON =~ (cp27|cp34|cp35|cp36|cp37|cp38|cp39) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 1889754ae0..ba727654ec 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -27,7 +27,8 @@ unexpected=$(find dist \! \( -iname dist -or \ -iname '*cp35*' -or \ -iname '*cp36*' -or \ -iname '*cp37*' -or \ - -iname '*cp38*' \)) + -iname '*cp38*' -or \ + -iname '*cp39*' \)) if [ -n "$unexpected" ]; then echo "Unexpected files:" $unexpected exit 1 diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index b12b384d15..89235dd79a 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 27 34 35 36 37 38; do +for VERSION in 27 34 35 36 37 38 39; do _pythons=(C:/Python/Python${VERSION}/python.exe \ C:/Python/32/Python${VERSION}/python.exe) for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1d89a83f7d..12040c5b23 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -817,6 +817,7 @@ tasks: - name: "release" tags: ["release"] + exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). git_tag_only: true commands: - command: shell.exec From a7710210a7664b1313b6ae407bd3c5c011bf1851 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 28 Oct 2020 20:20:48 -0700 Subject: [PATCH 0231/2111] PYTHON-1960 Use a virtualenv to run encryption tests (#508) --- .evergreen/run-tests.sh | 28 ++++++++++++++++++++-------- .evergreen/utils.sh | 4 ++-- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9987bd6945..c1a7286437 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: @@ -47,6 +47,9 @@ if [ "$SSL" != "nossl" ]; then export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" fi +# For createvirtualenv. +. .evergreen/utils.sh + if [ -z "$PYTHON_BINARY" ]; then VIRTUALENV=$(command -v virtualenv) || true if [ -z "$VIRTUALENV" ]; then @@ -109,6 +112,7 @@ if [ -n "$TEST_PYOPENSSL" ]; then . pyopenssltest/bin/activate fi trap "deactivate; rm -rf pyopenssltest" EXIT HUP + PYTHON=python IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") if [ $IS_PYTHON_2 = "1" ]; then @@ -125,6 +129,14 @@ if [ -n "$TEST_PYOPENSSL" ]; then fi if [ -n "$TEST_ENCRYPTION" ]; then + createvirtualenv $PYTHON venv-encryption + trap "deactivate; rm -rf venv-encryption" EXIT HUP + PYTHON=python + + if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + $PYTHON -m pip install -U setuptools + fi + if [ -z "$LIBMONGOCRYPT_URL" ]; then echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" exit 1 @@ -152,13 +164,13 @@ if [ -n "$TEST_ENCRYPTION" ]; then exit 1 fi - git clone --branch master git@github.com:mongodb/libmongocrypt.git libmongocrypt_git - $PYTHON -m pip install --upgrade ./libmongocrypt_git/bindings/python - # TODO: use a virtualenv - trap "$PYTHON -m pip uninstall -y pymongocrypt" EXIT HUP - $PYTHON -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" - $PYTHON -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" - # PATH is set by PREPARE_SHELL. + # TODO: Test with 'pip install pymongocrypt' + git clone --branch master https://github.com/mongodb/libmongocrypt.git libmongocrypt_git + python -m pip install --upgrade ./libmongocrypt_git/bindings/python + python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" + python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" + # PATH is updated by PREPARE_SHELL for access to mongocryptd. + fi PYTHON_IMPL=$($PYTHON -c "import platform, sys; sys.stdout.write(platform.python_implementation())") diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index f8dd1a6184..9a92f15295 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -4,7 +4,7 @@ # createvirtualenv /path/to/python /output/path/for/venv # * param1: Python binary to use for the virtualenv # * param2: Path to the virtualenv to create -function createvirtualenv { +createvirtualenv () { PYTHON=$1 VENVPATH=$2 if $PYTHON -m virtualenv --version; then @@ -28,7 +28,7 @@ function createvirtualenv { # * param1: Python binary to test # * param2: Path to the wheel or egg file to install # * param3 (optional): If set to a non-empty string, don't create a virtualenv. Used in manylinux containers. -function testinstall { +testinstall () { PYTHON=$1 RELEASE=$2 NO_VIRTUALENV=$3 From e49c418264c1a4dde833e16b1cbb1503f24223ec Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 29 Oct 2020 13:44:04 -0700 Subject: [PATCH 0232/2111] PYTHON-2371 Add Azure and GCP support for CSFLE (#506) --- .evergreen/config.yml | 11 +- pymongo/encryption.py | 44 +++++- pymongo/encryption_options.py | 18 ++- .../custom/azure-dek.json | 33 +++++ .../custom/azure-gcp-schema.json | 32 +++++ .../custom/gcp-dek.json | 35 +++++ test/test_encryption.py | 130 ++++++++++++++++++ 7 files changed, 292 insertions(+), 11 deletions(-) create mode 100644 test/client-side-encryption/custom/azure-dek.json create mode 100644 test/client-side-encryption/custom/azure-gcp-schema.json create mode 100644 test/client-side-encryption/custom/gcp-dek.json diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 12040c5b23..c161aeaa21 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -357,9 +357,14 @@ functions: working_dir: "src" script: | if [ -n "${test_encryption}" ]; then - cat < fle_aws_creds.sh + cat < fle_creds.sh export FLE_AWS_KEY="${fle_aws_key}" export FLE_AWS_SECRET="${fle_aws_secret}" + export FLE_AZURE_CLIENTID="${fle_azure_clientid}" + export FLE_AZURE_TENANTID="${fle_azure_tenantid}" + export FLE_AZURE_CLIENTSECRET="${fle_azure_clientsecret}" + export FLE_GCP_EMAIL="${fle_gcp_email}" + export FLE_GCP_PRIVATEKEY="${fle_gcp_privatekey}" EOT fi - command: shell.exec @@ -381,8 +386,8 @@ functions: if [ -n "${test_encryption}" ]; then # Disable xtrace (just in case it was accidentally set). set +x - . ./fle_aws_creds.sh - rm -f ./fle_aws_creds.sh + . ./fle_creds.sh + rm -f ./fle_creds.sh export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 952470809d..f3aa2aa283 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -358,9 +358,21 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used to generate KMS messages. - - `local`: Map with "key" as a 96-byte array or string. "key" - is the master key used to encrypt/decrypt data keys. This key - should be generated and stored as securely as possible. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string (unicode on Python 2). + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string (unicode on Python 2) which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. - `key_vault_namespace`: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption @@ -409,8 +421,10 @@ def create_data_key(self, kms_provider, master_key=None, "aws" and "local". - `master_key`: Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is - not applicable and may be omitted. If the `kms_provider` is "aws" - it is required and has the following fields:: + not applicable and may be omitted. + + If the `kms_provider` is "aws" it is required and has the + following fields:: - `region` (string): Required. The AWS region, e.g. "us-east-1". - `key` (string): Required. The Amazon Resource Name (ARN) to @@ -419,6 +433,26 @@ def create_data_key(self, kms_provider, master_key=None, requests to. May include port number, e.g. "kms.us-east-1.amazonaws.com:443". + If the `kms_provider` is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + - `key_alt_names` (optional): An optional list of string alternate names used to reference a key. If a key is created with alternate names, then encryption may refer to the key by the unique alternate diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 3158b3e84d..e45a3f94d5 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -59,9 +59,21 @@ def __init__(self, kms_providers, key_vault_namespace, - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used to generate KMS messages. - - `local`: Map with "key" as a 96-byte array or string. "key" - is the master key used to encrypt/decrypt data keys. This key - should be generated and stored as securely as possible. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string (unicode on Python 2). + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string (unicode on Python 2) which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. - `key_vault_namespace`: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption diff --git a/test/client-side-encryption/custom/azure-dek.json b/test/client-side-encryption/custom/azure-dek.json new file mode 100644 index 0000000000..8e50eca340 --- /dev/null +++ b/test/client-side-encryption/custom/azure-dek.json @@ -0,0 +1,33 @@ +{ + "_id": { + "$binary": { + "base64": "As3URE1jRcyHOPjaLWHOXA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "df6fFLZqBsZSnQz2SnTYWNBtznIHktVSDMaidAdL7yVVgxBJQ0DyPZUR2HDQB4hdYym3w4C+VGqzcyTZNJOXn6nJzpGrGlIQMcjv93HE4sP2d245ShQCi1nTkLmMaXN63E2fzltOY3jW7ojf5Z4+r8kxmzyfymmSRgo0w8AF7lUWvFhnBYoE4tE322L31vtAK3Zj8pTPvw8/TcUdMSI9Y669IIzxbMy5yMPmdzpnb8nceUv6/CJoeiLhbt5GgaHqIAv7tHFOY8ZX8ztowMLa3GeAjd9clvzraDTqrfMFYco/kDKAW5iPQQ+Xuy1fP8tyFp0ZwaL/7Ed2sc819j8FTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-kevinalbs.vault.azure.net", + "keyName": "test-key" + } +} + diff --git a/test/client-side-encryption/custom/azure-gcp-schema.json b/test/client-side-encryption/custom/azure-gcp-schema.json new file mode 100644 index 0000000000..24cf682cd3 --- /dev/null +++ b/test/client-side-encryption/custom/azure-gcp-schema.json @@ -0,0 +1,32 @@ +{ + "db.coll": { + "bsonType": "object", + "properties": { + "secret_azure": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "As3URE1jRcyHOPjaLWHOXA==", + "subType": "04" + } + }], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + }, + "secret_gcp": { + "encrypt": { + "keyId": [{ + "$binary": { + "base64": "osU8SLxJRHONbl8Oh5o+eg==", + "subType": "04" + } + }], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + } +} + diff --git a/test/client-side-encryption/custom/gcp-dek.json b/test/client-side-encryption/custom/gcp-dek.json new file mode 100644 index 0000000000..14b895111f --- /dev/null +++ b/test/client-side-encryption/custom/gcp-dek.json @@ -0,0 +1,35 @@ +{ + "_id": { + "$binary": { + "base64": "osU8SLxJRHONbl8Oh5o+eg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAg4LDql74hjYPZ957Z7YpCrD6yTVVXKegflJDstQ/xngTyx0SiQEAkWNo/fjPj6jMNSvEop07/29Fu72QHFDRYM3e/KFHfnMQjKzfxb1yX1dC6MbO5FZG/UNBkXlJgPqbHNVuizea3QC24kV5iOiEb4nTM7+RW+8TfVb6QerWWe6MjC+kNpj4LMVcc1lFfVDeGgpJLyMLNGitrjR16qH8qQTNbGNy0toTL69JUmgS8Q==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "csfle-poc", + "location": "global", + "keyRing": "test", + "keyName": "quickstart" + } +} + diff --git a/test/test_encryption.py b/test/test_encryption.py index 0903eadca7..8066cb39a3 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -20,6 +20,7 @@ import traceback import socket import sys +import textwrap import uuid sys.path[0:0] = [""] @@ -30,6 +31,7 @@ STANDARD, UUID_SUBTYPE) from bson.codec_options import CodecOptions +from bson.py3compat import _unicode from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON @@ -52,6 +54,7 @@ from test.utils import (TestCreator, camel_to_snake_args, OvertCommandListener, + WhiteListEventListener, rs_or_single_client, wait_until) from test.utils_spec_runner import SpecRunner @@ -1105,5 +1108,132 @@ def test_05_endpoint_invalid_host(self): 'aws', master_key=master_key) +class AzureGCPEncryptionTestMixin(object): + DEK = None + KMS_PROVIDER_MAP = None + KEYVAULT_DB = 'keyvault' + KEYVAULT_COLL = 'datakeys' + + def setUp(self): + keyvault = self.client.get_database( + self.KEYVAULT_DB).get_collection( + self.KEYVAULT_COLL) + create_key_vault(keyvault, self.DEK) + + def _test_explicit(self, expectation): + client_encryption = ClientEncryption( + self.KMS_PROVIDER_MAP, + '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + client_context.client, + OPTS) + self.addCleanup(client_encryption.close) + + ciphertext = client_encryption.encrypt( + 'test', + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=Binary.from_uuid(self.DEK['_id'], STANDARD)) + + self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) + self.assertEqual(client_encryption.decrypt(ciphertext), 'test') + + def _test_automatic(self, expectation_extjson, payload): + encrypted_db = "db" + encrypted_coll = "coll" + keyvault_namespace = '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + + encryption_opts = AutoEncryptionOpts( + self.KMS_PROVIDER_MAP, + keyvault_namespace, + schema_map=self.SCHEMA_MAP) + + insert_listener = WhiteListEventListener('insert') + client = rs_or_single_client( + auto_encryption_opts=encryption_opts, + event_listeners=[insert_listener]) + self.addCleanup(client.close) + + coll = client.get_database(encrypted_db).get_collection( + encrypted_coll, codec_options=OPTS, + write_concern=WriteConcern("majority")) + coll.drop() + + expected_document = json_util.loads( + expectation_extjson, json_options=JSON_OPTS) + + coll.insert_one(payload) + event = insert_listener.results['started'][0] + inserted_doc = event.command['documents'][0] + + for key, value in expected_document.items(): + self.assertEqual(value, inserted_doc[key]) + + output_doc = coll.find_one({}) + for key, value in payload.items(): + self.assertEqual(output_doc[key], value) + + +AZURE_CREDS = { + 'tenantId': os.environ.get('FLE_AZURE_TENANTID', ''), + 'clientId': os.environ.get('FLE_AZURE_CLIENTID', ''), + 'clientSecret': os.environ.get('FLE_AZURE_CLIENTSECRET', '')} + + +class TestAzureEncryption(AzureGCPEncryptionTestMixin, + EncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(AZURE_CREDS.values()), + 'Azure environment credentials are not set') + def setUpClass(cls): + cls.KMS_PROVIDER_MAP = {'azure': AZURE_CREDS} + cls.DEK = json_data(BASE, 'custom', 'azure-dek.json') + cls.SCHEMA_MAP = json_data(BASE, 'custom', 'azure-gcp-schema.json') + super(TestAzureEncryption, cls).setUpClass() + + def test_explicit(self): + return self._test_explicit( + 'AQLN1ERNY0XMhzj42i1hzlwC8/OSU9bHfaQRmmRF5l7d5ZpqJX13qF5zSyExo8N9c1b6uS/LoKrHNzcEMKNrkpi3jf2HiShTFRF0xi8AOD9yfw==') + + def test_automatic(self): + expected_document_extjson = textwrap.dedent(""" + {"secret_azure": { + "$binary": { + "base64": "AQLN1ERNY0XMhzj42i1hzlwC8/OSU9bHfaQRmmRF5l7d5ZpqJX13qF5zSyExo8N9c1b6uS/LoKrHNzcEMKNrkpi3jf2HiShTFRF0xi8AOD9yfw==", + "subType": "06"} + }}""") + return self._test_automatic( + expected_document_extjson, {"secret_azure": "test"}) + + +GCP_CREDS = { + 'email': os.environ.get('FLE_GCP_EMAIL', ''), + 'privateKey': _unicode(os.environ.get('FLE_GCP_PRIVATEKEY', ''))} + + +class TestGCPEncryption(AzureGCPEncryptionTestMixin, + EncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(GCP_CREDS.values()), + 'GCP environment credentials are not set') + def setUpClass(cls): + cls.KMS_PROVIDER_MAP = {'gcp': GCP_CREDS} + cls.DEK = json_data(BASE, 'custom', 'gcp-dek.json') + cls.SCHEMA_MAP = json_data(BASE, 'custom', 'azure-gcp-schema.json') + super(TestGCPEncryption, cls).setUpClass() + + def test_explicit(self): + return self._test_explicit( + 'AaLFPEi8SURzjW5fDoeaPnoCGcOFAmFOPpn5584VPJJ8iXIgml3YDxMRZD9IWv5otyoft8fBzL1LsDEp0lTeB32cV1gOj0IYeAKHhGIleuHZtA==') + + def test_automatic(self): + expected_document_extjson = textwrap.dedent(""" + {"secret_gcp": { + "$binary": { + "base64": "AaLFPEi8SURzjW5fDoeaPnoCGcOFAmFOPpn5584VPJJ8iXIgml3YDxMRZD9IWv5otyoft8fBzL1LsDEp0lTeB32cV1gOj0IYeAKHhGIleuHZtA==", + "subType": "06"} + }}""") + return self._test_automatic( + expected_document_extjson, {"secret_gcp": "test"}) + + if __name__ == "__main__": unittest.main() From 1e2a52fe8bd590665cb9b495a285b7d283c42f45 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 4 Nov 2020 07:59:54 -0800 Subject: [PATCH 0233/2111] PYTHON-2404 Update CSFLE spec tests for KMS providers 'azure' and 'gcp' (#509) --- .../corpus/corpus-encrypted.json | 3662 ++++++++++++- .../corpus/corpus-key-azure.json | 33 + .../corpus/corpus-key-gcp.json | 35 + .../corpus/corpus-schema.json | 3254 +++++++++++- .../client-side-encryption/corpus/corpus.json | 4580 ++++++++++++++++- .../custom/azure-dek.json | 12 +- .../custom/azure-gcp-schema.json | 5 +- .../custom/gcp-dek.json | 14 +- .../client-side-encryption/spec/azureKMS.json | 222 + test/client-side-encryption/spec/gcpKMS.json | 224 + test/test_encryption.py | 402 +- 11 files changed, 11894 insertions(+), 549 deletions(-) create mode 100644 test/client-side-encryption/corpus/corpus-key-azure.json create mode 100644 test/client-side-encryption/corpus/corpus-key-gcp.json create mode 100644 test/client-side-encryption/spec/azureKMS.json create mode 100644 test/client-side-encryption/spec/gcpKMS.json diff --git a/test/client-side-encryption/corpus/corpus-encrypted.json b/test/client-side-encryption/corpus/corpus-encrypted.json index 998b058b0f..a11682688a 100644 --- a/test/client-side-encryption/corpus/corpus-encrypted.json +++ b/test/client-side-encryption/corpus/corpus-encrypted.json @@ -4021,5 +4021,3665 @@ "subType": "06" } } + }, + "azure_double_rand_auto_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAB0S2kOZe54q6iZqeTLndkX+kehTKtb30jTP7FS+Zx+cxhFs626OrGY+jrH41cLfroCccacyNHUZFRinfqZPNOyw==", + "subType": "06" + } + } + }, + "azure_double_rand_auto_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABYViH7PLjCIdmTibW9dGCJADwXx2dRSMYxEmulPu89clAoeLDa8pwJ7YxLFQCcTGmZRfmp58dDDAzV8tyyE8QMg==", + "subType": "06" + } + } + }, + "azure_double_rand_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABeRahSj4pniBp0rLIEZE8MdeyiIKcYuTZiuGzGiXbFbntEPow88DFHIBSxbMGR7p/8jCpPL+GqBwFkPkafXbMzg==", + "subType": "06" + } + } + }, + "azure_double_rand_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAABdaa3vKtO4cAEUjYJfOPl1KbbgeWtphfUuJd6MxR9VReNSf1jc+kONwmkPVQs2WyZ1n+TSQMGRoBp1nHRttDdTg==", + "subType": "06" + } + } + }, + "azure_double_det_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "azure_double_det_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "azure_string_rand_auto_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACeoztcDg9oZ7ixHinReWQTrAumpsfyb0E1s3BGOFHgBCi1tW79CEXfqN8riFRc1YeRTlN4k5ShgHaBWBlax+XoQ==", + "subType": "06" + } + } + }, + "azure_string_rand_auto_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACov9cXQvDHeKOS5Gxcxa8vdAcTsTXDYgUucGzsCyh4TnTWKGQEVk3DHndUXX569TKCjq5QsC//oWEwweCn1nZ4g==", + "subType": "06" + } + } + }, + "azure_string_rand_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACKU5qTdMdO0buQ/37ZRANUAAafcsoNMOTxJsDOfkqUb+/kRgM1ePlwVvk4EJiAGhJ/4SEmEOpwv05TT3PxGur2Q==", + "subType": "06" + } + } + }, + "azure_string_rand_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAACX/ODKGHUyAKxoJ/c/3lEDBTc+eP/VS8OHrLhYoP96McpnFSgYi5jfUwvrFYa715fkass4N0nAHE6TzoGTYyk6Q==", + "subType": "06" + } + } + }, + "azure_string_det_auto_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_string_det_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_string_det_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAACmVI7YK4JLOzutEdQ79he817Vk5EDP/3hXwOlGmERZCtp8J8HcqClhV+pyvRLGbwmlh12fbSs9nEp7mrobQm9wA==", + "subType": "06" + } + } + }, + "azure_object_rand_auto_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADWkZMsfCo4dOPMH1RXC7GkZFt1RCjJf0vaLDA09ih1Jl47SOetZELQ7B1TQjRQitktzrfD43jk8Fn4J5ZYZu1qQ==", + "subType": "06" + } + } + }, + "azure_object_rand_auto_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADJFMymfstltZP1oAqj4bgbCk8uLGtCd12eLqvSq0ZO+JDvls7PAovwmoWwigHunP8BBXT8sLydK+jn1sHfnhrlw==", + "subType": "06" + } + } + }, + "azure_object_rand_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADCen+XrLYKg7gIVubVfdbQwuJ0mFHxhSUUyyBWj4RCeLeLUYXckboPGixXWB9XdwcOnInfF9u6qvktY67GtYASQ==", + "subType": "06" + } + } + }, + "azure_object_rand_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAADnUyp/7eLmxxxOdsP+mNuJABK4PQoKFWDAY7lDrH6MYa03ryASOihPZWYZWXZLrbAf7cQQhElEkKqKwY8+NXgqg==", + "subType": "06" + } + } + }, + "azure_object_det_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_array_rand_auto_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEtk14WyoatZcNPlg3y/XJNsBt6neFJeQwR06B9rMGV58oIsmeE5zMtUOBYTgzlnwyKpqI/XVAg8s1VxvsrvGCyLVPwGVyDztwtMgVSW6QM3s=", + "subType": "06" + } + } + }, + "azure_array_rand_auto_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAERTO63J4Nj1BpFlqVduA2IrAiGoV4jEOH3FnFgx7ZP7da/YBmLX/bc1EqdpC8v4faHxp74iU0xAB0yW4WgySDX7rriL5cw9sMpqgLRaBxGug=", + "subType": "06" + } + } + }, + "azure_array_rand_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEs09qQdNVwh+KFqKPREQkw0XFdRNHAvjYJzs5MDE9+QxvtKlmVKSK3wkxDdCrcH4r7ePV2nCy2h1IHYqaDnnt4s5dSawI2l88iTT+bBcCSrU=", + "subType": "06" + } + } + }, + "azure_array_rand_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAEaQ/YL50up4YIMJuVJSiAP06IQ+YjdKLIfkN/prbOZMiXErcD1Vq1hwGhfGdpEsLVu8E7IhJb4wakVC/2dLZoRP95az6HqRRauNNZAIQMKfY=", + "subType": "06" + } + } + }, + "azure_array_det_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_binData=00_rand_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFl/leuLAHf1p6aRKHdFyN9FM6MW2XzBemql2xQgqkwJ6YOQXW6Pu/aI1scXVOrvrSu3+wBvByjHu++1AqFgzZRQ==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_auto_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAF4Nq/LwyufT/mx0LtFSkupNHTuyjbr4yUy1N5/37XhkpqZ1e4sWCHGNaTDEm5+cvdnbqZ/MMkBv855dc8N7vnGA==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFv1Kbv54uXJ76Ih63vtmszQtzkXqDlv8LDCFO3sjzu70+tgRXOhLm3J8uZpwoiNkgM6oNLn0en7tnEekYB9++CA==", + "subType": "06" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFgcYC1n7cGGXpv0qf1Kb8t9y/6kbhscGt2QJkQpAiqadFPPYDU/wwaKdDz94NpAHMZizUbhf9tvZ3UXl1bozhDA==", + "subType": "06" + } + } + }, + "azure_binData=00_det_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=00_det_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFvswfP3+jgia6rAyrypvbso3Xm4d7MEgJRUCWFYzA+9ov++vmeirgoTp/rFavTNOPb+61fvl1WKbVwrgODusaMg==", + "subType": "06" + } + } + }, + "azure_binData=04_rand_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFMzMC3BLn/zWE9dxpcD8G0h4aifSY0zSHS9xTVJXgq21s2WU++Ov2UvHatVozmtZltsUN9JvSWqOBQRkFsrXvI7bc4lYfOoOmfpTHFcRDA/c=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_auto_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFDlBN5hUTcjamOg/sgyeG0S52kphsjUgvlpuqHYz6VVdLtZ69cGHOVqqyml3x2rVqWUZJjd4ZodOhlwWq9p+i5IYNot2QaBvi8NZSaiThTc0=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFjvS2ozJuAL3rCvyBpraVtgL91OMdiskmgYnyfKlzd8EhYLd1cL4yxnTUjRXx+W+p8uN0/QZo+mynhcWnwcq83raY+I1HftSTx+S6rZ0qyDM=", + "subType": "06" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAFqUMd/I0yOdy5W4THvFc6yrgSzB6arkRs/06b0M9Ii+QtAY6vbz+/aJ0Iy3Jm8TahC1wOZVmTj5luQpr+PHZMCEAFadv+0K/Nsx6xVhAh9gg=", + "subType": "06" + } + } + }, + "azure_binData=04_det_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_binData=04_det_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAFmN+KMrERGmfmue8/hG4D+ZcGzxC2HntdYBLjEolzvS9FV5JH/adxyUAnMpyL8FNznARL51rbv/G1nXPn9mPabsQ4BtWEAQbHx9TiXd+xbB0=", + "subType": "06" + } + } + }, + "azure_undefined_rand_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_rand_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_objectId_rand_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAH3sYVJpCKi310YxndMwm5ltEbbiRO1RwZxxeEkzI8tptbNXC8t7RkrT8VSJZ43wbGYCiqH5RZy9v8pYwtUm4STw==", + "subType": "06" + } + } + }, + "azure_objectId_rand_auto_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHD7agzVEc0JwesHHhkpGYIDAHQ+3Hc691kqic6YmVvK2N45fD5aRKftaZNs5OxSj3tNHSo7lQ+DVtPj8uSSpsVg==", + "subType": "06" + } + } + }, + "azure_objectId_rand_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHEgKgy2mpMLpfeEWqbvQOaRZAy+cEGXGon3e53/JoH6dZneEyyt4ZrcrK6uRqyUPWX0q104JbCYxfbtHtdzWgPQ==", + "subType": "06" + } + } + }, + "azure_objectId_rand_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAHqSv6Nruw3TIi7y0FPRjSfnJmWSdv5XMhAtnHNkT8MVuHeM32ayo0yc8dTA1wlkRtAI5JrGxTfERCXYuCojvvXg==", + "subType": "06" + } + } + }, + "azure_objectId_det_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_objectId_det_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_objectId_det_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAHcPRjIOyLDUJCDcdWkUySKCFS2AFkIa1OQyQAfC3Zh5HwJ1O7j2o+iYKRerhbni8lBiZH7EUMm1JcxM99lLC5jQ==", + "subType": "06" + } + } + }, + "azure_bool_rand_auto_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIYVWPvzSmiCs9LwRlv/AoQWhaS5mzoKX4W26M5eg/gPjOZbEVYOV80pWMxCcZWRAyV/NDWDUmKtRQDMU9b8lCJw==", + "subType": "06" + } + } + }, + "azure_bool_rand_auto_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIsAB01Ugqtw4T9SkuJBQN1y/ewpRAyz0vjFPdKI+jmPMmaXpMlXDJU8ZbTKm/nh6sjJCFcY5oZJ83ylbp2gHc6w==", + "subType": "06" + } + } + }, + "azure_bool_rand_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIr8/qFd564X1mqHEhB0y7bzGFdrHuw+Gk45nXla3VvGHzeIJy6j2Wdl0uziWslMmBvNp8WweW+jQ6E2Fu7SiojQ==", + "subType": "06" + } + } + }, + "azure_bool_rand_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAIWsca5FAnS2zhHnmKmexvvXMTgsZZ7uAFHnjQassUcay6mvIWH4hOnGiRxt5Zm0wO4S6cZq+PZrmEH5/n9rJcJQ==", + "subType": "06" + } + } + }, + "azure_bool_det_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "azure_bool_det_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "azure_date_rand_auto_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJwKo7XW5daIFlwY1mDAnJdHlcUgF+74oViL28hQGhde63pkPyyS6lPkYrc1gcCK5DL7PwsSX4Vb9SsNAG9860xw==", + "subType": "06" + } + } + }, + "azure_date_rand_auto_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJYZdWIqvqTztGKJkSASMEOjyrUFKnYql8fMIEzfEZWx2BYsIkxxOUUUCASg/Jsn09fTLVQ7yLD+LwycuI2uaXsw==", + "subType": "06" + } + } + }, + "azure_date_rand_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJuWzKqi3KV8GbGGnT7i9N4BACUuNjt5AgKsjWIfrWRXK1+jRQFq0bYlVWaliT9CNIygL2aTF0H4eHl55PAI84MQ==", + "subType": "06" + } + } + }, + "azure_date_rand_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAJ5JTtTuP4zTnEbaVlS/W59SrZ08LOC4ZIl+h+H4RnfHUfBXDwUou+APolVaYko+VZMKecrikdPeewgzWaqazJ1g==", + "subType": "06" + } + } + }, + "azure_date_det_auto_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_date_det_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_date_det_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAJCREIp/SPolAZcVU1iOmaJaN2tFId5HhrjNmhp6xhA1AIPLnN+U7TAqesxFN7iebR9fXI5fZxYNgyWqQC1rqUJw==", + "subType": "06" + } + } + }, + "azure_null_rand_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_rand_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_regex_rand_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALsMm3W2ogEiI6m0l8dS5Xhqnw+vMBvN1EesOTqAZOk4tQleX6fWARwUUnjFxbuejU7ISb50fc/Ul+ntL9z/2nHQ==", + "subType": "06" + } + } + }, + "azure_regex_rand_auto_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALITQNQI0hfCeMTxH0Hce1Cf5tinQG+Bq8EolUACvxUUQcDqIXfFXn19tV/Qyj4lIdnnwh/18hiswgEpJRK7uLGw==", + "subType": "06" + } + } + }, + "azure_regex_rand_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAALw/1QI/bKeiGUrrtC+yXOTvxZ2mJjSelPPGOm1mge0ws8DsX0DPHmo6MjhnRO4u0c/LWiE3hwHG2rYjAFlFXZ5A==", + "subType": "06" + } + } + }, + "azure_regex_rand_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAL6Sl58UfFCHCZzWIB4r19/ZjeSRAoWeTFCFedKiwyR8/xnL+8jzXK/9+vTIspP6j35lFapr+f4iBNB9WjdpYNKA==", + "subType": "06" + } + } + }, + "azure_regex_det_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_regex_det_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_regex_det_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAALxshM91Tsql/8kPe3dC16oP36XSUIN6godiRVIJLJ+NAwYtEkThthQsln7CrkIxIx6npN6A/hw1CBJERS/cqWhw==", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAMaAd1v/XCYM2Kzi/f4utR6aHOFORmzZ17EepEjkn5IeKshktUpPWjI/dBwSunn5Qxx2zI3nm06c3SDvp6tw8qb7u4qXjLQYhlsQ0bHvvm+vE=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAM6VNjkN9bMIzfC7AX0ZhOEXPpyPE0nzYq3c5TNHrgeGWdZDR9GVdbO9t55zQrQJJ2Mmevh8c0WaAUV+YODv7ty6TDBsPbaKWWqMzu/v9RXHo=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAM66tywuMhwdyUjxfl7EOdKHNCLeIPnct3PgKrAKlOQFjiNQUIA2ShVy0qYpJcvvFsuQ5e8Bjr0IqeBc8mC7n4euRSM1UXpLqI5XHgXMMaYpI=", + "subType": "06" + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAMtPQEbZ4gWoSYjVZLd5X6j0XxutWY1Ecrys2ErKRgZaxP0uGe8uw0cnr2Z5PYylaYmsSicLwD1PwWY42PKmaGBDraHmdfqDOPvrNxhBrfU/E=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAMxUcVqq6RpAUCv08qGkmjuwVAIgLeYyh7xZnMeCYVGmhJKIP1Zdt1SvRGRV0jzwCQmXgxNd04adRwJnG/PRQIsL9aH3ilJgEnUbOo1nqR7yw=", + "subType": "06" + } + } + }, + "azure_javascript_rand_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANWXPb5z3a0S7F26vkmBF3fV+oXYUj15OEtnSlXlUrc+gbhbPDxSvCPnTBEy5sNu4ndkvEZZxYgZInkF2q4rhlfQ==", + "subType": "06" + } + } + }, + "azure_javascript_rand_auto_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANN4mcwLz/J4eOUknhVsy6kdF1ThDP8cx6dNpOwJWAiyPHEsn+i6JmMTlfQMBrUp9HB/u3R+jLO5yz4XgLUKE8Tw==", + "subType": "06" + } + } + }, + "azure_javascript_rand_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANJ+t5Z8hSQaoNzszzkWndAo4A0avDf9bKFa7euznz8ZYInnl9RUVqWMyxjSuIotAvTyYSJzxh+w2hKCgVf+MjEA==", + "subType": "06" + } + } + }, + "azure_javascript_rand_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAANRLOQFpmkEg/KdWMmaurkNtUhy45rgtoipc9kQz6olgDWiMim81XC0AW5cOvjbHXL3w7Du28Kwdsp4j0PTTXHUQ==", + "subType": "06" + } + } + }, + "azure_javascript_det_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_javascript_det_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_javascript_det_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAANUrNUS/7/dmKVWBd+2JKGEn1hxbFSyu3p5sDNatukG2m16t4WwxzmYAg8PuQbAxekprs7iaLA+7D2Kn3ZuMSQOw==", + "subType": "06" + } + } + }, + "azure_symbol_rand_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAORMcgtQSU+/2Qlq57neRrVuAFSeSwkqdo+z1fh6IKjyEzhCy+u5bTzSzTopyKJQTCUZA2mSpRezWkM87oiGfhMFkBRVreMcE62eH+BLlgUaM=", + "subType": "06" + } + } + }, + "azure_symbol_rand_auto_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAOIKlAw/A3nwHn0tO2cYtJx0azB8MGmXtt+bRptzn8yHlUSpMpYaiU0ssBBiLkmMLAITYebLqDk3NHESyP7PvbSfX1E2XVn2Nf694ZqPWMec8=", + "subType": "06" + } + } + }, + "azure_symbol_rand_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAO8SXW76AEr/6D6zyP1RYwmwdVM2AINaXZn3Ipy+fynWTUV6XIPIRR7xMTttNo2zlh7fgXDZ28PmjooGlQzn0q0JVQmXPCIPM3aqAmMcgyuqg=", + "subType": "06" + } + } + }, + "azure_symbol_rand_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAOtoJWm2Ucre0foHIiOutsX1WIyub7t3Lby3/F8zRXn+l6ixlTjAPgWFwpRnYg96Lt2ACDDQ9CO51ejr9qk0b8LDBwG3qU5Cuibsp7vo1VsdI=", + "subType": "06" + } + } + }, + "azure_symbol_det_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_symbol_det_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_symbol_det_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAOvp/FMMmWVMkiuN51uFMFBiRQAcc9jftlNsHsLoNtohZaGni26kgX94b+/EI8pdWF5xA/73JlGlij0Rt+vC9s/zTDItRpn0bJL54WPphDcmA=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPCw9NnvJyuTYIgZxr1w1UiG85PGZ4rO62DWWDF98HwVM/Y6u7hNdNjkaWjYFsPMl38ioHw/pS8GFR62QmH2RAw/BV0wI7pNy2evANr3i3gKg=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPXQzqnQ2UWkIYof8/OfadNMa7iVKAbOaiu7YGm8iVrx+W6uxKLPFugVqHtQ29hYXXf33xr8rqGNxDlAe7/x1OeYEif71f7LUkmKF9WxJV9Ko=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAP0nxlppgPyjLx0eBempbOlL21G6KbABSrE6+YuNDcsjJjxCQuLR9+aoAwa+yCDEC7GZ1E3oP489edKUuNpE4Ts26jy4aRegu4DmyECUeBwAg=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAPO89afu9Sb+cK9wwM1cO1DPjvu5UNyObjjTScy1hy9PzllJGfj7b84f0Ah74jPYsMPwI0Eslu/IYF3+5jmquq5Qp/VUQESlxqRqRK0xIeMfs=", + "subType": "06" + } + } + }, + "azure_javascriptWithScope_det_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_int_rand_auto_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQUyy4uWmWdzypsK81q9egREg4s80X3L2hzxJzC+fL08Xzy1z9grpPPCfJrluUVKMMGmmZR8gJPJ70igN3unJbzg==", + "subType": "06" + } + } + }, + "azure_int_rand_auto_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQr4gyoHKpGsSJo8CMsYSJk/KilFMJhsDCmxrha7yfNW1uR5sjyZj4B4s6uTXGw76x7aR/AvecDlY3QFJb8L1mjg==", + "subType": "06" + } + } + }, + "azure_int_rand_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQ0zgXYPV1MuEFksmDpVDoWkoZQelm3+rYrMiT64KYywO//75799W8TbR3a7O6Q/ErjKQOin2OCp8EWwZqTDdz5w==", + "subType": "06" + } + } + }, + "azure_int_rand_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAQG+qz00yizREbP3tla1elMiwf8TKLbUU2XWUP+E0vey/wvbjTTIzqwUlz/b9St77CHJhavypP3hMrngXR9GapbQ==", + "subType": "06" + } + } + }, + "azure_int_det_auto_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_int_det_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_int_det_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAQCkJH+CataLqp/xBjO77QBprC2xPV+rE+goSZ3C6aqwXIeTYHTOqEbeaFb5iZcqYH5nWvNvnfbZSIMyvSfrPjhw==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARwcXYtx+A7g/zGkjGdkyVxZGCO9Nzj3D70NIpl2TeH2j9qYGP4DenwL1xSgrL2Ez+X58d2BvNhKrjA9y2w1Z8kA==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_auto_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARQ0Pjx3l92Aqhn2e1hot2M9rQ6aLPE2Iw8AVhm5AD8FWywWih12Fn2p9+kiE33yKPOCyrTWQHKPtB4yYhqnJgGg==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAARvFMlIzh2IjpHkTJ8buqTOqBA0+CxVDsZacUhSHVMgJLN+0DJsJy8OfkmKMu9Lk5hULY00Udoja87x+79mYfmeQ==", + "subType": "06" + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAAR+2SCd7V5ukAkh7CYpNPIatzTL8osNoA4Mb5jjjbos8eMamImw0fbH8YA+Rdm4CgGdQQ9VDX7MtMWlArkj0Jpew==", + "subType": "06" + } + } + }, + "azure_timestamp_det_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_timestamp_det_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_timestamp_det_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAARe72T/oC09QGE1vuljb6ZEHa6llEwMLT+C4s9u1fREkOKndpmrOlGE8zOey4teizY1ypOMkIZ8GDQJJ4kLSpNkQ==", + "subType": "06" + } + } + }, + "azure_long_rand_auto_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASSSgX7k8iw0xFe0AiIzOu0e0P7Ujyfsk/Cdl0fR5X8V3QLVER+1Qa47Qpb8iWL2VLBSh+55HvIEtvhWn8SwXaog==", + "subType": "06" + } + } + }, + "azure_long_rand_auto_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASUhKr5K7ulGTeFbhIvJ2DDE10gRAFn5+2zqnsIFSY8lYV2PBYcENdeNBXZs6kyIAYhJdQyuOChVCerTI5jmQWDw==", + "subType": "06" + } + } + }, + "azure_long_rand_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASHxawpjTHdXYRWQSZ7Qi7gFC+o4dW2mPH8s5nQkPFY/EubcJbdAZ5HFp66NfPaDJ/NSH6Vy+TkpX3683RC+bjSQ==", + "subType": "06" + } + } + }, + "azure_long_rand_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAASVaMAv6UjuBOUZMJ9qz+58TQWmgaMpS9xrJziJY80ml9aRlDTtRubP7U40CgbDvrtY1QgHbkF/di1XDCB6iXMMg==", + "subType": "06" + } + } + }, + "azure_long_det_auto_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_long_det_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_long_det_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQGVERAAAAAAAAAAAAAAAAAS06L8oEPeMvVlA32VlobdOWG24OoyMbv9PyYsHLsbT0bHFwU7lYUSQG9EkYVRNPEDzvXpciE1jT7KT8CRY8XT/g==", + "subType": "06" + } + } + }, + "azure_decimal_rand_auto_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATJ6LZgPu9F+rPtYsMuvwOx62+g1dAk858BUtE9FjC/300DnbDiolhkHNcyoFs07NYUNgLthW2rISb/ejmsDCt/oqnf8zWYf9vrJEfHaS/Ocw=", + "subType": "06" + } + } + }, + "azure_decimal_rand_auto_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATX8eD6qFYWKwIGvXtQG79fXKuPW9hkIV0OwrmNNIqRltw6gPHl+/1X8Q6rgmjCxqvhB05AxTj7xz64gP+ILkPQY8e8VGuCOvOdwDo2IPwy18=", + "subType": "06" + } + } + }, + "azure_decimal_rand_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATBjQ9E5wDdTS/iI1XDqGmDBC5aLbPB4nSyrjRLfv1zEoPRjmcHlQmMRJA0mori2VQv6EBFNHeczFCenJaSAkuh77czeXM2vH3T6qwEIDs4dw=", + "subType": "06" + } + } + }, + "azure_decimal_rand_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AgGVERAAAAAAAAAAAAAAAAATtkjbhdve7MNuLaTm6qvaewuVUxeC1DMz1fd4RC4jeiBFMd5uZUVJTiOIerwQ6P5G5lkMlezKDWgKl2FUvZH6c7V3JknhsaWcV5iLWGUL6Zc=", + "subType": "06" + } + } + }, + "azure_decimal_det_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_minKey_rand_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_rand_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_maxKey_rand_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_rand_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_double_rand_auto_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABFoHQxnh1XSC0k1B01uFFg7rE9sZVBn4PXo26JX8gx9tuxu+4l9Avb23H9BfOzuWiEc43iw87K/W2y0VfKp5CCg==", + "subType": "06" + } + } + }, + "gcp_double_rand_auto_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABRkZkEtQEFB/r268cNfYRQbN4u5Cxjl9Uh+8wq9TFWLQH2E/9wj2vTLlxQ2cQsM7Qd+XxR5idjfBf9CKAfvUa/A==", + "subType": "06" + } + } + }, + "gcp_double_rand_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABDSUZ+0BbDDEZxCXA+J2T6Js8Uor2dfXSf7s/hpLrg6dxcW2chpht9XLiLOXG5w83TzCAI5pF8cQgBpBpYjR8RQ==", + "subType": "06" + } + } + }, + "gcp_double_rand_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAABCYxugs7L+4S+1rr0VILSbtBm79JPTLuzluQAv0+8hbu5Z6zReOL6Ta1vQH1oA+pSPGYA4euye3zNl1X6ZewbPw==", + "subType": "06" + } + } + }, + "gcp_double_det_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "gcp_double_det_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "gcp_string_rand_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACx3wSslJEiD80YLTH0n4Bbs4yWVPQl15AU8pZMLLQePqEtI+BJy3t2bqNP1098jS0CGSf+LQmQvXhJn1aNFeMTw==", + "subType": "06" + } + } + }, + "gcp_string_rand_auto_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAC5BTe5KP5UxSIk6dJlkz8aaZ/9fg44XPWHafiiL/48lcv3AWbu2gcBo1EDuc1sJQu6XMrtDCRQ7PCHsL7sEQMGQ==", + "subType": "06" + } + } + }, + "gcp_string_rand_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACyJN55OcyXXJ71x8VphTaIuIg6kQtGgVKPhWx0LSdYc6JOjB6LTdA7SEWiSlSWWFZE26UmKcPbkbLDAYf4IVrzQ==", + "subType": "06" + } + } + }, + "gcp_string_rand_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAACoa0d9gqfPP5s3+GoruwzxoQFgli8SmjpTVRLAOcFxqGdfrwSbpYffSw/OR45sZPxXCL6T2MtUvZsl7ukv0jBnw==", + "subType": "06" + } + } + }, + "gcp_string_det_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_string_det_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_string_det_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAACTCkyETcWayIZ9YEoQEBVIF3i7iXEe6M3KjYYaSVCYdqSbSHBzlwKWYbP+Xj/MMYBYTLZ1aiRQWCMK4gWPYppZw==", + "subType": "06" + } + } + }, + "gcp_object_rand_auto_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADy+8fkyeNYdIK001YogXfKc25zRXS1VGIFVWR6jRfrexy9C8LBBfX3iDwGNPbP2pkC3Tq16OoziQB6iNGf7s7yg==", + "subType": "06" + } + } + }, + "gcp_object_rand_auto_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADixoDdvm57gH8ooOaKI57WyZD5uaPmuYgmrgAFuV8I+oaalqYctnNSYlzQKCMQX/mIcTxvW3oOWY7+IzAz7npvw==", + "subType": "06" + } + } + }, + "gcp_object_rand_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADvq0OAoijgHaVMhsoNMdfWFLyISDo6Y13sYM0CoBXS/oXJNIJJvhgKPbFSV/h4IgiDLy4qNYOTJQvpqt094RPgQ==", + "subType": "06" + } + } + }, + "gcp_object_rand_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAADuTZF7/uqGjFbjzBYspPkxGWvvVAEN/ib8bfPOQrEobtTWuU+ju9H3TlT9DMuFy7RdUZnPB0D3HkM8+zky5xeBw==", + "subType": "06" + } + } + }, + "gcp_object_det_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_array_rand_auto_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAE085kJIBX6S93D94bcRjkOegEKsksi2R1cxoVDoOpSdHh3S6bZAOh50W405wvnOKf3KTP9SICDUehQKQZSC026Y5dwVQ2GiM7PtpSedthKJs=", + "subType": "06" + } + } + }, + "gcp_array_rand_auto_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAEk/FAXsaqyVr6I+MY5L0axeLhskcEfLZeB8whLMKbjLDLa8Iep+IdrFVSfKo03Zr/7Ah8Js01aT6+Vt4EDMJK0mGKZJOjsrAf3b6RS+Mzebg=", + "subType": "06" + } + } + }, + "gcp_array_rand_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAEDY7J9JGiurctYr7ytakNjcryVm42fkubcVpQpUYEkpK/G9NLGjrJuFgNW5ZVjYiPKEBbDB7vEtJqGux0BU++hrvVHNJ3wUT2mbDE18NE4KE=", + "subType": "06" + } + } + }, + "gcp_array_rand_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAErFFlw8W9J2y+751RnYLw0TSK9ThD6sP3i4zPbZtiuhc90RFoJhScvqM9i4sDKuYePZZRLBxdX4EZhZClOmswCGDLCIWsQlSvCwgDcIsRR/w=", + "subType": "06" + } + } + }, + "gcp_array_det_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_binData=00_rand_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF0R5BNkQKfm6wx/tob8nVGDEYV/pvy9UeCqc9gFNuB5d9KxCkgyxryV65rbB90OriqvWFO2jcxzchRYgRI3fQ+A==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF4wcT8XGc3xNdKYDX5/cbUwPDdnkIXlWWCCYeSXSk2oWPxMZnPsVQ44nXKJJsKitoE3r/hL1sSG5239WzCWyx9g==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAF07OFs5mlx0AB6QBanaybLuhuFbG+19KxSqHlSgELcz6TQKI6equX97OZdaWSWf2SSeiYm5E6+Y3lgA5l4KxC2A==", + "subType": "06" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFZ74Q7JMm7y2i3wRmjIRKefhmdnrhP1NXJgploi+44eQ2eRraZsW7peGPYyIfsXEbhgV5+aLmiYgvemBywfdogQ==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFhwJkocj36WXoY3mg2GWUrJ5IQTo9MvkwEwRFKdkcxm9pX2PZPK7bN5ZWw3IFcQ/0GfaW6V4LYr8WarZdLF0p5g==", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFmDO47RTVXzm8D4hfhLICILrQJg3yOwG3HYfCdz7yaanPow2Y6bMxvXxk+kDS29aS8pJKDqJQQoMGc1ZFD3yYKsLQHRi/8rW6TNDQd4sCQ00=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFpiu9Q3LTuPmgdWBqo5Kw0vGF9xU1rMyE4xwR8GccZ7ZMrUcR4AnZnAP7ah5Oz8e7qonNYX4d09obesYSLlIjyK7J7qg+GWiEURgbvmOngaA=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFHRy8dveGuMng9WMmadIp39jD7iEfl3bEjKmzyNoAc0wIcSJZo9kdGbNEwZ4p+A1gz273fmAt/AJwAxwvqdlanLWBr4wiSKz1Mu9VaBcTlyY=", + "subType": "06" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAFiqO+sKodqXuVox0zTbKuY4Ng0QE1If2hDLWXljAEZdYABPk20UJyL/CHR49WP2Cwvi4evJCf8sEfKpR+ugPiyxWzP3iVe6qqTzP93BBjqoc=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAFEp5Gut6iENHUqDMVdBm4cxQy35gnslTf7vSWW9InFh323BvaTTiubxbxTiMKIa/u47MfMprL9HNQSwgpAQc4lped+YnlRW8RYvTcG4frFtA=", + "subType": "06" + } + } + }, + "gcp_undefined_rand_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_rand_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_objectId_rand_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAH8Kt6coc8bPI4QIwS1tIdk6pPA05xlZvrOyAQgvoqaozMtWzG15OunQLDdS3yJ5WRiV7kO6CIKqRrvL2RykB5sw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_auto_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHU5Yzmz2mbgNQrGSvglgVuv14nQWzipBkZUVSO4eYZ7wLrj/9t0fnizsu7Isgg5oA9fV0Snh/A9pDnHZWoccXUw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHsdq5/FLqbjMDiNzf+6k9yxUtFVjS/xSqErqaboOl21934pAzgkOzBGodpKKFuK0Ta4f3h21XS+84wlIYPMlTtw==", + "subType": "06" + } + } + }, + "gcp_objectId_rand_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAHokIdXxNQ/NBMdMAVNxyVuz/J5pMMdtfxxJxr7PbsRJ3FoD2QNjTgE1Wsz0G4o09Wv9UWD+/mIqPVlLgx1sRtPw==", + "subType": "06" + } + } + }, + "gcp_objectId_det_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_objectId_det_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_objectId_det_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAHkcbaj3Hy3b4HkjRkMgiw5h6jBW7Sc56QSJmAPmVSc2T4B8d79A49dW0RyEiInZJcnVRjrYzUTRtgRaG4/FRd8g==", + "subType": "06" + } + } + }, + "gcp_bool_rand_auto_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIf7vUYS5XFrEU4g03lzj9dk8a2MkaQdlH8nE/507D2Gm5XKQLi2jCENZ9UaQm3MQtVr4Uqrgz2GZiQHt9mXcG3w==", + "subType": "06" + } + } + }, + "gcp_bool_rand_auto_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIdOC4Tx/TaVLRtOL/Qh8RUFIzHFB6nSegZoITwZeDethd8V3+R+aIAgzfN3pvmZzagHyVCm2nbNYJNdjOJhuDrg==", + "subType": "06" + } + } + }, + "gcp_bool_rand_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIzB14mX2vaZdiW9kGc+wYEgTCXA0FB5AVEyuERD00+K7U5Otlc6ZUwMtb9nGUu+M7PnnfxiDFHCrUWrTkAZzSUw==", + "subType": "06" + } + } + }, + "gcp_bool_rand_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAIhRLg79ACCMfeERBgG1wirirrZXZzbK11RxHkAbf14Fji2L3sdMBdLBU5I028+rmtDdC7khcNMt11V6XGKpAjnA==", + "subType": "06" + } + } + }, + "gcp_bool_det_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "gcp_bool_det_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "gcp_date_rand_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJL+mjI8xBmSahOOi3XkGRGxjhGNdJb445KZtRAaUdCV0vMKbrefuiDHJDPCYo7mLYNhRSIhQfs63IFYMrlKP26A==", + "subType": "06" + } + } + }, + "gcp_date_rand_auto_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJbeyqO5FRmqvPYyOb0tdKtK6JOg8QKbCl37/iFeEm7N0T0Pjb8Io4U0ndB3O6fjokc3kDQrZcQkV+OFWIMuKFjw==", + "subType": "06" + } + } + }, + "gcp_date_rand_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJVz3rSYIcoYtM0tZ8pB2Ytgh8RvYPeZvW7aUVJfZkZlIhfUHOHEf5kHqxzt8E1l2n3lmK/7ZVCFUuCCmr8cZyWw==", + "subType": "06" + } + } + }, + "gcp_date_rand_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAJAiQqNyUcpuDEpFt7skp2NSHFCux2XObrIIFgXReYgtWoapL/n4zksJXl89PGavzNPBZbzgEa8uwwAe+S+Y6TLg==", + "subType": "06" + } + } + }, + "gcp_date_det_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_date_det_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_date_det_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAJmATV2A1P5DmrS8uES6AMD9y+EU3x7u4K4J0p296iSkCEgIdZZORhPIEnuJK3FHw1II6IEShW2nd7sOJRZSGKcg==", + "subType": "06" + } + } + }, + "gcp_null_rand_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_rand_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_regex_rand_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALiebb3hWwJRqlgVEhLYKKvo6cnlU7BFnZnvlZ8GuIr11fUvcnS9Tg2m7vPmfL7WVyuNrXlR48x28Es49YuaxuIg==", + "subType": "06" + } + } + }, + "gcp_regex_rand_auto_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALouDFNLVgBXqhJvBRj9DKacuD1AQ2NAVDW93P9NpZDFFwGOFxmKUcklbPj8KkHqvma8ovVUBTLLUDR+tKFRvC2Q==", + "subType": "06" + } + } + }, + "gcp_regex_rand_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALtdcT9+3R1he4eniT+1opqs/YtujFlqzBXssv+hCKhJQVY/IXde32nNpQ1WTgUc7jfIJl/v9HvuA9cDHPtDWWTg==", + "subType": "06" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAALAwlRAlj4Zpn+wu9eOcs5CsNgrkVwrgmu1tc4wyQp0Lt+3UcplYsXQMrMPcTx3yB0JcI4Kh65n/DrAaA+G/a6iw==", + "subType": "06" + } + } + }, + "gcp_regex_det_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_regex_det_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_regex_det_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAALbCutQ7D94gk0djewcQiEdMFVVa21+Dn5enQf/mqPi3o7vPy7OejDBk9fiZRffsioRMhlx2cxqa8T3+AkeN96yg==", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMG8P+Y2YNIgknxE0/yPDCHASBvCU1IJwsEyaJPuOjn03enxEN7z/wbjVMN0lGUptDP3SVL+OIZtQ35VRP84MtnbdhcfZWqMhLjzrCjmtHUEg=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMKCLFUN6ApB5fSVEWazRddhKTEwgqI/mxfe0BBxht69pZQYhTjhOJP0YcIrtr+RCeHOa4FIJgQod1CFOellIzO5YH5CuV4wPxCAlOdbJcBK8=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAM7ULEA6uKKv4Pu4Sa3aAt7dXtEwfQC98aJoLBapHT+xXtn5GWPynOZQNtV3lGaYExQjiGdYbzOcav3SVy/sYTe3ktgkQnuZfe0tk0zyvKIMM=", + "subType": "06" + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAMoMveHO1MadAKuT498xiKWWBUKRbH7k7P2YETDg/BufVw0swos07rk6WJa1vqyF61QEmACjy4pmlK/5P0VfKJBAIvif51YqHPQkobJVS3nVA=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAMz+9m1bE+Th9YeyPmJdtJPO0F5QYsGYtU/Eom/LSoYjDmTmV2ehkKx/cevIxJfZUc+Mvv/uGoeuubGl8tiX4l+f6yLrSIS6QBtIHYKXk+JNE=", + "subType": "06" + } + } + }, + "gcp_javascript_rand_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANqBD0ITMn4BaFnDp7BX7vXbRBkFwmjQRVUeBbwsQtv5WVlJMAd/2+w7tyH8Wc44x0/9U/DA5GVhpTrtdDyPBI3w==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_auto_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANtA0q4mbkAaKX4x1xk0/094Mln0wnh2bYnI6s6dh+l2WLDH7A9JMZxCl6kc4uOsEfbOvjP/PLIYtdMGs14EjM5A==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANfrW3pmeiFdBFt5tJS6Auq9Wo/J4r/vMRiueLWxig5S1zYuf9kFPJMK/nN9HqQPIcBIJIC2i/uEPgeepaNXACCw==", + "subType": "06" + } + } + }, + "gcp_javascript_rand_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAANL7UZNzpwfwhRn/HflWIE9CSxGYNwLSo9d86HsOJ42rrZKq6HQqm/hiEAg0lyqCxVIVFxYEc2BUWSaq4/+SSyZw==", + "subType": "06" + } + } + }, + "gcp_javascript_det_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_javascript_det_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_javascript_det_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAANB2d97R8nUJqnG0JPsWzyFe5pct5jvUljdkPnlZvLN1ZH+wSu4WmLfjri6IzzYP//f8tywn4Il+R4lZ0Kr/RAeA==", + "subType": "06" + } + } + }, + "gcp_symbol_rand_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOsGdnr6EKcBdOAvYrP0o1pWbhhJbYsqfVwwwS1zq6ZkBayOss2J3TuYwBGXhJFlq3iIiWLdxGQ883XIvuAECnqUNuvpK2rOLwtDg8xJLiH24=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_auto_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOpfa6CUSnJBvnWdd7pSZ2pXAbYm68Yka6xa/fuyhVx/Tc926/JpqmOmQtXqbOj8dZra0rQ3/yxHySwgD7s9Qr+xvyL7LvAguGkGmEV5H4Xz4=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAO085iqYGFdtjiFWHcNqE0HuKMNHmk49DVh+pX8Pb4p3ehB57JL1nRqaXqHPqhFenxSEInT/te9HQRr+ADcHADvUGsScfm/n85v85nq6X+5y4=", + "subType": "06" + } + } + }, + "gcp_symbol_rand_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAOiidb+2TsbAb2wc7MtDzb/UYsjgVNSw410Sz9pm+Uy7aZROE5SURKXdLjrCH2ZM2a+XCAl3o9yAoNgmAjEvYVxjmyzLK00EVjT42MBOrdA+k=", + "subType": "06" + } + } + }, + "gcp_symbol_det_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_symbol_det_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_symbol_det_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAOFBGo77joqvZl7QQMB9ebMsAI3uro8ILQTJsTUgAqNzSh1mNzqihGHZYe84xtgMrVxNuwcjkidkRbNnLXWLuarOx4tgmOLx5A5G1eYEe3s7Q=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAPUsQHeXWhdmyfQ2Sq1ev1HMuMhBTc/FZFKO9tMMcI9qzjr+z4IdCOFCcx24/T/6NCsDpMiOGNnCdaBCCNRwNM0CTIkpHNLO+RSZORDgAsm9Q=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAPRZawtuu0gErebyFqiQw0LxniWhdeujGzaqfAXriGo/2fU7PalzTlWQa8wsv0y7Q/i1K4JbQwCEFpJWLppmtZshCGbVWjpPljB2BH4NNrLPE=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAP0qkQjuKmKIqdrsrR9djxt+1jFlEL7K9bP1oz7QWuY38dZJOoGwa6G1bP4wDzjsucJLCEgU2IY+t7BHraBFXvR/Aar8ID5eXcvJ7iOPIyqUw=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAP6L41iuBWGLg3hQZuhXp4MupTQvIT07+/+CRY292sC02mehk5BkuSOEVrehlvyvBJFKia4Bqd/UWvY8PnUPLqFKTLnokONWbAuh36y3gjStw=", + "subType": "06" + } + } + }, + "gcp_javascriptWithScope_det_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_int_rand_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQ+6oRKWMSvC+3UGrHSyGeVlR9bFnZtFTmYlUoGn04k6ndtCl8rsmBVUV6dMMYd7znnZtTSIGPI8q6jwf/NJjdIw==", + "subType": "06" + } + } + }, + "gcp_int_rand_auto_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQnz5jAbrrdutTPFA4m3MvlVJr3bpurTKY5xjwO5k8DZpeWTJzr+kVEJjG6M8/RgC/0UFNgBBrDbDhYa8PZHRijw==", + "subType": "06" + } + } + }, + "gcp_int_rand_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQfRFoxUgjrv8up/eZ/fLlr/z++d/jFm30nYvKqsnQT7vkmmujJWc8yAtthR9OI6W5biBgAkounqRHhvatLZC6gA==", + "subType": "06" + } + } + }, + "gcp_int_rand_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAQY/ePk59RY6vLejx9a5ITwkT9000KAubVSqMoQwv7lNXO+GKZfZoLHG6k1MA/IxTvl1Zbz1Tw1bTctmj0HPEGNA==", + "subType": "06" + } + } + }, + "gcp_int_det_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_int_det_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_int_det_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAQE9RVV9pOuysUUEGKq0u6ztFM0gTpoOHcHsTFQstA7+L9XTvxWEgL3RgNeq5KtKdODlxl62niV8dnQwlSoDSSWw==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARLnk1LpJIriKr6iiY1yBDGnfkRaHNwWcQyL+mORtYC4+AQ6oMv0qpGrJxS2QCbYY1tGmAISqZHCIExCG+TIv4bw==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARaqYXh9AVZI6gvRZrBwbprE5P3K5Qf4PIK1ca+mLRNOof0EExyAhtku7mYXusLeq0ww/tV6Zt1cA36KsT8a0Nog==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARLXzBjkCN8BpfXDIrb94kuZCD07Uo/DMBfMIWQtAb1++tTheUoY2ClQz33Luh4g8NXwuMJ7h8ufE70N2+b1yrUg==", + "subType": "06" + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAARe44QH9ZvTAuHsWhEMoue8eHod+cJpBm+Kl/Xtw7NI/6UTOOHC5Kkg20EvX3+GwXdAGk0bUSCFiTZb/yPox1OlA==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAARzXjP6d6j/iQxiz1/TC/m+IfAGLFH9wY2ksS//i9x15QttlhcRrT3XmPvxaP5OjTHac4Gq3m2aXiJH56lETyl8A==", + "subType": "06" + } + } + }, + "gcp_long_rand_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAASuGZs48eEyVBJ9vvM6cvRySfuR0WM4kL7lx52rSGXBKtkZywyP5rJwNtRn9WTBMDqc1O/4jUgYXpqHx39SLhUPA==", + "subType": "06" + } + } + }, + "gcp_long_rand_auto_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAS/62F71oKTX1GlvOP89uNhXpIyLZ5OdnuLeM/hvL5HWyOudSb06cG3+xnPg3QgppAYFK5X2PGgrEcrA87AykLPg==", + "subType": "06" + } + } + }, + "gcp_long_rand_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAASSgx+p4YzTvjZ+GCZCFHEKHNXJUSloPnLRHE4iJ515Epb8Tox7h8/aIAkB3ulnDS9BiT5UKdye2TWf8OBEwkXzg==", + "subType": "06" + } + } + }, + "gcp_long_rand_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAAStqszyEfltpgd3aYeoyqaJX27OX861o06VhNX/N2fdSfKx0NQq/hWlWTkX6hK3hjCijiTtHmhFQR6QLkHD/6THw==", + "subType": "06" + } + } + }, + "gcp_long_det_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_long_det_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_long_det_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ARgjwAAAAAAAAAAAAAAAAAAS0wJHtZKnxJlWnlSu0xuq7bZR25UdwcbdCRSaXBC0EXEFuqlzrZSn1lcwKPKGZQO8EQ6SdQDqK95alMLmM8eQrQ==", + "subType": "06" + } + } + }, + "gcp_decimal_rand_auto_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATg4U3nbHBX/Az3ie2yurEIJO6cFryQWKiCpBbx1z0NF7RXd7kFC1XzaY6zcBjfl2AfRO8FFmgjTmFXb6gTRSSF0iAZJZTslfe3n6YFtwSKDI=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_auto_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATdSSyp0ewboV5zI3T3TV/FOrdx0UQbFHhqcH+yqpotoWPSw5dxE+BEoihYLeaPKuVU/rUIY4TUv05Egj7Ovg62Kpk3cPscxsGtE/T2Ppbt6o=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATl7k20T22pf5Y9knVwIDyOIlbHyZBJqyi3Mai8APEZIYjpSKDKs8QNAH69CIjupyge8Izw4Cuch0bRrvMbp6YFfrUgk1JIQ4iLKkqqzHpBTY=", + "subType": "06" + } + } + }, + "gcp_decimal_rand_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AhgjwAAAAAAAAAAAAAAAAAATF7YLkhkuLhXdxrQk2fJTs128tRNYHeodkqw7ha/TxW3Czr5gE272gnkdzfNoS7uu9XwOr1yjrC6y/8gHALAWn77WvGrAlBktLQbIIinsuds=", + "subType": "06" + } + } + }, + "gcp_decimal_det_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_minKey_rand_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_rand_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_maxKey_rand_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_rand_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } } -} +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-azure.json b/test/client-side-encryption/corpus/corpus-key-azure.json new file mode 100644 index 0000000000..31a564edb8 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-azure.json @@ -0,0 +1,33 @@ +{ + "_id": { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["azure"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-gcp.json b/test/client-side-encryption/corpus/corpus-key-gcp.json new file mode 100644 index 0000000000..79d6999b08 --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-gcp.json @@ -0,0 +1,35 @@ +{ + "_id": { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["gcp"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-schema.json b/test/client-side-encryption/corpus/corpus-schema.json index e4838d8aae..f145f712a4 100644 --- a/test/client-side-encryption/corpus/corpus-schema.json +++ b/test/client-side-encryption/corpus/corpus-schema.json @@ -34,11 +34,19 @@ }, "aws_double_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_double_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_rand_auto_id": { "bsonType": "object", @@ -73,11 +81,19 @@ }, "aws_string_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_det_auto_id": { "bsonType": "object", @@ -100,11 +116,19 @@ }, "aws_string_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_string_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_object_rand_auto_id": { "bsonType": "object", @@ -139,11 +163,19 @@ }, "aws_object_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_object_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_array_rand_auto_id": { "bsonType": "object", @@ -178,11 +210,19 @@ }, "aws_array_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_array_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_rand_auto_id": { "bsonType": "object", @@ -217,11 +257,19 @@ }, "aws_binData=00_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_det_auto_id": { "bsonType": "object", @@ -244,11 +292,19 @@ }, "aws_binData=00_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=00_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_rand_auto_id": { "bsonType": "object", @@ -283,11 +339,19 @@ }, "aws_binData=04_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_det_auto_id": { "bsonType": "object", @@ -310,11 +374,19 @@ }, "aws_binData=04_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_binData=04_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_rand_auto_id": { "bsonType": "object", @@ -349,11 +421,19 @@ }, "aws_objectId_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_det_auto_id": { "bsonType": "object", @@ -376,11 +456,19 @@ }, "aws_objectId_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_objectId_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_bool_rand_auto_id": { "bsonType": "object", @@ -415,11 +503,19 @@ }, "aws_bool_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_bool_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_rand_auto_id": { "bsonType": "object", @@ -454,11 +550,19 @@ }, "aws_date_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_det_auto_id": { "bsonType": "object", @@ -481,11 +585,19 @@ }, "aws_date_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_date_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_rand_auto_id": { "bsonType": "object", @@ -520,11 +632,19 @@ }, "aws_regex_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_det_auto_id": { "bsonType": "object", @@ -547,11 +667,19 @@ }, "aws_regex_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_regex_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_rand_auto_id": { "bsonType": "object", @@ -586,11 +714,19 @@ }, "aws_dbPointer_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_det_auto_id": { "bsonType": "object", @@ -613,11 +749,19 @@ }, "aws_dbPointer_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_dbPointer_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_rand_auto_id": { "bsonType": "object", @@ -652,11 +796,19 @@ }, "aws_javascript_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_det_auto_id": { "bsonType": "object", @@ -679,11 +831,19 @@ }, "aws_javascript_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascript_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_rand_auto_id": { "bsonType": "object", @@ -718,11 +878,19 @@ }, "aws_symbol_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_det_auto_id": { "bsonType": "object", @@ -745,11 +913,19 @@ }, "aws_symbol_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_symbol_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascriptWithScope_rand_auto_id": { "bsonType": "object", @@ -784,11 +960,19 @@ }, "aws_javascriptWithScope_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_javascriptWithScope_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_rand_auto_id": { "bsonType": "object", @@ -823,11 +1007,19 @@ }, "aws_int_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_det_auto_id": { "bsonType": "object", @@ -850,11 +1042,19 @@ }, "aws_int_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_int_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_rand_auto_id": { "bsonType": "object", @@ -889,11 +1089,19 @@ }, "aws_timestamp_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_det_auto_id": { "bsonType": "object", @@ -916,11 +1124,19 @@ }, "aws_timestamp_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_timestamp_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_rand_auto_id": { "bsonType": "object", @@ -955,11 +1171,19 @@ }, "aws_long_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_det_auto_id": { "bsonType": "object", @@ -982,11 +1206,19 @@ }, "aws_long_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_long_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_decimal_rand_auto_id": { "bsonType": "object", @@ -1021,11 +1253,19 @@ }, "aws_decimal_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "aws_decimal_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_double_rand_auto_id": { "bsonType": "object", @@ -1060,11 +1300,19 @@ }, "local_double_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_double_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_rand_auto_id": { "bsonType": "object", @@ -1099,11 +1347,19 @@ }, "local_string_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_det_auto_id": { "bsonType": "object", @@ -1126,11 +1382,19 @@ }, "local_string_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_string_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_object_rand_auto_id": { "bsonType": "object", @@ -1165,11 +1429,19 @@ }, "local_object_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_object_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_array_rand_auto_id": { "bsonType": "object", @@ -1204,11 +1476,19 @@ }, "local_array_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_array_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_rand_auto_id": { "bsonType": "object", @@ -1243,11 +1523,19 @@ }, "local_binData=00_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_det_auto_id": { "bsonType": "object", @@ -1270,11 +1558,19 @@ }, "local_binData=00_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=00_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_rand_auto_id": { "bsonType": "object", @@ -1309,11 +1605,19 @@ }, "local_binData=04_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_det_auto_id": { "bsonType": "object", @@ -1336,11 +1640,19 @@ }, "local_binData=04_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_binData=04_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_rand_auto_id": { "bsonType": "object", @@ -1375,11 +1687,19 @@ }, "local_objectId_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_det_auto_id": { "bsonType": "object", @@ -1402,11 +1722,19 @@ }, "local_objectId_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_objectId_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_bool_rand_auto_id": { "bsonType": "object", @@ -1441,11 +1769,19 @@ }, "local_bool_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_bool_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_rand_auto_id": { "bsonType": "object", @@ -1480,11 +1816,19 @@ }, "local_date_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_det_auto_id": { "bsonType": "object", @@ -1507,11 +1851,19 @@ }, "local_date_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_date_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_rand_auto_id": { "bsonType": "object", @@ -1546,11 +1898,19 @@ }, "local_regex_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_det_auto_id": { "bsonType": "object", @@ -1573,11 +1933,19 @@ }, "local_regex_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_regex_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_rand_auto_id": { "bsonType": "object", @@ -1612,11 +1980,19 @@ }, "local_dbPointer_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_det_auto_id": { "bsonType": "object", @@ -1639,11 +2015,19 @@ }, "local_dbPointer_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_dbPointer_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_rand_auto_id": { "bsonType": "object", @@ -1678,11 +2062,19 @@ }, "local_javascript_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_det_auto_id": { "bsonType": "object", @@ -1705,11 +2097,19 @@ }, "local_javascript_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascript_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_rand_auto_id": { "bsonType": "object", @@ -1744,11 +2144,19 @@ }, "local_symbol_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_det_auto_id": { "bsonType": "object", @@ -1771,11 +2179,19 @@ }, "local_symbol_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_symbol_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascriptWithScope_rand_auto_id": { "bsonType": "object", @@ -1810,11 +2226,19 @@ }, "local_javascriptWithScope_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_javascriptWithScope_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_rand_auto_id": { "bsonType": "object", @@ -1849,11 +2273,19 @@ }, "local_int_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_det_auto_id": { "bsonType": "object", @@ -1876,11 +2308,19 @@ }, "local_int_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_int_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_rand_auto_id": { "bsonType": "object", @@ -1915,11 +2355,19 @@ }, "local_timestamp_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_det_auto_id": { "bsonType": "object", @@ -1942,11 +2390,19 @@ }, "local_timestamp_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_timestamp_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_rand_auto_id": { "bsonType": "object", @@ -1981,11 +2437,19 @@ }, "local_long_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_det_auto_id": { "bsonType": "object", @@ -2008,11 +2472,19 @@ }, "local_long_det_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_long_det_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_decimal_rand_auto_id": { "bsonType": "object", @@ -2047,11 +2519,2551 @@ }, "local_decimal_rand_explicit_id": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } }, "local_decimal_rand_explicit_altname": { "bsonType": "object", - "properties": { "value": { "bsonType": "binData" } } + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "azure_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "azure_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "azure_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "azure_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "azure_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "azure_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "azure_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "azure_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "azure_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "azure_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "azure_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "azure_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "azure_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "azure_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "azure_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "azure_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "azure_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "azure_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "azure_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "azure_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "azure_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "azure_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "azure_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "azure_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "azure_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "azure_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "azure_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "azure_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "azure_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "azure_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZUREAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "azure_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_azure", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "azure_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "azure_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "gcp_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "gcp_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "gcp_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "gcp_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "gcp_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "gcp_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "gcp_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "gcp_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "gcp_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "gcp_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "gcp_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "gcp_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "gcp_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "gcp_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "gcp_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "gcp_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "gcp_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "gcp_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "gcp_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "gcp_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "gcp_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "gcp_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "gcp_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "gcp_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "gcp_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "gcp_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCPAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "gcp_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_gcp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "gcp_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "gcp_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } } } -} +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus.json b/test/client-side-encryption/corpus/corpus.json index cbf7a091a1..55bbaf99c2 100644 --- a/test/client-side-encryption/corpus/corpus.json +++ b/test/client-side-encryption/corpus/corpus.json @@ -2,6 +2,8 @@ "_id": "client_side_encryption_corpus", "altname_aws": "aws", "altname_local": "local", + "altname_azure": "azure", + "altname_gcp": "gcp", "aws_double_rand_auto_id": { "kms": "aws", "type": "double", @@ -9,7 +11,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_rand_auto_altname": { "kms": "aws", @@ -18,7 +22,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_rand_explicit_id": { "kms": "aws", @@ -27,7 +33,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_rand_explicit_altname": { "kms": "aws", @@ -36,7 +44,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_det_explicit_id": { "kms": "aws", @@ -45,7 +55,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_double_det_explicit_altname": { "kms": "aws", @@ -54,7 +66,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "aws_string_rand_auto_id": { "kms": "aws", @@ -126,7 +140,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_rand_auto_altname": { "kms": "aws", @@ -135,7 +153,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_rand_explicit_id": { "kms": "aws", @@ -144,7 +166,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_rand_explicit_altname": { "kms": "aws", @@ -153,7 +179,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_det_explicit_id": { "kms": "aws", @@ -162,7 +192,11 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_object_det_explicit_altname": { "kms": "aws", @@ -171,7 +205,11 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "aws_array_rand_auto_id": { "kms": "aws", @@ -181,9 +219,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_rand_auto_altname": { @@ -194,9 +238,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_rand_explicit_id": { @@ -207,9 +257,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_rand_explicit_altname": { @@ -220,9 +276,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_det_explicit_id": { @@ -233,9 +295,15 @@ "identifier": "id", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_array_det_explicit_altname": { @@ -246,9 +314,15 @@ "identifier": "altname", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "aws_binData=00_rand_auto_id": { @@ -258,7 +332,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_rand_auto_altname": { "kms": "aws", @@ -267,7 +346,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_rand_explicit_id": { "kms": "aws", @@ -276,7 +360,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_rand_explicit_altname": { "kms": "aws", @@ -285,7 +374,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_det_auto_id": { "kms": "aws", @@ -294,7 +388,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_det_explicit_id": { "kms": "aws", @@ -303,7 +402,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=00_det_explicit_altname": { "kms": "aws", @@ -312,7 +416,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "aws_binData=04_rand_auto_id": { "kms": "aws", @@ -322,7 +431,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_rand_auto_altname": { @@ -333,7 +445,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_rand_explicit_id": { @@ -344,7 +459,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_rand_explicit_altname": { @@ -355,7 +473,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_det_auto_id": { @@ -366,7 +487,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_det_explicit_id": { @@ -377,7 +501,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_binData=04_det_explicit_altname": { @@ -388,7 +515,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "aws_undefined_rand_explicit_id": { @@ -398,7 +528,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_undefined_rand_explicit_altname": { "kms": "aws", @@ -407,7 +539,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_undefined_det_explicit_id": { "kms": "aws", @@ -416,7 +550,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_undefined_det_explicit_altname": { "kms": "aws", @@ -425,7 +561,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "aws_objectId_rand_auto_id": { "kms": "aws", @@ -434,7 +572,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_rand_auto_altname": { "kms": "aws", @@ -443,7 +583,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_rand_explicit_id": { "kms": "aws", @@ -452,7 +594,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_rand_explicit_altname": { "kms": "aws", @@ -461,7 +605,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_det_auto_id": { "kms": "aws", @@ -470,7 +616,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_det_explicit_id": { "kms": "aws", @@ -479,7 +627,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_objectId_det_explicit_altname": { "kms": "aws", @@ -488,7 +638,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "aws_bool_rand_auto_id": { "kms": "aws", @@ -551,7 +703,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_rand_auto_altname": { "kms": "aws", @@ -560,7 +716,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_rand_explicit_id": { "kms": "aws", @@ -569,7 +729,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_rand_explicit_altname": { "kms": "aws", @@ -578,7 +742,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_det_auto_id": { "kms": "aws", @@ -587,7 +755,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_det_explicit_id": { "kms": "aws", @@ -596,7 +768,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_date_det_explicit_altname": { "kms": "aws", @@ -605,7 +781,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "aws_null_rand_explicit_id": { "kms": "aws", @@ -650,7 +830,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_rand_auto_altname": { "kms": "aws", @@ -659,7 +844,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_rand_explicit_id": { "kms": "aws", @@ -668,7 +858,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_rand_explicit_altname": { "kms": "aws", @@ -677,7 +872,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_det_auto_id": { "kms": "aws", @@ -686,7 +886,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_det_explicit_id": { "kms": "aws", @@ -695,7 +900,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_regex_det_explicit_altname": { "kms": "aws", @@ -704,7 +914,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "aws_dbPointer_rand_auto_id": { "kms": "aws", @@ -716,7 +931,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -730,7 +947,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -744,7 +963,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -758,7 +979,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -772,7 +995,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -786,7 +1011,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -800,7 +1027,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -811,7 +1040,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_rand_auto_altname": { "kms": "aws", @@ -820,7 +1051,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_rand_explicit_id": { "kms": "aws", @@ -829,7 +1062,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_rand_explicit_altname": { "kms": "aws", @@ -838,7 +1073,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_det_auto_id": { "kms": "aws", @@ -847,7 +1084,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_det_explicit_id": { "kms": "aws", @@ -856,7 +1095,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_javascript_det_explicit_altname": { "kms": "aws", @@ -865,7 +1106,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "aws_symbol_rand_auto_id": { "kms": "aws", @@ -874,7 +1117,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_rand_auto_altname": { "kms": "aws", @@ -883,7 +1128,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_rand_explicit_id": { "kms": "aws", @@ -892,7 +1139,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_rand_explicit_altname": { "kms": "aws", @@ -901,7 +1150,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_det_auto_id": { "kms": "aws", @@ -910,7 +1161,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_det_explicit_id": { "kms": "aws", @@ -919,7 +1172,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_symbol_det_explicit_altname": { "kms": "aws", @@ -928,7 +1183,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "aws_javascriptWithScope_rand_auto_id": { "kms": "aws", @@ -937,7 +1194,10 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_rand_auto_altname": { "kms": "aws", @@ -946,7 +1206,10 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_rand_explicit_id": { "kms": "aws", @@ -955,7 +1218,10 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_rand_explicit_altname": { "kms": "aws", @@ -964,7 +1230,10 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_det_explicit_id": { "kms": "aws", @@ -973,7 +1242,10 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_javascriptWithScope_det_explicit_altname": { "kms": "aws", @@ -982,7 +1254,10 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "aws_int_rand_auto_id": { "kms": "aws", @@ -991,7 +1266,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_rand_auto_altname": { "kms": "aws", @@ -1000,7 +1277,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_rand_explicit_id": { "kms": "aws", @@ -1009,7 +1288,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_rand_explicit_altname": { "kms": "aws", @@ -1018,7 +1299,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_det_auto_id": { "kms": "aws", @@ -1027,7 +1310,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_det_explicit_id": { "kms": "aws", @@ -1036,7 +1321,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_int_det_explicit_altname": { "kms": "aws", @@ -1045,7 +1332,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "aws_timestamp_rand_auto_id": { "kms": "aws", @@ -1054,7 +1343,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_rand_auto_altname": { "kms": "aws", @@ -1063,7 +1357,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_rand_explicit_id": { "kms": "aws", @@ -1072,7 +1371,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_rand_explicit_altname": { "kms": "aws", @@ -1081,7 +1385,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_det_auto_id": { "kms": "aws", @@ -1090,7 +1399,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_det_explicit_id": { "kms": "aws", @@ -1099,7 +1413,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_timestamp_det_explicit_altname": { "kms": "aws", @@ -1108,7 +1427,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "aws_long_rand_auto_id": { "kms": "aws", @@ -1117,7 +1441,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_rand_auto_altname": { "kms": "aws", @@ -1126,7 +1452,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_rand_explicit_id": { "kms": "aws", @@ -1135,7 +1463,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_rand_explicit_altname": { "kms": "aws", @@ -1144,7 +1474,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_det_auto_id": { "kms": "aws", @@ -1153,7 +1485,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_det_explicit_id": { "kms": "aws", @@ -1162,7 +1496,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_long_det_explicit_altname": { "kms": "aws", @@ -1171,7 +1507,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "aws_decimal_rand_auto_id": { "kms": "aws", @@ -1180,7 +1518,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_rand_auto_altname": { "kms": "aws", @@ -1189,7 +1529,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_rand_explicit_id": { "kms": "aws", @@ -1198,7 +1540,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_rand_explicit_altname": { "kms": "aws", @@ -1207,7 +1551,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_det_explicit_id": { "kms": "aws", @@ -1216,7 +1562,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_decimal_det_explicit_altname": { "kms": "aws", @@ -1225,7 +1573,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "aws_minKey_rand_explicit_id": { "kms": "aws", @@ -1234,7 +1584,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_minKey_rand_explicit_altname": { "kms": "aws", @@ -1243,7 +1595,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_minKey_det_explicit_id": { "kms": "aws", @@ -1252,7 +1606,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_minKey_det_explicit_altname": { "kms": "aws", @@ -1261,7 +1617,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "aws_maxKey_rand_explicit_id": { "kms": "aws", @@ -1270,7 +1628,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "aws_maxKey_rand_explicit_altname": { "kms": "aws", @@ -1279,7 +1639,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "aws_maxKey_det_explicit_id": { "kms": "aws", @@ -1288,7 +1650,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "aws_maxKey_det_explicit_altname": { "kms": "aws", @@ -1297,7 +1661,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_double_rand_auto_id": { "kms": "local", @@ -1306,7 +1672,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_rand_auto_altname": { "kms": "local", @@ -1315,7 +1683,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_rand_explicit_id": { "kms": "local", @@ -1324,7 +1694,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_rand_explicit_altname": { "kms": "local", @@ -1333,7 +1705,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_det_explicit_id": { "kms": "local", @@ -1342,7 +1716,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDouble": "1.234" } + "value": { + "$numberDouble": "1.234" + } }, "local_double_det_explicit_altname": { "kms": "local", @@ -1351,8 +1727,10 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDouble": "1.234" } - }, + "value": { + "$numberDouble": "1.234" + } + }, "local_string_rand_auto_id": { "kms": "local", "type": "string", @@ -1423,7 +1801,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_rand_auto_altname": { "kms": "local", @@ -1432,7 +1814,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_rand_explicit_id": { "kms": "local", @@ -1441,7 +1827,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_rand_explicit_altname": { "kms": "local", @@ -1450,7 +1840,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_det_explicit_id": { "kms": "local", @@ -1459,7 +1853,11 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_object_det_explicit_altname": { "kms": "local", @@ -1468,7 +1866,11 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "x": { "$numberInt": "1" } } + "value": { + "x": { + "$numberInt": "1" + } + } }, "local_array_rand_auto_id": { "kms": "local", @@ -1478,9 +1880,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_rand_auto_altname": { @@ -1491,9 +1899,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_rand_explicit_id": { @@ -1504,9 +1918,15 @@ "identifier": "id", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_rand_explicit_altname": { @@ -1517,9 +1937,15 @@ "identifier": "altname", "allowed": true, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_det_explicit_id": { @@ -1530,9 +1956,15 @@ "identifier": "id", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_array_det_explicit_altname": { @@ -1543,9 +1975,15 @@ "identifier": "altname", "allowed": false, "value": [ - { "$numberInt": "1" }, - { "$numberInt": "2" }, - { "$numberInt": "3" } + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } ] }, "local_binData=00_rand_auto_id": { @@ -1555,7 +1993,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_rand_auto_altname": { "kms": "local", @@ -1564,7 +2007,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_rand_explicit_id": { "kms": "local", @@ -1573,7 +2021,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_rand_explicit_altname": { "kms": "local", @@ -1582,7 +2035,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_det_auto_id": { "kms": "local", @@ -1591,7 +2049,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_det_explicit_id": { "kms": "local", @@ -1600,7 +2063,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=00_det_explicit_altname": { "kms": "local", @@ -1609,7 +2077,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$binary": { "base64": "AQIDBA==", "subType": "00" } } + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } }, "local_binData=04_rand_auto_id": { "kms": "local", @@ -1619,7 +2092,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_rand_auto_altname": { @@ -1630,7 +2106,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_rand_explicit_id": { @@ -1641,7 +2120,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_rand_explicit_altname": { @@ -1652,7 +2134,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_det_auto_id": { @@ -1663,7 +2148,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_det_explicit_id": { @@ -1674,7 +2162,10 @@ "identifier": "id", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_binData=04_det_explicit_altname": { @@ -1685,7 +2176,10 @@ "identifier": "altname", "allowed": true, "value": { - "$binary": { "base64": "AAECAwQFBgcICQoLDA0ODw==", "subType": "04" } + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } } }, "local_undefined_rand_explicit_id": { @@ -1695,7 +2189,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_undefined_rand_explicit_altname": { "kms": "local", @@ -1704,7 +2200,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_undefined_det_explicit_id": { "kms": "local", @@ -1713,7 +2211,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_undefined_det_explicit_altname": { "kms": "local", @@ -1722,7 +2222,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$undefined": true } + "value": { + "$undefined": true + } }, "local_objectId_rand_auto_id": { "kms": "local", @@ -1731,7 +2233,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_rand_auto_altname": { "kms": "local", @@ -1740,7 +2244,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_rand_explicit_id": { "kms": "local", @@ -1749,7 +2255,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_rand_explicit_altname": { "kms": "local", @@ -1758,7 +2266,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_det_auto_id": { "kms": "local", @@ -1767,7 +2277,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_det_explicit_id": { "kms": "local", @@ -1776,7 +2288,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_objectId_det_explicit_altname": { "kms": "local", @@ -1785,7 +2299,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$oid": "01234567890abcdef0123456" } + "value": { + "$oid": "01234567890abcdef0123456" + } }, "local_bool_rand_auto_id": { "kms": "local", @@ -1848,7 +2364,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_rand_auto_altname": { "kms": "local", @@ -1857,7 +2377,11 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_rand_explicit_id": { "kms": "local", @@ -1866,7 +2390,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_rand_explicit_altname": { "kms": "local", @@ -1875,7 +2403,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_det_auto_id": { "kms": "local", @@ -1884,7 +2416,11 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_det_explicit_id": { "kms": "local", @@ -1893,7 +2429,11 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_date_det_explicit_altname": { "kms": "local", @@ -1902,7 +2442,11 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$date": { "$numberLong": "12345" } } + "value": { + "$date": { + "$numberLong": "12345" + } + } }, "local_null_rand_explicit_id": { "kms": "local", @@ -1947,7 +2491,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_rand_auto_altname": { "kms": "local", @@ -1956,7 +2505,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_rand_explicit_id": { "kms": "local", @@ -1965,7 +2519,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_rand_explicit_altname": { "kms": "local", @@ -1974,7 +2533,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_det_auto_id": { "kms": "local", @@ -1983,7 +2547,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_det_explicit_id": { "kms": "local", @@ -1992,7 +2561,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_regex_det_explicit_altname": { "kms": "local", @@ -2001,7 +2575,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$regularExpression": { "pattern": ".*", "options": "" } } + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } }, "local_dbPointer_rand_auto_id": { "kms": "local", @@ -2013,7 +2592,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2027,7 +2608,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2041,7 +2624,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2055,7 +2640,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2069,7 +2656,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2083,7 +2672,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2097,7 +2688,9 @@ "value": { "$dbPointer": { "$ref": "db.example", - "$id": { "$oid": "01234567890abcdef0123456" } + "$id": { + "$oid": "01234567890abcdef0123456" + } } } }, @@ -2108,7 +2701,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_rand_auto_altname": { "kms": "local", @@ -2117,7 +2712,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_rand_explicit_id": { "kms": "local", @@ -2126,7 +2723,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_rand_explicit_altname": { "kms": "local", @@ -2135,7 +2734,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_det_auto_id": { "kms": "local", @@ -2144,7 +2745,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_det_explicit_id": { "kms": "local", @@ -2153,7 +2756,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_javascript_det_explicit_altname": { "kms": "local", @@ -2162,7 +2767,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1" } + "value": { + "$code": "x=1" + } }, "local_symbol_rand_auto_id": { "kms": "local", @@ -2171,7 +2778,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_rand_auto_altname": { "kms": "local", @@ -2180,7 +2789,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_rand_explicit_id": { "kms": "local", @@ -2189,7 +2800,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_rand_explicit_altname": { "kms": "local", @@ -2198,7 +2811,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_det_auto_id": { "kms": "local", @@ -2207,7 +2822,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_det_explicit_id": { "kms": "local", @@ -2216,7 +2833,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_symbol_det_explicit_altname": { "kms": "local", @@ -2225,7 +2844,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$symbol": "mongodb-symbol" } + "value": { + "$symbol": "mongodb-symbol" + } }, "local_javascriptWithScope_rand_auto_id": { "kms": "local", @@ -2234,7 +2855,10 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_rand_auto_altname": { "kms": "local", @@ -2243,7 +2867,10 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_rand_explicit_id": { "kms": "local", @@ -2252,7 +2879,10 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_rand_explicit_altname": { "kms": "local", @@ -2261,7 +2891,10 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_det_explicit_id": { "kms": "local", @@ -2270,7 +2903,10 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_javascriptWithScope_det_explicit_altname": { "kms": "local", @@ -2279,7 +2915,10 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$code": "x=1", "$scope": {} } + "value": { + "$code": "x=1", + "$scope": {} + } }, "local_int_rand_auto_id": { "kms": "local", @@ -2288,7 +2927,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_rand_auto_altname": { "kms": "local", @@ -2297,7 +2938,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_rand_explicit_id": { "kms": "local", @@ -2306,7 +2949,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_rand_explicit_altname": { "kms": "local", @@ -2315,7 +2960,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_det_auto_id": { "kms": "local", @@ -2324,7 +2971,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_det_explicit_id": { "kms": "local", @@ -2333,7 +2982,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_int_det_explicit_altname": { "kms": "local", @@ -2342,7 +2993,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberInt": "123" } + "value": { + "$numberInt": "123" + } }, "local_timestamp_rand_auto_id": { "kms": "local", @@ -2351,7 +3004,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_rand_auto_altname": { "kms": "local", @@ -2360,7 +3018,12 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_rand_explicit_id": { "kms": "local", @@ -2369,7 +3032,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_rand_explicit_altname": { "kms": "local", @@ -2378,7 +3046,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_det_auto_id": { "kms": "local", @@ -2387,7 +3060,12 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_det_explicit_id": { "kms": "local", @@ -2396,7 +3074,12 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_timestamp_det_explicit_altname": { "kms": "local", @@ -2405,7 +3088,12 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$timestamp": { "t": 0, "i": 12345 } } + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } }, "local_long_rand_auto_id": { "kms": "local", @@ -2414,7 +3102,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_rand_auto_altname": { "kms": "local", @@ -2423,7 +3113,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_rand_explicit_id": { "kms": "local", @@ -2432,7 +3124,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_rand_explicit_altname": { "kms": "local", @@ -2441,7 +3135,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_det_auto_id": { "kms": "local", @@ -2450,7 +3146,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_det_explicit_id": { "kms": "local", @@ -2459,7 +3157,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_long_det_explicit_altname": { "kms": "local", @@ -2468,7 +3168,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberLong": "456" } + "value": { + "$numberLong": "456" + } }, "local_decimal_rand_auto_id": { "kms": "local", @@ -2477,7 +3179,9 @@ "method": "auto", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_rand_auto_altname": { "kms": "local", @@ -2486,7 +3190,9 @@ "method": "auto", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_rand_explicit_id": { "kms": "local", @@ -2495,7 +3201,9 @@ "method": "explicit", "identifier": "id", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_rand_explicit_altname": { "kms": "local", @@ -2504,7 +3212,9 @@ "method": "explicit", "identifier": "altname", "allowed": true, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_det_explicit_id": { "kms": "local", @@ -2513,7 +3223,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_decimal_det_explicit_altname": { "kms": "local", @@ -2522,7 +3234,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$numberDecimal": "1.234" } + "value": { + "$numberDecimal": "1.234" + } }, "local_minKey_rand_explicit_id": { "kms": "local", @@ -2531,7 +3245,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_minKey_rand_explicit_altname": { "kms": "local", @@ -2540,7 +3256,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_minKey_det_explicit_id": { "kms": "local", @@ -2549,7 +3267,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_minKey_det_explicit_altname": { "kms": "local", @@ -2558,7 +3278,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$minKey": 1 } + "value": { + "$minKey": 1 + } }, "local_maxKey_rand_explicit_id": { "kms": "local", @@ -2567,7 +3289,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_maxKey_rand_explicit_altname": { "kms": "local", @@ -2576,7 +3300,9 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_maxKey_det_explicit_id": { "kms": "local", @@ -2585,7 +3311,9 @@ "method": "explicit", "identifier": "id", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } }, "local_maxKey_det_explicit_altname": { "kms": "local", @@ -2594,7 +3322,3331 @@ "method": "explicit", "identifier": "altname", "allowed": false, - "value": { "$maxKey": 1 } + "value": { + "$maxKey": 1 + } + }, + "azure_double_rand_auto_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_auto_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_rand_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_det_explicit_id": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_double_det_explicit_altname": { + "kms": "azure", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "azure_string_rand_auto_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_auto_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_rand_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_auto_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_explicit_id": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "azure_string_det_explicit_altname": { + "kms": "azure", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "azure_object_rand_auto_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_auto_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_rand_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_id": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_object_det_explicit_altname": { + "kms": "azure", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "azure_array_rand_auto_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_auto_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_rand_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_id": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_array_det_explicit_altname": { + "kms": "azure", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "azure_binData=00_rand_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_auto_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_rand_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_auto_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_explicit_id": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=00_det_explicit_altname": { + "kms": "azure", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "azure_binData=04_rand_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_auto_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_rand_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_auto_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_explicit_id": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_binData=04_det_explicit_altname": { + "kms": "azure", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "azure_undefined_rand_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_rand_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_id": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_undefined_det_explicit_altname": { + "kms": "azure", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "azure_objectId_rand_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_auto_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_rand_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_auto_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_explicit_id": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_objectId_det_explicit_altname": { + "kms": "azure", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "azure_bool_rand_auto_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "azure_bool_rand_auto_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "azure_bool_rand_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "azure_bool_rand_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "azure_bool_det_explicit_id": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "azure_bool_det_explicit_altname": { + "kms": "azure", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "azure_date_rand_auto_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_auto_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_rand_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_auto_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_explicit_id": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_date_det_explicit_altname": { + "kms": "azure", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "azure_null_rand_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_rand_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_id": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "azure_null_det_explicit_altname": { + "kms": "azure", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "azure_regex_rand_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_auto_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_rand_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_auto_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_explicit_id": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_regex_det_explicit_altname": { + "kms": "azure", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "azure_dbPointer_rand_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_auto_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_rand_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_auto_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_explicit_id": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_dbPointer_det_explicit_altname": { + "kms": "azure", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "azure_javascript_rand_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_auto_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_rand_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_auto_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_explicit_id": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_javascript_det_explicit_altname": { + "kms": "azure", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "azure_symbol_rand_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_auto_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_rand_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_auto_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_explicit_id": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_symbol_det_explicit_altname": { + "kms": "azure", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "azure_javascriptWithScope_rand_auto_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_auto_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_rand_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_id": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_javascriptWithScope_det_explicit_altname": { + "kms": "azure", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "azure_int_rand_auto_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_auto_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_rand_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_auto_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_explicit_id": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_int_det_explicit_altname": { + "kms": "azure", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "azure_timestamp_rand_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_auto_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_rand_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_auto_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_explicit_id": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_timestamp_det_explicit_altname": { + "kms": "azure", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "azure_long_rand_auto_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_auto_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_rand_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_auto_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_explicit_id": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_long_det_explicit_altname": { + "kms": "azure", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "azure_decimal_rand_auto_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_auto_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_rand_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_id": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_decimal_det_explicit_altname": { + "kms": "azure", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "azure_minKey_rand_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_rand_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_id": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_minKey_det_explicit_altname": { + "kms": "azure", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "azure_maxKey_rand_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_rand_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_id": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "azure_maxKey_det_explicit_altname": { + "kms": "azure", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_double_rand_auto_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_auto_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_rand_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_det_explicit_id": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_double_det_explicit_altname": { + "kms": "gcp", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "gcp_string_rand_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_auto_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_rand_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_auto_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_explicit_id": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "gcp_string_det_explicit_altname": { + "kms": "gcp", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "gcp_object_rand_auto_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_auto_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_rand_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_id": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_object_det_explicit_altname": { + "kms": "gcp", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "gcp_array_rand_auto_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_auto_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_rand_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_id": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_array_det_explicit_altname": { + "kms": "gcp", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "gcp_binData=00_rand_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_auto_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_auto_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_explicit_id": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=00_det_explicit_altname": { + "kms": "gcp", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "gcp_binData=04_rand_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_auto_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_rand_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_auto_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_explicit_id": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_binData=04_det_explicit_altname": { + "kms": "gcp", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "gcp_undefined_rand_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_rand_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_id": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_undefined_det_explicit_altname": { + "kms": "gcp", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "gcp_objectId_rand_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_auto_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_rand_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_auto_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_explicit_id": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_objectId_det_explicit_altname": { + "kms": "gcp", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "gcp_bool_rand_auto_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "gcp_bool_rand_auto_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "gcp_bool_rand_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "gcp_bool_rand_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "gcp_bool_det_explicit_id": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "gcp_bool_det_explicit_altname": { + "kms": "gcp", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "gcp_date_rand_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_auto_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_rand_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_auto_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_explicit_id": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_date_det_explicit_altname": { + "kms": "gcp", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "gcp_null_rand_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_rand_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_id": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "gcp_null_det_explicit_altname": { + "kms": "gcp", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "gcp_regex_rand_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_auto_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_rand_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_auto_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_explicit_id": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_regex_det_explicit_altname": { + "kms": "gcp", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "gcp_dbPointer_rand_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_auto_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_rand_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_auto_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_explicit_id": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_dbPointer_det_explicit_altname": { + "kms": "gcp", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "gcp_javascript_rand_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_auto_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_rand_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_auto_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_explicit_id": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_javascript_det_explicit_altname": { + "kms": "gcp", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "gcp_symbol_rand_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_auto_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_rand_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_auto_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_explicit_id": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_symbol_det_explicit_altname": { + "kms": "gcp", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "gcp_javascriptWithScope_rand_auto_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_auto_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_rand_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_id": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_javascriptWithScope_det_explicit_altname": { + "kms": "gcp", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "gcp_int_rand_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_auto_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_rand_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_auto_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_explicit_id": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_int_det_explicit_altname": { + "kms": "gcp", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "gcp_timestamp_rand_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_auto_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_rand_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_auto_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_explicit_id": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_timestamp_det_explicit_altname": { + "kms": "gcp", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "gcp_long_rand_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_auto_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_rand_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_auto_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_explicit_id": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_long_det_explicit_altname": { + "kms": "gcp", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "gcp_decimal_rand_auto_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_auto_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_rand_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_id": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_decimal_det_explicit_altname": { + "kms": "gcp", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "gcp_minKey_rand_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_rand_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_id": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_minKey_det_explicit_altname": { + "kms": "gcp", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "gcp_maxKey_rand_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_rand_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_id": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "gcp_maxKey_det_explicit_altname": { + "kms": "gcp", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } }, "payload=0,algo=rand": { "kms": "local", @@ -2902,4 +6954,4 @@ "allowed": true, "value": "aaaaaaaaaaaaaaaa" } -} +} \ No newline at end of file diff --git a/test/client-side-encryption/custom/azure-dek.json b/test/client-side-encryption/custom/azure-dek.json index 8e50eca340..e644c971c6 100644 --- a/test/client-side-encryption/custom/azure-dek.json +++ b/test/client-side-encryption/custom/azure-dek.json @@ -1,13 +1,13 @@ { "_id": { "$binary": { - "base64": "As3URE1jRcyHOPjaLWHOXA==", + "base64": "AZURE+AAAAAAAAAAAAAAAA==", "subType": "04" } }, "keyMaterial": { "$binary": { - "base64": "df6fFLZqBsZSnQz2SnTYWNBtznIHktVSDMaidAdL7yVVgxBJQ0DyPZUR2HDQB4hdYym3w4C+VGqzcyTZNJOXn6nJzpGrGlIQMcjv93HE4sP2d245ShQCi1nTkLmMaXN63E2fzltOY3jW7ojf5Z4+r8kxmzyfymmSRgo0w8AF7lUWvFhnBYoE4tE322L31vtAK3Zj8pTPvw8/TcUdMSI9Y669IIzxbMy5yMPmdzpnb8nceUv6/CJoeiLhbt5GgaHqIAv7tHFOY8ZX8ztowMLa3GeAjd9clvzraDTqrfMFYco/kDKAW5iPQQ+Xuy1fP8tyFp0ZwaL/7Ed2sc819j8FTQ==", + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", "subType": "00" } }, @@ -26,8 +26,8 @@ }, "masterKey": { "provider": "azure", - "keyVaultEndpoint": "key-vault-kevinalbs.vault.azure.net", - "keyName": "test-key" - } + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["altname", "azure_altname"] } - diff --git a/test/client-side-encryption/custom/azure-gcp-schema.json b/test/client-side-encryption/custom/azure-gcp-schema.json index 24cf682cd3..441949f6d6 100644 --- a/test/client-side-encryption/custom/azure-gcp-schema.json +++ b/test/client-side-encryption/custom/azure-gcp-schema.json @@ -6,7 +6,7 @@ "encrypt": { "keyId": [{ "$binary": { - "base64": "As3URE1jRcyHOPjaLWHOXA==", + "base64": "AZURE+AAAAAAAAAAAAAAAA==", "subType": "04" } }], @@ -18,7 +18,7 @@ "encrypt": { "keyId": [{ "$binary": { - "base64": "osU8SLxJRHONbl8Oh5o+eg==", + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", "subType": "04" } }], @@ -29,4 +29,3 @@ } } } - diff --git a/test/client-side-encryption/custom/gcp-dek.json b/test/client-side-encryption/custom/gcp-dek.json index 14b895111f..968c8b9176 100644 --- a/test/client-side-encryption/custom/gcp-dek.json +++ b/test/client-side-encryption/custom/gcp-dek.json @@ -1,13 +1,13 @@ { "_id": { "$binary": { - "base64": "osU8SLxJRHONbl8Oh5o+eg==", + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", "subType": "04" } }, "keyMaterial": { "$binary": { - "base64": "CiQAg4LDql74hjYPZ957Z7YpCrD6yTVVXKegflJDstQ/xngTyx0SiQEAkWNo/fjPj6jMNSvEop07/29Fu72QHFDRYM3e/KFHfnMQjKzfxb1yX1dC6MbO5FZG/UNBkXlJgPqbHNVuizea3QC24kV5iOiEb4nTM7+RW+8TfVb6QerWWe6MjC+kNpj4LMVcc1lFfVDeGgpJLyMLNGitrjR16qH8qQTNbGNy0toTL69JUmgS8Q==", + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", "subType": "00" } }, @@ -26,10 +26,10 @@ }, "masterKey": { "provider": "gcp", - "projectId": "csfle-poc", + "projectId": "devprod-drivers", "location": "global", - "keyRing": "test", - "keyName": "quickstart" - } + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": ["altname", "gcp_altname"] } - diff --git a/test/client-side-encryption/spec/azureKMS.json b/test/client-side-encryption/spec/azureKMS.json new file mode 100644 index 0000000000..97af4c8ecf --- /dev/null +++ b/test/client-side-encryption/spec/azureKMS.json @@ -0,0 +1,222 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "azure_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using Azure KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "azure": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_azure": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "datakeys" + }, + "$db": "keyvault" + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/gcpKMS.json b/test/client-side-encryption/spec/gcpKMS.json new file mode 100644 index 0000000000..a715a7d152 --- /dev/null +++ b/test/client-side-encryption/spec/gcpKMS.json @@ -0,0 +1,224 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "gcp_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using GCP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "gcp": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_gcp": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "datakeys" + }, + "$db": "keyvault" + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 8066cb39a3..9eb6260684 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -443,12 +443,20 @@ def test_with_statement(self): # Spec tests - AWS_CREDS = { 'accessKeyId': os.environ.get('FLE_AWS_KEY', ''), 'secretAccessKey': os.environ.get('FLE_AWS_SECRET', '') } +AZURE_CREDS = { + 'tenantId': os.environ.get('FLE_AZURE_TENANTID', ''), + 'clientId': os.environ.get('FLE_AZURE_CLIENTID', ''), + 'clientSecret': os.environ.get('FLE_AZURE_CLIENTSECRET', '')} + +GCP_CREDS = { + 'email': os.environ.get('FLE_GCP_EMAIL', ''), + 'privateKey': _unicode(os.environ.get('FLE_GCP_PRIVATEKEY', ''))} + class TestSpec(SpecRunner): @@ -466,6 +474,14 @@ def parse_auto_encrypt_opts(self, opts): kms_providers['aws'] = AWS_CREDS if not any(AWS_CREDS.values()): self.skipTest('AWS environment credentials are not set') + if 'azure' in kms_providers: + kms_providers['azure'] = AZURE_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest('Azure environment credentials are not set') + if 'gcp' in kms_providers: + kms_providers['gcp'] = GCP_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest('GCP environment credentials are not set') if 'key_vault_namespace' not in opts: opts['key_vault_namespace'] = 'keyvault.datakeys' opts = dict(opts) @@ -556,6 +572,10 @@ def run_scenario(self): base64.b64decode(b'LOCALAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) AWS_KEY_ID = Binary( base64.b64decode(b'AWSAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) +AZURE_KEY_ID = Binary( + base64.b64decode(b'AZUREAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) +GCP_KEY_ID = Binary( + base64.b64decode(b'GCPAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) def create_with_schema(coll, json_schema): @@ -578,120 +598,128 @@ def create_key_vault(vault, *data_keys): class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): + KMS_PROVIDERS = {'aws': AWS_CREDS, + 'azure': AZURE_CREDS, + 'gcp': GCP_CREDS, + 'local': {'key': LOCAL_MASTER_KEY}} + + MASTER_KEYS = { + 'aws': { + 'region': 'us-east-1', + 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-' + '4bd9-9f25-e30687b580d0'}, + 'azure': { + 'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net', + 'keyName': 'key-name-csfle'}, + 'gcp': { + 'projectId': 'devprod-drivers', + 'location': 'global', + 'keyRing': 'key-ring-csfle', + 'keyName': 'key-name-csfle'}, + 'local': None + } + @classmethod - @unittest.skipUnless(all(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any([all(AWS_CREDS.values()), + all(AZURE_CREDS.values()), + all(GCP_CREDS.values())]), + 'No environment credentials are set') def setUpClass(cls): super(TestDataKeyDoubleEncryption, cls).setUpClass() - - @staticmethod - def kms_providers(): - return {'aws': AWS_CREDS, 'local': {'key': LOCAL_MASTER_KEY}} - - def test_data_key(self): - listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) - client.db.coll.drop() - vault = create_key_vault(client.keyvault.datakeys) - self.addCleanup(vault.drop) + cls.listener = OvertCommandListener() + cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client.db.coll.drop() + cls.vault = create_key_vault(cls.client.keyvault.datakeys) # Configure the encrypted field via the local schema_map option. schemas = { - "db.coll": { - "bsonType": "object", - "properties": { - "encrypted_placeholder": { - "encrypt": { - "keyId": "/placeholder", - "bsonType": "string", - "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + "db.coll": { + "bsonType": "object", + "properties": { + "encrypted_placeholder": { + "encrypt": { + "keyId": "/placeholder", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } } - } } - } } opts = AutoEncryptionOpts( - self.kms_providers(), 'keyvault.datakeys', schema_map=schemas) - client_encrypted = rs_or_single_client( + cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas) + cls.client_encrypted = rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation='standard') - self.addCleanup(client_encrypted.close) - - client_encryption = ClientEncryption( - self.kms_providers(), 'keyvault.datakeys', client, OPTS) - self.addCleanup(client_encryption.close) + cls.client_encryption = ClientEncryption( + cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS) - # Local create data key. - listener.reset() - local_datakey_id = client_encryption.create_data_key( - 'local', key_alt_names=['local_altname']) - self.assertBinaryUUID(local_datakey_id) - cmd = listener.results['started'][-1] - self.assertEqual('insert', cmd.command_name) - self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) - docs = list(vault.find({'_id': local_datakey_id})) - self.assertEqual(len(docs), 1) - self.assertEqual(docs[0]['masterKey']['provider'], 'local') + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.vault.drop() + cls.client_encrypted.close() + cls.client_encryption.close() - # Local encrypt by key_id. - local_encrypted = client_encryption.encrypt( - 'hello local', - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=local_datakey_id) - self.assertEncrypted(local_encrypted) - client_encrypted.db.coll.insert_one( - {'_id': 'local', 'value': local_encrypted}) - doc_decrypted = client_encrypted.db.coll.find_one({'_id': 'local'}) - self.assertEqual(doc_decrypted['value'], 'hello local') - - # Local encrypt by key_alt_name. - local_encrypted_altname = client_encryption.encrypt( - 'hello local', - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='local_altname') - self.assertEqual(local_encrypted_altname, local_encrypted) + def setUp(self): + self.listener.reset() - # AWS create data key. - listener.reset() - master_key = { - 'region': 'us-east-1', - 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-' - '9f25-e30687b580d0' - } - aws_datakey_id = client_encryption.create_data_key( - 'aws', master_key=master_key, key_alt_names=['aws_altname']) - self.assertBinaryUUID(aws_datakey_id) - cmd = listener.results['started'][-1] + def run_test(self, provider_name): + # Create data key. + master_key = self.MASTER_KEYS[provider_name] + datakey_id = self.client_encryption.create_data_key( + provider_name, master_key=master_key, + key_alt_names=['%s_altname' % (provider_name,)]) + self.assertBinaryUUID(datakey_id) + cmd = self.listener.results['started'][-1] self.assertEqual('insert', cmd.command_name) self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) - docs = list(vault.find({'_id': aws_datakey_id})) + docs = list(self.vault.find({'_id': datakey_id})) self.assertEqual(len(docs), 1) - self.assertEqual(docs[0]['masterKey']['provider'], 'aws') + self.assertEqual(docs[0]['masterKey']['provider'], provider_name) - # AWS encrypt by key_id. - aws_encrypted = client_encryption.encrypt( - 'hello aws', + # Encrypt by key_id. + encrypted = self.client_encryption.encrypt( + 'hello %s' % (provider_name,), Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=aws_datakey_id) - self.assertEncrypted(aws_encrypted) - client_encrypted.db.coll.insert_one( - {'_id': 'aws', 'value': aws_encrypted}) - doc_decrypted = client_encrypted.db.coll.find_one({'_id': 'aws'}) - self.assertEqual(doc_decrypted['value'], 'hello aws') - - # AWS encrypt by key_alt_name. - aws_encrypted_altname = client_encryption.encrypt( - 'hello aws', + key_id=datakey_id) + self.assertEncrypted(encrypted) + self.client_encrypted.db.coll.insert_one( + {'_id': provider_name, 'value': encrypted}) + doc_decrypted = self.client_encrypted.db.coll.find_one( + {'_id': provider_name}) + self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,)) + + # Encrypt by key_alt_name. + encrypted_altname = self.client_encryption.encrypt( + 'hello %s' % (provider_name,), Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='aws_altname') - self.assertEqual(aws_encrypted_altname, aws_encrypted) + key_alt_name='%s_altname' % (provider_name,)) + self.assertEqual(encrypted_altname, encrypted) # Explicitly encrypting an auto encrypted field. msg = (r'Cannot encrypt element of type binData because schema ' r'requires that type is one of: \[ string \]') with self.assertRaisesRegex(EncryptionError, msg): - client_encrypted.db.coll.insert_one( - {'encrypted_placeholder': local_encrypted}) + self.client_encrypted.db.coll.insert_one( + {'encrypted_placeholder': encrypted}) + + def test_data_key_local(self): + self.run_test('local') + + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_data_key_aws(self): + self.run_test('aws') + + @unittest.skipUnless(all(AZURE_CREDS.values()), + 'Azure environment credentials are not set') + def test_data_key_azure(self): + self.run_test('azure') + + @unittest.skipUnless(all(GCP_CREDS.values()), + 'GCP environment credentials are not set') + def test_data_key_gcp(self): + self.run_test('gcp') class TestExternalKeyVault(EncryptionIntegrationTest): @@ -791,7 +819,10 @@ def setUpClass(cls): @staticmethod def kms_providers(): - return {'aws': AWS_CREDS, 'local': {'key': LOCAL_MASTER_KEY}} + return {'aws': AWS_CREDS, + 'azure': AZURE_CREDS, + 'gcp': GCP_CREDS, + 'local': {'key': LOCAL_MASTER_KEY}} @staticmethod def fix_up_schema(json_schema): @@ -827,7 +858,9 @@ def _test_corpus(self, opts): vault = create_key_vault( self.client.keyvault.datakeys, json_data('corpus', 'corpus-key-local.json'), - json_data('corpus', 'corpus-key-aws.json')) + json_data('corpus', 'corpus-key-aws.json'), + json_data('corpus', 'corpus-key-azure.json'), + json_data('corpus', 'corpus-key-gcp.json')) self.addCleanup(vault.drop) client_encrypted = rs_or_single_client( @@ -843,7 +876,8 @@ def _test_corpus(self, opts): corpus_copied = SON() for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) - if key in ('_id', 'altname_aws', 'altname_local'): + if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp', + 'altname_local'): continue if value['method'] == 'auto': continue @@ -851,12 +885,16 @@ def _test_corpus(self, opts): identifier = value['identifier'] self.assertIn(identifier, ('id', 'altname')) kms = value['kms'] - self.assertIn(kms, ('local', 'aws')) + self.assertIn(kms, ('local', 'aws', 'azure', 'gcp')) if identifier == 'id': if kms == 'local': kwargs = dict(key_id=LOCAL_KEY_ID) - else: + elif kms == 'aws': kwargs = dict(key_id=AWS_KEY_ID) + elif kms == 'azure': + kwargs = dict(key_id=AZURE_KEY_ID) + else: + kwargs = dict(key_id=GCP_KEY_ID) else: kwargs = dict(key_alt_name=kms) @@ -888,7 +926,8 @@ def _test_corpus(self, opts): 'corpus', 'corpus-encrypted.json'), corpus) corpus_encrypted_actual = coll.find_one() for key, value in corpus_encrypted_actual.items(): - if key in ('_id', 'altname_aws', 'altname_local'): + if key in ('_id', 'altname_aws', 'altname_azure', + 'altname_gcp', 'altname_local'): continue if value['algo'] == 'det': @@ -1026,50 +1065,80 @@ def test_06_insert_fails_over_16MiB(self): self.assertIn('object to insert too large', err['errmsg']) - class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" @classmethod - @unittest.skipUnless(all(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any([all(AWS_CREDS.values()), + all(AZURE_CREDS.values()), + all(GCP_CREDS.values())]), + 'No environment credentials are set') def setUpClass(cls): super(TestCustomEndpoint, cls).setUpClass() - cls.client_encryption = ClientEncryption( - {'aws': AWS_CREDS}, 'keyvault.datakeys', client_context.client, OPTS) - def _test_create_data_key(self, master_key): + def setUp(self): + kms_providers = {'aws': AWS_CREDS, + 'azure': AZURE_CREDS, + 'gcp': GCP_CREDS} + self.client_encryption = ClientEncryption( + kms_providers=kms_providers, + key_vault_namespace='keyvault.datakeys', + key_vault_client=client_context.client, + codec_options=OPTS) + + kms_providers_invalid = copy.deepcopy(kms_providers) + kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'example.com:443' + kms_providers_invalid['gcp']['endpoint'] = 'example.com:443' + self.client_encryption_invalid = ClientEncryption( + kms_providers=kms_providers_invalid, + key_vault_namespace='keyvault.datakeys', + key_vault_client=client_context.client, + codec_options=OPTS) + + def tearDown(self): + self.client_encryption.close() + self.client_encryption_invalid.close() + + def run_test_expected_success(self, provider_name, master_key): data_key_id = self.client_encryption.create_data_key( - 'aws', master_key=master_key) + provider_name, master_key=master_key) encrypted = self.client_encryption.encrypt( 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id) self.assertEqual('test', self.client_encryption.decrypt(encrypted)) - def test_02_aws_region_key(self): - self._test_create_data_key({ - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0") - }) + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_01_aws_region_key(self): + self.run_test_expected_success( + 'aws', + {"region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0")}) - def test_03_aws_region_key_endpoint(self): - self._test_create_data_key({ - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com" - }) + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_02_aws_region_key_endpoint(self): + self.run_test_expected_success( + 'aws', + {"region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com"}) - def test_04_aws_region_key_endpoint_port(self): - self._test_create_data_key({ - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:443" - }) + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_03_aws_region_key_endpoint_port(self): + self.run_test_expected_success( + 'aws', + {"region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com:443"}) - def test_05_endpoint_invalid_port(self): + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_04_aws_endpoint_invalid_port(self): master_key = { "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/" @@ -1081,7 +1150,9 @@ def test_05_endpoint_invalid_port(self): 'aws', master_key=master_key) self.assertIsInstance(ctx.exception.cause, socket.error) - def test_05_endpoint_wrong_region(self): + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_05_aws_endpoint_wrong_region(self): master_key = { "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/" @@ -1096,7 +1167,9 @@ def test_05_endpoint_wrong_region(self): self.client_encryption.create_data_key( 'aws', master_key=master_key) - def test_05_endpoint_invalid_host(self): + @unittest.skipUnless(all(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def test_06_aws_endpoint_invalid_host(self): master_key = { "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/" @@ -1107,6 +1180,52 @@ def test_05_endpoint_invalid_host(self): self.client_encryption.create_data_key( 'aws', master_key=master_key) + @unittest.skipUnless(all(AZURE_CREDS.values()), + 'Azure environment credentials are not set') + def test_07_azure(self): + master_key = {'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net', + 'keyName': 'key-name-csfle'} + self.run_test_expected_success('azure', master_key) + + # The full error should be something like: + # "Invalid JSON in KMS response. HTTP status=404. Error: Got parse error at '<', position 0: 'SPECIAL_EXPECTED'" + with self.assertRaisesRegex(EncryptionError, 'parse error'): + self.client_encryption_invalid.create_data_key( + 'azure', master_key=master_key) + + @unittest.skipUnless(all(GCP_CREDS.values()), + 'GCP environment credentials are not set') + def test_08_gcp_valid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "cloudkms.googleapis.com:443"} + self.run_test_expected_success('gcp', master_key) + + # The full error should be something like: + # "Invalid JSON in KMS response. HTTP status=404. Error: Got parse error at '<', position 0: 'SPECIAL_EXPECTED'" + with self.assertRaisesRegex(EncryptionError, 'parse error'): + self.client_encryption_invalid.create_data_key( + 'gcp', master_key=master_key) + + @unittest.skipUnless(all(GCP_CREDS.values()), + 'GCP environment credentials are not set') + def test_09_gcp_invalid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "example.com:443"} + + # The full error should be something like: + # "Invalid KMS response, no access_token returned. HTTP status=200" + with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): + self.client_encryption.create_data_key( + 'gcp', master_key=master_key) + class AzureGCPEncryptionTestMixin(object): DEK = None @@ -1129,12 +1248,12 @@ def _test_explicit(self, expectation): self.addCleanup(client_encryption.close) ciphertext = client_encryption.encrypt( - 'test', + 'string0', algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=Binary.from_uuid(self.DEK['_id'], STANDARD)) self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) - self.assertEqual(client_encryption.decrypt(ciphertext), 'test') + self.assertEqual(client_encryption.decrypt(ciphertext), 'string0') def _test_automatic(self, expectation_extjson, payload): encrypted_db = "db" @@ -1172,16 +1291,10 @@ def _test_automatic(self, expectation_extjson, payload): self.assertEqual(output_doc[key], value) -AZURE_CREDS = { - 'tenantId': os.environ.get('FLE_AZURE_TENANTID', ''), - 'clientId': os.environ.get('FLE_AZURE_CLIENTID', ''), - 'clientSecret': os.environ.get('FLE_AZURE_CLIENTSECRET', '')} - - class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(any(AZURE_CREDS.values()), + @unittest.skipUnless(all(AZURE_CREDS.values()), 'Azure environment credentials are not set') def setUpClass(cls): cls.KMS_PROVIDER_MAP = {'azure': AZURE_CREDS} @@ -1191,28 +1304,23 @@ def setUpClass(cls): def test_explicit(self): return self._test_explicit( - 'AQLN1ERNY0XMhzj42i1hzlwC8/OSU9bHfaQRmmRF5l7d5ZpqJX13qF5zSyExo8N9c1b6uS/LoKrHNzcEMKNrkpi3jf2HiShTFRF0xi8AOD9yfw==') + 'AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==') def test_automatic(self): expected_document_extjson = textwrap.dedent(""" {"secret_azure": { "$binary": { - "base64": "AQLN1ERNY0XMhzj42i1hzlwC8/OSU9bHfaQRmmRF5l7d5ZpqJX13qF5zSyExo8N9c1b6uS/LoKrHNzcEMKNrkpi3jf2HiShTFRF0xi8AOD9yfw==", + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", "subType": "06"} }}""") return self._test_automatic( - expected_document_extjson, {"secret_azure": "test"}) - - -GCP_CREDS = { - 'email': os.environ.get('FLE_GCP_EMAIL', ''), - 'privateKey': _unicode(os.environ.get('FLE_GCP_PRIVATEKEY', ''))} + expected_document_extjson, {"secret_azure": "string0"}) class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(any(GCP_CREDS.values()), + @unittest.skipUnless(all(GCP_CREDS.values()), 'GCP environment credentials are not set') def setUpClass(cls): cls.KMS_PROVIDER_MAP = {'gcp': GCP_CREDS} @@ -1222,17 +1330,17 @@ def setUpClass(cls): def test_explicit(self): return self._test_explicit( - 'AaLFPEi8SURzjW5fDoeaPnoCGcOFAmFOPpn5584VPJJ8iXIgml3YDxMRZD9IWv5otyoft8fBzL1LsDEp0lTeB32cV1gOj0IYeAKHhGIleuHZtA==') + 'ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==') def test_automatic(self): expected_document_extjson = textwrap.dedent(""" {"secret_gcp": { "$binary": { - "base64": "AaLFPEi8SURzjW5fDoeaPnoCGcOFAmFOPpn5584VPJJ8iXIgml3YDxMRZD9IWv5otyoft8fBzL1LsDEp0lTeB32cV1gOj0IYeAKHhGIleuHZtA==", + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", "subType": "06"} }}""") return self._test_automatic( - expected_document_extjson, {"secret_gcp": "test"}) + expected_document_extjson, {"secret_gcp": "string0"}) if __name__ == "__main__": From b009ad7b205344003df6456c22d5285ced45153c Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 4 Nov 2020 10:04:30 -0800 Subject: [PATCH 0234/2111] PYTHON-2416 Properly close client in test_encryption.TestDataKeyDoubleEncryption (#510) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 9eb6260684..28ba331e39 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -655,8 +655,8 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.client.close() cls.vault.drop() + cls.client.close() cls.client_encrypted.close() cls.client_encryption.close() From c92e5520b52a9b2ae482a20903f28c8286e5ddbd Mon Sep 17 00:00:00 2001 From: Chris Cho Date: Wed, 4 Nov 2020 17:00:57 -0500 Subject: [PATCH 0235/2111] DOP-1671: update GTM code (#507) --- doc/_templates/layout.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index ed7851bdef..6141284a48 100644 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -4,14 +4,14 @@ {% if theme_googletag %} - + })(window,document,'script','dataLayer','GTM-GDFN'); {% endif %} From 98205b8384865ba54094ad3d7a062ce3a2baf827 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 Nov 2020 16:22:23 -0800 Subject: [PATCH 0236/2111] PYTHON-2421 Stop testing geoSearch/geoHaystack on MongoDB 4.5+ (#512) --- test/test_collection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/test_collection.py b/test/test_collection.py index 0e2eb53d9c..220ddab3a8 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -450,6 +450,8 @@ def test_index_geo2d(self): index_info = db.test.index_information()['loc_2d'] self.assertEqual([('loc', '2d')], index_info['key']) + # geoSearch was deprecated in 4.4 and removed in 5.0 + @client_context.require_version_max(4, 5) @client_context.require_no_mongos def test_index_haystack(self): db = self.db From f1f8cad86dc5d08c57486c8ce93ccd9945185875 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 Nov 2020 12:21:36 -0800 Subject: [PATCH 0237/2111] PYTHON-2403 Add macOS Python 3.9 release automation (#513) --- .evergreen/build-mac.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index b50afc1976..74d50b0817 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8; do +for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8 3.9; do if [[ $VERSION == "2.7" ]]; then PYTHON=/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python rm -rf build From 30523d282cae2120543bb4a8494c99aa6ceab81f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 12 Nov 2020 11:04:55 -0800 Subject: [PATCH 0238/2111] PYTHON-2415 Fix pickle support for BulkWriteError exceptions (#514) --- pymongo/errors.py | 2 ++ test/test_errors.py | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/pymongo/errors.py b/pymongo/errors.py index 0dfa1f237d..ba6fec5876 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -240,6 +240,8 @@ class BulkWriteError(OperationFailure): def __init__(self, results): super(BulkWriteError, self).__init__( "batch op errors occurred", 65, results) + # For pickle support + self.args = (results,) class InvalidOperation(PyMongoError): diff --git a/test/test_errors.py b/test/test_errors.py index 32d7af3284..5829d01de5 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pickle import sys import traceback sys.path[0:0] = [""] -from pymongo.errors import (NotMasterError, +from pymongo.errors import (BulkWriteError, + EncryptionError, + NotMasterError, OperationFailure) from test import (PyMongoTestCase, unittest) @@ -67,6 +70,37 @@ def test_unicode_strs_not_master_error(self): {"errmsg": u'unicode \U0001f40d'}) self._test_unicode_strs(exc) + def assertPyMongoErrorEqual(self, exc1, exc2): + self.assertEqual(exc1._message, exc2._message) + self.assertEqual(exc1._error_labels, exc2._error_labels) + self.assertEqual(exc1.args, exc2.args) + self.assertEqual(str(exc1), str(exc2)) + + def assertOperationFailureEqual(self, exc1, exc2): + self.assertPyMongoErrorEqual(exc1, exc2) + self.assertEqual(exc1.code, exc2.code) + self.assertEqual(exc1.details, exc2.details) + self.assertEqual(exc1._max_wire_version, exc2._max_wire_version) + + def test_pickle_NotMasterError(self): + exc = NotMasterError("not master test", {"errmsg": "error"}) + self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc))) + + def test_pickle_OperationFailure(self): + exc = OperationFailure('error', code=5, details={}, max_wire_version=7) + self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) + + def test_pickle_BulkWriteError(self): + exc = BulkWriteError({}) + self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) + + def test_pickle_EncryptionError(self): + cause = OperationFailure('error', code=5, details={}, + max_wire_version=7) + exc = EncryptionError(cause) + exc2 = pickle.loads(pickle.dumps(exc)) + self.assertPyMongoErrorEqual(exc, exc2) + self.assertOperationFailureEqual(cause, exc2.cause) if __name__ == "__main__": From 17dca5c504c0b259a91a12b165e7c2675263708d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 16 Nov 2020 15:09:31 -0800 Subject: [PATCH 0239/2111] Revert "PYTHON-1915: Prohibit copying ClientSession objects (#480)" This reverts commit 959039b213ee90e09983475a7db20dd8c523e76d. --- doc/contributors.rst | 1 - pymongo/client_session.py | 3 --- test/test_session.py | 5 ----- 3 files changed, 9 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 74164ffd59..4118d55586 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -88,4 +88,3 @@ The following is a list of people who have contributed to - Terence Honles (terencehonles) - Paul Fisher (thetorpedodog) - Julius Park (juliusgeo) -- Ishmum Jawad Khan (ishmum123) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index e2d7caca58..dec2f4f918 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -827,9 +827,6 @@ def _start_retryable_write(self): self._check_ended() self._server_session.inc_transaction_id() - def __copy__(self): - raise TypeError('A ClientSession cannot be copied, create a new session instead') - class _ServerSession(object): def __init__(self, generation): diff --git a/test/test_session.py b/test/test_session.py index e1db9ccfd1..50dfd8a060 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -779,11 +779,6 @@ def drop_db(): wait_until(drop_db, 'dropped database after w=0 writes') - def test_session_not_copyable(self): - client = self.client - with client.start_session() as s: - self.assertRaises(TypeError, lambda: copy.copy(s)) - class TestCausalConsistency(unittest.TestCase): From 1d651b9be8c155131c669516896fce2b832e6dc7 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 16 Nov 2020 16:01:14 -0800 Subject: [PATCH 0240/2111] BUMP 3.11.1 (#516) --- doc/changelog.rst | 27 ++++++++++++++++++++++----- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 2143c18eac..5d37295825 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,22 +1,39 @@ Changelog ========= -Changes in Version 3.12.0 +Changes in Version 3.11.1 ------------------------- -Version 3.12 adds support for Python 3.9 and includes a number of bug fixes. +Version 3.11.1 adds support for Python 3.9 and includes a number of bugfixes. Highlights include: - Support for Python 3.9. -- New method :class:`bson.json_util.JSONOptions.with_options`. +- Initial support for Azure and GCP KMS providers for client side field level + encryption is in beta. See the docstring for + :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.encryption_options.AutoEncryptionOpts`, + and :mod:`~pymongo.encryption`. **Note: Backwards-breaking changes may be + made before the final release.** +- Fixed a bug where the :class:`bson.json_util.JSONOptions` API did not match + the :class:`bson.codec_options.CodecOptions` API due to the absence of + a :meth:`bson.json_util.JSONOptions.with_options` method. This method has now + been added. +- Fixed a bug which made it impossible to serialize + :class:`~pymongo.errors.BulkWriteError` instances using :mod:`pickle`. +- Fixed a bug wherein PyMongo did not always discard an implicit session after + encountering a network error. +- Fixed a bug where connections created in the background were not + authenticated. +- Fixed a memory leak in the :mod:`bson` module when using a + :class:`~bson.codec_options.TypeRegistry`. Issues Resolved ............... -See the `PyMongo 3.12.0 release notes in JIRA`_ for the list of resolved issues +See the `PyMongo 3.11.1 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594 +.. _PyMongo 3.11.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29997 Changes in Version 3.11.0 ------------------------- diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 315ee2c5b6..399e6ba22e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 1, '.dev1') +version_tuple = (3, 11, 1) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 28ab6f6a37..3bfcb15074 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.1.dev1" +version = "3.11.1" f = open("README.rst") try: From ce40d1191e4a7dd6c6639d6eccc29db4a6b0e714 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 20 Nov 2020 10:27:14 -0800 Subject: [PATCH 0241/2111] BUMP 4.0.dev0 --- doc/changelog.rst | 11 +++++++++++ pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5d37295825..a71b81ff27 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,17 @@ Changelog ========= +Changes in Version 4.0 +---------------------- + +Issues Resolved +............... + +See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 + Changes in Version 3.11.1 ------------------------- diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 399e6ba22e..796ef15db1 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -74,7 +74,7 @@ ALL = 2 """Profile all operations.""" -version_tuple = (3, 11, 1) +version_tuple = (4, 0, '.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 3bfcb15074..b921040454 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ except ImportError: _HAVE_SPHINX = False -version = "3.11.1" +version = "4.0.dev0" f = open("README.rst") try: From 4928b9088d65e994a3f9acf75ed87fe77728e585 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 20 Nov 2020 12:14:11 -0800 Subject: [PATCH 0242/2111] PYTHON-2436 Skip failing bulk insert test on 4.8+ --- test/test_legacy_api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index fb1bd2b8ac..e0a5e2ac8d 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -2112,6 +2112,7 @@ def test_multiple_error_unordered_batch(self): 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, result) + @client_context.require_version_max(4, 8) # PYTHON-2436 def test_large_inserts_ordered(self): big = 'x' * self.coll.database.client.max_bson_size batch = self.coll.initialize_ordered_bulk_op() From 6c92e6c67e339b5201c9fac2d5fa2530c9a0d660 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 20 Nov 2020 18:58:47 -0800 Subject: [PATCH 0243/2111] PYTHON-2433 Fix Python 3 ServerDescription/Exception memory leak (#520) When the SDAM monitor check fails, a ServerDescription is created from the exception. This exception is kept alive via the ServerDescription.error field. Unfortunately, the exception's traceback contains a reference to the previous ServerDescription. Altogether this means that each consecutively failing check leaks memory by building an ever growing chain of ServerDescription -> Exception -> Traceback -> Frame -> ServerDescription -> ... objects. This change breaks the chain and prevents the memory leak by clearing the Exception's __traceback__, __context__, and __cause__ fields. --- pymongo/monitor.py | 12 ++++++++++++ test/test_client.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 81502591bf..27f9bbb992 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -18,6 +18,8 @@ import threading import weakref +from bson.py3compat import PY3 + from pymongo import common, periodic_executor from pymongo.errors import (NotMasterError, OperationFailure, @@ -30,6 +32,14 @@ from pymongo.srv_resolver import _SrvResolver +def _sanitize(error): + """PYTHON-2433 Clear error traceback info.""" + if PY3: + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + class MonitorBase(object): def __init__(self, topology, name, interval, min_interval): """Base class to do periodic work on a background thread. @@ -169,6 +179,7 @@ def _run(self): try: self._server_description = self._check_server() except _OperationCancelled as exc: + _sanitize(exc) # Already closed the connection, wait for the next check. self._server_description = ServerDescription( self._server_description.address, error=exc) @@ -212,6 +223,7 @@ def _check_server(self): except ReferenceError: raise except Exception as error: + _sanitize(error) sd = self._server_description address = sd.address duration = _time() - start diff --git a/test/test_client.py b/test/test_client.py index 19ea1375c2..dc21b357fc 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -57,6 +57,7 @@ from pymongo.driver_info import DriverInfo from pymongo.pool import SocketInfo, _METADATA from pymongo.read_preferences import ReadPreference +from pymongo.server_description import ServerDescription from pymongo.server_selectors import (any_server_selector, writable_server_selector) from pymongo.server_type import SERVER_TYPE @@ -1614,6 +1615,33 @@ def test_direct_connection(self): with self.assertRaises(ConfigurationError): MongoClient(['host1', 'host2'], directConnection=True) + def test_continuous_network_errors(self): + def server_description_count(): + i = 0 + for obj in gc.get_objects(): + try: + if isinstance(obj, ServerDescription): + i += 1 + except ReferenceError: + pass + return i + gc.collect() + with client_knobs(min_heartbeat_interval=0.003): + client = MongoClient( + 'invalid:27017', + heartbeatFrequencyMS=3, + serverSelectionTimeoutMS=100) + initial_count = server_description_count() + self.addCleanup(client.close) + with self.assertRaises(ServerSelectionTimeoutError): + client.test.test.find_one() + gc.collect() + final_count = server_description_count() + # If a bug like PYTHON-2433 is reintroduced then too many + # ServerDescriptions will be kept alive and this test will fail: + # AssertionError: 4 != 22 within 5 delta (18 difference) + self.assertAlmostEqual(initial_count, final_count, delta=5) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From 22a7e8085c5b05896b03a7d65000bbf3c74fa3bb Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 20 Nov 2020 21:45:42 -0800 Subject: [PATCH 0244/2111] PYTHON-2431 Fix MONGODB-AWS auth tests on macOS (#521) --- .evergreen/config.yml | 2 +- .evergreen/run-mongodb-aws-test.sh | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c161aeaa21..5e2ce14d3a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1568,7 +1568,7 @@ axes: variables: skip_EC2_auth_test: true skip_ECS_auth_test: true - python3_binary: python3 + python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 display_name: "RHEL 6.2 (x86_64)" diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index f0d59e960a..e276a82bb4 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -39,7 +39,12 @@ fi # show test output set -x -VIRTUALENV=$(command -v virtualenv) +# Workaround macOS python 3.9 incompatibility with system virtualenv. +if [ $(uname -s) = "Darwin" ]; then + VIRTUALENV="/Library/Frameworks/Python.framework/Versions/3.9/bin/python3 -m virtualenv" +else + VIRTUALENV=$(command -v virtualenv) +fi authtest () { if [ "Windows_NT" = "$OS" ]; then From 92aed33694a2181f446e061e5aa34f2f53d31210 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 20 Nov 2020 22:19:37 -0800 Subject: [PATCH 0245/2111] PYTHON-2433 Skip test_continuous_network_errors on Jython --- test/test_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/test_client.py b/test/test_client.py index dc21b357fc..04831a71c5 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1615,6 +1615,8 @@ def test_direct_connection(self): with self.assertRaises(ConfigurationError): MongoClient(['host1', 'host2'], directConnection=True) + @unittest.skipIf(sys.platform.startswith('java'), + 'Jython does not support gc.get_objects') def test_continuous_network_errors(self): def server_description_count(): i = 0 From 86d58113e52c8331ef83fd5849019c5422d7e62b Mon Sep 17 00:00:00 2001 From: Pascal Corpet Date: Mon, 23 Nov 2020 18:46:33 +0100 Subject: [PATCH 0246/2111] PYTHON-2438 Fix str representation of BulkWriteError (#522) --- pymongo/errors.py | 5 +++-- test/test_errors.py | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pymongo/errors.py b/pymongo/errors.py index ba6fec5876..fd55226ab3 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -240,8 +240,9 @@ class BulkWriteError(OperationFailure): def __init__(self, results): super(BulkWriteError, self).__init__( "batch op errors occurred", 65, results) - # For pickle support - self.args = (results,) + + def __reduce__(self): + return self.__class__, (self.details,) class InvalidOperation(PyMongoError): diff --git a/test/test_errors.py b/test/test_errors.py index 5829d01de5..968c8ebd74 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -93,6 +93,7 @@ def test_pickle_OperationFailure(self): def test_pickle_BulkWriteError(self): exc = BulkWriteError({}) self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) + self.assertIn("batch op errors occurred", str(exc)) def test_pickle_EncryptionError(self): cause = OperationFailure('error', code=5, details={}, From d1fd3f7e982f5d1ae3e8b8b8edeba7e146bb02f8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 23 Nov 2020 15:55:54 -0800 Subject: [PATCH 0247/2111] PYTHON-2363 Rate limit new connection creations via maxConnecting (#511) At most 2 connections can be in the pending state per connection pool. The pending state covers all the work required to setup a new connection including TCP, TLS, and MongoDB authentication. For example, if two threads are currently creating connections, a third thread will wait for either an existing connection to be checked back into the pool or for one of the two threads to finish creating a connection. The change reduces the likelihood of connection storms and improves the driver's ability to reuse existing connections. --- doc/faq.rst | 23 +++- pymongo/common.py | 3 + pymongo/pool.py | 94 ++++++++++++-- ...ol-checkout-maxConnecting-is-enforced.json | 104 +++++++++++++++ .../pool-checkout-maxConnecting-timeout.json | 98 +++++++++++++++ ...out-returned-connection-maxConnecting.json | 119 ++++++++++++++++++ test/test_cmap.py | 21 +++- test/test_pooling.py | 34 +++++ test/utils_spec_runner.py | 4 +- 9 files changed, 477 insertions(+), 23 deletions(-) create mode 100644 test/cmap/pool-checkout-maxConnecting-is-enforced.json create mode 100644 test/cmap/pool-checkout-maxConnecting-timeout.json create mode 100644 test/cmap/pool-checkout-returned-connection-maxConnecting.json diff --git a/doc/faq.rst b/doc/faq.rst index 8e820229ab..dc9973e27b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -58,17 +58,32 @@ to 100. If there are ``maxPoolSize`` connections to a server and all are in use, the next request to that server will wait until one of the connections becomes available. -The client instance opens one additional socket per server in your MongoDB +The client instance opens two additional sockets per server in your MongoDB topology for monitoring the server's state. -For example, a client connected to a 3-node replica set opens 3 monitoring +For example, a client connected to a 3-node replica set opens 6 monitoring sockets. It also opens as many sockets as needed to support a multi-threaded application's concurrent operations on each server, up to ``maxPoolSize``. With a ``maxPoolSize`` of 100, if the application only uses the primary (the default), then only the primary connection pool grows and the total connections -is at most 103. If the application uses a +is at most 106. If the application uses a :class:`~pymongo.read_preferences.ReadPreference` to query the secondaries, -their pools also grow and the total connections can reach 303. +their pools also grow and the total connections can reach 306. + +Additionally, the pools are rate limited such that each connection pool can +only create at most 2 connections in parallel at any time. The connection +creation covers covers all the work required to setup a new connection +including DNS, TCP, SSL/TLS, MongoDB handshake, and MongoDB authentication. +For example, if three threads concurrently attempt to check out a connection +from an empty pool, the first two threads will begin creating new connections +while the third thread will wait. The third thread stops waiting when either: + +- one of the first two threads finishes creating a connection, or +- an existing connection is checked back into the pool. + +Rate limiting concurrent connection creation reduces the likelihood of +connection storms and improves the driver's ability to reuse existing +connections. It is possible to set the minimum number of concurrent connections to each server with ``minPoolSize``, which defaults to 0. The connection pool will be diff --git a/pymongo/common.py b/pymongo/common.py index cab9d526ce..81555ef392 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -89,6 +89,9 @@ # Default value for minPoolSize. MIN_POOL_SIZE = 0 +# The maximum number of concurrent connection creation attempts per pool. +MAX_CONNECTING = 2 + # Default value for maxIdleTimeMS. MAX_IDLE_TIME_MS = None diff --git a/pymongo/pool.py b/pymongo/pool.py index 9aed758456..529e359342 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -28,11 +28,12 @@ IPADDR_SAFE as _IPADDR_SAFE) from bson import DEFAULT_CODEC_OPTIONS -from bson.py3compat import imap, itervalues, _unicode +from bson.py3compat import imap, itervalues, _unicode, PY3 from bson.son import SON from pymongo import auth, helpers, thread_util, __version__ from pymongo.client_session import _validate_session_write_concern from pymongo.common import (MAX_BSON_SIZE, + MAX_CONNECTING, MAX_IDLE_TIME_SEC, MAX_MESSAGE_SIZE, MAX_POOL_SIZE, @@ -285,6 +286,20 @@ def _raise_connection_failure(address, error, msg_prefix=None): else: raise AutoReconnect(msg) +if PY3: + def _cond_wait(condition, deadline): + timeout = deadline - _time() if deadline else None + return condition.wait(timeout) +else: + def _cond_wait(condition, deadline): + timeout = deadline - _time() if deadline else None + condition.wait(timeout) + # Python 2.7 always returns False for wait(), + # manually check for a timeout. + if timeout and _time() >= deadline: + return False + return True + class PoolOptions(object): @@ -294,7 +309,7 @@ class PoolOptions(object): '__wait_queue_timeout', '__wait_queue_multiple', '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', '__event_listeners', '__appname', '__driver', '__metadata', - '__compression_settings') + '__compression_settings', '__max_connecting') def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, @@ -303,7 +318,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, wait_queue_multiple=None, ssl_context=None, ssl_match_hostname=True, socket_keepalive=True, event_listeners=None, appname=None, driver=None, - compression_settings=None): + compression_settings=None, max_connecting=MAX_CONNECTING): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size @@ -319,6 +334,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__appname = appname self.__driver = driver self.__compression_settings = compression_settings + self.__max_connecting = max_connecting self.__metadata = copy.deepcopy(_METADATA) if appname: self.__metadata['application'] = {'name': appname} @@ -357,6 +373,8 @@ def non_default_options(self): opts['maxIdleTimeMS'] = self.__max_idle_time_seconds * 1000 if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: opts['waitQueueTimeoutMS'] = self.__wait_queue_timeout * 1000 + if self.__max_connecting != MAX_CONNECTING: + opts['maxConnecting'] = self.__max_connecting return opts @property @@ -381,6 +399,13 @@ def min_pool_size(self): """ return self.__min_pool_size + @property + def max_connecting(self): + """The maximum number of concurrent connection creation attempts per + pool. Defaults to 2. + """ + return self.__max_connecting + @property def max_idle_time_seconds(self): """The maximum number of seconds that a connection can remain @@ -1080,6 +1105,9 @@ def __init__(self, address, options, handshake=True): self._socket_semaphore = thread_util.create_semaphore( self.opts.max_pool_size, max_waiters) + self._max_connecting_cond = threading.Condition(self.lock) + self._max_connecting = self.opts.max_connecting + self._pending = 0 if self.enabled_for_cmap: self.opts.event_listeners.publish_pool_created( self.address, self.opts.non_default_options) @@ -1143,21 +1171,34 @@ def remove_stale_sockets(self, reference_generation, all_credentials): if (len(self.sockets) + self.active_sockets >= self.opts.min_pool_size): # There are enough sockets in the pool. - break + return # We must acquire the semaphore to respect max_pool_size. if not self._socket_semaphore.acquire(False): - break + return + incremented = False try: + with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True sock_info = self.connect(all_credentials) with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. if self.generation != reference_generation: sock_info.close_socket(ConnectionClosedReason.STALE) - break + return self.sockets.appendleft(sock_info) finally: + if incremented: + # Notify after adding the socket to the pool. + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() self._socket_semaphore.release() def connect(self, all_credentials=None): @@ -1260,6 +1301,10 @@ def _get_socket(self, all_credentials): 'pool') # Get a free socket or create one. + if self.opts.wait_queue_timeout: + deadline = _time() + self.opts.wait_queue_timeout + else: + deadline = None if not self._socket_semaphore.acquire( True, self.opts.wait_queue_timeout): self._raise_wait_queue_timeout() @@ -1267,21 +1312,42 @@ def _get_socket(self, all_credentials): # We've now acquired the semaphore and must release it on error. sock_info = None incremented = False + emitted_event = False try: with self.lock: self.active_sockets += 1 incremented = True while sock_info is None: - try: - with self.lock: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + with self._max_connecting_cond: + while not (self.sockets or + self._pending < self._max_connecting): + if not _cond_wait(self._max_connecting_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if (self.sockets or + self._pending < self._max_connecting): + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout() + + try: sock_info = self.sockets.popleft() - except IndexError: - # Can raise ConnectionFailure or CertificateError. - sock_info = self.connect(all_credentials) - else: + except IndexError: + self._pending += 1 + if sock_info: # We got a socket from the pool if self._perished(sock_info): sock_info = None + continue + else: # We need to create a new connection + try: + sock_info = self.connect(all_credentials) + finally: + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() sock_info.check_auth(all_credentials) except Exception: if sock_info: @@ -1293,7 +1359,7 @@ def _get_socket(self, all_credentials): with self.lock: self.active_sockets -= 1 - if self.enabled_for_cmap: + if self.enabled_for_cmap and not emitted_event: self.opts.event_listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.CONN_ERROR) raise @@ -1324,6 +1390,8 @@ def return_socket(self, sock_info): sock_info.update_last_checkin_time() sock_info.update_is_writable(self.is_writable) self.sockets.appendleft(sock_info) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() self._socket_semaphore.release() with self.lock: diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..358e0801b8 --- /dev/null +++ b/test/cmap/pool-checkout-maxConnecting-is-enforced.json @@ -0,0 +1,104 @@ +{ + "version": 1, + "style": "integration", + "description": "maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "wait", + "thread": "thread2", + "ms": 100 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "wait", + "thread": "thread3", + "ms": 100 + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 3 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-checkout-maxConnecting-timeout.json b/test/cmap/pool-checkout-maxConnecting-timeout.json new file mode 100644 index 0000000000..ef71216efc --- /dev/null +++ b/test/cmap/pool-checkout-maxConnecting-timeout.json @@ -0,0 +1,98 @@ +{ + "version": 1, + "style": "integration", + "description": "waiting on maxConnecting is limited by WaitQueueTimeoutMS", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 50 + }, + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 2 + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "waitForThread", + "target": "thread3" + } + ], + "error": { + "type": "WaitQueueTimeoutError", + "message": "Timed out while checking out a connection from connection pool" + }, + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "timeout", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-checkout-returned-connection-maxConnecting.json b/test/cmap/pool-checkout-returned-connection-maxConnecting.json new file mode 100644 index 0000000000..308d640f0e --- /dev/null +++ b/test/cmap/pool-checkout-returned-connection-maxConnecting.json @@ -0,0 +1,119 @@ +{ + "version": 1, + "style": "integration", + "description": "threads blocked by maxConnecting check out returned connections", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750 + } + }, + "poolOptions": { + "maxPoolSize": 10, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "checkOut", + "label": "conn0" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "wait", + "ms": 100 + }, + { + "name": "checkIn", + "connection": "conn0" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 4 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42, + "connectionId": 1 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionClosed", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} diff --git a/test/test_cmap.py b/test/test_cmap.py index bd22cdd729..b217087432 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -14,14 +14,14 @@ """Execute Transactions Spec tests.""" -import functools import os import sys import time -import threading sys.path[0:0] = [""] +from bson.son import SON + from pymongo.errors import (ConnectionFailure, OperationFailure, PyMongoError) @@ -184,13 +184,28 @@ def check_error(self, actual, expected): self.check_object(actual, expected) self.assertIn(message, str(actual)) + def _set_fail_point(self, client, command_args): + cmd = SON([('configureFailPoint', 'failCommand')]) + cmd.update(command_args) + client.admin.command(cmd) + + def set_fail_point(self, command_args): + self._set_fail_point(self.client, command_args) + def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" self.assertEqual(scenario_def['version'], 1) - self.assertEqual(scenario_def['style'], 'unit') + self.assertIn(scenario_def['style'], ['unit', 'integration']) self.listener = CMAPListener() self._ops = [] + # Configure the fail point before creating the client. + if 'failPoint' in test: + fp = test['failPoint'] + self.set_fail_point(fp) + self.addCleanup(self.set_fail_point, { + 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + opts = test['poolOptions'].copy() opts['event_listeners'] = [self.listener] client = single_client(**opts) diff --git a/test/test_pooling.py b/test/test_pooling.py index b1728d791a..156d28103c 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -379,6 +379,40 @@ def test_no_wait_queue_multiple(self): for socket_info in socks: socket_info.close_socket(None) + def test_maxConnecting(self): + client = rs_or_single_client() + self.addCleanup(client.close) + pool = get_pool(client) + docs = [] + + # Run 50 short running operations + def find_one(): + docs.append(client.test.test.find_one({'$where': delay(0.001)})) + threads = [threading.Thread(target=find_one) for _ in range(50)] + for thread in threads: + thread.start() + for thread in threads: + thread.join(10) + + self.assertEqual(len(docs), 50) + self.assertLessEqual(len(pool.sockets), 50) + # TLS and auth make connection establishment more expensive than + # the artificially delayed query which leads to more threads + # hitting maxConnecting. The end result is fewer total connections + # and better latency. + if client_context.tls and client_context.auth_enabled: + self.assertLessEqual(len(pool.sockets), 30) + else: + self.assertLessEqual(len(pool.sockets), 50) + # MongoDB 4.4.1 with auth + ssl: + # maxConnecting = 2: 6 connections in ~0.231+ seconds + # maxConnecting = unbounded: 50 connections in ~0.642+ seconds + # + # MongoDB 4.4.1 with no-auth no-ssl Python 3.8: + # maxConnecting = 2: 15-22 connections in ~0.108+ seconds + # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds + print(len(pool.sockets)) + class TestPoolMaxSize(_TestPoolingBase): def test_max_pool_size(self): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4ab4d1d104..a15537c718 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -121,11 +121,9 @@ def _set_fail_point(self, client, command_args): client.admin.command(cmd) def set_fail_point(self, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) - cmd.update(command_args) clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - self._set_fail_point(client, cmd) + self._set_fail_point(client, command_args) def targeted_fail_point(self, session, fail_point): """Run the targetedFailPoint test operation. From 807ab5ac9c153039b17bf5ffe4cd0d1d900c6631 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 23 Nov 2020 18:46:58 -0800 Subject: [PATCH 0248/2111] PYTHON-2363 Skip CMAP test when failCommand is not supported (#523) --- test/test_cmap.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/test_cmap.py b/test/test_cmap.py index b217087432..95de866f1f 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -190,6 +190,8 @@ def _set_fail_point(self, client, command_args): client.admin.command(cmd) def set_fail_point(self, command_args): + if not client_context.supports_failCommand_fail_point: + self.skipTest('failCommand fail point must be supported') self._set_fail_point(self.client, command_args) def run_scenario(self, scenario_def, test): From 4119d35d040df5abefc67f05791f352bd1efe6e4 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 24 Nov 2020 12:11:22 -0800 Subject: [PATCH 0249/2111] PYTHON-2440 Workaround namedtuple._asdict() bug on Python 3.4 (#525) --- bson/codec_options.py | 13 ++++++++++++- bson/json_util.py | 12 +++++++++++- test/test_client.py | 3 +++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 0db900f59b..8b8482905d 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -295,6 +295,17 @@ def _arguments_repr(self): self.unicode_decode_error_handler, self.tzinfo, self.type_registry)) + def _options_dict(self): + """Dictionary of the arguments used to create this object.""" + # TODO: PYTHON-2442 use _asdict() instead + return { + 'document_class': self.document_class, + 'tz_aware': self.tz_aware, + 'uuid_representation': self.uuid_representation, + 'unicode_decode_error_handler': self.unicode_decode_error_handler, + 'tzinfo': self.tzinfo, + 'type_registry': self.type_registry} + def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._arguments_repr()) @@ -310,7 +321,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ - opts = self._asdict() + opts = self._options_dict() opts.update(kwargs) return CodecOptions(**opts) diff --git a/bson/json_util.py b/bson/json_util.py index 1eef9270ef..38c39a12c8 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -311,6 +311,16 @@ def _arguments_repr(self): self.json_mode, super(JSONOptions, self)._arguments_repr())) + def _options_dict(self): + # TODO: PYTHON-2442 use _asdict() instead + options_dict = super(JSONOptions, self)._options_dict() + options_dict.update({ + 'strict_number_long': self.strict_number_long, + 'datetime_representation': self.datetime_representation, + 'strict_uuid': self.strict_uuid, + 'json_mode': self.json_mode}) + return options_dict + def with_options(self, **kwargs): """ Make a copy of this JSONOptions, overriding some options:: @@ -324,7 +334,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.12 """ - opts = self._asdict() + opts = self._options_dict() for opt in ('strict_number_long', 'datetime_representation', 'strict_uuid', 'json_mode'): opts[opt] = kwargs.get(opt, getattr(self, opt)) diff --git a/test/test_client.py b/test/test_client.py index 04831a71c5..f02a075b3a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -280,6 +280,9 @@ def test_read_preference(self): readpreference=ReadPreference.NEAREST.mongos_mode) self.assertEqual(c.read_preference, ReadPreference.NEAREST) + @unittest.skipIf( + sys.version_info[0] == 3 and sys.version_info[1] == 4, + "PYTHON-2442: workaround namedtuple._asdict() bug on Python 3.4") def test_metadata(self): metadata = copy.deepcopy(_METADATA) metadata['application'] = {'name': 'foobar'} From 5625860688ff7ede833c789bd6713c3ec675f12d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 1 Dec 2020 07:57:30 -1000 Subject: [PATCH 0250/2111] PYTHON-2443 Fix TypeError when pyOpenSSL socket has timeout of None (#527) --- pymongo/socket_checker.py | 5 +++-- test/test_pooling.py | 36 +++++++++++++++++++++++++++++++++++- 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 886782b78b..48f168be48 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -41,7 +41,7 @@ def __init__(self): self._poller = None def select(self, sock, read=False, write=False, timeout=0): - """Select for reads or writes with a timeout in seconds. + """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. """ @@ -57,7 +57,8 @@ def select(self, sock, read=False, write=False, timeout=0): try: # poll() timeout is in milliseconds. select() # timeout is in seconds. - res = self._poller.poll(timeout * 1000) + timeout_ = None if timeout is None else timeout * 1000 + res = self._poller.poll(timeout_) # poll returns a possibly-empty list containing # (fd, event) 2-tuples for the descriptors that have # events or errors to report. Return True if the list diff --git a/test/test_pooling.py b/test/test_pooling.py index 156d28103c..d103991600 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -21,7 +21,10 @@ import threading import time -from pymongo import MongoClient +from bson.son import SON +from bson.codec_options import DEFAULT_CODEC_OPTIONS + +from pymongo import MongoClient, message from pymongo.errors import (AutoReconnect, ConnectionFailure, DuplicateKeyError, @@ -259,6 +262,37 @@ def test_socket_closed(self): s.close() self.assertTrue(socket_checker.socket_closed(s)) + def test_socket_checker(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((client_context.host, client_context.port)) + socket_checker = SocketChecker() + # Socket has nothing to read. + self.assertFalse(socket_checker.select(s, read=True)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0)) + self.assertFalse(socket_checker.select(s, read=True, timeout=.05)) + # Socket is writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) + # Make the socket readable + _, msg, _ = message.query( + 0, 'admin.$cmd', 0, -1, SON([('isMaster', 1)]), None, + DEFAULT_CODEC_OPTIONS) + s.sendall(msg) + # Block until the socket is readable. + self.assertTrue(socket_checker.select(s, read=True, timeout=None)) + self.assertTrue(socket_checker.select(s, read=True)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0)) + self.assertTrue(socket_checker.select(s, read=True, timeout=.05)) + # Socket is still writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + def test_return_socket_after_reset(self): pool = self.create_pool() with pool.get_socket({}) as sock: From ac07e0f4e22e2f52ec9a3305dbaa81449e6516d8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Dec 2020 08:59:33 -1000 Subject: [PATCH 0251/2111] PYTHON-2447 Fix race in CMAP maxConnecting test (#529) --- ...ol-checkout-maxConnecting-is-enforced.json | 21 +++++++++---------- test/test_cmap.py | 9 ++++++-- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-maxConnecting-is-enforced.json index 358e0801b8..4b67b73add 100644 --- a/test/cmap/pool-checkout-maxConnecting-is-enforced.json +++ b/test/cmap/pool-checkout-maxConnecting-is-enforced.json @@ -30,32 +30,31 @@ "name": "start", "target": "thread1" }, - { - "name": "checkOut", - "thread": "thread1" - }, { "name": "start", "target": "thread2" }, { - "name": "wait", - "thread": "thread2", - "ms": 100 + "name": "start", + "target": "thread3" }, { "name": "checkOut", - "thread": "thread2" + "thread": "thread1" }, { - "name": "start", - "target": "thread3" + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 }, { "name": "wait", - "thread": "thread3", "ms": 100 }, + { + "name": "checkOut", + "thread": "thread2" + }, { "name": "checkOut", "thread": "thread3" diff --git a/test/test_cmap.py b/test/test_cmap.py index 95de866f1f..ef40d07bf1 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -172,6 +172,8 @@ def check_events(self, events, ignore): """Check the events of a test.""" actual_events = self.actual_events(ignore) for actual, expected in zip(actual_events, events): + self.logs.append('Checking event actual: %r vs expected: %r' % ( + actual, expected)) self.check_event(actual, expected) if len(events) > len(actual_events): @@ -196,6 +198,7 @@ def set_fail_point(self, command_args): def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" + self.logs = [] self.assertEqual(scenario_def['version'], 1) self.assertIn(scenario_def['style'], ['unit', 'integration']) self.listener = CMAPListener() @@ -240,8 +243,7 @@ def cleanup(): self.check_events(test['events'], test['ignore']) except Exception: # Print the events after a test failure. - print() - print('Failed test: %r' % (test['description'],)) + print('\nFailed test: %r' % (test['description'],)) print('Operations:') for op in self._ops: print(op) @@ -252,6 +254,9 @@ def cleanup(): print('Events:') for event in self.listener.events: print(event) + print('Log:') + for log in self.logs: + print(log) raise POOL_OPTIONS = { From e95d2187b64d37973f62ad1afd223cebfc485a9b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Dec 2020 10:41:39 -1000 Subject: [PATCH 0252/2111] PYTHON-2395 Consider connection pool health during server selection (#515) Change the driver to maintain a count of in-progress operations to each server (per client). When selecting a mongos server, the driver now picks 2 suitable servers at random and selects the server with fewer in-progress operations. Previously, the driver selected a mongos server at random. The new behavior is intended to route operations away from unhealthy or slow servers in highly concurrent single client workloads. PYTHON-2460 Only reset Pool.active_sockets to 0 after a fork() --- pymongo/pool.py | 13 +- pymongo/topology.py | 12 +- .../in_window/equilibrium.json | 46 +++++ .../in_window/many-choices.json | 106 ++++++++++++ .../in_window/one-least-two-tied.json | 46 +++++ .../in_window/rs-equilibrium.json | 46 +++++ .../in_window/rs-three-choices.json | 46 +++++ .../in_window/three-choices.json | 46 +++++ .../in_window/two-choices.json | 36 ++++ .../server_selection/in_window/two-least.json | 46 +++++ test/test_server_selection_in_window.py | 157 ++++++++++++++++++ test/utils.py | 5 +- test/utils_selection_tests.py | 58 ++++--- 13 files changed, 632 insertions(+), 31 deletions(-) create mode 100644 test/server_selection/in_window/equilibrium.json create mode 100644 test/server_selection/in_window/many-choices.json create mode 100644 test/server_selection/in_window/one-least-two-tied.json create mode 100644 test/server_selection/in_window/rs-equilibrium.json create mode 100644 test/server_selection/in_window/rs-three-choices.json create mode 100644 test/server_selection/in_window/three-choices.json create mode 100644 test/server_selection/in_window/two-choices.json create mode 100644 test/server_selection/in_window/two-least.json create mode 100644 test/test_server_selection_in_window.py diff --git a/pymongo/pool.py b/pymongo/pool.py index 529e359342..20164bb8aa 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1111,15 +1111,20 @@ def __init__(self, address, options, handshake=True): if self.enabled_for_cmap: self.opts.event_listeners.publish_pool_created( self.address, self.opts.non_default_options) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count = 0 def _reset(self, close): with self.lock: if self.closed: return self.generation += 1 - self.pid = os.getpid() + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 sockets, self.sockets = self.sockets, collections.deque() - self.active_sockets = 0 if close: self.closed = True @@ -1300,6 +1305,9 @@ def _get_socket(self, all_credentials): 'Attempted to check out a connection from closed connection ' 'pool') + with self.lock: + self.operation_count += 1 + # Get a free socket or create one. if self.opts.wait_queue_timeout: deadline = _time() + self.opts.wait_queue_timeout @@ -1396,6 +1404,7 @@ def return_socket(self, sock_info): self._socket_semaphore.release() with self.lock: self.active_sockets -= 1 + self.operation_count -= 1 def _perished(self, sock_info): """Return True and close the connection if it is "perished". diff --git a/pymongo/topology.py b/pymongo/topology.py index eb84a344e0..20b8bbc082 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -238,9 +238,15 @@ def select_server(self, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" - return random.choice(self.select_servers(selector, - server_selection_timeout, - address)) + servers = self.select_servers( + selector, server_selection_timeout, address) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 def select_server_by_address(self, address, server_selection_timeout=None): diff --git a/test/server_selection/in_window/equilibrium.json b/test/server_selection/in_window/equilibrium.json new file mode 100644 index 0000000000..c5f177d49b --- /dev/null +++ b/test/server_selection/in_window/equilibrium.json @@ -0,0 +1,46 @@ +{ + "description": "When in equilibrium selection is evenly distributed", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 5 + }, + { + "address": "b:27017", + "operation_count": 5 + }, + { + "address": "c:27017", + "operation_count": 5 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.33, + "b:27017": 0.33, + "c:27017": 0.33 + } + } +} diff --git a/test/server_selection/in_window/many-choices.json b/test/server_selection/in_window/many-choices.json new file mode 100644 index 0000000000..7e940513ef --- /dev/null +++ b/test/server_selection/in_window/many-choices.json @@ -0,0 +1,106 @@ +{ + "description": "Selections from many choices occur at correct frequencies", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "d:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "e:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "f:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "g:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "i:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 0 + }, + { + "address": "b:27017", + "operation_count": 5 + }, + { + "address": "c:27017", + "operation_count": 5 + }, + { + "address": "d:27017", + "operation_count": 10 + }, + { + "address": "e:27017", + "operation_count": 10 + }, + { + "address": "f:27017", + "operation_count": 20 + }, + { + "address": "g:27017", + "operation_count": 20 + }, + { + "address": "h:27017", + "operation_count": 50 + }, + { + "address": "i:27017", + "operation_count": 60 + } + ], + "iterations": 10000, + "outcome": { + "tolerance": 0.03, + "expected_frequencies": { + "a:27017": 0.22, + "b:27017": 0.18, + "c:27017": 0.18, + "d:27017": 0.125, + "e:27017": 0.125, + "f:27017": 0.074, + "g:27017": 0.074, + "h:27017": 0.0277, + "i:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/one-least-two-tied.json b/test/server_selection/in_window/one-least-two-tied.json new file mode 100644 index 0000000000..ed7526e716 --- /dev/null +++ b/test/server_selection/in_window/one-least-two-tied.json @@ -0,0 +1,46 @@ +{ + "description": "Least operations gets most selections, two tied share the rest", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 16 + }, + { + "address": "b:27017", + "operation_count": 10 + }, + { + "address": "c:27017", + "operation_count": 16 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.165, + "b:27017": 0.66, + "c:27017": 0.165 + } + } +} diff --git a/test/server_selection/in_window/rs-equilibrium.json b/test/server_selection/in_window/rs-equilibrium.json new file mode 100644 index 0000000000..61c6687e50 --- /dev/null +++ b/test/server_selection/in_window/rs-equilibrium.json @@ -0,0 +1,46 @@ +{ + "description": "When in equilibrium selection is evenly distributed (replica set)", + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 6 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 6 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.33, + "b:27017": 0.33, + "c:27017": 0.33 + } + } +} diff --git a/test/server_selection/in_window/rs-three-choices.json b/test/server_selection/in_window/rs-three-choices.json new file mode 100644 index 0000000000..3fdc15205c --- /dev/null +++ b/test/server_selection/in_window/rs-three-choices.json @@ -0,0 +1,46 @@ +{ + "description": "Selections from three servers occur at proper distributions (replica set)", + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "RSSecondary" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 3 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 20 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.66, + "b:27017": 0.33, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/three-choices.json b/test/server_selection/in_window/three-choices.json new file mode 100644 index 0000000000..7b5b414549 --- /dev/null +++ b/test/server_selection/in_window/three-choices.json @@ -0,0 +1,46 @@ +{ + "description": "Selections from three servers occur at proper distributions", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 3 + }, + { + "address": "b:27017", + "operation_count": 6 + }, + { + "address": "c:27017", + "operation_count": 20 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.66, + "b:27017": 0.33, + "c:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/two-choices.json b/test/server_selection/in_window/two-choices.json new file mode 100644 index 0000000000..2c7a605d8d --- /dev/null +++ b/test/server_selection/in_window/two-choices.json @@ -0,0 +1,36 @@ +{ + "description": "Better of two choices always selected", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 0 + }, + { + "address": "b:27017", + "operation_count": 5 + } + ], + "iterations": 100, + "outcome": { + "tolerance": 0, + "expected_frequencies": { + "a:27017": 1, + "b:27017": 0 + } + } +} diff --git a/test/server_selection/in_window/two-least.json b/test/server_selection/in_window/two-least.json new file mode 100644 index 0000000000..73214fc647 --- /dev/null +++ b/test/server_selection/in_window/two-least.json @@ -0,0 +1,46 @@ +{ + "description": "Two tied for least operations share all selections", + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "b:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + }, + { + "address": "c:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "mocked_topology_state": [ + { + "address": "a:27017", + "operation_count": 10 + }, + { + "address": "b:27017", + "operation_count": 10 + }, + { + "address": "c:27017", + "operation_count": 16 + } + ], + "iterations": 2000, + "outcome": { + "tolerance": 0.05, + "expected_frequencies": { + "a:27017": 0.5, + "b:27017": 0.5, + "c:27017": 0 + } + } +} diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py new file mode 100644 index 0000000000..bdd778e815 --- /dev/null +++ b/test/test_server_selection_in_window.py @@ -0,0 +1,157 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" + +import os +import threading + +from pymongo.common import clean_node +from pymongo.read_preferences import ReadPreference +from test import client_context, IntegrationTest, unittest +from test.utils_selection_tests import create_topology +from test.utils import TestCreator, rs_client, OvertCommandListener + + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + os.path.join('server_selection', 'in_window')) + + +class TestAllScenarios(unittest.TestCase): + def run_scenario(self, scenario_def): + topology = create_topology(scenario_def) + + # Update mock operation_count state: + for mock in scenario_def['mocked_topology_state']: + address = clean_node(mock['address']) + server = topology.get_server_by_address(address) + server.pool.operation_count = mock['operation_count'] + + pref = ReadPreference.NEAREST + counts = dict((address, 0) for address in + topology._description.server_descriptions()) + + # Number of times to repeat server selection + iterations = scenario_def['iterations'] + for _ in range(iterations): + server = topology.select_server(pref, server_selection_timeout=0) + counts[server.description.address] += 1 + + # Verify expected_frequencies + outcome = scenario_def['outcome'] + tolerance = outcome['tolerance'] + expected_frequencies = outcome['expected_frequencies'] + for host_str, freq in expected_frequencies.items(): + address = clean_node(host_str) + actual_freq = float(counts[address])/iterations + if freq == 0: + # Should be exactly 0. + self.assertEqual(actual_freq, 0) + else: + # Should be within 'tolerance'. + self.assertAlmostEqual(actual_freq, freq, delta=tolerance) + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def) + + return run_scenario + + +class CustomTestCreator(TestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + Server selection in_window tests do not have a 'tests' field. + The whole file represents a single test case. + """ + return [scenario_def] + + +CustomTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() + + +class FinderThread(threading.Thread): + def __init__(self, collection, iterations): + super(FinderThread, self).__init__() + self.daemon = True + self.collection = collection + self.iterations = iterations + self.passed = False + + def run(self): + for _ in range(self.iterations): + self.collection.find_one({}) + self.passed = True + + +class TestProse(IntegrationTest): + def frequencies(self, client, listener): + coll = client.test.test + N_FINDS = 10 + N_THREADS = 10 + threads = [FinderThread(coll, N_FINDS) for _ in range(N_THREADS)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + events = listener.results['started'] + self.assertEqual(len(events), N_FINDS * N_THREADS) + nodes = client.nodes + self.assertEqual(len(nodes), 2) + freqs = {address: 0 for address in nodes} + for event in events: + freqs[event.connection_id] += 1 + for address in freqs: + freqs[address] = freqs[address]/float(len(events)) + return freqs + + @client_context.require_failCommand_appName + @client_context.require_multiple_mongoses + def test_load_balancing(self): + listener = OvertCommandListener() + client = rs_client(client_context.mongos_seeds(), + appName='loadBalancingTest', + event_listeners=[listener]) + self.addCleanup(client.close) + # Delay find commands on + delay_finds = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 10000}, + 'data': { + 'failCommands': ['find'], + 'blockConnection': True, + 'blockTimeMS': 500, + 'appName': 'loadBalancingTest', + }, + } + with self.fail_point(delay_finds): + nodes = client_context.client.nodes + self.assertEqual(len(nodes), 1) + delayed_server = next(iter(nodes)) + freqs = self.frequencies(client, listener) + self.assertLessEqual(freqs[delayed_server], 0.25) + listener.reset() + freqs = self.frequencies(client, listener) + self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/utils.py b/test/utils.py index 2f5b845544..3e24c684f8 100644 --- a/test/utils.py +++ b/test/utils.py @@ -233,10 +233,11 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool(object): - def __init__(self, *args, **kwargs): + def __init__(self, address, options, handshake=True): self.generation = 0 self._lock = threading.Lock() - self.opts = PoolOptions() + self.opts = options + self.operation_count = 0 def get_socket(self, all_credentials, checkout=False): return MockSocketInfo() diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 8bea70ae37..0d4edb085c 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -130,40 +130,50 @@ def get_topology_settings_dict(**kwargs): return settings -def create_test(scenario_def): - def run_scenario(self): - # Initialize topologies. - if 'heartbeatFrequencyMS' in scenario_def: - frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0 - else: - frequency = HEARTBEAT_FREQUENCY +def create_topology(scenario_def, **kwargs): + # Initialize topologies. + if 'heartbeatFrequencyMS' in scenario_def: + frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0 + else: + frequency = HEARTBEAT_FREQUENCY - seeds, hosts = get_addresses( - scenario_def['topology_description']['servers']) + seeds, hosts = get_addresses( + scenario_def['topology_description']['servers']) + + settings = get_topology_settings_dict( + heartbeat_frequency=frequency, + seeds=seeds, + **kwargs + ) + + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + topology = Topology(TopologySettings(**settings)) + topology.open() - settings = get_topology_settings_dict( - heartbeat_frequency=frequency, - seeds=seeds - ) + # Update topologies with server descriptions. + for server in scenario_def['topology_description']['servers']: + server_description = make_server_description(server, hosts) + topology.on_change(server_description) + return topology + + +def create_test(scenario_def): + def run_scenario(self): + _, hosts = get_addresses( + scenario_def['topology_description']['servers']) # "Eligible servers" is defined in the server selection spec as # the set of servers matching both the ReadPreference's mode # and tag sets. - top_latency = Topology(TopologySettings(**settings)) - top_latency.open() + top_latency = create_topology(scenario_def) # "In latency window" is defined in the server selection # spec as the subset of suitable_servers that falls within the # allowable latency window. - settings['local_threshold_ms'] = 1000000 - top_suitable = Topology(TopologySettings(**settings)) - top_suitable.open() - - # Update topologies with server descriptions. - for server in scenario_def['topology_description']['servers']: - server_description = make_server_description(server, hosts) - top_suitable.on_change(server_description) - top_latency.on_change(server_description) + top_suitable = create_topology( + scenario_def, local_threshold_ms=1000000) # Create server selector. if scenario_def.get("operation") == "write": From 61232b7f21d8f0ad9023c8ef0912017d062fafc0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 8 Dec 2020 09:37:36 -1000 Subject: [PATCH 0253/2111] PYTHON-2457 Test that clients wait 500ms between failed heartbeat checks (#524) --- test/test_streaming_protocol.py | 68 ++++++++++++++------------------- 1 file changed, 28 insertions(+), 40 deletions(-) diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index c94179a456..8b815a84eb 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -130,54 +130,42 @@ def changed_event(event): self.assertEqual(1, len(events)) self.assertGreater(events[0].new_description.round_trip_time, 0) + @client_context.require_version_min(4, 9, -1) @client_context.require_failCommand_appName def test_monitor_waits_after_server_check_error(self): - hb_listener = HeartbeatEventListener() - client = rs_or_single_client( - event_listeners=[hb_listener], heartbeatFrequencyMS=500, - appName='waitAfterErrorTest') - self.addCleanup(client.close) - # Force a connection. - client.admin.command('ping') - address = client.address - + # This test implements: + # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks fail_ismaster = { - 'mode': {'times': 50}, + 'mode': {'times': 5}, 'data': { 'failCommands': ['isMaster'], - 'closeConnection': False, - 'errorCode': 91, - # This can be uncommented after SERVER-49220 is fixed. - # 'appName': 'waitAfterErrorTest', + 'errorCode': 1234, + 'appName': 'SDAMMinHeartbeatFrequencyTest', }, } with self.fail_point(fail_ismaster): - time.sleep(2) - - # Server should be selectable. - client.admin.command('ping') - - def hb_started(event): - return (isinstance(event, monitoring.ServerHeartbeatStartedEvent) - and event.connection_id == address) - - hb_started_events = hb_listener.matching(hb_started) - # Explanation of the expected heartbeat events: - # Time: event - # 0ms: create MongoClient - # 1ms: run monitor handshake, 1 - # 2ms: run awaitable isMaster, 2 - # 3ms: run configureFailPoint - # 502ms: isMaster fails for the first time with command error - # 1002ms: run monitor handshake, 3 - # 1502ms: run monitor handshake, 4 - # 2002ms: run monitor handshake, 5 - # 2003ms: disable configureFailPoint - # 2004ms: isMaster succeeds, 6 - # 2004ms: awaitable isMaster, 7 - self.assertGreater(len(hb_started_events), 7) - # This can be reduced to ~15 after SERVER-49220 is fixed. - self.assertLess(len(hb_started_events), 40) + start = time.time() + client = single_client( + appName='SDAMMinHeartbeatFrequencyTest', + serverSelectionTimeoutMS=5000) + self.addCleanup(client.close) + # Force a connection. + client.admin.command('ping') + duration = time.time() - start + # Explanation of the expected events: + # 0ms: run configureFailPoint + # 1ms: create MongoClient + # 2ms: failed monitor handshake, 1 + # 502ms: failed monitor handshake, 2 + # 1002ms: failed monitor handshake, 3 + # 1502ms: failed monitor handshake, 4 + # 2002ms: failed monitor handshake, 5 + # 2502ms: monitor handshake succeeds + # 2503ms: run awaitable isMaster + # 2504ms: application handshake succeeds + # 2505ms: ping command succeeds + self.assertGreaterEqual(duration, 2) + self.assertLessEqual(duration, 3.5) @client_context.require_failCommand_appName def test_heartbeat_awaited_flag(self): From f45847392587eab007ce934c8ad8c250c22423bd Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 14 Dec 2020 19:03:19 -0800 Subject: [PATCH 0254/2111] PYTHON-2452 Ensure command-responses with RetryableWriteError label are retried on MongoDB 4.4+ (#530) --- pymongo/helpers.py | 9 +++++- test/test_retryable_writes.py | 60 ++++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 2 deletions(-) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index d5590d516e..46cecfccfa 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -115,7 +115,11 @@ def _check_command_response(response, max_wire_version, max_wire_version) if parse_write_concern_error and 'writeConcernError' in response: - _raise_write_concern_error(response['writeConcernError']) + _error = response["writeConcernError"] + _labels = response.get("errorLabels") + if _labels: + _error.update({'errorLabels': _labels}) + _raise_write_concern_error(_error) if response["ok"]: return @@ -223,6 +227,9 @@ def _check_write_command_response(result): error = result.get("writeConcernError") if error: + error_labels = result.get("errorLabels") + if error_labels: + error.update({'errorLabels': error_labels}) _raise_write_concern_error(error) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 3060b641c8..91d3d8257b 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -20,14 +20,17 @@ sys.path[0:0] = [""] +from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.int64 import Int64 from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.errors import (ConnectionFailure, OperationFailure, - ServerSelectionTimeoutError) + ServerSelectionTimeoutError, + WriteConcernError) from pymongo.mongo_client import MongoClient from pymongo.operations import (InsertOne, DeleteMany, @@ -43,6 +46,7 @@ OvertCommandListener, TestCreator) from test.utils_spec_runner import SpecRunner +from test.version import Version # Location of JSON test specifications. _TEST_PATH = os.path.join( @@ -454,6 +458,60 @@ def test_batch_splitting_retry_fails(self): self.assertEqual(coll.find_one(projection={'_id': True}), {'_id': 1}) +class TestWriteConcernError(IntegrationTest): + @classmethod + @client_context.require_replica_set + @client_context.require_no_mmap + @client_context.require_failCommand_fail_point + def setUpClass(cls): + super(TestWriteConcernError, cls).setUpClass() + cls.fail_insert = { + 'configureFailPoint': 'failCommand', + 'mode': {'times': 2}, + 'data': { + 'failCommands': ['insert'], + 'writeConcernError': { + 'code': 91, + 'errmsg': 'Replication is being shut down'}, + }} + + @client_context.require_version_min(4, 0) + def test_RetryableWriteError_error_label(self): + listener = OvertCommandListener() + client = rs_or_single_client( + retryWrites=True, event_listeners=[listener]) + + # Ensure collection exists. + client.pymongo_test.testcoll.insert_one({}) + + with self.fail_point(self.fail_insert): + with self.assertRaises(WriteConcernError) as cm: + client.pymongo_test.testcoll.insert_one({}) + self.assertTrue(cm.exception.has_error_label( + 'RetryableWriteError')) + + if client_context.version >= Version(4, 4): + # In MongoDB 4.4+ we rely on the server returning the error label. + self.assertIn( + 'RetryableWriteError', + listener.results['succeeded'][-1].reply['errorLabels']) + + @client_context.require_version_min(4, 4) + def test_RetryableWriteError_error_label_RawBSONDocument(self): + # using RawBSONDocument should not cause errorLabel parsing to fail + with self.fail_point(self.fail_insert): + with self.client.start_session() as s: + s._start_retryable_write() + result = self.client.pymongo_test.command( + 'insert', 'testcoll', documents=[{'_id': 1}], + txnNumber=s._server_session.transaction_id, session=s, + codec_options=DEFAULT_CODEC_OPTIONS.with_options( + document_class=RawBSONDocument)) + + self.assertIn('writeConcernError', result) + self.assertIn('RetryableWriteError', result['errorLabels']) + + # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): @client_context.require_version_min(3, 6) From 3ecd9479d47363afa78f0f0699b8154f2f1c6f6f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Dec 2020 15:32:25 -1000 Subject: [PATCH 0255/2111] PYTHON-2366 Test OCSP+FLE with Python 3.9 (#534) PYTHON-2449 Move all pypy cryptography/pyopenssl testing to Debian 9.2 with OpenSSL 1.1.0f PYTHON-2449 Fix Windows cryptography installation by upgrading pip and using --prefer-binary --- .evergreen/config.yml | 51 ++++++++++++++++----- .evergreen/run-ocsp-tests.sh | 34 ++------------ .evergreen/run-tests.sh | 34 ++------------ .evergreen/test-encryption-requirements.txt | 4 ++ .evergreen/utils.sh | 11 +++-- 5 files changed, 61 insertions(+), 73 deletions(-) create mode 100644 .evergreen/test-encryption-requirements.txt diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 5e2ce14d3a..8000bea133 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1561,6 +1561,7 @@ axes: run_on: debian92-test batchtime: 10080 # 7 days variables: + python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/debian92/master/latest/libmongocrypt.tar.gz - id: macos-1014 display_name: "macOS 10.14" @@ -2160,14 +2161,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9"] auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.8", "3.9", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.5", "3.6", "3.8", "3.9"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2177,6 +2178,19 @@ buildvariants: # Test standalone and sharded only on 4.4. - '.4.4' +- matrix_name: "tests-pyopenssl-pypy" + matrix_spec: + platform: debian92 + python-version: ["pypy", "pypy3.5", "pypy3.6"] + auth: "auth" + ssl: "ssl" + pyopenssl: "*" + display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" + tasks: + - '.replica_set !.2.6 !.3.0 !.3.2 !.3.4' + # Test standalone and sharded only on 4.4. + - '.4.4' + - matrix_name: "test-pyopenssl-old-py27" matrix_spec: platform: @@ -2214,7 +2228,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "pypy3.6"] + python-version: ["2.7", "3.4", "3.5", "3.6"] auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2223,6 +2237,15 @@ buildvariants: display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions +- matrix_name: "tests-pypy-debian-test-encryption" + matrix_spec: + platform: debian92 + python-version: ["pypy", "pypy3.5", "pypy3.6"] + auth-ssl: noauth-nossl + encryption: "*" + display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" + tasks: *encryption-server-versions + - matrix_name: "tests-python-version-rhel62-without-c-extensions" matrix_spec: platform: rhel62 @@ -2365,12 +2388,6 @@ buildvariants: python-version-windows: "*" auth-ssl: "*" encryption: "*" - exclude_spec: - # PYTHON-2366 Skip 3.9 due to cryptography install failures - - platform: "*" - python-version-windows: ["3.9"] - auth-ssl: "*" - encryption: "*" display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions @@ -2533,7 +2550,19 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.8", "3.9", "pypy", "pypy3.5"] + python-version: ["2.7", "3.4", "3.8", "3.9"] + mongodb-version: ["4.4", "latest"] + auth: "noauth" + ssl: "ssl" + display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" + batchtime: 20160 # 14 days + tasks: + - name: ".ocsp" + +- matrix_name: "ocsp-test-pypy" + matrix_spec: + platform: debian92 + python-version: ["pypy", "pypy3.5", "pypy3.6"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2545,7 +2574,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["2.7", "3.4", "3.8"] + python-version-windows: ["2.7", "3.4", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh index 17abb395ca..75fba7e2c9 100644 --- a/.evergreen/run-ocsp-tests.sh +++ b/.evergreen/run-ocsp-tests.sh @@ -3,6 +3,9 @@ set -o xtrace set -o errexit +# For createvirtualenv. +. .evergreen/utils.sh + if [ -z "$PYTHON_BINARY" ]; then echo "No python binary specified" PYTHON=$(command -v python || command -v python3) || true @@ -14,36 +17,9 @@ else PYTHON="$PYTHON_BINARY" fi -if $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv" -elif command -v virtualenv; then - # We can remove this fallback after: - # https://github.com/10gen/mongo-python-toolchain/issues/8 - VIRTUALENV="$(command -v virtualenv) -p $PYTHON" -else - echo "Cannot test without virtualenv" - exit 1 -fi - -$VIRTUALENV --never-download --no-wheel ocsptest -if [ "Windows_NT" = "$OS" ]; then - . ocsptest/Scripts/activate -else - . ocsptest/bin/activate -fi +createvirtualenv $PYTHON ocsptest trap "deactivate; rm -rf ocsptest" EXIT HUP -IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") -if [ $IS_PYTHON_2 = "1" ]; then - echo "Using a Python 2" - # Upgrade pip to install the cryptography wheel and not the tar. - # <20.1 because 20.0.2 says a future release may drop support for 2.7. - python -m pip install --upgrade 'pip<20.1' - # Upgrade setuptools because cryptography requires 18.5+. - # <45 because 45.0 dropped support for 2.7. - python -m pip install --upgrade 'setuptools<45' -fi - -python -m pip install pyopenssl requests service_identity +python -m pip install --prefer-binary pyopenssl requests service_identity OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED} CA_FILE=${CA_FILE} python test/ocsp/test_ocsp.py diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index c1a7286437..66b126a608 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -94,38 +94,11 @@ fi # PyOpenSSL test setup. if [ -n "$TEST_PYOPENSSL" ]; then - if $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv" - elif command -v virtualenv; then - # We can remove this fallback after: - # https://github.com/10gen/mongo-python-toolchain/issues/8 - VIRTUALENV="$(command -v virtualenv) -p $PYTHON" - else - echo "Cannot test without virtualenv" - exit 1 - fi - - $VIRTUALENV pyopenssltest - if [ "Windows_NT" = "$OS" ]; then - . pyopenssltest/Scripts/activate - else - . pyopenssltest/bin/activate - fi + createvirtualenv $PYTHON pyopenssltest trap "deactivate; rm -rf pyopenssltest" EXIT HUP PYTHON=python - IS_PYTHON_2=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (3,) else '0')") - if [ $IS_PYTHON_2 = "1" ]; then - echo "Using a Python 2" - # Upgrade pip to install the cryptography wheel and not the tar. - # <20.1 because 20.0.2 says a future release may drop support for 2.7. - python -m pip install --upgrade 'pip<20.1' - # Upgrade setuptools because cryptography requires 18.5+. - # <45 because 45.0 dropped support for 2.7. - python -m pip install --upgrade 'setuptools<45' - fi - - python -m pip install pyopenssl requests service_identity + python -m pip install --prefer-binary pyopenssl requests service_identity fi if [ -n "$TEST_ENCRYPTION" ]; then @@ -166,7 +139,8 @@ if [ -n "$TEST_ENCRYPTION" ]; then # TODO: Test with 'pip install pymongocrypt' git clone --branch master https://github.com/mongodb/libmongocrypt.git libmongocrypt_git - python -m pip install --upgrade ./libmongocrypt_git/bindings/python + python -m pip install --prefer-binary -r .evergreen/test-encryption-requirements.txt + python -m pip install ./libmongocrypt_git/bindings/python python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by PREPARE_SHELL for access to mongocryptd. diff --git a/.evergreen/test-encryption-requirements.txt b/.evergreen/test-encryption-requirements.txt new file mode 100644 index 0000000000..3b66938bb8 --- /dev/null +++ b/.evergreen/test-encryption-requirements.txt @@ -0,0 +1,4 @@ +# cffi==1.14.3 was the last installable release on RHEL 6.2 with Python 3.4 +cffi==1.14.3;python_version=="3.4" +cffi>=1.12.0,<2;python_version!="3.4" +cryptography>=2,<4 diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 9a92f15295..ff73be01d9 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -8,19 +8,24 @@ createvirtualenv () { PYTHON=$1 VENVPATH=$2 if $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv" + VIRTUALENV="$PYTHON -m virtualenv --never-download" + elif $PYTHON -m venv -h>/dev/null; then + VIRTUALENV="$PYTHON -m venv" elif command -v virtualenv; then - VIRTUALENV="$(command -v virtualenv) -p $PYTHON" + VIRTUALENV="$(command -v virtualenv) -p $PYTHON --never-download" else echo "Cannot test without virtualenv" exit 1 fi - $VIRTUALENV --system-site-packages --never-download $VENVPATH + $VIRTUALENV $VENVPATH if [ "Windows_NT" = "$OS" ]; then . $VENVPATH/Scripts/activate else . $VENVPATH/bin/activate fi + # Upgrade to the latest versions of pip setuptools wheel so that + # pip can always download the latest cryptography+cffi wheels. + python -m pip install --upgrade pip setuptools wheel } # Usage: From eb5bd9c8583112dd7a99f083a09b28aeb387358f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Dec 2020 17:08:34 -1000 Subject: [PATCH 0256/2111] PYTHON-2441 Reduce false positives in test_continuous_network_errors --- test/test_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_client.py b/test/test_client.py index f02a075b3a..af4605201d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1645,7 +1645,7 @@ def server_description_count(): # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: # AssertionError: 4 != 22 within 5 delta (18 difference) - self.assertAlmostEqual(initial_count, final_count, delta=5) + self.assertAlmostEqual(initial_count, final_count, delta=10) class TestExhaustCursor(IntegrationTest): From 733ab2527bfec62cdf7e570c0ffebe7efdf92096 Mon Sep 17 00:00:00 2001 From: Pascal Corpet Date: Thu, 17 Dec 2020 04:44:56 +0100 Subject: [PATCH 0257/2111] PYTHON-2466 Make pymongo client, database and collection objects hashable. (#533) --- pymongo/collection.py | 3 +++ pymongo/database.py | 3 +++ pymongo/mongo_client.py | 3 +++ test/test_client.py | 4 ++++ test/test_collection.py | 3 +++ test/test_database.py | 3 +++ 6 files changed, 19 insertions(+) diff --git a/pymongo/collection.py b/pymongo/collection.py index 7a3d0bd99f..4adce0a384 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -303,6 +303,9 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __hash__(self): + return hash((self.__database, self.__name)) + @property def full_name(self): """The full name of this :class:`Collection`. diff --git a/pymongo/database.py b/pymongo/database.py index 3f0a953632..a31e918ac3 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -272,6 +272,9 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __hash__(self): + return hash((self.__client, self.__name)) + def __repr__(self): return "Database(%r, %r)" % (self.__client, self.__name) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4ed23855f3..6e1e24481a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1509,6 +1509,9 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __hash__(self): + return hash(self.address) + def _repr_helper(self): def option_repr(option, value): """Fix options whose __repr__ isn't usable in a constructor.""" diff --git a/test/test_client.py b/test/test_client.py index af4605201d..6f2d587ed8 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -627,6 +627,10 @@ def test_equality(self): # Explicitly test inequality self.assertFalse(client_context.client != c) + def test_hashable(self): + c = connected(rs_or_single_client()) + self.assertIn(c, {client_context.client}) + def test_host_w_port(self): with self.assertRaises(ValueError): connected(MongoClient("%s:1234567" % (client_context.host,), diff --git a/test/test_collection.py b/test/test_collection.py index 220ddab3a8..96a3e454ab 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -154,6 +154,9 @@ def test_equality(self): self.assertEqual(self.db.test.mike, self.db["test.mike"]) self.assertEqual(self.db.test["mike"], self.db["test.mike"]) + def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + @client_context.require_version_min(3, 3, 9) def test_create(self): # No Exception. diff --git a/test/test_database.py b/test/test_database.py index 349f090054..cdaf31332b 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -126,6 +126,9 @@ def test_equality(self): self.assertFalse(Database(self.client, "test") != Database(self.client, "test")) + def test_hashable(self): + self.assertIn(self.client.test, {Database(self.client, "test")}) + def test_get_coll(self): db = Database(self.client, "pymongo_test") self.assertEqual(db.test, db["test"]) From c673d8b3cea48f65615cf632fb287e1b9e57be72 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 17 Dec 2020 13:58:03 -0800 Subject: [PATCH 0258/2111] PYTHON-2318 Atlas Data Lake testing (#500) --- .evergreen/config.yml | 37 ++++++++++++ .evergreen/run-tests.sh | 23 ++++++-- test/__init__.py | 18 +++++- test/crud_v2_format.py | 48 ++++++++++++++++ test/data_lake/aggregate.json | 53 ++++++++++++++++++ test/data_lake/estimatedDocumentCount.json | 25 +++++++++ test/data_lake/find.json | 65 ++++++++++++++++++++++ test/data_lake/getMore.json | 57 +++++++++++++++++++ test/data_lake/listCollections.json | 25 +++++++++ test/data_lake/listDatabases.json | 24 ++++++++ test/data_lake/runCommand.json | 31 +++++++++++ test/test_crud_v2.py | 35 ++---------- test/test_data_lake.py | 61 ++++++++++++++++++++ test/utils_spec_runner.py | 2 +- 14 files changed, 466 insertions(+), 38 deletions(-) create mode 100644 test/crud_v2_format.py create mode 100644 test/data_lake/aggregate.json create mode 100644 test/data_lake/estimatedDocumentCount.json create mode 100644 test/data_lake/find.json create mode 100644 test/data_lake/getMore.json create mode 100644 test/data_lake/listCollections.json create mode 100644 test/data_lake/listDatabases.json create mode 100644 test/data_lake/runCommand.json create mode 100644 test/test_data_lake.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8000bea133..c37309df7f 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -301,6 +301,25 @@ functions: - key: MONGODB_STARTED value: "1" + "bootstrap data lake": + - command: shell.exec + type: setup + params: + script: | + set -o xtrace + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake + DRIVERS_TOOLS="${DRIVERS_TOOLS}" sh build-mongohouse-local.sh + - command: shell.exec + type: setup + params: + background: true + script: | + set -o xtrace + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake + DRIVERS_TOOLS="${DRIVERS_TOOLS}" sh run-mongohouse-local.sh + "stop mongo-orchestration": - command: shell.exec params: @@ -405,6 +424,7 @@ functions: COMPRESSORS=${COMPRESSORS} \ AUTH=${AUTH} \ SSL=${SSL} \ + DATA_LAKE=${DATA_LAKE} \ sh ${PROJECT_DIRECTORY}/.evergreen/run-tests.sh "run enterprise auth tests": @@ -1157,6 +1177,13 @@ tasks: commands: - func: "run atlas tests" + - name: atlas-data-lake-tests + commands: + - func: "bootstrap data lake" + - func: "run tests" + vars: + DATA_LAKE: "true" + - name: test-ocsp-rsa-valid-cert-server-staples tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: @@ -2547,6 +2574,16 @@ buildvariants: tasks: - name: "atlas-connect" +- matrix_name: "data-lake-spec-tests" + matrix_spec: + platform: ubuntu-16.04 + python-version: ["2.7", "3.4", "3.8"] + auth: "auth" + c-extensions: "*" + display_name: "Atlas Data Lake ${python-version} ${c-extensions}" + tasks: + - name: atlas-data-lake-tests + - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 66b126a608..50357d49df 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -19,7 +19,6 @@ else set +x fi - AUTH=${AUTH:-noauth} SSL=${SSL:-nossl} PYTHON_BINARY=${PYTHON_BINARY:-} @@ -30,6 +29,7 @@ COMPRESSORS=${COMPRESSORS:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} SETDEFAULTENCODING=${SETDEFAULTENCODING:-} +DATA_LAKE=${DATA_LAKE:-} if [ -n "$COMPRESSORS" ]; then export COMPRESSORS=$COMPRESSORS @@ -38,8 +38,13 @@ fi export JAVA_HOME=/opt/java/jdk8 if [ "$AUTH" != "noauth" ]; then - export DB_USER="bob" - export DB_PASSWORD="pwd123" + if [ -z "$DATA_LAKE" ]; then + export DB_USER="bob" + export DB_PASSWORD="pwd123" + else + export DB_USER="mhuser" + export DB_PASSWORD="pencil" + fi fi if [ "$SSL" != "nossl" ]; then @@ -149,9 +154,15 @@ fi PYTHON_IMPL=$($PYTHON -c "import platform, sys; sys.stdout.write(platform.python_implementation())") if [ $PYTHON_IMPL = "Jython" ]; then - EXTRA_ARGS="-J-XX:-UseGCOverheadLimit -J-Xmx4096m" + PYTHON_ARGS="-J-XX:-UseGCOverheadLimit -J-Xmx4096m" +else + PYTHON_ARGS="" +fi + +if [ -z "$DATA_LAKE" ]; then + TEST_ARGS="" else - EXTRA_ARGS="" + TEST_ARGS="-s test.test_data_lake" fi # Don't download unittest-xml-reporting from pypi, which often fails. @@ -200,7 +211,7 @@ if [ -z "$GREEN_FRAMEWORK" ]; then # causing this script to exit. $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" fi - $COVERAGE_OR_PYTHON $EXTRA_ARGS $COVERAGE_ARGS setup.py $C_EXTENSIONS test $OUTPUT + $COVERAGE_OR_PYTHON $PYTHON_ARGS $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT else # --no_ext has to come before "test" so there is no way to toggle extensions here. $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT diff --git a/test/__init__.py b/test/__init__.py index a517e1a275..18e88dc660 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -191,6 +191,7 @@ def __init__(self): self.sessions_enabled = False self.client = None self.conn_lock = threading.Lock() + self.is_data_lake = False if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS @@ -231,6 +232,19 @@ def _connect(self, host, port, **kwargs): def _init_client(self): self.client = self._connect(host, port) + + if self.client is not None: + # Return early when connected to dataLake as mongohoused does not + # support the getCmdLineOpts command and is tested without TLS. + build_info = self.client.admin.command('buildInfo') + if 'dataLake' in build_info: + self.is_data_lake = True + self.auth_enabled = True + self.client = self._connect( + host, port, username=db_user, password=db_pwd) + self.connected = True + return + if HAVE_SSL and not self.client: # Is MongoDB configured for SSL? self.client = self._connect(host, port, **TLS_OPTIONS) @@ -845,14 +859,14 @@ def teardown(): if garbage: assert False, '\n'.join(garbage) c = client_context.client - if c: + if c and not client_context.is_data_lake: c.drop_database("pymongo-pooling-tests") c.drop_database("pymongo_test") c.drop_database("pymongo_test1") c.drop_database("pymongo_test2") c.drop_database("pymongo_test_mike") c.drop_database("pymongo_test_bernie") - c.close() + c.close() # Jython does not support gc.get_objects. if not sys.platform.startswith('java'): diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py new file mode 100644 index 0000000000..55dcaae5f5 --- /dev/null +++ b/test/crud_v2_format.py @@ -0,0 +1,48 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""v2 format CRUD test runner. + +https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.rst +""" + +from test.utils_spec_runner import SpecRunner + + +class TestCrudV2(SpecRunner): + # Default test database and collection names. + TEST_DB = None + TEST_COLLECTION = None + + def get_scenario_db_name(self, scenario_def): + """Crud spec says database_name is optional.""" + return scenario_def.get('database_name', self.TEST_DB) + + def get_scenario_coll_name(self, scenario_def): + """Crud spec says collection_name is optional.""" + return scenario_def.get('collection_name', self.TEST_COLLECTION) + + def get_object_name(self, op): + """Crud spec says object is optional and defaults to 'collection'.""" + return op.get('object', 'collection') + + def get_outcome_coll_name(self, outcome, collection): + """Crud spec says outcome has an optional 'collection.name'.""" + return outcome['collection'].get('name', collection.name) + + def setup_scenario(self, scenario_def): + """Allow specs to override a test's setup.""" + # PYTHON-1935 Only create the collection if there is data to insert. + if scenario_def['data']: + super(TestCrudV2, self).setup_scenario(scenario_def) diff --git a/test/data_lake/aggregate.json b/test/data_lake/aggregate.json new file mode 100644 index 0000000000..99995bca41 --- /dev/null +++ b/test/data_lake/aggregate.json @@ -0,0 +1,53 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "Aggregate with pipeline (project, sort, limit)", + "operations": [ + { + "object": "collection", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + }, + { + "$sort": { + "a": 1 + } + }, + { + "$limit": 2 + } + ] + }, + "result": [ + { + "a": 1, + "b": 2, + "c": 3 + }, + { + "a": 2, + "b": 3, + "c": 4 + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "driverdata" + } + } + } + ] + } + ] +} diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json new file mode 100644 index 0000000000..d039a51f06 --- /dev/null +++ b/test/data_lake/estimatedDocumentCount.json @@ -0,0 +1,25 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "estimatedDocumentCount succeeds", + "operations": [ + { + "object": "collection", + "name": "estimatedDocumentCount", + "result": 15 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "count": "driverdata" + } + } + } + ] + } + ] +} diff --git a/test/data_lake/find.json b/test/data_lake/find.json new file mode 100644 index 0000000000..8a3468a135 --- /dev/null +++ b/test/data_lake/find.json @@ -0,0 +1,65 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "Find with projection and sort", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": { + "b": { + "$gt": 5 + } + }, + "projection": { + "_id": 0 + }, + "sort": { + "a": 1 + }, + "limit": 5 + }, + "result": [ + { + "a": 5, + "b": 6, + "c": 7 + }, + { + "a": 6, + "b": 7, + "c": 8 + }, + { + "a": 7, + "b": 8, + "c": 9 + }, + { + "a": 8, + "b": 9, + "c": 10 + }, + { + "a": 9, + "b": 10, + "c": 11 + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "driverdata" + } + } + } + ] + } + ] +} diff --git a/test/data_lake/getMore.json b/test/data_lake/getMore.json new file mode 100644 index 0000000000..fa1deab4f3 --- /dev/null +++ b/test/data_lake/getMore.json @@ -0,0 +1,57 @@ +{ + "collection_name": "driverdata", + "database_name": "test", + "tests": [ + { + "description": "A successful find event with getMore", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "driverdata", + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "command_name": "find", + "database_name": "test" + } + }, + { + "command_started_event": { + "command": { + "batchSize": 1 + }, + "command_name": "getMore", + "database_name": "cursors" + } + } + ] + } + ] +} \ No newline at end of file diff --git a/test/data_lake/listCollections.json b/test/data_lake/listCollections.json new file mode 100644 index 0000000000..8d8a8f6c1b --- /dev/null +++ b/test/data_lake/listCollections.json @@ -0,0 +1,25 @@ +{ + "database_name": "test", + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "name": "listCollections", + "object": "database" + } + ], + "expectations": [ + { + "command_started_event": { + "command_name": "listCollections", + "database_name": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/test/data_lake/listDatabases.json b/test/data_lake/listDatabases.json new file mode 100644 index 0000000000..f8ec9a0bf4 --- /dev/null +++ b/test/data_lake/listDatabases.json @@ -0,0 +1,24 @@ +{ + "tests": [ + { + "description": "ListDatabases succeeds", + "operations": [ + { + "name": "listDatabases", + "object": "client" + } + ], + "expectations": [ + { + "command_started_event": { + "command_name": "listDatabases", + "database_name": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/test/data_lake/runCommand.json b/test/data_lake/runCommand.json new file mode 100644 index 0000000000..f72e863ba5 --- /dev/null +++ b/test/data_lake/runCommand.json @@ -0,0 +1,31 @@ +{ + "database_name": "test", + "tests": [ + { + "description": "ping succeeds using runCommand", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command_name": "ping", + "database_name": "test", + "command": { + "ping": 1 + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/test/test_crud_v2.py b/test/test_crud_v2.py index 562e119aad..6d9514f91c 100644 --- a/test/test_crud_v2.py +++ b/test/test_crud_v2.py @@ -20,41 +20,19 @@ sys.path[0:0] = [""] from test import unittest +from test.crud_v2_format import TestCrudV2 from test.utils import TestCreator -from test.utils_spec_runner import SpecRunner # Location of JSON test specifications. _TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'crud', 'v2') -# Default test database and collection names. -TEST_DB = 'testdb' -TEST_COLLECTION = 'testcollection' - -class TestSpec(SpecRunner): - def get_scenario_db_name(self, scenario_def): - """Crud spec says database_name is optional.""" - return scenario_def.get('database_name', TEST_DB) - - def get_scenario_coll_name(self, scenario_def): - """Crud spec says collection_name is optional.""" - return scenario_def.get('collection_name', TEST_COLLECTION) - - def get_object_name(self, op): - """Crud spec says object is optional and defaults to 'collection'.""" - return op.get('object', 'collection') - - def get_outcome_coll_name(self, outcome, collection): - """Crud spec says outcome has an optional 'collection.name'.""" - return outcome['collection'].get('name', collection.name) - - def setup_scenario(self, scenario_def): - """Allow specs to override a test's setup.""" - # PYTHON-1935 Only create the collection if there is data to insert. - if scenario_def['data']: - super(TestSpec, self).setup_scenario(scenario_def) +class TestSpec(TestCrudV2): + # Default test database and collection names. + TEST_DB = 'testdb' + TEST_COLLECTION = 'testcollection' def create_test(scenario_def, test, name): @@ -64,8 +42,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) -test_creator.create_tests() +TestCreator(create_test, TestSpec, _TEST_PATH).create_tests() if __name__ == "__main__": diff --git a/test/test_data_lake.py b/test/test_data_lake.py new file mode 100644 index 0000000000..4ce2cd5080 --- /dev/null +++ b/test/test_data_lake.py @@ -0,0 +1,61 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test Atlas Data Lake.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import client_context, unittest +from test.crud_v2_format import TestCrudV2 +from test.utils import TestCreator + + +# Location of JSON test specifications. +_TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "data_lake") + + +class DataLakeTestSpec(TestCrudV2): + # Default test database and collection names. + TEST_DB = 'test' + TEST_COLLECTION = 'driverdata' + + @classmethod + def setUpClass(cls): + super(DataLakeTestSpec, cls).setUpClass() + # Skip these tests unless connected to data lake. + if not client_context.is_data_lake: + raise unittest.SkipTest('Not connected to Atlas Data Lake') + + def setup_scenario(self, scenario_def): + # Spec tests MUST NOT insert data/drop collection for + # data lake testing. + pass + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +TestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index a15537c718..09798fb809 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -525,7 +525,7 @@ def check_events(self, test, listener, session_ids): def maybe_skip_scenario(self, test): if test.get('skipReason'): - raise unittest.SkipTest(test.get('skipReason')) + self.skipTest(test.get('skipReason')) def get_scenario_db_name(self, scenario_def): """Allow subclasses to override a test's database name.""" From 2eecf525d9016c11143b364e3a1aed70f2c9627a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 21 Dec 2020 15:54:02 -1000 Subject: [PATCH 0259/2111] PYTHON-2474 Fix non-disabled client_knobs bug in Data Lake tests (#537) --- test/__init__.py | 20 ++++++++++++++++++++ test/test_data_lake.py | 5 ++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 18e88dc660..88e48b7cd9 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -21,6 +21,7 @@ import sys import threading import time +import traceback import unittest import warnings @@ -130,6 +131,8 @@ def __init__( self.old_min_heartbeat_interval = None self.old_kill_cursor_frequency = None self.old_events_queue_frequency = None + self._enabled = True + self._stack = None def enable(self): self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY @@ -148,6 +151,9 @@ def enable(self): if self.events_queue_frequency is not None: common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. + self._stack = ''.join(traceback.format_stack()) def __enter__(self): self.enable() @@ -157,10 +163,24 @@ def disable(self): common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False def __exit__(self, exc_type, exc_val, exc_tb): self.disable() + def __del__(self): + if self._enabled: + print( + '\nERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, ' + 'MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, ' + 'EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s' % ( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack)) + self.disable() + def _all_users(db): return set(u['user'] for u in db.command('usersInfo').get('users', [])) diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 4ce2cd5080..d762f11217 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -35,11 +35,10 @@ class DataLakeTestSpec(TestCrudV2): TEST_COLLECTION = 'driverdata' @classmethod + @unittest.skipUnless(client_context.is_data_lake, + 'Not connected to Atlas Data Lake') def setUpClass(cls): super(DataLakeTestSpec, cls).setUpClass() - # Skip these tests unless connected to data lake. - if not client_context.is_data_lake: - raise unittest.SkipTest('Not connected to Atlas Data Lake') def setup_scenario(self, scenario_def): # Spec tests MUST NOT insert data/drop collection for From 6b0123594aeb6d4fa618568a28cab3344c5422f0 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 21 Dec 2020 19:22:29 -0800 Subject: [PATCH 0260/2111] PYTHON-2033 Unified Test Format (#519) --- test/__init__.py | 10 + test/test_unified_format.py | 73 ++ .../example-insertOne.json | 100 ++ .../collectionData-additionalProperties.json | 40 + ...ollectionData-collectionName-required.json | 38 + .../collectionData-collectionName-type.json | 39 + .../collectionData-databaseName-required.json | 38 + .../collectionData-databaseName-type.json | 39 + .../collectionData-documents-items.json | 41 + .../collectionData-documents-required.json | 38 + .../collectionData-documents-type.json | 39 + ...rDatabaseOptions-additionalProperties.json | 27 + ...ionOrDatabaseOptions-readConcern-type.json | 27 + ...OrDatabaseOptions-readPreference-type.json | 27 + ...onOrDatabaseOptions-writeConcern-type.json | 27 + .../invalid/createEntities-items.json | 13 + .../invalid/createEntities-minItems.json | 11 + .../invalid/createEntities-type.json | 11 + .../invalid/description-required.json | 9 + .../invalid/entity-additionalProperties.json | 15 + .../entity-bucket-additionalProperties.json | 31 + .../entity-bucket-bucketOptions-type.json | 31 + .../entity-bucket-database-required.json | 29 + .../invalid/entity-bucket-database-type.json | 30 + .../invalid/entity-bucket-id-required.json | 29 + .../invalid/entity-bucket-id-type.json | 30 + .../entity-client-additionalProperties.json | 18 + .../invalid/entity-client-id-required.json | 15 + .../invalid/entity-client-id-type.json | 17 + ...t-ignoreCommandMonitoringEvents-items.json | 20 + ...gnoreCommandMonitoringEvents-minItems.json | 18 + ...nt-ignoreCommandMonitoringEvents-type.json | 18 + .../entity-client-observeEvents-enum.json | 20 + .../entity-client-observeEvents-items.json | 20 + .../entity-client-observeEvents-minItems.json | 18 + .../entity-client-observeEvents-type.json | 18 + .../entity-client-uriOptions-type.json | 18 + ...ntity-client-useMultipleMongoses-type.json | 18 + ...ntity-collection-additionalProperties.json | 32 + ...ty-collection-collectionName-required.json | 30 + ...entity-collection-collectionName-type.json | 31 + ...ity-collection-collectionOptions-type.json | 32 + .../entity-collection-database-required.json | 30 + .../entity-collection-database-type.json | 31 + .../entity-collection-id-required.json | 30 + .../invalid/entity-collection-id-type.json | 31 + .../entity-database-additionalProperties.json | 25 + .../entity-database-client-required.json | 23 + .../invalid/entity-database-client-type.json | 24 + ...entity-database-databaseName-required.json | 23 + .../entity-database-databaseName-type.json | 24 + .../entity-database-databaseOptions-type.json | 25 + .../invalid/entity-database-id-required.json | 23 + .../invalid/entity-database-id-type.json | 24 + .../invalid/entity-maxProperties.json | 22 + .../invalid/entity-minProperties.json | 13 + .../entity-session-additionalProperties.json | 24 + .../entity-session-client-required.json | 22 + .../invalid/entity-session-client-type.json | 23 + .../invalid/entity-session-id-required.json | 22 + .../invalid/entity-session-id-type.json | 23 + .../entity-session-sessionOptions-type.json | 24 + .../entity-stream-additionalProperties.json | 19 + .../entity-stream-hexBytes-pattern.json | 18 + .../entity-stream-hexBytes-required.json | 17 + .../invalid/entity-stream-hexBytes-type.json | 18 + .../invalid/entity-stream-id-required.json | 17 + .../invalid/entity-stream-id-type.json | 18 + .../expectedError-additionalProperties.json | 25 + .../invalid/expectedError-errorCode-type.json | 25 + .../expectedError-errorCodeName-type.json | 25 + .../expectedError-errorContains-type.json | 25 + ...xpectedError-errorLabelsContain-items.json | 27 + ...ctedError-errorLabelsContain-minItems.json | 25 + ...expectedError-errorLabelsContain-type.json | 25 + .../expectedError-errorLabelsOmit-items.json | 27 + ...xpectedError-errorLabelsOmit-minItems.json | 25 + .../expectedError-errorLabelsOmit-type.json | 25 + .../expectedError-isClientError-type.json | 25 + .../invalid/expectedError-isError-const.json | 25 + .../invalid/expectedError-isError-type.json | 25 + .../invalid/expectedError-minProperties.json | 23 + .../expectedEvent-additionalProperties.json | 32 + ...t-commandFailedEvent-commandName-type.json | 34 + ...mandStartedEvent-additionalProperties.json | 34 + ...vent-commandStartedEvent-command-type.json | 34 + ...-commandStartedEvent-commandName-type.json | 34 + ...commandStartedEvent-databaseName-type.json | 34 + ...ommandSucceededEvent-commandName-type.json | 34 + ...vent-commandSucceededEvent-reply-type.json | 34 + .../invalid/expectedEvent-maxProperties.json | 33 + .../invalid/expectedEvent-minProperties.json | 30 + ...dEventsForClient-additionalProperties.json | 29 + ...pectedEventsForClient-client-required.json | 27 + .../expectedEventsForClient-client-type.json | 28 + .../expectedEventsForClient-events-items.json | 30 + ...pectedEventsForClient-events-required.json | 27 + .../expectedEventsForClient-events-type.json | 28 + .../invalid/initialData-items.json | 13 + .../invalid/initialData-minItems.json | 11 + .../invalid/initialData-type.json | 11 + .../operation-additionalProperties.json | 23 + .../invalid/operation-arguments-type.json | 23 + ...pectError-conflicts_with_expectResult.json | 26 + ...ror-conflicts_with_saveResultAsEntity.json | 26 + .../invalid/operation-expectError-type.json | 23 + .../invalid/operation-expectEvents-type.json | 23 + .../invalid/operation-name-required.json | 21 + .../invalid/operation-name-type.json | 22 + .../invalid/operation-object-required.json | 21 + .../invalid/operation-object-type.json | 22 + .../operation-saveResultAsEntity-type.json | 23 + ...runOnRequirement-additionalProperties.json | 16 + ...nRequirement-maxServerVersion-pattern.json | 15 + ...unOnRequirement-maxServerVersion-type.json | 15 + .../runOnRequirement-minProperties.json | 13 + ...nRequirement-minServerVersion-pattern.json | 15 + ...unOnRequirement-minServerVersion-type.json | 15 + .../runOnRequirement-topologies-enum.json | 17 + .../runOnRequirement-topologies-items.json | 17 + .../runOnRequirement-topologies-minItems.json | 15 + .../runOnRequirement-topologies-type.json | 15 + .../invalid/runOnRequirements-items.json | 13 + .../invalid/runOnRequirements-minItems.json | 11 + .../invalid/runOnRequirements-type.json | 11 + .../invalid/schemaVersion-pattern.json | 10 + .../invalid/schemaVersion-required.json | 9 + .../invalid/schemaVersion-type.json | 10 + .../invalid/test-additionalProperties.json | 11 + .../invalid/test-description-required.json | 9 + .../invalid/test-description-type.json | 10 + .../invalid/test-expectEvents-items.json | 13 + .../invalid/test-expectEvents-type.json | 11 + .../invalid/test-operations-items.json | 12 + .../invalid/test-operations-required.json | 9 + .../invalid/test-operations-type.json | 10 + .../invalid/test-outcome-items.json | 13 + .../invalid/test-outcome-minItems.json | 11 + .../invalid/test-outcome-type.json | 11 + .../invalid/test-runOnRequirements-items.json | 13 + .../test-runOnRequirements-minItems.json | 11 + .../invalid/test-runOnRequirements-type.json | 11 + .../invalid/test-skipReason-type.json | 11 + .../invalid/tests-items.json | 7 + .../invalid/tests-minItems.json | 5 + .../invalid/tests-required.json | 4 + .../invalid/tests-type.json | 5 + .../entity-bucket-database-undefined.json | 18 + .../entity-collection-database-undefined.json | 19 + .../entity-database-client-undefined.json | 19 + .../entity-session-client-undefined.json | 18 + .../returnDocument-enum-invalid.json | 66 ++ .../valid-fail/schemaVersion-unsupported.json | 10 + .../valid-pass/poc-change-streams.json | 414 +++++++ .../valid-pass/poc-command-monitoring.json | 222 ++++ .../valid-pass/poc-crud.json | 446 +++++++ .../valid-pass/poc-gridfs.json | 301 +++++ .../valid-pass/poc-retryable-reads.json | 433 +++++++ .../valid-pass/poc-retryable-writes.json | 483 ++++++++ .../valid-pass/poc-sessions.json | 466 ++++++++ .../poc-transactions-convenient-api.json | 505 ++++++++ .../poc-transactions-mongos-pin-auto.json | 409 +++++++ .../valid-pass/poc-transactions.json | 322 +++++ test/unified_format.py | 1032 +++++++++++++++++ test/utils.py | 131 ++- test/utils_spec_runner.py | 102 +- 166 files changed, 8646 insertions(+), 94 deletions(-) create mode 100644 test/test_unified_format.py create mode 100644 test/unified-test-format/example-insertOne.json create mode 100644 test/unified-test-format/invalid/collectionData-additionalProperties.json create mode 100644 test/unified-test-format/invalid/collectionData-collectionName-required.json create mode 100644 test/unified-test-format/invalid/collectionData-collectionName-type.json create mode 100644 test/unified-test-format/invalid/collectionData-databaseName-required.json create mode 100644 test/unified-test-format/invalid/collectionData-databaseName-type.json create mode 100644 test/unified-test-format/invalid/collectionData-documents-items.json create mode 100644 test/unified-test-format/invalid/collectionData-documents-required.json create mode 100644 test/unified-test-format/invalid/collectionData-documents-type.json create mode 100644 test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json create mode 100644 test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json create mode 100644 test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json create mode 100644 test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json create mode 100644 test/unified-test-format/invalid/createEntities-items.json create mode 100644 test/unified-test-format/invalid/createEntities-minItems.json create mode 100644 test/unified-test-format/invalid/createEntities-type.json create mode 100644 test/unified-test-format/invalid/description-required.json create mode 100644 test/unified-test-format/invalid/entity-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-bucket-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json create mode 100644 test/unified-test-format/invalid/entity-bucket-database-required.json create mode 100644 test/unified-test-format/invalid/entity-bucket-database-type.json create mode 100644 test/unified-test-format/invalid/entity-bucket-id-required.json create mode 100644 test/unified-test-format/invalid/entity-bucket-id-type.json create mode 100644 test/unified-test-format/invalid/entity-client-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-client-id-required.json create mode 100644 test/unified-test-format/invalid/entity-client-id-type.json create mode 100644 test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json create mode 100644 test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json create mode 100644 test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json create mode 100644 test/unified-test-format/invalid/entity-client-observeEvents-enum.json create mode 100644 test/unified-test-format/invalid/entity-client-observeEvents-items.json create mode 100644 test/unified-test-format/invalid/entity-client-observeEvents-minItems.json create mode 100644 test/unified-test-format/invalid/entity-client-observeEvents-type.json create mode 100644 test/unified-test-format/invalid/entity-client-uriOptions-type.json create mode 100644 test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json create mode 100644 test/unified-test-format/invalid/entity-collection-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-collection-collectionName-required.json create mode 100644 test/unified-test-format/invalid/entity-collection-collectionName-type.json create mode 100644 test/unified-test-format/invalid/entity-collection-collectionOptions-type.json create mode 100644 test/unified-test-format/invalid/entity-collection-database-required.json create mode 100644 test/unified-test-format/invalid/entity-collection-database-type.json create mode 100644 test/unified-test-format/invalid/entity-collection-id-required.json create mode 100644 test/unified-test-format/invalid/entity-collection-id-type.json create mode 100644 test/unified-test-format/invalid/entity-database-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-database-client-required.json create mode 100644 test/unified-test-format/invalid/entity-database-client-type.json create mode 100644 test/unified-test-format/invalid/entity-database-databaseName-required.json create mode 100644 test/unified-test-format/invalid/entity-database-databaseName-type.json create mode 100644 test/unified-test-format/invalid/entity-database-databaseOptions-type.json create mode 100644 test/unified-test-format/invalid/entity-database-id-required.json create mode 100644 test/unified-test-format/invalid/entity-database-id-type.json create mode 100644 test/unified-test-format/invalid/entity-maxProperties.json create mode 100644 test/unified-test-format/invalid/entity-minProperties.json create mode 100644 test/unified-test-format/invalid/entity-session-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-session-client-required.json create mode 100644 test/unified-test-format/invalid/entity-session-client-type.json create mode 100644 test/unified-test-format/invalid/entity-session-id-required.json create mode 100644 test/unified-test-format/invalid/entity-session-id-type.json create mode 100644 test/unified-test-format/invalid/entity-session-sessionOptions-type.json create mode 100644 test/unified-test-format/invalid/entity-stream-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json create mode 100644 test/unified-test-format/invalid/entity-stream-hexBytes-required.json create mode 100644 test/unified-test-format/invalid/entity-stream-hexBytes-type.json create mode 100644 test/unified-test-format/invalid/entity-stream-id-required.json create mode 100644 test/unified-test-format/invalid/entity-stream-id-type.json create mode 100644 test/unified-test-format/invalid/expectedError-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedError-errorCode-type.json create mode 100644 test/unified-test-format/invalid/expectedError-errorCodeName-type.json create mode 100644 test/unified-test-format/invalid/expectedError-errorContains-type.json create mode 100644 test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json create mode 100644 test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json create mode 100644 test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json create mode 100644 test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json create mode 100644 test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json create mode 100644 test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json create mode 100644 test/unified-test-format/invalid/expectedError-isClientError-type.json create mode 100644 test/unified-test-format/invalid/expectedError-isError-const.json create mode 100644 test/unified-test-format/invalid/expectedError-isError-type.json create mode 100644 test/unified-test-format/invalid/expectedError-minProperties.json create mode 100644 test/unified-test-format/invalid/expectedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json create mode 100644 test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json create mode 100644 test/unified-test-format/invalid/expectedEvent-maxProperties.json create mode 100644 test/unified-test-format/invalid/expectedEvent-minProperties.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-client-required.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-client-type.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-events-items.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-events-required.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-events-type.json create mode 100644 test/unified-test-format/invalid/initialData-items.json create mode 100644 test/unified-test-format/invalid/initialData-minItems.json create mode 100644 test/unified-test-format/invalid/initialData-type.json create mode 100644 test/unified-test-format/invalid/operation-additionalProperties.json create mode 100644 test/unified-test-format/invalid/operation-arguments-type.json create mode 100644 test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json create mode 100644 test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json create mode 100644 test/unified-test-format/invalid/operation-expectError-type.json create mode 100644 test/unified-test-format/invalid/operation-expectEvents-type.json create mode 100644 test/unified-test-format/invalid/operation-name-required.json create mode 100644 test/unified-test-format/invalid/operation-name-type.json create mode 100644 test/unified-test-format/invalid/operation-object-required.json create mode 100644 test/unified-test-format/invalid/operation-object-type.json create mode 100644 test/unified-test-format/invalid/operation-saveResultAsEntity-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-additionalProperties.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-minProperties.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-topologies-enum.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-topologies-items.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-topologies-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirements-items.json create mode 100644 test/unified-test-format/invalid/runOnRequirements-minItems.json create mode 100644 test/unified-test-format/invalid/runOnRequirements-type.json create mode 100644 test/unified-test-format/invalid/schemaVersion-pattern.json create mode 100644 test/unified-test-format/invalid/schemaVersion-required.json create mode 100644 test/unified-test-format/invalid/schemaVersion-type.json create mode 100644 test/unified-test-format/invalid/test-additionalProperties.json create mode 100644 test/unified-test-format/invalid/test-description-required.json create mode 100644 test/unified-test-format/invalid/test-description-type.json create mode 100644 test/unified-test-format/invalid/test-expectEvents-items.json create mode 100644 test/unified-test-format/invalid/test-expectEvents-type.json create mode 100644 test/unified-test-format/invalid/test-operations-items.json create mode 100644 test/unified-test-format/invalid/test-operations-required.json create mode 100644 test/unified-test-format/invalid/test-operations-type.json create mode 100644 test/unified-test-format/invalid/test-outcome-items.json create mode 100644 test/unified-test-format/invalid/test-outcome-minItems.json create mode 100644 test/unified-test-format/invalid/test-outcome-type.json create mode 100644 test/unified-test-format/invalid/test-runOnRequirements-items.json create mode 100644 test/unified-test-format/invalid/test-runOnRequirements-minItems.json create mode 100644 test/unified-test-format/invalid/test-runOnRequirements-type.json create mode 100644 test/unified-test-format/invalid/test-skipReason-type.json create mode 100644 test/unified-test-format/invalid/tests-items.json create mode 100644 test/unified-test-format/invalid/tests-minItems.json create mode 100644 test/unified-test-format/invalid/tests-required.json create mode 100644 test/unified-test-format/invalid/tests-type.json create mode 100644 test/unified-test-format/valid-fail/entity-bucket-database-undefined.json create mode 100644 test/unified-test-format/valid-fail/entity-collection-database-undefined.json create mode 100644 test/unified-test-format/valid-fail/entity-database-client-undefined.json create mode 100644 test/unified-test-format/valid-fail/entity-session-client-undefined.json create mode 100644 test/unified-test-format/valid-fail/returnDocument-enum-invalid.json create mode 100644 test/unified-test-format/valid-fail/schemaVersion-unsupported.json create mode 100644 test/unified-test-format/valid-pass/poc-change-streams.json create mode 100644 test/unified-test-format/valid-pass/poc-command-monitoring.json create mode 100644 test/unified-test-format/valid-pass/poc-crud.json create mode 100644 test/unified-test-format/valid-pass/poc-gridfs.json create mode 100644 test/unified-test-format/valid-pass/poc-retryable-reads.json create mode 100644 test/unified-test-format/valid-pass/poc-retryable-writes.json create mode 100644 test/unified-test-format/valid-pass/poc-sessions.json create mode 100644 test/unified-test-format/valid-pass/poc-transactions-convenient-api.json create mode 100644 test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json create mode 100644 test/unified-test-format/valid-pass/poc-transactions.json create mode 100644 test/unified_format.py diff --git a/test/__init__.py b/test/__init__.py index 88e48b7cd9..d24601551b 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -613,6 +613,16 @@ def is_topology_type(self, topologies): return True if 'sharded' in topologies and self.is_mongos: return True + if 'sharded-replicaset' in topologies and self.is_mongos: + shards = list(client_context.client.config.shards.find()) + for shard in shards: + # For a 3-member RS-backed sharded cluster, shard['host'] + # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' + # Otherwise it will be 'ip1:port1' + host_spec = shard['host'] + if not len(host_spec.split('/')) > 1: + return False + return True return False def require_cluster_type(self, topologies=[]): diff --git a/test/test_unified_format.py b/test/test_unified_format.py new file mode 100644 index 0000000000..a32918c4d0 --- /dev/null +++ b/test/test_unified_format.py @@ -0,0 +1,73 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +sys.path[0:0] = [""] + +from bson import ObjectId + +from test import unittest +from test.unified_format import generate_test_classes, MatchEvaluatorUtil + + +_TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'unified-test-format') + + +globals().update(generate_test_classes( + os.path.join(_TEST_PATH, 'valid-pass'), + module=__name__, + class_name_prefix='UnifiedTestFormat', + expected_failures=[ + 'Client side error in command starting transaction', # PYTHON-1894 + ])) + + +globals().update(generate_test_classes( + os.path.join(_TEST_PATH, 'valid-fail'), + module=__name__, + class_name_prefix='UnifiedTestFormat', + bypass_test_generation_errors=True, + expected_failures=[ + '.*', # All tests expected to fail + ])) + + +class TestMatchEvaluatorUtil(unittest.TestCase): + def setUp(self): + self.match_evaluator = MatchEvaluatorUtil(self) + + def test_unsetOrMatches(self): + spec = {'$$unsetOrMatches': {'y': {'$$unsetOrMatches': 2}}} + for actual in [{}, {'y': 2}, None]: + self.match_evaluator.match_result(spec, actual) + + spec = {'x': {'$$unsetOrMatches': {'y': {'$$unsetOrMatches': 2}}}} + for actual in [{}, {'x': {}}, {'x': {'y': 2}}]: + self.match_evaluator.match_result(spec, actual) + + def test_type(self): + self.match_evaluator.match_result( + {'operationType': 'insert', + 'ns': {'db': 'change-stream-tests', 'coll': 'test'}, + 'fullDocument': {'_id': {'$$type': 'objectId'}, 'x': 1}}, + {'operationType': 'insert', + 'fullDocument': {'_id': ObjectId('5fc93511ac93941052098f0c'), 'x': 1}, + 'ns': {'db': 'change-stream-tests', 'coll': 'test'}}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified-test-format/example-insertOne.json b/test/unified-test-format/example-insertOne.json new file mode 100644 index 0000000000..be41f9eacb --- /dev/null +++ b/test/unified-test-format/example-insertOne.json @@ -0,0 +1,100 @@ +{ + "description": "example-insertOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "insertOne", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-additionalProperties.json b/test/unified-test-format/invalid/collectionData-additionalProperties.json new file mode 100644 index 0000000000..2d85093109 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-additionalProperties.json @@ -0,0 +1,40 @@ +{ + "description": "collectionData-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": [], + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-collectionName-required.json b/test/unified-test-format/invalid/collectionData-collectionName-required.json new file mode 100644 index 0000000000..040dd86a1c --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-collectionName-required.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-collectionName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "databaseName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-collectionName-type.json b/test/unified-test-format/invalid/collectionData-collectionName-type.json new file mode 100644 index 0000000000..676d822e5e --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-collectionName-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-collectionName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": 0, + "databaseName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-required.json b/test/unified-test-format/invalid/collectionData-databaseName-required.json new file mode 100644 index 0000000000..7548f9d5be --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-databaseName-required.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-databaseName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-type.json b/test/unified-test-format/invalid/collectionData-databaseName-type.json new file mode 100644 index 0000000000..ef719bbf6a --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-databaseName-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": 0, + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-items.json b/test/unified-test-format/invalid/collectionData-documents-items.json new file mode 100644 index 0000000000..2916718d50 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-items.json @@ -0,0 +1,41 @@ +{ + "description": "collectionData-documents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": [ + 0 + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-required.json b/test/unified-test-format/invalid/collectionData-documents-required.json new file mode 100644 index 0000000000..7b8a7ead2a --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-required.json @@ -0,0 +1,38 @@ +{ + "description": "collectionData-documents-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-documents-type.json b/test/unified-test-format/invalid/collectionData-documents-type.json new file mode 100644 index 0000000000..953cabae6e --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-documents-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-documents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "documents": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json new file mode 100644 index 0000000000..beef260eed --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-additionalProperties.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "foo": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json new file mode 100644 index 0000000000..1b9f4bcbea --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readConcern-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-readConcern-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "readConcern": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json new file mode 100644 index 0000000000..988b594d13 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-readPreference-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-readPreference-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "readPreference": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json new file mode 100644 index 0000000000..bd2157c5cb --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-writeConcern-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-writeConcern-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "writeConcern": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-items.json b/test/unified-test-format/invalid/createEntities-items.json new file mode 100644 index 0000000000..8e9d6ff702 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-items.json @@ -0,0 +1,13 @@ +{ + "description": "createEntities-items", + "schemaVersion": "1.0", + "createEntities": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-minItems.json b/test/unified-test-format/invalid/createEntities-minItems.json new file mode 100644 index 0000000000..3654923d28 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "createEntities-minItems", + "schemaVersion": "1.0", + "createEntities": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/createEntities-type.json b/test/unified-test-format/invalid/createEntities-type.json new file mode 100644 index 0000000000..ce3c382c93 --- /dev/null +++ b/test/unified-test-format/invalid/createEntities-type.json @@ -0,0 +1,11 @@ +{ + "description": "createEntities-type", + "schemaVersion": "1.0", + "createEntities": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/description-required.json b/test/unified-test-format/invalid/description-required.json new file mode 100644 index 0000000000..e4e0d0efdf --- /dev/null +++ b/test/unified-test-format/invalid/description-required.json @@ -0,0 +1,9 @@ +{ + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-additionalProperties.json b/test/unified-test-format/invalid/entity-additionalProperties.json new file mode 100644 index 0000000000..38b8898787 --- /dev/null +++ b/test/unified-test-format/invalid/entity-additionalProperties.json @@ -0,0 +1,15 @@ +{ + "description": "entity-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-additionalProperties.json b/test/unified-test-format/invalid/entity-bucket-additionalProperties.json new file mode 100644 index 0000000000..46f9b4038e --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "entity-bucket-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json b/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json new file mode 100644 index 0000000000..c3d7423e65 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-bucketOptions-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-bucket-bucketOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0", + "bucketOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-database-required.json b/test/unified-test-format/invalid/entity-bucket-database-required.json new file mode 100644 index 0000000000..1fde5a96c9 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-database-required.json @@ -0,0 +1,29 @@ +{ + "description": "entity-bucket-database-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-database-type.json b/test/unified-test-format/invalid/entity-bucket-database-type.json new file mode 100644 index 0000000000..798d273fb0 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-database-type.json @@ -0,0 +1,30 @@ +{ + "description": "entity-bucket-database-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": "bucket0", + "database": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-id-required.json b/test/unified-test-format/invalid/entity-bucket-id-required.json new file mode 100644 index 0000000000..c547d8ea3c --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-id-required.json @@ -0,0 +1,29 @@ +{ + "description": "entity-bucket-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-bucket-id-type.json b/test/unified-test-format/invalid/entity-bucket-id-type.json new file mode 100644 index 0000000000..f4e10ee630 --- /dev/null +++ b/test/unified-test-format/invalid/entity-bucket-id-type.json @@ -0,0 +1,30 @@ +{ + "description": "entity-bucket-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "bucket": { + "id": 0, + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-additionalProperties.json b/test/unified-test-format/invalid/entity-client-additionalProperties.json new file mode 100644 index 0000000000..467e1d6ae1 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-id-required.json b/test/unified-test-format/invalid/entity-client-id-required.json new file mode 100644 index 0000000000..4be2fbf8e8 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-client-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-id-type.json b/test/unified-test-format/invalid/entity-client-id-type.json new file mode 100644 index 0000000000..cdc7cbc0e7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-client-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json new file mode 100644 index 0000000000..1252ac82d7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-items.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": [ + 0 + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json new file mode 100644 index 0000000000..e78068a442 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json new file mode 100644 index 0000000000..5ac2b340c5 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-ignoreCommandMonitoringEvents-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-ignoreCommandMonitoringEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "ignoreCommandMonitoringEvents": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-enum.json b/test/unified-test-format/invalid/entity-client-observeEvents-enum.json new file mode 100644 index 0000000000..c39c94eee2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-enum.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeEvents-enum", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "foo" + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-items.json b/test/unified-test-format/invalid/entity-client-observeEvents-items.json new file mode 100644 index 0000000000..3aee11e3d5 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-items.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeEvents-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + 0 + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json b/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json new file mode 100644 index 0000000000..e70d90c0a7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeEvents-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeEvents-type.json b/test/unified-test-format/invalid/entity-client-observeEvents-type.json new file mode 100644 index 0000000000..c144e32369 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeEvents-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-uriOptions-type.json b/test/unified-test-format/invalid/entity-client-uriOptions-type.json new file mode 100644 index 0000000000..4252480e98 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-uriOptions-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-uriOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json b/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json new file mode 100644 index 0000000000..e429cd71f8 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-useMultipleMongoses-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-useMultipleMongoses-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-additionalProperties.json b/test/unified-test-format/invalid/entity-collection-additionalProperties.json new file mode 100644 index 0000000000..90ee2b1ca0 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-additionalProperties.json @@ -0,0 +1,32 @@ +{ + "description": "entity-collection-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionName-required.json b/test/unified-test-format/invalid/entity-collection-collectionName-required.json new file mode 100644 index 0000000000..2446722e5e --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionName-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-collectionName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionName-type.json b/test/unified-test-format/invalid/entity-collection-collectionName-type.json new file mode 100644 index 0000000000..ccad66aac9 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionName-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-collectionName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json b/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json new file mode 100644 index 0000000000..52220c1cd1 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-collectionOptions-type.json @@ -0,0 +1,32 @@ +{ + "description": "entity-collection-collectionOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo", + "collectionOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-database-required.json b/test/unified-test-format/invalid/entity-collection-database-required.json new file mode 100644 index 0000000000..ba96b43f76 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-database-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-database-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-database-type.json b/test/unified-test-format/invalid/entity-collection-database-type.json new file mode 100644 index 0000000000..b87134498d --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-database-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-database-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": 0, + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-id-required.json b/test/unified-test-format/invalid/entity-collection-id-required.json new file mode 100644 index 0000000000..84e5352ead --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-id-required.json @@ -0,0 +1,30 @@ +{ + "description": "entity-collection-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "database": "database0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-collection-id-type.json b/test/unified-test-format/invalid/entity-collection-id-type.json new file mode 100644 index 0000000000..f0821e5250 --- /dev/null +++ b/test/unified-test-format/invalid/entity-collection-id-type.json @@ -0,0 +1,31 @@ +{ + "description": "entity-collection-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": 0, + "database": "database0", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-additionalProperties.json b/test/unified-test-format/invalid/entity-database-additionalProperties.json new file mode 100644 index 0000000000..964cd27966 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "entity-database-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-client-required.json b/test/unified-test-format/invalid/entity-database-client-required.json new file mode 100644 index 0000000000..54f99cf13e --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-client-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-client-type.json b/test/unified-test-format/invalid/entity-database-client-type.json new file mode 100644 index 0000000000..ff4584c405 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-client-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": 0, + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseName-required.json b/test/unified-test-format/invalid/entity-database-databaseName-required.json new file mode 100644 index 0000000000..64cca95c49 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseName-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-databaseName-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseName-type.json b/test/unified-test-format/invalid/entity-database-databaseName-type.json new file mode 100644 index 0000000000..bd01aef781 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseName-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-databaseOptions-type.json b/test/unified-test-format/invalid/entity-database-databaseOptions-type.json new file mode 100644 index 0000000000..bc22ad3129 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-databaseOptions-type.json @@ -0,0 +1,25 @@ +{ + "description": "entity-database-databaseOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-id-required.json b/test/unified-test-format/invalid/entity-database-id-required.json new file mode 100644 index 0000000000..0b65cf1159 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-id-required.json @@ -0,0 +1,23 @@ +{ + "description": "entity-database-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-database-id-type.json b/test/unified-test-format/invalid/entity-database-id-type.json new file mode 100644 index 0000000000..98b5789d04 --- /dev/null +++ b/test/unified-test-format/invalid/entity-database-id-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-database-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": 0, + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-maxProperties.json b/test/unified-test-format/invalid/entity-maxProperties.json new file mode 100644 index 0000000000..f4a6b7c914 --- /dev/null +++ b/test/unified-test-format/invalid/entity-maxProperties.json @@ -0,0 +1,22 @@ +{ + "description": "entity-maxProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + }, + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-minProperties.json b/test/unified-test-format/invalid/entity-minProperties.json new file mode 100644 index 0000000000..d89949ce30 --- /dev/null +++ b/test/unified-test-format/invalid/entity-minProperties.json @@ -0,0 +1,13 @@ +{ + "description": "entity-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + {} + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-additionalProperties.json b/test/unified-test-format/invalid/entity-session-additionalProperties.json new file mode 100644 index 0000000000..ab4cd2014f --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "entity-session-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-client-required.json b/test/unified-test-format/invalid/entity-session-client-required.json new file mode 100644 index 0000000000..8c9ed72e99 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "entity-session-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-client-type.json b/test/unified-test-format/invalid/entity-session-client-type.json new file mode 100644 index 0000000000..b5ccc3f60f --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "entity-session-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-id-required.json b/test/unified-test-format/invalid/entity-session-id-required.json new file mode 100644 index 0000000000..3e5d5c5439 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-id-required.json @@ -0,0 +1,22 @@ +{ + "description": "entity-session-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-id-type.json b/test/unified-test-format/invalid/entity-session-id-type.json new file mode 100644 index 0000000000..dcd46e5be7 --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-id-type.json @@ -0,0 +1,23 @@ +{ + "description": "entity-session-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": 0, + "client": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-session-sessionOptions-type.json b/test/unified-test-format/invalid/entity-session-sessionOptions-type.json new file mode 100644 index 0000000000..0ee15891eb --- /dev/null +++ b/test/unified-test-format/invalid/entity-session-sessionOptions-type.json @@ -0,0 +1,24 @@ +{ + "description": "entity-session-sessionOptions-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-additionalProperties.json b/test/unified-test-format/invalid/entity-stream-additionalProperties.json new file mode 100644 index 0000000000..c8e76e9985 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-additionalProperties.json @@ -0,0 +1,19 @@ +{ + "description": "entity-stream-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": "FF", + "foo": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json b/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json new file mode 100644 index 0000000000..7381893b55 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-pattern.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-hexBytes-pattern", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": "FFF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-required.json b/test/unified-test-format/invalid/entity-stream-hexBytes-required.json new file mode 100644 index 0000000000..cc3bf09b20 --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-stream-hexBytes-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-hexBytes-type.json b/test/unified-test-format/invalid/entity-stream-hexBytes-type.json new file mode 100644 index 0000000000..e6e2299eac --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-hexBytes-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-hexBytes-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": "stream0", + "hexBytes": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-id-required.json b/test/unified-test-format/invalid/entity-stream-id-required.json new file mode 100644 index 0000000000..ff814d4e9c --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-id-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-stream-id-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "hexBytes": "FF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-stream-id-type.json b/test/unified-test-format/invalid/entity-stream-id-type.json new file mode 100644 index 0000000000..5fc654d97e --- /dev/null +++ b/test/unified-test-format/invalid/entity-stream-id-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-stream-id-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "stream": { + "id": 0, + "hexBytes": "FF" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-additionalProperties.json b/test/unified-test-format/invalid/expectedError-additionalProperties.json new file mode 100644 index 0000000000..3a79df8e34 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "foo": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorCode-type.json b/test/unified-test-format/invalid/expectedError-errorCode-type.json new file mode 100644 index 0000000000..b6b6f5d05a --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorCode-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorCode-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorCode": "foo" + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorCodeName-type.json b/test/unified-test-format/invalid/expectedError-errorCodeName-type.json new file mode 100644 index 0000000000..3ac5e43045 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorCodeName-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorCodeName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorCodeName": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorContains-type.json b/test/unified-test-format/invalid/expectedError-errorContains-type.json new file mode 100644 index 0000000000..847a987dff --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorContains-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorContains-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorContains": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json new file mode 100644 index 0000000000..4eab56ad18 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-items.json @@ -0,0 +1,27 @@ +{ + "description": "expectedError-errorLabelsContain-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": [ + 0 + ] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json new file mode 100644 index 0000000000..48162110aa --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-minItems.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsContain-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": [] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json b/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json new file mode 100644 index 0000000000..a0aba918b5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsContain-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsContain-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsContain": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json new file mode 100644 index 0000000000..6c94d07135 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-items.json @@ -0,0 +1,27 @@ +{ + "description": "expectedError-errorLabelsOmit-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": [ + 0 + ] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json new file mode 100644 index 0000000000..88c6582028 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-minItems.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsOmit-minItems", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": [] + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json new file mode 100644 index 0000000000..5f57114fea --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorLabelsOmit-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorLabelsOmit-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorLabelsOmit": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isClientError-type.json b/test/unified-test-format/invalid/expectedError-isClientError-type.json new file mode 100644 index 0000000000..bfcc06679b --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isClientError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isClientError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isClientError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isError-const.json b/test/unified-test-format/invalid/expectedError-isError-const.json new file mode 100644 index 0000000000..6a398bbf22 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isError-const.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isError-const", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": false + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isError-type.json b/test/unified-test-format/invalid/expectedError-isError-type.json new file mode 100644 index 0000000000..354aff31f4 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-minProperties.json b/test/unified-test-format/invalid/expectedError-minProperties.json new file mode 100644 index 0000000000..10e0b89ab7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-minProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedError-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": {} + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedEvent-additionalProperties.json new file mode 100644 index 0000000000..2c4f7d27e7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-additionalProperties.json @@ -0,0 +1,32 @@ +{ + "description": "expectedEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "foo": 0 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json new file mode 100644 index 0000000000..ea6078faae --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandFailedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..ee6eb50658 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandStartedEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "foo": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json new file mode 100644 index 0000000000..4c9483caf3 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandStartedEvent-command-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json new file mode 100644 index 0000000000..a5a66096a0 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandStartedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json new file mode 100644 index 0000000000..dc040ec108 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandStartedEvent-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json b/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json new file mode 100644 index 0000000000..4a20e906b9 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandSucceededEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json b/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json new file mode 100644 index 0000000000..5464542751 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json @@ -0,0 +1,34 @@ +{ + "description": "expectedEvent-commandSucceededEvent-reply-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "reply": 0 + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-maxProperties.json b/test/unified-test-format/invalid/expectedEvent-maxProperties.json new file mode 100644 index 0000000000..f01441946f --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-maxProperties.json @@ -0,0 +1,33 @@ +{ + "description": "expectedEvent-maxProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": {}, + "commandSucceededEvent": {} + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-minProperties.json b/test/unified-test-format/invalid/expectedEvent-minProperties.json new file mode 100644 index 0000000000..ebcc494894 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEvent-minProperties.json @@ -0,0 +1,30 @@ +{ + "description": "expectedEvent-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + {} + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json new file mode 100644 index 0000000000..6ecf5931fb --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "expectedEventsForClient-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [], + "foo": 0 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-required.json b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json new file mode 100644 index 0000000000..b879db8598 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json @@ -0,0 +1,27 @@ +{ + "description": "expectedEventsForClient-client-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "events": [] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-type.json b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json new file mode 100644 index 0000000000..4ee5427df1 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-client-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": 0, + "events": [] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-items.json b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json new file mode 100644 index 0000000000..ee8ce4a403 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json @@ -0,0 +1,30 @@ +{ + "description": "expectedEventsForClient-events-items", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": [ + 0 + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-required.json b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json new file mode 100644 index 0000000000..7f1bc6fb53 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json @@ -0,0 +1,27 @@ +{ + "description": "expectedEventsForClient-events-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-type.json b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json new file mode 100644 index 0000000000..f171fc2b93 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": [ + { + "client": "client0", + "events": 0 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-items.json b/test/unified-test-format/invalid/initialData-items.json new file mode 100644 index 0000000000..9c27d554f9 --- /dev/null +++ b/test/unified-test-format/invalid/initialData-items.json @@ -0,0 +1,13 @@ +{ + "description": "initialData-items", + "schemaVersion": "1.0", + "initialData": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-minItems.json b/test/unified-test-format/invalid/initialData-minItems.json new file mode 100644 index 0000000000..984100a2be --- /dev/null +++ b/test/unified-test-format/invalid/initialData-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "initialData-minItems", + "schemaVersion": "1.0", + "initialData": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/initialData-type.json b/test/unified-test-format/invalid/initialData-type.json new file mode 100644 index 0000000000..c33585e03a --- /dev/null +++ b/test/unified-test-format/invalid/initialData-type.json @@ -0,0 +1,11 @@ +{ + "description": "initialData-type", + "schemaVersion": "1.0", + "initialData": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-additionalProperties.json b/test/unified-test-format/invalid/operation-additionalProperties.json new file mode 100644 index 0000000000..8f2f1434ec --- /dev/null +++ b/test/unified-test-format/invalid/operation-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "operation-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-arguments-type.json b/test/unified-test-format/invalid/operation-arguments-type.json new file mode 100644 index 0000000000..a22f3921c3 --- /dev/null +++ b/test/unified-test-format/invalid/operation-arguments-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-arguments-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "arguments": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json b/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json new file mode 100644 index 0000000000..bc15fbac76 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-conflicts_with_expectResult.json @@ -0,0 +1,26 @@ +{ + "description": "operation-expectError-conflicts_with_expectResult", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": true + }, + "expectResult": {} + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json b/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json new file mode 100644 index 0000000000..dead4a3b9d --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-conflicts_with_saveResultAsEntity.json @@ -0,0 +1,26 @@ +{ + "description": "operation-expectError-conflicts_with_saveResultAsEntity", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isError": true + }, + "saveResultAsEntity": "foo" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectError-type.json b/test/unified-test-format/invalid/operation-expectError-type.json new file mode 100644 index 0000000000..b224ba3535 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectError-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-expectError-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-expectEvents-type.json b/test/unified-test-format/invalid/operation-expectEvents-type.json new file mode 100644 index 0000000000..ecd4c011a9 --- /dev/null +++ b/test/unified-test-format/invalid/operation-expectEvents-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-expectEvents-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-name-required.json b/test/unified-test-format/invalid/operation-name-required.json new file mode 100644 index 0000000000..42fcb3a308 --- /dev/null +++ b/test/unified-test-format/invalid/operation-name-required.json @@ -0,0 +1,21 @@ +{ + "description": "operation-name-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-name-type.json b/test/unified-test-format/invalid/operation-name-type.json new file mode 100644 index 0000000000..2f91da078a --- /dev/null +++ b/test/unified-test-format/invalid/operation-name-type.json @@ -0,0 +1,22 @@ +{ + "description": "operation-name-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": 0, + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-object-required.json b/test/unified-test-format/invalid/operation-object-required.json new file mode 100644 index 0000000000..c0410ce3fd --- /dev/null +++ b/test/unified-test-format/invalid/operation-object-required.json @@ -0,0 +1,21 @@ +{ + "description": "operation-object-required", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-object-type.json b/test/unified-test-format/invalid/operation-object-type.json new file mode 100644 index 0000000000..edb0a0b51a --- /dev/null +++ b/test/unified-test-format/invalid/operation-object-type.json @@ -0,0 +1,22 @@ +{ + "description": "operation-object-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json b/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json new file mode 100644 index 0000000000..65ead94c7a --- /dev/null +++ b/test/unified-test-format/invalid/operation-saveResultAsEntity-type.json @@ -0,0 +1,23 @@ +{ + "description": "operation-saveResultAsEntity-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "saveResultAsEntity": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json b/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json new file mode 100644 index 0000000000..79fa687e45 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-additionalProperties.json @@ -0,0 +1,16 @@ +{ + "description": "runOnRequirement-additionalProperties", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "foo": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json new file mode 100644 index 0000000000..78766eb925 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-pattern.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-maxServerVersion-pattern", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": "1.2.3.4" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json new file mode 100644 index 0000000000..ffc9118ba2 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-maxServerVersion-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-maxServerVersion-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "maxServerVersion": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minProperties.json b/test/unified-test-format/invalid/runOnRequirement-minProperties.json new file mode 100644 index 0000000000..c2bfed3be7 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minProperties.json @@ -0,0 +1,13 @@ +{ + "description": "runOnRequirement-minProperties", + "schemaVersion": "1.0", + "runOnRequirements": [ + {} + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json new file mode 100644 index 0000000000..19abc1755f --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-pattern.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-minServerVersion-pattern", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "1.2.3.4" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json new file mode 100644 index 0000000000..688d1c67ee --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-minServerVersion-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-minServerVersion-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json b/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json new file mode 100644 index 0000000000..f62e5040d4 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-enum.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-topologies-enum", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + "foo" + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-items.json b/test/unified-test-format/invalid/runOnRequirement-topologies-items.json new file mode 100644 index 0000000000..a205b3293d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-items.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-topologies-items", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + 0 + ] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json b/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json new file mode 100644 index 0000000000..16f29b3f4b --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-minItems.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-topologies-minItems", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-topologies-type.json b/test/unified-test-format/invalid/runOnRequirement-topologies-type.json new file mode 100644 index 0000000000..f6d147cd6f --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-topologies-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-topologies-type", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-items.json b/test/unified-test-format/invalid/runOnRequirements-items.json new file mode 100644 index 0000000000..40ec84a3f3 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-items.json @@ -0,0 +1,13 @@ +{ + "description": "runOnRequirements-items", + "schemaVersion": "1.0", + "runOnRequirements": [ + 0 + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-minItems.json b/test/unified-test-format/invalid/runOnRequirements-minItems.json new file mode 100644 index 0000000000..4ca9f99b5d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "runOnRequirements-minItems", + "schemaVersion": "1.0", + "runOnRequirements": [], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirements-type.json b/test/unified-test-format/invalid/runOnRequirements-type.json new file mode 100644 index 0000000000..98b859f3ea --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirements-type.json @@ -0,0 +1,11 @@ +{ + "description": "runOnRequirements-type", + "schemaVersion": "1.0", + "runOnRequirements": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-pattern.json b/test/unified-test-format/invalid/schemaVersion-pattern.json new file mode 100644 index 0000000000..bcb8980516 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-pattern.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-pattern", + "schemaVersion": "1.2.3.4", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-required.json b/test/unified-test-format/invalid/schemaVersion-required.json new file mode 100644 index 0000000000..7388ff0bf1 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-required.json @@ -0,0 +1,9 @@ +{ + "description": "schemaVersion-required", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/schemaVersion-type.json b/test/unified-test-format/invalid/schemaVersion-type.json new file mode 100644 index 0000000000..646473a209 --- /dev/null +++ b/test/unified-test-format/invalid/schemaVersion-type.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-type", + "schemaVersion": 0, + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-additionalProperties.json b/test/unified-test-format/invalid/test-additionalProperties.json new file mode 100644 index 0000000000..a699319c30 --- /dev/null +++ b/test/unified-test-format/invalid/test-additionalProperties.json @@ -0,0 +1,11 @@ +{ + "description": "test-additionalProperties", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "foo": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-description-required.json b/test/unified-test-format/invalid/test-description-required.json new file mode 100644 index 0000000000..8bf23014d4 --- /dev/null +++ b/test/unified-test-format/invalid/test-description-required.json @@ -0,0 +1,9 @@ +{ + "description": "test-description-required", + "schemaVersion": "1.0", + "tests": [ + { + "operation": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-description-type.json b/test/unified-test-format/invalid/test-description-type.json new file mode 100644 index 0000000000..bba3690449 --- /dev/null +++ b/test/unified-test-format/invalid/test-description-type.json @@ -0,0 +1,10 @@ +{ + "description": "test-description-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": 0, + "operation": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-items.json b/test/unified-test-format/invalid/test-expectEvents-items.json new file mode 100644 index 0000000000..394f74746c --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-expectEvents-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectEvents-type.json b/test/unified-test-format/invalid/test-expectEvents-type.json new file mode 100644 index 0000000000..1569f0a0d7 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectEvents-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-items.json b/test/unified-test-format/invalid/test-operations-items.json new file mode 100644 index 0000000000..00af8e7453 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-items.json @@ -0,0 +1,12 @@ +{ + "description": "test-operations-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-required.json b/test/unified-test-format/invalid/test-operations-required.json new file mode 100644 index 0000000000..67c6f83044 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-required.json @@ -0,0 +1,9 @@ +{ + "description": "test-operations-required", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo" + } + ] +} diff --git a/test/unified-test-format/invalid/test-operations-type.json b/test/unified-test-format/invalid/test-operations-type.json new file mode 100644 index 0000000000..1e8b5b2496 --- /dev/null +++ b/test/unified-test-format/invalid/test-operations-type.json @@ -0,0 +1,10 @@ +{ + "description": "test-operations-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-items.json b/test/unified-test-format/invalid/test-outcome-items.json new file mode 100644 index 0000000000..cf6bb54f87 --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-outcome-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-minItems.json b/test/unified-test-format/invalid/test-outcome-minItems.json new file mode 100644 index 0000000000..aadf8e514a --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-outcome-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-outcome-type.json b/test/unified-test-format/invalid/test-outcome-type.json new file mode 100644 index 0000000000..e60c119d7e --- /dev/null +++ b/test/unified-test-format/invalid/test-outcome-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-outcome-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "outcome": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-items.json b/test/unified-test-format/invalid/test-runOnRequirements-items.json new file mode 100644 index 0000000000..866bebb51f --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-runOnRequirements-items", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-minItems.json b/test/unified-test-format/invalid/test-runOnRequirements-minItems.json new file mode 100644 index 0000000000..d61f063849 --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-runOnRequirements-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-runOnRequirements-type.json b/test/unified-test-format/invalid/test-runOnRequirements-type.json new file mode 100644 index 0000000000..5b25b1005d --- /dev/null +++ b/test/unified-test-format/invalid/test-runOnRequirements-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-runOnRequirements-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "runOnRequirements": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/test-skipReason-type.json b/test/unified-test-format/invalid/test-skipReason-type.json new file mode 100644 index 0000000000..0408e76834 --- /dev/null +++ b/test/unified-test-format/invalid/test-skipReason-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-skipReason-type", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "skipReason": 0 + } + ] +} diff --git a/test/unified-test-format/invalid/tests-items.json b/test/unified-test-format/invalid/tests-items.json new file mode 100644 index 0000000000..11f37469e4 --- /dev/null +++ b/test/unified-test-format/invalid/tests-items.json @@ -0,0 +1,7 @@ +{ + "description": "tests-items", + "schemaVersion": "1.0", + "tests": [ + 0 + ] +} diff --git a/test/unified-test-format/invalid/tests-minItems.json b/test/unified-test-format/invalid/tests-minItems.json new file mode 100644 index 0000000000..3f74f94af7 --- /dev/null +++ b/test/unified-test-format/invalid/tests-minItems.json @@ -0,0 +1,5 @@ +{ + "description": "tests-minItems", + "schemaVersion": "1.0", + "tests": [] +} diff --git a/test/unified-test-format/invalid/tests-required.json b/test/unified-test-format/invalid/tests-required.json new file mode 100644 index 0000000000..de4b2fd063 --- /dev/null +++ b/test/unified-test-format/invalid/tests-required.json @@ -0,0 +1,4 @@ +{ + "description": "tests-required", + "schemaVersion": "1.0" +} diff --git a/test/unified-test-format/invalid/tests-type.json b/test/unified-test-format/invalid/tests-type.json new file mode 100644 index 0000000000..62d8194a41 --- /dev/null +++ b/test/unified-test-format/invalid/tests-type.json @@ -0,0 +1,5 @@ +{ + "description": "tests-type", + "schemaVersion": "1.0", + "tests": 0 +} diff --git a/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json b/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json new file mode 100644 index 0000000000..7f7f1978c3 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-bucket-database-undefined.json @@ -0,0 +1,18 @@ +{ + "description": "entity-bucket-database-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "bucket": { + "id": "bucket0", + "database": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-collection-database-undefined.json b/test/unified-test-format/valid-fail/entity-collection-database-undefined.json new file mode 100644 index 0000000000..20b0733e34 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-collection-database-undefined.json @@ -0,0 +1,19 @@ +{ + "description": "entity-collection-database-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "collection": { + "id": "collection0", + "database": "foo", + "collectionName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-database-client-undefined.json b/test/unified-test-format/valid-fail/entity-database-client-undefined.json new file mode 100644 index 0000000000..0f8110e6d3 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-database-client-undefined.json @@ -0,0 +1,19 @@ +{ + "description": "entity-database-client-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "database": { + "id": "database0", + "client": "foo", + "databaseName": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-session-client-undefined.json b/test/unified-test-format/valid-fail/entity-session-client-undefined.json new file mode 100644 index 0000000000..260356436a --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-session-client-undefined.json @@ -0,0 +1,18 @@ +{ + "description": "entity-session-client-undefined", + "schemaVersion": "1.0", + "createEntities": [ + { + "session": { + "id": "session0", + "client": "foo" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json b/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json new file mode 100644 index 0000000000..ea425fb568 --- /dev/null +++ b/test/unified-test-format/valid-fail/returnDocument-enum-invalid.json @@ -0,0 +1,66 @@ +{ + "description": "returnDocument-enum-invalid", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "tests": [ + { + "description": "FindOneAndReplace returnDocument invalid enum value", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "invalid" + } + } + ] + }, + { + "description": "FindOneAndUpdate returnDocument invalid enum value", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "invalid" + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/schemaVersion-unsupported.json b/test/unified-test-format/valid-fail/schemaVersion-unsupported.json new file mode 100644 index 0000000000..ceb5532917 --- /dev/null +++ b/test/unified-test-format/valid-fail/schemaVersion-unsupported.json @@ -0,0 +1,10 @@ +{ + "description": "schemaVersion-unsupported", + "schemaVersion": "0.1", + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-change-streams.json b/test/unified-test-format/valid-pass/poc-change-streams.json new file mode 100644 index 0000000000..2a2c41a682 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-change-streams.json @@ -0,0 +1,414 @@ +{ + "description": "poc-change-streams", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "change-stream-tests" + } + }, + { + "database": { + "id": "database2", + "client": "client1", + "databaseName": "change-stream-tests-2" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "test2" + } + }, + { + "collection": { + "id": "collection3", + "database": "database2", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [] + }, + { + "collectionName": "test2", + "databaseName": "change-stream-tests", + "documents": [] + }, + { + "collectionName": "test", + "databaseName": "change-stream-tests-2", + "documents": [] + } + ], + "tests": [ + { + "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection2", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection3", + "arguments": { + "document": { + "y": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "z": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test2" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests-2", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "y": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "z": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true, + "fullDocument": { + "$$unsetOrMatches": "default" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "batchSize": 1, + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 2 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "fullDocument": { + "_id": { + "$$type": "objectId" + }, + "x": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + }, + "resumeAfter": { + "$$exists": true + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": { + "fullDocument": { + "$$unsetOrMatches": "default" + }, + "resumeAfter": { + "$$exists": true + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "change-stream-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-command-monitoring.json b/test/unified-test-format/valid-pass/poc-command-monitoring.json new file mode 100644 index 0000000000..499396e0ba --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-command-monitoring.json @@ -0,0 +1,222 @@ +{ + "description": "poc-command-monitoring", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find event with a getmore and the server kills the cursor", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json new file mode 100644 index 0000000000..2ed86d6150 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -0,0 +1,446 @@ +{ + "description": "poc-crud", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database0", + "collectionName": "coll2", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "collectionName": "coll2", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + }, + { + "collectionName": "aggregate_out", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "BulkWrite with mixed ordered operations", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "3": 4 + } + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 1, + "upsertedIds": { + "5": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 34 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "InsertMany continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "name": "insertMany", + "object": "collection1", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "replaceOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "readConcern majority with out stage", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection2", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "aggregate_out" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll2", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "aggregate_out" + } + ], + "readConcern": { + "level": "majority" + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "aggregate_out", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $listLocalSessions", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database1", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "dummy": "dummy field" + } + }, + { + "$project": { + "_id": 0, + "dummy": 1 + } + } + ] + }, + "expectResult": [ + { + "dummy": "dummy field" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-gridfs.json b/test/unified-test-format/valid-pass/poc-gridfs.json new file mode 100644 index 0000000000..1f07a19bf6 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-gridfs.json @@ -0,0 +1,301 @@ +{ + "description": "poc-gridfs", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Delete when length is 10", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ] + }, + { + "description": "Download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } + } + ] + }, + { + "description": "Download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "Download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "Upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "chunkSizeBytes": 4 + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "oid0" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {}, + "sort": { + "uploadDate": -1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "oid0" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "_id": { + "$gt": { + "$oid": "000000000000000000000007" + } + } + }, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "oid0" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "oid0" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-reads.json b/test/unified-test-format/valid-pass/poc-retryable-reads.json new file mode 100644 index 0000000000..2b65d501a7 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-retryable-reads.json @@ -0,0 +1,433 @@ +{ + "description": "poc-retryable-reads", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "retryReads": false + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 2 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection1", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {} + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds on second attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "name": "listDatabases", + "object": "client0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json new file mode 100644 index 0000000000..3e42aacb80 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -0,0 +1,483 @@ +{ + "description": "poc-retryable-writes", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "expectResult": { + "insertedCount": { + "$$unsetOrMatches": 2 + }, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after multiple retryable writeConcernErrors", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-sessions.json b/test/unified-test-format/valid-pass/poc-sessions.json new file mode 100644 index 0000000000..75f3489428 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-sessions.json @@ -0,0 +1,466 @@ +{ + "description": "poc-sessions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty explicit session is discarded", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json new file mode 100644 index 0000000000..820ed65927 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -0,0 +1,505 @@ +{ + "description": "poc-transactions-convenient-api", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + }, + { + "session": { + "id": "session2", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction and no transaction options set", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from client", + "operations": [ + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from defaultTransactionOptions", + "operations": [ + { + "name": "withTransaction", + "object": "session2", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session2", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json new file mode 100644 index 0000000000..a0b297d59a --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -0,0 +1,409 @@ +{ + "description": "poc-transactions-mongos-pin-auto", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "remain pinned after non-transient Interrupted error on insertOne", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorCodeName": "Interrupted" + } + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$type": "object" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$type": "object" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-transactions.json b/test/unified-test-format/valid-pass/poc-transactions.json new file mode 100644 index 0000000000..62528f9ce1 --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-transactions.json @@ -0,0 +1,322 @@ +{ + "description": "poc-transactions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": { + ".": "." + } + } + }, + "expectError": { + "isClientError": true + } + }, + { + "name": "assertSessionTransactionState", + "object": "testRunner", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + "description": "explicitly create collection using create command", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "create index on a non-existing collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "session": "session0", + "name": "x_1", + "keys": { + "x": 1 + } + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "x_1" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py new file mode 100644 index 0000000000..3500b5634d --- /dev/null +++ b/test/unified_format.py @@ -0,0 +1,1032 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified test format runner. + +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +""" + +import copy +import datetime +import functools +import os +import re +import sys +import types + +from bson import json_util, Code, Decimal128, DBRef, SON, Int64, MaxKey, MinKey +from bson.binary import Binary +from bson.objectid import ObjectId +from bson.py3compat import abc, integer_types, iteritems, text_type, PY3 +from bson.regex import Regex, RE_TYPE + +from gridfs import GridFSBucket + +from pymongo import ASCENDING, MongoClient +from pymongo.client_session import ClientSession, TransactionOptions, _TxnState +from pymongo.change_stream import ChangeStream +from pymongo.collection import Collection +from pymongo.database import Database +from pymongo.errors import BulkWriteError, InvalidOperation, PyMongoError +from pymongo.monitoring import ( + CommandFailedEvent, CommandListener, CommandStartedEvent, + CommandSucceededEvent, _SENSITIVE_COMMANDS) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.results import BulkWriteResult +from pymongo.write_concern import WriteConcern + +from test import client_context, unittest, IntegrationTest +from test.utils import ( + camel_to_snake, rs_or_single_client, single_client, snake_to_camel) + +from test.version import Version +from test.utils import ( + camel_to_snake_args, parse_collection_options, parse_spec_options, + prepare_spec_arguments) + + +JSON_OPTS = json_util.JSONOptions(tz_aware=False) + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass. + + Vendored from six: https://github.com/benjaminp/six/blob/master/six.py + """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + + def __new__(cls, name, this_bases, d): + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d['__orig_bases__'] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def is_run_on_requirement_satisfied(requirement): + topology_satisfied = True + req_topologies = requirement.get('topologies') + if req_topologies: + topology_satisfied = client_context.is_topology_type( + req_topologies) + + min_version_satisfied = True + req_min_server_version = requirement.get('minServerVersion') + if req_min_server_version: + min_version_satisfied = Version.from_string( + req_min_server_version) <= client_context.version + + max_version_satisfied = True + req_max_server_version = requirement.get('maxServerVersion') + if req_max_server_version: + max_version_satisfied = Version.from_string( + req_max_server_version) >= client_context.version + + return (topology_satisfied and min_version_satisfied and + max_version_satisfied) + + +def parse_collection_or_database_options(options): + return parse_collection_options(options) + + +def parse_bulk_write_result(result): + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] + for int_idx in result.upserted_ids} + return { + 'deletedCount': result.deleted_count, + 'insertedCount': result.inserted_count, + 'matchedCount': result.matched_count, + 'modifiedCount': result.modified_count, + 'upsertedCount': result.upserted_count, + 'upsertedIds': upserted_ids} + + +def parse_bulk_write_error_result(error): + write_result = BulkWriteResult(error.details, True) + return parse_bulk_write_result(write_result) + + +class EventListenerUtil(CommandListener): + def __init__(self, observe_events, ignore_commands): + self._event_types = set(observe_events) + self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) + self._ignore_commands.add('configurefailpoint') + self.results = [] + + def _observe_event(self, event): + if event.command_name.lower() not in self._ignore_commands: + self.results.append(event) + + def started(self, event): + if 'commandStartedEvent' in self._event_types: + self._observe_event(event) + + def succeeded(self, event): + if 'commandSucceededEvent' in self._event_types: + self._observe_event(event) + + def failed(self, event): + if 'commandFailedEvent' in self._event_types: + self._observe_event(event) + + +class EntityMapUtil(object): + """Utility class that implements an entity map as per the unified + test format specification.""" + def __init__(self, test_class): + self._entities = {} + self._listeners = {} + self._session_lsids = {} + self._test_class = test_class + + def __getitem__(self, item): + try: + return self._entities[item] + except KeyError: + self._test_class.fail('Could not find entity named %s in map' % ( + item,)) + + def __setitem__(self, key, value): + if not isinstance(key, text_type): + self._test_class.fail( + 'Expected entity name of type str, got %s' % (type(key))) + + if key in self._entities: + self._test_class.fail('Entity named %s already in map' % (key,)) + + self._entities[key] = value + + def _create_entity(self, entity_spec): + if len(entity_spec) != 1: + self._test_class.fail( + "Entity spec %s did not contain exactly one top-level key" % ( + entity_spec,)) + + entity_type, spec = next(iteritems(entity_spec)) + if entity_type == 'client': + kwargs = {} + observe_events = spec.get('observeEvents', []) + ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) + if len(observe_events) or len(ignore_commands): + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil(observe_events, ignore_commands) + self._listeners[spec['id']] = listener + kwargs['event_listeners'] = [listener] + if client_context.is_mongos and spec.get('useMultipleMongoses'): + kwargs['h'] = client_context.mongos_seeds() + kwargs.update(spec.get('uriOptions', {})) + client = rs_or_single_client(**kwargs) + self[spec['id']] = client + self._test_class.addCleanup(client.close) + return + elif entity_type == 'database': + client = self[spec['client']] + if not isinstance(client, MongoClient): + self._test_class.fail( + 'Expected entity %s to be of type MongoClient, got %s' % ( + spec['client'], type(client))) + options = parse_collection_or_database_options( + spec.get('databaseOptions', {})) + self[spec['id']] = client.get_database( + spec['databaseName'], **options) + return + elif entity_type == 'collection': + database = self[spec['database']] + if not isinstance(database, Database): + self._test_class.fail( + 'Expected entity %s to be of type Database, got %s' % ( + spec['database'], type(database))) + options = parse_collection_or_database_options( + spec.get('collectionOptions', {})) + self[spec['id']] = database.get_collection( + spec['collectionName'], **options) + return + elif entity_type == 'session': + client = self[spec['client']] + if not isinstance(client, MongoClient): + self._test_class.fail( + 'Expected entity %s to be of type MongoClient, got %s' % ( + spec['client'], type(client))) + opts = camel_to_snake_args(spec.get('sessionOptions', {})) + if 'default_transaction_options' in opts: + txn_opts = parse_spec_options( + opts['default_transaction_options']) + txn_opts = TransactionOptions(**txn_opts) + opts = copy.deepcopy(opts) + opts['default_transaction_options'] = txn_opts + session = client.start_session(**dict(opts)) + self[spec['id']] = session + self._session_lsids[spec['id']] = copy.deepcopy(session.session_id) + self._test_class.addCleanup(session.end_session) + return + elif entity_type == 'bucket': + # TODO: implement the 'bucket' entity type + self._test_class.skipTest( + 'GridFS is not currently supported (PYTHON-2459)') + self._test_class.fail( + 'Unable to create entity of unknown type %s' % (entity_type,)) + + def create_entities_from_spec(self, entity_spec): + for spec in entity_spec: + self._create_entity(spec) + + def get_listener_for_client(self, client_name): + client = self[client_name] + if not isinstance(client, MongoClient): + self._test_class.fail( + 'Expected entity %s to be of type MongoClient, got %s' % ( + client_name, type(client))) + + listener = self._listeners.get(client_name) + if not listener: + self._test_class.fail( + 'No listeners configured for client %s' % (client_name,)) + + return listener + + def get_lsid_for_session(self, session_name): + session = self[session_name] + if not isinstance(session, ClientSession): + self._test_class.fail( + 'Expected entity %s to be of type ClientSession, got %s' % ( + session_name, type(session))) + + try: + return session.session_id + except InvalidOperation: + # session has been closed. + return self._session_lsids[session_name] + + +if not PY3: + binary_types = (Binary,) + long_types = (Int64, long) + unicode_type = unicode +else: + binary_types = (Binary, bytes) + long_types = (Int64,) + unicode_type = str + + +BSON_TYPE_ALIAS_MAP = { + # https://docs.mongodb.com/manual/reference/operator/query/type/ + # https://pymongo.readthedocs.io/en/stable/api/bson/index.html + 'double': (float,), + 'string': (text_type,), + 'object': (abc.Mapping,), + 'array': (abc.MutableSequence,), + 'binData': binary_types, + 'undefined': (type(None),), + 'objectId': (ObjectId,), + 'bool': (bool,), + 'date': (datetime.datetime,), + 'null': (type(None),), + 'regex': (Regex, RE_TYPE), + 'dbPointer': (DBRef,), + 'javascript': (unicode_type, Code), + 'symbol': (unicode_type,), + 'javascriptWithScope': (unicode_type, Code), + 'int': (int,), + 'long': (Int64,), + 'decimal': (Decimal128,), + 'maxKey': (MaxKey,), + 'minKey': (MinKey,), +} + + +class MatchEvaluatorUtil(object): + """Utility class that implements methods for evaluating matches as per + the unified test format specification.""" + def __init__(self, test_class): + self._test_class = test_class + + def _operation_exists(self, spec, actual, key_to_compare): + if spec is True: + self._test_class.assertIn(key_to_compare, actual) + elif spec is False: + self._test_class.assertNotIn(key_to_compare, actual) + else: + self._test_class.fail( + 'Expected boolean value for $$exists operator, got %s' % ( + spec,)) + + def __type_alias_to_type(self, alias): + if alias not in BSON_TYPE_ALIAS_MAP: + self._test_class.fail('Unrecognized BSON type alias %s' % (alias,)) + return BSON_TYPE_ALIAS_MAP[alias] + + def _operation_type(self, spec, actual, key_to_compare): + if isinstance(spec, abc.MutableSequence): + permissible_types = tuple([ + t for alias in spec for t in self.__type_alias_to_type(alias)]) + else: + permissible_types = self.__type_alias_to_type(spec) + self._test_class.assertIsInstance( + actual[key_to_compare], permissible_types) + + def _operation_matchesEntity(self, spec, actual, key_to_compare): + expected_entity = self._test_class.entity_map[spec] + self._test_class.assertIsInstance(expected_entity, abc.Mapping) + self._test_class.assertEqual(expected_entity, actual[key_to_compare]) + + def _operation_matchesHexBytes(self, spec, actual, key_to_compare): + raise NotImplementedError + + def _operation_unsetOrMatches(self, spec, actual, key_to_compare): + if key_to_compare is None and not actual: + # top-level document can be None when unset + return + + if key_to_compare not in actual: + # we add a dummy value for the compared key to pass map size check + actual[key_to_compare] = 'dummyValue' + return + self.match_result(spec, actual[key_to_compare], in_recursive_call=True) + + def _operation_sessionLsid(self, spec, actual, key_to_compare): + expected_lsid = self._test_class.entity_map.get_lsid_for_session(spec) + self._test_class.assertEqual(expected_lsid, actual[key_to_compare]) + + def _evaluate_special_operation(self, opname, spec, actual, + key_to_compare): + method_name = '_operation_%s' % (opname.strip('$'),) + try: + method = getattr(self, method_name) + except AttributeError: + self._test_class.fail( + 'Unsupported special matching operator %s' % (opname,)) + else: + method(spec, actual, key_to_compare) + + def _evaluate_if_special_operation(self, expectation, actual, + key_to_compare=None): + """Returns True if a special operation is evaluated, False + otherwise. If the ``expectation`` map contains a single key, + value pair we check it for a special operation. + If given, ``key_to_compare`` is assumed to be the key in + ``expectation`` whose corresponding value needs to be + evaluated for a possible special operation. ``key_to_compare`` + is ignored when ``expectation`` has only one key.""" + if not isinstance(expectation, abc.Mapping): + return False + + is_special_op, opname, spec = False, False, False + + if key_to_compare is not None: + if key_to_compare.startswith('$$'): + is_special_op = True + opname = key_to_compare + spec = expectation[key_to_compare] + key_to_compare = None + else: + nested = expectation[key_to_compare] + if isinstance(nested, abc.Mapping) and len(nested) == 1: + opname, spec = next(iteritems(nested)) + if opname.startswith('$$'): + is_special_op = True + elif len(expectation) == 1: + opname, spec = next(iteritems(expectation)) + if opname.startswith('$$'): + is_special_op = True + key_to_compare = None + + if is_special_op: + self._evaluate_special_operation( + opname=opname, + spec=spec, + actual=actual, + key_to_compare=key_to_compare) + return True + + return False + + def _match_document(self, expectation, actual, is_root): + if self._evaluate_if_special_operation(expectation, actual): + return + + self._test_class.assertIsInstance(actual, abc.Mapping) + for key, value in iteritems(expectation): + if self._evaluate_if_special_operation(expectation, actual, key): + continue + + self._test_class.assertIn(key, actual) + self.match_result(value, actual[key], in_recursive_call=True) + + if not is_root: + self._test_class.assertEqual( + set(expectation.keys()), set(actual.keys())) + + def match_result(self, expectation, actual, + in_recursive_call=False): + if isinstance(expectation, abc.Mapping): + return self._match_document( + expectation, actual, is_root=not in_recursive_call) + + if isinstance(expectation, abc.MutableSequence): + self._test_class.assertIsInstance(actual, abc.MutableSequence) + for e, a in zip(expectation, actual): + if isinstance(e, abc.Mapping): + self._match_document( + e, a, is_root=not in_recursive_call) + else: + self.match_result(e, a, in_recursive_call=True) + return + + # account for flexible numerics in element-wise comparison + if (isinstance(expectation, integer_types) or + isinstance(expectation, float)): + self._test_class.assertEqual(expectation, actual) + else: + self._test_class.assertIsInstance(actual, type(expectation)) + self._test_class.assertEqual(expectation, actual) + + def match_event(self, expectation, actual): + event_type, spec = next(iteritems(expectation)) + + # every event type has the commandName field + command_name = spec.get('commandName') + if command_name: + self._test_class.assertEqual(command_name, actual.command_name) + + if event_type == 'commandStartedEvent': + self._test_class.assertIsInstance(actual, CommandStartedEvent) + command = spec.get('command') + database_name = spec.get('databaseName') + if command: + self.match_result(command, actual.command) + if database_name: + self._test_class.assertEqual( + database_name, actual.database_name) + elif event_type == 'commandSucceededEvent': + self._test_class.assertIsInstance(actual, CommandSucceededEvent) + reply = spec.get('reply') + if reply: + self.match_result(reply, actual.reply) + elif event_type == 'commandFailedEvent': + self._test_class.assertIsInstance(actual, CommandFailedEvent) + else: + self._test_class.fail( + 'Unsupported event type %s' % (event_type,)) + + +class UnifiedSpecTestMixinV1(IntegrationTest): + """Mixin class to run test cases from test specification files. + + Assumes that tests conform to the `unified test format + `_. + + Specification of the test suite being currently run is available as + a class attribute ``TEST_SPEC``. + """ + SCHEMA_VERSION = Version.from_string('1.0') + + @staticmethod + def should_run_on(run_on_spec): + if not run_on_spec: + # Always run these tests. + return True + + for req in run_on_spec: + if is_run_on_requirement_satisfied(req): + return True + return False + + def insert_initial_data(self, initial_data): + for collection_data in initial_data: + coll_name = collection_data['collectionName'] + db_name = collection_data['databaseName'] + documents = collection_data['documents'] + + coll = self.client.get_database(db_name).get_collection( + coll_name, write_concern=WriteConcern(w="majority")) + coll.drop() + + if len(documents) > 0: + coll.insert_many(documents) + else: + # ensure collection exists + result = coll.insert_one({}) + coll.delete_one({'_id': result.inserted_id}) + + @classmethod + def setUpClass(cls): + # super call creates internal client cls.client + super(UnifiedSpecTestMixinV1, cls).setUpClass() + + # process file-level runOnRequirements + run_on_spec = cls.TEST_SPEC.get('runOnRequirements', []) + if not cls.should_run_on(run_on_spec): + raise unittest.SkipTest( + '%s runOnRequirements not satisfied' % (cls.__name__,)) + + # add any special-casing for skipping tests here + if client_context.storage_engine == 'mmapv1': + if 'retryable-writes' in cls.TEST_SPEC['description']: + raise unittest.SkipTest( + "MMAPv1 does not support retryWrites=True") + + @classmethod + def tearDownClass(cls): + super(UnifiedSpecTestMixinV1, cls).tearDownClass() + cls.client.close() + + def setUp(self): + super(UnifiedSpecTestMixinV1, self).setUp() + + # process schemaVersion + # note: we check major schema version during class generation + # note: we do this here because we cannot run assertions in setUpClass + version = Version.from_string(self.TEST_SPEC['schemaVersion']) + self.assertLessEqual( + version, self.SCHEMA_VERSION, + 'expected schema version %s or lower, got %s' % ( + self.SCHEMA_VERSION, version)) + + # initialize internals + self.match_evaluator = MatchEvaluatorUtil(self) + + def maybe_skip_test(self, spec): + # add any special-casing for skipping tests here + if client_context.storage_engine == 'mmapv1': + if 'Dirty explicit session is discarded' in spec['description']: + raise unittest.SkipTest( + "MMAPv1 does not support retryWrites=True") + + def process_error(self, exception, spec): + is_error = spec.get('isError') + is_client_error = spec.get('isClientError') + error_contains = spec.get('errorContains') + error_code = spec.get('errorCode') + error_code_name = spec.get('errorCodeName') + error_labels_contain = spec.get('errorLabelsContain') + error_labels_omit = spec.get('errorLabelsOmit') + expect_result = spec.get('expectResult') + + if is_error: + # already satisfied because exception was raised + pass + + if is_client_error: + self.assertNotIsInstance(exception, PyMongoError) + + if error_contains: + if isinstance(exception, BulkWriteError): + errmsg = str(exception.details).lower() + else: + errmsg = str(exception).lower() + self.assertIn(error_contains.lower(), errmsg) + + if error_code: + self.assertEqual( + error_code, exception.details.get('code')) + + if error_code_name: + self.assertEqual( + error_code_name, exception.details.get('codeName')) + + if error_labels_contain: + labels = [err_label for err_label in error_labels_contain + if exception.has_error_label(err_label)] + self.assertEqual(labels, error_labels_contain) + + if error_labels_omit: + for err_label in error_labels_omit: + if exception.has_error_label(err_label): + self.fail("Exception '%s' unexpectedly had label '%s'" % ( + exception, err_label)) + + if expect_result: + if isinstance(exception, BulkWriteError): + result = parse_bulk_write_error_result( + exception) + self.match_evaluator.match_result(expect_result, result) + else: + self.fail("expectResult can only be specified with %s " + "exceptions" % (BulkWriteError,)) + + def __raise_if_unsupported(self, opname, target, *target_types): + if not isinstance(target, target_types): + self.fail('Operation %s not supported for entity ' + 'of type %s' % (opname, type(target))) + + def __entityOperation_createChangeStream(self, target, *args, **kwargs): + if client_context.storage_engine == 'mmapv1': + self.skipTest("MMAPv1 does not support change streams") + self.__raise_if_unsupported( + 'createChangeStream', target, MongoClient, Database, Collection) + return target.watch(*args, **kwargs) + + def _clientOperation_createChangeStream(self, target, *args, **kwargs): + return self.__entityOperation_createChangeStream( + target, *args, **kwargs) + + def _databaseOperation_createChangeStream(self, target, *args, **kwargs): + return self.__entityOperation_createChangeStream( + target, *args, **kwargs) + + def _collectionOperation_createChangeStream(self, target, *args, **kwargs): + return self.__entityOperation_createChangeStream( + target, *args, **kwargs) + + def _databaseOperation_runCommand(self, target, *args, **kwargs): + self.__raise_if_unsupported('runCommand', target, Database) + return target.command(*args, **kwargs) + + def __entityOperation_aggregate(self, target, *args, **kwargs): + self.__raise_if_unsupported('aggregate', target, Database, Collection) + return list(target.aggregate(*args, **kwargs)) + + def _databaseOperation_aggregate(self, target, *args, **kwargs): + return self.__entityOperation_aggregate(target, *args, **kwargs) + + def _collectionOperation_aggregate(self, target, *args, **kwargs): + return self.__entityOperation_aggregate(target, *args, **kwargs) + + def _collectionOperation_bulkWrite(self, target, *args, **kwargs): + self.__raise_if_unsupported('bulkWrite', target, Collection) + write_result = target.bulk_write(*args, **kwargs) + return parse_bulk_write_result(write_result) + + def _collectionOperation_find(self, target, *args, **kwargs): + self.__raise_if_unsupported('find', target, Collection) + find_cursor = target.find(*args, **kwargs) + return list(find_cursor) + + def _collectionOperation_findOneAndReplace(self, target, *args, **kwargs): + self.__raise_if_unsupported('findOneAndReplace', target, Collection) + return target.find_one_and_replace(*args, **kwargs) + + def _collectionOperation_findOneAndUpdate(self, target, *args, **kwargs): + self.__raise_if_unsupported('findOneAndReplace', target, Collection) + return target.find_one_and_update(*args, **kwargs) + + def _collectionOperation_insertMany(self, target, *args, **kwargs): + self.__raise_if_unsupported('insertMany', target, Collection) + result = target.insert_many(*args, **kwargs) + return {idx: _id for idx, _id in enumerate(result.inserted_ids)} + + def _collectionOperation_insertOne(self, target, *args, **kwargs): + self.__raise_if_unsupported('insertOne', target, Collection) + result = target.insert_one(*args, **kwargs) + return {'insertedId': result.inserted_id} + + def _sessionOperation_withTransaction(self, target, *args, **kwargs): + if client_context.storage_engine == 'mmapv1': + self.skipTest('MMAPv1 does not support document-level locking') + self.__raise_if_unsupported('withTransaction', target, ClientSession) + return target.with_transaction(*args, **kwargs) + + def _sessionOperation_startTransaction(self, target, *args, **kwargs): + if client_context.storage_engine == 'mmapv1': + self.skipTest('MMAPv1 does not support document-level locking') + self.__raise_if_unsupported('startTransaction', target, ClientSession) + return target.start_transaction(*args, **kwargs) + + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, + *args, **kwargs): + self.__raise_if_unsupported( + 'iterateUntilDocumentOrError', target, ChangeStream) + return next(target) + + def run_entity_operation(self, spec): + target = self.entity_map[spec['object']] + opname = spec['name'] + opargs = spec.get('arguments') + expect_error = spec.get('expectError') + if opargs: + arguments = parse_spec_options(copy.deepcopy(opargs)) + prepare_spec_arguments(spec, arguments, camel_to_snake(opname), + self.entity_map, self.run_operations) + else: + arguments = tuple() + + if isinstance(target, MongoClient): + method_name = '_clientOperation_%s' % (opname,) + elif isinstance(target, Database): + method_name = '_databaseOperation_%s' % (opname,) + elif isinstance(target, Collection): + method_name = '_collectionOperation_%s' % (opname,) + elif isinstance(target, ChangeStream): + method_name = '_changeStreamOperation_%s' % (opname,) + elif isinstance(target, ClientSession): + method_name = '_sessionOperation_%s' % (opname,) + elif isinstance(target, GridFSBucket): + raise NotImplementedError + else: + method_name = 'doesNotExist' + + try: + method = getattr(self, method_name) + except AttributeError: + try: + cmd = getattr(target, camel_to_snake(opname)) + except AttributeError: + self.fail('Unsupported operation %s on entity %s' % ( + opname, target)) + else: + cmd = functools.partial(method, target) + + try: + result = cmd(**dict(arguments)) + except Exception as exc: + if expect_error: + return self.process_error(exc, expect_error) + raise + + if 'expectResult' in spec: + self.match_evaluator.match_result(spec['expectResult'], result) + + save_as_entity = spec.get('saveResultAsEntity') + if save_as_entity: + self.entity_map[save_as_entity] = result + + def __set_fail_point(self, client, command_args): + if not client_context.test_commands_enabled: + self.skipTest('Test commands must be enabled') + + cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on.update(command_args) + client.admin.command(cmd_on) + self.addCleanup( + client.admin.command, + 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') + + def _testOperation_failPoint(self, spec): + self.__set_fail_point( + client=self.entity_map[spec['client']], + command_args=spec['failPoint']) + + def _testOperation_targetedFailPoint(self, spec): + session = self.entity_map[spec['session']] + if not session._pinned_address: + self.fail("Cannot use targetedFailPoint operation with unpinned " + "session %s" % (spec['session'],)) + + client = single_client('%s:%s' % session._pinned_address) + self.__set_fail_point( + client=client, command_args=spec['failPoint']) + self.addCleanup(client.close) + + def _testOperation_assertSessionTransactionState(self, spec): + session = self.entity_map[spec['session']] + expected_state = getattr(_TxnState, spec['state'].upper()) + self.assertEqual(expected_state, session._transaction.state) + + def _testOperation_assertSessionPinned(self, spec): + session = self.entity_map[spec['session']] + self.assertIsNotNone(session._pinned_address) + + def _testOperation_assertSessionUnpinned(self, spec): + session = self.entity_map[spec['session']] + self.assertIsNone(session._pinned_address) + + def __get_last_two_command_lsids(self, listener): + cmd_started_events = [] + for event in reversed(listener.results): + if isinstance(event, CommandStartedEvent): + cmd_started_events.append(event) + if len(cmd_started_events) < 2: + self.fail('Needed 2 CommandStartedEvents to compare lsids, ' + 'got %s' % (len(cmd_started_events))) + return tuple([e.command['lsid'] for e in cmd_started_events][:2]) + + def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec['client']) + self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec['client']) + self.assertEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSessionDirty(self, spec): + session = self.entity_map[spec['session']] + self.assertTrue(session._server_session.dirty) + + def _testOperation_assertSessionNotDirty(self, spec): + session = self.entity_map[spec['session']] + return self.assertFalse(session._server_session.dirty) + + def _testOperation_assertCollectionExists(self, spec): + database_name = spec['databaseName'] + collection_name = spec['collectionName'] + collection_name_list = list( + self.client.get_database(database_name).list_collection_names()) + self.assertIn(collection_name, collection_name_list) + + def _testOperation_assertCollectionNotExists(self, spec): + database_name = spec['databaseName'] + collection_name = spec['collectionName'] + collection_name_list = list( + self.client.get_database(database_name).list_collection_names()) + self.assertNotIn(collection_name, collection_name_list) + + def _testOperation_assertIndexExists(self, spec): + collection = self.client[spec['databaseName']][spec['collectionName']] + index_names = [idx['name'] for idx in collection.list_indexes()] + self.assertIn(spec['indexName'], index_names) + + def _testOperation_assertIndexNotExists(self, spec): + collection = self.client[spec['databaseName']][spec['collectionName']] + for index in collection.list_indexes(): + self.assertNotEqual(spec['indexName'], index['name']) + + def run_special_operation(self, spec): + opname = spec['name'] + method_name = '_testOperation_%s' % (opname,) + try: + method = getattr(self, method_name) + except AttributeError: + self.fail('Unsupported special test operation %s' % (opname,)) + else: + method(spec['arguments']) + + def run_operations(self, spec): + for op in spec: + target = op['object'] + if target != 'testRunner': + self.run_entity_operation(op) + else: + self.run_special_operation(op) + + def check_events(self, spec): + for event_spec in spec: + client_name = event_spec['client'] + events = event_spec['events'] + listener = self.entity_map.get_listener_for_client(client_name) + + if len(events) == 0: + self.assertEqual(listener.results, []) + continue + + if len(events) > len(listener.results): + self.fail('Expected to see %s events, got %s' % ( + len(events), len(listener.results))) + + for idx, expected_event in enumerate(events): + self.match_evaluator.match_event( + expected_event, listener.results[idx]) + + def verify_outcome(self, spec): + for collection_data in spec: + coll_name = collection_data['collectionName'] + db_name = collection_data['databaseName'] + expected_documents = collection_data['documents'] + + coll = self.client.get_database(db_name).get_collection( + coll_name, + read_preference=ReadPreference.PRIMARY, + read_concern=ReadConcern(level='local')) + + if expected_documents: + sorted_expected_documents = sorted( + expected_documents, key=lambda doc: doc['_id']) + actual_documents = list( + coll.find({}, sort=[('_id', ASCENDING)])) + self.assertListEqual(sorted_expected_documents, + actual_documents) + + def run_scenario(self, spec): + # maybe skip test manually + self.maybe_skip_test(spec) + + # process test-level runOnRequirements + run_on_spec = spec.get('runOnRequirements', []) + if not self.should_run_on(run_on_spec): + raise unittest.SkipTest('runOnRequirements not satisfied') + + # process skipReason + skip_reason = spec.get('skipReason', None) + if skip_reason is not None: + raise unittest.SkipTest('%s' % (skip_reason,)) + + # process createEntities + self.entity_map = EntityMapUtil(self) + self.entity_map.create_entities_from_spec( + self.TEST_SPEC.get('createEntities', [])) + + # process initialData + self.insert_initial_data(self.TEST_SPEC.get('initialData', [])) + + # process operations + self.run_operations(spec['operations']) + + # process expectEvents + self.check_events(spec.get('expectEvents', [])) + + # process outcome + self.verify_outcome(spec.get('outcome', [])) + + +class UnifiedSpecTestMeta(type): + """Metaclass for generating test classes.""" + def __init__(cls, *args, **kwargs): + super(UnifiedSpecTestMeta, cls).__init__(*args, **kwargs) + + def create_test(spec): + def test_case(self): + self.run_scenario(spec) + return test_case + + for test_spec in cls.TEST_SPEC['tests']: + description = test_spec['description'] + test_name = 'test_%s' % (description.strip('. '). + replace(' ', '_').replace('.', '_'),) + test_method = create_test(copy.deepcopy(test_spec)) + test_method.__name__ = str(test_name) + + for fail_pattern in cls.EXPECTED_FAILURES: + if re.search(fail_pattern, description): + test_method = unittest.expectedFailure(test_method) + break + + setattr(cls, test_name, test_method) + + +_ALL_MIXIN_CLASSES = [ + UnifiedSpecTestMixinV1, + # add mixin classes for new schema major versions here +] + + +_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES} + + +def generate_test_classes(test_path, module=__name__, class_name_prefix='', + expected_failures=[], + bypass_test_generation_errors=False): + """Method for generating test classes. Returns a dictionary where keys are + the names of test classes and values are the test class objects.""" + test_klasses = {} + + def test_base_class_factory(test_spec): + """Utility that creates the base class to use for test generation. + This is needed to ensure that cls.TEST_SPEC is appropriately set when + the metaclass __init__ is invoked.""" + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): + TEST_SPEC = test_spec + EXPECTED_FAILURES = expected_failures + return SpecTestBase + + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + fpath = os.path.join(dirpath, filename) + with open(fpath) as scenario_stream: + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = json_util.loads( + scenario_stream.read(), json_options=opts) + + test_type = os.path.splitext(filename)[0] + snake_class_name = 'Test%s_%s_%s' % ( + class_name_prefix, dirname.replace('-', '_'), + test_type.replace('-', '_').replace('.', '_')) + class_name = snake_to_camel(snake_class_name) + + try: + schema_version = Version.from_string( + scenario_def['schemaVersion']) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get( + schema_version[0]) + if mixin_class is None: + raise ValueError( + "test file '%s' has unsupported schemaVersion '%s'" % ( + fpath, schema_version)) + test_klasses[class_name] = type( + class_name, + (mixin_class, test_base_class_factory(scenario_def),), + {'__module__': module}) + except Exception: + if bypass_test_generation_errors: + continue + raise + + return test_klasses diff --git a/test/utils.py b/test/utils.py index 3e24c684f8..cf38e76e6a 100644 --- a/test/utils.py +++ b/test/utils.py @@ -17,6 +17,7 @@ import collections import contextlib +import copy import functools import os import re @@ -31,10 +32,12 @@ from bson import json_util, py3compat from bson.objectid import ObjectId +from bson.py3compat import iteritems, string_type from bson.son import SON from pymongo import (MongoClient, - monitoring, read_preferences) + monitoring, operations, read_preferences) +from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener from pymongo.pool import (_CancellationContext, @@ -563,6 +566,11 @@ def camel_to_snake_args(arguments): return arguments +def snake_to_camel(snake): + # Regex to convert snake_case to lowerCamelCase. + return re.sub(r'_([a-z])', lambda m: m.group(1).upper(), snake) + + def parse_collection_options(opts): if 'readPreference' in opts: opts['read_preference'] = parse_read_preference( @@ -949,3 +957,124 @@ def assertion_context(msg): except AssertionError as exc: msg = '%s (%s)' % (exc, msg) py3compat.reraise(type(exc), msg, sys.exc_info()[2]) + + +def parse_spec_options(opts): + if 'readPreference' in opts: + opts['read_preference'] = parse_read_preference( + opts.pop('readPreference')) + + if 'writeConcern' in opts: + opts['write_concern'] = WriteConcern( + **dict(opts.pop('writeConcern'))) + + if 'readConcern' in opts: + opts['read_concern'] = ReadConcern( + **dict(opts.pop('readConcern'))) + + if 'maxTimeMS' in opts: + opts['max_time_ms'] = opts.pop('maxTimeMS') + + if 'maxCommitTimeMS' in opts: + opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') + + if 'hint' in opts: + hint = opts.pop('hint') + if not isinstance(hint, string_type): + hint = list(iteritems(hint)) + opts['hint'] = hint + + # Properly format 'hint' arguments for the Bulk API tests. + if 'requests' in opts: + reqs = opts.pop('requests') + for req in reqs: + if 'name' in req: + # CRUD v2 format + args = req.pop('arguments', {}) + if 'hint' in args: + hint = args.pop('hint') + if not isinstance(hint, string_type): + hint = list(iteritems(hint)) + args['hint'] = hint + req['arguments'] = args + else: + # Unified test format + bulk_model, spec = next(iteritems(req)) + if 'hint' in spec: + hint = spec.pop('hint') + if not isinstance(hint, string_type): + hint = list(iteritems(hint)) + spec['hint'] = hint + opts['requests'] = reqs + + return dict(opts) + + +def prepare_spec_arguments(spec, arguments, opname, entity_map, + with_txn_callback): + for arg_name in list(arguments): + c2s = camel_to_snake(arg_name) + # PyMongo accepts sort as list of tuples. + if arg_name == "sort": + sort_dict = arguments[arg_name] + arguments[arg_name] = list(iteritems(sort_dict)) + # Named "key" instead not fieldName. + if arg_name == "fieldName": + arguments["key"] = arguments.pop(arg_name) + # Aggregate uses "batchSize", while find uses batch_size. + elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and + opname == "aggregate"): + continue + # Requires boolean returnDocument. + elif arg_name == "returnDocument": + arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) + elif c2s == "requests": + # Parse each request into a bulk write model. + requests = [] + for request in arguments["requests"]: + if 'name' in request: + # CRUD v2 format + bulk_model = camel_to_upper_camel(request["name"]) + bulk_class = getattr(operations, bulk_model) + bulk_arguments = camel_to_snake_args(request["arguments"]) + else: + # Unified test format + bulk_model, spec = next(iteritems(request)) + bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) + bulk_arguments = camel_to_snake_args(spec) + requests.append(bulk_class(**dict(bulk_arguments))) + arguments["requests"] = requests + elif arg_name == "session": + arguments['session'] = entity_map[arguments['session']] + elif (opname in ('command', 'run_admin_command') and + arg_name == 'command'): + # Ensure the first key is the command name. + ordered_command = SON([(spec['command_name'], 1)]) + ordered_command.update(arguments['command']) + arguments['command'] = ordered_command + elif opname == 'open_download_stream' and arg_name == 'id': + arguments['file_id'] = arguments.pop(arg_name) + elif opname != 'find' and c2s == 'max_time_ms': + # find is the only method that accepts snake_case max_time_ms. + # All other methods take kwargs which must use the server's + # camelCase maxTimeMS. See PYTHON-1855. + arguments['maxTimeMS'] = arguments.pop('max_time_ms') + elif opname == 'with_transaction' and arg_name == 'callback': + if 'operations' in arguments[arg_name]: + # CRUD v2 format + callback_ops = arguments[arg_name]['operations'] + else: + # Unified test format + callback_ops = arguments[arg_name] + arguments['callback'] = lambda _: with_txn_callback( + copy.deepcopy(callback_ops)) + elif opname == 'drop_collection' and arg_name == 'collection': + arguments['name_or_collection'] = arguments.pop(arg_name) + elif opname == 'create_collection' and arg_name == 'collection': + arguments['name'] = arguments.pop(arg_name) + elif opname == 'create_index' and arg_name == 'keys': + arguments['keys'] = list(arguments.pop(arg_name).items()) + elif opname == 'drop_index' and arg_name == 'name': + arguments['index_or_name'] = arguments.pop(arg_name) + else: + arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 09798fb809..2e2318591e 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -15,6 +15,7 @@ """Utilities for testing driver specs.""" import copy +import functools import threading @@ -50,7 +51,9 @@ CompareType, CMAPListener, OvertCommandListener, + parse_spec_options, parse_read_preference, + prepare_spec_arguments, rs_client, ServerAndTopologyEventListener, HeartbeatEventListener) @@ -249,44 +252,7 @@ def get_object_name(self, op): @staticmethod def parse_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) - - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) - - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) - - if 'maxTimeMS' in opts: - opts['max_time_ms'] = opts.pop('maxTimeMS') - - if 'maxCommitTimeMS' in opts: - opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') - - if 'hint' in opts: - hint = opts.pop('hint') - if not isinstance(hint, string_type): - hint = list(iteritems(hint)) - opts['hint'] = hint - - # Properly format 'hint' arguments for the Bulk API tests. - if 'requests' in opts: - reqs = opts.pop('requests') - for req in reqs: - args = req.pop('arguments') - if 'hint' in args: - hint = args.pop('hint') - if not isinstance(hint, string_type): - hint = list(iteritems(hint)) - args['hint'] = hint - req['arguments'] = args - opts['requests'] = reqs - - return dict(opts) + return parse_spec_options(opts) def run_operation(self, sessions, collection, operation): original_collection = collection @@ -328,61 +294,11 @@ def run_operation(self, sessions, collection, operation): cmd = getattr(obj, name) - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - # PyMongo accepts sort as list of tuples. - if arg_name == "sort": - sort_dict = arguments[arg_name] - arguments[arg_name] = list(iteritems(sort_dict)) - # Named "key" instead not fieldName. - if arg_name == "fieldName": - arguments["key"] = arguments.pop(arg_name) - # Aggregate uses "batchSize", while find uses batch_size. - elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and - name == "aggregate"): - continue - # Requires boolean returnDocument. - elif arg_name == "returnDocument": - arguments[c2s] = arguments.pop(arg_name) == "After" - elif c2s == "requests": - # Parse each request into a bulk write model. - requests = [] - for request in arguments["requests"]: - bulk_model = camel_to_upper_camel(request["name"]) - bulk_class = getattr(operations, bulk_model) - bulk_arguments = camel_to_snake_args(request["arguments"]) - requests.append(bulk_class(**dict(bulk_arguments))) - arguments["requests"] = requests - elif arg_name == "session": - arguments['session'] = sessions[arguments['session']] - elif (name in ('command', 'run_admin_command') and - arg_name == 'command'): - # Ensure the first key is the command name. - ordered_command = SON([(operation['command_name'], 1)]) - ordered_command.update(arguments['command']) - arguments['command'] = ordered_command - elif name == 'open_download_stream' and arg_name == 'id': - arguments['file_id'] = arguments.pop(arg_name) - elif name != 'find' and c2s == 'max_time_ms': - # find is the only method that accepts snake_case max_time_ms. - # All other methods take kwargs which must use the server's - # camelCase maxTimeMS. See PYTHON-1855. - arguments['maxTimeMS'] = arguments.pop('max_time_ms') - elif name == 'with_transaction' and arg_name == 'callback': - callback_ops = arguments[arg_name]['operations'] - arguments['callback'] = lambda _: self.run_operations( - sessions, original_collection, copy.deepcopy(callback_ops), - in_with_transaction=True) - elif name == 'drop_collection' and arg_name == 'collection': - arguments['name_or_collection'] = arguments.pop(arg_name) - elif name == 'create_collection' and arg_name == 'collection': - arguments['name'] = arguments.pop(arg_name) - elif name == 'create_index' and arg_name == 'keys': - arguments['keys'] = list(arguments.pop(arg_name).items()) - elif name == 'drop_index' and arg_name == 'name': - arguments['index_or_name'] = arguments.pop(arg_name) - else: - arguments[c2s] = arguments.pop(arg_name) + with_txn_callback = functools.partial( + self.run_operations, sessions, original_collection, + in_with_transaction=True) + prepare_spec_arguments(operation, arguments, name, sessions, + with_txn_callback) if name == 'run_on_thread': args = {'sessions': sessions, 'collection': collection} From a9d668c3b927bb141917817db76e66df9da8737f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Jan 2021 12:50:36 -0800 Subject: [PATCH 0261/2111] PYTHON-1878 Add mongodb+srv URIs to Atlas Connectivity tests (#538) Enable xtrace with silent:false to make test failures easier to diagnose. --- .evergreen/config.yml | 25 ++++++++- .evergreen/run-atlas-tests.sh | 41 +++++++++------ test/atlas/test_connection.py | 98 ++++++++++++++++++++++++++--------- 3 files changed, 123 insertions(+), 41 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c37309df7f..dcb2f46886 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -444,8 +444,29 @@ functions: silent: true working_dir: "src" script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} ATLAS_REPL='${atlas_repl}' ATLAS_SHRD='${atlas_shrd}' ATLAS_FREE='${atlas_free}' ATLAS_TLS11='${atlas_tls11}' ATLAS_TLS12='${atlas_tls12}' sh ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh + cat < prepare_atlas_connectivity.sh + export ATLAS_FREE='${atlas_free}' + export ATLAS_REPL='${atlas_repl}' + export ATLAS_SHRD='${atlas_shrd}' + export ATLAS_TLS11='${atlas_tls11}' + export ATLAS_TLS12='${atlas_tls12}' + export ATLAS_SRV_FREE='${atlas_srv_free}' + export ATLAS_SRV_REPL='${atlas_srv_repl}' + export ATLAS_SRV_SHRD='${atlas_srv_shrd}' + export ATLAS_SRV_TLS11='${atlas_srv_tls11}' + export ATLAS_SRV_TLS12='${atlas_srv_tls12}' + EOT + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + # Disable xtrace (just in case it was accidentally set). + set +x + . ./prepare_atlas_connectivity.sh + rm -f ./prepare_atlas_connectivity.sh + + PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh "add aws auth variables to file": - command: shell.exec diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index af861ce350..0927f26977 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -1,7 +1,8 @@ #!/bin/bash -# Don't trace to avoid secrets showing up in the logs +# Exit on error and enable trace. set -o errexit +set -o xtrace export JAVA_HOME=/opt/java/jdk8 @@ -15,27 +16,37 @@ if [ -z "$PYTHON_BINARY" ]; then fi IMPL=$(${PYTHON_BINARY} -c "import platform, sys; sys.stdout.write(platform.python_implementation())") -if [ $IMPL = "Jython" -o $IMPL = "PyPy" ]; then - echo "Using Jython or PyPy" + +if [ $IMPL = "Jython" ]; then + # The venv created by createvirtualenv is incompatible with Jython $PYTHON_BINARY -m virtualenv --never-download --no-wheel atlastest . atlastest/bin/activate - trap "deactivate; rm -rf atlastest" EXIT HUP - pip install certifi - PYTHON=python else - IS_PRE_279=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('1' if sys.version_info < (2, 7, 9) else '0')") + # All other pythons work with createvirtualenv. + . .evergreen/utils.sh + createvirtualenv $PYTHON_BINARY atlastest +fi +trap "deactivate; rm -rf atlastest" EXIT HUP + +if [ $IMPL = "Jython" -o $IMPL = "PyPy" ]; then + echo "Using Jython or PyPy" + python -m pip install certifi +else + IS_PRE_279=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (2, 7, 9) else '0')") if [ $IS_PRE_279 = "1" ]; then echo "Using a Pre-2.7.9 CPython" - $PYTHON_BINARY -m virtualenv --never-download --no-wheel atlastest - . atlastest/bin/activate - trap "deactivate; rm -rf atlastest" EXIT HUP - pip install pyopenssl>=17.2.0 service_identity>18.1.0 - PYTHON=python + python -m pip install pyopenssl>=17.2.0 service_identity>18.1.0 else echo "Using CPython 2.7.9+" - PYTHON=$PYTHON_BINARY fi fi -echo "Running tests" -$PYTHON test/atlas/test_connection.py +echo "Running tests without dnspython" +python test/atlas/test_connection.py + +# dnspython is incompatible with Jython so don't test that combination. +if [ $IMPL != "Jython" ]; then + python -m pip install dnspython + echo "Running tests with dnspython" + MUST_TEST_SRV="1" python test/atlas/test_connection.py +fi diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 813295977a..f4226b7e5b 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -18,20 +18,41 @@ import sys import unittest +from collections import defaultdict + sys.path[0:0] = [""] import pymongo from pymongo.ssl_support import HAS_SNI - -_REPL = os.environ.get("ATLAS_REPL") -_SHRD = os.environ.get("ATLAS_SHRD") -_FREE = os.environ.get("ATLAS_FREE") -_TLS11 = os.environ.get("ATLAS_TLS11") -_TLS12 = os.environ.get("ATLAS_TLS12") - - -def _connect(uri): +try: + import dns + HAS_DNS = True +except ImportError: + HAS_DNS = False + + +URIS = { + "ATLAS_REPL": os.environ.get("ATLAS_REPL"), + "ATLAS_SHRD": os.environ.get("ATLAS_SHRD"), + "ATLAS_FREE": os.environ.get("ATLAS_FREE"), + "ATLAS_TLS11": os.environ.get("ATLAS_TLS11"), + "ATLAS_TLS12": os.environ.get("ATLAS_TLS12"), + "ATLAS_SRV_REPL": os.environ.get("ATLAS_SRV_REPL"), + "ATLAS_SRV_SHRD": os.environ.get("ATLAS_SRV_SHRD"), + "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), + "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), + "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), +} + +# Set this variable to true to run the SRV tests even when dnspython is not +# installed. +MUST_TEST_SRV = os.environ.get("MUST_TEST_SRV") + + +def connect(uri): + if not uri: + raise Exception("Must set env variable to test.") client = pymongo.MongoClient(uri) # No TLS error client.admin.command('ismaster') @@ -40,29 +61,58 @@ def _connect(uri): class TestAtlasConnect(unittest.TestCase): - - @classmethod - def setUpClass(cls): - if not all([_REPL, _SHRD, _FREE]): - raise Exception( - "Must set ATLAS_REPL/SHRD/FREE env variables to test.") + @unittest.skipUnless(HAS_SNI, 'Free tier requires SNI support') + def test_free_tier(self): + connect(URIS['ATLAS_FREE']) def test_replica_set(self): - _connect(_REPL) + connect(URIS['ATLAS_REPL']) def test_sharded_cluster(self): - _connect(_SHRD) - - def test_free_tier(self): - if not HAS_SNI: - raise unittest.SkipTest("Free tier requires SNI support.") - _connect(_FREE) + connect(URIS['ATLAS_SHRD']) def test_tls_11(self): - _connect(_TLS11) + connect(URIS['ATLAS_TLS11']) def test_tls_12(self): - _connect(_TLS12) + connect(URIS['ATLAS_TLS12']) + + def connect_srv(self, uri): + connect(uri) + self.assertIn('mongodb+srv://', uri) + + @unittest.skipUnless(HAS_SNI, 'Free tier requires SNI support') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + def test_srv_free_tier(self): + self.connect_srv(URIS['ATLAS_SRV_FREE']) + + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + def test_srv_replica_set(self): + self.connect_srv(URIS['ATLAS_SRV_REPL']) + + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + def test_srv_sharded_cluster(self): + self.connect_srv(URIS['ATLAS_SRV_SHRD']) + + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + def test_srv_tls_11(self): + self.connect_srv(URIS['ATLAS_SRV_TLS11']) + + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + def test_srv_tls_12(self): + self.connect_srv(URIS['ATLAS_SRV_TLS12']) + + def test_uniqueness(self): + """Ensure that we don't accidentally duplicate the test URIs.""" + uri_to_names = defaultdict(list) + for name, uri in URIS.items(): + if uri: + uri_to_names[uri].append(name) + duplicates = [names for names in uri_to_names.values() + if len(names) > 1] + self.assertFalse(duplicates, 'Error: the following env variables have ' + 'duplicate values: %s' % (duplicates,)) + if __name__ == '__main__': From 86b40c195de61425cd6ad3ecd80f13451521200b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Jan 2021 15:15:37 -0800 Subject: [PATCH 0262/2111] PYTHON-2462 Avoid connection storms: implement pool PAUSED state (#531) Mark server unknown and clear the pool when background connections fail. Eagerly evict threads from the wait queue when pool is paused. Evicted threads will raise the following error: AutoReconnect('localhost:27017: connection pool paused') Introduce PoolClearedEvent and ConnectionPoolListener.pool_ready. CMAP spec test changes: - CMAP unit tests should not use real monitors - Assert that CMAP threads complete all scheduled operations --- doc/changelog.rst | 5 + pymongo/event_loggers.py | 3 + pymongo/mongo_client.py | 2 +- pymongo/monitoring.py | 34 +++++ pymongo/pool.py | 135 ++++++++++++++---- pymongo/server.py | 2 +- pymongo/thread_util.py | 129 ----------------- pymongo/topology.py | 19 ++- test/__init__.py | 15 ++ test/cmap/connection-must-have-id.json | 4 + test/cmap/connection-must-order-ids.json | 4 + test/cmap/pool-checkin-destroy-closed.json | 4 + test/cmap/pool-checkin-destroy-stale.json | 4 + test/cmap/pool-checkin-make-available.json | 4 + test/cmap/pool-checkin.json | 4 + test/cmap/pool-checkout-connection.json | 4 + test/cmap/pool-checkout-error-closed.json | 4 + ...ol-checkout-maxConnecting-is-enforced.json | 6 +- .../pool-checkout-maxConnecting-timeout.json | 6 +- test/cmap/pool-checkout-multiple.json | 4 + test/cmap/pool-checkout-no-idle.json | 4 + test/cmap/pool-checkout-no-stale.json | 7 + ...out-returned-connection-maxConnecting.json | 4 + test/cmap/pool-clear-clears-waitqueue.json | 104 ++++++++++++++ test/cmap/pool-clear-min-size.json | 67 +++++++++ test/cmap/pool-clear-paused.json | 32 +++++ test/cmap/pool-clear-ready.json | 69 +++++++++ test/cmap/pool-close-destroy-conns.json | 4 + test/cmap/pool-create-max-size.json | 6 +- test/cmap/pool-create-min-size-error.json | 62 ++++++++ test/cmap/pool-create-min-size.json | 11 ++ test/cmap/pool-ready-ready.json | 39 +++++ test/cmap/pool-ready.json | 57 ++++++++ test/cmap/wait-queue-timeout.json | 6 +- .../isMaster-command-error.json | 8 -- .../isMaster-network-error.json | 8 -- .../isMaster-timeout.json | 8 -- .../minPoolSize-error.json | 101 +++++++++++++ test/pymongo_mocks.py | 21 +++ test/test_client.py | 16 ++- test/test_cmap.py | 117 ++++++++++++--- test/test_discovery_and_monitoring.py | 66 ++++++--- test/test_heartbeat_monitoring.py | 4 +- test/test_pooling.py | 7 +- test/test_streaming_protocol.py | 2 +- test/test_topology.py | 25 +--- test/utils.py | 48 +++++-- test/utils_selection_tests.py | 20 +-- 48 files changed, 1023 insertions(+), 292 deletions(-) delete mode 100644 pymongo/thread_util.py create mode 100644 test/cmap/pool-clear-clears-waitqueue.json create mode 100644 test/cmap/pool-clear-min-size.json create mode 100644 test/cmap/pool-clear-paused.json create mode 100644 test/cmap/pool-clear-ready.json create mode 100644 test/cmap/pool-create-min-size-error.json create mode 100644 test/cmap/pool-ready-ready.json create mode 100644 test/cmap/pool-ready.json create mode 100644 test/discovery_and_monitoring_integration/minPoolSize-error.json diff --git a/doc/changelog.rst b/doc/changelog.rst index a71b81ff27..5567089b4c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,11 @@ Changelog Changes in Version 4.0 ---------------------- +Breaking Changes in 4.0 +``````````````````````` + +- Removed :mod:`~pymongo.thread_util`. + Issues Resolved ............... diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 5019ea5489..7d5501c372 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -171,6 +171,9 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): def pool_created(self, event): logging.info("[pool {0.address}] pool created".format(event)) + def pool_ready(self, event): + logging.info("[pool {0.address}] pool ready".format(event)) + def pool_cleared(self, event): logging.info("[pool {0.address}] pool cleared".format(event)) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 6e1e24481a..2948b33216 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -737,7 +737,7 @@ def target(): executor = periodic_executor.PeriodicExecutor( interval=common.KILL_CURSOR_FREQUENCY, - min_interval=0.5, + min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, name="pymongo_kill_cursors_thread") diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 462b22b334..e5b3d88c41 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -255,6 +255,18 @@ def pool_created(self, event): """ raise NotImplementedError + def pool_ready(self, event): + """Abstract method to handle a :class:`PoolReadyEvent`. + + Emitted when a Connection Pool is marked ready. + + :Parameters: + - `event`: An instance of :class:`PoolReadyEvent`. + + .. versionadded:: 4.0 + """ + raise NotImplementedError + def pool_cleared(self, event): """Abstract method to handle a `PoolClearedEvent`. @@ -692,6 +704,18 @@ def __repr__(self): self.__class__.__name__, self.address, self.__options) +class PoolReadyEvent(_PoolEvent): + """Published when a Connection Pool is marked ready. + + :Parameters: + - `address`: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 4.0 + """ + __slots__ = () + + class PoolClearedEvent(_PoolEvent): """Published when a Connection Pool is cleared. @@ -1475,6 +1499,16 @@ def publish_pool_created(self, address, options): except Exception: _handle_exception() + def publish_pool_ready(self, address): + """Publish a :class:`PoolReadyEvent` to all pool listeners. + """ + event = PoolReadyEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_ready(event) + except Exception: + _handle_exception() + def publish_pool_cleared(self, address): """Publish a :class:`PoolClearedEvent` to all pool listeners. """ diff --git a/pymongo/pool.py b/pymongo/pool.py index 20164bb8aa..d39c9ed8f4 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -30,7 +30,7 @@ from bson import DEFAULT_CODEC_OPTIONS from bson.py3compat import imap, itervalues, _unicode, PY3 from bson.son import SON -from pymongo import auth, helpers, thread_util, __version__ +from pymongo import auth, helpers, __version__ from pymongo.client_session import _validate_session_write_concern from pymongo.common import (MAX_BSON_SIZE, MAX_CONNECTING, @@ -46,6 +46,7 @@ CertificateError, ConnectionFailure, ConfigurationError, + ExceededMaxWaiters, InvalidOperation, DocumentTooLarge, NetworkTimeout, @@ -309,7 +310,8 @@ class PoolOptions(object): '__wait_queue_timeout', '__wait_queue_multiple', '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', '__event_listeners', '__appname', '__driver', '__metadata', - '__compression_settings', '__max_connecting') + '__compression_settings', '__max_connecting', + '__pause_enabled') def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, @@ -318,7 +320,8 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, wait_queue_multiple=None, ssl_context=None, ssl_match_hostname=True, socket_keepalive=True, event_listeners=None, appname=None, driver=None, - compression_settings=None, max_connecting=MAX_CONNECTING): + compression_settings=None, max_connecting=MAX_CONNECTING, + pause_enabled=True): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size @@ -335,6 +338,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__driver = driver self.__compression_settings = compression_settings self.__max_connecting = max_connecting + self.__pause_enabled = pause_enabled self.__metadata = copy.deepcopy(_METADATA) if appname: self.__metadata['application'] = {'name': appname} @@ -406,6 +410,10 @@ def max_connecting(self): """ return self.__max_connecting + @property + def pause_enabled(self): + return self.__pause_enabled + @property def max_idle_time_seconds(self): """The maximum number of seconds that a connection can remain @@ -1058,6 +1066,12 @@ class _PoolClosedError(PyMongoError): pass +class PoolState(object): + PAUSED = 1 + READY = 2 + CLOSED = 3 + + # Do *not* explicitly inherit from object or Jython won't call __del__ # http://bugs.jython.org/issue1057 class Pool: @@ -1068,6 +1082,10 @@ def __init__(self, address, options, handshake=True): - `options`: a PoolOptions instance - `handshake`: whether to call ismaster for each new SocketInfo """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY # Check a socket's health with socket_closed() every once in a while. # Can override for testing: 0 to always check, None to never check. self._check_interval_seconds = 1 @@ -1079,7 +1097,6 @@ def __init__(self, address, options, handshake=True): self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 - self.closed = False # Track whether the sockets in this pool are writeable or not. self.is_writable = None @@ -1098,13 +1115,23 @@ def __init__(self, address, options, handshake=True): if (self.opts.wait_queue_multiple is None or self.opts.max_pool_size is None): - max_waiters = None + max_waiters = float('inf') else: max_waiters = ( self.opts.max_pool_size * self.opts.wait_queue_multiple) - - self._socket_semaphore = thread_util.create_semaphore( - self.opts.max_pool_size, max_waiters) + # The first portion of the wait queue. + # Enforces: maxPoolSize and waitQueueMultiple + # Also used for: clearing the wait queue + self.size_cond = threading.Condition(self.lock) + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if self.max_pool_size is None: + self.max_pool_size = float('inf') + self.waiters = 0 + self.max_waiters = max_waiters + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue self._max_connecting_cond = threading.Condition(self.lock) self._max_connecting = self.opts.max_connecting self._pending = 0 @@ -1114,10 +1141,23 @@ def __init__(self, address, options, handshake=True): # Similar to active_sockets but includes threads in the wait queue. self.operation_count = 0 - def _reset(self, close): - with self.lock: + def ready(self): + old_state, self.state = self.state, PoolState.READY + if old_state != PoolState.READY: + if self.enabled_for_cmap: + self.opts.event_listeners.publish_pool_ready(self.address) + + @property + def closed(self): + return self.state == PoolState.CLOSED + + def _reset(self, close, pause=True): + old_state = self.state + with self.size_cond: if self.closed: return + if self.opts.pause_enabled and pause: + old_state, self.state = self.state, PoolState.PAUSED self.generation += 1 newpid = os.getpid() if self.pid != newpid: @@ -1126,7 +1166,10 @@ def _reset(self, close): self.operation_count = 0 sockets, self.sockets = self.sockets, collections.deque() if close: - self.closed = True + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() listeners = self.opts.event_listeners # CMAP spec says that close() MUST close sockets before publishing the @@ -1138,7 +1181,7 @@ def _reset(self, close): if self.enabled_for_cmap: listeners.publish_pool_closed(self.address) else: - if self.enabled_for_cmap: + if old_state != PoolState.PAUSED and self.enabled_for_cmap: listeners.publish_pool_cleared(self.address) for sock_info in sockets: sock_info.close_socket(ConnectionClosedReason.STALE) @@ -1155,6 +1198,9 @@ def update_is_writable(self, is_writable): def reset(self): self._reset(close=False) + def reset_without_pause(self): + self._reset(close=False, pause=False) + def close(self): self._reset(close=True) @@ -1164,6 +1210,9 @@ def remove_stale_sockets(self, reference_generation, all_credentials): `generation` at the point in time this operation was requested on the pool. """ + if self.state != PoolState.READY: + return + if self.opts.max_idle_time_seconds is not None: with self.lock: while (self.sockets and @@ -1172,15 +1221,14 @@ def remove_stale_sockets(self, reference_generation, all_credentials): sock_info.close_socket(ConnectionClosedReason.IDLE) while True: - with self.lock: + with self.size_cond: + # There are enough sockets in the pool. if (len(self.sockets) + self.active_sockets >= self.opts.min_pool_size): - # There are enough sockets in the pool. return - - # We must acquire the semaphore to respect max_pool_size. - if not self._socket_semaphore.acquire(False): - return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 incremented = False try: with self._max_connecting_cond: @@ -1204,7 +1252,10 @@ def remove_stale_sockets(self, reference_generation, all_credentials): with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() - self._socket_semaphore.release() + + with self.size_cond: + self.requests -= 1 + self.size_cond.notify() def connect(self, all_credentials=None): """Connect to Mongo and return a new SocketInfo. @@ -1289,6 +1340,14 @@ def get_socket(self, all_credentials, checkout=False): if not checkout: self.return_socket(sock_info) + def _raise_if_not_ready(self, emit_event): + if self.state != PoolState.READY: + if self.enabled_for_cmap and emit_event: + self.opts.event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR) + _raise_connection_failure( + self.address, AutoReconnect('connection pool paused')) + def _get_socket(self, all_credentials): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. @@ -1313,9 +1372,26 @@ def _get_socket(self, all_credentials): deadline = _time() + self.opts.wait_queue_timeout else: deadline = None - if not self._socket_semaphore.acquire( - True, self.opts.wait_queue_timeout): - self._raise_wait_queue_timeout() + + with self.size_cond: + self._raise_if_not_ready(emit_event=True) + if self.waiters >= self.max_waiters: + raise ExceededMaxWaiters( + 'exceeded max waiters: %s threads already waiting' % ( + self.waiters)) + self.waiters += 1 + try: + while not (self.requests < self.max_pool_size): + if not _cond_wait(self.size_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout() + self._raise_if_not_ready(emit_event=True) + finally: + self.waiters -= 1 + self.requests += 1 # We've now acquired the semaphore and must release it on error. sock_info = None @@ -1330,6 +1406,7 @@ def _get_socket(self, all_credentials): # CMAP: we MUST wait for either maxConnecting OR for a socket # to be checked back into the pool. with self._max_connecting_cond: + self._raise_if_not_ready(emit_event=False) while not (self.sockets or self._pending < self._max_connecting): if not _cond_wait(self._max_connecting_cond, deadline): @@ -1340,6 +1417,7 @@ def _get_socket(self, all_credentials): self._max_connecting_cond.notify() emitted_event = True self._raise_wait_queue_timeout() + self._raise_if_not_ready(emit_event=False) try: sock_info = self.sockets.popleft() @@ -1361,11 +1439,11 @@ def _get_socket(self, all_credentials): if sock_info: # We checked out a socket but authentication failed. sock_info.close_socket(ConnectionClosedReason.ERROR) - self._socket_semaphore.release() - - if incremented: - with self.lock: + with self.size_cond: + self.requests -= 1 + if incremented: self.active_sockets -= 1 + self.size_cond.notify() if self.enabled_for_cmap and not emitted_event: self.opts.event_listeners.publish_connection_check_out_failed( @@ -1401,10 +1479,11 @@ def return_socket(self, sock_info): # Notify any threads waiting to create a connection. self._max_connecting_cond.notify() - self._socket_semaphore.release() - with self.lock: + with self.size_cond: + self.requests -= 1 self.active_sockets -= 1 self.operation_count -= 1 + self.size_cond.notify() def _perished(self, sock_info): """Return True and close the connection if it is "perished". diff --git a/pymongo/server.py b/pymongo/server.py index eb145d409a..c5ddd9bea2 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -61,7 +61,7 @@ def close(self): self._events.put((self._listener.publish_server_closed, (self._description.address, self._topology_id))) self._monitor.close() - self._pool.reset() + self._pool.reset_without_pause() def request_check(self): """Check the server's state soon.""" diff --git a/pymongo/thread_util.py b/pymongo/thread_util.py deleted file mode 100644 index 3dac4e25fa..0000000000 --- a/pymongo/thread_util.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright 2012-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for multi-threading support.""" - -import threading -try: - from time import monotonic as _time -except ImportError: - from time import time as _time - -from pymongo.monotonic import time as _time -from pymongo.errors import ExceededMaxWaiters - - -### Begin backport from CPython 3.2 for timeout support for Semaphore.acquire -class Semaphore: - - # After Tim Peters' semaphore class, but not quite the same (no maximum) - - def __init__(self, value=1): - if value < 0: - raise ValueError("semaphore initial value must be >= 0") - self._cond = threading.Condition(threading.Lock()) - self._value = value - - def acquire(self, blocking=True, timeout=None): - if not blocking and timeout is not None: - raise ValueError("can't specify timeout for non-blocking acquire") - rc = False - endtime = None - with self._cond: - while self._value == 0: - if not blocking: - break - if timeout is not None: - if endtime is None: - endtime = _time() + timeout - else: - timeout = endtime - _time() - if timeout <= 0: - break - self._cond.wait(timeout) - else: - self._value = self._value - 1 - rc = True - return rc - - __enter__ = acquire - - def release(self): - with self._cond: - self._value = self._value + 1 - self._cond.notify() - - def __exit__(self, t, v, tb): - self.release() - - @property - def counter(self): - return self._value - - -class BoundedSemaphore(Semaphore): - """Semaphore that checks that # releases is <= # acquires""" - def __init__(self, value=1): - Semaphore.__init__(self, value) - self._initial_value = value - - def release(self): - if self._value >= self._initial_value: - raise ValueError("Semaphore released too many times") - return Semaphore.release(self) -### End backport from CPython 3.2 - - -class DummySemaphore(object): - def __init__(self, value=None): - pass - - def acquire(self, blocking=True, timeout=None): - return True - - def release(self): - pass - - -class MaxWaitersBoundedSemaphore(object): - def __init__(self, semaphore_class, value=1, max_waiters=1): - self.waiter_semaphore = semaphore_class(max_waiters) - self.semaphore = semaphore_class(value) - - def acquire(self, blocking=True, timeout=None): - if not self.waiter_semaphore.acquire(False): - raise ExceededMaxWaiters() - try: - return self.semaphore.acquire(blocking, timeout) - finally: - self.waiter_semaphore.release() - - def __getattr__(self, name): - return getattr(self.semaphore, name) - - -class MaxWaitersBoundedSemaphoreThread(MaxWaitersBoundedSemaphore): - def __init__(self, value=1, max_waiters=1): - MaxWaitersBoundedSemaphore.__init__( - self, BoundedSemaphore, value, max_waiters) - - -def create_semaphore(max_size, max_waiters): - if max_size is None: - return DummySemaphore() - else: - if max_waiters is None: - return BoundedSemaphore(max_size) - else: - return MaxWaitersBoundedSemaphoreThread(max_size, max_waiters) diff --git a/pymongo/topology.py b/pymongo/topology.py index 20b8bbc082..db00280c5d 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -39,6 +39,7 @@ NetworkTimeout, NotMasterError, OperationFailure, + PyMongoError, ServerSelectionTimeoutError) from pymongo.monitor import SrvMonitor from pymongo.monotonic import time as _time @@ -282,6 +283,12 @@ def _process_change(self, server_description, reset_pool=False): # This is a stale isMaster response. Ignore it. return + # CMAP: Ensure the pool is "ready" when the server is selectable. + if server_description.is_server_type_known: + server = self._servers.get(server_description.address) + if server: + server.pool.ready() + suppress_event = ((self._publish_server or self._publish_tp) and sd_old == server_description) if self._publish_server and not suppress_event: @@ -444,7 +451,13 @@ def update_pool(self, all_credentials): servers.append((server, server._pool.generation)) for server, generation in servers: - server._pool.remove_stale_sockets(generation, all_credentials) + pool = server._pool + try: + pool.remove_stale_sockets(generation, all_credentials) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False) + self.handle_error(pool.address, ctx) + raise def close(self): """Clear pools and terminate monitors. Topology reopens on demand.""" @@ -686,7 +699,9 @@ def _create_pool_for_monitor(self, address): ssl_match_hostname=options.ssl_match_hostname, event_listeners=options.event_listeners, appname=options.appname, - driver=options.driver) + driver=options.driver, + pause_enabled=False, + ) return self._settings.pool_class(address, monitor_pool_options, handshake=False) diff --git a/test/__init__.py b/test/__init__.py index d24601551b..d9d362288f 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -687,6 +687,21 @@ def require_sessions(self, func): "Sessions not supported", func=func) + def supports_retryable_writes(self): + if self.storage_engine == 'mmapv1': + return False + if not self.sessions_enabled: + return False + if self.version.at_least(3, 6): + return self.is_mongos or self.is_rs + return False + + def require_retryable_writes(self, func): + """Run a test only if the deployment supports retryable writes.""" + return self._require(self.supports_retryable_writes, + "This server does not support retryable writes", + func=func) + def supports_transactions(self): if self.storage_engine == 'mmapv1': return False diff --git a/test/cmap/connection-must-have-id.json b/test/cmap/connection-must-have-id.json index 7ed6790228..f2d6fb95e9 100644 --- a/test/cmap/connection-must-have-id.json +++ b/test/cmap/connection-must-have-id.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must have an ID number associated with it", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" }, @@ -42,6 +45,7 @@ ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionPoolClosed", "ConnectionReady" ] diff --git a/test/cmap/connection-must-order-ids.json b/test/cmap/connection-must-order-ids.json index 9b839e8f06..b7c2751dd7 100644 --- a/test/cmap/connection-must-order-ids.json +++ b/test/cmap/connection-must-order-ids.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must have IDs assigned in order of creation", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" }, @@ -42,6 +45,7 @@ ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionPoolClosed", "ConnectionReady" ] diff --git a/test/cmap/pool-checkin-destroy-closed.json b/test/cmap/pool-checkin-destroy-closed.json index a73afbf752..55d0c03752 100644 --- a/test/cmap/pool-checkin-destroy-closed.json +++ b/test/cmap/pool-checkin-destroy-closed.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must destroy checked in connection if pool has been closed", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -39,6 +42,7 @@ ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkin-destroy-stale.json b/test/cmap/pool-checkin-destroy-stale.json index 600c052071..6ffb8f53d1 100644 --- a/test/cmap/pool-checkin-destroy-stale.json +++ b/test/cmap/pool-checkin-destroy-stale.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must destroy checked in connection if it is stale", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -39,6 +42,7 @@ ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkin-make-available.json b/test/cmap/pool-checkin-make-available.json index 015928c50d..41c522ae67 100644 --- a/test/cmap/pool-checkin-make-available.json +++ b/test/cmap/pool-checkin-make-available.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must make valid checked in connection available", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -34,6 +37,7 @@ ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkin.json b/test/cmap/pool-checkin.json index 7073895ad2..3b40cec6f4 100644 --- a/test/cmap/pool-checkin.json +++ b/test/cmap/pool-checkin.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must have a method of allowing the driver to check in a connection", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -21,6 +24,7 @@ ], "ignore": [ "ConnectionPoolCreated", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionClosed", diff --git a/test/cmap/pool-checkout-connection.json b/test/cmap/pool-checkout-connection.json index 4d39b15688..d89b342605 100644 --- a/test/cmap/pool-checkout-connection.json +++ b/test/cmap/pool-checkout-connection.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must be able to check out a connection", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" } @@ -29,6 +32,7 @@ } ], "ignore": [ + "ConnectionPoolReady", "ConnectionPoolCreated" ] } diff --git a/test/cmap/pool-checkout-error-closed.json b/test/cmap/pool-checkout-error-closed.json index 3823c23a78..ee2926e1c0 100644 --- a/test/cmap/pool-checkout-error-closed.json +++ b/test/cmap/pool-checkout-error-closed.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must throw error if checkOut is called on a closed pool", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn1" @@ -57,6 +60,7 @@ } ], "ignore": [ + "ConnectionPoolReady", "ConnectionCreated", "ConnectionReady", "ConnectionClosed" diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-maxConnecting-is-enforced.json index 4b67b73add..80797398ff 100644 --- a/test/cmap/pool-checkout-maxConnecting-is-enforced.json +++ b/test/cmap/pool-checkout-maxConnecting-is-enforced.json @@ -26,6 +26,9 @@ "waitQueueTimeoutMS": 5000 }, "operations": [ + { + "name": "ready" + }, { "name": "start", "target": "thread1" @@ -98,6 +101,7 @@ "ConnectionCheckedIn", "ConnectionCheckedOut", "ConnectionClosed", - "ConnectionPoolCreated" + "ConnectionPoolCreated", + "ConnectionPoolReady" ] } diff --git a/test/cmap/pool-checkout-maxConnecting-timeout.json b/test/cmap/pool-checkout-maxConnecting-timeout.json index ef71216efc..9d97a6178f 100644 --- a/test/cmap/pool-checkout-maxConnecting-timeout.json +++ b/test/cmap/pool-checkout-maxConnecting-timeout.json @@ -26,6 +26,9 @@ "waitQueueTimeoutMS": 50 }, "operations": [ + { + "name": "ready" + }, { "name": "start", "target": "thread1" @@ -93,6 +96,7 @@ "ConnectionCheckedIn", "ConnectionCheckedOut", "ConnectionClosed", - "ConnectionPoolCreated" + "ConnectionPoolCreated", + "ConnectionPoolReady" ] } diff --git a/test/cmap/pool-checkout-multiple.json b/test/cmap/pool-checkout-multiple.json index fee0d076cf..07a4eda629 100644 --- a/test/cmap/pool-checkout-multiple.json +++ b/test/cmap/pool-checkout-multiple.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must be able to check out multiple connections at the same time", "operations": [ + { + "name": "ready" + }, { "name": "start", "target": "thread1" @@ -59,6 +62,7 @@ ], "ignore": [ "ConnectionCreated", + "ConnectionPoolReady", "ConnectionReady", "ConnectionPoolCreated", "ConnectionCheckOutStarted" diff --git a/test/cmap/pool-checkout-no-idle.json b/test/cmap/pool-checkout-no-idle.json index 74325d655d..7e6563228d 100644 --- a/test/cmap/pool-checkout-no-idle.json +++ b/test/cmap/pool-checkout-no-idle.json @@ -6,6 +6,9 @@ "maxIdleTimeMS": 10 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -52,6 +55,7 @@ ], "ignore": [ "ConnectionReady", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionCheckOutStarted" ] diff --git a/test/cmap/pool-checkout-no-stale.json b/test/cmap/pool-checkout-no-stale.json index 67ee507fe8..fcf20621ee 100644 --- a/test/cmap/pool-checkout-no-stale.json +++ b/test/cmap/pool-checkout-no-stale.json @@ -3,6 +3,9 @@ "style": "unit", "description": "must destroy and must not check out a stale connection if found while iterating available connections", "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn" @@ -14,6 +17,9 @@ { "name": "clear" }, + { + "name": "ready" + }, { "name": "checkOut" } @@ -52,6 +58,7 @@ ], "ignore": [ "ConnectionReady", + "ConnectionPoolReady", "ConnectionCreated", "ConnectionCheckOutStarted" ] diff --git a/test/cmap/pool-checkout-returned-connection-maxConnecting.json b/test/cmap/pool-checkout-returned-connection-maxConnecting.json index 308d640f0e..7ff59ab392 100644 --- a/test/cmap/pool-checkout-returned-connection-maxConnecting.json +++ b/test/cmap/pool-checkout-returned-connection-maxConnecting.json @@ -26,6 +26,9 @@ "waitQueueTimeoutMS": 5000 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn0" @@ -111,6 +114,7 @@ } ], "ignore": [ + "ConnectionPoolReady", "ConnectionClosed", "ConnectionReady", "ConnectionPoolCreated", diff --git a/test/cmap/pool-clear-clears-waitqueue.json b/test/cmap/pool-clear-clears-waitqueue.json new file mode 100644 index 0000000000..8df1bfdfb0 --- /dev/null +++ b/test/cmap/pool-clear-clears-waitqueue.json @@ -0,0 +1,104 @@ +{ + "version": 1, + "style": "unit", + "description": "clearing pool clears the WaitQueue", + "poolOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 30000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "start", + "target": "thread3" + }, + { + "name": "checkOut", + "thread": "thread3" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutStarted", + "count": 4 + }, + { + "name": "clear" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 3, + "timeout": 1000 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionPoolCreated", + "ConnectionCreated", + "ConnectionReady", + "ConnectionCheckedIn", + "ConnectionClosed" + ] +} diff --git a/test/cmap/pool-clear-min-size.json b/test/cmap/pool-clear-min-size.json new file mode 100644 index 0000000000..00c477c620 --- /dev/null +++ b/test/cmap/pool-clear-min-size.json @@ -0,0 +1,67 @@ +{ + "version": 1, + "style": "unit", + "description": "pool clear halts background minPoolSize establishments", + "poolOptions": { + "minPoolSize": 1 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 1 + }, + { + "name": "clear" + }, + { + "name": "wait", + "ms": 200 + }, + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionClosed" + ] +} diff --git a/test/cmap/pool-clear-paused.json b/test/cmap/pool-clear-paused.json new file mode 100644 index 0000000000..847f08d849 --- /dev/null +++ b/test/cmap/pool-clear-paused.json @@ -0,0 +1,32 @@ +{ + "version": 1, + "style": "unit", + "description": "clearing a paused pool emits no events", + "operations": [ + { + "name": "clear" + }, + { + "name": "ready" + }, + { + "name": "clear" + }, + { + "name": "clear" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-clear-ready.json b/test/cmap/pool-clear-ready.json new file mode 100644 index 0000000000..800c3545ad --- /dev/null +++ b/test/cmap/pool-clear-ready.json @@ -0,0 +1,69 @@ +{ + "version": 1, + "style": "unit", + "description": "after clear, cannot check out connections until pool ready", + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "clear" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42, + "connectionId": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "address": 42, + "reason": "connectionError" + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionReady", + "ConnectionCheckOutStarted", + "ConnectionCreated" + ] +} diff --git a/test/cmap/pool-close-destroy-conns.json b/test/cmap/pool-close-destroy-conns.json index e1fb9d0783..a3d58a2136 100644 --- a/test/cmap/pool-close-destroy-conns.json +++ b/test/cmap/pool-close-destroy-conns.json @@ -3,6 +3,9 @@ "style": "unit", "description": "When a pool is closed, it MUST first destroy all available connections in that pool", "operations": [ + { + "name": "ready" + }, { "name": "checkOut" }, @@ -40,6 +43,7 @@ ], "ignore": [ "ConnectionCreated", + "ConnectionPoolReady", "ConnectionReady", "ConnectionPoolCreated", "ConnectionCheckOutStarted", diff --git a/test/cmap/pool-create-max-size.json b/test/cmap/pool-create-max-size.json index b585d0daec..e3a1fa8eda 100644 --- a/test/cmap/pool-create-max-size.json +++ b/test/cmap/pool-create-max-size.json @@ -6,6 +6,9 @@ "maxPoolSize": 3 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn1" @@ -124,6 +127,7 @@ } ], "ignore": [ - "ConnectionReady" + "ConnectionReady", + "ConnectionPoolReady" ] } diff --git a/test/cmap/pool-create-min-size-error.json b/test/cmap/pool-create-min-size-error.json new file mode 100644 index 0000000000..4b655123d0 --- /dev/null +++ b/test/cmap/pool-create-min-size-error.json @@ -0,0 +1,62 @@ +{ + "version": 1, + "style": "integration", + "description": "error during minPoolSize population clears pool", + "runOn": [ + { + "minServerVersion": "4.2.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 50 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "closeConnection": true + } + }, + "poolOptions": { + "minPoolSize": 1 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "waitForEvent", + "event": "ConnectionClosed", + "count": 1 + }, + { + "name": "wait", + "ms": 200 + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-create-min-size.json b/test/cmap/pool-create-min-size.json index 4fdc42f4eb..43118f7841 100644 --- a/test/cmap/pool-create-min-size.json +++ b/test/cmap/pool-create-min-size.json @@ -6,6 +6,13 @@ "minPoolSize": 3 }, "operations": [ + { + "name": "wait", + "ms": 200 + }, + { + "name": "ready" + }, { "name": "waitForEvent", "event": "ConnectionCreated", @@ -26,6 +33,10 @@ "address": 42, "options": 42 }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, { "type": "ConnectionCreated", "connectionId": 42, diff --git a/test/cmap/pool-ready-ready.json b/test/cmap/pool-ready-ready.json new file mode 100644 index 0000000000..25dfa9c97c --- /dev/null +++ b/test/cmap/pool-ready-ready.json @@ -0,0 +1,39 @@ +{ + "version": 1, + "style": "unit", + "description": "readying a ready pool emits no events", + "operations": [ + { + "name": "ready" + }, + { + "name": "ready" + }, + { + "name": "ready" + }, + { + "name": "clear" + }, + { + "name": "ready" + } + ], + "events": [ + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-ready.json b/test/cmap/pool-ready.json new file mode 100644 index 0000000000..29ce7326cf --- /dev/null +++ b/test/cmap/pool-ready.json @@ -0,0 +1,57 @@ +{ + "version": 1, + "style": "unit", + "description": "pool starts as cleared and becomes ready", + "operations": [ + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + }, + { + "name": "ready" + }, + { + "name": "checkOut" + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCheckOutFailed", + "reason": "connectionError", + "address": 42 + }, + { + "type": "ConnectionPoolReady", + "address": 42 + }, + { + "type": "ConnectionCheckOutStarted", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolCreated", + "ConnectionReady" + ] +} diff --git a/test/cmap/wait-queue-timeout.json b/test/cmap/wait-queue-timeout.json index ee7cf27955..993209a353 100644 --- a/test/cmap/wait-queue-timeout.json +++ b/test/cmap/wait-queue-timeout.json @@ -7,6 +7,9 @@ "waitQueueTimeoutMS": 20 }, "operations": [ + { + "name": "ready" + }, { "name": "checkOut", "label": "conn0" @@ -66,6 +69,7 @@ "ConnectionCreated", "ConnectionReady", "ConnectionClosed", - "ConnectionPoolCreated" + "ConnectionPoolCreated", + "ConnectionPoolReady" ] } diff --git a/test/discovery_and_monitoring_integration/isMaster-command-error.json b/test/discovery_and_monitoring_integration/isMaster-command-error.json index 4bdfd9adff..0a735dc334 100644 --- a/test/discovery_and_monitoring_integration/isMaster-command-error.json +++ b/test/discovery_and_monitoring_integration/isMaster-command-error.json @@ -39,14 +39,6 @@ "count": 1 } }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, { "name": "insertMany", "object": "collection", diff --git a/test/discovery_and_monitoring_integration/isMaster-network-error.json b/test/discovery_and_monitoring_integration/isMaster-network-error.json index eb1f3eac19..2385a41646 100644 --- a/test/discovery_and_monitoring_integration/isMaster-network-error.json +++ b/test/discovery_and_monitoring_integration/isMaster-network-error.json @@ -38,14 +38,6 @@ "count": 1 } }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, { "name": "insertMany", "object": "collection", diff --git a/test/discovery_and_monitoring_integration/isMaster-timeout.json b/test/discovery_and_monitoring_integration/isMaster-timeout.json index eeee612be8..50ad482778 100644 --- a/test/discovery_and_monitoring_integration/isMaster-timeout.json +++ b/test/discovery_and_monitoring_integration/isMaster-timeout.json @@ -39,14 +39,6 @@ "count": 1 } }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, { "name": "insertMany", "object": "collection", diff --git a/test/discovery_and_monitoring_integration/minPoolSize-error.json b/test/discovery_and_monitoring_integration/minPoolSize-error.json new file mode 100644 index 0000000000..9605ee4f5f --- /dev/null +++ b/test/discovery_and_monitoring_integration/minPoolSize-error.json @@ -0,0 +1,101 @@ +{ + "runOn": [ + { + "minServerVersion": "4.9" + } + ], + "database_name": "sdam-tests", + "collection_name": "sdam-minPoolSize-error", + "data": [], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } + }, + "clientOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000, + "directConnection": true + }, + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolReadyEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": {} + } + }, + "error": true + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + } + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": 1 + } + }, + "error": false + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolReadyEvent", + "count": 2 + } + } + ] + } + ] +} diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 388f89178f..7520f2bfb1 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -57,6 +57,27 @@ def get_socket(self, all_credentials, checkout=False): yield sock_info +class DummyMonitor(object): + def __init__(self, server_description, topology, pool, topology_settings): + self._server_description = server_description + self.opened = False + + def cancel_check(self): + pass + + def join(self): + pass + + def open(self): + self.opened = True + + def request_check(self): + pass + + def close(self): + self.opened = False + + class MockMonitor(Monitor): def __init__( self, diff --git a/test/test_client.py b/test/test_client.py index 6f2d587ed8..337602883a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -62,6 +62,7 @@ writable_server_selector) from pymongo.server_type import SERVER_TYPE from pymongo.settings import TOPOLOGY_TYPE +from pymongo.topology import _ErrorContext from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.write_concern import WriteConcern from test import (client_context, @@ -1090,7 +1091,8 @@ def test_waitQueueMultiple(self): client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2) pool = get_pool(client) self.assertEqual(pool.opts.wait_queue_multiple, 2) - self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6) + self.assertEqual(pool.max_waiters, 6) + self.assertEqual(pool.max_pool_size, 3) def test_socketKeepAlive(self): for socketKeepAlive in [True, False]: @@ -1341,7 +1343,7 @@ def test_exhaust_network_error(self): self.assertTrue(sock_info.closed) # The semaphore was decremented despite the error. - self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) + self.assertEqual(0, pool.requests) @client_context.require_auth def test_auth_network_error(self): @@ -1546,7 +1548,9 @@ def stop(self): def run(self): while self.running: - self.pool.reset() + exc = AutoReconnect('mock pool error') + ctx = _ErrorContext(exc, 0, pool.generation, False) + client._topology.handle_error(pool.address, ctx) time.sleep(0.001) t = ResetPoolThread(pool) @@ -1680,7 +1684,7 @@ def test_exhaust_query_server_error(self): # The socket was checked in and the semaphore was decremented. self.assertIn(sock_info, pool.sockets) - self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) + self.assertEqual(0, pool.requests) def test_exhaust_getmore_server_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked @@ -1739,7 +1743,7 @@ def test_exhaust_query_network_error(self): # The socket was closed and the semaphore was decremented. self.assertNotIn(sock_info, pool.sockets) - self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) + self.assertEqual(0, pool.requests) def test_exhaust_getmore_network_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked @@ -1766,7 +1770,7 @@ def test_exhaust_getmore_network_error(self): # The socket was closed and the semaphore was decremented. self.assertNotIn(sock_info, pool.sockets) - self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) + self.assertEqual(0, pool.requests) class TestClientLazyConnect(IntegrationTest): diff --git a/test/test_cmap.py b/test/test_cmap.py index ef40d07bf1..bf5328fcda 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -16,6 +16,7 @@ import os import sys +import threading import time sys.path[0:0] = [""] @@ -35,23 +36,27 @@ ConnectionCreatedEvent, ConnectionReadyEvent, PoolCreatedEvent, + PoolReadyEvent, PoolClearedEvent, PoolClosedEvent) from pymongo.read_preferences import ReadPreference -from pymongo.pool import _PoolClosedError +from pymongo.pool import _PoolClosedError, PoolState -from test import (IntegrationTest, +from test import (client_knobs, + IntegrationTest, unittest) from test.utils import (camel_to_snake, client_context, CMAPListener, get_pool, get_pools, + OvertCommandListener, rs_or_single_client, single_client, TestCreator, wait_until) from test.utils_spec_runner import SpecRunnerThread +from test.pymongo_mocks import DummyMonitor OBJECT_TYPES = { @@ -64,6 +69,7 @@ 'ConnectionReady': ConnectionReadyEvent, 'ConnectionCheckOutStarted': ConnectionCheckOutStartedEvent, 'ConnectionPoolCreated': PoolCreatedEvent, + 'ConnectionPoolReady': PoolReadyEvent, 'ConnectionPoolCleared': PoolClearedEvent, 'ConnectionPoolClosed': PoolClosedEvent, # Error types. @@ -98,13 +104,15 @@ def wait_for_thread(self, op): thread.join() if thread.exc: raise thread.exc + self.assertFalse(thread.ops) def wait_for_event(self, op): """Run the 'waitForEvent' operation.""" event = OBJECT_TYPES[op['event']] count = op['count'] + timeout = op.get('timeout', 10000) / 1000.0 wait_until(lambda: self.listener.event_count(event) >= count, - 'find %s %s event(s)' % (count, event)) + 'find %s %s event(s)' % (count, event), timeout=timeout) def check_out(self, op): """Run the 'checkOut' operation.""" @@ -121,6 +129,10 @@ def check_in(self, op): sock_info = self.labels[label] self.pool.return_socket(sock_info) + def ready(self, op): + """Run the 'ready' operation.""" + self.pool.ready() + def clear(self, op): """Run the 'clear' operation.""" self.pool.reset() @@ -213,9 +225,13 @@ def run_scenario(self, scenario_def, test): opts = test['poolOptions'].copy() opts['event_listeners'] = [self.listener] - client = single_client(**opts) + opts['_monitor_class'] = DummyMonitor + with client_knobs(kill_cursor_frequency=.05, + min_heartbeat_interval=.05): + client = single_client(**opts) self.addCleanup(client.close) - self.pool = get_pool(client) + # self.pool = get_pools(client)[0] + self.pool = list(client._get_topology()._servers.values())[0].pool # Map of target names to Thread objects. self.targets = dict() @@ -342,13 +358,14 @@ def mock_connect(*args, **kwargs): client.admin.command('isMaster') self.assertIsInstance(listener.events[0], PoolCreatedEvent) - self.assertIsInstance(listener.events[1], - ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) self.assertIsInstance(listener.events[2], + ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) - self.assertIsInstance(listener.events[3], PoolClearedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) - failed_event = listener.events[2] + failed_event = listener.events[3] self.assertEqual( failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) @@ -363,17 +380,16 @@ def test_5_check_out_fails_auth_error(self): client.admin.command('isMaster') self.assertIsInstance(listener.events[0], PoolCreatedEvent) - self.assertIsInstance(listener.events[1], + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[2], ConnectionCreatedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) # Error happens here. - self.assertIsInstance(listener.events[3], ConnectionClosedEvent) - self.assertIsInstance(listener.events[4], + self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) - - failed_event = listener.events[4] - self.assertEqual( - failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + self.assertEqual(listener.events[5].reason, + ConnectionCheckOutFailedReason.CONN_ERROR) # # Extra non-spec tests @@ -398,6 +414,73 @@ def test_events_repr(self): self.assertRepr(PoolClearedEvent(host)) self.assertRepr(PoolClosedEvent(host)) + def test_close_leaves_pool_unpaused(self): + # Needed until we implement PYTHON-2463. This test is related to + # test_threads.TestThreads.test_client_disconnect + listener = CMAPListener() + client = single_client(event_listeners=[listener]) + client.admin.command('ping') + pool = get_pool(client) + client.close() + self.assertEqual(1, listener.event_count(PoolClearedEvent)) + self.assertEqual(PoolState.READY, pool.state) + # Checking out a connection should succeed + with pool.get_socket({}): + pass + + @client_context.require_version_max(4, 3) # Remove after SERVER-53624. + @client_context.require_retryable_writes + @client_context.require_failCommand_fail_point + def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = rs_or_single_client( + maxPoolSize=1, + heartbeatFrequencyMS=500, + event_listeners=[cmap_listener, cmd_listener]) + self.addCleanup(client.close) + threads = [InsertThread(client.pymongo_test.test) for _ in range(3)] + fail_command = { + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['insert'], + 'blockConnection': True, + 'blockTimeMS': 1000, + 'errorCode': 91 + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # The two threads in the wait queue fail the initial connection check + # out attempt and then succeed on retry. + self.assertEqual( + 2, cmap_listener.event_count(ConnectionCheckOutFailedEvent)) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + self.assertEqual(4, len(cmd_listener.results['started'])) + self.assertEqual(3, len(cmd_listener.results['succeeded'])) + self.assertEqual(1, len(cmd_listener.results['failed'])) + + +class InsertThread(threading.Thread): + def __init__(self, collection): + super(InsertThread, self).__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.insert_one({}) + self.passed = True + def create_test(scenario_def, test, name): def run_scenario(self): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index c676647e66..601c0b5156 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -39,14 +39,18 @@ from test import unittest, IntegrationTest from test.utils import (assertion_context, cdecimal_patched, + CMAPListener, client_context, Barrier, get_pool, + HeartbeatEventListener, server_name_to_type, rs_or_single_client, + single_client, TestCreator, wait_until) from test.utils_spec_runner import SpecRunner, SpecRunnerThread +from test.pymongo_mocks import DummyMonitor # Location of JSON test specifications. @@ -54,27 +58,7 @@ os.path.dirname(os.path.realpath(__file__)), 'discovery_and_monitoring') -class MockMonitor(object): - def __init__(self, server_description, topology, pool, topology_settings): - self._server_description = server_description - - def cancel_check(self): - pass - - def open(self): - pass - - def close(self): - pass - - def join(self): - pass - - def request_check(self): - pass - - -def create_mock_topology(uri, monitor_class=MockMonitor): +def create_mock_topology(uri, monitor_class=DummyMonitor): parsed_uri = parse_uri(uri) replica_set_name = None direct_connection = None @@ -318,6 +302,46 @@ def insert_command(i): client.admin.command('ping') +class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): + pass + + +class TestPoolManagement(IntegrationTest): + @client_context.require_failCommand_appName + def test_pool_unpause(self): + # This test implements the prose test "Connection Pool Management" + listener = CMAPHeartbeatListener() + client = single_client(appName="SDAMPoolManagementTest", + heartbeatFrequencyMS=500, + event_listeners=[listener]) + self.addCleanup(client.close) + # Assert that ConnectionPoolReadyEvent occurs after the first + # ServerHeartbeatSucceededEvent. + listener.wait_for_event(monitoring.PoolReadyEvent, 1) + pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] + hb_succeeded = listener.events_by_type( + monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater( + listener.events.index(pool_ready), + listener.events.index(hb_succeeded)) + + listener.reset() + fail_ismaster = { + 'mode': {'times': 2}, + 'data': { + 'failCommands': ['isMaster'], + 'errorCode': 1234, + 'appName': 'SDAMPoolManagementTest', + }, + } + with self.fail_point(fail_ismaster): + listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + listener.wait_for_event(monitoring.PoolClearedEvent, 1) + listener.wait_for_event( + monitoring.ServerHeartbeatSucceededEvent, 1) + listener.wait_for_event(monitoring.PoolReadyEvent, 1) + + class TestIntegration(SpecRunner): # Location of JSON test specifications. TEST_PATH = os.path.join( diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 61a0afc15c..3929412f36 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -51,12 +51,12 @@ def _check_with_socket(self, *args, **kwargs): # monitor thread may run multiple times during the execution # of this test. wait_until( - lambda: len(listener.results) >= expected_len, + lambda: len(listener.events) >= expected_len, "publish all events") try: # zip gives us len(expected_results) pairs. - for expected, actual in zip(expected_results, listener.results): + for expected, actual in zip(expected_results, listener.events): self.assertEqual(expected, actual.__class__.__name__) self.assertEqual(actual.connection_id, diff --git a/test/test_pooling.py b/test/test_pooling.py index d103991600..024996a03c 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -176,7 +176,9 @@ def create_pool( pool_options = client_context.client._topology_settings.pool_options kwargs['ssl_context'] = pool_options.ssl_context kwargs['ssl_match_hostname'] = pool_options.ssl_match_hostname - return Pool(pair, PoolOptions(*args, **kwargs)) + pool = Pool(pair, PoolOptions(*args, **kwargs)) + pool.ready() + return pool class TestPooling(_TestPoolingBase): @@ -483,7 +485,7 @@ def f(): joinall(threads) self.assertEqual(nthreads, self.n_passed) self.assertTrue(len(cx_pool.sockets) > 1) - self.assertEqual(max_pool_size, cx_pool._socket_semaphore.counter) + self.assertEqual(0, cx_pool.requests) def test_max_pool_size_none(self): c = rs_or_single_client(maxPoolSize=None) @@ -529,6 +531,7 @@ def test_max_pool_size_with_connection_failure(self): connect_timeout=1, socket_timeout=1, wait_queue_timeout=1)) + test_pool.ready() # First call to get_socket fails; if pool doesn't release its semaphore # then the second call raises "ConnectionFailure: Timed out waiting for diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 8b815a84eb..769dc6c674 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -203,7 +203,7 @@ def hb_failed(event): self.assertTrue(hb_failed_events[0].awaited) # Depending on thread scheduling, the failed heartbeat could occur on # the second or third check. - events = [type(e) for e in hb_listener.results[:4]] + events = [type(e) for e in hb_listener.events[:4]] if events == [monitoring.ServerHeartbeatStartedEvent, monitoring.ServerHeartbeatSucceededEvent, monitoring.ServerHeartbeatStartedEvent, diff --git a/test/test_topology.py b/test/test_topology.py index c593c06382..1b3bfe5ab3 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -37,24 +37,7 @@ from pymongo.settings import TopologySettings from test import client_knobs, unittest from test.utils import MockPool, wait_until - - -class MockMonitor(object): - def __init__(self, server_description, topology, pool, topology_settings): - self._server_description = server_description - self.opened = False - - def cancel_check(self): - pass - - def open(self): - self.opened = True - - def request_check(self): - pass - - def close(self): - self.opened = False +from test.pymongo_mocks import DummyMonitor class SetNameDiscoverySettings(TopologySettings): @@ -68,7 +51,7 @@ def get_topology_type(self): def create_mock_topology( seeds=None, replica_set_name=None, - monitor_class=MockMonitor): + monitor_class=DummyMonitor): partitioned_seeds = list(imap(common.partition_node, seeds or ['a'])) topology_settings = TopologySettings( partitioned_seeds, @@ -501,7 +484,7 @@ def test_discover_set_name_from_primary(self): topology_settings = SetNameDiscoverySettings( seeds=[address], pool_class=MockPool, - monitor_class=MockMonitor) + monitor_class=DummyMonitor) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) @@ -537,7 +520,7 @@ def test_discover_set_name_from_secondary(self): topology_settings = SetNameDiscoverySettings( seeds=[address], pool_class=MockPool, - monitor_class=MockMonitor) + monitor_class=DummyMonitor) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) diff --git a/test/utils.py b/test/utils.py index cf38e76e6a..ff301540c1 100644 --- a/test/utils.py +++ b/test/utils.py @@ -63,7 +63,7 @@ IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) -class CMAPListener(ConnectionPoolListener): +class BaseListener(object): def __init__(self): self.events = [] @@ -74,9 +74,26 @@ def add_event(self, event): self.events.append(event) def event_count(self, event_type): - return len([event for event in self.events[:] - if isinstance(event, event_type)]) + return len(self.events_by_type(event_type)) + def events_by_type(self, event_type): + """Return the matching events by event class. + + event_type can be a single class or a tuple of classes. + """ + return self.matching(lambda e: isinstance(e, event_type)) + + def matching(self, matcher): + """Return the matching events.""" + return [event for event in self.events[:] if matcher(event)] + + def wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + wait_until(lambda: self.event_count(event) >= count, + 'find %s %s event(s)' % (count, event)) + + +class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): self.add_event(event) @@ -101,6 +118,9 @@ def connection_checked_in(self, event): def pool_created(self, event): self.add_event(event) + def pool_ready(self, event): + self.add_event(event) + def pool_cleared(self, event): self.add_event(event) @@ -199,25 +219,17 @@ class ServerAndTopologyEventListener(ServerEventListener, """Listens to Server and Topology events.""" -class HeartbeatEventListener(monitoring.ServerHeartbeatListener): +class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): """Listens to only server heartbeat events.""" - def __init__(self): - self.results = [] - def started(self, event): - self.results.append(event) + self.add_event(event) def succeeded(self, event): - self.results.append(event) + self.add_event(event) def failed(self, event): - self.results.append(event) - - def matching(self, matcher): - """Return the matching events.""" - results = self.results[:] - return [event for event in results if matcher(event)] + self.add_event(event) class MockSocketInfo(object): @@ -252,9 +264,15 @@ def _reset(self): with self._lock: self.generation += 1 + def ready(self): + pass + def reset(self): self._reset() + def reset_without_pause(self): + self._reset() + def close(self): self._reset() diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 0d4edb085c..21a4332f42 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -30,23 +30,7 @@ from pymongo.topology import Topology from test import unittest from test.utils import MockPool, parse_read_preference - - -class MockMonitor(object): - def __init__(self, server_description, topology, pool, topology_settings): - pass - - def cancel_check(self): - pass - - def open(self): - pass - - def request_check(self): - pass - - def close(self): - pass +from test.pymongo_mocks import DummyMonitor def get_addresses(server_list): @@ -122,7 +106,7 @@ def get_topology_type_name(scenario_def): def get_topology_settings_dict(**kwargs): settings = dict( - monitor_class=MockMonitor, + monitor_class=DummyMonitor, heartbeat_frequency=HEARTBEAT_FREQUENCY, pool_class=MockPool ) From c96c5a94533365356fd574a52ff4ded4621c9f67 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 11 Jan 2021 18:12:13 -0800 Subject: [PATCH 0263/2111] PYTHON-2388 Begin PyMongo 4.0 migration guide (#540) --- doc/changelog.rst | 10 ++++++- doc/index.rst | 4 +++ doc/migrate-to-pymongo4.rst | 53 +++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 doc/migrate-to-pymongo4.rst diff --git a/doc/changelog.rst b/doc/changelog.rst index 5567089b4c..c9f4ef4dca 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,9 +4,17 @@ Changelog Changes in Version 4.0 ---------------------- +.. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. + +PyMongo 4.0 brings a number of improvements as well as some backward breaking +changes. For example, all APIs deprecated in PyMongo 3.X have been removed. +Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` +before upgrading from PyMongo 3.x. + Breaking Changes in 4.0 -``````````````````````` +....................... +- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. - Removed :mod:`~pymongo.thread_util`. Issues Resolved diff --git a/doc/index.rst b/doc/index.rst index 05265578ae..9d7ac2b89b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -31,6 +31,9 @@ everything you need to know to use **PyMongo**. :doc:`faq` Some questions that come up often. +:doc:`migrate-to-pymongo4` + A PyMongo 3.x to 4.x migration guide. + :doc:`migrate-to-pymongo3` A PyMongo 2.x to 3.x migration guide. @@ -119,5 +122,6 @@ Indices and tables contributors changelog python3 + migrate-to-pymongo4 migrate-to-pymongo3 developer/index diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst new file mode 100644 index 0000000000..418d475f87 --- /dev/null +++ b/doc/migrate-to-pymongo4.rst @@ -0,0 +1,53 @@ +PyMongo 4 Migration Guide +========================= + +.. contents:: + +.. testsetup:: + + from pymongo import MongoClient, ReadPreference + client = MongoClient() + collection = client.my_database.my_collection + +PyMongo 4.0 brings a number of improvements as well as some backward breaking +changes. This guide provides a roadmap for migrating an existing application +from PyMongo 3.x to 4.x or writing libraries that will work with both +PyMongo 3.x and 4.x. + +PyMongo 3 +--------- + +The first step in any successful migration involves upgrading to, or +requiring, at least that latest version of PyMongo 3.x. If your project has a +requirements.txt file, add the line "pymongo >= 3.12, < 4.0" until you have +completely migrated to PyMongo 4. Most of the key new methods and options from +PyMongo 4.0 are backported in PyMongo 3.12 making migration much easier. + +.. note:: Users of PyMongo 2.X who wish to upgrade to 4.x must first upgrade + to PyMongo 3.x by following the :doc:`migrate-to-pymongo3`. + +Python 3.6+ +----------- + +PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to +upgrade to 4.x must first upgrade to Python 3.6+. Users upgrading from +Python 2 should consult the :doc:`python3`. + +Enable Deprecation Warnings +--------------------------- + +:exc:`DeprecationWarning` is raised by most methods removed in PyMongo 4.0. +Make sure you enable runtime warnings to see where deprecated functions and +methods are being used in your application:: + + python -Wd + +Warnings can also be changed to errors:: + + python -Wd -Werror + +.. note:: Not all deprecated features raise :exc:`DeprecationWarning` when + used. See `Removed features with no migration path`_. + +Removed features with no migration path +--------------------------------------- From ac2f506ba262b6360275fba96bb4fa1d43ae50f7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 11 Jan 2021 18:16:00 -0800 Subject: [PATCH 0264/2111] PYTHON-2453 Add MongoDB Versioned API (#536) Add pymongo.server_api.ServerApi and the MongoClient server_api option. Support Unified Test Format version 1.1 (serverParameters in runOnRequirements) Skip dropRole tests due to SERVER-53499. --- .evergreen/config.yml | 26 + .evergreen/run-tests.sh | 6 + doc/api/pymongo/index.rst | 1 + doc/api/pymongo/server_api.rst | 11 + doc/changelog.rst | 5 + pymongo/client_options.py | 4 +- pymongo/client_session.py | 9 + pymongo/common.py | 11 + pymongo/database.py | 6 + pymongo/message.py | 2 + pymongo/mongo_client.py | 11 + pymongo/pool.py | 22 +- pymongo/server_api.py | 129 ++ pymongo/topology.py | 1 + test/__init__.py | 22 +- test/test_bulk.py | 1 + test/test_pooling.py | 1 + test/test_versioned_api.py | 127 ++ test/unified_format.py | 25 +- .../crud-api-version-1-strict.json | 1073 +++++++++++++++++ test/versioned-api/crud-api-version-1.json | 1067 ++++++++++++++++ ...ommand-helper-no-api-version-declared.json | 117 ++ .../test-commands-deprecation-errors.json | 73 ++ .../test-commands-strict-mode.json | 74 ++ test/versioned-api/transaction-handling.json | 232 ++++ 25 files changed, 3047 insertions(+), 9 deletions(-) create mode 100644 doc/api/pymongo/server_api.rst create mode 100644 pymongo/server_api.py create mode 100644 test/test_versioned_api.py create mode 100644 test/versioned-api/crud-api-version-1-strict.json create mode 100644 test/versioned-api/crud-api-version-1.json create mode 100644 test/versioned-api/runcommand-helper-no-api-version-declared.json create mode 100644 test/versioned-api/test-commands-deprecation-errors.json create mode 100644 test/versioned-api/test-commands-strict-mode.json create mode 100644 test/versioned-api/transaction-handling.json diff --git a/.evergreen/config.yml b/.evergreen/config.yml index dcb2f46886..e4c99ca806 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -290,6 +290,7 @@ functions: STORAGE_ENGINE=${STORAGE_ENGINE} \ DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ + REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update @@ -425,6 +426,7 @@ functions: AUTH=${AUTH} \ SSL=${SSL} \ DATA_LAKE=${DATA_LAKE} \ + MONGODB_API_VERSION=${MONGODB_API_VERSION} \ sh ${PROJECT_DIRECTORY}/.evergreen/run-tests.sh "run enterprise auth tests": @@ -2023,6 +2025,19 @@ axes: variables: SETDEFAULTENCODING: "cp1251" + - id: requireApiVersion + display_name: "requireApiVersion" + values: + - id: "requireApiVersion1" + display_name: "requireApiVersion1" + tags: [ "requireApiVersion_tag" ] + variables: + # REQUIRE_API_VERSION is set to make drivers-evergreen-tools + # start a cluster with the requireApiVersion parameter. + REQUIRE_API_VERSION: "1" + # MONGODB_API_VERSION is the apiVersion to use in the test suite. + MONGODB_API_VERSION: "1" + buildvariants: - matrix_name: "tests-all" matrix_spec: @@ -2605,6 +2620,17 @@ buildvariants: tasks: - name: atlas-data-lake-tests +- matrix_name: "versioned-api-tests" + matrix_spec: + platform: ubuntu-16.04 + python-version: ["2.7", "3.9"] + auth: "auth" + requireApiVersion: "*" + display_name: "requireApiVersion ${python-version}" + tasks: + # Versioned API was introduced in MongoDB 4.7 + - "test-latest-standalone" + - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 50357d49df..d1ecfdcda2 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -26,6 +26,7 @@ GREEN_FRAMEWORK=${GREEN_FRAMEWORK:-} C_EXTENSIONS=${C_EXTENSIONS:-} COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} +MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} SETDEFAULTENCODING=${SETDEFAULTENCODING:-} @@ -35,6 +36,11 @@ if [ -n "$COMPRESSORS" ]; then export COMPRESSORS=$COMPRESSORS fi +if [ -n "$MONGODB_API_VERSION" ]; then + export MONGODB_API_VERSION=$MONGODB_API_VERSION +fi + + export JAVA_HOME=/opt/java/jdk8 if [ "$AUTH" != "noauth" ]; then diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 9fc90a1e1b..fb48c94ea6 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -54,6 +54,7 @@ Sub-modules: read_preferences results son_manipulator + server_api uri_parser write_concern event_loggers diff --git a/doc/api/pymongo/server_api.rst b/doc/api/pymongo/server_api.rst new file mode 100644 index 0000000000..d961d07f1a --- /dev/null +++ b/doc/api/pymongo/server_api.rst @@ -0,0 +1,11 @@ +:mod:`server_api` -- Support for MongoDB Versioned API +====================================================== + +.. automodule:: pymongo.server_api + :synopsis: Support for MongoDB Versioned API + + .. autoclass:: pymongo.server_api.ServerApi + :members: + + .. autoclass:: pymongo.server_api.ServerApiVersion + :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index c9f4ef4dca..91d78e5909 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -17,6 +17,11 @@ Breaking Changes in 4.0 - Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. - Removed :mod:`~pymongo.thread_util`. +Notable improvements +.................... + +- Support for MongoDB Versioned API, see :class:`~pymongo.server_api.ServerApi`. + Issues Resolved ............... diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 9611bb0bda..8892526c70 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -125,6 +125,7 @@ def _parse_pool_options(options): event_listeners = options.get('event_listeners') appname = options.get('appname') driver = options.get('driver') + server_api = options.get('server_api') compression_settings = CompressionSettings( options.get('compressors', []), options.get('zlibcompressionlevel', -1)) @@ -138,7 +139,8 @@ def _parse_pool_options(options): _EventListeners(event_listeners), appname, driver, - compression_settings) + compression_settings, + server_api=server_api) class ClientOptions(object): diff --git a/pymongo/client_session.py b/pymongo/client_session.py index dec2f4f918..c88f3f2776 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -297,6 +297,9 @@ def __init__(self, opts): def active(self): return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) + def starting(self): + return self.state == _TxnState.STARTING + def reset(self): self.state = _TxnState.NONE self.sharded = False @@ -762,6 +765,12 @@ def in_transaction(self): """ return self._transaction.active() + @property + def _starting_transaction(self): + """True if this session is starting a multi-statement transaction. + """ + return self._transaction.starting() + @property def _pinned_address(self): """The mongos address this transaction was created on.""" diff --git a/pymongo/common.py b/pymongo/common.py index 81555ef392..a4d91b7b1c 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -27,6 +27,7 @@ from pymongo.compression_support import (validate_compressors, validate_zlib_compression_level) from pymongo.driver_info import DriverInfo +from pymongo.server_api import ServerApi from pymongo.encryption_options import validate_auto_encryption_opts_or_none from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners @@ -528,6 +529,15 @@ def validate_driver_or_none(option, value): return value +def validate_server_api_or_none(option, value): + """Validate the server_api keyword arg.""" + if value is None: + return value + if not isinstance(value, ServerApi): + raise TypeError("%s must be an instance of ServerApi" % (option,)) + return value + + def validate_is_callable_or_none(option, value): """Validates that 'value' is a callable.""" if value is None: @@ -643,6 +653,7 @@ def validate_tzinfo(dummy, value): NONSPEC_OPTIONS_VALIDATOR_MAP = { 'connect': validate_boolean_or_string, 'driver': validate_driver_or_none, + 'server_api': validate_server_api_or_none, 'fsync': validate_boolean_or_string, 'minpoolsize': validate_non_negative_integer, 'socketkeepalive': validate_boolean_or_string, diff --git a/pymongo/database.py b/pymongo/database.py index a31e918ac3..42055ec816 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -704,6 +704,12 @@ def command(self, command, value=1, check=True, .. note:: :meth:`command` does **not** apply any custom TypeDecoders when decoding the command response. + .. note:: If this client has been configured to use MongoDB Versioned + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automactically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + .. versionchanged:: 3.6 Added ``session`` parameter. diff --git a/pymongo/message.py b/pymongo/message.py index e04d4c3b94..5df1c87074 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -307,6 +307,7 @@ def as_command(self, sock_info): self.name = 'explain' cmd = SON([('explain', cmd)]) session = self.session + sock_info.add_server_api(cmd, session) if session: session._apply_to(cmd, False, self.read_preference) # Explain does not support readConcern. @@ -892,6 +893,7 @@ def __init__(self, database_name, command, sock_info, operation_id, self.compress = True if sock_info.compression_context else False self.op_type = op_type self.codec = codec + sock_info.add_server_api(command, session) def _batch_command(self, docs): namespace = self.db_name + '.$cmd' diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 2948b33216..bf439d1ecd 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -498,8 +498,19 @@ def __init__( and automatically decrypt results. See :ref:`automatic-client-side-encryption` for an example. + | **Versioned API options:** + | (If not set explicitly, Versioned API will not be enabled.) + + - `server_api`: A + :class:`~pymongo.server_api.ServerApi` which configures this + client to use Versioned API. See :ref:`versioned-api-ref` for + details. + .. mongodoc:: connections + .. versionchanged:: 3.12 + Added the ``server_api`` keyword argument. + .. versionchanged:: 3.11 Added the following keyword arguments and URI options: diff --git a/pymongo/pool.py b/pymongo/pool.py index d39c9ed8f4..9c0c4066d6 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -60,6 +60,7 @@ from pymongo.network import (command, receive_message) from pymongo.read_preferences import ReadPreference +from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker # Always use our backport so we always have support for IP address matching @@ -311,7 +312,7 @@ class PoolOptions(object): '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', - '__pause_enabled') + '__pause_enabled', '__server_api') def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, @@ -321,8 +322,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, ssl_match_hostname=True, socket_keepalive=True, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, - pause_enabled=True): - + pause_enabled=True, server_api=None): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds @@ -339,6 +339,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__compression_settings = compression_settings self.__max_connecting = max_connecting self.__pause_enabled = pause_enabled + self.__server_api = server_api self.__metadata = copy.deepcopy(_METADATA) if appname: self.__metadata['application'] = {'name': appname} @@ -495,6 +496,12 @@ def metadata(self): """ return self.__metadata.copy() + @property + def server_api(self): + """A pymongo.server_api.ServerApi or None. + """ + return self.__server_api + def _negotiate_creds(all_credentials): """Return one credential that needs mechanism negotiation, if any. @@ -705,6 +712,7 @@ def command(self, dbname, spec, slave_ok=False, raise ConfigurationError( 'Must be connected to MongoDB 3.4+ to use a collation.') + self.add_server_api(spec, session) if session: session._apply_to(spec, retryable_write, read_preference) self.send_cluster_time(spec, session, client) @@ -894,6 +902,14 @@ def send_cluster_time(self, command, session, client): if self.max_wire_version >= 6 and client: client._send_cluster_time(command, session) + def add_server_api(self, command, session): + """Add server_api parameters.""" + if (session and session.in_transaction and + not session._starting_transaction): + return + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + def update_last_checkin_time(self): self.last_checkin_time = _time() diff --git a/pymongo/server_api.py b/pymongo/server_api.py new file mode 100644 index 0000000000..cf739659e3 --- /dev/null +++ b/pymongo/server_api.py @@ -0,0 +1,129 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for MongoDB Versioned API. + +.. _versioned-api-ref: + +MongoDB Versioned API +===================== + +To configure MongoDB Versioned API, pass the ``server_api`` keyword option to +:class:`~pymongo.mongo_client.MongoClient`:: + + from pymongo.mongo_client import MongoClient + from pymongo.server_api import ServerApi + + client = MongoClient(server_api=ServerApi('1')) + +Note that Versioned API requires MongoDB >=5.0. + +Strict Mode +``````````` + +When ``strict`` mode is configured, commands that are not supported in the +given :attr:`ServerApi.version` will fail. For example:: + + >>> client = MongoClient(server_api=ServerApi('1', strict=True)) + >>> client.test.command('count', 'test') + Traceback (most recent call last): + ... + pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError' + +Classes +======= +""" + + +class ServerApiVersion: + """An enum that defines values for :attr:`ServerApi.version`. + + .. versionadded:: 3.12 + """ + + V1 = "1" + """Server API version "1".""" + + +class ServerApi(object): + """MongoDB Versioned API.""" + def __init__(self, version, strict=None, deprecation_errors=None): + """Options to configure MongoDB Versioned API. + + :Parameters: + - `version`: The API version string. Must be one of the values in + :class:`ServerApiVersion`. + - `strict` (optional): Set to ``True`` to enable API strict mode. + Defaults to ``None`` which means "use the server's default". + - `deprecation_errors` (optional): Set to ``True`` to enable + deprecation errors. Defaults to ``None`` which means "use the + server's default". + + .. versionadded:: 3.12 + """ + if version != ServerApiVersion.V1: + raise ValueError("Unknown ServerApi version: %s" % (version,)) + if strict is not None and not isinstance(strict, bool): + raise TypeError( + "Wrong type for ServerApi strict, value must be an instance " + "of bool, not %s" % (type(strict),)) + if (deprecation_errors is not None and + not isinstance(deprecation_errors, bool)): + raise TypeError( + "Wrong type for ServerApi deprecation_errors, value must be " + "an instance of bool, not %s" % (type(deprecation_errors),)) + self._version = version + self._strict = strict + self._deprecation_errors = deprecation_errors + + @property + def version(self): + """The API version setting. + + This value is sent to the server in the "apiVersion" field. + """ + return self._version + + @property + def strict(self): + """The API strict mode setting. + + When set, this value is sent to the server in the "apiStrict" field. + """ + return self._strict + + @property + def deprecation_errors(self): + """The API deprecation errors setting. + + When set, this value is sent to the server in the + "apiDeprecationErrors" field. + """ + return self._deprecation_errors + + +def _add_to_command(cmd, server_api): + """Internal helper which adds API versioning options to a command. + + :Parameters: + - `cmd`: The command. + - `server_api` (optional): A :class:`ServerApi` or ``None``. + """ + if not server_api: + return + cmd['apiVersion'] = server_api.version + if server_api.strict is not None: + cmd['apiStrict'] = server_api.strict + if server_api.deprecation_errors is not None: + cmd['apiDeprecationErrors'] = server_api.deprecation_errors diff --git a/pymongo/topology.py b/pymongo/topology.py index db00280c5d..2fc455efb8 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -701,6 +701,7 @@ def _create_pool_for_monitor(self, address): appname=options.appname, driver=options.driver, pause_enabled=False, + server_api=options.server_api, ) return self._settings.pool_class(address, monitor_pool_options, diff --git a/test/__init__.py b/test/__init__.py index d9d362288f..560dc16065 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -48,6 +48,7 @@ from bson.son import SON from pymongo import common, message from pymongo.common import partition_node +from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, validate_cert_reqs from test.version import Version @@ -90,6 +91,8 @@ TLS_OPTIONS['tlsCAFile'] = CA_PEM COMPRESSORS = os.environ.get("COMPRESSORS") +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") + def is_server_resolvable(): """Returns True if 'server' is resolvable.""" @@ -200,6 +203,7 @@ def __init__(self): self.version = Version(-1) # Needs to be comparable with Version self.auth_enabled = False self.test_commands_enabled = False + self.server_parameters = None self.is_mongos = False self.mongoses = [] self.is_rs = False @@ -212,9 +216,11 @@ def __init__(self): self.client = None self.conn_lock = threading.Lock() self.is_data_lake = False - if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api @property def ismaster(self): @@ -226,8 +232,7 @@ def _connect(self, host, port, **kwargs): timeout_ms = 10000 else: timeout_ms = 5000 - if COMPRESSORS: - kwargs["compressors"] = COMPRESSORS + kwargs.update(self.default_client_options) client = pymongo.MongoClient( host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) try: @@ -341,6 +346,8 @@ def _init_client(self): self.nodes = set([(host, port)]) self.w = len(ismaster.get("hosts", [])) or 1 self.version = Version.from_client(self.client) + self.server_parameters = self.client.admin.command( + 'getParameter', '*') if 'enableTestCommands=1' in self.cmd_line['argv']: self.test_commands_enabled = True @@ -723,6 +730,12 @@ def require_transactions(self, func): "Transactions are not supported", func=func) + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require(lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func) + def mongos_seeds(self): return ','.join('%s:%s' % address for address in self.mongoses) @@ -766,6 +779,9 @@ def sanitize_cmd(cmd): cp.pop('$db', None) cp.pop('$readPreference', None) cp.pop('lsid', None) + if MONGODB_API_VERSION: + # Versioned api parameters + cp.pop('apiVersion', None) # OP_MSG encoding may move the payload type one field to the # end of the command. Do the same here. name = next(iter(cp)) diff --git a/test/test_bulk.py b/test/test_bulk.py index ec148a4ac5..22e971d576 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -364,6 +364,7 @@ class BulkAuthorizationTestBase(BulkTestBase): @classmethod @client_context.require_auth + @client_context.require_no_api_version def setUpClass(cls): super(BulkAuthorizationTestBase, cls).setUpClass() diff --git a/test/test_pooling.py b/test/test_pooling.py index 024996a03c..57338a965d 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -176,6 +176,7 @@ def create_pool( pool_options = client_context.client._topology_settings.pool_options kwargs['ssl_context'] = pool_options.ssl_context kwargs['ssl_match_hostname'] = pool_options.ssl_match_hostname + kwargs['server_api'] = pool_options.server_api pool = Pool(pair, PoolOptions(*args, **kwargs)) pool.ready() return pool diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py new file mode 100644 index 0000000000..fc7c919af6 --- /dev/null +++ b/test/test_versioned_api.py @@ -0,0 +1,127 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +sys.path[0:0] = [""] + +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi, ServerApiVersion + +from test import client_context, IntegrationTest, unittest +from test.unified_format import generate_test_classes +from test.utils import OvertCommandListener, rs_or_single_client + + +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'versioned-api') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApi(IntegrationTest): + def test_server_api_defaults(self): + api = ServerApi(ServerApiVersion.V1) + self.assertEqual(api.version, '1') + self.assertIsNone(api.strict) + self.assertIsNone(api.deprecation_errors) + + def test_server_api_explicit_false(self): + api = ServerApi('1', strict=False, deprecation_errors=False) + self.assertEqual(api.version, '1') + self.assertFalse(api.strict) + self.assertFalse(api.deprecation_errors) + + def test_server_api_strict(self): + api = ServerApi('1', strict=True, deprecation_errors=True) + self.assertEqual(api.version, '1') + self.assertTrue(api.strict) + self.assertTrue(api.deprecation_errors) + + def test_server_api_validation(self): + with self.assertRaises(ValueError): + ServerApi('2') + with self.assertRaises(TypeError): + ServerApi('1', strict='not-a-bool') + with self.assertRaises(TypeError): + ServerApi('1', deprecation_errors='not-a-bool') + with self.assertRaises(TypeError): + MongoClient(server_api='not-a-ServerApi') + + def assertServerApi(self, event): + self.assertIn('apiVersion', event.command) + self.assertEqual(event.command['apiVersion'], '1') + + def assertNoServerApi(self, event): + self.assertNotIn('apiVersion', event.command) + + def assertServerApiOnlyInFirstCommand(self, events): + self.assertServerApi(events[0]) + for event in events[1:]: + self.assertNoServerApi(event) + + @client_context.require_version_min(4, 7) + def test_command_options(self): + listener = OvertCommandListener() + client = rs_or_single_client(server_api=ServerApi('1'), + event_listeners=[listener]) + self.addCleanup(client.close) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + list(coll.find(batch_size=25)) + client.admin.command('ping') + for event in listener.results['started']: + if event.command_name == 'getMore': + self.assertNoServerApi(event) + else: + self.assertServerApi(event) + + @client_context.require_version_min(4, 7) + @client_context.require_transactions + def test_command_options_txn(self): + listener = OvertCommandListener() + client = rs_or_single_client(server_api=ServerApi('1'), + event_listeners=[listener]) + self.addCleanup(client.close) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + coll.insert_many([{} for _ in range(100)], session=s) + list(coll.find(batch_size=25, session=s)) + client.test.command('find', 'test', session=s) + self.assertServerApiOnlyInFirstCommand(listener.results['started']) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + list(coll.find(batch_size=25, session=s)) + coll.insert_many([{} for _ in range(100)], session=s) + client.test.command('find', 'test', session=s) + self.assertServerApiOnlyInFirstCommand(listener.results['started']) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + client.test.command('find', 'test', session=s) + list(coll.find(batch_size=25, session=s)) + coll.insert_many([{} for _ in range(100)], session=s) + self.assertServerApiOnlyInFirstCommand(listener.results['started']) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 3500b5634d..04d22c2010 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -45,6 +45,7 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult +from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern from test import client_context, unittest, IntegrationTest @@ -106,8 +107,17 @@ def is_run_on_requirement_satisfied(requirement): max_version_satisfied = Version.from_string( req_max_server_version) >= client_context.version + params_satisfied = True + params = requirement.get('serverParameters') + if params: + for param, val in params.items(): + if param not in client_context.server_parameters: + params_satisfied = False + elif client_context.server_parameters[param] != val: + params_satisfied = False + return (topology_satisfied and min_version_satisfied and - max_version_satisfied) + max_version_satisfied and params_satisfied) def parse_collection_or_database_options(options): @@ -200,6 +210,11 @@ def _create_entity(self, entity_spec): if client_context.is_mongos and spec.get('useMultipleMongoses'): kwargs['h'] = client_context.mongos_seeds() kwargs.update(spec.get('uriOptions', {})) + server_api = spec.get('serverApi') + if server_api: + kwargs['server_api'] = ServerApi( + server_api['version'], strict=server_api.get('strict'), + deprecation_errors=server_api.get('deprecationErrors')) client = rs_or_single_client(**kwargs) self[spec['id']] = client self._test_class.addCleanup(client.close) @@ -478,6 +493,12 @@ def match_event(self, expectation, actual): command = spec.get('command') database_name = spec.get('databaseName') if command: + if actual.command_name == 'update': + # TODO: remove this once PYTHON-1744 is done. + # Add upsert and multi fields back into expectations. + for update in command['updates']: + update.setdefault('upsert', False) + update.setdefault('multi', False) self.match_result(command, actual.command) if database_name: self._test_class.assertEqual( @@ -503,7 +524,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string('1.0') + SCHEMA_VERSION = Version.from_string('1.1') @staticmethod def should_run_on(run_on_spec): diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json new file mode 100644 index 0000000000..5b4ccdb659 --- /dev/null +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -0,0 +1,1073 @@ +{ + "description": "CRUD Api Version 1 (strict)", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.7" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "client", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "aggregate on collection appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate on database appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "adminDatabase", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "bulkWrite appends declared API version", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 6, + "x": 66 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "countDocuments appends declared API version", + "operations": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$gt": 11 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "x": { + "$gt": 11 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "deleteMany appends declared API version", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "deleteOne appends declared API version", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 7 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "distinct appends declared API version", + "operations": [ + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isError": true, + "errorContains": "command distinct is not in API Version 1", + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "x", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount appends declared API version", + "skipReason": "DRIVERS-1437 count was removed from API version 1", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": {} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "find command with declared API version appends to the command, but getMore does not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete appends declared API version", + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "remove": true, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace appends declared API version", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate appends declared API version", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "insertMany appends declared API version", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "insertOne appends declared API version", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6, + "x": 66 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "replaceOne appends declared API version", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateMany appends declared API version", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateOne appends declared API version", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json new file mode 100644 index 0000000000..6584d8d2ae --- /dev/null +++ b/test/versioned-api/crud-api-version-1.json @@ -0,0 +1,1067 @@ +{ + "description": "CRUD Api Version 1", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.7" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "deprecationErrors": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "client", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "aggregate on collection appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "aggregate on database appends declared API version", + "operations": [ + { + "name": "aggregate", + "object": "adminDatabase", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "bulkWrite appends declared API version", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 6, + "x": 66 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "countDocuments appends declared API version", + "operations": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$gt": 11 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "x": { + "$gt": 11 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "deleteMany appends declared API version", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "x": { + "$nin": [ + 24, + 34 + ] + } + }, + "limit": 0 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "deleteOne appends declared API version", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 7 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 7 + }, + "limit": 1 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "distinct appends declared API version", + "operations": [ + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "x", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount appends declared API version", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": {} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "find command with declared API version appends to the command, but getMore does not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete appends declared API version", + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "remove": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace appends declared API version", + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate appends declared API version", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "insertMany appends declared API version", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "insertOne appends declared API version", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6, + "x": 66 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "replaceOne appends declared API version", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "updateMany appends declared API version", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, + { + "description": "updateOne appends declared API version", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$inc": { + "x": 1 + } + } + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/runcommand-helper-no-api-version-declared.json b/test/versioned-api/runcommand-helper-no-api-version-declared.json new file mode 100644 index 0000000000..65c24ef460 --- /dev/null +++ b/test/versioned-api/runcommand-helper-no-api-version-declared.json @@ -0,0 +1,117 @@ +{ + "description": "RunCommand helper: No API version declared", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.7", + "serverParameters": { + "requireApiVersion": false + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "runCommand does not inspect or change the command document", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1, + "apiVersion": "server_will_never_support_this_api_version" + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "apiVersion": "server_will_never_support_this_api_version", + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + }, + "commandName": "ping", + "databaseName": "versioned-api-tests" + } + } + ] + } + ] + }, + { + "description": "runCommand does not prevent sending invalid API version declarations", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1, + "apiStrict": true + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "apiVersion": { + "$$exists": false + }, + "apiStrict": true, + "apiDeprecationErrors": { + "$$exists": false + } + }, + "commandName": "ping", + "databaseName": "versioned-api-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/test-commands-deprecation-errors.json b/test/versioned-api/test-commands-deprecation-errors.json new file mode 100644 index 0000000000..f4be168f69 --- /dev/null +++ b/test/versioned-api/test-commands-deprecation-errors.json @@ -0,0 +1,73 @@ +{ + "description": "Test commands: deprecation errors", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.7", + "serverParameters": { + "enableTestCommands": true, + "acceptAPIVersion2": true + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "Running a command that is deprecated raises a deprecation error", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "testDeprecationInVersion2", + "command": { + "testDeprecationInVersion2": 1, + "apiVersion": "2", + "apiDeprecationErrors": true + } + }, + "expectError": { + "isError": true, + "errorContains": "command testDeprecationInVersion2 is deprecated in API Version 2", + "errorCodeName": "APIDeprecationError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "testDeprecationInVersion2": 1, + "apiVersion": "2", + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/test-commands-strict-mode.json b/test/versioned-api/test-commands-strict-mode.json new file mode 100644 index 0000000000..ebace44319 --- /dev/null +++ b/test/versioned-api/test-commands-strict-mode.json @@ -0,0 +1,74 @@ +{ + "description": "Test commands: strict mode", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.7", + "serverParameters": { + "enableTestCommands": true + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + } + ], + "tests": [ + { + "description": "Running a command that is not part of the versioned API results in an error", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "testVersion2", + "command": { + "testVersion2": 1 + } + }, + "expectError": { + "isError": true, + "errorContains": "command testVersion2 is not in API Version 1", + "errorCodeName": "APIStrictError" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "testVersion2": 1, + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json new file mode 100644 index 0000000000..313135c4bf --- /dev/null +++ b/test/versioned-api/transaction-handling.json @@ -0,0 +1,232 @@ +{ + "description": "Transaction handling", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.7", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "versioned-api-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "_yamlAnchors": { + "versions": [ + { + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + }, + { + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + ] + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "versioned-api-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Only the first command in a transaction declares an API version", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} From 55eef0e3beb5defd1e013b30845aa4baa1d12408 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 11 Jan 2021 18:35:02 -0800 Subject: [PATCH 0265/2111] PYTHON-2455 Add DOCS ticket step to release checklist (#541) --- RELEASE.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/RELEASE.rst b/RELEASE.rst index 15f617e26e..d8eaa94b1d 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -102,3 +102,7 @@ Doing a Release 15. Announce the release on: https://developer.mongodb.com/community/forums/c/community/release-notes/ + +16. File a ticket for DOCS highlighting changes in server version and Python + version compatibility or the lack thereof, for example: + https://jira.mongodb.org/browse/DOCS-12644 From b3c26a7f99129f30a38037ab8862b245c29a3c06 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 11 Jan 2021 18:41:36 -0800 Subject: [PATCH 0266/2111] PYTHON-1316 Remove eval, system_js, and SystemJS (#542) --- doc/api/pymongo/database.rst | 4 -- doc/changelog.rst | 3 ++ doc/migrate-to-pymongo4.rst | 21 ++++++++- pymongo/database.py | 82 ------------------------------------ test/test_database.py | 71 ------------------------------- 5 files changed, 23 insertions(+), 158 deletions(-) diff --git a/doc/api/pymongo/database.rst b/doc/api/pymongo/database.rst index b6b0aeba58..a88131b799 100644 --- a/doc/api/pymongo/database.rst +++ b/doc/api/pymongo/database.rst @@ -27,7 +27,3 @@ .. autoattribute:: read_preference .. autoattribute:: write_concern .. autoattribute:: read_concern - - - .. autoclass:: pymongo.database.SystemJS - :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index 91d78e5909..38a14b1fae 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -15,6 +15,9 @@ Breaking Changes in 4.0 ....................... - Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. +- Removed :meth:`~pymongo.database.Database.eval`, + :data:`~pymongo.database.Database.system_js` and + :class:`~pymongo.database.SystemJS`. - Removed :mod:`~pymongo.thread_util`. Notable improvements diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 418d475f87..698a12978b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -7,7 +7,8 @@ PyMongo 4 Migration Guide from pymongo import MongoClient, ReadPreference client = MongoClient() - collection = client.my_database.my_collection + database = client.my_database + collection = database.my_collection PyMongo 4.0 brings a number of improvements as well as some backward breaking changes. This guide provides a roadmap for migrating an existing application @@ -51,3 +52,21 @@ Warnings can also be changed to errors:: Removed features with no migration path --------------------------------------- + +Database.eval, Database.system_js, and SystemJS are removed +........................................................... + +Removed :meth:`~pymongo.database.Database.eval`, +:data:`~pymongo.database.Database.system_js` and +:class:`~pymongo.database.SystemJS`. The eval command was deprecated in +MongoDB 3.0 and removed in MongoDB 4.2. There is no replacement for eval with +MongoDB 4.2+. + +However, on MongoDB <= 4.0, code like this:: + + >>> result = database.eval('function (x) {return x;}', 3) + +can be changed to this:: + + >>> from bson.code import Code + >>> result = database.command('eval', Code('function (x) {return x;}'), args=[3]).get('retval') diff --git a/pymongo/database.py b/pymongo/database.py index 42055ec816..4c7001d161 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -145,14 +145,6 @@ def method_overwritten(instance, method): if method_overwritten(manipulator, "transform_outgoing"): self.__outgoing_manipulators.insert(0, manipulator) - @property - def system_js(self): - """**DEPRECATED**: :class:`SystemJS` helper for this :class:`Database`. - - See the documentation for :class:`SystemJS` for more details. - """ - return SystemJS(self) - @property def client(self): """The client instance for this :class:`Database`.""" @@ -1547,77 +1539,3 @@ def dereference(self, dbref, session=None, **kwargs): self.__name)) return self[dbref.collection].find_one( {"_id": dbref.id}, session=session, **kwargs) - - def eval(self, code, *args): - """**DEPRECATED**: Evaluate a JavaScript expression in MongoDB. - - :Parameters: - - `code`: string representation of JavaScript code to be - evaluated - - `args` (optional): additional positional arguments are - passed to the `code` being evaluated - - .. warning:: the eval command is deprecated in MongoDB 3.0 and - will be removed in a future server version. - """ - warnings.warn("Database.eval() is deprecated", - DeprecationWarning, stacklevel=2) - - if not isinstance(code, Code): - code = Code(code) - - result = self.command("$eval", code, args=args) - return result.get("retval", None) - - def __call__(self, *args, **kwargs): - """This is only here so that some API misusages are easier to debug. - """ - raise TypeError("'Database' object is not callable. If you meant to " - "call the '%s' method on a '%s' object it is " - "failing because no such method exists." % ( - self.__name, self.__client.__class__.__name__)) - - -class SystemJS(object): - """**DEPRECATED**: Helper class for dealing with stored JavaScript. - """ - - def __init__(self, database): - """**DEPRECATED**: Get a system js helper for the database `database`. - - SystemJS will be removed in PyMongo 4.0. - """ - warnings.warn("SystemJS is deprecated", - DeprecationWarning, stacklevel=2) - - if not database.write_concern.acknowledged: - database = database.client.get_database( - database.name, write_concern=DEFAULT_WRITE_CONCERN) - # can't just assign it since we've overridden __setattr__ - object.__setattr__(self, "_db", database) - - def __setattr__(self, name, code): - self._db.system.js.replace_one( - {"_id": name}, {"_id": name, "value": Code(code)}, True) - - def __setitem__(self, name, code): - self.__setattr__(name, code) - - def __delattr__(self, name): - self._db.system.js.delete_one({"_id": name}) - - def __delitem__(self, name): - self.__delattr__(name) - - def __getattr__(self, name): - return lambda *args: self._db.eval(Code("function() { " - "return this[name].apply(" - "this, arguments); }", - scope={'name': name}), *args) - - def __getitem__(self, name): - return self.__getattr__(name) - - def list(self): - """Get a list of the names of the functions stored in this database.""" - return [x["_id"] for x in self._db.system.js.find(projection=["_id"])] diff --git a/test/test_database.py b/test/test_database.py index cdaf31332b..b81353c462 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -806,35 +806,6 @@ def test_deref_kwargs(self): db.dereference(DBRef("test", 4), projection={"_id": False})) - @client_context.require_no_auth - @client_context.require_version_max(4, 1, 0) - def test_eval(self): - db = self.client.pymongo_test - db.test.drop() - - with ignore_deprecations(): - self.assertRaises(TypeError, db.eval, None) - self.assertRaises(TypeError, db.eval, 5) - self.assertRaises(TypeError, db.eval, []) - - self.assertEqual(3, db.eval("function (x) {return x;}", 3)) - self.assertEqual(3, db.eval(u"function (x) {return x;}", 3)) - - self.assertEqual(None, - db.eval("function (x) {db.test.save({y:x});}", 5)) - self.assertEqual(db.test.find_one()["y"], 5) - - self.assertEqual(5, db.eval("function (x, y) {return x + y;}", 2, 3)) - self.assertEqual(5, db.eval("function () {return 5;}")) - self.assertEqual(5, db.eval("2 + 3;")) - - self.assertEqual(5, db.eval(Code("2 + 3;"))) - self.assertRaises(OperationFailure, db.eval, Code("return i;")) - self.assertEqual(2, db.eval(Code("return i;", {"i": 2}))) - self.assertEqual(5, db.eval(Code("i + 3;", {"i": 2}))) - - self.assertRaises(OperationFailure, db.eval, "5 ++ 5;") - # TODO some of these tests belong in the collection level testing. def test_insert_find_one(self): db = self.client.pymongo_test @@ -910,48 +881,6 @@ def test_delete(self): db.test.delete_many({}) self.assertFalse(db.test.find_one()) - @client_context.require_no_auth - @client_context.require_version_max(4, 1, 0) - def test_system_js(self): - db = self.client.pymongo_test - db.system.js.delete_many({}) - - self.assertEqual(0, db.system.js.count_documents({})) - db.system_js.add = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) - self.assertEqual(1, db.system.js.count_documents({})) - self.assertEqual(6, db.system_js.add(1, 5)) - del db.system_js.add - self.assertEqual(0, db.system.js.count_documents({})) - - db.system_js['add'] = "function(a, b) { return a + b; }" - self.assertEqual('add', db.system.js.find_one()['_id']) - self.assertEqual(1, db.system.js.count_documents({})) - self.assertEqual(6, db.system_js['add'](1, 5)) - del db.system_js['add'] - self.assertEqual(0, db.system.js.count_documents({})) - self.assertRaises(OperationFailure, db.system_js.add, 1, 5) - - # TODO right now CodeWScope doesn't work w/ system js - # db.system_js.scope = Code("return hello;", {"hello": 8}) - # self.assertEqual(8, db.system_js.scope()) - - self.assertRaises(OperationFailure, db.system_js.non_existant) - - def test_system_js_list(self): - db = self.client.pymongo_test - db.system.js.delete_many({}) - self.assertEqual([], db.system_js.list()) - - db.system_js.foo = "function() { return 'blah'; }" - self.assertEqual(["foo"], db.system_js.list()) - - db.system_js.bar = "function() { return 'baz'; }" - self.assertEqual(set(["foo", "bar"]), set(db.system_js.list())) - - del db.system_js.foo - self.assertEqual(["bar"], db.system_js.list()) - def test_command_response_without_ok(self): # Sometimes (SERVER-10891) the server's response to a badly-formatted # command document will have no 'ok' field. We should raise From dad9813b1d57f1afc2a4f10bb7f8d6a1922e8500 Mon Sep 17 00:00:00 2001 From: Alexander Golin Date: Tue, 12 Jan 2021 16:12:50 -0500 Subject: [PATCH 0267/2111] PYTHON-2455 Change DOCS to DOCSP and replace example link accordingly (#544) --- RELEASE.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/RELEASE.rst b/RELEASE.rst index d8eaa94b1d..b99c955157 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -103,6 +103,6 @@ Doing a Release 15. Announce the release on: https://developer.mongodb.com/community/forums/c/community/release-notes/ -16. File a ticket for DOCS highlighting changes in server version and Python +16. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: - https://jira.mongodb.org/browse/DOCS-12644 + https://jira.mongodb.org/browse/DOCSP-13536 From 06924cb00bbe36c2f750181b62a3685dcdb59052 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Jan 2021 14:29:58 -0800 Subject: [PATCH 0268/2111] PYTHON-2489 Fix "no server" test suite, fix unified test runCommand (#543) --- test/__init__.py | 17 +++++++++-------- test/unified_format.py | 8 ++++++-- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 560dc16065..47b5fb282e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -920,14 +920,15 @@ def teardown(): if garbage: assert False, '\n'.join(garbage) c = client_context.client - if c and not client_context.is_data_lake: - c.drop_database("pymongo-pooling-tests") - c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - c.drop_database("pymongo_test_mike") - c.drop_database("pymongo_test_bernie") - c.close() + if c: + if not client_context.is_data_lake: + c.drop_database("pymongo-pooling-tests") + c.drop_database("pymongo_test") + c.drop_database("pymongo_test1") + c.drop_database("pymongo_test2") + c.drop_database("pymongo_test_mike") + c.drop_database("pymongo_test_bernie") + c.close() # Jython does not support gc.get_objects. if not sys.platform.startswith('java'): diff --git a/test/unified_format.py b/test/unified_format.py index 04d22c2010..9c3fdd7328 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -674,9 +674,13 @@ def _collectionOperation_createChangeStream(self, target, *args, **kwargs): return self.__entityOperation_createChangeStream( target, *args, **kwargs) - def _databaseOperation_runCommand(self, target, *args, **kwargs): + def _databaseOperation_runCommand(self, target, **kwargs): self.__raise_if_unsupported('runCommand', target, Database) - return target.command(*args, **kwargs) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop('command_name'), 1)]) + ordered_command.update(kwargs['command']) + kwargs['command'] = ordered_command + return target.command(**kwargs) def __entityOperation_aggregate(self, target, *args, **kwargs): self.__raise_if_unsupported('aggregate', target, Database, Collection) From fb4c20adfaf64fe9b2a5c8a7f84b2a38dc12d6b2 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 20 Nov 2020 12:02:40 -0800 Subject: [PATCH 0269/2111] PYTHON-2133 - Stop testing Python 2.7 on Evergreen --- .evergreen/config.yml | 148 +++++------------------- .evergreen/run-atlas-tests.sh | 41 ++----- .evergreen/run-cdecimal-tests.sh | 12 -- .evergreen/run-enterprise-auth-tests.sh | 45 +++---- .evergreen/run-mongodb-aws-ecs-test.sh | 3 +- .evergreen/run-mongodb-aws-test.sh | 2 +- .evergreen/run-ocsp-tests.sh | 4 +- .evergreen/run-perf-tests.sh | 2 +- .evergreen/run-tests.sh | 47 ++------ .evergreen/utils.sh | 3 +- test/test_ssl.py | 7 +- 11 files changed, 69 insertions(+), 245 deletions(-) delete mode 100644 .evergreen/run-cdecimal-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index e4c99ca806..6a6af56c0b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -514,7 +514,7 @@ functions: silent: true script: | cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python -c "import sys, urllib as ul; sys.stdout.write(ul.quote_plus(sys.argv[1]))"' + alias urlencode='python3 -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' USER=$(urlencode ${iam_auth_ecs_account}) PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) MONGODB_URI="mongodb://$USER:$PASS@localhost" @@ -552,8 +552,8 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python -c "import sys, urllib as ul; sys.stdout.write(ul.quote_plus(sys.argv[1]))"' - alias jsonkey='python -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' + alias urlencode='python3 -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' + alias jsonkey='python3 -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' USER=$(jsonkey AccessKeyId) USER=$(urlencode $USER) PASS=$(jsonkey SecretAccessKey) @@ -626,7 +626,7 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias jsonkey='python -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' + alias jsonkey='python3 -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' export AWS_ACCESS_KEY_ID=$(jsonkey AccessKeyId) export AWS_SECRET_ACCESS_KEY=$(jsonkey SecretAccessKey) export AWS_SESSION_TOKEN=$(jsonkey SessionToken) @@ -1193,7 +1193,7 @@ tasks: commands: - func: "run tests" vars: - PYTHON_BINARY: /opt/python/2.7/bin/python + PYTHON_BINARY: /opt/python/3.6/bin/python3 - name: "atlas-connect" tags: ["atlas-connect"] @@ -1799,10 +1799,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "2.7" - display_name: "Python 2.7" - variables: - PYTHON_BINARY: "/opt/python/2.7/bin/python" - id: "3.4" display_name: "Python 3.4" variables: @@ -1828,10 +1824,6 @@ axes: display_name: "Python 3.9" variables: PYTHON_BINARY: "/opt/python/3.9/bin/python3" - - id: "pypy" - display_name: "PyPy" - variables: - PYTHON_BINARY: "/opt/python/pypy/bin/pypy" - id: "pypy3.5" display_name: "PyPy 3.5" variables: @@ -1840,16 +1832,6 @@ axes: display_name: "PyPy 3.6" variables: PYTHON_BINARY: "/opt/python/pypy3.6/bin/pypy3" - - id: "jython2.7" - display_name: "Jython 2.7" - batchtime: 10080 # 7 days - variables: - PYTHON_BINARY: "/opt/python/jython2.7/bin/jython" - # System python - - id: "system-python" - display_name: "Python" - variables: - PYTHON_BINARY: "python" - id: "system-python3" display_name: "Python3" variables: @@ -1858,10 +1840,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "2.7" - display_name: "Python 2.7" - variables: - PYTHON_BINARY: "C:/python/Python27/python.exe" - id: "3.4" display_name: "Python 3.4" variables: @@ -1890,10 +1868,6 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "2.7" - display_name: "32-bit Python 2.7" - variables: - PYTHON_BINARY: "C:/python/32/Python27/python.exe" - id: "3.4" display_name: "32-bit Python 3.4" variables: @@ -2015,16 +1989,6 @@ axes: test_pyopenssl: true batchtime: 10080 # 7 days - # Run setdefaultencoding before running the test suite? - - id: setdefaultencoding - display_name: "setdefaultencoding" - values: - - id: "setdefaultencoding" - display_name: "setdefaultencoding" - tags: ["setdefaultencoding_tag"] - variables: - SETDEFAULTENCODING: "cp1251" - - id: requireApiVersion display_name: "requireApiVersion" values: @@ -2044,7 +2008,6 @@ buildvariants: platform: # OSes that support versions of MongoDB>=2.6 with SSL. - awslinux - - rhel70 auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: &all-server-versions @@ -2063,7 +2026,6 @@ buildvariants: platform: # OSes that support versions of MongoDB>=2.6 with SSL. - awslinux - - rhel70 auth-ssl: "*" encryption: "*" display_name: "Encryption ${platform} ${auth-ssl}" @@ -2196,7 +2158,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: &rhel62-pythons ["2.7", "3.4", "3.5", "3.6", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] + python-version: &rhel62-pythons ["3.4", "3.5", "3.6", "pypy3.5", "pypy3.6"] auth: "*" ssl: "*" coverage: "*" @@ -2206,32 +2168,20 @@ buildvariants: auth: "noauth" ssl: "ssl" coverage: "*" - - platform: rhel62 - python-version: "!jython2.7" # Test Jython with Auth/NoSSL - auth: "auth" - ssl: "nossl" - coverage: "*" - - platform: rhel62 - # PYTHON-498: disable Jython SSL tests - python-version: "jython2.7" - # EVG-1410: exlcude_spec must specifiy values for all axes - auth: "*" - ssl: "ssl" - coverage: "*" display_name: "${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions - matrix_name: "tests-pyopenssl" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9"] + python-version: ["3.4", "3.5", "3.6", "3.7", "3.8", "3.9"] auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.8", "3.9"] + python-version: ["3.4", "3.5", "3.6", "3.8", "3.9"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2244,7 +2194,7 @@ buildvariants: - matrix_name: "tests-pyopenssl-pypy" matrix_spec: platform: debian92 - python-version: ["pypy", "pypy3.5", "pypy3.6"] + python-version: ["pypy3.5", "pypy3.6"] auth: "auth" ssl: "ssl" pyopenssl: "*" @@ -2254,18 +2204,6 @@ buildvariants: # Test standalone and sharded only on 4.4. - '.4.4' -- matrix_name: "test-pyopenssl-old-py27" - matrix_spec: - platform: - # Supported OSes with pre-2.7.9 CPython versions. - - rhel70 # CPython 2.7.5 - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} Pre-2.7.9 Python ${auth}" - tasks: - - '.replica_set' - - matrix_name: "tests-pyopenssl-macOS" matrix_spec: platform: macos-1014 @@ -2291,7 +2229,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["2.7", "3.4", "3.5", "3.6"] + python-version: ["3.4", "3.5", "3.6"] auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2303,7 +2241,7 @@ buildvariants: - matrix_name: "tests-pypy-debian-test-encryption" matrix_spec: platform: debian92 - python-version: ["pypy", "pypy3.5", "pypy3.6"] + python-version: ["pypy3.5", "pypy3.6"] auth-ssl: noauth-nossl encryption: "*" display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" @@ -2319,7 +2257,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: rhel62 - python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] + python-version: ["pypy3.5", "pypy3.6"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2346,20 +2284,15 @@ buildvariants: matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "pypy", "pypy3.5", "pypy3.6", "jython2.7"] + python-version: ["3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "pypy3.5", "pypy3.6"] c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-16.04 - python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] + python-version: ["pypy3.5", "pypy3.6"] c-extensions: "with-c-extensions" compression: "*" - # Jython doesn't support some compression types. - - platform: ubuntu-16.04 - python-version: ["jython2.7"] - c-extensions: "*" - compression: ["snappy", "zstd"] # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy - platform: ubuntu-16.04 python-version: ["3.8", "3.9"] @@ -2390,7 +2323,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: rhel62 - python-version: ["pypy", "pypy3.5", "pypy3.6", "jython2.7"] + python-version: ["pypy3.5", "pypy3.6"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2454,12 +2387,12 @@ buildvariants: display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Storage engine tests on RHEL 6.2 (x86_64) with Python 2.7. +# Storage engine tests on RHEL 6.2 (x86_64) with Python 3.6. - matrix_name: "tests-storage-engines" matrix_spec: platform: rhel62 storage-engine: "*" - python-version: 2.7 + python-version: 3.6 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: @@ -2497,26 +2430,16 @@ buildvariants: add_tasks: - "test-3.0-standalone" -# enableTestCommands=0 tests on RHEL 6.2 (x86_64) with Python 2.7. +# enableTestCommands=0 tests on RHEL 6.2 (x86_64) with Python 3.6. - matrix_name: "test-disableTestCommands" matrix_spec: platform: rhel62 disableTestCommands: "*" - python-version: "2.7" + python-version: "3.6" display_name: "Disable test commands ${python-version} ${platform}" tasks: - ".latest" -# setdefaultencoding tests on RHEL 6.2 (x86_64) with Python 2.7. -- matrix_name: "test-setdefaultencoding" - matrix_spec: - platform: rhel62 - setdefaultencoding: "*" - python-version: "2.7" - display_name: "setdefaultencoding ${python-version} ${platform}" - tasks: - - "test-latest-standalone" - - matrix_name: "test-linux-enterprise-auth" matrix_spec: platform: rhel62 @@ -2538,7 +2461,7 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: rhel62 - python-version: ["2.7", "3.4", "3.6"] + python-version: ["3.4", "3.6"] mod-wsgi-version: "*" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: @@ -2548,7 +2471,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: platform: rhel62 - python-version: 2.7 + python-version: 3.6 display_name: "MockupDB Tests" tasks: - name: "mockupdb" @@ -2556,19 +2479,11 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: platform: rhel62 - python-version: ["2.7", "3.4"] + python-version: ["3.4"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" -- matrix_name: "cdecimal" - matrix_spec: - platform: rhel62 - python-version: 2.7 - display_name: "cdecimal ${python-version} ${platform}" - tasks: - - name: "cdecimal" - - name: "no-server" display_name: "No server test" run_on: @@ -2587,13 +2502,6 @@ buildvariants: expansions: set_xtrace_on: on -- matrix_name: "atlas-connect-pre-279" - matrix_spec: - platform: rhel70 - display_name: "Atlas connect Pre-2.7.9 Python ${platform}" - tasks: - - name: "atlas-connect" - - matrix_name: "atlas-connect" matrix_spec: platform: rhel62 @@ -2613,7 +2521,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.8"] + python-version: ["3.4", "3.8"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2623,7 +2531,7 @@ buildvariants: - matrix_name: "versioned-api-tests" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.9"] + python-version: ["3.6", "3.9"] auth: "auth" requireApiVersion: "*" display_name: "requireApiVersion ${python-version}" @@ -2634,7 +2542,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 - python-version: ["2.7", "3.4", "3.8", "3.9"] + python-version: ["3.4", "3.8", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2646,7 +2554,7 @@ buildvariants: - matrix_name: "ocsp-test-pypy" matrix_spec: platform: debian92 - python-version: ["pypy", "pypy3.5", "pypy3.6"] + python-version: ["pypy3.5", "pypy3.6"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2658,7 +2566,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["2.7", "3.4", "3.9"] + python-version-windows: ["3.4", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2683,7 +2591,7 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: platform: [ubuntu-18.04, macos-1014] - python-version: ["system-python", "system-python3"] + python-version: ["system-python3"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: - name: "aws-auth-test-4.4" diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index 0927f26977..3f8a1b45f0 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -4,49 +4,22 @@ set -o errexit set -o xtrace -export JAVA_HOME=/opt/java/jdk8 - if [ -z "$PYTHON_BINARY" ]; then echo "No python binary specified" - PYTHON_BINARY=$(command -v python || command -v python3) || true + PYTHON_BINARY=$(command -v python3) || true if [ -z "$PYTHON_BINARY" ]; then - echo "Cannot test without python or python3 installed!" + echo "Cannot test without python3 installed!" exit 1 fi fi -IMPL=$(${PYTHON_BINARY} -c "import platform, sys; sys.stdout.write(platform.python_implementation())") - -if [ $IMPL = "Jython" ]; then - # The venv created by createvirtualenv is incompatible with Jython - $PYTHON_BINARY -m virtualenv --never-download --no-wheel atlastest - . atlastest/bin/activate -else - # All other pythons work with createvirtualenv. - . .evergreen/utils.sh - createvirtualenv $PYTHON_BINARY atlastest -fi +. .evergreen/utils.sh +createvirtualenv $PYTHON_BINARY atlastest trap "deactivate; rm -rf atlastest" EXIT HUP -if [ $IMPL = "Jython" -o $IMPL = "PyPy" ]; then - echo "Using Jython or PyPy" - python -m pip install certifi -else - IS_PRE_279=$(python -c "import sys; sys.stdout.write('1' if sys.version_info < (2, 7, 9) else '0')") - if [ $IS_PRE_279 = "1" ]; then - echo "Using a Pre-2.7.9 CPython" - python -m pip install pyopenssl>=17.2.0 service_identity>18.1.0 - else - echo "Using CPython 2.7.9+" - fi -fi - echo "Running tests without dnspython" python test/atlas/test_connection.py -# dnspython is incompatible with Jython so don't test that combination. -if [ $IMPL != "Jython" ]; then - python -m pip install dnspython - echo "Running tests with dnspython" - MUST_TEST_SRV="1" python test/atlas/test_connection.py -fi +python -m pip install dnspython +echo "Running tests with dnspython" +MUST_TEST_SRV="1" python test/atlas/test_connection.py diff --git a/.evergreen/run-cdecimal-tests.sh b/.evergreen/run-cdecimal-tests.sh deleted file mode 100644 index 1a341a5025..0000000000 --- a/.evergreen/run-cdecimal-tests.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -virtualenv -p ${PYTHON_BINARY} cdecimaltest -trap "deactivate; rm -rf cdecimaltest" EXIT HUP -. cdecimaltest/bin/activate -# No cdecimal tarballs on pypi. -pip install http://www.bytereef.org/software/mpdecimal/releases/cdecimal-2.3.tar.gz -python -c 'import sys; print(sys.version)' -python cdecimal_test.py diff --git a/.evergreen/run-enterprise-auth-tests.sh b/.evergreen/run-enterprise-auth-tests.sh index 030a0ab020..e86e489d0d 100644 --- a/.evergreen/run-enterprise-auth-tests.sh +++ b/.evergreen/run-enterprise-auth-tests.sh @@ -5,39 +5,28 @@ set -o errexit echo "Running enterprise authentication tests" -export JAVA_HOME=/opt/java/jdk8 - -PLATFORM="$(${PYTHON_BINARY} -c 'import platform, sys; sys.stdout.write(platform.system())')" - export DB_USER="bob" export DB_PASSWORD="pwd123" -EXTRA_ARGS="" - -# There is no kerberos package for Jython, but we do want to test PLAIN. -if [ ${PLATFORM} != "Java" ]; then - if [ "Windows_NT" = "$OS" ]; then - echo "Setting GSSAPI_PASS" - export GSSAPI_PASS=${SASL_PASS} - export GSSAPI_CANONICALIZE="true" - else - # BUILD-3830 - touch ${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - export KRB5_CONFIG=${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - - echo "Writing keytab" - echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab - echo "Running kinit" - kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p ${PRINCIPAL} - fi - echo "Setting GSSAPI variables" - export GSSAPI_HOST=${SASL_HOST} - export GSSAPI_PORT=${SASL_PORT} - export GSSAPI_PRINCIPAL=${PRINCIPAL} +if [ "Windows_NT" = "$OS" ]; then + echo "Setting GSSAPI_PASS" + export GSSAPI_PASS=${SASL_PASS} + export GSSAPI_CANONICALIZE="true" else - EXTRA_ARGS="-J-XX:-UseGCOverheadLimit -J-Xmx4096m" + # BUILD-3830 + touch ${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty + export KRB5_CONFIG=${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty + + echo "Writing keytab" + echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab + echo "Running kinit" + kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p ${PRINCIPAL} fi +echo "Setting GSSAPI variables" +export GSSAPI_HOST=${SASL_HOST} +export GSSAPI_PORT=${SASL_PORT} +export GSSAPI_PRINCIPAL=${PRINCIPAL} echo "Running tests" ${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} $EXTRA_ARGS setup.py test --xunit-output=xunit-results +${PYTHON_BINARY} setup.py test --xunit-output=xunit-results diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 43954dedd2..00dd8e419a 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -33,7 +33,7 @@ authtest () { echo "Running MONGODB-AWS ECS authentication tests with $PYTHON" $PYTHON --version - $VIRTUALENV -p $PYTHON --system-site-packages --never-download venvaws + $VIRTUALENV -p $PYTHON --never-download venvaws . venvaws/bin/activate cd src @@ -44,5 +44,4 @@ authtest () { rm -rf venvaws } -PYTHON=$(command -v python) authtest PYTHON=$(command -v python3) authtest diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index e276a82bb4..6ab480bd72 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -54,7 +54,7 @@ authtest () { echo "Running MONGODB-AWS authentication tests with $PYTHON" $PYTHON --version - $VIRTUALENV -p $PYTHON --system-site-packages --never-download venvaws + $VIRTUALENV -p $PYTHON --never-download venvaws if [ "Windows_NT" = "$OS" ]; then . venvaws/Scripts/activate else diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh index 75fba7e2c9..0eb101aaa2 100644 --- a/.evergreen/run-ocsp-tests.sh +++ b/.evergreen/run-ocsp-tests.sh @@ -8,9 +8,9 @@ set -o errexit if [ -z "$PYTHON_BINARY" ]; then echo "No python binary specified" - PYTHON=$(command -v python || command -v python3) || true + PYTHON=$(command -v python3) || true if [ -z "$PYTHON" ]; then - echo "Cannot test without python or python3 installed!" + echo "Cannot test without python3 installed!" exit 1 fi else diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index bbebf34c98..41f154b739 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -14,7 +14,7 @@ export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data" export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" MTCBIN=/opt/mongodbtoolchain/v2/bin -VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python2.7" +VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python3" $VIRTUALENV pyperftest . pyperftest/bin/activate diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index d1ecfdcda2..19188726ed 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -11,7 +11,6 @@ set -o errexit # Exit the script with error if any of the commands fail # COVERAGE If non-empty, run the test suite with coverage. # TEST_ENCRYPTION If non-empty, install pymongocrypt. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# SETDEFAULTENCODING The encoding to set via sys.setdefaultencoding. if [ -n "${SET_XTRACE_ON}" ]; then set -o xtrace @@ -29,7 +28,6 @@ COMPRESSORS=${COMPRESSORS:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} -SETDEFAULTENCODING=${SETDEFAULTENCODING:-} DATA_LAKE=${DATA_LAKE:-} if [ -n "$COMPRESSORS" ]; then @@ -40,9 +38,6 @@ if [ -n "$MONGODB_API_VERSION" ]; then export MONGODB_API_VERSION=$MONGODB_API_VERSION fi - -export JAVA_HOME=/opt/java/jdk8 - if [ "$AUTH" != "noauth" ]; then if [ -z "$DATA_LAKE" ]; then export DB_USER="bob" @@ -62,43 +57,26 @@ fi . .evergreen/utils.sh if [ -z "$PYTHON_BINARY" ]; then - VIRTUALENV=$(command -v virtualenv) || true - if [ -z "$VIRTUALENV" ]; then - PYTHON=$(command -v python || command -v python3) || true - if [ -z "$PYTHON" ]; then - echo "Cannot test without python or python3 installed!" - exit 1 - fi - else - $VIRTUALENV pymongotestvenv - . pymongotestvenv/bin/activate - PYTHON=python - trap "deactivate; rm -rf pymongotestvenv" EXIT HUP + # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a + # system python3 doesn't exist. This seems to only be an issue on RHEL 7.x. + PYTHON=$(command -v python3 || command -v /opt/mongodbtoolchain/v2/bin/python3) || true + if [ -z "$PYTHON" ]; then + echo "Cannot test without python3 installed!" + exit 1 fi elif [ "$COMPRESSORS" = "snappy" ]; then - $PYTHON_BINARY -m virtualenv --system-site-packages --never-download snappytest + $PYTHON_BINARY -m virtualenv --never-download snappytest . snappytest/bin/activate trap "deactivate; rm -rf snappytest" EXIT HUP # 0.5.2 has issues in pypy3(.5) pip install python-snappy==0.5.1 PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then - $PYTHON_BINARY -m virtualenv --system-site-packages --never-download zstdtest + $PYTHON_BINARY -m virtualenv --never-download zstdtest . zstdtest/bin/activate trap "deactivate; rm -rf zstdtest" EXIT HUP pip install zstandard PYTHON=python -elif [ -n "$SETDEFAULTENCODING" ]; then - $PYTHON_BINARY -m virtualenv --system-site-packages --never-download encodingtest - . encodingtest/bin/activate - trap "deactivate; rm -rf encodingtest" EXIT HUP - mkdir test-sitecustomize - cat < test-sitecustomize/sitecustomize.py -import sys -sys.setdefaultencoding("$SETDEFAULTENCODING") -EOT - export PYTHONPATH="$(pwd)/test-sitecustomize" - PYTHON=python else PYTHON="$PYTHON_BINARY" fi @@ -158,13 +136,6 @@ if [ -n "$TEST_ENCRYPTION" ]; then fi -PYTHON_IMPL=$($PYTHON -c "import platform, sys; sys.stdout.write(platform.python_implementation())") -if [ $PYTHON_IMPL = "Jython" ]; then - PYTHON_ARGS="-J-XX:-UseGCOverheadLimit -J-Xmx4096m" -else - PYTHON_ARGS="" -fi - if [ -z "$DATA_LAKE" ]; then TEST_ARGS="" else @@ -217,7 +188,7 @@ if [ -z "$GREEN_FRAMEWORK" ]; then # causing this script to exit. $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" fi - $COVERAGE_OR_PYTHON $PYTHON_ARGS $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT + $COVERAGE_OR_PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT else # --no_ext has to come before "test" so there is no way to toggle extensions here. $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index ff73be01d9..cbc07d52fd 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -10,9 +10,8 @@ createvirtualenv () { if $PYTHON -m virtualenv --version; then VIRTUALENV="$PYTHON -m virtualenv --never-download" elif $PYTHON -m venv -h>/dev/null; then + # System virtualenv might not be compatible with the python3 on our path VIRTUALENV="$PYTHON -m venv" - elif command -v virtualenv; then - VIRTUALENV="$(command -v virtualenv) -p $PYTHON --never-download" else echo "Cannot test without virtualenv" exit 1 diff --git a/test/test_ssl.py b/test/test_ssl.py index 7c76da6428..a5efcefddf 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -51,15 +51,12 @@ import OpenSSL import requests import service_identity + from pymongo.ocsp_support import _load_trusted_ca_certs _HAVE_PYOPENSSL = True except ImportError: - pass - -if _HAVE_PYOPENSSL: - from pymongo.ocsp_support import _load_trusted_ca_certs -else: _load_trusted_ca_certs = None + if HAVE_SSL: import ssl From dea4b9019371720d81b37245f24abbbc3107e0fc Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 13 Jan 2021 20:04:16 -0800 Subject: [PATCH 0270/2111] PYTHON-2133 Drop install support for Python 2 --- .evergreen/config.yml | 19 -- .travis.yml | 2 - MANIFEST.in | 1 - README.rst | 15 +- RELEASE.rst | 16 +- cdecimal_test.py | 66 ------ doc/atlas.rst | 27 +-- doc/compatibility-policy.rst | 16 +- doc/installation.rst | 108 ++------- ez_setup.py | 414 ----------------------------------- setup.py | 132 ++++------- 11 files changed, 77 insertions(+), 739 deletions(-) delete mode 100644 cdecimal_test.py delete mode 100644 ez_setup.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6a6af56c0b..408a210d9b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -349,16 +349,6 @@ functions: ${PREPARE_SHELL} PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh - "run cdecimal tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-cdecimal-tests.sh - "run doctests": - command: shell.exec type: test @@ -1179,15 +1169,6 @@ tasks: TOPOLOGY: "replica_set" - func: "run mod_wsgi tests" - - name: "cdecimal" - tags: ["cdecimal"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run cdecimal tests" - - name: "no-server" tags: ["no-server"] commands: diff --git a/.travis.yml b/.travis.yml index 5dd72f6da5..585f3c28df 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,13 +1,11 @@ language: python python: - - 2.7 - 3.4 - 3.5 - 3.6 - 3.7 - 3.8 - - pypy - pypy3.5 services: diff --git a/MANIFEST.in b/MANIFEST.in index 5b8d2a5651..d017d16ab0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,7 +1,6 @@ include README.rst include LICENSE include THIRD-PARTY-NOTICES -include ez_setup.py recursive-include doc *.rst recursive-include doc *.py recursive-include doc *.conf diff --git a/README.rst b/README.rst index 4cb6658df0..0341ae44bb 100644 --- a/README.rst +++ b/README.rst @@ -89,7 +89,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 2.7, 3.4+, PyPy, and PyPy3.5+. +PyMongo supports CPython 3.4+ and PyPy3.5+. Optional dependencies: @@ -119,9 +119,6 @@ PyMongo:: $ python -m pip install pymongo[tls] -.. note:: Users of Python versions older than 2.7.9 will also - receive the dependencies for OCSP when using the tls extra. - OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests `_ and `service_identity @@ -149,16 +146,6 @@ command:: $ python -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption] -Other optional packages: - -- `backports.pbkdf2 `_, - improves authentication performance with SCRAM-SHA-1 and SCRAM-SHA-256. - It especially improves performance on Python versions older than 2.7.8. -- `monotonic `_ adds support for - a monotonic clock, which improves reliability in environments - where clock adjustments are frequent. Not needed in Python 3. - - Additional dependencies are: - (to generate documentation) sphinx_ diff --git a/RELEASE.rst b/RELEASE.rst index b99c955157..220908084b 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -33,10 +33,10 @@ Doing a Release 1. PyMongo is tested on Evergreen. Ensure the latest commit are passing CI as expected: https://evergreen.mongodb.com/waterfall/mongo-python-driver. - To test locally, ``python setup.py test`` will build the C extensions and - test. ``python tools/clean.py`` will remove the extensions, - and then ``python setup.py --no_ext test`` will run the tests without - them. You can also run the doctests: ``python setup.py doc -t``. + To test locally, ``python3 setup.py test`` will build the C extensions and + test. ``python3 tools/clean.py`` will remove the extensions, + and then ``python3 setup.py --no_ext test`` will run the tests without + them. You can also run the doctests: ``python3 setup.py doc -t``. 2. Check Jira to ensure all the tickets in this version have been completed. @@ -63,17 +63,11 @@ Doing a Release the next steps let's assume we unpacked these files into the following paths:: $ ls path/to/manylinux - pymongo--cp27-cp27m-manylinux1_i686.whl - ... pymongo--cp38-cp38-manylinux2014_x86_64.whl - $ ls path/to/mac/ - pymongo--cp27-cp27m-macosx_10_14_intel.whl ... - pymongo--py2.7-macosx-10.14-intel.egg $ ls path/to/windows/ - pymongo--cp27-cp27m-win32.whl - ... pymongo--cp38-cp38-win_amd64.whl + ... 10. Build the source distribution:: diff --git a/cdecimal_test.py b/cdecimal_test.py deleted file mode 100644 index 88262ffd98..0000000000 --- a/cdecimal_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2017 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test PyMongo with cdecimal monkey-patched over stdlib decimal.""" - -import getopt -import sys - -try: - import cdecimal - _HAVE_CDECIMAL = True -except ImportError: - _HAVE_CDECIMAL = False - - -def run(args): - """Run tests with cdecimal monkey-patched over stdlib decimal.""" - # Monkey-patch. - sys.modules['decimal'] = cdecimal - - # Run the tests. - sys.argv[:] = ['setup.py', 'test'] + list(args) - import setup - - -def main(): - """Parse options and run tests.""" - usage = """python %s - -Test PyMongo with cdecimal monkey-patched over decimal.""" % (sys.argv[0],) - - try: - opts, args = getopt.getopt( - sys.argv[1:], "h", ["help"]) - except getopt.GetoptError as err: - print(str(err)) - print(usage) - sys.exit(2) - - for option_name, _ in opts: - if option_name in ("-h", "--help"): - print(usage) - sys.exit() - else: - assert False, "unhandled option" - - if not _HAVE_CDECIMAL: - print("The cdecimal package is not installed.") - sys.exit(1) - - run(args) # Command line args to setup.py, like what test to run. - - -if __name__ == '__main__': - main() diff --git a/doc/atlas.rst b/doc/atlas.rst index 59605b58e7..0a64b294ce 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -7,32 +7,7 @@ Atlas to :class:`~pymongo.mongo_client.MongoClient`:: client = pymongo.MongoClient() -Connections to Atlas require TLS/SSL. For connections using TLS/SSL, PyMongo -may require third party dependencies as determined by your version of Python. -With PyMongo 3.3+, you can install PyMongo 3.3+ and any TLS/SSL-related -dependencies using the following pip command:: - - $ python -m pip install pymongo[tls] - -Starting with PyMongo 3.11 this installs `PyOpenSSL -`_, `requests`_ -and `service_identity -`_ -for users of Python versions older than 2.7.9. PyOpenSSL supports SNI for these -old Python versions, allowing applictions to connect to Altas free and shared -tier instances. - -Earlier versions of PyMongo require you to manually install the dependencies. -For a list of TLS/SSL-related dependencies, see :doc:`examples/tls`. - -.. note:: Connecting to Atlas "Free Tier" or "Shared Cluster" instances - requires Server Name Indication (SNI) support. SNI support requires CPython - 2.7.9 / PyPy 2.5.1 or newer or PyMongo 3.11+ with PyOpenSSL. - To check if your version of Python supports SNI run the following command:: - - $ python -c "import ssl; print(getattr(ssl, 'HAS_SNI', False))" - - You should see "True". +Connections to Atlas require TLS/SSL. .. warning:: Industry best practices recommend, and some regulations require, the use of TLS 1.1 or newer. Though no application changes are required for diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst index 08c5d57937..ed22c97351 100644 --- a/doc/compatibility-policy.rst +++ b/doc/compatibility-policy.rst @@ -18,9 +18,9 @@ effort to release at least one minor version that *deprecates* it. We add `DeprecationWarning`_. You can ensure your code is future-proof by running your code with the latest PyMongo release and looking for DeprecationWarnings. -Starting with Python 2.7, the interpreter silences DeprecationWarnings by -default. For example, the following code uses the deprecated ``insert`` -method but does not raise any warning: +The interpreter silences DeprecationWarnings by default. For example, the +following code uses the deprecated ``insert`` method but does not raise any +warning: .. code-block:: python @@ -32,13 +32,13 @@ method but does not raise any warning: To print deprecation warnings to stderr, run python with "-Wd":: - $ python -Wd insert.py + $ python3 -Wd insert.py insert.py:4: DeprecationWarning: insert is deprecated. Use insert_one or insert_many instead. client.test.test.insert({}) You can turn warnings into exceptions with "python -We":: - $ python -We insert.py + $ python3 -We insert.py Traceback (most recent call last): File "insert.py", line 4, in client.test.test.insert({}) @@ -55,8 +55,8 @@ deprecated PyMongo features. .. _semantic versioning: http://semver.org/ .. _DeprecationWarning: - https://docs.python.org/2/library/exceptions.html#exceptions.DeprecationWarning + https://docs.python.org/3/library/exceptions.html#DeprecationWarning -.. _the warnings module: https://docs.python.org/2/library/warnings.html +.. _the warnings module: https://docs.python.org/3/library/warnings.html -.. _the -W command line option: https://docs.python.org/2/using/cmdline.html#cmdoption-W +.. _the -W command line option: https://docs.python.org/3/using/cmdline.html#cmdoption-w diff --git a/doc/installation.rst b/doc/installation.rst index f9f01a671b..ea437aa4bd 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -15,37 +15,20 @@ Installing with pip We recommend using `pip `_ to install pymongo on all platforms:: - $ python -m pip install pymongo + $ python3 -m pip install pymongo To get a specific version of pymongo:: - $ python -m pip install pymongo==3.5.1 + $ python3 -m pip install pymongo==3.5.1 To upgrade using pip:: - $ python -m pip install --upgrade pymongo - -.. note:: - pip does not support installing python packages in .egg format. If you would - like to install PyMongo from a .egg provided on pypi use easy_install - instead. - -Installing with easy_install ----------------------------- - -To use ``easy_install`` from -`setuptools `_ do:: - - $ python -m easy_install pymongo - -To upgrade do:: - - $ python -m easy_install -U pymongo + $ python3 -m pip install --upgrade pymongo Dependencies ------------ -PyMongo supports CPython 2.7, 3.4+, PyPy, and PyPy3.5+. +PyMongo supports CPython 3.4+ and PyPy3.5+. Optional dependencies: @@ -54,66 +37,44 @@ GSSAPI authentication requires `pykerberos `_ on Windows. The correct dependency can be installed automatically along with PyMongo:: - $ python -m pip install pymongo[gssapi] + $ python3 -m pip install pymongo[gssapi] :ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws `_:: - $ python -m pip install pymongo[aws] + $ python3 -m pip install pymongo[aws] Support for mongodb+srv:// URIs requires `dnspython `_:: - $ python -m pip install pymongo[srv] - -TLS / SSL support may require `ipaddress -`_ and `certifi -`_ or `wincertstore -`_ depending on the Python -version in use. The necessary dependencies can be installed along with -PyMongo:: - - $ python -m pip install pymongo[tls] - -.. note:: Users of Python versions older than 2.7.9 will also - receive the dependencies for OCSP when using the tls extra. + $ python3 -m pip install pymongo[srv] :ref:`OCSP` requires `PyOpenSSL `_, `requests `_ and `service_identity `_:: - $ python -m pip install pymongo[ocsp] + $ python3 -m pip install pymongo[ocsp] Wire protocol compression with snappy requires `python-snappy `_:: - $ python -m pip install pymongo[snappy] + $ python3 -m pip install pymongo[snappy] Wire protocol compression with zstandard requires `zstandard `_:: - $ python -m pip install pymongo[zstd] + $ python3 -m pip install pymongo[zstd] :ref:`Client-Side Field Level Encryption` requires `pymongocrypt `_:: - $ python -m pip install pymongo[encryption] + $ python3 -m pip install pymongo[encryption] You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption] - -Other optional packages: - -- `backports.pbkdf2 `_, - improves authentication performance with SCRAM-SHA-1 and SCRAM-SHA-256. - It especially improves performance on Python versions older than 2.7.8. -- `monotonic `_ adds support for - a monotonic clock, which improves reliability in environments - where clock adjustments are frequent. Not needed in Python 3. - + $ python3 -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption] Installing from source ---------------------- @@ -124,7 +85,7 @@ latest source from GitHub and install the driver from the resulting tree:: $ git clone git://github.com/mongodb/mongo-python-driver.git pymongo $ cd pymongo/ - $ python setup.py install + $ python3 setup.py install Installing from source on Unix .............................. @@ -172,9 +133,8 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 2.7 >= 2.7.4 or Python 3.4+ downloaded from -python.org. In all cases Xcode must be installed with 'UNIX Development -Support'. +versions of Python 3.4+ downloaded from python.org. In all cases Xcode must be +installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with Xcode throws an error when it encounters compiler flags it doesn't recognize. @@ -209,8 +169,7 @@ requirements apply to both CPython and ActiveState's ActivePython: For Python 3.5 and newer install Visual Studio 2015. For Python 3.4 install Visual Studio 2010. You must use the full version of Visual Studio 2010 as Visual C++ Express does not provide 64-bit compilers. Make sure that -you check the "x64 Compilers and Tools" option under Visual C++. For Python 2.7 -install the `Microsoft Visual C++ Compiler for Python 2.7`_. +you check the "x64 Compilers and Tools" option under Visual C++. 32-bit Windows ~~~~~~~~~~~~~~ @@ -219,10 +178,6 @@ For Python 3.5 and newer install Visual Studio 2015. For Python 3.4 install Visual C++ 2010 Express. -For Python 2.7 install the `Microsoft Visual C++ Compiler for Python 2.7`_ - -.. _`Microsoft Visual C++ Compiler for Python 2.7`: https://www.microsoft.com/en-us/download/details.aspx?id=44266 - .. _install-no-c: Installing Without C Extensions @@ -237,34 +192,7 @@ If you wish to install PyMongo without the C extensions, even if the extensions build properly, it can be done using a command line option to *setup.py*:: - $ python setup.py --no_ext install - -Building PyMongo egg Packages ------------------------------ - -Some organizations do not allow compilers and other build tools on production -systems. To install PyMongo on these systems with C extensions you may need to -build custom egg packages. Make sure that you have installed the dependencies -listed above for your operating system then run the following command in the -PyMongo source directory:: - - $ python setup.py bdist_egg - -The egg package can be found in the dist/ subdirectory. The file name will -resemble “pymongo-3.6-py2.7-linux-x86_64.egg” but may have a different name -depending on your platform and the version of python you use to compile. - -.. warning:: - - These “binary distributions,” will only work on systems that resemble the - environment on which you built the package. In other words, ensure that - operating systems and versions of Python and architecture (i.e. “32” or “64” - bit) match. - -Copy this file to the target system and issue the following command to install the -package:: - - $ sudo python -m easy_install pymongo-3.6-py2.7-linux-x86_64.egg + $ python3 setup.py --no_ext install Installing a beta or release candidate -------------------------------------- @@ -275,4 +203,4 @@ but can be found on the `GitHub tags page `_. They can be installed by passing the full URL for the tag to pip:: - $ python -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0rc0.tar.gz + $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0rc0.tar.gz diff --git a/ez_setup.py b/ez_setup.py deleted file mode 100644 index 800c31ef6b..0000000000 --- a/ez_setup.py +++ /dev/null @@ -1,414 +0,0 @@ -#!/usr/bin/env python - -""" -Setuptools bootstrapping installer. - -Maintained at https://github.com/pypa/setuptools/tree/bootstrap. - -Run this script to install or upgrade setuptools. - -This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details. -""" - -import os -import shutil -import sys -import tempfile -import zipfile -import optparse -import subprocess -import platform -import textwrap -import contextlib - -from distutils import log - -try: - from urllib.request import urlopen -except ImportError: - from urllib2 import urlopen - -try: - from site import USER_SITE -except ImportError: - USER_SITE = None - -# 33.1.1 is the last version that supports setuptools self upgrade/installation. -DEFAULT_VERSION = "33.1.1" -DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" -DEFAULT_SAVE_DIR = os.curdir -DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools" - -MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' - -log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION)) - - -def _python_cmd(*args): - """ - Execute a command. - - Return True if the command succeeded. - """ - args = (sys.executable,) + args - return subprocess.call(args) == 0 - - -def _install(archive_filename, install_args=()): - """Install Setuptools.""" - with archive_context(archive_filename): - # installing - log.warn('Installing Setuptools') - if not _python_cmd('setup.py', 'install', *install_args): - log.warn('Something went wrong during the installation.') - log.warn('See the error message above.') - # exitcode will be 2 - return 2 - - -def _build_egg(egg, archive_filename, to_dir): - """Build Setuptools egg.""" - with archive_context(archive_filename): - # building an egg - log.warn('Building a Setuptools egg in %s', to_dir) - _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) - # returning the result - log.warn(egg) - if not os.path.exists(egg): - raise IOError('Could not build the egg.') - - -class ContextualZipFile(zipfile.ZipFile): - - """Supplement ZipFile class to support context manager for Python 2.6.""" - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def __new__(cls, *args, **kwargs): - """Construct a ZipFile or ContextualZipFile as appropriate.""" - if hasattr(zipfile.ZipFile, '__exit__'): - return zipfile.ZipFile(*args, **kwargs) - return super(ContextualZipFile, cls).__new__(cls) - - -@contextlib.contextmanager -def archive_context(filename): - """ - Unzip filename to a temporary directory, set to the cwd. - - The unzipped target is cleaned up after. - """ - tmpdir = tempfile.mkdtemp() - log.warn('Extracting in %s', tmpdir) - old_wd = os.getcwd() - try: - os.chdir(tmpdir) - try: - with ContextualZipFile(filename) as archive: - archive.extractall() - except zipfile.BadZipfile as err: - if not err.args: - err.args = ('', ) - err.args = err.args + ( - MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), - ) - raise - - # going in the directory - subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) - os.chdir(subdir) - log.warn('Now working in %s', subdir) - yield - - finally: - os.chdir(old_wd) - shutil.rmtree(tmpdir) - - -def _do_download(version, download_base, to_dir, download_delay): - """Download Setuptools.""" - py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) - tp = 'setuptools-{version}-{py_desig}.egg' - egg = os.path.join(to_dir, tp.format(**locals())) - if not os.path.exists(egg): - archive = download_setuptools(version, download_base, - to_dir, download_delay) - _build_egg(egg, archive, to_dir) - sys.path.insert(0, egg) - - # Remove previously-imported pkg_resources if present (see - # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). - if 'pkg_resources' in sys.modules: - _unload_pkg_resources() - - import setuptools - setuptools.bootstrap_install_from = egg - - -def use_setuptools( - version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=DEFAULT_SAVE_DIR, download_delay=15): - """ - Ensure that a setuptools version is installed. - - Return None. Raise SystemExit if the requested version - or later cannot be installed. - """ - to_dir = os.path.abspath(to_dir) - - # prior to importing, capture the module state for - # representative modules. - rep_modules = 'pkg_resources', 'setuptools' - imported = set(sys.modules).intersection(rep_modules) - - try: - import pkg_resources - pkg_resources.require("setuptools>=" + version) - # a suitable version is already installed - return - except ImportError: - # pkg_resources not available; setuptools is not installed; download - pass - except pkg_resources.DistributionNotFound: - # no version of setuptools was found; allow download - pass - except pkg_resources.VersionConflict as VC_err: - if imported: - _conflict_bail(VC_err, version) - - # otherwise, unload pkg_resources to allow the downloaded version to - # take precedence. - del pkg_resources - _unload_pkg_resources() - - return _do_download(version, download_base, to_dir, download_delay) - - -def _conflict_bail(VC_err, version): - """ - Setuptools was imported prior to invocation, so it is - unsafe to unload it. Bail out. - """ - conflict_tmpl = textwrap.dedent(""" - The required version of setuptools (>={version}) is not available, - and can't be installed while this script is running. Please - install a more recent version first, using - 'easy_install -U setuptools'. - - (Currently using {VC_err.args[0]!r}) - """) - msg = conflict_tmpl.format(**locals()) - sys.stderr.write(msg) - sys.exit(2) - - -def _unload_pkg_resources(): - sys.meta_path = [ - importer - for importer in sys.meta_path - if importer.__class__.__module__ != 'pkg_resources.extern' - ] - del_modules = [ - name for name in sys.modules - if name.startswith('pkg_resources') - ] - for mod_name in del_modules: - del sys.modules[mod_name] - - -def _clean_check(cmd, target): - """ - Run the command to download target. - - If the command fails, clean up before re-raising the error. - """ - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - if os.access(target, os.F_OK): - os.unlink(target) - raise - - -def download_file_powershell(url, target): - """ - Download the file at url to target using Powershell. - - Powershell will validate trust. - Raise an exception if the command cannot complete. - """ - target = os.path.abspath(target) - ps_cmd = ( - "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " - "[System.Net.CredentialCache]::DefaultCredentials; " - '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' - % locals() - ) - cmd = [ - 'powershell', - '-Command', - ps_cmd, - ] - _clean_check(cmd, target) - - -def has_powershell(): - """Determine if Powershell is available.""" - if platform.system() != 'Windows': - return False - cmd = ['powershell', '-Command', 'echo test'] - with open(os.path.devnull, 'wb') as devnull: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except Exception: - return False - return True -download_file_powershell.viable = has_powershell - - -def download_file_curl(url, target): - cmd = ['curl', url, '--location', '--silent', '--output', target] - _clean_check(cmd, target) - - -def has_curl(): - cmd = ['curl', '--version'] - with open(os.path.devnull, 'wb') as devnull: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except Exception: - return False - return True -download_file_curl.viable = has_curl - - -def download_file_wget(url, target): - cmd = ['wget', url, '--quiet', '--output-document', target] - _clean_check(cmd, target) - - -def has_wget(): - cmd = ['wget', '--version'] - with open(os.path.devnull, 'wb') as devnull: - try: - subprocess.check_call(cmd, stdout=devnull, stderr=devnull) - except Exception: - return False - return True -download_file_wget.viable = has_wget - - -def download_file_insecure(url, target): - """Use Python to download the file, without connection authentication.""" - src = urlopen(url) - try: - # Read all the data in one block. - data = src.read() - finally: - src.close() - - # Write all the data in one block to avoid creating a partial file. - with open(target, "wb") as dst: - dst.write(data) -download_file_insecure.viable = lambda: True - - -def get_best_downloader(): - downloaders = ( - download_file_powershell, - download_file_curl, - download_file_wget, - download_file_insecure, - ) - viable_downloaders = (dl for dl in downloaders if dl.viable()) - return next(viable_downloaders, None) - - -def download_setuptools( - version=DEFAULT_VERSION, download_base=DEFAULT_URL, - to_dir=DEFAULT_SAVE_DIR, delay=15, - downloader_factory=get_best_downloader): - """ - Download setuptools from a specified location and return its filename. - - `version` should be a valid setuptools version number that is available - as an sdist for download under the `download_base` URL (which should end - with a '/'). `to_dir` is the directory where the egg will be downloaded. - `delay` is the number of seconds to pause before an actual download - attempt. - - ``downloader_factory`` should be a function taking no arguments and - returning a function for downloading a URL to a target. - """ - # making sure we use the absolute path - to_dir = os.path.abspath(to_dir) - zip_name = "setuptools-%s.zip" % version - url = download_base + zip_name - saveto = os.path.join(to_dir, zip_name) - if not os.path.exists(saveto): # Avoid repeated downloads - log.warn("Downloading %s", url) - downloader = downloader_factory() - downloader(url, saveto) - return os.path.realpath(saveto) - - -def _build_install_args(options): - """ - Build the arguments to 'python setup.py install' on the setuptools package. - - Returns list of command line arguments. - """ - return ['--user'] if options.user_install else [] - - -def _parse_args(): - """Parse the command line for options.""" - parser = optparse.OptionParser() - parser.add_option( - '--user', dest='user_install', action='store_true', default=False, - help='install in user site package') - parser.add_option( - '--download-base', dest='download_base', metavar="URL", - default=DEFAULT_URL, - help='alternative URL from where to download the setuptools package') - parser.add_option( - '--insecure', dest='downloader_factory', action='store_const', - const=lambda: download_file_insecure, default=get_best_downloader, - help='Use internal, non-validating downloader' - ) - parser.add_option( - '--version', help="Specify which version to download", - default=DEFAULT_VERSION, - ) - parser.add_option( - '--to-dir', - help="Directory to save (and re-use) package", - default=DEFAULT_SAVE_DIR, - ) - options, args = parser.parse_args() - # positional arguments are ignored - return options - - -def _download_args(options): - """Return args for download_setuptools function from cmdline args.""" - return dict( - version=options.version, - download_base=options.download_base, - downloader_factory=options.downloader_factory, - to_dir=options.to_dir, - ) - - -def main(): - """Install or upgrade setuptools and EasyInstall.""" - options = _parse_args() - archive = download_setuptools(**_download_args(options)) - return _install(archive, _build_install_args(options)) - -if __name__ == '__main__': - sys.exit(main()) diff --git a/setup.py b/setup.py index b921040454..dc54b07046 100755 --- a/setup.py +++ b/setup.py @@ -5,8 +5,8 @@ import warnings -if sys.version_info[:2] < (2, 7): - raise RuntimeError("Python version >= 2.7 required.") +if sys.version_info[:2] < (3, 4): + raise RuntimeError("Python version >= 3.4 required.") # Hack to silence atexit traceback in some Python versions @@ -15,14 +15,7 @@ except ImportError: pass -# Don't force people to install setuptools unless -# we have to. -try: - from setuptools import setup, __version__ as _setuptools_version -except ImportError: - from ez_setup import use_setuptools - use_setuptools() - from setuptools import setup, __version__ as _setuptools_version +from setuptools import setup, __version__ as _setuptools_version from distutils.cmd import Command from distutils.command.build_ext import build_ext @@ -152,45 +145,45 @@ def run(self): raise RuntimeError( "You must install Sphinx to build or test the documentation.") - if sys.version_info[0] >= 3: - import doctest - from doctest import OutputChecker as _OutputChecker - - # Match u or U (possibly followed by r or R), removing it. - # r/R can follow u/U but not precede it. Don't match the - # single character string 'u' or 'U'. - _u_literal_re = re.compile( - r"(\W|^)(?=1.16.0,<2.0.0"], } -# https://jira.mongodb.org/browse/PYTHON-2117 -# Environment marker support didn't settle down until version 20.10 -# https://setuptools.readthedocs.io/en/latest/history.html#v20-10-0 -_use_env_markers = tuple(map(int, _setuptools_version.split('.')[:2])) > (20, 9) - -# TLS and DNS extras -# We install PyOpenSSL and service_identity for Python < 2.7.9 to -# get support for SNI, which is required to connection to Altas -# free and shared tier. -if sys.version_info[0] == 2: - if _use_env_markers: - # For building wheels on Python versions >= 2.7.9 - for req in pyopenssl_reqs: - extras_require['tls'].append( - "%s ; python_full_version < '2.7.9'" % (req,)) - if sys.platform == 'win32': - extras_require['tls'].append( - "wincertstore>=0.2 ; python_full_version < '2.7.9'") - else: - extras_require['tls'].append( - "certifi ; python_full_version < '2.7.9'") - elif sys.version_info < (2, 7, 9): - # For installing from source or egg files on Python versions - # older than 2.7.9, or systems that have setuptools versions - # older than 20.10. - extras_require['tls'].extend(pyopenssl_reqs) - if sys.platform == 'win32': - extras_require['tls'].append("wincertstore>=0.2") - else: - extras_require['tls'].append("certifi") - extras_require.update({'srv': ["dnspython>=1.16.0,<1.17.0"]}) - extras_require.update({'tls': ["ipaddress"]}) -else: - extras_require.update({'srv': ["dnspython>=1.16.0,<2.0.0"]}) - # GSSAPI extras if sys.platform == 'win32': extras_require['gssapi'] = ["winkerberos>=0.5.0"] @@ -404,7 +362,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.4", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", @@ -412,8 +370,6 @@ def build_extension(self, ext): "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", From 112812928bf3273f4e5522f39215a8dd4e118873 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 14 Jan 2021 14:08:58 -0800 Subject: [PATCH 0271/2111] PYTHON-2482 Test Versioned API with a server started with acceptAPIVersion2 (#545) --- .evergreen/config.yml | 20 ++- .../crud-api-version-1-strict.json | 1 + .../test-commands-deprecation-errors.json | 3 +- test/versioned-api/transaction-handling.json | 156 ++++++++++++++++++ 4 files changed, 174 insertions(+), 6 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 408a210d9b..8f301b8277 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1970,18 +1970,27 @@ axes: test_pyopenssl: true batchtime: 10080 # 7 days - - id: requireApiVersion - display_name: "requireApiVersion" + - id: versionedApi + display_name: "versionedApi" values: + # Test against a cluster with requireApiVersion=1. - id: "requireApiVersion1" display_name: "requireApiVersion1" - tags: [ "requireApiVersion_tag" ] + tags: [ "versionedApi_tag" ] variables: # REQUIRE_API_VERSION is set to make drivers-evergreen-tools # start a cluster with the requireApiVersion parameter. REQUIRE_API_VERSION: "1" # MONGODB_API_VERSION is the apiVersion to use in the test suite. MONGODB_API_VERSION: "1" + # Test against a cluster with acceptAPIVersion2 but without + # requireApiVersion, and don't automatically add apiVersion to + # clients created in the test suite. + - id: "acceptAPIVersion2" + display_name: "acceptAPIVersion2" + tags: [ "versionedApi_tag" ] + variables: + ORCHESTRATION_FILE: "versioned-api-testing.json" buildvariants: - matrix_name: "tests-all" @@ -2514,8 +2523,9 @@ buildvariants: platform: ubuntu-16.04 python-version: ["3.6", "3.9"] auth: "auth" - requireApiVersion: "*" - display_name: "requireApiVersion ${python-version}" + versionedApi: "*" + display_name: "Versioned API ${versionedApi} ${python-version}" + batchtime: 10080 # 7 days tasks: # Versioned API was introduced in MongoDB 4.7 - "test-latest-standalone" diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json index 5b4ccdb659..9604368a16 100644 --- a/test/versioned-api/crud-api-version-1-strict.json +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -141,6 +141,7 @@ }, { "description": "aggregate on database appends declared API version", + "skipReason": "DRIVERS-1505 $listLocalSessions is not supported in API version 1", "operations": [ { "name": "aggregate", diff --git a/test/versioned-api/test-commands-deprecation-errors.json b/test/versioned-api/test-commands-deprecation-errors.json index f4be168f69..bdbc3f92ce 100644 --- a/test/versioned-api/test-commands-deprecation-errors.json +++ b/test/versioned-api/test-commands-deprecation-errors.json @@ -6,7 +6,8 @@ "minServerVersion": "4.7", "serverParameters": { "enableTestCommands": true, - "acceptAPIVersion2": true + "acceptAPIVersion2": true, + "requireApiVersion": false } } ], diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json index 313135c4bf..64e9706b5e 100644 --- a/test/versioned-api/transaction-handling.json +++ b/test/versioned-api/transaction-handling.json @@ -227,6 +227,162 @@ ] } ] + }, + { + "description": "Committing a transaction twice does not append server API options", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session" + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + } + ] + } + ] } ] } + From c70071df1db841964ecf517893d2c23f3cb51764 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 14 Jan 2021 14:00:06 -0800 Subject: [PATCH 0272/2111] PYTHON-2133 Remove Py2 compatibility from bson --- bson/__init__.py | 164 +++++++++++++++--------------------------- bson/binary.py | 4 +- bson/code.py | 14 ++-- bson/codec_options.py | 29 ++++---- bson/dbref.py | 13 ++-- bson/decimal128.py | 27 ++----- bson/json_util.py | 38 +++++----- bson/objectid.py | 15 ++-- bson/raw_bson.py | 7 +- bson/regex.py | 5 +- bson/son.py | 7 +- bson/timestamp.py | 5 +- test/test_raw_bson.py | 3 + 13 files changed, 124 insertions(+), 207 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 37794c325c..0f63929bba 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -75,6 +75,7 @@ from codecs import (utf_8_decode as _utf_8_decode, utf_8_encode as _utf_8_encode) +from collections import abc as _abc from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, OLD_UUID_SUBTYPE, @@ -92,13 +93,6 @@ from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.py3compat import (abc, - b, - PY3, - iteritems, - text_type, - string_type, - reraise) from bson.regex import Regex from bson.son import SON, RE_TYPE from bson.timestamp import Timestamp @@ -147,46 +141,18 @@ _UNPACK_TIMESTAMP_FROM = struct.Struct("= obj_end: raise InvalidBSON("invalid object length") @@ -256,7 +222,7 @@ def _get_array(data, view, position, obj_end, opts, element_name): """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] end = position + size - 1 - if data[end] != _OBJEND: + if data[end] != 0: raise InvalidBSON("bad eoo") position += 4 @@ -321,8 +287,8 @@ def _get_binary(data, view, position, obj_end, opts, dummy1): uuid_representation = UuidRepresentation.PYTHON_LEGACY return binary_value.as_uuid(uuid_representation), end - # Python3 special case. Decode subtype 0 to 'bytes'. - if PY3 and subtype == 0: + # Decode subtype 0 to 'bytes'. + if subtype == 0: value = data[position:end] else: value = Binary(data[position:end], subtype) @@ -410,27 +376,27 @@ def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions _ELEMENT_GETTER = { - _maybe_ord(BSONNUM): _get_float, - _maybe_ord(BSONSTR): _get_string, - _maybe_ord(BSONOBJ): _get_object, - _maybe_ord(BSONARR): _get_array, - _maybe_ord(BSONBIN): _get_binary, - _maybe_ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # Deprecated undefined - _maybe_ord(BSONOID): _get_oid, - _maybe_ord(BSONBOO): _get_boolean, - _maybe_ord(BSONDAT): _get_date, - _maybe_ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), - _maybe_ord(BSONRGX): _get_regex, - _maybe_ord(BSONREF): _get_ref, # Deprecated DBPointer - _maybe_ord(BSONCOD): _get_code, - _maybe_ord(BSONSYM): _get_string, # Deprecated symbol - _maybe_ord(BSONCWS): _get_code_w_scope, - _maybe_ord(BSONINT): _get_int, - _maybe_ord(BSONTIM): _get_timestamp, - _maybe_ord(BSONLON): _get_int64, - _maybe_ord(BSONDEC): _get_decimal128, - _maybe_ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), - _maybe_ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w)} + ord(BSONNUM): _get_float, + ord(BSONSTR): _get_string, + ord(BSONOBJ): _get_object, + ord(BSONARR): _get_array, + ord(BSONBIN): _get_binary, + ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # Deprecated undefined + ord(BSONOID): _get_oid, + ord(BSONBOO): _get_boolean, + ord(BSONDAT): _get_date, + ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), + ord(BSONRGX): _get_regex, + ord(BSONREF): _get_ref, # Deprecated DBPointer + ord(BSONCOD): _get_code, + ord(BSONSYM): _get_string, # Deprecated symbol + ord(BSONCWS): _get_code_w_scope, + ord(BSONINT): _get_int, + ord(BSONTIM): _get_timestamp, + ord(BSONLON): _get_int64, + ord(BSONDEC): _get_decimal128, + ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), + ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w)} if _USE_C: @@ -488,7 +454,7 @@ def _bson_to_dict(data, opts): except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() - reraise(InvalidBSON, exc_value, exc_tb) + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) if _USE_C: _bson_to_dict = _cbson._bson_to_dict @@ -498,7 +464,7 @@ def _bson_to_dict(data, opts): _PACK_LENGTH_SUBTYPE = struct.Struct(">> class AttributeDict(dict): ... # A dict that supports attribute access. @@ -212,7 +213,7 @@ class CodecOptions(_options_base): :Parameters: - `document_class`: BSON documents returned in queries will be decoded to an instance of this class. Must be a subclass of - :class:`~collections.MutableMapping`. Defaults to :class:`dict`. + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone aware instances of :class:`~datetime.datetime`. Otherwise they will be naive. Defaults to ``False``. @@ -247,11 +248,11 @@ def __new__(cls, document_class=dict, uuid_representation=None, unicode_decode_error_handler="strict", tzinfo=None, type_registry=None): - if not (issubclass(document_class, abc.MutableMapping) or + if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): raise TypeError("document_class must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping") + "sublass of collections.abc.MutableMapping") if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") if uuid_representation is None: @@ -259,7 +260,7 @@ def __new__(cls, document_class=dict, elif uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError("uuid_representation must be a value " "from bson.binary.UuidRepresentation") - if not isinstance(unicode_decode_error_handler, (string_type, None)): + if not isinstance(unicode_decode_error_handler, (str, None)): raise ValueError("unicode_decode_error_handler must be a string " "or None") if tzinfo is not None: diff --git a/bson/dbref.py b/bson/dbref.py index 3ec5463492..9ef842a31a 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -16,7 +16,6 @@ from copy import deepcopy -from bson.py3compat import iteritems, string_type from bson.son import SON @@ -45,12 +44,10 @@ def __init__(self, collection, id, database=None, _extra={}, **kwargs): .. mongodoc:: dbrefs """ - if not isinstance(collection, string_type): - raise TypeError("collection must be an " - "instance of %s" % string_type.__name__) - if database is not None and not isinstance(database, string_type): - raise TypeError("database must be an " - "instance of %s" % string_type.__name__) + if not isinstance(collection, str): + raise TypeError("collection must be an instance of str") + if database is not None and not isinstance(database, str): + raise TypeError("database must be an instance of str") self.__collection = collection self.__id = id @@ -104,7 +101,7 @@ def as_doc(self): def __repr__(self): extra = "".join([", %s=%r" % (k, v) - for k, v in iteritems(self.__kwargs)]) + for k, v in self.__kwargs.items()]) if self.database is None: return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, diff --git a/bson/decimal128.py b/bson/decimal128.py index 0c0fc10c67..528e0f9a35 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -23,18 +23,6 @@ import struct import sys -from bson.py3compat import (PY3 as _PY3, - string_type as _string_type) - - -if _PY3: - _from_bytes = int.from_bytes # pylint: disable=no-member, invalid-name -else: - import binascii - def _from_bytes(value, dummy, _int=int, _hexlify=binascii.hexlify): - "An implementation of int.from_bytes for python 2.x." - return _int(_hexlify(value), 16) - _PACK_64 = struct.Struct("= 3.3, cdecimal - decimal.Context(clamp=1) # pylint: disable=unexpected-keyword-arg - _CTX_OPTIONS['clamp'] = 1 -except TypeError: - # Python < 3.3 - _CTX_OPTIONS['_clamp'] = 1 - _DEC128_CTX = decimal.Context(**_CTX_OPTIONS.copy()) @@ -237,7 +218,7 @@ class Decimal128(object): _type_marker = 19 def __init__(self, value): - if isinstance(value, (_string_type, decimal.Decimal)): + if isinstance(value, (str, decimal.Decimal)): self.__high, self.__low = _decimal_to_128(value) elif isinstance(value, (list, tuple)): if len(value) != 2: @@ -285,7 +266,7 @@ def to_decimal(self): # cdecimal only accepts a tuple for digits. digits = tuple( - int(digit) for digit in str(_from_bytes(arr, 'big'))) + int(digit) for digit in str(int.from_bytes(arr, 'big'))) with decimal.localcontext(_DEC128_CTX) as ctx: return ctx.create_decimal((sign, digits, exponent)) diff --git a/bson/json_util.py b/bson/json_util.py index 38c39a12c8..a702ea988c 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -126,8 +126,6 @@ from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.py3compat import (PY3, iteritems, integer_types, string_type, - text_type) from bson.regex import Regex from bson.timestamp import Timestamp from bson.tz_util import utc @@ -443,10 +441,10 @@ def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS): """Recursive helper method that converts BSON types so they can be converted into json. """ - if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support + if hasattr(obj, 'items'): return SON(((k, _json_convert(v, json_options)) - for k, v in iteritems(obj))) - elif hasattr(obj, '__iter__') and not isinstance(obj, (text_type, bytes)): + for k, v in obj.items())) + elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes)): return list((_json_convert(v, json_options) for v in obj)) try: return default(obj, json_options) @@ -518,7 +516,7 @@ def _parse_legacy_uuid(doc, json_options): """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) - if not isinstance(doc["$uuid"], text_type): + if not isinstance(doc["$uuid"], str): raise TypeError('$uuid must be a string: %s' % (doc,)) if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: return Binary.from_uuid(uuid.UUID(doc["$uuid"])) @@ -542,7 +540,7 @@ def _binary_or_uuid(data, subtype, json_options): uuid_representation = UuidRepresentation.PYTHON_LEGACY return binary_value.as_uuid(uuid_representation) - if PY3 and subtype == 0: + if subtype == 0: return data return Binary(data, subtype) @@ -561,9 +559,9 @@ def _parse_canonical_binary(doc, json_options): binary = doc["$binary"] b64 = binary["base64"] subtype = binary["subType"] - if not isinstance(b64, string_type): + if not isinstance(b64, str): raise TypeError('$binary base64 must be a string: %s' % (doc,)) - if not isinstance(subtype, string_type) or len(subtype) > 2: + if not isinstance(subtype, str) or len(subtype) > 2: raise TypeError('$binary subType must be a string at most 2 ' 'characters: %s' % (doc,)) if len(binary) != 2: @@ -580,7 +578,7 @@ def _parse_canonical_datetime(doc, json_options): if len(doc) != 1: raise TypeError('Bad $date, extra field(s): %s' % (doc,)) # mongoexport 2.6 and newer - if isinstance(dtm, string_type): + if isinstance(dtm, str): # Parse offset if dtm[-1] == 'Z': dt = dtm[:-1] @@ -645,7 +643,7 @@ def _parse_canonical_symbol(doc): symbol = doc['$symbol'] if len(doc) != 1: raise TypeError('Bad $symbol, extra field(s): %s' % (doc,)) - return text_type(symbol) + return str(symbol) def _parse_canonical_code(doc): @@ -704,7 +702,7 @@ def _parse_canonical_int32(doc): i_str = doc['$numberInt'] if len(doc) != 1: raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,)) - if not isinstance(i_str, string_type): + if not isinstance(i_str, str): raise TypeError('$numberInt must be string: %s' % (doc,)) return int(i_str) @@ -722,7 +720,7 @@ def _parse_canonical_double(doc): d_str = doc['$numberDouble'] if len(doc) != 1: raise TypeError('Bad $numberDouble, extra field(s): %s' % (doc,)) - if not isinstance(d_str, string_type): + if not isinstance(d_str, str): raise TypeError('$numberDouble must be string: %s' % (doc,)) return float(d_str) @@ -732,7 +730,7 @@ def _parse_canonical_decimal128(doc): d_str = doc['$numberDecimal'] if len(doc) != 1: raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,)) - if not isinstance(d_str, string_type): + if not isinstance(d_str, str): raise TypeError('$numberDecimal must be string: %s' % (doc,)) return Decimal128(d_str) @@ -809,7 +807,7 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): flags += "u" if obj.flags & re.VERBOSE: flags += "x" - if isinstance(obj.pattern, text_type): + if isinstance(obj.pattern, str): pattern = obj.pattern else: pattern = obj.pattern.decode('utf-8') @@ -831,7 +829,7 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): ('$scope', _json_convert(obj.scope, json_options))]) if isinstance(obj, Binary): return _encode_binary(obj, obj.subtype, json_options) - if PY3 and isinstance(obj, bytes): + if isinstance(obj, bytes): return _encode_binary(obj, 0, json_options) if isinstance(obj, uuid.UUID): if json_options.strict_uuid: @@ -845,10 +843,10 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): if isinstance(obj, bool): return obj if (json_options.json_mode == JSONMode.CANONICAL and - isinstance(obj, integer_types)): + isinstance(obj, int)): if -2 ** 31 <= obj < 2 ** 31: - return {'$numberInt': text_type(obj)} - return {'$numberLong': text_type(obj)} + return {'$numberInt': str(obj)} + return {'$numberLong': str(obj)} if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float): if math.isnan(obj): return {'$numberDouble': 'NaN'} @@ -859,5 +857,5 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): # repr() will return the shortest string guaranteed to produce the # original value, when float() is called on it. str produces a # shorter string in Python 2. - return {'$numberDouble': text_type(repr(obj))} + return {'$numberDouble': str(repr(obj))} raise TypeError("%r is not JSON serializable" % obj) diff --git a/bson/objectid.py b/bson/objectid.py index c6fa652f3b..ef601e8481 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -27,7 +27,6 @@ from random import SystemRandom from bson.errors import InvalidId -from bson.py3compat import PY3, bytes_from_hex, string_type, text_type from bson.tz_util import utc @@ -203,17 +202,17 @@ def __validate(self, oid): if isinstance(oid, ObjectId): self.__id = oid.binary # bytes or unicode in python 2, str in python 3 - elif isinstance(oid, string_type): + elif isinstance(oid, str): if len(oid) == 24: try: - self.__id = bytes_from_hex(oid) + self.__id = bytes.fromhex(oid) except (TypeError, ValueError): _raise_invalid_id(oid) else: _raise_invalid_id(oid) else: - raise TypeError("id must be an instance of (bytes, %s, ObjectId), " - "not %s" % (text_type.__name__, type(oid))) + raise TypeError("id must be an instance of (bytes, str, ObjectId), " + "not %s" % (type(oid),)) @property def binary(self): @@ -251,15 +250,13 @@ def __setstate__(self, value): # ObjectIds pickled in python 2.x used `str` for __id. # In python 3.x this has to be converted to `bytes` # by encoding latin-1. - if PY3 and isinstance(oid, text_type): + if isinstance(oid, str): self.__id = oid.encode('latin-1') else: self.__id = oid def __str__(self): - if PY3: - return binascii.hexlify(self.__id).decode() - return binascii.hexlify(self.__id) + return binascii.hexlify(self.__id).decode() def __repr__(self): return "ObjectId('%s')" % (str(self),) diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 6a7cf5045d..31b0d1b66c 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -15,14 +15,15 @@ """Tools for representing raw BSON documents. """ +from collections.abc import Mapping as _Mapping + from bson import _raw_to_dict, _get_object_size -from bson.py3compat import abc, iteritems from bson.codec_options import ( DEFAULT_CODEC_OPTIONS as DEFAULT, _RAW_BSON_DOCUMENT_MARKER) from bson.son import SON -class RawBSONDocument(abc.Mapping): +class RawBSONDocument(_Mapping): """Representation for a MongoDB document that provides access to the raw BSON bytes that compose it. @@ -88,7 +89,7 @@ def raw(self): def items(self): """Lazily decode and iterate elements in this document.""" - return iteritems(self.__inflated) + return self.__inflated.items() @property def __inflated(self): diff --git a/bson/regex.py b/bson/regex.py index f9d39ad83d..0624f9ab34 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -18,7 +18,6 @@ import re from bson.son import RE_TYPE -from bson.py3compat import string_type, text_type def str_flags_to_int(str_flags): @@ -86,11 +85,11 @@ def __init__(self, pattern, flags=0): - `flags`: (optional) an integer bitmask, or a string of flag characters like "im" for IGNORECASE and MULTILINE """ - if not isinstance(pattern, (text_type, bytes)): + if not isinstance(pattern, (str, bytes)): raise TypeError("pattern must be a string, not %s" % type(pattern)) self.pattern = pattern - if isinstance(flags, string_type): + if isinstance(flags, str): self.flags = str_flags_to_int(flags) elif isinstance(flags, int): self.flags = flags diff --git a/bson/son.py b/bson/son.py index 701cb23186..bef655c8f7 100644 --- a/bson/son.py +++ b/bson/son.py @@ -21,8 +21,7 @@ import copy import re -from bson.py3compat import abc, iteritems - +from collections.abc import Mapping as _Mapping # This sort of sucks, but seems to be as good as it gets... # This is essentially the same as re._pattern_type @@ -178,10 +177,10 @@ def to_dict(self): def transform_value(value): if isinstance(value, list): return [transform_value(v) for v in value] - elif isinstance(value, abc.Mapping): + elif isinstance(value, _Mapping): return dict([ (k, transform_value(v)) - for k, v in iteritems(value)]) + for k, v in value.items()]) else: return value diff --git a/bson/timestamp.py b/bson/timestamp.py index 7ea755117a..5e497f4c8c 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -18,7 +18,6 @@ import calendar import datetime -from bson.py3compat import integer_types from bson.tz_util import utc UPPERBOUND = 4294967296 @@ -52,9 +51,9 @@ def __init__(self, time, inc): if time.utcoffset() is not None: time = time - time.utcoffset() time = int(calendar.timegm(time.timetuple())) - if not isinstance(time, integer_types): + if not isinstance(time, int): raise TypeError("time must be an instance of int") - if not isinstance(inc, integer_types): + if not isinstance(inc, int): raise TypeError("inc must be an instance of int") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index d0a394e1c1..652dba9278 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -13,8 +13,11 @@ # limitations under the License. import datetime +import sys import uuid +sys.path[0:0] = [""] + from bson import decode, encode from bson.binary import Binary, JAVA_LEGACY from bson.codec_options import CodecOptions From 387bfa0bfae07bbdb138d901e894a0241a762877 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 11:29:23 -0800 Subject: [PATCH 0273/2111] PYTHON-2310 Remove MongoClient.fsync, unlock, and is_locked (#546) --- doc/api/pymongo/mongo_client.rst | 3 - doc/changelog.rst | 3 + doc/migrate-to-pymongo4.rst | 46 ++++++++++++ pymongo/mongo_client.py | 116 ------------------------------- test/test_client.py | 38 ---------- test/test_monitoring.py | 26 ------- test/test_session.py | 16 ----- 7 files changed, 49 insertions(+), 199 deletions(-) diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index ac774679c1..e7dcb727bf 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -46,6 +46,3 @@ .. automethod:: close_cursor .. automethod:: kill_cursors .. automethod:: set_cursor_manager - .. autoattribute:: is_locked - .. automethod:: fsync - .. automethod:: unlock diff --git a/doc/changelog.rst b/doc/changelog.rst index 38a14b1fae..904db340f0 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -18,6 +18,9 @@ Breaking Changes in 4.0 - Removed :meth:`~pymongo.database.Database.eval`, :data:`~pymongo.database.Database.system_js` and :class:`~pymongo.database.SystemJS`. +- Removed :meth:`pymongo.mongo_client.MongoClient.fsync`, + :meth:`pymongo.mongo_client.MongoClient.unlock`, and + :attr:`pymongo.mongo_client.MongoClient.is_locked`. - Removed :mod:`~pymongo.thread_util`. Notable improvements diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 698a12978b..b0f747679e 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -50,6 +50,52 @@ Warnings can also be changed to errors:: .. note:: Not all deprecated features raise :exc:`DeprecationWarning` when used. See `Removed features with no migration path`_. +MongoClient +----------- + +MongoClient.fsync is removed +............................ + +Removed :meth:`pymongo.mongo_client.MongoClient.fsync`. Run the +`fsync command`_ directly with :meth:`~pymongo.database.Database.command` +instead. For example:: + + client.admin.command('fsync', lock=True) + +.. _fsync command: https://docs.mongodb.com/manual/reference/command/fsync/ + +MongoClient.unlock is removed +............................. + +Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Users of MongoDB +version 3.2 or newer can run the `fsyncUnlock command`_ directly with +:meth:`~pymongo.database.Database.command`:: + + client.admin.command('fsyncUnlock') + +Users of MongoDB version 2.6 and 3.0 can query the "unlock" virtual +collection:: + + client.admin["$cmd.sys.unlock"].find_one() + +.. _fsyncUnlock command: https://docs.mongodb.com/manual/reference/command/fsyncUnlock/ + +MongoClient.is_locked is removed +................................ + +Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Users of MongoDB +version 3.2 or newer can run the `currentOp command`_ directly with +:meth:`~pymongo.database.Database.command`:: + + is_locked = client.admin.command('currentOp').get('fsyncLock') + +Users of MongoDB version 2.6 and 3.0 can query the "inprog" virtual +collection:: + + is_locked = client.admin["$cmd.sys.inprog"].find_one().get('fsyncLock') + +.. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ + Removed features with no migration path --------------------------------------- diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index bf439d1ecd..83025861b7 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -2104,122 +2104,6 @@ def _database_default_options(self, name): read_preference=ReadPreference.PRIMARY, write_concern=DEFAULT_WRITE_CONCERN) - @property - def is_locked(self): - """**DEPRECATED**: Is this server locked? While locked, all write - operations are blocked, although read operations may still be allowed. - Use :meth:`unlock` to unlock. - - Deprecated. Users of MongoDB version 3.2 or newer can run the - `currentOp command`_ directly with - :meth:`~pymongo.database.Database.command`:: - - is_locked = client.admin.command('currentOp').get('fsyncLock') - - Users of MongoDB version 2.6 and 3.0 can query the "inprog" virtual - collection:: - - is_locked = client.admin["$cmd.sys.inprog"].find_one().get('fsyncLock') - - .. versionchanged:: 3.11 - Deprecated. - - .. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ - """ - warnings.warn("is_locked is deprecated. See the documentation for " - "more information.", DeprecationWarning, stacklevel=2) - ops = self._database_default_options('admin')._current_op() - return bool(ops.get('fsyncLock', 0)) - - def fsync(self, **kwargs): - """**DEPRECATED**: Flush all pending writes to datafiles. - - Optional parameters can be passed as keyword arguments: - - `lock`: If True lock the server to disallow writes. - - `async`: If True don't block while synchronizing. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. note:: Starting with Python 3.7 `async` is a reserved keyword. - The async option to the fsync command can be passed using a - dictionary instead:: - - options = {'async': True} - client.fsync(**options) - - Deprecated. Run the `fsync command`_ directly with - :meth:`~pymongo.database.Database.command` instead. For example:: - - client.admin.command('fsync', lock=True) - - .. versionchanged:: 3.11 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. warning:: `async` and `lock` can not be used together. - - .. warning:: MongoDB does not support the `async` option - on Windows and will raise an exception on that - platform. - - .. _fsync command: https://docs.mongodb.com/manual/reference/command/fsync/ - """ - warnings.warn("fsync is deprecated. Use " - "client.admin.command('fsync') instead.", - DeprecationWarning, stacklevel=2) - self.admin.command("fsync", - read_preference=ReadPreference.PRIMARY, **kwargs) - - def unlock(self, session=None): - """**DEPRECATED**: Unlock a previously locked server. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - Deprecated. Users of MongoDB version 3.2 or newer can run the - `fsyncUnlock command`_ directly with - :meth:`~pymongo.database.Database.command`:: - - client.admin.command('fsyncUnlock') - - Users of MongoDB version 2.6 and 3.0 can query the "unlock" virtual - collection:: - - client.admin["$cmd.sys.unlock"].find_one() - - .. versionchanged:: 3.11 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. _fsyncUnlock command: https://docs.mongodb.com/manual/reference/command/fsyncUnlock/ - """ - warnings.warn("unlock is deprecated. Use " - "client.admin.command('fsyncUnlock') instead. For " - "MongoDB 2.6 and 3.0, see the documentation for " - "more information.", - DeprecationWarning, stacklevel=2) - cmd = SON([("fsyncUnlock", 1)]) - with self._socket_for_writes(session) as sock_info: - if sock_info.max_wire_version >= 4: - try: - with self._tmp_session(session) as s: - sock_info.command( - "admin", cmd, session=s, client=self) - except OperationFailure as exc: - # Ignore "DB not locked" to replicate old behavior - if exc.code != 125: - raise - else: - message._first_batch(sock_info, "admin", "$cmd.sys.unlock", - {}, -1, True, self.codec_options, - ReadPreference.PRIMARY, cmd, - self._event_listeners) - def __enter__(self): return self diff --git a/test/test_client.py b/test/test_client.py index 337602883a..9ce658a64d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1148,44 +1148,6 @@ def test_ipv6(self): self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_bernie" in dbs) - @ignore_deprecations - @client_context.require_no_mongos - def test_fsync_lock_unlock(self): - if server_is_master_with_slave(client_context.client): - raise SkipTest('SERVER-7714') - - self.assertFalse(self.client.is_locked) - # async flushing not supported on windows... - if sys.platform not in ('cygwin', 'win32'): - # Work around async becoming a reserved keyword in Python 3.7 - opts = {'async': True} - self.client.fsync(**opts) - self.assertFalse(self.client.is_locked) - self.client.fsync(lock=True) - self.assertTrue(self.client.is_locked) - locked = True - self.client.unlock() - for _ in range(5): - locked = self.client.is_locked - if not locked: - break - time.sleep(1) - self.assertFalse(locked) - - def test_deprecated_methods(self): - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with self.assertRaisesRegex(DeprecationWarning, - 'is_locked is deprecated'): - _ = self.client.is_locked - if not client_context.is_mongos: - with self.assertRaisesRegex(DeprecationWarning, - 'fsync is deprecated'): - self.client.fsync(lock=True) - with self.assertRaisesRegex(DeprecationWarning, - 'unlock is deprecated'): - self.client.unlock() - def test_contextlib(self): client = rs_or_single_client() client.pymongo_test.drop_collection("test") diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 6b33211159..ba6d2a44f1 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1336,32 +1336,6 @@ def test_first_batch_helper(self): self.assertTrue('inprog' in succeeded.reply) self.assertTrue('ok' in succeeded.reply) - if not client_context.is_mongos: - with ignore_deprecations(): - self.client.fsync(lock=True) - self.listener.results.clear() - self.client.unlock() - # Wait for async unlock... - wait_until( - lambda: not self.client.is_locked, "unlock the database") - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = {'fsyncUnlock': 1} - self.assertEqualCommand(expected, started.command) - self.assertEqual('admin', started.database_name) - self.assertEqual('fsyncUnlock', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('info' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) - def test_sensitive_commands(self): listeners = self.client._event_listeners diff --git a/test/test_session.py b/test/test_session.py index 50dfd8a060..b36654b16d 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -230,28 +230,12 @@ def test_end_sessions(self): @ignore_deprecations # fsync and unlock def test_client(self): client = self.client - - # Make sure if the test fails we unlock the server. - def unlock(): - try: - client.unlock() - except OperationFailure: - pass - - self.addCleanup(unlock) - ops = [ (client.server_info, [], {}), (client.database_names, [], {}), (client.drop_database, ['pymongo_test'], {}), ] - if not client_context.is_mongos: - ops.extend([ - (client.fsync, [], {'lock': True}), - (client.unlock, [], {}), - ]) - self._test_ops(client, *ops) def test_database(self): From 3c899aeb89959acc98366c7d704cbf6cc03bbd12 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 14:01:45 -0800 Subject: [PATCH 0274/2111] PYTHON-1592 Remove Collection.parallel_scan (#547) --- doc/api/pymongo/collection.rst | 1 - doc/changelog.rst | 9 ++-- doc/migrate-to-pymongo4.rst | 9 ++++ pymongo/collection.py | 87 ---------------------------------- test/test_collection.py | 46 ------------------ test/test_session.py | 42 ---------------- 6 files changed, 14 insertions(+), 180 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index d68653070a..dddf8dc34e 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -68,7 +68,6 @@ .. automethod:: options .. automethod:: map_reduce .. automethod:: inline_map_reduce - .. automethod:: parallel_scan .. automethod:: initialize_unordered_bulk_op .. automethod:: initialize_ordered_bulk_op .. automethod:: group diff --git a/doc/changelog.rst b/doc/changelog.rst index 904db340f0..943ec9625f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -15,13 +15,14 @@ Breaking Changes in 4.0 ....................... - Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. -- Removed :meth:`~pymongo.database.Database.eval`, - :data:`~pymongo.database.Database.system_js` and - :class:`~pymongo.database.SystemJS`. +- Removed :meth:`pymongo.database.Database.eval`, + :data:`pymongo.database.Database.system_js` and + :class:`pymongo.database.SystemJS`. - Removed :meth:`pymongo.mongo_client.MongoClient.fsync`, :meth:`pymongo.mongo_client.MongoClient.unlock`, and :attr:`pymongo.mongo_client.MongoClient.is_locked`. -- Removed :mod:`~pymongo.thread_util`. +- Removed :meth:`pymongo.collection.Collection.parallel_scan`. +- Removed :mod:`pymongo.thread_util`. Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index b0f747679e..8871f80e0a 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -116,3 +116,12 @@ can be changed to this:: >>> from bson.code import Code >>> result = database.command('eval', Code('function (x) {return x;}'), args=[3]).get('retval') + + +Collection.parallel_scan is removed +................................... + +Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2 +removed the `parallelCollectionScan command`_. There is no replacement. + +.. _parallelCollectionScan command: https://docs.mongodb.com/manual/reference/command/parallelCollectionScan/ diff --git a/pymongo/collection.py b/pymongo/collection.py index 4adce0a384..025c1723e9 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1559,93 +1559,6 @@ def find_raw_batches(self, *args, **kwargs): return RawBatchCursor(self, *args, **kwargs) - def parallel_scan(self, num_cursors, session=None, **kwargs): - """**DEPRECATED**: Scan this entire collection in parallel. - - Returns a list of up to ``num_cursors`` cursors that can be iterated - concurrently. As long as the collection is not modified during - scanning, each document appears once in one of the cursors result - sets. - - For example, to process each document in a collection using some - thread-safe ``process_document()`` function: - - >>> def process_cursor(cursor): - ... for document in cursor: - ... # Some thread-safe processing function: - ... process_document(document) - >>> - >>> # Get up to 4 cursors. - ... - >>> cursors = collection.parallel_scan(4) - >>> threads = [ - ... threading.Thread(target=process_cursor, args=(cursor,)) - ... for cursor in cursors] - >>> - >>> for thread in threads: - ... thread.start() - >>> - >>> for thread in threads: - ... thread.join() - >>> - >>> # All documents have now been processed. - - The :meth:`parallel_scan` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :Parameters: - - `num_cursors`: the number of cursors to return - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs`: additional options for the parallelCollectionScan - command can be passed as keyword arguments. - - .. note:: Requires server version **>= 2.5.5**. - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Added back support for arbitrary keyword arguments. MongoDB 3.4 - adds support for maxTimeMS as an option to the - parallelCollectionScan command. - - .. versionchanged:: 3.0 - Removed support for arbitrary keyword arguments, since - the parallelCollectionScan command has no optional arguments. - """ - warnings.warn("parallel_scan is deprecated. MongoDB 4.2 will remove " - "the parallelCollectionScan command.", - DeprecationWarning, stacklevel=2) - cmd = SON([('parallelCollectionScan', self.__name), - ('numCursors', num_cursors)]) - cmd.update(kwargs) - - with self._socket_for_reads(session) as (sock_info, slave_ok): - # We call sock_info.command here directly, instead of - # calling self._command to avoid using an implicit session. - result = sock_info.command( - self.__database.name, - cmd, - slave_ok, - self._read_preference_for(session), - self.codec_options, - read_concern=self.read_concern, - parse_write_concern_error=True, - session=session, - client=self.__database.client) - - cursors = [] - for cursor in result['cursors']: - cursors.append(CommandCursor( - self, cursor['cursor'], sock_info.address, - session=session, explicit_session=session is not None)) - - return cursors - def _count(self, cmd, collation=None, session=None): """Internal count helper.""" # XXX: "ns missing" checks can be removed when we drop support for diff --git a/test/test_collection.py b/test/test_collection.py index 96a3e454ab..c2b06ca37a 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1737,52 +1737,6 @@ def test_aggregation_cursor_alive(self): self.assertTrue(cursor.alive) - @client_context.require_no_mongos - @client_context.require_version_max(4, 1, 0) - @ignore_deprecations - def test_parallel_scan(self): - db = self.db - db.drop_collection("test") - if client_context.has_secondaries: - # Test that getMore messages are sent to the right server. - db = self.client.get_database( - db.name, - read_preference=ReadPreference.SECONDARY, - write_concern=WriteConcern(w=self.w)) - - coll = db.test - coll.insert_many([{'_id': i} for i in range(8000)]) - docs = [] - threads = [threading.Thread(target=docs.extend, args=(cursor,)) - for cursor in coll.parallel_scan(3)] - for t in threads: - t.start() - for t in threads: - t.join() - - self.assertEqual( - set(range(8000)), - set(doc['_id'] for doc in docs)) - - @client_context.require_no_mongos - @client_context.require_version_min(3, 3, 10) - @client_context.require_version_max(4, 1, 0) - @client_context.require_test_commands - @ignore_deprecations - def test_parallel_scan_max_time_ms(self): - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") - try: - self.assertRaises(ExecutionTimeout, - self.db.test.parallel_scan, - 3, - maxTimeMS=1) - finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") - def test_large_limit(self): db = self.db db.drop_collection("test_large_limit") diff --git a/test/test_session.py b/test/test_session.py index b36654b16d..61f8ff4204 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -323,40 +323,6 @@ def test_collection(self): self._test_ops(client, *ops) - @client_context.require_no_mongos - @client_context.require_version_max(4, 1, 0) - @ignore_deprecations - def test_parallel_collection_scan(self): - listener = self.listener - client = self.client - coll = client.pymongo_test.collection - coll.insert_many([{'_id': i} for i in range(1000)]) - - listener.results.clear() - - def scan(session=None): - cursors = coll.parallel_scan(4, session=session) - for c in cursors: - c.batch_size(2) - list(c) - - listener.results.clear() - with client.start_session() as session: - scan(session) - cursor_lsids = {} - for event in listener.results['started']: - self.assertIn( - 'lsid', event.command, - "parallel_scan sent no lsid with %s" % (event.command_name, )) - - if event.command_name == 'getMore': - cursor_id = event.command['getMore'] - if cursor_id in cursor_lsids: - self.assertEqual(cursor_lsids[cursor_id], - event.command['lsid']) - else: - cursor_lsids[cursor_id] = event.command['lsid'] - def test_cursor_clone(self): coll = self.client.pymongo_test.collection # Ensure some batches. @@ -874,14 +840,6 @@ def test_reads(self): lambda coll, session: coll.inline_map_reduce( 'function() {}', 'function() {}', session=session), exception=map_reduce_exc) - if (not client_context.is_mongos and - not client_context.version.at_least(4, 1, 0)): - def scan(coll, session): - cursors = coll.parallel_scan(1, session=session) - for cur in cursors: - list(cur) - self._test_reads( - lambda coll, session: scan(coll, session=session)) self.assertRaises( ConfigurationError, From 773767900c8a13164ceae42f5157e8dbb7b8e516 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 14:32:35 -0800 Subject: [PATCH 0275/2111] PYTHON-2502 Remove Python 2.7 from release scripts (#548) PYTHON-1300 Stop shipping .egg files. PYTHON-2507 Future proof pip version upgrade. --- .evergreen/build-mac.sh | 10 ++-------- .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 1 - .evergreen/build-windows.sh | 6 +----- .evergreen/config.yml | 2 +- .evergreen/utils.sh | 22 ++++++++++++++-------- 6 files changed, 19 insertions(+), 24 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 74d50b0817..bbed5c2c59 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,14 +8,8 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 2.7 3.4 3.5 3.6 3.7 3.8 3.9; do - if [[ $VERSION == "2.7" ]]; then - PYTHON=/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python - rm -rf build - $PYTHON setup.py bdist_egg - else - PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 - fi +for VERSION in 3.4 3.5 3.6 3.7 3.8 3.9; do + PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build # Install wheel if not already there. diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 38deddd194..0c57b43700 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp27|cp34|cp35|cp36|cp37|cp38|cp39) ]]; then + if [[ ! $PYTHON =~ (cp34|cp35|cp36|cp37|cp38|cp39) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index ba727654ec..93dcca43f0 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -22,7 +22,6 @@ ls dist # Check for any unexpected files. unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp27*' -or \ -iname '*cp34*' -or \ -iname '*cp35*' -or \ -iname '*cp36*' -or \ diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 89235dd79a..577fddb8f8 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,14 +8,10 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 27 34 35 36 37 38 39; do +for VERSION in 34 35 36 37 38 39; do _pythons=(C:/Python/Python${VERSION}/python.exe \ C:/Python/32/Python${VERSION}/python.exe) for PYTHON in "${_pythons[@]}"; do - if [[ $VERSION == "2.7" ]]; then - rm -rf build - $PYTHON setup.py bdist_egg - fi rm -rf build $PYTHON setup.py bdist_wheel diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8f301b8277..d138ec6283 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -856,7 +856,6 @@ tasks: - name: "release" tags: ["release"] exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). - git_tag_only: true commands: - command: shell.exec type: test @@ -2601,6 +2600,7 @@ buildvariants: matrix_spec: platform: [ubuntu-20.04, windows-64-vsMulti-small, macos-1014] display_name: "Release ${platform}" + batchtime: 20160 # 14 days tasks: - name: "release" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index cbc07d52fd..4ffa50ac13 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -24,13 +24,23 @@ createvirtualenv () { fi # Upgrade to the latest versions of pip setuptools wheel so that # pip can always download the latest cryptography+cffi wheels. - python -m pip install --upgrade pip setuptools wheel + PYTHON_VERSION=$(python -c 'import sys;print("%s.%s" % sys.version_info[:2])') + if [[ $PYTHON_VERSION == "3.4" ]]; then + # pip 19.2 dropped support for Python 3.4. + python -m pip install --upgrade 'pip<19.2' + elif [[ $PYTHON_VERSION == "3.5" ]]; then + # pip 21 will drop support for 3.5. + python -m pip install --upgrade 'pip<21' + else + python -m pip install --upgrade pip + fi + python -m pip install --upgrade setuptools wheel } # Usage: -# testinstall /path/to/python /path/to/.whl/or/.egg ["no-virtualenv"] +# testinstall /path/to/python /path/to/.whl ["no-virtualenv"] # * param1: Python binary to test -# * param2: Path to the wheel or egg file to install +# * param2: Path to the wheel to install # * param3 (optional): If set to a non-empty string, don't create a virtualenv. Used in manylinux containers. testinstall () { PYTHON=$1 @@ -42,11 +52,7 @@ testinstall () { PYTHON=python fi - if [[ $RELEASE == *.egg ]]; then - $PYTHON -m easy_install $RELEASE - else - $PYTHON -m pip install --upgrade $RELEASE - fi + $PYTHON -m pip install --upgrade $RELEASE cd tools $PYTHON fail_if_no_c.py $PYTHON -m pip uninstall -y pymongo From 6e8c3708b85613fdafa5cac640e17943aa1160b1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 16:53:42 -0800 Subject: [PATCH 0276/2111] PYTHON-1587 Remove MongoClient.database_names and Database.collection_names (#551) --- doc/api/pymongo/mongo_client.rst | 1 - doc/api/pymongo/mongo_replica_set_client.rst | 1 - doc/changelog.rst | 2 ++ doc/migrate-to-pymongo4.rst | 31 ++++++++++++++++++++ pymongo/database.py | 26 ---------------- pymongo/mongo_client.py | 18 ------------ test/test_client.py | 10 ++----- test/test_database.py | 21 ++++--------- test/test_read_preferences.py | 1 - test/test_session.py | 8 ++--- 10 files changed, 44 insertions(+), 75 deletions(-) diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index e7dcb727bf..a35718835d 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -37,7 +37,6 @@ .. automethod:: start_session .. automethod:: list_databases .. automethod:: list_database_names - .. automethod:: database_names .. automethod:: drop_database .. automethod:: get_default_database .. automethod:: get_database diff --git a/doc/api/pymongo/mongo_replica_set_client.rst b/doc/api/pymongo/mongo_replica_set_client.rst index b92e53186a..a614f2555a 100644 --- a/doc/api/pymongo/mongo_replica_set_client.rst +++ b/doc/api/pymongo/mongo_replica_set_client.rst @@ -24,7 +24,6 @@ .. autoattribute:: codec_options .. autoattribute:: read_preference .. autoattribute:: write_concern - .. automethod:: database_names .. automethod:: drop_database .. automethod:: get_database .. automethod:: close_cursor diff --git a/doc/changelog.rst b/doc/changelog.rst index 943ec9625f..060ee4376b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -21,6 +21,8 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.mongo_client.MongoClient.fsync`, :meth:`pymongo.mongo_client.MongoClient.unlock`, and :attr:`pymongo.mongo_client.MongoClient.is_locked`. +- Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. +- Removed :meth:`pymongo.database.Database.collection_names`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :mod:`pymongo.thread_util`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 8871f80e0a..2c2927c41b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -96,6 +96,37 @@ collection:: .. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ +MongoClient.database_names is removed +..................................... + +Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. Use +:meth:`~pymongo.mongo_client.MongoClient.list_database_names` instead. Code like +this:: + + names = client.database_names() + +can be changed to this:: + + names = client.list_database_names() + +Database +-------- + +Database.collection_names is removed +.................................... + +Removed :meth:`pymongo.database.Database.collection_names`. Use +:meth:`~pymongo.database.Database.list_collection_names` instead. Code like +this:: + + names = client.collection_names() + non_system_names = client.collection_names(include_system_collections=False) + +can be changed to this:: + + names = client.list_collection_names() + non_system_names = client.list_collection_names(filter={"name": {"$regex": r"^(?!system\\.)"}}) + Removed features with no migration path --------------------------------------- diff --git a/pymongo/database.py b/pymongo/database.py index 4c7001d161..9ef03a58a3 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -37,7 +37,6 @@ _INDEX_REGEX = {"name": {"$regex": r"^(?!.*\$)"}} -_SYSTEM_FILTER = {"filter": {"name": {"$regex": r"^(?!system\.)"}}} def _check_name(name): @@ -863,31 +862,6 @@ def list_collection_names(self, session=None, filter=None, **kwargs): return [result["name"] for result in self.list_collections(session=session, **kwargs)] - def collection_names(self, include_system_collections=True, - session=None): - """**DEPRECATED**: Get a list of all the collection names in this - database. - - :Parameters: - - `include_system_collections` (optional): if ``False`` list - will not include system collections (e.g ``system.indexes``) - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.7 - Deprecated. Use :meth:`list_collection_names` instead. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - warnings.warn("collection_names is deprecated. Use " - "list_collection_names instead.", - DeprecationWarning, stacklevel=2) - kws = {} if include_system_collections else _SYSTEM_FILTER - return [result["name"] - for result in self.list_collections(session=session, - nameOnly=True, **kws)] - def drop_collection(self, name_or_collection, session=None): """Drop a collection. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 83025861b7..7a45c0f85f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1931,24 +1931,6 @@ def list_database_names(self, session=None): return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] - def database_names(self, session=None): - """**DEPRECATED**: Get a list of the names of all databases on the - connected server. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.7 - Deprecated. Use :meth:`list_database_names` instead. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - warnings.warn("database_names is deprecated. Use list_database_names " - "instead.", DeprecationWarning, stacklevel=2) - return self.list_database_names(session) - def drop_database(self, name_or_database, session=None): """Drop a database. diff --git a/test/test_client.py b/test/test_client.py index 9ce658a64d..df8122bbae 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -712,23 +712,17 @@ def test_list_databases(self): for doc in cursor: self.assertEqual(["name"], list(doc)) - def _test_list_names(self, meth): + def test_list_database_names(self): self.client.pymongo_test.test.insert_one({"dummy": u"object"}) self.client.pymongo_test_mike.test.insert_one({"dummy": u"object"}) cmd_docs = self.client.admin.command("listDatabases")["databases"] cmd_names = [doc["name"] for doc in cmd_docs] - db_names = meth() + db_names = self.client.list_database_names() self.assertTrue("pymongo_test" in db_names) self.assertTrue("pymongo_test_mike" in db_names) self.assertEqual(db_names, cmd_names) - def test_list_database_names(self): - self._test_list_names(self.client.list_database_names) - - def test_database_names(self): - self._test_list_names(self.client.database_names) - def test_drop_database(self): self.assertRaises(TypeError, self.client.drop_database, 5) self.assertRaises(TypeError, self.client.drop_database, None) diff --git a/test/test_database.py b/test/test_database.py index b81353c462..262fcf7df0 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -163,19 +163,20 @@ def test_create_collection(self): self.assertTrue(u"test.foo" in db.list_collection_names()) self.assertRaises(CollectionInvalid, db.create_collection, "test.foo") - def _test_collection_names(self, meth, **no_system_kwargs): + def test_list_collection_names(self): db = Database(self.client, "pymongo_test") db.test.insert_one({"dummy": u"object"}) db.test.mike.insert_one({"dummy": u"object"}) - colls = getattr(db, meth)() + colls = db.list_collection_names() self.assertTrue("test" in colls) self.assertTrue("test.mike" in colls) for coll in colls: self.assertTrue("$" not in coll) db.systemcoll.test.insert_one({}) - no_system_collections = getattr(db, meth)(**no_system_kwargs) + no_system_collections = db.list_collection_names( + filter={"name": {"$regex": r"^(?!system\.)"}}) for coll in no_system_collections: self.assertTrue(not coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) @@ -186,19 +187,10 @@ def _test_collection_names(self, meth, **no_system_kwargs): db["coll" + str(i)].insert_one({}) # No Error try: - getattr(db, meth)() + db.list_collection_names() finally: self.client.drop_database("many_collections") - def test_collection_names(self): - self._test_collection_names( - 'collection_names', include_system_collections=False) - - def test_list_collection_names(self): - self._test_collection_names( - 'list_collection_names', filter={ - "name": {"$regex": r"^(?!system\.)"}}) - def test_list_collection_names_filter(self): listener = OvertCommandListener() results = listener.results @@ -306,8 +298,7 @@ def test_list_collections(self): self.client.drop_database("pymongo_test") - def test_collection_names_single_socket(self): - # Test that Database.collection_names only requires one socket. + def test_list_collection_names_single_socket(self): client = rs_or_single_client(maxPoolSize=1) client.drop_database('test_collection_names_single_socket') db = client.test_collection_names_single_socket diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 821c277e80..a85cb03d2d 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -160,7 +160,6 @@ def test_reads_from_secondary(self): self.assertEqual(10, len(list(coll.find()))) # Test some database helpers. - self.assertIsNotNone(db.collection_names()) self.assertIsNotNone(db.list_collection_names()) self.assertIsNotNone(db.validate_collection("test")) self.assertIsNotNone(db.command("ping")) diff --git a/test/test_session.py b/test/test_session.py index 61f8ff4204..2e2038501b 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -227,12 +227,11 @@ def test_end_sessions(self): client.close() self.assertEqual(len(listener.results['started']), 0) - @ignore_deprecations # fsync and unlock def test_client(self): client = self.client ops = [ (client.server_info, [], {}), - (client.database_names, [], {}), + (client.list_database_names, [], {}), (client.drop_database, ['pymongo_test'], {}), ] @@ -244,7 +243,6 @@ def test_database(self): ops = [ (db.command, ['ping'], {}), (db.create_collection, ['collection'], {}), - (db.collection_names, [], {}), (db.list_collection_names, [], {}), (db.validate_collection, ['collection'], {}), (db.drop_collection, ['collection'], {}), @@ -1129,7 +1127,7 @@ def test_explicit_session_logout(self): db.collection.bulk_write([InsertOne({})], session=s) with self.assertRaisesRegex(InvalidOperation, err): - db.collection_names(session=s) + db.list_collection_names(session=s) with self.assertRaisesRegex(InvalidOperation, err): db.collection.find_one(session=s) @@ -1147,7 +1145,7 @@ def test_implicit_session_logout(self): for name, f in [ ('bulk_write', lambda: db.collection.bulk_write([InsertOne({})])), - ('collection_names', db.collection_names), + ('list_collection_names', db.list_collection_names), ('find_one', db.collection.find_one), ('aggregate', lambda: list(db.collection.aggregate([]))) ]: From 56925fd97f0d1e5514d3ca738289c5179f522dde Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 17:11:15 -0800 Subject: [PATCH 0277/2111] PYTHON-1321 Remove MongoReplicaSetClient (#552) --- doc/api/pymongo/index.rst | 5 - doc/api/pymongo/mongo_replica_set_client.rst | 32 -- doc/changelog.rst | 1 + doc/migrate-to-pymongo4.rst | 9 + pymongo/__init__.py | 1 - pymongo/common.py | 2 +- pymongo/mongo_client.py | 6 +- pymongo/mongo_replica_set_client.py | 48 --- pymongo/read_preferences.py | 4 +- test/test_gridfs.py | 18 +- test/test_gridfs_bucket.py | 18 +- test/test_read_preferences.py | 16 +- test/test_replica_set_client.py | 376 ------------------- 13 files changed, 37 insertions(+), 499 deletions(-) delete mode 100644 doc/api/pymongo/mongo_replica_set_client.rst delete mode 100644 pymongo/mongo_replica_set_client.py delete mode 100644 test/test_replica_set_client.py diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index fb48c94ea6..51bcc708aa 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -9,10 +9,6 @@ Alias for :class:`pymongo.mongo_client.MongoClient`. - .. data:: MongoReplicaSetClient - - Alias for :class:`pymongo.mongo_replica_set_client.MongoReplicaSetClient`. - .. data:: ReadPreference Alias for :class:`pymongo.read_preferences.ReadPreference`. @@ -46,7 +42,6 @@ Sub-modules: errors message mongo_client - mongo_replica_set_client monitoring operations pool diff --git a/doc/api/pymongo/mongo_replica_set_client.rst b/doc/api/pymongo/mongo_replica_set_client.rst deleted file mode 100644 index a614f2555a..0000000000 --- a/doc/api/pymongo/mongo_replica_set_client.rst +++ /dev/null @@ -1,32 +0,0 @@ -:mod:`mongo_replica_set_client` -- Tools for connecting to a MongoDB replica set -================================================================================ - -.. automodule:: pymongo.mongo_replica_set_client - :synopsis: Tools for connecting to a MongoDB replica set - - .. autoclass:: pymongo.mongo_replica_set_client.MongoReplicaSetClient(hosts_or_uri, document_class=dict, tz_aware=False, connect=True, **kwargs) - - .. automethod:: close - - .. describe:: c[db_name] || c.db_name - - Get the `db_name` :class:`~pymongo.database.Database` on :class:`MongoReplicaSetClient` `c`. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - - .. autoattribute:: primary - .. autoattribute:: secondaries - .. autoattribute:: arbiters - .. autoattribute:: max_pool_size - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: local_threshold_ms - .. autoattribute:: codec_options - .. autoattribute:: read_preference - .. autoattribute:: write_concern - .. automethod:: drop_database - .. automethod:: get_database - .. automethod:: close_cursor - .. automethod:: kill_cursors - .. automethod:: set_cursor_manager - .. automethod:: get_default_database diff --git a/doc/changelog.rst b/doc/changelog.rst index 060ee4376b..98fcd4bce6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -25,6 +25,7 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.database.Database.collection_names`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :mod:`pymongo.thread_util`. +- Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 2c2927c41b..8fcd52df06 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -50,6 +50,15 @@ Warnings can also be changed to errors:: .. note:: Not all deprecated features raise :exc:`DeprecationWarning` when used. See `Removed features with no migration path`_. +MongoReplicaSetClient +--------------------- + +Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. +Since PyMongo 3.0, ``MongoReplicaSetClient`` has been identical to +:class:`pymongo.mongo_client.MongoClient`. Applications can simply replace +``MongoReplicaSetClient`` with :class:`pymongo.mongo_client.MongoClient` and +get the same behavior. + MongoClient ----------- diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 796ef15db1..1ce7f82c13 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -89,7 +89,6 @@ def get_version_string(): MAX_SUPPORTED_WIRE_VERSION) from pymongo.cursor import CursorType from pymongo.mongo_client import MongoClient -from pymongo.mongo_replica_set_client import MongoReplicaSetClient from pymongo.operations import (IndexModel, InsertOne, DeleteOne, diff --git a/pymongo/common.py b/pymongo/common.py index a4d91b7b1c..f45bc71285 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -364,7 +364,7 @@ def validate_read_preference(dummy, value): def validate_read_preference_mode(dummy, value): - """Validate read preference mode for a MongoReplicaSetClient. + """Validate read preference mode for a MongoClient. .. versionchanged:: 3.5 Returns the original ``value`` instead of the validated read preference diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7a45c0f85f..de78188e1d 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1001,8 +1001,7 @@ def primary(self): `replicaSet` option. .. versionadded:: 3.0 - MongoClient gained this property in version 3.0 when - MongoReplicaSetClient's functionality was merged in. + MongoClient gained this property in version 3.0. """ return self._topology.get_primary() @@ -1015,8 +1014,7 @@ def secondaries(self): client was created without the `replicaSet` option. .. versionadded:: 3.0 - MongoClient gained this property in version 3.0 when - MongoReplicaSetClient's functionality was merged in. + MongoClient gained this property in version 3.0. """ return self._topology.get_secondaries() diff --git a/pymongo/mongo_replica_set_client.py b/pymongo/mongo_replica_set_client.py deleted file mode 100644 index c9436c24e5..0000000000 --- a/pymongo/mongo_replica_set_client.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2011-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Deprecated. See :doc:`/examples/high_availability`.""" - -import warnings - -from pymongo import mongo_client - - -class MongoReplicaSetClient(mongo_client.MongoClient): - """Deprecated alias for :class:`~pymongo.mongo_client.MongoClient`. - - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` - will be removed in a future version of PyMongo. - - .. versionchanged:: 3.0 - :class:`~pymongo.mongo_client.MongoClient` is now the one and only - client class for a standalone server, mongos, or replica set. - It includes the functionality that had been split into - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`: it - can connect to a replica set, discover all its members, and monitor - the set for stepdowns, elections, and reconfigs. - - The ``refresh`` method is removed from - :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`, - as are the ``seeds`` and ``hosts`` properties. - """ - def __init__(self, *args, **kwargs): - warnings.warn('MongoReplicaSetClient is deprecated, use MongoClient' - ' to connect to a replica set', - DeprecationWarning, stacklevel=2) - - super(MongoReplicaSetClient, self).__init__(*args, **kwargs) - - def __repr__(self): - return "MongoReplicaSetClient(%s)" % (self._repr_helper(),) diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 53e8980174..5ba833e53f 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -38,7 +38,7 @@ def _validate_tag_sets(tag_sets): - """Validate tag sets for a MongoReplicaSetClient. + """Validate tag sets for a MongoClient. """ if tag_sets is None: return tag_sets @@ -144,7 +144,7 @@ def tag_sets(self): To specify a priority-order for tag sets, provide a list of tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag set, ``{}``, means "read from any member that matches the mode, - ignoring tags." MongoReplicaSetClient tries each set of tags in turn + ignoring tags." MongoClient tries each set of tags in turn until it finds a set of tags with at least one matching member. .. seealso:: `Data-Center Awareness diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 82422f968f..0d05e0b781 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -28,11 +28,10 @@ from bson.py3compat import StringIO, string_type from pymongo.mongo_client import MongoClient from pymongo.errors import (ConfigurationError, - ConnectionFailure, + NotMasterError, ServerSelectionTimeoutError) from pymongo.read_preferences import ReadPreference from gridfs.errors import CorruptGridFile, FileExists, NoFile -from test.test_replica_set_client import TestReplicaSetClientBase from test import (client_context, unittest, IntegrationTest) @@ -487,7 +486,7 @@ def test_md5(self): self.assertIsNone(gout.md5) -class TestGridfsReplicaSet(TestReplicaSetClientBase): +class TestGridfsReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) @@ -500,7 +499,7 @@ def tearDownClass(cls): def test_gridfs_replica_set(self): rsc = rs_client( - w=self.w, + w=client_context.w, read_preference=ReadPreference.SECONDARY) fs = gridfs.GridFS(rsc.gfsreplica, 'gfsreplicatest') @@ -513,10 +512,7 @@ def test_gridfs_replica_set(self): self.assertEqual(b'foo', content) def test_gridfs_secondary(self): - primary_host, primary_port = self.primary - primary_connection = single_client(primary_host, primary_port) - - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY) @@ -526,12 +522,12 @@ def test_gridfs_secondary(self): fs = gridfs.GridFS(secondary_connection.gfsreplica, 'gfssecondarytest') # This won't detect secondary, raises error - self.assertRaises(ConnectionFailure, fs.put, b'foo') + self.assertRaises(NotMasterError, fs.put, b'foo') def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) client = single_client( secondary_host, secondary_port, @@ -543,7 +539,7 @@ def test_gridfs_secondary_lazy(self): # Connects, doesn't create index. self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(ConnectionFailure, fs.put, 'data') + self.assertRaises(NotMasterError, fs.put, 'data') if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 21feda719d..17b5364bb2 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -30,14 +30,13 @@ from bson.son import SON from gridfs.errors import NoFile, CorruptGridFile from pymongo.errors import (ConfigurationError, - ConnectionFailure, + NotMasterError, ServerSelectionTimeoutError) from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference from test import (client_context, unittest, IntegrationTest) -from test.test_replica_set_client import TestReplicaSetClientBase from test.utils import (ignore_deprecations, joinall, one, @@ -504,7 +503,7 @@ def test_md5(self): self.assertIsNone(gout.md5) -class TestGridfsBucketReplicaSet(TestReplicaSetClientBase): +class TestGridfsBucketReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) @@ -517,7 +516,7 @@ def tearDownClass(cls): def test_gridfs_replica_set(self): rsc = rs_client( - w=self.w, + w=client_context.w, read_preference=ReadPreference.SECONDARY) gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, 'gfsbucketreplicatest') @@ -526,10 +525,7 @@ def test_gridfs_replica_set(self): self.assertEqual(b'foo', content) def test_gridfs_secondary(self): - primary_host, primary_port = self.primary - primary_connection = single_client(primary_host, primary_port) - - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY) @@ -540,13 +536,13 @@ def test_gridfs_secondary(self): secondary_connection.gfsbucketreplica, 'gfsbucketsecondarytest') # This won't detect secondary, raises error - self.assertRaises(ConnectionFailure, gfs.upload_from_stream, + self.assertRaises(NotMasterError, gfs.upload_from_stream, "test_filename", b'foo') def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. - secondary_host, secondary_port = one(self.secondaries) + secondary_host, secondary_port = one(self.client.secondaries) client = single_client( secondary_host, secondary_port, @@ -560,7 +556,7 @@ def test_gridfs_secondary_lazy(self): # Connects, doesn't create index. self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") - self.assertRaises(ConnectionFailure, gfs.upload_from_stream, + self.assertRaises(NotMasterError, gfs.upload_from_stream, "test_filename", b'data') diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index a85cb03d2d..2a8eb96e9f 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -39,10 +39,10 @@ from test import (SkipTest, client_context, + IntegrationTest, unittest, db_user, db_pwd) -from test.test_replica_set_client import TestReplicaSetClientBase from test.utils import (connected, ignore_deprecations, one, @@ -87,7 +87,7 @@ def test_deepcopy(self): self.assertEqual(pref, copy.deepcopy(pref)) -class TestReadPreferencesBase(TestReplicaSetClientBase): +class TestReadPreferencesBase(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) @@ -100,7 +100,7 @@ def setUp(self): self.client.pymongo_test.test.drop() self.client.get_database( "pymongo_test", - write_concern=WriteConcern(w=self.w)).test.insert_many( + write_concern=WriteConcern(w=client_context.w)).test.insert_many( [{'_id': i} for i in range(10)]) self.addCleanup(self.client.pymongo_test.test.drop) @@ -130,7 +130,7 @@ def read_from_which_kind(self, client): def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) wait_until( - lambda: len(c.nodes - c.arbiters) == self.w, + lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) @@ -284,7 +284,7 @@ def test_nearest(self): read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds - data_members = set(self.hosts).difference(set(self.arbiters)) + data_members = {self.client.primary} | self.client.secondaries # This is a probabilistic test; track which members we've read from so # far, and keep reading until we've used all the members or give up. @@ -347,7 +347,7 @@ def record_a_read(self, address): ] -class TestCommandAndReadPreference(TestReplicaSetClientBase): +class TestCommandAndReadPreference(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) @@ -355,7 +355,7 @@ def setUpClass(cls): super(TestCommandAndReadPreference, cls).setUpClass() cls.c = ReadPrefTester( client_context.pair, - replicaSet=cls.name, + replicaSet=client_context.replica_set_name, # Ignore round trip times, to test ReadPreference modes only. localThresholdMS=1000*1000) if client_context.auth_enabled: @@ -363,7 +363,7 @@ def setUpClass(cls): cls.client_version = Version.from_client(cls.c) # mapReduce and group fail with no collection coll = cls.c.pymongo_test.get_collection( - 'test', write_concern=WriteConcern(w=cls.w)) + 'test', write_concern=WriteConcern(w=client_context.w)) coll.insert_one({}) @classmethod diff --git a/test/test_replica_set_client.py b/test/test_replica_set_client.py deleted file mode 100644 index 619499c392..0000000000 --- a/test/test_replica_set_client.py +++ /dev/null @@ -1,376 +0,0 @@ -# Copyright 2011-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the mongo_replica_set_client module.""" - -import sys -import warnings -import time - -sys.path[0:0] = [""] - -from bson.codec_options import CodecOptions -from bson.son import SON -from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, partition_node -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - NetworkTimeout, - NotMasterError, - OperationFailure) -from pymongo.mongo_client import MongoClient -from pymongo.mongo_replica_set_client import MongoReplicaSetClient -from pymongo.read_preferences import ReadPreference, Secondary, Nearest -from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - unittest, - SkipTest, - db_pwd, - db_user, - MockClientTest, - HAVE_IPADDRESS) -from test.pymongo_mocks import MockClient -from test.utils import (connected, - delay, - ignore_deprecations, - one, - rs_client, - single_client, - wait_until) - - -class TestReplicaSetClientBase(IntegrationTest): - - @classmethod - @client_context.require_replica_set - def setUpClass(cls): - super(TestReplicaSetClientBase, cls).setUpClass() - cls.name = client_context.replica_set_name - cls.w = client_context.w - - ismaster = client_context.ismaster - cls.hosts = set(partition_node(h.lower()) for h in ismaster['hosts']) - cls.arbiters = set(partition_node(h) - for h in ismaster.get("arbiters", [])) - - repl_set_status = client_context.client.admin.command( - 'replSetGetStatus') - primary_info = [ - m for m in repl_set_status['members'] - if m['stateStr'] == 'PRIMARY' - ][0] - - cls.primary = partition_node(primary_info['name'].lower()) - cls.secondaries = set( - partition_node(m['name'].lower()) - for m in repl_set_status['members'] - if m['stateStr'] == 'SECONDARY') - - -class TestReplicaSetClient(TestReplicaSetClientBase): - def test_deprecated(self): - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - with self.assertRaises(DeprecationWarning): - MongoReplicaSetClient() - - def test_connect(self): - client = MongoClient( - client_context.pair, - replicaSet='fdlksjfdslkjfd', - serverSelectionTimeoutMS=100) - - with self.assertRaises(ConnectionFailure): - client.test.test.find_one() - - def test_repr(self): - with ignore_deprecations(): - client = MongoReplicaSetClient( - client_context.host, - client_context.port, - replicaSet=self.name) - - self.assertIn("MongoReplicaSetClient(host=[", repr(client)) - self.assertIn(client_context.pair, repr(client)) - - def test_properties(self): - c = client_context.client - c.admin.command('ping') - - wait_until(lambda: c.primary == self.primary, "discover primary") - wait_until(lambda: c.arbiters == self.arbiters, "discover arbiters") - wait_until(lambda: c.secondaries == self.secondaries, - "discover secondaries") - - self.assertEqual(c.primary, self.primary) - self.assertEqual(c.secondaries, self.secondaries) - self.assertEqual(c.arbiters, self.arbiters) - self.assertEqual(c.max_pool_size, 100) - - # Make sure MongoClient's properties are copied to Database and - # Collection. - for obj in c, c.pymongo_test, c.pymongo_test.test: - self.assertEqual(obj.codec_options, CodecOptions()) - self.assertEqual(obj.read_preference, ReadPreference.PRIMARY) - self.assertEqual(obj.write_concern, WriteConcern()) - - cursor = c.pymongo_test.test.find() - self.assertEqual( - ReadPreference.PRIMARY, cursor._read_preference()) - - tag_sets = [{'dc': 'la', 'rack': '2'}, {'foo': 'bar'}] - secondary = Secondary(tag_sets=tag_sets) - c = rs_client( - maxPoolSize=25, - document_class=SON, - tz_aware=True, - read_preference=secondary, - localThresholdMS=77, - j=True) - - self.assertEqual(c.max_pool_size, 25) - - for obj in c, c.pymongo_test, c.pymongo_test.test: - self.assertEqual(obj.codec_options, CodecOptions(SON, True)) - self.assertEqual(obj.read_preference, secondary) - self.assertEqual(obj.write_concern, WriteConcern(j=True)) - - cursor = c.pymongo_test.test.find() - self.assertEqual( - secondary, cursor._read_preference()) - - nearest = Nearest(tag_sets=[{'dc': 'ny'}, {}]) - cursor = c.pymongo_test.get_collection( - "test", read_preference=nearest).find() - - self.assertEqual(nearest, cursor._read_preference()) - self.assertEqual(c.max_bson_size, 16777216) - c.close() - - @client_context.require_secondaries_count(1) - def test_timeout_does_not_mark_member_down(self): - # If a query times out, the client shouldn't mark the member "down". - - # Disable background refresh. - with client_knobs(heartbeat_frequency=999999): - c = rs_client(socketTimeoutMS=1000, w=self.w) - collection = c.pymongo_test.test - collection.insert_one({}) - - # Query the primary. - self.assertRaises( - NetworkTimeout, - collection.find_one, - {'$where': delay(1.5)}) - - self.assertTrue(c.primary) - collection.find_one() # No error. - - coll = collection.with_options( - read_preference=ReadPreference.SECONDARY) - - # Query the secondary. - self.assertRaises( - NetworkTimeout, - coll.find_one, - {'$where': delay(1.5)}) - - self.assertTrue(c.secondaries) - - # No error. - coll.find_one() - - @client_context.require_ipv6 - def test_ipv6(self): - if client_context.tls: - if not HAVE_IPADDRESS: - raise SkipTest("Need the ipaddress module to test with SSL") - - port = client_context.port - c = rs_client("mongodb://[::1]:%d" % (port,)) - - # Client switches to IPv4 once it has first ismaster response. - msg = 'discovered primary with IPv4 address "%r"' % (self.primary,) - wait_until(lambda: c.primary == self.primary, msg) - - # Same outcome with both IPv4 and IPv6 seeds. - c = rs_client("mongodb://[::1]:%d,localhost:%d" % (port, port)) - - wait_until(lambda: c.primary == self.primary, msg) - - if client_context.auth_enabled: - auth_str = "%s:%s@" % (db_user, db_pwd) - else: - auth_str = "" - - uri = "mongodb://%slocalhost:%d,[::1]:%d" % (auth_str, port, port) - client = rs_client(uri) - client.pymongo_test.test.insert_one({"dummy": u"object"}) - client.pymongo_test_bernie.test.insert_one({"dummy": u"object"}) - - dbs = client.list_database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) - client.close() - - def _test_kill_cursor_explicit(self, read_pref): - with client_knobs(kill_cursor_frequency=0.01): - c = rs_client(read_preference=read_pref, w=self.w) - db = c.pymongo_test - db.drop_collection("test") - - test = db.test - test.insert_many([{"i": i} for i in range(20)]) - - # Partially evaluate cursor so it's left alive, then kill it - cursor = test.find().batch_size(10) - next(cursor) - self.assertNotEqual(0, cursor.cursor_id) - - if read_pref == ReadPreference.PRIMARY: - msg = "Expected cursor's address to be %s, got %s" % ( - c.primary, cursor.address) - - self.assertEqual(cursor.address, c.primary, msg) - else: - self.assertNotEqual( - cursor.address, c.primary, - "Expected cursor's address not to be primary") - - cursor_id = cursor.cursor_id - - # Cursor dead on server - trigger a getMore on the same cursor_id - # and check that the server returns an error. - cursor2 = cursor.clone() - cursor2._Cursor__id = cursor_id - - if sys.platform.startswith('java') or 'PyPy' in sys.version: - # Explicitly kill cursor. - cursor.close() - else: - # Implicitly kill it in CPython. - del cursor - - time.sleep(5) - self.assertRaises(OperationFailure, lambda: list(cursor2)) - - def test_kill_cursor_explicit_primary(self): - self._test_kill_cursor_explicit(ReadPreference.PRIMARY) - - @client_context.require_secondaries_count(1) - def test_kill_cursor_explicit_secondary(self): - self._test_kill_cursor_explicit(ReadPreference.SECONDARY) - - @client_context.require_secondaries_count(1) - def test_not_master_error(self): - secondary_address = one(self.secondaries) - direct_client = single_client(*secondary_address) - - with self.assertRaises(NotMasterError): - direct_client.pymongo_test.collection.insert_one({}) - - db = direct_client.get_database( - "pymongo_test", write_concern=WriteConcern(w=0)) - with self.assertRaises(NotMasterError): - db.collection.insert_one({}) - - -class TestReplicaSetWireVersion(MockClientTest): - - @client_context.require_connection - @client_context.require_no_auth - def test_wire_version(self): - c = MockClient( - standalones=[], - members=['a:1', 'b:2', 'c:3'], - mongoses=[], - host='a:1', - replicaSet='rs', - connect=False) - self.addCleanup(c.close) - - c.set_wire_version_range('a:1', 3, 7) - c.set_wire_version_range('b:2', 2, 3) - c.set_wire_version_range('c:3', 3, 4) - c.db.command('ismaster') # Connect. - - # A secondary doesn't overlap with us. - c.set_wire_version_range('b:2', - MAX_SUPPORTED_WIRE_VERSION + 1, - MAX_SUPPORTED_WIRE_VERSION + 2) - - def raises_configuration_error(): - try: - c.db.collection.find_one() - return False - except ConfigurationError: - return True - - wait_until(raises_configuration_error, - 'notice we are incompatible with server') - - self.assertRaises(ConfigurationError, c.db.collection.insert_one, {}) - - -class TestReplicaSetClientInternalIPs(MockClientTest): - - @client_context.require_connection - def test_connect_with_internal_ips(self): - # Client is passed an IP it can reach, 'a:1', but the RS config - # only contains unreachable IPs like 'internal-ip'. PYTHON-608. - client = MockClient( - standalones=[], - members=['a:1'], - mongoses=[], - ismaster_hosts=['internal-ip:27017'], - host='a:1', - replicaSet='rs', - serverSelectionTimeoutMS=100) - self.addCleanup(client.close) - with self.assertRaises(AutoReconnect) as context: - connected(client) - - self.assertIn("Could not reach any servers in [('internal-ip', 27017)]." - " Replica set is configured with internal hostnames or IPs?", - str(context.exception)) - -class TestReplicaSetClientMaxWriteBatchSize(MockClientTest): - - @client_context.require_connection - def test_max_write_batch_size(self): - c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs', - connect=False) - self.addCleanup(c.close) - - c.set_max_write_batch_size('a:1', 1) - c.set_max_write_batch_size('b:2', 2) - - # Uses primary's max batch size. - self.assertEqual(c.max_write_batch_size, 1) - - # b becomes primary. - c.mock_primary = 'b:2' - wait_until(lambda: c.max_write_batch_size == 2, - 'update max_write_batch_size') - - -if __name__ == "__main__": - unittest.main() From 0e250bef7599d4f0b216773dfc57775607043e4b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 17:52:54 -0800 Subject: [PATCH 0278/2111] PYTHON-1301/PYTHON-1302/PYTHON-1588 Remove deprecated cursor manager APIs (#550) Remove MongoClient.set_cursor_manager and pymongo.cursor_manager. Remove MongoClient.kill_cursors and MongoClient.close_cursor. Co-authored-by: kAldown --- doc/api/pymongo/cursor_manager.rst | 6 -- doc/api/pymongo/index.rst | 1 - doc/api/pymongo/mongo_client.rst | 3 - doc/changelog.rst | 6 ++ doc/migrate-to-pymongo4.rst | 18 ++++- pymongo/common.py | 3 +- pymongo/cursor.py | 4 - pymongo/cursor_manager.py | 65 ---------------- pymongo/mongo_client.py | 119 ++--------------------------- test/test_cursor_manager.py | 95 ----------------------- test/test_custom_types.py | 3 + test/test_legacy_api.py | 57 -------------- 12 files changed, 36 insertions(+), 344 deletions(-) delete mode 100644 doc/api/pymongo/cursor_manager.rst delete mode 100644 pymongo/cursor_manager.py delete mode 100644 test/test_cursor_manager.py diff --git a/doc/api/pymongo/cursor_manager.rst b/doc/api/pymongo/cursor_manager.rst deleted file mode 100644 index 8851b66f70..0000000000 --- a/doc/api/pymongo/cursor_manager.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`cursor_manager` -- Managers to handle when cursors are killed after being closed -====================================================================================== - -.. automodule:: pymongo.cursor_manager - :synopsis: Managers to handle when cursors are killed after being closed - :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 51bcc708aa..99688e841a 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -34,7 +34,6 @@ Sub-modules: collection command_cursor cursor - cursor_manager database driver_info encryption diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index a35718835d..73a79bb9b2 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -42,6 +42,3 @@ .. automethod:: get_database .. automethod:: server_info .. automethod:: watch - .. automethod:: close_cursor - .. automethod:: kill_cursors - .. automethod:: set_cursor_manager diff --git a/doc/changelog.rst b/doc/changelog.rst index 98fcd4bce6..644a5b5599 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -24,6 +24,12 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. - Removed :meth:`pymongo.database.Database.collection_names`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. +- Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use + :meth:`pymongo.cursor.Cursor.close` instead. +- Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. +- Removed :class:`pymongo.cursor_manager.CursorManager` and + :mod:`pymongo.cursor_manager`. +- Removed :meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. - Removed :mod:`pymongo.thread_util`. - Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 8fcd52df06..a6328bdf00 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -139,6 +139,23 @@ can be changed to this:: Removed features with no migration path --------------------------------------- +cursor_manager support is removed +................................. + +Removed :class:`pymongo.cursor_manager.CursorManager`, +:mod:`pymongo.cursor_manager`, and +:meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. + +MongoClient.close_cursor is removed +................................... + +Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor` and +:meth:`pymongo.mongo_client.MongoClient.kill_cursors`. Instead, close cursors +with :meth:`pymongo.cursor.Cursor.close` or +:meth:`pymongo.command_cursor.CommandCursor.close`. + +.. _killCursors command: https://docs.mongodb.com/manual/reference/command/killCursors/ + Database.eval, Database.system_js, and SystemJS are removed ........................................................... @@ -157,7 +174,6 @@ can be changed to this:: >>> from bson.code import Code >>> result = database.command('eval', Code('function (x) {return x;}'), args=[3]).get('retval') - Collection.parallel_scan is removed ................................... diff --git a/pymongo/common.py b/pymongo/common.py index f45bc71285..a416dcd405 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,8 @@ # Frequency to call ismaster on servers, in seconds. HEARTBEAT_FREQUENCY = 10 -# Frequency to process kill-cursors, in seconds. See MongoClient.close_cursor. +# Frequency to clean up unclosed cursors, in seconds. +# See MongoClient._process_kill_cursors. KILL_CURSOR_FREQUENCY = 1 # Frequency to process events queue, in seconds. diff --git a/pymongo/cursor.py b/pymongo/cursor.py index ad116d4124..9a464df42b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1171,10 +1171,6 @@ def alive(self): def cursor_id(self): """Returns the id of the cursor - Useful if you need to manage cursor ids and want to handle killing - cursors manually using - :meth:`~pymongo.mongo_client.MongoClient.kill_cursors` - .. versionadded:: 2.2 """ return self.__id diff --git a/pymongo/cursor_manager.py b/pymongo/cursor_manager.py deleted file mode 100644 index c05cf301e7..0000000000 --- a/pymongo/cursor_manager.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""DEPRECATED - A manager to handle when cursors are killed after they are -closed. - -New cursor managers should be defined as subclasses of CursorManager and can be -installed on a client by calling -:meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager`. - -.. versionchanged:: 3.3 - Deprecated, for real this time. - -.. versionchanged:: 3.0 - Undeprecated. :meth:`~pymongo.cursor_manager.CursorManager.close` now - requires an `address` argument. The ``BatchCursorManager`` class is removed. -""" - -import warnings -import weakref -from bson.py3compat import integer_types - - -class CursorManager(object): - """DEPRECATED - The cursor manager base class.""" - - def __init__(self, client): - """Instantiate the manager. - - :Parameters: - - `client`: a MongoClient - """ - warnings.warn( - "Cursor managers are deprecated.", - DeprecationWarning, - stacklevel=2) - self.__client = weakref.ref(client) - - def close(self, cursor_id, address): - """Kill a cursor. - - Raises TypeError if cursor_id is not an instance of (int, long). - - :Parameters: - - `cursor_id`: cursor id to close - - `address`: the cursor's server's (host, port) pair - - .. versionchanged:: 3.0 - Now requires an `address` argument. - """ - if not isinstance(cursor_id, integer_types): - raise TypeError("cursor_id must be an integer") - - self.__client().kill_cursors([cursor_id], address) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index de78188e1d..1dcb248254 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -53,7 +53,6 @@ from pymongo.change_stream import ClusterChangeStream from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor -from pymongo.cursor_manager import CursorManager from pymongo.errors import (AutoReconnect, BulkWriteError, ConfigurationError, @@ -704,7 +703,6 @@ def __init__( self.__default_database_name = dbase self.__lock = threading.Lock() - self.__cursor_manager = None self.__kill_cursors_queue = [] self._event_listeners = options.pool_options.event_listeners @@ -1209,35 +1207,6 @@ def close(self): # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. self._encrypter.close() - def set_cursor_manager(self, manager_class): - """DEPRECATED - Set this client's cursor manager. - - Raises :class:`TypeError` if `manager_class` is not a subclass of - :class:`~pymongo.cursor_manager.CursorManager`. A cursor manager - handles closing cursors. Different managers can implement different - policies in terms of when to actually kill a cursor that has - been closed. - - :Parameters: - - `manager_class`: cursor manager to use - - .. versionchanged:: 3.3 - Deprecated, for real this time. - - .. versionchanged:: 3.0 - Undeprecated. - """ - warnings.warn( - "set_cursor_manager is Deprecated", - DeprecationWarning, - stacklevel=2) - manager = manager_class(self) - if not isinstance(manager, CursorManager): - raise TypeError("manager_class must be a subclass of " - "CursorManager") - - self.__cursor_manager = manager - def _get_topology(self): """Get the internal :class:`~pymongo.topology.Topology` object. @@ -1580,39 +1549,6 @@ def __getitem__(self, name): """ return database.Database(self, name) - def close_cursor(self, cursor_id, address=None): - """DEPRECATED - Send a kill cursors message soon with the given id. - - Raises :class:`TypeError` if `cursor_id` is not an instance of - ``(int, long)``. What closing the cursor actually means - depends on this client's cursor manager. - - This method may be called from a :class:`~pymongo.cursor.Cursor` - destructor during garbage collection, so it isn't safe to take a - lock or do network I/O. Instead, we schedule the cursor to be closed - soon on a background thread. - - :Parameters: - - `cursor_id`: id of cursor to close - - `address` (optional): (host, port) pair of the cursor's server. - If it is not provided, the client attempts to close the cursor on - the primary or standalone, or a mongos server. - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 3.0 - Added ``address`` parameter. - """ - warnings.warn( - "close_cursor is deprecated.", - DeprecationWarning, - stacklevel=2) - if not isinstance(cursor_id, integer_types): - raise TypeError("cursor_id must be an instance of (int, long)") - - self._close_cursor(cursor_id, address) - def _close_cursor(self, cursor_id, address): """Send a kill cursors message with the given id. @@ -1620,61 +1556,22 @@ def _close_cursor(self, cursor_id, address): cursor manager. If there is none, the cursor is closed asynchronously on a background thread. """ - if self.__cursor_manager is not None: - self.__cursor_manager.close(cursor_id, address) - else: - self.__kill_cursors_queue.append((address, [cursor_id])) + self.__kill_cursors_queue.append((address, [cursor_id])) def _close_cursor_now(self, cursor_id, address=None, session=None): """Send a kill cursors message with the given id. - What closing the cursor actually means depends on this client's - cursor manager. If there is none, the cursor is closed synchronously - on the current thread. + The cursor is closed synchronously on the current thread. """ if not isinstance(cursor_id, integer_types): raise TypeError("cursor_id must be an instance of (int, long)") - if self.__cursor_manager is not None: - self.__cursor_manager.close(cursor_id, address) - else: - try: - self._kill_cursors( - [cursor_id], address, self._get_topology(), session) - except PyMongoError: - # Make another attempt to kill the cursor later. - self.__kill_cursors_queue.append((address, [cursor_id])) - - def kill_cursors(self, cursor_ids, address=None): - """DEPRECATED - Send a kill cursors message soon with the given ids. - - Raises :class:`TypeError` if `cursor_ids` is not an instance of - ``list``. - - :Parameters: - - `cursor_ids`: list of cursor ids to kill - - `address` (optional): (host, port) pair of the cursor's server. - If it is not provided, the client attempts to close the cursor on - the primary or standalone, or a mongos server. - - .. versionchanged:: 3.3 - Deprecated. - - .. versionchanged:: 3.0 - Now accepts an `address` argument. Schedules the cursors to be - closed on a background thread instead of sending the message - immediately. - """ - warnings.warn( - "kill_cursors is deprecated.", - DeprecationWarning, - stacklevel=2) - - if not isinstance(cursor_ids, list): - raise TypeError("cursor_ids must be a list") - - # "Atomic", needs no lock. - self.__kill_cursors_queue.append((address, cursor_ids)) + try: + self._kill_cursors( + [cursor_id], address, self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self.__kill_cursors_queue.append((address, [cursor_id])) def _kill_cursors(self, cursor_ids, address, topology, session): """Send a kill cursors message with the given ids.""" diff --git a/test/test_cursor_manager.py b/test/test_cursor_manager.py deleted file mode 100644 index 1b0114028d..0000000000 --- a/test/test_cursor_manager.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the cursor_manager module.""" - -import sys -import warnings - -sys.path[0:0] = [""] - -from pymongo.cursor_manager import CursorManager -from pymongo.errors import CursorNotFound -from pymongo.message import _CursorAddress -from test import (client_context, - client_knobs, - unittest, - IntegrationTest, - SkipTest) -from test.utils import rs_or_single_client, wait_until - - -class TestCursorManager(IntegrationTest): - - @classmethod - def setUpClass(cls): - super(TestCursorManager, cls).setUpClass() - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - cls.collection = cls.db.test - cls.collection.drop() - - # Ensure two batches. - cls.collection.insert_many([{'_id': i} for i in range(200)]) - - @classmethod - def tearDownClass(cls): - cls.warn_context.__exit__() - cls.warn_context = None - cls.collection.drop() - - def test_cursor_manager_validation(self): - with self.assertRaises(TypeError): - client_context.client.set_cursor_manager(1) - - def test_cursor_manager(self): - self.close_was_called = False - - test_case = self - - class CM(CursorManager): - def __init__(self, client): - super(CM, self).__init__(client) - - def close(self, cursor_id, address): - test_case.close_was_called = True - super(CM, self).close(cursor_id, address) - - with client_knobs(kill_cursor_frequency=0.01): - client = rs_or_single_client(maxPoolSize=1) - client.set_cursor_manager(CM) - - # Create a cursor on the same client so we're certain the getMore - # is sent after the killCursors message. - cursor = client.pymongo_test.test.find().batch_size(1) - next(cursor) - client.close_cursor( - cursor.cursor_id, - _CursorAddress(self.client.address, self.collection.full_name)) - - def raises_cursor_not_found(): - try: - next(cursor) - return False - except CursorNotFound: - return True - - wait_until(raises_cursor_not_found, 'close cursor') - self.assertTrue(self.close_was_called) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 3172ed2834..bd9a835528 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -858,6 +858,7 @@ class TestCollectionChangeStreamsWCustomTypes( @client_context.require_no_standalone def setUpClass(cls): super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() + cls.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -879,6 +880,7 @@ class TestDatabaseChangeStreamsWCustomTypes( @client_context.require_no_standalone def setUpClass(cls): super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() + cls.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -900,6 +902,7 @@ class TestClusterChangeStreamsWCustomTypes( @client_context.require_no_standalone def setUpClass(cls): super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() + cls.db.test.delete_many({}) def tearDown(self): self.input_target.drop() diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index e0a5e2ac8d..1c909c9043 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -1366,63 +1366,6 @@ def run(self): self.assertEqual(10001, coll.count()) coll.drop() - def test_kill_cursors_with_cursoraddress(self): - coll = self.client.pymongo_test.test - coll.drop() - - coll.insert_many([{'_id': i} for i in range(200)]) - cursor = coll.find().batch_size(1) - next(cursor) - self.client.kill_cursors( - [cursor.cursor_id], - _CursorAddress(self.client.address, coll.full_name)) - - # Prevent killcursors from reaching the server while a getmore is in - # progress -- the server logs "Assertion: 16089:Cannot kill active - # cursor." - time.sleep(2) - - def raises_cursor_not_found(): - try: - next(cursor) - return False - except CursorNotFound: - return True - - wait_until(raises_cursor_not_found, 'close cursor') - - def test_kill_cursors_with_tuple(self): - # Some evergreen distros (Debian 7.1) still test against 3.6.5 where - # OP_KILL_CURSORS does not work. - if (client_context.is_mongos and client_context.auth_enabled and - (3, 6, 0) <= client_context.version < (3, 6, 6)): - raise SkipTest("SERVER-33553 This server version does not support " - "OP_KILL_CURSORS") - - coll = self.client.pymongo_test.test - coll.drop() - - coll.insert_many([{'_id': i} for i in range(200)]) - cursor = coll.find().batch_size(1) - next(cursor) - self.client.kill_cursors( - [cursor.cursor_id], - self.client.address) - - # Prevent killcursors from reaching the server while a getmore is in - # progress -- the server logs "Assertion: 16089:Cannot kill active - # cursor." - time.sleep(2) - - def raises_cursor_not_found(): - try: - next(cursor) - return False - except CursorNotFound: - return True - - wait_until(raises_cursor_not_found, 'close cursor') - class TestLegacyBulk(BulkTestBase): From da620c7671e43c912b27cc500bb66684404b995a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Jan 2021 12:35:18 -0800 Subject: [PATCH 0279/2111] PYTHON-2506 Fix versioned API test for db.aggregate --- test/versioned-api/crud-api-version-1-strict.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json index 9604368a16..a2eb02e432 100644 --- a/test/versioned-api/crud-api-version-1-strict.json +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -141,7 +141,6 @@ }, { "description": "aggregate on database appends declared API version", - "skipReason": "DRIVERS-1505 $listLocalSessions is not supported in API version 1", "operations": [ { "name": "aggregate", @@ -155,6 +154,9 @@ "$limit": 1 } ] + }, + "expectError": { + "errorCodeName": "APIStrictError" } } ], From 88bb1b4608ed87754436cc65e46808129a377479 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 14 Jan 2021 20:33:02 -0800 Subject: [PATCH 0280/2111] PYTHON-2133 Remove Py2 compatibility from gridfs --- gridfs/__init__.py | 15 ++++++++------- gridfs/grid_file.py | 20 ++++++++++---------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index f291e8c9ca..ab8a5a02bb 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -20,7 +20,14 @@ .. mongodoc:: gridfs """ -from bson.py3compat import abc +from collections import abc + +from pymongo import (ASCENDING, + DESCENDING) +from pymongo.common import UNAUTHORIZED_CODES, validate_string +from pymongo.database import Database +from pymongo.errors import ConfigurationError, OperationFailure + from gridfs.errors import NoFile from gridfs.grid_file import (GridIn, GridOut, @@ -28,12 +35,6 @@ DEFAULT_CHUNK_SIZE, _clear_entity_type_registry, _disallow_transactions) -from pymongo import (ASCENDING, - DESCENDING) -from pymongo.common import UNAUTHORIZED_CODES, validate_string -from pymongo.database import Database -from pymongo.errors import ConfigurationError, OperationFailure - class GridFS(object): """An instance of GridFS on top of a single Database. diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 23c629be68..e49ca472af 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -23,8 +23,6 @@ from bson.son import SON from bson.binary import Binary from bson.objectid import ObjectId -from bson.py3compat import text_type, StringIO -from gridfs.errors import CorruptGridFile, FileExists, NoFile from pymongo import ASCENDING from pymongo.collection import Collection from pymongo.cursor import Cursor @@ -35,6 +33,8 @@ OperationFailure) from pymongo.read_preferences import ReadPreference +from gridfs.errors import CorruptGridFile, FileExists, NoFile + try: _SEEK_SET = os.SEEK_SET _SEEK_CUR = os.SEEK_CUR @@ -195,7 +195,7 @@ def __init__( object.__setattr__(self, "_coll", coll) object.__setattr__(self, "_chunks", coll.chunks) object.__setattr__(self, "_file", kwargs) - object.__setattr__(self, "_buffer", StringIO()) + object.__setattr__(self, "_buffer", io.BytesIO()) object.__setattr__(self, "_position", 0) object.__setattr__(self, "_chunk_number", 0) object.__setattr__(self, "_closed", False) @@ -297,7 +297,7 @@ def __flush_buffer(self): """ self.__flush_data(self._buffer.getvalue()) self._buffer.close() - self._buffer = StringIO() + self._buffer = io.BytesIO() def __flush(self): """Flush the file to the database. @@ -369,15 +369,15 @@ def write(self, data): read = data.read except AttributeError: # string - if not isinstance(data, (text_type, bytes)): + if not isinstance(data, (str, bytes)): raise TypeError("can only write strings or file-like objects") - if isinstance(data, text_type): + if isinstance(data, str): try: data = data.encode(self.encoding) except AttributeError: raise TypeError("must specify an encoding for file in " - "order to write %s" % (text_type.__name__,)) - read = StringIO(data).read + "order to write str") + read = io.BytesIO(data).read if self._buffer.tell() > 0: # Make sure to flush only when _buffer is complete @@ -560,7 +560,7 @@ def read(self, size=-1): return EMPTY received = 0 - data = StringIO() + data = io.BytesIO() while received < size: chunk_data = self.readchunk() received += len(chunk_data) @@ -595,7 +595,7 @@ def readline(self, size=-1): return EMPTY received = 0 - data = StringIO() + data = io.BytesIO() while received < size: chunk_data = self.readchunk() pos = chunk_data.find(NEWLN, 0, size) From 6c2d62900673702a2ee440b7fc69f259f25b8e75 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 15 Jan 2021 17:30:13 -0800 Subject: [PATCH 0281/2111] PYTHON-2133 Remove Py2 compatibility from pymongo --- pymongo/auth.py | 132 ++++++----------------------------- pymongo/client_session.py | 9 +-- pymongo/collection.py | 48 ++++++------- pymongo/command_cursor.py | 5 +- pymongo/common.py | 41 +++++------ pymongo/cursor.py | 23 +++--- pymongo/daemon.py | 35 ++-------- pymongo/database.py | 56 ++++++--------- pymongo/driver_info.py | 9 +-- pymongo/errors.py | 15 ---- pymongo/helpers.py | 21 +++--- pymongo/ismaster.py | 3 +- pymongo/message.py | 18 ++--- pymongo/mongo_client.py | 12 ++-- pymongo/monitor.py | 9 +-- pymongo/monitoring.py | 3 +- pymongo/network.py | 64 +++++------------ pymongo/operations.py | 10 ++- pymongo/pool.py | 89 ++++++----------------- pymongo/pyopenssl_context.py | 9 +-- pymongo/read_concern.py | 4 +- pymongo/read_preferences.py | 5 +- pymongo/saslprep.py | 5 +- pymongo/son_manipulator.py | 3 +- pymongo/srv_resolver.py | 18 ++--- pymongo/ssl_support.py | 14 ++-- pymongo/topology.py | 13 ++-- pymongo/uri_parser.py | 17 ++--- pymongo/write_concern.py | 7 +- 29 files changed, 218 insertions(+), 479 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index 3052b73d9b..1e31e25949 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -20,10 +20,15 @@ import os import socket -try: - from urllib import quote -except ImportError: - from urllib.parse import quote +from base64 import standard_b64decode, standard_b64encode +from collections import namedtuple +from urllib.parse import quote + +from bson.binary import Binary +from bson.son import SON +from pymongo.auth_aws import _authenticate_aws +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep HAVE_KERBEROS = True _USE_PRINCIPAL = False @@ -37,16 +42,6 @@ except ImportError: HAVE_KERBEROS = False -from base64 import standard_b64decode, standard_b64encode -from collections import namedtuple - -from bson.binary import Binary -from bson.py3compat import string_type, _unicode, PY3 -from bson.son import SON -from pymongo.auth_aws import _authenticate_aws -from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.saslprep import saslprep - MECHANISMS = frozenset( ['GSSAPI', @@ -159,94 +154,9 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): mech, source_database, user, passwd, None, _Cache()) -if PY3: - def _xor(fir, sec): - """XOR two byte strings together (python 3.x).""" - return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) - - - _from_bytes = int.from_bytes - _to_bytes = int.to_bytes -else: - from binascii import (hexlify as _hexlify, - unhexlify as _unhexlify) - - - def _xor(fir, sec): - """XOR two byte strings together (python 2.x).""" - return b"".join([chr(ord(x) ^ ord(y)) for x, y in zip(fir, sec)]) - - - def _from_bytes(value, dummy, _int=int, _hexlify=_hexlify): - """An implementation of int.from_bytes for python 2.x.""" - return _int(_hexlify(value), 16) - - - def _to_bytes(value, length, dummy, _unhexlify=_unhexlify): - """An implementation of int.to_bytes for python 2.x.""" - fmt = '%%0%dx' % (2 * length,) - return _unhexlify(fmt % value) - - -try: - # The fastest option, if it's been compiled to use OpenSSL's HMAC. - from backports.pbkdf2 import pbkdf2_hmac as _hi -except ImportError: - try: - # Python 2.7.8+, or Python 3.4+. - from hashlib import pbkdf2_hmac as _hi - except ImportError: - - def _hi(hash_name, data, salt, iterations): - """A simple implementation of PBKDF2-HMAC.""" - mac = hmac.HMAC(data, None, getattr(hashlib, hash_name)) - - def _digest(msg, mac=mac): - """Get a digest for msg.""" - _mac = mac.copy() - _mac.update(msg) - return _mac.digest() - - from_bytes = _from_bytes - to_bytes = _to_bytes - - _u1 = _digest(salt + b'\x00\x00\x00\x01') - _ui = from_bytes(_u1, 'big') - for _ in range(iterations - 1): - _u1 = _digest(_u1) - _ui ^= from_bytes(_u1, 'big') - return to_bytes(_ui, mac.digest_size, 'big') - -try: - from hmac import compare_digest -except ImportError: - if PY3: - def _xor_bytes(a, b): - return a ^ b - else: - def _xor_bytes(a, b, _ord=ord): - return _ord(a) ^ _ord(b) - - # Python 2.x < 2.7.7 - # Note: This method is intentionally obtuse to prevent timing attacks. Do - # not refactor it! - # References: - # - http://bugs.python.org/issue14532 - # - http://bugs.python.org/issue14955 - # - http://bugs.python.org/issue15061 - def compare_digest(a, b, _xor_bytes=_xor_bytes): - left = None - right = b - if len(a) == len(b): - left = a - result = 0 - if len(a) != len(b): - left = b - result = 1 - - for x, y in zip(left, right): - result |= _xor_bytes(x, y) - return result == 0 +def _xor(fir, sec): + """XOR two byte strings together (python 3.x).""" + return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) def _parse_scram_response(response): @@ -313,7 +223,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Salt and / or iterations could change for a number of different # reasons. Either changing invalidates the cache. if not client_key or salt != csalt or iterations != citerations: - salted_pass = _hi( + salted_pass = hashlib.pbkdf2_hmac( digest, data, standard_b64decode(salt), iterations) client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() @@ -333,7 +243,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): res = sock_info.command(source, cmd) parsed = _parse_scram_response(res['payload']) - if not compare_digest(parsed[b'v'], server_sig): + if not hmac.compare_digest(parsed[b'v'], server_sig): raise OperationFailure("Server returned an invalid signature.") # A third empty challenge may be required if the server does not support @@ -350,19 +260,17 @@ def _authenticate_scram(credentials, sock_info, mechanism): def _password_digest(username, password): """Get a password digest to use for authentication. """ - if not isinstance(password, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(password, str): + raise TypeError("password must be an instance of str") if len(password) == 0: raise ValueError("password can't be empty") - if not isinstance(username, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(username, str): + raise TypeError("username must be an instance of str") md5hash = hashlib.md5() data = "%s:mongo:%s" % (username, password) md5hash.update(data.encode('utf-8')) - return _unicode(md5hash.hexdigest()) + return md5hash.hexdigest() def _auth_key(nonce, username, password): @@ -372,7 +280,7 @@ def _auth_key(nonce, username, password): md5hash = hashlib.md5() data = "%s%s%s" % (nonce, username, digest) md5hash.update(data.encode('utf-8')) - return _unicode(md5hash.hexdigest()) + return md5hash.hexdigest() def _canonicalize_hostname(hostname): diff --git a/pymongo/client_session.py b/pymongo/client_session.py index c88f3f2776..13a70840db 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -100,9 +100,10 @@ import collections import uuid +from collections.abc import Mapping as _Mapping + from bson.binary import Binary from bson.int64 import Int64 -from bson.py3compat import abc, integer_types from bson.son import SON from bson.timestamp import Timestamp @@ -157,7 +158,7 @@ def default_transaction_options(self): class TransactionOptions(object): """Options for :meth:`ClientSession.start_transaction`. - + :Parameters: - `read_concern` (optional): The :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. @@ -208,7 +209,7 @@ def __init__(self, read_concern=None, write_concern=None, "pymongo.read_preferences for valid " "options." % (read_preference,)) if max_commit_time_ms is not None: - if not isinstance(max_commit_time_ms, integer_types): + if not isinstance(max_commit_time_ms, int): raise TypeError( "max_commit_time_ms must be an integer or None") @@ -715,7 +716,7 @@ def advance_cluster_time(self, cluster_time): :data:`~pymongo.client_session.ClientSession.cluster_time` from another `ClientSession` instance. """ - if not isinstance(cluster_time, abc.Mapping): + if not isinstance(cluster_time, _Mapping): raise TypeError( "cluster_time must be a subclass of collections.Mapping") if not isinstance(cluster_time.get("clusterTime"), Timestamp): diff --git a/pymongo/collection.py b/pymongo/collection.py index 025c1723e9..016de4fad2 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -17,12 +17,10 @@ import datetime import warnings +from collections import abc + from bson.code import Code from bson.objectid import ObjectId -from bson.py3compat import (_unicode, - abc, - integer_types, - string_type) from bson.raw_bson import RawBSONDocument from bson.codec_options import CodecOptions from bson.son import SON @@ -162,9 +160,8 @@ def __init__(self, database, name, create=False, codec_options=None, write_concern or database.write_concern, read_concern or database.read_concern) - if not isinstance(name, string_type): - raise TypeError("name must be an instance " - "of %s" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name must be an instance of str") if not name or ".." in name: raise InvalidName("collection names cannot be empty") @@ -181,7 +178,7 @@ def __init__(self, database, name, create=False, codec_options=None, collation = validate_collation_or_none(kwargs.pop('collation', None)) self.__database = database - self.__name = _unicode(name) + self.__name = name self.__full_name = _UJOIN % (self.__database.name, self.__name) if create or kwargs or collation: self.__create(kwargs, collation, session) @@ -805,7 +802,7 @@ def _update(self, sock_info, criteria, document, upsert=False, elif not acknowledged: raise ConfigurationError( 'hint is unsupported for unacknowledged writes.') - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) update_doc['hint'] = hint @@ -1156,7 +1153,7 @@ def _delete( elif not acknowledged: raise ConfigurationError( 'hint is unsupported for unacknowledged writes.') - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) delete_doc['hint'] = hint command = SON([('delete', self.name), @@ -1686,7 +1683,7 @@ def count_documents(self, filter, session=None, **kwargs): cmd = SON([('aggregate', self.__name), ('pipeline', pipeline), ('cursor', {})]) - if "hint" in kwargs and not isinstance(kwargs["hint"], string_type): + if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers._index_document(kwargs["hint"]) collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) @@ -1774,7 +1771,7 @@ def count(self, filter=None, session=None, **kwargs): if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") kwargs["query"] = filter - if "hint" in kwargs and not isinstance(kwargs["hint"], string_type): + if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers._index_document(kwargs["hint"]) collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) @@ -1983,7 +1980,7 @@ def ensure_index(self, key_or_list, cache_for=300, **kwargs): warnings.warn("ensure_index is deprecated. Use create_index instead.", DeprecationWarning, stacklevel=2) # The types supported by datetime.timedelta. - if not (isinstance(cache_for, integer_types) or + if not (isinstance(cache_for, int) or isinstance(cache_for, float)): raise TypeError("cache_for must be an integer or float.") @@ -2079,8 +2076,8 @@ def drop_index(self, index_or_name, session=None, **kwargs): if isinstance(index_or_name, list): name = helpers._gen_index_name(index_or_name) - if not isinstance(name, string_type): - raise TypeError("index_or_name must be an index name or list") + if not isinstance(name, str): + raise TypeError("index_or_name must be an instance of str or list") self.__database.client._purge_index( self.__database.name, self.__name, name) @@ -2537,7 +2534,7 @@ def group(self, key, condition, initial, reduce, finalize=None, **kwargs): "stage or the map_reduce method instead.", DeprecationWarning, stacklevel=2) group = {} - if isinstance(key, string_type): + if isinstance(key, str): group["$keyf"] = Code(key) elif key is not None: group = {"key": helpers._fields_list_to_dict(key, "key")} @@ -2586,9 +2583,8 @@ def rename(self, new_name, session=None, **kwargs): when connected to MongoDB >= 3.4. """ - if not isinstance(new_name, string_type): - raise TypeError("new_name must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(new_name, str): + raise TypeError("new_name must be an instance of str") if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") @@ -2645,9 +2641,8 @@ def distinct(self, key, filter=None, session=None, **kwargs): Support the `collation` option. """ - if not isinstance(key, string_type): - raise TypeError("key must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(key, str): + raise TypeError("key must be an instance of str") cmd = SON([("distinct", self.__name), ("key", key)]) if filter is not None: @@ -2759,9 +2754,8 @@ def map_reduce(self, map, reduce, out, full_response=False, session=None, .. mongodoc:: mapreduce """ - if not isinstance(out, (string_type, abc.Mapping)): - raise TypeError("'out' must be an instance of " - "%s or a mapping" % (string_type.__name__,)) + if not isinstance(out, (str, abc.Mapping)): + raise TypeError("'out' must be an instance of str or a mapping") response = self._map_reduce(map, reduce, out, session, ReadPreference.PRIMARY, **kwargs) @@ -2847,7 +2841,7 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, common.validate_boolean("upsert", upsert) cmd["upsert"] = upsert if hint is not None: - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) write_concern = self._write_concern_for_cmd(cmd, session) @@ -3264,7 +3258,7 @@ def remove(self, spec_or_id=None, multi=True, **kwargs): return self._delete_retryable( spec_or_id, multi, write_concern, collation=collation) - def find_and_modify(self, query={}, update=None, + def find_and_modify(self, query=None, update=None, upsert=False, sort=None, full_response=False, manipulate=False, **kwargs): """Update and return an object. diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 196d94dda5..4c82f754ad 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -16,7 +16,6 @@ from collections import deque -from bson.py3compat import integer_types from pymongo.errors import (ConnectionFailure, InvalidOperation, NotMasterError, @@ -57,7 +56,7 @@ def __init__(self, collection, cursor_info, address, retrieved=0, self.batch_size(batch_size) - if (not isinstance(max_await_time_ms, integer_types) + if (not isinstance(max_await_time_ms, int) and max_await_time_ms is not None): raise TypeError("max_await_time_ms must be an integer or None") @@ -108,7 +107,7 @@ def batch_size(self, batch_size): :Parameters: - `batch_size`: The size of each batch of results requested. """ - if not isinstance(batch_size, integer_types): + if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") diff --git a/pymongo/common.py b/pymongo/common.py index a416dcd405..9e61c328c5 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -18,10 +18,12 @@ import datetime import warnings +from collections import abc, OrderedDict +from urllib.parse import unquote_plus + from bson import SON from bson.binary import UuidRepresentation from bson.codec_options import CodecOptions, TypeRegistry -from bson.py3compat import abc, integer_types, iteritems, string_type, PY3 from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS from pymongo.compression_support import (validate_compressors, @@ -37,16 +39,7 @@ validate_allow_invalid_certs) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern -try: - from collections import OrderedDict - ORDERED_TYPES = (SON, OrderedDict) -except ImportError: - ORDERED_TYPES = (SON,) - -if PY3: - from urllib.parse import unquote_plus -else: - from urllib import unquote_plus +ORDERED_TYPES = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024 ** 2) @@ -170,7 +163,7 @@ def validate_boolean(option, value): def validate_boolean_or_string(option, value): """Validates that value is True, False, 'true', or 'false'.""" - if isinstance(value, string_type): + if isinstance(value, str): if value not in ('true', 'false'): raise ValueError("The value of %s must be " "'true' or 'false'" % (option,)) @@ -181,9 +174,9 @@ def validate_boolean_or_string(option, value): def validate_integer(option, value): """Validates that 'value' is an integer (or basestring representation). """ - if isinstance(value, integer_types): + if isinstance(value, int): return value - elif isinstance(value, string_type): + elif isinstance(value, str): try: return int(value) except ValueError: @@ -244,10 +237,10 @@ def validate_string(option, value): """Validates that 'value' is an instance of `basestring` for Python 2 or `str` for Python 3. """ - if isinstance(value, string_type): + if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be " - "an instance of %s" % (option, string_type.__name__)) + raise TypeError("Wrong type for %s, value must be an instance of " + "str" % (option,)) def validate_string_or_none(option, value): @@ -261,9 +254,9 @@ def validate_string_or_none(option, value): def validate_int_or_basestring(option, value): """Validates that 'value' is an integer or string. """ - if isinstance(value, integer_types): + if isinstance(value, int): return value - elif isinstance(value, string_type): + elif isinstance(value, str): try: return int(value) except ValueError: @@ -275,9 +268,9 @@ def validate_int_or_basestring(option, value): def validate_non_negative_int_or_basestring(option, value): """Validates that 'value' is an integer or string. """ - if isinstance(value, integer_types): + if isinstance(value, int): return value - elif isinstance(value, string_type): + elif isinstance(value, str): try: val = int(value) except ValueError: @@ -722,7 +715,7 @@ def validate_tzinfo(dummy, value): # Augment the option validator map with pymongo-specific option information. URI_OPTIONS_VALIDATOR_MAP.update(NONSPEC_OPTIONS_VALIDATOR_MAP) -for optname, aliases in iteritems(URI_OPTIONS_ALIAS_MAP): +for optname, aliases in URI_OPTIONS_ALIAS_MAP.items(): for alias in aliases: if alias not in URI_OPTIONS_VALIDATOR_MAP: URI_OPTIONS_VALIDATOR_MAP[alias] = ( @@ -785,7 +778,7 @@ def get_validated_options(options, warn=True): get_normed_key = lambda x: x.lower() get_setter_key = lambda x: x - for opt, value in iteritems(options): + for opt, value in options.items(): normed_key = get_normed_key(opt) try: validator = URI_OPTIONS_VALIDATOR_MAP.get( @@ -972,4 +965,4 @@ def update(self, other): self[key] = other[key] def cased_key(self, key): - return self.__casedkeys[key.lower()] \ No newline at end of file + return self.__casedkeys[key.lower()] diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 9a464df42b..7ada005d8b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -21,9 +21,6 @@ from bson import RE_TYPE from bson.code import Code -from bson.py3compat import (iteritems, - integer_types, - string_type) from bson.son import SON from pymongo import helpers from pymongo.common import validate_boolean, validate_is_mapping @@ -156,7 +153,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, warnings.warn("the 'modifiers' parameter is deprecated", DeprecationWarning, stacklevel=2) validate_is_mapping("modifiers", modifiers) - if not isinstance(batch_size, integer_types): + if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") @@ -288,7 +285,7 @@ def _clone(self, deepcopy=True, base=None): "query_flags", "modifiers", "collation", "empty", "show_record_id", "return_key", "allow_disk_use", "snapshot", "exhaust") - data = dict((k, v) for k, v in iteritems(self.__dict__) + data = dict((k, v) for k, v in self.__dict__.items() if k.startswith('_Cursor__') and k[9:] in values_to_clone) if deepcopy: data = self._deepcopy(data) @@ -472,7 +469,7 @@ def limit(self, limit): .. mongodoc:: limit """ - if not isinstance(limit, integer_types): + if not isinstance(limit, int): raise TypeError("limit must be an integer") if self.__exhaust: raise InvalidOperation("Can't use limit and exhaust together.") @@ -501,7 +498,7 @@ def batch_size(self, batch_size): :Parameters: - `batch_size`: The size of each batch of results requested. """ - if not isinstance(batch_size, integer_types): + if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") @@ -522,7 +519,7 @@ def skip(self, skip): :Parameters: - `skip`: the number of results to skip """ - if not isinstance(skip, integer_types): + if not isinstance(skip, int): raise TypeError("skip must be an integer") if skip < 0: raise ValueError("skip must be >= 0") @@ -544,7 +541,7 @@ def max_time_ms(self, max_time_ms): :Parameters: - `max_time_ms`: the time limit after which the operation is aborted """ - if (not isinstance(max_time_ms, integer_types) + if (not isinstance(max_time_ms, int) and max_time_ms is not None): raise TypeError("max_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -569,7 +566,7 @@ def max_await_time_ms(self, max_await_time_ms): .. versionadded:: 3.2 """ - if (not isinstance(max_await_time_ms, integer_types) + if (not isinstance(max_await_time_ms, int) and max_await_time_ms is not None): raise TypeError("max_await_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -636,7 +633,7 @@ def __getitem__(self, index): self.__limit = limit return self - if isinstance(index, integer_types): + if isinstance(index, int): if index < 0: raise IndexError("Cursor instances do not support negative " "indices") @@ -877,7 +874,7 @@ def __set_hint(self, index): self.__hint = None return - if isinstance(index, string_type): + if isinstance(index, str): self.__hint = index else: self.__hint = helpers._index_document(index) @@ -1241,7 +1238,7 @@ def _deepcopy(self, x, memo=None): if not hasattr(x, 'items'): y, is_list, iterator = [], True, enumerate(x) else: - y, is_list, iterator = {}, False, iteritems(x) + y, is_list, iterator = {}, False, x.items() if memo is None: memo = {} diff --git a/pymongo/daemon.py b/pymongo/daemon.py index f066a02c23..521b4a6b4e 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -22,39 +22,18 @@ import os import subprocess import sys -import time # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) -if sys.version_info[0] < 3: - def _popen_wait(popen, timeout): - """Implement wait timeout support for Python 2.""" - from pymongo.monotonic import time as _time - deadline = _time() + timeout - # Initial delay of 1ms - delay = .0005 - while True: - returncode = popen.poll() - if returncode is not None: - return returncode - - remaining = deadline - _time() - if remaining <= 0: - # Just return None instead of raising an error. - return None - delay = min(delay * 2, remaining, .5) - time.sleep(delay) - -else: - def _popen_wait(popen, timeout): - """Implement wait timeout support for Python 3.""" - try: - return popen.wait(timeout=timeout) - except subprocess.TimeoutExpired: - # Silence TimeoutExpired errors. - return None +def _popen_wait(popen, timeout): + """Implement wait timeout support for Python 3.""" + try: + return popen.wait(timeout=timeout) + except subprocess.TimeoutExpired: + # Silence TimeoutExpired errors. + return None def _silence_resource_warning(popen): diff --git a/pymongo/database.py b/pymongo/database.py index 9ef03a58a3..a18867523c 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -16,10 +16,8 @@ import warnings -from bson.code import Code from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.dbref import DBRef -from bson.py3compat import iteritems, string_type, _unicode from bson.son import SON from pymongo import auth, common from pymongo.aggregation import _DatabaseAggregationCommand @@ -33,7 +31,6 @@ from pymongo.message import _first_batch from pymongo.read_preferences import ReadPreference from pymongo.son_manipulator import SONManipulator -from pymongo.write_concern import DEFAULT_WRITE_CONCERN _INDEX_REGEX = {"name": {"$regex": r"^(?!.*\$)"}} @@ -102,14 +99,13 @@ def __init__(self, client, name, codec_options=None, read_preference=None, write_concern or client.write_concern, read_concern or client.read_concern) - if not isinstance(name, string_type): - raise TypeError("name must be an instance " - "of %s" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name must be an instance of str") if name != '$external': _check_name(name) - self.__name = _unicode(name) + self.__name = name self.__client = client self.__incoming_manipulators = [] @@ -612,7 +608,7 @@ def _command(self, sock_info, command, slave_ok=False, value=1, check=True, write_concern=None, parse_write_concern_error=False, session=None, **kwargs): """Internal command helper.""" - if isinstance(command, string_type): + if isinstance(command, str): command = SON([(command, value)]) command.update(kwargs) @@ -887,15 +883,14 @@ def drop_collection(self, name_or_collection, session=None): if isinstance(name, Collection): name = name.name - if not isinstance(name, string_type): - raise TypeError("name_or_collection must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name_or_collection must be an instance of str") self.__client._purge_index(self.__name, name) with self.__client._socket_for_writes(session) as sock_info: return self._command( - sock_info, 'drop', value=_unicode(name), + sock_info, 'drop', value=name, allowable_errors=['ns not found', 26], write_concern=self._write_concern_for(session), parse_write_concern_error=True, @@ -937,11 +932,11 @@ def validate_collection(self, name_or_collection, if isinstance(name, Collection): name = name.name - if not isinstance(name, string_type): - raise TypeError("name_or_collection must be an instance of " - "%s or Collection" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name_or_collection must be an instance of str or " + "Collection") - cmd = SON([("validate", _unicode(name)), + cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) if background is not None: @@ -957,7 +952,7 @@ def validate_collection(self, name_or_collection, raise CollectionInvalid("%s invalid: %s" % (name, info)) # Sharded results elif "raw" in result: - for _, res in iteritems(result["raw"]): + for _, res in result["raw"].items(): if "result" in res: info = res["result"] if (info.find("exception") != -1 or @@ -1313,13 +1308,11 @@ def add_user(self, name, password=None, read_only=None, session=None, warnings.warn("add_user is deprecated and will be removed in PyMongo " "4.0. Use db.command with createUser or updateUser " "instead", DeprecationWarning, stacklevel=2) - if not isinstance(name, string_type): - raise TypeError("name must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(name, str): + raise TypeError("name must be an instance of str") if password is not None: - if not isinstance(password, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) + if not isinstance(password, str): + raise TypeError("password must be an instance of str") if len(password) == 0: raise ValueError("password can't be empty") if read_only is not None: @@ -1440,19 +1433,16 @@ def authenticate(self, name=None, password=None, .. mongodoc:: authenticate """ - if name is not None and not isinstance(name, string_type): - raise TypeError("name must be an " - "instance of %s" % (string_type.__name__,)) - if password is not None and not isinstance(password, string_type): - raise TypeError("password must be an " - "instance of %s" % (string_type.__name__,)) - if source is not None and not isinstance(source, string_type): - raise TypeError("source must be an " - "instance of %s" % (string_type.__name__,)) + if name is not None and not isinstance(name, str): + raise TypeError("name must be an instance of str") + if password is not None and not isinstance(password, str): + raise TypeError("password must be an instance of str") + if source is not None and not isinstance(source, str): + raise TypeError("source must be an instance of str") common.validate_auth_mechanism('mechanism', mechanism) validated_options = common._CaseInsensitiveDictionary() - for option, value in iteritems(kwargs): + for option, value in kwargs.items(): normalized, val = common.validate_auth_option(option, value) validated_options[normalized] = val diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 1f5235aca7..9ffcce4872 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -16,8 +16,6 @@ from collections import namedtuple -from bson.py3compat import string_type - class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): """Info about a driver wrapping PyMongo. @@ -30,10 +28,9 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): """ def __new__(cls, name=None, version=None, platform=None): self = super(DriverInfo, cls).__new__(cls, name, version, platform) - for name, value in self._asdict().items(): - if value is not None and not isinstance(value, string_type): + for key, value in self._asdict().items(): + if value is not None and not isinstance(value, str): raise TypeError("Wrong type for DriverInfo %s option, value " - "must be an instance of %s" % ( - name, string_type.__name__)) + "must be an instance of str" % (key,)) return self diff --git a/pymongo/errors.py b/pymongo/errors.py index fd55226ab3..257783f527 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -14,8 +14,6 @@ """Exceptions raised by PyMongo.""" -import sys - from bson.errors import * try: @@ -50,17 +48,6 @@ def _remove_error_label(self, label): """Remove the given label from this error.""" self._error_labels.discard(label) - if sys.version_info[0] == 2: - def __str__(self): - if isinstance(self._message, unicode): - return self._message.encode('utf-8', errors='replace') - return str(self._message) - - def __unicode__(self): - if isinstance(self._message, unicode): - return self._message - return unicode(self._message, 'utf-8', errors='replace') - class ProtocolError(PyMongoError): """Raised for failures related to the wire protocol.""" @@ -103,8 +90,6 @@ class NetworkTimeout(AutoReconnect): def _format_detailed_error(message, details): if details is not None: message = "%s, full error: %s" % (message, details) - if sys.version_info[0] == 2 and isinstance(message, unicode): - message = message.encode('utf-8', errors='replace') return message diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 46cecfccfa..899ab28a04 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -17,7 +17,8 @@ import sys import traceback -from bson.py3compat import abc, iteritems, itervalues, string_type +from collections import abc + from bson.son import SON from pymongo import ASCENDING from pymongo.errors import (CursorNotFound, @@ -68,7 +69,7 @@ def _index_list(key_or_list, direction=None): if direction is not None: return [(key_or_list, direction)] else: - if isinstance(key_or_list, string_type): + if isinstance(key_or_list, str): return [(key_or_list, ASCENDING)] elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, " @@ -84,7 +85,7 @@ def _index_document(index_list): if isinstance(index_list, abc.Mapping): raise TypeError("passing a dict to sort/create_index/hint is not " "allowed - use a list of tuples instead. did you " - "mean %r?" % list(iteritems(index_list))) + "mean %r?" % list(index_list.items())) elif not isinstance(index_list, (list, tuple)): raise TypeError("must use a list of (key, direction) pairs, " "not: " + repr(index_list)) @@ -93,9 +94,10 @@ def _index_document(index_list): index = SON() for (key, value) in index_list: - if not isinstance(key, string_type): - raise TypeError("first item in each key pair must be a string") - if not isinstance(value, (string_type, int, abc.Mapping)): + if not isinstance(key, str): + raise TypeError( + "first item in each key pair must be an instance of str") + if not isinstance(value, (str, int, abc.Mapping)): raise TypeError("second item in each key pair must be 1, -1, " "'2d', or another valid MongoDB index specifier.") index[key] = value @@ -128,7 +130,7 @@ def _check_command_response(response, max_wire_version, # Mongos returns the error details in a 'raw' object # for some errors. if "raw" in response: - for shard in itervalues(response["raw"]): + for shard in response["raw"].values(): # Grab the first non-empty raw error from a shard. if shard.get("errmsg") and not shard.get("ok"): details = shard @@ -257,10 +259,9 @@ def _fields_list_to_dict(fields, option_name): return fields if isinstance(fields, (abc.Sequence, abc.Set)): - if not all(isinstance(field, string_type) for field in fields): + if not all(isinstance(field, str) for field in fields): raise TypeError("%s must be a list of key names, each an " - "instance of %s" % (option_name, - string_type.__name__)) + "instance of str" % (option_name,)) return dict.fromkeys(fields, 1) raise TypeError("%s must be a mapping or " diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index a273ffab8f..5654ef6066 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -16,7 +16,6 @@ import itertools -from bson.py3compat import imap from pymongo import common from pymongo.server_type import SERVER_TYPE @@ -78,7 +77,7 @@ def server_type(self): @property def all_hosts(self): """List of hosts, passives, and arbiters known to this server.""" - return set(imap(common.clean_node, itertools.chain( + return set(map(common.clean_node, itertools.chain( self._doc.get('hosts', []), self._doc.get('passives', []), self._doc.get('arbiters', [])))) diff --git a/pymongo/message.py b/pymongo/message.py index 5df1c87074..07bcbd6def 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,15 +24,15 @@ import random import struct +from io import BytesIO as _BytesIO + import bson from bson import (CodecOptions, - decode, encode, _dict_to_bson, _make_c_string) from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.raw_bson import _inflate_bson, DEFAULT_RAW_BSON_OPTIONS -from bson.py3compat import b, StringIO from bson.son import SON try: @@ -1111,7 +1111,7 @@ def _insert_message(insert_message, send_safe): send_safe = safe or not continue_on_error last_error = None - data = StringIO() + data = _BytesIO() data.write(struct.pack(" deadline: raise socket.timeout("timed out") -# memoryview was introduced in Python 2.7 but we only use it on Python 3 -# because before 2.7.4 the struct module did not support memoryview: -# https://bugs.python.org/issue10212. -# In Jython, using slice assignment on a memoryview results in a -# NullPointerException. -if not PY3: - def _receive_data_on_socket(sock_info, length, deadline): - buf = bytearray(length) - i = 0 - while length: - try: - wait_for_read(sock_info, deadline) - chunk = sock_info.sock.recv(length) - except (IOError, OSError) as exc: - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk == b"": - raise AutoReconnect("connection closed") - - buf[i:i + len(chunk)] = chunk - i += len(chunk) - length -= len(chunk) - - return bytes(buf) -else: - def _receive_data_on_socket(sock_info, length, deadline): - buf = bytearray(length) - mv = memoryview(buf) - bytes_read = 0 - while bytes_read < length: - try: - wait_for_read(sock_info, deadline) - chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) - except (IOError, OSError) as exc: - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk_length == 0: - raise AutoReconnect("connection closed") - - bytes_read += chunk_length - - return mv +def _receive_data_on_socket(sock_info, length, deadline): + buf = bytearray(length) + mv = memoryview(buf) + bytes_read = 0 + while bytes_read < length: + try: + wait_for_read(sock_info, deadline) + chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) + except (IOError, OSError) as exc: + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise AutoReconnect("connection closed") + + bytes_read += chunk_length + + return mv diff --git a/pymongo/operations.py b/pymongo/operations.py index ea6ee2fb2b..8cf850ef55 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -14,8 +14,6 @@ """Operation class definitions.""" -from bson.py3compat import string_type - from pymongo import helpers from pymongo.common import validate_boolean, validate_is_mapping, validate_list from pymongo.collation import validate_collation_or_none @@ -84,7 +82,7 @@ def __init__(self, filter, collation=None, hint=None): if filter is not None: validate_is_mapping("filter", filter) if hint is not None: - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) self._filter = filter self._collation = collation @@ -138,7 +136,7 @@ def __init__(self, filter, collation=None, hint=None): if filter is not None: validate_is_mapping("filter", filter) if hint is not None: - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) self._filter = filter self._collation = collation @@ -198,7 +196,7 @@ def __init__(self, filter, replacement, upsert=False, collation=None, if upsert is not None: validate_boolean("upsert", upsert) if hint is not None: - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) self._filter = filter @@ -243,7 +241,7 @@ def __init__(self, filter, doc, upsert, collation, array_filters, hint): if array_filters is not None: validate_list("array_filters", array_filters) if hint is not None: - if not isinstance(hint, string_type): + if not isinstance(hint, str): hint = helpers._index_document(hint) diff --git a/pymongo/pool.py b/pymongo/pool.py index 9c0c4066d6..472a9bf042 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -14,6 +14,7 @@ import contextlib import copy +import ipaddress import os import platform import socket @@ -21,14 +22,7 @@ import threading import collections - -from pymongo.ssl_support import ( - SSLError as _SSLError, - HAS_SNI as _HAVE_SNI, - IPADDR_SAFE as _IPADDR_SAFE) - from bson import DEFAULT_CODEC_OPTIONS -from bson.py3compat import imap, itervalues, _unicode, PY3 from bson.son import SON from pymongo import auth, helpers, __version__ from pymongo.client_session import _validate_session_write_concern @@ -65,51 +59,19 @@ from pymongo.socket_checker import SocketChecker # Always use our backport so we always have support for IP address matching from pymongo.ssl_match_hostname import match_hostname +from pymongo.ssl_support import ( + SSLError as _SSLError, + HAS_SNI as _HAVE_SNI, + IPADDR_SAFE as _IPADDR_SAFE) # For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are # not permitted for SNI hostname. -try: - from ipaddress import ip_address - def is_ip_address(address): - try: - ip_address(_unicode(address)) - return True - except (ValueError, UnicodeError): - return False -except ImportError: - if hasattr(socket, 'inet_pton') and socket.has_ipv6: - # Most *nix, recent Windows - def is_ip_address(address): - try: - # inet_pton rejects IPv4 literals with leading zeros - # (e.g. 192.168.0.01), inet_aton does not, and we - # can connect to them without issue. Use inet_aton. - socket.inet_aton(address) - return True - except socket.error: - try: - socket.inet_pton(socket.AF_INET6, address) - return True - except socket.error: - return False - else: - # No inet_pton - def is_ip_address(address): - try: - socket.inet_aton(address) - return True - except socket.error: - if ':' in address: - # ':' is not a valid character for a hostname. If we get - # here a few things have to be true: - # - We're on a recent version of python 2.7 (2.7.9+). - # Older 2.7 versions don't support SNI. - # - We're on Windows XP or some unusual Unix that doesn't - # have inet_pton. - # - The application is using IPv6 literals with TLS, which - # is pretty unusual. - return True - return False +def is_ip_address(address): + try: + ipaddress.ip_address(address) + return True + except (ValueError, UnicodeError): + return False try: from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC @@ -246,17 +208,17 @@ def _set_keepalive_times(sock): if platform.python_implementation().startswith('PyPy'): _METADATA['platform'] = ' '.join( (platform.python_implementation(), - '.'.join(imap(str, sys.pypy_version_info)), - '(Python %s)' % '.'.join(imap(str, sys.version_info)))) + '.'.join(map(str, sys.pypy_version_info)), + '(Python %s)' % '.'.join(map(str, sys.version_info)))) elif sys.platform.startswith('java'): _METADATA['platform'] = ' '.join( (platform.python_implementation(), - '.'.join(imap(str, sys.version_info)), + '.'.join(map(str, sys.version_info)), '(%s)' % ' '.join((platform.system(), platform.release())))) else: _METADATA['platform'] = ' '.join( (platform.python_implementation(), - '.'.join(imap(str, sys.version_info)))) + '.'.join(map(str, sys.version_info)))) # If the first getaddrinfo call of this interpreter's life is on a thread, @@ -288,19 +250,10 @@ def _raise_connection_failure(address, error, msg_prefix=None): else: raise AutoReconnect(msg) -if PY3: - def _cond_wait(condition, deadline): - timeout = deadline - _time() if deadline else None - return condition.wait(timeout) -else: - def _cond_wait(condition, deadline): - timeout = deadline - _time() if deadline else None - condition.wait(timeout) - # Python 2.7 always returns False for wait(), - # manually check for a timeout. - if timeout and _time() >= deadline: - return False - return True + +def _cond_wait(condition, deadline): + timeout = deadline - _time() if deadline else None + return condition.wait(timeout) class PoolOptions(object): @@ -517,7 +470,7 @@ def _speculative_context(all_credentials): """Return the _AuthContext to use for speculative auth, if any. """ if all_credentials and len(all_credentials) == 1: - creds = next(itervalues(all_credentials)) + creds = next(iter(all_credentials.values())) return auth._AuthContext.from_credentials(creds) return None @@ -822,7 +775,7 @@ def check_auth(self, all_credentials): - `all_credentials`: dict, maps auth source to MongoCredential. """ if all_credentials or self.authset: - cached = set(itervalues(all_credentials)) + cached = set(all_credentials.values()) authset = self.authset.copy() # Logout any credentials that no longer exist in the cache. diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 10c35141fd..906e9db2df 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -32,9 +32,6 @@ CertificateError as _SICertificateError, VerificationError as _SIVerificationError) -from cryptography.hazmat.backends import default_backend as _default_backend - -from bson.py3compat import _unicode from pymongo.errors import CertificateError as _CertificateError from pymongo.monotonic import time as _time from pymongo.ocsp_support import ( @@ -72,7 +69,7 @@ def _is_ip_address(address): try: - _ip_address(_unicode(address)) + _ip_address(address) return True except (ValueError, UnicodeError): return False @@ -316,9 +313,9 @@ def wrap_socket(self, sock, server_side=False, if self.check_hostname and server_hostname is not None: try: if _is_ip_address(server_hostname): - _verify_ip_address(ssl_conn, _unicode(server_hostname)) + _verify_ip_address(ssl_conn, server_hostname) else: - _verify_hostname(ssl_conn, _unicode(server_hostname)) + _verify_hostname(ssl_conn, server_hostname) except (_SICertificateError, _SIVerificationError) as exc: raise _CertificateError(str(exc)) return ssl_conn diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index 3ba8c854a5..7e9cc4485c 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -14,8 +14,6 @@ """Tools for working with read concerns.""" -from bson.py3compat import string_type - class ReadConcern(object): """ReadConcern @@ -32,7 +30,7 @@ class ReadConcern(object): """ def __init__(self, level=None): - if level is None or isinstance(level, string_type): + if level is None or isinstance(level, str): self.__level = level else: raise TypeError( diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 5ba833e53f..6916cc5fac 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -14,7 +14,8 @@ """Utilities for choosing which member of a replica set to read from.""" -from bson.py3compat import abc, integer_types +from collections import abc + from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError from pymongo.server_selectors import (member_with_tags_server_selector, @@ -72,7 +73,7 @@ def _validate_max_staleness(max_staleness): if max_staleness == -1: return -1 - if not isinstance(max_staleness, integer_types): + if not isinstance(max_staleness, int): raise TypeError(_invalid_max_staleness_msg(max_staleness)) if max_staleness <= 0: diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index baa1a40663..02a407d2b6 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -14,7 +14,6 @@ """An implementation of RFC4013 SASLprep.""" -from bson.py3compat import text_type as _text_type try: import stringprep @@ -22,7 +21,7 @@ HAVE_STRINGPREP = False def saslprep(data): """SASLprep dummy""" - if isinstance(data, _text_type): + if isinstance(data, str): raise TypeError( "The stringprep module is not available. Usernames and " "passwords must be ASCII strings.") @@ -61,7 +60,7 @@ def saslprep(data, prohibit_unassigned_code_points=True): :Returns: The SASLprep'ed version of `data`. """ - if not isinstance(data, _text_type): + if not isinstance(data, str): return data if prohibit_unassigned_code_points: diff --git a/pymongo/son_manipulator.py b/pymongo/son_manipulator.py index f470d6f338..a371d64938 100644 --- a/pymongo/son_manipulator.py +++ b/pymongo/son_manipulator.py @@ -33,9 +33,10 @@ :meth:`~pymongo.collection.Collection.find_one_and_update`. """ +from collections import abc + from bson.dbref import DBRef from bson.objectid import ObjectId -from bson.py3compat import abc from bson.son import SON diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 3ccd529372..cf9ccffc43 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -20,22 +20,16 @@ except ImportError: _HAVE_DNSPYTHON = False -from bson.py3compat import PY3 - from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError -if PY3: - # dnspython can return bytes or str from various parts - # of its API depending on version. We always want str. - def maybe_decode(text): - if isinstance(text, bytes): - return text.decode() - return text -else: - def maybe_decode(text): - return text +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text): + if isinstance(text, bytes): + return text.decode() + return text class _SrvResolver(object): diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index b5847244c3..91cafa6d6f 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -18,7 +18,6 @@ import sys import threading -from bson.py3compat import string_type from pymongo.errors import ConfigurationError HAVE_SSL = True @@ -65,7 +64,7 @@ def validate_cert_reqs(option, value): """ if value is None: return value - if isinstance(value, string_type) and hasattr(_stdlibssl, value): + if isinstance(value, str) and hasattr(_stdlibssl, value): value = getattr(_stdlibssl, value) if value in (CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED): @@ -106,7 +105,7 @@ def get_ssl_context(*args): check_ocsp_endpoint) = args verify_mode = CERT_REQUIRED if cert_reqs is None else cert_reqs ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) - # SSLContext.check_hostname was added in CPython 2.7.9 and 3.4. + # SSLContext.check_hostname was added in CPython 3.4. if hasattr(ctx, "check_hostname"): if _ssl.CHECK_HOSTNAME_SAFE and verify_mode != CERT_NONE: ctx.check_hostname = match_hostname @@ -143,18 +142,23 @@ def get_ssl_context(*args): if ca_certs is not None: ctx.load_verify_locations(ca_certs) elif cert_reqs != CERT_NONE: - # CPython >= 2.7.9 or >= 3.4.0, pypy >= 2.5.1 + # CPython 3.4+ ssl module only, doesn't exist in PyOpenSSL if hasattr(ctx, "load_default_certs"): ctx.load_default_certs() - # Python >= 3.2.0, useless on Windows. + # Always useless on Windows. elif (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")): ctx.set_default_verify_paths() + # This is needed with PyOpenSSL on Windows + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths elif sys.platform == "win32" and HAVE_WINCERTSTORE: with _WINCERTSLOCK: if _WINCERTS is None: _load_wincerts() ctx.load_verify_locations(_WINCERTS.name) + # This is necessary with PyOpenSSL on macOS when homebrew isn't + # installed. + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths elif HAVE_CERTIFI: ctx.load_verify_locations(certifi.where()) else: diff --git a/pymongo/topology.py b/pymongo/topology.py index 2fc455efb8..d8ea8f8bee 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -15,17 +15,12 @@ """Internal class to monitor a topology of one or more servers.""" import os +import queue import random import threading import warnings import weakref -from bson.py3compat import itervalues, PY3 -if PY3: - import queue as Queue -else: - import Queue - from pymongo import (common, helpers, periodic_executor) @@ -62,7 +57,7 @@ def process_events_queue(queue_ref): while True: try: event = q.get_nowait() - except Queue.Empty: + except queue.Empty: break else: fn, args = event @@ -85,7 +80,7 @@ def __init__(self, topology_settings): self.__events_executor = None if self._publish_server or self._publish_tp: - self._events = Queue.Queue(maxsize=100) + self._events = queue.Queue(maxsize=100) if self._publish_tp: self._events.put((self._listeners.publish_topology_opened, @@ -556,7 +551,7 @@ def _ensure_opened(self): self._srv_monitor.open() # Ensure that the monitors are open. - for server in itervalues(self._servers): + for server in self._servers.values(): server.open() def _is_stale_error(self, address, err_ctx): diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 9c782ac1a4..a683b2fa6d 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -17,12 +17,7 @@ import re import warnings -from bson.py3compat import string_type, PY3 - -if PY3: - from urllib.parse import unquote_plus -else: - from urllib import unquote_plus +from urllib.parse import unquote_plus from pymongo.common import ( get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, @@ -53,12 +48,8 @@ def parse_userinfo(userinfo): Now uses `urllib.unquote_plus` so `+` characters must be escaped. """ if '@' in userinfo or userinfo.count(':') > 1: - if PY3: - quote_fn = "urllib.parse.quote_plus" - else: - quote_fn = "urllib.quote_plus" raise InvalidURI("Username and password must be escaped according to " - "RFC 3986, use %s()." % quote_fn) + "RFC 3986, use urllib.parse.quote_plus") user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. if not user: @@ -113,7 +104,7 @@ def parse_host(entity, default_port=DEFAULT_PORT): "address literal must be enclosed in '[' " "and ']' according to RFC 2732.") host, port = host.split(':', 1) - if isinstance(port, string_type): + if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,)) @@ -534,4 +525,4 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, pprint.pprint(parse_uri(sys.argv[1])) except InvalidURI as exc: print(exc) - sys.exit(0) \ No newline at end of file + sys.exit(0) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 14ad63de88..ebc997c0db 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -14,7 +14,6 @@ """Tools for working with write concerns.""" -from bson.py3compat import integer_types, string_type from pymongo.errors import ConfigurationError @@ -53,7 +52,7 @@ def __init__(self, w=None, wtimeout=None, j=None, fsync=None): self.__acknowledged = True if wtimeout is not None: - if not isinstance(wtimeout, integer_types): + if not isinstance(wtimeout, int): raise TypeError("wtimeout must be an integer") if wtimeout < 0: raise ValueError("wtimeout cannot be less than 0") @@ -76,11 +75,11 @@ def __init__(self, w=None, wtimeout=None, j=None, fsync=None): raise ConfigurationError("Cannot set w to 0 and j to True") if w is not None: - if isinstance(w, integer_types): + if isinstance(w, int): if w < 0: raise ValueError("w cannot be less than 0") self.__acknowledged = w > 0 - elif not isinstance(w, string_type): + elif not isinstance(w, str): raise TypeError("w must be an integer or string") self.__document["w"] = w From a72e8b88230465939b5e5cacaa9353bcb90d183a Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 20 Jan 2021 09:40:36 -0800 Subject: [PATCH 0282/2111] PYTHON-2133 Remove py2 support from test Also delete bson/py3compat.py --- bson/int64.py | 8 +- bson/py3compat.py | 107 -------------- test/barrier.py | 193 -------------------------- test/qcheck.py | 32 ++--- test/test_binary.py | 38 ++--- test/test_bson.py | 99 +++++-------- test/test_bson_corpus.py | 11 +- test/test_change_stream.py | 14 +- test/test_client.py | 3 +- test/test_collection.py | 10 +- test/test_crud_v1.py | 7 +- test/test_cursor.py | 19 +-- test/test_custom_types.py | 4 +- test/test_database.py | 18 +-- test/test_decimal128.py | 10 +- test/test_discovery_and_monitoring.py | 3 +- test/test_encryption.py | 7 +- test/test_errors.py | 5 +- test/test_grid_file.py | 12 +- test/test_gridfs.py | 47 +++++-- test/test_gridfs_bucket.py | 22 +-- test/test_gridfs_spec.py | 9 +- test/test_json_util.py | 12 +- test/test_legacy_api.py | 19 +-- test/test_monitoring.py | 7 +- test/test_objectid.py | 13 +- test/test_read_preferences.py | 3 +- test/test_session.py | 5 +- test/test_son.py | 8 +- test/test_ssl.py | 3 - test/test_topology.py | 3 +- test/test_transactions.py | 8 +- test/test_uri_parser.py | 3 +- test/unicode/test_utf8.py | 47 ------- test/unified_format.py | 30 ++-- test/utils.py | 37 ++--- test/utils_spec_runner.py | 17 +-- 37 files changed, 203 insertions(+), 690 deletions(-) delete mode 100644 bson/py3compat.py delete mode 100644 test/barrier.py diff --git a/bson/int64.py b/bson/int64.py index 77e9812304..4fce5ad7bd 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -14,13 +14,7 @@ """A BSON wrapper for long (int in python3)""" -from bson.py3compat import PY3 - -if PY3: - long = int - - -class Int64(long): +class Int64(int): """Representation of the BSON int64 type. This is necessary because every integral number is an :class:`int` in diff --git a/bson/py3compat.py b/bson/py3compat.py deleted file mode 100644 index 84d1ea00fd..0000000000 --- a/bson/py3compat.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Utility functions and definitions for python3 compatibility.""" - -import sys - -PY3 = sys.version_info[0] == 3 - -if PY3: - import codecs - import collections.abc as abc - import _thread as thread - from abc import ABC, abstractmethod - from io import BytesIO as StringIO - - def abstractproperty(func): - return property(abstractmethod(func)) - - MAXSIZE = sys.maxsize - - imap = map - - def b(s): - # BSON and socket operations deal in binary data. In - # python 3 that means instances of `bytes`. In python - # 2.7 you can create an alias for `bytes` using - # the b prefix (e.g. b'foo'). - # See http://python3porting.com/problems.html#nicer-solutions - return codecs.latin_1_encode(s)[0] - - def bytes_from_hex(h): - return bytes.fromhex(h) - - def iteritems(d): - return iter(d.items()) - - def itervalues(d): - return iter(d.values()) - - def reraise(exctype, value, trace=None): - raise exctype(str(value)).with_traceback(trace) - - def reraise_instance(exc_instance, trace=None): - raise exc_instance.with_traceback(trace) - - def _unicode(s): - return s - - text_type = str - string_type = str - integer_types = int -else: - import collections as abc - import thread - from abc import ABCMeta, abstractproperty - - from itertools import imap - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - - ABC = ABCMeta('ABC', (object,), {}) - - MAXSIZE = sys.maxint - - def b(s): - # See comments above. In python 2.x b('foo') is just 'foo'. - return s - - def bytes_from_hex(h): - return h.decode('hex') - - def iteritems(d): - return d.iteritems() - - def itervalues(d): - return d.itervalues() - - def reraise(exctype, value, trace=None): - _reraise(exctype, str(value), trace) - - def reraise_instance(exc_instance, trace=None): - _reraise(exc_instance, None, trace) - - # "raise x, y, z" raises SyntaxError in Python 3 - exec("""def _reraise(exc, value, trace): - raise exc, value, trace -""") - - _unicode = unicode - - string_type = basestring - text_type = unicode - integer_types = (int, long) diff --git a/test/barrier.py b/test/barrier.py deleted file mode 100644 index 7a614ca07c..0000000000 --- a/test/barrier.py +++ /dev/null @@ -1,193 +0,0 @@ -# Backport of the threading.Barrier class from python 3.8, with small -# changes to support python 2.7. -# https://github.com/python/cpython/blob/v3.8.2/Lib/threading.py#L562-L728 - -from threading import (Condition, - Lock) - -from pymongo.monotonic import time as _time - - -# Backport Condition.wait_for from 3.8.2 -# https://github.com/python/cpython/blob/v3.8.2/Lib/threading.py#L318-L339 -def wait_for(condition, predicate, timeout=None): - """Wait until a condition evaluates to True. - - predicate should be a callable which result will be interpreted as a - boolean value. A timeout may be provided giving the maximum time to - wait. - - """ - endtime = None - waittime = timeout - result = predicate() - while not result: - if waittime is not None: - if endtime is None: - endtime = _time() + waittime - else: - waittime = endtime - _time() - if waittime <= 0: - break - condition.wait(waittime) - result = predicate() - return result - - -# A barrier class. Inspired in part by the pthread_barrier_* api and -# the CyclicBarrier class from Java. See -# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and -# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/ -# CyclicBarrier.html -# for information. -# We maintain two main states, 'filling' and 'draining' enabling the barrier -# to be cyclic. Threads are not allowed into it until it has fully drained -# since the previous cycle. In addition, a 'resetting' state exists which is -# similar to 'draining' except that threads leave with a BrokenBarrierError, -# and a 'broken' state in which all threads get the exception. -class Barrier(object): - """Implements a Barrier. - Useful for synchronizing a fixed number of threads at known synchronization - points. Threads block on 'wait()' and are simultaneously awoken once they - have all made that call. - """ - - def __init__(self, parties, action=None, timeout=None): - """Create a barrier, initialised to 'parties' threads. - 'action' is a callable which, when supplied, will be called by one of - the threads after they have all entered the barrier and just prior to - releasing them all. If a 'timeout' is provided, it is used as the - default for all subsequent 'wait()' calls. - """ - self._cond = Condition(Lock()) - self._action = action - self._timeout = timeout - self._parties = parties - self._state = 0 #0 filling, 1, draining, -1 resetting, -2 broken - self._count = 0 - - def wait(self, timeout=None): - """Wait for the barrier. - When the specified number of threads have started waiting, they are all - simultaneously awoken. If an 'action' was provided for the barrier, one - of the threads will have executed that callback prior to returning. - Returns an individual index number from 0 to 'parties-1'. - """ - if timeout is None: - timeout = self._timeout - with self._cond: - self._enter() # Block while the barrier drains. - index = self._count - self._count += 1 - try: - if index + 1 == self._parties: - # We release the barrier - self._release() - else: - # We wait until someone releases us - self._wait(timeout) - return index - finally: - self._count -= 1 - # Wake up any threads waiting for barrier to drain. - self._exit() - - # Block until the barrier is ready for us, or raise an exception - # if it is broken. - def _enter(self): - while self._state in (-1, 1): - # It is draining or resetting, wait until done - self._cond.wait() - #see if the barrier is in a broken state - if self._state < 0: - raise BrokenBarrierError - assert self._state == 0 - - # Optionally run the 'action' and release the threads waiting - # in the barrier. - def _release(self): - try: - if self._action: - self._action() - # enter draining state - self._state = 1 - self._cond.notify_all() - except: - #an exception during the _action handler. Break and reraise - self._break() - raise - - # Wait in the barrier until we are released. Raise an exception - # if the barrier is reset or broken. - def _wait(self, timeout): - if not wait_for(self._cond, lambda : self._state != 0, timeout): - #timed out. Break the barrier - self._break() - raise BrokenBarrierError - if self._state < 0: - raise BrokenBarrierError - assert self._state == 1 - - # If we are the last thread to exit the barrier, signal any threads - # waiting for the barrier to drain. - def _exit(self): - if self._count == 0: - if self._state in (-1, 1): - #resetting or draining - self._state = 0 - self._cond.notify_all() - - def reset(self): - """Reset the barrier to the initial state. - Any threads currently waiting will get the BrokenBarrier exception - raised. - """ - with self._cond: - if self._count > 0: - if self._state == 0: - #reset the barrier, waking up threads - self._state = -1 - elif self._state == -2: - #was broken, set it to reset state - #which clears when the last thread exits - self._state = -1 - else: - self._state = 0 - self._cond.notify_all() - - def abort(self): - """Place the barrier into a 'broken' state. - Useful in case of error. Any currently waiting threads and threads - attempting to 'wait()' will have BrokenBarrierError raised. - """ - with self._cond: - self._break() - - def _break(self): - # An internal error was detected. The barrier is set to - # a broken state all parties awakened. - self._state = -2 - self._cond.notify_all() - - @property - def parties(self): - """Return the number of threads required to trip the barrier.""" - return self._parties - - @property - def n_waiting(self): - """Return the number of threads currently waiting at the barrier.""" - # We don't need synchronization here since this is an ephemeral result - # anyway. It returns the correct value in the steady state. - if self._state == 0: - return self._count - return 0 - - @property - def broken(self): - """Return True if the barrier is in a broken state.""" - return self._state == -2 - -# exception raised by the Barrier class -class BrokenBarrierError(RuntimeError): - pass diff --git a/test/qcheck.py b/test/qcheck.py index 4d039f75b5..0135497c09 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -12,22 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -import random -import traceback import datetime +import random import re import sys +import traceback + sys.path[0:0] = [""] -from bson.binary import Binary from bson.dbref import DBRef from bson.objectid import ObjectId -from bson.py3compat import MAXSIZE, PY3, iteritems from bson.son import SON -if PY3: - unichr = chr - gen_target = 100 reduction_attempts = 10 examples = 5 @@ -59,7 +55,7 @@ def gen_int(): def gen_float(): - return lambda: (random.random() - 0.5) * MAXSIZE + return lambda: (random.random() - 0.5) * sys.maxsize def gen_boolean(): @@ -74,12 +70,8 @@ def gen_printable_string(gen_length): return lambda: "".join(gen_list(gen_printable_char(), gen_length)()) -if PY3: - def gen_char(set=None): - return lambda: bytes([random.randint(0, 255)]) -else: - def gen_char(set=None): - return lambda: chr(random.randint(0, 255)) +def gen_char(set=None): + return lambda: bytes([random.randint(0, 255)]) def gen_string(gen_length): @@ -87,7 +79,7 @@ def gen_string(gen_length): def gen_unichar(): - return lambda: unichr(random.randint(1, 0xFFF)) + return lambda: chr(random.randint(1, 0xFFF)) def gen_unicode(gen_length): @@ -150,15 +142,9 @@ def gen_dbref(): def gen_mongo_value(depth, ref): - bintype = Binary - if PY3: - # If we used Binary in python3 tests would fail since we - # decode BSON binary subtype 0 to bytes. Testing this with - # bytes in python3 makes a lot more sense. - bintype = bytes choices = [gen_unicode(gen_range(0, 50)), gen_printable_string(gen_range(0, 50)), - my_map(gen_string(gen_range(0, 1000)), bintype), + my_map(gen_string(gen_range(0, 1000)), bytes), gen_int(), gen_float(), gen_boolean(), @@ -195,7 +181,7 @@ def simplify(case): # TODO this is a hack return (True, simplified) else: # simplify a value - simplified_items = list(iteritems(simplified)) + simplified_items = list(simplified.items()) if not len(simplified_items): return (False, case) (key, value) = random.choice(simplified_items) diff --git a/test/test_binary.py b/test/test_binary.py index 39de987c10..34e80bfa40 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -17,6 +17,7 @@ import array import base64 import copy +import mmap import pickle import platform import sys @@ -29,11 +30,12 @@ from bson import decode, encode from bson.binary import * from bson.codec_options import CodecOptions -from bson.py3compat import PY3 from bson.son import SON + from pymongo.common import validate_uuid_representation from pymongo.mongo_client import MongoClient from pymongo.write_concern import WriteConcern + from test import client_context, unittest, IntegrationTest from test.utils import ignore_deprecations @@ -328,14 +330,9 @@ def test_pickle(self): b1 = Binary(b'123', 2) # For testing backwards compatibility with pre-2.4 pymongo - if PY3: - p = (b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" - b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" - b"\x05K\x02sb.") - else: - p = (b"ccopy_reg\n_reconstructor\np0\n(cbson.binary\nBinary\np1\nc" - b"__builtin__\nstr\np2\nS'123'\np3\ntp4\nRp5\n(dp6\nS'_Binary" - b"__subtype'\np7\nI2\nsb.") + p = (b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" + b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" + b"\x05K\x02sb.") if not sys.version.startswith('3.0'): self.assertEqual(b1, pickle.loads(p)) @@ -357,16 +354,11 @@ def test_buffer_protocol(self): self.assertEqual(b0, Binary(memoryview(b'123'), 2)) self.assertEqual(b0, Binary(bytearray(b'123'), 2)) - # mmap.mmap and array.array only expose the - # buffer interface in python 3.x - if PY3: - # No mmap module in Jython - import mmap - with mmap.mmap(-1, len(b'123')) as mm: - mm.write(b'123') - mm.seek(0) - self.assertEqual(b0, Binary(mm, 2)) - self.assertEqual(b0, Binary(array.array('B', b'123'), 2)) + with mmap.mmap(-1, len(b'123')) as mm: + mm.write(b'123') + mm.seek(0) + self.assertEqual(b0, Binary(mm, 2)) + self.assertEqual(b0, Binary(array.array('B', b'123'), 2)) class TestUuidSpecExplicitCoding(unittest.TestCase): @@ -377,9 +369,7 @@ def setUpClass(cls): @staticmethod def _hex_to_bytes(hexstring): - if PY3: - return bytes.fromhex(hexstring) - return hexstring.decode("hex") + return bytes.fromhex(hexstring) # Explicit encoding prose test #1 def test_encoding_1(self): @@ -482,9 +472,7 @@ def setUpClass(cls): @staticmethod def _hex_to_bytes(hexstring): - if PY3: - return bytes.fromhex(hexstring) - return hexstring.decode("hex") + return bytes.fromhex(hexstring) def _get_coll_w_uuid_rep(self, uuid_rep): codec_options = self.client.codec_options.with_options( diff --git a/test/test_bson.py b/test/test_bson.py index ad726f71bb..6637ca5dac 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -16,14 +16,19 @@ """Test the bson module.""" +import array import collections import datetime +import mmap import os import re import sys import tempfile import uuid +from collections import abc, OrderedDict +from io import BytesIO + sys.path[0:0] = [""] import bson @@ -42,23 +47,18 @@ from bson.int64 import Int64 from bson.objectid import ObjectId from bson.dbref import DBRef -from bson.py3compat import abc, iteritems, PY3, StringIO, text_type from bson.son import SON from bson.timestamp import Timestamp from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) + InvalidDocument) from bson.max_key import MaxKey from bson.min_key import MinKey from bson.tz_util import (FixedOffset, utc) -from test import qcheck, SkipTest, unittest +from test import qcheck, unittest from test.utils import ExceptionCatchingThread -if PY3: - long = int - class NotADict(abc.MutableMapping): """Non-dict type that implements the mapping protocol.""" @@ -134,7 +134,7 @@ def helper(doc): helper({}) helper({"test": u"hello"}) self.assertTrue(isinstance(decoder(encoder( - {"hello": "world"}))["hello"], text_type)) + {"hello": "world"}))["hello"], str)) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) @@ -293,7 +293,7 @@ def test_basic_decode(self): b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" b"\x05\x00\x00\x00\x00"))) self.assertEqual([{"test": u"hello world"}, {}], - list(decode_file_iter(StringIO( + list(decode_file_iter(BytesIO( b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" @@ -305,14 +305,11 @@ def test_decode_all_buffer_protocol(self): self.assertEqual(docs, decode_all(bytearray(bs))) self.assertEqual(docs, decode_all(memoryview(bs))) self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1])) - if PY3: - import array - import mmap - self.assertEqual(docs, decode_all(array.array('B', bs))) - with mmap.mmap(-1, len(bs)) as mm: - mm.write(bs) - mm.seek(0) - self.assertEqual(docs, decode_all(mm)) + self.assertEqual(docs, decode_all(array.array('B', bs))) + with mmap.mmap(-1, len(bs)) as mm: + mm.write(bs) + mm.seek(0) + self.assertEqual(docs, decode_all(mm)) def test_decode_buffer_protocol(self): doc = {'foo': 'bar'} @@ -321,21 +318,18 @@ def test_decode_buffer_protocol(self): self.assertEqual(doc, decode(bytearray(bs))) self.assertEqual(doc, decode(memoryview(bs))) self.assertEqual(doc, decode(memoryview(b'1' + bs + b'1')[1:-1])) - if PY3: - import array - import mmap - self.assertEqual(doc, decode(array.array('B', bs))) - with mmap.mmap(-1, len(bs)) as mm: - mm.write(bs) - mm.seek(0) - self.assertEqual(doc, decode(mm)) + self.assertEqual(doc, decode(array.array('B', bs))) + with mmap.mmap(-1, len(bs)) as mm: + mm.write(bs) + mm.seek(0) + self.assertEqual(doc, decode(mm)) def test_invalid_decodes(self): # Invalid object size (not enough bytes in document for even # an object size of first object. # NOTE: decode_all and decode_iter don't care, not sure if they should? self.assertRaises(InvalidBSON, list, - decode_file_iter(StringIO(b"\x1B"))) + decode_file_iter(BytesIO(b"\x1B"))) bad_bsons = [ # An object size that's too small to even include the object size, @@ -366,7 +360,7 @@ def test_invalid_decodes(self): with self.assertRaises(InvalidBSON, msg=msg): list(decode_iter(data)) with self.assertRaises(InvalidBSON, msg=msg): - list(decode_file_iter(StringIO(data))) + list(decode_file_iter(BytesIO(data))) with tempfile.TemporaryFile() as scratch: scratch.write(data) scratch.seek(0, os.SEEK_SET) @@ -498,10 +492,7 @@ def test_bytes_as_keys(self): # Since `bytes` are stored as Binary you can't use them # as keys in python 3.x. Using binary data as a key makes # no sense in BSON anyway and little sense in python. - if PY3: - self.assertRaises(InvalidDocument, encode, doc) - else: - self.assertTrue(encode(doc)) + self.assertRaises(InvalidDocument, encode, doc) def test_datetime_encode_decode(self): # Negative timestamps @@ -604,13 +595,6 @@ def test_dst(self): self.assertEqual(d, decode(encode(d))) def test_bad_encode(self): - if not PY3: - # Python3 treats this as a unicode string which won't raise - # an exception. If we passed the string as bytes instead we - # still wouldn't get an error since we store bytes as BSON - # binary subtype 0. - self.assertRaises(InvalidStringData, encode, - {"lalala": '\xf4\xe0\xf0\xe1\xc0 Color Touch'}) # Work around what seems like a regression in python 3.5.0. # See http://bugs.python.org/issue25222 if sys.version_info[:2] < (3, 5): @@ -622,13 +606,13 @@ def test_bad_encode(self): self.assertRaises(Exception, encode, evil_data) def test_overflow(self): - self.assertTrue(encode({"x": long(9223372036854775807)})) + self.assertTrue(encode({"x": 9223372036854775807})) self.assertRaises(OverflowError, encode, - {"x": long(9223372036854775808)}) + {"x": 9223372036854775808}) - self.assertTrue(encode({"x": long(-9223372036854775808)})) + self.assertTrue(encode({"x": -9223372036854775808})) self.assertRaises(OverflowError, encode, - {"x": long(-9223372036854775809)}) + {"x": -9223372036854775809}) def test_small_long_encode_decode(self): encoded1 = encode({'x': 256}) @@ -682,25 +666,10 @@ def test_utf8(self): # b'a\xe9' == u"aé".encode("iso-8859-1") iso8859_bytes = b'a\xe9' y = {"hello": iso8859_bytes} - if PY3: - # Stored as BSON binary subtype 0. - out = decode(encode(y)) - self.assertTrue(isinstance(out['hello'], bytes)) - self.assertEqual(out['hello'], iso8859_bytes) - else: - # Python 2. - try: - encode(y) - except InvalidStringData as e: - self.assertTrue(repr(iso8859_bytes) in str(e)) - - # The next two tests only make sense in python 2.x since - # you can't use `bytes` type as document keys in python 3.x. - x = {u"aéあ".encode("utf-8"): u"aéあ".encode("utf-8")} - self.assertEqual(w, decode(encode(x))) - - z = {iso8859_bytes: "hello"} - self.assertRaises(InvalidStringData, encode, z) + # Stored as BSON binary subtype 0. + out = decode(encode(y)) + self.assertTrue(isinstance(out['hello'], bytes)) + self.assertEqual(out['hello'], iso8859_bytes) def test_null_character(self): doc = {"a": "\x00"} @@ -767,24 +736,20 @@ class _myint(int): class _myfloat(float): pass - class _myunicode(text_type): + class _myunicode(str): pass d = {'a': _myint(42), 'b': _myfloat(63.9), 'c': _myunicode('hello world') } d2 = decode(encode(d)) - for key, value in iteritems(d2): + for key, value in d2.items(): orig_value = d[key] orig_type = orig_value.__class__.__bases__[0] self.assertEqual(type(value), orig_type) self.assertEqual(value, orig_type(value)) def test_ordered_dict(self): - try: - from collections import OrderedDict - except ImportError: - raise SkipTest("No OrderedDict") d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) self.assertEqual( d, decode(encode(d), CodecOptions(document_class=OrderedDict))) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 780ea49a3c..6590d00a3e 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -33,7 +33,6 @@ from bson.dbref import DBRef from bson.errors import InvalidBSON, InvalidId from bson.json_util import JSONMode -from bson.py3compat import text_type, b from bson.son import SON from test import unittest @@ -56,7 +55,7 @@ _DEPRECATED_BSON_TYPES = { # Symbol - '0x0E': text_type, + '0x0E': str, # Undefined '0x06': type(None), # DBPointer @@ -122,7 +121,7 @@ def run_test(self): encode_extjson = to_extjson encode_bson = to_bson - cB = binascii.unhexlify(b(valid_case['canonical_bson'])) + cB = binascii.unhexlify(valid_case['canonical_bson'].encode('utf8')) cEJ = valid_case['canonical_extjson'] rEJ = valid_case.get('relaxed_extjson') dEJ = valid_case.get('degenerate_extjson') @@ -139,7 +138,7 @@ def run_test(self): if deprecated: if 'converted_bson' in valid_case: converted_bson = binascii.unhexlify( - b(valid_case['converted_bson'])) + valid_case['converted_bson'].encode('utf8')) self.assertEqual(encode_bson(decoded_bson), converted_bson) self.assertJsonEqual( encode_extjson(decode_bson(converted_bson)), @@ -167,7 +166,7 @@ def run_test(self): # Test round-tripping degenerate bson. if 'degenerate_bson' in valid_case: - dB = binascii.unhexlify(b(valid_case['degenerate_bson'])) + dB = binascii.unhexlify(valid_case['degenerate_bson'].encode('utf8')) self.assertEqual(encode_bson(decode_bson(dB)), cB) # Test round-tripping degenerate extended json. @@ -186,7 +185,7 @@ def run_test(self): for decode_error_case in case_spec.get('decodeErrors', []): with self.assertRaises(InvalidBSON): decode_bson( - binascii.unhexlify(b(decode_error_case['bson']))) + binascii.unhexlify(decode_error_case['bson'].encode('utf8'))) for parse_error_case in case_spec.get('parseErrors', []): if bson_type == '0x13': diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 851b599f99..293c3bfc1a 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -23,7 +23,6 @@ import time import uuid -from contextlib import contextmanager from itertools import product sys.path[0:0] = [''] @@ -33,7 +32,6 @@ Binary, STANDARD, PYTHON_LEGACY) -from bson.py3compat import iteritems from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient @@ -1093,7 +1091,7 @@ def assert_list_contents_are_subset(self, superlist, sublist): def assert_dict_is_subset(self, superdict, subdict): """Check that subdict is a subset of superdict.""" exempt_fields = ["documentKey", "_id", "getMore"] - for key, value in iteritems(subdict): + for key, value in subdict.items(): if key not in superdict: self.fail('Key %s not found in %s' % (key, superdict)) if isinstance(value, dict): @@ -1111,7 +1109,7 @@ def assert_dict_is_subset(self, superdict, subdict): def check_event(self, event, expectation_dict): if event is None: self.fail() - for key, value in iteritems(expectation_dict): + for key, value in expectation_dict.items(): if isinstance(value, dict): self.assert_dict_is_subset(getattr(event, key), value) else: @@ -1149,9 +1147,9 @@ def get_change_stream(client, scenario_def, test): cs_pipeline = test["changeStreamPipeline"] options = test["changeStreamOptions"] cs_options = {} - for key, value in iteritems(options): + for key, value in options.items(): cs_options[camel_to_snake(key)] = value - + # Create and return change stream return cs_target.watch(pipeline=cs_pipeline, **cs_options) @@ -1203,12 +1201,12 @@ def run_scenario(self): for change, expected_changes in zip(changes, test["result"]["success"]): self.assert_dict_is_subset(change, expected_changes) self.assertEqual(len(changes), len(test["result"]["success"])) - + finally: # Check for expected events results = self.listener.results for idx, expectation in enumerate(test.get("expectations", [])): - for event_type, event_desc in iteritems(expectation): + for event_type, event_desc in expectation.items(): results_key = event_type.split("_")[1] event = results[results_key][idx] if len(results[results_key]) > idx else None self.check_event(event, event_desc) diff --git a/test/test_client.py b/test/test_client.py index df8122bbae..add8a789c7 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -24,6 +24,7 @@ import struct import sys import time +import _thread as thread import threading import warnings @@ -31,7 +32,6 @@ from bson import encode from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry -from bson.py3compat import thread from bson.son import SON from bson.tz_util import utc import pymongo @@ -90,7 +90,6 @@ rs_client, rs_or_single_client, rs_or_single_client_noauth, - server_is_master_with_slave, single_client, wait_until) diff --git a/test/test_collection.py b/test/test_collection.py index c2b06ca37a..db6202154d 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -19,7 +19,6 @@ import contextlib import re import sys -import threading from codecs import utf_8_decode from collections import defaultdict @@ -32,7 +31,6 @@ from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from bson.py3compat import itervalues from bson.son import SON from pymongo import (ASCENDING, DESCENDING, GEO2D, GEOHAYSTACK, GEOSPHERE, HASHED, TEXT) @@ -376,7 +374,7 @@ def check_result(result): reindexed = db.test.reindex() if 'raw' in reindexed: # mongos - for result in itervalues(reindexed['raw']): + for result in reindexed['raw'].values(): check_result(result) else: check_result(reindexed) @@ -1349,12 +1347,6 @@ def test_write_error_unicode(self): self.assertIn('E11000 duplicate key error', str(ctx.exception)) - if sys.version_info[0] == 2: - # Test unicode(error) conversion. - self.assertIn('E11000 duplicate key error', - unicode(ctx.exception)) - - def test_wtimeout(self): # Ensure setting wtimeout doesn't disable write concern altogether. # See SERVER-12596. diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 48df16df90..c93d6a22e5 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -14,14 +14,11 @@ """Test the collection module.""" -import json import os -import re import sys sys.path[0:0] = [""] -from bson.py3compat import iteritems from pymongo import operations, WriteConcern from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor @@ -35,7 +32,7 @@ UpdateOne, UpdateMany) -from test import unittest, client_context, IntegrationTest +from test import unittest, IntegrationTest from test.utils import (camel_to_snake, camel_to_upper_camel, camel_to_snake_args, drop_collections, TestCreator) @@ -113,7 +110,7 @@ def run_operation(collection, test): # PyMongo accepts sort as list of tuples. if arg_name == "sort": sort_dict = arguments[arg_name] - arguments[arg_name] = list(iteritems(sort_dict)) + arguments[arg_name] = list(sort_dict.items()) # Named "key" instead not fieldName. if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) diff --git a/test/test_cursor.py b/test/test_cursor.py index 1d8aae2af1..8cb98c0ef0 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -27,7 +27,6 @@ from bson import decode_all from bson.code import Code -from bson.py3compat import PY3 from bson.son import SON from pymongo import (ASCENDING, DESCENDING, @@ -40,7 +39,6 @@ InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference from test import (client_context, unittest, IntegrationTest) @@ -49,9 +47,6 @@ rs_or_single_client, WhiteListEventListener) -if PY3: - long = int - class TestCursor(IntegrationTest): def test_deepcopy_cursor_littered_with_regexes(self): @@ -167,7 +162,7 @@ def test_max_time_ms(self): coll.insert_one({"amalia": 2}) coll.find().max_time_ms(None) - coll.find().max_time_ms(long(1)) + coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) self.assertEqual(999, cursor._Cursor__max_time_ms) @@ -215,7 +210,7 @@ def test_max_await_time_ms(self): coll.insert_one({"amalia": 2}) coll.find().max_await_time_ms(None) - coll.find().max_await_time_ms(long(1)) + coll.find().max_await_time_ms(1) # When cursor is not tailable_await cursor = coll.find() @@ -414,7 +409,7 @@ def test_limit(self): self.assertRaises(TypeError, db.test.find().limit, None) self.assertRaises(TypeError, db.test.find().limit, "hello") self.assertRaises(TypeError, db.test.find().limit, 5.5) - self.assertTrue(db.test.find().limit(long(5))) + self.assertTrue(db.test.find().limit(5)) db.test.drop() db.test.insert_many([{"x": i} for i in range(100)]) @@ -560,7 +555,7 @@ def test_batch_size(self): self.assertRaises(TypeError, db.test.find().batch_size, "hello") self.assertRaises(TypeError, db.test.find().batch_size, 5.5) self.assertRaises(ValueError, db.test.find().batch_size, -1) - self.assertTrue(db.test.find().batch_size(long(5))) + self.assertTrue(db.test.find().batch_size(5)) a = db.test.find() for _ in a: break @@ -684,7 +679,7 @@ def test_skip(self): self.assertRaises(TypeError, db.test.find().skip, "hello") self.assertRaises(TypeError, db.test.find().skip, 5.5) self.assertRaises(ValueError, db.test.find().skip, -5) - self.assertTrue(db.test.find().skip(long(5))) + self.assertTrue(db.test.find().skip(5)) db.drop_collection("test") @@ -1059,7 +1054,7 @@ def test_getitem_slice_index(self): self.assertEqual(5, len(list(self.db.test.find()[20:25]))) self.assertEqual(5, len(list( - self.db.test.find()[long(20):long(25)]))) + self.db.test.find()[20:25]))) for a, b in zip(count(20), self.db.test.find()[20:25]): self.assertEqual(a, b['i']) @@ -1104,7 +1099,7 @@ def test_getitem_numeric_index(self): self.assertEqual(50, self.db.test.find()[50]['i']) self.assertEqual(50, self.db.test.find().skip(50)[0]['i']) self.assertEqual(50, self.db.test.find().skip(49)[1]['i']) - self.assertEqual(50, self.db.test.find()[long(50)]['i']) + self.assertEqual(50, self.db.test.find()[50]['i']) self.assertEqual(99, self.db.test.find()[99]['i']) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index bd9a835528..e48acedff4 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -17,6 +17,7 @@ import datetime import sys import tempfile + from collections import OrderedDict from decimal import Decimal from random import random @@ -39,7 +40,6 @@ from bson.errors import InvalidDocument from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument -from bson.py3compat import text_type from gridfs import GridIn, GridOut @@ -110,7 +110,7 @@ def transform_python(self, value): class UppercaseTextDecoder(TypeDecoder): - bson_type = text_type + bson_type = str def transform_bson(self, value): return value.upper() diff --git a/test/test_database.py b/test/test_database.py index 262fcf7df0..de0415f4b3 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -21,13 +21,11 @@ sys.path[0:0] = [""] -from bson.code import Code from bson.codec_options import CodecOptions from bson.int64 import Int64 from bson.regex import Regex from bson.dbref import DBRef from bson.objectid import ObjectId -from bson.py3compat import string_type, text_type, PY3 from bson.son import SON from pymongo import (ALL, auth, @@ -63,10 +61,6 @@ from test.test_custom_types import DECIMAL_CODECOPTS -if PY3: - long = int - - class TestDatabaseNoConnect(unittest.TestCase): """Test Database features on a client that does not connect. """ @@ -432,10 +426,10 @@ def test_profiling_info(self): # These basically clue us in to server changes. self.assertTrue(isinstance(info[0]['responseLength'], int)) self.assertTrue(isinstance(info[0]['millis'], int)) - self.assertTrue(isinstance(info[0]['client'], string_type)) - self.assertTrue(isinstance(info[0]['user'], string_type)) - self.assertTrue(isinstance(info[0]['ns'], string_type)) - self.assertTrue(isinstance(info[0]['op'], string_type)) + self.assertTrue(isinstance(info[0]['client'], str)) + self.assertTrue(isinstance(info[0]['user'], str)) + self.assertTrue(isinstance(info[0]['ns'], str)) + self.assertTrue(isinstance(info[0]['op'], str)) self.assertTrue(isinstance(info[0]["ts"], datetime.datetime)) @client_context.require_no_mongos @@ -513,7 +507,7 @@ def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, None) self.assertTrue(isinstance(auth._password_digest("mike", "password"), - text_type)) + str)) self.assertEqual(auth._password_digest("mike", "password"), u"cd7e45b3b2767dc2fa9b6b548457ed00") self.assertEqual(auth._password_digest("mike", "password"), @@ -828,7 +822,7 @@ def test_insert_find_one(self): def test_long(self): db = self.client.pymongo_test db.test.drop() - db.test.insert_one({"x": long(9223372036854775807)}) + db.test.insert_one({"x": 9223372036854775807}) retrieved = db.test.find_one()['x'] self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) diff --git a/test/test_decimal128.py b/test/test_decimal128.py index e72c1273e4..b242b7dfa5 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -14,22 +14,14 @@ """Tests for Decimal128.""" -import codecs -import glob -import json -import os.path import pickle import sys -from binascii import unhexlify -from decimal import Decimal, DecimalException +from decimal import Decimal sys.path[0:0] = [""] -from bson import BSON from bson.decimal128 import Decimal128, create_decimal128_context -from bson.json_util import dumps, loads -from bson.py3compat import b from test import client_context, unittest class TestDecimal128(unittest.TestCase): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 601c0b5156..52527464d8 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -41,7 +41,6 @@ cdecimal_patched, CMAPListener, client_context, - Barrier, get_pool, HeartbeatEventListener, server_name_to_type, @@ -263,7 +262,7 @@ class TestIgnoreStaleErrors(IntegrationTest): def test_ignore_stale_connection_errors(self): N_THREADS = 5 - barrier = Barrier(N_THREADS, timeout=30) + barrier = threading.Barrier(N_THREADS, timeout=30) client = rs_or_single_client(minPoolSize=N_THREADS) self.addCleanup(client.close) diff --git a/test/test_encryption.py b/test/test_encryption.py index 28ba331e39..90da8b3dcf 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -31,7 +31,6 @@ STANDARD, UUID_SUBTYPE) from bson.codec_options import CodecOptions -from bson.py3compat import _unicode from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON @@ -455,7 +454,7 @@ def test_with_statement(self): GCP_CREDS = { 'email': os.environ.get('FLE_GCP_EMAIL', ''), - 'privateKey': _unicode(os.environ.get('FLE_GCP_PRIVATEKEY', ''))} + 'privateKey': os.environ.get('FLE_GCP_PRIVATEKEY', '')} class TestSpec(SpecRunner): @@ -1307,7 +1306,7 @@ def test_explicit(self): 'AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==') def test_automatic(self): - expected_document_extjson = textwrap.dedent(""" + expected_document_extjson = textwrap.dedent(""" {"secret_azure": { "$binary": { "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", @@ -1333,7 +1332,7 @@ def test_explicit(self): 'ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==') def test_automatic(self): - expected_document_extjson = textwrap.dedent(""" + expected_document_extjson = textwrap.dedent(""" {"secret_gcp": { "$binary": { "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", diff --git a/test/test_errors.py b/test/test_errors.py index 968c8ebd74..dd5fc11897 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -45,10 +45,7 @@ def test_operation_failure(self): self.assertIn("full error", traceback.format_exc()) def _test_unicode_strs(self, exc): - if sys.version_info[0] == 2: - self.assertEqual("unicode \xf0\x9f\x90\x8d, full error: {" - "'errmsg': u'unicode \\U0001f40d'}", str(exc)) - elif 'PyPy' in sys.version: + if 'PyPy' in sys.version: # PyPy displays unicode in repr differently. self.assertEqual("unicode \U0001f40d, full error: {" "'errmsg': 'unicode \\U0001f40d'}", str(exc)) diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 2de173cab9..705b9f032e 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -20,10 +20,12 @@ import datetime import sys import zipfile + +from io import BytesIO + sys.path[0:0] = [""] from bson.objectid import ObjectId -from bson.py3compat import StringIO from gridfs import GridFS from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, _SEEK_CUR, @@ -234,7 +236,7 @@ def test_grid_out_cursor_options(self): cursor = GridOutCursor(self.db.fs, {}) cursor_clone = cursor.clone() - + cursor_dict = cursor.__dict__.copy() cursor_dict.pop('_Cursor__session') cursor_clone_dict = cursor_clone.__dict__.copy() @@ -301,7 +303,7 @@ def test_write_file_like(self): five = GridIn(self.db.fs, chunk_size=2) five.write(b"hello") - buffer = StringIO(b" world") + buffer = BytesIO(b" world") five.write(buffer) five.write(b" and mongodb") five.close() @@ -567,7 +569,7 @@ def test_context_manager(self): def test_prechunked_string(self): def write_me(s, chunk_size): - buf = StringIO(s) + buf = BytesIO(s) infile = GridIn(self.db.fs) while True: to_write = buf.read(chunk_size) @@ -646,7 +648,7 @@ def test_survive_cursor_not_found(self): self.assertIn("getMore", listener.started_command_names()) def test_zip(self): - zf = StringIO() + zf = BytesIO() z = zipfile.ZipFile(zf, "w") z.writestr("test.txt", b"hello world") z.close() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 0d05e0b781..1827944c5d 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -16,21 +16,23 @@ """Tests for the gridfs package. """ -import sys -sys.path[0:0] = [""] import datetime +import sys import threading import time -import gridfs + +from io import BytesIO + +sys.path[0:0] = [""] from bson.binary import Binary -from bson.py3compat import StringIO, string_type from pymongo.mongo_client import MongoClient from pymongo.errors import (ConfigurationError, NotMasterError, ServerSelectionTimeoutError) from pymongo.read_preferences import ReadPreference +import gridfs from gridfs.errors import CorruptGridFile, FileExists, NoFile from test import (client_context, unittest, @@ -156,7 +158,7 @@ def test_empty_file(self): self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], string_type)) + self.assertTrue(isinstance(raw["md5"], str)) def test_corrupt_chunk(self): files_id = self.fs.put(b'foobar') @@ -311,23 +313,38 @@ def test_get_version_with_metadata(self): time.sleep(0.01) three = self.fs.put(b"baz", filename="test", author="author2") - self.assertEqual(b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read()) - self.assertEqual(b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read()) - self.assertEqual(b"foo", self.fs.get_version(filename="test", author="author1", version=0).read()) - self.assertEqual(b"bar", self.fs.get_version(filename="test", author="author1", version=1).read()) - self.assertEqual(b"baz", self.fs.get_version(filename="test", author="author2", version=0).read()) - self.assertEqual(b"baz", self.fs.get_version(filename="test", version=-1).read()) - self.assertEqual(b"baz", self.fs.get_version(filename="test", version=2).read()) + self.assertEqual( + b"foo", + self.fs.get_version( + filename="test", author="author1", version=-2).read()) + self.assertEqual( + b"bar", self.fs.get_version( + filename="test", author="author1", version=-1).read()) + self.assertEqual( + b"foo", self.fs.get_version( + filename="test", author="author1", version=0).read()) + self.assertEqual( + b"bar", self.fs.get_version( + filename="test", author="author1", version=1).read()) + self.assertEqual( + b"baz", self.fs.get_version( + filename="test", author="author2", version=0).read()) + self.assertEqual( + b"baz", self.fs.get_version(filename="test", version=-1).read()) + self.assertEqual( + b"baz", self.fs.get_version(filename="test", version=2).read()) - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) + self.assertRaises( + NoFile, self.fs.get_version, filename="test", author="author3") + self.assertRaises( + NoFile, self.fs.get_version, filename="test", author="author1", version=2) self.fs.delete(one) self.fs.delete(two) self.fs.delete(three) def test_put_filelike(self): - oid = self.fs.put(StringIO(b"hello world"), chunk_size=1) + oid = self.fs.put(BytesIO(b"hello world"), chunk_size=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) self.assertEqual(b"hello world", self.fs.get(oid).read()) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 17b5364bb2..98478bf7bf 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -21,13 +21,13 @@ import threading import time -import gridfs +from io import BytesIO from bson.binary import Binary from bson.int64 import Int64 from bson.objectid import ObjectId -from bson.py3compat import StringIO, string_type from bson.son import SON +import gridfs from gridfs.errors import NoFile, CorruptGridFile from pymongo.errors import (ConfigurationError, NotMasterError, @@ -131,7 +131,7 @@ def test_empty_file(self): self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], string_type)) + self.assertTrue(isinstance(raw["md5"], str)) def test_corrupt_chunk(self): files_id = self.fs.upload_from_stream("test_filename", @@ -293,7 +293,7 @@ def test_get_version(self): def test_upload_from_stream(self): oid = self.fs.upload_from_stream("test_file", - StringIO(b"hello world"), + BytesIO(b"hello world"), chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) self.assertEqual(b"hello world", @@ -303,7 +303,7 @@ def test_upload_from_stream_with_id(self): oid = ObjectId() self.fs.upload_from_stream_with_id(oid, "test_file_custom_id", - StringIO(b"custom id"), + BytesIO(b"custom id"), chunk_size_bytes=1) self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) @@ -416,11 +416,11 @@ def test_abort(self): {"files_id": gin._id})) def test_download_to_stream(self): - file1 = StringIO(b"hello world") + file1 = BytesIO(b"hello world") # Test with one chunk. oid = self.fs.upload_from_stream("one_chunk", file1) self.assertEqual(1, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream(oid, file2) file1.seek(0) file2.seek(0) @@ -434,18 +434,18 @@ def test_download_to_stream(self): file1, chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream(oid, file2) file1.seek(0) file2.seek(0) self.assertEqual(file1.read(), file2.read()) def test_download_to_stream_by_name(self): - file1 = StringIO(b"hello world") + file1 = BytesIO(b"hello world") # Test with one chunk. oid = self.fs.upload_from_stream("one_chunk", file1) self.assertEqual(1, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream_by_name("one_chunk", file2) file1.seek(0) file2.seek(0) @@ -458,7 +458,7 @@ def test_download_to_stream_by_name(self): self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - file2 = StringIO() + file2 = BytesIO() self.fs.download_to_stream_by_name("many_chunks", file2) file1.seek(0) file2.seek(0) diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index bda6f52816..e9e097568c 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -13,22 +13,21 @@ # limitations under the License. """Test GridFSBucket class.""" + import copy import datetime import os -import sys import re +import sys from json import loads -import gridfs - sys.path[0:0] = [""] from bson import Binary from bson.int64 import Int64 from bson.json_util import object_hook -from bson.py3compat import bytes_from_hex +import gridfs from gridfs.errors import NoFile, CorruptGridFile from test import (unittest, IntegrationTest) @@ -210,7 +209,7 @@ def str2hex(jsn): for key, val in jsn.items(): if key in ("data", "source", "result"): if "$hex" in val: - jsn[key] = Binary(bytes_from_hex(val['$hex'])) + jsn[key] = Binary(bytes.fromhex(val['$hex'])) if isinstance(jsn[key], dict): str2hex(jsn[key]) if isinstance(jsn[key], list): diff --git a/test/test_json_util.py b/test/test_json_util.py index 7906b276f5..5b6efbc636 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -39,8 +39,6 @@ from test import unittest, IntegrationTest -PY3 = sys.version_info[0] == 3 - class TestJsonUtil(unittest.TestCase): def round_tripped(self, doc, **kwargs): @@ -336,10 +334,7 @@ def test_uuid_uuid_rep_unspecified(self): doc, json_util.loads(ext_json_str, json_options=options)) def test_binary(self): - if PY3: - bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} - else: - bin_type_dict = {"bin": Binary(b"\x00\x01\x02\x03\x04")} + bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} md5_type_dict = { "md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac', MD5_SUBTYPE)} @@ -352,10 +347,7 @@ def test_binary(self): # Binary with subtype 0 is decoded into bytes in Python 3. bin = json_util.loads( '{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin'] - if PY3: - self.assertEqual(type(bin), bytes) - else: - self.assertEqual(type(bin), Binary) + self.assertEqual(type(bin), bytes) # PYTHON-443 ensure old type formats are supported json_bin_dump = json_util.dumps(bin_type_dict) diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 1c909c9043..6d7a6425bc 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -19,7 +19,6 @@ import threading import time import uuid -import warnings sys.path[0:0] = [""] @@ -27,14 +26,11 @@ from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from bson.py3compat import string_type from bson.son import SON from pymongo import ASCENDING, DESCENDING, GEOHAYSTACK -from pymongo.database import Database from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, ConfigurationError, - CursorNotFound, DocumentTooLarge, DuplicateKeyError, InvalidDocument, @@ -42,7 +38,6 @@ OperationFailure, WriteConcernError, WTimeoutError) -from pymongo.message import _CursorAddress from pymongo.operations import IndexModel from pymongo.son_manipulator import (AutoReference, NamespaceInjector, @@ -143,14 +138,8 @@ def test_insert_find_one(self): self.assertEqual(doc["_id"], _id) self.assertTrue(isinstance(_id, ObjectId)) - doc_class = dict - # Work around http://bugs.jython.org/issue1728 - if (sys.platform.startswith('java') and - sys.version_info[:3] >= (2, 5, 2)): - doc_class = SON - db = self.client.get_database( - db.name, codec_options=CodecOptions(document_class=doc_class)) + db.name, codec_options=CodecOptions(document_class=dict)) def remove_insert_find_one(doc): db.test.remove({}) @@ -2315,7 +2304,7 @@ def test_write_concern_failure_ordered(self): failed = result['writeConcernErrors'][0] self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], string_type)) + self.assertTrue(isinstance(failed['errmsg'], str)) self.coll.delete_many({}) self.coll.create_index('a', unique=True) @@ -2413,12 +2402,12 @@ def test_write_concern_failure_unordered(self): failed = result['writeErrors'][0] self.assertEqual(2, failed['index']) self.assertEqual(11000, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], string_type)) + self.assertTrue(isinstance(failed['errmsg'], str)) self.assertEqual(1, failed['op']['a']) failed = result['writeConcernErrors'][0] self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], string_type)) + self.assertTrue(isinstance(failed['errmsg'], str)) upserts = result['upserted'] self.assertEqual(1, len(upserts)) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index ba6d2a44f1..f46ebe69b3 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -21,7 +21,6 @@ sys.path[0:0] = [""] from bson.objectid import ObjectId -from bson.py3compat import text_type from bson.son import SON from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne from pymongo.command_cursor import CommandCursor @@ -37,10 +36,8 @@ unittest) from test.utils import (EventListener, get_pool, - ignore_deprecations, rs_or_single_client, - single_client, - wait_until) + single_client) class TestCommandMonitoring(PyMongoTestCase): @@ -821,7 +818,7 @@ def test_non_bulk_writes(self): error = errors[0] self.assertEqual(0, error.get('index')) self.assertIsInstance(error.get('code'), int) - self.assertIsInstance(error.get('errmsg'), text_type) + self.assertIsInstance(error.get('errmsg'), str) def test_legacy_writes(self): with warnings.catch_warnings(): diff --git a/test/test_objectid.py b/test/test_objectid.py index df80caf397..7b26e7da87 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -23,7 +23,6 @@ from bson.errors import InvalidId from bson.objectid import ObjectId, _MAX_COUNTER_VALUE -from bson.py3compat import PY3, _unicode from bson.tz_util import (FixedOffset, utc) from test import SkipTest, unittest @@ -50,7 +49,7 @@ def test_creation(self): def test_unicode(self): a = ObjectId() - self.assertEqual(a, ObjectId(_unicode(a))) + self.assertEqual(a, ObjectId(a)) self.assertEqual(ObjectId("123456789012123456789012"), ObjectId(u"123456789012123456789012")) self.assertRaises(InvalidId, ObjectId, u"hello") @@ -139,13 +138,9 @@ def test_pickle_backwards_compatability(self): b"object\np2\nNtp3\nRp4\n" b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb.") - if PY3: - # Have to load using 'latin-1' since these were pickled in python2.x. - oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1') - oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1') - else: - oid_1_9 = pickle.loads(pickled_with_1_9) - oid_1_10 = pickle.loads(pickled_with_1_10) + # Have to load using 'latin-1' since these were pickled in python2.x. + oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1') + oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1') self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000")) self.assertEqual(oid_1_9, oid_1_10) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 2a8eb96e9f..2bb1c57f29 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -23,7 +23,6 @@ sys.path[0:0] = [""] -from bson.py3compat import MAXSIZE from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure from pymongo.message import _maybe_add_read_preference @@ -430,7 +429,7 @@ def test_create_collection(self): # the collection already exists. self._test_primary_helper( lambda: self.c.pymongo_test.create_collection( - 'some_collection%s' % random.randint(0, MAXSIZE))) + 'some_collection%s' % random.randint(0, sys.maxsize))) @client_context.require_version_max(4, 1, 0, -1) def test_group(self): diff --git a/test/test_session.py b/test/test_session.py index 2e2038501b..6a6ed12e0d 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -18,8 +18,9 @@ import os import sys +from io import BytesIO + from bson import DBRef -from bson.py3compat import StringIO from gridfs import GridFS, GridFSBucket from pymongo import ASCENDING, InsertOne, IndexModel, OFF, monitoring from pymongo.common import _MAX_END_SESSIONS @@ -465,7 +466,7 @@ def find(session=None): for f in files: f.read() - sio = StringIO() + sio = BytesIO() self._test_ops( client, diff --git a/test/test_son.py b/test/test_son.py index 921f85d45c..a8ac49060a 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -21,9 +21,8 @@ sys.path[0:0] = [""] -from bson.py3compat import b from bson.son import SON -from test import SkipTest, unittest +from test import unittest class TestSON(unittest.TestCase): @@ -107,11 +106,10 @@ def test_pickle(self): def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo # version 2.1.1 - pickled_with_2_1_1 = b( + pickled_with_2_1_1 = ( "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb." - ) + "S'_SON__keys'\np7\n(lp8\nsb.").encode('utf8') son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) diff --git a/test/test_ssl.py b/test/test_ssl.py index a5efcefddf..0edc6509ce 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -450,9 +450,6 @@ def test_validation_with_system_ca_certs(self): if sys.platform == "win32": raise SkipTest("Can't test system ca certs on Windows.") - if sys.version_info < (2, 7, 9): - raise SkipTest("Can't load system CA certificates.") - if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL): raise SkipTest( diff --git a/test/test_topology.py b/test/test_topology.py index 1b3bfe5ab3..9cd019c14a 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -18,7 +18,6 @@ sys.path[0:0] = [""] -from bson.py3compat import imap from pymongo import common from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_type import SERVER_TYPE @@ -52,7 +51,7 @@ def create_mock_topology( seeds=None, replica_set_name=None, monitor_class=DummyMonitor): - partitioned_seeds = list(imap(common.partition_node, seeds or ['a'])) + partitioned_seeds = list(map(common.partition_node, seeds or ['a'])) topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, diff --git a/test/test_transactions.py b/test/test_transactions.py index a114db6e25..859c8f3e25 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -17,9 +17,9 @@ import os import sys -sys.path[0:0] = [""] +from io import BytesIO -from bson.py3compat import StringIO +sys.path[0:0] = [""] from pymongo import client_session, WriteConcern from pymongo.client_session import TransactionOptions @@ -270,8 +270,8 @@ def gridfs_open_upload_stream(*args, **kwargs): (gfs.exists, ()), (gridfs_open_upload_stream, ('name',)), (bucket.upload_from_stream, ('name', b'data',)), - (bucket.download_to_stream, (1, StringIO(),)), - (bucket.download_to_stream_by_name, ('name', StringIO(),)), + (bucket.download_to_stream, (1, BytesIO(),)), + (bucket.download_to_stream_by_name, ('name', BytesIO(),)), (bucket.delete, (1,)), (bucket.find, ()), (bucket.open_download_stream, (1,)), diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 8d1f55a0dd..6e09f6a4dc 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -26,7 +26,6 @@ sys.path[0:0] = [""] from bson.binary import JAVA_LEGACY -from bson.py3compat import string_type, _unicode from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI from pymongo.uri_parser import (parse_userinfo, @@ -169,7 +168,7 @@ def test_split_options(self): split_options('connectTimeoutMS=0.1')) self.assertTrue(split_options('connectTimeoutMS=300')) self.assertTrue(isinstance(split_options('w=5')['w'], int)) - self.assertTrue(isinstance(split_options('w=5.5')['w'], string_type)) + self.assertTrue(isinstance(split_options('w=5.5')['w'], str)) self.assertTrue(split_options('w=foo')) self.assertTrue(split_options('w=majority')) self.assertTrue(split_options('wtimeoutms=500')) diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py index d5cbdb7bbc..65738d5c04 100644 --- a/test/unicode/test_utf8.py +++ b/test/unicode/test_utf8.py @@ -4,7 +4,6 @@ from bson import encode from bson.errors import InvalidStringData -from bson.py3compat import PY3 from test import unittest class TestUTF8(unittest.TestCase): @@ -26,51 +25,5 @@ def _assert_same_utf8_validation(self, data): self.assertEqual(py_is_legal, bson_is_legal, data) - @unittest.skipIf(PY3, "python3 has strong separation between bytes/unicode") - def test_legal_utf8_full_coverage(self): - # This test takes 400 seconds. Which is too long to run each time. - # However it is the only one which covers all possible bit combinations - # in the 244 space. - b1 = chr(0xf4) - - for b2 in map(chr, range(255)): - m2 = b1 + b2 - self._assert_same_utf8_validation(m2) - - for b3 in map(chr, range(255)): - m3 = m2 + b3 - self._assert_same_utf8_validation(m3) - - for b4 in map(chr, range(255)): - m4 = m3 + b4 - self._assert_same_utf8_validation(m4) - - # In python3: - # - 'bytes' are not checked with isLegalutf - # - 'unicode' We cannot create unicode objects with invalid utf8, since it - # would result in non valid code-points. - @unittest.skipIf(PY3, "python3 has strong separation between bytes/unicode") - def test_legal_utf8_few_samples(self): - good_samples = [ - '\xf4\x80\x80\x80', - '\xf4\x8a\x80\x80', - '\xf4\x8e\x80\x80', - '\xf4\x81\x80\x80', - ] - - for data in good_samples: - self._assert_same_utf8_validation(data) - - bad_samples = [ - '\xf4\x00\x80\x80', - '\xf4\x3a\x80\x80', - '\xf4\x7f\x80\x80', - '\xf4\x90\x80\x80', - '\xf4\xff\x80\x80', - ] - - for data in bad_samples: - self._assert_same_utf8_validation(data) - if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 9c3fdd7328..fc23a193b1 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -25,10 +25,11 @@ import sys import types +from collections import abc + from bson import json_util, Code, Decimal128, DBRef, SON, Int64, MaxKey, MinKey from bson.binary import Binary from bson.objectid import ObjectId -from bson.py3compat import abc, integer_types, iteritems, text_type, PY3 from bson.regex import Regex, RE_TYPE from gridfs import GridFSBucket @@ -182,7 +183,7 @@ def __getitem__(self, item): item,)) def __setitem__(self, key, value): - if not isinstance(key, text_type): + if not isinstance(key, str): self._test_class.fail( 'Expected entity name of type str, got %s' % (type(key))) @@ -197,7 +198,7 @@ def _create_entity(self, entity_spec): "Entity spec %s did not contain exactly one top-level key" % ( entity_spec,)) - entity_type, spec = next(iteritems(entity_spec)) + entity_type, spec = next(iter(entity_spec.items())) if entity_type == 'client': kwargs = {} observe_events = spec.get('observeEvents', []) @@ -298,21 +299,16 @@ def get_lsid_for_session(self, session_name): return self._session_lsids[session_name] -if not PY3: - binary_types = (Binary,) - long_types = (Int64, long) - unicode_type = unicode -else: - binary_types = (Binary, bytes) - long_types = (Int64,) - unicode_type = str +binary_types = (Binary, bytes) +long_types = (Int64,) +unicode_type = str BSON_TYPE_ALIAS_MAP = { # https://docs.mongodb.com/manual/reference/operator/query/type/ # https://pymongo.readthedocs.io/en/stable/api/bson/index.html 'double': (float,), - 'string': (text_type,), + 'string': (str,), 'object': (abc.Mapping,), 'array': (abc.MutableSequence,), 'binData': binary_types, @@ -421,11 +417,11 @@ def _evaluate_if_special_operation(self, expectation, actual, else: nested = expectation[key_to_compare] if isinstance(nested, abc.Mapping) and len(nested) == 1: - opname, spec = next(iteritems(nested)) + opname, spec = next(iter(nested.items())) if opname.startswith('$$'): is_special_op = True elif len(expectation) == 1: - opname, spec = next(iteritems(expectation)) + opname, spec = next(iter(expectation.items())) if opname.startswith('$$'): is_special_op = True key_to_compare = None @@ -445,7 +441,7 @@ def _match_document(self, expectation, actual, is_root): return self._test_class.assertIsInstance(actual, abc.Mapping) - for key, value in iteritems(expectation): + for key, value in expectation.items(): if self._evaluate_if_special_operation(expectation, actual, key): continue @@ -473,7 +469,7 @@ def match_result(self, expectation, actual, return # account for flexible numerics in element-wise comparison - if (isinstance(expectation, integer_types) or + if (isinstance(expectation, int) or isinstance(expectation, float)): self._test_class.assertEqual(expectation, actual) else: @@ -481,7 +477,7 @@ def match_result(self, expectation, actual, self._test_class.assertEqual(expectation, actual) def match_event(self, expectation, actual): - event_type, spec = next(iteritems(expectation)) + event_type, spec = next(iter(expectation.items())) # every event type has the commandName field command_name = spec.get('commandName') diff --git a/test/utils.py b/test/utils.py index ff301540c1..be5be176b2 100644 --- a/test/utils.py +++ b/test/utils.py @@ -30,18 +30,16 @@ from collections import defaultdict from functools import partial -from bson import json_util, py3compat +from bson import json_util from bson.objectid import ObjectId -from bson.py3compat import iteritems, string_type from bson.son import SON from pymongo import (MongoClient, monitoring, operations, read_preferences) from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener -from pymongo.pool import (_CancellationContext, - PoolOptions) +from pymongo.monitoring import _SENSITIVE_COMMANDS +from pymongo.pool import _CancellationContext from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, @@ -53,12 +51,6 @@ db_user, db_pwd) -if sys.version_info[0] < 3: - # Python 2.7, use our backport. - from test.barrier import Barrier -else: - from threading import Barrier - IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) @@ -289,7 +281,7 @@ def __init__(self, data): def convert(v): if isinstance(v, collections.Mapping): return ScenarioDict(v) - if isinstance(v, (py3compat.string_type, bytes)): + if isinstance(v, (str, bytes)): return v if isinstance(v, collections.Sequence): return [convert(item) for item in v] @@ -974,7 +966,8 @@ def assertion_context(msg): yield except AssertionError as exc: msg = '%s (%s)' % (exc, msg) - py3compat.reraise(type(exc), msg, sys.exc_info()[2]) + exc_type, exc_val, exc_tb = sys.exc_info() + raise exc_type(exc_val).with_traceback(exc_tb) def parse_spec_options(opts): @@ -998,8 +991,8 @@ def parse_spec_options(opts): if 'hint' in opts: hint = opts.pop('hint') - if not isinstance(hint, string_type): - hint = list(iteritems(hint)) + if not isinstance(hint, str): + hint = list(hint.items()) opts['hint'] = hint # Properly format 'hint' arguments for the Bulk API tests. @@ -1011,17 +1004,17 @@ def parse_spec_options(opts): args = req.pop('arguments', {}) if 'hint' in args: hint = args.pop('hint') - if not isinstance(hint, string_type): - hint = list(iteritems(hint)) + if not isinstance(hint, str): + hint = list(hint.items()) args['hint'] = hint req['arguments'] = args else: # Unified test format - bulk_model, spec = next(iteritems(req)) + bulk_model, spec = next(iter(req.items())) if 'hint' in spec: hint = spec.pop('hint') - if not isinstance(hint, string_type): - hint = list(iteritems(hint)) + if not isinstance(hint, str): + hint = list(hint.items()) spec['hint'] = hint opts['requests'] = reqs @@ -1035,7 +1028,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, # PyMongo accepts sort as list of tuples. if arg_name == "sort": sort_dict = arguments[arg_name] - arguments[arg_name] = list(iteritems(sort_dict)) + arguments[arg_name] = list(sort_dict.items()) # Named "key" instead not fieldName. if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) @@ -1057,7 +1050,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, bulk_arguments = camel_to_snake_args(request["arguments"]) else: # Unified test format - bulk_model, spec = next(iteritems(request)) + bulk_model, spec = next(iter(request.items())) bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) bulk_arguments = camel_to_snake_args(spec) requests.append(bulk_class(**dict(bulk_arguments))) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 2e2318591e..17175884da 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -14,23 +14,20 @@ """Utilities for testing driver specs.""" -import copy import functools import threading +from collections import abc from bson import decode, encode from bson.binary import Binary, STANDARD from bson.codec_options import CodecOptions from bson.int64 import Int64 -from bson.py3compat import iteritems, abc, string_type, text_type from bson.son import SON from gridfs import GridFSBucket -from pymongo import (client_session, - helpers, - operations) +from pymongo import client_session from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor from pymongo.errors import (BulkWriteError, @@ -43,20 +40,16 @@ from test import (client_context, client_knobs, - IntegrationTest, - unittest) + IntegrationTest) from test.utils import (camel_to_snake, camel_to_snake_args, - camel_to_upper_camel, CompareType, CMAPListener, OvertCommandListener, parse_spec_options, - parse_read_preference, prepare_spec_arguments, rs_client, - ServerAndTopologyEventListener, - HeartbeatEventListener) + ServerAndTopologyEventListener) class SpecRunnerThread(threading.Thread): @@ -594,7 +587,7 @@ def expect_any_error(op): def expect_error_message(expected_result): if isinstance(expected_result, dict): - return isinstance(expected_result['errorContains'], text_type) + return isinstance(expected_result['errorContains'], str) return False From 02abb6d584d0423e788fd664374586909616fff8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jan 2021 15:04:33 -0800 Subject: [PATCH 0283/2111] PYTHON-2516 Fix coverage task and C extension check (#553) --- .evergreen/run-tests.sh | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 19188726ed..a431769439 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -161,15 +161,13 @@ $PYTHON -c 'import sys; print(sys.version)' # files in the xunit-results/ directory. # Run the tests with coverage if requested and coverage is installed. -# Only cover CPython. Jython and PyPy report suspiciously low coverage. -COVERAGE_OR_PYTHON="$PYTHON" +# Only cover CPython. PyPy reports suspiciously low coverage. +PYTHON_IMPL=$($PYTHON -c "import platform; print(platform.python_implementation())") COVERAGE_ARGS="" if [ -n "$COVERAGE" -a $PYTHON_IMPL = "CPython" ]; then - COVERAGE_BIN="$(dirname "$PYTHON")/coverage" - if $COVERAGE_BIN --version; then + if $PYTHON -m coverage --version; then echo "INFO: coverage is installed, running tests with coverage..." - COVERAGE_OR_PYTHON="$COVERAGE_BIN" - COVERAGE_ARGS="run --branch" + COVERAGE_ARGS="-m coverage run --branch" else echo "INFO: coverage is not installed, running tests without coverage..." fi @@ -188,7 +186,7 @@ if [ -z "$GREEN_FRAMEWORK" ]; then # causing this script to exit. $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" fi - $COVERAGE_OR_PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT + $PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT else # --no_ext has to come before "test" so there is no way to toggle extensions here. $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT From e17299ab2ec0c7d5a244e8fc16b544285a52ee48 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 20 Jan 2021 13:12:47 -0800 Subject: [PATCH 0284/2111] PYTHON-2133 Remove u prefixes from code --- pymongo/bulk.py | 8 +----- pymongo/collection.py | 7 +++-- pymongo/helpers.py | 3 +-- pymongo/message.py | 10 +++---- pymongo/saslprep.py | 6 ++--- test/qcheck.py | 4 +-- test/test_binary.py | 2 +- test/test_bson.py | 42 ++++++++++++++--------------- test/test_client.py | 12 ++++----- test/test_code.py | 3 --- test/test_collection.py | 12 ++++----- test/test_database.py | 60 ++++++++++++++++++++--------------------- test/test_dbref.py | 19 ++++++------- test/test_grid_file.py | 8 +++--- test/test_gridfs.py | 8 +++--- test/test_legacy_api.py | 14 +++++----- test/test_objectid.py | 5 +--- test/test_saslprep.py | 17 ++++++------ 18 files changed, 109 insertions(+), 131 deletions(-) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 0189c2eac2..573a4b7ee2 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -54,12 +54,6 @@ _COMMANDS = ('insert', 'update', 'delete') -# These string literals are used when we create fake server return -# documents client side. We use unicode literals in python 2.x to -# match the actual return values from the server. -_UOP = u"op" - - class _Run(object): """Represents a batch of write operations. """ @@ -123,7 +117,7 @@ def _merge_command(run, full_result, offset, result): idx = doc["index"] + offset replacement["index"] = run.index(idx) # Add the failed operation to the error document. - replacement[_UOP] = run.ops[idx] + replacement["op"] = run.ops[idx] full_result["writeErrors"].append(replacement) wc_error = result.get("writeConcernError") diff --git a/pymongo/collection.py b/pymongo/collection.py index 016de4fad2..84acbc0d45 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -52,7 +52,6 @@ UpdateResult) from pymongo.write_concern import WriteConcern -_UJOIN = u"%s.%s" _FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} _HAYSTACK_MSG = ( "geoHaystack indexes are deprecated as of MongoDB 4.4." @@ -179,7 +178,7 @@ def __init__(self, database, name, create=False, codec_options=None, self.__database = database self.__name = name - self.__full_name = _UJOIN % (self.__database.name, self.__name) + self.__full_name = "%s.%s" % (self.__database.name, self.__name) if create or kwargs or collation: self.__create(kwargs, collation, session) @@ -272,7 +271,7 @@ def __getattr__(self, name): - `name`: the name of the collection to get """ if name.startswith('_'): - full_name = _UJOIN % (self.__name, name) + full_name = "%s.%s" % (self.__name, name) raise AttributeError( "Collection has no attribute %r. To access the %s" " collection, use database['%s']." % ( @@ -281,7 +280,7 @@ def __getattr__(self, name): def __getitem__(self, name): return Collection(self.__database, - _UJOIN % (self.__name, name), + "%s.%s" % (self.__name, name), False, self.codec_options, self.read_preference, diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 899ab28a04..4d306c6bad 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -53,12 +53,11 @@ 9001, # SocketException 262, # ExceededTimeLimit ]) -_UUNDER = u"_" def _gen_index_name(keys): """Generate an index name from the set of fields it is over.""" - return _UUNDER.join(["%s_%s" % item for item in keys]) + return "_".join(["%s_%s" % item for item in keys]) def _index_list(key_or_list, direction=None): diff --git a/pymongo/message.py b/pymongo/message.py index 07bcbd6def..eb2a5fb7e7 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -81,8 +81,6 @@ 'delete': 'deletes' } -_UJOIN = u"%s.%s" - _UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions( unicode_decode_error_handler='replace') @@ -263,7 +261,7 @@ def __init__(self, flags, db, coll, ntoskip, spec, fields, self._as_command = None def namespace(self): - return _UJOIN % (self.db, self.coll) + return "%s.%s" % (self.db, self.coll) def use_command(self, sock_info, exhaust): use_find_cmd = False @@ -346,7 +344,7 @@ def get_message(self, set_slave_ok, sock_info, use_cmd=False): set_slave_ok, False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size - ns = _UJOIN % (self.db, "$cmd") + ns = "%s.%s" % (self.db, "$cmd") ntoreturn = -1 # All DB commands return 1 document else: # OP_QUERY treats ntoreturn of -1 and 1 the same, return @@ -393,7 +391,7 @@ def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, self._as_command = None def namespace(self): - return _UJOIN % (self.db, self.coll) + return "%s.%s" % (self.db, self.coll) def use_command(self, sock_info, exhaust): sock_info.validate_session(self.client, self.session) @@ -435,7 +433,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): False, False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size - ns = _UJOIN % (self.db, "$cmd") + ns = "%s.%s" % (self.db, "$cmd") return query(0, ns, 0, -1, spec, None, self.codec_options, ctx=ctx) return get_more(ns, self.ntoreturn, self.cursor_id, ctx) diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 02a407d2b6..3a4284feb8 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -24,7 +24,7 @@ def saslprep(data): if isinstance(data, str): raise TypeError( "The stringprep module is not available. Usernames and " - "passwords must be ASCII strings.") + "passwords must be instances of bytes.") return data else: HAVE_STRINGPREP = True @@ -74,8 +74,8 @@ def saslprep(data, prohibit_unassigned_code_points=True): # commonly mapped to nothing characters to, well, nothing. in_table_c12 = stringprep.in_table_c12 in_table_b1 = stringprep.in_table_b1 - data = u"".join( - [u"\u0020" if in_table_c12(elt) else elt + data = "".join( + ["\u0020" if in_table_c12(elt) else elt for elt in data if not in_table_b1(elt)]) # RFC3454 section 2, step 2 - Normalize diff --git a/test/qcheck.py b/test/qcheck.py index 0135497c09..57e0940b72 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -83,7 +83,7 @@ def gen_unichar(): def gen_unicode(gen_length): - return lambda: u"".join([x for x in + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) @@ -116,7 +116,7 @@ def gen_regexp(gen_length): # TODO our patterns only consist of one letter. # this is because of a bug in CPython's regex equality testing, # which I haven't quite tracked down, so I'm just ignoring it... - pattern = lambda: u"".join(gen_list(choose_lifted(u"a"), gen_length)()) + pattern = lambda: "".join(gen_list(choose_lifted("a"), gen_length)()) def gen_flags(): flags = 0 diff --git a/test/test_binary.py b/test/test_binary.py index 34e80bfa40..88bf81fcd9 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -100,7 +100,7 @@ def test_exceptions(self): if platform.python_implementation() != "Jython": # Jython's memoryview accepts unicode strings... # https://bugs.jython.org/issue2784 - self.assertRaises(TypeError, Binary, u"hello") + self.assertRaises(TypeError, Binary, "hello") def test_subtype(self): one = Binary(b"hello") diff --git a/test/test_bson.py b/test/test_bson.py index 6637ca5dac..35a94148c6 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -132,17 +132,17 @@ def helper(doc): self.assertEqual(doc, decoder(encoder(doc))) helper({}) - helper({"test": u"hello"}) + helper({"test": "hello"}) self.assertTrue(isinstance(decoder(encoder( {"hello": "world"}))["hello"], str)) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) - helper({u"hello": 0.0013109}) + helper({"hello": 0.0013109}) helper({"something": True}) helper({"false": False}) - helper({"an array": [1, True, 3.8, u"world"]}) - helper({"an object": doc_class({"test": u"something"})}) + helper({"an array": [1, True, 3.8, "world"]}) + helper({"an object": doc_class({"test": "something"})}) helper({"a binary": Binary(b"test", 100)}) helper({"a binary": Binary(b"test", 128)}) helper({"a binary": Binary(b"test", 254)}) @@ -191,7 +191,7 @@ def test_encoding_defaultdict(self): def test_basic_validation(self): self.assertRaises(TypeError, is_valid, 100) - self.assertRaises(TypeError, is_valid, u"test") + self.assertRaises(TypeError, is_valid, "test") self.assertRaises(TypeError, is_valid, 10.4) self.assertInvalid(b"test") @@ -277,22 +277,22 @@ def test_random_data_is_not_bson(self): qcheck.gen_string(qcheck.gen_range(0, 40))) def test_basic_decode(self): - self.assertEqual({"test": u"hello world"}, + self.assertEqual({"test": "hello world"}, decode(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" b"\x72\x6C\x64\x00\x00")) - self.assertEqual([{"test": u"hello world"}, {}], + self.assertEqual([{"test": "hello world"}, {}], decode_all(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" b"\x05\x00\x00\x00\x00")) - self.assertEqual([{"test": u"hello world"}, {}], + self.assertEqual([{"test": "hello world"}, {}], list(decode_iter( b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" b"\x05\x00\x00\x00\x00"))) - self.assertEqual([{"test": u"hello world"}, {}], + self.assertEqual([{"test": "hello world"}, {}], list(decode_file_iter(BytesIO( b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" @@ -380,11 +380,11 @@ def test_basic_encode(self): self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00")) self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00") - self.assertEqual(encode({"test": u"hello world"}), + self.assertEqual(encode({"test": "hello world"}), b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" b"\x64\x00\x00") - self.assertEqual(encode({u"mike": 100}), + self.assertEqual(encode({"mike": 100}), b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" b"\x00\x00\x00") self.assertEqual(encode({"hello": 1.5}), @@ -433,7 +433,7 @@ def test_basic_encode(self): b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" b"\x00\x00return function(){ return x; }\x00\t\x00" b"\x00\x00\x08x\x00\x00\x00\x00") - unicode_empty_scope = Code(u"function(){ return 'héllo';}", {}) + unicode_empty_scope = Code("function(){ return 'héllo';}", {}) self.assertEqual(encode({'$field': unicode_empty_scope}), b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" @@ -660,10 +660,10 @@ def test_non_string_keys(self): self.assertRaises(InvalidDocument, encode, {8.9: "test"}) def test_utf8(self): - w = {u"aéあ": u"aéあ"} + w = {"aéあ": "aéあ"} self.assertEqual(w, decode(encode(w))) - # b'a\xe9' == u"aé".encode("iso-8859-1") + # b'a\xe9' == "aé".encode("iso-8859-1") iso8859_bytes = b'a\xe9' y = {"hello": iso8859_bytes} # Stored as BSON binary subtype 0. @@ -678,16 +678,16 @@ def test_null_character(self): # This test doesn't make much sense in Python2 # since {'a': '\x00'} == {'a': u'\x00'}. # Decoding here actually returns {'a': '\x00'} - doc = {"a": u"\x00"} + doc = {"a": "\x00"} self.assertEqual(doc, decode(encode(doc))) self.assertRaises(InvalidDocument, encode, {b"\x00": "a"}) - self.assertRaises(InvalidDocument, encode, {u"\x00": "a"}) + self.assertRaises(InvalidDocument, encode, {"\x00": "a"}) self.assertRaises(InvalidDocument, encode, {"a": re.compile(b"ab\x00c")}) self.assertRaises(InvalidDocument, encode, - {"a": re.compile(u"ab\x00c")}) + {"a": re.compile("ab\x00c")}) def test_move_id(self): self.assertEqual(b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" @@ -996,11 +996,11 @@ def test_unicode_decode_error_handler(self): dec = decode(invalid_key, CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: u"foobar"}) + self.assertEqual(dec, {replaced_key: "foobar"}) dec = decode(invalid_key, CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: u"foobar"}) + self.assertEqual(dec, {ignored_key: "foobar"}) self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions( unicode_decode_error_handler="strict")) @@ -1014,11 +1014,11 @@ def test_unicode_decode_error_handler(self): dec = decode(invalid_val, CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {u"keystr": replaced_val}) + self.assertEqual(dec, {"keystr": replaced_val}) dec = decode(invalid_val, CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {u"keystr": ignored_val}) + self.assertEqual(dec, {"keystr": ignored_val}) self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions( unicode_decode_error_handler="strict")) diff --git a/test/test_client.py b/test/test_client.py index add8a789c7..96e17e3e6d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -712,8 +712,8 @@ def test_list_databases(self): self.assertEqual(["name"], list(doc)) def test_list_database_names(self): - self.client.pymongo_test.test.insert_one({"dummy": u"object"}) - self.client.pymongo_test_mike.test.insert_one({"dummy": u"object"}) + self.client.pymongo_test.test.insert_one({"dummy": "object"}) + self.client.pymongo_test_mike.test.insert_one({"dummy": "object"}) cmd_docs = self.client.admin.command("listDatabases")["databases"] cmd_names = [doc["name"] for doc in cmd_docs] @@ -726,8 +726,8 @@ def test_drop_database(self): self.assertRaises(TypeError, self.client.drop_database, 5) self.assertRaises(TypeError, self.client.drop_database, None) - self.client.pymongo_test.test.insert_one({"dummy": u"object"}) - self.client.pymongo_test2.test.insert_one({"dummy": u"object"}) + self.client.pymongo_test.test.insert_one({"dummy": "object"}) + self.client.pymongo_test2.test.insert_one({"dummy": "object"}) dbs = self.client.list_database_names() self.assertIn("pymongo_test", dbs) self.assertIn("pymongo_test2", dbs) @@ -1134,8 +1134,8 @@ def test_ipv6(self): uri += '/?replicaSet=' + client_context.replica_set_name client = rs_or_single_client_noauth(uri) - client.pymongo_test.test.insert_one({"dummy": u"object"}) - client.pymongo_test_bernie.test.insert_one({"dummy": u"object"}) + client.pymongo_test.test.insert_one({"dummy": "object"}) + client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() self.assertTrue("pymongo_test" in dbs) diff --git a/test/test_code.py b/test/test_code.py index a2e257adf8..8d564707f0 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -28,11 +28,8 @@ def test_types(self): self.assertRaises(TypeError, Code, 5) self.assertRaises(TypeError, Code, None) self.assertRaises(TypeError, Code, "aoeu", 5) - self.assertRaises(TypeError, Code, u"aoeu", 5) self.assertTrue(Code("aoeu")) - self.assertTrue(Code(u"aoeu")) self.assertTrue(Code("aoeu", {})) - self.assertTrue(Code(u"aoeu", {})) def test_read_only(self): c = Code("blah") diff --git a/test/test_collection.py b/test/test_collection.py index db6202154d..eafc38b067 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1197,7 +1197,7 @@ def test_id_can_be_anything(self): self.assertEqual(obj["_id"], numeric) for x in db.test.find(): - self.assertEqual(x["hello"], u"world") + self.assertEqual(x["hello"], "world") self.assertTrue("_id" in x) def test_invalid_key_names(self): @@ -2085,11 +2085,11 @@ def test_map_reduce(self): def test_messages_with_unicode_collection_names(self): db = self.db - db[u"Employés"].insert_one({"x": 1}) - db[u"Employés"].replace_one({"x": 1}, {"x": 2}) - db[u"Employés"].delete_many({}) - db[u"Employés"].find_one() - list(db[u"Employés"].find()) + db["Employés"].insert_one({"x": 1}) + db["Employés"].replace_one({"x": 1}, {"x": 2}) + db["Employés"].delete_many({}) + db["Employés"].find_one() + list(db["Employés"].find()) def test_drop_indexes_non_existent(self): self.db.drop_collection("test") diff --git a/test/test_database.py b/test/test_database.py index de0415f4b3..af0fca8603 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -75,7 +75,7 @@ def test_name(self): self.assertRaises(InvalidName, Database, self.client, 'my"db') self.assertRaises(InvalidName, Database, self.client, "my\x00db") self.assertRaises(InvalidName, Database, - self.client, u"my\u0000db") + self.client, "my\u0000db") self.assertEqual("name", Database(self.client, "name").name) def test_get_collection(self): @@ -133,7 +133,7 @@ def test_get_coll(self): def test_repr(self): self.assertEqual(repr(Database(self.client, "pymongo_test")), "Database(%r, %s)" % (self.client, - repr(u"pymongo_test"))) + repr("pymongo_test"))) def test_create_collection(self): db = Database(self.client, "pymongo_test") @@ -148,19 +148,19 @@ def test_create_collection(self): self.assertRaises(InvalidName, db.create_collection, "coll..ection") test = db.create_collection("test") - self.assertTrue(u"test" in db.list_collection_names()) - test.insert_one({"hello": u"world"}) + self.assertTrue("test" in db.list_collection_names()) + test.insert_one({"hello": "world"}) self.assertEqual(db.test.find_one()["hello"], "world") db.drop_collection("test.foo") db.create_collection("test.foo") - self.assertTrue(u"test.foo" in db.list_collection_names()) + self.assertTrue("test.foo" in db.list_collection_names()) self.assertRaises(CollectionInvalid, db.create_collection, "test.foo") def test_list_collection_names(self): db = Database(self.client, "pymongo_test") - db.test.insert_one({"dummy": u"object"}) - db.test.mike.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) + db.test.mike.insert_one({"dummy": "object"}) colls = db.list_collection_names() self.assertTrue("test" in colls) @@ -220,8 +220,8 @@ def test_list_collection_names_filter(self): def test_list_collections(self): self.client.drop_database("pymongo_test") db = Database(self.client, "pymongo_test") - db.test.insert_one({"dummy": u"object"}) - db.test.mike.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) + db.test.mike.insert_one({"dummy": "object"}) results = db.list_collections() colls = [result["name"] for result in results] @@ -308,22 +308,22 @@ def test_drop_collection(self): self.assertRaises(TypeError, db.drop_collection, 5) self.assertRaises(TypeError, db.drop_collection, None) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) db.drop_collection("test") self.assertFalse("test" in db.list_collection_names()) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) - db.drop_collection(u"test") + db.drop_collection("test") self.assertFalse("test" in db.list_collection_names()) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) db.drop_collection(db.test) self.assertFalse("test" in db.list_collection_names()) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertTrue("test" in db.list_collection_names()) db.test.drop() self.assertFalse("test" in db.list_collection_names()) @@ -343,7 +343,7 @@ def test_validate_collection(self): self.assertRaises(TypeError, db.validate_collection, 5) self.assertRaises(TypeError, db.validate_collection, None) - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) self.assertRaises(OperationFailure, db.validate_collection, "test.doesnotexist") @@ -360,7 +360,7 @@ def test_validate_collection(self): @client_context.require_version_min(4, 3, 3) def test_validate_collection_background(self): db = self.client.pymongo_test - db.test.insert_one({"dummy": u"object"}) + db.test.insert_one({"dummy": "object"}) coll = db.test self.assertTrue(db.validate_collection(coll, background=False)) # The inMemory storage engine does not support background=True. @@ -509,11 +509,9 @@ def test_password_digest(self): self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) self.assertEqual(auth._password_digest("mike", "password"), - u"cd7e45b3b2767dc2fa9b6b548457ed00") - self.assertEqual(auth._password_digest("mike", "password"), - auth._password_digest(u"mike", u"password")) - self.assertEqual(auth._password_digest("Gustave", u"Dor\xe9"), - u"81e0e2364499209f466e75926a162d73") + "cd7e45b3b2767dc2fa9b6b548457ed00") + self.assertEqual(auth._password_digest("Gustave", "Dor\xe9"), + "81e0e2364499209f466e75926a162d73") @client_context.require_auth def test_authenticate_add_remove_user(self): @@ -559,7 +557,7 @@ def check_auth(username, password): if not client_context.version.at_least(3, 7, 2) or HAVE_STRINGPREP: # Unicode name and password. - check_auth(u"mike", u"password") + check_auth("mike", "password") auth_db.remove_user("mike") self.assertRaises( @@ -567,15 +565,15 @@ def check_auth(username, password): # Add / authenticate / change password self.assertRaises( - OperationFailure, check_auth, "Gustave", u"Dor\xe9") - auth_db.add_user("Gustave", u"Dor\xe9", roles=["read"]) - check_auth("Gustave", u"Dor\xe9") + OperationFailure, check_auth, "Gustave", "Dor\xe9") + auth_db.add_user("Gustave", "Dor\xe9", roles=["read"]) + check_auth("Gustave", "Dor\xe9") # Change password. auth_db.add_user("Gustave", "password", roles=["read"]) self.assertRaises( - OperationFailure, check_auth, "Gustave", u"Dor\xe9") - check_auth("Gustave", u"password") + OperationFailure, check_auth, "Gustave", "Dor\xe9") + check_auth("Gustave", "password") @client_context.require_auth @ignore_deprecations @@ -796,18 +794,18 @@ def test_insert_find_one(self): db = self.client.pymongo_test db.test.drop() - a_doc = SON({"hello": u"world"}) + a_doc = SON({"hello": "world"}) a_key = db.test.insert_one(a_doc).inserted_id self.assertTrue(isinstance(a_doc["_id"], ObjectId)) self.assertEqual(a_doc["_id"], a_key) self.assertEqual(a_doc, db.test.find_one({"_id": a_doc["_id"]})) self.assertEqual(a_doc, db.test.find_one(a_key)) self.assertEqual(None, db.test.find_one(ObjectId())) - self.assertEqual(a_doc, db.test.find_one({"hello": u"world"})) - self.assertEqual(None, db.test.find_one({"hello": u"test"})) + self.assertEqual(a_doc, db.test.find_one({"hello": "world"})) + self.assertEqual(None, db.test.find_one({"hello": "test"})) b = db.test.find_one() - b["hello"] = u"mike" + b["hello"] = "mike" db.test.replace_one({"_id": b["_id"]}, b) self.assertNotEqual(a_doc, db.test.find_one(a_key)) diff --git a/test/test_dbref.py b/test/test_dbref.py index 4996a2f00e..51ff52aaf5 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -36,9 +36,8 @@ def test_creation(self): self.assertRaises(TypeError, DBRef, None, a) self.assertRaises(TypeError, DBRef, "coll", a, 5) self.assertTrue(DBRef("coll", a)) - self.assertTrue(DBRef(u"coll", a)) - self.assertTrue(DBRef(u"coll", 5)) - self.assertTrue(DBRef(u"coll", 5, "database")) + self.assertTrue(DBRef("coll", 5)) + self.assertTrue(DBRef("coll", 5, "database")) def test_read_only(self): a = DBRef("coll", ObjectId()) @@ -59,7 +58,7 @@ def test_repr(self): self.assertEqual(repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), "DBRef('coll', ObjectId('1234567890abcdef12345678'))") - self.assertEqual(repr(DBRef(u"coll", + self.assertEqual(repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), "DBRef(%s, ObjectId('1234567890abcdef12345678'))" % (repr(u'coll'),) @@ -75,23 +74,21 @@ def test_equality(self): obj_id = ObjectId("1234567890abcdef12345678") self.assertEqual(DBRef('foo', 5), DBRef('foo', 5)) - self.assertEqual(DBRef("coll", obj_id), DBRef(u"coll", obj_id)) + self.assertEqual(DBRef("coll", obj_id), DBRef("coll", obj_id)) self.assertNotEqual(DBRef("coll", obj_id), - DBRef(u"coll", obj_id, "foo")) + DBRef("coll", obj_id, "foo")) self.assertNotEqual(DBRef("coll", obj_id), DBRef("col", obj_id)) self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", ObjectId(b"123456789011"))) self.assertNotEqual(DBRef("coll", obj_id), 4) - self.assertEqual(DBRef("coll", obj_id, "foo"), - DBRef(u"coll", obj_id, "foo")) self.assertNotEqual(DBRef("coll", obj_id, "foo"), - DBRef(u"coll", obj_id, "bar")) + DBRef("coll", obj_id, "bar")) # Explicitly test inequality self.assertFalse(DBRef('foo', 5) != DBRef('foo', 5)) - self.assertFalse(DBRef("coll", obj_id) != DBRef(u"coll", obj_id)) + self.assertFalse(DBRef("coll", obj_id) != DBRef("coll", obj_id)) self.assertFalse(DBRef("coll", obj_id, "foo") != - DBRef(u"coll", obj_id, "foo")) + DBRef("coll", obj_id, "foo")) def test_kwargs(self): self.assertEqual(DBRef("coll", 5, foo="bar"), diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 705b9f032e..d95e6a2429 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -506,21 +506,21 @@ def test_readchunk(self): def test_write_unicode(self): f = GridIn(self.db.fs) - self.assertRaises(TypeError, f.write, u"foo") + self.assertRaises(TypeError, f.write, "foo") f = GridIn(self.db.fs, encoding="utf-8") - f.write(u"foo") + f.write("foo") f.close() g = GridOut(self.db.fs, f._id) self.assertEqual(b"foo", g.read()) f = GridIn(self.db.fs, encoding="iso-8859-1") - f.write(u"aé") + f.write("aé") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual(u"aé".encode("iso-8859-1"), g.read()) + self.assertEqual("aé".encode("iso-8859-1"), g.read()) def test_set_after_close(self): f = GridIn(self.db.fs, _id="foo", bar="baz") diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 1827944c5d..e9c5301e28 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -385,14 +385,14 @@ def test_exists(self): self.assertFalse(self.fs.exists({"foo": {"$gt": 12}})) def test_put_unicode(self): - self.assertRaises(TypeError, self.fs.put, u"hello") + self.assertRaises(TypeError, self.fs.put, "hello") - oid = self.fs.put(u"hello", encoding="utf-8") + oid = self.fs.put("hello", encoding="utf-8") self.assertEqual(b"hello", self.fs.get(oid).read()) self.assertEqual("utf-8", self.fs.get(oid).encoding) - oid = self.fs.put(u"aé", encoding="iso-8859-1") - self.assertEqual(u"aé".encode("iso-8859-1"), self.fs.get(oid).read()) + oid = self.fs.put("aé", encoding="iso-8859-1") + self.assertEqual("aé".encode("iso-8859-1"), self.fs.get(oid).read()) self.assertEqual("iso-8859-1", self.fs.get(oid).encoding) def test_missing_length_iter(self): diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 6d7a6425bc..8cc215c459 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -131,7 +131,7 @@ def test_insert_find_one(self): db = self.db db.test.drop() self.assertEqual(0, len(list(db.test.find()))) - doc = {"hello": u"world"} + doc = {"hello": "world"} _id = db.test.insert(doc) self.assertEqual(1, len(list(db.test.find()))) self.assertEqual(doc, db.test.find_one()) @@ -167,13 +167,13 @@ def test_insert_multiple(self): # Tests legacy insert. db = self.db db.drop_collection("test") - doc1 = {"hello": u"world"} - doc2 = {"hello": u"mike"} + doc1 = {"hello": "world"} + doc2 = {"hello": "mike"} self.assertEqual(db.test.find().count(), 0) ids = db.test.insert([doc1, doc2]) self.assertEqual(db.test.find().count(), 2) - self.assertEqual(doc1, db.test.find_one({"hello": u"world"})) - self.assertEqual(doc2, db.test.find_one({"hello": u"mike"})) + self.assertEqual(doc1, db.test.find_one({"hello": "world"})) + self.assertEqual(doc2, db.test.find_one({"hello": "mike"})) self.assertEqual(2, len(ids)) self.assertEqual(doc1["_id"], ids[0]) @@ -281,7 +281,7 @@ def test_insert_iterables(self): db.drop_collection("test") self.assertEqual(db.test.find().count(), 0) - db.test.insert(({"hello": u"world"}, {"hello": u"world"})) + db.test.insert(({"hello": "world"}, {"hello": "world"})) self.assertEqual(db.test.find().count(), 2) db.drop_collection("test") @@ -1062,7 +1062,7 @@ def test_auto_ref_and_deref(self): db.test.b.remove({}) db.test.c.remove({}) - a = {"hello": u"world"} + a = {"hello": "world"} db.test.a.save(a) b = {"test": a} diff --git a/test/test_objectid.py b/test/test_objectid.py index 7b26e7da87..490505234e 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -50,14 +50,11 @@ def test_creation(self): def test_unicode(self): a = ObjectId() self.assertEqual(a, ObjectId(a)) - self.assertEqual(ObjectId("123456789012123456789012"), - ObjectId(u"123456789012123456789012")) - self.assertRaises(InvalidId, ObjectId, u"hello") + self.assertRaises(InvalidId, ObjectId, "hello") def test_from_hex(self): ObjectId("123456789012123456789012") self.assertRaises(InvalidId, ObjectId, "123456789012123456789G12") - self.assertRaises(InvalidId, ObjectId, u"123456789012123456789G12") def test_repr_str(self): self.assertEqual(repr(ObjectId("1234567890abcdef12345678")), diff --git a/test/test_saslprep.py b/test/test_saslprep.py index 6fdf0452d8..c694224a6c 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -13,7 +13,6 @@ # limitations under the License. import sys -import warnings sys.path[0:0] = [""] @@ -26,18 +25,18 @@ def test_saslprep(self): try: import stringprep except ImportError: - self.assertRaises(TypeError, saslprep, u"anything...") + self.assertRaises(TypeError, saslprep, "anything...") # Bytes strings are ignored. self.assertEqual(saslprep(b"user"), b"user") else: # Examples from RFC4013, Section 3. - self.assertEqual(saslprep(u"I\u00ADX"), u"IX") - self.assertEqual(saslprep(u"user"), u"user") - self.assertEqual(saslprep(u"USER"), u"USER") - self.assertEqual(saslprep(u"\u00AA"), u"a") - self.assertEqual(saslprep(u"\u2168"), u"IX") - self.assertRaises(ValueError, saslprep, u"\u0007") - self.assertRaises(ValueError, saslprep, u"\u0627\u0031") + self.assertEqual(saslprep("I\u00ADX"), "IX") + self.assertEqual(saslprep("user"), "user") + self.assertEqual(saslprep("USER"), "USER") + self.assertEqual(saslprep("\u00AA"), "a") + self.assertEqual(saslprep("\u2168"), "IX") + self.assertRaises(ValueError, saslprep, "\u0007") + self.assertRaises(ValueError, saslprep, "\u0627\u0031") # Bytes strings are ignored. self.assertEqual(saslprep(b"user"), b"user") From 454d77b783fa84d02d3ff8a41cdd96ad13a42d78 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jan 2021 16:38:07 -0800 Subject: [PATCH 0285/2111] PYTHON-1309 Remove helpers for getLastError (#554) Remove Database.error, Database.last_status, Database.previous_error, and Database.reset_error_history. --- doc/changelog.rst | 4 ++ doc/migrate-to-pymongo4.rst | 11 +++++ pymongo/database.py | 82 ------------------------------------- pymongo/mongo_client.py | 4 -- pymongo/topology.py | 10 ----- test/test_database.py | 44 -------------------- test/test_legacy_api.py | 19 --------- test/test_topology.py | 41 ------------------- 8 files changed, 15 insertions(+), 200 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 644a5b5599..92da293754 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -23,6 +23,10 @@ Breaking Changes in 4.0 :attr:`pymongo.mongo_client.MongoClient.is_locked`. - Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. - Removed :meth:`pymongo.database.Database.collection_names`. +- Removed :meth:`pymongo.database.Database.error`, + :meth:`pymongo.database.Database.last_status`, + :meth:`pymongo.database.Database.previous_error`, + :meth:`pymongo.database.Database.reset_error_history`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`pymongo.cursor.Cursor.close` instead. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index a6328bdf00..ef2e6104c8 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -174,6 +174,17 @@ can be changed to this:: >>> from bson.code import Code >>> result = database.command('eval', Code('function (x) {return x;}'), args=[3]).get('retval') +Database.error, Database.last_status, Database.previous_error, and Database.reset_error_history are removed +........................................................................................................... + +Removed :meth:`pymongo.database.Database.error`, +:meth:`pymongo.database.Database.last_status`, +:meth:`pymongo.database.Database.previous_error`, and +:meth:`pymongo.database.Database.reset_error_history`. +These methods are obsolete: all MongoDB write operations use an acknowledged +write concern and report their errors by default. These methods were +deprecated in PyMongo 2.8. + Collection.parallel_scan is removed ................................... diff --git a/pymongo/database.py b/pymongo/database.py index a18867523c..ed4683927e 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1105,88 +1105,6 @@ def profiling_info(self, session=None): """ return list(self["system.profile"].find(session=session)) - def error(self): - """**DEPRECATED**: Get the error if one occurred on the last operation. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("Database.error() is deprecated", - DeprecationWarning, stacklevel=2) - - error = self.command("getlasterror") - error_msg = error.get("err", "") - if error_msg is None: - return None - if error_msg.startswith("not master"): - # Reset primary server and request check, if another thread isn't - # doing so already. - primary = self.__client.primary - if primary: - self.__client._handle_getlasterror(primary, error_msg) - return error - - def last_status(self): - """**DEPRECATED**: Get status information from the last operation. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - Returns a SON object with status information. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("last_status() is deprecated", - DeprecationWarning, stacklevel=2) - - return self.command("getlasterror") - - def previous_error(self): - """**DEPRECATED**: Get the most recent error on this database. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - Only returns errors that have occurred since the last call to - :meth:`reset_error_history`. Returns None if no such errors have - occurred. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("previous_error() is deprecated", - DeprecationWarning, stacklevel=2) - - error = self.command("getpreverror") - if error.get("err", 0) is None: - return None - return error - - def reset_error_history(self): - """**DEPRECATED**: Reset the error history of this database. - - This method is obsolete: all MongoDB write operations (insert, update, - remove, and so on) use the write concern ``w=1`` and report their - errors by default. - - Calls to :meth:`previous_error` will only return errors that have - occurred since the most recent call to this method. - - .. versionchanged:: 2.8 - Deprecated. - """ - warnings.warn("reset_error_history() is deprecated", - DeprecationWarning, stacklevel=2) - - self.command("reseterror") - def __iter__(self): return self diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 86aeeb9617..e05cd86a70 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1473,10 +1473,6 @@ def _retryable_write(self, retryable, func, session): with self._tmp_session(session) as s: return self._retry_with_session(retryable, func, s, None) - def _handle_getlasterror(self, address, error_msg): - """Clear our pool for a server, mark it Unknown, and check it soon.""" - self._topology.handle_getlasterror(address, error_msg) - def __eq__(self, other): if isinstance(other, self.__class__): return self.address == other.address diff --git a/pymongo/topology.py b/pymongo/topology.py index d8ea8f8bee..a3465597d4 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -428,16 +428,6 @@ def request_check_all(self, wait_time=5): self._request_check_all() self._condition.wait(wait_time) - def handle_getlasterror(self, address, error_msg): - """Clear our pool for a server, mark it Unknown, and check it soon.""" - error = NotMasterError(error_msg, {'code': 10107, 'errmsg': error_msg}) - with self._lock: - server = self._servers.get(address) - if server: - self._process_change( - ServerDescription(address, error=error), True) - server.request_check() - def update_pool(self, all_credentials): # Remove any stale sockets and add new sockets if pool is too small. servers = [] diff --git a/test/test_database.py b/test/test_database.py index af0fca8603..0c3271aa8f 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -432,50 +432,6 @@ def test_profiling_info(self): self.assertTrue(isinstance(info[0]['op'], str)) self.assertTrue(isinstance(info[0]["ts"], datetime.datetime)) - @client_context.require_no_mongos - @ignore_deprecations - def test_errors(self): - # We must call getlasterror, etc. on same socket as last operation. - db = rs_or_single_client(maxPoolSize=1).pymongo_test - db.reset_error_history() - self.assertEqual(None, db.error()) - if client_context.supports_getpreverror: - self.assertEqual(None, db.previous_error()) - - db.test.insert_one({"_id": 1}) - unacked = db.test.with_options(write_concern=WriteConcern(w=0)) - - unacked.insert_one({"_id": 1}) - self.assertTrue(db.error()) - if client_context.supports_getpreverror: - self.assertTrue(db.previous_error()) - - unacked.insert_one({"_id": 1}) - self.assertTrue(db.error()) - - if client_context.supports_getpreverror: - prev_error = db.previous_error() - self.assertEqual(prev_error["nPrev"], 1) - del prev_error["nPrev"] - prev_error.pop("lastOp", None) - error = db.error() - error.pop("lastOp", None) - # getLastError includes "connectionId" in recent - # server versions, getPrevError does not. - error.pop("connectionId", None) - self.assertEqualReply(error, prev_error) - - db.test.find_one() - self.assertEqual(None, db.error()) - if client_context.supports_getpreverror: - self.assertTrue(db.previous_error()) - self.assertEqual(db.previous_error()["nPrev"], 2) - - db.reset_error_history() - self.assertEqual(None, db.error()) - if client_context.supports_getpreverror: - self.assertEqual(None, db.previous_error()) - def test_command(self): self.maxDiff = None db = self.client.admin diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 8cc215c459..146e06f10e 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -1033,25 +1033,6 @@ def test_group_uuid_representation(self): coll.group([], {"_id": uu}, {"count": 0}, reduce)) - def test_last_status(self): - # Tests many legacy API elements. - # We must call getlasterror on same socket as the last operation. - db = rs_or_single_client(maxPoolSize=1).pymongo_test - collection = db.test_last_status - collection.remove({}) - collection.save({"i": 1}) - - collection.update({"i": 1}, {"$set": {"i": 2}}, w=0) - # updatedExisting is always false on mongos after an OP_MSG - # unacknowledged write. - if not (client_context.version >= (3, 6) and client_context.is_mongos): - self.assertTrue(db.last_status()["updatedExisting"]) - wait_until(lambda: collection.find_one({"i": 2}), - "found updated w=0 doc") - - collection.update({"i": 1}, {"$set": {"i": 500}}, w=0) - self.assertFalse(db.last_status()["updatedExisting"]) - def test_auto_ref_and_deref(self): # Legacy API. db = self.client.pymongo_test diff --git a/test/test_topology.py b/test/test_topology.py index 9cd019c14a..2abcab47b1 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -426,38 +426,6 @@ def test_handle_error(self): self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) - def test_handle_getlasterror(self): - t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_ismaster(t, ('b', 27017), { - 'ok': 1, - 'ismaster': False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - t.handle_getlasterror(('a', 27017), 'not master') - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) - - got_ismaster(t, ('a', 27017), { - 'ok': 1, - 'ismaster': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) - def test_handle_error_removed_server(self): t = create_mock_topology(replica_set_name='rs') @@ -468,15 +436,6 @@ def test_handle_error_removed_server(self): # Server was *not* added as type Unknown. self.assertFalse(t.has_server(('b', 27017))) - def test_handle_getlasterror_removed_server(self): - t = create_mock_topology(replica_set_name='rs') - - # No error resetting a server not in the TopologyDescription. - t.handle_getlasterror(('b', 27017), 'not master') - - # Server was *not* added as type Unknown. - self.assertFalse(t.has_server(('b', 27017))) - def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. From c65b89d8a155c840d223337540b392acf4769460 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 21 Jan 2021 11:25:19 -0800 Subject: [PATCH 0286/2111] PYTHON-1319 Remove Collection.ensure_index and reindex (#555) --- doc/api/pymongo/collection.rst | 2 - doc/changelog.rst | 2 + doc/migrate-to-pymongo4.rst | 41 +++++++++ pymongo/collection.py | 92 +------------------ pymongo/database.py | 2 - pymongo/mongo_client.py | 59 ------------- test/__init__.py | 12 --- test/test_collation.py | 8 -- test/test_collection.py | 38 -------- test/test_legacy_api.py | 155 +-------------------------------- test/test_session.py | 11 --- 11 files changed, 46 insertions(+), 376 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index dddf8dc34e..98df4432e3 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -60,7 +60,6 @@ .. automethod:: create_indexes .. automethod:: drop_index .. automethod:: drop_indexes - .. automethod:: reindex .. automethod:: list_indexes .. automethod:: index_information .. automethod:: drop @@ -77,4 +76,3 @@ .. automethod:: update(spec, document, upsert=False, manipulate=False, multi=False, check_keys=True, **kwargs) .. automethod:: remove(spec_or_id=None, multi=True, **kwargs) .. automethod:: find_and_modify - .. automethod:: ensure_index diff --git a/doc/changelog.rst b/doc/changelog.rst index 92da293754..248372e15a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -28,6 +28,8 @@ Breaking Changes in 4.0 :meth:`pymongo.database.Database.previous_error`, :meth:`pymongo.database.Database.reset_error_history`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. +- Removed :meth:`pymongo.collection.Collection.ensure_index`. +- Removed :meth:`pymongo.collection.Collection.reindex`. - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`pymongo.cursor.Cursor.close` instead. - Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index ef2e6104c8..68788c05b1 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -136,6 +136,47 @@ can be changed to this:: names = client.list_collection_names() non_system_names = client.list_collection_names(filter={"name": {"$regex": r"^(?!system\\.)"}}) + +Collection +---------- + +Collection.ensure_index is removed +.................................. + +Removed :meth:`pymongo.collection.Collection.ensure_index`. Use +:meth:`~pymongo.collection.Collection.create_index` or +:meth:`~pymongo.collection.Collection.create_indexes` instead. Note that +``ensure_index`` maintained an in memory cache of recently created indexes +whereas the newer methods do not. Applications should avoid frequent calls +to :meth:`~pymongo.collection.Collection.create_index` or +:meth:`~pymongo.collection.Collection.create_indexes`. Code like this:: + + def persist(self, document): + my_collection.ensure_index('a', unique=True) + my_collection.insert_one(document) + +Can be changed to this:: + + def persist(self, document): + if not self.created_index: + my_collection.create_index('a', unique=True) + self.created_index = True + my_collection.insert_one(document) + +Collection.reindex is removed +............................. + +Removed :meth:`pymongo.collection.Collection.reindex`. Run the +`reIndex command`_ directly instead. Code like this:: + + >>> result = database.my_collection.reindex() + +can be changed to this:: + + >>> result = database.command('reIndex', 'my_collection') + +.. _reIndex command: https://docs.mongodb.com/manual/reference/command/reIndex/ + Removed features with no migration path --------------------------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index 84acbc0d45..5314b6e26e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1970,42 +1970,6 @@ def create_index(self, keys, session=None, **kwargs): index = IndexModel(keys, **kwargs) return self.__create_indexes([index], session, **cmd_options)[0] - def ensure_index(self, key_or_list, cache_for=300, **kwargs): - """**DEPRECATED** - Ensures that an index exists on this collection. - - .. versionchanged:: 3.0 - **DEPRECATED** - """ - warnings.warn("ensure_index is deprecated. Use create_index instead.", - DeprecationWarning, stacklevel=2) - # The types supported by datetime.timedelta. - if not (isinstance(cache_for, int) or - isinstance(cache_for, float)): - raise TypeError("cache_for must be an integer or float.") - - if "drop_dups" in kwargs: - kwargs["dropDups"] = kwargs.pop("drop_dups") - - if "bucket_size" in kwargs: - kwargs["bucketSize"] = kwargs.pop("bucket_size") - - index = IndexModel(key_or_list, **kwargs) - name = index.document["name"] - - # Note that there is a race condition here. One thread could - # check if the index is cached and be preempted before creating - # and caching the index. This means multiple threads attempting - # to create the same index concurrently could send the index - # to the server two or more times. This has no practical impact - # other than wasted round trips. - if not self.__database.client._cached(self.__database.name, - self.__name, name): - self.__create_indexes([index], session=None) - self.__database.client._cache_index(self.__database.name, - self.__name, name, cache_for) - return name - return None - def drop_indexes(self, session=None, **kwargs): """Drops all indexes on this collection. @@ -2031,7 +1995,6 @@ def drop_indexes(self, session=None, **kwargs): when connected to MongoDB >= 3.4. """ - self.__database.client._purge_index(self.__database.name, self.__name) self.drop_index("*", session=session, **kwargs) def drop_index(self, index_or_name, session=None, **kwargs): @@ -2048,8 +2011,8 @@ def drop_index(self, index_or_name, session=None, **kwargs): .. warning:: if a custom name was used on index creation (by - passing the `name` parameter to :meth:`create_index` or - :meth:`ensure_index`) the index **must** be dropped by name. + passing the `name` parameter to :meth:`create_index`) the index + **must** be dropped by name. :Parameters: - `index_or_name`: index (or name of index) to drop @@ -2078,8 +2041,6 @@ def drop_index(self, index_or_name, session=None, **kwargs): if not isinstance(name, str): raise TypeError("index_or_name must be an instance of str or list") - self.__database.client._purge_index( - self.__database.name, self.__name, name) cmd = SON([("dropIndexes", self.__name), ("index", name)]) cmd.update(kwargs) with self._socket_for_writes(session) as sock_info: @@ -2090,55 +2051,6 @@ def drop_index(self, index_or_name, session=None, **kwargs): write_concern=self._write_concern_for(session), session=session) - def reindex(self, session=None, **kwargs): - """Rebuilds all indexes on this collection. - - **DEPRECATED** - The :meth:`~reindex` method is deprecated and will be - removed in PyMongo 4.0. Use :meth:`~pymongo.database.Database.command` - to run the ``reIndex`` command directly instead:: - - db.command({"reIndex": ""}) - - .. note:: Starting in MongoDB 4.6, the `reIndex` command can only be - run when connected to a standalone mongod. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): optional arguments to the reIndex - command (like maxTimeMS) can be passed as keyword arguments. - - .. warning:: reindex blocks all other operations (indexes - are built in the foreground) and will be slow for large - collections. - - .. versionchanged:: 3.11 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.5 - We no longer apply this collection's write concern to this operation. - MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns - an error if we include the write concern. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - """ - warnings.warn("The reindex method is deprecated and will be removed in " - "PyMongo 4.0. Use the Database.command method to run the " - "reIndex command instead.", - DeprecationWarning, stacklevel=2) - cmd = SON([("reIndex", self.__name)]) - cmd.update(kwargs) - with self._socket_for_writes(session) as sock_info: - return self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, - session=session) - def list_indexes(self, session=None): """Get a cursor over the index documents for this collection. diff --git a/pymongo/database.py b/pymongo/database.py index ed4683927e..73fcab760f 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -886,8 +886,6 @@ def drop_collection(self, name_or_collection, session=None): if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") - self.__client._purge_index(self.__name, name) - with self.__client._socket_for_writes(session) as sock_info: return self._command( sock_info, 'drop', value=name, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e05cd86a70..ede6454c15 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -705,10 +705,6 @@ def __init__( self._event_listeners = options.pool_options.event_listeners - # Cache of existing indexes used by ensure_index ops. - self.__index_cache = {} - self.__index_cache_lock = threading.Lock() - super(MongoClient, self).__init__(options.codec_options, options.read_preference, options.write_concern, @@ -794,60 +790,6 @@ def _purge_credentials(self, source): """Purge credentials from the authentication cache.""" self.__all_credentials.pop(source, None) - def _cached(self, dbname, coll, index): - """Test if `index` is cached.""" - cache = self.__index_cache - now = datetime.datetime.utcnow() - with self.__index_cache_lock: - return (dbname in cache and - coll in cache[dbname] and - index in cache[dbname][coll] and - now < cache[dbname][coll][index]) - - def _cache_index(self, dbname, collection, index, cache_for): - """Add an index to the index cache for ensure_index operations.""" - now = datetime.datetime.utcnow() - expire = datetime.timedelta(seconds=cache_for) + now - - with self.__index_cache_lock: - if dbname not in self.__index_cache: - self.__index_cache[dbname] = {} - self.__index_cache[dbname][collection] = {} - self.__index_cache[dbname][collection][index] = expire - - elif collection not in self.__index_cache[dbname]: - self.__index_cache[dbname][collection] = {} - self.__index_cache[dbname][collection][index] = expire - - else: - self.__index_cache[dbname][collection][index] = expire - - def _purge_index(self, database_name, - collection_name=None, index_name=None): - """Purge an index from the index cache. - - If `index_name` is None purge an entire collection. - - If `collection_name` is None purge an entire database. - """ - with self.__index_cache_lock: - if not database_name in self.__index_cache: - return - - if collection_name is None: - del self.__index_cache[database_name] - return - - if not collection_name in self.__index_cache[database_name]: - return - - if index_name is None: - del self.__index_cache[database_name][collection_name] - return - - if index_name in self.__index_cache[database_name][collection_name]: - del self.__index_cache[database_name][collection_name][index_name] - def _server_property(self, attr_name): """An attribute of the current server's description. @@ -1854,7 +1796,6 @@ def drop_database(self, name_or_database, session=None): raise TypeError("name_or_database must be an instance " "of str or a Database") - self._purge_index(name) with self._socket_for_writes(session) as sock_info: self[name]._command( sock_info, diff --git a/test/__init__.py b/test/__init__.py index 47b5fb282e..e6a3c208d5 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -739,18 +739,6 @@ def require_no_api_version(self, func): def mongos_seeds(self): return ','.join('%s:%s' % address for address in self.mongoses) - @property - def supports_reindex(self): - """Does the connected server support reindex?""" - return not ((self.version.at_least(4, 1, 0) and self.is_mongos) or - (self.version.at_least(4, 5, 0) and ( - self.is_mongos or self.is_rs))) - - @property - def supports_getpreverror(self): - """Does the connected server support getpreverror?""" - return not (self.version.at_least(4, 1, 0) or self.is_mongos) - @property def supports_failCommand_fail_point(self): """Does the server support the failCommand fail point?""" diff --git a/test/test_collation.py b/test/test_collation.py index d87a2a9aca..c8d6c15e10 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -142,14 +142,6 @@ def test_create_index(self): self.collation.document, ci_cmd['indexes'][0]['collation']) - @raisesConfigurationErrorForOldMongoDB - def test_ensure_index(self): - self.db.test.ensure_index('foo', collation=self.collation) - ci_cmd = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - ci_cmd['indexes'][0]['collation']) - @raisesConfigurationErrorForOldMongoDB def test_aggregate(self): self.db.test.aggregate([{'$group': {'_id': 42}}], diff --git a/test/test_collection.py b/test/test_collection.py index eafc38b067..c8cda97e69 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -343,49 +343,11 @@ def test_index_management_max_time_ms(self): ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) self.assertRaises( ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.reindex, maxTimeMS=1) finally: self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") - def test_reindex(self): - if not client_context.supports_reindex: - raise unittest.SkipTest( - "reindex is no longer supported by mongos 4.1+") - db = self.db - db.drop_collection("test") - db.test.insert_one({"foo": "bar", "who": "what", "when": "how"}) - db.test.create_index("foo") - db.test.create_index("who") - db.test.create_index("when") - info = db.test.index_information() - - def check_result(result): - self.assertEqual(4, result['nIndexes']) - indexes = result['indexes'] - names = [idx['name'] for idx in indexes] - for name in names: - self.assertTrue(name in info) - for key in info: - self.assertTrue(key in names) - - reindexed = db.test.reindex() - if 'raw' in reindexed: - # mongos - for result in reindexed['raw'].values(): - check_result(result) - else: - check_result(reindexed) - - coll = Collection( - self.db, - 'test', - write_concern=WriteConcern(w=100)) - # No error since writeConcern is not sent. - coll.reindex() - def test_list_indexes(self): db = self.db db.test.drop() diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 146e06f10e..6b4e7209e0 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -93,17 +93,6 @@ def test_add_son_manipulator_deprecation(self): self.assertRaises(DeprecationWarning, lambda: db.add_son_manipulator(AutoReference(db))) - def test_ensure_index_deprecation(self): - try: - self.assertRaises( - DeprecationWarning, - lambda: self.db.test.ensure_index('i')) - finally: - self.db.test.drop() - - def test_reindex_deprecation(self): - self.assertRaises(DeprecationWarning, lambda: self.db.test.reindex()) - def test_geoHaystack_deprecation(self): self.addCleanup(self.db.test.drop) keys = [("pos", GEOHAYSTACK), ("type", ASCENDING)] @@ -766,7 +755,7 @@ def test_find_and_modify(self): c.insert({'_id': 1, 'i': 1}) # Test that we raise DuplicateKeyError when appropriate. - c.ensure_index('i', unique=True) + c.create_index('i', unique=True) self.assertRaises(DuplicateKeyError, c.find_and_modify, query={'i': 1, 'j': 1}, update={'$set': {'k': 1}}, upsert=True) @@ -1195,148 +1184,6 @@ def test_manipulator_properties(self): self.assertEqual(['AutoReference'], db.outgoing_copying_manipulators) - def test_ensure_index(self): - db = self.db - - self.assertRaises(TypeError, db.test.ensure_index, {"hello": 1}) - self.assertRaises(TypeError, - db.test.ensure_index, {"hello": 1}, cache_for='foo') - - db.test.drop_indexes() - - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_indexes() - self.assertEqual("foo", - db.test.ensure_index("goodbye", name="foo")) - self.assertEqual(None, db.test.ensure_index("goodbye", name="foo")) - - db.test.drop_indexes() - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.drop_collection("test") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - self.assertEqual(None, db.test.ensure_index("goodbye")) - - db.test.drop_index("goodbye_1") - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye", cache_for=1)) - time.sleep(1.2) - self.assertEqual("goodbye_1", - db.test.ensure_index("goodbye")) - # Make sure the expiration time is updated. - self.assertEqual(None, - db.test.ensure_index("goodbye")) - - # Clean up indexes for later tests - db.test.drop_indexes() - - @client_context.require_version_max(4, 1) # PYTHON-1734 - def test_ensure_index_threaded(self): - coll = self.db.threaded_index_creation - index_docs = [] - - class Indexer(threading.Thread): - def run(self): - coll.ensure_index('foo0') - coll.ensure_index('foo1') - coll.ensure_index('foo2') - index_docs.append(coll.index_information()) - - try: - threads = [] - for _ in range(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for thread in threads: - thread.start() - - joinall(threads) - - first = index_docs[0] - for index_doc in index_docs[1:]: - self.assertEqual(index_doc, first) - finally: - coll.drop() - - def test_ensure_purge_index_threaded(self): - coll = self.db.threaded_index_creation - - class Indexer(threading.Thread): - def run(self): - coll.ensure_index('foo') - try: - coll.drop_index('foo') - except OperationFailure: - # The index may have already been dropped. - pass - coll.ensure_index('foo') - coll.drop_indexes() - coll.create_index('foo') - - try: - threads = [] - for _ in range(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for thread in threads: - thread.start() - - joinall(threads) - - self.assertTrue('foo_1' in coll.index_information()) - finally: - coll.drop() - - @client_context.require_version_max(4, 1) # PYTHON-1734 - def test_ensure_unique_index_threaded(self): - coll = self.db.test_unique_threaded - coll.drop() - coll.insert_many([{'foo': i} for i in range(10000)]) - - class Indexer(threading.Thread): - def run(self): - try: - coll.ensure_index('foo', unique=True) - coll.insert_one({'foo': 'bar'}) - coll.insert_one({'foo': 'bar'}) - except OperationFailure: - pass - - threads = [] - for _ in range(10): - t = Indexer() - t.setDaemon(True) - threads.append(t) - - for i in range(10): - threads[i].start() - - joinall(threads) - - self.assertEqual(10001, coll.count()) - coll.drop() - - class TestLegacyBulk(BulkTestBase): @classmethod diff --git a/test/test_session.py b/test/test_session.py index 6a6ed12e0d..0773d8a8cb 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -317,9 +317,6 @@ def test_collection(self): (coll.aggregate, [[]], {}), ]) - if client_context.supports_reindex: - ops.append((coll.reindex, [], {})) - self._test_ops(client, *ops) def test_cursor_clone(self): @@ -909,10 +906,6 @@ def test_writes(self): self._test_writes( lambda coll, session: coll.drop_indexes(session=session)) - if client_context.supports_reindex: - self._test_writes( - lambda coll, session: coll.reindex(session=session)) - def _test_no_read_concern(self, op): coll = self.client.pymongo_test.test with self.client.start_session() as sess: @@ -976,10 +969,6 @@ def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( lambda coll, session: coll.find({}, session=session).explain()) - if client_context.supports_reindex: - self._test_no_read_concern( - lambda coll, session: coll.reindex(session=session)) - @client_context.require_no_standalone @client_context.require_version_max(4, 1, 0) def test_aggregate_out_does_not_include_read_concern(self): From 521f7b9af42a33c813c3ba58448f075072f8b42d Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 20 Jan 2021 16:08:31 -0800 Subject: [PATCH 0287/2111] PYTHON-2133 Fix up docs And finish deleting python 2 specific code. --- CONTRIBUTING.rst | 2 +- README.rst | 8 +- bson/__init__.py | 35 +++----- bson/dbref.py | 2 +- bson/json_util.py | 5 +- bson/objectid.py | 9 +- bson/son.py | 2 +- doc/conf.py | 8 +- doc/examples/aggregation.rst | 22 ++--- doc/examples/authentication.rst | 3 +- doc/examples/bulk.rst | 32 +++---- doc/examples/custom_type.rst | 8 +- doc/examples/geo.rst | 26 +++--- doc/examples/gridfs.rst | 8 +- doc/examples/high_availability.rst | 6 +- doc/examples/tls.rst | 32 +------ doc/faq.rst | 8 +- doc/tutorial.rst | 121 +++++++++++--------------- gridfs/__init__.py | 11 ++- gridfs/grid_file.py | 14 ++- pymongo/collection.py | 88 +++++++++---------- pymongo/common.py | 3 +- pymongo/compression_support.py | 4 - pymongo/encryption.py | 4 +- pymongo/encryption_options.py | 4 +- pymongo/message.py | 16 ++-- pymongo/mongo_client.py | 11 +-- pymongo/pool.py | 2 +- pymongo/saslprep.py | 4 +- pymongo/server_selectors.py | 2 - pymongo/ssl_context.py | 110 +++-------------------- setup.py | 40 --------- test/test_auth.py | 38 ++++---- test/test_binary.py | 5 +- test/test_bson.py | 13 ++- test/test_code.py | 2 +- test/test_collection.py | 4 +- test/test_cursor.py | 1 - test/test_dbref.py | 2 +- test/test_discovery_and_monitoring.py | 10 --- test/test_errors.py | 8 +- test/test_raw_bson.py | 14 +-- test/test_ssl.py | 6 +- test/utils.py | 10 --- 44 files changed, 259 insertions(+), 504 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 4c057af571..b191debceb 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,7 +19,7 @@ that might not be of interest or that has already been addressed. Supported Interpreters ---------------------- -PyMongo supports CPython 2.7, 3.4+, PyPy, and PyPy3.5+. Language +PyMongo supports CPython 3.4+ and PyPy3.5+. Language features not supported by all interpreters can not be used. Style Guide diff --git a/README.rst b/README.rst index 0341ae44bb..20c3b56e78 100644 --- a/README.rst +++ b/README.rst @@ -160,9 +160,9 @@ Here's a basic example (for more see the *examples* section of the docs): >>> client = pymongo.MongoClient("localhost", 27017) >>> db = client.test >>> db.name - u'test' + 'test' >>> db.my_collection - Collection(Database(MongoClient('localhost', 27017), u'test'), u'my_collection') + Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') >>> db.my_collection.insert_one({"x": 10}).inserted_id ObjectId('4aba15ebe23f6b53b0000000') >>> db.my_collection.insert_one({"x": 8}).inserted_id @@ -170,7 +170,7 @@ Here's a basic example (for more see the *examples* section of the docs): >>> db.my_collection.insert_one({"x": 11}).inserted_id ObjectId('4aba160ee23f6b543e000002') >>> db.my_collection.find_one() - {u'x': 10, u'_id': ObjectId('4aba15ebe23f6b53b0000000')} + {'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} >>> for item in db.my_collection.find(): ... print(item["x"]) ... @@ -178,7 +178,7 @@ Here's a basic example (for more see the *examples* section of the docs): 8 11 >>> db.my_collection.create_index("x") - u'x_1' + 'x_1' >>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): ... print(item["x"]) ... diff --git a/bson/__init__.py b/bson/__init__.py index 0f63929bba..05482d9130 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -22,11 +22,9 @@ None null both bool boolean both int [#int]_ int32 / int64 py -> bson -long int64 py -> bson `bson.int64.Int64` int64 both float number (real) both -string string py -> bson -unicode string both +str string both list array both dict / `SON` object both datetime.datetime [#dt]_ [#dt2]_ date both @@ -36,17 +34,11 @@ `bson.objectid.ObjectId` oid both `bson.dbref.DBRef` dbref both None undefined bson -> py -unicode code bson -> py -`bson.code.Code` code py -> bson -unicode symbol bson -> py -bytes (Python 3) [#bytes]_ binary both +`bson.code.Code` code both +str symbol bson -> py +bytes [#bytes]_ binary both ======================================= ============= =================== -Note that, when using Python 2.x, to save binary data it must be wrapped as -an instance of `bson.binary.Binary`. Otherwise it will be saved as a BSON -string and retrieved as unicode. Users of Python 3.x can use the Python bytes -type. - .. [#int] A Python int will be saved as a BSON int32 or BSON int64 depending on its size. A BSON int32 will always decode to a Python int. A BSON int64 will always decode to a :class:`~bson.int64.Int64`. @@ -58,10 +50,8 @@ objects from ``re.compile()`` are both saved as BSON regular expressions. BSON regular expressions are decoded as :class:`~bson.regex.Regex` instances. -.. [#bytes] The bytes type from Python 3.x is encoded as BSON binary with - subtype 0. In Python 3.x it will be decoded back to bytes. In Python 2.x - it will be decoded to an instance of :class:`~bson.binary.Binary` with - subtype 0. +.. [#bytes] The bytes type is encoded as BSON binary with + subtype 0. It will be decoded back to bytes. """ import calendar @@ -161,7 +151,7 @@ def _get_int(data, view, position, dummy0, dummy1, dummy2): def _get_c_string(data, view, position, opts): - """Decode a BSON 'C' string to python unicode string.""" + """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 @@ -173,7 +163,7 @@ def _get_float(data, view, position, dummy0, dummy1, dummy2): def _get_string(data, view, position, obj_end, opts, dummy): - """Decode a BSON string to python unicode string.""" + """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] position += 4 if length < 1 or obj_end - position < length: @@ -573,7 +563,7 @@ def _encode_list(name, value, check_keys, opts): def _encode_text(name, value, dummy0, dummy1): - """Encode a python unicode (python 2.x) / str (python 3.x).""" + """Encode a python str.""" value = _utf_8_encode(value)[0] return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" @@ -616,12 +606,11 @@ def _encode_none(name, dummy0, dummy1, dummy2): def _encode_regex(name, value, dummy0, dummy1): """Encode a python regex or bson.regex.Regex.""" flags = value.flags - # Python 2 common case - if flags == 0: - return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00" # Python 3 common case - elif flags == re.UNICODE: + if flags == re.UNICODE: return b"\x0B" + name + _make_c_string_check(value.pattern) + b"u\x00" + elif flags == 0: + return b"\x0B" + name + _make_c_string_check(value.pattern) + b"\x00" else: sflags = b"" if flags & re.IGNORECASE: diff --git a/bson/dbref.py b/bson/dbref.py index 9ef842a31a..f4395b76cd 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -57,7 +57,7 @@ def __init__(self, collection, id, database=None, _extra={}, **kwargs): @property def collection(self): - """Get the name of this DBRef's collection as unicode. + """Get the name of this DBRef's collection. """ return self.__collection diff --git a/bson/json_util.py b/bson/json_util.py index a702ea988c..7b42d5668f 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -30,7 +30,7 @@ >>> from bson.json_util import loads >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]') - [{u'foo': [1, 2]}, {u'bar': {u'hello': u'world'}}, {u'code': Code('function x() { return 1; }', {})}, {u'bin': Binary('...', 128)}] + [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}] Example usage (serialization): @@ -855,7 +855,6 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): return {'$numberDouble': representation} elif json_options.json_mode == JSONMode.CANONICAL: # repr() will return the shortest string guaranteed to produce the - # original value, when float() is called on it. str produces a - # shorter string in Python 2. + # original value, when float() is called on it. return {'$numberDouble': str(repr(obj))} raise TypeError("%r is not JSON serializable" % obj) diff --git a/bson/objectid.py b/bson/objectid.py index ef601e8481..6129df35b2 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -70,7 +70,7 @@ def __init__(self, oid=None): By default, ``ObjectId()`` creates a new unique identifier. The optional parameter `oid` can be an :class:`ObjectId`, or any 12 - :class:`bytes` or, in Python 2, any 12-character :class:`str`. + :class:`bytes`. For example, the 12 bytes b'foo-bar-quux' do not follow the ObjectId specification but they are acceptable input:: @@ -78,14 +78,10 @@ def __init__(self, oid=None): >>> ObjectId(b'foo-bar-quux') ObjectId('666f6f2d6261722d71757578') - `oid` can also be a :class:`unicode` or :class:`str` of 24 hex digits:: + `oid` can also be a :class:`str` of 24 hex digits:: >>> ObjectId('0123456789ab0123456789ab') ObjectId('0123456789ab0123456789ab') - >>> - >>> # A u-prefixed unicode literal: - >>> ObjectId(u'0123456789ab0123456789ab') - ObjectId('0123456789ab0123456789ab') Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor 24 hex digits, or :class:`TypeError` if `oid` is not an accepted type. @@ -201,7 +197,6 @@ def __validate(self, oid): """ if isinstance(oid, ObjectId): self.__id = oid.binary - # bytes or unicode in python 2, str in python 3 elif isinstance(oid, str): if len(oid) == 24: try: diff --git a/bson/son.py b/bson/son.py index bef655c8f7..c6baaa98c4 100644 --- a/bson/son.py +++ b/bson/son.py @@ -33,7 +33,7 @@ class SON(dict): A subclass of dict that maintains ordering of keys and provides a few extra niceties for dealing with SON. SON provides an API - similar to collections.OrderedDict from Python 2.7+. + similar to collections.OrderedDict. """ def __init__(self, data=None, **kwargs): diff --git a/doc/conf.py b/doc/conf.py index c545ab093f..381abb299d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -27,8 +27,8 @@ master_doc = 'index' # General information about the project. -project = u'PyMongo' -copyright = u'MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc' +project = 'PyMongo' +copyright = 'MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc' html_show_sphinx = False # The version info for the project you're documenting, acts as replacement for @@ -152,8 +152,8 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'PyMongo.tex', u'PyMongo Documentation', - u'Michael Dirolf', 'manual'), + ('index', 'PyMongo.tex', 'PyMongo Documentation', + 'Michael Dirolf', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index f816eed6e5..a2a7214b36 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -58,15 +58,15 @@ eg "$sort": ... ] >>> import pprint >>> pprint.pprint(list(db.things.aggregate(pipeline))) - [{u'_id': u'cat', u'count': 3}, - {u'_id': u'dog', u'count': 2}, - {u'_id': u'mouse', u'count': 1}] + [{'_id': 'cat', 'count': 3}, + {'_id': 'dog', 'count': 2}, + {'_id': 'mouse', 'count': 1}] To run an explain plan for this aggregation use the :meth:`~pymongo.database.Database.command` method:: >>> db.command('aggregate', 'things', pipeline=pipeline, explain=True) - {u'ok': 1.0, u'stages': [...]} + {'ok': 1.0, 'stages': [...]} As well as simple aggregations the aggregation framework provides projection capabilities to reshape the returned data. Using projections and aggregation, @@ -123,9 +123,9 @@ iterate over the result collection: >>> for doc in result.find().sort("_id"): ... pprint.pprint(doc) ... - {u'_id': u'cat', u'value': 3.0} - {u'_id': u'dog', u'value': 2.0} - {u'_id': u'mouse', u'value': 1.0} + {'_id': 'cat', 'value': 3.0} + {'_id': 'dog', 'value': 2.0} + {'_id': 'mouse', 'value': 1.0} Advanced Map/Reduce ------------------- @@ -140,7 +140,7 @@ response to the map/reduce command, rather than just the result collection: >>> pprint.pprint( ... db.things.map_reduce(mapper, reducer, "myresults", full_response=True)) - {...u'ok': 1.0,... u'result': u'myresults'...} + {...'ok': 1.0,... 'result': 'myresults'...} All of the optional map/reduce parameters are also supported, simply pass them as keyword arguments. In this example we use the `query` parameter to limit the @@ -153,8 +153,8 @@ documents that will be mapped over: >>> for doc in results.find().sort("_id"): ... pprint.pprint(doc) ... - {u'_id': u'cat', u'value': 1.0} - {u'_id': u'dog', u'value': 1.0} + {'_id': 'cat', 'value': 1.0} + {'_id': 'dog', 'value': 1.0} You can use :class:`~bson.son.SON` or :class:`collections.OrderedDict` to specify a different database to store the result collection: @@ -168,6 +168,6 @@ specify a different database to store the result collection: ... reducer, ... out=SON([("replace", "results"), ("db", "outdb")]), ... full_response=True)) - {...u'ok': 1.0,... u'result': {u'collection': u'results', u'db': u'outdb'}...} + {...'ok': 1.0,... 'result': {'collection': 'results', 'db': 'outdb'}...} .. seealso:: The full list of options for MongoDB's `map reduce engine `_ diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 4989e6a91e..d0a1fba15b 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -11,8 +11,7 @@ Percent-Escaping Username and Password -------------------------------------- Username and password must be percent-escaped with -:meth:`urllib.parse.quote_plus` in Python 3, or :meth:`urllib.quote_plus` in -Python 2, to be used in a MongoDB URI. For example, in Python 3:: +:meth:`urllib.parse.quote_plus`, to be used in a MongoDB URI. For example:: >>> from pymongo import MongoClient >>> import urllib.parse diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 019817fbd0..9e8a57a803 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -70,7 +70,7 @@ of operations performed. 'nModified': 2, 'nRemoved': 10000, 'nUpserted': 1, - 'upserted': [{u'_id': 4, u'index': 5}], + 'upserted': [{'_id': 4, 'index': 5}], 'writeConcernErrors': [], 'writeErrors': []} @@ -107,10 +107,10 @@ the failure. 'nUpserted': 0, 'upserted': [], 'writeConcernErrors': [], - 'writeErrors': [{u'code': 11000, - u'errmsg': u'...E11000...duplicate key error...', - u'index': 1,... - u'op': {'_id': 4}}]} + 'writeErrors': [{'code': 11000, + 'errmsg': '...E11000...duplicate key error...', + 'index': 1,... + 'op': {'_id': 4}}]} .. _unordered_bulk: @@ -145,14 +145,14 @@ and fourth operations succeed. 'nUpserted': 0, 'upserted': [], 'writeConcernErrors': [], - 'writeErrors': [{u'code': 11000, - u'errmsg': u'...E11000...duplicate key error...', - u'index': 0,... - u'op': {'_id': 1}}, - {u'code': 11000, - u'errmsg': u'...E11000...duplicate key error...', - u'index': 2,... - u'op': {'_id': 3}}]} + 'writeErrors': [{'code': 11000, + 'errmsg': '...E11000...duplicate key error...', + 'index': 0,... + 'op': {'_id': 1}}, + {'code': 11000, + 'errmsg': '...E11000...duplicate key error...', + 'index': 2,... + 'op': {'_id': 3}}]} Write Concern ............. @@ -177,7 +177,7 @@ after all operations are attempted, regardless of execution order. 'nRemoved': 0, 'nUpserted': 0, 'upserted': [], - 'writeConcernErrors': [{u'code': 64... - u'errInfo': {u'wtimeout': True}, - u'errmsg': u'waiting for replication timed out'}], + 'writeConcernErrors': [{'code': 64... + 'errInfo': {'wtimeout': True}, + 'errmsg': 'waiting for replication timed out'}], 'writeErrors': []} diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst index 591a250b6c..404a6c8b55 100644 --- a/doc/examples/custom_type.rst +++ b/doc/examples/custom_type.rst @@ -138,7 +138,7 @@ Now, we can seamlessly encode and decode instances of >>> mydoc = collection.find_one() >>> import pprint >>> pprint.pprint(mydoc) - {u'_id': ObjectId('...'), u'num': Decimal('45.321')} + {'_id': ObjectId('...'), 'num': Decimal('45.321')} We can see what's actually being saved to the database by creating a fresh @@ -149,7 +149,7 @@ MongoDB: >>> vanilla_collection = db.get_collection('test') >>> pprint.pprint(vanilla_collection.find_one()) - {u'_id': ObjectId('...'), u'num': Decimal128('45.321')} + {'_id': ObjectId('...'), 'num': Decimal128('45.321')} Encoding Subtypes @@ -217,7 +217,7 @@ object, we can seamlessly encode instances of ``DecimalInt``: >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) - {u'_id': ObjectId('...'), u'num': Decimal('45.321')} + {'_id': ObjectId('...'), 'num': Decimal('45.321')} Note that the ``transform_bson`` method of the base codec class results in these values being decoded as ``Decimal`` (and not ``DecimalInt``). @@ -310,7 +310,7 @@ We can now seamlessly encode instances of :py:class:`~decimal.Decimal`: >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) - {u'_id': ObjectId('...'), u'num': Decimal128('45.321')} + {'_id': ObjectId('...'), 'num': Decimal128('45.321')} .. note:: diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 26de95cb6a..5caa36eafc 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -22,7 +22,7 @@ Creating a geospatial index in pymongo is easy: >>> from pymongo import MongoClient, GEO2D >>> db = MongoClient().geo_example >>> db.places.create_index([("loc", GEO2D)]) - u'loc_2d' + 'loc_2d' Inserting Places ---------------- @@ -53,9 +53,9 @@ Using the geospatial index we can find documents near another point: >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): ... pprint.pprint(doc) ... - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} - {u'_id': ObjectId('...'), u'loc': [1, 2]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [1, 2]} .. note:: If using :data:`pymongo.GEOSPHERE`, using $nearSphere is recommended. @@ -68,9 +68,9 @@ The $maxDistance operator requires the use of :class:`~bson.son.SON`: >>> for doc in db.places.find(query).limit(3): ... pprint.pprint(doc) ... - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} - {u'_id': ObjectId('...'), u'loc': [1, 2]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [1, 2]} It's also possible to query for all items within a given rectangle (specified by lower-left and upper-right coordinates): @@ -80,8 +80,8 @@ It's also possible to query for all items within a given rectangle >>> query = {"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}} >>> for doc in db.places.find(query).sort('_id'): ... pprint.pprint(doc) - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [4, 4]} Or circle (specified by center point and radius): @@ -91,15 +91,15 @@ Or circle (specified by center point and radius): >>> for doc in db.places.find(query).sort('_id'): ... pprint.pprint(doc) ... - {u'_id': ObjectId('...'), u'loc': [2, 5]} - {u'_id': ObjectId('...'), u'loc': [1, 2]} - {u'_id': ObjectId('...'), u'loc': [4, 4]} + {'_id': ObjectId('...'), 'loc': [2, 5]} + {'_id': ObjectId('...'), 'loc': [1, 2]} + {'_id': ObjectId('...'), 'loc': [4, 4]} geoNear queries are also supported using :class:`~bson.son.SON`:: >>> from bson.son import SON >>> db.command(SON([('geoNear', 'places'), ('near', [1, 2])])) - {u'ok': 1.0, u'stats': ...} + {'ok': 1.0, 'stats': ...} .. warning:: Starting in MongoDB version 4.0, MongoDB deprecates the **geoNear** command. Use one of the following operations instead. diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst index db55bd2b59..a015f6a9fd 100644 --- a/doc/examples/gridfs.rst +++ b/doc/examples/gridfs.rst @@ -52,7 +52,7 @@ file: .. doctest:: >>> fs.get(a).read() - 'hello world' + b'hello world' :meth:`~gridfs.GridFS.get` returns a file-like object, so we get the file's contents by calling :meth:`~gridfs.grid_file.GridOut.read`. @@ -68,11 +68,11 @@ keyword arguments: >>> b = fs.put(fs.get(a), filename="foo", bar="baz") >>> out = fs.get(b) >>> out.read() - 'hello world' + b'hello world' >>> out.filename - u'foo' + 'foo' >>> out.bar - u'baz' + 'baz' >>> out.upload_date datetime.datetime(...) diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index b52b22a106..6a72ba75b8 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -111,7 +111,7 @@ set:: >>> from time import sleep >>> c = MongoClient(replicaset='foo'); print(c.nodes); sleep(0.1); print(c.nodes) frozenset([]) - frozenset([(u'localhost', 27019), (u'localhost', 27017), (u'localhost', 27018)]) + frozenset([('localhost', 27019), ('localhost', 27017), ('localhost', 27018)]) You need not wait for replica set discovery in your application, however. If you need to do any operation with a MongoClient, such as a @@ -132,7 +132,7 @@ connect to the replica set and perform a couple of basic operations:: >>> db.test.insert_one({"x": 1}).inserted_id ObjectId('...') >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} + {'x': 1, '_id': ObjectId('...')} By checking the host and port, we can see that we're connected to *localhost:27017*, which is the current primary:: @@ -162,7 +162,7 @@ general). At that point the driver will connect to the new primary and the operation will succeed:: >>> db.test.find_one() - {u'x': 1, u'_id': ObjectId('...')} + {'x': 1, '_id': ObjectId('...')} >>> db.client.address ('localhost', 27018) diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 780dec3938..07351dc9d5 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -16,30 +16,6 @@ command:: $ python -m pip install pymongo[tls] -Starting with PyMongo 3.11 this installs `PyOpenSSL -`_, `requests`_ -and `service_identity -`_ -for users of Python versions older than 2.7.9. PyOpenSSL supports SNI for these -old Python versions allowing applictions to connect to Altas free and shared -tier instances. - -Earlier versions of PyMongo require you to manually install the dependencies -listed below. - -Python 2.x -`````````` -The `ipaddress`_ module is required on all platforms. - -When using CPython < 2.7.9 or PyPy < 2.5.1: - -- On Windows, the `wincertstore`_ module is required. -- On all other platforms, the `certifi`_ module is required. - -.. _ipaddress: https://pypi.python.org/pypi/ipaddress -.. _wincertstore: https://pypi.python.org/pypi/wincertstore -.. _certifi: https://pypi.python.org/pypi/certifi - .. warning:: Industry best practices recommend, and some regulations require, the use of TLS 1.1 or newer. Though no application changes are required for PyMongo to make use of the newest protocols, some operating systems or @@ -128,8 +104,7 @@ Or, in the URI:: Specifying a certificate revocation list ........................................ -Python 2.7.9+ (pypy 2.5.1+) and 3.4+ provide support for certificate revocation -lists. The ``tlsCRLFile`` option takes a path to a CRL file. It can be passed +The ``tlsCRLFile`` option takes a path to a CRL file. It can be passed as a keyword argument:: >>> client = pymongo.MongoClient('example.com', @@ -161,9 +136,8 @@ the ``ssl_keyfile`` option:: ... tlsCertificateKeyFile='/path/to/client.pem', ... ssl_keyfile='/path/to/key.pem') -Python 2.7.9+ (pypy 2.5.1+) and 3.3+ support providing a password or passphrase -to decrypt encrypted private keys. Use the ``tlsCertificateKeyFilePassword`` -option:: +Python supports providing a password or passphrase to decrypt encrypted +private keys. Use the ``tlsCertificateKeyFilePassword`` option:: >>> client = pymongo.MongoClient('example.com', ... tls=True, diff --git a/doc/faq.rst b/doc/faq.rst index dc9973e27b..89d8e8a7ff 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -238,7 +238,7 @@ Therefore, Python dicts are not guaranteed to show keys in the order they are stored in BSON. Here, "a" is shown before "b": >>> print(collection.find_one()) - {u'_id': 1.0, u'subdocument': {u'a': 1.0, u'b': 1.0}} + {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} To preserve order when reading BSON, use the :class:`~bson.son.SON` class, which is a dict that remembers its key order. First, get a handle to the @@ -264,7 +264,7 @@ Now, documents and subdocuments in query results are represented with .. doctest:: key-order >>> print(collection_son.find_one()) - SON([(u'_id', 1.0), (u'subdocument', SON([(u'b', 1.0), (u'a', 1.0)]))]) + SON([('_id', 1.0), ('subdocument', SON([('b', 1.0), ('a', 1.0)]))]) The subdocument's actual storage layout is now visible: "b" is before "a". @@ -287,7 +287,7 @@ There are two solutions. First, you can match the subdocument field-by-field: >>> collection.find_one({'subdocument.a': 1.0, ... 'subdocument.b': 1.0}) - {u'_id': 1.0, u'subdocument': {u'a': 1.0, u'b': 1.0}} + {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} The query matches any subdocument with an "a" of 1.0 and a "b" of 1.0, regardless of the order you specify them in Python or the order they are stored @@ -298,7 +298,7 @@ The second solution is to use a :class:`~bson.son.SON` to specify the key order: >>> query = {'subdocument': SON([('b', 1.0), ('a', 1.0)])} >>> collection.find_one(query) - {u'_id': 1.0, u'subdocument': {u'a': 1.0, u'b': 1.0}} + {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} The key order you use when you create a :class:`~bson.son.SON` is preserved when it is serialized to BSON and used as a query. Thus you can create a diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 3e77ab1d8b..2ec6c44da8 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -143,7 +143,7 @@ of the collections in our database: .. doctest:: >>> db.list_collection_names() - [u'posts'] + ['posts'] Getting a Single Document With :meth:`~pymongo.collection.Collection.find_one` ------------------------------------------------------------------------------ @@ -159,11 +159,11 @@ document from the posts collection: >>> import pprint >>> pprint.pprint(posts.find_one()) - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} The result is a dictionary matching the one that we inserted previously. @@ -177,11 +177,11 @@ our results to a document with author "Mike" we do: .. doctest:: >>> pprint.pprint(posts.find_one({"author": "Mike"})) - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} If we try with a different author, like "Eliot", we'll get no result: @@ -201,11 +201,11 @@ We can also find a post by its ``_id``, which in our example is an ObjectId: >>> post_id ObjectId(...) >>> pprint.pprint(posts.find_one({"_id": post_id})) - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} Note that an ObjectId is not the same as its string representation: @@ -229,23 +229,6 @@ case to **convert the ObjectId from a string** before passing it to .. seealso:: :ref:`web-application-querying-by-objectid` -A Note On Unicode Strings -------------------------- -You probably noticed that the regular Python strings we stored earlier look -different when retrieved from the server (e.g. u'Mike' instead of 'Mike'). -A short explanation is in order. - -MongoDB stores data in `BSON format `_. BSON strings are -UTF-8 encoded so PyMongo must ensure that any strings it stores contain only -valid UTF-8 data. Regular strings () are validated and stored -unaltered. Unicode strings () are encoded UTF-8 first. The -reason our example string is represented in the Python shell as u'Mike' instead -of 'Mike' is that PyMongo decodes each BSON string to a Python unicode string, -not a regular str. - -`You can read more about Python unicode strings here -`_. - Bulk Inserts ------------ In order to make querying a little more interesting, let's insert a @@ -293,21 +276,21 @@ document in the ``posts`` collection: >>> for post in posts.find(): ... pprint.pprint(post) ... - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'bulk', u'insert'], - u'text': u'Another post!'} - {u'_id': ObjectId('...'), - u'author': u'Eliot', - u'date': datetime.datetime(...), - u'text': u'and pretty easy too!', - u'title': u'MongoDB is fun'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['bulk', 'insert'], + 'text': 'Another post!'} + {'_id': ObjectId('...'), + 'author': 'Eliot', + 'date': datetime.datetime(...), + 'text': 'and pretty easy too!', + 'title': 'MongoDB is fun'} Just like we did with :meth:`~pymongo.collection.Collection.find_one`, we can pass a document to :meth:`~pymongo.collection.Collection.find` @@ -319,16 +302,16 @@ author is "Mike": >>> for post in posts.find({"author": "Mike"}): ... pprint.pprint(post) ... - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'mongodb', u'python', u'pymongo'], - u'text': u'My first blog post!'} - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'bulk', u'insert'], - u'text': u'Another post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['mongodb', 'python', 'pymongo'], + 'text': 'My first blog post!'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['bulk', 'insert'], + 'text': 'Another post!'} Counting -------- @@ -362,16 +345,16 @@ than a certain date, but also sort the results by author: >>> for post in posts.find({"date": {"$lt": d}}).sort("author"): ... pprint.pprint(post) ... - {u'_id': ObjectId('...'), - u'author': u'Eliot', - u'date': datetime.datetime(...), - u'text': u'and pretty easy too!', - u'title': u'MongoDB is fun'} - {u'_id': ObjectId('...'), - u'author': u'Mike', - u'date': datetime.datetime(...), - u'tags': [u'bulk', u'insert'], - u'text': u'Another post!'} + {'_id': ObjectId('...'), + 'author': 'Eliot', + 'date': datetime.datetime(...), + 'text': 'and pretty easy too!', + 'title': 'MongoDB is fun'} + {'_id': ObjectId('...'), + 'author': 'Mike', + 'date': datetime.datetime(...), + 'tags': ['bulk', 'insert'], + 'text': 'Another post!'} Here we use the special ``"$lt"`` operator to do a range query, and also call :meth:`~pymongo.cursor.Cursor.sort` to sort the results @@ -393,7 +376,7 @@ First, we'll need to create the index: >>> result = db.profiles.create_index([('user_id', pymongo.ASCENDING)], ... unique=True) >>> sorted(list(db.profiles.index_information())) - [u'_id_', u'user_id_1'] + ['_id_', 'user_id_1'] Notice that we have two indexes now: one is the index on ``_id`` that MongoDB creates automatically, and the other is the index on ``user_id`` we just diff --git a/gridfs/__init__.py b/gridfs/__init__.py index ab8a5a02bb..c12f93f1de 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -107,12 +107,11 @@ def put(self, data, **kwargs): finally: f.close() - `data` can be either an instance of :class:`str` (:class:`bytes` - in python 3) or a file-like object providing a :meth:`read` method. - If an `encoding` keyword argument is passed, `data` can also be a - :class:`unicode` (:class:`str` in python 3) instance, which will - be encoded as `encoding` before being written. Any keyword arguments - will be passed through to the created file - see + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the ``"_id"`` of the created file. diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index e49ca472af..5a48fee176 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -144,11 +144,8 @@ def __init__( - ``"chunkSize"`` or ``"chunk_size"``: size of each of the chunks, in bytes (default: 255 kb) - - ``"encoding"``: encoding used for this file. In Python 2, - any :class:`unicode` that is written to the file will be - converted to a :class:`str`. In Python 3, any :class:`str` - that is written to the file will be converted to - :class:`bytes`. + - ``"encoding"``: encoding used for this file. Any :class:`str` + that is written to the file will be converted to :class:`bytes`. :Parameters: - `root_collection`: root collection to write to @@ -345,15 +342,14 @@ def write(self, data): `data` can be either a string of bytes or a file-like object (implementing :meth:`read`). If the file has an :attr:`encoding` attribute, `data` can also be a - :class:`unicode` (:class:`str` in python 3) instance, which - will be encoded as :attr:`encoding` before being written. + :class:`str` instance, which will be encoded as + :attr:`encoding` before being written. Due to buffering, the data may not actually be written to the database until the :meth:`close` method is called. Raises :class:`ValueError` if this file is already closed. Raises :class:`TypeError` if `data` is not an instance of - :class:`str` (:class:`bytes` in python 3), a file-like object, - or an instance of :class:`unicode` (:class:`str` in python 3). + :class:`bytes`, a file-like object, or an instance of :class:`str`. Unicode data is only allowed if the file has an :attr:`encoding` attribute. diff --git a/pymongo/collection.py b/pymongo/collection.py index 5314b6e26e..97787f3d67 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -439,8 +439,8 @@ def bulk_write(self, requests, ordered=True, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')} - {u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} >>> # DeleteMany, UpdateOne, and UpdateMany are also available. ... >>> from pymongo import InsertOne, DeleteOne, ReplaceOne @@ -458,9 +458,9 @@ def bulk_write(self, requests, ordered=True, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')} - {u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')} - {u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} :Parameters: - `requests`: A list of write operations (see examples above). @@ -660,7 +660,7 @@ def insert_one(self, document, bypass_document_validation=False, >>> result.inserted_id ObjectId('54f112defba522406c9cc208') >>> db.test.find_one({'x': 1}) - {u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')} + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} :Parameters: - `document`: The document to insert. Must be a mutable mapping @@ -876,7 +876,7 @@ def replace_one(self, filter, replacement, upsert=False, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')} + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} >>> result = db.test.replace_one({'x': 1}, {'y': 1}) >>> result.matched_count 1 @@ -885,7 +885,7 @@ def replace_one(self, filter, replacement, upsert=False, >>> for doc in db.test.find({}): ... print(doc) ... - {u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')} + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} The *upsert* option can be used to insert a new document if a matching document does not exist. @@ -898,7 +898,7 @@ def replace_one(self, filter, replacement, upsert=False, >>> result.upserted_id ObjectId('54f11e5c8891e756a6e1abd4') >>> db.test.find_one({'x': 1}) - {u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')} + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} :Parameters: - `filter`: A query that matches the document to replace. @@ -955,9 +955,9 @@ def update_one(self, filter, update, upsert=False, >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) >>> result.matched_count 1 @@ -966,9 +966,9 @@ def update_one(self, filter, update, upsert=False, >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 4, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} :Parameters: - `filter`: A query that matches the document to update. @@ -1031,9 +1031,9 @@ def update_many(self, filter, update, upsert=False, array_filters=None, >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) >>> result.matched_count 3 @@ -1042,9 +1042,9 @@ def update_many(self, filter, update, upsert=False, array_filters=None, >>> for doc in db.test.find(): ... print(doc) ... - {u'x': 4, u'_id': 0} - {u'x': 4, u'_id': 1} - {u'x': 4, u'_id': 2} + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} :Parameters: - `filter`: A query that matches the documents to update. @@ -2123,10 +2123,10 @@ def index_information(self, session=None): like this: >>> db.test.create_index("x", unique=True) - u'x_1' + 'x_1' >>> db.test.index_information() - {u'_id_': {u'key': [(u'_id', 1)]}, - u'x_1': {u'unique': True, u'key': [(u'x', 1)]}} + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} :Parameters: - `session` (optional): a @@ -2800,7 +2800,7 @@ def find_one_and_delete(self, filter, >>> db.test.count_documents({'x': 1}) 2 >>> db.test.find_one_and_delete({'x': 1}) - {u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} >>> db.test.count_documents({'x': 1}) 1 @@ -2809,17 +2809,17 @@ def find_one_and_delete(self, filter, >>> for doc in db.test.find({'x': 1}): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> db.test.find_one_and_delete( ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 2} The *projection* option can be used to limit the fields returned. >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) - {u'x': 1} + {'x': 1} :Parameters: - `filter`: A query that matches the document to delete. @@ -2877,17 +2877,17 @@ def find_one_and_replace(self, filter, replacement, >>> for doc in db.test.find({}): ... print(doc) ... - {u'x': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) - {u'x': 1, u'_id': 0} + {'x': 1, '_id': 0} >>> for doc in db.test.find({}): ... print(doc) ... - {u'y': 1, u'_id': 0} - {u'x': 1, u'_id': 1} - {u'x': 1, u'_id': 2} + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} :Parameters: - `filter`: A query that matches the document to replace. @@ -2953,7 +2953,7 @@ def find_one_and_update(self, filter, update, >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) - {u'_id': 665, u'done': False, u'count': 25}} + {'_id': 665, 'done': False, 'count': 25}} Returns ``None`` if no document matches the filter. @@ -2971,7 +2971,7 @@ def find_one_and_update(self, filter, update, ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) - {u'_id': u'userid', u'seq': 1} + {'_id': 'userid', 'seq': 1} You can limit the fields returned with the *projection* option. @@ -2980,7 +2980,7 @@ def find_one_and_update(self, filter, update, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... return_document=ReturnDocument.AFTER) - {u'seq': 2} + {'seq': 2} The *upsert* option can be used to create the document if it doesn't already exist. @@ -2993,20 +2993,20 @@ def find_one_and_update(self, filter, update, ... projection={'seq': True, '_id': False}, ... upsert=True, ... return_document=ReturnDocument.AFTER) - {u'seq': 1} + {'seq': 1} If multiple documents match *filter*, a *sort* can be applied. >>> for doc in db.test.find({'done': True}): ... print(doc) ... - {u'_id': 665, u'done': True, u'result': {u'count': 26}} - {u'_id': 701, u'done': True, u'result': {u'count': 17}} + {'_id': 665, 'done': True, 'result': {'count': 26}} + {'_id': 701, 'done': True, 'result': {'count': 17}} >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) - {u'_id': 701, u'done': True, u'result': {u'count': 17}} + {'_id': 701, 'done': True, 'result': {'count': 17}} :Parameters: - `filter`: A query that matches the document to update. diff --git a/pymongo/common.py b/pymongo/common.py index 9e61c328c5..a36cf6d6b7 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -234,8 +234,7 @@ def validate_non_negative_integer_or_none(option, value): def validate_string(option, value): - """Validates that 'value' is an instance of `basestring` for Python 2 - or `str` for Python 3. + """Validates that 'value' is an instance of `str`. """ if isinstance(value, str): return value diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index b6662f22f8..2dd54e7f78 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -142,10 +142,6 @@ def decompress(data, compressor_id): # https://github.com/andrix/python-snappy/issues/65 # This only matters when data is a memoryview since # id(bytes(data)) == id(data) when data is a bytes. - # NOTE: bytes(memoryview) returns the memoryview repr - # in Python 2.7. The right thing to do in 2.7 is call - # memoryview.tobytes(), but we currently only use - # memoryview in Python 3.x. return snappy.uncompress(bytes(data)) elif compressor_id == ZlibContext.compressor_id: return zlib.decompress(data) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index f3aa2aa283..a1edf45dce 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -364,12 +364,12 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, These are the Azure Active Directory credentials used to generate Azure Key Vault messages. - `gcp`: Map with "email" as a string and "privateKey" - as `bytes` or a base64 encoded string (unicode on Python 2). + as `bytes` or a base64 encoded string. Additionally, "endpoint" may also be specified as a string (defaults to 'oauth2.googleapis.com'). These are the credentials used to generate Google Cloud KMS messages. - `local`: Map with "key" as `bytes` (96 bytes in length) or - a base64 encoded string (unicode on Python 2) which decodes + a base64 encoded string which decodes to 96 bytes. "key" is the master key used to encrypt/decrypt data keys. This key should be generated and stored as securely as possible. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index e45a3f94d5..da3a0f1913 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -65,12 +65,12 @@ def __init__(self, kms_providers, key_vault_namespace, These are the Azure Active Directory credentials used to generate Azure Key Vault messages. - `gcp`: Map with "email" as a string and "privateKey" - as `bytes` or a base64 encoded string (unicode on Python 2). + as `bytes` or a base64 encoded string. Additionally, "endpoint" may also be specified as a string (defaults to 'oauth2.googleapis.com'). These are the credentials used to generate Google Cloud KMS messages. - `local`: Map with "key" as `bytes` (96 bytes in length) or - a base64 encoded string (unicode on Python 2) which decodes + a base64 encoded string which decodes to 96 bytes. "key" is the master key used to encrypt/decrypt data keys. This key should be generated and stored as securely as possible. diff --git a/pymongo/message.py b/pymongo/message.py index eb2a5fb7e7..25de5a72ff 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1574,8 +1574,6 @@ def unpack(cls, msg): # PYTHON-945: ignore starting_from field. flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg) - # Convert Python 3 memoryview to bytes. Note we should call - # memoryview.tobytes() if we start using memoryview in Python 2.7. documents = bytes(msg[20:]) return cls(flags, cursor_id, number_returned, documents) @@ -1649,8 +1647,6 @@ def unpack(cls, msg): if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") - # Convert Python 3 memoryview to bytes. Note we should call - # memoryview.tobytes() if we start using memoryview in Python 2.7. payload_document = bytes(msg[5:]) return cls(flags, payload_document) @@ -1699,17 +1695,17 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, # listIndexes if 'cursor' in cmd: result = { - u'cursor': { - u'firstBatch': docs, - u'id': reply.cursor_id, - u'ns': u'%s.%s' % (db, coll) + 'cursor': { + 'firstBatch': docs, + 'id': reply.cursor_id, + 'ns': '%s.%s' % (db, coll) }, - u'ok': 1.0 + 'ok': 1.0 } # fsyncUnlock, currentOp else: result = docs[0] if docs else {} - result[u'ok'] = 1.0 + result['ok'] = 1.0 if publish: duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ede6454c15..a9fa282289 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -26,9 +26,9 @@ >>> from pymongo import MongoClient >>> c = MongoClient() >>> c.test_database - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), u'test_database') + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') >>> c['test-database'] - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), u'test-database') + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') """ import contextlib @@ -118,12 +118,7 @@ def __init__( passwords reserved characters like ':', '/', '+' and '@' must be percent encoded following RFC 2396:: - try: - # Python 3.x - from urllib.parse import quote_plus - except ImportError: - # Python 2.x - from urllib import quote_plus + from urllib.parse import quote_plus uri = "mongodb://%s:%s@%s" % ( quote_plus(user), quote_plus(password), host) diff --git a/pymongo/pool.py b/pymongo/pool.py index 472a9bf042..e92df9b594 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -225,7 +225,7 @@ def _set_keepalive_times(sock): # while the main thread holds the import lock, getaddrinfo deadlocks trying # to import the IDNA codec. Import it here, where presumably we're on the # main thread, to avoid the deadlock. See PYTHON-607. -u'foo'.encode('idna') +'foo'.encode('idna') def _raise_connection_failure(address, error, msg_prefix=None): diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 3a4284feb8..08a780c055 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -49,8 +49,8 @@ def saslprep(data, prohibit_unassigned_code_points=True): :Parameters: - `data`: The string to SASLprep. Unicode strings - (python 2.x unicode, 3.x str) are supported. Byte strings - (python 2.x str, 3.x bytes) are ignored. + (:class:`str`) are supported. Byte strings + (:class:`bytes`) are ignored. - `prohibit_unassigned_code_points`: True / False. RFC 3454 and RFCs for various SASL mechanisms distinguish between `queries` (unassigned code points allowed) and diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index 01f13065c2..cc18450ad8 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -72,8 +72,6 @@ def topology_type(self): def __bool__(self): return bool(self.server_descriptions) - __nonzero__ = __bool__ # Python 2. - def __getitem__(self, item): return self.server_descriptions[item] diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 6ca28a58bf..5bcbc5d9c3 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -19,114 +19,26 @@ # PROTOCOL_TLS_CLIENT is Python 3.6+ PROTOCOL_SSLv23 = getattr(_ssl, "PROTOCOL_TLS_CLIENT", _ssl.PROTOCOL_SSLv23) -# Python 2.7.9+ OP_NO_SSLv2 = getattr(_ssl, "OP_NO_SSLv2", 0) -# Python 2.7.9+ OP_NO_SSLv3 = getattr(_ssl, "OP_NO_SSLv3", 0) -# Python 2.7.9+, OpenSSL 1.0.0+ OP_NO_COMPRESSION = getattr(_ssl, "OP_NO_COMPRESSION", 0) # Python 3.7+, OpenSSL 1.1.0h+ OP_NO_RENEGOTIATION = getattr(_ssl, "OP_NO_RENEGOTIATION", 0) -# Python 2.7.9+ HAS_SNI = getattr(_ssl, "HAS_SNI", False) IS_PYOPENSSL = False # Base Exception class SSLError = _ssl.SSLError -try: - # CPython 2.7.9+ - from ssl import SSLContext - if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): - from ssl import VERIFY_CRL_CHECK_LEAF - # Python 3.7 uses OpenSSL's hostname matching implementation - # making it the obvious version to start using SSLConext.check_hostname. - # Python 3.6 might have been a good version, but it suffers - # from https://bugs.python.org/issue32185. - # We'll use our bundled match_hostname for older Python - # versions, which also supports IP address matching - # with Python < 3.5. - CHECK_HOSTNAME_SAFE = _sys.version_info[:2] >= (3, 7) -except ImportError: - from pymongo.errors import ConfigurationError - - class SSLContext(object): - """A fake SSLContext. - - This implements an API similar to ssl.SSLContext from python 3.2 - but does not implement methods or properties that would be - incompatible with ssl.wrap_socket from python 2.7 < 2.7.9. - - You must pass protocol which must be one of the PROTOCOL_* constants - defined in the ssl module. ssl.PROTOCOL_SSLv23 is recommended for maximum - interoperability. - """ - - __slots__ = ('_cafile', '_certfile', - '_keyfile', '_protocol', '_verify_mode') - - def __init__(self, protocol): - self._cafile = None - self._certfile = None - self._keyfile = None - self._protocol = protocol - self._verify_mode = _ssl.CERT_NONE - - @property - def protocol(self): - """The protocol version chosen when constructing the context. - This attribute is read-only. - """ - return self._protocol - - def __get_verify_mode(self): - """Whether to try to verify other peers' certificates and how to - behave if verification fails. This attribute must be one of - ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. - """ - return self._verify_mode - - def __set_verify_mode(self, value): - """Setter for verify_mode.""" - self._verify_mode = value - - verify_mode = property(__get_verify_mode, __set_verify_mode) - - def load_cert_chain(self, certfile, keyfile=None, password=None): - """Load a private key and the corresponding certificate. The certfile - string must be the path to a single file in PEM format containing the - certificate as well as any number of CA certificates needed to - establish the certificate's authenticity. The keyfile string, if - present, must point to a file containing the private key. Otherwise - the private key will be taken from certfile as well. - """ - if password is not None: - raise ConfigurationError( - "Support for ssl_pem_passphrase requires " - "python 2.7.9+ (pypy 2.5.1+), python 3 or " - "PyOpenSSL") - self._certfile = certfile - self._keyfile = keyfile - - def load_verify_locations(self, cafile=None, dummy=None): - """Load a set of "certification authority"(CA) certificates used to - validate other peers' certificates when `~verify_mode` is other than - ssl.CERT_NONE. - """ - self._cafile = cafile - - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, dummy=None): - """Wrap an existing Python socket sock and return an ssl.SSLSocket - object. - """ - return _ssl.wrap_socket(sock, keyfile=self._keyfile, - certfile=self._certfile, - server_side=server_side, - cert_reqs=self._verify_mode, - ssl_version=self._protocol, - ca_certs=self._cafile, - do_handshake_on_connect=do_handshake_on_connect, - suppress_ragged_eofs=suppress_ragged_eofs) +from ssl import SSLContext +if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): + from ssl import VERIFY_CRL_CHECK_LEAF +# Python 3.7 uses OpenSSL's hostname matching implementation +# making it the obvious version to start using SSLConext.check_hostname. +# Python 3.6 might have been a good version, but it suffers +# from https://bugs.python.org/issue32185. +# We'll use our bundled match_hostname for older Python +# versions, which also supports IP address matching +# with Python < 3.5. +CHECK_HOSTNAME_SAFE = _sys.version_info[:2] >= (3, 7) diff --git a/setup.py b/setup.py index dc54b07046..18ac6ad541 100755 --- a/setup.py +++ b/setup.py @@ -145,46 +145,6 @@ def run(self): raise RuntimeError( "You must install Sphinx to build or test the documentation.") - # TODO: Convert all the docs to Python 3 and delete all this. - import doctest - from doctest import OutputChecker as _OutputChecker - - # Match u or U (possibly followed by r or R), removing it. - # r/R can follow u/U but not precede it. Don't match the - # single character string 'u' or 'U'. - _u_literal_re = re.compile( - r"(\W|^)(? Date: Thu, 21 Jan 2021 19:57:06 -0800 Subject: [PATCH 0288/2111] PYTHON-1974 Drop support for Python 3.4 --- .evergreen/build-mac.sh | 2 +- .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 1 - .evergreen/build-windows.sh | 2 +- .evergreen/config.yml | 53 ++++++++------------- .evergreen/test-encryption-requirements.txt | 4 +- .evergreen/utils.sh | 5 +- .travis.yml | 1 - CONTRIBUTING.rst | 2 +- README.rst | 2 +- doc/faq.rst | 2 +- doc/installation.rst | 20 ++------ doc/python3.rst | 2 +- pymongo/pool.py | 14 +++--- pymongo/ssl_support.py | 6 +-- setup.py | 8 ++-- test/test_bson.py | 16 +++---- test/test_client.py | 3 -- 18 files changed, 51 insertions(+), 94 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index bbed5c2c59..fd6d571ef1 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.4 3.5 3.6 3.7 3.8 3.9; do +for VERSION in 3.5 3.6 3.7 3.8 3.9; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 0c57b43700..dda1089f0d 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp34|cp35|cp36|cp37|cp38|cp39) ]]; then + if [[ ! $PYTHON =~ (cp35|cp36|cp37|cp38|cp39) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 93dcca43f0..8c8c101f86 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -22,7 +22,6 @@ ls dist # Check for any unexpected files. unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp34*' -or \ -iname '*cp35*' -or \ -iname '*cp36*' -or \ -iname '*cp37*' -or \ diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 577fddb8f8..ca1c198ba8 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 34 35 36 37 38 39; do +for VERSION in 35 36 37 38 39; do _pythons=(C:/Python/Python${VERSION}/python.exe \ C:/Python/32/Python${VERSION}/python.exe) for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d138ec6283..9cbcfd5584 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1582,10 +1582,6 @@ axes: display_name: "Archlinux" run_on: archlinux-test batchtime: 10080 # 7 days - - id: debian81 - display_name: "Debian 8.1" - run_on: debian81-test - batchtime: 10080 # 7 days - id: debian92 display_name: "Debian 9.2" run_on: debian92-test @@ -1607,6 +1603,9 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-62-64-bit/master/latest/libmongocrypt.tar.gz + # Note that rhel70 isn't currently used since it doesn't + # have a system Python 3. We'll switch to rhel70 as our main test + # system (using /opt/python) in a future change. - id: rhel70 display_name: "RHEL 7.0" run_on: rhel70-small @@ -1623,12 +1622,6 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel72-zseries-test/master/latest/libmongocrypt.tar.gz - - id: suse12-x86-64-test - display_name: "SUSE 12 (x86_64)" - run_on: suse12-sp5-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/suse12-64/master/latest/libmongocrypt.tar.gz - id: ubuntu-16.04 display_name: "Ubuntu 16.04" run_on: ubuntu1604-test @@ -1779,10 +1772,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "3.4" - display_name: "Python 3.4" - variables: - PYTHON_BINARY: "/opt/python/3.4/bin/python3" - id: "3.5" display_name: "Python 3.5" batchtime: 10080 # 7 days @@ -1820,10 +1809,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "3.4" - display_name: "Python 3.4" - variables: - PYTHON_BINARY: "C:/python/Python34/python.exe" - id: "3.5" display_name: "Python 3.5" variables: @@ -1848,10 +1833,6 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "3.4" - display_name: "32-bit Python 3.4" - variables: - PYTHON_BINARY: "C:/python/32/Python34/python.exe" - id: "3.5" display_name: "32-bit Python 3.5" variables: @@ -2047,7 +2028,6 @@ buildvariants: platform: # OSes that support versions of MongoDB>=3.2 with SSL. - ubuntu-16.04 - - suse12-x86-64-test - rhel71-power8-test auth-ssl: "*" display_name: "${platform} ${auth-ssl}" @@ -2106,7 +2086,6 @@ buildvariants: matrix_spec: platform: # OSes that support versions of MongoDB>=3.4 <4.2 with SSL. - - debian81 - ubuntu1604-power8-test - ubuntu1604-arm64-small auth-ssl: "*" @@ -2147,7 +2126,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: &rhel62-pythons ["3.4", "3.5", "3.6", "pypy3.5", "pypy3.6"] + python-version: &rhel62-pythons ["3.5", "3.6", "pypy3.5", "pypy3.6"] auth: "*" ssl: "*" coverage: "*" @@ -2163,14 +2142,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: platform: ubuntu-16.04 - python-version: ["3.4", "3.5", "3.6", "3.7", "3.8", "3.9"] + python-version: ["3.5", "3.6", "3.7", "3.8", "3.9"] auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-16.04 - python-version: ["3.4", "3.5", "3.6", "3.8", "3.9"] + python-version: ["3.5", "3.6", "3.8", "3.9"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2218,7 +2197,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["3.4", "3.5", "3.6"] + python-version: ["3.5", "3.6"] auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2273,7 +2252,7 @@ buildvariants: matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "pypy3.5", "pypy3.6"] + python-version: ["3.5", "3.6", "3.7", "3.8", "3.9", "pypy3.5", "pypy3.6"] c-extensions: "*" compression: "*" exclude_spec: @@ -2450,7 +2429,13 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: rhel62 - python-version: ["3.4", "3.6"] + # The toolchain doesn't currently include mod-wsgi + # built for CPython 3.5 or 3.8, mod-wsgi doesn't yet + # claim to support 3.9. Python 3.7+ won't build on rhel6 + # and we need to do some work to migrate mod-wsgi testing + # to a different OS. For now we're stuck just testing with + # Python 3.6. + python-version: ["3.6"] mod-wsgi-version: "*" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: @@ -2468,7 +2453,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: platform: rhel62 - python-version: ["3.4"] + python-version: ["3.5"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" @@ -2510,7 +2495,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-16.04 - python-version: ["3.4", "3.8"] + python-version: ["3.5", "3.9"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2532,7 +2517,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 - python-version: ["3.4", "3.8", "3.9"] + python-version: ["3.5", "3.8", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2556,7 +2541,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.4", "3.9"] + python-version-windows: ["3.5", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" diff --git a/.evergreen/test-encryption-requirements.txt b/.evergreen/test-encryption-requirements.txt index 3b66938bb8..b5a752b6b8 100644 --- a/.evergreen/test-encryption-requirements.txt +++ b/.evergreen/test-encryption-requirements.txt @@ -1,4 +1,2 @@ -# cffi==1.14.3 was the last installable release on RHEL 6.2 with Python 3.4 -cffi==1.14.3;python_version=="3.4" -cffi>=1.12.0,<2;python_version!="3.4" +cffi>=1.12.0,<2 cryptography>=2,<4 diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 4ffa50ac13..0d741fb59d 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -25,10 +25,7 @@ createvirtualenv () { # Upgrade to the latest versions of pip setuptools wheel so that # pip can always download the latest cryptography+cffi wheels. PYTHON_VERSION=$(python -c 'import sys;print("%s.%s" % sys.version_info[:2])') - if [[ $PYTHON_VERSION == "3.4" ]]; then - # pip 19.2 dropped support for Python 3.4. - python -m pip install --upgrade 'pip<19.2' - elif [[ $PYTHON_VERSION == "3.5" ]]; then + if [[ $PYTHON_VERSION == "3.5" ]]; then # pip 21 will drop support for 3.5. python -m pip install --upgrade 'pip<21' else diff --git a/.travis.yml b/.travis.yml index 585f3c28df..b13397eacb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: python python: - - 3.4 - 3.5 - 3.6 - 3.7 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index b191debceb..b9a9856e30 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,7 +19,7 @@ that might not be of interest or that has already been addressed. Supported Interpreters ---------------------- -PyMongo supports CPython 3.4+ and PyPy3.5+. Language +PyMongo supports CPython 3.5+ and PyPy3.5+. Language features not supported by all interpreters can not be used. Style Guide diff --git a/README.rst b/README.rst index 20c3b56e78..b6db345be2 100644 --- a/README.rst +++ b/README.rst @@ -89,7 +89,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 3.4+ and PyPy3.5+. +PyMongo supports CPython 3.5+ and PyPy3.5+. Optional dependencies: diff --git a/doc/faq.rst b/doc/faq.rst index 89d8e8a7ff..99d8f1db04 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -134,7 +134,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.4+ and PyPy3.5+. See the :doc:`python3` for details. +PyMongo supports CPython 3.5+ and PyPy3.5+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index ea437aa4bd..aa17087b7c 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.4+ and PyPy3.5+. +PyMongo supports CPython 3.5+ and PyPy3.5+. Optional dependencies: @@ -133,7 +133,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.4+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.5+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with @@ -163,20 +163,10 @@ Installing from source on Windows If you want to install PyMongo with C extensions from source the following requirements apply to both CPython and ActiveState's ActivePython: -64-bit Windows -~~~~~~~~~~~~~~ +Windows +~~~~~~~ -For Python 3.5 and newer install Visual Studio 2015. For Python 3.4 -install Visual Studio 2010. You must use the full version of Visual Studio -2010 as Visual C++ Express does not provide 64-bit compilers. Make sure that -you check the "x64 Compilers and Tools" option under Visual C++. - -32-bit Windows -~~~~~~~~~~~~~~ - -For Python 3.5 and newer install Visual Studio 2015. - -For Python 3.4 install Visual C++ 2010 Express. +Install Visual Studio 2015+. .. _install-no-c: diff --git a/doc/python3.rst b/doc/python3.rst index 89b0afca57..ce45ee499e 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -6,7 +6,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.4+ and PyPy3.5+. +PyMongo supports CPython 3.5+ and PyPy3.5+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- diff --git a/pymongo/pool.py b/pymongo/pool.py index e92df9b594..6dae7a644a 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -151,15 +151,13 @@ def _set_keepalive_times(sock): ]) if sys.platform.startswith('linux'): - # platform.linux_distribution was deprecated in Python 3.5. - if sys.version_info[:2] < (3, 5): - # Distro name and version (e.g. Ubuntu 16.04 xenial) - _name = ' '.join([part for part in - platform.linux_distribution() if part]) - else: - _name = platform.system() + # platform.linux_distribution was deprecated in Python 3.5 + # and removed in Python 3.8. Starting in Python 3.5 it + # raises DeprecationWarning + # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 + _name = platform.system() _METADATA['os'] = SON([ - ('type', platform.system()), + ('type', _name), ('name', _name), ('architecture', platform.machine()), # Kernel version (e.g. 4.4.0-17-generic). diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 91cafa6d6f..d8e55a1f7c 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -132,17 +132,13 @@ def get_ssl_context(*args): if _ssl.IS_PYOPENSSL: raise ConfigurationError( "ssl_crlfile cannot be used with PyOpenSSL") - if not hasattr(ctx, "verify_flags"): - raise ConfigurationError( - "Support for ssl_crlfile requires " - "python 2.7.9+ (pypy 2.5.1+) or 3.4+") # Match the server's behavior. ctx.verify_flags = getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) elif cert_reqs != CERT_NONE: - # CPython 3.4+ ssl module only, doesn't exist in PyOpenSSL + # CPython ssl module only, doesn't exist in PyOpenSSL if hasattr(ctx, "load_default_certs"): ctx.load_default_certs() # Always useless on Windows. diff --git a/setup.py b/setup.py index 18ac6ad541..2c2b9e53c4 100755 --- a/setup.py +++ b/setup.py @@ -5,8 +5,8 @@ import warnings -if sys.version_info[:2] < (3, 4): - raise RuntimeError("Python version >= 3.4 required.") +if sys.version_info[:2] < (3, 5): + raise RuntimeError("Python version >= 3.5 required.") # Hack to silence atexit traceback in some Python versions @@ -322,7 +322,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=3.4", + python_requires=">=3.5", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", @@ -331,7 +331,7 @@ def build_extension(self, ext): "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", diff --git a/test/test_bson.py b/test/test_bson.py index a28a0d67f4..7c14c625ce 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -594,16 +594,14 @@ def test_dst(self): d = {"x": datetime.datetime(1993, 4, 4, 2)} self.assertEqual(d, decode(encode(d))) + @unittest.skip('Disabled due to http://bugs.python.org/issue25222') def test_bad_encode(self): - # Work around what seems like a regression in python 3.5.0. - # See http://bugs.python.org/issue25222 - if sys.version_info[:2] < (3, 5): - evil_list = {'a': []} - evil_list['a'].append(evil_list) - evil_dict = {} - evil_dict['a'] = evil_dict - for evil_data in [evil_dict, evil_list]: - self.assertRaises(Exception, encode, evil_data) + evil_list = {'a': []} + evil_list['a'].append(evil_list) + evil_dict = {} + evil_dict['a'] = evil_dict + for evil_data in [evil_dict, evil_list]: + self.assertRaises(Exception, encode, evil_data) def test_overflow(self): self.assertTrue(encode({"x": 9223372036854775807})) diff --git a/test/test_client.py b/test/test_client.py index 96e17e3e6d..ad628a224f 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -280,9 +280,6 @@ def test_read_preference(self): readpreference=ReadPreference.NEAREST.mongos_mode) self.assertEqual(c.read_preference, ReadPreference.NEAREST) - @unittest.skipIf( - sys.version_info[0] == 3 and sys.version_info[1] == 4, - "PYTHON-2442: workaround namedtuple._asdict() bug on Python 3.4") def test_metadata(self): metadata = copy.deepcopy(_METADATA) metadata['application'] = {'name': 'foobar'} From e01d9a37e73a2054fd7cdcd75246debd553c3f6e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 22 Jan 2021 17:11:15 -0800 Subject: [PATCH 0289/2111] PYTHON-1320 Remove legacy CRUD methods (#556) Remove save, insert, update, remove, and find_and_modify. Remove tools/benchmark.py --- bson/regex.py | 2 +- doc/api/pymongo/collection.rst | 5 - doc/changelog.rst | 5 + doc/compatibility-policy.rst | 2 +- doc/faq.rst | 7 +- doc/migrate-to-pymongo3.rst | 4 +- doc/migrate-to-pymongo4.rst | 105 +++- pymongo/collection.py | 239 +------- pymongo/helpers.py | 11 - test/test_bulk.py | 4 +- test/test_collation.py | 27 - test/test_collection.py | 133 +++-- test/test_custom_types.py | 18 +- test/test_legacy_api.py | 1009 -------------------------------- test/test_monitoring.py | 310 ++-------- test/test_retryable_writes.py | 46 +- test/test_write_concern.py | 7 + tools/benchmark.py | 165 ------ 18 files changed, 273 insertions(+), 1826 deletions(-) delete mode 100644 tools/benchmark.py diff --git a/bson/regex.py b/bson/regex.py index 0624f9ab34..3a9042500b 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -53,7 +53,7 @@ def from_native(cls, regex): >>> pattern = re.compile('.*') >>> regex = Regex.from_native(pattern) >>> regex.flags ^= re.UNICODE - >>> db.collection.insert({'pattern': regex}) + >>> db.collection.insert_one({'pattern': regex}) :Parameters: - `regex`: A regular expression object from ``re.compile()``. diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 98df4432e3..ada01352e9 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -71,8 +71,3 @@ .. automethod:: initialize_ordered_bulk_op .. automethod:: group .. automethod:: count - .. automethod:: insert(doc_or_docs, manipulate=True, check_keys=True, continue_on_error=False, **kwargs) - .. automethod:: save(to_save, manipulate=True, check_keys=True, **kwargs) - .. automethod:: update(spec, document, upsert=False, manipulate=False, multi=False, check_keys=True, **kwargs) - .. automethod:: remove(spec_or_id=None, multi=True, **kwargs) - .. automethod:: find_and_modify diff --git a/doc/changelog.rst b/doc/changelog.rst index 248372e15a..db50129f31 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -30,6 +30,11 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :meth:`pymongo.collection.Collection.ensure_index`. - Removed :meth:`pymongo.collection.Collection.reindex`. +- Removed :meth:`pymongo.collection.Collection.save` +- Removed :meth:`pymongo.collection.Collection.insert` +- Removed :meth:`pymongo.collection.Collection.update` +- Removed :meth:`pymongo.collection.Collection.remove` +- Removed :meth:`pymongo.collection.Collection.find_and_modify` - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`pymongo.cursor.Cursor.close` instead. - Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst index ed22c97351..a20b9681eb 100644 --- a/doc/compatibility-policy.rst +++ b/doc/compatibility-policy.rst @@ -24,7 +24,7 @@ warning: .. code-block:: python - # "insert.py" + # "insert.py" (with PyMongo 3.X) from pymongo import MongoClient client = MongoClient() diff --git a/doc/faq.rst b/doc/faq.rst index 99d8f1db04..5efb380796 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -202,6 +202,9 @@ documents that already have an ``_id`` field, added by your application. Key order in subdocuments -- why does my query work in the shell but not PyMongo? --------------------------------------------------------------------------------- +.. + Note: We should rework this section now that Python 3.6+ has ordered dict. + .. testsetup:: key-order from bson.son import SON @@ -220,9 +223,9 @@ is displayed: .. code-block:: javascript > // mongo shell. - > db.collection.insert( { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } ) + > db.collection.insertOne( { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } ) WriteResult({ "nInserted" : 1 }) - > db.collection.find() + > db.collection.findOne() { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } PyMongo represents BSON documents as Python dicts by default, and the order diff --git a/doc/migrate-to-pymongo3.rst b/doc/migrate-to-pymongo3.rst index bb396ddee0..9b30fc36f0 100644 --- a/doc/migrate-to-pymongo3.rst +++ b/doc/migrate-to-pymongo3.rst @@ -340,14 +340,14 @@ this:: >>> oid = collection.insert({"a": 2}, w="majority") -can be changed to this with PyMongo 2.9 or later: +can be changed to this with PyMongo 3 or later: .. doctest:: >>> from pymongo import WriteConcern >>> coll2 = collection.with_options( ... write_concern=WriteConcern(w="majority")) - >>> oid = coll2.insert({"a": 2}) + >>> oid = coll2.insert_one({"a": 2}) .. seealso:: :meth:`~pymongo.database.Database.get_collection` diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 68788c05b1..cf05114ca8 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -140,6 +140,103 @@ can be changed to this:: Collection ---------- +Collection.insert is removed +............................ + +Removed :meth:`pymongo.collection.Collection.insert`. Use +:meth:`~pymongo.collection.Collection.insert_one` or +:meth:`~pymongo.collection.Collection.insert_many` instead. + +Code like this:: + + collection.insert({'doc': 1}) + collection.insert([{'doc': 2}, {'doc': 3}]) + +Can be changed to this:: + + collection.insert_one({'my': 'document'}) + collection.insert_many([{'doc': 2}, {'doc': 3}]) + +Collection.save is removed +.......................... + +Removed :meth:`pymongo.collection.Collection.save`. Applications will +get better performance using :meth:`~pymongo.collection.Collection.insert_one` +to insert a new document and :meth:`~pymongo.collection.Collection.update_one` +to update an existing document. Code like this:: + + doc = collection.find_one({"_id": "some id"}) + doc["some field"] = + db.collection.save(doc) + +Can be changed to this:: + + result = collection.update_one({"_id": "some id"}, {"$set": {"some field": }}) + +If performance is not a concern and refactoring is untenable, ``save`` can be +implemented like so:: + + def save(doc): + if '_id' in doc: + collection.replace_one({'_id': doc['_id']}, doc, upsert=True) + return doc['_id'] + else: + res = collection.insert_one(doc) + return res.inserted_id + +Collection.update is removed +............................ + +Removed :meth:`pymongo.collection.Collection.update`. Use +:meth:`~pymongo.collection.Collection.update_one` +to update a single document or +:meth:`~pymongo.collection.Collection.update_many` to update multiple +documents. Code like this:: + + collection.update({}, {'$set': {'a': 1}}) + collection.update({}, {'$set': {'b': 1}}, multi=True) + +Can be changed to this:: + + collection.update_one({}, {'$set': {'a': 1}}) + collection.update_many({}, {'$set': {'b': 1}}) + +Collection.remove is removed +............................ + +Removed :meth:`pymongo.collection.Collection.remove`. Use +:meth:`~pymongo.collection.Collection.delete_one` +to delete a single document or +:meth:`~pymongo.collection.Collection.delete_many` to delete multiple +documents. Code like this:: + + collection.remove({'a': 1}, multi=False) + collection.remove({'b': 1}) + +Can be changed to this:: + + collection.delete_one({'a': 1}) + collection.delete_many({'b': 1}) + +Collection.find_and_modify is removed +..................................... + +Removed :meth:`pymongo.collection.Collection.find_and_modify`. Use +:meth:`~pymongo.collection.Collection.find_one_and_update`, +:meth:`~pymongo.collection.Collection.find_one_and_replace`, or +:meth:`~pymongo.collection.Collection.find_one_and_delete` instead. +Code like this:: + + updated_doc = collection.find_and_modify({'a': 1}, {'$set': {'b': 1}}) + replaced_doc = collection.find_and_modify({'b': 1}, {'c': 1}) + deleted_doc = collection.find_and_modify({'c': 1}, remove=True) + +Can be changed to this:: + + updated_doc = collection.find_one_and_update({'a': 1}, {'$set': {'b': 1}}) + replaced_doc = collection.find_one_and_replace({'b': 1}, {'c': 1}) + deleted_doc = collection.find_one_and_delete({'c': 1}) + Collection.ensure_index is removed .................................. @@ -152,16 +249,16 @@ to :meth:`~pymongo.collection.Collection.create_index` or :meth:`~pymongo.collection.Collection.create_indexes`. Code like this:: def persist(self, document): - my_collection.ensure_index('a', unique=True) - my_collection.insert_one(document) + collection.ensure_index('a', unique=True) + collection.insert_one(document) Can be changed to this:: def persist(self, document): if not self.created_index: - my_collection.create_index('a', unique=True) + collection.create_index('a', unique=True) self.created_index = True - my_collection.insert_one(document) + collection.insert_one(document) Collection.reindex is removed ............................. diff --git a/pymongo/collection.py b/pymongo/collection.py index 97787f3d67..d600f27079 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -31,17 +31,14 @@ _CollectionRawAggregationCommand) from pymongo.bulk import BulkOperationBuilder, _Bulk from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.common import ORDERED_TYPES from pymongo.collation import validate_collation_or_none from pymongo.change_stream import CollectionChangeStream from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import (BulkWriteError, - ConfigurationError, +from pymongo.errors import (ConfigurationError, InvalidName, InvalidOperation, OperationFailure) -from pymongo.helpers import (_check_write_command_response, - _raise_last_error) +from pymongo.helpers import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS from pymongo.operations import IndexModel from pymongo.read_preferences import ReadPreference @@ -604,52 +601,6 @@ def _insert_command(session, sock_info, retryable_write): if not isinstance(doc, RawBSONDocument): return doc.get('_id') - def _insert(self, docs, ordered=True, check_keys=True, - manipulate=False, write_concern=None, op_id=None, - bypass_doc_val=False, session=None): - """Internal insert helper.""" - if isinstance(docs, abc.Mapping): - return self._insert_one( - docs, ordered, check_keys, manipulate, write_concern, op_id, - bypass_doc_val, session) - - ids = [] - - if manipulate: - def gen(): - """Generator that applies SON manipulators to each document - and adds _id if necessary. - """ - _db = self.__database - for doc in docs: - # Apply user-configured SON manipulators. This order of - # operations is required for backwards compatibility, - # see PYTHON-709. - doc = _db._apply_incoming_manipulators(doc, self) - if not (isinstance(doc, RawBSONDocument) or '_id' in doc): - doc['_id'] = ObjectId() - - doc = _db._apply_incoming_copying_manipulators(doc, self) - ids.append(doc['_id']) - yield doc - else: - def gen(): - """Generator that only tracks existing _ids.""" - for doc in docs: - # Don't inflate RawBSONDocument by touching fields. - if not isinstance(doc, RawBSONDocument): - ids.append(doc.get('_id')) - yield doc - - write_concern = write_concern or self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_doc_val) - blk.ops = [(message._INSERT, doc) for doc in gen()] - try: - blk.execute(write_concern, session=session) - except BulkWriteError as bwe: - _raise_last_error(bwe.details) - return ids - def insert_one(self, document, bypass_document_validation=False, session=None): """Insert a single document. @@ -694,10 +645,10 @@ def insert_one(self, document, bypass_document_validation=False, write_concern = self._write_concern_for(session) return InsertOneResult( - self._insert(document, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - session=session), + self._insert_one( + document, ordered=True, check_keys=True, manipulate=False, + write_concern=write_concern, op_id=None, + bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) def insert_many(self, documents, ordered=True, @@ -3068,184 +3019,6 @@ def find_one_and_update(self, filter, update, array_filters, hint=hint, session=session, **kwargs) - def save(self, to_save, manipulate=True, check_keys=True, **kwargs): - """Save a document in this collection. - - **DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("save is deprecated. Use insert_one or replace_one " - "instead", DeprecationWarning, stacklevel=2) - common.validate_is_document_type("to_save", to_save) - - write_concern = None - collation = validate_collation_or_none(kwargs.pop('collation', None)) - if kwargs: - write_concern = WriteConcern(**kwargs) - - if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save): - return self._insert( - to_save, True, check_keys, manipulate, write_concern) - else: - self._update_retryable( - {"_id": to_save["_id"]}, to_save, True, - check_keys, False, manipulate, write_concern, - collation=collation) - return to_save.get("_id") - - def insert(self, doc_or_docs, manipulate=True, - check_keys=True, continue_on_error=False, **kwargs): - """Insert a document(s) into this collection. - - **DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("insert is deprecated. Use insert_one or insert_many " - "instead.", DeprecationWarning, stacklevel=2) - write_concern = None - if kwargs: - write_concern = WriteConcern(**kwargs) - return self._insert(doc_or_docs, not continue_on_error, - check_keys, manipulate, write_concern) - - def update(self, spec, document, upsert=False, manipulate=False, - multi=False, check_keys=True, **kwargs): - """Update a document(s) in this collection. - - **DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or - :meth:`update_many` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("update is deprecated. Use replace_one, update_one or " - "update_many instead.", DeprecationWarning, stacklevel=2) - common.validate_is_mapping("spec", spec) - common.validate_is_mapping("document", document) - if document: - # If a top level key begins with '$' this is a modify operation - # and we should skip key validation. It doesn't matter which key - # we check here. Passing a document with a mix of top level keys - # starting with and without a '$' is invalid and the server will - # raise an appropriate exception. - first = next(iter(document)) - if first.startswith('$'): - check_keys = False - - write_concern = None - collation = validate_collation_or_none(kwargs.pop('collation', None)) - if kwargs: - write_concern = WriteConcern(**kwargs) - return self._update_retryable( - spec, document, upsert, check_keys, multi, manipulate, - write_concern, collation=collation) - - def remove(self, spec_or_id=None, multi=True, **kwargs): - """Remove a document(s) from this collection. - - **DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead. - - .. versionchanged:: 3.0 - Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write - operations. - """ - warnings.warn("remove is deprecated. Use delete_one or delete_many " - "instead.", DeprecationWarning, stacklevel=2) - if spec_or_id is None: - spec_or_id = {} - if not isinstance(spec_or_id, abc.Mapping): - spec_or_id = {"_id": spec_or_id} - write_concern = None - collation = validate_collation_or_none(kwargs.pop('collation', None)) - if kwargs: - write_concern = WriteConcern(**kwargs) - return self._delete_retryable( - spec_or_id, multi, write_concern, collation=collation) - - def find_and_modify(self, query=None, update=None, - upsert=False, sort=None, full_response=False, - manipulate=False, **kwargs): - """Update and return an object. - - **DEPRECATED** - Use :meth:`find_one_and_delete`, - :meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead. - """ - warnings.warn("find_and_modify is deprecated, use find_one_and_delete" - ", find_one_and_replace, or find_one_and_update instead", - DeprecationWarning, stacklevel=2) - - if not update and not kwargs.get('remove', None): - raise ValueError("Must either update or remove") - - if update and kwargs.get('remove', None): - raise ValueError("Can't do both update and remove") - - # No need to include empty args - if query: - kwargs['query'] = query - if update: - kwargs['update'] = update - if upsert: - kwargs['upsert'] = upsert - if sort: - # Accept a list of tuples to match Cursor's sort parameter. - if isinstance(sort, list): - kwargs['sort'] = helpers._index_document(sort) - # Accept OrderedDict, SON, and dict with len == 1 so we - # don't break existing code already using find_and_modify. - elif (isinstance(sort, ORDERED_TYPES) or - isinstance(sort, dict) and len(sort) == 1): - warnings.warn("Passing mapping types for `sort` is deprecated," - " use a list of (key, direction) pairs instead", - DeprecationWarning, stacklevel=2) - kwargs['sort'] = sort - else: - raise TypeError("sort must be a list of (key, direction) " - "pairs, a dict of len 1, or an instance of " - "SON or OrderedDict") - - fields = kwargs.pop("fields", None) - if fields is not None: - kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields") - - collation = validate_collation_or_none(kwargs.pop('collation', None)) - - cmd = SON([("findAndModify", self.__name)]) - cmd.update(kwargs) - - write_concern = self._write_concern_for_cmd(cmd, None) - - def _find_and_modify(session, sock_info, retryable_write): - if (sock_info.max_wire_version >= 4 and - not write_concern.is_server_default): - cmd['writeConcern'] = write_concern.document - result = self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, - collation=collation, - session=session, retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS) - - _check_write_command_response(result) - return result - - out = self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, None) - - if full_response: - return out - else: - document = out.get('value') - if manipulate: - document = self.__database._fix_outgoing(document, self) - return document - def __iter__(self): return self diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 4d306c6bad..107e061acf 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -234,17 +234,6 @@ def _check_write_command_response(result): _raise_write_concern_error(error) -def _raise_last_error(bulk_write_result): - """Backward compatibility helper for insert error handling. - """ - # Prefer write errors over write concern errors - write_errors = bulk_write_result.get("writeErrors") - if write_errors: - _raise_last_write_error(write_errors) - - _raise_write_concern_error(bulk_write_result["writeConcernErrors"][-1]) - - def _fields_list_to_dict(fields, option_name): """Takes a sequence of field names and returns a matching dictionary. diff --git a/test/test_bulk.py b/test/test_bulk.py index 22e971d576..7d8176e57f 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -295,8 +295,8 @@ def test_upsert(self): self.assertEqual(self.coll.count_documents({'foo': 'bar'}), 1) def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. - n_docs = 2100 + # Ensure we don't exceed server's maxWriteBatchSize size limit. + n_docs = self.client.max_write_batch_size + 100 requests = [InsertOne({}) for _ in range(n_docs)] result = self.coll.bulk_write(requests, ordered=False) self.assertEqual(n_docs, result.inserted_count) diff --git a/test/test_collation.py b/test/test_collation.py index c8d6c15e10..c6d9baa466 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -217,30 +217,8 @@ def test_delete(self): self.collation.document, command['deletes'][0]['collation']) - self.listener.results.clear() - self.db.test.remove({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) - @raisesConfigurationErrorForOldMongoDB def test_update(self): - self.db.test.update({'foo': 42}, {'$set': {'foo': 'bar'}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) - - self.listener.results.clear() - self.db.test.save({'_id': 12345}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) - - self.listener.results.clear() self.db.test.replace_one({'foo': 42}, {'foo': 43}, collation=self.collation) command = self.listener.results['started'][0].command @@ -266,11 +244,6 @@ def test_update(self): @raisesConfigurationErrorForOldMongoDB def test_find_and(self): - self.db.test.find_and_modify({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - self.assertCollationInLastCommand() - - self.listener.results.clear() self.db.test.find_one_and_delete({'foo': 42}, collation=self.collation) self.assertCollationInLastCommand() diff --git a/test/test_collection.py b/test/test_collection.py index 3600954c0f..0ac4aed2a7 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -847,6 +847,36 @@ def test_command_document_too_large(self): self.assertRaises( DocumentTooLarge, coll.delete_one, {'data': large}) + def test_write_large_document(self): + max_size = self.db.client.max_bson_size + half_size = int(max_size / 2) + max_str = "x" * max_size + half_str = "x" * half_size + self.assertEqual(max_size, 16777216) + + self.assertRaises(OperationFailure, self.db.test.insert_one, + {"foo": max_str}) + self.assertRaises(OperationFailure, self.db.test.replace_one, + {}, {"foo": max_str}, upsert=True) + self.assertRaises(OperationFailure, self.db.test.insert_many, + [{"x": 1}, {"foo": max_str}]) + self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) + + self.db.test.insert_one({"bar": "x"}) + # Use w=0 here to test legacy doc size checking in all server versions + unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) + self.assertRaises(DocumentTooLarge, unack_coll.replace_one, + {"bar": "x"}, {"bar": "x" * (max_size - 14)}) + # This will pass with OP_UPDATE or the update command. + self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) + + def test_bad_dbref(self): + # Incomplete DBRefs. + ref_only = {'ref': {'$ref': 'collection'}} + id_only = {'ref': {'$id': ObjectId()}} + self.assertRaises(InvalidDocument, self.db.test.insert_one, ref_only) + self.assertRaises(InvalidDocument, self.db.test.insert_one, id_only) + @client_context.require_version_min(3, 1, 9, -1) def test_insert_bypass_document_validation(self): db = self.db @@ -1269,26 +1299,12 @@ def test_write_error_text_handling(self): db.test.insert_one, {"text": text}) - self.assertRaises(DuplicateKeyError, - db.test.insert, - {"text": text}) - - self.assertRaises(DuplicateKeyError, - db.test.insert, - [{"text": text}]) - self.assertRaises(DuplicateKeyError, db.test.replace_one, {"_id": ObjectId()}, {"text": text}, upsert=True) - self.assertRaises(DuplicateKeyError, - db.test.update, - {"_id": ObjectId()}, - {"text": text}, - upsert=True) - # Should raise BulkWriteError, not InvalidBSON self.assertRaises(BulkWriteError, db.test.insert_many, @@ -1941,13 +1957,79 @@ def test_min_query(self): self.assertEqual(2, docs[0]["x"]) def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. + # Ensure we don't exceed server's maxWriteBatchSize size limit. self.db.test.drop() - n_docs = 2100 + n_docs = self.client.max_write_batch_size + 100 self.db.test.insert_many([{} for _ in range(n_docs)]) self.assertEqual(n_docs, self.db.test.count_documents({})) self.db.test.drop() + def test_insert_many_large_batch(self): + # Tests legacy insert. + db = self.client.test_insert_large_batch + self.addCleanup(self.client.drop_database, 'test_insert_large_batch') + max_bson_size = self.client.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = 'x' * int(max_bson_size / 2) + + # Batch insert that requires 2 batches. + successful_insert = [{'x': big_string}, {'x': big_string}, + {'x': big_string}, {'x': big_string}] + db.collection_0.insert_many(successful_insert) + self.assertEqual(4, db.collection_0.count_documents({})) + + db.collection_0.drop() + + # Test that inserts fail after first error. + insert_second_fails = [{'_id': 'id0', 'x': big_string}, + {'_id': 'id0', 'x': big_string}, + {'_id': 'id1', 'x': big_string}, + {'_id': 'id2', 'x': big_string}] + + with self.assertRaises(BulkWriteError): + db.collection_1.insert_many(insert_second_fails) + + self.assertEqual(1, db.collection_1.count_documents({})) + + db.collection_1.drop() + + # 2 batches, 2nd insert fails, unacknowledged, ordered. + unack_coll = db.collection_2.with_options( + write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_second_fails) + wait_until(lambda: 1 == db.collection_2.count_documents({}), + 'insert 1 document', timeout=60) + + db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [{'_id': 'id0', 'x': big_string}, + {'_id': 'id0', 'x': big_string}, + {'_id': 'id1', 'x': big_string}, + {'_id': 'id1', 'x': big_string}] + + with self.assertRaises(OperationFailure) as context: + db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn('id1', str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, db.collection_3.count_documents({})) + + db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options( + write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_two_failures, ordered=False) + + # Only the first and third documents are inserted. + wait_until(lambda: 2 == db.collection_4.count_documents({}), + 'insert 2 documents', timeout=60) + + db.collection_4.drop() + def test_map_reduce(self): db = self.db db.drop_collection("test") @@ -2168,12 +2250,6 @@ def test_find_one_and_write_concern(self): db.command('ismaster') results.clear() if client_context.version.at_least(3, 1, 9, -1): - c_w0.find_and_modify( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - c_w0.find_one_and_update( {'_id': 1}, {'$set': {'foo': 'bar'}}) self.assertEqual( @@ -2196,10 +2272,6 @@ def test_find_one_and_write_concern(self): 'test', write_concern=WriteConcern( w=len(client_context.nodes) + 1)) - self.assertRaises( - WriteConcernError, - c_wc_error.find_and_modify, - {'_id': 1}, {'$set': {'foo': 'bar'}}) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_update, @@ -2214,11 +2286,6 @@ def test_find_one_and_write_concern(self): {'w': 0}, results['started'][0].command['writeConcern']) results.clear() else: - c_w0.find_and_modify( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - c_w0.find_one_and_update( {'_id': 1}, {'$set': {'foo': 'bar'}}) self.assertNotIn('writeConcern', results['started'][0].command) @@ -2232,10 +2299,6 @@ def test_find_one_and_write_concern(self): self.assertNotIn('writeConcern', results['started'][0].command) results.clear() - c_default.find_and_modify({'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}}) self.assertNotIn('writeConcern', results['started'][0].command) results.clear() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index e48acedff4..c74da24795 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -49,7 +49,7 @@ from test import client_context, unittest from test.test_client import IntegrationTest -from test.utils import ignore_deprecations, rs_client +from test.utils import rs_client class DecimalEncoder(TypeEncoder): @@ -692,22 +692,6 @@ def test_find_one_and__w_custom_type_decoder(self): self.assertEqual(doc['x'].value, 3) self.assertIsNone(c.find_one()) - @ignore_deprecations - def test_find_and_modify_w_custom_type_decoder(self): - db = self.db - c = db.get_collection('test', codec_options=UNINT_DECODER_CODECOPTS) - c.insert_one({'_id': 1, 'x': Int64(1)}) - - doc = c.find_and_modify({'_id': 1}, {'$inc': {'x': Int64(10)}}) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 1) - - doc = c.find_one() - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 11) - class TestGridFileCustomType(IntegrationTest): def setUp(self): diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 6b4e7209e0..b4e65d1280 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -31,7 +31,6 @@ from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, ConfigurationError, - DocumentTooLarge, DuplicateKeyError, InvalidDocument, InvalidOperation, @@ -48,9 +47,7 @@ from test.test_client import IntegrationTest from test.test_bulk import BulkTestBase, BulkAuthorizationTestBase from test.utils import (DeprecationFilter, - joinall, oid_generated_on_process, - rs_or_single_client, rs_or_single_client_noauth, single_client, wait_until) @@ -67,27 +64,6 @@ def setUpClass(cls): def tearDownClass(cls): cls.deprecation_filter.stop() - def test_save_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.save({})) - - def test_insert_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.insert({})) - - def test_update_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.update({}, {})) - - def test_remove_deprecation(self): - self.assertRaises( - DeprecationWarning, lambda: self.db.test.remove({})) - - def test_find_and_modify_deprecation(self): - self.assertRaises( - DeprecationWarning, - lambda: self.db.test.find_and_modify({'i': 5}, {})) - def test_add_son_manipulator_deprecation(self): db = self.client.pymongo_test self.assertRaises(DeprecationWarning, @@ -115,795 +91,6 @@ def setUpClass(cls): def tearDownClass(cls): cls.deprecation_filter.stop() - def test_insert_find_one(self): - # Tests legacy insert. - db = self.db - db.test.drop() - self.assertEqual(0, len(list(db.test.find()))) - doc = {"hello": "world"} - _id = db.test.insert(doc) - self.assertEqual(1, len(list(db.test.find()))) - self.assertEqual(doc, db.test.find_one()) - self.assertEqual(doc["_id"], _id) - self.assertTrue(isinstance(_id, ObjectId)) - - db = self.client.get_database( - db.name, codec_options=CodecOptions(document_class=dict)) - - def remove_insert_find_one(doc): - db.test.remove({}) - db.test.insert(doc) - # SON equality is order sensitive. - return db.test.find_one() == doc.to_dict() - - qcheck.check_unittest(self, remove_insert_find_one, - qcheck.gen_mongo_dict(3)) - - def test_generator_insert(self): - # Only legacy insert currently supports insert from a generator. - db = self.db - db.test.remove({}) - self.assertEqual(db.test.find().count(), 0) - db.test.insert(({'a': i} for i in range(5)), manipulate=False) - self.assertEqual(5, db.test.count()) - db.test.remove({}) - - db.test.insert(({'a': i} for i in range(5)), manipulate=True) - self.assertEqual(5, db.test.count()) - db.test.remove({}) - - def test_insert_multiple(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test") - doc1 = {"hello": "world"} - doc2 = {"hello": "mike"} - self.assertEqual(db.test.find().count(), 0) - ids = db.test.insert([doc1, doc2]) - self.assertEqual(db.test.find().count(), 2) - self.assertEqual(doc1, db.test.find_one({"hello": "world"})) - self.assertEqual(doc2, db.test.find_one({"hello": "mike"})) - - self.assertEqual(2, len(ids)) - self.assertEqual(doc1["_id"], ids[0]) - self.assertEqual(doc2["_id"], ids[1]) - - ids = db.test.insert([{"hello": 1}]) - self.assertTrue(isinstance(ids, list)) - self.assertEqual(1, len(ids)) - - self.assertRaises(InvalidOperation, db.test.insert, []) - - # Generator that raises StopIteration on first call to next(). - self.assertRaises(InvalidOperation, db.test.insert, (i for i in [])) - - def test_insert_multiple_with_duplicate(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test_insert_multiple_with_duplicate") - collection = db.test_insert_multiple_with_duplicate - collection.create_index([('i', ASCENDING)], unique=True) - - # No error - collection.insert([{'i': i} for i in range(5, 10)], w=0) - wait_until(lambda: 5 == collection.count(), 'insert 5 documents') - - db.drop_collection("test_insert_multiple_with_duplicate") - collection.create_index([('i', ASCENDING)], unique=True) - - # No error - collection.insert([{'i': 1}] * 2, w=0) - wait_until(lambda: 1 == collection.count(), 'insert 1 document') - - self.assertRaises( - DuplicateKeyError, - lambda: collection.insert([{'i': 2}] * 2), - ) - - db.drop_collection("test_insert_multiple_with_duplicate") - db = self.client.get_database( - db.name, write_concern=WriteConcern(w=0)) - - collection = db.test_insert_multiple_with_duplicate - collection.create_index([('i', ASCENDING)], unique=True) - - # No error. - collection.insert([{'i': 1}] * 2) - wait_until(lambda: 1 == collection.count(), 'insert 1 document') - - # Implied acknowledged. - self.assertRaises( - DuplicateKeyError, - lambda: collection.insert([{'i': 2}] * 2, fsync=True), - ) - - # Explicit acknowledged. - self.assertRaises( - DuplicateKeyError, - lambda: collection.insert([{'i': 2}] * 2, w=1)) - - db.drop_collection("test_insert_multiple_with_duplicate") - - @client_context.require_replica_set - def test_insert_prefers_write_errors(self): - # Tests legacy insert. - collection = self.db.test_insert_prefers_write_errors - self.db.drop_collection(collection.name) - collection.insert_one({'_id': 1}) - large = 's' * 1024 * 1024 * 15 - with self.assertRaises(DuplicateKeyError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}]) - self.assertEqual(1, collection.count()) - - with self.assertRaises(DuplicateKeyError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}], - continue_on_error=True) - self.assertEqual(2, collection.count()) - collection.delete_one({'_id': 2}) - - # A writeError followed by a writeConcernError should prefer to raise - # the writeError. - with self.assertRaises(DuplicateKeyError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}], - continue_on_error=True, - w=len(client_context.nodes) + 10, wtimeout=1) - self.assertEqual(2, collection.count()) - collection.delete_many({}) - - with self.assertRaises(WriteConcernError): - collection.insert( - [{'_id': 1, 's': large}, {'_id': 2, 's': large}], - continue_on_error=True, - w=len(client_context.nodes) + 10, wtimeout=1) - self.assertEqual(2, collection.count()) - - def test_insert_iterables(self): - # Tests legacy insert. - db = self.db - - self.assertRaises(TypeError, db.test.insert, 4) - self.assertRaises(TypeError, db.test.insert, None) - self.assertRaises(TypeError, db.test.insert, True) - - db.drop_collection("test") - self.assertEqual(db.test.find().count(), 0) - db.test.insert(({"hello": "world"}, {"hello": "world"})) - self.assertEqual(db.test.find().count(), 2) - - db.drop_collection("test") - self.assertEqual(db.test.find().count(), 0) - db.test.insert(map(lambda x: {"hello": "world"}, - itertools.repeat(None, 10))) - self.assertEqual(db.test.find().count(), 10) - - def test_insert_manipulate_false(self): - # Test two aspects of legacy insert with manipulate=False: - # 1. The return value is None or [None] as appropriate. - # 2. _id is not set on the passed-in document object. - collection = self.db.test_insert_manipulate_false - collection.drop() - oid = ObjectId() - doc = {'a': oid} - - try: - # The return value is None. - self.assertTrue(collection.insert(doc, manipulate=False) is None) - # insert() shouldn't set _id on the passed-in document object. - self.assertEqual({'a': oid}, doc) - - # Bulk insert. The return value is a list of None. - self.assertEqual([None], collection.insert([{}], manipulate=False)) - - docs = [{}, {}] - ids = collection.insert(docs, manipulate=False) - self.assertEqual([None, None], ids) - self.assertEqual([{}, {}], docs) - finally: - collection.drop() - - def test_continue_on_error(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test_continue_on_error") - collection = db.test_continue_on_error - oid = collection.insert({"one": 1}) - self.assertEqual(1, collection.count()) - - docs = [] - docs.append({"_id": oid, "two": 2}) # Duplicate _id. - docs.append({"three": 3}) - docs.append({"four": 4}) - docs.append({"five": 5}) - - with self.assertRaises(DuplicateKeyError): - collection.insert(docs, manipulate=False) - - self.assertEqual(1, collection.count()) - - with self.assertRaises(DuplicateKeyError): - collection.insert(docs, manipulate=False, continue_on_error=True) - - self.assertEqual(4, collection.count()) - - collection.remove({}, w=client_context.w) - - oid = collection.insert({"_id": oid, "one": 1}, w=0) - wait_until(lambda: 1 == collection.count(), 'insert 1 document') - - docs[0].pop("_id") - docs[2]["_id"] = oid - - with self.assertRaises(DuplicateKeyError): - collection.insert(docs, manipulate=False) - - self.assertEqual(3, collection.count()) - collection.insert(docs, manipulate=False, continue_on_error=True, w=0) - wait_until(lambda: 6 == collection.count(), 'insert 3 documents') - - def test_acknowledged_insert(self): - # Tests legacy insert. - db = self.db - db.drop_collection("test_acknowledged_insert") - collection = db.test_acknowledged_insert - - a = {"hello": "world"} - collection.insert(a) - collection.insert(a, w=0) - self.assertRaises(OperationFailure, - collection.insert, a) - - def test_insert_adds_id(self): - # Tests legacy insert. - doc = {"hello": "world"} - self.db.test.insert(doc) - self.assertTrue("_id" in doc) - - docs = [{"hello": "world"}, {"hello": "world"}] - self.db.test.insert(docs) - for doc in docs: - self.assertTrue("_id" in doc) - - def test_insert_large_batch(self): - # Tests legacy insert. - db = self.client.test_insert_large_batch - self.addCleanup(self.client.drop_database, 'test_insert_large_batch') - max_bson_size = self.client.max_bson_size - # Write commands are limited to 16MB + 16k per batch - big_string = 'x' * int(max_bson_size / 2) - - # Batch insert that requires 2 batches. - successful_insert = [{'x': big_string}, {'x': big_string}, - {'x': big_string}, {'x': big_string}] - db.collection_0.insert(successful_insert, w=1) - self.assertEqual(4, db.collection_0.count()) - - db.collection_0.drop() - - # Test that inserts fail after first error. - insert_second_fails = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id2', 'x': big_string}] - - with self.assertRaises(DuplicateKeyError): - db.collection_1.insert(insert_second_fails) - - self.assertEqual(1, db.collection_1.count()) - - db.collection_1.drop() - - # 2 batches, 2nd insert fails, don't continue on error. - self.assertTrue(db.collection_2.insert(insert_second_fails, w=0)) - wait_until(lambda: 1 == db.collection_2.count(), - 'insert 1 document', timeout=60) - - db.collection_2.drop() - - # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are - # dupes. Acknowledged, continue on error. - insert_two_failures = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id1', 'x': big_string}] - - with self.assertRaises(OperationFailure) as context: - db.collection_3.insert(insert_two_failures, - continue_on_error=True, w=1) - - self.assertIn('id1', str(context.exception)) - - # Only the first and third documents should be inserted. - self.assertEqual(2, db.collection_3.count()) - - db.collection_3.drop() - - # 2 batches, 2 errors, unacknowledged, continue on error. - db.collection_4.insert(insert_two_failures, continue_on_error=True, w=0) - - # Only the first and third documents are inserted. - wait_until(lambda: 2 == db.collection_4.count(), - 'insert 2 documents', timeout=60) - - db.collection_4.drop() - - def test_bad_dbref(self): - # Requires the legacy API to test. - c = self.db.test - c.drop() - - # Incomplete DBRefs. - self.assertRaises( - InvalidDocument, - c.insert_one, {'ref': {'$ref': 'collection'}}) - - self.assertRaises( - InvalidDocument, - c.insert_one, {'ref': {'$id': ObjectId()}}) - - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} - - - def test_update(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - - id1 = db.test.save({"x": 5}) - db.test.update({}, {"$inc": {"x": 1}}) - self.assertEqual(db.test.find_one(id1)["x"], 6) - - id2 = db.test.save({"x": 1}) - db.test.update({"x": 6}, {"$inc": {"x": 1}}) - self.assertEqual(db.test.find_one(id1)["x"], 7) - self.assertEqual(db.test.find_one(id2)["x"], 1) - - def test_update_manipulate(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - db.test.insert({'_id': 1}) - db.test.update({'_id': 1}, {'a': 1}, manipulate=True) - self.assertEqual( - {'_id': 1, 'a': 1}, - db.test.find_one()) - - class AddField(SONManipulator): - def transform_incoming(self, son, dummy): - son['field'] = 'value' - return son - - db.add_son_manipulator(AddField()) - db.test.update({'_id': 1}, {'a': 2}, manipulate=False) - self.assertEqual( - {'_id': 1, 'a': 2}, - db.test.find_one()) - - db.test.update({'_id': 1}, {'a': 3}, manipulate=True) - self.assertEqual( - {'_id': 1, 'a': 3, 'field': 'value'}, - db.test.find_one()) - - def test_update_nmodified(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - ismaster = self.client.admin.command('ismaster') - used_write_commands = (ismaster.get("maxWireVersion", 0) > 1) - - db.test.insert({'_id': 1}) - result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) - if used_write_commands: - self.assertEqual(1, result['nModified']) - else: - self.assertFalse('nModified' in result) - - # x is already 1. - result = db.test.update({'_id': 1}, {'$set': {'x': 1}}) - if used_write_commands: - self.assertEqual(0, result['nModified']) - else: - self.assertFalse('nModified' in result) - - def test_multi_update(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - - db.test.save({"x": 4, "y": 3}) - db.test.save({"x": 5, "y": 5}) - db.test.save({"x": 4, "y": 4}) - - db.test.update({"x": 4}, {"$set": {"y": 5}}, multi=True) - - self.assertEqual(3, db.test.count()) - for doc in db.test.find(): - self.assertEqual(5, doc["y"]) - - self.assertEqual(2, db.test.update({"x": 4}, {"$set": {"y": 6}}, - multi=True)["n"]) - - def test_upsert(self): - # Tests legacy update. - db = self.db - db.drop_collection("test") - - db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) - db.test.update({"page": "/"}, {"$inc": {"count": 1}}, upsert=True) - - self.assertEqual(1, db.test.count()) - self.assertEqual(2, db.test.find_one()["count"]) - - def test_acknowledged_update(self): - # Tests legacy update. - db = self.db - db.drop_collection("test_acknowledged_update") - collection = db.test_acknowledged_update - collection.create_index("x", unique=True) - - collection.insert({"x": 5}) - _id = collection.insert({"x": 4}) - - self.assertEqual( - None, collection.update({"_id": _id}, {"$inc": {"x": 1}}, w=0)) - - self.assertRaises(DuplicateKeyError, collection.update, - {"_id": _id}, {"$inc": {"x": 1}}) - - self.assertEqual(1, collection.update({"_id": _id}, - {"$inc": {"x": 2}})["n"]) - - self.assertEqual(0, collection.update({"_id": "foo"}, - {"$inc": {"x": 2}})["n"]) - db.drop_collection("test_acknowledged_update") - - def test_update_backward_compat(self): - # MongoDB versions >= 2.6.0 don't return the updatedExisting field - # and return upsert _id in an array subdocument. This test should - # pass regardless of server version or type (mongod/s). - # Tests legacy update. - c = self.db.test - c.drop() - oid = ObjectId() - res = c.update({'_id': oid}, {'$set': {'a': 'a'}}, upsert=True) - self.assertFalse(res.get('updatedExisting')) - self.assertEqual(oid, res.get('upserted')) - - res = c.update({'_id': oid}, {'$set': {'b': 'b'}}) - self.assertTrue(res.get('updatedExisting')) - - def test_save(self): - # Tests legacy save. - self.db.drop_collection("test_save") - collection = self.db.test_save - - # Save a doc with autogenerated id - _id = collection.save({"hello": "world"}) - self.assertEqual(collection.find_one()["_id"], _id) - self.assertTrue(isinstance(_id, ObjectId)) - - # Save a doc with explicit id - collection.save({"_id": "explicit_id", "hello": "bar"}) - doc = collection.find_one({"_id": "explicit_id"}) - self.assertEqual(doc['_id'], 'explicit_id') - self.assertEqual(doc['hello'], 'bar') - - # Save docs with _id field already present (shouldn't create new docs) - self.assertEqual(2, collection.count()) - collection.save({'_id': _id, 'hello': 'world'}) - self.assertEqual(2, collection.count()) - collection.save({'_id': 'explicit_id', 'hello': 'baz'}) - self.assertEqual(2, collection.count()) - self.assertEqual( - 'baz', - collection.find_one({'_id': 'explicit_id'})['hello'] - ) - - # Acknowledged mode. - collection.create_index("hello", unique=True) - # No exception, even though we duplicate the first doc's "hello" value - collection.save({'_id': 'explicit_id', 'hello': 'world'}, w=0) - - self.assertRaises( - DuplicateKeyError, - collection.save, - {'_id': 'explicit_id', 'hello': 'world'}) - self.db.drop_collection("test") - - def test_save_with_invalid_key(self): - if client_context.version.at_least(3, 5, 8): - raise SkipTest("MongoDB >= 3.5.8 allows dotted fields in updates") - # Tests legacy save. - self.db.drop_collection("test") - self.assertTrue(self.db.test.insert({"hello": "world"})) - doc = self.db.test.find_one() - doc['a.b'] = 'c' - self.assertRaises(OperationFailure, self.db.test.save, doc) - - def test_acknowledged_save(self): - # Tests legacy save. - db = self.db - db.drop_collection("test_acknowledged_save") - collection = db.test_acknowledged_save - collection.create_index("hello", unique=True) - - collection.save({"hello": "world"}) - collection.save({"hello": "world"}, w=0) - self.assertRaises(DuplicateKeyError, collection.save, - {"hello": "world"}) - db.drop_collection("test_acknowledged_save") - - def test_save_adds_id(self): - # Tests legacy save. - doc = {"hello": "jesse"} - self.db.test.save(doc) - self.assertTrue("_id" in doc) - - def test_save_returns_id(self): - doc = {"hello": "jesse"} - _id = self.db.test.save(doc) - self.assertTrue(isinstance(_id, ObjectId)) - self.assertEqual(_id, doc["_id"]) - doc["hi"] = "bernie" - _id = self.db.test.save(doc) - self.assertTrue(isinstance(_id, ObjectId)) - self.assertEqual(_id, doc["_id"]) - - def test_remove_one(self): - # Tests legacy remove. - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - self.db.test.insert({"x": 1}) - self.db.test.insert({"y": 1}) - self.db.test.insert({"z": 1}) - self.assertEqual(3, self.db.test.count()) - - self.db.test.remove(multi=False) - self.assertEqual(2, self.db.test.count()) - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - def test_remove_all(self): - # Tests legacy remove. - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - self.db.test.insert({"x": 1}) - self.db.test.insert({"y": 1}) - self.assertEqual(2, self.db.test.count()) - - self.db.test.remove() - self.assertEqual(0, self.db.test.count()) - - def test_remove_non_objectid(self): - # Tests legacy remove. - db = self.db - db.drop_collection("test") - - db.test.insert_one({"_id": 5}) - - self.assertEqual(1, db.test.count()) - db.test.remove(5) - self.assertEqual(0, db.test.count()) - - def test_write_large_document(self): - # Tests legacy insert, save, and update. - max_size = self.db.client.max_bson_size - half_size = int(max_size / 2) - self.assertEqual(max_size, 16777216) - - self.assertRaises(OperationFailure, self.db.test.insert, - {"foo": "x" * max_size}) - self.assertRaises(OperationFailure, self.db.test.save, - {"foo": "x" * max_size}) - self.assertRaises(OperationFailure, self.db.test.insert, - [{"x": 1}, {"foo": "x" * max_size}]) - self.db.test.insert([{"foo": "x" * half_size}, - {"foo": "x" * half_size}]) - - self.db.test.insert({"bar": "x"}) - # Use w=0 here to test legacy doc size checking in all server versions - self.assertRaises(DocumentTooLarge, self.db.test.update, - {"bar": "x"}, {"bar": "x" * (max_size - 14)}, w=0) - # This will pass with OP_UPDATE or the update command. - self.db.test.update({"bar": "x"}, {"bar": "x" * (max_size - 32)}) - - def test_last_error_options(self): - # Tests legacy write methods. - self.db.test.save({"x": 1}, w=1, wtimeout=1) - self.db.test.insert({"x": 1}, w=1, wtimeout=1) - self.db.test.remove({"x": 1}, w=1, wtimeout=1) - self.db.test.update({"x": 1}, {"y": 2}, w=1, wtimeout=1) - - if client_context.replica_set_name: - # client_context.w is the number of hosts in the replica set - w = client_context.w + 1 - - # MongoDB 2.8+ raises error code 100, CannotSatisfyWriteConcern, - # if w > number of members. Older versions just time out after 1 ms - # as if they had enough secondaries but some are lagging. They - # return an error with 'wtimeout': True and no code. - def wtimeout_err(f, *args, **kwargs): - try: - f(*args, **kwargs) - except WTimeoutError as exc: - self.assertIsNotNone(exc.details) - except OperationFailure as exc: - self.assertIsNotNone(exc.details) - self.assertEqual(100, exc.code, - "Unexpected error: %r" % exc) - else: - self.fail("%s should have failed" % f) - - coll = self.db.test - wtimeout_err(coll.save, {"x": 1}, w=w, wtimeout=1) - wtimeout_err(coll.insert, {"x": 1}, w=w, wtimeout=1) - wtimeout_err(coll.update, {"x": 1}, {"y": 2}, w=w, wtimeout=1) - wtimeout_err(coll.remove, {"x": 1}, w=w, wtimeout=1) - - # can't use fsync and j options together - self.assertRaises(ConfigurationError, self.db.test.insert, - {"_id": 1}, j=True, fsync=True) - - def test_find_and_modify(self): - c = self.db.test - c.drop() - c.insert({'_id': 1, 'i': 1}) - - # Test that we raise DuplicateKeyError when appropriate. - c.create_index('i', unique=True) - self.assertRaises(DuplicateKeyError, - c.find_and_modify, query={'i': 1, 'j': 1}, - update={'$set': {'k': 1}}, upsert=True) - c.drop_indexes() - - # Test correct findAndModify - self.assertEqual({'_id': 1, 'i': 1}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, remove=True)) - - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual(None, c.find_and_modify({'_id': 1}, - {'$inc': {'i': 1}}, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - upsert=True, new=True)) - - self.assertEqual({'_id': 1, 'i': 2}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - fields=['i'])) - self.assertEqual({'_id': 1, 'i': 4}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1})) - - # Test with full_response=True. - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, upsert=True, - full_response=True, - fields={'i': 1}) - self.assertEqual({'_id': 1, 'i': 5}, result["value"]) - self.assertEqual(True, - result["lastErrorObject"]["updatedExisting"]) - - result = c.find_and_modify({'_id': 2}, {'$inc': {'i': 1}}, - new=True, upsert=True, - full_response=True, - fields={'i': 1}) - self.assertEqual({'_id': 2, 'i': 1}, result["value"]) - self.assertEqual(False, - result["lastErrorObject"]["updatedExisting"]) - - class ExtendedDict(dict): - pass - - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1}) - self.assertFalse(isinstance(result, ExtendedDict)) - c = self.db.get_collection( - "test", codec_options=CodecOptions(document_class=ExtendedDict)) - result = c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, fields={'i': 1}) - self.assertTrue(isinstance(result, ExtendedDict)) - - def test_find_and_modify_with_sort(self): - c = self.db.test - c.drop() - for j in range(5): - c.insert({'j': j, 'i': 0}) - - sort = {'j': DESCENDING} - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = {'j': ASCENDING} - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = [('j', DESCENDING)] - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = [('j', ASCENDING)] - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = SON([('j', DESCENDING)]) - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = SON([('j', ASCENDING)]) - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - - try: - from collections import OrderedDict - sort = OrderedDict([('j', DESCENDING)]) - self.assertEqual(4, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - sort = OrderedDict([('j', ASCENDING)]) - self.assertEqual(0, c.find_and_modify({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) - except ImportError: - pass - # Test that a standard dict with two keys is rejected. - sort = {'j': DESCENDING, 'foo': DESCENDING} - self.assertRaises(TypeError, c.find_and_modify, - {}, {'$inc': {'i': 1}}, sort=sort) - - def test_find_and_modify_with_manipulator(self): - class AddCollectionNameManipulator(SONManipulator): - def will_copy(self): - return True - - def transform_incoming(self, son, dummy): - copy = SON(son) - if 'collection' in copy: - del copy['collection'] - return copy - - def transform_outgoing(self, son, collection): - copy = SON(son) - copy['collection'] = collection.name - return copy - - db = self.client.pymongo_test - db.add_son_manipulator(AddCollectionNameManipulator()) - - c = db.test - c.drop() - c.insert({'_id': 1, 'i': 1}) - - # Test correct findAndModify - # With manipulators - self.assertEqual({'_id': 1, 'i': 1, 'collection': 'test'}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - manipulate=True)) - self.assertEqual({'_id': 1, 'i': 3, 'collection': 'test'}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True, manipulate=True)) - # With out manipulators - self.assertEqual({'_id': 1, 'i': 3}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 5}, - c.find_and_modify({'_id': 1}, {'$inc': {'i': 1}}, - new=True)) - @client_context.require_version_max(4, 1, 0, -1) def test_group(self): db = self.db @@ -1022,167 +209,6 @@ def test_group_uuid_representation(self): coll.group([], {"_id": uu}, {"count": 0}, reduce)) - def test_auto_ref_and_deref(self): - # Legacy API. - db = self.client.pymongo_test - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - - db.test.a.remove({}) - db.test.b.remove({}) - db.test.c.remove({}) - - a = {"hello": "world"} - db.test.a.save(a) - - b = {"test": a} - db.test.b.save(b) - - c = {"another test": b} - db.test.c.save(c) - - a["hello"] = "mike" - db.test.a.save(a) - - self.assertEqual(db.test.a.find_one(), a) - self.assertEqual(db.test.b.find_one()["test"], a) - self.assertEqual(db.test.c.find_one()["another test"]["test"], a) - self.assertEqual(db.test.b.find_one(), b) - self.assertEqual(db.test.c.find_one()["another test"], b) - self.assertEqual(db.test.c.find_one(), c) - - def test_auto_ref_and_deref_list(self): - # Legacy API. - db = self.client.pymongo_test - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - - db.drop_collection("users") - db.drop_collection("messages") - - message_1 = {"title": "foo"} - db.messages.save(message_1) - message_2 = {"title": "bar"} - db.messages.save(message_2) - - user = {"messages": [message_1, message_2]} - db.users.save(user) - db.messages.update(message_1, {"title": "buzz"}) - - self.assertEqual("buzz", db.users.find_one()["messages"][0]["title"]) - self.assertEqual("bar", db.users.find_one()["messages"][1]["title"]) - - def test_object_to_dict_transformer(self): - # PYTHON-709: Some users rely on their custom SONManipulators to run - # before any other checks, so they can insert non-dict objects and - # have them dictified before the _id is inserted or any other - # processing. - # Tests legacy API elements. - class Thing(object): - def __init__(self, value): - self.value = value - - class ThingTransformer(SONManipulator): - def transform_incoming(self, thing, dummy): - return {'value': thing.value} - - db = self.client.foo - db.add_son_manipulator(ThingTransformer()) - t = Thing('value') - - db.test.remove() - db.test.insert([t]) - out = db.test.find_one() - self.assertEqual('value', out.get('value')) - - def test_son_manipulator_outgoing(self): - class Thing(object): - def __init__(self, value): - self.value = value - - class ThingTransformer(SONManipulator): - def transform_outgoing(self, doc, collection): - # We don't want this applied to the command return - # value in pymongo.cursor.Cursor. - if 'value' in doc: - return Thing(doc['value']) - return doc - - db = self.client.foo - db.add_son_manipulator(ThingTransformer()) - - db.test.delete_many({}) - db.test.insert_one({'value': 'value'}) - out = db.test.find_one() - self.assertTrue(isinstance(out, Thing)) - self.assertEqual('value', out.value) - - out = next(db.test.aggregate([], cursor={})) - self.assertTrue(isinstance(out, Thing)) - self.assertEqual('value', out.value) - - def test_son_manipulator_inheritance(self): - # Tests legacy API elements. - class Thing(object): - def __init__(self, value): - self.value = value - - class ThingTransformer(SONManipulator): - def transform_incoming(self, thing, dummy): - return {'value': thing.value} - - def transform_outgoing(self, son, dummy): - return Thing(son['value']) - - class Child(ThingTransformer): - pass - - db = self.client.foo - db.add_son_manipulator(Child()) - t = Thing('value') - - db.test.remove() - db.test.insert([t]) - out = db.test.find_one() - self.assertTrue(isinstance(out, Thing)) - self.assertEqual('value', out.value) - - def test_disabling_manipulators(self): - - class IncByTwo(SONManipulator): - def transform_outgoing(self, son, collection): - if 'foo' in son: - son['foo'] += 2 - return son - - db = self.client.pymongo_test - db.add_son_manipulator(IncByTwo()) - c = db.test - c.drop() - c.insert({'foo': 0}) - self.assertEqual(2, c.find_one()['foo']) - self.assertEqual(0, c.find_one(manipulate=False)['foo']) - - self.assertEqual(2, c.find_one(manipulate=True)['foo']) - c.drop() - - def test_manipulator_properties(self): - db = self.client.foo - self.assertEqual([], db.incoming_manipulators) - self.assertEqual([], db.incoming_copying_manipulators) - self.assertEqual([], db.outgoing_manipulators) - self.assertEqual([], db.outgoing_copying_manipulators) - db.add_son_manipulator(AutoReference(db)) - db.add_son_manipulator(NamespaceInjector()) - db.add_son_manipulator(ObjectIdShuffler()) - self.assertEqual(1, len(db.incoming_manipulators)) - self.assertEqual(db.incoming_manipulators, ['NamespaceInjector']) - self.assertEqual(2, len(db.incoming_copying_manipulators)) - for name in db.incoming_copying_manipulators: - self.assertTrue(name in ('ObjectIdShuffler', 'AutoReference')) - self.assertEqual([], db.outgoing_manipulators) - self.assertEqual(['AutoReference'], - db.outgoing_copying_manipulators) class TestLegacyBulk(BulkTestBase): @@ -1207,41 +233,6 @@ def test_find(self): # No error. bulk.find({}) - @client_context.require_version_min(3, 1, 9, -1) - def test_bypass_document_validation_bulk_op(self): - - # Test insert - self.coll.insert_one({"z": 0}) - self.db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - bulk = self.coll.initialize_ordered_bulk_op( - bypass_document_validation=False) - bulk.insert({"z": -1}) # error - self.assertRaises(BulkWriteError, bulk.execute) - self.assertEqual(0, self.coll.count({"z": -1})) - - bulk = self.coll.initialize_ordered_bulk_op( - bypass_document_validation=True) - bulk.insert({"z": -1}) - bulk.execute() - self.assertEqual(1, self.coll.count({"z": -1})) - - self.coll.insert_one({"z": 0}) - self.db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - bulk = self.coll.initialize_unordered_bulk_op( - bypass_document_validation=False) - bulk.insert({"z": -1}) # error - self.assertRaises(BulkWriteError, bulk.execute) - self.assertEqual(1, self.coll.count({"z": -1})) - - bulk = self.coll.initialize_unordered_bulk_op( - bypass_document_validation=True) - bulk.insert({"z": -1}) - bulk.execute() - self.assertEqual(2, self.coll.count({"z": -1})) - self.coll.drop() - def test_insert(self): expected = { 'nMatched': 0, diff --git a/test/test_monitoring.py b/test/test_monitoring.py index f46ebe69b3..46cfe87c4a 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -37,7 +37,8 @@ from test.utils import (EventListener, get_pool, rs_or_single_client, - single_client) + single_client, + wait_until) class TestCommandMonitoring(PyMongoTestCase): @@ -820,230 +821,6 @@ def test_non_bulk_writes(self): self.assertIsInstance(error.get('code'), int) self.assertIsInstance(error.get('errmsg'), str) - def test_legacy_writes(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - - coll = self.client.pymongo_test.test - coll.drop() - self.listener.results.clear() - - # Implied write concern insert - _id = coll.insert({'x': 1}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': _id, 'x': 1}])]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # Unacknowledged insert - self.listener.results.clear() - _id = coll.insert({'x': 1}, w=0) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': _id, 'x': 1}]), - ('writeConcern', {'w': 0})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertEqual(succeeded.reply, {'ok': 1}) - - # Explicit write concern insert - self.listener.results.clear() - _id = coll.insert({'x': 1}, w=1) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': _id, 'x': 1}]), - ('writeConcern', {'w': 1})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # remove all - self.listener.results.clear() - res = coll.remove({'x': 1}, w=1) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 1}), - ('limit', 0)])]), - ('writeConcern', {'w': 1})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(res['n'], reply.get('n')) - - # upsert - self.listener.results.clear() - oid = ObjectId() - coll.update({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True, w=1) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': oid}), - ('u', {'_id': oid, 'x': 1}), - ('multi', False), - ('upsert', True)])]), - ('writeConcern', {'w': 1})]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted')) - - # update one - self.listener.results.clear() - coll.update({'x': 1}, {'$inc': {'x': 1}}) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 1}), - ('u', {'$inc': {'x': 1}}), - ('multi', False), - ('upsert', False)])])]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # update many - self.listener.results.clear() - coll.update({'x': 2}, {'$inc': {'x': 1}}, multi=True) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 2}), - ('u', {'$inc': {'x': 1}}), - ('multi', True), - ('upsert', False)])])]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - # remove one - self.listener.results.clear() - coll.remove({'x': 3}, multi=False) - results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 3}), - ('limit', 1)])])]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - - self.assertEqual(0, coll.count_documents({})) - def test_insert_many(self): # This always uses the bulk API. coll = self.client.pymongo_test.test @@ -1086,51 +863,48 @@ def test_insert_many(self): self.assertEqual(documents, docs) self.assertEqual(6, count) - def test_legacy_insert_many(self): + def test_insert_many_unacknowledged(self): # On legacy servers this uses bulk OP_INSERT. - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - - coll = self.client.pymongo_test.test - coll.drop() - self.listener.results.clear() + coll = self.client.pymongo_test.test + coll.drop() + unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.listener.results.clear() - # Force two batches on legacy servers. - big = 'x' * (1024 * 1024 * 12) - docs = [{'_id': i, 'big': big} for i in range(6)] - coll.insert(docs) - results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) - documents = [] - count = 0 - operation_id = started[0].operation_id - self.assertIsInstance(operation_id, int) - for start, succeed in zip(started, succeeded): - self.assertIsInstance(start, monitoring.CommandStartedEvent) - cmd = sanitize_cmd(start.command) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) - self.assertIsInstance(start.request_id, int) - self.assertEqual(self.client.address, start.connection_id) - self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeed.duration_micros, int) - self.assertEqual(start.command_name, succeed.command_name) - self.assertEqual(start.request_id, succeed.request_id) - self.assertEqual(start.connection_id, succeed.connection_id) - self.assertEqual(start.operation_id, operation_id) - self.assertEqual(succeed.operation_id, operation_id) - reply = succeed.reply - self.assertEqual(1, reply.get('ok')) - count += reply.get('n', 0) - self.assertEqual(documents, docs) - self.assertEqual(6, count) + # Force two batches on legacy servers. + big = 'x' * (1024 * 1024 * 12) + docs = [{'_id': i, 'big': big} for i in range(6)] + unack_coll.insert_many(docs) + results = self.listener.results + started = results['started'] + succeeded = results['succeeded'] + self.assertEqual(0, len(results['failed'])) + documents = [] + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + cmd.pop('writeConcern', None) + self.assertEqual(['insert', 'ordered', 'documents'], + list(cmd.keys())) + self.assertEqual(coll.name, cmd['insert']) + self.assertIs(True, cmd['ordered']) + documents.extend(cmd['documents']) + self.assertEqual('pymongo_test', start.database_name) + self.assertEqual('insert', start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + self.assertEqual(1, succeed.reply.get('ok')) + self.assertEqual(documents, docs) + wait_until(lambda: coll.count_documents({}) == 6, + 'insert documents with w=0') def test_bulk_write(self): coll = self.client.pymongo_test.test diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 91d3d8257b..484ef740d3 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -88,7 +88,7 @@ def run_scenario(self): test_creator.create_tests() -def _retryable_single_statement_ops(coll): +def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), (coll.bulk_write, [[InsertOne({}), @@ -110,29 +110,6 @@ def _retryable_single_statement_ops(coll): ] -def retryable_single_statement_ops(coll): - return _retryable_single_statement_ops(coll) + [ - # Deprecated methods. - # Insert with single or multiple documents. - (coll.insert, [{}], {}), - (coll.insert, [[{}]], {}), - (coll.insert, [[{}, {}]], {}), - # Save with and without an _id. - (coll.save, [{}], {}), - (coll.save, [{'_id': ObjectId()}], {}), - # Non-multi update. - (coll.update, [{}, {'$set': {'a': 1}}], {}), - # Non-multi remove. - (coll.remove, [{}], {'multi': False}), - # Replace. - (coll.find_and_modify, [{}, {'a': 3}], {}), - # Update. - (coll.find_and_modify, [{}, {'$set': {'a': 1}}], {}), - # Delete. - (coll.find_and_modify, [{}, {}], {'remove': True}), - ] - - def non_retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), @@ -140,25 +117,6 @@ def non_retryable_single_statement_ops(coll): (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), (coll.update_many, [{}, {'$set': {'a': 1}}], {}), (coll.delete_many, [{}], {}), - # Deprecated methods. - # Multi remove. - (coll.remove, [{}], {}), - # Multi update. - (coll.update, [{}, {'$set': {'a': 1}}], {'multi': True}), - # Unacknowledged deprecated methods. - (coll.insert, [{}], {'w': 0}), - # Unacknowledged Non-multi update. - (coll.update, [{}, {'$set': {'a': 1}}], {'w': 0}), - # Unacknowledged Non-multi remove. - (coll.remove, [{}], {'multi': False, 'w': 0}), - # Unacknowledged Replace. - (coll.find_and_modify, [{}, {'a': 3}], {'writeConcern': {'w': 0}}), - # Unacknowledged Update. - (coll.find_and_modify, [{}, {'$set': {'a': 1}}], - {'writeConcern': {'w': 0}}), - # Unacknowledged Delete. - (coll.find_and_modify, [{}, {}], - {'remove': True, 'writeConcern': {'w': 0}}), ] @@ -533,7 +491,7 @@ def raise_connection_err_select_server(*args, **kwargs): topology.select_server = select_server raise ConnectionFailure('Connection refused') - for method, args, kwargs in _retryable_single_statement_ops( + for method, args, kwargs in retryable_single_statement_ops( client.db.retryable_write_test): listener.results.clear() topology.select_server = raise_connection_err_select_server diff --git a/test/test_write_concern.py b/test/test_write_concern.py index c6b803618a..5e1c0f73f9 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -17,11 +17,18 @@ import collections import unittest +from pymongo.errors import ConfigurationError from pymongo.write_concern import WriteConcern class TestWriteConcern(unittest.TestCase): + def test_invalid(self): + # Can't use fsync and j options together + self.assertRaises(ConfigurationError, WriteConcern, j=True, fsync=True) + # Can't use w=0 and j options together + self.assertRaises(ConfigurationError, WriteConcern, w=0, j=True) + def test_equality(self): concern = WriteConcern(j=True, wtimeout=3000) self.assertEqual(concern, WriteConcern(j=True, wtimeout=3000)) diff --git a/tools/benchmark.py b/tools/benchmark.py deleted file mode 100644 index a0fc74a121..0000000000 --- a/tools/benchmark.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2009-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MongoDB benchmarking suite.""" -from __future__ import print_function - -import time -import sys -sys.path[0:0] = [""] - -import datetime - -from pymongo import mongo_client -from pymongo import ASCENDING - -trials = 2 -per_trial = 5000 -batch_size = 100 -small = {} -medium = {"integer": 5, - "number": 5.05, - "boolean": False, - "array": ["test", "benchmark"] - } -# this is similar to the benchmark data posted to the user list -large = {"base_url": "http://www.example.com/test-me", - "total_word_count": 6743, - "access_time": datetime.datetime.utcnow(), - "meta_tags": {"description": "i am a long description string", - "author": "Holly Man", - "dynamically_created_meta_tag": "who know\n what" - }, - "page_structure": {"counted_tags": 3450, - "no_of_js_attached": 10, - "no_of_images": 6 - }, - "harvested_words": ["10gen", "web", "open", "source", "application", - "paas", "platform-as-a-service", "technology", - "helps", "developers", "focus", "building", - "mongodb", "mongo"] * 20 - } - - -def setup_insert(db, collection, object): - db.drop_collection(collection) - - -def insert(db, collection, object): - for i in range(per_trial): - to_insert = object.copy() - to_insert["x"] = i - db[collection].insert(to_insert) - - -def insert_batch(db, collection, object): - for i in range(per_trial / batch_size): - db[collection].insert([object] * batch_size) - - -def find_one(db, collection, x): - for _ in range(per_trial): - db[collection].find_one({"x": x}) - - -def find(db, collection, x): - for _ in range(per_trial): - for _ in db[collection].find({"x": x}): - pass - - -def timed(name, function, args=[], setup=None): - times = [] - for _ in range(trials): - if setup: - setup(*args) - start = time.time() - function(*args) - times.append(time.time() - start) - best_time = min(times) - print("{0:s}{1:d}".format(name + (60 - len(name)) * ".", per_trial / best_time)) - return best_time - - -def main(): - c = mongo_client.MongoClient(connectTimeoutMS=60*1000) # jack up timeout - c.drop_database("benchmark") - db = c.benchmark - - timed("insert (small, no index)", insert, - [db, 'small_none', small], setup_insert) - timed("insert (medium, no index)", insert, - [db, 'medium_none', medium], setup_insert) - timed("insert (large, no index)", insert, - [db, 'large_none', large], setup_insert) - - db.small_index.create_index("x", ASCENDING) - timed("insert (small, indexed)", insert, [db, 'small_index', small]) - db.medium_index.create_index("x", ASCENDING) - timed("insert (medium, indexed)", insert, [db, 'medium_index', medium]) - db.large_index.create_index("x", ASCENDING) - timed("insert (large, indexed)", insert, [db, 'large_index', large]) - - timed("batch insert (small, no index)", insert_batch, - [db, 'small_bulk', small], setup_insert) - timed("batch insert (medium, no index)", insert_batch, - [db, 'medium_bulk', medium], setup_insert) - timed("batch insert (large, no index)", insert_batch, - [db, 'large_bulk', large], setup_insert) - - timed("find_one (small, no index)", find_one, - [db, 'small_none', per_trial / 2]) - timed("find_one (medium, no index)", find_one, - [db, 'medium_none', per_trial / 2]) - timed("find_one (large, no index)", find_one, - [db, 'large_none', per_trial / 2]) - - timed("find_one (small, indexed)", find_one, - [db, 'small_index', per_trial / 2]) - timed("find_one (medium, indexed)", find_one, - [db, 'medium_index', per_trial / 2]) - timed("find_one (large, indexed)", find_one, - [db, 'large_index', per_trial / 2]) - - timed("find (small, no index)", find, [db, 'small_none', per_trial / 2]) - timed("find (medium, no index)", find, [db, 'medium_none', per_trial / 2]) - timed("find (large, no index)", find, [db, 'large_none', per_trial / 2]) - - timed("find (small, indexed)", find, [db, 'small_index', per_trial / 2]) - timed("find (medium, indexed)", find, [db, 'medium_index', per_trial / 2]) - timed("find (large, indexed)", find, [db, 'large_index', per_trial / 2]) - -# timed("find range (small, no index)", find, -# [db, 'small_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) -# timed("find range (medium, no index)", find, -# [db, 'medium_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) -# timed("find range (large, no index)", find, -# [db, 'large_none', -# {"$gt": per_trial / 4, "$lt": 3 * per_trial / 4}]) - - timed("find range (small, indexed)", find, - [db, 'small_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - timed("find range (medium, indexed)", find, - [db, 'medium_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - timed("find range (large, indexed)", find, - [db, 'large_index', - {"$gt": per_trial / 2, "$lt": per_trial / 2 + batch_size}]) - -if __name__ == "__main__": -# cProfile.run("main()") - main() From cf877e95c7ed231f61ccac20e9ecbb4920de3741 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 25 Jan 2021 12:46:54 -0800 Subject: [PATCH 0290/2111] PYTHON-2503 Always use time.monotonic For monotonic time needs. --- pymongo/client_session.py | 12 +++++----- pymongo/monitor.py | 14 ++++++------ pymongo/monotonic.py | 38 -------------------------------- pymongo/network.py | 8 +++---- pymongo/periodic_executor.py | 6 ++--- pymongo/pool.py | 14 ++++++------ pymongo/pyopenssl_context.py | 6 ++--- pymongo/server_description.py | 5 +++-- pymongo/topology.py | 18 +++++++-------- test/performance/perf_test.py | 10 ++++----- test/test_client.py | 5 ++--- test/test_monotonic.py | 41 ----------------------------------- test/test_session.py | 4 ++-- 13 files changed, 50 insertions(+), 131 deletions(-) delete mode 100644 pymongo/monotonic.py delete mode 100644 test/test_monotonic.py diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 13a70840db..245ba5d455 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -98,6 +98,7 @@ """ import collections +import time import uuid from collections.abc import Mapping as _Mapping @@ -107,7 +108,6 @@ from bson.son import SON from bson.timestamp import Timestamp -from pymongo import monotonic from pymongo.errors import (ConfigurationError, ConnectionFailure, InvalidOperation, @@ -336,7 +336,7 @@ def _max_time_expired_error(exc): def _within_time_limit(start_time): """Are we within the with_transaction retry limit?""" - return monotonic.time() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT + return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT class ClientSession(object): @@ -518,7 +518,7 @@ def callback(session, custom_arg, custom_kwarg=None): .. versionadded:: 3.9 """ - start_time = monotonic.time() + start_time = time.monotonic() while True: self.start_transaction( read_concern, write_concern, read_preference, @@ -797,7 +797,7 @@ def _txn_read_preference(self): def _apply_to(self, command, is_retryable, read_preference): self._check_ended() - self._server_session.last_use = monotonic.time() + self._server_session.last_use = time.monotonic() command['lsid'] = self._server_session.session_id if not self.in_transaction: @@ -842,7 +842,7 @@ class _ServerSession(object): def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)} - self.last_use = monotonic.time() + self.last_use = time.monotonic() self._transaction_id = 0 self.dirty = False self.generation = generation @@ -856,7 +856,7 @@ def mark_dirty(self): self.dirty = True def timed_out(self, session_timeout_minutes): - idle_seconds = monotonic.time() - self.last_use + idle_seconds = time.monotonic() - self.last_use # Timed out if we have less than a minute to live. return idle_seconds > (session_timeout_minutes - 1) * 60 diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 1579ec5132..7a3a4f22b6 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -16,6 +16,7 @@ import atexit import threading +import time import weakref from pymongo import common, periodic_executor @@ -23,7 +24,6 @@ OperationFailure, _OperationCancelled) from pymongo.ismaster import IsMaster -from pymongo.monotonic import time as _time from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -208,7 +208,7 @@ def _check_server(self): Returns a ServerDescription. """ - start = _time() + start = time.monotonic() try: try: return self._check_once() @@ -223,7 +223,7 @@ def _check_server(self): _sanitize(error) sd = self._server_description address = sd.address - duration = _time() - start + duration = time.monotonic() - start if self._publish: awaited = sd.is_server_type_known and sd.topology_version self._listeners.publish_server_heartbeat_failed( @@ -265,7 +265,7 @@ def _check_with_socket(self, conn): Can raise ConnectionFailure or OperationFailure. """ cluster_time = self._topology.max_cluster_time() - start = _time() + start = time.monotonic() if conn.more_to_come: # Read the next streaming isMaster (MongoDB 4.4+). response = IsMaster(conn._next_reply(), awaitable=True) @@ -280,7 +280,7 @@ def _check_with_socket(self, conn): else: # New connection handshake or polling isMaster (MongoDB <4.4). response = conn._ismaster(cluster_time, None, None, None) - return response, _time() - start + return response, time.monotonic() - start class SrvMonitor(MonitorBase): @@ -388,9 +388,9 @@ def _ping(self): with self._pool.get_socket({}) as sock_info: if self._executor._stopped: raise Exception('_RttMonitor closed') - start = _time() + start = time.monotonic() sock_info.ismaster() - return _time() - start + return time.monotonic() - start # Close monitors to cancel any in progress streaming checks before joining diff --git a/pymongo/monotonic.py b/pymongo/monotonic.py deleted file mode 100644 index 3be25b8b17..0000000000 --- a/pymongo/monotonic.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2014-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Time. Monotonic if possible. -""" - -from __future__ import absolute_import - -__all__ = ['time'] - -try: - # Patches standard time module. - # From https://pypi.python.org/pypi/Monotime. - import monotime -except ImportError: - pass - -try: - # From https://pypi.python.org/pypi/monotonic. - from monotonic import monotonic as time -except ImportError: - try: - # Monotime or Python 3. - from time import monotonic as time - except ImportError: - # Not monotonic. - from time import time diff --git a/pymongo/network.py b/pymongo/network.py index 5a5852f45d..682a51df73 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -18,6 +18,7 @@ import errno import socket import struct +import time from bson import _decode_all_selective @@ -31,7 +32,6 @@ ProtocolError, _OperationCancelled) from pymongo.message import _UNPACK_REPLY, _OpMsg -from pymongo.monotonic import time from pymongo.socket_checker import _errno_from_exception @@ -185,7 +185,7 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): """Receive a raw BSON message or raise socket.error.""" timeout = sock_info.sock.gettimeout() if timeout: - deadline = time() + timeout + deadline = time.monotonic() + timeout else: deadline = None # Ignore the response's request id. @@ -236,7 +236,7 @@ def wait_for_read(sock_info, deadline): # Wait up to 500ms for the socket to become readable and then # check for cancellation. if deadline: - timeout = max(min(deadline - time(), _POLL_TIMEOUT), 0.001) + timeout = max(min(deadline - time.monotonic(), _POLL_TIMEOUT), 0.001) else: timeout = _POLL_TIMEOUT readable = sock_info.socket_checker.select( @@ -245,7 +245,7 @@ def wait_for_read(sock_info, deadline): raise _OperationCancelled('isMaster cancelled') if readable: return - if deadline and time() > deadline: + if deadline and time.monotonic() > deadline: raise socket.timeout("timed out") def _receive_data_on_socket(sock_info, length, deadline): diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 09ff411201..e1690ee9b1 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -18,8 +18,6 @@ import time import weakref -from pymongo.monotonic import time as _time - class PeriodicExecutor(object): def __init__(self, interval, min_interval, target, name=None): @@ -135,8 +133,8 @@ def _run(self): if self._skip_sleep: self._skip_sleep = False else: - deadline = _time() + self._interval - while not self._stopped and _time() < deadline: + deadline = time.monotonic() + self._interval + while not self._stopped and time.monotonic() < deadline: time.sleep(self._min_interval) if self._event: break # Early wake. diff --git a/pymongo/pool.py b/pymongo/pool.py index 6dae7a644a..2b433875ca 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -12,6 +12,7 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. +import collections import contextlib import copy import ipaddress @@ -20,7 +21,7 @@ import socket import sys import threading -import collections +import time from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -48,7 +49,6 @@ OperationFailure, PyMongoError) from pymongo.ismaster import IsMaster -from pymongo.monotonic import time as _time from pymongo.monitoring import (ConnectionCheckOutFailedReason, ConnectionClosedReason) from pymongo.network import (command, @@ -250,7 +250,7 @@ def _raise_connection_failure(address, error, msg_prefix=None): def _cond_wait(condition, deadline): - timeout = deadline - _time() if deadline else None + timeout = deadline - time.monotonic() if deadline else None return condition.wait(timeout) @@ -502,7 +502,7 @@ def __init__(self, sock, pool, address, id): self.id = id self.authset = set() self.closed = False - self.last_checkin_time = _time() + self.last_checkin_time = time.monotonic() self.performed_handshake = False self.is_writable = False self.max_wire_version = MAX_WIRE_VERSION @@ -862,14 +862,14 @@ def add_server_api(self, command, session): _add_to_command(command, self.opts.server_api) def update_last_checkin_time(self): - self.last_checkin_time = _time() + self.last_checkin_time = time.monotonic() def update_is_writable(self, is_writable): self.is_writable = is_writable def idle_time_seconds(self): """Seconds since this socket was last checked into its pool.""" - return _time() - self.last_checkin_time + return time.monotonic() - self.last_checkin_time def _raise_connection_failure(self, error): # Catch *all* exceptions from socket methods and close the socket. In @@ -1336,7 +1336,7 @@ def _get_socket(self, all_credentials): # Get a free socket or create one. if self.opts.wait_queue_timeout: - deadline = _time() + self.opts.wait_queue_timeout + deadline = time.monotonic() + self.opts.wait_queue_timeout else: deadline = None diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 906e9db2df..3d5cb933f2 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -18,6 +18,7 @@ import socket as _socket import ssl as _stdlibssl +import time from errno import EINTR as _EINTR @@ -33,7 +34,6 @@ VerificationError as _SIVerificationError) from pymongo.errors import CertificateError as _CertificateError -from pymongo.monotonic import time as _time from pymongo.ocsp_support import ( _load_trusted_ca_certs, _ocsp_callback) @@ -98,14 +98,14 @@ def __init__(self, ctx, sock, suppress_ragged_eofs): def _call(self, call, *args, **kwargs): timeout = self.gettimeout() if timeout: - start = _time() + start = time.monotonic() while True: try: return call(*args, **kwargs) except _RETRY_ERRORS: self.socket_checker.select( self, True, True, timeout) - if timeout and _time() - start > timeout: + if timeout and time.monotonic() - start > timeout: raise _socket.timeout("timed out") continue diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 4a1fe38604..897faa3d33 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -14,10 +14,11 @@ """Represent one server the driver is connected to.""" +import time + from bson import EPOCH_NAIVE from pymongo.server_type import SERVER_TYPE from pymongo.ismaster import IsMaster -from pymongo.monotonic import time as _time class ServerDescription(object): @@ -67,7 +68,7 @@ def __init__( self._ls_timeout_minutes = ismaster.logical_session_timeout_minutes self._round_trip_time = round_trip_time self._me = ismaster.me - self._last_update_time = _time() + self._last_update_time = time.monotonic() self._error = error self._topology_version = ismaster.topology_version if error: diff --git a/pymongo/topology.py b/pymongo/topology.py index a3465597d4..194884a752 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -18,17 +18,14 @@ import queue import random import threading +import time import warnings import weakref from pymongo import (common, helpers, periodic_executor) -from pymongo.pool import PoolOptions -from pymongo.topology_description import (updated_topology_description, - _updated_topology_description_srv_polling, - TopologyDescription, - SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE) +from pymongo.client_session import _ServerSessionPool from pymongo.errors import (ConnectionFailure, ConfigurationError, NetworkTimeout, @@ -37,7 +34,7 @@ PyMongoError, ServerSelectionTimeoutError) from pymongo.monitor import SrvMonitor -from pymongo.monotonic import time as _time +from pymongo.pool import PoolOptions from pymongo.server import Server from pymongo.server_description import ServerDescription from pymongo.server_selectors import (any_server_selector, @@ -46,7 +43,10 @@ readable_server_selector, writable_server_selector, Selection) -from pymongo.client_session import _ServerSessionPool +from pymongo.topology_description import (updated_topology_description, + _updated_topology_description_srv_polling, + TopologyDescription, + SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE) def process_events_queue(queue_ref): @@ -200,7 +200,7 @@ def select_servers(self, def _select_servers_loop(self, selector, timeout, address): """select_servers() guts. Hold the lock when calling this.""" - now = _time() + now = time.monotonic() end_time = now + timeout server_descriptions = self._description.apply_selector( selector, address, custom_selector=self._settings.server_selector) @@ -221,7 +221,7 @@ def _select_servers_loop(self, selector, timeout, address): # held the lock until now. self._condition.wait(common.MIN_HEARTBEAT_INTERVAL) self._description.check_compatible() - now = _time() + now = time.monotonic() server_descriptions = self._description.apply_selector( selector, address, custom_selector=self._settings.server_selector) diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 68baadecbc..4a2ba2fea5 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -18,6 +18,7 @@ import os import sys import tempfile +import time import warnings try: @@ -31,7 +32,6 @@ from bson.json_util import loads from gridfs import GridFSBucket from pymongo import MongoClient -from pymongo.monotonic import time from test import client_context, host, port, unittest NUM_ITERATIONS = 100 @@ -59,11 +59,11 @@ def tearDownModule(): class Timer(object): def __enter__(self): - self.start = time() + self.start = time.monotonic() return self def __exit__(self, *args): - self.end = time() + self.end = time.monotonic() self.interval = self.end - self.start @@ -107,10 +107,10 @@ def percentile(self, percentile): def runTest(self): results = [] - start = time() + start = time.monotonic() self.max_iterations = NUM_ITERATIONS for i in range(NUM_ITERATIONS): - if time() - start > MAX_ITERATION_TIME: + if time.monotonic() - start > MAX_ITERATION_TIME: warnings.warn('Test timed out, completed %s iterations.' % i) break self.before() diff --git a/test/test_client.py b/test/test_client.py index ad628a224f..b5add1df79 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -53,7 +53,6 @@ from pymongo.monitoring import (ServerHeartbeatListener, ServerHeartbeatStartedEvent) from pymongo.mongo_client import MongoClient -from pymongo.monotonic import time as monotonic_time from pymongo.driver_info import DriverInfo from pymongo.pool import SocketInfo, _METADATA from pymongo.read_preferences import ReadPreference @@ -1549,9 +1548,9 @@ def stall_connect(*args, **kwargs): # Assert that application operations do not block. for _ in range(10): - start = monotonic_time() + start = time.monotonic() client.admin.command('ping') - total = monotonic_time() - start + total = time.monotonic() - start # Each ping command should not take more than 2 seconds self.assertLess(total, 2) diff --git a/test/test_monotonic.py b/test/test_monotonic.py deleted file mode 100644 index 411a25abcf..0000000000 --- a/test/test_monotonic.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2018-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the monotonic module.""" - -import sys - -sys.path[0:0] = [""] - -from pymongo.monotonic import time as pymongo_time - -from test import unittest - - -class TestMonotonic(unittest.TestCase): - def test_monotonic_time(self): - try: - from monotonic import monotonic - self.assertIs(monotonic, pymongo_time) - except ImportError: - if sys.version_info[:2] >= (3, 3): - from time import monotonic - self.assertIs(monotonic, pymongo_time) - else: - from time import time - self.assertIs(time, pymongo_time) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_session.py b/test/test_session.py index 0773d8a8cb..dbd3be5a28 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -17,6 +17,7 @@ import copy import os import sys +import time from io import BytesIO @@ -27,7 +28,6 @@ from pymongo.errors import (ConfigurationError, InvalidOperation, OperationFailure) -from pymongo.monotonic import time as _time from pymongo.read_concern import ReadConcern from test import IntegrationTest, client_context, db_user, db_pwd, unittest, SkipTest from test.utils import (ignore_deprecations, @@ -108,7 +108,7 @@ def _test_ops(self, client, *ops): for f, args, kw in ops: with client.start_session() as s: last_use = s._server_session.last_use - start = _time() + start = time.monotonic() self.assertLessEqual(last_use, start) listener.results.clear() # In case "f" modifies its inputs. From 7c1060cfecd7f2daf44e377d7a78bc4ad2565664 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jan 2021 16:22:00 -0800 Subject: [PATCH 0291/2111] PYTHON-1307 Remove SONManipulator APIs (#557) --- doc/api/pymongo/collection.rst | 4 +- doc/api/pymongo/cursor.rst | 4 +- doc/api/pymongo/index.rst | 1 - doc/api/pymongo/son_manipulator.rst | 6 - doc/changelog.rst | 25 +++- doc/migrate-to-pymongo4.rst | 32 +++++ pymongo/bulk.py | 6 +- pymongo/collection.py | 20 +-- pymongo/command_cursor.py | 3 +- pymongo/cursor.py | 19 +-- pymongo/database.py | 129 ------------------- pymongo/son_manipulator.py | 192 ---------------------------- test/test_cursor.py | 8 +- test/test_legacy_api.py | 23 +--- test/test_son_manipulator.py | 124 ------------------ 15 files changed, 72 insertions(+), 524 deletions(-) delete mode 100644 doc/api/pymongo/son_manipulator.rst delete mode 100644 pymongo/son_manipulator.py delete mode 100644 test/test_son_manipulator.py diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index ada01352e9..55712c45be 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -47,8 +47,8 @@ .. automethod:: aggregate .. automethod:: aggregate_raw_batches .. automethod:: watch - .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None) - .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) + .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) .. automethod:: find_one(filter=None, *args, **kwargs) .. automethod:: find_one_and_delete .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) diff --git a/doc/api/pymongo/cursor.rst b/doc/api/pymongo/cursor.rst index 4b9943f9a2..ffede12375 100644 --- a/doc/api/pymongo/cursor.rst +++ b/doc/api/pymongo/cursor.rst @@ -15,7 +15,7 @@ .. autoattribute:: EXHAUST :annotation: - .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, manipulate=True, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) + .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) :members: .. describe:: c[index] @@ -24,4 +24,4 @@ .. automethod:: __getitem__ - .. autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None) + .. autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 99688e841a..1a54a5b42d 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -47,7 +47,6 @@ Sub-modules: read_concern read_preferences results - son_manipulator server_api uri_parser write_concern diff --git a/doc/api/pymongo/son_manipulator.rst b/doc/api/pymongo/son_manipulator.rst deleted file mode 100644 index 87503e6f83..0000000000 --- a/doc/api/pymongo/son_manipulator.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`son_manipulator` -- Manipulators that can edit SON documents as they are saved or retrieved -================================================================================================= - -.. automodule:: pymongo.son_manipulator - :synopsis: Manipulators that can edit SON documents as they are saved or retrieved - :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index db50129f31..f900e8fcba 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -30,11 +30,11 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :meth:`pymongo.collection.Collection.ensure_index`. - Removed :meth:`pymongo.collection.Collection.reindex`. -- Removed :meth:`pymongo.collection.Collection.save` -- Removed :meth:`pymongo.collection.Collection.insert` -- Removed :meth:`pymongo.collection.Collection.update` -- Removed :meth:`pymongo.collection.Collection.remove` -- Removed :meth:`pymongo.collection.Collection.find_and_modify` +- Removed :meth:`pymongo.collection.Collection.save`. +- Removed :meth:`pymongo.collection.Collection.insert`. +- Removed :meth:`pymongo.collection.Collection.update`. +- Removed :meth:`pymongo.collection.Collection.remove`. +- Removed :meth:`pymongo.collection.Collection.find_and_modify`. - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`pymongo.cursor.Cursor.close` instead. - Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. @@ -43,6 +43,21 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. - Removed :mod:`pymongo.thread_util`. - Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. +- Removed :mod:`pymongo.son_manipulator`, + :class:`pymongo.son_manipulator.SONManipulator`, + :class:`pymongo.son_manipulator.ObjectIdInjector`, + :class:`pymongo.son_manipulator.ObjectIdShuffler`, + :class:`pymongo.son_manipulator.AutoReference`, + :class:`pymongo.son_manipulator.NamespaceInjector`, + :meth:`pymongo.database.Database.add_son_manipulator`, + :attr:`pymongo.database.Database.outgoing_copying_manipulators`, + :attr:`pymongo.database.Database.outgoing_manipulators`, + :attr:`pymongo.database.Database.incoming_copying_manipulators`, and + :attr:`pymongo.database.Database.incoming_manipulators`. +- Removed the ``manipulate`` parameter from + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, and + :meth:`~pymongo.cursor.Cursor`. Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index cf05114ca8..9840bcb46c 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -274,6 +274,38 @@ can be changed to this:: .. _reIndex command: https://docs.mongodb.com/manual/reference/command/reIndex/ +SONManipulator is removed +------------------------- + +Removed :mod:`pymongo.son_manipulator`, +:class:`pymongo.son_manipulator.SONManipulator`, +:class:`pymongo.son_manipulator.ObjectIdInjector`, +:class:`pymongo.son_manipulator.ObjectIdShuffler`, +:class:`pymongo.son_manipulator.AutoReference`, +:class:`pymongo.son_manipulator.NamespaceInjector`, +:meth:`pymongo.database.Database.add_son_manipulator`, +:attr:`pymongo.database.Database.outgoing_copying_manipulators`, +:attr:`pymongo.database.Database.outgoing_manipulators`, +:attr:`pymongo.database.Database.incoming_copying_manipulators`, and +:attr:`pymongo.database.Database.incoming_manipulators`. + +Removed the ``manipulate`` parameter from +:meth:`~pymongo.collection.Collection.find`, +:meth:`~pymongo.collection.Collection.find_one`, and +:meth:`~pymongo.cursor.Cursor`. + +The :class:`pymongo.son_manipulator.SONManipulator` API has limitations as a +technique for transforming your data and was deprecated in PyMongo 3.0. +Instead, it is more flexible and straightforward to transform outgoing +documents in your own code before passing them to PyMongo, and transform +incoming documents after receiving them from PyMongo. + +Alternatively, if your application uses the ``SONManipulator`` API to convert +custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and +:class:`~bson.codec_options.TypeRegistry` APIs may be a suitable alternative. +For more information, see the +:doc:`custom type example `. + Removed features with no migration path --------------------------------------- diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 573a4b7ee2..751feaf253 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -478,9 +478,9 @@ def execute_no_results(self, sock_info, generator): sock_info, operation['q'], doc, - operation['upsert'], - check_keys, - operation['multi'], + upsert=operation['upsert'], + check_keys=check_keys, + multi=operation['multi'], write_concern=write_concern, op_id=op_id, ordered=self.ordered, diff --git a/pymongo/collection.py b/pymongo/collection.py index d600f27079..32415b7a02 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -554,15 +554,9 @@ def _legacy_write(self, sock_info, name, cmd, op_id, def _insert_one( self, doc, ordered, - check_keys, manipulate, write_concern, op_id, bypass_doc_val, + check_keys, write_concern, op_id, bypass_doc_val, session): """Internal helper for inserting a single document.""" - if manipulate: - doc = self.__database._apply_incoming_manipulators(doc, self) - if not isinstance(doc, RawBSONDocument) and '_id' not in doc: - doc['_id'] = ObjectId() - doc = self.__database._apply_incoming_copying_manipulators(doc, - self) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged command = SON([('insert', self.name), @@ -646,7 +640,7 @@ def insert_one(self, document, bypass_document_validation=False, write_concern = self._write_concern_for(session) return InsertOneResult( self._insert_one( - document, ordered=True, check_keys=True, manipulate=False, + document, ordered=True, check_keys=True, write_concern=write_concern, op_id=None, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) @@ -712,14 +706,12 @@ def gen(): return InsertManyResult(inserted_ids, write_concern.acknowledged) def _update(self, sock_info, criteria, document, upsert=False, - check_keys=True, multi=False, manipulate=False, + check_keys=True, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None, retryable_write=False): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) - if manipulate: - document = self.__database._fix_incoming(document, self) collation = validate_collation_or_none(collation) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged @@ -801,7 +793,7 @@ def _update(self, sock_info, criteria, document, upsert=False, def _update_retryable( self, criteria, document, upsert=False, - check_keys=True, multi=False, manipulate=False, + check_keys=True, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None): @@ -809,7 +801,7 @@ def _update_retryable( def _update(session, sock_info, retryable_write): return self._update( sock_info, criteria, document, upsert=upsert, - check_keys=check_keys, multi=multi, manipulate=manipulate, + check_keys=check_keys, multi=multi, write_concern=write_concern, op_id=op_id, ordered=ordered, bypass_doc_val=bypass_doc_val, collation=collation, array_filters=array_filters, hint=hint, session=session, @@ -1346,8 +1338,6 @@ def find(self, *args, **kwargs): oplogReplay query flag. Default: False. - `batch_size` (optional): Limits the number of documents returned in a single batch. - - `manipulate` (optional): **DEPRECATED** - If True, apply any - outgoing SON manipulators before returning. Default: True. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 4c82f754ad..afcb764c7a 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -268,8 +268,7 @@ def _try_next(self, get_more_allowed): if not len(self.__data) and not self.__killed and get_more_allowed: self._refresh() if len(self.__data): - coll = self.__collection - return coll.database._fix_outgoing(self.__data.popleft(), coll) + return self.__data.popleft() else: return None diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 7ada005d8b..b051b068af 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -108,7 +108,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, - modifiers=None, batch_size=0, manipulate=True, + modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, @@ -181,7 +181,6 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__max_await_time_ms = None self.__max = max self.__min = min - self.__manipulate = manipulate self.__collation = validate_collation_or_none(collation) self.__return_key = return_key self.__show_record_id = show_record_id @@ -281,7 +280,7 @@ def _clone(self, deepcopy=True, base=None): values_to_clone = ("spec", "projection", "skip", "limit", "max_time_ms", "max_await_time_ms", "comment", "max", "min", "ordering", "explain", "hint", - "batch_size", "max_scan", "manipulate", + "batch_size", "max_scan", "query_flags", "modifiers", "collation", "empty", "show_record_id", "return_key", "allow_disk_use", "snapshot", "exhaust") @@ -1198,12 +1197,7 @@ def next(self): if self.__empty: raise StopIteration if len(self.__data) or self._refresh(): - if self.__manipulate: - _db = self.__collection.database - return _db._fix_outgoing(self.__data.popleft(), - self.__collection) - else: - return self.__data.popleft() + return self.__data.popleft() else: raise StopIteration @@ -1277,15 +1271,8 @@ def __init__(self, *args, **kwargs): .. mongodoc:: cursors """ - manipulate = kwargs.get('manipulate') - kwargs['manipulate'] = False super(RawBatchCursor, self).__init__(*args, **kwargs) - # Throw only after cursor's initialized, to prevent errors in __del__. - if manipulate: - raise InvalidOperation( - "Cannot use RawBatchCursor with manipulate=True") - def _unpack_response(self, response, cursor_id, codec_options, user_fields=None, legacy_response=False): return response.raw_response(cursor_id) diff --git a/pymongo/database.py b/pymongo/database.py index 73fcab760f..f965a7b532 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -30,7 +30,6 @@ OperationFailure) from pymongo.message import _first_batch from pymongo.read_preferences import ReadPreference -from pymongo.son_manipulator import SONManipulator _INDEX_REGEX = {"name": {"$regex": r"^(?!.*\$)"}} @@ -108,38 +107,6 @@ def __init__(self, client, name, codec_options=None, read_preference=None, self.__name = name self.__client = client - self.__incoming_manipulators = [] - self.__incoming_copying_manipulators = [] - self.__outgoing_manipulators = [] - self.__outgoing_copying_manipulators = [] - - def add_son_manipulator(self, manipulator): - """Add a new son manipulator to this database. - - **DEPRECATED** - `add_son_manipulator` is deprecated. - - .. versionchanged:: 3.0 - Deprecated add_son_manipulator. - """ - warnings.warn("add_son_manipulator is deprecated", - DeprecationWarning, stacklevel=2) - base = SONManipulator() - def method_overwritten(instance, method): - """Test if this method has been overridden.""" - return (getattr( - instance, method).__func__ != getattr(base, method).__func__) - - if manipulator.will_copy(): - if method_overwritten(manipulator, "transform_incoming"): - self.__incoming_copying_manipulators.insert(0, manipulator) - if method_overwritten(manipulator, "transform_outgoing"): - self.__outgoing_copying_manipulators.insert(0, manipulator) - else: - if method_overwritten(manipulator, "transform_incoming"): - self.__incoming_manipulators.insert(0, manipulator) - if method_overwritten(manipulator, "transform_outgoing"): - self.__outgoing_manipulators.insert(0, manipulator) - @property def client(self): """The client instance for this :class:`Database`.""" @@ -150,66 +117,6 @@ def name(self): """The name of this :class:`Database`.""" return self.__name - @property - def incoming_manipulators(self): - """**DEPRECATED**: All incoming SON manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.incoming_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__incoming_manipulators] - - @property - def incoming_copying_manipulators(self): - """**DEPRECATED**: All incoming SON copying manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.incoming_copying_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__incoming_copying_manipulators] - - @property - def outgoing_manipulators(self): - """**DEPRECATED**: All outgoing SON manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.outgoing_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__outgoing_manipulators] - - @property - def outgoing_copying_manipulators(self): - """**DEPRECATED**: All outgoing SON copying manipulators. - - .. versionchanged:: 3.5 - Deprecated. - - .. versionadded:: 2.0 - """ - warnings.warn("Database.outgoing_copying_manipulators() is deprecated", - DeprecationWarning, stacklevel=2) - - return [manipulator.__class__.__name__ - for manipulator in self.__outgoing_copying_manipulators] - def with_options(self, codec_options=None, read_preference=None, write_concern=None, read_concern=None): """Get a clone of this database changing the specified settings. @@ -407,42 +314,6 @@ def create_collection(self, name, codec_options=None, read_preference, write_concern, read_concern, session=s, **kwargs) - def _apply_incoming_manipulators(self, son, collection): - """Apply incoming manipulators to `son`.""" - for manipulator in self.__incoming_manipulators: - son = manipulator.transform_incoming(son, collection) - return son - - def _apply_incoming_copying_manipulators(self, son, collection): - """Apply incoming copying manipulators to `son`.""" - for manipulator in self.__incoming_copying_manipulators: - son = manipulator.transform_incoming(son, collection) - return son - - def _fix_incoming(self, son, collection): - """Apply manipulators to an incoming SON object before it gets stored. - - :Parameters: - - `son`: the son object going into the database - - `collection`: the collection the son object is being saved in - """ - son = self._apply_incoming_manipulators(son, collection) - son = self._apply_incoming_copying_manipulators(son, collection) - return son - - def _fix_outgoing(self, son, collection): - """Apply manipulators to a SON object as it comes out of the database. - - :Parameters: - - `son`: the son object coming out of the database - - `collection`: the collection the son object was saved in - """ - for manipulator in reversed(self.__outgoing_manipulators): - son = manipulator.transform_outgoing(son, collection) - for manipulator in reversed(self.__outgoing_copying_manipulators): - son = manipulator.transform_outgoing(son, collection) - return son - def aggregate(self, pipeline, session=None, **kwargs): """Perform a database-level aggregation. diff --git a/pymongo/son_manipulator.py b/pymongo/son_manipulator.py deleted file mode 100644 index a371d64938..0000000000 --- a/pymongo/son_manipulator.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""**DEPRECATED**: Manipulators that can edit SON objects as they enter and exit -a database. - -The :class:`~pymongo.son_manipulator.SONManipulator` API has limitations as a -technique for transforming your data. Instead, it is more flexible and -straightforward to transform outgoing documents in your own code before passing -them to PyMongo, and transform incoming documents after receiving them from -PyMongo. SON Manipulators will be removed from PyMongo in 4.0. - -PyMongo does **not** apply SON manipulators to documents passed to -the modern methods :meth:`~pymongo.collection.Collection.bulk_write`, -:meth:`~pymongo.collection.Collection.insert_one`, -:meth:`~pymongo.collection.Collection.insert_many`, -:meth:`~pymongo.collection.Collection.update_one`, or -:meth:`~pymongo.collection.Collection.update_many`. SON manipulators are -**not** applied to documents returned by the modern methods -:meth:`~pymongo.collection.Collection.find_one_and_delete`, -:meth:`~pymongo.collection.Collection.find_one_and_replace`, and -:meth:`~pymongo.collection.Collection.find_one_and_update`. -""" - -from collections import abc - -from bson.dbref import DBRef -from bson.objectid import ObjectId -from bson.son import SON - - -class SONManipulator(object): - """A base son manipulator. - - This manipulator just saves and restores objects without changing them. - """ - - def will_copy(self): - """Will this SON manipulator make a copy of the incoming document? - - Derived classes that do need to make a copy should override this - method, returning True instead of False. All non-copying manipulators - will be applied first (so that the user's document will be updated - appropriately), followed by copying manipulators. - """ - return False - - def transform_incoming(self, son, collection): - """Manipulate an incoming SON object. - - :Parameters: - - `son`: the SON object to be inserted into the database - - `collection`: the collection the object is being inserted into - """ - if self.will_copy(): - return SON(son) - return son - - def transform_outgoing(self, son, collection): - """Manipulate an outgoing SON object. - - :Parameters: - - `son`: the SON object being retrieved from the database - - `collection`: the collection this object was stored in - """ - if self.will_copy(): - return SON(son) - return son - - -class ObjectIdInjector(SONManipulator): - """A son manipulator that adds the _id field if it is missing. - - .. versionchanged:: 2.7 - ObjectIdInjector is no longer used by PyMongo, but remains in this - module for backwards compatibility. - """ - - def transform_incoming(self, son, collection): - """Add an _id field if it is missing. - """ - if not "_id" in son: - son["_id"] = ObjectId() - return son - - -# This is now handled during BSON encoding (for performance reasons), -# but I'm keeping this here as a reference for those implementing new -# SONManipulators. -class ObjectIdShuffler(SONManipulator): - """A son manipulator that moves _id to the first position. - """ - - def will_copy(self): - """We need to copy to be sure that we are dealing with SON, not a dict. - """ - return True - - def transform_incoming(self, son, collection): - """Move _id to the front if it's there. - """ - if not "_id" in son: - return son - transformed = SON({"_id": son["_id"]}) - transformed.update(son) - return transformed - - -class NamespaceInjector(SONManipulator): - """A son manipulator that adds the _ns field. - """ - - def transform_incoming(self, son, collection): - """Add the _ns field to the incoming object - """ - son["_ns"] = collection.name - return son - - -class AutoReference(SONManipulator): - """Transparently reference and de-reference already saved embedded objects. - - This manipulator should probably only be used when the NamespaceInjector is - also being used, otherwise it doesn't make too much sense - documents can - only be auto-referenced if they have an *_ns* field. - - NOTE: this will behave poorly if you have a circular reference. - - TODO: this only works for documents that are in the same database. To fix - this we'll need to add a DatabaseInjector that adds *_db* and then make - use of the optional *database* support for DBRefs. - """ - - def __init__(self, db): - self.database = db - - def will_copy(self): - """We need to copy so the user's document doesn't get transformed refs. - """ - return True - - def transform_incoming(self, son, collection): - """Replace embedded documents with DBRefs. - """ - - def transform_value(value): - if isinstance(value, abc.MutableMapping): - if "_id" in value and "_ns" in value: - return DBRef(value["_ns"], transform_value(value["_id"])) - else: - return transform_dict(SON(value)) - elif isinstance(value, list): - return [transform_value(v) for v in value] - return value - - def transform_dict(object): - for (key, value) in object.items(): - object[key] = transform_value(value) - return object - - return transform_dict(SON(son)) - - def transform_outgoing(self, son, collection): - """Replace DBRefs with embedded documents. - """ - - def transform_value(value): - if isinstance(value, DBRef): - return self.database.dereference(value) - elif isinstance(value, list): - return [transform_value(v) for v in value] - elif isinstance(value, abc.MutableMapping): - return transform_dict(SON(value)) - return value - - def transform_dict(object): - for (key, value) in object.items(): - object[key] = transform_value(value) - return object - - return transform_dict(SON(son)) diff --git a/test/test_cursor.py b/test/test_cursor.py index f7e726398f..582d0641e5 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -908,7 +908,7 @@ def test_rewind(self): self.assertEqual(cursor, cursor.rewind()) - # manipulate, oplog_reply, and snapshot are all deprecated. + # oplog_reply, and snapshot are all deprecated. @ignore_deprecations def test_clone(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -956,7 +956,6 @@ def test_clone(self): allow_partial_results=True, oplog_replay=True, batch_size=123, - manipulate=False, collation={'locale': 'en_US'}, hint=[("_id", 1)], max_scan=100, @@ -1466,11 +1465,6 @@ def test_find_raw(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - def test_manipulate(self): - c = self.db.test - with self.assertRaises(InvalidOperation): - c.find_raw_batches(manipulate=True) - def test_explain(self): c = self.db.test c.insert_one({}) diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index b4e65d1280..30dda8754a 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -14,10 +14,7 @@ """Test various legacy / deprecated API features.""" -import itertools import sys -import threading -import time import uuid sys.path[0:0] = [""] @@ -25,25 +22,16 @@ from bson.binary import PYTHON_LEGACY, STANDARD from bson.code import Code from bson.codec_options import CodecOptions -from bson.objectid import ObjectId from bson.son import SON -from pymongo import ASCENDING, DESCENDING, GEOHAYSTACK +from pymongo import ASCENDING, GEOHAYSTACK from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, ConfigurationError, - DuplicateKeyError, InvalidDocument, InvalidOperation, - OperationFailure, - WriteConcernError, - WTimeoutError) + OperationFailure) from pymongo.operations import IndexModel -from pymongo.son_manipulator import (AutoReference, - NamespaceInjector, - ObjectIdShuffler, - SONManipulator) -from pymongo.write_concern import WriteConcern -from test import client_context, qcheck, unittest, SkipTest +from test import client_context, unittest, SkipTest from test.test_client import IntegrationTest from test.test_bulk import BulkTestBase, BulkAuthorizationTestBase from test.utils import (DeprecationFilter, @@ -64,11 +52,6 @@ def setUpClass(cls): def tearDownClass(cls): cls.deprecation_filter.stop() - def test_add_son_manipulator_deprecation(self): - db = self.client.pymongo_test - self.assertRaises(DeprecationWarning, - lambda: db.add_son_manipulator(AutoReference(db))) - def test_geoHaystack_deprecation(self): self.addCleanup(self.db.test.drop) keys = [("pos", GEOHAYSTACK), ("type", ASCENDING)] diff --git a/test/test_son_manipulator.py b/test/test_son_manipulator.py deleted file mode 100644 index b4b9544f5a..0000000000 --- a/test/test_son_manipulator.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for SONManipulators. -""" - -import sys -import warnings - -sys.path[0:0] = [""] - -from bson.son import SON -from pymongo import MongoClient -from pymongo.son_manipulator import (NamespaceInjector, - ObjectIdInjector, - ObjectIdShuffler, - SONManipulator) -from test import client_context, qcheck, unittest - - -class TestSONManipulator(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - client = MongoClient( - client_context.host, client_context.port, connect=False) - cls.db = client.pymongo_test - - @classmethod - def tearDownClass(cls): - cls.warn_context.__exit__() - cls.warn_context = None - - def test_basic(self): - manip = SONManipulator() - collection = self.db.test - - def incoming_is_identity(son): - return son == manip.transform_incoming(son, collection) - qcheck.check_unittest(self, incoming_is_identity, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_id_injection(self): - manip = ObjectIdInjector() - collection = self.db.test - - def incoming_adds_id(son): - son = manip.transform_incoming(son, collection) - assert "_id" in son - return True - qcheck.check_unittest(self, incoming_adds_id, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_id_shuffling(self): - manip = ObjectIdShuffler() - collection = self.db.test - - def incoming_moves_id(son_in): - son = manip.transform_incoming(son_in, collection) - if not "_id" in son: - return True - for (k, v) in son.items(): - self.assertEqual(k, "_id") - break - # Key order matters in SON equality test, - # matching collections.OrderedDict - if isinstance(son_in, SON): - return son_in.to_dict() == son.to_dict() - return son_in == son - - self.assertTrue(incoming_moves_id({})) - self.assertTrue(incoming_moves_id({"_id": 12})) - self.assertTrue(incoming_moves_id({"hello": "world", "_id": 12})) - self.assertTrue(incoming_moves_id(SON([("hello", "world"), - ("_id", 12)]))) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - - def test_ns_injection(self): - manip = NamespaceInjector() - collection = self.db.test - - def incoming_adds_ns(son): - son = manip.transform_incoming(son, collection) - assert "_ns" in son - return son["_ns"] == collection.name - qcheck.check_unittest(self, incoming_adds_ns, - qcheck.gen_mongo_dict(3)) - - def outgoing_is_identity(son): - return son == manip.transform_outgoing(son, collection) - qcheck.check_unittest(self, outgoing_is_identity, - qcheck.gen_mongo_dict(3)) - -if __name__ == "__main__": - unittest.main() From ac4bacb66c66d9ab9d52671c467f68c057af767d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jan 2021 17:07:48 -0800 Subject: [PATCH 0292/2111] PYTHON-1323 Removed Collection.group (#559) --- doc/api/pymongo/collection.rst | 1 - doc/changelog.rst | 1 + doc/migrate-to-pymongo4.rst | 10 +++ pymongo/collection.py | 40 ----------- test/test_collation.py | 8 --- test/test_custom_types.py | 13 ---- test/test_legacy_api.py | 118 --------------------------------- test/test_read_preferences.py | 9 +-- 8 files changed, 12 insertions(+), 188 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 55712c45be..240318c0e9 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -69,5 +69,4 @@ .. automethod:: inline_map_reduce .. automethod:: initialize_unordered_bulk_op .. automethod:: initialize_ordered_bulk_op - .. automethod:: group .. automethod:: count diff --git a/doc/changelog.rst b/doc/changelog.rst index f900e8fcba..c184dcb0d2 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -35,6 +35,7 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.collection.Collection.update`. - Removed :meth:`pymongo.collection.Collection.remove`. - Removed :meth:`pymongo.collection.Collection.find_and_modify`. +- Removed :meth:`pymongo.collection.Collection.group`. - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`pymongo.cursor.Cursor.close` instead. - Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 9840bcb46c..b2b883a9df 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -237,6 +237,16 @@ Can be changed to this:: replaced_doc = collection.find_one_and_replace({'b': 1}, {'c': 1}) deleted_doc = collection.find_one_and_delete({'c': 1}) +Collection.group is removed +........................... + +Removed :meth:`pymongo.collection.Collection.group`. This method was +deprecated in PyMongo 3.5. MongoDB 4.2 removed the `group command`_. +Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage +instead. + +.. _group command: https://docs.mongodb.com/manual/reference/command/group/ + Collection.ensure_index is removed .................................. diff --git a/pymongo/collection.py b/pymongo/collection.py index 32415b7a02..4b69e5be1e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2366,46 +2366,6 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - def group(self, key, condition, initial, reduce, finalize=None, **kwargs): - """Perform a query similar to an SQL *group by* operation. - - **DEPRECATED** - The group command was deprecated in MongoDB 3.4. The - :meth:`~group` method is deprecated and will be removed in PyMongo 4.0. - Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce` - instead. - - .. versionchanged:: 3.5 - Deprecated the group method. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 2.2 - Removed deprecated argument: command - """ - warnings.warn("The group method is deprecated and will be removed in " - "PyMongo 4.0. Use the aggregate method with the $group " - "stage or the map_reduce method instead.", - DeprecationWarning, stacklevel=2) - group = {} - if isinstance(key, str): - group["$keyf"] = Code(key) - elif key is not None: - group = {"key": helpers._fields_list_to_dict(key, "key")} - group["ns"] = self.__name - group["$reduce"] = Code(reduce) - group["cond"] = condition - group["initial"] = initial - if finalize is not None: - group["finalize"] = Code(finalize) - - cmd = SON([("group", group)]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd.update(kwargs) - - with self._socket_for_reads(session=None) as (sock_info, slave_ok): - return self._command(sock_info, cmd, slave_ok, - collation=collation, - user_fields={'retval': 1})["retval"] - def rename(self, new_name, session=None, **kwargs): """Rename this collection. diff --git a/test/test_collation.py b/test/test_collation.py index c6d9baa466..4954af7bdd 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -188,14 +188,6 @@ def test_explain_command(self): self.collation.document, self.last_command_started()['explain']['collation']) - @raisesConfigurationErrorForOldMongoDB - @client_context.require_version_max(4, 1, 0, -1) - def test_group(self): - self.db.test.group('foo', {'foo': {'$gt': 42}}, {}, - 'function(a, b) { return a; }', - collation=self.collation) - self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_map_reduce(self): self.db.test.map_reduce('function() {}', 'function() {}', 'output', diff --git a/test/test_custom_types.py b/test/test_custom_types.py index c74da24795..89f530c951 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -588,19 +588,6 @@ def run_test(doc_cls): for doc_cls in [RawBSONDocument, OrderedDict]: run_test(doc_cls) - @client_context.require_version_max(4, 1, 0, -1) - def test_group_w_custom_type(self): - db = self.db - test = db.get_collection('test', codec_options=UNINT_CODECOPTS) - test.insert_many([ - {'sku': 'a', 'qty': UndecipherableInt64Type(2)}, - {'sku': 'b', 'qty': UndecipherableInt64Type(5)}, - {'sku': 'a', 'qty': UndecipherableInt64Type(1)}]) - - self.assertEqual([{'sku': 'b', 'qty': UndecipherableInt64Type(5)},], - test.group(["sku", "qty"], {"sku": "b"}, {}, - "function (obj, prev) { }")) - def test_aggregate_w_custom_type_decoder(self): db = self.db db.test.insert_many([ diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 30dda8754a..333aaeb89a 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -74,124 +74,6 @@ def setUpClass(cls): def tearDownClass(cls): cls.deprecation_filter.stop() - @client_context.require_version_max(4, 1, 0, -1) - def test_group(self): - db = self.db - db.drop_collection("test") - - self.assertEqual([], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - db.test.insert_many([{"a": 2}, {"b": 5}, {"a": 1}]) - - self.assertEqual([{"count": 3}], - db.test.group([], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - self.assertEqual([{"count": 1}], - db.test.group([], {"a": {"$gt": 1}}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - db.test.insert_one({"a": 2, "b": 3}) - - self.assertEqual([{"a": 2, "count": 2}, - {"a": None, "count": 1}, - {"a": 1, "count": 1}], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - # modifying finalize - self.assertEqual([{"a": 2, "count": 3}, - {"a": None, "count": 2}, - {"a": 1, "count": 2}], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { obj.count++; }")) - - # returning finalize - self.assertEqual([2, 1, 1], - db.test.group(["a"], {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { return obj.count; }")) - - # keyf - self.assertEqual([2, 2], - db.test.group("function (obj) { if (obj.a == 2) " - "{ return {a: true} }; " - "return {b: true}; }", {}, {"count": 0}, - "function (obj, prev) " - "{ prev.count++; }", - "function (obj) { return obj.count; }")) - - # no key - self.assertEqual([{"count": 4}], - db.test.group(None, {}, {"count": 0}, - "function (obj, prev) { prev.count++; }" - )) - - self.assertRaises(OperationFailure, db.test.group, - [], {}, {}, "5 ++ 5") - - @client_context.require_version_max(4, 1, 0, -1) - def test_group_with_scope(self): - db = self.db - db.drop_collection("test") - db.test.insert_many([{"a": 1}, {"b": 1}]) - - reduce_function = "function (obj, prev) { prev.count += inc_value; }" - - self.assertEqual(2, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 1}))[0]['count']) - self.assertEqual(4, db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 2}))[0]['count']) - - self.assertEqual(1, - db.test.group([], {}, {"count": 0}, - Code(reduce_function, - {"inc_value": 0.5}))[0]['count']) - - self.assertEqual(2, db.test.group( - [], {}, {"count": 0}, - Code(reduce_function, {"inc_value": 1}))[0]['count']) - - self.assertEqual(4, db.test.group( - [], {}, {"count": 0}, - Code(reduce_function, {"inc_value": 2}))[0]['count']) - - self.assertEqual(1, db.test.group( - [], {}, {"count": 0}, - Code(reduce_function, {"inc_value": 0.5}))[0]['count']) - - @client_context.require_version_max(4, 1, 0, -1) - def test_group_uuid_representation(self): - db = self.db - coll = db.uuid - coll.drop() - uu = uuid.uuid4() - coll.insert_one({"_id": uu, "a": 2}) - coll.insert_one({"_id": uuid.uuid4(), "a": 1}) - - reduce = "function (obj, prev) { prev.count++; }" - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - self.assertEqual([], - coll.group([], {"_id": uu}, - {"count": 0}, reduce)) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual([{"count": 1}], - coll.group([], {"_id": uu}, - {"count": 0}, reduce)) - class TestLegacyBulk(BulkTestBase): diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 2bb1c57f29..9515099a04 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -360,7 +360,7 @@ def setUpClass(cls): if client_context.auth_enabled: cls.c.admin.authenticate(db_user, db_pwd) cls.client_version = Version.from_client(cls.c) - # mapReduce and group fail with no collection + # mapReduce fails if the collection does not exist. coll = cls.c.pymongo_test.get_collection( 'test', write_concern=WriteConcern(w=client_context.w)) coll.insert_one({}) @@ -431,13 +431,6 @@ def test_create_collection(self): lambda: self.c.pymongo_test.create_collection( 'some_collection%s' % random.randint(0, sys.maxsize))) - @client_context.require_version_max(4, 1, 0, -1) - def test_group(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - self._test_coll_helper(True, self.c.pymongo_test.test, 'group', - {'a': 1}, {}, {}, 'function() { }') - def test_map_reduce(self): self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce', 'function() { }', 'function() { }', From ab35e0df7f5dc3e6e87a863f6a7dfd426a42997a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 26 Jan 2021 10:46:35 -0800 Subject: [PATCH 0293/2111] PYTHON-1326 Remove the "useCursor" aggregate option (#560) --- doc/changelog.rst | 2 ++ doc/migrate-to-pymongo4.rst | 8 ++++++++ pymongo/aggregation.py | 11 +---------- pymongo/collection.py | 19 ++++--------------- test/test_collection.py | 16 ++-------------- 5 files changed, 17 insertions(+), 39 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c184dcb0d2..45ad3b085c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -36,6 +36,8 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.collection.Collection.remove`. - Removed :meth:`pymongo.collection.Collection.find_and_modify`. - Removed :meth:`pymongo.collection.Collection.group`. +- Removed the ``useCursor`` option for + :meth:`~pymongo.collection.Collection.aggregate`. - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`pymongo.cursor.Cursor.close` instead. - Removed :meth:`pymongo.mongo_client.MongoClient.kill_cursors`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index b2b883a9df..1cdf0e59d1 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -140,6 +140,14 @@ can be changed to this:: Collection ---------- +The useCursor option for Collection.aggregate is removed +........................................................ + +Removed the ``useCursor`` option for +:meth:`~pymongo.collection.Collection.aggregate` which was deprecated in +PyMongo 3.6. The option was only necessary when upgrading from MongoDB 2.4 +to MongoDB 2.6. + Collection.insert is removed ............................ diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 7383a35111..438a3421bc 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -169,15 +169,6 @@ def get_cursor(self, session, server, sock_info, slave_ok): class _CollectionAggregationCommand(_AggregationCommand): - def __init__(self, *args, **kwargs): - # Pop additional option and initialize parent class. - use_cursor = kwargs.pop("use_cursor", True) - super(_CollectionAggregationCommand, self).__init__(*args, **kwargs) - - # Remove the cursor document if the user has set use_cursor to False. - self._use_cursor = use_cursor - if not self._use_cursor: - self._options.pop("cursor", None) @property def _aggregation_target(self): @@ -201,7 +192,7 @@ def __init__(self, *args, **kwargs): super(_CollectionRawAggregationCommand, self).__init__(*args, **kwargs) # For raw-batches, we set the initial batchSize for the cursor to 0. - if self._use_cursor and not self._performs_write: + if not self._performs_write: self._options["cursor"]["batchSize"] = 0 diff --git a/pymongo/collection.py b/pymongo/collection.py index 4b69e5be1e..bf6d8beec3 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2124,19 +2124,9 @@ def options(self, session=None): def _aggregate(self, aggregation_command, pipeline, cursor_class, session, explicit_session, **kwargs): - # Remove things that are not command options. - use_cursor = True - if "useCursor" in kwargs: - warnings.warn( - "The useCursor option is deprecated " - "and will be removed in PyMongo 4.0", - DeprecationWarning, stacklevel=2) - use_cursor = common.validate_boolean( - "useCursor", kwargs.pop("useCursor", True)) - cmd = aggregation_command( self, cursor_class, pipeline, kwargs, explicit_session, - user_fields={'cursor': {'firstBatch': 1}}, use_cursor=use_cursor) + user_fields={'cursor': {'firstBatch': 1}}) return self.__database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), session, retryable=not cmd._performs_write) @@ -2155,13 +2145,10 @@ def aggregate(self, pipeline, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow the operation to run in milliseconds. - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor, or `useCursor` is - ``False``. + batch. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. - - `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0. The :meth:`aggregate` method obeys the :attr:`read_preference` of this :class:`Collection`, except when ``$out`` or ``$merge`` are used, in @@ -2186,6 +2173,8 @@ def aggregate(self, pipeline, session=None, **kwargs): A :class:`~pymongo.command_cursor.CommandCursor` over the result set. + .. versionchanged:: 4.0 + Removed the ``useCursor`` option. .. versionchanged:: 3.9 Apply this collection's read concern to pipelines containing the `$out` stage when connected to MongoDB >= 4.2. diff --git a/test/test_collection.py b/test/test_collection.py index 0ac4aed2a7..fe4bf28ad6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1615,12 +1615,7 @@ def test_aggregate(self): self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} - # MongoDB 3.5.1+ requires either the 'cursor' or 'explain' options. - if client_context.version.at_least(3, 5, 1): - result = db.test.aggregate([pipeline]) - else: - result = db.test.aggregate([pipeline], useCursor=False) - + result = db.test.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) self.assertEqual([{'foo': [1, 2]}], list(result)) @@ -1639,11 +1634,7 @@ def test_aggregate_raw_bson(self): coll = db.get_collection( 'test', codec_options=CodecOptions(document_class=RawBSONDocument)) - # MongoDB 3.5.1+ requires either the 'cursor' or 'explain' options. - if client_context.version.at_least(3, 5, 1): - result = coll.aggregate([pipeline]) - else: - result = coll.aggregate([pipeline], useCursor=False) + result = coll.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) first_result = next(result) self.assertIsInstance(first_result, RawBSONDocument) @@ -1655,9 +1646,6 @@ def test_aggregation_cursor_validation(self): cursor = db.test.aggregate([projection], cursor={}) self.assertTrue(isinstance(cursor, CommandCursor)) - cursor = db.test.aggregate([projection], useCursor=True) - self.assertTrue(isinstance(cursor, CommandCursor)) - def test_aggregation_cursor(self): db = self.db if client_context.has_secondaries: From 1cf8eb31b5e24f4af962c5cf3b83b088be3047d7 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 25 Jan 2021 15:57:11 -0800 Subject: [PATCH 0294/2111] PYTHON-2505 Use match_hostname from the ssl module --- THIRD-PARTY-NOTICES | 54 -------------- pymongo/pool.py | 5 +- pymongo/ssl_match_hostname.py | 132 ---------------------------------- 3 files changed, 2 insertions(+), 189 deletions(-) delete mode 100644 pymongo/ssl_match_hostname.py diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 0bbd9b1644..4f2edb8660 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -95,57 +95,3 @@ Unicode Standard, and to make copies of this file in any form for internal or external distribution as long as this notice remains attached. -4) License Notice for ssl_match_hostname.py -------------------------------------------- - -Python License (Python-2.0) - -Python License, Version 2 (Python-2.0) - -PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 --------------------------------------------- - -1. This LICENSE AGREEMENT is between the Python Software Foundation -("PSF"), and the Individual or Organization ("Licensee") accessing and -otherwise using this software ("Python") in source or binary form and -its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, PSF -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python -alone or in any derivative version, provided, however, that PSF's -License Agreement and PSF's notice of copyright, i.e., "Copyright (c) -2001-2013 Python Software Foundation; All Rights Reserved" are retained in -Python alone or in any derivative version prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python. - -4. PSF is making Python available to Licensee on an "AS IS" -basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any -relationship of agency, partnership, or joint venture between PSF and -Licensee. This License Agreement does not grant permission to use PSF -trademarks or trade name in a trademark sense to endorse or promote -products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using Python, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. diff --git a/pymongo/pool.py b/pymongo/pool.py index 2b433875ca..33a93105cc 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -18,6 +18,7 @@ import ipaddress import os import platform +import ssl import socket import sys import threading @@ -57,8 +58,6 @@ from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -# Always use our backport so we always have support for IP address matching -from pymongo.ssl_match_hostname import match_hostname from pymongo.ssl_support import ( SSLError as _SSLError, HAS_SNI as _HAVE_SNI, @@ -1017,7 +1016,7 @@ def _configured_socket(address, options): getattr(ssl_context, "check_hostname", False) and options.ssl_match_hostname): try: - match_hostname(sock.getpeercert(), hostname=host) + ssl.match_hostname(sock.getpeercert(), hostname=host) except CertificateError: sock.close() raise diff --git a/pymongo/ssl_match_hostname.py b/pymongo/ssl_match_hostname.py deleted file mode 100644 index 8da3aa5d3a..0000000000 --- a/pymongo/ssl_match_hostname.py +++ /dev/null @@ -1,132 +0,0 @@ -# Backport of the match_hostname logic from python 3.5, with small -# changes to support IP address matching on python 2.7 and 3.4. - -import re -import sys - -try: - # Python 3.4+, or the ipaddress module from pypi. - from ipaddress import ip_address -except ImportError: - ip_address = lambda address: None - -# ipaddress.ip_address requires unicode -if sys.version_info[0] < 3: - _unicode = unicode -else: - _unicode = lambda value: value - -from pymongo.errors import CertificateError - -def _dnsname_match(dn, hostname, max_wildcards=1): - """Matching according to RFC 6125, section 6.4.3 - - http://tools.ietf.org/html/rfc6125#section-6.4.3 - """ - pats = [] - if not dn: - return False - - parts = dn.split(r'.') - leftmost = parts[0] - remainder = parts[1:] - - wildcards = leftmost.count('*') - if wildcards > max_wildcards: - # Issue #17980: avoid denials of service by refusing more - # than one wildcard per fragment. A survey of established - # policy among SSL implementations showed it to be a - # reasonable choice. - raise CertificateError( - "too many wildcards in certificate DNS name: " + repr(dn)) - - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - # RFC 6125, section 6.4.3, subitem 1. - # The client SHOULD NOT attempt to match a presented identifier in which - # the wildcard character comprises a label other than the left-most label. - if leftmost == '*': - # When '*' is a fragment by itself, it matches a non-empty dotless - # fragment. - pats.append('[^.]+') - elif leftmost.startswith('xn--') or hostname.startswith('xn--'): - # RFC 6125, section 6.4.3, subitem 3. - # The client SHOULD NOT attempt to match a presented identifier - # where the wildcard character is embedded within an A-label or - # U-label of an internationalized domain name. - pats.append(re.escape(leftmost)) - else: - # Otherwise, '*' matches any dotless string, e.g. www* - pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) - - # add the remaining fragments, ignore any wildcards - for frag in remainder: - pats.append(re.escape(frag)) - - pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) - return pat.match(hostname) - - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - ip = ip_address(_unicode(ipname).rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed. - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - host_ip = ip_address(_unicode(hostname)) - except (ValueError, UnicodeError): - # Not an IP address (common case) - host_ip = None - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. - if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") From 2565b4d2913db2ab47990ac74e517e9faf854cf3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 26 Jan 2021 12:49:49 -0800 Subject: [PATCH 0295/2111] PYTHON-1312 Remove Database.add_user and Database.remove_user (#561) --- doc/changelog.rst | 2 + doc/migrate-to-pymongo4.rst | 33 +++++++ pymongo/database.py | 173 +----------------------------------- test/test_database.py | 150 +------------------------------ test/test_session.py | 13 --- 5 files changed, 40 insertions(+), 331 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 45ad3b085c..ae0e3e65e6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -27,6 +27,8 @@ Breaking Changes in 4.0 :meth:`pymongo.database.Database.last_status`, :meth:`pymongo.database.Database.previous_error`, :meth:`pymongo.database.Database.reset_error_history`. +- Removed :meth:`pymongo.database.Database.add_user` and + :meth:`pymongo.database.Database.remove_user`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :meth:`pymongo.collection.Collection.ensure_index`. - Removed :meth:`pymongo.collection.Collection.reindex`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 1cdf0e59d1..059483b09d 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -136,6 +136,39 @@ can be changed to this:: names = client.list_collection_names() non_system_names = client.list_collection_names(filter={"name": {"$regex": r"^(?!system\\.)"}}) +Database.add_user is removed +............................ + +Removed :meth:`pymongo.database.Database.add_user` which was deprecated in +PyMongo 3.6. Use the `createUser command`_ or `updateUser command`_ instead. +To create a user:: + + db.command("createUser", "admin", pwd="password", roles=["dbAdmin"]) + +To create a read-only user:: + + db.command("createUser", "user", pwd="password", roles=["read"]) + +To change a password:: + + db.command("updateUser", "user", pwd="newpassword") + +Or change roles:: + + db.command("updateUser", "user", roles=["readWrite"]) + +.. _createUser command: https://docs.mongodb.com/manual/reference/command/createUser/ +.. _updateUser command: https://docs.mongodb.com/manual/reference/command/updateUser/ + +Database.remove_user is removed +............................... + +Removed :meth:`pymongo.database.Database.remove_user` which was deprecated in +PyMongo 3.6. Use the `dropUser command`_ instead:: + + db.command("dropUser", "user") + +.. _dropUser command: https://docs.mongodb.com/manual/reference/command/createUser/ Collection ---------- diff --git a/pymongo/database.py b/pymongo/database.py index f965a7b532..b1ae52d939 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -25,9 +25,7 @@ from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor from pymongo.errors import (CollectionInvalid, - ConfigurationError, - InvalidName, - OperationFailure) + InvalidName) from pymongo.message import _first_batch from pymongo.read_preferences import ReadPreference @@ -982,175 +980,6 @@ def __next__(self): next = __next__ - def _default_role(self, read_only): - """Return the default user role for this database.""" - if self.name == "admin": - if read_only: - return "readAnyDatabase" - else: - return "root" - else: - if read_only: - return "read" - else: - return "dbOwner" - - def _create_or_update_user( - self, create, name, password, read_only, session=None, **kwargs): - """Use a command to create (if create=True) or modify a user. - """ - opts = {} - if read_only or (create and "roles" not in kwargs): - warnings.warn("Creating a user with the read_only option " - "or without roles is deprecated in MongoDB " - ">= 2.6", DeprecationWarning) - - opts["roles"] = [self._default_role(read_only)] - - if read_only: - warnings.warn("The read_only option is deprecated in MongoDB " - ">= 2.6, use 'roles' instead", DeprecationWarning) - - if password is not None: - if "digestPassword" in kwargs: - raise ConfigurationError("The digestPassword option is not " - "supported via add_user. Please use " - "db.command('createUser', ...) " - "instead for this option.") - opts["pwd"] = password - - # Don't send {} as writeConcern. - if self.write_concern.acknowledged and self.write_concern.document: - opts["writeConcern"] = self.write_concern.document - opts.update(kwargs) - - if create: - command_name = "createUser" - else: - command_name = "updateUser" - - self.command(command_name, name, session=session, **opts) - - def add_user(self, name, password=None, read_only=None, session=None, - **kwargs): - """**DEPRECATED**: Create user `name` with password `password`. - - Add a new user with permissions for this :class:`Database`. - - .. note:: Will change the password if user `name` already exists. - - .. note:: add_user is deprecated and will be removed in PyMongo - 4.0. Starting with MongoDB 2.6 user management is handled with four - database commands, createUser_, usersInfo_, updateUser_, and - dropUser_. - - To create a user:: - - db.command("createUser", "admin", pwd="password", roles=["root"]) - - To create a read-only user:: - - db.command("createUser", "user", pwd="password", roles=["read"]) - - To change a password:: - - db.command("updateUser", "user", pwd="newpassword") - - Or change roles:: - - db.command("updateUser", "user", roles=["readWrite"]) - - .. _createUser: https://docs.mongodb.com/manual/reference/command/createUser/ - .. _usersInfo: https://docs.mongodb.com/manual/reference/command/usersInfo/ - .. _updateUser: https://docs.mongodb.com/manual/reference/command/updateUser/ - .. _dropUser: https://docs.mongodb.com/manual/reference/command/createUser/ - - .. warning:: Never create or modify users over an insecure network without - the use of TLS. See :doc:`/examples/tls` for more information. - - :Parameters: - - `name`: the name of the user to create - - `password` (optional): the password of the user to create. Can not - be used with the ``userSource`` argument. - - `read_only` (optional): if ``True`` the user will be read only - - `**kwargs` (optional): optional fields for the user document - (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See - ``_ - for more information. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.7 - Added support for SCRAM-SHA-256 users with MongoDB 4.0 and later. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Deprecated add_user. - - .. versionchanged:: 2.5 - Added kwargs support for optional fields introduced in MongoDB 2.4 - - .. versionchanged:: 2.2 - Added support for read only users - """ - warnings.warn("add_user is deprecated and will be removed in PyMongo " - "4.0. Use db.command with createUser or updateUser " - "instead", DeprecationWarning, stacklevel=2) - if not isinstance(name, str): - raise TypeError("name must be an instance of str") - if password is not None: - if not isinstance(password, str): - raise TypeError("password must be an instance of str") - if len(password) == 0: - raise ValueError("password can't be empty") - if read_only is not None: - read_only = common.validate_boolean('read_only', read_only) - if 'roles' in kwargs: - raise ConfigurationError("Can not use " - "read_only and roles together") - - try: - uinfo = self.command("usersInfo", name, session=session) - # Create the user if not found in uinfo, otherwise update one. - self._create_or_update_user( - (not uinfo["users"]), name, password, read_only, - session=session, **kwargs) - except OperationFailure as exc: - # Unauthorized. Attempt to create the user in case of - # localhost exception. - if exc.code == 13: - self._create_or_update_user( - True, name, password, read_only, session=session, **kwargs) - else: - raise - - def remove_user(self, name, session=None): - """**DEPRECATED**: Remove user `name` from this :class:`Database`. - - User `name` will no longer have permissions to access this - :class:`Database`. - - .. note:: remove_user is deprecated and will be removed in PyMongo - 4.0. Use the dropUser command instead:: - - db.command("dropUser", "user") - - :Parameters: - - `name`: the name of the user to remove - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Deprecated remove_user. - """ - warnings.warn("remove_user is deprecated and will be removed in " - "PyMongo 4.0. Use db.command with dropUser " - "instead", DeprecationWarning, stacklevel=2) - cmd = SON([("dropUser", name)]) - # Don't send {} as writeConcern. - if self.write_concern.acknowledged and self.write_concern.document: - cmd["writeConcern"] = self.write_concern.document - self.command(cmd, session=session) - def authenticate(self, name=None, password=None, source=None, mechanism='DEFAULT', **kwargs): """**DEPRECATED**: Authenticate to use this database. diff --git a/test/test_database.py b/test/test_database.py index 0c3271aa8f..ef91aebac4 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -469,165 +469,23 @@ def test_password_digest(self): self.assertEqual(auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73") - @client_context.require_auth - def test_authenticate_add_remove_user(self): - # "self.client" is logged in as root. - auth_db = self.client.pymongo_test - - def check_auth(username, password): - c = rs_or_single_client_noauth( - username=username, - password=password, - authSource="pymongo_test") - - c.pymongo_test.collection.find_one() - - # Configuration errors - self.assertRaises(ValueError, auth_db.add_user, "user", '') - self.assertRaises(TypeError, auth_db.add_user, "user", 'password', 15) - self.assertRaises(TypeError, auth_db.add_user, - "user", 'password', 'True') - self.assertRaises(ConfigurationError, auth_db.add_user, - "user", 'password', True, roles=['read']) - - with warnings.catch_warnings(): - warnings.simplefilter("error", DeprecationWarning) - self.assertRaises(DeprecationWarning, auth_db.add_user, - "user", "password") - self.assertRaises(DeprecationWarning, auth_db.add_user, - "user", "password", True) - - with ignore_deprecations(): - self.assertRaises(ConfigurationError, auth_db.add_user, - "user", "password", digestPassword=True) - - # Add / authenticate / remove - auth_db.add_user("mike", "password", roles=["read"]) - self.addCleanup(remove_all_users, auth_db) - self.assertRaises(TypeError, check_auth, 5, "password") - self.assertRaises(TypeError, check_auth, "mike", 5) - self.assertRaises(OperationFailure, - check_auth, "mike", "not a real password") - self.assertRaises(OperationFailure, check_auth, "faker", "password") - check_auth("mike", "password") - - if not client_context.version.at_least(3, 7, 2) or HAVE_STRINGPREP: - # Unicode name and password. - check_auth("mike", "password") - - auth_db.remove_user("mike") - self.assertRaises( - OperationFailure, check_auth, "mike", "password") - - # Add / authenticate / change password - self.assertRaises( - OperationFailure, check_auth, "Gustave", "Dor\xe9") - auth_db.add_user("Gustave", "Dor\xe9", roles=["read"]) - check_auth("Gustave", "Dor\xe9") - - # Change password. - auth_db.add_user("Gustave", "password", roles=["read"]) - self.assertRaises( - OperationFailure, check_auth, "Gustave", "Dor\xe9") - check_auth("Gustave", "password") - - @client_context.require_auth - @ignore_deprecations - def test_make_user_readonly(self): - # "self.client" is logged in as root. - auth_db = self.client.pymongo_test - - # Make a read-write user. - auth_db.add_user('jesse', 'pw') - self.addCleanup(remove_all_users, auth_db) - - # Check that we're read-write by default. - c = rs_or_single_client_noauth(username='jesse', - password='pw', - authSource='pymongo_test') - - c.pymongo_test.collection.insert_one({}) - - # Make the user read-only. - auth_db.add_user('jesse', 'pw', read_only=True) - - c = rs_or_single_client_noauth(username='jesse', - password='pw', - authSource='pymongo_test') - - self.assertRaises(OperationFailure, - c.pymongo_test.collection.insert_one, - {}) - - @client_context.require_auth - @ignore_deprecations - def test_default_roles(self): - # "self.client" is logged in as root. - auth_admin = self.client.admin - auth_admin.add_user('test_default_roles', 'pass') - self.addCleanup(client_context.drop_user, 'admin', 'test_default_roles') - info = auth_admin.command( - 'usersInfo', 'test_default_roles')['users'][0] - - self.assertEqual("root", info['roles'][0]['role']) - - # Read only "admin" user - auth_admin.add_user('ro-admin', 'pass', read_only=True) - self.addCleanup(client_context.drop_user, 'admin', 'ro-admin') - info = auth_admin.command('usersInfo', 'ro-admin')['users'][0] - self.assertEqual("readAnyDatabase", info['roles'][0]['role']) - - # "Non-admin" user - auth_db = self.client.pymongo_test - auth_db.add_user('user', 'pass') - self.addCleanup(remove_all_users, auth_db) - info = auth_db.command('usersInfo', 'user')['users'][0] - self.assertEqual("dbOwner", info['roles'][0]['role']) - - # Read only "Non-admin" user - auth_db.add_user('ro-user', 'pass', read_only=True) - info = auth_db.command('usersInfo', 'ro-user')['users'][0] - self.assertEqual("read", info['roles'][0]['role']) - - @client_context.require_auth - @ignore_deprecations - def test_new_user_cmds(self): - # "self.client" is logged in as root. - auth_db = self.client.pymongo_test - auth_db.add_user("amalia", "password", roles=["userAdmin"]) - self.addCleanup(client_context.drop_user, "pymongo_test", "amalia") - - db = rs_or_single_client_noauth(username="amalia", - password="password", - authSource="pymongo_test").pymongo_test - - # This tests the ability to update user attributes. - db.add_user("amalia", "new_password", - customData={"secret": "koalas"}) - - user_info = db.command("usersInfo", "amalia") - self.assertTrue(user_info["users"]) - amalia_user = user_info["users"][0] - self.assertEqual(amalia_user["user"], "amalia") - self.assertEqual(amalia_user["customData"], {"secret": "koalas"}) - @client_context.require_auth @ignore_deprecations def test_authenticate_multiple(self): # "self.client" is logged in as root. self.client.drop_database("pymongo_test") self.client.drop_database("pymongo_test1") - admin_db_auth = self.client.admin users_db_auth = self.client.pymongo_test - admin_db_auth.add_user( + client_context.create_user( + 'admin', 'ro-admin', 'pass', roles=["userAdmin", "readAnyDatabase"]) self.addCleanup(client_context.drop_user, 'admin', 'ro-admin') - users_db_auth.add_user( - 'user', 'pass', roles=["userAdmin", "readWrite"]) + client_context.create_user( + 'pymongo_test', 'user', 'pass', roles=["userAdmin", "readWrite"]) self.addCleanup(remove_all_users, users_db_auth) # Non-root client. diff --git a/test/test_session.py b/test/test_session.py index dbd3be5a28..e183338e4d 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -258,19 +258,6 @@ def test_database(self): self._test_ops(client, *ops) - @client_context.require_auth - @ignore_deprecations - def test_user_admin(self): - client = self.client - db = client.pymongo_test - - self._test_ops( - client, - (db.add_user, ['session-test', 'pass'], {'roles': ['read']}), - # Do it again to test updateUser command. - (db.add_user, ['session-test', 'pass'], {'roles': ['read']}), - (db.remove_user, ['session-test'], {})) - @staticmethod def collection_write_ops(coll): """Generate database write ops for tests.""" From 96b75808d146af8c0684a39e6d24356904b39078 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 26 Jan 2021 13:03:30 -0800 Subject: [PATCH 0296/2111] PYTHON-2133 Remove py2 support from extensions --- bson/_cbsonmodule.c | 322 +------------------------------------- bson/_cbsonmodule.h | 6 - pymongo/_cmessagemodule.c | 82 ++-------- 3 files changed, 17 insertions(+), 393 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 34f3ab6f67..400a51b4e3 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -56,17 +56,7 @@ struct module_state { PyObject* CodecOptions; }; -/* The Py_TYPE macro was introduced in CPython 2.6 */ -#ifndef Py_TYPE -#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) -#endif - -#if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) -#else -#define GETSTATE(m) (&_state) -static struct module_state _state; -#endif /* Maximum number of regex flags */ #define FLAGS_SIZE 7 @@ -228,19 +218,11 @@ static int write_unicode(buffer_t buffer, PyObject* py_string) { if (!encoded) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AS_STRING(encoded); -#else - data = PyString_AS_STRING(encoded); -#endif if (!data) goto unicodefail; -#if PY_MAJOR_VERSION >= 3 if ((size = _downcast_and_check(PyBytes_GET_SIZE(encoded), 1)) == -1) -#else - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) -#endif goto unicodefail; if (!buffer_write_int32(buffer, (int32_t)size)) @@ -261,23 +243,15 @@ static int write_unicode(buffer_t buffer, PyObject* py_string) { static int write_string(buffer_t buffer, PyObject* py_string) { int size; const char* data; -#if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(py_string)){ return write_unicode(buffer, py_string); } data = PyBytes_AsString(py_string); -#else - data = PyString_AsString(py_string); -#endif if (!data) { return 0; } -#if PY_MAJOR_VERSION >= 3 if ((size = _downcast_and_check(PyBytes_Size(py_string), 1)) == -1) -#else - if ((size = _downcast_and_check(PyString_Size(py_string), 1)) == -1) -#endif return 0; if (!buffer_write_int32(buffer, (int32_t)size)) { @@ -370,20 +344,12 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || -#if PY_MAJOR_VERSION >= 3 _load_object(&state->Mapping, "collections.abc", "Mapping") || -#else - _load_object(&state->Mapping, "collections", "Mapping") || -#endif _load_object(&state->CodecOptions, "bson.codec_options", "CodecOptions")) { return 1; } /* Reload our REType hack too. */ -#if PY_MAJOR_VERSION >= 3 empty_string = PyBytes_FromString(""); -#else - empty_string = PyString_FromString(""); -#endif if (empty_string == NULL) { state->REType = NULL; return 1; @@ -433,13 +399,8 @@ static long _type_marker(PyObject* object) { * or method. In some cases "value" could be a subtype of something * we know how to serialize. Make a best effort to encode these types. */ -#if PY_MAJOR_VERSION >= 3 if (type_marker && PyLong_CheckExact(type_marker)) { type = PyLong_AsLong(type_marker); -#else - if (type_marker && PyInt_CheckExact(type_marker)) { - type = PyInt_AsLong(type_marker); -#endif Py_DECREF(type_marker); /* * Py(Long|Int)_AsLong returns -1 for error but -1 is a valid value @@ -596,40 +557,8 @@ _set_cannot_encode(PyObject* value) { if (type == NULL) { goto error; } -#if PY_MAJOR_VERSION >= 3 PyErr_Format(InvalidDocument, "cannot encode object: %R, of type: %R", value, type); -#else - else { - PyObject* value_repr = NULL; - PyObject* type_repr = NULL; - char* value_str = NULL; - char* type_str = NULL; - - value_repr = PyObject_Repr(value); - if (value_repr == NULL) { - goto py2error; - } - value_str = PyString_AsString(value_repr); - if (value_str == NULL) { - goto py2error; - } - type_repr = PyObject_Repr(type); - if (type_repr == NULL) { - goto py2error; - } - type_str = PyString_AsString(type_repr); - if (type_str == NULL) { - goto py2error; - } - - PyErr_Format(InvalidDocument, "cannot encode object: %s, of type: %s", - value_str, type_str); -py2error: - Py_XDECREF(type_repr); - Py_XDECREF(value_repr); - } -#endif error: Py_XDECREF(type); Py_XDECREF(InvalidDocument); @@ -661,11 +590,7 @@ static int _write_regex_to_buffer( if (!py_flags) { return 0; } -#if PY_MAJOR_VERSION >= 3 int_flags = PyLong_AsLong(py_flags); -#else - int_flags = PyInt_AsLong(py_flags); -#endif Py_DECREF(py_flags); if (int_flags == -1 && PyErr_Occurred()) { return 0; @@ -686,7 +611,6 @@ static int _write_regex_to_buffer( check_utf8 = 1; } -#if PY_MAJOR_VERSION >= 3 if (!(pattern_data = PyBytes_AsString(encoded_pattern))) { Py_DECREF(encoded_pattern); return 0; @@ -695,16 +619,6 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } -#else - if (!(pattern_data = PyString_AsString(encoded_pattern))) { - Py_DECREF(encoded_pattern); - return 0; - } - if ((pattern_length = _downcast_and_check(PyString_Size(encoded_pattern), 0)) == -1) { - Py_DECREF(encoded_pattern); - return 0; - } -#endif status = check_string((const unsigned char*)pattern_data, pattern_length, check_utf8, 1); if (status == NOT_UTF_8) { @@ -800,20 +714,12 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!subtype_object) { return 0; } -#if PY_MAJOR_VERSION >= 3 subtype = (char)PyLong_AsLong(subtype_object); -#else - subtype = (char)PyInt_AsLong(subtype_object); -#endif if (subtype == -1) { Py_DECREF(subtype_object); return 0; } -#if PY_MAJOR_VERSION >= 3 size = _downcast_and_check(PyBytes_Size(value), 0); -#else - size = _downcast_and_check(PyString_Size(value), 0); -#endif if (size == -1) { Py_DECREF(subtype_object); return 0; @@ -821,11 +727,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, Py_DECREF(subtype_object); if (subtype == 2) { -#if PY_MAJOR_VERSION >= 3 int other_size = _downcast_and_check(PyBytes_Size(value), 4); -#else - int other_size = _downcast_and_check(PyString_Size(value), 4); -#endif if (other_size == -1) return 0; if (!buffer_write_int32(buffer, other_size)) { @@ -843,11 +745,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AsString(value); -#else - data = PyString_AsString(value); -#endif if (!data) { return 0; } @@ -864,11 +762,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!pystring) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AsString(pystring); -#else - data = PyString_AsString(pystring); -#endif if (!data) { Py_DECREF(pystring); return 0; @@ -988,11 +882,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!pystring) { return 0; } -#if PY_MAJOR_VERSION >= 3 data = PyBytes_AsString(pystring); -#else - data = PyString_AsString(pystring); -#endif if (!data) { Py_DECREF(pystring); return 0; @@ -1050,13 +940,8 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, *(buffer_get_buffer(buffer) + type_byte) = 0x08; return buffer_write_bytes(buffer, &c, 1); } -#if PY_MAJOR_VERSION >= 3 else if (PyLong_Check(value)) { const long long_value = PyLong_AsLong(value); -#else - else if (PyInt_Check(value)) { - const long long_value = PyInt_AsLong(value); -#endif const int int_value = (int)long_value; if (PyErr_Occurred() || long_value != int_value) { /* Overflow */ @@ -1073,17 +958,6 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } *(buffer_get_buffer(buffer) + type_byte) = 0x10; return buffer_write_int32(buffer, (int32_t)int_value); -#if PY_MAJOR_VERSION < 3 - } else if (PyLong_Check(value)) { - const long long long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_int64(buffer, (int64_t)long_long_value); -#endif } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); *(buffer_get_buffer(buffer) + type_byte) = 0x01; @@ -1151,7 +1025,6 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; -#if PY_MAJOR_VERSION >= 3 /* Python3 special case. Store bytes as BSON binary subtype 0. */ } else if (PyBytes_Check(value)) { char subtype = 0; @@ -1172,52 +1045,6 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } return 1; -#else - /* PyString_Check only works in Python 2.x. */ - } else if (PyString_Check(value)) { - result_t status; - const char* data; - int size; - if (!(data = PyString_AS_STRING(value))) - return 0; - if ((size = _downcast_and_check(PyString_GET_SIZE(value), 1)) == -1) - return 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x02; - status = check_string((const unsigned char*)data, size - 1, 1, 0); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyObject* repr = PyObject_Repr(value); - char* repr_as_cstr = repr ? PyString_AsString(repr) : NULL; - if (repr_as_cstr) { - PyObject *message = PyString_FromFormat( - "strings in documents must be valid UTF-8: %s", - repr_as_cstr); - - if (message) { - PyErr_SetObject(InvalidStringData, message); - Py_DECREF(message); - } - } else { - /* repr(value) failed, use a generic message. */ - PyErr_SetString( - InvalidStringData, - "strings in documents must be valid UTF-8"); - } - Py_XDECREF(repr); - Py_DECREF(InvalidStringData); - } - return 0; - } - if (!buffer_write_int32(buffer, (int32_t)size)) { - return 0; - } - if (!buffer_write_bytes(buffer, data, size)) { - return 0; - } - return 1; -#endif } else if (PyUnicode_Check(value)) { *(buffer_get_buffer(buffer) + type_byte) = 0x02; return write_unicode(buffer, value); @@ -1247,11 +1074,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, * Try Mapping and UUID last since we have to import * them if we're in a sub-interpreter. */ -#if PY_MAJOR_VERSION >= 3 mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); -#else - mapping_type = _get_object(state->Mapping, "collections", "Mapping"); -#endif if (mapping_type && PyObject_IsInstance(value, mapping_type)) { Py_DECREF(mapping_type); /* PyObject_IsInstance returns -1 on error */ @@ -1347,13 +1170,8 @@ static int check_key_name(const char* name, int name_length) { if (name_length > 0 && name[0] == '$') { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromFormat( "key '%s' must not start with '$'", name); -#else - PyObject* errmsg = PyString_FromFormat( - "key '%s' must not start with '$'", name); -#endif if (errmsg) { PyErr_SetObject(InvalidDocument, errmsg); Py_DECREF(errmsg); @@ -1365,13 +1183,8 @@ static int check_key_name(const char* name, int name_length) { if (strchr(name, '.')) { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromFormat( "key '%s' must not contain '.'", name); -#else - PyObject* errmsg = PyString_FromFormat( - "key '%s' must not contain '.'", name); -#endif if (errmsg) { PyErr_SetObject(InvalidDocument, errmsg); Py_DECREF(errmsg); @@ -1428,7 +1241,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, if (!encoded) { return 0; } -#if PY_MAJOR_VERSION >= 3 if (!(data = PyBytes_AS_STRING(encoded))) { Py_DECREF(encoded); return 0; @@ -1437,16 +1249,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, Py_DECREF(encoded); return 0; } -#else - if (!(data = PyString_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); - return 0; - } -#endif if (strlen(data) != (size_t)(size - 1)) { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { @@ -1457,56 +1259,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, Py_DECREF(encoded); return 0; } -#if PY_MAJOR_VERSION < 3 - } else if (PyString_Check(key)) { - result_t status; - encoded = key; - Py_INCREF(encoded); - - if (!(data = PyString_AS_STRING(encoded))) { - Py_DECREF(encoded); - return 0; - } - if ((size = _downcast_and_check(PyString_GET_SIZE(encoded), 1)) == -1) { - Py_DECREF(encoded); - return 0; - } - status = check_string((const unsigned char*)data, size - 1, 1, 1); - - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "strings in documents must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded); - return 0; - } else if (status == HAS_NULL) { - PyObject* InvalidDocument = _error("InvalidDocument"); - if (InvalidDocument) { - PyErr_SetString(InvalidDocument, - "Key names must not contain the NULL byte"); - Py_DECREF(InvalidDocument); - } - Py_DECREF(encoded); - return 0; - } -#endif } else { PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyObject* repr = PyObject_Repr(key); if (repr) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromString( "documents must have only string keys, key was "); -#else - PyObject* errmsg = PyString_FromString( - "documents must have only string keys, key was "); -#endif if (errmsg) { -#if PY_MAJOR_VERSION >= 3 PyObject* error = PyUnicode_Concat(errmsg, repr); if (error) { PyErr_SetObject(InvalidDocument, error); @@ -1514,13 +1274,6 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, } Py_DECREF(errmsg); Py_DECREF(repr); -#else - PyString_ConcatAndDel(&errmsg, repr); - if (errmsg) { - PyErr_SetObject(InvalidDocument, errmsg); - Py_DECREF(errmsg); - } -#endif } else { Py_DECREF(repr); } @@ -1596,18 +1349,13 @@ int write_dict(PyObject* self, buffer_t buffer, return write_raw_doc(buffer, dict); } -#if PY_MAJOR_VERSION >= 3 mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); -#else - mapping_type = _get_object(state->Mapping, "collections", "Mapping"); -#endif if (mapping_type) { if (!PyObject_IsInstance(dict, mapping_type)) { PyObject* repr; Py_DECREF(mapping_type); if ((repr = PyObject_Repr(dict))) { -#if PY_MAJOR_VERSION >= 3 PyObject* errmsg = PyUnicode_FromString( "encoder expected a mapping type but got: "); if (errmsg) { @@ -1619,17 +1367,6 @@ int write_dict(PyObject* self, buffer_t buffer, Py_DECREF(errmsg); Py_DECREF(repr); } -#else - PyObject* errmsg = PyString_FromString( - "encoder expected a mapping type but got: "); - if (errmsg) { - PyString_ConcatAndDel(&errmsg, repr); - if (errmsg) { - PyErr_SetObject(PyExc_TypeError, errmsg); - Py_DECREF(errmsg); - } - } -#endif else { Py_DECREF(repr); } @@ -1762,7 +1499,7 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { } /* objectify buffer */ - result = Py_BuildValue(BYTES_FORMAT_STRING, buffer_get_buffer(buffer), + result = Py_BuildValue("y#", buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer)); destroy_codec_options(&options); buffer_free(buffer); @@ -1833,7 +1570,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (options->is_raw_bson) { value = PyObject_CallFunction( - options->document_class, BYTES_FORMAT_STRING "O", + options->document_class, "y#O", buffer + *position, (Py_ssize_t)size, options->options_obj); if (!value) { goto invalid; @@ -1990,7 +1727,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } } -#if PY_MAJOR_VERSION >= 3 /* Python3 special case. Decode BSON binary subtype 0 to bytes. */ if (subtype == 0) { value = PyBytes_FromStringAndSize(buffer + *position, length); @@ -2002,13 +1738,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } else { data = PyBytes_FromStringAndSize(buffer + *position, length); } -#else - if (subtype == 2) { - data = PyString_FromStringAndSize(buffer + *position + 4, length - 4); - } else { - data = PyString_FromStringAndSize(buffer + *position, length); - } -#endif if (!data) { goto invalid; } @@ -2057,11 +1786,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, break; } -#if PY_MAJOR_VERSION >= 3 st = PyLong_FromLong(subtype); -#else - st = PyInt_FromLong(subtype); -#endif if (!st) { Py_DECREF(data); goto invalid; @@ -2092,7 +1817,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - value = PyObject_CallFunction(objectid_type, BYTES_FORMAT_STRING, + value = PyObject_CallFunction(objectid_type, "y#", buffer + *position, (Py_ssize_t)12); Py_DECREF(objectid_type); } @@ -2279,7 +2004,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, *position += coll_length; if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - id = PyObject_CallFunction(objectid_type, BYTES_FORMAT_STRING, + id = PyObject_CallFunction(objectid_type, "y#", buffer + *position, (Py_ssize_t)12); Py_DECREF(objectid_type); } @@ -2409,11 +2134,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } memcpy(&i, buffer + *position, 4); i = (int32_t)BSON_UINT32_FROM_LE(i); -#if PY_MAJOR_VERSION >= 3 value = PyLong_FromLong(i); -#else - value = PyInt_FromLong(i); -#endif if (!value) { goto invalid; } @@ -2467,7 +2188,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, "Decimal128"))) { value = PyObject_CallMethod(dec128, "from_bid", - BYTES_FORMAT_STRING, + "y#", buffer + *position, (Py_ssize_t)16); Py_DECREF(dec128); @@ -2685,20 +2406,11 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { } } -#if PY_MAJOR_VERSION >= 3 if (!PyBytes_Check(bson)) { PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a bytes object"); -#else - if (!PyString_Check(bson)) { - PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a string"); -#endif return NULL; } -#if PY_MAJOR_VERSION >= 3 string = PyBytes_AS_STRING(bson); -#else - string = PyString_AS_STRING(bson); -#endif new_position = _element_to_dict(self, string, position, max, &options, &name, &value); @@ -2847,7 +2559,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { /* No need to decode fields if using RawBSONDocument */ if (options.is_raw_bson) { result = PyObject_CallFunction( - options.document_class, BYTES_FORMAT_STRING "O", string, (Py_ssize_t)size, + options.document_class, "y#O", string, (Py_ssize_t)size, options_obj); } else { @@ -2939,7 +2651,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { /* No need to decode fields if using RawBSONDocument. */ if (options.is_raw_bson) { dict = PyObject_CallFunction( - options.document_class, BYTES_FORMAT_STRING "O", string, (Py_ssize_t)size, + options.document_class, "y#O", string, (Py_ssize_t)size, options_obj); } else { dict = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); @@ -2978,7 +2690,6 @@ static PyMethodDef _CBSONMethods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 #define INITERROR return NULL static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->Binary); @@ -3024,11 +2735,6 @@ static struct PyModuleDef moduledef = { PyMODINIT_FUNC PyInit__cbson(void) -#else -#define INITERROR return -PyMODINIT_FUNC -init_cbson(void) -#endif { PyObject *m; PyObject *c_api_object; @@ -3053,20 +2759,12 @@ init_cbson(void) (void *) buffer_write_int32_at_position; _cbson_API[_cbson_downcast_and_check_INDEX] = (void *) _downcast_and_check; -#if PY_VERSION_HEX >= 0x03010000 /* PyCapsule is new in python 3.1 */ c_api_object = PyCapsule_New((void *) _cbson_API, "_cbson._C_API", NULL); -#else - c_api_object = PyCObject_FromVoidPtr((void *) _cbson_API, NULL); -#endif if (c_api_object == NULL) INITERROR; -#if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&moduledef); -#else - m = Py_InitModule("_cbson", _CBSONMethods); -#endif if (m == NULL) { Py_DECREF(c_api_object); INITERROR; @@ -3075,21 +2773,15 @@ init_cbson(void) /* Import several python objects */ if (_load_python_objects(m)) { Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 Py_DECREF(m); -#endif INITERROR; } if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 Py_DECREF(m); -#endif INITERROR; } -#if PY_MAJOR_VERSION >= 3 return m; -#endif } diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 69590d5647..12a2c8ac67 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -45,12 +45,6 @@ #define STRCAT(dest, n, src) strcat((dest), (src)) #endif -#if PY_MAJOR_VERSION >= 3 -#define BYTES_FORMAT_STRING "y#" -#else -#define BYTES_FORMAT_STRING "s#" -#endif - typedef struct type_registry_t { PyObject* encoder_map; PyObject* decoder_map; diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 52dbf08526..aeeb052db5 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -31,12 +31,7 @@ struct module_state { }; /* See comments about module initialization in _cbsonmodule.c */ -#if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) -#else -#define GETSTATE(m) (&_state) -static struct module_state _state; -#endif #define DOC_TOO_LARGE_FMT "BSON document too large (%d bytes)" \ " - the connected server supports" \ @@ -270,7 +265,7 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { } /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, + result = Py_BuildValue("iy#i", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), max_size); @@ -370,7 +365,7 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { } /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, + result = Py_BuildValue("iy#i", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), max_size); @@ -507,7 +502,7 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "i", request_id, + result = Py_BuildValue("iy#i", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), max_size); @@ -568,7 +563,7 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING, request_id, + result = Py_BuildValue("iy#", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer)); fail: @@ -684,7 +679,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { buffer, length_location, (int32_t)message_length); /* objectify buffer */ - result = Py_BuildValue("i" BYTES_FORMAT_STRING "ii", request_id, + result = Py_BuildValue("iy#ii", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), total_size, @@ -704,11 +699,7 @@ static void _set_document_too_large(int size, long max) { PyObject* DocumentTooLarge = _error("DocumentTooLarge"); if (DocumentTooLarge) { -#if PY_MAJOR_VERSION >= 3 PyObject* error = PyUnicode_FromFormat(DOC_TOO_LARGE_FMT, size, max); -#else - PyObject* error = PyString_FromFormat(DOC_TOO_LARGE_FMT, size, max); -#endif if (error) { PyErr_SetObject(DocumentTooLarge, error); Py_DECREF(error); @@ -733,7 +724,7 @@ _send_insert(PyObject* self, PyObject* ctx, /* The max_doc_size parameter for legacy_bulk_insert is the max size of * any document in buffer. We enforced max size already, pass 0 here. */ return PyObject_CallMethod(ctx, "legacy_bulk_insert", - "i" BYTES_FORMAT_STRING "iNOi", + "iy#iNOi", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), @@ -792,11 +783,7 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { */ send_safe = (safe || !continue_on_error); max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { destroy_codec_options(&options); @@ -805,11 +792,7 @@ static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { } max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); -#if PY_MAJOR_VERSION >= 3 max_message_size = PyLong_AsLong(max_message_size_obj); -#else - max_message_size = PyInt_AsLong(max_message_size_obj); -#endif Py_XDECREF(max_message_size_obj); if (max_message_size == -1) { destroy_codec_options(&options); @@ -1080,33 +1063,21 @@ _batched_op_msg( char* flags = ack ? "\x00\x00\x00\x00" : "\x02\x00\x00\x00"; max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { return 0; } max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); -#if PY_MAJOR_VERSION >= 3 max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); -#else - max_write_batch_size = PyInt_AsLong(max_write_batch_size_obj); -#endif Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { return 0; } max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); -#if PY_MAJOR_VERSION >= 3 max_message_size = PyLong_AsLong(max_message_size_obj); -#else - max_message_size = PyInt_AsLong(max_message_size_obj); -#endif Py_XDECREF(max_message_size_obj); if (max_message_size == -1) { return 0; @@ -1295,7 +1266,7 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { goto fail; } - result = Py_BuildValue(BYTES_FORMAT_STRING "O", + result = Py_BuildValue("y#O", buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), to_publish); @@ -1364,7 +1335,7 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { position = buffer_get_position(buffer); buffer_write_int32_at_position(buffer, 0, (int32_t)position); buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); - result = Py_BuildValue("i" BYTES_FORMAT_STRING "O", request_id, + result = Py_BuildValue("iy#O", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), to_publish); @@ -1400,11 +1371,7 @@ _batched_write_command( PyObject* iterator = NULL; max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); -#if PY_MAJOR_VERSION >= 3 max_bson_size = PyLong_AsLong(max_bson_size_obj); -#else - max_bson_size = PyInt_AsLong(max_bson_size_obj); -#endif Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { return 0; @@ -1416,11 +1383,7 @@ _batched_write_command( max_cmd_size = max_bson_size + 16382; max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); -#if PY_MAJOR_VERSION >= 3 max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); -#else - max_write_batch_size = PyInt_AsLong(max_write_batch_size_obj); -#endif Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { return 0; @@ -1430,11 +1393,7 @@ _batched_write_command( // Normally this this value is equal to max_bson_size (16MiB). However, // when auto encryption is enabled max_split_size is reduced to 2MiB. max_split_size_obj = PyObject_GetAttrString(ctx, "max_split_size"); -#if PY_MAJOR_VERSION >= 3 max_split_size = PyLong_AsLong(max_split_size_obj); -#else - max_split_size = PyInt_AsLong(max_split_size_obj); -#endif Py_XDECREF(max_split_size_obj); if (max_split_size == -1) { return 0; @@ -1640,7 +1599,7 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { goto fail; } - result = Py_BuildValue(BYTES_FORMAT_STRING "O", + result = Py_BuildValue("y#O", buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), to_publish); @@ -1713,7 +1672,7 @@ _cbson_batched_write_command(PyObject* self, PyObject* args) { position = buffer_get_position(buffer); buffer_write_int32_at_position(buffer, 0, (int32_t)position); buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); - result = Py_BuildValue("i" BYTES_FORMAT_STRING "O", request_id, + result = Py_BuildValue("iy#O", request_id, buffer_get_buffer(buffer), (Py_ssize_t)buffer_get_position(buffer), to_publish); @@ -1749,7 +1708,6 @@ static PyMethodDef _CMessageMethods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 #define INITERROR return NULL static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->_cbson); @@ -1775,11 +1733,6 @@ static struct PyModuleDef moduledef = { PyMODINIT_FUNC PyInit__cmessage(void) -#else -#define INITERROR return -PyMODINIT_FUNC -init_cmessage(void) -#endif { PyObject *_cbson = NULL; PyObject *c_api_object = NULL; @@ -1800,22 +1753,13 @@ init_cmessage(void) if (c_api_object == NULL) { goto fail; } -#if PY_VERSION_HEX >= 0x03010000 _cbson_API = (void **)PyCapsule_GetPointer(c_api_object, "_cbson._C_API"); -#else - _cbson_API = (void **)PyCObject_AsVoidPtr(c_api_object); -#endif if (_cbson_API == NULL) { goto fail; } -#if PY_MAJOR_VERSION >= 3 /* Returns a new reference. */ m = PyModule_Create(&moduledef); -#else - /* Returns a borrowed reference. */ - m = Py_InitModule("_cmessage", _CMessageMethods); -#endif if (m == NULL) { goto fail; } @@ -1824,16 +1768,10 @@ init_cmessage(void) Py_DECREF(c_api_object); -#if PY_MAJOR_VERSION >= 3 return m; -#else - return; -#endif fail: -#if PY_MAJOR_VERSION >= 3 Py_XDECREF(m); -#endif Py_XDECREF(c_api_object); Py_XDECREF(_cbson); INITERROR; From 6ff2883f82753f83189393eb19c9d7c9fd64c879 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 27 Jan 2021 13:08:44 -0800 Subject: [PATCH 0297/2111] PYTHON-2445 PYTHON-2530 Fix MONGODB-AWS auth tests (#562) --- .evergreen/config.yml | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9cbcfd5584..f80896ffb9 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -504,7 +504,7 @@ functions: silent: true script: | cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python3 -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' + alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' USER=$(urlencode ${iam_auth_ecs_account}) PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) MONGODB_URI="mongodb://$USER:$PASS@localhost" @@ -531,7 +531,7 @@ functions: else . mongovenv/bin/activate fi - pip install boto3 + pip install --upgrade boto3 cd ${DRIVERS_TOOLS}/.evergreen/auth_aws mongo aws_e2e_assume_role.js - command: shell.exec @@ -542,8 +542,8 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='python3 -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' - alias jsonkey='python3 -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' + alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' + alias jsonkey='${python3_binary} -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' USER=$(jsonkey AccessKeyId) USER=$(urlencode $USER) PASS=$(jsonkey SecretAccessKey) @@ -616,7 +616,7 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias jsonkey='python3 -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' + alias jsonkey='${python3_binary} -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' export AWS_ACCESS_KEY_ID=$(jsonkey AccessKeyId) export AWS_SECRET_ACCESS_KEY=$(jsonkey SecretAccessKey) export AWS_SESSION_TOKEN=$(jsonkey SessionToken) @@ -640,6 +640,12 @@ functions: echo "This platform does not support the ECS auth test, skipping..." exit 0 fi + # The mongovenv was created earlier in "run aws auth test with assume role credentials". + if [ "Windows_NT" = "$OS" ]; then + . mongovenv/Scripts/activate + else + . mongovenv/bin/activate + fi cd ${DRIVERS_TOOLS}/.evergreen/auth_aws cat < setup.js const mongo_binaries = "$MONGODB_BINARIES"; From 70b927a01db02453fa20c87affa306bcc67a313c Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 27 Jan 2021 16:28:12 -0800 Subject: [PATCH 0298/2111] PYTHON-2508 Improve PyOpenSSL on Windows and macOS --- README.rst | 15 +++------ doc/changelog.rst | 2 ++ doc/examples/tls.rst | 10 ------ pymongo/pyopenssl_context.py | 59 ++++++++++++++++++++++++++++++++---- pymongo/ssl_support.py | 54 +-------------------------------- setup.py | 5 +++ test/test_ssl.py | 28 ++++++++--------- 7 files changed, 77 insertions(+), 96 deletions(-) diff --git a/README.rst b/README.rst index b6db345be2..05008492cc 100644 --- a/README.rst +++ b/README.rst @@ -110,19 +110,12 @@ Support for mongodb+srv:// URIs requires `dnspython $ python -m pip install pymongo[srv] -TLS / SSL support may require `ipaddress -`_ and `certifi -`_ or `wincertstore -`_ depending on the Python -version in use. The necessary dependencies can be installed along with -PyMongo:: - - $ python -m pip install pymongo[tls] - OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests -`_ and `service_identity -`_:: +`_, `service_identity +`_ and may +require `certifi +`_:: $ python -m pip install pymongo[ocsp] diff --git a/doc/changelog.rst b/doc/changelog.rst index ae0e3e65e6..7b0d812113 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -63,6 +63,8 @@ Breaking Changes in 4.0 :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, and :meth:`~pymongo.cursor.Cursor`. +- The "tls" install extra is no longer necessary or supported and will be + ignored by pip. Notable improvements .................... diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 07351dc9d5..327453bfd8 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -6,16 +6,6 @@ configuration options supported by PyMongo. See `the server documentation `_ to configure MongoDB. -Dependencies -............ - -For connections using TLS/SSL, PyMongo may require third party dependencies as -determined by your version of Python. With PyMongo 3.3+, you can install -PyMongo 3.3+ and any TLS/SSL-related dependencies using the following pip -command:: - - $ python -m pip install pymongo[tls] - .. warning:: Industry best practices recommend, and some regulations require, the use of TLS 1.1 or newer. Though no application changes are required for PyMongo to make use of the newest protocols, some operating systems or diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 3d5cb933f2..2118106e63 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -18,14 +18,15 @@ import socket as _socket import ssl as _stdlibssl -import time +import sys as _sys +import time as _time from errno import EINTR as _EINTR -# service_identity requires this for py27, so it should always be available from ipaddress import ip_address as _ip_address -from OpenSSL import SSL as _SSL +from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate +from OpenSSL import crypto as _crypto, SSL as _SSL from service_identity.pyopenssl import ( verify_hostname as _verify_hostname, verify_ip_address as _verify_ip_address) @@ -33,7 +34,9 @@ CertificateError as _SICertificateError, VerificationError as _SIVerificationError) -from pymongo.errors import CertificateError as _CertificateError +from pymongo.errors import ( + CertificateError as _CertificateError, + ConfigurationError as _ConfigurationError) from pymongo.ocsp_support import ( _load_trusted_ca_certs, _ocsp_callback) @@ -41,6 +44,12 @@ from pymongo.socket_checker import ( _errno_from_exception, SocketChecker as _SocketChecker) +try: + import certifi + _HAVE_CERTIFI = True +except ImportError: + _HAVE_CERTIFI = False + PROTOCOL_SSLv23 = _SSL.SSLv23_METHOD # Always available OP_NO_SSLv2 = _SSL.OP_NO_SSLv2 @@ -98,14 +107,14 @@ def __init__(self, ctx, sock, suppress_ragged_eofs): def _call(self, call, *args, **kwargs): timeout = self.gettimeout() if timeout: - start = time.monotonic() + start = _time.monotonic() while True: try: return call(*args, **kwargs) except _RETRY_ERRORS: self.socket_checker.select( self, True, True, timeout) - if timeout and time.monotonic() - start > timeout: + if timeout and _time.monotonic() - start > timeout: raise _socket.timeout("timed out") continue @@ -272,6 +281,44 @@ def load_verify_locations(self, cafile=None, capath=None): self._ctx.load_verify_locations(cafile, capath) self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) + def _load_certifi(self): + """Attempt to load CA certs from certifi.""" + if _HAVE_CERTIFI: + self.load_verify_locations(certifi.where()) + else: + raise _ConfigurationError( + "tlsAllowInvalidCertificates is False but no system " + "CA certificates could be loaded. Please install the " + "certifi package, or provide a path to a CA file using " + "the tlsCAFile option") + + def _load_wincerts(self, store): + """Attempt to load CA certs from Windows trust store.""" + cert_store = self._ctx.get_cert_store() + oid = _stdlibssl.Purpose.SERVER_AUTH.oid + for cert, encoding, trust in _stdlibssl.enum_certificates(store): + if encoding == "x509_asn": + if trust is True or oid in trust: + cert_store.add_cert( + _crypto.X509.from_cryptography( + _load_der_x509_certificate(cert))) + + def load_default_certs(self): + """A PyOpenSSL version of load_default_certs from CPython.""" + # PyOpenSSL is incapable of loading CA certs from Windows, and mostly + # incapable on macOS. + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths + if _sys.platform == "win32": + try: + for storename in ('CA', 'ROOT'): + self._load_wincerts(storename) + except PermissionError: + # Fall back to certifi + self._load_certifi() + elif _sys.platform == "darwin": + self._load_certifi() + self._ctx.set_default_verify_paths() + def set_default_verify_paths(self): """Specify that the platform provided CA certificates are to be used for verification purposes.""" diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index d8e55a1f7c..ca6ee8575d 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -14,9 +14,7 @@ """Support for SSL in PyMongo.""" -import atexit import sys -import threading from pymongo.errors import ConfigurationError @@ -30,22 +28,6 @@ except ImportError: HAVE_SSL = False -HAVE_CERTIFI = False -try: - import certifi - HAVE_CERTIFI = True -except ImportError: - pass - -HAVE_WINCERTSTORE = False -try: - from wincertstore import CertFile - HAVE_WINCERTSTORE = True -except ImportError: - pass - -_WINCERTSLOCK = threading.Lock() -_WINCERTS = None if HAVE_SSL: # Note: The validate* functions below deal with users passing @@ -82,17 +64,6 @@ def validate_allow_invalid_certs(option, value): return CERT_NONE return CERT_REQUIRED - def _load_wincerts(): - """Set _WINCERTS to an instance of wincertstore.Certfile.""" - global _WINCERTS - - certfile = CertFile() - certfile.addstore("CA") - certfile.addstore("ROOT") - atexit.register(certfile.close) - - _WINCERTS = certfile - def get_ssl_context(*args): """Create and return an SSLContext object.""" (certfile, @@ -138,30 +109,7 @@ def get_ssl_context(*args): if ca_certs is not None: ctx.load_verify_locations(ca_certs) elif cert_reqs != CERT_NONE: - # CPython ssl module only, doesn't exist in PyOpenSSL - if hasattr(ctx, "load_default_certs"): - ctx.load_default_certs() - # Always useless on Windows. - elif (sys.platform != "win32" and - hasattr(ctx, "set_default_verify_paths")): - ctx.set_default_verify_paths() - # This is needed with PyOpenSSL on Windows - # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - elif sys.platform == "win32" and HAVE_WINCERTSTORE: - with _WINCERTSLOCK: - if _WINCERTS is None: - _load_wincerts() - ctx.load_verify_locations(_WINCERTS.name) - # This is necessary with PyOpenSSL on macOS when homebrew isn't - # installed. - # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - elif HAVE_CERTIFI: - ctx.load_verify_locations(certifi.where()) - else: - raise ConfigurationError( - "`ssl_cert_reqs` is not ssl.CERT_NONE and no system " - "CA certificates could be loaded. `ssl_ca_certs` is " - "required.") + ctx.load_default_certs() ctx.verify_mode = verify_mode return ctx else: diff --git a/setup.py b/setup.py index 2c2b9e53c4..6ff561a098 100755 --- a/setup.py +++ b/setup.py @@ -275,6 +275,11 @@ def build_extension(self, ext): # in set_default_verify_paths we should really avoid. # service_identity 18.1.0 introduced support for IP addr matching. pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] +if sys.platform in ('win32', 'darwin'): + # Fallback to certifi on Windows if we can't load CA certs from the system + # store and just use certifi on macOS. + # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths + pyopenssl_reqs.append('certifi') extras_require = { 'encryption': ['pymongocrypt<2.0.0'], diff --git a/test/test_ssl.py b/test/test_ssl.py index 024230105b..82d99d5d18 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -336,25 +336,21 @@ def test_cert_ssl_validation_hostname_matching(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - - # Python > 2.7.9. If SSLContext doesn't have load_default_certs - # it also doesn't have check_hostname. ctx = get_ssl_context( None, None, None, None, ssl.CERT_NONE, None, False, True) - if hasattr(ctx, 'load_default_certs'): - self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, True, True) - self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, False, True) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context( + None, None, None, None, ssl.CERT_NONE, None, True, True) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context( + None, None, None, None, ssl.CERT_REQUIRED, None, False, True) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context( + None, None, None, None, ssl.CERT_REQUIRED, None, True, True) + if _PY37PLUS or _HAVE_PYOPENSSL: + self.assertTrue(ctx.check_hostname) + else: self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, True, True) - if _PY37PLUS: - self.assertTrue(ctx.check_hostname) - else: - self.assertFalse(ctx.check_hostname) response = self.client.admin.command('ismaster') From 7ca1efda43cdc5f79b1c88174c80fdee33307523 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 28 Jan 2021 12:58:48 -0800 Subject: [PATCH 0299/2111] PYTHON-2445 Use new setup script for MONGODB-AWS testing --- .evergreen/config.yml | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f80896ffb9..2dcc66bcd7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -496,6 +496,7 @@ functions: script: | ${PREPARE_SHELL} cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . ./activate_venv.sh mongo aws_e2e_regular_aws.js - command: shell.exec type: test @@ -524,15 +525,8 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - # The aws_e2e_assume_role script requires python3 with boto3. - virtualenv -p ${python3_binary} mongovenv - if [ "Windows_NT" = "$OS" ]; then - . mongovenv/Scripts/activate - else - . mongovenv/bin/activate - fi - pip install --upgrade boto3 cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . ./activate_venv.sh mongo aws_e2e_assume_role.js - command: shell.exec type: test @@ -571,13 +565,8 @@ functions: echo "This platform does not support the EC2 auth test, skipping..." exit 0 fi - # The mongovenv was created earlier in "run aws auth test with assume role credentials". - if [ "Windows_NT" = "$OS" ]; then - . mongovenv/Scripts/activate - else - . mongovenv/bin/activate - fi cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . ./activate_venv.sh mongo aws_e2e_ec2.js - command: shell.exec type: test @@ -640,13 +629,8 @@ functions: echo "This platform does not support the ECS auth test, skipping..." exit 0 fi - # The mongovenv was created earlier in "run aws auth test with assume role credentials". - if [ "Windows_NT" = "$OS" ]; then - . mongovenv/Scripts/activate - else - . mongovenv/bin/activate - fi cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . ./activate_venv.sh cat < setup.js const mongo_binaries = "$MONGODB_BINARIES"; const project_dir = "$PROJECT_DIRECTORY"; From 846e92528043476155eae24049ab15aca6ca3bad Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Feb 2021 13:16:18 -0800 Subject: [PATCH 0300/2111] PYTHON-2537 Fix benchmark when using Python 3 (#565) --- .evergreen/run-perf-tests.sh | 2 +- test/performance/perf_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index 41f154b739..cdf598c3bf 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -22,7 +22,7 @@ pip install simplejson python setup.py build_ext -i start_time=$(date +%s) -python test/performance/perf_test.py +python test/performance/perf_test.py --locals end_time=$(date +%s) elapsed_secs=$((end_time-start_time)) diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 4a2ba2fea5..9f6e268df0 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -399,7 +399,7 @@ def insert_json_file_with_file_id(filename): def read_json_file(filename): coll = proc_client.perftest.corpus - temp = tempfile.TemporaryFile() + temp = tempfile.TemporaryFile(mode='w') try: temp.writelines( [json.dumps(doc) + '\n' for From 0217ba32bd655c927b1a70ab81895a2d5d22323e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Feb 2021 16:01:31 -0800 Subject: [PATCH 0301/2111] PYTHON-1974 Remove manylinux containers only needed for 3.4 (#566) --- .evergreen/build-manylinux.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 8c8c101f86..aa81a87a0a 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -2,10 +2,7 @@ docker version -# 2020-03-20-2fda31c Was the last release to include Python 3.4. -images=(quay.io/pypa/manylinux1_x86_64:2020-03-20-2fda31c \ - quay.io/pypa/manylinux1_i686:2020-03-20-2fda31c \ - quay.io/pypa/manylinux1_x86_64 \ +images=(quay.io/pypa/manylinux1_x86_64 \ quay.io/pypa/manylinux1_i686 \ quay.io/pypa/manylinux2014_x86_64 \ quay.io/pypa/manylinux2014_i686 \ From 913860334ecdf0160514b9c2723fac3024391f3e Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 3 Feb 2021 13:55:59 -0800 Subject: [PATCH 0302/2111] PYTHON-2386 Drop support for Python 3.5 --- .evergreen/build-mac.sh | 2 +- .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 1 - .evergreen/build-windows.sh | 2 +- .evergreen/config.yml | 49 +++++++++----------------- .evergreen/utils.sh | 10 +----- .travis.yml | 2 -- CONTRIBUTING.rst | 2 +- README.rst | 2 +- doc/faq.rst | 2 +- doc/installation.rst | 4 +-- doc/python3.rst | 2 +- pymongo/ssl_context.py | 3 -- setup.py | 7 ++-- test/test_encryption.py | 5 --- 15 files changed, 29 insertions(+), 66 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index fd6d571ef1..f8f40e263f 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.5 3.6 3.7 3.8 3.9; do +for VERSION in 3.6 3.7 3.8 3.9; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index dda1089f0d..2a1f169482 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp35|cp36|cp37|cp38|cp39) ]]; then + if [[ ! $PYTHON =~ (cp36|cp37|cp38|cp39) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index aa81a87a0a..a100a6fc80 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -19,7 +19,6 @@ ls dist # Check for any unexpected files. unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp35*' -or \ -iname '*cp36*' -or \ -iname '*cp37*' -or \ -iname '*cp38*' -or \ diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index ca1c198ba8..f44cff18ac 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 35 36 37 38 39; do +for VERSION in 36 37 38 39; do _pythons=(C:/Python/Python${VERSION}/python.exe \ C:/Python/32/Python${VERSION}/python.exe) for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2dcc66bcd7..23445d75eb 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1762,11 +1762,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "3.5" - display_name: "Python 3.5" - batchtime: 10080 # 7 days - variables: - PYTHON_BINARY: "/opt/python/3.5/bin/python3" - id: "3.6" display_name: "Python 3.6" variables: @@ -1783,10 +1778,6 @@ axes: display_name: "Python 3.9" variables: PYTHON_BINARY: "/opt/python/3.9/bin/python3" - - id: "pypy3.5" - display_name: "PyPy 3.5" - variables: - PYTHON_BINARY: "/opt/python/pypy3.5/bin/pypy3" - id: "pypy3.6" display_name: "PyPy 3.6" variables: @@ -1799,10 +1790,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "3.5" - display_name: "Python 3.5" - variables: - PYTHON_BINARY: "C:/python/Python35/python.exe" - id: "3.6" display_name: "Python 3.6" variables: @@ -1823,10 +1810,6 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "3.5" - display_name: "32-bit Python 3.5" - variables: - PYTHON_BINARY: "C:/python/32/Python35/python.exe" - id: "3.6" display_name: "32-bit Python 3.6" variables: @@ -2116,7 +2099,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: &rhel62-pythons ["3.5", "3.6", "pypy3.5", "pypy3.6"] + python-version: &rhel62-pythons ["3.6", "pypy3.6"] auth: "*" ssl: "*" coverage: "*" @@ -2132,14 +2115,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: platform: ubuntu-16.04 - python-version: ["3.5", "3.6", "3.7", "3.8", "3.9"] + python-version: ["3.6", "3.7", "3.8", "3.9"] auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-16.04 - python-version: ["3.5", "3.6", "3.8", "3.9"] + python-version: ["3.6", "3.8", "3.9"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2152,7 +2135,7 @@ buildvariants: - matrix_name: "tests-pyopenssl-pypy" matrix_spec: platform: debian92 - python-version: ["pypy3.5", "pypy3.6"] + python-version: ["pypy3.6"] auth: "auth" ssl: "ssl" pyopenssl: "*" @@ -2187,7 +2170,7 @@ buildvariants: matrix_spec: platform: rhel62 # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["3.5", "3.6"] + python-version: ["3.6"] auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2199,7 +2182,7 @@ buildvariants: - matrix_name: "tests-pypy-debian-test-encryption" matrix_spec: platform: debian92 - python-version: ["pypy3.5", "pypy3.6"] + python-version: ["pypy3.6"] auth-ssl: noauth-nossl encryption: "*" display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" @@ -2215,7 +2198,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: rhel62 - python-version: ["pypy3.5", "pypy3.6"] + python-version: ["pypy3.6"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2242,13 +2225,13 @@ buildvariants: matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["3.5", "3.6", "3.7", "3.8", "3.9", "pypy3.5", "pypy3.6"] + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6"] c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-16.04 - python-version: ["pypy3.5", "pypy3.6"] + python-version: ["pypy3.6"] c-extensions: "with-c-extensions" compression: "*" # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy @@ -2281,7 +2264,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: rhel62 - python-version: ["pypy3.5", "pypy3.6"] + python-version: ["pypy3.6"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2420,7 +2403,7 @@ buildvariants: matrix_spec: platform: rhel62 # The toolchain doesn't currently include mod-wsgi - # built for CPython 3.5 or 3.8, mod-wsgi doesn't yet + # built for CPython 3.8, mod-wsgi doesn't yet # claim to support 3.9. Python 3.7+ won't build on rhel6 # and we need to do some work to migrate mod-wsgi testing # to a different OS. For now we're stuck just testing with @@ -2443,7 +2426,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: platform: rhel62 - python-version: ["3.5"] + python-version: ["3.6"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" @@ -2485,7 +2468,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-16.04 - python-version: ["3.5", "3.9"] + python-version: ["3.6", "3.9"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2507,7 +2490,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: ubuntu-16.04 - python-version: ["3.5", "3.8", "3.9"] + python-version: ["3.6", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2519,7 +2502,7 @@ buildvariants: - matrix_name: "ocsp-test-pypy" matrix_spec: platform: debian92 - python-version: ["pypy3.5", "pypy3.6"] + python-version: ["pypy3.6"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" @@ -2531,7 +2514,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.5", "3.9"] + python-version-windows: ["3.6", "3.9"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 0d741fb59d..d6b9ebecc7 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -22,15 +22,7 @@ createvirtualenv () { else . $VENVPATH/bin/activate fi - # Upgrade to the latest versions of pip setuptools wheel so that - # pip can always download the latest cryptography+cffi wheels. - PYTHON_VERSION=$(python -c 'import sys;print("%s.%s" % sys.version_info[:2])') - if [[ $PYTHON_VERSION == "3.5" ]]; then - # pip 21 will drop support for 3.5. - python -m pip install --upgrade 'pip<21' - else - python -m pip install --upgrade pip - fi + python -m pip install --upgrade pip python -m pip install --upgrade setuptools wheel } diff --git a/.travis.yml b/.travis.yml index b13397eacb..5f5499235b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,9 @@ language: python python: - - 3.5 - 3.6 - 3.7 - 3.8 - - pypy3.5 services: - mongodb diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index b9a9856e30..cf451172a4 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,7 +19,7 @@ that might not be of interest or that has already been addressed. Supported Interpreters ---------------------- -PyMongo supports CPython 3.5+ and PyPy3.5+. Language +PyMongo supports CPython 3.6+ and PyPy3.6+. Language features not supported by all interpreters can not be used. Style Guide diff --git a/README.rst b/README.rst index 05008492cc..5b25e69f12 100644 --- a/README.rst +++ b/README.rst @@ -89,7 +89,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 3.5+ and PyPy3.5+. +PyMongo supports CPython 3.6+ and PyPy3.6+. Optional dependencies: diff --git a/doc/faq.rst b/doc/faq.rst index 5efb380796..c6e959b61b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -134,7 +134,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.5+ and PyPy3.5+. See the :doc:`python3` for details. +PyMongo supports CPython 3.6+ and PyPy3.6+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index aa17087b7c..72e478cfc4 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.5+ and PyPy3.5+. +PyMongo supports CPython 3.6+ and PyPy3.6+. Optional dependencies: @@ -133,7 +133,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.5+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.6+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/python3.rst b/doc/python3.rst index ce45ee499e..e001c55c8e 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -6,7 +6,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.5+ and PyPy3.5+. +PyMongo supports CPython 3.6+ and PyPy3.6+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 5bcbc5d9c3..2f35676f87 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -38,7 +38,4 @@ # making it the obvious version to start using SSLConext.check_hostname. # Python 3.6 might have been a good version, but it suffers # from https://bugs.python.org/issue32185. -# We'll use our bundled match_hostname for older Python -# versions, which also supports IP address matching -# with Python < 3.5. CHECK_HOSTNAME_SAFE = _sys.version_info[:2] >= (3, 7) diff --git a/setup.py b/setup.py index 6ff561a098..89abad91cc 100755 --- a/setup.py +++ b/setup.py @@ -5,8 +5,8 @@ import warnings -if sys.version_info[:2] < (3, 5): - raise RuntimeError("Python version >= 3.5 required.") +if sys.version_info[:2] < (3, 6): + raise RuntimeError("Python version >= 3.6 required.") # Hack to silence atexit traceback in some Python versions @@ -327,7 +327,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=3.5", + python_requires=">=3.6", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", @@ -337,7 +337,6 @@ def build_extension(self, ext): "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", diff --git a/test/test_encryption.py b/test/test_encryption.py index 90da8b3dcf..938628e8c8 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -504,11 +504,6 @@ def maybe_skip_scenario(self, test): desc = test['description'].lower() if 'type=symbol' in desc: self.skipTest('PyMongo does not support the symbol type') - if desc == 'explain a find with deterministic encryption': - # PyPy and Python 3.6+ have ordered dict. - if sys.version_info[:2] < (3, 6) and 'PyPy' not in sys.version: - self.skipTest( - 'explain test does not work without ordered dict') def setup_scenario(self, scenario_def): """Override a test's setup.""" From c15028a6c714519e5ec86d1a9bb50e95a04de36b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 11 Feb 2021 16:00:48 -0800 Subject: [PATCH 0303/2111] PYTHON-2578 Improve clarity of TLS settings for KMS requests (#567) Note that cert_reqs=None and cert_reqs=CERT_REQUIRED are identical so this does not change any behavior. --- pymongo/encryption.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a1edf45dce..a54a9b13a9 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -49,11 +49,15 @@ from pymongo.mongo_client import MongoClient from pymongo.pool import _configured_socket, PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.ssl_support import get_ssl_context +from pymongo.ssl_support import get_ssl_context, HAVE_SSL from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern from pymongo.daemon import _spawn_daemon +if HAVE_SSL: + from ssl import CERT_REQUIRED +else: + CERT_REQUIRED = None _HTTPS_PORT = 443 _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. @@ -107,7 +111,17 @@ def kms_request(self, kms_context): endpoint = kms_context.endpoint message = kms_context.message host, port = parse_host(endpoint, _HTTPS_PORT) - ctx = get_ssl_context(None, None, None, None, None, None, True, True) + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # keyfile + None, # passphrase + None, # ca_certs + CERT_REQUIRED, # cert_reqs + None, # crlfile + True, # match_hostname + True) # check_ocsp_endpoint opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, socket_timeout=_KMS_CONNECT_TIMEOUT, ssl_context=ctx) From 95974617bd871121596853d349d372469a0accb1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 17 Feb 2021 14:16:07 -0800 Subject: [PATCH 0304/2111] PYTHON-1314 Remove Database.authenticate and Database.logout (#568) --- doc/changelog.rst | 2 + doc/migrate-to-pymongo4.rst | 21 +++ pymongo/auth.py | 4 - pymongo/client_session.py | 3 +- pymongo/database.py | 110 ------------ pymongo/mongo_client.py | 50 +----- pymongo/pool.py | 22 +-- test/__init__.py | 11 ++ test/test_auth.py | 307 +++++++++++----------------------- test/test_client.py | 40 +---- test/test_database.py | 89 +--------- test/test_legacy_api.py | 18 +- test/test_read_preferences.py | 10 +- test/test_session.py | 108 +----------- test/test_ssl.py | 11 +- test/test_threads.py | 34 +--- 16 files changed, 154 insertions(+), 686 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 7b0d812113..a9396072a3 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -23,6 +23,8 @@ Breaking Changes in 4.0 :attr:`pymongo.mongo_client.MongoClient.is_locked`. - Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. - Removed :meth:`pymongo.database.Database.collection_names`. +- Removed :meth:`pymongo.database.Database.authenticate` and + :meth:`pymongo.database.Database.logout`. - Removed :meth:`pymongo.database.Database.error`, :meth:`pymongo.database.Database.last_status`, :meth:`pymongo.database.Database.previous_error`, diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 059483b09d..fa847a83b7 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -121,6 +121,27 @@ can be changed to this:: Database -------- +Database.authenticate and Database.logout are removed +..................................................... + +Removed :meth:`pymongo.database.Database.authenticate` and +:meth:`pymongo.database.Database.logout`. Authenticating multiple users +on the same client conflicts with support for logical sessions in MongoDB 3.6+. +To authenticate as multiple users, create multiple instances of +:class:`~pymongo.mongo_client.MongoClient`. Code like this:: + + client = MongoClient() + client.admin.authenticate('user1', 'pass1') + client.admin.authenticate('user2', 'pass2') + +can be changed to this:: + + client1 = MongoClient(username='user1', password='pass1') + client2 = MongoClient(username='user2', password='pass2') + +Alternatively, create a single user that contains all the authentication privileges +required by your application. + Database.collection_names is removed .................................... diff --git a/pymongo/auth.py b/pymongo/auth.py index 1e31e25949..4d22717c28 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -580,7 +580,3 @@ def authenticate(credentials, sock_info): auth_func = _AUTH_MAP.get(mechanism) auth_func(credentials, sock_info) - -def logout(source, sock_info): - """Log out from a database.""" - sock_info.command(source, {'logout': 1}) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 245ba5d455..5e3f081940 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -351,12 +351,11 @@ class ClientSession(object): :class:`ClientSession`, call :meth:`~pymongo.mongo_client.MongoClient.start_session`. """ - def __init__(self, client, server_session, options, authset, implicit): + def __init__(self, client, server_session, options, implicit): # A MongoClient, a _ServerSession, a SessionOptions, and a set. self._client = client self._server_session = server_session self._options = options - self._authset = authset self._cluster_time = None self._operation_time = None # Is this an implicitly created session? diff --git a/pymongo/database.py b/pymongo/database.py index b1ae52d939..a810c67a6c 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -980,116 +980,6 @@ def __next__(self): next = __next__ - def authenticate(self, name=None, password=None, - source=None, mechanism='DEFAULT', **kwargs): - """**DEPRECATED**: Authenticate to use this database. - - .. warning:: Starting in MongoDB 3.6, calling :meth:`authenticate` - invalidates all existing cursors. It may also leave logical sessions - open on the server for up to 30 minutes until they time out. - - Authentication lasts for the life of the underlying client - instance, or until :meth:`logout` is called. - - Raises :class:`TypeError` if (required) `name`, (optional) `password`, - or (optional) `source` is not an instance of :class:`basestring` - (:class:`str` in python 3). - - .. note:: - - This method authenticates the current connection, and - will also cause all new :class:`~socket.socket` connections - in the underlying client instance to be authenticated automatically. - - - Authenticating more than once on the same database with different - credentials is not supported. You must call :meth:`logout` before - authenticating with new credentials. - - - When sharing a client instance between multiple threads, all - threads will share the authentication. If you need different - authentication profiles for different purposes you must use - distinct client instances. - - :Parameters: - - `name`: the name of the user to authenticate. Optional when - `mechanism` is MONGODB-X509 and the MongoDB server version is - >= 3.4. - - `password` (optional): the password of the user to authenticate. - Not used with GSSAPI or MONGODB-X509 authentication. - - `source` (optional): the database to authenticate on. If not - specified the current database is used. - - `mechanism` (optional): See :data:`~pymongo.auth.MECHANISMS` for - options. If no mechanism is specified, PyMongo automatically uses - MONGODB-CR when connected to a pre-3.0 version of MongoDB, - SCRAM-SHA-1 when connected to MongoDB 3.0 through 3.6, and - negotiates the mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) when - connected to MongoDB 4.0+. - - `authMechanismProperties` (optional): Used to specify - authentication mechanism specific options. To specify the service - name for GSSAPI authentication pass - ``authMechanismProperties='SERVICE_NAME:'``. - To specify the session token for MONGODB-AWS authentication pass - ``authMechanismProperties='AWS_SESSION_TOKEN:'``. - - .. versionchanged:: 3.7 - Added support for SCRAM-SHA-256 with MongoDB 4.0 and later. - - .. versionchanged:: 3.5 - Deprecated. Authenticating multiple users conflicts with support for - logical sessions in MongoDB 3.6. To authenticate as multiple users, - create multiple instances of MongoClient. - - .. versionadded:: 2.8 - Use SCRAM-SHA-1 with MongoDB 3.0 and later. - - .. versionchanged:: 2.5 - Added the `source` and `mechanism` parameters. :meth:`authenticate` - now raises a subclass of :class:`~pymongo.errors.PyMongoError` if - authentication fails due to invalid credentials or configuration - issues. - - .. mongodoc:: authenticate - """ - if name is not None and not isinstance(name, str): - raise TypeError("name must be an instance of str") - if password is not None and not isinstance(password, str): - raise TypeError("password must be an instance of str") - if source is not None and not isinstance(source, str): - raise TypeError("source must be an instance of str") - common.validate_auth_mechanism('mechanism', mechanism) - - validated_options = common._CaseInsensitiveDictionary() - for option, value in kwargs.items(): - normalized, val = common.validate_auth_option(option, value) - validated_options[normalized] = val - - credentials = auth._build_credentials_tuple( - mechanism, - source, - name, - password, - validated_options, - self.name) - - self.client._cache_credentials( - self.name, - credentials, - connect=True) - - return True - - def logout(self): - """**DEPRECATED**: Deauthorize use of this database. - - .. warning:: Starting in MongoDB 3.6, calling :meth:`logout` - invalidates all existing cursors. It may also leave logical sessions - open on the server for up to 30 minutes until they time out. - """ - warnings.warn("Database.logout() is deprecated", - DeprecationWarning, stacklevel=2) - - # Sockets will be deauthenticated as they are used. - self.client._purge_credentials(self.name) - def dereference(self, dbref, session=None, **kwargs): """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a9fa282289..35aa0a6229 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -708,7 +708,7 @@ def __init__( self.__all_credentials = {} creds = options.credentials if creds: - self._cache_credentials(creds.source, creds) + self.__all_credentials[creds.source] = creds self._topology_settings = TopologySettings( seeds=seeds, @@ -753,38 +753,6 @@ def target(): self._encrypter = _Encrypter.create( self, self.__options.auto_encryption_opts) - def _cache_credentials(self, source, credentials, connect=False): - """Save a set of authentication credentials. - - The credentials are used to login a socket whenever one is created. - If `connect` is True, verify the credentials on the server first. - """ - # Don't let other threads affect this call's data. - all_credentials = self.__all_credentials.copy() - - if source in all_credentials: - # Nothing to do if we already have these credentials. - if credentials == all_credentials[source]: - return - raise OperationFailure('Another user is already authenticated ' - 'to this database. You must logout first.') - - if connect: - server = self._get_topology().select_server( - writable_preferred_server_selector) - - # get_socket() logs out of the database if logged in with old - # credentials, and logs in with new ones. - with server.get_socket(all_credentials) as sock_info: - sock_info.authenticate(credentials) - - # If several threads run _cache_credentials at once, last one wins. - self.__all_credentials[source] = credentials - - def _purge_credentials(self, source): - """Purge credentials from the authentication cache.""" - self.__all_credentials.pop(source, None) - def _server_property(self, attr_name): """An attribute of the current server's description. @@ -1594,19 +1562,11 @@ def _process_periodic_tasks(self): helpers._handle_exception() def __start_session(self, implicit, **kwargs): - # Driver Sessions Spec: "If startSession is called when multiple users - # are authenticated drivers MUST raise an error with the error message - # 'Cannot call startSession when multiple users are authenticated.'" - authset = set(self.__all_credentials.values()) - if len(authset) > 1: - raise InvalidOperation("Cannot call start_session when" - " multiple users are authenticated") - # Raises ConfigurationError if sessions are not supported. server_session = self._get_server_session() opts = client_session.SessionOptions(**kwargs) return client_session.ClientSession( - self, server_session, opts, authset, implicit) + self, server_session, opts, implicit) def start_session(self, causal_consistency=True, @@ -1617,9 +1577,7 @@ def start_session(self, :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. - Requires MongoDB 3.6. It is an error to call :meth:`start_session` - if this client has been authenticated to multiple databases using the - deprecated method :meth:`~pymongo.database.Database.authenticate`. + Requires MongoDB 3.6. A :class:`~pymongo.client_session.ClientSession` may only be used with the MongoClient that started it. :class:`ClientSession` instances are @@ -1655,7 +1613,7 @@ def _ensure_session(self, session=None): # should always opt-in. return self.__start_session(True, causal_consistency=False) except (ConfigurationError, InvalidOperation): - # Sessions not supported, or multiple users authenticated. + # Sessions not supported. return None @contextlib.contextmanager diff --git a/pymongo/pool.py b/pymongo/pool.py index 33a93105cc..5b5f60a335 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -771,17 +771,10 @@ def check_auth(self, all_credentials): :Parameters: - `all_credentials`: dict, maps auth source to MongoCredential. """ - if all_credentials or self.authset: - cached = set(all_credentials.values()) - authset = self.authset.copy() - - # Logout any credentials that no longer exist in the cache. - for credentials in authset - cached: - auth.logout(credentials.source, self) - self.authset.discard(credentials) - - for credentials in cached - authset: - self.authenticate(credentials) + if all_credentials: + for credentials in all_credentials.values(): + if credentials not in self.authset: + self.authenticate(credentials) # CMAP spec says to publish the ready event only after authenticating # the connection. @@ -807,18 +800,13 @@ def authenticate(self, credentials): def validate_session(self, client, session): """Validate this session before use with client. - Raises error if this session is logged in as a different user or - the client is not the one that created the session. + Raises error if the client is not the one that created the session. """ if session: if session._client is not client: raise InvalidOperation( 'Can only use session with the MongoClient that' ' started it') - if session._authset != self.authset: - raise InvalidOperation( - 'Cannot use session after authenticating with different' - ' credentials') def close_socket(self, reason): """Close this connection with a reason.""" diff --git a/test/__init__.py b/test/__init__.py index e6a3c208d5..063a634b42 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -222,6 +222,17 @@ def __init__(self): server_api = ServerApi(MONGODB_API_VERSION) self.default_client_options["server_api"] = server_api + @property + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + if client_context.auth_enabled: + opts['username'] = db_user + opts['password'] = db_pwd + if self.replica_set_name: + opts['replicaSet'] = self.replica_set_name + return opts + @property def ismaster(self): return self.client.admin.command('isMaster') diff --git a/test/test_auth.py b/test/test_auth.py index 8e178189bc..49081c7cce 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -73,31 +73,6 @@ def run(self): self.success = True -class DBAuthenticateThread(threading.Thread): - """Used in testing threaded authentication. - - This does db.test.find_one() with a 1-second delay to ensure it must - check out and authenticate multiple sockets from the pool concurrently. - - :Parameters: - `db`: An auth-protected db with a 'test' collection containing one - document. - """ - - def __init__(self, db, username, password): - super(DBAuthenticateThread, self).__init__() - self.db = db - self.username = username - self.password = password - self.success = False - - def run(self): - self.db.authenticate(self.username, self.password) - assert self.db.test.find_one({'$where': delay(1)}) is not None - self.success = True - - - class TestGSSAPI(unittest.TestCase): @classmethod @@ -314,24 +289,6 @@ def test_sasl_plain(self): client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): - - with ignore_deprecations(): - client = MongoClient(SASL_HOST, SASL_PORT) - - # Bad username - self.assertRaises(OperationFailure, client.ldap.authenticate, - 'not-user', SASL_PASS, SASL_DB, 'PLAIN') - self.assertRaises(OperationFailure, client.ldap.test.find_one) - self.assertRaises(OperationFailure, client.ldap.test.insert_one, - {"failed": True}) - - # Bad password - self.assertRaises(OperationFailure, client.ldap.authenticate, - SASL_USER, 'not-pwd', SASL_DB, 'PLAIN') - self.assertRaises(OperationFailure, client.ldap.test.find_one) - self.assertRaises(OperationFailure, client.ldap.test.insert_one, - {"failed": True}) - def auth_string(user, password): uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' 'authSource=%s' % (quote_plus(user), @@ -368,12 +325,6 @@ def tearDown(self): def test_scram_sha1(self): host, port = client_context.host, client_context.port - with ignore_deprecations(): - client = rs_or_single_client_noauth() - self.assertTrue(client.pymongo_test.authenticate( - 'user', 'pass', mechanism='SCRAM-SHA-1')) - client.pymongo_test.command('dbstats') - client = rs_or_single_client_noauth( 'mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' % (host, port)) @@ -391,6 +342,7 @@ def test_scram_sha1(self): db.command('dbstats') +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation class TestSCRAM(unittest.TestCase): @client_context.require_auth @@ -432,148 +384,50 @@ def test_scram_skip_empty_exchange(self): self.assertEqual( started, ['saslStart', 'saslContinue', 'saslContinue']) - @ignore_deprecations def test_scram(self): - host, port = client_context.host, client_context.port - + # Step 1: create users client_context.create_user( - 'testscram', - 'sha1', - 'pwd', - roles=['dbOwner'], + 'testscram', 'sha1', 'pwd', roles=['dbOwner'], mechanisms=['SCRAM-SHA-1']) - client_context.create_user( - 'testscram', - 'sha256', - 'pwd', - roles=['dbOwner'], + 'testscram', 'sha256', 'pwd', roles=['dbOwner'], mechanisms=['SCRAM-SHA-256']) - client_context.create_user( - 'testscram', - 'both', - 'pwd', - roles=['dbOwner'], + 'testscram', 'both', 'pwd', roles=['dbOwner'], mechanisms=['SCRAM-SHA-1', 'SCRAM-SHA-256']) + # Step 2: verify auth success cases client = rs_or_single_client_noauth( - event_listeners=[self.listener]) - self.assertTrue( - client.testscram.authenticate('sha1', 'pwd')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'sha1', 'pwd', mechanism='SCRAM-SHA-1')) + username='sha1', password='pwd', authSource='testscram') client.testscram.command('dbstats') - client.testscram.logout() - self.assertRaises( - OperationFailure, - client.testscram.authenticate, - 'sha1', 'pwd', mechanism='SCRAM-SHA-256') - - self.assertTrue( - client.testscram.authenticate('sha256', 'pwd')) + + client = rs_or_single_client_noauth( + username='sha1', password='pwd', authSource='testscram', + authMechanism='SCRAM-SHA-1') client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'sha256', 'pwd', mechanism='SCRAM-SHA-256')) + + client = rs_or_single_client_noauth( + username='sha256', password='pwd', authSource='testscram') client.testscram.command('dbstats') - client.testscram.logout() - self.assertRaises( - OperationFailure, - client.testscram.authenticate, - 'sha256', 'pwd', mechanism='SCRAM-SHA-1') - self.listener.results.clear() - self.assertTrue( - client.testscram.authenticate('both', 'pwd')) - started = self.listener.results['started'][0] - self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') + client = rs_or_single_client_noauth( + username='sha256', password='pwd', authSource='testscram', + authMechanism='SCRAM-SHA-256') client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'both', 'pwd', mechanism='SCRAM-SHA-256')) + + # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 + client = rs_or_single_client_noauth( + username='both', password='pwd', authSource='testscram', + authMechanism='SCRAM-SHA-1') client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'both', 'pwd', mechanism='SCRAM-SHA-1')) + client = rs_or_single_client_noauth( + username='both', password='pwd', authSource='testscram', + authMechanism='SCRAM-SHA-256') client.testscram.command('dbstats') - client.testscram.logout() - - self.assertRaises( - OperationFailure, - client.testscram.authenticate, - 'not-a-user', 'pwd') - - if HAVE_STRINGPREP: - # Test the use of SASLprep on passwords. For example, - # saslprep('\u2136') becomes 'IV' and saslprep('I\u00ADX') - # becomes 'IX'. SASLprep is only supported when the standard - # library provides stringprep. - client_context.create_user( - 'testscram', - '\u2168', - '\u2163', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) - - client_context.create_user( - 'testscram', - 'IX', - 'IX', - roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) - - self.assertTrue( - client.testscram.authenticate('\u2168', '\u2163')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - '\u2168', '\u2163', mechanism='SCRAM-SHA-256')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate('\u2168', 'IV')) - client.testscram.command('dbstats') - client.testscram.logout() - - self.assertTrue( - client.testscram.authenticate('IX', 'I\u00ADX')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate( - 'IX', 'I\u00ADX', mechanism='SCRAM-SHA-256')) - client.testscram.command('dbstats') - client.testscram.logout() - self.assertTrue( - client.testscram.authenticate('IX', 'IX')) - client.testscram.command('dbstats') - client.testscram.logout() - - client = rs_or_single_client_noauth( - 'mongodb://\u2168:\u2163@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://\u2168:IV@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - - client = rs_or_single_client_noauth( - 'mongodb://IX:I\u00ADX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://IX:IX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') self.listener.results.clear() client = rs_or_single_client_noauth( - 'mongodb://both:pwd@%s:%d/testscram' % (host, port), + username='both', password='pwd', authSource='testscram', event_listeners=[self.listener]) client.testscram.command('dbstats') if client_context.version.at_least(4, 4, -1): @@ -584,17 +438,26 @@ def test_scram(self): started = self.listener.results['started'][0] self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') + # Step 3: verify auth failure conditions client = rs_or_single_client_noauth( - 'mongodb://both:pwd@%s:%d/testscram?authMechanism=SCRAM-SHA-1' - % (host, port)) - client.testscram.command('dbstats') + username='sha1', password='pwd', authSource='testscram', + authMechanism='SCRAM-SHA-256') + with self.assertRaises(OperationFailure): + client.testscram.command('dbstats') client = rs_or_single_client_noauth( - 'mongodb://both:pwd@%s:%d/testscram?authMechanism=SCRAM-SHA-256' - % (host, port)) - client.testscram.command('dbstats') + username='sha256', password='pwd', authSource='testscram', + authMechanism='SCRAM-SHA-1') + with self.assertRaises(OperationFailure): + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + username='not-a-user', password='pwd', authSource='testscram') + with self.assertRaises(OperationFailure): + client.testscram.command('dbstats') if client_context.is_rs: + host, port = client_context.host, client_context.port uri = ('mongodb://both:pwd@%s:%d/testscram' '?replicaSet=%s' % (host, port, client_context.replica_set_name)) @@ -604,6 +467,62 @@ def test_scram(self): 'testscram', read_preference=ReadPreference.SECONDARY) db.command('dbstats') + @unittest.skipUnless(HAVE_STRINGPREP, 'Cannot test without stringprep') + def test_scram_saslprep(self): + # Step 4: test SASLprep + host, port = client_context.host, client_context.port + # Test the use of SASLprep on passwords. For example, + # saslprep('\u2136') becomes 'IV' and saslprep('I\u00ADX') + # becomes 'IX'. SASLprep is only supported when the standard + # library provides stringprep. + client_context.create_user( + 'testscram', '\u2168', '\u2163', roles=['dbOwner'], + mechanisms=['SCRAM-SHA-256']) + client_context.create_user( + 'testscram', 'IX', 'IX', roles=['dbOwner'], + mechanisms=['SCRAM-SHA-256']) + + client = rs_or_single_client_noauth( + username='\u2168', password='\u2163', authSource='testscram') + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + username='\u2168', password='\u2163', authSource='testscram', + authMechanism='SCRAM-SHA-256') + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + username='\u2168', password='IV', authSource='testscram') + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + username='IX', password='I\u00ADX', authSource='testscram') + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + username='IX', password='I\u00ADX', authSource='testscram', + authMechanism='SCRAM-SHA-256') + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + username='IX', password='IX', authSource='testscram', + authMechanism='SCRAM-SHA-256') + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + 'mongodb://\u2168:\u2163@%s:%d/testscram' % (host, port)) + client.testscram.command('dbstats') + client = rs_or_single_client_noauth( + 'mongodb://\u2168:IV@%s:%d/testscram' % (host, port)) + client.testscram.command('dbstats') + + client = rs_or_single_client_noauth( + 'mongodb://IX:I\u00ADX@%s:%d/testscram' % (host, port)) + client.testscram.command('dbstats') + client = rs_or_single_client_noauth( + 'mongodb://IX:IX@%s:%d/testscram' % (host, port)) + client.testscram.command('dbstats') + def test_cache(self): client = single_client() # Force authentication. @@ -651,38 +570,6 @@ def test_scram_threaded(self): thread.join() self.assertTrue(thread.success) -class TestThreadedAuth(unittest.TestCase): - - @client_context.require_auth - def test_db_authenticate_threaded(self): - - db = client_context.client.db - coll = db.test - coll.drop() - coll.insert_one({'_id': 1}) - - client_context.create_user( - 'db', - 'user', - 'pass', - roles=['dbOwner']) - self.addCleanup(db.command, 'dropUser', 'user') - - db = rs_or_single_client_noauth().db - db.authenticate('user', 'pass') - # No error. - db.authenticate('user', 'pass') - - db = rs_or_single_client_noauth().db - threads = [] - for _ in range(4): - threads.append(DBAuthenticateThread(db, 'user', 'pass')) - for thread in threads: - thread.start() - for thread in threads: - thread.join() - self.assertTrue(thread.success) - class TestAuthURIOptions(unittest.TestCase): diff --git a/test/test_client.py b/test/test_client.py index b5add1df79..9f8135dc71 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -35,7 +35,7 @@ from bson.son import SON from bson.tz_util import utc import pymongo -from pymongo import auth, message +from pymongo import message from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS from pymongo.command_cursor import CommandCursor from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD @@ -80,7 +80,6 @@ FunctionCallRecorder, get_pool, gevent_monkey_patched, - ignore_deprecations, is_greenthread_patched, lazy_client_trial, NTHREADS, @@ -908,37 +907,6 @@ def test_username_and_password(self): with self.assertRaises(OperationFailure): rs_or_single_client(username="ad min", password="foo").server_info() - @client_context.require_auth - @ignore_deprecations - def test_multiple_logins(self): - client_context.create_user( - 'pymongo_test', 'user1', 'pass', roles=['readWrite']) - client_context.create_user( - 'pymongo_test', 'user2', 'pass', roles=['readWrite']) - self.addCleanup(remove_all_users, self.client.pymongo_test) - - client = rs_or_single_client_noauth( - "mongodb://user1:pass@%s:%d/pymongo_test" % ( - client_context.host, client_context.port)) - - client.pymongo_test.test.find_one() - with self.assertRaises(OperationFailure): - # Can't log in to the same database with multiple users. - client.pymongo_test.authenticate('user2', 'pass') - - client.pymongo_test.test.find_one() - client.pymongo_test.logout() - with self.assertRaises(OperationFailure): - client.pymongo_test.test.find_one() - - client.pymongo_test.authenticate('user2', 'pass') - client.pymongo_test.test.find_one() - - with self.assertRaises(OperationFailure): - client.pymongo_test.authenticate('user1', 'pass') - - client.pymongo_test.test.find_one() - @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( @@ -1306,12 +1274,6 @@ def test_auth_network_error(self): waitQueueTimeoutMS=1, retryReads=False)) - # Simulate an authenticate() call on a different socket. - credentials = auth._build_credentials_tuple( - 'DEFAULT', 'admin', db_user, db_pwd, {}, None) - - c._cache_credentials('test', credentials, connect=False) - # Cause a network error on the actual socket. pool = get_pool(c) socket_info = one(pool.sockets) diff --git a/test/test_database.py b/test/test_database.py index ef91aebac4..e910c4b9c9 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -17,7 +17,6 @@ import datetime import re import sys -import warnings sys.path[0:0] = [""] @@ -43,17 +42,12 @@ from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.saslprep import HAVE_STRINGPREP from pymongo.write_concern import WriteConcern from test import (client_context, SkipTest, unittest, IntegrationTest) -from test.utils import (EventListener, - ignore_deprecations, - remove_all_users, - rs_or_single_client_noauth, - rs_or_single_client, +from test.utils import (rs_or_single_client, server_started_with_auth, wait_until, IMPOSSIBLE_WRITE_CONCERN, @@ -469,87 +463,6 @@ def test_password_digest(self): self.assertEqual(auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73") - @client_context.require_auth - @ignore_deprecations - def test_authenticate_multiple(self): - # "self.client" is logged in as root. - self.client.drop_database("pymongo_test") - self.client.drop_database("pymongo_test1") - users_db_auth = self.client.pymongo_test - - client_context.create_user( - 'admin', - 'ro-admin', - 'pass', - roles=["userAdmin", "readAnyDatabase"]) - - self.addCleanup(client_context.drop_user, 'admin', 'ro-admin') - client_context.create_user( - 'pymongo_test', 'user', 'pass', roles=["userAdmin", "readWrite"]) - self.addCleanup(remove_all_users, users_db_auth) - - # Non-root client. - listener = EventListener() - client = rs_or_single_client_noauth(event_listeners=[listener]) - admin_db = client.admin - users_db = client.pymongo_test - other_db = client.pymongo_test1 - - self.assertRaises(OperationFailure, users_db.test.find_one) - self.assertEqual(listener.started_command_names(), ['find']) - listener.reset() - - # Regular user should be able to query its own db, but - # no other. - users_db.authenticate('user', 'pass') - if client_context.version.at_least(3, 0): - self.assertEqual(listener.started_command_names()[0], 'saslStart') - else: - self.assertEqual(listener.started_command_names()[0], 'getnonce') - - self.assertEqual(0, users_db.test.count_documents({})) - self.assertRaises(OperationFailure, other_db.test.find_one) - - listener.reset() - # Admin read-only user should be able to query any db, - # but not write. - admin_db.authenticate('ro-admin', 'pass') - if client_context.version.at_least(3, 0): - self.assertEqual(listener.started_command_names()[0], 'saslStart') - else: - self.assertEqual(listener.started_command_names()[0], 'getnonce') - self.assertEqual(None, other_db.test.find_one()) - self.assertRaises(OperationFailure, - other_db.test.insert_one, {}) - - # Close all sockets. - client.close() - - listener.reset() - # We should still be able to write to the regular user's db. - self.assertTrue(users_db.test.delete_many({})) - names = listener.started_command_names() - if client_context.version.at_least(4, 4, -1): - # No speculation with multiple users (but we do skipEmptyExchange). - self.assertEqual( - names, ['saslStart', 'saslContinue', 'saslStart', - 'saslContinue', 'delete']) - elif client_context.version.at_least(3, 0): - self.assertEqual( - names, ['saslStart', 'saslContinue', 'saslContinue', - 'saslStart', 'saslContinue', 'saslContinue', 'delete']) - else: - self.assertEqual( - names, ['getnonce', 'authenticate', - 'getnonce', 'authenticate', 'delete']) - - # And read from other dbs... - self.assertEqual(0, other_db.test.count_documents({})) - - # But still not write to other dbs. - self.assertRaises(OperationFailure, - other_db.test.insert_one, {}) - def test_id_ordering(self): # PyMongo attempts to have _id show up first # when you iterate key/value pairs in a document. diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 333aaeb89a..a28238b95a 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -15,13 +15,9 @@ """Test various legacy / deprecated API features.""" import sys -import uuid sys.path[0:0] = [""] -from bson.binary import PYTHON_LEGACY, STANDARD -from bson.code import Code -from bson.codec_options import CodecOptions from bson.son import SON from pymongo import ASCENDING, GEOHAYSTACK from pymongo.common import partition_node @@ -1113,10 +1109,9 @@ def tearDownClass(cls): def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth() - db = cli.pymongo_test - coll = db.test - db.authenticate('readonly', 'pw') + cli = rs_or_single_client_noauth( + username='readonly', password='pw', authSource='pymongo_test') + coll = cli.pymongo_test.test bulk = coll.initialize_ordered_bulk_op() bulk.insert({'x': 1}) self.assertRaises(OperationFailure, bulk.execute) @@ -1124,10 +1119,9 @@ def test_readonly(self): def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth() - db = cli.pymongo_test - coll = db.test - db.authenticate('noremove', 'pw') + cli = rs_or_single_client_noauth( + username='noremove', password='pw', authSource='pymongo_test') + coll = cli.pymongo_test.test bulk = coll.initialize_ordered_bulk_op() bulk.insert({'x': 1}) bulk.find({'x': 2}).upsert().replace_one({'x': 2}) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 9515099a04..93a282a6d8 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -19,7 +19,6 @@ import pickle import random import sys -import warnings sys.path[0:0] = [""] @@ -39,9 +38,7 @@ from test import (SkipTest, client_context, IntegrationTest, - unittest, - db_user, - db_pwd) + unittest) from test.utils import (connected, ignore_deprecations, one, @@ -312,7 +309,7 @@ def test_nearest(self): class ReadPrefTester(MongoClient): def __init__(self, *args, **kwargs): self.has_read_from = set() - client_options = client_context.default_client_options.copy() + client_options = client_context.client_options client_options.update(kwargs) super(ReadPrefTester, self).__init__(*args, **client_options) @@ -354,11 +351,8 @@ def setUpClass(cls): super(TestCommandAndReadPreference, cls).setUpClass() cls.c = ReadPrefTester( client_context.pair, - replicaSet=client_context.replica_set_name, # Ignore round trip times, to test ReadPreference modes only. localThresholdMS=1000*1000) - if client_context.auth_enabled: - cls.c.admin.authenticate(db_user, db_pwd) cls.client_version = Version.from_client(cls.c) # mapReduce fails if the collection does not exist. coll = cls.c.pymongo_test.get_collection( diff --git a/test/test_session.py b/test/test_session.py index e183338e4d..6f32dac819 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -29,7 +29,7 @@ InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern -from test import IntegrationTest, client_context, db_user, db_pwd, unittest, SkipTest +from test import IntegrationTest, client_context, unittest, SkipTest from test.utils import (ignore_deprecations, rs_or_single_client, EventListener, @@ -1040,112 +1040,6 @@ def test_cluster_time_no_server_support(self): self.assertIsNone(after_cluster_time) -class TestSessionsMultiAuth(IntegrationTest): - @client_context.require_auth - @client_context.require_sessions - def setUp(self): - super(TestSessionsMultiAuth, self).setUp() - - client_context.create_user( - 'pymongo_test', 'second-user', 'pass', roles=['readWrite']) - self.addCleanup(client_context.drop_user, 'pymongo_test','second-user') - - @ignore_deprecations - def test_session_authenticate_multiple(self): - listener = SessionTestListener() - # Logged in as root. - client = rs_or_single_client(event_listeners=[listener]) - db = client.pymongo_test - db.authenticate('second-user', 'pass') - - with self.assertRaises(InvalidOperation): - client.start_session() - - # No implicit sessions. - listener.results.clear() - db.collection.find_one() - event = listener.first_command_started() - self.assertNotIn( - 'lsid', event.command, - "find_one with multi-auth shouldn't have sent lsid with %s" % ( - event.command_name)) - - @ignore_deprecations - def test_explicit_session_logout(self): - listener = SessionTestListener() - - # Changing auth invalidates the session. Start as root. - client = rs_or_single_client(event_listeners=[listener]) - db = client.pymongo_test - db.collection.insert_many([{} for _ in range(10)]) - self.addCleanup(db.collection.drop) - - with client.start_session() as s: - listener.results.clear() - cursor = db.collection.find(session=s).batch_size(2) - next(cursor) - event = listener.first_command_started() - self.assertEqual(event.command_name, 'find') - self.assertEqual( - s.session_id, event.command.get('lsid'), - "find() sent wrong lsid with %s cmd" % (event.command_name,)) - - client.admin.logout() - db.authenticate('second-user', 'pass') - - err = ('Cannot use session after authenticating with different' - ' credentials') - - with self.assertRaisesRegex(InvalidOperation, err): - # Auth has changed between find and getMore. - list(cursor) - - with self.assertRaisesRegex(InvalidOperation, err): - db.collection.bulk_write([InsertOne({})], session=s) - - with self.assertRaisesRegex(InvalidOperation, err): - db.list_collection_names(session=s) - - with self.assertRaisesRegex(InvalidOperation, err): - db.collection.find_one(session=s) - - with self.assertRaisesRegex(InvalidOperation, err): - list(db.collection.aggregate([], session=s)) - - @ignore_deprecations - def test_implicit_session_logout(self): - listener = SessionTestListener() - - # Changing auth doesn't invalidate the session. Start as root. - client = rs_or_single_client(event_listeners=[listener]) - db = client.pymongo_test - - for name, f in [ - ('bulk_write', lambda: db.collection.bulk_write([InsertOne({})])), - ('list_collection_names', db.list_collection_names), - ('find_one', db.collection.find_one), - ('aggregate', lambda: list(db.collection.aggregate([]))) - ]: - def sub_test(): - listener.results.clear() - f() - for event in listener.results['started']: - self.assertIn( - 'lsid', event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) - - # We switch auth without clearing the pool of session ids. The - # server considers these to be new sessions since it's a new user. - # The old sessions time out on the server after 30 minutes. - client.admin.logout() - db.authenticate('second-user', 'pass') - sub_test() - db.logout() - client.admin.authenticate(db_user, db_pwd) - sub_test() - - class TestSessionsNotSupported(IntegrationTest): @client_context.require_version_max(3, 5, 10) def test_sessions_not_supported(self): diff --git a/test/test_ssl.py b/test/test_ssl.py index 82d99d5d18..71ea142a27 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -30,8 +30,6 @@ from pymongo.write_concern import WriteConcern from test import (IntegrationTest, client_context, - db_pwd, - db_user, SkipTest, unittest, HAVE_IPADDRESS) @@ -548,14 +546,7 @@ def test_wincertstore(self): @client_context.require_ssl_certfile def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port - ssl_client = MongoClient( - client_context.pair, - ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) - self.addCleanup(remove_all_users, ssl_client['$external']) - - ssl_client.admin.authenticate(db_user, db_pwd) + self.addCleanup(remove_all_users, client_context.client['$external']) # Give x509 user all necessary privileges. client_context.create_user('$external', MONGODB_X509_USERNAME, roles=[ diff --git a/test/test_threads.py b/test/test_threads.py index 21d24a6b4d..854d3cab05 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -17,13 +17,10 @@ import threading from test import (client_context, - db_user, - db_pwd, IntegrationTest, unittest) -from test.utils import rs_or_single_client_noauth, rs_or_single_client +from test.utils import rs_or_single_client from test.utils import joinall -from pymongo.errors import OperationFailure @client_context.require_connection @@ -203,34 +200,5 @@ def test_client_disconnect(self): self.assertTrue(t.passed) -class TestThreadsAuth(IntegrationTest): - @classmethod - @client_context.require_auth - def setUpClass(cls): - super(TestThreadsAuth, cls).setUpClass() - - def test_auto_auth_login(self): - # Create the database upfront to workaround SERVER-39167. - self.client.auth_test.test.insert_one({}) - self.addCleanup(self.client.drop_database, "auth_test") - client = rs_or_single_client_noauth() - self.assertRaises(OperationFailure, client.auth_test.test.find_one) - - # Admin auth - client.admin.authenticate(db_user, db_pwd) - - nthreads = 10 - threads = [] - for _ in range(nthreads): - t = AutoAuthenticateThreads(client.auth_test.test, 10) - t.start() - threads.append(t) - - joinall(threads) - - for t in threads: - self.assertTrue(t.success) - - if __name__ == "__main__": unittest.main() From 99a4f2845017fe53a3ce9f820d85d2ac1c9de18d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Feb 2021 08:52:36 -0800 Subject: [PATCH 0305/2111] PYTHON-2539 Test AWS temporary credentials via "sessionToken" for CSFLE (#569) --- .evergreen/config.yml | 4 + .evergreen/run-tests.sh | 3 + .evergreen/test-encryption-requirements.txt | 4 +- pymongo/encryption.py | 3 +- pymongo/encryption_options.py | 3 +- .../spec/awsTemporary.json | 237 ++++++++++++++++++ test/test_encryption.py | 51 ++-- 7 files changed, 287 insertions(+), 18 deletions(-) create mode 100644 test/client-side-encryption/spec/awsTemporary.json diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 23445d75eb..02c173d990 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -375,6 +375,10 @@ functions: export FLE_AZURE_CLIENTSECRET="${fle_azure_clientsecret}" export FLE_GCP_EMAIL="${fle_gcp_email}" export FLE_GCP_PRIVATEKEY="${fle_gcp_privatekey}" + # Needed for generating temporary aws credentials. + export AWS_ACCESS_KEY_ID="${fle_aws_key}" + export AWS_SECRET_ACCESS_KEY="${fle_aws_secret}" + export AWS_DEFAULT_REGION=us-east-1 EOT fi - command: shell.exec diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index a431769439..e0e73efc3f 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -134,6 +134,9 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by PREPARE_SHELL for access to mongocryptd. + # Get access to the AWS temporary credentials: + # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN + . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh fi if [ -z "$DATA_LAKE" ]; then diff --git a/.evergreen/test-encryption-requirements.txt b/.evergreen/test-encryption-requirements.txt index b5a752b6b8..d02df867ad 100644 --- a/.evergreen/test-encryption-requirements.txt +++ b/.evergreen/test-encryption-requirements.txt @@ -1,2 +1,4 @@ cffi>=1.12.0,<2 -cryptography>=2,<4 +cryptography>=2 +# boto3 is required by drivers-evergreen-tools/.evergreen/csfle/set-temp-creds.sh +boto3<2 diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a54a9b13a9..cb91b0afac 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -371,7 +371,8 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used - to generate KMS messages. + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. - `azure`: Map with "tenantId", "clientId", and "clientSecret" as strings. Additionally, "identityPlatformEndpoint" may also be specified as a string (defaults to 'login.microsoftonline.com'). diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index da3a0f1913..9ad0f22ba3 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -58,7 +58,8 @@ def __init__(self, kms_providers, key_vault_namespace, - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used - to generate KMS messages. + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. - `azure`: Map with "tenantId", "clientId", and "clientSecret" as strings. Additionally, "identityPlatformEndpoint" may also be specified as a string (defaults to 'login.microsoftonline.com'). diff --git a/test/client-side-encryption/spec/awsTemporary.json b/test/client-side-encryption/spec/awsTemporary.json new file mode 100644 index 0000000000..80257c6c2e --- /dev/null +++ b/test/client-side-encryption/spec/awsTemporary.json @@ -0,0 +1,237 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using the AWS provider with temporary credentials", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "awsTemporary": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "datakeys" + }, + "$db": "keyvault" + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + }, + { + "description": "Insert with invalid temporary credentials", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "awsTemporaryNoSessionToken": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "result": { + "errorContains": "security token" + } + } + ] + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 938628e8c8..cf516c26e1 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -447,6 +447,17 @@ def test_with_statement(self): 'secretAccessKey': os.environ.get('FLE_AWS_SECRET', '') } +AWS_TEMP_CREDS = { + 'accessKeyId': os.environ.get('CSFLE_AWS_TEMP_ACCESS_KEY_ID', ''), + 'secretAccessKey': os.environ.get('CSFLE_AWS_TEMP_SECRET_ACCESS_KEY', ''), + 'sessionToken': os.environ.get('CSFLE_AWS_TEMP_SESSION_TOKEN', '') +} + +AWS_TEMP_NO_SESSION_CREDS = { + 'accessKeyId': os.environ.get('CSFLE_AWS_TEMP_ACCESS_KEY_ID', ''), + 'secretAccessKey': os.environ.get('CSFLE_AWS_TEMP_SECRET_ACCESS_KEY', '') +} + AZURE_CREDS = { 'tenantId': os.environ.get('FLE_AZURE_TENANTID', ''), 'clientId': os.environ.get('FLE_AZURE_CLIENTID', ''), @@ -473,6 +484,16 @@ def parse_auto_encrypt_opts(self, opts): kms_providers['aws'] = AWS_CREDS if not any(AWS_CREDS.values()): self.skipTest('AWS environment credentials are not set') + if 'awsTemporary' in kms_providers: + kms_providers['aws'] = AWS_TEMP_CREDS + del kms_providers['awsTemporary'] + if not any(AWS_TEMP_CREDS.values()): + self.skipTest('AWS Temp environment credentials are not set') + if 'awsTemporaryNoSessionToken' in kms_providers: + kms_providers['aws'] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers['awsTemporaryNoSessionToken'] + if not any(AWS_TEMP_NO_SESSION_CREDS.values()): + self.skipTest('AWS Temp environment credentials are not set') if 'azure' in kms_providers: kms_providers['azure'] = AZURE_CREDS if not any(AZURE_CREDS.values()): @@ -700,17 +721,17 @@ def run_test(self, provider_name): def test_data_key_local(self): self.run_test('local') - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_data_key_aws(self): self.run_test('aws') - @unittest.skipUnless(all(AZURE_CREDS.values()), + @unittest.skipUnless(any(AZURE_CREDS.values()), 'Azure environment credentials are not set') def test_data_key_azure(self): self.run_test('azure') - @unittest.skipUnless(all(GCP_CREDS.values()), + @unittest.skipUnless(any(GCP_CREDS.values()), 'GCP environment credentials are not set') def test_data_key_gcp(self): self.run_test('gcp') @@ -806,7 +827,7 @@ def test_views_are_prohibited(self): class TestCorpus(EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def setUpClass(cls): super(TestCorpus, cls).setUpClass() @@ -1101,7 +1122,7 @@ def run_test_expected_success(self, provider_name, master_key): key_id=data_key_id) self.assertEqual('test', self.client_encryption.decrypt(encrypted)) - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_01_aws_region_key(self): self.run_test_expected_success( @@ -1110,7 +1131,7 @@ def test_01_aws_region_key(self): "key": ("arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0")}) - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_02_aws_region_key_endpoint(self): self.run_test_expected_success( @@ -1120,7 +1141,7 @@ def test_02_aws_region_key_endpoint(self): "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com"}) - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_03_aws_region_key_endpoint_port(self): self.run_test_expected_success( @@ -1130,7 +1151,7 @@ def test_03_aws_region_key_endpoint_port(self): "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:443"}) - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_04_aws_endpoint_invalid_port(self): master_key = { @@ -1144,7 +1165,7 @@ def test_04_aws_endpoint_invalid_port(self): 'aws', master_key=master_key) self.assertIsInstance(ctx.exception.cause, socket.error) - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_05_aws_endpoint_wrong_region(self): master_key = { @@ -1161,7 +1182,7 @@ def test_05_aws_endpoint_wrong_region(self): self.client_encryption.create_data_key( 'aws', master_key=master_key) - @unittest.skipUnless(all(AWS_CREDS.values()), + @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def test_06_aws_endpoint_invalid_host(self): master_key = { @@ -1174,7 +1195,7 @@ def test_06_aws_endpoint_invalid_host(self): self.client_encryption.create_data_key( 'aws', master_key=master_key) - @unittest.skipUnless(all(AZURE_CREDS.values()), + @unittest.skipUnless(any(AZURE_CREDS.values()), 'Azure environment credentials are not set') def test_07_azure(self): master_key = {'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net', @@ -1187,7 +1208,7 @@ def test_07_azure(self): self.client_encryption_invalid.create_data_key( 'azure', master_key=master_key) - @unittest.skipUnless(all(GCP_CREDS.values()), + @unittest.skipUnless(any(GCP_CREDS.values()), 'GCP environment credentials are not set') def test_08_gcp_valid_endpoint(self): master_key = { @@ -1204,7 +1225,7 @@ def test_08_gcp_valid_endpoint(self): self.client_encryption_invalid.create_data_key( 'gcp', master_key=master_key) - @unittest.skipUnless(all(GCP_CREDS.values()), + @unittest.skipUnless(any(GCP_CREDS.values()), 'GCP environment credentials are not set') def test_09_gcp_invalid_endpoint(self): master_key = { @@ -1288,7 +1309,7 @@ def _test_automatic(self, expectation_extjson, payload): class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(all(AZURE_CREDS.values()), + @unittest.skipUnless(any(AZURE_CREDS.values()), 'Azure environment credentials are not set') def setUpClass(cls): cls.KMS_PROVIDER_MAP = {'azure': AZURE_CREDS} @@ -1314,7 +1335,7 @@ def test_automatic(self): class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(all(GCP_CREDS.values()), + @unittest.skipUnless(any(GCP_CREDS.values()), 'GCP environment credentials are not set') def setUpClass(cls): cls.KMS_PROVIDER_MAP = {'gcp': GCP_CREDS} From 87e76bda5db0c5d446d8bccb22d0458afac2f980 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 23 Feb 2021 14:39:18 -0800 Subject: [PATCH 0306/2111] PYTHON-2341 Migrate testing to Amazon1 Also fixes PYTHON-2008, testing mod_wsgi with newer Python versions. Also adds PyPy 3.7 to the test matrix. --- .evergreen/config.yml | 143 ++++++++++++++---------------------------- 1 file changed, 48 insertions(+), 95 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 02c173d990..c678535fd4 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1786,6 +1786,10 @@ axes: display_name: "PyPy 3.6" variables: PYTHON_BINARY: "/opt/python/pypy3.6/bin/pypy3" + - id: "pypy3.7" + display_name: "PyPy 3.7" + variables: + PYTHON_BINARY: "/opt/python/pypy3.7/bin/pypy3" - id: "system-python3" display_name: "Python3" variables: @@ -2099,16 +2103,15 @@ buildvariants: - ".4.4" - ".4.2" -- matrix_name: "tests-python-version-rhel62-test-ssl" +- matrix_name: "tests-python-version-amazon1-test-ssl" matrix_spec: - platform: rhel62 - # RHEL 6.2 does not support Python 3.7.x and later. - python-version: &rhel62-pythons ["3.6", "pypy3.6"] + platform: awslinux + python-version: &amazon1-pythons ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] auth: "*" ssl: "*" coverage: "*" exclude_spec: - - platform: rhel62 + - platform: awslinux python-version: "*" auth: "noauth" ssl: "ssl" @@ -2139,7 +2142,7 @@ buildvariants: - matrix_name: "tests-pyopenssl-pypy" matrix_spec: platform: debian92 - python-version: ["pypy3.6"] + python-version: ["pypy3.6", "pypy3.7"] auth: "auth" ssl: "ssl" pyopenssl: "*" @@ -2170,11 +2173,10 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-rhel62-test-encryption" +- matrix_name: "tests-python-version-amazon1-test-encryption" matrix_spec: - platform: rhel62 - # RHEL 6.2 does not support Python 3.7.x and later. - python-version: ["3.6"] + platform: awslinux + python-version: ["3.6", "3.7", "3.8", "3.9"] auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2186,56 +2188,40 @@ buildvariants: - matrix_name: "tests-pypy-debian-test-encryption" matrix_spec: platform: debian92 - python-version: ["pypy3.6"] + python-version: ["pypy3.6", "pypy3.7"] auth-ssl: noauth-nossl encryption: "*" display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions -- matrix_name: "tests-python-version-rhel62-without-c-extensions" +- matrix_name: "tests-python-version-amazon1-without-c-extensions" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: awslinux + python-version: *amazon1-pythons c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: rhel62 - python-version: ["pypy3.6"] + - platform: awslinux + python-version: ["pypy3.6", "pypy3.7"] c-extensions: "*" auth-ssl: "*" coverage: "*" display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu1604-without-c-extensions" - matrix_spec: - platform: ubuntu-16.04 - python-version: &openssl-102-plus-pythons ["3.7", "3.8", "3.9"] - c-extensions: without-c-extensions - auth-ssl: noauth-nossl - display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" - tasks: - - ".latest" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - matrix_name: "tests-python-version-ubuntu16-compression" matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-16.04 - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6"] + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-16.04 - python-version: ["pypy3.6"] + python-version: ["pypy3.6", "pypy3.7"] c-extensions: "with-c-extensions" compression: "*" # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy @@ -2259,16 +2245,16 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-rhel62" +- matrix_name: "tests-python-version-green-framework-amazon1" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: awslinux + python-version: *amazon1-pythons green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: rhel62 - python-version: ["pypy3.6"] + - platform: awslinux + python-version: ["pypy3.6", "pypy3.7"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2290,34 +2276,10 @@ buildvariants: display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" tasks: *all-server-versions -- matrix_name: "tests-python-version-requires-openssl-102-plus-test-ssl" - matrix_spec: - platform: ubuntu-16.04 - python-version: *openssl-102-plus-pythons - auth-ssl: "*" - display_name: "${python-version} OpenSSL 1.0.2 ${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - -- matrix_name: "tests-python-version-requires-openssl-102-plus-test-encryption" - matrix_spec: - platform: ubuntu-16.04 - python-version: *openssl-102-plus-pythons - auth-ssl: "noauth-nossl" - encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions - - matrix_name: "tests-python-version-supports-openssl-110-test-ssl" matrix_spec: platform: debian92 - python-version: *openssl-102-plus-pythons + python-version: *amazon1-pythons auth-ssl: "*" display_name: "${python-version} OpenSSL 1.1.0 ${platform} ${auth-ssl}" tasks: @@ -2332,16 +2294,16 @@ buildvariants: display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Storage engine tests on RHEL 6.2 (x86_64) with Python 3.6. +# Storage engine tests on Amazon1 (x86_64) with Python 3.6. - matrix_name: "tests-storage-engines" matrix_spec: - platform: rhel62 + platform: awslinux storage-engine: "*" python-version: 3.6 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: rhel62 + platform: awslinux storage-engine: ["inmemory"] python-version: "*" then: @@ -2355,7 +2317,7 @@ buildvariants: - "test-3.2-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: rhel62 + platform: awslinux storage-engine: ["mmapv1"] python-version: "*" then: @@ -2368,17 +2330,17 @@ buildvariants: - "test-3.2-standalone" - if: # No need to test this on later server versions as it becomes the default - platform: rhel62 + platform: awslinux storage-engine: ["wiredtiger"] python-version: "*" then: add_tasks: - "test-3.0-standalone" -# enableTestCommands=0 tests on RHEL 6.2 (x86_64) with Python 3.6. +# enableTestCommands=0 tests on Amazon1 (x86_64) with Python 3.6. - matrix_name: "test-disableTestCommands" matrix_spec: - platform: rhel62 + platform: awslinux disableTestCommands: "*" python-version: "3.6" display_name: "Disable test commands ${python-version} ${platform}" @@ -2387,8 +2349,8 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons + platform: awslinux + python-version: *amazon1-pythons auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" tasks: @@ -2405,15 +2367,14 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: - platform: rhel62 - # The toolchain doesn't currently include mod-wsgi - # built for CPython 3.8, mod-wsgi doesn't yet - # claim to support 3.9. Python 3.7+ won't build on rhel6 - # and we need to do some work to migrate mod-wsgi testing - # to a different OS. For now we're stuck just testing with - # Python 3.6. - python-version: ["3.6"] + platform: awslinux + python-version: ["3.6", "3.7", "3.8", "3.9"] mod-wsgi-version: "*" + exclude_spec: + # mod-wsgi 3.5 won't build against CPython 3.8+ + - platform: awslinux + python-version: ["3.8", "3.9"] + mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: - name: "mod-wsgi-standalone" @@ -2421,7 +2382,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: rhel62 + platform: awslinux python-version: 3.6 display_name: "MockupDB Tests" tasks: @@ -2429,7 +2390,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: rhel62 + platform: awslinux python-version: ["3.6"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -2438,7 +2399,7 @@ buildvariants: - name: "no-server" display_name: "No server test" run_on: - - rhel62-small + - amazon1-2018-test tasks: - name: "no-server" expansions: @@ -2455,16 +2416,8 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: rhel62 - python-version: *rhel62-pythons - display_name: "Atlas connect ${python-version} ${platform}" - tasks: - - name: "atlas-connect" - -- matrix_name: "atlas-connect-openssl-102-plus" - matrix_spec: - platform: debian92 - python-version: *openssl-102-plus-pythons + platform: awslinux + python-version: *amazon1-pythons display_name: "Atlas connect ${python-version} ${platform}" tasks: - name: "atlas-connect" @@ -2506,7 +2459,7 @@ buildvariants: - matrix_name: "ocsp-test-pypy" matrix_spec: platform: debian92 - python-version: ["pypy3.6"] + python-version: ["pypy3.6", "pypy3.7"] mongodb-version: ["4.4", "latest"] auth: "noauth" ssl: "ssl" From 3e977127285adef21c57d2dffe66c7a1f53a4826 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 1 Mar 2021 11:38:10 -0800 Subject: [PATCH 0307/2111] PYTHON-2472 add a metadataClient for CSFLE (#539) --- pymongo/encryption.py | 72 +++--- pymongo/encryption_options.py | 3 +- pymongo/mongo_client.py | 24 +- .../spec/aggregate.json | 24 -- .../spec/awsTemporary.json | 12 - .../client-side-encryption/spec/azureKMS.json | 12 - test/client-side-encryption/spec/basic.json | 24 -- test/client-side-encryption/spec/bulk.json | 12 - test/client-side-encryption/spec/count.json | 12 - .../spec/countDocuments.json | 12 - test/client-side-encryption/spec/delete.json | 24 -- .../client-side-encryption/spec/distinct.json | 12 - test/client-side-encryption/spec/explain.json | 12 - test/client-side-encryption/spec/find.json | 24 -- .../spec/findOneAndDelete.json | 12 - .../spec/findOneAndReplace.json | 12 - .../spec/findOneAndUpdate.json | 12 - test/client-side-encryption/spec/gcpKMS.json | 12 - test/client-side-encryption/spec/getMore.json | 12 - test/client-side-encryption/spec/insert.json | 24 -- .../spec/keyAltName.json | 12 - .../client-side-encryption/spec/localKMS.json | 12 - .../spec/localSchema.json | 12 - .../spec/maxWireVersion.json | 3 + .../spec/missingKey.json | 12 - .../spec/replaceOne.json | 12 - test/client-side-encryption/spec/types.json | 96 -------- .../spec/updateMany.json | 12 - .../spec/updateOne.json | 12 - test/test_encryption.py | 218 ++++++++++++++++++ test/utils.py | 18 ++ 31 files changed, 307 insertions(+), 475 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index cb91b0afac..7f55de0dfb 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -247,23 +247,57 @@ def close(self): class _Encrypter(object): - def __init__(self, io_callbacks, opts): - """Encrypts and decrypts MongoDB commands. + """Encrypts and decrypts MongoDB commands. - This class is used to support automatic encryption and decryption of - MongoDB commands. + This class is used to support automatic encryption and decryption of + MongoDB commands.""" + def __init__(self, client, opts): + """Create a _Encrypter for a client. :Parameters: - - `io_callbacks`: A :class:`MongoCryptCallback`. + - `client`: The encrypted MongoClient. - `opts`: The encrypted client's :class:`AutoEncryptionOpts`. """ if opts._schema_map is None: schema_map = None else: schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + + def _get_internal_client(encrypter, mongo_client): + if mongo_client.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate( + minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split('.', 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client = MongoClient( + opts._mongocryptd_uri, connect=False, + serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS) + + io_callbacks = _EncryptionIO( + metadata_client, key_vault_coll, mongocryptd_client, opts) self._auto_encrypter = AutoEncrypter(io_callbacks, MongoCryptOptions( opts._kms_providers, schema_map)) - self._bypass_auto_encryption = opts._bypass_auto_encryption self._closed = False def encrypt(self, database, cmd, check_keys, codec_options): @@ -313,29 +347,9 @@ def close(self): """Cleanup resources.""" self._closed = True self._auto_encrypter.close() - - @staticmethod - def create(client, opts): - """Create a _CommandEncyptor for a client. - - :Parameters: - - `client`: The encrypted MongoClient. - - `opts`: The encrypted client's :class:`AutoEncryptionOpts`. - - :Returns: - A :class:`_CommandEncrypter` for this client. - """ - key_vault_client = opts._key_vault_client or client - db, coll = opts._key_vault_namespace.split('.', 1) - key_vault_coll = key_vault_client[db][coll] - - mongocryptd_client = MongoClient( - opts._mongocryptd_uri, connect=False, - serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS) - - io_callbacks = _EncryptionIO( - client, key_vault_coll, mongocryptd_client, opts) - return _Encrypter(io_callbacks, opts) + if self._internal_client: + self._internal_client.close() + self._internal_client = None class Algorithm(object): diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 9ad0f22ba3..fd1226c7c6 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -29,7 +29,8 @@ class AutoEncryptionOpts(object): """Options to configure automatic client-side field level encryption.""" def __init__(self, kms_providers, key_vault_namespace, - key_vault_client=None, schema_map=None, + key_vault_client=None, + schema_map=None, bypass_auto_encryption=False, mongocryptd_uri='mongodb://localhost:27020', mongocryptd_bypass_spawn=False, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 35aa0a6229..21e6d66bb5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -489,6 +489,15 @@ def __init__( configures this client to automatically encrypt collection commands and automatically decrypt results. See :ref:`automatic-client-side-encryption` for an example. + If a :class:`MongoClient` is configured with + ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a + separate internal ``MongoClient`` is created if any of the + following are true: + + - A ``key_vault_client`` is not passed to + :class:`~pymongo.encryption_options.AutoEncryptionOpts` + - ``bypass_auto_encrpytion=False`` is passed to + :class:`~pymongo.encryption_options.AutoEncryptionOpts` | **Versioned API options:** | (If not set explicitly, Versioned API will not be enabled.) @@ -607,6 +616,14 @@ def __init__( client.__my_database__ """ + self.__init_kwargs = {'host': host, + 'port': port, + 'document_class': document_class, + 'tz_aware': tz_aware, + 'connect': connect, + 'type_registry': type_registry, + **kwargs} + if host is None: host = self.HOST if isinstance(host, str): @@ -750,9 +767,14 @@ def target(): self._encrypter = None if self.__options.auto_encryption_opts: from pymongo.encryption import _Encrypter - self._encrypter = _Encrypter.create( + self._encrypter = _Encrypter( self, self.__options.auto_encryption_opts) + def _duplicate(self, **kwargs): + args = self.__init_kwargs.copy() + args.update(kwargs) + return MongoClient(**args) + def _server_property(self, attr_name): """An attribute of the current server's description. diff --git a/test/client-side-encryption/spec/aggregate.json b/test/client-side-encryption/spec/aggregate.json index a9e79f9edb..7de725b71d 100644 --- a/test/client-side-encryption/spec/aggregate.json +++ b/test/client-side-encryption/spec/aggregate.json @@ -150,18 +150,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -273,18 +261,6 @@ "command_name": "aggregate" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/awsTemporary.json b/test/client-side-encryption/spec/awsTemporary.json index 80257c6c2e..10eb85feee 100644 --- a/test/client-side-encryption/spec/awsTemporary.json +++ b/test/client-side-encryption/spec/awsTemporary.json @@ -130,18 +130,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/azureKMS.json b/test/client-side-encryption/spec/azureKMS.json index 97af4c8ecf..f0f5329d70 100644 --- a/test/client-side-encryption/spec/azureKMS.json +++ b/test/client-side-encryption/spec/azureKMS.json @@ -139,18 +139,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/basic.json b/test/client-side-encryption/spec/basic.json index 3f9895fd5d..3ed066f530 100644 --- a/test/client-side-encryption/spec/basic.json +++ b/test/client-side-encryption/spec/basic.json @@ -144,18 +144,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -283,18 +271,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/bulk.json index ead90985a1..1b62e5e8ab 100644 --- a/test/client-side-encryption/spec/bulk.json +++ b/test/client-side-encryption/spec/bulk.json @@ -178,18 +178,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/count.json b/test/client-side-encryption/spec/count.json index 24f46a110a..9df8cd639e 100644 --- a/test/client-side-encryption/spec/count.json +++ b/test/client-side-encryption/spec/count.json @@ -149,18 +149,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/countDocuments.json b/test/client-side-encryption/spec/countDocuments.json index 3cf5fbca8b..07ff97f264 100644 --- a/test/client-side-encryption/spec/countDocuments.json +++ b/test/client-side-encryption/spec/countDocuments.json @@ -150,18 +150,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/delete.json b/test/client-side-encryption/spec/delete.json index 30fb453a93..a6f4ffde91 100644 --- a/test/client-side-encryption/spec/delete.json +++ b/test/client-side-encryption/spec/delete.json @@ -151,18 +151,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -276,18 +264,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/distinct.json b/test/client-side-encryption/spec/distinct.json index 7a5f75c4a5..9786b07814 100644 --- a/test/client-side-encryption/spec/distinct.json +++ b/test/client-side-encryption/spec/distinct.json @@ -161,18 +161,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/explain.json b/test/client-side-encryption/spec/explain.json index 5ad46bc238..0e451e4818 100644 --- a/test/client-side-encryption/spec/explain.json +++ b/test/client-side-encryption/spec/explain.json @@ -155,18 +155,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/find.json b/test/client-side-encryption/spec/find.json index b7c5258a13..1feddab0e3 100644 --- a/test/client-side-encryption/spec/find.json +++ b/test/client-side-encryption/spec/find.json @@ -160,18 +160,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -302,18 +290,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/findOneAndDelete.json b/test/client-side-encryption/spec/findOneAndDelete.json index 6261d8601b..e418a4581b 100644 --- a/test/client-side-encryption/spec/findOneAndDelete.json +++ b/test/client-side-encryption/spec/findOneAndDelete.json @@ -148,18 +148,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/findOneAndReplace.json b/test/client-side-encryption/spec/findOneAndReplace.json index d91bc05998..78baca8432 100644 --- a/test/client-side-encryption/spec/findOneAndReplace.json +++ b/test/client-side-encryption/spec/findOneAndReplace.json @@ -147,18 +147,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/findOneAndUpdate.json b/test/client-side-encryption/spec/findOneAndUpdate.json index fad70609ad..1d85851151 100644 --- a/test/client-side-encryption/spec/findOneAndUpdate.json +++ b/test/client-side-encryption/spec/findOneAndUpdate.json @@ -149,18 +149,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/gcpKMS.json b/test/client-side-encryption/spec/gcpKMS.json index a715a7d152..297d5d0dc8 100644 --- a/test/client-side-encryption/spec/gcpKMS.json +++ b/test/client-side-encryption/spec/gcpKMS.json @@ -141,18 +141,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/getMore.json index cf23442226..ee99bf7537 100644 --- a/test/client-side-encryption/spec/getMore.json +++ b/test/client-side-encryption/spec/getMore.json @@ -179,18 +179,6 @@ "command_name": "find" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/insert.json b/test/client-side-encryption/spec/insert.json index 78fa8feba0..cf2910fd7a 100644 --- a/test/client-side-encryption/spec/insert.json +++ b/test/client-side-encryption/spec/insert.json @@ -131,18 +131,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -258,18 +246,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/keyAltName.json b/test/client-side-encryption/spec/keyAltName.json index d062bed453..7f71b9dbeb 100644 --- a/test/client-side-encryption/spec/keyAltName.json +++ b/test/client-side-encryption/spec/keyAltName.json @@ -131,18 +131,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/localKMS.json b/test/client-side-encryption/spec/localKMS.json index e4d25309c4..67c4ba1308 100644 --- a/test/client-side-encryption/spec/localKMS.json +++ b/test/client-side-encryption/spec/localKMS.json @@ -114,18 +114,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/localSchema.json b/test/client-side-encryption/spec/localSchema.json index 7071d6fefd..4698520f6f 100644 --- a/test/client-side-encryption/spec/localSchema.json +++ b/test/client-side-encryption/spec/localSchema.json @@ -136,18 +136,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/maxWireVersion.json b/test/client-side-encryption/spec/maxWireVersion.json index 144786290d..c1088a0ecf 100644 --- a/test/client-side-encryption/spec/maxWireVersion.json +++ b/test/client-side-encryption/spec/maxWireVersion.json @@ -50,6 +50,9 @@ "autoEncryptOpts": { "kmsProviders": { "aws": {} + }, + "extraOptions": { + "mongocryptdBypassSpawn": true } } }, diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/missingKey.json index ac8e8320b0..275147bb72 100644 --- a/test/client-side-encryption/spec/missingKey.json +++ b/test/client-side-encryption/spec/missingKey.json @@ -140,18 +140,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "different" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/replaceOne.json b/test/client-side-encryption/spec/replaceOne.json index 5cdb3d40f0..9757686819 100644 --- a/test/client-side-encryption/spec/replaceOne.json +++ b/test/client-side-encryption/spec/replaceOne.json @@ -148,18 +148,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/types.json index 47e4c27a2e..a070f8bff7 100644 --- a/test/client-side-encryption/spec/types.json +++ b/test/client-side-encryption/spec/types.json @@ -103,18 +103,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -254,18 +242,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -405,18 +381,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -656,18 +620,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -807,18 +759,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -1057,18 +997,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -1214,18 +1142,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { @@ -1369,18 +1285,6 @@ } ], "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/updateMany.json b/test/client-side-encryption/spec/updateMany.json index fd1f4d12bd..823909044b 100644 --- a/test/client-side-encryption/spec/updateMany.json +++ b/test/client-side-encryption/spec/updateMany.json @@ -164,18 +164,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/client-side-encryption/spec/updateOne.json b/test/client-side-encryption/spec/updateOne.json index bed763d720..23bada964f 100644 --- a/test/client-side-encryption/spec/updateOne.json +++ b/test/client-side-encryption/spec/updateOne.json @@ -150,18 +150,6 @@ "command_name": "listCollections" } }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "datakeys" - }, - "$db": "keyvault" - }, - "command_name": "listCollections" - } - }, { "command_started_event": { "command": { diff --git a/test/test_encryption.py b/test/test_encryption.py index cf516c26e1..af637d8566 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -53,6 +53,7 @@ from test.utils import (TestCreator, camel_to_snake_args, OvertCommandListener, + TopologyEventListener, WhiteListEventListener, rs_or_single_client, wait_until) @@ -1358,5 +1359,222 @@ def test_automatic(self): expected_document_extjson, {"secret_gcp": "string0"}) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests +class TestDeadlockProse(EncryptionIntegrationTest): + def setUp(self): + self.client_test = rs_or_single_client( + maxPoolSize=1, readConcernLevel='majority', w='majority', + uuidRepresentation='standard') + self.addCleanup(self.client_test.close) + + self.client_keyvault_listener = OvertCommandListener() + self.client_keyvault = rs_or_single_client( + maxPoolSize=1, readConcernLevel='majority', w='majority', + event_listeners=[self.client_keyvault_listener]) + self.addCleanup(self.client_keyvault.close) + + self.client_test.keyvault.datakeys.drop() + self.client_test.db.coll.drop() + self.client_test.keyvault.datakeys.insert_one( + json_data('external', 'external-key.json')) + _ = self.client_test.db.create_collection( + 'coll', validator={'$jsonSchema': json_data( + 'external', 'external-schema.json')}, + codec_options=OPTS) + + client_encryption = ClientEncryption( + kms_providers={'local': {'key': LOCAL_MASTER_KEY}}, + key_vault_namespace='keyvault.datakeys', + key_vault_client=self.client_test, codec_options=OPTS) + self.ciphertext = client_encryption.encrypt( + 'string0', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_alt_name='local') + client_encryption.close() + + self.client_listener = OvertCommandListener() + self.topology_listener = TopologyEventListener() + self.optargs = ({'local': {'key': LOCAL_MASTER_KEY}}, 'keyvault.datakeys') + + def _run_test(self, max_pool_size, auto_encryption_opts): + client_encrypted = rs_or_single_client( + readConcernLevel='majority', + w='majority', + maxPoolSize=max_pool_size, + auto_encryption_opts=auto_encryption_opts, + event_listeners=[self.client_listener, self.topology_listener]) + + if auto_encryption_opts._bypass_auto_encryption == True: + self.client_test.db.coll.insert_one( + {"_id": 0, "encrypted": self.ciphertext}) + elif auto_encryption_opts._bypass_auto_encryption == False: + client_encrypted.db.coll.insert_one( + {"_id": 0, "encrypted": "string0"}) + else: + raise RuntimeError("bypass_auto_encryption must be a bool") + + result = client_encrypted.db.coll.find_one({"_id": 0}) + self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) + + self.addCleanup(client_encrypted.close) + + def test_case_1(self): + self._run_test(max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=False, + key_vault_client=None)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 4) + self.assertEqual(cev[0].command_name, 'listCollections') + self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[1].command_name, 'find') + self.assertEqual(cev[1].database_name, 'keyvault') + self.assertEqual(cev[2].command_name, 'insert') + self.assertEqual(cev[2].database_name, 'db') + self.assertEqual(cev[3].command_name, 'find') + self.assertEqual(cev[3].database_name, 'db') + + self.assertEqual(len(self.topology_listener.results['opened']), 2) + + def test_case_2(self): + self._run_test(max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=False, + key_vault_client=self.client_keyvault)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, 'listCollections') + self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[1].command_name, 'insert') + self.assertEqual(cev[1].database_name, 'db') + self.assertEqual(cev[2].command_name, 'find') + self.assertEqual(cev[2].database_name, 'db') + + cev = self.client_keyvault_listener.results['started'] + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'keyvault') + + self.assertEqual(len(self.topology_listener.results['opened']), 2) + + def test_case_3(self): + self._run_test(max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=True, + key_vault_client=None)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[1].command_name, 'find') + self.assertEqual(cev[1].database_name, 'keyvault') + + self.assertEqual(len(self.topology_listener.results['opened']), 2) + + def test_case_4(self): + self._run_test(max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=True, + key_vault_client=self.client_keyvault)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'db') + + cev = self.client_keyvault_listener.results['started'] + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'keyvault') + + self.assertEqual(len(self.topology_listener.results['opened']), 1) + + def test_case_5(self): + self._run_test(max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=False, + key_vault_client=None)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 5) + self.assertEqual(cev[0].command_name, 'listCollections') + self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[1].command_name, 'listCollections') + self.assertEqual(cev[1].database_name, 'keyvault') + self.assertEqual(cev[2].command_name, 'find') + self.assertEqual(cev[2].database_name, 'keyvault') + self.assertEqual(cev[3].command_name, 'insert') + self.assertEqual(cev[3].database_name, 'db') + self.assertEqual(cev[4].command_name, 'find') + self.assertEqual(cev[4].database_name, 'db') + + self.assertEqual(len(self.topology_listener.results['opened']), 1) + + def test_case_6(self): + self._run_test(max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=False, + key_vault_client=self.client_keyvault)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, 'listCollections') + self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[1].command_name, 'insert') + self.assertEqual(cev[1].database_name, 'db') + self.assertEqual(cev[2].command_name, 'find') + self.assertEqual(cev[2].database_name, 'db') + + cev = self.client_keyvault_listener.results['started'] + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'keyvault') + + self.assertEqual(len(self.topology_listener.results['opened']), 1) + + def test_case_7(self): + self._run_test(max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=True, + key_vault_client=None)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[1].command_name, 'find') + self.assertEqual(cev[1].database_name, 'keyvault') + + self.assertEqual(len(self.topology_listener.results['opened']), 1) + + def test_case_8(self): + self._run_test(max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, + bypass_auto_encryption=True, + key_vault_client=self.client_keyvault)) + + cev = self.client_listener.results['started'] + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'db') + + cev = self.client_keyvault_listener.results['started'] + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, 'find') + self.assertEqual(cev[0].database_name, 'keyvault') + + self.assertEqual(len(self.topology_listener.results['opened']), 1) + + if __name__ == "__main__": unittest.main() diff --git a/test/utils.py b/test/utils.py index 3b4193c292..62815c88aa 100644 --- a/test/utils.py +++ b/test/utils.py @@ -143,6 +143,24 @@ def reset(self): self.results.clear() +class TopologyEventListener(monitoring.TopologyListener): + def __init__(self): + self.results = defaultdict(list) + + def closed(self, event): + self.results['closed'].append(event) + + def description_changed(self, event): + self.results['description_changed'].append(event) + + def opened(self, event): + self.results['opened'].append(event) + + def reset(self): + """Reset the state of this listener.""" + self.results.clear() + + class WhiteListEventListener(EventListener): def __init__(self, *commands): From 20d5a9cf814d3a22310d162b67762569e231d772 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 1 Mar 2021 14:09:27 -0800 Subject: [PATCH 0308/2111] PYTHON-2543 Do not mark a server unknown from a "writeErrors" response (#570) --- pymongo/topology.py | 6 +- .../errors/write_errors_ignored.json | 96 +++++++++++++++++++ test/test_discovery_and_monitoring.py | 4 +- 3 files changed, 104 insertions(+), 2 deletions(-) create mode 100644 test/discovery_and_monitoring/errors/write_errors_ignored.json diff --git a/pymongo/topology.py b/pymongo/topology.py index 194884a752..4e07db86b5 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -32,7 +32,8 @@ NotMasterError, OperationFailure, PyMongoError, - ServerSelectionTimeoutError) + ServerSelectionTimeoutError, + WriteError) from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -578,6 +579,9 @@ def _handle_error(self, address, err_ctx): # operation fails because of any network error besides a socket # timeout...." return + elif issubclass(exc_type, WriteError): + # Ignore writeErrors. + return elif issubclass(exc_type, NotMasterError): # As per the SDAM spec if: # - the server sees a "not master" error, and diff --git a/test/discovery_and_monitoring/errors/write_errors_ignored.json b/test/discovery_and_monitoring/errors/write_errors_ignored.json new file mode 100644 index 0000000000..6b80673c12 --- /dev/null +++ b/test/discovery_and_monitoring/errors/write_errors_ignored.json @@ -0,0 +1,96 @@ +{ + "description": "writeErrors field is ignored", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore command error with writeErrors field", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 1, + "writeErrors": [ + { + "errmsg": "NotMasterNoSlaveOk", + "code": 13435 + } + ] + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index ddb87271a9..4ffffffaea 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -29,7 +29,8 @@ NetworkTimeout, NotMasterError, OperationFailure) -from pymongo.helpers import _check_command_response +from pymongo.helpers import (_check_command_response, + _check_write_command_response) from pymongo.ismaster import IsMaster from pymongo.server_description import ServerDescription, SERVER_TYPE from pymongo.settings import TopologySettings @@ -94,6 +95,7 @@ def got_app_error(topology, app_error): try: if error_type == 'command': _check_command_response(app_error['response'], max_wire_version) + _check_write_command_response(app_error['response']) elif error_type == 'network': raise AutoReconnect('mock non-timeout network error') elif error_type == 'timeout': From 4088c1cee0f277eafcf591f93809367ed47d4bee Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 2 Mar 2021 11:06:56 -0800 Subject: [PATCH 0309/2111] Python 2548/add update description.truncated arrays field (#572) --- .../{ => legacy}/change-streams-errors.json | 0 .../change-streams-resume-errorLabels.json | 0 .../change-streams-resume-whitelist.json | 0 .../{ => legacy}/change-streams.json | 0 .../unified/change-streams.json | 116 ++++++++++++++++++ test/test_change_stream.py | 15 ++- 6 files changed, 126 insertions(+), 5 deletions(-) rename test/change_streams/{ => legacy}/change-streams-errors.json (100%) rename test/change_streams/{ => legacy}/change-streams-resume-errorLabels.json (100%) rename test/change_streams/{ => legacy}/change-streams-resume-whitelist.json (100%) rename test/change_streams/{ => legacy}/change-streams.json (100%) create mode 100644 test/change_streams/unified/change-streams.json diff --git a/test/change_streams/change-streams-errors.json b/test/change_streams/legacy/change-streams-errors.json similarity index 100% rename from test/change_streams/change-streams-errors.json rename to test/change_streams/legacy/change-streams-errors.json diff --git a/test/change_streams/change-streams-resume-errorLabels.json b/test/change_streams/legacy/change-streams-resume-errorLabels.json similarity index 100% rename from test/change_streams/change-streams-resume-errorLabels.json rename to test/change_streams/legacy/change-streams-resume-errorLabels.json diff --git a/test/change_streams/change-streams-resume-whitelist.json b/test/change_streams/legacy/change-streams-resume-whitelist.json similarity index 100% rename from test/change_streams/change-streams-resume-whitelist.json rename to test/change_streams/legacy/change-streams-resume-whitelist.json diff --git a/test/change_streams/change-streams.json b/test/change_streams/legacy/change-streams.json similarity index 100% rename from test/change_streams/change-streams.json rename to test/change_streams/legacy/change-streams.json diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json new file mode 100644 index 0000000000..adaf00de2d --- /dev/null +++ b/test/change_streams/unified/change-streams.json @@ -0,0 +1,116 @@ +{ + "description": "change-streams", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "Test array truncation", + "runOnRequirements": [ + { + "minServerVersion": "4.7", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1, + "array": [ + "foo", + { + "a": "bar" + }, + 1, + 2, + 3 + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "array": [ + "foo", + { + "a": "bar" + } + ] + } + } + ] + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": {}, + "removedFields": [], + "truncatedArrays": [ + { + "field": "array", + "newSize": 2 + } + ] + } + } + } + ] + } + ] +} diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 293c3bfc1a..5f615e613b 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -43,6 +43,7 @@ from pymongo.write_concern import WriteConcern from test import client_context, unittest, IntegrationTest +from test.unified_format import generate_test_classes from test.utils import ( EventListener, WhiteListEventListener, rs_or_single_client, wait_until) @@ -1037,7 +1038,7 @@ def test_read_concern(self): pass -class TestAllScenarios(unittest.TestCase): +class TestAllLegacyScenarios(unittest.TestCase): @classmethod @client_context.require_connection @@ -1120,8 +1121,7 @@ def tearDown(self): _TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'change_streams' -) + os.path.dirname(os.path.realpath(__file__)), 'change_streams') def camel_to_snake(camel): @@ -1215,7 +1215,7 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')): dirname = os.path.split(dirpath)[-1] for filename in filenames: @@ -1251,11 +1251,16 @@ def create_tests(): str(test['description'].replace(" ", "_"))) new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) + setattr(TestAllLegacyScenarios, new_test.__name__, new_test) create_tests() +globals().update(generate_test_classes( + os.path.join(_TEST_PATH, 'unified'), + module=__name__,)) + + if __name__ == '__main__': unittest.main() From 4364b7c43a8ed114eb0948f44a626f3d352176fa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Mar 2021 16:33:42 -0800 Subject: [PATCH 0310/2111] PYTHON-2596 Include host in error message when connection is closed (#575) Use raise from syntax when re-raising exceptions in the pool. --- pymongo/network.py | 5 ++--- pymongo/pool.py | 11 ++--------- test/test_client.py | 12 ++++++++++++ 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/pymongo/network.py b/pymongo/network.py index 682a51df73..39f6504487 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -26,8 +26,7 @@ from pymongo import helpers, message from pymongo.common import MAX_MESSAGE_SIZE from pymongo.compression_support import decompress, _NO_COMPRESSION -from pymongo.errors import (AutoReconnect, - NotMasterError, +from pymongo.errors import (NotMasterError, OperationFailure, ProtocolError, _OperationCancelled) @@ -261,7 +260,7 @@ def _receive_data_on_socket(sock_info, length, deadline): continue raise if chunk_length == 0: - raise AutoReconnect("connection closed") + raise OSError("connection closed") bytes_read += chunk_length diff --git a/pymongo/pool.py b/pymongo/pool.py index 5b5f60a335..cc4c4ffce3 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -236,16 +236,9 @@ def _raise_connection_failure(address, error, msg_prefix=None): if msg_prefix: msg = msg_prefix + msg if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) - elif isinstance(error, _SSLError) and 'timed out' in str(error): - # CPython 2.7 and PyPy 2.x do not distinguish network - # timeouts from other SSLErrors (https://bugs.python.org/issue10272). - # Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised - # on the above platforms. - raise NetworkTimeout(msg) + raise NetworkTimeout(msg) from error else: - raise AutoReconnect(msg) + raise AutoReconnect(msg) from error def _cond_wait(condition, deadline): diff --git a/test/test_client.py b/test/test_client.py index 9f8135dc71..3e656ab46e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1568,6 +1568,18 @@ def server_description_count(): # AssertionError: 4 != 22 within 5 delta (18 difference) self.assertAlmostEqual(initial_count, final_count, delta=10) + @client_context.require_failCommand_fail_point + def test_network_error_message(self): + client = single_client(retryReads=False) + self.addCleanup(client.close) + client.admin.command('ping') # connect + with self.fail_point({'mode': {'times': 1}, + 'data': {'closeConnection': True, + 'failCommands': ['find']}}): + expected = '%s:%s: connection closed' % client.address + with self.assertRaisesRegex(AutoReconnect, expected): + client.pymongo_test.test.find_one({}) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From 77ad2737fa48145a724117a0d6ad5cc82da18592 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 4 Mar 2021 09:23:37 -0800 Subject: [PATCH 0311/2111] PYTHON-2591 Use server toolchain Python on Ubuntu-16 with x86-64, arm64, power8 (#573) PYTHON-2590 Use server toolchain RHEL 7.2 ZSeries (#573) --- .evergreen/run-tests.sh | 12 +++++++----- .evergreen/utils.sh | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index e0e73efc3f..7a78401264 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -58,11 +58,13 @@ fi if [ -z "$PYTHON_BINARY" ]; then # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a - # system python3 doesn't exist. This seems to only be an issue on RHEL 7.x. - PYTHON=$(command -v python3 || command -v /opt/mongodbtoolchain/v2/bin/python3) || true - if [ -z "$PYTHON" ]; then - echo "Cannot test without python3 installed!" - exit 1 + # system python3 doesn't exist or exists but is older than 3.6. + if is_python_36 $(command -v python3); then + PYTHON=$(command -v python3) + elif is_python_36 $(command -v /opt/mongodbtoolchain/v2/bin/python3); then + PYTHON=$(command -v /opt/mongodbtoolchain/v2/bin/python3) + else + echo "Cannot test without python3.6+ installed!" fi elif [ "$COMPRESSORS" = "snappy" ]; then $PYTHON_BINARY -m virtualenv --never-download snappytest diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index d6b9ebecc7..be5adadd12 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -52,3 +52,18 @@ testinstall () { rm -rf venvtestinstall fi } + +# Function that returns success if the provided Python binary is version 3.6 or later +# Usage: +# is_python_36 /path/to/python +# * param1: Python binary +is_python_36() { + if [ -z "$1" ]; then + return 1 + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 6))"; then + # runs when sys.version_info[:2] >= (3, 6) + return 0 + else + return 1 + fi +} From de7c7b8be2bce0bb807fa5bf74ead2c71874adec Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 Mar 2021 10:03:52 -0800 Subject: [PATCH 0312/2111] PYTHON-2544 Do not check error messages when an error code is present (#574) Add 10058 as a "not master" error code to account for MongoDB<=3.2 errors. --- pymongo/helpers.py | 6 +- ...ologyVersion-greater-LegacyNotPrimary.json | 99 ++++++++++ ...ologyVersion-missing-LegacyNotPrimary.json | 84 +++++++++ ...n-proccessId-changed-LegacyNotPrimary.json | 99 ++++++++++ .../errors/post-42-LegacyNotPrimary.json | 69 +++++++ .../errors/pre-42-LegacyNotPrimary.json | 69 +++++++ .../errors/prefer-error-code.json | 130 +++++++++++++ ...erHandshakeCompletes-LegacyNotPrimary.json | 174 ++++++++++++++++++ ...ation-afterHandshakeCompletes-network.json | 4 +- ...ation-afterHandshakeCompletes-timeout.json | 4 +- ...reHandshakeCompletes-LegacyNotPrimary.json | 174 ++++++++++++++++++ ...tion-beforeHandshakeCompletes-network.json | 4 +- ...tion-beforeHandshakeCompletes-timeout.json | 4 +- ...tale-topologyVersion-LegacyNotPrimary.json | 146 +++++++++++++++ 14 files changed, 1056 insertions(+), 10 deletions(-) create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/prefer-error-code.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json create mode 100644 test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 107e061acf..2fb3ce3372 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -39,6 +39,7 @@ # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). _NOT_MASTER_CODES = frozenset([ + 10058, # LegacyNotPrimary <=3.2 "not master" error code 10107, # NotMaster 13435, # NotMasterNoSlaveOk 11602, # InterruptedDueToReplStateChange @@ -148,8 +149,9 @@ def _check_command_response(response, max_wire_version, return # Server is "not master" or "recovering" - if code in _NOT_MASTER_CODES: - raise NotMasterError(errmsg, response) + if code is not None: + if code in _NOT_MASTER_CODES: + raise NotMasterError(errmsg, response) elif "not master" in errmsg or "node is recovering" in errmsg: raise NotMasterError(errmsg, response) diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json new file mode 100644 index 0000000000..e1f33b81bb --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json new file mode 100644 index 0000000000..ccaacd1cfe --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json new file mode 100644 index 0000000000..da36e9b33c --- /dev/null +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json new file mode 100644 index 0000000000..731da196b5 --- /dev/null +++ b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json new file mode 100644 index 0000000000..db5acd718d --- /dev/null +++ b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json @@ -0,0 +1,69 @@ +{ + "description": "Pre-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/prefer-error-code.json b/test/discovery_and_monitoring/errors/prefer-error-code.json new file mode 100644 index 0000000000..486103a457 --- /dev/null +++ b/test/discovery_and_monitoring/errors/prefer-error-code.json @@ -0,0 +1,130 @@ +{ + "description": "Do not check errmsg when code exists", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"not master\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "not master", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"node is recovering\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "node is recovering", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..2169c4a6fb --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation LegacyNotPrimary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json index 4e11c48eb2..94b8322d2d 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "description": "Stale generation network error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale network error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json index 27bac32443..490703de90 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "description": "Stale generation timeout error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale timeout error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..674cb994cd --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation LegacyNotPrimary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json index 9734776f22..3e581773eb 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "description": "Stale generation network error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale network error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json index af8730e5ca..24c8c6e507 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "description": "Stale generation timeout error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale timeout error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json new file mode 100644 index 0000000000..beee51e666 --- /dev/null +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} From ec6337e3a7a012131d215dd343cbd91a3d6d9f97 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 9 Mar 2021 12:33:12 -0800 Subject: [PATCH 0313/2111] PYTHON-2583 Bump minimum required PyMongoCrypt version to 1.1.0 (#576) --- doc/changelog.rst | 2 ++ setup.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index a9396072a3..62f46f275c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -67,6 +67,8 @@ Breaking Changes in 4.0 :meth:`~pymongo.cursor.Cursor`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. +- PyMongoCrypt 1.1.0 or later is now required for client side field level + encryption support. Notable improvements .................... diff --git a/setup.py b/setup.py index 89abad91cc..f09e2c6f04 100755 --- a/setup.py +++ b/setup.py @@ -282,7 +282,7 @@ def build_extension(self, ext): pyopenssl_reqs.append('certifi') extras_require = { - 'encryption': ['pymongocrypt<2.0.0'], + 'encryption': ['pymongocrypt>=1.1.0,<2.0.0'], 'ocsp': pyopenssl_reqs, 'snappy': ['python-snappy'], 'zstd': ['zstandard'], From 92a74330357dcc5e62aca7c42851a47e7e8a4ab6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Mar 2021 12:51:30 -0700 Subject: [PATCH 0314/2111] PYTHON-2580 Provide explicit guidance on handling command errors during the handshake (#571) Command errors during the handshake MUST use SDAM error handling rules. Mark server unknown after auth failures. Test network timeout errors pre/post auth. PoolClearedError MUST NOT mark the server Unknown. Add "authEnabled" runOn requirement for SDAM integration tests. --- pymongo/topology.py | 30 +- .../auth-error.json | 140 ++++++++ .../auth-misc-command-error.json | 140 ++++++++ .../auth-network-error.json | 140 ++++++++ .../auth-network-timeout-error.json | 143 ++++++++ .../auth-shutdown-error.json | 140 ++++++++ .../find-network-timeout-error.json | 119 +++++++ .../pool-cleared-error.json | 307 ++++++++++++++++++ test/utils.py | 11 +- 9 files changed, 1155 insertions(+), 15 deletions(-) create mode 100644 test/discovery_and_monitoring_integration/auth-error.json create mode 100644 test/discovery_and_monitoring_integration/auth-misc-command-error.json create mode 100644 test/discovery_and_monitoring_integration/auth-network-error.json create mode 100644 test/discovery_and_monitoring_integration/auth-network-timeout-error.json create mode 100644 test/discovery_and_monitoring_integration/auth-shutdown-error.json create mode 100644 test/discovery_and_monitoring_integration/find-network-timeout-error.json create mode 100644 test/discovery_and_monitoring_integration/pool-cleared-error.json diff --git a/pymongo/topology.py b/pymongo/topology.py index 4e07db86b5..c4eb947b64 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -582,7 +582,7 @@ def _handle_error(self, address, err_ctx): elif issubclass(exc_type, WriteError): # Ignore writeErrors. return - elif issubclass(exc_type, NotMasterError): + elif issubclass(exc_type, (NotMasterError, OperationFailure)): # As per the SDAM spec if: # - the server sees a "not master" error, and # - the server is not shutting down, and @@ -591,14 +591,23 @@ def _handle_error(self, address, err_ctx): # as Unknown and request an immediate check of the server. # Otherwise, we clear the connection pool, mark the server as # Unknown and request an immediate check of the server. - err_code = error.details.get('code', -1) - is_shutting_down = err_code in helpers._SHUTDOWN_CODES - # Mark server Unknown, clear the pool, and request check. - self._process_change(ServerDescription(address, error=error)) - if is_shutting_down or (err_ctx.max_wire_version <= 7): + if hasattr(error, 'code'): + err_code = error.code + else: + err_code = error.details.get('code', -1) + if err_code in helpers._NOT_MASTER_CODES: + is_shutting_down = err_code in helpers._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + server.reset() + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + self._process_change(ServerDescription(address, error=error)) # Clear the pool. server.reset() - server.request_check() elif issubclass(exc_type, ConnectionFailure): # "Client MUST replace the server's description with type Unknown # ... MUST NOT request an immediate check of the server." @@ -609,13 +618,6 @@ def _handle_error(self, address, err_ctx): # reading or writing`_, clients MUST cancel the isMaster check on # that server and close the current monitoring connection." server._monitor.cancel_check() - elif issubclass(exc_type, OperationFailure): - # Do not request an immediate check since the server is likely - # shutting down. - if error.code in helpers._NOT_MASTER_CODES: - self._process_change(ServerDescription(address, error=error)) - # Clear the pool. - server.reset() def handle_error(self, address, err_ctx): """Handle an application error. diff --git a/test/discovery_and_monitoring_integration/auth-error.json b/test/discovery_and_monitoring_integration/auth-error.json new file mode 100644 index 0000000000..064d660e32 --- /dev/null +++ b/test/discovery_and_monitoring_integration/auth-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-misc-command-error.json b/test/discovery_and_monitoring_integration/auth-misc-command-error.json new file mode 100644 index 0000000000..70dd59251d --- /dev/null +++ b/test/discovery_and_monitoring_integration/auth-misc-command-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-misc-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-network-error.json b/test/discovery_and_monitoring_integration/auth-network-error.json new file mode 100644 index 0000000000..a75a398c5e --- /dev/null +++ b/test/discovery_and_monitoring_integration/auth-network-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-network-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json b/test/discovery_and_monitoring_integration/auth-network-timeout-error.json new file mode 100644 index 0000000000..a4ee7d9eff --- /dev/null +++ b/test/discovery_and_monitoring_integration/auth-network-timeout-error.json @@ -0,0 +1,143 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-network-timeout-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-shutdown-error.json b/test/discovery_and_monitoring_integration/auth-shutdown-error.json new file mode 100644 index 0000000000..2dab90e1c5 --- /dev/null +++ b/test/discovery_and_monitoring_integration/auth-shutdown-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-shutdown-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 91 + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/find-network-timeout-error.json b/test/discovery_and_monitoring_integration/find-network-timeout-error.json new file mode 100644 index 0000000000..c4e10b3a76 --- /dev/null +++ b/test/discovery_and_monitoring_integration/find-network-timeout-error.json @@ -0,0 +1,119 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "find-network-timeout-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + }, + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "error": true + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "find-network-timeout-error" + }, + "command_name": "find", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + "insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + } + } + ] +} diff --git a/test/discovery_and_monitoring_integration/pool-cleared-error.json b/test/discovery_and_monitoring_integration/pool-cleared-error.json new file mode 100644 index 0000000000..061503c259 --- /dev/null +++ b/test/discovery_and_monitoring_integration/pool-cleared-error.json @@ -0,0 +1,307 @@ +{ + "runOn": [ + { + "minServerVersion": "4.9", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "sdam-tests", + "collection_name": "pool-cleared-error", + "data": [], + "tests": [ + { + "description": "PoolClearedError does not mark server unknown", + "clientOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + }, + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100, + "closeConnection": true, + "appName": "poolClearedErrorTest" + } + } + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread3" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread4" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread5" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread6" + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 5 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 7 + } + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread3" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread4" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread5" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread6" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 8 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + } + ] + } + } + } + ] +} diff --git a/test/utils.py b/test/utils.py index 62815c88aa..4a46aae21b 100644 --- a/test/utils.py +++ b/test/utils.py @@ -412,6 +412,14 @@ def max_server_version(run_on_req): return client_context.version <= max_ver return True + @staticmethod + def valid_auth_enabled(run_on_req): + if 'authEnabled' in run_on_req: + if run_on_req['authEnabled']: + return client_context.auth_enabled + return not client_context.auth_enabled + return True + def should_run_on(self, scenario_def): run_on = scenario_def.get('runOn', []) if not run_on: @@ -421,7 +429,8 @@ def should_run_on(self, scenario_def): for req in run_on: if (self.valid_topology(req) and self.min_server_version(req) and - self.max_server_version(req)): + self.max_server_version(req) and + self.valid_auth_enabled(req)): return True return False From 8ef452407649786574cb70dcf0ccd97c16813651 Mon Sep 17 00:00:00 2001 From: William Zhou Date: Wed, 17 Mar 2021 16:15:22 -0700 Subject: [PATCH 0315/2111] PYTHON-1359: Add Example for RawBSONDocument (#578) Add doctest/example for inserting/retrieving RawBSONDocument --- bson/raw_bson.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 31b0d1b66c..4ee0394ad4 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -13,6 +13,46 @@ # limitations under the License. """Tools for representing raw BSON documents. + +Inserting and Retrieving RawBSONDocuments +========================================= + +Example: Moving a document between different databases/collections + +.. testsetup:: + from pymongo import MongoClient + client = MongoClient(document_class=RawBSONDocument) + client.drop_database('db') + client.drop_database('replica_db') + +.. doctest:: + + >>> import bson + >>> from pymongo import MongoClient + >>> from bson.raw_bson import RawBSONDocument + >>> client = MongoClient(document_class=RawBSONDocument) + >>> db = client.db + >>> result = db.test.insert_many([{'a': 1}, + ... {'b': 1}, + ... {'c': 1}, + ... {'d': 1}]) + >>> replica_db = client.replica_db + >>> for doc in db.test.find(): + ... print(f"raw document: {doc.raw}") + ... print(f"decoded document: {bson.decode(doc.raw)}") + ... result = replica_db.test.insert_one(doc) + raw document: b'...' + decoded document: {'_id': ObjectId('...'), 'a': 1} + raw document: b'...' + decoded document: {'_id': ObjectId('...'), 'b': 1} + raw document: b'...' + decoded document: {'_id': ObjectId('...'), 'c': 1} + raw document: b'...' + decoded document: {'_id': ObjectId('...'), 'd': 1} + +For use cases like moving documents across different databases or writing binary +blobs to disk, using raw BSON documents provides better speed and avoids the +overhead of decoding or encoding BSON. """ from collections.abc import Mapping as _Mapping From 80adc13195593f59b58b08a22d53f6f99dfaf0ec Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 19 Mar 2021 21:47:53 -0700 Subject: [PATCH 0316/2111] PYTHON-2615 Reinstate TLS network timeout workaround due to eventlet (#581) PYTHON-2616 Fix test_network_error_message when TLS is enabled. --- pymongo/pool.py | 6 ++++++ test/test_client.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index cc4c4ffce3..6dd3f2be48 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -237,6 +237,12 @@ def _raise_connection_failure(address, error, msg_prefix=None): msg = msg_prefix + msg if isinstance(error, socket.timeout): raise NetworkTimeout(msg) from error + elif isinstance(error, _SSLError) and 'timed out' in str(error): + # Eventlet does not distinguish TLS network timeouts from other + # SSLErrors (https://github.com/eventlet/eventlet/issues/692). + # Luckily, we can work around this limitation because the phrase + # 'timed out' appears in all the timeout related SSLErrors raised. + raise NetworkTimeout(msg) from error else: raise AutoReconnect(msg) from error diff --git a/test/test_client.py b/test/test_client.py index 3e656ab46e..31850f701e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1576,7 +1576,7 @@ def test_network_error_message(self): with self.fail_point({'mode': {'times': 1}, 'data': {'closeConnection': True, 'failCommands': ['find']}}): - expected = '%s:%s: connection closed' % client.address + expected = '%s:%s: ' % client.address with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) From 94f4de1f2e58209e1e8c1db46975b76c3cee6abb Mon Sep 17 00:00:00 2001 From: William Zhou Date: Mon, 22 Mar 2021 11:13:42 -0700 Subject: [PATCH 0317/2111] PYTHON-1690: Fix error message when insert_many is given a single RawBSONDocument instead of a list (#580) --- pymongo/collection.py | 4 +++- test/test_collection.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index bf6d8beec3..7be1191f5d 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -686,7 +686,9 @@ def insert_many(self, documents, ordered=True, .. versionadded:: 3.0 """ - if not isinstance(documents, abc.Iterable) or not documents: + if (not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents): raise TypeError("documents must be a non-empty list") inserted_ids = [] def gen(): diff --git a/test/test_collection.py b/test/test_collection.py index fe4bf28ad6..673816025a 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -786,6 +786,25 @@ def test_insert_many(self): self.assertFalse(result.acknowledged) self.assertEqual(20, db.test.count_documents({})) + def test_insert_many_invalid(self): + db = self.db + + with self.assertRaisesRegex( + TypeError, "documents must be a non-empty list"): + db.test.insert_many({}) + + with self.assertRaisesRegex( + TypeError, "documents must be a non-empty list"): + db.test.insert_many([]) + + with self.assertRaisesRegex( + TypeError, "documents must be a non-empty list"): + db.test.insert_many(1) + + with self.assertRaisesRegex( + TypeError, "documents must be a non-empty list"): + db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) + def test_delete_one(self): self.db.test.drop() From 0752280adaa69b3cd80fabdf96aac31a26af826f Mon Sep 17 00:00:00 2001 From: William Zhou Date: Thu, 25 Mar 2021 10:14:16 -0700 Subject: [PATCH 0318/2111] PYTHON-2480: Add MongoClient helper to access the current TopologyDescription (#583) --- doc/api/pymongo/index.rst | 2 ++ doc/api/pymongo/mongo_client.rst | 1 + doc/api/pymongo/server_description.rst | 6 +----- doc/api/pymongo/topology_description.rst | 6 +----- doc/changelog.rst | 2 ++ pymongo/mongo_client.py | 22 ++++++++++++++++++++++ test/test_client.py | 3 +++ 7 files changed, 32 insertions(+), 10 deletions(-) diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 1a54a5b42d..5770145936 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -48,6 +48,8 @@ Sub-modules: read_preferences results server_api + server_description + topology_description uri_parser write_concern event_loggers diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index 73a79bb9b2..e48af01ad2 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -15,6 +15,7 @@ Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. .. autoattribute:: event_listeners + .. autoattribute:: topology_description .. autoattribute:: address .. autoattribute:: primary .. autoattribute:: secondaries diff --git a/doc/api/pymongo/server_description.rst b/doc/api/pymongo/server_description.rst index 2d354fca6f..fc6b55ec74 100644 --- a/doc/api/pymongo/server_description.rst +++ b/doc/api/pymongo/server_description.rst @@ -6,8 +6,4 @@ .. automodule:: pymongo.server_description .. autoclass:: pymongo.server_description.ServerDescription() - - .. autoattribute:: address - .. autoattribute:: all_hosts - .. autoattribute:: server_type - .. autoattribute:: server_type_name + :members: diff --git a/doc/api/pymongo/topology_description.rst b/doc/api/pymongo/topology_description.rst index b14f1bf2c2..8141507df7 100644 --- a/doc/api/pymongo/topology_description.rst +++ b/doc/api/pymongo/topology_description.rst @@ -6,9 +6,5 @@ .. automodule:: pymongo.topology_description .. autoclass:: pymongo.topology_description.TopologyDescription() + :members: - .. automethod:: has_readable_server(read_preference=ReadPreference.PRIMARY) - .. automethod:: has_writable_server - .. automethod:: server_descriptions - .. autoattribute:: topology_type - .. autoattribute:: topology_type_name diff --git a/doc/changelog.rst b/doc/changelog.rst index 62f46f275c..e0cc3a6913 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -11,6 +11,8 @@ changes. For example, all APIs deprecated in PyMongo 3.X have been removed. Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` before upgrading from PyMongo 3.x. +- Added :attr:`pymongo.mongo_client.MongoClient.topology_description`. + Breaking Changes in 4.0 ....................... diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 21e6d66bb5..5dda8efd81 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -891,6 +891,28 @@ def event_listeners(self): """ return self._event_listeners.event_listeners + @property + def topology_description(self): + """The description of the connected MongoDB deployment. + + >>> client.topology_description + , , ]> + >>> client.topology_description.topology_type_name + 'ReplicaSetWithPrimary' + + Note that the description is periodically updated in the background + but the returned object itself is immutable. Access this property again + to get a more recent + :class:`~pymongo.topology_description.TopologyDescription`. + + :Returns: + An instance of + :class:`~pymongo.topology_description.TopologyDescription`. + + .. versionadded:: 4.0 + """ + return self._topology.description + @property def address(self): """(host, port) of the current standalone, primary, or mongos, or None. diff --git a/test/test_client.py b/test/test_client.py index 31850f701e..dd5e37392a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -62,6 +62,7 @@ from pymongo.server_type import SERVER_TYPE from pymongo.settings import TOPOLOGY_TYPE from pymongo.topology import _ErrorContext +from pymongo.topology_description import TopologyDescription from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.write_concern import WriteConcern from test import (client_context, @@ -597,6 +598,8 @@ def test_init_disconnected(self): self.assertFalse(c.secondaries) c = rs_or_single_client(connect=False) self.assertIsInstance(c.max_write_batch_size, int) + self.assertIsInstance(c.topology_description, TopologyDescription) + self.assertEqual(c.topology_description, c._topology._description) if client_context.is_rs: # The primary's host and port are from the replica set config. From 97bad5a653ae914a7459951964b9eaf39e84c654 Mon Sep 17 00:00:00 2001 From: William Zhou Date: Tue, 30 Mar 2021 15:37:31 -0700 Subject: [PATCH 0319/2111] PYTHON-2628: Fix 'encryption::create_data_key` docstring to use existing algorithm --- pymongo/encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 7f55de0dfb..f3e59a38b4 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -491,7 +491,7 @@ def create_data_key(self, kms_provider, master_key=None, client_encryption.create_data_key("local", keyAltNames=["name1"]) # reference the key with the alternate name client_encryption.encrypt("457-55-5462", keyAltName="name1", - algorithm=Algorithm.Random) + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) :Returns: The ``_id`` of the created data key document as a From 1882e99f777a588f9c6085bf1dc003b13508a913 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 31 Mar 2021 11:31:06 -0500 Subject: [PATCH 0320/2111] PYTHON-2536 Document versioned API usage (#584) --- pymongo/server_api.py | 53 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 7 deletions(-) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index cf739659e3..4a1b925ca9 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -19,28 +19,67 @@ MongoDB Versioned API ===================== +Starting in MongoDB 5.0, applications can specify the server API version +to use when creating a :class:`~pymongo.mongo_client.MongoClient`. Doing so +ensures that the driver behaves in a manner compatible with that server API +version, regardless of the server's actual release version. + +Declaring an API Version +```````````````````````` + +.. attention:: Versioned API requires MongoDB >=5.0. + To configure MongoDB Versioned API, pass the ``server_api`` keyword option to :class:`~pymongo.mongo_client.MongoClient`:: - from pymongo.mongo_client import MongoClient - from pymongo.server_api import ServerApi + >>> from pymongo.mongo_client import MongoClient + >>> from pymongo.server_api import ServerApi + >>> + >>> # Declare API version "1" for MongoClient "client" + >>> server_api = ServerApi('1') + >>> client = MongoClient(server_api=server_api) - client = MongoClient(server_api=ServerApi('1')) +The declared API version is applied to all commands run through ``client``, +including those sent through the generic +:meth:`~pymongo.database.Database.command` helper. -Note that Versioned API requires MongoDB >=5.0. +.. note:: Declaring an API version on the + :class:`~pymongo.mongo_client.MongoClient` **and** specifying versioned + API options in :meth:`~pymongo.database.Database.command` command document + is not supported and will lead to undefined behaviour. + +To run any command without declaring a server API version or using a different +API version, create a separate :class:`~pymongo.mongo_client.MongoClient` +instance. Strict Mode ``````````` -When ``strict`` mode is configured, commands that are not supported in the -given :attr:`ServerApi.version` will fail. For example:: +Configuring ``strict`` mode will cause the MongoDB server to reject all +commands that are not part of the declared :attr:`ServerApi.version`. This +includes command options and aggregation pipeline stages. + +For example:: - >>> client = MongoClient(server_api=ServerApi('1', strict=True)) + >>> server_api = ServerApi('1', strict=True) + >>> client = MongoClient(server_api=server_api) >>> client.test.command('count', 'test') Traceback (most recent call last): ... pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError' +Detecting API Deprecations +`````````````````````````` + +The ``deprecationErrors`` option can be used to enable command failures +when using functionality that is deprecated from the configured +:attr:`ServerApi.version`. For example:: + + >>> server_api = ServerApi('1', deprecation_errors=True) + >>> client = MongoClient(server_api=server_api) + +Note that at the time of this writing, no deprecated APIs exist. + Classes ======= """ From c0321ef2c516a32bb793cd94bd95de33dae1a994 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 2 Apr 2021 10:09:27 -0700 Subject: [PATCH 0321/2111] PYTHON-2630 Statically initialize Py_buffer to avoid false positives in Coverity (#588) --- bson/_cbsonmodule.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 400a51b4e3..15a37cb0a3 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -2501,7 +2501,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { codec_options_t options; PyObject* result = NULL; PyObject* options_obj; - Py_buffer view; + Py_buffer view = {0}; if (! (PyArg_ParseTuple(args, "OO", &bson, &options_obj) && convert_codec_options(options_obj, &options))) { @@ -2580,7 +2580,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* result = NULL; codec_options_t options; PyObject* options_obj; - Py_buffer view; + Py_buffer view = {0}; if (!PyArg_ParseTuple(args, "O|O", &bson, &options_obj)) { return NULL; From cc029a1e6208863eaab453777363d3935b927f32 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 2 Apr 2021 10:17:04 -0700 Subject: [PATCH 0322/2111] PYTHON-2631 Add missing error message to InvalidBSON error (#589) --- bson/_cbsonmodule.c | 2 +- test/test_bson.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 15a37cb0a3..2eb9e992d5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -2342,7 +2342,7 @@ static int _element_to_dict(PyObject* self, const char* string, if (name_length > BSON_MAX_SIZE || position + name_length >= max) { PyObject* InvalidBSON = _error("InvalidBSON"); if (InvalidBSON) { - PyErr_SetNone(InvalidBSON); + PyErr_SetString(InvalidBSON, "field name too large"); Py_DECREF(InvalidBSON); } return -1; diff --git a/test/test_bson.py b/test/test_bson.py index 7c14c625ce..ccae6fcd66 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -367,6 +367,13 @@ def test_invalid_decodes(self): with self.assertRaises(InvalidBSON, msg=msg): list(decode_file_iter(scratch)) + def test_invalid_field_name(self): + # Decode a truncated field + with self.assertRaises(InvalidBSON) as ctx: + decode(b'\x0b\x00\x00\x00\x02field\x00') + # Assert that the InvalidBSON error message is not empty. + self.assertTrue(str(ctx.exception)) + def test_data_timestamp(self): self.assertEqual({"test": Timestamp(4, 20)}, decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" From 4c7718eb5a4dc8be4e528197ca553e0ffad3496b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 19 Apr 2021 13:24:54 -0700 Subject: [PATCH 0323/2111] PYTHON-2634 Only update pools for data-bearing servers (#590) Fixes a noisy OperationFailure: Authentication failed error. Do not attempt to create unneeded connections to arbiters, ghosts, hidden members, or unknown members. --- pymongo/topology.py | 20 +++++++--- test/pymongo_mocks.py | 19 ++++++--- test/test_client.py | 93 ++++++++++++++++++++++++++++++++++++++----- test/test_cmap.py | 16 +++++++- 4 files changed, 124 insertions(+), 24 deletions(-) diff --git a/pymongo/topology.py b/pymongo/topology.py index c4eb947b64..a101ec9c1c 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -429,20 +429,30 @@ def request_check_all(self, wait_time=5): self._request_check_all() self._condition.wait(wait_time) + def data_bearing_servers(self): + """Return a list of all data-bearing servers. + + This includes any server that might be selected for an operation. + """ + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers + def update_pool(self, all_credentials): # Remove any stale sockets and add new sockets if pool is too small. servers = [] with self._lock: - for server in self._servers.values(): - servers.append((server, server._pool.generation)) + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.generation)) for server, generation in servers: - pool = server._pool try: - pool.remove_stale_sockets(generation, all_credentials) + server.pool.remove_stale_sockets(generation, all_credentials) except PyMongoError as exc: ctx = _ErrorContext(exc, 0, generation, False) - self.handle_error(pool.address, ctx) + self.handle_error(server.description.address, ctx) raise def close(self): diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 7520f2bfb1..8a28284bf5 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -106,13 +106,13 @@ def _check_once(self): class MockClient(MongoClient): def __init__( self, standalones, members, mongoses, ismaster_hosts=None, - *args, **kwargs): + arbiters=None, down_hosts=None, *args, **kwargs): """A MongoClient connected to the default server, with a mock topology. - standalones, members, mongoses determine the configuration of the - topology. They are formatted like ['a:1', 'b:2']. ismaster_hosts - provides an alternative host list for the server's mocked ismaster - response; see test_connect_with_internal_ips. + standalones, members, mongoses, arbiters, and down_hosts determine the + configuration of the topology. They are formatted like ['a:1', 'b:2']. + ismaster_hosts provides an alternative host list for the server's + mocked ismaster response; see test_connect_with_internal_ips. """ self.mock_standalones = standalones[:] self.mock_members = members[:] @@ -122,6 +122,9 @@ def __init__( else: self.mock_primary = None + # Hosts that should be considered an arbiter. + self.mock_arbiters = arbiters[:] if arbiters else [] + if ismaster_hosts is not None: self.mock_ismaster_hosts = ismaster_hosts else: @@ -130,7 +133,7 @@ def __init__( self.mock_mongoses = mongoses[:] # Hosts that should raise socket errors. - self.mock_down_hosts = [] + self.mock_down_hosts = down_hosts[:] if down_hosts else [] # Hostname -> (min wire version, max wire version) self.mock_wire_versions = {} @@ -203,6 +206,10 @@ def mock_is_master(self, host): if self.mock_primary: response['primary'] = self.mock_primary + + if host in self.mock_arbiters: + response['arbiterOnly'] = True + response['secondary'] = False elif host in self.mock_mongoses: response = { 'ok': 1, diff --git a/test/test_client.py b/test/test_client.py index dd5e37392a..b55db450ec 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -35,7 +35,7 @@ from bson.son import SON from bson.tz_util import utc import pymongo -from pymongo import message +from pymongo import message, monitoring from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS from pymongo.command_cursor import CommandCursor from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD @@ -57,7 +57,7 @@ from pymongo.pool import SocketInfo, _METADATA from pymongo.read_preferences import ReadPreference from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (any_server_selector, +from pymongo.server_selectors import (readable_server_selector, writable_server_selector) from pymongo.server_type import SERVER_TYPE from pymongo.settings import TOPOLOGY_TYPE @@ -77,6 +77,7 @@ from test.pymongo_mocks import MockClient from test.utils import (assertRaisesExactly, connected, + CMAPListener, delay, FunctionCallRecorder, get_pool, @@ -448,21 +449,25 @@ def test_uri_security_options(self): class TestClient(IntegrationTest): - def test_max_idle_time_reaper(self): + def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove sockets when maxIdleTimeMS not set client = rs_or_single_client() - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) with server._pool.get_socket({}) as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) self.assertTrue(sock_info in server._pool.sockets) client.close() + def test_max_idle_time_reaper_removes_stale_minPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) with server._pool.get_socket({}) as sock_info: pass # When the reaper runs at the same time as the get_socket, two @@ -474,11 +479,14 @@ def test_max_idle_time_reaper(self): "replace stale socket") client.close() + def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new sockets. client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) with server._pool.get_socket({}) as sock_info: pass # When the reaper runs at the same time as the get_socket, @@ -490,9 +498,12 @@ def test_max_idle_time_reaper(self): "replace stale socket") client.close() + def test_max_idle_time_reaper_removes_stale(self): + with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) with server._pool.get_socket({}) as sock_info_one: pass # Assert that the pool does not close sockets prematurely. @@ -508,12 +519,14 @@ def test_max_idle_time_reaper(self): def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=.1): client = rs_or_single_client() - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) self.assertEqual(0, len(server._pool.sockets)) # Assert that pool started up at minPoolSize client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) wait_until(lambda: 10 == len(server._pool.sockets), "pool initialized with 10 sockets") @@ -528,7 +541,8 @@ def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) with server._pool.get_socket({}) as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) @@ -542,7 +556,8 @@ def test_max_idle_time_checkout(self): # Test that sockets are reused if maxIdleTimeMS is not set. client = rs_or_single_client() - server = client._get_topology().select_server(any_server_selector) + server = client._get_topology().select_server( + readable_server_selector) with server._pool.get_socket({}) as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) @@ -1944,5 +1959,61 @@ def timeout_task(): self.assertIsNone(ct.get()) +class TestClientPool(MockClientTest): + + def test_rs_client_does_not_maintain_pool_to_arbiters(self): + listener = CMAPListener() + c = MockClient( + standalones=[], + members=['a:1', 'b:2', 'c:3', 'd:4'], + mongoses=[], + arbiters=['c:3'], # c:3 is an arbiter. + down_hosts=['d:4'], # d:4 is unreachable. + host=['a:1', 'b:2', 'c:3', 'd:4'], + replicaSet='rs', + minPoolSize=1, # minPoolSize + event_listeners=[listener], + ) + self.addCleanup(c.close) + + wait_until(lambda: len(c.nodes) == 3, 'connect') + self.assertEqual(c.address, ('a', 1)) + self.assertEqual(c.arbiters, set([('c', 3)])) + # Assert that we create 2 and only 2 pooled connections. + listener.wait_for_event(monitoring.ConnectionReadyEvent, 2) + self.assertEqual( + listener.event_count(monitoring.ConnectionCreatedEvent), 2) + # Assert that we do not create connections to arbiters. + arbiter = c._topology.get_server_by_address(('c', 3)) + self.assertFalse(arbiter.pool.sockets) + # Assert that we do not create connections to unknown servers. + arbiter = c._topology.get_server_by_address(('d', 4)) + self.assertFalse(arbiter.pool.sockets) + + def test_direct_client_maintains_pool_to_arbiter(self): + listener = CMAPListener() + c = MockClient( + standalones=[], + members=['a:1', 'b:2', 'c:3'], + mongoses=[], + arbiters=['c:3'], # c:3 is an arbiter. + host='c:3', + directConnection=True, + minPoolSize=1, # minPoolSize + event_listeners=[listener], + ) + self.addCleanup(c.close) + + print(c.topology_description) + wait_until(lambda: len(c.nodes) == 1, 'connect') + self.assertEqual(c.address, ('c', 3)) + # Assert that we create 1 pooled connection. + listener.wait_for_event(monitoring.ConnectionReadyEvent, 1) + self.assertEqual( + listener.event_count(monitoring.ConnectionCreatedEvent), 1) + arbiter = c._topology.get_server_by_address(('c', 3)) + self.assertEqual(len(arbiter.pool.sockets), 1) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_cmap.py b/test/test_cmap.py index bf5328fcda..b4a14bb97c 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -41,6 +41,7 @@ PoolClosedEvent) from pymongo.read_preferences import ReadPreference from pymongo.pool import _PoolClosedError, PoolState +from pymongo.topology_description import updated_topology_description from test import (client_knobs, IntegrationTest, @@ -226,12 +227,23 @@ def run_scenario(self, scenario_def, test): opts = test['poolOptions'].copy() opts['event_listeners'] = [self.listener] opts['_monitor_class'] = DummyMonitor + opts['connect'] = False with client_knobs(kill_cursor_frequency=.05, min_heartbeat_interval=.05): client = single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = client_context.client._topology.description + sd = td.server_descriptions()[(client_context.host, + client_context.port)] + client._topology._description = updated_topology_description( + client._topology._description, sd) + client._get_topology() self.addCleanup(client.close) - # self.pool = get_pools(client)[0] - self.pool = list(client._get_topology()._servers.values())[0].pool + self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. self.targets = dict() From 85f9f7a8a118ee08243698309b646ea6d75a9037 Mon Sep 17 00:00:00 2001 From: William Zhou Date: Wed, 21 Apr 2021 11:32:50 -0700 Subject: [PATCH 0324/2111] PYTHON-2397: MongoClient(ssl=True, tls=False) fails with an AttributeError (#592) --- pymongo/mongo_client.py | 2 +- test/test_client.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5dda8efd81..ffb4a7ca65 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -647,7 +647,7 @@ def __init__( username = None password = None dbase = None - opts = {} + opts = common._CaseInsensitiveDictionary() fqdn = None for entity in host: if "://" in entity: diff --git a/test/test_client.py b/test/test_client.py index b55db450ec..2544bcef4c 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -446,6 +446,9 @@ def test_uri_security_options(self): MongoClient('mongodb://localhost/?tlsInsecure=true', connect=False, ssl_cert_reqs=True) + # Conflicting kwargs should raise InvalidURI + with self.assertRaises(InvalidURI): + MongoClient(ssl=True, tls=False) class TestClient(IntegrationTest): From b978827414a2698499a41b0fd3660df2e8cc9f56 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 21 Apr 2021 12:04:59 -0700 Subject: [PATCH 0325/2111] PYTHON-2584 Use large localThresholdMS to fix test_load_balancing (#595) --- test/test_server_selection_in_window.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index bdd778e815..c599210d11 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -21,7 +21,7 @@ from pymongo.read_preferences import ReadPreference from test import client_context, IntegrationTest, unittest from test.utils_selection_tests import create_topology -from test.utils import TestCreator, rs_client, OvertCommandListener +from test.utils import TestCreator, rs_client, OvertCommandListener, wait_until # Location of JSON test specifications. @@ -127,10 +127,14 @@ def frequencies(self, client, listener): @client_context.require_multiple_mongoses def test_load_balancing(self): listener = OvertCommandListener() + # PYTHON-2584: Use a large localThresholdMS to avoid the impact of + # varying RTTs. client = rs_client(client_context.mongos_seeds(), appName='loadBalancingTest', - event_listeners=[listener]) + event_listeners=[listener], + localThresholdMS=10000) self.addCleanup(client.close) + wait_until(lambda: len(client.nodes) == 2, 'discover both nodes') # Delay find commands on delay_finds = { 'configureFailPoint': 'failCommand', From aaba51d927618e7ac2be7c208a014f9d66cb2c40 Mon Sep 17 00:00:00 2001 From: William Zhou Date: Wed, 21 Apr 2021 12:20:53 -0700 Subject: [PATCH 0326/2111] PYTHON-2234: When mongocryptd spawn fails, the driver does not indicate what it tried to spawn (#591) --- pymongo/daemon.py | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/pymongo/daemon.py b/pymongo/daemon.py index 521b4a6b4e..dfec915212 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -23,6 +23,8 @@ import subprocess import sys +from pymongo.errors import PyMongoError + # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) @@ -54,12 +56,16 @@ def _silence_resource_warning(popen): def _spawn_daemon(args): """Spawn a daemon process (Windows).""" - with open(os.devnull, 'r+b') as devnull: - popen = subprocess.Popen( - args, - creationflags=_DETACHED_PROCESS, - stdin=devnull, stderr=devnull, stdout=devnull) - _silence_resource_warning(popen) + try: + with open(os.devnull, 'r+b') as devnull: + popen = subprocess.Popen( + args, + creationflags=_DETACHED_PROCESS, + stdin=devnull, stderr=devnull, stdout=devnull) + _silence_resource_warning(popen) + except FileNotFoundError as exc: + raise PyMongoError( + f'Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}') else: # On Unix we spawn the daemon process with a double Popen. # 1) The first Popen runs this file as a Python script using the current @@ -74,12 +80,15 @@ def _spawn_daemon(args): # we spawn the mongocryptd daemon process. def _spawn(args): """Spawn the process and silence stdout/stderr.""" - with open(os.devnull, 'r+b') as devnull: - return subprocess.Popen( - args, - close_fds=True, - stdin=devnull, stderr=devnull, stdout=devnull) - + try: + with open(os.devnull, 'r+b') as devnull: + return subprocess.Popen( + args, + close_fds=True, + stdin=devnull, stderr=devnull, stdout=devnull) + except FileNotFoundError as exc: + raise PyMongoError( + f'Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}') def _spawn_daemon_double_popen(args): """Spawn a daemon process using a double subprocess.Popen.""" From d06f3f350457a88780c24d19009e48041159985e Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 21 Apr 2021 15:51:41 -0700 Subject: [PATCH 0327/2111] PYTHON-2234 Only rely on standard library in daemon.py (#597) --- pymongo/daemon.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pymongo/daemon.py b/pymongo/daemon.py index dfec915212..f0253547d9 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -22,13 +22,14 @@ import os import subprocess import sys +import warnings -from pymongo.errors import PyMongoError # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) + def _popen_wait(popen, timeout): """Implement wait timeout support for Python 3.""" try: @@ -47,7 +48,9 @@ def _silence_resource_warning(popen): # "ResourceWarning: subprocess XXX is still running". # See https://bugs.python.org/issue38890 and # https://bugs.python.org/issue26741. - popen.returncode = 0 + # popen is None when mongocryptd spawning fails + if popen is not None: + popen.returncode = 0 if sys.platform == 'win32': @@ -64,8 +67,9 @@ def _spawn_daemon(args): stdin=devnull, stderr=devnull, stdout=devnull) _silence_resource_warning(popen) except FileNotFoundError as exc: - raise PyMongoError( - f'Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}') + warnings.warn(f'Failed to start {args[0]}: is it on your $PATH?\n' + f'Original exception: {exc}', RuntimeWarning, + stacklevel=2) else: # On Unix we spawn the daemon process with a double Popen. # 1) The first Popen runs this file as a Python script using the current @@ -87,8 +91,10 @@ def _spawn(args): close_fds=True, stdin=devnull, stderr=devnull, stdout=devnull) except FileNotFoundError as exc: - raise PyMongoError( - f'Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}') + warnings.warn(f'Failed to start {args[0]}: is it on your $PATH?\n' + f'Original exception: {exc}', RuntimeWarning, + stacklevel=2) + def _spawn_daemon_double_popen(args): """Spawn a daemon process using a double subprocess.Popen.""" From 1818553fc952ee14c82457f71c823a0833b1fc08 Mon Sep 17 00:00:00 2001 From: Khanh Nguyen <44149581+Kn99HN@users.noreply.github.com> Date: Fri, 23 Apr 2021 13:58:25 -0400 Subject: [PATCH 0328/2111] PYTHON-1880: Raise a warning when no_cursor_timeout is used with an implicit session (#594) --- pymongo/cursor.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index b051b068af..9d39de544b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -144,6 +144,14 @@ def __init__(self, collection, filter=None, projection=None, skip=0, if not isinstance(limit, int): raise TypeError("limit must be an instance of int") validate_boolean("no_cursor_timeout", no_cursor_timeout) + if no_cursor_timeout and not self.__explicit_session: + warnings.warn("use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://docs.mongodb.com/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, stacklevel=2) if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE, CursorType.TAILABLE_AWAIT, CursorType.EXHAUST): raise ValueError("not a valid value for cursor_type") From fac0372ba0a0607fe00d16711f7d8d21e2ce13a8 Mon Sep 17 00:00:00 2001 From: Janosh Riebesell Date: Fri, 23 Apr 2021 21:48:09 +0200 Subject: [PATCH 0329/2111] PYTHON-2364 Replace deprecated dns.resolver.query with dns.resolver.resolve (#598) Fall back to dns.resolver.query for dns v1 compat. --- pymongo/srv_resolver.py | 15 +++++++++++---- test/test_client.py | 8 ++++---- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index cf9ccffc43..45a4f66611 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -16,6 +16,13 @@ try: from dns import resolver + + try: + # dnspython >= 2 + from dns.resolver import resolve as _resolve + except ImportError: + # dnspython 1.X + from dns.resolver import query as _resolve _HAVE_DNSPYTHON = True except ImportError: _HAVE_DNSPYTHON = False @@ -48,8 +55,8 @@ def __init__(self, fqdn, connect_timeout=None): def get_options(self): try: - results = resolver.query(self.__fqdn, 'TXT', - lifetime=self.__connect_timeout) + results = _resolve(self.__fqdn, 'TXT', + lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): # No TXT records return None @@ -63,8 +70,8 @@ def get_options(self): def _resolve_uri(self, encapsulate_errors): try: - results = resolver.query('_mongodb._tcp.' + self.__fqdn, 'SRV', - lifetime=self.__connect_timeout) + results = _resolve('_mongodb._tcp.' + self.__fqdn, 'SRV', + lifetime=self.__connect_timeout) except Exception as exc: if not encapsulate_errors: # Raise the original error. diff --git a/test/test_client.py b/test/test_client.py index 2544bcef4c..e62516bd58 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -392,11 +392,11 @@ def test_uri_option_precedence(self): _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. - from pymongo.srv_resolver import resolver - patched_resolver = FunctionCallRecorder(resolver.query) - pymongo.srv_resolver.resolver.query = patched_resolver + from pymongo.srv_resolver import _resolve + patched_resolver = FunctionCallRecorder(_resolve) + pymongo.srv_resolver._resolve = patched_resolver def reset_resolver(): - pymongo.srv_resolver.resolver.query = resolver.query + pymongo.srv_resolver._resolve = _resolve self.addCleanup(reset_resolver) # Setup. From cd823c8ed1f618f54daad232d642ef9375b45e41 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 23 Apr 2021 15:11:35 -0700 Subject: [PATCH 0330/2111] PYTHON-2600 Resync spec tests for versioned api (#599) Also resolves PYTHON-2599 and PYTHON-2641. --- .evergreen/config.yml | 8 +- ...ommand-helper-no-api-version-declared.json | 2 +- .../test-commands-deprecation-errors.json | 4 +- .../test-commands-strict-mode.json | 2 +- test/versioned-api/transaction-handling.json | 135 +++++++++++++++++- 5 files changed, 141 insertions(+), 10 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c678535fd4..b77ed0d382 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1944,11 +1944,11 @@ axes: REQUIRE_API_VERSION: "1" # MONGODB_API_VERSION is the apiVersion to use in the test suite. MONGODB_API_VERSION: "1" - # Test against a cluster with acceptAPIVersion2 but without + # Test against a cluster with acceptApiVersion2 but without # requireApiVersion, and don't automatically add apiVersion to # clients created in the test suite. - - id: "acceptAPIVersion2" - display_name: "acceptAPIVersion2" + - id: "acceptApiVersion2" + display_name: "acceptApiVersion2" tags: [ "versionedApi_tag" ] variables: ORCHESTRATION_FILE: "versioned-api-testing.json" @@ -2434,7 +2434,7 @@ buildvariants: - matrix_name: "versioned-api-tests" matrix_spec: - platform: ubuntu-16.04 + platform: ubuntu-18.04 python-version: ["3.6", "3.9"] auth: "auth" versionedApi: "*" diff --git a/test/versioned-api/runcommand-helper-no-api-version-declared.json b/test/versioned-api/runcommand-helper-no-api-version-declared.json index 65c24ef460..e901887e4c 100644 --- a/test/versioned-api/runcommand-helper-no-api-version-declared.json +++ b/test/versioned-api/runcommand-helper-no-api-version-declared.json @@ -3,7 +3,7 @@ "schemaVersion": "1.1", "runOnRequirements": [ { - "minServerVersion": "4.7", + "minServerVersion": "4.9", "serverParameters": { "requireApiVersion": false } diff --git a/test/versioned-api/test-commands-deprecation-errors.json b/test/versioned-api/test-commands-deprecation-errors.json index bdbc3f92ce..0668df830a 100644 --- a/test/versioned-api/test-commands-deprecation-errors.json +++ b/test/versioned-api/test-commands-deprecation-errors.json @@ -3,10 +3,10 @@ "schemaVersion": "1.1", "runOnRequirements": [ { - "minServerVersion": "4.7", + "minServerVersion": "4.9", "serverParameters": { "enableTestCommands": true, - "acceptAPIVersion2": true, + "acceptApiVersion2": true, "requireApiVersion": false } } diff --git a/test/versioned-api/test-commands-strict-mode.json b/test/versioned-api/test-commands-strict-mode.json index ebace44319..1705ba7bff 100644 --- a/test/versioned-api/test-commands-strict-mode.json +++ b/test/versioned-api/test-commands-strict-mode.json @@ -3,7 +3,7 @@ "schemaVersion": "1.1", "runOnRequirements": [ { - "minServerVersion": "4.7", + "minServerVersion": "4.9", "serverParameters": { "enableTestCommands": true } diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json index 64e9706b5e..a740405d3a 100644 --- a/test/versioned-api/transaction-handling.json +++ b/test/versioned-api/transaction-handling.json @@ -3,7 +3,7 @@ "schemaVersion": "1.1", "runOnRequirements": [ { - "minServerVersion": "4.7", + "minServerVersion": "4.9", "topologies": [ "replicaset", "sharded-replicaset" @@ -382,7 +382,138 @@ ] } ] + }, + { + "description": "abortTransaction does not include an API version", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 6, + "x": 66 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 7, + "x": 77 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 6, + "x": 66 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "startTransaction": true, + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 7, + "x": 77 + } + ], + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "apiVersion": { + "$$exists": false + }, + "apiStrict": { + "$$exists": false + }, + "apiDeprecationErrors": { + "$$exists": false + } + } + } + } + ] + } + ] } ] } - From 5388fde214af10745dc4c2a6df5cad09d6d37fae Mon Sep 17 00:00:00 2001 From: Khanh Nguyen <44149581+Kn99HN@users.noreply.github.com> Date: Mon, 26 Apr 2021 11:18:51 -0400 Subject: [PATCH 0331/2111] PYTHON-2605: Improve mongodb+srv:// error message when dnspython is not installed (#602) --- pymongo/uri_parser.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index a683b2fa6d..60d03ba497 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -16,6 +16,7 @@ """Tools to parse and validate a MongoDB URI.""" import re import warnings +import sys from urllib.parse import unquote_plus @@ -416,8 +417,12 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, scheme_free = uri[SCHEME_LEN:] elif uri.startswith(SRV_SCHEME): if not _HAVE_DNSPYTHON: - raise ConfigurationError('The "dnspython" module must be ' - 'installed to use mongodb+srv:// URIs') + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + 'installed to use mongodb+srv:// URIs. ' + 'To fix this error install pymongo with the srv extra:\n ' + '%s -m pip install "pymongo[srv]"' % (python_path)) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: From 61ab9caa6c37be6e85ac672d48cac80bbe3abb03 Mon Sep 17 00:00:00 2001 From: Khanh Nguyen <44149581+Kn99HN@users.noreply.github.com> Date: Mon, 26 Apr 2021 16:55:29 -0400 Subject: [PATCH 0332/2111] docs: Update link to sphinx website (#608) --- README.rst | 2 +- doc/contributors.rst | 1 + doc/index.rst | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 5b25e69f12..7ced4ec2aa 100644 --- a/README.rst +++ b/README.rst @@ -205,4 +205,4 @@ Or with Eventlet's:: $ python green_framework_test.py eventlet -.. _sphinx: http://sphinx.pocoo.org/ +.. _sphinx: https://www.sphinx-doc.org/en/master/ diff --git a/doc/contributors.rst b/doc/contributors.rst index 4118d55586..c763c424c5 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -88,3 +88,4 @@ The following is a list of people who have contributed to - Terence Honles (terencehonles) - Paul Fisher (thetorpedodog) - Julius Park (juliusgeo) +- Khanh Nguyen (KN99HN) \ No newline at end of file diff --git a/doc/index.rst b/doc/index.rst index 9d7ac2b89b..9ef6907181 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -92,7 +92,7 @@ For older versions of the documentation please see the About This Documentation ------------------------ This documentation is generated using the `Sphinx -`_ documentation generator. The source files +`_ documentation generator. The source files for the documentation are located in the *doc/* directory of the **PyMongo** distribution. To generate the docs locally run the following command from the root directory of the **PyMongo** source: From 93046431dfc3d7e60598de9c727df3e02609f5e6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 27 Apr 2021 11:00:20 -0700 Subject: [PATCH 0333/2111] PYTHON-2603 Standardize on ubuntu1804 zseries, power8, and arm64 (#600) PYTHON-2647 Fix test_use_openssl_when_available when service_identity<18.1 is installed --- .evergreen/config.yml | 78 +++++++++---------------------------------- test/test_ssl.py | 4 ++- 2 files changed, 18 insertions(+), 64 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index b77ed0d382..3634c97faa 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1606,16 +1606,6 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel71-power8-test - display_name: "RHEL 7.1 (POWER8)" - run_on: rhel71-power8-test - batchtime: 10080 # 7 days - - id: rhel72-zseries-test - display_name: "RHEL 7.2 (zSeries)" - run_on: rhel72-zseries-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel72-zseries-test/master/latest/libmongocrypt.tar.gz - id: ubuntu-16.04 display_name: "Ubuntu 16.04" run_on: ubuntu1604-test @@ -1635,19 +1625,17 @@ axes: batchtime: 10080 # 7 days variables: python3_binary: python3 - - id: ubuntu1604-arm64-small - display_name: "Ubuntu 16.04 (ARM64)" - run_on: ubuntu1604-arm64-small + - id: ubuntu1804-zseries + display_name: "Ubuntu 18.04 (zSeries)" + run_on: ubuntu1804-zseries-small batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604-arm64/master/latest/libmongocrypt.tar.gz - - id: ubuntu1604-power8-test - display_name: "Ubuntu 16.04 (POWER8)" - run_on: ubuntu1604-power8-test + - id: ubuntu1804-power8 + display_name: "Ubuntu 18.04 (POWER8)" + run_on: ubuntu1804-power8-small batchtime: 10080 # 7 days - - id: ubuntu1804-arm64-test + - id: ubuntu1804-arm64 display_name: "Ubuntu 18.04 (ARM64)" - run_on: ubuntu1804-arm64-test + run_on: ubuntu1804-arm64-small batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz @@ -2009,7 +1997,6 @@ buildvariants: platform: # OSes that support versions of MongoDB>=3.2 with SSL. - ubuntu-16.04 - - rhel71-power8-test auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: @@ -2063,60 +2050,25 @@ buildvariants: display_name: "Encryption ${platform} ${auth} ${ssl}" tasks: *encryption-server-versions -- matrix_name: "test-os-requires-34-no-42plus" +# Test one server version (4.2) with zSeries, POWER8, and ARM. +- matrix_name: "test-different-cpu-architectures" matrix_spec: platform: - # OSes that support versions of MongoDB>=3.4 <4.2 with SSL. - - ubuntu1604-power8-test - - ubuntu1604-arm64-small + - ubuntu1804-zseries # Ubuntu 18 or RHEL 8.x? + - ubuntu1804-power8 # Ubuntu 18 or RHEL 7? + - ubuntu1804-arm64 auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: - - ".4.0" - - ".3.6" - - ".3.4" - -- matrix_name: "test-os-requires-34" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=3.4 with SSL. - - rhel72-zseries-test - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - -- matrix_name: "test-os-requires-42" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=4.2 with SSL. - - ubuntu1804-arm64-test - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".4.4" - ".4.2" - matrix_name: "tests-python-version-amazon1-test-ssl" matrix_spec: platform: awslinux python-version: &amazon1-pythons ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] - auth: "*" - ssl: "*" + auth-ssl: "*" coverage: "*" - exclude_spec: - - platform: awslinux - python-version: "*" - auth: "noauth" - ssl: "ssl" - coverage: "*" - display_name: "${python-version} ${platform} ${auth} ${ssl} ${coverage}" + display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" tasks: *all-server-versions - matrix_name: "tests-pyopenssl" diff --git a/test/test_ssl.py b/test/test_ssl.py index 71ea142a27..430f9d576e 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -45,6 +45,8 @@ import OpenSSL import requests import service_identity + # Ensure service_identity>=18.1 is installed + from service_identity.pyopenssl import verify_ip_address from pymongo.ocsp_support import _load_trusted_ca_certs _HAVE_PYOPENSSL = True except ImportError: @@ -155,7 +157,7 @@ def test_config_ssl(self): ssl.CERT_REQUIRED) @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") - def test_use_openssl_when_available(self): + def test_use_pyopenssl_when_available(self): self.assertTrue(_ssl.IS_PYOPENSSL) @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") From 0f8f9da2b87e6f7e7c8bccc7c886a792de56edcf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 27 Apr 2021 12:54:38 -0700 Subject: [PATCH 0334/2111] PYTHON-2624 Increase serverSelectionTimeoutMS for mongocryptd connection (#604) --- pymongo/encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index f3e59a38b4..1153b17fbe 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -61,7 +61,7 @@ _HTTPS_PORT = 443 _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. -_MONGOCRYPTD_TIMEOUT_MS = 1000 +_MONGOCRYPTD_TIMEOUT_MS = 10000 _DATA_KEY_OPTS = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding From 7c8571020884e499b33b7f4682b1fd5e0f2655d2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 27 Apr 2021 14:00:21 -0700 Subject: [PATCH 0335/2111] PYTHON-2570 Resync unified tests version 1.1 or lower (#601) --- ...ient-serverApi-deprecationErrors-type.json | 21 +++++++ .../entity-client-serverApi-strict-type.json | 21 +++++++ .../invalid/entity-client-serverApi-type.json | 18 ++++++ ...ity-client-serverApi-version-required.json | 18 ++++++ .../entity-client-serverApi-version-type.json | 20 +++++++ ...ctedCommandEvent-additionalProperties.json | 27 +++++++++ ...t-commandFailedEvent-commandName-type.json | 29 ++++++++++ ...mandStartedEvent-additionalProperties.json | 29 ++++++++++ ...vent-commandStartedEvent-command-type.json | 29 ++++++++++ ...-commandStartedEvent-commandName-type.json | 29 ++++++++++ ...commandStartedEvent-databaseName-type.json | 29 ++++++++++ ...ommandSucceededEvent-commandName-type.json | 29 ++++++++++ ...vent-commandSucceededEvent-reply-type.json | 29 ++++++++++ .../expectedCommandEvent-maxProperties.json | 28 ++++++++++ .../expectedCommandEvent-minProperties.json | 25 +++++++++ ...dEventsForClient-additionalProperties.json | 15 ++--- ...pectedEventsForClient-client-required.json | 11 +--- .../expectedEventsForClient-client-type.json | 13 ++--- .../expectedEventsForClient-events-items.json | 15 ++--- ...pectedEventsForClient-events-required.json | 11 +--- .../expectedEventsForClient-events-type.json | 13 ++--- .../entity-client-apiVersion-unsupported.json | 20 +++++++ .../valid-fail/operation-failure.json | 56 +++++++++++++++++++ .../valid-pass/poc-retryable-writes.json | 16 +++--- 24 files changed, 490 insertions(+), 61 deletions(-) create mode 100644 test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json create mode 100644 test/unified-test-format/invalid/entity-client-serverApi-strict-type.json create mode 100644 test/unified-test-format/invalid/entity-client-serverApi-type.json create mode 100644 test/unified-test-format/invalid/entity-client-serverApi-version-required.json create mode 100644 test/unified-test-format/invalid/entity-client-serverApi-version-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-minProperties.json create mode 100644 test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json create mode 100644 test/unified-test-format/valid-fail/operation-failure.json diff --git a/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json b/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json new file mode 100644 index 0000000000..b688dae631 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-deprecationErrors-type.json @@ -0,0 +1,21 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "1", + "deprecationErrors": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json b/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json new file mode 100644 index 0000000000..0b2fdc4849 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-strict-type.json @@ -0,0 +1,21 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "1", + "strict": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-type.json b/test/unified-test-format/invalid/entity-client-serverApi-type.json new file mode 100644 index 0000000000..20c9d1dce3 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-serverApi-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-version-required.json b/test/unified-test-format/invalid/entity-client-serverApi-version-required.json new file mode 100644 index 0000000000..8bef92b06f --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-version-required.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-serverApi-version-required", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": {} + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-serverApi-version-type.json b/test/unified-test-format/invalid/entity-client-serverApi-version-type.json new file mode 100644 index 0000000000..2c36ff57ed --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-serverApi-version-type.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-serverApi-version-type", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": 0 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json new file mode 100644 index 0000000000..9e45cbadda --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-additionalProperties.json @@ -0,0 +1,27 @@ +{ + "description": "expectedCommandEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "foo": 0 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json new file mode 100644 index 0000000000..a571d8e0c0 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..996332d27d --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-additionalProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "foo": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json new file mode 100644 index 0000000000..8f89460617 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-command-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-command-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json new file mode 100644 index 0000000000..121947b06f --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json new file mode 100644 index 0000000000..97d2b84f68 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-databaseName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json new file mode 100644 index 0000000000..bde2f4817b --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-commandName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-commandName-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "commandName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json new file mode 100644 index 0000000000..9df04acd29 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-reply-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-reply-type", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "reply": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json new file mode 100644 index 0000000000..dd8b0e7e7c --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-maxProperties.json @@ -0,0 +1,28 @@ +{ + "description": "expectedCommandEvent-maxProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": {}, + "commandSucceededEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json b/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json new file mode 100644 index 0000000000..0f3e711a18 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-minProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedCommandEvent-minProperties", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + {} + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json index 6ecf5931fb..90ed9c3273 100644 --- a/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json +++ b/test/unified-test-format/invalid/expectedEventsForClient-additionalProperties.json @@ -11,17 +11,12 @@ "tests": [ { "description": "foo", - "operations": [ + "operations": [], + "expectEvents": [ { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [], - "foo": 0 - } - ] + "client": "client0", + "events": [], + "foo": 0 } ] } diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-required.json b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json index b879db8598..24b6330de7 100644 --- a/test/unified-test-format/invalid/expectedEventsForClient-client-required.json +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-required.json @@ -11,15 +11,10 @@ "tests": [ { "description": "foo", - "operations": [ + "operations": [], + "expectEvents": [ { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "events": [] - } - ] + "events": [] } ] } diff --git a/test/unified-test-format/invalid/expectedEventsForClient-client-type.json b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json index 4ee5427df1..6e66857ee6 100644 --- a/test/unified-test-format/invalid/expectedEventsForClient-client-type.json +++ b/test/unified-test-format/invalid/expectedEventsForClient-client-type.json @@ -11,16 +11,11 @@ "tests": [ { "description": "foo", - "operations": [ + "operations": [], + "expectEvents": [ { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": 0, - "events": [] - } - ] + "client": 0, + "events": [] } ] } diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-items.json b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json index ee8ce4a403..c1fcd4a6c3 100644 --- a/test/unified-test-format/invalid/expectedEventsForClient-events-items.json +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-items.json @@ -11,17 +11,12 @@ "tests": [ { "description": "foo", - "operations": [ + "operations": [], + "expectEvents": [ { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - 0 - ] - } + "client": "client0", + "events": [ + 0 ] } ] diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-required.json b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json index 7f1bc6fb53..39c1e9e12d 100644 --- a/test/unified-test-format/invalid/expectedEventsForClient-events-required.json +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-required.json @@ -11,15 +11,10 @@ "tests": [ { "description": "foo", - "operations": [ + "operations": [], + "expectEvents": [ { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0" - } - ] + "client": "client0" } ] } diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events-type.json b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json index f171fc2b93..4199d042b0 100644 --- a/test/unified-test-format/invalid/expectedEventsForClient-events-type.json +++ b/test/unified-test-format/invalid/expectedEventsForClient-events-type.json @@ -11,16 +11,11 @@ "tests": [ { "description": "foo", - "operations": [ + "operations": [], + "expectEvents": [ { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": 0 - } - ] + "client": "client0", + "events": 0 } ] } diff --git a/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json b/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json new file mode 100644 index 0000000000..d92d23dcaf --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-apiVersion-unsupported.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-apiVersion-unsupported", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "serverApi": { + "version": "server_will_never_support_this_api_version" + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-failure.json b/test/unified-test-format/valid-fail/operation-failure.json new file mode 100644 index 0000000000..8f6cae1521 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-failure.json @@ -0,0 +1,56 @@ +{ + "description": "operation-failure", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "operation-failure" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Unsupported command", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "unsupportedCommand", + "command": { + "unsupportedCommand": 1 + } + } + } + ] + }, + { + "description": "Unsupported query operator", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$unsupportedQueryOperator": 1 + } + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json index 3e42aacb80..30c1d54152 100644 --- a/test/unified-test-format/valid-pass/poc-retryable-writes.json +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -297,13 +297,15 @@ "ordered": true }, "expectResult": { - "insertedCount": { - "$$unsetOrMatches": 2 - }, - "insertedIds": { - "$$unsetOrMatches": { - "0": 3, - "1": 4 + "$$unsetOrMatches": { + "insertedCount": { + "$$unsetOrMatches": 2 + }, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } } } } From 6412fed059647c2f717ba11073cdcc4cfe097e74 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 27 Apr 2021 15:52:55 -0700 Subject: [PATCH 0336/2111] PYTHON-2634 Skip arbiter tests when no server is running (#611) --- test/test_client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/test_client.py b/test/test_client.py index e62516bd58..3754cb0ac3 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1964,6 +1964,7 @@ def timeout_task(): class TestClientPool(MockClientTest): + @client_context.require_connection def test_rs_client_does_not_maintain_pool_to_arbiters(self): listener = CMAPListener() c = MockClient( @@ -1993,6 +1994,7 @@ def test_rs_client_does_not_maintain_pool_to_arbiters(self): arbiter = c._topology.get_server_by_address(('d', 4)) self.assertFalse(arbiter.pool.sockets) + @client_context.require_connection def test_direct_client_maintains_pool_to_arbiter(self): listener = CMAPListener() c = MockClient( @@ -2007,7 +2009,6 @@ def test_direct_client_maintains_pool_to_arbiter(self): ) self.addCleanup(c.close) - print(c.topology_description) wait_until(lambda: len(c.nodes) == 1, 'connect') self.assertEqual(c.address, ('c', 3)) # Assert that we create 1 pooled connection. From a44e719dca39e1f10c648b58415e9c2586c2b8cd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Apr 2021 12:18:54 -0700 Subject: [PATCH 0337/2111] PYTHON-2533 Add support for sample_rate and filter in set_profiling_level (#605) --- pymongo/database.py | 24 ++++++++++++++++++++---- test/test_database.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index a810c67a6c..1291c2a14f 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -914,7 +914,8 @@ def profiling_level(self, session=None): assert result["was"] >= 0 and result["was"] <= 2 return result["was"] - def set_profiling_level(self, level, slow_ms=None, session=None): + def set_profiling_level(self, level, slow_ms=None, session=None, + sample_rate=None, filter=None): """Set the database's profiling level. :Parameters: @@ -925,6 +926,10 @@ def set_profiling_level(self, level, slow_ms=None, session=None): slower than the `slow_ms` level will get written to the logs. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `sample_rate` (optional): The fraction of slow operations that + should be profiled or logged expressed as a float between 0 and 1. + - `filter` (optional): A filter expression that controls which + operations are profiled and logged. Possible `level` values: @@ -942,6 +947,9 @@ def set_profiling_level(self, level, slow_ms=None, session=None): (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). + .. versionchanged:: 3.12 + Added the ``sample_rate`` and ``filter`` parameters. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -953,10 +961,18 @@ def set_profiling_level(self, level, slow_ms=None, session=None): if slow_ms is not None and not isinstance(slow_ms, int): raise TypeError("slow_ms must be an integer") + if sample_rate is not None and not isinstance(sample_rate, float): + raise TypeError( + "sample_rate must be a float, not %r" % (sample_rate,)) + + cmd = SON(profile=level) if slow_ms is not None: - self.command("profile", level, slowms=slow_ms, session=session) - else: - self.command("profile", level, session=session) + cmd['slowms'] = slow_ms + if sample_rate is not None: + cmd['sampleRate'] = sample_rate + if filter is not None: + cmd['filter'] = filter + self.command(cmd, session=session) def profiling_info(self, session=None): """Returns a list containing current profiling information. diff --git a/test/test_database.py b/test/test_database.py index e910c4b9c9..0f64f2a46e 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -397,6 +397,35 @@ def test_profiling_levels(self): db.set_profiling_level(OFF, 100) # back to default self.assertEqual(100, db.command("profile", -1)['slowms']) + @client_context.require_no_mongos + @client_context.require_version_min(3, 6) + def test_profiling_sample_rate(self): + db = self.client.pymongo_test + with self.assertRaises(TypeError): + db.set_profiling_level(SLOW_ONLY, 50, sample_rate='1') + with self.assertRaises(TypeError): + db.set_profiling_level(SLOW_ONLY, 50, sample_rate=1) + + db.set_profiling_level(SLOW_ONLY, 50, sample_rate=0.0) + db.set_profiling_level(SLOW_ONLY, 50, sample_rate=1.0) + db.set_profiling_level(SLOW_ONLY, 50, sample_rate=0.5) + profile = db.command("profile", -1) + self.assertEqual(50, profile['slowms']) + self.assertEqual(0.5, profile['sampleRate']) + db.set_profiling_level(OFF, 100) # back to default + self.assertEqual(100, db.command("profile", -1)['slowms']) + + @client_context.require_no_mongos + @client_context.require_version_min(4, 4, 2) + def test_profiling_filter(self): + db = self.client.pymongo_test + db.set_profiling_level(ALL, filter={'ns': {'$eq': 'test.test'}}) + profile = db.command("profile", -1) + self.assertEqual({'ns': {'$eq': 'test.test'}}, profile['filter']) + # filter='unset' resets the filter back to the default. + db.set_profiling_level(OFF, 100, filter='unset') + self.assertEqual(100, db.command("profile", -1)['slowms']) + @client_context.require_no_mongos def test_profiling_info(self): db = self.client.pymongo_test From 61c687687286267e8b5c958cad0d669b5450a48a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Apr 2021 12:38:13 -0700 Subject: [PATCH 0338/2111] PYTHON-2635 Unpin sessions after all abortTransaction attempts (#609) Add unified test runner for transactions. --- pymongo/client_session.py | 1 + test/test_transactions.py | 13 +- test/transactions/{ => legacy}/abort.json | 0 test/transactions/{ => legacy}/bulk.json | 0 .../{ => legacy}/causal-consistency.json | 0 test/transactions/{ => legacy}/commit.json | 0 test/transactions/{ => legacy}/count.json | 0 .../{ => legacy}/create-collection.json | 0 .../{ => legacy}/create-index.json | 0 test/transactions/{ => legacy}/delete.json | 0 .../{ => legacy}/error-labels.json | 0 test/transactions/{ => legacy}/errors.json | 0 .../{ => legacy}/findOneAndDelete.json | 0 .../{ => legacy}/findOneAndReplace.json | 0 .../{ => legacy}/findOneAndUpdate.json | 0 test/transactions/{ => legacy}/insert.json | 0 test/transactions/{ => legacy}/isolation.json | 0 .../{ => legacy}/mongos-pin-auto.json | 0 .../{ => legacy}/mongos-recovery-token.json | 0 .../transactions/{ => legacy}/pin-mongos.json | 0 .../{ => legacy}/read-concern.json | 0 test/transactions/{ => legacy}/read-pref.json | 0 test/transactions/{ => legacy}/reads.json | 0 .../retryable-abort-errorLabels.json | 0 .../{ => legacy}/retryable-abort.json | 0 .../retryable-commit-errorLabels.json | 0 .../{ => legacy}/retryable-commit.json | 0 .../{ => legacy}/retryable-writes.json | 0 .../{ => legacy}/run-command.json | 0 .../transaction-options-repl.json | 0 .../{ => legacy}/transaction-options.json | 0 test/transactions/{ => legacy}/update.json | 0 .../{ => legacy}/write-concern.json | 0 test/transactions/unified/mongos-unpin.json | 373 ++++++++++++++++++ test/unified_format.py | 3 +- 35 files changed, 386 insertions(+), 4 deletions(-) rename test/transactions/{ => legacy}/abort.json (100%) rename test/transactions/{ => legacy}/bulk.json (100%) rename test/transactions/{ => legacy}/causal-consistency.json (100%) rename test/transactions/{ => legacy}/commit.json (100%) rename test/transactions/{ => legacy}/count.json (100%) rename test/transactions/{ => legacy}/create-collection.json (100%) rename test/transactions/{ => legacy}/create-index.json (100%) rename test/transactions/{ => legacy}/delete.json (100%) rename test/transactions/{ => legacy}/error-labels.json (100%) rename test/transactions/{ => legacy}/errors.json (100%) rename test/transactions/{ => legacy}/findOneAndDelete.json (100%) rename test/transactions/{ => legacy}/findOneAndReplace.json (100%) rename test/transactions/{ => legacy}/findOneAndUpdate.json (100%) rename test/transactions/{ => legacy}/insert.json (100%) rename test/transactions/{ => legacy}/isolation.json (100%) rename test/transactions/{ => legacy}/mongos-pin-auto.json (100%) rename test/transactions/{ => legacy}/mongos-recovery-token.json (100%) rename test/transactions/{ => legacy}/pin-mongos.json (100%) rename test/transactions/{ => legacy}/read-concern.json (100%) rename test/transactions/{ => legacy}/read-pref.json (100%) rename test/transactions/{ => legacy}/reads.json (100%) rename test/transactions/{ => legacy}/retryable-abort-errorLabels.json (100%) rename test/transactions/{ => legacy}/retryable-abort.json (100%) rename test/transactions/{ => legacy}/retryable-commit-errorLabels.json (100%) rename test/transactions/{ => legacy}/retryable-commit.json (100%) rename test/transactions/{ => legacy}/retryable-writes.json (100%) rename test/transactions/{ => legacy}/run-command.json (100%) rename test/transactions/{ => legacy}/transaction-options-repl.json (100%) rename test/transactions/{ => legacy}/transaction-options.json (100%) rename test/transactions/{ => legacy}/update.json (100%) rename test/transactions/{ => legacy}/write-concern.json (100%) create mode 100644 test/transactions/unified/mongos-unpin.json diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 5e3f081940..950fe0dc13 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -660,6 +660,7 @@ def abort_transaction(self): pass finally: self._transaction.state = _TxnState.ABORTED + self._unpin_mongos() def _finish_transaction_with_retry(self, command_name): """Run commit or abort with one retry after any retryable error. diff --git a/test/test_transactions.py b/test/test_transactions.py index 859c8f3e25..2e154d4bc5 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -39,10 +39,13 @@ wait_until, OvertCommandListener, TestCreator) from test.utils_spec_runner import SpecRunner +from test.unified_format import generate_test_classes # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions') +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'transactions', 'legacy') +UNIFIED_TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'transactions', 'unified') _TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG') @@ -466,7 +469,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestTransactions, _TEST_PATH) +test_creator = TestCreator(create_test, TestTransactions, TEST_PATH) test_creator.create_tests() @@ -474,5 +477,9 @@ def run_scenario(self): TestTransactionsConvenientAPI.TEST_PATH).create_tests() +# Generate unified tests. +globals().update(generate_test_classes(UNIFIED_TEST_PATH, module=__name__)) + + if __name__ == "__main__": unittest.main() diff --git a/test/transactions/abort.json b/test/transactions/legacy/abort.json similarity index 100% rename from test/transactions/abort.json rename to test/transactions/legacy/abort.json diff --git a/test/transactions/bulk.json b/test/transactions/legacy/bulk.json similarity index 100% rename from test/transactions/bulk.json rename to test/transactions/legacy/bulk.json diff --git a/test/transactions/causal-consistency.json b/test/transactions/legacy/causal-consistency.json similarity index 100% rename from test/transactions/causal-consistency.json rename to test/transactions/legacy/causal-consistency.json diff --git a/test/transactions/commit.json b/test/transactions/legacy/commit.json similarity index 100% rename from test/transactions/commit.json rename to test/transactions/legacy/commit.json diff --git a/test/transactions/count.json b/test/transactions/legacy/count.json similarity index 100% rename from test/transactions/count.json rename to test/transactions/legacy/count.json diff --git a/test/transactions/create-collection.json b/test/transactions/legacy/create-collection.json similarity index 100% rename from test/transactions/create-collection.json rename to test/transactions/legacy/create-collection.json diff --git a/test/transactions/create-index.json b/test/transactions/legacy/create-index.json similarity index 100% rename from test/transactions/create-index.json rename to test/transactions/legacy/create-index.json diff --git a/test/transactions/delete.json b/test/transactions/legacy/delete.json similarity index 100% rename from test/transactions/delete.json rename to test/transactions/legacy/delete.json diff --git a/test/transactions/error-labels.json b/test/transactions/legacy/error-labels.json similarity index 100% rename from test/transactions/error-labels.json rename to test/transactions/legacy/error-labels.json diff --git a/test/transactions/errors.json b/test/transactions/legacy/errors.json similarity index 100% rename from test/transactions/errors.json rename to test/transactions/legacy/errors.json diff --git a/test/transactions/findOneAndDelete.json b/test/transactions/legacy/findOneAndDelete.json similarity index 100% rename from test/transactions/findOneAndDelete.json rename to test/transactions/legacy/findOneAndDelete.json diff --git a/test/transactions/findOneAndReplace.json b/test/transactions/legacy/findOneAndReplace.json similarity index 100% rename from test/transactions/findOneAndReplace.json rename to test/transactions/legacy/findOneAndReplace.json diff --git a/test/transactions/findOneAndUpdate.json b/test/transactions/legacy/findOneAndUpdate.json similarity index 100% rename from test/transactions/findOneAndUpdate.json rename to test/transactions/legacy/findOneAndUpdate.json diff --git a/test/transactions/insert.json b/test/transactions/legacy/insert.json similarity index 100% rename from test/transactions/insert.json rename to test/transactions/legacy/insert.json diff --git a/test/transactions/isolation.json b/test/transactions/legacy/isolation.json similarity index 100% rename from test/transactions/isolation.json rename to test/transactions/legacy/isolation.json diff --git a/test/transactions/mongos-pin-auto.json b/test/transactions/legacy/mongos-pin-auto.json similarity index 100% rename from test/transactions/mongos-pin-auto.json rename to test/transactions/legacy/mongos-pin-auto.json diff --git a/test/transactions/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json similarity index 100% rename from test/transactions/mongos-recovery-token.json rename to test/transactions/legacy/mongos-recovery-token.json diff --git a/test/transactions/pin-mongos.json b/test/transactions/legacy/pin-mongos.json similarity index 100% rename from test/transactions/pin-mongos.json rename to test/transactions/legacy/pin-mongos.json diff --git a/test/transactions/read-concern.json b/test/transactions/legacy/read-concern.json similarity index 100% rename from test/transactions/read-concern.json rename to test/transactions/legacy/read-concern.json diff --git a/test/transactions/read-pref.json b/test/transactions/legacy/read-pref.json similarity index 100% rename from test/transactions/read-pref.json rename to test/transactions/legacy/read-pref.json diff --git a/test/transactions/reads.json b/test/transactions/legacy/reads.json similarity index 100% rename from test/transactions/reads.json rename to test/transactions/legacy/reads.json diff --git a/test/transactions/retryable-abort-errorLabels.json b/test/transactions/legacy/retryable-abort-errorLabels.json similarity index 100% rename from test/transactions/retryable-abort-errorLabels.json rename to test/transactions/legacy/retryable-abort-errorLabels.json diff --git a/test/transactions/retryable-abort.json b/test/transactions/legacy/retryable-abort.json similarity index 100% rename from test/transactions/retryable-abort.json rename to test/transactions/legacy/retryable-abort.json diff --git a/test/transactions/retryable-commit-errorLabels.json b/test/transactions/legacy/retryable-commit-errorLabels.json similarity index 100% rename from test/transactions/retryable-commit-errorLabels.json rename to test/transactions/legacy/retryable-commit-errorLabels.json diff --git a/test/transactions/retryable-commit.json b/test/transactions/legacy/retryable-commit.json similarity index 100% rename from test/transactions/retryable-commit.json rename to test/transactions/legacy/retryable-commit.json diff --git a/test/transactions/retryable-writes.json b/test/transactions/legacy/retryable-writes.json similarity index 100% rename from test/transactions/retryable-writes.json rename to test/transactions/legacy/retryable-writes.json diff --git a/test/transactions/run-command.json b/test/transactions/legacy/run-command.json similarity index 100% rename from test/transactions/run-command.json rename to test/transactions/legacy/run-command.json diff --git a/test/transactions/transaction-options-repl.json b/test/transactions/legacy/transaction-options-repl.json similarity index 100% rename from test/transactions/transaction-options-repl.json rename to test/transactions/legacy/transaction-options-repl.json diff --git a/test/transactions/transaction-options.json b/test/transactions/legacy/transaction-options.json similarity index 100% rename from test/transactions/transaction-options.json rename to test/transactions/legacy/transaction-options.json diff --git a/test/transactions/update.json b/test/transactions/legacy/update.json similarity index 100% rename from test/transactions/update.json rename to test/transactions/legacy/update.json diff --git a/test/transactions/write-concern.json b/test/transactions/legacy/write-concern.json similarity index 100% rename from test/transactions/write-concern.json rename to test/transactions/legacy/write-concern.json diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json new file mode 100644 index 0000000000..33127198a8 --- /dev/null +++ b/test/transactions/unified/mongos-unpin.json @@ -0,0 +1,373 @@ +{ + "description": "mongos-unpin", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "mongos-unpin-db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "mongos-unpin-db", + "documents": [] + } + ], + "_yamlAnchors": { + "anchors": 24 + }, + "tests": [ + { + "description": "unpin after TransientTransctionError error on commit", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin after TransientTransctionError error on abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin after non-transient error on abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a new transaction is started", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction write operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + }, + { + "description": "unpin when a non-transaction read operation uses a session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertSessionPinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertSessionUnpinned", + "object": "testRunner", + "arguments": { + "session": "session0" + } + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index fc23a193b1..3a5e5782c0 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -820,11 +820,12 @@ def _testOperation_assertSessionTransactionState(self, spec): def _testOperation_assertSessionPinned(self, spec): session = self.entity_map[spec['session']] - self.assertIsNotNone(session._pinned_address) + self.assertIsNotNone(session._transaction.pinned_address) def _testOperation_assertSessionUnpinned(self, spec): session = self.entity_map[spec['session']] self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) def __get_last_two_command_lsids(self, listener): cmd_started_events = [] From 14ac9a3fde779d3d0234342a7ae2da57b5e10f43 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Apr 2021 15:02:06 -0700 Subject: [PATCH 0339/2111] PYTHON-2547 Change estimated_document_count() to use $collStats instead of count on 4.9+ (#606) Fix CRUD v1 aggregate $out change for https://github.com/mongodb/specifications/commit/3f3a3c225d2605c4b1f9c4b5d408b4bb44f36214 PYTHON-2301 ValueError is an acceptable error for CRUD v2 error:true tests --- pymongo/collection.py | 65 +- test/crud/unified/estimatedDocumentCount.json | 562 +++++++++++ test/crud/v1/read/aggregate-out.json | 20 - .../bulkWrite-arrayFilters-clientError.json | 110 +++ test/crud/v2/bulkWrite-update-validation.json | 151 +++ test/crud/v2/find-allowdiskuse.json | 142 +-- .../v2/findOneAndDelete-hint-serverError.json | 2 +- test/crud/v2/replaceOne-validation.json | 41 + test/crud/v2/updateMany-validation.json | 57 ++ test/crud/v2/updateOne-validation.json | 39 + test/crud_v2_format.py | 6 + test/retryable_reads/aggregate-merge.json | 98 ++ .../estimatedDocumentCount-4.9.json | 246 +++++ ...son => estimatedDocumentCount-pre4.9.json} | 2 + ...timatedDocumentCount-serverErrors-4.9.json | 911 ++++++++++++++++++ ...tedDocumentCount-serverErrors-pre4.9.json} | 2 + test/retryable_reads/listIndexNames.json | 12 +- test/test_crud_unified.py | 34 + test/test_crud_v1.py | 8 +- .../crud-api-version-1-strict.json | 48 +- test/versioned-api/crud-api-version-1.json | 49 +- 21 files changed, 2475 insertions(+), 130 deletions(-) create mode 100644 test/crud/unified/estimatedDocumentCount.json create mode 100644 test/crud/v2/bulkWrite-arrayFilters-clientError.json create mode 100644 test/crud/v2/bulkWrite-update-validation.json create mode 100644 test/crud/v2/replaceOne-validation.json create mode 100644 test/crud/v2/updateMany-validation.json create mode 100644 test/crud/v2/updateOne-validation.json create mode 100644 test/retryable_reads/aggregate-merge.json create mode 100644 test/retryable_reads/estimatedDocumentCount-4.9.json rename test/retryable_reads/{estimatedDocumentCount.json => estimatedDocumentCount-pre4.9.json} (97%) create mode 100644 test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json rename test/retryable_reads/{estimatedDocumentCount-serverErrors.json => estimatedDocumentCount-serverErrors-pre4.9.json} (99%) create mode 100644 test/test_crud_unified.py diff --git a/pymongo/collection.py b/pymongo/collection.py index 7be1191f5d..62fc3d256b 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1498,38 +1498,49 @@ def find_raw_batches(self, *args, **kwargs): return RawBatchCursor(self, *args, **kwargs) + def _count_cmd(self, session, sock_info, slave_ok, cmd, collation): + """Internal count command helper.""" + # XXX: "ns missing" checks can be removed when we drop support for + # MongoDB 3.0, see SERVER-17051. + res = self._command( + sock_info, + cmd, + slave_ok, + allowable_errors=["ns missing"], + codec_options=self.__write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session) + if res.get("errmsg", "") == "ns missing": + return 0 + return int(res["n"]) + def _count(self, cmd, collation=None, session=None): """Internal count helper.""" # XXX: "ns missing" checks can be removed when we drop support for # MongoDB 3.0, see SERVER-17051. def _cmd(session, server, sock_info, slave_ok): - res = self._command( - sock_info, - cmd, - slave_ok, - allowable_errors=["ns missing"], - codec_options=self.__write_response_codec_options, - read_concern=self.read_concern, - collation=collation, - session=session) - if res.get("errmsg", "") == "ns missing": - return 0 - return int(res["n"]) + return self._count_cmd( + session, sock_info, slave_ok, cmd, collation) return self.__database.client._retryable_read( _cmd, self._read_preference_for(session), session) def _aggregate_one_result( - self, sock_info, slave_ok, cmd, collation=None, session=None): + self, sock_info, slave_ok, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" result = self._command( sock_info, cmd, slave_ok, + allowable_errors=[26], # Ignore NamespaceNotFound. codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation, session=session) + # cursor will not be present for NamespaceNotFound errors. + if 'cursor' not in result: + return None batch = result['cursor']['firstBatch'] return batch[0] if batch else None @@ -1554,9 +1565,31 @@ def estimated_document_count(self, **kwargs): if 'session' in kwargs: raise ConfigurationError( 'estimated_document_count does not support sessions') - cmd = SON([('count', self.__name)]) - cmd.update(kwargs) - return self._count(cmd) + + def _cmd(session, server, sock_info, slave_ok): + if sock_info.max_wire_version >= 12: + # MongoDB 4.9+ + pipeline = [ + {'$collStats': {'count': {}}}, + {'$group': {'_id': 1, 'n': {'$sum': '$count'}}}, + ] + cmd = SON([('aggregate', self.__name), + ('pipeline', pipeline), + ('cursor', {})]) + cmd.update(kwargs) + result = self._aggregate_one_result( + sock_info, slave_ok, cmd, collation=None, session=session) + if not result: + return 0 + return int(result['n']) + else: + # MongoDB < 4.9 + cmd = SON([('count', self.__name)]) + cmd.update(kwargs) + return self._count_cmd(None, sock_info, slave_ok, cmd, None) + + return self.__database.client._retryable_read( + _cmd, self.read_preference, None) def count_documents(self, filter, session=None, **kwargs): """Count the number of documents in this collection. diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..bcd66ea954 --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount.json @@ -0,0 +1,562 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount uses $collStats on 4.9.0 or greater", + "runOnRequirements": [ + { + "minServerVersion": "4.9.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with maxTimeMS on 4.9.0 or greater", + "runOnRequirements": [ + { + "minServerVersion": "4.9.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "maxTimeMS": 6000 + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ], + "maxTimeMS": 6000 + }, + "commandName": "aggregate", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount on non-existent collection on 4.9.0 or greater", + "runOnRequirements": [ + { + "minServerVersion": "4.9.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection1", + "expectResult": 0 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll1", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly on 4.9.0 or greater--command error", + "runOnRequirements": [ + { + "minServerVersion": "4.9.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "errorCode": 8 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly on 4.9.0 or greater--socket error", + "runOnRequirements": [ + { + "minServerVersion": "4.9.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount uses count on less than 4.9.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.8.99" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with maxTimeMS on less than 4.9.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.8.99" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "maxTimeMS": 6000 + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "maxTimeMS": 6000 + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount on non-existent collection on less than 4.9.0", + "runOnRequirements": [ + { + "maxServerVersion": "4.8.99" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection1", + "expectResult": 0 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll1" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly on less than 4.9.0--command error", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "maxServerVersion": "4.8.99", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.8.99", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "errorCode": 8 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount errors correctly on less than 4.9.0--socket error", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "maxServerVersion": "4.8.99", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.8.99", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection0", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0" + }, + "commandName": "count", + "databaseName": "edc-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/v1/read/aggregate-out.json b/test/crud/v1/read/aggregate-out.json index 205cf76571..4e33f9288f 100644 --- a/test/crud/v1/read/aggregate-out.json +++ b/test/crud/v1/read/aggregate-out.json @@ -41,16 +41,6 @@ } }, "outcome": { - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], "collection": { "name": "other_test_collection", "data": [ @@ -92,16 +82,6 @@ } }, "outcome": { - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], "collection": { "name": "other_test_collection", "data": [ diff --git a/test/crud/v2/bulkWrite-arrayFilters-clientError.json b/test/crud/v2/bulkWrite-arrayFilters-clientError.json new file mode 100644 index 0000000000..22e22f0efb --- /dev/null +++ b/test/crud/v2/bulkWrite-arrayFilters-clientError.json @@ -0,0 +1,110 @@ +{ + "runOn": [ + { + "maxServerVersion": "3.5.5" + } + ], + "data": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite on server that doesn't support arrayFilters", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.0.b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [] + }, + { + "description": "BulkWrite on server that doesn't support arrayFilters with arrayFilters on second op", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.0.b": 2 + } + } + } + }, + { + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "options": { + "ordered": true + } + }, + "error": true + } + ], + "expectations": [] + } + ] +} diff --git a/test/crud/v2/bulkWrite-update-validation.json b/test/crud/v2/bulkWrite-update-validation.json new file mode 100644 index 0000000000..481e13c45c --- /dev/null +++ b/test/crud/v2/bulkWrite-update-validation.json @@ -0,0 +1,151 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + }, + { + "description": "BulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/find-allowdiskuse.json b/test/crud/v2/find-allowdiskuse.json index 2df4dbc98e..b2862563b9 100644 --- a/test/crud/v2/find-allowdiskuse.json +++ b/test/crud/v2/find-allowdiskuse.json @@ -1,78 +1,78 @@ { - "runOn": [ + "runOn": [ + { + "minServerVersion": "4.3.1" + } + ], + "collection_name": "test_find_allowdiskuse", + "tests": [ + { + "description": "Find does not send allowDiskuse when value is not specified", + "operations": [ { - "minServerVersion": "4.3.1" + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + } } - ], - "collection_name": "test_find_allowdiskuse", - "tests": [ + ], + "expectations": [ { - "description": "Find does not send allowDiskuse when value is not specified", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse", - "allowDiskUse": null - } - } - } - ] - }, + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": null + } + } + } + ] + }, + { + "description": "Find sends allowDiskuse false when false is specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + } + } + ], + "expectations": [ { - "description": "Find sends allowDiskuse false when false is specified", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": false - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse", - "allowDiskUse": false - } - } - } - ] - }, + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": false + } + } + } + ] + }, + { + "description": "Find sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + } + } + ], + "expectations": [ { - "description": "Find sends allowDiskUse true when true is specified", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": true - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse", - "allowDiskUse": true - } - } - } - ] + "command_started_event": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": true + } + } } - ] -} \ No newline at end of file + ] + } + ] +} diff --git a/test/crud/v2/findOneAndDelete-hint-serverError.json b/test/crud/v2/findOneAndDelete-hint-serverError.json index 5d1dd8989f..9412b36f23 100644 --- a/test/crud/v2/findOneAndDelete-hint-serverError.json +++ b/test/crud/v2/findOneAndDelete-hint-serverError.json @@ -62,7 +62,7 @@ } }, { - "description": "FindOneAndDelete with hint document", + "description": "FindOneAndDelete with hint document unsupported (server-side error)", "operations": [ { "object": "collection", diff --git a/test/crud/v2/replaceOne-validation.json b/test/crud/v2/replaceOne-validation.json new file mode 100644 index 0000000000..2de4a6728b --- /dev/null +++ b/test/crud/v2/replaceOne-validation.json @@ -0,0 +1,41 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + } + ], + "tests": [ + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "object": "collection", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateMany-validation.json b/test/crud/v2/updateMany-validation.json new file mode 100644 index 0000000000..a85ccfa86e --- /dev/null +++ b/test/crud/v2/updateMany-validation.json @@ -0,0 +1,57 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "tests": [ + { + "description": "UpdateOne requires atomic modifiers", + "operations": [ + { + "object": "collection", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + } + } + ] +} diff --git a/test/crud/v2/updateOne-validation.json b/test/crud/v2/updateOne-validation.json new file mode 100644 index 0000000000..6c919f5ea0 --- /dev/null +++ b/test/crud/v2/updateOne-validation.json @@ -0,0 +1,39 @@ +{ + "data": [ + { + "_id": 1, + "x": 11 + } + ], + "tests": [ + { + "description": "UpdateOne requires atomic modifiers", + "operations": [ + { + "object": "collection", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + }, + "error": true + } + ], + "expectations": [], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "x": 11 + } + ] + } + } + } + ] +} diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py index 55dcaae5f5..dbdea40d46 100644 --- a/test/crud_v2_format.py +++ b/test/crud_v2_format.py @@ -25,6 +25,12 @@ class TestCrudV2(SpecRunner): TEST_DB = None TEST_COLLECTION = None + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super(TestCrudV2, self).allowable_errors(op) + errors += (ValueError,) + return errors + def get_scenario_db_name(self, scenario_def): """Crud spec says database_name is optional.""" return scenario_def.get('database_name', self.TEST_DB) diff --git a/test/retryable_reads/aggregate-merge.json b/test/retryable_reads/aggregate-merge.json new file mode 100644 index 0000000000..b401d741ba --- /dev/null +++ b/test/retryable_reads/aggregate-merge.json @@ -0,0 +1,98 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.11" + } + ], + "database_name": "retryable-reads-tests", + "collection_name": "coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "tests": [ + { + "description": "Aggregate with $merge does not retry", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + }, + "operations": [ + { + "object": "collection", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "command_name": "aggregate", + "database_name": "retryable-reads-tests" + } + } + ] + } + ] +} diff --git a/test/retryable_reads/estimatedDocumentCount-4.9.json b/test/retryable_reads/estimatedDocumentCount-4.9.json new file mode 100644 index 0000000000..a4c46fc074 --- /dev/null +++ b/test/retryable_reads/estimatedDocumentCount-4.9.json @@ -0,0 +1,246 @@ +{ + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "database_name": "retryable-reads-tests", + "collection_name": "coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds on first attempt", + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds on second attempt", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount fails on first attempt", + "clientOptions": { + "retryReads": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount fails on second attempt", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + } + ] +} diff --git a/test/retryable_reads/estimatedDocumentCount.json b/test/retryable_reads/estimatedDocumentCount-pre4.9.json similarity index 97% rename from test/retryable_reads/estimatedDocumentCount.json rename to test/retryable_reads/estimatedDocumentCount-pre4.9.json index 8dfa15a2cd..44be966ae7 100644 --- a/test/retryable_reads/estimatedDocumentCount.json +++ b/test/retryable_reads/estimatedDocumentCount-pre4.9.json @@ -2,6 +2,7 @@ "runOn": [ { "minServerVersion": "4.0", + "maxServerVersion": "4.8.99", "topology": [ "single", "replicaset" @@ -9,6 +10,7 @@ }, { "minServerVersion": "4.1.7", + "maxServerVersion": "4.8.99", "topology": [ "sharded" ] diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json b/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json new file mode 100644 index 0000000000..af4dc52ea8 --- /dev/null +++ b/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json @@ -0,0 +1,911 @@ +{ + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "database_name": "retryable-reads-tests", + "collection_name": "coll", + "data": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotMaster", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotMasterNoSlaveOk", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotMasterOrSecondary", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostNotFound", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostUnreachable", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NetworkTimeout", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after SocketException", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "result": 2 + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount fails after two NotMaster errors", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + }, + { + "description": "EstimatedDocumentCount fails after NotMaster when retryReads is false", + "clientOptions": { + "retryReads": false + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + }, + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection", + "error": true + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "database_name": "retryable-reads-tests" + } + } + ] + } + ] +} diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors.json b/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json similarity index 99% rename from test/retryable_reads/estimatedDocumentCount-serverErrors.json rename to test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json index 1af21d1fe9..c11e609cd4 100644 --- a/test/retryable_reads/estimatedDocumentCount-serverErrors.json +++ b/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json @@ -2,6 +2,7 @@ "runOn": [ { "minServerVersion": "4.0", + "maxServerVersion": "4.8.99", "topology": [ "single", "replicaset" @@ -9,6 +10,7 @@ }, { "minServerVersion": "4.1.7", + "maxServerVersion": "4.8.99", "topology": [ "sharded" ] diff --git a/test/retryable_reads/listIndexNames.json b/test/retryable_reads/listIndexNames.json index ef2a6d7306..912c706015 100644 --- a/test/retryable_reads/listIndexNames.json +++ b/test/retryable_reads/listIndexNames.json @@ -30,7 +30,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -61,7 +61,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -69,7 +69,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -104,7 +104,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -136,7 +136,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } @@ -144,7 +144,7 @@ { "command_started_event": { "command": { - "listIndexNames": "coll" + "listIndexes": "coll" }, "database_name": "retryable-reads-tests" } diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py new file mode 100644 index 0000000000..a2aece6ff0 --- /dev/null +++ b/test/test_crud_unified.py @@ -0,0 +1,34 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CRUD unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'crud', 'unified') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index c93d6a22e5..6650ae29d0 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -125,11 +125,6 @@ def run_operation(collection, test): result = cmd(**arguments) - if operation == "aggregate": - if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: - out = collection.database[arguments["pipeline"][-1]["$out"]] - result = out.find() - if isinstance(result, Cursor) or isinstance(result, CommandCursor): return list(result) @@ -154,7 +149,8 @@ def run_scenario(self): run_operation(self.db.test, test) else: result = run_operation(self.db.test, test) - check_result(self, expected_result, result) + if expected_result is not None: + check_result(self, expected_result, result) # Assert final state is expected. expected_c = test['outcome'].get('collection') diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json index a2eb02e432..2705b505a8 100644 --- a/test/versioned-api/crud-api-version-1-strict.json +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -3,7 +3,7 @@ "schemaVersion": "1.1", "runOnRequirements": [ { - "minServerVersion": "4.7" + "minServerVersion": "4.9" } ], "createEntities": [ @@ -301,6 +301,12 @@ "$inc": { "x": 1 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -353,7 +359,10 @@ "x": 1 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "apiVersion": "1", @@ -397,6 +406,9 @@ "_id": 4, "x": 44 }, + "multi": { + "$$unsetOrMatches": false + }, "upsert": true } ], @@ -596,7 +608,6 @@ }, { "description": "estimatedDocumentCount appends declared API version", - "skipReason": "DRIVERS-1437 count was removed from API version 1", "operations": [ { "name": "estimatedDocumentCount", @@ -611,7 +622,22 @@ { "commandStartedEvent": { "command": { - "count": "test", + "aggregate": "test", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ], "apiVersion": "1", "apiStrict": true, "apiDeprecationErrors": { @@ -952,6 +978,9 @@ "_id": 4, "x": 44 }, + "multi": { + "$$unsetOrMatches": false + }, "upsert": true } ], @@ -1007,7 +1036,10 @@ "x": 1 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "apiVersion": "1", @@ -1057,6 +1089,12 @@ "$inc": { "x": 1 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index 6584d8d2ae..9171858376 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -3,7 +3,7 @@ "schemaVersion": "1.1", "runOnRequirements": [ { - "minServerVersion": "4.7" + "minServerVersion": "4.9" } ], "createEntities": [ @@ -298,6 +298,12 @@ "$inc": { "x": 1 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -350,7 +356,10 @@ "x": 1 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "apiVersion": "1", @@ -394,6 +403,9 @@ "_id": 4, "x": 44 }, + "multi": { + "$$unsetOrMatches": false + }, "upsert": true } ], @@ -587,7 +599,7 @@ ] }, { - "description": "estimatedDocumentCount appends declared API version", + "description": "estimatedDocumentCount appends declared API version on 4.9.0 or greater", "operations": [ { "name": "estimatedDocumentCount", @@ -602,7 +614,22 @@ { "commandStartedEvent": { "command": { - "count": "test", + "aggregate": "test", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ], "apiVersion": "1", "apiStrict": { "$$unsetOrMatches": false @@ -943,6 +970,9 @@ "_id": 4, "x": 44 }, + "multi": { + "$$unsetOrMatches": false + }, "upsert": true } ], @@ -998,7 +1028,10 @@ "x": 1 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "apiVersion": "1", @@ -1048,6 +1081,12 @@ "$inc": { "x": 1 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], From acfa7b615c5d263a2fc94d7746771beb6fff4016 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Apr 2021 15:09:56 -0700 Subject: [PATCH 0340/2111] PYTHON-2667 Fix SRV support when running with eventlet (#612) --- pymongo/srv_resolver.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 45a4f66611..42be08a4d3 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -16,13 +16,6 @@ try: from dns import resolver - - try: - # dnspython >= 2 - from dns.resolver import resolve as _resolve - except ImportError: - # dnspython 1.X - from dns.resolver import query as _resolve _HAVE_DNSPYTHON = True except ImportError: _HAVE_DNSPYTHON = False @@ -39,6 +32,15 @@ def maybe_decode(text): return text +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +def _resolve(*args, **kwargs): + if hasattr(resolver, 'resolve'): + # dnspython >= 2 + return resolver.resolve(*args, **kwargs) + # dnspython 1.X + return resolver.query(*args, **kwargs) + + class _SrvResolver(object): def __init__(self, fqdn, connect_timeout=None): self.__fqdn = fqdn From 1390283a5d92c8941a84eb72d86d66b0d2127f4e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 30 Apr 2021 14:20:10 -0700 Subject: [PATCH 0341/2111] PYTHON-2658 Remove NPS survey (#615) --- doc/conf.py | 7 ------- doc/static/delighted.js | 22 ---------------------- 2 files changed, 29 deletions(-) delete mode 100644 doc/static/delighted.js diff --git a/doc/conf.py b/doc/conf.py index 381abb299d..4ce2b5bc0b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -91,13 +91,6 @@ # Additional static files. html_static_path = ['static'] -# These paths are either relative to html_static_path -# or fully qualified paths (eg. https://...) -# Note: html_js_files was added in Sphinx 1.8. -html_js_files = [ - 'delighted.js', -] - # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None diff --git a/doc/static/delighted.js b/doc/static/delighted.js deleted file mode 100644 index d373bd0acf..0000000000 --- a/doc/static/delighted.js +++ /dev/null @@ -1,22 +0,0 @@ -/* eslint-disable */ -// Delighted -!function(e,t,r,n,a){if(!e[a]){for(var i=e[a]=[],s=0;s Date: Tue, 4 May 2021 10:51:58 -0700 Subject: [PATCH 0342/2111] PYTHON-2678 Resync SRV spec tests (#613) Add support for validating parsed_options and running non-TLS tests. --- .../srv_seedlist/encoded-userinfo-and-db.json | 21 ++++++++++++ .../txt-record-not-allowed-option.json | 7 ++++ ...txt-record-with-overridden-ssl-option.json | 16 +++++++++ .../srv_seedlist/uri-with-admin-database.json | 19 +++++++++++ test/srv_seedlist/uri-with-auth.json | 17 ++++++++++ test/test_dns.py | 33 ++++++++++++++----- 6 files changed, 105 insertions(+), 8 deletions(-) create mode 100644 test/srv_seedlist/encoded-userinfo-and-db.json create mode 100644 test/srv_seedlist/txt-record-not-allowed-option.json create mode 100644 test/srv_seedlist/txt-record-with-overridden-ssl-option.json create mode 100644 test/srv_seedlist/uri-with-admin-database.json create mode 100644 test/srv_seedlist/uri-with-auth.json diff --git a/test/srv_seedlist/encoded-userinfo-and-db.json b/test/srv_seedlist/encoded-userinfo-and-db.json new file mode 100644 index 0000000000..70c6c23a39 --- /dev/null +++ b/test/srv_seedlist/encoded-userinfo-and-db.json @@ -0,0 +1,21 @@ +{ + "uri": "mongodb+srv://b*b%40f3tt%3D:%244to%40L8%3DMC@test3.test.build.10gen.cc/mydb%3F?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "user": "b*b@f3tt=", + "password": "$4to@L8=MC", + "db": "mydb?" + }, + "comment": "Encoded user, pass, and DB parse correctly" +} diff --git a/test/srv_seedlist/txt-record-not-allowed-option.json b/test/srv_seedlist/txt-record-not-allowed-option.json new file mode 100644 index 0000000000..2a5cf2f007 --- /dev/null +++ b/test/srv_seedlist/txt-record-not-allowed-option.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because socketTimeoutMS is not an allowed option." +} diff --git a/test/srv_seedlist/txt-record-with-overridden-ssl-option.json b/test/srv_seedlist/txt-record-with-overridden-ssl-option.json new file mode 100644 index 0000000000..0ebc737bd5 --- /dev/null +++ b/test/srv_seedlist/txt-record-with-overridden-ssl-option.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?ssl=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "authSource": "thisDB", + "ssl": false + } +} diff --git a/test/srv_seedlist/uri-with-admin-database.json b/test/srv_seedlist/uri-with-admin-database.json new file mode 100644 index 0000000000..32710d75f7 --- /dev/null +++ b/test/srv_seedlist/uri-with-admin-database.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/adminDB?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "auth_database": "adminDB" + } +} diff --git a/test/srv_seedlist/uri-with-auth.json b/test/srv_seedlist/uri-with-auth.json new file mode 100644 index 0000000000..cc7257d85b --- /dev/null +++ b/test/srv_seedlist/uri-with-auth.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "parsed_options": { + "user": "auser", + "password": "apass" + }, + "comment": "Should preserve auth credentials" +} diff --git a/test/test_dns.py b/test/test_dns.py index fc5f98e3b6..c4c1b00733 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -30,7 +30,7 @@ from test.utils import wait_until -_TEST_PATH = os.path.join( +TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'srv_seedlist') class TestDNS(unittest.TestCase): @@ -40,7 +40,6 @@ class TestDNS(unittest.TestCase): def create_test(test_case): @client_context.require_replica_set - @client_context.require_tls def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") @@ -48,6 +47,15 @@ def run_test(self): seeds = test_case['seeds'] hosts = test_case['hosts'] options = test_case.get('options') + parsed_options = test_case.get('parsed_options') + # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. + needs_tls = not (options and (options.get('ssl') == False or + options.get('tls') == False)) + if needs_tls and not client_context.tls: + self.skipTest('this test requires a TLS cluster') + if not needs_tls and client_context.tls: + self.skipTest('this test requires a non-TLS cluster') + if seeds: seeds = split_hosts(','.join(seeds)) if hosts: @@ -63,18 +71,27 @@ def run_test(self): 'readPreferenceTags', opts.pop('readpreferencetags')) opts['readPreferenceTags'] = rpts self.assertEqual(result['options'], options) + if parsed_options: + for opt, expected in parsed_options.items(): + if opt == 'user': + self.assertEqual(result['username'], expected) + elif opt == 'password': + self.assertEqual(result['password'], expected) + elif opt == 'auth_database' or opt == 'db': + self.assertEqual(result['database'], expected) hostname = next(iter(client_context.client.nodes))[0] # The replica set members must be configured as 'localhost'. if hostname == 'localhost': copts = client_context.default_client_options.copy() - if client_context.tls is True: - # Our test certs don't support the SRV hosts used in these tests. - copts['ssl_match_hostname'] = False + # Remove tls since SRV parsing should add it automatically. + copts.pop('tls', None) + if client_context.tls: + # Our test certs don't support the SRV hosts used in these + # tests. + copts['tlsAllowInvalidHostnames'] = True client = MongoClient(uri, **copts) - # Force server selection - client.admin.command('ismaster') wait_until( lambda: hosts == client.nodes, 'match test hosts to client nodes') @@ -90,7 +107,7 @@ def run_test(self): def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(TEST_PATH, '*.json')): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as dns_test_file: test_method = create_test(json.load(dns_test_file)) From 2c41c6fe958ad0f33c9e164c36a0e9f08600bf3b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 May 2021 12:51:05 -0700 Subject: [PATCH 0343/2111] PYTHON-2671 Support loadBalanced URI option (#614) Add workaround in test_dns until PYTHON-2679 is completed. --- pymongo/client_options.py | 6 +++ pymongo/common.py | 1 + pymongo/mongo_client.py | 13 +++---- pymongo/settings.py | 9 ++++- pymongo/uri_parser.py | 29 +++++++++++--- test/__init__.py | 7 ++++ .../loadBalanced-directConnection.json | 14 +++++++ .../loadBalanced-replicaSet-errors.json | 7 ++++ .../loadBalanced-true-multiple-hosts.json | 7 ++++ .../load-balanced/loadBalanced-true-txt.json | 13 +++++++ .../direct-connection-false.json | 0 .../direct-connection-true.json | 0 .../encoded-userinfo-and-db.json | 0 .../replica-set/loadBalanced-false-txt.json | 15 ++++++++ .../longer-parent-in-return.json | 0 .../misformatted-option.json | 0 .../{ => replica-set}/no-results.json | 0 .../{ => replica-set}/not-enough-parts.json | 0 .../one-result-default-port.json | 0 .../one-txt-record-multiple-strings.json | 0 .../{ => replica-set}/one-txt-record.json | 0 .../parent-part-mismatch1.json | 0 .../parent-part-mismatch2.json | 0 .../parent-part-mismatch3.json | 0 .../parent-part-mismatch4.json | 0 .../parent-part-mismatch5.json | 0 .../returned-parent-too-short.json | 0 .../returned-parent-wrong.json | 0 .../two-results-default-port.json | 0 .../two-results-nonstandard-port.json | 0 .../{ => replica-set}/two-txt-records.json | 0 .../txt-record-not-allowed-option.json | 0 ...txt-record-with-overridden-ssl-option.json | 0 ...txt-record-with-overridden-uri-option.json | 0 .../txt-record-with-unallowed-option.json | 0 .../uri-with-admin-database.json | 0 .../{ => replica-set}/uri-with-auth.json | 0 .../{ => replica-set}/uri-with-port.json | 0 .../{ => replica-set}/uri-with-two-hosts.json | 0 test/test_dns.py | 38 ++++++++++++++----- 40 files changed, 137 insertions(+), 22 deletions(-) create mode 100644 test/srv_seedlist/load-balanced/loadBalanced-directConnection.json create mode 100644 test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json create mode 100644 test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json create mode 100644 test/srv_seedlist/load-balanced/loadBalanced-true-txt.json rename test/srv_seedlist/{ => replica-set}/direct-connection-false.json (100%) rename test/srv_seedlist/{ => replica-set}/direct-connection-true.json (100%) rename test/srv_seedlist/{ => replica-set}/encoded-userinfo-and-db.json (100%) create mode 100644 test/srv_seedlist/replica-set/loadBalanced-false-txt.json rename test/srv_seedlist/{ => replica-set}/longer-parent-in-return.json (100%) rename test/srv_seedlist/{ => replica-set}/misformatted-option.json (100%) rename test/srv_seedlist/{ => replica-set}/no-results.json (100%) rename test/srv_seedlist/{ => replica-set}/not-enough-parts.json (100%) rename test/srv_seedlist/{ => replica-set}/one-result-default-port.json (100%) rename test/srv_seedlist/{ => replica-set}/one-txt-record-multiple-strings.json (100%) rename test/srv_seedlist/{ => replica-set}/one-txt-record.json (100%) rename test/srv_seedlist/{ => replica-set}/parent-part-mismatch1.json (100%) rename test/srv_seedlist/{ => replica-set}/parent-part-mismatch2.json (100%) rename test/srv_seedlist/{ => replica-set}/parent-part-mismatch3.json (100%) rename test/srv_seedlist/{ => replica-set}/parent-part-mismatch4.json (100%) rename test/srv_seedlist/{ => replica-set}/parent-part-mismatch5.json (100%) rename test/srv_seedlist/{ => replica-set}/returned-parent-too-short.json (100%) rename test/srv_seedlist/{ => replica-set}/returned-parent-wrong.json (100%) rename test/srv_seedlist/{ => replica-set}/two-results-default-port.json (100%) rename test/srv_seedlist/{ => replica-set}/two-results-nonstandard-port.json (100%) rename test/srv_seedlist/{ => replica-set}/two-txt-records.json (100%) rename test/srv_seedlist/{ => replica-set}/txt-record-not-allowed-option.json (100%) rename test/srv_seedlist/{ => replica-set}/txt-record-with-overridden-ssl-option.json (100%) rename test/srv_seedlist/{ => replica-set}/txt-record-with-overridden-uri-option.json (100%) rename test/srv_seedlist/{ => replica-set}/txt-record-with-unallowed-option.json (100%) rename test/srv_seedlist/{ => replica-set}/uri-with-admin-database.json (100%) rename test/srv_seedlist/{ => replica-set}/uri-with-auth.json (100%) rename test/srv_seedlist/{ => replica-set}/uri-with-port.json (100%) rename test/srv_seedlist/{ => replica-set}/uri-with-two-hosts.json (100%) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 8892526c70..346f7ad6ef 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -173,6 +173,7 @@ def __init__(self, username, password, database, options): self.__server_selector = options.get( 'server_selector', any_server_selector) self.__auto_encryption_opts = options.get('auto_encryption_opts') + self.__load_balanced = options.get('loadbalanced') @property def _options(self): @@ -257,3 +258,8 @@ def retry_reads(self): def auto_encryption_opts(self): """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" return self.__auto_encryption_opts + + @property + def load_balanced(self): + """True if the client was configured to connect to a load balancer.""" + return self.__load_balanced diff --git a/pymongo/common.py b/pymongo/common.py index a36cf6d6b7..466891cf2d 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -623,6 +623,7 @@ def validate_tzinfo(dummy, value): 'replicaset': validate_string_or_none, 'retryreads': validate_boolean_or_string, 'retrywrites': validate_boolean_or_string, + 'loadbalanced': validate_boolean_or_string, 'serverselectiontimeoutms': validate_timeout_or_zero, 'sockettimeoutms': validate_timeout_or_none_or_zero, 'ssl_keyfile': validate_readable, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ffb4a7ca65..a5700cbf08 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -70,7 +70,8 @@ from pymongo.settings import TopologySettings from pymongo.uri_parser import (_handle_option_deprecations, _handle_security_options, - _normalize_options) + _normalize_options, + _check_options) from pymongo.write_concern import DEFAULT_WRITE_CONCERN @@ -692,11 +693,7 @@ def __init__( opts = _handle_security_options(opts) # Normalize combined options. opts = _normalize_options(opts) - - # Ensure directConnection was not True if there are multiple seeds. - if len(seeds) > 1 and opts.get('directconnection'): - raise ConfigurationError( - "Cannot specify multiple hosts with directConnection=true") + _check_options(seeds, opts) # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) @@ -739,7 +736,9 @@ def __init__( server_selector=options.server_selector, heartbeat_frequency=options.heartbeat_frequency, fqdn=fqdn, - direct_connection=options.direct_connection) + direct_connection=options.direct_connection, + load_balanced=options.load_balanced, + ) self._topology = Topology(self._topology_settings) diff --git a/pymongo/settings.py b/pymongo/settings.py index 05d15d0de5..91807ffc00 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -39,7 +39,8 @@ def __init__(self, heartbeat_frequency=common.HEARTBEAT_FREQUENCY, server_selector=None, fqdn=None, - direct_connection=None): + direct_connection=None, + load_balanced=None): """Represent MongoClient's configuration. Take a list of (host, port) pairs and optional replica set name. @@ -65,6 +66,7 @@ def __init__(self, self._direct = (len(self._seeds) == 1 and not self.replica_set_name) else: self._direct = direct_connection + self._load_balanced = load_balanced self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the @@ -124,6 +126,11 @@ def direct(self): """ return self._direct + @property + def load_balanced(self): + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + def get_topology_type(self): if self.direct: return TOPOLOGY_TYPE.Single diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 60d03ba497..6801900c9b 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -362,7 +362,26 @@ def split_hosts(hosts, default_port=DEFAULT_PORT): _BAD_DB_CHARS = re.compile('[' + re.escape(r'/ "$') + ']') _ALLOWED_TXT_OPTS = frozenset( - ['authsource', 'authSource', 'replicaset', 'replicaSet']) + ['authsource', 'authSource', 'replicaset', 'replicaSet', 'loadbalanced', + 'loadBalanced']) + + +def _check_options(nodes, options): + # Ensure directConnection was not True if there are multiple seeds. + if len(nodes) > 1 and options.get('directconnection'): + raise ConfigurationError( + 'Cannot specify multiple hosts with directConnection=true') + + if options.get('loadbalanced'): + if len(nodes) > 1: + raise ConfigurationError( + 'Cannot specify multiple hosts with loadBalanced=true') + if options.get('directconnection'): + raise ConfigurationError( + 'Cannot specify directConnection=true with loadBalanced=true') + if options.get('replicaset'): + raise ConfigurationError( + 'Cannot specify replicaSet with loadBalanced=true') def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, @@ -500,7 +519,8 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( - "Only authSource and replicaSet are supported from DNS") + "Only authSource, replicaSet, and loadBalanced are " + "supported from DNS") for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val @@ -508,9 +528,8 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, options["ssl"] = True if validate else 'true' else: nodes = split_hosts(hosts, default_port=default_port) - if len(nodes) > 1 and options.get('directConnection'): - raise ConfigurationError( - "Cannot specify multiple hosts with directConnection=true") + + _check_options(nodes, options) return { 'nodelist': nodes, diff --git a/test/__init__.py b/test/__init__.py index 063a634b42..49075a2d26 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -216,6 +216,7 @@ def __init__(self): self.client = None self.conn_lock = threading.Lock() self.is_data_lake = False + self.load_balancer = False if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS if MONGODB_API_VERSION: @@ -616,6 +617,12 @@ def require_no_standalone(self, func): "Must be connected to a replica set or mongos", func=func) + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require(lambda: self.load_balancer, + "Must be connected to a load balancer", + func=func) + def check_auth_with_sharding(self, func): """Skip a test when connected to mongos < 2.0 and running with auth.""" condition = lambda: not (self.auth_enabled and diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json new file mode 100644 index 0000000000..7f41932bb2 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test20.test.build.10gen.cc/?directConnection=false", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017" + ], + "options": { + "loadBalanced": true, + "ssl": true, + "directConnection": false + } +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json new file mode 100644 index 0000000000..9ed5ff22c2 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test20.test.build.10gen.cc/?replicaSet=replset", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because loadBalanced=true is incompatible with replicaSet" +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json b/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json new file mode 100644 index 0000000000..f425c06b30 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-multiple-hosts.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?loadBalanced=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because loadBalanced is true but the SRV record resolves to multiple hosts" +} diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json new file mode 100644 index 0000000000..0117b3e9cb --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -0,0 +1,13 @@ +{ + "uri": "mongodb+srv://test20.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017" + ], + "options": { + "loadBalanced": true, + "ssl": true + } +} diff --git a/test/srv_seedlist/direct-connection-false.json b/test/srv_seedlist/replica-set/direct-connection-false.json similarity index 100% rename from test/srv_seedlist/direct-connection-false.json rename to test/srv_seedlist/replica-set/direct-connection-false.json diff --git a/test/srv_seedlist/direct-connection-true.json b/test/srv_seedlist/replica-set/direct-connection-true.json similarity index 100% rename from test/srv_seedlist/direct-connection-true.json rename to test/srv_seedlist/replica-set/direct-connection-true.json diff --git a/test/srv_seedlist/encoded-userinfo-and-db.json b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json similarity index 100% rename from test/srv_seedlist/encoded-userinfo-and-db.json rename to test/srv_seedlist/replica-set/encoded-userinfo-and-db.json diff --git a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json new file mode 100644 index 0000000000..fd2e565c7b --- /dev/null +++ b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test21.test.build.10gen.cc/", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "loadBalanced": false, + "ssl": true + } +} diff --git a/test/srv_seedlist/longer-parent-in-return.json b/test/srv_seedlist/replica-set/longer-parent-in-return.json similarity index 100% rename from test/srv_seedlist/longer-parent-in-return.json rename to test/srv_seedlist/replica-set/longer-parent-in-return.json diff --git a/test/srv_seedlist/misformatted-option.json b/test/srv_seedlist/replica-set/misformatted-option.json similarity index 100% rename from test/srv_seedlist/misformatted-option.json rename to test/srv_seedlist/replica-set/misformatted-option.json diff --git a/test/srv_seedlist/no-results.json b/test/srv_seedlist/replica-set/no-results.json similarity index 100% rename from test/srv_seedlist/no-results.json rename to test/srv_seedlist/replica-set/no-results.json diff --git a/test/srv_seedlist/not-enough-parts.json b/test/srv_seedlist/replica-set/not-enough-parts.json similarity index 100% rename from test/srv_seedlist/not-enough-parts.json rename to test/srv_seedlist/replica-set/not-enough-parts.json diff --git a/test/srv_seedlist/one-result-default-port.json b/test/srv_seedlist/replica-set/one-result-default-port.json similarity index 100% rename from test/srv_seedlist/one-result-default-port.json rename to test/srv_seedlist/replica-set/one-result-default-port.json diff --git a/test/srv_seedlist/one-txt-record-multiple-strings.json b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json similarity index 100% rename from test/srv_seedlist/one-txt-record-multiple-strings.json rename to test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json diff --git a/test/srv_seedlist/one-txt-record.json b/test/srv_seedlist/replica-set/one-txt-record.json similarity index 100% rename from test/srv_seedlist/one-txt-record.json rename to test/srv_seedlist/replica-set/one-txt-record.json diff --git a/test/srv_seedlist/parent-part-mismatch1.json b/test/srv_seedlist/replica-set/parent-part-mismatch1.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch1.json rename to test/srv_seedlist/replica-set/parent-part-mismatch1.json diff --git a/test/srv_seedlist/parent-part-mismatch2.json b/test/srv_seedlist/replica-set/parent-part-mismatch2.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch2.json rename to test/srv_seedlist/replica-set/parent-part-mismatch2.json diff --git a/test/srv_seedlist/parent-part-mismatch3.json b/test/srv_seedlist/replica-set/parent-part-mismatch3.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch3.json rename to test/srv_seedlist/replica-set/parent-part-mismatch3.json diff --git a/test/srv_seedlist/parent-part-mismatch4.json b/test/srv_seedlist/replica-set/parent-part-mismatch4.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch4.json rename to test/srv_seedlist/replica-set/parent-part-mismatch4.json diff --git a/test/srv_seedlist/parent-part-mismatch5.json b/test/srv_seedlist/replica-set/parent-part-mismatch5.json similarity index 100% rename from test/srv_seedlist/parent-part-mismatch5.json rename to test/srv_seedlist/replica-set/parent-part-mismatch5.json diff --git a/test/srv_seedlist/returned-parent-too-short.json b/test/srv_seedlist/replica-set/returned-parent-too-short.json similarity index 100% rename from test/srv_seedlist/returned-parent-too-short.json rename to test/srv_seedlist/replica-set/returned-parent-too-short.json diff --git a/test/srv_seedlist/returned-parent-wrong.json b/test/srv_seedlist/replica-set/returned-parent-wrong.json similarity index 100% rename from test/srv_seedlist/returned-parent-wrong.json rename to test/srv_seedlist/replica-set/returned-parent-wrong.json diff --git a/test/srv_seedlist/two-results-default-port.json b/test/srv_seedlist/replica-set/two-results-default-port.json similarity index 100% rename from test/srv_seedlist/two-results-default-port.json rename to test/srv_seedlist/replica-set/two-results-default-port.json diff --git a/test/srv_seedlist/two-results-nonstandard-port.json b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json similarity index 100% rename from test/srv_seedlist/two-results-nonstandard-port.json rename to test/srv_seedlist/replica-set/two-results-nonstandard-port.json diff --git a/test/srv_seedlist/two-txt-records.json b/test/srv_seedlist/replica-set/two-txt-records.json similarity index 100% rename from test/srv_seedlist/two-txt-records.json rename to test/srv_seedlist/replica-set/two-txt-records.json diff --git a/test/srv_seedlist/txt-record-not-allowed-option.json b/test/srv_seedlist/replica-set/txt-record-not-allowed-option.json similarity index 100% rename from test/srv_seedlist/txt-record-not-allowed-option.json rename to test/srv_seedlist/replica-set/txt-record-not-allowed-option.json diff --git a/test/srv_seedlist/txt-record-with-overridden-ssl-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json similarity index 100% rename from test/srv_seedlist/txt-record-with-overridden-ssl-option.json rename to test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json diff --git a/test/srv_seedlist/txt-record-with-overridden-uri-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json similarity index 100% rename from test/srv_seedlist/txt-record-with-overridden-uri-option.json rename to test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json diff --git a/test/srv_seedlist/txt-record-with-unallowed-option.json b/test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json similarity index 100% rename from test/srv_seedlist/txt-record-with-unallowed-option.json rename to test/srv_seedlist/replica-set/txt-record-with-unallowed-option.json diff --git a/test/srv_seedlist/uri-with-admin-database.json b/test/srv_seedlist/replica-set/uri-with-admin-database.json similarity index 100% rename from test/srv_seedlist/uri-with-admin-database.json rename to test/srv_seedlist/replica-set/uri-with-admin-database.json diff --git a/test/srv_seedlist/uri-with-auth.json b/test/srv_seedlist/replica-set/uri-with-auth.json similarity index 100% rename from test/srv_seedlist/uri-with-auth.json rename to test/srv_seedlist/replica-set/uri-with-auth.json diff --git a/test/srv_seedlist/uri-with-port.json b/test/srv_seedlist/replica-set/uri-with-port.json similarity index 100% rename from test/srv_seedlist/uri-with-port.json rename to test/srv_seedlist/replica-set/uri-with-port.json diff --git a/test/srv_seedlist/uri-with-two-hosts.json b/test/srv_seedlist/replica-set/uri-with-two-hosts.json similarity index 100% rename from test/srv_seedlist/uri-with-two-hosts.json rename to test/srv_seedlist/replica-set/uri-with-two-hosts.json diff --git a/test/test_dns.py b/test/test_dns.py index c4c1b00733..16814063cc 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -30,16 +30,28 @@ from test.utils import wait_until -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'srv_seedlist') +class TestDNSRepl(unittest.TestCase): + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'srv_seedlist', 'replica-set') + load_balanced = False -class TestDNS(unittest.TestCase): - pass + @client_context.require_replica_set + def setUp(self): + pass + + +class TestDNSLoadBalanced(unittest.TestCase): + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'srv_seedlist', 'load-balanced') + load_balanced = True + + @client_context.require_load_balancer + def setUp(self): + pass def create_test(test_case): - @client_context.require_replica_set def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") @@ -91,6 +103,12 @@ def run_test(self): # tests. copts['tlsAllowInvalidHostnames'] = True + # The SRV spec tests assume drivers auto discover replica set + # members. This should be removed during PYTHON-2679. + if not self.load_balanced and ( + 'directconnection' not in result['options']): + copts['directConnection'] = False + client = MongoClient(uri, **copts) wait_until( lambda: hosts == client.nodes, @@ -106,15 +124,17 @@ def run_test(self): return run_test -def create_tests(): - for filename in glob.glob(os.path.join(TEST_PATH, '*.json')): +def create_tests(cls): + for filename in glob.glob(os.path.join(cls.TEST_PATH, '*.json')): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as dns_test_file: test_method = create_test(json.load(dns_test_file)) - setattr(TestDNS, 'test_' + test_suffix, test_method) + setattr(cls, 'test_' + test_suffix, test_method) + +create_tests(TestDNSRepl) +create_tests(TestDNSLoadBalanced) -create_tests() class TestParsingErrors(unittest.TestCase): From 6e1009e8b60755dd2cfe9aef0b66a5626ed0ff7a Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 5 May 2021 15:50:01 -0700 Subject: [PATCH 0344/2111] PYTHON-2396 Deprecate ssl_keyfile and ssl_certfile URI options (#616) --- doc/examples/tls.rst | 24 +++++++++++++++--------- pymongo/common.py | 8 ++++++++ pymongo/mongo_client.py | 22 ++++++++-------------- test/test_ssl.py | 12 ++++++++++++ 4 files changed, 43 insertions(+), 23 deletions(-) diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 327453bfd8..2f72555d7a 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -118,24 +118,30 @@ PyMongo can be configured to present a client certificate using the ... tls=True, ... tlsCertificateKeyFile='/path/to/client.pem') -If the private key for the client certificate is stored in a separate file use -the ``ssl_keyfile`` option:: +If the private key for the client certificate is stored in a separate file, +it should be concatenated with the certificate file. For example, to +concatenate a PEM-formatted certificate file ``cert.pem`` and a PEM-formatted +keyfile ``key.pem`` into a single file ``combined.pem``, on Unix systems, +users can run:: + + $ cat key.pem cert.pem > combined.pem + +PyMongo can be configured with the concatenated certificate keyfile using the +``tlsCertificateKeyFile`` option:: >>> client = pymongo.MongoClient('example.com', ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... ssl_keyfile='/path/to/key.pem') + ... tlsCertificateKeyFile='/path/to/combined.pem') -Python supports providing a password or passphrase to decrypt encrypted -private keys. Use the ``tlsCertificateKeyFilePassword`` option:: +If the private key contained in the certificate keyfile is encrypted, users +can provide a password or passphrase to decrypt the encrypted private keys +using the ``tlsCertificateKeyFilePassword`` option:: >>> client = pymongo.MongoClient('example.com', ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... ssl_keyfile='/path/to/key.pem', + ... tlsCertificateKeyFile='/path/to/combined.pem', ... tlsCertificateKeyFilePassword=) - These options can also be passed as part of the MongoDB URI. .. _OCSP: diff --git a/pymongo/common.py b/pymongo/common.py index 466891cf2d..b64b88278b 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -707,6 +707,14 @@ def validate_tzinfo(dummy, value): 'ssl_match_hostname': ('renamed', 'tlsAllowInvalidHostnames'), 'ssl_crlfile': ('renamed', 'tlsCRLFile'), 'ssl_ca_certs': ('renamed', 'tlsCAFile'), + 'ssl_certfile': ('removed', ( + 'Instead of using ssl_certfile to specify the certificate file, ' + 'use tlsCertificateKeyFile to pass a single file containing both ' + 'the client certificate and the private key')), + 'ssl_keyfile': ('removed', ( + 'Instead of using ssl_keyfile to specify the private keyfile, ' + 'use tlsCertificateKeyFile to pass a single file containing both ' + 'the client certificate and the private key')), 'ssl_pem_passphrase': ('renamed', 'tlsCertificateKeyFilePassword'), 'waitqueuemultiple': ('removed', ( 'Instead of using waitQueueMultiple to bound queuing, limit the size ' diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a5700cbf08..c3098cea7a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -450,28 +450,18 @@ def __init__( certificates passed from the other end of the connection. Implies ``tls=True``. Defaults to ``None``. - `tlsCertificateKeyFile`: A file containing the client certificate - and private key. If you want to pass the certificate and private - key as separate files, use the ``ssl_certfile`` and ``ssl_keyfile`` - options instead. Implies ``tls=True``. Defaults to ``None``. + and private key. Implies ``tls=True``. Defaults to ``None``. - `tlsCRLFile`: A file containing a PEM or DER formatted certificate revocation list. Only supported by python 2.7.9+ (pypy 2.5.1+). Implies ``tls=True``. Defaults to ``None``. - `tlsCertificateKeyFilePassword`: The password or passphrase for - decrypting the private key in ``tlsCertificateKeyFile`` or - ``ssl_keyfile``. Only necessary if the private key is encrypted. - Only supported by python 2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults - to ``None``. + decrypting the private key in ``tlsCertificateKeyFile``. Only + necessary if the private key is encrypted. Only supported by + python 2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults to ``None``. - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables certificate revocation status checking via the OCSP responder specified on the server certificate. Defaults to ``False``. - `ssl`: (boolean) Alias for ``tls``. - - `ssl_certfile`: The certificate file used to identify the local - connection against mongod. Implies ``tls=True``. Defaults to - ``None``. - - `ssl_keyfile`: The private keyfile used to identify the local - connection against mongod. Can be omitted if the keyfile is - included with the ``tlsCertificateKeyFile``. Implies ``tls=True``. - Defaults to ``None``. | **Read Concern options:** | (If not set explicitly, this will use the server default) @@ -512,6 +502,10 @@ def __init__( .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. + The following keyword arguments were deprecated: + + - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor + of ``tlsCertificateKeyFile``. .. versionchanged:: 3.11 Added the following keyword arguments and URI options: diff --git a/test/test_ssl.py b/test/test_ssl.py index 430f9d576e..ee153db28a 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -36,6 +36,7 @@ from test.utils import (EventListener, cat_files, connected, + ignore_deprecations, remove_all_users) @@ -93,6 +94,7 @@ def test_no_ssl_module(self): MongoClient, ssl_certfile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + @ignore_deprecations def test_config_ssl(self): # Tests various ssl configurations self.assertRaises(ValueError, MongoClient, ssl='foo') @@ -196,6 +198,7 @@ def test_simple_ssl(self): self.assertClientWorks(self.client) @client_context.require_ssl_certfile + @ignore_deprecations def test_ssl_pem_passphrase(self): # Expects the server to be running with server.pem and ca.pem # @@ -228,6 +231,7 @@ def test_ssl_pem_passphrase(self): @client_context.require_ssl_certfile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_implicitly_set(self): # Expects the server to be running with server.pem and ca.pem # @@ -251,6 +255,7 @@ def test_cert_ssl_implicitly_set(self): @client_context.require_ssl_certfile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_validation(self): # Expects the server to be running with server.pem and ca.pem # @@ -288,6 +293,7 @@ def test_cert_ssl_validation(self): @client_context.require_ssl_certfile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_uri_support(self): # Expects the server to be running with server.pem and ca.pem # @@ -301,6 +307,7 @@ def test_cert_ssl_uri_support(self): @client_context.require_ssl_certfile @client_context.require_no_auth + @ignore_deprecations def test_cert_ssl_validation_optional(self): # Expects the server to be running with server.pem and ca.pem # @@ -331,6 +338,7 @@ def test_cert_ssl_validation_optional(self): @client_context.require_ssl_certfile @client_context.require_server_resolvable + @ignore_deprecations def test_cert_ssl_validation_hostname_matching(self): # Expects the server to be running with server.pem and ca.pem # @@ -394,6 +402,7 @@ def test_cert_ssl_validation_hostname_matching(self): **self.credentials)) @client_context.require_ssl_certfile + @ignore_deprecations def test_ssl_crlfile_support(self): if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF') or _ssl.IS_PYOPENSSL: self.assertRaises( @@ -432,6 +441,7 @@ def test_ssl_crlfile_support(self): @client_context.require_ssl_certfile @client_context.require_server_resolvable + @ignore_deprecations def test_validation_with_system_ca_certs(self): # Expects the server to be running with server.pem and ca.pem. # @@ -546,6 +556,7 @@ def test_wincertstore(self): @client_context.require_auth @client_context.require_ssl_certfile + @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port self.addCleanup(remove_all_users, client_context.client['$external']) @@ -647,6 +658,7 @@ def test_mongodb_x509_auth(self): self.fail("Invalid certificate accepted.") @client_context.require_ssl_certfile + @ignore_deprecations def test_connect_with_ca_bundle(self): def remove(path): try: From f64b563d9ea11cf2af55326929802ce8ba6b0ea9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 May 2021 14:33:51 -0700 Subject: [PATCH 0345/2111] PYTHON-2629 Use hello command when API Version is declared (#610) PYTHON-2697 Update CMAP runner to ignore extra events --- pymongo/auth.py | 5 ++--- pymongo/compression_support.py | 3 ++- pymongo/hello.py | 22 +++++++++++++++++++ pymongo/ismaster.py | 5 ++++- pymongo/pool.py | 9 +++++++- ...ol-checkout-maxConnecting-is-enforced.json | 3 ++- .../pool-checkout-maxConnecting-timeout.json | 3 ++- test/cmap/pool-checkout-no-idle.json | 15 ++++++++----- test/cmap/pool-checkout-no-stale.json | 15 ++++++++----- ...out-returned-connection-maxConnecting.json | 3 ++- test/cmap/pool-clear-clears-waitqueue.json | 5 +---- test/cmap/pool-create-min-size-error.json | 13 ++++++----- .../errors/write_errors_ignored.json | 3 ++- .../connectTimeoutMS.json | 3 ++- .../isMaster-command-error.json | 8 ++++--- .../isMaster-network-error.json | 8 ++++--- .../isMaster-timeout.json | 6 +++-- .../minPoolSize-error.json | 3 ++- .../pool-cleared-error.json | 8 +++---- test/test_cmap.py | 2 -- test/test_discovery_and_monitoring.py | 2 +- test/test_streaming_protocol.py | 8 +++---- 22 files changed, 102 insertions(+), 50 deletions(-) create mode 100644 pymongo/hello.py diff --git a/pymongo/auth.py b/pymongo/auth.py index 4d22717c28..9861e77174 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -487,9 +487,8 @@ def _authenticate_default(credentials, sock_info): mechs = sock_info.negotiated_mechanisms[credentials] else: source = credentials.source - cmd = SON([ - ('ismaster', 1), - ('saslSupportedMechs', source + '.' + credentials.username)]) + cmd = sock_info.hello_cmd() + cmd['saslSupportedMechs'] = source + '.' + credentials.username mechs = sock_info.command( source, cmd, publish_events=False).get( 'saslSupportedMechs', []) diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 2dd54e7f78..d367595288 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -34,10 +34,11 @@ except ImportError: _HAVE_ZSTD = False +from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS _SUPPORTED_COMPRESSORS = set(["snappy", "zlib", "zstd"]) -_NO_COMPRESSION = set(['ismaster']) +_NO_COMPRESSION = set([HelloCompat.CMD, HelloCompat.LEGACY_CMD]) _NO_COMPRESSION.update(_SENSITIVE_COMMANDS) diff --git a/pymongo/hello.py b/pymongo/hello.py new file mode 100644 index 0000000000..34963a40c8 --- /dev/null +++ b/pymongo/hello.py @@ -0,0 +1,22 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers for the 'hello' and legacy hello commands.""" + + +class HelloCompat: + CMD = 'hello' + LEGACY_CMD = 'ismaster' + PRIMARY = 'isWritablePrimary' + LEGACY_PRIMARY = 'ismaster' diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index 5654ef6066..0c38fa6b18 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -17,6 +17,7 @@ import itertools from pymongo import common +from pymongo.hello import HelloCompat from pymongo.server_type import SERVER_TYPE @@ -30,7 +31,9 @@ def _get_server_type(doc): elif doc.get('setName'): if doc.get('hidden'): return SERVER_TYPE.RSOther - elif doc.get('ismaster'): + elif doc.get(HelloCompat.PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get(HelloCompat.LEGACY_PRIMARY): return SERVER_TYPE.RSPrimary elif doc.get('secondary'): return SERVER_TYPE.RSSecondary diff --git a/pymongo/pool.py b/pymongo/pool.py index 6dd3f2be48..7690bc8b48 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -49,6 +49,7 @@ NotMasterError, OperationFailure, PyMongoError) +from pymongo.hello import HelloCompat from pymongo.ismaster import IsMaster from pymongo.monitoring import (ConnectionCheckOutFailedReason, ConnectionClosedReason) @@ -531,12 +532,18 @@ def __init__(self, sock, pool, address, id): self.opts = pool.opts self.more_to_come = False + def hello_cmd(self): + if self.opts.server_api: + return SON([(HelloCompat.CMD, 1)]) + else: + return SON([(HelloCompat.LEGACY_CMD, 1)]) + def ismaster(self, all_credentials=None): return self._ismaster(None, None, None, all_credentials) def _ismaster(self, cluster_time, topology_version, heartbeat_frequency, all_credentials): - cmd = SON([('ismaster', 1)]) + cmd = self.hello_cmd() performing_handshake = not self.performed_handshake awaitable = False if performing_handshake: diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-maxConnecting-is-enforced.json index 80797398ff..732478bf7e 100644 --- a/test/cmap/pool-checkout-maxConnecting-is-enforced.json +++ b/test/cmap/pool-checkout-maxConnecting-is-enforced.json @@ -14,7 +14,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "closeConnection": false, "blockConnection": true, diff --git a/test/cmap/pool-checkout-maxConnecting-timeout.json b/test/cmap/pool-checkout-maxConnecting-timeout.json index 9d97a6178f..84ddf8fdba 100644 --- a/test/cmap/pool-checkout-maxConnecting-timeout.json +++ b/test/cmap/pool-checkout-maxConnecting-timeout.json @@ -14,7 +14,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "closeConnection": false, "blockConnection": true, diff --git a/test/cmap/pool-checkout-no-idle.json b/test/cmap/pool-checkout-no-idle.json index 7e6563228d..9a668857bb 100644 --- a/test/cmap/pool-checkout-no-idle.json +++ b/test/cmap/pool-checkout-no-idle.json @@ -23,6 +23,16 @@ }, { "name": "checkOut" + }, + { + "name": "waitForEvent", + "event": "ConnectionClosed", + "count": 1 + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 } ], "events": [ @@ -46,11 +56,6 @@ "connectionId": 1, "reason": "idle", "address": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 2, - "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-no-stale.json b/test/cmap/pool-checkout-no-stale.json index fcf20621ee..11bd492c89 100644 --- a/test/cmap/pool-checkout-no-stale.json +++ b/test/cmap/pool-checkout-no-stale.json @@ -22,6 +22,16 @@ }, { "name": "checkOut" + }, + { + "name": "waitForEvent", + "event": "ConnectionClosed", + "count": 1 + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 } ], "events": [ @@ -49,11 +59,6 @@ "connectionId": 1, "reason": "stale", "address": 42 - }, - { - "type": "ConnectionCheckedOut", - "connectionId": 2, - "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-returned-connection-maxConnecting.json b/test/cmap/pool-checkout-returned-connection-maxConnecting.json index 7ff59ab392..965d56f6d8 100644 --- a/test/cmap/pool-checkout-returned-connection-maxConnecting.json +++ b/test/cmap/pool-checkout-returned-connection-maxConnecting.json @@ -14,7 +14,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "closeConnection": false, "blockConnection": true, diff --git a/test/cmap/pool-clear-clears-waitqueue.json b/test/cmap/pool-clear-clears-waitqueue.json index 8df1bfdfb0..d4aef928c7 100644 --- a/test/cmap/pool-clear-clears-waitqueue.json +++ b/test/cmap/pool-clear-clears-waitqueue.json @@ -73,10 +73,6 @@ "type": "ConnectionCheckOutStarted", "address": 42 }, - { - "type": "ConnectionPoolCleared", - "address": 42 - }, { "type": "ConnectionCheckOutFailed", "reason": "connectionError", @@ -95,6 +91,7 @@ ], "ignore": [ "ConnectionPoolReady", + "ConnectionPoolCleared", "ConnectionPoolCreated", "ConnectionCreated", "ConnectionReady", diff --git a/test/cmap/pool-create-min-size-error.json b/test/cmap/pool-create-min-size-error.json index 4b655123d0..930e2a8d45 100644 --- a/test/cmap/pool-create-min-size-error.json +++ b/test/cmap/pool-create-min-size-error.json @@ -4,7 +4,7 @@ "description": "error during minPoolSize population clears pool", "runOn": [ { - "minServerVersion": "4.2.0" + "minServerVersion": "4.9.0" } ], "failPoint": { @@ -14,13 +14,16 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], - "closeConnection": true + "closeConnection": true, + "appName": "poolCreateMinSizeErrorTest" } }, "poolOptions": { - "minPoolSize": 1 + "minPoolSize": 1, + "appName": "poolCreateMinSizeErrorTest" }, "operations": [ { @@ -28,7 +31,7 @@ }, { "name": "waitForEvent", - "event": "ConnectionClosed", + "event": "ConnectionPoolCleared", "count": 1 }, { diff --git a/test/discovery_and_monitoring/errors/write_errors_ignored.json b/test/discovery_and_monitoring/errors/write_errors_ignored.json index 6b80673c12..6c511c1b6e 100644 --- a/test/discovery_and_monitoring/errors/write_errors_ignored.json +++ b/test/discovery_and_monitoring/errors/write_errors_ignored.json @@ -63,7 +63,8 @@ "writeErrors": [ { "errmsg": "NotMasterNoSlaveOk", - "code": 13435 + "code": 13435, + "index": 0 } ] } diff --git a/test/discovery_and_monitoring_integration/connectTimeoutMS.json b/test/discovery_and_monitoring_integration/connectTimeoutMS.json index 1192b6b9aa..b75eb58536 100644 --- a/test/discovery_and_monitoring_integration/connectTimeoutMS.json +++ b/test/discovery_and_monitoring_integration/connectTimeoutMS.json @@ -42,7 +42,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "connectTimeoutMS=0", "blockConnection": true, diff --git a/test/discovery_and_monitoring_integration/isMaster-command-error.json b/test/discovery_and_monitoring_integration/isMaster-command-error.json index 0a735dc334..0567dd3323 100644 --- a/test/discovery_and_monitoring_integration/isMaster-command-error.json +++ b/test/discovery_and_monitoring_integration/isMaster-command-error.json @@ -1,7 +1,7 @@ { "runOn": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.9" } ], "database_name": "sdam-tests", @@ -17,7 +17,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "commandErrorHandshakeTest", "closeConnection": false, @@ -120,7 +121,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "commandErrorCheckTest", "closeConnection": false, diff --git a/test/discovery_and_monitoring_integration/isMaster-network-error.json b/test/discovery_and_monitoring_integration/isMaster-network-error.json index 2385a41646..617fc74dbc 100644 --- a/test/discovery_and_monitoring_integration/isMaster-network-error.json +++ b/test/discovery_and_monitoring_integration/isMaster-network-error.json @@ -1,7 +1,7 @@ { "runOn": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.9" } ], "database_name": "sdam-tests", @@ -17,7 +17,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "networkErrorHandshakeTest", "closeConnection": true @@ -119,7 +120,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "networkErrorCheckTest", "closeConnection": true diff --git a/test/discovery_and_monitoring_integration/isMaster-timeout.json b/test/discovery_and_monitoring_integration/isMaster-timeout.json index 50ad482778..d37e7ee687 100644 --- a/test/discovery_and_monitoring_integration/isMaster-timeout.json +++ b/test/discovery_and_monitoring_integration/isMaster-timeout.json @@ -17,7 +17,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "timeoutMonitorHandshakeTest", "blockConnection": true, @@ -120,7 +121,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "timeoutMonitorCheckTest", "blockConnection": true, diff --git a/test/discovery_and_monitoring_integration/minPoolSize-error.json b/test/discovery_and_monitoring_integration/minPoolSize-error.json index 9605ee4f5f..66f310ce72 100644 --- a/test/discovery_and_monitoring_integration/minPoolSize-error.json +++ b/test/discovery_and_monitoring_integration/minPoolSize-error.json @@ -17,7 +17,8 @@ }, "data": { "failCommands": [ - "isMaster" + "isMaster", + "hello" ], "appName": "SDAMminPoolSizeError", "closeConnection": true diff --git a/test/discovery_and_monitoring_integration/pool-cleared-error.json b/test/discovery_and_monitoring_integration/pool-cleared-error.json index 061503c259..52456f9e13 100644 --- a/test/discovery_and_monitoring_integration/pool-cleared-error.json +++ b/test/discovery_and_monitoring_integration/pool-cleared-error.json @@ -128,7 +128,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread2", + "name": "thread3", "operation": { "name": "insertOne", "object": "collection", @@ -144,7 +144,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread1", + "name": "thread4", "operation": { "name": "insertOne", "object": "collection", @@ -160,7 +160,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread2", + "name": "thread5", "operation": { "name": "insertOne", "object": "collection", @@ -176,7 +176,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread2", + "name": "thread6", "operation": { "name": "insertOne", "object": "collection", diff --git a/test/test_cmap.py b/test/test_cmap.py index b4a14bb97c..7a9ab51804 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -191,8 +191,6 @@ def check_events(self, events, ignore): if len(events) > len(actual_events): self.fail('missing events: %r' % (events[len(actual_events):],)) - elif len(events) < len(actual_events): - self.fail('extra events: %r' % (actual_events[len(events):],)) def check_error(self, actual, expected): message = expected.pop('message') diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 4ffffffaea..ddd017bb9a 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -329,7 +329,7 @@ def test_pool_unpause(self): fail_ismaster = { 'mode': {'times': 2}, 'data': { - 'failCommands': ['isMaster'], + 'failCommands': ['isMaster', 'hello'], 'errorCode': 1234, 'appName': 'SDAMPoolManagementTest', }, diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 769dc6c674..07310586a7 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -49,7 +49,7 @@ def test_failCommand_streaming(self): 'configureFailPoint': 'failCommand', 'mode': {'times': 4}, 'data': { - 'failCommands': ['isMaster'], + 'failCommands': ['isMaster', 'hello'], 'closeConnection': False, 'errorCode': 10107, 'appName': 'failingIsMasterTest', @@ -90,7 +90,7 @@ def test_streaming_rtt(self): 'configureFailPoint': 'failCommand', 'mode': {'times': 1000}, 'data': { - 'failCommands': ['isMaster'], + 'failCommands': ['isMaster', 'hello'], 'blockConnection': True, 'blockTimeMS': 20, # This can be uncommented after SERVER-49220 is fixed. @@ -138,7 +138,7 @@ def test_monitor_waits_after_server_check_error(self): fail_ismaster = { 'mode': {'times': 5}, 'data': { - 'failCommands': ['isMaster'], + 'failCommands': ['isMaster', 'hello'], 'errorCode': 1234, 'appName': 'SDAMMinHeartbeatFrequencyTest', }, @@ -186,7 +186,7 @@ def hb_failed(event): fail_heartbeat = { 'mode': {'times': 2}, 'data': { - 'failCommands': ['isMaster'], + 'failCommands': ['isMaster', 'hello'], 'closeConnection': True, 'appName': 'heartbeatEventAwaitedFlag', }, From ac61cf87a911d72b095ea00663e051f6ac148c7a Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 10 May 2021 16:47:28 -0700 Subject: [PATCH 0346/2111] PYTHON-2662 Deprecate database profiler helpers (#617) --- pymongo/__init__.py | 31 ++++++++++++++++++++++--- pymongo/database.py | 53 ++++++++++++++++++++++++++++++++++++++++--- test/test_database.py | 16 ++++++++++++- 3 files changed, 93 insertions(+), 7 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 1ce7f82c13..fff8d46026 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -68,11 +68,36 @@ """ OFF = 0 -"""No database profiling.""" +"""**DEPRECATED** - No database profiling. + +**DEPRECATED** - :attr:`OFF` is deprecated and will be removed in PyMongo 4.0. +Instead, specify this profiling level using the numeric value ``0``. +See https://docs.mongodb.com/manual/tutorial/manage-the-database-profiler + +.. versionchanged:: 3.12 + Deprecated +""" SLOW_ONLY = 1 -"""Only profile slow operations.""" +"""**DEPRECATED** - Only profile slow operations. + +**DEPRECATED** - :attr:`SLOW_ONLY` is deprecated and will be removed in +PyMongo 4.0. Instead, specify this profiling level using the numeric +value ``1``. +See https://docs.mongodb.com/manual/tutorial/manage-the-database-profiler + +.. versionchanged:: 3.12 + Deprecated +""" ALL = 2 -"""Profile all operations.""" +"""**DEPRECATED** - Profile all operations. + +**DEPRECATED** - :attr:`ALL` is deprecated and will be removed in PyMongo 4.0. +Instead, specify this profiling level using the numeric value ``2``. +See https://docs.mongodb.com/manual/tutorial/manage-the-database-profiler + +.. versionchanged:: 3.12 + Deprecated +""" version_tuple = (4, 0, '.dev0') diff --git a/pymongo/database.py b/pymongo/database.py index 1291c2a14f..8de73161a3 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -895,7 +895,18 @@ def current_op(self, include_all=False, session=None): return self._current_op(include_all, session) def profiling_level(self, session=None): - """Get the database's current profiling level. + """**DEPRECATED**: Get the database's current profiling level. + + Starting with PyMongo 3.12, this helper is obsolete. Instead, users + can run the `profile command`_, using the :meth:`command` + helper to get the current profiler level. Running the + `profile command`_ with the level set to ``-1`` returns the current + profiler information without changing it:: + + res = db.command("profile", -1) + profiling_level = res["was"] + + The format of ``res`` depends on the version of MongoDB in use. Returns one of (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). @@ -904,11 +915,18 @@ def profiling_level(self, session=None): - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + .. versionchanged:: 3.12 + Deprecated. + .. versionchanged:: 3.6 Added ``session`` parameter. .. mongodoc:: profiling + .. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ """ + warnings.warn("profiling_level() is deprecated. See the documentation " + "for more information", + DeprecationWarning, stacklevel=2) result = self.command("profile", -1, session=session) assert result["was"] >= 0 and result["was"] <= 2 @@ -916,7 +934,13 @@ def profiling_level(self, session=None): def set_profiling_level(self, level, slow_ms=None, session=None, sample_rate=None, filter=None): - """Set the database's profiling level. + """**DEPRECATED**: Set the database's profiling level. + + Starting with PyMongo 3.12, this helper is obsolete. Instead, users + can directly run the `profile command`_, using the :meth:`command` + helper, e.g.:: + + res = db.command("profile", 2, filter={"op": "query"}) :Parameters: - `level`: Specifies a profiling level, see list of possible values @@ -949,12 +973,18 @@ def set_profiling_level(self, level, slow_ms=None, session=None, .. versionchanged:: 3.12 Added the ``sample_rate`` and ``filter`` parameters. + Deprecated. .. versionchanged:: 3.6 Added ``session`` parameter. .. mongodoc:: profiling + .. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ """ + warnings.warn("set_profiling_level() is deprecated. See the " + "documentation for more information", + DeprecationWarning, stacklevel=2) + if not isinstance(level, int) or level < 0 or level > 2: raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)") @@ -975,17 +1005,34 @@ def set_profiling_level(self, level, slow_ms=None, session=None, self.command(cmd, session=session) def profiling_info(self, session=None): - """Returns a list containing current profiling information. + """**DEPRECATED**: Returns a list containing current profiling + information. + + Starting with PyMongo 3.12, this helper is obsolete. Instead, users + can view the database profiler output by running + :meth:`~pymongo.collection.Collection.find` against the + ``system.profile`` collection as detailed in the `profiler output`_ + documentation:: + + profiling_info = list(db["system.profile"].find()) :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + .. versionchanged:: 3.12 + Deprecated. + .. versionchanged:: 3.6 Added ``session`` parameter. .. mongodoc:: profiling + .. _profiler output: https://docs.mongodb.com/manual/reference/database-profiler/ """ + warnings.warn("profiling_info() is deprecated. See the " + "documentation for more information", + DeprecationWarning, stacklevel=2) + return list(self["system.profile"].find(session=session)) def __iter__(self): diff --git a/test/test_database.py b/test/test_database.py index 0f64f2a46e..9ee854a533 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -47,9 +47,11 @@ SkipTest, unittest, IntegrationTest) -from test.utils import (rs_or_single_client, +from test.utils import (ignore_deprecations, + rs_or_single_client, server_started_with_auth, wait_until, + DeprecationFilter, IMPOSSIBLE_WRITE_CONCERN, OvertCommandListener) from test.test_custom_types import DECIMAL_CODECOPTS @@ -369,6 +371,7 @@ def test_validate_collection_background(self): db.validate_collection(coll, full=True, background=True) @client_context.require_no_mongos + @ignore_deprecations def test_profiling_levels(self): db = self.client.pymongo_test self.assertEqual(db.profiling_level(), OFF) # default @@ -399,6 +402,7 @@ def test_profiling_levels(self): @client_context.require_no_mongos @client_context.require_version_min(3, 6) + @ignore_deprecations def test_profiling_sample_rate(self): db = self.client.pymongo_test with self.assertRaises(TypeError): @@ -417,6 +421,7 @@ def test_profiling_sample_rate(self): @client_context.require_no_mongos @client_context.require_version_min(4, 4, 2) + @ignore_deprecations def test_profiling_filter(self): db = self.client.pymongo_test db.set_profiling_level(ALL, filter={'ns': {'$eq': 'test.test'}}) @@ -427,6 +432,7 @@ def test_profiling_filter(self): self.assertEqual(100, db.command("profile", -1)['slowms']) @client_context.require_no_mongos + @ignore_deprecations def test_profiling_info(self): db = self.client.pymongo_test @@ -455,6 +461,14 @@ def test_profiling_info(self): self.assertTrue(isinstance(info[0]['op'], str)) self.assertTrue(isinstance(info[0]["ts"], datetime.datetime)) + def test_profiling_helpers_deprecated(self): + filter = DeprecationFilter('error') + self.addCleanup(filter.stop) + db = self.client.pymongo_test + self.assertRaises(DeprecationWarning, db.profiling_level) + self.assertRaises(DeprecationWarning, db.profiling_info) + self.assertRaises(DeprecationWarning, db.set_profiling_level, OFF) + def test_command(self): self.maxDiff = None db = self.client.admin From 048ee81836fa477e4f4052bea5b8e6ae58483c48 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 17 May 2021 13:58:06 -0700 Subject: [PATCH 0347/2111] PYTHON-2719 RawBatchCursor must raise StopIteration instead of returning empty bytes when the cursor contains no results (#624) --- pymongo/message.py | 4 +++- test/test_cursor.py | 7 +++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pymongo/message.py b/pymongo/message.py index 25de5a72ff..d08636ff11 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1526,7 +1526,9 @@ def raw_response(self, cursor_id=None): error_object.get("$err"), error_object.get("code"), error_object) - return [self.documents] + if self.documents: + return [self.documents] + return [] def unpack_response(self, cursor_id=None, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, diff --git a/test/test_cursor.py b/test/test_cursor.py index 582d0641e5..fb657c3f6b 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1471,7 +1471,14 @@ def test_explain(self): explanation = c.find_raw_batches().explain() self.assertIsInstance(explanation, dict) + def test_empty(self): + self.db.test.drop() + cursor = self.db.test.find_raw_batches() + with self.assertRaises(StopIteration): + next(cursor) + def test_clone(self): + self.db.test.insert_one({}) cursor = self.db.test.find_raw_batches() # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. self.assertIsInstance(next(cursor.clone()), bytes) From e221b49dfc3cfdddbb138a8701ece27fdd131242 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 17 May 2021 15:26:50 -0700 Subject: [PATCH 0348/2111] PYTHON-2684 Send Versioned API options with getMore+txn commands (#618) --- pymongo/message.py | 5 +- pymongo/pool.py | 7 +- test/test_versioned_api.py | 29 +-- test/unified_format.py | 2 +- .../crud-api-version-1-strict.json | 19 +- test/versioned-api/crud-api-version-1.json | 19 +- ...ommand-helper-no-api-version-declared.json | 12 +- .../test-commands-strict-mode.json | 5 +- test/versioned-api/transaction-handling.json | 205 ++---------------- 9 files changed, 62 insertions(+), 241 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index d08636ff11..5a07d5d078 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -305,7 +305,7 @@ def as_command(self, sock_info): self.name = 'explain' cmd = SON([('explain', cmd)]) session = self.session - sock_info.add_server_api(cmd, session) + sock_info.add_server_api(cmd) if session: session._apply_to(cmd, False, self.read_preference) # Explain does not support readConcern. @@ -409,6 +409,7 @@ def as_command(self, sock_info): if self.session: self.session._apply_to(cmd, False, self.read_preference) + sock_info.add_server_api(cmd) sock_info.send_cluster_time(cmd, self.session, self.client) # Support auto encryption client = self.client @@ -891,7 +892,7 @@ def __init__(self, database_name, command, sock_info, operation_id, self.compress = True if sock_info.compression_context else False self.op_type = op_type self.codec = codec - sock_info.add_server_api(command, session) + sock_info.add_server_api(command) def _batch_command(self, docs): namespace = self.db_name + '.$cmd' diff --git a/pymongo/pool.py b/pymongo/pool.py index 7690bc8b48..a0dda5d9f9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -668,7 +668,7 @@ def command(self, dbname, spec, slave_ok=False, raise ConfigurationError( 'Must be connected to MongoDB 3.4+ to use a collation.') - self.add_server_api(spec, session) + self.add_server_api(spec) if session: session._apply_to(spec, retryable_write, read_preference) self.send_cluster_time(spec, session, client) @@ -846,11 +846,8 @@ def send_cluster_time(self, command, session, client): if self.max_wire_version >= 6 and client: client._send_cluster_time(command, session) - def add_server_api(self, command, session): + def add_server_api(self, command): """Add server_api parameters.""" - if (session and session.in_transaction and - not session._starting_transaction): - return if self.opts.server_api: _add_to_command(command, self.opts.server_api) diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index fc7c919af6..27a1dc7fe5 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -68,10 +68,9 @@ def assertServerApi(self, event): def assertNoServerApi(self, event): self.assertNotIn('apiVersion', event.command) - def assertServerApiOnlyInFirstCommand(self, events): - self.assertServerApi(events[0]) - for event in events[1:]: - self.assertNoServerApi(event) + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) @client_context.require_version_min(4, 7) def test_command_options(self): @@ -84,11 +83,7 @@ def test_command_options(self): self.addCleanup(coll.delete_many, {}) list(coll.find(batch_size=25)) client.admin.command('ping') - for event in listener.results['started']: - if event.command_name == 'getMore': - self.assertNoServerApi(event) - else: - self.assertServerApi(event) + self.assertServerApiInAllCommands(listener.results['started']) @client_context.require_version_min(4, 7) @client_context.require_transactions @@ -106,21 +101,7 @@ def test_command_options_txn(self): coll.insert_many([{} for _ in range(100)], session=s) list(coll.find(batch_size=25, session=s)) client.test.command('find', 'test', session=s) - self.assertServerApiOnlyInFirstCommand(listener.results['started']) - - listener.reset() - with client.start_session() as s, s.start_transaction(): - list(coll.find(batch_size=25, session=s)) - coll.insert_many([{} for _ in range(100)], session=s) - client.test.command('find', 'test', session=s) - self.assertServerApiOnlyInFirstCommand(listener.results['started']) - - listener.reset() - with client.start_session() as s, s.start_transaction(): - client.test.command('find', 'test', session=s) - list(coll.find(batch_size=25, session=s)) - coll.insert_many([{} for _ in range(100)], session=s) - self.assertServerApiOnlyInFirstCommand(listener.results['started']) + self.assertServerApiInAllCommands(listener.results['started']) if __name__ == "__main__": diff --git a/test/unified_format.py b/test/unified_format.py index 3a5e5782c0..0aea11b7ad 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -520,7 +520,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string('1.1') + SCHEMA_VERSION = Version.from_string('1.4') @staticmethod def should_run_on(run_on_spec): diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json index 2705b505a8..29a0ec4e3b 100644 --- a/test/versioned-api/crud-api-version-1-strict.json +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -1,6 +1,6 @@ { "description": "CRUD Api Version 1 (strict)", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.9" @@ -141,6 +141,11 @@ }, { "description": "aggregate on database appends declared API version", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "aggregate", @@ -651,7 +656,7 @@ ] }, { - "description": "find command with declared API version appends to the command, but getMore does not", + "description": "find and getMore append API version", "operations": [ { "name": "find", @@ -712,14 +717,10 @@ "long" ] }, - "apiVersion": { - "$$exists": false - }, - "apiStrict": { - "$$exists": false - }, + "apiVersion": "1", + "apiStrict": true, "apiDeprecationErrors": { - "$$exists": false + "$$unsetOrMatches": false } } } diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index 9171858376..1f135eea18 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -1,6 +1,6 @@ { "description": "CRUD Api Version 1", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.9" @@ -141,6 +141,11 @@ }, { "description": "aggregate on database appends declared API version", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "aggregate", @@ -643,7 +648,7 @@ ] }, { - "description": "find command with declared API version appends to the command, but getMore does not", + "description": "find and getMore append API version", "operations": [ { "name": "find", @@ -704,15 +709,11 @@ "long" ] }, - "apiVersion": { - "$$exists": false - }, + "apiVersion": "1", "apiStrict": { - "$$exists": false + "$$unsetOrMatches": false }, - "apiDeprecationErrors": { - "$$exists": false - } + "apiDeprecationErrors": true } } } diff --git a/test/versioned-api/runcommand-helper-no-api-version-declared.json b/test/versioned-api/runcommand-helper-no-api-version-declared.json index e901887e4c..17e0126d10 100644 --- a/test/versioned-api/runcommand-helper-no-api-version-declared.json +++ b/test/versioned-api/runcommand-helper-no-api-version-declared.json @@ -1,6 +1,6 @@ { "description": "RunCommand helper: No API version declared", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.9", @@ -29,6 +29,11 @@ "tests": [ { "description": "runCommand does not inspect or change the command document", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "runCommand", @@ -72,6 +77,11 @@ }, { "description": "runCommand does not prevent sending invalid API version declarations", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "runCommand", diff --git a/test/versioned-api/test-commands-strict-mode.json b/test/versioned-api/test-commands-strict-mode.json index 1705ba7bff..9c4ebea785 100644 --- a/test/versioned-api/test-commands-strict-mode.json +++ b/test/versioned-api/test-commands-strict-mode.json @@ -1,12 +1,13 @@ { "description": "Test commands: strict mode", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.9", "serverParameters": { "enableTestCommands": true - } + }, + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json index a740405d3a..5c627bb351 100644 --- a/test/versioned-api/transaction-handling.json +++ b/test/versioned-api/transaction-handling.json @@ -6,7 +6,8 @@ "minServerVersion": "4.9", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded-replicaset", + "load-balanced" ] } ], @@ -53,17 +54,6 @@ "apiDeprecationErrors": { "$$unsetOrMatches": false } - }, - { - "apiVersion": { - "$$exists": false - }, - "apiStrict": { - "$$exists": false - }, - "apiDeprecationErrors": { - "$$exists": false - } } ] }, @@ -97,12 +87,13 @@ ], "tests": [ { - "description": "Only the first command in a transaction declares an API version", + "description": "All commands in a transaction declare an API version", "runOnRequirements": [ { "topologies": [ "replicaset", - "sharded-replicaset" + "sharded-replicaset", + "load-balanced" ] } ], @@ -193,119 +184,6 @@ "lsid": { "$$sessionLsid": "session" }, - "apiVersion": { - "$$exists": false - }, - "apiStrict": { - "$$exists": false - }, - "apiDeprecationErrors": { - "$$exists": false - } - } - } - }, - { - "commandStartedEvent": { - "command": { - "commitTransaction": 1, - "lsid": { - "$$sessionLsid": "session" - }, - "apiVersion": { - "$$exists": false - }, - "apiStrict": { - "$$exists": false - }, - "apiDeprecationErrors": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "Committing a transaction twice does not append server API options", - "runOnRequirements": [ - { - "topologies": [ - "replicaset", - "sharded-replicaset" - ] - } - ], - "operations": [ - { - "name": "startTransaction", - "object": "session" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session", - "document": { - "_id": 6, - "x": 66 - } - }, - "expectResult": { - "$$unsetOrMatches": { - "insertedId": { - "$$unsetOrMatches": 6 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session", - "document": { - "_id": 7, - "x": 77 - } - }, - "expectResult": { - "$$unsetOrMatches": { - "insertedId": { - "$$unsetOrMatches": 7 - } - } - } - }, - { - "name": "commitTransaction", - "object": "session" - }, - { - "name": "commitTransaction", - "object": "session" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 6, - "x": 66 - } - ], - "lsid": { - "$$sessionLsid": "session" - }, - "startTransaction": true, "apiVersion": "1", "apiStrict": { "$$unsetOrMatches": false @@ -316,31 +194,6 @@ } } }, - { - "commandStartedEvent": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 7, - "x": 77 - } - ], - "lsid": { - "$$sessionLsid": "session" - }, - "apiVersion": { - "$$exists": false - }, - "apiStrict": { - "$$exists": false - }, - "apiDeprecationErrors": { - "$$exists": false - } - } - } - }, { "commandStartedEvent": { "command": { @@ -348,33 +201,12 @@ "lsid": { "$$sessionLsid": "session" }, - "apiVersion": { - "$$exists": false - }, - "apiStrict": { - "$$exists": false - }, - "apiDeprecationErrors": { - "$$exists": false - } - } - } - }, - { - "commandStartedEvent": { - "command": { - "commitTransaction": 1, - "lsid": { - "$$sessionLsid": "session" - }, - "apiVersion": { - "$$exists": false - }, + "apiVersion": "1", "apiStrict": { - "$$exists": false + "$$unsetOrMatches": false }, "apiDeprecationErrors": { - "$$exists": false + "$$unsetOrMatches": false } } } @@ -384,12 +216,13 @@ ] }, { - "description": "abortTransaction does not include an API version", + "description": "abortTransaction includes an API version", "runOnRequirements": [ { "topologies": [ "replicaset", - "sharded-replicaset" + "sharded-replicaset", + "load-balanced" ] } ], @@ -480,14 +313,12 @@ "lsid": { "$$sessionLsid": "session" }, - "apiVersion": { - "$$exists": false - }, + "apiVersion": "1", "apiStrict": { - "$$exists": false + "$$unsetOrMatches": false }, "apiDeprecationErrors": { - "$$exists": false + "$$unsetOrMatches": false } } } @@ -499,14 +330,12 @@ "lsid": { "$$sessionLsid": "session" }, - "apiVersion": { - "$$exists": false - }, + "apiVersion": "1", "apiStrict": { - "$$exists": false + "$$unsetOrMatches": false }, "apiDeprecationErrors": { - "$$exists": false + "$$unsetOrMatches": false } } } From 2a74601572bbb77bc8a1c94a842082ca4739b86d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 18 May 2021 10:20:36 -0700 Subject: [PATCH 0349/2111] PYTHON-2676 Unified Test Runner changes in preparation for Load Balancer Support (#623) Resync crud, change stream, SDAM, server_selection, transactions, uri-options, tests. PYTHON-2348 Correctly express lack of event assertions in change stream tests. --- test/__init__.py | 2 + .../legacy/change-streams-errors.json | 8 +- ...n => change-streams-resume-allowlist.json} | 9 +- .../change-streams-resume-errorLabels.json | 60 ++- .../change_streams/legacy/change-streams.json | 2 +- test/crud/unified/aggregate-merge.json | 497 ++++++++++++++++++ .../unified/aggregate-out-readConcern.json | 406 ++++++++++++++ test/crud/unified/aggregate.json | 166 ++++++ .../bulkWrite-arrayFilters-clientError.json | 103 ++-- test/crud/unified/bulkWrite-arrayFilters.json | 279 ++++++++++ .../bulkWrite-delete-hint-clientError.json | 119 +++-- .../bulkWrite-delete-hint-serverError.json | 252 +++++++++ test/crud/unified/bulkWrite-delete-hint.json | 247 +++++++++ .../bulkWrite-update-hint-clientError.json | 151 ++++-- .../bulkWrite-update-hint-serverError.json | 422 +++++++++++++++ test/crud/unified/bulkWrite-update-hint.json | 445 ++++++++++++++++ .../bulkWrite-update-validation.json | 121 +++-- test/crud/{v2 => unified}/db-aggregate.json | 40 +- .../unified/deleteMany-hint-clientError.json | 149 ++++++ .../unified/deleteMany-hint-serverError.json | 190 +++++++ test/crud/unified/deleteMany-hint.json | 173 ++++++ .../unified/deleteOne-hint-clientError.json | 133 +++++ .../unified/deleteOne-hint-serverError.json | 170 ++++++ test/crud/unified/deleteOne-hint.json | 161 ++++++ .../find-allowdiskuse-clientError.json | 79 +++ .../find-allowdiskuse-serverError.json | 100 ++++ test/crud/unified/find-allowdiskuse.json | 120 +++++ test/crud/unified/find.json | 156 ++++++ .../findOneAndDelete-hint-clientError.json | 133 +++++ .../findOneAndDelete-hint-serverError.json | 162 ++++++ test/crud/unified/findOneAndDelete-hint.json | 155 ++++++ .../findOneAndReplace-hint-clientError.json | 139 +++++ .../findOneAndReplace-hint-serverError.json | 172 ++++++ test/crud/unified/findOneAndReplace-hint.json | 173 ++++++ .../findOneAndUpdate-hint-clientError.json | 143 +++++ .../findOneAndUpdate-hint-serverError.json | 180 +++++++ test/crud/unified/findOneAndUpdate-hint.json | 181 +++++++ test/crud/unified/replaceOne-hint.json | 203 +++++++ test/crud/unified/replaceOne-validation.json | 82 +++ ...ged-bulkWrite-delete-hint-clientError.json | 132 +++-- ...ged-bulkWrite-update-hint-clientError.json | 169 +++--- ...nowledged-deleteMany-hint-clientError.json | 149 ++++++ ...knowledged-deleteOne-hint-clientError.json | 133 +++++ ...ged-findOneAndDelete-hint-clientError.json | 133 +++++ ...ed-findOneAndReplace-hint-clientError.json | 139 +++++ ...ged-findOneAndUpdate-hint-clientError.json | 143 +++++ ...nowledged-replaceOne-hint-clientError.json | 143 +++++ ...nowledged-updateMany-hint-clientError.json | 159 ++++++ ...knowledged-updateOne-hint-clientError.json | 147 ++++++ .../updateMany-hint-clientError.json | 95 +++- .../unified/updateMany-hint-serverError.json | 216 ++++++++ test/crud/unified/updateMany-hint.json | 219 ++++++++ test/crud/unified/updateMany-validation.json | 98 ++++ .../unified/updateOne-hint-clientError.json | 147 ++++++ .../unified/updateOne-hint-serverError.json | 208 ++++++++ test/crud/unified/updateOne-hint.json | 211 ++++++++ test/crud/unified/updateOne-validation.json | 80 +++ test/crud/unified/updateWithPipelines.json | 494 +++++++++++++++++ test/crud/v2/aggregate-merge.json | 415 --------------- test/crud/v2/aggregate-out-readConcern.json | 385 -------------- test/crud/v2/bulkWrite-arrayFilters.json | 226 -------- .../v2/bulkWrite-delete-hint-serverError.json | 209 -------- test/crud/v2/bulkWrite-delete-hint.json | 204 ------- .../v2/bulkWrite-update-hint-serverError.json | 343 ------------ test/crud/v2/bulkWrite-update-hint.json | 366 ------------- test/crud/v2/deleteMany-hint-clientError.json | 100 ---- test/crud/v2/deleteMany-hint-serverError.json | 141 ----- test/crud/v2/deleteMany-hint.json | 128 ----- test/crud/v2/deleteOne-hint-clientError.json | 84 --- test/crud/v2/deleteOne-hint-serverError.json | 121 ----- test/crud/v2/deleteOne-hint.json | 116 ---- .../v2/find-allowdiskuse-clientError.json | 40 -- .../v2/find-allowdiskuse-serverError.json | 61 --- test/crud/v2/find-allowdiskuse.json | 78 --- .../v2/findOneAndDelete-hint-clientError.json | 84 --- .../v2/findOneAndDelete-hint-serverError.json | 113 ---- test/crud/v2/findOneAndDelete-hint.json | 110 ---- .../findOneAndReplace-hint-clientError.json | 90 ---- .../findOneAndReplace-hint-serverError.json | 123 ----- test/crud/v2/findOneAndReplace-hint.json | 128 ----- .../v2/findOneAndUpdate-hint-clientError.json | 94 ---- .../v2/findOneAndUpdate-hint-serverError.json | 131 ----- test/crud/v2/findOneAndUpdate-hint.json | 136 ----- test/crud/v2/replaceOne-hint.json | 146 ----- test/crud/v2/replaceOne-validation.json | 41 -- ...nowledged-deleteMany-hint-clientError.json | 105 ---- ...knowledged-deleteOne-hint-clientError.json | 89 ---- ...ged-findOneAndDelete-hint-clientError.json | 89 ---- ...ed-findOneAndReplace-hint-clientError.json | 95 ---- ...ged-findOneAndUpdate-hint-clientError.json | 99 ---- ...nowledged-replaceOne-hint-clientError.json | 99 ---- ...nowledged-updateMany-hint-clientError.json | 115 ---- ...knowledged-updateOne-hint-clientError.json | 103 ---- test/crud/v2/updateMany-hint-serverError.json | 161 ------ test/crud/v2/updateMany-hint.json | 168 ------ test/crud/v2/updateMany-validation.json | 57 -- test/crud/v2/updateOne-hint-clientError.json | 98 ---- test/crud/v2/updateOne-hint-serverError.json | 147 ------ test/crud/v2/updateOne-hint.json | 154 ------ test/crud/v2/updateOne-validation.json | 39 -- test/crud/v2/updateWithPipelines.json | 408 -------------- .../rs/ghost_discovered.json | 35 -- .../rs/rsother_discovered.json | 64 --- .../single/unavailable_seed.json | 25 - .../aggregate-serverErrors.json | 13 +- test/retryable_reads/aggregate.json | 3 +- ...angeStreams-client.watch-serverErrors.json | 16 +- .../changeStreams-client.watch.json | 6 +- ...ngeStreams-db.coll.watch-serverErrors.json | 16 +- .../changeStreams-db.coll.watch.json | 6 +- .../changeStreams-db.watch-serverErrors.json | 16 +- .../changeStreams-db.watch.json | 6 +- test/retryable_reads/count-serverErrors.json | 13 +- test/retryable_reads/count.json | 3 +- .../countDocuments-serverErrors.json | 13 +- test/retryable_reads/countDocuments.json | 3 +- .../distinct-serverErrors.json | 13 +- test/retryable_reads/distinct.json | 3 +- ...timatedDocumentCount-serverErrors-4.9.json | 10 +- ...atedDocumentCount-serverErrors-pre4.9.json | 10 +- test/retryable_reads/find-serverErrors.json | 13 +- test/retryable_reads/find.json | 3 +- .../retryable_reads/findOne-serverErrors.json | 13 +- test/retryable_reads/findOne.json | 3 +- .../gridfs-download-serverErrors.json | 13 +- test/retryable_reads/gridfs-download.json | 3 +- .../gridfs-downloadByName-serverErrors.json | 13 +- .../gridfs-downloadByName.json | 3 +- .../listCollectionNames-serverErrors.json | 13 +- test/retryable_reads/listCollectionNames.json | 3 +- .../listCollectionObjects-serverErrors.json | 13 +- .../listCollectionObjects.json | 3 +- .../listCollections-serverErrors.json | 13 +- test/retryable_reads/listCollections.json | 3 +- .../listDatabaseNames-serverErrors.json | 13 +- test/retryable_reads/listDatabaseNames.json | 3 +- .../listDatabaseObjects-serverErrors.json | 13 +- test/retryable_reads/listDatabaseObjects.json | 3 +- .../listDatabases-serverErrors.json | 13 +- test/retryable_reads/listDatabases.json | 3 +- .../listIndexNames-serverErrors.json | 13 +- test/retryable_reads/listIndexNames.json | 3 +- .../listIndexes-serverErrors.json | 13 +- test/retryable_reads/listIndexes.json | 3 +- test/retryable_reads/mapReduce.json | 3 +- .../bulkWrite-errorLabels.json | 3 +- .../bulkWrite-serverErrors.json | 3 +- test/retryable_writes/deleteMany.json | 3 +- .../deleteOne-errorLabels.json | 3 +- .../deleteOne-serverErrors.json | 3 +- .../findOneAndDelete-errorLabels.json | 3 +- .../findOneAndDelete-serverErrors.json | 3 +- .../findOneAndReplace-errorLabels.json | 3 +- .../findOneAndReplace-serverErrors.json | 3 +- .../findOneAndUpdate-errorLabels.json | 3 +- .../findOneAndUpdate-serverErrors.json | 3 +- .../insertMany-errorLabels.json | 3 +- .../insertMany-serverErrors.json | 3 +- .../insertOne-errorLabels.json | 3 +- .../insertOne-serverErrors.json | 9 +- .../replaceOne-errorLabels.json | 3 +- .../replaceOne-serverErrors.json | 3 +- test/retryable_writes/updateMany.json | 3 +- .../updateOne-errorLabels.json | 3 +- .../updateOne-serverErrors.json | 3 +- test/server_selection/rtt/first_value.json | 6 +- .../rtt/first_value_zero.json | 6 +- test/server_selection/rtt/value_test_1.json | 6 +- test/server_selection/rtt/value_test_2.json | 6 +- test/server_selection/rtt/value_test_3.json | 6 +- test/server_selection/rtt/value_test_4.json | 6 +- test/server_selection/rtt/value_test_5.json | 6 +- .../ReplicaSetNoPrimary/read/Nearest.json | 110 ++-- .../read/Nearest_multiple.json | 126 ++--- .../read/Nearest_non_matching.json | 64 +-- .../read/PossiblePrimary.json | 21 + .../read/PossiblePrimaryNearest.json | 21 + .../ReplicaSetNoPrimary/read/Primary.json | 57 +- .../read/PrimaryPreferred.json | 106 ++-- .../read/PrimaryPreferred_non_matching.json | 64 +-- .../ReplicaSetNoPrimary/read/Secondary.json | 110 ++-- .../read/SecondaryPreferred.json | 110 ++-- .../read/SecondaryPreferred_non_matching.json | 64 +-- .../read/Secondary_multi_tags.json | 110 ++-- .../read/Secondary_multi_tags2.json | 110 ++-- .../read/Secondary_non_matching.json | 64 +-- .../write/SecondaryPreferred.json | 64 +-- .../ReplicaSetWithPrimary/read/Nearest.json | 142 ++--- .../read/Nearest_multiple.json | 158 +++--- .../read/Nearest_non_matching.json | 80 +-- .../ReplicaSetWithPrimary/read/Primary.json | 103 ++-- .../read/PrimaryPreferred.json | 106 ++-- .../read/PrimaryPreferred_non_matching.json | 110 ++-- .../ReplicaSetWithPrimary/read/Secondary.json | 126 ++--- .../read/SecondaryPreferred.json | 126 ++--- .../read/SecondaryPreferred_non_matching.json | 110 ++-- .../read/SecondaryPreferred_tags.json | 94 ++-- .../read/Secondary_non_matching.json | 80 +-- .../write/SecondaryPreferred.json | 110 ++-- .../Sharded/read/Nearest.json | 45 ++ .../Sharded/read/Primary.json | 40 ++ .../Sharded/read/PrimaryPreferred.json | 45 ++ .../Sharded/read/Secondary.json | 45 ++ .../Sharded/read/SecondaryPreferred.json | 99 ++-- .../Sharded/write/Nearest.json | 45 ++ .../Sharded/write/Primary.json | 40 ++ .../Sharded/write/PrimaryPreferred.json | 45 ++ .../Sharded/write/Secondary.json | 45 ++ .../Sharded/write/SecondaryPreferred.json | 99 ++-- .../Single/read/SecondaryPreferred.json | 80 +-- .../Single/write/SecondaryPreferred.json | 80 +-- .../Unknown/read/SecondaryPreferred.json | 30 +- .../Unknown/write/SecondaryPreferred.json | 30 +- test/test_change_stream.py | 4 +- test/test_transactions.py | 7 - ...rud_v2.py => test_transactions_unified.py} | 30 +- .../commit-retry.json | 2 +- test/transactions/legacy/error-labels.json | 5 +- test/transactions/legacy/mongos-pin-auto.json | 3 +- .../legacy/mongos-recovery-token.json | 6 +- test/transactions/legacy/pin-mongos.json | 6 +- test/transactions/legacy/retryable-abort.json | 6 +- .../transactions/legacy/retryable-commit.json | 6 +- test/transactions/unified/mongos-unpin.json | 12 +- test/unified_format.py | 50 +- test/uri_options/connection-options.json | 70 +++ test/uri_options/connection-pool-options.json | 17 +- test/uri_options/read-preference-options.json | 2 +- test/utils.py | 3 +- test/utils_selection_tests.py | 2 +- 230 files changed, 11621 insertions(+), 8438 deletions(-) rename test/change_streams/legacy/{change-streams-resume-whitelist.json => change-streams-resume-allowlist.json} (99%) create mode 100644 test/crud/unified/aggregate-merge.json create mode 100644 test/crud/unified/aggregate-out-readConcern.json create mode 100644 test/crud/unified/aggregate.json rename test/crud/{v2 => unified}/bulkWrite-arrayFilters-clientError.json (53%) create mode 100644 test/crud/unified/bulkWrite-arrayFilters.json rename test/crud/{v2 => unified}/bulkWrite-delete-hint-clientError.json (54%) create mode 100644 test/crud/unified/bulkWrite-delete-hint-serverError.json create mode 100644 test/crud/unified/bulkWrite-delete-hint.json rename test/crud/{v2 => unified}/bulkWrite-update-hint-clientError.json (63%) create mode 100644 test/crud/unified/bulkWrite-update-hint-serverError.json create mode 100644 test/crud/unified/bulkWrite-update-hint.json rename test/crud/{v2 => unified}/bulkWrite-update-validation.json (54%) rename test/crud/{v2 => unified}/db-aggregate.json (68%) create mode 100644 test/crud/unified/deleteMany-hint-clientError.json create mode 100644 test/crud/unified/deleteMany-hint-serverError.json create mode 100644 test/crud/unified/deleteMany-hint.json create mode 100644 test/crud/unified/deleteOne-hint-clientError.json create mode 100644 test/crud/unified/deleteOne-hint-serverError.json create mode 100644 test/crud/unified/deleteOne-hint.json create mode 100644 test/crud/unified/find-allowdiskuse-clientError.json create mode 100644 test/crud/unified/find-allowdiskuse-serverError.json create mode 100644 test/crud/unified/find-allowdiskuse.json create mode 100644 test/crud/unified/find.json create mode 100644 test/crud/unified/findOneAndDelete-hint-clientError.json create mode 100644 test/crud/unified/findOneAndDelete-hint-serverError.json create mode 100644 test/crud/unified/findOneAndDelete-hint.json create mode 100644 test/crud/unified/findOneAndReplace-hint-clientError.json create mode 100644 test/crud/unified/findOneAndReplace-hint-serverError.json create mode 100644 test/crud/unified/findOneAndReplace-hint.json create mode 100644 test/crud/unified/findOneAndUpdate-hint-clientError.json create mode 100644 test/crud/unified/findOneAndUpdate-hint-serverError.json create mode 100644 test/crud/unified/findOneAndUpdate-hint.json create mode 100644 test/crud/unified/replaceOne-hint.json create mode 100644 test/crud/unified/replaceOne-validation.json rename test/crud/{v2 => unified}/unacknowledged-bulkWrite-delete-hint-clientError.json (53%) rename test/crud/{v2 => unified}/unacknowledged-bulkWrite-update-hint-clientError.json (62%) create mode 100644 test/crud/unified/unacknowledged-deleteMany-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-deleteOne-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-replaceOne-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-updateMany-hint-clientError.json create mode 100644 test/crud/unified/unacknowledged-updateOne-hint-clientError.json rename test/crud/{v2 => unified}/updateMany-hint-clientError.json (50%) create mode 100644 test/crud/unified/updateMany-hint-serverError.json create mode 100644 test/crud/unified/updateMany-hint.json create mode 100644 test/crud/unified/updateMany-validation.json create mode 100644 test/crud/unified/updateOne-hint-clientError.json create mode 100644 test/crud/unified/updateOne-hint-serverError.json create mode 100644 test/crud/unified/updateOne-hint.json create mode 100644 test/crud/unified/updateOne-validation.json create mode 100644 test/crud/unified/updateWithPipelines.json delete mode 100644 test/crud/v2/aggregate-merge.json delete mode 100644 test/crud/v2/aggregate-out-readConcern.json delete mode 100644 test/crud/v2/bulkWrite-arrayFilters.json delete mode 100644 test/crud/v2/bulkWrite-delete-hint-serverError.json delete mode 100644 test/crud/v2/bulkWrite-delete-hint.json delete mode 100644 test/crud/v2/bulkWrite-update-hint-serverError.json delete mode 100644 test/crud/v2/bulkWrite-update-hint.json delete mode 100644 test/crud/v2/deleteMany-hint-clientError.json delete mode 100644 test/crud/v2/deleteMany-hint-serverError.json delete mode 100644 test/crud/v2/deleteMany-hint.json delete mode 100644 test/crud/v2/deleteOne-hint-clientError.json delete mode 100644 test/crud/v2/deleteOne-hint-serverError.json delete mode 100644 test/crud/v2/deleteOne-hint.json delete mode 100644 test/crud/v2/find-allowdiskuse-clientError.json delete mode 100644 test/crud/v2/find-allowdiskuse-serverError.json delete mode 100644 test/crud/v2/find-allowdiskuse.json delete mode 100644 test/crud/v2/findOneAndDelete-hint-clientError.json delete mode 100644 test/crud/v2/findOneAndDelete-hint-serverError.json delete mode 100644 test/crud/v2/findOneAndDelete-hint.json delete mode 100644 test/crud/v2/findOneAndReplace-hint-clientError.json delete mode 100644 test/crud/v2/findOneAndReplace-hint-serverError.json delete mode 100644 test/crud/v2/findOneAndReplace-hint.json delete mode 100644 test/crud/v2/findOneAndUpdate-hint-clientError.json delete mode 100644 test/crud/v2/findOneAndUpdate-hint-serverError.json delete mode 100644 test/crud/v2/findOneAndUpdate-hint.json delete mode 100644 test/crud/v2/replaceOne-hint.json delete mode 100644 test/crud/v2/replaceOne-validation.json delete mode 100644 test/crud/v2/unacknowledged-deleteMany-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-deleteOne-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-replaceOne-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-updateMany-hint-clientError.json delete mode 100644 test/crud/v2/unacknowledged-updateOne-hint-clientError.json delete mode 100644 test/crud/v2/updateMany-hint-serverError.json delete mode 100644 test/crud/v2/updateMany-hint.json delete mode 100644 test/crud/v2/updateMany-validation.json delete mode 100644 test/crud/v2/updateOne-hint-clientError.json delete mode 100644 test/crud/v2/updateOne-hint-serverError.json delete mode 100644 test/crud/v2/updateOne-hint.json delete mode 100644 test/crud/v2/updateOne-validation.json delete mode 100644 test/crud/v2/updateWithPipelines.json delete mode 100644 test/discovery_and_monitoring/rs/ghost_discovered.json delete mode 100644 test/discovery_and_monitoring/rs/rsother_discovered.json delete mode 100644 test/discovery_and_monitoring/single/unavailable_seed.json create mode 100644 test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json create mode 100644 test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json create mode 100644 test/server_selection/server_selection/Sharded/read/Nearest.json create mode 100644 test/server_selection/server_selection/Sharded/read/Primary.json create mode 100644 test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json create mode 100644 test/server_selection/server_selection/Sharded/read/Secondary.json create mode 100644 test/server_selection/server_selection/Sharded/write/Nearest.json create mode 100644 test/server_selection/server_selection/Sharded/write/Primary.json create mode 100644 test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json create mode 100644 test/server_selection/server_selection/Sharded/write/Secondary.json rename test/{test_crud_v2.py => test_transactions_unified.py} (54%) diff --git a/test/__init__.py b/test/__init__.py index 49075a2d26..83dac398e4 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -632,6 +632,8 @@ def check_auth_with_sharding(self, func): func=func) def is_topology_type(self, topologies): + if 'load-balanced' in topologies and self.load_balancer: + return True if 'single' in topologies and not (self.is_mongos or self.is_rs): return True if 'replicaset' in topologies and self.is_rs: diff --git a/test/change_streams/legacy/change-streams-errors.json b/test/change_streams/legacy/change-streams-errors.json index 19cbc74288..7b3fa80689 100644 --- a/test/change_streams/legacy/change-streams-errors.json +++ b/test/change_streams/legacy/change-streams-errors.json @@ -14,7 +14,7 @@ "changeStreamPipeline": [], "changeStreamOptions": {}, "operations": [], - "expectations": [], + "expectations": null, "result": { "error": { "code": 40573 @@ -78,7 +78,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [ { @@ -125,7 +126,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, diff --git a/test/change_streams/legacy/change-streams-resume-whitelist.json b/test/change_streams/legacy/change-streams-resume-allowlist.json similarity index 99% rename from test/change_streams/legacy/change-streams-resume-whitelist.json rename to test/change_streams/legacy/change-streams-resume-allowlist.json index 39f883ee5e..baffc8fba9 100644 --- a/test/change_streams/legacy/change-streams-resume-whitelist.json +++ b/test/change_streams/legacy/change-streams-resume-allowlist.json @@ -20,7 +20,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -777,7 +778,7 @@ } }, { - "description": "change stream resumes after NotMaster", + "description": "change stream resumes after NotWritablePrimary", "minServerVersion": "4.2", "maxServerVersion": "4.2.99", "failPoint": { @@ -1068,7 +1069,7 @@ } }, { - "description": "change stream resumes after NotMasterNoSlaveOk", + "description": "change stream resumes after NotPrimaryNoSecondaryOk", "minServerVersion": "4.2", "maxServerVersion": "4.2.99", "failPoint": { @@ -1165,7 +1166,7 @@ } }, { - "description": "change stream resumes after NotMasterOrSecondary", + "description": "change stream resumes after NotPrimaryOrSecondary", "minServerVersion": "4.2", "maxServerVersion": "4.2.99", "failPoint": { diff --git a/test/change_streams/legacy/change-streams-resume-errorLabels.json b/test/change_streams/legacy/change-streams-resume-errorLabels.json index cf8957b21f..2bac61d3b1 100644 --- a/test/change_streams/legacy/change-streams-resume-errorLabels.json +++ b/test/change_streams/legacy/change-streams-resume-errorLabels.json @@ -18,7 +18,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -111,7 +112,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -204,7 +206,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -297,7 +300,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -390,7 +394,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -483,7 +488,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -576,7 +582,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -654,7 +661,7 @@ } }, { - "description": "change stream resumes after NotMaster", + "description": "change stream resumes after NotWritablePrimary", "minServerVersion": "4.3.1", "failPoint": { "configureFailPoint": "failGetMoreAfterCursorCheckout", @@ -669,7 +676,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -762,7 +770,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -855,7 +864,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -933,7 +943,7 @@ } }, { - "description": "change stream resumes after NotMasterNoSlaveOk", + "description": "change stream resumes after NotPrimaryNoSecondaryOk", "minServerVersion": "4.3.1", "failPoint": { "configureFailPoint": "failGetMoreAfterCursorCheckout", @@ -948,7 +958,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1026,7 +1037,7 @@ } }, { - "description": "change stream resumes after NotMasterOrSecondary", + "description": "change stream resumes after NotPrimaryOrSecondary", "minServerVersion": "4.3.1", "failPoint": { "configureFailPoint": "failGetMoreAfterCursorCheckout", @@ -1041,7 +1052,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1134,7 +1146,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1227,7 +1240,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1320,7 +1334,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1413,7 +1428,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1512,7 +1528,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, @@ -1608,7 +1625,8 @@ "target": "collection", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ], "changeStreamPipeline": [], "changeStreamOptions": {}, diff --git a/test/change_streams/legacy/change-streams.json b/test/change_streams/legacy/change-streams.json index 4aeb2c7f70..54b76af0a3 100644 --- a/test/change_streams/legacy/change-streams.json +++ b/test/change_streams/legacy/change-streams.json @@ -82,7 +82,7 @@ } } ], - "expectations": [], + "expectations": null, "result": { "success": [ { diff --git a/test/crud/unified/aggregate-merge.json b/test/crud/unified/aggregate-merge.json new file mode 100644 index 0000000000..c34e93a699 --- /dev/null +++ b/test/crud/unified/aggregate-merge.json @@ -0,0 +1,497 @@ +{ + "description": "aggregate-merge", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_aggregate_merge" + } + }, + { + "collection": { + "id": "collection_readConcern_majority", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_local", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_available", + "database": "database0", + "collectionName": "test_aggregate_merge", + "collectionOptions": { + "readConcern": { + "level": "available" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test_aggregate_merge", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and batch size of 0", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "batchSize": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "cursor": {} + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and majority readConcern", + "operations": [ + { + "object": "collection_readConcern_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and local readConcern", + "operations": [ + { + "object": "collection_readConcern_local", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "local" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge and available readConcern", + "operations": [ + { + "object": "collection_readConcern_available", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_merge", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_test_collection" + } + } + ], + "readConcern": { + "level": "available" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-out-readConcern.json b/test/crud/unified/aggregate-out-readConcern.json new file mode 100644 index 0000000000..ae1beedde5 --- /dev/null +++ b/test/crud/unified/aggregate-out-readConcern.json @@ -0,0 +1,406 @@ +{ + "description": "aggregate-out-readConcern", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.1.0", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern" + } + }, + { + "collection": { + "id": "collection_readConcern_majority", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_local", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_available", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "available" + } + } + } + }, + { + "collection": { + "id": "collection_readConcern_linearizable", + "database": "database0", + "collectionName": "test_aggregate_out_readconcern", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test_aggregate_out_readconcern", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "readConcern majority with out stage", + "operations": [ + { + "object": "collection_readConcern_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern local with out stage", + "operations": [ + { + "object": "collection_readConcern_local", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "local" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern available with out stage", + "operations": [ + { + "object": "collection_readConcern_available", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "available" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "readConcern linearizable with out stage", + "operations": [ + { + "object": "collection_readConcern_linearizable", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test_aggregate_out_readconcern", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "readConcern": { + "level": "linearizable" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json new file mode 100644 index 0000000000..dcdad761e8 --- /dev/null +++ b/test/crud/unified/aggregate.json @@ -0,0 +1,166 @@ +{ + "description": "aggregate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "aggregate-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "aggregate-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "aggregate with multiple batches works", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2 + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/bulkWrite-arrayFilters-clientError.json b/test/crud/unified/bulkWrite-arrayFilters-clientError.json similarity index 53% rename from test/crud/v2/bulkWrite-arrayFilters-clientError.json rename to test/crud/unified/bulkWrite-arrayFilters-clientError.json index 22e22f0efb..6073890dd3 100644 --- a/test/crud/v2/bulkWrite-arrayFilters-clientError.json +++ b/test/crud/unified/bulkWrite-arrayFilters-clientError.json @@ -1,29 +1,61 @@ { - "runOn": [ + "description": "bulkWrite-arrayFilters-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ { "maxServerVersion": "3.5.5" } ], - "data": [ + "createEntities": [ { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "y": [ + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ { - "b": 0 + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] }, { - "b": 1 + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] } ] } @@ -33,12 +65,12 @@ "description": "BulkWrite on server that doesn't support arrayFilters", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": {}, "update": { "$set": { @@ -53,25 +85,30 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [] + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] }, { "description": "BulkWrite on server that doesn't support arrayFilters with arrayFilters on second op", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": {}, "update": { "$set": { @@ -81,8 +118,7 @@ } }, { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": {}, "update": { "$set": { @@ -97,14 +133,19 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [] + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] } ] } diff --git a/test/crud/unified/bulkWrite-arrayFilters.json b/test/crud/unified/bulkWrite-arrayFilters.json new file mode 100644 index 0000000000..4d7b4b7947 --- /dev/null +++ b/test/crud/unified/bulkWrite-arrayFilters.json @@ -0,0 +1,279 @@ +{ + "description": "bulkWrite-arrayFilters", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": { + "$set": { + "y.$[i].b": 2 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/bulkWrite-delete-hint-clientError.json b/test/crud/unified/bulkWrite-delete-hint-clientError.json similarity index 54% rename from test/crud/v2/bulkWrite-delete-hint-clientError.json rename to test/crud/unified/bulkWrite-delete-hint-clientError.json index cfeac904ca..c55067be22 100644 --- a/test/crud/v2/bulkWrite-delete-hint-clientError.json +++ b/test/crud/unified/bulkWrite-delete-hint-clientError.json @@ -1,39 +1,70 @@ { - "runOn": [ + "description": "bulkWrite-delete-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ { "maxServerVersion": "3.3.99" } ], - "data": [ + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } }, { - "_id": 3, - "x": 33 - }, + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ { - "_id": 4, - "x": 44 + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] } ], - "collection_name": "BulkWrite_delete_hint", "tests": [ { "description": "BulkWrite deleteOne with hints unsupported (client-side error)", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 }, @@ -41,8 +72,7 @@ } }, { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 2 }, @@ -52,17 +82,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -81,18 +118,18 @@ } ] } - } + ] }, { "description": "BulkWrite deleteMany with hints unsupported (client-side error)", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "deleteMany", - "arguments": { + "deleteMany": { "filter": { "_id": { "$lt": 3 @@ -102,8 +139,7 @@ } }, { - "name": "deleteMany", - "arguments": { + "deleteMany": { "filter": { "_id": { "$gte": 4 @@ -115,17 +151,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -144,7 +187,7 @@ } ] } - } + ] } ] } diff --git a/test/crud/unified/bulkWrite-delete-hint-serverError.json b/test/crud/unified/bulkWrite-delete-hint-serverError.json new file mode 100644 index 0000000000..30b9010ac3 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint-serverError.json @@ -0,0 +1,252 @@ +{ + "description": "bulkWrite-delete-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-delete-hint.json b/test/crud/unified/bulkWrite-delete-hint.json new file mode 100644 index 0000000000..31f3865323 --- /dev/null +++ b/test/crud/unified/bulkWrite-delete-hint.json @@ -0,0 +1,247 @@ +{ + "description": "bulkWrite-delete-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + }, + { + "q": { + "_id": 2 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite deleteMany with hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 3, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_delete_hint", + "deletes": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "hint": "_id_", + "limit": 0 + }, + { + "q": { + "_id": { + "$gte": 4 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/bulkWrite-update-hint-clientError.json b/test/crud/unified/bulkWrite-update-hint-clientError.json similarity index 63% rename from test/crud/v2/bulkWrite-update-hint-clientError.json rename to test/crud/unified/bulkWrite-update-hint-clientError.json index fa919ec515..68a92065aa 100644 --- a/test/crud/v2/bulkWrite-update-hint-clientError.json +++ b/test/crud/unified/bulkWrite-update-hint-clientError.json @@ -1,39 +1,70 @@ { - "runOn": [ + "description": "bulkWrite-update-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ { "maxServerVersion": "3.3.99" } ], - "data": [ + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } }, { - "_id": 3, - "x": 33 - }, + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ { - "_id": 4, - "x": 44 + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] } ], - "collection_name": "test_bulkwrite_update_hint", "tests": [ { "description": "BulkWrite updateOne with update hints unsupported (client-side error)", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -46,8 +77,7 @@ } }, { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -62,17 +92,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -91,18 +128,18 @@ } ] } - } + ] }, { "description": "BulkWrite updateMany with update hints unsupported (client-side error)", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": { "_id": { "$lt": 3 @@ -117,8 +154,7 @@ } }, { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": { "_id": { "$lt": 3 @@ -135,17 +171,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -164,18 +207,18 @@ } ] } - } + ] }, { "description": "BulkWrite replaceOne with update hints unsupported (client-side error)", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "replaceOne", - "arguments": { + "replaceOne": { "filter": { "_id": 3 }, @@ -186,8 +229,7 @@ } }, { - "name": "replaceOne", - "arguments": { + "replaceOne": { "filter": { "_id": 4 }, @@ -200,17 +242,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -229,7 +278,7 @@ } ] } - } + ] } ] } diff --git a/test/crud/unified/bulkWrite-update-hint-serverError.json b/test/crud/unified/bulkWrite-update-hint-serverError.json new file mode 100644 index 0000000000..2a9a6795ca --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint-serverError.json @@ -0,0 +1,422 @@ +{ + "description": "bulkWrite-update-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-hint.json b/test/crud/unified/bulkWrite-update-hint.json new file mode 100644 index 0000000000..b8445d80d4 --- /dev/null +++ b/test/crud/unified/bulkWrite-update-hint.json @@ -0,0 +1,445 @@ +{ + "description": "bulkWrite-update-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_bulkwrite_update_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite updateMany with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$lt": 3 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 4, + "modifiedCount": 4, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": { + "$lt": 3 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 13 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with update hints", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 333 + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 444 + }, + "hint": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_bulkwrite_update_hint", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "x": 333 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": "_id_" + }, + { + "q": { + "_id": 4 + }, + "u": { + "x": 444 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "_id": 1 + } + } + ], + "ordered": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 444 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/bulkWrite-update-validation.json b/test/crud/unified/bulkWrite-update-validation.json similarity index 54% rename from test/crud/v2/bulkWrite-update-validation.json rename to test/crud/unified/bulkWrite-update-validation.json index 481e13c45c..9ed7db5121 100644 --- a/test/crud/v2/bulkWrite-update-validation.json +++ b/test/crud/unified/bulkWrite-update-validation.json @@ -1,16 +1,48 @@ { - "data": [ + "description": "bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] } ], "tests": [ @@ -18,12 +50,12 @@ "description": "BulkWrite replaceOne prohibits atomic modifiers", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "replaceOne", - "arguments": { + "replaceOne": { "filter": { "_id": 1 }, @@ -36,13 +68,22 @@ } ] }, - "error": true + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -57,18 +98,18 @@ } ] } - } + ] }, { "description": "BulkWrite updateOne requires atomic modifiers", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -79,13 +120,22 @@ } ] }, - "error": true + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -100,18 +150,18 @@ } ] } - } + ] }, { "description": "BulkWrite updateMany requires atomic modifiers", "operations": [ { + "object": "collection0", "name": "bulkWrite", "arguments": { "requests": [ { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": { "_id": { "$gt": 1 @@ -124,13 +174,22 @@ } ] }, - "error": true + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -145,7 +204,7 @@ } ] } - } + ] } ] } diff --git a/test/crud/v2/db-aggregate.json b/test/crud/unified/db-aggregate.json similarity index 68% rename from test/crud/v2/db-aggregate.json rename to test/crud/unified/db-aggregate.json index d88b9e1819..5015405bfc 100644 --- a/test/crud/v2/db-aggregate.json +++ b/test/crud/unified/db-aggregate.json @@ -1,17 +1,43 @@ { - "runOn": [ + "description": "db-aggregate", + "schemaVersion": "1.4", + "runOnRequirements": [ { - "minServerVersion": "3.6.0" + "minServerVersion": "3.6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } } ], - "database_name": "admin", "tests": [ { "description": "Aggregate with $listLocalSessions", "operations": [ { + "object": "database0", "name": "aggregate", - "object": "database", "arguments": { "pipeline": [ { @@ -33,7 +59,7 @@ } ] }, - "result": [ + "expectResult": [ { "dummy": "dummy field" } @@ -45,8 +71,8 @@ "description": "Aggregate with $listLocalSessions and allowDiskUse", "operations": [ { + "object": "database0", "name": "aggregate", - "object": "database", "arguments": { "pipeline": [ { @@ -69,7 +95,7 @@ ], "allowDiskUse": true }, - "result": [ + "expectResult": [ { "dummy": "dummy field" } diff --git a/test/crud/unified/deleteMany-hint-clientError.json b/test/crud/unified/deleteMany-hint-clientError.json new file mode 100644 index 0000000000..285f567f02 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-clientError.json @@ -0,0 +1,149 @@ +{ + "description": "deleteMany-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-serverError.json b/test/crud/unified/deleteMany-hint-serverError.json new file mode 100644 index 0000000000..90bfb89fc7 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-serverError.json @@ -0,0 +1,190 @@ +{ + "description": "deleteMany-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint.json b/test/crud/unified/deleteMany-hint.json new file mode 100644 index 0000000000..b0cdc03048 --- /dev/null +++ b/test/crud/unified/deleteMany-hint.json @@ -0,0 +1,173 @@ +{ + "description": "deleteMany-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany with hint string", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "DeleteMany with hint document", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteMany_hint", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + }, + "limit": 0 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-clientError.json b/test/crud/unified/deleteOne-hint-clientError.json new file mode 100644 index 0000000000..b6b2932bdc --- /dev/null +++ b/test/crud/unified/deleteOne-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "deleteOne-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-serverError.json b/test/crud/unified/deleteOne-hint-serverError.json new file mode 100644 index 0000000000..8b1398c75d --- /dev/null +++ b/test/crud/unified/deleteOne-hint-serverError.json @@ -0,0 +1,170 @@ +{ + "description": "deleteOne-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint.json b/test/crud/unified/deleteOne-hint.json new file mode 100644 index 0000000000..9e3970a540 --- /dev/null +++ b/test/crud/unified/deleteOne-hint.json @@ -0,0 +1,161 @@ +{ + "description": "deleteOne-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint" + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": "_id_", + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "deleteOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "DeleteOne_hint", + "deletes": [ + { + "q": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "limit": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse-clientError.json b/test/crud/unified/find-allowdiskuse-clientError.json new file mode 100644 index 0000000000..a47fd8e254 --- /dev/null +++ b/test/crud/unified/find-allowdiskuse-clientError.json @@ -0,0 +1,79 @@ +{ + "description": "find-allowdiskuse-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "3.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse_clienterror" + } + } + ], + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 3.2 server", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 3.2 server", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse-serverError.json b/test/crud/unified/find-allowdiskuse-serverError.json new file mode 100644 index 0000000000..a7907ba254 --- /dev/null +++ b/test/crud/unified/find-allowdiskuse-serverError.json @@ -0,0 +1,100 @@ +{ + "description": "find-allowdiskuse-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse_servererror" + } + } + ], + "tests": [ + { + "description": "Find fails when allowDiskUse true is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": true + } + } + } + ] + } + ] + }, + { + "description": "Find fails when allowDiskUse false is specified against pre 4.4 server (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse_servererror", + "filter": {}, + "allowDiskUse": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-allowdiskuse.json b/test/crud/unified/find-allowdiskuse.json new file mode 100644 index 0000000000..8d4cf66bf8 --- /dev/null +++ b/test/crud/unified/find-allowdiskuse.json @@ -0,0 +1,120 @@ +{ + "description": "find-allowdiskuse", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_find_allowdiskuse" + } + } + ], + "tests": [ + { + "description": "Find does not send allowDiskuse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Find sends allowDiskuse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": false + } + } + } + ] + } + ] + }, + { + "description": "Find sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test_find_allowdiskuse", + "allowDiskUse": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find.json b/test/crud/unified/find.json new file mode 100644 index 0000000000..275d5d351a --- /dev/null +++ b/test/crud/unified/find.json @@ -0,0 +1,156 @@ +{ + "description": "find", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "find-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "find-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with multiple batches works", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2 + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "find-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "find-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2 + }, + "commandName": "getMore", + "databaseName": "find-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-clientError.json b/test/crud/unified/findOneAndDelete-hint-clientError.json new file mode 100644 index 0000000000..d04125edd2 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "findOneAndDelete-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-serverError.json b/test/crud/unified/findOneAndDelete-hint-serverError.json new file mode 100644 index 0000000000..23c01f48f1 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-serverError.json @@ -0,0 +1,162 @@ +{ + "description": "findOneAndDelete-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.3" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint.json b/test/crud/unified/findOneAndDelete-hint.json new file mode 100644 index 0000000000..0180010dc8 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint.json @@ -0,0 +1,155 @@ +{ + "description": "findOneAndDelete-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": "_id_", + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndDelete_hint", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + }, + "remove": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-clientError.json b/test/crud/unified/findOneAndReplace-hint-clientError.json new file mode 100644 index 0000000000..c483b23028 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-clientError.json @@ -0,0 +1,139 @@ +{ + "description": "findOneAndReplace-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-serverError.json b/test/crud/unified/findOneAndReplace-hint-serverError.json new file mode 100644 index 0000000000..e8f1c89360 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-serverError.json @@ -0,0 +1,172 @@ +{ + "description": "findOneAndReplace-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint.json b/test/crud/unified/findOneAndReplace-hint.json new file mode 100644 index 0000000000..13ac6a9c90 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint.json @@ -0,0 +1,173 @@ +{ + "description": "findOneAndReplace-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndReplace_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndReplace_hint", + "query": { + "_id": 1 + }, + "update": { + "x": 33 + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 33 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-clientError.json b/test/crud/unified/findOneAndUpdate-hint-clientError.json new file mode 100644 index 0000000000..dace72b0ad --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-clientError.json @@ -0,0 +1,143 @@ +{ + "description": "findOneAndUpdate-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-serverError.json b/test/crud/unified/findOneAndUpdate-hint-serverError.json new file mode 100644 index 0000000000..1413ced2e3 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-serverError.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndUpdate-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.3.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint.json b/test/crud/unified/findOneAndUpdate-hint.json new file mode 100644 index 0000000000..68cef18ef9 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint.json @@ -0,0 +1,181 @@ +{ + "description": "findOneAndUpdate-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndUpdate_hint" + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate with hint string", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate with hint document", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "findOneAndUpdate_hint", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "findOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint.json b/test/crud/unified/replaceOne-hint.json new file mode 100644 index 0000000000..edb1ceb7c9 --- /dev/null +++ b/test/crud/unified/replaceOne-hint.json @@ -0,0 +1,203 @@ +{ + "description": "replaceOne-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_replaceone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + ] + }, + { + "description": "ReplaceOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_replaceone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_replaceone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 111 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-validation.json b/test/crud/unified/replaceOne-validation.json new file mode 100644 index 0000000000..56e2de079c --- /dev/null +++ b/test/crud/unified/replaceOne-validation.json @@ -0,0 +1,82 @@ +{ + "description": "replaceOne-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne prohibits atomic modifiers", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json b/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json similarity index 53% rename from test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json rename to test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json index 46839db705..dca8108109 100644 --- a/test/crud/v2/unacknowledged-bulkWrite-delete-hint-clientError.json +++ b/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json @@ -1,39 +1,70 @@ { - "data": [ + "description": "unacknowledged-bulkWrite-delete-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } }, { - "_id": 3, - "x": 33 - }, + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_delete_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ { - "_id": 4, - "x": 44 + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] } ], - "collection_name": "BulkWrite_delete_hint", "tests": [ { "description": "Unacknowledged bulkWrite deleteOne with hints fails with client-side error", "operations": [ { + "object": "collection0", "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, "arguments": { "requests": [ { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 1 }, @@ -41,8 +72,7 @@ } }, { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 2 }, @@ -52,17 +82,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -81,23 +118,18 @@ } ] } - } + ] }, { "description": "Unacknowledged bulkWrite deleteMany with hints fails with client-side error", "operations": [ { + "object": "collection0", "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, "arguments": { "requests": [ { - "name": "deleteMany", - "arguments": { + "deleteMany": { "filter": { "_id": { "$lt": 3 @@ -107,8 +139,7 @@ } }, { - "name": "deleteMany", - "arguments": { + "deleteMany": { "filter": { "_id": { "$gte": 4 @@ -120,17 +151,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_delete_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -149,7 +187,7 @@ } ] } - } + ] } ] } diff --git a/test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json b/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json similarity index 62% rename from test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json rename to test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json index 4a41d76b35..22377b9ac1 100644 --- a/test/crud/v2/unacknowledged-bulkWrite-update-hint-clientError.json +++ b/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json @@ -1,39 +1,70 @@ { - "data": [ + "description": "unacknowledged-bulkWrite-update-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } }, { - "_id": 3, - "x": 33 - }, + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "Bulkwrite_update_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ { - "_id": 4, - "x": 44 + "collectionName": "Bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] } ], - "collection_name": "Bulkwrite_update_hint", "tests": [ { "description": "Unacknowledged bulkWrite updateOne with hints fails with client-side error", "operations": [ { + "object": "collection0", "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, "arguments": { "requests": [ { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -46,8 +77,7 @@ } }, { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -62,17 +92,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "Bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -91,23 +128,18 @@ } ] } - } + ] }, { "description": "Unacknowledged bulkWrite updateMany with hints fails with client-side error", "operations": [ { + "object": "collection0", "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, "arguments": { "requests": [ { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": { "_id": { "$lt": 3 @@ -122,8 +154,7 @@ } }, { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": { "_id": { "$lt": 3 @@ -140,17 +171,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "Bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -169,23 +207,18 @@ } ] } - } + ] }, { "description": "Unacknowledged bulkWrite replaceOne with hints fails with client-side error", "operations": [ { + "object": "collection0", "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, "arguments": { "requests": [ { - "name": "replaceOne", - "arguments": { + "replaceOne": { "filter": { "_id": 3 }, @@ -196,8 +229,7 @@ } }, { - "name": "replaceOne", - "arguments": { + "replaceOne": { "filter": { "_id": 4 }, @@ -210,17 +242,24 @@ } } ], - "options": { - "ordered": true - } + "ordered": true }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "Bulkwrite_update_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -239,7 +278,7 @@ } ] } - } + ] } ] } diff --git a/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json b/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json new file mode 100644 index 0000000000..21776eae80 --- /dev/null +++ b/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json @@ -0,0 +1,149 @@ +{ + "description": "unacknowledged-deleteMany-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteMany_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteMany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json b/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json new file mode 100644 index 0000000000..870c08339c --- /dev/null +++ b/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "unacknowledged-deleteOne-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "DeleteOne_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "DeleteOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json new file mode 100644 index 0000000000..a19cd77638 --- /dev/null +++ b/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json @@ -0,0 +1,133 @@ +{ + "description": "unacknowledged-findOneAndDelete-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "findOneAndDelete_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "findOneAndDelete_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json new file mode 100644 index 0000000000..c60bfdef17 --- /dev/null +++ b/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json @@ -0,0 +1,139 @@ +{ + "description": "unacknowledged-findOneAndReplace-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "FindOneAndReplace_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "FindOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "FindOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 33 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "FindOneAndReplace_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json new file mode 100644 index 0000000000..506510a3c9 --- /dev/null +++ b/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json @@ -0,0 +1,143 @@ +{ + "description": "unacknowledged-findOneAndUpdate-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "FindOneAndUpdate_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "FindOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "FindOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "FindOneAndUpdate_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json b/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json new file mode 100644 index 0000000000..b4f4bed5f9 --- /dev/null +++ b/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json @@ -0,0 +1,143 @@ +{ + "description": "unacknowledged-replaceOne-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "ReplaceOne_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "ReplaceOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged ReplaceOne with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "ReplaceOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Unacknowledged ReplaceOne with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "ReplaceOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-updateMany-hint-clientError.json b/test/crud/unified/unacknowledged-updateMany-hint-clientError.json new file mode 100644 index 0000000000..3087dc4dbc --- /dev/null +++ b/test/crud/unified/unacknowledged-updateMany-hint-clientError.json @@ -0,0 +1,159 @@ +{ + "description": "unacknowledged-updateMany-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "Updatemany_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "Updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "Updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "Updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-updateOne-hint-clientError.json b/test/crud/unified/unacknowledged-updateOne-hint-clientError.json new file mode 100644 index 0000000000..208703c26f --- /dev/null +++ b/test/crud/unified/unacknowledged-updateOne-hint-clientError.json @@ -0,0 +1,147 @@ +{ + "description": "unacknowledged-updateOne-hint-clientError", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "UpdateOne_hint", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "UpdateOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "UpdateOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "UpdateOne_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/updateMany-hint-clientError.json b/test/crud/unified/updateMany-hint-clientError.json similarity index 50% rename from test/crud/v2/updateMany-hint-clientError.json rename to test/crud/unified/updateMany-hint-clientError.json index 44ebddc53d..99c66c919e 100644 --- a/test/crud/v2/updateMany-hint-clientError.json +++ b/test/crud/unified/updateMany-hint-clientError.json @@ -1,30 +1,61 @@ { - "runOn": [ + "description": "updateMany-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ { "maxServerVersion": "3.3.99" } ], - "data": [ + "createEntities": [ { - "_id": 1, - "x": 11 + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } }, { - "_id": 3, - "x": 33 + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] } ], - "collection_name": "test_updatemany_hint", "tests": [ { "description": "UpdateMany with hint string unsupported (client-side error)", "operations": [ { - "object": "collection", + "object": "collection0", "name": "updateMany", "arguments": { "filter": { @@ -39,13 +70,22 @@ }, "hint": "_id_" }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -60,13 +100,13 @@ } ] } - } + ] }, { "description": "UpdateMany with hint document unsupported (client-side error)", "operations": [ { - "object": "collection", + "object": "collection0", "name": "updateMany", "arguments": { "filter": { @@ -83,13 +123,22 @@ "_id": 1 } }, - "error": true + "expectError": { + "isError": true + } } ], - "expectations": [], - "outcome": { - "collection": { - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ { "_id": 1, "x": 11 @@ -104,7 +153,7 @@ } ] } - } + ] } ] } diff --git a/test/crud/unified/updateMany-hint-serverError.json b/test/crud/unified/updateMany-hint-serverError.json new file mode 100644 index 0000000000..cc5ecfe26c --- /dev/null +++ b/test/crud/unified/updateMany-hint-serverError.json @@ -0,0 +1,216 @@ +{ + "description": "updateMany-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-hint.json b/test/crud/unified/updateMany-hint.json new file mode 100644 index 0000000000..e5f707fb5d --- /dev/null +++ b/test/crud/unified/updateMany-hint.json @@ -0,0 +1,219 @@ +{ + "description": "updateMany-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updatemany_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with hint string", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": "_id_", + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateMany with hint document", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updatemany_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "hint": { + "_id": 1 + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updatemany_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-validation.json b/test/crud/unified/updateMany-validation.json new file mode 100644 index 0000000000..45d0b77620 --- /dev/null +++ b/test/crud/unified/updateMany-validation.json @@ -0,0 +1,98 @@ +{ + "description": "updateMany-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne requires atomic modifiers", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-clientError.json b/test/crud/unified/updateOne-hint-clientError.json new file mode 100644 index 0000000000..8c0ddbd1d5 --- /dev/null +++ b/test/crud/unified/updateOne-hint-clientError.json @@ -0,0 +1,147 @@ +{ + "description": "updateOne-hint-clientError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "maxServerVersion": "3.3.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document unsupported (client-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-serverError.json b/test/crud/unified/updateOne-hint-serverError.json new file mode 100644 index 0000000000..d8a46da944 --- /dev/null +++ b/test/crud/unified/updateOne-hint-serverError.json @@ -0,0 +1,208 @@ +{ + "description": "updateOne-hint-serverError", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "3.4.0", + "maxServerVersion": "4.1.9" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document unsupported (server-side error)", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint.json b/test/crud/unified/updateOne-hint.json new file mode 100644 index 0000000000..9277c605f6 --- /dev/null +++ b/test/crud/unified/updateOne-hint.json @@ -0,0 +1,211 @@ +{ + "description": "updateOne-hint", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test_updateone_hint" + } + } + ], + "initialData": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with hint string", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_", + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "UpdateOne with hint document", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test_updateone_hint", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test_updateone_hint", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-validation.json b/test/crud/unified/updateOne-validation.json new file mode 100644 index 0000000000..8336efc0d1 --- /dev/null +++ b/test/crud/unified/updateOne-validation.json @@ -0,0 +1,80 @@ +{ + "description": "updateOne-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "crud-v2" + } + } + ], + "initialData": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne requires atomic modifiers", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "crud-v2", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateWithPipelines.json b/test/crud/unified/updateWithPipelines.json new file mode 100644 index 0000000000..12ae04665a --- /dev/null +++ b/test/crud/unified/updateWithPipelines.json @@ -0,0 +1,494 @@ +{ + "description": "updateWithPipelines", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + }, + { + "description": "UpdateMany using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate using pipelines", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "commandName": "findAndModify", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + }, + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + }, + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/v2/aggregate-merge.json b/test/crud/v2/aggregate-merge.json deleted file mode 100644 index c61736a0bb..0000000000 --- a/test/crud/v2/aggregate-merge.json +++ /dev/null @@ -1,415 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.11" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test_aggregate_merge", - "tests": [ - { - "description": "Aggregate with $merge", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and batch size of 0", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "batchSize": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "cursor": {} - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and majority readConcern", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "readConcern": { - "level": "majority" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and local readConcern", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "local" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "readConcern": { - "level": "local" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $merge and available readConcern", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "available" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_merge", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$merge": { - "into": "other_test_collection" - } - } - ], - "readConcern": { - "level": "available" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/aggregate-out-readConcern.json b/test/crud/v2/aggregate-out-readConcern.json deleted file mode 100644 index c39ee0e281..0000000000 --- a/test/crud/v2/aggregate-out-readConcern.json +++ /dev/null @@ -1,385 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test_aggregate_out_readconcern", - "tests": [ - { - "description": "readConcern majority with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "majority" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "readConcern local with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "local" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "local" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "readConcern available with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "available" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "available" - } - } - } - } - ], - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "readConcern linearizable with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "linearizable" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "linearizable" - } - } - } - } - ] - }, - { - "description": "invalid readConcern with out stage", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "collectionOptions": { - "readConcern": { - "level": "!invalid123" - } - }, - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test_aggregate_out_readconcern", - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "readConcern": { - "level": "!invalid123" - } - } - } - } - ] - } - ] -} diff --git a/test/crud/v2/bulkWrite-arrayFilters.json b/test/crud/v2/bulkWrite-arrayFilters.json deleted file mode 100644 index 2d3ce96de1..0000000000 --- a/test/crud/v2/bulkWrite-arrayFilters.json +++ /dev/null @@ -1,226 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.5.6" - } - ], - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ], - "collection_name": "test", - "database_name": "crud-tests", - "tests": [ - { - "description": "BulkWrite updateOne with arrayFilters", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 2 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ] - } - } - }, - { - "description": "BulkWrite updateMany with arrayFilters", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": { - "$set": { - "y.$[i].b": 2 - } - }, - "multi": true, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 2 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 2 - } - ] - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/bulkWrite-delete-hint-serverError.json b/test/crud/v2/bulkWrite-delete-hint-serverError.json deleted file mode 100644 index c68973b0f6..0000000000 --- a/test/crud/v2/bulkWrite-delete-hint-serverError.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.4.0", - "maxServerVersion": "4.3.3" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ], - "collection_name": "BulkWrite_delete_hint", - "tests": [ - { - "description": "BulkWrite deleteOne with hints unsupported (server-side error)", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 2 - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "BulkWrite_delete_hint", - "deletes": [ - { - "q": { - "_id": 1 - }, - "hint": "_id_", - "limit": 1 - }, - { - "q": { - "_id": 2 - }, - "hint": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite deleteMany with hints unsupported (server-side error)", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "hint": "_id_" - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gte": 4 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "BulkWrite_delete_hint", - "deletes": [ - { - "q": { - "_id": { - "$lt": 3 - } - }, - "hint": "_id_", - "limit": 0 - }, - { - "q": { - "_id": { - "$gte": 4 - } - }, - "hint": { - "_id": 1 - }, - "limit": 0 - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/bulkWrite-delete-hint.json b/test/crud/v2/bulkWrite-delete-hint.json deleted file mode 100644 index ece3238fc3..0000000000 --- a/test/crud/v2/bulkWrite-delete-hint.json +++ /dev/null @@ -1,204 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ], - "collection_name": "BulkWrite_delete_hint", - "tests": [ - { - "description": "BulkWrite deleteOne with hints", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 2 - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 2, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "BulkWrite_delete_hint", - "deletes": [ - { - "q": { - "_id": 1 - }, - "hint": "_id_", - "limit": 1 - }, - { - "q": { - "_id": 2 - }, - "hint": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite deleteMany with hints", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "hint": "_id_" - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gte": 4 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 3, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "BulkWrite_delete_hint", - "deletes": [ - { - "q": { - "_id": { - "$lt": 3 - } - }, - "hint": "_id_", - "limit": 0 - }, - { - "q": { - "_id": { - "$gte": 4 - } - }, - "hint": { - "_id": 1 - }, - "limit": 0 - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/bulkWrite-update-hint-serverError.json b/test/crud/v2/bulkWrite-update-hint-serverError.json deleted file mode 100644 index e8b96fffeb..0000000000 --- a/test/crud/v2/bulkWrite-update-hint-serverError.json +++ /dev/null @@ -1,343 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.4.0", - "maxServerVersion": "4.1.9" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ], - "collection_name": "test_bulkwrite_update_hint", - "tests": [ - { - "description": "BulkWrite updateOne with update hints unsupported (server-side error)", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_bulkwrite_update_hint", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite updateMany with update hints unsupported (server-side error)", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_bulkwrite_update_hint", - "updates": [ - { - "q": { - "_id": { - "$lt": 3 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": "_id_" - }, - { - "q": { - "_id": { - "$lt": 3 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": { - "_id": 1 - } - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite replaceOne with update hints unsupported (server-side error)", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 3 - }, - "replacement": { - "x": 333 - }, - "hint": "_id_" - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 444 - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_bulkwrite_update_hint", - "updates": [ - { - "q": { - "_id": 3 - }, - "u": { - "x": 333 - }, - "hint": "_id_" - }, - { - "q": { - "_id": 4 - }, - "u": { - "x": 444 - }, - "hint": { - "_id": 1 - } - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/bulkWrite-update-hint.json b/test/crud/v2/bulkWrite-update-hint.json deleted file mode 100644 index 15e169f76c..0000000000 --- a/test/crud/v2/bulkWrite-update-hint.json +++ /dev/null @@ -1,366 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ], - "collection_name": "test_bulkwrite_update_hint", - "tests": [ - { - "description": "BulkWrite updateOne with update hints", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_bulkwrite_update_hint", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 13 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite updateMany with update hints", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 4, - "modifiedCount": 4, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_bulkwrite_update_hint", - "updates": [ - { - "q": { - "_id": { - "$lt": 3 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": "_id_" - }, - { - "q": { - "_id": { - "$lt": 3 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": { - "_id": 1 - } - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 13 - }, - { - "_id": 2, - "x": 24 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite replaceOne with update hints", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 3 - }, - "replacement": { - "x": 333 - }, - "hint": "_id_" - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 444 - }, - "hint": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - }, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0, - "upsertedIds": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_bulkwrite_update_hint", - "updates": [ - { - "q": { - "_id": 3 - }, - "u": { - "x": 333 - }, - "hint": "_id_" - }, - { - "q": { - "_id": 4 - }, - "u": { - "x": 444 - }, - "hint": { - "_id": 1 - } - } - ], - "ordered": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 333 - }, - { - "_id": 4, - "x": 444 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/deleteMany-hint-clientError.json b/test/crud/v2/deleteMany-hint-clientError.json deleted file mode 100644 index 3a0d02566b..0000000000 --- a/test/crud/v2/deleteMany-hint-clientError.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "3.3.99" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "DeleteMany_hint", - "tests": [ - { - "description": "DeleteMany with hint string unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "DeleteMany with hint document unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/deleteMany-hint-serverError.json b/test/crud/v2/deleteMany-hint-serverError.json deleted file mode 100644 index 5829e86df8..0000000000 --- a/test/crud/v2/deleteMany-hint-serverError.json +++ /dev/null @@ -1,141 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.4.0", - "maxServerVersion": "4.3.3" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "DeleteMany_hint", - "tests": [ - { - "description": "DeleteMany with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteMany_hint", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_", - "limit": 0 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "DeleteMany with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteMany_hint", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - }, - "limit": 0 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/deleteMany-hint.json b/test/crud/v2/deleteMany-hint.json deleted file mode 100644 index 51ee386066..0000000000 --- a/test/crud/v2/deleteMany-hint.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "DeleteMany_hint", - "tests": [ - { - "description": "DeleteMany with hint string", - "operations": [ - { - "object": "collection", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_" - }, - "result": { - "deletedCount": 2 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteMany_hint", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_", - "limit": 0 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - }, - { - "description": "DeleteMany with hint document", - "operations": [ - { - "object": "collection", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "result": { - "deletedCount": 2 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteMany_hint", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - }, - "limit": 0 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/deleteOne-hint-clientError.json b/test/crud/v2/deleteOne-hint-clientError.json deleted file mode 100644 index 97f8ec4924..0000000000 --- a/test/crud/v2/deleteOne-hint-clientError.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "3.3.99" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "DeleteOne_hint", - "tests": [ - { - "description": "DeleteOne with hint string unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne with hint document unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/deleteOne-hint-serverError.json b/test/crud/v2/deleteOne-hint-serverError.json deleted file mode 100644 index 3cf9400a88..0000000000 --- a/test/crud/v2/deleteOne-hint-serverError.json +++ /dev/null @@ -1,121 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.4.0", - "maxServerVersion": "4.3.3" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "DeleteOne_hint", - "tests": [ - { - "description": "DeleteOne with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteOne_hint", - "deletes": [ - { - "q": { - "_id": 1 - }, - "hint": "_id_", - "limit": 1 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteOne_hint", - "deletes": [ - { - "q": { - "_id": 1 - }, - "hint": { - "_id": 1 - }, - "limit": 1 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/deleteOne-hint.json b/test/crud/v2/deleteOne-hint.json deleted file mode 100644 index ec8e7715a2..0000000000 --- a/test/crud/v2/deleteOne-hint.json +++ /dev/null @@ -1,116 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "DeleteOne_hint", - "tests": [ - { - "description": "DeleteOne with hint string", - "operations": [ - { - "object": "collection", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "result": { - "deletedCount": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteOne_hint", - "deletes": [ - { - "q": { - "_id": 1 - }, - "hint": "_id_", - "limit": 1 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "deleteOne with hint document", - "operations": [ - { - "object": "collection", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "result": { - "deletedCount": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "DeleteOne_hint", - "deletes": [ - { - "q": { - "_id": 1 - }, - "hint": { - "_id": 1 - }, - "limit": 1 - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/find-allowdiskuse-clientError.json b/test/crud/v2/find-allowdiskuse-clientError.json deleted file mode 100644 index 5ea013966a..0000000000 --- a/test/crud/v2/find-allowdiskuse-clientError.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "3.0.99" - } - ], - "collection_name": "test_find_allowdiskuse_clienterror", - "tests": [ - { - "description": "Find fails when allowDiskUse true is specified against pre 3.2 server", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": true - }, - "error": true - } - ], - "expectations": [] - }, - { - "description": "Find fails when allowDiskUse false is specified against pre 3.2 server", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": false - }, - "error": true - } - ], - "expectations": [] - } - ] -} diff --git a/test/crud/v2/find-allowdiskuse-serverError.json b/test/crud/v2/find-allowdiskuse-serverError.json deleted file mode 100644 index 31aa50e951..0000000000 --- a/test/crud/v2/find-allowdiskuse-serverError.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.2", - "maxServerVersion": "4.3.0" - } - ], - "collection_name": "test_find_allowdiskuse_servererror", - "tests": [ - { - "description": "Find fails when allowDiskUse true is specified against pre 4.4 server (server-side error)", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": true - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse_servererror", - "filter": {}, - "allowDiskUse": true - } - } - } - ] - }, - { - "description": "Find fails when allowDiskUse false is specified against pre 4.4 server (server-side error)", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": false - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse_servererror", - "filter": {}, - "allowDiskUse": false - } - } - } - ] - } - ] -} diff --git a/test/crud/v2/find-allowdiskuse.json b/test/crud/v2/find-allowdiskuse.json deleted file mode 100644 index b2862563b9..0000000000 --- a/test/crud/v2/find-allowdiskuse.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1" - } - ], - "collection_name": "test_find_allowdiskuse", - "tests": [ - { - "description": "Find does not send allowDiskuse when value is not specified", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {} - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse", - "allowDiskUse": null - } - } - } - ] - }, - { - "description": "Find sends allowDiskuse false when false is specified", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": false - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse", - "allowDiskUse": false - } - } - } - ] - }, - { - "description": "Find sends allowDiskUse true when true is specified", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": {}, - "allowDiskUse": true - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test_find_allowdiskuse", - "allowDiskUse": true - } - } - } - ] - } - ] -} diff --git a/test/crud/v2/findOneAndDelete-hint-clientError.json b/test/crud/v2/findOneAndDelete-hint-clientError.json deleted file mode 100644 index 262e78ce75..0000000000 --- a/test/crud/v2/findOneAndDelete-hint-clientError.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "4.0.99" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndDelete_hint", - "tests": [ - { - "description": "FindOneAndDelete with hint string unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete with hint document", - "operations": [ - { - "object": "collection", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndDelete-hint-serverError.json b/test/crud/v2/findOneAndDelete-hint-serverError.json deleted file mode 100644 index 9412b36f23..0000000000 --- a/test/crud/v2/findOneAndDelete-hint-serverError.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0", - "maxServerVersion": "4.3.3" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndDelete_hint", - "tests": [ - { - "description": "FindOneAndDelete with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndDelete_hint", - "query": { - "_id": 1 - }, - "hint": "_id_", - "remove": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndDelete_hint", - "query": { - "_id": 1 - }, - "hint": { - "_id": 1 - }, - "remove": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndDelete-hint.json b/test/crud/v2/findOneAndDelete-hint.json deleted file mode 100644 index fe8dcfa4c5..0000000000 --- a/test/crud/v2/findOneAndDelete-hint.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndDelete_hint", - "tests": [ - { - "description": "FindOneAndDelete with hint string", - "operations": [ - { - "object": "collection", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndDelete_hint", - "query": { - "_id": 1 - }, - "hint": "_id_", - "remove": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete with hint document", - "operations": [ - { - "object": "collection", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndDelete_hint", - "query": { - "_id": 1 - }, - "hint": { - "_id": 1 - }, - "remove": true - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndReplace-hint-clientError.json b/test/crud/v2/findOneAndReplace-hint-clientError.json deleted file mode 100644 index 08fd4b3ecc..0000000000 --- a/test/crud/v2/findOneAndReplace-hint-clientError.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "4.0.99" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndReplace_hint", - "tests": [ - { - "description": "FindOneAndReplace with hint string unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace with hint document unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndReplace-hint-serverError.json b/test/crud/v2/findOneAndReplace-hint-serverError.json deleted file mode 100644 index 6710e6a70e..0000000000 --- a/test/crud/v2/findOneAndReplace-hint-serverError.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0", - "maxServerVersion": "4.3.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndReplace_hint", - "tests": [ - { - "description": "FindOneAndReplace with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndReplace_hint", - "query": { - "_id": 1 - }, - "update": { - "x": 33 - }, - "hint": "_id_" - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndReplace_hint", - "query": { - "_id": 1 - }, - "update": { - "x": 33 - }, - "hint": { - "_id": 1 - } - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndReplace-hint.json b/test/crud/v2/findOneAndReplace-hint.json deleted file mode 100644 index 263fdf9623..0000000000 --- a/test/crud/v2/findOneAndReplace-hint.json +++ /dev/null @@ -1,128 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndReplace_hint", - "tests": [ - { - "description": "FindOneAndReplace with hint string", - "operations": [ - { - "object": "collection", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": "_id_" - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndReplace_hint", - "query": { - "_id": 1 - }, - "update": { - "x": 33 - }, - "hint": "_id_" - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 33 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace with hint document", - "operations": [ - { - "object": "collection", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndReplace_hint", - "query": { - "_id": 1 - }, - "update": { - "x": 33 - }, - "hint": { - "_id": 1 - } - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 33 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndUpdate-hint-clientError.json b/test/crud/v2/findOneAndUpdate-hint-clientError.json deleted file mode 100644 index 8cd5cddb51..0000000000 --- a/test/crud/v2/findOneAndUpdate-hint-clientError.json +++ /dev/null @@ -1,94 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "4.0.99" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndUpdate_hint", - "tests": [ - { - "description": "FindOneAndUpdate with hint string unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate with hint document unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndUpdate-hint-serverError.json b/test/crud/v2/findOneAndUpdate-hint-serverError.json deleted file mode 100644 index 1f4b2bda8b..0000000000 --- a/test/crud/v2/findOneAndUpdate-hint-serverError.json +++ /dev/null @@ -1,131 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0", - "maxServerVersion": "4.3.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndUpdate_hint", - "tests": [ - { - "description": "FindOneAndUpdate with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndUpdate_hint", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndUpdate_hint", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/findOneAndUpdate-hint.json b/test/crud/v2/findOneAndUpdate-hint.json deleted file mode 100644 index 451eecc013..0000000000 --- a/test/crud/v2/findOneAndUpdate-hint.json +++ /dev/null @@ -1,136 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndUpdate_hint", - "tests": [ - { - "description": "FindOneAndUpdate with hint string", - "operations": [ - { - "object": "collection", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndUpdate_hint", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate with hint document", - "operations": [ - { - "object": "collection", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "findOneAndUpdate_hint", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/replaceOne-hint.json b/test/crud/v2/replaceOne-hint.json deleted file mode 100644 index de4aa4d02f..0000000000 --- a/test/crud/v2/replaceOne-hint.json +++ /dev/null @@ -1,146 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "test_replaceone_hint", - "tests": [ - { - "description": "ReplaceOne with hint string", - "operations": [ - { - "object": "collection", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": "_id_" - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_replaceone_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "x": 111 - }, - "hint": "_id_" - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 111 - } - ] - } - } - }, - { - "description": "ReplaceOne with hint document", - "operations": [ - { - "object": "collection", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": { - "_id": 1 - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_replaceone_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "x": 111 - }, - "hint": { - "_id": 1 - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 111 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/replaceOne-validation.json b/test/crud/v2/replaceOne-validation.json deleted file mode 100644 index 2de4a6728b..0000000000 --- a/test/crud/v2/replaceOne-validation.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "ReplaceOne prohibits atomic modifiers", - "operations": [ - { - "object": "collection", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "$set": { - "x": 22 - } - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-deleteMany-hint-clientError.json b/test/crud/v2/unacknowledged-deleteMany-hint-clientError.json deleted file mode 100644 index 532f4282a9..0000000000 --- a/test/crud/v2/unacknowledged-deleteMany-hint-clientError.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "DeleteMany_hint", - "tests": [ - { - "description": "Unacknowledged deleteMany with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Unacknowledged deleteMany with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-deleteOne-hint-clientError.json b/test/crud/v2/unacknowledged-deleteOne-hint-clientError.json deleted file mode 100644 index ff3f05ea3e..0000000000 --- a/test/crud/v2/unacknowledged-deleteOne-hint-clientError.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "DeleteOne_hint", - "tests": [ - { - "description": "Unacknowledged deleteOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Unacknowledged deleteOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json b/test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json deleted file mode 100644 index 076978874d..0000000000 --- a/test/crud/v2/unacknowledged-findOneAndDelete-hint-clientError.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "findOneAndDelete_hint", - "tests": [ - { - "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json b/test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json deleted file mode 100644 index 38fbc817be..0000000000 --- a/test/crud/v2/unacknowledged-findOneAndReplace-hint-clientError.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "FindOneAndReplace_hint", - "tests": [ - { - "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json b/test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json deleted file mode 100644 index 615b4c0e63..0000000000 --- a/test/crud/v2/unacknowledged-findOneAndUpdate-hint-clientError.json +++ /dev/null @@ -1,99 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "FindOneAndUpdate_hint", - "tests": [ - { - "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-replaceOne-hint-clientError.json b/test/crud/v2/unacknowledged-replaceOne-hint-clientError.json deleted file mode 100644 index c4add73c2d..0000000000 --- a/test/crud/v2/unacknowledged-replaceOne-hint-clientError.json +++ /dev/null @@ -1,99 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "ReplaceOne_hint", - "tests": [ - { - "description": "Unacknowledged ReplaceOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Unacknowledged ReplaceOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-updateMany-hint-clientError.json b/test/crud/v2/unacknowledged-updateMany-hint-clientError.json deleted file mode 100644 index eaf3efd1cf..0000000000 --- a/test/crud/v2/unacknowledged-updateMany-hint-clientError.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "Updatemany_hint", - "tests": [ - { - "description": "Unacknowledged updateMany with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Unacknowledged updateMany with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/unacknowledged-updateOne-hint-clientError.json b/test/crud/v2/unacknowledged-updateOne-hint-clientError.json deleted file mode 100644 index 1f8f738012..0000000000 --- a/test/crud/v2/unacknowledged-updateOne-hint-clientError.json +++ /dev/null @@ -1,103 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "UpdateOne_hint", - "tests": [ - { - "description": "Unacknowledged updateOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Unacknowledged updateOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateMany-hint-serverError.json b/test/crud/v2/updateMany-hint-serverError.json deleted file mode 100644 index 86f21246e9..0000000000 --- a/test/crud/v2/updateMany-hint-serverError.json +++ /dev/null @@ -1,161 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.4.0", - "maxServerVersion": "4.1.9" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test_updatemany_hint", - "tests": [ - { - "description": "UpdateMany with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updatemany_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": "_id_" - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "UpdateMany with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updatemany_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": { - "_id": 1 - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateMany-hint.json b/test/crud/v2/updateMany-hint.json deleted file mode 100644 index 489348917f..0000000000 --- a/test/crud/v2/updateMany-hint.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test_updatemany_hint", - "tests": [ - { - "description": "UpdateMany with hint string", - "operations": [ - { - "object": "collection", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updatemany_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": "_id_" - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 34 - } - ] - } - } - }, - { - "description": "UpdateMany with hint document", - "operations": [ - { - "object": "collection", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updatemany_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "hint": { - "_id": 1 - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 34 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateMany-validation.json b/test/crud/v2/updateMany-validation.json deleted file mode 100644 index a85ccfa86e..0000000000 --- a/test/crud/v2/updateMany-validation.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "UpdateOne requires atomic modifiers", - "operations": [ - { - "object": "collection", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "x": 44 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateOne-hint-clientError.json b/test/crud/v2/updateOne-hint-clientError.json deleted file mode 100644 index 82bfe368c7..0000000000 --- a/test/crud/v2/updateOne-hint-clientError.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "runOn": [ - { - "maxServerVersion": "3.3.99" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "test_updateone_hint", - "tests": [ - { - "description": "UpdateOne with hint string unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne with hint document unsupported (client-side error)", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateOne-hint-serverError.json b/test/crud/v2/updateOne-hint-serverError.json deleted file mode 100644 index 8e8037eb8c..0000000000 --- a/test/crud/v2/updateOne-hint-serverError.json +++ /dev/null @@ -1,147 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.4.0", - "maxServerVersion": "4.1.9" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "test_updateone_hint", - "tests": [ - { - "description": "UpdateOne with hint string unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updateone_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne with hint document unsupported (server-side error)", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updateone_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateOne-hint.json b/test/crud/v2/updateOne-hint.json deleted file mode 100644 index 43f76da498..0000000000 --- a/test/crud/v2/updateOne-hint.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2.0" - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "collection_name": "test_updateone_hint", - "tests": [ - { - "description": "UpdateOne with hint string", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updateone_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - } - ] - } - } - }, - { - "description": "UpdateOne with hint document", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test_updateone_hint", - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - ] - } - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateOne-validation.json b/test/crud/v2/updateOne-validation.json deleted file mode 100644 index 6c919f5ea0..0000000000 --- a/test/crud/v2/updateOne-validation.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "UpdateOne requires atomic modifiers", - "operations": [ - { - "object": "collection", - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "x": 22 - } - }, - "error": true - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/crud/v2/updateWithPipelines.json b/test/crud/v2/updateWithPipelines.json deleted file mode 100644 index a310f2825f..0000000000 --- a/test/crud/v2/updateWithPipelines.json +++ /dev/null @@ -1,408 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.11" - } - ], - "data": [ - { - "_id": 1, - "x": 1, - "y": 1, - "t": { - "u": { - "v": 1 - } - } - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ], - "collection_name": "test", - "database_name": "crud-tests", - "tests": [ - { - "description": "UpdateOne using pipelines", - "operations": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - ] - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - } - }, - { - "description": "UpdateMany using pipelines", - "operations": [ - { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate using pipelines", - "operations": [ - { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "command_name": "findAndModify", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - } - }, - { - "description": "UpdateOne in bulk write using pipelines", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ] - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - ] - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - } - }, - { - "description": "UpdateMany in bulk write using pipelines", - "operations": [ - { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ] - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "crud-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/ghost_discovered.json b/test/discovery_and_monitoring/rs/ghost_discovered.json deleted file mode 100644 index bf22cbb0eb..0000000000 --- a/test/discovery_and_monitoring/rs/ghost_discovered.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "description": "Ghost discovered", - "uri": "mongodb://a,b/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "b:27017", - { - "ok": 1, - "ismaster": false, - "isreplicaset": true, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null - }, - "b:27017": { - "type": "RSGhost", - "setName": null - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/rsother_discovered.json b/test/discovery_and_monitoring/rs/rsother_discovered.json deleted file mode 100644 index c575501d80..0000000000 --- a/test/discovery_and_monitoring/rs/rsother_discovered.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "description": "RSOther discovered", - "uri": "mongodb://a,b/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "ismaster": false, - "secondary": true, - "hidden": true, - "hosts": [ - "c:27017", - "d:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 6 - } - ], - [ - "b:27017", - { - "ok": 1, - "ismaster": false, - "secondary": false, - "hosts": [ - "c:27017", - "d:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSOther", - "setName": "rs" - }, - "b:27017": { - "type": "RSOther", - "setName": "rs" - }, - "c:27017": { - "type": "Unknown", - "setName": null - }, - "d:27017": { - "type": "Unknown", - "setName": null - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/single/unavailable_seed.json b/test/discovery_and_monitoring/single/unavailable_seed.json deleted file mode 100644 index e9cce02ebf..0000000000 --- a/test/discovery_and_monitoring/single/unavailable_seed.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "description": "Unavailable seed", - "uri": "mongodb://a", - "phases": [ - { - "responses": [ - [ - "a:27017", - {} - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null - } - }, - "topologyType": "Single", - "logicalSessionTimeoutMinutes": null, - "setName": null - } - } - ] -} diff --git a/test/retryable_reads/aggregate-serverErrors.json b/test/retryable_reads/aggregate-serverErrors.json index 04208bc95b..1155f808dc 100644 --- a/test/retryable_reads/aggregate-serverErrors.json +++ b/test/retryable_reads/aggregate-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -218,7 +219,7 @@ ] }, { - "description": "Aggregate succeeds after NotMaster", + "description": "Aggregate succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -311,7 +312,7 @@ ] }, { - "description": "Aggregate succeeds after NotMasterNoSlaveOk", + "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -404,7 +405,7 @@ ] }, { - "description": "Aggregate succeeds after NotMasterOrSecondary", + "description": "Aggregate succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1055,7 +1056,7 @@ ] }, { - "description": "Aggregate fails after two NotMaster errors", + "description": "Aggregate fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -1139,7 +1140,7 @@ ] }, { - "description": "Aggregate fails after NotMaster when retryReads is false", + "description": "Aggregate fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/aggregate.json b/test/retryable_reads/aggregate.json index 30a6e05e69..f23d5c6793 100644 --- a/test/retryable_reads/aggregate.json +++ b/test/retryable_reads/aggregate.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/changeStreams-client.watch-serverErrors.json index cf6c230ec8..73dbfee916 100644 --- a/test/retryable_reads/changeStreams-client.watch-serverErrors.json +++ b/test/retryable_reads/changeStreams-client.watch-serverErrors.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", @@ -141,7 +143,7 @@ ] }, { - "description": "client.watch succeeds after NotMaster", + "description": "client.watch succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -196,7 +198,7 @@ ] }, { - "description": "client.watch succeeds after NotMasterNoSlaveOk", + "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -251,7 +253,7 @@ ] }, { - "description": "client.watch succeeds after NotMasterOrSecondary", + "description": "client.watch succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -636,7 +638,7 @@ ] }, { - "description": "client.watch fails after two NotMaster errors", + "description": "client.watch fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -692,7 +694,7 @@ ] }, { - "description": "client.watch fails after NotMaster when retryReads is false", + "description": "client.watch fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/changeStreams-client.watch.json b/test/retryable_reads/changeStreams-client.watch.json index 9a2ccad095..30a53037ad 100644 --- a/test/retryable_reads/changeStreams-client.watch.json +++ b/test/retryable_reads/changeStreams-client.watch.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json index eb7df1e264..77b3af04f4 100644 --- a/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json +++ b/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", @@ -133,7 +135,7 @@ ] }, { - "description": "db.coll.watch succeeds after NotMaster", + "description": "db.coll.watch succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -184,7 +186,7 @@ ] }, { - "description": "db.coll.watch succeeds after NotMasterNoSlaveOk", + "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -235,7 +237,7 @@ ] }, { - "description": "db.coll.watch succeeds after NotMasterOrSecondary", + "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -592,7 +594,7 @@ ] }, { - "description": "db.coll.watch fails after two NotMaster errors", + "description": "db.coll.watch fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -644,7 +646,7 @@ ] }, { - "description": "db.coll.watch fails after NotMaster when retryReads is false", + "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/changeStreams-db.coll.watch.json b/test/retryable_reads/changeStreams-db.coll.watch.json index 3408c84236..27f6105a4b 100644 --- a/test/retryable_reads/changeStreams-db.coll.watch.json +++ b/test/retryable_reads/changeStreams-db.coll.watch.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/retryable_reads/changeStreams-db.watch-serverErrors.json b/test/retryable_reads/changeStreams-db.watch-serverErrors.json index e070f56a01..7a87534508 100644 --- a/test/retryable_reads/changeStreams-db.watch-serverErrors.json +++ b/test/retryable_reads/changeStreams-db.watch-serverErrors.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", @@ -133,7 +135,7 @@ ] }, { - "description": "db.watch succeeds after NotMaster", + "description": "db.watch succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -184,7 +186,7 @@ ] }, { - "description": "db.watch succeeds after NotMasterNoSlaveOk", + "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -235,7 +237,7 @@ ] }, { - "description": "db.watch succeeds after NotMasterOrSecondary", + "description": "db.watch succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -592,7 +594,7 @@ ] }, { - "description": "db.watch fails after two NotMaster errors", + "description": "db.watch fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -644,7 +646,7 @@ ] }, { - "description": "db.watch fails after NotMaster when retryReads is false", + "description": "db.watch fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/changeStreams-db.watch.json b/test/retryable_reads/changeStreams-db.watch.json index bec09c49b7..e6b0b9b781 100644 --- a/test/retryable_reads/changeStreams-db.watch.json +++ b/test/retryable_reads/changeStreams-db.watch.json @@ -9,8 +9,10 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" - ] + "sharded", + "load-balanced" + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/retryable_reads/count-serverErrors.json b/test/retryable_reads/count-serverErrors.json index 839680fe59..36a0c17cab 100644 --- a/test/retryable_reads/count-serverErrors.json +++ b/test/retryable_reads/count-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -114,7 +115,7 @@ ] }, { - "description": "Count succeeds after NotMaster", + "description": "Count succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -157,7 +158,7 @@ ] }, { - "description": "Count succeeds after NotMasterNoSlaveOk", + "description": "Count succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -200,7 +201,7 @@ ] }, { - "description": "Count succeeds after NotMasterOrSecondary", + "description": "Count succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -501,7 +502,7 @@ ] }, { - "description": "Count fails after two NotMaster errors", + "description": "Count fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -544,7 +545,7 @@ ] }, { - "description": "Count fails after NotMaster when retryReads is false", + "description": "Count fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/count.json b/test/retryable_reads/count.json index 0ccf4982ba..139a545131 100644 --- a/test/retryable_reads/count.json +++ b/test/retryable_reads/count.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/countDocuments-serverErrors.json b/test/retryable_reads/countDocuments-serverErrors.json index f45eadfa0c..782ea5e4f1 100644 --- a/test/retryable_reads/countDocuments-serverErrors.json +++ b/test/retryable_reads/countDocuments-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -166,7 +167,7 @@ ] }, { - "description": "CountDocuments succeeds after NotMaster", + "description": "CountDocuments succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -235,7 +236,7 @@ ] }, { - "description": "CountDocuments succeeds after NotMasterNoSlaveOk", + "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -304,7 +305,7 @@ ] }, { - "description": "CountDocuments succeeds after NotMasterOrSecondary", + "description": "CountDocuments succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -787,7 +788,7 @@ ] }, { - "description": "CountDocuments fails after two NotMaster errors", + "description": "CountDocuments fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -856,7 +857,7 @@ ] }, { - "description": "CountDocuments fails after NotMaster when retryReads is false", + "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/countDocuments.json b/test/retryable_reads/countDocuments.json index b4ccf36684..57a64e45b7 100644 --- a/test/retryable_reads/countDocuments.json +++ b/test/retryable_reads/countDocuments.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/distinct-serverErrors.json b/test/retryable_reads/distinct-serverErrors.json index 50fd6a5505..d7c6018a62 100644 --- a/test/retryable_reads/distinct-serverErrors.json +++ b/test/retryable_reads/distinct-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -158,7 +159,7 @@ ] }, { - "description": "Distinct succeeds after NotMaster", + "description": "Distinct succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -221,7 +222,7 @@ ] }, { - "description": "Distinct succeeds after NotMasterNoSlaveOk", + "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -284,7 +285,7 @@ ] }, { - "description": "Distinct succeeds after NotMasterOrSecondary", + "description": "Distinct succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -725,7 +726,7 @@ ] }, { - "description": "Distinct fails after two NotMaster errors", + "description": "Distinct fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -785,7 +786,7 @@ ] }, { - "description": "Distinct fails after NotMaster when retryReads is false", + "description": "Distinct fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/distinct.json b/test/retryable_reads/distinct.json index b5885e27eb..1fd415da81 100644 --- a/test/retryable_reads/distinct.json +++ b/test/retryable_reads/distinct.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json b/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json index af4dc52ea8..756b02b3a8 100644 --- a/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json +++ b/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json @@ -158,7 +158,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMaster", + "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -228,7 +228,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMasterNoSlaveOk", + "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -298,7 +298,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMasterOrSecondary", + "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -788,7 +788,7 @@ ] }, { - "description": "EstimatedDocumentCount fails after two NotMaster errors", + "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -858,7 +858,7 @@ ] }, { - "description": "EstimatedDocumentCount fails after NotMaster when retryReads is false", + "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json b/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json index c11e609cd4..0b9a2615d1 100644 --- a/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json +++ b/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json @@ -110,7 +110,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMaster", + "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -150,7 +150,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMasterNoSlaveOk", + "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -190,7 +190,7 @@ ] }, { - "description": "EstimatedDocumentCount succeeds after NotMasterOrSecondary", + "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -470,7 +470,7 @@ ] }, { - "description": "EstimatedDocumentCount fails after two NotMaster errors", + "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -510,7 +510,7 @@ ] }, { - "description": "EstimatedDocumentCount fails after NotMaster when retryReads is false", + "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/find-serverErrors.json b/test/retryable_reads/find-serverErrors.json index 44ecf34d2f..f6b96c6dcb 100644 --- a/test/retryable_reads/find-serverErrors.json +++ b/test/retryable_reads/find-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -188,7 +189,7 @@ ] }, { - "description": "Find succeeds after NotMaster", + "description": "Find succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -262,7 +263,7 @@ ] }, { - "description": "Find succeeds after NotMasterNoSlaveOk", + "description": "Find succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -336,7 +337,7 @@ ] }, { - "description": "Find succeeds after NotMasterOrSecondary", + "description": "Find succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -854,7 +855,7 @@ ] }, { - "description": "Find fails after two NotMaster errors", + "description": "Find fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -911,7 +912,7 @@ ] }, { - "description": "Find fails after NotMaster when retryReads is false", + "description": "Find fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/find.json b/test/retryable_reads/find.json index 56479ff1d8..00d419c0da 100644 --- a/test/retryable_reads/find.json +++ b/test/retryable_reads/find.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/findOne-serverErrors.json b/test/retryable_reads/findOne-serverErrors.json index b8229483d2..d039ef247e 100644 --- a/test/retryable_reads/findOne-serverErrors.json +++ b/test/retryable_reads/findOne-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -148,7 +149,7 @@ ] }, { - "description": "FindOne succeeds after NotMaster", + "description": "FindOne succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -202,7 +203,7 @@ ] }, { - "description": "FindOne succeeds after NotMasterNoSlaveOk", + "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -256,7 +257,7 @@ ] }, { - "description": "FindOne succeeds after NotMasterOrSecondary", + "description": "FindOne succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -634,7 +635,7 @@ ] }, { - "description": "FindOne fails after two NotMaster errors", + "description": "FindOne fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -685,7 +686,7 @@ ] }, { - "description": "FindOne fails after NotMaster when retryReads is false", + "description": "FindOne fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/findOne.json b/test/retryable_reads/findOne.json index d296a9cdb5..b9deb73d2a 100644 --- a/test/retryable_reads/findOne.json +++ b/test/retryable_reads/findOne.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/gridfs-download-serverErrors.json b/test/retryable_reads/gridfs-download-serverErrors.json index 84e50e370c..cec3a5016a 100644 --- a/test/retryable_reads/gridfs-download-serverErrors.json +++ b/test/retryable_reads/gridfs-download-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -191,7 +192,7 @@ ] }, { - "description": "Download succeeds after NotMaster", + "description": "Download succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -261,7 +262,7 @@ ] }, { - "description": "Download succeeds after NotMasterNoSlaveOk", + "description": "Download succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -331,7 +332,7 @@ ] }, { - "description": "Download succeeds after NotMasterOrSecondary", + "description": "Download succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -821,7 +822,7 @@ ] }, { - "description": "Download fails after two NotMaster errors", + "description": "Download fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -876,7 +877,7 @@ ] }, { - "description": "Download fails after NotMaster when retryReads is false", + "description": "Download fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/gridfs-download.json b/test/retryable_reads/gridfs-download.json index a5c5ef4d55..4d0d5a17e4 100644 --- a/test/retryable_reads/gridfs-download.json +++ b/test/retryable_reads/gridfs-download.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/gridfs-downloadByName-serverErrors.json index de439ce4b2..a64230d38a 100644 --- a/test/retryable_reads/gridfs-downloadByName-serverErrors.json +++ b/test/retryable_reads/gridfs-downloadByName-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -179,7 +180,7 @@ ] }, { - "description": "DownloadByName succeeds after NotMaster", + "description": "DownloadByName succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -243,7 +244,7 @@ ] }, { - "description": "DownloadByName succeeds after NotMasterNoSlaveOk", + "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -307,7 +308,7 @@ ] }, { - "description": "DownloadByName succeeds after NotMasterOrSecondary", + "description": "DownloadByName succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -755,7 +756,7 @@ ] }, { - "description": "DownloadByName fails after two NotMaster errors", + "description": "DownloadByName fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -804,7 +805,7 @@ ] }, { - "description": "DownloadByName fails after NotMaster when retryReads is false", + "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/gridfs-downloadByName.json b/test/retryable_reads/gridfs-downloadByName.json index 0634a09bff..48f2168cfc 100644 --- a/test/retryable_reads/gridfs-downloadByName.json +++ b/test/retryable_reads/gridfs-downloadByName.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listCollectionNames-serverErrors.json b/test/retryable_reads/listCollectionNames-serverErrors.json index 27c13d6301..bbdce625ad 100644 --- a/test/retryable_reads/listCollectionNames-serverErrors.json +++ b/test/retryable_reads/listCollectionNames-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListCollectionNames succeeds after NotMaster", + "description": "ListCollectionNames succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListCollectionNames succeeds after NotMasterNoSlaveOk", + "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListCollectionNames succeeds after NotMasterOrSecondary", + "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListCollectionNames fails after two NotMaster errors", + "description": "ListCollectionNames fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListCollectionNames fails after NotMaster when retryReads is false", + "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listCollectionNames.json b/test/retryable_reads/listCollectionNames.json index 437fc36a40..73d96a3cf7 100644 --- a/test/retryable_reads/listCollectionNames.json +++ b/test/retryable_reads/listCollectionNames.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listCollectionObjects-serverErrors.json b/test/retryable_reads/listCollectionObjects-serverErrors.json index 3922713df9..ab469dfe30 100644 --- a/test/retryable_reads/listCollectionObjects-serverErrors.json +++ b/test/retryable_reads/listCollectionObjects-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListCollectionObjects succeeds after NotMaster", + "description": "ListCollectionObjects succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListCollectionObjects succeeds after NotMasterNoSlaveOk", + "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListCollectionObjects succeeds after NotMasterOrSecondary", + "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListCollectionObjects fails after two NotMaster errors", + "description": "ListCollectionObjects fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListCollectionObjects fails after NotMaster when retryReads is false", + "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listCollectionObjects.json b/test/retryable_reads/listCollectionObjects.json index 1f537b743f..1fb0f18437 100644 --- a/test/retryable_reads/listCollectionObjects.json +++ b/test/retryable_reads/listCollectionObjects.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listCollections-serverErrors.json b/test/retryable_reads/listCollections-serverErrors.json index 6972073b18..def9ac4595 100644 --- a/test/retryable_reads/listCollections-serverErrors.json +++ b/test/retryable_reads/listCollections-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListCollections succeeds after NotMaster", + "description": "ListCollections succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListCollections succeeds after NotMasterNoSlaveOk", + "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListCollections succeeds after NotMasterOrSecondary", + "description": "ListCollections succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListCollections fails after two NotMaster errors", + "description": "ListCollections fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListCollections fails after NotMaster when retryReads is false", + "description": "ListCollections fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listCollections.json b/test/retryable_reads/listCollections.json index a6b452e64f..2427883621 100644 --- a/test/retryable_reads/listCollections.json +++ b/test/retryable_reads/listCollections.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listDatabaseNames-serverErrors.json b/test/retryable_reads/listDatabaseNames-serverErrors.json index 11faf58bf0..1dd8e4415a 100644 --- a/test/retryable_reads/listDatabaseNames-serverErrors.json +++ b/test/retryable_reads/listDatabaseNames-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListDatabaseNames succeeds after NotMaster", + "description": "ListDatabaseNames succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListDatabaseNames succeeds after NotMasterNoSlaveOk", + "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListDatabaseNames succeeds after NotMasterOrSecondary", + "description": "ListDatabaseNames succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListDatabaseNames fails after two NotMaster errors", + "description": "ListDatabaseNames fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListDatabaseNames fails after NotMaster when retryReads is false", + "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listDatabaseNames.json b/test/retryable_reads/listDatabaseNames.json index b35f7ab185..b431f57016 100644 --- a/test/retryable_reads/listDatabaseNames.json +++ b/test/retryable_reads/listDatabaseNames.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listDatabaseObjects-serverErrors.json b/test/retryable_reads/listDatabaseObjects-serverErrors.json index 38082f2e28..bc497bb088 100644 --- a/test/retryable_reads/listDatabaseObjects-serverErrors.json +++ b/test/retryable_reads/listDatabaseObjects-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListDatabaseObjects succeeds after NotMaster", + "description": "ListDatabaseObjects succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListDatabaseObjects succeeds after NotMasterNoSlaveOk", + "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListDatabaseObjects succeeds after NotMasterOrSecondary", + "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListDatabaseObjects fails after two NotMaster errors", + "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListDatabaseObjects fails after NotMaster when retryReads is false", + "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listDatabaseObjects.json b/test/retryable_reads/listDatabaseObjects.json index cbd2c6763a..267fe921ca 100644 --- a/test/retryable_reads/listDatabaseObjects.json +++ b/test/retryable_reads/listDatabaseObjects.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listDatabases-serverErrors.json b/test/retryable_reads/listDatabases-serverErrors.json index 4047f749ff..ed7bcbc398 100644 --- a/test/retryable_reads/listDatabases-serverErrors.json +++ b/test/retryable_reads/listDatabases-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -93,7 +94,7 @@ ] }, { - "description": "ListDatabases succeeds after NotMaster", + "description": "ListDatabases succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -130,7 +131,7 @@ ] }, { - "description": "ListDatabases succeeds after NotMasterNoSlaveOk", + "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -167,7 +168,7 @@ ] }, { - "description": "ListDatabases succeeds after NotMasterOrSecondary", + "description": "ListDatabases succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -426,7 +427,7 @@ ] }, { - "description": "ListDatabases fails after two NotMaster errors", + "description": "ListDatabases fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -464,7 +465,7 @@ ] }, { - "description": "ListDatabases fails after NotMaster when retryReads is false", + "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listDatabases.json b/test/retryable_reads/listDatabases.json index 3cb8bbd083..69ef9788f8 100644 --- a/test/retryable_reads/listDatabases.json +++ b/test/retryable_reads/listDatabases.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listIndexNames-serverErrors.json b/test/retryable_reads/listIndexNames-serverErrors.json index 1a9ba83bc6..2d3265ec85 100644 --- a/test/retryable_reads/listIndexNames-serverErrors.json +++ b/test/retryable_reads/listIndexNames-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -97,7 +98,7 @@ ] }, { - "description": "ListIndexNames succeeds after NotMaster", + "description": "ListIndexNames succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -136,7 +137,7 @@ ] }, { - "description": "ListIndexNames succeeds after NotMasterNoSlaveOk", + "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -175,7 +176,7 @@ ] }, { - "description": "ListIndexNames succeeds after NotMasterOrSecondary", + "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -448,7 +449,7 @@ ] }, { - "description": "ListIndexNames fails after two NotMaster errors", + "description": "ListIndexNames fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -488,7 +489,7 @@ ] }, { - "description": "ListIndexNames fails after NotMaster when retryReads is false", + "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listIndexNames.json b/test/retryable_reads/listIndexNames.json index 912c706015..fbdb420f8a 100644 --- a/test/retryable_reads/listIndexNames.json +++ b/test/retryable_reads/listIndexNames.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/listIndexes-serverErrors.json b/test/retryable_reads/listIndexes-serverErrors.json index 16b61d535d..25c5b0e448 100644 --- a/test/retryable_reads/listIndexes-serverErrors.json +++ b/test/retryable_reads/listIndexes-serverErrors.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -97,7 +98,7 @@ ] }, { - "description": "ListIndexes succeeds after NotMaster", + "description": "ListIndexes succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -136,7 +137,7 @@ ] }, { - "description": "ListIndexes succeeds after NotMasterNoSlaveOk", + "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -175,7 +176,7 @@ ] }, { - "description": "ListIndexes succeeds after NotMasterOrSecondary", + "description": "ListIndexes succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -448,7 +449,7 @@ ] }, { - "description": "ListIndexes fails after two NotMaster errors", + "description": "ListIndexes fails after two NotWritablePrimary errors", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -488,7 +489,7 @@ ] }, { - "description": "ListIndexes fails after NotMaster when retryReads is false", + "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", "clientOptions": { "retryReads": false }, diff --git a/test/retryable_reads/listIndexes.json b/test/retryable_reads/listIndexes.json index f460ea7684..5cb620ae45 100644 --- a/test/retryable_reads/listIndexes.json +++ b/test/retryable_reads/listIndexes.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_reads/mapReduce.json b/test/retryable_reads/mapReduce.json index 9dc7a56f3c..e76aa76cbb 100644 --- a/test/retryable_reads/mapReduce.json +++ b/test/retryable_reads/mapReduce.json @@ -10,7 +10,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/bulkWrite-errorLabels.json b/test/retryable_writes/bulkWrite-errorLabels.json index 94ea3ea989..66c3ecb336 100644 --- a/test/retryable_writes/bulkWrite-errorLabels.json +++ b/test/retryable_writes/bulkWrite-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/bulkWrite-serverErrors.json b/test/retryable_writes/bulkWrite-serverErrors.json index d9561d568c..9d792ceafb 100644 --- a/test/retryable_writes/bulkWrite-serverErrors.json +++ b/test/retryable_writes/bulkWrite-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/deleteMany.json b/test/retryable_writes/deleteMany.json index 642ad11fb4..faa21c44f1 100644 --- a/test/retryable_writes/deleteMany.json +++ b/test/retryable_writes/deleteMany.json @@ -4,7 +4,8 @@ "minServerVersion": "3.6", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/deleteOne-errorLabels.json b/test/retryable_writes/deleteOne-errorLabels.json index bff02e1f94..c14692fd1a 100644 --- a/test/retryable_writes/deleteOne-errorLabels.json +++ b/test/retryable_writes/deleteOne-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/deleteOne-serverErrors.json b/test/retryable_writes/deleteOne-serverErrors.json index 69d225759c..4eab2fa296 100644 --- a/test/retryable_writes/deleteOne-serverErrors.json +++ b/test/retryable_writes/deleteOne-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/findOneAndDelete-errorLabels.json b/test/retryable_writes/findOneAndDelete-errorLabels.json index efa62dba2e..60e6e0a7bc 100644 --- a/test/retryable_writes/findOneAndDelete-errorLabels.json +++ b/test/retryable_writes/findOneAndDelete-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/findOneAndDelete-serverErrors.json b/test/retryable_writes/findOneAndDelete-serverErrors.json index 0785e5d035..4c10861614 100644 --- a/test/retryable_writes/findOneAndDelete-serverErrors.json +++ b/test/retryable_writes/findOneAndDelete-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/findOneAndReplace-errorLabels.json b/test/retryable_writes/findOneAndReplace-errorLabels.json index d9473d139a..afa2f47af4 100644 --- a/test/retryable_writes/findOneAndReplace-errorLabels.json +++ b/test/retryable_writes/findOneAndReplace-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/findOneAndReplace-serverErrors.json b/test/retryable_writes/findOneAndReplace-serverErrors.json index 6ebe057cfd..64c69e2f6d 100644 --- a/test/retryable_writes/findOneAndReplace-serverErrors.json +++ b/test/retryable_writes/findOneAndReplace-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/findOneAndUpdate-errorLabels.json b/test/retryable_writes/findOneAndUpdate-errorLabels.json index 1926d7fa5c..19b3a9e771 100644 --- a/test/retryable_writes/findOneAndUpdate-errorLabels.json +++ b/test/retryable_writes/findOneAndUpdate-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/findOneAndUpdate-serverErrors.json b/test/retryable_writes/findOneAndUpdate-serverErrors.json index e6e369c139..9f54604992 100644 --- a/test/retryable_writes/findOneAndUpdate-serverErrors.json +++ b/test/retryable_writes/findOneAndUpdate-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/insertMany-errorLabels.json b/test/retryable_writes/insertMany-errorLabels.json index c78946e90a..65fd377fa6 100644 --- a/test/retryable_writes/insertMany-errorLabels.json +++ b/test/retryable_writes/insertMany-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/insertMany-serverErrors.json b/test/retryable_writes/insertMany-serverErrors.json index 1c6ebafc28..7b45b506c9 100644 --- a/test/retryable_writes/insertMany-serverErrors.json +++ b/test/retryable_writes/insertMany-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/insertOne-errorLabels.json b/test/retryable_writes/insertOne-errorLabels.json index 9b8d13d524..d90ac5dfbd 100644 --- a/test/retryable_writes/insertOne-errorLabels.json +++ b/test/retryable_writes/insertOne-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/insertOne-serverErrors.json index cb1e6f826b..e8571f8cf9 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/insertOne-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], @@ -117,7 +118,7 @@ } }, { - "description": "InsertOne succeeds after NotMaster", + "description": "InsertOne succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -166,7 +167,7 @@ } }, { - "description": "InsertOne succeeds after NotMasterOrSecondary", + "description": "InsertOne succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -215,7 +216,7 @@ } }, { - "description": "InsertOne succeeds after NotMasterNoSlaveOk", + "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { diff --git a/test/retryable_writes/replaceOne-errorLabels.json b/test/retryable_writes/replaceOne-errorLabels.json index 06867e5159..6029b875dc 100644 --- a/test/retryable_writes/replaceOne-errorLabels.json +++ b/test/retryable_writes/replaceOne-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/replaceOne-serverErrors.json b/test/retryable_writes/replaceOne-serverErrors.json index af18bcf1a2..7457228cd7 100644 --- a/test/retryable_writes/replaceOne-serverErrors.json +++ b/test/retryable_writes/replaceOne-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/updateMany.json b/test/retryable_writes/updateMany.json index 14288c2860..46fef73e74 100644 --- a/test/retryable_writes/updateMany.json +++ b/test/retryable_writes/updateMany.json @@ -4,7 +4,8 @@ "minServerVersion": "3.6", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/updateOne-errorLabels.json b/test/retryable_writes/updateOne-errorLabels.json index 4a6be3ffba..5bd00cde90 100644 --- a/test/retryable_writes/updateOne-errorLabels.json +++ b/test/retryable_writes/updateOne-errorLabels.json @@ -4,7 +4,8 @@ "minServerVersion": "4.3.1", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/retryable_writes/updateOne-serverErrors.json b/test/retryable_writes/updateOne-serverErrors.json index bb442eb68a..1160198019 100644 --- a/test/retryable_writes/updateOne-serverErrors.json +++ b/test/retryable_writes/updateOne-serverErrors.json @@ -9,7 +9,8 @@ { "minServerVersion": "4.1.7", "topology": [ - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/server_selection/rtt/first_value.json b/test/server_selection/rtt/first_value.json index 2e92195606..421944da36 100644 --- a/test/server_selection/rtt/first_value.json +++ b/test/server_selection/rtt/first_value.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": "NULL", - "new_avg_rtt": 10, - "new_rtt_ms": 10 + "avg_rtt_ms": "NULL", + "new_rtt_ms": 10, + "new_avg_rtt": 10 } diff --git a/test/server_selection/rtt/first_value_zero.json b/test/server_selection/rtt/first_value_zero.json index 1953a742a4..d5bfc41b25 100644 --- a/test/server_selection/rtt/first_value_zero.json +++ b/test/server_selection/rtt/first_value_zero.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": "NULL", - "new_avg_rtt": 0, - "new_rtt_ms": 0 + "avg_rtt_ms": "NULL", + "new_rtt_ms": 0, + "new_avg_rtt": 0 } diff --git a/test/server_selection/rtt/value_test_1.json b/test/server_selection/rtt/value_test_1.json index bfa3eb32bf..ed6a80ce29 100644 --- a/test/server_selection/rtt/value_test_1.json +++ b/test/server_selection/rtt/value_test_1.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 0, - "new_avg_rtt": 1.0, - "new_rtt_ms": 5 + "avg_rtt_ms": 0, + "new_rtt_ms": 5, + "new_avg_rtt": 1 } diff --git a/test/server_selection/rtt/value_test_2.json b/test/server_selection/rtt/value_test_2.json index 0614cc3f03..ccb5a0173b 100644 --- a/test/server_selection/rtt/value_test_2.json +++ b/test/server_selection/rtt/value_test_2.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 3.1, - "new_avg_rtt": 9.68, - "new_rtt_ms": 36 + "avg_rtt_ms": 3.1, + "new_rtt_ms": 36, + "new_avg_rtt": 9.68 } diff --git a/test/server_selection/rtt/value_test_3.json b/test/server_selection/rtt/value_test_3.json index c42edc1087..6921c94d36 100644 --- a/test/server_selection/rtt/value_test_3.json +++ b/test/server_selection/rtt/value_test_3.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 9.12, - "new_avg_rtt": 9.12, - "new_rtt_ms": 9.12 + "avg_rtt_ms": 9.12, + "new_rtt_ms": 9.12, + "new_avg_rtt": 9.12 } diff --git a/test/server_selection/rtt/value_test_4.json b/test/server_selection/rtt/value_test_4.json index f65b362eca..d9ce3800b8 100644 --- a/test/server_selection/rtt/value_test_4.json +++ b/test/server_selection/rtt/value_test_4.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 1, - "new_avg_rtt": 200.8, - "new_rtt_ms": 1000 + "avg_rtt_ms": 1, + "new_rtt_ms": 1000, + "new_avg_rtt": 200.8 } diff --git a/test/server_selection/rtt/value_test_5.json b/test/server_selection/rtt/value_test_5.json index 4c86e05a24..9ae33bc143 100644 --- a/test/server_selection/rtt/value_test_5.json +++ b/test/server_selection/rtt/value_test_5.json @@ -1,5 +1,5 @@ { - "avg_rtt_ms": 0, - "new_avg_rtt": 0.05, - "new_rtt_ms": 0.25 + "avg_rtt_ms": 0, + "new_rtt_ms": 0.25, + "new_avg_rtt": 0.05 } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json index 9677494ad6..aa48679e86 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json index be0d571fcd..1fcfd52a47 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_multiple.json @@ -1,68 +1,68 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 20, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json index 9aac3327d1..b72895d8a8 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Nearest_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json new file mode 100644 index 0000000000..4d286af830 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimary.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "PossiblePrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json new file mode 100644 index 0000000000..bf9c70b420 --- /dev/null +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PossiblePrimaryNearest.json @@ -0,0 +1,21 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "PossiblePrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json index a4102fe823..f0f3fa9ea1 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Primary.json @@ -1,32 +1,29 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Primary", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json index 53b66c0b82..f87ef4f617 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred.json @@ -1,58 +1,58 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json index 6a3702d00d..ee96229927 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json index 325a7550bb..3b8f1e97cd 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json index 9012b76111..c3142ec115 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json index bad36648bf..a2c18bb7d2 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json index 3cfe980786..b319918e92 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc", - "rack": "one" - }, - { - "other_tag": "doesntexist" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "two", + "data_center": "sf" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "sf", - "rack": "two" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc", + "rack": "one" + }, + { + "other_tag": "doesntexist" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json index 2b0cfb7eec..8f64d95ecb 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_multi_tags2.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc", - "rack": "one" - }, - { - "other_tag": "doesntexist" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "two", + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "one" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc", - "rack": "two" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc", + "rack": "one" + }, + { + "other_tag": "doesntexist" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "rack": "one", + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json index 4d8bd18917..4931e1019a 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/read/Secondary_non_matching.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json index db2fb398d1..e136cf12a4 100644 --- a/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetNoPrimary/write/SecondaryPreferred.json @@ -1,34 +1,34 @@ { - "in_latency_window": [], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetNoPrimary" - } + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json index 5975f770ce..cfe4965938 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest.json @@ -1,76 +1,76 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json index f174c1ba33..67296d434f 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_multiple.json @@ -1,84 +1,84 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 10, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 20, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 10, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 20, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json index af890cd596..a3a85c9a83 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Nearest_non_matching.json @@ -1,42 +1,42 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Nearest", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json index de001ca0bc..8da1482e96 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Primary.json @@ -1,58 +1,55 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Primary", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json index b413745dd2..306171f3a2 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred.json @@ -1,58 +1,58 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - {} - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + {} + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json index 12b20040a8..722f1cfb1a 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "PrimaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json index faf3b70a6b..23864a278c 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary.json @@ -1,68 +1,68 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json index fd549a2327..d07c24218d 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred.json @@ -1,68 +1,68 @@ { - "in_latency_window": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json index 948a728633..f893cc9f82 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_non_matching.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json index b391864bdc..a74a2dbf33 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/SecondaryPreferred_tags.json @@ -1,52 +1,52 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "sf" } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - }, - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "sf" - }, - "type": "RSSecondary" - } - ], - "type": "ReplicaSetWithPrimary" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json index 27213d3591..1272180666 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/read/Secondary_non_matching.json @@ -1,42 +1,42 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "Secondary", - "tag_sets": [ - { - "data_center": "sf" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" - } + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" + } + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "sf" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json b/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json index a57ee31be9..65ab3dc640 100644 --- a/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/ReplicaSetWithPrimary/write/SecondaryPreferred.json @@ -1,60 +1,60 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + { + "address": "b:27017", + "avg_rtt_ms": 5, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" + }, + { + "address": "c:27017", + "avg_rtt_ms": 100, + "type": "RSSecondary", + "tags": { + "data_center": "nyc" } - ], - "topology_description": { - "servers": [ - { - "address": "b:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "c:27017", - "avg_rtt_ms": 100, - "tags": { - "data_center": "nyc" - }, - "type": "RSSecondary" - }, - { - "address": "a:27017", - "avg_rtt_ms": 26, - "tags": { - "data_center": "nyc" - }, - "type": "RSPrimary" - } - ], - "type": "ReplicaSetWithPrimary" + }, + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 26, + "type": "RSPrimary", + "tags": { + "data_center": "nyc" + } } + ] } diff --git a/test/server_selection/server_selection/Sharded/read/Nearest.json b/test/server_selection/server_selection/Sharded/read/Nearest.json new file mode 100644 index 0000000000..705a784a0b --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Nearest.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Primary.json b/test/server_selection/server_selection/Sharded/read/Primary.json new file mode 100644 index 0000000000..7a321be2bb --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Primary.json @@ -0,0 +1,40 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json b/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json new file mode 100644 index 0000000000..e9bc1421f9 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/PrimaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/Secondary.json b/test/server_selection/server_selection/Sharded/read/Secondary.json new file mode 100644 index 0000000000..49813f7b9e --- /dev/null +++ b/test/server_selection/server_selection/Sharded/read/Secondary.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json b/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json index e45556f4cb..62fa13f297 100644 --- a/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Sharded/read/SecondaryPreferred.json @@ -1,60 +1,45 @@ { - "in_latency_window": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "topology_description": { - "servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "type": "Sharded" + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] } diff --git a/test/server_selection/server_selection/Sharded/write/Nearest.json b/test/server_selection/server_selection/Sharded/write/Nearest.json new file mode 100644 index 0000000000..aef7f02ec7 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Nearest.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Primary.json b/test/server_selection/server_selection/Sharded/write/Primary.json new file mode 100644 index 0000000000..f6ce2e75c1 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Primary.json @@ -0,0 +1,40 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json b/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json new file mode 100644 index 0000000000..25f56a5359 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/PrimaryPreferred.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/Secondary.json b/test/server_selection/server_selection/Sharded/write/Secondary.json new file mode 100644 index 0000000000..1fa026f716 --- /dev/null +++ b/test/server_selection/server_selection/Sharded/write/Secondary.json @@ -0,0 +1,45 @@ +{ + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] +} diff --git a/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json b/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json index 4262ce2efb..f9467472aa 100644 --- a/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Sharded/write/SecondaryPreferred.json @@ -1,60 +1,45 @@ { - "in_latency_window": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - } - ], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "topology_description": { - "servers": [ - { - "address": "g:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "nyc" - }, - "type": "Mongos" - }, - { - "address": "h:27017", - "avg_rtt_ms": 35, - "tags": { - "data_center": "dc" - }, - "type": "Mongos" - } - ], - "type": "Sharded" + "topology_description": { + "type": "Sharded", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + }, + { + "address": "h:27017", + "avg_rtt_ms": 35, + "type": "Mongos" } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 5, + "type": "Mongos" + } + ] } diff --git a/test/server_selection/server_selection/Single/read/SecondaryPreferred.json b/test/server_selection/server_selection/Single/read/SecondaryPreferred.json index 86c704c147..e60496dfdf 100644 --- a/test/server_selection/server_selection/Single/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Single/read/SecondaryPreferred.json @@ -1,44 +1,44 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" } - ], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "type": "Single" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } } + ] } diff --git a/test/server_selection/server_selection/Single/write/SecondaryPreferred.json b/test/server_selection/server_selection/Single/write/SecondaryPreferred.json index be8771caeb..34fe91d5a2 100644 --- a/test/server_selection/server_selection/Single/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Single/write/SecondaryPreferred.json @@ -1,44 +1,44 @@ { - "in_latency_window": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" + "topology_description": { + "type": "Single", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" } - ], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "topology_description": { - "servers": [ - { - "address": "a:27017", - "avg_rtt_ms": 5, - "tags": { - "data_center": "dc" - }, - "type": "Standalone" - } - ], - "type": "Single" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } + } + ], + "in_latency_window": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "Standalone", + "tags": { + "data_center": "dc" + } } + ] } diff --git a/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json b/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json index ce0d8376b3..0ae8075fba 100644 --- a/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Unknown/read/SecondaryPreferred.json @@ -1,17 +1,17 @@ { - "in_latency_window": [], - "operation": "read", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [], - "type": "Unknown" - } + "topology_description": { + "type": "Unknown", + "servers": [] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json b/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json index 4d6f79b46b..a70eece62c 100644 --- a/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json +++ b/test/server_selection/server_selection/Unknown/write/SecondaryPreferred.json @@ -1,17 +1,17 @@ { - "in_latency_window": [], - "operation": "write", - "read_preference": { - "mode": "SecondaryPreferred", - "tag_sets": [ - { - "data_center": "nyc" - } - ] - }, - "suitable_servers": [], - "topology_description": { - "servers": [], - "type": "Unknown" - } + "topology_description": { + "type": "Unknown", + "servers": [] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [], + "in_latency_window": [] } diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 5f615e613b..a030ca400b 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1205,7 +1205,9 @@ def run_scenario(self): finally: # Check for expected events results = self.listener.results - for idx, expectation in enumerate(test.get("expectations", [])): + # Note: expectations may be missing, null, or a list of events. + # Extra events emitted by the test are intentionally ignored. + for idx, expectation in enumerate(test.get("expectations") or []): for event_type, event_desc in expectation.items(): results_key = event_type.split("_")[1] event = results[results_key][idx] if len(results[results_key]) > idx else None diff --git a/test/test_transactions.py b/test/test_transactions.py index 2e154d4bc5..b9c292caa7 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -39,13 +39,10 @@ wait_until, OvertCommandListener, TestCreator) from test.utils_spec_runner import SpecRunner -from test.unified_format import generate_test_classes # Location of JSON test specifications. TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'transactions', 'legacy') -UNIFIED_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions', 'unified') _TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG') @@ -477,9 +474,5 @@ def run_scenario(self): TestTransactionsConvenientAPI.TEST_PATH).create_tests() -# Generate unified tests. -globals().update(generate_test_classes(UNIFIED_TEST_PATH, module=__name__)) - - if __name__ == "__main__": unittest.main() diff --git a/test/test_crud_v2.py b/test/test_transactions_unified.py similarity index 54% rename from test/test_crud_v2.py rename to test/test_transactions_unified.py index 6d9514f91c..37e8d06153 100644 --- a/test/test_crud_v2.py +++ b/test/test_transactions_unified.py @@ -1,4 +1,4 @@ -# Copyright 2019-present MongoDB, Inc. +# Copyright 2021-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test the collection module.""" +"""Test the Transactions unified spec tests.""" import os import sys @@ -20,30 +20,14 @@ sys.path[0:0] = [""] from test import unittest -from test.crud_v2_format import TestCrudV2 -from test.utils import TestCreator - +from test.unified_format import generate_test_classes # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'crud', 'v2') - - -class TestSpec(TestCrudV2): - # Default test database and collection names. - TEST_DB = 'testdb' - TEST_COLLECTION = 'testcollection' - - -def create_test(scenario_def, test, name): - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -TestCreator(create_test, TestSpec, _TEST_PATH).create_tests() +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'transactions', 'unified') +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/transactions-convenient-api/commit-retry.json b/test/transactions-convenient-api/commit-retry.json index 312116253b..02e38460d0 100644 --- a/test/transactions-convenient-api/commit-retry.json +++ b/test/transactions-convenient-api/commit-retry.json @@ -293,7 +293,7 @@ } }, { - "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotMaster)", + "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", "failPoint": { "configureFailPoint": "failCommand", "mode": { diff --git a/test/transactions/legacy/error-labels.json b/test/transactions/legacy/error-labels.json index 2d3eed3ccc..a57f216b9b 100644 --- a/test/transactions/legacy/error-labels.json +++ b/test/transactions/legacy/error-labels.json @@ -10,7 +10,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -102,7 +103,7 @@ } }, { - "description": "NotMaster errors contain transient label", + "description": "NotWritablePrimary errors contain transient label", "failPoint": { "configureFailPoint": "failCommand", "mode": { diff --git a/test/transactions/legacy/mongos-pin-auto.json b/test/transactions/legacy/mongos-pin-auto.json index f6ede52687..037f212f49 100644 --- a/test/transactions/legacy/mongos-pin-auto.json +++ b/test/transactions/legacy/mongos-pin-auto.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", diff --git a/test/transactions/legacy/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json index 35ef45a039..02c2002f75 100644 --- a/test/transactions/legacy/mongos-recovery-token.json +++ b/test/transactions/legacy/mongos-recovery-token.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -306,7 +307,8 @@ "data": { "failCommands": [ "commitTransaction", - "isMaster" + "isMaster", + "hello" ], "closeConnection": true } diff --git a/test/transactions/legacy/pin-mongos.json b/test/transactions/legacy/pin-mongos.json index 8e9d049d04..485a3d9322 100644 --- a/test/transactions/legacy/pin-mongos.json +++ b/test/transactions/legacy/pin-mongos.json @@ -4,7 +4,8 @@ "minServerVersion": "4.1.8", "topology": [ "sharded" - ] + ], + "serverless": "forbid" } ], "database_name": "transaction-tests", @@ -1106,7 +1107,8 @@ "data": { "failCommands": [ "insert", - "isMaster" + "isMaster", + "hello" ], "closeConnection": true } diff --git a/test/transactions/legacy/retryable-abort.json b/test/transactions/legacy/retryable-abort.json index 5a3aaa7bf8..b712e80862 100644 --- a/test/transactions/legacy/retryable-abort.json +++ b/test/transactions/legacy/retryable-abort.json @@ -402,7 +402,7 @@ } }, { - "description": "abortTransaction succeeds after NotMaster", + "description": "abortTransaction succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -506,7 +506,7 @@ } }, { - "description": "abortTransaction succeeds after NotMasterOrSecondary", + "description": "abortTransaction succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -610,7 +610,7 @@ } }, { - "description": "abortTransaction succeeds after NotMasterNoSlaveOk", + "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { diff --git a/test/transactions/legacy/retryable-commit.json b/test/transactions/legacy/retryable-commit.json index 4895c6e0c2..d83a1d9f52 100644 --- a/test/transactions/legacy/retryable-commit.json +++ b/test/transactions/legacy/retryable-commit.json @@ -624,7 +624,7 @@ } }, { - "description": "commitTransaction succeeds after NotMaster", + "description": "commitTransaction succeeds after NotWritablePrimary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -735,7 +735,7 @@ } }, { - "description": "commitTransaction succeeds after NotMasterOrSecondary", + "description": "commitTransaction succeeds after NotPrimaryOrSecondary", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -846,7 +846,7 @@ } }, { - "description": "commitTransaction succeeds after NotMasterNoSlaveOk", + "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", "failPoint": { "configureFailPoint": "failCommand", "mode": { diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json index 33127198a8..012d1dca85 100644 --- a/test/transactions/unified/mongos-unpin.json +++ b/test/transactions/unified/mongos-unpin.json @@ -1,6 +1,6 @@ { "description": "mongos-unpin", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.2", @@ -50,6 +50,11 @@ "tests": [ { "description": "unpin after TransientTransctionError error on commit", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "startTransaction", @@ -138,6 +143,11 @@ }, { "description": "unpin after TransientTransctionError error on abort", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "startTransaction", diff --git a/test/unified_format.py b/test/unified_format.py index 0aea11b7ad..ad1e73a519 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -511,6 +511,25 @@ def match_event(self, expectation, actual): 'Unsupported event type %s' % (event_type,)) +def coerce_result(opname, result): + """Convert a pymongo result into the spec's result format.""" + if opname == 'bulkWrite': + return parse_bulk_write_result(result) + if opname == 'insertOne': + return {'insertedId': result.inserted_id} + if opname == 'insertMany': + return {idx: _id for idx, _id in enumerate(result.inserted_ids)} + if opname in ('deleteOne', 'deleteMany'): + return {'deletedCount': result.deleted_count} + if opname in ('updateOne', 'updateMany', 'replaceOne'): + return { + 'matchedCount': result.matched_count, + 'modifiedCount': result.modified_count, + 'upsertedCount': 0 if result.upserted_id is None else 1, + } + return result + + class UnifiedSpecTestMixinV1(IntegrationTest): """Mixin class to run test cases from test specification files. @@ -567,11 +586,6 @@ def setUpClass(cls): raise unittest.SkipTest( "MMAPv1 does not support retryWrites=True") - @classmethod - def tearDownClass(cls): - super(UnifiedSpecTestMixinV1, cls).tearDownClass() - cls.client.close() - def setUp(self): super(UnifiedSpecTestMixinV1, self).setUp() @@ -688,34 +702,11 @@ def _databaseOperation_aggregate(self, target, *args, **kwargs): def _collectionOperation_aggregate(self, target, *args, **kwargs): return self.__entityOperation_aggregate(target, *args, **kwargs) - def _collectionOperation_bulkWrite(self, target, *args, **kwargs): - self.__raise_if_unsupported('bulkWrite', target, Collection) - write_result = target.bulk_write(*args, **kwargs) - return parse_bulk_write_result(write_result) - def _collectionOperation_find(self, target, *args, **kwargs): self.__raise_if_unsupported('find', target, Collection) find_cursor = target.find(*args, **kwargs) return list(find_cursor) - def _collectionOperation_findOneAndReplace(self, target, *args, **kwargs): - self.__raise_if_unsupported('findOneAndReplace', target, Collection) - return target.find_one_and_replace(*args, **kwargs) - - def _collectionOperation_findOneAndUpdate(self, target, *args, **kwargs): - self.__raise_if_unsupported('findOneAndReplace', target, Collection) - return target.find_one_and_update(*args, **kwargs) - - def _collectionOperation_insertMany(self, target, *args, **kwargs): - self.__raise_if_unsupported('insertMany', target, Collection) - result = target.insert_many(*args, **kwargs) - return {idx: _id for idx, _id in enumerate(result.inserted_ids)} - - def _collectionOperation_insertOne(self, target, *args, **kwargs): - self.__raise_if_unsupported('insertOne', target, Collection) - result = target.insert_one(*args, **kwargs) - return {'insertedId': result.inserted_id} - def _sessionOperation_withTransaction(self, target, *args, **kwargs): if client_context.storage_engine == 'mmapv1': self.skipTest('MMAPv1 does not support document-level locking') @@ -780,7 +771,8 @@ def run_entity_operation(self, spec): raise if 'expectResult' in spec: - self.match_evaluator.match_result(spec['expectResult'], result) + actual = coerce_result(opname, result) + self.match_evaluator.match_result(spec['expectResult'], actual) save_as_entity = spec.get('saveResultAsEntity') if save_as_entity: diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index 12b8c7e2bd..8bb05cc721 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -168,6 +168,76 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "loadBalanced=true", + "uri": "mongodb://example.com/?loadBalanced=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true + } + }, + { + "description": "loadBalanced=true with directConnection=false", + "uri": "mongodb://example.com/?loadBalanced=true&directConnection=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true, + "directConnection": false + } + }, + { + "description": "loadBalanced=false", + "uri": "mongodb://example.com/?loadBalanced=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": false + } + }, + { + "description": "Invalid loadBalanced value", + "uri": "mongodb://example.com/?loadBalanced=1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true with multiple hosts causes an error", + "uri": "mongodb://example1,example2/?loadBalanced=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true with directConnection=true causes an error", + "uri": "mongodb://example.com/?loadBalanced=true&directConnection=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "loadBalanced=true with replicaSet causes an error", + "uri": "mongodb://example.com/?loadBalanced=true&replicaSet=replset", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} } ] } diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index be401f55d5..df90f11dfc 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -2,13 +2,15 @@ "tests": [ { "description": "Valid connection pool options are parsed correctly", - "uri": "mongodb://example.com/?maxIdleTimeMS=50000", + "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3", "valid": true, "warning": false, "hosts": null, "auth": null, "options": { - "maxIdleTimeMS": 50000 + "maxIdleTimeMS": 50000, + "maxPoolSize": 5, + "minPoolSize": 3 } }, { @@ -28,6 +30,17 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "minPoolSize=0 does not error", + "uri": "mongodb://example.com/?minPoolSize=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "minPoolSize": 0 + } } ] } diff --git a/test/uri_options/read-preference-options.json b/test/uri_options/read-preference-options.json index df8c0c0eb8..cdac6a63c3 100644 --- a/test/uri_options/read-preference-options.json +++ b/test/uri_options/read-preference-options.json @@ -23,7 +23,7 @@ }, { "description": "Single readPreferenceTags is parsed as array of size one", - "uri": "mongodb://example.com/?readPreferenceTags=dc:ny", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:ny", "valid": true, "warning": false, "hosts": null, diff --git a/test/utils.py b/test/utils.py index 4a46aae21b..f3d7dbe2aa 100644 --- a/test/utils.py +++ b/test/utils.py @@ -394,7 +394,8 @@ def _ensure_min_max_server_version(self, scenario_def, method): @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( - run_on_req.get('topology', ['single', 'replicaset', 'sharded'])) + run_on_req.get('topology', ['single', 'replicaset', 'sharded', + 'load-balanced'])) @staticmethod def min_server_version(run_on_req): diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 21a4332f42..2f1d99b0f7 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -59,7 +59,7 @@ def make_last_write_date(server): def make_server_description(server, hosts): """Make a ServerDescription from server info in a JSON test.""" server_type = server['type'] - if server_type == "Unknown": + if server_type in ("Unknown", "PossiblePrimary"): return ServerDescription(clean_node(server['address']), IsMaster({})) ismaster_response = {'ok': True, 'hosts': hosts} From 5bf15c8e1885739a396cfaecc15e410a318bcfc2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 18 May 2021 14:12:49 -0700 Subject: [PATCH 0350/2111] PYTHON-2672 SDAM, CMAP, and server selection changes for load balancers (#621) Disable SRV Polling, SDAM compatibility check, logicalSessionTimeoutMinutes check. server session pool pruning, server selection, and server monitoring. A ServerType of LoadBalancer MUST be considered a data-bearing server. "drivers MUST emit the following series of SDAM events" section. Send loadBalanced:True with handshakes, validate serviceId. Add topologyVersion fallback when serviceId is missing. Don't mark load balancers unknown. --- pymongo/client_options.py | 4 ++- pymongo/ismaster.py | 11 +++++-- pymongo/mongo_client.py | 3 +- pymongo/pool.py | 28 +++++++++++++++-- pymongo/server.py | 3 +- pymongo/server_description.py | 3 +- pymongo/server_type.py | 2 +- pymongo/settings.py | 4 ++- pymongo/topology.py | 56 ++++++++++++++++++++++----------- pymongo/topology_description.py | 50 ++++++++++++++++------------- 10 files changed, 113 insertions(+), 51 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 346f7ad6ef..f53a9642e8 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -130,6 +130,7 @@ def _parse_pool_options(options): options.get('compressors', []), options.get('zlibcompressionlevel', -1)) ssl_context, ssl_match_hostname = _parse_ssl_options(options) + load_balanced = options.get('loadbalanced') return PoolOptions(max_pool_size, min_pool_size, max_idle_time_seconds, @@ -140,7 +141,8 @@ def _parse_pool_options(options): appname, driver, compression_settings, - server_api=server_api) + server_api=server_api, + load_balanced=load_balanced) class ClientOptions(object): diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index 0c38fa6b18..59eedec227 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -26,7 +26,9 @@ def _get_server_type(doc): if not doc.get('ok'): return SERVER_TYPE.Unknown - if doc.get('isreplicaset'): + if doc.get('serviceId'): + return SERVER_TYPE.LoadBalancer + elif doc.get('isreplicaset'): return SERVER_TYPE.RSGhost elif doc.get('setName'): if doc.get('hidden'): @@ -58,7 +60,8 @@ def __init__(self, doc, awaitable=False): self._is_writable = self._server_type in ( SERVER_TYPE.RSPrimary, SERVER_TYPE.Standalone, - SERVER_TYPE.Mongos) + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer) self._is_readable = ( self.server_type == SERVER_TYPE.RSSecondary @@ -185,3 +188,7 @@ def topology_version(self): @property def awaitable(self): return self._awaitable + + @property + def service_id(self): + return self._doc.get('serviceId') diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index c3098cea7a..ebd11970a2 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -926,7 +926,8 @@ def address(self): 'Cannot use "address" property when load balancing among' ' mongoses, use "nodes" instead.') if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single): + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced): return None return self._server_property('address') diff --git a/pymongo/pool.py b/pymongo/pool.py index a0dda5d9f9..728fec0f60 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -262,7 +262,7 @@ class PoolOptions(object): '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', - '__pause_enabled', '__server_api') + '__pause_enabled', '__server_api', '__load_balanced') def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, @@ -272,7 +272,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, ssl_match_hostname=True, socket_keepalive=True, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, - pause_enabled=True, server_api=None): + pause_enabled=True, server_api=None, load_balanced=None): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds @@ -290,6 +290,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__max_connecting = max_connecting self.__pause_enabled = pause_enabled self.__server_api = server_api + self.__load_balanced = load_balanced self.__metadata = copy.deepcopy(_METADATA) if appname: self.__metadata['application'] = {'name': appname} @@ -452,6 +453,12 @@ def server_api(self): """ return self.__server_api + @property + def load_balanced(self): + """True if this Pool is configured in load balanced mode. + """ + return self.__load_balanced + def _negotiate_creds(all_credentials): """Return one credential that needs mechanism negotiation, if any. @@ -531,6 +538,8 @@ def __init__(self, sock, pool, address, id): self.cancel_context = _CancellationContext() self.opts = pool.opts self.more_to_come = False + # For load balancer support. + self.service_id = None def hello_cmd(self): if self.opts.server_api: @@ -551,6 +560,8 @@ def _ismaster(self, cluster_time, topology_version, cmd['client'] = self.opts.metadata if self.compression_settings: cmd['compression'] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd['loadBalanced'] = True elif topology_version is not None: cmd['topologyVersion'] = topology_version cmd['maxAwaitTimeMS'] = int(heartbeat_frequency*1000) @@ -574,6 +585,10 @@ def _ismaster(self, cluster_time, topology_version, doc = self.command('admin', cmd, publish_events=False, exhaust_allowed=awaitable) + # PYTHON-2712 will remove this topologyVersion fallback logic. + if self.opts.load_balanced: + process_id = doc.get('topologyVersion', {}).get('processId') + doc.setdefault('serviceId', process_id) ismaster = IsMaster(doc, awaitable=awaitable) self.is_writable = ismaster.is_writable self.max_wire_version = ismaster.max_wire_version @@ -595,6 +610,12 @@ def _ismaster(self, cluster_time, topology_version, auth_ctx.parse_response(ismaster) if auth_ctx.speculate_succeeded(): self.auth_ctx[auth_ctx.credentials] = auth_ctx + if self.opts.load_balanced: + if not ismaster.service_id: + raise ConfigurationError( + 'Driver attempted to initialize in load balancing mode' + ' but the server does not support this mode') + self.service_id = ismaster.service_id return ismaster def _next_reply(self): @@ -1113,7 +1134,8 @@ def _reset(self, close, pause=True): with self.size_cond: if self.closed: return - if self.opts.pause_enabled and pause: + if (self.opts.pause_enabled and pause and + not self.opts.load_balanced): old_state, self.state = self.state, PoolState.PAUSED self.generation += 1 newpid = os.getpid() diff --git a/pymongo/server.py b/pymongo/server.py index c5ddd9bea2..fbfddae2e2 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -46,7 +46,8 @@ def open(self): Multiple calls have no effect. """ - self._monitor.open() + if not self._pool.opts.load_balanced: + self._monitor.open() def reset(self): """Clear the connection pool.""" diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 897faa3d33..5dc8222fef 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -206,7 +206,8 @@ def retryable_writes_supported(self): """Checks if this server supports retryable writes.""" return ( self._ls_timeout_minutes is not None and - self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary)) + self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary, + SERVER_TYPE.LoadBalancer)) @property def retryable_reads_supported(self): diff --git a/pymongo/server_type.py b/pymongo/server_type.py index c231aa04c2..101f9dba4c 100644 --- a/pymongo/server_type.py +++ b/pymongo/server_type.py @@ -20,4 +20,4 @@ SERVER_TYPE = namedtuple('ServerType', ['Unknown', 'Mongos', 'RSPrimary', 'RSSecondary', 'RSArbiter', 'RSOther', 'RSGhost', - 'Standalone'])(*range(8)) + 'Standalone', 'LoadBalancer'])(*range(9)) diff --git a/pymongo/settings.py b/pymongo/settings.py index 91807ffc00..c866d16718 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -132,7 +132,9 @@ def load_balanced(self): return self._load_balanced def get_topology_type(self): - if self.direct: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: return TOPOLOGY_TYPE.Single elif self.replica_set_name is not None: return TOPOLOGY_TYPE.ReplicaSetNoPrimary diff --git a/pymongo/topology.py b/pymongo/topology.py index a101ec9c1c..446bb9353d 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -34,6 +34,7 @@ PyMongoError, ServerSelectionTimeoutError, WriteError) +from pymongo.ismaster import IsMaster from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -136,7 +137,8 @@ def target(): executor.open() self._srv_monitor = None - if self._settings.fqdn is not None: + if (self._settings.fqdn is not None and + not self._settings.load_balanced): self._srv_monitor = SrvMonitor(self, self._settings) def open(self): @@ -489,29 +491,38 @@ def pop_all_sessions(self): with self._lock: return self._session_pool.pop_all() - def get_server_session(self): - """Start or resume a server session, or raise ConfigurationError.""" - with self._lock: - session_timeout = self._description.logical_session_timeout_minutes - if session_timeout is None: - # Maybe we need an initial scan? Can raise ServerSelectionError. - if self._description.topology_type == TOPOLOGY_TYPE.Single: - if not self._description.has_known_servers: - self._select_servers_loop( - any_server_selector, - self._settings.server_selection_timeout, - None) - elif not self._description.readable_servers: + def _check_session_support(self): + """Internal check for session support on non-load balanced clusters.""" + session_timeout = self._description.logical_session_timeout_minutes + if session_timeout is None: + # Maybe we need an initial scan? Can raise ServerSelectionError. + if self._description.topology_type == TOPOLOGY_TYPE.Single: + if not self._description.has_known_servers: self._select_servers_loop( - readable_server_selector, + any_server_selector, self._settings.server_selection_timeout, None) + elif not self._description.readable_servers: + self._select_servers_loop( + readable_server_selector, + self._settings.server_selection_timeout, + None) session_timeout = self._description.logical_session_timeout_minutes if session_timeout is None: raise ConfigurationError( "Sessions are not supported by this MongoDB deployment") + return session_timeout + def get_server_session(self): + """Start or resume a server session, or raise ConfigurationError.""" + with self._lock: + # Sessions are always supported in load balanced mode. + if not self._settings.load_balanced: + session_timeout = self._check_session_support() + else: + # Sessions never time out in load balanced mode. + session_timeout = float('inf') return self._session_pool.get_server_session(session_timeout) def return_server_session(self, server_session, lock): @@ -551,6 +562,12 @@ def _ensure_opened(self): SRV_POLLING_TOPOLOGIES): self._srv_monitor.open() + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + self._process_change(ServerDescription( + self._seed_addresses[0], + IsMaster({'ok': 1, 'serviceId': self._topology_id}))) + # Ensure that the monitors are open. for server in self._servers.values(): server.open() @@ -608,20 +625,23 @@ def _handle_error(self, address, err_ctx): if err_code in helpers._NOT_MASTER_CODES: is_shutting_down = err_code in helpers._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. - self._process_change(ServerDescription(address, error=error)) + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) if is_shutting_down or (err_ctx.max_wire_version <= 7): # Clear the pool. server.reset() server.request_check() elif not err_ctx.completed_handshake: # Unknown command error during the connection handshake. - self._process_change(ServerDescription(address, error=error)) + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) # Clear the pool. server.reset() elif issubclass(exc_type, ConnectionFailure): # "Client MUST replace the server's description with type Unknown # ... MUST NOT request an immediate check of the server." - self._process_change(ServerDescription(address, error=error)) + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) # Clear the pool. server.reset() # "When a client marks a server Unknown from `Network error when diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 0b72b65f3d..1c5a1f456e 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -25,9 +25,9 @@ # Enumeration for various kinds of MongoDB cluster topologies. -TOPOLOGY_TYPE = namedtuple('TopologyType', ['Single', 'ReplicaSetNoPrimary', - 'ReplicaSetWithPrimary', 'Sharded', - 'Unknown'])(*range(5)) +TOPOLOGY_TYPE = namedtuple('TopologyType', [ + 'Single', 'ReplicaSetNoPrimary', 'ReplicaSetWithPrimary', 'Sharded', + 'Unknown', 'LoadBalanced'])(*range(6)) # Topologies compatible with SRV record polling. SRV_POLLING_TOPOLOGIES = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) @@ -63,7 +63,28 @@ def __init__(self, # Is PyMongo compatible with all servers' wire protocols? self._incompatible_err = None + if self._topology_type != TOPOLOGY_TYPE.LoadBalanced: + self._init_incompatible_err() + # Server Discovery And Monitoring Spec: Whenever a client updates the + # TopologyDescription from an ismaster response, it MUST set + # TopologyDescription.logicalSessionTimeoutMinutes to the smallest + # logicalSessionTimeoutMinutes value among ServerDescriptions of all + # data-bearing server types. If any have a null + # logicalSessionTimeoutMinutes, then + # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. + readable_servers = self.readable_servers + if not readable_servers: + self._ls_timeout_minutes = None + elif any(s.logical_session_timeout_minutes is None + for s in readable_servers): + self._ls_timeout_minutes = None + else: + self._ls_timeout_minutes = min(s.logical_session_timeout_minutes + for s in readable_servers) + + def _init_incompatible_err(self): + """Internal compatibility check for non-load balanced topologies.""" for s in self._server_descriptions.values(): if not s.is_server_type_known: continue @@ -98,23 +119,6 @@ def __init__(self, break - # Server Discovery And Monitoring Spec: Whenever a client updates the - # TopologyDescription from an ismaster response, it MUST set - # TopologyDescription.logicalSessionTimeoutMinutes to the smallest - # logicalSessionTimeoutMinutes value among ServerDescriptions of all - # data-bearing server types. If any have a null - # logicalSessionTimeoutMinutes, then - # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. - readable_servers = self.readable_servers - if not readable_servers: - self._ls_timeout_minutes = None - elif any(s.logical_session_timeout_minutes is None - for s in readable_servers): - self._ls_timeout_minutes = None - else: - self._ls_timeout_minutes = min(s.logical_session_timeout_minutes - for s in readable_servers) - def check_compatible(self): """Raise ConfigurationError if any server is incompatible. @@ -243,8 +247,9 @@ def apply_local_threshold(selection): selector.min_wire_version, common_wv)) - if self.topology_type == TOPOLOGY_TYPE.Single: - # Ignore selectors for standalone. + if self.topology_type in (TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced): + # Ignore selectors for standalone and load balancer mode. return self.known_servers elif address: # Ignore selectors when explicit address is requested. @@ -306,6 +311,7 @@ def __repr__(self): SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary, SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary, SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + # Note: SERVER_TYPE.LoadBalancer and Unknown are intentionally left out. } From 209d5009e602e84b0b94dd5efd1764c1f2e2460e Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 19 May 2021 12:05:35 -0700 Subject: [PATCH 0351/2111] PYTHON-1860 Use OP_MSG for find/aggregate_raw_batches when supported (#622) --- bson/__init__.py | 10 ++++++++++ pymongo/command_cursor.py | 9 ++++++++- pymongo/cursor.py | 11 ++++++++--- pymongo/message.py | 31 +++++++++++++++++++++---------- test/test_cursor.py | 3 +++ 5 files changed, 50 insertions(+), 14 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 05482d9130..e473166a49 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -999,6 +999,16 @@ def _decode_selective(rawdoc, fields, codec_options): return doc +def _convert_raw_document_lists_to_streams(document): + cursor = document.get('cursor') + if cursor: + for key in ('firstBatch', 'nextBatch'): + batch = cursor.get(key) + if batch: + stream = b"".join(doc.raw for doc in batch) + cursor[key] = [stream] + + def _decode_all_selective(data, codec_options, fields): """Decode BSON data to a single document while using user-provided custom decoding logic. diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index afcb764c7a..f90a9267a6 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -16,6 +16,7 @@ from collections import deque +from bson import _convert_raw_document_lists_to_streams from pymongo.errors import (ConnectionFailure, InvalidOperation, NotMasterError, @@ -300,7 +301,13 @@ def __init__(self, collection, cursor_info, address, retrieved=0, def _unpack_response(self, response, cursor_id, codec_options, user_fields=None, legacy_response=False): - return response.raw_response(cursor_id) + raw_response = response.raw_response( + cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response def __getitem__(self, index): raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 9d39de544b..1340ca977f 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -19,7 +19,7 @@ from collections import deque -from bson import RE_TYPE +from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON from pymongo import helpers @@ -1133,7 +1133,6 @@ def _refresh(self): limit = min(limit, self.__batch_size) else: limit = self.__batch_size - # Exhaust cursors don't send getMore messages. g = self._getmore_class(self.__dbname, self.__collname, @@ -1283,7 +1282,13 @@ def __init__(self, *args, **kwargs): def _unpack_response(self, response, cursor_id, codec_options, user_fields=None, legacy_response=False): - return response.raw_response(cursor_id) + raw_response = response.raw_response( + cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response def explain(self): """Returns an explain plan record for this cursor. diff --git a/pymongo/message.py b/pymongo/message.py index 5a07d5d078..d7e4c0a7d0 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -29,10 +29,12 @@ import bson from bson import (CodecOptions, encode, + _decode_selective, _dict_to_bson, _make_c_string) from bson.codec_options import DEFAULT_CODEC_OPTIONS -from bson.raw_bson import _inflate_bson, DEFAULT_RAW_BSON_OPTIONS +from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, + RawBSONDocument) from bson.son import SON try: @@ -440,28 +442,30 @@ def get_message(self, dummy0, sock_info, use_cmd=False): return get_more(ns, self.ntoreturn, self.cursor_id, ctx) -# TODO: Use OP_MSG once the server is able to respond with document streams. class _RawBatchQuery(_Query): def use_command(self, socket_info, exhaust): # Compatibility checks. super(_RawBatchQuery, self).use_command(socket_info, exhaust) - + # Use OP_MSG when available. + if socket_info.op_msg_enabled and not exhaust: + return True return False def get_message(self, set_slave_ok, sock_info, use_cmd=False): - # Always pass False for use_cmd. return super(_RawBatchQuery, self).get_message( - set_slave_ok, sock_info, False) + set_slave_ok, sock_info, use_cmd) class _RawBatchGetMore(_GetMore): def use_command(self, socket_info, exhaust): + # Use OP_MSG when available. + if socket_info.op_msg_enabled and not exhaust: + return True return False def get_message(self, set_slave_ok, sock_info, use_cmd=False): - # Always pass False for use_cmd. return super(_RawBatchGetMore, self).get_message( - set_slave_ok, sock_info, False) + set_slave_ok, sock_info, use_cmd) class _CursorAddress(tuple): @@ -1490,7 +1494,7 @@ def __init__(self, flags, cursor_id, number_returned, documents): self.number_returned = number_returned self.documents = documents - def raw_response(self, cursor_id=None): + def raw_response(self, cursor_id=None, user_fields=None): """Check the response header from the database, without decoding BSON. Check the response for errors and unpack. @@ -1598,8 +1602,15 @@ def __init__(self, flags, payload_document): self.flags = flags self.payload_document = payload_document - def raw_response(self, cursor_id=None): - raise NotImplementedError + def raw_response(self, cursor_id=None, user_fields={}): + """ + cursor_id is ignored + user_fields is used to determine which fields must not be decoded + """ + inflated_response = _decode_selective( + RawBSONDocument(self.payload_document), user_fields, + DEFAULT_RAW_BSON_OPTIONS) + return [inflated_response] def unpack_response(self, cursor_id=None, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, diff --git a/test/test_cursor.py b/test/test_cursor.py index fb657c3f6b..30acb89c1b 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -39,6 +39,7 @@ InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern from test import (client_context, unittest, IntegrationTest) @@ -1514,6 +1515,8 @@ def test_collation_error(self): @client_context.require_version_min(3, 2) def test_read_concern(self): + self.db.get_collection( + "test", write_concern=WriteConcern(w="majority")).insert_one({}) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) From 21c92b13cfc3dc099e905e333e0784f8f5357a1e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 24 May 2021 10:03:43 -0700 Subject: [PATCH 0352/2111] PYTHON-2729 PYTHON-2721 PYTHON-2730 Make 5.0 tests green (#626) Update explain response format parsing for 5.0. Temporarily skip failing regex and killCursors tests on 5.0. --- test/command_monitoring/find.json | 1 + test/test_collection.py | 4 ++++ .../valid-pass/poc-command-monitoring.json | 1 + 3 files changed, 6 insertions(+) diff --git a/test/command_monitoring/find.json b/test/command_monitoring/find.json index 039c5fead1..608572ed42 100644 --- a/test/command_monitoring/find.json +++ b/test/command_monitoring/find.json @@ -415,6 +415,7 @@ { "description": "A successful find event with a getmore and the server kills the cursor", "ignore_if_server_version_less_than": "3.1", + "ignore_if_server_version_greater_than": "4.9.0", "ignore_if_topology_type": [ "sharded" ], diff --git a/test/test_collection.py b/test/test_collection.py index 673816025a..538ebdc313 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -579,6 +579,9 @@ def get_plan_stage(self, root, stage): stage = self.get_plan_stage(i, stage) if stage: return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. + return self.get_plan_stage(root["queryPlan"], stage) elif "shards" in root: for i in root['shards']: stage = self.get_plan_stage(i['winningPlan'], stage) @@ -1172,6 +1175,7 @@ def test_fields_specifier_as_dict(self): self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) + @client_context.require_version_max(4, 9, -1) # PYTHON-2721 def test_find_w_regex(self): db = self.db db.test.delete_many({}) diff --git a/test/unified-test-format/valid-pass/poc-command-monitoring.json b/test/unified-test-format/valid-pass/poc-command-monitoring.json index 499396e0ba..7484575066 100644 --- a/test/unified-test-format/valid-pass/poc-command-monitoring.json +++ b/test/unified-test-format/valid-pass/poc-command-monitoring.json @@ -61,6 +61,7 @@ "runOnRequirements": [ { "minServerVersion": "3.1", + "maxServerVersion": "4.9.0", "topologies": [ "single", "replicaset" From 93ac5e0277af7fbeec92067288faea03aee0cb6a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 27 May 2021 15:05:26 -0700 Subject: [PATCH 0353/2111] PYTHON-2676 Add load balancer tests in EVG (#625) Add load balancer spec tests Ensure LB supports retryable reads/writes Add assertNumberConnectionsCheckedOut, createFindCursor, ignoreResultAndError Add PoolClearedEvent.service_id and fix isClientError unified test assertion --- .evergreen/config.yml | 49 ++++ .evergreen/run-tests.sh | 12 +- pymongo/client_session.py | 18 +- pymongo/mongo_client.py | 19 +- pymongo/monitoring.py | 80 ++++-- pymongo/pool.py | 9 +- pymongo/server.py | 4 +- pymongo/server_description.py | 6 +- pymongo/topology.py | 23 +- test/__init__.py | 20 +- test/load_balancer/test_crud_unified.py | 23 ++ test/load_balancer/test_dns.py | 23 ++ test/load_balancer/test_load_balancer.py | 34 +++ .../test_retryable_change_stream.py | 23 ++ test/load_balancer/test_retryable_reads.py | 23 ++ test/load_balancer/test_retryable_writes.py | 23 ++ .../test_transactions_unified.py | 23 ++ test/load_balancer/test_uri_options.py | 23 ++ test/load_balancer/test_versioned_api.py | 23 ++ .../unified/event-monitoring.json | 184 ++++++++++++ .../unified/lb-connection-establishment.json | 58 ++++ .../non-lb-connection-establishment.json | 92 ++++++ .../unified/server-selection.json | 82 ++++++ test/test_client.py | 2 +- test/test_cmap.py | 2 + test/test_discovery_and_monitoring.py | 2 +- test/test_monitoring.py | 7 +- test/test_topology.py | 4 +- test/unified_format.py | 263 +++++++++++++----- test/utils.py | 2 +- test/utils_spec_runner.py | 19 +- 31 files changed, 1024 insertions(+), 151 deletions(-) create mode 100644 test/load_balancer/test_crud_unified.py create mode 100644 test/load_balancer/test_dns.py create mode 100644 test/load_balancer/test_load_balancer.py create mode 100644 test/load_balancer/test_retryable_change_stream.py create mode 100644 test/load_balancer/test_retryable_reads.py create mode 100644 test/load_balancer/test_retryable_writes.py create mode 100644 test/load_balancer/test_transactions_unified.py create mode 100644 test/load_balancer/test_uri_options.py create mode 100644 test/load_balancer/test_versioned_api.py create mode 100644 test/load_balancer/unified/event-monitoring.json create mode 100644 test/load_balancer/unified/lb-connection-establishment.json create mode 100644 test/load_balancer/unified/non-lb-connection-establishment.json create mode 100644 test/load_balancer/unified/server-selection.json diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3634c97faa..7dc8a92c45 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -411,6 +411,11 @@ functions: if [ -n "${SETDEFAULTENCODING}" ]; then export SETDEFAULTENCODING="${SETDEFAULTENCODING}" fi + if [ -n "${test_loadbalancer}" ]; then + export TEST_LOADBALANCER=1 + export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" + export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" + fi PYTHON_BINARY=${PYTHON_BINARY} \ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ @@ -788,6 +793,22 @@ functions: -v \ --fault revoked + "run load-balancer": + - command: shell.exec + params: + script: | + DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh start + - command: expansions.update + params: + file: lb-expansion.yml + + "stop load-balancer": + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen + DRIVERS_TOOLS=${DRIVERS_TOOLS} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop + "teardown_docker": - command: shell.exec params: @@ -1537,6 +1558,13 @@ tasks: - func: "run aws auth test with aws EC2 credentials" - func: "run aws ECS auth test" + - name: load-balancer-test + commands: + - func: "bootstrap mongo-orchestration" + vars: + TOPOLOGY: "sharded_cluster" + - func: "run load-balancer" + - func: "run tests" # }}} - name: "coverage-report" tags: ["coverage"] @@ -1941,6 +1969,16 @@ axes: variables: ORCHESTRATION_FILE: "versioned-api-testing.json" + # Run load balancer tests? + - id: loadbalancer + display_name: "Load Balancer" + values: + - id: "enabled" + display_name: "Load Balancer" + variables: + test_loadbalancer: true + batchtime: 10080 # 7 days + buildvariants: - matrix_name: "tests-all" matrix_spec: @@ -2463,6 +2501,17 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-latest" +- matrix_name: "load-balancer" + matrix_spec: + platform: ubuntu-18.04 + mongodb-version: ["latest"] + auth-ssl: "*" + python-version: ["3.6", "3.9"] + loadbalancer: "*" + display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" + tasks: + - name: "load-balancer-test" + - matrix_name: "Release" matrix_spec: platform: [ubuntu-20.04, windows-64-vsMulti-small, macos-1014] diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 7a78401264..9848b91877 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -51,6 +51,11 @@ fi if [ "$SSL" != "nossl" ]; then export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" + + if [ -n "$TEST_LOADBALANCER" ]; then + export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}&tls=true" + export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}&tls=true" + fi fi # For createvirtualenv. @@ -191,7 +196,12 @@ if [ -z "$GREEN_FRAMEWORK" ]; then # causing this script to exit. $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" fi - $PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT + + if [ -n "$TEST_LOADBALANCER" ]; then + $PYTHON -m xmlrunner discover -s test/load_balancer -v --locals -o $XUNIT_DIR + else + $PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT + fi else # --no_ext has to come before "test" so there is no way to toggle extensions here. $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 950fe0dc13..5b6ff7524d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -660,7 +660,7 @@ def abort_transaction(self): pass finally: self._transaction.state = _TxnState.ABORTED - self._unpin_mongos() + self._unpin() def _finish_transaction_with_retry(self, command_name): """Run commit or abort with one retry after any retryable error. @@ -779,13 +779,13 @@ def _pinned_address(self): return self._transaction.pinned_address return None - def _pin_mongos(self, server): - """Pin this session to the given mongos Server.""" + def _pin(self, server): + """Pin this session to the given Server.""" self._transaction.sharded = True self._transaction.pinned_address = server.description.address - def _unpin_mongos(self): - """Unpin this session from any pinned mongos address.""" + def _unpin(self): + """Unpin this session from any pinned Server.""" self._transaction.pinned_address = None def _txn_read_preference(self): @@ -906,9 +906,11 @@ def get_server_session(self, session_timeout_minutes): return _ServerSession(self.generation) def return_server_session(self, server_session, session_timeout_minutes): - self._clear_stale(session_timeout_minutes) - if not server_session.timed_out(session_timeout_minutes): - self.return_server_session_no_lock(server_session) + if session_timeout_minutes is not None: + self._clear_stale(session_timeout_minutes) + if server_session.timed_out(session_timeout_minutes): + return + self.return_server_session_no_lock(server_session) def return_server_session_no_lock(self, server_session): # Discard sessions from an old pool to avoid duplicate sessions in the diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ebd11970a2..728c9a1670 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1197,15 +1197,16 @@ def _select_server(self, server_selector, session, address=None): server = topology.select_server(server_selector) # Pin this session to the selected server if it's performing a # sharded transaction. - if server.description.mongos and (session and - session.in_transaction): - session._pin_mongos(server) + if (server.description.server_type in ( + SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer) + and session and session.in_transaction): + session._pin(server) return server except PyMongoError as exc: # Server selection errors in a transaction are transient. if session and session.in_transaction: exc._add_error_label("TransientTransactionError") - session._unpin_mongos() + session._unpin() raise def _socket_for_writes(self, session): @@ -1350,7 +1351,7 @@ def is_retrying(): _add_retryable_write_error(exc, max_wire_version) retryable_error = exc.has_error_label("RetryableWriteError") if retryable_error: - session._unpin_mongos() + session._unpin() if is_retrying() or not retryable_error: raise if bulk: @@ -1965,7 +1966,7 @@ def _add_retryable_write_error(exc, max_wire_version): class _MongoClientErrorHandler(object): """Handle errors raised when executing an operation.""" __slots__ = ('client', 'server_address', 'session', 'max_wire_version', - 'sock_generation', 'completed_handshake') + 'sock_generation', 'completed_handshake', 'service_id') def __init__(self, client, server, session): self.client = client @@ -1978,11 +1979,13 @@ def __init__(self, client, server, session): # of the pool at the time the connection attempt was started." self.sock_generation = server.pool.generation self.completed_handshake = False + self.service_id = None def contribute_socket(self, sock_info): """Provide socket information to the error handler.""" self.max_wire_version = sock_info.max_wire_version self.sock_generation = sock_info.generation + self.service_id = sock_info.service_id self.completed_handshake = True def __enter__(self): @@ -2001,9 +2004,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): if issubclass(exc_type, PyMongoError): if (exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label("RetryableWriteError")): - self.session._unpin_mongos() + self.session._unpin() err_ctx = _ErrorContext( exc_val, self.max_wire_version, self.sock_generation, - self.completed_handshake) + self.completed_handshake, self.service_id) self.client._topology.handle_error(self.server_address, err_ctx) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 7537765637..b53629d12b 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -512,13 +512,16 @@ def register(listener): class _CommandEvent(object): """Base class for command events.""" - __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id") + __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", + "__service_id") - def __init__(self, command_name, request_id, connection_id, operation_id): + def __init__(self, command_name, request_id, connection_id, operation_id, + service_id=None): self.__cmd_name = command_name self.__rqst_id = request_id self.__conn_id = connection_id self.__op_id = operation_id + self.__service_id = service_id @property def command_name(self): @@ -535,6 +538,14 @@ def connection_id(self): """The address (host, port) of the server this command was sent to.""" return self.__conn_id + @property + def service_id(self): + """The service_id this command was sent to, or ``None``. + + .. versionadded:: 3.12 + """ + return self.__service_id + @property def operation_id(self): """An id for this series of events or None.""" @@ -551,15 +562,17 @@ class CommandStartedEvent(_CommandEvent): - `connection_id`: The address (host, port) of the server this command was sent to. - `operation_id`: An optional identifier for a series of related events. + - `service_id`: The service_id this command was sent to, or ``None``. """ __slots__ = ("__cmd", "__db") - def __init__(self, command, database_name, *args): + def __init__(self, command, database_name, *args, service_id=None): if not command: raise ValueError("%r is not a valid command" % (command,)) # Command name must be first key. command_name = next(iter(command)) - super(CommandStartedEvent, self).__init__(command_name, *args) + super(CommandStartedEvent, self).__init__( + command_name, *args, service_id=service_id) if command_name.lower() in _SENSITIVE_COMMANDS: self.__cmd = {} else: @@ -577,9 +590,12 @@ def database_name(self): return self.__db def __repr__(self): - return "<%s %s db: %r, command: %r, operation_id: %s>" % ( - self.__class__.__name__, self.connection_id, self.database_name, - self.command_name, self.operation_id) + return ( + "<%s %s db: %r, command: %r, operation_id: %s, " + "service_id: %s>") % ( + self.__class__.__name__, self.connection_id, + self.database_name, self.command_name, self.operation_id, + self.service_id) class CommandSucceededEvent(_CommandEvent): @@ -593,13 +609,15 @@ class CommandSucceededEvent(_CommandEvent): - `connection_id`: The address (host, port) of the server this command was sent to. - `operation_id`: An optional identifier for a series of related events. + - `service_id`: The service_id this command was sent to, or ``None``. """ __slots__ = ("__duration_micros", "__reply") def __init__(self, duration, reply, command_name, - request_id, connection_id, operation_id): + request_id, connection_id, operation_id, service_id=None): super(CommandSucceededEvent, self).__init__( - command_name, request_id, connection_id, operation_id) + command_name, request_id, connection_id, operation_id, + service_id=service_id) self.__duration_micros = _to_micros(duration) if command_name.lower() in _SENSITIVE_COMMANDS: self.__reply = {} @@ -617,9 +635,12 @@ def reply(self): return self.__reply def __repr__(self): - return "<%s %s command: %r, operation_id: %s, duration_micros: %s>" % ( - self.__class__.__name__, self.connection_id, - self.command_name, self.operation_id, self.duration_micros) + return ( + "<%s %s command: %r, operation_id: %s, duration_micros: %s, " + "service_id: %s>") % ( + self.__class__.__name__, self.connection_id, + self.command_name, self.operation_id, self.duration_micros, + self.service_id) class CommandFailedEvent(_CommandEvent): @@ -633,11 +654,12 @@ class CommandFailedEvent(_CommandEvent): - `connection_id`: The address (host, port) of the server this command was sent to. - `operation_id`: An optional identifier for a series of related events. + - `service_id`: The service_id this command was sent to, or ``None``. """ __slots__ = ("__duration_micros", "__failure") - def __init__(self, duration, failure, *args): - super(CommandFailedEvent, self).__init__(*args) + def __init__(self, duration, failure, *args, service_id=None): + super(CommandFailedEvent, self).__init__(*args, service_id=service_id) self.__duration_micros = _to_micros(duration) self.__failure = failure @@ -654,9 +676,10 @@ def failure(self): def __repr__(self): return ( "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "failure: %r>" % ( + "failure: %r, service_id: %s>") % ( self.__class__.__name__, self.connection_id, self.command_name, - self.operation_id, self.duration_micros, self.failure)) + self.operation_id, self.duration_micros, self.failure, + self.service_id) class _PoolEvent(object): @@ -721,10 +744,29 @@ class PoolClearedEvent(_PoolEvent): :Parameters: - `address`: The address (host, port) pair of the server this Pool is attempting to connect to. + - `service_id`: The service_id this command was sent to, or ``None``. .. versionadded:: 3.9 """ - __slots__ = () + __slots__ = ("__service_id",) + + def __init__(self, address, service_id=None): + super(PoolClearedEvent, self).__init__(address) + self.__service_id = service_id + + @property + def service_id(self): + """Connections with this service_id are cleared. + + When service_id is ``None``, all connections in the pool are cleared. + + .. versionadded:: 3.12 + """ + return self.__service_id + + def __repr__(self): + return '%s(%r, %r)' % ( + self.__class__.__name__, self.address, self.__service_id) class PoolClosedEvent(_PoolEvent): @@ -1508,10 +1550,10 @@ def publish_pool_ready(self, address): except Exception: _handle_exception() - def publish_pool_cleared(self, address): + def publish_pool_cleared(self, address, service_id): """Publish a :class:`PoolClearedEvent` to all pool listeners. """ - event = PoolClearedEvent(address) + event = PoolClearedEvent(address, service_id) for subscriber in self.__cmap_listeners: try: subscriber.pool_cleared(event) diff --git a/pymongo/pool.py b/pymongo/pool.py index 728fec0f60..23ccdcab67 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1129,7 +1129,7 @@ def ready(self): def closed(self): return self.state == PoolState.CLOSED - def _reset(self, close, pause=True): + def _reset(self, close, pause=True, service_id=None): old_state = self.state with self.size_cond: if self.closed: @@ -1161,7 +1161,8 @@ def _reset(self, close, pause=True): listeners.publish_pool_closed(self.address) else: if old_state != PoolState.PAUSED and self.enabled_for_cmap: - listeners.publish_pool_cleared(self.address) + listeners.publish_pool_cleared(self.address, + service_id=service_id) for sock_info in sockets: sock_info.close_socket(ConnectionClosedReason.STALE) @@ -1174,8 +1175,8 @@ def update_is_writable(self, is_writable): for socket in self.sockets: socket.update_is_writable(self.is_writable) - def reset(self): - self._reset(close=False) + def reset(self, service_id=None): + self._reset(close=False, service_id=service_id) def reset_without_pause(self): self._reset(close=False, pause=False) diff --git a/pymongo/server.py b/pymongo/server.py index fbfddae2e2..e9e29f49ea 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -49,9 +49,9 @@ def open(self): if not self._pool.opts.load_balanced: self._monitor.open() - def reset(self): + def reset(self, service_id=None): """Clear the connection pool.""" - self.pool.reset() + self.pool.reset(service_id) def close(self): """Clear the connection pool and stop the monitor. diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 5dc8222fef..19cc349c78 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -204,10 +204,10 @@ def is_server_type_known(self): @property def retryable_writes_supported(self): """Checks if this server supports retryable writes.""" - return ( + return (( self._ls_timeout_minutes is not None and - self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary, - SERVER_TYPE.LoadBalancer)) + self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary)) + or self._server_type == SERVER_TYPE.LoadBalancer) @property def retryable_reads_supported(self): diff --git a/pymongo/topology.py b/pymongo/topology.py index 446bb9353d..18d5c4c8f4 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -453,7 +453,7 @@ def update_pool(self, all_credentials): try: server.pool.remove_stale_sockets(generation, all_credentials) except PyMongoError as exc: - ctx = _ErrorContext(exc, 0, generation, False) + ctx = _ErrorContext(exc, 0, generation, False, None) self.handle_error(server.description.address, ctx) raise @@ -528,11 +528,9 @@ def get_server_session(self): def return_server_session(self, server_session, lock): if lock: with self._lock: - session_timeout = \ - self._description.logical_session_timeout_minutes - if session_timeout is not None: - self._session_pool.return_server_session(server_session, - session_timeout) + self._session_pool.return_server_session( + server_session, + self._description.logical_session_timeout_minutes) else: # Called from a __del__ method, can't use a lock. self._session_pool.return_server_session_no_lock(server_session) @@ -566,7 +564,8 @@ def _ensure_opened(self): # Emit initial SDAM events for load balancer mode. self._process_change(ServerDescription( self._seed_addresses[0], - IsMaster({'ok': 1, 'serviceId': self._topology_id}))) + IsMaster({'ok': 1, 'serviceId': self._topology_id, + 'maxWireVersion': 13}))) # Ensure that the monitors are open. for server in self._servers.values(): @@ -599,6 +598,7 @@ def _handle_error(self, address, err_ctx): server = self._servers[address] error = err_ctx.error exc_type = type(error) + service_id = err_ctx.service_id if (issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake): # The socket has been closed. Don't reset the server. @@ -629,21 +629,21 @@ def _handle_error(self, address, err_ctx): self._process_change(ServerDescription(address, error=error)) if is_shutting_down or (err_ctx.max_wire_version <= 7): # Clear the pool. - server.reset() + server.reset(service_id) server.request_check() elif not err_ctx.completed_handshake: # Unknown command error during the connection handshake. if not self._settings.load_balanced: self._process_change(ServerDescription(address, error=error)) # Clear the pool. - server.reset() + server.reset(service_id) elif issubclass(exc_type, ConnectionFailure): # "Client MUST replace the server's description with type Unknown # ... MUST NOT request an immediate check of the server." if not self._settings.load_balanced: self._process_change(ServerDescription(address, error=error)) # Clear the pool. - server.reset() + server.reset(service_id) # "When a client marks a server Unknown from `Network error when # reading or writing`_, clients MUST cancel the isMaster check on # that server and close the current monitoring connection." @@ -795,11 +795,12 @@ def __repr__(self): class _ErrorContext(object): """An error with context for SDAM error handling.""" def __init__(self, error, max_wire_version, sock_generation, - completed_handshake): + completed_handshake, service_id): self.error = error self.max_wire_version = max_wire_version self.sock_generation = sock_generation self.completed_handshake = completed_handshake + self.service_id = service_id def _is_stale_error_topology_version(current_tv, error_tv): diff --git a/test/__init__.py b/test/__init__.py index 83dac398e4..9e76b28f50 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -50,6 +50,7 @@ from pymongo.common import partition_node from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, validate_cert_reqs +from pymongo.uri_parser import parse_uri from test.version import Version if HAVE_SSL: @@ -92,6 +93,14 @@ COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) +SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") +MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") +if TEST_LOADBALANCER: + res = parse_uri(SINGLE_MONGOS_LB_URI) + host, port = res['nodelist'][0] + db_user = res['username'] or db_user + db_pwd = res['password'] or db_pwd def is_server_resolvable(): @@ -190,6 +199,7 @@ def _all_users(db): class ClientContext(object): + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI def __init__(self): """Create a client and grab essential information from the server.""" @@ -216,7 +226,9 @@ def __init__(self): self.client = None self.conn_lock = threading.Lock() self.is_data_lake = False - self.load_balancer = False + self.load_balancer = TEST_LOADBALANCER + if self.load_balancer: + self.default_client_options["loadBalanced"] = True if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS if MONGODB_API_VERSION: @@ -632,8 +644,10 @@ def check_auth_with_sharding(self, func): func=func) def is_topology_type(self, topologies): - if 'load-balanced' in topologies and self.load_balancer: - return True + if self.load_balancer: + if 'load-balanced' in topologies: + return True + return False if 'single' in topologies and not (self.is_mongos or self.is_rs): return True if 'replicaset' in topologies and self.is_rs: diff --git a/test/load_balancer/test_crud_unified.py b/test/load_balancer/test_crud_unified.py new file mode 100644 index 0000000000..dfe0935bba --- /dev/null +++ b/test/load_balancer/test_crud_unified.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_crud_unified import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_dns.py b/test/load_balancer/test_dns.py new file mode 100644 index 0000000000..047b98b121 --- /dev/null +++ b/test/load_balancer/test_dns.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_dns import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_load_balancer.py b/test/load_balancer/test_load_balancer.py new file mode 100644 index 0000000000..c31ff58ef1 --- /dev/null +++ b/test/load_balancer/test_load_balancer.py @@ -0,0 +1,34 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'unified') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/load_balancer/test_retryable_change_stream.py b/test/load_balancer/test_retryable_change_stream.py new file mode 100644 index 0000000000..b7c902dd30 --- /dev/null +++ b/test/load_balancer/test_retryable_change_stream.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_change_stream import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_retryable_reads.py b/test/load_balancer/test_retryable_reads.py new file mode 100644 index 0000000000..c5de3c9078 --- /dev/null +++ b/test/load_balancer/test_retryable_reads.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_retryable_reads import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_retryable_writes.py b/test/load_balancer/test_retryable_writes.py new file mode 100644 index 0000000000..3800641b08 --- /dev/null +++ b/test/load_balancer/test_retryable_writes.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_retryable_writes import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_transactions_unified.py b/test/load_balancer/test_transactions_unified.py new file mode 100644 index 0000000000..2572028046 --- /dev/null +++ b/test/load_balancer/test_transactions_unified.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_transactions_unified import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_uri_options.py b/test/load_balancer/test_uri_options.py new file mode 100644 index 0000000000..b644d7d334 --- /dev/null +++ b/test/load_balancer/test_uri_options.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_uri_spec import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/test_versioned_api.py b/test/load_balancer/test_versioned_api.py new file mode 100644 index 0000000000..7e801968cb --- /dev/null +++ b/test/load_balancer/test_versioned_api.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.test_versioned_api import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/load_balancer/unified/event-monitoring.json b/test/load_balancer/unified/event-monitoring.json new file mode 100644 index 0000000000..938c70bf38 --- /dev/null +++ b/test/load_balancer/unified/event-monitoring.json @@ -0,0 +1,184 @@ +{ + "description": "monitoring events include correct fields", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "command started and succeeded events include serviceId", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServiceId": true + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServiceId": true + } + } + ] + } + ] + }, + { + "description": "command failed events include serviceId", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "hasServiceId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServiceId": true + } + } + ] + } + ] + }, + { + "description": "poolClearedEvent events include serviceId", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "hasServiceId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServiceId": true + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "hasServiceId": true + } + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/unified/lb-connection-establishment.json b/test/load_balancer/unified/lb-connection-establishment.json new file mode 100644 index 0000000000..0eaadf30c2 --- /dev/null +++ b/test/load_balancer/unified/lb-connection-establishment.json @@ -0,0 +1,58 @@ +{ + "description": "connection establishment for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "loadBalanced": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + } + ], + "tests": [ + { + "description": "operations against load balancers fail if URI contains loadBalanced=false", + "skipReason": "servers have not implemented LB support yet so they will not fail the connection handshake in this case", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/load_balancer/unified/non-lb-connection-establishment.json b/test/load_balancer/unified/non-lb-connection-establishment.json new file mode 100644 index 0000000000..6aaa7bdf98 --- /dev/null +++ b/test/load_balancer/unified/non-lb-connection-establishment.json @@ -0,0 +1,92 @@ +{ + "description": "connection establishment if loadBalanced is specified for non-load balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "single", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "lbTrueClient", + "useMultipleMongoses": false, + "uriOptions": { + "loadBalanced": true + } + } + }, + { + "database": { + "id": "lbTrueDatabase", + "client": "lbTrueClient", + "databaseName": "lbTrueDb" + } + }, + { + "client": { + "id": "lbFalseClient", + "uriOptions": { + "loadBalanced": false + } + } + }, + { + "database": { + "id": "lbFalseDatabase", + "client": "lbFalseClient", + "databaseName": "lbFalseDb" + } + } + ], + "_yamlAnchors": { + "runCommandArguments": [ + { + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + } + } + ] + }, + "tests": [ + { + "description": "operations against non-load balanced clusters fail if URI contains loadBalanced=true", + "operations": [ + { + "name": "runCommand", + "object": "lbTrueDatabase", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "errorContains": "Driver attempted to initialize in load balancing mode, but the server does not support this mode" + } + } + ] + }, + { + "description": "operations against non-load balanced clusters succeed if URI contains loadBalanced=false", + "operations": [ + { + "name": "runCommand", + "object": "lbFalseDatabase", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + } + } + ] + } + ] +} diff --git a/test/load_balancer/unified/server-selection.json b/test/load_balancer/unified/server-selection.json new file mode 100644 index 0000000000..00c7e4c95b --- /dev/null +++ b/test/load_balancer/unified/server-selection.json @@ -0,0 +1,82 @@ +{ + "description": "server selection for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "$readPreference is sent for load-balanced clusters", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "$readPreference": { + "mode": "secondaryPreferred" + } + }, + "commandName": "find", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_client.py b/test/test_client.py index 3754cb0ac3..5d57c32c1c 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1483,7 +1483,7 @@ def stop(self): def run(self): while self.running: exc = AutoReconnect('mock pool error') - ctx = _ErrorContext(exc, 0, pool.generation, False) + ctx = _ErrorContext(exc, 0, pool.generation, False, None) client._topology.handle_error(pool.address, ctx) time.sleep(0.001) diff --git a/test/test_cmap.py b/test/test_cmap.py index 7a9ab51804..053f27ba73 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -22,6 +22,7 @@ sys.path[0:0] = [""] from bson.son import SON +from bson.objectid import ObjectId from pymongo.errors import (ConnectionFailure, OperationFailure, @@ -422,6 +423,7 @@ def test_events_repr(self): self.assertRepr(ConnectionCheckOutStartedEvent(host)) self.assertRepr(PoolCreatedEvent(host, {})) self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) self.assertRepr(PoolClosedEvent(host)) def test_close_leaves_pool_unpaused(self): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index ddd017bb9a..7fa96b5e18 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -113,7 +113,7 @@ def got_app_error(topology, app_error): topology.handle_error( server_address, _ErrorContext(e, max_wire_version, generation, - completed_handshake)) + completed_handshake, None)) def get_type(topology, hostname): diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 46cfe87c4a..31e8282828 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1183,7 +1183,7 @@ def test_command_event_repr(self): self.assertEqual( repr(event), "") + "command: 'isMaster', operation_id: 2, service_id: None>") delta = datetime.timedelta(milliseconds=100) event = monitoring.CommandSucceededEvent( delta, {'ok': 1}, 'isMaster', request_id, connection_id, @@ -1191,7 +1191,8 @@ def test_command_event_repr(self): self.assertEqual( repr(event), "") + "command: 'isMaster', operation_id: 2, duration_micros: 100000, " + "service_id: None>") event = monitoring.CommandFailedEvent( delta, {'ok': 0}, 'isMaster', request_id, connection_id, operation_id) @@ -1199,7 +1200,7 @@ def test_command_event_repr(self): repr(event), "") + "failure: {'ok': 0}, service_id: None>") def test_server_heartbeat_event_repr(self): connection_id = ('localhost', 27017) diff --git a/test/test_topology.py b/test/test_topology.py index 2abcab47b1..5e2f683f70 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -401,7 +401,7 @@ def test_handle_error(self): 'setName': 'rs', 'hosts': ['a', 'b']}) - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True) + errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True, None) t.handle_error(('a', 27017), errctx) self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) @@ -430,7 +430,7 @@ def test_handle_error_removed_server(self): t = create_mock_topology(replica_set_name='rs') # No error resetting a server not in the TopologyDescription. - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True) + errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True, None) t.handle_error(('b', 27017), errctx) # Server was *not* added as type Unknown. diff --git a/test/unified_format.py b/test/unified_format.py index ad1e73a519..7fa7f55136 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -39,10 +39,16 @@ from pymongo.change_stream import ChangeStream from pymongo.collection import Collection from pymongo.database import Database -from pymongo.errors import BulkWriteError, InvalidOperation, PyMongoError +from pymongo.errors import ( + BulkWriteError, ConnectionFailure, InvalidOperation, NotMasterError, + PyMongoError) from pymongo.monitoring import ( CommandFailedEvent, CommandListener, CommandStartedEvent, - CommandSucceededEvent, _SENSITIVE_COMMANDS) + CommandSucceededEvent, _SENSITIVE_COMMANDS, PoolCreatedEvent, + PoolReadyEvent, PoolClearedEvent, PoolClosedEvent, ConnectionCreatedEvent, + ConnectionReadyEvent, ConnectionClosedEvent, + ConnectionCheckOutStartedEvent, ConnectionCheckOutFailedEvent, + ConnectionCheckedOutEvent, ConnectionCheckedInEvent) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult @@ -51,7 +57,8 @@ from test import client_context, unittest, IntegrationTest from test.utils import ( - camel_to_snake, rs_or_single_client, single_client, snake_to_camel) + camel_to_snake, get_pool, rs_or_single_client, single_client, + snake_to_camel, CMAPListener) from test.version import Version from test.utils import ( @@ -142,28 +149,52 @@ def parse_bulk_write_error_result(error): return parse_bulk_write_result(write_result) -class EventListenerUtil(CommandListener): +class NonLazyCursor(object): + """A find cursor proxy that creates the remote cursor when initialized.""" + def __init__(self, find_cursor): + self.find_cursor = find_cursor + # Create the server side cursor. + self.first_result = next(find_cursor, None) + + def __next__(self): + if self.first_result is not None: + first = self.first_result + self.first_result = None + return first + return next(self.find_cursor) + + def close(self): + self.find_cursor.close() + + +class EventListenerUtil(CMAPListener, CommandListener): def __init__(self, observe_events, ignore_commands): - self._event_types = set(observe_events) + self._event_types = set(name.lower() for name in observe_events) self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add('configurefailpoint') - self.results = [] + super(EventListenerUtil, self).__init__() + + def get_events(self, event_type): + if event_type == 'command': + return [e for e in self.events if 'Command' in type(e).__name__] + return [e for e in self.events if 'Command' not in type(e).__name__] + + def add_event(self, event): + if type(event).__name__.lower() in self._event_types: + super(EventListenerUtil, self).add_event(event) - def _observe_event(self, event): + def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: - self.results.append(event) + self.add_event(event) def started(self, event): - if 'commandStartedEvent' in self._event_types: - self._observe_event(event) + self._command_event(event) def succeeded(self, event): - if 'commandSucceededEvent' in self._event_types: - self._observe_event(event) + self._command_event(event) def failed(self, event): - if 'commandFailedEvent' in self._event_types: - self._observe_event(event) + self._command_event(event) class EntityMapUtil(object): @@ -173,28 +204,28 @@ def __init__(self, test_class): self._entities = {} self._listeners = {} self._session_lsids = {} - self._test_class = test_class + self.test = test_class def __getitem__(self, item): try: return self._entities[item] except KeyError: - self._test_class.fail('Could not find entity named %s in map' % ( + self.test.fail('Could not find entity named %s in map' % ( item,)) def __setitem__(self, key, value): if not isinstance(key, str): - self._test_class.fail( + self.test.fail( 'Expected entity name of type str, got %s' % (type(key))) if key in self._entities: - self._test_class.fail('Entity named %s already in map' % (key,)) + self.test.fail('Entity named %s already in map' % (key,)) self._entities[key] = value def _create_entity(self, entity_spec): if len(entity_spec) != 1: - self._test_class.fail( + self.test.fail( "Entity spec %s did not contain exactly one top-level key" % ( entity_spec,)) @@ -203,13 +234,17 @@ def _create_entity(self, entity_spec): kwargs = {} observe_events = spec.get('observeEvents', []) ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) + # TODO: SUPPORT storeEventsAsEntities if len(observe_events) or len(ignore_commands): ignore_commands = [cmd.lower() for cmd in ignore_commands] listener = EventListenerUtil(observe_events, ignore_commands) self._listeners[spec['id']] = listener kwargs['event_listeners'] = [listener] - if client_context.is_mongos and spec.get('useMultipleMongoses'): - kwargs['h'] = client_context.mongos_seeds() + if spec.get('useMultipleMongoses'): + if client_context.load_balancer: + kwargs['h'] = client_context.MULTI_MONGOS_LB_URI + elif client_context.is_mongos: + kwargs['h'] = client_context.mongos_seeds() kwargs.update(spec.get('uriOptions', {})) server_api = spec.get('serverApi') if server_api: @@ -218,12 +253,12 @@ def _create_entity(self, entity_spec): deprecation_errors=server_api.get('deprecationErrors')) client = rs_or_single_client(**kwargs) self[spec['id']] = client - self._test_class.addCleanup(client.close) + self.test.addCleanup(client.close) return elif entity_type == 'database': client = self[spec['client']] if not isinstance(client, MongoClient): - self._test_class.fail( + self.test.fail( 'Expected entity %s to be of type MongoClient, got %s' % ( spec['client'], type(client))) options = parse_collection_or_database_options( @@ -234,7 +269,7 @@ def _create_entity(self, entity_spec): elif entity_type == 'collection': database = self[spec['database']] if not isinstance(database, Database): - self._test_class.fail( + self.test.fail( 'Expected entity %s to be of type Database, got %s' % ( spec['database'], type(database))) options = parse_collection_or_database_options( @@ -245,7 +280,7 @@ def _create_entity(self, entity_spec): elif entity_type == 'session': client = self[spec['client']] if not isinstance(client, MongoClient): - self._test_class.fail( + self.test.fail( 'Expected entity %s to be of type MongoClient, got %s' % ( spec['client'], type(client))) opts = camel_to_snake_args(spec.get('sessionOptions', {})) @@ -258,13 +293,13 @@ def _create_entity(self, entity_spec): session = client.start_session(**dict(opts)) self[spec['id']] = session self._session_lsids[spec['id']] = copy.deepcopy(session.session_id) - self._test_class.addCleanup(session.end_session) + self.test.addCleanup(session.end_session) return elif entity_type == 'bucket': # TODO: implement the 'bucket' entity type - self._test_class.skipTest( + self.test.skipTest( 'GridFS is not currently supported (PYTHON-2459)') - self._test_class.fail( + self.test.fail( 'Unable to create entity of unknown type %s' % (entity_type,)) def create_entities_from_spec(self, entity_spec): @@ -274,13 +309,13 @@ def create_entities_from_spec(self, entity_spec): def get_listener_for_client(self, client_name): client = self[client_name] if not isinstance(client, MongoClient): - self._test_class.fail( + self.test.fail( 'Expected entity %s to be of type MongoClient, got %s' % ( client_name, type(client))) listener = self._listeners.get(client_name) if not listener: - self._test_class.fail( + self.test.fail( 'No listeners configured for client %s' % (client_name,)) return listener @@ -288,7 +323,7 @@ def get_listener_for_client(self, client_name): def get_lsid_for_session(self, session_name): session = self[session_name] if not isinstance(session, ClientSession): - self._test_class.fail( + self.test.fail( 'Expected entity %s to be of type ClientSession, got %s' % ( session_name, type(session))) @@ -334,21 +369,21 @@ class MatchEvaluatorUtil(object): """Utility class that implements methods for evaluating matches as per the unified test format specification.""" def __init__(self, test_class): - self._test_class = test_class + self.test = test_class def _operation_exists(self, spec, actual, key_to_compare): if spec is True: - self._test_class.assertIn(key_to_compare, actual) + self.test.assertIn(key_to_compare, actual) elif spec is False: - self._test_class.assertNotIn(key_to_compare, actual) + self.test.assertNotIn(key_to_compare, actual) else: - self._test_class.fail( + self.test.fail( 'Expected boolean value for $$exists operator, got %s' % ( spec,)) def __type_alias_to_type(self, alias): if alias not in BSON_TYPE_ALIAS_MAP: - self._test_class.fail('Unrecognized BSON type alias %s' % (alias,)) + self.test.fail('Unrecognized BSON type alias %s' % (alias,)) return BSON_TYPE_ALIAS_MAP[alias] def _operation_type(self, spec, actual, key_to_compare): @@ -357,13 +392,13 @@ def _operation_type(self, spec, actual, key_to_compare): t for alias in spec for t in self.__type_alias_to_type(alias)]) else: permissible_types = self.__type_alias_to_type(spec) - self._test_class.assertIsInstance( + self.test.assertIsInstance( actual[key_to_compare], permissible_types) def _operation_matchesEntity(self, spec, actual, key_to_compare): - expected_entity = self._test_class.entity_map[spec] - self._test_class.assertIsInstance(expected_entity, abc.Mapping) - self._test_class.assertEqual(expected_entity, actual[key_to_compare]) + expected_entity = self.test.entity_map[spec] + self.test.assertIsInstance(expected_entity, abc.Mapping) + self.test.assertEqual(expected_entity, actual[key_to_compare]) def _operation_matchesHexBytes(self, spec, actual, key_to_compare): raise NotImplementedError @@ -380,8 +415,8 @@ def _operation_unsetOrMatches(self, spec, actual, key_to_compare): self.match_result(spec, actual[key_to_compare], in_recursive_call=True) def _operation_sessionLsid(self, spec, actual, key_to_compare): - expected_lsid = self._test_class.entity_map.get_lsid_for_session(spec) - self._test_class.assertEqual(expected_lsid, actual[key_to_compare]) + expected_lsid = self.test.entity_map.get_lsid_for_session(spec) + self.test.assertEqual(expected_lsid, actual[key_to_compare]) def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): @@ -389,7 +424,7 @@ def _evaluate_special_operation(self, opname, spec, actual, try: method = getattr(self, method_name) except AttributeError: - self._test_class.fail( + self.test.fail( 'Unsupported special matching operator %s' % (opname,)) else: method(spec, actual, key_to_compare) @@ -440,16 +475,16 @@ def _match_document(self, expectation, actual, is_root): if self._evaluate_if_special_operation(expectation, actual): return - self._test_class.assertIsInstance(actual, abc.Mapping) + self.test.assertIsInstance(actual, abc.Mapping) for key, value in expectation.items(): if self._evaluate_if_special_operation(expectation, actual, key): continue - self._test_class.assertIn(key, actual) + self.test.assertIn(key, actual) self.match_result(value, actual[key], in_recursive_call=True) if not is_root: - self._test_class.assertEqual( + self.test.assertEqual( set(expectation.keys()), set(actual.keys())) def match_result(self, expectation, actual, @@ -459,7 +494,7 @@ def match_result(self, expectation, actual, expectation, actual, is_root=not in_recursive_call) if isinstance(expectation, abc.MutableSequence): - self._test_class.assertIsInstance(actual, abc.MutableSequence) + self.test.assertIsInstance(actual, abc.MutableSequence) for e, a in zip(expectation, actual): if isinstance(e, abc.Mapping): self._match_document( @@ -471,21 +506,22 @@ def match_result(self, expectation, actual, # account for flexible numerics in element-wise comparison if (isinstance(expectation, int) or isinstance(expectation, float)): - self._test_class.assertEqual(expectation, actual) + self.test.assertEqual(expectation, actual) else: - self._test_class.assertIsInstance(actual, type(expectation)) - self._test_class.assertEqual(expectation, actual) + self.test.assertIsInstance(actual, type(expectation)) + self.test.assertEqual(expectation, actual) - def match_event(self, expectation, actual): - event_type, spec = next(iter(expectation.items())) + def match_event(self, event_type, expectation, actual): + name, spec = next(iter(expectation.items())) - # every event type has the commandName field - command_name = spec.get('commandName') - if command_name: - self._test_class.assertEqual(command_name, actual.command_name) + # every command event has the commandName field + if event_type == 'command': + command_name = spec.get('commandName') + if command_name: + self.test.assertEqual(command_name, actual.command_name) - if event_type == 'commandStartedEvent': - self._test_class.assertIsInstance(actual, CommandStartedEvent) + if name == 'commandStartedEvent': + self.test.assertIsInstance(actual, CommandStartedEvent) command = spec.get('command') database_name = spec.get('databaseName') if command: @@ -497,18 +533,47 @@ def match_event(self, expectation, actual): update.setdefault('multi', False) self.match_result(command, actual.command) if database_name: - self._test_class.assertEqual( + self.test.assertEqual( database_name, actual.database_name) - elif event_type == 'commandSucceededEvent': - self._test_class.assertIsInstance(actual, CommandSucceededEvent) + elif name == 'commandSucceededEvent': + self.test.assertIsInstance(actual, CommandSucceededEvent) reply = spec.get('reply') if reply: self.match_result(reply, actual.reply) - elif event_type == 'commandFailedEvent': - self._test_class.assertIsInstance(actual, CommandFailedEvent) + elif name == 'commandFailedEvent': + self.test.assertIsInstance(actual, CommandFailedEvent) + elif name == 'poolCreatedEvent': + self.test.assertIsInstance(actual, PoolCreatedEvent) + elif name == 'poolReadyEvent': + self.test.assertIsInstance(actual, PoolReadyEvent) + elif name == 'poolClearedEvent': + self.test.assertIsInstance(actual, PoolClearedEvent) + if spec.get('hasServiceId'): + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + elif name == 'poolClosedEvent': + self.test.assertIsInstance(actual, PoolClosedEvent) + elif name == 'connectionCreatedEvent': + self.test.assertIsInstance(actual, ConnectionCreatedEvent) + elif name == 'connectionReadyEvent': + self.test.assertIsInstance(actual, ConnectionReadyEvent) + elif name == 'connectionClosedEvent': + self.test.assertIsInstance(actual, ConnectionClosedEvent) + self.test.assertEqual(actual.reason, spec['reason']) + elif name == 'connectionCheckOutStartedEvent': + self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) + elif name == 'connectionCheckOutFailedEvent': + self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) + self.test.assertEqual(actual.reason, spec['reason']) + elif name == 'connectionCheckedOutEvent': + self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) + elif name == 'connectionCheckedInEvent': + self.test.assertIsInstance(actual, ConnectionCheckedInEvent) else: - self._test_class.fail( - 'Unsupported event type %s' % (event_type,)) + self.test.fail( + 'Unsupported event type %s' % (name,)) def coerce_result(opname, result): @@ -623,7 +688,11 @@ def process_error(self, exception, spec): pass if is_client_error: - self.assertNotIsInstance(exception, PyMongoError) + # Connection errors are considered client errors. + if isinstance(exception, ConnectionFailure): + self.assertNotIsInstance(exception, NotMasterError) + else: + self.assertNotIsInstance(exception, PyMongoError) if error_contains: if isinstance(exception, BulkWriteError): @@ -692,6 +761,12 @@ def _databaseOperation_runCommand(self, target, **kwargs): kwargs['command'] = ordered_command return target.command(**kwargs) + def _databaseOperation_listCollections(self, target, *args, **kwargs): + if 'batch_size' in kwargs: + kwargs['cursor'] = {'batchSize': kwargs.pop('batch_size')} + cursor = target.list_collections(*args, **kwargs) + return list(cursor) + def __entityOperation_aggregate(self, target, *args, **kwargs): self.__raise_if_unsupported('aggregate', target, Database, Collection) return list(target.aggregate(*args, **kwargs)) @@ -707,6 +782,16 @@ def _collectionOperation_find(self, target, *args, **kwargs): find_cursor = target.find(*args, **kwargs) return list(find_cursor) + def _collectionOperation_createFindCursor(self, target, *args, **kwargs): + self.__raise_if_unsupported('find', target, Collection) + return NonLazyCursor(target.find(*args, **kwargs)) + + def _collectionOperation_listIndexes(self, target, *args, **kwargs): + if 'batch_size' in kwargs: + self.skipTest('PyMongo does not support batch_size for ' + 'list_indexes') + return target.list_indexes(*args, **kwargs) + def _sessionOperation_withTransaction(self, target, *args, **kwargs): if client_context.storage_engine == 'mmapv1': self.skipTest('MMAPv1 does not support document-level locking') @@ -725,11 +810,27 @@ def _changeStreamOperation_iterateUntilDocumentOrError(self, target, 'iterateUntilDocumentOrError', target, ChangeStream) return next(target) + def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported( + 'iterateUntilDocumentOrError', target, NonLazyCursor) + return next(target) + + def _cursor_close(self, target, *args, **kwargs): + self.__raise_if_unsupported('close', target, NonLazyCursor) + return target.close() + def run_entity_operation(self, spec): target = self.entity_map[spec['object']] opname = spec['name'] opargs = spec.get('arguments') expect_error = spec.get('expectError') + save_as_entity = spec.get('saveResultAsEntity') + expect_result = spec.get('expectResult') + ignore = spec.get('ignoreResultAndError') + if ignore and (expect_error or save_as_entity or expect_result): + raise ValueError( + 'ignoreResultAndError is incompatible with saveResultAsEntity' + ', expectError, and expectResult') if opargs: arguments = parse_spec_options(copy.deepcopy(opargs)) prepare_spec_arguments(spec, arguments, camel_to_snake(opname), @@ -745,6 +846,8 @@ def run_entity_operation(self, spec): method_name = '_collectionOperation_%s' % (opname,) elif isinstance(target, ChangeStream): method_name = '_changeStreamOperation_%s' % (opname,) + elif isinstance(target, NonLazyCursor): + method_name = '_cursor_%s' % (opname,) elif isinstance(target, ClientSession): method_name = '_sessionOperation_%s' % (opname,) elif isinstance(target, GridFSBucket): @@ -766,15 +869,16 @@ def run_entity_operation(self, spec): try: result = cmd(**dict(arguments)) except Exception as exc: + if ignore: + return if expect_error: return self.process_error(exc, expect_error) raise - if 'expectResult' in spec: + if expect_result: actual = coerce_result(opname, result) - self.match_evaluator.match_result(spec['expectResult'], actual) + self.match_evaluator.match_result(expect_result, actual) - save_as_entity = spec.get('saveResultAsEntity') if save_as_entity: self.entity_map[save_as_entity] = result @@ -821,7 +925,7 @@ def _testOperation_assertSessionUnpinned(self, spec): def __get_last_two_command_lsids(self, listener): cmd_started_events = [] - for event in reversed(listener.results): + for event in reversed(listener.events): if isinstance(event, CommandStartedEvent): cmd_started_events.append(event) if len(cmd_started_events) < 2: @@ -869,6 +973,11 @@ def _testOperation_assertIndexNotExists(self, spec): for index in collection.list_indexes(): self.assertNotEqual(spec['indexName'], index['name']) + def _testOperation_assertNumberConnectionsCheckedOut(self, spec): + client = self.entity_map[spec['client']] + pool = get_pool(client) + self.assertEqual(spec['connections'], pool.active_sockets) + def run_special_operation(self, spec): opname = spec['name'] method_name = '_testOperation_%s' % (opname,) @@ -891,19 +1000,23 @@ def check_events(self, spec): for event_spec in spec: client_name = event_spec['client'] events = event_spec['events'] - listener = self.entity_map.get_listener_for_client(client_name) + # Valid types: 'command', 'cmap' + event_type = event_spec.get('eventType', 'command') + assert event_type in ('command', 'cmap') + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events(event_type) if len(events) == 0: - self.assertEqual(listener.results, []) + self.assertEqual(actual_events, []) continue - if len(events) > len(listener.results): + if len(events) > len(actual_events): self.fail('Expected to see %s events, got %s' % ( - len(events), len(listener.results))) + len(events), len(actual_events))) for idx, expected_event in enumerate(events): self.match_evaluator.match_event( - expected_event, listener.results[idx]) + event_type, expected_event, actual_events[idx]) def verify_outcome(self, spec): for collection_data in spec: diff --git a/test/utils.py b/test/utils.py index f3d7dbe2aa..682782a432 100644 --- a/test/utils.py +++ b/test/utils.py @@ -277,7 +277,7 @@ def _reset(self): def ready(self): pass - def reset(self): + def reset(self, service_id=None): self._reset() def reset_without_pause(self): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 17175884da..5f79789ec8 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -504,15 +504,16 @@ def run_scenario(self, scenario_def, test): client_context.storage_engine == 'mmapv1'): self.skipTest("MMAPv1 does not support retryWrites=True") use_multi_mongos = test['useMultipleMongoses'] - if client_context.is_mongos and use_multi_mongos: - client = rs_client( - client_context.mongos_seeds(), - event_listeners=[listener, pool_listener, server_listener], - **client_options) - else: - client = rs_client( - event_listeners=[listener, pool_listener, server_listener], - **client_options) + host = None + if use_multi_mongos: + if client_context.load_balancer: + host = client_context.MULTI_MONGOS_LB_URI + elif client_context.is_mongos: + host = client_context.mongos_seeds() + client = rs_client( + h=host, + event_listeners=[listener, pool_listener, server_listener], + **client_options) self.scenario_client = client self.listener = listener self.pool_listener = pool_listener From d26bf933ed22f9355959ed6c957b6fcce1ead228 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 27 May 2021 17:16:50 -0700 Subject: [PATCH 0354/2111] PYTHON-1636 Support exhaust cursors in OP_MSG (#629) --- pymongo/cursor.py | 18 ++++++++++---- pymongo/message.py | 43 +++++++++++++++++++------------- pymongo/response.py | 15 +++++++++-- pymongo/server.py | 31 ++++++++++++++--------- test/test_monitoring.py | 55 +++++++++++++++++------------------------ 5 files changed, 94 insertions(+), 68 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 1340ca977f..c6a0246bfa 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -78,17 +78,21 @@ class CursorType(object): # This has to be an old style class due to # http://bugs.jython.org/issue1057 -class _SocketManager: +class _ExhaustManager: """Used with exhaust cursors to ensure the socket is returned. """ - def __init__(self, sock, pool): + def __init__(self, sock, pool, more_to_come): self.sock = sock self.pool = pool + self.more_to_come = more_to_come self.__closed = False def __del__(self): self.close() + def update_exhaust(self, more_to_come): + self.more_to_come = more_to_come + def close(self): """Return this instance's socket to the connection pool. """ @@ -1039,10 +1043,14 @@ def __send_message(self, operation): raise self.__address = response.address - if self.__exhaust and not self.__exhaust_mgr: + if self.__exhaust: # 'response' is an ExhaustResponse. - self.__exhaust_mgr = _SocketManager(response.socket_info, - response.pool) + if not self.__exhaust_mgr: + self.__exhaust_mgr = _ExhaustManager(response.socket_info, + response.pool, + response.more_to_come) + else: + self.__exhaust_mgr.update_exhaust(response.more_to_come) cmd_name = operation.name docs = response.docs diff --git a/pymongo/message.py b/pymongo/message.py index d7e4c0a7d0..307260263f 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -267,9 +267,11 @@ def namespace(self): def use_command(self, sock_info, exhaust): use_find_cmd = False - if sock_info.max_wire_version >= 4: - if not exhaust: - use_find_cmd = True + if sock_info.max_wire_version >= 4 and not exhaust: + use_find_cmd = True + elif sock_info.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_find_cmd = True elif not self.read_concern.ok_for_legacy: raise ConfigurationError( 'read concern level of %s is not valid ' @@ -396,8 +398,15 @@ def namespace(self): return "%s.%s" % (self.db, self.coll) def use_command(self, sock_info, exhaust): + use_cmd = False + if sock_info.max_wire_version >= 4 and not exhaust: + use_cmd = True + elif sock_info.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_cmd = True + sock_info.validate_session(self.client, self.session) - return sock_info.max_wire_version >= 4 and not exhaust + return use_cmd def as_command(self, sock_info): """Return a getMore command document for this query.""" @@ -431,8 +440,12 @@ def get_message(self, dummy0, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] if sock_info.op_msg_enabled: + if self.exhaust_mgr: + flags = _OpMsg.EXHAUST_ALLOWED + else: + flags = 0 request_id, msg, size, _ = _op_msg( - 0, spec, self.db, None, + flags, spec, self.db, None, False, False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -446,27 +459,23 @@ class _RawBatchQuery(_Query): def use_command(self, socket_info, exhaust): # Compatibility checks. super(_RawBatchQuery, self).use_command(socket_info, exhaust) - # Use OP_MSG when available. - if socket_info.op_msg_enabled and not exhaust: + if socket_info.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif socket_info.op_msg_enabled and not exhaust: return True return False - def get_message(self, set_slave_ok, sock_info, use_cmd=False): - return super(_RawBatchQuery, self).get_message( - set_slave_ok, sock_info, use_cmd) - class _RawBatchGetMore(_GetMore): def use_command(self, socket_info, exhaust): - # Use OP_MSG when available. - if socket_info.op_msg_enabled and not exhaust: + if socket_info.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif socket_info.op_msg_enabled and not exhaust: return True return False - def get_message(self, set_slave_ok, sock_info, use_cmd=False): - return super(_RawBatchGetMore, self).get_message( - set_slave_ok, sock_info, use_cmd) - class _CursorAddress(tuple): """The server address (host, port) of a cursor, with namespace property.""" diff --git a/pymongo/response.py b/pymongo/response.py index 56cc532f57..474e2c4d3b 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -67,11 +67,12 @@ def docs(self): """The decoded document(s).""" return self._docs + class ExhaustResponse(Response): - __slots__ = ('_socket_info', '_pool') + __slots__ = ('_socket_info', '_pool', '_more_to_come') def __init__(self, data, address, socket_info, pool, request_id, duration, - from_command, docs): + from_command, docs, more_to_come): """Represent a response to an exhaust cursor's initial query. :Parameters: @@ -82,6 +83,9 @@ def __init__(self, data, address, socket_info, pool, request_id, duration, - `request_id`: The request id of this operation. - `duration`: The duration of the operation. - `from_command`: If the response is the result of a db command. + - `docs`: List of documents. + - `more_to_come`: Bool indicating whether cursor is ready to be + exhausted. """ super(ExhaustResponse, self).__init__(data, address, @@ -90,6 +94,7 @@ def __init__(self, data, address, socket_info, pool, request_id, duration, from_command, docs) self._socket_info = socket_info self._pool = pool + self._more_to_come = more_to_come @property def socket_info(self): @@ -105,3 +110,9 @@ def socket_info(self): def pool(self): """The Pool from which the SocketInfo came.""" return self._pool + + @property + def more_to_come(self): + """If true, server is ready to send batches on the socket until the + result set is exhausted or there is an error.""" + return self._more_to_come diff --git a/pymongo/server.py b/pymongo/server.py index e9e29f49ea..389f8e7290 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -20,7 +20,7 @@ from pymongo.errors import NotMasterError, OperationFailure from pymongo.helpers import _check_command_response -from pymongo.message import _convert_exception +from pymongo.message import _convert_exception, _OpMsg from pymongo.response import Response, ExhaustResponse from pymongo.server_type import SERVER_TYPE @@ -95,16 +95,15 @@ def run_operation_with_response( if publish: start = datetime.now() - send_message = not operation.exhaust_mgr - - if send_message: - use_cmd = operation.use_command(sock_info, exhaust) + use_cmd = operation.use_command(sock_info, exhaust) + more_to_come = (operation.exhaust_mgr + and operation.exhaust_mgr.more_to_come) + if more_to_come: + request_id = 0 + else: message = operation.get_message( set_slave_okay, sock_info, use_cmd) request_id, data, max_doc_size = self._split_message(message) - else: - use_cmd = False - request_id = 0 if publish: cmd, dbn = operation.as_command(sock_info) @@ -113,11 +112,11 @@ def run_operation_with_response( start = datetime.now() try: - if send_message: + if more_to_come: + reply = sock_info.receive_message(None) + else: sock_info.send_message(data, max_doc_size) reply = sock_info.receive_message(request_id) - else: - reply = sock_info.receive_message(None) # Unpack and check for command errors. if use_cmd: @@ -176,6 +175,13 @@ def run_operation_with_response( decrypted, operation.codec_options, user_fields) if exhaust: + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. + more_to_come = bool(reply.cursor_id) response = ExhaustResponse( data=reply, address=self._description.address, @@ -184,7 +190,8 @@ def run_operation_with_response( duration=duration, request_id=request_id, from_command=use_cmd, - docs=docs) + docs=docs, + more_to_come=more_to_come) else: response = Response( data=reply, diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 31e8282828..16c0166b5b 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -456,7 +456,7 @@ def test_not_master_error(self): @client_context.require_no_mongos def test_exhaust(self): self.client.pymongo_test.test.drop() - self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) self.listener.results.clear() cursor = self.client.pymongo_test.test.find( projection={'_id': False}, @@ -470,12 +470,10 @@ def test_exhaust(self): self.assertEqual(0, len(results['failed'])) self.assertTrue( isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 5)]), - started.command) + self.assertEqualCommand(SON([('find', 'test'), + ('filter', {}), + ('projection', {'_id': False}), + ('batchSize', 5)]), started.command) self.assertEqual('find', started.command_name) self.assertEqual(cursor.address, started.connection_id) self.assertEqual('pymongo_test', started.database_name) @@ -496,32 +494,25 @@ def test_exhaust(self): self.listener.results.clear() tuple(cursor) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 5)]), - started.command) - self.assertEqual('getMore', started.command_name) - self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) - self.assertEqual(cursor.address, succeeded.connection_id) - expected_result = { - 'cursor': {'id': 0, - 'ns': 'pymongo_test.test', - 'nextBatch': [{} for _ in range(5)]}, - 'ok': 1} - self.assertEqualReply(expected_result, succeeded.reply) + for event in results['started']: + self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([('getMore', cursor_id), + ('collection', 'test'), + ('batchSize', 5)]), event.command) + self.assertEqual('getMore', event.command_name) + self.assertEqual(cursor.address, event.connection_id) + self.assertEqual('pymongo_test', event.database_name) + self.assertTrue(isinstance(event.request_id, int)) + for event in results['succeeded']: + self.assertTrue( + isinstance(event, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(event.duration_micros, int)) + self.assertEqual('getMore', event.command_name) + self.assertTrue(isinstance(event.request_id, int)) + self.assertEqual(cursor.address, event.connection_id) + # Last getMore receives a response with cursor id 0. + self.assertEqual(0, results['succeeded'][-1].reply['cursor']['id']) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): From c36ec37281be1d663f8eba1169b29b09fffef451 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 7 Jun 2021 12:12:49 -0700 Subject: [PATCH 0355/2111] PYTHON-2727 Test against MongoDB 5.0 in Evergreen (#631) --- .evergreen/config.yml | 120 ++++++++++++++++++++++++++---------------- 1 file changed, 74 insertions(+), 46 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 7dc8a92c45..622582d274 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1129,6 +1129,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-5.0-standalone" + tags: ["5.0", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "5.0" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-5.0-replica_set" + tags: ["5.0", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "5.0" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-5.0-sharded_cluster" + tags: ["5.0", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "5.0" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-latest-standalone" tags: ["latest", "standalone"] commands: @@ -1542,6 +1569,22 @@ tasks: - func: "run aws auth test with aws EC2 credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-5.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "5.0" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" + - name: "aws-auth-test-latest" commands: - func: "bootstrap mongo-orchestration" @@ -1599,6 +1642,7 @@ axes: run_on: amazon1-2018-test batchtime: 10080 # 7 days variables: + python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - id: archlinux-test display_name: "Archlinux" @@ -1643,10 +1687,10 @@ axes: python3_binary: "/opt/python/3.8/bin/python3" - id: ubuntu-18.04 display_name: "Ubuntu 18.04" - run_on: ubuntu1804-test + run_on: ubuntu1804-small batchtime: 10080 # 7 days variables: - python3_binary: python3 + python3_binary: "/opt/python/3.8/bin/python3" - id: ubuntu-20.04 display_name: "Ubuntu 20.04" run_on: ubuntu2004-small @@ -1771,6 +1815,10 @@ axes: display_name: "MongoDB 4.4" variables: VERSION: "4.4" + - id: "5.0" + display_name: "MongoDB 5.0" + variables: + VERSION: "5.0" - id: "latest" display_name: "MongoDB latest" variables: @@ -1989,6 +2037,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: &all-server-versions - ".latest" + - ".5.0" - ".4.4" - ".4.2" - ".4.0" @@ -2008,6 +2057,7 @@ buildvariants: display_name: "Encryption ${platform} ${auth-ssl}" tasks: &encryption-server-versions - ".latest" + - ".5.0" - ".4.4" - ".4.2" - ".4.0" @@ -2030,22 +2080,6 @@ buildvariants: - ".3.0" - ".2.6" -- matrix_name: "tests-os-requires-32" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=3.2 with SSL. - - ubuntu-16.04 - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - - ".3.4" - - ".3.2" - - matrix_name: "test-macos" matrix_spec: platform: @@ -2062,6 +2096,7 @@ buildvariants: display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".5.0" - ".4.4" - ".4.2" - ".4.0" @@ -2111,14 +2146,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: ubuntu-16.04 + platform: awslinux python-version: ["3.6", "3.7", "3.8", "3.9"] auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: ubuntu-16.04 + platform: awslinux python-version: ["3.6", "3.8", "3.9"] auth: "noauth" ssl: "ssl" @@ -2126,8 +2161,8 @@ buildvariants: display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - '.replica_set !.2.6 !.3.0' - # Test standalone and sharded only on 4.4. - - '.4.4' + # Test standalone and sharded only on 5.0 and later. + - '.5.0' - matrix_name: "tests-pyopenssl-pypy" matrix_spec: @@ -2139,8 +2174,8 @@ buildvariants: display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - '.replica_set !.2.6 !.3.0 !.3.2 !.3.4' - # Test standalone and sharded only on 4.4. - - '.4.4' + # Test standalone and sharded only on 5.0 and later. + - '.5.0' - matrix_name: "tests-pyopenssl-macOS" matrix_spec: @@ -2201,27 +2236,28 @@ buildvariants: display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu16-compression" +- matrix_name: "tests-python-version-ubuntu18-compression" matrix_spec: # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 - platform: ubuntu-16.04 + platform: ubuntu-18.04 python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: ubuntu-16.04 + - platform: ubuntu-18.04 python-version: ["pypy3.6", "pypy3.7"] c-extensions: "with-c-extensions" compression: "*" # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy - - platform: ubuntu-16.04 + - platform: ubuntu-18.04 python-version: ["3.8", "3.9"] c-extensions: "*" compression: ["snappy"] display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" + - "test-5.0-standalone" - "test-4.4-standalone" - "test-4.2-standalone" rules: @@ -2299,6 +2335,7 @@ buildvariants: then: add_tasks: - "test-latest-standalone" + - "test-5.0-standalone" - "test-4.4-standalone" - "test-4.2-standalone" - "test-4.0-standalone" @@ -2433,24 +2470,13 @@ buildvariants: tasks: # Versioned API was introduced in MongoDB 4.7 - "test-latest-standalone" + - "test-5.0-standalone" - matrix_name: "ocsp-test" matrix_spec: - platform: ubuntu-16.04 - python-version: ["3.6", "3.9"] - mongodb-version: ["4.4", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" - batchtime: 20160 # 14 days - tasks: - - name: ".ocsp" - -- matrix_name: "ocsp-test-pypy" - matrix_spec: - platform: debian92 - python-version: ["pypy3.6", "pypy3.7"] - mongodb-version: ["4.4", "latest"] + platform: awslinux + python-version: ["3.6", "3.9", "pypy3.6", "pypy3.7"] + mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" @@ -2462,7 +2488,7 @@ buildvariants: matrix_spec: platform: windows-64-vsMulti-small python-version-windows: ["3.6", "3.9"] - mongodb-version: ["4.4", "latest"] + mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" @@ -2474,7 +2500,7 @@ buildvariants: - matrix_name: "ocsp-test-macos" matrix_spec: platform: macos-1014 - mongodb-version: ["4.4", "latest"] + mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${mongodb-version}" @@ -2490,6 +2516,7 @@ buildvariants: display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" - matrix_name: "aws-auth-test-windows" @@ -2499,12 +2526,13 @@ buildvariants: display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" tasks: - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" - matrix_name: "load-balancer" matrix_spec: platform: ubuntu-18.04 - mongodb-version: ["latest"] + mongodb-version: ["5.0", "latest"] auth-ssl: "*" python-version: ["3.6", "3.9"] loadbalancer: "*" From 048f54ddde999459d27ff6d5a6fc5db0de28a490 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 8 Jun 2021 14:07:53 -0700 Subject: [PATCH 0356/2111] PYTHON-2710 Version API connection examples for ecosystem docs (#636) * PYTHON-2710 Version API connection examples for ecosystem docs * fix * more fixes --- test/test_examples.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/test/test_examples.py b/test/test_examples.py index c0913b34c3..3e5d8bc86c 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -24,6 +24,7 @@ from pymongo.errors import ConnectionFailure, OperationFailure from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern from test import client_context, unittest, IntegrationTest @@ -1108,5 +1109,34 @@ def test_causal_consistency(self): # End Causal Consistency Example 2 +class TestVersionedApiExamples(IntegrationTest): + @client_context.require_version_min(4, 7) + def test_versioned_api(self): + # Versioned API examples + MongoClient = lambda _, server_api: rs_client( + server_api=server_api, connect=False) + uri = None + + # Start Versioned API Example 1 + from pymongo.server_api import ServerApi + client = MongoClient(uri, server_api=ServerApi("1")) + # End Versioned API Example 1 + + # Start Versioned API Example 2 + client = MongoClient( + uri, server_api=ServerApi("1", strict=True)) + # End Versioned API Example 2 + + # Start Versioned API Example 3 + client = MongoClient( + uri, server_api=ServerApi("1", strict=False)) + # End Versioned API Example 3 + + # Start Versioned API Example 4 + client = MongoClient( + uri, server_api=ServerApi("1", deprecation_errors=True)) + # End Versioned API Example 4 + + if __name__ == "__main__": unittest.main() From b69d00d21b8445a5ee1e310ec2fdbf3e237f1de9 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 8 Jun 2021 14:13:43 -0700 Subject: [PATCH 0357/2111] =?UTF-8?q?PYTHON-2734=20Document=20that=20find?= =?UTF-8?q?=5Fraw=5Fbatches=20now=20sends=20user-specified=20R=E2=80=A6=20?= =?UTF-8?q?(#634)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * PYTHON-2734 Document that find_raw_batches now sends user-specified ReadConcern to the server instead of raising ConfigurationError * fix * fix2 * fix 3 --- pymongo/collection.py | 4 ++++ test/test_cursor.py | 1 + 2 files changed, 5 insertions(+) diff --git a/pymongo/collection.py b/pymongo/collection.py index 62fc3d256b..203ea32ae1 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1483,6 +1483,10 @@ def find_raw_batches(self, *args, **kwargs): .. note:: find_raw_batches does not support sessions or auto encryption. + .. versionchanged:: 3.12 + Instead of ignoring the user-specified read concern, this method + now sends it to the server when connected to MongoDB 3.6+. + .. versionadded:: 3.6 """ # OP_MSG with document stream returns is required to support diff --git a/test/test_cursor.py b/test/test_cursor.py index 30acb89c1b..dc0be8f688 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1514,6 +1514,7 @@ def test_collation_error(self): next(self.db.test.find_raw_batches(collation=Collation('en_US'))) @client_context.require_version_min(3, 2) + @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): self.db.get_collection( "test", write_concern=WriteConcern(w="majority")).insert_one({}) From 9c1ff6ad9da88692014848a5fed01f964138de4d Mon Sep 17 00:00:00 2001 From: Tyler Willey Date: Fri, 28 May 2021 13:06:44 -0600 Subject: [PATCH 0358/2111] PYTHON-2743 Fix compatibility with gevent.Timeout (#633) gevent.Timeout extends BaseException, not Exception. --- pymongo/pool.py | 6 +++--- test/test_client.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 23ccdcab67..df4a430cd6 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1255,7 +1255,7 @@ def connect(self, all_credentials=None): try: sock = _configured_socket(self.address, self.opts) - except Exception as error: + except BaseException as error: if self.enabled_for_cmap: listeners.publish_connection_closed( self.address, conn_id, ConnectionClosedReason.ERROR) @@ -1272,7 +1272,7 @@ def connect(self, all_credentials=None): try: sock_info.check_auth(all_credentials) - except Exception: + except BaseException: sock_info.close_socket(ConnectionClosedReason.ERROR) raise @@ -1415,7 +1415,7 @@ def _get_socket(self, all_credentials): self._pending -= 1 self._max_connecting_cond.notify() sock_info.check_auth(all_credentials) - except Exception: + except BaseException: if sock_info: # We checked out a socket but authentication failed. sock_info.close_socket(ConnectionClosedReason.ERROR) diff --git a/test/test_client.py b/test/test_client.py index 5d57c32c1c..a8246e3380 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1961,6 +1961,42 @@ def timeout_task(): self.assertIsNone(tt.get()) self.assertIsNone(ct.get()) + def test_gevent_timeout_when_creating_connection(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import Timeout, spawn + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.pymongo_test.test + pool = get_pool(client) + + # Patch the pool to delay the connect method. + def delayed_connect(*args, **kwargs): + time.sleep(3) + return pool.__class__.connect(pool, *args, **kwargs) + + pool.connect = delayed_connect + + def timeout_task(): + with Timeout(1): + try: + coll.find_one({}) + return False + except Timeout: + return True + + tt = spawn(timeout_task) + tt.join(10) + + # Assert that we got our active_sockets count back + self.assertEqual(pool.active_sockets, 0) + # Assert the greenlet is dead + self.assertTrue(tt.dead) + # Assert that the Timeout was raised all the way to the try + self.assertTrue(tt.get()) + # Unpatch the instance. + del pool.connect + class TestClientPool(MockClientTest): From 112ee69de88f456969d12459d87ce4bcbaac4d5c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Jun 2021 09:52:30 -0700 Subject: [PATCH 0359/2111] PYTHON-2674 Pool.reset only clears connections to the given serviceId (#628) --- pymongo/mongo_client.py | 2 +- pymongo/pool.py | 64 ++++++++++++++++++++++++--- pymongo/topology.py | 6 ++- test/test_client.py | 7 +-- test/test_discovery_and_monitoring.py | 10 +++-- test/test_topology.py | 4 +- test/unified_format.py | 10 ++++- test/utils.py | 11 +++-- 8 files changed, 90 insertions(+), 24 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 728c9a1670..dde4ef2426 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1977,7 +1977,7 @@ def __init__(self, client, server, session): # "Note that when a network error occurs before the handshake # completes then the error's generation number is the generation # of the pool at the time the connection attempt was started." - self.sock_generation = server.pool.generation + self.sock_generation = server.pool.gen.get_overall() self.completed_handshake = False self.service_id = None diff --git a/pymongo/pool.py b/pymongo/pool.py index df4a430cd6..53766f65f3 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -530,7 +530,8 @@ def __init__(self, sock, pool, address, id): # The pool's generation changes with each reset() so we can close # sockets created before the last reset. - self.generation = pool.generation + self.pool_gen = pool.gen + self.generation = self.pool_gen.get_overall() self.ready = False self.cancel_context = None if not pool.handshake: @@ -616,6 +617,7 @@ def _ismaster(self, cluster_time, topology_version, 'Driver attempted to initialize in load balancing mode' ' but the server does not support this mode') self.service_id = ismaster.service_id + self.generation = self.pool_gen.get(self.service_id) return ismaster def _next_reply(self): @@ -1044,6 +1046,37 @@ class _PoolClosedError(PyMongoError): pass +class _PoolGeneration(object): + def __init__(self): + # Maps service_id to generation. + self._generations = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id): + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self): + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id): + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen, service_id): + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + class PoolState(object): PAUSED = 1 READY = 2 @@ -1080,7 +1113,8 @@ def __init__(self, address, options, handshake=True): # Keep track of resets, so we notice sockets created before the most # recent reset and close them. - self.generation = 0 + # self.generation = 0 + self.gen = _PoolGeneration() self.pid = os.getpid() self.address = address self.opts = options @@ -1137,13 +1171,25 @@ def _reset(self, close, pause=True, service_id=None): if (self.opts.pause_enabled and pause and not self.opts.load_balanced): old_state, self.state = self.state, PoolState.PAUSED - self.generation += 1 + self.gen.inc(service_id) newpid = os.getpid() if self.pid != newpid: self.pid = newpid self.active_sockets = 0 self.operation_count = 0 - sockets, self.sockets = self.sockets, collections.deque() + if service_id is None: + sockets, self.sockets = self.sockets, collections.deque() + else: + discard = collections.deque() + keep = collections.deque() + for sock_info in self.sockets: + if sock_info.service_id == service_id: + discard.append(sock_info) + else: + keep.append(sock_info) + sockets = discard + self.sockets = keep + if close: self.state = PoolState.CLOSED # Clear the wait queue @@ -1184,6 +1230,9 @@ def reset_without_pause(self): def close(self): self._reset(close=True) + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) + def remove_stale_sockets(self, reference_generation, all_credentials): """Removes stale sockets then adds new ones if pool is too small and has not been reset. The `reference_generation` argument specifies the @@ -1222,7 +1271,7 @@ def remove_stale_sockets(self, reference_generation, all_credentials): with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. - if self.generation != reference_generation: + if self.gen.get_overall() != reference_generation: sock_info.close_socket(ConnectionClosedReason.STALE) return self.sockets.appendleft(sock_info) @@ -1450,7 +1499,8 @@ def return_socket(self, sock_info): with self.lock: # Hold the lock to ensure this section does not race with # Pool.reset(). - if sock_info.generation != self.generation: + if self.stale_generation(sock_info.generation, + sock_info.service_id): sock_info.close_socket(ConnectionClosedReason.STALE) else: sock_info.update_last_checkin_time() @@ -1493,7 +1543,7 @@ def _perished(self, sock_info): sock_info.close_socket(ConnectionClosedReason.ERROR) return True - if sock_info.generation != self.generation: + if self.stale_generation(sock_info.generation, sock_info.service_id): sock_info.close_socket(ConnectionClosedReason.STALE) return True diff --git a/pymongo/topology.py b/pymongo/topology.py index 18d5c4c8f4..baa4293ddb 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -447,7 +447,8 @@ def update_pool(self, all_credentials): # Only update pools for data-bearing servers. for sd in self.data_bearing_servers(): server = self._servers[sd.address] - servers.append((server, server.pool.generation)) + servers.append((server, + server.pool.gen.get_overall())) for server, generation in servers: try: @@ -577,7 +578,8 @@ def _is_stale_error(self, address, err_ctx): # Another thread removed this server from the topology. return True - if err_ctx.sock_generation != server._pool.generation: + if server._pool.stale_generation( + err_ctx.sock_generation, err_ctx.service_id): # This is an outdated error from a previous pool version. return True diff --git a/test/test_client.py b/test/test_client.py index a8246e3380..88adaac6ae 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1468,7 +1468,7 @@ def test_reset_during_update_pool(self): self.addCleanup(client.close) client.admin.command('ping') pool = get_pool(client) - generation = pool.generation + generation = pool.gen.get_overall() # Continuously reset the pool. class ResetPoolThread(threading.Thread): @@ -1483,7 +1483,8 @@ def stop(self): def run(self): while self.running: exc = AutoReconnect('mock pool error') - ctx = _ErrorContext(exc, 0, pool.generation, False, None) + ctx = _ErrorContext( + exc, 0, pool.gen.get_overall(), False, None) client._topology.handle_error(pool.address, ctx) time.sleep(0.001) @@ -1497,7 +1498,7 @@ def run(self): for _ in range(10): client._topology.update_pool( client._MongoClient__all_credentials) - if generation != pool.generation: + if generation != pool.gen.get_overall(): break finally: t.stop() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 7fa96b5e18..0cbc40f1ff 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -87,7 +87,8 @@ def got_app_error(topology, app_error): server_address = common.partition_node(app_error['address']) server = topology.get_server_by_address(server_address) error_type = app_error['type'] - generation = app_error.get('generation', server.pool.generation) + generation = app_error.get( + 'generation', server.pool.gen.get_overall()) when = app_error['when'] max_wire_version = app_error['maxWireVersion'] # XXX: We could get better test coverage by mocking the errors on the @@ -181,7 +182,7 @@ def check_outcome(self, topology, outcome): if expected_pool: self.assertEqual( expected_pool.get('generation'), - actual_server.pool.generation) + actual_server.pool.gen.get_overall()) self.assertEqual(outcome['setName'], topology.description.replica_set_name) self.assertEqual(outcome.get('logicalSessionTimeoutMinutes'), @@ -270,7 +271,7 @@ def test_ignore_stale_connection_errors(self): # Wait for initial discovery. client.admin.command('ping') pool = get_pool(client) - starting_generation = pool.generation + starting_generation = pool.gen.get_overall() wait_until(lambda: len(pool.sockets) == N_THREADS, 'created sockets') def mock_command(*args, **kwargs): @@ -296,7 +297,8 @@ def insert_command(i): t.join() # Expect a single pool reset for the network error - self.assertEqual(starting_generation+1, pool.generation) + self.assertEqual( + starting_generation+1, pool.gen.get_overall()) # Server should be selectable. client.admin.command('ping') diff --git a/test/test_topology.py b/test/test_topology.py index 5e2f683f70..bb94207f4d 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -659,11 +659,11 @@ def _check_with_socket(self, *args, **kwargs): self.addCleanup(t.close) server = wait_for_master(t) self.assertEqual(1, ismaster_count[0]) - generation = server.pool.generation + generation = server.pool.gen.get_overall() # Pool is reset by ismaster failure. t.request_check_all() - self.assertNotEqual(generation, server.pool.generation) + self.assertNotEqual(generation, server.pool.gen.get_overall()) def test_ismaster_retry(self): # ismaster succeeds at first, then raises socket error, then succeeds. diff --git a/test/unified_format.py b/test/unified_format.py index 7fa7f55136..91e02e9e26 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -124,8 +124,16 @@ def is_run_on_requirement_satisfied(requirement): elif client_context.server_parameters[param] != val: params_satisfied = False + auth_satisfied = True + req_auth = requirement.get('auth') + if req_auth is not None: + if req_auth: + auth_satisfied = client_context.auth_enabled + else: + auth_satisfied = not client_context.auth_enabled + return (topology_satisfied and min_version_satisfied and - max_version_satisfied and params_satisfied) + max_version_satisfied and params_satisfied and auth_satisfied) def parse_collection_or_database_options(options): diff --git a/test/utils.py b/test/utils.py index 682782a432..fa2865c837 100644 --- a/test/utils.py +++ b/test/utils.py @@ -39,7 +39,7 @@ from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure from pymongo.monitoring import _SENSITIVE_COMMANDS -from pymongo.pool import _CancellationContext +from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, @@ -259,20 +259,23 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool(object): def __init__(self, address, options, handshake=True): - self.generation = 0 + self.gen = _PoolGeneration() self._lock = threading.Lock() self.opts = options self.operation_count = 0 + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) + def get_socket(self, all_credentials, checkout=False): return MockSocketInfo() def return_socket(self, *args, **kwargs): pass - def _reset(self): + def _reset(self, service_id=None): with self._lock: - self.generation += 1 + self.gen.inc(service_id) def ready(self): pass From 7a48831124015d5636f5478817727a7cd4be8d8c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Jun 2021 14:12:18 -0700 Subject: [PATCH 0360/2111] PYTHON-2673 Add load balancer connection pinning spec tests --- test/load_balancer/unified/cursors.json | 1228 +++++++++++++ .../unified/sdam-error-handling.json | 508 ++++++ test/load_balancer/unified/transactions.json | 1606 +++++++++++++++++ 3 files changed, 3342 insertions(+) create mode 100644 test/load_balancer/unified/cursors.json create mode 100644 test/load_balancer/unified/sdam-error-handling.json create mode 100644 test/load_balancer/unified/transactions.json diff --git a/test/load_balancer/unified/cursors.json b/test/load_balancer/unified/cursors.json new file mode 100644 index 0000000000..43e4fbb4f6 --- /dev/null +++ b/test/load_balancer/unified/cursors.json @@ -0,0 +1,1228 @@ +{ + "description": "cursors are correctly pinned to connections for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database0", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "database0Name", + "documents": [] + }, + { + "collectionName": "coll2", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "no connection is pinned if all documents are returned in the initial batch", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {} + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned when the cursor is drained", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 3 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "close", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are returned to the pool when the cursor is closed", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connections are not returned after an network error during getMore", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + } + ] + } + ] + }, + { + "description": "pinned connections are returned after a network error during a killCursors request", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandFailedEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + } + ] + } + ] + }, + { + "description": "pinned connections are not returned to the pool after a non-network error on getMore", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "errorCode": 7 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "firstBatch": { + "$$type": "array" + }, + "ns": { + "$$type": "string" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "aggregate pins the cursor to a connection", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "cursor": { + "batchSize": 2 + } + }, + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "listCollections pins the cursor to a connection", + "operations": [ + { + "name": "listCollections", + "object": "database0", + "arguments": { + "filter": {}, + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "cursor": { + "batchSize": 2 + } + }, + "commandName": "listCollections", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": { + "$$type": "string" + } + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "listIndexes pins the cursor to a connection", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "y": 1 + }, + "name": "y_1" + } + }, + { + "name": "listIndexes", + "object": "collection0", + "arguments": { + "batchSize": 2 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll0", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ] + }, + "commandName": "createIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll0", + "indexes": [ + { + "name": "y_1", + "key": { + "y": 1 + } + } + ] + }, + "commandName": "createIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll0", + "cursor": { + "batchSize": 2 + } + }, + "commandName": "listIndexes", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": 0, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "change streams pin to a connection", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "changeStream0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/unified/sdam-error-handling.json b/test/load_balancer/unified/sdam-error-handling.json new file mode 100644 index 0000000000..63aabc04db --- /dev/null +++ b/test/load_balancer/unified/sdam-error-handling.json @@ -0,0 +1,508 @@ +{ + "description": "state change errors are correctly handled", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "observedEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + }, + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "singleClient", + "useMultipleMongoses": false, + "uriOptions": { + "appname": "lbSDAMErrorTestClient", + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "singleDB", + "client": "singleClient", + "databaseName": "singleDB" + } + }, + { + "collection": { + "id": "singleColl", + "database": "singleDB", + "collectionName": "singleColl" + } + }, + { + "client": { + "id": "multiClient", + "useMultipleMongoses": true, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent", + "connectionCheckedInEvent", + "connectionClosedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "multiDB", + "client": "multiClient", + "databaseName": "multiDB" + } + }, + { + "collection": { + "id": "multiColl", + "database": "multiDB", + "collectionName": "multiColl" + } + } + ], + "initialData": [ + { + "collectionName": "singleColl", + "databaseName": "singleDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + }, + { + "collectionName": "multiColl", + "databaseName": "multiDB", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "only connections for a specific serviceId are closed when pools are cleared", + "operations": [ + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "multiColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "close", + "object": "cursor1" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "multiClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 11600 + } + }, + { + "name": "insertOne", + "object": "multiColl", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "multiClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "stale" + } + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "errors during the initial connection hello are ignore", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + } + ] + } + ] + }, + { + "description": "errors during authentication are processed", + "runOnRequirements": [ + { + "auth": true + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "lbSDAMErrorTestClient" + } + } + } + }, + { + "name": "insertOne", + "object": "singleColl", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } + } + ] + } + ] + }, + { + "description": "stale errors are ignored", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "createFindCursor", + "object": "singleColl", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor1", + "expectError": { + "isClientError": true + } + }, + { + "name": "close", + "object": "cursor1" + } + ], + "expectEvents": [ + { + "client": "singleClient", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/unified/transactions.json b/test/load_balancer/unified/transactions.json new file mode 100644 index 0000000000..add2453848 --- /dev/null +++ b/test/load_balancer/unified/transactions.json @@ -0,0 +1,1606 @@ +{ + "description": "transactions are correctly pinned to connections for load-balanced clusters", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionClosedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "_yamlAnchors": { + "documents": [ + { + "_id": 4 + } + ] + }, + "tests": [ + { + "description": "sessions are reused in LB mode", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ] + }, + { + "description": "all operations go to the same mongos", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "transaction can be committed multiple times", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is not released after a non-transient CRUD error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "errorCode": 51, + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is not released after a non-transient commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 51, + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a non-transient abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network CRUD error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network CRUD error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "expectError": { + "errorCode": 24, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network commit error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0", + "ignoreResultAndError": true + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient non-network abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 24 + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released after a transient network abort error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": { + "reason": "error" + } + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is released on successful abort", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is returned when a new transaction is started", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + } + ] + } + ] + }, + { + "description": "pinned connection is returned when a non-transaction operation uses the session", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "a connection can be shared by a transaction and a cursor", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2, + "session": "session0" + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "killCursors" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} From c8f32a7a376e027cbe30dcb8df9bf05778e61248 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 24 May 2021 17:49:44 -0700 Subject: [PATCH 0361/2111] PYTHON-2673 Connection pinning behavior for load balanced clusters (#630) Tweak spec test because pymongo unpins cursors eagerly after errors. Tweak spec test for PoolClearedEvent ordering when MongoDB handshake fails (see DRIVERS-1785). Only skip killCursors for some error codes. Rely on SDAM error handling to close the connection after a state change error. Add service_id to various events. Retain reference to pinned sockets to prevent premptive closure by CPython's cyclic GC. --- pymongo/aggregation.py | 4 +- pymongo/change_stream.py | 2 +- pymongo/client_session.py | 44 ++++- pymongo/collection.py | 28 ++- pymongo/command_cursor.py | 77 ++++---- pymongo/cursor.py | 115 ++++++----- pymongo/database.py | 12 +- pymongo/message.py | 59 +++--- pymongo/mongo_client.py | 186 ++++++++++-------- pymongo/monitoring.py | 21 +- pymongo/network.py | 9 +- pymongo/pool.py | 60 +++++- pymongo/response.py | 22 +-- pymongo/server.py | 41 ++-- test/load_balancer/test_load_balancer.py | 22 ++- test/load_balancer/unified/cursors.json | 8 +- .../unified/sdam-error-handling.json | 6 +- test/pymongo_mocks.py | 5 +- test/test_client.py | 6 +- test/test_monitoring.py | 6 +- test/test_read_preferences.py | 5 +- test/unified_format.py | 23 ++- test/utils.py | 2 +- 23 files changed, 466 insertions(+), 297 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 438a3421bc..ae1b9d9eb8 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -161,11 +161,13 @@ def get_cursor(self, session, server, sock_info, slave_ok): } # Create and return cursor instance. - return self._cursor_class( + cmd_cursor = self._cursor_class( self._cursor_collection(cursor), cursor, sock_info.address, batch_size=self._batch_size or 0, max_await_time_ms=self._max_await_time_ms, session=session, explicit_session=self._explicit_session) + cmd_cursor._maybe_pin_connection(sock_info) + return cmd_cursor class _CollectionAggregationCommand(_AggregationCommand): diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index f742e126c6..fcb9d9f520 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -181,7 +181,7 @@ def _run_aggregation_cmd(self, session, explicit_session): return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), - session) + session, pin=self._client._should_pin_cursor(session)) def _create_cursor(self): with self._client._tmp_session(self._session, close=False) as s: diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 5b6ff7524d..9db8184826 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -108,6 +108,7 @@ from bson.son import SON from bson.timestamp import Timestamp +from pymongo.cursor import _SocketManager from pymongo.errors import (ConfigurationError, ConnectionFailure, InvalidOperation, @@ -117,6 +118,7 @@ from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern @@ -292,6 +294,7 @@ def __init__(self, opts): self.state = _TxnState.NONE self.sharded = False self.pinned_address = None + self.sock_mgr = None self.recovery_token = None self.attempt = 0 @@ -301,10 +304,29 @@ def active(self): def starting(self): return self.state == _TxnState.STARTING + @property + def pinned_conn(self): + if self.active() and self.sock_mgr: + return self.sock_mgr.sock + return None + + def pin(self, server, sock_info): + self.sharded = True + self.pinned_address = server.description.address + if server.description.server_type == SERVER_TYPE.LoadBalancer: + sock_info.pinned = True + self.sock_mgr = _SocketManager(sock_info, False) + + def unpin(self): + self.pinned_address = None + if self.sock_mgr: + self.sock_mgr.close() + self.sock_mgr = None + def reset(self): + self.unpin() self.state = _TxnState.NONE self.sharded = False - self.pinned_address = None self.recovery_token = None self.attempt = 0 @@ -374,6 +396,9 @@ def _end_session(self, lock): try: if self.in_transaction: self.abort_transaction() + # It's possible we're still pinned here when the transaction + # is in the committed state when the session is discarded. + self._unpin() finally: self._client._return_server_session(self._server_session, lock) self._server_session = None @@ -779,14 +804,18 @@ def _pinned_address(self): return self._transaction.pinned_address return None - def _pin(self, server): - """Pin this session to the given Server.""" - self._transaction.sharded = True - self._transaction.pinned_address = server.description.address + @property + def _pinned_connection(self): + """The connection this transaction was started on.""" + return self._transaction.pinned_conn + + def _pin(self, server, sock_info): + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, sock_info) def _unpin(self): """Unpin this session from any pinned Server.""" - self._transaction.pinned_address = None + self._transaction.unpin() def _txn_read_preference(self): """Return read preference of this transaction or None.""" @@ -800,9 +829,6 @@ def _apply_to(self, command, is_retryable, read_preference): self._server_session.last_use = time.monotonic() command['lsid'] = self._server_session.session_id - if not self.in_transaction: - self._transaction.reset() - if is_retryable: command['txnNumber'] = self._server_session.transaction_id return diff --git a/pymongo/collection.py b/pymongo/collection.py index 203ea32ae1..cf8f679be1 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -520,7 +520,8 @@ def _legacy_write(self, sock_info, name, cmd, op_id, if publish: duration = datetime.datetime.now() - start listeners.publish_command_start( - cmd, self.__database.name, rqst_id, sock_info.address, op_id) + cmd, self.__database.name, rqst_id, sock_info.address, op_id, + sock_info.service_id) start = datetime.datetime.now() try: result = sock_info.legacy_write(rqst_id, msg, max_size, False) @@ -534,12 +535,14 @@ def _legacy_write(self, sock_info, name, cmd, op_id, reply = message._convert_write_result( name, cmd, details) listeners.publish_command_success( - dur, reply, name, rqst_id, sock_info.address, op_id) + dur, reply, name, rqst_id, sock_info.address, + op_id, sock_info.service_id) raise else: details = message._convert_exception(exc) listeners.publish_command_failure( - dur, details, name, rqst_id, sock_info.address, op_id) + dur, details, name, rqst_id, sock_info.address, op_id, + sock_info.service_id) raise if publish: if result is not None: @@ -549,7 +552,8 @@ def _legacy_write(self, sock_info, name, cmd, op_id, reply = {'ok': 1} duration = (datetime.datetime.now() - start) + duration listeners.publish_command_success( - duration, reply, name, rqst_id, sock_info.address, op_id) + duration, reply, name, rqst_id, sock_info.address, op_id, + sock_info.service_id) return result def _insert_one( @@ -2072,9 +2076,9 @@ def _cmd(session, server, sock_info, slave_ok): if exc.code != 26: raise cursor = {'id': 0, 'firstBatch': []} - return CommandCursor(coll, cursor, sock_info.address, - session=s, - explicit_session=session is not None) + cmd_cursor = CommandCursor( + coll, cursor, sock_info.address, session=s, + explicit_session=session is not None) else: res = message._first_batch( sock_info, self.__database.name, "system.indexes", @@ -2084,10 +2088,13 @@ def _cmd(session, server, sock_info, slave_ok): cursor = res["cursor"] # Note that a collection can only have 64 indexes, so there # will never be a getMore call. - return CommandCursor(coll, cursor, sock_info.address) + cmd_cursor = CommandCursor(coll, cursor, sock_info.address) + cmd_cursor._maybe_pin_connection(sock_info) + return cmd_cursor return self.__database.client._retryable_read( - _cmd, read_pref, session) + _cmd, read_pref, session, + pin=self.__database.client._should_pin_cursor(session)) def index_information(self, session=None): """Get information on this collection's indexes. @@ -2168,7 +2175,8 @@ def _aggregate(self, aggregation_command, pipeline, cursor_class, session, user_fields={'cursor': {'firstBatch': 1}}) return self.__database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), session, - retryable=not cmd._performs_write) + retryable=not cmd._performs_write, + pin=self.database.client._should_pin_cursor(session)) def aggregate(self, pipeline, session=None, **kwargs): """Perform an aggregation using the aggregation framework on this diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index f90a9267a6..fa219d0381 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -17,13 +17,14 @@ from collections import deque from bson import _convert_raw_document_lists_to_streams +from pymongo.cursor import _SocketManager, _CURSOR_CLOSED_ERRORS from pymongo.errors import (ConnectionFailure, InvalidOperation, - NotMasterError, OperationFailure) from pymongo.message import (_CursorAddress, _GetMore, _RawBatchGetMore) +from pymongo.response import PinnedResponse class CommandCursor(object): @@ -37,6 +38,7 @@ def __init__(self, collection, cursor_info, address, retrieved=0, The parameter 'retrieved' is unused. """ + self.__sock_mgr = None self.__collection = collection self.__id = cursor_info['id'] self.__data = deque(cursor_info['firstBatch']) @@ -75,11 +77,15 @@ def __die(self, synchronous=False): self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( - self.__id, address, session=self.__session) + self.__id, address, session=self.__session, + sock_mgr=self.__sock_mgr) else: # The cursor will be closed later in a different session. self.__collection.database.client._close_cursor( self.__id, address) + if self.__sock_mgr: + self.__sock_mgr.close() + self.__sock_mgr = None self.__end_session(synchronous) def __end_session(self, synchronous): @@ -127,52 +133,58 @@ def _post_batch_resume_token(self): changeStream aggregate or getMore.""" return self.__postbatchresumetoken + def _maybe_pin_connection(self, sock_info): + client = self.__collection.database.client + if not client._should_pin_cursor(self.__session): + return + if not self.__sock_mgr: + sock_mgr = _SocketManager(sock_info, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. + if self.__id == 0: + sock_mgr.close() + else: + self.__sock_mgr = sock_mgr + def __send_message(self, operation): """Send a getmore message and handle the response. """ - def kill(): - self.__killed = True - self.__end_session(True) - client = self.__collection.database.client try: - response = client._run_operation_with_response( + response = client._run_operation( operation, self._unpack_response, address=self.__address) - except OperationFailure: - kill() - raise - except NotMasterError: - # Don't send kill cursors to another server after a "not master" - # error. It's completely pointless. - kill() + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self.__killed = True + # Return the session and pinned connection, if necessary. + self.close() raise except ConnectionFailure: - # Don't try to send kill cursors on another socket - # or to another server. It can cause a _pinValue - # assertion on some server releases if we get here - # due to a socket timeout. - kill() + # Don't send killCursors because the cursor is already closed. + self.__killed = True + # Return the session and pinned connection, if necessary. + self.close() raise except Exception: - # Close the cursor - self.__die() + self.close() raise - from_command = response.from_command - reply = response.data - docs = response.docs - - if from_command: - cursor = docs[0]['cursor'] + if isinstance(response, PinnedResponse): + if not self.__sock_mgr: + self.__sock_mgr = _SocketManager(response.socket_info, + response.more_to_come) + if response.from_command: + cursor = response.docs[0]['cursor'] documents = cursor['nextBatch'] self.__postbatchresumetoken = cursor.get('postBatchResumeToken') self.__id = cursor['id'] else: - documents = docs - self.__id = reply.cursor_id + documents = response.docs + self.__id = response.data.cursor_id if self.__id == 0: - kill() + self.__die(True) self.__data = deque(documents) def _unpack_response(self, response, cursor_id, codec_options, @@ -203,10 +215,9 @@ def _refresh(self): self.__session, self.__collection.database.client, self.__max_await_time_ms, - False)) + self.__sock_mgr, False)) else: # Cursor id is zero nothing else to return - self.__killed = True - self.__end_session(True) + self.__die(True) return len(self.__data) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index c6a0246bfa..22b0de20d6 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -15,6 +15,7 @@ """Cursor class to iterate over Mongo query results.""" import copy +import threading import warnings from collections import deque @@ -27,7 +28,6 @@ from pymongo.collation import validate_collation_or_none from pymongo.errors import (ConnectionFailure, InvalidOperation, - NotMasterError, OperationFailure) from pymongo.message import (_CursorAddress, _GetMore, @@ -35,7 +35,37 @@ _Query, _RawBatchQuery) from pymongo.monitoring import ConnectionClosedReason - +from pymongo.response import PinnedResponse + +# These errors mean that the server has already killed the cursor so there is +# no need to send killCursors. +_CURSOR_CLOSED_ERRORS = frozenset([ + 43, # CursorNotFound + 50, # MaxTimeMSExpired + 175, # QueryPlanKilled + 237, # CursorKilled + + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, +]) _QUERY_OPTIONS = { "tailable_cursor": 2, @@ -78,14 +108,14 @@ class CursorType(object): # This has to be an old style class due to # http://bugs.jython.org/issue1057 -class _ExhaustManager: +class _SocketManager: """Used with exhaust cursors to ensure the socket is returned. """ - def __init__(self, sock, pool, more_to_come): + def __init__(self, sock, more_to_come): self.sock = sock - self.pool = pool self.more_to_come = more_to_come self.__closed = False + self.lock = threading.Lock() def __del__(self): self.close() @@ -98,8 +128,8 @@ def close(self): """ if not self.__closed: self.__closed = True - self.pool.return_socket(self.sock) - self.sock, self.pool = None, None + self.sock.unpin() + self.sock = None class Cursor(object): @@ -128,7 +158,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, # an error to avoid attribute errors during garbage collection. self.__id = None self.__exhaust = False - self.__exhaust_mgr = None + self.__sock_mgr = None self.__killed = False if session: @@ -319,24 +349,26 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: - if self.__exhaust and self.__exhaust_mgr: + if self.__exhaust and self.__sock_mgr: # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. - self.__exhaust_mgr.sock.close_socket( + self.__sock_mgr.sock.close_socket( ConnectionClosedReason.ERROR) else: address = _CursorAddress( self.__address, self.__collection.full_name) if synchronous: self.__collection.database.client._close_cursor_now( - self.__id, address, session=self.__session) + self.__id, address, session=self.__session, + sock_mgr=self.__sock_mgr) else: # The cursor will be closed later in a different session. self.__collection.database.client._close_cursor( self.__id, address) - if self.__exhaust and self.__exhaust_mgr: - self.__exhaust_mgr.close() + if self.__sock_mgr: + self.__sock_mgr.close() + self.__sock_mgr = None if self.__session and not self.__explicit_session: self.__session._end_session(lock=synchronous) self.__session = None @@ -1004,53 +1036,35 @@ def __send_message(self, operation): "exhaust cursors do not support auto encryption") try: - response = client._run_operation_with_response( - operation, self._unpack_response, exhaust=self.__exhaust, - address=self.__address) - except OperationFailure: - self.__killed = True - - # Make sure exhaust socket is returned immediately, if necessary. - self.__die() - + response = client._run_operation( + operation, self._unpack_response, address=self.__address) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: + # Don't send killCursors because the cursor is already closed. + self.__killed = True + self.close() # If this is a tailable cursor the error is likely # due to capped collection roll over. Setting # self.__killed to True ensures Cursor.alive will be # False. No need to re-raise. - if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]: + if (exc.code in _CURSOR_CLOSED_ERRORS and + self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]): return raise - except NotMasterError: - # Don't send kill cursors to another server after a "not master" - # error. It's completely pointless. - self.__killed = True - - # Make sure exhaust socket is returned immediately, if necessary. - self.__die() - - raise except ConnectionFailure: - # Don't try to send kill cursors on another socket - # or to another server. It can cause a _pinValue - # assertion on some server releases if we get here - # due to a socket timeout. + # Don't send killCursors because the cursor is already closed. self.__killed = True - self.__die() + self.close() raise except Exception: - # Close the cursor - self.__die() + self.close() raise self.__address = response.address - if self.__exhaust: - # 'response' is an ExhaustResponse. - if not self.__exhaust_mgr: - self.__exhaust_mgr = _ExhaustManager(response.socket_info, - response.pool, - response.more_to_come) - else: - self.__exhaust_mgr.update_exhaust(response.more_to_come) + if isinstance(response, PinnedResponse): + if not self.__sock_mgr: + self.__sock_mgr = _SocketManager(response.socket_info, + response.more_to_come) cmd_name = operation.name docs = response.docs @@ -1078,7 +1092,6 @@ def __send_message(self, operation): self.__retrieved += response.data.number_returned if self.__id == 0: - self.__killed = True # Don't wait for garbage collection to call __del__, return the # socket and the session to the pool now. self.__die() @@ -1132,7 +1145,8 @@ def _refresh(self): self.__collation, self.__session, self.__collection.database.client, - self.__allow_disk_use) + self.__allow_disk_use, + self.__exhaust) self.__send_message(q) elif self.__id: # Get More if self.__limit: @@ -1151,7 +1165,8 @@ def _refresh(self): self.__session, self.__collection.database.client, self.__max_await_time_ms, - self.__exhaust_mgr) + self.__sock_mgr, + self.__exhaust) self.__send_message(g) return len(self.__data) diff --git a/pymongo/database.py b/pymongo/database.py index 8de73161a3..9e2b221fe2 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -377,7 +377,8 @@ def aggregate(self, pipeline, session=None, **kwargs): user_fields={'cursor': {'firstBatch': 1}}) return self.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(s), s, - retryable=not cmd._performs_write) + retryable=not cmd._performs_write, + pin=self.client._should_pin_cursor(s)) def watch(self, pipeline=None, full_document=None, resume_after=None, max_await_time_ms=None, batch_size=None, collation=None, @@ -636,7 +637,7 @@ def _list_collections(self, sock_info, slave_okay, session, sock_info, cmd, slave_okay, read_preference=read_preference, session=tmp_session)["cursor"] - return CommandCursor( + cmd_cursor = CommandCursor( coll, cursor, sock_info.address, @@ -656,7 +657,9 @@ def _list_collections(self, sock_info, slave_okay, session, ("pipeline", pipeline), ("cursor", kwargs.get("cursor", {}))]) cursor = self._command(sock_info, cmd, slave_okay)["cursor"] - return CommandCursor(coll, cursor, sock_info.address) + cmd_cursor = CommandCursor(coll, cursor, sock_info.address) + cmd_cursor._maybe_pin_connection(sock_info) + return cmd_cursor def list_collections(self, session=None, filter=None, **kwargs): """Get a cursor over the collectons of this database. @@ -688,7 +691,8 @@ def _cmd(session, server, sock_info, slave_okay): **kwargs) return self.__client._retryable_read( - _cmd, read_pref, session) + _cmd, read_pref, session, + pin=self.client._should_pin_cursor(session)) def list_collection_names(self, session=None, filter=None, **kwargs): """Get a list of all the collection names in this database. diff --git a/pymongo/message.py b/pymongo/message.py index 307260263f..4edf8abced 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -234,16 +234,17 @@ class _Query(object): __slots__ = ('flags', 'db', 'coll', 'ntoskip', 'spec', 'fields', 'codec_options', 'read_preference', 'limit', 'batch_size', 'name', 'read_concern', 'collation', - 'session', 'client', 'allow_disk_use', '_as_command') + 'session', 'client', 'allow_disk_use', '_as_command', + 'exhaust') # For compatibility with the _GetMore class. - exhaust_mgr = None + sock_mgr = None cursor_id = None def __init__(self, flags, db, coll, ntoskip, spec, fields, codec_options, read_preference, limit, batch_size, read_concern, collation, session, client, - allow_disk_use): + allow_disk_use, exhaust): self.flags = flags self.db = db self.coll = coll @@ -261,13 +262,14 @@ def __init__(self, flags, db, coll, ntoskip, spec, fields, self.allow_disk_use = allow_disk_use self.name = 'find' self._as_command = None + self.exhaust = exhaust def namespace(self): return "%s.%s" % (self.db, self.coll) - def use_command(self, sock_info, exhaust): + def use_command(self, sock_info): use_find_cmd = False - if sock_info.max_wire_version >= 4 and not exhaust: + if sock_info.max_wire_version >= 4 and not self.exhaust: use_find_cmd = True elif sock_info.max_wire_version >= 8: # OP_MSG supports exhaust on MongoDB 4.2+ @@ -375,13 +377,13 @@ class _GetMore(object): __slots__ = ('db', 'coll', 'ntoreturn', 'cursor_id', 'max_await_time_ms', 'codec_options', 'read_preference', 'session', 'client', - 'exhaust_mgr', '_as_command') + 'sock_mgr', '_as_command', 'exhaust') name = 'getMore' def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, read_preference, session, client, max_await_time_ms, - exhaust_mgr): + sock_mgr, exhaust): self.db = db self.coll = coll self.ntoreturn = ntoreturn @@ -391,15 +393,16 @@ def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, self.session = session self.client = client self.max_await_time_ms = max_await_time_ms - self.exhaust_mgr = exhaust_mgr + self.sock_mgr = sock_mgr self._as_command = None + self.exhaust = exhaust def namespace(self): return "%s.%s" % (self.db, self.coll) - def use_command(self, sock_info, exhaust): + def use_command(self, sock_info): use_cmd = False - if sock_info.max_wire_version >= 4 and not exhaust: + if sock_info.max_wire_version >= 4 and not self.exhaust: use_cmd = True elif sock_info.max_wire_version >= 8: # OP_MSG supports exhaust on MongoDB 4.2+ @@ -440,7 +443,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] if sock_info.op_msg_enabled: - if self.exhaust_mgr: + if self.sock_mgr: flags = _OpMsg.EXHAUST_ALLOWED else: flags = 0 @@ -456,23 +459,23 @@ def get_message(self, dummy0, sock_info, use_cmd=False): class _RawBatchQuery(_Query): - def use_command(self, socket_info, exhaust): + def use_command(self, sock_info): # Compatibility checks. - super(_RawBatchQuery, self).use_command(socket_info, exhaust) - if socket_info.max_wire_version >= 8: + super(_RawBatchQuery, self).use_command(sock_info) + if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True - elif socket_info.op_msg_enabled and not exhaust: + elif sock_info.op_msg_enabled and not self.exhaust: return True return False class _RawBatchGetMore(_GetMore): - def use_command(self, socket_info, exhaust): - if socket_info.max_wire_version >= 8: + def use_command(self, sock_info): + if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True - elif socket_info.op_msg_enabled and not exhaust: + elif sock_info.op_msg_enabled and not self.exhaust: return True return False @@ -1033,20 +1036,23 @@ def _start(self, request_id, docs): cmd[self.field] = docs self.listeners.publish_command_start( cmd, self.db_name, - request_id, self.sock_info.address, self.op_id) + request_id, self.sock_info.address, self.op_id, + self.sock_info.service_id) return cmd def _succeed(self, request_id, reply, duration): """Publish a CommandSucceededEvent.""" self.listeners.publish_command_success( duration, reply, self.name, - request_id, self.sock_info.address, self.op_id) + request_id, self.sock_info.address, self.op_id, + self.sock_info.service_id) def _fail(self, request_id, failure, duration): """Publish a CommandFailedEvent.""" self.listeners.publish_command_failure( duration, failure, self.name, - request_id, self.sock_info.address, self.op_id) + request_id, self.sock_info.address, self.op_id, + self.sock_info.service_id) # From the Client Side Encryption spec: @@ -1686,7 +1692,7 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, query = _Query( 0, db, coll, 0, query, None, codec_options, read_preference, ntoreturn, 0, DEFAULT_READ_CONCERN, None, None, - None, None) + None, None, False) name = next(iter(cmd)) publish = listeners.enabled_for_commands @@ -1698,7 +1704,8 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, if publish: encoding_duration = datetime.datetime.now() - start listeners.publish_command_start( - cmd, db, request_id, sock_info.address) + cmd, db, request_id, sock_info.address, + service_id=sock_info.service_id) start = datetime.datetime.now() sock_info.send_message(msg, max_doc_size) @@ -1713,7 +1720,8 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, else: failure = _convert_exception(exc) listeners.publish_command_failure( - duration, failure, name, request_id, sock_info.address) + duration, failure, name, request_id, sock_info.address, + service_id=sock_info.service_id) raise # listIndexes if 'cursor' in cmd: @@ -1732,6 +1740,7 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, if publish: duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( - duration, result, name, request_id, sock_info.address) + duration, result, name, request_id, sock_info.address, + service_id=sock_info.service_id) return result diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dde4ef2426..4eb54e1c58 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -60,6 +60,7 @@ OperationFailure, PyMongoError, ServerSelectionTimeoutError) +from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (writable_preferred_server_selector, writable_server_selector) @@ -1160,10 +1161,20 @@ def _get_topology(self): return self._topology @contextlib.contextmanager - def _get_socket(self, server, session, exhaust=False): + def _get_socket(self, server, session, pin=False): + in_txn = session and session.in_transaction with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session._pinned_connection: + yield session._pinned_connection + return with server.get_socket( - self.__all_credentials, checkout=exhaust) as sock_info: + self.__all_credentials, checkout=pin, + handler=err_handler) as sock_info: + # Pin this session to the selected server or connection. + if (in_txn and server.description.server_type in ( + SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer)): + session._pin(server, sock_info) err_handler.contribute_socket(sock_info) if (self._encrypter and not self._encrypter._bypass_auto_encryption and @@ -1186,6 +1197,8 @@ def _select_server(self, server_selector, session, address=None): """ try: topology = self._get_topology() + if session and not session.in_transaction: + session._transaction.reset() address = address or (session and session._pinned_address) if address: # We're running a getMore or this session is pinned to a mongos. @@ -1195,12 +1208,6 @@ def _select_server(self, server_selector, session, address=None): % address) else: server = topology.select_server(server_selector) - # Pin this session to the selected server if it's performing a - # sharded transaction. - if (server.description.server_type in ( - SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer) - and session and session.in_transaction): - session._pin(server) return server except PyMongoError as exc: # Server selection errors in a transaction are transient. @@ -1214,8 +1221,7 @@ def _socket_for_writes(self, session): return self._get_socket(server, session) @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session, - exhaust=False): + def _slaveok_for_server(self, read_preference, server, session, pin=False): assert read_preference is not None, "read_preference must not be None" # Get a socket for a server matching the read preference, and yield # sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to @@ -1226,7 +1232,7 @@ def _slaveok_for_server(self, read_preference, server, session, topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single - with self._get_socket(server, session, exhaust=exhaust) as sock_info: + with self._get_socket(server, session, pin=pin) as sock_info: slave_ok = (single and not sock_info.is_mongos) or ( read_preference != ReadPreference.PRIMARY) yield sock_info, slave_ok @@ -1249,47 +1255,41 @@ def _socket_for_reads(self, read_preference, session): read_preference != ReadPreference.PRIMARY) yield sock_info, slave_ok - def _run_operation_with_response(self, operation, unpack_res, - exhaust=False, address=None): + def _should_pin_cursor(self, session): + return (self.__options.load_balanced and + not (session and session.in_transaction)) + + def _run_operation(self, operation, unpack_res, pin=False, address=None): """Run a _Query/_GetMore operation and return a Response. :Parameters: - `operation`: a _Query or _GetMore object. - `unpack_res`: A callable that decodes the wire protocol response. - - `exhaust` (optional): If True, the socket used stays checked out. - It is returned along with its Pool in the Response. - `address` (optional): Optional address when sending a message to a specific server, used for getMore. """ - if operation.exhaust_mgr: + pin = self._should_pin_cursor(operation.session) or operation.exhaust + if operation.sock_mgr: server = self._select_server( operation.read_preference, operation.session, address=address) - with _MongoClientErrorHandler( - self, server, operation.session) as err_handler: - err_handler.contribute_socket(operation.exhaust_mgr.sock) - return server.run_operation_with_response( - operation.exhaust_mgr.sock, - operation, - True, - self._event_listeners, - exhaust, - unpack_res) + with operation.sock_mgr.lock: + with _MongoClientErrorHandler( + self, server, operation.session) as err_handler: + err_handler.contribute_socket(operation.sock_mgr.sock) + return server.run_operation( + operation.sock_mgr.sock, operation, True, + self._event_listeners, pin, unpack_res) def _cmd(session, server, sock_info, slave_ok): - return server.run_operation_with_response( - sock_info, - operation, - slave_ok, - self._event_listeners, - exhaust, + return server.run_operation( + sock_info, operation, slave_ok, self._event_listeners, pin, unpack_res) return self._retryable_read( _cmd, operation.read_preference, operation.session, - address=address, - retryable=isinstance(operation, message._Query), - exhaust=exhaust) + address=address, retryable=isinstance(operation, message._Query), + pin=pin) def _retry_with_session(self, retryable, func, session, bulk): """Execute an operation with at most one consecutive retries @@ -1361,7 +1361,7 @@ def is_retrying(): last_error = exc def _retryable_read(self, func, read_pref, session, address=None, - retryable=True, exhaust=False): + retryable=True, pin=False): """Execute an operation with at most one consecutive retries Returns func()'s return value on success. On error retries the same @@ -1381,9 +1381,9 @@ def _retryable_read(self, func, read_pref, session, address=None, read_pref, session, address=address) if not server.description.retryable_reads_supported: retryable = False - with self._slaveok_for_server(read_pref, server, session, - exhaust=exhaust) as (sock_info, - slave_ok): + with self._slaveok_for_server( + read_pref, server, session, pin=pin) as ( + sock_info, slave_ok): if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. @@ -1496,7 +1496,8 @@ def _close_cursor(self, cursor_id, address): """ self.__kill_cursors_queue.append((address, [cursor_id])) - def _close_cursor_now(self, cursor_id, address=None, session=None): + def _close_cursor_now(self, cursor_id, address=None, session=None, + sock_mgr=None): """Send a kill cursors message with the given id. The cursor is closed synchronously on the current thread. @@ -1505,16 +1506,20 @@ def _close_cursor_now(self, cursor_id, address=None, session=None): raise TypeError("cursor_id must be an instance of int") try: - self._kill_cursors( - [cursor_id], address, self._get_topology(), session) + if sock_mgr: + with sock_mgr.lock: + # Cursor is pinned to LB outside of a transaction. + self._kill_cursor_impl( + [cursor_id], address, session, sock_mgr.sock) + else: + self._kill_cursors( + [cursor_id], address, self._get_topology(), session) except PyMongoError: # Make another attempt to kill the cursor later. self.__kill_cursors_queue.append((address, [cursor_id])) def _kill_cursors(self, cursor_ids, address, topology, session): """Send a kill cursors message with the given ids.""" - listeners = self._event_listeners - publish = listeners.enabled_for_commands if address: # address could be a tuple or _CursorAddress, but # select_server_by_address needs (host, port). @@ -1523,49 +1528,55 @@ def _kill_cursors(self, cursor_ids, address, topology, session): # Application called close_cursor() with no address. server = topology.select_server(writable_server_selector) + with self._get_socket(server, session) as sock_info: + self._kill_cursor_impl(cursor_ids, address, session, sock_info) + + def _kill_cursor_impl(self, cursor_ids, address, session, sock_info): + listeners = self._event_listeners + publish = listeners.enabled_for_commands + try: namespace = address.namespace db, coll = namespace.split('.', 1) except AttributeError: namespace = None db = coll = "OP_KILL_CURSORS" - spec = SON([('killCursors', coll), ('cursors', cursor_ids)]) - with server.get_socket(self.__all_credentials) as sock_info: - if sock_info.max_wire_version >= 4 and namespace is not None: - sock_info.command(db, spec, session=session, client=self) - else: - if publish: - start = datetime.datetime.now() - request_id, msg = message.kill_cursors(cursor_ids) - if publish: - duration = datetime.datetime.now() - start - # Here and below, address could be a tuple or - # _CursorAddress. We always want to publish a - # tuple to match the rest of the monitoring - # API. - listeners.publish_command_start( - spec, db, request_id, tuple(address)) - start = datetime.datetime.now() - - try: - sock_info.send_message(msg, 0) - except Exception as exc: - if publish: - dur = ((datetime.datetime.now() - start) + duration) - listeners.publish_command_failure( - dur, message._convert_exception(exc), - 'killCursors', request_id, - tuple(address)) - raise + if sock_info.max_wire_version >= 4 and namespace is not None: + sock_info.command(db, spec, session=session, client=self) + else: + if publish: + start = datetime.datetime.now() + request_id, msg = message.kill_cursors(cursor_ids) + if publish: + duration = datetime.datetime.now() - start + # Here and below, address could be a tuple or + # _CursorAddress. We always want to publish a + # tuple to match the rest of the monitoring + # API. + listeners.publish_command_start( + spec, db, request_id, tuple(address), + service_id=sock_info.service_id) + start = datetime.datetime.now() + try: + sock_info.send_message(msg, 0) + except Exception as exc: if publish: - duration = ((datetime.datetime.now() - start) + duration) - # OP_KILL_CURSORS returns no reply, fake one. - reply = {'cursorsUnknown': cursor_ids, 'ok': 1} - listeners.publish_command_success( - duration, reply, 'killCursors', request_id, - tuple(address)) + dur = ((datetime.datetime.now() - start) + duration) + listeners.publish_command_failure( + dur, message._convert_exception(exc), + 'killCursors', request_id, + tuple(address), service_id=sock_info.service_id) + raise + + if publish: + duration = ((datetime.datetime.now() - start) + duration) + # OP_KILL_CURSORS returns no reply, fake one. + reply = {'cursorsUnknown': cursor_ids, 'ok': 1} + listeners.publish_command_success( + duration, reply, 'killCursors', request_id, + tuple(address), service_id=sock_info.service_id) def _process_kill_cursors(self): """Process any pending kill cursors requests.""" @@ -1966,7 +1977,8 @@ def _add_retryable_write_error(exc, max_wire_version): class _MongoClientErrorHandler(object): """Handle errors raised when executing an operation.""" __slots__ = ('client', 'server_address', 'session', 'max_wire_version', - 'sock_generation', 'completed_handshake', 'service_id') + 'sock_generation', 'completed_handshake', 'service_id', + 'handled') def __init__(self, client, server, session): self.client = client @@ -1980,6 +1992,7 @@ def __init__(self, client, server, session): self.sock_generation = server.pool.gen.get_overall() self.completed_handshake = False self.service_id = None + self.handled = False def contribute_socket(self, sock_info): """Provide socket information to the error handler.""" @@ -1988,13 +2001,10 @@ def contribute_socket(self, sock_info): self.service_id = sock_info.service_id self.completed_handshake = True - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if exc_type is None: + def handle(self, exc_type, exc_val): + if self.handled or exc_type is None: return - + self.handled = True if self.session: if issubclass(exc_type, ConnectionFailure): if self.session.in_transaction: @@ -2010,3 +2020,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): exc_val, self.max_wire_version, self.sock_generation, self.completed_handshake, self.service_id) self.client._topology.handle_error(self.server_address, err_ctx) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return self.handle(exc_type, exc_val) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index b53629d12b..5e86e93e02 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1310,7 +1310,8 @@ def event_listeners(self): self.__topology_listeners[:]) def publish_command_start(self, command, database_name, - request_id, connection_id, op_id=None): + request_id, connection_id, op_id=None, + service_id=None): """Publish a CommandStartedEvent to all command listeners. :Parameters: @@ -1321,11 +1322,13 @@ def publish_command_start(self, command, database_name, - `connection_id`: The address (host, port) of the server this command was sent to. - `op_id`: The (optional) operation id for this operation. + - `service_id`: The service_id this command was sent to, or ``None``. """ if op_id is None: op_id = request_id event = CommandStartedEvent( - command, database_name, request_id, connection_id, op_id) + command, database_name, request_id, connection_id, op_id, + service_id=service_id) for subscriber in self.__command_listeners: try: subscriber.started(event) @@ -1333,7 +1336,8 @@ def publish_command_start(self, command, database_name, _handle_exception() def publish_command_success(self, duration, reply, command_name, - request_id, connection_id, op_id=None): + request_id, connection_id, op_id=None, + service_id=None): """Publish a CommandSucceededEvent to all command listeners. :Parameters: @@ -1344,11 +1348,13 @@ def publish_command_success(self, duration, reply, command_name, - `connection_id`: The address (host, port) of the server this command was sent to. - `op_id`: The (optional) operation id for this operation. + - `service_id`: The service_id this command was sent to, or ``None``. """ if op_id is None: op_id = request_id event = CommandSucceededEvent( - duration, reply, command_name, request_id, connection_id, op_id) + duration, reply, command_name, request_id, connection_id, op_id, + service_id) for subscriber in self.__command_listeners: try: subscriber.succeeded(event) @@ -1356,7 +1362,8 @@ def publish_command_success(self, duration, reply, command_name, _handle_exception() def publish_command_failure(self, duration, failure, command_name, - request_id, connection_id, op_id=None): + request_id, connection_id, op_id=None, + service_id=None): """Publish a CommandFailedEvent to all command listeners. :Parameters: @@ -1368,11 +1375,13 @@ def publish_command_failure(self, duration, failure, command_name, - `connection_id`: The address (host, port) of the server this command was sent to. - `op_id`: The (optional) operation id for this operation. + - `service_id`: The service_id this command was sent to, or ``None``. """ if op_id is None: op_id = request_id event = CommandFailedEvent( - duration, failure, command_name, request_id, connection_id, op_id) + duration, failure, command_name, request_id, connection_id, op_id, + service_id=service_id) for subscriber in self.__command_listeners: try: subscriber.failed(event) diff --git a/pymongo/network.py b/pymongo/network.py index 39f6504487..813a7cc38c 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -134,7 +134,8 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, if publish: encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start(orig, dbname, request_id, address) + listeners.publish_command_start(orig, dbname, request_id, address, + service_id=sock_info.service_id) start = datetime.datetime.now() try: @@ -164,12 +165,14 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, else: failure = message._convert_exception(exc) listeners.publish_command_failure( - duration, failure, name, request_id, address) + duration, failure, name, request_id, address, + service_id=sock_info.service_id) raise if publish: duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( - duration, response_doc, name, request_id, address) + duration, response_doc, name, request_id, address, + service_id=sock_info.service_id) if client and client._encrypter and reply: decrypted = client._encrypter.decrypt(reply.raw_command_response()) diff --git a/pymongo/pool.py b/pymongo/pool.py index 53766f65f3..5d62e4beed 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -23,6 +23,7 @@ import sys import threading import time +import weakref from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -503,6 +504,7 @@ class SocketInfo(object): - `id`: the id of this socket in it's pool """ def __init__(self, sock, pool, address, id): + self.pool_ref = weakref.ref(pool) self.sock = sock self.address = address self.id = id @@ -541,6 +543,17 @@ def __init__(self, sock, pool, address, id): self.more_to_come = False # For load balancer support. self.service_id = None + # When executing a transaction in load balancing mode, this flag is + # set to true to indicate that the session now owns the connection. + self.pinned = False + + def unpin(self): + self.pinned = False + pool = self.pool_ref() + if pool: + pool.return_socket(self) + else: + self.close_socket(ConnectionClosedReason.STALE) def hello_cmd(self): if self.opts.server_api: @@ -712,7 +725,7 @@ def command(self, dbname, spec, slave_ok=False, unacknowledged=unacknowledged, user_fields=user_fields, exhaust_allowed=exhaust_allowed) - except OperationFailure: + except (OperationFailure, NotMasterError): raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. except BaseException as error: @@ -898,7 +911,13 @@ def _raise_connection_failure(self, error): # ...) is called in Python code, which experiences the signal as a # KeyboardInterrupt from the start, rather than as an initial # socket.error, so we catch that, close the socket, and reraise it. - self.close_socket(ConnectionClosedReason.ERROR) + # + # The connection closed event will be emitted later in return_socket. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + self.close_socket(reason) # SSLError from PyOpenSSL inherits directly from Exception. if isinstance(error, (IOError, OSError, _SSLError)): _raise_connection_failure(self.address, error) @@ -1152,6 +1171,10 @@ def __init__(self, address, options, handshake=True): self.address, self.opts.non_default_options) # Similar to active_sockets but includes threads in the wait queue. self.operation_count = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets = set() def ready(self): old_state, self.state = self.state, PoolState.READY @@ -1328,7 +1351,7 @@ def connect(self, all_credentials=None): return sock_info @contextlib.contextmanager - def get_socket(self, all_credentials, checkout=False): + def get_socket(self, all_credentials, checkout=False, handler=None): """Get a socket from the pool. Use with a "with" statement. Returns a :class:`SocketInfo` object wrapping a connected @@ -1349,12 +1372,15 @@ def get_socket(self, all_credentials, checkout=False): :Parameters: - `all_credentials`: dict, maps auth source to MongoCredential. - `checkout` (optional): keep socket checked out. + - `handler` (optional): A _MongoClientErrorHandler. """ listeners = self.opts.event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) sock_info = self._get_socket(all_credentials) + if checkout: + self.__pinned_sockets.add(sock_info) if self.enabled_for_cmap: listeners.publish_connection_checked_out( @@ -1362,11 +1388,23 @@ def get_socket(self, all_credentials, checkout=False): try: yield sock_info except: - # Exception in caller. Decrement semaphore. - self.return_socket(sock_info) + # Exception in caller. Ensure the connection gets returned. + # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = sock_info.pinned + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + handler.handle(exc_type, exc_val) + if not pinned: + self.return_socket(sock_info) raise else: - if not checkout: + if sock_info.pinned: + self.__pinned_sockets.add(sock_info) + elif not checkout: self.return_socket(sock_info) def _raise_if_not_ready(self, emit_event): @@ -1487,6 +1525,8 @@ def return_socket(self, sock_info): :Parameters: - `sock_info`: The socket to check into the pool. """ + self.__pinned_sockets.discard(sock_info) + sock_info.pinned = False listeners = self.opts.event_listeners if self.enabled_for_cmap: listeners.publish_connection_checked_in(self.address, sock_info.id) @@ -1495,7 +1535,13 @@ def return_socket(self, sock_info): else: if self.closed: sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) - elif not sock_info.closed: + elif sock_info.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + listeners.publish_connection_closed( + self.address, sock_info.id, + ConnectionClosedReason.ERROR) + else: with self.lock: # Hold the lock to ensure this section does not race with # Pool.reset(). diff --git a/pymongo/response.py b/pymongo/response.py index 474e2c4d3b..3094399da6 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -68,10 +68,10 @@ def docs(self): return self._docs -class ExhaustResponse(Response): - __slots__ = ('_socket_info', '_pool', '_more_to_come') +class PinnedResponse(Response): + __slots__ = ('_socket_info', '_more_to_come') - def __init__(self, data, address, socket_info, pool, request_id, duration, + def __init__(self, data, address, socket_info, request_id, duration, from_command, docs, more_to_come): """Represent a response to an exhaust cursor's initial query. @@ -87,13 +87,12 @@ def __init__(self, data, address, socket_info, pool, request_id, duration, - `more_to_come`: Bool indicating whether cursor is ready to be exhausted. """ - super(ExhaustResponse, self).__init__(data, - address, - request_id, - duration, - from_command, docs) + super(PinnedResponse, self).__init__(data, + address, + request_id, + duration, + from_command, docs) self._socket_info = socket_info - self._pool = pool self._more_to_come = more_to_come @property @@ -106,11 +105,6 @@ def socket_info(self): """ return self._socket_info - @property - def pool(self): - """The Pool from which the SocketInfo came.""" - return self._pool - @property def more_to_come(self): """If true, server is ready to send batches on the socket until the diff --git a/pymongo/server.py b/pymongo/server.py index 389f8e7290..672a3b1c1a 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -21,7 +21,7 @@ from pymongo.errors import NotMasterError, OperationFailure from pymongo.helpers import _check_command_response from pymongo.message import _convert_exception, _OpMsg -from pymongo.response import Response, ExhaustResponse +from pymongo.response import Response, PinnedResponse from pymongo.server_type import SERVER_TYPE _CURSOR_DOC_FIELDS = {'cursor': {'firstBatch': 1, 'nextBatch': 1}} @@ -68,14 +68,8 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() - def run_operation_with_response( - self, - sock_info, - operation, - set_slave_okay, - listeners, - exhaust, - unpack_res): + def run_operation(self, sock_info, operation, set_slave_okay, listeners, + pin, unpack_res): """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -87,7 +81,7 @@ def run_operation_with_response( - `set_slave_okay`: Pass to operation.get_message. - `all_credentials`: dict, maps auth source to MongoCredential. - `listeners`: Instance of _EventListeners or None. - - `exhaust`: If True, then this is an exhaust cursor operation. + - `pin`: If True, then this is a pinned cursor operation. - `unpack_res`: A callable that decodes the wire protocol response. """ duration = None @@ -95,9 +89,9 @@ def run_operation_with_response( if publish: start = datetime.now() - use_cmd = operation.use_command(sock_info, exhaust) - more_to_come = (operation.exhaust_mgr - and operation.exhaust_mgr.more_to_come) + use_cmd = operation.use_command(sock_info) + more_to_come = (operation.sock_mgr + and operation.sock_mgr.more_to_come) if more_to_come: request_id = 0 else: @@ -108,7 +102,8 @@ def run_operation_with_response( if publish: cmd, dbn = operation.as_command(sock_info) listeners.publish_command_start( - cmd, dbn, request_id, sock_info.address) + cmd, dbn, request_id, sock_info.address, + service_id=sock_info.service_id) start = datetime.now() try: @@ -142,7 +137,8 @@ def run_operation_with_response( failure = _convert_exception(exc) listeners.publish_command_failure( duration, failure, operation.name, - request_id, sock_info.address) + request_id, sock_info.address, + service_id=sock_info.service_id) raise if publish: @@ -163,7 +159,7 @@ def run_operation_with_response( res["cursor"]["nextBatch"] = docs listeners.publish_command_success( duration, res, operation.name, request_id, - sock_info.address) + sock_info.address, service_id=sock_info.service_id) # Decrypt response. client = operation.client @@ -174,19 +170,20 @@ def run_operation_with_response( docs = _decode_all_selective( decrypted, operation.codec_options, user_fields) - if exhaust: + if pin: if isinstance(reply, _OpMsg): # In OP_MSG, the server keeps sending only if the # more_to_come flag is set. more_to_come = reply.more_to_come else: # In OP_REPLY, the server keeps sending until cursor_id is 0. - more_to_come = bool(reply.cursor_id) - response = ExhaustResponse( + more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.sock_mgr: + operation.sock_mgr.update_exhaust(more_to_come) + response = PinnedResponse( data=reply, address=self._description.address, socket_info=sock_info, - pool=self._pool, duration=duration, request_id=request_id, from_command=use_cmd, @@ -203,8 +200,8 @@ def run_operation_with_response( return response - def get_socket(self, all_credentials, checkout=False): - return self.pool.get_socket(all_credentials, checkout) + def get_socket(self, all_credentials, checkout=False, handler=None): + return self.pool.get_socket(all_credentials, checkout, handler) @property def description(self): diff --git a/test/load_balancer/test_load_balancer.py b/test/load_balancer/test_load_balancer.py index c31ff58ef1..99f8855cad 100644 --- a/test/load_balancer/test_load_balancer.py +++ b/test/load_balancer/test_load_balancer.py @@ -19,8 +19,8 @@ sys.path[0:0] = [""] -from test import unittest - +from test import unittest, IntegrationTest, client_context +from test.utils import get_pool from test.unified_format import generate_test_classes # Location of JSON test specifications. @@ -30,5 +30,23 @@ # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +class TestLB(IntegrationTest): + @client_context.require_load_balancer + def test_unpin_committed_transaction(self): + pool = get_pool(self.client) + with self.client.start_session() as session: + with session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + self.db.test.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + def test_client_can_be_reopened(self): + self.client.close() + self.db.test.find_one({}) + + if __name__ == "__main__": unittest.main() diff --git a/test/load_balancer/unified/cursors.json b/test/load_balancer/unified/cursors.json index 43e4fbb4f6..4e2a55fd43 100644 --- a/test/load_balancer/unified/cursors.json +++ b/test/load_balancer/unified/cursors.json @@ -376,7 +376,7 @@ ] }, { - "description": "pinned connections are not returned after an network error during getMore", + "description": "pinned connections are returned after an network error during getMore", "operations": [ { "name": "failPoint", @@ -440,7 +440,7 @@ "object": "testRunner", "arguments": { "client": "client0", - "connections": 1 + "connections": 0 } }, { @@ -659,7 +659,7 @@ ] }, { - "description": "pinned connections are not returned to the pool after a non-network error on getMore", + "description": "pinned connections are returned to the pool after a non-network error on getMore", "operations": [ { "name": "failPoint", @@ -715,7 +715,7 @@ "object": "testRunner", "arguments": { "client": "client0", - "connections": 1 + "connections": 0 } }, { diff --git a/test/load_balancer/unified/sdam-error-handling.json b/test/load_balancer/unified/sdam-error-handling.json index 63aabc04db..462fa0aac5 100644 --- a/test/load_balancer/unified/sdam-error-handling.json +++ b/test/load_balancer/unified/sdam-error-handling.json @@ -366,9 +366,6 @@ { "connectionCreatedEvent": {} }, - { - "poolClearedEvent": {} - }, { "connectionClosedEvent": { "reason": "error" @@ -378,6 +375,9 @@ "connectionCheckOutFailedEvent": { "reason": "connectionError" } + }, + { + "poolClearedEvent": {} } ] } diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 8a28284bf5..540dd68e31 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -40,7 +40,7 @@ def __init__(self, client, pair, *args, **kwargs): Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) @contextlib.contextmanager - def get_socket(self, all_credentials, checkout=False): + def get_socket(self, all_credentials, checkout=False, handler=None): client = self.client host_and_port = '%s:%s' % (self.mock_host, self.mock_port) if host_and_port in client.mock_down_hosts: @@ -51,7 +51,8 @@ def get_socket(self, all_credentials, checkout=False): + client.mock_members + client.mock_mongoses), "bad host: %s" % host_and_port - with Pool.get_socket(self, all_credentials, checkout) as sock_info: + with Pool.get_socket( + self, all_credentials, checkout, handler) as sock_info: sock_info.mock_host = self.mock_host sock_info.mock_port = self.mock_port yield sock_info diff --git a/test/test_client.py b/test/test_client.py index 88adaac6ae..f97691176d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1323,11 +1323,11 @@ def test_stale_getmore(self): with self.assertRaises(AutoReconnect): client = rs_client(connect=False, serverSelectionTimeoutMS=100) - client._run_operation_with_response( + client._run_operation( operation=message._GetMore('pymongo_test', 'collection', 101, 1234, client.codec_options, ReadPreference.PRIMARY, - None, client, None, None), + None, client, None, None, False), unpack_res=Cursor( client.pymongo_test.collection)._unpack_response, address=('not-a-member', 27017)) @@ -1708,7 +1708,7 @@ def test_exhaust_getmore_network_error(self): cursor.next() # Cause a network error. - sock_info = cursor._Cursor__exhaust_mgr.sock + sock_info = cursor._Cursor__sock_mgr.sock sock_info.sock.close() # A getmore fails. diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 16c0166b5b..bdf24e240d 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -20,6 +20,7 @@ sys.path[0:0] = [""] +from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne @@ -397,7 +398,8 @@ def test_command_and_get_more(self): def test_get_more_failure(self): address = self.client.address coll = self.client.pymongo_test.test - cursor_doc = {"id": 12345, "firstBatch": [], "ns": coll.full_name} + cursor_id = Int64(12345) + cursor_doc = {"id": cursor_id, "firstBatch": [], "ns": coll.full_name} cursor = CommandCursor(coll, cursor_doc, address) try: next(cursor) @@ -410,7 +412,7 @@ def test_get_more_failure(self): self.assertTrue( isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', 12345), + SON([('getMore', cursor_id), ('collection', 'test')]), started.command) self.assertEqual('getMore', started.command_name) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 93a282a6d8..773aed3f51 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -322,10 +322,9 @@ def _socket_for_reads(self, read_preference, session): yield sock_info, slave_ok @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session, - exhaust=False): + def _slaveok_for_server(self, read_preference, server, session, pin=False): context = super(ReadPrefTester, self)._slaveok_for_server( - read_preference, server, session, exhaust=exhaust) + read_preference, server, session, pin=pin) with context as (sock_info, slave_ok): self.record_a_read(sock_info.address) yield sock_info, slave_ok diff --git a/test/unified_format.py b/test/unified_format.py index 91e02e9e26..d2433e5b49 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -519,6 +519,14 @@ def match_result(self, expectation, actual, self.test.assertIsInstance(actual, type(expectation)) self.test.assertEqual(expectation, actual) + def assertHasServiceId(self, spec, actual): + if 'hasServiceId' in spec: + if spec.get('hasServiceId'): + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + def match_event(self, event_type, expectation, actual): name, spec = next(iter(expectation.items())) @@ -543,24 +551,23 @@ def match_event(self, event_type, expectation, actual): if database_name: self.test.assertEqual( database_name, actual.database_name) + self.assertHasServiceId(spec, actual) elif name == 'commandSucceededEvent': self.test.assertIsInstance(actual, CommandSucceededEvent) reply = spec.get('reply') if reply: self.match_result(reply, actual.reply) + self.assertHasServiceId(spec, actual) elif name == 'commandFailedEvent': self.test.assertIsInstance(actual, CommandFailedEvent) + self.assertHasServiceId(spec, actual) elif name == 'poolCreatedEvent': self.test.assertIsInstance(actual, PoolCreatedEvent) elif name == 'poolReadyEvent': self.test.assertIsInstance(actual, PoolReadyEvent) elif name == 'poolClearedEvent': self.test.assertIsInstance(actual, PoolClearedEvent) - if spec.get('hasServiceId'): - self.test.assertIsNotNone(actual.service_id) - self.test.assertIsInstance(actual.service_id, ObjectId) - else: - self.test.assertIsNone(actual.service_id) + self.assertHasServiceId(spec, actual) elif name == 'poolClosedEvent': self.test.assertIsInstance(actual, PoolClosedEvent) elif name == 'connectionCreatedEvent': @@ -569,12 +576,14 @@ def match_event(self, event_type, expectation, actual): self.test.assertIsInstance(actual, ConnectionReadyEvent) elif name == 'connectionClosedEvent': self.test.assertIsInstance(actual, ConnectionClosedEvent) - self.test.assertEqual(actual.reason, spec['reason']) + if 'reason' in spec: + self.test.assertEqual(actual.reason, spec['reason']) elif name == 'connectionCheckOutStartedEvent': self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) elif name == 'connectionCheckOutFailedEvent': self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) - self.test.assertEqual(actual.reason, spec['reason']) + if 'reason' in spec: + self.test.assertEqual(actual.reason, spec['reason']) elif name == 'connectionCheckedOutEvent': self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) elif name == 'connectionCheckedInEvent': diff --git a/test/utils.py b/test/utils.py index fa2865c837..c8336440e0 100644 --- a/test/utils.py +++ b/test/utils.py @@ -267,7 +267,7 @@ def __init__(self, address, options, handshake=True): def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) - def get_socket(self, all_credentials, checkout=False): + def get_socket(self, all_credentials, checkout=False, handler=None): return MockSocketInfo() def return_socket(self, *args, **kwargs): From bf78a9b2efd112eb996eb9334ab753848a2864fc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Jun 2021 14:30:01 -0700 Subject: [PATCH 0362/2111] PYTHON-2744 Run LB tests against non-LB clusters (#638) Fix serviceId fallback to make spec test pass. Fix socket leak when SocketInfo connection handshake fails. --- pymongo/pool.py | 15 +++--- test/__init__.py | 3 ++ test/load_balancer/test_crud_unified.py | 2 +- test/load_balancer/test_dns.py | 2 +- test/load_balancer/test_load_balancer.py | 30 +---------- .../test_retryable_change_stream.py | 2 +- test/load_balancer/test_retryable_reads.py | 2 +- test/load_balancer/test_retryable_writes.py | 2 +- .../test_transactions_unified.py | 2 +- test/load_balancer/test_uri_options.py | 2 +- test/load_balancer/test_versioned_api.py | 2 +- test/test_load_balancer.py | 52 +++++++++++++++++++ test/unified_format.py | 4 ++ 13 files changed, 78 insertions(+), 42 deletions(-) create mode 100644 test/test_load_balancer.py diff --git a/pymongo/pool.py b/pymongo/pool.py index 5d62e4beed..15e5b4873f 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -226,6 +226,9 @@ def _set_keepalive_times(sock): # main thread, to avoid the deadlock. See PYTHON-607. 'foo'.encode('idna') +# Remove after PYTHON-2712 +_MOCK_SERVICE_ID = False + def _raise_connection_failure(address, error, msg_prefix=None): """Convert a socket.error to ConnectionFailure and raise it.""" @@ -600,7 +603,7 @@ def _ismaster(self, cluster_time, topology_version, doc = self.command('admin', cmd, publish_events=False, exhaust_allowed=awaitable) # PYTHON-2712 will remove this topologyVersion fallback logic. - if self.opts.load_balanced: + if self.opts.load_balanced and _MOCK_SERVICE_ID: process_id = doc.get('topologyVersion', {}).get('processId') doc.setdefault('serviceId', process_id) ismaster = IsMaster(doc, awaitable=awaitable) @@ -627,7 +630,7 @@ def _ismaster(self, cluster_time, topology_version, if self.opts.load_balanced: if not ismaster.service_id: raise ConfigurationError( - 'Driver attempted to initialize in load balancing mode' + 'Driver attempted to initialize in load balancing mode,' ' but the server does not support this mode') self.service_id = ismaster.service_id self.generation = self.pool_gen.get(self.service_id) @@ -1338,11 +1341,11 @@ def connect(self, all_credentials=None): raise sock_info = SocketInfo(sock, self, self.address, conn_id) - if self.handshake: - sock_info.ismaster(all_credentials) - self.is_writable = sock_info.is_writable - try: + if self.handshake: + sock_info.ismaster(all_credentials) + self.is_writable = sock_info.is_writable + sock_info.check_auth(all_credentials) except BaseException: sock_info.close_socket(ConnectionClosedReason.ERROR) diff --git a/test/__init__.py b/test/__init__.py index 9e76b28f50..1e76e58b88 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -97,6 +97,9 @@ SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") if TEST_LOADBALANCER: + # Remove after PYTHON-2712 + from pymongo import pool + pool._MOCK_SERVICE_ID = True res = parse_uri(SINGLE_MONGOS_LB_URI) host, port = res['nodelist'][0] db_user = res['username'] or db_user diff --git a/test/load_balancer/test_crud_unified.py b/test/load_balancer/test_crud_unified.py index dfe0935bba..4363f293fd 100644 --- a/test/load_balancer/test_crud_unified.py +++ b/test/load_balancer/test_crud_unified.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_crud_unified import * if __name__ == '__main__': diff --git a/test/load_balancer/test_dns.py b/test/load_balancer/test_dns.py index 047b98b121..34e2329c8c 100644 --- a/test/load_balancer/test_dns.py +++ b/test/load_balancer/test_dns.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_dns import * if __name__ == '__main__': diff --git a/test/load_balancer/test_load_balancer.py b/test/load_balancer/test_load_balancer.py index 99f8855cad..77e824e59d 100644 --- a/test/load_balancer/test_load_balancer.py +++ b/test/load_balancer/test_load_balancer.py @@ -14,38 +14,12 @@ """Test the Load Balancer unified spec tests.""" -import os import sys sys.path[0:0] = [""] -from test import unittest, IntegrationTest, client_context -from test.utils import get_pool -from test.unified_format import generate_test_classes - -# Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'unified') - -# Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) - - -class TestLB(IntegrationTest): - @client_context.require_load_balancer - def test_unpin_committed_transaction(self): - pool = get_pool(self.client) - with self.client.start_session() as session: - with session.start_transaction(): - self.assertEqual(pool.active_sockets, 0) - self.db.test.insert_one({}, session=session) - self.assertEqual(pool.active_sockets, 1) # Pinned. - self.assertEqual(pool.active_sockets, 1) # Still pinned. - self.assertEqual(pool.active_sockets, 0) # Unpinned. - - def test_client_can_be_reopened(self): - self.client.close() - self.db.test.find_one({}) +from test import unittest +from test.test_load_balancer import * if __name__ == "__main__": diff --git a/test/load_balancer/test_retryable_change_stream.py b/test/load_balancer/test_retryable_change_stream.py index b7c902dd30..f08e27e9d6 100644 --- a/test/load_balancer/test_retryable_change_stream.py +++ b/test/load_balancer/test_retryable_change_stream.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_change_stream import * if __name__ == '__main__': diff --git a/test/load_balancer/test_retryable_reads.py b/test/load_balancer/test_retryable_reads.py index c5de3c9078..73510fab76 100644 --- a/test/load_balancer/test_retryable_reads.py +++ b/test/load_balancer/test_retryable_reads.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_retryable_reads import * if __name__ == '__main__': diff --git a/test/load_balancer/test_retryable_writes.py b/test/load_balancer/test_retryable_writes.py index 3800641b08..c920acb818 100644 --- a/test/load_balancer/test_retryable_writes.py +++ b/test/load_balancer/test_retryable_writes.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_retryable_writes import * if __name__ == '__main__': diff --git a/test/load_balancer/test_transactions_unified.py b/test/load_balancer/test_transactions_unified.py index 2572028046..d2f7eac94b 100644 --- a/test/load_balancer/test_transactions_unified.py +++ b/test/load_balancer/test_transactions_unified.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_transactions_unified import * if __name__ == '__main__': diff --git a/test/load_balancer/test_uri_options.py b/test/load_balancer/test_uri_options.py index b644d7d334..c7151d330a 100644 --- a/test/load_balancer/test_uri_options.py +++ b/test/load_balancer/test_uri_options.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_uri_spec import * if __name__ == '__main__': diff --git a/test/load_balancer/test_versioned_api.py b/test/load_balancer/test_versioned_api.py index 7e801968cb..2b188a6b1e 100644 --- a/test/load_balancer/test_versioned_api.py +++ b/test/load_balancer/test_versioned_api.py @@ -13,10 +13,10 @@ # limitations under the License. import sys -import unittest sys.path[0:0] = [""] +from test import unittest from test.test_versioned_api import * if __name__ == '__main__': diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py new file mode 100644 index 0000000000..90bde87e5f --- /dev/null +++ b/test/test_load_balancer.py @@ -0,0 +1,52 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest, IntegrationTest, client_context +from test.utils import get_pool +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'load_balancer', 'unified') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestLB(IntegrationTest): + @client_context.require_load_balancer + def test_unpin_committed_transaction(self): + pool = get_pool(self.client) + with self.client.start_session() as session: + with session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + self.db.test.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + def test_client_can_be_reopened(self): + self.client.close() + self.db.test.find_one({}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index d2433e5b49..284fee1ed1 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -891,6 +891,10 @@ def run_entity_operation(self, spec): if expect_error: return self.process_error(exc, expect_error) raise + else: + if expect_error: + self.fail('Excepted error %s but "%s" succeeded: %s' % ( + expect_error, opname, result)) if expect_result: actual = coerce_result(opname, result) From a906e57a7c914d3aebd5a7ef62bd015f4b1adf40 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Jun 2021 12:09:15 -0700 Subject: [PATCH 0363/2111] PYTHON-2731 Run load balancer test suite with all Python versions (#640) --- .evergreen/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 622582d274..053dc0cdec 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2534,7 +2534,7 @@ buildvariants: platform: ubuntu-18.04 mongodb-version: ["5.0", "latest"] auth-ssl: "*" - python-version: ["3.6", "3.9"] + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] loadbalancer: "*" display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" tasks: From fd9391df5c7c810f0048ad420067f977a33034a9 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 15 Jun 2021 13:10:33 -0700 Subject: [PATCH 0364/2111] PYTHON-2557 Timeseries collection support This change also resolves PYTHON-2604. --- pymongo/database.py | 4 + .../timeseries-collection.json | 271 ++++++++++++++++++ test/test_collection_management.py | 34 +++ test/unified_format.py | 6 +- test/utils.py | 7 +- 5 files changed, 318 insertions(+), 4 deletions(-) create mode 100644 test/collection_management/timeseries-collection.json create mode 100644 test/test_collection_management.py diff --git a/pymongo/database.py b/pymongo/database.py index 9e2b221fe2..b9efac9408 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -256,6 +256,10 @@ def create_collection(self, name, codec_options=None, size of the collection. - "capped": if True, this is a capped collection - "max": maximum number of objects if capped (optional) + - `timeseries`: a document specifying configuration options for + timeseries collections + - `expireAfterSeconds`: the number of seconds after which a + document in a timeseries collection expires See the MongoDB documentation for a full list of supported options by server version. diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json new file mode 100644 index 0000000000..99f642e597 --- /dev/null +++ b/test/collection_management/timeseries-collection.json @@ -0,0 +1,271 @@ +{ + "description": "timeseries-collection", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ts-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ts-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with all options", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + }, + { + "description": "insertMany with duplicate ids", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "sort": { + "time": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "expireAfterSeconds": 604800, + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630482" + } + } + }, + { + "_id": 1, + "time": { + "$date": { + "$numberLong": "1552949630483" + } + } + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": {}, + "sort": { + "time": 1 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_collection_management.py b/test/test_collection_management.py new file mode 100644 index 0000000000..342e612583 --- /dev/null +++ b/test/test_collection_management.py @@ -0,0 +1,34 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection management unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'collection_management') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 284fee1ed1..9bc5d4afae 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -103,17 +103,19 @@ def is_run_on_requirement_satisfied(requirement): topology_satisfied = client_context.is_topology_type( req_topologies) + server_version = Version(*client_context.version[:3]) + min_version_satisfied = True req_min_server_version = requirement.get('minServerVersion') if req_min_server_version: min_version_satisfied = Version.from_string( - req_min_server_version) <= client_context.version + req_min_server_version) <= server_version max_version_satisfied = True req_max_server_version = requirement.get('maxServerVersion') if req_max_server_version: max_version_satisfied = Version.from_string( - req_max_server_version) >= client_context.version + req_max_server_version) >= server_version params_satisfied = True params = requirement.get('serverParameters') diff --git a/test/utils.py b/test/utils.py index c8336440e0..fb29bb37e5 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1102,8 +1102,11 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, copy.deepcopy(callback_ops)) elif opname == 'drop_collection' and arg_name == 'collection': arguments['name_or_collection'] = arguments.pop(arg_name) - elif opname == 'create_collection' and arg_name == 'collection': - arguments['name'] = arguments.pop(arg_name) + elif opname == 'create_collection': + if arg_name == 'collection': + arguments['name'] = arguments.pop(arg_name) + # Any other arguments to create_collection are passed through + # **kwargs. elif opname == 'create_index' and arg_name == 'keys': arguments['keys'] = list(arguments.pop(arg_name).items()) elif opname == 'drop_index' and arg_name == 'name': From 83adc9af038440e3ecd8939039de31e25a7e0c3b Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 16 Jun 2021 15:20:13 -0700 Subject: [PATCH 0365/2111] PYTHON-2740 Bump maxWireVersion for MongoDB 5.0 --- pymongo/common.py | 2 +- test/test_topology.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index b64b88278b..7bac951e0d 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -51,7 +51,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "2.6" MIN_SUPPORTED_WIRE_VERSION = 2 -MAX_SUPPORTED_WIRE_VERSION = 9 +MAX_SUPPORTED_WIRE_VERSION = 13 # Frequency to call ismaster on servers, in seconds. HEARTBEAT_FREQUENCY = 10 diff --git a/test/test_topology.py b/test/test_topology.py index bb94207f4d..f3db8cbde7 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -528,8 +528,8 @@ def test_wire_version(self): 'ismaster': True, 'setName': 'rs', 'hosts': ['a'], - 'minWireVersion': 11, - 'maxWireVersion': 12}) + 'minWireVersion': 21, + 'maxWireVersion': 22}) try: t.select_servers(any_server_selector) @@ -537,7 +537,7 @@ def test_wire_version(self): # Error message should say which server failed and why. self.assertEqual( str(e), - "Server at a:27017 requires wire version 11, but this version " + "Server at a:27017 requires wire version 21, but this version " "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,)) else: From 3de63373aadc67518868b1db15b40e7bc8e981ab Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 16 Jun 2021 18:35:34 -0700 Subject: [PATCH 0366/2111] PYTHON-2553 Test document validation error details --- test/test_read_write_concern_spec.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 5e890188f3..49f1823d2b 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -25,7 +25,8 @@ from pymongo.errors import (BulkWriteError, ConfigurationError, WTimeoutError, - WriteConcernError) + WriteConcernError, + WriteError) from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern @@ -210,6 +211,27 @@ def test_error_includes_errInfo(self): 'nRemoved': 0, 'upserted': []} self.assertEqual(ctx.exception.details, expected_details) + @client_context.require_version_min(4, 9) + def test_write_error_details_exposes_errinfo(self): + listener = EventListener() + client = rs_or_single_client(event_listeners=[listener]) + db = client.errinfotest + self.addCleanup(client.drop_database, "errinfotest") + validator = {"x": {"$type": "string"}} + db.create_collection("test", validator=validator) + with self.assertRaises(WriteError) as ctx: + db.test.insert_one({'x': 1}) + self.assertEqual(ctx.exception.code, 121) + self.assertIsNotNone(ctx.exception.details) + self.assertIsNotNone(ctx.exception.details.get('errInfo')) + for event in listener.results['succeeded']: + if event.command_name == 'insert': + self.assertEqual( + event.reply['writeErrors'][0], ctx.exception.details) + break + else: + self.fail("Couldn't find insert event.") + def normalize_write_concern(concern): result = {} From cfbc3a7995779974e1da1d855b4b36ce93580d6a Mon Sep 17 00:00:00 2001 From: Shrikant Sharat Kandula Date: Mon, 21 Jun 2021 22:50:07 +0530 Subject: [PATCH 0367/2111] Fix typo in list_collections docstring (collectons -> collections) (#644) --- pymongo/database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/database.py b/pymongo/database.py index b9efac9408..8928287ee2 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -666,7 +666,7 @@ def _list_collections(self, sock_info, slave_okay, session, return cmd_cursor def list_collections(self, session=None, filter=None, **kwargs): - """Get a cursor over the collectons of this database. + """Get a cursor over the collections of this database. :Parameters: - `session` (optional): a From abb081a0122a3c8a84ce1b19e8428b031b43651e Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 17 Jun 2021 16:00:25 -0700 Subject: [PATCH 0368/2111] PYTHON-2741 Test aggregate let support --- test/crud/unified/aggregate-let.json | 478 +++++++++++++++++++++++++++ 1 file changed, 478 insertions(+) create mode 100644 test/crud/unified/aggregate-let.json diff --git a/test/crud/unified/aggregate-let.json b/test/crud/unified/aggregate-let.json new file mode 100644 index 0000000000..4ce8256cb7 --- /dev/null +++ b/test/crud/unified/aggregate-let.json @@ -0,0 +1,478 @@ +{ + "description": "aggregate-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "bar" + }, + "rand": { + "$rand": {} + } + } + }, + "expectResult": [ + { + "x": "foo", + "y": "bar", + "rand": { + "$$type": "double" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "bar" + }, + "rand": { + "$rand": {} + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with let option and dollar-prefixed $literal value", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "$bar" + }, + "rand": { + "$rand": {} + } + } + }, + "expectResult": [ + { + "x": "foo", + "y": "$bar", + "rand": { + "$$type": "double" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 0, + "x": "$$x", + "y": "$$y", + "rand": "$$rand" + } + } + ], + "let": { + "id": 1, + "x": "foo", + "y": { + "$literal": "$bar" + }, + "rand": { + "$rand": {} + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "2.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate to collection with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Aggregate to collection with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "2.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll1" + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ] + } + ] +} From 59dc6d8ca0d2f0d43e8f9f1af659f29e63554cbb Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 21 Jun 2021 18:07:28 -0700 Subject: [PATCH 0369/2111] PYTHON-2718 Test redaction of security sensitive command monitoring events (#637) --- pymongo/monitoring.py | 17 +- .../{ => legacy}/bulkWrite.json | 0 .../{ => legacy}/command.json | 0 .../{ => legacy}/deleteMany.json | 0 .../{ => legacy}/deleteOne.json | 0 .../command_monitoring/{ => legacy}/find.json | 0 .../{ => legacy}/insertMany.json | 0 .../{ => legacy}/insertOne.json | 0 .../{ => legacy}/unacknowledgedBulkWrite.json | 0 .../{ => legacy}/updateMany.json | 0 .../{ => legacy}/updateOne.json | 0 .../unified/redacted-commands.json | 492 ++++++++++++++++++ .../test_command_monitoring_unified.py | 23 + ...c.py => test_command_monitoring_legacy.py} | 8 +- test/test_command_monitoring_unified.py | 38 ++ test/unified_format.py | 17 +- 16 files changed, 585 insertions(+), 10 deletions(-) rename test/command_monitoring/{ => legacy}/bulkWrite.json (100%) rename test/command_monitoring/{ => legacy}/command.json (100%) rename test/command_monitoring/{ => legacy}/deleteMany.json (100%) rename test/command_monitoring/{ => legacy}/deleteOne.json (100%) rename test/command_monitoring/{ => legacy}/find.json (100%) rename test/command_monitoring/{ => legacy}/insertMany.json (100%) rename test/command_monitoring/{ => legacy}/insertOne.json (100%) rename test/command_monitoring/{ => legacy}/unacknowledgedBulkWrite.json (100%) rename test/command_monitoring/{ => legacy}/updateMany.json (100%) rename test/command_monitoring/{ => legacy}/updateOne.json (100%) create mode 100644 test/command_monitoring/unified/redacted-commands.json create mode 100644 test/load_balancer/test_command_monitoring_unified.py rename test/{test_command_monitoring_spec.py => test_command_monitoring_legacy.py} (97%) create mode 100644 test/test_command_monitoring_unified.py diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 5e86e93e02..0643a26d1b 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -509,6 +509,15 @@ def register(listener): "updateuser", "copydbgetnonce", "copydbsaslstart", "copydb"]) +# The "hello" command is also deemed sensitive when attempting speculative +# authentication. +def _is_speculative_authenticate(command_name, doc): + if (command_name.lower() in ('hello', 'ismaster') and + 'speculativeAuthenticate' in doc): + return True + return False + + class _CommandEvent(object): """Base class for command events.""" @@ -573,7 +582,9 @@ def __init__(self, command, database_name, *args, service_id=None): command_name = next(iter(command)) super(CommandStartedEvent, self).__init__( command_name, *args, service_id=service_id) - if command_name.lower() in _SENSITIVE_COMMANDS: + cmd_name, cmd_doc = command_name.lower(), command[command_name] + if (cmd_name in _SENSITIVE_COMMANDS or + _is_speculative_authenticate(cmd_name, command)): self.__cmd = {} else: self.__cmd = command @@ -619,7 +630,9 @@ def __init__(self, duration, reply, command_name, command_name, request_id, connection_id, operation_id, service_id=service_id) self.__duration_micros = _to_micros(duration) - if command_name.lower() in _SENSITIVE_COMMANDS: + cmd_name = command_name.lower() + if (cmd_name in _SENSITIVE_COMMANDS or + _is_speculative_authenticate(cmd_name, reply)): self.__reply = {} else: self.__reply = reply diff --git a/test/command_monitoring/bulkWrite.json b/test/command_monitoring/legacy/bulkWrite.json similarity index 100% rename from test/command_monitoring/bulkWrite.json rename to test/command_monitoring/legacy/bulkWrite.json diff --git a/test/command_monitoring/command.json b/test/command_monitoring/legacy/command.json similarity index 100% rename from test/command_monitoring/command.json rename to test/command_monitoring/legacy/command.json diff --git a/test/command_monitoring/deleteMany.json b/test/command_monitoring/legacy/deleteMany.json similarity index 100% rename from test/command_monitoring/deleteMany.json rename to test/command_monitoring/legacy/deleteMany.json diff --git a/test/command_monitoring/deleteOne.json b/test/command_monitoring/legacy/deleteOne.json similarity index 100% rename from test/command_monitoring/deleteOne.json rename to test/command_monitoring/legacy/deleteOne.json diff --git a/test/command_monitoring/find.json b/test/command_monitoring/legacy/find.json similarity index 100% rename from test/command_monitoring/find.json rename to test/command_monitoring/legacy/find.json diff --git a/test/command_monitoring/insertMany.json b/test/command_monitoring/legacy/insertMany.json similarity index 100% rename from test/command_monitoring/insertMany.json rename to test/command_monitoring/legacy/insertMany.json diff --git a/test/command_monitoring/insertOne.json b/test/command_monitoring/legacy/insertOne.json similarity index 100% rename from test/command_monitoring/insertOne.json rename to test/command_monitoring/legacy/insertOne.json diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/legacy/unacknowledgedBulkWrite.json similarity index 100% rename from test/command_monitoring/unacknowledgedBulkWrite.json rename to test/command_monitoring/legacy/unacknowledgedBulkWrite.json diff --git a/test/command_monitoring/updateMany.json b/test/command_monitoring/legacy/updateMany.json similarity index 100% rename from test/command_monitoring/updateMany.json rename to test/command_monitoring/legacy/updateMany.json diff --git a/test/command_monitoring/updateOne.json b/test/command_monitoring/legacy/updateOne.json similarity index 100% rename from test/command_monitoring/updateOne.json rename to test/command_monitoring/legacy/updateOne.json diff --git a/test/command_monitoring/unified/redacted-commands.json b/test/command_monitoring/unified/redacted-commands.json new file mode 100644 index 0000000000..c53f018d32 --- /dev/null +++ b/test/command_monitoring/unified/redacted-commands.json @@ -0,0 +1,492 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + } + ], + "tests": [ + { + "description": "authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "authenticate", + "command": { + "authenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslStart", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslStart", + "command": { + "saslStart": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslContinue", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslContinue", + "command": { + "saslContinue": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": "private" + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createUser", + "command": { + "createUser": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "updateUser", + "command": { + "updateUser": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbsaslstart", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydb", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydb", + "command": { + "copydb": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": "private", + "speculativeAuthenticate": "foo" + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": "private", + "speculativeAuthenticate": "foo" + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": "private", + "speculativeAuthenticate": "foo" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate is not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": "public" + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": "public" + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": "public" + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": "public" + } + } + }, + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": "public" + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": "public" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/load_balancer/test_command_monitoring_unified.py b/test/load_balancer/test_command_monitoring_unified.py new file mode 100644 index 0000000000..6b8ef98325 --- /dev/null +++ b/test/load_balancer/test_command_monitoring_unified.py @@ -0,0 +1,23 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.test_command_monitoring_unified import * + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_command_monitoring_spec.py b/test/test_command_monitoring_legacy.py similarity index 97% rename from test/test_command_monitoring_spec.py rename to test/test_command_monitoring_legacy.py index 3363a4256a..16bdf1c68e 100644 --- a/test/test_command_monitoring_spec.py +++ b/test/test_command_monitoring_legacy.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Run the command monitoring spec tests.""" +"""Run the command monitoring legacy-format spec tests.""" import os import re @@ -26,7 +26,8 @@ from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern from test import unittest, client_context -from test.utils import single_client, wait_until, EventListener, parse_read_preference +from test.utils import ( + single_client, wait_until, EventListener, parse_read_preference) # Location of JSON test specifications. _TEST_PATH = os.path.join( @@ -204,7 +205,7 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')): dirname = os.path.split(dirpath)[-1] for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: @@ -239,5 +240,6 @@ def create_tests(): create_tests() + if __name__ == "__main__": unittest.main() diff --git a/test/test_command_monitoring_unified.py b/test/test_command_monitoring_unified.py new file mode 100644 index 0000000000..9390c9fec6 --- /dev/null +++ b/test/test_command_monitoring_unified.py @@ -0,0 +1,38 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + + +# Location of JSON test specifications. +_TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + 'command_monitoring') + + +globals().update(generate_test_classes( + os.path.join(_TEST_PATH, 'unified'), + module=__name__,)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 9bc5d4afae..d24e280b28 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -178,10 +178,14 @@ def close(self): class EventListenerUtil(CMAPListener, CommandListener): - def __init__(self, observe_events, ignore_commands): + def __init__(self, observe_events, ignore_commands, + observe_sensitive_commands): self._event_types = set(name.lower() for name in observe_events) - self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) - self._ignore_commands.add('configurefailpoint') + if observe_sensitive_commands: + self._ignore_commands = set(ignore_commands) + else: + self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) + self._ignore_commands.add('configurefailpoint') super(EventListenerUtil, self).__init__() def get_events(self, event_type): @@ -244,10 +248,13 @@ def _create_entity(self, entity_spec): kwargs = {} observe_events = spec.get('observeEvents', []) ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) + observe_sensitive_commands = spec.get( + 'observeSensitiveCommands', False) # TODO: SUPPORT storeEventsAsEntities if len(observe_events) or len(ignore_commands): ignore_commands = [cmd.lower() for cmd in ignore_commands] - listener = EventListenerUtil(observe_events, ignore_commands) + listener = EventListenerUtil( + observe_events, ignore_commands, observe_sensitive_commands) self._listeners[spec['id']] = listener kwargs['event_listeners'] = [listener] if spec.get('useMultipleMongoses'): @@ -623,7 +630,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string('1.4') + SCHEMA_VERSION = Version.from_string('1.5') @staticmethod def should_run_on(run_on_spec): From 4c77d7c8552a4ae21edb6503f879340691ffed8c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 21 Jun 2021 18:29:36 -0700 Subject: [PATCH 0370/2111] PYTHON-2677 Better wait queue timeout errors for load balanced clusters (#639) Remove checkout argument in favor of SocketInfo.pin_txn/pin_cursor() --- pymongo/change_stream.py | 2 +- pymongo/client_session.py | 2 +- pymongo/collection.py | 6 +- pymongo/command_cursor.py | 1 + pymongo/database.py | 6 +- pymongo/mongo_client.py | 24 ++- pymongo/pool.py | 59 +++++-- pymongo/server.py | 10 +- .../unified/wait-queue-timeouts.json | 153 ++++++++++++++++++ test/pymongo_mocks.py | 5 +- test/test_cmap.py | 4 +- test/test_pooling.py | 16 +- test/test_read_preferences.py | 4 +- test/utils.py | 2 +- 14 files changed, 234 insertions(+), 60 deletions(-) create mode 100644 test/load_balancer/unified/wait-queue-timeouts.json diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index fcb9d9f520..f742e126c6 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -181,7 +181,7 @@ def _run_aggregation_cmd(self, session, explicit_session): return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), - session, pin=self._client._should_pin_cursor(session)) + session) def _create_cursor(self): with self._client._tmp_session(self._session, close=False) as s: diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 9db8184826..8e5b2a597a 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -314,7 +314,7 @@ def pin(self, server, sock_info): self.sharded = True self.pinned_address = server.description.address if server.description.server_type == SERVER_TYPE.LoadBalancer: - sock_info.pinned = True + sock_info.pin_txn() self.sock_mgr = _SocketManager(sock_info, False) def unpin(self): diff --git a/pymongo/collection.py b/pymongo/collection.py index cf8f679be1..7943e08aa8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2093,8 +2093,7 @@ def _cmd(session, server, sock_info, slave_ok): return cmd_cursor return self.__database.client._retryable_read( - _cmd, read_pref, session, - pin=self.__database.client._should_pin_cursor(session)) + _cmd, read_pref, session) def index_information(self, session=None): """Get information on this collection's indexes. @@ -2175,8 +2174,7 @@ def _aggregate(self, aggregation_command, pipeline, cursor_class, session, user_fields={'cursor': {'firstBatch': 1}}) return self.__database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), session, - retryable=not cmd._performs_write, - pin=self.database.client._should_pin_cursor(session)) + retryable=not cmd._performs_write) def aggregate(self, pipeline, session=None, **kwargs): """Perform an aggregation using the aggregation framework on this diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index fa219d0381..aabec3999f 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -138,6 +138,7 @@ def _maybe_pin_connection(self, sock_info): if not client._should_pin_cursor(self.__session): return if not self.__sock_mgr: + sock_info.pin_cursor() sock_mgr = _SocketManager(sock_info, False) # Ensure the connection gets returned when the entire result is # returned in the first batch. diff --git a/pymongo/database.py b/pymongo/database.py index 8928287ee2..0a814fb74e 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -381,8 +381,7 @@ def aggregate(self, pipeline, session=None, **kwargs): user_fields={'cursor': {'firstBatch': 1}}) return self.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(s), s, - retryable=not cmd._performs_write, - pin=self.client._should_pin_cursor(s)) + retryable=not cmd._performs_write) def watch(self, pipeline=None, full_document=None, resume_after=None, max_await_time_ms=None, batch_size=None, collation=None, @@ -695,8 +694,7 @@ def _cmd(session, server, sock_info, slave_okay): **kwargs) return self.__client._retryable_read( - _cmd, read_pref, session, - pin=self.client._should_pin_cursor(session)) + _cmd, read_pref, session) def list_collection_names(self, session=None, filter=None, **kwargs): """Get a list of all the collection names in this database. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4eb54e1c58..ad82306b85 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1161,7 +1161,7 @@ def _get_topology(self): return self._topology @contextlib.contextmanager - def _get_socket(self, server, session, pin=False): + def _get_socket(self, server, session): in_txn = session and session.in_transaction with _MongoClientErrorHandler(self, server, session) as err_handler: # Reuse the pinned connection, if it exists. @@ -1169,8 +1169,7 @@ def _get_socket(self, server, session, pin=False): yield session._pinned_connection return with server.get_socket( - self.__all_credentials, checkout=pin, - handler=err_handler) as sock_info: + self.__all_credentials, handler=err_handler) as sock_info: # Pin this session to the selected server or connection. if (in_txn and server.description.server_type in ( SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer)): @@ -1221,7 +1220,7 @@ def _socket_for_writes(self, session): return self._get_socket(server, session) @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session, pin=False): + def _slaveok_for_server(self, read_preference, server, session): assert read_preference is not None, "read_preference must not be None" # Get a socket for a server matching the read preference, and yield # sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to @@ -1232,7 +1231,7 @@ def _slaveok_for_server(self, read_preference, server, session, pin=False): topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single - with self._get_socket(server, session, pin=pin) as sock_info: + with self._get_socket(server, session) as sock_info: slave_ok = (single and not sock_info.is_mongos) or ( read_preference != ReadPreference.PRIMARY) yield sock_info, slave_ok @@ -1259,7 +1258,7 @@ def _should_pin_cursor(self, session): return (self.__options.load_balanced and not (session and session.in_transaction)) - def _run_operation(self, operation, unpack_res, pin=False, address=None): + def _run_operation(self, operation, unpack_res, address=None): """Run a _Query/_GetMore operation and return a Response. :Parameters: @@ -1268,7 +1267,6 @@ def _run_operation(self, operation, unpack_res, pin=False, address=None): - `address` (optional): Optional address when sending a message to a specific server, used for getMore. """ - pin = self._should_pin_cursor(operation.session) or operation.exhaust if operation.sock_mgr: server = self._select_server( operation.read_preference, operation.session, address=address) @@ -1279,17 +1277,16 @@ def _run_operation(self, operation, unpack_res, pin=False, address=None): err_handler.contribute_socket(operation.sock_mgr.sock) return server.run_operation( operation.sock_mgr.sock, operation, True, - self._event_listeners, pin, unpack_res) + self._event_listeners, unpack_res) def _cmd(session, server, sock_info, slave_ok): return server.run_operation( - sock_info, operation, slave_ok, self._event_listeners, pin, + sock_info, operation, slave_ok, self._event_listeners, unpack_res) return self._retryable_read( _cmd, operation.read_preference, operation.session, - address=address, retryable=isinstance(operation, message._Query), - pin=pin) + address=address, retryable=isinstance(operation, message._Query)) def _retry_with_session(self, retryable, func, session, bulk): """Execute an operation with at most one consecutive retries @@ -1361,7 +1358,7 @@ def is_retrying(): last_error = exc def _retryable_read(self, func, read_pref, session, address=None, - retryable=True, pin=False): + retryable=True): """Execute an operation with at most one consecutive retries Returns func()'s return value on success. On error retries the same @@ -1381,8 +1378,7 @@ def _retryable_read(self, func, read_pref, session, address=None, read_pref, session, address=address) if not server.description.retryable_reads_supported: retryable = False - with self._slaveok_for_server( - read_pref, server, session, pin=pin) as ( + with self._slaveok_for_server(read_pref, server, session) as ( sock_info, slave_ok): if retrying and not retryable: # A retry is not possible because this server does diff --git a/pymongo/pool.py b/pymongo/pool.py index 15e5b4873f..335f8d07f5 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -548,10 +548,18 @@ def __init__(self, sock, pool, address, id): self.service_id = None # When executing a transaction in load balancing mode, this flag is # set to true to indicate that the session now owns the connection. - self.pinned = False + self.pinned_txn = False + self.pinned_cursor = False + + def pin_txn(self): + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self): + self.pinned_cursor = True + assert not self.pinned_txn def unpin(self): - self.pinned = False pool = self.pool_ref() if pool: pool.return_socket(self) @@ -1178,6 +1186,8 @@ def __init__(self, address, options, handshake=True): # from thinking that a cursor's pinned connection can be GC'd when the # cursor is GC'd (see PYTHON-2751). self.__pinned_sockets = set() + self.ncursors = 0 + self.ntxns = 0 def ready(self): old_state, self.state = self.state, PoolState.READY @@ -1354,7 +1364,7 @@ def connect(self, all_credentials=None): return sock_info @contextlib.contextmanager - def get_socket(self, all_credentials, checkout=False, handler=None): + def get_socket(self, all_credentials, handler=None): """Get a socket from the pool. Use with a "with" statement. Returns a :class:`SocketInfo` object wrapping a connected @@ -1362,7 +1372,7 @@ def get_socket(self, all_credentials, checkout=False, handler=None): This method should always be used in a with-statement:: - with pool.get_socket(credentials, checkout) as socket_info: + with pool.get_socket(credentials) as socket_info: socket_info.send_message(msg) data = socket_info.receive_message(op_code, request_id) @@ -1374,7 +1384,6 @@ def get_socket(self, all_credentials, checkout=False, handler=None): :Parameters: - `all_credentials`: dict, maps auth source to MongoCredential. - - `checkout` (optional): keep socket checked out. - `handler` (optional): A _MongoClientErrorHandler. """ listeners = self.opts.event_listeners @@ -1382,9 +1391,6 @@ def get_socket(self, all_credentials, checkout=False, handler=None): listeners.publish_connection_check_out_started(self.address) sock_info = self._get_socket(all_credentials) - if checkout: - self.__pinned_sockets.add(sock_info) - if self.enabled_for_cmap: listeners.publish_connection_checked_out( self.address, sock_info.id) @@ -1395,7 +1401,7 @@ def get_socket(self, all_credentials, checkout=False, handler=None): # Note that when pinned is True, the session owns the # connection and it is responsible for checking the connection # back into the pool. - pinned = sock_info.pinned + pinned = sock_info.pinned_txn or sock_info.pinned_cursor if handler: # Perform SDAM error handling rules while the connection is # still checked out. @@ -1404,11 +1410,16 @@ def get_socket(self, all_credentials, checkout=False, handler=None): if not pinned: self.return_socket(sock_info) raise - else: - if sock_info.pinned: + if sock_info.pinned_txn: + with self.lock: self.__pinned_sockets.add(sock_info) - elif not checkout: - self.return_socket(sock_info) + self.ntxns += 1 + elif sock_info.pinned_cursor: + with self.lock: + self.__pinned_sockets.add(sock_info) + self.ncursors += 1 + else: + self.return_socket(sock_info) def _raise_if_not_ready(self, emit_event): if self.state != PoolState.READY: @@ -1528,8 +1539,11 @@ def return_socket(self, sock_info): :Parameters: - `sock_info`: The socket to check into the pool. """ + txn = sock_info.pinned_txn + cursor = sock_info.pinned_cursor + sock_info.pinned_txn = False + sock_info.pinned_cursor = False self.__pinned_sockets.discard(sock_info) - sock_info.pinned = False listeners = self.opts.event_listeners if self.enabled_for_cmap: listeners.publish_connection_checked_in(self.address, sock_info.id) @@ -1559,6 +1573,10 @@ def return_socket(self, sock_info): self._max_connecting_cond.notify() with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 self.requests -= 1 self.active_sockets -= 1 self.operation_count -= 1 @@ -1603,9 +1621,18 @@ def _raise_wait_queue_timeout(self): if self.enabled_for_cmap: listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.TIMEOUT) + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise ConnectionFailure( + 'Timeout waiting for connection from the connection pool. ' + 'maxPoolSize: %s, connections in use by cursors: %s, ' + 'connections in use by transactions: %s, connections in use ' + 'by other operations: %s, wait_queue_timeout: %s' % ( + self.opts.max_pool_size, self.ncursors, self.ntxns, + other_ops, self.opts.wait_queue_timeout)) raise ConnectionFailure( - 'Timed out while checking out a connection from connection pool ' - 'with max_size %r and wait_queue_timeout %r' % ( + 'Timed out while checking out a connection from connection pool. ' + 'maxPoolSize: %s, wait_queue_timeout: %s' % ( self.opts.max_pool_size, self.opts.wait_queue_timeout)) def __del__(self): diff --git a/pymongo/server.py b/pymongo/server.py index 672a3b1c1a..19529237d4 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -69,7 +69,7 @@ def request_check(self): self._monitor.request_check() def run_operation(self, sock_info, operation, set_slave_okay, listeners, - pin, unpack_res): + unpack_res): """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -81,7 +81,6 @@ def run_operation(self, sock_info, operation, set_slave_okay, listeners, - `set_slave_okay`: Pass to operation.get_message. - `all_credentials`: dict, maps auth source to MongoCredential. - `listeners`: Instance of _EventListeners or None. - - `pin`: If True, then this is a pinned cursor operation. - `unpack_res`: A callable that decodes the wire protocol response. """ duration = None @@ -170,7 +169,8 @@ def run_operation(self, sock_info, operation, set_slave_okay, listeners, docs = _decode_all_selective( decrypted, operation.codec_options, user_fields) - if pin: + if client._should_pin_cursor(operation.session) or operation.exhaust: + sock_info.pin_cursor() if isinstance(reply, _OpMsg): # In OP_MSG, the server keeps sending only if the # more_to_come flag is set. @@ -200,8 +200,8 @@ def run_operation(self, sock_info, operation, set_slave_okay, listeners, return response - def get_socket(self, all_credentials, checkout=False, handler=None): - return self.pool.get_socket(all_credentials, checkout, handler) + def get_socket(self, all_credentials, handler=None): + return self.pool.get_socket(all_credentials, handler) @property def description(self): diff --git a/test/load_balancer/unified/wait-queue-timeouts.json b/test/load_balancer/unified/wait-queue-timeouts.json new file mode 100644 index 0000000000..61575d6706 --- /dev/null +++ b/test/load_balancer/unified/wait-queue-timeouts.json @@ -0,0 +1,153 @@ +{ + "description": "wait queue timeout errors include details about checked out connections", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "uriOptions": { + "maxPoolSize": 1, + "waitQueueTimeoutMS": 5 + }, + "observeEvents": [ + "connectionCheckedOutEvent", + "connectionCheckOutFailedEvent" + ] + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "wait queue timeout errors include cursor statistics", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "maxPoolSize: 1, connections in use by cursors: 1, connections in use by transactions: 0, connections in use by other operations: 0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckOutFailedEvent": {} + } + ] + } + ] + }, + { + "description": "wait queue timeout errors include transaction statistics", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "maxPoolSize: 1, connections in use by cursors: 0, connections in use by transactions: 1, connections in use by other operations: 0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckOutFailedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 540dd68e31..af59e0cbe4 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -40,7 +40,7 @@ def __init__(self, client, pair, *args, **kwargs): Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) @contextlib.contextmanager - def get_socket(self, all_credentials, checkout=False, handler=None): + def get_socket(self, all_credentials, handler=None): client = self.client host_and_port = '%s:%s' % (self.mock_host, self.mock_port) if host_and_port in client.mock_down_hosts: @@ -51,8 +51,7 @@ def get_socket(self, all_credentials, checkout=False, handler=None): + client.mock_members + client.mock_mongoses), "bad host: %s" % host_and_port - with Pool.get_socket( - self, all_credentials, checkout, handler) as sock_info: + with Pool.get_socket(self, all_credentials, handler) as sock_info: sock_info.mock_host = self.mock_host sock_info.mock_port = self.mock_port yield sock_info diff --git a/test/test_cmap.py b/test/test_cmap.py index 053f27ba73..cfa11e9bc0 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -119,7 +119,9 @@ def wait_for_event(self, op): def check_out(self, op): """Run the 'checkOut' operation.""" label = op['label'] - with self.pool.get_socket({}, checkout=True) as sock_info: + with self.pool.get_socket({}) as sock_info: + # Call 'pin_cursor' so we can hold the socket. + sock_info.pin_cursor() if label: self.labels[label] = sock_info else: diff --git a/test/test_pooling.py b/test/test_pooling.py index 57338a965d..11fcb1ce7a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -125,8 +125,9 @@ def __init__(self, client, pool): def run_mongo_thread(self): self.state = 'get_socket' - # Pass 'checkout' so we can hold the socket. - with self.pool.get_socket({}, checkout=True) as sock: + # Call 'pin_cursor' so we can hold the socket. + with self.pool.get_socket({}) as sock: + sock.pin_cursor() self.sock = sock self.state = 'sock' @@ -326,11 +327,9 @@ def test_pool_check(self): # Back to normal, semaphore was correctly released. cx_pool.address = address - with cx_pool.get_socket({}, checkout=True) as sock_info: + with cx_pool.get_socket({}): pass - sock_info.close_socket(None) - def test_wait_queue_timeout(self): wait_queue_timeout = 2 # Seconds pool = self.create_pool( @@ -400,8 +399,9 @@ def test_no_wait_queue_multiple(self): socks = [] for _ in range(2): - # Pass 'checkout' so we can hold the socket. - with pool.get_socket({}, checkout=True) as sock: + # Call 'pin_cursor' so we can hold the socket. + with pool.get_socket({}) as sock: + sock.pin_cursor() socks.append(sock) threads = [] @@ -539,7 +539,7 @@ def test_max_pool_size_with_connection_failure(self): # socket from pool" instead of AutoReconnect. for i in range(2): with self.assertRaises(AutoReconnect) as context: - with test_pool.get_socket({}, checkout=True): + with test_pool.get_socket({}): pass # Testing for AutoReconnect instead of ConnectionFailure, above, diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 773aed3f51..d02c1cacc2 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -322,9 +322,9 @@ def _socket_for_reads(self, read_preference, session): yield sock_info, slave_ok @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session, pin=False): + def _slaveok_for_server(self, read_preference, server, session): context = super(ReadPrefTester, self)._slaveok_for_server( - read_preference, server, session, pin=pin) + read_preference, server, session) with context as (sock_info, slave_ok): self.record_a_read(sock_info.address) yield sock_info, slave_ok diff --git a/test/utils.py b/test/utils.py index fb29bb37e5..02a0a58838 100644 --- a/test/utils.py +++ b/test/utils.py @@ -267,7 +267,7 @@ def __init__(self, address, options, handshake=True): def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) - def get_socket(self, all_credentials, checkout=False, handler=None): + def get_socket(self, all_credentials, handler=None): return MockSocketInfo() def return_socket(self, *args, **kwargs): From 640fee9d5d7c937cb39dedd3218178c76b927631 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 21 Jun 2021 16:48:57 -0700 Subject: [PATCH 0371/2111] PYTHON-2556 Disable dots and dollars validation --- doc/changelog.rst | 3 + pymongo/collection.py | 6 +- pymongo/message.py | 2 +- test/bson_corpus/document.json | 20 + test/bson_corpus/top.json | 25 +- .../bulkWrite-insertOne-dots_and_dollars.json | 374 +++++++++++ ...bulkWrite-replaceOne-dots_and_dollars.json | 532 +++++++++++++++ .../unified/bulkWrite-update-validation.json | 34 +- ...bulkWrite-updateMany-dots_and_dollars.json | 452 +++++++++++++ .../bulkWrite-updateOne-dots_and_dollars.json | 460 +++++++++++++ .../findOneAndReplace-dots_and_dollars.json | 430 +++++++++++++ .../findOneAndUpdate-dots_and_dollars.json | 380 +++++++++++ .../unified/insertMany-dots_and_dollars.json | 334 ++++++++++ .../unified/insertOne-dots_and_dollars.json | 606 ++++++++++++++++++ .../unified/replaceOne-dots_and_dollars.json | 567 ++++++++++++++++ test/crud/unified/replaceOne-validation.json | 18 +- .../unified/updateMany-dots_and_dollars.json | 404 ++++++++++++ test/crud/unified/updateMany-validation.json | 20 +- .../unified/updateOne-dots_and_dollars.json | 412 ++++++++++++ test/crud/unified/updateOne-validation.json | 18 +- test/test_collection.py | 35 - test/test_legacy_api.py | 9 - .../valid-pass/poc-transactions.json | 11 +- test/unified_format.py | 8 +- 24 files changed, 5051 insertions(+), 109 deletions(-) create mode 100644 test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json create mode 100644 test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json create mode 100644 test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json create mode 100644 test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json create mode 100644 test/crud/unified/findOneAndReplace-dots_and_dollars.json create mode 100644 test/crud/unified/findOneAndUpdate-dots_and_dollars.json create mode 100644 test/crud/unified/insertMany-dots_and_dollars.json create mode 100644 test/crud/unified/insertOne-dots_and_dollars.json create mode 100644 test/crud/unified/replaceOne-dots_and_dollars.json create mode 100644 test/crud/unified/updateMany-dots_and_dollars.json create mode 100644 test/crud/unified/updateOne-dots_and_dollars.json diff --git a/doc/changelog.rst b/doc/changelog.rst index e0cc3a6913..d3462b7bb9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,9 @@ Changes in Version 4.0 .. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. +.. warning:: PyMongo now allows insertion of documents with keys that include + dots ('.') or start with dollar signs ('$'). + PyMongo 4.0 brings a number of improvements as well as some backward breaking changes. For example, all APIs deprecated in PyMongo 3.X have been removed. Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` diff --git a/pymongo/collection.py b/pymongo/collection.py index 7943e08aa8..64ec74b750 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -644,7 +644,7 @@ def insert_one(self, document, bypass_document_validation=False, write_concern = self._write_concern_for(session) return InsertOneResult( self._insert_one( - document, ordered=True, check_keys=True, + document, ordered=True, check_keys=False, write_concern=write_concern, op_id=None, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) @@ -712,7 +712,7 @@ def gen(): return InsertManyResult(inserted_ids, write_concern.acknowledged) def _update(self, sock_info, criteria, document, upsert=False, - check_keys=True, multi=False, + check_keys=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None, retryable_write=False): @@ -799,7 +799,7 @@ def _update(self, sock_info, criteria, document, upsert=False, def _update_retryable( self, criteria, document, upsert=False, - check_keys=True, multi=False, + check_keys=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None): diff --git a/pymongo/message.py b/pymongo/message.py index 4edf8abced..3141c4299b 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -938,7 +938,7 @@ def execute_unack(self, docs, client): @property def check_keys(self): """Should we check keys for this operation type?""" - return self.op_type == _INSERT + return False @property def max_bson_size(self): diff --git a/test/bson_corpus/document.json b/test/bson_corpus/document.json index 3ec9187044..10bf637d6d 100644 --- a/test/bson_corpus/document.json +++ b/test/bson_corpus/document.json @@ -17,6 +17,26 @@ "description": "Single-character key subdoc", "canonical_bson": "160000000378000E0000000261000200000062000000", "canonical_extjson": "{\"x\" : {\"a\" : \"b\"}}" + }, + { + "description": "Dollar-prefixed key in sub-document", + "canonical_bson": "170000000378000F000000022461000200000062000000", + "canonical_extjson": "{\"x\" : {\"$a\" : \"b\"}}" + }, + { + "description": "Dollar as key in sub-document", + "canonical_bson": "160000000378000E0000000224000200000061000000", + "canonical_extjson": "{\"x\" : {\"$\" : \"a\"}}" + }, + { + "description": "Dotted key in sub-document", + "canonical_bson": "180000000378001000000002612E62000200000063000000", + "canonical_extjson": "{\"x\" : {\"a.b\" : \"c\"}}" + }, + { + "description": "Dot as key in sub-document", + "canonical_bson": "160000000378000E000000022E000200000061000000", + "canonical_extjson": "{\"x\" : {\".\" : \"a\"}}" } ], "decodeErrors": [ diff --git a/test/bson_corpus/top.json b/test/bson_corpus/top.json index 5352a3faf3..ce7d4cdf9b 100644 --- a/test/bson_corpus/top.json +++ b/test/bson_corpus/top.json @@ -3,9 +3,24 @@ "bson_type": "0x00", "valid": [ { - "description": "Document with keys that start with $", + "description": "Dollar-prefixed key in top-level document", "canonical_bson": "0F00000010246B6579002A00000000", "canonical_extjson": "{\"$key\": {\"$numberInt\": \"42\"}}" + }, + { + "description": "Dollar as key in top-level document", + "canonical_bson": "0E00000002240002000000610000", + "canonical_extjson": "{\"$\": \"a\"}" + }, + { + "description": "Dotted key in top-level document", + "canonical_bson": "1000000002612E620002000000630000", + "canonical_extjson": "{\"a.b\": \"c\"}" + }, + { + "description": "Dot as key in top-level document", + "canonical_bson": "0E000000022E0002000000610000", + "canonical_extjson": "{\".\": \"a\"}" } ], "decodeErrors": [ @@ -199,14 +214,6 @@ "description": "Bad $date (extra field)", "string": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330501\"}, \"unrelated\": true}}" }, - { - "description": "Bad DBRef (ref is number, not string)", - "string": "{\"x\" : {\"$ref\" : 42, \"$id\" : \"abc\"}}" - }, - { - "description": "Bad DBRef (db is number, not string)", - "string": "{\"x\" : {\"$ref\" : \"a\", \"$id\" : \"abc\", \"$db\" : 42}}" - }, { "description": "Bad $minKey (boolean, not integer)", "string": "{\"a\" : {\"$minKey\" : true}}" diff --git a/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json new file mode 100644 index 0000000000..92bbb1aaf2 --- /dev/null +++ b/test/crud/unified/bulkWrite-insertOne-dots_and_dollars.json @@ -0,0 +1,374 @@ +{ + "description": "bulkWrite-insertOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "$a": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "$a": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json new file mode 100644 index 0000000000..fce647d8f4 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-dots_and_dollars.json @@ -0,0 +1,532 @@ +{ + "description": "bulkWrite-replaceOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-update-validation.json b/test/crud/unified/bulkWrite-update-validation.json index 9ed7db5121..f9bfda0edd 100644 --- a/test/crud/unified/bulkWrite-update-validation.json +++ b/test/crud/unified/bulkWrite-update-validation.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-update-validation", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "createEntities": [ { "client": { @@ -14,21 +14,21 @@ "database": { "id": "database0", "client": "client0", - "databaseName": "crud-v2" + "databaseName": "crud-tests" } }, { "collection": { "id": "collection0", "database": "database0", - "collectionName": "crud-v2" + "collectionName": "coll0" } } ], "initialData": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, @@ -50,8 +50,8 @@ "description": "BulkWrite replaceOne prohibits atomic modifiers", "operations": [ { - "object": "collection0", "name": "bulkWrite", + "object": "collection0", "arguments": { "requests": [ { @@ -69,7 +69,7 @@ ] }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -81,8 +81,8 @@ ], "outcome": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, @@ -104,8 +104,8 @@ "description": "BulkWrite updateOne requires atomic modifiers", "operations": [ { - "object": "collection0", "name": "bulkWrite", + "object": "collection0", "arguments": { "requests": [ { @@ -121,7 +121,7 @@ ] }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -133,8 +133,8 @@ ], "outcome": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, @@ -156,8 +156,8 @@ "description": "BulkWrite updateMany requires atomic modifiers", "operations": [ { - "object": "collection0", "name": "bulkWrite", + "object": "collection0", "arguments": { "requests": [ { @@ -175,7 +175,7 @@ ] }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -187,8 +187,8 @@ ], "outcome": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, diff --git a/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json b/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json new file mode 100644 index 0000000000..35a5cdd52a --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-dots_and_dollars.json @@ -0,0 +1,452 @@ +{ + "description": "bulkWrite-updateMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json b/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json new file mode 100644 index 0000000000..cbbe113ce8 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-dots_and_dollars.json @@ -0,0 +1,460 @@ +{ + "description": "bulkWrite-updateOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-dots_and_dollars.json b/test/crud/unified/findOneAndReplace-dots_and_dollars.json new file mode 100644 index 0000000000..19ac447f84 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-dots_and_dollars.json @@ -0,0 +1,430 @@ +{ + "description": "findOneAndReplace-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a.b": 1 + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a.b": 1 + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-dots_and_dollars.json b/test/crud/unified/findOneAndUpdate-dots_and_dollars.json new file mode 100644 index 0000000000..40eb547392 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-dots_and_dollars.json @@ -0,0 +1,380 @@ +{ + "description": "findOneAndUpdate-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "_id": 1, + "foo": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "new": { + "$$unsetOrMatches": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-dots_and_dollars.json b/test/crud/unified/insertMany-dots_and_dollars.json new file mode 100644 index 0000000000..3b66ac0621 --- /dev/null +++ b/test/crud/unified/insertMany-dots_and_dollars.json @@ -0,0 +1,334 @@ +{ + "description": "insertMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + }, + "expectResult": { + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + }, + "expectResult": { + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-dots_and_dollars.json b/test/crud/unified/insertOne-dots_and_dollars.json new file mode 100644 index 0000000000..1a30df4a02 --- /dev/null +++ b/test/crud/unified/insertOne-dots_and_dollars.json @@ -0,0 +1,606 @@ +{ + "description": "insertOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Inserting document with top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "$a": 1 + } + }, + "expectResult": { + "insertedCount": 1, + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with top-level dollar-prefixed key on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "$a": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "$a": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with top-level dotted key", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "insertedCount": 1, + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in embedded doc", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "insertedCount": 1, + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in embedded doc", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "insertedCount": 1, + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dollar-prefixed key in _id yields server-side error", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "$a": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$a": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with dotted key in _id on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "a.b": 1 + } + } + }, + "expectResult": { + "insertedCount": 1, + "insertedId": { + "$$unsetOrMatches": { + "a.b": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + ] + }, + { + "description": "Inserting document with dotted key in _id on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": { + "a.b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "a.b": 1 + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + }, + { + "description": "Inserting document with DBRef-like keys", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "$db": "foo" + } + } + }, + "expectResult": { + "insertedCount": 1, + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1, + "a": { + "$db": "foo" + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$db": "foo" + } + } + ] + } + ] + }, + { + "description": "Unacknowledged write using dollar-prefixed or dotted keys may be silently rejected on pre-5.0 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "document": { + "_id": { + "$a": 1 + } + } + }, + "expectResult": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll1", + "documents": [ + { + "_id": { + "$a": 1 + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-dots_and_dollars.json b/test/crud/unified/replaceOne-dots_and_dollars.json new file mode 100644 index 0000000000..d5003dc5ea --- /dev/null +++ b/test/crud/unified/replaceOne-dots_and_dollars.json @@ -0,0 +1,567 @@ +{ + "description": "replaceOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Replacing document with top-level dotted key on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with top-level dotted key on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a.b": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a.b": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "$b": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dollar-prefixed key in embedded doc on pre-5.0 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on 3.6+ server", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "a": { + "b.c": 1 + } + } + ] + } + ] + }, + { + "description": "Replacing document with dotted key in embedded doc on pre-3.6 server yields server-side error", + "runOnRequirements": [ + { + "maxServerVersion": "3.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "b.c": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "b.c": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Unacknowledged write using dollar-prefixed or dotted keys may be silently rejected on pre-5.0 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "a": { + "$b": 1 + } + } + }, + "expectResult": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll1", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "a": { + "$b": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-validation.json b/test/crud/unified/replaceOne-validation.json index 56e2de079c..6f5b173e02 100644 --- a/test/crud/unified/replaceOne-validation.json +++ b/test/crud/unified/replaceOne-validation.json @@ -1,6 +1,6 @@ { "description": "replaceOne-validation", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "createEntities": [ { "client": { @@ -14,21 +14,21 @@ "database": { "id": "database0", "client": "client0", - "databaseName": "crud-v2" + "databaseName": "crud-tests" } }, { "collection": { "id": "collection0", "database": "database0", - "collectionName": "crud-v2" + "collectionName": "coll0" } } ], "initialData": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, @@ -42,8 +42,8 @@ "description": "ReplaceOne prohibits atomic modifiers", "operations": [ { - "object": "collection0", "name": "replaceOne", + "object": "collection0", "arguments": { "filter": { "_id": 1 @@ -55,7 +55,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -67,8 +67,8 @@ ], "outcome": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, diff --git a/test/crud/unified/updateMany-dots_and_dollars.json b/test/crud/unified/updateMany-dots_and_dollars.json new file mode 100644 index 0000000000..5d3b9d0453 --- /dev/null +++ b/test/crud/unified/updateMany-dots_and_dollars.json @@ -0,0 +1,404 @@ +{ + "description": "updateMany-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-validation.json b/test/crud/unified/updateMany-validation.json index 45d0b77620..e3e46a1384 100644 --- a/test/crud/unified/updateMany-validation.json +++ b/test/crud/unified/updateMany-validation.json @@ -1,6 +1,6 @@ { "description": "updateMany-validation", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "createEntities": [ { "client": { @@ -14,21 +14,21 @@ "database": { "id": "database0", "client": "client0", - "databaseName": "crud-v2" + "databaseName": "crud-tests" } }, { "collection": { "id": "collection0", "database": "database0", - "collectionName": "crud-v2" + "collectionName": "coll0" } } ], "initialData": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, @@ -47,11 +47,11 @@ ], "tests": [ { - "description": "UpdateOne requires atomic modifiers", + "description": "UpdateMany requires atomic modifiers", "operations": [ { - "object": "collection0", "name": "updateMany", + "object": "collection0", "arguments": { "filter": { "_id": { @@ -63,7 +63,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -75,8 +75,8 @@ ], "outcome": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, diff --git a/test/crud/unified/updateOne-dots_and_dollars.json b/test/crud/unified/updateOne-dots_and_dollars.json new file mode 100644 index 0000000000..798d522cba --- /dev/null +++ b/test/crud/unified/updateOne-dots_and_dollars.json @@ -0,0 +1,412 @@ +{ + "description": "updateOne-dots_and_dollars", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {} + } + ] + } + ], + "tests": [ + { + "description": "Updating document to set top-level dollar-prefixed key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "$a": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set top-level dotted key on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceWith": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$$ROOT" + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": {}, + "a.b": 1 + } + ] + } + ] + }, + { + "description": "Updating document to set dollar-prefixed key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "$a" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "$a": 1 + } + } + ] + } + ] + }, + { + "description": "Updating document to set dotted key in embedded doc on 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "foo": { + "$setField": { + "field": { + "$literal": "a.b" + }, + "value": 1, + "input": "$foo" + } + } + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "foo": { + "a.b": 1 + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-validation.json b/test/crud/unified/updateOne-validation.json index 8336efc0d1..1464642c59 100644 --- a/test/crud/unified/updateOne-validation.json +++ b/test/crud/unified/updateOne-validation.json @@ -1,6 +1,6 @@ { "description": "updateOne-validation", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "createEntities": [ { "client": { @@ -14,21 +14,21 @@ "database": { "id": "database0", "client": "client0", - "databaseName": "crud-v2" + "databaseName": "crud-tests" } }, { "collection": { "id": "collection0", "database": "database0", - "collectionName": "crud-v2" + "collectionName": "coll0" } } ], "initialData": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, @@ -42,8 +42,8 @@ "description": "UpdateOne requires atomic modifiers", "operations": [ { - "object": "collection0", "name": "updateOne", + "object": "collection0", "arguments": { "filter": { "_id": 1 @@ -53,7 +53,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -65,8 +65,8 @@ ], "outcome": [ { - "collectionName": "crud-v2", - "databaseName": "crud-v2", + "collectionName": "coll0", + "databaseName": "crud-tests", "documents": [ { "_id": 1, diff --git a/test/test_collection.py b/test/test_collection.py index 538ebdc313..b9137a2cb2 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -892,13 +892,6 @@ def test_write_large_document(self): # This will pass with OP_UPDATE or the update command. self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) - def test_bad_dbref(self): - # Incomplete DBRefs. - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} - self.assertRaises(InvalidDocument, self.db.test.insert_one, ref_only) - self.assertRaises(InvalidDocument, self.db.test.insert_one, id_only) - @client_context.require_version_min(3, 1, 9, -1) def test_insert_bypass_document_validation(self): db = self.db @@ -1215,34 +1208,6 @@ def test_id_can_be_anything(self): self.assertEqual(x["hello"], "world") self.assertTrue("_id" in x) - def test_invalid_key_names(self): - db = self.db - db.test.drop() - - db.test.insert_one({"hello": "world"}) - db.test.insert_one({"hello": {"hello": "world"}}) - - self.assertRaises(InvalidDocument, db.test.insert_one, - {"$hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {"$hello": "world"}}) - - db.test.insert_one({"he$llo": "world"}) - db.test.insert_one({"hello": {"hello$": "world"}}) - - self.assertRaises(InvalidDocument, db.test.insert_one, - {".hello": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {".hello": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello.": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {"hello.": "world"}}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hel.lo": "world"}) - self.assertRaises(InvalidDocument, db.test.insert_one, - {"hello": {"hel.lo": "world"}}) - def test_unique_index(self): db = self.db db.drop_collection("test") diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index a28238b95a..246fe333ff 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -131,15 +131,6 @@ def test_insert(self): self.assertEqual(2, self.coll.count()) - def test_insert_check_keys(self): - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'$dollar': 1}) - self.assertRaises(InvalidDocument, bulk.execute) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.insert({'a.b': 1}) - self.assertRaises(InvalidDocument, bulk.execute) - def test_update(self): expected = { diff --git a/test/unified-test-format/valid-pass/poc-transactions.json b/test/unified-test-format/valid-pass/poc-transactions.json index 62528f9ce1..0355ca2060 100644 --- a/test/unified-test-format/valid-pass/poc-transactions.json +++ b/test/unified-test-format/valid-pass/poc-transactions.json @@ -61,14 +61,15 @@ "object": "session0" }, { - "name": "insertOne", + "name": "updateOne", "object": "collection0", "arguments": { "session": "session0", - "document": { - "_id": { - ".": "." - } + "filter": { + "_id": 1 + }, + "update": { + "x": 1 } }, "expectError": { diff --git a/test/unified_format.py b/test/unified_format.py index d24e280b28..3da9df7e8a 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -607,9 +607,11 @@ def coerce_result(opname, result): if opname == 'bulkWrite': return parse_bulk_write_result(result) if opname == 'insertOne': - return {'insertedId': result.inserted_id} + return {'insertedId': result.inserted_id, 'insertedCount': 1} if opname == 'insertMany': - return {idx: _id for idx, _id in enumerate(result.inserted_ids)} + res = {idx: _id for idx, _id in enumerate(result.inserted_ids)} + res['insertedCount'] = len(result.inserted_ids) + return res if opname in ('deleteOne', 'deleteMany'): return {'deletedCount': result.deleted_count} if opname in ('updateOne', 'updateMany', 'replaceOne'): @@ -698,6 +700,8 @@ def maybe_skip_test(self, spec): if 'Dirty explicit session is discarded' in spec['description']: raise unittest.SkipTest( "MMAPv1 does not support retryWrites=True") + elif 'Client side error in command starting transaction' in spec['description']: + raise unittest.SkipTest("Implement PYTHON-1894") def process_error(self, exception, spec): is_error = spec.get('isError') From ff6ca53328bfa62f35cb169d6440c2a642b853d2 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 22 Jun 2021 13:24:07 -0700 Subject: [PATCH 0372/2111] PYTHON-2572 Introduce NotPrimaryError and deprecate NotMasterError (#646) --- pymongo/change_stream.py | 6 ++--- pymongo/errors.py | 22 ++++++++++++++++--- pymongo/helpers.py | 20 ++++++++--------- pymongo/message.py | 14 ++++++------ pymongo/mongo_client.py | 8 +++---- pymongo/monitor.py | 4 ++-- pymongo/network.py | 4 ++-- pymongo/pool.py | 14 ++++++------ pymongo/server.py | 4 ++-- pymongo/topology.py | 6 ++--- ...nnections_survive_primary_stepdown_spec.py | 8 +++---- test/test_discovery_and_monitoring.py | 4 ++-- test/test_errors.py | 20 +++++++++++------ test/test_gridfs.py | 6 ++--- test/test_gridfs_bucket.py | 6 ++--- test/test_monitoring.py | 6 ++--- test/test_sdam_monitoring_spec.py | 6 ++--- test/unified_format.py | 4 ++-- 18 files changed, 92 insertions(+), 70 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index f742e126c6..fe685694a2 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -41,11 +41,11 @@ 189, # PrimarySteppedDown 262, # ExceededTimeLimit 9001, # SocketException - 10107, # NotMaster + 10107, # NotWritablePrimary 11600, # InterruptedAtShutdown 11602, # InterruptedDueToReplStateChange - 13435, # NotMasterNoSlaveOk - 13436, # NotMasterOrSecondary + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary 63, # StaleShardVersion 150, # StaleEpoch 13388, # StaleConfig diff --git a/pymongo/errors.py b/pymongo/errors.py index 257783f527..33629b5fa6 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -94,7 +94,22 @@ def _format_detailed_error(message, details): class NotMasterError(AutoReconnect): - """The server responded "not master" or "node is recovering". + """**DEPRECATED** - The server responded "not master" or + "node is recovering". + + This exception has been deprecated and will be removed in PyMongo 4.0. + Use :exc:`~pymongo.errors.NotPrimaryError` instead. + + .. versionchanged:: 3.12 + Deprecated. Use :exc:`~pymongo.errors.NotPrimaryError` instead. + """ + def __init__(self, message='', errors=None): + super(NotMasterError, self).__init__( + _format_detailed_error(message, errors), errors=errors) + + +class NotPrimaryError(NotMasterError): + """The server responded "not primary" or "node is recovering". These errors result from a query, write, or command. The operation failed because the client thought it was using the primary but the primary has @@ -105,10 +120,11 @@ class NotMasterError(AutoReconnect): its view of the server as soon as possible after throwing this exception. Subclass of :exc:`~pymongo.errors.AutoReconnect`. + + .. versionadded:: 3.12 """ def __init__(self, message='', errors=None): - super(NotMasterError, self).__init__( - _format_detailed_error(message, errors), errors=errors) + super(NotPrimaryError, self).__init__(message, errors=errors) class ServerSelectionTimeoutError(AutoReconnect): diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 2fb3ce3372..1d8005ac31 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -24,7 +24,7 @@ from pymongo.errors import (CursorNotFound, DuplicateKeyError, ExecutionTimeout, - NotMasterError, + NotPrimaryError, OperationFailure, WriteError, WriteConcernError, @@ -35,15 +35,15 @@ 11600, # InterruptedAtShutdown 91, # ShutdownInProgress ]) -# From the SDAM spec, the "not master" error codes are combined with the +# From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). _NOT_MASTER_CODES = frozenset([ - 10058, # LegacyNotPrimary <=3.2 "not master" error code - 10107, # NotMaster - 13435, # NotMasterNoSlaveOk + 10058, # LegacyNotPrimary <=3.2 "not primary" error code + 10107, # NotWritablePrimary + 13435, # NotPrimaryNoSecondaryOk 11602, # InterruptedDueToReplStateChange - 13436, # NotMasterOrSecondary + 13436, # NotPrimaryOrSecondary 189, # PrimarySteppedDown ]) | _SHUTDOWN_CODES # From the retryable writes spec. @@ -148,12 +148,12 @@ def _check_command_response(response, max_wire_version, elif errmsg in allowable_errors: return - # Server is "not master" or "recovering" + # Server is "not primary" or "recovering" if code is not None: if code in _NOT_MASTER_CODES: - raise NotMasterError(errmsg, response) + raise NotPrimaryError(errmsg, response) elif "not master" in errmsg or "node is recovering" in errmsg: - raise NotMasterError(errmsg, response) + raise NotPrimaryError(errmsg, response) # Other errors # findAndModify with upsert can raise duplicate key error @@ -185,7 +185,7 @@ def _check_gle_response(result, max_wire_version): return result if error_msg.startswith("not master"): - raise NotMasterError(error_msg, result) + raise NotPrimaryError(error_msg, result) details = result diff --git a/pymongo/message.py b/pymongo/message.py index 3141c4299b..15c564475f 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -47,7 +47,7 @@ DocumentTooLarge, ExecutionTimeout, InvalidOperation, - NotMasterError, + NotPrimaryError, OperationFailure, ProtocolError) from pymongo.read_concern import DEFAULT_READ_CONCERN @@ -995,7 +995,7 @@ def legacy_write(self, request_id, msg, max_doc_size, acknowledged, docs): if isinstance(exc, OperationFailure): failure = _convert_write_result( self.name, cmd, exc.details) - elif isinstance(exc, NotMasterError): + elif isinstance(exc, NotPrimaryError): failure = exc.details else: failure = _convert_exception(exc) @@ -1020,7 +1020,7 @@ def write_command(self, request_id, msg, docs): except Exception as exc: if self.publish: duration = (datetime.datetime.now() - start) + duration - if isinstance(exc, (NotMasterError, OperationFailure)): + if isinstance(exc, (NotPrimaryError, OperationFailure)): failure = exc.details else: failure = _convert_exception(exc) @@ -1514,7 +1514,7 @@ def raw_response(self, cursor_id=None, user_fields=None): Check the response for errors and unpack. - Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or + Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. :Parameters: @@ -1537,7 +1537,7 @@ def raw_response(self, cursor_id=None, user_fields=None): # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) if error_object["$err"].startswith("not master"): - raise NotMasterError(error_object["$err"], error_object) + raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: raise ExecutionTimeout(error_object.get("$err"), error_object.get("code"), @@ -1558,7 +1558,7 @@ def unpack_response(self, cursor_id=None, Check the response for errors and unpack, returning a dictionary containing the response data. - Can raise CursorNotFound, NotMasterError, ExecutionTimeout, or + Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. :Parameters: @@ -1715,7 +1715,7 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, except Exception as exc: if publish: duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotMasterError, OperationFailure)): + if isinstance(exc, (NotPrimaryError, OperationFailure)): failure = exc.details else: failure = _convert_exception(exc) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ad82306b85..af1d0b5cd7 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -56,7 +56,7 @@ ConfigurationError, ConnectionFailure, InvalidOperation, - NotMasterError, + NotPrimaryError, OperationFailure, PyMongoError, ServerSelectionTimeoutError) @@ -1938,7 +1938,7 @@ def _retryable_error_doc(exc): wces = exc.details['writeConcernErrors'] wce = wces[-1] if wces else None return wce - if isinstance(exc, (NotMasterError, OperationFailure)): + if isinstance(exc, (NotPrimaryError, OperationFailure)): return exc.details return None @@ -1963,10 +1963,10 @@ def _add_retryable_write_error(exc, max_wire_version): if code in helpers._RETRYABLE_ERROR_CODES: exc._add_error_label("RetryableWriteError") - # Connection errors are always retryable except NotMasterError which is + # Connection errors are always retryable except NotPrimaryError which is # handled above. if (isinstance(exc, ConnectionFailure) and - not isinstance(exc, NotMasterError)): + not isinstance(exc, NotPrimaryError)): exc._add_error_label("RetryableWriteError") diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 7a3a4f22b6..8a60abce0a 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -20,7 +20,7 @@ import weakref from pymongo import common, periodic_executor -from pymongo.errors import (NotMasterError, +from pymongo.errors import (NotPrimaryError, OperationFailure, _OperationCancelled) from pymongo.ismaster import IsMaster @@ -212,7 +212,7 @@ def _check_server(self): try: try: return self._check_once() - except (OperationFailure, NotMasterError) as exc: + except (OperationFailure, NotPrimaryError) as exc: # Update max cluster time even when isMaster fails. self._topology.receive_cluster_time( exc.details.get('$clusterTime')) diff --git a/pymongo/network.py b/pymongo/network.py index 813a7cc38c..b0e06a7173 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -26,7 +26,7 @@ from pymongo import helpers, message from pymongo.common import MAX_MESSAGE_SIZE from pymongo.compression_support import decompress, _NO_COMPRESSION -from pymongo.errors import (NotMasterError, +from pymongo.errors import (NotPrimaryError, OperationFailure, ProtocolError, _OperationCancelled) @@ -160,7 +160,7 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, except Exception as exc: if publish: duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotMasterError, OperationFailure)): + if isinstance(exc, (NotPrimaryError, OperationFailure)): failure = exc.details else: failure = message._convert_exception(exc) diff --git a/pymongo/pool.py b/pymongo/pool.py index 335f8d07f5..a8a645f893 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -47,7 +47,7 @@ InvalidOperation, DocumentTooLarge, NetworkTimeout, - NotMasterError, + NotPrimaryError, OperationFailure, PyMongoError) from pymongo.hello import HelloCompat @@ -736,7 +736,7 @@ def command(self, dbname, spec, slave_ok=False, unacknowledged=unacknowledged, user_fields=user_fields, exhaust_allowed=exhaust_allowed) - except (OperationFailure, NotMasterError): + except (OperationFailure, NotPrimaryError): raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. except BaseException as error: @@ -770,13 +770,13 @@ def receive_message(self, request_id): self._raise_connection_failure(error) def _raise_if_not_writable(self, unacknowledged): - """Raise NotMasterError on unacknowledged write if this socket is not + """Raise NotPrimaryError on unacknowledged write if this socket is not writable. """ if unacknowledged and not self.is_writable: - # Write won't succeed, bail as if we'd received a not master error. - raise NotMasterError("not master", { - "ok": 0, "errmsg": "not master", "code": 10107}) + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", { + "ok": 0, "errmsg": "not primary", "code": 10107}) def legacy_write(self, request_id, msg, max_doc_size, with_last_error): """Send OP_INSERT, etc., optionally returning response as a dict. @@ -811,7 +811,7 @@ def write_command(self, request_id, msg): reply = self.receive_message(request_id) result = reply.command_response() - # Raises NotMasterError or OperationFailure. + # Raises NotPrimaryError or OperationFailure. helpers._check_command_response(result, self.max_wire_version) return result diff --git a/pymongo/server.py b/pymongo/server.py index 19529237d4..10037cbf9c 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -18,7 +18,7 @@ from bson import _decode_all_selective -from pymongo.errors import NotMasterError, OperationFailure +from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers import _check_command_response from pymongo.message import _convert_exception, _OpMsg from pymongo.response import Response, PinnedResponse @@ -130,7 +130,7 @@ def run_operation(self, sock_info, operation, set_slave_okay, listeners, except Exception as exc: if publish: duration = datetime.now() - start - if isinstance(exc, (NotMasterError, OperationFailure)): + if isinstance(exc, (NotPrimaryError, OperationFailure)): failure = exc.details else: failure = _convert_exception(exc) diff --git a/pymongo/topology.py b/pymongo/topology.py index baa4293ddb..3b5437a322 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -29,7 +29,7 @@ from pymongo.errors import (ConnectionFailure, ConfigurationError, NetworkTimeout, - NotMasterError, + NotPrimaryError, OperationFailure, PyMongoError, ServerSelectionTimeoutError, @@ -611,9 +611,9 @@ def _handle_error(self, address, err_ctx): elif issubclass(exc_type, WriteError): # Ignore writeErrors. return - elif issubclass(exc_type, (NotMasterError, OperationFailure)): + elif issubclass(exc_type, (NotPrimaryError, OperationFailure)): # As per the SDAM spec if: - # - the server sees a "not master" error, and + # - the server sees a "not primary" error, and # - the server is not shutting down, and # - the server version is >= 4.2, then # we keep the existing connection pool, but mark the server type diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 8690962afa..6ed16fa50e 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -20,7 +20,7 @@ from bson import SON from pymongo import monitoring -from pymongo.errors import NotMasterError +from pymongo.errors import NotPrimaryError from pymongo.write_concern import WriteConcern from test import (client_context, @@ -93,10 +93,10 @@ def test_get_more_iteration(self): # Verify pool not cleared. self.verify_pool_not_cleared() # Attempt insertion to mark server description as stale and prevent a - # notMaster error on the subsequent operation. + # NotPrimaryError on the subsequent operation. try: self.coll.insert_one({}) - except NotMasterError: + except NotPrimaryError: pass # Next insert should succeed on the new primary without clearing pool. self.coll.insert_one({}) @@ -109,7 +109,7 @@ def run_scenario(self, error_code, retry, pool_status_checker): "errorCode": error_code}}) self.addCleanup(self.set_fail_point, {"mode": "off"}) # Insert record and verify failure. - with self.assertRaises(NotMasterError) as exc: + with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) self.assertEqual(exc.exception.details['code'], error_code) # Retry before CMAPListener assertion if retry_before=True. diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 0cbc40f1ff..fc5a3ae0b5 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -27,7 +27,7 @@ from pymongo.errors import (AutoReconnect, ConfigurationError, NetworkTimeout, - NotMasterError, + NotPrimaryError, OperationFailure) from pymongo.helpers import (_check_command_response, _check_write_command_response) @@ -104,7 +104,7 @@ def got_app_error(topology, app_error): else: raise AssertionError('unknown error type: %s' % (error_type,)) assert False - except (AutoReconnect, NotMasterError, OperationFailure) as e: + except (AutoReconnect, NotPrimaryError, OperationFailure) as e: if when == 'beforeHandshakeCompletes': completed_handshake = False elif when == 'afterHandshakeCompletes': diff --git a/test/test_errors.py b/test/test_errors.py index ecd969accb..6ae8ee69be 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -20,6 +20,7 @@ from pymongo.errors import (BulkWriteError, EncryptionError, + NotPrimaryError, NotMasterError, OperationFailure) from test import (PyMongoTestCase, @@ -27,12 +28,12 @@ class TestErrors(PyMongoTestCase): - def test_not_master_error(self): - exc = NotMasterError("not master test", {"errmsg": "error"}) + def test_not_primary_error(self): + exc = NotPrimaryError("not primary test", {"errmsg": "error"}) self.assertIn("full error", str(exc)) try: raise exc - except NotMasterError: + except NotPrimaryError: self.assertIn("full error", traceback.format_exc()) def test_operation_failure(self): @@ -63,8 +64,8 @@ def test_unicode_strs_operation_failure(self): self._test_unicode_strs(exc) def test_unicode_strs_not_master_error(self): - exc = NotMasterError('unicode \U0001f40d', - {"errmsg": 'unicode \U0001f40d'}) + exc = NotPrimaryError('unicode \U0001f40d', + {"errmsg": 'unicode \U0001f40d'}) self._test_unicode_strs(exc) def assertPyMongoErrorEqual(self, exc1, exc2): @@ -79,8 +80,8 @@ def assertOperationFailureEqual(self, exc1, exc2): self.assertEqual(exc1.details, exc2.details) self.assertEqual(exc1._max_wire_version, exc2._max_wire_version) - def test_pickle_NotMasterError(self): - exc = NotMasterError("not master test", {"errmsg": "error"}) + def test_pickle_NotPrimaryError(self): + exc = NotPrimaryError("not primary test", {"errmsg": "error"}) self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc))) def test_pickle_OperationFailure(self): @@ -100,6 +101,11 @@ def test_pickle_EncryptionError(self): self.assertPyMongoErrorEqual(exc, exc2) self.assertOperationFailureEqual(cause, exc2.cause) + def test_NotMasterError_catches_NotPrimaryError(self): + with self.assertRaises(NotMasterError) as exc: + raise NotPrimaryError("not primary test", {"errmsg": "error"}) + self.assertIn("full error", str(exc.exception)) + if __name__ == "__main__": unittest.main() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index e9c5301e28..b21191cfb3 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -29,7 +29,7 @@ from bson.binary import Binary from pymongo.mongo_client import MongoClient from pymongo.errors import (ConfigurationError, - NotMasterError, + NotPrimaryError, ServerSelectionTimeoutError) from pymongo.read_preferences import ReadPreference import gridfs @@ -539,7 +539,7 @@ def test_gridfs_secondary(self): fs = gridfs.GridFS(secondary_connection.gfsreplica, 'gfssecondarytest') # This won't detect secondary, raises error - self.assertRaises(NotMasterError, fs.put, b'foo') + self.assertRaises(NotPrimaryError, fs.put, b'foo') def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to @@ -556,7 +556,7 @@ def test_gridfs_secondary_lazy(self): # Connects, doesn't create index. self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotMasterError, fs.put, 'data') + self.assertRaises(NotPrimaryError, fs.put, 'data') if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 98478bf7bf..f31949f03c 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -30,7 +30,7 @@ import gridfs from gridfs.errors import NoFile, CorruptGridFile from pymongo.errors import (ConfigurationError, - NotMasterError, + NotPrimaryError, ServerSelectionTimeoutError) from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference @@ -536,7 +536,7 @@ def test_gridfs_secondary(self): secondary_connection.gfsbucketreplica, 'gfsbucketsecondarytest') # This won't detect secondary, raises error - self.assertRaises(NotMasterError, gfs.upload_from_stream, + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b'foo') def test_gridfs_secondary_lazy(self): @@ -556,7 +556,7 @@ def test_gridfs_secondary_lazy(self): # Connects, doesn't create index. self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") - self.assertRaises(NotMasterError, gfs.upload_from_stream, + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b'data') diff --git a/test/test_monitoring.py b/test/test_monitoring.py index bdf24e240d..464231b524 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -26,7 +26,7 @@ from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne from pymongo.command_cursor import CommandCursor from pymongo.errors import (AutoReconnect, - NotMasterError, + NotPrimaryError, OperationFailure) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -438,7 +438,7 @@ def test_not_master_error(self): error = None try: client.pymongo_test.test.find_one_and_delete({}) - except NotMasterError as exc: + except NotPrimaryError as exc: error = exc.errors results = self.listener.results started = results['started'][0] @@ -983,7 +983,7 @@ def test_bulk_write_command_error(self): }, } with self.fail_point(insert_command_error): - with self.assertRaises(NotMasterError): + with self.assertRaises(NotPrimaryError): coll.bulk_write([InsertOne({'_id': 1})]) failed = self.listener.results['failed'] self.assertEqual(1, len(failed)) diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index bcd2cbc4d8..0e8b285138 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -25,7 +25,7 @@ from pymongo import monitoring from pymongo.common import clean_node from pymongo.errors import (ConnectionFailure, - NotMasterError) + NotPrimaryError) from pymongo.ismaster import IsMaster from pymongo.monitor import Monitor from pymongo.server_description import ServerDescription @@ -333,12 +333,12 @@ def test_network_error_publishes_events(self): def test_not_master_error_publishes_events(self): self._test_app_error({'errorCode': 10107, 'closeConnection': False, 'errorLabels': ['RetryableWriteError']}, - NotMasterError) + NotPrimaryError) def test_shutdown_error_publishes_events(self): self._test_app_error({'errorCode': 91, 'closeConnection': False, 'errorLabels': ['RetryableWriteError']}, - NotMasterError) + NotPrimaryError) if __name__ == "__main__": diff --git a/test/unified_format.py b/test/unified_format.py index 3da9df7e8a..b4219f647c 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -40,7 +40,7 @@ from pymongo.collection import Collection from pymongo.database import Database from pymongo.errors import ( - BulkWriteError, ConnectionFailure, InvalidOperation, NotMasterError, + BulkWriteError, ConnectionFailure, InvalidOperation, NotPrimaryError, PyMongoError) from pymongo.monitoring import ( CommandFailedEvent, CommandListener, CommandStartedEvent, @@ -720,7 +720,7 @@ def process_error(self, exception, spec): if is_client_error: # Connection errors are considered client errors. if isinstance(exception, ConnectionFailure): - self.assertNotIsInstance(exception, NotMasterError) + self.assertNotIsInstance(exception, NotPrimaryError) else: self.assertNotIsInstance(exception, PyMongoError) From 07146ceba7303dbf6aca6ad6461598a8af9b5837 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Jun 2021 15:31:33 -0700 Subject: [PATCH 0373/2111] PYTHON-2761 Don't return a pinned connection to the pool multiple times (#645) --- pymongo/pool.py | 7 +++++-- test/test_load_balancer.py | 9 +++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index a8a645f893..14b66fa734 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -550,6 +550,7 @@ def __init__(self, sock, pool, address, id): # set to true to indicate that the session now owns the connection. self.pinned_txn = False self.pinned_cursor = False + self.active = False def pin_txn(self): self.pinned_txn = True @@ -1407,7 +1408,7 @@ def get_socket(self, all_credentials, handler=None): # still checked out. exc_type, exc_val, _ = sys.exc_info() handler.handle(exc_type, exc_val) - if not pinned: + if not pinned and sock_info.active: self.return_socket(sock_info) raise if sock_info.pinned_txn: @@ -1418,7 +1419,7 @@ def get_socket(self, all_credentials, handler=None): with self.lock: self.__pinned_sockets.add(sock_info) self.ncursors += 1 - else: + elif sock_info.active: self.return_socket(sock_info) def _raise_if_not_ready(self, emit_event): @@ -1531,6 +1532,7 @@ def _get_socket(self, all_credentials): self.address, ConnectionCheckOutFailedReason.CONN_ERROR) raise + sock_info.active = True return sock_info def return_socket(self, sock_info): @@ -1541,6 +1543,7 @@ def return_socket(self, sock_info): """ txn = sock_info.pinned_txn cursor = sock_info.pinned_cursor + sock_info.active = False sock_info.pinned_txn = False sock_info.pinned_cursor = False self.__pinned_sockets.discard(sock_info) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 90bde87e5f..a3b58a2ec2 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -32,6 +32,15 @@ class TestLB(IntegrationTest): + + def test_connections_are_only_returned_once(self): + pool = get_pool(self.client) + nconns = len(pool.sockets) + self.db.test.find_one({}) + self.assertEqual(len(pool.sockets), nconns) + self.db.test.aggregate([{'$limit': 1}]) + self.assertEqual(len(pool.sockets), nconns) + @client_context.require_load_balancer def test_unpin_committed_transaction(self): pool = get_pool(self.client) From 6bebaf90153f244f7fe215d833321a172f860006 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Jun 2021 15:35:50 -0700 Subject: [PATCH 0374/2111] PYTHON-2757 PYTHON-2730 Resync command monitoring killCursors tests (#643) --- test/command_monitoring/legacy/find.json | 4 ++-- .../valid-pass/poc-command-monitoring.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/command_monitoring/legacy/find.json b/test/command_monitoring/legacy/find.json index 608572ed42..55b185cc58 100644 --- a/test/command_monitoring/legacy/find.json +++ b/test/command_monitoring/legacy/find.json @@ -413,9 +413,9 @@ ] }, { - "description": "A successful find event with a getmore and the server kills the cursor", + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", "ignore_if_server_version_less_than": "3.1", - "ignore_if_server_version_greater_than": "4.9.0", + "ignore_if_server_version_greater_than": "4.4", "ignore_if_topology_type": [ "sharded" ], diff --git a/test/unified-test-format/valid-pass/poc-command-monitoring.json b/test/unified-test-format/valid-pass/poc-command-monitoring.json index 7484575066..fe0a5ae991 100644 --- a/test/unified-test-format/valid-pass/poc-command-monitoring.json +++ b/test/unified-test-format/valid-pass/poc-command-monitoring.json @@ -57,11 +57,11 @@ ], "tests": [ { - "description": "A successful find event with a getmore and the server kills the cursor", + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", "runOnRequirements": [ { "minServerVersion": "3.1", - "maxServerVersion": "4.9.0", + "maxServerVersion": "4.4.99", "topologies": [ "single", "replicaset" From a32259037f2e2fd5e9db96974d7cf70ece81520f Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 22 Jun 2021 16:25:17 -0700 Subject: [PATCH 0375/2111] PYTHON-2724 Add FAQ to PyMongo documentation pointing users to PyMongoArrow (#651) --- doc/faq.rst | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/faq.rst b/doc/faq.rst index c6e959b61b..5454db448c 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -45,6 +45,17 @@ multithreaded contexts with ``fork()``, see http://bugs.python.org/issue6721. .. _connection-pooling: +Can PyMongo help me load the results of my query as a Pandas ``DataFrame``? +--------------------------------------------------------------------------- + +While PyMongo itself does not provide any APIs for working with +numerical or columnar data, +`PyMongoArrow `_ +is a companion library to PyMongo that makes it easy to load MongoDB query result sets as +`Pandas DataFrames `_, +`NumPy ndarrays `_, or +`Apache Arrow Tables `_. + How does connection pooling work in PyMongo? -------------------------------------------- @@ -458,8 +469,8 @@ No. PyMongo creates Python threads which `PythonAnywhere `_ does not support. For more information see `PYTHON-1495 `_. -How can I use something like Python's :mod:`json` module to encode my documents to JSON? ----------------------------------------------------------------------------------------- +How can I use something like Python's ``json`` module to encode my documents to JSON? +------------------------------------------------------------------------------------- :mod:`~bson.json_util` is PyMongo's built in, flexible tool for using Python's :mod:`json` module with BSON documents and `MongoDB Extended JSON `_. The From 3ef01179a24d1df22ceb37b85e509aad88db9cf9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Jun 2021 17:25:05 -0700 Subject: [PATCH 0376/2111] PYTHON-2764 Fix unified test coerce_result on unack writes (#652) --- test/unified_format.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/unified_format.py b/test/unified_format.py index b4219f647c..1292792d16 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -604,6 +604,8 @@ def match_event(self, event_type, expectation, actual): def coerce_result(opname, result): """Convert a pymongo result into the spec's result format.""" + if hasattr(result, 'acknowledged') and not result.acknowledged: + return {'acknowledged': False} if opname == 'bulkWrite': return parse_bulk_write_result(result) if opname == 'insertOne': From 6bc5e088aff8bbdde4ad2c9b4848fa8431516a62 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Jun 2021 17:29:26 -0700 Subject: [PATCH 0377/2111] PYTHON-1272 Fix deadlock when garbage collecting pinned cursors and sessions (#642) It's not safe to return the pinned connection to the pool from within Cursor.del because the Pool's lock may be held by a python thread while the cyclic garbage collector runs. Instead we send the cursor cleanup request to the client's background thread. The thread will send killCursors on the pinned socket and then return the socket to the pool. Also fixed a similar bug when garbage collecting a pinned session. --- pymongo/client_session.py | 12 +++- pymongo/command_cursor.py | 32 ++++++----- pymongo/cursor.py | 59 ++++++++------------ pymongo/mongo_client.py | 61 +++++++++++++++++--- test/__init__.py | 5 +- test/test_gridfs.py | 9 +++ test/test_load_balancer.py | 104 ++++++++++++++++++++++++++++++++++- test/test_retryable_reads.py | 4 +- 8 files changed, 219 insertions(+), 67 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 8e5b2a597a..21f2f597f3 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -289,7 +289,7 @@ class _TxnState(object): class _Transaction(object): """Internal class to hold transaction information in a ClientSession.""" - def __init__(self, opts): + def __init__(self, opts, client): self.opts = opts self.state = _TxnState.NONE self.sharded = False @@ -297,6 +297,7 @@ def __init__(self, opts): self.sock_mgr = None self.recovery_token = None self.attempt = 0 + self.client = client def active(self): return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) @@ -330,6 +331,13 @@ def reset(self): self.recovery_token = None self.attempt = 0 + def __del__(self): + if self.sock_mgr: + # Reuse the cursor closing machinery to return the socket to the + # pool soon. + self.client._close_cursor_soon(0, None, self.sock_mgr) + self.sock_mgr = None + def _reraise_with_unknown_commit(exc): """Re-raise an exception with the UnknownTransactionCommitResult label.""" @@ -382,7 +390,7 @@ def __init__(self, client, server_session, options, implicit): self._operation_time = None # Is this an implicitly created session? self._implicit = implicit - self._transaction = _Transaction(None) + self._transaction = _Transaction(None, client) def end_session(self): """Finish this session. If a transaction has started, abort it. diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index aabec3999f..a317bd55b2 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -64,8 +64,7 @@ def __init__(self, collection, cursor_info, address, retrieved=0, raise TypeError("max_await_time_ms must be an integer or None") def __del__(self): - if self.__id and not self.__killed: - self.__die() + self.__die() def __die(self, synchronous=False): """Closes this cursor. @@ -73,20 +72,23 @@ def __die(self, synchronous=False): already_killed = self.__killed self.__killed = True if self.__id and not already_killed: + cursor_id = self.__id address = _CursorAddress( self.__address, self.__collection.full_name) - if synchronous: - self.__collection.database.client._close_cursor_now( - self.__id, address, session=self.__session, - sock_mgr=self.__sock_mgr) - else: - # The cursor will be closed later in a different session. - self.__collection.database.client._close_cursor( - self.__id, address) - if self.__sock_mgr: - self.__sock_mgr.close() - self.__sock_mgr = None - self.__end_session(synchronous) + else: + # Skip killCursors. + cursor_id = 0 + address = None + self.__collection.database.client._cleanup_cursor( + synchronous, + cursor_id, + address, + self.__sock_mgr, + self.__session, + self.__explicit_session) + if not self.__explicit_session: + self.__session = None + self.__sock_mgr = None def __end_session(self, synchronous): if self.__session and not self.__explicit_session: @@ -185,7 +187,7 @@ def __send_message(self, operation): self.__id = response.data.cursor_id if self.__id == 0: - self.__die(True) + self.close() self.__data = deque(documents) def _unpack_response(self, response, cursor_id, codec_options, diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 22b0de20d6..82ec21ad30 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -34,7 +34,6 @@ _RawBatchGetMore, _Query, _RawBatchQuery) -from pymongo.monitoring import ConnectionClosedReason from pymongo.response import PinnedResponse # These errors mean that the server has already killed the cursor so there is @@ -106,28 +105,23 @@ class CursorType(object): """ -# This has to be an old style class due to -# http://bugs.jython.org/issue1057 -class _SocketManager: +class _SocketManager(object): """Used with exhaust cursors to ensure the socket is returned. """ def __init__(self, sock, more_to_come): self.sock = sock self.more_to_come = more_to_come - self.__closed = False + self.closed = False self.lock = threading.Lock() - def __del__(self): - self.close() - def update_exhaust(self, more_to_come): self.more_to_come = more_to_come def close(self): """Return this instance's socket to the connection pool. """ - if not self.__closed: - self.__closed = True + if not self.closed: + self.closed = True self.sock.unpin() self.sock = None @@ -156,6 +150,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, """ # Initialize all attributes used in __del__ before possibly raising # an error to avoid attribute errors during garbage collection. + self.__collection = collection self.__id = None self.__exhaust = False self.__sock_mgr = None @@ -208,7 +203,6 @@ def __init__(self, collection, filter=None, projection=None, skip=0, projection = {"_id": 1} projection = helpers._fields_list_to_dict(projection, "projection") - self.__collection = collection self.__spec = spec self.__projection = projection self.__skip = skip @@ -293,6 +287,7 @@ def rewind(self): be sent to the server, even if the resultant data has already been retrieved by this cursor. """ + self.close() self.__data = deque() self.__id = None self.__address = None @@ -349,29 +344,23 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: - if self.__exhaust and self.__sock_mgr: - # If this is an exhaust cursor and we haven't completely - # exhausted the result set we *must* close the socket - # to stop the server from sending more data. - self.__sock_mgr.sock.close_socket( - ConnectionClosedReason.ERROR) - else: - address = _CursorAddress( - self.__address, self.__collection.full_name) - if synchronous: - self.__collection.database.client._close_cursor_now( - self.__id, address, session=self.__session, - sock_mgr=self.__sock_mgr) - else: - # The cursor will be closed later in a different session. - self.__collection.database.client._close_cursor( - self.__id, address) - if self.__sock_mgr: - self.__sock_mgr.close() - self.__sock_mgr = None - if self.__session and not self.__explicit_session: - self.__session._end_session(lock=synchronous) + cursor_id = self.__id + address = _CursorAddress( + self.__address, self.__collection.full_name) + else: + # Skip killCursors. + cursor_id = 0 + address = None + self.__collection.database.client._cleanup_cursor( + synchronous, + cursor_id, + address, + self.__sock_mgr, + self.__session, + self.__explicit_session) + if not self.__explicit_session: self.__session = None + self.__sock_mgr = None def close(self): """Explicitly close / kill this cursor. @@ -1094,10 +1083,10 @@ def __send_message(self, operation): if self.__id == 0: # Don't wait for garbage collection to call __del__, return the # socket and the session to the pool now. - self.__die() + self.close() if self.__limit and self.__id and self.__limit <= self.__retrieved: - self.__die() + self.close() def _unpack_response(self, response, cursor_id, codec_options, user_fields=None, legacy_response=False): diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index af1d0b5cd7..2d3d83e259 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1483,14 +1483,46 @@ def __getitem__(self, name): """ return database.Database(self, name) - def _close_cursor(self, cursor_id, address): - """Send a kill cursors message with the given id. + def _cleanup_cursor(self, locks_allowed, cursor_id, address, sock_mgr, + session, explicit_session): + """Cleanup a cursor from cursor.close() or __del__. + + This method handles cleanup for Cursors/CommandCursors including any + pinned connection or implicit session attached at the time the cursor + was closed or garbage collected. - What closing the cursor actually means depends on this client's - cursor manager. If there is none, the cursor is closed asynchronously - on a background thread. + :Parameters: + - `locks_allowed`: True if we are allowed to acquire locks. + - `cursor_id`: The cursor id which may be 0. + - `address`: The _CursorAddress. + - `sock_mgr`: The _SocketManager for the pinned connection or None. + - `session`: The cursor's session. + - `explicit_session`: True if the session was passed explicitly. """ - self.__kill_cursors_queue.append((address, [cursor_id])) + if locks_allowed: + if cursor_id: + if sock_mgr and sock_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + sock_mgr.sock.close_socket( + ConnectionClosedReason.ERROR) + else: + self._close_cursor_now( + cursor_id, address, session=session, + sock_mgr=sock_mgr) + if sock_mgr: + sock_mgr.close() + else: + # The cursor will be closed later in a different session. + if cursor_id or sock_mgr: + self._close_cursor_soon(cursor_id, address, sock_mgr) + if session and not explicit_session: + session._end_session(lock=locks_allowed) + + def _close_cursor_soon(self, cursor_id, address, sock_mgr=None): + """Request that a cursor and/or connection be cleaned up soon.""" + self.__kill_cursors_queue.append((address, cursor_id, sock_mgr)) def _close_cursor_now(self, cursor_id, address=None, session=None, sock_mgr=None): @@ -1512,7 +1544,7 @@ def _close_cursor_now(self, cursor_id, address=None, session=None, [cursor_id], address, self._get_topology(), session) except PyMongoError: # Make another attempt to kill the cursor later. - self.__kill_cursors_queue.append((address, [cursor_id])) + self._close_cursor_soon(cursor_id, address) def _kill_cursors(self, cursor_ids, address, topology, session): """Send a kill cursors message with the given ids.""" @@ -1577,15 +1609,26 @@ def _kill_cursor_impl(self, cursor_ids, address, session, sock_info): def _process_kill_cursors(self): """Process any pending kill cursors requests.""" address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] # Other threads or the GC may append to the queue concurrently. while True: try: - address, cursor_ids = self.__kill_cursors_queue.pop() + address, cursor_id, sock_mgr = self.__kill_cursors_queue.pop() except IndexError: break - address_to_cursor_ids[address].extend(cursor_ids) + if sock_mgr: + pinned_cursors.append((address, cursor_id, sock_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, sock_mgr in pinned_cursors: + try: + self._cleanup_cursor(True, cursor_id, address, sock_mgr, + None, False) + except Exception: + helpers._handle_exception() # Don't re-open topology if it's closed and there's no pending cursors. if address_to_cursor_ids: diff --git a/test/__init__.py b/test/__init__.py index 1e76e58b88..2ee697ef7a 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -185,8 +185,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): def __del__(self): if self._enabled: - print( - '\nERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, ' + msg = ( + 'ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, ' 'MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, ' 'EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s' % ( common.HEARTBEAT_FREQUENCY, @@ -195,6 +195,7 @@ def __del__(self): common.EVENTS_QUEUE_FREQUENCY, self._stack)) self.disable() + raise Exception(msg) def _all_users(db): diff --git a/test/test_gridfs.py b/test/test_gridfs.py index b21191cfb3..2679e7b823 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -34,6 +34,7 @@ from pymongo.read_preferences import ReadPreference import gridfs from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file import GridOutCursor from test import (client_context, unittest, IntegrationTest) @@ -445,6 +446,14 @@ def test_gridfs_find(self): cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__. + cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ + with self.assertRaises(TypeError): + cursor.__init__(self.db.fs.files, {}, {"_id": True}) + cursor.__del__() # no error + def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index a3b58a2ec2..9090192c1e 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -14,13 +14,15 @@ """Test the Load Balancer unified spec tests.""" +import gc import os import sys +import threading sys.path[0:0] = [""] from test import unittest, IntegrationTest, client_context -from test.utils import get_pool +from test.utils import get_pool, wait_until, ExceptionCatchingThread from test.unified_format import generate_test_classes # Location of JSON test specifications. @@ -56,6 +58,106 @@ def test_client_can_be_reopened(self): self.client.close() self.db.test.find_one({}) + @client_context.require_failCommand_fail_point + def test_cursor_gc(self): + def create_resource(coll): + cursor = coll.find({}, batch_size=3) + next(cursor) + return cursor + self._test_no_gc_deadlock(create_resource) + + @client_context.require_failCommand_fail_point + def test_command_cursor_gc(self): + def create_resource(coll): + cursor = coll.aggregate([], batchSize=3) + next(cursor) + return cursor + self._test_no_gc_deadlock(create_resource) + + def _test_no_gc_deadlock(self, create_resource): + pool = get_pool(self.client) + self.assertEqual(pool.active_sockets, 0) + self.db.test.insert_many([{} for _ in range(10)]) + # Cause the initial find attempt to fail to induce a reference cycle. + args = { + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find", "aggregate" + ], + "errorCode": 91, + "closeConnection": True, + } + } + with self.fail_point(args): + resource = create_resource(self.db.test) + if client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + thread = PoolLocker(pool) + thread.start() + self.assertTrue(thread.locked.wait(5), 'timed out') + # Garbage collect the resource while the pool is locked to ensure we + # don't deadlock. + del resource + gc.collect() + thread.unlock.set() + thread.join(5) + self.assertFalse(thread.is_alive()) + self.assertIsNone(thread.exc) + + wait_until(lambda: pool.active_sockets == 0, 'return socket') + # Run another operation to ensure the socket still works. + self.db.test.delete_many({}) + + @client_context.require_transactions + def test_session_gc(self): + pool = get_pool(self.client) + self.assertEqual(pool.active_sockets, 0) + session = self.client.start_session() + session.start_transaction() + self.client.test_session_gc.test.find_one({}, session=session) + if client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + thread = PoolLocker(pool) + thread.start() + self.assertTrue(thread.locked.wait(5), 'timed out') + # Garbage collect the session while the pool is locked to ensure we + # don't deadlock. + del session + # On PyPy it can take a few rounds to collect the session. + for _ in range(3): + gc.collect() + thread.unlock.set() + thread.join(5) + self.assertFalse(thread.is_alive()) + self.assertIsNone(thread.exc) + + wait_until(lambda: pool.active_sockets == 0, 'return socket') + # Run another operation to ensure the socket still works. + self.db.test.delete_many({}) + + +class PoolLocker(ExceptionCatchingThread): + def __init__(self, pool): + super(PoolLocker, self).__init__(target=self.lock_pool) + self.pool = pool + self.daemon = True + self.locked = threading.Event() + self.unlock = threading.Event() + + def lock_pool(self): + with self.pool.lock: + self.locked.set() + # Wait for the unlock flag. + unlock_pool = self.unlock.wait(10) + if not unlock_pool: + raise Exception('timed out waiting for unlock signal:' + ' deadlock?') + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index cce6f0a451..be31799d33 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -53,13 +53,11 @@ def test_uri(self): class TestSpec(SpecRunner): @classmethod - @client_context.require_version_min(4, 0) + @client_context.require_failCommand_fail_point # TODO: remove this once PYTHON-1948 is done. @client_context.require_no_mmap def setUpClass(cls): super(TestSpec, cls).setUpClass() - if client_context.is_mongos and client_context.version[:2] <= (4, 0): - raise unittest.SkipTest("4.0 mongos does not support failCommand") def maybe_skip_scenario(self, test): super(TestSpec, self).maybe_skip_scenario(test) From 00f7fe8ce3647486b2110f9cf9369c2827bd1606 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 23 Jun 2021 11:05:57 -0700 Subject: [PATCH 0378/2111] PYTHON-2748 Fix error in UUID example (#650) --- doc/examples/uuid.rst | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst index 9b6762dc88..4d55fbeae2 100644 --- a/doc/examples/uuid.rst +++ b/doc/examples/uuid.rst @@ -255,19 +255,27 @@ Applications can set the UUID representation in one of the following ways: * - ``unspecified`` - :ref:`unspecified-representation-details` -#. Using the ``uuid_representation`` kwarg option, e.g.:: +#. At the ``MongoClient`` level using the ``uuidRepresentation`` kwarg + option, e.g.:: from bson.binary import UuidRepresentation - client = MongoClient(uuid_representation=UuidRepresentation.PYTHON_LEGACY) + client = MongoClient(uuidRepresentation=UuidRepresentation.PYTHON_LEGACY) -#. By supplying a suitable :class:`~bson.codec_options.CodecOptions` - instance, e.g.:: +#. At the ``Database`` or ``Collection`` level by supplying a suitable + :class:`~bson.codec_options.CodecOptions` instance, e.g.:: from bson.codec_options import CodecOptions csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) + java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) + + # Get database/collection from client with csharpLegacy UUID representation csharp_database = client.get_database('csharp_db', codec_options=csharp_opts) csharp_collection = client.testdb.get_collection('csharp_coll', codec_options=csharp_opts) + # Get database/collection from existing database/collection with javaLegacy UUID representation + java_database = csharp_database.with_options(codec_options=java_opts) + java_collection = csharp_collection.with_options(codec_options=java_opts) + Supported UUID Representations ------------------------------ From f11be6cfa6ae8f9b6be5197997f92fa7f9ee82d5 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 8 Jun 2021 10:54:10 -0700 Subject: [PATCH 0379/2111] PYTHON-2586 Changes to support Python 3.10 --- .evergreen/config.yml | 23 +++++++++++++++++++++++ setup.py | 29 ++++++++++++----------------- test/utils.py | 7 +++---- 3 files changed, 38 insertions(+), 21 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 053dc0cdec..6ab459e428 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1846,6 +1846,10 @@ axes: display_name: "Python 3.9" variables: PYTHON_BINARY: "/opt/python/3.9/bin/python3" + - id: "3.10" + display_name: "Python 3.10" + variables: + PYTHON_BINARY: "/opt/python/3.10/bin/python3" - id: "pypy3.6" display_name: "PyPy 3.6" variables: @@ -1878,6 +1882,10 @@ axes: display_name: "Python 3.9" variables: PYTHON_BINARY: "C:/python/Python39/python.exe" + - id: "3.10" + display_name: "Python 3.10" + variables: + PYTHON_BINARY: "C:/python/Python310/python.exe" - id: python-version-windows-32 display_name: "Python" @@ -1898,6 +1906,10 @@ axes: display_name: "32-bit Python 3.9" variables: PYTHON_BINARY: "C:/python/32/Python39/python.exe" + - id: "3.10" + display_name: "32-bit Python 3.10" + variables: + PYTHON_BINARY: "C:/python/32/Python310/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version @@ -2144,6 +2156,17 @@ buildvariants: display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" tasks: *all-server-versions +- matrix_name: "tests-python-version-ubuntu20-ssl" + matrix_spec: + platform: ubuntu-20.04 + python-version: ["3.10"] + auth-ssl: "*" + display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" + tasks: + - ".latest" + - ".5.0" + - ".4.4" + - matrix_name: "tests-pyopenssl" matrix_spec: platform: awslinux diff --git a/setup.py b/setup.py index f09e2c6f04..88ec93a007 100755 --- a/setup.py +++ b/setup.py @@ -17,11 +17,15 @@ from setuptools import setup, __version__ as _setuptools_version -from distutils.cmd import Command -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError, DistutilsOptionError -from distutils.errors import DistutilsPlatformError, DistutilsExecError -from distutils.core import Extension + +if sys.version_info[:2] < (3, 10): + from distutils.cmd import Command + from distutils.command.build_ext import build_ext + from distutils.core import Extension +else: + from setuptools import Command + from setuptools.command.build_ext import build_ext + from setuptools.extension import Extension _HAVE_SPHINX = True try: @@ -82,7 +86,7 @@ def finalize_options(self): if self.test_suite is None and self.test_module is None: self.test_module = 'test' elif self.test_module is not None and self.test_suite is not None: - raise DistutilsOptionError( + raise Exception( "You may specify a module or suite, but not both" ) @@ -176,15 +180,6 @@ def run(self): " %s/\n" % (mode, path)) -if sys.platform == 'win32': - # distutils.msvc9compiler can raise an IOError when failing to - # find the compiler - build_errors = (CCompilerError, DistutilsExecError, - DistutilsPlatformError, IOError) -else: - build_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) - - class custom_build_ext(build_ext): """Allow C extension building to fail. @@ -237,7 +232,7 @@ class custom_build_ext(build_ext): def run(self): try: build_ext.run(self) - except DistutilsPlatformError: + except Exception: e = sys.exc_info()[1] sys.stdout.write('%s\n' % str(e)) warnings.warn(self.warning_message % ("Extension modules", @@ -249,7 +244,7 @@ def build_extension(self, ext): name = ext.name try: build_ext.build_extension(self, ext) - except build_errors: + except Exception: e = sys.exc_info()[1] sys.stdout.write('%s\n' % str(e)) warnings.warn(self.warning_message % ("The %s extension " diff --git a/test/utils.py b/test/utils.py index 02a0a58838..ae75d76472 100644 --- a/test/utils.py +++ b/test/utils.py @@ -15,7 +15,6 @@ """Utilities for testing pymongo """ -import collections import contextlib import copy import functools @@ -27,7 +26,7 @@ import time import warnings -from collections import defaultdict +from collections import abc, defaultdict from functools import partial from bson import json_util @@ -300,11 +299,11 @@ class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" def __init__(self, data): def convert(v): - if isinstance(v, collections.Mapping): + if isinstance(v, abc.Mapping): return ScenarioDict(v) if isinstance(v, (str, bytes)): return v - if isinstance(v, collections.Sequence): + if isinstance(v, abc.Sequence): return [convert(item) for item in v] return v From ef6b06ce1f719b8b2c2d78f635800fdfaf5b969d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 23 Jun 2021 12:31:20 -0700 Subject: [PATCH 0380/2111] PYTHON-2765 Fix test_exhaust failure due to OP_MSG and __del__ changes (#653) --- test/test_collection.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/test/test_collection.py b/test/test_collection.py index b9137a2cb2..896684330d 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1849,33 +1849,43 @@ def test_exhaust(self): self.db.test.insert_many([{'i': i} for i in range(150)]) client = rs_or_single_client(maxPoolSize=1) - socks = get_pool(client).sockets + self.addCleanup(client.close) + pool = get_pool(client) # Make sure the socket is returned after exhaustion. cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) next(cur) - self.assertEqual(0, len(socks)) + self.assertEqual(0, len(pool.sockets)) for _ in cur: pass - self.assertEqual(1, len(socks)) + self.assertEqual(1, len(pool.sockets)) # Same as previous but don't call next() for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): pass - self.assertEqual(1, len(socks)) - - # If the Cursor instance is discarded before being - # completely iterated we have to close and - # discard the socket. - cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) - next(cur) - self.assertEqual(0, len(socks)) + self.assertEqual(1, len(pool.sockets)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, + batch_size=2) + if client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. + for _ in range(3): + next(cur) + else: + next(cur) + self.assertEqual(0, len(pool.sockets)) if sys.platform.startswith('java') or 'PyPy' in sys.version: # Don't wait for GC or use gc.collect(), it's unreliable. cur.close() cur = None + # Wait until the background thread returns the socket. + wait_until(lambda: pool.active_sockets == 0, 'return socket') # The socket should be discarded. - self.assertEqual(0, len(socks)) + self.assertEqual(0, len(pool.sockets)) def test_distinct(self): self.db.drop_collection("test") From a7921604f1c78cdd76d05410f862989128697e9c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 24 Jun 2021 12:32:52 -0700 Subject: [PATCH 0381/2111] PYTHON-2768 Add SDAM and server selection spec tests for load balancers (#655) --- .../load-balanced/discover_load_balancer.json | 28 ++++++ test/sdam_monitoring/load_balancer.json | 93 +++++++++++++++++++ .../LoadBalanced/read/Nearest.json | 35 +++++++ .../LoadBalanced/read/Primary.json | 30 ++++++ .../LoadBalanced/read/PrimaryPreferred.json | 35 +++++++ .../LoadBalanced/read/Secondary.json | 35 +++++++ .../LoadBalanced/read/SecondaryPreferred.json | 35 +++++++ .../LoadBalanced/write/Nearest.json | 35 +++++++ .../LoadBalanced/write/Primary.json | 30 ++++++ .../LoadBalanced/write/PrimaryPreferred.json | 35 +++++++ .../LoadBalanced/write/Secondary.json | 35 +++++++ .../write/SecondaryPreferred.json | 35 +++++++ test/test_discovery_and_monitoring.py | 6 +- test/test_sdam_monitoring_spec.py | 2 +- test/utils_selection_tests.py | 7 ++ 15 files changed, 474 insertions(+), 2 deletions(-) create mode 100644 test/discovery_and_monitoring/load-balanced/discover_load_balancer.json create mode 100644 test/sdam_monitoring/load_balancer.json create mode 100644 test/server_selection/server_selection/LoadBalanced/read/Nearest.json create mode 100644 test/server_selection/server_selection/LoadBalanced/read/Primary.json create mode 100644 test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json create mode 100644 test/server_selection/server_selection/LoadBalanced/read/Secondary.json create mode 100644 test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json create mode 100644 test/server_selection/server_selection/LoadBalanced/write/Nearest.json create mode 100644 test/server_selection/server_selection/LoadBalanced/write/Primary.json create mode 100644 test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json create mode 100644 test/server_selection/server_selection/LoadBalanced/write/Secondary.json create mode 100644 test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json diff --git a/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json b/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json new file mode 100644 index 0000000000..d2e34478e6 --- /dev/null +++ b/test/discovery_and_monitoring/load-balanced/discover_load_balancer.json @@ -0,0 +1,28 @@ +{ + "description": "Load balancer can be discovered and only has the address property set", + "uri": "mongodb://a/?loadBalanced=true", + "phases": [ + { + "outcome": { + "servers": { + "a:27017": { + "type": "LoadBalancer", + "setName": null, + "setVersion": null, + "electionId": null, + "logicalSessionTimeoutMinutes": null, + "minWireVersion": null, + "maxWireVersion": null, + "topologyVersion": null + } + }, + "topologyType": "LoadBalanced", + "setName": null, + "logicalSessionTimeoutMinutes": null, + "maxSetVersion": null, + "maxElectionId": null, + "compatible": true + } + } + ] +} diff --git a/test/sdam_monitoring/load_balancer.json b/test/sdam_monitoring/load_balancer.json new file mode 100644 index 0000000000..09b1537193 --- /dev/null +++ b/test/sdam_monitoring/load_balancer.json @@ -0,0 +1,93 @@ +{ + "description": "Monitoring a load balancer", + "uri": "mongodb://a:27017/?loadBalanced=true", + "phases": [ + { + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "LoadBalancer" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "LoadBalanced", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "LoadBalancer" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Nearest.json b/test/server_selection/server_selection/LoadBalanced/read/Nearest.json new file mode 100644 index 0000000000..76fa336d55 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Nearest.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Primary.json b/test/server_selection/server_selection/LoadBalanced/read/Primary.json new file mode 100644 index 0000000000..5a4a0aa93a --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Primary.json @@ -0,0 +1,30 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json new file mode 100644 index 0000000000..9aa151cd06 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/PrimaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/Secondary.json b/test/server_selection/server_selection/LoadBalanced/read/Secondary.json new file mode 100644 index 0000000000..c49e30370b --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/Secondary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json new file mode 100644 index 0000000000..18e46877b4 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/read/SecondaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Nearest.json b/test/server_selection/server_selection/LoadBalanced/write/Nearest.json new file mode 100644 index 0000000000..e52e343332 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Nearest.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Primary.json b/test/server_selection/server_selection/LoadBalanced/write/Primary.json new file mode 100644 index 0000000000..9061b25208 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Primary.json @@ -0,0 +1,30 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Primary" + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json new file mode 100644 index 0000000000..5c94dc410d --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/PrimaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "PrimaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/Secondary.json b/test/server_selection/server_selection/LoadBalanced/write/Secondary.json new file mode 100644 index 0000000000..5493867e12 --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/Secondary.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Secondary", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json b/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json new file mode 100644 index 0000000000..f7905f1d5f --- /dev/null +++ b/test/server_selection/server_selection/LoadBalanced/write/SecondaryPreferred.json @@ -0,0 +1,35 @@ +{ + "topology_description": { + "type": "LoadBalanced", + "servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "SecondaryPreferred", + "tag_sets": [ + { + "data_center": "nyc" + } + ] + }, + "suitable_servers": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ], + "in_latency_window": [ + { + "address": "g:27017", + "avg_rtt_ms": 0, + "type": "LoadBalancer" + } + ] +} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index fc5a3ae0b5..2a440cdf38 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -61,16 +61,20 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): parsed_uri = parse_uri(uri) replica_set_name = None direct_connection = None + load_balanced = None if 'replicaset' in parsed_uri['options']: replica_set_name = parsed_uri['options']['replicaset'] if 'directConnection' in parsed_uri['options']: direct_connection = parsed_uri['options']['directConnection'] + if 'loadBalanced' in parsed_uri['options']: + load_balanced = parsed_uri['options']['loadBalanced'] topology_settings = TopologySettings( parsed_uri['nodelist'], replica_set_name=replica_set_name, monitor_class=monitor_class, - direct_connection=direct_connection) + direct_connection=direct_connection, + load_balanced=load_balanced) c = Topology(topology_settings) c.open() diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 0e8b285138..ddcb7e1416 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -194,7 +194,7 @@ def _run(self): try: for phase in scenario_def['phases']: - for (source, response) in phase['responses']: + for (source, response) in phase.get('responses', []): source_address = clean_node(source) topology.on_change(ServerDescription( address=source_address, diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 2f1d99b0f7..57a2673033 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -124,6 +124,10 @@ def create_topology(scenario_def, **kwargs): seeds, hosts = get_addresses( scenario_def['topology_description']['servers']) + topology_type = get_topology_type_name(scenario_def) + if topology_type == 'LoadBalanced': + kwargs.setdefault('load_balanced', True) + settings = get_topology_settings_dict( heartbeat_frequency=frequency, seeds=seeds, @@ -141,6 +145,9 @@ def create_topology(scenario_def, **kwargs): server_description = make_server_description(server, hosts) topology.on_change(server_description) + if topology_type == 'LoadBalanced': + assert topology.description.topology_type_name == 'LoadBalanced' + return topology From 14160aed043989bf495778453b74eeb2a3d51822 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 25 Jun 2021 16:12:12 -0700 Subject: [PATCH 0382/2111] PYTHON-2767 Support snapshot reads on secondaries (#656) Add the MongoClient.start_session snapshot option. --- pymongo/client_session.py | 66 +- pymongo/message.py | 8 +- pymongo/mongo_client.py | 8 +- pymongo/network.py | 6 +- test/load_balancer/test_sessions_unified.py | 23 + .../{ => legacy}/dirty-session-errors.json | 2 +- .../sessions/{ => legacy}/server-support.json | 0 ...t-sessions-not-supported-server-error.json | 105 ++ test/sessions/unified/snapshot-sessions.json | 939 ++++++++++++++++++ test/test_session.py | 22 +- test/test_sessions_unified.py | 33 + test/unified_format.py | 13 +- 12 files changed, 1193 insertions(+), 32 deletions(-) create mode 100644 test/load_balancer/test_sessions_unified.py rename test/sessions/{ => legacy}/dirty-session-errors.json (99%) rename test/sessions/{ => legacy}/server-support.json (100%) create mode 100644 test/sessions/unified/snapshot-sessions-not-supported-server-error.json create mode 100644 test/sessions/unified/snapshot-sessions.json create mode 100644 test/test_sessions_unified.py diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 21f2f597f3..e93ac93eba 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -126,14 +126,29 @@ class SessionOptions(object): """Options for a new :class:`ClientSession`. :Parameters: - - `causal_consistency` (optional): If True (the default), read - operations are causally ordered within the session. + - `causal_consistency` (optional): If True, read operations are causally + ordered within the session. Defaults to True when the ``snapshot`` + option is ``False``. - `default_transaction_options` (optional): The default TransactionOptions to use for transactions started on this session. + - `snapshot` (optional): If True, then all reads performed using this + session will read from the same snapshot. This option is incompatible + with ``causal_consistency=True``. Defaults to ``False``. + + .. versionchanged:: 3.12 + Added the ``snapshot`` parameter. """ def __init__(self, - causal_consistency=True, - default_transaction_options=None): + causal_consistency=None, + default_transaction_options=None, + snapshot=False): + if snapshot: + if causal_consistency: + raise ConfigurationError('snapshot reads do not support ' + 'causal_consistency=True') + causal_consistency = False + elif causal_consistency is None: + causal_consistency = True self._causal_consistency = causal_consistency if default_transaction_options is not None: if not isinstance(default_transaction_options, TransactionOptions): @@ -142,6 +157,7 @@ def __init__(self, "pymongo.client_session.TransactionOptions, not: %r" % (default_transaction_options,)) self._default_transaction_options = default_transaction_options + self._snapshot = snapshot @property def causal_consistency(self): @@ -157,6 +173,14 @@ def default_transaction_options(self): """ return self._default_transaction_options + @property + def snapshot(self): + """Whether snapshot reads are configured. + + .. versionadded:: 3.12 + """ + return self._snapshot + class TransactionOptions(object): """Options for :meth:`ClientSession.start_transaction`. @@ -388,6 +412,7 @@ def __init__(self, client, server_session, options, implicit): self._options = options self._cluster_time = None self._operation_time = None + self._snapshot_time = None # Is this an implicitly created session? self._implicit = implicit self._transaction = _Transaction(None, client) @@ -603,6 +628,10 @@ def start_transaction(self, read_concern=None, write_concern=None, """ self._check_ended() + if self.options.snapshot: + raise InvalidOperation("Transactions are not supported in " + "snapshot sessions") + if self.in_transaction: raise InvalidOperation("Transaction already in progress") @@ -781,6 +810,12 @@ def _process_response(self, reply): """Process a response to a command that was run with this session.""" self._advance_cluster_time(reply.get('$clusterTime')) self._advance_operation_time(reply.get('operationTime')) + if self._options.snapshot and self._snapshot_time is None: + if 'cursor' in reply: + ct = reply['cursor'].get('atClusterTime') + else: + ct = reply.get('atClusterTime') + self._snapshot_time = ct if self.in_transaction and self._transaction.sharded: recovery_token = reply.get('recoveryToken') if recovery_token: @@ -854,15 +889,9 @@ def _apply_to(self, command, is_retryable, read_preference): if self._transaction.opts.read_concern: rc = self._transaction.opts.read_concern.document - else: - rc = {} - - if (self.options.causal_consistency - and self.operation_time is not None): - rc['afterClusterTime'] = self.operation_time - - if rc: - command['readConcern'] = rc + if rc: + command['readConcern'] = rc + self._update_read_concern(command) command['txnNumber'] = self._server_session.transaction_id command['autocommit'] = False @@ -871,6 +900,17 @@ def _start_retryable_write(self): self._check_ended() self._server_session.inc_transaction_id() + def _update_read_concern(self, cmd): + if (self.options.causal_consistency + and self.operation_time is not None): + cmd.setdefault('readConcern', {})[ + 'afterClusterTime'] = self.operation_time + if self.options.snapshot: + rc = cmd.setdefault('readConcern', {}) + rc['level'] = 'snapshot' + if self._snapshot_time is not None: + rc['atClusterTime'] = self._snapshot_time + class _ServerSession(object): def __init__(self, generation): diff --git a/pymongo/message.py b/pymongo/message.py index 15c564475f..fb243695e2 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -315,12 +315,8 @@ def as_command(self, sock_info): if session: session._apply_to(cmd, False, self.read_preference) # Explain does not support readConcern. - if (not explain and session.options.causal_consistency - and session.operation_time is not None - and not session.in_transaction): - cmd.setdefault( - 'readConcern', {})[ - 'afterClusterTime'] = session.operation_time + if not explain and not session.in_transaction: + session._update_read_concern(cmd) sock_info.send_cluster_time(cmd, session, self.client) # Support auto encryption client = self.client diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 2d3d83e259..ceb9e5140b 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1658,8 +1658,9 @@ def __start_session(self, implicit, **kwargs): self, server_session, opts, implicit) def start_session(self, - causal_consistency=True, - default_transaction_options=None): + causal_consistency=None, + default_transaction_options=None, + snapshot=False): """Start a logical session. This method takes the same parameters as @@ -1682,7 +1683,8 @@ def start_session(self, return self.__start_session( False, causal_consistency=causal_consistency, - default_transaction_options=default_transaction_options) + default_transaction_options=default_transaction_options, + snapshot=snapshot) def _get_server_session(self): """Internal: start or resume a _ServerSession.""" diff --git a/pymongo/network.py b/pymongo/network.py index b0e06a7173..b18a902373 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -90,10 +90,8 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, if read_concern and not (session and session.in_transaction): if read_concern.level: spec['readConcern'] = read_concern.document - if (session and session.options.causal_consistency - and session.operation_time is not None): - spec.setdefault( - 'readConcern', {})['afterClusterTime'] = session.operation_time + if session: + session._update_read_concern(spec) if collation is not None: spec['collation'] = collation diff --git a/test/load_balancer/test_sessions_unified.py b/test/load_balancer/test_sessions_unified.py new file mode 100644 index 0000000000..f489b16a83 --- /dev/null +++ b/test/load_balancer/test_sessions_unified.py @@ -0,0 +1,23 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.test_sessions_unified import * + +if __name__ == '__main__': + unittest.main() diff --git a/test/sessions/dirty-session-errors.json b/test/sessions/legacy/dirty-session-errors.json similarity index 99% rename from test/sessions/dirty-session-errors.json rename to test/sessions/legacy/dirty-session-errors.json index 408904ac5f..77f71c7623 100644 --- a/test/sessions/dirty-session-errors.json +++ b/test/sessions/legacy/dirty-session-errors.json @@ -668,4 +668,4 @@ } } ] -} \ No newline at end of file +} diff --git a/test/sessions/server-support.json b/test/sessions/legacy/server-support.json similarity index 100% rename from test/sessions/server-support.json rename to test/sessions/legacy/server-support.json diff --git a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json b/test/sessions/unified/snapshot-sessions-not-supported-server-error.json new file mode 100644 index 0000000000..896a11c6eb --- /dev/null +++ b/test/sessions/unified/snapshot-sessions-not-supported-server-error.json @@ -0,0 +1,105 @@ +{ + "description": "snapshot-sessions-not-supported-server-error", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "4.4.99", + "topologies": [ + "replicaset, sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Server returns an error on find with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/unified/snapshot-sessions.json b/test/sessions/unified/snapshot-sessions.json new file mode 100644 index 0000000000..4170a96699 --- /dev/null +++ b/test/sessions/unified/snapshot-sessions.json @@ -0,0 +1,939 @@ +{ + "description": "snapshot-sessions", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "findAndModify", + "insert", + "update" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + }, + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Find operation with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 13 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 13 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Distinct operation with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 2, + "x": 12 + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session1" + }, + "expectResult": [ + 11, + 12 + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 2, + "x": 13 + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectResult": [ + 11, + 13 + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session1" + }, + "expectResult": [ + 11, + 12 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Aggregate operation with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session1" + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 13 + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1, + "x": 13 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session1" + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Mixed operation with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "After" + }, + "expectResult": { + "_id": 1, + "x": 12 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "Write commands with snapshot session do not affect snapshot reads", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 22, + "x": 33 + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "session": "session0", + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "First snapshot read does not send atClusterTime", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + }, + "commandName": "find", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "StartTransaction fails in snapshot session", + "operations": [ + { + "name": "startTransaction", + "object": "session0", + "expectError": { + "isError": true, + "isClientError": true, + "errorContains": "Transactions are not supported in snapshot sessions" + } + } + ] + } + ] +} diff --git a/test/test_session.py b/test/test_session.py index 6f32dac819..ab78e30f89 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -21,6 +21,8 @@ from io import BytesIO +sys.path[0:0] = [""] + from bson import DBRef from gridfs import GridFS, GridFSBucket from pymongo import ASCENDING, InsertOne, IndexModel, OFF, monitoring @@ -712,6 +714,21 @@ def drop_db(): wait_until(drop_db, 'dropped database after w=0 writes') + def test_snapshot_incompatible_with_causal_consistency(self): + with self.client.start_session(causal_consistency=False, + snapshot=False): + pass + with self.client.start_session(causal_consistency=False, + snapshot=True): + pass + with self.client.start_session(causal_consistency=True, + snapshot=False): + pass + with self.assertRaises(ConfigurationError): + with self.client.start_session(causal_consistency=True, + snapshot=True): + pass + class TestCausalConsistency(unittest.TestCase): @@ -1153,7 +1170,7 @@ def insert_and_aggregate(): class TestSpec(SpecRunner): # Location of JSON test specifications. TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'sessions') + os.path.dirname(os.path.realpath(__file__)), 'sessions', 'legacy') def last_two_command_events(self): """Return the last two command started events.""" @@ -1198,3 +1215,6 @@ def run_scenario(self): test_creator = TestCreator(create_test, TestSpec, TestSpec.TEST_PATH) test_creator.create_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py new file mode 100644 index 0000000000..fe25536e7e --- /dev/null +++ b/test/test_sessions_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Sessions unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'sessions', 'unified') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 1292792d16..33c0530524 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -40,8 +40,8 @@ from pymongo.collection import Collection from pymongo.database import Database from pymongo.errors import ( - BulkWriteError, ConnectionFailure, InvalidOperation, NotPrimaryError, - PyMongoError) + BulkWriteError, ConnectionFailure, ConfigurationError, InvalidOperation, + NotPrimaryError, PyMongoError) from pymongo.monitoring import ( CommandFailedEvent, CommandListener, CommandStartedEvent, CommandSucceededEvent, _SENSITIVE_COMMANDS, PoolCreatedEvent, @@ -501,8 +501,11 @@ def _match_document(self, expectation, actual, is_root): self.match_result(value, actual[key], in_recursive_call=True) if not is_root: - self.test.assertEqual( - set(expectation.keys()), set(actual.keys())) + expected_keys = set(expectation.keys()) + for key, value in expectation.items(): + if value == {'$$exists': False}: + expected_keys.remove(key) + self.test.assertEqual(expected_keys, set(actual.keys())) def match_result(self, expectation, actual, in_recursive_call=False): @@ -723,6 +726,8 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. if isinstance(exception, ConnectionFailure): self.assertNotIsInstance(exception, NotPrimaryError) + elif isinstance(exception, (InvalidOperation, ConfigurationError)): + pass else: self.assertNotIsInstance(exception, PyMongoError) From b4b7a07b81b0e1ed196b253f58da5f323337137f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 25 Jun 2021 16:20:21 -0700 Subject: [PATCH 0383/2111] PYTHON-2762 Avoid duplicating unified test files for LB testing (#649) Create new client for each cursor/session __del__ test. Always close cursors in spec tests. --- .evergreen/run-tests.sh | 6 +-- test/__init__.py | 36 ++++++++++++++++ test/load_balancer/{unified => }/cursors.json | 0 .../{unified => }/event-monitoring.json | 0 .../lb-connection-establishment.json | 0 .../non-lb-connection-establishment.json | 0 .../{unified => }/sdam-error-handling.json | 0 .../{unified => }/server-selection.json | 0 .../test_command_monitoring_unified.py | 23 ---------- test/load_balancer/test_crud_unified.py | 23 ---------- test/load_balancer/test_dns.py | 23 ---------- test/load_balancer/test_load_balancer.py | 26 ----------- .../test_retryable_change_stream.py | 23 ---------- test/load_balancer/test_retryable_reads.py | 23 ---------- test/load_balancer/test_retryable_writes.py | 23 ---------- .../test_transactions_unified.py | 23 ---------- test/load_balancer/test_uri_options.py | 23 ---------- test/load_balancer/test_versioned_api.py | 23 ---------- .../{unified => }/transactions.json | 0 .../{unified => }/wait-queue-timeouts.json | 0 test/test_change_stream.py | 5 ++- test/test_heartbeat_monitoring.py | 4 +- test/test_load_balancer.py | 43 ++++++++++++------- test/test_mongos_load_balancing.py | 1 + test/test_read_preferences.py | 3 +- test/test_replica_set_reconfig.py | 1 + test/test_retryable_reads.py | 1 + test/test_retryable_writes.py | 4 ++ test/test_sdam_monitoring_spec.py | 9 ++-- test/test_versioned_api.py | 2 + test/unified_format.py | 9 +++- test/utils_spec_runner.py | 4 ++ 32 files changed, 97 insertions(+), 264 deletions(-) rename test/load_balancer/{unified => }/cursors.json (100%) rename test/load_balancer/{unified => }/event-monitoring.json (100%) rename test/load_balancer/{unified => }/lb-connection-establishment.json (100%) rename test/load_balancer/{unified => }/non-lb-connection-establishment.json (100%) rename test/load_balancer/{unified => }/sdam-error-handling.json (100%) rename test/load_balancer/{unified => }/server-selection.json (100%) delete mode 100644 test/load_balancer/test_command_monitoring_unified.py delete mode 100644 test/load_balancer/test_crud_unified.py delete mode 100644 test/load_balancer/test_dns.py delete mode 100644 test/load_balancer/test_load_balancer.py delete mode 100644 test/load_balancer/test_retryable_change_stream.py delete mode 100644 test/load_balancer/test_retryable_reads.py delete mode 100644 test/load_balancer/test_retryable_writes.py delete mode 100644 test/load_balancer/test_transactions_unified.py delete mode 100644 test/load_balancer/test_uri_options.py delete mode 100644 test/load_balancer/test_versioned_api.py rename test/load_balancer/{unified => }/transactions.json (100%) rename test/load_balancer/{unified => }/wait-queue-timeouts.json (100%) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9848b91877..4577d2160c 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -197,11 +197,7 @@ if [ -z "$GREEN_FRAMEWORK" ]; then $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" fi - if [ -n "$TEST_LOADBALANCER" ]; then - $PYTHON -m xmlrunner discover -s test/load_balancer -v --locals -o $XUNIT_DIR - else - $PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT - fi + $PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT else # --no_ext has to come before "test" so there is no way to toggle extensions here. $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT diff --git a/test/__init__.py b/test/__init__.py index 2ee697ef7a..2eee971775 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -589,6 +589,24 @@ def sec_count(): return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + @property + def supports_secondary_read_pref(self): + if self.has_secondaries: + return True + if self.is_mongos: + shard = self.client.config.shards.find_one()['host'] + num_members = shard.count(',') + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require(lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read " + "preference") + def require_no_replica_set(self, func): """Run a test if the client is *not* connected to a replica set.""" return self._require( @@ -639,6 +657,13 @@ def require_load_balancer(self, func): "Must be connected to a load balancer", func=func) + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer. + """ + return self._require(lambda: not self.load_balancer, + "Must not be connected to a load balancer", + func=func) + def check_auth_with_sharding(self, func): """Skip a test when connected to mongos < 2.0 and running with auth.""" condition = lambda: not (self.auth_enabled and @@ -852,6 +877,9 @@ class IntegrationTest(PyMongoTestCase): @classmethod @client_context.require_connection def setUpClass(cls): + if (client_context.load_balancer and + not getattr(cls, 'RUN_ON_LOAD_BALANCER', False)): + raise SkipTest('this test does not support load balancers') cls.client = client_context.client cls.db = cls.client.pymongo_test if client_context.auth_enabled: @@ -875,6 +903,14 @@ class MockClientTest(unittest.TestCase): The class temporarily overrides HEARTBEAT_FREQUENCY to speed up tests. """ + # MockClients tests that use replicaSet, directConnection=True, pass + # multiple seed addresses, or wait for heartbeat events are incompatible + # with loadBalanced=True. + @classmethod + @client_context.require_no_load_balancer + def setUpClass(cls): + pass + def setUp(self): super(MockClientTest, self).setUp() diff --git a/test/load_balancer/unified/cursors.json b/test/load_balancer/cursors.json similarity index 100% rename from test/load_balancer/unified/cursors.json rename to test/load_balancer/cursors.json diff --git a/test/load_balancer/unified/event-monitoring.json b/test/load_balancer/event-monitoring.json similarity index 100% rename from test/load_balancer/unified/event-monitoring.json rename to test/load_balancer/event-monitoring.json diff --git a/test/load_balancer/unified/lb-connection-establishment.json b/test/load_balancer/lb-connection-establishment.json similarity index 100% rename from test/load_balancer/unified/lb-connection-establishment.json rename to test/load_balancer/lb-connection-establishment.json diff --git a/test/load_balancer/unified/non-lb-connection-establishment.json b/test/load_balancer/non-lb-connection-establishment.json similarity index 100% rename from test/load_balancer/unified/non-lb-connection-establishment.json rename to test/load_balancer/non-lb-connection-establishment.json diff --git a/test/load_balancer/unified/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json similarity index 100% rename from test/load_balancer/unified/sdam-error-handling.json rename to test/load_balancer/sdam-error-handling.json diff --git a/test/load_balancer/unified/server-selection.json b/test/load_balancer/server-selection.json similarity index 100% rename from test/load_balancer/unified/server-selection.json rename to test/load_balancer/server-selection.json diff --git a/test/load_balancer/test_command_monitoring_unified.py b/test/load_balancer/test_command_monitoring_unified.py deleted file mode 100644 index 6b8ef98325..0000000000 --- a/test/load_balancer/test_command_monitoring_unified.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_command_monitoring_unified import * - -if __name__ == "__main__": - unittest.main() diff --git a/test/load_balancer/test_crud_unified.py b/test/load_balancer/test_crud_unified.py deleted file mode 100644 index 4363f293fd..0000000000 --- a/test/load_balancer/test_crud_unified.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_crud_unified import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_dns.py b/test/load_balancer/test_dns.py deleted file mode 100644 index 34e2329c8c..0000000000 --- a/test/load_balancer/test_dns.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_dns import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_load_balancer.py b/test/load_balancer/test_load_balancer.py deleted file mode 100644 index 77e824e59d..0000000000 --- a/test/load_balancer/test_load_balancer.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test the Load Balancer unified spec tests.""" - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_load_balancer import * - - -if __name__ == "__main__": - unittest.main() diff --git a/test/load_balancer/test_retryable_change_stream.py b/test/load_balancer/test_retryable_change_stream.py deleted file mode 100644 index f08e27e9d6..0000000000 --- a/test/load_balancer/test_retryable_change_stream.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_change_stream import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_retryable_reads.py b/test/load_balancer/test_retryable_reads.py deleted file mode 100644 index 73510fab76..0000000000 --- a/test/load_balancer/test_retryable_reads.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_retryable_reads import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_retryable_writes.py b/test/load_balancer/test_retryable_writes.py deleted file mode 100644 index c920acb818..0000000000 --- a/test/load_balancer/test_retryable_writes.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_retryable_writes import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_transactions_unified.py b/test/load_balancer/test_transactions_unified.py deleted file mode 100644 index d2f7eac94b..0000000000 --- a/test/load_balancer/test_transactions_unified.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_transactions_unified import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_uri_options.py b/test/load_balancer/test_uri_options.py deleted file mode 100644 index c7151d330a..0000000000 --- a/test/load_balancer/test_uri_options.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_uri_spec import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/test_versioned_api.py b/test/load_balancer/test_versioned_api.py deleted file mode 100644 index 2b188a6b1e..0000000000 --- a/test/load_balancer/test_versioned_api.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_versioned_api import * - -if __name__ == '__main__': - unittest.main() diff --git a/test/load_balancer/unified/transactions.json b/test/load_balancer/transactions.json similarity index 100% rename from test/load_balancer/unified/transactions.json rename to test/load_balancer/transactions.json diff --git a/test/load_balancer/unified/wait-queue-timeouts.json b/test/load_balancer/wait-queue-timeouts.json similarity index 100% rename from test/load_balancer/unified/wait-queue-timeouts.json rename to test/load_balancer/wait-queue-timeouts.json diff --git a/test/test_change_stream.py b/test/test_change_stream.py index a030ca400b..669a819aa6 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -49,6 +49,8 @@ class TestChangeStreamBase(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + def change_stream_with_client(self, client, *args, **kwargs): """Create a change stream using the given client and return it.""" raise NotImplementedError @@ -1038,7 +1040,8 @@ def test_read_concern(self): pass -class TestAllLegacyScenarios(unittest.TestCase): +class TestAllLegacyScenarios(IntegrationTest): + RUN_ON_LOAD_BALANCER = True @classmethod @client_context.require_connection diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 3929412f36..8c66557023 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -22,12 +22,12 @@ from pymongo.errors import ConnectionFailure from pymongo.ismaster import IsMaster from pymongo.monitor import Monitor -from test import unittest, client_knobs +from test import unittest, client_knobs, IntegrationTest from test.utils import (HeartbeatEventListener, MockPool, single_client, wait_until) -class TestHeartbeatMonitoring(unittest.TestCase): +class TestHeartbeatMonitoring(IntegrationTest): def create_mock_monitor(self, responses, uri, expected_results): listener = HeartbeatEventListener() diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 9090192c1e..5c0eadf036 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -22,18 +22,22 @@ sys.path[0:0] = [""] from test import unittest, IntegrationTest, client_context -from test.utils import get_pool, wait_until, ExceptionCatchingThread +from test.utils import (ExceptionCatchingThread, + get_pool, + rs_client, + wait_until) from test.unified_format import generate_test_classes # Location of JSON test specifications. TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'load_balancer', 'unified') + os.path.dirname(os.path.realpath(__file__)), 'load_balancer') # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) class TestLB(IntegrationTest): + RUN_ON_LOAD_BALANCER = True def test_connections_are_only_returned_once(self): pool = get_pool(self.client) @@ -45,11 +49,14 @@ def test_connections_are_only_returned_once(self): @client_context.require_load_balancer def test_unpin_committed_transaction(self): - pool = get_pool(self.client) - with self.client.start_session() as session: + client = rs_client() + self.addCleanup(client.close) + pool = get_pool(client) + coll = client[self.db.name].test + with client.start_session() as session: with session.start_transaction(): self.assertEqual(pool.active_sockets, 0) - self.db.test.insert_one({}, session=session) + coll.insert_one({}, session=session) self.assertEqual(pool.active_sockets, 1) # Pinned. self.assertEqual(pool.active_sockets, 1) # Still pinned. self.assertEqual(pool.active_sockets, 0) # Unpinned. @@ -75,9 +82,12 @@ def create_resource(coll): self._test_no_gc_deadlock(create_resource) def _test_no_gc_deadlock(self, create_resource): - pool = get_pool(self.client) + client = rs_client() + self.addCleanup(client.close) + pool = get_pool(client) + coll = client[self.db.name].test + coll.insert_many([{} for _ in range(10)]) self.assertEqual(pool.active_sockets, 0) - self.db.test.insert_many([{} for _ in range(10)]) # Cause the initial find attempt to fail to induce a reference cycle. args = { "mode": { @@ -92,7 +102,7 @@ def _test_no_gc_deadlock(self, create_resource): } } with self.fail_point(args): - resource = create_resource(self.db.test) + resource = create_resource(coll) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. @@ -102,7 +112,9 @@ def _test_no_gc_deadlock(self, create_resource): # Garbage collect the resource while the pool is locked to ensure we # don't deadlock. del resource - gc.collect() + # On PyPy it can take a few rounds to collect the cursor. + for _ in range(3): + gc.collect() thread.unlock.set() thread.join(5) self.assertFalse(thread.is_alive()) @@ -110,15 +122,16 @@ def _test_no_gc_deadlock(self, create_resource): wait_until(lambda: pool.active_sockets == 0, 'return socket') # Run another operation to ensure the socket still works. - self.db.test.delete_many({}) + coll.delete_many({}) @client_context.require_transactions def test_session_gc(self): - pool = get_pool(self.client) - self.assertEqual(pool.active_sockets, 0) - session = self.client.start_session() + client = rs_client() + self.addCleanup(client.close) + pool = get_pool(client) + session = client.start_session() session.start_transaction() - self.client.test_session_gc.test.find_one({}, session=session) + client.test_session_gc.test.find_one({}, session=session) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. @@ -138,7 +151,7 @@ def test_session_gc(self): wait_until(lambda: pool.active_sockets == 0, 'return socket') # Run another operation to ensure the socket still works. - self.db.test.delete_many({}) + client[self.db.name].test.delete_many({}) class PoolLocker(ExceptionCatchingThread): diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index 18e05125b2..96a7e9f6bd 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -28,6 +28,7 @@ @client_context.require_connection +@client_context.require_no_load_balancer def setUpModule(): pass diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index d02c1cacc2..d70ad7e076 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -601,10 +601,11 @@ def test_read_preference_document_hedge(self): def test_send_hedge(self): cases = { 'primaryPreferred': PrimaryPreferred, - 'secondary': Secondary, 'secondaryPreferred': SecondaryPreferred, 'nearest': Nearest, } + if client_context.supports_secondary_read_pref: + cases['secondary'] = Secondary listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index d9d39e3010..62dc0ac0a9 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -26,6 +26,7 @@ @client_context.require_connection +@client_context.require_no_load_balancer def setUpModule(): pass diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index be31799d33..381f2c1d80 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -51,6 +51,7 @@ def test_uri(self): class TestSpec(SpecRunner): + RUN_ON_LOAD_BALANCER = True @classmethod @client_context.require_failCommand_fail_point diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 484ef740d3..db2e1455db 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -54,6 +54,7 @@ class TestAllScenarios(SpecRunner): + RUN_ON_LOAD_BALANCER = True def get_object_name(self, op): return op.get('object', 'collection') @@ -121,6 +122,7 @@ def non_retryable_single_statement_ops(coll): class IgnoreDeprecationsTest(IntegrationTest): + RUN_ON_LOAD_BALANCER = True @classmethod def setUpClass(cls): @@ -417,6 +419,8 @@ def test_batch_splitting_retry_fails(self): class TestWriteConcernError(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + @classmethod @client_context.require_replica_set @client_context.require_no_mmap diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index ddcb7e1416..3afdd23570 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -168,12 +168,11 @@ def compare_multiple_events(i, expected_results, actual_results): return j, True, '' -class TestAllScenarios(unittest.TestCase): +class TestAllScenarios(IntegrationTest): - @classmethod - @client_context.require_connection - def setUp(cls): - cls.all_listener = ServerAndTopologyEventListener() + def setUp(self): + super(TestAllScenarios, self).setUp() + self.all_listener = ServerAndTopologyEventListener() def create_test(scenario_def): diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 27a1dc7fe5..f092c434bc 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -33,6 +33,8 @@ class TestServerApi(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + def test_server_api_defaults(self): api = ServerApi(ServerApiVersion.V1) self.assertEqual(api.version, '1') diff --git a/test/unified_format.py b/test/unified_format.py index 33c0530524..353e8753c4 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -638,6 +638,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ SCHEMA_VERSION = Version.from_string('1.5') + RUN_ON_LOAD_BALANCER = True @staticmethod def should_run_on(run_on_spec): @@ -776,7 +777,9 @@ def __entityOperation_createChangeStream(self, target, *args, **kwargs): self.skipTest("MMAPv1 does not support change streams") self.__raise_if_unsupported( 'createChangeStream', target, MongoClient, Database, Collection) - return target.watch(*args, **kwargs) + stream = target.watch(*args, **kwargs) + self.addCleanup(stream.close) + return stream def _clientOperation_createChangeStream(self, target, *args, **kwargs): return self.__entityOperation_createChangeStream( @@ -821,7 +824,9 @@ def _collectionOperation_find(self, target, *args, **kwargs): def _collectionOperation_createFindCursor(self, target, *args, **kwargs): self.__raise_if_unsupported('find', target, Collection) - return NonLazyCursor(target.find(*args, **kwargs)) + cursor = NonLazyCursor(target.find(*args, **kwargs)) + self.addCleanup(cursor.close) + return cursor def _collectionOperation_listIndexes(self, target, *args, **kwargs): if 'batch_size' in kwargs: diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 5f79789ec8..80173f7c3a 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -299,6 +299,10 @@ def run_operation(self, sessions, collection, operation): arguments = args result = cmd(**dict(arguments)) + # Cleanup open change stream cursors. + if name == "watch": + self.addCleanup(result.close) + if name == "aggregate": if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: # Read from the primary to ensure causal consistency. From 67ebd5cab4798f0bc566d0f4222b6097502df0bc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 25 Jun 2021 16:24:30 -0700 Subject: [PATCH 0384/2111] PYTHON-2762 Remove duplicate unified sessions test --- test/load_balancer/test_sessions_unified.py | 23 --------------------- 1 file changed, 23 deletions(-) delete mode 100644 test/load_balancer/test_sessions_unified.py diff --git a/test/load_balancer/test_sessions_unified.py b/test/load_balancer/test_sessions_unified.py deleted file mode 100644 index f489b16a83..0000000000 --- a/test/load_balancer/test_sessions_unified.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2021-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.test_sessions_unified import * - -if __name__ == '__main__': - unittest.main() From a94504bde92272cab06c6b9aaf5331234cfc9004 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 25 Jun 2021 11:26:01 -0700 Subject: [PATCH 0385/2111] PYTHON-2726 Document read preference quirks --- doc/mongo_extensions.py | 2 +- pymongo/errors.py | 2 +- pymongo/read_preferences.py | 8 ++++++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/mongo_extensions.py b/doc/mongo_extensions.py index cc6fe40ab2..47ea9b7c89 100644 --- a/doc/mongo_extensions.py +++ b/doc/mongo_extensions.py @@ -75,7 +75,7 @@ def process_mongodoc_nodes(app, doctree, fromdocname): anchor = name["ids"][0] break for para in node.traverse(nodes.paragraph): - tag = str(para.traverse()[1]) + tag = str(list(para.traverse())[1]) link = mongoref("", "") link["refuri"] = "http://dochub.mongodb.org/core/%s" % tag link["name"] = anchor diff --git a/pymongo/errors.py b/pymongo/errors.py index 33629b5fa6..b0bcc3ed1f 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -101,7 +101,7 @@ class NotMasterError(AutoReconnect): Use :exc:`~pymongo.errors.NotPrimaryError` instead. .. versionchanged:: 3.12 - Deprecated. Use :exc:`~pymongo.errors.NotPrimaryError` instead. + Deprecated. Use :exc:`~pymongo.errors.NotPrimaryError` instead. """ def __init__(self, message='', errors=None): super(NotMasterError, self).__init__( diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 6916cc5fac..3c07c16ba1 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -269,6 +269,10 @@ class PrimaryPreferred(_ServerMode): * When connected to a replica set queries are sent to the primary if available, otherwise a secondary. + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to an available secondary until the + primary of the replica set is discovered. + :Parameters: - `tag_sets`: The :attr:`~tag_sets` to use if the primary is not available. @@ -347,6 +351,10 @@ class SecondaryPreferred(_ServerMode): * When connected to a replica set queries are distributed among secondaries, or the primary if no secondary is available. + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to the primary of the replica set until + an available secondary is discovered. + :Parameters: - `tag_sets`: The :attr:`~tag_sets` for this read preference. - `max_staleness`: (integer, in seconds) The maximum estimated From 354c96a414ea30cdb9bd23971a28e3cc2fab3a89 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 28 Jun 2021 15:11:52 -0700 Subject: [PATCH 0386/2111] PYTHON-2779 Fix topologies field in snapshot reads test (#657) --- test/__init__.py | 4 ++++ ...shot-sessions-not-supported-server-error.json | 16 +++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 2eee971775..28c46918f7 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -673,6 +673,10 @@ def check_auth_with_sharding(self, func): func=func) def is_topology_type(self, topologies): + unknown = set(topologies) - {'single', 'replicaset', 'sharded', + 'sharded-replicaset', 'load-balanced'} + if unknown: + raise AssertionError('Unknown topologies: %r' % (unknown,)) if self.load_balancer: if 'load-balanced' in topologies: return True diff --git a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json b/test/sessions/unified/snapshot-sessions-not-supported-server-error.json index 896a11c6eb..b6ce00216a 100644 --- a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json +++ b/test/sessions/unified/snapshot-sessions-not-supported-server-error.json @@ -4,9 +4,12 @@ "runOnRequirements": [ { "minServerVersion": "3.6", - "maxServerVersion": "4.4.99", + "maxServerVersion": "4.4.99" + }, + { + "minServerVersion": "3.6", "topologies": [ - "replicaset, sharded-replicaset" + "single" ] } ], @@ -17,6 +20,11 @@ "observeEvents": [ "commandStartedEvent", "commandFailedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "findAndModify", + "insert", + "update" ] } }, @@ -87,9 +95,7 @@ "$$exists": false } } - }, - "commandName": "find", - "databaseName": "database0" + } } }, { From 748b9ce7b956826594898cac88dff7846297e585 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 28 Jun 2021 17:50:18 -0700 Subject: [PATCH 0387/2111] PYTHON-2575 Set cargo path for cryptography builds --- .evergreen/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6ab459e428..5ad93283cb 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -74,7 +74,8 @@ functions: export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" - export PATH="$MONGODB_BINARIES:$PATH" + # Installation of cryptography requires a rust compiler on some machines + export PATH="$MONGODB_BINARIES:/home/admin/.cargo/bin:$PATH" export PROJECT="${project}" EOT # See what we've done From 4152600ae6f7d9044d67a2601b0e7bdf3f301a30 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 29 Jun 2021 14:40:48 -0700 Subject: [PATCH 0388/2111] PYTHON-2777 Raise client side error for snapshot reads on <5.0 (#659) --- pymongo/bulk.py | 3 +- pymongo/client_session.py | 9 +- pymongo/message.py | 6 +- pymongo/network.py | 2 +- pymongo/pool.py | 3 +- ...t-sessions-not-supported-client-error.json | 113 ++++++++++++++++++ ...t-sessions-not-supported-server-error.json | 96 +++++++++++++-- 7 files changed, 213 insertions(+), 19 deletions(-) create mode 100644 test/sessions/unified/snapshot-sessions-not-supported-client-error.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 751feaf253..893d8a83ee 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -296,7 +296,8 @@ def _execute_command(self, generator, write_concern, session, if retryable and not self.started_retryable_write: session._start_retryable_write() self.started_retryable_write = True - session._apply_to(cmd, retryable, ReadPreference.PRIMARY) + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, + sock_info) sock_info.send_cluster_time(cmd, session, client) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible in one command. diff --git a/pymongo/client_session.py b/pymongo/client_session.py index e93ac93eba..5c2fdd9fa1 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -866,7 +866,7 @@ def _txn_read_preference(self): return self._transaction.opts.read_preference return None - def _apply_to(self, command, is_retryable, read_preference): + def _apply_to(self, command, is_retryable, read_preference, sock_info): self._check_ended() self._server_session.last_use = time.monotonic() @@ -891,7 +891,7 @@ def _apply_to(self, command, is_retryable, read_preference): rc = self._transaction.opts.read_concern.document if rc: command['readConcern'] = rc - self._update_read_concern(command) + self._update_read_concern(command, sock_info) command['txnNumber'] = self._server_session.transaction_id command['autocommit'] = False @@ -900,12 +900,15 @@ def _start_retryable_write(self): self._check_ended() self._server_session.inc_transaction_id() - def _update_read_concern(self, cmd): + def _update_read_concern(self, cmd, sock_info): if (self.options.causal_consistency and self.operation_time is not None): cmd.setdefault('readConcern', {})[ 'afterClusterTime'] = self.operation_time if self.options.snapshot: + if sock_info.max_wire_version < 13: + raise ConfigurationError( + 'Snapshot reads require MongoDB 5.0 or later') rc = cmd.setdefault('readConcern', {}) rc['level'] = 'snapshot' if self._snapshot_time is not None: diff --git a/pymongo/message.py b/pymongo/message.py index fb243695e2..13cee23cd3 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -313,10 +313,10 @@ def as_command(self, sock_info): session = self.session sock_info.add_server_api(cmd) if session: - session._apply_to(cmd, False, self.read_preference) + session._apply_to(cmd, False, self.read_preference, sock_info) # Explain does not support readConcern. if not explain and not session.in_transaction: - session._update_read_concern(cmd) + session._update_read_concern(cmd, sock_info) sock_info.send_cluster_time(cmd, session, self.client) # Support auto encryption client = self.client @@ -418,7 +418,7 @@ def as_command(self, sock_info): self.max_await_time_ms) if self.session: - self.session._apply_to(cmd, False, self.read_preference) + self.session._apply_to(cmd, False, self.read_preference, sock_info) sock_info.add_server_api(cmd) sock_info.send_cluster_time(cmd, self.session, self.client) # Support auto encryption diff --git a/pymongo/network.py b/pymongo/network.py index b18a902373..5d6439cdc0 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -91,7 +91,7 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, if read_concern.level: spec['readConcern'] = read_concern.document if session: - session._update_read_concern(spec) + session._update_read_concern(spec, sock_info) if collation is not None: spec['collation'] = collation diff --git a/pymongo/pool.py b/pymongo/pool.py index 14b66fa734..cc07620da5 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -718,7 +718,8 @@ def command(self, dbname, spec, slave_ok=False, self.add_server_api(spec) if session: - session._apply_to(spec, retryable_write, read_preference) + session._apply_to(spec, retryable_write, read_preference, + self) self.send_cluster_time(spec, session, client) listeners = self.listeners if publish_events else None unacknowledged = write_concern and not write_concern.acknowledged diff --git a/test/sessions/unified/snapshot-sessions-not-supported-client-error.json b/test/sessions/unified/snapshot-sessions-not-supported-client-error.json new file mode 100644 index 0000000000..129aa8d74c --- /dev/null +++ b/test/sessions/unified/snapshot-sessions-not-supported-client-error.json @@ -0,0 +1,113 @@ +{ + "description": "snapshot-sessions-not-supported-client-error", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "4.4.99" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Client error on find with snapshot", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [] + }, + { + "description": "Client error on aggregate with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [] + }, + { + "description": "Client error on distinct with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectError": { + "isClientError": true, + "errorContains": "Snapshot reads require MongoDB 5.0 or later" + } + } + ], + "expectEvents": [] + } + ] +} diff --git a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json b/test/sessions/unified/snapshot-sessions-not-supported-server-error.json index b6ce00216a..79213f314f 100644 --- a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json +++ b/test/sessions/unified/snapshot-sessions-not-supported-server-error.json @@ -3,11 +3,7 @@ "schemaVersion": "1.0", "runOnRequirements": [ { - "minServerVersion": "3.6", - "maxServerVersion": "4.4.99" - }, - { - "minServerVersion": "3.6", + "minServerVersion": "5.0", "topologies": [ "single" ] @@ -20,11 +16,6 @@ "observeEvents": [ "commandStartedEvent", "commandFailedEvent" - ], - "ignoreCommandMonitoringEvents": [ - "findAndModify", - "insert", - "update" ] } }, @@ -106,6 +97,91 @@ ] } ] + }, + { + "description": "Server returns an error on aggregate with snapshot", + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on distinct with snapshot", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "distinct" + } + } + ] + } + ] } ] } From 88480299b73ced383a832771c44c95ebc805bcb0 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 29 Jun 2021 17:07:51 -0700 Subject: [PATCH 0389/2111] PYTHON-2766 Warn users away from cursor slices --- doc/api/pymongo/cursor.rst | 2 +- pymongo/cursor.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/doc/api/pymongo/cursor.rst b/doc/api/pymongo/cursor.rst index ffede12375..68b52bccee 100644 --- a/doc/api/pymongo/cursor.rst +++ b/doc/api/pymongo/cursor.rst @@ -20,7 +20,7 @@ .. describe:: c[index] - See :meth:`__getitem__`. + See :meth:`__getitem__` and read the warning. .. automethod:: __getitem__ diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 82ec21ad30..2815a1a276 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -612,6 +612,18 @@ def max_await_time_ms(self, max_await_time_ms): def __getitem__(self, index): """Get a single document or a slice of documents from this cursor. + .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! + for idx in range(10): + print(cursor[idx]) + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. From b991185fd7a70d5ffb51ed8ee30770ec1b6f867a Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 30 Jun 2021 13:21:44 -0700 Subject: [PATCH 0390/2111] PYTHON-2715 Use hello command for monitoring when supported (#654) --- pymongo/ismaster.py | 4 ++++ pymongo/pool.py | 6 ++++-- .../errors/error_handling_handshake.json | 3 ++- .../errors/non-stale-network-error.json | 3 ++- .../non-stale-network-timeout-error.json | 3 ++- ...Version-greater-InterruptedAtShutdown.json | 3 ++- ...eater-InterruptedDueToReplStateChange.json | 3 ++- ...ologyVersion-greater-LegacyNotPrimary.json | 3 ++- ...sion-greater-NotPrimaryNoSecondaryOk.json} | 9 +++++---- ...ersion-greater-NotPrimaryOrSecondary.json} | 9 +++++---- ...gyVersion-greater-NotWritablePrimary.json} | 9 +++++---- ...ogyVersion-greater-PrimarySteppedDown.json | 3 ++- ...ogyVersion-greater-ShutdownInProgress.json | 3 ++- ...Version-missing-InterruptedAtShutdown.json | 3 ++- ...ssing-InterruptedDueToReplStateChange.json | 3 ++- ...ologyVersion-missing-LegacyNotPrimary.json | 3 ++- ...sion-missing-NotPrimaryNoSecondaryOk.json} | 9 +++++---- ...ersion-missing-NotPrimaryOrSecondary.json} | 9 +++++---- ...gyVersion-missing-NotWritablePrimary.json} | 9 +++++---- ...ogyVersion-missing-PrimarySteppedDown.json | 3 ++- ...ogyVersion-missing-ShutdownInProgress.json | 3 ++- ...ccessId-changed-InterruptedAtShutdown.json | 3 ++- ...anged-InterruptedDueToReplStateChange.json | 3 ++- ...n-proccessId-changed-LegacyNotPrimary.json | 3 ++- ...ssId-changed-NotPrimaryNoSecondaryOk.json} | 9 +++++---- ...cessId-changed-NotPrimaryOrSecondary.json} | 9 +++++---- ...roccessId-changed-NotWritablePrimary.json} | 9 +++++---- ...proccessId-changed-PrimarySteppedDown.json | 3 ++- ...proccessId-changed-ShutdownInProgress.json | 3 ++- .../errors/post-42-InterruptedAtShutdown.json | 3 ++- ...st-42-InterruptedDueToReplStateChange.json | 3 ++- .../errors/post-42-LegacyNotPrimary.json | 3 ++- ...n => post-42-NotPrimaryNoSecondaryOk.json} | 9 +++++---- ...son => post-42-NotPrimaryOrSecondary.json} | 9 +++++---- ...r.json => post-42-NotWritablePrimary.json} | 9 +++++---- .../errors/post-42-PrimarySteppedDown.json | 3 ++- .../errors/post-42-ShutdownInProgress.json | 3 ++- .../errors/pre-42-InterruptedAtShutdown.json | 3 ++- ...re-42-InterruptedDueToReplStateChange.json | 3 ++- .../errors/pre-42-LegacyNotPrimary.json | 3 ++- ...on => pre-42-NotPrimaryNoSecondaryOk.json} | 11 +++++----- ...json => pre-42-NotPrimaryOrSecondary.json} | 9 +++++---- ...Ok.json => pre-42-NotWritablePrimary.json} | 11 +++++----- .../errors/pre-42-PrimarySteppedDown.json | 3 ++- .../errors/pre-42-ShutdownInProgress.json | 3 ++- .../errors/prefer-error-code.json | 7 ++++--- ...tale-generation-InterruptedAtShutdown.json | 6 ++++-- ...ation-InterruptedDueToReplStateChange.json | 6 ++++-- ...e-generation-NotPrimaryNoSecondaryOk.json} | 12 ++++++----- ...ale-generation-NotPrimaryOrSecondary.json} | 12 ++++++----- ... stale-generation-NotWritablePrimary.json} | 12 ++++++----- .../stale-generation-PrimarySteppedDown.json | 6 ++++-- .../stale-generation-ShutdownInProgress.json | 6 ++++-- ...dshakeCompletes-InterruptedAtShutdown.json | 6 ++++-- ...letes-InterruptedDueToReplStateChange.json | 6 ++++-- ...erHandshakeCompletes-LegacyNotPrimary.json | 6 ++++-- ...akeCompletes-NotPrimaryNoSecondaryOk.json} | 14 +++++++------ ...shakeCompletes-NotPrimaryOrSecondary.json} | 12 ++++++----- ...andshakeCompletes-NotWritablePrimary.json} | 14 +++++++------ ...HandshakeCompletes-PrimarySteppedDown.json | 6 ++++-- ...HandshakeCompletes-ShutdownInProgress.json | 6 ++++-- ...ation-afterHandshakeCompletes-network.json | 6 ++++-- ...ation-afterHandshakeCompletes-timeout.json | 6 ++++-- ...dshakeCompletes-InterruptedAtShutdown.json | 6 ++++-- ...letes-InterruptedDueToReplStateChange.json | 6 ++++-- ...reHandshakeCompletes-LegacyNotPrimary.json | 6 ++++-- ...akeCompletes-NotPrimaryNoSecondaryOk.json} | 14 +++++++------ ...shakeCompletes-NotPrimaryOrSecondary.json} | 12 ++++++----- ...andshakeCompletes-NotWritablePrimary.json} | 14 +++++++------ ...HandshakeCompletes-PrimarySteppedDown.json | 6 ++++-- ...HandshakeCompletes-ShutdownInProgress.json | 6 ++++-- ...tion-beforeHandshakeCompletes-network.json | 6 ++++-- ...tion-beforeHandshakeCompletes-timeout.json | 6 ++++-- ...topologyVersion-InterruptedAtShutdown.json | 3 ++- ...rsion-InterruptedDueToReplStateChange.json | 3 ++- ...tale-topologyVersion-LegacyNotPrimary.json | 3 ++- ...ologyVersion-NotPrimaryNoSecondaryOk.json} | 13 ++++++------ ...opologyVersion-NotPrimaryOrSecondary.json} | 13 ++++++------ ...e-topologyVersion-NotWritablePrimary.json} | 13 ++++++------ ...le-topologyVersion-PrimarySteppedDown.json | 3 ++- ...le-topologyVersion-ShutdownInProgress.json | 3 ++- .../errors/write_errors_ignored.json | 5 +++-- .../rs/compatible.json | 4 ++-- .../rs/compatible_unknown.json | 2 +- .../rs/discover_arbiters.json | 2 +- .../rs/discover_arbiters_replicaset.json | 2 +- .../rs/discover_ghost.json | 2 +- .../rs/discover_ghost_replicaset.json | 2 +- .../rs/discover_hidden.json | 2 +- .../rs/discover_hidden_replicaset.json | 2 +- .../rs/discover_passives.json | 4 ++-- .../rs/discover_passives_replicaset.json | 4 ++-- .../rs/discover_primary.json | 2 +- .../rs/discover_primary_replicaset.json | 2 +- .../rs/discover_rsother.json | 2 +- .../rs/discover_rsother_replicaset.json | 4 ++-- .../rs/discover_secondary.json | 2 +- .../rs/discover_secondary_replicaset.json | 2 +- .../rs/discovery.json | 8 ++++---- .../rs/equal_electionids.json | 4 ++-- .../rs/hosts_differ_from_seeds.json | 2 +- .../rs/incompatible_arbiter.json | 2 +- .../rs/incompatible_ghost.json | 2 +- .../rs/incompatible_other.json | 2 +- .../rs/ls_timeout.json | 12 +++++------ .../rs/member_reconfig.json | 4 ++-- .../rs/member_standalone.json | 4 ++-- .../rs/new_primary.json | 4 ++-- .../rs/new_primary_new_electionid.json | 6 +++--- .../rs/new_primary_new_setversion.json | 6 +++--- .../rs/new_primary_wrong_set_name.json | 4 ++-- .../rs/normalize_case.json | 2 +- .../rs/normalize_case_me.json | 4 ++-- .../rs/null_election_id.json | 8 ++++---- .../rs/primary_becomes_ghost.json | 4 ++-- .../rs/primary_becomes_mongos.json | 4 ++-- .../rs/primary_becomes_standalone.json | 2 +- .../rs/primary_changes_set_name.json | 4 ++-- .../rs/primary_disconnect.json | 2 +- .../rs/primary_disconnect_electionid.json | 10 +++++----- .../rs/primary_disconnect_setversion.json | 10 +++++----- ...int_from_secondary_with_mismatched_me.json | 4 ++-- .../rs/primary_mismatched_me.json | 2 +- .../rs/primary_mismatched_me_not_removed.json | 4 ++-- .../rs/primary_reports_new_member.json | 8 ++++---- .../primary_to_no_primary_mismatched_me.json | 4 ++-- .../rs/primary_wrong_set_name.json | 2 +- .../discovery_and_monitoring/rs/repeated.json | 10 +++++----- .../rs/replicaset_rsnp.json | 2 +- .../rs/response_from_removed.json | 4 ++-- .../rs/sec_not_auth.json | 4 ++-- .../rs/secondary_ignore_ok_0.json | 4 ++-- .../rs/secondary_mismatched_me.json | 2 +- .../rs/secondary_wrong_set_name.json | 2 +- ...secondary_wrong_set_name_with_primary.json | 4 ++-- .../rs/setversion_without_electionid.json | 4 ++-- .../rs/stepdown_change_set_name.json | 4 ++-- test/discovery_and_monitoring/rs/too_new.json | 4 ++-- test/discovery_and_monitoring/rs/too_old.json | 4 ++-- .../rs/topology_version_equal.json | 4 ++-- .../rs/topology_version_greater.json | 10 +++++----- .../rs/topology_version_less.json | 4 ++-- .../rs/unexpected_mongos.json | 2 +- .../rs/use_setversion_without_electionid.json | 6 +++--- .../rs/wrong_set_name.json | 2 +- .../sharded/compatible.json | 4 ++-- .../sharded/discover_single_mongos.json | 2 +- .../sharded/ls_timeout_mongos.json | 8 ++++---- .../sharded/mongos_disconnect.json | 6 +++--- .../sharded/multiple_mongoses.json | 4 ++-- .../sharded/non_mongos_removed.json | 4 ++-- .../sharded/too_new.json | 4 ++-- .../sharded/too_old.json | 4 ++-- .../single/compatible.json | 2 +- .../single/direct_connection_external_ip.json | 2 +- .../single/direct_connection_mongos.json | 2 +- .../single/direct_connection_replicaset.json | 2 +- .../single/direct_connection_rsarbiter.json | 2 +- .../single/direct_connection_rsprimary.json | 2 +- .../single/direct_connection_rssecondary.json | 2 +- .../single/direct_connection_standalone.json | 2 +- .../direct_connection_wrong_set_name.json | 4 ++-- .../single/discover_standalone.json | 2 +- .../single/ls_timeout_standalone.json | 2 +- .../single/not_ok_response.json | 6 +++--- .../single/standalone_removed.json | 2 +- ...son => standalone_using_legacy_hello.json} | 6 +++--- .../single/too_new.json | 2 +- .../single/too_old.json | 2 +- .../single/too_old_then_upgraded.json | 4 ++-- .../connectTimeoutMS.json | 4 ++-- ...nd-error.json => hello-command-error.json} | 16 +++++++-------- ...rk-error.json => hello-network-error.json} | 16 +++++++-------- ...Master-timeout.json => hello-timeout.json} | 20 +++++++++---------- .../minPoolSize-error.json | 4 ++-- .../discovered_standalone.json | 3 ++- .../replica_set_with_no_primary.json | 3 ++- .../replica_set_with_primary.json | 3 ++- .../replica_set_with_removal.json | 6 ++++-- .../sdam_monitoring/required_replica_set.json | 2 +- test/sdam_monitoring/standalone.json | 2 +- ...ne_suppress_equal_description_changes.json | 4 ++-- 182 files changed, 528 insertions(+), 410 deletions(-) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json => non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json} (87%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-greater-NotMasterOrSecondary.json => non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json} (87%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-greater-NotMaster.json => non-stale-topologyVersion-greater-NotWritablePrimary.json} (88%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json => non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json} (85%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-missing-NotMasterOrSecondary.json => non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json} (85%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-missing-NotMaster.json => non-stale-topologyVersion-missing-NotWritablePrimary.json} (86%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json => non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json} (92%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json => non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json} (92%) rename test/discovery_and_monitoring/errors/{non-stale-topologyVersion-proccessId-changed-NotMaster.json => non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json} (92%) rename test/discovery_and_monitoring/errors/{post-42-NotMasterNoSlaveOk.json => post-42-NotPrimaryNoSecondaryOk.json} (84%) rename test/discovery_and_monitoring/errors/{post-42-NotMasterOrSecondary.json => post-42-NotPrimaryOrSecondary.json} (84%) rename test/discovery_and_monitoring/errors/{post-42-NotMaster.json => post-42-NotWritablePrimary.json} (85%) rename test/discovery_and_monitoring/errors/{pre-42-NotMaster.json => pre-42-NotPrimaryNoSecondaryOk.json} (81%) rename test/discovery_and_monitoring/errors/{pre-42-NotMasterOrSecondary.json => pre-42-NotPrimaryOrSecondary.json} (83%) rename test/discovery_and_monitoring/errors/{pre-42-NotMasterNoSlaveOk.json => pre-42-NotWritablePrimary.json} (84%) rename test/discovery_and_monitoring/errors/{stale-generation-NotMasterNoSlaveOk.json => stale-generation-NotPrimaryNoSecondaryOk.json} (92%) rename test/discovery_and_monitoring/errors/{stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json => stale-generation-NotPrimaryOrSecondary.json} (92%) rename test/discovery_and_monitoring/errors/{stale-generation-afterHandshakeCompletes-NotMaster.json => stale-generation-NotWritablePrimary.json} (92%) rename test/discovery_and_monitoring/errors/{stale-generation-NotMaster.json => stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json} (91%) rename test/discovery_and_monitoring/errors/{stale-generation-NotMasterOrSecondary.json => stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json} (91%) rename test/discovery_and_monitoring/errors/{stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json => stale-generation-afterHandshakeCompletes-NotWritablePrimary.json} (92%) rename test/discovery_and_monitoring/errors/{stale-generation-beforeHandshakeCompletes-NotMaster.json => stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json} (91%) rename test/discovery_and_monitoring/errors/{stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json => stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json} (91%) rename test/discovery_and_monitoring/errors/{stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json => stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json} (92%) rename test/discovery_and_monitoring/errors/{stale-topologyVersion-NotMasterNoSlaveOk.json => stale-topologyVersion-NotPrimaryNoSecondaryOk.json} (88%) rename test/discovery_and_monitoring/errors/{stale-topologyVersion-NotMasterOrSecondary.json => stale-topologyVersion-NotPrimaryOrSecondary.json} (89%) rename test/discovery_and_monitoring/errors/{stale-topologyVersion-NotMaster.json => stale-topologyVersion-NotWritablePrimary.json} (89%) rename test/discovery_and_monitoring/single/{direct_connection_slave.json => standalone_using_legacy_hello.json} (79%) rename test/discovery_and_monitoring_integration/{isMaster-command-error.json => hello-command-error.json} (94%) rename test/discovery_and_monitoring_integration/{isMaster-network-error.json => hello-network-error.json} (93%) rename test/discovery_and_monitoring_integration/{isMaster-timeout.json => hello-timeout.json} (95%) diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py index 59eedec227..e3a8f7f067 100644 --- a/pymongo/ismaster.py +++ b/pymongo/ismaster.py @@ -192,3 +192,7 @@ def awaitable(self): @property def service_id(self): return self._doc.get('serviceId') + + @property + def hello_ok(self): + return self._doc.get('helloOk', False) diff --git a/pymongo/pool.py b/pymongo/pool.py index cc07620da5..acce1c1ec2 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -521,6 +521,7 @@ def __init__(self, sock, pool, address, id): self.max_message_size = MAX_MESSAGE_SIZE self.max_write_batch_size = MAX_WRITE_BATCH_SIZE self.supports_sessions = False + self.hello_ok = None self.is_mongos = False self.op_msg_enabled = False self.listeners = pool.opts.event_listeners @@ -568,10 +569,10 @@ def unpin(self): self.close_socket(ConnectionClosedReason.STALE) def hello_cmd(self): - if self.opts.server_api: + if self.opts.server_api or self.hello_ok: return SON([(HelloCompat.CMD, 1)]) else: - return SON([(HelloCompat.LEGACY_CMD, 1)]) + return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) def ismaster(self, all_credentials=None): return self._ismaster(None, None, None, all_credentials) @@ -623,6 +624,7 @@ def _ismaster(self, cluster_time, topology_version, self.max_write_batch_size = ismaster.max_write_batch_size self.supports_sessions = ( ismaster.logical_session_timeout_minutes is not None) + self.hello_ok = ismaster.hello_ok self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos if performing_handshake and self.compression_settings: ctx = self.compression_settings.get_compression_context( diff --git a/test/discovery_and_monitoring/errors/error_handling_handshake.json b/test/discovery_and_monitoring/errors/error_handling_handshake.json index cdd6df6247..56ca7d1132 100644 --- a/test/discovery_and_monitoring/errors/error_handling_handshake.json +++ b/test/discovery_and_monitoring/errors/error_handling_handshake.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-network-error.json b/test/discovery_and_monitoring/errors/non-stale-network-error.json index d0765dbb6d..c22a47dc8a 100644 --- a/test/discovery_and_monitoring/errors/non-stale-network-error.json +++ b/test/discovery_and_monitoring/errors/non-stale-network-error.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json index 7c1a197a62..03dc5b66c9 100644 --- a/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json +++ b/test/discovery_and_monitoring/errors/non-stale-network-timeout-error.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json index 68b7f455aa..777e703a3c 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json index d4a409d268..c4aa7fb71b 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json index e1f33b81bb..2a9bc8a5cf 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json similarity index 87% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json index 30d8698aac..638aa306cb 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion greater NotMasterNoSlaveOk error", + "description": "Non-stale topologyVersion greater NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion greater NotMasterNoSlaveOk error marks server Unknown", + "description": "Non-stale topologyVersion greater NotPrimaryNoSecondaryOk error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json similarity index 87% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json index 9d1c236565..f327954a9d 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion greater NotMasterOrSecondary error", + "description": "Non-stale topologyVersion greater NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion greater NotMasterOrSecondary error marks server Unknown", + "description": "Non-stale topologyVersion greater NotPrimaryOrSecondary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json similarity index 88% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json index dbd7154573..0ac02fb19b 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotMaster.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion greater NotMaster error", + "description": "Non-stale topologyVersion greater NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion greater NotMaster error marks server Unknown", + "description": "Non-stale topologyVersion greater NotWritablePrimary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json index d189dd3fba..daf2a7e8e1 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json index 9e88f5ce3f..a7d9e1fe24 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-greater-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json index 06c61a93da..2c59e785ab 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json index ebf5a1a4a1..f2cb834e83 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json index ccaacd1cfe..095128d615 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json similarity index 85% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json index 502ebc549c..3d7312d4a5 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion missing NotMasterNoSlaveOk error", + "description": "Non-stale topologyVersion missing NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion missing NotMasterNoSlaveOk error marks server Unknown", + "description": "Non-stale topologyVersion missing NotPrimaryNoSecondaryOk error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435 } } diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json similarity index 85% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json index 8e84038e29..a457ba3072 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion missing NotMasterOrSecondary error", + "description": "Non-stale topologyVersion missing NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion missing NotMasterOrSecondary error marks server Unknown", + "description": "Non-stale topologyVersion missing NotPrimaryOrSecondary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436 } } diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json similarity index 86% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json index 54ce115e68..b7427a3f3d 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotMaster.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion missing NotMaster error", + "description": "Non-stale topologyVersion missing NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion missing NotMaster error marks server Unknown", + "description": "Non-stale topologyVersion missing NotWritablePrimary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107 } } diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json index f7e0932542..8146a60d6e 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json index 5eceb1bcee..c7597007d7 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-missing-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json index 2b77eb2087..8448c60599 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json index 584219e508..9d601c4ede 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json index da36e9b33c..8be833f104 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json similarity index 92% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json index 812b973524..f2f94c0d00 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion proccessId changed NotMasterNoSlaveOk error", + "description": "Non-stale topologyVersion proccessId changed NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion proccessId changed NotMasterNoSlaveOk error marks server Unknown", + "description": "Non-stale topologyVersion proccessId changed NotPrimaryNoSecondaryOk error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json similarity index 92% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json index 027f4bddee..6d3b397566 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion proccessId changed NotMasterOrSecondary error", + "description": "Non-stale topologyVersion proccessId changed NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion proccessId changed NotMasterOrSecondary error marks server Unknown", + "description": "Non-stale topologyVersion proccessId changed NotPrimaryOrSecondary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json similarity index 92% rename from test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json rename to test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json index b7bdfabd2d..332ddf5ec1 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotMaster.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Non-stale topologyVersion proccessId changed NotMaster error", + "description": "Non-stale topologyVersion proccessId changed NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Non-stale topologyVersion proccessId changed NotMaster error marks server Unknown", + "description": "Non-stale topologyVersion proccessId changed NotWritablePrimary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json index 6a49618cfd..c22a537f58 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json index 3c3c934f8e..eaaab79273 100644 --- a/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/non-stale-topologyVersion-proccessId-changed-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json index 9e171142d1..40c4ed6c80 100644 --- a/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json index 52410f0b27..5c489f5ecb 100644 --- a/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/post-42-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json index 731da196b5..f0851b299e 100644 --- a/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/post-42-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json similarity index 84% rename from test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json index fa5c1f37d5..a675f0ca54 100644 --- a/test/discovery_and_monitoring/errors/post-42-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/post-42-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 NotMasterNoSlaveOk error", + "description": "Post-4.2 NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,7 @@ } }, { - "description": "Post-4.2 NotMasterNoSlaveOk error marks server Unknown", + "description": "Post-4.2 NotPrimaryNoSecondaryOk error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -45,7 +46,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435 } } diff --git a/test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json similarity index 84% rename from test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json index 5023662723..ea9bf1d16b 100644 --- a/test/discovery_and_monitoring/errors/post-42-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/post-42-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 NotMasterOrSecondary error", + "description": "Post-4.2 NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,7 @@ } }, { - "description": "Post-4.2 NotMasterOrSecondary error marks server Unknown", + "description": "Post-4.2 NotPrimaryOrSecondary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -45,7 +46,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436 } } diff --git a/test/discovery_and_monitoring/errors/post-42-NotMaster.json b/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json similarity index 85% rename from test/discovery_and_monitoring/errors/post-42-NotMaster.json rename to test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json index 6cbb23b89c..10211fca70 100644 --- a/test/discovery_and_monitoring/errors/post-42-NotMaster.json +++ b/test/discovery_and_monitoring/errors/post-42-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Post-4.2 NotMaster error", + "description": "Post-4.2 NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,7 @@ } }, { - "description": "Post-4.2 NotMaster error marks server Unknown", + "description": "Post-4.2 NotWritablePrimary error marks server Unknown", "applicationErrors": [ { "address": "a:27017", @@ -45,7 +46,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107 } } diff --git a/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json index d58ff26e52..fa98d0bf06 100644 --- a/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/post-42-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json index a44ecc3824..cd587205b6 100644 --- a/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/post-42-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json index 5944fe705c..9f6ea212e5 100644 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json index 06ed118779..7e5f235713 100644 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json index db5acd718d..1635f1a856 100644 --- a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMaster.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json similarity index 81% rename from test/discovery_and_monitoring/errors/pre-42-NotMaster.json rename to test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json index a6a6bba87a..0e70ede02c 100644 --- a/test/discovery_and_monitoring/errors/pre-42-NotMaster.json +++ b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Pre-4.2 NotMaster error", + "description": "Pre-4.2 NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,7 @@ } }, { - "description": "Pre-4.2 NotMaster error marks server Unknown and clears the pool", + "description": "Pre-4.2 NotPrimaryNoSecondaryOk error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", @@ -45,8 +46,8 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", - "code": 10107 + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435 } } ], diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json similarity index 83% rename from test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json index f515898281..3fefb21663 100644 --- a/test/discovery_and_monitoring/errors/pre-42-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Pre-4.2 NotMasterOrSecondary error", + "description": "Pre-4.2 NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,7 @@ } }, { - "description": "Pre-4.2 NotMasterOrSecondary error marks server Unknown and clears the pool", + "description": "Pre-4.2 NotPrimaryOrSecondary error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", @@ -45,7 +46,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436 } } diff --git a/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json similarity index 84% rename from test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json index 1eb72bc033..d010da0a5b 100644 --- a/test/discovery_and_monitoring/errors/pre-42-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Pre-4.2 NotMasterNoSlaveOk error", + "description": "Pre-4.2 NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +37,7 @@ } }, { - "description": "Pre-4.2 NotMasterNoSlaveOk error marks server Unknown and clears the pool", + "description": "Pre-4.2 NotWritablePrimary error marks server Unknown and clears the pool", "applicationErrors": [ { "address": "a:27017", @@ -45,8 +46,8 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", - "code": 13435 + "errmsg": "NotWritablePrimary", + "code": 10107 } } ], diff --git a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json index e4c3228afc..02956d201d 100644 --- a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json index 00dc7c1b5b..fc3a5aa6fe 100644 --- a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/prefer-error-code.json b/test/discovery_and_monitoring/errors/prefer-error-code.json index 486103a457..21d123f429 100644 --- a/test/discovery_and_monitoring/errors/prefer-error-code.json +++ b/test/discovery_and_monitoring/errors/prefer-error-code.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "errmsg \"not master\" gets ignored when error code exists", + "description": "errmsg \"not writable primary\" gets ignored when error code exists", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "not master", + "errmsg": "not writable primary", "code": 1 } } diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json index f675f2651e..2f7c7fd13b 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json index a4ae13ee78..b0b51ef676 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/stale-generation-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json similarity index 92% rename from test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json index d58349b622..b68e23b7a7 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error", + "description": "Stale generation NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,7 +136,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json similarity index 92% rename from test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json index 851bea0928..d9b3562654 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/stale-generation-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterOrSecondary error afterHandshakeCompletes", + "description": "Stale generation NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMasterOrSecondary error (stale generation)", + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,7 +136,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json b/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json similarity index 92% rename from test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json rename to test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json index dbdce1583a..90889356dd 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMaster.json +++ b/test/discovery_and_monitoring/errors/stale-generation-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMaster error afterHandshakeCompletes", + "description": "Stale generation NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMaster error (stale generation)", + "description": "Ignore stale NotWritablePrimary error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,7 +136,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json index 2e80ba4949..0a707a1c07 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/stale-generation-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json index 9b5656d48b..5da3413d5b 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/stale-generation-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json index 9da8b60fbb..d29310fb61 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json index f0a7df6170..376bb93770 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json index 2169c4a6fb..990fc45e4e 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotMaster.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json similarity index 91% rename from test/discovery_and_monitoring/errors/stale-generation-NotMaster.json rename to test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json index cfe779ee6f..1744a82f77 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-NotMaster.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMaster error", + "description": "Stale generation NotPrimaryNoSecondaryOk error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMaster error (stale generation)", + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,8 +136,8 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", - "code": 10107, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, "topologyVersion": { "processId": { "$oid": "000000000000000000000001" diff --git a/test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json similarity index 91% rename from test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json index 11ee062589..57ca1cf158 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterOrSecondary error", + "description": "Stale generation NotPrimaryOrSecondary error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMasterOrSecondary error (stale generation)", + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,7 +136,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json similarity index 92% rename from test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json index 707a58bcaf..995453c82b 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "description": "Stale generation NotWritablePrimary error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale NotWritablePrimary error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,8 +136,8 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", - "code": 13435, + "errmsg": "NotWritablePrimary", + "code": 10107, "topologyVersion": { "processId": { "$oid": "000000000000000000000001" diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json index 7ac8d2db24..bf4c85d24f 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json index e250c448aa..9374900e06 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json index 94b8322d2d..f5d01b6540 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-network.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json index 490703de90..fa84343b0b 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json +++ b/test/discovery_and_monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json index 8b10f5eb75..72fac9a86e 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json index ec78d667c2..3c713592a3 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json index 674cb994cd..257b6ec6fb 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json similarity index 91% rename from test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json rename to test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json index 760f260d48..dcb5716f44 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMaster.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMaster error beforeHandshakeCompletes", + "description": "Stale generation NotPrimaryNoSecondaryOk error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMaster error (stale generation)", + "description": "Ignore stale NotPrimaryNoSecondaryOk error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,8 +136,8 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", - "code": 10107, + "errmsg": "NotPrimaryNoSecondaryOk", + "code": 13435, "topologyVersion": { "processId": { "$oid": "000000000000000000000001" diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json similarity index 91% rename from test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json index eced40c59c..58cefafae9 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterOrSecondary error beforeHandshakeCompletes", + "description": "Stale generation NotPrimaryOrSecondary error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMasterOrSecondary error (stale generation)", + "description": "Ignore stale NotPrimaryOrSecondary error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,7 +136,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json similarity index 92% rename from test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json index bb7946aca3..c92b01e054 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "description": "Stale generation NotWritablePrimary error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -124,7 +126,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale NotWritablePrimary error (stale generation)", "applicationErrors": [ { "address": "a:27017", @@ -134,8 +136,8 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", - "code": 13435, + "errmsg": "NotWritablePrimary", + "code": 10107, "topologyVersion": { "processId": { "$oid": "000000000000000000000001" diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json index d33dc98db3..62759b6ad9 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json index ee38cc8bbe..4661632c4f 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json index 3e581773eb..15b044fc73 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json index 24c8c6e507..acbb9e581e 100644 --- a/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json +++ b/test/discovery_and_monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -82,7 +83,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json index 8449ac63b2..f2207a04d5 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedAtShutdown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json index 0cdd1727d3..4387451ce6 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-InterruptedDueToReplStateChange.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json index beee51e666..8c0cf00f22 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json similarity index 88% rename from test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json rename to test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json index e894dae6d8..99a828326c 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterNoSlaveOk.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryNoSecondaryOk.json @@ -1,5 +1,5 @@ { - "description": "Stale topologyVersion NotMasterNoSlaveOk error", + "description": "Stale topologyVersion NotPrimaryNoSecondaryOk error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (topologyVersion less)", + "description": "Ignore stale NotPrimaryNoSecondaryOk error (topologyVersion less)", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435, "topologyVersion": { "processId": { @@ -97,7 +98,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (topologyVersion equal)", + "description": "Ignore stale NotPrimaryNoSecondaryOk error (topologyVersion equal)", "applicationErrors": [ { "address": "a:27017", @@ -106,7 +107,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json similarity index 89% rename from test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json rename to test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json index 17243c9022..ba2ea87106 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMasterOrSecondary.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotPrimaryOrSecondary.json @@ -1,5 +1,5 @@ { - "description": "Stale topologyVersion NotMasterOrSecondary error", + "description": "Stale topologyVersion NotPrimaryOrSecondary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Ignore stale NotMasterOrSecondary error (topologyVersion less)", + "description": "Ignore stale NotPrimaryOrSecondary error (topologyVersion less)", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { @@ -97,7 +98,7 @@ } }, { - "description": "Ignore stale NotMasterOrSecondary error (topologyVersion equal)", + "description": "Ignore stale NotPrimaryOrSecondary error (topologyVersion equal)", "applicationErrors": [ { "address": "a:27017", @@ -106,7 +107,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMasterOrSecondary", + "errmsg": "NotPrimaryOrSecondary", "code": 13436, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json similarity index 89% rename from test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json rename to test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json index 5823d0446f..8edd317a73 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-NotMaster.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-NotWritablePrimary.json @@ -1,5 +1,5 @@ { - "description": "Stale topologyVersion NotMaster error", + "description": "Stale topologyVersion NotWritablePrimary error", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -51,7 +52,7 @@ } }, { - "description": "Ignore stale NotMaster error (topologyVersion less)", + "description": "Ignore stale NotWritablePrimary error (topologyVersion less)", "applicationErrors": [ { "address": "a:27017", @@ -60,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107, "topologyVersion": { "processId": { @@ -97,7 +98,7 @@ } }, { - "description": "Ignore stale NotMaster error (topologyVersion equal)", + "description": "Ignore stale NotWritablePrimary error (topologyVersion equal)", "applicationErrors": [ { "address": "a:27017", @@ -106,7 +107,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "NotMaster", + "errmsg": "NotWritablePrimary", "code": 10107, "topologyVersion": { "processId": { diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json index 93d9678419..da8e4755eb 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-PrimarySteppedDown.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json index 563eb60d9f..aa252e1dc4 100644 --- a/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json +++ b/test/discovery_and_monitoring/errors/stale-topologyVersion-ShutdownInProgress.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/errors/write_errors_ignored.json b/test/discovery_and_monitoring/errors/write_errors_ignored.json index 6c511c1b6e..b588807e08 100644 --- a/test/discovery_and_monitoring/errors/write_errors_ignored.json +++ b/test/discovery_and_monitoring/errors/write_errors_ignored.json @@ -9,7 +9,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -62,7 +63,7 @@ "ok": 1, "writeErrors": [ { - "errmsg": "NotMasterNoSlaveOk", + "errmsg": "NotPrimaryNoSecondaryOk", "code": 13435, "index": 0 } diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json index d670770f6d..5a01fe4935 100644 --- a/test/discovery_and_monitoring/rs/compatible.json +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +22,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json index 1105da8764..237f663fd1 100644 --- a/test/discovery_and_monitoring/rs/compatible_unknown.json +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json index ad337c127a..f30bfd0b6a 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json index dc00dca5f0..f7ed29fb68 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json index 1e2ca91bcb..b4dd6212a7 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost.json +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json index df504b6ca4..3e673b739e 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json index cb68120eaf..08a3cc1b2d 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden.json +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json index 216328dfa5..95346a8380 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json index 05922dc51c..2c71f461db 100644 --- a/test/discovery_and_monitoring/rs/discover_passives.json +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -43,7 +43,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "passive": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json index f9d8c2e032..c50d94f21d 100644 --- a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -43,7 +43,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "passive": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json index b9032144d4..c322fe5f76 100644 --- a/test/discovery_and_monitoring/rs/discover_primary.json +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json index 6f639b1c7e..0ba8771df1 100644 --- a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json index 2cf5a5a6db..ec50271bde 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother.json +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": false, "hosts": [ "c:27017", diff --git a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json index d9420ca529..fb8590019a 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ @@ -24,7 +24,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": false, "hosts": [ "c:27017", diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json index 02123625a7..fb758e31de 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary.json +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json index 3dde3166b4..69697114dd 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discovery.json b/test/discovery_and_monitoring/rs/discovery.json index 57ed568e3b..4a489f68ab 100644 --- a/test/discovery_and_monitoring/rs/discovery.json +++ b/test/discovery_and_monitoring/rs/discovery.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ @@ -47,7 +47,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "d:27017", @@ -91,7 +91,7 @@ "d:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017", @@ -134,7 +134,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json index f8d20b350d..0e8efc46cf 100644 --- a/test/discovery_and_monitoring/rs/equal_electionids.json +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json index a67db57d0c..9323768b00 100644 --- a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json +++ b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017" diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json index aa582208d6..7f6279b51a 100644 --- a/test/discovery_and_monitoring/rs/incompatible_arbiter.json +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json index 088159c3ab..c6bc53ca0a 100644 --- a/test/discovery_and_monitoring/rs/incompatible_ghost.json +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json index b65d674b42..8870eb6622 100644 --- a/test/discovery_and_monitoring/rs/incompatible_other.json +++ b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/ls_timeout.json b/test/discovery_and_monitoring/rs/ls_timeout.json index 6860742c9e..a04ecaef89 100644 --- a/test/discovery_and_monitoring/rs/ls_timeout.json +++ b/test/discovery_and_monitoring/rs/ls_timeout.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -53,7 +53,7 @@ "d:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 @@ -90,7 +90,7 @@ "e:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "hosts": [ "a:27017", "b:27017", @@ -136,7 +136,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", @@ -184,7 +184,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "setName": "rs", "hidden": true, "logicalSessionTimeoutMinutes": 1, @@ -226,7 +226,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/member_reconfig.json b/test/discovery_and_monitoring/rs/member_reconfig.json index 336acff023..2b075241bb 100644 --- a/test/discovery_and_monitoring/rs/member_reconfig.json +++ b/test/discovery_and_monitoring/rs/member_reconfig.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -41,7 +41,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/member_standalone.json b/test/discovery_and_monitoring/rs/member_standalone.json index a97dfabf52..15beec0046 100644 --- a/test/discovery_and_monitoring/rs/member_standalone.json +++ b/test/discovery_and_monitoring/rs/member_standalone.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } @@ -32,7 +32,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json index eb73b304bd..e629e3b28b 100644 --- a/test/discovery_and_monitoring/rs/new_primary.json +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -41,7 +41,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index 67f314b1ed..6c88dc2399 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -54,7 +54,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -100,7 +100,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index c1ec50c845..cebdf9ab4e 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -54,7 +54,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -100,7 +100,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json index 7be79d2d3c..9bcab322ed 100644 --- a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -41,7 +41,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/normalize_case.json b/test/discovery_and_monitoring/rs/normalize_case.json index 4d0b0ae629..6cfd75168f 100644 --- a/test/discovery_and_monitoring/rs/normalize_case.json +++ b/test/discovery_and_monitoring/rs/normalize_case.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "A:27017" diff --git a/test/discovery_and_monitoring/rs/normalize_case_me.json b/test/discovery_and_monitoring/rs/normalize_case_me.json index e854e7fb43..c89522275c 100644 --- a/test/discovery_and_monitoring/rs/normalize_case_me.json +++ b/test/discovery_and_monitoring/rs/normalize_case_me.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "me": "A:27017", "hosts": [ @@ -51,7 +51,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "me": "B:27017", diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 3de0a74e41..d3c096597f 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -52,7 +52,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -104,7 +104,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -151,7 +151,7 @@ "c:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json index 897120f1fb..3ca8f23786 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +36,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json index 8d4967b7dd..4b33cbea14 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +36,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json index e35c75f4bc..9dc8254920 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/primary_changes_set_name.json b/test/discovery_and_monitoring/rs/primary_changes_set_name.json index d008326123..45434d45f3 100644 --- a/test/discovery_and_monitoring/rs/primary_changes_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_changes_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +36,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect.json b/test/discovery_and_monitoring/rs/primary_disconnect.json index 271ca5874e..06a103962a 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index 59c8faf180..a374e3e4fb 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -101,7 +101,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -144,7 +144,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -190,7 +190,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index beb023e4f4..f1e2c0097c 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -101,7 +101,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -144,7 +144,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -190,7 +190,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json index 806fda37c3..fb4fe4dc13 100644 --- a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "me": "c:27017", "hosts": [ @@ -39,7 +39,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "me": "b:27017", "hosts": [ "b:27017" diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_mismatched_me.json index 8d18a6971f..49ab1bf74e 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me.json @@ -26,7 +26,7 @@ "a:27017", "b:27017" ], - "ismaster": true, + "isWritablePrimary": true, "ok": 1, "setName": "rs", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json index a9e01987c8..41d9452844 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -12,7 +12,7 @@ "localhost:27017", "localhost:27018" ], - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "primary": "localhost:27017", "me": "a:27017", @@ -47,7 +47,7 @@ "localhost:27017", "localhost:27018" ], - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "localhost:27017", diff --git a/test/discovery_and_monitoring/rs/primary_reports_new_member.json b/test/discovery_and_monitoring/rs/primary_reports_new_member.json index 6ed55ab3d1..8b99a4399e 100644 --- a/test/discovery_and_monitoring/rs/primary_reports_new_member.json +++ b/test/discovery_and_monitoring/rs/primary_reports_new_member.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ @@ -42,7 +42,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -75,7 +75,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -113,7 +113,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "b:27017", diff --git a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json index fdb250ffef..5e63a509be 100644 --- a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -42,7 +42,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "c:27017", "d:27017" diff --git a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json index eda4787173..c87b18d1b2 100644 --- a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json index 392d485794..1921bba580 100644 --- a/test/discovery_and_monitoring/rs/repeated.json +++ b/test/discovery_and_monitoring/rs/repeated.json @@ -1,5 +1,5 @@ { - "description": "Repeated ismaster response must be processed", + "description": "Repeated isWritablePrimary response must be processed", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ @@ -45,7 +45,7 @@ "c:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } @@ -72,7 +72,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ @@ -109,7 +109,7 @@ "c:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "c:27017" diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json index a0f69de486..89f0740db9 100644 --- a/test/discovery_and_monitoring/rs/replicaset_rsnp.json +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/rs/response_from_removed.json b/test/discovery_and_monitoring/rs/response_from_removed.json index dd3562d7fc..f6dfe3b3a6 100644 --- a/test/discovery_and_monitoring/rs/response_from_removed.json +++ b/test/discovery_and_monitoring/rs/response_from_removed.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" @@ -36,7 +36,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/sec_not_auth.json b/test/discovery_and_monitoring/rs/sec_not_auth.json index 7d5e700035..4f448c9068 100644 --- a/test/discovery_and_monitoring/rs/sec_not_auth.json +++ b/test/discovery_and_monitoring/rs/sec_not_auth.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +22,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index 6d3033eeee..e3c262bff7 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +22,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json index 769e272a66..3c3634e6fa 100644 --- a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -27,7 +27,7 @@ "a:27017", "b:27017" ], - "ismaster": false, + "isWritablePrimary": false, "ok": 1, "setName": "rs", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json index 4c132b633e..be621c06c8 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json index 73cbab7c5d..eebc196447 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -41,7 +41,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 0500c6d157..59333b533a 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -46,7 +46,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json index 39a4f532dd..ce20d1a542 100644 --- a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json +++ b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -36,7 +36,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/too_new.json b/test/discovery_and_monitoring/rs/too_new.json index 945145af88..97de822036 100644 --- a/test/discovery_and_monitoring/rs/too_new.json +++ b/test/discovery_and_monitoring/rs/too_new.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +22,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json index 3f9eadc4bc..5dae8a2b35 100644 --- a/test/discovery_and_monitoring/rs/too_old.json +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -22,7 +22,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/topology_version_equal.json b/test/discovery_and_monitoring/rs/topology_version_equal.json index ba84e059a0..a2e81e338d 100644 --- a/test/discovery_and_monitoring/rs/topology_version_equal.json +++ b/test/discovery_and_monitoring/rs/topology_version_equal.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -52,7 +52,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/topology_version_greater.json b/test/discovery_and_monitoring/rs/topology_version_greater.json index afa8108ea2..c1bc773de4 100644 --- a/test/discovery_and_monitoring/rs/topology_version_greater.json +++ b/test/discovery_and_monitoring/rs/topology_version_greater.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -52,7 +52,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -101,7 +101,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "c:27017" @@ -150,7 +150,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "d:27017" @@ -184,7 +184,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "e:27017" diff --git a/test/discovery_and_monitoring/rs/topology_version_less.json b/test/discovery_and_monitoring/rs/topology_version_less.json index ae45f803d4..9376065646 100644 --- a/test/discovery_and_monitoring/rs/topology_version_less.json +++ b/test/discovery_and_monitoring/rs/topology_version_less.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -52,7 +52,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/unexpected_mongos.json b/test/discovery_and_monitoring/rs/unexpected_mongos.json index 95c7aa9dce..f74d298969 100644 --- a/test/discovery_and_monitoring/rs/unexpected_mongos.json +++ b/test/discovery_and_monitoring/rs/unexpected_mongos.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 16225d6b83..6e9c2370dc 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -54,7 +54,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -94,7 +94,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/rs/wrong_set_name.json b/test/discovery_and_monitoring/rs/wrong_set_name.json index 45be2f502b..cc4e3e963c 100644 --- a/test/discovery_and_monitoring/rs/wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/wrong_set_name.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "b:27017", diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json index 3dae1f7ea1..3f6df3e09e 100644 --- a/test/discovery_and_monitoring/sharded/compatible.json +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 1000 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json index 427889f8cc..b3d07f893f 100644 --- a/test/discovery_and_monitoring/sharded/discover_single_mongos.json +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json index 96f8dec17a..7a46adee51 100644 --- a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json +++ b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, @@ -19,7 +19,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, @@ -49,7 +49,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, @@ -60,7 +60,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/mongos_disconnect.json b/test/discovery_and_monitoring/sharded/mongos_disconnect.json index 04015694a8..f0f98648a7 100644 --- a/test/discovery_and_monitoring/sharded/mongos_disconnect.json +++ b/test/discovery_and_monitoring/sharded/mongos_disconnect.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -70,7 +70,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/multiple_mongoses.json b/test/discovery_and_monitoring/sharded/multiple_mongoses.json index 6e60fd05c7..7539836099 100644 --- a/test/discovery_and_monitoring/sharded/multiple_mongoses.json +++ b/test/discovery_and_monitoring/sharded/multiple_mongoses.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/sharded/non_mongos_removed.json b/test/discovery_and_monitoring/sharded/non_mongos_removed.json index 58cf7c07d7..a9c3a4b6a3 100644 --- a/test/discovery_and_monitoring/sharded/non_mongos_removed.json +++ b/test/discovery_and_monitoring/sharded/non_mongos_removed.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "b:27017" ], diff --git a/test/discovery_and_monitoring/sharded/too_new.json b/test/discovery_and_monitoring/sharded/too_new.json index 9521e11789..b4f9f14951 100644 --- a/test/discovery_and_monitoring/sharded/too_new.json +++ b/test/discovery_and_monitoring/sharded/too_new.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 999, "maxWireVersion": 1000 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid" } ] diff --git a/test/discovery_and_monitoring/sharded/too_old.json b/test/discovery_and_monitoring/sharded/too_old.json index 6bd187f61d..41ffed925d 100644 --- a/test/discovery_and_monitoring/sharded/too_old.json +++ b/test/discovery_and_monitoring/sharded/too_old.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid" } ] diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json index ee6b847ade..9c91ae1db1 100644 --- a/test/discovery_and_monitoring/single/compatible.json +++ b/test/discovery_and_monitoring/single/compatible.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json b/test/discovery_and_monitoring/single/direct_connection_external_ip.json index afd5edc1d2..fcc2e83667 100644 --- a/test/discovery_and_monitoring/single/direct_connection_external_ip.json +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "b:27017" ], diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json index 9175049cc6..2cb39ca9bb 100644 --- a/test/discovery_and_monitoring/single/direct_connection_mongos.json +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json index c629a709be..a9fb7ef9fa 100644 --- a/test/discovery_and_monitoring/single/direct_connection_replicaset.json +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json index b07beb31ed..8adc36d44e 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "arbiterOnly": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json index 7216a13345..ccd89e279b 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json index 573036f2aa..588b3a88ad 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json index c53d76e76e..2ec2f575fe 100644 --- a/test/discovery_and_monitoring/single/direct_connection_standalone.json +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json index de0b4b2aa7..429bd561e2 100644 --- a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -36,7 +36,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json index eb6c6ae746..595a2d1fa9 100644 --- a/test/discovery_and_monitoring/single/discover_standalone.json +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/ls_timeout_standalone.json b/test/discovery_and_monitoring/single/ls_timeout_standalone.json index ae6c8ba11b..e48f6151f9 100644 --- a/test/discovery_and_monitoring/single/ls_timeout_standalone.json +++ b/test/discovery_and_monitoring/single/ls_timeout_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/not_ok_response.json b/test/discovery_and_monitoring/single/not_ok_response.json index 06f71305dc..0223459e68 100644 --- a/test/discovery_and_monitoring/single/not_ok_response.json +++ b/test/discovery_and_monitoring/single/not_ok_response.json @@ -1,5 +1,5 @@ { - "description": "Handle a not-ok ismaster response", + "description": "Handle a not-ok hello response", "uri": "mongodb://a", "phases": [ { @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } @@ -17,7 +17,7 @@ "a:27017", { "ok": 0, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/standalone_removed.json b/test/discovery_and_monitoring/single/standalone_removed.json index 4c363ffffb..b88ee89531 100644 --- a/test/discovery_and_monitoring/single/standalone_removed.json +++ b/test/discovery_and_monitoring/single/standalone_removed.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/direct_connection_slave.json b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json similarity index 79% rename from test/discovery_and_monitoring/single/direct_connection_slave.json rename to test/discovery_and_monitoring/single/standalone_using_legacy_hello.json index 720ec3dd82..46660fa8de 100644 --- a/test/discovery_and_monitoring/single/direct_connection_slave.json +++ b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json @@ -1,6 +1,6 @@ { - "description": "Direct connection to slave", - "uri": "mongodb://a/?directConnection=true", + "description": "Connect to standalone using legacy hello", + "uri": "mongodb://a", "phases": [ { "responses": [ @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "ismaster": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/single/too_new.json b/test/discovery_and_monitoring/single/too_new.json index 38e4621d60..5320c4a261 100644 --- a/test/discovery_and_monitoring/single/too_new.json +++ b/test/discovery_and_monitoring/single/too_new.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 999, "maxWireVersion": 1000 } diff --git a/test/discovery_and_monitoring/single/too_old.json b/test/discovery_and_monitoring/single/too_old.json index fbf68262c0..55ef82acb7 100644 --- a/test/discovery_and_monitoring/single/too_old.json +++ b/test/discovery_and_monitoring/single/too_old.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true + "isWritablePrimary": true } ] ], diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json index 7da46856fb..2bad1e40bf 100644 --- a/test/discovery_and_monitoring/single/too_old_then_upgraded.json +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true + "isWritablePrimary": true } ] ], @@ -31,7 +31,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring_integration/connectTimeoutMS.json b/test/discovery_and_monitoring_integration/connectTimeoutMS.json index b75eb58536..36a6dc4507 100644 --- a/test/discovery_and_monitoring_integration/connectTimeoutMS.json +++ b/test/discovery_and_monitoring_integration/connectTimeoutMS.json @@ -42,8 +42,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "connectTimeoutMS=0", "blockConnection": true, diff --git a/test/discovery_and_monitoring_integration/isMaster-command-error.json b/test/discovery_and_monitoring_integration/hello-command-error.json similarity index 94% rename from test/discovery_and_monitoring_integration/isMaster-command-error.json rename to test/discovery_and_monitoring_integration/hello-command-error.json index 0567dd3323..05a93e751c 100644 --- a/test/discovery_and_monitoring_integration/isMaster-command-error.json +++ b/test/discovery_and_monitoring_integration/hello-command-error.json @@ -5,7 +5,7 @@ } ], "database_name": "sdam-tests", - "collection_name": "isMaster-command-error", + "collection_name": "hello-command-error", "data": [], "tests": [ { @@ -17,8 +17,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "commandErrorHandshakeTest", "closeConnection": false, @@ -59,7 +59,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-command-error", + "insert": "hello-command-error", "documents": [ { "_id": 1 @@ -121,8 +121,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "commandErrorCheckTest", "closeConnection": false, @@ -184,7 +184,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-command-error", + "insert": "hello-command-error", "documents": [ { "_id": 1 @@ -201,7 +201,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-command-error", + "insert": "hello-command-error", "documents": [ { "_id": 3 diff --git a/test/discovery_and_monitoring_integration/isMaster-network-error.json b/test/discovery_and_monitoring_integration/hello-network-error.json similarity index 93% rename from test/discovery_and_monitoring_integration/isMaster-network-error.json rename to test/discovery_and_monitoring_integration/hello-network-error.json index 617fc74dbc..b699363923 100644 --- a/test/discovery_and_monitoring_integration/isMaster-network-error.json +++ b/test/discovery_and_monitoring_integration/hello-network-error.json @@ -5,7 +5,7 @@ } ], "database_name": "sdam-tests", - "collection_name": "isMaster-network-error", + "collection_name": "hello-network-error", "data": [], "tests": [ { @@ -17,8 +17,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "networkErrorHandshakeTest", "closeConnection": true @@ -58,7 +58,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-network-error", + "insert": "hello-network-error", "documents": [ { "_id": 1 @@ -120,8 +120,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "networkErrorCheckTest", "closeConnection": true @@ -164,7 +164,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-network-error", + "insert": "hello-network-error", "documents": [ { "_id": 1 @@ -181,7 +181,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-network-error", + "insert": "hello-network-error", "documents": [ { "_id": 3 diff --git a/test/discovery_and_monitoring_integration/isMaster-timeout.json b/test/discovery_and_monitoring_integration/hello-timeout.json similarity index 95% rename from test/discovery_and_monitoring_integration/isMaster-timeout.json rename to test/discovery_and_monitoring_integration/hello-timeout.json index d37e7ee687..7bdc61a912 100644 --- a/test/discovery_and_monitoring_integration/isMaster-timeout.json +++ b/test/discovery_and_monitoring_integration/hello-timeout.json @@ -5,7 +5,7 @@ } ], "database_name": "sdam-tests", - "collection_name": "isMaster-timeout", + "collection_name": "hello-timeout", "data": [], "tests": [ { @@ -17,8 +17,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "timeoutMonitorHandshakeTest", "blockConnection": true, @@ -59,7 +59,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-timeout", + "insert": "hello-timeout", "documents": [ { "_id": 1 @@ -121,8 +121,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "timeoutMonitorCheckTest", "blockConnection": true, @@ -182,7 +182,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-timeout", + "insert": "hello-timeout", "documents": [ { "_id": 1 @@ -199,7 +199,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-timeout", + "insert": "hello-timeout", "documents": [ { "_id": 3 @@ -298,7 +298,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-timeout", + "insert": "hello-timeout", "documents": [ { "_id": 1 @@ -315,7 +315,7 @@ { "command_started_event": { "command": { - "insert": "isMaster-timeout", + "insert": "hello-timeout", "documents": [ { "_id": 3 diff --git a/test/discovery_and_monitoring_integration/minPoolSize-error.json b/test/discovery_and_monitoring_integration/minPoolSize-error.json index 66f310ce72..9f8e4f6f8b 100644 --- a/test/discovery_and_monitoring_integration/minPoolSize-error.json +++ b/test/discovery_and_monitoring_integration/minPoolSize-error.json @@ -17,8 +17,8 @@ }, "data": { "failCommands": [ - "isMaster", - "hello" + "hello", + "isMaster" ], "appName": "SDAMminPoolSizeError", "closeConnection": true diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json index c3ab59834f..caf31b3391 100644 --- a/test/sdam_monitoring/discovered_standalone.json +++ b/test/sdam_monitoring/discovered_standalone.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 4 } diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json index 33010d49fb..768aa7a3e1 100644 --- a/test/sdam_monitoring/replica_set_with_no_primary.json +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": false, + "helloOk": true, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "setVersion": 1, diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json index 04caeba652..da66403541 100644 --- a/test/sdam_monitoring/replica_set_with_primary.json +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -8,7 +8,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "setVersion": 1, "primary": "a:27017", diff --git a/test/sdam_monitoring/replica_set_with_removal.json b/test/sdam_monitoring/replica_set_with_removal.json index 3cad92d6b8..16941021a3 100644 --- a/test/sdam_monitoring/replica_set_with_removal.json +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -60,7 +60,8 @@ "a:27017", { "ok": 1, - "ismaster": true, + "helloOk": true, + "isWritablePrimary": true, "setName": "rs", "setVersion": 1, "primary": "a:27017", @@ -75,7 +76,8 @@ "b:27017", { "ok": 1, - "ismaster": true + "helloOk": true, + "isWritablePrimary": true } ] ], diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json index 0f64bde118..d86b1dfcc6 100644 --- a/test/sdam_monitoring/required_replica_set.json +++ b/test/sdam_monitoring/required_replica_set.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "setVersion": 1, "primary": "a:27017", diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 3ff10f820f..69a100f454 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 4 } diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json index ceab1449cc..1771f85fc0 100644 --- a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 4 } @@ -17,7 +17,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 4 } From 853155dd6a05f6bfe650e51066dc5e7575078983 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 30 Jun 2021 13:27:06 -0700 Subject: [PATCH 0391/2111] PYTHON-2790 Fix doctest setup in raw_bson --- bson/raw_bson.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 4ee0394ad4..339354d7dd 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -19,18 +19,14 @@ Example: Moving a document between different databases/collections -.. testsetup:: - from pymongo import MongoClient - client = MongoClient(document_class=RawBSONDocument) - client.drop_database('db') - client.drop_database('replica_db') - .. doctest:: >>> import bson >>> from pymongo import MongoClient >>> from bson.raw_bson import RawBSONDocument >>> client = MongoClient(document_class=RawBSONDocument) + >>> client.drop_database('db') + >>> client.drop_database('replica_db') >>> db = client.db >>> result = db.test.insert_many([{'a': 1}, ... {'b': 1}, From b823b95de1de494386f5d7cd7266a91a31d784f1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Jun 2021 18:31:00 -0700 Subject: [PATCH 0392/2111] PYTHON-2791 Ignore erroneous serviceId field for non-LB connections (#663) --- pymongo/pool.py | 5 +++++ pymongo/topology_description.py | 2 +- test/test_topology.py | 28 +++++++++++++++++++++++++--- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index acce1c1ec2..654dbf3ab0 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -616,6 +616,8 @@ def _ismaster(self, cluster_time, topology_version, if self.opts.load_balanced and _MOCK_SERVICE_ID: process_id = doc.get('topologyVersion', {}).get('processId') doc.setdefault('serviceId', process_id) + if not self.opts.load_balanced: + doc.pop('serviceId', None) ismaster = IsMaster(doc, awaitable=awaitable) self.is_writable = ismaster.is_writable self.max_wire_version = ismaster.max_wire_version @@ -653,6 +655,9 @@ def _next_reply(self): unpacked_docs = reply.unpack_response() response_doc = unpacked_docs[0] helpers._check_command_response(response_doc, self.max_wire_version) + # Remove after PYTHON-2712. + if not self.opts.load_balanced: + response_doc.pop('serviceId', None) return response_doc def command(self, dbname, spec, slave_ok=False, diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 1c5a1f456e..4282c4ff31 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -361,7 +361,7 @@ def updated_topology_description(topology_description, server_description): topology_description._topology_settings) if topology_type == TOPOLOGY_TYPE.Unknown: - if server_type == SERVER_TYPE.Standalone: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): if len(topology_description._topology_settings.seeds) == 1: topology_type = TOPOLOGY_TYPE.Single else: diff --git a/test/test_topology.py b/test/test_topology.py index f3db8cbde7..9a4bf512cd 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -18,6 +18,8 @@ sys.path[0:0] = [""] +from bson.objectid import ObjectId + from pymongo import common from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_type import SERVER_TYPE @@ -276,7 +278,7 @@ def test_readable_writable(self): 'setName': 'rs', 'hosts': ['a', 'b']}) - self.assertTrue( + self.assertEqual( t.description.topology_type_name, 'ReplicaSetWithPrimary') self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) @@ -301,7 +303,7 @@ def test_readable_writable(self): 'setName': 'rs', 'hosts': ['a', 'b']}) - self.assertTrue( + self.assertEqual( t.description.topology_type_name, 'ReplicaSetNoPrimary') self.assertFalse(t.description.has_writable_server()) self.assertFalse(t.description.has_readable_server()) @@ -326,7 +328,7 @@ def test_readable_writable(self): 'hosts': ['a', 'b'], 'tags': {'tag': 'exists'}}) - self.assertTrue( + self.assertEqual( t.description.topology_type_name, 'ReplicaSetWithPrimary') self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) @@ -621,6 +623,26 @@ def test_topology_repr(self): "]>" % (t._topology_id,)) + def test_unexpected_load_balancer(self): + # Note: This behavior should not be reachable in practice but we + # should handle it gracefully nonetheless. See PYTHON-2791. + # Load balancers are included in topology with a single seed. + t = create_mock_topology(seeds=['a']) + mock_lb_response = {'ok': 1, 'msg': 'isdbgrid', + 'serviceId': ObjectId(), 'maxWireVersion': 13} + got_ismaster(t, ('a', 27017), mock_lb_response) + sds = t.description.server_descriptions() + self.assertIn(('a', 27017), sds) + self.assertEqual(sds[('a', 27017)].server_type_name, 'LoadBalancer') + self.assertEqual(t.description.topology_type_name, 'Single') + self.assertTrue(t.description.has_writable_server()) + + # Load balancers are removed from a topology with multiple seeds. + t = create_mock_topology(seeds=['a', 'b']) + got_ismaster(t, ('a', 27017), mock_lb_response) + self.assertNotIn(('a', 27017), t.description.server_descriptions()) + self.assertEqual(t.description.topology_type_name, 'Unknown') + def wait_for_master(topology): """Wait for a Topology to discover a writable server. From 0e0c4fd9443723e5c613a5964143528ee4ffe13d Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 30 Jun 2021 19:14:22 -0700 Subject: [PATCH 0393/2111] PYTHON-2389 Add session support to find_raw_batches and aggregate_raw_batches (#658) --- pymongo/collection.py | 38 +++++------- pymongo/message.py | 2 + test/test_cursor.py | 141 ++++++++++++++++++++++++++++++++++++++++++ test/test_session.py | 16 ++--- 4 files changed, 164 insertions(+), 33 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 64ec74b750..2fa0d92886 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1484,21 +1484,16 @@ def find_raw_batches(self, *args, **kwargs): >>> for batch in cursor: ... print(bson.decode_all(batch)) - .. note:: find_raw_batches does not support sessions or auto - encryption. + .. note:: find_raw_batches does not support auto encryption. .. versionchanged:: 3.12 Instead of ignoring the user-specified read concern, this method now sends it to the server when connected to MongoDB 3.6+. + Added session support. + .. versionadded:: 3.6 """ - # OP_MSG with document stream returns is required to support - # sessions. - if "session" in kwargs: - raise ConfigurationError( - "find_raw_batches does not support sessions") - # OP_MSG is required to support encryption. if self.__database.client._encrypter: raise InvalidOperation( @@ -2256,7 +2251,7 @@ def aggregate(self, pipeline, session=None, **kwargs): explicit_session=session is not None, **kwargs) - def aggregate_raw_batches(self, pipeline, **kwargs): + def aggregate_raw_batches(self, pipeline, session=None, **kwargs): """Perform an aggregation and retrieve batches of raw BSON. Similar to the :meth:`aggregate` method but returns a @@ -2273,28 +2268,25 @@ def aggregate_raw_batches(self, pipeline, **kwargs): >>> for batch in cursor: ... print(bson.decode_all(batch)) - .. note:: aggregate_raw_batches does not support sessions or auto - encryption. + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. .. versionadded:: 3.6 """ - # OP_MSG with document stream returns is required to support - # sessions. - if "session" in kwargs: - raise ConfigurationError( - "aggregate_raw_batches does not support sessions") - # OP_MSG is required to support encryption. if self.__database.client._encrypter: raise InvalidOperation( "aggregate_raw_batches does not support auto encryption") - return self._aggregate(_CollectionRawAggregationCommand, - pipeline, - RawBatchCommandCursor, - session=None, - explicit_session=False, - **kwargs) + with self.__database.client._tmp_session(session, close=False) as s: + return self._aggregate(_CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + explicit_session=session is not None, + **kwargs) def watch(self, pipeline=None, full_document=None, resume_after=None, max_await_time_ms=None, batch_size=None, collation=None, diff --git a/pymongo/message.py b/pymongo/message.py index 13cee23cd3..79b56ec1a0 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -468,6 +468,8 @@ def use_command(self, sock_info): class _RawBatchGetMore(_GetMore): def use_command(self, sock_info): + # Compatibility checks. + super(_RawBatchGetMore, self).use_command(sock_info) if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True diff --git a/test/test_cursor.py b/test/test_cursor.py index dc0be8f688..021e4d7cb4 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -27,6 +27,7 @@ from bson import decode_all from bson.code import Code +from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo import (ASCENDING, DESCENDING, @@ -44,6 +45,7 @@ unittest, IntegrationTest) from test.utils import (EventListener, + OvertCommandListener, ignore_deprecations, rs_or_single_client, WhiteListEventListener) @@ -1466,6 +1468,76 @@ def test_find_raw(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) + @client_context.require_transactions + def test_find_raw_transaction(self): + c = self.db.test + c.drop() + docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + with client.start_session() as session: + with session.start_transaction(): + batches = list(client[self.db.name].test.find_raw_batches( + session=session).sort('_id')) + cmd = listener.results['started'][0] + self.assertEqual(cmd.command_name, 'find') + self.assertEqual(cmd.command['$clusterTime'], + decode_all(session.cluster_time.raw)[0]) + self.assertEqual(cmd.command['startTransaction'], True) + self.assertEqual(cmd.command['txnNumber'], 1) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_sessions + @client_context.require_failCommand_fail_point + def test_find_raw_retryable_reads(self): + c = self.db.test + c.drop() + docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], + retryReads=True) + with self.fail_point({ + 'mode': {'times': 1}, 'data': {'failCommands': ['find'], + 'closeConnection': True}}): + batches = list( + client[self.db.name].test.find_raw_batches().sort('_id')) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.results['started']), 2) + for cmd in listener.results['started']: + self.assertEqual(cmd.command_name, 'find') + + @client_context.require_version_min(5, 0, 0) + @client_context.require_no_standalone + def test_find_raw_snapshot_reads(self): + c = self.db.get_collection( + "test", write_concern=WriteConcern(w="majority")) + c.drop() + docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], + retryReads=True) + db = client[self.db.name] + with client.start_session(snapshot=True) as session: + db.test.distinct('x', {}, session=session) + batches = list(db.test.find_raw_batches( + session=session).sort('_id')) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.results['started'][1].command + self.assertEqual(find_cmd['readConcern']['level'], 'snapshot') + self.assertIsNotNone(find_cmd['readConcern']['atClusterTime']) + def test_explain(self): c = self.db.test c.insert_one({}) @@ -1590,6 +1662,75 @@ def test_aggregate_raw(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) + @client_context.require_transactions + def test_aggregate_raw_transaction(self): + c = self.db.test + c.drop() + docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + with client.start_session() as session: + with session.start_transaction(): + batches = list(client[self.db.name].test.aggregate_raw_batches( + [{'$sort': {'_id': 1}}], session=session)) + cmd = listener.results['started'][0] + self.assertEqual(cmd.command_name, 'aggregate') + self.assertEqual(cmd.command['$clusterTime'], session.cluster_time) + self.assertEqual(cmd.command['startTransaction'], True) + self.assertEqual(cmd.command['txnNumber'], 1) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @client_context.require_sessions + @client_context.require_failCommand_fail_point + def test_aggregate_raw_retryable_reads(self): + c = self.db.test + c.drop() + docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], + retryReads=True) + with self.fail_point({ + 'mode': {'times': 1}, 'data': {'failCommands': ['aggregate'], + 'closeConnection': True}}): + batches = list(client[self.db.name].test.aggregate_raw_batches( + [{'$sort': {'_id': 1}}])) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.results['started']), 3) + cmds = listener.results['started'] + self.assertEqual(cmds[0].command_name, 'aggregate') + self.assertEqual(cmds[1].command_name, 'aggregate') + + @client_context.require_version_min(5, 0, -1) + @client_context.require_no_standalone + def test_aggregate_raw_snapshot_reads(self): + c = self.db.get_collection( + "test", write_concern=WriteConcern(w="majority")) + c.drop() + docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + c.insert_many(docs) + + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener], + retryReads=True) + db = client[self.db.name] + with client.start_session(snapshot=True) as session: + db.test.distinct('x', {}, session=session) + batches = list(db.test.aggregate_raw_batches( + [{'$sort': {'_id': 1}}], session=session)) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.results['started'][1].command + self.assertEqual(find_cmd['readConcern']['level'], 'snapshot') + self.assertIsNotNone(find_cmd['readConcern']['atClusterTime']) + def test_server_error(self): c = self.db.test c.drop() diff --git a/test/test_session.py b/test/test_session.py index ab78e30f89..41837ab217 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -824,6 +824,12 @@ def test_reads(self): lambda coll, session: coll.count_documents({}, session=session)) self._test_reads( lambda coll, session: coll.distinct('foo', session=session)) + self._test_reads( + lambda coll, session: list(coll.aggregate_raw_batches( + [], session=session))) + self._test_reads( + lambda coll, session: list(coll.find_raw_batches( + {}, session=session))) # SERVER-40938 removed support for casually consistent mapReduce. map_reduce_exc = None @@ -841,16 +847,6 @@ def test_reads(self): 'function() {}', 'function() {}', session=session), exception=map_reduce_exc) - self.assertRaises( - ConfigurationError, - self._test_reads, - lambda coll, session: list( - coll.aggregate_raw_batches([], session=session))) - self.assertRaises( - ConfigurationError, - self._test_reads, - lambda coll, session: list( - coll.find_raw_batches({}, session=session))) self.assertRaises( ConfigurationError, self._test_reads, From fd845654fbe5391fcd17995349e740ba7cf93cea Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Jun 2021 20:01:43 -0700 Subject: [PATCH 0394/2111] PYTHON-2776 Disable writes and other unsupported operations in snapshot reads (#660) Rely on the server to report an error for unsupported snapshot read operations by sending readConcern with all commands, even writes. --- pymongo/client_session.py | 3 + .../snapshot-sessions-unsupported-ops.json | 493 ++++++++++++++++++ test/sessions/unified/snapshot-sessions.json | 58 ++- test/unified_format.py | 2 +- 4 files changed, 553 insertions(+), 3 deletions(-) create mode 100644 test/sessions/unified/snapshot-sessions-unsupported-ops.json diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 5c2fdd9fa1..ef85fcc632 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -869,6 +869,9 @@ def _txn_read_preference(self): def _apply_to(self, command, is_retryable, read_preference, sock_info): self._check_ended() + if self.options.snapshot: + self._update_read_concern(command, sock_info) + self._server_session.last_use = time.monotonic() command['lsid'] = self._server_session.session_id diff --git a/test/sessions/unified/snapshot-sessions-unsupported-ops.json b/test/sessions/unified/snapshot-sessions-unsupported-ops.json new file mode 100644 index 0000000000..1021b7f264 --- /dev/null +++ b/test/sessions/unified/snapshot-sessions-unsupported-ops.json @@ -0,0 +1,493 @@ +{ + "description": "snapshot-sessions-unsupported-ops", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "session": { + "id": "session0", + "client": "client0", + "sessionOptions": { + "snapshot": true + } + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Server returns an error on insertOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 22, + "x": 22 + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on insertMany with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 22, + "x": 22 + }, + { + "_id": 33, + "x": 33 + } + ] + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on deleteOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on updateOne with snapshot", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on findOneAndUpdate with snapshot", + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listDatabases with snapshot", + "operations": [ + { + "name": "listDatabases", + "object": "client0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listCollections with snapshot", + "operations": [ + { + "name": "listCollections", + "object": "database0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on listIndexes with snapshot", + "operations": [ + { + "name": "listIndexes", + "object": "collection0", + "arguments": { + "session": "session0" + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "Server returns an error on runCommand with snapshot", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "listCollections", + "command": { + "listCollections": 1 + } + }, + "expectError": { + "isError": true, + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/unified/snapshot-sessions.json b/test/sessions/unified/snapshot-sessions.json index 4170a96699..75b577b039 100644 --- a/test/sessions/unified/snapshot-sessions.json +++ b/test/sessions/unified/snapshot-sessions.json @@ -655,6 +655,62 @@ } ] }, + { + "description": "countDocuments operation with snapshot", + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": 2 + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": false + } + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "readConcern": { + "level": "snapshot", + "atClusterTime": { + "$$exists": true + } + } + } + } + } + ] + } + ] + }, { "description": "Mixed operation with snapshot", "operations": [ @@ -813,7 +869,6 @@ "name": "insertOne", "object": "collection0", "arguments": { - "session": "session0", "document": { "_id": 22, "x": 33 @@ -827,7 +882,6 @@ "filter": { "_id": 1 }, - "session": "session0", "update": { "$inc": { "x": 1 diff --git a/test/unified_format.py b/test/unified_format.py index 353e8753c4..8623ffb0ff 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -556,7 +556,7 @@ def match_event(self, event_type, expectation, actual): if actual.command_name == 'update': # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into expectations. - for update in command['updates']: + for update in command.get('updates', []): update.setdefault('upsert', False) update.setdefault('multi', False) self.match_result(command, actual.command) From a14212564025ca7bb8130a4784dff258e348d023 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 6 Jul 2021 11:58:30 -0700 Subject: [PATCH 0395/2111] PYTHON-2775 Add docs for snapshot reads (#662) --- pymongo/client_session.py | 44 ++++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index ef85fcc632..1c2709b32d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -44,6 +44,8 @@ Transactions ============ +.. versionadded:: 3.7 + MongoDB 4.0 adds support for transactions on replica set primaries. A transaction is associated with a :class:`ClientSession`. To start a transaction on a session, use :meth:`ClientSession.start_transaction` in a with-statement. @@ -76,23 +78,55 @@ A session may only have a single active transaction at a time, multiple transactions on the same session can be executed in sequence. -.. versionadded:: 3.7 - Sharded Transactions ^^^^^^^^^^^^^^^^^^^^ +.. versionadded:: 3.9 + PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB -4.2. Sharded transactions have the same API as replica set transactions. +>=4.2. Sharded transactions have the same API as replica set transactions. When running a transaction against a sharded cluster, the session is pinned to the mongos server selected for the first operation in the transaction. All subsequent operations that are part of the same transaction are routed to the same mongos server. When the transaction is completed, by running either commitTransaction or abortTransaction, the session is unpinned. -.. versionadded:: 3.9 - .. mongodoc:: transactions +Snapshot Reads +============== + +.. versionadded:: 3.12 + +MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by +passing the ``snapshot`` option to +:meth:`~pymongo.mongo_client.MongoClient.start_session`. +If ``snapshot`` is True, all read operations that use this session read data +from the same snapshot timestamp. The server chooses the latest +majority-committed snapshot timestamp when executing the first read operation +using the session. Subsequent reads on this session read from the same +snapshot timestamp. Snapshot reads are also supported when reading from +replica set secondaries. + +.. code-block:: python + + # Each read using this session reads data from the same point in time. + with client.start_session(snapshot=True) as session: + order = orders.find_one({"sku": "abc123"}, session=session) + inventory = inventory.find_one({"sku": "abc123"}, session=session) + +Snapshot Reads Limitations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Snapshot reads sessions are incompatible with ``causal_consistency=True``. +Only the following read operations are supported in a snapshot reads session: + +- :meth:`~pymongo.collection.Collection.find` +- :meth:`~pymongo.collection.Collection.find_one` +- :meth:`~pymongo.collection.Collection.aggregate` +- :meth:`~pymongo.collection.Collection.count_documents` +- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections) + Classes ======= """ From 907bb7e3dc8956980ce19a2d8775847d84859a6e Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 6 Jul 2021 13:43:14 -0700 Subject: [PATCH 0396/2111] PYTHON-2794 Fix up dots and dollars spec tests --- .../unified/insertMany-dots_and_dollars.json | 36 +++++++------ .../unified/insertOne-dots_and_dollars.json | 50 +++++++++++-------- .../valid-pass/poc-crud.json | 19 ++++--- .../valid-pass/poc-retryable-writes.json | 3 -- test/unified_format.py | 6 +-- 5 files changed, 62 insertions(+), 52 deletions(-) diff --git a/test/crud/unified/insertMany-dots_and_dollars.json b/test/crud/unified/insertMany-dots_and_dollars.json index 3b66ac0621..eed8997df9 100644 --- a/test/crud/unified/insertMany-dots_and_dollars.json +++ b/test/crud/unified/insertMany-dots_and_dollars.json @@ -53,10 +53,11 @@ ] }, "expectResult": { - "insertedCount": 1, - "insertedIds": { - "$$unsetOrMatches": { - "0": 1 + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } } } } @@ -162,10 +163,11 @@ ] }, "expectResult": { - "insertedCount": 1, - "insertedIds": { - "$$unsetOrMatches": { - "0": 1 + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } } } } @@ -221,10 +223,11 @@ ] }, "expectResult": { - "insertedCount": 1, - "insertedIds": { - "$$unsetOrMatches": { - "0": 1 + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } } } } @@ -284,10 +287,11 @@ ] }, "expectResult": { - "insertedCount": 1, - "insertedIds": { - "$$unsetOrMatches": { - "0": 1 + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } } } } diff --git a/test/crud/unified/insertOne-dots_and_dollars.json b/test/crud/unified/insertOne-dots_and_dollars.json index 1a30df4a02..fdc17af2e8 100644 --- a/test/crud/unified/insertOne-dots_and_dollars.json +++ b/test/crud/unified/insertOne-dots_and_dollars.json @@ -63,9 +63,10 @@ } }, "expectResult": { - "insertedCount": 1, - "insertedId": { - "$$unsetOrMatches": 1 + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } } } } @@ -166,9 +167,10 @@ } }, "expectResult": { - "insertedCount": 1, - "insertedId": { - "$$unsetOrMatches": 1 + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } } } } @@ -221,9 +223,10 @@ } }, "expectResult": { - "insertedCount": 1, - "insertedId": { - "$$unsetOrMatches": 1 + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } } } } @@ -280,9 +283,10 @@ } }, "expectResult": { - "insertedCount": 1, - "insertedId": { - "$$unsetOrMatches": 1 + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } } } } @@ -390,10 +394,11 @@ } }, "expectResult": { - "insertedCount": 1, - "insertedId": { - "$$unsetOrMatches": { - "a.b": 1 + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": { + "a.b": 1 + } } } } @@ -501,9 +506,10 @@ } }, "expectResult": { - "insertedCount": 1, - "insertedId": { - "$$unsetOrMatches": 1 + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } } } } @@ -564,8 +570,10 @@ } }, "expectResult": { - "acknowledged": { - "$$unsetOrMatches": false + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } } } } diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json index 2ed86d6150..7bb072de88 100644 --- a/test/unified-test-format/valid-pass/poc-crud.json +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -1,6 +1,6 @@ { "description": "poc-crud", - "schemaVersion": "1.0", + "schemaVersion": "1.4", "createEntities": [ { "client": { @@ -242,12 +242,14 @@ }, "expectError": { "expectResult": { - "deletedCount": 0, - "insertedCount": 2, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} + "$$unsetOrMatches": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } } } } @@ -406,7 +408,8 @@ "description": "Aggregate with $listLocalSessions", "runOnRequirements": [ { - "minServerVersion": "3.6.0" + "minServerVersion": "3.6.0", + "serverless": "forbid" } ], "operations": [ diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json index 30c1d54152..50160799f3 100644 --- a/test/unified-test-format/valid-pass/poc-retryable-writes.json +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -298,9 +298,6 @@ }, "expectResult": { "$$unsetOrMatches": { - "insertedCount": { - "$$unsetOrMatches": 2 - }, "insertedIds": { "$$unsetOrMatches": { "0": 3, diff --git a/test/unified_format.py b/test/unified_format.py index 8623ffb0ff..e402e20aed 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -612,11 +612,9 @@ def coerce_result(opname, result): if opname == 'bulkWrite': return parse_bulk_write_result(result) if opname == 'insertOne': - return {'insertedId': result.inserted_id, 'insertedCount': 1} + return {'insertedId': result.inserted_id} if opname == 'insertMany': - res = {idx: _id for idx, _id in enumerate(result.inserted_ids)} - res['insertedCount'] = len(result.inserted_ids) - return res + return {idx: _id for idx, _id in enumerate(result.inserted_ids)} if opname in ('deleteOne', 'deleteMany'): return {'deletedCount': result.deleted_count} if opname in ('updateOne', 'updateMany', 'replaceOne'): From dde28d78cb1d7bf628702a1776aae7b1170c0618 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 6 Jul 2021 15:23:47 -0700 Subject: [PATCH 0397/2111] PYTHON-2393 Document unicode error handler for MongoClient --- pymongo/mongo_client.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ceb9e5140b..ee08ed3efc 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -189,9 +189,6 @@ def __init__( - `port` (optional): port number on which to connect - `document_class` (optional): default class to use for documents returned from queries on this client - - `type_registry` (optional): instance of - :class:`~bson.codec_options.TypeRegistry` to enable encoding - and decoding of custom types. - `tz_aware` (optional): if ``True``, :class:`~datetime.datetime` instances returned as values in a document by this :class:`MongoClient` will be timezone @@ -199,15 +196,18 @@ def __init__( - `connect` (optional): if ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect on the first operation. + - `type_registry` (optional): instance of + :class:`~bson.codec_options.TypeRegistry` to enable encoding + and decoding of custom types. + + | **Other optional parameters can be passed as keyword arguments:** + - `directConnection` (optional): if ``True``, forces this client to connect directly to the specified MongoDB host as a standalone. If ``false``, the client connects to the entire replica set of which the given MongoDB host(s) is a part. If this is ``True`` and a mongodb+srv:// URI or a URI containing multiple seeds is provided, an exception will be raised. - - | **Other optional parameters can be passed as keyword arguments:** - - `maxPoolSize` (optional): The maximum allowable number of concurrent connections to each connected server. Requests to a server will block if there are `maxPoolSize` outstanding @@ -338,6 +338,10 @@ def __init__( `csharpLegacy`, `standard` and `unspecified`. New applications should consider setting this to `standard` for cross language compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', and 'ignore'. Defaults to 'strict'. | **Write Concern options:** | (Only set if passed. No default values.) From 8675dc0ea104bf1713f3efc8430be5441497b143 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 7 Jul 2021 15:24:49 -0700 Subject: [PATCH 0398/2111] PYTHON-2799 Use namespace returned from initial command response for killCursors (#666) --- pymongo/command_cursor.py | 2 +- pymongo/cursor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index a317bd55b2..be10ffb42a 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -74,7 +74,7 @@ def __die(self, synchronous=False): if self.__id and not already_killed: cursor_id = self.__id address = _CursorAddress( - self.__address, self.__collection.full_name) + self.__address, self.__ns) else: # Skip killCursors. cursor_id = 0 diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 2815a1a276..2f6550d52c 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -346,7 +346,7 @@ def __die(self, synchronous=False): if self.__id and not already_killed: cursor_id = self.__id address = _CursorAddress( - self.__address, self.__collection.full_name) + self.__address, "%s.%s" % (self.__dbname, self.__collname)) else: # Skip killCursors. cursor_id = 0 From 00ed2321bacd16b6ecdabd32549f5e4db9b19ea0 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 7 Jul 2021 23:55:52 -0700 Subject: [PATCH 0399/2111] PYTHON-2475 Implement Atlas Data Lake prose specification tests (#665) * PYTHON-2475 Add prose specification tests for Atlas Data Lake * add prose tests * Update evergreen config to bootstrap ADL * add sleep before connecting to ADL * print buildinfo * print buildInfo in conditional block * refactor skiplogic * remove sleep * fix debugging code * Ensure ADL tests run * ensure suite fails if not connected * fix test failure * improve data_lake variable extraction * review changes --- .evergreen/config.yml | 6 ++-- test/__init__.py | 7 +++++ test/test_data_lake.py | 70 +++++++++++++++++++++++++++++++++++++++--- 3 files changed, 75 insertions(+), 8 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 5ad93283cb..ed5b99b7e1 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -310,8 +310,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake - DRIVERS_TOOLS="${DRIVERS_TOOLS}" sh build-mongohouse-local.sh + sh ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec type: setup params: @@ -319,8 +318,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake - DRIVERS_TOOLS="${DRIVERS_TOOLS}" sh run-mongohouse-local.sh + sh ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-local.sh "stop mongo-orchestration": - command: shell.exec diff --git a/test/__init__.py b/test/__init__.py index 28c46918f7..fa03de3263 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -535,6 +535,13 @@ def require_connection(self, func): "Cannot connect to MongoDB on %s" % (self.pair,), func=func) + def require_data_lake(self, func): + """Run a test only if we are connected to Atlas Data Lake.""" + return self._require( + lambda: self.is_data_lake, + "Not connected to Atlas Data Lake on %s" % (self.pair,), + func=func) + def require_no_mmap(self, func): """Run a test only if the server is not using the MMAPv1 storage engine. Only works for standalone and replica sets; tests are diff --git a/test/test_data_lake.py b/test/test_data_lake.py index d762f11217..2954efe651 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -19,9 +19,11 @@ sys.path[0:0] = [""] -from test import client_context, unittest +from pymongo.auth import MECHANISMS +from test import client_context, unittest, IntegrationTest from test.crud_v2_format import TestCrudV2 -from test.utils import TestCreator +from test.utils import ( + rs_client_noauth, rs_or_single_client, OvertCommandListener, TestCreator) # Location of JSON test specifications. @@ -29,14 +31,74 @@ os.path.dirname(os.path.realpath(__file__)), "data_lake") +class TestDataLakeMustConnect(IntegrationTest): + def test_connected_to_data_lake(self): + data_lake = os.environ.get('DATA_LAKE') + if not data_lake: + self.skipTest('DATA_LAKE is not set') + + self.assertTrue(client_context.is_data_lake, + 'client context.is_data_lake must be True when ' + 'DATA_LAKE is set') + + +class TestDataLakeProse(IntegrationTest): + # Default test database and collection names. + TEST_DB = 'test' + TEST_COLLECTION = 'driverdata' + + @classmethod + @client_context.require_data_lake + def setUpClass(cls): + super(TestDataLakeProse, cls).setUpClass() + + # Test killCursors + def test_1(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + cursor = client[self.TEST_DB][self.TEST_COLLECTION].find( + {}, batch_size=2) + next(cursor) + + # find command assertions + find_cmd = listener.results["succeeded"][-1] + self.assertEqual(find_cmd.command_name, "find") + cursor_id = find_cmd.reply["cursor"]["id"] + cursor_ns = find_cmd.reply["cursor"]["ns"] + + # killCursors command assertions + cursor.close() + started = listener.results["started"][-1] + self.assertEqual(started.command_name, 'killCursors') + succeeded = listener.results["succeeded"][-1] + self.assertEqual(succeeded.command_name, 'killCursors') + + self.assertIn(cursor_id, started.command["cursors"]) + target_ns = ".".join([started.command['$db'], + started.command['killCursors']]) + self.assertEqual(cursor_ns, target_ns) + + self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) + + # Test no auth + def test_2(self): + client = rs_client_noauth() + client.admin.command('ping') + + # Test with auth + def test_3(self): + for mechanism in ['SCRAM-SHA-1', 'SCRAM-SHA-256']: + client = rs_or_single_client(authMechanism=mechanism) + client[self.TEST_DB][self.TEST_COLLECTION].find_one() + + class DataLakeTestSpec(TestCrudV2): # Default test database and collection names. TEST_DB = 'test' TEST_COLLECTION = 'driverdata' @classmethod - @unittest.skipUnless(client_context.is_data_lake, - 'Not connected to Atlas Data Lake') + @client_context.require_data_lake def setUpClass(cls): super(DataLakeTestSpec, cls).setUpClass() From c8d920a46bfb7b054326b3e983943bfc794cb676 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 7 Jul 2021 17:04:46 -0700 Subject: [PATCH 0400/2111] PYTHON-2795 Improve host parsing and error messages --- pymongo/mongo_client.py | 5 ++++- pymongo/srv_resolver.py | 15 +++++++++++++-- test/test_client.py | 14 ++++++++++++++ test/test_dns.py | 12 ++++++++++-- 4 files changed, 41 insertions(+), 5 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ee08ed3efc..5a4df213df 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -650,7 +650,10 @@ def __init__( opts = common._CaseInsensitiveDictionary() fqdn = None for entity in host: - if "://" in entity: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: # Determine connection timeout from kwargs. timeout = keyword_opts.get("connecttimeoutms") if timeout is not None: diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 42be08a4d3..c53ce378e3 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -14,6 +14,8 @@ """Support for resolving hosts and options from mongodb+srv:// URIs.""" +import ipaddress + try: from dns import resolver _HAVE_DNSPYTHON = True @@ -40,6 +42,9 @@ def _resolve(*args, **kwargs): # dnspython 1.X return resolver.query(*args, **kwargs) +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?") class _SrvResolver(object): def __init__(self, fqdn, connect_timeout=None): @@ -47,13 +52,19 @@ def __init__(self, fqdn, connect_timeout=None): self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + try: self.__plist = self.__fqdn.split(".")[1:] except Exception: - raise ConfigurationError("Invalid URI host: %s" % (fqdn,)) + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) self.__slen = len(self.__plist) if self.__slen < 2: - raise ConfigurationError("Invalid URI host: %s" % (fqdn,)) + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) def get_options(self): try: diff --git a/test/test_client.py b/test/test_client.py index f97691176d..26a0af5ae1 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -166,6 +166,20 @@ def test_max_pool_size_zero(self): with self.assertRaises(ValueError): MongoClient(maxPoolSize=0) + def test_uri_detection(self): + self.assertRaises( + ConfigurationError, + MongoClient, + "/foo") + self.assertRaises( + ConfigurationError, + MongoClient, + "://") + self.assertRaises( + ConfigurationError, + MongoClient, + "foo/") + def test_get_db(self): def make_db(base, name): return base[name] diff --git a/test/test_dns.py b/test/test_dns.py index 16814063cc..2cca4d4487 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -142,12 +142,20 @@ class TestParsingErrors(unittest.TestCase): def test_invalid_host(self): self.assertRaisesRegex( ConfigurationError, - "Invalid URI host: mongodb", + "Invalid URI host: mongodb is not", MongoClient, "mongodb+srv://mongodb") self.assertRaisesRegex( ConfigurationError, - "Invalid URI host: mongodb.com", + "Invalid URI host: mongodb.com is not", MongoClient, "mongodb+srv://mongodb.com") + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: an IP address is not", + MongoClient, "mongodb+srv://127.0.0.1") + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: an IP address is not", + MongoClient, "mongodb+srv://[::1]") if __name__ == '__main__': From 98b64ee76bd1f486e7d36076f638db712a8eee76 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 9 Jul 2021 11:01:54 -0700 Subject: [PATCH 0401/2111] PYTHON-2096 Validate that mongocryptd is not spawned if bypassAutoEncryption=true (#668) --- test/test_encryption.py | 48 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index af637d8566..a63311d720 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -35,6 +35,7 @@ from bson.json_util import JSONOptions from bson.son import SON +from pymongo import encryption from pymongo.cursor import CursorType from pymongo.encryption import (Algorithm, ClientEncryption) @@ -44,6 +45,7 @@ EncryptionError, InvalidOperation, OperationFailure, + ServerSelectionTimeoutError, WriteError) from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne @@ -1576,5 +1578,51 @@ def test_case_8(self): self.assertEqual(len(self.topology_listener.results['opened']), 1) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd +class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): + def test_mongocryptd_bypass_spawn(self): + # Lower the mongocryptd timeout to reduce the test run time. + self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS + encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + def reset_timeout(): + encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + self.addCleanup(reset_timeout) + + # Configure the encrypted field via the local schema_map option. + schemas = {'db.coll': json_data('external', 'external-schema.json')} + opts = AutoEncryptionOpts( + {'local': {'key': LOCAL_MASTER_KEY}}, + 'keyvault.datakeys', + schema_map=schemas, + mongocryptd_bypass_spawn=True, + mongocryptd_uri='mongodb://localhost:27027/', + mongocryptd_spawn_args=[ + '--pidfilepath=bypass-spawning-mongocryptd.pid', + '--port=27027'] + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + with self.assertRaisesRegex(EncryptionError, 'Timeout'): + client_encrypted.db.coll.insert_one({'encrypted': 'test'}) + + def test_bypassAutoEncryption(self): + opts = AutoEncryptionOpts( + {'local': {'key': LOCAL_MASTER_KEY}}, + 'keyvault.datakeys', + bypass_auto_encryption=True, + mongocryptd_spawn_args=[ + '--pidfilepath=bypass-spawning-mongocryptd.pid', + '--port=27027'] + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.insert_one({"unencrypted": "test"}) + # Validate that mongocryptd was not spawned: + mongocryptd_client = MongoClient( + 'mongodb://localhost:27027/?serverSelectionTimeoutMS=500') + with self.assertRaises(ServerSelectionTimeoutError): + mongocryptd_client.admin.command('ping') + + if __name__ == "__main__": unittest.main() From 834500de569a37a6ce683614830aabbd3e2aa068 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 9 Jul 2021 13:24:09 -0700 Subject: [PATCH 0402/2111] PYTHON-2608 Test that KMS TLS connections verify peer certificates (#667) Use bash for all evergreen scripts. --- .evergreen/config.yml | 22 ++++++------ .evergreen/install-dependencies.sh | 2 +- .evergreen/run-mod-wsgi-tests.sh | 2 +- .evergreen/run-tests.sh | 9 +++++ test/test_encryption.py | 57 ++++++++++++++++++++++++++++++ 5 files changed, 79 insertions(+), 13 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ed5b99b7e1..3c365d66bd 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -292,7 +292,7 @@ functions: DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ - sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update params: @@ -310,7 +310,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh + bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec type: setup params: @@ -318,7 +318,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-local.sh + bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-local.sh "stop mongo-orchestration": - command: shell.exec @@ -326,7 +326,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh + bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh "run mod_wsgi tests": - command: shell.exec @@ -336,7 +336,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-mod-wsgi-tests.sh + PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mod-wsgi-tests.sh "run mockupdb tests": - command: shell.exec @@ -346,7 +346,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh + PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh "run doctests": - command: shell.exec @@ -356,7 +356,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} sh ${PROJECT_DIRECTORY}/.evergreen/run-doctests.sh + PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-doctests.sh "run tests": - command: shell.exec @@ -425,7 +425,7 @@ functions: SSL=${SSL} \ DATA_LAKE=${DATA_LAKE} \ MONGODB_API_VERSION=${MONGODB_API_VERSION} \ - sh ${PROJECT_DIRECTORY}/.evergreen/run-tests.sh + bash ${PROJECT_DIRECTORY}/.evergreen/run-tests.sh "run enterprise auth tests": - command: shell.exec @@ -435,7 +435,7 @@ functions: working_dir: "src" script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} SASL_HOST=${sasl_host} SASL_PORT=${sasl_port} SASL_USER=${sasl_user} SASL_PASS=${sasl_pass} SASL_DB=${sasl_db} PRINCIPAL=${principal} GSSAPI_DB=${gssapi_db} KEYTAB_BASE64=${keytab_base64} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh + PYTHON_BINARY=${PYTHON_BINARY} SASL_HOST=${sasl_host} SASL_PORT=${sasl_port} SASL_USER=${sasl_user} SASL_PASS=${sasl_pass} SASL_DB=${sasl_db} PRINCIPAL=${principal} GSSAPI_DB=${gssapi_db} KEYTAB_BASE64=${keytab_base64} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh "run atlas tests": - command: shell.exec @@ -705,7 +705,7 @@ functions: ${PREPARE_SHELL} file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && sh $file || echo "$file not available, skipping" + [ -f "$file" ] && bash $file || echo "$file not available, skipping" "run-ocsp-test": - command: shell.exec @@ -717,7 +717,7 @@ functions: PYTHON_BINARY=${PYTHON_BINARY} \ CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ - sh ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-tests.sh + bash ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-tests.sh run-valid-ocsp-server: - command: shell.exec diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh index f28a957746..9f4bcdbb59 100644 --- a/.evergreen/install-dependencies.sh +++ b/.evergreen/install-dependencies.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh index 5e8b7ca2ac..725023cc3a 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -o xtrace set -o errexit diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4577d2160c..00d095d727 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -144,6 +144,15 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh + + # Start the mock KMS servers. + if [ "$OS" != "Windows_NT" ]; then + pushd ${DRIVERS_TOOLS}/.evergreen/csfle + python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & + python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & + trap 'kill $(jobs -p)' EXIT HUP + popd + fi fi if [ -z "$DATA_LAKE" ]; then diff --git a/test/test_encryption.py b/test/test_encryption.py index a63311d720..d79339d41b 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -19,6 +19,7 @@ import os import traceback import socket +import ssl import sys import textwrap import uuid @@ -49,6 +50,7 @@ WriteError) from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne +from pymongo.ssl_support import _ssl from pymongo.write_concern import WriteConcern from test import unittest, IntegrationTest, PyMongoTestCase, client_context @@ -60,6 +62,7 @@ rs_or_single_client, wait_until) from test.utils_spec_runner import SpecRunner +from test.test_ssl import CA_PEM def get_client_opts(client): @@ -1624,5 +1627,59 @@ def test_bypassAutoEncryption(self): mongocryptd_client.admin.command('ping') +# https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests +class TestKmsTLSProse(EncryptionIntegrationTest): + @unittest.skipIf(sys.platform == 'win32', + "Can't test system ca certs on Windows") + @unittest.skipIf(ssl.OPENSSL_VERSION.lower().startswith('libressl') and + sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL, + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable.") + @unittest.skipUnless(any(AWS_CREDS.values()), + 'AWS environment credentials are not set') + def setUp(self): + self.original_certs = os.environ.get('SSL_CERT_FILE') + def restore_certs(): + if self.original_certs is None: + os.environ.pop('SSL_CERT_FILE') + else: + os.environ['SSL_CERT_FILE'] = self.original_certs + # Tell OpenSSL where CA certificates live. + os.environ['SSL_CERT_FILE'] = CA_PEM + self.addCleanup(restore_certs) + + self.client_encrypted = ClientEncryption( + {'aws': AWS_CREDS}, 'keyvault.datakeys', self.client, OPTS) + self.addCleanup(self.client_encrypted.close) + + def test_invalid_kms_certificate_expired(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8000", + } + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encrypted.create_data_key('aws', master_key=key) + + def test_invalid_hostname_in_kms_certificate(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/" + "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8001", + } + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encrypted.create_data_key('aws', master_key=key) + + if __name__ == "__main__": unittest.main() From 948ebb27f48e5bd6ba3dd39e2cf9ea6bde64a99d Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Mon, 12 Jul 2021 11:04:29 -0700 Subject: [PATCH 0403/2111] PYTHON-2800 Add Atlas connectivity tests for MongoDB Serverless (#669) --- .evergreen/config.yml | 2 ++ test/atlas/test_connection.py | 10 +++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3c365d66bd..70eb6c52be 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -450,11 +450,13 @@ functions: export ATLAS_SHRD='${atlas_shrd}' export ATLAS_TLS11='${atlas_tls11}' export ATLAS_TLS12='${atlas_tls12}' + export ATLAS_SERVERLESS='${atlas_serverless}' export ATLAS_SRV_FREE='${atlas_srv_free}' export ATLAS_SRV_REPL='${atlas_srv_repl}' export ATLAS_SRV_SHRD='${atlas_srv_shrd}' export ATLAS_SRV_TLS11='${atlas_srv_tls11}' export ATLAS_SRV_TLS12='${atlas_srv_tls12}' + export ATLAS_SRV_SERVERLESS='${atlas_srv_serverless}' EOT - command: shell.exec type: test diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index f4226b7e5b..ef2a83369f 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -38,11 +38,13 @@ "ATLAS_FREE": os.environ.get("ATLAS_FREE"), "ATLAS_TLS11": os.environ.get("ATLAS_TLS11"), "ATLAS_TLS12": os.environ.get("ATLAS_TLS12"), + "ATLAS_SERVERLESS": os.environ.get("ATLAS_SERVERLESS"), "ATLAS_SRV_REPL": os.environ.get("ATLAS_SRV_REPL"), "ATLAS_SRV_SHRD": os.environ.get("ATLAS_SRV_SHRD"), "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), + "ATLAS_SRV_SERVERLESS": os.environ.get("ATLAS_SRV_SERVERLESS"), } # Set this variable to true to run the SRV tests even when dnspython is not @@ -77,6 +79,9 @@ def test_tls_11(self): def test_tls_12(self): connect(URIS['ATLAS_TLS12']) + def test_serverless(self): + connect(URIS['ATLAS_SERVERLESS']) + def connect_srv(self, uri): connect(uri) self.assertIn('mongodb+srv://', uri) @@ -102,6 +107,10 @@ def test_srv_tls_11(self): def test_srv_tls_12(self): self.connect_srv(URIS['ATLAS_SRV_TLS12']) + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + def test_srv_serverless(self): + self.connect_srv(URIS['ATLAS_SRV_SERVERLESS']) + def test_uniqueness(self): """Ensure that we don't accidentally duplicate the test URIs.""" uri_to_names = defaultdict(list) @@ -114,6 +123,5 @@ def test_uniqueness(self): 'duplicate values: %s' % (duplicates,)) - if __name__ == '__main__': unittest.main() From 6d1ebf4597acc49ebd3e37ed9c9a993f1737f001 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 12 Jul 2021 14:17:01 -0700 Subject: [PATCH 0404/2111] PYTHON-2798 Workaround windows cert issue with SSL_CERT_FILE (#670) --- .evergreen/run-tests.sh | 14 ++++++----- test/__init__.py | 24 ++++++++++++++++++- test/test_encryption.py | 48 +++++++++++++++++++++---------------- test/test_ssl.py | 53 ++++++++++++++++------------------------- 4 files changed, 79 insertions(+), 60 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 00d095d727..d45e9d5235 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -146,13 +146,15 @@ if [ -n "$TEST_ENCRYPTION" ]; then . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh # Start the mock KMS servers. - if [ "$OS" != "Windows_NT" ]; then - pushd ${DRIVERS_TOOLS}/.evergreen/csfle - python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & - python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & - trap 'kill $(jobs -p)' EXIT HUP - popd + if [ "$OS" = "Windows_NT" ]; then + # Remove after BUILD-13574. + python -m pip install certifi fi + pushd ${DRIVERS_TOOLS}/.evergreen/csfle + python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & + python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & + trap 'kill $(jobs -p)' EXIT HUP + popd fi if [ -z "$DATA_LAKE" ]; then diff --git a/test/__init__.py b/test/__init__.py index fa03de3263..49d20ccfb8 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -49,7 +49,7 @@ from pymongo import common, message from pymongo.common import partition_node from pymongo.server_api import ServerApi -from pymongo.ssl_support import HAVE_SSL, validate_cert_reqs +from pymongo.ssl_support import HAVE_SSL, _ssl from pymongo.uri_parser import parse_uri from test.version import Version @@ -898,6 +898,10 @@ def setUpClass(cls): else: cls.credentials = {} + def patch_system_certs(self, ca_certs): + patcher = SystemCertsPatcher(ca_certs) + self.addCleanup(patcher.disable) + # Use assertRaisesRegex if available, otherwise use Python 2.7's # deprecated assertRaisesRegexp, with a 'p'. @@ -1043,3 +1047,21 @@ def clear_warning_registry(): for name, module in list(sys.modules.items()): if hasattr(module, "__warningregistry__"): setattr(module, "__warningregistry__", {}) + + +class SystemCertsPatcher(object): + def __init__(self, ca_certs): + if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and + sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable.") + self.original_certs = os.environ.get('SSL_CERT_FILE') + # Tell OpenSSL where CA certificates live. + os.environ['SSL_CERT_FILE'] = ca_certs + + def disable(self): + if self.original_certs is None: + os.environ.pop('SSL_CERT_FILE') + else: + os.environ['SSL_CERT_FILE'] = self.original_certs diff --git a/test/test_encryption.py b/test/test_encryption.py index d79339d41b..6a71a5a424 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -19,7 +19,6 @@ import os import traceback import socket -import ssl import sys import textwrap import uuid @@ -50,10 +49,14 @@ WriteError) from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne -from pymongo.ssl_support import _ssl from pymongo.write_concern import WriteConcern +from test.test_ssl import CA_PEM -from test import unittest, IntegrationTest, PyMongoTestCase, client_context +from test import (unittest, + client_context, + IntegrationTest, + PyMongoTestCase, + SystemCertsPatcher) from test.utils import (TestCreator, camel_to_snake_args, OvertCommandListener, @@ -62,7 +65,26 @@ rs_or_single_client, wait_until) from test.utils_spec_runner import SpecRunner -from test.test_ssl import CA_PEM + +try: + import certifi + HAVE_CERTIFI = True +except ImportError: + HAVE_CERTIFI = False + +patcher = None + + +def setUpModule(): + if sys.platform == 'win32' and HAVE_CERTIFI: + # Remove after BUILD-13574. + global patcher + patcher = SystemCertsPatcher(certifi.where()) + + +def tearDownModule(): + if patcher: + patcher.disable() def get_client_opts(client): @@ -1629,25 +1651,11 @@ def test_bypassAutoEncryption(self): # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): - @unittest.skipIf(sys.platform == 'win32', - "Can't test system ca certs on Windows") - @unittest.skipIf(ssl.OPENSSL_VERSION.lower().startswith('libressl') and - sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL, - "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable.") @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') def setUp(self): - self.original_certs = os.environ.get('SSL_CERT_FILE') - def restore_certs(): - if self.original_certs is None: - os.environ.pop('SSL_CERT_FILE') - else: - os.environ['SSL_CERT_FILE'] = self.original_certs - # Tell OpenSSL where CA certificates live. - os.environ['SSL_CERT_FILE'] = CA_PEM - self.addCleanup(restore_certs) - + super(TestKmsTLSProse, self).setUp() + self.patch_system_certs(CA_PEM) self.client_encrypted = ClientEncryption( {'aws': AWS_CREDS}, 'keyvault.datakeys', self.client, OPTS) self.addCleanup(self.client_encrypted.close) diff --git a/test/test_ssl.py b/test/test_ssl.py index ee153db28a..65ed16968c 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -449,45 +449,32 @@ def test_validation_with_system_ca_certs(self): # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # --sslWeakCertificateValidation # - if sys.platform == "win32": - raise SkipTest("Can't test system ca certs on Windows.") - - if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and - sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL): - raise SkipTest( - "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable.") - - # Tell OpenSSL where CA certificates live. - os.environ['SSL_CERT_FILE'] = CA_PEM - try: - with self.assertRaises(ConnectionFailure): - # Server cert is verified but hostname matching fails - connected(MongoClient('server', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) - - # Server cert is verified. Disable hostname matching. + self.patch_system_certs(CA_PEM) + with self.assertRaises(ConnectionFailure): + # Server cert is verified but hostname matching fails connected(MongoClient('server', ssl=True, - ssl_match_hostname=False, serverSelectionTimeoutMS=100, **self.credentials)) - # Server cert and hostname are verified. - connected(MongoClient('localhost', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) + # Server cert is verified. Disable hostname matching. + connected(MongoClient('server', + ssl=True, + ssl_match_hostname=False, + serverSelectionTimeoutMS=100, + **self.credentials)) - # Server cert and hostname are verified. - connected( - MongoClient( - 'mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100', - **self.credentials)) - finally: - os.environ.pop('SSL_CERT_FILE') + # Server cert and hostname are verified. + connected(MongoClient('localhost', + ssl=True, + serverSelectionTimeoutMS=100, + **self.credentials)) + + # Server cert and hostname are verified. + connected( + MongoClient( + 'mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100', + **self.credentials)) def test_system_certs_config_error(self): ctx = get_ssl_context( From da49bd88a214c9274f06997482c2ea8d6ccdaf33 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 12 Jul 2021 16:54:50 -0700 Subject: [PATCH 0405/2111] PYTHON-2806 Fix test_aggregate_raw_transaction (#673) --- test/test_cursor.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/test/test_cursor.py b/test/test_cursor.py index 021e4d7cb4..b2de09429b 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1483,10 +1483,13 @@ def test_find_raw_transaction(self): session=session).sort('_id')) cmd = listener.results['started'][0] self.assertEqual(cmd.command_name, 'find') - self.assertEqual(cmd.command['$clusterTime'], - decode_all(session.cluster_time.raw)[0]) + self.assertIn('$clusterTime', cmd.command) self.assertEqual(cmd.command['startTransaction'], True) self.assertEqual(cmd.command['txnNumber'], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.results['succeeded'][-1] + self.assertEqual(last_cmd.reply['$clusterTime']['clusterTime'], + session.cluster_time['clusterTime']) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1677,9 +1680,13 @@ def test_aggregate_raw_transaction(self): [{'$sort': {'_id': 1}}], session=session)) cmd = listener.results['started'][0] self.assertEqual(cmd.command_name, 'aggregate') - self.assertEqual(cmd.command['$clusterTime'], session.cluster_time) + self.assertIn('$clusterTime', cmd.command) self.assertEqual(cmd.command['startTransaction'], True) self.assertEqual(cmd.command['txnNumber'], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.results['succeeded'][-1] + self.assertEqual(last_cmd.reply['$clusterTime']['clusterTime'], + session.cluster_time['clusterTime']) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) From 72206a07d1cabda7223ba1639a543069614a6246 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 13 Jul 2021 08:41:26 -0700 Subject: [PATCH 0406/2111] PYTHON-2809 Skip Jython serverless test --- test/atlas/test_connection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index ef2a83369f..c5b23c7251 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -79,6 +79,8 @@ def test_tls_11(self): def test_tls_12(self): connect(URIS['ATLAS_TLS12']) + @unittest.skipIf(sys.platform.startswith('java'), + 'Jython does not support serverless TLS') def test_serverless(self): connect(URIS['ATLAS_SERVERLESS']) From 9a4c64f32547e4f12dca769f386fc5931b4084c9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 13 Jul 2021 09:47:51 -0700 Subject: [PATCH 0407/2111] Revert "PYTHON-2809 Skip Jython serverless test" This reverts commit 72206a07d1cabda7223ba1639a543069614a6246. --- test/atlas/test_connection.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index c5b23c7251..ef2a83369f 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -79,8 +79,6 @@ def test_tls_11(self): def test_tls_12(self): connect(URIS['ATLAS_TLS12']) - @unittest.skipIf(sys.platform.startswith('java'), - 'Jython does not support serverless TLS') def test_serverless(self): connect(URIS['ATLAS_SERVERLESS']) From 01e34cebdb9aac96c72ddb649e9b0040a0dfd3a0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 15 Jul 2021 14:12:11 -0700 Subject: [PATCH 0408/2111] PYTHON-2769 Test redaction of replies to security-sensitive commands (#676) Resync command monitoring and unified test format tests. Redact entire hello response when the command started contained speculativeAuthenticate. Make OP_REPLY cursor.cursor_id always be an Int64. --- pymongo/message.py | 3 +- pymongo/monitoring.py | 8 +- pymongo/network.py | 6 +- test/command_monitoring/legacy/bulkWrite.json | 4 +- .../command_monitoring/legacy/insertMany.json | 12 +- .../command_monitoring/legacy/updateMany.json | 6 +- test/command_monitoring/legacy/updateOne.json | 9 +- .../unified/redacted-commands.json | 241 +++++++++++++++--- test/test_command_monitoring_legacy.py | 9 + .../example-insertOne.json | 100 -------- ...-client-observeSensitiveCommands-type.json | 18 ++ ...ectionCheckOutFailedEvent-reason-type.json | 23 ++ ...kOutStartedEvent-additionalProperties.json | 23 ++ ...onCheckedInEvent-additionalProperties.json | 23 ++ ...nCheckedOutEvent-additionalProperties.json | 23 ++ ...ent-connectionClosedEvent-reason-type.json | 23 ++ ...tionCreatedEvent-additionalProperties.json | 23 ++ ...ectionReadyEvent-additionalProperties.json | 23 ++ ...nt-poolClearedEvent-hasServiceId-type.json | 23 ++ ...-poolClosedEvent-additionalProperties.json | 23 ++ ...poolCreatedEvent-additionalProperties.json | 23 ++ ...t-poolReadyEvent-additionalProperties.json | 23 ++ ...-commandFailedEvent-hasServiceId-type.json | 29 +++ ...commandStartedEvent-hasServiceId-type.json | 29 +++ ...mmandSucceededEvent-hasServiceId-type.json | 29 +++ .../expectedEvent-additionalProperties.json | 32 --- ...t-commandFailedEvent-commandName-type.json | 34 --- ...mandStartedEvent-additionalProperties.json | 34 --- ...vent-commandStartedEvent-command-type.json | 34 --- ...-commandStartedEvent-commandName-type.json | 34 --- ...commandStartedEvent-databaseName-type.json | 34 --- ...ommandSucceededEvent-commandName-type.json | 34 --- ...vent-commandSucceededEvent-reply-type.json | 34 --- .../invalid/expectedEvent-maxProperties.json | 33 --- .../invalid/expectedEvent-minProperties.json | 30 --- ...xpectedEventsForClient-eventType-enum.json | 24 ++ ...xpectedEventsForClient-eventType-type.json | 24 ++ ...-events_conflicts_with_cmap_eventType.json | 28 ++ ...ents_conflicts_with_command_eventType.json | 28 ++ ...ents_conflicts_with_default_eventType.json | 27 ++ ...ltAndError-conflicts_with_expectError.json | 19 ++ ...tAndError-conflicts_with_expectResult.json | 17 ++ ...ror-conflicts_with_saveResultAsEntity.json | 17 ++ .../invalid/runOnRequirement-auth-type.json | 15 ++ .../runOnRequirement-serverless-enum.json | 15 ++ .../runOnRequirement-serverless-type.json | 15 ++ ...reEventsAsEntity-additionalProperties.json | 26 ++ .../storeEventsAsEntity-events-enum.json | 25 ++ .../storeEventsAsEntity-events-minItems.json | 23 ++ .../storeEventsAsEntity-events-required.json | 22 ++ .../storeEventsAsEntity-events-type.json | 23 ++ .../storeEventsAsEntity-id-required.json | 24 ++ .../invalid/storeEventsAsEntity-id-type.json | 25 ++ .../assertNumberConnectionsCheckedOut.json | 63 +++++ .../valid-fail/entity-find-cursor.json | 62 +++++ .../valid-fail/ignoreResultAndError.json | 72 ++++++ .../assertNumberConnectionsCheckedOut.json | 27 ++ .../valid-pass/entity-client-cmap-events.json | 71 ++++++ .../valid-pass/entity-find-cursor.json | 182 +++++++++++++ .../expectedEventsForClient-eventType.json | 126 +++++++++ .../valid-pass/ignoreResultAndError.json | 59 +++++ .../valid-pass/poc-crud.json | 3 +- test/unified_format.py | 12 +- 63 files changed, 1662 insertions(+), 501 deletions(-) delete mode 100644 test/unified-test-format/example-insertOne.json create mode 100644 test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-additionalProperties.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-maxProperties.json delete mode 100644 test/unified-test-format/invalid/expectedEvent-minProperties.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json create mode 100644 test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json create mode 100644 test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json create mode 100644 test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-auth-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-serverless-enum.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-serverless-type.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-required.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-type.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-id-required.json create mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-id-type.json create mode 100644 test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json create mode 100644 test/unified-test-format/valid-fail/entity-find-cursor.json create mode 100644 test/unified-test-format/valid-fail/ignoreResultAndError.json create mode 100644 test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json create mode 100644 test/unified-test-format/valid-pass/entity-client-cmap-events.json create mode 100644 test/unified-test-format/valid-pass/entity-find-cursor.json create mode 100644 test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json create mode 100644 test/unified-test-format/valid-pass/ignoreResultAndError.json diff --git a/pymongo/message.py b/pymongo/message.py index 79b56ec1a0..0ca4ca71fd 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -33,6 +33,7 @@ _dict_to_bson, _make_c_string) from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.int64 import Int64 from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument) from bson.son import SON @@ -1503,7 +1504,7 @@ class _OpReply(object): def __init__(self, flags, cursor_id, number_returned, documents): self.flags = flags - self.cursor_id = cursor_id + self.cursor_id = Int64(cursor_id) self.number_returned = number_returned self.documents = documents diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 0643a26d1b..c72e836807 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1350,7 +1350,8 @@ def publish_command_start(self, command, database_name, def publish_command_success(self, duration, reply, command_name, request_id, connection_id, op_id=None, - service_id=None): + service_id=None, + speculative_hello=False): """Publish a CommandSucceededEvent to all command listeners. :Parameters: @@ -1362,9 +1363,14 @@ def publish_command_success(self, duration, reply, command_name, command was sent to. - `op_id`: The (optional) operation id for this operation. - `service_id`: The service_id this command was sent to, or ``None``. + - `speculative_hello`: Was the command sent with speculative auth? """ if op_id is None: op_id = request_id + if speculative_hello: + # Redact entire response when the command started contained + # speculativeAuthenticate. + reply = {} event = CommandSucceededEvent( duration, reply, command_name, request_id, connection_id, op_id, service_id) diff --git a/pymongo/network.py b/pymongo/network.py index 5d6439cdc0..e575f0eb3c 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -31,6 +31,7 @@ ProtocolError, _OperationCancelled) from pymongo.message import _UNPACK_REPLY, _OpMsg +from pymongo.monitoring import _is_speculative_authenticate from pymongo.socket_checker import _errno_from_exception @@ -82,6 +83,7 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, name = next(iter(spec)) ns = dbname + '.$cmd' flags = 4 if slave_ok else 0 + speculative_hello = False # Publish the original command document, perhaps with lsid and $clusterTime. orig = spec @@ -98,6 +100,7 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, publish = listeners is not None and listeners.enabled_for_commands if publish: start = datetime.datetime.now() + speculative_hello = _is_speculative_authenticate(name, spec) if compression_ctx and name.lower() in _NO_COMPRESSION: compression_ctx = None @@ -170,7 +173,8 @@ def command(sock_info, dbname, spec, slave_ok, is_mongos, duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( duration, response_doc, name, request_id, address, - service_id=sock_info.service_id) + service_id=sock_info.service_id, + speculative_hello=speculative_hello) if client and client._encrypter and reply: decrypted = client._encrypter.decrypt(reply.raw_command_response()) diff --git a/test/command_monitoring/legacy/bulkWrite.json b/test/command_monitoring/legacy/bulkWrite.json index c5cd5a2399..ca5a9a105c 100644 --- a/test/command_monitoring/legacy/bulkWrite.json +++ b/test/command_monitoring/legacy/bulkWrite.json @@ -86,9 +86,7 @@ "$set": { "x": 333 } - }, - "upsert": false, - "multi": false + } } ], "ordered": true diff --git a/test/command_monitoring/legacy/insertMany.json b/test/command_monitoring/legacy/insertMany.json index 327da61b74..0becf928e4 100644 --- a/test/command_monitoring/legacy/insertMany.json +++ b/test/command_monitoring/legacy/insertMany.json @@ -32,9 +32,7 @@ "x": 22 } ], - "options": { - "ordered": true - } + "ordered": true }, "command_name": "insert", "database_name": "command-monitoring-tests" @@ -75,9 +73,7 @@ "x": 11 } ], - "options": { - "ordered": true - } + "ordered": true }, "command_name": "insert", "database_name": "command-monitoring-tests" @@ -128,9 +124,7 @@ "x": 22 } ], - "options": { - "ordered": false - } + "ordered": false }, "command_name": "insert", "database_name": "command-monitoring-tests" diff --git a/test/command_monitoring/legacy/updateMany.json b/test/command_monitoring/legacy/updateMany.json index 8e98fc92fd..d82792fc4e 100644 --- a/test/command_monitoring/legacy/updateMany.json +++ b/test/command_monitoring/legacy/updateMany.json @@ -51,8 +51,7 @@ "x": 1 } }, - "multi": true, - "upsert": false + "multi": true } ] }, @@ -106,8 +105,7 @@ "x": 1 } }, - "multi": true, - "upsert": false + "multi": true } ] }, diff --git a/test/command_monitoring/legacy/updateOne.json b/test/command_monitoring/legacy/updateOne.json index 565b749704..ba41dbb0c0 100644 --- a/test/command_monitoring/legacy/updateOne.json +++ b/test/command_monitoring/legacy/updateOne.json @@ -50,9 +50,7 @@ "$inc": { "x": 1 } - }, - "multi": false, - "upsert": false + } } ] }, @@ -103,7 +101,6 @@ "x": 1 } }, - "multi": false, "upsert": true } ] @@ -163,9 +160,7 @@ "$nothing": { "x": 1 } - }, - "multi": false, - "upsert": false + } } ] }, diff --git a/test/command_monitoring/unified/redacted-commands.json b/test/command_monitoring/unified/redacted-commands.json index c53f018d32..0f85dc3e94 100644 --- a/test/command_monitoring/unified/redacted-commands.json +++ b/test/command_monitoring/unified/redacted-commands.json @@ -12,7 +12,8 @@ "client": { "id": "client", "observeEvents": [ - "commandStartedEvent" + "commandStartedEvent", + "commandSucceededEvent" ], "observeSensitiveCommands": true } @@ -35,7 +36,10 @@ "arguments": { "commandName": "authenticate", "command": { - "authenticate": "private" + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" } }, "expectError": { @@ -53,6 +57,15 @@ "command": { "authenticate": { "$$exists": false + }, + "mechanism": { + "$$exists": false + }, + "user": { + "$$exists": false + }, + "db": { + "$$exists": false } } } @@ -70,7 +83,9 @@ "arguments": { "commandName": "saslStart", "command": { - "saslStart": "private" + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" } }, "expectError": { @@ -88,6 +103,12 @@ "command": { "saslStart": { "$$exists": false + }, + "payload": { + "$$exists": false + }, + "db": { + "$$exists": false } } } @@ -105,7 +126,9 @@ "arguments": { "commandName": "saslContinue", "command": { - "saslContinue": "private" + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" } }, "expectError": { @@ -123,6 +146,12 @@ "command": { "saslContinue": { "$$exists": false + }, + "conversationId": { + "$$exists": false + }, + "payload": { + "$$exists": false } } } @@ -140,7 +169,7 @@ "arguments": { "commandName": "getnonce", "command": { - "getnonce": "private" + "getnonce": 1 } } } @@ -158,6 +187,19 @@ } } } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } } ] } @@ -172,7 +214,9 @@ "arguments": { "commandName": "createUser", "command": { - "createUser": "private" + "createUser": "private", + "pwd": {}, + "roles": [] } }, "expectError": { @@ -190,6 +234,12 @@ "command": { "createUser": { "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false } } } @@ -207,7 +257,9 @@ "arguments": { "commandName": "updateUser", "command": { - "updateUser": "private" + "updateUser": "private", + "pwd": {}, + "roles": [] } }, "expectError": { @@ -225,6 +277,12 @@ "command": { "updateUser": { "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false } } } @@ -340,6 +398,11 @@ }, { "description": "hello with speculative authenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], "operations": [ { "name": "runCommand", @@ -347,26 +410,62 @@ "arguments": { "commandName": "hello", "command": { - "hello": "private", - "speculativeAuthenticate": "foo" + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } } - }, - "expectError": { - "isError": true } - }, + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate", + "operations": [ { "name": "runCommand", "object": "database", "arguments": { "commandName": "ismaster", "command": { - "ismaster": "private", - "speculativeAuthenticate": "foo" + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } } - }, - "expectError": { - "isError": true } }, { @@ -375,12 +474,11 @@ "arguments": { "commandName": "isMaster", "command": { - "isMaster": "private", - "speculativeAuthenticate": "foo" + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } } - }, - "expectError": { - "isError": true } } ], @@ -390,20 +488,26 @@ "events": [ { "commandStartedEvent": { - "commandName": "hello", + "commandName": "ismaster", "command": { - "hello": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { "$$exists": false } } } }, { - "commandStartedEvent": { + "commandSucceededEvent": { "commandName": "ismaster", - "command": { + "reply": { "ismaster": { "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false } } } @@ -414,6 +518,22 @@ "command": { "isMaster": { "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false } } } @@ -424,6 +544,11 @@ }, { "description": "hello without speculative authenticate is not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], "operations": [ { "name": "runCommand", @@ -431,17 +556,47 @@ "arguments": { "commandName": "hello", "command": { - "hello": "public" + "hello": 1 } } - }, + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without speculative authenticate is not redacted", + "operations": [ { "name": "runCommand", "object": "database", "arguments": { "commandName": "ismaster", "command": { - "ismaster": "public" + "ismaster": 1 } } }, @@ -451,7 +606,7 @@ "arguments": { "commandName": "isMaster", "command": { - "isMaster": "public" + "isMaster": 1 } } } @@ -462,17 +617,19 @@ "events": [ { "commandStartedEvent": { - "commandName": "hello", + "commandName": "ismaster", "command": { - "hello": "public" + "ismaster": 1 } } }, { - "commandStartedEvent": { + "commandSucceededEvent": { "commandName": "ismaster", - "command": { - "ismaster": "public" + "reply": { + "ismaster": { + "$$exists": true + } } } }, @@ -480,7 +637,17 @@ "commandStartedEvent": { "commandName": "isMaster", "command": { - "isMaster": "public" + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } } } } diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index 16bdf1c68e..acebe3a23d 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -146,6 +146,15 @@ def run_scenario(self): event.command['getMore'] = 42 elif event.command_name == 'killCursors': event.command['cursors'] = [42] + elif event.command_name == 'update': + # TODO: remove this once PYTHON-1744 is done. + # Add upsert and multi fields back into + # expectations. + updates = expectation[event_type]['command'][ + 'updates'] + for update in updates: + update.setdefault('upsert', False) + update.setdefault('multi', False) elif event_type == "command_succeeded_event": event = ( res['succeeded'].pop(0) if len(res['succeeded']) else None) diff --git a/test/unified-test-format/example-insertOne.json b/test/unified-test-format/example-insertOne.json deleted file mode 100644 index be41f9eacb..0000000000 --- a/test/unified-test-format/example-insertOne.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "description": "example-insertOne", - "schemaVersion": "1.0", - "runOnRequirements": [ - { - "minServerVersion": "2.6" - } - ], - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll" - } - } - ], - "initialData": [ - { - "collectionName": "coll", - "databaseName": "test", - "documents": [ - { - "_id": 1 - } - ] - } - ], - "tests": [ - { - "description": "insertOne", - "operations": [ - { - "object": "collection0", - "name": "insertOne", - "arguments": { - "document": { - "_id": 2 - } - }, - "expectResult": { - "insertedId": { - "$$unsetOrMatches": 2 - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "documents": [ - { - "_id": 2 - } - ] - } - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "coll", - "databaseName": "test", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json b/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json new file mode 100644 index 0000000000..c5572f1fbe --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeSensitiveCommands-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeSensitiveCommands-type", + "schemaVersion": "1.5", + "createEntities": [ + { + "client": { + "id": "client0", + "observeSensitiveCommands": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json new file mode 100644 index 0000000000..110ce7869e --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutFailedEvent-reason-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckOutFailedEvent-reason-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutFailedEvent": { + "reason": 10 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json new file mode 100644 index 0000000000..f84e208d6a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckOutStartedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json new file mode 100644 index 0000000000..56ffcdee72 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedInEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckedInEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedInEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json new file mode 100644 index 0000000000..9b804aad0a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCheckedOutEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCheckedOutEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json new file mode 100644 index 0000000000..053cd0b413 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionClosedEvent-reason-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionClosedEvent-reason-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionClosedEvent": { + "reason": 10 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json new file mode 100644 index 0000000000..c2edc3f6aa --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionCreatedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionCreatedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json new file mode 100644 index 0000000000..994fb63314 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-connectionReadyEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-connectionReadyEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..5a1a25d463 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-hasServiceId-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClearedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json new file mode 100644 index 0000000000..c181707f4a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClosedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClosedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClosedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json new file mode 100644 index 0000000000..6aaa59a600 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolCreatedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolCreatedEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolCreatedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json new file mode 100644 index 0000000000..66c803a5d8 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolReadyEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolReadyEvent-additionalProperties", + "schemaVersion": "1.3", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolReadyEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..5314dc9f80 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json new file mode 100644 index 0000000000..39ab925efb --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json new file mode 100644 index 0000000000..edc9d3cd72 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServiceId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-hasServiceId-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "hasServiceId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedEvent-additionalProperties.json deleted file mode 100644 index 2c4f7d27e7..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-additionalProperties.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "description": "expectedEvent-additionalProperties", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "foo": 0 - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json deleted file mode 100644 index ea6078faae..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandFailedEvent-commandName-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandFailedEvent-commandName-type", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandFailedEvent": { - "commandName": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json deleted file mode 100644 index ee6eb50658..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-additionalProperties.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandStartedEvent-additionalProperties", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "foo": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json deleted file mode 100644 index 4c9483caf3..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-command-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandStartedEvent-command-type", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json deleted file mode 100644 index a5a66096a0..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-commandName-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandStartedEvent-commandName-type", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "commandName": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json deleted file mode 100644 index dc040ec108..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandStartedEvent-databaseName-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandStartedEvent-databaseName-type", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "databaseName": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json b/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json deleted file mode 100644 index 4a20e906b9..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-commandName-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandSucceededEvent-commandName-type", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandSucceededEvent": { - "commandName": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json b/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json deleted file mode 100644 index 5464542751..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-commandSucceededEvent-reply-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "description": "expectedEvent-commandSucceededEvent-reply-type", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandSucceededEvent": { - "reply": 0 - } - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-maxProperties.json b/test/unified-test-format/invalid/expectedEvent-maxProperties.json deleted file mode 100644 index f01441946f..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-maxProperties.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "description": "expectedEvent-maxProperties", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": {}, - "commandSucceededEvent": {} - } - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEvent-minProperties.json b/test/unified-test-format/invalid/expectedEvent-minProperties.json deleted file mode 100644 index ebcc494894..0000000000 --- a/test/unified-test-format/invalid/expectedEvent-minProperties.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "description": "expectedEvent-minProperties", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [ - { - "name": "foo", - "object": "client0", - "expectEvents": [ - { - "client": "client0", - "events": [ - {} - ] - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json b/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json new file mode 100644 index 0000000000..6e26cfaa7e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-eventType-enum.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-eventType-enum", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid eventType value", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "foo", + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json b/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json new file mode 100644 index 0000000000..105bb001e5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-eventType-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-eventType-type", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid eventType type", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": 10, + "events": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json new file mode 100644 index 0000000000..b380219912 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_cmap_eventType.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_cmap_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is cmap", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "commandStartedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json new file mode 100644 index 0000000000..08446fe180 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_command_eventType.json @@ -0,0 +1,28 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_command_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is command", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "command", + "events": [ + { + "poolCreatedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json new file mode 100644 index 0000000000..c31efbb8b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-events_conflicts_with_default_eventType.json @@ -0,0 +1,27 @@ +{ + "description": "expectedEventsForClient-events_conflicts_with_default_eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "invalid event when eventType is unset", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "poolCreatedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json new file mode 100644 index 0000000000..b47e6be2a1 --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectError.json @@ -0,0 +1,19 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_expectError", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with expectError", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "expectError": { + "isError": true + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json new file mode 100644 index 0000000000..03c5a1dbbc --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_expectResult.json @@ -0,0 +1,17 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_expectResult", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with expectResult", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json new file mode 100644 index 0000000000..6745dff2eb --- /dev/null +++ b/test/unified-test-format/invalid/operation-ignoreResultAndError-conflicts_with_saveResultAsEntity.json @@ -0,0 +1,17 @@ +{ + "description": "operation-ignoreResultAndError-conflicts_with_saveResultAsEntity", + "schemaVersion": "1.3", + "tests": [ + { + "description": "ignoreResultAndError used with saveResultAsEntity", + "operations": [ + { + "name": "foo", + "object": "bar", + "ignoreResultAndError": true, + "saveResultAsEntity": "entity0" + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-auth-type.json b/test/unified-test-format/invalid/runOnRequirement-auth-type.json new file mode 100644 index 0000000000..e5475d079d --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-auth-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-auth-type", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "auth": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json b/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json new file mode 100644 index 0000000000..031fa539df --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-serverless-enum.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-serverless-enum", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-serverless-type.json b/test/unified-test-format/invalid/runOnRequirement-serverless-type.json new file mode 100644 index 0000000000..1aa41712f9 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-serverless-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-serverless-type", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": 1234 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json b/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json new file mode 100644 index 0000000000..5357da8d8d --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json @@ -0,0 +1,26 @@ +{ + "description": "storeEventsAsEntity-additionalProperties", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [ + "CommandStartedEvent" + ], + "foo": 0 + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json new file mode 100644 index 0000000000..ee99a55381 --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json @@ -0,0 +1,25 @@ +{ + "description": "storeEventsAsEntity-events-enum", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [ + "foo" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json new file mode 100644 index 0000000000..ddab042b1b --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json @@ -0,0 +1,23 @@ +{ + "description": "storeEventsAsEntity-events-minItems", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json new file mode 100644 index 0000000000..90b45918ce --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json @@ -0,0 +1,22 @@ +{ + "description": "storeEventsAsEntity-events-required", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events" + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json new file mode 100644 index 0000000000..1b920ebd5d --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json @@ -0,0 +1,23 @@ +{ + "description": "storeEventsAsEntity-events-type", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": 0 + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json new file mode 100644 index 0000000000..71387c5315 --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json @@ -0,0 +1,24 @@ +{ + "description": "storeEventsAsEntity-id-required", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "events": [ + "CommandStartedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json new file mode 100644 index 0000000000..4f52dc2533 --- /dev/null +++ b/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json @@ -0,0 +1,25 @@ +{ + "description": "storeEventsAsEntity-id-type", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": 0, + "events": [ + "CommandStartedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json b/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json new file mode 100644 index 0000000000..9799bb2f65 --- /dev/null +++ b/test/unified-test-format/valid-fail/assertNumberConnectionsCheckedOut.json @@ -0,0 +1,63 @@ +{ + "description": "assertNumberConnectionsCheckedOut", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + } + ], + "tests": [ + { + "description": "operation fails if client field is not specified", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "connections": 1 + } + } + ] + }, + { + "description": "operation fails if connections field is not specified", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ] + }, + { + "description": "operation fails if client entity does not exist", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client1" + } + } + ] + }, + { + "description": "operation fails if number of connections is incorrect", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 1 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-find-cursor.json b/test/unified-test-format/valid-fail/entity-find-cursor.json new file mode 100644 index 0000000000..f4c5bcdf48 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-find-cursor.json @@ -0,0 +1,62 @@ +{ + "description": "entity-find-cursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "createFindCursor fails if filter is not specified", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "saveResultAsEntity": "cursor0" + } + ] + }, + { + "description": "iterateUntilDocumentOrError fails if it references a nonexistent entity", + "operations": [ + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0" + } + ] + }, + { + "description": "close fails if it references a nonexistent entity", + "operations": [ + { + "name": "close", + "object": "cursor0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError.json b/test/unified-test-format/valid-fail/ignoreResultAndError.json new file mode 100644 index 0000000000..4457040b4f --- /dev/null +++ b/test/unified-test-format/valid-fail/ignoreResultAndError.json @@ -0,0 +1,72 @@ +{ + "description": "ignoreResultAndError", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "operation errors are not ignored if ignoreResultAndError is false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": false + } + ] + }, + { + "description": "malformed operation fails if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "foo": "bar" + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json b/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json new file mode 100644 index 0000000000..a9fc063f33 --- /dev/null +++ b/test/unified-test-format/valid-pass/assertNumberConnectionsCheckedOut.json @@ -0,0 +1,27 @@ +{ + "description": "assertNumberConnectionsCheckedOut", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + } + ], + "tests": [ + { + "description": "basic assertion succeeds", + "operations": [ + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client0", + "connections": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-client-cmap-events.json b/test/unified-test-format/valid-pass/entity-client-cmap-events.json new file mode 100644 index 0000000000..3209033def --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-client-cmap-events.json @@ -0,0 +1,71 @@ +{ + "description": "entity-client-cmap-events", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "events are captured during an operation", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-find-cursor.json b/test/unified-test-format/valid-pass/entity-find-cursor.json new file mode 100644 index 0000000000..85b8f69d7f --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-find-cursor.json @@ -0,0 +1,182 @@ +{ + "description": "entity-find-cursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ], + "tests": [ + { + "description": "cursors can be created, iterated, and closed", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 3 + } + }, + { + "name": "close", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0Name" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "ns": { + "$$type": "string" + }, + "firstBatch": { + "$$type": "array" + } + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursor": { + "id": { + "$$type": "long" + }, + "ns": { + "$$type": "string" + }, + "nextBatch": { + "$$type": "array" + } + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll0", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "reply": { + "cursorsKilled": { + "$$unsetOrMatches": { + "$$type": "array" + } + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json b/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json new file mode 100644 index 0000000000..fe308df965 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-eventType.json @@ -0,0 +1,126 @@ +{ + "description": "expectedEventsForClient-eventType", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "eventType can be set to command and cmap", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + } + ] + } + ] + }, + { + "description": "eventType defaults to command if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + }, + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/ignoreResultAndError.json b/test/unified-test-format/valid-pass/ignoreResultAndError.json new file mode 100644 index 0000000000..2e9b1c58ab --- /dev/null +++ b/test/unified-test-format/valid-pass/ignoreResultAndError.json @@ -0,0 +1,59 @@ +{ + "description": "ignoreResultAndError", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "operation errors are ignored if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json index 7bb072de88..0790d9b789 100644 --- a/test/unified-test-format/valid-pass/poc-crud.json +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -323,7 +323,8 @@ "topologies": [ "replicaset", "sharded-replicaset" - ] + ], + "serverless": "forbid" } ], "operations": [ diff --git a/test/unified_format.py b/test/unified_format.py index e402e20aed..48a1e49407 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -250,7 +250,7 @@ def _create_entity(self, entity_spec): ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) observe_sensitive_commands = spec.get( 'observeSensitiveCommands', False) - # TODO: SUPPORT storeEventsAsEntities + # TODO: PYTHON-2511 support storeEventsAsEntities if len(observe_events) or len(ignore_commands): ignore_commands = [cmd.lower() for cmd in ignore_commands] listener = EventListenerUtil( @@ -409,8 +409,8 @@ def _operation_type(self, spec, actual, key_to_compare): t for alias in spec for t in self.__type_alias_to_type(alias)]) else: permissible_types = self.__type_alias_to_type(spec) - self.test.assertIsInstance( - actual[key_to_compare], permissible_types) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertIsInstance(value, permissible_types) def _operation_matchesEntity(self, spec, actual, key_to_compare): expected_entity = self.test.entity_map[spec] @@ -822,6 +822,8 @@ def _collectionOperation_find(self, target, *args, **kwargs): def _collectionOperation_createFindCursor(self, target, *args, **kwargs): self.__raise_if_unsupported('find', target, Collection) + if 'filter' not in kwargs: + self.fail('createFindCursor requires a "filter" argument') cursor = NonLazyCursor(target.find(*args, **kwargs)) self.addCleanup(cursor.close) return cursor @@ -909,7 +911,9 @@ def run_entity_operation(self, spec): try: result = cmd(**dict(arguments)) except Exception as exc: - if ignore: + # Ignore all operation errors but to avoid masking bugs don't + # ignore things like TypeError and ValueError. + if ignore and isinstance(exc, (PyMongoError,)): return if expect_error: return self.process_error(exc, expect_error) From c93194a2e6a5a4e3caada9e7e288cb2ab373f4cd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 23 Jul 2021 12:21:42 -0700 Subject: [PATCH 0409/2111] PYTHON-2838 Skip getlasterror test on >=5.0 --- test/test_collection.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_collection.py b/test/test_collection.py index 896684330d..8a8f4aa686 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1549,6 +1549,7 @@ def test_acknowledged_delete(self): self.assertEqual(2, db.test.delete_many({}).deleted_count) self.assertEqual(0, db.test.delete_many({}).deleted_count) + @client_context.require_version_max(4, 9) def test_manual_last_error(self): coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) coll.insert_one({"x": 1}) From 9833ce0a03ae4d88b5aeaa076f4881b8699c7ce0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 26 Jul 2021 15:26:51 -0700 Subject: [PATCH 0410/2111] PYTHON-2802 Link to create command docs in create_collection (#678) PYTHON-2840 Document "let" support for aggregation. --- pymongo/collection.py | 37 ++++++++++++---------- pymongo/database.py | 72 +++++++++++++++++++++++-------------------- 2 files changed, 60 insertions(+), 49 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 2fa0d92886..8a4db9a87c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2175,21 +2175,6 @@ def aggregate(self, pipeline, session=None, **kwargs): """Perform an aggregation using the aggregation framework on this collection. - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. - - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. - The :meth:`aggregate` method obeys the :attr:`read_preference` of this :class:`Collection`, except when ``$out`` or ``$merge`` are used, in which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` @@ -2207,7 +2192,27 @@ def aggregate(self, pipeline, session=None, **kwargs): - `pipeline`: a list of aggregation pipeline stages - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): See list of options above. + - `**kwargs` (optional): extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. :Returns: A :class:`~pymongo.command_cursor.CommandCursor` over the result diff --git a/pymongo/database.py b/pymongo/database.py index 0a814fb74e..9d88945b89 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -248,22 +248,6 @@ def create_collection(self, name, codec_options=None, creation. :class:`~pymongo.errors.CollectionInvalid` will be raised if the collection already exists. - Options should be passed as keyword arguments to this method. Supported - options vary with MongoDB release. Some examples include: - - - "size": desired initial size for the collection (in - bytes). For capped collections this size is the max - size of the collection. - - "capped": if True, this is a capped collection - - "max": maximum number of objects if capped (optional) - - `timeseries`: a document specifying configuration options for - timeseries collections - - `expireAfterSeconds`: the number of seconds after which a - document in a timeseries collection expires - - See the MongoDB documentation for a full list of supported options by - server version. - :Parameters: - `name`: the name of the collection to create - `codec_options` (optional): An instance of @@ -286,7 +270,21 @@ def create_collection(self, name, codec_options=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): additional keyword arguments will - be passed as options for the create collection command + be passed as options for the `create collection command`_ + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. Valid options include, but are not + limited to: + + - ``size``: desired initial size for the collection (in + bytes). For capped collections this size is the max + size of the collection. + - ``capped``: if True, this is a capped collection + - ``max``: maximum number of objects if capped (optional) + - ``timeseries``: a document specifying configuration options for + timeseries collections + - ``expireAfterSeconds``: the number of seconds after which a + document in a timeseries collection expires .. versionchanged:: 3.11 This method is now supported inside multi-document transactions @@ -303,6 +301,9 @@ def create_collection(self, name, codec_options=None, .. versionchanged:: 2.2 Removed deprecated argument: options + + .. _create collection command: + https://docs.mongodb.com/manual/reference/command/create """ with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not @@ -331,21 +332,6 @@ def aggregate(self, pipeline, session=None, **kwargs): for operation in cursor: print(operation) - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. - - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - The :meth:`aggregate` method obeys the :attr:`read_preference` of this :class:`Database`, except when ``$out`` or ``$merge`` are used, in which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` @@ -361,7 +347,27 @@ def aggregate(self, pipeline, session=None, **kwargs): - `pipeline`: a list of aggregation pipeline stages - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): See list of options above. + - `**kwargs` (optional): extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. :Returns: A :class:`~pymongo.command_cursor.CommandCursor` over the result From f07da34f9738019a19d3410b8f7fd3e242fdb9b2 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Tue, 27 Jul 2021 16:35:09 -0700 Subject: [PATCH 0411/2111] PYTHON-2545 Test Atlas Serverless (#664) --- .evergreen/config.yml | 66 +++++++++++- .evergreen/run-tests.sh | 11 +- test/__init__.py | 101 +++++++++++------- test/crud/unified/aggregate-let.json | 5 +- .../unified/aggregate-out-readConcern.json | 5 +- test/crud/v1/read/aggregate-collation.json | 1 + test/crud/v1/read/aggregate-out.json | 1 + test/crud/v1/read/count-collation.json | 1 + test/crud/v1/read/distinct-collation.json | 1 + test/crud/v1/read/find-collation.json | 1 + test/crud/v1/write/bulkWrite-collation.json | 1 + test/crud/v1/write/deleteMany-collation.json | 1 + test/crud/v1/write/deleteOne-collation.json | 1 + .../v1/write/findOneAndDelete-collation.json | 1 + .../v1/write/findOneAndReplace-collation.json | 1 + .../v1/write/findOneAndUpdate-collation.json | 1 + test/crud/v1/write/replaceOne-collation.json | 1 + test/crud/v1/write/updateMany-collation.json | 1 + test/crud/v1/write/updateOne-collation.json | 1 + test/retryable_reads/mapReduce.json | 3 +- test/test_auth.py | 14 ++- test/test_change_stream.py | 3 + test/test_client_context.py | 9 ++ test/test_collation.py | 8 +- test/test_crud_unified.py | 3 +- test/test_crud_v1.py | 4 +- test/test_monitoring.py | 14 ++- test/test_pooling.py | 6 +- test/test_read_concern.py | 9 +- test/test_read_preferences.py | 5 +- test/test_retryable_reads.py | 23 +++- test/test_retryable_writes.py | 3 + test/test_session.py | 21 +++- test/test_transactions.py | 1 + test/test_versioned_api.py | 1 + test/unified_format.py | 18 +++- test/utils.py | 26 ++++- 37 files changed, 291 insertions(+), 82 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 70eb6c52be..66b551de9c 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -415,6 +415,12 @@ functions: export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" fi + if [ -n "${test_serverless}" ]; then + export TEST_SERVERLESS=1 + export MONGODB_URI="${MONGODB_URI}" + export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" + export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" + fi PYTHON_BINARY=${PYTHON_BINARY} \ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ @@ -836,9 +842,41 @@ post: - func: "cleanup" - func: "teardown_docker" -tasks: - +task_groups: + - name: serverless_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: "fetch source" + - func: "prepare resources" + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + set +o xtrace + SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ + SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ + SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ + bash ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh + - command: expansions.update + params: + file: serverless-expansion.yml + teardown_group: + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + set +o xtrace + SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ + SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ + SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ + SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \ + bash ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh + tasks: + - ".serverless" +tasks: # Wildcard task. Do you need to find out what tools are available and where? # Throw it here, and execute this task on all buildvariants - name: getdata @@ -1184,6 +1222,11 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-serverless" + tags: ["serverless"] + commands: + - func: "run tests" + - name: "test-enterprise-auth" tags: ["enterprise-auth"] commands: @@ -2040,6 +2083,15 @@ axes: test_loadbalancer: true batchtime: 10080 # 7 days + - id: serverless + display_name: "Serverless" + values: + - id: "enabled" + display_name: "Serverless" + variables: + test_serverless: true + batchtime: 10080 # 7 days + buildvariants: - matrix_name: "tests-all" matrix_spec: @@ -2473,6 +2525,16 @@ buildvariants: tasks: - name: "atlas-connect" +- matrix_name: "serverless" + matrix_spec: + platform: awslinux + python-version: *amazon1-pythons + auth-ssl: auth-ssl + serverless: "*" + display_name: "Serverless ${python-version} ${platform}" + tasks: + - "serverless_task_group" + - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-16.04 diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index d45e9d5235..ff6c54ac24 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -39,12 +39,15 @@ if [ -n "$MONGODB_API_VERSION" ]; then fi if [ "$AUTH" != "noauth" ]; then - if [ -z "$DATA_LAKE" ]; then - export DB_USER="bob" - export DB_PASSWORD="pwd123" - else + if [ ! -z "$DATA_LAKE" ]; then export DB_USER="mhuser" export DB_PASSWORD="pencil" + elif [ ! -z "$TEST_SERVERLESS" ]; then + export DB_USER=$SERVERLESS_ATLAS_USER + export DB_PASSWORD=$SERVERLESS_ATLAS_PASSWORD + else + export DB_USER="bob" + export DB_PASSWORD="pwd123" fi fi diff --git a/test/__init__.py b/test/__init__.py index 49d20ccfb8..54761fb55d 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -94,6 +94,7 @@ COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) +TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") if TEST_LOADBALANCER: @@ -104,6 +105,13 @@ host, port = res['nodelist'][0] db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd +elif TEST_SERVERLESS: + res = parse_uri(os.environ["MONGODB_URI"]) + host, port = res['nodelist'].pop(0) + additional_serverless_mongoses = res['nodelist'] + db_user = res['username'] or db_user + db_pwd = res['password'] or db_pwd + TLS_OPTIONS = {'tls': True} def is_server_resolvable(): @@ -231,6 +239,7 @@ def __init__(self): self.conn_lock = threading.Lock() self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER + self.serverless = TEST_SERVERLESS if self.load_balancer: self.default_client_options["loadBalanced"] = True if COMPRESSORS: @@ -309,22 +318,26 @@ def _init_client(self): if self.client: self.connected = True - try: - self.cmd_line = self.client.admin.command('getCmdLineOpts') - except pymongo.errors.OperationFailure as e: - msg = e.details.get('errmsg', '') - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: - # Unauthorized. - self.auth_enabled = True - else: - raise + if self.serverless: + self.auth_enabled = True else: - self.auth_enabled = self._server_started_with_auth() + try: + self.cmd_line = self.client.admin.command('getCmdLineOpts') + except pymongo.errors.OperationFailure as e: + msg = e.details.get('errmsg', '') + if e.code == 13 or 'unauthorized' in msg or 'login' in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() if self.auth_enabled: - # See if db_user already exists. - if not self._check_user_provided(): - _create_user(self.client.admin, db_user, db_pwd) + if not self.serverless: + # See if db_user already exists. + if not self._check_user_provided(): + _create_user(self.client.admin, db_user, db_pwd) self.client = self._connect( host, port, username=db_user, password=db_pwd, @@ -334,10 +347,13 @@ def _init_client(self): # May not have this if OperationFailure was raised earlier. self.cmd_line = self.client.admin.command('getCmdLineOpts') - self.server_status = self.client.admin.command('serverStatus') - if self.storage_engine == "mmapv1": - # MMAPv1 does not support retryWrites=True. - self.default_client_options['retryWrites'] = False + if self.serverless: + self.server_status = {} + else: + self.server_status = self.client.admin.command('serverStatus') + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options['retryWrites'] = False ismaster = self.ismaster self.sessions_enabled = 'logicalSessionTimeoutMinutes' in ismaster @@ -374,33 +390,41 @@ def _init_client(self): self.nodes = set([(host, port)]) self.w = len(ismaster.get("hosts", [])) or 1 self.version = Version.from_client(self.client) - self.server_parameters = self.client.admin.command( - 'getParameter', '*') - if 'enableTestCommands=1' in self.cmd_line['argv']: + if TEST_SERVERLESS: self.test_commands_enabled = True - elif 'parsed' in self.cmd_line: - params = self.cmd_line['parsed'].get('setParameter', []) - if 'enableTestCommands=1' in params: + self.has_ipv6 = False + else: + self.server_parameters = self.client.admin.command( + 'getParameter', '*') + if 'enableTestCommands=1' in self.cmd_line['argv']: self.test_commands_enabled = True - else: - params = self.cmd_line['parsed'].get('setParameter', {}) - if params.get('enableTestCommands') == '1': + elif 'parsed' in self.cmd_line: + params = self.cmd_line['parsed'].get('setParameter', []) + if 'enableTestCommands=1' in params: self.test_commands_enabled = True + else: + params = self.cmd_line['parsed'].get('setParameter', {}) + if params.get('enableTestCommands') == '1': + self.test_commands_enabled = True + self.has_ipv6 = self._server_started_with_ipv6() self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid') - self.has_ipv6 = self._server_started_with_ipv6() if self.is_mongos: - # Check for another mongos on the next port. - address = self.client.address - next_address = address[0], address[1] + 1 - self.mongoses.append(address) - mongos_client = self._connect(*next_address, - **self.default_client_options) - if mongos_client: - ismaster = mongos_client.admin.command('ismaster') - if ismaster.get('msg') == 'isdbgrid': - self.mongoses.append(next_address) + if self.serverless: + self.mongoses.append(self.client.address) + self.mongoses.extend(additional_serverless_mongoses) + else: + # Check for another mongos on the next port. + address = self.client.address + next_address = address[0], address[1] + 1 + self.mongoses.append(address) + mongos_client = self._connect( + *next_address, **self.default_client_options) + if mongos_client: + ismaster = mongos_client.admin.command('ismaster') + if ismaster.get('msg') == 'isdbgrid': + self.mongoses.append(next_address) def init(self): with self.conn_lock: @@ -891,6 +915,9 @@ def setUpClass(cls): if (client_context.load_balancer and not getattr(cls, 'RUN_ON_LOAD_BALANCER', False)): raise SkipTest('this test does not support load balancers') + if (client_context.serverless and + not getattr(cls, 'RUN_ON_SERVERLESS', False)): + raise SkipTest('this test does not support serverless') cls.client = client_context.client cls.db = cls.client.pymongo_test if client_context.auth_enabled: diff --git a/test/crud/unified/aggregate-let.json b/test/crud/unified/aggregate-let.json index 4ce8256cb7..d3b76bd65a 100644 --- a/test/crud/unified/aggregate-let.json +++ b/test/crud/unified/aggregate-let.json @@ -1,6 +1,6 @@ { "description": "aggregate-let", - "schemaVersion": "1.0", + "schemaVersion": "1.4", "createEntities": [ { "client": { @@ -310,7 +310,8 @@ "description": "Aggregate to collection with let option", "runOnRequirements": [ { - "minServerVersion": "5.0" + "minServerVersion": "5.0", + "serverless": "forbid" } ], "operations": [ diff --git a/test/crud/unified/aggregate-out-readConcern.json b/test/crud/unified/aggregate-out-readConcern.json index ae1beedde5..e293457c1c 100644 --- a/test/crud/unified/aggregate-out-readConcern.json +++ b/test/crud/unified/aggregate-out-readConcern.json @@ -1,13 +1,14 @@ { "description": "aggregate-out-readConcern", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.1.0", "topologies": [ "replicaset", "sharded" - ] + ], + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/v1/read/aggregate-collation.json b/test/crud/v1/read/aggregate-collation.json index 85662a442f..d958e447bf 100644 --- a/test/crud/v1/read/aggregate-collation.json +++ b/test/crud/v1/read/aggregate-collation.json @@ -6,6 +6,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Aggregate with collation", diff --git a/test/crud/v1/read/aggregate-out.json b/test/crud/v1/read/aggregate-out.json index 4e33f9288f..c195e163e0 100644 --- a/test/crud/v1/read/aggregate-out.json +++ b/test/crud/v1/read/aggregate-out.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "2.6", + "serverless": "forbid", "tests": [ { "description": "Aggregate with $out", diff --git a/test/crud/v1/read/count-collation.json b/test/crud/v1/read/count-collation.json index 6f75282fe0..7d61508493 100644 --- a/test/crud/v1/read/count-collation.json +++ b/test/crud/v1/read/count-collation.json @@ -6,6 +6,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Count documents with collation", diff --git a/test/crud/v1/read/distinct-collation.json b/test/crud/v1/read/distinct-collation.json index 0af0c67cb7..984991a43b 100644 --- a/test/crud/v1/read/distinct-collation.json +++ b/test/crud/v1/read/distinct-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Distinct with a collation", diff --git a/test/crud/v1/read/find-collation.json b/test/crud/v1/read/find-collation.json index 53d0e94900..4e56c05253 100644 --- a/test/crud/v1/read/find-collation.json +++ b/test/crud/v1/read/find-collation.json @@ -6,6 +6,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "Find with a collation", diff --git a/test/crud/v1/write/bulkWrite-collation.json b/test/crud/v1/write/bulkWrite-collation.json index 8e9d1bcb1a..bc90aa8172 100644 --- a/test/crud/v1/write/bulkWrite-collation.json +++ b/test/crud/v1/write/bulkWrite-collation.json @@ -22,6 +22,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "BulkWrite with delete operations and collation", diff --git a/test/crud/v1/write/deleteMany-collation.json b/test/crud/v1/write/deleteMany-collation.json index d17bf3bcb9..fce75e488a 100644 --- a/test/crud/v1/write/deleteMany-collation.json +++ b/test/crud/v1/write/deleteMany-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "DeleteMany when many documents match with collation", diff --git a/test/crud/v1/write/deleteOne-collation.json b/test/crud/v1/write/deleteOne-collation.json index 2f7f921130..9bcef411ef 100644 --- a/test/crud/v1/write/deleteOne-collation.json +++ b/test/crud/v1/write/deleteOne-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "DeleteOne when many documents matches with collation", diff --git a/test/crud/v1/write/findOneAndDelete-collation.json b/test/crud/v1/write/findOneAndDelete-collation.json index 1ff37d2e88..32480da842 100644 --- a/test/crud/v1/write/findOneAndDelete-collation.json +++ b/test/crud/v1/write/findOneAndDelete-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "FindOneAndDelete when one document matches with collation", diff --git a/test/crud/v1/write/findOneAndReplace-collation.json b/test/crud/v1/write/findOneAndReplace-collation.json index babb2f7c11..9b3c25005b 100644 --- a/test/crud/v1/write/findOneAndReplace-collation.json +++ b/test/crud/v1/write/findOneAndReplace-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "FindOneAndReplace when one document matches with collation returning the document after modification", diff --git a/test/crud/v1/write/findOneAndUpdate-collation.json b/test/crud/v1/write/findOneAndUpdate-collation.json index 04c1fe73ec..8abab7bd6b 100644 --- a/test/crud/v1/write/findOneAndUpdate-collation.json +++ b/test/crud/v1/write/findOneAndUpdate-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "FindOneAndUpdate when many documents match with collation returning the document before modification", diff --git a/test/crud/v1/write/replaceOne-collation.json b/test/crud/v1/write/replaceOne-collation.json index a668fe7383..fa4cbe9970 100644 --- a/test/crud/v1/write/replaceOne-collation.json +++ b/test/crud/v1/write/replaceOne-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "ReplaceOne when one document matches with collation", diff --git a/test/crud/v1/write/updateMany-collation.json b/test/crud/v1/write/updateMany-collation.json index 3cb49f2298..8becfd806b 100644 --- a/test/crud/v1/write/updateMany-collation.json +++ b/test/crud/v1/write/updateMany-collation.json @@ -14,6 +14,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "UpdateMany when many documents match with collation", diff --git a/test/crud/v1/write/updateOne-collation.json b/test/crud/v1/write/updateOne-collation.json index c49112d519..3afdb83e0f 100644 --- a/test/crud/v1/write/updateOne-collation.json +++ b/test/crud/v1/write/updateOne-collation.json @@ -10,6 +10,7 @@ } ], "minServerVersion": "3.4", + "serverless": "forbid", "tests": [ { "description": "UpdateOne when one document matches with collation", diff --git a/test/retryable_reads/mapReduce.json b/test/retryable_reads/mapReduce.json index e76aa76cbb..9327a23052 100644 --- a/test/retryable_reads/mapReduce.json +++ b/test/retryable_reads/mapReduce.json @@ -12,7 +12,8 @@ "topology": [ "sharded", "load-balanced" - ] + ], + "serverless": "forbid" } ], "database_name": "retryable-reads-tests", diff --git a/test/test_auth.py b/test/test_auth.py index 49081c7cce..f7b13a1cf3 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -27,7 +27,7 @@ from pymongo.errors import OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP -from test import client_context, SkipTest, unittest, Version +from test import client_context, IntegrationTest, SkipTest, unittest, Version from test.utils import (delay, ignore_deprecations, single_client, @@ -303,11 +303,12 @@ def auth_string(user, password): self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ismaster') -class TestSCRAMSHA1(unittest.TestCase): +class TestSCRAMSHA1(IntegrationTest): @client_context.require_auth @client_context.require_version_min(2, 7, 2) def setUp(self): + super(TestSCRAMSHA1, self).setUp() # Before 2.7.7, SCRAM-SHA-1 had to be enabled from the command line. if client_context.version < Version(2, 7, 7): cmd_line = client_context.cmd_line @@ -321,6 +322,7 @@ def setUp(self): def tearDown(self): client_context.drop_user('pymongo_test', 'user') + super(TestSCRAMSHA1, self).tearDown() def test_scram_sha1(self): host, port = client_context.host, client_context.port @@ -343,11 +345,12 @@ def test_scram_sha1(self): # https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation -class TestSCRAM(unittest.TestCase): +class TestSCRAM(IntegrationTest): @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): + super(TestSCRAM, self).setUp() self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS monitoring._SENSITIVE_COMMANDS = set([]) self.listener = WhiteListEventListener("saslStart") @@ -356,6 +359,7 @@ def tearDown(self): monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS client_context.client.testscram.command("dropAllUsersFromDatabase") client_context.client.drop_database("testscram") + super(TestSCRAM, self).tearDown() def test_scram_skip_empty_exchange(self): listener = WhiteListEventListener("saslStart", "saslContinue") @@ -571,10 +575,11 @@ def test_scram_threaded(self): self.assertTrue(thread.success) -class TestAuthURIOptions(unittest.TestCase): +class TestAuthURIOptions(IntegrationTest): @client_context.require_auth def setUp(self): + super(TestAuthURIOptions, self).setUp() client_context.create_user('admin', 'admin', 'pass') client_context.create_user( 'pymongo_test', 'user', 'pass', ['userAdmin', 'readWrite']) @@ -582,6 +587,7 @@ def setUp(self): def tearDown(self): client_context.drop_user('pymongo_test', 'user') client_context.drop_user('admin', 'admin') + super(TestAuthURIOptions, self).tearDown() def test_uri_options(self): # Test default to admin diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 669a819aa6..8c1bec1a68 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1046,14 +1046,17 @@ class TestAllLegacyScenarios(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): + super(TestAllLegacyScenarios, cls).setUpClass() cls.listener = WhiteListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod def tearDownClass(cls): cls.client.close() + super(TestAllLegacyScenarios, cls).tearDownClass() def setUp(self): + super(TestAllLegacyScenarios, self).setUp() self.listener.results.clear() def setUpCluster(self, scenario_dict): diff --git a/test/test_client_context.py b/test/test_client_context.py index 512347daa6..52a1ee92b9 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -30,6 +30,15 @@ def test_must_connect(self): 'PYMONGO_MUST_CONNECT is set. Failed attempts:\n%s' % (client_context.connection_attempt_info(),)) + def test_serverless(self): + if 'TEST_SERVERLESS' not in os.environ: + raise SkipTest('TEST_SERVERLESS is not set') + + self.assertTrue(client_context.connected and client_context.serverless, + 'client context must be connected to serverless when ' + 'TEST_SERVERLESS is set. Failed attempts:\n%s' % + (client_context.connection_attempt_info(),)) + def test_enableTestCommands_is_disabled(self): if 'PYMONGO_DISABLE_TEST_COMMANDS' not in os.environ: raise SkipTest('PYMONGO_DISABLE_TEST_COMMANDS is not set') diff --git a/test/test_collation.py b/test/test_collation.py index 4954af7bdd..3aca63d47e 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -25,7 +25,7 @@ from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, ReplaceOne, UpdateMany, UpdateOne) from pymongo.write_concern import WriteConcern -from test import unittest, client_context +from test import client_context, IntegrationTest, unittest from test.utils import EventListener, ignore_deprecations, rs_or_single_client @@ -88,11 +88,11 @@ def wrapper(self, *args, **kwargs): return wrapper -class TestCollation(unittest.TestCase): - +class TestCollation(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): + super(TestCollation, cls).setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -106,9 +106,11 @@ def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None cls.client.close() + super(TestCollation, cls).tearDownClass() def tearDown(self): self.listener.results.clear() + super(TestCollation, self).tearDown() def last_command_started(self): return self.listener.results['started'][-1].command diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py index a2aece6ff0..a435c1caa1 100644 --- a/test/test_crud_unified.py +++ b/test/test_crud_unified.py @@ -28,7 +28,8 @@ os.path.dirname(os.path.realpath(__file__)), 'crud', 'unified') # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) +globals().update(generate_test_classes( + TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) if __name__ == "__main__": unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 6650ae29d0..17f77a1635 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -32,7 +32,7 @@ UpdateOne, UpdateMany) -from test import unittest, IntegrationTest +from test import client_context, unittest, IntegrationTest from test.utils import (camel_to_snake, camel_to_upper_camel, camel_to_snake_args, drop_collections, TestCreator) @@ -42,7 +42,7 @@ class TestAllScenarios(IntegrationTest): - pass + RUN_ON_SERVERLESS = True def check_result(self, expected_result, result): diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 464231b524..a6fe5c9df5 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -32,7 +32,7 @@ from pymongo.write_concern import WriteConcern from test import (client_context, client_knobs, - PyMongoTestCase, + IntegrationTest, sanitize_cmd, unittest) from test.utils import (EventListener, @@ -42,11 +42,12 @@ wait_until) -class TestCommandMonitoring(PyMongoTestCase): +class TestCommandMonitoring(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): + super(TestCommandMonitoring, cls).setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client( event_listeners=[cls.listener], @@ -55,9 +56,11 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.client.close() + super(TestCommandMonitoring, cls).tearDownClass() def tearDown(self): self.listener.results.clear() + super(TestCommandMonitoring, self).tearDown() def test_started_simple(self): self.client.pymongo_test.command('ismaster') @@ -1129,11 +1132,12 @@ def test_sensitive_commands(self): self.assertEqual({}, succeeded.reply) -class TestGlobalListener(PyMongoTestCase): +class TestGlobalListener(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): + super(TestGlobalListener, cls).setUpClass() cls.listener = EventListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) @@ -1146,8 +1150,10 @@ def setUpClass(cls): def tearDownClass(cls): monitoring._LISTENERS = cls.saved_listeners cls.client.close() + super(TestGlobalListener, cls).tearDownClass() def setUp(self): + super(TestGlobalListener, self).setUp() self.listener.results.clear() def test_simple(self): @@ -1167,7 +1173,7 @@ def test_simple(self): self.assertTrue(isinstance(started.request_id, int)) -class TestEventClasses(PyMongoTestCase): +class TestEventClasses(unittest.TestCase): def test_command_event_repr(self): request_id, connection_id, operation_id = 1, ('localhost', 27017), 2 diff --git a/test/test_pooling.py b/test/test_pooling.py index 11fcb1ce7a..5746f56d17 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -34,7 +34,7 @@ from pymongo.pool import Pool, PoolOptions from pymongo.socket_checker import SocketChecker -from test import client_context, unittest +from test import client_context, IntegrationTest, unittest from test.utils import (get_pool, joinall, delay, @@ -154,10 +154,11 @@ def run_cases(client, cases): assert t.passed, "%s.run() threw an exception" % repr(t) -class _TestPoolingBase(unittest.TestCase): +class _TestPoolingBase(IntegrationTest): """Base class for all connection-pool tests.""" def setUp(self): + super(_TestPoolingBase, self).setUp() self.c = rs_or_single_client() db = self.c[DB] db.unique.drop() @@ -167,6 +168,7 @@ def setUp(self): def tearDown(self): self.c.close() + super(_TestPoolingBase, self).tearDown() def create_pool( self, diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 2eef4cb1d9..0dc9b609bc 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -15,18 +15,19 @@ """Test the read_concern module.""" from bson.son import SON -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.errors import ConfigurationError from pymongo.read_concern import ReadConcern -from test import client_context, PyMongoTestCase +from test import client_context, IntegrationTest from test.utils import single_client, rs_or_single_client, OvertCommandListener -class TestReadConcern(PyMongoTestCase): +class TestReadConcern(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): + super(TestReadConcern, cls).setUpClass() cls.listener = OvertCommandListener() cls.client = single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -36,9 +37,11 @@ def setUpClass(cls): def tearDownClass(cls): cls.client.close() client_context.client.pymongo_test.drop_collection('coll') + super(TestReadConcern, cls).tearDownClass() def tearDown(self): self.listener.results.clear() + super(TestReadConcern, self).tearDown() def test_read_concern(self): rc = ReadConcern() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index d70ad7e076..6f6634234e 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -49,7 +49,7 @@ from test.version import Version -class TestSelections(unittest.TestCase): +class TestSelections(IntegrationTest): @client_context.require_connection def test_bool(self): @@ -471,7 +471,8 @@ def test_moving_average(self): avg.add_sample(30) self.assertAlmostEqual(15.6, avg.get()) -class TestMongosAndReadPreference(unittest.TestCase): + +class TestMongosAndReadPreference(IntegrationTest): def test_read_preference_document(self): diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 381f2c1d80..1995b5dc35 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -52,6 +52,7 @@ def test_uri(self): class TestSpec(SpecRunner): RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True @classmethod @client_context.require_failCommand_fail_point @@ -68,11 +69,25 @@ def maybe_skip_scenario(self, test): if name.lower() in test['description'].lower(): self.skipTest('PyMongo does not support %s' % (name,)) - # Skip changeStream related tests on MMAPv1. + # Serverless does not support $out and collation. + if client_context.serverless: + for operation in test['operations']: + if operation['name'] == 'aggregate': + for stage in operation['arguments']['pipeline']: + if "$out" in stage: + self.skipTest( + "MongoDB Serverless does not support $out") + if "collation" in operation['arguments']: + self.skipTest( + "MongoDB Serverless does not support collations") + + # Skip changeStream related tests on MMAPv1 and serverless. test_name = self.id().rsplit('.')[-1] - if ('changestream' in test_name.lower() and - client_context.storage_engine == 'mmapv1'): - self.skipTest("MMAPv1 does not support change streams.") + if 'changestream' in test_name.lower(): + if client_context.storage_engine == 'mmapv1': + self.skipTest("MMAPv1 does not support change streams.") + if client_context.serverless: + self.skipTest("Serverless does not support change streams.") def get_scenario_coll_name(self, scenario_def): """Override a test's collection name to support GridFS tests.""" diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index db2e1455db..0368f97a6c 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -55,6 +55,7 @@ class TestAllScenarios(SpecRunner): RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True def get_object_name(self, op): return op.get('object', 'collection') @@ -123,6 +124,7 @@ def non_retryable_single_statement_ops(coll): class IgnoreDeprecationsTest(IntegrationTest): RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True @classmethod def setUpClass(cls): @@ -420,6 +422,7 @@ def test_batch_splitting_retry_fails(self): class TestWriteConcernError(IntegrationTest): RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True @classmethod @client_context.require_replica_set diff --git a/test/test_session.py b/test/test_session.py index 41837ab217..df1865c4c5 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -958,16 +958,26 @@ def test_writes_do_not_include_read_concern(self): lambda coll, session: coll.drop_index("foo_1", session=session)) self._test_no_read_concern( lambda coll, session: coll.drop_indexes(session=session)) + + # Not a write, but explain also doesn't support readConcern. self._test_no_read_concern( - lambda coll, session: coll.map_reduce( - 'function() {}', 'function() {}', 'mrout', session=session)) + lambda coll, session: coll.find({}, session=session).explain()) - # They are not writes, but currentOp and explain also don't support - # readConcern. + @client_context.require_no_standalone + @unittest.skipIf(client_context.serverless, + "Serverless does not support currentOp") + def test_writes_do_not_include_read_concern_current_op(self): + # Not a write, but currentOp also doesn't support readConcern. self._test_no_read_concern( lambda coll, session: coll.database.current_op(session=session)) + + @client_context.require_no_standalone + @unittest.skipIf(client_context.serverless, + "Serverless does not support mapReduce") + def test_writes_do_not_include_read_concern_map_reduce(self): self._test_no_read_concern( - lambda coll, session: coll.find({}, session=session).explain()) + lambda coll, session: coll.map_reduce( + 'function() {}', 'function() {}', 'mrout', session=session)) @client_context.require_no_standalone @client_context.require_version_max(4, 1, 0) @@ -1164,6 +1174,7 @@ def insert_and_aggregate(): class TestSpec(SpecRunner): + RUN_ON_SERVERLESS = True # Location of JSON test specifications. TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'sessions', 'legacy') diff --git a/test/test_transactions.py b/test/test_transactions.py index b9c292caa7..1e0318dd74 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -76,6 +76,7 @@ def maybe_skip_scenario(self, test): class TestTransactions(TransactionsBase): + RUN_ON_SERVERLESS = True @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index f092c434bc..44fc89ac73 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -34,6 +34,7 @@ class TestServerApi(IntegrationTest): RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True def test_server_api_defaults(self): api = ServerApi(ServerApiVersion.V1) diff --git a/test/unified_format.py b/test/unified_format.py index 48a1e49407..3c8c0ff04f 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -117,6 +117,14 @@ def is_run_on_requirement_satisfied(requirement): max_version_satisfied = Version.from_string( req_max_server_version) >= server_version + serverless = requirement.get('serverless') + if serverless == "require": + serverless_satisfied = client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + params_satisfied = True params = requirement.get('serverParameters') if params: @@ -135,7 +143,8 @@ def is_run_on_requirement_satisfied(requirement): auth_satisfied = not client_context.auth_enabled return (topology_satisfied and min_version_satisfied and - max_version_satisfied and params_satisfied and auth_satisfied) + max_version_satisfied and serverless_satisfied and + params_satisfied and auth_satisfied) def parse_collection_or_database_options(options): @@ -1154,7 +1163,8 @@ def test_case(self): def generate_test_classes(test_path, module=__name__, class_name_prefix='', expected_failures=[], - bypass_test_generation_errors=False): + bypass_test_generation_errors=False, + **kwargs): """Method for generating test classes. Returns a dictionary where keys are the names of test classes and values are the test class objects.""" test_klasses = {} @@ -1195,10 +1205,12 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): raise ValueError( "test file '%s' has unsupported schemaVersion '%s'" % ( fpath, schema_version)) + module_dict = {'__module__': module} + module_dict.update(kwargs) test_klasses[class_name] = type( class_name, (mixin_class, test_base_class_factory(scenario_def),), - {'__module__': module}) + module_dict) except Exception: if bypass_test_generation_errors: continue diff --git a/test/utils.py b/test/utils.py index ae75d76472..2301389c36 100644 --- a/test/utils.py +++ b/test/utils.py @@ -24,6 +24,7 @@ import sys import threading import time +import unittest import warnings from collections import abc, defaultdict @@ -391,6 +392,18 @@ def _ensure_min_max_server_version(self, scenario_def, method): if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) + if 'serverless' in scenario_def: + serverless = scenario_def['serverless'] + if serverless == "require": + serverless_satisfied = client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + method = unittest.skipUnless( + serverless_satisfied, + "Serverless requirement not satisfied")(method) + return method @staticmethod @@ -423,6 +436,16 @@ def valid_auth_enabled(run_on_req): return not client_context.auth_enabled return True + @staticmethod + def serverless_ok(run_on_req): + serverless = run_on_req['serverless'] + if serverless == "require": + return client_context.serverless + elif serverless == "forbid": + return not client_context.serverless + else: # unset or "allow" + return True + def should_run_on(self, scenario_def): run_on = scenario_def.get('runOn', []) if not run_on: @@ -433,7 +456,8 @@ def should_run_on(self, scenario_def): if (self.valid_topology(req) and self.min_server_version(req) and self.max_server_version(req) and - self.valid_auth_enabled(req)): + self.valid_auth_enabled(req) and + self.serverless_ok(req)): return True return False From 7acb58be81d2a05c8d1142cda17c7cde0dd70cae Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Jul 2021 11:30:16 -0700 Subject: [PATCH 0412/2111] PYTHON-2750 Don't mark arbiter pools ready unless directly connected (#682) --- pymongo/topology.py | 10 ++++++---- test/test_client.py | 6 ++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/pymongo/topology.py b/pymongo/topology.py index 3b5437a322..dd8315a6db 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -281,8 +281,12 @@ def _process_change(self, server_description, reset_pool=False): # This is a stale isMaster response. Ignore it. return + new_td = updated_topology_description( + self._description, server_description) # CMAP: Ensure the pool is "ready" when the server is selectable. - if server_description.is_server_type_known: + if (server_description.is_readable + or (server_description.is_server_type_known and + new_td.topology_type == TOPOLOGY_TYPE.Single)): server = self._servers.get(server_description.address) if server: server.pool.ready() @@ -295,9 +299,7 @@ def _process_change(self, server_description, reset_pool=False): (sd_old, server_description, server_description.address, self._topology_id))) - self._description = updated_topology_description( - self._description, server_description) - + self._description = new_td self._update_servers() self._receive_cluster_time_no_lock(server_description.cluster_time) diff --git a/test/test_client.py b/test/test_client.py index 26a0af5ae1..2f23adcf92 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -2044,6 +2044,9 @@ def test_rs_client_does_not_maintain_pool_to_arbiters(self): # Assert that we do not create connections to unknown servers. arbiter = c._topology.get_server_by_address(('d', 4)) self.assertFalse(arbiter.pool.sockets) + # Arbiter pool is not marked ready. + self.assertEqual( + listener.event_count(monitoring.PoolReadyEvent), 2) @client_context.require_connection def test_direct_client_maintains_pool_to_arbiter(self): @@ -2068,6 +2071,9 @@ def test_direct_client_maintains_pool_to_arbiter(self): listener.event_count(monitoring.ConnectionCreatedEvent), 1) arbiter = c._topology.get_server_by_address(('c', 3)) self.assertEqual(len(arbiter.pool.sockets), 1) + # Arbiter pool is marked ready. + self.assertEqual( + listener.event_count(monitoring.PoolReadyEvent), 1) if __name__ == "__main__": From 775bf923c981890e1ad18f5cda023ecd28e93bad Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Jul 2021 11:31:07 -0700 Subject: [PATCH 0413/2111] PYTHON-2699 Emit PoolReadyEvent before resuming the background thread (#683) --- pymongo/pool.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 654dbf3ab0..58f1652e05 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1199,10 +1199,12 @@ def __init__(self, address, options, handshake=True): self.ntxns = 0 def ready(self): - old_state, self.state = self.state, PoolState.READY - if old_state != PoolState.READY: - if self.enabled_for_cmap: - self.opts.event_listeners.publish_pool_ready(self.address) + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + self.opts.event_listeners.publish_pool_ready(self.address) @property def closed(self): @@ -1284,8 +1286,10 @@ def remove_stale_sockets(self, reference_generation, all_credentials): `generation` at the point in time this operation was requested on the pool. """ - if self.state != PoolState.READY: - return + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + return if self.opts.max_idle_time_seconds is not None: with self.lock: From a949142480856ebce0f9b769ba61b011564e9f2a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Jul 2021 14:45:43 -0700 Subject: [PATCH 0414/2111] PYTHON-2816 Generate pip < 20.3 compatible manylinux wheels (#679) Split old/new manylinux wheel generation into two tasks. --- .evergreen/build-manylinux-internal.sh | 4 +- .evergreen/build-manylinux.sh | 31 +++++++++++---- .evergreen/config.yml | 54 ++++++++++++++++++-------- 3 files changed, 63 insertions(+), 26 deletions(-) diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 2a1f169482..4016782e79 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -19,9 +19,9 @@ for PYTHON in /opt/python/*/bin/python; do $PYTHON setup.py bdist_wheel rm -rf build - # Audit wheels and write multilinux tag + # Audit wheels and write manylinux tag for whl in dist/*.whl; do - # Skip already built manylinux1 wheels. + # Skip already built manylinux wheels. if [[ "$whl" != *"manylinux"* ]]; then auditwheel repair $whl -w dist rm $whl diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index a100a6fc80..61b8b840d3 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -1,14 +1,29 @@ #!/bin/bash -ex docker version - -images=(quay.io/pypa/manylinux1_x86_64 \ - quay.io/pypa/manylinux1_i686 \ - quay.io/pypa/manylinux2014_x86_64 \ - quay.io/pypa/manylinux2014_i686 \ - quay.io/pypa/manylinux2014_aarch64 \ - quay.io/pypa/manylinux2014_ppc64le \ - quay.io/pypa/manylinux2014_s390x) +# manylinux1 2021-05-05-b64d921 and manylinux2014 2021-05-05-1ac6ef3 were +# the last releases to generate pip < 20.3 compatible wheels. After that +# auditwheel was upgraded to v4 which produces PEP 600 manylinux_x_y wheels +# which requires pip >= 20.3. We use the older docker image to support older +# pip versions. +BUILD_WITH_TAG="$1" +if [ -n "$BUILD_WITH_TAG" ]; then + images=(quay.io/pypa/manylinux1_x86_64:2021-05-05-b64d921 \ + quay.io/pypa/manylinux1_i686:2021-05-05-b64d921 \ + quay.io/pypa/manylinux2014_x86_64:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_i686:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_aarch64:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_ppc64le:2021-05-05-1ac6ef3 \ + quay.io/pypa/manylinux2014_s390x:2021-05-05-1ac6ef3) +else + images=(quay.io/pypa/manylinux1_x86_64 \ + quay.io/pypa/manylinux1_i686 \ + quay.io/pypa/manylinux2014_x86_64 \ + quay.io/pypa/manylinux2014_i686 \ + quay.io/pypa/manylinux2014_aarch64 \ + quay.io/pypa/manylinux2014_ppc64le \ + quay.io/pypa/manylinux2014_s390x) +fi for image in "${images[@]}"; do docker pull $image diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 66b551de9c..18ef5efecb 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -823,6 +823,24 @@ functions: # Remove all Docker images docker rmi -f $(docker images -a -q) &> /dev/null || true + "upload release": + - command: archive.targz_pack + params: + target: "release-files.tgz" + source_dir: "src/dist" + include: + - "*" + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: release-files.tgz + remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/release/${task_id}-${execution}-release-files.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Release files + pre: - func: "fetch source" - func: "prepare resources" @@ -906,7 +924,6 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release" tags: ["release"] exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). @@ -919,22 +936,21 @@ tasks: set -o xtrace ${PREPARE_SHELL} .evergreen/release.sh - - command: archive.targz_pack - params: - target: "release-files.tgz" - source_dir: "src/dist" - include: - - "*" - - command: s3.put + - func: "upload release" + + - name: "release-old-manylinux" + tags: ["release"] + exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). + commands: + - command: shell.exec + type: test params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: release-files.tgz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/release/${task_id}-${execution}-release-files.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Release files + working_dir: "src" + script: | + set -o xtrace + ${PREPARE_SHELL} + .evergreen/build-manylinux.sh BUILD_WITH_TAG + - func: "upload release" # Standard test tasks {{{ @@ -2633,6 +2649,12 @@ buildvariants: batchtime: 20160 # 14 days tasks: - name: "release" + rules: + - if: + platform: ubuntu-20.04 + then: + add_tasks: + - name: "release-old-manylinux" # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available From 70a1fec9a2a6565cba27ff9ae6cbba15db6add32 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 28 Jul 2021 15:55:21 -0700 Subject: [PATCH 0415/2111] PYTHON-1363 Remove unused retrieved parameter from CommandCursor (#689) --- pymongo/command_cursor.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index be10ffb42a..6d0349a909 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -31,13 +31,10 @@ class CommandCursor(object): """A cursor / iterator over command cursors.""" _getmore_class = _GetMore - def __init__(self, collection, cursor_info, address, retrieved=0, + def __init__(self, collection, cursor_info, address, batch_size=0, max_await_time_ms=None, session=None, explicit_session=False): - """Create a new command cursor. - - The parameter 'retrieved' is unused. - """ + """Create a new command cursor.""" self.__sock_mgr = None self.__collection = collection self.__id = cursor_info['id'] @@ -297,7 +294,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class RawBatchCommandCursor(CommandCursor): _getmore_class = _RawBatchGetMore - def __init__(self, collection, cursor_info, address, retrieved=0, + def __init__(self, collection, cursor_info, address, batch_size=0, max_await_time_ms=None, session=None, explicit_session=False): """Create a new cursor / iterator over raw batches of BSON data. @@ -310,7 +307,7 @@ def __init__(self, collection, cursor_info, address, retrieved=0, """ assert not cursor_info.get('firstBatch') super(RawBatchCommandCursor, self).__init__( - collection, cursor_info, address, retrieved, batch_size, + collection, cursor_info, address, batch_size, max_await_time_ms, session, explicit_session) def _unpack_response(self, response, cursor_id, codec_options, From 0209e4a4a4b64b4dcd05f7ce717c066df0e3116d Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Wed, 28 Jul 2021 16:01:32 -0700 Subject: [PATCH 0416/2111] PYTHON-2571 Remove NotMasterError (#688) --- doc/changelog.rst | 2 ++ doc/migrate-to-pymongo4.rst | 6 ++++++ pymongo/errors.py | 20 +++----------------- test/test_errors.py | 6 ------ 4 files changed, 11 insertions(+), 23 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index d3462b7bb9..5c943407d3 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -70,6 +70,8 @@ Breaking Changes in 4.0 :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, and :meth:`~pymongo.cursor.Cursor`. +- Removed :exc:`pymongo.errors.NotMasterError`. + Use :exc:`pymongo.errors.NotPrimaryError` instead. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. - PyMongoCrypt 1.1.0 or later is now required for client side field level diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index fa847a83b7..b64ada865b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -378,6 +378,12 @@ custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and For more information, see the :doc:`custom type example `. +NotMasterError is removed +------------------------- + +Removed :exc:`~pymongo.errors.NotMasterError`. +Use :exc:`~pymongo.errors.NotPrimaryError` instead. + Removed features with no migration path --------------------------------------- diff --git a/pymongo/errors.py b/pymongo/errors.py index b0bcc3ed1f..dd1c244e37 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -93,22 +93,7 @@ def _format_detailed_error(message, details): return message -class NotMasterError(AutoReconnect): - """**DEPRECATED** - The server responded "not master" or - "node is recovering". - - This exception has been deprecated and will be removed in PyMongo 4.0. - Use :exc:`~pymongo.errors.NotPrimaryError` instead. - - .. versionchanged:: 3.12 - Deprecated. Use :exc:`~pymongo.errors.NotPrimaryError` instead. - """ - def __init__(self, message='', errors=None): - super(NotMasterError, self).__init__( - _format_detailed_error(message, errors), errors=errors) - - -class NotPrimaryError(NotMasterError): +class NotPrimaryError(AutoReconnect): """The server responded "not primary" or "node is recovering". These errors result from a query, write, or command. The operation failed @@ -124,7 +109,8 @@ class NotPrimaryError(NotMasterError): .. versionadded:: 3.12 """ def __init__(self, message='', errors=None): - super(NotPrimaryError, self).__init__(message, errors=errors) + super(NotPrimaryError, self).__init__( + _format_detailed_error(message, errors), errors=errors) class ServerSelectionTimeoutError(AutoReconnect): diff --git a/test/test_errors.py b/test/test_errors.py index 6ae8ee69be..aec7bf478d 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -21,7 +21,6 @@ from pymongo.errors import (BulkWriteError, EncryptionError, NotPrimaryError, - NotMasterError, OperationFailure) from test import (PyMongoTestCase, unittest) @@ -101,11 +100,6 @@ def test_pickle_EncryptionError(self): self.assertPyMongoErrorEqual(exc, exc2) self.assertOperationFailureEqual(cause, exc2.cause) - def test_NotMasterError_catches_NotPrimaryError(self): - with self.assertRaises(NotMasterError) as exc: - raise NotPrimaryError("not primary test", {"errmsg": "error"}) - self.assertIn("full error", str(exc.exception)) - if __name__ == "__main__": unittest.main() From 3f8c104157fdff9ddd2bcd85b707d8216046510d Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 29 Jul 2021 10:32:51 -0700 Subject: [PATCH 0417/2111] PYTHON-2288 Remove IsMaster (#690) --- doc/changelog.rst | 2 + doc/migrate-to-pymongo4.rst | 6 + pymongo/hello.py | 185 ++++++++++++++++++++++++ pymongo/ismaster.py | 198 -------------------------- pymongo/monitor.py | 4 +- pymongo/pool.py | 5 +- pymongo/server_description.py | 4 +- pymongo/topology.py | 4 +- test/pymongo_mocks.py | 4 +- test/test_discovery_and_monitoring.py | 4 +- test/test_heartbeat_monitoring.py | 6 +- test/test_sdam_monitoring_spec.py | 4 +- test/test_server.py | 4 +- test/test_server_description.py | 4 +- test/test_topology.py | 10 +- test/utils_selection_tests.py | 6 +- 16 files changed, 222 insertions(+), 228 deletions(-) delete mode 100644 pymongo/ismaster.py diff --git a/doc/changelog.rst b/doc/changelog.rst index 5c943407d3..f8c610606b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -55,6 +55,8 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. - Removed :mod:`pymongo.thread_util`. - Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. +- Removed :class:`~pymongo.ismaster.IsMaster`. + Use :class:`~pymongo.hello.Hello` instead. - Removed :mod:`pymongo.son_manipulator`, :class:`pymongo.son_manipulator.SONManipulator`, :class:`pymongo.son_manipulator.ObjectIdInjector`, diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index b64ada865b..a08eeb5ffe 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -378,6 +378,12 @@ custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and For more information, see the :doc:`custom type example `. +IsMaster is removed +------------------- + +Removed :class:`pymongo.ismaster.IsMaster`. +Use :class:`pymongo.hello.Hello` instead. + NotMasterError is removed ------------------------- diff --git a/pymongo/hello.py b/pymongo/hello.py index 34963a40c8..290f27dcc9 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -14,9 +14,194 @@ """Helpers for the 'hello' and legacy hello commands.""" +import itertools + +from pymongo import common +from pymongo.server_type import SERVER_TYPE + class HelloCompat: CMD = 'hello' LEGACY_CMD = 'ismaster' PRIMARY = 'isWritablePrimary' LEGACY_PRIMARY = 'ismaster' + + +def _get_server_type(doc): + """Determine the server type from a hello response.""" + if not doc.get('ok'): + return SERVER_TYPE.Unknown + + if doc.get('serviceId'): + return SERVER_TYPE.LoadBalancer + elif doc.get('isreplicaset'): + return SERVER_TYPE.RSGhost + elif doc.get('setName'): + if doc.get('hidden'): + return SERVER_TYPE.RSOther + elif doc.get(HelloCompat.PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get(HelloCompat.LEGACY_PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get('secondary'): + return SERVER_TYPE.RSSecondary + elif doc.get('arbiterOnly'): + return SERVER_TYPE.RSArbiter + else: + return SERVER_TYPE.RSOther + elif doc.get('msg') == 'isdbgrid': + return SERVER_TYPE.Mongos + else: + return SERVER_TYPE.Standalone + + +class Hello(object): + """Parse a hello response from the server. + + .. versionadded:: 3.12 + """ + __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', + '_awaitable') + + def __init__(self, doc, awaitable=False): + self._server_type = _get_server_type(doc) + self._doc = doc + self._is_writable = self._server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.Standalone, + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer) + + self._is_readable = ( + self.server_type == SERVER_TYPE.RSSecondary + or self._is_writable) + self._awaitable = awaitable + + @property + def document(self): + """The complete hello command response document. + + .. versionadded:: 3.4 + """ + return self._doc.copy() + + @property + def server_type(self): + return self._server_type + + @property + def all_hosts(self): + """List of hosts, passives, and arbiters known to this server.""" + return set(map(common.clean_node, itertools.chain( + self._doc.get('hosts', []), + self._doc.get('passives', []), + self._doc.get('arbiters', [])))) + + @property + def tags(self): + """Replica set member tags or empty dict.""" + return self._doc.get('tags', {}) + + @property + def primary(self): + """This server's opinion about who the primary is, or None.""" + if self._doc.get('primary'): + return common.partition_node(self._doc['primary']) + else: + return None + + @property + def replica_set_name(self): + """Replica set name or None.""" + return self._doc.get('setName') + + @property + def max_bson_size(self): + return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) + + @property + def max_message_size(self): + return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) + + @property + def max_write_batch_size(self): + return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) + + @property + def min_wire_version(self): + return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) + + @property + def max_wire_version(self): + return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) + + @property + def set_version(self): + return self._doc.get('setVersion') + + @property + def election_id(self): + return self._doc.get('electionId') + + @property + def cluster_time(self): + return self._doc.get('$clusterTime') + + @property + def logical_session_timeout_minutes(self): + return self._doc.get('logicalSessionTimeoutMinutes') + + @property + def is_writable(self): + return self._is_writable + + @property + def is_readable(self): + return self._is_readable + + @property + def me(self): + me = self._doc.get('me') + if me: + return common.clean_node(me) + + @property + def last_write_date(self): + return self._doc.get('lastWrite', {}).get('lastWriteDate') + + @property + def compressors(self): + return self._doc.get('compression') + + @property + def sasl_supported_mechs(self): + """Supported authentication mechanisms for the current user. + + For example:: + + >>> hello.sasl_supported_mechs + ["SCRAM-SHA-1", "SCRAM-SHA-256"] + + """ + return self._doc.get('saslSupportedMechs', []) + + @property + def speculative_authenticate(self): + """The speculativeAuthenticate field.""" + return self._doc.get('speculativeAuthenticate') + + @property + def topology_version(self): + return self._doc.get('topologyVersion') + + @property + def awaitable(self): + return self._awaitable + + @property + def service_id(self): + return self._doc.get('serviceId') + + @property + def hello_ok(self): + return self._doc.get('helloOk', False) diff --git a/pymongo/ismaster.py b/pymongo/ismaster.py deleted file mode 100644 index e3a8f7f067..0000000000 --- a/pymongo/ismaster.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Parse a response to the 'ismaster' command.""" - -import itertools - -from pymongo import common -from pymongo.hello import HelloCompat -from pymongo.server_type import SERVER_TYPE - - -def _get_server_type(doc): - """Determine the server type from an ismaster response.""" - if not doc.get('ok'): - return SERVER_TYPE.Unknown - - if doc.get('serviceId'): - return SERVER_TYPE.LoadBalancer - elif doc.get('isreplicaset'): - return SERVER_TYPE.RSGhost - elif doc.get('setName'): - if doc.get('hidden'): - return SERVER_TYPE.RSOther - elif doc.get(HelloCompat.PRIMARY): - return SERVER_TYPE.RSPrimary - elif doc.get(HelloCompat.LEGACY_PRIMARY): - return SERVER_TYPE.RSPrimary - elif doc.get('secondary'): - return SERVER_TYPE.RSSecondary - elif doc.get('arbiterOnly'): - return SERVER_TYPE.RSArbiter - else: - return SERVER_TYPE.RSOther - elif doc.get('msg') == 'isdbgrid': - return SERVER_TYPE.Mongos - else: - return SERVER_TYPE.Standalone - - -class IsMaster(object): - __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', - '_awaitable') - - def __init__(self, doc, awaitable=False): - """Parse an ismaster response from the server.""" - self._server_type = _get_server_type(doc) - self._doc = doc - self._is_writable = self._server_type in ( - SERVER_TYPE.RSPrimary, - SERVER_TYPE.Standalone, - SERVER_TYPE.Mongos, - SERVER_TYPE.LoadBalancer) - - self._is_readable = ( - self.server_type == SERVER_TYPE.RSSecondary - or self._is_writable) - self._awaitable = awaitable - - @property - def document(self): - """The complete ismaster command response document. - - .. versionadded:: 3.4 - """ - return self._doc.copy() - - @property - def server_type(self): - return self._server_type - - @property - def all_hosts(self): - """List of hosts, passives, and arbiters known to this server.""" - return set(map(common.clean_node, itertools.chain( - self._doc.get('hosts', []), - self._doc.get('passives', []), - self._doc.get('arbiters', [])))) - - @property - def tags(self): - """Replica set member tags or empty dict.""" - return self._doc.get('tags', {}) - - @property - def primary(self): - """This server's opinion about who the primary is, or None.""" - if self._doc.get('primary'): - return common.partition_node(self._doc['primary']) - else: - return None - - @property - def replica_set_name(self): - """Replica set name or None.""" - return self._doc.get('setName') - - @property - def max_bson_size(self): - return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) - - @property - def max_message_size(self): - return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) - - @property - def max_write_batch_size(self): - return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) - - @property - def min_wire_version(self): - return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) - - @property - def max_wire_version(self): - return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) - - @property - def set_version(self): - return self._doc.get('setVersion') - - @property - def election_id(self): - return self._doc.get('electionId') - - @property - def cluster_time(self): - return self._doc.get('$clusterTime') - - @property - def logical_session_timeout_minutes(self): - return self._doc.get('logicalSessionTimeoutMinutes') - - @property - def is_writable(self): - return self._is_writable - - @property - def is_readable(self): - return self._is_readable - - @property - def me(self): - me = self._doc.get('me') - if me: - return common.clean_node(me) - - @property - def last_write_date(self): - return self._doc.get('lastWrite', {}).get('lastWriteDate') - - @property - def compressors(self): - return self._doc.get('compression') - - @property - def sasl_supported_mechs(self): - """Supported authentication mechanisms for the current user. - - For example:: - - >>> ismaster.sasl_supported_mechs - ["SCRAM-SHA-1", "SCRAM-SHA-256"] - - """ - return self._doc.get('saslSupportedMechs', []) - - @property - def speculative_authenticate(self): - """The speculativeAuthenticate field.""" - return self._doc.get('speculativeAuthenticate') - - @property - def topology_version(self): - return self._doc.get('topologyVersion') - - @property - def awaitable(self): - return self._awaitable - - @property - def service_id(self): - return self._doc.get('serviceId') - - @property - def hello_ok(self): - return self._doc.get('helloOk', False) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 8a60abce0a..4817ad2401 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -23,7 +23,7 @@ from pymongo.errors import (NotPrimaryError, OperationFailure, _OperationCancelled) -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -268,7 +268,7 @@ def _check_with_socket(self, conn): start = time.monotonic() if conn.more_to_come: # Read the next streaming isMaster (MongoDB 4.4+). - response = IsMaster(conn._next_reply(), awaitable=True) + response = Hello(conn._next_reply(), awaitable=True) elif (conn.performed_handshake and self._server_description.topology_version): # Initiate streaming isMaster (MongoDB 4.4+). diff --git a/pymongo/pool.py b/pymongo/pool.py index 58f1652e05..6dfd32ef0c 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -50,8 +50,7 @@ NotPrimaryError, OperationFailure, PyMongoError) -from pymongo.hello import HelloCompat -from pymongo.ismaster import IsMaster +from pymongo.hello import HelloCompat, Hello from pymongo.monitoring import (ConnectionCheckOutFailedReason, ConnectionClosedReason) from pymongo.network import (command, @@ -618,7 +617,7 @@ def _ismaster(self, cluster_time, topology_version, doc.setdefault('serviceId', process_id) if not self.opts.load_balanced: doc.pop('serviceId', None) - ismaster = IsMaster(doc, awaitable=awaitable) + ismaster = Hello(doc, awaitable=awaitable) self.is_writable = ismaster.is_writable self.max_wire_version = ismaster.max_wire_version self.max_bson_size = ismaster.max_bson_size diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 19cc349c78..462ad135cc 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -18,7 +18,7 @@ from bson import EPOCH_NAIVE from pymongo.server_type import SERVER_TYPE -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello class ServerDescription(object): @@ -48,7 +48,7 @@ def __init__( error=None): self._address = address if not ismaster: - ismaster = IsMaster({}) + ismaster = Hello({}) self._server_type = ismaster.server_type self._all_hosts = ismaster.all_hosts diff --git a/pymongo/topology.py b/pymongo/topology.py index dd8315a6db..e08c12ab04 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -34,7 +34,7 @@ PyMongoError, ServerSelectionTimeoutError, WriteError) -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -567,7 +567,7 @@ def _ensure_opened(self): # Emit initial SDAM events for load balancer mode. self._process_change(ServerDescription( self._seed_addresses[0], - IsMaster({'ok': 1, 'serviceId': self._topology_id, + Hello({'ok': 1, 'serviceId': self._topology_id, 'maxWireVersion': 13}))) # Ensure that the monitors are open. diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index af59e0cbe4..511560b59e 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -21,7 +21,7 @@ from pymongo import common from pymongo import MongoClient from pymongo.errors import AutoReconnect, NetworkTimeout -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.monitor import Monitor from pymongo.pool import Pool from pymongo.server_description import ServerDescription @@ -100,7 +100,7 @@ def _check_once(self): client = self.client address = self._server_description.address response, rtt = client.mock_is_master('%s:%d' % address) - return ServerDescription(address, IsMaster(response), rtt) + return ServerDescription(address, Hello(response), rtt) class MockClient(MongoClient): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 2a440cdf38..0b3ac6da5d 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -31,7 +31,7 @@ OperationFailure) from pymongo.helpers import (_check_command_response, _check_write_command_response) -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.server_description import ServerDescription, SERVER_TYPE from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext @@ -83,7 +83,7 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): def got_ismaster(topology, server_address, ismaster_response): server_description = ServerDescription( - server_address, IsMaster(ismaster_response), 0) + server_address, Hello(ismaster_response), 0) topology.on_change(server_description) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 8c66557023..bd926f772d 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] from pymongo.errors import ConnectionFailure -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.monitor import Monitor from test import unittest, client_knobs, IntegrationTest from test.utils import (HeartbeatEventListener, MockPool, single_client, @@ -38,7 +38,7 @@ class MockMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if isinstance(responses[1], Exception): raise responses[1] - return IsMaster(responses[1]), 99 + return Hello(responses[1]), 99 m = single_client( h=uri, @@ -62,7 +62,7 @@ def _check_with_socket(self, *args, **kwargs): self.assertEqual(actual.connection_id, responses[0]) if expected != 'ServerHeartbeatStartedEvent': - if isinstance(actual.reply, IsMaster): + if isinstance(actual.reply, Hello): self.assertEqual(actual.duration, 99) self.assertEqual(actual.reply._doc, responses[1]) else: diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 3afdd23570..cb168bb2bd 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -26,7 +26,7 @@ from pymongo.common import clean_node from pymongo.errors import (ConnectionFailure, NotPrimaryError) -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.monitor import Monitor from pymongo.server_description import ServerDescription from pymongo.topology_description import TOPOLOGY_TYPE @@ -197,7 +197,7 @@ def _run(self): source_address = clean_node(source) topology.on_change(ServerDescription( address=source_address, - ismaster=IsMaster(response), + ismaster=Hello(response), round_trip_time=0)) expected_results = phase['outcome']['events'] diff --git a/test/test_server.py b/test/test_server.py index d6b92e2fde..ca3ce76cd3 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -18,7 +18,7 @@ sys.path[0:0] = [""] -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.server import Server from pymongo.server_description import ServerDescription from test import unittest @@ -26,7 +26,7 @@ class TestServer(unittest.TestCase): def test_repr(self): - ismaster = IsMaster({'ok': 1}) + ismaster = Hello({'ok': 1}) sd = ServerDescription(('localhost', 27017), ismaster) server = Server(sd, pool=object(), monitor=object()) self.assertTrue('Standalone' in str(server)) diff --git a/test/test_server_description.py b/test/test_server_description.py index 11f134464f..27ea5bf371 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -21,7 +21,7 @@ from bson.objectid import ObjectId from bson.int64 import Int64 from pymongo.server_type import SERVER_TYPE -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.server_description import ServerDescription from test import unittest @@ -29,7 +29,7 @@ def parse_ismaster_response(doc): - ismaster_response = IsMaster(doc) + ismaster_response = Hello(doc) return ServerDescription(address, ismaster_response) diff --git a/test/test_topology.py b/test/test_topology.py index 9a4bf512cd..4a3018b72d 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -29,7 +29,7 @@ from pymongo.errors import (AutoReconnect, ConfigurationError, ConnectionFailure) -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.monitor import Monitor from pymongo.pool import PoolOptions from pymongo.server_description import ServerDescription @@ -67,7 +67,7 @@ def create_mock_topology( def got_ismaster(topology, server_address, ismaster_response): server_description = ServerDescription( - server_address, IsMaster(ismaster_response), 0) + server_address, Hello(ismaster_response), 0) topology.on_change(server_description) @@ -212,7 +212,7 @@ def test_round_trip_time(self): class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if available: - return (IsMaster({'ok': 1, 'maxWireVersion': 6}), + return (Hello({'ok': 1, 'maxWireVersion': 6}), round_trip_time) else: raise AutoReconnect('mock monitor error') @@ -673,7 +673,7 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): ismaster_count[0] += 1 if ismaster_count[0] == 1: - return IsMaster({'ok': 1, 'maxWireVersion': 6}), 0 + return Hello({'ok': 1, 'maxWireVersion': 6}), 0 else: raise AutoReconnect('mock monitor error') @@ -695,7 +695,7 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): ismaster_count[0] += 1 if ismaster_count[0] in (1, 3): - return IsMaster({'ok': 1, 'maxWireVersion': 6}), 0 + return Hello({'ok': 1, 'maxWireVersion': 6}), 0 else: raise AutoReconnect( 'mock monitor error #%s' % (ismaster_count[0],)) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 57a2673033..4037705ae4 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -23,7 +23,7 @@ from bson import json_util from pymongo.common import clean_node, HEARTBEAT_FREQUENCY from pymongo.errors import AutoReconnect, ConfigurationError -from pymongo.ismaster import IsMaster +from pymongo.hello import Hello from pymongo.server_description import ServerDescription from pymongo.settings import TopologySettings from pymongo.server_selectors import writable_server_selector @@ -60,7 +60,7 @@ def make_server_description(server, hosts): """Make a ServerDescription from server info in a JSON test.""" server_type = server['type'] if server_type in ("Unknown", "PossiblePrimary"): - return ServerDescription(clean_node(server['address']), IsMaster({})) + return ServerDescription(clean_node(server['address']), Hello({})) ismaster_response = {'ok': True, 'hosts': hosts} if server_type != "Standalone" and server_type != "Mongos": @@ -85,7 +85,7 @@ def make_server_description(server, hosts): # Sets _last_update_time to now. sd = ServerDescription(clean_node(server['address']), - IsMaster(ismaster_response), + Hello(ismaster_response), round_trip_time=server['avg_rtt_ms'] / 1000.0) if 'lastUpdateTime' in server: From f3486d7ad7097d5e6f858b8f1ed781e6109603a2 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 29 Jul 2021 15:32:53 -0700 Subject: [PATCH 0418/2111] PYTHON-2842 Integration tests for observeSensitiveCommands field (#684) --- .../valid-pass/observeSensitiveCommands.json | 691 ++++++++++++++++++ test/unified_format.py | 16 +- 2 files changed, 705 insertions(+), 2 deletions(-) create mode 100644 test/unified-test-format/valid-pass/observeSensitiveCommands.json diff --git a/test/unified-test-format/valid-pass/observeSensitiveCommands.json b/test/unified-test-format/valid-pass/observeSensitiveCommands.json new file mode 100644 index 0000000000..411ca19c5d --- /dev/null +++ b/test/unified-test-format/valid-pass/observeSensitiveCommands.json @@ -0,0 +1,691 @@ +{ + "description": "observeSensitiveCommands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": false + } + }, + { + "client": { + "id": "client2", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "observeSensitiveCommands" + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "observeSensitiveCommands" + } + }, + { + "database": { + "id": "database2", + "client": "client2", + "databaseName": "observeSensitiveCommands" + } + } + ], + "tests": [ + { + "description": "getnonce is observed with observeSensitiveCommands=true", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce is not observed with observeSensitiveCommands=false", + "operations": [ + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [] + } + ] + }, + { + "description": "getnonce is not observed by default", + "operations": [ + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "hello with speculativeAuthenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + }, + { + "client": "client1", + "events": [] + }, + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "hello without speculativeAuthenticate is always observed", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculativeAuthenticate", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + }, + { + "client": "client1", + "events": [] + }, + { + "client": "client2", + "events": [] + } + ] + }, + { + "description": "legacy hello without speculativeAuthenticate is always observed", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database1", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database2", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + }, + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 3c8c0ff04f..890113afaf 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -191,8 +191,10 @@ def __init__(self, observe_events, ignore_commands, observe_sensitive_commands): self._event_types = set(name.lower() for name in observe_events) if observe_sensitive_commands: + self._observe_sensitive_commands = True self._ignore_commands = set(ignore_commands) else: + self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add('configurefailpoint') super(EventListenerUtil, self).__init__() @@ -211,10 +213,20 @@ def _command_event(self, event): self.add_event(event) def started(self, event): - self._command_event(event) + if event.command == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) def succeeded(self, event): - self._command_event(event) + if event.reply == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) def failed(self, event): self._command_event(event) From f86b2c6bf83eee8fbc40865fa5fb16d5bb791fe3 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 29 Jul 2021 17:17:22 -0700 Subject: [PATCH 0419/2111] PYTHON-2827 Versioned API migration example for ecosystem docs (#687) --- test/test_examples.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/test/test_examples.py b/test/test_examples.py index 3e5d8bc86c..ec9021ec26 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1137,6 +1137,47 @@ def test_versioned_api(self): uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 + @client_context.require_version_min(4, 7) + def test_versioned_api_migration(self): + # SERVER-58785 + if (client_context.is_topology_type(["sharded"]) and + not client_context.version.at_least(5, 0, 2)): + self.skipTest("This test needs MongoDB 5.0.2 or newer") + + client = rs_client(server_api=ServerApi("1", strict=True)) + client.db.sales.drop() + + # Start Versioned API Example 5 + def strptime(s): + return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") + client.db.sales.insert_many([ + {"_id": 1, "item": "abc", "price": 10, "quantity": 2, "date": strptime("2021-01-01T08:00:00Z")}, + {"_id": 2, "item": "jkl", "price": 20, "quantity": 1, "date": strptime("2021-02-03T09:00:00Z")}, + {"_id": 3, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-03T09:05:00Z")}, + {"_id": 4, "item": "abc", "price": 10, "quantity": 10, "date": strptime("2021-02-15T08:00:00Z")}, + {"_id": 5, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T09:05:00Z")}, + {"_id": 6, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-15T12:05:10Z")}, + {"_id": 7, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T14:12:12Z")}, + {"_id": 8, "item": "abc", "price": 10, "quantity": 5, "date": strptime("2021-03-16T20:20:13Z")} + ]) + # End Versioned API Example 5 + + with self.assertRaisesRegex( + OperationFailure, "Provided apiStrict:true, but the command " + "count is not in API Version 1"): + client.db.command('count', 'sales', query={}) + # Start Versioned API Example 6 + # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} + # End Versioned API Example 6 + + # Start Versioned API Example 7 + client.db.sales.count_documents({}) + # End Versioned API Example 7 + + # Start Versioned API Example 8 + # 8 + # End Versioned API Example 8 + if __name__ == "__main__": unittest.main() From 97a84e199eeca70d0eb9e78fd719feb10edc2955 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 29 Jul 2021 17:25:15 -0700 Subject: [PATCH 0420/2111] PYTHON-2697 Fix races in various CMAP spec tests (#685) --- test/cmap/pool-checkout-no-idle.json | 13 +++++++------ test/cmap/pool-checkout-no-stale.json | 13 ++++++++----- test/cmap/pool-clear-min-size.json | 3 ++- test/cmap/pool-create-min-size-error.json | 1 + test/cmap/wait-queue-timeout.json | 2 +- test/test_cmap.py | 15 +++++++++++++-- 6 files changed, 32 insertions(+), 15 deletions(-) diff --git a/test/cmap/pool-checkout-no-idle.json b/test/cmap/pool-checkout-no-idle.json index 9a668857bb..0b0fe572ff 100644 --- a/test/cmap/pool-checkout-no-idle.json +++ b/test/cmap/pool-checkout-no-idle.json @@ -3,7 +3,8 @@ "style": "unit", "description": "must destroy and must not check out an idle connection if found while iterating available connections", "poolOptions": { - "maxIdleTimeMS": 10 + "maxIdleTimeMS": 10, + "backgroundThreadIntervalMS": -1 }, "operations": [ { @@ -24,11 +25,6 @@ { "name": "checkOut" }, - { - "name": "waitForEvent", - "event": "ConnectionClosed", - "count": 1 - }, { "name": "waitForEvent", "event": "ConnectionCheckedOut", @@ -56,6 +52,11 @@ "connectionId": 1, "reason": "idle", "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-no-stale.json b/test/cmap/pool-checkout-no-stale.json index 11bd492c89..ec76f4e9c8 100644 --- a/test/cmap/pool-checkout-no-stale.json +++ b/test/cmap/pool-checkout-no-stale.json @@ -2,6 +2,9 @@ "version": 1, "style": "unit", "description": "must destroy and must not check out a stale connection if found while iterating available connections", + "poolOptions": { + "backgroundThreadIntervalMS": -1 + }, "operations": [ { "name": "ready" @@ -23,11 +26,6 @@ { "name": "checkOut" }, - { - "name": "waitForEvent", - "event": "ConnectionClosed", - "count": 1 - }, { "name": "waitForEvent", "event": "ConnectionCheckedOut", @@ -59,6 +57,11 @@ "connectionId": 1, "reason": "stale", "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-clear-min-size.json b/test/cmap/pool-clear-min-size.json index 00c477c620..239df871b8 100644 --- a/test/cmap/pool-clear-min-size.json +++ b/test/cmap/pool-clear-min-size.json @@ -3,7 +3,8 @@ "style": "unit", "description": "pool clear halts background minPoolSize establishments", "poolOptions": { - "minPoolSize": 1 + "minPoolSize": 1, + "backgroundThreadIntervalMS": 50 }, "operations": [ { diff --git a/test/cmap/pool-create-min-size-error.json b/test/cmap/pool-create-min-size-error.json index 930e2a8d45..1c744b850c 100644 --- a/test/cmap/pool-create-min-size-error.json +++ b/test/cmap/pool-create-min-size-error.json @@ -23,6 +23,7 @@ }, "poolOptions": { "minPoolSize": 1, + "backgroundThreadIntervalMS": 50, "appName": "poolCreateMinSizeErrorTest" }, "operations": [ diff --git a/test/cmap/wait-queue-timeout.json b/test/cmap/wait-queue-timeout.json index 993209a353..fbcbdfb04d 100644 --- a/test/cmap/wait-queue-timeout.json +++ b/test/cmap/wait-queue-timeout.json @@ -4,7 +4,7 @@ "description": "must aggressively timeout threads enqueued longer than waitQueueTimeoutMS", "poolOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 20 + "waitQueueTimeoutMS": 50 }, "operations": [ { diff --git a/test/test_cmap.py b/test/test_cmap.py index cfa11e9bc0..d70691dcf6 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -229,7 +229,13 @@ def run_scenario(self, scenario_def, test): opts['event_listeners'] = [self.listener] opts['_monitor_class'] = DummyMonitor opts['connect'] = False - with client_knobs(kill_cursor_frequency=.05, + # Support backgroundThreadIntervalMS, default to 50ms. + interval = opts.pop('backgroundThreadIntervalMS', 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval/1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=.05): client = single_client(**opts) # Update the SD to a known type because the DummyMonitor will not. @@ -242,7 +248,12 @@ def run_scenario(self, scenario_def, test): client_context.port)] client._topology._description = updated_topology_description( client._topology._description, sd) - client._get_topology() + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + client._topology.open() + else: + client._get_topology() self.addCleanup(client.close) self.pool = list(client._topology._servers.values())[0].pool From f541e7731c94b87ca3f6ed7e765b38cecc2d72b1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 30 Jul 2021 17:56:01 -0700 Subject: [PATCH 0421/2111] PYTHON-2602 Test that pool paused errors are retryable (#681) Allow client_knobs to be used as a decorator. --- pymongo/topology.py | 2 +- test/__init__.py | 27 ++++++++--- test/test_cmap.py | 53 --------------------- test/test_retryable_reads.py | 89 +++++++++++++++++++++++++++++++++- test/test_retryable_writes.py | 90 +++++++++++++++++++++++++++++++++-- 5 files changed, 195 insertions(+), 66 deletions(-) diff --git a/pymongo/topology.py b/pymongo/topology.py index e08c12ab04..340e504d1d 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -125,7 +125,7 @@ def target(): executor = periodic_executor.PeriodicExecutor( interval=common.EVENTS_QUEUE_FREQUENCY, - min_interval=0.5, + min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, name="pymongo_events_thread") diff --git a/test/__init__.py b/test/__init__.py index 54761fb55d..8099cc51fb 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -191,6 +191,16 @@ def disable(self): def __exit__(self, exc_type, exc_val, exc_tb): self.disable() + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + def wrap(*args, **kwargs): + with self: + return f(*args, **kwargs) + return wrap + + return make_wrapper(func) + def __del__(self): if self._enabled: msg = ( @@ -761,6 +771,16 @@ def require_failCommand_appName(self, func): "failCommand appName must be supported", func=func) + def require_failCommand_blockConnection(self, func): + """Run a test only if the server supports failCommand blockConnection. + """ + return self._require( + lambda: (self.test_commands_enabled and ( + (not self.is_mongos and self.version >= (4, 2, 9))) or + (self.is_mongos and self.version >= (4, 4))), + "failCommand blockConnection is not supported", + func=func) + def require_tls(self, func): """Run a test only if the client can connect over TLS.""" return self._require(lambda: self.tls, @@ -847,7 +867,6 @@ def supports_failCommand_fail_point(self): return (self.version.at_least(4, 0) and self.test_commands_enabled) - @property def requires_hint_with_min_max_queries(self): """Does the server require a hint with min/max queries.""" @@ -930,12 +949,6 @@ def patch_system_certs(self, ca_certs): self.addCleanup(patcher.disable) -# Use assertRaisesRegex if available, otherwise use Python 2.7's -# deprecated assertRaisesRegexp, with a 'p'. -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - class MockClientTest(unittest.TestCase): """Base class for TestCases that use MockClient. diff --git a/test/test_cmap.py b/test/test_cmap.py index d70691dcf6..3a7b708526 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -453,59 +453,6 @@ def test_close_leaves_pool_unpaused(self): with pool.get_socket({}): pass - @client_context.require_version_max(4, 3) # Remove after SERVER-53624. - @client_context.require_retryable_writes - @client_context.require_failCommand_fail_point - def test_pool_paused_error_is_retryable(self): - cmap_listener = CMAPListener() - cmd_listener = OvertCommandListener() - client = rs_or_single_client( - maxPoolSize=1, - heartbeatFrequencyMS=500, - event_listeners=[cmap_listener, cmd_listener]) - self.addCleanup(client.close) - threads = [InsertThread(client.pymongo_test.test) for _ in range(3)] - fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91 - }, - } - with self.fail_point(fail_command): - for thread in threads: - thread.start() - for thread in threads: - thread.join() - for thread in threads: - self.assertTrue(thread.passed) - - # The two threads in the wait queue fail the initial connection check - # out attempt and then succeed on retry. - self.assertEqual( - 2, cmap_listener.event_count(ConnectionCheckOutFailedEvent)) - - # Connection check out failures are not reflected in command - # monitoring because we only publish command events _after_ checking - # out a connection. - self.assertEqual(4, len(cmd_listener.results['started'])) - self.assertEqual(3, len(cmd_listener.results['succeeded'])) - self.assertEqual(1, len(cmd_listener.results['failed'])) - - -class InsertThread(threading.Thread): - def __init__(self, collection): - super(InsertThread, self).__init__() - self.daemon = True - self.collection = collection - self.passed = False - - def run(self): - self.collection.insert_one({}) - self.passed = True - def create_test(scenario_def, test, name): def run_scenario(self): diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 1995b5dc35..963ff3e183 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -15,15 +15,28 @@ """Test retryable reads spec.""" import os +import pprint import sys +import threading sys.path[0:0] = [""] from pymongo.mongo_client import MongoClient +from pymongo.monitoring import (ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent) from pymongo.write_concern import WriteConcern -from test import unittest, client_context, PyMongoTestCase -from test.utils import TestCreator +from test import (client_context, + client_knobs, + IntegrationTest, + PyMongoTestCase, + unittest) +from test.utils import (CMAPListener, + OvertCommandListener, + rs_or_single_client, + TestCreator) from test.utils_spec_runner import SpecRunner @@ -123,5 +136,77 @@ def run_scenario(self): test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) test_creator.create_tests() + +class FindThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.find_one({}) + self.passed = True + + +class TestPoolPausedError(IntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + RUN_ON_SERVERLESS = False + + @client_context.require_failCommand_blockConnection + @client_knobs(heartbeat_frequency=.05, min_heartbeat_interval=.05) + def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = rs_or_single_client( + maxPoolSize=1, + event_listeners=[cmap_listener, cmd_listener]) + self.addCleanup(client.close) + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['find'], + 'blockConnection': True, + 'blockTimeMS': 1000, + 'errorCode': 91, + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type(( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + PoolClearedEvent)) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance( + cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, + ConnectionCheckOutFailedReason.CONN_ERROR, + msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.results['started'] + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.results['succeeded'] + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.results['failed'] + self.assertEqual(1, len(failed), msg) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 0368f97a6c..464ff39ac4 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -16,13 +16,14 @@ import copy import os +import pprint import sys +import threading sys.path[0:0] = [""] from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.int64 import Int64 -from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON @@ -32,6 +33,10 @@ ServerSelectionTimeoutError, WriteConcernError) from pymongo.mongo_client import MongoClient +from pymongo.monitoring import (ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent) from pymongo.operations import (InsertOne, DeleteMany, DeleteOne, @@ -40,10 +45,15 @@ UpdateOne) from pymongo.write_concern import WriteConcern -from test import unittest, client_context, IntegrationTest, SkipTest, client_knobs -from test.utils import (rs_or_single_client, +from test import (client_context, + client_knobs, + IntegrationTest, + SkipTest, + unittest) +from test.utils import (CMAPListener, DeprecationFilter, OvertCommandListener, + rs_or_single_client, TestCreator) from test.utils_spec_runner import SpecRunner from test.version import Version @@ -153,6 +163,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.knobs.disable() cls.client.close() + super(TestRetryableWritesMMAPv1, cls).tearDownClass() @client_context.require_version_min(3, 5) @client_context.require_no_standalone @@ -477,6 +488,79 @@ def test_RetryableWriteError_error_label_RawBSONDocument(self): self.assertIn('RetryableWriteError', result['errorLabels']) +class InsertThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + def run(self): + self.collection.insert_one({}) + self.passed = True + + +class TestPoolPausedError(IntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + RUN_ON_SERVERLESS = False + + @client_context.require_failCommand_blockConnection + @client_context.require_retryable_writes + @client_knobs(heartbeat_frequency=.05, min_heartbeat_interval=.05) + def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = rs_or_single_client( + maxPoolSize=1, + event_listeners=[cmap_listener, cmd_listener]) + self.addCleanup(client.close) + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['insert'], + 'blockConnection': True, + 'blockTimeMS': 1000, + 'errorCode': 91, + 'errorLabels': ['RetryableWriteError'], + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type(( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + PoolClearedEvent)) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance( + cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, + ConnectionCheckOutFailedReason.CONN_ERROR, + msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.results['started'] + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.results['succeeded'] + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.results['failed'] + self.assertEqual(1, len(failed), msg) + + # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): @client_context.require_version_min(3, 6) From 568205135e764d176430623b65eac4ec0880b113 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 Aug 2021 10:35:37 -0700 Subject: [PATCH 0422/2111] PYTHON-2855 Update mock server filename for KMS testing --- .evergreen/run-tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index ff6c54ac24..efc82000f9 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -154,8 +154,8 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -m pip install certifi fi pushd ${DRIVERS_TOOLS}/.evergreen/csfle - python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & - python -u lib/kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & trap 'kill $(jobs -p)' EXIT HUP popd fi From 3513ab72b044f905493f9fbfe537e4a5f7639540 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 Aug 2021 11:09:11 -0700 Subject: [PATCH 0423/2111] PYTHON-2528 Remove Database.current_op --- doc/changelog.rst | 1 + doc/migrate-to-pymongo4.rst | 16 ++++++++++ pymongo/database.py | 56 ----------------------------------- test/test_database.py | 9 ------ test/test_monitoring.py | 18 ----------- test/test_read_preferences.py | 3 -- test/test_session.py | 9 ------ 7 files changed, 17 insertions(+), 95 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index f8c610606b..1c482cbc35 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -28,6 +28,7 @@ Breaking Changes in 4.0 :attr:`pymongo.mongo_client.MongoClient.is_locked`. - Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. - Removed :meth:`pymongo.database.Database.collection_names`. +- Removed :meth:`pymongo.database.Database.current_op`. - Removed :meth:`pymongo.database.Database.authenticate` and :meth:`pymongo.database.Database.logout`. - Removed :meth:`pymongo.database.Database.error`, diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index a08eeb5ffe..0cefc21fc9 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -157,6 +157,22 @@ can be changed to this:: names = client.list_collection_names() non_system_names = client.list_collection_names(filter={"name": {"$regex": r"^(?!system\\.)"}}) +Database.current_op is removed +.............................. + +Removed :meth:`pymongo.database.Database.current_op`. Use +:meth:`~pymongo.database.Database.aggregate` instead with the +`$currentOp aggregation pipeline stage`_. Code like +this:: + + ops = client.admin.current_op()['inprog'] + +can be changed to this:: + + ops = list(client.admin.aggregate([{'$currentOp': {}}])) + +.. _$currentOp aggregation pipeline stage: https://docs.mongodb.com/manual/reference/operator/aggregation/currentOp/ + Database.add_user is removed ............................ diff --git a/pymongo/database.py b/pymongo/database.py index 9d88945b89..584824e062 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -850,62 +850,6 @@ def validate_collection(self, name_or_collection, return result - def _current_op(self, include_all=False, session=None): - """Helper for running $currentOp.""" - cmd = SON([("currentOp", 1), ("$all", include_all)]) - with self.__client._socket_for_writes(session) as sock_info: - if sock_info.max_wire_version >= 4: - return self.__client.admin._command( - sock_info, cmd, codec_options=self.codec_options, - session=session) - else: - spec = {"$all": True} if include_all else {} - return _first_batch(sock_info, "admin", "$cmd.sys.inprog", - spec, -1, True, self.codec_options, - ReadPreference.PRIMARY, cmd, - self.client._event_listeners) - - def current_op(self, include_all=False, session=None): - """**DEPRECATED**: Get information on operations currently running. - - Starting with MongoDB 3.6 this helper is obsolete. The functionality - provided by this helper is available in MongoDB 3.6+ using the - `$currentOp aggregation pipeline stage`_, which can be used with - :meth:`aggregate`. Note that, while this helper can only return - a single document limited to a 16MB result, :meth:`aggregate` - returns a cursor avoiding that limitation. - - Users of MongoDB versions older than 3.6 can use the `currentOp command`_ - directly:: - - # MongoDB 3.2 and 3.4 - client.admin.command("currentOp") - - Or query the "inprog" virtual collection:: - - # MongoDB 2.6 and 3.0 - client.admin["$cmd.sys.inprog"].find_one() - - :Parameters: - - `include_all` (optional): if ``True`` also list currently - idle operations in the result - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.9 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. _$currentOp aggregation pipeline stage: https://docs.mongodb.com/manual/reference/operator/aggregation/currentOp/ - .. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ - """ - warnings.warn("current_op() is deprecated. See the documentation for " - "more information", - DeprecationWarning, stacklevel=2) - return self._current_op(include_all, session) - def profiling_level(self, session=None): """**DEPRECATED**: Get the database's current profiling level. diff --git a/test/test_database.py b/test/test_database.py index 9ee854a533..b19aba0ad0 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -739,15 +739,6 @@ def test_with_options(self): self.assertEqual( getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) - def test_current_op_codec_options(self): - class MySON(SON): - pass - opts = CodecOptions(document_class=MySON) - db = self.client.get_database("pymongo_test", codec_options=opts) - current_op = db.current_op(True) - self.assertTrue(current_op['inprog']) - self.assertIsInstance(current_op, MySON) - class TestDatabaseAggregation(IntegrationTest): def setUp(self): diff --git a/test/test_monitoring.py b/test/test_monitoring.py index a6fe5c9df5..3ff9f04a81 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1084,24 +1084,6 @@ def test_first_batch_helper(self): self.assertTrue('ok' in succeeded.reply) self.listener.results.clear() - self.client.pymongo_test.current_op(True) - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('currentOp', 1), ('$all', True)]) - self.assertEqualCommand(expected, started.command) - self.assertEqual('admin', started.database_name) - self.assertEqual('currentOp', started.command_name) - self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) - self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) - self.assertIsInstance(succeeded.duration_micros, int) - self.assertEqual(started.command_name, succeeded.command_name) - self.assertEqual(started.request_id, succeeded.request_id) - self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('inprog' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) def test_sensitive_commands(self): listeners = self.client._event_listeners diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 6f6634234e..7a913d23bb 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -166,9 +166,6 @@ def test_reads_from_secondary(self): self.assertIsNotNone(coll.aggregate([])) self.assertIsNotNone(coll.index_information()) - # Test some "magic" namespace helpers. - self.assertIsNotNone(db.current_op()) - class TestReadPreferences(TestReadPreferencesBase): diff --git a/test/test_session.py b/test/test_session.py index df1865c4c5..e92b5bbbbd 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -249,7 +249,6 @@ def test_database(self): (db.list_collection_names, [], {}), (db.validate_collection, ['collection'], {}), (db.drop_collection, ['collection'], {}), - (db.current_op, [], {}), (db.profiling_info, [], {}), (db.dereference, [DBRef('collection', 1)], {}), ] @@ -963,14 +962,6 @@ def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( lambda coll, session: coll.find({}, session=session).explain()) - @client_context.require_no_standalone - @unittest.skipIf(client_context.serverless, - "Serverless does not support currentOp") - def test_writes_do_not_include_read_concern_current_op(self): - # Not a write, but currentOp also doesn't support readConcern. - self._test_no_read_concern( - lambda coll, session: coll.database.current_op(session=session)) - @client_context.require_no_standalone @unittest.skipIf(client_context.serverless, "Serverless does not support mapReduce") From a28b05bf245d7b44bf4964421cc341beff00081c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 Aug 2021 10:54:03 -0700 Subject: [PATCH 0424/2111] PYTHON-1410 Remove "safe" legacy messages --- pymongo/_cmessagemodule.c | 28 ++++----------------------- pymongo/collection.py | 7 +++---- pymongo/message.py | 40 ++++++++++++++------------------------- 3 files changed, 21 insertions(+), 54 deletions(-) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index aeeb052db5..70df158abf 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -181,20 +181,17 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { int before, cur_size, max_size = 0; int flags = 0; unsigned char check_keys; - unsigned char safe; unsigned char continue_on_error; codec_options_t options; - PyObject* last_error_args; buffer_t buffer = NULL; int length_location, message_length; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "et#ObbObO&", + if (!PyArg_ParseTuple(args, "et#ObbO&", "utf-8", &collection_name, &collection_name_length, - &docs, &check_keys, &safe, - &last_error_args, + &docs, &check_keys, &continue_on_error, convert_codec_options, &options)) { return NULL; @@ -257,13 +254,6 @@ static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); - if (safe) { - if (!add_last_error(self, buffer, request_id, collection_name, - collection_name_length, &options, last_error_args)) { - goto fail; - } - } - /* objectify buffer */ result = Py_BuildValue("iy#i", request_id, buffer_get_buffer(buffer), @@ -290,21 +280,18 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { PyObject* spec; unsigned char multi; unsigned char upsert; - unsigned char safe; unsigned char check_keys; codec_options_t options; - PyObject* last_error_args; int flags; buffer_t buffer = NULL; int length_location, message_length; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "et#bbOObObO&", + if (!PyArg_ParseTuple(args, "et#bbOObO&", "utf-8", &collection_name, &collection_name_length, - &upsert, &multi, &spec, &doc, &safe, - &last_error_args, &check_keys, + &upsert, &multi, &spec, &doc, &check_keys, convert_codec_options, &options)) { return NULL; } @@ -357,13 +344,6 @@ static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); - if (safe) { - if (!add_last_error(self, buffer, request_id, collection_name, - collection_name_length, &options, last_error_args)) { - goto fail; - } - } - /* objectify buffer */ result = Py_BuildValue("iy#i", request_id, buffer_get_buffer(buffer), diff --git a/pymongo/collection.py b/pymongo/collection.py index 8a4db9a87c..2a0ffd6cea 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -575,7 +575,7 @@ def _insert_command(session, sock_info, retryable_write): return self._legacy_write( sock_info, 'insert', command, op_id, bypass_doc_val, message.insert, self.__full_name, - [doc], check_keys, False, write_concern.document, False, + [doc], check_keys, False, self.__write_response_codec_options) if bypass_doc_val and sock_info.max_wire_version >= 4: @@ -765,8 +765,8 @@ def _update(self, sock_info, criteria, document, upsert=False, return self._legacy_write( sock_info, 'update', command, op_id, bypass_doc_val, message.update, self.__full_name, upsert, - multi, criteria, document, False, write_concern.document, - check_keys, self.__write_response_codec_options) + multi, criteria, document, check_keys, + self.__write_response_codec_options) # Update command. if bypass_doc_val and sock_info.max_wire_version >= 4: @@ -1115,7 +1115,6 @@ def _delete( return self._legacy_write( sock_info, 'delete', command, op_id, False, message.delete, self.__full_name, criteria, - False, write_concern.document, self.__write_response_codec_options, int(not multi)) # Delete command. diff --git a/pymongo/message.py b/pymongo/message.py index 0ca4ca71fd..60be0c7e75 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -580,28 +580,25 @@ def _insert_compressed( return rid, msg, max_bson_size -def _insert_uncompressed(collection_name, docs, check_keys, - safe, last_error_args, continue_on_error, opts): +def _insert_uncompressed(collection_name, docs, check_keys, continue_on_error, + opts): """Internal insert message helper.""" op_insert, max_bson_size = _insert( collection_name, docs, check_keys, continue_on_error, opts) rid, msg = __pack_message(2002, op_insert) - if safe: - rid, gle, _ = __last_error(collection_name, last_error_args) - return rid, msg + gle, max_bson_size return rid, msg, max_bson_size if _use_c: _insert_uncompressed = _cmessage._insert_message -def insert(collection_name, docs, check_keys, - safe, last_error_args, continue_on_error, opts, ctx=None): +def insert(collection_name, docs, check_keys, continue_on_error, opts, + ctx=None): """Get an **insert** message.""" if ctx: return _insert_compressed( collection_name, docs, check_keys, continue_on_error, opts, ctx) - return _insert_uncompressed(collection_name, docs, check_keys, safe, - last_error_args, continue_on_error, opts) + return _insert_uncompressed(collection_name, docs, check_keys, + continue_on_error, opts) def _update(collection_name, upsert, multi, spec, doc, check_keys, opts): @@ -630,28 +627,25 @@ def _update_compressed( return rid, msg, max_bson_size -def _update_uncompressed(collection_name, upsert, multi, spec, - doc, safe, last_error_args, check_keys, opts): +def _update_uncompressed(collection_name, upsert, multi, spec, doc, + check_keys, opts): """Internal update message helper.""" op_update, max_bson_size = _update( collection_name, upsert, multi, spec, doc, check_keys, opts) rid, msg = __pack_message(2001, op_update) - if safe: - rid, gle, _ = __last_error(collection_name, last_error_args) - return rid, msg + gle, max_bson_size return rid, msg, max_bson_size if _use_c: _update_uncompressed = _cmessage._update_message -def update(collection_name, upsert, multi, spec, - doc, safe, last_error_args, check_keys, opts, ctx=None): +def update(collection_name, upsert, multi, spec, doc, check_keys, opts, + ctx=None): """Get an **update** message.""" if ctx: return _update_compressed( collection_name, upsert, multi, spec, doc, check_keys, opts, ctx) return _update_uncompressed(collection_name, upsert, multi, spec, - doc, safe, last_error_args, check_keys, opts) + doc, check_keys, opts) _pack_op_msg_flags_type = struct.Struct(" Date: Thu, 5 Aug 2021 17:58:15 -0700 Subject: [PATCH 0425/2111] PYTHON-1318 Remove initialize_unordered_bulk_op and initialize_ordered_bulk_op (#692) PYTHON-2436 Unskip test_large_inserts_ordered on MongoDB 5.0. --- doc/api/pymongo/collection.rst | 2 - doc/changelog.rst | 4 + doc/migrate-to-pymongo4.rst | 45 ++ pymongo/bulk.py | 180 ------ pymongo/collection.py | 65 +- test/test_bulk.py | 580 ++++++++++++++++- test/test_collation.py | 48 -- test/test_collection.py | 14 + test/test_legacy_api.py | 1079 +------------------------------- test/test_session.py | 10 - 10 files changed, 632 insertions(+), 1395 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 240318c0e9..8430c5a06e 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -67,6 +67,4 @@ .. automethod:: options .. automethod:: map_reduce .. automethod:: inline_map_reduce - .. automethod:: initialize_unordered_bulk_op - .. automethod:: initialize_ordered_bulk_op .. automethod:: count diff --git a/doc/changelog.rst b/doc/changelog.rst index 1c482cbc35..c1b6bd9f34 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -45,6 +45,10 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.collection.Collection.update`. - Removed :meth:`pymongo.collection.Collection.remove`. - Removed :meth:`pymongo.collection.Collection.find_and_modify`. +- Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op`, + :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`, and + :class:`pymongo.bulk.BulkOperationBuilder`. Use + :meth:`pymongo.collection.Collection.bulk_write` instead. - Removed :meth:`pymongo.collection.Collection.group`. - Removed the ``useCursor`` option for :meth:`~pymongo.collection.Collection.aggregate`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 0cefc21fc9..ba3116899e 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -315,6 +315,51 @@ Can be changed to this:: replaced_doc = collection.find_one_and_replace({'b': 1}, {'c': 1}) deleted_doc = collection.find_one_and_delete({'c': 1}) +Collection.initialize_ordered_bulk_op and initialize_unordered_bulk_op is removed +................................................................................. + +Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op` +and :class:`pymongo.bulk.BulkOperationBuilder`. Use +:meth:`pymongo.collection.Collection.bulk_write` instead. Code like this:: + + batch = coll.initialize_ordered_bulk_op() + batch.insert({'a': 1}) + batch.find({'a': 1}).update_one({'$set': {'b': 1}}) + batch.find({'a': 2}).upsert().replace_one({'b': 2}) + batch.find({'a': 3}).remove() + result = batch.execute() + +Can be changed to this:: + + coll.bulk_write([ + InsertOne({'a': 1}), + UpdateOne({'a': 1}, {'$set': {'b': 1}}), + ReplaceOne({'a': 2}, {'b': 2}, upsert=True), + DeleteOne({'a': 3}), + ]) + +Collection.initialize_unordered_bulk_op is removed +.................................................. + +Removed :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`. +Use :meth:`pymongo.collection.Collection.bulk_write` instead. Code like this:: + + batch = coll.initialize_unordered_bulk_op() + batch.insert({'a': 1}) + batch.find({'a': 1}).update_one({'$set': {'b': 1}}) + batch.find({'a': 2}).upsert().replace_one({'b': 2}) + batch.find({'a': 3}).remove() + result = batch.execute() + +Can be changed to this:: + + coll.bulk_write([ + InsertOne({'a': 1}), + UpdateOne({'a': 1}, {'$set': {'b': 1}}), + ReplaceOne({'a': 2}, {'b': 2}, upsert=True), + DeleteOne({'a': 3}), + ], ordered=False) + Collection.group is removed ........................... diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 893d8a83ee..ff3f5974df 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -521,183 +521,3 @@ def execute(self, write_concern, session): self.execute_no_results(sock_info, generator) else: return self.execute_command(generator, write_concern, session) - - -class BulkUpsertOperation(object): - """An interface for adding upsert operations. - """ - - __slots__ = ('__selector', '__bulk', '__collation') - - def __init__(self, selector, bulk, collation): - self.__selector = selector - self.__bulk = bulk - self.__collation = collation - - def update_one(self, update): - """Update one document matching the selector. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, - update, multi=False, upsert=True, - collation=self.__collation) - - def update(self, update): - """Update all documents matching the selector. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, - update, multi=True, upsert=True, - collation=self.__collation) - - def replace_one(self, replacement): - """Replace one entire document matching the selector criteria. - - :Parameters: - - `replacement` (dict): the replacement document - """ - self.__bulk.add_replace(self.__selector, replacement, upsert=True, - collation=self.__collation) - - -class BulkWriteOperation(object): - """An interface for adding update or remove operations. - """ - - __slots__ = ('__selector', '__bulk', '__collation') - - def __init__(self, selector, bulk, collation): - self.__selector = selector - self.__bulk = bulk - self.__collation = collation - - def update_one(self, update): - """Update one document matching the selector criteria. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, update, multi=False, - collation=self.__collation) - - def update(self, update): - """Update all documents matching the selector criteria. - - :Parameters: - - `update` (dict): the update operations to apply - """ - self.__bulk.add_update(self.__selector, update, multi=True, - collation=self.__collation) - - def replace_one(self, replacement): - """Replace one entire document matching the selector criteria. - - :Parameters: - - `replacement` (dict): the replacement document - """ - self.__bulk.add_replace(self.__selector, replacement, - collation=self.__collation) - - def remove_one(self): - """Remove a single document matching the selector criteria. - """ - self.__bulk.add_delete(self.__selector, _DELETE_ONE, - collation=self.__collation) - - def remove(self): - """Remove all documents matching the selector criteria. - """ - self.__bulk.add_delete(self.__selector, _DELETE_ALL, - collation=self.__collation) - - def upsert(self): - """Specify that all chained update operations should be - upserts. - - :Returns: - - A :class:`BulkUpsertOperation` instance, used to add - update operations to this bulk operation. - """ - return BulkUpsertOperation(self.__selector, self.__bulk, - self.__collation) - - -class BulkOperationBuilder(object): - """**DEPRECATED**: An interface for executing a batch of write operations. - """ - - __slots__ = '__bulk' - - def __init__(self, collection, ordered=True, - bypass_document_validation=False): - """**DEPRECATED**: Initialize a new BulkOperationBuilder instance. - - :Parameters: - - `collection`: A :class:`~pymongo.collection.Collection` instance. - - `ordered` (optional): If ``True`` all operations will be executed - serially, in the order provided, and the entire execution will - abort on the first error. If ``False`` operations will be executed - in arbitrary order (possibly in parallel on the server), reporting - any errors that occurred after attempting all operations. Defaults - to ``True``. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 3.5 - Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` - instead. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - """ - self.__bulk = _Bulk(collection, ordered, bypass_document_validation) - - def find(self, selector, collation=None): - """Specify selection criteria for bulk operations. - - :Parameters: - - `selector` (dict): the selection criteria for update - and remove operations. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. - - :Returns: - - A :class:`BulkWriteOperation` instance, used to add - update and remove operations to this bulk operation. - - .. versionchanged:: 3.4 - Added the `collation` option. - - """ - validate_is_mapping("selector", selector) - return BulkWriteOperation(selector, self.__bulk, collation) - - def insert(self, document): - """Insert a single document. - - :Parameters: - - `document` (dict): the document to insert - - .. seealso:: :ref:`writes-and-ids` - """ - self.__bulk.add_insert(document) - - def execute(self, write_concern=None): - """Execute all provided operations. - - :Parameters: - - write_concern (optional): the write concern for this bulk - execution. - """ - if write_concern is not None: - write_concern = WriteConcern(**write_concern) - return self.__bulk.execute(write_concern, session=None) diff --git a/pymongo/collection.py b/pymongo/collection.py index 2a0ffd6cea..848b39280b 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -29,7 +29,7 @@ message) from pymongo.aggregation import (_CollectionAggregationCommand, _CollectionRawAggregationCommand) -from pymongo.bulk import BulkOperationBuilder, _Bulk +from pymongo.bulk import _Bulk from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor from pymongo.collation import validate_collation_or_none from pymongo.change_stream import CollectionChangeStream @@ -358,69 +358,6 @@ def with_options(self, codec_options=None, read_preference=None, write_concern or self.write_concern, read_concern or self.read_concern) - def initialize_unordered_bulk_op(self, bypass_document_validation=False): - """**DEPRECATED** - Initialize an unordered batch of write operations. - - Operations will be performed on the server in arbitrary order, - possibly in parallel. All operations will be attempted. - - :Parameters: - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. - - See :ref:`unordered_bulk` for examples. - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 3.5 - Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` - instead. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 2.7 - """ - warnings.warn("initialize_unordered_bulk_op is deprecated", - DeprecationWarning, stacklevel=2) - return BulkOperationBuilder(self, False, bypass_document_validation) - - def initialize_ordered_bulk_op(self, bypass_document_validation=False): - """**DEPRECATED** - Initialize an ordered batch of write operations. - - Operations will be performed on the server serially, in the - order provided. If an error occurs all remaining operations - are aborted. - - :Parameters: - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance. - - See :ref:`ordered_bulk` for examples. - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 3.5 - Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write` - instead. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 2.7 - """ - warnings.warn("initialize_ordered_bulk_op is deprecated", - DeprecationWarning, stacklevel=2) - return BulkOperationBuilder(self, True, bypass_document_validation) - def bulk_write(self, requests, ordered=True, bypass_document_validation=False, session=None): """Send a batch of write operations to the server. diff --git a/test/test_bulk.py b/test/test_bulk.py index 7d8176e57f..9834da55cb 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -19,16 +19,20 @@ sys.path[0:0] = [""] from bson.objectid import ObjectId -from pymongo.operations import * -from pymongo.errors import (ConfigurationError, +from pymongo.common import partition_node +from pymongo.errors import (BulkWriteError, + ConfigurationError, InvalidOperation, OperationFailure) +from pymongo.operations import * from pymongo.write_concern import WriteConcern from test import (client_context, unittest, IntegrationTest) from test.utils import (remove_all_users, - rs_or_single_client_noauth) + rs_or_single_client_noauth, + single_client, + wait_until) class BulkTestBase(IntegrationTest): @@ -37,8 +41,7 @@ class BulkTestBase(IntegrationTest): def setUpClass(cls): super(BulkTestBase, cls).setUpClass() cls.coll = cls.db.test - ismaster = client_context.client.admin.command('ismaster') - cls.has_write_commands = (ismaster.get("maxWireVersion", 0) > 1) + cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) def setUp(self): super(BulkTestBase, self).setUp() @@ -48,11 +51,7 @@ def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" for key, value in expected.items(): if key == 'nModified': - if self.has_write_commands: - self.assertEqual(value, actual['nModified']) - else: - # Legacy servers don't include nModified in the response. - self.assertFalse('nModified' in actual) + self.assertEqual(value, actual['nModified']) elif key == 'upserted': expected_upserts = value actual_upserts = actual['upserted'] @@ -182,7 +181,7 @@ def test_array_filters_validation(self): self.assertRaises(TypeError, UpdateOne, {}, {}, array_filters={}) def test_array_filters_unacknowledged(self): - coll = self.coll.with_options(write_concern=WriteConcern(w=0)) + coll = self.coll_w0 update_one = UpdateOne( {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) update_many = UpdateMany( @@ -338,9 +337,7 @@ def gen(): self.assertEqual(5, len(result.inserted_ids)) def test_bulk_write_no_results(self): - - coll = self.coll.with_options(write_concern=WriteConcern(w=0)) - result = coll.bulk_write([InsertOne({})]) + result = self.coll_w0.bulk_write([InsertOne({})]) self.assertFalse(result.acknowledged) self.assertRaises(InvalidOperation, lambda: result.inserted_count) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -359,6 +356,294 @@ def test_bulk_write_invalid_arguments(self): with self.assertRaises(TypeError): self.coll.bulk_write([{}]) + def test_upsert_large(self): + big = 'a' * (client_context.client.max_bson_size - 37) + result = self.coll.bulk_write([ + UpdateOne({'x': 1}, {'$set': {'s': big}}, upsert=True)]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 1, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': '...'}]}, + result.bulk_api_result) + + self.assertEqual(1, self.coll.count_documents({'x': 1})) + + def test_client_generated_upsert_id(self): + result = self.coll.bulk_write([ + UpdateOne({'_id': 0}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': 2}, {'_id': 2}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': 0}, + {'index': 1, '_id': 1}, + {'index': 2, '_id': 2}]}, + result.bulk_api_result) + + def test_single_ordered_batch(self): + result = self.coll.bulk_write([ + InsertOne({'a': 1}), + UpdateOne({'a': 1}, {'$set': {'b': 1}}), + UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), + InsertOne({'a': 3}), + DeleteOne({'a': 3}), + ]) + self.assertEqualResponse( + {'nMatched': 1, + 'nModified': 1, + 'nUpserted': 1, + 'nInserted': 2, + 'nRemoved': 1, + 'upserted': [{'index': 2, '_id': '...'}]}, + result.bulk_api_result) + + def test_single_error_ordered_batch(self): + self.coll.create_index('a', unique=True) + self.addCleanup(self.coll.drop_index, [('a', 1)]) + requests = [ + InsertOne({'b': 1, 'a': 1}), + UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), + InsertOne({'b': 3, 'a': 2}), + ] + try: + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 0, + 'nInserted': 1, + 'nRemoved': 0, + 'upserted': [], + 'writeConcernErrors': [], + 'writeErrors': [ + {'index': 1, + 'code': 11000, + 'errmsg': '...', + 'op': {'q': {'b': 2}, + 'u': {'$set': {'a': 1}}, + 'multi': False, + 'upsert': True}}]}, + result) + + def test_multiple_error_ordered_batch(self): + self.coll.create_index('a', unique=True) + self.addCleanup(self.coll.drop_index, [('a', 1)]) + requests = [ + InsertOne({'b': 1, 'a': 1}), + UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), + UpdateOne({'b': 3}, {'$set': {'a': 2}}, upsert=True), + UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), + InsertOne({'b': 4, 'a': 3}), + InsertOne({'b': 5, 'a': 1}), + ] + + try: + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 0, + 'nInserted': 1, + 'nRemoved': 0, + 'upserted': [], + 'writeConcernErrors': [], + 'writeErrors': [ + {'index': 1, + 'code': 11000, + 'errmsg': '...', + 'op': {'q': {'b': 2}, + 'u': {'$set': {'a': 1}}, + 'multi': False, + 'upsert': True}}]}, + result) + + def test_single_unordered_batch(self): + requests = [ + InsertOne({'a': 1}), + UpdateOne({'a': 1}, {'$set': {'b': 1}}), + UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), + InsertOne({'a': 3}), + DeleteOne({'a': 3}), + ] + result = self.coll.bulk_write(requests, ordered=False) + self.assertEqualResponse( + {'nMatched': 1, + 'nModified': 1, + 'nUpserted': 1, + 'nInserted': 2, + 'nRemoved': 1, + 'upserted': [{'index': 2, '_id': '...'}], + 'writeErrors': [], + 'writeConcernErrors': []}, + result.bulk_api_result) + + def test_single_error_unordered_batch(self): + self.coll.create_index('a', unique=True) + self.addCleanup(self.coll.drop_index, [('a', 1)]) + requests = [ + InsertOne({'b': 1, 'a': 1}), + UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), + InsertOne({'b': 3, 'a': 2}), + ] + + try: + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 0, + 'nInserted': 2, + 'nRemoved': 0, + 'upserted': [], + 'writeConcernErrors': [], + 'writeErrors': [ + {'index': 1, + 'code': 11000, + 'errmsg': '...', + 'op': {'q': {'b': 2}, + 'u': {'$set': {'a': 1}}, + 'multi': False, + 'upsert': True}}]}, + result) + + def test_multiple_error_unordered_batch(self): + self.coll.create_index('a', unique=True) + self.addCleanup(self.coll.drop_index, [('a', 1)]) + requests = [ + InsertOne({'b': 1, 'a': 1}), + UpdateOne({'b': 2}, {'$set': {'a': 3}}, upsert=True), + UpdateOne({'b': 3}, {'$set': {'a': 4}}, upsert=True), + UpdateOne({'b': 4}, {'$set': {'a': 3}}, upsert=True), + InsertOne({'b': 5, 'a': 2}), + InsertOne({'b': 6, 'a': 1}), + ] + + try: + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + # Assume the update at index 1 runs before the update at index 3, + # although the spec does not require it. Same for inserts. + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 2, + 'nInserted': 2, + 'nRemoved': 0, + 'upserted': [ + {'index': 1, '_id': '...'}, + {'index': 2, '_id': '...'}], + 'writeConcernErrors': [], + 'writeErrors': [ + {'index': 3, + 'code': 11000, + 'errmsg': '...', + 'op': {'q': {'b': 4}, + 'u': {'$set': {'a': 3}}, + 'multi': False, + 'upsert': True}}, + {'index': 5, + 'code': 11000, + 'errmsg': '...', + 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, + result) + + def test_large_inserts_ordered(self): + big = 'x' * self.coll.database.client.max_bson_size + requests = [ + InsertOne({'b': 1, 'a': 1}), + InsertOne({'big': big}), + InsertOne({'b': 2, 'a': 2}), + ] + + try: + self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(1, result['nInserted']) + + self.coll.delete_many({}) + + big = 'x' * (1024 * 1024 * 4) + result = self.coll.bulk_write([ + InsertOne({'a': 1, 'big': big}), + InsertOne({'a': 2, 'big': big}), + InsertOne({'a': 3, 'big': big}), + InsertOne({'a': 4, 'big': big}), + InsertOne({'a': 5, 'big': big}), + InsertOne({'a': 6, 'big': big}), + ]) + + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, self.coll.count_documents({})) + + def test_large_inserts_unordered(self): + big = 'x' * self.coll.database.client.max_bson_size + requests = [ + InsertOne({'b': 1, 'a': 1}), + InsertOne({'big': big}), + InsertOne({'b': 2, 'a': 2}), + ] + + try: + self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, result['nInserted']) + + self.coll.delete_many({}) + + big = 'x' * (1024 * 1024 * 4) + result = self.coll.bulk_write([ + InsertOne({'a': 1, 'big': big}), + InsertOne({'a': 2, 'big': big}), + InsertOne({'a': 3, 'big': big}), + InsertOne({'a': 4, 'big': big}), + InsertOne({'a': 5, 'big': big}), + InsertOne({'a': 6, 'big': big}), + ], ordered=False) + + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, self.coll.count_documents({})) + class BulkAuthorizationTestBase(BulkTestBase): @@ -387,6 +672,73 @@ def tearDown(self): remove_all_users(self.db) +class TestBulkUnacknowledged(BulkTestBase): + + def tearDown(self): + self.coll.delete_many({}) + + def test_no_results_ordered_success(self): + requests = [ + InsertOne({'a': 1}), + UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), + InsertOne({'a': 2}), + DeleteOne({'a': 1}), + ] + result = self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + wait_until(lambda: 2 == self.coll.count_documents({}), + 'insert 2 documents') + wait_until(lambda: self.coll.find_one({'_id': 1}) is None, + 'removed {"_id": 1}') + + def test_no_results_ordered_failure(self): + requests = [ + InsertOne({'_id': 1}), + UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), + InsertOne({'_id': 2}), + # Fails with duplicate key error. + InsertOne({'_id': 1}), + # Should not be executed since the batch is ordered. + DeleteOne({'_id': 1}), + ] + result = self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + wait_until(lambda: 3 == self.coll.count_documents({}), + 'insert 3 documents') + self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) + + def test_no_results_unordered_success(self): + requests = [ + InsertOne({'a': 1}), + UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), + InsertOne({'a': 2}), + DeleteOne({'a': 1}), + ] + result = self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + wait_until(lambda: 2 == self.coll.count_documents({}), + 'insert 2 documents') + wait_until(lambda: self.coll.find_one({'_id': 1}) is None, + 'removed {"_id": 1}') + + def test_no_results_unordered_failure(self): + requests = [ + InsertOne({'_id': 1}), + UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), + InsertOne({'_id': 2}), + # Fails with duplicate key error. + InsertOne({'_id': 1}), + # Should be executed since the batch is unordered. + DeleteOne({'_id': 1}), + ] + result = self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + wait_until(lambda: 2 == self.coll.count_documents({}), + 'insert 2 documents') + wait_until(lambda: self.coll.find_one({'_id': 1}) is None, + 'removed {"_id": 1}') + + class TestBulkAuthorization(BulkAuthorizationTestBase): def test_readonly(self): @@ -415,5 +767,205 @@ def test_no_remove(self): self.assertRaises(OperationFailure, coll.bulk_write, requests) self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) + +class TestBulkWriteConcern(BulkTestBase): + + @classmethod + def setUpClass(cls): + super(TestBulkWriteConcern, cls).setUpClass() + cls.w = client_context.w + cls.secondary = None + if cls.w > 1: + for member in client_context.ismaster['hosts']: + if member != client_context.ismaster['primary']: + cls.secondary = single_client(*partition_node(member)) + break + + # We tested wtimeout errors by specifying a write concern greater than + # the number of members, but in MongoDB 2.7.8+ this causes a different + # sort of error, "Not enough data-bearing nodes". In recent servers we + # use a failpoint to pause replication on a secondary. + cls.need_replication_stopped = client_context.version.at_least(2, 7, 8) + + @classmethod + def tearDownClass(cls): + if cls.secondary: + cls.secondary.close() + + def cause_wtimeout(self, requests, ordered): + if self.need_replication_stopped: + if not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled.") + + self.secondary.admin.command('configureFailPoint', + 'rsSyncApplyStop', + mode='alwaysOn') + + try: + coll = self.coll.with_options( + write_concern=WriteConcern(w=self.w, wtimeout=1)) + return coll.bulk_write(requests, ordered=ordered) + finally: + self.secondary.admin.command('configureFailPoint', + 'rsSyncApplyStop', + mode='off') + else: + coll = self.coll.with_options( + write_concern=WriteConcern(w=self.w + 1, wtimeout=1)) + return coll.bulk_write(requests, ordered=ordered) + + @client_context.require_replica_set + @client_context.require_secondaries_count(1) + def test_write_concern_failure_ordered(self): + # Ensure we don't raise on wnote. + coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) + result = coll_ww.bulk_write([ + DeleteOne({"something": "that does no exist"})]) + self.assertTrue(result.acknowledged) + + requests = [ + InsertOne({'a': 1}), + InsertOne({'a': 2}) + ] + # Replication wtimeout is a 'soft' error. + # It shouldn't stop batch processing. + try: + self.cause_wtimeout(requests, ordered=True) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 0, + 'nInserted': 2, + 'nRemoved': 0, + 'upserted': [], + 'writeErrors': []}, + result) + + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(result['writeConcernErrors']) > 0) + + failed = result['writeConcernErrors'][0] + self.assertEqual(64, failed['code']) + self.assertTrue(isinstance(failed['errmsg'], str)) + + self.coll.delete_many({}) + self.coll.create_index('a', unique=True) + self.addCleanup(self.coll.drop_index, [('a', 1)]) + + # Fail due to write concern support as well + # as duplicate key error on ordered batch. + requests = [ + InsertOne({'a': 1}), + ReplaceOne({'a': 3}, {'b': 1}, upsert=True), + InsertOne({'a': 1}), + InsertOne({'a': 2}), + ] + try: + self.cause_wtimeout(requests, ordered=True) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 1, + 'nInserted': 1, + 'nRemoved': 0, + 'upserted': [{'index': 1, '_id': '...'}], + 'writeErrors': [ + {'index': 2, + 'code': 11000, + 'errmsg': '...', + 'op': {'_id': '...', 'a': 1}}]}, + result) + + self.assertTrue(len(result['writeConcernErrors']) > 1) + failed = result['writeErrors'][0] + self.assertTrue("duplicate" in failed['errmsg']) + + @client_context.require_replica_set + @client_context.require_secondaries_count(1) + def test_write_concern_failure_unordered(self): + # Ensure we don't raise on wnote. + coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) + result = coll_ww.bulk_write([ + DeleteOne({"something": "that does no exist"})], ordered=False) + self.assertTrue(result.acknowledged) + + requests = [ + InsertOne({'a': 1}), + UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), + InsertOne({'a': 2}), + ] + # Replication wtimeout is a 'soft' error. + # It shouldn't stop batch processing. + try: + self.cause_wtimeout(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, result['nInserted']) + self.assertEqual(1, result['nUpserted']) + self.assertEqual(0, len(result['writeErrors'])) + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(result['writeConcernErrors']) > 1) + + self.coll.delete_many({}) + self.coll.create_index('a', unique=True) + self.addCleanup(self.coll.drop_index, [('a', 1)]) + + # Fail due to write concern support as well + # as duplicate key error on unordered batch. + requests = [ + InsertOne({'a': 1}), + UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), + InsertOne({'a': 1}), + InsertOne({'a': 2}), + ] + try: + self.cause_wtimeout(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, result['nInserted']) + self.assertEqual(1, result['nUpserted']) + self.assertEqual(1, len(result['writeErrors'])) + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(result['writeConcernErrors']) > 1) + + failed = result['writeErrors'][0] + self.assertEqual(2, failed['index']) + self.assertEqual(11000, failed['code']) + self.assertTrue(isinstance(failed['errmsg'], str)) + self.assertEqual(1, failed['op']['a']) + + failed = result['writeConcernErrors'][0] + self.assertEqual(64, failed['code']) + self.assertTrue(isinstance(failed['errmsg'], str)) + + upserts = result['upserted'] + self.assertEqual(1, len(upserts)) + self.assertEqual(1, upserts[0]['index']) + self.assertTrue(upserts[0].get('_id')) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_collation.py b/test/test_collation.py index 3aca63d47e..2052fc8db3 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -282,49 +282,6 @@ def check_ops(ops): check_ops(delete_cmd['deletes']) check_ops(update_cmd['updates']) - @raisesConfigurationErrorForOldMongoDB - def test_bulk(self): - bulk = self.db.test.initialize_ordered_bulk_op() - bulk.find({'noCollation': 42}).remove_one() - bulk.find({'noCollation': 42}).remove() - bulk.find({'foo': 42}, collation=self.collation).remove_one() - bulk.find({'foo': 42}, collation=self.collation).remove() - bulk.find({'noCollation': 24}).replace_one({'bar': 42}) - bulk.find({'noCollation': 84}).upsert().update_one( - {'$set': {'foo': 10}}) - bulk.find({'noCollation': 45}).update({'$set': {'bar': 42}}) - bulk.find({'foo': 24}, collation=self.collation).replace_one( - {'foo': 42}) - bulk.find({'foo': 84}, collation=self.collation).upsert().update_one( - {'$set': {'foo': 10}}) - bulk.find({'foo': 45}, collation=self.collation).update({ - '$set': {'foo': 42}}) - bulk.execute() - - delete_cmd = self.listener.results['started'][0].command - update_cmd = self.listener.results['started'][1].command - - def check_ops(ops): - for op in ops: - if 'noCollation' in op['q']: - self.assertNotIn('collation', op) - else: - self.assertEqual(self.collation.document, - op['collation']) - - check_ops(delete_cmd['deletes']) - check_ops(update_cmd['updates']) - - @client_context.require_version_max(3, 3, 8) - def test_mixed_bulk_collation(self): - bulk = self.db.test.initialize_unordered_bulk_op() - bulk.find({'foo': 42}).upsert().update_one( - {'$set': {'bar': 10}}) - bulk.find({'foo': 43}, collation=self.collation).remove_one() - with self.assertRaises(ConfigurationError): - bulk.execute() - self.assertIsNone(self.db.test.find_one({'foo': 42})) - @raisesConfigurationErrorForOldMongoDB def test_indexes_same_keys_different_collations(self): self.db.test.drop() @@ -356,11 +313,6 @@ def test_unacknowledged_write(self): collection.update_one( {'hello': 'world'}, {'$set': {'hello': 'moon'}}, collation=self.collation) - bulk = collection.initialize_ordered_bulk_op() - bulk.find({'hello': 'world'}, collation=self.collation).update_one( - {'$set': {'hello': 'moon'}}) - with self.assertRaises(ConfigurationError): - bulk.execute() update_one = UpdateOne({'hello': 'world'}, {'$set': {'hello': 'moon'}}, collation=self.collation) with self.assertRaises(ConfigurationError): diff --git a/test/test_collection.py b/test/test_collection.py index 8a8f4aa686..b8fdbf7ee6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -789,6 +789,20 @@ def test_insert_many(self): self.assertFalse(result.acknowledged) self.assertEqual(20, db.test.count_documents({})) + def test_insert_many_generator(self): + coll = self.db.test + coll.delete_many({}) + + def gen(): + yield {'a': 1, 'b': 1} + yield {'a': 1, 'b': 2} + yield {'a': 2, 'b': 3} + yield {'a': 3, 'b': 5} + yield {'a': 5, 'b': 8} + + result = coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + def test_insert_many_invalid(self): db = self.db diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py index 246fe333ff..2149f1f816 100644 --- a/test/test_legacy_api.py +++ b/test/test_legacy_api.py @@ -18,23 +18,11 @@ sys.path[0:0] = [""] -from bson.son import SON from pymongo import ASCENDING, GEOHAYSTACK -from pymongo.common import partition_node -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidDocument, - InvalidOperation, - OperationFailure) from pymongo.operations import IndexModel -from test import client_context, unittest, SkipTest +from test import unittest from test.test_client import IntegrationTest -from test.test_bulk import BulkTestBase, BulkAuthorizationTestBase -from test.utils import (DeprecationFilter, - oid_generated_on_process, - rs_or_single_client_noauth, - single_client, - wait_until) +from test.utils import DeprecationFilter class TestDeprecations(IntegrationTest): @@ -58,1068 +46,5 @@ def test_geoHaystack_deprecation(self): DeprecationWarning, self.db.test.create_indexes, indexes) -class TestLegacy(IntegrationTest): - - @classmethod - def setUpClass(cls): - super(TestLegacy, cls).setUpClass() - cls.w = client_context.w - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - -class TestLegacyBulk(BulkTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulk, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_empty(self): - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(InvalidOperation, bulk.execute) - - def test_find(self): - # find() requires a selector. - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.find) - self.assertRaises(TypeError, bulk.find, 'foo') - # No error. - bulk.find({}) - - def test_insert(self): - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.insert, 1) - - # find() before insert() is prohibited. - self.assertRaises(AttributeError, lambda: bulk.find({}).insert({})) - - # We don't allow multiple documents per call. - self.assertRaises(TypeError, bulk.insert, [{}, {}]) - self.assertRaises(TypeError, bulk.insert, ({} for _ in range(2))) - - bulk.insert({}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(1, self.coll.count()) - doc = self.coll.find_one() - self.assertTrue(oid_generated_on_process(doc['_id'])) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.insert({}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(2, self.coll.count()) - - def test_update(self): - - expected = { - 'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # update() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.update({'$set': {'x': 1}})) - - self.assertRaises(TypeError, bulk.find({}).update, 1) - self.assertRaises(ValueError, bulk.find({}).update, {}) - - # All fields must be $-operators. - self.assertRaises(ValueError, bulk.find({}).update, {'foo': 'bar'}) - bulk.find({}).update({'$set': {'foo': 'bar'}}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 2) - - # All fields must be $-operators -- validated server-side. - bulk = self.coll.initialize_ordered_bulk_op() - updates = SON([('$set', {'x': 1}), ('y', 1)]) - bulk.find({}).update(updates) - self.assertRaises(BulkWriteError, bulk.execute) - - self.coll.delete_many({}) - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).update({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 2) - - self.coll.insert_one({'x': 1}) - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).update({'$set': {'x': 42}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(1, self.coll.find({'x': 42}).count()) - - # Second time, x is already 42 so nModified is 0. - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 42}).update({'$set': {'x': 42}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - def test_update_one(self): - - expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # update_one() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.update_one({'$set': {'x': 1}})) - - self.assertRaises(TypeError, bulk.find({}).update_one, 1) - self.assertRaises(ValueError, bulk.find({}).update_one, {}) - self.assertRaises(ValueError, bulk.find({}).update_one, {'foo': 'bar'}) - bulk.find({}).update_one({'$set': {'foo': 'bar'}}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - self.coll.delete_many({}) - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).update_one({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - # All fields must be $-operators -- validated server-side. - bulk = self.coll.initialize_ordered_bulk_op() - updates = SON([('$set', {'x': 1}), ('y', 1)]) - bulk.find({}).update_one(updates) - self.assertRaises(BulkWriteError, bulk.execute) - - def test_replace_one(self): - - expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - self.assertRaises(TypeError, bulk.find({}).replace_one, 1) - self.assertRaises(ValueError, - bulk.find({}).replace_one, {'$set': {'foo': 'bar'}}) - bulk.find({}).replace_one({'foo': 'bar'}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'foo': 'bar'}).count(), 1) - - self.coll.delete_many({}) - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).replace_one({'bim': 'baz'}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - def test_remove(self): - # Test removing all documents, ordered. - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - # remove() must be preceded by find(). - self.assertRaises(AttributeError, lambda: bulk.remove()) - bulk.find({}).remove() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.count(), 0) - - # Test removing some documents, ordered. - self.coll.insert_many([{}, {'x': 1}, {}, {'x': 1}]) - - bulk = self.coll.initialize_ordered_bulk_op() - - bulk.find({'x': 1}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 2) - self.coll.delete_many({}) - - # Test removing all documents, unordered. - self.coll.insert_many([{}, {}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - # Test removing some documents, unordered. - self.assertEqual(self.coll.count(), 0) - - self.coll.insert_many([{}, {'x': 1}, {}, {'x': 1}]) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).remove() - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.count(), 2) - self.coll.delete_many({}) - - def test_remove_one(self): - - bulk = self.coll.initialize_ordered_bulk_op() - - # remove_one() must be preceded by find(). - self.assertRaises(AttributeError, lambda: bulk.remove_one()) - - # Test removing one document, empty selector. - # First ordered, then unordered. - self.coll.insert_many([{}, {}]) - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] - } - - bulk.find({}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.count(), 1) - - self.coll.insert_one({}) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual(self.coll.count(), 1) - - # Test removing one document, with a selector. - # First ordered, then unordered. - self.coll.insert_one({'x': 1}) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({'x': 1}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual([{}], list(self.coll.find({}, {'_id': False}))) - self.coll.insert_one({'x': 1}) - - bulk = self.coll.initialize_unordered_bulk_op() - bulk.find({'x': 1}).remove_one() - result = bulk.execute() - self.assertEqualResponse(expected, result) - - self.assertEqual([{}], list(self.coll.find({}, {'_id': False}))) - - def test_upsert(self): - bulk = self.coll.initialize_ordered_bulk_op() - - # upsert() requires find() first. - self.assertRaises( - AttributeError, - lambda: bulk.upsert()) - - expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}] - } - - bulk.find({}).upsert().replace_one({'foo': 'bar'}) - result = bulk.execute() - self.assertEqualResponse(expected, result) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({}).upsert().update_one({'$set': {'bim': 'baz'}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'baz'}).count(), 1) - - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({}).upsert().update({'$set': {'bim': 'bop'}}) - # Non-upsert, no matches. - bulk.find({'x': 1}).update({'$set': {'x': 2}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - self.assertEqual(self.coll.find({'bim': 'bop'}).count(), 1) - self.assertEqual(self.coll.find({'x': 2}).count(), 0) - - def test_upsert_large(self): - big = 'a' * (client_context.client.max_bson_size - 37) - bulk = self.coll.initialize_ordered_bulk_op() - bulk.find({'x': 1}).upsert().update({'$set': {'s': big}}) - result = bulk.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}]}, - result) - - self.assertEqual(1, self.coll.find({'x': 1}).count()) - - def test_client_generated_upsert_id(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.find({'_id': 0}).upsert().update_one({'$set': {'a': 0}}) - batch.find({'a': 1}).upsert().replace_one({'_id': 1}) - # This is just here to make the counts right in all cases. - batch.find({'_id': 2}).upsert().replace_one({'_id': 2}) - result = batch.execute() - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': 0}, - {'index': 1, '_id': 1}, - {'index': 2, '_id': 2}]}, - result) - - def test_single_ordered_batch(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 1}).update_one({'$set': {'b': 1}}) - batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}}) - batch.insert({'a': 3}) - batch.find({'a': 3}).remove() - result = batch.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}]}, - result) - - def test_single_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 3, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - - def test_multiple_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.find({'b': 3}).upsert().update_one({'$set': {'a': 2}}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 4, 'a': 3}) - batch.insert({'b': 5, 'a': 1}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - - def test_single_unordered_batch(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 1}).update_one({'$set': {'b': 1}}) - batch.find({'a': 2}).upsert().update_one({'$set': {'b': 2}}) - batch.insert({'a': 3}) - batch.find({'a': 3}).remove() - result = batch.execute() - self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}], - 'writeErrors': [], - 'writeConcernErrors': []}, - result) - - def test_single_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 1}}) - batch.insert({'b': 3, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) - - def test_multiple_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.find({'b': 2}).upsert().update_one({'$set': {'a': 3}}) - batch.find({'b': 3}).upsert().update_one({'$set': {'a': 4}}) - batch.find({'b': 4}).upsert().update_one({'$set': {'a': 3}}) - batch.insert({'b': 5, 'a': 2}) - batch.insert({'b': 6, 'a': 1}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - # Assume the update at index 1 runs before the update at index 3, - # although the spec does not require it. Same for inserts. - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 2, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [ - {'index': 1, '_id': '...'}, - {'index': 2, '_id': '...'}], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 3, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 4}, - 'u': {'$set': {'a': 3}}, - 'multi': False, - 'upsert': True}}, - {'index': 5, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, - result) - - @client_context.require_version_max(4, 8) # PYTHON-2436 - def test_large_inserts_ordered(self): - big = 'x' * self.coll.database.client.max_bson_size - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.insert({'big': big}) - batch.insert({'b': 2, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(1, result['nInserted']) - - self.coll.delete_many({}) - - big = 'x' * (1024 * 1024 * 4) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1, 'big': big}) - batch.insert({'a': 2, 'big': big}) - batch.insert({'a': 3, 'big': big}) - batch.insert({'a': 4, 'big': big}) - batch.insert({'a': 5, 'big': big}) - batch.insert({'a': 6, 'big': big}) - result = batch.execute() - - self.assertEqual(6, result['nInserted']) - self.assertEqual(6, self.coll.count()) - - def test_large_inserts_unordered(self): - big = 'x' * self.coll.database.client.max_bson_size - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'b': 1, 'a': 1}) - batch.insert({'big': big}) - batch.insert({'b': 2, 'a': 2}) - - try: - batch.execute() - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - - self.coll.delete_many({}) - - big = 'x' * (1024 * 1024 * 4) - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1, 'big': big}) - batch.insert({'a': 2, 'big': big}) - batch.insert({'a': 3, 'big': big}) - batch.insert({'a': 4, 'big': big}) - batch.insert({'a': 5, 'big': big}) - batch.insert({'a': 6, 'big': big}) - result = batch.execute() - - self.assertEqual(6, result['nInserted']) - self.assertEqual(6, self.coll.count()) - - def test_numerous_inserts(self): - # Ensure we don't exceed server's 1000-document batch size limit. - n_docs = 2100 - batch = self.coll.initialize_unordered_bulk_op() - for _ in range(n_docs): - batch.insert({}) - - result = batch.execute() - self.assertEqual(n_docs, result['nInserted']) - self.assertEqual(n_docs, self.coll.count()) - - # Same with ordered bulk. - self.coll.delete_many({}) - batch = self.coll.initialize_ordered_bulk_op() - for _ in range(n_docs): - batch.insert({}) - - result = batch.execute() - self.assertEqual(n_docs, result['nInserted']) - self.assertEqual(n_docs, self.coll.count()) - - def test_multiple_execution(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({}) - batch.execute() - self.assertRaises(InvalidOperation, batch.execute) - - def test_generator_insert(self): - def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} - - result = self.coll.insert_many(gen()) - self.assertEqual(5, len(result.inserted_ids)) - - -class TestLegacyBulkNoResults(BulkTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulkNoResults, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def tearDown(self): - self.coll.delete_many({}) - - def test_no_results_ordered_success(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 2 == self.coll.count(), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') - - def test_no_results_ordered_failure(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - # Fails with duplicate key error. - batch.insert({'_id': 1}) - # Should not be executed since the batch is ordered. - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 3 == self.coll.count(), - 'insert 3 documents') - self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) - - def test_no_results_unordered_success(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 2 == self.coll.count(), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') - - def test_no_results_unordered_failure(self): - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'_id': 1}) - batch.find({'_id': 3}).upsert().update_one({'$set': {'b': 1}}) - batch.insert({'_id': 2}) - # Fails with duplicate key error. - batch.insert({'_id': 1}) - # Should be executed since the batch is unordered. - batch.find({'_id': 1}).remove_one() - self.assertTrue(batch.execute({'w': 0}) is None) - wait_until(lambda: 2 == self.coll.count(), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') - - -class TestLegacyBulkWriteConcern(BulkTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulkWriteConcern, cls).setUpClass() - cls.w = client_context.w - cls.secondary = None - if cls.w > 1: - for member in client_context.ismaster['hosts']: - if member != client_context.ismaster['primary']: - cls.secondary = single_client(*partition_node(member)) - break - - # We tested wtimeout errors by specifying a write concern greater than - # the number of members, but in MongoDB 2.7.8+ this causes a different - # sort of error, "Not enough data-bearing nodes". In recent servers we - # use a failpoint to pause replication on a secondary. - cls.need_replication_stopped = client_context.version.at_least(2, 7, 8) - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - if cls.secondary: - cls.secondary.close() - - def cause_wtimeout(self, batch): - if self.need_replication_stopped: - if not client_context.test_commands_enabled: - raise SkipTest("Test commands must be enabled.") - - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='alwaysOn') - - try: - return batch.execute({'w': self.w, 'wtimeout': 1}) - finally: - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='off') - else: - return batch.execute({'w': self.w + 1, 'wtimeout': 1}) - - def test_fsync_and_j(self): - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - self.assertRaises( - ConfigurationError, - batch.execute, {'fsync': True, 'j': True}) - - @client_context.require_replica_set - def test_write_concern_failure_ordered(self): - # Ensure we don't raise on wnote. - batch = self.coll.initialize_ordered_bulk_op() - batch.find({"something": "that does no exist"}).remove() - self.assertTrue(batch.execute({"w": self.w})) - - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.insert({'a': 2}) - - # Replication wtimeout is a 'soft' error. - # It shouldn't stop batch processing. - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': []}, - result) - - # When talking to legacy servers there will be a - # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 0) - - failed = result['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) - - self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - - # Fail due to write concern support as well - # as duplicate key error on ordered batch. - batch = self.coll.initialize_ordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().replace_one({'b': 1}) - batch.insert({'a': 1}) - batch.insert({'a': 2}) - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [{'index': 1, '_id': '...'}], - 'writeErrors': [ - {'index': 2, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'a': 1}}]}, - result) - - self.assertTrue(len(result['writeConcernErrors']) > 1) - failed = result['writeErrors'][0] - self.assertTrue("duplicate" in failed['errmsg']) - - @client_context.require_replica_set - def test_write_concern_failure_unordered(self): - # Ensure we don't raise on wnote. - batch = self.coll.initialize_unordered_bulk_op() - batch.find({"something": "that does no exist"}).remove() - self.assertTrue(batch.execute({"w": self.w})) - - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, 'b': 1}}) - batch.insert({'a': 2}) - - # Replication wtimeout is a 'soft' error. - # It shouldn't stop batch processing. - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(0, len(result['writeErrors'])) - # When talking to legacy servers there will be a - # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) - - self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) - - # Fail due to write concern support as well - # as duplicate key error on unordered batch. - batch = self.coll.initialize_unordered_bulk_op() - batch.insert({'a': 1}) - batch.find({'a': 3}).upsert().update_one({'$set': {'a': 3, - 'b': 1}}) - batch.insert({'a': 1}) - batch.insert({'a': 2}) - try: - self.cause_wtimeout(batch) - except BulkWriteError as exc: - result = exc.details - self.assertEqual(exc.code, 65) - else: - self.fail("Error not raised") - - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(1, len(result['writeErrors'])) - # When talking to legacy servers there will be a - # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) - - failed = result['writeErrors'][0] - self.assertEqual(2, failed['index']) - self.assertEqual(11000, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) - self.assertEqual(1, failed['op']['a']) - - failed = result['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) - - upserts = result['upserted'] - self.assertEqual(1, len(upserts)) - self.assertEqual(1, upserts[0]['index']) - self.assertTrue(upserts[0].get('_id')) - - -class TestLegacyBulkAuthorization(BulkAuthorizationTestBase): - - @classmethod - def setUpClass(cls): - super(TestLegacyBulkAuthorization, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_readonly(self): - # We test that an authorization failure aborts the batch and is raised - # as OperationFailure. - cli = rs_or_single_client_noauth( - username='readonly', password='pw', authSource='pymongo_test') - coll = cli.pymongo_test.test - bulk = coll.initialize_ordered_bulk_op() - bulk.insert({'x': 1}) - self.assertRaises(OperationFailure, bulk.execute) - - def test_no_remove(self): - # We test that an authorization failure aborts the batch and is raised - # as OperationFailure. - cli = rs_or_single_client_noauth( - username='noremove', password='pw', authSource='pymongo_test') - coll = cli.pymongo_test.test - bulk = coll.initialize_ordered_bulk_op() - bulk.insert({'x': 1}) - bulk.find({'x': 2}).upsert().replace_one({'x': 2}) - bulk.find({}).remove() # Prohibited. - bulk.insert({'x': 3}) # Never attempted. - self.assertRaises(OperationFailure, bulk.execute) - self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) - if __name__ == "__main__": unittest.main() diff --git a/test/test_session.py b/test/test_session.py index e92b5bbbbd..5bcad4e750 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -1081,14 +1081,6 @@ def test_cluster_time(self): self.addCleanup(collection.drop) self.addCleanup(client.pymongo_test.collection2.drop) - def bulk_insert(ordered): - if ordered: - bulk = collection.initialize_ordered_bulk_op() - else: - bulk = collection.initialize_unordered_bulk_op() - bulk.insert({}) - bulk.execute() - def rename_and_drop(): # Ensure collection exists. collection.insert_one({}) @@ -1130,8 +1122,6 @@ def insert_and_aggregate(): ('delete_one', lambda: collection.delete_one({})), ('delete_many', lambda: collection.delete_many({})), ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('ordered bulk', lambda: bulk_insert(True)), - ('unordered bulk', lambda: bulk_insert(False)), ('rename_and_drop', rename_and_drop), ] From 3e02957998fb2fcde0f427a6efe54e782d048295 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Aug 2021 12:33:26 -0700 Subject: [PATCH 0426/2111] PYTHON-2288 Finish removing IsMaster from docs --- doc/api/pymongo/ismaster.rst | 10 ---------- pymongo/monitor.py | 2 +- pymongo/monitoring.py | 2 +- pymongo/server_description.py | 2 +- test/test_streaming_protocol.py | 4 ++-- 5 files changed, 5 insertions(+), 15 deletions(-) delete mode 100644 doc/api/pymongo/ismaster.rst diff --git a/doc/api/pymongo/ismaster.rst b/doc/api/pymongo/ismaster.rst deleted file mode 100644 index 881e874e17..0000000000 --- a/doc/api/pymongo/ismaster.rst +++ /dev/null @@ -1,10 +0,0 @@ -:orphan: - -:mod:`ismaster` -- A wrapper for ismaster command responses. -============================================================ - -.. automodule:: pymongo.ismaster - - .. autoclass:: pymongo.ismaster.IsMaster(doc) - - .. autoattribute:: document diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 4817ad2401..df673b720d 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -260,7 +260,7 @@ def _check_once(self): return sd def _check_with_socket(self, conn): - """Return (IsMaster, round_trip_time). + """Return (Hello, round_trip_time). Can raise ConnectionFailure or OperationFailure. """ diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index c72e836807..147be2a46d 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1196,7 +1196,7 @@ def duration(self): @property def reply(self): - """An instance of :class:`~pymongo.ismaster.IsMaster`.""" + """An instance of :class:`~pymongo.hello.Hello`.""" return self.__reply @property diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 462ad135cc..1c64a8fd53 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -26,7 +26,7 @@ class ServerDescription(object): :Parameters: - `address`: A (host, port) pair - - `ismaster`: Optional IsMaster instance + - `ismaster`: Optional Hello instance - `round_trip_time`: Optional float - `error`: Optional, the last error attempting to connect to the server """ diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 07310586a7..f2b2c7ccaa 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -38,7 +38,7 @@ def test_failCommand_streaming(self): hb_listener = HeartbeatEventListener() client = rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, - appName='failingIsMasterTest') + appName='failingHeartbeatTest') self.addCleanup(client.close) # Force a connection. client.admin.command('ping') @@ -52,7 +52,7 @@ def test_failCommand_streaming(self): 'failCommands': ['isMaster', 'hello'], 'closeConnection': False, 'errorCode': 10107, - 'appName': 'failingIsMasterTest', + 'appName': 'failingHeartbeatTest', }, } with self.fail_point(fail_ismaster): From edda903b5b4f7f41339b8fe18f8c19d772c6a711 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Aug 2021 12:54:52 -0700 Subject: [PATCH 0427/2111] PYTHON-2711 Remove profile command helpers (#693) --- doc/api/pymongo/database.rst | 3 - doc/changelog.rst | 9 +++ doc/migrate-to-pymongo4.rst | 41 ++++++++++ pymongo/__init__.py | 32 -------- pymongo/database.py | 141 ----------------------------------- test/test_cursor.py | 9 +-- test/test_database.py | 104 +------------------------- test/test_session.py | 8 +- 8 files changed, 55 insertions(+), 292 deletions(-) diff --git a/doc/api/pymongo/database.rst b/doc/api/pymongo/database.rst index a88131b799..b40a77dff3 100644 --- a/doc/api/pymongo/database.rst +++ b/doc/api/pymongo/database.rst @@ -5,9 +5,6 @@ :synopsis: Database level operations .. autodata:: pymongo.auth.MECHANISMS - .. autodata:: pymongo.OFF - .. autodata:: pymongo.SLOW_ONLY - .. autodata:: pymongo.ALL .. autoclass:: pymongo.database.Database :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index c1b6bd9f34..f7009c2258 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -37,6 +37,14 @@ Breaking Changes in 4.0 :meth:`pymongo.database.Database.reset_error_history`. - Removed :meth:`pymongo.database.Database.add_user` and :meth:`pymongo.database.Database.remove_user`. +- Removed support for database profiler helpers + :meth:`~pymongo.database.Database.profiling_level`, + :meth:`~pymongo.database.Database.set_profiling_level`, + and :meth:`~pymongo.database.Database.profiling_info`. Instead, users + should run the `profile command`_ with the + :meth:`~pymongo.database.Database.command` helper directly. +- Removed :attr:`pymongo.OFF`, :attr:`pymongo.SLOW_ONLY`, and + :attr:`pymongo.ALL`. - Removed :meth:`pymongo.collection.Collection.parallel_scan`. - Removed :meth:`pymongo.collection.Collection.ensure_index`. - Removed :meth:`pymongo.collection.Collection.reindex`. @@ -96,6 +104,7 @@ See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 +.. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ Changes in Version 3.11.1 ------------------------- diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index ba3116899e..35aa0e6686 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -207,6 +207,47 @@ PyMongo 3.6. Use the `dropUser command`_ instead:: .. _dropUser command: https://docs.mongodb.com/manual/reference/command/createUser/ +Database.profiling_level is removed +................................... + +Removed :meth:`pymongo.database.Database.profiling_level` which was deprecated in +PyMongo 3.12. Use the `profile command`_ instead. Code like this:: + + level = db.profiling_level() + +Can be changed to this:: + + profile = db.command('profile', -1) + level = profile['was'] + +.. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ + +Database.set_profiling_level is removed +....................................... + +Removed :meth:`pymongo.database.Database.set_profiling_level` which was deprecated in +PyMongo 3.12. Use the `profile command`_ instead. Code like this:: + + db.set_profiling_level(pymongo.ALL, filter={'op': 'query'}) + +Can be changed to this:: + + res = db.command('profile', 2, filter={'op': 'query'}) + +Database.profiling_info is removed +.................................. + +Removed :meth:`pymongo.database.Database.profiling_info` which was deprecated in +PyMongo 3.12. Query the `'system.profile' collection`_ instead. Code like this:: + + profiling_info = db.profiling_info() + +Can be changed to this:: + + profiling_info = list(db['system.profile'].find()) + +.. _'system.profile' collection: https://docs.mongodb.com/manual/reference/database-profiler/ + Collection ---------- diff --git a/pymongo/__init__.py b/pymongo/__init__.py index fff8d46026..0b76e87563 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -67,38 +67,6 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -OFF = 0 -"""**DEPRECATED** - No database profiling. - -**DEPRECATED** - :attr:`OFF` is deprecated and will be removed in PyMongo 4.0. -Instead, specify this profiling level using the numeric value ``0``. -See https://docs.mongodb.com/manual/tutorial/manage-the-database-profiler - -.. versionchanged:: 3.12 - Deprecated -""" -SLOW_ONLY = 1 -"""**DEPRECATED** - Only profile slow operations. - -**DEPRECATED** - :attr:`SLOW_ONLY` is deprecated and will be removed in -PyMongo 4.0. Instead, specify this profiling level using the numeric -value ``1``. -See https://docs.mongodb.com/manual/tutorial/manage-the-database-profiler - -.. versionchanged:: 3.12 - Deprecated -""" -ALL = 2 -"""**DEPRECATED** - Profile all operations. - -**DEPRECATED** - :attr:`ALL` is deprecated and will be removed in PyMongo 4.0. -Instead, specify this profiling level using the numeric value ``2``. -See https://docs.mongodb.com/manual/tutorial/manage-the-database-profiler - -.. versionchanged:: 3.12 - Deprecated -""" - version_tuple = (4, 0, '.dev0') def get_version_string(): diff --git a/pymongo/database.py b/pymongo/database.py index 584824e062..61d87611dc 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -850,147 +850,6 @@ def validate_collection(self, name_or_collection, return result - def profiling_level(self, session=None): - """**DEPRECATED**: Get the database's current profiling level. - - Starting with PyMongo 3.12, this helper is obsolete. Instead, users - can run the `profile command`_, using the :meth:`command` - helper to get the current profiler level. Running the - `profile command`_ with the level set to ``-1`` returns the current - profiler information without changing it:: - - res = db.command("profile", -1) - profiling_level = res["was"] - - The format of ``res`` depends on the version of MongoDB in use. - - Returns one of (:data:`~pymongo.OFF`, - :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`). - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.12 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. mongodoc:: profiling - .. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ - """ - warnings.warn("profiling_level() is deprecated. See the documentation " - "for more information", - DeprecationWarning, stacklevel=2) - result = self.command("profile", -1, session=session) - - assert result["was"] >= 0 and result["was"] <= 2 - return result["was"] - - def set_profiling_level(self, level, slow_ms=None, session=None, - sample_rate=None, filter=None): - """**DEPRECATED**: Set the database's profiling level. - - Starting with PyMongo 3.12, this helper is obsolete. Instead, users - can directly run the `profile command`_, using the :meth:`command` - helper, e.g.:: - - res = db.command("profile", 2, filter={"op": "query"}) - - :Parameters: - - `level`: Specifies a profiling level, see list of possible values - below. - - `slow_ms`: Optionally modify the threshold for the profile to - consider a query or operation. Even if the profiler is off queries - slower than the `slow_ms` level will get written to the logs. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `sample_rate` (optional): The fraction of slow operations that - should be profiled or logged expressed as a float between 0 and 1. - - `filter` (optional): A filter expression that controls which - operations are profiled and logged. - - Possible `level` values: - - +----------------------------+------------------------------------+ - | Level | Setting | - +============================+====================================+ - | :data:`~pymongo.OFF` | Off. No profiling. | - +----------------------------+------------------------------------+ - | :data:`~pymongo.SLOW_ONLY` | On. Only includes slow operations. | - +----------------------------+------------------------------------+ - | :data:`~pymongo.ALL` | On. Includes all operations. | - +----------------------------+------------------------------------+ - - Raises :class:`ValueError` if level is not one of - (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`, - :data:`~pymongo.ALL`). - - .. versionchanged:: 3.12 - Added the ``sample_rate`` and ``filter`` parameters. - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. mongodoc:: profiling - .. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ - """ - warnings.warn("set_profiling_level() is deprecated. See the " - "documentation for more information", - DeprecationWarning, stacklevel=2) - - if not isinstance(level, int) or level < 0 or level > 2: - raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)") - - if slow_ms is not None and not isinstance(slow_ms, int): - raise TypeError("slow_ms must be an integer") - - if sample_rate is not None and not isinstance(sample_rate, float): - raise TypeError( - "sample_rate must be a float, not %r" % (sample_rate,)) - - cmd = SON(profile=level) - if slow_ms is not None: - cmd['slowms'] = slow_ms - if sample_rate is not None: - cmd['sampleRate'] = sample_rate - if filter is not None: - cmd['filter'] = filter - self.command(cmd, session=session) - - def profiling_info(self, session=None): - """**DEPRECATED**: Returns a list containing current profiling - information. - - Starting with PyMongo 3.12, this helper is obsolete. Instead, users - can view the database profiler output by running - :meth:`~pymongo.collection.Collection.find` against the - ``system.profile`` collection as detailed in the `profiler output`_ - documentation:: - - profiling_info = list(db["system.profile"].find()) - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.12 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. mongodoc:: profiling - .. _profiler output: https://docs.mongodb.com/manual/reference/database-profiler/ - """ - warnings.warn("profiling_info() is deprecated. See the " - "documentation for more information", - DeprecationWarning, stacklevel=2) - - return list(self["system.profile"].find(session=session)) - def __iter__(self): return self diff --git a/test/test_cursor.py b/test/test_cursor.py index b2de09429b..3ba455c188 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -27,12 +27,9 @@ from bson import decode_all from bson.code import Code -from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo import (ASCENDING, - DESCENDING, - ALL, - OFF) + DESCENDING) from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType from pymongo.errors import (ConfigurationError, @@ -1290,7 +1287,7 @@ def test_comment(self): query_key = "query.$comment" self.client.drop_database(self.db) - self.db.set_profiling_level(ALL) + self.db.command('profile', 2) # Profile ALL commands. try: list(self.db.test.find().comment('foo')) op = self.db.system.profile.find({'ns': 'pymongo_test.test', @@ -1312,7 +1309,7 @@ def test_comment(self): 'command.comment': 'foo'}) self.assertEqual(op.count(), 1) finally: - self.db.set_profiling_level(OFF) + self.db.command('profile', 0) # Turn off profiling. self.db.system.profile.drop() self.db.test.insert_many([{}, {}]) diff --git a/test/test_database.py b/test/test_database.py index b19aba0ad0..37bda18b75 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -26,10 +26,7 @@ from bson.dbref import DBRef from bson.objectid import ObjectId from bson.son import SON -from pymongo import (ALL, - auth, - OFF, - SLOW_ONLY, +from pymongo import (auth, helpers) from pymongo.collection import Collection from pymongo.database import Database @@ -370,105 +367,6 @@ def test_validate_collection_background(self): with self.assertRaises(OperationFailure): db.validate_collection(coll, full=True, background=True) - @client_context.require_no_mongos - @ignore_deprecations - def test_profiling_levels(self): - db = self.client.pymongo_test - self.assertEqual(db.profiling_level(), OFF) # default - - self.assertRaises(ValueError, db.set_profiling_level, 5.5) - self.assertRaises(ValueError, db.set_profiling_level, None) - self.assertRaises(ValueError, db.set_profiling_level, -1) - self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, 5.5) - self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, '1') - - db.set_profiling_level(SLOW_ONLY) - self.assertEqual(db.profiling_level(), SLOW_ONLY) - - db.set_profiling_level(ALL) - self.assertEqual(db.profiling_level(), ALL) - - db.set_profiling_level(OFF) - self.assertEqual(db.profiling_level(), OFF) - - db.set_profiling_level(SLOW_ONLY, 50) - self.assertEqual(50, db.command("profile", -1)['slowms']) - - db.set_profiling_level(ALL, -1) - self.assertEqual(-1, db.command("profile", -1)['slowms']) - - db.set_profiling_level(OFF, 100) # back to default - self.assertEqual(100, db.command("profile", -1)['slowms']) - - @client_context.require_no_mongos - @client_context.require_version_min(3, 6) - @ignore_deprecations - def test_profiling_sample_rate(self): - db = self.client.pymongo_test - with self.assertRaises(TypeError): - db.set_profiling_level(SLOW_ONLY, 50, sample_rate='1') - with self.assertRaises(TypeError): - db.set_profiling_level(SLOW_ONLY, 50, sample_rate=1) - - db.set_profiling_level(SLOW_ONLY, 50, sample_rate=0.0) - db.set_profiling_level(SLOW_ONLY, 50, sample_rate=1.0) - db.set_profiling_level(SLOW_ONLY, 50, sample_rate=0.5) - profile = db.command("profile", -1) - self.assertEqual(50, profile['slowms']) - self.assertEqual(0.5, profile['sampleRate']) - db.set_profiling_level(OFF, 100) # back to default - self.assertEqual(100, db.command("profile", -1)['slowms']) - - @client_context.require_no_mongos - @client_context.require_version_min(4, 4, 2) - @ignore_deprecations - def test_profiling_filter(self): - db = self.client.pymongo_test - db.set_profiling_level(ALL, filter={'ns': {'$eq': 'test.test'}}) - profile = db.command("profile", -1) - self.assertEqual({'ns': {'$eq': 'test.test'}}, profile['filter']) - # filter='unset' resets the filter back to the default. - db.set_profiling_level(OFF, 100, filter='unset') - self.assertEqual(100, db.command("profile", -1)['slowms']) - - @client_context.require_no_mongos - @ignore_deprecations - def test_profiling_info(self): - db = self.client.pymongo_test - - db.system.profile.drop() - db.set_profiling_level(ALL) - db.test.find_one() - db.set_profiling_level(OFF) - - info = db.profiling_info() - self.assertTrue(isinstance(info, list)) - - # Check if we're going to fail because of SERVER-4754, in which - # profiling info isn't collected if mongod was started with --auth - if server_started_with_auth(self.client): - raise SkipTest( - "We need SERVER-4754 fixed for the rest of this test to pass" - ) - - self.assertTrue(len(info) >= 1) - # These basically clue us in to server changes. - self.assertTrue(isinstance(info[0]['responseLength'], int)) - self.assertTrue(isinstance(info[0]['millis'], int)) - self.assertTrue(isinstance(info[0]['client'], str)) - self.assertTrue(isinstance(info[0]['user'], str)) - self.assertTrue(isinstance(info[0]['ns'], str)) - self.assertTrue(isinstance(info[0]['op'], str)) - self.assertTrue(isinstance(info[0]["ts"], datetime.datetime)) - - def test_profiling_helpers_deprecated(self): - filter = DeprecationFilter('error') - self.addCleanup(filter.stop) - db = self.client.pymongo_test - self.assertRaises(DeprecationWarning, db.profiling_level) - self.assertRaises(DeprecationWarning, db.profiling_info) - self.assertRaises(DeprecationWarning, db.set_profiling_level, OFF) - def test_command(self): self.maxDiff = None db = self.client.admin diff --git a/test/test_session.py b/test/test_session.py index 5bcad4e750..45fcae636b 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -25,7 +25,7 @@ from bson import DBRef from gridfs import GridFS, GridFSBucket -from pymongo import ASCENDING, InsertOne, IndexModel, OFF, monitoring +from pymongo import ASCENDING, InsertOne, IndexModel, monitoring from pymongo.common import _MAX_END_SESSIONS from pymongo.errors import (ConfigurationError, InvalidOperation, @@ -249,14 +249,8 @@ def test_database(self): (db.list_collection_names, [], {}), (db.validate_collection, ['collection'], {}), (db.drop_collection, ['collection'], {}), - (db.profiling_info, [], {}), (db.dereference, [DBRef('collection', 1)], {}), ] - - if not client_context.is_mongos: - ops.append((db.set_profiling_level, [OFF], {})) - ops.append((db.profiling_level, [], {})) - self._test_ops(client, *ops) @staticmethod From 93a042f2e8812b9fb63010a5de5e16bcf6828912 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Aug 2021 13:13:23 -0700 Subject: [PATCH 0428/2111] PYTHON-2235 Remove pymongo.GEOHAYSTACK (#694) --- doc/api/pymongo/collection.rst | 1 - doc/changelog.rst | 1 + doc/migrate-to-pymongo4.rst | 7 +++++ pymongo/__init__.py | 14 ---------- pymongo/collection.py | 13 ++------- pymongo/operations.py | 5 ++-- test/test_collection.py | 5 ++-- test/test_legacy_api.py | 50 ---------------------------------- 8 files changed, 14 insertions(+), 82 deletions(-) delete mode 100644 test/test_legacy_api.py diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 8430c5a06e..302f259026 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -7,7 +7,6 @@ .. autodata:: pymongo.ASCENDING .. autodata:: pymongo.DESCENDING .. autodata:: pymongo.GEO2D - .. autodata:: pymongo.GEOHAYSTACK .. autodata:: pymongo.GEOSPHERE .. autodata:: pymongo.HASHED .. autodata:: pymongo.TEXT diff --git a/doc/changelog.rst b/doc/changelog.rst index f7009c2258..8dc4db288c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -87,6 +87,7 @@ Breaking Changes in 4.0 :meth:`~pymongo.cursor.Cursor`. - Removed :exc:`pymongo.errors.NotMasterError`. Use :exc:`pymongo.errors.NotPrimaryError` instead. +- Removed :attr:`pymongo.GEOHAYSTACK`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. - PyMongoCrypt 1.1.0 or later is now required for client side field level diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 35aa0e6686..93f9baf646 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -492,6 +492,13 @@ NotMasterError is removed Removed :exc:`~pymongo.errors.NotMasterError`. Use :exc:`~pymongo.errors.NotPrimaryError` instead. +pymongo.GEOHAYSTACK is removed +------------------------------ + +Removed :attr:`pymongo.GEOHAYSTACK`. Replace with "geoHaystack" or create a +2d index and use $geoNear or $geoWithin instead. +See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack. + Removed features with no migration path --------------------------------------- diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 0b76e87563..0ec31e9e7e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -25,20 +25,6 @@ .. _geospatial index: http://docs.mongodb.org/manual/core/2d/ """ -GEOHAYSTACK = "geoHaystack" -"""**DEPRECATED** - Index specifier for a 2-dimensional `haystack index`_. - -**DEPRECATED** - :attr:`GEOHAYSTACK` is deprecated and will be removed in -PyMongo 4.0. geoHaystack indexes (and the geoSearch command) were deprecated -in MongoDB 4.4. Instead, create a 2d index and use $geoNear or $geoWithin. -See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack. - -.. versionchanged:: 3.11 - Deprecated. - -.. _haystack index: http://docs.mongodb.org/manual/core/geohaystack/ -""" - GEOSPHERE = "2dsphere" """Index specifier for a `spherical geospatial index`_. diff --git a/pymongo/collection.py b/pymongo/collection.py index 848b39280b..a2fb4bd194 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -50,10 +50,6 @@ from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} -_HAYSTACK_MSG = ( - "geoHaystack indexes are deprecated as of MongoDB 4.4." - " Instead, create a 2d index and use $geoNear or $geoWithin." - " See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack") class ReturnDocument(object): @@ -1758,10 +1754,6 @@ def gen_indexes(): raise ConfigurationError( "Must be connected to MongoDB " "3.4+ to use collations.") - if 'bucketSize' in document: - # The bucketSize option is required by geoHaystack. - warnings.warn( - _HAYSTACK_MSG, DeprecationWarning, stacklevel=4) names.append(document["name"]) yield document @@ -1787,9 +1779,8 @@ def create_index(self, keys, session=None, **kwargs): The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, - :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, - :data:`~pymongo.TEXT`). + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). To create a single key ascending index on the key ``'mike'`` we just use a string argument:: diff --git a/pymongo/operations.py b/pymongo/operations.py index 8cf850ef55..b5d670e0ff 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -380,9 +380,8 @@ def __init__(self, keys, **kwargs): The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`, - :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, - :data:`~pymongo.TEXT`). + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). Valid options include, but are not limited to: diff --git a/test/test_collection.py b/test/test_collection.py index b8fdbf7ee6..cae4306b6e 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -32,8 +32,7 @@ from bson.codec_options import CodecOptions from bson.objectid import ObjectId from bson.son import SON -from pymongo import (ASCENDING, DESCENDING, GEO2D, - GEOHAYSTACK, GEOSPHERE, HASHED, TEXT) +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT from pymongo.bulk import BulkWriteError from pymongo.collection import Collection, ReturnDocument from pymongo.command_cursor import CommandCursor @@ -430,7 +429,7 @@ def test_index_haystack(self): "pos": {"long": 59.1, "lat": 87.2}, "type": "office" }) db.test.create_index( - [("pos", GEOHAYSTACK), ("type", ASCENDING)], + [("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1 ) diff --git a/test/test_legacy_api.py b/test/test_legacy_api.py deleted file mode 100644 index 2149f1f816..0000000000 --- a/test/test_legacy_api.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test various legacy / deprecated API features.""" - -import sys - -sys.path[0:0] = [""] - -from pymongo import ASCENDING, GEOHAYSTACK -from pymongo.operations import IndexModel -from test import unittest -from test.test_client import IntegrationTest -from test.utils import DeprecationFilter - - -class TestDeprecations(IntegrationTest): - - @classmethod - def setUpClass(cls): - super(TestDeprecations, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter("error") - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - - def test_geoHaystack_deprecation(self): - self.addCleanup(self.db.test.drop) - keys = [("pos", GEOHAYSTACK), ("type", ASCENDING)] - self.assertRaises( - DeprecationWarning, self.db.test.create_index, keys, bucketSize=1) - indexes = [IndexModel(keys, bucketSize=1)] - self.assertRaises( - DeprecationWarning, self.db.test.create_indexes, indexes) - - -if __name__ == "__main__": - unittest.main() From 9bc27c9c0b10962982b6eb0a03e31a86a126d3a4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Aug 2021 13:20:36 -0700 Subject: [PATCH 0429/2111] PYTHON-2797 Update docs for 5.0 support Add changelog entries for 3.11.2, 3.11.3, and 3.12.0. --- README.rst | 2 +- doc/api/pymongo/collection.rst | 2 +- doc/changelog.rst | 141 +++++++++++++++++++++++++++++++-- pymongo/client_session.py | 2 + 4 files changed, 137 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index 7ced4ec2aa..c9f2eef8db 100644 --- a/README.rst +++ b/README.rst @@ -17,7 +17,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 2.6, 3.0, 3.2, 3.4, 3.6, 4.0, 4.2, and 4.4. +PyMongo supports MongoDB 2.6, 3.0, 3.2, 3.4, 3.6, 4.0, 4.2, 4.4, and 5.0. Support / Feedback ================== diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 302f259026..1787f22c7a 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -47,7 +47,7 @@ .. automethod:: aggregate_raw_batches .. automethod:: watch .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) - .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) + .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) .. automethod:: find_one(filter=None, *args, **kwargs) .. automethod:: find_one_and_delete .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8dc4db288c..05e737c017 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,16 +6,11 @@ Changes in Version 4.0 .. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. -.. warning:: PyMongo now allows insertion of documents with keys that include - dots ('.') or start with dollar signs ('$'). - PyMongo 4.0 brings a number of improvements as well as some backward breaking changes. For example, all APIs deprecated in PyMongo 3.X have been removed. Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` before upgrading from PyMongo 3.x. -- Added :attr:`pymongo.mongo_client.MongoClient.topology_description`. - Breaking Changes in 4.0 ....................... @@ -90,13 +85,12 @@ Breaking Changes in 4.0 - Removed :attr:`pymongo.GEOHAYSTACK`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. -- PyMongoCrypt 1.1.0 or later is now required for client side field level - encryption support. Notable improvements .................... -- Support for MongoDB Versioned API, see :class:`~pymongo.server_api.ServerApi`. +- Enhanced connection pooling to create connections more efficiently and + avoid connection storms. Issues Resolved ............... @@ -105,8 +99,139 @@ See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 + +Changes in Version 3.12.0 +------------------------- + +.. warning:: PyMongo 3.12.0 deprecates support for Python 2.7, 3.4 and 3.5. + These Python versions will not be supported by PyMongo 4. + +.. warning:: PyMongo now allows insertion of documents with keys that include + dots ('.') or start with dollar signs ('$'). + +- PyMongoCrypt 1.1.0 or later is now required for client side field level + encryption support. + +Notable improvements +.................... + +- Added support for MongoDB 5.0. +- Support for MongoDB Versioned API, see :class:`~pymongo.server_api.ServerApi`. +- Support for snapshot reads on secondaries (see :ref:`snapshot-reads-ref`). +- Support for Azure and GCP KMS providers for client side field level + encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.encryption_options.AutoEncryptionOpts`, + and :mod:`~pymongo.encryption`. +- Support AWS authentication with temporary credentials when connecting to KMS + in client side field level encryption. +- Support for connecting to load balanced MongoDB clusters via the new + ``loadBalanced`` URI option. +- Support for creating timeseries collections via the ``timeseries`` and + ``expireAfterSeconds`` arguments to + :meth:`~pymongo.database.Database.create_collection`. +- Added :attr:`pymongo.mongo_client.MongoClient.topology_description`. +- Added hash support to :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.database.Database` and + :class:`~pymongo.collection.Collection` (`PYTHON-2466`_). +- Improved the error message returned by + :meth:`~pymongo.collection.Collection.insert_many` when supplied with an + argument of incorrect type (`PYTHON-1690`_). +- Added session and read concern support to + :meth:`~pymongo.collection.Collection.find_raw_batches` + and :meth:`~pymongo.collection.Collection.aggregate_raw_batches`. + +Bug fixes +......... + +- Fixed a bug that could cause the driver to deadlock during automatic + client side field level encryption (`PYTHON-2472`_). +- Fixed a potential deadlock when garbage collecting an unclosed exhaust + :class:`~pymongo.cursor.Cursor`. +- Fixed an bug where using gevent.Timeout to timeout an operation could + lead to a deadlock. +- Fixed the following bug with Atlas Data Lake. When closing cursors, + pymongo now sends killCursors with the namespace returned the cursor's + initial command response. +- Fixed a bug in :class:`~pymongo.cursor.RawBatchCursor` that caused it to + return an empty bytestring when the cursor contained no results. It now + raises :exc:`StopIteration` instead. + +Deprecations +............ + +- Deprecated support for Python 2.7, 3.4 and 3.5. +- Deprecated support for database profiler helpers + :meth:`~pymongo.database.Database.profiling_level`, + :meth:`~pymongo.database.Database.set_profiling_level`, + and :meth:`~pymongo.database.Database.profiling_info`. Instead, users + should run the `profile command`_ with the + :meth:`~pymongo.database.Database.command` helper directly. +- Deprecated :exc:`~pymongo.errors.NotMasterError`. Users should + use :exc:`~pymongo.errors.NotPrimaryError` instead. +- Deprecated :class:`~pymongo.ismaster.IsMaster` and :mod:`~pymongo.ismaster` + which will be removed in PyMongo 4.0 and are replaced by + :class:`~pymongo.hello.Hello` and :mod:`~pymongo.hello` which provide the + same API. +- Deprecated the :mod:`pymongo.messeage` module. +- Deprecated the ``ssl_keyfile`` and ``ssl_certfile`` URI options in favor + of ``tlsCertificateKeyFile`` (see :doc:`examples/tls`). + +.. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466 +.. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690 +.. _PYTHON-2472: https://jira.mongodb.org/browse/PYTHON-2472 .. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ +Issues Resolved +............... + +See the `PyMongo 3.12.0 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594 + +Changes in Version 3.11.3 +------------------------- + +Issues Resolved +............... + +Version 3.11.3 fixes a bug that prevented PyMongo from retrying writes after +a ``writeConcernError`` on MongoDB 4.4+ (`PYTHON-2452`_) + +See the `PyMongo 3.11.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-2452: https://jira.mongodb.org/browse/PYTHON-2452 +.. _PyMongo 3.11.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30355 + +Changes in Version 3.11.2 +------------------------- + +Issues Resolved +............... + +Version 3.11.2 includes a number of bugfixes. Highlights include: + +- Fixed a memory leak caused by failing SDAM monitor checks on Python 3 (`PYTHON-2433`_). +- Fixed a regression that changed the string representation of + :exc:`~pymongo.errors.BulkWriteError` (`PYTHON-2438`_). +- Fixed a bug that made it impossible to use + :meth:`bson.codec_options.CodecOptions.with_options` and + :meth:`~bson.json_util.JSONOptions.with_options` on some early versions of + Python 3.4 and Python 3.5 due to a bug in the standard library implementation + of :meth:`collections.namedtuple._asdict` (`PYTHON-2440`_). +- Fixed a bug that resulted in a :exc:`TypeError` exception when a PyOpenSSL + socket was configured with a timeout of ``None`` (`PYTHON-2443`_). + +See the `PyMongo 3.11.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-2433: https://jira.mongodb.org/browse/PYTHON-2433 +.. _PYTHON-2438: https://jira.mongodb.org/browse/PYTHON-2438 +.. _PYTHON-2440: https://jira.mongodb.org/browse/PYTHON-2440 +.. _PYTHON-2443: https://jira.mongodb.org/browse/PYTHON-2443 +.. _PyMongo 3.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30315 + Changes in Version 3.11.1 ------------------------- diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 1c2709b32d..78410d325f 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -93,6 +93,8 @@ .. mongodoc:: transactions +.. _snapshot-reads-ref: + Snapshot Reads ============== From 369d175993fa3fae01985e2bf68d88d03c954ede Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Aug 2021 13:33:21 -0700 Subject: [PATCH 0430/2111] PYTHON-2430 Remove all helpers in the message module (#695) --- doc/api/pymongo/index.rst | 1 - doc/api/pymongo/message.rst | 6 ---- doc/changelog.rst | 3 ++ doc/migrate-to-pymongo4.rst | 7 ++++ pymongo/collection.py | 6 ++-- pymongo/message.py | 67 +++++++++++++++++++------------------ pymongo/mongo_client.py | 2 +- pymongo/network.py | 2 +- test/test_pooling.py | 2 +- 9 files changed, 50 insertions(+), 46 deletions(-) delete mode 100644 doc/api/pymongo/message.rst diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 5770145936..8a3a99b815 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -39,7 +39,6 @@ Sub-modules: encryption encryption_options errors - message mongo_client monitoring operations diff --git a/doc/api/pymongo/message.rst b/doc/api/pymongo/message.rst deleted file mode 100644 index 0a28052fb6..0000000000 --- a/doc/api/pymongo/message.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`message` -- Tools for creating messages to be sent to MongoDB -=================================================================== - -.. automodule:: pymongo.message - :synopsis: Tools for creating messages to be sent to MongoDB - :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index 05e737c017..189e33e003 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -80,6 +80,9 @@ Breaking Changes in 4.0 :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, and :meth:`~pymongo.cursor.Cursor`. +- Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`, + :meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`, + :meth:`pymongo.message.query`, and :meth:`pymongo.message.update`. - Removed :exc:`pymongo.errors.NotMasterError`. Use :exc:`pymongo.errors.NotPrimaryError` instead. - Removed :attr:`pymongo.GEOHAYSTACK`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 93f9baf646..7c741e8cbf 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -555,3 +555,10 @@ Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2 removed the `parallelCollectionScan command`_. There is no replacement. .. _parallelCollectionScan command: https://docs.mongodb.com/manual/reference/command/parallelCollectionScan/ + +pymongo.message helpers are removed +................................... + +Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`, +:meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`, +:meth:`pymongo.message.query`, and :meth:`pymongo.message.update`. diff --git a/pymongo/collection.py b/pymongo/collection.py index a2fb4bd194..e4cb2487b4 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -507,7 +507,7 @@ def _insert_command(session, sock_info, retryable_write): # Legacy OP_INSERT. return self._legacy_write( sock_info, 'insert', command, op_id, - bypass_doc_val, message.insert, self.__full_name, + bypass_doc_val, message._insert, self.__full_name, [doc], check_keys, False, self.__write_response_codec_options) @@ -697,7 +697,7 @@ def _update(self, sock_info, criteria, document, upsert=False, # Legacy OP_UPDATE. return self._legacy_write( sock_info, 'update', command, op_id, - bypass_doc_val, message.update, self.__full_name, upsert, + bypass_doc_val, message._update, self.__full_name, upsert, multi, criteria, document, check_keys, self.__write_response_codec_options) @@ -1047,7 +1047,7 @@ def _delete( # Legacy OP_DELETE. return self._legacy_write( sock_info, 'delete', command, op_id, - False, message.delete, self.__full_name, criteria, + False, message._delete, self.__full_name, criteria, self.__write_response_codec_options, int(not multi)) # Delete command. diff --git a/pymongo/message.py b/pymongo/message.py index 60be0c7e75..12ca524ffd 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -364,9 +364,9 @@ def get_message(self, set_slave_ok, sock_info, use_cmd=False): spec = _maybe_add_read_preference(spec, self.read_preference) - return query(flags, ns, self.ntoskip, ntoreturn, - spec, None if use_cmd else self.fields, - self.codec_options, ctx=sock_info.compression_context) + return _query(flags, ns, self.ntoskip, ntoreturn, + spec, None if use_cmd else self.fields, + self.codec_options, ctx=sock_info.compression_context) class _GetMore(object): @@ -450,9 +450,10 @@ def get_message(self, dummy0, sock_info, use_cmd=False): ctx=sock_info.compression_context) return request_id, msg, size ns = "%s.%s" % (self.db, "$cmd") - return query(0, ns, 0, -1, spec, None, self.codec_options, ctx=ctx) + return _query(0, ns, 0, -1, spec, None, self.codec_options, + ctx=ctx) - return get_more(ns, self.ntoreturn, self.cursor_id, ctx) + return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) class _RawBatchQuery(_Query): @@ -532,8 +533,8 @@ def __last_error(namespace, args): cmd = SON([("getlasterror", 1)]) cmd.update(args) splitns = namespace.split('.', 1) - return query(0, splitns[0] + '.$cmd', 0, -1, cmd, - None, DEFAULT_CODEC_OPTIONS) + return _query(0, splitns[0] + '.$cmd', 0, -1, cmd, + None, DEFAULT_CODEC_OPTIONS) _pack_header = struct.Struct(" max_bson_size): message._raise_document_too_large(name, size, max_bson_size) else: - request_id, msg, size = message.query( + request_id, msg, size = message._query( flags, ns, 0, -1, spec, None, codec_options, check_keys, compression_ctx) diff --git a/test/test_pooling.py b/test/test_pooling.py index 5746f56d17..97a994ec38 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -282,7 +282,7 @@ def test_socket_checker(self): self.assertTrue(socket_checker.select(s, write=True, timeout=0)) self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) # Make the socket readable - _, msg, _ = message.query( + _, msg, _ = message._query( 0, 'admin.$cmd', 0, -1, SON([('isMaster', 1)]), None, DEFAULT_CODEC_OPTIONS) s.sendall(msg) From 5fd175c0d8daa0bfbd8b24f90b3a07355e082e01 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Aug 2021 16:52:41 -0700 Subject: [PATCH 0431/2111] PYTHON-2529 Remove UUIDLegacy (#698) --- bson/__init__.py | 3 +- bson/binary.py | 89 ------------------------------------- bson/codec_options.py | 4 +- doc/api/bson/binary.rst | 4 -- doc/changelog.rst | 5 +++ doc/migrate-to-pymongo4.rst | 14 ++++++ test/test_binary.py | 28 +++++------- test/test_bson.py | 6 +-- test/test_common.py | 7 +-- 9 files changed, 38 insertions(+), 122 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index e473166a49..298112d73c 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -70,7 +70,7 @@ from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, OLD_UUID_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY, - UUIDLegacy, UUID_SUBTYPE) + UUID_SUBTYPE) from bson.code import Code from bson.codec_options import ( CodecOptions, DEFAULT_CODEC_OPTIONS, _raw_document_class) @@ -707,7 +707,6 @@ def _encode_maxkey(name, dummy0, dummy1, dummy2): RE_TYPE: _encode_regex, SON: _encode_mapping, Timestamp: _encode_timestamp, - UUIDLegacy: _encode_binary, Decimal128: _encode_decimal128, # Special case. This will never be looked up directly. _abc.Mapping: _encode_mapping, diff --git a/bson/binary.py b/bson/binary.py index 7759e97090..7110fede26 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -337,92 +337,3 @@ def __ne__(self, other): def __repr__(self): return "Binary(%s, %s)" % (bytes.__repr__(self), self.__subtype) - - -class UUIDLegacy(Binary): - """**DEPRECATED** - UUID wrapper to support working with UUIDs stored as - PYTHON_LEGACY. - - .. note:: This class has been deprecated and will be removed in - PyMongo 4.0. Use :meth:`~bson.binary.Binary.from_uuid` and - :meth:`~bson.binary.Binary.as_uuid` with the appropriate - :class:`~bson.binary.UuidRepresentation` to handle legacy-formatted - UUIDs instead.:: - - from bson import Binary, UUIDLegacy, UuidRepresentation - import uuid - - my_uuid = uuid.uuid4() - legacy_uuid = UUIDLegacy(my_uuid) - binary_uuid = Binary.from_uuid( - my_uuid, UuidRepresentation.PYTHON_LEGACY) - - assert legacy_uuid == binary_uuid - assert legacy_uuid.uuid == binary_uuid.as_uuid( - UuidRepresentation.PYTHON_LEGACY) - - .. doctest:: - - >>> import uuid - >>> from bson.binary import Binary, UUIDLegacy, STANDARD - >>> from bson.codec_options import CodecOptions - >>> my_uuid = uuid.uuid4() - >>> coll = db.get_collection('test', - ... CodecOptions(uuid_representation=STANDARD)) - >>> coll.insert_one({'uuid': Binary(my_uuid.bytes, 3)}).inserted_id - ObjectId('...') - >>> coll.count_documents({'uuid': my_uuid}) - 0 - >>> coll.count_documents({'uuid': UUIDLegacy(my_uuid)}) - 1 - >>> coll.find({'uuid': UUIDLegacy(my_uuid)})[0]['uuid'] - UUID('...') - >>> - >>> # Convert from subtype 3 to subtype 4 - >>> doc = coll.find_one({'uuid': UUIDLegacy(my_uuid)}) - >>> coll.replace_one({"_id": doc["_id"]}, doc).matched_count - 1 - >>> coll.count_documents({'uuid': UUIDLegacy(my_uuid)}) - 0 - >>> coll.count_documents({'uuid': {'$in': [UUIDLegacy(my_uuid), my_uuid]}}) - 1 - >>> coll.find_one({'uuid': my_uuid})['uuid'] - UUID('...') - - Raises :exc:`TypeError` if `obj` is not an instance of :class:`~uuid.UUID`. - - :Parameters: - - `obj`: An instance of :class:`~uuid.UUID`. - - .. versionchanged:: 3.11 - Deprecated. The same functionality can be replicated using the - :meth:`~Binary.from_uuid` and :meth:`~Binary.to_uuid` methods with - :data:`~UuidRepresentation.PYTHON_LEGACY`. - .. versionadded:: 2.1 - """ - - def __new__(cls, obj): - warn( - "The UUIDLegacy class has been deprecated and will be removed " - "in PyMongo 4.0. Use the Binary.from_uuid() and Binary.to_uuid() " - "with the appropriate UuidRepresentation to handle " - "legacy-formatted UUIDs instead.", - DeprecationWarning, stacklevel=2) - if not isinstance(obj, UUID): - raise TypeError("obj must be an instance of uuid.UUID") - self = Binary.__new__(cls, obj.bytes, OLD_UUID_SUBTYPE) - self.__uuid = obj - return self - - def __getnewargs__(self): - # Support copy and deepcopy - return (self.__uuid,) - - @property - def uuid(self): - """UUID instance wrapped by this UUIDLegacy instance. - """ - return self.__uuid - - def __repr__(self): - return "UUIDLegacy('%s')" % self.__uuid diff --git a/bson/codec_options.py b/bson/codec_options.py index 1a853d1fc3..9ce772427d 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -207,8 +207,8 @@ class CodecOptions(_options_base): See :doc:`/examples/datetimes` for examples using the `tz_aware` and `tzinfo` options. - See :class:`~bson.binary.UUIDLegacy` for examples using the - `uuid_representation` option. + See :doc:`examples/uuid` for examples using the `uuid_representation` + option. :Parameters: - `document_class`: BSON documents returned in queries will be decoded diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index ab4d599f8c..a754d43d52 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -22,7 +22,3 @@ .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) :members: :show-inheritance: - - .. autoclass:: UUIDLegacy(obj) - :members: - :show-inheritance: diff --git a/doc/changelog.rst b/doc/changelog.rst index 189e33e003..d97c8dbe6b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -86,6 +86,7 @@ Breaking Changes in 4.0 - Removed :exc:`pymongo.errors.NotMasterError`. Use :exc:`pymongo.errors.NotPrimaryError` instead. - Removed :attr:`pymongo.GEOHAYSTACK`. +- Removed :class:`bson.binary.UUIDLegacy`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. @@ -307,6 +308,8 @@ Highlights include: ``MongoClient(uuidRepresentation='unspecified')`` which will become the default UUID representation starting in PyMongo 4.0. See :ref:`handling-uuid-data-example` for details. +- New methods :meth:`bson.binary.Binary.from_uuid` and + :meth:`bson.binary.Binary.as_uuid`. - Added the ``background`` parameter to :meth:`pymongo.database.Database.validate_collection`. For a description of this parameter see the MongoDB documentation for the `validate command`_. @@ -342,6 +345,8 @@ Deprecations: - Deprecated :attr:`pymongo.mongo_client.MongoClient.is_locked`. Use :meth:`~pymongo.database.Database.command` to run the ``currentOp`` command instead. See the documentation for more information. +- Deprecated :class:`bson.binary.UUIDLegacy`. Use + :meth:`bson.binary.Binary.from_uuid` instead. Unavoidable breaking changes: diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 7c741e8cbf..6b84c7fcbd 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -499,6 +499,20 @@ Removed :attr:`pymongo.GEOHAYSTACK`. Replace with "geoHaystack" or create a 2d index and use $geoNear or $geoWithin instead. See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack. +UUIDLegacy is removed +--------------------- + +Removed :class:`bson.binary.UUIDLegacy`. Use +:meth:`bson.binary.Binary.from_uuid` instead. Code like this:: + + uu = uuid.uuid4() + uuid_legacy = UUIDLegacy(uu) + +can be changed to this:: + + uu = uuid.uuid4() + uuid_legacy = Binary.from_uuid(uu, PYTHON_LEGACY) + Removed features with no migration path --------------------------------------- diff --git a/test/test_binary.py b/test/test_binary.py index 8018c74b51..440c04667a 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -287,9 +287,7 @@ def test_uri_to_uuid(self): CSHARP_LEGACY) @client_context.require_connection - @ignore_deprecations def test_uuid_queries(self): - db = client_context.client.pymongo_test coll = db.test coll.drop() @@ -298,29 +296,23 @@ def test_uuid_queries(self): coll.insert_one({'uuid': Binary(uu.bytes, 3)}) self.assertEqual(1, coll.count_documents({})) - # Test UUIDLegacy queries. + # Test regular UUID queries (using subtype 4). coll = db.get_collection( "test", CodecOptions( uuid_representation=UuidRepresentation.STANDARD)) - self.assertEqual(0, coll.find({'uuid': uu}).count()) - cur = coll.find({'uuid': UUIDLegacy(uu)}) - self.assertEqual(1, cur.count()) - retrieved = next(cur) - self.assertEqual(uu, retrieved['uuid']) - - # Test regular UUID queries (using subtype 4). + self.assertEqual(0, coll.count_documents({'uuid': uu})) coll.insert_one({'uuid': uu}) self.assertEqual(2, coll.count_documents({})) - cur = coll.find({'uuid': uu}) - self.assertEqual(1, cur.count()) - retrieved = next(cur) - self.assertEqual(uu, retrieved['uuid']) + docs = list(coll.find({'uuid': uu})) + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]['uuid']) # Test both. - predicate = {'uuid': {'$in': [uu, UUIDLegacy(uu)]}} + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {'uuid': {'$in': [uu, uu_legacy]}} self.assertEqual(2, coll.count_documents(predicate)) - cur = coll.find(predicate) - self.assertEqual(2, cur.count()) + docs = list(coll.find(predicate)) + self.assertEqual(2, len(docs)) coll.drop() def test_pickle(self): @@ -338,7 +330,7 @@ def test_pickle(self): self.assertEqual(b1, pickle.loads(pickle.dumps(b1, proto))) uu = uuid.uuid4() - uul = UUIDLegacy(uu) + uul = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) self.assertEqual(uul, copy.copy(uul)) self.assertEqual(uul, copy.deepcopy(uul)) diff --git a/test/test_bson.py b/test/test_bson.py index ccae6fcd66..ee30c8948c 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -41,7 +41,7 @@ EPOCH_AWARE, is_valid, Regex) -from bson.binary import Binary, UUIDLegacy +from bson.binary import Binary, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions from bson.int64 import Int64 @@ -646,14 +646,12 @@ def test_uuid(self): self.assertNotEqual(uuid.uuid4(), transformed_id) def test_uuid_legacy(self): - id = uuid.uuid4() - legacy = UUIDLegacy(id) + legacy = Binary.from_uuid(id, UuidRepresentation.PYTHON_LEGACY) self.assertEqual(3, legacy.subtype) transformed = decode(encode({"uuid": legacy}))["uuid"] self.assertTrue(isinstance(transformed, uuid.UUID)) self.assertEqual(id, transformed) - self.assertNotEqual(UUIDLegacy(uuid.uuid4()), UUIDLegacy(transformed)) # The C extension was segfaulting on unicode RegExs, so we have this test # that doesn't really test anything but the lack of a segfault. diff --git a/test/test_common.py b/test/test_common.py index 5a35fd8bb2..896df50e56 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -19,7 +19,7 @@ sys.path[0:0] = [""] -from bson.binary import UUIDLegacy, PYTHON_LEGACY, STANDARD +from bson.binary import Binary, PYTHON_LEGACY, STANDARD from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId @@ -53,7 +53,8 @@ def test_uuid_representation(self): "uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) self.assertEqual(None, coll.find_one({'uu': uu})) - self.assertEqual(uu, coll.find_one({'uu': UUIDLegacy(uu)})['uu']) + uul = Binary.from_uuid(uu, PYTHON_LEGACY) + self.assertEqual(uu, coll.find_one({'uu': uul})['uu']) # Test count_documents self.assertEqual(0, coll.count_documents({'uu': uu})) @@ -104,7 +105,7 @@ def test_uuid_representation(self): self.assertEqual(6, self.db.command( 'findAndModify', 'uuid', update={'$set': {'i': 7}}, - query={'_id': UUIDLegacy(uu)})['value']['i']) + query={'_id': Binary.from_uuid(uu, PYTHON_LEGACY)})['value']['i']) # Test (inline)_map_reduce coll.drop() From be47e4ca1494d3dbfd6eea5d23b797b2c9897fc6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 Aug 2021 10:02:36 -0700 Subject: [PATCH 0432/2111] PYTHON-2532 Remove modifiers option for find methods (#696) --- doc/api/pymongo/collection.rst | 4 +- doc/api/pymongo/cursor.rst | 4 +- doc/changelog.rst | 5 ++- doc/migrate-to-pymongo4.rst | 33 +++++++++++++++ pymongo/collection.py | 6 +-- pymongo/cursor.py | 21 ++++------ test/command_monitoring/legacy/find.json | 28 ++++++------- test/test_command_monitoring_legacy.py | 2 + test/test_cursor.py | 52 ------------------------ 9 files changed, 66 insertions(+), 89 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 1787f22c7a..420c060e7e 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -46,8 +46,8 @@ .. automethod:: aggregate .. automethod:: aggregate_raw_batches .. automethod:: watch - .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) - .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) .. automethod:: find_one(filter=None, *args, **kwargs) .. automethod:: find_one_and_delete .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) diff --git a/doc/api/pymongo/cursor.rst b/doc/api/pymongo/cursor.rst index 68b52bccee..513f051abb 100644 --- a/doc/api/pymongo/cursor.rst +++ b/doc/api/pymongo/cursor.rst @@ -15,7 +15,7 @@ .. autoattribute:: EXHAUST :annotation: - .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. autoclass:: pymongo.cursor.Cursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) :members: .. describe:: c[index] @@ -24,4 +24,4 @@ .. automethod:: __getitem__ - .. autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, modifiers=None, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) + .. autoclass:: pymongo.cursor.RawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) diff --git a/doc/changelog.rst b/doc/changelog.rst index d97c8dbe6b..fb937638e4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -76,9 +76,10 @@ Breaking Changes in 4.0 :attr:`pymongo.database.Database.outgoing_manipulators`, :attr:`pymongo.database.Database.incoming_copying_manipulators`, and :attr:`pymongo.database.Database.incoming_manipulators`. -- Removed the ``manipulate`` parameter from +- Removed the ``manipulate`` and ``modifiers`` parameters from :meth:`~pymongo.collection.Collection.find`, - :meth:`~pymongo.collection.Collection.find_one`, and + :meth:`~pymongo.collection.Collection.find_one`, + :meth:`~pymongo.collection.Collection.find_raw_batches`, and :meth:`~pymongo.cursor.Cursor`. - Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`, :meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`, diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 6b84c7fcbd..eedd90b6bb 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -448,6 +448,39 @@ can be changed to this:: .. _reIndex command: https://docs.mongodb.com/manual/reference/command/reIndex/ +The modifiers parameter is removed +.................................. + +Removed the ``modifiers`` parameter from +:meth:`~pymongo.collection.Collection.find`, +:meth:`~pymongo.collection.Collection.find_one`, +:meth:`~pymongo.collection.Collection.find_raw_batches`, and +:meth:`~pymongo.cursor.Cursor`. Pass the options directly to the method +instead. Code like this:: + + cursor = coll.find({}, modifiers={ + "$comment": "comment", + "$hint": {"_id": 1}, + "$min": {"_id": 0}, + "$max": {"_id": 6}, + "$maxTimeMS": 6000, + "$returnKey": False, + "$showDiskLoc": False, + }) + +can be changed to this:: + + cursor = coll.find( + {}, + comment="comment", + hint={"_id": 1}, + min={"_id": 0}, + max={"_id": 6}, + max_time_ms=6000, + return_key=False, + show_record_id=False, + ) + SONManipulator is removed ------------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index e4cb2487b4..aba15cfebe 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1313,9 +1313,6 @@ def find(self, *args, **kwargs): interpret and trace the operation in the server logs and in profile data. Pass this as an alternative to calling :meth:`~pymongo.cursor.Cursor.comment` on the cursor. - - `modifiers` (optional): **DEPRECATED** - A dict specifying - additional MongoDB query modifiers. Use the keyword arguments listed - above instead. - `allow_disk_use` (optional): if True, MongoDB may use temporary disk files to store data exceeding the system memory limit while processing a blocking sort operation. The option has no effect if @@ -1339,6 +1336,9 @@ def find(self, *args, **kwargs): connection will be closed and discarded without being returned to the connection pool. + .. versionchanged:: 4.0 + Removed the ``modifiers`` option. + .. versionchanged:: 3.11 Added the ``allow_disk_use`` option. Deprecated the ``oplog_replay`` option. Support for this option is diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 2f6550d52c..7a8c08c9e1 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -136,10 +136,10 @@ def __init__(self, collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, - modifiers=None, batch_size=0, + batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, - max=None, min=None, return_key=False, show_record_id=False, - snapshot=False, comment=None, session=None, + max=None, min=None, return_key=None, show_record_id=None, + snapshot=None, comment=None, session=None, allow_disk_use=None): """Create a new cursor. @@ -186,10 +186,6 @@ def __init__(self, collection, filter=None, projection=None, skip=0, raise ValueError("not a valid value for cursor_type") validate_boolean("allow_partial_results", allow_partial_results) validate_boolean("oplog_replay", oplog_replay) - if modifiers is not None: - warnings.warn("the 'modifiers' parameter is deprecated", - DeprecationWarning, stacklevel=2) - validate_is_mapping("modifiers", modifiers) if not isinstance(batch_size, int): raise TypeError("batch_size must be an integer") if batch_size < 0: @@ -208,7 +204,6 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__skip = skip self.__limit = limit self.__batch_size = batch_size - self.__modifiers = modifiers and modifiers.copy() or {} self.__ordering = sort and helpers._index_document(sort) or None self.__max_scan = max_scan self.__explain = False @@ -318,7 +313,7 @@ def _clone(self, deepcopy=True, base=None): "max_time_ms", "max_await_time_ms", "comment", "max", "min", "ordering", "explain", "hint", "batch_size", "max_scan", - "query_flags", "modifiers", "collation", "empty", + "query_flags", "collation", "empty", "show_record_id", "return_key", "allow_disk_use", "snapshot", "exhaust") data = dict((k, v) for k, v in self.__dict__.items() @@ -370,7 +365,7 @@ def close(self): def __query_spec(self): """Get the spec to use for a query. """ - operators = self.__modifiers.copy() + operators = {} if self.__ordering: operators["$orderby"] = self.__ordering if self.__explain: @@ -387,12 +382,12 @@ def __query_spec(self): operators["$max"] = self.__max if self.__min: operators["$min"] = self.__min - if self.__return_key: + if self.__return_key is not None: operators["$returnKey"] = self.__return_key - if self.__show_record_id: + if self.__show_record_id is not None: # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. operators["$showDiskLoc"] = self.__show_record_id - if self.__snapshot: + if self.__snapshot is not None: operators["$snapshot"] = self.__snapshot if operators: diff --git a/test/command_monitoring/legacy/find.json b/test/command_monitoring/legacy/find.json index 55b185cc58..e2bb95306f 100644 --- a/test/command_monitoring/legacy/find.json +++ b/test/command_monitoring/legacy/find.json @@ -89,21 +89,19 @@ "skip": { "$numberLong": "2" }, - "modifiers": { - "$comment": "test", - "$hint": { - "_id": 1 - }, - "$max": { - "_id": 6 - }, - "$maxTimeMS": 6000, - "$min": { - "_id": 0 - }, - "$returnKey": false, - "$showDiskLoc": false - } + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + }, + "returnKey": false, + "showRecordId": false } }, "expectations": [ diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index acebe3a23d..2daa60679e 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -109,6 +109,8 @@ def run_scenario(self): elif name == 'find': if 'sort' in args: args['sort'] = list(args['sort'].items()) + if 'hint' in args: + args['hint'] = list(args['hint'].items()) for arg in 'skip', 'limit': if arg in args: args[arg] = int(args[arg]) diff --git a/test/test_cursor.py b/test/test_cursor.py index 3ba455c188..f743c6dcec 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1317,58 +1317,6 @@ def test_comment(self): next(cursor) self.assertRaises(InvalidOperation, cursor.comment, 'hello') - def test_modifiers(self): - c = self.db.test - - # "modifiers" is deprecated. - with ignore_deprecations(): - cur = c.find() - self.assertTrue('$query' not in cur._Cursor__query_spec()) - cur = c.find().comment("testing").max_time_ms(500) - self.assertTrue('$query' in cur._Cursor__query_spec()) - self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing") - self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500) - cur = c.find( - modifiers={"$maxTimeMS": 500, "$comment": "testing"}) - self.assertTrue('$query' in cur._Cursor__query_spec()) - self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing") - self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500) - - # Keyword arg overwrites modifier. - # If we remove the "modifiers" arg, delete this test after checking - # that TestCommandMonitoring.test_find_options covers all cases. - cur = c.find(comment="hi", modifiers={"$comment": "bye"}) - self.assertEqual(cur._Cursor__query_spec()["$comment"], "hi") - - cur = c.find(max_scan=1, modifiers={"$maxScan": 2}) - self.assertEqual(cur._Cursor__query_spec()["$maxScan"], 1) - - cur = c.find(max_time_ms=1, modifiers={"$maxTimeMS": 2}) - self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 1) - - cur = c.find(min=1, modifiers={"$min": 2}) - self.assertEqual(cur._Cursor__query_spec()["$min"], 1) - - cur = c.find(max=1, modifiers={"$max": 2}) - self.assertEqual(cur._Cursor__query_spec()["$max"], 1) - - cur = c.find(return_key=True, modifiers={"$returnKey": False}) - self.assertEqual(cur._Cursor__query_spec()["$returnKey"], True) - - cur = c.find(hint=[("a", 1)], modifiers={"$hint": {"b": "1"}}) - self.assertEqual(cur._Cursor__query_spec()["$hint"], {"a": 1}) - - # The arg is named show_record_id after the "find" command arg, the - # modifier is named $showDiskLoc for the OP_QUERY modifier. It's - # stored as $showDiskLoc then upgraded to showRecordId if we send a - # "find" command. - cur = c.find(show_record_id=True, modifiers={"$showDiskLoc": False}) - self.assertEqual(cur._Cursor__query_spec()["$showDiskLoc"], True) - - if not client_context.version.at_least(3, 7, 3): - cur = c.find(snapshot=True, modifiers={"$snapshot": False}) - self.assertEqual(cur._Cursor__query_spec()["$snapshot"], True) - def test_alive(self): self.db.test.delete_many({}) self.db.test.insert_many([{} for _ in range(3)]) From e3a61b9ff7d6b93ef4f0dc2403294fee81a592f3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 Aug 2021 10:53:28 -0700 Subject: [PATCH 0433/2111] PYTHON-1840 Remove waitQueueMultiple and ExceededMaxWaiters (#699) --- doc/changelog.rst | 9 ++++--- doc/migrate-to-pymongo4.rst | 9 +++++++ pymongo/client_options.py | 3 +-- pymongo/common.py | 3 --- pymongo/errors.py | 9 ------- pymongo/mongo_client.py | 6 ++--- pymongo/pool.py | 47 +++++++++---------------------------- test/test_client.py | 9 ------- test/test_pooling.py | 33 +++----------------------- test/test_uri_parser.py | 9 ------- 10 files changed, 33 insertions(+), 104 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index fb937638e4..96c0d7d373 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -15,13 +15,16 @@ Breaking Changes in 4.0 ....................... - Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. -- Removed :meth:`pymongo.database.Database.eval`, - :data:`pymongo.database.Database.system_js` and - :class:`pymongo.database.SystemJS`. +- Removed the ``waitQueueMultiple`` keyword argument to + :class:`~pymongo.mongo_client.MongoClient` and removed + :exc:`pymongo.errors.ExceededMaxWaiters`. - Removed :meth:`pymongo.mongo_client.MongoClient.fsync`, :meth:`pymongo.mongo_client.MongoClient.unlock`, and :attr:`pymongo.mongo_client.MongoClient.is_locked`. - Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. +- Removed :meth:`pymongo.database.Database.eval`, + :data:`pymongo.database.Database.system_js` and + :class:`pymongo.database.SystemJS`. - Removed :meth:`pymongo.database.Database.collection_names`. - Removed :meth:`pymongo.database.Database.current_op`. - Removed :meth:`pymongo.database.Database.authenticate` and diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index eedd90b6bb..ef8b48b8d0 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -62,6 +62,15 @@ get the same behavior. MongoClient ----------- +The waitQueueMultiple parameter is removed +.......................................... + +Removed the ``waitQueueMultiple`` keyword argument to +:class:`~pymongo.mongo_client.MongoClient` and removed +:exc:`pymongo.errors.ExceededMaxWaiters`. Instead of using +``waitQueueMultiple`` to bound queuing, limit the size of the thread +pool in your application. + MongoClient.fsync is removed ............................ diff --git a/pymongo/client_options.py b/pymongo/client_options.py index f53a9642e8..460dd66427 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -121,7 +121,6 @@ def _parse_pool_options(options): socket_timeout = options.get('sockettimeoutms') wait_queue_timeout = options.get( 'waitqueuetimeoutms', common.WAIT_QUEUE_TIMEOUT) - wait_queue_multiple = options.get('waitqueuemultiple') event_listeners = options.get('event_listeners') appname = options.get('appname') driver = options.get('driver') @@ -135,7 +134,7 @@ def _parse_pool_options(options): min_pool_size, max_idle_time_seconds, connect_timeout, socket_timeout, - wait_queue_timeout, wait_queue_multiple, + wait_queue_timeout, ssl_context, ssl_match_hostname, socket_keepalive, _EventListeners(event_listeners), appname, diff --git a/pymongo/common.py b/pymongo/common.py index 7bac951e0d..98470b7554 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -716,9 +716,6 @@ def validate_tzinfo(dummy, value): 'use tlsCertificateKeyFile to pass a single file containing both ' 'the client certificate and the private key')), 'ssl_pem_passphrase': ('renamed', 'tlsCertificateKeyFilePassword'), - 'waitqueuemultiple': ('removed', ( - 'Instead of using waitQueueMultiple to bound queuing, limit the size ' - 'of the thread pool in your application server')) } # Augment the option validator map with pymongo-specific option information. diff --git a/pymongo/errors.py b/pymongo/errors.py index dd1c244e37..1b15fc1f54 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -248,15 +248,6 @@ class InvalidURI(ConfigurationError): """Raised when trying to parse an invalid mongodb URI.""" -class ExceededMaxWaiters(PyMongoError): - """Raised when a thread tries to get a connection from a pool and - ``maxPoolSize * waitQueueMultiple`` threads are already waiting. - - .. versionadded:: 2.6 - """ - pass - - class DocumentTooLarge(InvalidDocument): """Raised when an encoded document is too large for the connected server. """ diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dab22cc335..c5a88c4fd3 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -243,9 +243,6 @@ def __init__( - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) a thread will wait for a socket from the pool if the pool has no free sockets. Defaults to ``None`` (no timeout). - - `waitQueueMultiple`: (integer or None) Multiplied by maxPoolSize - to give the number of threads allowed to wait for a socket at one - time. Defaults to ``None`` (no limit). - `heartbeatFrequencyMS`: (optional) The number of milliseconds between periodic server checks, or None to accept the default frequency of 10 seconds. @@ -505,6 +502,9 @@ def __init__( .. mongodoc:: connections + .. versionchanged:: 3.12 + Removed the ``waitQueueMultiple`` keyword argument. + .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. The following keyword arguments were deprecated: diff --git a/pymongo/pool.py b/pymongo/pool.py index 6dfd32ef0c..aa615b6c94 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -43,7 +43,6 @@ CertificateError, ConnectionFailure, ConfigurationError, - ExceededMaxWaiters, InvalidOperation, DocumentTooLarge, NetworkTimeout, @@ -261,7 +260,7 @@ class PoolOptions(object): __slots__ = ('__max_pool_size', '__min_pool_size', '__max_idle_time_seconds', '__connect_timeout', '__socket_timeout', - '__wait_queue_timeout', '__wait_queue_multiple', + '__wait_queue_timeout', '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', @@ -271,7 +270,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, max_idle_time_seconds=MAX_IDLE_TIME_SEC, connect_timeout=None, socket_timeout=None, wait_queue_timeout=WAIT_QUEUE_TIMEOUT, - wait_queue_multiple=None, ssl_context=None, + ssl_context=None, ssl_match_hostname=True, socket_keepalive=True, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, @@ -282,7 +281,6 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__connect_timeout = connect_timeout self.__socket_timeout = socket_timeout self.__wait_queue_timeout = wait_queue_timeout - self.__wait_queue_multiple = wait_queue_multiple self.__ssl_context = ssl_context self.__ssl_match_hostname = ssl_match_hostname self.__socket_keepalive = socket_keepalive @@ -396,13 +394,6 @@ def wait_queue_timeout(self): """ return self.__wait_queue_timeout - @property - def wait_queue_multiple(self): - """Multiplied by max_pool_size to give the number of threads allowed - to wait for a socket at one time. - """ - return self.__wait_queue_multiple - @property def ssl_context(self): """An SSLContext instance or None. @@ -1163,22 +1154,14 @@ def __init__(self, address, options, handshake=True): self.opts.event_listeners is not None and self.opts.event_listeners.enabled_for_cmap) - if (self.opts.wait_queue_multiple is None or - self.opts.max_pool_size is None): - max_waiters = float('inf') - else: - max_waiters = ( - self.opts.max_pool_size * self.opts.wait_queue_multiple) # The first portion of the wait queue. - # Enforces: maxPoolSize and waitQueueMultiple + # Enforces: maxPoolSize # Also used for: clearing the wait queue self.size_cond = threading.Condition(self.lock) self.requests = 0 self.max_pool_size = self.opts.max_pool_size if self.max_pool_size is None: self.max_pool_size = float('inf') - self.waiters = 0 - self.max_waiters = max_waiters # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue @@ -1468,22 +1451,14 @@ def _get_socket(self, all_credentials): with self.size_cond: self._raise_if_not_ready(emit_event=True) - if self.waiters >= self.max_waiters: - raise ExceededMaxWaiters( - 'exceeded max waiters: %s threads already waiting' % ( - self.waiters)) - self.waiters += 1 - try: - while not (self.requests < self.max_pool_size): - if not _cond_wait(self.size_cond, deadline): - # Timed out, notify the next thread to ensure a - # timeout doesn't consume the condition. - if self.requests < self.max_pool_size: - self.size_cond.notify() - self._raise_wait_queue_timeout() - self._raise_if_not_ready(emit_event=True) - finally: - self.waiters -= 1 + while not (self.requests < self.max_pool_size): + if not _cond_wait(self.size_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout() + self._raise_if_not_ready(emit_event=True) self.requests += 1 # We've now acquired the semaphore and must release it on error. diff --git a/test/test_client.py b/test/test_client.py index 2f23adcf92..2cd64dc48f 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -111,7 +111,6 @@ def test_keyword_arg_defaults(self): client = MongoClient(socketTimeoutMS=None, connectTimeoutMS=20000, waitQueueTimeoutMS=None, - waitQueueMultiple=None, replicaSet=None, read_preference=ReadPreference.PRIMARY, ssl=False, @@ -128,7 +127,6 @@ def test_keyword_arg_defaults(self): # socket.Socket.settimeout takes a float in seconds self.assertEqual(20.0, pool_opts.connect_timeout) self.assertEqual(None, pool_opts.wait_queue_timeout) - self.assertEqual(None, pool_opts.wait_queue_multiple) self.assertTrue(pool_opts.socket_keepalive) self.assertEqual(None, pool_opts.ssl_context) self.assertEqual(None, options.replica_set_name) @@ -1079,13 +1077,6 @@ def test_waitQueueTimeoutMS(self): client = rs_or_single_client(waitQueueTimeoutMS=2000) self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2) - def test_waitQueueMultiple(self): - client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2) - pool = get_pool(client) - self.assertEqual(pool.opts.wait_queue_multiple, 2) - self.assertEqual(pool.max_waiters, 6) - self.assertEqual(pool.max_pool_size, 3) - def test_socketKeepAlive(self): for socketKeepAlive in [True, False]: with warnings.catch_warnings(record=True) as ctx: diff --git a/test/test_pooling.py b/test/test_pooling.py index 97a994ec38..5b711a16ae 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -27,8 +27,7 @@ from pymongo import MongoClient, message from pymongo.errors import (AutoReconnect, ConnectionFailure, - DuplicateKeyError, - ExceededMaxWaiters) + DuplicateKeyError) sys.path[0:0] = [""] @@ -113,8 +112,7 @@ class SocketGetter(MongoThread): """Utility for TestPooling. Checks out a socket and holds it forever. Used in - test_no_wait_queue_timeout, test_wait_queue_multiple, and - test_no_wait_queue_multiple. + test_no_wait_queue_timeout. """ def __init__(self, client, pool): super(SocketGetter, self).__init__(client) @@ -350,7 +348,6 @@ def test_wait_queue_timeout(self): "Waited %.2f seconds for a socket, expected %f" % ( duration, wait_queue_timeout)) - def test_no_wait_queue_timeout(self): # Verify get_socket() with no wait_queue_timeout blocks forever. pool = self.create_pool(max_pool_size=1) @@ -372,31 +369,7 @@ def test_no_wait_queue_timeout(self): self.assertEqual(t.state, 'sock') self.assertEqual(t.sock, s1) - def test_wait_queue_multiple(self): - wait_queue_multiple = 3 - pool = self.create_pool( - max_pool_size=2, wait_queue_multiple=wait_queue_multiple) - - # Reach max_size sockets. - with pool.get_socket({}): - with pool.get_socket({}): - - # Reach max_size * wait_queue_multiple waiters. - threads = [] - for _ in range(6): - t = SocketGetter(self.c, pool) - t.start() - threads.append(t) - - time.sleep(1) - for t in threads: - self.assertEqual(t.state, 'get_socket') - - with self.assertRaises(ExceededMaxWaiters): - with pool.get_socket({}): - pass - - def test_no_wait_queue_multiple(self): + def test_checkout_more_than_max_pool_size(self): pool = self.create_pool(max_pool_size=2) socks = [] diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 6e09f6a4dc..068cf13c0d 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -488,15 +488,6 @@ def test_normalize_options(self): "fsync": True, "wtimeoutms": 10} self.assertEqual(res, parse_uri(uri)["options"]) - def test_waitQueueMultiple_deprecated(self): - uri = "mongodb://example.com/?waitQueueMultiple=5" - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter('always') - parse_uri(uri) - - self.assertEqual(len(ctx), 1) - self.assertTrue(issubclass(ctx[0].category, DeprecationWarning)) - def test_unquote_after_parsing(self): quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" unquoted_val = "val!@#$%^&*()_+,: etc" From c663fb69cc33b177c51301a34240b14316c25238 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 Aug 2021 12:50:15 -0700 Subject: [PATCH 0434/2111] PYTHON-2856 Properly assert 0 events in snapshot reads tests (#697) expectEvents must be non-empty if present. --- ...t-sessions-not-supported-client-error.json | 21 ++++++++++++++++--- .../invalid/test-expectEvents-minItems.json | 11 ++++++++++ test/unified_format.py | 5 ++++- 3 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 test/unified-test-format/invalid/test-expectEvents-minItems.json diff --git a/test/sessions/unified/snapshot-sessions-not-supported-client-error.json b/test/sessions/unified/snapshot-sessions-not-supported-client-error.json index 129aa8d74c..208e4cfe63 100644 --- a/test/sessions/unified/snapshot-sessions-not-supported-client-error.json +++ b/test/sessions/unified/snapshot-sessions-not-supported-client-error.json @@ -70,7 +70,12 @@ } } ], - "expectEvents": [] + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] }, { "description": "Client error on aggregate with snapshot", @@ -88,7 +93,12 @@ } } ], - "expectEvents": [] + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] }, { "description": "Client error on distinct with snapshot", @@ -107,7 +117,12 @@ } } ], - "expectEvents": [] + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] } ] } diff --git a/test/unified-test-format/invalid/test-expectEvents-minItems.json b/test/unified-test-format/invalid/test-expectEvents-minItems.json new file mode 100644 index 0000000000..0da3a56f79 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectEvents-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectEvents-minItems", + "schemaVersion": "1.0", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 890113afaf..bd818b7077 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1132,7 +1132,10 @@ def run_scenario(self, spec): self.run_operations(spec['operations']) # process expectEvents - self.check_events(spec.get('expectEvents', [])) + if 'expectEvents' in spec: + expect_events = spec['expectEvents'] + self.assertTrue(expect_events, 'expectEvents must be non-empty') + self.check_events(expect_events) # process outcome self.verify_outcome(spec.get('outcome', [])) From e1b068d37a406fbca640bd8e36c081292c98f523 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 Aug 2021 13:52:29 -0700 Subject: [PATCH 0435/2111] PYTHON-2525 Remove Collection.count and Cursor.count (#700) --- doc/api/pymongo/collection.rst | 1 - doc/changelog.rst | 2 + doc/migrate-to-pymongo4.rst | 40 +++++++++ pymongo/collection.py | 90 -------------------- pymongo/cursor.py | 59 ------------- test/auth_aws/test_auth_aws.py | 4 - test/ocsp/test_ocsp.py | 4 - test/test_collation.py | 12 +-- test/test_collection.py | 17 +--- test/test_command_monitoring_legacy.py | 4 +- test/test_crud_v1.py | 2 + test/test_cursor.py | 113 ++----------------------- test/test_examples.py | 2 +- test/test_gridfs.py | 9 +- test/test_gridfs_bucket.py | 9 +- test/test_read_concern.py | 10 +-- test/test_read_preferences.py | 5 -- test/test_session.py | 8 +- test/test_ssl.py | 3 +- test/utils_spec_runner.py | 2 + 20 files changed, 75 insertions(+), 321 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 420c060e7e..494a296610 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -66,4 +66,3 @@ .. automethod:: options .. automethod:: map_reduce .. automethod:: inline_map_reduce - .. automethod:: count diff --git a/doc/changelog.rst b/doc/changelog.rst index 96c0d7d373..4e52cafc29 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -51,6 +51,7 @@ Breaking Changes in 4.0 - Removed :meth:`pymongo.collection.Collection.update`. - Removed :meth:`pymongo.collection.Collection.remove`. - Removed :meth:`pymongo.collection.Collection.find_and_modify`. +- Removed :meth:`pymongo.collection.Collection.count`. - Removed :meth:`pymongo.collection.Collection.initialize_ordered_bulk_op`, :meth:`pymongo.collection.Collection.initialize_unordered_bulk_op`, and :class:`pymongo.bulk.BulkOperationBuilder`. Use @@ -64,6 +65,7 @@ Breaking Changes in 4.0 - Removed :class:`pymongo.cursor_manager.CursorManager` and :mod:`pymongo.cursor_manager`. - Removed :meth:`pymongo.mongo_client.MongoClient.set_cursor_manager`. +- Removed :meth:`pymongo.cursor.Cursor.count`. - Removed :mod:`pymongo.thread_util`. - Removed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`. - Removed :class:`~pymongo.ismaster.IsMaster`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index ef8b48b8d0..a613a15c46 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -365,6 +365,46 @@ Can be changed to this:: replaced_doc = collection.find_one_and_replace({'b': 1}, {'c': 1}) deleted_doc = collection.find_one_and_delete({'c': 1}) +Collection.count and Cursor.count is removed +............................................ + +Removed :meth:`pymongo.collection.Collection.count` and +:meth:`pymongo.cursor.Cursor.count`. Use +:meth:`~pymongo.collection.Collection.count_documents` or +:meth:`~pymongo.collection.Collection.estimated_document_count` instead. +Code like this:: + + ntotal = collection.count({}) + nmatched = collection.count({'price': {'$gte': 10}}) + # Or via the Cursor.count api: + ntotal = collection.find({}).count() + nmatched = collection.find({'price': {'$gte': 10}}).count() + +Can be changed to this:: + + ntotal = collection.estimated_document_count() + nmatched = collection.count_documents({'price': {'$gte': 10}}) + +.. note:: When migrating from :meth:`count` to :meth:`count_documents` + the following query operators must be replaced: + + +-------------+--------------------------------------------------------------+ + | Operator | Replacement | + +=============+==============================================================+ + | $where | `$expr`_ | + +-------------+--------------------------------------------------------------+ + | $near | `$geoWithin`_ with `$center`_; i.e. | + | | ``{'$geoWithin': {'$center': [[,], ]}}`` | + +-------------+--------------------------------------------------------------+ + | $nearSphere | `$geoWithin`_ with `$centerSphere`_; i.e. | + | | ``{'$geoWithin': {'$centerSphere': [[,], ]}}`` | + +-------------+--------------------------------------------------------------+ + +.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ +.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ +.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center +.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere + Collection.initialize_ordered_bulk_op and initialize_unordered_bulk_op is removed ................................................................................. diff --git a/pymongo/collection.py b/pymongo/collection.py index aba15cfebe..09a641eb78 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1450,17 +1450,6 @@ def _count_cmd(self, session, sock_info, slave_ok, cmd, collation): return 0 return int(res["n"]) - def _count(self, cmd, collation=None, session=None): - """Internal count helper.""" - # XXX: "ns missing" checks can be removed when we drop support for - # MongoDB 3.0, see SERVER-17051. - def _cmd(session, server, sock_info, slave_ok): - return self._count_cmd( - session, sock_info, slave_ok, cmd, collation) - - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) - def _aggregate_one_result( self, sock_info, slave_ok, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" @@ -1608,85 +1597,6 @@ def _cmd(session, server, sock_info, slave_ok): return self.__database.client._retryable_read( _cmd, self._read_preference_for(session), session) - def count(self, filter=None, session=None, **kwargs): - """**DEPRECATED** - Get the number of documents in this collection. - - The :meth:`count` method is deprecated and **not** supported in a - transaction. Please use :meth:`count_documents` or - :meth:`estimated_document_count` instead. - - All optional count parameters should be passed as keyword arguments - to this method. Valid options include: - - - `skip` (int): The number of matching documents to skip before - returning results. - - `limit` (int): The maximum number of documents to count. A limit - of 0 (the default) is equivalent to setting no limit. - - `maxTimeMS` (int): The maximum amount of time to allow the count - command to run, in milliseconds. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. - - `hint` (string or list of tuples): The index to use. Specify either - the index name as a string or the index specification as a list of - tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - - The :meth:`count` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - .. note:: When migrating from :meth:`count` to :meth:`count_documents` - the following query operators must be replaced: - - +-------------+-------------------------------------+ - | Operator | Replacement | - +=============+=====================================+ - | $where | `$expr`_ | - +-------------+-------------------------------------+ - | $near | `$geoWithin`_ with `$center`_ | - +-------------+-------------------------------------+ - | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | - +-------------+-------------------------------------+ - - $expr requires MongoDB 3.6+ - - :Parameters: - - `filter` (optional): A query document that selects which documents - to count in the collection. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): See list of options above. - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Support the `collation` option. - - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere - """ - warnings.warn("count is deprecated. Use estimated_document_count or " - "count_documents instead. Please note that $where must " - "be replaced by $expr, $near must be replaced by " - "$geoWithin with $center, and $nearSphere must be " - "replaced by $geoWithin with $centerSphere", - DeprecationWarning, stacklevel=2) - cmd = SON([("count", self.__name)]) - if filter is not None: - if "query" in kwargs: - raise ConfigurationError("can't pass both filter and query") - kwargs["query"] = filter - if "hint" in kwargs and not isinstance(kwargs["hint"], str): - kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd.update(kwargs) - return self._count(cmd, collation, session) - def create_indexes(self, indexes, session=None, **kwargs): """Create one or more indexes on this collection. diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 7a8c08c9e1..7370a585ad 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -799,65 +799,6 @@ def sort(self, key_or_list, direction=None): self.__ordering = helpers._index_document(keys) return self - def count(self, with_limit_and_skip=False): - """**DEPRECATED** - Get the size of the results set for this query. - - The :meth:`count` method is deprecated and **not** supported in a - transaction. Please use - :meth:`~pymongo.collection.Collection.count_documents` instead. - - Returns the number of documents in the results set for this query. Does - not take :meth:`limit` and :meth:`skip` into account by default - set - `with_limit_and_skip` to ``True`` if that is the desired behavior. - Raises :class:`~pymongo.errors.OperationFailure` on a database error. - - When used with MongoDB >= 2.6, :meth:`~count` uses any :meth:`~hint` - applied to the query. In the following example the hint is passed to - the count command: - - collection.find({'field': 'value'}).hint('field_1').count() - - The :meth:`count` method obeys the - :attr:`~pymongo.collection.Collection.read_preference` of the - :class:`~pymongo.collection.Collection` instance on which - :meth:`~pymongo.collection.Collection.find` was called. - - :Parameters: - - `with_limit_and_skip` (optional): take any :meth:`limit` or - :meth:`skip` that has been applied to this cursor into account when - getting the count - - .. note:: The `with_limit_and_skip` parameter requires server - version **>= 1.1.4-** - - .. versionchanged:: 3.7 - Deprecated. - - .. versionchanged:: 2.8 - The :meth:`~count` method now supports :meth:`~hint`. - """ - warnings.warn("count is deprecated. Use Collection.count_documents " - "instead.", DeprecationWarning, stacklevel=2) - validate_boolean("with_limit_and_skip", with_limit_and_skip) - cmd = SON([("count", self.__collection.name), - ("query", self.__spec)]) - if self.__max_time_ms is not None: - cmd["maxTimeMS"] = self.__max_time_ms - if self.__comment: - cmd["comment"] = self.__comment - - if self.__hint is not None: - cmd["hint"] = self.__hint - - if with_limit_and_skip: - if self.__limit: - cmd["limit"] = self.__limit - if self.__skip: - cmd["skip"] = self.__skip - - return self.__collection._count( - cmd, self.__collation, session=self.__session) - def distinct(self, key): """Get a list of distinct values for `key` among all documents in the result set of this query. diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index d17ebb5aac..0522201097 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -25,10 +25,6 @@ from pymongo.uri_parser import parse_uri -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - class TestAuthAWS(unittest.TestCase): @classmethod diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index 34270f872a..4b91c3b939 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -49,10 +49,6 @@ def _connect(options): client.admin.command('ismaster') -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - class TestOCSP(unittest.TestCase): def test_tls_insecure(self): diff --git a/test/test_collation.py b/test/test_collation.py index 2052fc8db3..f5c64bb022 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -26,7 +26,7 @@ UpdateMany, UpdateOne) from pymongo.write_concern import WriteConcern from test import client_context, IntegrationTest, unittest -from test.utils import EventListener, ignore_deprecations, rs_or_single_client +from test.utils import EventListener, rs_or_single_client class TestCollationObject(unittest.TestCase): @@ -150,16 +150,6 @@ def test_aggregate(self): collation=self.collation) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB - @ignore_deprecations - def test_count(self): - self.db.test.count(collation=self.collation) - self.assertCollationInLastCommand() - - self.listener.results.clear() - self.db.test.find(collation=self.collation).count() - self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_count_documents(self): self.db.test.count_documents({}, collation=self.collation) diff --git a/test/test_collection.py b/test/test_collection.py index cae4306b6e..84dca73dc9 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -58,7 +58,7 @@ from pymongo.write_concern import WriteConcern from test import client_context, unittest from test.test_client import IntegrationTest -from test.utils import (get_pool, ignore_deprecations, is_mongos, +from test.utils import (get_pool, is_mongos, rs_or_single_client, single_client, wait_until, EventListener, IMPOSSIBLE_WRITE_CONCERN) @@ -1568,21 +1568,6 @@ def test_manual_last_error(self): coll.insert_one({"x": 1}) self.db.command("getlasterror", w=1, wtimeout=1) - @ignore_deprecations - def test_count(self): - db = self.db - db.drop_collection("test") - - self.assertEqual(db.test.count(), 0) - db.test.insert_many([{}, {}]) - self.assertEqual(db.test.count(), 2) - db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}]) - self.assertEqual(db.test.find({'foo': 'bar'}).count(), 1) - self.assertEqual(db.test.count({'foo': 'bar'}), 1) - self.assertEqual(db.test.find({'foo': re.compile(r'ba.*')}).count(), 2) - self.assertEqual( - db.test.count({'foo': re.compile(r'ba.*')}), 2) - def test_count_documents(self): db = self.db db.drop_collection("test") diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index 2daa60679e..f5fa12003f 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -95,7 +95,9 @@ def run_scenario(self): for arg in test_args: args[camel_to_snake(arg)] = test_args[arg] - if name == 'bulk_write': + if name == 'count': + self.skipTest('PyMongo does not support count') + elif name == 'bulk_write': bulk_args = [] for request in args['requests']: opname = request['name'] diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 17f77a1635..5a63e030fe 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -95,6 +95,8 @@ def run_operation(collection, test): options = arguments.pop("options", {}) for option_name in options: arguments[camel_to_snake(option_name)] = options[option_name] + if operation == 'count': + raise unittest.SkipTest('PyMongo does not support count') if operation == "bulk_write": # Parse each request into a bulk write model. requests = [] diff --git a/test/test_cursor.py b/test/test_cursor.py index f743c6dcec..d7380988ab 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -769,62 +769,6 @@ def test_sort(self): break self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING) - @ignore_deprecations - def test_count(self): - db = self.db - db.test.drop() - - self.assertEqual(0, db.test.find().count()) - - db.test.insert_many([{"x": i} for i in range(10)]) - - self.assertEqual(10, db.test.find().count()) - self.assertTrue(isinstance(db.test.find().count(), int)) - self.assertEqual(10, db.test.find().limit(5).count()) - self.assertEqual(10, db.test.find().skip(5).count()) - - self.assertEqual(1, db.test.find({"x": 1}).count()) - self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count()) - - a = db.test.find() - b = a.count() - for _ in a: - break - self.assertEqual(b, a.count()) - - self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count()) - - @ignore_deprecations - def test_count_with_hint(self): - collection = self.db.test - collection.drop() - - collection.insert_many([{'i': 1}, {'i': 2}]) - self.assertEqual(2, collection.find().count()) - - collection.create_index([('i', 1)]) - - self.assertEqual(1, collection.find({'i': 1}).hint("_id_").count()) - self.assertEqual(2, collection.find().hint("_id_").count()) - - self.assertRaises(OperationFailure, - collection.find({'i': 1}).hint("BAD HINT").count) - - # Create a sparse index which should have no entries. - collection.create_index([('x', 1)], sparse=True) - - self.assertEqual(0, collection.find({'i': 1}).hint("x_1").count()) - self.assertEqual( - 0, collection.find({'i': 1}).hint([("x", 1)]).count()) - - if client_context.version.at_least(3, 3, 2): - self.assertEqual(0, collection.find().hint("x_1").count()) - self.assertEqual(0, collection.find().hint([("x", 1)]).count()) - else: - self.assertEqual(2, collection.find().hint("x_1").count()) - self.assertEqual(2, collection.find().hint([("x", 1)]).count()) - - @ignore_deprecations def test_where(self): db = self.db db.test.drop() @@ -854,9 +798,6 @@ def test_where(self): 3, len(list(db.test.find().where(code_with_scope)))) self.assertEqual(10, len(list(db.test.find()))) - - self.assertEqual(3, db.test.find().where('this.x < 3').count()) - self.assertEqual(10, db.test.find().count()) self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where('this.x < 3')]) @@ -1013,12 +954,6 @@ def test_clone_empty(self): self.assertRaises(StopIteration, cursor.next) self.assertRaises(StopIteration, cursor2.next) - @ignore_deprecations - def test_count_with_fields(self): - self.db.test.drop() - self.db.test.insert_one({"x": 1}) - self.assertEqual(1, self.db.test.find({}, ["a"]).count()) - def test_bad_getitem(self): self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello") self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5) @@ -1105,30 +1040,6 @@ def test_getitem_numeric_index(self): self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) - @ignore_deprecations - def test_count_with_limit_and_skip(self): - self.assertRaises(TypeError, self.db.test.find().count, "foo") - - def check_len(cursor, length): - self.assertEqual(len(list(cursor)), cursor.count(True)) - self.assertEqual(length, cursor.count(True)) - - self.db.drop_collection("test") - self.db.test.insert_many([{"i": i} for i in range(100)]) - - check_len(self.db.test.find(), 100) - - check_len(self.db.test.find().limit(10), 10) - check_len(self.db.test.find().limit(110), 100) - - check_len(self.db.test.find().skip(10), 90) - check_len(self.db.test.find().skip(110), 0) - - check_len(self.db.test.find().limit(10).skip(10), 10) - check_len(self.db.test.find()[10:20], 10) - check_len(self.db.test.find().limit(10).skip(95), 5) - check_len(self.db.test.find()[95:105], 5) - def test_len(self): self.assertRaises(TypeError, len, self.db.test.find()) @@ -1274,7 +1185,6 @@ def test_with_statement(self): self.assertTrue(c1.alive) @client_context.require_no_mongos - @ignore_deprecations def test_comment(self): # MongoDB 3.1.5 changed the ns for commands. regex = {'$regex': r'pymongo_test.(\$cmd|test)'} @@ -1290,24 +1200,15 @@ def test_comment(self): self.db.command('profile', 2) # Profile ALL commands. try: list(self.db.test.find().comment('foo')) - op = self.db.system.profile.find({'ns': 'pymongo_test.test', - 'op': 'query', - query_key: 'foo'}) - self.assertEqual(op.count(), 1) - - self.db.test.find().comment('foo').count() - op = self.db.system.profile.find({'ns': regex, - 'op': 'command', - 'command.count': 'test', - 'command.comment': 'foo'}) - self.assertEqual(op.count(), 1) + count = self.db.system.profile.count_documents( + {'ns': 'pymongo_test.test', 'op': 'query', query_key: 'foo'}) + self.assertEqual(count, 1) self.db.test.find().comment('foo').distinct('type') - op = self.db.system.profile.find({'ns': regex, - 'op': 'command', - 'command.distinct': 'test', - 'command.comment': 'foo'}) - self.assertEqual(op.count(), 1) + count = self.db.system.profile.count_documents( + {'ns': regex, 'op': 'command', 'command.distinct': 'test', + 'command.comment': 'foo'}) + self.assertEqual(count, 1) finally: self.db.command('profile', 0) # Turn off profiling. self.db.system.profile.drop() diff --git a/test/test_examples.py b/test/test_examples.py index ec9021ec26..f96ca8b53c 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -63,7 +63,7 @@ def test_first_three_examples(self): cursor = db.inventory.find({"item": "canvas"}) # End Example 2 - self.assertEqual(cursor.count(), 1) + self.assertEqual(len(list(cursor)), 1) # Start Example 3 db.inventory.insert_many([ diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 2679e7b823..e6b84f2977 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -38,8 +38,7 @@ from test import (client_context, unittest, IntegrationTest) -from test.utils import (ignore_deprecations, - joinall, +from test.utils import (joinall, one, rs_client, rs_or_single_client, @@ -422,7 +421,6 @@ def test_gridfs_lazy_connect(self): f = fs.new_file() self.assertRaises(ServerSelectionTimeoutError, f.close) - @ignore_deprecations def test_gridfs_find(self): self.fs.put(b"test2", filename="two") time.sleep(0.01) @@ -431,8 +429,9 @@ def test_gridfs_find(self): self.fs.put(b"test1", filename="one") time.sleep(0.01) self.fs.put(b"test2++", filename="two") - self.assertEqual(3, self.fs.find({"filename": "two"}).count()) - self.assertEqual(4, self.fs.find().count()) + files = self.db.fs.files + self.assertEqual(3, files.count_documents({"filename": "two"})) + self.assertEqual(4, files.count_documents({})) cursor = self.fs.find( no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) gout = next(cursor) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index f31949f03c..8b4c2cf346 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -37,8 +37,7 @@ from test import (client_context, unittest, IntegrationTest) -from test.utils import (ignore_deprecations, - joinall, +from test.utils import (joinall, one, rs_client, rs_or_single_client, @@ -350,7 +349,6 @@ def test_gridfs_lazy_connect(self): ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b"") # Still no connection. - @ignore_deprecations def test_gridfs_find(self): self.fs.upload_from_stream("two", b"test2") time.sleep(0.01) @@ -359,8 +357,9 @@ def test_gridfs_find(self): self.fs.upload_from_stream("one", b"test1") time.sleep(0.01) self.fs.upload_from_stream("two", b"test2++") - self.assertEqual(3, self.fs.find({"filename": "two"}).count()) - self.assertEqual(4, self.fs.find({}).count()) + files = self.db.fs.files + self.assertEqual(3, files.count_documents({"filename": "two"})) + self.assertEqual(4, files.count_documents({})) cursor = self.fs.find( {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2) diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 0dc9b609bc..d330a741bb 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -68,11 +68,11 @@ def test_read_concern_uri(self): def test_invalid_read_concern(self): coll = self.db.get_collection( 'coll', read_concern=ReadConcern('majority')) - self.assertRaisesRegexp( - ConfigurationError, - 'read concern level of majority is not valid ' - 'with a max wire version of [0-3]', - coll.count) + with self.assertRaisesRegex( + ConfigurationError, + 'read concern level of majority is not valid ' + 'with a max wire version of [0-3]'): + coll.find_one() @client_context.require_version_min(3, 1, 9, -1) def test_find_command(self): diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 7a913d23bb..c40bfea236 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -40,7 +40,6 @@ IntegrationTest, unittest) from test.utils import (connected, - ignore_deprecations, one, OvertCommandListener, rs_client, @@ -431,10 +430,6 @@ def test_inline_map_reduce(self): 'inline_map_reduce', 'function() { }', 'function() { }') - @ignore_deprecations - def test_count(self): - self._test_coll_helper(True, self.c.pymongo_test.test, 'count') - def test_count_documents(self): self._test_coll_helper( True, self.c.pymongo_test.test, 'count_documents', {}) diff --git a/test/test_session.py b/test/test_session.py index 45fcae636b..6103ecfb65 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -32,8 +32,7 @@ OperationFailure) from pymongo.read_concern import ReadConcern from test import IntegrationTest, client_context, unittest, SkipTest -from test.utils import (ignore_deprecations, - rs_or_single_client, +from test.utils import (rs_or_single_client, EventListener, TestCreator, wait_until) @@ -290,7 +289,6 @@ def test_collection(self): ops.extend([ (coll.distinct, ['a'], {}), (coll.find_one, [], {}), - (coll.count, [], {}), (coll.count_documents, [{}], {}), (coll.inline_map_reduce, ['function() {}', 'function() {}'], {}), (coll.list_indexes, [], {}), @@ -337,7 +335,6 @@ def test_cursor(self): ops = [ ('find', lambda session: list(coll.find(session=session))), ('getitem', lambda session: coll.find(session=session)[0]), - ('count', lambda session: coll.find(session=session).count()), ('distinct', lambda session: coll.find(session=session).distinct('a')), ('explain', lambda session: coll.find(session=session).explain()), @@ -811,8 +808,6 @@ def test_reads(self): lambda coll, session: list(coll.find({}, session=session))) self._test_reads( lambda coll, session: coll.find_one({}, session=session)) - self._test_reads( - lambda coll, session: coll.count(session=session)) self._test_reads( lambda coll, session: coll.count_documents({}, session=session)) self._test_reads( @@ -1062,7 +1057,6 @@ def setUp(self): if '$clusterTime' not in client_context.ismaster: raise SkipTest('$clusterTime not supported') - @ignore_deprecations def test_cluster_time(self): listener = SessionTestListener() # Prevent heartbeats from updating $clusterTime between operations. diff --git a/test/test_ssl.py b/test/test_ssl.py index 65ed16968c..e226a3cbbb 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -559,7 +559,8 @@ def test_mongodb_x509_auth(self): ssl_cert_reqs=ssl.CERT_NONE, ssl_certfile=CLIENT_PEM) - self.assertRaises(OperationFailure, noauth.pymongo_test.test.count) + with self.assertRaises(OperationFailure): + noauth.pymongo_test.test.find_one() listener = EventListener() auth = MongoClient( diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 80173f7c3a..c16e17c5e9 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -256,6 +256,8 @@ def run_operation(self, sessions, collection, operation): name = 'open_download_stream_by_name' elif name == 'download': name = 'open_download_stream' + elif name == 'count': + self.skipTest('PyMongo does not support count') database = collection.database collection = database.get_collection(collection.name) From 10002fad1cf046eb3af994b3153ed7d0de109da2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 Aug 2021 14:00:41 -0700 Subject: [PATCH 0436/2111] PYTHON-2857 Remove the socketKeepAlive option (#702) --- doc/changelog.rst | 2 ++ doc/migrate-to-pymongo4.rst | 8 ++++++++ pymongo/client_options.py | 3 +-- pymongo/common.py | 1 - pymongo/mongo_client.py | 16 +++------------- pymongo/pool.py | 18 ++++-------------- test/test_client.py | 19 +++++-------------- 7 files changed, 23 insertions(+), 44 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4e52cafc29..fcbd8bd42c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -18,6 +18,8 @@ Breaking Changes in 4.0 - Removed the ``waitQueueMultiple`` keyword argument to :class:`~pymongo.mongo_client.MongoClient` and removed :exc:`pymongo.errors.ExceededMaxWaiters`. +- Removed the ``socketKeepAlive`` keyword argument to + :class:`~pymongo.mongo_client.MongoClient`. - Removed :meth:`pymongo.mongo_client.MongoClient.fsync`, :meth:`pymongo.mongo_client.MongoClient.unlock`, and :attr:`pymongo.mongo_client.MongoClient.is_locked`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index a613a15c46..0a452884f2 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -71,6 +71,14 @@ Removed the ``waitQueueMultiple`` keyword argument to ``waitQueueMultiple`` to bound queuing, limit the size of the thread pool in your application. +The socketKeepAlive parameter is removed +.......................................... + +Removed the ``socketKeepAlive`` keyword argument to +:class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP +keepalive. For more information see: +https://docs.mongodb.com/manual/faq/diagnostics/#does-tcp-keepalive-time-affect-mongodb-deployments + MongoClient.fsync is removed ............................ diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 460dd66427..ea223145b2 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -117,7 +117,6 @@ def _parse_pool_options(options): if max_pool_size is not None and min_pool_size > max_pool_size: raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT) - socket_keepalive = options.get('socketkeepalive', True) socket_timeout = options.get('sockettimeoutms') wait_queue_timeout = options.get( 'waitqueuetimeoutms', common.WAIT_QUEUE_TIMEOUT) @@ -135,7 +134,7 @@ def _parse_pool_options(options): max_idle_time_seconds, connect_timeout, socket_timeout, wait_queue_timeout, - ssl_context, ssl_match_hostname, socket_keepalive, + ssl_context, ssl_match_hostname, _EventListeners(event_listeners), appname, driver, diff --git a/pymongo/common.py b/pymongo/common.py index 98470b7554..8b06527562 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -650,7 +650,6 @@ def validate_tzinfo(dummy, value): 'server_api': validate_server_api_or_none, 'fsync': validate_boolean_or_string, 'minpoolsize': validate_non_negative_integer, - 'socketkeepalive': validate_boolean_or_string, 'tlscrlfile': validate_readable, 'tz_aware': validate_boolean_or_string, 'unicode_decode_error_handler': validate_unicode_decode_error_handler, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index c5a88c4fd3..0c1361e344 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -309,10 +309,6 @@ def __init__( trigger a retry, see the `retryable reads specification `_. - - `socketKeepAlive`: (boolean) **DEPRECATED** Whether to send - periodic keep-alive packets on connected sockets. Defaults to - ``True``. Disabling it is not recommended, see - https://docs.mongodb.com/manual/faq/diagnostics/#does-tcp-keepalive-time-affect-mongodb-deployments", - `compressors`: Comma separated list of compressors for wire protocol compression. The list is used to negotiate a compressor with the server. Currently supported options are "snappy", "zlib" @@ -502,8 +498,9 @@ def __init__( .. mongodoc:: connections - .. versionchanged:: 3.12 - Removed the ``waitQueueMultiple`` keyword argument. + .. versionchanged:: 4.0 + Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` keyword + arguments. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. @@ -700,13 +697,6 @@ def __init__( # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - if 'socketkeepalive' in opts: - warnings.warn( - "The socketKeepAlive option is deprecated. It now" - "defaults to true and disabling it is not recommended, see " - "https://docs.mongodb.com/manual/faq/diagnostics/" - "#does-tcp-keepalive-time-affect-mongodb-deployments", - DeprecationWarning, stacklevel=2) self.__options = options = ClientOptions( username, password, dbase, opts) diff --git a/pymongo/pool.py b/pymongo/pool.py index aa615b6c94..5434676b1e 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -261,7 +261,7 @@ class PoolOptions(object): '__max_idle_time_seconds', '__connect_timeout', '__socket_timeout', '__wait_queue_timeout', - '__ssl_context', '__ssl_match_hostname', '__socket_keepalive', + '__ssl_context', '__ssl_match_hostname', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', '__pause_enabled', '__server_api', '__load_balanced') @@ -271,7 +271,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, max_idle_time_seconds=MAX_IDLE_TIME_SEC, connect_timeout=None, socket_timeout=None, wait_queue_timeout=WAIT_QUEUE_TIMEOUT, ssl_context=None, - ssl_match_hostname=True, socket_keepalive=True, + ssl_match_hostname=True, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, pause_enabled=True, server_api=None, load_balanced=None): @@ -283,7 +283,6 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__wait_queue_timeout = wait_queue_timeout self.__ssl_context = ssl_context self.__ssl_match_hostname = ssl_match_hostname - self.__socket_keepalive = socket_keepalive self.__event_listeners = event_listeners self.__appname = appname self.__driver = driver @@ -406,13 +405,6 @@ def ssl_match_hostname(self): """ return self.__ssl_match_hostname - @property - def socket_keepalive(self): - """Whether to send periodic messages to determine if a connection - is closed. - """ - return self.__socket_keepalive - @property def event_listeners(self): """An instance of pymongo.monitoring._EventListeners. @@ -1000,10 +992,8 @@ def _create_connection(address, options): try: sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) sock.settimeout(options.connect_timeout) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, - options.socket_keepalive) - if options.socket_keepalive: - _set_keepalive_times(sock) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) sock.connect(sa) return sock except socket.error as e: diff --git a/test/test_client.py b/test/test_client.py index 2cd64dc48f..f4f67b0098 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -127,7 +127,6 @@ def test_keyword_arg_defaults(self): # socket.Socket.settimeout takes a float in seconds self.assertEqual(20.0, pool_opts.connect_timeout) self.assertEqual(None, pool_opts.wait_queue_timeout) - self.assertTrue(pool_opts.socket_keepalive) self.assertEqual(None, pool_opts.ssl_context) self.assertEqual(None, options.replica_set_name) self.assertEqual(ReadPreference.PRIMARY, client.read_preference) @@ -1078,19 +1077,11 @@ def test_waitQueueTimeoutMS(self): self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2) def test_socketKeepAlive(self): - for socketKeepAlive in [True, False]: - with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter("always") - client = rs_or_single_client(socketKeepAlive=socketKeepAlive) - self.assertTrue(any("The socketKeepAlive option is deprecated" - in str(k) for k in ctx)) - pool = get_pool(client) - self.assertEqual(socketKeepAlive, - pool.opts.socket_keepalive) - with pool.get_socket({}) as sock_info: - keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE) - self.assertEqual(socketKeepAlive, bool(keepalive)) + pool = get_pool(self.client) + with pool.get_socket({}) as sock_info: + keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE) + self.assertTrue(keepalive) def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware='foo') From 71a1656be062d6ae0d38e6aafcefe920d9bbdcb6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 12 Aug 2021 09:29:30 -0700 Subject: [PATCH 0437/2111] PYTHON-2038 Remove pymongo.errors.CertificateError (#705) --- doc/changelog.rst | 1 + doc/migrate-to-pymongo4.rst | 6 ++++++ pymongo/errors.py | 7 ++++--- pymongo/pool.py | 12 ++++++------ pymongo/pyopenssl_context.py | 2 +- 5 files changed, 18 insertions(+), 10 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index fcbd8bd42c..ff3c421e62 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -93,6 +93,7 @@ Breaking Changes in 4.0 :meth:`pymongo.message.query`, and :meth:`pymongo.message.update`. - Removed :exc:`pymongo.errors.NotMasterError`. Use :exc:`pymongo.errors.NotPrimaryError` instead. +- Removed :exc:`pymongo.errors.CertificateError`. - Removed :attr:`pymongo.GEOHAYSTACK`. - Removed :class:`bson.binary.UUIDLegacy`. - The "tls" install extra is no longer necessary or supported and will be diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 0a452884f2..2f8f3395e6 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -582,6 +582,12 @@ NotMasterError is removed Removed :exc:`~pymongo.errors.NotMasterError`. Use :exc:`~pymongo.errors.NotPrimaryError` instead. +CertificateError is removed +--------------------------- + +Removed :exc:`~pymongo.errors.CertificateError`. Since PyMongo 3.0 this error +is handled internally and is never raised to the application. + pymongo.GEOHAYSTACK is removed ------------------------------ diff --git a/pymongo/errors.py b/pymongo/errors.py index 1b15fc1f54..0ee35827a7 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -18,14 +18,15 @@ try: # CPython 3.7+ - from ssl import SSLCertVerificationError as CertificateError + from ssl import SSLCertVerificationError as _CertificateError except ImportError: try: - from ssl import CertificateError + from ssl import CertificateError as _CertificateError except ImportError: - class CertificateError(ValueError): + class _CertificateError(ValueError): pass + class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" def __init__(self, message='', error_labels=None): diff --git a/pymongo/pool.py b/pymongo/pool.py index 5434676b1e..20812a0cad 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -40,7 +40,7 @@ ORDERED_TYPES, WAIT_QUEUE_TIMEOUT) from pymongo.errors import (AutoReconnect, - CertificateError, + _CertificateError, ConnectionFailure, ConfigurationError, InvalidOperation, @@ -1013,7 +1013,7 @@ def _create_connection(address, options): def _configured_socket(address, options): """Given (host, port) and PoolOptions, return a configured socket. - Can raise socket.error, ConnectionFailure, or CertificateError. + Can raise socket.error, ConnectionFailure, or _CertificateError. Sets socket's SSL and timeout options. """ @@ -1034,9 +1034,9 @@ def _configured_socket(address, options): sock = ssl_context.wrap_socket(sock, server_hostname=host) else: sock = ssl_context.wrap_socket(sock) - except CertificateError: + except _CertificateError: sock.close() - # Raise CertificateError directly like we do after match_hostname + # Raise _CertificateError directly like we do after match_hostname # below. raise except (IOError, OSError, _SSLError) as exc: @@ -1050,7 +1050,7 @@ def _configured_socket(address, options): options.ssl_match_hostname): try: ssl.match_hostname(sock.getpeercert(), hostname=host) - except CertificateError: + except _CertificateError: sock.close() raise @@ -1310,7 +1310,7 @@ def remove_stale_sockets(self, reference_generation, all_credentials): def connect(self, all_credentials=None): """Connect to Mongo and return a new SocketInfo. - Can raise ConnectionFailure or CertificateError. + Can raise ConnectionFailure. Note that the pool does not keep a reference to the socket -- you must call return_socket() when you're done with it. diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 2118106e63..f7c53a59e5 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -35,7 +35,7 @@ VerificationError as _SIVerificationError) from pymongo.errors import ( - CertificateError as _CertificateError, + _CertificateError, ConfigurationError as _ConfigurationError) from pymongo.ocsp_support import ( _load_trusted_ca_certs, From 65aa7c86d52800c0ff529c213d836d9c42282b7b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 12 Aug 2021 09:44:52 -0700 Subject: [PATCH 0438/2111] PYTHON-2850 Remove map_reduce/inline_map_reduce (#703) --- doc/api/pymongo/collection.rst | 2 - doc/changelog.rst | 2 + doc/examples/aggregation.rst | 96 --------------------- doc/migrate-to-pymongo4.rst | 14 +++ pymongo/collection.py | 150 --------------------------------- pymongo/mongo_client.py | 8 +- test/test_collation.py | 6 -- test/test_collection.py | 97 --------------------- test/test_common.py | 42 --------- test/test_custom_types.py | 34 -------- test/test_read_concern.py | 28 ------ test/test_read_preferences.py | 10 --- test/test_session.py | 27 ------ test/test_transactions.py | 2 - test/utils_spec_runner.py | 5 +- 15 files changed, 21 insertions(+), 502 deletions(-) diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 494a296610..3e8188b281 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -64,5 +64,3 @@ .. automethod:: drop .. automethod:: rename .. automethod:: options - .. automethod:: map_reduce - .. automethod:: inline_map_reduce diff --git a/doc/changelog.rst b/doc/changelog.rst index ff3c421e62..3bfb2f6f28 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -59,6 +59,8 @@ Breaking Changes in 4.0 :class:`pymongo.bulk.BulkOperationBuilder`. Use :meth:`pymongo.collection.Collection.bulk_write` instead. - Removed :meth:`pymongo.collection.Collection.group`. +- Removed :meth:`pymongo.collection.Collection.map_reduce` and + :meth:`pymongo.collection.Collection.inline_map_reduce`. - Removed the ``useCursor`` option for :meth:`~pymongo.collection.Collection.aggregate`. - Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor`. Use diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index a2a7214b36..738b09485a 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -75,99 +75,3 @@ sub-fields into the top-level of results. .. seealso:: The full documentation for MongoDB's `aggregation framework `_ - -Map/Reduce ----------- - -Another option for aggregation is to use the map reduce framework. Here we -will define **map** and **reduce** functions to also count the number of -occurrences for each tag in the ``tags`` array, across the entire collection. - -Our **map** function just emits a single `(key, 1)` pair for each tag in -the array: - -.. doctest:: - - >>> from bson.code import Code - >>> mapper = Code(""" - ... function () { - ... this.tags.forEach(function(z) { - ... emit(z, 1); - ... }); - ... } - ... """) - -The **reduce** function sums over all of the emitted values for a given key: - -.. doctest:: - - >>> reducer = Code(""" - ... function (key, values) { - ... var total = 0; - ... for (var i = 0; i < values.length; i++) { - ... total += values[i]; - ... } - ... return total; - ... } - ... """) - -.. note:: We can't just return ``values.length`` as the **reduce** function - might be called iteratively on the results of other reduce steps. - -Finally, we call :meth:`~pymongo.collection.Collection.map_reduce` and -iterate over the result collection: - -.. doctest:: - - >>> result = db.things.map_reduce(mapper, reducer, "myresults") - >>> for doc in result.find().sort("_id"): - ... pprint.pprint(doc) - ... - {'_id': 'cat', 'value': 3.0} - {'_id': 'dog', 'value': 2.0} - {'_id': 'mouse', 'value': 1.0} - -Advanced Map/Reduce -------------------- - -PyMongo's API supports all of the features of MongoDB's map/reduce engine. -One interesting feature is the ability to get more detailed results when -desired, by passing `full_response=True` to -:meth:`~pymongo.collection.Collection.map_reduce`. This returns the full -response to the map/reduce command, rather than just the result collection: - -.. doctest:: - - >>> pprint.pprint( - ... db.things.map_reduce(mapper, reducer, "myresults", full_response=True)) - {...'ok': 1.0,... 'result': 'myresults'...} - -All of the optional map/reduce parameters are also supported, simply pass them -as keyword arguments. In this example we use the `query` parameter to limit the -documents that will be mapped over: - -.. doctest:: - - >>> results = db.things.map_reduce( - ... mapper, reducer, "myresults", query={"x": {"$lt": 2}}) - >>> for doc in results.find().sort("_id"): - ... pprint.pprint(doc) - ... - {'_id': 'cat', 'value': 1.0} - {'_id': 'dog', 'value': 1.0} - -You can use :class:`~bson.son.SON` or :class:`collections.OrderedDict` to -specify a different database to store the result collection: - -.. doctest:: - - >>> from bson.son import SON - >>> pprint.pprint( - ... db.things.map_reduce( - ... mapper, - ... reducer, - ... out=SON([("replace", "results"), ("db", "outdb")]), - ... full_response=True)) - {...'ok': 1.0,... 'result': {'collection': 'results', 'db': 'outdb'}...} - -.. seealso:: The full list of options for MongoDB's `map reduce engine `_ diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 2f8f3395e6..ffc554a6f4 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -468,6 +468,20 @@ instead. .. _group command: https://docs.mongodb.com/manual/reference/command/group/ +Collection.map_reduce and Collection.inline_map_reduce are removed +.................................................................. + +Removed :meth:`pymongo.collection.Collection.map_reduce` and +:meth:`pymongo.collection.Collection.inline_map_reduce`. +Migrate to :meth:`~pymongo.collection.Collection.aggregate` or run the +`mapReduce command`_ directly with :meth:`~pymongo.database.Database.command` +instead. For more guidance on this migration see: + +- https://docs.mongodb.com/manual/reference/map-reduce-to-aggregation-pipeline/ +- https://docs.mongodb.com/manual/reference/aggregation-commands-comparison/ + +.. _mapReduce command: https://docs.mongodb.com/manual/reference/command/mapReduce/ + Collection.ensure_index is removed .................................. diff --git a/pymongo/collection.py b/pymongo/collection.py index 09a641eb78..33959ebb9d 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2340,156 +2340,6 @@ def _cmd(session, server, sock_info, slave_ok): return self.__database.client._retryable_read( _cmd, self._read_preference_for(session), session) - def _map_reduce(self, map, reduce, out, session, read_pref, **kwargs): - """Internal mapReduce helper.""" - cmd = SON([("mapReduce", self.__name), - ("map", map), - ("reduce", reduce), - ("out", out)]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd.update(kwargs) - - inline = 'inline' in out - - if inline: - user_fields = {'results': 1} - else: - user_fields = None - - read_pref = ((session and session._txn_read_preference()) - or read_pref) - - with self.__database.client._socket_for_reads(read_pref, session) as ( - sock_info, slave_ok): - if (sock_info.max_wire_version >= 4 and - ('readConcern' not in cmd) and - inline): - read_concern = self.read_concern - else: - read_concern = None - if 'writeConcern' not in cmd and not inline: - write_concern = self._write_concern_for(session) - else: - write_concern = None - - return self._command( - sock_info, cmd, slave_ok, read_pref, - read_concern=read_concern, - write_concern=write_concern, - collation=collation, session=session, - user_fields=user_fields) - - def map_reduce(self, map, reduce, out, full_response=False, session=None, - **kwargs): - """Perform a map/reduce operation on this collection. - - If `full_response` is ``False`` (default) returns a - :class:`~pymongo.collection.Collection` instance containing - the results of the operation. Otherwise, returns the full - response from the server to the `map reduce command`_. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `out`: output collection name or `out object` (dict). See - the `map reduce command`_ documentation for available options. - Note: `out` options are order sensitive. :class:`~bson.son.SON` - can be used to specify multiple options. - e.g. SON([('replace', ), ('db', )]) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.map_reduce(map, reduce, "myresults", limit=2) - - .. note:: The :meth:`map_reduce` method does **not** obey the - :attr:`read_preference` of this :class:`Collection`. To run - mapReduce on a secondary use the :meth:`inline_map_reduce` method - instead. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation (if the - output is not inline) when using MongoDB >= 3.4. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - .. seealso:: :doc:`/examples/aggregation` - - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 2.2 - Removed deprecated arguments: merge_output and reduce_output - - .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/ - - .. mongodoc:: mapreduce - - """ - if not isinstance(out, (str, abc.Mapping)): - raise TypeError("'out' must be an instance of str or a mapping") - - response = self._map_reduce(map, reduce, out, session, - ReadPreference.PRIMARY, **kwargs) - - if full_response or not response.get('result'): - return response - elif isinstance(response['result'], dict): - dbase = response['result']['db'] - coll = response['result']['collection'] - return self.__database.client[dbase][coll] - else: - return self.__database[response["result"]] - - def inline_map_reduce(self, map, reduce, full_response=False, session=None, - **kwargs): - """Perform an inline map/reduce operation on this collection. - - Perform the map/reduce operation on the server in RAM. A result - collection is not created. The result set is returned as a list - of documents. - - If `full_response` is ``False`` (default) returns the - result documents in a list. Otherwise, returns the full - response from the server to the `map reduce command`_. - - The :meth:`inline_map_reduce` method obeys the :attr:`read_preference` - of this :class:`Collection`. - - :Parameters: - - `map`: map function (as a JavaScript string) - - `reduce`: reduce function (as a JavaScript string) - - `full_response` (optional): if ``True``, return full response to - this command - otherwise just return the result collection - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): additional arguments to the - `map reduce command`_ may be passed as keyword arguments to this - helper method, e.g.:: - - >>> db.test.inline_map_reduce(map, reduce, limit=2) - - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the `collation` option. - """ - res = self._map_reduce(map, reduce, {"inline": 1}, session, - self.read_preference, **kwargs) - - if full_response: - return res - else: - return res.get("results") - def _write_concern_for_cmd(self, cmd, session): raw_wc = cmd.get('writeConcern') if raw_wc is not None: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 0c1361e344..4608610058 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -297,11 +297,9 @@ def __init__( :meth:`pymongo.mongo_client.MongoClient.watch`, and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. - Unsupported read operations include, but are not limited to: - :meth:`~pymongo.collection.Collection.map_reduce`, - :meth:`~pymongo.collection.Collection.inline_map_reduce`, - :meth:`~pymongo.database.Database.command`, - and any getMore operation on a cursor. + Unsupported read operations include, but are not limited to + :meth:`~pymongo.database.Database.command` and any getMore + operation on a cursor. Enabling retryable reads makes applications more resilient to transient errors such as network failures, database upgrades, and diff --git a/test/test_collation.py b/test/test_collation.py index f5c64bb022..d352fccc17 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -180,12 +180,6 @@ def test_explain_command(self): self.collation.document, self.last_command_started()['explain']['collation']) - @raisesConfigurationErrorForOldMongoDB - def test_map_reduce(self): - self.db.test.map_reduce('function() {}', 'function() {}', 'output', - collation=self.collation) - self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_delete(self): self.db.test.delete_one({'foo': 42}, collation=self.collation) diff --git a/test/test_collection.py b/test/test_collection.py index 84dca73dc9..b4bde0c68b 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -28,7 +28,6 @@ from bson import encode from bson.raw_bson import RawBSONDocument from bson.regex import Regex -from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId from bson.son import SON @@ -2015,102 +2014,6 @@ def test_insert_many_large_batch(self): db.collection_4.drop() - def test_map_reduce(self): - db = self.db - db.drop_collection("test") - - db.test.insert_one({"id": 1, "tags": ["dog", "cat"]}) - db.test.insert_one({"id": 2, "tags": ["cat"]}) - db.test.insert_one({"id": 3, "tags": ["mouse", "cat", "dog"]}) - db.test.insert_one({"id": 4, "tags": []}) - - map = Code("function () {" - " this.tags.forEach(function(z) {" - " emit(z, 1);" - " });" - "}") - reduce = Code("function (key, values) {" - " var total = 0;" - " for (var i = 0; i < values.length; i++) {" - " total += values[i];" - " }" - " return total;" - "}") - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - - db.test.insert_one({"id": 5, "tags": ["hampster"]}) - result = db.test.map_reduce(map, reduce, out='mrunittests') - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - db.test.delete_one({"id": 5}) - - result = db.test.map_reduce(map, reduce, - out={'merge': 'mrunittests'}) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce(map, reduce, - out={'reduce': 'mrunittests'}) - - self.assertEqual(6, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(4, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(2, result.find_one({"_id": "mouse"})["value"]) - self.assertEqual(1, result.find_one({"_id": "hampster"})["value"]) - - result = db.test.map_reduce( - map, - reduce, - out={'replace': 'mrunittests'} - ) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - - # Create the output database. - db.client.mrtestdb.mrunittests.insert_one({}) - result = db.test.map_reduce(map, reduce, - out=SON([('replace', 'mrunittests'), - ('db', 'mrtestdb') - ])) - self.assertEqual(3, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(2, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(1, result.find_one({"_id": "mouse"})["value"]) - self.client.drop_database('mrtestdb') - - full_result = db.test.map_reduce(map, reduce, - out='mrunittests', full_response=True) - self.assertEqual('mrunittests', full_result["result"]) - if client_context.version < (4, 3): - self.assertEqual(6, full_result["counts"]["emit"]) - - result = db.test.map_reduce(map, reduce, out='mrunittests', limit=2) - self.assertEqual(2, result.find_one({"_id": "cat"})["value"]) - self.assertEqual(1, result.find_one({"_id": "dog"})["value"]) - self.assertEqual(None, result.find_one({"_id": "mouse"})) - - result = db.test.map_reduce(map, reduce, out={'inline': 1}) - self.assertTrue(isinstance(result, dict)) - self.assertTrue('results' in result) - self.assertTrue(result['results'][1]["_id"] in ("cat", - "dog", - "mouse")) - - result = db.test.inline_map_reduce(map, reduce) - self.assertTrue(isinstance(result, list)) - self.assertEqual(3, len(result)) - self.assertTrue(result[1]["_id"] in ("cat", "dog", "mouse")) - - full_result = db.test.inline_map_reduce(map, reduce, - full_response=True) - self.assertEqual(3, len(full_result["results"])) - if client_context.version < (4, 3): - self.assertEqual(6, full_result["counts"]["emit"]) - - with self.write_concern_collection() as coll: - coll.map_reduce(map, reduce, 'output') - def test_messages_with_unicode_collection_names(self): db = self.db diff --git a/test/test_common.py b/test/test_common.py index 896df50e56..c6ab4182d4 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -20,7 +20,6 @@ sys.path[0:0] = [""] from bson.binary import Binary, PYTHON_LEGACY, STANDARD -from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.errors import OperationFailure @@ -107,47 +106,6 @@ def test_uuid_representation(self): update={'$set': {'i': 7}}, query={'_id': Binary.from_uuid(uu, PYTHON_LEGACY)})['value']['i']) - # Test (inline)_map_reduce - coll.drop() - coll.insert_one({"_id": uu, "x": 1, "tags": ["dog", "cat"]}) - coll.insert_one({"_id": uuid.uuid4(), "x": 3, - "tags": ["mouse", "cat", "dog"]}) - - map = Code("function () {" - " this.tags.forEach(function(z) {" - " emit(z, 1);" - " });" - "}") - - reduce = Code("function (key, values) {" - " var total = 0;" - " for (var i = 0; i < values.length; i++) {" - " total += values[i];" - " }" - " return total;" - "}") - - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - q = {"_id": uu} - result = coll.inline_map_reduce(map, reduce, query=q) - self.assertEqual([], result) - - result = coll.map_reduce(map, reduce, "results", query=q) - self.assertEqual(0, self.db.results.count_documents({})) - - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - q = {"_id": uu} - result = coll.inline_map_reduce(map, reduce, query=q) - self.assertEqual(2, len(result)) - - result = coll.map_reduce(map, reduce, "results", query=q) - self.assertEqual(2, self.db.results.count_documents({})) - - self.db.drop_collection("result") - coll.drop() - def test_write_concern(self): c = rs_or_single_client(connect=False) self.assertEqual(WriteConcern(), c.write_concern) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 89f530c951..619e4c10df 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -34,7 +34,6 @@ _BUILT_IN_TYPES, _dict_to_bson, _bson_to_dict) -from bson.code import Code from bson.codec_options import (CodecOptions, TypeCodec, TypeDecoder, TypeEncoder, TypeRegistry) from bson.errors import InvalidDocument @@ -622,39 +621,6 @@ def test_distinct_w_custom_type(self): self.assertEqual(values, test.distinct("a")) - def test_map_reduce_w_custom_type(self): - test = self.db.get_collection( - 'test', codec_options=UPPERSTR_DECODER_CODECOPTS) - - test.insert_many([ - {'_id': 1, 'sku': 'abcd', 'qty': 1}, - {'_id': 2, 'sku': 'abcd', 'qty': 2}, - {'_id': 3, 'sku': 'abcd', 'qty': 3}]) - - map = Code("function () {" - " emit(this.sku, this.qty);" - "}") - reduce = Code("function (key, values) {" - " return Array.sum(values);" - "}") - - result = test.map_reduce(map, reduce, out={'inline': 1}) - self.assertTrue(isinstance(result, dict)) - self.assertTrue('results' in result) - self.assertEqual(result['results'][0], {'_id': 'ABCD', 'value': 6}) - - result = test.inline_map_reduce(map, reduce) - self.assertTrue(isinstance(result, list)) - self.assertEqual(1, len(result)) - self.assertEqual(result[0]["_id"], 'ABCD') - - full_result = test.inline_map_reduce(map, reduce, - full_response=True) - result = full_result['results'] - self.assertTrue(isinstance(result, list)) - self.assertEqual(1, len(result)) - self.assertEqual(result[0]["_id"], 'ABCD') - def test_find_one_and__w_custom_type_decoder(self): db = self.db c = db.get_collection('test', codec_options=UNINT_DECODER_CODECOPTS) diff --git a/test/test_read_concern.py b/test/test_read_concern.py index d330a741bb..0b9c742a11 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -122,31 +122,3 @@ def test_aggregate_out(self): else: self.assertNotIn('readConcern', self.listener.results['started'][0].command) - - def test_map_reduce_out(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - coll.map_reduce('function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }', - out='output_collection') - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) - - if client_context.version.at_least(3, 1, 9, -1): - self.listener.results.clear() - coll.map_reduce( - 'function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }', - out={'inline': 1}) - self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) - - @client_context.require_version_min(3, 1, 9, -1) - def test_inline_map_reduce(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.inline_map_reduce( - 'function() { emit(this._id, this.value); }', - 'function(key, values) { return 42; }')) - self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index c40bfea236..6c57a6ce9b 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -420,16 +420,6 @@ def test_create_collection(self): lambda: self.c.pymongo_test.create_collection( 'some_collection%s' % random.randint(0, sys.maxsize))) - def test_map_reduce(self): - self._test_coll_helper(False, self.c.pymongo_test.test, 'map_reduce', - 'function() { }', 'function() { }', - {'inline': 1}) - - def test_inline_map_reduce(self): - self._test_coll_helper(True, self.c.pymongo_test.test, - 'inline_map_reduce', - 'function() { }', 'function() { }') - def test_count_documents(self): self._test_coll_helper( True, self.c.pymongo_test.test, 'count_documents', {}) diff --git a/test/test_session.py b/test/test_session.py index 6103ecfb65..7a78ca72d4 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -265,8 +265,6 @@ def collection_write_ops(coll): (coll.update_many, [{}, {'$set': {'a': 1}}], {}), (coll.delete_one, [{}], {}), (coll.delete_many, [{}], {}), - (coll.map_reduce, - ['function() {}', 'function() {}', 'output'], {}), (coll.find_one_and_replace, [{}, {}], {}), (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), (coll.find_one_and_delete, [{}, {}], {}), @@ -290,7 +288,6 @@ def test_collection(self): (coll.distinct, ['a'], {}), (coll.find_one, [], {}), (coll.count_documents, [{}], {}), - (coll.inline_map_reduce, ['function() {}', 'function() {}'], {}), (coll.list_indexes, [], {}), (coll.index_information, [], {}), (coll.options, [], {}), @@ -819,22 +816,6 @@ def test_reads(self): lambda coll, session: list(coll.find_raw_batches( {}, session=session))) - # SERVER-40938 removed support for casually consistent mapReduce. - map_reduce_exc = None - if client_context.version.at_least(4, 1, 12): - map_reduce_exc = OperationFailure - # SERVER-44635 The mapReduce in aggregation project added back - # support for casually consistent mapReduce. - if client_context.version < (4, 3): - self._test_reads( - lambda coll, session: coll.map_reduce( - 'function() {}', 'function() {}', 'inline', session=session), - exception=map_reduce_exc) - self._test_reads( - lambda coll, session: coll.inline_map_reduce( - 'function() {}', 'function() {}', session=session), - exception=map_reduce_exc) - self.assertRaises( ConfigurationError, self._test_reads, @@ -951,14 +932,6 @@ def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( lambda coll, session: coll.find({}, session=session).explain()) - @client_context.require_no_standalone - @unittest.skipIf(client_context.serverless, - "Serverless does not support mapReduce") - def test_writes_do_not_include_read_concern_map_reduce(self): - self._test_no_read_concern( - lambda coll, session: coll.map_reduce( - 'function() {}', 'function() {}', 'mrout', session=session)) - @client_context.require_no_standalone @client_context.require_version_max(4, 1, 0) def test_aggregate_out_does_not_include_read_concern(self): diff --git a/test/test_transactions.py b/test/test_transactions.py index 1e0318dd74..33a9186e83 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -135,8 +135,6 @@ def test_transaction_write_concern_override(self): (client.drop_database, [db.name], {}), (db.drop_collection, ['collection'], {}), (coll.drop, [], {}), - (coll.map_reduce, - ['function() {}', 'function() {}', 'output'], {}), (coll.rename, ['collection2'], {}), # Drop collection2 between tests of "rename", above. (coll.database.drop_collection, ['collection2'], {}), diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index c16e17c5e9..f3ab3b8952 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -256,6 +256,8 @@ def run_operation(self, sessions, collection, operation): name = 'open_download_stream_by_name' elif name == 'download': name = 'open_download_stream' + elif name == 'map_reduce': + self.skipTest('PyMongo does not support mapReduce') elif name == 'count': self.skipTest('PyMongo does not support count') @@ -312,9 +314,6 @@ def run_operation(self, sessions, collection, operation): arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY) return out.find() - if name == "map_reduce": - if isinstance(result, dict) and 'results' in result: - return result['results'] if 'download' in name: result = Binary(result.read()) From 6a18027db8a3ff3ddff7b04a5c5f91c0338b44f1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 16 Aug 2021 10:27:37 -0700 Subject: [PATCH 0439/2111] PYTHON-2534 Avoid race in test_pool_paused_error_is_retryable (#704) --- test/__init__.py | 6 ++--- test/test_retryable_reads.py | 44 +++++++++++++++++++++------------- test/test_retryable_writes.py | 45 +++++++++++++++++++++-------------- 3 files changed, 57 insertions(+), 38 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 8099cc51fb..11dadd1936 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -154,7 +154,7 @@ def __init__( self.old_min_heartbeat_interval = None self.old_kill_cursor_frequency = None self.old_events_queue_frequency = None - self._enabled = True + self._enabled = False self._stack = None def enable(self): @@ -776,8 +776,8 @@ def require_failCommand_blockConnection(self, func): """ return self._require( lambda: (self.test_commands_enabled and ( - (not self.is_mongos and self.version >= (4, 2, 9))) or - (self.is_mongos and self.version >= (4, 4))), + (not self.is_mongos and self.version >= (4, 2, 9)) or + (self.is_mongos and self.version >= (4, 4)))), "failCommand blockConnection is not supported", func=func) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 963ff3e183..665aa9fd32 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -163,23 +163,33 @@ def test_pool_paused_error_is_retryable(self): maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) self.addCleanup(client.close) - threads = [FindThread(client.pymongo_test.test) for _ in range(2)] - fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['find'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91, - }, - } - with self.fail_point(fail_command): - for thread in threads: - thread.start() - for thread in threads: - thread.join() - for thread in threads: - self.assertTrue(thread.passed) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['find'], + 'blockConnection': True, + 'blockTimeMS': 1000, + 'errorCode': 91, + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break # Via CMAP monitoring, assert that the first check out succeeds. cmap_events = cmap_listener.events_by_type(( diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 464ff39ac4..32e0c32a90 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -515,24 +515,33 @@ def test_pool_paused_error_is_retryable(self): maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) self.addCleanup(client.close) - threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] - fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91, - 'errorLabels': ['RetryableWriteError'], - }, - } - with self.fail_point(fail_command): - for thread in threads: - thread.start() - for thread in threads: - thread.join() - for thread in threads: - self.assertTrue(thread.passed) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + 'mode': {'times': 1}, + 'data': { + 'failCommands': ['insert'], + 'blockConnection': True, + 'blockTimeMS': 1000, + 'errorCode': 91, + 'errorLabels': ['RetryableWriteError'], + }, + } + with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break # Via CMAP monitoring, assert that the first check out succeeds. cmap_events = cmap_listener.events_by_type(( From 3c8b78348d5ba0ae56fd314effed5bc2895d2d21 Mon Sep 17 00:00:00 2001 From: Prashant Mital Date: Wed, 11 Aug 2021 21:27:39 -0700 Subject: [PATCH 0440/2111] PYTHON-2866 Setting tlsDisableOCSPEndpointCheck=false must enable OCSP endpoint check (cherry picked from commit fe1d19dea45c143fa26bdd9c20d9c1087be3a66a) --- pymongo/common.py | 4 +++- test/test_uri_parser.py | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 8b06527562..bf4ed08b13 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -630,12 +630,14 @@ def validate_tzinfo(dummy, value): 'tls': validate_boolean_or_string, 'tlsallowinvalidcertificates': validate_allow_invalid_certs, 'ssl_cert_reqs': validate_cert_reqs, + # Normalized to ssl_match_hostname which is the logical inverse of tlsallowinvalidhostnames 'tlsallowinvalidhostnames': lambda *x: not validate_boolean_or_string(*x), 'ssl_match_hostname': validate_boolean_or_string, 'tlscafile': validate_readable, 'tlscertificatekeyfile': validate_readable, 'tlscertificatekeyfilepassword': validate_string_or_none, - 'tlsdisableocspendpointcheck': validate_boolean_or_string, + # Normalized to ssl_check_ocsp_endpoint which is the logical inverse of tlsdisableocspendpointcheck + 'tlsdisableocspendpointcheck': lambda *x: not validate_boolean_or_string(*x), 'tlsinsecure': validate_boolean_or_string, 'w': validate_non_negative_int_or_basestring, 'wtimeoutms': validate_non_negative_integer, diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 068cf13c0d..59127249ce 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -479,6 +479,16 @@ def test_tlsinsecure_legacy_conflict(self): with self.assertRaises(InvalidURI): parse_uri(uri, validate=False, warn=False, normalize=False) + def test_tlsDisableOCSPEndpointCheck(self): + # check that tlsDisableOCSPEndpointCheck is handled correctly. + uri = "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true" + res = {'ssl_check_ocsp_endpoint': False} + self.assertEqual(res, parse_uri(uri)["options"]) + + uri = "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false" + res = {'ssl_check_ocsp_endpoint': True} + self.assertEqual(res, parse_uri(uri)["options"]) + def test_normalize_options(self): # check that options are converted to their internal names correctly. uri = ("mongodb://example.com/?tls=true&appname=myapp&maxPoolSize=10&" From 09aef81b4ade65c9fbfd456df392dbc6a7cd4152 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 17 Aug 2021 14:53:11 -0700 Subject: [PATCH 0441/2111] DRIVERS-1864 Resync SDAM spec tests (#707) --- test/discovery_and_monitoring/rs/compatible.json | 2 ++ test/discovery_and_monitoring/rs/compatible_unknown.json | 1 + test/discovery_and_monitoring/rs/discover_arbiters.json | 1 + .../rs/discover_arbiters_replicaset.json | 1 + test/discovery_and_monitoring/rs/discover_ghost.json | 1 + .../rs/discover_ghost_replicaset.json | 1 + test/discovery_and_monitoring/rs/discover_hidden.json | 1 + .../rs/discover_hidden_replicaset.json | 1 + test/discovery_and_monitoring/rs/discover_passives.json | 2 ++ .../rs/discover_passives_replicaset.json | 2 ++ test/discovery_and_monitoring/rs/discover_primary.json | 1 + .../rs/discover_primary_replicaset.json | 1 + test/discovery_and_monitoring/rs/discover_rsother.json | 1 + .../rs/discover_rsother_replicaset.json | 2 ++ test/discovery_and_monitoring/rs/discover_secondary.json | 1 + .../rs/discover_secondary_replicaset.json | 1 + test/discovery_and_monitoring/rs/discovery.json | 4 ++++ test/discovery_and_monitoring/rs/equal_electionids.json | 2 ++ .../rs/hosts_differ_from_seeds.json | 1 + test/discovery_and_monitoring/rs/incompatible_arbiter.json | 2 ++ test/discovery_and_monitoring/rs/incompatible_ghost.json | 2 ++ test/discovery_and_monitoring/rs/incompatible_other.json | 2 ++ test/discovery_and_monitoring/rs/ls_timeout.json | 6 ++++++ test/discovery_and_monitoring/rs/member_reconfig.json | 2 ++ test/discovery_and_monitoring/rs/member_standalone.json | 2 ++ test/discovery_and_monitoring/rs/new_primary.json | 2 ++ .../rs/new_primary_new_electionid.json | 3 +++ .../rs/new_primary_new_setversion.json | 3 +++ .../rs/new_primary_wrong_set_name.json | 2 ++ test/discovery_and_monitoring/rs/non_rs_member.json | 1 + test/discovery_and_monitoring/rs/normalize_case.json | 1 + test/discovery_and_monitoring/rs/normalize_case_me.json | 2 ++ test/discovery_and_monitoring/rs/null_election_id.json | 4 ++++ test/discovery_and_monitoring/rs/primary_becomes_ghost.json | 2 ++ .../discovery_and_monitoring/rs/primary_becomes_mongos.json | 2 ++ .../rs/primary_becomes_standalone.json | 1 + .../rs/primary_changes_set_name.json | 2 ++ test/discovery_and_monitoring/rs/primary_disconnect.json | 1 + .../rs/primary_disconnect_electionid.json | 5 +++++ .../rs/primary_disconnect_setversion.json | 5 +++++ .../rs/primary_hint_from_secondary_with_mismatched_me.json | 2 ++ test/discovery_and_monitoring/rs/primary_mismatched_me.json | 1 + .../rs/primary_mismatched_me_not_removed.json | 2 ++ .../rs/primary_reports_new_member.json | 4 ++++ .../rs/primary_to_no_primary_mismatched_me.json | 2 ++ .../discovery_and_monitoring/rs/primary_wrong_set_name.json | 1 + test/discovery_and_monitoring/rs/repeated.json | 4 ++++ test/discovery_and_monitoring/rs/replicaset_rsnp.json | 1 + test/discovery_and_monitoring/rs/response_from_removed.json | 2 ++ test/discovery_and_monitoring/rs/sec_not_auth.json | 2 ++ test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json | 2 ++ .../rs/secondary_mismatched_me.json | 1 + .../rs/secondary_wrong_set_name.json | 1 + .../rs/secondary_wrong_set_name_with_primary.json | 2 ++ .../rs/setversion_without_electionid.json | 2 ++ .../rs/stepdown_change_set_name.json | 2 ++ test/discovery_and_monitoring/rs/too_new.json | 2 ++ test/discovery_and_monitoring/rs/too_old.json | 2 ++ .../discovery_and_monitoring/rs/topology_version_equal.json | 2 ++ .../rs/topology_version_greater.json | 5 +++++ test/discovery_and_monitoring/rs/topology_version_less.json | 2 ++ test/discovery_and_monitoring/rs/unexpected_mongos.json | 1 + .../rs/use_setversion_without_electionid.json | 3 +++ test/discovery_and_monitoring/rs/wrong_set_name.json | 1 + test/discovery_and_monitoring/sharded/compatible.json | 2 ++ .../sharded/discover_single_mongos.json | 1 + .../discovery_and_monitoring/sharded/ls_timeout_mongos.json | 4 ++++ .../discovery_and_monitoring/sharded/mongos_disconnect.json | 3 +++ .../discovery_and_monitoring/sharded/multiple_mongoses.json | 2 ++ .../sharded/non_mongos_removed.json | 2 ++ test/discovery_and_monitoring/sharded/too_new.json | 2 ++ test/discovery_and_monitoring/sharded/too_old.json | 2 ++ test/discovery_and_monitoring/single/compatible.json | 1 + .../single/direct_connection_external_ip.json | 1 + .../single/direct_connection_mongos.json | 1 + .../single/direct_connection_replicaset.json | 1 + .../single/direct_connection_rsarbiter.json | 1 + .../single/direct_connection_rsprimary.json | 1 + .../single/direct_connection_rssecondary.json | 1 + .../single/direct_connection_standalone.json | 1 + .../single/direct_connection_wrong_set_name.json | 2 ++ .../single/discover_standalone.json | 1 + .../single/ls_timeout_standalone.json | 1 + test/discovery_and_monitoring/single/not_ok_response.json | 4 +++- .../discovery_and_monitoring/single/standalone_removed.json | 1 + test/discovery_and_monitoring/single/too_new.json | 1 + test/discovery_and_monitoring/single/too_old.json | 1 + .../single/too_old_then_upgraded.json | 2 ++ test/sdam_monitoring/discovered_standalone.json | 2 +- test/sdam_monitoring/replica_set_with_no_primary.json | 2 +- test/sdam_monitoring/replica_set_with_primary.json | 2 +- test/sdam_monitoring/replica_set_with_removal.json | 2 +- test/sdam_monitoring/required_replica_set.json | 3 ++- test/sdam_monitoring/standalone.json | 3 ++- .../standalone_suppress_equal_description_changes.json | 6 ++++-- 95 files changed, 178 insertions(+), 9 deletions(-) diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json index 5a01fe4935..444b13e9d5 100644 --- a/test/discovery_and_monitoring/rs/compatible.json +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json index 237f663fd1..cf92dd1ed3 100644 --- a/test/discovery_and_monitoring/rs/compatible_unknown.json +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json index f30bfd0b6a..53709b0cee 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json index f7ed29fb68..64fb49f4fc 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json index b4dd6212a7..2e24c83e0b 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost.json +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json index 3e673b739e..cf5fe83a54 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json index 08a3cc1b2d..e4a90f1f9c 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden.json +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hidden": true, diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json index 95346a8380..04420596f0 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hidden": true, diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json index 2c71f461db..30258409f6 100644 --- a/test/discovery_and_monitoring/rs/discover_passives.json +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -43,6 +44,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "passive": true, diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json index c50d94f21d..266eaa5234 100644 --- a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -43,6 +44,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "passive": true, diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json index c322fe5f76..2d1292bbd4 100644 --- a/test/discovery_and_monitoring/rs/discover_primary.json +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json index 0ba8771df1..54dfefba5f 100644 --- a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json index ec50271bde..4ab25667f0 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother.json +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": false, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json index fb8590019a..e3958d70ad 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hidden": true, @@ -24,6 +25,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": false, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json index fb758e31de..22325d4e03 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary.json +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json index 69697114dd..d903b6444d 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/discovery.json b/test/discovery_and_monitoring/rs/discovery.json index 4a489f68ab..50e1269223 100644 --- a/test/discovery_and_monitoring/rs/discovery.json +++ b/test/discovery_and_monitoring/rs/discovery.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", @@ -47,6 +48,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", @@ -91,6 +93,7 @@ "d:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -134,6 +137,7 @@ "c:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json index 0e8efc46cf..17df3207fa 100644 --- a/test/discovery_and_monitoring/rs/equal_electionids.json +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -26,6 +27,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json index 9323768b00..4e02304c61 100644 --- a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json +++ b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json index 7f6279b51a..f0539cb337 100644 --- a/test/discovery_and_monitoring/rs/incompatible_arbiter.json +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "arbiterOnly": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json index c6bc53ca0a..824e953f90 100644 --- a/test/discovery_and_monitoring/rs/incompatible_ghost.json +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 1 diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json index 8870eb6622..6f301ef5de 100644 --- a/test/discovery_and_monitoring/rs/incompatible_other.json +++ b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "hidden": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/ls_timeout.json b/test/discovery_and_monitoring/rs/ls_timeout.json index a04ecaef89..96389d3b76 100644 --- a/test/discovery_and_monitoring/rs/ls_timeout.json +++ b/test/discovery_and_monitoring/rs/ls_timeout.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -53,6 +54,7 @@ "d:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, @@ -90,6 +92,7 @@ "e:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "hosts": [ "a:27017", @@ -136,6 +139,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ @@ -184,6 +188,7 @@ "c:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "setName": "rs", "hidden": true, @@ -226,6 +231,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/member_reconfig.json b/test/discovery_and_monitoring/rs/member_reconfig.json index 2b075241bb..0e2c2c462e 100644 --- a/test/discovery_and_monitoring/rs/member_reconfig.json +++ b/test/discovery_and_monitoring/rs/member_reconfig.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -41,6 +42,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/member_standalone.json b/test/discovery_and_monitoring/rs/member_standalone.json index 15beec0046..0756003a89 100644 --- a/test/discovery_and_monitoring/rs/member_standalone.json +++ b/test/discovery_and_monitoring/rs/member_standalone.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 @@ -32,6 +33,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json index e629e3b28b..ed1a6245f9 100644 --- a/test/discovery_and_monitoring/rs/new_primary.json +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -41,6 +42,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index 6c88dc2399..ccb3a41f75 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -54,6 +55,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -100,6 +102,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index cebdf9ab4e..415a0f66aa 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -54,6 +55,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -100,6 +102,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json index 9bcab322ed..d7b19cfe8f 100644 --- a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -41,6 +42,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/non_rs_member.json b/test/discovery_and_monitoring/rs/non_rs_member.json index 907c1651e0..538077ef09 100644 --- a/test/discovery_and_monitoring/rs/non_rs_member.json +++ b/test/discovery_and_monitoring/rs/non_rs_member.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/test/discovery_and_monitoring/rs/normalize_case.json b/test/discovery_and_monitoring/rs/normalize_case.json index 6cfd75168f..96a944f0c3 100644 --- a/test/discovery_and_monitoring/rs/normalize_case.json +++ b/test/discovery_and_monitoring/rs/normalize_case.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/normalize_case_me.json b/test/discovery_and_monitoring/rs/normalize_case_me.json index c89522275c..ab1720cefc 100644 --- a/test/discovery_and_monitoring/rs/normalize_case_me.json +++ b/test/discovery_and_monitoring/rs/normalize_case_me.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "me": "A:27017", @@ -51,6 +52,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index d3c096597f..62120e8448 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -52,6 +53,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -104,6 +106,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -151,6 +154,7 @@ "c:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json index 3ca8f23786..9c54b39856 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -36,6 +37,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json index 4b33cbea14..ac416e57d5 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -36,6 +37,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json index 9dc8254920..a64524d0ca 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/primary_changes_set_name.json b/test/discovery_and_monitoring/rs/primary_changes_set_name.json index 45434d45f3..bf70ca3014 100644 --- a/test/discovery_and_monitoring/rs/primary_changes_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_changes_set_name.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -36,6 +37,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/primary_disconnect.json b/test/discovery_and_monitoring/rs/primary_disconnect.json index 06a103962a..3db854f085 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index a374e3e4fb..3a80b150fe 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -26,6 +27,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -101,6 +103,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -144,6 +147,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -190,6 +194,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index f1e2c0097c..32e03fb7d4 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -26,6 +27,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -101,6 +103,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -144,6 +147,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -190,6 +194,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json index fb4fe4dc13..bc02cc9571 100644 --- a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "me": "c:27017", @@ -39,6 +40,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "me": "b:27017", "hosts": [ diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_mismatched_me.json index 49ab1bf74e..2d2c0f40d8 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me.json @@ -26,6 +26,7 @@ "a:27017", "b:27017" ], + "helloOk": true, "isWritablePrimary": true, "ok": 1, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json index 41d9452844..4c40093659 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -12,6 +12,7 @@ "localhost:27017", "localhost:27018" ], + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "primary": "localhost:27017", @@ -47,6 +48,7 @@ "localhost:27017", "localhost:27018" ], + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/primary_reports_new_member.json b/test/discovery_and_monitoring/rs/primary_reports_new_member.json index 8b99a4399e..ac0d9374f0 100644 --- a/test/discovery_and_monitoring/rs/primary_reports_new_member.json +++ b/test/discovery_and_monitoring/rs/primary_reports_new_member.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", @@ -42,6 +43,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -75,6 +77,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -113,6 +116,7 @@ "c:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json index 5e63a509be..6dbd73dadc 100644 --- a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -42,6 +43,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "c:27017", diff --git a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json index c87b18d1b2..cc0691fb8c 100644 --- a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json index 1921bba580..610aeae0ac 100644 --- a/test/discovery_and_monitoring/rs/repeated.json +++ b/test/discovery_and_monitoring/rs/repeated.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hidden": true, @@ -45,6 +46,7 @@ "c:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 @@ -72,6 +74,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hidden": true, @@ -109,6 +112,7 @@ "c:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json index 89f0740db9..3148e1c141 100644 --- a/test/discovery_and_monitoring/rs/replicaset_rsnp.json +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/rs/response_from_removed.json b/test/discovery_and_monitoring/rs/response_from_removed.json index f6dfe3b3a6..87a66d9e72 100644 --- a/test/discovery_and_monitoring/rs/response_from_removed.json +++ b/test/discovery_and_monitoring/rs/response_from_removed.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -36,6 +37,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/sec_not_auth.json b/test/discovery_and_monitoring/rs/sec_not_auth.json index 4f448c9068..a39855e654 100644 --- a/test/discovery_and_monitoring/rs/sec_not_auth.json +++ b/test/discovery_and_monitoring/rs/sec_not_auth.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index e3c262bff7..4c1cb011a5 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json index 3c3634e6fa..6f1b9b5986 100644 --- a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -27,6 +27,7 @@ "a:27017", "b:27017" ], + "helloOk": true, "isWritablePrimary": false, "ok": 1, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json index be621c06c8..8d2f152f59 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json index eebc196447..b7ef2d6d6a 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -41,6 +42,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 59333b533a..2f68287f1d 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -46,6 +47,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json index ce20d1a542..e9075f97f2 100644 --- a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json +++ b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -36,6 +37,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/rs/too_new.json b/test/discovery_and_monitoring/rs/too_new.json index 97de822036..0433d27a36 100644 --- a/test/discovery_and_monitoring/rs/too_new.json +++ b/test/discovery_and_monitoring/rs/too_new.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json index 5dae8a2b35..461d00acc4 100644 --- a/test/discovery_and_monitoring/rs/too_old.json +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "hosts": [ @@ -22,6 +23,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "setName": "rs", diff --git a/test/discovery_and_monitoring/rs/topology_version_equal.json b/test/discovery_and_monitoring/rs/topology_version_equal.json index a2e81e338d..d3baa13479 100644 --- a/test/discovery_and_monitoring/rs/topology_version_equal.json +++ b/test/discovery_and_monitoring/rs/topology_version_equal.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -52,6 +53,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/topology_version_greater.json b/test/discovery_and_monitoring/rs/topology_version_greater.json index c1bc773de4..f296ccee62 100644 --- a/test/discovery_and_monitoring/rs/topology_version_greater.json +++ b/test/discovery_and_monitoring/rs/topology_version_greater.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -52,6 +53,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -101,6 +103,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -150,6 +153,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -184,6 +188,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/topology_version_less.json b/test/discovery_and_monitoring/rs/topology_version_less.json index 9376065646..435337ff25 100644 --- a/test/discovery_and_monitoring/rs/topology_version_less.json +++ b/test/discovery_and_monitoring/rs/topology_version_less.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017" @@ -52,6 +53,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/unexpected_mongos.json b/test/discovery_and_monitoring/rs/unexpected_mongos.json index f74d298969..cc19a961f2 100644 --- a/test/discovery_and_monitoring/rs/unexpected_mongos.json +++ b/test/discovery_and_monitoring/rs/unexpected_mongos.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 6e9c2370dc..421ff57c8d 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -54,6 +55,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -94,6 +96,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/rs/wrong_set_name.json b/test/discovery_and_monitoring/rs/wrong_set_name.json index cc4e3e963c..9654ff7b79 100644 --- a/test/discovery_and_monitoring/rs/wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/wrong_set_name.json @@ -8,6 +8,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json index 3f6df3e09e..e531db97f9 100644 --- a/test/discovery_and_monitoring/sharded/compatible.json +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, @@ -18,6 +19,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json index b3d07f893f..9e877a0840 100644 --- a/test/discovery_and_monitoring/sharded/discover_single_mongos.json +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json index 7a46adee51..93fa398d52 100644 --- a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json +++ b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, @@ -19,6 +20,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, @@ -49,6 +51,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, @@ -60,6 +63,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/sharded/mongos_disconnect.json b/test/discovery_and_monitoring/sharded/mongos_disconnect.json index f0f98648a7..50a93eda5f 100644 --- a/test/discovery_and_monitoring/sharded/mongos_disconnect.json +++ b/test/discovery_and_monitoring/sharded/mongos_disconnect.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, @@ -18,6 +19,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, @@ -70,6 +72,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/sharded/multiple_mongoses.json b/test/discovery_and_monitoring/sharded/multiple_mongoses.json index 7539836099..311592d715 100644 --- a/test/discovery_and_monitoring/sharded/multiple_mongoses.json +++ b/test/discovery_and_monitoring/sharded/multiple_mongoses.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, @@ -18,6 +19,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/sharded/non_mongos_removed.json b/test/discovery_and_monitoring/sharded/non_mongos_removed.json index a9c3a4b6a3..d74375ebbf 100644 --- a/test/discovery_and_monitoring/sharded/non_mongos_removed.json +++ b/test/discovery_and_monitoring/sharded/non_mongos_removed.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, @@ -18,6 +19,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "b:27017" diff --git a/test/discovery_and_monitoring/sharded/too_new.json b/test/discovery_and_monitoring/sharded/too_new.json index b4f9f14951..4b997d2163 100644 --- a/test/discovery_and_monitoring/sharded/too_new.json +++ b/test/discovery_and_monitoring/sharded/too_new.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 999, @@ -18,6 +19,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid" } diff --git a/test/discovery_and_monitoring/sharded/too_old.json b/test/discovery_and_monitoring/sharded/too_old.json index 41ffed925d..688e1db0f5 100644 --- a/test/discovery_and_monitoring/sharded/too_old.json +++ b/test/discovery_and_monitoring/sharded/too_old.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, @@ -18,6 +19,7 @@ "b:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid" } diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json index 9c91ae1db1..302927598c 100644 --- a/test/discovery_and_monitoring/single/compatible.json +++ b/test/discovery_and_monitoring/single/compatible.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json b/test/discovery_and_monitoring/single/direct_connection_external_ip.json index fcc2e83667..90676a8f9b 100644 --- a/test/discovery_and_monitoring/single/direct_connection_external_ip.json +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "b:27017" diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json index 2cb39ca9bb..25fe965185 100644 --- a/test/discovery_and_monitoring/single/direct_connection_mongos.json +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json index a9fb7ef9fa..cd8660888a 100644 --- a/test/discovery_and_monitoring/single/direct_connection_replicaset.json +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json index 8adc36d44e..e204956056 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "arbiterOnly": true, "hosts": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json index ccd89e279b..409e8502b3 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json index 588b3a88ad..305f283b52 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": false, "secondary": true, "hosts": [ diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json index 2ec2f575fe..b47278482a 100644 --- a/test/discovery_and_monitoring/single/direct_connection_standalone.json +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json index 429bd561e2..71080e6810 100644 --- a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -36,6 +37,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json index 595a2d1fa9..858cbdaf63 100644 --- a/test/discovery_and_monitoring/single/discover_standalone.json +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/ls_timeout_standalone.json b/test/discovery_and_monitoring/single/ls_timeout_standalone.json index e48f6151f9..87b3e4e8a1 100644 --- a/test/discovery_and_monitoring/single/ls_timeout_standalone.json +++ b/test/discovery_and_monitoring/single/ls_timeout_standalone.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, diff --git a/test/discovery_and_monitoring/single/not_ok_response.json b/test/discovery_and_monitoring/single/not_ok_response.json index 0223459e68..8e7c2a10e3 100644 --- a/test/discovery_and_monitoring/single/not_ok_response.json +++ b/test/discovery_and_monitoring/single/not_ok_response.json @@ -1,5 +1,5 @@ { - "description": "Handle a not-ok hello response", + "description": "Handle a not-ok isWritablePrimary response", "uri": "mongodb://a", "phases": [ { @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 @@ -17,6 +18,7 @@ "a:27017", { "ok": 0, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/standalone_removed.json b/test/discovery_and_monitoring/single/standalone_removed.json index b88ee89531..57f8f861b1 100644 --- a/test/discovery_and_monitoring/single/standalone_removed.json +++ b/test/discovery_and_monitoring/single/standalone_removed.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/discovery_and_monitoring/single/too_new.json b/test/discovery_and_monitoring/single/too_new.json index 5320c4a261..8dd57d3348 100644 --- a/test/discovery_and_monitoring/single/too_new.json +++ b/test/discovery_and_monitoring/single/too_new.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 999, "maxWireVersion": 1000 diff --git a/test/discovery_and_monitoring/single/too_old.json b/test/discovery_and_monitoring/single/too_old.json index 55ef82acb7..8c027e01db 100644 --- a/test/discovery_and_monitoring/single/too_old.json +++ b/test/discovery_and_monitoring/single/too_old.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true } ] diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json index 2bad1e40bf..58ae7d9de4 100644 --- a/test/discovery_and_monitoring/single/too_old_then_upgraded.json +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true } ] @@ -31,6 +32,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json index caf31b3391..dd8f7fc51e 100644 --- a/test/sdam_monitoring/discovered_standalone.json +++ b/test/sdam_monitoring/discovered_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json index 768aa7a3e1..950e32efe1 100644 --- a/test/sdam_monitoring/replica_set_with_no_primary.json +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -19,7 +19,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json index da66403541..2ad94d6e6a 100644 --- a/test/sdam_monitoring/replica_set_with_primary.json +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_removal.json b/test/sdam_monitoring/replica_set_with_removal.json index 16941021a3..ae28faa30c 100644 --- a/test/sdam_monitoring/replica_set_with_removal.json +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -69,7 +69,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ], [ diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json index d86b1dfcc6..401c5d99c5 100644 --- a/test/sdam_monitoring/required_replica_set.json +++ b/test/sdam_monitoring/required_replica_set.json @@ -8,6 +8,7 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "setName": "rs", "setVersion": 1, @@ -17,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 69a100f454..821a1525d4 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -8,9 +8,10 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json index 1771f85fc0..5958e2d26c 100644 --- a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -8,18 +8,20 @@ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ], [ "a:27017", { "ok": 1, + "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 4 + "maxWireVersion": 6 } ] ], From 69c69a6bfbaac61263b7ba93483983510e469309 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 19 Aug 2021 10:39:26 -0700 Subject: [PATCH 0442/2111] PYTHON-2862 Remove versionchanged info for PyMongo <3.0 (#709) --- bson/__init__.py | 18 ------------------ bson/binary.py | 3 --- bson/json_util.py | 23 ----------------------- bson/max_key.py | 6 +----- bson/min_key.py | 6 +----- pymongo/collection.py | 26 -------------------------- pymongo/cursor.py | 3 --- pymongo/database.py | 18 ------------------ pymongo/uri_parser.py | 3 --- 9 files changed, 2 insertions(+), 104 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 298112d73c..4a511f53ca 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -928,15 +928,6 @@ def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. - - .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. - - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 """ data, view = get_data_and_view(data) if not isinstance(codec_options, CodecOptions): @@ -1191,15 +1182,6 @@ def decode(self, codec_options=DEFAULT_CODEC_OPTIONS): Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. - - .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. - - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 """ return decode(self, codec_options) diff --git a/bson/binary.py b/bson/binary.py index 7110fede26..39bc69c049 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -49,9 +49,6 @@ This is the new BSON binary subtype for UUIDs. The current default is :data:`OLD_UUID_SUBTYPE`. - -.. versionchanged:: 2.1 - Changed to subtype 4. """ diff --git a/bson/json_util.py b/bson/json_util.py index 7b42d5668f..05fa0e2ec0 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -84,25 +84,6 @@ Extended JSON converter for Python built on top of `libbson `_. `python-bsonjs` works best with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`. - -.. versionchanged:: 2.8 - The output format for :class:`~bson.timestamp.Timestamp` has changed from - '{"t": , "i": }' to '{"$timestamp": {"t": , "i": }}'. - This new format will be decoded to an instance of - :class:`~bson.timestamp.Timestamp`. The old format will continue to be - decoded to a python dict as before. Encoding to the old format is no longer - supported as it was never correct and loses type information. - Added support for $numberLong and $undefined - new in MongoDB 2.6 - and - parsing $date in ISO-8601 format. - -.. versionchanged:: 2.7 - Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef - instances. - -.. versionchanged:: 2.3 - Added dumps and loads helpers to automatically handle conversion to and - from json and supports :class:`~bson.binary.Binary` and - :class:`~bson.code.Code` """ import base64 @@ -401,10 +382,6 @@ def dumps(obj, *args, **kwargs): .. versionchanged:: 3.4 Accepts optional parameter `json_options`. See :class:`JSONOptions`. - - .. versionchanged:: 2.7 - Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef - instances. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) return json.dumps(_json_convert(obj, json_options), *args, **kwargs) diff --git a/bson/max_key.py b/bson/max_key.py index 7e89dd70de..efdf5c78b9 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -17,11 +17,7 @@ class MaxKey(object): - """MongoDB internal MaxKey type. - - .. versionchanged:: 2.7 - ``MaxKey`` now implements comparison operators. - """ + """MongoDB internal MaxKey type.""" _type_marker = 127 diff --git a/bson/min_key.py b/bson/min_key.py index b03520e9c2..7d2b3a6dd9 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -17,11 +17,7 @@ class MinKey(object): - """MongoDB internal MinKey type. - - .. versionchanged:: 2.7 - ``MinKey`` now implements comparison operators. - """ + """MongoDB internal MinKey type.""" _type_marker = 255 diff --git a/pymongo/collection.py b/pymongo/collection.py index 33959ebb9d..7ddcb96994 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -138,12 +138,6 @@ def __init__(self, database, name, create=False, codec_options=None, collection.__my_collection__ - .. versionchanged:: 2.2 - Removed deprecated argument: options - - .. versionadded:: 2.1 - uuid_subtype attribute - .. mongodoc:: collections """ super(Collection, self).__init__( @@ -1382,19 +1376,6 @@ def find(self, *args, **kwargs): expression object. Soft deprecated the ``manipulate`` option. - .. versionchanged:: 2.7 - Added ``compile_re`` option. If set to False, PyMongo represented - BSON regular expressions as :class:`~bson.regex.Regex` objects - instead of attempting to compile BSON regular expressions as Python - native regular expressions, thus preventing errors for some - incompatible patterns, see `PYTHON-500`_. - - .. versionchanged:: 2.3 - Added the ``tag_sets`` and ``secondary_acceptable_latency_ms`` - parameters. - - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 - .. mongodoc:: find """ @@ -2072,13 +2053,6 @@ def aggregate(self, pipeline, session=None, **kwargs): .. versionchanged:: 3.0 The :meth:`aggregate` method always returns a CommandCursor. The pipeline argument must be a list. - .. versionchanged:: 2.7 - When the cursor option is used, return - :class:`~pymongo.command_cursor.CommandCursor` instead of - :class:`~pymongo.cursor.Cursor`. - .. versionchanged:: 2.6 - Added cursor support. - .. versionadded:: 2.3 .. seealso:: :doc:`/examples/aggregation` diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 7370a585ad..5608c71b24 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -879,9 +879,6 @@ def hint(self, index): :Parameters: - `index`: index to hint on (as an index specifier) - - .. versionchanged:: 2.8 - The :meth:`~hint` method accepts the name of the index. """ self.__check_okay_to_chain() self.__set_hint(index) diff --git a/pymongo/database.py b/pymongo/database.py index 61d87611dc..ea9d4a1dd7 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -299,9 +299,6 @@ def create_collection(self, name, codec_options=None, .. versionchanged:: 3.0 Added the codec_options, read_preference, and write_concern options. - .. versionchanged:: 2.2 - Removed deprecated argument: options - .. _create collection command: https://docs.mongodb.com/manual/reference/command/create """ @@ -588,21 +585,6 @@ def command(self, command, value=1, check=True, BSON regular expression to a Python regular expression object. Added the `codec_options` parameter. - .. versionchanged:: 2.7 - Added `compile_re` option. If set to False, PyMongo represented BSON - regular expressions as :class:`~bson.regex.Regex` objects instead of - attempting to compile BSON regular expressions as Python native - regular expressions, thus preventing errors for some incompatible - patterns, see `PYTHON-500`_. - - .. versionchanged:: 2.3 - Added `tag_sets` and `secondary_acceptable_latency_ms` options. - .. versionchanged:: 2.2 - Added support for `as_class` - the class you want to use for - the resulting documents - - .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500 - .. mongodoc:: commands """ if read_preference is None: diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 6801900c9b..95327eed85 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -44,9 +44,6 @@ def parse_userinfo(userinfo): :Paramaters: - `userinfo`: A string of the form : - - .. versionchanged:: 2.2 - Now uses `urllib.unquote_plus` so `+` characters must be escaped. """ if '@' in userinfo or userinfo.count(':') > 1: raise InvalidURI("Username and password must be escaped according to " From f9bfd11290980205a749b84ccc75f23aaf3d968c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 19 Aug 2021 13:31:46 -0700 Subject: [PATCH 0443/2111] PYTHON-2870 Add support for man/text/latex/etc.. docs output (#708) Regenerate sphinx makefile with sphinx-quickstart 3.5.4. Remove problematic mongodoc sphinx extension. --- bson/dbref.py | 2 +- bson/objectid.py | 2 +- doc/Makefile | 95 +++--------------- doc/conf.py | 3 +- doc/examples/encryption.rst | 2 +- doc/examples/geo.rst | 2 +- doc/examples/high_availability.rst | 2 +- doc/make.bat | 148 +++++++---------------------- doc/mongo_extensions.py | 97 ------------------- gridfs/__init__.py | 8 +- gridfs/grid_file.py | 2 +- pymongo/change_stream.py | 2 +- pymongo/client_session.py | 4 +- pymongo/collection.py | 9 +- pymongo/command_cursor.py | 2 +- pymongo/cursor.py | 10 +- pymongo/database.py | 6 +- pymongo/mongo_client.py | 4 +- 18 files changed, 77 insertions(+), 323 deletions(-) delete mode 100644 doc/mongo_extensions.py diff --git a/bson/dbref.py b/bson/dbref.py index f4395b76cd..2edaf69022 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -42,7 +42,7 @@ def __init__(self, collection, id, database=None, _extra={}, **kwargs): - `**kwargs` (optional): additional keyword arguments will create additional, custom fields - .. mongodoc:: dbrefs + .. seealso:: The MongoDB documentation on `dbrefs `_. """ if not isinstance(collection, str): raise TypeError("collection must be an instance of str") diff --git a/bson/objectid.py b/bson/objectid.py index 6129df35b2..faf8910edc 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -89,7 +89,7 @@ def __init__(self, oid=None): :Parameters: - `oid` (optional): a valid ObjectId. - .. mongodoc:: objectids + .. seealso:: The MongoDB documentation on `ObjectIds`_. .. versionchanged:: 3.8 :class:`~bson.objectid.ObjectId` now implements the `ObjectID diff --git a/doc/Makefile b/doc/Makefile index 9fa6e3a48c..d4bb2cbb9e 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,89 +1,20 @@ -# Makefile for Sphinx documentation +# Minimal makefile for Sphinx documentation # -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . BUILDDIR = _build -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest - +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyMongo.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyMongo.qhc" - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." +.PHONY: help Makefile -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/conf.py b/doc/conf.py index 4ce2b5bc0b..facb74f470 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -14,8 +14,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', - 'sphinx.ext.todo', 'doc.mongo_extensions', - 'sphinx.ext.intersphinx'] + 'sphinx.ext.todo', 'sphinx.ext.intersphinx'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index ba07b37223..e86eb7733d 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -16,7 +16,7 @@ level encryption supports workloads where applications must guarantee that unauthorized parties, including server administrators, cannot read the encrypted data. -.. mongodoc:: client-side-field-level-encryption +.. seealso:: The MongoDB documentation on `Client Side Field Level Encryption `_. Dependencies ------------ diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 5caa36eafc..9fe62f910b 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -10,7 +10,7 @@ Geospatial Indexing Example This example shows how to create and use a :data:`~pymongo.GEO2D` index in PyMongo. To create a spherical (earth-like) geospatial index use :data:`~pymongo.GEOSPHERE` instead. -.. mongodoc:: geo +.. seealso:: The MongoDB documentation on `Geospatial Indexes `_. Creating a Geospatial Index --------------------------- diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index 6a72ba75b8..a5c252f8a3 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -14,7 +14,7 @@ PyMongo makes working with `replica sets replica set and show how to handle both initialization and normal connections with PyMongo. -.. mongodoc:: rs +.. seealso:: The MongoDB documentation on `replication `_. Starting a Replica Set ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/make.bat b/doc/make.bat index 4ccc1590eb..2119f51099 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -1,113 +1,35 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -set SPHINXBUILD=sphinx-build -set BUILDDIR=_build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyMongo.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyMongo.ghc - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -:end +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/mongo_extensions.py b/doc/mongo_extensions.py deleted file mode 100644 index 47ea9b7c89..0000000000 --- a/doc/mongo_extensions.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MongoDB specific extensions to Sphinx.""" - -from docutils import nodes -from docutils.parsers import rst -from sphinx import addnodes - - -class mongodoc(nodes.Admonition, nodes.Element): - pass - - -class mongoref(nodes.reference): - pass - - -def visit_mongodoc_node(self, node): - self.visit_admonition(node, "seealso") - - -def depart_mongodoc_node(self, node): - self.depart_admonition(node) - - -def visit_mongoref_node(self, node): - atts = {"class": "reference external", - "href": node["refuri"], - "name": node["name"]} - self.body.append(self.starttag(node, 'a', '', **atts)) - - -def depart_mongoref_node(self, node): - self.body.append('') - if not isinstance(node.parent, nodes.TextElement): - self.body.append('\n') - - -class MongodocDirective(rst.Directive): - - has_content = True - required_arguments = 0 - optional_arguments = 0 - final_argument_whitespace = False - option_spec = {} - - def run(self): - node = mongodoc() - title = 'The MongoDB documentation on' - node += nodes.title(title, title) - self.state.nested_parse(self.content, self.content_offset, node) - return [node] - - -def process_mongodoc_nodes(app, doctree, fromdocname): - for node in doctree.traverse(mongodoc): - anchor = None - for name in node.parent.parent.traverse(addnodes.desc_signature): - anchor = name["ids"][0] - break - if not anchor: - for name in node.parent.traverse(nodes.section): - anchor = name["ids"][0] - break - for para in node.traverse(nodes.paragraph): - tag = str(list(para.traverse())[1]) - link = mongoref("", "") - link["refuri"] = "http://dochub.mongodb.org/core/%s" % tag - link["name"] = anchor - link.append(nodes.emphasis(tag, tag)) - new_para = nodes.paragraph() - new_para += link - node.replace(para, new_para) - - -def setup(app): - app.add_node(mongodoc, - html=(visit_mongodoc_node, depart_mongodoc_node), - latex=(visit_mongodoc_node, depart_mongodoc_node), - text=(visit_mongodoc_node, depart_mongodoc_node)) - app.add_node(mongoref, - html=(visit_mongoref_node, depart_mongoref_node)) - - app.add_directive("mongodoc", MongodocDirective) - app.connect("doctree-resolved", process_mongodoc_nodes) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index c12f93f1de..2a637398e3 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -17,7 +17,7 @@ The :mod:`gridfs` package is an implementation of GridFS on top of :mod:`pymongo`, exposing a file-like interface. -.. mongodoc:: gridfs +.. seealso:: The MongoDB documentation on `gridfs `_. """ from collections import abc @@ -63,7 +63,7 @@ def __init__(self, database, collection="fs", disable_md5=False): `database` must use an acknowledged :attr:`~pymongo.database.Database.write_concern` - .. mongodoc:: gridfs + .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(database, Database): raise TypeError("database must be an instance of Database") @@ -367,7 +367,7 @@ def find(self, *args, **kwargs): Removed the read_preference, tag_sets, and secondary_acceptable_latency_ms options. .. versionadded:: 2.7 - .. mongodoc:: find + .. seealso:: The MongoDB documentation on `find `_. """ return GridOutCursor(self.__collection, *args, **kwargs) @@ -452,7 +452,7 @@ def __init__(self, db, bucket_name="fs", .. versionadded:: 3.1 - .. mongodoc:: gridfs + .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(db, Database): raise TypeError("database must be an instance of Database") diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 5a48fee176..e5ac598e75 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -816,7 +816,7 @@ def __init__(self, collection, filter=None, skip=0, limit=0, .. versionadded 2.7 - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ _disallow_transactions(session) collection = _clear_entity_type_registry(collection) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index fe685694a2..936059e14e 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -64,7 +64,7 @@ class ChangeStream(object): :meth:`pymongo.mongo_client.MongoClient.watch` instead. .. versionadded:: 3.6 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. """ def __init__(self, target, pipeline, full_document, resume_after, max_await_time_ms, batch_size, collation, diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 78410d325f..7d9ce712f0 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -37,7 +37,7 @@ causally consistent session, an application can read its own writes and is guaranteed monotonic reads, even when reading from replica set secondaries. -.. mongodoc:: causal-consistency +.. seealso:: The MongoDB documentation on `causal-consistency `_. .. _transactions-ref: @@ -91,7 +91,7 @@ are routed to the same mongos server. When the transaction is completed, by running either commitTransaction or abortTransaction, the session is unpinned. -.. mongodoc:: transactions +.. seealso:: The MongoDB documentation on `transactions `_. .. _snapshot-reads-ref: diff --git a/pymongo/collection.py b/pymongo/collection.py index 7ddcb96994..68471c06c5 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -138,7 +138,7 @@ def __init__(self, database, name, create=False, codec_options=None, collection.__my_collection__ - .. mongodoc:: collections + .. seealso:: The MongoDB documentation on `collections `_. """ super(Collection, self).__init__( codec_options or database.codec_options, @@ -1376,8 +1376,7 @@ def find(self, *args, **kwargs): expression object. Soft deprecated the ``manipulate`` option. - .. mongodoc:: find - + .. seealso:: The MongoDB documentation on `find `_. """ return Cursor(self, *args, **kwargs) @@ -1757,7 +1756,7 @@ def create_index(self, keys, session=None, **kwargs): :meth:`create_index` no longer caches index names. Removed support for the drop_dups and bucket_size aliases. - .. mongodoc:: indexes + .. seealso:: The MongoDB documentation on `indexes `_. .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core """ @@ -2198,7 +2197,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. versionadded:: 3.6 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 6d0349a909..21822ac61b 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -303,7 +303,7 @@ def __init__(self, collection, cursor_info, address, see :meth:`~pymongo.collection.Collection.aggregate_raw_batches` instead. - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ assert not cursor_info.get('firstBatch') super(RawBatchCommandCursor, self).__init__( diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 5608c71b24..596f06b66c 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -146,7 +146,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, Should not be called directly by application developers - see :meth:`~pymongo.collection.Collection.find` instead. - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ # Initialize all attributes used in __del__ before possibly raising # an error to avoid attribute errors during garbage collection. @@ -494,7 +494,7 @@ def limit(self, limit): :Parameters: - `limit`: the number of results to return - .. mongodoc:: limit + .. seealso:: The MongoDB documentation on `limit `_. """ if not isinstance(limit, int): raise TypeError("limit must be an integer") @@ -839,7 +839,7 @@ def explain(self): :meth:`~pymongo.database.Database.command` to run the explain command directly. - .. mongodoc:: explain + .. seealso:: The MongoDB documentation on `explain `_. """ c = self.clone() c.__explain = True @@ -1233,7 +1233,7 @@ def __init__(self, *args, **kwargs): see :meth:`~pymongo.collection.Collection.find_raw_batches` instead. - .. mongodoc:: cursors + .. seealso:: The MongoDB documentation on `cursors `_. """ super(RawBatchCursor, self).__init__(*args, **kwargs) @@ -1250,7 +1250,7 @@ def _unpack_response(self, response, cursor_id, codec_options, def explain(self): """Returns an explain plan record for this cursor. - .. mongodoc:: explain + .. seealso:: The MongoDB documentation on `explain `_. """ clone = self._clone(deepcopy=True, base=Cursor(self.collection)) return clone.explain() diff --git a/pymongo/database.py b/pymongo/database.py index ea9d4a1dd7..89a38a15ab 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -73,7 +73,7 @@ def __init__(self, client, name, codec_options=None, read_preference=None, :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) client.read_concern is used. - .. mongodoc:: databases + .. seealso:: The MongoDB documentation on `databases `_. .. versionchanged:: 3.2 Added the read_concern option. @@ -468,7 +468,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. versionadded:: 3.7 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst @@ -585,7 +585,7 @@ def command(self, command, value=1, check=True, BSON regular expression to a Python regular expression object. Added the `codec_options` parameter. - .. mongodoc:: commands + .. seealso:: The MongoDB documentation on `commands `_. """ if read_preference is None: read_preference = ((session and session._txn_read_preference()) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4608610058..c12f0b9f45 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -494,7 +494,7 @@ def __init__( client to use Versioned API. See :ref:`versioned-api-ref` for details. - .. mongodoc:: connections + .. seealso:: The MongoDB documentation on `connections `_. .. versionchanged:: 4.0 Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` keyword @@ -862,7 +862,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, .. versionadded:: 3.7 - .. mongodoc:: changeStreams + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst From b3118e034e55222d01684401a69b3c9fac273c6b Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Thu, 19 Aug 2021 14:58:31 -0700 Subject: [PATCH 0444/2111] PYTHON-2162 Remove support for ssl* URI options (#706) --- doc/migrate-to-pymongo4.rst | 34 +++++ pymongo/client_options.py | 77 ++++++----- pymongo/common.py | 44 +----- pymongo/mongo_client.py | 4 +- pymongo/ocsp_support.py | 2 +- pymongo/pool.py | 12 +- pymongo/ssl_support.py | 52 ++------ pymongo/topology.py | 2 +- pymongo/uri_parser.py | 39 +++--- test/__init__.py | 12 +- test/test_client.py | 14 +- test/test_dns.py | 4 +- test/test_pooling.py | 2 +- test/test_ssl.py | 257 ++++++++++++++---------------------- test/test_uri_parser.py | 44 ++---- 15 files changed, 249 insertions(+), 350 deletions(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index ffc554a6f4..a7dd6570de 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -79,6 +79,40 @@ Removed the ``socketKeepAlive`` keyword argument to keepalive. For more information see: https://docs.mongodb.com/manual/faq/diagnostics/#does-tcp-keepalive-time-affect-mongodb-deployments +Renamed URI options +................... + +Several deprecated URI options have been renamed to the standardized +option names defined in the +`URI options specification `_. +The old option names and their renamed equivalents are summarized in the table +below. Some renamed options have different semantics from the option being +replaced as noted in the 'Migration Notes' column. + ++--------------------+-------------------------------+-------------------------------------------------------------+ +| Old URI Option | Renamed URI Option | Migration Notes | ++====================+===============================+=============================================================+ +| ssl_pem_passphrase | tlsCertificateKeyFilePassword | - | ++--------------------+-------------------------------+-------------------------------------------------------------+ +| ssl_ca_certs | tlsCAFile | - | ++--------------------+-------------------------------+-------------------------------------------------------------+ +| ssl_crlfile | tlsCRLFile | - | ++--------------------+-------------------------------+-------------------------------------------------------------+ +| ssl_match_hostname | tlsAllowInvalidHostnames | ``ssl_match_hostname=True`` is equivalent to | +| | | ``tlsAllowInvalidHostnames=False`` and vice-versa. | ++--------------------+-------------------------------+-------------------------------------------------------------+ +| ssl_cert_reqs | tlsAllowInvalidCertificates | Instead of ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` | +| | | and ``ssl.CERT_REQUIRED``, ``tlsAllowInvalidCertificates`` | +| | | expects a boolean value - ``True`` is equivalent to | +| | | ``ssl.CERT_NONE``, while ``False`` is equivalent to | +| | | ``ssl.CERT_REQUIRED``. | ++--------------------+-------------------------------+-------------------------------------------------------------+ +| ssl_certfile | tlsCertificateKeyFile | Instead of using ``ssl_certfile`` and ``ssl_keyfile`` | +| | | to specify the certificate and private key files, | ++--------------------+ | ``tlsCertificateKeyFile`` expects a single file containing | +| ssl_keyfile | | both the client certificate and the private key. | ++--------------------+-------------------------------+-------------------------------------------------------------+ + MongoClient.fsync is removed ............................ diff --git a/pymongo/client_options.py b/pymongo/client_options.py index ea223145b2..7fdbb4cc8c 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -69,43 +69,52 @@ def _parse_read_concern(options): def _parse_ssl_options(options): """Parse ssl options.""" - use_ssl = options.get('ssl') - if use_ssl is not None: - validate_boolean('ssl', use_ssl) - - certfile = options.get('ssl_certfile') - keyfile = options.get('ssl_keyfile') - passphrase = options.get('ssl_pem_passphrase') - ca_certs = options.get('ssl_ca_certs') - cert_reqs = options.get('ssl_cert_reqs') - match_hostname = options.get('ssl_match_hostname', True) - crlfile = options.get('ssl_crlfile') - check_ocsp_endpoint = options.get('ssl_check_ocsp_endpoint', True) - - ssl_kwarg_keys = [k for k in options - if k.startswith('ssl_') and options[k]] - if use_ssl is False and ssl_kwarg_keys: - raise ConfigurationError("ssl has not been enabled but the " - "following ssl parameters have been set: " - "%s. Please set `ssl=True` or remove." - % ', '.join(ssl_kwarg_keys)) - - if ssl_kwarg_keys and use_ssl is None: - # ssl options imply ssl = True - use_ssl = True - - if use_ssl is True: + use_tls = options.get('tls') + if use_tls is not None: + validate_boolean('tls', use_tls) + + certfile = options.get('tlscertificatekeyfile') + passphrase = options.get('tlscertificatekeyfilepassword') + ca_certs = options.get('tlscafile') + crlfile = options.get('tlscrlfile') + allow_invalid_certificates = options.get('tlsallowinvalidcertificates', False) + allow_invalid_hostnames = options.get('tlsallowinvalidhostnames', False) + disable_ocsp_endpoint_check = options.get('tlsdisableocspendpointcheck', False) + + enabled_tls_opts = [] + for opt in ('tlscertificatekeyfile', 'tlscertificatekeyfilepassword', + 'tlscafile', 'tlscrlfile'): + # Any non-null value of these options implies tls=True. + if opt in options and options[opt]: + enabled_tls_opts.append(opt) + for opt in ('tlsallowinvalidcertificates', 'tlsallowinvalidhostnames', + 'tlsdisableocspendpointcheck'): + # A value of False for these options implies tls=True. + if opt in options and not options[opt]: + enabled_tls_opts.append(opt) + + if enabled_tls_opts: + if use_tls is None: + # Implicitly enable TLS when one of the tls* options is set. + use_tls = True + elif not use_tls: + # Error since tls is explicitly disabled but a tls option is set. + raise ConfigurationError("TLS has not been enabled but the " + "following tls parameters have been set: " + "%s. Please set `tls=True` or remove." + % ', '.join(enabled_tls_opts)) + + if use_tls: ctx = get_ssl_context( certfile, - keyfile, passphrase, ca_certs, - cert_reqs, + allow_invalid_certificates, crlfile, - match_hostname, - check_ocsp_endpoint) - return ctx, match_hostname - return None, match_hostname + allow_invalid_hostnames, + disable_ocsp_endpoint_check) + return ctx, allow_invalid_hostnames + return None, allow_invalid_hostnames def _parse_pool_options(options): @@ -127,14 +136,14 @@ def _parse_pool_options(options): compression_settings = CompressionSettings( options.get('compressors', []), options.get('zlibcompressionlevel', -1)) - ssl_context, ssl_match_hostname = _parse_ssl_options(options) + ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) load_balanced = options.get('loadbalanced') return PoolOptions(max_pool_size, min_pool_size, max_idle_time_seconds, connect_timeout, socket_timeout, wait_queue_timeout, - ssl_context, ssl_match_hostname, + ssl_context, tls_allow_invalid_hostnames, _EventListeners(event_listeners), appname, driver, diff --git a/pymongo/common.py b/pymongo/common.py index bf4ed08b13..79c50f77a3 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -35,8 +35,6 @@ from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode -from pymongo.ssl_support import (validate_cert_reqs, - validate_allow_invalid_certs) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern ORDERED_TYPES = (SON, OrderedDict) @@ -585,18 +583,11 @@ def validate_tzinfo(dummy, value): # Dictionary where keys are the names of public URI options, and values -# are lists of aliases for that option. Aliases of option names are assumed -# to have been deprecated. +# are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP = { 'journal': ['j'], 'wtimeoutms': ['wtimeout'], 'tls': ['ssl'], - 'tlsallowinvalidcertificates': ['ssl_cert_reqs'], - 'tlsallowinvalidhostnames': ['ssl_match_hostname'], - 'tlscrlfile': ['ssl_crlfile'], - 'tlscafile': ['ssl_ca_certs'], - 'tlscertificatekeyfile': ['ssl_certfile'], - 'tlscertificatekeyfilepassword': ['ssl_pem_passphrase'], } # Dictionary where keys are the names of URI options, and values @@ -626,18 +617,13 @@ def validate_tzinfo(dummy, value): 'loadbalanced': validate_boolean_or_string, 'serverselectiontimeoutms': validate_timeout_or_zero, 'sockettimeoutms': validate_timeout_or_none_or_zero, - 'ssl_keyfile': validate_readable, 'tls': validate_boolean_or_string, - 'tlsallowinvalidcertificates': validate_allow_invalid_certs, - 'ssl_cert_reqs': validate_cert_reqs, - # Normalized to ssl_match_hostname which is the logical inverse of tlsallowinvalidhostnames - 'tlsallowinvalidhostnames': lambda *x: not validate_boolean_or_string(*x), - 'ssl_match_hostname': validate_boolean_or_string, + 'tlsallowinvalidcertificates': validate_boolean_or_string, + 'tlsallowinvalidhostnames': validate_boolean_or_string, 'tlscafile': validate_readable, 'tlscertificatekeyfile': validate_readable, 'tlscertificatekeyfilepassword': validate_string_or_none, - # Normalized to ssl_check_ocsp_endpoint which is the logical inverse of tlsdisableocspendpointcheck - 'tlsdisableocspendpointcheck': lambda *x: not validate_boolean_or_string(*x), + 'tlsdisableocspendpointcheck': validate_boolean_or_string, 'tlsinsecure': validate_boolean_or_string, 'w': validate_non_negative_int_or_basestring, 'wtimeoutms': validate_non_negative_integer, @@ -682,14 +668,7 @@ def validate_tzinfo(dummy, value): INTERNAL_URI_OPTION_NAME_MAP = { 'j': 'journal', 'wtimeout': 'wtimeoutms', - 'tls': 'ssl', - 'tlsallowinvalidcertificates': 'ssl_cert_reqs', - 'tlsallowinvalidhostnames': 'ssl_match_hostname', - 'tlscrlfile': 'ssl_crlfile', - 'tlscafile': 'ssl_ca_certs', - 'tlscertificatekeyfile': 'ssl_certfile', - 'tlscertificatekeyfilepassword': 'ssl_pem_passphrase', - 'tlsdisableocspendpointcheck': 'ssl_check_ocsp_endpoint', + 'ssl': 'tls', } # Map from deprecated URI option names to a tuple indicating the method of @@ -704,19 +683,6 @@ def validate_tzinfo(dummy, value): # option and/or recommend remedial action. 'j': ('renamed', 'journal'), 'wtimeout': ('renamed', 'wTimeoutMS'), - 'ssl_cert_reqs': ('renamed', 'tlsAllowInvalidCertificates'), - 'ssl_match_hostname': ('renamed', 'tlsAllowInvalidHostnames'), - 'ssl_crlfile': ('renamed', 'tlsCRLFile'), - 'ssl_ca_certs': ('renamed', 'tlsCAFile'), - 'ssl_certfile': ('removed', ( - 'Instead of using ssl_certfile to specify the certificate file, ' - 'use tlsCertificateKeyFile to pass a single file containing both ' - 'the client certificate and the private key')), - 'ssl_keyfile': ('removed', ( - 'Instead of using ssl_keyfile to specify the private keyfile, ' - 'use tlsCertificateKeyFile to pass a single file containing both ' - 'the client certificate and the private key')), - 'ssl_pem_passphrase': ('renamed', 'tlsCertificateKeyFilePassword'), } # Augment the option validator map with pymongo-specific option information. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index c12f0b9f45..cf7b4d3a63 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -456,7 +456,9 @@ def __init__( python 2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults to ``None``. - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables certificate revocation status checking via the OCSP responder - specified on the server certificate. Defaults to ``False``. + specified on the server certificate. + ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``. + Defaults to ``False``. - `ssl`: (boolean) Alias for ``tls``. | **Read Concern options:** diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index f7f8975041..1a983e0af8 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -88,7 +88,7 @@ def _get_issuer_cert(cert, chain, trusted_ca_certs): # Depending on the server's TLS library, the peer's cert chain may not # include the self signed root CA. In this case we check the user - # provided tlsCAFile (ssl_ca_certs) for the issuer. + # provided tlsCAFile for the issuer. # Remove once we use the verified peer cert chain in PYTHON-2147. if trusted_ca_certs: for candidate in trusted_ca_certs: diff --git a/pymongo/pool.py b/pymongo/pool.py index 20812a0cad..d21a77809a 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -261,7 +261,7 @@ class PoolOptions(object): '__max_idle_time_seconds', '__connect_timeout', '__socket_timeout', '__wait_queue_timeout', - '__ssl_context', '__ssl_match_hostname', + '__ssl_context', '__tls_allow_invalid_hostnames', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', '__pause_enabled', '__server_api', '__load_balanced') @@ -271,7 +271,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, max_idle_time_seconds=MAX_IDLE_TIME_SEC, connect_timeout=None, socket_timeout=None, wait_queue_timeout=WAIT_QUEUE_TIMEOUT, ssl_context=None, - ssl_match_hostname=True, + tls_allow_invalid_hostnames=False, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, pause_enabled=True, server_api=None, load_balanced=None): @@ -282,7 +282,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__socket_timeout = socket_timeout self.__wait_queue_timeout = wait_queue_timeout self.__ssl_context = ssl_context - self.__ssl_match_hostname = ssl_match_hostname + self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames self.__event_listeners = event_listeners self.__appname = appname self.__driver = driver @@ -400,10 +400,10 @@ def ssl_context(self): return self.__ssl_context @property - def ssl_match_hostname(self): + def tls_allow_invalid_hostnames(self): """Call ssl.match_hostname if cert_reqs is not ssl.CERT_NONE. """ - return self.__ssl_match_hostname + return self.__tls_allow_invalid_hostnames @property def event_listeners(self): @@ -1047,7 +1047,7 @@ def _configured_socket(address, options): _raise_connection_failure(address, exc, "SSL handshake failed: ") if (ssl_context.verify_mode and not getattr(ssl_context, "check_hostname", False) and - options.ssl_match_hostname): + not options.tls_allow_invalid_hostnames): try: ssl.match_hostname(sock.getpeercert(), hostname=host) except _CertificateError: diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index ca6ee8575d..57361f2fa8 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -39,51 +39,26 @@ HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) SSLError = _ssl.SSLError - def validate_cert_reqs(option, value): - """Validate the cert reqs are valid. It must be None or one of the - three values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or - ``ssl.CERT_REQUIRED``. - """ - if value is None: - return value - if isinstance(value, str) and hasattr(_stdlibssl, value): - value = getattr(_stdlibssl, value) - - if value in (CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED): - return value - raise ValueError("The value of %s must be one of: " - "`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or " - "`ssl.CERT_REQUIRED`" % (option,)) - - def validate_allow_invalid_certs(option, value): - """Validate the option to allow invalid certificates is valid.""" - # Avoid circular import. - from pymongo.common import validate_boolean_or_string - boolean_cert_reqs = validate_boolean_or_string(option, value) - if boolean_cert_reqs: - return CERT_NONE - return CERT_REQUIRED def get_ssl_context(*args): """Create and return an SSLContext object.""" (certfile, - keyfile, passphrase, ca_certs, - cert_reqs, + allow_invalid_certificates, crlfile, - match_hostname, - check_ocsp_endpoint) = args - verify_mode = CERT_REQUIRED if cert_reqs is None else cert_reqs + allow_invalid_hostnames, + disable_ocsp_endpoint_check) = args + verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) # SSLContext.check_hostname was added in CPython 3.4. if hasattr(ctx, "check_hostname"): if _ssl.CHECK_HOSTNAME_SAFE and verify_mode != CERT_NONE: - ctx.check_hostname = match_hostname + ctx.check_hostname = not allow_invalid_hostnames else: ctx.check_hostname = False if hasattr(ctx, "check_ocsp_endpoint"): - ctx.check_ocsp_endpoint = check_ocsp_endpoint + ctx.check_ocsp_endpoint = not disable_ocsp_endpoint_check if hasattr(ctx, "options"): # Explicitly disable SSLv2, SSLv3 and TLS compression. Note that # up to date versions of MongoDB 2.4 and above already disable @@ -95,20 +70,20 @@ def get_ssl_context(*args): ctx.options |= _ssl.OP_NO_RENEGOTIATION if certfile is not None: try: - ctx.load_cert_chain(certfile, keyfile, passphrase) + ctx.load_cert_chain(certfile, None, passphrase) except _ssl.SSLError as exc: raise ConfigurationError( "Private key doesn't match certificate: %s" % (exc,)) if crlfile is not None: if _ssl.IS_PYOPENSSL: raise ConfigurationError( - "ssl_crlfile cannot be used with PyOpenSSL") + "tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. ctx.verify_flags = getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) - elif cert_reqs != CERT_NONE: + elif verify_mode != CERT_NONE: ctx.load_default_certs() ctx.verify_mode = verify_mode return ctx @@ -117,15 +92,6 @@ class SSLError(Exception): pass HAS_SNI = False IPADDR_SAFE = False - def validate_cert_reqs(option, dummy): - """No ssl module, raise ConfigurationError.""" - raise ConfigurationError("The value of %s is set but can't be " - "validated. The ssl module is not available" - % (option,)) - - def validate_allow_invalid_certs(option, dummy): - """No ssl module, raise ConfigurationError.""" - return validate_cert_reqs(option, dummy) def get_ssl_context(*dummy): """No ssl module, raise ConfigurationError.""" diff --git a/pymongo/topology.py b/pymongo/topology.py index 340e504d1d..15247771cc 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -721,7 +721,7 @@ def _create_pool_for_monitor(self, address): connect_timeout=options.connect_timeout, socket_timeout=options.connect_timeout, ssl_context=options.ssl_context, - ssl_match_hostname=options.ssl_match_hostname, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, event_listeners=options.event_listeners, appname=options.appname, driver=options.driver, diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 95327eed85..60eb9cba80 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -119,12 +119,7 @@ def parse_host(entity, default_port=DEFAULT_PORT): _IMPLICIT_TLSINSECURE_OPTS = { "tlsallowinvalidcertificates", "tlsallowinvalidhostnames", - "tlsdisableocspendpointcheck",} - -# Options that cannot be specified when tlsInsecure is also specified. -_TLSINSECURE_EXCLUDE_OPTS = ( - {k for k in _IMPLICIT_TLSINSECURE_OPTS} | - {INTERNAL_URI_OPTION_NAME_MAP[k] for k in _IMPLICIT_TLSINSECURE_OPTS}) + "tlsdisableocspendpointcheck"} def _parse_options(opts, delim): @@ -156,22 +151,18 @@ def _handle_security_options(options): - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ + # Implicitly defined options must not be explicitly specified. tlsinsecure = options.get('tlsinsecure') if tlsinsecure is not None: - for opt in _TLSINSECURE_EXCLUDE_OPTS: + for opt in _IMPLICIT_TLSINSECURE_OPTS: if opt in options: err_msg = ("URI options %s and %s cannot be specified " "simultaneously.") raise InvalidURI(err_msg % ( options.cased_key('tlsinsecure'), options.cased_key(opt))) - # Convenience function to retrieve option values based on public or private names. - def _getopt(opt): - return (options.get(opt) or - options.get(INTERNAL_URI_OPTION_NAME_MAP[opt])) - # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. - tlsallowinvalidcerts = _getopt('tlsallowinvalidcertificates') + tlsallowinvalidcerts = options.get('tlsallowinvalidcertificates') if tlsallowinvalidcerts is not None: if 'tlsdisableocspendpointcheck' in options: err_msg = ("URI options %s and %s cannot be specified " @@ -183,7 +174,7 @@ def _getopt(opt): options['tlsdisableocspendpointcheck'] = True # Handle co-occurence of CRL and OCSP-related options. - tlscrlfile = _getopt('tlscrlfile') + tlscrlfile = options.get('tlscrlfile') if tlscrlfile is not None: for opt in ('tlsinsecure', 'tlsallowinvalidcertificates', 'tlsdisableocspendpointcheck'): @@ -201,7 +192,7 @@ def truth_value(val): return val if truth_value(options.get('ssl')) != truth_value(options.get('tls')): err_msg = ("Can not specify conflicting values for URI options %s " - "and %s.") + "and %s.") raise InvalidURI(err_msg % ( options.cased_key('ssl'), options.cased_key('tls'))) @@ -246,18 +237,18 @@ def _handle_option_deprecations(options): def _normalize_options(options): """Normalizes option names in the options dictionary by converting them to - their internally-used names. Also handles use of the tlsInsecure option. + their internally-used names. :Parameters: - `options`: Instance of _CaseInsensitiveDictionary containing MongoDB URI options. """ + # Expand the tlsInsecure option. tlsinsecure = options.get('tlsinsecure') if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: - intname = INTERNAL_URI_OPTION_NAME_MAP[opt] - # Internal options are logical inverse of public options. - options[intname] = not tlsinsecure + # Implicit options are logically the same as tlsInsecure. + options[opt] = tlsinsecure for optname in list(options): intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) @@ -316,15 +307,15 @@ def split_options(opts, validate=True, warn=False, normalize=True): options = _handle_option_deprecations(options) + if normalize: + options = _normalize_options(options) + if validate: options = validate_options(options, warn) if options.get('authsource') == '': raise InvalidURI( "the authSource database cannot be an empty string") - if normalize: - options = _normalize_options(options) - return options @@ -521,8 +512,8 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val - if "ssl" not in options: - options["ssl"] = True if validate else 'true' + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else 'true' else: nodes = split_hosts(hosts, default_port=default_port) diff --git a/test/__init__.py b/test/__init__.py index 11dadd1936..6d99f6ec59 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -241,7 +241,7 @@ def __init__(self): self.is_rs = False self.has_ipv6 = False self.tls = False - self.ssl_certfile = False + self.tlsCertificateKeyFile = False self.server_is_resolvable = is_server_resolvable() self.default_client_options = {} self.sessions_enabled = False @@ -323,7 +323,7 @@ def _init_client(self): if self.client: self.tls = True self.default_client_options.update(TLS_OPTIONS) - self.ssl_certfile = True + self.tlsCertificateKeyFile = True if self.client: self.connected = True @@ -793,10 +793,10 @@ def require_no_tls(self, func): "Must be able to connect without TLS", func=func) - def require_ssl_certfile(self, func): - """Run a test only if the client can connect with ssl_certfile.""" - return self._require(lambda: self.ssl_certfile, - "Must be able to connect with ssl_certfile", + def require_tlsCertificateKeyFile(self, func): + """Run a test only if the client can connect with tlsCertificateKeyFile.""" + return self._require(lambda: self.tlsCertificateKeyFile, + "Must be able to connect with tlsCertificateKeyFile", func=func) def require_server_resolvable(self, func): diff --git a/test/test_client.py b/test/test_client.py index f4f67b0098..6abbd9363b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -114,10 +114,9 @@ def test_keyword_arg_defaults(self): replicaSet=None, read_preference=ReadPreference.PRIMARY, ssl=False, - ssl_keyfile=None, - ssl_certfile=None, - ssl_cert_reqs=0, # ssl.CERT_NONE - ssl_ca_certs=None, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, # ssl.CERT_NONE + tlsCAFile=None, connect=False, serverSelectionTimeoutMS=12000) @@ -394,7 +393,7 @@ def test_uri_option_precedence(self): clopts = c._MongoClient__options opts = clopts._options - self.assertEqual(opts['ssl'], False) + self.assertEqual(opts['tls'], False) self.assertEqual(clopts.replica_set_name, "newname") self.assertEqual( clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) @@ -445,7 +444,7 @@ def test_uri_security_options(self): # Matching SSL and TLS options should not cause errors. c = MongoClient('mongodb://localhost/?ssl=false', tls=False, connect=False) - self.assertEqual(c._MongoClient__options._options['ssl'], False) + self.assertEqual(c._MongoClient__options._options['tls'], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): @@ -455,12 +454,13 @@ def test_uri_security_options(self): # Conflicting legacy tlsInsecure options should also raise an error. with self.assertRaises(InvalidURI): MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, ssl_cert_reqs=True) + connect=False, tlsAllowInvalidCertificates=False) # Conflicting kwargs should raise InvalidURI with self.assertRaises(InvalidURI): MongoClient(ssl=True, tls=False) + class TestClient(IntegrationTest): def test_max_idle_time_reaper_default(self): diff --git a/test/test_dns.py b/test/test_dns.py index 2cca4d4487..fd6431c586 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -58,7 +58,9 @@ def run_test(self): uri = test_case['uri'] seeds = test_case['seeds'] hosts = test_case['hosts'] - options = test_case.get('options') + options = test_case.get('options', {}) + if 'ssl' in options: + options['tls'] = options.pop('ssl') parsed_options = test_case.get('parsed_options') # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. needs_tls = not (options and (options.get('ssl') == False or diff --git a/test/test_pooling.py b/test/test_pooling.py index 5b711a16ae..d5f4f09a9a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -176,7 +176,7 @@ def create_pool( # Start the pool with the correct ssl options. pool_options = client_context.client._topology_settings.pool_options kwargs['ssl_context'] = pool_options.ssl_context - kwargs['ssl_match_hostname'] = pool_options.ssl_match_hostname + kwargs['tls_allow_invalid_hostnames'] = pool_options.tls_allow_invalid_hostnames kwargs['server_api'] = pool_options.server_api pool = Pool(pair, PoolOptions(*args, **kwargs)) pool.ready() diff --git a/test/test_ssl.py b/test/test_ssl.py index e226a3cbbb..76f4c69cfa 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -26,7 +26,7 @@ from pymongo.errors import (ConfigurationError, ConnectionFailure, OperationFailure) -from pymongo.ssl_support import HAVE_SSL, get_ssl_context, validate_cert_reqs, _ssl +from pymongo.ssl_support import HAVE_SSL, get_ssl_context, _ssl from pymongo.write_concern import WriteConcern from test import (IntegrationTest, client_context, @@ -91,7 +91,7 @@ def test_no_ssl_module(self): # Implied self.assertRaises(ConfigurationError, - MongoClient, ssl_certfile=CLIENT_PEM) + MongoClient, tlsCertificateKeyFile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") @ignore_deprecations @@ -100,63 +100,41 @@ def test_config_ssl(self): self.assertRaises(ValueError, MongoClient, ssl='foo') self.assertRaises(ConfigurationError, MongoClient, - ssl=False, - ssl_certfile=CLIENT_PEM) + tls=False, + tlsCertificateKeyFile=CLIENT_PEM) self.assertRaises(TypeError, MongoClient, ssl=0) self.assertRaises(TypeError, MongoClient, ssl=5.5) self.assertRaises(TypeError, MongoClient, ssl=[]) - self.assertRaises(IOError, MongoClient, ssl_certfile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, ssl_certfile=True) - self.assertRaises(TypeError, MongoClient, ssl_certfile=[]) - self.assertRaises(IOError, MongoClient, ssl_keyfile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, ssl_keyfile=True) - self.assertRaises(TypeError, MongoClient, ssl_keyfile=[]) + self.assertRaises(IOError, MongoClient, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=[]) # Test invalid combinations self.assertRaises(ConfigurationError, MongoClient, - ssl=False, - ssl_keyfile=CLIENT_PEM) + tls=False, + tlsCertificateKeyFile=CLIENT_PEM) self.assertRaises(ConfigurationError, MongoClient, - ssl=False, - ssl_certfile=CLIENT_PEM) + tls=False, + tlsCAFile=CA_PEM) self.assertRaises(ConfigurationError, MongoClient, - ssl=False, - ssl_keyfile=CLIENT_PEM, - ssl_certfile=CLIENT_PEM) - - self.assertRaises( - ValueError, validate_cert_reqs, 'ssl_cert_reqs', 3) - self.assertRaises( - ValueError, validate_cert_reqs, 'ssl_cert_reqs', -1) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', None), None) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', ssl.CERT_NONE), - ssl.CERT_NONE) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', ssl.CERT_OPTIONAL), - ssl.CERT_OPTIONAL) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', ssl.CERT_REQUIRED), - ssl.CERT_REQUIRED) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 0), ssl.CERT_NONE) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 1), ssl.CERT_OPTIONAL) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 2), ssl.CERT_REQUIRED) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 'CERT_NONE'), ssl.CERT_NONE) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 'CERT_OPTIONAL'), - ssl.CERT_OPTIONAL) - self.assertEqual( - validate_cert_reqs('ssl_cert_reqs', 'CERT_REQUIRED'), - ssl.CERT_REQUIRED) + tls=False, + tlsCRLFile=CRL_PEM) + self.assertRaises(ConfigurationError, + MongoClient, + tls=False, + tlsAllowInvalidCertificates=False) + self.assertRaises(ConfigurationError, + MongoClient, + tls=False, + tlsAllowInvalidHostnames=False) + self.assertRaises(ConfigurationError, + MongoClient, + tls=False, + tlsDisableOCSPEndpointCheck=False) @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") def test_use_pyopenssl_when_available(self): @@ -197,9 +175,9 @@ def test_simple_ssl(self): # no --sslPEMKeyFile or with --sslWeakCertificateValidation self.assertClientWorks(self.client) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @ignore_deprecations - def test_ssl_pem_passphrase(self): + def test_tlsCertificateKeyFilePassword(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem @@ -210,26 +188,26 @@ def test_ssl_pem_passphrase(self): MongoClient, 'localhost', ssl=True, - ssl_certfile=CLIENT_ENCRYPTED_PEM, - ssl_pem_passphrase="qwerty", - ssl_ca_certs=CA_PEM, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, serverSelectionTimeoutMS=100) else: connected(MongoClient('localhost', ssl=True, - ssl_certfile=CLIENT_ENCRYPTED_PEM, - ssl_pem_passphrase="qwerty", - ssl_ca_certs=CA_PEM, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, serverSelectionTimeoutMS=5000, **self.credentials)) uri_fmt = ("mongodb://localhost/?ssl=true" - "&ssl_certfile=%s&ssl_pem_passphrase=qwerty" - "&ssl_ca_certs=%s&serverSelectionTimeoutMS=5000") + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000") connected(MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials)) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth @ignore_deprecations def test_cert_ssl_implicitly_set(self): @@ -239,21 +217,21 @@ def test_cert_ssl_implicitly_set(self): # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - # test that setting ssl_certfile causes ssl to be set to True + # test that setting tlsCertificateKeyFile causes ssl to be set to True client = MongoClient(client_context.host, client_context.port, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) response = client.admin.command('ismaster') if 'setName' in response: client = MongoClient(client_context.pair, replicaSet=response['setName'], w=len(response['hosts']), - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) self.assertClientWorks(client) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth @ignore_deprecations def test_cert_ssl_validation(self): @@ -264,9 +242,9 @@ def test_cert_ssl_validation(self): # client = MongoClient('localhost', ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM) response = client.admin.command('ismaster') if 'setName' in response: if response['primary'].split(":")[0] != 'localhost': @@ -277,21 +255,21 @@ def test_cert_ssl_validation(self): replicaSet=response['setName'], w=len(response['hosts']), ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM) self.assertClientWorks(client) if HAVE_IPADDRESS: client = MongoClient('127.0.0.1', ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM) + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM) self.assertClientWorks(client) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth @ignore_deprecations def test_cert_ssl_uri_support(self): @@ -300,43 +278,12 @@ def test_cert_ssl_uri_support(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - uri_fmt = ("mongodb://localhost/?ssl=true&ssl_certfile=%s&ssl_cert_reqs" - "=%s&ssl_ca_certs=%s&ssl_match_hostname=true") - client = MongoClient(uri_fmt % (CLIENT_PEM, 'CERT_REQUIRED', CA_PEM)) - self.assertClientWorks(client) - - @client_context.require_ssl_certfile - @client_context.require_no_auth - @ignore_deprecations - def test_cert_ssl_validation_optional(self): - # Expects the server to be running with server.pem and ca.pem - # - # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem - # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - # - client = MongoClient('localhost', - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_OPTIONAL, - ssl_ca_certs=CA_PEM) - - response = client.admin.command('ismaster') - if 'setName' in response: - if response['primary'].split(":")[0] != 'localhost': - raise SkipTest("No hosts in the replicaset for 'localhost'. " - "Cannot validate hostname in the certificate") - - client = MongoClient('localhost', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_OPTIONAL, - ssl_ca_certs=CA_PEM) - + uri_fmt = ("mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false") + client = MongoClient(uri_fmt % (CLIENT_PEM, 'true', CA_PEM)) self.assertClientWorks(client) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable @ignore_deprecations def test_cert_ssl_validation_hostname_matching(self): @@ -345,16 +292,16 @@ def test_cert_ssl_validation_hostname_matching(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, False, True) + None, None, None, True, None, True, False) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, True, True) + None, None, None, True, None, False, False) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, False, True) + None, None, None, False, None, True, False) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context( - None, None, None, None, ssl.CERT_REQUIRED, None, True, True) + None, None, None, False, None, False, False) if _PY37PLUS or _HAVE_PYOPENSSL: self.assertTrue(ctx.check_hostname) else: @@ -365,18 +312,18 @@ def test_cert_ssl_validation_hostname_matching(self): with self.assertRaises(ConnectionFailure): connected(MongoClient('server', ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, serverSelectionTimeoutMS=500, **self.credentials)) connected(MongoClient('server', ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, - ssl_match_hostname=False, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, serverSelectionTimeoutMS=500, **self.credentials)) @@ -385,61 +332,61 @@ def test_cert_ssl_validation_hostname_matching(self): connected(MongoClient('server', replicaSet=response['setName'], ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, serverSelectionTimeoutMS=500, **self.credentials)) connected(MongoClient('server', replicaSet=response['setName'], ssl=True, - ssl_certfile=CLIENT_PEM, - ssl_cert_reqs=ssl.CERT_REQUIRED, - ssl_ca_certs=CA_PEM, - ssl_match_hostname=False, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, serverSelectionTimeoutMS=500, **self.credentials)) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @ignore_deprecations - def test_ssl_crlfile_support(self): + def test_tlsCRLFile_support(self): if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF') or _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, 'localhost', ssl=True, - ssl_ca_certs=CA_PEM, - ssl_crlfile=CRL_PEM, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, serverSelectionTimeoutMS=100) else: connected(MongoClient('localhost', ssl=True, - ssl_ca_certs=CA_PEM, + tlsCAFile=CA_PEM, serverSelectionTimeoutMS=100, **self.credentials)) with self.assertRaises(ConnectionFailure): connected(MongoClient('localhost', ssl=True, - ssl_ca_certs=CA_PEM, - ssl_crlfile=CRL_PEM, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, serverSelectionTimeoutMS=100, **self.credentials)) uri_fmt = ("mongodb://localhost/?ssl=true&" - "ssl_ca_certs=%s&serverSelectionTimeoutMS=100") + "tlsCAFile=%s&serverSelectionTimeoutMS=100") connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) - uri_fmt = ("mongodb://localhost/?ssl=true&ssl_crlfile=%s" - "&ssl_ca_certs=%s&serverSelectionTimeoutMS=100") + uri_fmt = ("mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=100") with self.assertRaises(ConnectionFailure): connected(MongoClient(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials)) - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable @ignore_deprecations def test_validation_with_system_ca_certs(self): @@ -460,7 +407,7 @@ def test_validation_with_system_ca_certs(self): # Server cert is verified. Disable hostname matching. connected(MongoClient('server', ssl=True, - ssl_match_hostname=False, + tlsAllowInvalidHostnames=True, serverSelectionTimeoutMS=100, **self.credentials)) @@ -478,7 +425,7 @@ def test_validation_with_system_ca_certs(self): def test_system_certs_config_error(self): ctx = get_ssl_context( - None, None, None, None, ssl.CERT_NONE, None, False, True) + None, None, None, ssl.CERT_NONE, None, True, False) if ((sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr(ctx, "load_default_certs")): @@ -542,7 +489,7 @@ def test_wincertstore(self): self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) @client_context.require_auth - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port @@ -556,8 +503,8 @@ def test_mongodb_x509_auth(self): noauth = MongoClient( client_context.pair, ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) with self.assertRaises(OperationFailure): noauth.pymongo_test.test.find_one() @@ -567,8 +514,8 @@ def test_mongodb_x509_auth(self): client_context.pair, authMechanism='MONGODB-X509', ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, event_listeners=[listener]) if client_context.version.at_least(3, 3, 12): @@ -590,16 +537,16 @@ def test_mongodb_x509_auth(self): quote_plus(MONGODB_X509_USERNAME), host, port)) client = MongoClient(uri, ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) # No error client.pymongo_test.test.find_one() uri = 'mongodb://%s:%d/?authMechanism=MONGODB-X509' % (host, port) client = MongoClient(uri, ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) if client_context.version.at_least(3, 3, 12): # No error client.pymongo_test.test.find_one() @@ -614,7 +561,7 @@ def test_mongodb_x509_auth(self): quote_plus("not the username"), host, port)) bad_client = MongoClient( - uri, ssl=True, ssl_cert_reqs="CERT_NONE", ssl_certfile=CLIENT_PEM) + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() @@ -624,8 +571,8 @@ def test_mongodb_x509_auth(self): username="not the username", authMechanism='MONGODB-X509', ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CLIENT_PEM) + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() @@ -637,15 +584,15 @@ def test_mongodb_x509_auth(self): try: connected(MongoClient(uri, ssl=True, - ssl_cert_reqs=ssl.CERT_NONE, - ssl_certfile=CA_PEM, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, serverSelectionTimeoutMS=100)) except (ConnectionFailure, ConfigurationError): pass else: self.fail("Invalid certificate accepted.") - @client_context.require_ssl_certfile + @client_context.require_tlsCertificateKeyFile @ignore_deprecations def test_connect_with_ca_bundle(self): def remove(path): diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 59127249ce..bd862642c9 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -92,7 +92,7 @@ def test_split_options(self): self.assertRaises(ConfigurationError, split_options, 'foo=bar;foo') self.assertTrue(split_options('ssl=true')) self.assertTrue(split_options('connect=true')) - self.assertTrue(split_options('ssl_match_hostname=true')) + self.assertTrue(split_options('tlsAllowInvalidHostnames=false')) # Test Invalid URI options that should throw warnings. with warnings.catch_warnings(): @@ -116,7 +116,7 @@ def test_split_options(self): self.assertRaises(Warning, split_options, 'connect=foo', warn=True) self.assertRaises(Warning, split_options, - 'ssl_match_hostname=foo', warn=True) + 'tlsAllowInvalidHostnames=foo', warn=True) self.assertRaises(Warning, split_options, 'connectTimeoutMS=inf', warn=True) self.assertRaises(Warning, split_options, @@ -145,7 +145,7 @@ def test_split_options(self): 'connectTimeoutMS=-1e100000') self.assertRaises(ValueError, split_options, 'ssl=foo') self.assertRaises(ValueError, split_options, 'connect=foo') - self.assertRaises(ValueError, split_options, 'ssl_match_hostname=foo') + self.assertRaises(ValueError, split_options, 'tlsAllowInvalidHostnames=foo') self.assertRaises(ValueError, split_options, 'connectTimeoutMS=inf') self.assertRaises(ValueError, split_options, 'connectTimeoutMS=-inf') self.assertRaises(ValueError, split_options, 'wtimeoutms=foo') @@ -444,58 +444,40 @@ def test_parse_ssl_paths(self): {'collection': None, 'database': None, 'nodelist': [('/MongoDB.sock', None)], - 'options': {'ssl_certfile': '/a/b'}, + 'options': {'tlsCertificateKeyFile': '/a/b'}, 'password': 'foo/bar', 'username': 'jesse', 'fqdn': None}, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?ssl_certfile=/a/b', + 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b', validate=False)) self.assertEqual( {'collection': None, 'database': None, 'nodelist': [('/MongoDB.sock', None)], - 'options': {'ssl_certfile': 'a/b'}, + 'options': {'tlsCertificateKeyFile': 'a/b'}, 'password': 'foo/bar', 'username': 'jesse', 'fqdn': None}, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?ssl_certfile=a/b', + 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b', validate=False)) def test_tlsinsecure_simple(self): # check that tlsInsecure is expanded correctly. + self.maxDiff = None uri = "mongodb://example.com/?tlsInsecure=true" res = { - "ssl_match_hostname": False, "ssl_cert_reqs": CERT_NONE, - "tlsinsecure": True, 'ssl_check_ocsp_endpoint': False} - self.assertEqual(res, parse_uri(uri)["options"]) - - def test_tlsinsecure_legacy_conflict(self): - # must not allow use of tlsinsecure alongside legacy TLS options. - # same check for modern TLS options is performed in the spec-tests. - uri = "mongodb://srv.com/?tlsInsecure=true&ssl_match_hostname=true" - with self.assertRaises(InvalidURI): - parse_uri(uri, validate=False, warn=False, normalize=False) - - def test_tlsDisableOCSPEndpointCheck(self): - # check that tlsDisableOCSPEndpointCheck is handled correctly. - uri = "mongodb://example.com/?tlsDisableOCSPEndpointCheck=true" - res = {'ssl_check_ocsp_endpoint': False} - self.assertEqual(res, parse_uri(uri)["options"]) - - uri = "mongodb://example.com/?tlsDisableOCSPEndpointCheck=false" - res = {'ssl_check_ocsp_endpoint': True} + "tlsAllowInvalidHostnames": True, + "tlsAllowInvalidCertificates": True, + "tlsInsecure": True, 'tlsDisableOCSPEndpointCheck': True} self.assertEqual(res, parse_uri(uri)["options"]) def test_normalize_options(self): # check that options are converted to their internal names correctly. - uri = ("mongodb://example.com/?tls=true&appname=myapp&maxPoolSize=10&" - "fsync=true&wtimeout=10") - res = { - "ssl": True, "appname": "myapp", "maxpoolsize": 10, - "fsync": True, "wtimeoutms": 10} + uri = ("mongodb://example.com/?ssl=true&appname=myapp&wtimeout=10") + res = {"tls": True, "appname": "myapp", "wtimeoutms": 10} self.assertEqual(res, parse_uri(uri)["options"]) def test_unquote_after_parsing(self): From 9055bb09e63d8598f0bdff1d11a7e734d8438504 Mon Sep 17 00:00:00 2001 From: Prashant Mital <5883388+prashantmital@users.noreply.github.com> Date: Fri, 20 Aug 2021 10:17:06 -0700 Subject: [PATCH 0445/2111] PYTHON-2702 Remove deprecated URI options (#710) --- doc/migrate-to-pymongo4.rst | 51 ++++++++++++++++++++----------------- pymongo/common.py | 8 ++---- test/test_common.py | 2 +- test/test_uri_parser.py | 4 +-- 4 files changed, 33 insertions(+), 32 deletions(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index a7dd6570de..658d537247 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -89,29 +89,34 @@ The old option names and their renamed equivalents are summarized in the table below. Some renamed options have different semantics from the option being replaced as noted in the 'Migration Notes' column. -+--------------------+-------------------------------+-------------------------------------------------------------+ -| Old URI Option | Renamed URI Option | Migration Notes | -+====================+===============================+=============================================================+ -| ssl_pem_passphrase | tlsCertificateKeyFilePassword | - | -+--------------------+-------------------------------+-------------------------------------------------------------+ -| ssl_ca_certs | tlsCAFile | - | -+--------------------+-------------------------------+-------------------------------------------------------------+ -| ssl_crlfile | tlsCRLFile | - | -+--------------------+-------------------------------+-------------------------------------------------------------+ -| ssl_match_hostname | tlsAllowInvalidHostnames | ``ssl_match_hostname=True`` is equivalent to | -| | | ``tlsAllowInvalidHostnames=False`` and vice-versa. | -+--------------------+-------------------------------+-------------------------------------------------------------+ -| ssl_cert_reqs | tlsAllowInvalidCertificates | Instead of ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` | -| | | and ``ssl.CERT_REQUIRED``, ``tlsAllowInvalidCertificates`` | -| | | expects a boolean value - ``True`` is equivalent to | -| | | ``ssl.CERT_NONE``, while ``False`` is equivalent to | -| | | ``ssl.CERT_REQUIRED``. | -+--------------------+-------------------------------+-------------------------------------------------------------+ -| ssl_certfile | tlsCertificateKeyFile | Instead of using ``ssl_certfile`` and ``ssl_keyfile`` | -| | | to specify the certificate and private key files, | -+--------------------+ | ``tlsCertificateKeyFile`` expects a single file containing | -| ssl_keyfile | | both the client certificate and the private key. | -+--------------------+-------------------------------+-------------------------------------------------------------+ ++--------------------+-------------------------------+--------------------------------------------------------+ +| Old URI Option | Renamed URI Option | Migration Notes | ++====================+===============================+========================================================+ +| ssl_pem_passphrase | tlsCertificateKeyFilePassword | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_ca_certs | tlsCAFile | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_crlfile | tlsCRLFile | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_match_hostname | tlsAllowInvalidHostnames | ``ssl_match_hostname=True`` is equivalent to | +| | | ``tlsAllowInvalidHostnames=False`` and vice-versa. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_cert_reqs | tlsAllowInvalidCertificates | Instead of ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` | +| | | and ``ssl.CERT_REQUIRED``, the new option expects | +| | | a boolean value - ``True`` is equivalent to | +| | | ``ssl.CERT_NONE``, while ``False`` is equivalent to | +| | | ``ssl.CERT_REQUIRED``. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| ssl_certfile | tlsCertificateKeyFile | Instead of using ``ssl_certfile`` and ``ssl_keyfile`` | +| | | to specify the certificate and private key files | ++--------------------+ | respectively, use ``tlsCertificateKeyFile`` to pass | +| ssl_keyfile | | a single file containing both the client certificate | +| | | and the private key. | ++--------------------+-------------------------------+--------------------------------------------------------+ +| j | journal | - | ++--------------------+-------------------------------+--------------------------------------------------------+ +| wtimeout | wTimeoutMS | - | ++--------------------+-------------------------------+--------------------------------------------------------+ MongoClient.fsync is removed ............................ diff --git a/pymongo/common.py b/pymongo/common.py index 79c50f77a3..b465e3816d 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -585,8 +585,6 @@ def validate_tzinfo(dummy, value): # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP = { - 'journal': ['j'], - 'wtimeoutms': ['wtimeout'], 'tls': ['ssl'], } @@ -666,8 +664,6 @@ def validate_tzinfo(dummy, value): # variant need not be included here. Options whose public and internal # names are the same need not be included here. INTERNAL_URI_OPTION_NAME_MAP = { - 'j': 'journal', - 'wtimeout': 'wtimeoutms', 'ssl': 'tls', } @@ -681,8 +677,8 @@ def validate_tzinfo(dummy, value): # preserved for renamed options as they are part of user warnings. # - 'removed': may suggest the rationale for deprecating the # option and/or recommend remedial action. - 'j': ('renamed', 'journal'), - 'wtimeout': ('renamed', 'wTimeoutMS'), + # For example: + # 'wtimeout': ('renamed', 'wTimeoutMS'), } # Augment the option validator map with pymongo-specific option information. diff --git a/test/test_common.py b/test/test_common.py index c6ab4182d4..87b5ce4c9d 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -110,7 +110,7 @@ def test_write_concern(self): c = rs_or_single_client(connect=False) self.assertEqual(WriteConcern(), c.write_concern) - c = rs_or_single_client(connect=False, w=2, wtimeout=1000) + c = rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) wc = WriteConcern(w=2, wtimeout=1000) self.assertEqual(wc, c.write_concern) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index bd862642c9..de98b9a411 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -476,8 +476,8 @@ def test_tlsinsecure_simple(self): def test_normalize_options(self): # check that options are converted to their internal names correctly. - uri = ("mongodb://example.com/?ssl=true&appname=myapp&wtimeout=10") - res = {"tls": True, "appname": "myapp", "wtimeoutms": 10} + uri = ("mongodb://example.com/?ssl=true&appname=myapp") + res = {"tls": True, "appname": "myapp"} self.assertEqual(res, parse_uri(uri)["options"]) def test_unquote_after_parsing(self): From fa9531b4bf4cde8bc1464f5b014140e0c96286f0 Mon Sep 17 00:00:00 2001 From: henrifroese Date: Mon, 23 Aug 2021 17:45:34 +0200 Subject: [PATCH 0446/2111] PYTHON-2824 Make GridOut implement full io.IOBase spec (#677) Make GridOut inherit from io.IOBase to be a fully "file-like" object (https://docs.python.org/3/glossary.html#term-file-like-object). Implement missing methods `readlines`, `writelines`, `writable`, `fileno`, `flush`, `isatty`, `truncate`, and property `closed`, following the spec (https://docs.python.org/3/library/io.html#io.IOBase.writable). Iterating over GridOut previously returned chunks, but IOBase specifies that lines should be returned. Thus, the `GridOutIterator` returning chunks is removed and GridOut simply uses the existing IOBase iterator implementation (returning `self` in `__iter__` and using `readline` in `__next__`). Additionally, iterating over GridOut previously did not move the "file pointer" along, i.e. `next(iter(some_grid_out_object))` always gave the same result (the first chunk of the file) as it would create a new iterator starting at the top of the file. This is now fixed as well, so a first call to `next(iter(some_grid_out_object))` gives the first line, and subsequent calls return the subsequent lines. --- doc/changelog.rst | 4 ++ doc/contributors.rst | 3 +- gridfs/grid_file.py | 52 ++++++++++++++++----- test/test_grid_file.py | 102 ++++++++++++++++++++++++++++++++++++++--- 4 files changed, 142 insertions(+), 19 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3bfb2f6f28..449a05022d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -126,6 +126,10 @@ Changes in Version 3.12.0 - PyMongoCrypt 1.1.0 or later is now required for client side field level encryption support. +- Iterating over :class:`gridfs.grid_file.GridOut` now moves through + the file line by line instead of chunk by chunk, and does not + restart at the top for subsequent iterations on the same object. + Call `seek(0)` to reset the iterator. Notable improvements .................... diff --git a/doc/contributors.rst b/doc/contributors.rst index c763c424c5..c99c4b80e2 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -88,4 +88,5 @@ The following is a list of people who have contributed to - Terence Honles (terencehonles) - Paul Fisher (thetorpedodog) - Julius Park (juliusgeo) -- Khanh Nguyen (KN99HN) \ No newline at end of file +- Khanh Nguyen (KN99HN) +- Henri Froese (henrifroese) \ No newline at end of file diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index e5ac598e75..606cf98d1c 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -421,7 +421,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): return False -class GridOut(object): +class GridOut(io.IOBase): """Class to read data out of GridFS. """ def __init__(self, root_collection, file_id=None, file_document=None, @@ -465,6 +465,8 @@ def __init__(self, root_collection, file_id=None, file_document=None, root_collection = _clear_entity_type_registry(root_collection) + super().__init__() + self.__chunks = root_collection.chunks self.__files = root_collection.files self.__file_id = file_id @@ -656,33 +658,40 @@ def seekable(self): def __iter__(self): """Return an iterator over all of this file's data. - The iterator will return chunk-sized instances of - :class:`str` (:class:`bytes` in python 3). This can be - useful when serving files using a webserver that handles - such an iterator efficiently. - - .. note:: - This is different from :py:class:`io.IOBase` which iterates over - *lines* in the file. Use :meth:`GridOut.readline` to read line by - line instead of chunk by chunk. + The iterator will return lines (delimited by b'\n') of + :class:`bytes`. This can be useful when serving files + using a webserver that handles such an iterator efficiently. .. versionchanged:: 3.8 The iterator now raises :class:`CorruptGridFile` when encountering any truncated, missing, or extra chunk in a file. The previous behavior was to only raise :class:`CorruptGridFile` on a missing chunk. + + .. versionchanged:: 4.0 + The iterator now iterates over *lines* in the file, instead + of chunks, to conform to the base class :py:class:`io.IOBase`. + Use :meth:`GridOut.readchunk` to read chunk by chunk instead + of line by line. """ - return GridOutIterator(self, self.__chunks, self._session) + return self def close(self): """Make GridOut more generically file-like.""" if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None + super().close() def write(self, value): raise io.UnsupportedOperation('write') + def writelines(self, lines): + raise io.UnsupportedOperation('writelines') + + def writable(self): + return False + def __enter__(self): """Makes it possible to use :class:`GridOut` files with the context manager protocol. @@ -696,6 +705,27 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False + def fileno(self): + raise io.UnsupportedOperation('fileno') + + def flush(self): + # GridOut is read-only, so flush does nothing. + pass + + def isatty(self): + return False + + def truncate(self, size=None): + # See https://docs.python.org/3/library/io.html#io.IOBase.writable + # for why truncate has to raise. + raise io.UnsupportedOperation('truncate') + + # Override IOBase.__del__ otherwise it will lead to __getattr__ on + # __IOBase_closed which calls _ensure_file and potentially performs I/O. + # We cannot do I/O in __del__ since it can lead to a deadlock. + def __del__(self): + pass + class _GridOutChunkIterator(object): """Iterates over a file's chunks using a single cursor. diff --git a/test/test_grid_file.py b/test/test_grid_file.py index d95e6a2429..37a1f905a7 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -18,6 +18,7 @@ """ import datetime +import io import sys import zipfile @@ -323,6 +324,20 @@ def test_close(self): self.assertRaises(ValueError, f.write, "test") f.close() + def test_closed(self): + f = GridIn(self.db.fs, chunkSize=5) + f.write(b"Hello world.\nHow are you?") + f.close() + + g = GridOut(self.db.fs, f._id) + self.assertFalse(g.closed) + g.read(1) + self.assertFalse(g.closed) + g.read(100) + self.assertFalse(g.closed) + g.close() + self.assertTrue(g.closed) + def test_multi_chunk_file(self): random_string = b'a' * (DEFAULT_CHUNK_SIZE + 1000) @@ -447,6 +462,58 @@ def test_readline(self): self.assertEqual(b"e", g.readline(1)) self.assertEqual(b"llo world,\n", g.readline()) + def test_readlines(self): + f = GridIn(self.db.fs, chunkSize=5) + f.write((b"""Hello world, +How are you? +Hope all is well. +Bye""")) + f.close() + + # Try read(), then readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"He", g.read(2)) + self.assertEqual([b"llo world,\n", b"How are you?\n"], g.readlines(11)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], g.readlines()) + self.assertEqual([], g.readlines()) + + # Try readline(), then readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"Hello world,\n", g.readline()) + self.assertEqual([b"How are you?\n", b"Hope all is well.\n"], g.readlines(13)) + self.assertEqual(b"Bye", g.readline()) + self.assertEqual([], g.readlines()) + + # Only readlines(). + g = GridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines()) + + g = GridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(0)) + + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual([b"How are you?\n"], g.readlines(12)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], g.readlines(18)) + + # Try readlines() first, then read(). + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual(b"H", g.read(1)) + self.assertEqual([b"ow are you?\n", b"Hope all is well.\n"], g.readlines(29)) + self.assertEqual([b"Bye"], g.readlines(1)) + + # Try readlines() first, then readline(). + g = GridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], g.readlines(1)) + self.assertEqual(b"How are you?\n", g.readline()) + self.assertEqual([b"Hope all is well.\n"], g.readlines(17)) + self.assertEqual(b"Bye", g.readline()) + def test_iterator(self): f = GridIn(self.db.fs) f.close() @@ -454,20 +521,26 @@ def test_iterator(self): self.assertEqual([], list(g)) f = GridIn(self.db.fs) - f.write(b"hello world") + f.write(b"hello world\nhere are\nsome lines.") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"hello world"], list(g)) - self.assertEqual(b"hello", g.read(5)) - self.assertEqual([b"hello world"], list(g)) - self.assertEqual(b" worl", g.read(5)) + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + self.assertEqual(b"", g.read(5)) + self.assertEqual([], list(g)) + + g = GridOut(self.db.fs, f._id) + self.assertEqual(b"hello world\n", next(iter(g))) + self.assertEqual(b"here", g.read(4)) + self.assertEqual(b" are\n", next(iter(g))) + self.assertEqual(b"some lines", g.read(10)) + self.assertEqual(b".", next(iter(g))) + self.assertRaises(StopIteration, iter(g).__next__) f = GridIn(self.db.fs, chunk_size=2) f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"he", b"ll", b"o ", - b"wo", b"rl", b"d"], list(g)) + self.assertEqual([b"hello world"], list(g)) def test_read_unaligned_buffer_size(self): in_data = (b"This is a text that doesn't " @@ -665,6 +738,21 @@ def test_zip(self): self.assertSequenceEqual(z.namelist(), ["test.txt"]) self.assertEqual(z.read("test.txt"), b"hello world") + def test_grid_out_unsupported_operations(self): + f = GridIn(self.db.fs, chunkSize=3) + f.write(b"hello world") + f.close() + + g = GridOut(self.db.fs, f._id) + + self.assertRaises(io.UnsupportedOperation, g.writelines, [b"some", b"lines"]) + self.assertRaises(io.UnsupportedOperation, g.write, b"some text") + self.assertRaises(io.UnsupportedOperation, g.fileno) + self.assertRaises(io.UnsupportedOperation, g.truncate) + + self.assertFalse(g.writable()) + self.assertFalse(g.isatty()) + if __name__ == "__main__": unittest.main() From 2eb0df812c6a4afbdbcd12692ca8da0b4dd0c14e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 24 Aug 2021 13:36:37 -0400 Subject: [PATCH 0447/2111] PYTHON-2879 Fix get_ssl_context for CSFLE and ocsptest.py (#713) --- pymongo/client_options.py | 2 +- pymongo/encryption.py | 21 ++++++++------------- pymongo/pool.py | 2 +- pymongo/ssl_support.py | 13 ++++--------- test/test_client.py | 2 +- test/test_ssl.py | 26 ++++++++++---------------- test/test_uri_parser.py | 5 ----- tools/ocsptest.py | 17 +++++++---------- 8 files changed, 32 insertions(+), 56 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 7fdbb4cc8c..77c3f85a25 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -109,8 +109,8 @@ def _parse_ssl_options(options): certfile, passphrase, ca_certs, - allow_invalid_certificates, crlfile, + allow_invalid_certificates, allow_invalid_hostnames, disable_ocsp_endpoint_check) return ctx, allow_invalid_hostnames diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1153b17fbe..9ec2d22804 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -49,15 +49,11 @@ from pymongo.mongo_client import MongoClient from pymongo.pool import _configured_socket, PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.ssl_support import get_ssl_context, HAVE_SSL +from pymongo.ssl_support import get_ssl_context from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern from pymongo.daemon import _spawn_daemon -if HAVE_SSL: - from ssl import CERT_REQUIRED -else: - CERT_REQUIRED = None _HTTPS_PORT = 443 _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. @@ -114,14 +110,13 @@ def kms_request(self, kms_context): # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. ctx = get_ssl_context( - None, # certfile - None, # keyfile - None, # passphrase - None, # ca_certs - CERT_REQUIRED, # cert_reqs - None, # crlfile - True, # match_hostname - True) # check_ocsp_endpoint + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False) # disable_ocsp_endpoint_check opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, socket_timeout=_KMS_CONNECT_TIMEOUT, ssl_context=ctx) diff --git a/pymongo/pool.py b/pymongo/pool.py index d21a77809a..dcd3e2bad5 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -401,7 +401,7 @@ def ssl_context(self): @property def tls_allow_invalid_hostnames(self): - """Call ssl.match_hostname if cert_reqs is not ssl.CERT_NONE. + """If True skip ssl.match_hostname. """ return self.__tls_allow_invalid_hostnames diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 57361f2fa8..5826f95801 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -35,20 +35,15 @@ # at a high level. This is legacy behavior, but requires us to # import the ssl module even if we're only using it for this purpose. import ssl as _stdlibssl - from ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED + from ssl import CERT_NONE, CERT_REQUIRED HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) SSLError = _ssl.SSLError - def get_ssl_context(*args): + def get_ssl_context(certfile, passphrase, ca_certs, crlfile, + allow_invalid_certificates, allow_invalid_hostnames, + disable_ocsp_endpoint_check): """Create and return an SSLContext object.""" - (certfile, - passphrase, - ca_certs, - allow_invalid_certificates, - crlfile, - allow_invalid_hostnames, - disable_ocsp_endpoint_check) = args verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) # SSLContext.check_hostname was added in CPython 3.4. diff --git a/test/test_client.py b/test/test_client.py index 6abbd9363b..95093c59e9 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -115,7 +115,7 @@ def test_keyword_arg_defaults(self): read_preference=ReadPreference.PRIMARY, ssl=False, tlsCertificateKeyFile=None, - tlsAllowInvalidCertificates=True, # ssl.CERT_NONE + tlsAllowInvalidCertificates=True, tlsCAFile=None, connect=False, serverSelectionTimeoutMS=12000) diff --git a/test/test_ssl.py b/test/test_ssl.py index 76f4c69cfa..d6055cafbd 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -291,17 +291,13 @@ def test_cert_ssl_validation_hostname_matching(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - ctx = get_ssl_context( - None, None, None, True, None, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, True, None, False, False) + ctx = get_ssl_context(None, None, None, None, True, False, False) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, False, None, True, False) + ctx = get_ssl_context(None, None, None, None, False, True, False) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context( - None, None, None, False, None, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False) if _PY37PLUS or _HAVE_PYOPENSSL: self.assertTrue(ctx.check_hostname) else: @@ -424,8 +420,7 @@ def test_validation_with_system_ca_certs(self): **self.credentials)) def test_system_certs_config_error(self): - ctx = get_ssl_context( - None, None, None, ssl.CERT_NONE, None, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False) if ((sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr(ctx, "load_default_certs")): @@ -457,12 +452,12 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context( - None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True, True) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, + False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, None, None, True, True) + ctx = get_ssl_context(None, None, None, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) finally: @@ -479,12 +474,11 @@ def test_wincertstore(self): if not ssl_support.HAVE_WINCERTSTORE: raise SkipTest("Need wincertstore to test wincertstore.") - ctx = get_ssl_context( - None, None, None, CA_PEM, ssl.CERT_REQUIRED, None, True, True) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, None, None, True, True) + ctx = get_ssl_context(None, None, None, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index de98b9a411..f044fba5b3 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -18,11 +18,6 @@ import sys import warnings -try: - from ssl import CERT_NONE -except ImportError: - CERT_NONE = 0 - sys.path[0:0] = [""] from bson.binary import JAVA_LEGACY diff --git a/tools/ocsptest.py b/tools/ocsptest.py index b2d5c0a495..149da000ba 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -16,8 +16,6 @@ import logging import socket -from ssl import CERT_REQUIRED - from pymongo.pyopenssl_context import SSLContext from pymongo.ssl_support import get_ssl_context @@ -28,14 +26,13 @@ def check_ocsp(host, port, capath): ctx = get_ssl_context( - None, # certfile - None, # keyfile - None, # passphrase - capath, - CERT_REQUIRED, - None, # crlfile - True, # match_hostname - True) # check_ocsp_endpoint + None, # certfile + None, # passphrase + capath, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False) # disable_ocsp_endpoint_check # Ensure we're using pyOpenSSL. assert isinstance(ctx, SSLContext) From 6913738b0aebad29156248dd0446e5eed8e2fc09 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 1 Sep 2021 13:44:58 -0400 Subject: [PATCH 0448/2111] PYTHON-2808 Use Invoke-WebRequest instead certifi to workaround FLE test issue (#714) --- .evergreen/run-tests.sh | 6 ++---- test/test_encryption.py | 23 +---------------------- 2 files changed, 3 insertions(+), 26 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index efc82000f9..43e9a8b347 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -107,6 +107,8 @@ if [ -n "$TEST_ENCRYPTION" ]; then if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin $PYTHON -m pip install -U setuptools + # PYTHON-2808 Ensure this machine has the CA cert for google KMS. + powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true fi if [ -z "$LIBMONGOCRYPT_URL" ]; then @@ -149,10 +151,6 @@ if [ -n "$TEST_ENCRYPTION" ]; then . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh # Start the mock KMS servers. - if [ "$OS" = "Windows_NT" ]; then - # Remove after BUILD-13574. - python -m pip install certifi - fi pushd ${DRIVERS_TOOLS}/.evergreen/csfle python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & diff --git a/test/test_encryption.py b/test/test_encryption.py index 6a71a5a424..efd12c3728 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -55,8 +55,7 @@ from test import (unittest, client_context, IntegrationTest, - PyMongoTestCase, - SystemCertsPatcher) + PyMongoTestCase) from test.utils import (TestCreator, camel_to_snake_args, OvertCommandListener, @@ -66,26 +65,6 @@ wait_until) from test.utils_spec_runner import SpecRunner -try: - import certifi - HAVE_CERTIFI = True -except ImportError: - HAVE_CERTIFI = False - -patcher = None - - -def setUpModule(): - if sys.platform == 'win32' and HAVE_CERTIFI: - # Remove after BUILD-13574. - global patcher - patcher = SystemCertsPatcher(certifi.where()) - - -def tearDownModule(): - if patcher: - patcher.disable() - def get_client_opts(client): return client._MongoClient__options From 88e86f6f5a949e9c2ee9f4f4a2800d7770a777ad Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 8 Sep 2021 11:24:10 -0700 Subject: [PATCH 0449/2111] PYTHON-2057 Make 'name' a required argument for DriverInfo class (#718) --- doc/changelog.rst | 1 + doc/migrate-to-pymongo4.rst | 7 +++++++ pymongo/driver_info.py | 2 +- test/test_client.py | 2 ++ 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 449a05022d..6b5ffcc53a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -100,6 +100,7 @@ Breaking Changes in 4.0 - Removed :class:`bson.binary.UUIDLegacy`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. +- ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 658d537247..421e39fc92 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -725,3 +725,10 @@ pymongo.message helpers are removed Removed :meth:`pymongo.message.delete`, :meth:`pymongo.message.get_more`, :meth:`pymongo.message.insert`, :meth:`pymongo.message.kill_cursors`, :meth:`pymongo.message.query`, and :meth:`pymongo.message.update`. + + +Name is a required argument for pymongo.driver_info.DriverInfo +.............................................................. + +``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. + diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 9ffcce4872..5e0843e4df 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -26,7 +26,7 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be None to accept PyMongo's default. """ - def __new__(cls, name=None, version=None, platform=None): + def __new__(cls, name, version=None, platform=None): self = super(DriverInfo, cls).__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): diff --git a/test/test_client.py b/test/test_client.py index 95093c59e9..3584329fd2 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -305,6 +305,8 @@ def test_metadata(self): self.assertRaises(ValueError, MongoClient, appname='x' * 129) # Bad "driver" options. self.assertRaises(TypeError, DriverInfo, 'Foo', 1, 'a') + self.assertRaises(TypeError, DriverInfo, version="1", platform='a') + self.assertRaises(TypeError, DriverInfo) self.assertRaises(TypeError, MongoClient, driver=1) self.assertRaises(TypeError, MongoClient, driver='abc') self.assertRaises(TypeError, MongoClient, driver=('Foo', '1', 'a')) From d9e5666336499549aba451dc59bcbe3cfc404d23 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 8 Sep 2021 11:32:14 -0700 Subject: [PATCH 0450/2111] PYTHON-2875 Require hint with min/max queries (#712) --- doc/changelog.rst | 2 ++ doc/migrate-to-pymongo4.rst | 13 +++++++++++++ pymongo/cursor.py | 8 +++----- test/test_collection.py | 5 ++--- test/test_cursor.py | 25 ++++--------------------- 5 files changed, 24 insertions(+), 29 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 6b5ffcc53a..32b26d7449 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -100,6 +100,8 @@ Breaking Changes in 4.0 - Removed :class:`bson.binary.UUIDLegacy`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. +- The ``hint`` option is now required when using ``min`` or ``max`` queries + with :meth:`~pymongo.collection.Collection.find`. - ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. Notable improvements diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 421e39fc92..2ae78a1a17 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -591,6 +591,19 @@ can be changed to this:: show_record_id=False, ) +The hint parameter is required with min/max +........................................... + +The ``hint`` option is now required when using ``min`` or ``max`` queries +with :meth:`~pymongo.collection.Collection.find` to ensure the query utilizes +the correct index. For example, code like this:: + + cursor = coll.find({}, min={'x', min_value}) + +can be changed to this:: + + cursor = coll.find({}, min={'x', min_value}, hint=[('x', ASCENDING)]) + SONManipulator is removed ------------------------- diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 596f06b66c..5f3419b7cc 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1060,11 +1060,9 @@ def _refresh(self): if self.__id is None: # Query if (self.__min or self.__max) and not self.__hint: - warnings.warn("using a min/max query operator without " - "specifying a Cursor.hint is deprecated. A " - "hint will be required when using min/max in " - "PyMongo 4.0", - DeprecationWarning, stacklevel=3) + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index") q = self._query_class(self.__query_flags, self.__collection.database.name, self.__collection.name, diff --git a/test/test_collection.py b/test/test_collection.py index b4bde0c68b..ba6d2b544b 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1932,9 +1932,8 @@ def test_min_query(self): self.db.test.insert_many([{"x": 1}, {"x": 2}]) self.db.test.create_index("x") - cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}) - if client_context.requires_hint_with_min_max_queries: - cursor = cursor.hint("x_1") + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, + hint="x_1") docs = list(cursor) self.assertEqual(1, len(docs)) diff --git a/test/test_cursor.py b/test/test_cursor.py index d7380988ab..f5b6e49235 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -21,7 +21,6 @@ import sys import time import threading -import warnings sys.path[0:0] = [""] @@ -450,7 +449,6 @@ def test_limit(self): break self.assertRaises(InvalidOperation, a.limit, 5) - @ignore_deprecations # Ignore max without hint. def test_max(self): db = self.db db.test.drop() @@ -460,10 +458,7 @@ def test_max(self): db.test.insert_many([{"j": j, "k": j} for j in range(10)]) def find(max_spec, expected_index): - cursor = db.test.find().max(max_spec) - if client_context.requires_hint_with_min_max_queries: - cursor = cursor.hint(expected_index) - return cursor + return db.test.find().max(max_spec).hint(expected_index) cursor = find([("j", 3)], j_index) self.assertEqual(len(list(cursor)), 3) @@ -489,7 +484,6 @@ def find(max_spec, expected_index): self.assertRaises(TypeError, db.test.find().max, 10) self.assertRaises(TypeError, db.test.find().max, {"j": 10}) - @ignore_deprecations # Ignore min without hint. def test_min(self): db = self.db db.test.drop() @@ -499,10 +493,7 @@ def test_min(self): db.test.insert_many([{"j": j, "k": j} for j in range(10)]) def find(min_spec, expected_index): - cursor = db.test.find().min(min_spec) - if client_context.requires_hint_with_min_max_queries: - cursor = cursor.hint(expected_index) - return cursor + return db.test.find().min(min_spec).hint(expected_index) cursor = find([("j", 3)], j_index) self.assertEqual(len(list(cursor)), 7) @@ -528,23 +519,15 @@ def find(min_spec, expected_index): self.assertRaises(TypeError, db.test.find().min, 10) self.assertRaises(TypeError, db.test.find().min, {"j": 10}) - @client_context.require_version_max(4, 1, -1) def test_min_max_without_hint(self): coll = self.db.test j_index = [("j", ASCENDING)] coll.create_index(j_index) - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("default", DeprecationWarning) - list(coll.find().min([("j", 3)])) - self.assertIn('using a min/max query operator', str(warns[0])) - # Ensure the warning is raised with the proper stack level. - del warns[:] + with self.assertRaises(InvalidOperation): list(coll.find().min([("j", 3)])) - self.assertIn('using a min/max query operator', str(warns[0])) - del warns[:] + with self.assertRaises(InvalidOperation): list(coll.find().max([("j", 3)])) - self.assertIn('using a min/max query operator', str(warns[0])) def test_batch_size(self): db = self.db From fb38fbe35ebd41d737357214831a0bc7044fd5f4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 8 Sep 2021 11:33:41 -0700 Subject: [PATCH 0451/2111] PYTHON-2046 Change default JSONMode and dumps output from LEGACY to RELAXED (#711) --- bson/json_util.py | 107 ++++++++++++++++++++++-------------- doc/changelog.rst | 6 ++ doc/migrate-to-pymongo3.rst | 4 +- doc/migrate-to-pymongo4.rst | 7 +++ gridfs/grid_file.py | 2 +- test/test_bson_corpus.py | 3 +- test/test_json_util.py | 97 +++++++++++++++++++++----------- 7 files changed, 149 insertions(+), 77 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index 05fa0e2ec0..b9e088943a 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -17,9 +17,9 @@ This module provides two helper methods `dumps` and `loads` that wrap the native :mod:`json` methods and provide explicit BSON conversion to and from JSON. :class:`~bson.json_util.JSONOptions` provides a way to control how JSON -is emitted and parsed, with the default being the legacy PyMongo format. -:mod:`~bson.json_util` can also generate Canonical or Relaxed `Extended JSON`_ -when :const:`CANONICAL_JSON_OPTIONS` or :const:`RELAXED_JSON_OPTIONS` is +is emitted and parsed, with the default being the Relaxed Extended JSON format. +:mod:`~bson.json_util` can also generate Canonical or legacy `Extended JSON`_ +when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is provided, respectively. .. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst @@ -32,7 +32,7 @@ >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]') [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}] -Example usage (serialization): +Example usage with :const:`RELAXED_JSON_OPTIONS` (the default): .. doctest:: @@ -40,9 +40,9 @@ >>> from bson.json_util import dumps >>> dumps([{'foo': [1, 2]}, ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }", {})}, + ... {'code': Code("function x() { return 1; }")}, ... {'bin': Binary(b"\x01\x02\x03\x04")}]) - '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' + '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' Example usage (with :const:`CANONICAL_JSON_OPTIONS`): @@ -57,18 +57,18 @@ ... json_options=CANONICAL_JSON_OPTIONS) '[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' -Example usage (with :const:`RELAXED_JSON_OPTIONS`): +Example usage (with :const:`LEGACY_JSON_OPTIONS`): .. doctest:: >>> from bson import Binary, Code - >>> from bson.json_util import dumps, RELAXED_JSON_OPTIONS + >>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS >>> dumps([{'foo': [1, 2]}, ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, + ... {'code': Code("function x() { return 1; }", {})}, ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=RELAXED_JSON_OPTIONS) - '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' + ... json_options=LEGACY_JSON_OPTIONS) + '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' Alternatively, you can manually pass the `default` to :func:`json.dumps`. It won't handle :class:`~bson.binary.Binary` and :class:`~bson.code.Code` @@ -238,23 +238,27 @@ class JSONOptions(CodecOptions): .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. - .. versionadded:: 3.4 + .. versionchanged:: 4.0 + The default for `json_mode` was changed from :const:`JSONMode.LEGACY` + to :const:`JSONMode.RELAXED`. .. versionchanged:: 3.5 Accepts the optional parameter `json_mode`. + .. versionadded:: 3.4 """ - def __new__(cls, strict_number_long=False, - datetime_representation=DatetimeRepresentation.LEGACY, - strict_uuid=False, json_mode=JSONMode.LEGACY, + def __new__(cls, strict_number_long=None, + datetime_representation=None, + strict_uuid=None, json_mode=JSONMode.RELAXED, *args, **kwargs): kwargs["tz_aware"] = kwargs.get("tz_aware", True) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) if datetime_representation not in (DatetimeRepresentation.LEGACY, DatetimeRepresentation.NUMBERLONG, - DatetimeRepresentation.ISO8601): + DatetimeRepresentation.ISO8601, + None): raise ConfigurationError( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") @@ -267,17 +271,47 @@ def __new__(cls, strict_number_long=False, "or CANONICAL from JSONMode.") self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: + if strict_number_long: + raise ConfigurationError( + "Cannot specify strict_number_long=True with" + " JSONMode.RELAXED") + if datetime_representation not in (None, + DatetimeRepresentation.ISO8601): + raise ConfigurationError( + "datetime_representation must be DatetimeRepresentation." + "ISO8601 or omitted with JSONMode.RELAXED") + if strict_uuid not in (None, True): + raise ConfigurationError( + "Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = False self.datetime_representation = DatetimeRepresentation.ISO8601 self.strict_uuid = True elif self.json_mode == JSONMode.CANONICAL: + if strict_number_long not in (None, True): + raise ConfigurationError( + "Cannot specify strict_number_long=False with" + " JSONMode.RELAXED") + if datetime_representation not in ( + None, DatetimeRepresentation.NUMBERLONG): + raise ConfigurationError( + "datetime_representation must be DatetimeRepresentation." + "NUMBERLONG or omitted with JSONMode.RELAXED") + if strict_uuid not in (None, True): + raise ConfigurationError( + "Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = True self.datetime_representation = DatetimeRepresentation.NUMBERLONG self.strict_uuid = True - else: - self.strict_number_long = strict_number_long - self.datetime_representation = datetime_representation - self.strict_uuid = strict_uuid + else: # JSONMode.LEGACY + self.strict_number_long = False + self.datetime_representation = DatetimeRepresentation.LEGACY + self.strict_uuid = False + if strict_number_long is not None: + self.strict_number_long = strict_number_long + if datetime_representation is not None: + self.datetime_representation = datetime_representation + if strict_uuid is not None: + self.strict_uuid = strict_uuid return self def _arguments_repr(self): @@ -307,7 +341,7 @@ def with_options(self, **kwargs): >>> from bson.json_util import CANONICAL_JSON_OPTIONS >>> CANONICAL_JSON_OPTIONS.tz_aware True - >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=False) + >>> json_options = CANONICAL_JSON_OPTIONS.with_options(tz_aware=False, tzinfo=None) >>> json_options.tz_aware False @@ -329,15 +363,6 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -DEFAULT_JSON_OPTIONS = LEGACY_JSON_OPTIONS -"""The default :class:`JSONOptions` for JSON encoding/decoding. - -The same as :const:`LEGACY_JSON_OPTIONS`. This will change to -:const:`RELAXED_JSON_OPTIONS` in a future release. - -.. versionadded:: 3.4 -""" - CANONICAL_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.CANONICAL) """:class:`JSONOptions` for Canonical Extended JSON. @@ -354,18 +379,16 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -STRICT_JSON_OPTIONS = JSONOptions( - strict_number_long=True, - datetime_representation=DatetimeRepresentation.ISO8601, - strict_uuid=True) -"""**DEPRECATED** - :class:`JSONOptions` for MongoDB Extended JSON's *Strict -mode* encoding. +DEFAULT_JSON_OPTIONS = RELAXED_JSON_OPTIONS +"""The default :class:`JSONOptions` for JSON encoding/decoding. -.. versionadded:: 3.4 +The same as :const:`RELAXED_JSON_OPTIONS`. + +.. versionchanged:: 4.0 + Changed from :const:`LEGACY_JSON_OPTIONS` to + :const:`RELAXED_JSON_OPTIONS`. -.. versionchanged:: 3.5 - Deprecated. Use :const:`RELAXED_JSON_OPTIONS` or - :const:`CANONICAL_JSON_OPTIONS` instead. +.. versionadded:: 3.4 """ @@ -380,6 +403,10 @@ def dumps(obj, *args, **kwargs): encoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. + .. versionchanged:: 4.0 + Now outputs MongoDB Relaxed Extended JSON by default (using + :const:`DEFAULT_JSON_OPTIONS`). + .. versionchanged:: 3.4 Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ diff --git a/doc/changelog.rst b/doc/changelog.rst index 32b26d7449..828d8b0387 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -98,6 +98,12 @@ Breaking Changes in 4.0 - Removed :exc:`pymongo.errors.CertificateError`. - Removed :attr:`pymongo.GEOHAYSTACK`. - Removed :class:`bson.binary.UUIDLegacy`. +- Removed :const:`bson.json_util.STRICT_JSON_OPTIONS`. Use + :const:`~bson.json_util.RELAXED_JSON_OPTIONS` or + :const:`~bson.json_util.CANONICAL_JSON_OPTIONS` instead. +- Changed the default JSON encoding representation from legacy to relaxed. + The json_mode parameter for :const:`bson.json_util.dumps` now defaults to + :const:`~bson.json_util.RELAXED_JSON_OPTIONS`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. - The ``hint`` option is now required when using ``min`` or ``max`` queries diff --git a/doc/migrate-to-pymongo3.rst b/doc/migrate-to-pymongo3.rst index 9b30fc36f0..c1a4c490a3 100644 --- a/doc/migrate-to-pymongo3.rst +++ b/doc/migrate-to-pymongo3.rst @@ -124,9 +124,7 @@ modifier. Code like this:: # Set a 5 second select() timeout. >>> cursor = collection.find({"a": 1}, network_timeout=5) -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: +can be changed to this with PyMongo 2.9 or later:: # Set a 5 second (5000 millisecond) server side query timeout. >>> cursor = collection.find({"a": 1}, modifiers={"$maxTimeMS": 5000}) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 2ae78a1a17..2394f29caf 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -675,6 +675,13 @@ can be changed to this:: uu = uuid.uuid4() uuid_legacy = Binary.from_uuid(uu, PYTHON_LEGACY) +Default JSONMode changed from LEGACY to RELAXED +----------------------------------------------- + +Changed the default JSON encoding representation from legacy to relaxed. +The json_mode parameter for :const:`bson.json_util.dumps` now defaults to +:const:`~bson.json_util.RELAXED_JSON_OPTIONS`. + Removed features with no migration path --------------------------------------- diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 606cf98d1c..3e455dc932 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -658,7 +658,7 @@ def seekable(self): def __iter__(self): """Return an iterator over all of this file's data. - The iterator will return lines (delimited by b'\n') of + The iterator will return lines (delimited by ``b'\\n'``) of :class:`bytes`. This can be useful when serving files using a webserver that handles such an iterator efficiently. diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 6590d00a3e..98a4cf3058 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -71,7 +71,8 @@ json_options_uuid_04 = json_util.JSONOptions(json_mode=JSONMode.CANONICAL, uuid_representation=STANDARD) json_options_iso8601 = json_util.JSONOptions( - datetime_representation=json_util.DatetimeRepresentation.ISO8601) + datetime_representation=json_util.DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY) to_extjson = functools.partial(json_util.dumps, json_options=json_util.CANONICAL_JSON_OPTIONS) to_extjson_uuid_04 = functools.partial(json_util.dumps, diff --git a/test/test_json_util.py b/test/test_json_util.py index 5b6efbc636..c20c793d3a 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -24,7 +24,9 @@ from bson import json_util, EPOCH_AWARE, SON from bson.json_util import (DatetimeRepresentation, - STRICT_JSON_OPTIONS) + JSONMode, + JSONOptions, + LEGACY_JSON_OPTIONS) from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE, USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD) from bson.code import Code @@ -40,6 +42,13 @@ from test import unittest, IntegrationTest +STRICT_JSON_OPTIONS = JSONOptions( + strict_number_long=True, + datetime_representation=DatetimeRepresentation.ISO8601, + strict_uuid=True, + json_mode=JSONMode.LEGACY) + + class TestJsonUtil(unittest.TestCase): def round_tripped(self, doc, **kwargs): return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs) @@ -51,16 +60,18 @@ def test_basic(self): self.round_trip({"hello": "world"}) def test_json_options_with_options(self): - opts = json_util.JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG) + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, + json_mode=JSONMode.LEGACY) self.assertEqual( opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) opts2 = opts.with_options( - datetime_representation=DatetimeRepresentation.ISO8601) + datetime_representation=DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY) self.assertEqual( opts2.datetime_representation, DatetimeRepresentation.ISO8601) - opts = json_util.JSONOptions(strict_number_long=True) + opts = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) self.assertEqual(opts.strict_number_long, True) opts2 = opts.with_options(strict_number_long=False) self.assertEqual(opts2.strict_number_long, False) @@ -152,11 +163,17 @@ def test_datetime(self): pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)} post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)} self.assertEqual( - '{"dt": {"$date": -62135593138990}}', + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch)) self.assertEqual( - '{"dt": {"$date": 63075661010}}', + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch)) + self.assertEqual( + '{"dt": {"$date": -62135593138990}}', + json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS)) + self.assertEqual( + '{"dt": {"$date": 63075661010}}', + json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS)) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS)) @@ -164,8 +181,9 @@ def test_datetime(self): '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS)) - number_long_options = json_util.JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG) + number_long_options = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, + json_mode=JSONMode.LEGACY) self.assertEqual( '{"dt": {"$date": {"$numberLong": "63075661010"}}}', json_util.dumps(post_epoch, json_options=number_long_options)) @@ -194,14 +212,14 @@ def test_datetime(self): datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=json_util.JSONOptions(tz_aware=True, + json_options=JSONOptions(tz_aware=True, tzinfo=utc))["dt"]) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=json_util.JSONOptions(tz_aware=False))["dt"]) - self.round_trip(pre_epoch_naive, json_options=json_util.JSONOptions( + json_options=JSONOptions(tz_aware=False))["dt"]) + self.round_trip(pre_epoch_naive, json_options=JSONOptions( tz_aware=False)) # Test a non-utc timezone @@ -211,10 +229,12 @@ def test_datetime(self): self.assertEqual( '{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}', json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS)) - self.round_trip(aware_datetime, json_options=json_util.JSONOptions( + self.round_trip(aware_datetime, json_options=JSONOptions( + json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific)) - self.round_trip(aware_datetime, json_options=json_util.JSONOptions( + self.round_trip(aware_datetime, json_options=JSONOptions( datetime_representation=DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific)) def test_regex_object_hook(self): @@ -253,13 +273,18 @@ def test_regex(self): # Check order. self.assertEqual( - '{"$regex": ".*", "$options": "mx"}', + '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', json_util.dumps(Regex('.*', re.M | re.X))) self.assertEqual( - '{"$regex": ".*", "$options": "mx"}', + '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', json_util.dumps(re.compile(b'.*', re.M | re.X))) + self.assertEqual( + '{"$regex": ".*", "$options": "mx"}', + json_util.dumps(Regex('.*', re.M | re.X), + json_options=LEGACY_JSON_OPTIONS)) + def test_minkey(self): self.round_trip({"m": MinKey()}) @@ -278,26 +303,28 @@ def test_uuid(self): self.round_trip(doc) self.assertEqual( '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}', - json_util.dumps(doc)) + json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS)) self.assertEqual( '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( - doc, json_options=json_util.STRICT_JSON_OPTIONS)) + doc, json_options=STRICT_JSON_OPTIONS)) self.assertEqual( '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_util.dumps( - doc, json_options=json_util.JSONOptions( - strict_uuid=True, uuid_representation=STANDARD))) + doc, json_options=JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, + uuid_representation=STANDARD))) self.assertEqual( doc, json_util.loads( '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}')) for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}): - options = json_util.JSONOptions( - strict_uuid=True, uuid_representation=uuid_representation) + options = JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, + uuid_representation=uuid_representation) self.round_trip(doc, json_options=options) # Ignore UUID representation when decoding BSON binary subtype 4. self.assertEqual(doc, json_util.loads( @@ -307,8 +334,9 @@ def test_uuid(self): def test_uuid_uuid_rep_unspecified(self): _uuid = uuid.uuid4() - options = json_util.JSONOptions( + options = JSONOptions( strict_uuid=True, + json_mode=JSONMode.LEGACY, uuid_representation=UuidRepresentation.UNSPECIFIED) # Cannot directly encode native UUIDs with UNSPECIFIED. @@ -329,7 +357,8 @@ def test_uuid_uuid_rep_unspecified(self): doc, json_util.loads(ext_json_str, json_options=options)) # $uuid-encoded fields doc = {'uuid': Binary(_uuid.bytes, subtype=4)} - ext_json_str = json_util.dumps({'uuid': _uuid}) + ext_json_str = json_util.dumps({'uuid': _uuid}, + json_options=LEGACY_JSON_OPTIONS) self.assertEqual( doc, json_util.loads(ext_json_str, json_options=options)) @@ -350,11 +379,13 @@ def test_binary(self): self.assertEqual(type(bin), bytes) # PYTHON-443 ensure old type formats are supported - json_bin_dump = json_util.dumps(bin_type_dict) - self.assertTrue('"$type": "00"' in json_bin_dump) + json_bin_dump = json_util.dumps(bin_type_dict, + json_options=LEGACY_JSON_OPTIONS) + self.assertIn('"$type": "00"', json_bin_dump) self.assertEqual(bin_type_dict, json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}')) - json_bin_dump = json_util.dumps(md5_type_dict) + json_bin_dump = json_util.dumps(md5_type_dict, + json_options=LEGACY_JSON_OPTIONS) # Check order. self.assertEqual( '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' @@ -365,8 +396,9 @@ def test_binary(self): json_util.loads('{"md5": {"$type": 5, "$binary":' ' "IG43GK8JL9HRL4DK53HMrA=="}}')) - json_bin_dump = json_util.dumps(custom_type_dict) - self.assertTrue('"$type": "80"' in json_bin_dump) + json_bin_dump = json_util.dumps(custom_type_dict, + json_options=LEGACY_JSON_OPTIONS) + self.assertIn('"$type": "80"', json_bin_dump) self.assertEqual(custom_type_dict, json_util.loads('{"custom": {"$type": 128, "$binary":' ' "aGVsbG8="}}')) @@ -404,7 +436,8 @@ def test_numberlong(self): Int64(65535)) self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') - json_options = json_util.JSONOptions(strict_number_long=True) + json_options = JSONOptions(strict_number_long=True, + json_mode=JSONMode.LEGACY) self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) @@ -413,10 +446,10 @@ def test_loads_document_class(self): # document_class dict should always work self.assertEqual({"foo": "bar"}, json_util.loads( '{"foo": "bar"}', - json_options=json_util.JSONOptions(document_class=dict))) + json_options=JSONOptions(document_class=dict))) self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads( '{"foo": "bar", "b": 1}', - json_options=json_util.JSONOptions(document_class=SON))) + json_options=JSONOptions(document_class=SON))) class TestJsonUtilRoundtrip(IntegrationTest): From 7a4b617b5dedd41cd63bc8cde87ed2906b94e42a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 9 Sep 2021 18:32:53 -0700 Subject: [PATCH 0452/2111] PYTHON-2883 Regex decoding error tests in top.json have unexpected, invalid syntax (#721) --- bson/json_util.py | 6 +++++- test/bson_corpus/top.json | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index b9e088943a..5498bf0442 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -666,7 +666,11 @@ def _parse_canonical_regex(doc): if len(regex) != 2: raise TypeError('Bad $regularExpression must include only "pattern"' 'and "options" components: %s' % (doc,)) - return Regex(regex['pattern'], regex['options']) + opts = regex['options'] + if not isinstance(opts, str): + raise TypeError('Bad $regularExpression options, options must be ' + 'string, was type %s' % (type(opts))) + return Regex(regex['pattern'], opts) def _parse_canonical_dbref(doc): diff --git a/test/bson_corpus/top.json b/test/bson_corpus/top.json index ce7d4cdf9b..fe5be0eaef 100644 --- a/test/bson_corpus/top.json +++ b/test/bson_corpus/top.json @@ -92,11 +92,11 @@ }, { "description": "Bad $regularExpression (pattern is number, not string)", - "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": 42, \"$options\" : \"\"}}}" + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": 42, \"options\" : \"\"}}}" }, { "description": "Bad $regularExpression (options are number, not string)", - "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": \"a\", \"$options\" : 0}}}" + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": \"a\", \"options\" : 0}}}" }, { "description" : "Bad $regularExpression (missing pattern field)", From 90d4c6f19fb466e281e177c0f2a9fa0fe75e1992 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Sep 2021 10:23:27 -0700 Subject: [PATCH 0453/2111] PYTHON-2820 Test serialization of BSON with embedded null bytes in strings (#723) --- test/bson_corpus/document.json | 4 ++++ test/bson_corpus/regex.json | 4 ++-- test/bson_corpus/top.json | 21 ++++++++++++++++++++- test/test_bson_corpus.py | 12 +++++++++--- 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/test/bson_corpus/document.json b/test/bson_corpus/document.json index 10bf637d6d..698e7ae90a 100644 --- a/test/bson_corpus/document.json +++ b/test/bson_corpus/document.json @@ -51,6 +51,10 @@ { "description": "Invalid subdocument: bad string length in field", "bson": "1C00000003666F6F001200000002626172000500000062617A000000" + }, + { + "description": "Null byte in sub-document key", + "bson": "150000000378000D00000010610000010000000000" } ] } diff --git a/test/bson_corpus/regex.json b/test/bson_corpus/regex.json index c62b019cdf..223802169d 100644 --- a/test/bson_corpus/regex.json +++ b/test/bson_corpus/regex.json @@ -54,11 +54,11 @@ ], "decodeErrors": [ { - "description": "embedded null in pattern", + "description": "Null byte in pattern string", "bson": "0F0000000B610061006300696D0000" }, { - "description": "embedded null in flags", + "description": "Null byte in flags string", "bson": "100000000B61006162630069006D0000" } ] diff --git a/test/bson_corpus/top.json b/test/bson_corpus/top.json index fe5be0eaef..9c649b5e3f 100644 --- a/test/bson_corpus/top.json +++ b/test/bson_corpus/top.json @@ -79,6 +79,10 @@ { "description": "Document truncated mid-key", "bson": "1200000002666F" + }, + { + "description": "Null byte in document key", + "bson": "0D000000107800000100000000" } ], "parseErrors": [ @@ -241,7 +245,22 @@ { "description": "Bad DBpointer (extra field)", "string": "{\"a\": {\"$dbPointer\": {\"a\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}, \"c\": {\"$numberInt\": \"2\"}, \"$ref\": \"b\"}}}" + }, + { + "description" : "Null byte in document key", + "string" : "{\"a\\u0000\": 1 }" + }, + { + "description" : "Null byte in sub-document key", + "string" : "{\"a\" : {\"b\\u0000\": 1 }}" + }, + { + "description": "Null byte in $regularExpression pattern", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\\u0000\", \"options\" : \"i\"}}}" + }, + { + "description": "Null byte in $regularExpression options", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\", \"options\" : \"i\\u0000\"}}}" } - ] } diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 98a4cf3058..68703cc701 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -31,7 +31,7 @@ from bson.codec_options import CodecOptions from bson.decimal128 import Decimal128 from bson.dbref import DBRef -from bson.errors import InvalidBSON, InvalidId +from bson.errors import InvalidBSON, InvalidDocument, InvalidId from bson.json_util import JSONMode from bson.son import SON @@ -51,6 +51,8 @@ # This variant of $numberLong may have been generated by an old version # of mongoexport. 'Bad $numberLong (number, not string)', + # We parse Regex flags with extra characters, including nulls. + 'Null byte in $regularExpression options', ]) _DEPRECATED_BSON_TYPES = { @@ -198,10 +200,14 @@ def run_test(self): decode_extjson(parse_error_case['string']) else: try: - decode_extjson(parse_error_case['string']) + doc = decode_extjson(parse_error_case['string']) + # Null bytes are validated when encoding to BSON. + if 'Null' in description: + to_bson(doc) raise AssertionError('exception not raised for test ' 'case: ' + description) - except (ValueError, KeyError, TypeError, InvalidId): + except (ValueError, KeyError, TypeError, InvalidId, + InvalidDocument): pass elif bson_type == '0x05': try: From afa3997bb21c6a0858ebb6669905aa6abe7a6364 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Sep 2021 11:03:25 -0700 Subject: [PATCH 0454/2111] PYTHON-2680 Breaking changes to DBRef BSON+JSON decoding (#722) Implement DBRef spec version 1.0 tests. --- bson/__init__.py | 5 +- bson/_cbsonmodule.c | 119 ++++++++++-------- bson/json_util.py | 11 +- doc/changelog.rst | 9 ++ doc/migrate-to-pymongo4.rst | 13 ++ test/bson_corpus/binary.json | 19 ++- test/bson_corpus/code.json | 26 ++-- test/bson_corpus/dbref.json | 22 +++- test/bson_corpus/double.json | 4 +- test/bson_corpus/symbol.json | 14 +-- test/crud/unified/aggregate-merge.json | 2 +- .../bulkWrite-arrayFilters-clientError.json | 2 +- test/crud/unified/bulkWrite-arrayFilters.json | 2 +- .../bulkWrite-delete-hint-clientError.json | 2 +- .../bulkWrite-delete-hint-serverError.json | 2 +- test/crud/unified/bulkWrite-delete-hint.json | 2 +- .../bulkWrite-update-hint-clientError.json | 2 +- .../bulkWrite-update-hint-serverError.json | 2 +- test/crud/unified/bulkWrite-update-hint.json | 2 +- .../unified/deleteMany-hint-clientError.json | 2 +- .../unified/deleteMany-hint-serverError.json | 2 +- test/crud/unified/deleteMany-hint.json | 2 +- .../unified/deleteOne-hint-clientError.json | 2 +- .../unified/deleteOne-hint-serverError.json | 2 +- test/crud/unified/deleteOne-hint.json | 2 +- .../find-allowdiskuse-clientError.json | 2 +- .../find-allowdiskuse-serverError.json | 2 +- test/crud/unified/find-allowdiskuse.json | 2 +- .../findOneAndDelete-hint-clientError.json | 2 +- .../findOneAndDelete-hint-serverError.json | 2 +- test/crud/unified/findOneAndDelete-hint.json | 2 +- .../findOneAndReplace-hint-clientError.json | 2 +- .../findOneAndReplace-hint-serverError.json | 2 +- test/crud/unified/findOneAndReplace-hint.json | 2 +- .../findOneAndUpdate-hint-clientError.json | 2 +- .../findOneAndUpdate-hint-serverError.json | 2 +- test/crud/unified/findOneAndUpdate-hint.json | 2 +- test/crud/unified/replaceOne-hint.json | 2 +- .../unified/updateMany-hint-clientError.json | 2 +- .../unified/updateMany-hint-serverError.json | 2 +- test/crud/unified/updateMany-hint.json | 2 +- .../unified/updateOne-hint-clientError.json | 2 +- .../unified/updateOne-hint-serverError.json | 2 +- test/crud/unified/updateOne-hint.json | 2 +- test/crud/unified/updateWithPipelines.json | 2 +- test/test_bson.py | 3 +- test/test_bson_corpus.py | 54 +++++--- test/test_dbref.py | 104 +++++++++++++++ 48 files changed, 333 insertions(+), 140 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 4a511f53ca..b4e6aecdc3 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -202,7 +202,10 @@ def _get_object(data, view, position, obj_end, opts, dummy): obj = _elements_to_dict(data, view, position + 4, end, opts) position += obj_size - if "$ref" in obj: + # If DBRef validation fails, return a normal doc. + if (isinstance(obj.get('$ref'), str) and + "$id" in obj and + isinstance(obj.get('$db'), (str, type(None)))): return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 2eb9e992d5..67ee01c722 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1506,6 +1506,71 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { return result; } +/* + * Hook for optional decoding BSON documents to DBRef. + */ +static PyObject *_dbref_hook(PyObject* self, PyObject* value) { + struct module_state *state = GETSTATE(self); + PyObject* dbref = NULL; + PyObject* dbref_type = NULL; + PyObject* ref = NULL; + PyObject* id = NULL; + PyObject* database = NULL; + PyObject* ret = NULL; + int db_present = 0; + + /* Decoding for DBRefs */ + if (PyMapping_HasKeyString(value, "$ref") && PyMapping_HasKeyString(value, "$id")) { /* DBRef */ + ref = PyMapping_GetItemString(value, "$ref"); + /* PyMapping_GetItemString returns NULL to indicate error. */ + if (!ref) { + goto invalid; + } + id = PyMapping_GetItemString(value, "$id"); + /* PyMapping_GetItemString returns NULL to indicate error. */ + if (!id) { + goto invalid; + } + + if (PyMapping_HasKeyString(value, "$db")) { + database = PyMapping_GetItemString(value, "$db"); + if (!database) { + goto invalid; + } + db_present = 1; + } else { + database = Py_None; + Py_INCREF(database); + } + + // check types + if (!(PyUnicode_Check(ref) && (database == Py_None || PyUnicode_Check(database)))) { + ret = value; + goto invalid; + } + + PyMapping_DelItemString(value, "$ref"); + PyMapping_DelItemString(value, "$id"); + if (db_present) { + PyMapping_DelItemString(value, "$db"); + } + + if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { + dbref = PyObject_CallFunctionObjArgs(dbref_type, ref, id, database, value, NULL); + Py_DECREF(value); + ret = dbref; + } + } else { + ret = value; + } +invalid: + Py_XDECREF(dbref_type); + Py_XDECREF(ref); + Py_XDECREF(id); + Py_XDECREF(database); + return ret; +} + static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned* position, unsigned char type, unsigned max, const codec_options_t* options) { @@ -1552,7 +1617,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 3: { - PyObject* collection; uint32_t size; if (max < 4) { @@ -1585,55 +1649,10 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } - /* Decoding for DBRefs */ - if (PyMapping_HasKeyString(value, "$ref")) { /* DBRef */ - PyObject* dbref = NULL; - PyObject* dbref_type; - PyObject* id; - PyObject* database; - - collection = PyMapping_GetItemString(value, "$ref"); - /* PyMapping_GetItemString returns NULL to indicate error. */ - if (!collection) { - goto invalid; - } - PyMapping_DelItemString(value, "$ref"); - - if (PyMapping_HasKeyString(value, "$id")) { - id = PyMapping_GetItemString(value, "$id"); - if (!id) { - Py_DECREF(collection); - goto invalid; - } - PyMapping_DelItemString(value, "$id"); - } else { - id = Py_None; - Py_INCREF(id); - } - - if (PyMapping_HasKeyString(value, "$db")) { - database = PyMapping_GetItemString(value, "$db"); - if (!database) { - Py_DECREF(collection); - Py_DECREF(id); - goto invalid; - } - PyMapping_DelItemString(value, "$db"); - } else { - database = Py_None; - Py_INCREF(database); - } - - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - dbref = PyObject_CallFunctionObjArgs(dbref_type, collection, id, database, value, NULL); - Py_DECREF(dbref_type); - } - Py_DECREF(value); - value = dbref; - - Py_DECREF(id); - Py_DECREF(collection); - Py_DECREF(database); + /* Hook for DBRefs */ + value = _dbref_hook(self, value); + if (!value) { + goto invalid; } *position += size; diff --git a/bson/json_util.py b/bson/json_util.py index 5498bf0442..fe0bfe0699 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -121,9 +121,6 @@ "x": re.X, } -# Dollar-prefixed keys which may appear in DBRefs. -_DBREF_KEYS = frozenset(['$id', '$ref', '$db']) - class DatetimeRepresentation: LEGACY = 0 @@ -463,7 +460,9 @@ def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS): def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): if "$oid" in dct: return _parse_canonical_oid(dct) - if "$ref" in dct: + if (isinstance(dct.get('$ref'), str) and + "$id" in dct and + isinstance(dct.get('$db'), (str, type(None)))): return _parse_canonical_dbref(dct) if "$date" in dct: return _parse_canonical_datetime(dct, json_options) @@ -675,10 +674,6 @@ def _parse_canonical_regex(doc): def _parse_canonical_dbref(doc): """Decode a JSON DBRef to bson.dbref.DBRef.""" - for key in doc: - if key.startswith('$') and key not in _DBREF_KEYS: - # Other keys start with $, so dct cannot be parsed as a DBRef. - return doc return DBRef(doc.pop('$ref'), doc.pop('$id'), database=doc.pop('$db', None), **doc) diff --git a/doc/changelog.rst b/doc/changelog.rst index 828d8b0387..d7a68436e2 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -104,6 +104,14 @@ Breaking Changes in 4.0 - Changed the default JSON encoding representation from legacy to relaxed. The json_mode parameter for :const:`bson.json_util.dumps` now defaults to :const:`~bson.json_util.RELAXED_JSON_OPTIONS`. +- Changed the BSON and JSON decoding behavior of :class:`~bson.dbref.DBRef` + to match the behavior outlined in the `DBRef specification`_ version 1.0. + Specifically, PyMongo now only decodes a subdocument into a + :class:`~bson.dbref.DBRef` if and only if, it contains both ``$ref`` and + ``$id`` fields and the ``$ref``, ``$id``, and ``$db`` fields are of the + correct type. Otherwise the document is returned as normal. Previously, any + subdocument containing a ``$ref`` field would be decoded as a + :class:`~bson.dbref.DBRef`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. - The ``hint`` option is now required when using ``min`` or ``max`` queries @@ -123,6 +131,7 @@ See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 +.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst Changes in Version 3.12.0 ------------------------- diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 2394f29caf..0e512da774 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -752,3 +752,16 @@ Name is a required argument for pymongo.driver_info.DriverInfo ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. +DBRef BSON/JSON decoding behavior +................................. + +Changed the BSON and JSON decoding behavior of :class:`~bson.dbref.DBRef` +to match the behavior outlined in the `DBRef specification`_ version 1.0. +Specifically, PyMongo now only decodes a subdocument into a +:class:`~bson.dbref.DBRef` if and only if, it contains both ``$ref`` and +``$id`` fields and the ``$ref``, ``$id``, and ``$db`` fields are of the +correct type. Otherwise the document is returned as normal. Previously, any +subdocument containing a ``$ref`` field would be decoded as a +:class:`~bson.dbref.DBRef`. + +.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst diff --git a/test/bson_corpus/binary.json b/test/bson_corpus/binary.json index 324c56abde..beb2e07a70 100644 --- a/test/bson_corpus/binary.json +++ b/test/bson_corpus/binary.json @@ -50,6 +50,11 @@ "canonical_bson": "1D000000057800100000000573FFD26444B34C6990E8E7D1DFC035D400", "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"05\"}}}" }, + { + "description": "subtype 0x07", + "canonical_bson": "1D000000057800100000000773FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"07\"}}}" + }, { "description": "subtype 0x80", "canonical_bson": "0F0000000578000200000080FFFF00", @@ -94,8 +99,20 @@ "string": "{\"x\" : { \"$uuid\" : { \"data\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}}" }, { - "description": "$uuid invalid value", + "description": "$uuid invalid value--too short", "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-90e8-e7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too long", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4-789e4\"}}" + }, + { + "description": "$uuid invalid value--misplaced hyphens", + "string": "{\"x\" : { \"$uuid\" : \"73ff-d26444b-34c6-990e8e-7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too many hyphens", + "string": "{\"x\" : { \"$uuid\" : \"----d264-44b3-4--9-90e8-e7d1dfc0----\"}}" } ] } diff --git a/test/bson_corpus/code.json b/test/bson_corpus/code.json index 6f37349ad0..b8482b2541 100644 --- a/test/bson_corpus/code.json +++ b/test/bson_corpus/code.json @@ -20,48 +20,48 @@ }, { "description": "two-byte UTF-8 (\u00e9)", - "canonical_bson": "190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", - "canonical_extjson": "{\"a\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}" + "canonical_bson": "190000000D61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}}" }, { "description": "three-byte UTF-8 (\u2606)", - "canonical_bson": "190000000261000D000000E29886E29886E29886E298860000", - "canonical_extjson": "{\"a\" : \"\\u2606\\u2606\\u2606\\u2606\"}" + "canonical_bson": "190000000D61000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u2606\\u2606\\u2606\\u2606\"}}" }, { "description": "Embedded nulls", - "canonical_bson": "190000000261000D0000006162006261620062616261620000", - "canonical_extjson": "{\"a\" : \"ab\\u0000bab\\u0000babab\"}" + "canonical_bson": "190000000D61000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"ab\\u0000bab\\u0000babab\"}}" } ], "decodeErrors": [ { "description": "bad code string length: 0 (but no 0x00 either)", - "bson": "0C0000000261000000000000" + "bson": "0C0000000D61000000000000" }, { "description": "bad code string length: -1", - "bson": "0C000000026100FFFFFFFF00" + "bson": "0C0000000D6100FFFFFFFF00" }, { "description": "bad code string length: eats terminator", - "bson": "10000000026100050000006200620000" + "bson": "100000000D6100050000006200620000" }, { "description": "bad code string length: longer than rest of document", - "bson": "120000000200FFFFFF00666F6F6261720000" + "bson": "120000000D00FFFFFF00666F6F6261720000" }, { "description": "code string is not null-terminated", - "bson": "1000000002610004000000616263FF00" + "bson": "100000000D610004000000616263FF00" }, { "description": "empty code string, but extra null", - "bson": "0E00000002610001000000000000" + "bson": "0E0000000D610001000000000000" }, { "description": "invalid UTF-8", - "bson": "0E00000002610002000000E90000" + "bson": "0E0000000D610002000000E90000" } ] } diff --git a/test/bson_corpus/dbref.json b/test/bson_corpus/dbref.json index 1fe12c6f68..41c0b09d0e 100644 --- a/test/bson_corpus/dbref.json +++ b/test/bson_corpus/dbref.json @@ -1,5 +1,5 @@ { - "description": "DBRef", + "description": "Document type (DBRef sub-documents)", "bson_type": "0x03", "valid": [ { @@ -26,6 +26,26 @@ "description": "Document with key names similar to those of a DBRef", "canonical_bson": "3e0000000224726566000c0000006e6f742d612d646272656600072469640058921b3e6e32ab156a22b59e022462616e616e6100050000007065656c0000", "canonical_extjson": "{\"$ref\": \"not-a-dbref\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$banana\": \"peel\"}" + }, + { + "description": "DBRef with additional dollar-prefixed and dotted fields", + "canonical_bson": "48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e10612e62000100000010246300010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"a.b\": {\"$numberInt\": \"1\"}, \"$c\": {\"$numberInt\": \"1\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $id is missing", + "canonical_bson": "26000000036462726566001a0000000224726566000b000000636f6c6c656374696f6e000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\"}}" + }, + { + "description": "Sub-document resembles DBRef but $ref is not a string", + "canonical_bson": "2c000000036462726566002000000010247265660001000000072469640058921b3e6e32ab156a22b59e0000", + "canonical_extjson": "{\"dbref\": {\"$ref\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $db is not a string", + "canonical_bson": "4000000003646272656600340000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e1024646200010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$db\": {\"$numberInt\": \"1\"}}}" } ] } diff --git a/test/bson_corpus/double.json b/test/bson_corpus/double.json index 7be4ff45e6..d5b8fb3d7e 100644 --- a/test/bson_corpus/double.json +++ b/test/bson_corpus/double.json @@ -30,13 +30,13 @@ { "description": "1.2345678921232E+18", "canonical_bson": "100000000164002a1bf5f41022b14300", - "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678921232e+18\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678921232E+18\"}}", "relaxed_extjson": "{\"d\" : 1.2345678921232E+18}" }, { "description": "-1.2345678921232E+18", "canonical_bson": "100000000164002a1bf5f41022b1c300", - "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678921232e+18\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678921232E+18\"}}", "relaxed_extjson": "{\"d\" : -1.2345678921232E+18}" }, { diff --git a/test/bson_corpus/symbol.json b/test/bson_corpus/symbol.json index 4e46cb9511..3dd3577ebd 100644 --- a/test/bson_corpus/symbol.json +++ b/test/bson_corpus/symbol.json @@ -50,31 +50,31 @@ "decodeErrors": [ { "description": "bad symbol length: 0 (but no 0x00 either)", - "bson": "0C0000000261000000000000" + "bson": "0C0000000E61000000000000" }, { "description": "bad symbol length: -1", - "bson": "0C000000026100FFFFFFFF00" + "bson": "0C0000000E6100FFFFFFFF00" }, { "description": "bad symbol length: eats terminator", - "bson": "10000000026100050000006200620000" + "bson": "100000000E6100050000006200620000" }, { "description": "bad symbol length: longer than rest of document", - "bson": "120000000200FFFFFF00666F6F6261720000" + "bson": "120000000E00FFFFFF00666F6F6261720000" }, { "description": "symbol is not null-terminated", - "bson": "1000000002610004000000616263FF00" + "bson": "100000000E610004000000616263FF00" }, { "description": "empty symbol, but extra null", - "bson": "0E00000002610001000000000000" + "bson": "0E0000000E610001000000000000" }, { "description": "invalid UTF-8", - "bson": "0E00000002610002000000E90000" + "bson": "0E0000000E610002000000E90000" } ] } diff --git a/test/crud/unified/aggregate-merge.json b/test/crud/unified/aggregate-merge.json index c34e93a699..ac61ceb8a6 100644 --- a/test/crud/unified/aggregate-merge.json +++ b/test/crud/unified/aggregate-merge.json @@ -1,6 +1,6 @@ { "description": "aggregate-merge", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.1.11" diff --git a/test/crud/unified/bulkWrite-arrayFilters-clientError.json b/test/crud/unified/bulkWrite-arrayFilters-clientError.json index 6073890dd3..63815e3233 100644 --- a/test/crud/unified/bulkWrite-arrayFilters-clientError.json +++ b/test/crud/unified/bulkWrite-arrayFilters-clientError.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-arrayFilters-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.5.5" diff --git a/test/crud/unified/bulkWrite-arrayFilters.json b/test/crud/unified/bulkWrite-arrayFilters.json index 4d7b4b7947..70ee014f7a 100644 --- a/test/crud/unified/bulkWrite-arrayFilters.json +++ b/test/crud/unified/bulkWrite-arrayFilters.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-arrayFilters", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.5.6" diff --git a/test/crud/unified/bulkWrite-delete-hint-clientError.json b/test/crud/unified/bulkWrite-delete-hint-clientError.json index c55067be22..2961b55dc0 100644 --- a/test/crud/unified/bulkWrite-delete-hint-clientError.json +++ b/test/crud/unified/bulkWrite-delete-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-delete-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.3.99" diff --git a/test/crud/unified/bulkWrite-delete-hint-serverError.json b/test/crud/unified/bulkWrite-delete-hint-serverError.json index 30b9010ac3..fa99522093 100644 --- a/test/crud/unified/bulkWrite-delete-hint-serverError.json +++ b/test/crud/unified/bulkWrite-delete-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-delete-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.4.0", diff --git a/test/crud/unified/bulkWrite-delete-hint.json b/test/crud/unified/bulkWrite-delete-hint.json index 31f3865323..9fcdecefd7 100644 --- a/test/crud/unified/bulkWrite-delete-hint.json +++ b/test/crud/unified/bulkWrite-delete-hint.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-delete-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.4" diff --git a/test/crud/unified/bulkWrite-update-hint-clientError.json b/test/crud/unified/bulkWrite-update-hint-clientError.json index 68a92065aa..d5eb71c29e 100644 --- a/test/crud/unified/bulkWrite-update-hint-clientError.json +++ b/test/crud/unified/bulkWrite-update-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-update-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.3.99" diff --git a/test/crud/unified/bulkWrite-update-hint-serverError.json b/test/crud/unified/bulkWrite-update-hint-serverError.json index 2a9a6795ca..b0f7e1b381 100644 --- a/test/crud/unified/bulkWrite-update-hint-serverError.json +++ b/test/crud/unified/bulkWrite-update-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-update-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.4.0", diff --git a/test/crud/unified/bulkWrite-update-hint.json b/test/crud/unified/bulkWrite-update-hint.json index b8445d80d4..4206359891 100644 --- a/test/crud/unified/bulkWrite-update-hint.json +++ b/test/crud/unified/bulkWrite-update-hint.json @@ -1,6 +1,6 @@ { "description": "bulkWrite-update-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0" diff --git a/test/crud/unified/deleteMany-hint-clientError.json b/test/crud/unified/deleteMany-hint-clientError.json index 285f567f02..66320122b5 100644 --- a/test/crud/unified/deleteMany-hint-clientError.json +++ b/test/crud/unified/deleteMany-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "deleteMany-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.3.99" diff --git a/test/crud/unified/deleteMany-hint-serverError.json b/test/crud/unified/deleteMany-hint-serverError.json index 90bfb89fc7..88d4a65576 100644 --- a/test/crud/unified/deleteMany-hint-serverError.json +++ b/test/crud/unified/deleteMany-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "deleteMany-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.4.0", diff --git a/test/crud/unified/deleteMany-hint.json b/test/crud/unified/deleteMany-hint.json index b0cdc03048..59d903d201 100644 --- a/test/crud/unified/deleteMany-hint.json +++ b/test/crud/unified/deleteMany-hint.json @@ -1,6 +1,6 @@ { "description": "deleteMany-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.4" diff --git a/test/crud/unified/deleteOne-hint-clientError.json b/test/crud/unified/deleteOne-hint-clientError.json index b6b2932bdc..cf629f59e0 100644 --- a/test/crud/unified/deleteOne-hint-clientError.json +++ b/test/crud/unified/deleteOne-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "deleteOne-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.3.99" diff --git a/test/crud/unified/deleteOne-hint-serverError.json b/test/crud/unified/deleteOne-hint-serverError.json index 8b1398c75d..15541ed857 100644 --- a/test/crud/unified/deleteOne-hint-serverError.json +++ b/test/crud/unified/deleteOne-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "deleteOne-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.4.0", diff --git a/test/crud/unified/deleteOne-hint.json b/test/crud/unified/deleteOne-hint.json index 9e3970a540..bcc4bc2347 100644 --- a/test/crud/unified/deleteOne-hint.json +++ b/test/crud/unified/deleteOne-hint.json @@ -1,6 +1,6 @@ { "description": "deleteOne-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.4" diff --git a/test/crud/unified/find-allowdiskuse-clientError.json b/test/crud/unified/find-allowdiskuse-clientError.json index a47fd8e254..5bd954e79d 100644 --- a/test/crud/unified/find-allowdiskuse-clientError.json +++ b/test/crud/unified/find-allowdiskuse-clientError.json @@ -1,6 +1,6 @@ { "description": "find-allowdiskuse-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.0.99" diff --git a/test/crud/unified/find-allowdiskuse-serverError.json b/test/crud/unified/find-allowdiskuse-serverError.json index a7907ba254..dc58f8f0e3 100644 --- a/test/crud/unified/find-allowdiskuse-serverError.json +++ b/test/crud/unified/find-allowdiskuse-serverError.json @@ -1,6 +1,6 @@ { "description": "find-allowdiskuse-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.2", diff --git a/test/crud/unified/find-allowdiskuse.json b/test/crud/unified/find-allowdiskuse.json index 8d4cf66bf8..789bb7fbf1 100644 --- a/test/crud/unified/find-allowdiskuse.json +++ b/test/crud/unified/find-allowdiskuse.json @@ -1,6 +1,6 @@ { "description": "find-allowdiskuse", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.1" diff --git a/test/crud/unified/findOneAndDelete-hint-clientError.json b/test/crud/unified/findOneAndDelete-hint-clientError.json index d04125edd2..c6ff467866 100644 --- a/test/crud/unified/findOneAndDelete-hint-clientError.json +++ b/test/crud/unified/findOneAndDelete-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "findOneAndDelete-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "4.0.99" diff --git a/test/crud/unified/findOneAndDelete-hint-serverError.json b/test/crud/unified/findOneAndDelete-hint-serverError.json index 23c01f48f1..b874102728 100644 --- a/test/crud/unified/findOneAndDelete-hint-serverError.json +++ b/test/crud/unified/findOneAndDelete-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "findOneAndDelete-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0", diff --git a/test/crud/unified/findOneAndDelete-hint.json b/test/crud/unified/findOneAndDelete-hint.json index 0180010dc8..8b53f2bd3f 100644 --- a/test/crud/unified/findOneAndDelete-hint.json +++ b/test/crud/unified/findOneAndDelete-hint.json @@ -1,6 +1,6 @@ { "description": "findOneAndDelete-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.4" diff --git a/test/crud/unified/findOneAndReplace-hint-clientError.json b/test/crud/unified/findOneAndReplace-hint-clientError.json index c483b23028..6b07eb1f4d 100644 --- a/test/crud/unified/findOneAndReplace-hint-clientError.json +++ b/test/crud/unified/findOneAndReplace-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "findOneAndReplace-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "4.0.99" diff --git a/test/crud/unified/findOneAndReplace-hint-serverError.json b/test/crud/unified/findOneAndReplace-hint-serverError.json index e8f1c89360..7fbf5a0ea3 100644 --- a/test/crud/unified/findOneAndReplace-hint-serverError.json +++ b/test/crud/unified/findOneAndReplace-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "findOneAndReplace-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0", diff --git a/test/crud/unified/findOneAndReplace-hint.json b/test/crud/unified/findOneAndReplace-hint.json index 13ac6a9c90..d07c5921a7 100644 --- a/test/crud/unified/findOneAndReplace-hint.json +++ b/test/crud/unified/findOneAndReplace-hint.json @@ -1,6 +1,6 @@ { "description": "findOneAndReplace-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.1" diff --git a/test/crud/unified/findOneAndUpdate-hint-clientError.json b/test/crud/unified/findOneAndUpdate-hint-clientError.json index dace72b0ad..d0b51313c9 100644 --- a/test/crud/unified/findOneAndUpdate-hint-clientError.json +++ b/test/crud/unified/findOneAndUpdate-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "findOneAndUpdate-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "4.0.99" diff --git a/test/crud/unified/findOneAndUpdate-hint-serverError.json b/test/crud/unified/findOneAndUpdate-hint-serverError.json index 1413ced2e3..99fd9938f8 100644 --- a/test/crud/unified/findOneAndUpdate-hint-serverError.json +++ b/test/crud/unified/findOneAndUpdate-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "findOneAndUpdate-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0", diff --git a/test/crud/unified/findOneAndUpdate-hint.json b/test/crud/unified/findOneAndUpdate-hint.json index 68cef18ef9..5be6d2b3e8 100644 --- a/test/crud/unified/findOneAndUpdate-hint.json +++ b/test/crud/unified/findOneAndUpdate-hint.json @@ -1,6 +1,6 @@ { "description": "findOneAndUpdate-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.3.1" diff --git a/test/crud/unified/replaceOne-hint.json b/test/crud/unified/replaceOne-hint.json index edb1ceb7c9..6926e9d8df 100644 --- a/test/crud/unified/replaceOne-hint.json +++ b/test/crud/unified/replaceOne-hint.json @@ -1,6 +1,6 @@ { "description": "replaceOne-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0" diff --git a/test/crud/unified/updateMany-hint-clientError.json b/test/crud/unified/updateMany-hint-clientError.json index 99c66c919e..5da878e293 100644 --- a/test/crud/unified/updateMany-hint-clientError.json +++ b/test/crud/unified/updateMany-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "updateMany-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.3.99" diff --git a/test/crud/unified/updateMany-hint-serverError.json b/test/crud/unified/updateMany-hint-serverError.json index cc5ecfe26c..c81f36b13c 100644 --- a/test/crud/unified/updateMany-hint-serverError.json +++ b/test/crud/unified/updateMany-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "updateMany-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.4.0", diff --git a/test/crud/unified/updateMany-hint.json b/test/crud/unified/updateMany-hint.json index e5f707fb5d..929be52994 100644 --- a/test/crud/unified/updateMany-hint.json +++ b/test/crud/unified/updateMany-hint.json @@ -1,6 +1,6 @@ { "description": "updateMany-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0" diff --git a/test/crud/unified/updateOne-hint-clientError.json b/test/crud/unified/updateOne-hint-clientError.json index 8c0ddbd1d5..d4f1a53430 100644 --- a/test/crud/unified/updateOne-hint-clientError.json +++ b/test/crud/unified/updateOne-hint-clientError.json @@ -1,6 +1,6 @@ { "description": "updateOne-hint-clientError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "maxServerVersion": "3.3.99" diff --git a/test/crud/unified/updateOne-hint-serverError.json b/test/crud/unified/updateOne-hint-serverError.json index d8a46da944..05fb033319 100644 --- a/test/crud/unified/updateOne-hint-serverError.json +++ b/test/crud/unified/updateOne-hint-serverError.json @@ -1,6 +1,6 @@ { "description": "updateOne-hint-serverError", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "3.4.0", diff --git a/test/crud/unified/updateOne-hint.json b/test/crud/unified/updateOne-hint.json index 9277c605f6..484e00757d 100644 --- a/test/crud/unified/updateOne-hint.json +++ b/test/crud/unified/updateOne-hint.json @@ -1,6 +1,6 @@ { "description": "updateOne-hint", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.2.0" diff --git a/test/crud/unified/updateWithPipelines.json b/test/crud/unified/updateWithPipelines.json index 12ae04665a..164f2f6a19 100644 --- a/test/crud/unified/updateWithPipelines.json +++ b/test/crud/unified/updateWithPipelines.json @@ -1,6 +1,6 @@ { "description": "updateWithPipelines", - "schemaVersion": "1.1", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.1.11" diff --git a/test/test_bson.py b/test/test_bson.py index ee30c8948c..89f4a11176 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -490,8 +490,7 @@ def test_bad_dbref(self): ref_only = {'ref': {'$ref': 'collection'}} id_only = {'ref': {'$id': ObjectId()}} - self.assertEqual(DBRef('collection', id=None), - decode(encode(ref_only))['ref']) + self.assertEqual(ref_only, decode(encode(ref_only))) self.assertEqual(id_only, decode(encode(id_only))) def test_bytes_as_keys(self): diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 68703cc701..7893395c60 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -40,20 +40,27 @@ _TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'bson_corpus') -_TESTS_TO_SKIP = set([ +_TESTS_TO_SKIP = { # Python cannot decode dates after year 9999. 'Y10K', -]) +} -_NON_PARSE_ERRORS = set([ +_NON_PARSE_ERRORS = { # {"$date": } is our legacy format which we still need to parse. 'Bad $date (number, not string or hash)', # This variant of $numberLong may have been generated by an old version # of mongoexport. 'Bad $numberLong (number, not string)', + # Python's UUID constructor is very permissive. + '$uuid invalid value--misplaced hyphens', # We parse Regex flags with extra characters, including nulls. 'Null byte in $regularExpression options', -]) +} + +_IMPLCIT_LOSSY_TESTS = { + # JSON decodes top-level $ref+$id as a DBRef but BSON doesn't. + 'Document with key names similar to those of a DBRef' +} _DEPRECATED_BSON_TYPES = { # Symbol @@ -128,15 +135,22 @@ def run_test(self): cEJ = valid_case['canonical_extjson'] rEJ = valid_case.get('relaxed_extjson') dEJ = valid_case.get('degenerate_extjson') + if description in _IMPLCIT_LOSSY_TESTS: + valid_case.setdefault('lossy', True) lossy = valid_case.get('lossy') + # BSON double, use lowercase 'e+' to match Python's encoding + if bson_type == '0x01': + cEJ = cEJ.replace('E+', 'e+') + decoded_bson = decode_bson(cB) if not lossy: # Make sure we can parse the legacy (default) JSON format. legacy_json = json_util.dumps( decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS) - self.assertEqual(decode_extjson(legacy_json), decoded_bson) + self.assertEqual( + decode_extjson(legacy_json), decoded_bson, description) if deprecated: if 'converted_bson' in valid_case: @@ -158,7 +172,7 @@ def run_test(self): if not (sys.platform.startswith("java") and description == 'NaN with payload'): # Test round-tripping canonical bson. - self.assertEqual(encode_bson(decoded_bson), cB) + self.assertEqual(encode_bson(decoded_bson), cB, description) self.assertJsonEqual(encode_extjson(decoded_bson), cEJ) # Test round-tripping canonical extended json. @@ -191,24 +205,24 @@ def run_test(self): binascii.unhexlify(decode_error_case['bson'].encode('utf8'))) for parse_error_case in case_spec.get('parseErrors', []): + description = parse_error_case['description'] + if description in _NON_PARSE_ERRORS: + decode_extjson(parse_error_case['string']) + continue if bson_type == '0x13': self.assertRaises( DecimalException, Decimal128, parse_error_case['string']) elif bson_type == '0x00': - description = parse_error_case['description'] - if description in _NON_PARSE_ERRORS: - decode_extjson(parse_error_case['string']) - else: - try: - doc = decode_extjson(parse_error_case['string']) - # Null bytes are validated when encoding to BSON. - if 'Null' in description: - to_bson(doc) - raise AssertionError('exception not raised for test ' - 'case: ' + description) - except (ValueError, KeyError, TypeError, InvalidId, - InvalidDocument): - pass + try: + doc = decode_extjson(parse_error_case['string']) + # Null bytes are validated when encoding to BSON. + if 'Null' in description: + to_bson(doc) + raise AssertionError('exception not raised for test ' + 'case: ' + description) + except (ValueError, KeyError, TypeError, InvalidId, + InvalidDocument): + pass elif bson_type == '0x05': try: decode_extjson(parse_error_case['string']) diff --git a/test/test_dbref.py b/test/test_dbref.py index d71c2c68eb..964947351e 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -18,6 +18,7 @@ import sys sys.path[0:0] = [""] +from bson import encode, decode from bson.dbref import DBRef from bson.objectid import ObjectId from test import unittest @@ -131,5 +132,108 @@ def test_dbref_hash(self): self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) + +# https://github.com/mongodb/specifications/blob/master/source/dbref.rst#test-plan +class TestDBRefSpec(unittest.TestCase): + def test_decoding_1_2_3(self): + for doc in [ + # 1, Valid documents MUST be decoded to a DBRef: + {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$ref": "coll0", "$id": 1}, + {"$ref": "coll0", "$id": None}, + {"$ref": "coll0", "$id": 1, "$db": "db0"}, + # 2, Valid documents with extra fields: + {"$ref": "coll0", "$id": 1, "$db": "db0", "foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo": True, "bar": False}, + {"$ref": "coll0", "$id": 1, "meta": {"foo": 1, "bar": 2}}, + {"$ref": "coll0", "$id": 1, "$foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo.bar": 0}, + # 3, Valid documents with out of order fields: + {"$id": 1, "$ref": "coll0"}, + {"$db": "db0", "$ref": "coll0", "$id": 1}, + {"foo": 1, "$id": 1, "$ref": "coll0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, + ]: + with self.subTest(doc=doc): + decoded = decode(encode({'dbref': doc})) + dbref = decoded['dbref'] + self.assertIsInstance(dbref, DBRef) + self.assertEqual(dbref.collection, doc['$ref']) + self.assertEqual(dbref.id, doc['$id']) + self.assertEqual(dbref.database, doc.get('$db')) + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + def test_decoding_4_5(self): + for doc in [ + # 4, Documents missing required fields MUST NOT be decoded to a + # DBRef: + {"$ref": "coll0"}, + {"$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$db": "db0"}, + # 5, Documents with invalid types for $ref or $db MUST NOT be + # decoded to a DBRef + {"$ref": True, "$id": 1}, + {"$ref": "coll0", "$id": 1, "$db": 1}, + ]: + with self.subTest(doc=doc): + decoded = decode(encode({'dbref': doc})) + dbref = decoded['dbref'] + self.assertIsInstance(dbref, dict) + + def test_encoding_1_2(self): + for doc in [ + # 1, Encoding DBRefs with basic fields: + {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, + {"$ref": "coll0", "$id": 1}, + {"$ref": "coll0", "$id": None}, + {"$ref": "coll0", "$id": 1, "$db": "db0"}, + # 2, Encoding DBRefs with extra, optional fields: + {"$ref": "coll0", "$id": 1, "$db": "db0", "foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo": True, "bar": False}, + {"$ref": "coll0", "$id": 1, "meta": {"foo": 1, "bar": 2}}, + {"$ref": "coll0", "$id": 1, "$foo": "bar"}, + {"$ref": "coll0", "$id": 1, "foo.bar": 0}, + ]: + with self.subTest(doc=doc): + # Decode the test input to a DBRef via a BSON roundtrip. + encoded_doc = encode({'dbref': doc}) + decoded = decode(encoded_doc) + dbref = decoded['dbref'] + self.assertIsInstance(dbref, DBRef) + # Encode the DBRef. + encoded_dbref = encode(decoded) + self.assertEqual(encoded_dbref, encoded_doc) + # Ensure extra fields are present. + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + def test_encoding_3(self): + for doc in [ + # 3, Encoding DBRefs re-orders any out of order fields during + # decoding: + {"$id": 1, "$ref": "coll0"}, + {"$db": "db0", "$ref": "coll0", "$id": 1}, + {"foo": 1, "$id": 1, "$ref": "coll0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0"}, + {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, + ]: + with self.subTest(doc=doc): + # Decode the test input to a DBRef via a BSON roundtrip. + encoded_doc = encode({'dbref': doc}) + decoded = decode(encoded_doc) + dbref = decoded['dbref'] + self.assertIsInstance(dbref, DBRef) + # Encode the DBRef. + encoded_dbref = encode(decoded) + # BSON does not match because DBRef fields are reordered. + self.assertNotEqual(encoded_dbref, encoded_doc) + self.assertEqual(decode(encoded_dbref), decode(encoded_doc)) + # Ensure extra fields are present. + for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: + self.assertEqual(getattr(dbref, extra), doc[extra]) + + if __name__ == "__main__": unittest.main() From 64ae5c654485f746c230a4dee32d39b98afea015 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 8 Sep 2021 14:42:13 -0700 Subject: [PATCH 0455/2111] PYTHON-2898 Update Max Staleness spec tests --- .../DefaultNoMaxStaleness.json | 10 +++--- .../ReplicaSetNoPrimary/Incompatible.json | 36 ------------------- .../ReplicaSetNoPrimary/LastUpdateTime.json | 12 +++---- .../ReplicaSetNoPrimary/Nearest.json | 12 +++---- .../ReplicaSetNoPrimary/Nearest2.json | 12 +++---- .../ReplicaSetNoPrimary/PrimaryPreferred.json | 8 ++--- .../PrimaryPreferred_tags.json | 8 ++--- .../ReplicaSetNoPrimary/Secondary.json | 12 +++---- .../SecondaryPreferred.json | 8 ++--- .../SecondaryPreferred_tags.json | 12 +++---- .../ReplicaSetNoPrimary/ZeroMaxStaleness.json | 4 +-- .../DefaultNoMaxStaleness.json | 10 +++--- .../ReplicaSetWithPrimary/Incompatible.json | 36 ------------------- .../ReplicaSetWithPrimary/LastUpdateTime.json | 12 +++---- .../ReplicaSetWithPrimary/LongHeartbeat.json | 10 +++--- .../ReplicaSetWithPrimary/LongHeartbeat2.json | 4 +-- .../MaxStalenessTooSmall.json | 4 +-- .../MaxStalenessWithModePrimary.json | 4 +-- .../ReplicaSetWithPrimary/Nearest.json | 12 +++---- .../ReplicaSetWithPrimary/Nearest2.json | 12 +++---- .../ReplicaSetWithPrimary/Nearest_tags.json | 8 ++--- .../PrimaryPreferred.json | 8 ++--- .../PrimaryPreferred_incompatible.json | 36 ------------------- .../SecondaryPreferred.json | 8 ++--- .../SecondaryPreferred_tags.json | 16 ++++----- .../SecondaryPreferred_tags2.json | 10 +++--- .../ReplicaSetWithPrimary/Secondary_tags.json | 16 ++++----- .../Secondary_tags2.json | 10 +++--- .../ZeroMaxStaleness.json | 4 +-- test/max_staleness/Sharded/Incompatible.json | 36 ------------------- .../Sharded/SmallMaxStaleness.json | 10 +++--- test/max_staleness/Single/Incompatible.json | 24 ------------- .../Single/SmallMaxStaleness.json | 6 ++-- .../Unknown/SmallMaxStaleness.json | 2 +- 34 files changed, 132 insertions(+), 300 deletions(-) delete mode 100644 test/max_staleness/ReplicaSetNoPrimary/Incompatible.json delete mode 100644 test/max_staleness/ReplicaSetWithPrimary/Incompatible.json delete mode 100644 test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json delete mode 100644 test/max_staleness/Sharded/Incompatible.json delete mode 100644 test/max_staleness/Single/Incompatible.json diff --git a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json index 1e3dd0bfd9..5afebbbdcb 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json b/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json deleted file mode 100644 index 7f9fa764c7..0000000000 --- a/test/max_staleness/ReplicaSetNoPrimary/Incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "topology_description": { - "type": "ReplicaSetNoPrimary", - "servers": [ - { - "address": "a:27017", - "type": "RSSecondary", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 5, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "2" - } - } - }, - { - "address": "b:27017", - "type": "RSSecondary", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 4, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - } - ] - }, - "read_preference": { - "mode": "Nearest", - "maxStalenessSeconds": 120 - }, - "error": true -} diff --git a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json index e1abef2844..492d8a2f62 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json index 53549e6431..6602561c1d 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json index e2768c7fb8..16d9a673bd 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json index 8c6be6886a..7956b8e516 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json index 26007c026e..453dce6605 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json index 7d5eb58f4d..b383f275dc 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json index df0bb5d77f..7bce7d0aa4 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json index 1ac3ea0aed..32c9ca770b 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json index cb5dc5175a..fd84cd1193 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 4, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json index ed18d5837e..35eaa9d69d 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json b/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json deleted file mode 100644 index d27ea11202..0000000000 --- a/test/max_staleness/ReplicaSetWithPrimary/Incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "topology_description": { - "type": "ReplicaSetWithPrimary", - "servers": [ - { - "address": "a:27017", - "type": "RSPrimary", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 5, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - }, - { - "address": "b:27017", - "type": "RSSecondary", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 4, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - } - ] - }, - "read_preference": { - "mode": "Nearest", - "maxStalenessSeconds": 120 - }, - "error": true -} diff --git a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json index bbd8238e8a..18450beaed 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json index cb05f52aa2..b9fb407f9e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json index be169a3dcb..b695e1caeb 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json index 173f5742a2..9b798d37da 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json index eee3462783..1fa7bb4dd0 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json index 753fb82ca3..198be4a681 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json index 6233c0815a..3ae629c898 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json index 9a1cd3bb12..675df82631 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json index 107ae2755e..795b47a111 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json deleted file mode 100644 index a6681f6a13..0000000000 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred_incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "topology_description": { - "type": "ReplicaSetWithPrimary", - "servers": [ - { - "address": "a:27017", - "type": "RSPrimary", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 5, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - }, - { - "address": "b:27017", - "type": "RSSecondary", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 4, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - } - ] - }, - "read_preference": { - "mode": "PrimaryPreferred", - "maxStalenessSeconds": 150 - }, - "error": true -} diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json index 5f8a21f15c..5455708a70 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json index 09ce6d6bd0..6670b54c89 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json index 3700c30453..642fee1fb3 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json index f117159f64..502120dce6 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json index b739c6141b..6978a1807b 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 5 + "maxWireVersion": 6 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 5, + "maxWireVersion": 6, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json index f17aa93a3f..e1e4a7ffb7 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 4, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Sharded/Incompatible.json b/test/max_staleness/Sharded/Incompatible.json deleted file mode 100644 index c261383f4a..0000000000 --- a/test/max_staleness/Sharded/Incompatible.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "topology_description": { - "type": "Sharded", - "servers": [ - { - "address": "a:27017", - "type": "Mongos", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 5, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - }, - { - "address": "b:27017", - "type": "Mongos", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 4, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - } - ] - }, - "read_preference": { - "mode": "Nearest", - "maxStalenessSeconds": 120 - }, - "error": true -} diff --git a/test/max_staleness/Sharded/SmallMaxStaleness.json b/test/max_staleness/Sharded/SmallMaxStaleness.json index 27b9f1c12f..91d89720d1 100644 --- a/test/max_staleness/Sharded/SmallMaxStaleness.json +++ b/test/max_staleness/Sharded/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Single/Incompatible.json b/test/max_staleness/Single/Incompatible.json deleted file mode 100644 index b37fec7c1a..0000000000 --- a/test/max_staleness/Single/Incompatible.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "topology_description": { - "type": "Single", - "servers": [ - { - "address": "a:27017", - "type": "Standalone", - "avg_rtt_ms": 5, - "lastUpdateTime": 0, - "maxWireVersion": 4, - "lastWrite": { - "lastWriteDate": { - "$numberLong": "1" - } - } - } - ] - }, - "read_preference": { - "mode": "Nearest", - "maxStalenessSeconds": 120 - }, - "error": true -} diff --git a/test/max_staleness/Single/SmallMaxStaleness.json b/test/max_staleness/Single/SmallMaxStaleness.json index c6b10231b8..b8d2db24be 100644 --- a/test/max_staleness/Single/SmallMaxStaleness.json +++ b/test/max_staleness/Single/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -27,7 +27,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -41,7 +41,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 5, + "maxWireVersion": 6, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Unknown/SmallMaxStaleness.json b/test/max_staleness/Unknown/SmallMaxStaleness.json index bf6174b8e4..8d69f46a1e 100644 --- a/test/max_staleness/Unknown/SmallMaxStaleness.json +++ b/test/max_staleness/Unknown/SmallMaxStaleness.json @@ -6,7 +6,7 @@ { "address": "a:27017", "type": "Unknown", - "maxWireVersion": 5 + "maxWireVersion": 6 } ] }, From d8b6e01690f6127c4b45e1efa359678d33a3bf25 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 7 Sep 2021 14:32:19 -0700 Subject: [PATCH 0456/2111] PYTHON-2833 MongoDB 3.6 is the minimum supported version This change drops support for server versions older than 3.6. If you still need support for older server versions the 3.x branch of PyMongo will receive bug fixes for at least a year after the release of PyMongo 4.0. --- .evergreen/config.yml | 166 +----------------------------------------- README.rst | 2 +- doc/changelog.rst | 2 + pymongo/common.py | 4 +- 4 files changed, 8 insertions(+), 166 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 18ef5efecb..44cb584960 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -968,114 +968,6 @@ tasks: TOPOLOGY: "server" - func: "run doctests" - - name: "test-2.6-standalone" - tags: ["2.6", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "2.6" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-2.6-replica_set" - tags: ["2.6", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "2.6" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-2.6-sharded_cluster" - tags: ["2.6", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "2.6" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-3.0-standalone" - tags: ["3.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-3.0-replica_set" - tags: ["3.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-3.0-sharded_cluster" - tags: ["3.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-3.2-standalone" - tags: ["3.2", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.2" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-3.2-replica_set" - tags: ["3.2", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.2" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-3.2-sharded_cluster" - tags: ["3.2", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.2" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-3.4-standalone" - tags: ["3.4", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.4" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-3.4-replica_set" - tags: ["3.4", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.4" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-3.4-sharded_cluster" - tags: ["3.4", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.4" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - name: "test-3.6-standalone" tags: ["3.6", "standalone"] commands: @@ -1843,22 +1735,6 @@ axes: - id: mongodb-version display_name: "MongoDB" values: - - id: "2.6" - display_name: "MongoDB 2.6" - variables: - VERSION: "2.6" - - id: "3.0" - display_name: "MongoDB 3.0" - variables: - VERSION: "3.0" - - id: "3.2" - display_name: "MongoDB 3.2" - variables: - VERSION: "3.2" - - id: "3.4" - display_name: "MongoDB 3.4" - variables: - VERSION: "3.4" - id: "3.6" display_name: "MongoDB 3.6" variables: @@ -2018,10 +1894,6 @@ axes: display_name: MMAPv1 variables: STORAGE_ENGINE: "mmapv1" - - id: wiredtiger - display_name: WiredTiger - variables: - STORAGE_ENGINE: "wiredtiger" - id: inmemory display_name: InMemory variables: @@ -2123,10 +1995,6 @@ buildvariants: - ".4.2" - ".4.0" - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - matrix_name: "tests-all-encryption" matrix_spec: @@ -2142,7 +2010,6 @@ buildvariants: - ".4.4" - ".4.2" - ".4.0" - - ".2.6" - matrix_name: "tests-archlinux" matrix_spec: @@ -2156,10 +2023,6 @@ buildvariants: tasks: - ".4.0" - ".3.6" - - ".3.4" - - ".3.2" - - ".3.0" - - ".2.6" - matrix_name: "test-macos" matrix_spec: @@ -2182,17 +2045,6 @@ buildvariants: - ".4.2" - ".4.0" - ".3.6" - - ".3.4" - - ".3.2" - rules: - - if: - platform: macos-1014 - auth: "*" - ssl: "nossl" - then: - add_tasks: - - ".3.0" - - ".2.6" - matrix_name: "test-macos-encryption" matrix_spec: @@ -2252,7 +2104,7 @@ buildvariants: pyopenssl: "*" display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - - '.replica_set !.2.6 !.3.0' + - '.replica_set' # Test standalone and sharded only on 5.0 and later. - '.5.0' @@ -2265,7 +2117,7 @@ buildvariants: pyopenssl: "*" display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - - '.replica_set !.2.6 !.3.0 !.3.2 !.3.4' + - '.replica_set' # Test standalone and sharded only on 5.0 and later. - '.5.0' @@ -2277,7 +2129,7 @@ buildvariants: pyopenssl: "*" display_name: "PyOpenSSL ${platform} ${auth}" tasks: - - '.replica_set !.2.6 !.3.0' + - '.replica_set' - matrix_name: "tests-pyopenssl-windows" matrix_spec: @@ -2432,8 +2284,6 @@ buildvariants: - "test-4.2-standalone" - "test-4.0-standalone" - "test-3.6-standalone" - - "test-3.4-standalone" - - "test-3.2-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 platform: awslinux @@ -2445,16 +2295,6 @@ buildvariants: - "test-4.0-replica_set" - "test-3.6-standalone" - "test-3.6-replica_set" - - "test-3.4-standalone" - - "test-3.2-standalone" - - if: - # No need to test this on later server versions as it becomes the default - platform: awslinux - storage-engine: ["wiredtiger"] - python-version: "*" - then: - add_tasks: - - "test-3.0-standalone" # enableTestCommands=0 tests on Amazon1 (x86_64) with Python 3.6. - matrix_name: "test-disableTestCommands" diff --git a/README.rst b/README.rst index c9f2eef8db..ba505dbb3a 100644 --- a/README.rst +++ b/README.rst @@ -17,7 +17,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 2.6, 3.0, 3.2, 3.4, 3.6, 4.0, 4.2, 4.4, and 5.0. +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, and 5.0. Support / Feedback ================== diff --git a/doc/changelog.rst b/doc/changelog.rst index d7a68436e2..1f2a540956 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,8 @@ Changes in Version 4.0 .. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. +.. warning:: PyMongo 4.0 drops support for MongoDB 2.6, 3.0, 3.2, and 3.4. + PyMongo 4.0 brings a number of improvements as well as some backward breaking changes. For example, all APIs deprecated in PyMongo 3.X have been removed. Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` diff --git a/pymongo/common.py b/pymongo/common.py index b465e3816d..90dba01954 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -47,8 +47,8 @@ MAX_WRITE_BATCH_SIZE = 1000 # What this version of PyMongo supports. -MIN_SUPPORTED_SERVER_VERSION = "2.6" -MIN_SUPPORTED_WIRE_VERSION = 2 +MIN_SUPPORTED_SERVER_VERSION = "3.6" +MIN_SUPPORTED_WIRE_VERSION = 6 MAX_SUPPORTED_WIRE_VERSION = 13 # Frequency to call ismaster on servers, in seconds. From 992761568d03c5039b8f930e5388929075ea138a Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Sat, 11 Sep 2021 13:38:19 -0700 Subject: [PATCH 0457/2111] PYHON-2437 PYTHON-2873 venv improvements --- .evergreen/run-mockupdb-tests.sh | 8 +++++--- .evergreen/run-tests.sh | 7 ++----- .evergreen/utils.sh | 20 +++++++++++++++----- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh index 93e434c50f..26991b5581 100644 --- a/.evergreen/run-mockupdb-tests.sh +++ b/.evergreen/run-mockupdb-tests.sh @@ -3,11 +3,13 @@ set -o xtrace set -o errexit +. .evergreen/utils.sh + ${PYTHON_BINARY} setup.py clean cd .. -${PYTHON_BINARY} -m virtualenv mockuptests -. mockuptests/bin/activate -trap "deactivate" EXIT HUP + +createvirtualenv ${PYTHON_BINARY} mockuptests +trap "deactivatei, rm -rf mockuptests" EXIT HUP # Install PyMongo from git clone so mockup-tests don't # download it from pypi. diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 43e9a8b347..a60f0f07ae 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -75,15 +75,13 @@ if [ -z "$PYTHON_BINARY" ]; then echo "Cannot test without python3.6+ installed!" fi elif [ "$COMPRESSORS" = "snappy" ]; then - $PYTHON_BINARY -m virtualenv --never-download snappytest - . snappytest/bin/activate + createvirtualenv $PYTHON_BINARY snappytest trap "deactivate; rm -rf snappytest" EXIT HUP # 0.5.2 has issues in pypy3(.5) pip install python-snappy==0.5.1 PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then - $PYTHON_BINARY -m virtualenv --never-download zstdtest - . zstdtest/bin/activate + createvirtualenv $PYTHON_BINARY zstdtest trap "deactivate; rm -rf zstdtest" EXIT HUP pip install zstandard PYTHON=python @@ -106,7 +104,6 @@ if [ -n "$TEST_ENCRYPTION" ]; then PYTHON=python if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - $PYTHON -m pip install -U setuptools # PYTHON-2808 Ensure this machine has the CA cert for google KMS. powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true fi diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index be5adadd12..8fc42506a5 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -1,5 +1,7 @@ #!/bin/bash -ex +set -o xtrace + # Usage: # createvirtualenv /path/to/python /output/path/for/venv # * param1: Python binary to use for the virtualenv @@ -7,23 +9,31 @@ createvirtualenv () { PYTHON=$1 VENVPATH=$2 - if $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv --never-download" - elif $PYTHON -m venv -h>/dev/null; then + if $PYTHON -m venv -h>/dev/null; then # System virtualenv might not be compatible with the python3 on our path VIRTUALENV="$PYTHON -m venv" + elif $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" else echo "Cannot test without virtualenv" exit 1 fi $VIRTUALENV $VENVPATH if [ "Windows_NT" = "$OS" ]; then + # Workaround https://bugs.python.org/issue32451: + # mongovenv/Scripts/activate: line 3: $'\r': command not found + dos2unix $VENVPATH/Scripts/activate || true . $VENVPATH/Scripts/activate else . $VENVPATH/bin/activate fi - python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel + + PYVER=$(${PYTHON} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") + # pip fails to upgrade in a Python 3.6 venv on Windows. + if [ $PYVER != "3.6" -o "Windows_NT" != "$OS" ] ; then + python -m pip install --upgrade pip + python -m pip install --upgrade setuptools wheel + fi } # Usage: From 146179db53a8cf020830127c405b8dff1199b63b Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 7 Sep 2021 15:17:25 -0700 Subject: [PATCH 0458/2111] PYTHON-2803 Eliminate the use of 'slave' --- pymongo/aggregation.py | 10 ++++----- pymongo/change_stream.py | 4 ++-- pymongo/collection.py | 34 ++++++++++++++-------------- pymongo/cursor.py | 2 +- pymongo/database.py | 22 +++++++++--------- pymongo/message.py | 20 ++++++++--------- pymongo/mongo_client.py | 40 ++++++++++++++++----------------- pymongo/network.py | 8 +++---- pymongo/pool.py | 6 ++--- pymongo/server.py | 6 ++--- test/test_read_preferences.py | 14 ++++++------ test/test_server_description.py | 1 + test/test_topology.py | 4 +++- test/utils.py | 21 ----------------- 14 files changed, 87 insertions(+), 105 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index ae1b9d9eb8..812ca23b79 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -93,17 +93,17 @@ def _check_compat(sock_info): """Check whether the server version in-use supports aggregation.""" pass - def _process_result(self, result, session, server, sock_info, slave_ok): + def _process_result(self, result, session, server, sock_info, secondary_ok): if self._result_processor: self._result_processor( - result, session, server, sock_info, slave_ok) + result, session, server, sock_info, secondary_ok) def get_read_preference(self, session): if self._performs_write: return ReadPreference.PRIMARY return self._target._read_preference_for(session) - def get_cursor(self, session, server, sock_info, slave_ok): + def get_cursor(self, session, server, sock_info, secondary_ok): # Ensure command compatibility. self._check_compat(sock_info) @@ -136,7 +136,7 @@ def get_cursor(self, session, server, sock_info, slave_ok): result = sock_info.command( self._database.name, cmd, - slave_ok, + secondary_ok, self.get_read_preference(session), self._target.codec_options, parse_write_concern_error=True, @@ -147,7 +147,7 @@ def get_cursor(self, session, server, sock_info, slave_ok): client=self._database.client, user_fields=self._user_fields) - self._process_result(result, session, server, sock_info, slave_ok) + self._process_result(result, session, server, sock_info, secondary_ok) # Extract cursor from result or mock/fake one if necessary. if 'cursor' in result: diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 936059e14e..00d049a838 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -58,7 +58,7 @@ class ChangeStream(object): """The internal abstract base class for change stream cursors. - Should not be called directly by application developers. Use + Should not be called directly by application developers. Use :meth:`pymongo.collection.Collection.watch`, :meth:`pymongo.database.Database.watch`, or :meth:`pymongo.mongo_client.MongoClient.watch` instead. @@ -148,7 +148,7 @@ def _aggregation_pipeline(self): full_pipeline.extend(self._pipeline) return full_pipeline - def _process_result(self, result, session, server, sock_info, slave_ok): + def _process_result(self, result, session, server, sock_info, secondary_ok): """Callback that caches the postBatchResumeToken or startAtOperationTime from a changeStream aggregate command response containing an empty batch of change documents. diff --git a/pymongo/collection.py b/pymongo/collection.py index 68471c06c5..7994c4d8ab 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -180,7 +180,7 @@ def _socket_for_reads(self, session): def _socket_for_writes(self, session): return self.__database.client._socket_for_writes(session) - def _command(self, sock_info, command, slave_ok=False, + def _command(self, sock_info, command, secondary_ok=False, read_preference=None, codec_options=None, check=True, allowable_errors=None, read_concern=None, @@ -194,7 +194,7 @@ def _command(self, sock_info, command, slave_ok=False, :Parameters: - `sock_info` - A SocketInfo instance. - `command` - The command itself, as a SON instance. - - `slave_ok`: whether to set the SlaveOkay wire protocol bit. + - `secondary_ok`: whether to set the secondaryOkay wire protocol bit. - `codec_options` (optional) - An instance of :class:`~bson.codec_options.CodecOptions`. - `check`: raise OperationFailure if there are errors @@ -221,7 +221,7 @@ def _command(self, sock_info, command, slave_ok=False, return sock_info.command( self.__database.name, command, - slave_ok, + secondary_ok, read_preference or self._read_preference_for(session), codec_options or self.codec_options, check, @@ -1413,14 +1413,14 @@ def find_raw_batches(self, *args, **kwargs): return RawBatchCursor(self, *args, **kwargs) - def _count_cmd(self, session, sock_info, slave_ok, cmd, collation): + def _count_cmd(self, session, sock_info, secondary_ok, cmd, collation): """Internal count command helper.""" # XXX: "ns missing" checks can be removed when we drop support for # MongoDB 3.0, see SERVER-17051. res = self._command( sock_info, cmd, - slave_ok, + secondary_ok, allowable_errors=["ns missing"], codec_options=self.__write_response_codec_options, read_concern=self.read_concern, @@ -1431,12 +1431,12 @@ def _count_cmd(self, session, sock_info, slave_ok, cmd, collation): return int(res["n"]) def _aggregate_one_result( - self, sock_info, slave_ok, cmd, collation, session): + self, sock_info, secondary_ok, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" result = self._command( sock_info, cmd, - slave_ok, + secondary_ok, allowable_errors=[26], # Ignore NamespaceNotFound. codec_options=self.__write_response_codec_options, read_concern=self.read_concern, @@ -1470,7 +1470,7 @@ def estimated_document_count(self, **kwargs): raise ConfigurationError( 'estimated_document_count does not support sessions') - def _cmd(session, server, sock_info, slave_ok): + def _cmd(session, server, sock_info, secondary_ok): if sock_info.max_wire_version >= 12: # MongoDB 4.9+ pipeline = [ @@ -1482,7 +1482,7 @@ def _cmd(session, server, sock_info, slave_ok): ('cursor', {})]) cmd.update(kwargs) result = self._aggregate_one_result( - sock_info, slave_ok, cmd, collation=None, session=session) + sock_info, secondary_ok, cmd, collation=None, session=session) if not result: return 0 return int(result['n']) @@ -1490,7 +1490,7 @@ def _cmd(session, server, sock_info, slave_ok): # MongoDB < 4.9 cmd = SON([('count', self.__name)]) cmd.update(kwargs) - return self._count_cmd(None, sock_info, slave_ok, cmd, None) + return self._count_cmd(None, sock_info, secondary_ok, cmd, None) return self.__database.client._retryable_read( _cmd, self.read_preference, None) @@ -1567,9 +1567,9 @@ def count_documents(self, filter, session=None, **kwargs): collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, slave_ok): + def _cmd(session, server, sock_info, secondary_ok): result = self._aggregate_one_result( - sock_info, slave_ok, cmd, collation, session) + sock_info, secondary_ok, cmd, collation, session) if not result: return 0 return result['n'] @@ -1873,12 +1873,12 @@ def list_indexes(self, session=None): read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, slave_ok): + def _cmd(session, server, sock_info, secondary_ok): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) if sock_info.max_wire_version > 2: with self.__database.client._tmp_session(session, False) as s: try: - cursor = self._command(sock_info, cmd, slave_ok, + cursor = self._command(sock_info, cmd, secondary_ok, read_pref, codec_options, session=s)["cursor"] @@ -1894,7 +1894,7 @@ def _cmd(session, server, sock_info, slave_ok): else: res = message._first_batch( sock_info, self.__database.name, "system.indexes", - {"ns": self.__full_name}, 0, slave_ok, codec_options, + {"ns": self.__full_name}, 0, secondary_ok, codec_options, read_pref, cmd, self.database.client._event_listeners) cursor = res["cursor"] @@ -2304,9 +2304,9 @@ def distinct(self, key, filter=None, session=None, **kwargs): kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, slave_ok): + def _cmd(session, server, sock_info, secondary_ok): return self._command( - sock_info, cmd, slave_ok, read_concern=self.read_concern, + sock_info, cmd, secondary_ok, read_concern=self.read_concern, collation=collation, session=session, user_fields={"values": 1})["values"] diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 5f3419b7cc..65f2705126 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -68,7 +68,7 @@ _QUERY_OPTIONS = { "tailable_cursor": 2, - "slave_okay": 4, + "secondary_okay": 4, "oplog_replay": 8, "no_timeout": 16, "await_data": 32, diff --git a/pymongo/database.py b/pymongo/database.py index 89a38a15ab..df8d730fb2 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -478,7 +478,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - def _command(self, sock_info, command, slave_ok=False, value=1, check=True, + def _command(self, sock_info, command, secondary_ok=False, value=1, check=True, allowable_errors=None, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, write_concern=None, @@ -492,7 +492,7 @@ def _command(self, sock_info, command, slave_ok=False, value=1, check=True, return sock_info.command( self.__name, command, - slave_ok, + secondary_ok, read_preference, codec_options, check, @@ -591,8 +591,8 @@ def command(self, command, value=1, check=True, read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) with self.__client._socket_for_reads( - read_preference, session) as (sock_info, slave_ok): - return self._command(sock_info, command, slave_ok, value, + read_preference, session) as (sock_info, secondary_ok): + return self._command(sock_info, command, secondary_ok, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) @@ -604,15 +604,15 @@ def _retryable_read_command(self, command, value=1, check=True, read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, slave_ok): - return self._command(sock_info, command, slave_ok, value, + def _cmd(session, server, sock_info, secondary_ok): + return self._command(sock_info, command, secondary_ok, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) return self.__client._retryable_read( _cmd, read_preference, session) - def _list_collections(self, sock_info, slave_okay, session, + def _list_collections(self, sock_info, secondary_okay, session, read_preference, **kwargs): """Internal listCollections helper.""" @@ -625,7 +625,7 @@ def _list_collections(self, sock_info, slave_okay, session, with self.__client._tmp_session( session, close=False) as tmp_session: cursor = self._command( - sock_info, cmd, slave_okay, + sock_info, cmd, secondary_okay, read_preference=read_preference, session=tmp_session)["cursor"] cmd_cursor = CommandCursor( @@ -647,7 +647,7 @@ def _list_collections(self, sock_info, slave_okay, session, cmd = SON([("aggregate", "system.namespaces"), ("pipeline", pipeline), ("cursor", kwargs.get("cursor", {}))]) - cursor = self._command(sock_info, cmd, slave_okay)["cursor"] + cursor = self._command(sock_info, cmd, secondary_okay)["cursor"] cmd_cursor = CommandCursor(coll, cursor, sock_info.address) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor @@ -676,9 +676,9 @@ def list_collections(self, session=None, filter=None, **kwargs): read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, slave_okay): + def _cmd(session, server, sock_info, secondary_okay): return self._list_collections( - sock_info, slave_okay, session, read_preference=read_pref, + sock_info, secondary_okay, session, read_preference=read_pref, **kwargs) return self.__client._retryable_read( diff --git a/pymongo/message.py b/pymongo/message.py index 12ca524ffd..22effb1b24 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -101,7 +101,7 @@ def _maybe_add_read_preference(spec, read_preference): # problems with mongos versions that don't support read preferences. Also, # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting - # the slaveOkay bit has the same effect). + # the secondaryOkay bit has the same effect). if mode and ( mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): @@ -328,10 +328,10 @@ def as_command(self, sock_info): self._as_command = cmd, self.db return self._as_command - def get_message(self, set_slave_ok, sock_info, use_cmd=False): - """Get a query message, possibly setting the slaveOk bit.""" - if set_slave_ok: - # Set the slaveOk bit. + def get_message(self, set_secondary_ok, sock_info, use_cmd=False): + """Get a query message, possibly setting the secondaryOk bit.""" + if set_secondary_ok: + # Set the secondaryOk bit. flags = self.flags | 4 else: flags = self.flags @@ -344,7 +344,7 @@ def get_message(self, set_slave_ok, sock_info, use_cmd=False): if sock_info.op_msg_enabled: request_id, msg, size, _ = _op_msg( 0, spec, self.db, self.read_preference, - set_slave_ok, False, self.codec_options, + set_secondary_ok, False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size ns = "%s.%s" % (self.db, "$cmd") @@ -699,13 +699,13 @@ def _op_msg_uncompressed(flags, command, identifier, docs, check_keys, opts): _op_msg_uncompressed = _cmessage._op_msg -def _op_msg(flags, command, dbname, read_preference, slave_ok, check_keys, +def _op_msg(flags, command, dbname, read_preference, secondary_ok, check_keys, opts, ctx=None): """Get a OP_MSG message.""" command['$db'] = dbname # getMore commands do not send $readPreference. if read_preference is not None and "$readPreference" not in command: - if slave_ok and not read_preference.mode: + if secondary_ok and not read_preference.mode: command["$readPreference"] = ( ReadPreference.PRIMARY_PREFERRED.document) else: @@ -1675,7 +1675,7 @@ def unpack(cls, msg): def _first_batch(sock_info, db, coll, query, ntoreturn, - slave_ok, codec_options, read_preference, cmd, listeners): + secondary_ok, codec_options, read_preference, cmd, listeners): """Simple query helper for retrieving a first (and possibly only) batch.""" query = _Query( 0, db, coll, 0, query, None, codec_options, @@ -1687,7 +1687,7 @@ def _first_batch(sock_info, db, coll, query, ntoreturn, if publish: start = datetime.datetime.now() - request_id, msg, max_doc_size = query.get_message(slave_ok, sock_info) + request_id, msg, max_doc_size = query.get_message(secondary_ok, sock_info) if publish: encoding_duration = datetime.datetime.now() - start diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index cf7b4d3a63..549389f847 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1105,7 +1105,7 @@ def _end_sessions(self, session_ids): # another session. with self._socket_for_reads( ReadPreference.PRIMARY_PREFERRED, - None) as (sock_info, slave_ok): + None) as (sock_info, secondary_ok): if not sock_info.supports_sessions: return @@ -1113,7 +1113,7 @@ def _end_sessions(self, session_ids): spec = SON([('endSessions', session_ids[i:i + common._MAX_END_SESSIONS])]) sock_info.command( - 'admin', spec, slave_ok=slave_ok, client=self) + 'admin', spec, secondary_ok=secondary_ok, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions # command. @@ -1217,39 +1217,39 @@ def _socket_for_writes(self, session): return self._get_socket(server, session) @contextlib.contextmanager - def _slaveok_for_server(self, read_preference, server, session): + def _secondaryok_for_server(self, read_preference, server, session): assert read_preference is not None, "read_preference must not be None" # Get a socket for a server matching the read preference, and yield - # sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to - # mongods with topology type Single. If the server type is Mongos, - # follow the rules for passing read preference to mongos, even for - # topology type Single." + # sock_info, secondary_ok. Server Selection Spec: "SecondaryOK must + # be sent to mongods with topology type Single. If the server type is + # Mongos, follow the rules for passing read preference to mongos, even + # for topology type Single." # Thread safe: if the type is single it cannot change. topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single with self._get_socket(server, session) as sock_info: - slave_ok = (single and not sock_info.is_mongos) or ( + secondary_ok = (single and not sock_info.is_mongos) or ( read_preference != ReadPreference.PRIMARY) - yield sock_info, slave_ok + yield sock_info, secondary_ok @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): assert read_preference is not None, "read_preference must not be None" # Get a socket for a server matching the read preference, and yield - # sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to - # mongods with topology type Single. If the server type is Mongos, - # follow the rules for passing read preference to mongos, even for - # topology type Single." + # sock_info, secondary_ok. Server Selection Spec: "SecondaryOK must be + # sent to mongods with topology type Single. If the server type is + # Mongos, follow the rules for passing read preference to mongos, even + # for topology type Single." # Thread safe: if the type is single it cannot change. topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single server = self._select_server(read_preference, session) with self._get_socket(server, session) as sock_info: - slave_ok = (single and not sock_info.is_mongos) or ( + secondary_ok = (single and not sock_info.is_mongos) or ( read_preference != ReadPreference.PRIMARY) - yield sock_info, slave_ok + yield sock_info, secondary_ok def _should_pin_cursor(self, session): return (self.__options.load_balanced and @@ -1276,9 +1276,9 @@ def _run_operation(self, operation, unpack_res, address=None): operation.sock_mgr.sock, operation, True, self._event_listeners, unpack_res) - def _cmd(session, server, sock_info, slave_ok): + def _cmd(session, server, sock_info, secondary_ok): return server.run_operation( - sock_info, operation, slave_ok, self._event_listeners, + sock_info, operation, secondary_ok, self._event_listeners, unpack_res) return self._retryable_read( @@ -1375,13 +1375,13 @@ def _retryable_read(self, func, read_pref, session, address=None, read_pref, session, address=address) if not server.description.retryable_reads_supported: retryable = False - with self._slaveok_for_server(read_pref, server, session) as ( - sock_info, slave_ok): + with self._secondaryok_for_server(read_pref, server, session) as ( + sock_info, secondary_ok): if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. raise last_error - return func(session, server, sock_info, slave_ok) + return func(session, server, sock_info, secondary_ok) except ServerSelectionTimeoutError: if retrying: # The application may think the write was never attempted diff --git a/pymongo/network.py b/pymongo/network.py index c6146566cc..cc83d90573 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -38,7 +38,7 @@ _UNPACK_HEADER = struct.Struct(" Date: Fri, 10 Sep 2021 16:57:16 -0700 Subject: [PATCH 0459/2111] PYTHON-2803 Get rid of most uses of 'master' This change also resolves PYTHON-2848 for MongoDB 4.0. --- doc/migrate-to-pymongo3.rst | 4 +- pymongo/auth.py | 4 +- pymongo/common.py | 8 +- pymongo/hello.py | 1 + pymongo/helpers.py | 11 +- pymongo/message.py | 3 +- pymongo/mongo_client.py | 4 +- pymongo/monitor.py | 24 +-- pymongo/monitoring.py | 3 +- pymongo/network.py | 2 +- pymongo/pool.py | 46 ++-- pymongo/read_preferences.py | 2 +- pymongo/server_description.py | 50 ++--- pymongo/topology.py | 10 +- pymongo/topology_description.py | 12 +- pymongo/uri_parser.py | 2 +- test/__init__.py | 35 +-- test/atlas/test_connection.py | 2 +- test/mod_wsgi_test/mod_wsgi_test.wsgi | 3 +- test/ocsp/test_ocsp.py | 2 +- test/performance/perf_test.py | 2 +- test/pymongo_mocks.py | 34 +-- test/test_auth.py | 13 +- test/test_bulk.py | 4 +- test/test_client.py | 21 +- test/test_cmap.py | 12 +- test/test_collection.py | 2 +- ...nnections_survive_primary_stepdown_spec.py | 4 +- test/test_discovery_and_monitoring.py | 26 +-- test/test_encryption.py | 6 +- test/test_errors.py | 2 +- test/test_heartbeat_monitoring.py | 4 +- test/test_mongos_load_balancing.py | 14 +- test/test_monitoring.py | 36 ++-- test/test_pooling.py | 2 +- test/test_replica_set_reconfig.py | 10 +- test/test_sdam_monitoring_spec.py | 6 +- test/test_server.py | 4 +- test/test_server_description.py | 62 +++--- test/test_server_selection.py | 3 +- test/test_session.py | 2 +- test/test_ssl.py | 9 +- test/test_streaming_protocol.py | 31 +-- test/test_topology.py | 204 +++++++++--------- test/utils.py | 21 +- test/utils_selection_tests.py | 20 +- 46 files changed, 397 insertions(+), 385 deletions(-) diff --git a/doc/migrate-to-pymongo3.rst b/doc/migrate-to-pymongo3.rst index c1a4c490a3..633d0a7abb 100644 --- a/doc/migrate-to-pymongo3.rst +++ b/doc/migrate-to-pymongo3.rst @@ -431,13 +431,13 @@ can be changed to this with PyMongo 2.9 or later: >>> from pymongo.errors import ConnectionFailure >>> client = MongoClient(connect=False) >>> try: - ... result = client.admin.command("ismaster") + ... client.admin.command("ping") ... except ConnectionFailure: ... print("Server not available") >>> Any operation can be used to determine if the server is available. We choose -the "ismaster" command here because it is cheap and does not require auth, so +the "ping" command here because it is cheap and does not require auth, so it is a simple way to check whether the server is available. The max_pool_size parameter is removed diff --git a/pymongo/auth.py b/pymongo/auth.py index 9861e77174..bfec7ab60e 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -532,8 +532,8 @@ def from_credentials(creds): def speculate_command(self): raise NotImplementedError - def parse_response(self, ismaster): - self.speculative_authenticate = ismaster.speculative_authenticate + def parse_response(self, hello): + self.speculative_authenticate = hello.speculative_authenticate def speculate_succeeded(self): return bool(self.speculative_authenticate) diff --git a/pymongo/common.py b/pymongo/common.py index 90dba01954..f1a5389b35 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -51,7 +51,7 @@ MIN_SUPPORTED_WIRE_VERSION = 6 MAX_SUPPORTED_WIRE_VERSION = 13 -# Frequency to call ismaster on servers, in seconds. +# Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 # Frequency to clean up unclosed cursors, in seconds. @@ -67,7 +67,7 @@ # longest it is willing to wait for a new primary to be found. SERVER_SELECTION_TIMEOUT = 30 -# Spec requires at least 500ms between ismaster calls. +# Spec requires at least 500ms between hello calls. MIN_HEARTBEAT_INTERVAL = 0.5 # Spec requires at least 60s between SRV rescans. @@ -127,13 +127,13 @@ def partition_node(node): def clean_node(node): - """Split and normalize a node name from an ismaster response.""" + """Split and normalize a node name from a hello response.""" host, port = partition_node(node) # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but - # "FOO.com" is in the ismaster response. + # "FOO.com" is in the hello response. return host.lower(), port diff --git a/pymongo/hello.py b/pymongo/hello.py index 290f27dcc9..0ad06e9619 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -25,6 +25,7 @@ class HelloCompat: LEGACY_CMD = 'ismaster' PRIMARY = 'isWritablePrimary' LEGACY_PRIMARY = 'ismaster' + LEGACY_ERROR = 'not master' def _get_server_type(doc): diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 1d8005ac31..86d1e9f484 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -29,6 +29,7 @@ WriteError, WriteConcernError, WTimeoutError) +from pymongo.hello import HelloCompat # From the SDAM spec, the "node is shutting down" codes. _SHUTDOWN_CODES = frozenset([ @@ -38,7 +39,7 @@ # From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). -_NOT_MASTER_CODES = frozenset([ +_NOT_PRIMARY_CODES = frozenset([ 10058, # LegacyNotPrimary <=3.2 "not primary" error code 10107, # NotWritablePrimary 13435, # NotPrimaryNoSecondaryOk @@ -47,7 +48,7 @@ 189, # PrimarySteppedDown ]) | _SHUTDOWN_CODES # From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_MASTER_CODES | frozenset([ +_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset([ 7, # HostNotFound 6, # HostUnreachable 89, # NetworkTimeout @@ -150,9 +151,9 @@ def _check_command_response(response, max_wire_version, # Server is "not primary" or "recovering" if code is not None: - if code in _NOT_MASTER_CODES: + if code in _NOT_PRIMARY_CODES: raise NotPrimaryError(errmsg, response) - elif "not master" in errmsg or "node is recovering" in errmsg: + elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg: raise NotPrimaryError(errmsg, response) # Other errors @@ -184,7 +185,7 @@ def _check_gle_response(result, max_wire_version): if error_msg is None: return result - if error_msg.startswith("not master"): + if error_msg.startswith(HelloCompat.LEGACY_ERROR): raise NotPrimaryError(error_msg, result) details = result diff --git a/pymongo/message.py b/pymongo/message.py index 22effb1b24..a30975db9e 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -51,6 +51,7 @@ NotPrimaryError, OperationFailure, ProtocolError) +from pymongo.hello import HelloCompat from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -1524,7 +1525,7 @@ def raw_response(self, cursor_id=None, user_fields=None): error_object = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) - if error_object["$err"].startswith("not master"): + if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: raise ExecutionTimeout(error_object.get("$err"), diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 549389f847..64e3e2e4a3 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -167,8 +167,8 @@ def __init__( from pymongo.errors import ConnectionFailure client = MongoClient() try: - # The ismaster command is cheap and does not require auth. - client.admin.command('ismaster') + # The ping command is cheap and does not require auth. + client.admin.command('ping') except ConnectionFailure: print("Server not available") diff --git a/pymongo/monitor.py b/pymongo/monitor.py index df673b720d..d13d337f8e 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -62,7 +62,7 @@ def target(): self._executor = executor def _on_topology_gc(dummy=None): - # This prevents GC from waiting 10 seconds for isMaster to complete + # This prevents GC from waiting 10 seconds for hello to complete # See test_cleanup_executors_on_client_del. monitor = self_ref() if monitor: @@ -133,7 +133,7 @@ def __init__( self.heartbeater = None def cancel_check(self): - """Cancel any concurrent isMaster check. + """Cancel any concurrent hello check. Note: this is called from a weakref.proxy callback and MUST NOT take any locks. @@ -204,7 +204,7 @@ def _run(self): self.close() def _check_server(self): - """Call isMaster or read the next streaming response. + """Call hello or read the next streaming response. Returns a ServerDescription. """ @@ -213,7 +213,7 @@ def _check_server(self): try: return self._check_once() except (OperationFailure, NotPrimaryError) as exc: - # Update max cluster time even when isMaster fails. + # Update max cluster time even when hello fails. self._topology.receive_cluster_time( exc.details.get('$clusterTime')) raise @@ -236,7 +236,7 @@ def _check_server(self): return ServerDescription(address, error=error) def _check_once(self): - """A single attempt to call ismaster. + """A single attempt to call hello. Returns a ServerDescription, or raises an exception. """ @@ -267,19 +267,19 @@ def _check_with_socket(self, conn): cluster_time = self._topology.max_cluster_time() start = time.monotonic() if conn.more_to_come: - # Read the next streaming isMaster (MongoDB 4.4+). + # Read the next streaming hello (MongoDB 4.4+). response = Hello(conn._next_reply(), awaitable=True) elif (conn.performed_handshake and self._server_description.topology_version): - # Initiate streaming isMaster (MongoDB 4.4+). - response = conn._ismaster( + # Initiate streaming hello (MongoDB 4.4+). + response = conn._hello( cluster_time, self._server_description.topology_version, self._settings.heartbeat_frequency, None) else: - # New connection handshake or polling isMaster (MongoDB <4.4). - response = conn._ismaster(cluster_time, None, None, None) + # New connection handshake or polling hello (MongoDB <4.4). + response = conn._hello(cluster_time, None, None, None) return response, time.monotonic() - start @@ -384,12 +384,12 @@ def _run(self): self._pool.reset() def _ping(self): - """Run an "isMaster" command and return the RTT.""" + """Run a "hello" command and return the RTT.""" with self._pool.get_socket({}) as sock_info: if self._executor._stopped: raise Exception('_RttMonitor closed') start = time.monotonic() - sock_info.ismaster() + sock_info.hello() return time.monotonic() - start diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 147be2a46d..325ec05544 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -182,6 +182,7 @@ def connection_checked_in(self, event): from collections import abc, namedtuple +from pymongo.hello import HelloCompat from pymongo.helpers import _handle_exception _Listeners = namedtuple('Listeners', @@ -512,7 +513,7 @@ def register(listener): # The "hello" command is also deemed sensitive when attempting speculative # authentication. def _is_speculative_authenticate(command_name, doc): - if (command_name.lower() in ('hello', 'ismaster') and + if (command_name.lower() in ('hello', HelloCompat.LEGACY_CMD) and 'speculativeAuthenticate' in doc): return True return False diff --git a/pymongo/network.py b/pymongo/network.py index cc83d90573..7ec6540dd4 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -246,7 +246,7 @@ def wait_for_read(sock_info, deadline): readable = sock_info.socket_checker.select( sock, read=True, timeout=timeout) if context.cancelled: - raise _OperationCancelled('isMaster cancelled') + raise _OperationCancelled('hello cancelled') if readable: return if deadline and time.monotonic() > deadline: diff --git a/pymongo/pool.py b/pymongo/pool.py index bce21f721b..52c57c7f07 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -413,13 +413,13 @@ def event_listeners(self): @property def appname(self): - """The application name, for sending with ismaster in server handshake. + """The application name, for sending with hello in server handshake. """ return self.__appname @property def driver(self): - """Driver name and version, for sending with ismaster in handshake. + """Driver name and version, for sending with hello in handshake. """ return self.__driver @@ -556,10 +556,10 @@ def hello_cmd(self): else: return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) - def ismaster(self, all_credentials=None): - return self._ismaster(None, None, None, all_credentials) + def hello(self, all_credentials=None): + return self._hello(None, None, None, all_credentials) - def _ismaster(self, cluster_time, topology_version, + def _hello(self, cluster_time, topology_version, heartbeat_frequency, all_credentials): cmd = self.hello_cmd() performing_handshake = not self.performed_handshake @@ -600,36 +600,36 @@ def _ismaster(self, cluster_time, topology_version, doc.setdefault('serviceId', process_id) if not self.opts.load_balanced: doc.pop('serviceId', None) - ismaster = Hello(doc, awaitable=awaitable) - self.is_writable = ismaster.is_writable - self.max_wire_version = ismaster.max_wire_version - self.max_bson_size = ismaster.max_bson_size - self.max_message_size = ismaster.max_message_size - self.max_write_batch_size = ismaster.max_write_batch_size + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size self.supports_sessions = ( - ismaster.logical_session_timeout_minutes is not None) - self.hello_ok = ismaster.hello_ok - self.is_mongos = ismaster.server_type == SERVER_TYPE.Mongos + hello.logical_session_timeout_minutes is not None) + self.hello_ok = hello.hello_ok + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos if performing_handshake and self.compression_settings: ctx = self.compression_settings.get_compression_context( - ismaster.compressors) + hello.compressors) self.compression_context = ctx - self.op_msg_enabled = ismaster.max_wire_version >= 6 + self.op_msg_enabled = hello.max_wire_version >= 6 if creds: - self.negotiated_mechanisms[creds] = ismaster.sasl_supported_mechs + self.negotiated_mechanisms[creds] = hello.sasl_supported_mechs if auth_ctx: - auth_ctx.parse_response(ismaster) + auth_ctx.parse_response(hello) if auth_ctx.speculate_succeeded(): self.auth_ctx[auth_ctx.credentials] = auth_ctx if self.opts.load_balanced: - if not ismaster.service_id: + if not hello.service_id: raise ConfigurationError( 'Driver attempted to initialize in load balancing mode,' ' but the server does not support this mode') - self.service_id = ismaster.service_id + self.service_id = hello.service_id self.generation = self.pool_gen.get(self.service_id) - return ismaster + return hello def _next_reply(self): reply = self.receive_message(None) @@ -1110,7 +1110,7 @@ def __init__(self, address, options, handshake=True): :Parameters: - `address`: a (hostname, port) tuple - `options`: a PoolOptions instance - - `handshake`: whether to call ismaster for each new SocketInfo + - `handshake`: whether to call hello for each new SocketInfo """ if options.pause_enabled: self.state = PoolState.PAUSED @@ -1338,7 +1338,7 @@ def connect(self, all_credentials=None): sock_info = SocketInfo(sock, self, self.address, conn_id) try: if self.handshake: - sock_info.ismaster(all_credentials) + sock_info.hello(all_credentials) self.is_writable = sock_info.is_writable sock_info.check_auth(all_credentials) diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 3c07c16ba1..c60240822d 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -519,7 +519,7 @@ def __init__(self): def add_sample(self, sample): if sample < 0: - # Likely system time change while waiting for ismaster response + # Likely system time change while waiting for hello response # and not using time.monotonic. Ignore it, the next one will # probably be valid. return diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 1c64a8fd53..2cbf6d63cd 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -26,7 +26,7 @@ class ServerDescription(object): :Parameters: - `address`: A (host, port) pair - - `ismaster`: Optional Hello instance + - `hello`: Optional Hello instance - `round_trip_time`: Optional float - `error`: Optional, the last error attempting to connect to the server """ @@ -43,41 +43,41 @@ class ServerDescription(object): def __init__( self, address, - ismaster=None, + hello=None, round_trip_time=None, error=None): self._address = address - if not ismaster: - ismaster = Hello({}) - - self._server_type = ismaster.server_type - self._all_hosts = ismaster.all_hosts - self._tags = ismaster.tags - self._replica_set_name = ismaster.replica_set_name - self._primary = ismaster.primary - self._max_bson_size = ismaster.max_bson_size - self._max_message_size = ismaster.max_message_size - self._max_write_batch_size = ismaster.max_write_batch_size - self._min_wire_version = ismaster.min_wire_version - self._max_wire_version = ismaster.max_wire_version - self._set_version = ismaster.set_version - self._election_id = ismaster.election_id - self._cluster_time = ismaster.cluster_time - self._is_writable = ismaster.is_writable - self._is_readable = ismaster.is_readable - self._ls_timeout_minutes = ismaster.logical_session_timeout_minutes + if not hello: + hello = Hello({}) + + self._server_type = hello.server_type + self._all_hosts = hello.all_hosts + self._tags = hello.tags + self._replica_set_name = hello.replica_set_name + self._primary = hello.primary + self._max_bson_size = hello.max_bson_size + self._max_message_size = hello.max_message_size + self._max_write_batch_size = hello.max_write_batch_size + self._min_wire_version = hello.min_wire_version + self._max_wire_version = hello.max_wire_version + self._set_version = hello.set_version + self._election_id = hello.election_id + self._cluster_time = hello.cluster_time + self._is_writable = hello.is_writable + self._is_readable = hello.is_readable + self._ls_timeout_minutes = hello.logical_session_timeout_minutes self._round_trip_time = round_trip_time - self._me = ismaster.me + self._me = hello.me self._last_update_time = time.monotonic() self._error = error - self._topology_version = ismaster.topology_version + self._topology_version = hello.topology_version if error: if hasattr(error, 'details') and isinstance(error.details, dict): self._topology_version = error.details.get('topologyVersion') - if ismaster.last_write_date: + if hello.last_write_date: # Convert from datetime to seconds. - delta = ismaster.last_write_date - EPOCH_NAIVE + delta = hello.last_write_date - EPOCH_NAIVE self._last_write_date = delta.total_seconds() else: self._last_write_date = None diff --git a/pymongo/topology.py b/pymongo/topology.py index 15247771cc..8a2818b5c7 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -278,7 +278,7 @@ def _process_change(self, server_description, reset_pool=False): td_old = self._description sd_old = td_old._server_descriptions[server_description.address] if _is_stale_server_description(sd_old, server_description): - # This is a stale isMaster response. Ignore it. + # This is a stale hello response. Ignore it. return new_td = updated_topology_description( @@ -326,10 +326,10 @@ def _process_change(self, server_description, reset_pool=False): self._condition.notify_all() def on_change(self, server_description, reset_pool=False): - """Process a new ServerDescription after an ismaster call completes.""" + """Process a new ServerDescription after an hello call completes.""" # We do no I/O holding the lock. with self._lock: - # Monitors may continue working on ismaster calls for some time + # Monitors may continue working on hello calls for some time # after a call to Topology.close, so this method may be called at # any time. Ensure the topology is open before processing the # change. @@ -626,7 +626,7 @@ def _handle_error(self, address, err_ctx): err_code = error.code else: err_code = error.details.get('code', -1) - if err_code in helpers._NOT_MASTER_CODES: + if err_code in helpers._NOT_PRIMARY_CODES: is_shutting_down = err_code in helpers._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. if not self._settings.load_balanced: @@ -649,7 +649,7 @@ def _handle_error(self, address, err_ctx): # Clear the pool. server.reset(service_id) # "When a client marks a server Unknown from `Network error when - # reading or writing`_, clients MUST cancel the isMaster check on + # reading or writing`_, clients MUST cancel the hello check on # that server and close the current monitoring connection." server._monitor.cancel_check() diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 4282c4ff31..bf29586fa6 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -67,7 +67,7 @@ def __init__(self, self._init_incompatible_err() # Server Discovery And Monitoring Spec: Whenever a client updates the - # TopologyDescription from an ismaster response, it MUST set + # TopologyDescription from an hello response, it MUST set # TopologyDescription.logicalSessionTimeoutMinutes to the smallest # logicalSessionTimeoutMinutes value among ServerDescriptions of all # data-bearing server types. If any have a null @@ -303,7 +303,7 @@ def __repr__(self): self.topology_type_name, servers) -# If topology type is Unknown and we receive an ismaster response, what should +# If topology type is Unknown and we receive a hello response, what should # the new topology type be? _SERVER_TYPE_TO_TOPOLOGY_TYPE = { SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded, @@ -321,9 +321,9 @@ def updated_topology_description(topology_description, server_description): :Parameters: - `topology_description`: the current TopologyDescription - `server_description`: a new ServerDescription that resulted from - an ismaster call + a hello call - Called after attempting (successfully or not) to call ismaster on the + Called after attempting (successfully or not) to call hello on the server at server_description.address. Does not modify topology_description. """ address = server_description.address @@ -436,7 +436,7 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): :Parameters: - `topology_description`: the current TopologyDescription - `seedlist`: a list of new seeds new ServerDescription that resulted from - an ismaster call + a hello call """ # Create a copy of the server descriptions. sds = topology_description.server_descriptions() @@ -470,7 +470,7 @@ def _update_rs_from_primary( server_description, max_set_version, max_election_id): - """Update topology description from a primary's ismaster response. + """Update topology description from a primary's hello response. Pass in a dict of ServerDescriptions, current replica set name, the ServerDescription we are processing, and the TopologyDescription's diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 60eb9cba80..cf5cf64f5c 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -111,7 +111,7 @@ def parse_host(entity, default_port=DEFAULT_PORT): # Normalize hostname to lowercase, since DNS is case-insensitive: # http://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but - # "FOO.com" is in the ismaster response. + # "FOO.com" is in the hello response. return host.lower(), port diff --git a/test/__init__.py b/test/__init__.py index 6d99f6ec59..388caf715e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -48,6 +48,7 @@ from bson.son import SON from pymongo import common, message from pymongo.common import partition_node +from pymongo.hello import HelloCompat from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl from pymongo.uri_parser import parse_uri @@ -270,8 +271,8 @@ def client_options(self): return opts @property - def ismaster(self): - return self.client.admin.command('isMaster') + def hello(self): + return self.client.admin.command(HelloCompat.LEGACY_CMD) def _connect(self, host, port, **kwargs): # Jython takes a long time to connect. @@ -284,11 +285,11 @@ def _connect(self, host, port, **kwargs): host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) try: try: - client.admin.command('isMaster') # Can we connect? + client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - 'connected client %r, but isMaster failed: %s' % ( + 'connected client %r, but legacy hello failed: %s' % ( client, exc)) else: self.connection_attempts.append( @@ -365,11 +366,11 @@ def _init_client(self): # MMAPv1 does not support retryWrites=True. self.default_client_options['retryWrites'] = False - ismaster = self.ismaster - self.sessions_enabled = 'logicalSessionTimeoutMinutes' in ismaster + hello = self.hello + self.sessions_enabled = 'logicalSessionTimeoutMinutes' in hello - if 'setName' in ismaster: - self.replica_set_name = str(ismaster['setName']) + if 'setName' in hello: + self.replica_set_name = str(hello['setName']) self.is_rs = True if self.auth_enabled: # It doesn't matter which member we use as the seed here. @@ -387,18 +388,18 @@ def _init_client(self): replicaSet=self.replica_set_name, **self.default_client_options) - # Get the authoritative ismaster result from the primary. - ismaster = self.ismaster + # Get the authoritative hello result from the primary. + hello = self.hello nodes = [partition_node(node.lower()) - for node in ismaster.get('hosts', [])] + for node in hello.get('hosts', [])] nodes.extend([partition_node(node.lower()) - for node in ismaster.get('passives', [])]) + for node in hello.get('passives', [])]) nodes.extend([partition_node(node.lower()) - for node in ismaster.get('arbiters', [])]) + for node in hello.get('arbiters', [])]) self.nodes = set(nodes) else: self.nodes = set([(host, port)]) - self.w = len(ismaster.get("hosts", [])) or 1 + self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) if TEST_SERVERLESS: @@ -419,7 +420,7 @@ def _init_client(self): self.test_commands_enabled = True self.has_ipv6 = self._server_started_with_ipv6() - self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid') + self.is_mongos = (self.hello.get('msg') == 'isdbgrid') if self.is_mongos: if self.serverless: self.mongoses.append(self.client.address) @@ -432,8 +433,8 @@ def _init_client(self): mongos_client = self._connect( *next_address, **self.default_client_options) if mongos_client: - ismaster = mongos_client.admin.command('ismaster') - if ismaster.get('msg') == 'isdbgrid': + hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get('msg') == 'isdbgrid': self.mongoses.append(next_address) def init(self): diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index ef2a83369f..1ad84068ed 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -57,7 +57,7 @@ def connect(uri): raise Exception("Must set env variable to test.") client = pymongo.MongoClient(uri) # No TLS error - client.admin.command('ismaster') + client.admin.command('ping') # No auth error client.test.test.count_documents({}) diff --git a/test/mod_wsgi_test/mod_wsgi_test.wsgi b/test/mod_wsgi_test/mod_wsgi_test.wsgi index 9b435b1edf..1fa4e74350 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.wsgi +++ b/test/mod_wsgi_test/mod_wsgi_test.wsgi @@ -25,12 +25,13 @@ repository_path = os.path.normpath(os.path.join(this_path, '..', '..')) sys.path.insert(0, repository_path) import pymongo +from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient client = MongoClient() # If the deployment is a replica set, connect to the whole set. -replica_set_name = client.admin.command('ismaster').get('setName') +replica_set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') if replica_set_name: client = MongoClient(replicaSet=replica_set_name) diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index 4b91c3b939..07197e73b6 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -46,7 +46,7 @@ def _connect(options): "&tlsCAFile=%s&%s") % (TIMEOUT_MS, CA_FILE, options) print(uri) client = pymongo.MongoClient(uri) - client.admin.command('ismaster') + client.admin.command('ping') class TestOCSP(unittest.TestCase): diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 9f6e268df0..d84e67aca4 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -189,7 +189,7 @@ def setUp(self): def do_task(self): command = self.client.perftest.command for _ in range(NUM_DOCS): - command("ismaster") + command("ping") class TestDocument(PerformanceTest): diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 511560b59e..8b1ece8ad6 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -21,7 +21,7 @@ from pymongo import common from pymongo import MongoClient from pymongo.errors import AutoReconnect, NetworkTimeout -from pymongo.hello import Hello +from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import Pool from pymongo.server_description import ServerDescription @@ -99,20 +99,20 @@ def __init__( def _check_once(self): client = self.client address = self._server_description.address - response, rtt = client.mock_is_master('%s:%d' % address) + response, rtt = client.mock_hello('%s:%d' % address) return ServerDescription(address, Hello(response), rtt) class MockClient(MongoClient): def __init__( - self, standalones, members, mongoses, ismaster_hosts=None, + self, standalones, members, mongoses, hello_hosts=None, arbiters=None, down_hosts=None, *args, **kwargs): """A MongoClient connected to the default server, with a mock topology. standalones, members, mongoses, arbiters, and down_hosts determine the configuration of the topology. They are formatted like ['a:1', 'b:2']. - ismaster_hosts provides an alternative host list for the server's - mocked ismaster response; see test_connect_with_internal_ips. + hello_hosts provides an alternative host list for the server's + mocked hello response; see test_connect_with_internal_ips. """ self.mock_standalones = standalones[:] self.mock_members = members[:] @@ -125,10 +125,10 @@ def __init__( # Hosts that should be considered an arbiter. self.mock_arbiters = arbiters[:] if arbiters else [] - if ismaster_hosts is not None: - self.mock_ismaster_hosts = ismaster_hosts + if hello_hosts is not None: + self.mock_hello_hosts = hello_hosts else: - self.mock_ismaster_hosts = members[:] + self.mock_hello_hosts = members[:] self.mock_mongoses = mongoses[:] @@ -166,8 +166,8 @@ def set_wire_version_range(self, host, min_version, max_version): def set_max_write_batch_size(self, host, size): self.mock_max_write_batch_sizes[host] = size - def mock_is_master(self, host): - """Return mock ismaster response (a dict) and round trip time.""" + def mock_hello(self, host): + """Return mock hello response (a dict) and round trip time.""" if host in self.mock_wire_versions: min_wire_version, max_wire_version = self.mock_wire_versions[host] else: @@ -186,20 +186,20 @@ def mock_is_master(self, host): elif host in self.mock_standalones: response = { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'minWireVersion': min_wire_version, 'maxWireVersion': max_wire_version, 'maxWriteBatchSize': max_write_batch_size} elif host in self.mock_members: - ismaster = (host == self.mock_primary) + primary = (host == self.mock_primary) # Simulate a replica set member. response = { 'ok': 1, - 'ismaster': ismaster, - 'secondary': not ismaster, + HelloCompat.LEGACY_CMD: primary, + 'secondary': not primary, 'setName': 'rs', - 'hosts': self.mock_ismaster_hosts, + 'hosts': self.mock_hello_hosts, 'minWireVersion': min_wire_version, 'maxWireVersion': max_wire_version, 'maxWriteBatchSize': max_write_batch_size} @@ -213,14 +213,14 @@ def mock_is_master(self, host): elif host in self.mock_mongoses: response = { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'minWireVersion': min_wire_version, 'maxWireVersion': max_wire_version, 'msg': 'isdbgrid', 'maxWriteBatchSize': max_write_batch_size} else: # In test_internal_ips(), we try to connect to a host listed - # in ismaster['hosts'] but not publicly accessible. + # in hello['hosts'] but not publicly accessible. raise AutoReconnect('Unknown host: %s' % host) return response, rtt diff --git a/test/test_auth.py b/test/test_auth.py index f7b13a1cf3..52159e4f3b 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -25,6 +25,7 @@ from pymongo import MongoClient, monitoring from pymongo.auth import HAVE_KERBEROS, _build_credentials_tuple from pymongo.errors import OperationFailure +from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP from test import client_context, IntegrationTest, SkipTest, unittest, Version @@ -155,7 +156,7 @@ def test_gssapi_simple(self): client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command('ismaster').get('setName') + set_name = client.admin.command('HelloCompat.LEGACY_CMD').get('setName') if set_name: if not self.service_realm_required: # Without authMechanismProperties @@ -221,7 +222,7 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command('ismaster').get('setName') + set_name = client.admin.command('HelloCompat.LEGACY_CMD').get('setName') if set_name: client = MongoClient(GSSAPI_HOST, GSSAPI_PORT, @@ -269,7 +270,7 @@ def test_sasl_plain(self): client = MongoClient(uri) client.ldap.test.find_one() - set_name = client.admin.command('ismaster').get('setName') + set_name = client.admin.command('HelloCompat.LEGACY_CMD').get('setName') if set_name: client = MongoClient(SASL_HOST, SASL_PORT, @@ -299,8 +300,8 @@ def auth_string(user, password): bad_user = MongoClient(auth_string('not-user', SASL_PASS)) bad_pwd = MongoClient(auth_string(SASL_USER, 'not-pwd')) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, 'ismaster') - self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ismaster') + self.assertRaises(OperationFailure, bad_user.admin.command, 'ping') + self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ping') class TestSCRAMSHA1(IntegrationTest): @@ -530,7 +531,7 @@ def test_scram_saslprep(self): def test_cache(self): client = single_client() # Force authentication. - client.admin.command('ismaster') + client.admin.command('ping') all_credentials = client._MongoClient__all_credentials credentials = all_credentials.get('admin') cache = credentials.cache diff --git a/test/test_bulk.py b/test/test_bulk.py index 9834da55cb..f0cb52912e 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -776,8 +776,8 @@ def setUpClass(cls): cls.w = client_context.w cls.secondary = None if cls.w > 1: - for member in client_context.ismaster['hosts']: - if member != client_context.ismaster['primary']: + for member in client_context.hello['hosts']: + if member != client_context.hello['primary']: cls.secondary = single_client(*partition_node(member)) break diff --git a/test/test_client.py b/test/test_client.py index 3584329fd2..8ac1f86be9 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -36,11 +36,12 @@ from bson.tz_util import utc import pymongo from pymongo import message, monitoring -from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS from pymongo.command_cursor import CommandCursor +from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD from pymongo.cursor import Cursor, CursorType from pymongo.database import Database +from pymongo.driver_info import DriverInfo from pymongo.errors import (AutoReconnect, ConfigurationError, ConnectionFailure, @@ -50,10 +51,10 @@ OperationFailure, ServerSelectionTimeoutError, WriteConcernError) +from pymongo.hello import HelloCompat +from pymongo.mongo_client import MongoClient from pymongo.monitoring import (ServerHeartbeatListener, ServerHeartbeatStartedEvent) -from pymongo.mongo_client import MongoClient -from pymongo.driver_info import DriverInfo from pymongo.pool import SocketInfo, _METADATA from pymongo.read_preferences import ReadPreference from pymongo.server_description import ServerDescription @@ -61,9 +62,9 @@ writable_server_selector) from pymongo.server_type import SERVER_TYPE from pymongo.settings import TOPOLOGY_TYPE +from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.topology import _ErrorContext from pymongo.topology_description import TopologyDescription -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.write_concern import WriteConcern from test import (client_context, client_knobs, @@ -833,7 +834,7 @@ def test_close_stops_kill_cursors_thread(self): self.assertTrue(client._kill_cursors_executor._stopped) # Reusing the closed client should restart the thread. - client.admin.command('isMaster') + client.admin.command('ping') self.assertFalse(client._kill_cursors_executor._stopped) # Again, closing the client should stop the thread. @@ -850,7 +851,7 @@ def test_uri_connect_option(self): self.assertFalse(kc_thread and kc_thread.is_alive()) # Using the client should open topology and start the thread. - client.admin.command('isMaster') + client.admin.command('ping') self.assertTrue(client._topology._opened) kc_thread = client._kill_cursors_executor._thread self.assertTrue(kc_thread and kc_thread.is_alive()) @@ -1783,11 +1784,11 @@ def test_max_bson_size(self): c = self._get_client() # max_bson_size will cause the client to connect. - ismaster = c.db.command('ismaster') - self.assertEqual(ismaster['maxBsonObjectSize'], c.max_bson_size) - if 'maxMessageSizeBytes' in ismaster: + hello = c.db.command(HelloCompat.LEGACY_CMD) + self.assertEqual(hello['maxBsonObjectSize'], c.max_bson_size) + if 'maxMessageSizeBytes' in hello: self.assertEqual( - ismaster['maxMessageSizeBytes'], + hello['maxMessageSizeBytes'], c.max_message_size) diff --git a/test/test_cmap.py b/test/test_cmap.py index 3a7b708526..38966eb66a 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -318,11 +318,11 @@ def test_1_client_connection_pool_options(self): def test_2_all_client_pools_have_same_options(self): client = rs_or_single_client(**self.POOL_OPTIONS) self.addCleanup(client.close) - client.admin.command('isMaster') + client.admin.command('ping') # Discover at least one secondary. if client_context.has_secondaries: client.admin.command( - 'isMaster', read_preference=ReadPreference.SECONDARY) + 'ping', read_preference=ReadPreference.SECONDARY) pools = get_pools(client) pool_opts = pools[0].opts @@ -346,7 +346,7 @@ def test_4_subscribe_to_events(self): self.assertEqual(listener.event_count(PoolCreatedEvent), 1) # Creates a new connection. - client.admin.command('isMaster') + client.admin.command('ping') self.assertEqual( listener.event_count(ConnectionCheckOutStartedEvent), 1) self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) @@ -355,7 +355,7 @@ def test_4_subscribe_to_events(self): self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) # Uses the existing connection. - client.admin.command('isMaster') + client.admin.command('ping') self.assertEqual( listener.event_count(ConnectionCheckOutStartedEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) @@ -379,7 +379,7 @@ def mock_connect(*args, **kwargs): # Attempt to create a new connection. with self.assertRaisesRegex(ConnectionFailure, 'connect failed'): - client.admin.command('isMaster') + client.admin.command('ping') self.assertIsInstance(listener.events[0], PoolCreatedEvent) self.assertIsInstance(listener.events[1], PoolReadyEvent) @@ -401,7 +401,7 @@ def test_5_check_out_fails_auth_error(self): # Attempt to create a new connection. with self.assertRaisesRegex(OperationFailure, 'failed'): - client.admin.command('isMaster') + client.admin.command('ping') self.assertIsInstance(listener.events[0], PoolCreatedEvent) self.assertIsInstance(listener.events[1], PoolReadyEvent) diff --git a/test/test_collection.py b/test/test_collection.py index ba6d2b544b..2795b47425 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2134,7 +2134,7 @@ def test_find_one_and_write_concern(self): c_default = db.get_collection('test', write_concern=WriteConcern()) results = listener.results # Authenticate the client and throw out auth commands from the listener. - db.command('ismaster') + db.command('ping') results.clear() if client_context.version.at_least(3, 1, 9, -1): c_w0.find_one_and_update( diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 6ed16fa50e..894b14becd 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -122,13 +122,13 @@ def run_scenario(self, error_code, retry, pool_status_checker): @client_context.require_version_min(4, 2, -1) @client_context.require_test_commands - def test_not_master_keep_connection_pool(self): + def test_not_primary_keep_connection_pool(self): self.run_scenario(10107, True, self.verify_pool_not_cleared) @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 1, 0, -1) @client_context.require_test_commands - def test_not_master_reset_connection_pool(self): + def test_not_primary_reset_connection_pool(self): self.run_scenario(10107, False, self.verify_pool_cleared) @client_context.require_version_min(4, 0, 0) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 0b3ac6da5d..c26c0df309 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -31,7 +31,7 @@ OperationFailure) from pymongo.helpers import (_check_command_response, _check_write_command_response) -from pymongo.hello import Hello +from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription, SERVER_TYPE from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext @@ -81,9 +81,9 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): return c -def got_ismaster(topology, server_address, ismaster_response): +def got_hello(topology, server_address, hello_response): server_description = ServerDescription( - server_address, Hello(ismaster_response), 0) + server_address, Hello(hello_response), 0) topology.on_change(server_description) @@ -206,7 +206,7 @@ def run_scenario(self): description = phase.get('description', str(i)) with assertion_context('phase: %s' % (description,)): for response in phase.get('responses', []): - got_ismaster( + got_hello( c, common.partition_node(response[0]), response[1]) for app_error in phase.get('applicationErrors', []): @@ -244,12 +244,12 @@ def test_cluster_time_comparison(self): def send_cluster_time(time, inc, should_update): old = t.max_cluster_time() new = {'clusterTime': Timestamp(time, inc)} - got_ismaster(t, - ('host', 27017), - {'ok': 1, - 'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': new}) + got_hello(t, + ('host', 27017), + {'ok': 1, + 'minWireVersion': 0, + 'maxWireVersion': 6, + '$clusterTime': new}) actual = t.max_cluster_time() if should_update: @@ -332,15 +332,15 @@ def test_pool_unpause(self): listener.events.index(hb_succeeded)) listener.reset() - fail_ismaster = { + fail_hello = { 'mode': {'times': 2}, 'data': { - 'failCommands': ['isMaster', 'hello'], + 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], 'errorCode': 1234, 'appName': 'SDAMPoolManagementTest', }, } - with self.fail_point(fail_ismaster): + with self.fail_point(fail_hello): listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) listener.wait_for_event(monitoring.PoolClearedEvent, 1) listener.wait_for_event( diff --git a/test/test_encryption.py b/test/test_encryption.py index efd12c3728..adab7221d2 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -264,11 +264,11 @@ def test_use_after_close(self): client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - client.admin.command('isMaster') + client.admin.command('ping') client.close() with self.assertRaisesRegex(InvalidOperation, 'Cannot use MongoClient after close'): - client.admin.command('isMaster') + client.admin.command('ping') class TestClientMaxWireVersion(IntegrationTest): @@ -287,7 +287,7 @@ def test_raise_max_wire_version_error(self): with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.insert_one({}) with self.assertRaisesRegex(ConfigurationError, msg): - client.admin.command('isMaster') + client.admin.command('ping') with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.find_one({}) with self.assertRaisesRegex(ConfigurationError, msg): diff --git a/test/test_errors.py b/test/test_errors.py index aec7bf478d..b70b0d5291 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -62,7 +62,7 @@ def test_unicode_strs_operation_failure(self): {"errmsg": 'unicode \U0001f40d'}) self._test_unicode_strs(exc) - def test_unicode_strs_not_master_error(self): + def test_unicode_strs_not_primary_error(self): exc = NotPrimaryError('unicode \U0001f40d', {"errmsg": 'unicode \U0001f40d'}) self._test_unicode_strs(exc) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index bd926f772d..6941e6bd84 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] from pymongo.errors import ConnectionFailure -from pymongo.hello import Hello +from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from test import unittest, client_knobs, IntegrationTest from test.utils import (HeartbeatEventListener, MockPool, single_client, @@ -74,7 +74,7 @@ def _check_with_socket(self, *args, **kwargs): def test_standalone(self): responses = (('a', 27017), { - "ismaster": True, + HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1 diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index 96a7e9f6bd..e67b1186e3 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -41,7 +41,7 @@ def __init__(self, client): self.passed = False def run(self): - self.client.db.command('ismaster') + self.client.db.command('ping') self.passed = True # No exception raised. @@ -121,10 +121,10 @@ def test_failover(self): def f(): try: - client.db.command('ismaster') + client.db.command('ping') except AutoReconnect: # Second attempt succeeds. - client.db.command('ismaster') + client.db.command('ping') passed.append(True) @@ -151,23 +151,23 @@ def test_local_threshold(self): writable_addresses(topology)) # No error - client.admin.command('ismaster') + client.admin.command('ping') client = connected(self.mock_client(localThresholdMS=0)) self.assertEqual(0, client.local_threshold_ms) # No error - client.db.command('ismaster') + client.db.command('ping') # Our chosen mongos goes down. client.kill_host('%s:%s' % next(iter(client.nodes))) try: - client.db.command('ismaster') + client.db.command('ping') except: pass # We eventually connect to a new mongos. def connect_to_new_mongos(): try: - return client.db.command('ismaster') + return client.db.command('ping') except AutoReconnect: pass wait_until(connect_to_new_mongos, 'connect to a new mongos') diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 3ff9f04a81..c9f4e5ae76 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -63,7 +63,7 @@ def tearDown(self): super(TestCommandMonitoring, self).tearDown() def test_started_simple(self): - self.client.pymongo_test.command('ismaster') + self.client.pymongo_test.command('ping') results = self.listener.results started = results['started'][0] succeeded = results['succeeded'][0] @@ -72,14 +72,14 @@ def test_started_simple(self): isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue( isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ismaster', 1)]), started.command) - self.assertEqual('ismaster', started.command_name) + self.assertEqualCommand(SON([('ping', 1)]), started.command) + self.assertEqual('ping', started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual('pymongo_test', started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_succeeded_simple(self): - self.client.pymongo_test.command('ismaster') + self.client.pymongo_test.command('ping') results = self.listener.results started = results['started'][0] succeeded = results['succeeded'][0] @@ -88,7 +88,7 @@ def test_succeeded_simple(self): isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue( isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertEqual('ismaster', succeeded.command_name) + self.assertEqual('ping', succeeded.command_name) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(1, succeeded.reply.get('ok')) self.assertTrue(isinstance(succeeded.request_id, int)) @@ -432,11 +432,11 @@ def test_get_more_failure(self): @client_context.require_replica_set @client_context.require_secondaries_count(1) - def test_not_master_error(self): + def test_not_primary_error(self): address = next(iter(client_context.client.secondaries)) client = single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. - client.admin.command('ismaster') + client.admin.command('ping') self.listener.results.clear() error = None try: @@ -982,7 +982,7 @@ def test_bulk_write_command_error(self): 'data': { 'failCommands': ['insert'], 'closeConnection': False, - 'errorCode': 10107, # NotMaster + 'errorCode': 10107, # Not primary }, } with self.fail_point(insert_command_error): @@ -1126,7 +1126,7 @@ def setUpClass(cls): monitoring.register(cls.listener) cls.client = single_client() # Get one (authenticated) socket in the pool. - cls.client.pymongo_test.command('ismaster') + cls.client.pymongo_test.command('ping') @classmethod def tearDownClass(cls): @@ -1139,7 +1139,7 @@ def setUp(self): self.listener.results.clear() def test_simple(self): - self.client.pymongo_test.command('ismaster') + self.client.pymongo_test.command('ping') results = self.listener.results started = results['started'][0] succeeded = results['succeeded'][0] @@ -1148,8 +1148,8 @@ def test_simple(self): isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue( isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ismaster', 1)]), started.command) - self.assertEqual('ismaster', started.command_name) + self.assertEqualCommand(SON([('ping', 1)]), started.command) + self.assertEqual('ping', started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual('pymongo_test', started.database_name) self.assertTrue(isinstance(started.request_id, int)) @@ -1160,27 +1160,27 @@ class TestEventClasses(unittest.TestCase): def test_command_event_repr(self): request_id, connection_id, operation_id = 1, ('localhost', 27017), 2 event = monitoring.CommandStartedEvent( - {'isMaster': 1}, 'admin', request_id, connection_id, operation_id) + {'ping': 1}, 'admin', request_id, connection_id, operation_id) self.assertEqual( repr(event), "") + "command: 'ping', operation_id: 2, service_id: None>") delta = datetime.timedelta(milliseconds=100) event = monitoring.CommandSucceededEvent( - delta, {'ok': 1}, 'isMaster', request_id, connection_id, + delta, {'ok': 1}, 'ping', request_id, connection_id, operation_id) self.assertEqual( repr(event), "") event = monitoring.CommandFailedEvent( - delta, {'ok': 0}, 'isMaster', request_id, connection_id, + delta, {'ok': 0}, 'ping', request_id, connection_id, operation_id) self.assertEqual( repr(event), "") def test_server_heartbeat_event_repr(self): diff --git a/test/test_pooling.py b/test/test_pooling.py index d5f4f09a9a..becbacc1ef 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -281,7 +281,7 @@ def test_socket_checker(self): self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) # Make the socket readable _, msg, _ = message._query( - 0, 'admin.$cmd', 0, -1, SON([('isMaster', 1)]), None, + 0, 'admin.$cmd', 0, -1, SON([('ping', 1)]), None, DEFAULT_CODEC_OPTIONS) s.sendall(msg) # Block until the socket is readable. diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 62dc0ac0a9..a25d7af169 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -61,7 +61,7 @@ def test_client(self): c.close() with self.assertRaises(AutoReconnect): - c.db.command('ismaster') + c.db.command('ping') self.assertEqual(c.address, None) @@ -106,7 +106,7 @@ def test_replica_set_client(self): wait_until(lambda: ('c', 3) in c.secondaries, 'discover host "c"') # C is removed. - c.mock_ismaster_hosts.remove('c:3') + c.mock_hello_hosts.remove('c:3') wait_until(lambda: set([('b', 2)]) == c.secondaries, 'update list of secondaries') @@ -156,10 +156,10 @@ def test_client(self): # C is added. c.mock_members.append('c:3') - c.mock_ismaster_hosts.append('c:3') + c.mock_hello_hosts.append('c:3') c.close() - c.db.command('ismaster') + c.db.command('ping') self.assertEqual(c.address, ('a', 1)) @@ -181,7 +181,7 @@ def test_replica_set_client(self): # C is added. c.mock_members.append('c:3') - c.mock_ismaster_hosts.append('c:3') + c.mock_hello_hosts.append('c:3') wait_until(lambda: set([('b', 2), ('c', 3)]) == c.secondaries, 'discover the new secondary') diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index cb168bb2bd..25b0064a0f 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -197,7 +197,7 @@ def _run(self): source_address = clean_node(source) topology.on_change(ServerDescription( address=source_address, - ismaster=Hello(response), + hello=Hello(response), round_trip_time=0)) expected_results = phase['outcome']['events'] @@ -326,10 +326,10 @@ def marked_unknown_and_rediscovered(): def test_network_error_publishes_events(self): self._test_app_error({'closeConnection': True}, ConnectionFailure) - # In 4.4+, NotMaster errors from failCommand don't cause SDAM state + # In 4.4+, not primary errors from failCommand don't cause SDAM state # changes because topologyVersion is not incremented. @client_context.require_version_max(4, 3) - def test_not_master_error_publishes_events(self): + def test_not_primary_error_publishes_events(self): self._test_app_error({'errorCode': 10107, 'closeConnection': False, 'errorLabels': ['RetryableWriteError']}, NotPrimaryError) diff --git a/test/test_server.py b/test/test_server.py index ca3ce76cd3..e4996d2e09 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -26,8 +26,8 @@ class TestServer(unittest.TestCase): def test_repr(self): - ismaster = Hello({'ok': 1}) - sd = ServerDescription(('localhost', 27017), ismaster) + hello = Hello({'ok': 1}) + sd = ServerDescription(('localhost', 27017), hello) server = Server(sd, pool=object(), monitor=object()) self.assertTrue('Standalone' in str(server)) diff --git a/test/test_server_description.py b/test/test_server_description.py index 7abb8b9c1a..23d6c8f377 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -21,36 +21,36 @@ from bson.objectid import ObjectId from bson.int64 import Int64 from pymongo.server_type import SERVER_TYPE -from pymongo.hello import Hello +from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription from test import unittest address = ('localhost', 27017) -def parse_ismaster_response(doc): - ismaster_response = Hello(doc) - return ServerDescription(address, ismaster_response) +def parse_hello_response(doc): + hello_response = Hello(doc) + return ServerDescription(address, hello_response) class TestServerDescription(unittest.TestCase): def test_unknown(self): - # Default, no ismaster_response. + # Default, no hello_response. s = ServerDescription(address) self.assertEqual(SERVER_TYPE.Unknown, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_mongos(self): - s = parse_ismaster_response({'ok': 1, 'msg': 'isdbgrid'}) + s = parse_hello_response({'ok': 1, 'msg': 'isdbgrid'}) self.assertEqual(SERVER_TYPE.Mongos, s.server_type) self.assertEqual('Mongos', s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_primary(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': True, 'setName': 'rs'}) + s = parse_hello_response( + {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs'}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) self.assertEqual('RSPrimary', s.server_type_name) @@ -58,8 +58,8 @@ def test_primary(self): self.assertTrue(s.is_readable) def test_secondary(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': False, 'secondary': True, 'setName': 'rs'}) + s = parse_hello_response( + {'ok': 1, HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs'}) self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) self.assertEqual('RSSecondary', s.server_type_name) @@ -67,8 +67,8 @@ def test_secondary(self): self.assertTrue(s.is_readable) def test_arbiter(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': False, 'arbiterOnly': True, 'setName': 'rs'}) + s = parse_hello_response( + {'ok': 1, HelloCompat.LEGACY_CMD: False, 'arbiterOnly': True, 'setName': 'rs'}) self.assertEqual(SERVER_TYPE.RSArbiter, s.server_type) self.assertEqual('RSArbiter', s.server_type_name) @@ -76,15 +76,15 @@ def test_arbiter(self): self.assertFalse(s.is_readable) def test_other(self): - s = parse_ismaster_response( - {'ok': 1, 'ismaster': False, 'setName': 'rs'}) + s = parse_hello_response( + {'ok': 1, HelloCompat.LEGACY_CMD: False, 'setName': 'rs'}) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) self.assertEqual('RSOther', s.server_type_name) - s = parse_ismaster_response({ + s = parse_hello_response({ 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'hidden': True, 'setName': 'rs'}) @@ -94,7 +94,7 @@ def test_other(self): self.assertFalse(s.is_readable) def test_ghost(self): - s = parse_ismaster_response({'ok': 1, 'isreplicaset': True}) + s = parse_hello_response({'ok': 1, 'isreplicaset': True}) self.assertEqual(SERVER_TYPE.RSGhost, s.server_type) self.assertEqual('RSGhost', s.server_type_name) @@ -102,9 +102,9 @@ def test_ghost(self): self.assertFalse(s.is_readable) def test_fields(self): - s = parse_ismaster_response({ + s = parse_hello_response({ 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'primary': 'a:27017', 'tags': {'a': 'foo', 'b': 'baz'}, @@ -125,35 +125,35 @@ def test_fields(self): self.assertEqual(5, s.max_wire_version) def test_default_max_message_size(self): - s = parse_ismaster_response({ + s = parse_hello_response({ 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'maxBsonObjectSize': 2}) # Twice max_bson_size. self.assertEqual(4, s.max_message_size) def test_standalone(self): - s = parse_ismaster_response({'ok': 1, 'ismaster': True}) + s = parse_hello_response({'ok': 1, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) # Mongod started with --slave. # master-slave replication was removed in MongoDB 4.0. - s = parse_ismaster_response({'ok': 1, 'ismaster': False}) + s = parse_hello_response({'ok': 1, HelloCompat.LEGACY_CMD: False}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_ok_false(self): - s = parse_ismaster_response({'ok': 0, 'ismaster': True}) + s = parse_hello_response({'ok': 0, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Unknown, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_all_hosts(self): - s = parse_ismaster_response({ + s = parse_hello_response({ 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'hosts': ['a'], 'passives': ['b:27018'], 'arbiters': ['c'] @@ -164,15 +164,15 @@ def test_all_hosts(self): sorted(s.all_hosts)) def test_repr(self): - s = parse_ismaster_response({'ok': 1, 'msg': 'isdbgrid'}) + s = parse_hello_response({'ok': 1, 'msg': 'isdbgrid'}) self.assertEqual(repr(s), "") def test_topology_version(self): topology_version = {'processId': ObjectId(), 'counter': Int64('0')} - s = parse_ismaster_response( - {'ok': 1, 'ismaster': True, 'setName': 'rs', + s = parse_hello_response( + {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'topologyVersion': topology_version}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) @@ -185,8 +185,8 @@ def test_topology_version(self): def test_topology_version_not_present(self): # No topologyVersion field. - s = parse_ismaster_response( - {'ok': 1, 'ismaster': True, 'setName': 'rs'}) + s = parse_hello_response( + {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs'}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) self.assertEqual(None, s.topology_version) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index fc8f643163..1e4165246d 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -20,6 +20,7 @@ from pymongo import MongoClient from pymongo import ReadPreference from pymongo.errors import ServerSelectionTimeoutError +from pymongo.hello import HelloCompat from pymongo.server_selectors import writable_server_selector from pymongo.settings import TopologySettings from pymongo.topology import Topology @@ -75,7 +76,7 @@ def custom_selector(servers): # Wait the node list to be fully populated. def all_hosts_started(): - return (len(client.admin.command('isMaster')['hosts']) == + return (len(client.admin.command(HelloCompat.LEGACY_CMD)['hosts']) == len(client._topology._description.readable_servers)) wait_until(all_hosts_started, 'receive heartbeat from all hosts') diff --git a/test/test_session.py b/test/test_session.py index 7a78ca72d4..fc1f8382fe 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -1027,7 +1027,7 @@ def test_sessions_not_supported(self): class TestClusterTime(IntegrationTest): def setUp(self): super(TestClusterTime, self).setUp() - if '$clusterTime' not in client_context.ismaster: + if '$clusterTime' not in client_context.hello: raise SkipTest('$clusterTime not supported') def test_cluster_time(self): diff --git a/test/test_ssl.py b/test/test_ssl.py index d6055cafbd..7c3cc4bacb 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -26,6 +26,7 @@ from pymongo.errors import (ConfigurationError, ConnectionFailure, OperationFailure) +from pymongo.hello import HelloCompat from pymongo.ssl_support import HAVE_SSL, get_ssl_context, _ssl from pymongo.write_concern import WriteConcern from test import (IntegrationTest, @@ -221,7 +222,7 @@ def test_cert_ssl_implicitly_set(self): client = MongoClient(client_context.host, client_context.port, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) - response = client.admin.command('ismaster') + response = client.admin.command(HelloCompat.LEGACY_CMD) if 'setName' in response: client = MongoClient(client_context.pair, replicaSet=response['setName'], @@ -245,7 +246,7 @@ def test_cert_ssl_validation(self): tlsCertificateKeyFile=CLIENT_PEM, tlsAllowInvalidCertificates=False, tlsCAFile=CA_PEM) - response = client.admin.command('ismaster') + response = client.admin.command(HelloCompat.LEGACY_CMD) if 'setName' in response: if response['primary'].split(":")[0] != 'localhost': raise SkipTest("No hosts in the replicaset for 'localhost'. " @@ -303,7 +304,7 @@ def test_cert_ssl_validation_hostname_matching(self): else: self.assertFalse(ctx.check_hostname) - response = self.client.admin.command('ismaster') + response = self.client.admin.command(HelloCompat.LEGACY_CMD) with self.assertRaises(ConnectionFailure): connected(MongoClient('server', @@ -603,7 +604,7 @@ def remove(path): tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle) as client: - self.assertTrue(client.admin.command('ismaster')) + self.assertTrue(client.admin.command('ping')) if __name__ == "__main__": diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index f2b2c7ccaa..4715fbfee7 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -20,6 +20,7 @@ sys.path[0:0] = [""] from pymongo import monitoring +from pymongo.hello import HelloCompat from test import (client_context, IntegrationTest, @@ -45,17 +46,17 @@ def test_failCommand_streaming(self): address = client.address listener.reset() - fail_ismaster = { + fail_hello = { 'configureFailPoint': 'failCommand', 'mode': {'times': 4}, 'data': { - 'failCommands': ['isMaster', 'hello'], + 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], 'closeConnection': False, 'errorCode': 10107, 'appName': 'failingHeartbeatTest', }, } - with self.fail_point(fail_ismaster): + with self.fail_point(fail_hello): def _marked_unknown(event): return (event.server_address == address and not event.new_description.is_server_type_known) @@ -83,21 +84,21 @@ def test_streaming_rtt(self): listener = ServerEventListener() hb_listener = HeartbeatEventListener() # On Windows, RTT can actually be 0.0 because time.time() only has - # 1-15 millisecond resolution. We need to delay the initial isMaster + # 1-15 millisecond resolution. We need to delay the initial hello # to ensure that RTT is never zero. name = 'streamingRttTest' - delay_ismaster = { + delay_hello = { 'configureFailPoint': 'failCommand', 'mode': {'times': 1000}, 'data': { - 'failCommands': ['isMaster', 'hello'], + 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], 'blockConnection': True, 'blockTimeMS': 20, # This can be uncommented after SERVER-49220 is fixed. # 'appName': name, }, } - with self.fail_point(delay_ismaster): + with self.fail_point(delay_hello): client = rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, @@ -107,9 +108,9 @@ def test_streaming_rtt(self): client.admin.command('ping') address = client.address - delay_ismaster['data']['blockTimeMS'] = 500 - delay_ismaster['data']['appName'] = name - with self.fail_point(delay_ismaster): + delay_hello['data']['blockTimeMS'] = 500 + delay_hello['data']['appName'] = name + with self.fail_point(delay_hello): def rtt_exceeds_250_ms(): # XXX: Add a public TopologyDescription getter to MongoClient? topology = client._topology @@ -135,15 +136,15 @@ def changed_event(event): def test_monitor_waits_after_server_check_error(self): # This test implements: # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks - fail_ismaster = { + fail_hello = { 'mode': {'times': 5}, 'data': { - 'failCommands': ['isMaster', 'hello'], + 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], 'errorCode': 1234, 'appName': 'SDAMMinHeartbeatFrequencyTest', }, } - with self.fail_point(fail_ismaster): + with self.fail_point(fail_hello): start = time.time() client = single_client( appName='SDAMMinHeartbeatFrequencyTest', @@ -161,7 +162,7 @@ def test_monitor_waits_after_server_check_error(self): # 1502ms: failed monitor handshake, 4 # 2002ms: failed monitor handshake, 5 # 2502ms: monitor handshake succeeds - # 2503ms: run awaitable isMaster + # 2503ms: run awaitable hello # 2504ms: application handshake succeeds # 2505ms: ping command succeeds self.assertGreaterEqual(duration, 2) @@ -186,7 +187,7 @@ def hb_failed(event): fail_heartbeat = { 'mode': {'times': 2}, 'data': { - 'failCommands': ['isMaster', 'hello'], + 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], 'closeConnection': True, 'appName': 'heartbeatEventAwaitedFlag', }, diff --git a/test/test_topology.py b/test/test_topology.py index 1513837997..f2b23eb74c 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -21,24 +21,24 @@ from bson.objectid import ObjectId from pymongo import common -from pymongo.read_preferences import ReadPreference, Secondary -from pymongo.server_type import SERVER_TYPE -from pymongo.topology import (_ErrorContext, - Topology) -from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.errors import (AutoReconnect, ConfigurationError, ConnectionFailure) -from pymongo.hello import Hello +from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import PoolOptions +from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_description import ServerDescription from pymongo.server_selectors import (any_server_selector, writable_server_selector) +from pymongo.server_type import SERVER_TYPE from pymongo.settings import TopologySettings +from pymongo.topology import (_ErrorContext, + Topology) +from pymongo.topology_description import TOPOLOGY_TYPE from test import client_knobs, unittest -from test.utils import MockPool, wait_until from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, wait_until class SetNameDiscoverySettings(TopologySettings): @@ -65,9 +65,9 @@ def create_mock_topology( return t -def got_ismaster(topology, server_address, ismaster_response): +def got_hello(topology, server_address, hello_response): server_description = ServerDescription( - server_address, Hello(ismaster_response), 0) + server_address, Hello(hello_response), 0) topology.on_change(server_description) @@ -119,23 +119,23 @@ def test_timeout_configuration(self): self.assertEqual(1, monitor._pool.opts.connect_timeout) self.assertEqual(1, monitor._pool.opts.socket_timeout) - # The monitor, not its pool, is responsible for calling ismaster. + # The monitor, not its pool, is responsible for calling hello. self.assertFalse(monitor._pool.handshake) class TestSingleServerTopology(TopologyTest): def test_direct_connection(self): - for server_type, ismaster_response in [ + for server_type, hello_response in [ (SERVER_TYPE.RSPrimary, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'hosts': ['a'], 'setName': 'rs', 'maxWireVersion': 6}), (SERVER_TYPE.RSSecondary, { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'hosts': ['a'], 'setName': 'rs', @@ -143,13 +143,13 @@ def test_direct_connection(self): (SERVER_TYPE.Mongos, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'msg': 'isdbgrid', 'maxWireVersion': 6}), (SERVER_TYPE.RSArbiter, { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'arbiterOnly': True, 'hosts': ['a'], 'setName': 'rs', @@ -157,7 +157,7 @@ def test_direct_connection(self): (SERVER_TYPE.Standalone, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'maxWireVersion': 6}), # A "slave" in a master-slave deployment. @@ -165,7 +165,7 @@ def test_direct_connection(self): # 4.0. (SERVER_TYPE.Standalone, { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'maxWireVersion': 6}), ]: t = create_mock_topology() @@ -176,7 +176,7 @@ def test_direct_connection(self): t.select_servers(any_server_selector, server_selection_timeout=0) - got_ismaster(t, address, ismaster_response) + got_hello(t, address, hello_response) # Topology type never changes. self.assertEqual(TOPOLOGY_TYPE.Single, t.description.topology_type) @@ -267,15 +267,15 @@ def new_average(): class TestMultiServerTopology(TopologyTest): def test_readable_writable(self): t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b']}) - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a', 'b']}) @@ -291,16 +291,16 @@ def test_readable_writable(self): Secondary(tag_sets=[{'tag': 'exists'}]))) t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': False, 'setName': 'rs', 'hosts': ['a', 'b']}) - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a', 'b']}) @@ -316,15 +316,15 @@ def test_readable_writable(self): Secondary(tag_sets=[{'tag': 'exists'}]))) t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b']}) - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a', 'b'], @@ -342,15 +342,15 @@ def test_readable_writable(self): def test_close(self): t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b']}) - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a', 'b']}) @@ -372,10 +372,10 @@ def test_close(self): self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) - # A closed topology should not be updated when receiving an isMaster. - got_ismaster(t, ('a', 27017), { + # A closed topology should not be updated when receiving a hello. + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b', 'c']}) @@ -392,15 +392,15 @@ def test_close(self): def test_handle_error(self): t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b']}) - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a', 'b']}) @@ -413,9 +413,9 @@ def test_handle_error(self): self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b']}) @@ -453,9 +453,9 @@ def test_discover_set_name_from_primary(self): self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a']}) @@ -465,9 +465,9 @@ def test_discover_set_name_from_primary(self): # Another response from the primary. Tests the code that processes # primary response when topology type is already ReplicaSetWithPrimary. - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a']}) @@ -489,9 +489,9 @@ def test_discover_set_name_from_secondary(self): self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a']}) @@ -504,9 +504,9 @@ def test_wire_version(self): t = create_mock_topology(replica_set_name='rs') t.description.check_compatible() # No error. - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a']}) @@ -515,9 +515,9 @@ def test_wire_version(self): self.assertEqual(server.description.min_wire_version, 0) self.assertEqual(server.description.max_wire_version, 0) - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a'], 'minWireVersion': 1, @@ -527,9 +527,9 @@ def test_wire_version(self): self.assertEqual(server.description.max_wire_version, 5) # Incompatible. - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a'], 'minWireVersion': 21, @@ -548,9 +548,9 @@ def test_wire_version(self): self.fail('No error with incompatible wire version') # Incompatible. - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a'], 'minWireVersion': 0, @@ -576,17 +576,17 @@ def write_batch_size(): s = t.select_server(writable_server_selector) return s.description.max_write_batch_size - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b'], 'maxWireVersion': 6, 'maxWriteBatchSize': 1}) - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a', 'b'], @@ -597,9 +597,9 @@ def write_batch_size(): self.assertEqual(1, write_batch_size()) # b becomes primary. - got_ismaster(t, ('b', 27017), { + got_hello(t, ('b', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'b'], 'maxWireVersion': 6, @@ -610,9 +610,9 @@ def write_batch_size(): def test_topology_repr(self): t = create_mock_topology(replica_set_name='rs') self.addCleanup(t.close) - got_ismaster(t, ('a', 27017), { + got_hello(t, ('a', 27017), { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a', 'c', 'b']}) self.assertEqual( @@ -632,7 +632,7 @@ def test_unexpected_load_balancer(self): t = create_mock_topology(seeds=['a']) mock_lb_response = {'ok': 1, 'msg': 'isdbgrid', 'serviceId': ObjectId(), 'maxWireVersion': 13} - got_ismaster(t, ('a', 27017), mock_lb_response) + got_hello(t, ('a', 27017), mock_lb_response) sds = t.description.server_descriptions() self.assertIn(('a', 27017), sds) self.assertEqual(sds[('a', 27017)].server_type_name, 'LoadBalancer') @@ -641,84 +641,84 @@ def test_unexpected_load_balancer(self): # Load balancers are removed from a topology with multiple seeds. t = create_mock_topology(seeds=['a', 'b']) - got_ismaster(t, ('a', 27017), mock_lb_response) + got_hello(t, ('a', 27017), mock_lb_response) self.assertNotIn(('a', 27017), t.description.server_descriptions()) self.assertEqual(t.description.topology_type_name, 'Unknown') -def wait_for_master(topology): +def wait_for_primary(topology): """Wait for a Topology to discover a writable server. - If the monitor is currently calling ismaster, a blocking call to + If the monitor is currently calling hello, a blocking call to select_server from this thread can trigger a spurious wake of the monitor thread. In applications this is harmless but it would break some tests, so we pass server_selection_timeout=0 and poll instead. """ - def get_master(): + def get_primary(): try: return topology.select_server(writable_server_selector, 0) except ConnectionFailure: return None - return wait_until(get_master, 'find master') + return wait_until(get_primary, 'find primary') class TestTopologyErrors(TopologyTest): - # Errors when calling ismaster. + # Errors when calling hello. def test_pool_reset(self): - # ismaster succeeds at first, then always raises socket error. - ismaster_count = [0] + # hello succeeds at first, then always raises socket error. + hello_count = [0] class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): - ismaster_count[0] += 1 - if ismaster_count[0] == 1: + hello_count[0] += 1 + if hello_count[0] == 1: return Hello({'ok': 1, 'maxWireVersion': 6}), 0 else: raise AutoReconnect('mock monitor error') t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) - server = wait_for_master(t) - self.assertEqual(1, ismaster_count[0]) + server = wait_for_primary(t) + self.assertEqual(1, hello_count[0]) generation = server.pool.gen.get_overall() - # Pool is reset by ismaster failure. + # Pool is reset by hello failure. t.request_check_all() self.assertNotEqual(generation, server.pool.gen.get_overall()) - def test_ismaster_retry(self): - # ismaster succeeds at first, then raises socket error, then succeeds. - ismaster_count = [0] + def test_hello_retry(self): + # hello succeeds at first, then raises socket error, then succeeds. + hello_count = [0] class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): - ismaster_count[0] += 1 - if ismaster_count[0] in (1, 3): + hello_count[0] += 1 + if hello_count[0] in (1, 3): return Hello({'ok': 1, 'maxWireVersion': 6}), 0 else: raise AutoReconnect( - 'mock monitor error #%s' % (ismaster_count[0],)) + 'mock monitor error #%s' % (hello_count[0],)) t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) - server = wait_for_master(t) - self.assertEqual(1, ismaster_count[0]) + server = wait_for_primary(t) + self.assertEqual(1, hello_count[0]) self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) - # Second ismaster call, server is marked Unknown, then the monitor - # immediately runs a retry (third ismaster). + # Second hello call, server is marked Unknown, then the monitor + # immediately runs a retry (third hello). t.request_check_all() - # The third ismaster call (the immediate retry) happens sometime soon + # The third hello call (the immediate retry) happens sometime soon # after the failed check triggered by request_check_all. Wait until # the server becomes known again. server = t.select_server(writable_server_selector, 0.250) self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) - self.assertEqual(3, ismaster_count[0]) + self.assertEqual(3, hello_count[0]) def test_internal_monitor_error(self): exception = AssertionError('internal error') @@ -743,9 +743,9 @@ def assertMessage(self, message, topology, selector=any_server_selector): def test_no_primary(self): t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs', 'hosts': ['a']}) @@ -758,9 +758,9 @@ def test_no_primary(self): def test_no_secondary(self): t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': True, + HelloCompat.LEGACY_CMD: True, 'setName': 'rs', 'hosts': ['a']}) @@ -777,9 +777,9 @@ def test_no_secondary(self): def test_bad_replica_set_name(self): t = create_mock_topology(replica_set_name='rs') - got_ismaster(t, address, { + got_hello(t, address, { 'ok': 1, - 'ismaster': False, + HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'wrong', 'hosts': ['a']}) @@ -790,8 +790,8 @@ def test_bad_replica_set_name(self): def test_multiple_standalones(self): # Standalones are removed from a topology with multiple seeds. t = create_mock_topology(seeds=['a', 'b']) - got_ismaster(t, ('a', 27017), {'ok': 1}) - got_ismaster(t, ('b', 27017), {'ok': 1}) + got_hello(t, ('a', 27017), {'ok': 1}) + got_hello(t, ('b', 27017), {'ok': 1}) self.assertMessage('No servers available', t) def test_no_mongoses(self): @@ -799,11 +799,11 @@ def test_no_mongoses(self): t = create_mock_topology(seeds=['a', 'b']) # Discover a mongos and change topology type to Sharded. - got_ismaster(t, ('a', 27017), {'ok': 1, 'msg': 'isdbgrid'}) + got_hello(t, ('a', 27017), {'ok': 1, 'msg': 'isdbgrid'}) # Oops, both servers are standalone now. Remove them. - got_ismaster(t, ('a', 27017), {'ok': 1}) - got_ismaster(t, ('b', 27017), {'ok': 1}) + got_hello(t, ('a', 27017), {'ok': 1}) + got_hello(t, ('b', 27017), {'ok': 1}) self.assertMessage('No mongoses available', t) diff --git a/test/utils.py b/test/utils.py index fa1c8511b7..43528b31de 100644 --- a/test/utils.py +++ b/test/utils.py @@ -38,6 +38,7 @@ monitoring, operations, read_preferences) from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern @@ -576,19 +577,19 @@ def ensure_all_connected(client): Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ - ismaster = client.admin.command("isMaster") - if 'setName' not in ismaster: + hello = client.admin.command(HelloCompat.LEGACY_CMD) + if 'setName' not in hello: raise ConfigurationError("cluster is not a replica set") - target_host_list = set(ismaster['hosts']) - connected_host_list = set([ismaster['me']]) + target_host_list = set(hello['hosts']) + connected_host_list = set([hello['me']]) admindb = client.get_database('admin') - # Run isMaster until we have connected to each host at least once. + # Run hello until we have connected to each host at least once. while connected_host_list != target_host_list: - ismaster = admindb.command("isMaster", + hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) - connected_host_list.update([ismaster["me"]]) + connected_host_list.update([hello["me"]]) def one(s): @@ -715,10 +716,10 @@ def joinall(threads): def connected(client): """Convenience to wait for a newly-constructed client to connect.""" with warnings.catch_warnings(): - # Ignore warning that "ismaster" is always routed to primary even + # Ignore warning that ping is always routed to primary even # if client's read preference isn't PRIMARY. warnings.simplefilter("ignore", UserWarning) - client.admin.command('ismaster') # Force connection. + client.admin.command('ping') # Force connection. return client @@ -760,7 +761,7 @@ def repl_set_step_down(client, **kwargs): client.admin.command(cmd) def is_mongos(client): - res = client.admin.command('ismaster') + res = client.admin.command(HelloCompat.LEGACY_CMD) return res.get('msg', '') == 'isdbgrid' diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 4037705ae4..6de0b7303b 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -23,7 +23,7 @@ from bson import json_util from pymongo.common import clean_node, HEARTBEAT_FREQUENCY from pymongo.errors import AutoReconnect, ConfigurationError -from pymongo.hello import Hello +from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription from pymongo.settings import TopologySettings from pymongo.server_selectors import writable_server_selector @@ -62,30 +62,30 @@ def make_server_description(server, hosts): if server_type in ("Unknown", "PossiblePrimary"): return ServerDescription(clean_node(server['address']), Hello({})) - ismaster_response = {'ok': True, 'hosts': hosts} + hello_response = {'ok': True, 'hosts': hosts} if server_type != "Standalone" and server_type != "Mongos": - ismaster_response['setName'] = "rs" + hello_response['setName'] = "rs" if server_type == "RSPrimary": - ismaster_response['ismaster'] = True + hello_response[HelloCompat.LEGACY_CMD] = True elif server_type == "RSSecondary": - ismaster_response['secondary'] = True + hello_response['secondary'] = True elif server_type == "Mongos": - ismaster_response['msg'] = 'isdbgrid' + hello_response['msg'] = 'isdbgrid' - ismaster_response['lastWrite'] = { + hello_response['lastWrite'] = { 'lastWriteDate': make_last_write_date(server) } for field in 'maxWireVersion', 'tags', 'idleWritePeriodMillis': if field in server: - ismaster_response[field] = server[field] + hello_response[field] = server[field] - ismaster_response.setdefault('maxWireVersion', 6) + hello_response.setdefault('maxWireVersion', 6) # Sets _last_update_time to now. sd = ServerDescription(clean_node(server['address']), - Hello(ismaster_response), + Hello(hello_response), round_trip_time=server['avg_rtt_ms'] / 1000.0) if 'lastUpdateTime' in server: From f64c5aa9403b4b6c2f033ab242294e6f8d136429 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 14 Sep 2021 12:24:06 -0700 Subject: [PATCH 0460/2111] PYTHON-2904 Further language modernization --- pymongo/cursor.py | 2 +- pymongo/mongo_client.py | 4 ++-- test/test_auth.py | 6 +++--- test/test_change_stream.py | 12 ++++++------ test/test_cursor.py | 10 +++++----- test/test_encryption.py | 4 ++-- test/utils.py | 10 +++++----- 7 files changed, 24 insertions(+), 24 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 65f2705126..78dffc662b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -394,7 +394,7 @@ def __query_spec(self): # Make a shallow copy so we can cleanly rewind or clone. spec = self.__spec.copy() - # White-listed commands must be wrapped in $query. + # Allow-listed commands must be wrapped in $query. if "$query" not in spec: # $query has to come first spec = SON([("$query", spec)]) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 64e3e2e4a3..a1152cc824 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -435,12 +435,12 @@ def __init__( ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. Defaults to ``False``. Think very carefully before setting this to ``True`` as that could make your application vulnerable to - man-in-the-middle attacks. + on-path attackers. - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS hostname verification. ``tlsAllowInvalidHostnames=False`` implies ``tls=True``. Defaults to ``False``. Think very carefully before setting this to ``True`` as that could make your application - vulnerable to man-in-the-middle attacks. + vulnerable to on-path attackers. - `tlsCAFile`: A file containing a single or a bundle of "certification authority" certificates, which are used to validate certificates passed from the other end of the connection. diff --git a/test/test_auth.py b/test/test_auth.py index 52159e4f3b..76b320fcb9 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -35,7 +35,7 @@ rs_or_single_client, rs_or_single_client_noauth, single_client_noauth, - WhiteListEventListener) + AllowListEventListener) # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. GSSAPI_HOST = os.environ.get('GSSAPI_HOST') @@ -354,7 +354,7 @@ def setUp(self): super(TestSCRAM, self).setUp() self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS monitoring._SENSITIVE_COMMANDS = set([]) - self.listener = WhiteListEventListener("saslStart") + self.listener = AllowListEventListener("saslStart") def tearDown(self): monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS @@ -363,7 +363,7 @@ def tearDown(self): super(TestSCRAM, self).tearDown() def test_scram_skip_empty_exchange(self): - listener = WhiteListEventListener("saslStart", "saslContinue") + listener = AllowListEventListener("saslStart", "saslContinue") client_context.create_user( 'testscram', 'sha256', 'pwd', roles=['dbOwner'], mechanisms=['SCRAM-SHA-256']) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 8c1bec1a68..e891e1403a 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -45,7 +45,7 @@ from test import client_context, unittest, IntegrationTest from test.unified_format import generate_test_classes from test.utils import ( - EventListener, WhiteListEventListener, rs_or_single_client, wait_until) + EventListener, AllowListEventListener, rs_or_single_client, wait_until) class TestChangeStreamBase(IntegrationTest): @@ -60,8 +60,8 @@ def change_stream(self, *args, **kwargs): return self.change_stream_with_client(self.client, *args, **kwargs) def client_with_listener(self, *commands): - """Return a client with a WhiteListEventListener.""" - listener = WhiteListEventListener(*commands) + """Return a client with a AllowListEventListener.""" + listener = AllowListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener @@ -445,7 +445,7 @@ def test_start_after_resume_process_without_changes(self): class ProseSpecTestsMixin(object): def _client_with_listener(self, *commands): - listener = WhiteListEventListener(*commands) + listener = AllowListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener @@ -474,7 +474,7 @@ def _get_expected_resume_token(self, stream, listener, """Predicts what the resume token should currently be for server versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes - listener is a WhiteListEventListener that listens for aggregate and + listener is a AllowListEventListener that listens for aggregate and getMore commands.""" if previous_change is None or stream._cursor._has_next(): token = self._get_expected_resume_token_legacy( @@ -1047,7 +1047,7 @@ class TestAllLegacyScenarios(IntegrationTest): @client_context.require_connection def setUpClass(cls): super(TestAllLegacyScenarios, cls).setUpClass() - cls.listener = WhiteListEventListener("aggregate", "getMore") + cls.listener = AllowListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod diff --git a/test/test_cursor.py b/test/test_cursor.py index f5b6e49235..09020a6de7 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -44,7 +44,7 @@ OvertCommandListener, ignore_deprecations, rs_or_single_client, - WhiteListEventListener) + AllowListEventListener) class TestCursor(IntegrationTest): @@ -231,7 +231,7 @@ def test_max_await_time_ms(self): 10).max_await_time_ms(90) self.assertEqual(90, cursor._Cursor__max_await_time_ms) - listener = WhiteListEventListener('find', 'getMore') + listener = AllowListEventListener('find', 'getMore') coll = rs_or_single_client( event_listeners=[listener])[self.db.name].pymongo_test results = listener.results @@ -349,7 +349,7 @@ def test_explain(self): def test_explain_with_read_concern(self): # Do not add readConcern level to explain. - listener = WhiteListEventListener("explain") + listener = AllowListEventListener("explain") client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client.pymongo_test.test.with_options( @@ -1221,7 +1221,7 @@ def test_close_kills_cursor_synchronously(self): gc.collect() self.client._process_periodic_tasks() - listener = WhiteListEventListener("killCursors") + listener = AllowListEventListener("killCursors") results = listener.results client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) @@ -1268,7 +1268,7 @@ def test_delete_not_initialized(self): @client_context.require_version_min(3, 6) def test_getMore_does_not_send_readPreference(self): - listener = WhiteListEventListener('find', 'getMore') + listener = AllowListEventListener('find', 'getMore') client = rs_or_single_client( event_listeners=[listener]) self.addCleanup(client.close) diff --git a/test/test_encryption.py b/test/test_encryption.py index adab7221d2..4c9e28b585 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -60,7 +60,7 @@ camel_to_snake_args, OvertCommandListener, TopologyEventListener, - WhiteListEventListener, + AllowListEventListener, rs_or_single_client, wait_until) from test.utils_spec_runner import SpecRunner @@ -1287,7 +1287,7 @@ def _test_automatic(self, expectation_extjson, payload): keyvault_namespace, schema_map=self.SCHEMA_MAP) - insert_listener = WhiteListEventListener('insert') + insert_listener = AllowListEventListener('insert') client = rs_or_single_client( auto_encryption_opts=encryption_opts, event_listeners=[insert_listener]) diff --git a/test/utils.py b/test/utils.py index 43528b31de..d58c616675 100644 --- a/test/utils.py +++ b/test/utils.py @@ -162,23 +162,23 @@ def reset(self): self.results.clear() -class WhiteListEventListener(EventListener): +class AllowListEventListener(EventListener): def __init__(self, *commands): self.commands = set(commands) - super(WhiteListEventListener, self).__init__() + super(AllowListEventListener, self).__init__() def started(self, event): if event.command_name in self.commands: - super(WhiteListEventListener, self).started(event) + super(AllowListEventListener, self).started(event) def succeeded(self, event): if event.command_name in self.commands: - super(WhiteListEventListener, self).succeeded(event) + super(AllowListEventListener, self).succeeded(event) def failed(self, event): if event.command_name in self.commands: - super(WhiteListEventListener, self).failed(event) + super(AllowListEventListener, self).failed(event) class OvertCommandListener(EventListener): From b76c523a60b663a10373ef622510960757e16df2 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 14 Sep 2021 13:15:35 -0700 Subject: [PATCH 0461/2111] Make sure we use the correct pip --- .evergreen/build-mac.sh | 2 +- .evergreen/run-mockupdb-tests.sh | 2 +- .evergreen/run-mongodb-aws-ecs-test.sh | 2 +- .evergreen/run-mongodb-aws-test.sh | 2 +- .evergreen/run-perf-tests.sh | 2 +- .evergreen/run-tests.sh | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index f8f40e263f..79b947d718 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -16,7 +16,7 @@ for VERSION in 3.6 3.7 3.8 3.9; do if ! $PYTHON -m wheel version; then createvirtualenv $PYTHON releasevenv WHEELPYTHON=python - pip install --upgrade wheel + python -m pip install --upgrade wheel else WHEELPYTHON=$PYTHON fi diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh index 26991b5581..d833fdea82 100644 --- a/.evergreen/run-mockupdb-tests.sh +++ b/.evergreen/run-mockupdb-tests.sh @@ -13,7 +13,7 @@ trap "deactivatei, rm -rf mockuptests" EXIT HUP # Install PyMongo from git clone so mockup-tests don't # download it from pypi. -pip install ${PROJECT_DIRECTORY} +python -m pip install ${PROJECT_DIRECTORY} git clone https://github.com/ajdavis/pymongo-mockup-tests.git cd pymongo-mockup-tests diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 00dd8e419a..e7bcf1cda5 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -37,7 +37,7 @@ authtest () { . venvaws/bin/activate cd src - pip install '.[aws]' + python -m pip install '.[aws]' python test/auth_aws/test_auth_aws.py cd - deactivate diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index 6ab480bd72..e51c12d609 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -60,7 +60,7 @@ authtest () { else . venvaws/bin/activate fi - pip install '.[aws]' + python -m pip install '.[aws]' python test/auth_aws/test_auth_aws.py deactivate rm -rf venvaws diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index cdf598c3bf..d2a913c824 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -18,7 +18,7 @@ VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python3" $VIRTUALENV pyperftest . pyperftest/bin/activate -pip install simplejson +python -m pip install simplejson python setup.py build_ext -i start_time=$(date +%s) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index a60f0f07ae..1e08e3ce17 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -78,12 +78,12 @@ elif [ "$COMPRESSORS" = "snappy" ]; then createvirtualenv $PYTHON_BINARY snappytest trap "deactivate; rm -rf snappytest" EXIT HUP # 0.5.2 has issues in pypy3(.5) - pip install python-snappy==0.5.1 + python -m pip install python-snappy==0.5.1 PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then createvirtualenv $PYTHON_BINARY zstdtest trap "deactivate; rm -rf zstdtest" EXIT HUP - pip install zstandard + python -m pip install zstandard PYTHON=python else PYTHON="$PYTHON_BINARY" From 3b8961a76e510925266676e0b5a6cced8250fde7 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 14 Sep 2021 14:00:13 -0700 Subject: [PATCH 0462/2111] PYTHON-2679 Auto discover replica sets by default (#716) --- doc/changelog.rst | 5 +++++ doc/migrate-to-pymongo4.rst | 9 +++++++++ pymongo/mongo_client.py | 6 ++++-- pymongo/settings.py | 7 ++----- pymongo/topology_description.py | 3 ++- test/test_command_monitoring_legacy.py | 7 ++++--- test/test_sdam_monitoring_spec.py | 8 ++++---- test/test_topology.py | 7 ++++--- test/utils.py | 4 +++- test/utils_selection_tests.py | 11 ++++++++--- 10 files changed, 45 insertions(+), 22 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 1f2a540956..b7a0824433 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -116,6 +116,11 @@ Breaking Changes in 4.0 :class:`~bson.dbref.DBRef`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. +- ``directConnection`` URI option and keyword argument to :class:`~pymongo.mongo_client.MongoClient` + defaults to ``False`` instead of ``None``, allowing for the automatic + discovery of replica sets. This means that if you + want a direct connection to a single server you must pass + ``directConnection=True`` as a URI option or keyword argument. - The ``hint`` option is now required when using ``min`` or ``max`` queries with :meth:`~pymongo.collection.Collection.find`. - ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 0e512da774..fe10a60870 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -62,6 +62,15 @@ get the same behavior. MongoClient ----------- +``directConnection`` defaults to False +...................................... + +``directConnection`` URI option and keyword argument to :class:`~pymongo +.mongo_client.MongoClient` defaults to ``False`` instead of ``None``, +allowing for the automatic discovery of replica sets. This means that if you +want a direct connection to a single server you must pass +``directConnection=True`` as a URI option or keyword argument. + The waitQueueMultiple parameter is removed .......................................... diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a1152cc824..adfb6834e4 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -919,13 +919,15 @@ def address(self): .. versionadded:: 3.0 """ topology_type = self._topology._description.topology_type - if topology_type == TOPOLOGY_TYPE.Sharded: + if (topology_type == TOPOLOGY_TYPE.Sharded and + len(self.topology_description.server_descriptions()) > 1): raise InvalidOperation( 'Cannot use "address" property when load balancing among' ' mongoses, use "nodes" instead.') if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced): + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded): return None return self._server_property('address') diff --git a/pymongo/settings.py b/pymongo/settings.py index c866d16718..ff9e84ee01 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -39,7 +39,7 @@ def __init__(self, heartbeat_frequency=common.HEARTBEAT_FREQUENCY, server_selector=None, fqdn=None, - direct_connection=None, + direct_connection=False, load_balanced=None): """Represent MongoClient's configuration. @@ -62,10 +62,7 @@ def __init__(self, self._fqdn = fqdn self._heartbeat_frequency = heartbeat_frequency - if direct_connection is None: - self._direct = (len(self._seeds) == 1 and not self.replica_set_name) - else: - self._direct = direct_connection + self._direct = direct_connection self._load_balanced = load_balanced self._topology_id = ObjectId() diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index bf29586fa6..c6b81b5384 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -248,7 +248,8 @@ def apply_local_threshold(selection): common_wv)) if self.topology_type in (TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced): + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Unknown): # Ignore selectors for standalone and load balancer mode. return self.known_servers elif address: diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index f5fa12003f..be8168edd7 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -22,12 +22,13 @@ import pymongo +from pymongo import MongoClient from bson import json_util from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern from test import unittest, client_context -from test.utils import ( - single_client, wait_until, EventListener, parse_read_preference) +from test.utils import (rs_or_single_client, wait_until, EventListener, + parse_read_preference) # Location of JSON test specifications. _TEST_PATH = os.path.join( @@ -47,7 +48,7 @@ class TestAllScenarios(unittest.TestCase): @client_context.require_connection def setUpClass(cls): cls.listener = EventListener() - cls.client = single_client(event_listeners=[cls.listener]) + cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod def tearDownClass(cls): diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 25b0064a0f..e51283f85d 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -21,6 +21,7 @@ sys.path[0:0] = [""] +from pymongo import MongoClient from bson.json_util import object_hook from pymongo import monitoring from pymongo.common import clean_node @@ -32,7 +33,6 @@ from pymongo.topology_description import TOPOLOGY_TYPE from test import unittest, client_context, client_knobs, IntegrationTest from test.utils import (ServerAndTopologyEventListener, - single_client, server_name_to_type, rs_or_single_client, wait_until) @@ -186,9 +186,9 @@ class NoopMonitor(Monitor): def _run(self): time.sleep(0.05) - m = single_client(h=scenario_def['uri'], p=27017, - event_listeners=[self.all_listener], - _monitor_class=NoopMonitor) + m = MongoClient(host=scenario_def['uri'], port=27017, + event_listeners=[self.all_listener], + _monitor_class=NoopMonitor) topology = m._get_topology() try: diff --git a/test/test_topology.py b/test/test_topology.py index f2b23eb74c..6881df2fcd 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -52,13 +52,14 @@ def get_topology_type(self): def create_mock_topology( seeds=None, replica_set_name=None, - monitor_class=DummyMonitor): + monitor_class=DummyMonitor, + direct_connection=False): partitioned_seeds = list(map(common.partition_node, seeds or ['a'])) topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, pool_class=MockPool, - monitor_class=monitor_class) + monitor_class=monitor_class, direct_connection=direct_connection) t = Topology(topology_settings) t.open() @@ -168,7 +169,7 @@ def test_direct_connection(self): HelloCompat.LEGACY_CMD: False, 'maxWireVersion': 6}), ]: - t = create_mock_topology() + t = create_mock_topology(direct_connection=True) # Can't select a server while the only server is of type Unknown. with self.assertRaisesRegex(ConnectionFailure, diff --git a/test/utils.py b/test/utils.py index d58c616675..cc5b6d8f05 100644 --- a/test/utils.py +++ b/test/utils.py @@ -516,7 +516,7 @@ def _connection_string(h, authenticate): return "mongodb://%s" % (str(h),) -def _mongo_client(host, port, authenticate=True, directConnection=False, +def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host @@ -524,6 +524,8 @@ def _mongo_client(host, port, authenticate=True, directConnection=False, client_options = client_context.default_client_options.copy() if client_context.replica_set_name and not directConnection: client_options['replicaSet'] = client_context.replica_set_name + if directConnection is not None: + client_options['directConnection'] = directConnection client_options.update(kwargs) client = MongoClient(_connection_string(host, authenticate), port, diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 6de0b7303b..0006f6f673 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -127,7 +127,9 @@ def create_topology(scenario_def, **kwargs): topology_type = get_topology_type_name(scenario_def) if topology_type == 'LoadBalanced': kwargs.setdefault('load_balanced', True) - + # Force topology description to ReplicaSet + elif topology_type in ['ReplicaSetNoPrimary', 'ReplicaSetWithPrimary']: + kwargs.setdefault('replica_set_name', 'rs') settings = get_topology_settings_dict( heartbeat_frequency=frequency, seeds=seeds, @@ -145,8 +147,9 @@ def create_topology(scenario_def, **kwargs): server_description = make_server_description(server, hosts) topology.on_change(server_description) - if topology_type == 'LoadBalanced': - assert topology.description.topology_type_name == 'LoadBalanced' + # Assert that descriptions match + assert (scenario_def['topology_description']['type'] == + topology.description.topology_type_name) return topology @@ -252,6 +255,8 @@ class TestAllScenarios(unittest.TestCase): dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) From fb20975a1f89363613560258f38e790bd8d32ca1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Sep 2021 16:48:52 -0700 Subject: [PATCH 0463/2111] PYTHON-2245 Change default uuidRepresentation to UNSPECIFIED (ie disable UUID encoding by default) (#724) This change also stops decoding both 3 and 4 subtypes as UUIDs. With standard, only subtype 4 is decoded to UUID and subtype 3 is decoded to Binary. With legacy representations, only subtype 3 is decoded to UUID and subtype 4 is decoded to Binary. --- bson/__init__.py | 18 +-- bson/_cbsonmodule.c | 12 +- bson/binary.py | 11 +- bson/codec_options.py | 38 +++--- bson/json_util.py | 5 +- doc/changelog.rst | 7 ++ doc/examples/uuid.rst | 230 ++++++++++++++++++------------------ doc/faq.rst | 2 +- doc/migrate-to-pymongo4.rst | 11 ++ pymongo/mongo_client.py | 6 +- test/test_binary.py | 35 +++--- test/test_bson.py | 23 ++-- test/test_change_stream.py | 9 +- test/test_common.py | 16 ++- test/test_json_util.py | 21 +++- test/test_raw_bson.py | 13 +- 16 files changed, 252 insertions(+), 205 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index b4e6aecdc3..1efb1f7ff5 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -69,7 +69,7 @@ from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, OLD_UUID_SUBTYPE, - JAVA_LEGACY, CSHARP_LEGACY, + JAVA_LEGACY, CSHARP_LEGACY, STANDARD, UUID_SUBTYPE) from bson.code import Code from bson.codec_options import ( @@ -265,20 +265,14 @@ def _get_binary(data, view, position, obj_end, opts, dummy1): raise InvalidBSON('bad binary object length') # Convert UUID subtypes to native UUIDs. - # TODO: PYTHON-2245 Decoding should follow UUID spec in PyMongo 4.0+ if subtype in ALL_UUID_SUBTYPES: - uuid_representation = opts.uuid_representation + uuid_rep = opts.uuid_representation binary_value = Binary(data[position:end], subtype) - if uuid_representation == UuidRepresentation.UNSPECIFIED: + if ((uuid_rep == UuidRepresentation.UNSPECIFIED) or + (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) or + (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD)): return binary_value, end - if subtype == UUID_SUBTYPE: - # Legacy behavior: use STANDARD with binary subtype 4. - uuid_representation = UuidRepresentation.STANDARD - elif uuid_representation == UuidRepresentation.STANDARD: - # subtype == OLD_UUID_SUBTYPE - # Legacy behavior: STANDARD is the same as PYTHON_LEGACY. - uuid_representation = UuidRepresentation.PYTHON_LEGACY - return binary_value.as_uuid(uuid_representation), end + return binary_value.as_uuid(uuid_rep), end # Decode subtype 0 to 'bytes'. if subtype == 0: diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 67ee01c722..93610f7c58 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1760,8 +1760,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (!data) { goto invalid; } - /* Encode as UUID or Binary based on options->uuid_rep - * TODO: PYTHON-2245 Decoding should follow UUID spec in PyMongo 4.0 */ + /* Encode as UUID or Binary based on options->uuid_rep */ if (subtype == 3 || subtype == 4) { PyObject* binary_type = NULL; PyObject* binary_value = NULL; @@ -1782,15 +1781,12 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto uuiderror; } - if (uuid_rep == UNSPECIFIED) { + if ((uuid_rep == UNSPECIFIED) || + (subtype == 4 && uuid_rep != STANDARD) || + (subtype == 3 && uuid_rep == STANDARD)) { value = binary_value; Py_INCREF(value); } else { - if (subtype == 4) { - uuid_rep = STANDARD; - } else if (uuid_rep == STANDARD) { - uuid_rep = PYTHON_LEGACY; - } value = PyObject_CallMethod(binary_value, "as_uuid", "(i)", uuid_rep); } diff --git a/bson/binary.py b/bson/binary.py index 39bc69c049..dd12f56e20 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -39,7 +39,10 @@ """Old BSON binary subtype for a UUID. :class:`uuid.UUID` instances will automatically be encoded -by :mod:`bson` using this subtype. +by :mod:`bson` using this subtype when using +:data:`UuidRepresentation.PYTHON_LEGACY`, +:data:`UuidRepresentation.JAVA_LEGACY`, or +:data:`UuidRepresentation.CSHARP_LEGACY`. .. versionadded:: 2.1 """ @@ -47,8 +50,10 @@ UUID_SUBTYPE = 4 """BSON binary subtype for a UUID. -This is the new BSON binary subtype for UUIDs. The -current default is :data:`OLD_UUID_SUBTYPE`. +This is the standard BSON binary subtype for UUIDs. +:class:`uuid.UUID` instances will automatically be encoded +by :mod:`bson` using this subtype when using +:data:`UuidRepresentation.STANDARD`. """ diff --git a/bson/codec_options.py b/bson/codec_options.py index 9ce772427d..6fcffcc17a 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -219,7 +219,7 @@ class CodecOptions(_options_base): naive. Defaults to ``False``. - `uuid_representation`: The BSON representation to use when encoding and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY`. New + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New applications should consider setting this to :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language compatibility. See :ref:`handling-uuid-data-example` for details. @@ -233,6 +233,11 @@ class CodecOptions(_options_base): - `type_registry`: Instance of :class:`TypeRegistry` used to customize encoding and decoding behavior. + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + .. versionadded:: 3.8 `type_registry` attribute. @@ -245,7 +250,7 @@ class CodecOptions(_options_base): def __new__(cls, document_class=dict, tz_aware=False, - uuid_representation=None, + uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler="strict", tzinfo=None, type_registry=None): if not (issubclass(document_class, _MutableMapping) or @@ -255,9 +260,7 @@ def __new__(cls, document_class=dict, "sublass of collections.abc.MutableMapping") if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") - if uuid_representation is None: - uuid_representation = UuidRepresentation.PYTHON_LEGACY - elif uuid_representation not in ALL_UUID_REPRESENTATIONS: + if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError("uuid_representation must be a value " "from bson.binary.UuidRepresentation") if not isinstance(unicode_decode_error_handler, (str, None)): @@ -327,21 +330,18 @@ def with_options(self, **kwargs): return CodecOptions(**opts) -DEFAULT_CODEC_OPTIONS = CodecOptions( - uuid_representation=UuidRepresentation.PYTHON_LEGACY) +DEFAULT_CODEC_OPTIONS = CodecOptions() def _parse_codec_options(options): """Parse BSON codec options.""" - return CodecOptions( - document_class=options.get( - 'document_class', DEFAULT_CODEC_OPTIONS.document_class), - tz_aware=options.get( - 'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware), - uuid_representation=options.get('uuidrepresentation'), - unicode_decode_error_handler=options.get( - 'unicode_decode_error_handler', - DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler), - tzinfo=options.get('tzinfo', DEFAULT_CODEC_OPTIONS.tzinfo), - type_registry=options.get( - 'type_registry', DEFAULT_CODEC_OPTIONS.type_registry)) + kwargs = {} + for k in set(options) & {'document_class', 'tz_aware', + 'uuidrepresentation', + 'unicode_decode_error_handler', 'tzinfo', + 'type_registry'}: + if k == 'uuidrepresentation': + kwargs['uuid_representation'] = options[k] + else: + kwargs[k] = options[k] + return CodecOptions(**kwargs) diff --git a/bson/json_util.py b/bson/json_util.py index fe0bfe0699..557388a515 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -222,7 +222,7 @@ class JSONOptions(CodecOptions): :class:`collections.MutableMapping`. Defaults to :class:`dict`. - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` to use when encoding and decoding instances of :class:`uuid.UUID`. - Defaults to :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY`. + Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type `Date` will be decoded to timezone aware instances of :class:`datetime.datetime`. Otherwise they will be naive. Defaults @@ -238,6 +238,9 @@ class JSONOptions(CodecOptions): .. versionchanged:: 4.0 The default for `json_mode` was changed from :const:`JSONMode.LEGACY` to :const:`JSONMode.RELAXED`. + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. .. versionchanged:: 3.5 Accepts the optional parameter `json_mode`. diff --git a/doc/changelog.rst b/doc/changelog.rst index b7a0824433..6adff2e715 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -17,6 +17,13 @@ Breaking Changes in 4.0 ....................... - Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. +- The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, + :class:`~bson.json_util.JSONOptions`, and + :class:`~pymongo.mongo_client.MongoClient` has been changed from + :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a + :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. + See :ref:`handling-uuid-data-example` for details. - Removed the ``waitQueueMultiple`` keyword argument to :class:`~pymongo.mongo_client.MongoClient` and removed :exc:`pymongo.errors.ExceededMaxWaiters`. diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst index 4d55fbeae2..d4a77d4038 100644 --- a/doc/examples/uuid.rst +++ b/doc/examples/uuid.rst @@ -147,16 +147,15 @@ Consider the following situation:: collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})['_id'] == 'foo' - # Retrieving this document using UuidRepresentation.STANDARD returns a native UUID + # Retrieving this document using UuidRepresentation.STANDARD returns a Binary instance std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) std_collection = client.testdb.get_collection('test', codec_options=std_opts) doc = std_collection.find_one({'_id': 'foo'}) - assert doc['uuid'] == input_uuid + assert isinstance(doc['uuid'], Binary) - # Round-tripping the retrieved document silently changes the Binary subtype to 4 + # Round-tripping the retrieved document yields the exact same document std_collection.replace_one({'_id': 'foo'}, doc) - assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) is None - round_tripped_doc = collection.find_one({'uuid': Binary(input_uuid.bytes, 4)}) + round_tripped_doc = collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) assert doc == round_tripped_doc @@ -230,7 +229,7 @@ Applications can set the UUID representation in one of the following ways: #. At the ``MongoClient`` level using the ``uuidRepresentation`` URI option, e.g.:: - client = MongoClient("mongodb://a:27107/?uuidRepresentation=javaLegacy") + client = MongoClient("mongodb://a:27107/?uuidRepresentation=standard") Valid values are: @@ -240,6 +239,12 @@ Applications can set the UUID representation in one of the following ways: * - Value - UUID Representation + * - ``unspecified`` + - :ref:`unspecified-representation-details` + + * - ``standard`` + - :ref:`standard-representation-details` + * - ``pythonLegacy`` - :ref:`python-legacy-representation-details` @@ -249,17 +254,11 @@ Applications can set the UUID representation in one of the following ways: * - ``csharpLegacy`` - :ref:`csharp-legacy-representation-details` - * - ``standard`` - - :ref:`standard-representation-details` - - * - ``unspecified`` - - :ref:`unspecified-representation-details` - #. At the ``MongoClient`` level using the ``uuidRepresentation`` kwarg option, e.g.:: from bson.binary import UuidRepresentation - client = MongoClient(uuidRepresentation=UuidRepresentation.PYTHON_LEGACY) + client = MongoClient(uuidRepresentation=UuidRepresentation.STANDARD) #. At the ``Database`` or ``Collection`` level by supplying a suitable :class:`~bson.codec_options.CodecOptions` instance, e.g.:: @@ -288,39 +287,117 @@ Supported UUID Representations - Decode :class:`~bson.binary.Binary` subtype 4 to - Decode :class:`~bson.binary.Binary` subtype 3 to + * - :ref:`standard-representation-details` + - No + - :class:`~bson.binary.Binary` subtype 4 + - :class:`uuid.UUID` + - :class:`~bson.binary.Binary` subtype 3 + + * - :ref:`unspecified-representation-details` + - Yes, in PyMongo>=4 + - Raise :exc:`ValueError` + - :class:`~bson.binary.Binary` subtype 4 + - :class:`~bson.binary.Binary` subtype 3 + * - :ref:`python-legacy-representation-details` - - Yes, in PyMongo>=2.9,<4 + - No - :class:`~bson.binary.Binary` subtype 3 with standard byte-order - - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 4 in PyMongo>=4 + - :class:`~bson.binary.Binary` subtype 4 - :class:`uuid.UUID` * - :ref:`java-legacy-representation-details` - No - :class:`~bson.binary.Binary` subtype 3 with Java legacy byte-order - - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 4 in PyMongo>=4 + - :class:`~bson.binary.Binary` subtype 4 - :class:`uuid.UUID` * - :ref:`csharp-legacy-representation-details` - No - :class:`~bson.binary.Binary` subtype 3 with C# legacy byte-order - - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 4 in PyMongo>=4 - - :class:`uuid.UUID` - - * - :ref:`standard-representation-details` - - No - :class:`~bson.binary.Binary` subtype 4 - :class:`uuid.UUID` - - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 3 in PyMongo>=4 - - * - :ref:`unspecified-representation-details` - - Yes, in PyMongo>=4 - - Raise :exc:`ValueError` - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` in PyMongo<4; :class:`~bson.binary.Binary` subtype 3 in PyMongo>=4 We now detail the behavior and use-case for each supported UUID representation. +.. _unspecified-representation-details: + +``UNSPECIFIED`` +^^^^^^^^^^^^^^^ + +.. attention:: Starting in PyMongo 4.0, + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` is the default + UUID representation used by PyMongo. + +The :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` representation +prevents the incorrect interpretation of UUID bytes by stopping short of +automatically converting UUID fields in BSON to native UUID types. Decoding +a UUID when using this representation returns a :class:`~bson.binary.Binary` +object instead. If required, users can coerce the decoded +:class:`~bson.binary.Binary` objects into native UUIDs using the +:meth:`~bson.binary.Binary.as_uuid` method and specifying the appropriate +representation format. The following example shows +what this might look like for a UUID stored by the C# driver:: + + from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS + from bson.binary import Binary, UuidRepresentation + from uuid import uuid4 + + # Using UuidRepresentation.CSHARP_LEGACY + csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) + + # Store a legacy C#-formatted UUID + input_uuid = uuid4() + collection = client.testdb.get_collection('test', codec_options=csharp_opts) + collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) + + # Using UuidRepresentation.UNSPECIFIED + unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + unspec_collection = client.testdb.get_collection('test', codec_options=unspec_opts) + + # UUID fields are decoded as Binary when UuidRepresentation.UNSPECIFIED is configured + document = unspec_collection.find_one({'_id': 'foo'}) + decoded_field = document['uuid'] + assert isinstance(decoded_field, Binary) + + # Binary.as_uuid() can be used to coerce the decoded value to a native UUID + decoded_uuid = decoded_field.as_uuid(UuidRepresentation.CSHARP_LEGACY) + assert decoded_uuid == input_uuid + +Native :class:`uuid.UUID` objects cannot directly be encoded to +:class:`~bson.binary.Binary` when the UUID representation is ``UNSPECIFIED`` +and attempting to do so will result in an exception:: + + unspec_collection.insert_one({'_id': 'bar', 'uuid': uuid4()}) + Traceback (most recent call last): + ... + ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. See the documentation for UuidRepresentation for more information. + +Instead, applications using :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` +must explicitly coerce a native UUID using the +:meth:`~bson.binary.Binary.from_uuid` method:: + + explicit_binary = Binary.from_uuid(uuid4(), UuidRepresentation.STANDARD) + unspec_collection.insert_one({'_id': 'bar', 'uuid': explicit_binary}) + +.. _standard-representation-details: + +``STANDARD`` +^^^^^^^^^^^^ + +.. attention:: This UUID representation should be used by new applications or + applications that are encoding and/or decoding UUIDs in MongoDB for the + first time. + +The :data:`~bson.binary.UuidRepresentation.STANDARD` representation +enables cross-language compatibility by ensuring the same byte-ordering +when encoding UUIDs from all drivers. UUIDs written by a driver with this +representation configured will be handled correctly by every other provided +it is also configured with the ``STANDARD`` representation. + +``STANDARD`` encodes native :class:`uuid.UUID` objects to +:class:`~bson.binary.Binary` subtype 4 objects. + .. _python-legacy-representation-details: ``PYTHON_LEGACY`` @@ -331,7 +408,7 @@ representation. but **don't** explicitly set a UUID representation. .. attention:: :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` - has been the default uuid representation since PyMongo 2.9. + was the default uuid representation in PyMongo 3. The :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` representation corresponds to the legacy representation of UUIDs used by PyMongo. This @@ -341,7 +418,7 @@ representation conforms with The following example illustrates the use of this representation:: from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import UuidRepresentation + from bson.binary import Binary, UuidRepresentation # No configured UUID representation collection = client.python_legacy.get_collection('test', codec_options=DEFAULT_CODEC_OPTIONS) @@ -350,16 +427,12 @@ The following example illustrates the use of this representation:: pylegacy_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY) pylegacy_collection = client.python_legacy.get_collection('test', codec_options=pylegacy_opts) - # UUIDs written by PyMongo with no UuidRepresentation configured can be queried using PYTHON_LEGACY + # UUIDs written by PyMongo 3 with no UuidRepresentation configured + # (or PyMongo 4.0 with PYTHON_LEGACY) can be queried using PYTHON_LEGACY uuid_1 = uuid4() - collection.insert_one({'uuid': uuid_1}) + pylegacy_collection.insert_one({'uuid': uuid_1}) document = pylegacy_collection.find_one({'uuid': uuid_1}) - # UUIDs written using PYTHON_LEGACY can be read by PyMongo with no UuidRepresentation configured - uuid_2 = uuid4() - pylegacy_collection.insert_one({'uuid': uuid_2}) - document = collection.find_one({'uuid': uuid_2}) - ``PYTHON_LEGACY`` encodes native :class:`uuid.UUID` objects to :class:`~bson.binary.Binary` subtype 3 objects, preserving the same byte-order as :attr:`~uuid.UUID.bytes`:: @@ -389,8 +462,7 @@ As an example, consider the same UUID described in :ref:`example-legacy-uuid`. Let us assume that an application used the Java driver without an explicitly specified UUID representation to insert the example UUID ``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this -value using PyMongo with no UUID representation specified, we end up with an -entirely different UUID:: +value using ``PYTHON_LEGACY``, we end up with an entirely different UUID:: UUID('77665544-3322-1100-ffee-ddccbbaa9988') @@ -424,8 +496,7 @@ As an example, consider the same UUID described in :ref:`example-legacy-uuid`. Let us assume that an application used the C# driver without an explicitly specified UUID representation to insert the example UUID ``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this -value using PyMongo with no UUID representation specified, we end up with an -entirely different UUID:: +value using PYTHON_LEGACY, we end up with an entirely different UUID:: UUID('33221100-5544-7766-8899-aabbccddeeff') @@ -438,80 +509,3 @@ PyMongo uses the specified UUID representation to reorder the BSON bytes and load them correctly. ``CSHARP_LEGACY`` encodes native :class:`uuid.UUID` objects to :class:`~bson.binary.Binary` subtype 3 objects, while performing the same byte-reordering as the legacy C# driver's UUID to BSON encoder. - -.. _standard-representation-details: - -``STANDARD`` -^^^^^^^^^^^^ - -.. attention:: This UUID representation should be used by new applications - that have never stored UUIDs in MongoDB. - -The :data:`~bson.binary.UuidRepresentation.STANDARD` representation -enables cross-language compatibility by ensuring the same byte-ordering -when encoding UUIDs from all drivers. UUIDs written by a driver with this -representation configured will be handled correctly by every other provided -it is also configured with the ``STANDARD`` representation. - -``STANDARD`` encodes native :class:`uuid.UUID` objects to -:class:`~bson.binary.Binary` subtype 4 objects. - -.. _unspecified-representation-details: - -``UNSPECIFIED`` -^^^^^^^^^^^^^^^ - -.. attention:: Starting in PyMongo 4.0, - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` will be the default - UUID representation used by PyMongo. - -The :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` representation -prevents the incorrect interpretation of UUID bytes by stopping short of -automatically converting UUID fields in BSON to native UUID types. Loading -a UUID when using this representation returns a :class:`~bson.binary.Binary` -object instead. If required, users can coerce the decoded -:class:`~bson.binary.Binary` objects into native UUIDs using the -:meth:`~bson.binary.Binary.as_uuid` method and specifying the appropriate -representation format. The following example shows -what this might look like for a UUID stored by the C# driver:: - - from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import Binary, UuidRepresentation - from uuid import uuid4 - - # Using UuidRepresentation.CSHARP_LEGACY - csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) - - # Store a legacy C#-formatted UUID - input_uuid = uuid4() - collection = client.testdb.get_collection('test', codec_options=csharp_opts) - collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) - - # Using UuidRepresentation.UNSPECIFIED - unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) - unspec_collection = client.testdb.get_collection('test', codec_options=unspec_opts) - - # UUID fields are decoded as Binary when UuidRepresentation.UNSPECIFIED is configured - document = unspec_collection.find_one({'_id': 'foo'}) - decoded_field = document['uuid'] - assert isinstance(decoded_field, Binary) - - # Binary.as_uuid() can be used to coerce the decoded value to a native UUID - decoded_uuid = decoded_field.as_uuid(UuidRepresentation.CSHARP_LEGACY) - assert decoded_uuid == input_uuid - -Native :class:`uuid.UUID` objects cannot directly be encoded to -:class:`~bson.binary.Binary` when the UUID representation is ``UNSPECIFIED`` -and attempting to do so will result in an exception:: - - unspec_collection.insert_one({'_id': 'bar', 'uuid': uuid4()}) - Traceback (most recent call last): - ... - ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. See the documentation for UuidRepresentation for more information. - -Instead, applications using :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` -must explicitly coerce a native UUID using the -:meth:`~bson.binary.Binary.from_uuid` method:: - - explicit_binary = Binary.from_uuid(uuid4(), UuidRepresentation.PYTHON_LEGACY) - unspec_collection.insert_one({'_id': 'bar', 'uuid': explicit_binary}) diff --git a/doc/faq.rst b/doc/faq.rst index 5454db448c..c2a6fc7f7f 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -266,7 +266,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> opts CodecOptions(document_class=, tz_aware=False, - uuid_representation=UuidRepresentation.PYTHON_LEGACY, + uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index fe10a60870..2baac8eee3 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -774,3 +774,14 @@ subdocument containing a ``$ref`` field would be decoded as a :class:`~bson.dbref.DBRef`. .. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst + +Encoding a UUID raises an error by default +.......................................... + +The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, +:class:`~bson.json_util.JSONOptions`, and +:class:`~pymongo.mongo_client.MongoClient` has been changed from +:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to +:data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a +:class:`uuid.UUID` instance to BSON or JSON now produces an error by default. +See :ref:`handling-uuid-data-example` for details. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index adfb6834e4..cce5f14d04 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -325,8 +325,8 @@ def __init__( speed. 9 is best compression. Defaults to -1. - `uuidRepresentation`: The BSON representation to use when encoding from and decoding to instances of :class:`~uuid.UUID`. Valid - values are `pythonLegacy` (the default), `javaLegacy`, - `csharpLegacy`, `standard` and `unspecified`. New applications + values are `pythonLegacy`, `javaLegacy`, `csharpLegacy`, `standard` + and `unspecified` (the default). New applications should consider setting this to `standard` for cross language compatibility. See :ref:`handling-uuid-data-example` for details. - `unicode_decode_error_handler`: The error handler to apply when @@ -501,6 +501,8 @@ def __init__( .. versionchanged:: 4.0 Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` keyword arguments. + The default for `uuidRepresentation` was changed from + ``pythonLegacy`` to ``unspecified``. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. diff --git a/test/test_binary.py b/test/test_binary.py index 440c04667a..e6b681fc51 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -145,15 +145,18 @@ def test_hash(self): self.assertEqual(hash(Binary(b"hello world", 42)), hash(two)) def test_uuid_subtype_4(self): - """uuid_representation should be ignored when decoding subtype 4 for - all UuidRepresentation values except UNSPECIFIED.""" + """Only STANDARD should decode subtype 4 as native uuid.""" expected_uuid = uuid.uuid4() - doc = {"uuid": Binary(expected_uuid.bytes, 4)} + expected_bin = Binary(expected_uuid.bytes, 4) + doc = {"uuid": expected_bin} encoded = encode(doc) - for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - - {UuidRepresentation.UNSPECIFIED}): - options = CodecOptions(uuid_representation=uuid_representation) - self.assertEqual(expected_uuid, decode(encoded, options)["uuid"]) + for uuid_rep in (UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY): + opts = CodecOptions(uuid_representation=uuid_rep) + self.assertEqual(expected_bin, decode(encoded, opts)["uuid"]) + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + self.assertEqual(expected_uuid, decode(encoded, opts)["uuid"]) def test_legacy_java_uuid(self): # Test decoding @@ -522,29 +525,25 @@ def _test_decoding(self, client_uuid_representation_string, # Implicit decoding prose test #1 def test_decoding_1(self): - # TODO: these assertions will change after PYTHON-2245. Specifically, - # the 'standard' field will be decoded as a Binary subtype 4. - binary_value = Binary.from_uuid( - self.uuid, UuidRepresentation.PYTHON_LEGACY) + standard_binary = Binary.from_uuid( + self.uuid, UuidRepresentation.STANDARD) self._test_decoding( "javaLegacy", UuidRepresentation.JAVA_LEGACY, - self.uuid, self.uuid) + standard_binary, self.uuid) self._test_decoding( "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, - self.uuid, self.uuid) + standard_binary, self.uuid) self._test_decoding( "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, - self.uuid, self.uuid) + standard_binary, self.uuid) # Implicit decoding pose test #2 def test_decoding_2(self): - # TODO: these assertions will change after PYTHON-2245. Specifically, - # the 'legacy' field will be decoded as a Binary subtype 3. - binary_value = Binary.from_uuid( + legacy_binary = Binary.from_uuid( self.uuid, UuidRepresentation.PYTHON_LEGACY) self._test_decoding( "standard", UuidRepresentation.PYTHON_LEGACY, - self.uuid, binary_value.as_uuid(UuidRepresentation.PYTHON_LEGACY)) + self.uuid, legacy_binary) # Implicit decoding pose test #3 def test_decoding_3(self): diff --git a/test/test_bson.py b/test/test_bson.py index 89f4a11176..5c0f163bb8 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -638,8 +638,13 @@ def test_tuple(self): def test_uuid(self): id = uuid.uuid4() - transformed_id = decode(encode({"id": id}))["id"] + # The default uuid_representation is UNSPECIFIED + with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): + bson.decode_all(encode({'uuid': id})) + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + transformed_id = decode(encode({"id": id}, codec_options=opts), + codec_options=opts)["id"] self.assertTrue(isinstance(transformed_id, uuid.UUID)) self.assertEqual(id, transformed_id) self.assertNotEqual(uuid.uuid4(), transformed_id) @@ -648,8 +653,9 @@ def test_uuid_legacy(self): id = uuid.uuid4() legacy = Binary.from_uuid(id, UuidRepresentation.PYTHON_LEGACY) self.assertEqual(3, legacy.subtype) - transformed = decode(encode({"uuid": legacy}))["uuid"] - self.assertTrue(isinstance(transformed, uuid.UUID)) + bin = decode(encode({"uuid": legacy}))["uuid"] + self.assertTrue(isinstance(bin, Binary)) + transformed = bin.as_uuid(UuidRepresentation.PYTHON_LEGACY) self.assertEqual(id, transformed) # The C extension was segfaulting on unicode RegExs, so we have this test @@ -965,7 +971,7 @@ def test_tzinfo(self): def test_codec_options_repr(self): r = ("CodecOptions(document_class=dict, tz_aware=False, " - "uuid_representation=UuidRepresentation.PYTHON_LEGACY, " + "uuid_representation=UuidRepresentation.UNSPECIFIED, " "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " "fallback_encoder=None))") @@ -973,17 +979,16 @@ def test_codec_options_repr(self): def test_decode_all_defaults(self): # Test decode_all()'s default document_class is dict and tz_aware is - # False. The default uuid_representation is PYTHON_LEGACY but this - # decodes same as STANDARD, so all this test proves about UUID decoding - # is that it's not CSHARP_LEGACY or JAVA_LEGACY. + # False. doc = {'sub_document': {}, - 'uuid': uuid.uuid4(), 'dt': datetime.datetime.utcnow()} decoded = bson.decode_all(bson.encode(doc))[0] self.assertIsInstance(decoded['sub_document'], dict) - self.assertEqual(decoded['uuid'], doc['uuid']) self.assertIsNone(decoded['dt'].tzinfo) + # The default uuid_representation is UNSPECIFIED + with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): + bson.decode_all(bson.encode({'uuid': uuid.uuid4()})) def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index e891e1403a..a49f6972b2 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -896,7 +896,8 @@ def test_simple(self): with self.change_stream() as change_stream: for collname in collnames: self._insert_and_check( - change_stream, collname, {'_id': uuid.uuid4()}) + change_stream, collname, + {'_id': Binary.from_uuid(uuid.uuid4())}) def test_isolation(self): # Ensure inserts to other dbs don't show up in our ChangeStream. @@ -905,9 +906,11 @@ def test_isolation(self): other_db, self.db, msg="Isolation must be tested on separate DBs") collname = self.id() with self.change_stream() as change_stream: - other_db[collname].insert_one({'_id': uuid.uuid4()}) + other_db[collname].insert_one( + {'_id': Binary.from_uuid(uuid.uuid4())}) self._insert_and_check( - change_stream, collname, {'_id': uuid.uuid4()}) + change_stream, collname, + {'_id': Binary.from_uuid(uuid.uuid4())}) self.client.drop_database(other_db) diff --git a/test/test_common.py b/test/test_common.py index 87b5ce4c9d..dcd618c509 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -19,7 +19,7 @@ sys.path[0:0] = [""] -from bson.binary import Binary, PYTHON_LEGACY, STANDARD +from bson.binary import Binary, PYTHON_LEGACY, STANDARD, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.errors import OperationFailure @@ -40,12 +40,15 @@ def test_uuid_representation(self): coll.drop() # Test property - self.assertEqual(PYTHON_LEGACY, + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) # Test basic query uu = uuid.uuid4() # Insert as binary subtype 3 + coll = self.db.get_collection( + "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + legacy_opts = coll.codec_options coll.insert_one({'uu': uu}) self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) coll = self.db.get_collection( @@ -53,7 +56,7 @@ def test_uuid_representation(self): self.assertEqual(STANDARD, coll.codec_options.uuid_representation) self.assertEqual(None, coll.find_one({'uu': uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uu, coll.find_one({'uu': uul})['uu']) + self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) # Test count_documents self.assertEqual(0, coll.count_documents({'uu': uu})) @@ -98,9 +101,10 @@ def test_uuid_representation(self): self.assertEqual(5, coll.find_one({'_id': uu})['i']) # Test command - self.assertEqual(5, self.db.command('findAndModify', 'uuid', - update={'$set': {'i': 6}}, - query={'_id': uu})['value']['i']) + self.assertEqual(5, self.db.command( + 'findAndModify', 'uuid', + update={'$set': {'i': 6}}, + query={'_id': uu}, codec_options=legacy_opts)['value']['i']) self.assertEqual(6, self.db.command( 'findAndModify', 'uuid', update={'$set': {'i': 7}}, diff --git a/test/test_json_util.py b/test/test_json_util.py index c20c793d3a..791e3de18f 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -298,9 +298,22 @@ def test_timestamp(self): self.assertEqual(dct, rtdct) self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res) + def test_uuid_default(self): + # Cannot directly encode native UUIDs with the default + # uuid_representation. + doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} + with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): + json_util.dumps(doc) + legacy_jsn = '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}' + expected = {'uuid': Binary( + b'\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y', 4)} + self.assertEqual(json_util.loads(legacy_jsn), expected) + def test_uuid(self): doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} - self.round_trip(doc) + uuid_legacy_opts = LEGACY_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY) + self.round_trip(doc, json_options=uuid_legacy_opts) self.assertEqual( '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}', json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS)) @@ -308,7 +321,8 @@ def test_uuid(self): '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( - doc, json_options=STRICT_JSON_OPTIONS)) + doc, json_options=STRICT_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY))) self.assertEqual( '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', @@ -319,7 +333,8 @@ def test_uuid(self): self.assertEqual( doc, json_util.loads( '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}')) + '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + json_options=uuid_legacy_opts)) for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}): options = JSONOptions( diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index 229382f3d1..7fb53c6da9 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -19,7 +19,7 @@ sys.path[0:0] = [""] from bson import decode, encode -from bson.binary import Binary, JAVA_LEGACY +from bson.binary import Binary, JAVA_LEGACY, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import InvalidBSON from bson.raw_bson import RawBSONDocument, DEFAULT_RAW_BSON_OPTIONS @@ -92,7 +92,12 @@ def test_round_trip_raw_uuid(self): 'bin3': Binary(uid.bytes, 3)} raw = RawBSONDocument(encode(doc)) coll.insert_one(raw) - self.assertEqual(coll.find_one(), {'_id': 1, 'bin4': uid, 'bin3': uid}) + self.assertEqual(coll.find_one(), doc) + uuid_coll = coll.with_options( + codec_options=coll.codec_options.with_options( + uuid_representation=UuidRepresentation.STANDARD)) + self.assertEqual(uuid_coll.find_one(), + {'_id': 1, 'bin4': uid, 'bin3': Binary(uid.bytes, 3)}) # Test that the raw bytes haven't changed. raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) @@ -185,3 +190,7 @@ def test_preserve_key_ordering(self): for rkey, elt in zip(rawdoc, keyvaluepairs): self.assertEqual(rkey, elt[0]) + + +if __name__ == "__main__": + unittest.main() From e3771587c37d5ebe715c907c8b7115dc83196601 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 14 Sep 2021 16:54:11 -0700 Subject: [PATCH 0464/2111] PYTHON-1949 CodecOptions and JSONOptions should have the same default value for tz_aware (#720) --- bson/json_util.py | 7 +-- doc/changelog.rst | 4 ++ doc/migrate-to-pymongo4.rst | 8 ++++ test/test_bson_corpus.py | 3 +- test/test_json_util.py | 87 +++++++++++++++---------------------- 5 files changed, 54 insertions(+), 55 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index 557388a515..cc6dfbace4 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -226,7 +226,7 @@ class JSONOptions(CodecOptions): - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type `Date` will be decoded to timezone aware instances of :class:`datetime.datetime`. Otherwise they will be naive. Defaults - to ``True``. + to ``False``. - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the timezone from which :class:`~datetime.datetime` objects should be decoded. Defaults to :const:`~bson.tz_util.utc`. @@ -245,14 +245,15 @@ class JSONOptions(CodecOptions): .. versionchanged:: 3.5 Accepts the optional parameter `json_mode`. - .. versionadded:: 3.4 + .. versionchanged:: 4.0 + Changed default value of `tz_aware` to False. """ def __new__(cls, strict_number_long=None, datetime_representation=None, strict_uuid=None, json_mode=JSONMode.RELAXED, *args, **kwargs): - kwargs["tz_aware"] = kwargs.get("tz_aware", True) + kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) if datetime_representation not in (DatetimeRepresentation.LEGACY, diff --git a/doc/changelog.rst b/doc/changelog.rst index 6adff2e715..ea8cd19bad 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -123,6 +123,9 @@ Breaking Changes in 4.0 :class:`~bson.dbref.DBRef`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. +- ``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, + now defaults to ``False`` instead of ``True``. ``json_util.loads`` now +decodes datetime as naive by default. - ``directConnection`` URI option and keyword argument to :class:`~pymongo.mongo_client.MongoClient` defaults to ``False`` instead of ``None``, allowing for the automatic discovery of replica sets. This means that if you @@ -132,6 +135,7 @@ Breaking Changes in 4.0 with :meth:`~pymongo.collection.Collection.find`. - ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. + d Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 2baac8eee3..8300a09914 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -183,6 +183,14 @@ can be changed to this:: names = client.list_database_names() +``tz_aware`` defaults to ``False`` +.................................. + +``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, +now defaults to ``False`` instead of ``True``. ``json_util.loads`` now +decodes datetime as naive by default. + + Database -------- diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 7893395c60..cbb702e405 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -74,6 +74,7 @@ # Need to set tz_aware=True in order to use "strict" dates in extended JSON. codec_options = CodecOptions(tz_aware=True, document_class=SON) +codec_options_no_tzaware = CodecOptions(document_class=SON) # We normally encode UUID as binary subtype 0x03, # but we'll need to encode to subtype 0x04 for one of the tests. codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD) @@ -93,7 +94,7 @@ to_bson_uuid_04 = functools.partial(encode, codec_options=codec_options_uuid_04) to_bson = functools.partial(encode, codec_options=codec_options) -decode_bson = lambda bbytes: decode(bbytes, codec_options=codec_options) +decode_bson = functools.partial(decode, codec_options=codec_options_no_tzaware) decode_extjson = functools.partial( json_util.loads, json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, diff --git a/test/test_json_util.py b/test/test_json_util.py index 791e3de18f..f28b75c9be 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] -from bson import json_util, EPOCH_AWARE, SON +from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON from bson.json_util import (DatetimeRepresentation, JSONMode, JSONOptions, @@ -103,61 +103,45 @@ def test_dbref(self): json_util.dumps(DBRef('collection', 1, 'db'))) def test_datetime(self): + tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options( + tz_aware=True) # only millis, not micros + self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, + 191000, utc)}, json_options=tz_aware_opts) self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, - 49, 45, 191000, utc)}) - - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - # No explicit offset - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - # Localtime behind UTC - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - # Localtime ahead of UTC - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) - jsn = '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}' - self.assertEqual(EPOCH_AWARE, json_util.loads(jsn)["dt"]) + 49, 45, 191000)}) + + for jsn in ['{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', + '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}' + ]: + self.assertEqual(EPOCH_AWARE, json_util.loads( + jsn, json_options=tz_aware_opts)["dt"]) + self.assertEqual(EPOCH_NAIVE, json_util.loads(jsn)["dt"]) dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc) jsn = '{"dt": {"$date": -62135593139000}}' - self.assertEqual(dtm, json_util.loads(jsn)["dt"]) + self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}' - self.assertEqual(dtm, json_util.loads(jsn)["dt"]) + self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) # Test dumps format pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)} @@ -207,7 +191,8 @@ def test_datetime(self): self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( - '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}')["dt"]) + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', + json_options=tz_aware_opts)["dt"]) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( From 0fa86c8ee12faeca1777eaad5e00a1d00319202d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Sep 2021 12:04:34 -0700 Subject: [PATCH 0465/2111] Removed directConnection workaround in SRV spec tests (#727) --- test/test_dns.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/test_dns.py b/test/test_dns.py index fd6431c586..91f8750d8b 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -105,12 +105,6 @@ def run_test(self): # tests. copts['tlsAllowInvalidHostnames'] = True - # The SRV spec tests assume drivers auto discover replica set - # members. This should be removed during PYTHON-2679. - if not self.load_balanced and ( - 'directconnection' not in result['options']): - copts['directConnection'] = False - client = MongoClient(uri, **copts) wait_until( lambda: hosts == client.nodes, From 4e086ba2186fa9655d11614ab348d620666c8643 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Sep 2021 12:05:09 -0700 Subject: [PATCH 0466/2111] PYTHON-1915 Prohibit copying ClientSession objects (#726) --- doc/contributors.rst | 3 ++- pymongo/client_session.py | 3 +++ test/test_session.py | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index c99c4b80e2..22cbee3215 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -89,4 +89,5 @@ The following is a list of people who have contributed to - Paul Fisher (thetorpedodog) - Julius Park (juliusgeo) - Khanh Nguyen (KN99HN) -- Henri Froese (henrifroese) \ No newline at end of file +- Henri Froese (henrifroese) +- Ishmum Jawad Khan (ishmum123) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 7d9ce712f0..f8071e5f2b 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -953,6 +953,9 @@ def _update_read_concern(self, cmd, sock_info): if self._snapshot_time is not None: rc['atClusterTime'] = self._snapshot_time + def __copy__(self): + raise TypeError('A ClientSession cannot be copied, create a new session instead') + class _ServerSession(object): def __init__(self, generation): diff --git a/test/test_session.py b/test/test_session.py index fc1f8382fe..608024e0b1 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -716,6 +716,10 @@ def test_snapshot_incompatible_with_causal_consistency(self): snapshot=True): pass + def test_session_not_copyable(self): + client = self.client + with client.start_session() as s: + self.assertRaises(TypeError, lambda: copy.copy(s)) class TestCausalConsistency(unittest.TestCase): From 23fe13fcbafa3b331c08531151246dc18695ccd4 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Sep 2021 12:56:18 -0700 Subject: [PATCH 0467/2111] PYTHON-1965 The bson package should not depend on the pymongo package (#725) --- bson/json_util.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index cc6dfbace4..0644874b44 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -93,8 +93,6 @@ import re import uuid -from pymongo.errors import ConfigurationError - import bson from bson import EPOCH_AWARE, RE_TYPE, SON from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, @@ -260,45 +258,45 @@ def __new__(cls, strict_number_long=None, DatetimeRepresentation.NUMBERLONG, DatetimeRepresentation.ISO8601, None): - raise ConfigurationError( + raise ValueError( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") self = super(JSONOptions, cls).__new__(cls, *args, **kwargs) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): - raise ConfigurationError( + raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " "or CANONICAL from JSONMode.") self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: if strict_number_long: - raise ConfigurationError( + raise ValueError( "Cannot specify strict_number_long=True with" " JSONMode.RELAXED") if datetime_representation not in (None, DatetimeRepresentation.ISO8601): - raise ConfigurationError( + raise ValueError( "datetime_representation must be DatetimeRepresentation." "ISO8601 or omitted with JSONMode.RELAXED") if strict_uuid not in (None, True): - raise ConfigurationError( + raise ValueError( "Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = False self.datetime_representation = DatetimeRepresentation.ISO8601 self.strict_uuid = True elif self.json_mode == JSONMode.CANONICAL: if strict_number_long not in (None, True): - raise ConfigurationError( + raise ValueError( "Cannot specify strict_number_long=False with" " JSONMode.RELAXED") if datetime_representation not in ( None, DatetimeRepresentation.NUMBERLONG): - raise ConfigurationError( + raise ValueError( "datetime_representation must be DatetimeRepresentation." "NUMBERLONG or omitted with JSONMode.RELAXED") if strict_uuid not in (None, True): - raise ConfigurationError( + raise ValueError( "Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = True self.datetime_representation = DatetimeRepresentation.NUMBERLONG From fbd5599deb5433e08b43b2a9543fb1e3b0349129 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 16 Sep 2021 15:21:40 -0700 Subject: [PATCH 0468/2111] PYTHON-2518 SON class should be compatible with Python 3's OrderedDict API (#730) --- bson/son.py | 3 --- test/test_son.py | 18 +++++++++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/bson/son.py b/bson/son.py index c6baaa98c4..a726961293 100644 --- a/bson/son.py +++ b/bson/son.py @@ -62,9 +62,6 @@ def __delitem__(self, key): self.__keys.remove(key) dict.__delitem__(self, key) - def keys(self): - return list(self.__keys) - def copy(self): other = SON() other.update(self) diff --git a/test/test_son.py b/test/test_son.py index a8ac49060a..edddd6b8b8 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -23,7 +23,7 @@ from bson.son import SON from test import unittest - +from collections import OrderedDict class TestSON(unittest.TestCase): def test_ordered_dict(self): @@ -189,5 +189,21 @@ def test_len(self): test_son.popitem() self.assertEqual(2, len(test_son)) + def test_keys(self): + # Test to make sure that set operations do not throw an error + d = SON().keys() + for i in [OrderedDict, dict]: + try: + d - i().keys() + except TypeError: + self.fail("SON().keys() is not returning an object compatible " + "with %s objects" % (str(i))) + # Test to verify correctness + d = SON({"k": "v"}).keys() + for i in [OrderedDict, dict]: + self.assertEqual(d | i({"k1": 0}).keys(), {"k", "k1"}) + for i in [OrderedDict, dict]: + self.assertEqual(d - i({"k": 0}).keys(), set()) + if __name__ == "__main__": unittest.main() From 88e744d5065bc256a086ebf13506fe1c65179dc1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 16 Sep 2021 15:52:14 -0700 Subject: [PATCH 0469/2111] PYTHON-808 Prevent use of Database and Collection in boolean expressions (#728) --- doc/changelog.rst | 7 ++++++- doc/migrate-to-pymongo4.rst | 26 ++++++++++++++++++++++++++ pymongo/collection.py | 5 +++++ pymongo/database.py | 5 +++++ test/test_collection.py | 4 ++++ test/test_database.py | 4 ++++ 6 files changed, 50 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index ea8cd19bad..8adb9ae7fc 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -134,8 +134,13 @@ decodes datetime as naive by default. - The ``hint`` option is now required when using ``min`` or ``max`` queries with :meth:`~pymongo.collection.Collection.find`. - ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. +- :class:`~pymongo.collection.Collection` and :class:`~pymongo.database.Database` + now raises an error upon evaluating as a Boolean, please use the + syntax ``if collection is not None:`` or ``if database is not None:`` as + opposed to + the previous syntax which was simply ``if collection:`` or ``if database:``. + You must now explicitly compare with None. - d Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 8300a09914..d17c13e753 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -321,6 +321,19 @@ Can be changed to this:: .. _'system.profile' collection: https://docs.mongodb.com/manual/reference/database-profiler/ +Database.__bool__ raises NotImplementedError +............................................ +:class:`~pymongo.database.Database` now raises an error upon evaluating as a +Boolean. Code like this:: + + if database: + +Can be changed to this:: + + if database is not None: + +You must now explicitly compare with None. + Collection ---------- @@ -621,6 +634,19 @@ can be changed to this:: cursor = coll.find({}, min={'x', min_value}, hint=[('x', ASCENDING)]) +Collection.__bool__ raises NotImplementedError +.............................................. +:class:`~pymongo.collection.Collection` now raises an error upon evaluating +as a Boolean. Code like this:: + + if collection: + +Can be changed to this:: + + if collection is not None: + +You must now explicitly compare with None. + SONManipulator is removed ------------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index 7994c4d8ab..a5d2fa5c3e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -289,6 +289,11 @@ def __ne__(self, other): def __hash__(self): return hash((self.__database, self.__name)) + def __bool__(self): + raise NotImplementedError("Collection objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None") + @property def full_name(self): """The full name of this :class:`Collection`. diff --git a/pymongo/database.py b/pymongo/database.py index df8d730fb2..35a878322d 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -840,6 +840,11 @@ def __next__(self): next = __next__ + def __bool__(self): + raise NotImplementedError("Database objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None") + def dereference(self, dbref, session=None, **kwargs): """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. diff --git a/test/test_collection.py b/test/test_collection.py index 2795b47425..26c616a4dd 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2265,6 +2265,10 @@ def test_find_command_generation(self): ('$dumb', 2), ('filter', {'foo': 1})]).to_dict()) + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Collection(self.db, 'test')) + if __name__ == "__main__": unittest.main() diff --git a/test/test_database.py b/test/test_database.py index 37bda18b75..4813f8d100 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -687,6 +687,10 @@ def test_database_aggregation_unsupported(self): with self.admin.aggregate(self.pipeline) as _: pass + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Database(self.client, "test")) + if __name__ == "__main__": unittest.main() From 11752ed594f8d0fd3dac342b01b6c2e1a594e458 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Sep 2021 16:53:50 -0700 Subject: [PATCH 0470/2111] PYTHON-2899 Remove code for MongoDB <= 3.4 (#729) Remove unneeded memoryview to bytes conversion. --- pymongo/_cmessagemodule.c | 704 ------------------------- pymongo/aggregation.py | 19 +- pymongo/auth.py | 8 +- pymongo/bulk.py | 89 +--- pymongo/collection.py | 158 +----- pymongo/database.py | 54 +- pymongo/helpers.py | 35 -- pymongo/message.py | 460 ++-------------- pymongo/mongo_client.py | 54 +- pymongo/pool.py | 36 +- test/__init__.py | 19 +- test/test_auth.py | 9 - test/test_bulk.py | 50 +- test/test_client.py | 25 +- test/test_collation.py | 24 - test/test_collection.py | 185 ++----- test/test_command_monitoring_legacy.py | 11 - test/test_cursor.py | 45 +- test/test_custom_types.py | 1 - test/test_database.py | 28 +- test/test_decimal128.py | 4 - test/test_encryption.py | 1 - test/test_examples.py | 50 +- test/test_max_staleness.py | 7 - test/test_monitoring.py | 24 - test/test_read_concern.py | 13 +- test/test_read_preferences.py | 3 - test/test_read_write_concern_spec.py | 12 +- test/test_retryable_writes.py | 15 +- test/test_session.py | 8 - test/test_ssl.py | 29 +- test/test_topology.py | 5 +- 32 files changed, 224 insertions(+), 1961 deletions(-) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 70df158abf..845c14bd54 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -62,302 +62,6 @@ static int buffer_write_bytes_ssize_t(buffer_t buffer, const char* data, Py_ssiz return buffer_write_bytes(buffer, data, downsize); } -/* add a lastError message on the end of the buffer. - * returns 0 on failure */ -static int add_last_error(PyObject* self, buffer_t buffer, - int request_id, char* ns, Py_ssize_t nslen, - codec_options_t* options, PyObject* args) { - struct module_state *state = GETSTATE(self); - - int message_start; - int document_start; - int message_length; - int document_length; - PyObject* key = NULL; - PyObject* value = NULL; - Py_ssize_t pos = 0; - PyObject* one; - char *p = strchr(ns, '.'); - /* Length of the database portion of ns. */ - nslen = p ? (int)(p - ns) : nslen; - - message_start = buffer_save_space(buffer, 4); - if (message_start == -1) { - return 0; - } - if (!buffer_write_int32(buffer, (int32_t)request_id) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00" /* opcode */ - "\x00\x00\x00\x00", /* options */ - 12) || - !buffer_write_bytes_ssize_t(buffer, ns, nslen) || /* database */ - !buffer_write_bytes(buffer, - ".$cmd\x00" /* collection name */ - "\x00\x00\x00\x00" /* skip */ - "\xFF\xFF\xFF\xFF", /* limit (-1) */ - 14)) { - return 0; - } - - /* save space for length */ - document_start = buffer_save_space(buffer, 4); - if (document_start == -1) { - return 0; - } - - /* getlasterror: 1 */ - if (!(one = PyLong_FromLong(1))) - return 0; - - if (!write_pair(state->_cbson, buffer, "getlasterror", 12, one, 0, - options, 1)) { - Py_DECREF(one); - return 0; - } - Py_DECREF(one); - - /* getlasterror options */ - while (PyDict_Next(args, &pos, &key, &value)) { - if (!decode_and_write_pair(state->_cbson, buffer, key, value, 0, - options, 0)) { - return 0; - } - } - - /* EOD */ - if (!buffer_write_bytes(buffer, "\x00", 1)) { - return 0; - } - - message_length = buffer_get_position(buffer) - message_start; - document_length = buffer_get_position(buffer) - document_start; - buffer_write_int32_at_position( - buffer, message_start, (int32_t)message_length); - buffer_write_int32_at_position( - buffer, document_start, (int32_t)document_length); - return 1; -} - -static int init_insert_buffer(buffer_t buffer, int request_id, int options, - const char* coll_name, Py_ssize_t coll_name_len, - int compress) { - int length_location = 0; - if (!compress) { - /* Save space for message length */ - int length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - return length_location; - } - if (!buffer_write_int32(buffer, (int32_t)request_id) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd2\x07\x00\x00", - 8)) { - return -1; - } - } - if (!buffer_write_int32(buffer, (int32_t)options) || - !buffer_write_bytes_ssize_t(buffer, - coll_name, - coll_name_len + 1)) { - return -1; - } - return length_location; -} - -static PyObject* _cbson_insert_message(PyObject* self, PyObject* args) { - /* Used by the Bulk API to insert into pre-2.6 servers. Collection.insert - * uses _cbson_do_batched_insert. */ - struct module_state *state = GETSTATE(self); - - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - char* collection_name = NULL; - Py_ssize_t collection_name_length; - PyObject* docs; - PyObject* doc; - PyObject* iterator; - int before, cur_size, max_size = 0; - int flags = 0; - unsigned char check_keys; - unsigned char continue_on_error; - codec_options_t options; - buffer_t buffer = NULL; - int length_location, message_length; - PyObject* result = NULL; - - if (!PyArg_ParseTuple(args, "et#ObbO&", - "utf-8", - &collection_name, - &collection_name_length, - &docs, &check_keys, - &continue_on_error, - convert_codec_options, &options)) { - return NULL; - } - if (continue_on_error) { - flags += 1; - } - buffer = buffer_new(); - if (!buffer) { - goto fail; - } - - length_location = init_insert_buffer(buffer, - request_id, - flags, - collection_name, - collection_name_length, - 0); - if (length_location == -1) { - goto fail; - } - - iterator = PyObject_GetIter(docs); - if (iterator == NULL) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "input is not iterable"); - Py_DECREF(InvalidOperation); - } - goto fail; - } - while ((doc = PyIter_Next(iterator)) != NULL) { - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { - Py_DECREF(doc); - Py_DECREF(iterator); - goto fail; - } - Py_DECREF(doc); - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? cur_size : max_size; - } - Py_DECREF(iterator); - - if (PyErr_Occurred()) { - goto fail; - } - - if (!max_size) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - } - goto fail; - } - - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - - /* objectify buffer */ - result = Py_BuildValue("iy#i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - max_size); -fail: - PyMem_Free(collection_name); - destroy_codec_options(&options); - if (buffer) { - buffer_free(buffer); - } - return result; -} - -static PyObject* _cbson_update_message(PyObject* self, PyObject* args) { - /* NOTE just using a random number as the request_id */ - struct module_state *state = GETSTATE(self); - - int request_id = rand(); - char* collection_name = NULL; - Py_ssize_t collection_name_length; - int before, cur_size, max_size = 0; - PyObject* doc; - PyObject* spec; - unsigned char multi; - unsigned char upsert; - unsigned char check_keys; - codec_options_t options; - int flags; - buffer_t buffer = NULL; - int length_location, message_length; - PyObject* result = NULL; - - if (!PyArg_ParseTuple(args, "et#bbOObO&", - "utf-8", - &collection_name, - &collection_name_length, - &upsert, &multi, &spec, &doc, &check_keys, - convert_codec_options, &options)) { - return NULL; - } - - flags = 0; - if (upsert) { - flags += 1; - } - if (multi) { - flags += 2; - } - buffer = buffer_new(); - if (!buffer) { - goto fail; - } - - // save space for message length - length_location = buffer_save_space(buffer, 4); - if (length_location == -1) { - goto fail; - } - if (!buffer_write_int32(buffer, (int32_t)request_id) || - !buffer_write_bytes(buffer, - "\x00\x00\x00\x00" - "\xd1\x07\x00\x00" - "\x00\x00\x00\x00", - 12) || - !buffer_write_bytes_ssize_t(buffer, - collection_name, - collection_name_length + 1) || - !buffer_write_int32(buffer, (int32_t)flags)) { - goto fail; - } - - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, spec, 0, &options, 1)) { - goto fail; - } - max_size = buffer_get_position(buffer) - before; - - before = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { - goto fail; - } - cur_size = buffer_get_position(buffer) - before; - max_size = (cur_size > max_size) ? cur_size : max_size; - - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - - /* objectify buffer */ - result = Py_BuildValue("iy#i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - max_size); -fail: - PyMem_Free(collection_name); - destroy_codec_options(&options); - if (buffer) { - buffer_free(buffer); - } - return result; -} - static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ struct module_state *state = GETSTATE(self); @@ -688,333 +392,6 @@ _set_document_too_large(int size, long max) { } } -static PyObject* -_send_insert(PyObject* self, PyObject* ctx, - PyObject* gle_args, buffer_t buffer, - char* coll_name, Py_ssize_t coll_len, int request_id, int safe, - codec_options_t* options, PyObject* to_publish, int compress) { - - if (safe) { - if (!add_last_error(self, buffer, request_id, - coll_name, coll_len, options, gle_args)) { - return NULL; - } - } - - /* The max_doc_size parameter for legacy_bulk_insert is the max size of - * any document in buffer. We enforced max size already, pass 0 here. */ - return PyObject_CallMethod(ctx, "legacy_bulk_insert", - "iy#iNOi", - request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - 0, - PyBool_FromLong((long)safe), - to_publish, compress); -} - -static PyObject* _cbson_do_batched_insert(PyObject* self, PyObject* args) { - struct module_state *state = GETSTATE(self); - - /* NOTE just using a random number as the request_id */ - int request_id = rand(); - int send_safe, flags = 0; - int length_location, message_length; - Py_ssize_t collection_name_length; - int compress; - char* collection_name = NULL; - PyObject* docs; - PyObject* doc; - PyObject* iterator; - PyObject* ctx; - PyObject* last_error_args; - PyObject* result; - PyObject* max_bson_size_obj; - PyObject* max_message_size_obj; - PyObject* compress_obj; - PyObject* to_publish = NULL; - unsigned char check_keys; - unsigned char safe; - unsigned char continue_on_error; - codec_options_t options; - unsigned char empty = 1; - long max_bson_size; - long max_message_size; - buffer_t buffer; - PyObject *exc_type = NULL, *exc_value = NULL, *exc_trace = NULL; - - if (!PyArg_ParseTuple(args, "et#ObbObO&O", - "utf-8", - &collection_name, - &collection_name_length, - &docs, &check_keys, &safe, - &last_error_args, - &continue_on_error, - convert_codec_options, &options, - &ctx)) { - return NULL; - } - if (continue_on_error) { - flags += 1; - } - /* - * If we are doing unacknowledged writes *and* continue_on_error - * is True it's pointless (and slower) to send GLE. - */ - send_safe = (safe || !continue_on_error); - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); - max_bson_size = PyLong_AsLong(max_bson_size_obj); - Py_XDECREF(max_bson_size_obj); - if (max_bson_size == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); - max_message_size = PyLong_AsLong(max_message_size_obj); - Py_XDECREF(max_message_size_obj); - if (max_message_size == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - compress_obj = PyObject_GetAttrString(ctx, "compress"); - compress = PyObject_IsTrue(compress_obj); - Py_XDECREF(compress_obj); - if (compress == -1) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - compress = compress && !(safe || send_safe); - - buffer = buffer_new(); - if (!buffer) { - destroy_codec_options(&options); - PyMem_Free(collection_name); - return NULL; - } - - length_location = init_insert_buffer(buffer, - request_id, - flags, - collection_name, - collection_name_length, - compress); - if (length_location == -1) { - goto insertfail; - } - - if (!(to_publish = PyList_New(0))) { - goto insertfail; - } - - iterator = PyObject_GetIter(docs); - if (iterator == NULL) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "input is not iterable"); - Py_DECREF(InvalidOperation); - } - goto insertfail; - } - while ((doc = PyIter_Next(iterator)) != NULL) { - int before = buffer_get_position(buffer); - int cur_size; - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { - goto iterfail; - } - - cur_size = buffer_get_position(buffer) - before; - if (cur_size > max_bson_size) { - /* If we've encoded anything send it before raising. */ - if (!empty) { - buffer_update_position(buffer, before); - if (!compress) { - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - } - result = _send_insert(self, ctx, last_error_args, buffer, - collection_name, collection_name_length, - request_id, send_safe, &options, - to_publish, compress); - if (!result) - goto iterfail; - Py_DECREF(result); - } - _set_document_too_large(cur_size, max_bson_size); - goto iterfail; - } - empty = 0; - - /* We have enough data, send this batch. */ - if (buffer_get_position(buffer) > max_message_size) { - int new_request_id = rand(); - int message_start; - buffer_t new_buffer = buffer_new(); - if (!new_buffer) { - goto iterfail; - } - message_start = init_insert_buffer(new_buffer, - new_request_id, - flags, - collection_name, - collection_name_length, - compress); - if (message_start == -1) { - buffer_free(new_buffer); - goto iterfail; - } - - /* Copy the overflow encoded document into the new buffer. */ - if (!buffer_write_bytes(new_buffer, - (const char*)buffer_get_buffer(buffer) + before, cur_size)) { - buffer_free(new_buffer); - goto iterfail; - } - - /* Roll back to the beginning of this document. */ - buffer_update_position(buffer, before); - if (!compress) { - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - } - - result = _send_insert(self, ctx, last_error_args, buffer, - collection_name, collection_name_length, - request_id, send_safe, &options, to_publish, - compress); - - buffer_free(buffer); - buffer = new_buffer; - request_id = new_request_id; - length_location = message_start; - - Py_DECREF(to_publish); - if (!(to_publish = PyList_New(0))) { - goto insertfail; - } - - if (!result) { - PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; - PyObject* OperationFailure; - PyErr_Fetch(&etype, &evalue, &etrace); - OperationFailure = _error("OperationFailure"); - if (OperationFailure) { - if (PyErr_GivenExceptionMatches(etype, OperationFailure)) { - if (!safe || continue_on_error) { - Py_DECREF(OperationFailure); - if (!safe) { - /* We're doing unacknowledged writes and - * continue_on_error is False. Just return. */ - Py_DECREF(etype); - Py_XDECREF(evalue); - Py_XDECREF(etrace); - Py_DECREF(to_publish); - Py_DECREF(iterator); - Py_DECREF(doc); - buffer_free(buffer); - PyMem_Free(collection_name); - Py_RETURN_NONE; - } - /* continue_on_error is True, store the error - * details to re-raise after the final batch */ - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - exc_type = etype; - exc_value = evalue; - exc_trace = etrace; - if (PyList_Append(to_publish, doc) < 0) { - goto iterfail; - } - Py_CLEAR(doc); - continue; - } - } - Py_DECREF(OperationFailure); - } - /* This isn't OperationFailure, we couldn't - * import OperationFailure, or we are doing - * acknowledged writes. Re-raise immediately. */ - PyErr_Restore(etype, evalue, etrace); - goto iterfail; - } else { - Py_DECREF(result); - } - } - if (PyList_Append(to_publish, doc) < 0) { - goto iterfail; - } - Py_CLEAR(doc); - } - Py_DECREF(iterator); - - if (PyErr_Occurred()) { - goto insertfail; - } - - if (empty) { - PyObject* InvalidOperation = _error("InvalidOperation"); - if (InvalidOperation) { - PyErr_SetString(InvalidOperation, "cannot do an empty bulk insert"); - Py_DECREF(InvalidOperation); - } - goto insertfail; - } - - if (!compress) { - message_length = buffer_get_position(buffer) - length_location; - buffer_write_int32_at_position( - buffer, length_location, (int32_t)message_length); - } - - /* Send the last (or only) batch */ - result = _send_insert(self, ctx, last_error_args, buffer, - collection_name, collection_name_length, - request_id, safe, &options, to_publish, compress); - - Py_DECREF(to_publish); - PyMem_Free(collection_name); - buffer_free(buffer); - - if (!result) { - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - return NULL; - } else { - Py_DECREF(result); - } - - if (exc_type) { - /* Re-raise any previously stored exception - * due to continue_on_error being True */ - PyErr_Restore(exc_type, exc_value, exc_trace); - return NULL; - } - - Py_RETURN_NONE; - -iterfail: - Py_XDECREF(doc); - Py_DECREF(iterator); -insertfail: - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_trace); - Py_XDECREF(to_publish); - buffer_free(buffer); - PyMem_Free(collection_name); - return NULL; -} - #define _INSERT 0 #define _UPDATE 1 #define _DELETE 2 @@ -1591,94 +968,13 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { return result; } -static PyObject* -_cbson_batched_write_command(PyObject* self, PyObject* args) { - char *ns = NULL; - unsigned char op; - unsigned char check_keys; - Py_ssize_t ns_len; - int request_id; - int position; - PyObject* command; - PyObject* docs; - PyObject* ctx = NULL; - PyObject* to_publish = NULL; - PyObject* result = NULL; - codec_options_t options; - buffer_t buffer; - struct module_state *state = GETSTATE(self); - - if (!PyArg_ParseTuple(args, "et#bOObO&O", "utf-8", - &ns, &ns_len, &op, &command, &docs, &check_keys, - convert_codec_options, &options, - &ctx)) { - return NULL; - } - if (!(buffer = buffer_new())) { - PyMem_Free(ns); - destroy_codec_options(&options); - return NULL; - } - /* Save space for message length and request id */ - if ((buffer_save_space(buffer, 8)) == -1) { - goto fail; - } - if (!buffer_write_bytes(buffer, - "\x00\x00\x00\x00" /* responseTo */ - "\xd4\x07\x00\x00", /* opcode */ - 8)) { - goto fail; - } - if (!(to_publish = PyList_New(0))) { - goto fail; - } - - if (!_batched_write_command( - ns, - ns_len, - op, - check_keys, - command, - docs, - ctx, - to_publish, - options, - buffer, - state)) { - goto fail; - } - - request_id = rand(); - position = buffer_get_position(buffer); - buffer_write_int32_at_position(buffer, 0, (int32_t)position); - buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); - result = Py_BuildValue("iy#O", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), - to_publish); -fail: - PyMem_Free(ns); - destroy_codec_options(&options); - buffer_free(buffer); - Py_XDECREF(to_publish); - return result; -} - static PyMethodDef _CMessageMethods[] = { - {"_insert_message", _cbson_insert_message, METH_VARARGS, - "Create an insert message to be sent to MongoDB"}, - {"_update_message", _cbson_update_message, METH_VARARGS, - "create an update message to be sent to MongoDB"}, {"_query_message", _cbson_query_message, METH_VARARGS, "create a query message to be sent to MongoDB"}, {"_get_more_message", _cbson_get_more_message, METH_VARARGS, "create a get more message to be sent to MongoDB"}, {"_op_msg", _cbson_op_msg, METH_VARARGS, "create an OP_MSG message to be sent to MongoDB"}, - {"_do_batched_insert", _cbson_do_batched_insert, METH_VARARGS, - "insert a batch of documents, splitting the batch as needed"}, - {"_batched_write_command", _cbson_batched_write_command, METH_VARARGS, - "Create the next batched insert, update, or delete command"}, {"_encode_batched_write_command", _cbson_encode_batched_write_command, METH_VARARGS, "Encode the next batched insert, update, or delete command"}, {"_batched_op_msg", _cbson_batched_op_msg, METH_VARARGS, diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 812ca23b79..2a34a05d3a 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -88,11 +88,6 @@ def _database(self): """The database against which the aggregation command is run.""" raise NotImplementedError - @staticmethod - def _check_compat(sock_info): - """Check whether the server version in-use supports aggregation.""" - pass - def _process_result(self, result, session, server, sock_info, secondary_ok): if self._result_processor: self._result_processor( @@ -104,9 +99,6 @@ def get_read_preference(self, session): return self._target._read_preference_for(session) def get_cursor(self, session, server, sock_info, secondary_ok): - # Ensure command compatibility. - self._check_compat(sock_info) - # Serialize command. cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) @@ -117,8 +109,7 @@ def get_cursor(self, session, server, sock_info, secondary_ok): # - server version is >= 4.2 or # - server version is >= 3.2 and pipeline doesn't use $out if (('readConcern' not in cmd) and - ((sock_info.max_wire_version >= 4 and - not self._performs_write) or + (not self._performs_write or (sock_info.max_wire_version >= 8))): read_concern = self._target.read_concern else: @@ -218,11 +209,3 @@ def _cursor_collection(self, cursor): # aggregate too by defaulting to the .$cmd.aggregate namespace. _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) return self._database[collname] - - @staticmethod - def _check_compat(sock_info): - # Older server version don't raise a descriptive error, so we raise - # one instead. - if not sock_info.max_wire_version >= 6: - err_msg = "Database.aggregate() is only supported on MongoDB 3.6+." - raise ConfigurationError(err_msg) diff --git a/pymongo/auth.py b/pymongo/auth.py index bfec7ab60e..b946980865 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -455,10 +455,6 @@ def _authenticate_x509(credentials, sock_info): return cmd = _X509Context(credentials).speculate_command() - if credentials.username is None and sock_info.max_wire_version < 5: - raise ConfigurationError( - "A username is required for MONGODB-X509 authentication " - "when connected to MongoDB versions older than 3.4.") sock_info.command('$external', cmd) @@ -496,10 +492,8 @@ def _authenticate_default(credentials, sock_info): return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-256') else: return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') - elif sock_info.max_wire_version >= 3: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') else: - return _authenticate_mongo_cr(credentials, sock_info) + return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') _AUTH_MAP = { diff --git a/pymongo/bulk.py b/pymongo/bulk.py index ff3f5974df..0f57309287 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -35,7 +35,6 @@ InvalidOperation, OperationFailure) from pymongo.message import (_INSERT, _UPDATE, _DELETE, - _do_batched_insert, _randint, _BulkWriteContext, _EncryptedBulkWriteContext) @@ -256,17 +255,6 @@ def gen_unordered(self): def _execute_command(self, generator, write_concern, session, sock_info, op_id, retryable, full_result): - if sock_info.max_wire_version < 5: - if self.uses_collation: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use a collation.') - if self.uses_hint: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use hint.') - if sock_info.max_wire_version < 6 and self.uses_array_filters: - raise ConfigurationError( - 'Must be connected to MongoDB 3.6+ to use arrayFilters.') - db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -283,7 +271,7 @@ def _execute_command(self, generator, write_concern, session, ('ordered', self.ordered)]) if not write_concern.is_server_default: cmd['writeConcern'] = write_concern.document - if self.bypass_doc_val and sock_info.max_wire_version >= 4: + if self.bypass_doc_val: cmd['bypassDocumentValidation'] = True bwc = self.bulk_ctx_class( db_name, cmd, sock_info, op_id, listeners, session, @@ -358,24 +346,6 @@ def retryable_bulk(session, sock_info, retryable): _raise_bulk_write_error(full_result) return full_result - def execute_insert_no_results(self, sock_info, run, op_id, acknowledged): - """Execute insert, returning no results. - """ - command = SON([('insert', self.collection.name), - ('ordered', self.ordered)]) - concern = {'w': int(self.ordered)} - command['writeConcern'] = concern - if self.bypass_doc_val and sock_info.max_wire_version >= 4: - command['bypassDocumentValidation'] = True - db = self.collection.database - bwc = _BulkWriteContext( - db.name, command, sock_info, op_id, db.client._event_listeners, - None, _INSERT, self.collection.codec_options) - # Legacy batched OP_INSERT. - _do_batched_insert( - self.collection.full_name, run.ops, True, acknowledged, concern, - not self.ordered, self.collection.codec_options, bwc) - def execute_op_msg_no_results(self, sock_info, generator): """Execute write commands with OP_MSG and w=0 writeConcern, unordered. """ @@ -441,62 +411,13 @@ def execute_no_results(self, sock_info, generator): raise ConfigurationError( 'hint is unsupported for unacknowledged writes.') # Cannot have both unacknowledged writes and bypass document validation. - if self.bypass_doc_val and sock_info.max_wire_version >= 4: + if self.bypass_doc_val: raise OperationFailure("Cannot set bypass_document_validation with" " unacknowledged write concern") - # OP_MSG - if sock_info.max_wire_version > 5: - if self.ordered: - return self.execute_command_no_results(sock_info, generator) - return self.execute_op_msg_no_results(sock_info, generator) - - coll = self.collection - # If ordered is True we have to send GLE or use write - # commands so we can abort on the first error. - write_concern = WriteConcern(w=int(self.ordered)) - op_id = _randint() - - next_run = next(generator) - while next_run: - # An ordered bulk write needs to send acknowledged writes to short - # circuit the next run. However, the final message on the final - # run can be unacknowledged. - run = next_run - next_run = next(generator, None) - needs_ack = self.ordered and next_run is not None - try: - if run.op_type == _INSERT: - self.execute_insert_no_results( - sock_info, run, op_id, needs_ack) - elif run.op_type == _UPDATE: - for operation in run.ops: - doc = operation['u'] - check_keys = True - if doc and next(iter(doc)).startswith('$'): - check_keys = False - coll._update( - sock_info, - operation['q'], - doc, - upsert=operation['upsert'], - check_keys=check_keys, - multi=operation['multi'], - write_concern=write_concern, - op_id=op_id, - ordered=self.ordered, - bypass_doc_val=self.bypass_doc_val) - else: - for operation in run.ops: - coll._delete(sock_info, - operation['q'], - not operation['limit'], - write_concern, - op_id, - self.ordered) - except OperationFailure: - if self.ordered: - break + if self.ordered: + return self.execute_command_no_results(sock_info, generator) + return self.execute_op_msg_no_results(sock_info, generator) def execute(self, write_concern, session): """Execute operations. diff --git a/pymongo/collection.py b/pymongo/collection.py index a5d2fa5c3e..b30bcf22f9 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -435,59 +435,6 @@ def bulk_write(self, requests, ordered=True, return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) - def _legacy_write(self, sock_info, name, cmd, op_id, - bypass_doc_val, func, *args): - """Internal legacy unacknowledged write helper.""" - # Cannot have both unacknowledged write and bypass document validation. - if bypass_doc_val and sock_info.max_wire_version >= 4: - raise OperationFailure("Cannot set bypass_document_validation with" - " unacknowledged write concern") - listeners = self.database.client._event_listeners - publish = listeners.enabled_for_commands - - if publish: - start = datetime.datetime.now() - args = args + (sock_info.compression_context,) - rqst_id, msg, max_size = func(*args) - if publish: - duration = datetime.datetime.now() - start - listeners.publish_command_start( - cmd, self.__database.name, rqst_id, sock_info.address, op_id, - sock_info.service_id) - start = datetime.datetime.now() - try: - result = sock_info.legacy_write(rqst_id, msg, max_size, False) - except Exception as exc: - if publish: - dur = (datetime.datetime.now() - start) + duration - if isinstance(exc, OperationFailure): - details = exc.details - # Succeed if GLE was successful and this is a write error. - if details.get("ok") and "n" in details: - reply = message._convert_write_result( - name, cmd, details) - listeners.publish_command_success( - dur, reply, name, rqst_id, sock_info.address, - op_id, sock_info.service_id) - raise - else: - details = message._convert_exception(exc) - listeners.publish_command_failure( - dur, details, name, rqst_id, sock_info.address, op_id, - sock_info.service_id) - raise - if publish: - if result is not None: - reply = message._convert_write_result(name, cmd, result) - else: - # Comply with APM spec. - reply = {'ok': 1} - duration = (datetime.datetime.now() - start) + duration - listeners.publish_command_success( - duration, reply, name, rqst_id, sock_info.address, op_id, - sock_info.service_id) - return result - def _insert_one( self, doc, ordered, check_keys, write_concern, op_id, bypass_doc_val, @@ -502,15 +449,7 @@ def _insert_one( command['writeConcern'] = write_concern.document def _insert_command(session, sock_info, retryable_write): - if not sock_info.op_msg_enabled and not acknowledged: - # Legacy OP_INSERT. - return self._legacy_write( - sock_info, 'insert', command, op_id, - bypass_doc_val, message._insert, self.__full_name, - [doc], check_keys, False, - self.__write_response_codec_options) - - if bypass_doc_val and sock_info.max_wire_version >= 4: + if bypass_doc_val: command['bypassDocumentValidation'] = True result = sock_info.command( @@ -658,28 +597,19 @@ def _update(self, sock_info, criteria, document, upsert=False, ('multi', multi), ('upsert', upsert)]) if collation is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use collations.') - elif not acknowledged: + if not acknowledged: raise ConfigurationError( 'Collation is unsupported for unacknowledged writes.') else: update_doc['collation'] = collation if array_filters is not None: - if sock_info.max_wire_version < 6: - raise ConfigurationError( - 'Must be connected to MongoDB 3.6+ to use array_filters.') - elif not acknowledged: + if not acknowledged: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged writes.') else: update_doc['arrayFilters'] = array_filters if hint is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use hint.') - elif not acknowledged: + if not acknowledged: raise ConfigurationError( 'hint is unsupported for unacknowledged writes.') if not isinstance(hint, str): @@ -692,16 +622,8 @@ def _update(self, sock_info, criteria, document, upsert=False, if not write_concern.is_server_default: command['writeConcern'] = write_concern.document - if not sock_info.op_msg_enabled and not acknowledged: - # Legacy OP_UPDATE. - return self._legacy_write( - sock_info, 'update', command, op_id, - bypass_doc_val, message._update, self.__full_name, upsert, - multi, criteria, document, check_keys, - self.__write_response_codec_options) - # Update command. - if bypass_doc_val and sock_info.max_wire_version >= 4: + if bypass_doc_val: command['bypassDocumentValidation'] = True # The command result has to be published for APM unmodified @@ -1018,19 +940,13 @@ def _delete( ('limit', int(not multi))]) collation = validate_collation_or_none(collation) if collation is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use collations.') - elif not acknowledged: + if not acknowledged: raise ConfigurationError( 'Collation is unsupported for unacknowledged writes.') else: delete_doc['collation'] = collation if hint is not None: - if sock_info.max_wire_version < 5: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use hint.') - elif not acknowledged: + if not acknowledged: raise ConfigurationError( 'hint is unsupported for unacknowledged writes.') if not isinstance(hint, str): @@ -1042,13 +958,6 @@ def _delete( if not write_concern.is_server_default: command['writeConcern'] = write_concern.document - if not sock_info.op_msg_enabled and not acknowledged: - # Legacy OP_DELETE. - return self._legacy_write( - sock_info, 'delete', command, op_id, - False, message._delete, self.__full_name, criteria, - self.__write_response_codec_options, - int(not multi)) # Delete command. result = sock_info.command( self.__database.name, @@ -1635,7 +1544,6 @@ def __create_indexes(self, indexes, session, **kwargs): """ names = [] with self._socket_for_writes(session) as sock_info: - supports_collations = sock_info.max_wire_version >= 5 supports_quorum = sock_info.max_wire_version >= 9 def gen_indexes(): @@ -1645,10 +1553,6 @@ def gen_indexes(): "%r is not an instance of " "pymongo.operations.IndexModel" % (index,)) document = index.document - if "collation" in document and not supports_collations: - raise ConfigurationError( - "Must be connected to MongoDB " - "3.4+ to use collations.") names.append(document["name"]) yield document @@ -1880,32 +1784,21 @@ def list_indexes(self, session=None): def _cmd(session, server, sock_info, secondary_ok): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) - if sock_info.max_wire_version > 2: - with self.__database.client._tmp_session(session, False) as s: - try: - cursor = self._command(sock_info, cmd, secondary_ok, - read_pref, - codec_options, - session=s)["cursor"] - except OperationFailure as exc: - # Ignore NamespaceNotFound errors to match the behavior - # of reading from *.system.indexes. - if exc.code != 26: - raise - cursor = {'id': 0, 'firstBatch': []} - cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=s, - explicit_session=session is not None) - else: - res = message._first_batch( - sock_info, self.__database.name, "system.indexes", - {"ns": self.__full_name}, 0, secondary_ok, codec_options, - read_pref, cmd, - self.database.client._event_listeners) - cursor = res["cursor"] - # Note that a collection can only have 64 indexes, so there - # will never be a getMore call. - cmd_cursor = CommandCursor(coll, cursor, sock_info.address) + with self.__database.client._tmp_session(session, False) as s: + try: + cursor = self._command(sock_info, cmd, secondary_ok, + read_pref, + codec_options, + session=s)["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {'id': 0, 'firstBatch': []} + cmd_cursor = CommandCursor( + coll, cursor, sock_info.address, session=s, + explicit_session=session is not None) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor @@ -2356,10 +2249,6 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, def _find_and_modify(session, sock_info, retryable_write): if array_filters is not None: - if sock_info.max_wire_version < 6: - raise ConfigurationError( - 'Must be connected to MongoDB 3.6+ to use ' - 'arrayFilters.') if not write_concern.acknowledged: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged ' @@ -2373,8 +2262,7 @@ def _find_and_modify(session, sock_info, retryable_write): raise ConfigurationError( 'hint is unsupported for unacknowledged writes.') cmd['hint'] = hint - if (sock_info.max_wire_version >= 4 and - not write_concern.is_server_default): + if not write_concern.is_server_default: cmd['writeConcern'] = write_concern.document out = self._command(sock_info, cmd, read_preference=ReadPreference.PRIMARY, diff --git a/pymongo/database.py b/pymongo/database.py index 35a878322d..c30d29bde4 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -14,25 +14,19 @@ """Database level operations.""" -import warnings - from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.dbref import DBRef from bson.son import SON -from pymongo import auth, common +from pymongo import common from pymongo.aggregation import _DatabaseAggregationCommand from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor from pymongo.errors import (CollectionInvalid, InvalidName) -from pymongo.message import _first_batch from pymongo.read_preferences import ReadPreference -_INDEX_REGEX = {"name": {"$regex": r"^(?!.*\$)"}} - - def _check_name(name): """Check if a database name is valid. """ @@ -618,37 +612,21 @@ def _list_collections(self, sock_info, secondary_okay, session, coll = self.get_collection( "$cmd", read_preference=read_preference) - if sock_info.max_wire_version > 2: - cmd = SON([("listCollections", 1), - ("cursor", {})]) - cmd.update(kwargs) - with self.__client._tmp_session( - session, close=False) as tmp_session: - cursor = self._command( - sock_info, cmd, secondary_okay, - read_preference=read_preference, - session=tmp_session)["cursor"] - cmd_cursor = CommandCursor( - coll, - cursor, - sock_info.address, - session=tmp_session, - explicit_session=session is not None) - else: - match = _INDEX_REGEX - if "filter" in kwargs: - match = {"$and": [_INDEX_REGEX, kwargs["filter"]]} - dblen = len(self.name.encode("utf8") + b".") - pipeline = [ - {"$project": {"name": {"$substr": ["$name", dblen, -1]}, - "options": 1}}, - {"$match": match} - ] - cmd = SON([("aggregate", "system.namespaces"), - ("pipeline", pipeline), - ("cursor", kwargs.get("cursor", {}))]) - cursor = self._command(sock_info, cmd, secondary_okay)["cursor"] - cmd_cursor = CommandCursor(coll, cursor, sock_info.address) + cmd = SON([("listCollections", 1), + ("cursor", {})]) + cmd.update(kwargs) + with self.__client._tmp_session( + session, close=False) as tmp_session: + cursor = self._command( + sock_info, cmd, secondary_okay, + read_preference=read_preference, + session=tmp_session)["cursor"] + cmd_cursor = CommandCursor( + coll, + cursor, + sock_info.address, + session=tmp_session, + explicit_session=session is not None) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 86d1e9f484..37d6f59b74 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -168,41 +168,6 @@ def _check_command_response(response, max_wire_version, raise OperationFailure(errmsg, code, response, max_wire_version) -def _check_gle_response(result, max_wire_version): - """Return getlasterror response as a dict, or raise OperationFailure.""" - # Did getlasterror itself fail? - _check_command_response(result, max_wire_version) - - if result.get("wtimeout", False): - # MongoDB versions before 1.8.0 return the error message in an "errmsg" - # field. If "errmsg" exists "err" will also exist set to None, so we - # have to check for "errmsg" first. - raise WTimeoutError(result.get("errmsg", result.get("err")), - result.get("code"), - result) - - error_msg = result.get("err", "") - if error_msg is None: - return result - - if error_msg.startswith(HelloCompat.LEGACY_ERROR): - raise NotPrimaryError(error_msg, result) - - details = result - - # mongos returns the error code in an error object for some errors. - if "errObjects" in result: - for errobj in result["errObjects"]: - if errobj.get("err") == error_msg: - details = errobj - break - - code = details.get("code") - if code in (11000, 11001, 12582): - raise DuplicateKeyError(details["err"], code, result) - raise OperationFailure(details["err"], code, result) - - def _raise_last_write_error(write_errors): # If the last batch had multiple errors only report # the last error to emulate continue_on_error. diff --git a/pymongo/message.py b/pymongo/message.py index a30975db9e..8a496d5b1b 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -32,7 +32,6 @@ _decode_selective, _dict_to_bson, _make_c_string) -from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.int64 import Int64 from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument) @@ -52,7 +51,6 @@ OperationFailure, ProtocolError) from pymongo.hello import HelloCompat -from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -271,7 +269,7 @@ def namespace(self): def use_command(self, sock_info): use_find_cmd = False - if sock_info.max_wire_version >= 4 and not self.exhaust: + if not self.exhaust: use_find_cmd = True elif sock_info.max_wire_version >= 8: # OP_MSG supports exhaust on MongoDB 4.2+ @@ -283,18 +281,7 @@ def use_command(self, sock_info): % (self.read_concern.level, sock_info.max_wire_version)) - if sock_info.max_wire_version < 5 and self.collation is not None: - raise ConfigurationError( - 'Specifying a collation is unsupported with a max wire ' - 'version of %d.' % (sock_info.max_wire_version,)) - - if sock_info.max_wire_version < 4 and self.allow_disk_use is not None: - raise ConfigurationError( - 'Specifying allowDiskUse is unsupported with a max wire ' - 'version of %d.' % (sock_info.max_wire_version,)) - sock_info.validate_session(self.client, self.session) - return use_find_cmd def as_command(self, sock_info): @@ -342,24 +329,21 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] - if sock_info.op_msg_enabled: - request_id, msg, size, _ = _op_msg( - 0, spec, self.db, self.read_preference, - set_secondary_ok, False, self.codec_options, - ctx=sock_info.compression_context) - return request_id, msg, size - ns = "%s.%s" % (self.db, "$cmd") - ntoreturn = -1 # All DB commands return 1 document - else: - # OP_QUERY treats ntoreturn of -1 and 1 the same, return - # one document and close the cursor. We have to use 2 for - # batch size if 1 is specified. - ntoreturn = self.batch_size == 1 and 2 or self.batch_size - if self.limit: - if ntoreturn: - ntoreturn = min(self.limit, ntoreturn) - else: - ntoreturn = self.limit + request_id, msg, size, _ = _op_msg( + 0, spec, self.db, self.read_preference, + set_secondary_ok, False, self.codec_options, + ctx=sock_info.compression_context) + return request_id, msg, size + + # OP_QUERY treats ntoreturn of -1 and 1 the same, return + # one document and close the cursor. We have to use 2 for + # batch size if 1 is specified. + ntoreturn = self.batch_size == 1 and 2 or self.batch_size + if self.limit: + if ntoreturn: + ntoreturn = min(self.limit, ntoreturn) + else: + ntoreturn = self.limit if sock_info.is_mongos: spec = _maybe_add_read_preference(spec, @@ -400,7 +384,7 @@ def namespace(self): def use_command(self, sock_info): use_cmd = False - if sock_info.max_wire_version >= 4 and not self.exhaust: + if not self.exhaust: use_cmd = True elif sock_info.max_wire_version >= 8: # OP_MSG supports exhaust on MongoDB 4.2+ @@ -440,19 +424,15 @@ def get_message(self, dummy0, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] - if sock_info.op_msg_enabled: - if self.sock_mgr: - flags = _OpMsg.EXHAUST_ALLOWED - else: - flags = 0 - request_id, msg, size, _ = _op_msg( - flags, spec, self.db, None, - False, False, self.codec_options, - ctx=sock_info.compression_context) - return request_id, msg, size - ns = "%s.%s" % (self.db, "$cmd") - return _query(0, ns, 0, -1, spec, None, self.codec_options, - ctx=ctx) + if self.sock_mgr: + flags = _OpMsg.EXHAUST_ALLOWED + else: + flags = 0 + request_id, msg, size, _ = _op_msg( + flags, spec, self.db, None, + False, False, self.codec_options, + ctx=sock_info.compression_context) + return request_id, msg, size return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) @@ -464,7 +444,7 @@ def use_command(self, sock_info): if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True - elif sock_info.op_msg_enabled and not self.exhaust: + elif not self.exhaust: return True return False @@ -476,7 +456,7 @@ def use_command(self, sock_info): if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True - elif sock_info.op_msg_enabled and not self.exhaust: + elif not self.exhaust: return True return False @@ -528,16 +508,6 @@ def _compress(operation, data, ctx): return request_id, header + compressed -def __last_error(namespace, args): - """Data to send to do a lastError. - """ - cmd = SON([("getlasterror", 1)]) - cmd.update(args) - splitns = namespace.split('.', 1) - return _query(0, splitns[0] + '.$cmd', 0, -1, cmd, - None, DEFAULT_CODEC_OPTIONS) - - _pack_header = struct.Struct(" ctx.max_bson_size) - - message_length += encoded_length - if message_length < ctx.max_message_size and not too_large: - data.write(encoded) - to_send.append(doc) - has_docs = True - continue - - if has_docs: - # We have enough data, send this message. - try: - if compress: - rid, msg = None, data.getvalue() - else: - rid, msg = _insert_message(data.getvalue(), send_safe) - ctx.legacy_bulk_insert( - rid, msg, 0, send_safe, to_send, compress) - # Exception type could be OperationFailure or a subtype - # (e.g. DuplicateKeyError) - except OperationFailure as exc: - # Like it says, continue on error... - if continue_on_error: - # Store exception details to re-raise after the final batch. - last_error = exc - # With unacknowledged writes just return at the first error. - elif not safe: - return - # With acknowledged writes raise immediately. - else: - raise - - if too_large: - _raise_document_too_large( - "insert", encoded_length, ctx.max_bson_size) - - message_length = begin_loc + encoded_length - data.seek(begin_loc) - data.truncate() - data.write(encoded) - to_send = [doc] - - if not has_docs: - raise InvalidOperation("cannot do an empty bulk insert") - - if compress: - request_id, msg = None, data.getvalue() - else: - request_id, msg = _insert_message(data.getvalue(), safe) - ctx.legacy_bulk_insert(request_id, msg, 0, safe, to_send, compress) - - # Re-raise any exception stored due to continue_on_error - if last_error is not None: - raise last_error -if _use_c: - _do_batched_insert = _cmessage._do_batched_insert - # OP_MSG ------------------------------------------------------------- @@ -1335,20 +1066,6 @@ def _do_batched_op_msg( # End OP_MSG ----------------------------------------------------- -def _batched_write_command_compressed( - namespace, operation, command, docs, check_keys, opts, ctx): - """Create the next batched insert, update, or delete command, compressed. - """ - data, to_send = _encode_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx) - - request_id, msg = _compress( - 2004, - data, - ctx.sock_info.compression_context) - return request_id, msg, to_send - - def _encode_batched_write_command( namespace, operation, command, docs, check_keys, opts, ctx): """Encode the next batched insert, update, or delete command. @@ -1362,53 +1079,6 @@ def _encode_batched_write_command( _encode_batched_write_command = _cmessage._encode_batched_write_command -def _batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Create the next batched insert, update, or delete command. - """ - buf = _BytesIO() - - # Save space for message length and request id - buf.write(_ZERO_64) - # responseTo, opCode - buf.write(b"\x00\x00\x00\x00\xd4\x07\x00\x00") - - # Write OP_QUERY write command - to_send, length = _batched_write_command_impl( - namespace, operation, command, docs, check_keys, opts, ctx, buf) - - # Header - request id and message length - buf.seek(4) - request_id = _randint() - buf.write(_pack_int(request_id)) - buf.seek(0) - buf.write(_pack_int(length)) - - return request_id, buf.getvalue(), to_send -if _use_c: - _batched_write_command = _cmessage._batched_write_command - - -def _do_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Batched write commands entry point.""" - if ctx.sock_info.compression_context: - return _batched_write_command_compressed( - namespace, operation, command, docs, check_keys, opts, ctx) - return _batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx) - - -def _do_bulk_write_command( - namespace, operation, command, docs, check_keys, opts, ctx): - """Bulk write commands entry point.""" - if ctx.sock_info.max_wire_version > 5: - return _do_batched_op_msg( - namespace, operation, command, docs, check_keys, opts, ctx) - return _do_batched_write_command( - namespace, operation, command, docs, check_keys, opts, ctx) - - def _batched_write_command_impl( namespace, operation, command, docs, check_keys, opts, ctx, buf): """Create a batched OP_QUERY write command.""" @@ -1585,7 +1255,7 @@ def unpack(cls, msg): # PYTHON-945: ignore starting_from field. flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg) - documents = bytes(msg[20:]) + documents = msg[20:] return cls(flags, cursor_id, number_returned, documents) @@ -1665,7 +1335,7 @@ def unpack(cls, msg): if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") - payload_document = bytes(msg[5:]) + payload_document = msg[5:] return cls(flags, payload_document) @@ -1673,63 +1343,3 @@ def unpack(cls, msg): _OpReply.OP_CODE: _OpReply.unpack, _OpMsg.OP_CODE: _OpMsg.unpack, } - - -def _first_batch(sock_info, db, coll, query, ntoreturn, - secondary_ok, codec_options, read_preference, cmd, listeners): - """Simple query helper for retrieving a first (and possibly only) batch.""" - query = _Query( - 0, db, coll, 0, query, None, codec_options, - read_preference, ntoreturn, 0, DEFAULT_READ_CONCERN, None, None, - None, None, False) - - name = next(iter(cmd)) - publish = listeners.enabled_for_commands - if publish: - start = datetime.datetime.now() - - request_id, msg, max_doc_size = query.get_message(secondary_ok, sock_info) - - if publish: - encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start( - cmd, db, request_id, sock_info.address, - service_id=sock_info.service_id) - start = datetime.datetime.now() - - sock_info.send_message(msg, max_doc_size) - reply = sock_info.receive_message(request_id) - try: - docs = reply.unpack_response(None, codec_options) - except Exception as exc: - if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure = exc.details - else: - failure = _convert_exception(exc) - listeners.publish_command_failure( - duration, failure, name, request_id, sock_info.address, - service_id=sock_info.service_id) - raise - # listIndexes - if 'cursor' in cmd: - result = { - 'cursor': { - 'firstBatch': docs, - 'id': reply.cursor_id, - 'ns': '%s.%s' % (db, coll) - }, - 'ok': 1.0 - } - # fsyncUnlock, currentOp - else: - result = docs[0] if docs else {} - result['ok'] = 1.0 - if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - listeners.publish_command_success( - duration, result, name, request_id, sock_info.address, - service_id=sock_info.service_id) - - return result diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index cce5f14d04..db56187b77 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -32,9 +32,7 @@ """ import contextlib -import datetime import threading -import warnings import weakref from collections import defaultdict @@ -62,8 +60,7 @@ ServerSelectionTimeoutError) from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import (writable_preferred_server_selector, - writable_server_selector) +from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.topology import (Topology, _ErrorContext) @@ -1377,8 +1374,6 @@ def _retryable_read(self, func, read_pref, session, address=None, try: server = self._select_server( read_pref, session, address=address) - if not server.description.retryable_reads_supported: - retryable = False with self._secondaryok_for_server(read_pref, server, session) as ( sock_info, secondary_ok): if retrying and not retryable: @@ -1561,51 +1556,10 @@ def _kill_cursors(self, cursor_ids, address, topology, session): self._kill_cursor_impl(cursor_ids, address, session, sock_info) def _kill_cursor_impl(self, cursor_ids, address, session, sock_info): - listeners = self._event_listeners - publish = listeners.enabled_for_commands - - try: - namespace = address.namespace - db, coll = namespace.split('.', 1) - except AttributeError: - namespace = None - db = coll = "OP_KILL_CURSORS" + namespace = address.namespace + db, coll = namespace.split('.', 1) spec = SON([('killCursors', coll), ('cursors', cursor_ids)]) - if sock_info.max_wire_version >= 4 and namespace is not None: - sock_info.command(db, spec, session=session, client=self) - else: - if publish: - start = datetime.datetime.now() - request_id, msg = message._kill_cursors(cursor_ids) - if publish: - duration = datetime.datetime.now() - start - # Here and below, address could be a tuple or - # _CursorAddress. We always want to publish a - # tuple to match the rest of the monitoring - # API. - listeners.publish_command_start( - spec, db, request_id, tuple(address), - service_id=sock_info.service_id) - start = datetime.datetime.now() - - try: - sock_info.send_message(msg, 0) - except Exception as exc: - if publish: - dur = ((datetime.datetime.now() - start) + duration) - listeners.publish_command_failure( - dur, message._convert_exception(exc), - 'killCursors', request_id, - tuple(address), service_id=sock_info.service_id) - raise - - if publish: - duration = ((datetime.datetime.now() - start) + duration) - # OP_KILL_CURSORS returns no reply, fake one. - reply = {'cursorsUnknown': cursor_ids, 'ok': 1} - listeners.publish_command_success( - duration, reply, 'killCursors', request_id, - tuple(address), service_id=sock_info.service_id) + sock_info.command(db, spec, session=session, client=self) def _process_kill_cursors(self): """Process any pending kill cursors requests.""" diff --git a/pymongo/pool.py b/pymongo/pool.py index 52c57c7f07..e85dbeae3b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -580,7 +580,7 @@ def _hello(self, cluster_time, topology_version, self.sock.settimeout( self.opts.connect_timeout + heartbeat_frequency) - if self.max_wire_version >= 6 and cluster_time is not None: + if not performing_handshake and cluster_time is not None: cmd['$clusterTime'] = cluster_time # XXX: Simplify in PyMongo 4.0 when all_credentials is always a single @@ -615,7 +615,7 @@ def _hello(self, cluster_time, topology_version, hello.compressors) self.compression_context = ctx - self.op_msg_enabled = hello.max_wire_version >= 6 + self.op_msg_enabled = True if creds: self.negotiated_mechanisms[creds] = hello.sasl_supported_mechs if auth_ctx: @@ -687,23 +687,13 @@ def command(self, dbname, spec, secondary_ok=False, if not isinstance(spec, ORDERED_TYPES): spec = SON(spec) - if (read_concern and self.max_wire_version < 4 - and not read_concern.ok_for_legacy): - raise ConfigurationError( - 'read concern level of %s is not valid ' - 'with a max wire version of %d.' - % (read_concern.level, self.max_wire_version)) if not (write_concern is None or write_concern.acknowledged or collation is None): raise ConfigurationError( 'Collation is unsupported for unacknowledged writes.') - if (self.max_wire_version >= 5 and - write_concern and + if (write_concern and not write_concern.is_server_default): spec['writeConcern'] = write_concern.document - elif self.max_wire_version < 5 and collation is not None: - raise ConfigurationError( - 'Must be connected to MongoDB 3.4+ to use a collation.') self.add_server_api(spec) if session: @@ -769,25 +759,17 @@ def _raise_if_not_writable(self, unacknowledged): raise NotPrimaryError("not primary", { "ok": 0, "errmsg": "not primary", "code": 10107}) - def legacy_write(self, request_id, msg, max_doc_size, with_last_error): - """Send OP_INSERT, etc., optionally returning response as a dict. + def unack_write(self, msg, max_doc_size): + """Send unack OP_MSG. - Can raise ConnectionFailure or OperationFailure. + Can raise ConnectionFailure or InvalidDocument. :Parameters: - - `request_id`: an int. - - `msg`: bytes, an OP_INSERT, OP_UPDATE, or OP_DELETE message, - perhaps with a getlasterror command appended. + - `msg`: bytes, an OP_MSG message. - `max_doc_size`: size in bytes of the largest document in `msg`. - - `with_last_error`: True if a getlasterror command is appended. """ - self._raise_if_not_writable(not with_last_error) - + self._raise_if_not_writable(True) self.send_message(msg, max_doc_size) - if with_last_error: - reply = self.receive_message(request_id) - return helpers._check_gle_response(reply.command_response(), - self.max_wire_version) def write_command(self, request_id, msg): """Send "insert" etc. command, returning response as a dict. @@ -881,7 +863,7 @@ def socket_closed(self): def send_cluster_time(self, command, session, client): """Add cluster time for MongoDB >= 3.6.""" - if self.max_wire_version >= 6 and client: + if client: client._send_cluster_time(command, session) def add_server_api(self, command): diff --git a/test/__init__.py b/test/__init__.py index 388caf715e..c70e854d1f 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -605,10 +605,9 @@ def require_version_max(self, *ver): def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" - return self.check_auth_with_sharding( - self._require(lambda: self.auth_enabled, - "Authentication is not enabled on the server", - func=func)) + return self._require(lambda: self.auth_enabled, + "Authentication is not enabled on the server", + func=func) def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" @@ -706,14 +705,6 @@ def require_no_load_balancer(self, func): "Must not be connected to a load balancer", func=func) - def check_auth_with_sharding(self, func): - """Skip a test when connected to mongos < 2.0 and running with auth.""" - condition = lambda: not (self.auth_enabled and - self.is_mongos and self.version < (2,)) - return self._require(condition, - "Auth with sharding requires MongoDB >= 2.0.0", - func=func) - def is_topology_type(self, topologies): unknown = set(topologies) - {'single', 'replicaset', 'sharded', 'sharded-replicaset', 'load-balanced'} @@ -818,9 +809,7 @@ def supports_retryable_writes(self): return False if not self.sessions_enabled: return False - if self.version.at_least(3, 6): - return self.is_mongos or self.is_rs - return False + return self.is_mongos or self.is_rs def require_retryable_writes(self, func): """Run a test only if the deployment supports retryable writes.""" diff --git a/test/test_auth.py b/test/test_auth.py index 76b320fcb9..6504ce5777 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -307,17 +307,8 @@ def auth_string(user, password): class TestSCRAMSHA1(IntegrationTest): @client_context.require_auth - @client_context.require_version_min(2, 7, 2) def setUp(self): super(TestSCRAMSHA1, self).setUp() - # Before 2.7.7, SCRAM-SHA-1 had to be enabled from the command line. - if client_context.version < Version(2, 7, 7): - cmd_line = client_context.cmd_line - if 'SCRAM-SHA-1' not in cmd_line.get( - 'parsed', {}).get('setParameter', - {}).get('authenticationMechanisms', ''): - raise SkipTest('SCRAM-SHA-1 mechanism not enabled') - client_context.create_user( 'pymongo_test', 'user', 'pass', roles=['userAdmin', 'readWrite']) diff --git a/test/test_bulk.py b/test/test_bulk.py index f0cb52912e..aa2dcab928 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -164,18 +164,6 @@ def test_update_many(self): def test_update_many_pipeline(self): self._test_update_many([{'$set': {'foo': 'bar'}}]) - @client_context.require_version_max(3, 5, 5) - def test_array_filters_unsupported(self): - requests = [ - UpdateMany( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]), - UpdateOne( - {}, {'$set': {"y.$[i].b": 2}}, array_filters=[{'i.b': 3}]) - ] - for bulk_op in requests: - self.assertRaises( - ConfigurationError, self.coll.bulk_write, [bulk_op]) - def test_array_filters_validation(self): self.assertRaises(TypeError, UpdateMany, {}, {}, array_filters={}) self.assertRaises(TypeError, UpdateOne, {}, {}, array_filters={}) @@ -307,7 +295,6 @@ def test_numerous_inserts(self): self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) - @client_context.require_version_min(3, 6) def test_bulk_max_message_size(self): self.coll.delete_many({}) self.addCleanup(self.coll.delete_many, {}) @@ -781,38 +768,29 @@ def setUpClass(cls): cls.secondary = single_client(*partition_node(member)) break - # We tested wtimeout errors by specifying a write concern greater than - # the number of members, but in MongoDB 2.7.8+ this causes a different - # sort of error, "Not enough data-bearing nodes". In recent servers we - # use a failpoint to pause replication on a secondary. - cls.need_replication_stopped = client_context.version.at_least(2, 7, 8) - @classmethod def tearDownClass(cls): if cls.secondary: cls.secondary.close() def cause_wtimeout(self, requests, ordered): - if self.need_replication_stopped: - if not client_context.test_commands_enabled: - self.skipTest("Test commands must be enabled.") + if not client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled.") - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='alwaysOn') - - try: - coll = self.coll.with_options( - write_concern=WriteConcern(w=self.w, wtimeout=1)) - return coll.bulk_write(requests, ordered=ordered) - finally: - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='off') - else: + # Use the rsSyncApplyStop failpoint to pause replication on a + # secondary which will cause a wtimeout error. + self.secondary.admin.command('configureFailPoint', + 'rsSyncApplyStop', + mode='alwaysOn') + + try: coll = self.coll.with_options( - write_concern=WriteConcern(w=self.w + 1, wtimeout=1)) + write_concern=WriteConcern(w=self.w, wtimeout=1)) return coll.bulk_write(requests, ordered=ordered) + finally: + self.secondary.admin.command('configureFailPoint', + 'rsSyncApplyStop', + mode='off') @client_context.require_replica_set @client_context.require_secondaries_count(1) diff --git a/test/test_client.py b/test/test_client.py index 8ac1f86be9..d69025c8b0 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -729,17 +729,15 @@ def test_list_databases(self): for doc in client.list_databases(): self.assertIs(type(doc), dict) - if client_context.version.at_least(3, 4, 2): - self.client.pymongo_test.test.insert_one({}) - cursor = self.client.list_databases(filter={"name": "admin"}) - docs = list(cursor) - self.assertEqual(1, len(docs)) - self.assertEqual(docs[0]["name"], "admin") - - if client_context.version.at_least(3, 4, 3): - cursor = self.client.list_databases(nameOnly=True) - for doc in cursor: - self.assertEqual(["name"], list(doc)) + self.client.pymongo_test.test.insert_one({}) + cursor = self.client.list_databases(filter={"name": "admin"}) + docs = list(cursor) + self.assertEqual(1, len(docs)) + self.assertEqual(docs[0]["name"], "admin") + + cursor = self.client.list_databases(nameOnly=True) + for doc in cursor: + self.assertEqual(["name"], list(doc)) def test_list_database_names(self): self.client.pymongo_test.test.insert_one({"dummy": "object"}) @@ -763,15 +761,12 @@ def test_drop_database(self): self.assertIn("pymongo_test2", dbs) self.client.drop_database("pymongo_test") - if client_context.version.at_least(3, 3, 9) and client_context.is_rs: + if client_context.is_rs: wc_client = rs_or_single_client(w=len(client_context.nodes) + 1) with self.assertRaises(WriteConcernError): wc_client.drop_database('pymongo_test2') self.client.drop_database(self.client.pymongo_test2) - - raise SkipTest("This test often fails due to SERVER-2329") - dbs = self.client.list_database_names() self.assertNotIn("pymongo_test", dbs) self.assertNotIn("pymongo_test2", dbs) diff --git a/test/test_collation.py b/test/test_collation.py index d352fccc17..f0139b4a22 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -77,17 +77,6 @@ def test_constructor(self): }, Collation('en_US', backwards=True).document) -def raisesConfigurationErrorForOldMongoDB(func): - @functools.wraps(func) - def wrapper(self, *args, **kwargs): - if client_context.version.at_least(3, 3, 9): - return func(self, *args, **kwargs) - else: - with self.assertRaises(ConfigurationError): - return func(self, *args, **kwargs) - return wrapper - - class TestCollation(IntegrationTest): @classmethod @client_context.require_connection @@ -120,7 +109,6 @@ def assertCollationInLastCommand(self): self.collation.document, self.last_command_started()['collation']) - @raisesConfigurationErrorForOldMongoDB def test_create_collection(self): self.db.test.drop() self.db.create_collection('test', collation=self.collation) @@ -136,7 +124,6 @@ def test_index_model(self): model = IndexModel([('a', 1), ('b', -1)], collation=self.collation) self.assertEqual(self.collation.document, model.document['collation']) - @raisesConfigurationErrorForOldMongoDB def test_create_index(self): self.db.test.create_index('foo', collation=self.collation) ci_cmd = self.listener.results['started'][0].command @@ -144,18 +131,15 @@ def test_create_index(self): self.collation.document, ci_cmd['indexes'][0]['collation']) - @raisesConfigurationErrorForOldMongoDB def test_aggregate(self): self.db.test.aggregate([{'$group': {'_id': 42}}], collation=self.collation) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_count_documents(self): self.db.test.count_documents({}, collation=self.collation) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_distinct(self): self.db.test.distinct('foo', collation=self.collation) self.assertCollationInLastCommand() @@ -164,14 +148,12 @@ def test_distinct(self): self.db.test.find(collation=self.collation).distinct('foo') self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_find_command(self): self.db.test.insert_one({'is this thing on?': True}) self.listener.results.clear() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_explain_command(self): self.listener.results.clear() self.db.test.find(collation=self.collation).explain() @@ -180,7 +162,6 @@ def test_explain_command(self): self.collation.document, self.last_command_started()['explain']['collation']) - @raisesConfigurationErrorForOldMongoDB def test_delete(self): self.db.test.delete_one({'foo': 42}, collation=self.collation) command = self.listener.results['started'][0].command @@ -195,7 +176,6 @@ def test_delete(self): self.collation.document, command['deletes'][0]['collation']) - @raisesConfigurationErrorForOldMongoDB def test_update(self): self.db.test.replace_one({'foo': 42}, {'foo': 43}, collation=self.collation) @@ -220,7 +200,6 @@ def test_update(self): self.collation.document, command['updates'][0]['collation']) - @raisesConfigurationErrorForOldMongoDB def test_find_and(self): self.db.test.find_one_and_delete({'foo': 42}, collation=self.collation) self.assertCollationInLastCommand() @@ -235,7 +214,6 @@ def test_find_and(self): collation=self.collation) self.assertCollationInLastCommand() - @raisesConfigurationErrorForOldMongoDB def test_bulk_write(self): self.db.test.collection.bulk_write([ DeleteOne({'noCollation': 42}), @@ -266,7 +244,6 @@ def check_ops(ops): check_ops(delete_cmd['deletes']) check_ops(update_cmd['updates']) - @raisesConfigurationErrorForOldMongoDB def test_indexes_same_keys_different_collations(self): self.db.test.drop() usa_collation = Collation('en_US') @@ -302,7 +279,6 @@ def test_unacknowledged_write(self): with self.assertRaises(ConfigurationError): collection.bulk_write([update_one]) - @raisesConfigurationErrorForOldMongoDB def test_cursor_collation(self): self.db.test.insert_one({'hello': 'world'}) next(self.db.test.find().collation(self.collation)) diff --git a/test/test_collection.py b/test/test_collection.py index 26c616a4dd..a7d56bf7b6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -134,7 +134,7 @@ def tearDown(self): @contextlib.contextmanager def write_concern_collection(self): - if client_context.version.at_least(3, 3, 9) and client_context.is_rs: + if client_context.is_rs: with self.assertRaises(WriteConcernError): # Unsatisfiable write concern. yield Collection( @@ -153,7 +153,6 @@ def test_equality(self): def test_hashable(self): self.assertIn(self.db.test.mike, {self.db["test.mike"]}) - @client_context.require_version_min(3, 3, 9) def test_create(self): # No Exception. db = client_context.client.pymongo_test @@ -322,9 +321,6 @@ def test_drop_index(self): @client_context.require_no_mongos @client_context.require_test_commands def test_index_management_max_time_ms(self): - if (client_context.version[:2] == (3, 4) and - client_context.version[2] < 4): - raise unittest.SkipTest("SERVER-27711") coll = self.db.test self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", @@ -531,21 +527,6 @@ def _drop_dups_setup(self, db): db.test.insert_one({'i': 2}) # duplicate db.test.insert_one({'i': 3}) - @client_context.require_version_max(2, 6) - def test_index_drop_dups(self): - # Try dropping duplicates - db = self.db - self._drop_dups_setup(db) - - # No error, just drop the duplicate - db.test.create_index([('i', ASCENDING)], unique=True, dropDups=True) - - # Duplicate was dropped - self.assertEqual(3, db.test.count_documents({})) - - # Index was created, plus the index on _id - self.assertEqual(2, len(db.test.index_information())) - def test_index_dont_drop_dups(self): # Try *not* dropping duplicates db = self.db @@ -587,7 +568,6 @@ def get_plan_stage(self, root, stage): return stage return {} - @client_context.require_version_min(3, 1, 9, -1) def test_index_filter(self): db = self.db db.drop_collection("test") @@ -901,10 +881,8 @@ def test_write_large_document(self): unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) self.assertRaises(DocumentTooLarge, unack_coll.replace_one, {"bar": "x"}, {"bar": "x" * (max_size - 14)}) - # This will pass with OP_UPDATE or the update command. self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) - @client_context.require_version_min(3, 1, 9, -1) def test_insert_bypass_document_validation(self): db = self.db db.test.drop() @@ -923,14 +901,9 @@ def test_insert_bypass_document_validation(self): self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(2, result.inserted_id) - if client_context.version < (3, 6): - # Uses OP_INSERT which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.insert_one, - {"y": 1}, bypass_document_validation=True) - else: - db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1}), - "find w:0 inserted document") + db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"y": 1}), + "find w:0 inserted document") # Test insert_many docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] @@ -959,7 +932,6 @@ def test_insert_bypass_document_validation(self): [{"x": 1}, {"x": 2}], bypass_document_validation=True) - @client_context.require_version_min(3, 1, 9, -1) def test_replace_bypass_document_validation(self): db = self.db db.test.drop() @@ -997,18 +969,11 @@ def test_replace_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": 103})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - if client_context.version < (3, 6): - # Uses OP_UPDATE which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.replace_one, - {"y": 1}, {"x": 1}, - bypass_document_validation=True) - else: - db_w0.test.replace_one({"y": 1}, {"x": 1}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"x": 1}), - "find w:0 replaced document") + db_w0.test.replace_one({"y": 1}, {"x": 1}, + bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"x": 1}), + "find w:0 replaced document") - @client_context.require_version_min(3, 1, 9, -1) def test_update_bypass_document_validation(self): db = self.db db.test.drop() @@ -1048,16 +1013,10 @@ def test_update_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"z": 0})) db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) - if client_context.version < (3, 6): - # Uses OP_UPDATE which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.update_one, - {"y": 1}, {"$inc": {"x": 1}}, + db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) - else: - db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), - "find w:0 updated document") + wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), + "find w:0 updated document") # Test update_many db.test.insert_many([{"z": i} for i in range(3, 101)]) @@ -1094,19 +1053,12 @@ def test_update_bypass_document_validation(self): db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) - if client_context.version < (3, 6): - # Uses OP_UPDATE which does not support bypass_document_validation. - self.assertRaises(OperationFailure, db_w0.test.update_many, - {"m": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - else: - db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - wait_until( - lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, - "find w:0 updated documents") + db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, + bypass_document_validation=True) + wait_until( + lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, + "find w:0 updated documents") - @client_context.require_version_min(3, 1, 9, -1) def test_bypass_document_validation_bulk_write(self): db = self.db db.test.drop() @@ -1499,24 +1451,6 @@ def test_update_many(self): self.assertRaises(InvalidOperation, lambda: result.upserted_id) self.assertFalse(result.acknowledged) - # MongoDB >= 3.5.8 allows dotted fields in updates - @client_context.require_version_max(3, 5, 7) - def test_update_with_invalid_keys(self): - self.db.drop_collection("test") - self.assertTrue(self.db.test.insert_one({"hello": "world"})) - doc = self.db.test.find_one() - doc['a.b'] = 'c' - - # Replace - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"hello": "world"}, doc) - # Upsert - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"foo": "bar"}, doc, upsert=True) - - # Check that the last two ops didn't actually modify anything - self.assertTrue('a.b' not in self.db.test.find_one()) - def test_update_check_keys(self): self.db.drop_collection("test") self.assertTrue(self.db.test.insert_one({"hello": "world"})) @@ -2041,19 +1975,6 @@ def __getattr__(self, name): c.insert_one({'bad': bad}) self.assertEqual('bar', c.find_one()['bad']['foo']) - @client_context.require_version_max(3, 5, 5) - def test_array_filters_unsupported(self): - c = self.db.test - with self.assertRaises(ConfigurationError): - c.update_one( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - with self.assertRaises(ConfigurationError): - c.update_many( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - with self.assertRaises(ConfigurationError): - c.find_one_and_update( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - def test_array_filters_validation(self): # array_filters must be a list. c = self.db.test @@ -2136,54 +2057,40 @@ def test_find_one_and_write_concern(self): # Authenticate the client and throw out auth commands from the listener. db.command('ping') results.clear() - if client_context.version.at_least(3, 1, 9, -1): - c_w0.find_one_and_update( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() + c_w0.find_one_and_update( + {'_id': 1}, {'$set': {'foo': 'bar'}}) + self.assertEqual( + {'w': 0}, results['started'][0].command['writeConcern']) + results.clear() - c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() + c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) + self.assertEqual( + {'w': 0}, results['started'][0].command['writeConcern']) + results.clear() - c_w0.find_one_and_delete({'_id': 1}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() + c_w0.find_one_and_delete({'_id': 1}) + self.assertEqual( + {'w': 0}, results['started'][0].command['writeConcern']) + results.clear() - # Test write concern errors. - if client_context.is_rs: - c_wc_error = db.get_collection( - 'test', - write_concern=WriteConcern( - w=len(client_context.nodes) + 1)) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_update, - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_replace, - {'w': 0}, results['started'][0].command['writeConcern']) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_delete, - {'w': 0}, results['started'][0].command['writeConcern']) - results.clear() - else: - c_w0.find_one_and_update( + # Test write concern errors. + if client_context.is_rs: + c_wc_error = db.get_collection( + 'test', + write_concern=WriteConcern( + w=len(client_context.nodes) + 1)) + self.assertRaises( + WriteConcernError, + c_wc_error.find_one_and_update, {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - - c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertNotIn('writeConcern', results['started'][0].command) - results.clear() - - c_w0.find_one_and_delete({'_id': 1}) - self.assertNotIn('writeConcern', results['started'][0].command) + self.assertRaises( + WriteConcernError, + c_wc_error.find_one_and_replace, + {'w': 0}, results['started'][0].command['writeConcern']) + self.assertRaises( + WriteConcernError, + c_wc_error.find_one_and_delete, + {'w': 0}, results['started'][0].command['writeConcern']) results.clear() c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}}) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index be8168edd7..7ff80d75e5 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -122,17 +122,6 @@ def run_scenario(self): tuple(coll.find(**args)) except OperationFailure: pass - # Wait for the killCursors thread to run if necessary. - if 'limit' in args and client_context.version[:2] < (3, 1): - self.client._kill_cursors_executor.wake() - started = self.listener.results['started'] - succeeded = self.listener.results['succeeded'] - wait_until( - lambda: started[-1].command_name == 'killCursors', - "publish a start event for killCursors.") - wait_until( - lambda: succeeded[-1].command_name == 'killCursors', - "publish a succeeded event for killCursors.") else: try: getattr(coll, name)(**args) diff --git a/test/test_cursor.py b/test/test_cursor.py index 09020a6de7..d56f9fc27d 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -198,7 +198,6 @@ def test_max_time_ms(self): "maxTimeAlwaysTimeOut", mode="off") - @client_context.require_version_min(3, 1, 9, -1) def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() @@ -573,12 +572,8 @@ def cursor_count(cursor, expected_count): cur = db.test.find().batch_size(1) next(cur) - if client_context.version.at_least(3, 1, 9): - # find command batchSize should be 1 - self.assertEqual(0, len(cur._Cursor__data)) - else: - # OP_QUERY ntoreturn should be 2 - self.assertEqual(1, len(cur._Cursor__data)) + # find command batchSize should be 1 + self.assertEqual(0, len(cur._Cursor__data)) next(cur) self.assertEqual(0, len(cur._Cursor__data)) next(cur) @@ -1169,27 +1164,19 @@ def test_with_statement(self): @client_context.require_no_mongos def test_comment(self): - # MongoDB 3.1.5 changed the ns for commands. - regex = {'$regex': r'pymongo_test.(\$cmd|test)'} - - if client_context.version.at_least(3, 5, 8, -1): - query_key = "command.comment" - elif client_context.version.at_least(3, 1, 8, -1): - query_key = "query.comment" - else: - query_key = "query.$comment" - self.client.drop_database(self.db) self.db.command('profile', 2) # Profile ALL commands. try: list(self.db.test.find().comment('foo')) count = self.db.system.profile.count_documents( - {'ns': 'pymongo_test.test', 'op': 'query', query_key: 'foo'}) + {'ns': 'pymongo_test.test', 'op': 'query', + 'command.comment': 'foo'}) self.assertEqual(count, 1) self.db.test.find().comment('foo').distinct('type') count = self.db.system.profile.count_documents( - {'ns': regex, 'op': 'command', 'command.distinct': 'test', + {'ns': 'pymongo_test.test', 'op': 'command', + 'command.distinct': 'test', 'command.comment': 'foo'}) self.assertEqual(count, 1) finally: @@ -1266,7 +1253,6 @@ def test_delete_not_initialized(self): cursor = Cursor.__new__(Cursor) # Skip calling __init__ cursor.__del__() # no error - @client_context.require_version_min(3, 6) def test_getMore_does_not_send_readPreference(self): listener = AllowListEventListener('find', 'getMore') client = rs_or_single_client( @@ -1408,16 +1394,9 @@ def test_get_item(self): with self.assertRaises(InvalidOperation): self.db.test.find_raw_batches()[0] - @client_context.require_version_min(3, 4) def test_collation(self): next(self.db.test.find_raw_batches(collation=Collation('en_US'))) - @client_context.require_version_max(3, 2) - def test_collation_error(self): - with self.assertRaises(ConfigurationError): - next(self.db.test.find_raw_batches(collation=Collation('en_US'))) - - @client_context.require_version_min(3, 2) @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): self.db.get_collection( @@ -1425,12 +1404,6 @@ def test_read_concern(self): c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) - @client_context.require_version_max(3, 1) - def test_read_concern_error(self): - c = self.db.get_collection("test", read_concern=ReadConcern("majority")) - with self.assertRaises(ConfigurationError): - next(c.find_raw_batches()) - def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) @@ -1588,15 +1561,9 @@ def test_get_item(self): with self.assertRaises(InvalidOperation): self.db.test.aggregate_raw_batches([])[0] - @client_context.require_version_min(3, 4) def test_collation(self): next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) - @client_context.require_version_max(3, 2) - def test_collation_error(self): - with self.assertRaises(ConfigurationError): - next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) - def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 619e4c10df..83d8c8a2c5 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -790,7 +790,6 @@ def run_test(doc_cls): class TestCollectionChangeStreamsWCustomTypes( IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod - @client_context.require_version_min(3, 6, 0) @client_context.require_no_mmap @client_context.require_no_standalone def setUpClass(cls): diff --git a/test/test_database.py b/test/test_database.py index 4813f8d100..4adccc1b58 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -204,11 +204,8 @@ def test_list_collection_names_filter(self): self.assertIn("capped", names) self.assertIn("non_capped", names) command = results["started"][0].command - if client_context.version >= (3, 0): - self.assertIn("nameOnly", command) - self.assertTrue(command["nameOnly"]) - else: - self.assertNotIn("nameOnly", command) + self.assertIn("nameOnly", command) + self.assertTrue(command["nameOnly"]) def test_list_collections(self): self.client.drop_database("pymongo_test") @@ -324,7 +321,7 @@ def test_drop_collection(self): db.drop_collection(db.test.doesnotexist) - if client_context.version.at_least(3, 3, 9) and client_context.is_rs: + if client_context.is_rs: db_wc = Database(self.client, 'pymongo_test', write_concern=IMPOSSIBLE_WRITE_CONCERN) with self.assertRaises(WriteConcernError): @@ -377,19 +374,15 @@ def test_command(self): self.assertEqualReply(second, third) # We use 'aggregate' as our example command, since it's an easy way to - # retrieve a BSON regex from a collection using a command. But until - # MongoDB 2.3.2, aggregation turned regexes into strings: SERVER-6470. - # Note: MongoDB 3.5.2 requires the 'cursor' or 'explain' option for - # aggregate. - @client_context.require_version_max(3, 5, 0) + # retrieve a BSON regex from a collection using a command. def test_command_with_regex(self): db = self.client.pymongo_test db.test.drop() db.test.insert_one({'r': re.compile('.*')}) db.test.insert_one({'r': Regex('.*')}) - result = db.command('aggregate', 'test', pipeline=[]) - for doc in result['result']: + result = db.command('aggregate', 'test', pipeline=[], cursor={}) + for doc in result['cursor']['firstBatch']: self.assertTrue(isinstance(doc['r'], Regex)) def test_password_digest(self): @@ -647,13 +640,11 @@ def setUp(self): self.result = {"dummy": "dummy field"} self.admin = self.client.admin - @client_context.require_version_min(3, 6, 0) def test_database_aggregation(self): with self.admin.aggregate(self.pipeline) as cursor: result = next(cursor) self.assertEqual(result, self.result) - @client_context.require_version_min(3, 6, 0) @client_context.require_no_mongos def test_database_aggregation_fake_cursor(self): coll_name = "test_output" @@ -680,13 +671,6 @@ def test_database_aggregation_fake_cursor(self): result = wait_until(output_coll.find_one, "read unacknowledged write") self.assertEqual(result["dummy"], self.result["dummy"]) - @client_context.require_version_max(3, 6, 0, -1) - def test_database_aggregation_unsupported(self): - err_msg = r"Database.aggregate\(\) is only supported on MongoDB 3.6\+." - with self.assertRaisesRegex(ConfigurationError, err_msg): - with self.admin.aggregate(self.pipeline) as _: - pass - def test_bool(self): with self.assertRaises(NotImplementedError): bool(Database(self.client, "test")) diff --git a/test/test_decimal128.py b/test/test_decimal128.py index b242b7dfa5..249eae8218 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -27,10 +27,6 @@ class TestDecimal128(unittest.TestCase): def test_round_trip(self): - if not client_context.version.at_least(3, 3, 6): - raise unittest.SkipTest( - 'Round trip test requires MongoDB >= 3.3.6') - coll = client_context.client.pymongo_test.test coll.drop() diff --git a/test/test_encryption.py b/test/test_encryption.py index 4c9e28b585..5b1257966d 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -479,7 +479,6 @@ class TestSpec(SpecRunner): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') - @client_context.require_version_min(3, 6) # SpecRunner requires sessions. def setUpClass(cls): super(TestSpec, cls).setUpClass() diff --git a/test/test_examples.py b/test/test_examples.py index f96ca8b53c..dcf9dd2de3 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -660,7 +660,6 @@ def test_delete(self): self.assertEqual(db.inventory.count_documents({}), 0) - @client_context.require_version_min(3, 5, 11) @client_context.require_replica_set @client_context.require_no_mmap def test_change_streams(self): @@ -762,34 +761,31 @@ def test_aggregate_examples(self): ]) # End Aggregation Example 3 - # $lookup was new in 3.2. The let and pipeline options - # were added in 3.6. - if client_context.version.at_least(3, 6, 0): - # Start Aggregation Example 4 - db.air_alliances.aggregate([ - {"$lookup": { - "from": "air_airlines", - "let": {"constituents": "$airlines"}, - "pipeline": [ - {"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}} - ], - "as": "airlines" - } - }, - {"$project": { - "_id": 0, - "name": 1, - "airlines": { - "$filter": { - "input": "$airlines", - "as": "airline", - "cond": {"$eq": ["$$airline.country", "Canada"]} - } + # Start Aggregation Example 4 + db.air_alliances.aggregate([ + {"$lookup": { + "from": "air_airlines", + "let": {"constituents": "$airlines"}, + "pipeline": [ + {"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}} + ], + "as": "airlines" + } + }, + {"$project": { + "_id": 0, + "name": 1, + "airlines": { + "$filter": { + "input": "$airlines", + "as": "airline", + "cond": {"$eq": ["$$airline.country", "Canada"]} } } } - ]) - # End Aggregation Example 4 + } + ]) + # End Aggregation Example 4 def test_commands(self): db = self.db @@ -817,7 +813,6 @@ def test_index_management(self): ) # End Index Example 1 - @client_context.require_version_min(3, 6, 0) @client_context.require_replica_set def test_misc(self): # Marketing examples @@ -1069,7 +1064,6 @@ def callback(session): class TestCausalConsistencyExamples(IntegrationTest): - @client_context.require_version_min(3, 6, 0) @client_context.require_secondaries_count(1) @client_context.require_no_mmap def test_causal_consistency(self): diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index dd9169f284..1fd82884f1 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -116,7 +116,6 @@ def test_max_staleness_zero(self): self.assertEqual(-1, client.read_preference.max_staleness) self.assertIn("must be a positive integer", str(ctx[0])) - @client_context.require_version_min(3, 3, 6) # SERVER-8858 @client_context.require_replica_set def test_last_write_date(self): # From max-staleness-tests.rst, "Parse lastWriteDate". @@ -138,12 +137,6 @@ def test_last_write_date(self): self.assertGreater(second, first) self.assertLess(second, first + 10) - @client_context.require_version_max(3, 3) - def test_last_write_date_absent(self): - # From max-staleness-tests.rst, "Absent lastWriteDate". - client = rs_or_single_client() - sd = client._topology.select_server(writable_server_selector) - self.assertIsNone(sd.description.last_write_date) if __name__ == "__main__": unittest.main() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index c9f4e5ae76..0d925b04bf 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -860,7 +860,6 @@ def test_insert_many(self): self.assertEqual(6, count) def test_insert_many_unacknowledged(self): - # On legacy servers this uses bulk OP_INSERT. coll = self.client.pymongo_test.test coll.drop() unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) @@ -996,29 +995,6 @@ def test_bulk_write_command_error(self): self.assertEqual(event.failure['code'], 10107) self.assertTrue(event.failure['errmsg']) - @client_context.require_version_max(3, 4, 99) - def test_bulk_write_legacy_network_error(self): - self.listener.results.clear() - - # Make the delete operation run on a closed connection. - self.client.admin.command('ping') - pool = get_pool(self.client) - sock_info = pool.sockets[0] - sock_info.sock.close() - - # Test legacy unacknowledged write network error. - coll = self.client.pymongo_test.get_collection( - 'test', write_concern=WriteConcern(w=0)) - with self.assertRaises(AutoReconnect): - coll.bulk_write([InsertOne({'_id': 1})], ordered=False) - failed = self.listener.results['failed'] - self.assertEqual(1, len(failed)) - event = failed[0] - self.assertEqual(event.command_name, 'insert') - self.assertIsInstance(event.failure, dict) - self.assertEqual(event.failure['errtype'], 'AutoReconnect') - self.assertTrue(event.failure['errmsg']) - def test_write_errors(self): coll = self.client.pymongo_test.test coll.drop() diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 0b9c742a11..81a6863f5e 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -15,7 +15,7 @@ """Test the read_concern module.""" from bson.son import SON -from pymongo.errors import ConfigurationError +from pymongo.errors import OperationFailure from pymongo.read_concern import ReadConcern from test import client_context, IntegrationTest @@ -64,17 +64,13 @@ def test_read_concern_uri(self): client = rs_or_single_client(uri, connect=False) self.assertEqual(ReadConcern('majority'), client.read_concern) - @client_context.require_version_max(3, 1) def test_invalid_read_concern(self): coll = self.db.get_collection( - 'coll', read_concern=ReadConcern('majority')) - with self.assertRaisesRegex( - ConfigurationError, - 'read concern level of majority is not valid ' - 'with a max wire version of [0-3]'): + 'coll', read_concern=ReadConcern('unknown')) + # We rely on the server to validate read concern. + with self.assertRaises(OperationFailure): coll.find_one() - @client_context.require_version_min(3, 1, 9, -1) def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll @@ -93,7 +89,6 @@ def test_find_command(self): ('readConcern', {'level': 'local'})]), self.listener.results['started'][0].command) - @client_context.require_version_min(3, 1, 9, -1) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index fcd9989918..128b41d5fa 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -579,8 +579,6 @@ def test_read_preference_document_hedge(self): self.assertEqual( out, SON([("$query", {}), ("$readPreference", pref.document)])) - # Require OP_MSG so that $readPreference is visible in the command event. - @client_context.require_version_min(3, 6) def test_send_hedge(self): cases = { 'primaryPreferred': PrimaryPreferred, @@ -697,7 +695,6 @@ def test_mongos(self): self.assertEqual(last_id, results[0]["_id"]) @client_context.require_mongos - @client_context.require_version_min(3, 3, 12) def test_mongos_max_staleness(self): # Sanity check that we're sending maxStalenessSeconds coll = client_context.client.pymongo_test.get_collection( diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 49f1823d2b..b334ee9359 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -123,8 +123,6 @@ def insert_command(): ('delete_many', lambda: coll.delete_many({})), ('bulk_write', lambda: coll.bulk_write([InsertOne({})])), ('command', insert_command), - ] - ops_require_34 = [ ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])), # SERVER-46668 Delete all the documents in the collection to # workaround a hang in createIndexes. @@ -136,11 +134,9 @@ def insert_command(): ('rename', lambda: coll.rename('new')), ('drop', lambda: db.new.drop()), ] - if client_context.version > (3, 4): - ops.extend(ops_require_34) - # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. - if client_context.version[:2] != (3, 6): - ops.append(('drop_database', lambda: client.drop_database(db))) + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. + if client_context.version[:2] != (3, 6): + ops.append(('drop_database', lambda: client.drop_database(db))) for name, f in ops: # Ensure insert_many and bulk_write still raise BulkWriteError. @@ -161,8 +157,6 @@ def test_raise_write_concern_error(self): self.assertWriteOpsRaise( WriteConcern(w=client_context.w+1, wtimeout=1), WriteConcernError) - # MongoDB 3.2 introduced the stopReplProducer failpoint. - @client_context.require_version_min(3, 2) @client_context.require_secondaries_count(1) @client_context.require_test_commands def test_raise_wtimeout(self): diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 32e0c32a90..bf9f08721a 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -165,7 +165,6 @@ def tearDownClass(cls): cls.client.close() super(TestRetryableWritesMMAPv1, cls).tearDownClass() - @client_context.require_version_min(3, 5) @client_context.require_no_standalone def test_actionable_error_message(self): if client_context.storage_engine != 'mmapv1': @@ -202,15 +201,13 @@ def tearDownClass(cls): super(TestRetryableWrites, cls).tearDownClass() def setUp(self): - if (client_context.version.at_least(3, 5) and client_context.is_rs - and client_context.test_commands_enabled): + if client_context.is_rs and client_context.test_commands_enabled: self.client.admin.command(SON([ ('configureFailPoint', 'onPrimaryTransactionalWrite'), ('mode', 'alwaysOn')])) def tearDown(self): - if (client_context.version.at_least(3, 5) and client_context.is_rs - and client_context.test_commands_enabled): + if client_context.is_rs and client_context.test_commands_enabled: self.client.admin.command(SON([ ('configureFailPoint', 'onPrimaryTransactionalWrite'), ('mode', 'off')])) @@ -230,7 +227,6 @@ def test_supported_single_statement_no_retry(self): 'txnNumber', event.command, '%s sent txnNumber with %s' % (msg, event.command_name)) - @client_context.require_version_min(3, 5) @client_context.require_no_standalone def test_supported_single_statement_supported_cluster(self): for method, args, kwargs in retryable_single_statement_ops( @@ -271,8 +267,7 @@ def test_supported_single_statement_supported_cluster(self): initial_transaction_id, msg) def test_supported_single_statement_unsupported_cluster(self): - if client_context.version.at_least(3, 5) and ( - client_context.is_rs or client_context.is_mongos): + if client_context.is_rs or client_context.is_mongos: raise SkipTest('This cluster supports retryable writes') for method, args, kwargs in retryable_single_statement_ops( @@ -319,7 +314,6 @@ def test_server_selection_timeout_not_retried(self): method(*args, **kwargs) self.assertEqual(len(listener.results['started']), 0, msg) - @client_context.require_version_min(3, 5) @client_context.require_replica_set @client_context.require_test_commands def test_retry_timeout_raises_original_error(self): @@ -352,7 +346,6 @@ def raise_error(*args, **kwargs): method(*args, **kwargs) self.assertEqual(len(listener.results['started']), 1, msg) - @client_context.require_version_min(3, 5) @client_context.require_replica_set @client_context.require_test_commands def test_batch_splitting(self): @@ -388,7 +381,6 @@ def test_batch_splitting(self): } self.assertEqual(bulk_result.bulk_api_result, expected_result) - @client_context.require_version_min(3, 5) @client_context.require_replica_set @client_context.require_test_commands def test_batch_splitting_retry_fails(self): @@ -572,7 +564,6 @@ def test_pool_paused_error_is_retryable(self): # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): - @client_context.require_version_min(3, 6) @client_context.require_replica_set @client_context.require_no_mmap def test_increment_transaction_id_without_sending_command(self): diff --git a/test/test_session.py b/test/test_session.py index 608024e0b1..c8c80069a7 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -1020,14 +1020,6 @@ def test_cluster_time_no_server_support(self): self.assertIsNone(after_cluster_time) -class TestSessionsNotSupported(IntegrationTest): - @client_context.require_version_max(3, 5, 10) - def test_sessions_not_supported(self): - with self.assertRaisesRegex( - ConfigurationError, "Sessions are not supported"): - self.client.start_session() - - class TestClusterTime(IntegrationTest): def setUp(self): super(TestClusterTime, self).setUp() diff --git a/test/test_ssl.py b/test/test_ssl.py index 7c3cc4bacb..45614034e1 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -513,19 +513,14 @@ def test_mongodb_x509_auth(self): tlsCertificateKeyFile=CLIENT_PEM, event_listeners=[listener]) - if client_context.version.at_least(3, 3, 12): - # No error - auth.pymongo_test.test.find_one() - names = listener.started_command_names() - if client_context.version.at_least(4, 4, -1): - # Speculative auth skips the authenticate command. - self.assertEqual(names, ['find']) - else: - self.assertEqual(names, ['authenticate', 'find']) + # No error + auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. + self.assertEqual(names, ['find']) else: - # Should require a username - with self.assertRaises(ConfigurationError): - auth.pymongo_test.test.find_one() + self.assertEqual(names, ['authenticate', 'find']) uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( @@ -542,14 +537,8 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) - if client_context.version.at_least(3, 3, 12): - # No error - client.pymongo_test.test.find_one() - else: - # Should require a username - with self.assertRaises(ConfigurationError): - client.pymongo_test.test.find_one() - + # No error + client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( diff --git a/test/test_topology.py b/test/test_topology.py index 6881df2fcd..a309d622ab 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -522,10 +522,11 @@ def test_wire_version(self): 'setName': 'rs', 'hosts': ['a'], 'minWireVersion': 1, - 'maxWireVersion': 5}) + 'maxWireVersion': 6}) self.assertEqual(server.description.min_wire_version, 1) - self.assertEqual(server.description.max_wire_version, 5) + self.assertEqual(server.description.max_wire_version, 6) + t.select_servers(any_server_selector) # Incompatible. got_hello(t, address, { From d559b28efbb05a2a9c15a992efc17a01dacb13b0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Sep 2021 17:50:22 -0700 Subject: [PATCH 0471/2111] PYTHON-2905 Fix CSFLE after UUID decoding changes (#732) --- pymongo/encryption.py | 2 +- test/test_encryption.py | 5 +++-- test/utils_spec_runner.py | 8 ++------ 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 9ec2d22804..2c3f8eb0f7 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -208,7 +208,7 @@ def insert_data_key(self, data_key): :Returns: The _id of the inserted data key document. """ - raw_doc = RawBSONDocument(data_key) + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) data_key_id = raw_doc.get('_id') if not isinstance(data_key_id, uuid.UUID): raise TypeError('data_key _id must be a UUID') diff --git a/test/test_encryption.py b/test/test_encryption.py index 5b1257966d..67681daba8 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -419,7 +419,8 @@ def test_codec_options(self): self.assertNotEqual(encrypted_standard, encrypted_legacy) # Test that codec_options is applied during decryption. self.assertEqual( - client_encryption_legacy.decrypt(encrypted_standard), value) + client_encryption_legacy.decrypt(encrypted_standard), + Binary.from_uuid(value)) self.assertNotEqual( client_encryption.decrypt(encrypted_legacy), value) @@ -848,7 +849,7 @@ def kms_providers(): @staticmethod def fix_up_schema(json_schema): """Remove deprecated symbol/dbPointer types from json schema.""" - for key in json_schema['properties'].keys(): + for key in list(json_schema['properties']): if '_symbol_' in key or '_dbPointer_' in key: del json_schema['properties'][key] return json_schema diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index f3ab3b8952..040c507aeb 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -20,8 +20,7 @@ from collections import abc from bson import decode, encode -from bson.binary import Binary, STANDARD -from bson.codec_options import CodecOptions +from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON @@ -634,13 +633,10 @@ def end_sessions(sessions): s.end_session() -OPTS = CodecOptions(document_class=dict, uuid_representation=STANDARD) - - def decode_raw(val): """Decode RawBSONDocuments in the given container.""" if isinstance(val, (list, abc.Mapping)): - return decode(encode({'v': val}, codec_options=OPTS), OPTS)['v'] + return decode(encode({'v': val}))['v'] return val From 5125bca2dffbd4a440e6f183c647bb8dc1c6316b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 17 Sep 2021 14:20:51 -0700 Subject: [PATCH 0472/2111] PYTHON-2899 Fix "no server" tests and cleanup docs (#736) --- doc/changelog.rst | 2 +- doc/migrate-to-pymongo4.rst | 22 ++++++---------------- test/test_decimal128.py | 1 + 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8adb9ae7fc..6364985bb6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -125,7 +125,7 @@ Breaking Changes in 4.0 ignored by pip. - ``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, now defaults to ``False`` instead of ``True``. ``json_util.loads`` now -decodes datetime as naive by default. + decodes datetime as naive by default. - ``directConnection`` URI option and keyword argument to :class:`~pymongo.mongo_client.MongoClient` defaults to ``False`` instead of ``None``, allowing for the automatic discovery of replica sets. This means that if you diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index d17c13e753..7cebec9d83 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -141,33 +141,23 @@ instead. For example:: MongoClient.unlock is removed ............................. -Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Users of MongoDB -version 3.2 or newer can run the `fsyncUnlock command`_ directly with -:meth:`~pymongo.database.Database.command`:: +Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Run the +`fsyncUnlock command`_ directly with +:meth:`~pymongo.database.Database.command` instead. For example:: client.admin.command('fsyncUnlock') -Users of MongoDB version 2.6 and 3.0 can query the "unlock" virtual -collection:: - - client.admin["$cmd.sys.unlock"].find_one() - .. _fsyncUnlock command: https://docs.mongodb.com/manual/reference/command/fsyncUnlock/ MongoClient.is_locked is removed ................................ -Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Users of MongoDB -version 3.2 or newer can run the `currentOp command`_ directly with -:meth:`~pymongo.database.Database.command`:: +Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Run the +`currentOp command`_ directly with +:meth:`~pymongo.database.Database.command` instead. For example:: is_locked = client.admin.command('currentOp').get('fsyncLock') -Users of MongoDB version 2.6 and 3.0 can query the "inprog" virtual -collection:: - - is_locked = client.admin["$cmd.sys.inprog"].find_one().get('fsyncLock') - .. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ MongoClient.database_names is removed diff --git a/test/test_decimal128.py b/test/test_decimal128.py index 249eae8218..4ff25935dd 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -26,6 +26,7 @@ class TestDecimal128(unittest.TestCase): + @client_context.require_connection def test_round_trip(self): coll = client_context.client.pymongo_test.test coll.drop() From f1d3f9ca2f22b8e0413899225737cee7cc4335df Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Sat, 18 Sep 2021 17:28:35 -0700 Subject: [PATCH 0473/2111] PYTHON-2473 Delete Travis config file --- .travis.yml | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5f5499235b..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: python - -python: - - 3.6 - - 3.7 - - 3.8 - -services: - - mongodb - -script: PYMONGO_MUST_CONNECT=1 python setup.py test - From fcedc510e1039ab47b1c2d52f1392f298feaf81b Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 22 Sep 2021 12:18:19 -0700 Subject: [PATCH 0474/2111] PYTHON-2501 Remove iteritems from son.SON (#731) --- bson/son.py | 23 ++++++++--------------- doc/changelog.rst | 3 +++ doc/migrate-to-pymongo4.rst | 15 +++++++++++++++ pymongo/collection.py | 2 +- pymongo/helpers.py | 2 ++ 5 files changed, 29 insertions(+), 16 deletions(-) diff --git a/bson/son.py b/bson/son.py index a726961293..5a3210fcdb 100644 --- a/bson/son.py +++ b/bson/son.py @@ -77,24 +77,16 @@ def __iter__(self): def has_key(self, key): return key in self.__keys - # third level takes advantage of second level definitions - def iteritems(self): - for k in self: - yield (k, self[k]) - def iterkeys(self): return self.__iter__() # fourth level uses definitions from lower levels def itervalues(self): - for _, v in self.iteritems(): + for _, v in self.items(): yield v def values(self): - return [v for _, v in self.iteritems()] - - def items(self): - return [(key, self[key]) for key in self] + return [v for _, v in self.items()] def clear(self): self.__keys = [] @@ -122,7 +114,7 @@ def pop(self, key, *args): def popitem(self): try: - k, v = next(self.iteritems()) + k, v = next(iter(self.items())) except StopIteration: raise KeyError('container is empty') del self[k] @@ -132,8 +124,8 @@ def update(self, other=None, **kwargs): # Make progressively weaker assumptions about "other" if other is None: pass - elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups - for k, v in other.iteritems(): + elif hasattr(other, 'items'): + for k, v in other.items(): self[k] = v elif hasattr(other, 'keys'): for k in other.keys(): @@ -155,7 +147,8 @@ def __eq__(self, other): regular dictionary is order-insensitive. """ if isinstance(other, SON): - return len(self) == len(other) and self.items() == other.items() + return len(self) == len(other) and list(self.items()) == \ + list(other.items()) return self.to_dict() == other def __ne__(self, other): @@ -189,7 +182,7 @@ def __deepcopy__(self, memo): if val_id in memo: return memo.get(val_id) memo[val_id] = out - for k, v in self.iteritems(): + for k, v in self.items(): if not isinstance(v, RE_TYPE): v = copy.deepcopy(v, memo) out[k] = v diff --git a/doc/changelog.rst b/doc/changelog.rst index 6364985bb6..dcc4c881cc 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -134,6 +134,9 @@ Breaking Changes in 4.0 - The ``hint`` option is now required when using ``min`` or ``max`` queries with :meth:`~pymongo.collection.Collection.find`. - ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. +- :meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather + than a list. +- Removed :meth:`bson.son.SON.iteritems`. - :class:`~pymongo.collection.Collection` and :class:`~pymongo.database.Database` now raises an error upon evaluating as a Boolean, please use the syntax ``if collection is not None:`` or ``if database is not None:`` as diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 7cebec9d83..2906919b27 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -669,6 +669,21 @@ custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and For more information, see the :doc:`custom type example `. +``SON().items()`` now returns ``dict_items`` object. +---------------------------------------------------- +:meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather than +a list. + +``SON().iteritems()`` removed. +------------------------------ +``SON.iteritems()`` now removed. Code that looks like this:: + + for k, v in son.iteritems(): + +Can now be replaced by code that looks like:: + + for k, v in son.items(): + IsMaster is removed ------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index b30bcf22f9..7e8e299088 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1834,7 +1834,7 @@ def index_information(self, session=None): cursor = self.list_indexes(session=session) info = {} for index in cursor: - index["key"] = index["key"].items() + index["key"] = list(index["key"].items()) index = dict(index) info[index.pop("name")] = index return info diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 37d6f59b74..55d53d836e 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -72,6 +72,8 @@ def _index_list(key_or_list, direction=None): else: if isinstance(key_or_list, str): return [(key_or_list, ASCENDING)] + if isinstance(key_or_list, abc.ItemsView): + return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, " "key_or_list must be an instance of list") From c7d80802be949f951e09bde08baffc93909f0aa0 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 23 Sep 2021 14:46:44 -0700 Subject: [PATCH 0475/2111] PYTHON-1853 Empty projections should return the entire document not just the _id (#738) --- doc/changelog.rst | 7 +++++++ doc/migrate-to-pymongo4.rst | 16 ++++++++++++++++ pymongo/collection.py | 4 ++++ pymongo/cursor.py | 2 -- test/test_collection.py | 5 ++++- 5 files changed, 31 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index dcc4c881cc..b50563bc47 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -143,6 +143,13 @@ Breaking Changes in 4.0 opposed to the previous syntax which was simply ``if collection:`` or ``if database:``. You must now explicitly compare with None. +- Empty projections (eg {} or []) for + :meth:`~pymongo.collection.Collection.find`, and + :meth:`~pymongo.collection.Collection.find_one` + are passed to the server as-is rather than the previous behavior which + substituted in a projection of ``{"_id": 1}``. This means that an empty + projection will now return the entire document, not just the ``"_id"`` field. + Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 2906919b27..0ac6d393d2 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -637,6 +637,22 @@ Can be changed to this:: You must now explicitly compare with None. +Collection.find returns entire document with empty projection +............................................................. +Empty projections (eg {} or []) for +:meth:`~pymongo.collection.Collection.find`, and +:meth:`~pymongo.collection.Collection.find_one` +are passed to the server as-is rather than the previous behavior which +substituted in a projection of ``{"_id": 1}``. This means that an empty +projection will now return the entire document, not just the ``"_id"`` field. +To ensure that behavior remains consistent, code like this:: + + coll.find({}, projection={}) + +Can be changed to this:: + + coll.find({}, projection={"_id":1}) + SONManipulator is removed ------------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index 7e8e299088..8632204b81 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1246,6 +1246,10 @@ def find(self, *args, **kwargs): .. versionchanged:: 4.0 Removed the ``modifiers`` option. + Empty projections (eg {} or []) are passed to the server as-is, + rather than the previous behavior which substituted in a + projection of ``{"_id": 1}``. This means that an empty projection + will now return the entire document, not just the ``"_id"`` field. .. versionchanged:: 3.11 Added the ``allow_disk_use`` option. diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 78dffc662b..c38adaf377 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -195,8 +195,6 @@ def __init__(self, collection, filter=None, projection=None, skip=0, allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) if projection is not None: - if not projection: - projection = {"_id": 1} projection = helpers._fields_list_to_dict(projection, "projection") self.__spec = spec diff --git a/test/test_collection.py b/test/test_collection.py index a7d56bf7b6..e81776a6ac 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1707,7 +1707,10 @@ def test_find_one(self): self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) - self.assertEqual(["_id"], list(db.test.find_one(projection=[]))) + self.assertEqual(["_id"], list(db.test.find_one(projection={'_id': + True}))) + self.assertTrue("hello" in list(db.test.find_one(projection={}))) + self.assertTrue("hello" in list(db.test.find_one(projection=[]))) self.assertEqual(None, db.test.find_one({"hello": "foo"})) self.assertEqual(None, db.test.find_one(ObjectId())) From 968ee7ba9662b32106901407bf370178f2e073fe Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 23 Sep 2021 15:57:57 -0700 Subject: [PATCH 0476/2111] PYTHON-2868 Test Serverless behind a load balancer (#742) --- .evergreen/config.yml | 17 +++--- test/__init__.py | 27 +++++---- test/load_balancer/cursors.json | 10 ++++ test/load_balancer/sdam-error-handling.json | 11 +++- test/load_balancer/transactions.json | 15 +++++ test/load_balancer/wait-queue-timeouts.json | 2 +- test/test_client.py | 15 ++--- test/test_cmap.py | 8 ++- test/test_load_balancer.py | 2 +- test/test_unified_format.py | 6 +- test/transactions/unified/mongos-unpin.json | 60 +++++++++++++++++++- test/unified_format.py | 3 +- test/utils.py | 25 ++++---- test/utils_spec_runner.py | 2 +- test/versioned-api/transaction-handling.json | 2 +- 15 files changed, 152 insertions(+), 53 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 44cb584960..f106528185 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -417,9 +417,11 @@ functions: fi if [ -n "${test_serverless}" ]; then export TEST_SERVERLESS=1 - export MONGODB_URI="${MONGODB_URI}" + export MONGODB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}" export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" + export SINGLE_MONGOS_LB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}" + export MULTI_MONGOS_LB_URI="${MULTI_ATLASPROXY_SERVERLESS_URI}" fi PYTHON_BINARY=${PYTHON_BINARY} \ @@ -873,9 +875,10 @@ task_groups: script: | ${PREPARE_SHELL} set +o xtrace - SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ - SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ - SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ + LOADBALANCED=ON \ + SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ + SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ + SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ bash ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh - command: expansions.update params: @@ -887,9 +890,9 @@ task_groups: ${PREPARE_SHELL} set +o xtrace SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ - SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ - SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ - SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \ + SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ + SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ + SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \ bash ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh tasks: - ".serverless" diff --git a/test/__init__.py b/test/__init__.py index c70e854d1f..9e0e972d12 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -107,12 +107,14 @@ db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd elif TEST_SERVERLESS: - res = parse_uri(os.environ["MONGODB_URI"]) - host, port = res['nodelist'].pop(0) - additional_serverless_mongoses = res['nodelist'] + TEST_LOADBALANCER = True + res = parse_uri(SINGLE_MONGOS_LB_URI) + host, port = res['nodelist'][0] db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd TLS_OPTIONS = {'tls': True} + # Spec says serverless tests must be run with compression. + COMPRESSORS = COMPRESSORS or 'zlib' def is_server_resolvable(): @@ -236,7 +238,7 @@ def __init__(self): self.version = Version(-1) # Needs to be comparable with Version self.auth_enabled = False self.test_commands_enabled = False - self.server_parameters = None + self.server_parameters = {} self.is_mongos = False self.mongoses = [] self.is_rs = False @@ -251,7 +253,7 @@ def __init__(self): self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER self.serverless = TEST_SERVERLESS - if self.load_balancer: + if self.load_balancer or self.serverless: self.default_client_options["loadBalanced"] = True if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS @@ -402,7 +404,11 @@ def _init_client(self): self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) - if TEST_SERVERLESS: + if self.serverless: + self.server_parameters = { + 'requireApiVersion': False, + 'enableTestCommands': True, + } self.test_commands_enabled = True self.has_ipv6 = False else: @@ -422,14 +428,11 @@ def _init_client(self): self.is_mongos = (self.hello.get('msg') == 'isdbgrid') if self.is_mongos: - if self.serverless: - self.mongoses.append(self.client.address) - self.mongoses.extend(additional_serverless_mongoses) - else: + address = self.client.address + self.mongoses.append(address) + if not self.serverless: # Check for another mongos on the next port. - address = self.client.address next_address = address[0], address[1] + 1 - self.mongoses.append(address) mongos_client = self._connect( *next_address, **self.default_client_options) if mongos_client: diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json index 4e2a55fd43..e66c46c0c3 100644 --- a/test/load_balancer/cursors.json +++ b/test/load_balancer/cursors.json @@ -902,6 +902,11 @@ }, { "description": "listCollections pins the cursor to a connection", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "listCollections", @@ -1151,6 +1156,11 @@ }, { "description": "change streams pin to a connection", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "createChangeStream", diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 462fa0aac5..8760b723fd 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -127,6 +127,11 @@ "tests": [ { "description": "only connections for a specific serviceId are closed when pools are cleared", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "createFindCursor", @@ -255,7 +260,7 @@ ] }, { - "description": "errors during the initial connection hello are ignore", + "description": "errors during the initial connection hello are ignored", "runOnRequirements": [ { "minServerVersion": "4.9" @@ -274,7 +279,9 @@ }, "data": { "failCommands": [ - "isMaster" + "ismaster", + "isMaster", + "hello" ], "closeConnection": true, "appName": "lbSDAMErrorTestClient" diff --git a/test/load_balancer/transactions.json b/test/load_balancer/transactions.json index add2453848..8cf24f4ca4 100644 --- a/test/load_balancer/transactions.json +++ b/test/load_balancer/transactions.json @@ -607,6 +607,11 @@ }, { "description": "pinned connection is released after a transient non-network CRUD error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -715,6 +720,11 @@ }, { "description": "pinned connection is released after a transient network CRUD error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -831,6 +841,11 @@ }, { "description": "pinned connection is released after a transient non-network commit error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/load_balancer/wait-queue-timeouts.json b/test/load_balancer/wait-queue-timeouts.json index 61575d6706..3dc6e46cff 100644 --- a/test/load_balancer/wait-queue-timeouts.json +++ b/test/load_balancer/wait-queue-timeouts.json @@ -15,7 +15,7 @@ "useMultipleMongoses": true, "uriOptions": { "maxPoolSize": 1, - "waitQueueTimeoutMS": 5 + "waitQueueTimeoutMS": 50 }, "observeEvents": [ "connectionCheckedOutEvent", diff --git a/test/test_client.py b/test/test_client.py index d69025c8b0..cce0286703 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -889,7 +889,7 @@ def test_auth_from_uri(self): "pymongo_test", "user", "pass", roles=['userAdmin', 'readWrite']) with self.assertRaises(OperationFailure): - connected(rs_or_single_client( + connected(rs_or_single_client_noauth( "mongodb://a:b@%s:%d" % (host, port))) # No error. @@ -899,7 +899,7 @@ def test_auth_from_uri(self): # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) with self.assertRaises(OperationFailure): - connected(rs_or_single_client(uri)) + connected(rs_or_single_client_noauth(uri)) # No error. connected(rs_or_single_client_noauth( @@ -923,7 +923,7 @@ def test_username_and_password(self): client_context.create_user("admin", "ad min", "pa/ss") self.addCleanup(client_context.drop_user, "admin", "ad min") - c = rs_or_single_client(username="ad min", password="pa/ss") + c = rs_or_single_client_noauth(username="ad min", password="pa/ss") # Username and password aren't in strings that will likely be logged. self.assertNotIn("ad min", repr(c)) @@ -935,7 +935,8 @@ def test_username_and_password(self): c.server_info() with self.assertRaises(OperationFailure): - rs_or_single_client(username="ad min", password="foo").server_info() + rs_or_single_client_noauth( + username="ad min", password="foo").server_info() @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): @@ -957,11 +958,7 @@ def test_unix_socket(self): if not os.access(mongodb_socket, os.R_OK): raise SkipTest("Socket file is not accessible") - if client_context.auth_enabled: - uri = "mongodb://%s:%s@%s" % (db_user, db_pwd, encoded_socket) - else: - uri = "mongodb://%s" % encoded_socket - + uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. client = rs_or_single_client(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) diff --git a/test/test_cmap.py b/test/test_cmap.py index 38966eb66a..d08cc24a59 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -55,6 +55,7 @@ OvertCommandListener, rs_or_single_client, single_client, + single_client_noauth, TestCreator, wait_until) from test.utils_spec_runner import SpecRunnerThread @@ -334,7 +335,7 @@ def test_3_uri_connection_pool_options(self): opts = '&'.join(['%s=%s' % (k, v) for k, v in self.POOL_OPTIONS.items()]) uri = 'mongodb://%s/?%s' % (client_context.pair, opts) - client = rs_or_single_client(uri, **self.credentials) + client = rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) @@ -395,8 +396,9 @@ def mock_connect(*args, **kwargs): def test_5_check_out_fails_auth_error(self): listener = CMAPListener() - client = single_client(username="notauser", password="fail", - event_listeners=[listener]) + client = single_client_noauth( + username="notauser", password="fail", + event_listeners=[listener]) self.addCleanup(client.close) # Attempt to create a new connection. diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 5c0eadf036..62b40be504 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -38,6 +38,7 @@ class TestLB(IntegrationTest): RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True def test_connections_are_only_returned_once(self): pool = get_pool(self.client) @@ -97,7 +98,6 @@ def _test_no_gc_deadlock(self, create_resource): "failCommands": [ "find", "aggregate" ], - "errorCode": 91, "closeConnection": True, } } diff --git a/test/test_unified_format.py b/test/test_unified_format.py index a32918c4d0..74770b6f3a 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -33,7 +33,8 @@ class_name_prefix='UnifiedTestFormat', expected_failures=[ 'Client side error in command starting transaction', # PYTHON-1894 - ])) + ], + RUN_ON_SERVERLESS=False)) globals().update(generate_test_classes( @@ -43,7 +44,8 @@ bypass_test_generation_errors=True, expected_failures=[ '.*', # All tests expected to fail - ])) + ], + RUN_ON_SERVERLESS=False)) class TestMatchEvaluatorUtil(unittest.TestCase): diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json index 012d1dca85..4f7ae43794 100644 --- a/test/transactions/unified/mongos-unpin.json +++ b/test/transactions/unified/mongos-unpin.json @@ -49,7 +49,7 @@ }, "tests": [ { - "description": "unpin after TransientTransctionError error on commit", + "description": "unpin after TransientTransactionError error on commit", "runOnRequirements": [ { "serverless": "forbid" @@ -108,6 +108,24 @@ "arguments": { "session": "session0" } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" } ] }, @@ -142,7 +160,7 @@ ] }, { - "description": "unpin after TransientTransctionError error on abort", + "description": "unpin after non-transient error on abort", "runOnRequirements": [ { "serverless": "forbid" @@ -192,11 +210,29 @@ "arguments": { "session": "session0" } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" } ] }, { - "description": "unpin after non-transient error on abort", + "description": "unpin after TransientTransactionError error on abort", "operations": [ { "name": "startTransaction", @@ -241,6 +277,24 @@ "arguments": { "session": "session0" } + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "abortTransaction", + "object": "session0" } ] }, diff --git a/test/unified_format.py b/test/unified_format.py index bd818b7077..69e79e3b7e 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -279,7 +279,7 @@ def _create_entity(self, entity_spec): self._listeners[spec['id']] = listener kwargs['event_listeners'] = [listener] if spec.get('useMultipleMongoses'): - if client_context.load_balancer: + if client_context.load_balancer or client_context.serverless: kwargs['h'] = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: kwargs['h'] = client_context.mongos_seeds() @@ -658,6 +658,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): """ SCHEMA_VERSION = Version.from_string('1.5') RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True @staticmethod def should_run_on(run_on_spec): diff --git a/test/utils.py b/test/utils.py index cc5b6d8f05..27fcccccaf 100644 --- a/test/utils.py +++ b/test/utils.py @@ -47,6 +47,7 @@ writable_server_selector) from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern +from pymongo.uri_parser import parse_uri from test import (client_context, db_user, @@ -507,13 +508,10 @@ def create_tests(self): setattr(self._test_class, new_test.__name__, new_test) -def _connection_string(h, authenticate): - if h.startswith("mongodb://"): +def _connection_string(h): + if h.startswith("mongodb://") or h.startswith("mongodb+srv://"): return h - elif client_context.auth_enabled and authenticate: - return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h)) - else: - return "mongodb://%s" % (str(h),) + return "mongodb://%s" % (str(h),) def _mongo_client(host, port, authenticate=True, directConnection=None, @@ -528,10 +526,17 @@ def _mongo_client(host, port, authenticate=True, directConnection=None, client_options['directConnection'] = directConnection client_options.update(kwargs) - client = MongoClient(_connection_string(host, authenticate), port, - **client_options) - - return client + uri = _connection_string(host) + if client_context.auth_enabled and authenticate: + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if (not res['username'] and not res['password'] and + 'username' not in client_options and + 'password' not in client_options): + client_options['username'] = db_user + client_options['password'] = db_pwd + + return MongoClient(uri, port, **client_options) def single_client_noauth(h=None, p=None, **kwargs): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 040c507aeb..d552a18ca2 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -510,7 +510,7 @@ def run_scenario(self, scenario_def, test): use_multi_mongos = test['useMultipleMongoses'] host = None if use_multi_mongos: - if client_context.load_balancer: + if client_context.load_balancer or client_context.serverless: host = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: host = client_context.mongos_seeds() diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json index 5c627bb351..c00c5240ae 100644 --- a/test/versioned-api/transaction-handling.json +++ b/test/versioned-api/transaction-handling.json @@ -1,6 +1,6 @@ { "description": "Transaction handling", - "schemaVersion": "1.1", + "schemaVersion": "1.3", "runOnRequirements": [ { "minServerVersion": "4.9", From b0a26601d4caf06e543692cdb5658f3a4b41c3a3 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 23 Sep 2021 16:02:58 -0700 Subject: [PATCH 0477/2111] PYTHON-2803 Fix typos in auth tests --- test/test_auth.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_auth.py b/test/test_auth.py index 6504ce5777..d0724dce72 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -156,7 +156,7 @@ def test_gssapi_simple(self): client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command('HelloCompat.LEGACY_CMD').get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') if set_name: if not self.service_realm_required: # Without authMechanismProperties @@ -222,7 +222,7 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command('HelloCompat.LEGACY_CMD').get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') if set_name: client = MongoClient(GSSAPI_HOST, GSSAPI_PORT, @@ -270,7 +270,7 @@ def test_sasl_plain(self): client = MongoClient(uri) client.ldap.test.find_one() - set_name = client.admin.command('HelloCompat.LEGACY_CMD').get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') if set_name: client = MongoClient(SASL_HOST, SASL_PORT, From 4b447365d1a89631cb8df9ed954c909ddd4acaf0 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 23 Sep 2021 16:42:01 -0700 Subject: [PATCH 0478/2111] PYTHON-2902 Allow dnspython 2 in srv extra (#740) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 88ec93a007..88df14ee07 100755 --- a/setup.py +++ b/setup.py @@ -282,7 +282,7 @@ def build_extension(self, ext): 'snappy': ['python-snappy'], 'zstd': ['zstandard'], 'aws': ['pymongo-auth-aws<2.0.0'], - 'srv': ["dnspython>=1.16.0,<2.0.0"], + 'srv': ["dnspython>=1.16.0,<3.0.0"], } # GSSAPI extras From c38085269bf615a53594db755dec3876bdfe0f6a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 24 Sep 2021 13:54:40 -0700 Subject: [PATCH 0479/2111] PYTHON-1337 Add __slots__ to commonly used bson classes (#739) --- bson/_helpers.py | 40 ++++++++++++++++++++++ bson/dbref.py | 12 +++---- bson/int64.py | 7 ++++ bson/max_key.py | 7 ++++ bson/min_key.py | 7 ++++ bson/regex.py | 6 ++++ bson/timestamp.py | 5 +++ doc/changelog.rst | 5 +++ doc/migrate-to-pymongo4.rst | 9 +++++ test/test_bson.py | 68 +++++++++++++++++++++++++++++++++++++ 10 files changed, 158 insertions(+), 8 deletions(-) create mode 100644 bson/_helpers.py diff --git a/bson/_helpers.py b/bson/_helpers.py new file mode 100644 index 0000000000..6449705eb2 --- /dev/null +++ b/bson/_helpers.py @@ -0,0 +1,40 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setstate and getstate functions for objects with __slots__, allowing + compatibility with default pickling protocol +""" + + +def _setstate_slots(self, state): + for slot, value in state.items(): + setattr(self, slot, value) + + +def _mangle_name(name, prefix): + if name.startswith("__"): + prefix = "_"+prefix + else: + prefix = "" + return prefix + name + + +def _getstate_slots(self): + prefix = self.__class__.__name__ + ret = dict() + for name in self.__slots__: + mangled_name = _mangle_name(name, prefix) + if hasattr(self, mangled_name): + ret[mangled_name] = getattr(self, mangled_name) + return ret diff --git a/bson/dbref.py b/bson/dbref.py index 2edaf69022..24e97a6698 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -17,12 +17,14 @@ from copy import deepcopy from bson.son import SON - +from bson._helpers import _getstate_slots, _setstate_slots class DBRef(object): """A reference to a document stored in MongoDB. """ - + __slots__ = "__collection", "__id", "__database", "__kwargs" + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 @@ -81,12 +83,6 @@ def __getattr__(self, key): except KeyError: raise AttributeError(key) - # Have to provide __setstate__ to avoid - # infinite recursion since we override - # __getattr__. - def __setstate__(self, state): - self.__dict__.update(state) - def as_doc(self): """Get the SON document representation of this DBRef. diff --git a/bson/int64.py b/bson/int64.py index 4fce5ad7bd..fb9bfe9143 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -24,5 +24,12 @@ class Int64(int): :Parameters: - `value`: the numeric value to represent """ + __slots__ = () _type_marker = 18 + + def __getstate__(self): + return {} + + def __setstate__(self, state): + pass diff --git a/bson/max_key.py b/bson/max_key.py index efdf5c78b9..afd7fcb1b3 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -18,9 +18,16 @@ class MaxKey(object): """MongoDB internal MaxKey type.""" + __slots__ = () _type_marker = 127 + def __getstate__(self): + return {} + + def __setstate__(self, state): + pass + def __eq__(self, other): return isinstance(other, MaxKey) diff --git a/bson/min_key.py b/bson/min_key.py index 7d2b3a6dd9..bcb7f9e60f 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -18,9 +18,16 @@ class MinKey(object): """MongoDB internal MinKey type.""" + __slots__ = () _type_marker = 255 + def __getstate__(self): + return {} + + def __setstate__(self, state): + pass + def __eq__(self, other): return isinstance(other, MinKey) diff --git a/bson/regex.py b/bson/regex.py index 3a9042500b..5cf097f08c 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -18,6 +18,7 @@ import re from bson.son import RE_TYPE +from bson._helpers import _getstate_slots, _setstate_slots def str_flags_to_int(str_flags): @@ -40,6 +41,11 @@ def str_flags_to_int(str_flags): class Regex(object): """BSON regular expression data.""" + __slots__ = ("pattern", "flags") + + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots + _type_marker = 11 @classmethod diff --git a/bson/timestamp.py b/bson/timestamp.py index 5e497f4c8c..69c061d2a5 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -19,6 +19,7 @@ import datetime from bson.tz_util import utc +from bson._helpers import _getstate_slots, _setstate_slots UPPERBOUND = 4294967296 @@ -26,6 +27,10 @@ class Timestamp(object): """MongoDB internal timestamps used in the opLog. """ + __slots__ = ("__time", "__inc") + + __getstate__ = _getstate_slots + __setstate__ = _setstate_slots _type_marker = 17 diff --git a/doc/changelog.rst b/doc/changelog.rst index b50563bc47..d570d8377b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -143,6 +143,11 @@ Breaking Changes in 4.0 opposed to the previous syntax which was simply ``if collection:`` or ``if database:``. You must now explicitly compare with None. +- Classes :class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`, + :class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`, + :class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` all implement + ``__slots__`` now. This means that their attributes are fixed, and new + attributes cannot be added to them at runtime. - Empty projections (eg {} or []) for :meth:`~pymongo.collection.Collection.find`, and :meth:`~pymongo.collection.Collection.find_one` diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 0ac6d393d2..0630f58162 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -840,3 +840,12 @@ The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. See :ref:`handling-uuid-data-example` for details. + +Additional BSON classes implement ``__slots__`` +............................................... + +:class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`, +:class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`, +:class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` now implement +``__slots__`` to reduce memory usage. This means that their attributes are fixed, and new +attributes cannot be added to the object at runtime. \ No newline at end of file diff --git a/test/test_bson.py b/test/test_bson.py index 5c0f163bb8..b91bc7f5fb 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -25,6 +25,7 @@ import sys import tempfile import uuid +import pickle from collections import abc, OrderedDict from io import BytesIO @@ -1053,6 +1054,73 @@ def test_unicode_decode_error_handler(self): self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions( unicode_decode_error_handler="junk")) + def round_trip_pickle(self, obj, pickled_with_older): + pickled_with_older_obj = pickle.loads(pickled_with_older) + for protocol in range(pickle.HIGHEST_PROTOCOL + 1): + pkl = pickle.dumps(obj, protocol=protocol) + obj2 = pickle.loads(pkl) + self.assertEqual(obj, obj2) + self.assertEqual(pickled_with_older_obj, obj2) + + def test_regex_pickling(self): + reg = Regex(".?") + pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' + b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' + b'\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag' + b's\x94K\x00ub.') + self.round_trip_pickle(reg, pickled_with_3) + + def test_timestamp_pickling(self): + ts = Timestamp(0, 1) + pickled_with_3 = (b'\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c' + b'\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)' + b'\x81\x94}\x94(' + b'\x8c\x10_Timestamp__time\x94K\x00\x8c' + b'\x0f_Timestamp__inc\x94K\x01ub.') + self.round_trip_pickle(ts, pickled_with_3) + + def test_dbref_pickling(self): + dbr = DBRef("foo", 5) + pickled_with_3 = (b'\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n' + b'bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}' + b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94' + b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database' + b'\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub.') + self.round_trip_pickle(dbr, pickled_with_3) + + dbr = DBRef("foo", 5, database='db', kwargs1=None) + pickled_with_3 = (b'\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c' + b'\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}' + b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94' + b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database' + b'\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94' + b'\x8c\x07kwargs1\x94Nsub.') + + self.round_trip_pickle(dbr, pickled_with_3) + + def test_minkey_pickling(self): + mink = MinKey() + pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' + b'\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)' + b'\x81\x94.') + + self.round_trip_pickle(mink, pickled_with_3) + + def test_maxkey_pickling(self): + maxk = MaxKey() + pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' + b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' + b'\x81\x94.') + + self.round_trip_pickle(maxk, pickled_with_3) + + def test_int64_pickling(self): + i64 = Int64(9) + pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n' + b'bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94' + b'\x81\x94.') + self.round_trip_pickle(i64, pickled_with_3) + if __name__ == "__main__": unittest.main() From 111552281d9ff87e80278b12ea861d83813b8c0b Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 24 Sep 2021 14:16:29 -0700 Subject: [PATCH 0480/2111] PYTHON-2921 Fix eventlet detection with Python 3.10 (#744) --- test/utils.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/test/utils.py b/test/utils.py index 27fcccccaf..5b6f9fd264 100644 --- a/test/utils.py +++ b/test/utils.py @@ -918,13 +918,9 @@ def gevent_monkey_patched(): def eventlet_monkey_patched(): """Check if eventlet's monkey patching is active.""" - try: - import threading - import eventlet - return (threading.current_thread.__module__ == - 'eventlet.green.threading') - except ImportError: - return False + import threading + return (threading.current_thread.__module__ == + 'eventlet.green.threading') def is_greenthread_patched(): From a80169d1fa49734290ae73f113edf0c7bb589877 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 24 Sep 2021 15:37:24 -0700 Subject: [PATCH 0481/2111] PYTHON-2463 Do not allow a MongoClient to be reused after it is closed (#737) --- doc/changelog.rst | 3 + doc/migrate-to-pymongo4.rst | 6 ++ pymongo/mongo_client.py | 9 ++- pymongo/topology.py | 14 ++++- test/test_client.py | 98 +++++++++++++----------------- test/test_load_balancer.py | 4 -- test/test_mongos_load_balancing.py | 15 ----- test/test_pooling.py | 9 --- test/test_raw_bson.py | 2 + test/test_replica_set_reconfig.py | 21 +++---- test/test_server_selection.py | 2 +- test/test_threads.py | 34 ----------- test/unified_format.py | 2 +- 13 files changed, 79 insertions(+), 140 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index d570d8377b..b203140127 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -143,6 +143,9 @@ Breaking Changes in 4.0 opposed to the previous syntax which was simply ``if collection:`` or ``if database:``. You must now explicitly compare with None. +- :class:`~pymongo.mongo_client.MongoClient` cannot execute any operations + after being closed. The previous behavior would simply reconnect. However, + now you must create a new instance. - Classes :class:`~bson.int64.Int64`, :class:`~bson.min_key.MinKey`, :class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`, :class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` all implement diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 0630f58162..eea17e8bcc 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -180,6 +180,12 @@ can be changed to this:: now defaults to ``False`` instead of ``True``. ``json_util.loads`` now decodes datetime as naive by default. +MongoClient cannot execute operations after ``close()`` +....................................................... + +:class:`~pymongo.mongo_client.MongoClient` cannot execute any operations +after being closed. The previous behavior would simply reconnect. However, +now you must create a new instance. Database -------- diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index db56187b77..f105b1023a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -704,7 +704,6 @@ def __init__( self.__kill_cursors_queue = [] self._event_listeners = options.pool_options.event_listeners - super(MongoClient, self).__init__(options.codec_options, options.read_preference, options.write_concern, @@ -1127,10 +1126,10 @@ def close(self): sending one or more endSessions commands. Close all sockets in the connection pools and stop the monitor threads. - If this instance is used again it will be automatically re-opened and - the threads restarted unless auto encryption is enabled. A client - enabled with auto encryption cannot be used again after being closed; - any attempt will raise :exc:`~.errors.InvalidOperation`. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. .. versionchanged:: 3.6 End all server sessions created by this client. diff --git a/pymongo/topology.py b/pymongo/topology.py index 8a2818b5c7..9ca3029bf0 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -33,7 +33,8 @@ OperationFailure, PyMongoError, ServerSelectionTimeoutError, - WriteError) + WriteError, + InvalidOperation) from pymongo.hello import Hello from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions @@ -112,6 +113,7 @@ def __init__(self, topology_settings): # Store the seed list to help diagnose errors in _error_message(). self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False + self._closed = False self._lock = threading.Lock() self._condition = self._settings.condition_class(self._lock) self._servers = {} @@ -461,7 +463,9 @@ def update_pool(self, all_credentials): raise def close(self): - """Clear pools and terminate monitors. Topology reopens on demand.""" + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. """ with self._lock: for server in self._servers.values(): server.close() @@ -477,6 +481,7 @@ def close(self): self._srv_monitor.close() self._opened = False + self._closed = True # Publish only after releasing the lock. if self._publish_tp: @@ -550,6 +555,11 @@ def _ensure_opened(self): Hold the lock when calling this. """ + if self._closed: + raise InvalidOperation("Once a MongoClient is closed, " + "all operations will fail. Please create " + "a new client object if you wish to " + "reconnect.") if not self._opened: self._opened = True self._update_servers() diff --git a/test/test_client.py b/test/test_client.py index cce0286703..993c58267c 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -50,7 +50,8 @@ NetworkTimeout, OperationFailure, ServerSelectionTimeoutError, - WriteConcernError) + WriteConcernError, + InvalidOperation) from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient from pymongo.monitoring import (ServerHeartbeatListener, @@ -772,28 +773,22 @@ def test_drop_database(self): self.assertNotIn("pymongo_test2", dbs) def test_close(self): - coll = self.client.pymongo_test.bar - - self.client.close() - self.client.close() - - coll.count_documents({}) - - self.client.close() - self.client.close() - - coll.count_documents({}) + test_client = rs_or_single_client() + coll = test_client.pymongo_test.bar + test_client.close() + self.assertRaises(InvalidOperation, coll.count_documents, {}) def test_close_kills_cursors(self): if sys.platform.startswith('java'): # We can't figure out how to make this test reliable with Jython. raise SkipTest("Can't test with Jython") + test_client = rs_or_single_client() # Kill any cursors possibly queued up by previous tests. gc.collect() - self.client._process_periodic_tasks() + test_client._process_periodic_tasks() # Add some test data. - coll = self.client.pymongo_test.test_close_kills_cursors + coll = test_client.pymongo_test.test_close_kills_cursors docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) @@ -811,13 +806,13 @@ def test_close_kills_cursors(self): gc.collect() # Close the client and ensure the topology is closed. - self.assertTrue(self.client._topology._opened) - self.client.close() - self.assertFalse(self.client._topology._opened) - + self.assertTrue(test_client._topology._opened) + test_client.close() + self.assertFalse(test_client._topology._opened) + test_client = rs_or_single_client() # The killCursors task should not need to re-open the topology. - self.client._process_periodic_tasks() - self.assertFalse(self.client._topology._opened) + test_client._process_periodic_tasks() + self.assertTrue(test_client._topology._opened) def test_close_stops_kill_cursors_thread(self): client = rs_client() @@ -828,12 +823,9 @@ def test_close_stops_kill_cursors_thread(self): client.close() self.assertTrue(client._kill_cursors_executor._stopped) - # Reusing the closed client should restart the thread. - client.admin.command('ping') - self.assertFalse(client._kill_cursors_executor._stopped) - - # Again, closing the client should stop the thread. - client.close() + # Reusing the closed client should raise an InvalidOperation error. + self.assertRaises(InvalidOperation, client.admin.command, 'ping') + # Thread is still stopped. self.assertTrue(client._kill_cursors_executor._stopped) def test_uri_connect_option(self): @@ -1128,12 +1120,13 @@ def test_contextlib(self): with contextlib.closing(client): self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) - self.assertEqual(1, len(get_pool(client).sockets)) - self.assertEqual(0, len(get_pool(client).sockets)) - + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() + client = rs_or_single_client() with client as client: self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) - self.assertEqual(0, len(get_pool(client).sockets)) + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() def test_interrupt_signal(self): if sys.platform.startswith('java'): @@ -1787,35 +1780,26 @@ def test_max_bson_size(self): class TestMongoClientFailover(MockClientTest): def test_discover_primary(self): - # Disable background refresh. - with client_knobs(heartbeat_frequency=999999): - c = MockClient( - standalones=[], - members=['a:1', 'b:2', 'c:3'], - mongoses=[], - host='b:2', # Pass a secondary. - replicaSet='rs') - self.addCleanup(c.close) - - wait_until(lambda: len(c.nodes) == 3, 'connect') - self.assertEqual(c.address, ('a', 1)) - - # Fail over. - c.kill_host('a:1') - c.mock_primary = 'b:2' - - c.close() - self.assertEqual(0, len(c.nodes)) - - t = c._get_topology() - t.select_servers(writable_server_selector) # Reconnect. - self.assertEqual(c.address, ('b', 2)) + c = MockClient( + standalones=[], + members=['a:1', 'b:2', 'c:3'], + mongoses=[], + host='b:2', # Pass a secondary. + replicaSet='rs', + heartbeatFrequencyMS=500) + self.addCleanup(c.close) - # a:1 not longer in nodes. - self.assertLess(len(c.nodes), 3) + wait_until(lambda: len(c.nodes) == 3, 'connect') - # c:3 is rediscovered. - t.select_server_by_address(('c', 3)) + self.assertEqual(c.address, ('a', 1)) + # Fail over. + c.kill_host('a:1') + c.mock_primary = 'b:2' + wait_until(lambda: c.address == ('b', 2), "wait for server " + "address to be " + "updated") + # a:1 not longer in nodes. + self.assertLess(len(c.nodes), 3) def test_reconnect(self): # Verify the node list isn't forgotten during a network failure. diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 62b40be504..da77734e92 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -62,10 +62,6 @@ def test_unpin_committed_transaction(self): self.assertEqual(pool.active_sockets, 1) # Still pinned. self.assertEqual(pool.active_sockets, 0) # Unpinned. - def test_client_can_be_reopened(self): - self.client.close() - self.db.test.find_one({}) - @client_context.require_failCommand_fail_point def test_cursor_gc(self): def create_resource(coll): diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index e67b1186e3..575bc458c5 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -92,21 +92,6 @@ def test_lazy_connect(self): do_simple_op(client, nthreads) wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') - def test_reconnect(self): - nthreads = 10 - client = connected(self.mock_client()) - - # connected() ensures we've contacted at least one mongos. Wait for - # all of them. - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') - - # Trigger reconnect. - client.close() - do_simple_op(client, nthreads) - - wait_until(lambda: len(client.nodes) == 3, - 'reconnect to all mongoses') - def test_failover(self): nthreads = 10 client = connected(self.mock_client(localThresholdMS=0.001)) diff --git a/test/test_pooling.py b/test/test_pooling.py index becbacc1ef..266e080ca8 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -102,12 +102,6 @@ def run_mongo_thread(self): raise AssertionError("Should have raised DuplicateKeyError") -class Disconnect(MongoThread): - def run_mongo_thread(self): - for _ in range(N): - self.client.close() - - class SocketGetter(MongoThread): """Utility for TestPooling. @@ -198,9 +192,6 @@ def test_max_pool_size_validation(self): def test_no_disconnect(self): run_cases(self.c, [NonUnique, Unique, InsertOneAndFind]) - def test_disconnect(self): - run_cases(self.c, [InsertOneAndFind, Disconnect, Unique]) - def test_pool_reuses_open_socket(self): # Test Pool's _check_closed() method doesn't close a healthy socket. cx_pool = self.create_pool(max_pool_size=10) diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index 7fb53c6da9..7e1bf6f837 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -25,6 +25,7 @@ from bson.raw_bson import RawBSONDocument, DEFAULT_RAW_BSON_OPTIONS from bson.son import SON from test import client_context, unittest +from test.utils import rs_or_single_client from test.test_client import IntegrationTest @@ -43,6 +44,7 @@ class TestRawBSONDocument(IntegrationTest): @classmethod def setUpClass(cls): super(TestRawBSONDocument, cls).setUpClass() + client_context.client = rs_or_single_client() cls.client = client_context.client def tearDown(self): diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index a25d7af169..f19a32ea4e 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -18,7 +18,7 @@ sys.path[0:0] = [""] -from pymongo.errors import ConnectionFailure, AutoReconnect +from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError from pymongo import ReadPreference from test import unittest, client_context, client_knobs, MockClientTest from test.pymongo_mocks import MockClient @@ -42,13 +42,10 @@ def test_client(self): mongoses=[], host='a:1,b:2,c:3', replicaSet='rs', - serverSelectionTimeoutMS=100) + serverSelectionTimeoutMS=100, + connect=False) self.addCleanup(c.close) - # MongoClient connects to primary by default. - wait_until(lambda: c.address is not None, 'connect to primary') - self.assertEqual(c.address, ('a', 1)) - # C is brought up as a standalone. c.mock_members.remove('c:3') c.mock_standalones.append('c:3') @@ -57,14 +54,15 @@ def test_client(self): c.kill_host('a:1') c.kill_host('b:2') - # Force reconnect. - c.close() - - with self.assertRaises(AutoReconnect): + with self.assertRaises(ServerSelectionTimeoutError): c.db.command('ping') - self.assertEqual(c.address, None) + # Client can still discover the primary node + c.revive_host('a:1') + wait_until(lambda: c.address is not None, 'connect to primary') + self.assertEqual(c.address, ('a', 1)) + def test_replica_set_client(self): c = MockClient( standalones=[], @@ -158,7 +156,6 @@ def test_client(self): c.mock_members.append('c:3') c.mock_hello_hosts.append('c:3') - c.close() c.db.command('ping') self.assertEqual(c.address, ('a', 1)) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 1e4165246d..46fce3b13a 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -111,8 +111,8 @@ def test_selector_called(self): # Client setup. mongo_client = rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection - self.addCleanup(mongo_client.drop_database, 'testdb') self.addCleanup(mongo_client.close) + self.addCleanup(mongo_client.drop_database, 'testdb') # Do N operations and test selector is called at least N times. test_collection.insert_one({'age': 20, 'name': 'John'}) diff --git a/test/test_threads.py b/test/test_threads.py index 854d3cab05..a3cde207a2 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -111,21 +111,6 @@ def run(self): assert error -class Disconnect(threading.Thread): - - def __init__(self, client, n): - threading.Thread.__init__(self) - self.client = client - self.n = n - self.passed = False - - def run(self): - for _ in range(self.n): - self.client.close() - - self.passed = True - - class TestThreads(IntegrationTest): def setUp(self): self.db = self.client.pymongo_test @@ -180,25 +165,6 @@ def test_safe_update(self): error.join() okay.join() - def test_client_disconnect(self): - db = rs_or_single_client(serverSelectionTimeoutMS=30000).pymongo_test - db.drop_collection("test") - db.test.insert_many([{"x": i} for i in range(1000)]) - - # Start 10 threads that execute a query, and 10 threads that call - # client.close() 10 times in a row. - threads = [SaveAndFind(db.test) for _ in range(10)] - threads.extend(Disconnect(db.client, 10) for _ in range(10)) - - for t in threads: - t.start() - - for t in threads: - t.join(300) - - for t in threads: - self.assertTrue(t.passed) - if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 69e79e3b7e..0a2f0b9965 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -975,9 +975,9 @@ def _testOperation_targetedFailPoint(self, spec): "session %s" % (spec['session'],)) client = single_client('%s:%s' % session._pinned_address) + self.addCleanup(client.close) self.__set_fail_point( client=client, command_args=spec['failPoint']) - self.addCleanup(client.close) def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec['session']] From 7467aa634d1ac36b52cc95757f1cc154d318a527 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 24 Sep 2021 15:47:37 -0700 Subject: [PATCH 0482/2111] PYTHON-2915 Fix bug when starting a transaction with a large bulk write (#743) --- pymongo/bulk.py | 30 ++++++++++++---------- pymongo/message.py | 52 +++++++++++++++++++-------------------- test/test_transactions.py | 30 ++++++++++++++++++++++ 3 files changed, 72 insertions(+), 40 deletions(-) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 0f57309287..829b482c95 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -267,17 +267,18 @@ def _execute_command(self, generator, write_concern, session, # sock_info.write_command. sock_info.validate_session(client, session) while run: - cmd = SON([(_COMMANDS[run.op_type], self.collection.name), - ('ordered', self.ordered)]) - if not write_concern.is_server_default: - cmd['writeConcern'] = write_concern.document - if self.bypass_doc_val: - cmd['bypassDocumentValidation'] = True + cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd, sock_info, op_id, listeners, session, + db_name, cmd_name, sock_info, op_id, listeners, session, run.op_type, self.collection.codec_options) while run.idx_offset < len(run.ops): + cmd = SON([(cmd_name, self.collection.name), + ('ordered', self.ordered)]) + if not write_concern.is_server_default: + cmd['writeConcern'] = write_concern.document + if self.bypass_doc_val: + cmd['bypassDocumentValidation'] = True if session: # Start a new retryable write unless one was already # started for this command. @@ -287,9 +288,10 @@ def _execute_command(self, generator, write_concern, session, session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info) sock_info.send_cluster_time(cmd, session, client) + sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible in one command. - result, to_send = bwc.execute(ops, client) + result, to_send = bwc.execute(cmd, ops, client) # Retryable writeConcernErrors halt the execution of this run. wce = result.get('writeConcernError', {}) @@ -359,17 +361,19 @@ def execute_op_msg_no_results(self, sock_info, generator): run = self.current_run while run: - cmd = SON([(_COMMANDS[run.op_type], self.collection.name), - ('ordered', False), - ('writeConcern', {'w': 0})]) + cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd, sock_info, op_id, listeners, None, + db_name, cmd_name, sock_info, op_id, listeners, None, run.op_type, self.collection.codec_options) while run.idx_offset < len(run.ops): + cmd = SON([(cmd_name, self.collection.name), + ('ordered', False), + ('writeConcern', {'w': 0})]) + sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible. - to_send = bwc.execute_unack(ops, client) + to_send = bwc.execute_unack(cmd, ops, client) run.idx_offset += len(to_send) self.current_run = run = next(generator, None) diff --git a/pymongo/message.py b/pymongo/message.py index 8a496d5b1b..86a83f152e 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -704,50 +704,48 @@ def _get_more(collection_name, num_to_return, cursor_id, ctx=None): class _BulkWriteContext(object): """A wrapper around SocketInfo for use with write splitting functions.""" - __slots__ = ('db_name', 'command', 'sock_info', 'op_id', + __slots__ = ('db_name', 'sock_info', 'op_id', 'name', 'field', 'publish', 'start_time', 'listeners', 'session', 'compress', 'op_type', 'codec') - def __init__(self, database_name, command, sock_info, operation_id, + def __init__(self, database_name, cmd_name, sock_info, operation_id, listeners, session, op_type, codec): self.db_name = database_name - self.command = command self.sock_info = sock_info self.op_id = operation_id self.listeners = listeners self.publish = listeners.enabled_for_commands - self.name = next(iter(command)) + self.name = cmd_name self.field = _FIELD_MAP[self.name] self.start_time = datetime.datetime.now() if self.publish else None self.session = session self.compress = True if sock_info.compression_context else False self.op_type = op_type self.codec = codec - sock_info.add_server_api(command) - def _batch_command(self, docs): + def _batch_command(self, cmd, docs): namespace = self.db_name + '.$cmd' request_id, msg, to_send = _do_batched_op_msg( - namespace, self.op_type, self.command, docs, self.check_keys, + namespace, self.op_type, cmd, docs, self.check_keys, self.codec, self) if not to_send: raise InvalidOperation("cannot do an empty bulk write") return request_id, msg, to_send - def execute(self, docs, client): - request_id, msg, to_send = self._batch_command(docs) - result = self.write_command(request_id, msg, to_send) + def execute(self, cmd, docs, client): + request_id, msg, to_send = self._batch_command(cmd, docs) + result = self.write_command(cmd, request_id, msg, to_send) client._process_response(result, self.session) return result, to_send - def execute_unack(self, docs, client): - request_id, msg, to_send = self._batch_command(docs) + def execute_unack(self, cmd, docs, client): + request_id, msg, to_send = self._batch_command(cmd, docs) # Though this isn't strictly a "legacy" write, the helper # handles publishing commands and sending our message # without receiving a result. Send 0 for max_doc_size # to disable size checking. Size checking is handled while # the documents are encoded to BSON. - self.unack_write(request_id, msg, 0, to_send) + self.unack_write(cmd, request_id, msg, 0, to_send) return to_send @property @@ -778,12 +776,12 @@ def max_split_size(self): """The maximum size of a BSON command before batch splitting.""" return self.max_bson_size - def unack_write(self, request_id, msg, max_doc_size, docs): + def unack_write(self, cmd, request_id, msg, max_doc_size, docs): """A proxy for SocketInfo.unack_write that handles event publishing. """ if self.publish: duration = datetime.datetime.now() - self.start_time - cmd = self._start(request_id, docs) + cmd = self._start(cmd, request_id, docs) start = datetime.datetime.now() try: result = self.sock_info.unack_write(msg, max_doc_size) @@ -811,12 +809,12 @@ def unack_write(self, request_id, msg, max_doc_size, docs): self.start_time = datetime.datetime.now() return result - def write_command(self, request_id, msg, docs): + def write_command(self, cmd, request_id, msg, docs): """A proxy for SocketInfo.write_command that handles event publishing. """ if self.publish: duration = datetime.datetime.now() - self.start_time - self._start(request_id, docs) + self._start(cmd, request_id, docs) start = datetime.datetime.now() try: reply = self.sock_info.write_command(request_id, msg) @@ -836,9 +834,8 @@ def write_command(self, request_id, msg, docs): self.start_time = datetime.datetime.now() return reply - def _start(self, request_id, docs): + def _start(self, cmd, request_id, docs): """Publish a CommandStartedEvent.""" - cmd = self.command.copy() cmd[self.field] = docs self.listeners.publish_command_start( cmd, self.db_name, @@ -871,10 +868,10 @@ def _fail(self, request_id, failure, duration): class _EncryptedBulkWriteContext(_BulkWriteContext): __slots__ = () - def _batch_command(self, docs): + def _batch_command(self, cmd, docs): namespace = self.db_name + '.$cmd' msg, to_send = _encode_batched_write_command( - namespace, self.op_type, self.command, docs, self.check_keys, + namespace, self.op_type, cmd, docs, self.check_keys, self.codec, self) if not to_send: raise InvalidOperation("cannot do an empty bulk write") @@ -885,17 +882,18 @@ def _batch_command(self, docs): DEFAULT_RAW_BSON_OPTIONS) return cmd, to_send - def execute(self, docs, client): - cmd, to_send = self._batch_command(docs) + def execute(self, cmd, docs, client): + batched_cmd, to_send = self._batch_command(cmd, docs) result = self.sock_info.command( - self.db_name, cmd, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + self.db_name, batched_cmd, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, session=self.session, client=client) return result, to_send - def execute_unack(self, docs, client): - cmd, to_send = self._batch_command(docs) + def execute_unack(self, cmd, docs, client): + batched_cmd, to_send = self._batch_command(cmd, docs) self.sock_info.command( - self.db_name, cmd, write_concern=WriteConcern(w=0), + self.db_name, batched_cmd, write_concern=WriteConcern(w=0), session=self.session, client=client) return to_send diff --git a/test/test_transactions.py b/test/test_transactions.py index 33a9186e83..de3066303f 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -286,6 +286,36 @@ def gridfs_open_upload_stream(*args, **kwargs): ): op(*args, session=s) + # Require 4.2+ for large (16MB+) transactions. + @client_context.require_version_min(4, 2) + @client_context.require_transactions + def test_transaction_starts_with_batched_write(self): + # Start a transaction with a batch of operations that needs to be + # split. + listener = OvertCommandListener() + client = rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + coll.delete_many({}) + listener.reset() + self.addCleanup(client.close) + self.addCleanup(coll.drop) + ops = [InsertOne({'a': '1'*(10*1024*1024)}) for _ in range(10)] + with client.start_session() as session: + with session.start_transaction(): + coll.bulk_write(ops, session=session) + # Assert commands were constructed properly. + self.assertEqual(['insert', 'insert', 'insert', 'commitTransaction'], + listener.started_command_names()) + first_cmd = listener.results['started'][0].command + self.assertTrue(first_cmd['startTransaction']) + lsid = first_cmd['lsid'] + txn_number = first_cmd['txnNumber'] + for event in listener.results['started'][1:]: + self.assertNotIn('startTransaction', event.command) + self.assertEqual(lsid, event.command['lsid']) + self.assertEqual(txn_number, event.command['txnNumber']) + self.assertEqual(10, coll.count_documents({})) + class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" From 9cb64775c97a800e2c3a7cf54cb732f95b37332b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 29 Sep 2021 16:36:50 -0700 Subject: [PATCH 0483/2111] PYTHON-2926 Skip failing aggregate $out test on 5.1 --- test/crud/unified/aggregate-out-readConcern.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/crud/unified/aggregate-out-readConcern.json b/test/crud/unified/aggregate-out-readConcern.json index e293457c1c..ca1cff2069 100644 --- a/test/crud/unified/aggregate-out-readConcern.json +++ b/test/crud/unified/aggregate-out-readConcern.json @@ -262,6 +262,11 @@ }, { "description": "readConcern available with out stage", + "runOnRequirements": [ + { + "maxServerVersion": "5.0.99" + } + ], "operations": [ { "object": "collection_readConcern_available", From 6e7b652d86f74ded569b1b91f058592fcbbfaf7d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 5 Oct 2021 14:00:39 -0700 Subject: [PATCH 0484/2111] PYTHON-2914 MongoClient should raise an error when given multiple URIs (#747) --- doc/changelog.rst | 2 ++ doc/migrate-to-pymongo4.rst | 6 ++++++ pymongo/mongo_client.py | 16 +++++++++------- test/test_client.py | 5 +++++ 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index b203140127..c4c4f35fed 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -157,6 +157,8 @@ Breaking Changes in 4.0 are passed to the server as-is rather than the previous behavior which substituted in a projection of ``{"_id": 1}``. This means that an empty projection will now return the entire document, not just the ``"_id"`` field. +- ``MongoClient()`` now raises a :exc:`~pymongo.errors.ConfigurationError` + when more than one URI is passed into the ``hosts`` argument. Notable improvements diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index eea17e8bcc..7430ea50ad 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -187,6 +187,12 @@ MongoClient cannot execute operations after ``close()`` after being closed. The previous behavior would simply reconnect. However, now you must create a new instance. +MongoClient raises exception when given more than one URI +......................................................... + +``MongoClient()`` now raises a :exc:`~pymongo.errors.ConfigurationError` +when more than one URI is passed into the ``hosts`` argument. + Database -------- diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index f105b1023a..c0bcb9575b 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -109,11 +109,9 @@ def __init__( The `host` parameter can be a full `mongodb URI `_, in addition to - a simple hostname. It can also be a list of hostnames or - URIs. Any port specified in the host string(s) will override - the `port` parameter. If multiple mongodb URIs containing - database or auth information are passed, the last database, - username, and password present will be used. For username and + a simple hostname. It can also be a list of hostnames but no more + than one URI. Any port specified in the host string(s) will override + the `port` parameter. For username and passwords reserved characters like ':', '/', '+' and '@' must be percent encoded following RFC 2396:: @@ -179,8 +177,9 @@ def __init__( :Parameters: - `host` (optional): hostname or IP address or Unix domain socket path of a single mongod or mongos instance to connect to, or a - mongodb URI, or a list of hostnames / mongodb URIs. If `host` is - an IPv6 literal it must be enclosed in '[' and ']' characters + mongodb URI, or a list of hostnames (but no more than one mongodb + URI). If `host` is an IPv6 literal it must be enclosed in '[' + and ']' characters following the RFC2732 URL syntax (e.g. '[::1]' for localhost). Multihomed and round robin DNS addresses are **not** supported. - `port` (optional): port number on which to connect @@ -645,6 +644,9 @@ def __init__( dbase = None opts = common._CaseInsensitiveDictionary() fqdn = None + if len([h for h in host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB " + "URIs") for entity in host: # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' # it must be a URI, diff --git a/test/test_client.py b/test/test_client.py index 993c58267c..4398fa08be 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -466,6 +466,11 @@ def test_uri_security_options(self): class TestClient(IntegrationTest): + def test_multiple_uris(self): + with self.assertRaises(ConfigurationError): + MongoClient(host=['mongodb+srv://cluster-a.abc12.mongodb.net', + 'mongodb+srv://cluster-b.abc12.mongodb.net', + 'mongodb+srv://cluster-c.abc12.mongodb.net']) def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): From dd9206a1ae93247558824f6c1daea2b2b25d8e2e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 6 Oct 2021 11:56:44 -0700 Subject: [PATCH 0485/2111] PYTHON-2928 Fix mod_wsgi test failures (#750) --- test/mod_wsgi_test/mod_wsgi_test.wsgi | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/test/mod_wsgi_test/mod_wsgi_test.wsgi b/test/mod_wsgi_test/mod_wsgi_test.wsgi index 1fa4e74350..bfd1c4bab0 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.wsgi +++ b/test/mod_wsgi_test/mod_wsgi_test.wsgi @@ -29,19 +29,13 @@ from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient client = MongoClient() - -# If the deployment is a replica set, connect to the whole set. -replica_set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') -if replica_set_name: - client = MongoClient(replicaSet=replica_set_name) - collection = client.test.test - ndocs = 20 - collection.drop() collection.insert_many([{'i': i} for i in range(ndocs)]) -client.close() # Discard main thread's request socket. +client.close() # Discard main thread's request socket. +client = MongoClient() +collection = client.test.test try: from mod_wsgi import version as mod_wsgi_version From 049daf9cf674bacdb5c991dcd86b39ae10777189 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Oct 2021 16:35:46 -0500 Subject: [PATCH 0486/2111] PYTHON-2935 Fix test_encryption.TestClientSimple.test_use_after_close (#751) --- pymongo/topology.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pymongo/topology.py b/pymongo/topology.py index 9ca3029bf0..4083d7f331 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -556,10 +556,8 @@ def _ensure_opened(self): Hold the lock when calling this. """ if self._closed: - raise InvalidOperation("Once a MongoClient is closed, " - "all operations will fail. Please create " - "a new client object if you wish to " - "reconnect.") + raise InvalidOperation("Cannot use MongoClient after close") + if not self._opened: self._opened = True self._update_servers() From 6bb8a1f411cb7e4c58af910ed5aca51aad9e490b Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 8 Oct 2021 11:23:21 -0700 Subject: [PATCH 0487/2111] PYTHON-2823 Allow custom service names with srvServiceName URI option (#749) --- doc/changelog.rst | 4 +++- pymongo/common.py | 4 ++++ pymongo/mongo_client.py | 15 +++++++++++- pymongo/monitor.py | 6 ++++- pymongo/settings.py | 2 ++ pymongo/srv_resolver.py | 8 ++++--- pymongo/uri_parser.py | 12 ++++++++-- .../loadBalanced-no-results.json | 7 ++++++ .../replica-set/srv-service-name.json | 16 +++++++++++++ test/test_client.py | 21 ++++++++++++++++ test/uri_options/srv-service-name-option.json | 24 +++++++++++++++++++ 11 files changed, 111 insertions(+), 8 deletions(-) create mode 100644 test/srv_seedlist/load-balanced/loadBalanced-no-results.json create mode 100644 test/srv_seedlist/replica-set/srv-service-name.json create mode 100644 test/uri_options/srv-service-name-option.json diff --git a/doc/changelog.rst b/doc/changelog.rst index c4c4f35fed..8dfe4f7503 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -134,6 +134,9 @@ Breaking Changes in 4.0 - The ``hint`` option is now required when using ``min`` or ``max`` queries with :meth:`~pymongo.collection.Collection.find`. - ``name`` is now a required argument for the :class:`pymongo.driver_info.DriverInfo` class. +- When providing a "mongodb+srv://" URI to + :class:`~pymongo.mongo_client.MongoClient` constructor you can now use the + ``srvServiceName`` URI option to specify your own SRV service name. - :meth:`~bson.son.SON.items` now returns a ``dict_items`` object rather than a list. - Removed :meth:`bson.son.SON.iteritems`. @@ -160,7 +163,6 @@ Breaking Changes in 4.0 - ``MongoClient()`` now raises a :exc:`~pymongo.errors.ConfigurationError` when more than one URI is passed into the ``hosts`` argument. - Notable improvements .................... diff --git a/pymongo/common.py b/pymongo/common.py index f1a5389b35..8ccdc21b63 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -113,6 +113,9 @@ # From the driver sessions spec. _MAX_END_SESSIONS = 10000 +# Default value for srvServiceName +SRV_SERVICE_NAME = "mongodb" + def partition_node(node): """Split a host:port string into (host, int(port)) pair.""" @@ -626,6 +629,7 @@ def validate_tzinfo(dummy, value): 'w': validate_non_negative_int_or_basestring, 'wtimeoutms': validate_non_negative_integer, 'zlibcompressionlevel': validate_zlib_compression_level, + 'srvservicename': validate_string } # Dictionary where keys are the names of URI options specific to pymongo, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index c0bcb9575b..d1794938f7 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -329,6 +329,11 @@ def __init__( a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include 'strict', 'replace', and 'ignore'. Defaults to 'strict'. + - ``srvServiceName`: (string) The SRV service name to use for + "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: + + MongoClient("mongodb+srv://example.com/?srvServiceName=customname") + | **Write Concern options:** | (Only set if passed. No default values.) @@ -499,6 +504,7 @@ def __init__( arguments. The default for `uuidRepresentation` was changed from ``pythonLegacy`` to ``unspecified``. + Added the ``srvServiceName`` URI and keyword argument. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. @@ -644,6 +650,8 @@ def __init__( dbase = None opts = common._CaseInsensitiveDictionary() fqdn = None + srv_service_name = keyword_opts.get("srvservicename", None) + if len([h for h in host if "/" in h]) > 1: raise ConfigurationError("host must not contain multiple MongoDB " "URIs") @@ -659,7 +667,7 @@ def __init__( keyword_opts.cased_key("connecttimeoutms"), timeout) res = uri_parser.parse_uri( entity, port, validate=True, warn=True, normalize=False, - connect_timeout=timeout) + connect_timeout=timeout, srv_service_name=srv_service_name) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -689,6 +697,10 @@ def __init__( # Override connection string options with kwarg options. opts.update(keyword_opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + # Handle security-option conflicts in combined options. opts = _handle_security_options(opts) # Normalize combined options. @@ -728,6 +740,7 @@ def __init__( server_selector=options.server_selector, heartbeat_frequency=options.heartbeat_frequency, fqdn=fqdn, + srv_service_name=srv_service_name, direct_connection=options.direct_connection, load_balanced=options.load_balanced, ) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index d13d337f8e..5359ee054a 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -299,6 +299,7 @@ def __init__(self, topology, topology_settings): self._settings = topology_settings self._seedlist = self._settings._seeds self._fqdn = self._settings.fqdn + self._srv_service_name = self._settings._srv_service_name def _run(self): seedlist = self._get_seedlist() @@ -316,7 +317,10 @@ def _get_seedlist(self): Returns a list of ServerDescriptions. """ try: - seedlist, ttl = _SrvResolver(self._fqdn).get_hosts_and_min_ttl() + resolver = _SrvResolver(self._fqdn, + self._settings.pool_options.connect_timeout, + self._srv_service_name) + seedlist, ttl = resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception diff --git a/pymongo/settings.py b/pymongo/settings.py index ff9e84ee01..7e9d393acc 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -39,6 +39,7 @@ def __init__(self, heartbeat_frequency=common.HEARTBEAT_FREQUENCY, server_selector=None, fqdn=None, + srv_service_name=common.SRV_SERVICE_NAME, direct_connection=False, load_balanced=None): """Represent MongoClient's configuration. @@ -60,6 +61,7 @@ def __init__(self, self._server_selection_timeout = server_selection_timeout self._server_selector = server_selector self._fqdn = fqdn + self._srv_service_name = srv_service_name self._heartbeat_frequency = heartbeat_frequency self._direct = direct_connection diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index c53ce378e3..e3f8f330f2 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -47,8 +47,10 @@ def _resolve(*args, **kwargs): "Did you mean to use 'mongodb://'?") class _SrvResolver(object): - def __init__(self, fqdn, connect_timeout=None): + def __init__(self, fqdn, + connect_timeout, srv_service_name): self.__fqdn = fqdn + self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT # Validate the fully qualified domain name. @@ -83,8 +85,8 @@ def get_options(self): def _resolve_uri(self, encapsulate_errors): try: - results = _resolve('_mongodb._tcp.' + self.__fqdn, 'SRV', - lifetime=self.__connect_timeout) + results = _resolve('_' + self.__srv + '._tcp.' + self.__fqdn, + 'SRV', lifetime=self.__connect_timeout) except Exception as exc: if not encapsulate_errors: # Raise the original error. diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index cf5cf64f5c..79642c7112 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -21,6 +21,7 @@ from urllib.parse import unquote_plus from pymongo.common import ( + SRV_SERVICE_NAME, get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, URI_OPTIONS_DEPRECATION_MAP, _CaseInsensitiveDictionary) from pymongo.errors import ConfigurationError, InvalidURI @@ -373,7 +374,7 @@ def _check_options(nodes, options): def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, - normalize=True, connect_timeout=None): + normalize=True, connect_timeout=None, srv_service_name=None): """Parse and validate a MongoDB URI. Returns a dict of the form:: @@ -405,6 +406,7 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, to their internally-used names. Default: ``True``. - `connect_timeout` (optional): The maximum time in milliseconds to wait for a response from the DNS server. + - 'srv_service_name` (optional): A custom SRV service name .. versionchanged:: 3.9 Added the ``normalize`` parameter. @@ -468,6 +470,9 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, if opts: options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if '@' in host_part: userinfo, _, hosts = host_part.rpartition('@') user, passwd = parse_userinfo(userinfo) @@ -499,7 +504,7 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout=connect_timeout) + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name) nodes = dns_resolver.get_hosts() dns_options = dns_resolver.get_options() if dns_options: @@ -514,6 +519,9 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, options[opt] = val if "tls" not in options and "ssl" not in options: options["tls"] = True if validate else 'true' + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError("The srvServiceName option is only allowed " + "with 'mongodb+srv://' URIs") else: nodes = split_hosts(hosts, default_port=default_port) diff --git a/test/srv_seedlist/load-balanced/loadBalanced-no-results.json b/test/srv_seedlist/load-balanced/loadBalanced-no-results.json new file mode 100644 index 0000000000..7f49416aa3 --- /dev/null +++ b/test/srv_seedlist/load-balanced/loadBalanced-no-results.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test4.test.build.10gen.cc/?loadBalanced=true", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because no SRV records are present for this URI." +} diff --git a/test/srv_seedlist/replica-set/srv-service-name.json b/test/srv_seedlist/replica-set/srv-service-name.json new file mode 100644 index 0000000000..ec36cdbb00 --- /dev/null +++ b/test/srv_seedlist/replica-set/srv-service-name.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true, + "srvServiceName": "customname" + } +} diff --git a/test/test_client.py b/test/test_client.py index 4398fa08be..4d8b26400a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1591,6 +1591,27 @@ def test_network_error_message(self): with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) + @unittest.skipUnless( + _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + def test_service_name_from_kwargs(self): + client = MongoClient( + 'mongodb+srv://user:password@test22.test.build.10gen.cc', + srvServiceName='customname', connect=False) + self.assertEqual(client._topology_settings._srv_service_name, + 'customname') + client = MongoClient( + 'mongodb+srv://user:password@test22.test.build.10gen.cc' + '/?srvServiceName=shouldbeoverriden', + srvServiceName='customname', connect=False) + self.assertEqual(client._topology_settings._srv_service_name, + 'customname') + client = MongoClient( + 'mongodb+srv://user:password@test22.test.build.10gen.cc' + '/?srvServiceName=customname', + connect=False) + self.assertEqual(client._topology_settings._srv_service_name, + 'customname') + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" diff --git a/test/uri_options/srv-service-name-option.json b/test/uri_options/srv-service-name-option.json new file mode 100644 index 0000000000..049a35bc70 --- /dev/null +++ b/test/uri_options/srv-service-name-option.json @@ -0,0 +1,24 @@ +{ + "tests": [ + { + "description": "SRV URI with custom srvServiceName", + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "srvServiceName": "customname" + } + }, + { + "description": "Non-SRV URI with custom srvServiceName", + "uri": "mongodb://example.com/?srvServiceName=customname", + "valid": false, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + } + ] +} From a4ccfa5b3f07a634f40cf715bcba8c82ecf3a729 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 8 Oct 2021 17:44:15 -0700 Subject: [PATCH 0488/2111] PYTHON-2938 Fix race condition caused by MongoClient._process_periodic_tasks(client) (#752) --- pymongo/mongo_client.py | 27 ++++++++++++++++++++------- test/test_client.py | 19 +++++++++++++++++++ 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index d1794938f7..279eae5587 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1596,8 +1596,14 @@ def _process_kill_cursors(self): try: self._cleanup_cursor(True, cursor_id, address, sock_mgr, None, False) - except Exception: - helpers._handle_exception() + except Exception as exc: + if (isinstance(exc, InvalidOperation) + and self._topology._closed): + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + helpers._handle_exception() # Don't re-open topology if it's closed and there's no pending cursors. if address_to_cursor_ids: @@ -1606,18 +1612,25 @@ def _process_kill_cursors(self): try: self._kill_cursors( cursor_ids, address, topology, session=None) - except Exception: - helpers._handle_exception() + except Exception as exc: + if (isinstance(exc, InvalidOperation) and + self._topology._closed): + raise + else: + helpers._handle_exception() # This method is run periodically by a background thread. def _process_periodic_tasks(self): """Process any pending kill cursors requests and maintain connection pool parameters.""" - self._process_kill_cursors() try: + self._process_kill_cursors() self._topology.update_pool(self.__all_credentials) - except Exception: - helpers._handle_exception() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + helpers._handle_exception() def __start_session(self, implicit, **kwargs): # Raises ConfigurationError if sessions are not supported. diff --git a/test/test_client.py b/test/test_client.py index 4d8b26400a..aee95692c6 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1591,6 +1591,24 @@ def test_network_error_message(self): with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) + @unittest.skipIf('PyPy' in sys.version, 'PYTHON-2938 could fail on PyPy') + def test_process_periodic_tasks(self): + client = rs_or_single_client() + coll = client.db.collection + coll.insert_many([{} for _ in range(5)]) + cursor = coll.find(batch_size=2) + cursor.next() + c_id = cursor.cursor_id + self.assertIsNotNone(c_id) + client.close() + # Add cursor to kill cursors queue + del cursor + wait_until(lambda: client._MongoClient__kill_cursors_queue, + "waited for cursor to be added to queue") + client._process_periodic_tasks() # This must not raise or print any exceptions + with self.assertRaises(InvalidOperation): + coll.insert_many([{} for _ in range(5)]) + @unittest.skipUnless( _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_service_name_from_kwargs(self): @@ -1613,6 +1631,7 @@ def test_service_name_from_kwargs(self): 'customname') + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From df6f6496a4d9f7586a374b903560318ca4e292af Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Oct 2021 10:29:38 -0700 Subject: [PATCH 0489/2111] PYTHON-2927 PYTHON-2937 Skip failing tests on PyPy (#753) Cleanup test clients more eagerly. --- test/test_client.py | 1 + test/test_pooling.py | 2 ++ test/test_read_write_concern_spec.py | 2 ++ test/test_retryable_writes.py | 1 + test/test_ssl.py | 9 ++++++++- test/test_transactions.py | 9 ++++++++- 6 files changed, 22 insertions(+), 2 deletions(-) diff --git a/test/test_client.py b/test/test_client.py index aee95692c6..2187bde9dd 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1552,6 +1552,7 @@ def test_direct_connection(self): @unittest.skipIf(sys.platform.startswith('java'), 'Jython does not support gc.get_objects') + @unittest.skipIf('PyPy' in sys.version, 'PYTHON-2927 fails often on PyPy') def test_continuous_network_errors(self): def server_description_count(): i = 0 diff --git a/test/test_pooling.py b/test/test_pooling.py index 266e080ca8..ea2ed32247 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -421,6 +421,7 @@ class TestPoolMaxSize(_TestPoolingBase): def test_max_pool_size(self): max_pool_size = 4 c = rs_or_single_client(maxPoolSize=max_pool_size) + self.addCleanup(c.close) collection = c[DB].test # Need one document. @@ -456,6 +457,7 @@ def f(): def test_max_pool_size_none(self): c = rs_or_single_client(maxPoolSize=None) + self.addCleanup(c.close) collection = c[DB].test # Need one document. diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index b334ee9359..d6c5a68c32 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -52,6 +52,7 @@ def test_omit_default_read_write_concern(self): listener = EventListener() # Client with default readConcern and writeConcern client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) @@ -209,6 +210,7 @@ def test_error_includes_errInfo(self): def test_write_error_details_exposes_errinfo(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) db = client.errinfotest self.addCleanup(client.drop_database, "errinfotest") validator = {"x": {"$type": "string"}} diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index bf9f08721a..ffc93eb2fa 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -573,6 +573,7 @@ def test_increment_transaction_id_without_sending_command(self): listener = OvertCommandListener() client = rs_or_single_client( retryWrites=True, event_listeners=[listener]) + self.addCleanup(client.close) topology = client._topology select_server = topology.select_server diff --git a/test/test_ssl.py b/test/test_ssl.py index 45614034e1..0162eb3a0d 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -500,6 +500,7 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) + self.addCleanup(noauth.close) with self.assertRaises(OperationFailure): noauth.pymongo_test.test.find_one() @@ -512,6 +513,7 @@ def test_mongodb_x509_auth(self): tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, event_listeners=[listener]) + self.addCleanup(auth.close) # No error auth.pymongo_test.test.find_one() @@ -529,6 +531,7 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) + self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() @@ -537,6 +540,7 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) + self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match @@ -545,7 +549,9 @@ def test_mongodb_x509_auth(self): quote_plus("not the username"), host, port)) bad_client = MongoClient( - uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) + uri, ssl=True, tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM) + self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() @@ -557,6 +563,7 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM) + self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() diff --git a/test/test_transactions.py b/test/test_transactions.py index de3066303f..6c41f28cf4 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -290,6 +290,9 @@ def gridfs_open_upload_stream(*args, **kwargs): @client_context.require_version_min(4, 2) @client_context.require_transactions def test_transaction_starts_with_batched_write(self): + if 'PyPy' in sys.version and client_context.tls: + self.skipTest('PYTHON-2937 PyPy is so slow sending large ' + 'messages over TLS that this test fails') # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() @@ -299,7 +302,8 @@ def test_transaction_starts_with_batched_write(self): listener.reset() self.addCleanup(client.close) self.addCleanup(coll.drop) - ops = [InsertOne({'a': '1'*(10*1024*1024)}) for _ in range(10)] + large_str = '\0'*(10*1024*1024) + ops = [InsertOne({'a': large_str}) for _ in range(10)] with client.start_session() as session: with session.start_transaction(): coll.bulk_write(ops, session=session) @@ -364,6 +368,7 @@ def callback(session): def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): @@ -393,6 +398,7 @@ def callback(session): def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): @@ -423,6 +429,7 @@ def callback(session): def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) + self.addCleanup(client.close) coll = client[self.db.name].test def callback(session): From a94916edf10fddad3335ff14b55b83e27641ec14 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Oct 2021 13:58:04 -0700 Subject: [PATCH 0490/2111] PYTHON-2923 Add Python 3.10 to release tasks (#758) --- .evergreen/build-mac.sh | 2 +- .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 3 ++- .evergreen/build-windows.sh | 2 +- setup.py | 1 + 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 79b947d718..5671ae6c6f 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.6 3.7 3.8 3.9; do +for VERSION in 3.6 3.7 3.8 3.9 3.10; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 4016782e79..1b74fc68e1 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp36|cp37|cp38|cp39) ]]; then + if [[ ! $PYTHON =~ (cp36|cp37|cp38|cp39|cp310) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 61b8b840d3..602a8e1e6c 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -37,7 +37,8 @@ unexpected=$(find dist \! \( -iname dist -or \ -iname '*cp36*' -or \ -iname '*cp37*' -or \ -iname '*cp38*' -or \ - -iname '*cp39*' \)) + -iname '*cp39*' -or \ + -iname '*cp310*' \)) if [ -n "$unexpected" ]; then echo "Unexpected files:" $unexpected exit 1 diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index f44cff18ac..97c7940769 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 36 37 38 39; do +for VERSION in 36 37 38 39 310; do _pythons=(C:/Python/Python${VERSION}/python.exe \ C:/Python/32/Python${VERSION}/python.exe) for PYTHON in "${_pythons[@]}"; do diff --git a/setup.py b/setup.py index 88df14ee07..cdf9113911 100755 --- a/setup.py +++ b/setup.py @@ -336,6 +336,7 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database"], From d77c20497b2bec8da8203279fd427f74a91370c9 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 13 Oct 2021 14:08:04 -0700 Subject: [PATCH 0491/2111] PYTHON-2940 Fix spec tests that require DNSPython (#756) --- test/test_uri_spec.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index cfca633458..2646f02e41 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -24,7 +24,8 @@ from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate from pymongo.compression_support import _HAVE_SNAPPY -from pymongo.uri_parser import parse_uri +from pymongo.srv_resolver import _HAVE_DNSPYTHON +from pymongo.uri_parser import parse_uri, SRV_SCHEME from test import clear_warning_registry, unittest @@ -92,7 +93,8 @@ def run_scenario(self): compressors = (test.get('options') or {}).get('compressors', []) if 'snappy' in compressors and not _HAVE_SNAPPY: self.skipTest('This test needs the snappy module.') - + if test['uri'].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: + self.skipTest("This test needs dnspython package.") valid = True warning = False From 11e6f9860a86b11a80cd0fac35ed3943a81ff520 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 20 Oct 2021 13:39:32 -0700 Subject: [PATCH 0492/2111] PYTHON-1579 Update URI parser to adhere to new connection string spec (#755) --- .evergreen/config.yml | 4 +- doc/changelog.rst | 13 ++++- doc/examples/authentication.rst | 6 +-- doc/migrate-to-pymongo4.rst | 19 ++++++- pymongo/auth.py | 3 -- pymongo/mongo_client.py | 2 +- pymongo/uri_parser.py | 37 ++++++++++--- test/connection_string/test/invalid-uris.json | 54 +++++++++++++++---- test/connection_string/test/valid-auth.json | 21 ++++++++ .../test/valid-host_identifiers.json | 6 +-- .../test/valid-warnings.json | 30 +++++++++++ test/test_ssl.py | 8 +-- test/test_uri_parser.py | 2 +- test/test_uri_spec.py | 3 +- 14 files changed, 173 insertions(+), 35 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f106528185..363b15ff57 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -523,7 +523,7 @@ functions: silent: true script: | cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' + alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote(sys.argv[1]))"' USER=$(urlencode ${iam_auth_ecs_account}) PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) MONGODB_URI="mongodb://$USER:$PASS@localhost" @@ -554,7 +554,7 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' + alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote(sys.argv[1]))"' alias jsonkey='${python3_binary} -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' USER=$(jsonkey AccessKeyId) USER=$(urlencode $USER) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8dfe4f7503..1213634ce7 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -160,8 +160,19 @@ Breaking Changes in 4.0 are passed to the server as-is rather than the previous behavior which substituted in a projection of ``{"_id": 1}``. This means that an empty projection will now return the entire document, not just the ``"_id"`` field. -- ``MongoClient()`` now raises a :exc:`~pymongo.errors.ConfigurationError` +- :class:`~pymongo.mongo_client.MongoClient` now raises a :exc:`~pymongo.errors.ConfigurationError` when more than one URI is passed into the ``hosts`` argument. +- :class:`~pymongo.mongo_client.MongoClient`` now raises an + :exc:`~pymongo.errors.InvalidURI` exception + when it encounters unescaped percent signs in username and password when + parsing MongoDB URIs. +- :class:`~pymongo.mongo_client.MongoClient` now uses + :py::func:`urllib.parse.unquote` rather than + :py:func:`urllib.parse.unquote_plus`, + meaning that plus signs ("+") are no longer converted to spaces (" "). This + means that if you were previously quoting your login information using + quote_plus, you must now switch to quote. Additionally, be aware that this + change only occurs when parsing login information from the URI. Notable improvements .................... diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index d0a1fba15b..bd6036e460 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -11,14 +11,14 @@ Percent-Escaping Username and Password -------------------------------------- Username and password must be percent-escaped with -:meth:`urllib.parse.quote_plus`, to be used in a MongoDB URI. For example:: +:py:func:`urllib.parse.quote`, to be used in a MongoDB URI. For example:: >>> from pymongo import MongoClient >>> import urllib.parse - >>> username = urllib.parse.quote_plus('user') + >>> username = urllib.parse.quote('user') >>> username 'user' - >>> password = urllib.parse.quote_plus('pass/word') + >>> password = urllib.parse.quote('pass/word') >>> password 'pass%2Fword' >>> MongoClient('mongodb://%s:%s@127.0.0.1' % (username, password)) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 7430ea50ad..994f8450af 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -190,9 +190,26 @@ now you must create a new instance. MongoClient raises exception when given more than one URI ......................................................... -``MongoClient()`` now raises a :exc:`~pymongo.errors.ConfigurationError` +:class:`~pymongo.mongo_client.MongoClient` now raises a :exc:`~pymongo.errors.ConfigurationError` when more than one URI is passed into the ``hosts`` argument. +MongoClient raises exception when given unescaped percent sign in login info +............................................................................ + +:class:`~pymongo.mongo_client.MongoClient` now raises an +:exc:`~pymongo.errors.InvalidURI` exception +when it encounters unescaped percent signs in username and password. + +MongoClient uses `unquote` rather than `unquote_plus` for login info +.................................................................... + +:class:`~pymongo.mongo_client.MongoClient` now uses +:py:func:`urllib.parse.unquote` rather than +:py:func:`urllib.parse.unquote_plus`, meaning that space characters are no +longer converted to plus signs. This means that if you were previously +quoting your login information using :py:func:`urllib.parse.quote_plus`, you +must now switch to :py:func:`urllib.parse.quote`. + Database -------- diff --git a/pymongo/auth.py b/pymongo/auth.py index b946980865..0fec2c7753 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -319,9 +319,6 @@ def _authenticate_gssapi(credentials, sock_info): if password is not None: if _USE_PRINCIPAL: - # Note that, though we use unquote_plus for unquoting URI - # options, we use quote here. Microsoft's UrlUnescape (used - # by WinKerberos) doesn't support +. principal = ":".join((quote(username), quote(password))) result, ctx = kerberos.authGSSClientInit( service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 279eae5587..57f623889e 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -329,7 +329,7 @@ def __init__( a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include 'strict', 'replace', and 'ignore'. Defaults to 'strict'. - - ``srvServiceName`: (string) The SRV service name to use for + - `srvServiceName`: (string) The SRV service name to use for "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: MongoClient("mongodb+srv://example.com/?srvServiceName=customname") diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 79642c7112..46c5eecd9e 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -18,7 +18,7 @@ import warnings import sys -from urllib.parse import unquote_plus +from urllib.parse import unquote, unquote_plus from pymongo.common import ( SRV_SERVICE_NAME, @@ -35,10 +35,26 @@ DEFAULT_PORT = 27017 +def _unquoted_percent(s): + """Check for unescaped percent signs. + + :Paramaters: + - `s`: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. + """ + for i in range(len(s)): + if s[i] == '%': + sub = s[i:i+3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote(sub) == sub: + return True + return False + def parse_userinfo(userinfo): """Validates the format of user information in a MongoDB URI. - Reserved characters like ':', '/', '+' and '@' must be escaped - following RFC 3986. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. Returns a 2-tuple containing the unescaped username followed by the unescaped password. @@ -46,14 +62,17 @@ def parse_userinfo(userinfo): :Paramaters: - `userinfo`: A string of the form : """ - if '@' in userinfo or userinfo.count(':') > 1: + if ('@' in userinfo or userinfo.count(':') > 1 or + _unquoted_percent(userinfo)): raise InvalidURI("Username and password must be escaped according to " - "RFC 3986, use urllib.parse.quote_plus") + "RFC 3986, use urllib.parse.quote") + user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. if not user: raise InvalidURI("The empty string is not valid username.") - return unquote_plus(user), unquote_plus(passwd) + + return unquote(user), unquote(passwd) def parse_ipv6_literal_host(entity, default_port): @@ -408,6 +427,12 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, wait for a response from the DNS server. - 'srv_service_name` (optional): A custom SRV service name + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported and plus signs ("+") are no longer decoded into spaces (" ") + when decoding username and password. To avoid these issues, use + :py:func:`urllib.parse.quote` when building the URI. + .. versionchanged:: 3.9 Added the ``normalize`` parameter. diff --git a/test/connection_string/test/invalid-uris.json b/test/connection_string/test/invalid-uris.json index 677cb5384c..e04da2b236 100644 --- a/test/connection_string/test/invalid-uris.json +++ b/test/connection_string/test/invalid-uris.json @@ -189,15 +189,6 @@ "auth": null, "options": null }, - { - "description": "Username with password containing an unescaped colon", - "uri": "mongodb://alice:foo:bar@127.0.0.1", - "valid": false, - "warning": null, - "hosts": null, - "auth": null, - "options": null - }, { "description": "Username containing an unescaped at-sign", "uri": "mongodb://alice@@127.0.0.1", @@ -251,6 +242,51 @@ "hosts": null, "auth": null, "options": null + }, + { + "description": "mongodb+srv with multiple service names", + "uri": "mongodb+srv://test5.test.mongodb.com,test6.test.mongodb.com", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "mongodb+srv with port number", + "uri": "mongodb+srv://test7.test.mongodb.com:27018", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign", + "uri": "mongodb://alice%foo:bar@127.0.0.1", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign and an escaped one", + "uri": "mongodb://user%20%:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "Username with password containing an unescaped percent sign (non hex digit)", + "uri": "mongodb://user%w:password@localhost", + "valid": false, + "warning": null, + "hosts": null, + "auth": null, + "options": null } ] } diff --git a/test/connection_string/test/valid-auth.json b/test/connection_string/test/valid-auth.json index 672777ff84..4f684ff185 100644 --- a/test/connection_string/test/valid-auth.json +++ b/test/connection_string/test/valid-auth.json @@ -240,6 +240,27 @@ "authmechanism": "MONGODB-CR" } }, + { + "description": "Subdelimiters in user/pass don't need escaping (MONGODB-CR)", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "ipv4", + "host": "127.0.0.1", + "port": null + } + ], + "auth": { + "username": "!$&'()*+,;=", + "password": "!$&'()*+,;=", + "db": "admin" + }, + "options": { + "authmechanism": "MONGODB-CR" + } + }, { "description": "Escaped username (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", diff --git a/test/connection_string/test/valid-host_identifiers.json b/test/connection_string/test/valid-host_identifiers.json index f33358725c..e8833b4af2 100644 --- a/test/connection_string/test/valid-host_identifiers.json +++ b/test/connection_string/test/valid-host_identifiers.json @@ -132,18 +132,18 @@ }, { "description": "UTF-8 hosts", - "uri": "mongodb://b\u00fccher.example.com,uml\u00e4ut.example.com/", + "uri": "mongodb://bücher.example.com,umläut.example.com/", "valid": true, "warning": false, "hosts": [ { "type": "hostname", - "host": "b\u00fccher.example.com", + "host": "bücher.example.com", "port": null }, { "type": "hostname", - "host": "uml\u00e4ut.example.com", + "host": "umläut.example.com", "port": null } ], diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json index 87f7248f21..1eacbf8fcb 100644 --- a/test/connection_string/test/valid-warnings.json +++ b/test/connection_string/test/valid-warnings.json @@ -63,6 +63,36 @@ "options": { "wtimeoutms": 10 } + }, + { + "description": "Empty integer option values are ignored", + "uri": "mongodb://localhost/?maxIdleTimeMS=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null + }, + { + "description": "Empty boolean option value are ignored", + "uri": "mongodb://localhost/?journal=", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": null } ] } diff --git a/test/test_ssl.py b/test/test_ssl.py index 0162eb3a0d..df44c31dc9 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] -from urllib.parse import quote_plus +from urllib.parse import quote from pymongo import MongoClient, ssl_support from pymongo.errors import (ConfigurationError, @@ -526,7 +526,7 @@ def test_mongodb_x509_auth(self): uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) + quote(MONGODB_X509_USERNAME), host, port)) client = MongoClient(uri, ssl=True, tlsAllowInvalidCertificates=True, @@ -546,7 +546,7 @@ def test_mongodb_x509_auth(self): # Auth should fail if username and certificate do not match uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( - quote_plus("not the username"), host, port)) + quote("not the username"), host, port)) bad_client = MongoClient( uri, ssl=True, tlsAllowInvalidCertificates=True, @@ -571,7 +571,7 @@ def test_mongodb_x509_auth(self): # Invalid certificate (using CA certificate as client certificate) uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) + quote(MONGODB_X509_USERNAME), host, port)) try: connected(MongoClient(uri, ssl=True, diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index f044fba5b3..33c7276046 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -43,7 +43,7 @@ def test_validate_userinfo(self): self.assertTrue(parse_userinfo('user:password')) self.assertEqual(('us:r', 'p@ssword'), parse_userinfo('us%3Ar:p%40ssword')) - self.assertEqual(('us er', 'p ssword'), + self.assertEqual(('us+er', 'p+ssword'), parse_userinfo('us+er:p+ssword')) self.assertEqual(('us er', 'p ssword'), parse_userinfo('us%20er:p%20ssword')) diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 2646f02e41..1bb5fd2f5b 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -179,7 +179,8 @@ def create_tests(test_path): if not filename.endswith('.json'): # skip everything that is not a test specification continue - with open(os.path.join(dirpath, filename)) as scenario_stream: + json_path = os.path.join(dirpath, filename) + with open(json_path, encoding="utf-8") as scenario_stream: scenario_def = json.load(scenario_stream) for testcase in scenario_def['tests']: From d77cb674b66b60f7cb7dd84269b1a74de5b08554 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 21 Oct 2021 15:48:58 -0700 Subject: [PATCH 0493/2111] PYTHON-2717 Treat maxPoolSize=0 the same as maxPoolSize=None --- pymongo/common.py | 2 +- pymongo/mongo_client.py | 7 +++++-- pymongo/pool.py | 2 +- test/test_client.py | 3 +-- test/test_pooling.py | 8 ++++++-- test/uri_options/connection-pool-options.json | 11 +++++++++++ 6 files changed, 25 insertions(+), 8 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index 8ccdc21b63..d77dd7edd1 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -607,7 +607,7 @@ def validate_tzinfo(dummy, value): 'journal': validate_boolean_or_string, 'localthresholdms': validate_positive_float_or_zero, 'maxidletimems': validate_timeout_or_none, - 'maxpoolsize': validate_positive_integer_or_none, + 'maxpoolsize': validate_non_negative_integer_or_none, 'maxstalenessseconds': validate_max_staleness, 'readconcernlevel': validate_string_or_none, 'readpreference': validate_read_preference_mode, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 57f623889e..11c51f9bc2 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -207,7 +207,9 @@ def __init__( - `maxPoolSize` (optional): The maximum allowable number of concurrent connections to each connected server. Requests to a server will block if there are `maxPoolSize` outstanding - connections to the requested server. Defaults to 100. Cannot be 0. + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. - `minPoolSize` (optional): The minimum required number of concurrent connections that the pool will maintain to each connected server. Default is 0. @@ -1004,7 +1006,8 @@ def max_pool_size(self): """The maximum allowable number of concurrent connections to each connected server. Requests to a server will block if there are `maxPoolSize` outstanding connections to the requested server. - Defaults to 100. Cannot be 0. + Defaults to 100. Can be either 0 or None, in which case there is no + limit on the number of concurrent connections. When a server's pool has reached `max_pool_size`, operations for that server block waiting for a socket to be returned to the pool. If diff --git a/pymongo/pool.py b/pymongo/pool.py index e85dbeae3b..952874abb3 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1132,7 +1132,7 @@ def __init__(self, address, options, handshake=True): self.size_cond = threading.Condition(self.lock) self.requests = 0 self.max_pool_size = self.opts.max_pool_size - if self.max_pool_size is None: + if not self.max_pool_size: self.max_pool_size = float('inf') # The second portion of the wait queue. # Enforces: maxConnecting diff --git a/test/test_client.py b/test/test_client.py index 2187bde9dd..288c697b2e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -161,8 +161,7 @@ def test_types(self): self.assertRaises(ConfigurationError, MongoClient, []) def test_max_pool_size_zero(self): - with self.assertRaises(ValueError): - MongoClient(maxPoolSize=0) + MongoClient(maxPoolSize=0) def test_uri_detection(self): self.assertRaises( diff --git a/test/test_pooling.py b/test/test_pooling.py index ea2ed32247..ce0bed87c4 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -485,10 +485,14 @@ def f(): joinall(threads) self.assertEqual(nthreads, self.n_passed) self.assertTrue(len(cx_pool.sockets) > 1) + self.assertEqual(cx_pool.max_pool_size, float('inf')) + def test_max_pool_size_zero(self): - with self.assertRaises(ValueError): - rs_or_single_client(maxPoolSize=0) + c = rs_or_single_client(maxPoolSize=0) + self.addCleanup(c.close) + pool = get_pool(c) + self.assertEqual(pool.max_pool_size, float('inf')) def test_max_pool_size_with_connection_failure(self): # The pool acquires its semaphore before attempting to connect; ensure diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index df90f11dfc..aae16190ba 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -31,6 +31,17 @@ "auth": null, "options": {} }, + { + "description": "maxPoolSize=0 does not error", + "uri": "mongodb://example.com/?maxPoolSize=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "maxPoolSize": 0 + } + }, { "description": "minPoolSize=0 does not error", "uri": "mongodb://example.com/?minPoolSize=0", From f4eb8f93fd81e532141cc2168b3fd28836e77bb5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 22 Oct 2021 14:43:20 -0700 Subject: [PATCH 0494/2111] PYTHON-2966 Make MongoClient.event_listeners work as expected (#761) --- pymongo/mongo_client.py | 2 +- pymongo/monitoring.py | 9 +++++---- test/test_client.py | 13 ++++++++++++- test/test_session.py | 4 ++-- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 11c51f9bc2..3cca1ee367 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -895,7 +895,7 @@ def event_listeners(self): See :mod:`~pymongo.monitoring` for details. """ - return self._event_listeners.event_listeners + return self._event_listeners.event_listeners() @property def topology_description(self): diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 325ec05544..ba9bbe8128 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1318,10 +1318,11 @@ def enabled_for_cmap(self): def event_listeners(self): """List of registered event listeners.""" - return (self.__command_listeners[:], - self.__server_heartbeat_listeners[:], - self.__server_listeners[:], - self.__topology_listeners[:]) + return (self.__command_listeners[:] + + self.__server_heartbeat_listeners[:] + + self.__server_listeners[:] + + self.__topology_listeners[:] + + self.__cmap_listeners) def publish_command_start(self, command, database_name, request_id, connection_id, op_id=None, diff --git a/test/test_client.py b/test/test_client.py index 288c697b2e..edd459eb33 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -35,7 +35,7 @@ from bson.son import SON from bson.tz_util import utc import pymongo -from pymongo import message, monitoring +from pymongo import event_loggers, message, monitoring from pymongo.command_cursor import CommandCursor from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD @@ -463,6 +463,17 @@ def test_uri_security_options(self): with self.assertRaises(InvalidURI): MongoClient(ssl=True, tls=False) + def test_event_listeners(self): + c = MongoClient(event_listeners=[], connect=False) + self.assertEqual(c.event_listeners, []) + listeners = [event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger()] + c = MongoClient(event_listeners=listeners, connect=False) + self.assertEqual(c.event_listeners, listeners) + class TestClient(IntegrationTest): def test_multiple_uris(self): diff --git a/test/test_session.py b/test/test_session.py index c8c80069a7..9ab0c349f7 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -104,7 +104,7 @@ def tearDown(self): self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): - listener = client.event_listeners()[0][0] + listener = client.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: @@ -626,7 +626,7 @@ def test_command_cursor_limit_reached(self): lambda cursor: list(cursor)) def _test_unacknowledged_ops(self, client, *ops): - listener = client.event_listeners()[0][0] + listener = client.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: From 468427ddd4847b57caa9adca5b65377664879c08 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 25 Oct 2021 12:57:18 -0700 Subject: [PATCH 0495/2111] PYTHON-2926 Unskip test_readConcern_available_with_out_stage on 5.1+ #762 --- test/crud/unified/aggregate-out-readConcern.json | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/crud/unified/aggregate-out-readConcern.json b/test/crud/unified/aggregate-out-readConcern.json index ca1cff2069..e293457c1c 100644 --- a/test/crud/unified/aggregate-out-readConcern.json +++ b/test/crud/unified/aggregate-out-readConcern.json @@ -262,11 +262,6 @@ }, { "description": "readConcern available with out stage", - "runOnRequirements": [ - { - "maxServerVersion": "5.0.99" - } - ], "operations": [ { "object": "collection_readConcern_available", From eabd2235ba1ebc24c26109b11477f59292f367dd Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 26 Oct 2021 14:24:27 -0700 Subject: [PATCH 0496/2111] PYTHON-2859 Add BSON Binary subtype 7 (#763) --- bson/binary.py | 6 ++++++ doc/api/bson/binary.rst | 1 + 2 files changed, 7 insertions(+) diff --git a/bson/binary.py b/bson/binary.py index dd12f56e20..50cfbd8439 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -169,6 +169,12 @@ class UuidRepresentation: """BSON binary subtype for an MD5 hash. """ +COLUMN_SUBTYPE = 7 +"""BSON binary subtype for columns. + +.. versionadded:: 4.0 +""" + USER_DEFINED_SUBTYPE = 128 """BSON binary subtype for any user defined structure. """ diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index a754d43d52..b9673d70d9 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -14,6 +14,7 @@ .. autodata:: JAVA_LEGACY .. autodata:: CSHARP_LEGACY .. autodata:: MD5_SUBTYPE + .. autodata:: COLUMN_SUBTYPE .. autodata:: USER_DEFINED_SUBTYPE .. autoclass:: UuidRepresentation From bfa5aafb34130677956fd39a8da3b67855b9636d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 26 Oct 2021 14:47:51 -0700 Subject: [PATCH 0497/2111] PYTHON-2832 Provide options to limit number of mongos servers used in connecting to sharded clusters (#754) --- doc/changelog.rst | 5 + pymongo/common.py | 3 +- pymongo/mongo_client.py | 10 +- pymongo/settings.py | 5 +- pymongo/srv_resolver.py | 9 +- pymongo/topology_description.py | 20 ++- pymongo/uri_parser.py | 18 ++- ...-conflicts_with_loadBalanced-true-txt.json | 7 ++ ...osts-conflicts_with_loadBalanced-true.json | 7 ++ .../load-balanced/srvMaxHosts-zero-txt.json | 14 +++ .../load-balanced/srvMaxHosts-zero.json | 14 +++ ...axHosts-conflicts_with_replicaSet-txt.json | 7 ++ ...srvMaxHosts-conflicts_with_replicaSet.json | 7 ++ .../srvMaxHosts-equal_to_srv_records.json | 17 +++ .../srvMaxHosts-greater_than_srv_records.json | 16 +++ .../srvMaxHosts-invalid_integer.json | 7 ++ .../replica-set/srvMaxHosts-invalid_type.json | 7 ++ .../srvMaxHosts-less_than_srv_records.json | 13 ++ .../replica-set/srvMaxHosts-zero-txt.json | 17 +++ .../replica-set/srvMaxHosts-zero.json | 17 +++ .../srvMaxHosts-equal_to_srv_records.json | 16 +++ .../srvMaxHosts-greater_than_srv_records.json | 15 +++ .../sharded/srvMaxHosts-invalid_integer.json | 7 ++ .../sharded/srvMaxHosts-invalid_type.json | 7 ++ .../srvMaxHosts-less_than_srv_records.json | 9 ++ .../sharded/srvMaxHosts-zero.json | 15 +++ test/test_client.py | 16 ++- test/test_dns.py | 40 ++++-- test/test_srv_polling.py | 75 +++++++++-- test/uri_options/srv-options.json | 116 ++++++++++++++++++ test/uri_options/srv-service-name-option.json | 24 ---- 31 files changed, 501 insertions(+), 59 deletions(-) create mode 100644 test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json create mode 100644 test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json create mode 100644 test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json create mode 100644 test/srv_seedlist/load-balanced/srvMaxHosts-zero.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json create mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-zero.json create mode 100644 test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json create mode 100644 test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json create mode 100644 test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json create mode 100644 test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json create mode 100644 test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json create mode 100644 test/srv_seedlist/sharded/srvMaxHosts-zero.json create mode 100644 test/uri_options/srv-options.json delete mode 100644 test/uri_options/srv-service-name-option.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 1213634ce7..3b3b664e91 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -179,6 +179,11 @@ Notable improvements - Enhanced connection pooling to create connections more efficiently and avoid connection storms. +- :class:`~pymongo.mongo_client.MongoClient` now accepts a URI and keyword + argument `srvMaxHosts` that limits the number of mongos-like hosts a client + will connect to. More specifically, when a mongodb+srv:// connection string + resolves to more than `srvMaxHosts` number of hosts, the client will randomly + choose a `srvMaxHosts` sized subset of hosts. Issues Resolved ............... diff --git a/pymongo/common.py b/pymongo/common.py index d77dd7edd1..6d9ce0cd9e 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -629,7 +629,8 @@ def validate_tzinfo(dummy, value): 'w': validate_non_negative_int_or_basestring, 'wtimeoutms': validate_non_negative_integer, 'zlibcompressionlevel': validate_zlib_compression_level, - 'srvservicename': validate_string + 'srvservicename': validate_string, + 'srvmaxhosts': validate_non_negative_integer } # Dictionary where keys are the names of URI options specific to pymongo, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 3cca1ee367..7dca8bb9c9 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -652,8 +652,8 @@ def __init__( dbase = None opts = common._CaseInsensitiveDictionary() fqdn = None - srv_service_name = keyword_opts.get("srvservicename", None) - + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") if len([h for h in host if "/" in h]) > 1: raise ConfigurationError("host must not contain multiple MongoDB " "URIs") @@ -669,7 +669,9 @@ def __init__( keyword_opts.cased_key("connecttimeoutms"), timeout) res = uri_parser.parse_uri( entity, port, validate=True, warn=True, normalize=False, - connect_timeout=timeout, srv_service_name=srv_service_name) + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -703,6 +705,7 @@ def __init__( if srv_service_name is None: srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") # Handle security-option conflicts in combined options. opts = _handle_security_options(opts) # Normalize combined options. @@ -745,6 +748,7 @@ def __init__( srv_service_name=srv_service_name, direct_connection=options.direct_connection, load_balanced=options.load_balanced, + srv_max_hosts=srv_max_hosts ) self._topology = Topology(self._topology_settings) diff --git a/pymongo/settings.py b/pymongo/settings.py index 7e9d393acc..e9e28c13ac 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -41,7 +41,8 @@ def __init__(self, fqdn=None, srv_service_name=common.SRV_SERVICE_NAME, direct_connection=False, - load_balanced=None): + load_balanced=None, + srv_max_hosts=0): """Represent MongoClient's configuration. Take a list of (host, port) pairs and optional replica set name. @@ -63,7 +64,7 @@ def __init__(self, self._fqdn = fqdn self._srv_service_name = srv_service_name self._heartbeat_frequency = heartbeat_frequency - + self._srv_max_hosts = srv_max_hosts or 0 self._direct = direct_connection self._load_balanced = load_balanced diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index e3f8f330f2..69e075aec4 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -15,6 +15,7 @@ """Support for resolving hosts and options from mongodb+srv:// URIs.""" import ipaddress +import random try: from dns import resolver @@ -25,7 +26,6 @@ from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError - # dnspython can return bytes or str from various parts # of its API depending on version. We always want str. def maybe_decode(text): @@ -48,11 +48,11 @@ def _resolve(*args, **kwargs): class _SrvResolver(object): def __init__(self, fqdn, - connect_timeout, srv_service_name): + connect_timeout, srv_service_name, srv_max_hosts=0): self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT - + self.__srv_max_hosts = srv_max_hosts or 0 # Validate the fully qualified domain name. try: ipaddress.ip_address(fqdn) @@ -111,7 +111,8 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) if self.__plist != nlist: raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) - + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) return results, nodes def get_hosts(self): diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index c6b81b5384..19a1681faf 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -15,6 +15,7 @@ """Represent a deployment of MongoDB servers.""" from collections import namedtuple +from random import sample from pymongo import common from pymongo.errors import ConfigurationError @@ -223,6 +224,10 @@ def common_wire_version(self): def heartbeat_frequency(self): return self._topology_settings.heartbeat_frequency + @property + def srv_max_hosts(self): + return self._topology_settings._srv_max_hosts + def apply_selector(self, selector, address, custom_selector=None): def apply_local_threshold(selection): @@ -446,16 +451,23 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): if set(sds.keys()) == set(seedlist): return topology_description - # Add SDs corresponding to servers recently added to the SRV record. - for address in seedlist: - if address not in sds: - sds[address] = ServerDescription(address) # Remove SDs corresponding to servers no longer part of the SRV record. for address in list(sds.keys()): if address not in seedlist: sds.pop(address) + if topology_description.srv_max_hosts != 0: + new_hosts = set(seedlist) - set(sds.keys()) + n_to_add = topology_description.srv_max_hosts - len(sds) + if n_to_add > 0: + seedlist = sample(new_hosts, min(n_to_add, len(new_hosts))) + else: + seedlist = [] + # Add SDs corresponding to servers recently added to the SRV record. + for address in seedlist: + if address not in sds: + sds[address] = ServerDescription(address) return TopologyDescription( topology_description.topology_type, sds, diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 46c5eecd9e..5d97eb4837 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -393,7 +393,8 @@ def _check_options(nodes, options): def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, - normalize=True, connect_timeout=None, srv_service_name=None): + normalize=True, connect_timeout=None, srv_service_name=None, + srv_max_hosts=None): """Parse and validate a MongoDB URI. Returns a dict of the form:: @@ -494,10 +495,8 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, if opts: options.update(split_options(opts, validate, warn, normalize)) - if srv_service_name is None: srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) - if '@' in host_part: userinfo, _, hosts = host_part.rpartition('@') user, passwd = parse_userinfo(userinfo) @@ -510,7 +509,7 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, hosts = unquote_plus(hosts) fqdn = None - + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: if options.get('directConnection'): raise ConfigurationError( @@ -529,7 +528,8 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name) + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, + srv_max_hosts) nodes = dns_resolver.get_hosts() dns_options = dns_resolver.get_options() if dns_options: @@ -542,11 +542,19 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI( + "You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") if "tls" not in options and "ssl" not in options: options["tls"] = True if validate else 'true' elif not is_srv and options.get("srvServiceName") is not None: raise ConfigurationError("The srvServiceName option is only allowed " "with 'mongodb+srv://' URIs") + elif not is_srv and srv_max_hosts: + raise ConfigurationError("The srvMaxHosts option is only allowed " + "with 'mongodb+srv://' URIs") else: nodes = split_hosts(hosts, default_port=default_port) diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json new file mode 100644 index 0000000000..a7600a8a7b --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test20.test.build.10gen.cc/?srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with loadBalanced=true (TXT)" +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json new file mode 100644 index 0000000000..d03a174b1e --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with loadBalanced=true" +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json new file mode 100644 index 0000000000..8d48b5bbb9 --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test20.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017" + ], + "options": { + "loadBalanced": true, + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json new file mode 100644 index 0000000000..2382fccf85 --- /dev/null +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -0,0 +1,14 @@ +{ + "uri": "mongodb+srv://test3.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017" + ], + "options": { + "loadBalanced": true, + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json new file mode 100644 index 0000000000..6de1e37fa5 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet-txt.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with replicaSet option (TXT)" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json new file mode 100644 index 0000000000..f968757502 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-conflicts_with_replicaSet.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because positive integer for srvMaxHosts conflicts with replicaSet option" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json new file mode 100644 index 0000000000..d9765ac663 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "numSeeds": 2, + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 2, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json new file mode 100644 index 0000000000..494bb87687 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=3", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 3, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json new file mode 100644 index 0000000000..5ba1a3b540 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=-1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because srvMaxHosts is not greater than or equal to zero" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json new file mode 100644 index 0000000000..79e75b9b15 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=foo", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because srvMaxHosts is not an integer" +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json new file mode 100644 index 0000000000..66a5e90dad --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json @@ -0,0 +1,13 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", + "numSeeds": 1, + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "srvMaxHosts": 1, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json new file mode 100644 index 0000000000..241a901c64 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test5.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "authSource": "thisDB", + "replicaSet": "repl0", + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json new file mode 100644 index 0000000000..c68610a201 --- /dev/null +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json @@ -0,0 +1,17 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json new file mode 100644 index 0000000000..46390726f0 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "numSeeds": 2, + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 2, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json new file mode 100644 index 0000000000..e02d72bf28 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=3", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 3, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json b/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json new file mode 100644 index 0000000000..0939624fc3 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=-1", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because srvMaxHosts is not greater than or equal to zero" +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json b/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json new file mode 100644 index 0000000000..c228d26612 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json @@ -0,0 +1,7 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=foo", + "seeds": [], + "hosts": [], + "error": true, + "comment": "Should fail because srvMaxHosts is not an integer" +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json new file mode 100644 index 0000000000..fdcc1692c0 --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json @@ -0,0 +1,9 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", + "numSeeds": 1, + "numHosts": 1, + "options": { + "srvMaxHosts": 1, + "ssl": true + } +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-zero.json b/test/srv_seedlist/sharded/srvMaxHosts-zero.json new file mode 100644 index 0000000000..10ab9e656d --- /dev/null +++ b/test/srv_seedlist/sharded/srvMaxHosts-zero.json @@ -0,0 +1,15 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "options": { + "srvMaxHosts": 0, + "ssl": true + } +} diff --git a/test/test_client.py b/test/test_client.py index edd459eb33..745e8d6f22 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -65,7 +65,7 @@ from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.topology import _ErrorContext -from pymongo.topology_description import TopologyDescription +from pymongo.topology_description import TopologyDescription, _updated_topology_description_srv_polling from pymongo.write_concern import WriteConcern from test import (client_context, client_knobs, @@ -1641,6 +1641,20 @@ def test_service_name_from_kwargs(self): self.assertEqual(client._topology_settings._srv_service_name, 'customname') + def test_srv_max_hosts_kwarg(self): + client = MongoClient( + 'mongodb+srv://test1.test.build.10gen.cc/') + self.assertGreater( + len(client.topology_description.server_descriptions()), 1) + client = MongoClient( + 'mongodb+srv://test1.test.build.10gen.cc/', srvmaxhosts=1) + self.assertEqual( + len(client.topology_description.server_descriptions()), 1) + client = MongoClient( + 'mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1', + srvmaxhosts=2) + self.assertEqual( + len(client.topology_description.server_descriptions()), 2) class TestExhaustCursor(IntegrationTest): diff --git a/test/test_dns.py b/test/test_dns.py index 91f8750d8b..8404c2aa69 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -50,14 +50,27 @@ def setUp(self): pass +class TestDNSSharded(unittest.TestCase): + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'srv_seedlist', 'sharded') + load_balanced = False + + @client_context.require_mongos + def setUp(self): + pass + + def create_test(test_case): def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") uri = test_case['uri'] - seeds = test_case['seeds'] - hosts = test_case['hosts'] + seeds = test_case.get('seeds') + num_seeds = test_case.get('numSeeds', len(seeds or [])) + hosts = test_case.get('hosts') + num_hosts = test_case.get("numHosts", len(hosts or [])) + options = test_case.get('options', {}) if 'ssl' in options: options['tls'] = options.pop('ssl') @@ -75,9 +88,12 @@ def run_test(self): if hosts: hosts = frozenset(split_hosts(','.join(hosts))) - if seeds: + if seeds or num_seeds: result = parse_uri(uri, validate=True) - self.assertEqual(sorted(result['nodelist']), sorted(seeds)) + if seeds is not None: + self.assertEqual(sorted(result['nodelist']), sorted(seeds)) + if num_seeds is not None: + self.assertEqual(len(result['nodelist']), num_seeds) if options: opts = result['options'] if 'readpreferencetags' in opts: @@ -106,9 +122,18 @@ def run_test(self): copts['tlsAllowInvalidHostnames'] = True client = MongoClient(uri, **copts) - wait_until( - lambda: hosts == client.nodes, - 'match test hosts to client nodes') + if num_seeds is not None: + self.assertEqual(len(client._topology_settings.seeds), + num_seeds) + if hosts is not None: + wait_until( + lambda: hosts == client.nodes, + 'match test hosts to client nodes') + if num_hosts is not None: + wait_until(lambda: num_hosts == len(client.nodes), + "wait to connect to num_hosts") + # XXX: we should block until SRV poller runs at least once + # and re-run these assertions. else: try: parse_uri(uri) @@ -130,6 +155,7 @@ def create_tests(cls): create_tests(TestDNSRepl) create_tests(TestDNSLoadBalanced) +create_tests(TestDNSSharded) class TestParsingErrors(unittest.TestCase): diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 1908103476..030dcd27f6 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -35,11 +35,11 @@ class SrvPollingKnobs(object): def __init__(self, ttl_time=None, min_srv_rescan_interval=None, - dns_resolver_nodelist_response=None, + nodelist_callback=None, count_resolver_calls=False): self.ttl_time = ttl_time self.min_srv_rescan_interval = min_srv_rescan_interval - self.dns_resolver_nodelist_response = dns_resolver_nodelist_response + self.nodelist_callback = nodelist_callback self.count_resolver_calls = count_resolver_calls self.old_min_srv_rescan_interval = None @@ -55,8 +55,8 @@ def enable(self): def mock_get_hosts_and_min_ttl(resolver, *args): nodes, ttl = self.old_dns_resolver_response(resolver) - if self.dns_resolver_nodelist_response is not None: - nodes = self.dns_resolver_nodelist_response() + if self.nodelist_callback is not None: + nodes = self.nodelist_callback() if self.ttl_time is not None: ttl = self.ttl_time return nodes, ttl @@ -113,7 +113,8 @@ def predicate(): if set(expected_nodelist) == set(nodelist): return True return False - wait_until(predicate, "see expected nodelist", timeout=100*WAIT_TIME) + wait_until(predicate, "see expected nodelist", + timeout=100*WAIT_TIME) def assert_nodelist_nochange(self, expected_nodelist, client): """Check if the client._topology ever deviates from seeing all nodes @@ -154,7 +155,7 @@ def dns_resolver_response(): self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. with SrvPollingKnobs( - dns_resolver_nodelist_response=dns_resolver_response, + nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls): assertion_method(expected_response, client) @@ -207,7 +208,7 @@ def final_callback(): with SrvPollingKnobs( ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - dns_resolver_nodelist_response=initial_callback, + nodelist_callback=initial_callback, count_resolver_calls=True): # Client uses unpatched method to get initial nodelist client = MongoClient(self.CONNECTION_STRING) @@ -216,7 +217,7 @@ def final_callback(): with SrvPollingKnobs( ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - dns_resolver_nodelist_response=final_callback): + nodelist_callback=final_callback): # Nodelist should reflect new valid DNS resolver response. self.assert_nodelist_change(response_final, client) @@ -230,6 +231,64 @@ def erroring_seedlist(): raise ConfigurationError self._test_recover_from_initial(erroring_seedlist) + def test_10_all_dns_selected(self): + response = [("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020)] + + def nodelist_callback(): + return response + with SrvPollingKnobs(ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=0) + self.addCleanup(client.close) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + + def test_11_all_dns_selected(self): + response = [("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020)] + + def nodelist_callback(): + return response + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) + self.addCleanup(client.close) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + + def test_12_new_dns_randomly_selected(self): + response = [("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017)] + + def nodelist_callback(): + return response + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) + self.addCleanup(client.close) + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + sleep(2*common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set( + client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), + final_topology) + self.assertEqual(len(final_topology), 2) + + def test_does_not_flipflop(self): + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=1) + self.addCleanup(client.close) + old = set(client.topology_description.server_descriptions()) + sleep(4*WAIT_TIME) + new = set(client.topology_description.server_descriptions()) + self.assertSetEqual(old, new) + if __name__ == '__main__': unittest.main() diff --git a/test/uri_options/srv-options.json b/test/uri_options/srv-options.json new file mode 100644 index 0000000000..ffc356f12f --- /dev/null +++ b/test/uri_options/srv-options.json @@ -0,0 +1,116 @@ +{ + "tests": [ + { + "description": "SRV URI with custom srvServiceName", + "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "srvServiceName": "customname" + } + }, + { + "description": "Non-SRV URI with custom srvServiceName", + "uri": "mongodb://example.com/?srvServiceName=customname", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "srvMaxHosts": 2 + } + }, + { + "description": "SRV URI with negative integer for srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with invalid type for srvMaxHosts", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=foo", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "Non-SRV URI with srvMaxHosts", + "uri": "mongodb://example.com/?srvMaxHosts=2", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with positive srvMaxHosts and replicaSet", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&replicaSet=foo", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with positive srvMaxHosts and loadBalanced=true", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&loadBalanced=true", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "SRV URI with positive srvMaxHosts and loadBalanced=false", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=2&loadBalanced=false", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": false, + "srvMaxHosts": 2 + } + }, + { + "description": "SRV URI with srvMaxHosts=0 and replicaSet", + "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=0&replicaSet=foo", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "replicaSet": "foo", + "srvMaxHosts": 0 + } + }, + { + "description": "SRV URI with srvMaxHosts=0 and loadBalanced=true", + "uri": "mongodb+srv://test3.test.build.10gen.cc/?srvMaxHosts=0&loadBalanced=true", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "loadBalanced": true, + "srvMaxHosts": 0 + } + } + ] +} diff --git a/test/uri_options/srv-service-name-option.json b/test/uri_options/srv-service-name-option.json deleted file mode 100644 index 049a35bc70..0000000000 --- a/test/uri_options/srv-service-name-option.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "tests": [ - { - "description": "SRV URI with custom srvServiceName", - "uri": "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname", - "valid": true, - "warning": false, - "hosts": null, - "auth": null, - "options": { - "srvServiceName": "customname" - } - }, - { - "description": "Non-SRV URI with custom srvServiceName", - "uri": "mongodb://example.com/?srvServiceName=customname", - "valid": false, - "warning": true, - "hosts": null, - "auth": null, - "options": {} - } - ] -} From ca46cb20bd991ddebcaec534973f6e7cff7f1782 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 26 Oct 2021 17:49:22 -0700 Subject: [PATCH 0498/2111] PYTHON-2948 Add prose test for SRV polling with a custom service name (#764) --- test/test_srv_polling.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 030dcd27f6..1e99277692 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -289,6 +289,24 @@ def test_does_not_flipflop(self): new = set(client.topology_description.server_descriptions()) self.assertSetEqual(old, new) + def test_srv_service_name(self): + # Construct a valid final response callback distinct from base. + response = [ + ("localhost.test.build.10gen.cc.", 27019), + ("localhost.test.build.10gen.cc.", 27020) + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = MongoClient( + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" + "=customname") + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + self.assert_nodelist_change(response, client) + if __name__ == '__main__': unittest.main() From 3235f9659054239116a30e5af7ba4d3839183b3c Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 27 Oct 2021 12:33:48 -0700 Subject: [PATCH 0499/2111] PYTHON-2911 Bump maxWireVersion to 14 for MongoDB 5.1 (#765) --- pymongo/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 6d9ce0cd9e..7f2bda2d05 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -49,7 +49,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 13 +MAX_SUPPORTED_WIRE_VERSION = 14 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 From 9844d3dc038c8f33b3dc78e0cc0b33dbdba18f24 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 28 Oct 2021 16:05:32 -0700 Subject: [PATCH 0500/2111] PYTHON-2972 Test Failure - test_srv_max_hosts_kwarg when dnspython is not installed (#768) --- test/test_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/test_client.py b/test/test_client.py index 745e8d6f22..bad4ed9184 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1641,6 +1641,8 @@ def test_service_name_from_kwargs(self): self.assertEqual(client._topology_settings._srv_service_name, 'customname') + @unittest.skipUnless( + _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_srv_max_hosts_kwarg(self): client = MongoClient( 'mongodb+srv://test1.test.build.10gen.cc/') From 3c3a85d1bc8ccaa80f6ef16d00d2a1d448bdb5fe Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 28 Oct 2021 17:12:30 -0700 Subject: [PATCH 0501/2111] PYTHON-2817 Add .readthedocs.yaml config file (#769) --- .readthedocs.yaml | 19 +++++++++++++++++++ doc/docs-requirements.txt | 3 +++ 2 files changed, 22 insertions(+) create mode 100644 .readthedocs.yaml create mode 100644 doc/docs-requirements.txt diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000000..358e7502f3 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,19 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the doc/ directory with Sphinx +sphinx: + configuration: doc/conf.py + +# Set the version of Python and requirements required to build the docs. +python: + version: 3.8 + install: + # Install pymongo itself. + - method: pip + path: . + - requirements: doc/docs-requirements.txt diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt new file mode 100644 index 0000000000..ce5d1abf36 --- /dev/null +++ b/doc/docs-requirements.txt @@ -0,0 +1,3 @@ +Sphinx~=4.2 +sphinx_rtd_theme~=0.5 +readthedocs-sphinx-search~=0.1 From 42324c69cf1028d4530c7ea3b9f20e723baae713 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 29 Oct 2021 16:30:55 -0700 Subject: [PATCH 0502/2111] PYTHON-2973 Revert back to using quote_plus/unquote_plus (#767) --- .evergreen/config.yml | 4 ++-- doc/changelog.rst | 7 ------- doc/examples/authentication.rst | 4 ++-- doc/migrate-to-pymongo4.rst | 10 ---------- pymongo/auth.py | 3 +++ pymongo/uri_parser.py | 12 +++++------- test/test_ssl.py | 8 ++++---- test/test_uri_parser.py | 11 ++++++++++- test/test_uri_spec.py | 3 +++ 9 files changed, 29 insertions(+), 33 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 363b15ff57..f106528185 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -523,7 +523,7 @@ functions: silent: true script: | cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote(sys.argv[1]))"' + alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' USER=$(urlencode ${iam_auth_ecs_account}) PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) MONGODB_URI="mongodb://$USER:$PASS@localhost" @@ -554,7 +554,7 @@ functions: script: | # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote(sys.argv[1]))"' + alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' alias jsonkey='${python3_binary} -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' USER=$(jsonkey AccessKeyId) USER=$(urlencode $USER) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3b3b664e91..acb0949d82 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -166,13 +166,6 @@ Breaking Changes in 4.0 :exc:`~pymongo.errors.InvalidURI` exception when it encounters unescaped percent signs in username and password when parsing MongoDB URIs. -- :class:`~pymongo.mongo_client.MongoClient` now uses - :py::func:`urllib.parse.unquote` rather than - :py:func:`urllib.parse.unquote_plus`, - meaning that plus signs ("+") are no longer converted to spaces (" "). This - means that if you were previously quoting your login information using - quote_plus, you must now switch to quote. Additionally, be aware that this - change only occurs when parsing login information from the URI. Notable improvements .................... diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index bd6036e460..1e0f133a5a 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -15,10 +15,10 @@ Username and password must be percent-escaped with >>> from pymongo import MongoClient >>> import urllib.parse - >>> username = urllib.parse.quote('user') + >>> username = urllib.parse.quote_plus('user') >>> username 'user' - >>> password = urllib.parse.quote('pass/word') + >>> password = urllib.parse.quote_plus('pass/word') >>> password 'pass%2Fword' >>> MongoClient('mongodb://%s:%s@127.0.0.1' % (username, password)) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 994f8450af..8a07346700 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -200,16 +200,6 @@ MongoClient raises exception when given unescaped percent sign in login info :exc:`~pymongo.errors.InvalidURI` exception when it encounters unescaped percent signs in username and password. -MongoClient uses `unquote` rather than `unquote_plus` for login info -.................................................................... - -:class:`~pymongo.mongo_client.MongoClient` now uses -:py:func:`urllib.parse.unquote` rather than -:py:func:`urllib.parse.unquote_plus`, meaning that space characters are no -longer converted to plus signs. This means that if you were previously -quoting your login information using :py:func:`urllib.parse.quote_plus`, you -must now switch to :py:func:`urllib.parse.quote`. - Database -------- diff --git a/pymongo/auth.py b/pymongo/auth.py index 0fec2c7753..b946980865 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -319,6 +319,9 @@ def _authenticate_gssapi(credentials, sock_info): if password is not None: if _USE_PRINCIPAL: + # Note that, though we use unquote_plus for unquoting URI + # options, we use quote here. Microsoft's UrlUnescape (used + # by WinKerberos) doesn't support +. principal = ":".join((quote(username), quote(password))) result, ctx = kerberos.authGSSClientInit( service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 5d97eb4837..23db48bf4a 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -18,7 +18,7 @@ import warnings import sys -from urllib.parse import unquote, unquote_plus +from urllib.parse import unquote_plus from pymongo.common import ( SRV_SERVICE_NAME, @@ -47,7 +47,7 @@ def _unquoted_percent(s): sub = s[i:i+3] # If unquoting yields the same string this means there was an # unquoted %. - if unquote(sub) == sub: + if unquote_plus(sub) == sub: return True return False @@ -65,14 +65,14 @@ def parse_userinfo(userinfo): if ('@' in userinfo or userinfo.count(':') > 1 or _unquoted_percent(userinfo)): raise InvalidURI("Username and password must be escaped according to " - "RFC 3986, use urllib.parse.quote") + "RFC 3986, use urllib.parse.quote_plus") user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. if not user: raise InvalidURI("The empty string is not valid username.") - return unquote(user), unquote(passwd) + return unquote_plus(user), unquote_plus(passwd) def parse_ipv6_literal_host(entity, default_port): @@ -430,9 +430,7 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, .. versionchanged:: 4.0 To better follow RFC 3986, unquoted percent signs ("%") are no longer - supported and plus signs ("+") are no longer decoded into spaces (" ") - when decoding username and password. To avoid these issues, use - :py:func:`urllib.parse.quote` when building the URI. + supported. .. versionchanged:: 3.9 Added the ``normalize`` parameter. diff --git a/test/test_ssl.py b/test/test_ssl.py index df44c31dc9..0162eb3a0d 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] -from urllib.parse import quote +from urllib.parse import quote_plus from pymongo import MongoClient, ssl_support from pymongo.errors import (ConfigurationError, @@ -526,7 +526,7 @@ def test_mongodb_x509_auth(self): uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( - quote(MONGODB_X509_USERNAME), host, port)) + quote_plus(MONGODB_X509_USERNAME), host, port)) client = MongoClient(uri, ssl=True, tlsAllowInvalidCertificates=True, @@ -546,7 +546,7 @@ def test_mongodb_x509_auth(self): # Auth should fail if username and certificate do not match uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( - quote("not the username"), host, port)) + quote_plus("not the username"), host, port)) bad_client = MongoClient( uri, ssl=True, tlsAllowInvalidCertificates=True, @@ -571,7 +571,7 @@ def test_mongodb_x509_auth(self): # Invalid certificate (using CA certificate as client certificate) uri = ('mongodb://%s@%s:%d/?authMechanism=' 'MONGODB-X509' % ( - quote(MONGODB_X509_USERNAME), host, port)) + quote_plus(MONGODB_X509_USERNAME), host, port)) try: connected(MongoClient(uri, ssl=True, diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 33c7276046..7e00bd9760 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -17,6 +17,7 @@ import copy import sys import warnings +from urllib.parse import quote_plus sys.path[0:0] = [""] @@ -43,7 +44,7 @@ def test_validate_userinfo(self): self.assertTrue(parse_userinfo('user:password')) self.assertEqual(('us:r', 'p@ssword'), parse_userinfo('us%3Ar:p%40ssword')) - self.assertEqual(('us+er', 'p+ssword'), + self.assertEqual(('us er', 'p ssword'), parse_userinfo('us+er:p+ssword')) self.assertEqual(('us er', 'p ssword'), parse_userinfo('us%20er:p%20ssword')) @@ -512,6 +513,14 @@ def test_redact_AWS_SESSION_TOKEN(self): 'quote_plus?'): parse_uri(uri) + def test_special_chars(self): + user = "user@ /9+:?~!$&'()*+,;=" + pwd = "pwd@ /9+:?~!$&'()*+,;=" + uri = 'mongodb://%s:%s@localhost' % (quote_plus(user), quote_plus(pwd)) + res = parse_uri(uri) + self.assertEqual(user, res['username']) + self.assertEqual(pwd, res['password']) + if __name__ == "__main__": unittest.main() diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 1bb5fd2f5b..59457b57ac 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -143,6 +143,9 @@ def run_scenario(self): options['database'] += "." + options['collection'] for elm in auth: if auth[elm] is not None: + # We have to do this because while the spec requires + # "+"->"+", unquote_plus does "+"->" " + options[elm] = options[elm].replace(" ", "+") self.assertEqual(auth[elm], options[elm], "Expected %s but got %s" % (auth[elm], options[elm])) From 695a90e75e6cf79f9a081478a63d20ba9b2da2fb Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 1 Nov 2021 21:17:28 +0200 Subject: [PATCH 0503/2111] PYTHON-2987 Fix test unicode repr on PyPy 7.3.7 (#770) --- test/test_errors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_errors.py b/test/test_errors.py index b70b0d5291..53c55f8167 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -45,8 +45,8 @@ def test_operation_failure(self): self.assertIn("full error", traceback.format_exc()) def _test_unicode_strs(self, exc): - if 'PyPy' in sys.version: - # PyPy displays unicode in repr differently. + if sys.implementation.name == 'pypy' and sys.implementation.version < (7, 3, 7): + # PyPy used to display unicode in repr differently. self.assertEqual("unicode \U0001f40d, full error: {" "'errmsg': 'unicode \\U0001f40d'}", str(exc)) else: From 8b2eb24c35689a6e6508cd694c2bd615d2fd20d8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 1 Nov 2021 14:26:47 -0700 Subject: [PATCH 0504/2111] PYTHON-2164 Remove client max_bson_size/max_message_size/max_write_batch_size (#766) Use the hello command instead: doc = client.admin.command('hello') max_bson_size = doc['maxBsonObjectSize'] max_message_size = doc['maxMessageSizeBytes'] max_write_batch_size = doc['maxWriteBatchSize'] Also add documentation for TopologyDescription.apply_selector. --- doc/api/pymongo/mongo_client.rst | 3 --- doc/changelog.rst | 3 +++ doc/migrate-to-pymongo4.rst | 23 ++++++++++++++++++ pymongo/mongo_client.py | 33 ------------------------- pymongo/topology_description.py | 41 ++++++++++++++++++++------------ test/__init__.py | 14 ++++++++++- test/test_bulk.py | 8 +++---- test/test_client.py | 16 ++----------- test/test_collection.py | 8 +++---- 9 files changed, 75 insertions(+), 74 deletions(-) diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index e48af01ad2..ba95e06478 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -26,9 +26,6 @@ .. autoattribute:: min_pool_size .. autoattribute:: max_idle_time_ms .. autoattribute:: nodes - .. autoattribute:: max_bson_size - .. autoattribute:: max_message_size - .. autoattribute:: max_write_batch_size .. autoattribute:: local_threshold_ms .. autoattribute:: server_selection_timeout .. autoattribute:: codec_options diff --git a/doc/changelog.rst b/doc/changelog.rst index acb0949d82..b5f32d21bf 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -33,6 +33,9 @@ Breaking Changes in 4.0 :meth:`pymongo.mongo_client.MongoClient.unlock`, and :attr:`pymongo.mongo_client.MongoClient.is_locked`. - Removed :meth:`pymongo.mongo_client.MongoClient.database_names`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_message_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. - Removed :meth:`pymongo.database.Database.eval`, :data:`pymongo.database.Database.system_js` and :class:`pymongo.database.SystemJS`. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 8a07346700..b830718050 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -173,6 +173,29 @@ can be changed to this:: names = client.list_database_names() +MongoClient.max_bson_size/max_message_size/max_write_batch_size are removed +........................................................................... + +Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`, +:attr:`pymongo.mongo_client.MongoClient.max_message_size`, and +:attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. These helpers +were incorrect when in ``loadBalanced=true mode`` and ambiguous in clusters +with mixed versions. Use the `hello command`_ to get the authoritative +value from the remote server instead. Code like this:: + + max_bson_size = client.max_bson_size + max_message_size = client.max_message_size + max_write_batch_size = client.max_write_batch_size + +can be changed to this:: + + doc = client.admin.command('hello') + max_bson_size = doc['maxBsonObjectSize'] + max_message_size = doc['maxMessageSizeBytes'] + max_write_batch_size = doc['maxWriteBatchSize'] + +.. _hello command: https://docs.mongodb.com/manual/reference/command/hello/ + ``tz_aware`` defaults to ``False`` .................................. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7dca8bb9c9..9c0695e6c0 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1053,39 +1053,6 @@ def nodes(self): description = self._topology.description return frozenset(s.address for s in description.known_servers) - @property - def max_bson_size(self): - """The largest BSON object the connected server accepts in bytes. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - """ - return self._server_property('max_bson_size') - - @property - def max_message_size(self): - """The largest message the connected server accepts in bytes. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - """ - return self._server_property('max_message_size') - - @property - def max_write_batch_size(self): - """The maxWriteBatchSize reported by the server. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - Returns a default value when connected to server versions prior to - MongoDB 2.6. - """ - return self._server_property('max_write_batch_size') - @property def local_threshold_ms(self): """The local threshold for this instance.""" diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 19a1681faf..d0100ff8b9 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -228,21 +228,32 @@ def heartbeat_frequency(self): def srv_max_hosts(self): return self._topology_settings._srv_max_hosts - def apply_selector(self, selector, address, custom_selector=None): + def _apply_local_threshold(self, selection): + if not selection: + return [] + # Round trip time in seconds. + fastest = min( + s.round_trip_time for s in selection.server_descriptions) + threshold = self._topology_settings.local_threshold_ms / 1000.0 + return [s for s in selection.server_descriptions + if (s.round_trip_time - fastest) <= threshold] + + def apply_selector(self, selector, address=None, custom_selector=None): + """List of servers matching the provided selector(s). - def apply_local_threshold(selection): - if not selection: - return [] - - settings = self._topology_settings - - # Round trip time in seconds. - fastest = min( - s.round_trip_time for s in selection.server_descriptions) - threshold = settings.local_threshold_ms / 1000.0 - return [s for s in selection.server_descriptions - if (s.round_trip_time - fastest) <= threshold] + :Parameters: + - `selector`: a callable that takes a Selection as input and returns + a Selection as output. For example, an instance of a read + preference from :mod:`~pymongo.read_preferences`. + - `address` (optional): A server address to select. + - `custom_selector` (optional): A callable that augments server + selection rules. Accepts a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + .. versionadded:: 3.4 + """ if getattr(selector, 'min_wire_version', 0): common_wv = self.common_wire_version if common_wv and common_wv < selector.min_wire_version: @@ -271,7 +282,7 @@ def apply_local_threshold(selection): if custom_selector is not None and selection: selection = selection.with_server_descriptions( custom_selector(selection.server_descriptions)) - return apply_local_threshold(selection) + return self._apply_local_threshold(selection) def has_readable_server(self, read_preference=ReadPreference.PRIMARY): """Does this topology have any readable servers available matching the @@ -288,7 +299,7 @@ def has_readable_server(self, read_preference=ReadPreference.PRIMARY): .. versionadded:: 3.4 """ common.validate_read_preference("read_preference", read_preference) - return any(self.apply_selector(read_preference, None)) + return any(self.apply_selector(read_preference)) def has_writable_server(self): """Does this topology have a writable server available? diff --git a/test/__init__.py b/test/__init__.py index 9e0e972d12..d9144e8623 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -239,6 +239,7 @@ def __init__(self): self.auth_enabled = False self.test_commands_enabled = False self.server_parameters = {} + self._hello = None self.is_mongos = False self.mongoses = [] self.is_rs = False @@ -274,7 +275,9 @@ def client_options(self): @property def hello(self): - return self.client.admin.command(HelloCompat.LEGACY_CMD) + if not self._hello: + self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello def _connect(self, host, port, **kwargs): # Jython takes a long time to connect. @@ -391,6 +394,7 @@ def _init_client(self): **self.default_client_options) # Get the authoritative hello result from the primary. + self._hello = None hello = self.hello nodes = [partition_node(node.lower()) for node in hello.get('hosts', [])] @@ -866,6 +870,14 @@ def requires_hint_with_min_max_queries(self): # Changed in SERVER-39567. return self.version.at_least(4, 1, 10) + @property + def max_bson_size(self): + return self.hello['maxBsonObjectSize'] + + @property + def max_write_batch_size(self): + return self.hello['maxWriteBatchSize'] + # Reusable client context client_context = ClientContext() diff --git a/test/test_bulk.py b/test/test_bulk.py index aa2dcab928..f93cd6c766 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -283,7 +283,7 @@ def test_upsert(self): def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. - n_docs = self.client.max_write_batch_size + 100 + n_docs = client_context.max_write_batch_size + 100 requests = [InsertOne({}) for _ in range(n_docs)] result = self.coll.bulk_write(requests, ordered=False) self.assertEqual(n_docs, result.inserted_count) @@ -344,7 +344,7 @@ def test_bulk_write_invalid_arguments(self): self.coll.bulk_write([{}]) def test_upsert_large(self): - big = 'a' * (client_context.client.max_bson_size - 37) + big = 'a' * (client_context.max_bson_size - 37) result = self.coll.bulk_write([ UpdateOne({'x': 1}, {'$set': {'s': big}}, upsert=True)]) self.assertEqualResponse( @@ -566,7 +566,7 @@ def test_multiple_error_unordered_batch(self): result) def test_large_inserts_ordered(self): - big = 'x' * self.coll.database.client.max_bson_size + big = 'x' * client_context.max_bson_size requests = [ InsertOne({'b': 1, 'a': 1}), InsertOne({'big': big}), @@ -599,7 +599,7 @@ def test_large_inserts_ordered(self): self.assertEqual(6, self.coll.count_documents({})) def test_large_inserts_unordered(self): - big = 'x' * self.coll.database.client.max_bson_size + big = 'x' * client_context.max_bson_size requests = [ InsertOne({'b': 1, 'a': 1}), InsertOne({'big': big}), diff --git a/test/test_client.py b/test/test_client.py index bad4ed9184..2b32dee971 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -640,15 +640,14 @@ def test_init_disconnected(self): c = rs_or_single_client(connect=False) self.assertEqual(c.codec_options, CodecOptions()) - self.assertIsInstance(c.max_bson_size, int) c = rs_or_single_client(connect=False) self.assertFalse(c.primary) self.assertFalse(c.secondaries) c = rs_or_single_client(connect=False) - self.assertIsInstance(c.max_write_batch_size, int) self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) - + self.assertIsNone(c.address) # PYTHON-2981 + c.admin.command('ping') # connect if client_context.is_rs: # The primary's host and port are from the replica set config. self.assertIsNotNone(c.address) @@ -1837,17 +1836,6 @@ def test(collection): lazy_client_trial(reset, find_one, test, self._get_client) - def test_max_bson_size(self): - c = self._get_client() - - # max_bson_size will cause the client to connect. - hello = c.db.command(HelloCompat.LEGACY_CMD) - self.assertEqual(hello['maxBsonObjectSize'], c.max_bson_size) - if 'maxMessageSizeBytes' in hello: - self.assertEqual( - hello['maxMessageSizeBytes'], - c.max_message_size) - class TestMongoClientFailover(MockClientTest): diff --git a/test/test_collection.py b/test/test_collection.py index e81776a6ac..bb3795c94a 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -851,7 +851,7 @@ def test_delete_many(self): lambda: 0 == db.test.count_documents({}), 'delete 2 documents') def test_command_document_too_large(self): - large = '*' * (self.client.max_bson_size + _COMMAND_OVERHEAD) + large = '*' * (client_context.max_bson_size + _COMMAND_OVERHEAD) coll = self.db.test self.assertRaises( DocumentTooLarge, coll.insert_one, {'data': large}) @@ -862,7 +862,7 @@ def test_command_document_too_large(self): DocumentTooLarge, coll.delete_one, {'data': large}) def test_write_large_document(self): - max_size = self.db.client.max_bson_size + max_size = client_context.max_bson_size half_size = int(max_size / 2) max_str = "x" * max_size half_str = "x" * half_size @@ -1879,7 +1879,7 @@ def test_min_query(self): def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. self.db.test.drop() - n_docs = self.client.max_write_batch_size + 100 + n_docs = client_context.max_write_batch_size + 100 self.db.test.insert_many([{} for _ in range(n_docs)]) self.assertEqual(n_docs, self.db.test.count_documents({})) self.db.test.drop() @@ -1888,7 +1888,7 @@ def test_insert_many_large_batch(self): # Tests legacy insert. db = self.client.test_insert_large_batch self.addCleanup(self.client.drop_database, 'test_insert_large_batch') - max_bson_size = self.client.max_bson_size + max_bson_size = client_context.max_bson_size # Write commands are limited to 16MB + 16k per batch big_string = 'x' * int(max_bson_size / 2) From 9f6c6a3061abf4d2d5f1549d85f3d6995565eba9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 1 Nov 2021 18:17:29 -0700 Subject: [PATCH 0505/2111] PYTHON-2990 Use https:// instead of unauthenticated git:// for git clone --- .evergreen/config.yml | 2 +- .evergreen/perf.yml | 2 +- doc/installation.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f106528185..6f5a19e478 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -97,7 +97,7 @@ functions: # If this was a patch build, doing a fresh clone would not actually test the patch cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS else - git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS fi echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 2f8f54e58d..3079eb9b0e 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -96,7 +96,7 @@ functions: # If this was a patch build, doing a fresh clone would not actually test the patch cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS else - git clone git://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS fi echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config diff --git a/doc/installation.rst b/doc/installation.rst index 72e478cfc4..a3d29c7f4f 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -83,7 +83,7 @@ If you'd rather install directly from the source (i.e. to stay on the bleeding edge), install the C extension dependencies then check out the latest source from GitHub and install the driver from the resulting tree:: - $ git clone git://github.com/mongodb/mongo-python-driver.git pymongo + $ git clone https://github.com/mongodb/mongo-python-driver.git pymongo $ cd pymongo/ $ python3 setup.py install From b342990934b55370ebde40d20ba6423e035b1961 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Nov 2021 11:21:33 -0700 Subject: [PATCH 0506/2111] PYTHON-2164 Add MongoClient.options, remove redundant properties (#772) --- doc/api/pymongo/client_options.rst | 7 +++ doc/api/pymongo/index.rst | 1 + doc/api/pymongo/mongo_client.rst | 7 +-- doc/api/pymongo/pool.rst | 5 +- doc/changelog.rst | 11 +++- doc/migrate-to-pymongo4.rst | 31 +++++++++++ pymongo/client_options.py | 18 ++++++- pymongo/encryption.py | 2 +- pymongo/mongo_client.py | 85 ++++-------------------------- pymongo/monitor.py | 2 +- pymongo/monitoring.py | 8 +-- pymongo/pool.py | 47 ++++++++++------- pymongo/topology.py | 6 +-- test/test_auth_spec.py | 2 +- test/test_client.py | 40 ++++++++------ test/test_mongos_load_balancing.py | 4 +- test/test_pooling.py | 4 +- test/test_read_preferences.py | 12 ++--- test/test_retryable_reads.py | 10 ++-- test/test_sdam_monitoring_spec.py | 2 +- test/test_session.py | 4 +- 21 files changed, 159 insertions(+), 149 deletions(-) create mode 100644 doc/api/pymongo/client_options.rst diff --git a/doc/api/pymongo/client_options.rst b/doc/api/pymongo/client_options.rst new file mode 100644 index 0000000000..3ffc10bad6 --- /dev/null +++ b/doc/api/pymongo/client_options.rst @@ -0,0 +1,7 @@ +:mod:`client_options` -- Read only configuration options for a MongoClient. +=========================================================================== + +.. automodule:: pymongo.client_options + + .. autoclass:: pymongo.client_options.ClientOptions() + :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 8a3a99b815..6e6e337950 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -29,6 +29,7 @@ Sub-modules: bulk change_stream + client_options client_session collation collection diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index ba95e06478..83dab27f2c 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -14,7 +14,6 @@ Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. - .. autoattribute:: event_listeners .. autoattribute:: topology_description .. autoattribute:: address .. autoattribute:: primary @@ -22,16 +21,12 @@ .. autoattribute:: arbiters .. autoattribute:: is_primary .. autoattribute:: is_mongos - .. autoattribute:: max_pool_size - .. autoattribute:: min_pool_size - .. autoattribute:: max_idle_time_ms .. autoattribute:: nodes - .. autoattribute:: local_threshold_ms - .. autoattribute:: server_selection_timeout .. autoattribute:: codec_options .. autoattribute:: read_preference .. autoattribute:: write_concern .. autoattribute:: read_concern + .. autoattribute:: options .. automethod:: start_session .. automethod:: list_databases .. automethod:: list_database_names diff --git a/doc/api/pymongo/pool.rst b/doc/api/pymongo/pool.rst index 4e37de4a35..78274e8f8b 100644 --- a/doc/api/pymongo/pool.rst +++ b/doc/api/pymongo/pool.rst @@ -2,5 +2,6 @@ ============================================================== .. automodule:: pymongo.pool - :synopsis: Pool module for use with a MongoDB client. - :members: + + .. autoclass:: pymongo.pool.PoolOptions() + :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index b5f32d21bf..31e5d5d1de 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -36,6 +36,13 @@ Breaking Changes in 4.0 - Removed :attr:`pymongo.mongo_client.MongoClient.max_bson_size`. - Removed :attr:`pymongo.mongo_client.MongoClient.max_message_size`. - Removed :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.event_listeners`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_pool_size`. +- Removed :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`. +- Removed :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`. +- Removed :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`. +- Removed :attr:`pymongo.mongo_client.MongoClient.retry_writes`. +- Removed :attr:`pymongo.mongo_client.MongoClient.retry_reads`. - Removed :meth:`pymongo.database.Database.eval`, :data:`pymongo.database.Database.system_js` and :class:`pymongo.database.SystemJS`. @@ -180,6 +187,8 @@ Notable improvements will connect to. More specifically, when a mongodb+srv:// connection string resolves to more than `srvMaxHosts` number of hosts, the client will randomly choose a `srvMaxHosts` sized subset of hosts. +- Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access + to a client's configuration options. Issues Resolved ............... @@ -2121,7 +2130,7 @@ Important new features: - The ``max_pool_size`` option for :class:`~pymongo.mongo_client.MongoClient` and :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` now actually caps the number of sockets the pool will open concurrently. - Once the pool has reached :attr:`~pymongo.mongo_client.MongoClient.max_pool_size` + Once the pool has reaches max_pool_size operations will block waiting for a socket to become available. If ``waitQueueTimeoutMS`` is set, an operation that blocks waiting for a socket will raise :exc:`~pymongo.errors.ConnectionFailure` after the timeout. By diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index b830718050..c0288c104e 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -196,6 +196,37 @@ can be changed to this:: .. _hello command: https://docs.mongodb.com/manual/reference/command/hello/ +MongoClient.event_listeners and other configuration option helpers are removed +.............................................................................. + +The following client configuration option helpers are removed: +- :attr:`pymongo.mongo_client.MongoClient.event_listeners`. +- :attr:`pymongo.mongo_client.MongoClient.max_pool_size`. +- :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`. +- :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`. +- :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`. +- :attr:`pymongo.mongo_client.MongoClient.retry_writes`. +- :attr:`pymongo.mongo_client.MongoClient.retry_reads`. + +These helpers have been replaced by +:attr:`pymongo.mongo_client.MongoClient.options`. Code like this:: + + client.event_listeners + client.local_threshold_ms + client.server_selection_timeout + client.max_pool_size + client.min_pool_size + client.max_idle_time_ms + +can be changed to this:: + + client.options.event_listeners + client.options.local_threshold_ms + client.options.server_selection_timeout + client.options.pool_options.max_pool_size + client.options.pool_options.min_pool_size + client.options.pool_options.max_idle_time_seconds + ``tz_aware`` defaults to ``False`` .................................. diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 77c3f85a25..845d4ef9a1 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -153,8 +153,12 @@ def _parse_pool_options(options): class ClientOptions(object): + """Read only configuration options for a MongoClient. - """ClientOptions""" + Should not be instantiated directly by application developers. Access + a client's options via :attr:`pymongo.mongo_client.MongoClient.options` + instead. + """ def __init__(self, username, password, database, options): self.__options = options @@ -200,7 +204,7 @@ def codec_options(self): return self.__codec_options @property - def credentials(self): + def _credentials(self): """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" return self.__credentials @@ -272,3 +276,13 @@ def auto_encryption_opts(self): def load_balanced(self): """True if the client was configured to connect to a load balancer.""" return self.__load_balanced + + @property + def event_listeners(self): + """The event listeners registered for this client. + + See :mod:`~pymongo.monitoring` for details. + + .. versionadded:: 4.0 + """ + return self.__pool_options._event_listeners.event_listeners() diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 2c3f8eb0f7..ad19b26426 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -261,7 +261,7 @@ def __init__(self, client, opts): self._internal_client = None def _get_internal_client(encrypter, mongo_client): - if mongo_client.max_pool_size is None: + if mongo_client.options.pool_options.max_pool_size is None: # Unlimited pool size, use the same client. return mongo_client # Else - limited pool size, use an internal client. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 9c0695e6c0..a308219cfb 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -722,14 +722,14 @@ def __init__( self.__lock = threading.Lock() self.__kill_cursors_queue = [] - self._event_listeners = options.pool_options.event_listeners + self._event_listeners = options.pool_options._event_listeners super(MongoClient, self).__init__(options.codec_options, options.read_preference, options.write_concern, options.read_concern) self.__all_credentials = {} - creds = options.credentials + creds = options._credentials if creds: self.__all_credentials[creds.source] = creds @@ -893,14 +893,6 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - @property - def event_listeners(self): - """The event listeners registered for this client. - - See :mod:`~pymongo.monitoring` for details. - """ - return self._event_listeners.event_listeners() - @property def topology_description(self): """The description of the connected MongoDB deployment. @@ -1005,40 +997,6 @@ def is_mongos(self): """ return self._server_property('server_type') == SERVER_TYPE.Mongos - @property - def max_pool_size(self): - """The maximum allowable number of concurrent connections to each - connected server. Requests to a server will block if there are - `maxPoolSize` outstanding connections to the requested server. - Defaults to 100. Can be either 0 or None, in which case there is no - limit on the number of concurrent connections. - - When a server's pool has reached `max_pool_size`, operations for that - server block waiting for a socket to be returned to the pool. If - ``waitQueueTimeoutMS`` is set, a blocked operation will raise - :exc:`~pymongo.errors.ConnectionFailure` after a timeout. - By default ``waitQueueTimeoutMS`` is not set. - """ - return self.__options.pool_options.max_pool_size - - @property - def min_pool_size(self): - """The minimum required number of concurrent connections that the pool - will maintain to each connected server. Default is 0. - """ - return self.__options.pool_options.min_pool_size - - @property - def max_idle_time_ms(self): - """The maximum number of milliseconds that a connection can remain - idle in the pool before being removed and replaced. Defaults to - `None` (no limit). - """ - seconds = self.__options.pool_options.max_idle_time_seconds - if seconds is None: - return None - return 1000 * seconds - @property def nodes(self): """Set of all currently connected servers. @@ -1054,38 +1012,15 @@ def nodes(self): return frozenset(s.address for s in description.known_servers) @property - def local_threshold_ms(self): - """The local threshold for this instance.""" - return self.__options.local_threshold_ms - - @property - def server_selection_timeout(self): - """The server selection timeout for this instance in seconds.""" - return self.__options.server_selection_timeout - - @property - def retry_writes(self): - """If this instance should retry supported write operations.""" - return self.__options.retry_writes + def options(self): + """The configuration options for this client. - @property - def retry_reads(self): - """If this instance should retry supported write operations.""" - return self.__options.retry_reads + :Returns: + An instance of :class:`~pymongo.client_options.ClientOptions`. - def _is_writable(self): - """Attempt to connect to a writable server, or return False. + .. versionadded:: 4.0 """ - topology = self._get_topology() # Starts monitors if necessary. - try: - svr = topology.select_server(writable_server_selector) - - # When directly connected to a secondary, arbiter, etc., - # select_server returns it, whatever the selector. Check - # again if the server is writable. - return svr.description.is_writable - except ConnectionFailure: - return False + return self.__options def _end_sessions(self, session_ids): """Send endSessions command(s) with the given session ids.""" @@ -1282,7 +1217,7 @@ def _retry_with_session(self, retryable, func, session, bulk): Re-raises any exception thrown by func(). """ - retryable = (retryable and self.retry_writes + retryable = (retryable and self.options.retry_writes and session and not session.in_transaction) return self._retry_internal(retryable, func, session, bulk) @@ -1353,7 +1288,7 @@ def _retryable_read(self, func, read_pref, session, address=None, Re-raises any exception thrown by func(). """ retryable = (retryable and - self.retry_reads + self.options.retry_reads and not (session and session.in_transaction)) last_error = None retrying = False diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 5359ee054a..37b894bd53 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -123,7 +123,7 @@ def __init__( self._server_description = server_description self._pool = pool self._settings = topology_settings - self._listeners = self._settings._pool_options.event_listeners + self._listeners = self._settings._pool_options._event_listeners pub = self._listeners is not None self._publish = pub and self._listeners.enabled_for_server_heartbeat self._cancel_context = None diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index ba9bbe8128..b877e19a23 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1318,10 +1318,10 @@ def enabled_for_cmap(self): def event_listeners(self): """List of registered event listeners.""" - return (self.__command_listeners[:] + - self.__server_heartbeat_listeners[:] + - self.__server_listeners[:] + - self.__topology_listeners[:] + + return (self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + self.__cmap_listeners) def publish_command_start(self, command, database_name, diff --git a/pymongo/pool.py b/pymongo/pool.py index 952874abb3..f9b370c66e 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -256,6 +256,17 @@ def _cond_wait(condition, deadline): class PoolOptions(object): + """Read only connection pool options for a MongoClient. + + Should not be instantiated directly by application developers. Access + a client's pool options via + :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: + + pool_opts = client.options.pool_options + pool_opts.max_pool_size + pool_opts.min_pool_size + + """ __slots__ = ('__max_pool_size', '__min_pool_size', '__max_idle_time_seconds', @@ -394,7 +405,7 @@ def wait_queue_timeout(self): return self.__wait_queue_timeout @property - def ssl_context(self): + def _ssl_context(self): """An SSLContext instance or None. """ return self.__ssl_context @@ -406,7 +417,7 @@ def tls_allow_invalid_hostnames(self): return self.__tls_allow_invalid_hostnames @property - def event_listeners(self): + def _event_listeners(self): """An instance of pymongo.monitoring._EventListeners. """ return self.__event_listeners @@ -424,7 +435,7 @@ def driver(self): return self.__driver @property - def compression_settings(self): + def _compression_settings(self): return self.__compression_settings @property @@ -506,9 +517,9 @@ def __init__(self, sock, pool, address, id): self.hello_ok = None self.is_mongos = False self.op_msg_enabled = False - self.listeners = pool.opts.event_listeners + self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap - self.compression_settings = pool.opts.compression_settings + self.compression_settings = pool.opts._compression_settings self.compression_context = None self.socket_checker = SocketChecker() # Support for mechanism negotiation on the initial handshake. @@ -1000,7 +1011,7 @@ def _configured_socket(address, options): Sets socket's SSL and timeout options. """ sock = _create_connection(address, options) - ssl_context = options.ssl_context + ssl_context = options._ssl_context if ssl_context is not None: host = address[0] @@ -1123,8 +1134,8 @@ def __init__(self, address, options, handshake=True): # Don't publish events in Monitor pools. self.enabled_for_cmap = ( self.handshake and - self.opts.event_listeners is not None and - self.opts.event_listeners.enabled_for_cmap) + self.opts._event_listeners is not None and + self.opts._event_listeners.enabled_for_cmap) # The first portion of the wait queue. # Enforces: maxPoolSize @@ -1141,7 +1152,7 @@ def __init__(self, address, options, handshake=True): self._max_connecting = self.opts.max_connecting self._pending = 0 if self.enabled_for_cmap: - self.opts.event_listeners.publish_pool_created( + self.opts._event_listeners.publish_pool_created( self.address, self.opts.non_default_options) # Similar to active_sockets but includes threads in the wait queue. self.operation_count = 0 @@ -1158,7 +1169,7 @@ def ready(self): if self.state != PoolState.READY: self.state = PoolState.READY if self.enabled_for_cmap: - self.opts.event_listeners.publish_pool_ready(self.address) + self.opts._event_listeners.publish_pool_ready(self.address) @property def closed(self): @@ -1197,7 +1208,7 @@ def _reset(self, close, pause=True, service_id=None): self._max_connecting_cond.notify_all() self.size_cond.notify_all() - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners # CMAP spec says that close() MUST close sockets before publishing the # PoolClosedEvent but that reset() SHOULD close sockets *after* # publishing the PoolClearedEvent. @@ -1301,7 +1312,7 @@ def connect(self, all_credentials=None): conn_id = self.next_connection_id self.next_connection_id += 1 - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_created(self.address, conn_id) @@ -1353,7 +1364,7 @@ def get_socket(self, all_credentials, handler=None): - `all_credentials`: dict, maps auth source to MongoCredential. - `handler` (optional): A _MongoClientErrorHandler. """ - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) @@ -1391,7 +1402,7 @@ def get_socket(self, all_credentials, handler=None): def _raise_if_not_ready(self, emit_event): if self.state != PoolState.READY: if self.enabled_for_cmap and emit_event: - self.opts.event_listeners.publish_connection_check_out_failed( + self.opts._event_listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.CONN_ERROR) _raise_connection_failure( self.address, AutoReconnect('connection pool paused')) @@ -1406,7 +1417,7 @@ def _get_socket(self, all_credentials): if self.closed: if self.enabled_for_cmap: - self.opts.event_listeners.publish_connection_check_out_failed( + self.opts._event_listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.POOL_CLOSED) raise _PoolClosedError( 'Attempted to check out a connection from closed connection ' @@ -1486,7 +1497,7 @@ def _get_socket(self, all_credentials): self.size_cond.notify() if self.enabled_for_cmap and not emitted_event: - self.opts.event_listeners.publish_connection_check_out_failed( + self.opts._event_listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.CONN_ERROR) raise @@ -1505,7 +1516,7 @@ def return_socket(self, sock_info): sock_info.pinned_txn = False sock_info.pinned_cursor = False self.__pinned_sockets.discard(sock_info) - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_checked_in(self.address, sock_info.id) if self.pid != os.getpid(): @@ -1578,7 +1589,7 @@ def _perished(self, sock_info): return False def _raise_wait_queue_timeout(self): - listeners = self.opts.event_listeners + listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.TIMEOUT) diff --git a/pymongo/topology.py b/pymongo/topology.py index 4083d7f331..c6a702fde5 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -73,7 +73,7 @@ class Topology(object): """Monitor a topology of one or more servers.""" def __init__(self, topology_settings): self._topology_id = topology_settings._topology_id - self._listeners = topology_settings._pool_options.event_listeners + self._listeners = topology_settings._pool_options._event_listeners pub = self._listeners is not None self._publish_server = pub and self._listeners.enabled_for_server self._publish_tp = pub and self._listeners.enabled_for_topology @@ -728,9 +728,9 @@ def _create_pool_for_monitor(self, address): monitor_pool_options = PoolOptions( connect_timeout=options.connect_timeout, socket_timeout=options.connect_timeout, - ssl_context=options.ssl_context, + ssl_context=options._ssl_context, tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, - event_listeners=options.event_listeners, + event_listeners=options._event_listeners, appname=options.appname, driver=options.driver, pause_enabled=False, diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 1b6b919e80..8bf0dcb21c 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -44,7 +44,7 @@ def run_test(self): self.assertRaises(Exception, MongoClient, uri, connect=False) else: client = MongoClient(uri, connect=False) - credentials = client._MongoClient__options.credentials + credentials = client._MongoClient__options._credentials if credential is None: self.assertIsNone(credentials) else: diff --git a/test/test_client.py b/test/test_client.py index 2b32dee971..34922d1097 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -36,6 +36,7 @@ from bson.tz_util import utc import pymongo from pymongo import event_loggers, message, monitoring +from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD @@ -56,7 +57,7 @@ from pymongo.mongo_client import MongoClient from pymongo.monitoring import (ServerHeartbeatListener, ServerHeartbeatStartedEvent) -from pymongo.pool import SocketInfo, _METADATA +from pymongo.pool import SocketInfo, _METADATA, PoolOptions from pymongo.read_preferences import ReadPreference from pymongo.server_description import ServerDescription from pymongo.server_selectors import (readable_server_selector, @@ -128,10 +129,10 @@ def test_keyword_arg_defaults(self): # socket.Socket.settimeout takes a float in seconds self.assertEqual(20.0, pool_opts.connect_timeout) self.assertEqual(None, pool_opts.wait_queue_timeout) - self.assertEqual(None, pool_opts.ssl_context) + self.assertEqual(None, pool_opts._ssl_context) self.assertEqual(None, options.replica_set_name) self.assertEqual(ReadPreference.PRIMARY, client.read_preference) - self.assertAlmostEqual(12, client.server_selection_timeout) + self.assertAlmostEqual(12, client.options.server_selection_timeout) def test_connect_timeout(self): client = MongoClient(connect=False, connectTimeoutMS=None, @@ -465,14 +466,23 @@ def test_uri_security_options(self): def test_event_listeners(self): c = MongoClient(event_listeners=[], connect=False) - self.assertEqual(c.event_listeners, []) + self.assertEqual(c.options.event_listeners, []) listeners = [event_loggers.CommandLogger(), event_loggers.HeartbeatLogger(), event_loggers.ServerLogger(), event_loggers.TopologyLogger(), event_loggers.ConnectionPoolLogger()] c = MongoClient(event_listeners=listeners, connect=False) - self.assertEqual(c.event_listeners, listeners) + self.assertEqual(c.options.event_listeners, listeners) + + def test_client_options(self): + c = MongoClient(connect=False) + self.assertIsInstance(c.options, ClientOptions) + self.assertIsInstance(c.options.pool_options, PoolOptions) + self.assertEqual(c.options.server_selection_timeout, 30) + self.assertEqual(c.options.pool_options.max_idle_time_seconds, None) + self.assertIsInstance(c.options.retry_writes, bool) + self.assertIsInstance(c.options.retry_reads, bool) class TestClient(IntegrationTest): @@ -635,7 +645,7 @@ def test_init_disconnected(self): c = rs_or_single_client(connect=False) self.assertIsInstance(c.is_mongos, bool) c = rs_or_single_client(connect=False) - self.assertIsInstance(c.max_pool_size, int) + self.assertIsInstance(c.options.pool_options.max_pool_size, int) self.assertIsInstance(c.nodes, frozenset) c = rs_or_single_client(connect=False) @@ -1003,8 +1013,8 @@ def test_timeouts(self): self.assertEqual(10.5, get_pool(client).opts.connect_timeout) self.assertEqual(10.5, get_pool(client).opts.socket_timeout) self.assertEqual(10.5, get_pool(client).opts.max_idle_time_seconds) - self.assertEqual(10500, client.max_idle_time_ms) - self.assertEqual(10.5, client.server_selection_timeout) + self.assertEqual(10.5, client.options.pool_options.max_idle_time_seconds) + self.assertEqual(10.5, client.options.server_selection_timeout) def test_socket_timeout_ms_validation(self): c = rs_or_single_client(socketTimeoutMS=10 * 1000) @@ -1044,10 +1054,10 @@ def get_x(db): def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=100, connect=False) - self.assertAlmostEqual(0.1, client.server_selection_timeout) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) client = MongoClient(serverSelectionTimeoutMS=0, connect=False) - self.assertAlmostEqual(0, client.server_selection_timeout) + self.assertAlmostEqual(0, client.options.server_selection_timeout) self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) @@ -1058,20 +1068,20 @@ def test_server_selection_timeout(self): client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False) - self.assertAlmostEqual(0.1, client.server_selection_timeout) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False) - self.assertAlmostEqual(0, client.server_selection_timeout) + self.assertAlmostEqual(0, client.options.server_selection_timeout) # Test invalid timeout in URI ignored and set to default. client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False) - self.assertAlmostEqual(30, client.server_selection_timeout) + self.assertAlmostEqual(30, client.options.server_selection_timeout) client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False) - self.assertAlmostEqual(30, client.server_selection_timeout) + self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): client = rs_or_single_client(waitQueueTimeoutMS=2000) @@ -1379,7 +1389,7 @@ def test_small_heartbeat_frequency_ms(self): def test_compression(self): def compression_settings(client): pool_options = client._MongoClient__options.pool_options - return pool_options.compression_settings + return pool_options._compression_settings uri = "mongodb://localhost:27017/?compressors=zlib" client = MongoClient(uri, connect=False) diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index 575bc458c5..c110b8b10c 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -127,7 +127,7 @@ def f(): def test_local_threshold(self): client = connected(self.mock_client(localThresholdMS=30)) - self.assertEqual(30, client.local_threshold_ms) + self.assertEqual(30, client.options.local_threshold_ms) wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') topology = client._topology @@ -139,7 +139,7 @@ def test_local_threshold(self): client.admin.command('ping') client = connected(self.mock_client(localThresholdMS=0)) - self.assertEqual(0, client.local_threshold_ms) + self.assertEqual(0, client.options.local_threshold_ms) # No error client.db.command('ping') # Our chosen mongos goes down. diff --git a/test/test_pooling.py b/test/test_pooling.py index ce0bed87c4..0456380814 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -169,7 +169,7 @@ def create_pool( **kwargs): # Start the pool with the correct ssl options. pool_options = client_context.client._topology_settings.pool_options - kwargs['ssl_context'] = pool_options.ssl_context + kwargs['ssl_context'] = pool_options._ssl_context kwargs['tls_allow_invalid_hostnames'] = pool_options.tls_allow_invalid_hostnames kwargs['server_api'] = pool_options.server_api pool = Pool(pair, PoolOptions(*args, **kwargs)) @@ -187,7 +187,7 @@ def test_max_pool_size_validation(self): ValueError, MongoClient, host=host, port=port, maxPoolSize='foo') c = MongoClient(host=host, port=port, maxPoolSize=100, connect=False) - self.assertEqual(c.max_pool_size, 100) + self.assertEqual(c.options.pool_options.max_pool_size, 100) def test_no_disconnect(self): run_cases(self.c, [NonUnique, Unique, InsertOneAndFind]) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 128b41d5fa..18dbd0bee4 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -209,20 +209,16 @@ def test_tag_sets_validation(self): def test_threshold_validation(self): self.assertEqual(17, rs_client( - localThresholdMS=17 - ).local_threshold_ms) + localThresholdMS=17, connect=False).options.local_threshold_ms) self.assertEqual(42, rs_client( - localThresholdMS=42 - ).local_threshold_ms) + localThresholdMS=42, connect=False).options.local_threshold_ms) self.assertEqual(666, rs_client( - localthresholdms=666 - ).local_threshold_ms) + localThresholdMS=666, connect=False).options.local_threshold_ms) self.assertEqual(0, rs_client( - localthresholdms=0 - ).local_threshold_ms) + localThresholdMS=0, connect=False).options.local_threshold_ms) self.assertRaises(ValueError, rs_client, diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 665aa9fd32..c4c093f66f 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -48,19 +48,19 @@ class TestClientOptions(PyMongoTestCase): def test_default(self): client = MongoClient(connect=False) - self.assertEqual(client.retry_reads, True) + self.assertEqual(client.options.retry_reads, True) def test_kwargs(self): client = MongoClient(retryReads=True, connect=False) - self.assertEqual(client.retry_reads, True) + self.assertEqual(client.options.retry_reads, True) client = MongoClient(retryReads=False, connect=False) - self.assertEqual(client.retry_reads, False) + self.assertEqual(client.options.retry_reads, False) def test_uri(self): client = MongoClient('mongodb://h/?retryReads=true', connect=False) - self.assertEqual(client.retry_reads, True) + self.assertEqual(client.options.retry_reads, True) client = MongoClient('mongodb://h/?retryReads=false', connect=False) - self.assertEqual(client.retry_reads, False) + self.assertEqual(client.options.retry_reads, False) class TestSpec(SpecRunner): diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index e51283f85d..02807f05ab 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -290,7 +290,7 @@ def _test_app_error(self, fail_command_opts, expected_error): 'data': data, } with self.fail_point(fail_insert): - if self.test_client.retry_writes: + if self.test_client.options.retry_writes: self.coll.insert_one({}) else: with self.assertRaises(expected_error): diff --git a/test/test_session.py b/test/test_session.py index 9ab0c349f7..e844ae3a08 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -104,7 +104,7 @@ def tearDown(self): self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): - listener = client.event_listeners[0] + listener = client.options.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: @@ -626,7 +626,7 @@ def test_command_cursor_limit_reached(self): lambda cursor: list(cursor)) def _test_unacknowledged_ops(self, client, *ops): - listener = client.event_listeners[0] + listener = client.options.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: From 89f41cfbd2560b41592dadf6e1b59f80c33ec207 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 Nov 2021 14:12:12 -0700 Subject: [PATCH 0507/2111] PYTHON-2999 Remove unused and internal only CRAM-MD5 auth mechanism (#777) --- pymongo/auth.py | 26 -------------------------- pymongo/common.py | 5 +---- 2 files changed, 1 insertion(+), 30 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index b946980865..17f3a32fe8 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -421,31 +421,6 @@ def _authenticate_plain(credentials, sock_info): sock_info.command(source, cmd) -def _authenticate_cram_md5(credentials, sock_info): - """Authenticate using CRAM-MD5 (RFC 2195) - """ - source = credentials.source - username = credentials.username - password = credentials.password - # The password used as the mac key is the - # same as what we use for MONGODB-CR - passwd = _password_digest(username, password) - cmd = SON([('saslStart', 1), - ('mechanism', 'CRAM-MD5'), - ('payload', Binary(b'')), - ('autoAuthorize', 1)]) - response = sock_info.command(source, cmd) - # MD5 as implicit default digest for digestmod is deprecated - # in python 3.4 - mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=hashlib.md5) - mac.update(response['payload']) - challenge = username.encode('utf-8') + b' ' + mac.hexdigest().encode('utf-8') - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', Binary(challenge))]) - sock_info.command(source, cmd) - - def _authenticate_x509(credentials, sock_info): """Authenticate using MONGODB-X509. """ @@ -497,7 +472,6 @@ def _authenticate_default(credentials, sock_info): _AUTH_MAP = { - 'CRAM-MD5': _authenticate_cram_md5, 'GSSAPI': _authenticate_gssapi, 'MONGODB-CR': _authenticate_mongo_cr, 'MONGODB-X509': _authenticate_x509, diff --git a/pymongo/common.py b/pymongo/common.py index 7f2bda2d05..3d68ba1c76 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -372,10 +372,7 @@ def validate_read_preference_mode(dummy, value): def validate_auth_mechanism(option, value): """Validate the authMechanism URI option. """ - # CRAM-MD5 is for server testing only. Undocumented, - # unsupported, may be removed at any time. You have - # been warned. - if value not in MECHANISMS and value != 'CRAM-MD5': + if value not in MECHANISMS: raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) return value From e27131546c4cd1f9535d795825f7f73cd8309bc9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 Nov 2021 17:25:11 -0700 Subject: [PATCH 0508/2111] PYTHON-2998 Remove md5 checksums from gridfs and remove disable_md5 (#776) Speed up gridfs tests (shaves off about 2 minutes on macOS). --- doc/changelog.rst | 7 ++++-- doc/migrate-to-pymongo4.rst | 26 ++++++++++++++++++++ gridfs/__init__.py | 49 +++++++++++++++++-------------------- gridfs/grid_file.py | 23 ++++++----------- test/__init__.py | 7 ++++++ test/gridfs/upload.json | 8 ------ test/test_custom_types.py | 2 +- test/test_grid_file.py | 14 ++++------- test/test_gridfs.py | 32 ++++++------------------ test/test_gridfs_bucket.py | 45 ++++++++-------------------------- test/test_gridfs_spec.py | 6 ++--- test/utils_spec_runner.py | 4 +-- 12 files changed, 94 insertions(+), 129 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 31e5d5d1de..3b3a700b3b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -170,12 +170,15 @@ Breaking Changes in 4.0 are passed to the server as-is rather than the previous behavior which substituted in a projection of ``{"_id": 1}``. This means that an empty projection will now return the entire document, not just the ``"_id"`` field. -- :class:`~pymongo.mongo_client.MongoClient` now raises a :exc:`~pymongo.errors.ConfigurationError` - when more than one URI is passed into the ``hosts`` argument. +- :class:`~pymongo.mongo_client.MongoClient` now raises a + :exc:`~pymongo.errors.ConfigurationError` when more than one URI is passed + into the ``hosts`` argument. - :class:`~pymongo.mongo_client.MongoClient`` now raises an :exc:`~pymongo.errors.InvalidURI` exception when it encounters unescaped percent signs in username and password when parsing MongoDB URIs. +- Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and + :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. Notable improvements .................... diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index c0288c104e..c8ac401a18 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -819,6 +819,32 @@ Changed the default JSON encoding representation from legacy to relaxed. The json_mode parameter for :const:`bson.json_util.dumps` now defaults to :const:`~bson.json_util.RELAXED_JSON_OPTIONS`. +GridFS changes +-------------- + +.. _removed-gridfs-checksum: + +disable_md5 parameter is removed +................................ + +Removed the `disable_md5` option for :class:`~gridfs.GridFSBucket` and +:class:`~gridfs.GridFS`. GridFS no longer generates checksums. +Applications that desire a file digest should implement it outside GridFS +and store it with other file metadata. For example:: + + import hashlib + my_db = MongoClient().test + fs = GridFSBucket(my_db) + grid_in = fs.open_upload_stream("test_file") + file_data = b'...' + sha356 = hashlib.sha256(file_data).hexdigest() + grid_in.write(file_data) + grid_in.sha356 = sha356 # Set the custom 'sha356' field + grid_in.close() + +Note that for large files, the checksum may need to be computed in chunks +to avoid the excessive memory needed to load the entire file at once. + Removed features with no migration path --------------------------------------- diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 2a637398e3..c36d921e8c 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -39,7 +39,7 @@ class GridFS(object): """An instance of GridFS on top of a single Database. """ - def __init__(self, database, collection="fs", disable_md5=False): + def __init__(self, database, collection="fs"): """Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of @@ -48,14 +48,18 @@ def __init__(self, database, collection="fs", disable_md5=False): :Parameters: - `database`: database to use - `collection` (optional): root collection to use - - `disable_md5` (optional): When True, MD5 checksums will not be - computed for uploaded files. Useful in environments where MD5 - cannot be used for regulatory or other reasons. Defaults to False. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. .. versionchanged:: 3.11 Running a GridFS operation in a transaction now always raises an error. GridFS does not support multi-document transactions. + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + .. versionchanged:: 3.1 Indexes are only ensured on the first write to the DB. @@ -77,7 +81,6 @@ def __init__(self, database, collection="fs", disable_md5=False): self.__collection = database[collection] self.__files = self.__collection.files self.__chunks = self.__collection.chunks - self.__disable_md5 = disable_md5 def new_file(self, **kwargs): """Create a new file in GridFS. @@ -93,8 +96,7 @@ def new_file(self, **kwargs): :Parameters: - `**kwargs` (optional): keyword arguments for file creation """ - return GridIn( - self.__collection, disable_md5=self.__disable_md5, **kwargs) + return GridIn(self.__collection, **kwargs) def put(self, data, **kwargs): """Put data in GridFS as a new file. @@ -126,8 +128,7 @@ def put(self, data, **kwargs): .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. """ - grid_file = GridIn( - self.__collection, disable_md5=self.__disable_md5, **kwargs) + grid_file = GridIn(self.__collection, **kwargs) try: grid_file.write(data) finally: @@ -423,7 +424,7 @@ class GridFSBucket(object): def __init__(self, db, bucket_name="fs", chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None, - read_preference=None, disable_md5=False): + read_preference=None): """Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of @@ -442,13 +443,17 @@ def __init__(self, db, bucket_name="fs", (the default) db.write_concern is used. - `read_preference` (optional): The read preference to use. If ``None`` (the default) db.read_preference is used. - - `disable_md5` (optional): When True, MD5 checksums will not be - computed for uploaded files. Useful in environments where MD5 - cannot be used for regulatory or other reasons. Defaults to False. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. .. versionchanged:: 3.11 - Running a GridFS operation in a transaction now always raises an - error. GridFSBucket does not support multi-document transactions. + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. .. versionadded:: 3.1 @@ -465,8 +470,6 @@ def __init__(self, db, bucket_name="fs", self._bucket_name = bucket_name self._collection = db[bucket_name] - self._disable_md5 = disable_md5 - self._chunks = self._collection.chunks.with_options( write_concern=write_concern, read_preference=read_preference) @@ -522,11 +525,7 @@ def open_upload_stream(self, filename, chunk_size_bytes=None, if metadata is not None: opts["metadata"] = metadata - return GridIn( - self._collection, - session=session, - disable_md5=self._disable_md5, - **opts) + return GridIn(self._collection, session=session, **opts) def open_upload_stream_with_id( self, file_id, filename, chunk_size_bytes=None, metadata=None, @@ -579,11 +578,7 @@ def open_upload_stream_with_id( if metadata is not None: opts["metadata"] = metadata - return GridIn( - self._collection, - session=session, - disable_md5=self._disable_md5, - **opts) + return GridIn(self._collection, session=session, **opts) def upload_from_stream(self, filename, source, chunk_size_bytes=None, metadata=None, session=None): diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 3e455dc932..fc01d88d24 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -14,7 +14,6 @@ """Tools for representing files stored in GridFS.""" import datetime -import hashlib import io import math import os @@ -115,8 +114,7 @@ def _disallow_transactions(session): class GridIn(object): """Class to write data to GridFS. """ - def __init__( - self, root_collection, session=None, disable_md5=False, **kwargs): + def __init__(self, root_collection, session=None, **kwargs): """Write a file to GridFS Application developers should generally not need to @@ -152,12 +150,15 @@ def __init__( - `session` (optional): a :class:`~pymongo.client_session.ClientSession` to use for all commands - - `disable_md5` (optional): When True, an MD5 checksum will not be - computed for the uploaded file. Useful in environments where - MD5 cannot be used for regulatory or other reasons. Defaults to - False. - `**kwargs` (optional): file level options (see above) + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -183,8 +184,6 @@ def __init__( coll = _clear_entity_type_registry( root_collection, read_preference=ReadPreference.PRIMARY) - if not disable_md5: - kwargs["md5"] = hashlib.md5() # Defaults kwargs["_id"] = kwargs.get("_id", ObjectId()) kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) @@ -271,9 +270,6 @@ def __flush_data(self, data): """Flush `data` to a chunk. """ self.__ensure_indexes() - if 'md5' in self._file: - self._file['md5'].update(data) - if not data: return assert(len(data) <= self.chunk_size) @@ -301,9 +297,6 @@ def __flush(self): """ try: self.__flush_buffer() - - if "md5" in self._file: - self._file["md5"] = self._file["md5"].hexdigest() # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) self._file["uploadDate"] = datetime.datetime.utcnow() diff --git a/test/__init__.py b/test/__init__.py index d9144e8623..ab53b7fdc5 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -949,6 +949,13 @@ def setUpClass(cls): else: cls.credentials = {} + def cleanup_colls(self, *collections): + """Cleanup collections faster than drop_collection.""" + for c in collections: + c = self.client[c.database.name][c.name] + c.delete_many({}) + c.drop_indexes() + def patch_system_certs(self, ca_certs): patcher = SystemCertsPatcher(ca_certs) self.addCleanup(patcher.disable) diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json index 324ac49d23..7d4adec1d8 100644 --- a/test/gridfs/upload.json +++ b/test/gridfs/upload.json @@ -29,7 +29,6 @@ "length": 0, "chunkSize": 4, "uploadDate": "*actual", - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "filename" } ] @@ -62,7 +61,6 @@ "length": 1, "chunkSize": 4, "uploadDate": "*actual", - "md5": "47ed733b8d10be225eceba344d533586", "filename": "filename" } ] @@ -108,7 +106,6 @@ "length": 3, "chunkSize": 4, "uploadDate": "*actual", - "md5": "bafae3a174ab91fc70db7a6aa50f4f52", "filename": "filename" } ] @@ -154,7 +151,6 @@ "length": 4, "chunkSize": 4, "uploadDate": "*actual", - "md5": "7e7c77cff5705d1f7574a25ef6662117", "filename": "filename" } ] @@ -200,7 +196,6 @@ "length": 5, "chunkSize": 4, "uploadDate": "*actual", - "md5": "283d4fea5dded59cf837d3047328f5af", "filename": "filename" } ] @@ -254,7 +249,6 @@ "length": 8, "chunkSize": 4, "uploadDate": "*actual", - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "filename" } ] @@ -309,7 +303,6 @@ "length": 1, "chunkSize": 4, "uploadDate": "*actual", - "md5": "47ed733b8d10be225eceba344d533586", "filename": "filename", "contentType": "image/jpeg" } @@ -359,7 +352,6 @@ "length": 1, "chunkSize": 4, "uploadDate": "*actual", - "md5": "47ed733b8d10be225eceba344d533586", "filename": "filename", "metadata": { "x": 1 diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 83d8c8a2c5..5db208ab7e 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -672,7 +672,7 @@ def test_grid_out_custom_opts(self): self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 'red', "bar": 'blue'}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", two.md5) + self.assertEqual(None, two.md5) for attr in ["_id", "name", "content_type", "length", "chunk_size", "upload_date", "aliases", "metadata", "md5"]: diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 37a1f905a7..a53e40c4c9 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -80,8 +80,7 @@ def test_grid_in_custom_opts(self): class TestGridFile(IntegrationTest): def setUp(self): - self.db.drop_collection('fs.files') - self.db.drop_collection('fs.chunks') + self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) def test_basic(self): f = GridIn(self.db.fs, filename="test") @@ -112,7 +111,7 @@ def test_md5(self): f = GridIn(self.db.fs) f.write(b"hello world\n") f.close() - self.assertEqual("6f5902ac237024bdd0c176cb93063dc4", f.md5) + self.assertEqual(None, f.md5) def test_alternate_collection(self): self.db.alt.files.delete_many({}) @@ -128,9 +127,6 @@ def test_alternate_collection(self): g = GridOut(self.db.alt, f._id) self.assertEqual(b"hello world", g.read()) - # test that md5 still works... - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", g.md5) - def test_grid_in_default_opts(self): self.assertRaises(TypeError, GridIn, "foo") @@ -194,7 +190,7 @@ def test_grid_in_default_opts(self): self.assertEqual({"foo": 1}, a.metadata) - self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", a.md5) + self.assertEqual(None, a.md5) self.assertRaises(AttributeError, setattr, a, "md5", 5) # Make sure custom attributes that were set both before and after @@ -225,7 +221,7 @@ def test_grid_out_default_opts(self): self.assertTrue(isinstance(b.upload_date, datetime.datetime)) self.assertEqual(None, b.aliases) self.assertEqual(None, b.metadata) - self.assertEqual("d41d8cd98f00b204e9800998ecf8427e", b.md5) + self.assertEqual(None, b.md5) for attr in ["_id", "name", "content_type", "length", "chunk_size", "upload_date", "aliases", "metadata", "md5"]: @@ -266,7 +262,7 @@ def test_grid_out_custom_opts(self): self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 1, "bar": 2}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual("5eb63bbbe01eeed093cb22bb8f5acdc3", two.md5) + self.assertEqual(None, two.md5) for attr in ["_id", "name", "content_type", "length", "chunk_size", "upload_date", "aliases", "metadata", "md5"]: diff --git a/test/test_gridfs.py b/test/test_gridfs.py index e6b84f2977..d7d5a74e5f 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -97,10 +97,8 @@ def setUpClass(cls): cls.alt = gridfs.GridFS(cls.db, "alt") def setUp(self): - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("alt.files") - self.db.drop_collection("alt.chunks") + self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, + self.db.alt.files, self.db.alt.chunks) def test_basic(self): oid = self.fs.put(b"hello world") @@ -158,7 +156,7 @@ def test_empty_file(self): self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], str)) + self.assertNotIn("md5", raw) def test_corrupt_chunk(self): files_id = self.fs.put(b'foobar') @@ -174,12 +172,11 @@ def test_corrupt_chunk(self): self.fs.delete(files_id) def test_put_ensures_index(self): - # setUp has dropped collections. - names = self.db.list_collection_names() - self.assertFalse([name for name in names if name.startswith('fs')]) - chunks = self.db.fs.chunks files = self.db.fs.files + # Ensure the collections are removed. + chunks.drop() + files.drop() self.fs.put(b"junk") self.assertTrue(any( @@ -484,21 +481,6 @@ def test_unacknowledged(self): def test_md5(self): gin = self.fs.new_file() - gin.write(b"includes md5 sum") - gin.close() - self.assertIsNotNone(gin.md5) - md5sum = gin.md5 - - gout = self.fs.get(gin._id) - self.assertIsNotNone(gout.md5) - self.assertEqual(md5sum, gout.md5) - - _id = self.fs.put(b"also includes md5 sum") - gout = self.fs.get(_id) - self.assertIsNotNone(gout.md5) - - fs = gridfs.GridFS(self.db, disable_md5=True) - gin = fs.new_file() gin.write(b"no md5 sum") gin.close() self.assertIsNone(gin.md5) @@ -506,7 +488,7 @@ def test_md5(self): gout = self.fs.get(gin._id) self.assertIsNone(gout.md5) - _id = fs.put(b"still no md5 sum") + _id = self.fs.put(b"still no md5 sum") gout = self.fs.get(_id) self.assertIsNone(gout.md5) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 8b4c2cf346..499643f673 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -86,10 +86,8 @@ def setUpClass(cls): cls.db, bucket_name="alt") def setUp(self): - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("alt.files") - self.db.drop_collection("alt.chunks") + self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, + self.db.alt.files, self.db.alt.chunks) def test_basic(self): oid = self.fs.upload_from_stream("test_filename", @@ -105,7 +103,6 @@ def test_basic(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) def test_multi_chunk_delete(self): - self.db.fs.drop() self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) gfs = gridfs.GridFSBucket(self.db) @@ -130,7 +127,7 @@ def test_empty_file(self): self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) self.assertEqual(255 * 1024, raw["chunkSize"]) - self.assertTrue(isinstance(raw["md5"], str)) + self.assertNotIn("md5", raw) def test_corrupt_chunk(self): files_id = self.fs.upload_from_stream("test_filename", @@ -147,12 +144,11 @@ def test_corrupt_chunk(self): self.fs.delete(files_id) def test_upload_ensures_index(self): - # setUp has dropped collections. - names = self.db.list_collection_names() - self.assertFalse([name for name in names if name.startswith('fs')]) - chunks = self.db.fs.chunks files = self.db.fs.files + # Ensure the collections are removed. + chunks.drop() + files.drop() self.fs.upload_from_stream("filename", b"junk") self.assertTrue(any( @@ -464,41 +460,20 @@ def test_download_to_stream_by_name(self): self.assertEqual(file1.read(), file2.read()) def test_md5(self): - gin = self.fs.open_upload_stream("has md5") - gin.write(b"includes md5 sum") - gin.close() - self.assertIsNotNone(gin.md5) - md5sum = gin.md5 - - gout = self.fs.open_download_stream(gin._id) - self.assertIsNotNone(gout.md5) - self.assertEqual(md5sum, gout.md5) - - gin = self.fs.open_upload_stream_with_id(ObjectId(), "also has md5") - gin.write(b"also includes md5 sum") - gin.close() - self.assertIsNotNone(gin.md5) - md5sum = gin.md5 - - gout = self.fs.open_download_stream(gin._id) - self.assertIsNotNone(gout.md5) - self.assertEqual(md5sum, gout.md5) - - fs = gridfs.GridFSBucket(self.db, disable_md5=True) - gin = fs.open_upload_stream("no md5") + gin = self.fs.open_upload_stream("no md5") gin.write(b"no md5 sum") gin.close() self.assertIsNone(gin.md5) - gout = fs.open_download_stream(gin._id) + gout = self.fs.open_download_stream(gin._id) self.assertIsNone(gout.md5) - gin = fs.open_upload_stream_with_id(ObjectId(), "also no md5") + gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5") gin.write(b"also no md5 sum") gin.close() self.assertIsNone(gin.md5) - gout = fs.open_download_stream(gin._id) + gout = self.fs.open_download_stream(gin._id) self.assertIsNone(gout.md5) diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index e9e097568c..86449db370 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -66,10 +66,8 @@ def setUpClass(cls): "download_by_name": cls.fs.open_download_stream_by_name} def init_db(self, data, test): - self.db.drop_collection("fs.files") - self.db.drop_collection("fs.chunks") - self.db.drop_collection("expected.files") - self.db.drop_collection("expected.chunks") + self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, + self.db.expected.files, self.db.expected.chunks) # Read in data. if data['files']: diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index d552a18ca2..f3a9c4390a 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -270,9 +270,7 @@ def run_operation(self, sessions, collection, operation): if object_name == 'gridfsbucket': # Only create the GridFSBucket when we need it (for the gridfs # retryable reads tests). - obj = GridFSBucket( - database, bucket_name=collection.name, - disable_md5=True) + obj = GridFSBucket(database, bucket_name=collection.name) else: objects = { 'client': database.client, From 420d74095defa9da150b8a7a33367d1a5be9d751 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 5 Nov 2021 13:07:07 -0700 Subject: [PATCH 0509/2111] PYTHON-2721 Reenable regex flags test on MongoDB 5.0+ (#779) --- test/test_collection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test_collection.py b/test/test_collection.py index bb3795c94a..79a2a907a6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1132,7 +1132,6 @@ def test_fields_specifier_as_dict(self): self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) - @client_context.require_version_max(4, 9, -1) # PYTHON-2721 def test_find_w_regex(self): db = self.db db.test.delete_many({}) From 2f3acb6bc232b661ab3ee389417b38cbed38df91 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 5 Nov 2021 15:05:05 -0700 Subject: [PATCH 0510/2111] PYTHON-2941 Add a CMAP test that verifies the background thread hands over connections to threads doing checkout (#780) --- ...-minPoolSize-connection-maxConnecting.json | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json diff --git a/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json b/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json new file mode 100644 index 0000000000..3b0d43e877 --- /dev/null +++ b/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json @@ -0,0 +1,88 @@ +{ + "version": 1, + "style": "integration", + "description": "threads blocked by maxConnecting check out minPoolSize connections", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "minPoolSize": 2, + "maxPoolSize": 3, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "wait", + "ms": 200 + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 2 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckedOut", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCreated", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "address": 42 + } + ], + "ignore": [ + "ConnectionPoolReady", + "ConnectionClosed", + "ConnectionReady", + "ConnectionPoolCreated", + "ConnectionCheckOutStarted" + ] +} From 9a47c30699b81b433f9b84d04e53a3f7fddb0c33 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 8 Nov 2021 14:30:58 -0800 Subject: [PATCH 0511/2111] PYTHON-2971 PossiblePrimary must not be checked for wire version compatibility of MaxStalenessSeconds (#778) --- .../OneKnownTwoUnavailable.json | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json diff --git a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json new file mode 100644 index 0000000000..54f318872f --- /dev/null +++ b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json @@ -0,0 +1,60 @@ +{ + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + { + "address": "a:27017", + "type": "PossiblePrimary", + "avg_rtt_ms": 5, + "maxWireVersion": 0 + }, + { + "address": "b:27017", + "type": "Unknown", + "avg_rtt_ms": 5, + "maxWireVersion": 0 + }, + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 6, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] + }, + "read_preference": { + "mode": "Nearest", + "maxStalenessSeconds": 120 + }, + "suitable_servers": [ + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 6, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ], + "in_latency_window": [ + { + "address": "c:27017", + "type": "RSSecondary", + "maxWireVersion": 6, + "avg_rtt_ms": 5, + "lastWrite": { + "lastWriteDate": { + "$numberLong": "1" + } + } + } + ] +} From b05ac0e7ba0173ccd336f1485c45d256673f387e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 8 Nov 2021 16:12:45 -0800 Subject: [PATCH 0512/2111] PYTHON-2460 Client can create more than minPoolSize background connections (#782) --- test/test_pooling.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/test_pooling.py b/test/test_pooling.py index 0456380814..b8f3cf1908 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -291,10 +291,14 @@ def test_socket_checker(self): def test_return_socket_after_reset(self): pool = self.create_pool() with pool.get_socket({}) as sock: + self.assertEqual(pool.active_sockets, 1) + self.assertEqual(pool.operation_count, 1) pool.reset() self.assertTrue(sock.closed) self.assertEqual(0, len(pool.sockets)) + self.assertEqual(pool.active_sockets, 0) + self.assertEqual(pool.operation_count, 0) def test_pool_check(self): # Test that Pool recovers from two connection failures in a row. From e80141ed1c3afb9edf97f2a3a7507f78ebb9a97f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 8 Nov 2021 16:19:24 -0800 Subject: [PATCH 0513/2111] PYTHON-2992 Implement unified test format loop operation (#773) --- test/test_create_entities.py | 142 ++++++++++++++++++ ...client-storeEventsAsEntities-minItems.json | 18 +++ ...ity-client-storeEventsAsEntities-type.json | 18 +++ ...ailedEvent-hasServerConnectionId-type.json | 29 ++++ ...artedEvent-hasServerConnectionId-type.json | 29 ++++ ...eededEvent-hasServerConnectionId-type.json | 29 ++++ ...ntsAsEntities-conflict_with_client_id.json | 28 ++++ ...ities-conflict_within_different_array.json | 43 ++++++ ...AsEntities-conflict_within_same_array.json | 36 +++++ .../entity-findCursor-malformed.json | 44 ++++++ ...ind-cursor.json => entity-findCursor.json} | 12 +- .../ignoreResultAndError-malformed.json | 48 ++++++ .../valid-fail/ignoreResultAndError.json | 13 -- .../entity-client-storeEventsAsEntities.json | 67 +++++++++ test/unified_format.py | 95 ++++++++++-- 15 files changed, 613 insertions(+), 38 deletions(-) create mode 100644 test/test_create_entities.py create mode 100644 test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json create mode 100644 test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json create mode 100644 test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json create mode 100644 test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json create mode 100644 test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json create mode 100644 test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json create mode 100644 test/unified-test-format/valid-fail/entity-findCursor-malformed.json rename test/unified-test-format/valid-fail/{entity-find-cursor.json => entity-findCursor.json} (76%) create mode 100644 test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json create mode 100644 test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json diff --git a/test/test_create_entities.py b/test/test_create_entities.py new file mode 100644 index 0000000000..9b5c30d64e --- /dev/null +++ b/test/test_create_entities.py @@ -0,0 +1,142 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest + +from test.unified_format import UnifiedSpecTestMixinV1 + +from pymongo.monitoring import PoolCreatedEvent + + +class TestCreateEntities(unittest.TestCase): + def test_store_events_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "blank", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ] + } + ] + } + }, + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] + } + self.scenario_runner.TEST_SPEC = spec + self.scenario_runner.setUp() + self.scenario_runner.run_scenario(spec["tests"][0]) + final_entity_map = self.scenario_runner.entity_map + self.assertIn("events1", final_entity_map) + self.assertGreater(len(final_entity_map["events1"]), 0) + for event in final_entity_map["events1"]: + self.assertEqual(type(event), PoolCreatedEvent) + + def test_store_all_others_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "Find", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": True + }, + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "dat" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "dat" + } + } + ], + + "tests": [ + { + "description": "test loops", + "operations": [ + { + "name": "loop", + "object": "testRunner", + "arguments": { + "storeIterationsAsEntity": "iterations", + "storeSuccessesAsEntity": "successes", + "storeFailuresAsEntity": "failures", + "storeErrorsAsEntity": "errors", + "numIterations": 5, + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 44 + } + } + + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 44 + } + } + + } + ] + } + } + ] + } + ] + } + + self.scenario_runner.TEST_SPEC = spec + self.scenario_runner.setUp() + self.scenario_runner.run_scenario(spec["tests"][0]) + final_entity_map = self.scenario_runner.entity_map + for entity in ["errors", "failures"]: + self.assertIn(entity, final_entity_map) + self.assertGreaterEqual(len(final_entity_map[entity]), 0) + self.assertEqual(type(final_entity_map[entity]), list) + for entity in ["successes", "iterations"]: + self.assertIn(entity, final_entity_map) + self.assertEqual(type(final_entity_map[entity]), int) diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json new file mode 100644 index 0000000000..d94863ed11 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-storeEventsAsEntities-minItems", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json new file mode 100644 index 0000000000..79f6b85ed2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-storeEventsAsEntities-type", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..7787ea6516 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandFailedEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..a913f00ab7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandStartedEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json new file mode 100644 index 0000000000..0712c33694 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-hasServerConnectionId-type", + "schemaVersion": "1.6", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "hasServerConnectionId": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json new file mode 100644 index 0000000000..8c0c4d2041 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json @@ -0,0 +1,28 @@ +{ + "description": "entity-client-storeEventsAsEntities-conflict_with_client_id", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0", + "events": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json new file mode 100644 index 0000000000..77bc4abf2e --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json @@ -0,0 +1,43 @@ +{ + "description": "entity-client-storeEventsAsEntities-conflict_within_different_array", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events", + "events": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent" + ] + } + ] + } + }, + { + "client": { + "id": "client1", + "storeEventsAsEntities": [ + { + "id": "events", + "events": [ + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json new file mode 100644 index 0000000000..e1a9499883 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json @@ -0,0 +1,36 @@ +{ + "description": "entity-client-storeEventsAsEntities-conflict_within_same_array", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events", + "events": [ + "PoolCreatedEvent", + "PoolReadyEvent", + "PoolClearedEvent", + "PoolClosedEvent" + ] + }, + { + "id": "events", + "events": [ + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent" + ] + } + ] + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-findCursor-malformed.json b/test/unified-test-format/valid-fail/entity-findCursor-malformed.json new file mode 100644 index 0000000000..0956efa4c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/entity-findCursor-malformed.json @@ -0,0 +1,44 @@ +{ + "description": "entity-findCursor-malformed", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0Name", + "collectionName": "coll0", + "documents": [] + } + ], + "tests": [ + { + "description": "createFindCursor fails if filter is not specified", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "saveResultAsEntity": "cursor0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-find-cursor.json b/test/unified-test-format/valid-fail/entity-findCursor.json similarity index 76% rename from test/unified-test-format/valid-fail/entity-find-cursor.json rename to test/unified-test-format/valid-fail/entity-findCursor.json index f4c5bcdf48..389e448c06 100644 --- a/test/unified-test-format/valid-fail/entity-find-cursor.json +++ b/test/unified-test-format/valid-fail/entity-findCursor.json @@ -1,5 +1,5 @@ { - "description": "entity-find-cursor", + "description": "entity-findCursor", "schemaVersion": "1.3", "createEntities": [ { @@ -30,16 +30,6 @@ } ], "tests": [ - { - "description": "createFindCursor fails if filter is not specified", - "operations": [ - { - "name": "createFindCursor", - "object": "collection0", - "saveResultAsEntity": "cursor0" - } - ] - }, { "description": "iterateUntilDocumentOrError fails if it references a nonexistent entity", "operations": [ diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json b/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json new file mode 100644 index 0000000000..b64779c723 --- /dev/null +++ b/test/unified-test-format/valid-fail/ignoreResultAndError-malformed.json @@ -0,0 +1,48 @@ +{ + "description": "ignoreResultAndError-malformed", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "malformed operation fails if ignoreResultAndError is true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "foo": "bar" + }, + "ignoreResultAndError": true + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/ignoreResultAndError.json b/test/unified-test-format/valid-fail/ignoreResultAndError.json index 4457040b4f..01b2421a9f 100644 --- a/test/unified-test-format/valid-fail/ignoreResultAndError.json +++ b/test/unified-test-format/valid-fail/ignoreResultAndError.json @@ -54,19 +54,6 @@ "ignoreResultAndError": false } ] - }, - { - "description": "malformed operation fails if ignoreResultAndError is true", - "operations": [ - { - "name": "insertOne", - "object": "collection0", - "arguments": { - "foo": "bar" - }, - "ignoreResultAndError": true - } - ] } ] } diff --git a/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json b/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json new file mode 100644 index 0000000000..e37e5a1acd --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json @@ -0,0 +1,67 @@ +{ + "description": "entity-client-storeEventsAsEntities", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "client0_events", + "events": [ + "CommandStartedEvent", + "CommandSucceededEvent", + "CommandFailedEvent" + ] + } + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "storeEventsAsEntities captures events", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 0a2f0b9965..4c705299e9 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -16,13 +16,14 @@ https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst """ - +import collections import copy import datetime import functools import os import re import sys +import time import types from collections import abc @@ -68,6 +69,13 @@ JSON_OPTS = json_util.JSONOptions(tz_aware=False) +IS_INTERRUPTED = False + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + def with_metaclass(meta, *bases): """Create a base class with a metaclass. @@ -188,7 +196,7 @@ def close(self): class EventListenerUtil(CMAPListener, CommandListener): def __init__(self, observe_events, ignore_commands, - observe_sensitive_commands): + observe_sensitive_commands, store_events, entity_map): self._event_types = set(name.lower() for name in observe_events) if observe_sensitive_commands: self._observe_sensitive_commands = True @@ -197,6 +205,15 @@ def __init__(self, observe_events, ignore_commands, self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add('configurefailpoint') + self._event_mapping = collections.defaultdict(list) + self.entity_map = entity_map + if store_events: + for i in store_events: + id = i["id"] + events = (i.lower() for i in i["events"]) + for i in events: + self._event_mapping[i].append(id) + self.entity_map[id] = [] super(EventListenerUtil, self).__init__() def get_events(self, event_type): @@ -205,8 +222,11 @@ def get_events(self, event_type): return [e for e in self.events if 'Command' not in type(e).__name__] def add_event(self, event): - if type(event).__name__.lower() in self._event_types: + event_name = type(event).__name__.lower() + if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) + for id in self._event_mapping[event_name]: + self.entity_map[id].append(event) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: @@ -241,6 +261,12 @@ def __init__(self, test_class): self._session_lsids = {} self.test = test_class + def __contains__(self, item): + return item in self._entities + + def __len__(self): + return len(self._entities) + def __getitem__(self, item): try: return self._entities[item] @@ -271,13 +297,13 @@ def _create_entity(self, entity_spec): ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) observe_sensitive_commands = spec.get( 'observeSensitiveCommands', False) - # TODO: PYTHON-2511 support storeEventsAsEntities - if len(observe_events) or len(ignore_commands): - ignore_commands = [cmd.lower() for cmd in ignore_commands] - listener = EventListenerUtil( - observe_events, ignore_commands, observe_sensitive_commands) - self._listeners[spec['id']] = listener - kwargs['event_listeners'] = [listener] + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil( + observe_events, ignore_commands, + observe_sensitive_commands, + spec.get("storeEventsAsEntities"), self) + self._listeners[spec['id']] = listener + kwargs['event_listeners'] = [listener] if spec.get('useMultipleMongoses'): if client_context.load_balancer or client_context.serverless: kwargs['h'] = client_context.MULTI_MONGOS_LB_URI @@ -1048,6 +1074,47 @@ def _testOperation_assertNumberConnectionsCheckedOut(self, spec): pool = get_pool(client) self.assertEqual(spec['connections'], pool.active_sockets) + def _testOperation_loop(self, spec): + failure_key = spec.get('storeFailuresAsEntity') + error_key = spec.get('storeErrorsAsEntity') + successes_key = spec.get('storeSuccessesAsEntity') + iteration_key = spec.get('storeIterationsAsEntity') + iteration_limiter_key = spec.get('numIterations') + if failure_key: + self.entity_map[failure_key] = [] + if error_key: + self.entity_map[error_key] = [] + if successes_key: + self.entity_map[successes_key] = 0 + if iteration_key: + self.entity_map[iteration_key] = 0 + i = 0 + while True: + if iteration_limiter_key and i >= iteration_limiter_key: + break + i += 1 + if IS_INTERRUPTED: + break + try: + for op in spec["operations"]: + self.run_entity_operation(op) + if successes_key: + self.entity_map._entities[successes_key] += 1 + if iteration_key: + self.entity_map._entities[iteration_key] += 1 + except AssertionError as exc: + if failure_key or error_key: + self.entity_map[failure_key or error_key].append({ + "error": exc, "time": time.time()}) + else: + raise exc + except Exception as exc: + if error_key or failure_key: + self.entity_map[error_key or failure_key].append( + {"error": exc, "time": time.time()}) + else: + raise exc + def run_special_operation(self, spec): opname = spec['name'] method_name = '_testOperation_%s' % (opname,) @@ -1060,11 +1127,11 @@ def run_special_operation(self, spec): def run_operations(self, spec): for op in spec: - target = op['object'] - if target != 'testRunner': - self.run_entity_operation(op) - else: + if op['object'] == 'testRunner': self.run_special_operation(op) + else: + self.run_entity_operation(op) + def check_events(self, spec): for event_spec in spec: From c404150fe7966896ef65941d4ec3e58f348eadcc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 8 Nov 2021 17:00:42 -0800 Subject: [PATCH 0514/2111] PYTHON-3011 Fix test_connections_are_only_returned_once (#781) --- test/test_load_balancer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index da77734e92..247072c7bd 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -45,7 +45,7 @@ def test_connections_are_only_returned_once(self): nconns = len(pool.sockets) self.db.test.find_one({}) self.assertEqual(len(pool.sockets), nconns) - self.db.test.aggregate([{'$limit': 1}]) + list(self.db.test.aggregate([{'$limit': 1}])) self.assertEqual(len(pool.sockets), nconns) @client_context.require_load_balancer From 370e1652ad97a2dbfd33400fc0e23a6c2fc4a5d5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 10 Nov 2021 16:49:31 -0800 Subject: [PATCH 0515/2111] PYTHON-3003 Add kms_tls_options to configure options for KMS provider connections (#784) --- pymongo/common.py | 13 +++++++++- pymongo/encryption.py | 42 ++++++++++++++++++++++---------- pymongo/encryption_options.py | 27 +++++++++++---------- pymongo/uri_parser.py | 34 ++++++++++++++++++++++++++ test/test_encryption.py | 45 +++++++++++++++++++++++++++++++++-- 5 files changed, 133 insertions(+), 28 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index 3d68ba1c76..5dd7b180c0 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -30,7 +30,6 @@ validate_zlib_compression_level) from pymongo.driver_info import DriverInfo from pymongo.server_api import ServerApi -from pymongo.encryption_options import validate_auto_encryption_opts_or_none from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern @@ -582,6 +581,18 @@ def validate_tzinfo(dummy, value): return value +def validate_auto_encryption_opts_or_none(option, value): + """Validate the driver keyword arg.""" + if value is None: + return value + from pymongo.encryption_options import AutoEncryptionOpts + if not isinstance(value, AutoEncryptionOpts): + raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( + option,)) + + return value + + # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP = { diff --git a/pymongo/encryption.py b/pymongo/encryption.py index ad19b26426..1fe2877bbc 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -46,6 +46,7 @@ EncryptionError, InvalidOperation, ServerSelectionTimeoutError) +from pymongo.encryption_options import AutoEncryptionOpts from pymongo.mongo_client import MongoClient from pymongo.pool import _configured_socket, PoolOptions from pymongo.read_concern import ReadConcern @@ -106,20 +107,23 @@ def kms_request(self, kms_context): """ endpoint = kms_context.endpoint message = kms_context.message - host, port = parse_host(endpoint, _HTTPS_PORT) - # Enable strict certificate verification, OCSP, match hostname, and - # SNI using the system default CA certificates. - ctx = get_ssl_context( - None, # certfile - None, # passphrase - None, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False) # disable_ocsp_endpoint_check + provider = kms_context.kms_provider + ctx = self.opts._kms_ssl_contexts.get(provider) + if not ctx: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False) # disable_ocsp_endpoint_check opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, socket_timeout=_KMS_CONNECT_TIMEOUT, ssl_context=ctx) + host, port = parse_host(endpoint, _HTTPS_PORT) conn = _configured_socket((host, port), opts) try: conn.sendall(message) @@ -359,7 +363,7 @@ class ClientEncryption(object): """Explicit client-side field level encryption.""" def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - codec_options): + codec_options, kms_tls_options=None): """Explicit client-side field level encryption. The ClientEncryption class encapsulates explicit operations on a key @@ -411,6 +415,16 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, should be the same CodecOptions instance configured on the MongoClient, Database, or Collection used to access application data. + - `kms_tls_options` (optional): A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter. .. versionadded:: 3.9 """ @@ -432,7 +446,9 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, db, coll = key_vault_namespace.split('.', 1) key_vault_coll = key_vault_client[db][coll] - self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, None) + opts = AutoEncryptionOpts(kms_providers, key_vault_namespace, + kms_tls_options=kms_tls_options) + self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, opts) self._encryption = ExplicitEncrypter( self._io_callbacks, MongoCryptOptions(kms_providers, None)) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index fd1226c7c6..1d4aa0c7b0 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -23,6 +23,7 @@ _HAVE_PYMONGOCRYPT = False from pymongo.errors import ConfigurationError +from pymongo.uri_parser import _parse_kms_tls_options class AutoEncryptionOpts(object): @@ -35,7 +36,8 @@ def __init__(self, kms_providers, key_vault_namespace, mongocryptd_uri='mongodb://localhost:27020', mongocryptd_bypass_spawn=False, mongocryptd_spawn_path='mongocryptd', - mongocryptd_spawn_args=None): + mongocryptd_spawn_args=None, + kms_tls_options=None): """Options to configure automatic client-side field level encryption. Automatic client-side field level encryption requires MongoDB 4.2 @@ -118,6 +120,16 @@ def __init__(self, kms_providers, key_vault_namespace, ``['--idleShutdownTimeoutSecs=60']``. If the list does not include the ``idleShutdownTimeoutSecs`` option then ``'--idleShutdownTimeoutSecs=60'`` will be added. + - `kms_tls_options` (optional): A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter. .. versionadded:: 3.9 """ @@ -142,14 +154,5 @@ def __init__(self, kms_providers, key_vault_namespace, if not any('idleShutdownTimeoutSecs' in s for s in self._mongocryptd_spawn_args): self._mongocryptd_spawn_args.append('--idleShutdownTimeoutSecs=60') - - -def validate_auto_encryption_opts_or_none(option, value): - """Validate the driver keyword arg.""" - if value is None: - return value - if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( - option,)) - - return value + # Maps KMS provider name to a SSLContext. + self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 23db48bf4a..8c43d51770 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -20,6 +20,7 @@ from urllib.parse import unquote_plus +from pymongo.client_options import _parse_ssl_options from pymongo.common import ( SRV_SERVICE_NAME, get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, @@ -569,6 +570,39 @@ def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, } +def _parse_kms_tls_options(kms_tls_options): + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError('kms_tls_options must be a dict') + contexts = {} + for provider, opts in kms_tls_options.items(): + if not isinstance(opts, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + opts.setdefault('tls', True) + opts = _CaseInsensitiveDictionary(opts) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = validate_options(opts) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + if ssl_context is None: + raise ConfigurationError('TLS is required for KMS providers') + if allow_invalid_hostnames: + raise ConfigurationError('Insecure TLS options prohibited') + + for n in ['tlsInsecure', + 'tlsAllowInvalidCertificates', + 'tlsAllowInvalidHostnames', + 'tlsDisableOCSPEndpointCheck', + 'tlsDisableCertificateRevocationCheck']: + if n in opts: + raise ConfigurationError( + f'Insecure TLS options prohibited: {n}') + contexts[provider] = ssl_context + return contexts + + if __name__ == '__main__': import pprint import sys diff --git a/test/test_encryption.py b/test/test_encryption.py index 67681daba8..d94fcf3469 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -17,6 +17,7 @@ import base64 import copy import os +import ssl import traceback import socket import sys @@ -50,9 +51,8 @@ from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne from pymongo.write_concern import WriteConcern -from test.test_ssl import CA_PEM -from test import (unittest, +from test import (unittest, CA_PEM, CLIENT_PEM, client_context, IntegrationTest, PyMongoTestCase) @@ -92,6 +92,7 @@ def test_init(self): self.assertEqual(opts._mongocryptd_spawn_path, 'mongocryptd') self.assertEqual( opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) + self.assertEqual(opts._kms_ssl_contexts, {}) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') def test_init_spawn_args(self): @@ -116,6 +117,46 @@ def test_init_spawn_args(self): opts._mongocryptd_spawn_args, ['--quiet', '--port=27020', '--idleShutdownTimeoutSecs=60']) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + def test_init_kms_tls_options(self): + # Error cases: + with self.assertRaisesRegex( + TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AutoEncryptionOpts({}, 'k.d', kms_tls_options={'kmip': 1}) + for tls_opts in [ + {'kmip': {'tls': True, 'tlsInsecure': True}}, + {'kmip': {'tls': True, 'tlsAllowInvalidCertificates': True}}, + {'kmip': {'tls': True, 'tlsAllowInvalidHostnames': True}}, + {'kmip': {'tls': True, 'tlsDisableOCSPEndpointCheck': True}}]: + with self.assertRaisesRegex( + ConfigurationError, 'Insecure TLS options prohibited'): + opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) + with self.assertRaises(FileNotFoundError): + AutoEncryptionOpts({}, 'k.d', kms_tls_options={ + 'kmip': {'tlsCAFile': 'does-not-exist'}}) + # Success cases: + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) + self.assertEqual(opts._kms_ssl_contexts, {}) + opts = AutoEncryptionOpts( + {}, 'k.d', kms_tls_options={'kmip': {'tls': True}, 'aws': {}}) + ctx = opts._kms_ssl_contexts['kmip'] + # On < 3.7 we check hostnames manually. + if sys.version_info[:2] >= (3, 7): + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = opts._kms_ssl_contexts['aws'] + if sys.version_info[:2] >= (3, 7): + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, 'k.d', kms_tls_options={'kmip': { + 'tlsCAFile': CA_PEM, 'tlsCertificateKeyFile': CLIENT_PEM}}) + ctx = opts._kms_ssl_contexts['kmip'] + if sys.version_info[:2] >= (3, 7): + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + class TestClientOptions(PyMongoTestCase): def test_default(self): From 99a413f81b620ca389330e24a9731386573aff79 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Thu, 11 Nov 2021 13:56:26 -0800 Subject: [PATCH 0516/2111] Update author and maintainer --- README.rst | 3 +-- setup.py | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index ba505dbb3a..f83ad70b10 100644 --- a/README.rst +++ b/README.rst @@ -3,8 +3,7 @@ PyMongo ======= :Info: See `the mongo site `_ for more information. See `GitHub `_ for the latest source. :Documentation: Available at `pymongo.readthedocs.io `_ -:Author: Mike Dirolf -:Maintainer: Bernie Hackett +:Author: The MongoDB Python Team About ===== diff --git a/setup.py b/setup.py index cdf9113911..8fcad6cc60 100755 --- a/setup.py +++ b/setup.py @@ -314,10 +314,8 @@ def build_extension(self, ext): version=version, description="Python driver for MongoDB ", long_description=readme_content, - author="Mike Dirolf", + author="The MongoDB Python Team", author_email="mongodb-user@googlegroups.com", - maintainer="Bernie Hackett", - maintainer_email="bernie@mongodb.com", url="http://github.com/mongodb/mongo-python-driver", keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], From e1884b44dfc5c70359fdc056104786cabe494108 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 11 Nov 2021 15:00:17 -0800 Subject: [PATCH 0517/2111] PYTHON-2512 Update Astrolabe's Workload Executor to use the unified test runner (#783) --- test/test_create_entities.py | 4 +-- test/unified_format.py | 56 +++++++++++++++++------------------- 2 files changed, 28 insertions(+), 32 deletions(-) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index 9b5c30d64e..3f60eb9b76 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -15,8 +15,6 @@ from test.unified_format import UnifiedSpecTestMixinV1 -from pymongo.monitoring import PoolCreatedEvent - class TestCreateEntities(unittest.TestCase): def test_store_events_as_entities(self): @@ -53,7 +51,7 @@ def test_store_events_as_entities(self): self.assertIn("events1", final_entity_map) self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: - self.assertEqual(type(event), PoolCreatedEvent) + self.assertIn("PoolCreatedEvent", event) def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() diff --git a/test/unified_format.py b/test/unified_format.py index 4c705299e9..0d60a05467 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -226,7 +226,7 @@ def add_event(self, event): if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) for id in self._event_mapping[event_name]: - self.entity_map[id].append(event) + self.entity_map[id].append(str(event)) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: @@ -284,7 +284,7 @@ def __setitem__(self, key, value): self._entities[key] = value - def _create_entity(self, entity_spec): + def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: self.test.fail( "Entity spec %s did not contain exactly one top-level key" % ( @@ -315,6 +315,8 @@ def _create_entity(self, entity_spec): kwargs['server_api'] = ServerApi( server_api['version'], strict=server_api.get('strict'), deprecation_errors=server_api.get('deprecationErrors')) + if uri: + kwargs['h'] = uri client = rs_or_single_client(**kwargs) self[spec['id']] = client self.test.addCleanup(client.close) @@ -366,9 +368,9 @@ def _create_entity(self, entity_spec): self.test.fail( 'Unable to create entity of unknown type %s' % (entity_type,)) - def create_entities_from_spec(self, entity_spec): + def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: - self._create_entity(spec) + self._create_entity(spec, uri=uri) def get_listener_for_client(self, client_name): client = self[client_name] @@ -718,7 +720,6 @@ def insert_initial_data(self, initial_data): def setUpClass(cls): # super call creates internal client cls.client super(UnifiedSpecTestMixinV1, cls).setUpClass() - # process file-level runOnRequirements run_on_spec = cls.TEST_SPEC.get('runOnRequirements', []) if not cls.should_run_on(run_on_spec): @@ -733,7 +734,6 @@ def setUpClass(cls): def setUp(self): super(UnifiedSpecTestMixinV1, self).setUp() - # process schemaVersion # note: we check major schema version during class generation # note: we do this here because we cannot run assertions in setUpClass @@ -1080,15 +1080,14 @@ def _testOperation_loop(self, spec): successes_key = spec.get('storeSuccessesAsEntity') iteration_key = spec.get('storeIterationsAsEntity') iteration_limiter_key = spec.get('numIterations') - if failure_key: - self.entity_map[failure_key] = [] - if error_key: - self.entity_map[error_key] = [] - if successes_key: - self.entity_map[successes_key] = 0 - if iteration_key: - self.entity_map[iteration_key] = 0 + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 i = 0 + global IS_INTERRUPTED while True: if iteration_limiter_key and i >= iteration_limiter_key: break @@ -1096,24 +1095,24 @@ def _testOperation_loop(self, spec): if IS_INTERRUPTED: break try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 for op in spec["operations"]: self.run_entity_operation(op) if successes_key: self.entity_map._entities[successes_key] += 1 - if iteration_key: - self.entity_map._entities[iteration_key] += 1 - except AssertionError as exc: - if failure_key or error_key: - self.entity_map[failure_key or error_key].append({ - "error": exc, "time": time.time()}) - else: - raise exc except Exception as exc: - if error_key or failure_key: - self.entity_map[error_key or failure_key].append( - {"error": exc, "time": time.time()}) + if isinstance(exc, AssertionError): + key = failure_key or error_key else: - raise exc + key = error_key or failure_key + if not key: + raise + self.entity_map[key].append({ + "error": str(exc), + "time": time.time(), + "type": type(exc).__name__ + }) def run_special_operation(self, spec): opname = spec['name'] @@ -1174,7 +1173,7 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) - def run_scenario(self, spec): + def run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) @@ -1191,8 +1190,7 @@ def run_scenario(self, spec): # process createEntities self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec( - self.TEST_SPEC.get('createEntities', [])) - + self.TEST_SPEC.get('createEntities', []), uri=uri) # process initialData self.insert_initial_data(self.TEST_SPEC.get('initialData', [])) From 6d1dd6d63a43823204a33776a3da6c77f42068e2 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 12 Nov 2021 16:23:57 -0800 Subject: [PATCH 0518/2111] PYTHON-3014 Update how events are added to entity map to match specification (#785) --- test/test_create_entities.py | 4 +++- test/unified_format.py | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index 3f60eb9b76..b82b730aef 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -47,11 +47,12 @@ def test_store_events_as_entities(self): self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() final_entity_map = self.scenario_runner.entity_map self.assertIn("events1", final_entity_map) self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: - self.assertIn("PoolCreatedEvent", event) + self.assertIn("PoolCreatedEvent", event["name"]) def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() @@ -130,6 +131,7 @@ def test_store_all_others_as_entities(self): self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) + self.scenario_runner.entity_map["client0"].close() final_entity_map = self.scenario_runner.entity_map for entity in ["errors", "failures"]: self.assertIn(entity, final_entity_map) diff --git a/test/unified_format.py b/test/unified_format.py index 0d60a05467..25a980425f 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -226,7 +226,11 @@ def add_event(self, event): if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) for id in self._event_mapping[event_name]: - self.entity_map[id].append(str(event)) + self.entity_map[id].append({ + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event) + }) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: From 754e52890f92969c56f274714041850d71c6f664 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 15 Nov 2021 13:01:45 -0800 Subject: [PATCH 0519/2111] PYTHON-2915 Skip large txn test on slow Windows hosts (#788) --- test/test_transactions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/test_transactions.py b/test/test_transactions.py index 6c41f28cf4..32f02f8437 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -289,6 +289,8 @@ def gridfs_open_upload_stream(*args, **kwargs): # Require 4.2+ for large (16MB+) transactions. @client_context.require_version_min(4, 2) @client_context.require_transactions + @unittest.skipIf(sys.platform == 'win32', + 'Our Windows machines are too slow to pass this test') def test_transaction_starts_with_batched_write(self): if 'PyPy' in sys.version and client_context.tls: self.skipTest('PYTHON-2937 PyPy is so slow sending large ' From a7fb3281ea093103d0b19fe48f21593e7a7d1b8d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 15 Nov 2021 16:23:59 -0800 Subject: [PATCH 0520/2111] PYTHON-3004 Support kmip FLE KMS provider (#786) Resync CSFLE spec tests. --- .evergreen/config.yml | 43 + .evergreen/run-tests.sh | 7 - pymongo/encryption.py | 23 +- pymongo/encryption_options.py | 9 +- .../corpus/corpus-encrypted.json | 1830 +++++++++++++++++ .../corpus/corpus-key-kmip.json | 32 + .../corpus/corpus-schema.json | 1266 ++++++++++++ .../client-side-encryption/corpus/corpus.json | 1662 +++++++++++++++ .../client-side-encryption/spec/azureKMS.json | 14 + test/client-side-encryption/spec/gcpKMS.json | 14 + test/client-side-encryption/spec/kmipKMS.json | 223 ++ test/test_encryption.py | 244 ++- 12 files changed, 5328 insertions(+), 39 deletions(-) create mode 100644 test/client-side-encryption/corpus/corpus-key-kmip.json create mode 100644 test/client-side-encryption/spec/kmipKMS.json diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6f5a19e478..16be7f882a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -359,6 +359,49 @@ functions: PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-doctests.sh "run tests": + # If testing FLE, start the KMS mock servers, first create the virtualenv. + - command: shell.exec + params: + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate_venv.sh + fi + # Run in the background so the mock servers don't block the EVG task. + - command: shell.exec + params: + background: true + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate_venv.sh + # The -u options forces the stdout and stderr streams to be unbuffered. + # TMPDIR is required to avoid "AF_UNIX path too long" errors. + TMPDIR="$(dirname $DRIVERS_TOOLS)" python -u kms_kmip_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 5698 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & + python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & + fi + # Wait up to 10 seconds for the KMIP server to start. + - command: shell.exec + params: + script: | + if [ -n "${test_encryption}" ]; then + ${PREPARE_SHELL} + cd ${DRIVERS_TOOLS}/.evergreen/csfle + . ./activate_venv.sh + for i in $(seq 1 1 10); do + sleep 1 + if python -u kms_kmip_client.py; then + echo 'KMS KMIP server started!' + exit 0 + fi + done + echo 'Failed to start KMIP server!' + exit 1 + fi - command: shell.exec type: test params: diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 1e08e3ce17..3f4d6d9459 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -146,13 +146,6 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh - - # Start the mock KMS servers. - pushd ${DRIVERS_TOOLS}/.evergreen/csfle - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & - trap 'kill $(jobs -p)' EXIT HUP - popd fi if [ -z "$DATA_LAKE" ]; then diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1fe2877bbc..117666ac82 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -109,7 +109,7 @@ def kms_request(self, kms_context): message = kms_context.message provider = kms_context.kms_provider ctx = self.opts._kms_ssl_contexts.get(provider) - if not ctx: + if ctx is None: # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. ctx = get_ssl_context( @@ -378,9 +378,8 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, See :ref:`explicit-client-side-encryption` for an example. :Parameters: - - `kms_providers`: Map of KMS provider options. Two KMS providers - are supported: "aws" and "local". The kmsProviders map values - differ by provider: + - `kms_providers`: Map of KMS provider options. The `kms_providers` + map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used @@ -396,6 +395,8 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, Additionally, "endpoint" may also be specified as a string (defaults to 'oauth2.googleapis.com'). These are the credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. - `local`: Map with "key" as `bytes` (96 bytes in length) or a base64 encoded string which decodes to 96 bytes. "key" is the master key used to encrypt/decrypt @@ -424,7 +425,7 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} .. versionchanged:: 4.0 - Added the `kms_tls_options` parameter. + Added the `kms_tls_options` parameter and the "kmip" KMS provider. .. versionadded:: 3.9 """ @@ -458,7 +459,7 @@ def create_data_key(self, kms_provider, master_key=None, :Parameters: - `kms_provider`: The KMS provider to use. Supported values are - "aws" and "local". + "aws", "azure", "gcp", "kmip", and "local". - `master_key`: Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is not applicable and may be omitted. @@ -493,6 +494,16 @@ def create_data_key(self, kms_provider, master_key=None, - `endpoint` (string): Optional. Host with optional port. Defaults to "cloudkms.googleapis.com". + If the `kms_provider` is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. `keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + - `key_alt_names` (optional): An optional list of string alternate names used to reference a key. If a key is created with alternate names, then encryption may refer to the key by the unique alternate diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 1d4aa0c7b0..c96f4a6d67 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -55,9 +55,8 @@ def __init__(self, kms_providers, key_vault_namespace, See :ref:`automatic-client-side-encryption` for an example. :Parameters: - - `kms_providers`: Map of KMS provider options. Two KMS providers - are supported: "aws" and "local". The kmsProviders map values - differ by provider: + - `kms_providers`: Map of KMS provider options. The `kms_providers` + map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. These are the AWS access key ID and AWS secret access key used @@ -73,6 +72,8 @@ def __init__(self, kms_providers, key_vault_namespace, Additionally, "endpoint" may also be specified as a string (defaults to 'oauth2.googleapis.com'). These are the credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. - `local`: Map with "key" as `bytes` (96 bytes in length) or a base64 encoded string which decodes to 96 bytes. "key" is the master key used to encrypt/decrypt @@ -129,7 +130,7 @@ def __init__(self, kms_providers, key_vault_namespace, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} .. versionchanged:: 4.0 - Added the `kms_tls_options` parameter. + Added the `kms_tls_options` parameter and the "kmip" KMS provider. .. versionadded:: 3.9 """ diff --git a/test/client-side-encryption/corpus/corpus-encrypted.json b/test/client-side-encryption/corpus/corpus-encrypted.json index a11682688a..1b72aa8a39 100644 --- a/test/client-side-encryption/corpus/corpus-encrypted.json +++ b/test/client-side-encryption/corpus/corpus-encrypted.json @@ -7681,5 +7681,1835 @@ "value": { "$maxKey": 1 } + }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAB1hL/nPkpQtqxQUANbIJr30PQ98vPvaoy4JWUoElOL+cCnrSra3o7W+12dydy0rCS2EKrVm7Fw0C8L9nf1hpWjw==", + "subType": "06" + } + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABxlcphy2SxXlkRBvO1Z3nNUqchmeOhIhkdYBbbW7CwYeLVRDciXFsZN73Nb9Bm+W4IpUNpo6mqFEtfjevIjtFyg==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABx5AfRSiblFc1DGwxRIaUSP2kaM76ryzPUKL9KnEgnX1kjIlFz5B15uMht2cxdrntHFe1qZZk8V9PxTBpWZhJ8Q==", + "subType": "06" + } + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAABXUC9v9HPrmU9tINzFmr2sQM9f7GHDus+y5T4pWX28PRtfnTysN/ANCfB9RosoR/wuKsbznwwD2JfSzOvlKo3PQ==", + "subType": "06" + } + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.2339999999999999858" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACGHmqW1qbfqVlfB0x0CkXCk9smhs3yXsxJ/8eypSgbDQqVLSW2nf5bbHpnoCHHNtQ7I7ZBXzPzDLH2GgMJpopeQ==", + "subType": "06" + } + } + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAC9BJTD1pEMbslAjbJYt7yx/jzKkcZF3axu96+NYwp8afUCjXG5TOUZzODOwkbJuWgr7DBxa2GkZTvaAEk86h+Ow==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACQlG28ECy8KHXC7GEPdC8+raBo2RMJwl5pofcPaTGkPUEbkreguMd1mYctNb90vXxby1nNeJY4o5zJJCMiNhNXg==", + "subType": "06" + } + } + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAACbWuK+3nzeKSNVjmgHb0Ii7rA+CsAd+gYubPiMiHXZwE/o6i9FYWN+t/VK3p4K0CwIi6q3cycrMb2IgcvM27Q7Q==", + "subType": "06" + } + } + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAC5OZgr9keCXOIj5Fi06i4win1xt7gpsyPA4Os+HdFn1MIP9tnktvWNRb8Rqhuj2O9KO83brx74Hu3EQ4nT6uCMw==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADh2nGqaAUwHDRVjqYpj8JAPH7scmiHp1Z9SGBZQ6Fapxm+zWDdTBHyitM9U69BctJ5DaaafyqFOj5yr6sJ+ebJQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAD1YhOKyNle4y0Qbeio1HlCULLeTCALCLgKSITd50bilD+oDyqQawixJAwphcdjhLdFzbFwst5RWqpsiWMPHx4hQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADveILoWFgX7AhUWCv8UL52TUa75qHuoNadnTQydJlqd6PVmtRKj+8vS7VwxNWPaH4wB1Tk7emMyFEbZpvvzjxqQ==", + "subType": "06" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAADB/LN9V/4SROJn+ESHRLM7wwcUltQUx3+LbbYXjPDXiiV14HK76Iyy6ZxJ+M5qC9bRj3afhTKuWLBblB8WwksOg==", + "subType": "06" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEasWXQam8XtOkSO0nEttMCQ0iZ4V8DDmhMKyQDFDsiNHyF2h98Ya/xFv4ZSlbpGWXPBvBATEGgov/PDg2vhVi53y4Pk33RHfY60hABuksp3o=", + "subType": "06" + } + } + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEj3A1DYSEHm/3SlEmusA+pewxRPUoZ2NAjs60ioEBlCw9n6yiiB+X8d/w40TKsjZcOSfh05NC0z3gnpqQvrNolkxkvi9dmFiZeiiv5vBZUPI=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEqeJW+L6lP0bn5QcD0FMI0C8vv2n5kV7SKgqKi1o5mxaxmp3Cjlspf7yumfSiQ5js6G9yJVAvHuxlqv14UFyR9RgXS0PIA8WzsAqkL0sJSw0=", + "subType": "06" + } + } + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAEnPlPwy0B1VKuNum1GzkZwQjZia5jNYL5bf/k+PbfhnToTRWGxx8+E3R7XXp6YT/rFkjPlzU8ww9+iZNo2oqNpYuHdrIC8ybhO6HZAlvcERo=", + "subType": "06" + } + } + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFliNDZ6DmjoVcYQBCKDI9njpBsDELg+TD6XLF7xbZnMaJCCHLHr7w3x2/xFfrFSN44CtGAKOniYPCMAspaxHqOA==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF/P8LPmHKGgG0l5/Xi7jdkwfxpGPxoY0417suCvN6zjM3JNdufytzkektrm9CbBb1SnZCGYF9c0FCMzFG+tN/dg==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFWI0N4RbnYdEiFrzNpbRN9p+bSLm8Lthiu4K3/CvBg6GQpLMVQFhjW01Bud0lxpT2ohRnOK+ASUhiFcUU/t/lWQ==", + "subType": "06" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFQZvAtpY4cjEr1rJWVoUGaZKmzocSJ0muHose7Tk5kRDczjFa4Jcu4hN7JLM9qz2z4g+WJC3KQTdW4ZBXStke/Q==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFohIHrvzu8xLxVHsnYEDhZmv8BpEoEtFSjMUQzvBLUInvvTuU/rOzlVL88CkAEII7M3hcvrz8FKY7b7lC1veoYg==", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFn7rhdO8tYq77uVxcqd9Qjz84Yg7JnJMYf0ULTMTh1vJHacckkhXw+8fIMMiAKwuOVwGkMAtu5RBvrFqdfxryCg8RLTxu1YYVthufiClEIS0=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFwwXQx9dKyoyHq7GBMmHzYe9ysoJK/f/ZWzA6nErau9MtX1gqi7VRsYqkamb47/zVbsLZwPMmdgNyPxEh3kqbV2D61t5RG2A3VeqhO1pTF8c=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAFALeGeinJ8DE+WZniLdCIW2gfJUj445Ukp9PvRLgBXLGedl8mIXlLF2eu3BA9vP6s5y9w6peQjhn+oEofrsUVYD2duyzeIRMKgNiNchjf6TU=", + "subType": "06" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAF06Fx8CO3OSKE3fGri0VwK0e22YiG9LH2QkDTsRdFbT2lBm+bDD9FrEY8vKWS5RljMuysaxjBOzZ98d2LEs6k8LMOm83Nz/RESe4ZbbcfdQ0=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAFzmZI909fJgxOykJtvOlv5LsX8z6BxUX2Xg5TsIwOxJMPSC8usm/zR7sZawoVBOuJxtNVLY/8oNP/4pFtAmQo02bUOtTo1yxNz/IZa9x+Q5E=", + "subType": "06" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHZFzE908RuO5deEt3t2QQdT12ybwqbm8D+sMJrdKt2Wp4kVPsw4ocAGGsRYN6VXe46P5fmyG5HqVWn0hkflZnQg==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAH3dPKyCCStvOtVGzlgIS33fsl8OAwQblt9i21pOVuLiliY1Tup9EtkSic88+nNEtXnq9gRknRzLthXv/k1ql+7Q==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHcEjxVfHDSfLzFxAuK/rs/Pn/XV7jLkgKXZYeY0PNlRi1MHojN2AvQqI3J2rOvAjuYfikGcpvGPp/goqUbV9HYw==", + "subType": "06" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAHX65sNHnRYpx3VbWPCdQyFe7u0Y5ItabLEduqDeVsPk/iK4X3GjCSHQfw1yPi+CA+/veVpgdonwws6RiYV4ZZ5Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAHKU7mcdGEq2WGrDB6TicipLQstAk6G3PkiNt5F3bMavpKLjz04UBrd8aWGVG2gJTTON1UKRztiYFgRvb8f+LK/Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIw/xgJlKEvErmVtue3X3RFsOI2sttAbxnzh1INc9GUQ2vok1VwYt9k88RxMPiOwMAZG7P1MlAdx7zt865onPKOw==", + "subType": "06" + } + } + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIn8IuzlNHbpTgXOd1wEp364zJOBxj2Zf7a9B5osUV1sDY0G1OVpEnuDvZeUsdiUSyRjTTxzyuD/KZlKZ3+qrnrA==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAI3Nz9PdjUYQRGfTtvYSR8EQuUKFL0wdlEdfSCTBmMBhBPuuF9KxqCgy+ldVu1DRRgg3346DOKEEtE9BJPPInJ6Q==", + "subType": "06" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAIEGjqoerIZBk8Rw+YTO7jFKWzagDS8mEpD+9Wm1Q0r0ZHUmV0dQZcIqRV4oUk8U8uHUn0N3t2qGLr+rhUs4GH/g==", + "subType": "06" + } + } + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJgr0v4xetUXjlLcPcyKv/rzjtWOKp9CZJcm23Noglu5RR/rXJS0qKI+W9MmJ64TMf27KvaJ0UXwfTRrvOC1plCg==", + "subType": "06" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJoeysAaiPsVK+JL1P1vD/9xF92m5kKidUdn6yklPlSKN4VVEBTymDetTLujULs1u1TlrS71jVLxo3xEwpG/KQvg==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJVwu4+Su0DktpnZvzTBHYpWbWTq5gho/SLijrcIrFJcvq4YrjjPCXv+odCl95tkH+J1RlJdQ5Cr0umEIazLa6GA==", + "subType": "06" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAJWTYpjbDkIf82QXHMGrvd0SqhP8cBIakfYJf5aNcNrs86vxRhiG3KwETWPeOOlPZ6n1WjE2bOLB+DJTAxmJvahA==", + "subType": "06" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAJ/+sQrUqQh+JADSVIKM0d68gDUhDy37M1z1uvROzQw6hUAbQeD0DWdztADKg560UTPM4uOgH4NAyhLyBLMrWWHg==", + "subType": "06" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALi8avMfpxSlDsSTqdxO8O2B1M79gOElyUIdXySQo7mvgHlf4oHQ7r94lL9dnsA2t/jmUmBKoGypaUQUSQE+9x+A==", + "subType": "06" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALfHerZ/KolaBrb5qi3SpeNVW+i/nh5mkcdtQg5f1pHePr68KryHucM/XDAzbMqrPlag2/41STGYdJqzYO7Mbppg==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALOhKDVAN5cuDyB1EuRFWgKKt0wGJ63E5pPY8Tq2TXMNgCxUUc5O+TE+Ux4ls/uMyOBA3gPzND0CZKiru0i7ACUQ==", + "subType": "06" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAALK3Hg8xX9gX+d3vKh7aosRP9CS2CIFeG9sapZv3OAPv1eWjY62Cp/G16kJ0BQt33RYD+DzD3gWupfUSyNZR0gng==", + "subType": "06" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAALaQXA8rItT7ELVxO8XtAWdHuiXFFPmnMhS5PMrUy/6mRtbq4fvU9dascW7ozonKOh8ad6+MIT7B/STv9dVBF4Kw==", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMoGkfmmUWTI+0aW7jVyCJ5Dgru1SCXBUmJSRzDL0D57pNruQ+79tVVcI6Uz5j87DhZFxShHbPjj583vLOOBNM3WGzZCpqH3serhHTWvXK+NM=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMwu1WaRhhv43xgxLNxuenbND9M6mxGtCs9o4J5+yfL95XNB9Daie3RcLlyngz0pncBie6IqjhTycXsxTLQ94Jdg6m5GD5cU541LYKvhbv5f4=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAM+CIoCAisUwhhJtWQLolxQGQWafniwYyvaJQHmJC94Uwbf1gPfhMR42v2VtrmIVP0J0BaP/xf0cco2/qWRdKGZpgkK2CK6M972NtnZ/2x03A=", + "subType": "06" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAMjbeE9+EaJYjGfeAuxsV8teOdsW8bfnlkvji/tE11Zq89UMGx+oUsZzeLjUgVZ5nxsZKCZjEAq+DPnwFVC+MgqNeqWL7fRChODFlPGH2ZC+8=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAM5B+fjbjYCZzCYUu4N/pJI3srCCXN+OCCHweeweqmpIEmB7yw87bQRIMGtCm6HuekcZ5J5q+nY5AQb0du/wh1YIoOrC3u4w7ZcLHkDmuAJPg=", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANuzlkWs/c8xArrAxPgYuCeShjj1zCfIMHOTPohspcyNofo9iY3P5MlhEOprZDiS8dBFg6EB7fZDzDdczx6VCN2A==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANwJ72y7UqCBJh1NwVRiE3vU1ex7FMv/X5YWCMuO9MHPMo4g1V5eaO4KfOr+K8+9NtkflgMpeDkvwP92rfR5ud5Q==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANj5q+888itRnLsw9PNGsBLhgqpvem5IJBOE2292r6zwjVueoEK/2I2PesRnn0esnkwdia1ADoMkcLUegwcFRkWQ==", + "subType": "06" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAANnvbnmApys7OIe8LGTsZKDG1F1G1SI/rfZVmF6q1fq5U7feYPp1ejb2t2S2+v7LfcOHytsQWGcYuWCDcl+vosvQ==", + "subType": "06" + } + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAANOR9R/Da8j5iVxllLiGFlv4U/bVn/PyN9/5WeGJkGJeE/j/osKrKx6IL1igI0YVI+pKKzsINqJGIv+bJX0s7MNw==", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOe+vXpJSkmBM3WkxZrn4ea9/C6iNyMXWUzkQIzIYlnbkyu8od8nfOdhobUhoFxcKnvdaxN1s5NhJ1FA97RN/upGYN+AI/7cTCElmFSpdSvkI=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPpCgK6Hc/M2elOJkwIU9J7PZa+h1chody2yvfDu/UlB6T5sxnEZ6aEY/ISNLhJlhsRzuApSgFOmnrcG6Eg9VnSKin2yK0ll+VFxQEDHAcSA=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOVoHX9GaOn71L5D9TpZmmxkx/asr0FHCLG5ZgLLA04yIhZHsDjt2DiVGGO/Mf4KwvoBn7Cf08qMhW7rQh2LgvvSLBO3zbw5l+MZ/bSn+Jylo=", + "subType": "06" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAOPobmcO/I4QObtCUEmGWpSCJ6tlYyhbO59q78LZBucSNl7DSkf/13tOJ9t+WKXACcMKVMmfPoFsgHbVj1nKWULBT07n1OWWDTZkuMD6C2+Fc=", + "subType": "06" + } + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAOPpwX4mafoQJYHuzYfbKW1JunpjpB7Nd2slTC3n8Hsas9wQYf9VkModQhe5M4wZHOIXpehaODRcjKKfKRmpnNBOURSLm/ORJvy+UxtSLsnqo=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPW2VMMm+EvsYpVtJQhsxgxgvV35kr9nxqKxP2qqIOAOQ58R/1oyYScFkNwB/tw0A1/zdvhoo+ERa7c0tjLIojFrosXhX2N/8Z4VnbZruz0Nk=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPjPq9BQR4EwG/CD+RthOJY04m99LCl/shY6HnaU/QL627kN1dbBAG5vs+MXfa+glg8waVTNgB94vm3j72FMV1ZOKvbl4faWF1Rl2EOpOlR9U=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtqebrCAidKzBMvp3B5/vBeetqeCoMKS+vo+hLAYooXrnBunWxwRHpr45XYUvroG3aqOMkLtVZSgw8sO6Y/3z1viO2G0sGQW1ZMoW0/PX5Uw=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAPtkJwXKlq8Fx1f1+9HFofM4uKi6lHQRFRyiOyUFJYxxZY1LR/2WXXTqWz3MWtrcJFCB+QSVOb1N/ieC7AZUboPgIuPJISM3Hu5VU2x/Isbdc=", + "subType": "06" + } + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQ50kE7Tby9od2OsmIGZhp9k/mj4vy/YdnmF6YsSPxihbjV1vXGMraI/nGCr+0H1riwzq3m4sCT7aPw2VgiuwKMA==", + "subType": "06" + } + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQkNL14OSMX/bJbsLtB/UumRoat6QOY7fvwZxRrkXTS3VJVHigthI1cUX7Is/uUsY8oHOfk/ZuHklQkifmfdcklQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQtN2gNVU9Itoj+vgcK/4jEB5baSUH+Qz2WqTY7m0XaA3bPWGFCiWY4Sdw+qovednrSSSbC+azWi1QYclFRraldQ==", + "subType": "06" + } + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAQk6uBqwXXFF9zEM4bc124goI3pBy2Jdi8Cd0ycKkjXrPG7GVCUm2UMbO+zEzYODeVo35N11g2yMXcv9RVgjWtNA==", + "subType": "06" + } + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAQgrkPEf+RBZMn/J7HZObqEfus8icYls6ecaUrlabI6v1ALgxLuv23WSIfTr6mqpQCounqdA14DWS/Wl3kSkVC0w==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAAR2Cu3o2e/u5o69MndeZPJU5ngVA1G2MNYn00t+up/GlmaUC1ni1CVl0ZR0EVZ0gCDUrfxwPISPib8y23tNjbsog==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARgi8stgSQwqnN4Ws2ZBILOREsjreZcS1MBerL7dbGLVfzW99tqECglhGokkrE0aY69L0xMgcAUIaFRN4GanQAPg==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARPxEEI8L5Q3Jybu88BLdf31T3uYEUbijgSlKlkTt141RYrlE8nxtiYU5/5H9GXBis0Qq1s2C+MauD2h/cNijTCA==", + "subType": "06" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAARh/QaU1dnGbii4LtXCpT5o6vencc8E2fzarjJFbSEd0ixW/UV1ppZdvD729d0umkaIwIEVA4q+XVvHfl/ckKPFg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAARqdpLb72mmzb75QBrE+ATMfS5LLqzAD/1g5ScT8zfgh0IHsZZBWCJlSVRNC12Sgr3zdXHMtYp8C3OZT6/tPkQGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASVv+ClXkh9spIaXWJYRV/o8UZjG+WWWrNpIjZ9LQn2bXakrKJ3REvdkrzGuxASmBhBYTplEyvxVCJwXuWRAGGYw==", + "subType": "06" + } + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASeAz/dK+Gc4/jx3W07B2rNFvQ0LoyCllFRvRVGu1Xf1NByc4cRZLOMzlr99syz/fifF6WY30bOi5Pani9QtFuGg==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASP1HD9uoDlwTldaznKxW71JUQcLsa4/cUWzeTnelQwdpohCbZsM8fBZBqgwwTWnjpYY/LBUipC6yhwLKfUXBoBQ==", + "subType": "06" + } + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAASnGPH77bS/ETB1hn+VTvsBrxEvIHA6EAb8Z2SEz6BHt7SVeI+I7DLERvRVpV5kNJFcKgXDrvRmD+Et0rhSmk9sw==", + "subType": "06" + } + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "ASjCDwAAAAAAAAAAAAAAAAAS+zKmtijSTPOEVlpwmaeMIOuzVNuZpV4Jw9zP8Yqa1xYtlItXDozqdibacRaA74KU49KNySdR1T7fxwxa2OOTrQ==", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATu/BbCc5Ti9SBlMR2B8zj3Q1yQ16Uob+10LWaT5QKS192IcnBGy4wmmNkIsTys060xUby9KKQF80dVPnjYfqJwEXCe/pVaPQZftE0DolKv78=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATpq6/dtxq2ZUZHrK10aB0YjjPalEaXYcyAyRZjfXWAYCLZdT9sIybjX3Axjxisim+VSHx0QU7oXkKUfcbLgHyjUXj8g9059FHxKFkUsNv4Z8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATS++9KcfM7uiShZYxRpFPrBJquKv7dyvFRTjnxs6aaaPo0fiqpv6bco/cMLsldEVpWDEA/Tc2HtSXYPp4UJsMfASyBjoxCloL5SaRWyD9Ye8=", + "subType": "06" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AijCDwAAAAAAAAAAAAAAAAATREcETS5KoAGyj/P45owPrdFfy5ng8Z1ND+F+780lLddOyPeDnIsa7yg6uvhTZ65mHfGLvKcFocclYenq/AX1dY4xdjLRg/AfT088A27ORUA=", + "subType": "06" + } + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } } } \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-key-kmip.json b/test/client-side-encryption/corpus/corpus-key-kmip.json new file mode 100644 index 0000000000..7c7069700e --- /dev/null +++ b/test/client-side-encryption/corpus/corpus-key-kmip.json @@ -0,0 +1,32 @@ +{ + "_id": { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": ["kmip"] +} \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus-schema.json b/test/client-side-encryption/corpus/corpus-schema.json index f145f712a4..e74bc914f5 100644 --- a/test/client-side-encryption/corpus/corpus-schema.json +++ b/test/client-side-encryption/corpus/corpus-schema.json @@ -5064,6 +5064,1272 @@ "bsonType": "binData" } } + }, + "kmip_double_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "double" + } + } + } + }, + "kmip_double_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_double_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "string" + } + } + } + }, + "kmip_string_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "string" + } + } + } + }, + "kmip_string_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_string_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "kmip_object_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "object" + } + } + } + }, + "kmip_object_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_object_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "array" + } + } + } + }, + "kmip_array_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_array_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=00_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "binData" + } + } + } + }, + "kmip_binData=04_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "objectId" + } + } + } + }, + "kmip_objectId_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_objectId_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "bool" + } + } + } + }, + "kmip_bool_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_bool_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "date" + } + } + } + }, + "kmip_date_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "date" + } + } + } + }, + "kmip_date_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_date_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "regex" + } + } + } + }, + "kmip_regex_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_regex_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "dbPointer" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "javascript" + } + } + } + }, + "kmip_javascript_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascript_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "symbol" + } + } + } + }, + "kmip_symbol_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_symbol_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "javascriptWithScope" + } + } + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "int" + } + } + } + }, + "kmip_int_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "int" + } + } + } + }, + "kmip_int_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_int_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "timestamp" + } + } + } + }, + "kmip_timestamp_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "long" + } + } + } + }, + "kmip_long_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + "bsonType": "long" + } + } + } + }, + "kmip_long_det_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_long_det_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_auto_id": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "KMIPAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_auto_altname": { + "bsonType": "object", + "properties": { + "value": { + "encrypt": { + "keyId": "/altname_kmip", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + "bsonType": "decimal" + } + } + } + }, + "kmip_decimal_rand_explicit_id": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } + }, + "kmip_decimal_rand_explicit_altname": { + "bsonType": "object", + "properties": { + "value": { + "bsonType": "binData" + } + } } } } \ No newline at end of file diff --git a/test/client-side-encryption/corpus/corpus.json b/test/client-side-encryption/corpus/corpus.json index 55bbaf99c2..559711b347 100644 --- a/test/client-side-encryption/corpus/corpus.json +++ b/test/client-side-encryption/corpus/corpus.json @@ -4,6 +4,7 @@ "altname_local": "local", "altname_azure": "azure", "altname_gcp": "gcp", + "altname_kmip": "kmip", "aws_double_rand_auto_id": { "kms": "aws", "type": "double", @@ -6648,6 +6649,1667 @@ "$maxKey": 1 } }, + "kmip_double_rand_auto_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_auto_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_rand_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_id": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_double_det_explicit_altname": { + "kms": "kmip", + "type": "double", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDouble": "1.234" + } + }, + "kmip_string_rand_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_auto_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_rand_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_auto_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_id": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": "mongodb" + }, + "kmip_string_det_explicit_altname": { + "kms": "kmip", + "type": "string", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": "mongodb" + }, + "kmip_object_rand_auto_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_auto_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_rand_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_id": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_object_det_explicit_altname": { + "kms": "kmip", + "type": "object", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "x": { + "$numberInt": "1" + } + } + }, + "kmip_array_rand_auto_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_auto_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_rand_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_id": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_array_det_explicit_altname": { + "kms": "kmip", + "type": "array", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": [ + { + "$numberInt": "1" + }, + { + "$numberInt": "2" + }, + { + "$numberInt": "3" + } + ] + }, + "kmip_binData=00_rand_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_auto_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_auto_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_id": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=00_det_explicit_altname": { + "kms": "kmip", + "type": "binData=00", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AQIDBA==", + "subType": "00" + } + } + }, + "kmip_binData=04_rand_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_auto_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_rand_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_auto_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_id": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_binData=04_det_explicit_altname": { + "kms": "kmip", + "type": "binData=04", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$binary": { + "base64": "AAECAwQFBgcICQoLDA0ODw==", + "subType": "04" + } + } + }, + "kmip_undefined_rand_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_rand_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_id": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_undefined_det_explicit_altname": { + "kms": "kmip", + "type": "undefined", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$undefined": true + } + }, + "kmip_objectId_rand_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_auto_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_rand_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_auto_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_id": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_objectId_det_explicit_altname": { + "kms": "kmip", + "type": "objectId", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$oid": "01234567890abcdef0123456" + } + }, + "kmip_bool_rand_auto_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_auto_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": true + }, + "kmip_bool_rand_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": true + }, + "kmip_bool_det_explicit_id": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": true + }, + "kmip_bool_det_explicit_altname": { + "kms": "kmip", + "type": "bool", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": true + }, + "kmip_date_rand_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_auto_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_rand_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_auto_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_id": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_date_det_explicit_altname": { + "kms": "kmip", + "type": "date", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$date": { + "$numberLong": "12345" + } + } + }, + "kmip_null_rand_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_rand_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_id": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": null + }, + "kmip_null_det_explicit_altname": { + "kms": "kmip", + "type": "null", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": null + }, + "kmip_regex_rand_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_auto_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_rand_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_auto_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_id": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_regex_det_explicit_altname": { + "kms": "kmip", + "type": "regex", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$regularExpression": { + "pattern": ".*", + "options": "" + } + } + }, + "kmip_dbPointer_rand_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_auto_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_rand_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_auto_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_id": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_dbPointer_det_explicit_altname": { + "kms": "kmip", + "type": "dbPointer", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$dbPointer": { + "$ref": "db.example", + "$id": { + "$oid": "01234567890abcdef0123456" + } + } + } + }, + "kmip_javascript_rand_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_auto_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_rand_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_auto_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_id": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_javascript_det_explicit_altname": { + "kms": "kmip", + "type": "javascript", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1" + } + }, + "kmip_symbol_rand_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_auto_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_rand_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_auto_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_id": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_symbol_det_explicit_altname": { + "kms": "kmip", + "type": "symbol", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$symbol": "mongodb-symbol" + } + }, + "kmip_javascriptWithScope_rand_auto_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_auto_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_rand_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_id": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_javascriptWithScope_det_explicit_altname": { + "kms": "kmip", + "type": "javascriptWithScope", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$code": "x=1", + "$scope": {} + } + }, + "kmip_int_rand_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_auto_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_rand_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_auto_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_id": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_int_det_explicit_altname": { + "kms": "kmip", + "type": "int", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberInt": "123" + } + }, + "kmip_timestamp_rand_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_auto_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_rand_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_auto_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_id": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_timestamp_det_explicit_altname": { + "kms": "kmip", + "type": "timestamp", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$timestamp": { + "t": 0, + "i": 12345 + } + } + }, + "kmip_long_rand_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_auto_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_rand_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_auto_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_id": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_long_det_explicit_altname": { + "kms": "kmip", + "type": "long", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberLong": "456" + } + }, + "kmip_decimal_rand_auto_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_auto_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "auto", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_rand_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": true, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_id": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_decimal_det_explicit_altname": { + "kms": "kmip", + "type": "decimal", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$numberDecimal": "1.234" + } + }, + "kmip_minKey_rand_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_rand_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_id": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_minKey_det_explicit_altname": { + "kms": "kmip", + "type": "minKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$minKey": 1 + } + }, + "kmip_maxKey_rand_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_rand_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "rand", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_id": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "id", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, + "kmip_maxKey_det_explicit_altname": { + "kms": "kmip", + "type": "maxKey", + "algo": "det", + "method": "explicit", + "identifier": "altname", + "allowed": false, + "value": { + "$maxKey": 1 + } + }, "payload=0,algo=rand": { "kms": "local", "type": "string", diff --git a/test/client-side-encryption/spec/azureKMS.json b/test/client-side-encryption/spec/azureKMS.json index f0f5329d70..afecf40b0a 100644 --- a/test/client-side-encryption/spec/azureKMS.json +++ b/test/client-side-encryption/spec/azureKMS.json @@ -64,6 +64,20 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/test/client-side-encryption/spec/gcpKMS.json b/test/client-side-encryption/spec/gcpKMS.json index 297d5d0dc8..c2c08b8a23 100644 --- a/test/client-side-encryption/spec/gcpKMS.json +++ b/test/client-side-encryption/spec/gcpKMS.json @@ -64,6 +64,20 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/test/client-side-encryption/spec/kmipKMS.json b/test/client-side-encryption/spec/kmipKMS.json new file mode 100644 index 0000000000..5749d21ab8 --- /dev/null +++ b/test/client-side-encryption/spec/kmipKMS.json @@ -0,0 +1,223 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index d94fcf3469..72e7dbbf1c 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -17,11 +17,12 @@ import base64 import copy import os +import re import ssl -import traceback import socket import sys import textwrap +import traceback import uuid sys.path[0:0] = [""] @@ -516,6 +517,10 @@ def test_with_statement(self): 'email': os.environ.get('FLE_GCP_EMAIL', ''), 'privateKey': os.environ.get('FLE_GCP_PRIVATEKEY', '')} +KMIP = {'endpoint': os.environ.get('FLE_KMIP_ENDPOINT', 'localhost:5698')} +KMS_TLS_OPTS = {'kmip': {'tlsCAFile': CA_PEM, + 'tlsCertificateKeyFile': CLIENT_PEM}} + class TestSpec(SpecRunner): @@ -550,6 +555,9 @@ def parse_auto_encrypt_opts(self, opts): kms_providers['gcp'] = GCP_CREDS if not any(AZURE_CREDS.values()): self.skipTest('GCP environment credentials are not set') + if 'kmip' in kms_providers: + kms_providers['kmip'] = KMIP + opts['kms_tls_options'] = KMS_TLS_OPTS if 'key_vault_namespace' not in opts: opts['key_vault_namespace'] = 'keyvault.datakeys' opts = dict(opts) @@ -631,6 +639,13 @@ def run_scenario(self): b'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ' b'5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk') +ALL_KMS_PROVIDERS = { + 'aws': AWS_CREDS, + 'azure': AZURE_CREDS, + 'gcp': GCP_CREDS, + 'kmip': KMIP, + 'local': {'key': LOCAL_MASTER_KEY}} + LOCAL_KEY_ID = Binary( base64.b64decode(b'LOCALAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) AWS_KEY_ID = Binary( @@ -639,6 +654,8 @@ def run_scenario(self): base64.b64decode(b'AZUREAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) GCP_KEY_ID = Binary( base64.b64decode(b'GCPAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) +KMIP_KEY_ID = Binary( + base64.b64decode(b'KMIPAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) def create_with_schema(coll, json_schema): @@ -661,10 +678,7 @@ def create_key_vault(vault, *data_keys): class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): - KMS_PROVIDERS = {'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'local': {'key': LOCAL_MASTER_KEY}} + KMS_PROVIDERS = ALL_KMS_PROVIDERS MASTER_KEYS = { 'aws': { @@ -679,6 +693,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): 'location': 'global', 'keyRing': 'key-ring-csfle', 'keyName': 'key-name-csfle'}, + 'kmip': {}, 'local': None } @@ -710,11 +725,13 @@ def setUpClass(cls): } } opts = AutoEncryptionOpts( - cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas) + cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS) cls.client_encrypted = rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation='standard') cls.client_encryption = ClientEncryption( - cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS) + cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS, + kms_tls_options=KMS_TLS_OPTS) @classmethod def tearDownClass(cls): @@ -784,6 +801,9 @@ def test_data_key_azure(self): def test_data_key_gcp(self): self.run_test('gcp') + def test_data_key_kmip(self): + self.run_test('kmip') + class TestExternalKeyVault(EncryptionIntegrationTest): @@ -882,10 +902,7 @@ def setUpClass(cls): @staticmethod def kms_providers(): - return {'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'local': {'key': LOCAL_MASTER_KEY}} + return ALL_KMS_PROVIDERS @staticmethod def fix_up_schema(json_schema): @@ -923,7 +940,8 @@ def _test_corpus(self, opts): json_data('corpus', 'corpus-key-local.json'), json_data('corpus', 'corpus-key-aws.json'), json_data('corpus', 'corpus-key-azure.json'), - json_data('corpus', 'corpus-key-gcp.json')) + json_data('corpus', 'corpus-key-gcp.json'), + json_data('corpus', 'corpus-key-kmip.json')) self.addCleanup(vault.drop) client_encrypted = rs_or_single_client( @@ -932,7 +950,7 @@ def _test_corpus(self, opts): client_encryption = ClientEncryption( self.kms_providers(), 'keyvault.datakeys', client_context.client, - OPTS) + OPTS, kms_tls_options=KMS_TLS_OPTS) self.addCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json')) @@ -940,7 +958,7 @@ def _test_corpus(self, opts): for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp', - 'altname_local'): + 'altname_local', 'altname_kmip'): continue if value['method'] == 'auto': continue @@ -948,7 +966,7 @@ def _test_corpus(self, opts): identifier = value['identifier'] self.assertIn(identifier, ('id', 'altname')) kms = value['kms'] - self.assertIn(kms, ('local', 'aws', 'azure', 'gcp')) + self.assertIn(kms, ('local', 'aws', 'azure', 'gcp', 'kmip')) if identifier == 'id': if kms == 'local': kwargs = dict(key_id=LOCAL_KEY_ID) @@ -956,8 +974,10 @@ def _test_corpus(self, opts): kwargs = dict(key_id=AWS_KEY_ID) elif kms == 'azure': kwargs = dict(key_id=AZURE_KEY_ID) - else: + elif kms == 'gcp': kwargs = dict(key_id=GCP_KEY_ID) + else: + kwargs = dict(key_id=KMIP_KEY_ID) else: kwargs = dict(key_alt_name=kms) @@ -990,7 +1010,7 @@ def _test_corpus(self, opts): corpus_encrypted_actual = coll.find_one() for key, value in corpus_encrypted_actual.items(): if key in ('_id', 'altname_aws', 'altname_azure', - 'altname_gcp', 'altname_local'): + 'altname_gcp', 'altname_local', 'altname_kmip'): continue if value['algo'] == 'det': @@ -1011,7 +1031,8 @@ def _test_corpus(self, opts): self.assertEqual(value['value'], corpus[key]['value'], key) def test_corpus(self): - opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys') + opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys', + kms_tls_options=KMS_TLS_OPTS) self._test_corpus(opts) def test_corpus_local_schema(self): @@ -1019,7 +1040,8 @@ def test_corpus_local_schema(self): schemas = {'db.coll': self.fix_up_schema( json_data('corpus', 'corpus-schema.json'))} opts = AutoEncryptionOpts( - self.kms_providers(), 'keyvault.datakeys', schema_map=schemas) + self.kms_providers(), 'keyvault.datakeys', schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS) self._test_corpus(opts) @@ -1142,21 +1164,26 @@ def setUpClass(cls): def setUp(self): kms_providers = {'aws': AWS_CREDS, 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS} + 'gcp': GCP_CREDS, + 'kmip': KMIP} self.client_encryption = ClientEncryption( kms_providers=kms_providers, key_vault_namespace='keyvault.datakeys', key_vault_client=client_context.client, - codec_options=OPTS) + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS) kms_providers_invalid = copy.deepcopy(kms_providers) kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'example.com:443' kms_providers_invalid['gcp']['endpoint'] = 'example.com:443' + kms_providers_invalid['kmip']['endpoint'] = 'doesnotexist.local:5698' self.client_encryption_invalid = ClientEncryption( kms_providers=kms_providers_invalid, key_vault_namespace='keyvault.datakeys', key_vault_client=client_context.client, - codec_options=OPTS) + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS) + self._kmip_host_error = '' def tearDown(self): self.client_encryption.close() @@ -1289,6 +1316,41 @@ def test_09_gcp_invalid_endpoint(self): self.client_encryption.create_data_key( 'gcp', master_key=master_key) + def kmip_host_error(self): + if self._kmip_host_error: + return self._kmip_host_error + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + try: + socket.getaddrinfo('doesnotexist.local', 5698, socket.AF_INET, + socket.SOCK_STREAM) + except Exception as exc: + self._kmip_host_error = re.escape(str(exc)) + return self._kmip_host_error + + def test_10_kmip_invalid_endpoint(self): + key = {'keyId': '1'} + self.run_test_expected_success('kmip', key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + self.client_encryption_invalid.create_data_key('kmip', key) + + def test_11_kmip_master_key_endpoint(self): + key = {'keyId': '1', 'endpoint': KMIP['endpoint']} + self.run_test_expected_success('kmip', key) + # Override invalid endpoint: + data_key_id = self.client_encryption_invalid.create_data_key( + 'kmip', master_key=key) + encrypted = self.client_encryption_invalid.encrypt( + 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=data_key_id) + self.assertEqual( + 'test', self.client_encryption_invalid.decrypt(encrypted)) + + def test_12_kmip_master_key_invalid_endpoint(self): + key = {'keyId': '1', 'endpoint': 'doesnotexist.local:5698'} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + self.client_encryption.create_data_key('kmip', key) + class AzureGCPEncryptionTestMixin(object): DEK = None @@ -1709,5 +1771,143 @@ def test_invalid_hostname_in_kms_certificate(self): self.client_encrypted.create_data_key('aws', master_key=key) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests +class TestKmsTLSOptions(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), + 'AWS environment credentials are not set') + @unittest.skipIf(sys.version_info[:2] >= (3, 10) and + sys.platform == 'win32', + 'These tests hang with Python 3.10 on Windows') + def setUp(self): + super(TestKmsTLSOptions, self).setUp() + # 1, create client with only tlsCAFile. + providers = copy.deepcopy(ALL_KMS_PROVIDERS) + providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8002' + providers['gcp']['endpoint'] = '127.0.0.1:8002' + kms_tls_opts_ca_only = { + 'aws': {'tlsCAFile': CA_PEM}, + 'azure': {'tlsCAFile': CA_PEM}, + 'gcp': {'tlsCAFile': CA_PEM}, + 'kmip': {'tlsCAFile': CA_PEM}, + } + self.client_encryption_no_client_cert = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts_ca_only) + self.addCleanup(self.client_encryption_no_client_cert.close) + # 2, same providers as above but with tlsCertificateKeyFile. + kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) + for p in kms_tls_opts: + kms_tls_opts[p]['tlsCertificateKeyFile'] = CLIENT_PEM + self.client_encryption_with_tls = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts) + self.addCleanup(self.client_encryption_with_tls.close) + # 3, update endpoints to expired host. + providers = copy.deepcopy(providers) + providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8000' + providers['gcp']['endpoint'] = '127.0.0.1:8000' + providers['kmip']['endpoint'] = '127.0.0.1:8000' + self.client_encryption_expired = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts_ca_only) + self.addCleanup(self.client_encryption_expired.close) + # 3, update endpoints to invalid host. + providers = copy.deepcopy(providers) + providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8001' + providers['gcp']['endpoint'] = '127.0.0.1:8001' + providers['kmip']['endpoint'] = '127.0.0.1:8001' + self.client_encryption_invalid_hostname = ClientEncryption( + providers, 'keyvault.datakeys', self.client, OPTS, + kms_tls_options=kms_tls_opts_ca_only) + self.addCleanup(self.client_encryption_invalid_hostname.close) + # Errors when client has no cert, some examples: + # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) + self.cert_error = 'certificate required|SSL handshake failed' + # On Windows this error might be: + # [WinError 10054] An existing connection was forcibly closed by the remote host + if sys.platform == 'win32': + self.cert_error += '|forcibly closed' + # On Windows Python 3.10+ this error might be: + # EOF occurred in violation of protocol (_ssl.c:2384) + if sys.version_info[:2] >= (3, 10): + self.cert_error += '|forcibly closed' + + def test_01_aws(self): + key = { + 'region': 'us-east-1', + 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', + 'endpoint': '127.0.0.1:8002', + } + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('aws', key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, 'parse error'): + self.client_encryption_with_tls.create_data_key('aws', key) + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + key['endpoint'] = '127.0.0.1:8000' + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('aws', key) + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + key['endpoint'] = '127.0.0.1:8001' + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key('aws', key) + + def test_02_azure(self): + key = {'keyVaultEndpoint': 'doesnotexist.local', 'keyName': 'foo'} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('azure', key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): + self.client_encryption_with_tls.create_data_key('azure', key) + # Expired cert error. + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('azure', key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key( + 'azure', key) + + def test_03_gcp(self): + key = {'projectId': 'foo', 'location': 'bar', 'keyRing': 'baz', + 'keyName': 'foo'} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('gcp', key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): + self.client_encryption_with_tls.create_data_key('gcp', key) + # Expired cert error. + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('gcp', key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key('gcp', key) + + def test_04_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_no_client_cert.create_data_key('kmip') + self.client_encryption_with_tls.create_data_key('kmip') + # Expired cert error. + with self.assertRaisesRegex( + EncryptionError, 'expired|certificate verify failed'): + self.client_encryption_expired.create_data_key('kmip') + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, 'IP address mismatch|wronghost'): + self.client_encryption_invalid_hostname.create_data_key('kmip') + + if __name__ == "__main__": unittest.main() From 278a50d4b0365206412044be450e5cdda46a1b65 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 12:51:36 -0800 Subject: [PATCH 0521/2111] PYTHON-3005 Make maxConnecting configurable (#789) --- doc/changelog.rst | 2 + pymongo/client_options.py | 2 + pymongo/common.py | 1 + pymongo/mongo_client.py | 5 +- ...kout-custom-maxConnecting-is-enforced.json | 81 +++++++++++++++++++ test/uri_options/connection-pool-options.json | 23 +++++- 6 files changed, 111 insertions(+), 3 deletions(-) create mode 100644 test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 3b3a700b3b..3b46667cee 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -185,6 +185,8 @@ Notable improvements - Enhanced connection pooling to create connections more efficiently and avoid connection storms. +- Added the ``maxConnecting`` URI and + :class:`~pymongo.mongo_client.MongoClient` keyword argument. - :class:`~pymongo.mongo_client.MongoClient` now accepts a URI and keyword argument `srvMaxHosts` that limits the number of mongos-like hosts a client will connect to. More specifically, when a mongodb+srv:// connection string diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 845d4ef9a1..f7dbf255bc 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -138,6 +138,7 @@ def _parse_pool_options(options): options.get('zlibcompressionlevel', -1)) ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) load_balanced = options.get('loadbalanced') + max_connecting = options.get('maxconnecting', common.MAX_CONNECTING) return PoolOptions(max_pool_size, min_pool_size, max_idle_time_seconds, @@ -148,6 +149,7 @@ def _parse_pool_options(options): appname, driver, compression_settings, + max_connecting=max_connecting, server_api=server_api, load_balanced=load_balanced) diff --git a/pymongo/common.py b/pymongo/common.py index 5dd7b180c0..772f2f299b 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -615,6 +615,7 @@ def validate_auto_encryption_opts_or_none(option, value): 'journal': validate_boolean_or_string, 'localthresholdms': validate_positive_float_or_zero, 'maxidletimems': validate_timeout_or_none, + 'maxconnecting': validate_positive_integer, 'maxpoolsize': validate_non_negative_integer_or_none, 'maxstalenessseconds': validate_max_staleness, 'readconcernlevel': validate_string_or_none, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a308219cfb..1f8b781487 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -216,6 +216,8 @@ def __init__( - `maxIdleTimeMS` (optional): The maximum number of milliseconds that a connection can remain idle in the pool before being removed and replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. - `socketTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait for a response after sending an ordinary (non-monitoring) database operation before concluding that @@ -506,7 +508,8 @@ def __init__( arguments. The default for `uuidRepresentation` was changed from ``pythonLegacy`` to ``unspecified``. - Added the ``srvServiceName`` URI and keyword argument. + Added the ``srvServiceName`` and ``maxConnecting`` URI and keyword + argument. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. diff --git a/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json b/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json new file mode 100644 index 0000000000..6620f82fd9 --- /dev/null +++ b/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "integration", + "description": "custom maxConnecting is enforced", + "runOn": [ + { + "minServerVersion": "4.4.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "poolOptions": { + "maxConnecting": 1, + "maxPoolSize": 2, + "waitQueueTimeoutMS": 5000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "start", + "target": "thread2" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "checkOut", + "thread": "thread2" + }, + { + "name": "waitForEvent", + "event": "ConnectionReady", + "count": 2 + } + ], + "events": [ + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionReady" + } + ], + "ignore": [ + "ConnectionCheckOutStarted", + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionClosed", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index aae16190ba..118b2f6783 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid connection pool options are parsed correctly", - "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3", + "uri": "mongodb://example.com/?maxIdleTimeMS=50000&maxPoolSize=5&minPoolSize=3&maxConnecting=1", "valid": true, "warning": false, "hosts": null, @@ -10,7 +10,8 @@ "options": { "maxIdleTimeMS": 50000, "maxPoolSize": 5, - "minPoolSize": 3 + "minPoolSize": 3, + "maxConnecting": 1 } }, { @@ -52,6 +53,24 @@ "options": { "minPoolSize": 0 } + }, + { + "description": "maxConnecting=0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=0", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "maxConnecting<0 causes a warning", + "uri": "mongodb://example.com/?maxConnecting=-1", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } From a655c576c9902ddd928f1c425037d38d50fddd92 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 16 Nov 2021 14:23:55 -0800 Subject: [PATCH 0522/2111] PYTHON-3015 Document cipher mismatch issues (#791) --- doc/examples/tls.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 2f72555d7a..03ac63a633 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -213,3 +213,21 @@ revocation checking failed:: [('SSL routines', 'tls_process_initial_server_flight', 'invalid status response')] See :ref:`OCSP` for more details. + +Python 3.10+ incompatibilities with TLS/SSL on MongoDB <= 4.0 +............................................................. + +Note that `changes made to the ssl module in Python 3.10+ +`_ may cause incompatibilities +with MongoDB <= 4.0. The following are some example errors that may occur with this +combination:: + + SSL handshake failed: localhost:27017: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:997) + SSL handshake failed: localhost:27017: EOF occurred in violation of protocol (_ssl.c:997) + +The MongoDB server logs may show the following error:: + + 2021-06-30T21:22:44.917+0100 E NETWORK [conn16] SSL: error:1408A0C1:SSL routines:ssl3_get_client_hello:no shared cipher + +To resolve this issue, use Python <=3.10, upgrade to MongoDB 4.2+, or install +pymongo with the :ref:`OCSP` extra which relies on PyOpenSSL. From 99aab1b0ba71afaa278882b860af9cb0e3163087 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 14:34:36 -0800 Subject: [PATCH 0523/2111] PYTHON-3017 Properly check for closed KMS connections (#790) --- pymongo/encryption.py | 2 ++ test/test_encryption.py | 8 +++----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 117666ac82..cb4080397f 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -129,6 +129,8 @@ def kms_request(self, kms_context): conn.sendall(message) while kms_context.bytes_needed > 0: data = conn.recv(kms_context.bytes_needed) + if not data: + raise OSError('KMS connection closed') kms_context.feed(data) finally: conn.close() diff --git a/test/test_encryption.py b/test/test_encryption.py index 72e7dbbf1c..f77d3fffc7 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1775,9 +1775,6 @@ def test_invalid_hostname_in_kms_certificate(self): class TestKmsTLSOptions(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), 'AWS environment credentials are not set') - @unittest.skipIf(sys.version_info[:2] >= (3, 10) and - sys.platform == 'win32', - 'These tests hang with Python 3.10 on Windows') def setUp(self): super(TestKmsTLSOptions, self).setUp() # 1, create client with only tlsCAFile. @@ -1822,7 +1819,8 @@ def setUp(self): self.addCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) - self.cert_error = 'certificate required|SSL handshake failed' + self.cert_error = ('certificate required|SSL handshake failed|' + 'KMS connection closed') # On Windows this error might be: # [WinError 10054] An existing connection was forcibly closed by the remote host if sys.platform == 'win32': @@ -1830,7 +1828,7 @@ def setUp(self): # On Windows Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) if sys.version_info[:2] >= (3, 10): - self.cert_error += '|forcibly closed' + self.cert_error += '|EOF' def test_01_aws(self): key = { From 8fc6dc3c4ffd358005bd3cae8a6c13f0d85a31cc Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 16 Nov 2021 16:26:18 -0800 Subject: [PATCH 0524/2111] PYTHON-1489 Merge ajdavis/pymongo-mockup-tests into pymongo (#787) --- .evergreen/config.yml | 2 +- .evergreen/run-mockupdb-tests.sh | 14 +- test/mockupdb/operations.py | 148 ++++++++ test/mockupdb/test_auth_recovering_member.py | 50 +++ test/mockupdb/test_cluster_time.py | 193 ++++++++++ test/mockupdb/test_cursor_namespace.py | 122 +++++++ test/mockupdb/test_getmore_sharded.py | 62 ++++ test/mockupdb/test_handshake.py | 172 +++++++++ test/mockupdb/test_initial_ismaster.py | 46 +++ test/mockupdb/test_legacy_crud.py | 126 +++++++ test/mockupdb/test_list_indexes.py | 78 ++++ test/mockupdb/test_max_staleness.py | 67 ++++ test/mockupdb/test_mixed_version_sharded.py | 89 +++++ .../mockupdb/test_mongos_command_read_mode.py | 125 +++++++ .../test_network_disconnect_primary.py | 87 +++++ test/mockupdb/test_op_msg.py | 340 ++++++++++++++++++ test/mockupdb/test_op_msg_read_preference.py | 197 ++++++++++ test/mockupdb/test_projection.py | 56 +++ test/mockupdb/test_query_read_pref_sharded.py | 107 ++++++ test/mockupdb/test_reset_and_request_check.py | 145 ++++++++ test/mockupdb/test_slave_okay_rs.py | 79 ++++ test/mockupdb/test_slave_okay_sharded.py | 101 ++++++ test/mockupdb/test_slave_okay_single.py | 104 ++++++ test/mockupdb/test_starting_from_overflow.py | 76 ++++ 24 files changed, 2577 insertions(+), 9 deletions(-) mode change 100644 => 100755 .evergreen/run-mockupdb-tests.sh create mode 100644 test/mockupdb/operations.py create mode 100755 test/mockupdb/test_auth_recovering_member.py create mode 100644 test/mockupdb/test_cluster_time.py create mode 100644 test/mockupdb/test_cursor_namespace.py create mode 100644 test/mockupdb/test_getmore_sharded.py create mode 100644 test/mockupdb/test_handshake.py create mode 100644 test/mockupdb/test_initial_ismaster.py create mode 100755 test/mockupdb/test_legacy_crud.py create mode 100644 test/mockupdb/test_list_indexes.py create mode 100644 test/mockupdb/test_max_staleness.py create mode 100644 test/mockupdb/test_mixed_version_sharded.py create mode 100644 test/mockupdb/test_mongos_command_read_mode.py create mode 100755 test/mockupdb/test_network_disconnect_primary.py create mode 100755 test/mockupdb/test_op_msg.py create mode 100644 test/mockupdb/test_op_msg_read_preference.py create mode 100644 test/mockupdb/test_projection.py create mode 100644 test/mockupdb/test_query_read_pref_sharded.py create mode 100755 test/mockupdb/test_reset_and_request_check.py create mode 100644 test/mockupdb/test_slave_okay_rs.py create mode 100644 test/mockupdb/test_slave_okay_sharded.py create mode 100644 test/mockupdb/test_slave_okay_single.py create mode 100644 test/mockupdb/test_starting_from_overflow.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 16be7f882a..d4c95105bf 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -346,7 +346,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh + PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh "run doctests": - command: shell.exec diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh old mode 100644 new mode 100755 index d833fdea82..a0b67302a4 --- a/.evergreen/run-mockupdb-tests.sh +++ b/.evergreen/run-mockupdb-tests.sh @@ -1,20 +1,18 @@ #!/bin/bash - +# Must be run from pymongo repo root set -o xtrace set -o errexit . .evergreen/utils.sh ${PYTHON_BINARY} setup.py clean -cd .. createvirtualenv ${PYTHON_BINARY} mockuptests -trap "deactivatei, rm -rf mockuptests" EXIT HUP +trap "deactivate, rm -rf mockuptests" EXIT HUP # Install PyMongo from git clone so mockup-tests don't # download it from pypi. -python -m pip install ${PROJECT_DIRECTORY} - -git clone https://github.com/ajdavis/pymongo-mockup-tests.git -cd pymongo-mockup-tests -python setup.py test +python -m pip install . +python -m pip install --upgrade 'https://github.com/ajdavis/mongo-mockup-db/archive/master.zip' +cd ./test/mockupdb +python -m unittest discover -v diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py new file mode 100644 index 0000000000..2c8701ae83 --- /dev/null +++ b/test/mockupdb/operations.py @@ -0,0 +1,148 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"),; +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple + +from mockupdb import * +from pymongo import ReadPreference + +__all__ = ['operations', 'upgrades'] + + +Operation = namedtuple( + 'operation', + ['name', 'function', 'reply', 'op_type', 'not_master']) +"""Client operations on MongoDB. + +Each has a human-readable name, a function that actually executes a test, and +a type that maps to one of the types in the Server Selection Spec: +'may-use-secondary', 'must-use-primary', etc. + +The special type 'always-use-secondary' applies to an operation with an explicit +read mode, like the operation "command('c', read_preference=SECONDARY)". + +The not-master response is how a secondary responds to a must-use-primary op, +or how a recovering member responds to a may-use-secondary op. + +Example uses: + +We can use "find_one" to validate that the SlaveOk bit is set when querying a +standalone, even with mode PRIMARY, but that it isn't set when sent to a mongos +with mode PRIMARY. Or it can validate that "$readPreference" is included in +mongos queries except with mode PRIMARY or SECONDARY_PREFERRED (PYTHON-865). + +We can use "options_old" and "options_new" to test that the driver queries an +old server's system.namespaces collection, but uses the listCollections command +on a new server (PYTHON-857). + +"secondary command" is good to test that the client can direct reads to +secondaries in a replica set, or select a mongos for secondary reads in a +sharded cluster (PYTHON-868). +""" + +not_master_reply_to_query = OpReply( + {'$err': 'not master'}, + flags=REPLY_FLAGS['QueryFailure']) + +not_master_reply_to_command = OpReply(ok=0, errmsg='not master') + +operations = [ + Operation( + 'find_one', + lambda client: client.db.collection.find_one(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='may-use-secondary', + not_master=not_master_reply_to_query), + Operation( + 'count', + lambda client: client.db.collection.count_documents({}), + reply={'n': 1}, + op_type='may-use-secondary', + not_master=not_master_reply_to_command), + Operation( + 'aggregate', + lambda client: client.db.collection.aggregate([]), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='may-use-secondary', + not_master=not_master_reply_to_command), + Operation( + 'mapreduce', + lambda client: client.db.collection.map_reduce( + 'function() {}', 'function() {}'), + reply={'result': {'db': 'db', 'collection': 'out_collection'}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), + Operation( + 'inline_mapreduce', + lambda client: client.db.collection.inline_map_reduce( + 'function() {}', 'function() {}', {'out': {'inline': 1}}), + reply={'results': []}, + op_type='may-use-secondary', + not_master=not_master_reply_to_command), + Operation( + 'options', + lambda client: client.db.collection.options(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), + Operation( + 'command', + lambda client: client.db.command('foo'), + reply={'ok': 1}, + op_type='must-use-primary', # Ignores client's read preference. + not_master=not_master_reply_to_command), + Operation( + 'secondary command', + lambda client: + client.db.command('foo', read_preference=ReadPreference.SECONDARY), + reply={'ok': 1}, + op_type='always-use-secondary', + not_master=OpReply(ok=0, errmsg='node is recovering')), + Operation( + 'listCollections', + lambda client: client.db.collection_names(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), + Operation( + 'listIndexes', + lambda client: client.db.collection.index_information(), + reply={'cursor': {'id': 0, 'firstBatch': []}}, + op_type='must-use-primary', + not_master=not_master_reply_to_command), +] + + +_ops_by_name = dict([(op.name, op) for op in operations]) + +Upgrade = namedtuple('Upgrade', + ['name', 'function', 'old', 'new', 'wire_version']) + +upgrades = [ + Upgrade('index_information', + lambda client: client.db.collection.index_information(), + old=OpQuery(namespace='db.system.indexes'), + new=Command('listIndexes', 'collection', namespace='db'), + wire_version=3), + Upgrade('collection_names', + lambda client: client.db.collection_names(), + old=Command('aggregate', 'system.namespaces', namespace='db'), + new=Command('listCollections', namespace='db'), + wire_version=3), + Upgrade('options', + lambda client: client.db.collection.options(), + old=Command('aggregate', 'system.namespaces', namespace='db'), + new=Command('listCollections', namespace='db'), + wire_version=3), +] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py new file mode 100755 index 0000000000..360c593a00 --- /dev/null +++ b/test/mockupdb/test_auth_recovering_member.py @@ -0,0 +1,50 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mockupdb import MockupDB +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + +import unittest + + +class TestAuthRecoveringMember(unittest.TestCase): + def test_auth_recovering_member(self): + # Test that we don't attempt auth against a recovering RS member. + server = MockupDB() + server.autoresponds('ismaster', { + 'minWireVersion': 2, + 'maxWireVersion': 6, + 'ismaster': False, + 'secondary': False, + 'setName': 'rs'}) + + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri, + replicaSet='rs', + serverSelectionTimeoutMS=100, + socketTimeoutMS=100) + + self.addCleanup(client.close) + + # Should see there's no primary or secondary and raise selection timeout + # error. If it raises AutoReconnect we know it actually tried the + # server, and that's wrong. + with self.assertRaises(ServerSelectionTimeoutError): + client.db.authenticate('user', 'password') + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py new file mode 100644 index 0000000000..fae6c3faae --- /dev/null +++ b/test/mockupdb/test_cluster_time.py @@ -0,0 +1,193 @@ +# Copyright 2017-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test $clusterTime handling.""" + +from bson import Timestamp +from mockupdb import going, MockupDB +from pymongo import (MongoClient, + InsertOne, + UpdateOne, + DeleteMany, + version_tuple) + +import unittest + + +class TestClusterTime(unittest.TestCase): + def cluster_time_conversation(self, callback, replies): + cluster_time = Timestamp(0, 0) + server = MockupDB() + + # First test all commands include $clusterTime with wire version 6. + responder = server.autoresponds( + 'ismaster', + {'minWireVersion': 0, + 'maxWireVersion': 6, + '$clusterTime': {'clusterTime': cluster_time}}) + + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with going(callback, client): + for reply in replies: + request = server.receives() + self.assertIn('$clusterTime', request) + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + reply['$clusterTime'] = {'clusterTime': cluster_time} + request.reply(reply) + + # Now test that no commands include $clusterTime with wire version 5, + # even though the isMaster reply still has $clusterTime. + server.cancel_responder(responder) + server.autoresponds('ismaster', + {'minWireVersion': 0, + 'maxWireVersion': 5, + '$clusterTime': {'clusterTime': cluster_time}}) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with going(callback, client): + for reply in replies: + request = server.receives() + self.assertNotIn('$clusterTime', request) + request.reply(reply) + + def test_command(self): + def callback(client): + client.db.command('ping') + client.db.command('ping') + + self.cluster_time_conversation(callback, [{'ok': 1}] * 2) + + def test_bulk(self): + def callback(client): + client.db.collection.bulk_write([ + InsertOne({}), + InsertOne({}), + UpdateOne({}, {'$inc': {'x': 1}}), + DeleteMany({})]) + + self.cluster_time_conversation( + callback, + [{'ok': 1, 'nInserted': 2}, + {'ok': 1, 'nModified': 1}, + {'ok': 1, 'nDeleted': 2}]) + + batches = [ + {'cursor': {'id': 123, 'firstBatch': [{'a': 1}]}}, + {'cursor': {'id': 123, 'nextBatch': [{'a': 2}]}}, + {'cursor': {'id': 0, 'nextBatch': [{'a': 3}]}}] + + def test_cursor(self): + def callback(client): + list(client.db.collection.find()) + + self.cluster_time_conversation(callback, self.batches) + + def test_aggregate(self): + def callback(client): + list(client.db.collection.aggregate([])) + + self.cluster_time_conversation(callback, self.batches) + + def test_explain(self): + def callback(client): + client.db.collection.find().explain() + + self.cluster_time_conversation(callback, [{'ok': 1}]) + + def test_monitor(self): + cluster_time = Timestamp(0, 0) + reply = {'minWireVersion': 0, + 'maxWireVersion': 6, + '$clusterTime': {'clusterTime': cluster_time}} + + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri, heartbeatFrequencyMS=500) + self.addCleanup(client.close) + + request = server.receives('ismaster') + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn('$clusterTime', request) + request.ok(reply) + + # Next exchange: client returns first clusterTime, we send the second. + request = server.receives('ismaster') + self.assertIn('$clusterTime', request) + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + reply['$clusterTime'] = {'clusterTime': cluster_time} + request.reply(reply) + + # Third exchange: client returns second clusterTime. + request = server.receives('ismaster') + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + + # Return command error with a new clusterTime. + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + error = {'ok': 0, + 'code': 211, + 'errmsg': 'Cache Reader No keys found for HMAC ...', + '$clusterTime': {'clusterTime': cluster_time}} + request.reply(error) + + # PyMongo 3.11+ closes the monitoring connection on command errors. + if version_tuple >= (3, 11, -1): + # Fourth exchange: the Monitor closes the connection and runs the + # handshake on a new connection. + request = server.receives('ismaster') + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn('$clusterTime', request) + + # Reply without $clusterTime. + reply.pop('$clusterTime') + request.reply(reply) + else: + # Fourth exchange: the Monitor retry attempt uses the clusterTime + # from the previous isMaster error. + request = server.receives('ismaster') + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + + cluster_time = Timestamp(cluster_time.time, + cluster_time.inc + 1) + error['$clusterTime'] = {'clusterTime': cluster_time} + request.reply(error) + + # Fifth exchange: the Monitor attempt uses the clusterTime from + # the previous isMaster error. + request = server.receives('ismaster') + self.assertEqual(request['$clusterTime']['clusterTime'], + cluster_time) + request.reply(reply) + client.close() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py new file mode 100644 index 0000000000..600f7bca6d --- /dev/null +++ b/test/mockupdb/test_cursor_namespace.py @@ -0,0 +1,122 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test list_indexes with more than one batch.""" + +from mockupdb import going, MockupDB +from pymongo import MongoClient, version_tuple + +import unittest + + +class TestCursorNamespace(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.server.stop() + + def _test_cursor_namespace(self, cursor_op, command): + with going(cursor_op) as docs: + request = self.server.receives( + **{command: 'collection', 'namespace': 'test'}) + # Respond with a different namespace. + request.reply({'cursor': { + 'firstBatch': [{'doc': 1}], + 'id': 123, + 'ns': 'different_db.different.coll'}}) + # Client uses the namespace we returned. + request = self.server.receives( + getMore=123, namespace='different_db', + collection='different.coll') + + request.reply({'cursor': { + 'nextBatch': [{'doc': 2}], + 'id': 0}}) + + self.assertEqual([{'doc': 1}, {'doc': 2}], docs()) + + def test_aggregate_cursor(self): + def op(): + return list(self.client.test.collection.aggregate([])) + self._test_cursor_namespace(op, 'aggregate') + + @unittest.skipUnless(version_tuple >= (3, 11, -1), 'Fixed in pymongo 3.11') + def test_find_cursor(self): + def op(): + return list(self.client.test.collection.find()) + self._test_cursor_namespace(op, 'find') + + def test_list_indexes(self): + def op(): + return list(self.client.test.collection.list_indexes()) + self._test_cursor_namespace(op, 'listIndexes') + + +class TestKillCursorsNamespace(unittest.TestCase): + @classmethod + @unittest.skipUnless(version_tuple >= (3, 12, -1), 'Fixed in pymongo 3.12') + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.client.close() + cls.server.stop() + + def _test_killCursors_namespace(self, cursor_op, command): + with going(cursor_op): + request = self.server.receives( + **{command: 'collection', 'namespace': 'test'}) + # Respond with a different namespace. + request.reply({'cursor': { + 'firstBatch': [{'doc': 1}], + 'id': 123, + 'ns': 'different_db.different.coll'}}) + # Client uses the namespace we returned for killCursors. + request = self.server.receives(**{ + 'killCursors': 'different.coll', + 'cursors': [123], + '$db': 'different_db'}) + request.reply({ + 'ok': 1, + 'cursorsKilled': [123], + 'cursorsNotFound': [], + 'cursorsAlive': [], + 'cursorsUnknown': []}) + + def test_aggregate_killCursor(self): + def op(): + cursor = self.client.test.collection.aggregate([], batchSize=1) + next(cursor) + cursor.close() + self._test_killCursors_namespace(op, 'aggregate') + + def test_find_killCursor(self): + def op(): + cursor = self.client.test.collection.find(batch_size=1) + next(cursor) + cursor.close() + self._test_killCursors_namespace(op, 'find') + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py new file mode 100644 index 0000000000..2b3a1fd6ce --- /dev/null +++ b/test/mockupdb/test_getmore_sharded.py @@ -0,0 +1,62 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor with a sharded cluster.""" +from pymongo import MongoClient + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, going + +import unittest + + +class TestGetmoreSharded(unittest.TestCase): + def test_getmore_sharded(self): + servers = [MockupDB(), MockupDB()] + + # Collect queries to either server in one queue. + q = Queue() + for server in servers: + server.subscribe(q.put) + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=6) + server.run() + self.addCleanup(server.stop) + + client = MongoClient('mongodb://%s:%d,%s:%d' % ( + servers[0].host, servers[0].port, + servers[1].host, servers[1].port)) + self.addCleanup(client.close) + collection = client.db.collection + cursor = collection.find() + with going(next, cursor): + query = q.get(timeout=1) + query.replies({'cursor': {'id': 123, 'firstBatch': [{}]}}) + + # 10 batches, all getMores go to same server. + for i in range(1, 10): + with going(next, cursor): + getmore = q.get(timeout=1) + self.assertEqual(query.server, getmore.server) + cursor_id = 123 if i < 9 else 0 + getmore.replies({'cursor': {'id': cursor_id, + 'nextBatch': [{}]}}) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py new file mode 100644 index 0000000000..9dbdec3057 --- /dev/null +++ b/test/mockupdb/test_handshake.py @@ -0,0 +1,172 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from mockupdb import MockupDB, OpReply, OpMsg, absent, Command, go +from pymongo import MongoClient, version as pymongo_version, version_tuple +from pymongo.errors import OperationFailure + +import unittest + + +def _check_handshake_data(request): + assert 'client' in request + data = request['client'] + + assert data['application'] == {'name': 'my app'} + assert data['driver'] == {'name': 'PyMongo', 'version': pymongo_version} + + # Keep it simple, just check these fields exist. + assert 'os' in data + assert 'platform' in data + + +class TestHandshake(unittest.TestCase): + @unittest.skipUnless(version_tuple >= (3, 4), "requires PyMongo 3.4") + def test_client_handshake_data(self): + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in (primary, secondary)] + primary_response = OpReply('ismaster', True, + setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + error_response = OpReply( + 0, errmsg='Cache Reader No keys found for HMAC ...', code=211) + + secondary_response = OpReply('ismaster', False, + setName='rs', hosts=hosts, + secondary=True, + minWireVersion=2, maxWireVersion=6) + + client = MongoClient(primary.uri, + replicaSet='rs', + appname='my app', + heartbeatFrequencyMS=500) # Speed up the test. + + self.addCleanup(client.close) + + # New monitoring sockets send data during handshake. + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + + # Subsequent heartbeats have no client data. + primary.receives('ismaster', 1, client=absent).ok(error_response) + secondary.receives('ismaster', 1, client=absent).ok(error_response) + + # PyMongo 3.11+ closes the monitoring connection on command errors. + if version_tuple >= (3, 11, -1): + # The heartbeat retry (on a new connection) does have client data. + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) + else: + # The heartbeat retry has no client data after a command failure. + primary.receives('ismaster', 1, client=absent).ok(error_response) + secondary.receives('ismaster', 1, client=absent).ok(error_response) + + # Still no client data. + primary.receives('ismaster', 1, client=absent).ok(primary_response) + secondary.receives('ismaster', 1, client=absent).ok(secondary_response) + + # After a disconnect, next ismaster has client data again. + primary.receives('ismaster', 1, client=absent).hangup() + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + secondary.autoresponds('ismaster', secondary_response) + + # Start a command, so the client opens an application socket. + future = go(client.db.command, 'whatever') + + for request in primary: + if request.matches(Command('ismaster')): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket. + _check_handshake_data(request) + request.ok(primary_response) + else: + # Command succeeds. + if version_tuple >= (3, 7): + request.assert_matches(OpMsg('whatever')) + else: + request.assert_matches(Command('whatever')) + request.ok() + assert future() + return + + @unittest.skipUnless(version_tuple >= (3, 11, -1), "requires PyMongo 3.11") + def test_client_handshake_saslSupportedMechs(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + primary_response = OpReply('ismaster', True, + minWireVersion=2, maxWireVersion=6) + client = MongoClient(server.uri, + username='username', + password='password') + + self.addCleanup(client.close) + + # New monitoring sockets send data during handshake. + heartbeat = server.receives('ismaster') + heartbeat.ok(primary_response) + + future = go(client.db.command, 'whatever') + for request in server: + if request.matches('ismaster'): + if request.client_port == heartbeat.client_port: + # This is the monitor again, keep going. + request.ok(primary_response) + else: + # Handshaking a new application socket should send + # saslSupportedMechs and speculativeAuthenticate. + self.assertEqual(request['saslSupportedMechs'], + 'admin.username') + self.assertIn( + 'saslStart', request['speculativeAuthenticate']) + auth = {'conversationId': 1, 'done': False, + 'payload': b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' + b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' + b'tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei' + b'PHrSmh4uhkg==,i=15000'} + request.ok('ismaster', True, + saslSupportedMechs=['SCRAM-SHA-256'], + speculativeAuthenticate=auth, + minWireVersion=2, maxWireVersion=6) + # Authentication should immediately fail with: + # OperationFailure: Server returned an invalid nonce. + with self.assertRaises(OperationFailure): + future() + return + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py new file mode 100644 index 0000000000..c67fcbf9e1 --- /dev/null +++ b/test/mockupdb/test_initial_ismaster.py @@ -0,0 +1,46 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time + +from mockupdb import MockupDB, wait_until +from pymongo import MongoClient + +import unittest + + +class TestInitialIsMaster(unittest.TestCase): + def test_initial_ismaster(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + start = time.time() + client = MongoClient(server.uri) + self.addCleanup(client.close) + + # A single ismaster is enough for the client to be connected. + self.assertFalse(client.nodes) + server.receives('ismaster').ok(ismaster=True, + minWireVersion=2, maxWireVersion=6) + wait_until(lambda: client.nodes, + 'update nodes', timeout=1) + + # At least 10 seconds before next heartbeat. + server.receives('ismaster').ok(ismaster=True, + minWireVersion=2, maxWireVersion=6) + self.assertGreaterEqual(time.time() - start, 10) + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_legacy_crud.py b/test/mockupdb/test_legacy_crud.py new file mode 100755 index 0000000000..508313dbbd --- /dev/null +++ b/test/mockupdb/test_legacy_crud.py @@ -0,0 +1,126 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from bson.son import SON +from mockupdb import (MockupDB, going, OpInsert, OpMsg, absent, Command, + OP_MSG_FLAGS) +from pymongo import MongoClient, WriteConcern, version_tuple + +import unittest + + +class TestLegacyCRUD(unittest.TestCase): + def test_op_insert_manipulate_false(self): + # Test three aspects of legacy insert with manipulate=False: + # 1. The return value is None, [None], or [None, None] as appropriate. + # 2. _id is not set on the passed-in document object. + # 3. _id is not sent to server. + server = MockupDB(auto_ismaster=True) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + coll = client.db.get_collection('coll', write_concern=WriteConcern(w=0)) + doc = {} + with going(coll.insert, doc, manipulate=False) as future: + if version_tuple >= (3, 7): + server.receives(OpMsg(SON([ + ("insert", coll.name), + ("ordered", True), + ("writeConcern", {"w": 0}), + ("documents", [{}])]), flags=OP_MSG_FLAGS['moreToCome'])) + else: + server.receives(OpInsert({'_id': absent})) + + self.assertFalse('_id' in doc) + self.assertIsNone(future()) + + docs = [{}] # One doc in a list. + with going(coll.insert, docs, manipulate=False) as future: + if version_tuple >= (3, 7): + # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. + request = server.receives() + request.assert_matches(OpMsg(SON([ + ("insert", coll.name), + ("ordered", True), + ("documents", [{}])]), flags=0)) + request.reply({"n": 1}) + else: + server.receives(OpInsert({'_id': absent})) + + self.assertFalse('_id' in docs[0]) + self.assertEqual(future(), [None]) + + docs = [{}, {}] # Two docs. + with going(coll.insert, docs, manipulate=False) as future: + if version_tuple >= (3, 7): + # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. + request = server.receives() + request.assert_matches(OpMsg(SON([ + ("insert", coll.name), + ("ordered", True), + ("documents", [{}, {}])]), flags=0)) + request.reply({"n": 2}) + else: + server.receives(OpInsert({'_id': absent}, {'_id': absent})) + + self.assertFalse('_id' in docs[0]) + self.assertFalse('_id' in docs[1]) + self.assertEqual(future(), [None, None]) + + def test_insert_command_manipulate_false(self): + # Test same three aspects as test_op_insert_manipulate_false does, + # with the "insert" command. + server = MockupDB(auto_ismaster={'maxWireVersion': 2}) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + doc = {} + with going(client.db.coll.insert, doc, manipulate=False) as future: + r = server.receives(Command("insert", "coll", documents=[{}])) + # MockupDB doesn't understand "absent" in subdocuments yet. + self.assertFalse('_id' in r.doc['documents'][0]) + r.ok() + + self.assertFalse('_id' in doc) + self.assertIsNone(future()) + + docs = [{}] # One doc in a list. + with going(client.db.coll.insert, docs, manipulate=False) as future: + r = server.receives(Command("insert", "coll", documents=[{}])) + self.assertFalse('_id' in r.doc['documents'][0]) + r.ok() + + self.assertFalse('_id' in docs[0]) + self.assertEqual(future(), [None]) + + docs = [{}, {}] # Two docs. + with going(client.db.coll.insert, docs, manipulate=False) as future: + r = server.receives(Command("insert", "coll", documents=[{}, {}])) + self.assertFalse('_id' in r.doc['documents'][0]) + self.assertFalse('_id' in r.doc['documents'][1]) + r.ok() + + self.assertFalse('_id' in docs[0]) + self.assertFalse('_id' in docs[1]) + self.assertEqual(future(), [None, None]) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py new file mode 100644 index 0000000000..7483e80df2 --- /dev/null +++ b/test/mockupdb/test_list_indexes.py @@ -0,0 +1,78 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test list_indexes with more than one batch.""" + +from bson import SON + +from mockupdb import going, MockupDB, OpGetMore +from pymongo import MongoClient + +import unittest + + +class TestListIndexes(unittest.TestCase): + def test_list_indexes_opquery(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 3}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + with going(client.test.collection.list_indexes) as cursor: + request = server.receives( + listIndexes='collection', namespace='test') + request.reply({'cursor': { + 'firstBatch': [{'name': 'index_0'}], + 'id': 123}}) + + with going(list, cursor()) as indexes: + request = server.receives(OpGetMore, + namespace='test.collection', + cursor_id=123) + + request.reply([{'name': 'index_1'}], cursor_id=0) + + self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) + for index_info in indexes(): + self.assertIsInstance(index_info, SON) + + def test_list_indexes_command(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + with going(client.test.collection.list_indexes) as cursor: + request = server.receives( + listIndexes='collection', namespace='test') + request.reply({'cursor': { + 'firstBatch': [{'name': 'index_0'}], + 'id': 123}}) + + with going(list, cursor()) as indexes: + request = server.receives(getMore=123, + namespace='test', + collection='collection') + + request.reply({'cursor': { + 'nextBatch': [{'name': 'index_1'}], + 'id': 0}}) + + self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) + for index_info in indexes(): + self.assertIsInstance(index_info, SON) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py new file mode 100644 index 0000000000..89d17a133f --- /dev/null +++ b/test/mockupdb/test_max_staleness.py @@ -0,0 +1,67 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mockupdb import MockupDB, going +from pymongo import MongoClient + +import unittest + + +class TestMaxStalenessMongos(unittest.TestCase): + def test_mongos(self): + mongos = MockupDB() + mongos.autoresponds('ismaster', maxWireVersion=5, + ismaster=True, msg='isdbgrid') + mongos.run() + self.addCleanup(mongos.stop) + + # No maxStalenessSeconds. + uri = 'mongodb://localhost:%d/?readPreference=secondary' % mongos.port + + client = MongoClient(uri) + self.addCleanup(client.close) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertNotIn( + 'maxStalenessSeconds', + request.doc['$readPreference']) + + self.assertTrue(request.slave_okay) + request.ok(cursor={'firstBatch': [], 'id': 0}) + + # find_one succeeds with no result. + self.assertIsNone(future()) + + # Set maxStalenessSeconds to 1. Client has no minimum with mongos, + # we let mongos enforce the 90-second minimum and return an error: + # SERVER-27146. + uri = 'mongodb://localhost:%d/?readPreference=secondary' \ + '&maxStalenessSeconds=1' % mongos.port + + client = MongoClient(uri) + self.addCleanup(client.close) + with going(client.db.coll.find_one) as future: + request = mongos.receives() + self.assertEqual( + 1, + request.doc['$readPreference']['maxStalenessSeconds']) + + self.assertTrue(request.slave_okay) + request.ok(cursor={'firstBatch': [], 'id': 0}) + + self.assertIsNone(future()) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py new file mode 100644 index 0000000000..9f57dd0f43 --- /dev/null +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -0,0 +1,89 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo with a mixed-version cluster.""" + +import time + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, go +from pymongo import MongoClient + +import unittest +from operations import upgrades + + +class TestMixedVersionSharded(unittest.TestCase): + def setup_server(self, upgrade): + self.mongos_old, self.mongos_new = MockupDB(), MockupDB() + + # Collect queries to either server in one queue. + self.q = Queue() + for server in self.mongos_old, self.mongos_new: + server.subscribe(self.q.put) + server.autoresponds('getlasterror') + server.run() + self.addCleanup(server.stop) + + # Max wire version is too old for the upgraded operation. + self.mongos_old.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + maxWireVersion=upgrade.wire_version - 1) + + # Up-to-date max wire version. + self.mongos_new.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + maxWireVersion=upgrade.wire_version) + + self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos_old.address_string, + self.mongos_new.address_string) + + self.client = MongoClient(self.mongoses_uri) + + def tearDown(self): + if hasattr(self, 'client') and self.client: + self.client.close() + + +def create_mixed_version_sharded_test(upgrade): + def test(self): + self.setup_server(upgrade) + start = time.time() + servers_used = set() + while len(servers_used) < 2: + go(upgrade.function, self.client) + request = self.q.get(timeout=1) + servers_used.add(request.server) + request.assert_matches(upgrade.old + if request.server is self.mongos_old + else upgrade.new) + if time.time() > start + 10: + self.fail('never used both mongoses') + return test + + +def generate_mixed_version_sharded_tests(): + for upgrade in upgrades: + test = create_mixed_version_sharded_test(upgrade) + test_name = 'test_%s' % upgrade.name.replace(' ', '_') + test.__name__ = test_name + setattr(TestMixedVersionSharded, test_name, test) + + +generate_mixed_version_sharded_tests() + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py new file mode 100644 index 0000000000..1fe2ea5869 --- /dev/null +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -0,0 +1,125 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools + +from bson import SON +from mockupdb import MockupDB, going +from pymongo import MongoClient, ReadPreference +from pymongo.read_preferences import (make_read_preference, + read_pref_mode_from_name, + _MONGOS_MODES) + +import unittest +from operations import operations + + +class TestMongosCommandReadMode(unittest.TestCase): + def test_aggregate(self): + server = MockupDB() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=5) + self.addCleanup(server.stop) + server.run() + + client = MongoClient(server.uri) + self.addCleanup(client.close) + collection = client.test.collection + with going(collection.aggregate, []): + command = server.receives(aggregate='collection', pipeline=[]) + self.assertFalse(command.slave_ok, 'SlaveOkay set') + self.assertNotIn('$readPreference', command) + command.ok(result=[{}]) + + secondary_collection = collection.with_options( + read_preference=ReadPreference.SECONDARY) + + with going(secondary_collection.aggregate, []): + command = server.receives( + {'$query': SON([('aggregate', 'collection'), + ('pipeline', []), + ('cursor', {})]), + '$readPreference': {'mode': 'secondary'}}) + command.ok(result=[{}]) + self.assertTrue(command.slave_ok, 'SlaveOkay not set') + + +def create_mongos_read_mode_test(mode, operation): + def test(self): + server = MockupDB() + self.addCleanup(server.stop) + server.run() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=5) + + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = MongoClient(server.uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client) as future: + request = server.receive() + request.reply(operation.reply) + + future() # No error. + + if operation.op_type == 'always-use-secondary': + self.assertEqual(ReadPreference.SECONDARY.document, + request.doc.get('$readPreference')) + slave_ok = mode != 'primary' + self.assertIn('$query', request.doc) + elif operation.op_type == 'must-use-primary': + self.assertNotIn('$readPreference', request) + self.assertNotIn('$query', request.doc) + slave_ok = False + elif operation.op_type == 'may-use-secondary': + slave_ok = mode != 'primary' + if mode in ('primary', 'secondaryPreferred'): + self.assertNotIn('$readPreference', request) + self.assertNotIn('$query', request.doc) + else: + self.assertEqual(pref.document, + request.doc.get('$readPreference')) + self.assertIn('$query', request.doc) + else: + self.fail('unrecognized op_type %r' % operation.op_type) + + if slave_ok: + self.assertTrue(request.slave_ok, 'SlaveOkay not set') + else: + self.assertFalse(request.slave_ok, 'SlaveOkay set') + + return test + + +def generate_mongos_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + if mode == 'primary' and operation.op_type == 'always-use-secondary': + # Skip something like command('foo', read_preference=SECONDARY). + continue + test = create_mongos_read_mode_test(mode, operation) + test_name = 'test_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), mode) + test.__name__ = test_name + setattr(TestMongosCommandReadMode, test_name, test) + + +generate_mongos_read_mode_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py new file mode 100755 index 0000000000..1df5febb78 --- /dev/null +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -0,0 +1,87 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, wait_until, OpReply, going, Future +from pymongo.errors import ConnectionFailure +from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo import MongoClient + +import unittest + + +class TestNetworkDisconnectPrimary(unittest.TestCase): + def test_network_disconnect_primary(self): + # Application operation fails against primary. Test that topology + # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. + # http://bit.ly/1B5ttuL + primary, secondary = servers = [MockupDB() for _ in range(2)] + for server in servers: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string for server in servers] + primary_response = OpReply(ismaster=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + primary.autoresponds('ismaster', primary_response) + secondary.autoresponds( + 'ismaster', + ismaster=False, secondary=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + + client = MongoClient(primary.uri, replicaSet='rs') + self.addCleanup(client.close) + wait_until(lambda: client.primary == primary.address, + 'discover primary') + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, + topology.description.topology_type) + + # Open a socket in the application pool (calls ismaster). + with going(client.db.command, 'buildinfo'): + primary.receives('buildinfo').ok() + + # The primary hangs replying to ismaster. + ismaster_future = Future() + primary.autoresponds('ismaster', + lambda r: r.ok(ismaster_future.result())) + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(client.db.command, 'buildinfo'): + primary.receives('buildinfo').hangup() + + # Topology type is updated. + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, + topology.description.topology_type) + + # Let ismasters through again. + ismaster_future.set_result(primary_response) + + # Demand a primary. + with going(client.db.command, 'buildinfo'): + wait_until(lambda: client.primary == primary.address, + 'rediscover primary') + primary.receives('buildinfo').ok() + + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, + topology.description.topology_type) + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py new file mode 100755 index 0000000000..dc574226bc --- /dev/null +++ b/test/mockupdb/test_op_msg.py @@ -0,0 +1,340 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple + +from mockupdb import MockupDB, going, OpMsg, OpMsgReply, OP_MSG_FLAGS +from pymongo import MongoClient, WriteConcern, version_tuple +from pymongo.operations import InsertOne, UpdateOne, DeleteOne +from pymongo.cursor import CursorType + +import unittest + + +Operation = namedtuple( + 'Operation', + ['name', 'function', 'request', 'reply']) + +operations = [ + Operation( + 'find_one', + lambda coll: coll.find_one({}), + request=OpMsg({"find": "coll"}, flags=0), + reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}), + Operation( + 'aggregate', + lambda coll: coll.aggregate([]), + request=OpMsg({"aggregate": "coll"}, flags=0), + reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}), + Operation( + 'insert_one', + lambda coll: coll.insert_one({}), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'insert_one-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).insert_one({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'insert_many', + lambda coll: coll.insert_many([{}, {}, {}]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 3}), + Operation( + 'insert_many-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 3}), + Operation( + 'insert_many-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}], ordered=False), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'replace_one', + lambda coll: coll.replace_one({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 1, 'nModified': 1}), + Operation( + 'replace_one-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).replace_one({"_id": 1}, + {"new": 1}), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update_one', + lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 1, 'nModified': 1}), + Operation( + 'replace_one-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).update_one({"_id": 1}, + {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update_many', + lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 1, 'nModified': 1}), + Operation( + 'update_many-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).update_many({"_id": 1}, + {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'delete_one', + lambda coll: coll.delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'delete_one-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'delete_many', + lambda coll: coll.delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'delete_many-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + # Legacy methods + Operation( + 'insert', + lambda coll: coll.insert({}), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'insert-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).insert({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'insert-w0-argument', + lambda coll: coll.insert({}, w=0), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update', + lambda coll: coll.update({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 1, 'nModified': 1}), + Operation( + 'update-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).update({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'update-w0-argument', + lambda coll: coll.update({"_id": 1}, {"new": 1}, w=0), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'remove', + lambda coll: coll.remove({"_id": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 1}), + Operation( + 'remove-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).remove({"_id": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'remove-w0-argument', + lambda coll: coll.remove({"_id": 1}, w=0), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'bulk_write_insert', + lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_insert-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([InsertOne({}), + InsertOne({})]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_insert-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne({}), InsertOne({})], ordered=False), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'bulk_write_update', + lambda coll: coll.bulk_write([ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 2, 'nModified': 2}), + Operation( + 'bulk_write_update-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + request=OpMsg({"update": "coll"}, flags=0), + reply={'ok': 1, 'n': 2, 'nModified': 2}), + Operation( + 'bulk_write_update-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}})], ordered=False), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), + Operation( + 'bulk_write_delete', + lambda coll: coll.bulk_write([ + DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_delete-w0', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + request=OpMsg({"delete": "coll"}, flags=0), + reply={'ok': 1, 'n': 2}), + Operation( + 'bulk_write_delete-w0-unordered', + lambda coll: coll.with_options( + write_concern=WriteConcern(w=0)).bulk_write([ + DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), + reply=None), +] + +operations_312 = [ + Operation( + 'find_raw_batches', + lambda coll: list(coll.find_raw_batches({})), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, + ]), + Operation( + 'aggregate_raw_batches', + lambda coll: list(coll.aggregate_raw_batches([])), + request=[ + OpMsg({"aggregate": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {'ok': 1, 'cursor': {'firstBatch': [], 'id': 7}}, + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, + ]), + Operation( + 'find_exhaust_cursor', + lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=1 << 16), + ], + reply=[ + OpMsgReply( + {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, flags=0), + OpMsgReply( + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), + OpMsgReply( + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), + OpMsgReply( + {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, flags=0), + ]), +] + + +class TestOpMsg(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.server = MockupDB(auto_ismaster=True, max_wire_version=8) + cls.server.run() + cls.client = MongoClient(cls.server.uri) + + @classmethod + def tearDownClass(cls): + cls.server.stop() + cls.client.close() + + def _test_operation(self, op): + coll = self.client.db.coll + with going(op.function, coll) as future: + expected_requests = op.request + replies = op.reply + if not isinstance(op.request, list): + expected_requests = [op.request] + replies = [op.reply] + + for expected_request in expected_requests: + request = self.server.receives() + request.assert_matches(expected_request) + reply = None + if replies: + reply = replies.pop(0) + if reply is not None: + request.reply(reply) + for reply in replies: + if reply is not None: + request.reply(reply) + + future() # No error. + + +def operation_test(op, decorator): + @decorator() + def test(self): + self._test_operation(op) + return test + + +def create_tests(ops, decorator): + for op in ops: + test_name = "test_op_msg_%s" % (op.name,) + setattr(TestOpMsg, test_name, operation_test(op, decorator)) + + +create_tests(operations, lambda: unittest.skipUnless( + version_tuple >= (3, 7), "requires PyMongo 3.7")) + +create_tests(operations_312, lambda: unittest.skipUnless( + version_tuple >= (3, 12), "requires PyMongo 3.12")) + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py new file mode 100644 index 0000000000..925a00f6a5 --- /dev/null +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -0,0 +1,197 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import itertools + +from mockupdb import MockupDB, going, CommandBase +from pymongo import MongoClient, ReadPreference, version_tuple +from pymongo.read_preferences import (make_read_preference, + read_pref_mode_from_name, + _MONGOS_MODES) + +import unittest +from operations import operations + + +class OpMsgReadPrefBase(unittest.TestCase): + single_mongod = False + + @classmethod + def setUpClass(cls): + super(OpMsgReadPrefBase, cls).setUpClass() + if version_tuple < (3, 7): + raise unittest.SkipTest("requires PyMongo 3.7") + + @classmethod + def add_test(cls, mode, test_name, test): + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = MongoClient(self.primary.uri, + read_preference=read_preference) + self.addCleanup(client.close) + return client + + +class TestOpMsgMongos(OpMsgReadPrefBase): + + @classmethod + def setUpClass(cls): + super(TestOpMsgMongos, cls).setUpClass() + auto_ismaster = { + 'ismaster': True, + 'msg': 'isdbgrid', # Mongos. + 'minWireVersion': 2, + 'maxWireVersion': 6, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super(TestOpMsgMongos, cls).tearDownClass() + + +class TestOpMsgReplicaSet(OpMsgReadPrefBase): + + @classmethod + def setUpClass(cls): + super(TestOpMsgReplicaSet, cls).setUpClass() + cls.primary, cls.secondary = MockupDB(), MockupDB() + for server in cls.primary, cls.secondary: + server.run() + + hosts = [server.address_string + for server in (cls.primary, cls.secondary)] + + primary_ismaster = { + 'ismaster': True, + 'setName': 'rs', + 'hosts': hosts, + 'minWireVersion': 2, + 'maxWireVersion': 6, + } + cls.primary.autoresponds(CommandBase('ismaster'), primary_ismaster) + secondary_ismaster = copy.copy(primary_ismaster) + secondary_ismaster['ismaster'] = False + secondary_ismaster['secondary'] = True + cls.secondary.autoresponds(CommandBase('ismaster'), secondary_ismaster) + + @classmethod + def tearDownClass(cls): + for server in cls.primary, cls.secondary: + server.stop() + super(TestOpMsgReplicaSet, cls).tearDownClass() + + @classmethod + def add_test(cls, mode, test_name, test): + # Skip nearest tests since we don't know if we will select the primary + # or secondary. + if mode != 'nearest': + setattr(cls, test_name, test) + + def setup_client(self, read_preference): + client = MongoClient(self.primary.uri, + replicaSet='rs', + read_preference=read_preference) + + # Run a command on a secondary to discover the topology. This ensures + # that secondaryPreferred commands will select the secondary. + client.admin.command('ismaster', + read_preference=ReadPreference.SECONDARY) + self.addCleanup(client.close) + return client + + +class TestOpMsgSingle(OpMsgReadPrefBase): + single_mongod = True + + @classmethod + def setUpClass(cls): + super(TestOpMsgSingle, cls).setUpClass() + auto_ismaster = { + 'ismaster': True, + 'minWireVersion': 2, + 'maxWireVersion': 6, + } + cls.primary = MockupDB(auto_ismaster=auto_ismaster) + cls.primary.run() + cls.secondary = cls.primary + + @classmethod + def tearDownClass(cls): + cls.primary.stop() + super(TestOpMsgSingle, cls).tearDownClass() + + +def create_op_msg_read_mode_test(mode, operation): + def test(self): + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = self.setup_client(read_preference=pref) + + if operation.op_type == 'always-use-secondary': + expected_server = self.secondary + expected_pref = ReadPreference.SECONDARY + elif operation.op_type == 'must-use-primary': + expected_server = self.primary + expected_pref = ReadPreference.PRIMARY + elif operation.op_type == 'may-use-secondary': + if mode in ('primary', 'primaryPreferred'): + expected_server = self.primary + else: + expected_server = self.secondary + expected_pref = pref + else: + self.fail('unrecognized op_type %r' % operation.op_type) + + # For single mongod we send primaryPreferred instead of primary. + if expected_pref == ReadPreference.PRIMARY and self.single_mongod: + expected_pref = ReadPreference.PRIMARY_PREFERRED + + with going(operation.function, client) as future: + request = expected_server.receive() + request.reply(operation.reply) + + future() # No error. + + self.assertEqual(expected_pref.document, + request.doc.get('$readPreference')) + self.assertNotIn('$query', request.doc) + + return test + + +def generate_op_msg_read_mode_tests(): + matrix = itertools.product(_MONGOS_MODES, operations) + + for entry in matrix: + mode, operation = entry + test = create_op_msg_read_mode_test(mode, operation) + test_name = 'test_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), mode) + test.__name__ = test_name + for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: + cls.add_test(mode, test_name, test) + + +generate_op_msg_read_mode_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_projection.py b/test/mockupdb/test_projection.py new file mode 100644 index 0000000000..0b74c22cbd --- /dev/null +++ b/test/mockupdb/test_projection.py @@ -0,0 +1,56 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""PyMongo shouldn't append projection fields to "find" command, PYTHON-1479.""" + +from bson import SON +from mockupdb import Command, MockupDB, OpQuery, going +from pymongo import MongoClient + +import unittest + + +class TestProjection(unittest.TestCase): + def test_projection(self): + q = {} + fields = {'foo': True} + + # OP_QUERY, + server = MockupDB(auto_ismaster=True, + min_wire_version=0, max_wire_version=3) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + cursor = client.test.collection.find(q, fields) + with going(next, cursor): + request = server.receives(OpQuery(q, fields=fields)) + request.reply([], cursor_id=0) + + # "find" command. + server = MockupDB(auto_ismaster=True, + min_wire_version=0, max_wire_version=4) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + cursor = client.test.collection.find(q, fields) + cmd = Command(SON([('find', 'collection'), ('filter', q), + ('projection', fields)])) + + with going(next, cursor): + request = server.receives(cmd) + request.ok(cursor={'id': 0, 'firstBatch': []}) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py new file mode 100644 index 0000000000..033cdeff19 --- /dev/null +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -0,0 +1,107 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo query and read preference with a sharded cluster.""" + +from bson import SON +from pymongo import MongoClient, version_tuple +from pymongo.read_preferences import (Primary, + PrimaryPreferred, + Secondary, + SecondaryPreferred, + Nearest) +from mockupdb import MockupDB, going, Command, OpMsg + +import unittest + + +class TestQueryAndReadModeSharded(unittest.TestCase): + def test_query_and_read_mode_sharded_op_query(self): + server = MockupDB() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=5) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + modes_without_query = ( + Primary(), + SecondaryPreferred(),) + + modes_with_query = ( + PrimaryPreferred(), + Secondary(), + Nearest(), + SecondaryPreferred([{'tag': 'value'}]),) + + find_command = SON([('find', 'test'), ('filter', {'a': 1})]) + for query in ({'a': 1}, {'$query': {'a': 1}},): + for mode in modes_with_query + modes_without_query: + collection = client.db.get_collection('test', + read_preference=mode) + cursor = collection.find(query.copy()) + with going(next, cursor): + request = server.receives() + if mode in modes_without_query: + # Filter is hoisted out of $query. + request.assert_matches(Command(find_command)) + self.assertFalse('$readPreference' in request) + else: + # Command is nested in $query. + request.assert_matches(Command( + SON([('$query', find_command), + ('$readPreference', mode.document)]))) + + request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) + + @unittest.skipUnless(version_tuple >= (3, 7), "requires PyMongo 3.7") + def test_query_and_read_mode_sharded_op_msg(self): + """Test OP_MSG sends non-primary $readPreference and never $query.""" + server = MockupDB() + server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', + minWireVersion=2, maxWireVersion=6) + server.run() + self.addCleanup(server.stop) + + client = MongoClient(server.uri) + self.addCleanup(client.close) + + read_prefs = ( + Primary(), + SecondaryPreferred(), + PrimaryPreferred(), + Secondary(), + Nearest(), + SecondaryPreferred([{'tag': 'value'}]),) + + for query in ({'a': 1}, {'$query': {'a': 1}},): + for mode in read_prefs: + collection = client.db.get_collection('test', + read_preference=mode) + cursor = collection.find(query.copy()) + with going(next, cursor): + request = server.receives() + # Command is not nested in $query. + request.assert_matches(OpMsg( + SON([('find', 'test'), + ('filter', {'a': 1}), + ('$readPreference', mode.document)]))) + + request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py new file mode 100755 index 0000000000..27b55f3180 --- /dev/null +++ b/test/mockupdb/test_reset_and_request_check.py @@ -0,0 +1,145 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +import itertools + +from mockupdb import MockupDB, going, wait_until +from pymongo.server_type import SERVER_TYPE +from pymongo.errors import ConnectionFailure +from pymongo import MongoClient, version_tuple + +import unittest +from operations import operations + + +class TestResetAndRequestCheck(unittest.TestCase): + def __init__(self, *args, **kwargs): + super(TestResetAndRequestCheck, self).__init__(*args, **kwargs) + self.ismaster_time = 0 + self.client = None + self.server = None + + def setup_server(self): + self.server = MockupDB() + + def responder(request): + self.ismaster_time = time.time() + return request.ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + + self.server.autoresponds('ismaster', responder) + self.server.run() + self.addCleanup(self.server.stop) + + kwargs = {'socketTimeoutMS': 100} + # Disable retryable reads when pymongo supports it. + if version_tuple[:3] >= (3, 9): + kwargs['retryReads'] = False + self.client = MongoClient(self.server.uri, **kwargs) + wait_until(lambda: self.client.nodes, 'connect to standalone') + + def tearDown(self): + if hasattr(self, 'client') and self.client: + self.client.close() + + def _test_disconnect(self, operation): + # Application operation fails. Test that client resets server + # description and does *not* schedule immediate check. + self.setup_server() + + # Network error on application operation. + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives().hangup() + + # Server is Unknown. + topology = self.client._topology + with self.assertRaises(ConnectionFailure): + topology.select_server_by_address(self.server.address, 0) + + time.sleep(0.5) + after = time.time() + + # Demand a reconnect. + with going(self.client.db.command, 'buildinfo'): + self.server.receives('buildinfo').ok() + + last = self.ismaster_time + self.assertGreaterEqual(last, after, 'called ismaster before needed') + + def _test_timeout(self, operation): + # Application operation times out. Test that client does *not* reset + # server description and does *not* schedule immediate check. + self.setup_server() + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives() + before = self.ismaster_time + time.sleep(0.5) + + # Server is *not* Unknown. + topology = self.client._topology + server = topology.select_server_by_address(self.server.address, 0) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertEqual(after, before, 'unneeded ismaster call') + + def _test_not_master(self, operation): + # Application operation gets a "not master" error. + self.setup_server() + + with self.assertRaises(ConnectionFailure): + with going(operation.function, self.client): + self.server.receives().replies(operation.not_master) + before = self.ismaster_time + time.sleep(1) + + # Server is rediscovered. + topology = self.client._topology + server = topology.select_server_by_address(self.server.address, 0) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) + + after = self.ismaster_time + self.assertGreater(after, before, 'ismaster not called') + + +def create_reset_test(operation, test_method): + def test(self): + test_method(self, operation) + + return test + + +def generate_reset_tests(): + test_methods = [ + (TestResetAndRequestCheck._test_disconnect, 'test_disconnect'), + (TestResetAndRequestCheck._test_timeout, 'test_timeout'), + (TestResetAndRequestCheck._test_not_master, 'test_not_master'), + ] + + matrix = itertools.product(operations, test_methods) + + for entry in matrix: + operation, (test_method, name) = entry + test = create_reset_test(operation, test_method) + test_name = '%s_%s' % (name, operation.name.replace(' ', '_')) + test.__name__ = test_name + setattr(TestResetAndRequestCheck, test_name, test) + +generate_reset_tests() + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py new file mode 100644 index 0000000000..5ff6fced4e --- /dev/null +++ b/test/mockupdb/test_slave_okay_rs.py @@ -0,0 +1,79 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with a replica set connection. + +Just make sure SlaveOkay is *not* set on primary reads. +""" + +from mockupdb import MockupDB, going +from pymongo import MongoClient + +import unittest +from operations import operations + + +class TestSlaveOkayRS(unittest.TestCase): + def setup_server(self): + self.primary, self.secondary = MockupDB(), MockupDB() + for server in self.primary, self.secondary: + server.run() + self.addCleanup(server.stop) + + hosts = [server.address_string + for server in (self.primary, self.secondary)] + self.primary.autoresponds( + 'ismaster', + ismaster=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + self.secondary.autoresponds( + 'ismaster', + ismaster=False, secondary=True, setName='rs', hosts=hosts, + minWireVersion=2, maxWireVersion=6) + + +def create_slave_ok_rs_test(operation): + def test(self): + self.setup_server() + assert not operation.op_type == 'always-use-secondary' + + client = MongoClient(self.primary.uri, replicaSet='rs') + self.addCleanup(client.close) + with going(operation.function, client): + request = self.primary.receive() + request.reply(operation.reply) + + self.assertFalse(request.slave_ok, 'SlaveOkay set read mode "primary"') + + return test + + +def generate_slave_ok_rs_tests(): + for operation in operations: + # Don't test secondary operations with MockupDB, the server enforces the + # SlaveOkay bit so integration tests prove we set it. + if operation.op_type == 'always-use-secondary': + continue + test = create_slave_ok_rs_test(operation) + + test_name = 'test_%s' % operation.name.replace(' ', '_') + test.__name__ = test_name + setattr(TestSlaveOkayRS, test_name, test) + + +generate_slave_ok_rs_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py new file mode 100644 index 0000000000..719de57553 --- /dev/null +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -0,0 +1,101 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with: + +- A direct connection to a standalone. +- A direct connection to a slave. +- A direct connection to a mongos. +""" +import itertools + +from pymongo.read_preferences import make_read_preference +from pymongo.read_preferences import read_pref_mode_from_name + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from mockupdb import MockupDB, going +from pymongo import MongoClient + +import unittest +from operations import operations + + +class TestSlaveOkaySharded(unittest.TestCase): + def setup_server(self): + self.mongos1, self.mongos2 = MockupDB(), MockupDB() + + # Collect queries to either server in one queue. + self.q = Queue() + for server in self.mongos1, self.mongos2: + server.subscribe(self.q.put) + server.run() + self.addCleanup(server.stop) + server.autoresponds('ismaster', minWireVersion=2, maxWireVersion=6, + ismaster=True, msg='isdbgrid') + + self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos1.address_string, + self.mongos2.address_string) + + +def create_slave_ok_sharded_test(mode, operation): + def test(self): + self.setup_server() + if operation.op_type == 'always-use-secondary': + slave_ok = True + elif operation.op_type == 'may-use-secondary': + slave_ok = mode != 'primary' + elif operation.op_type == 'must-use-primary': + slave_ok = False + else: + assert False, 'unrecognized op_type %r' % operation.op_type + + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = MongoClient(self.mongoses_uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client): + request = self.q.get(timeout=1) + request.reply(operation.reply) + + if slave_ok: + self.assertTrue(request.slave_ok, 'SlaveOkay not set') + else: + self.assertFalse(request.slave_ok, 'SlaveOkay set') + + return test + + +def generate_slave_ok_sharded_tests(): + modes = 'primary', 'secondary', 'nearest' + matrix = itertools.product(modes, operations) + + for entry in matrix: + mode, operation = entry + test = create_slave_ok_sharded_test(mode, operation) + test_name = 'test_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), mode) + + test.__name__ = test_name + setattr(TestSlaveOkaySharded, test_name, test) + + +generate_slave_ok_sharded_tests() + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py new file mode 100644 index 0000000000..4a0725a869 --- /dev/null +++ b/test/mockupdb/test_slave_okay_single.py @@ -0,0 +1,104 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo's SlaveOkay with: + +- A direct connection to a standalone. +- A direct connection to a slave. +- A direct connection to a mongos. +""" + +import itertools + +from mockupdb import MockupDB, going +from pymongo import MongoClient +from pymongo.read_preferences import (make_read_preference, + read_pref_mode_from_name) +from pymongo.topology_description import TOPOLOGY_TYPE + +import unittest +from operations import operations + + +def topology_type_name(client): + topology_type = client._topology._description.topology_type + return TOPOLOGY_TYPE._fields[topology_type] + + +class TestSlaveOkaySingle(unittest.TestCase): + def setUp(self): + self.server = MockupDB() + self.server.run() + self.addCleanup(self.server.stop) + + +def create_slave_ok_single_test(mode, server_type, ismaster, operation): + def test(self): + ismaster_with_version = ismaster.copy() + ismaster_with_version['minWireVersion'] = 2 + ismaster_with_version['maxWireVersion'] = 6 + self.server.autoresponds('ismaster', **ismaster_with_version) + if operation.op_type == 'always-use-secondary': + slave_ok = True + elif operation.op_type == 'may-use-secondary': + slave_ok = mode != 'primary' or server_type != 'mongos' + elif operation.op_type == 'must-use-primary': + slave_ok = server_type != 'mongos' + else: + assert False, 'unrecognized op_type %r' % operation.op_type + + pref = make_read_preference(read_pref_mode_from_name(mode), + tag_sets=None) + + client = MongoClient(self.server.uri, read_preference=pref) + self.addCleanup(client.close) + with going(operation.function, client): + request = self.server.receive() + request.reply(operation.reply) + + self.assertEqual(topology_type_name(client), 'Single') + if slave_ok: + self.assertTrue(request.slave_ok, 'SlaveOkay not set') + else: + self.assertFalse(request.slave_ok, 'SlaveOkay set') + + return test + + +def generate_slave_ok_single_tests(): + modes = 'primary', 'secondary', 'nearest' + server_types = [ + ('standalone', {'ismaster': True}), + ('slave', {'ismaster': False}), + ('mongos', {'ismaster': True, 'msg': 'isdbgrid'})] + + matrix = itertools.product(modes, server_types, operations) + + for entry in matrix: + mode, (server_type, ismaster), operation = entry + test = create_slave_ok_single_test(mode, server_type, ismaster, + operation) + + test_name = 'test_%s_%s_with_mode_%s' % ( + operation.name.replace(' ', '_'), server_type, mode) + + test.__name__ = test_name + setattr(TestSlaveOkaySingle, test_name, test) + + +generate_slave_ok_single_tests() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/mockupdb/test_starting_from_overflow.py b/test/mockupdb/test_starting_from_overflow.py new file mode 100644 index 0000000000..d94cab0ff3 --- /dev/null +++ b/test/mockupdb/test_starting_from_overflow.py @@ -0,0 +1,76 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that PyMongo ignores the startingFrom field, PYTHON-945.""" + +from mockupdb import going, MockupDB, OpGetMore, OpQuery, Command +from pymongo import MongoClient + +import unittest + + +class TestStartingFromOverflow(unittest.TestCase): + def test_query(self): + server = MockupDB(auto_ismaster=True, + min_wire_version=0, max_wire_version=3) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + cursor = client.test.collection.find() + with going(list, cursor) as docs: + request = server.receives(OpQuery) + request.reply({'a': 1}, cursor_id=123, starting_from=-7) + request = server.receives(OpGetMore, cursor_id=123) + request.reply({'a': 2}, starting_from=-3, cursor_id=0) + + self.assertEqual([{'a': 1}, {'a': 2}], docs()) + + def test_aggregate(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 3}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + with going(client.test.collection.aggregate, []) as cursor: + request = server.receives(Command) + request.reply({'cursor': { + 'id': 123, + 'firstBatch': [{'a': 1}]}}) + + with going(list, cursor()) as docs: + request = server.receives(OpGetMore, cursor_id=123) + request.reply({'a': 2}, starting_from=-3, cursor_id=0) + + self.assertEqual([{'a': 1}, {'a': 2}], docs()) + + def test_find_command(self): + server = MockupDB(auto_ismaster={'maxWireVersion': 4}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + with going(list, client.test.collection.find()) as docs: + server.receives(Command).reply({'cursor': { + 'id': 123, + 'firstBatch': [{'a': 1}]}}) + + request = server.receives(Command("getMore", 123)) + request.reply({'cursor': { + 'id': 0, + 'nextBatch': [{'a': 2}]}}, + starting_from=-3) + + self.assertEqual([{'a': 1}, {'a': 2}], docs()) + + +if __name__ == '__main__': + unittest.main() From e655b0bb9940904dcbc4e6f7fc18a79262dd6ffc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 16:35:31 -0800 Subject: [PATCH 0525/2111] PYTHON-3001 Bump minimum pymongocrypt version to 1.2.0 (#793) --- doc/changelog.rst | 5 +++++ pymongo/encryption.py | 4 ++++ pymongo/encryption_options.py | 4 ++++ setup.py | 2 +- 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3b46667cee..5f0ff23300 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -179,6 +179,8 @@ Breaking Changes in 4.0 parsing MongoDB URIs. - Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. +- PyMongoCrypt 1.2.0 or later is now required for client side field level + encryption support. Notable improvements .................... @@ -194,6 +196,9 @@ Notable improvements choose a `srvMaxHosts` sized subset of hosts. - Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access to a client's configuration options. +- Support for the "kmip" KMS provider for client side field level encryption. + See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts` + and :mod:`~pymongo.encryption`. Issues Resolved ............... diff --git a/pymongo/encryption.py b/pymongo/encryption.py index cb4080397f..064ba48d51 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -426,6 +426,10 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c96f4a6d67..d0c2d5ce72 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -129,6 +129,10 @@ def __init__(self, kms_providers, key_vault_namespace, kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. diff --git a/setup.py b/setup.py index 8fcad6cc60..63a1df4955 100755 --- a/setup.py +++ b/setup.py @@ -277,7 +277,7 @@ def build_extension(self, ext): pyopenssl_reqs.append('certifi') extras_require = { - 'encryption': ['pymongocrypt>=1.1.0,<2.0.0'], + 'encryption': ['pymongocrypt>=1.2.0,<2.0.0'], 'ocsp': pyopenssl_reqs, 'snappy': ['python-snappy'], 'zstd': ['zstandard'], From 24cc4c42bf6476c25f90cfcc7e10e63e359465b2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 16 Nov 2021 17:14:33 -0800 Subject: [PATCH 0526/2111] PYTHON-3019 Fix doc test failures (#794) Remove pymongo 2 to 3 migration guide. Make raw_bson doc tests less flakey. --- bson/raw_bson.py | 16 +- doc/changelog.rst | 4 +- doc/index.rst | 4 - doc/migrate-to-pymongo3.rst | 544 ------------------------------------ doc/migrate-to-pymongo4.rst | 3 +- 5 files changed, 13 insertions(+), 558 deletions(-) delete mode 100644 doc/migrate-to-pymongo3.rst diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 339354d7dd..bfe888b6b7 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -28,23 +28,23 @@ >>> client.drop_database('db') >>> client.drop_database('replica_db') >>> db = client.db - >>> result = db.test.insert_many([{'a': 1}, - ... {'b': 1}, - ... {'c': 1}, - ... {'d': 1}]) + >>> result = db.test.insert_many([{'_id': 1, 'a': 1}, + ... {'_id': 2, 'b': 1}, + ... {'_id': 3, 'c': 1}, + ... {'_id': 4, 'd': 1}]) >>> replica_db = client.replica_db >>> for doc in db.test.find(): ... print(f"raw document: {doc.raw}") ... print(f"decoded document: {bson.decode(doc.raw)}") ... result = replica_db.test.insert_one(doc) raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'a': 1} + decoded document: {'_id': 1, 'a': 1} raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'b': 1} + decoded document: {'_id': 2, 'b': 1} raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'c': 1} + decoded document: {'_id': 3, 'c': 1} raw document: b'...' - decoded document: {'_id': ObjectId('...'), 'd': 1} + decoded document: {'_id': 4, 'd': 1} For use cases like moving documents across different databases or writing binary blobs to disk, using raw BSON documents provides better speed and avoids the diff --git a/doc/changelog.rst b/doc/changelog.rst index 5f0ff23300..222750f550 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1881,7 +1881,9 @@ Changes in Version 2.9 Version 2.9 provides an upgrade path to PyMongo 3.x. Most of the API changes from PyMongo 3.0 have been backported in a backward compatible way, allowing applications to be written against PyMongo >= 2.9, rather then PyMongo 2.x or -PyMongo 3.x. See the :doc:`/migrate-to-pymongo3` for detailed examples. +PyMongo 3.x. See the `PyMongo 3 Migration Guide +`_ for +detailed examples. .. note:: There are a number of new deprecations in this release for features that were removed in PyMongo 3.0. diff --git a/doc/index.rst b/doc/index.rst index 9ef6907181..da05bf80ae 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -34,9 +34,6 @@ everything you need to know to use **PyMongo**. :doc:`migrate-to-pymongo4` A PyMongo 3.x to 4.x migration guide. -:doc:`migrate-to-pymongo3` - A PyMongo 2.x to 3.x migration guide. - :doc:`python3` Frequently asked questions about python 3 support. @@ -123,5 +120,4 @@ Indices and tables changelog python3 migrate-to-pymongo4 - migrate-to-pymongo3 developer/index diff --git a/doc/migrate-to-pymongo3.rst b/doc/migrate-to-pymongo3.rst deleted file mode 100644 index 633d0a7abb..0000000000 --- a/doc/migrate-to-pymongo3.rst +++ /dev/null @@ -1,544 +0,0 @@ -PyMongo 3 Migration Guide -========================= - -.. contents:: - -.. testsetup:: - - from pymongo import MongoClient, ReadPreference - client = MongoClient() - collection = client.my_database.my_collection - -PyMongo 3 is a partial rewrite bringing a large number of improvements. It -also brings a number of backward breaking changes. This guide provides a -roadmap for migrating an existing application from PyMongo 2.x to 3.x or -writing libraries that will work with both PyMongo 2.x and 3.x. - -PyMongo 2.9 ------------ - -The first step in any successful migration involves upgrading to, or -requiring, at least PyMongo 2.9. If your project has a -requirements.txt file, add the line "pymongo >= 2.9, < 3.0" until you have -completely migrated to PyMongo 3. Most of the key new -methods and options from PyMongo 3.0 are backported in PyMongo 2.9 making -migration much easier. - -Enable Deprecation Warnings ---------------------------- - -Starting with PyMongo 2.9, :exc:`DeprecationWarning` is raised by most methods -removed in PyMongo 3.0. Make sure you enable runtime warnings to see -where deprecated functions and methods are being used in your application:: - - python -Wd - -Warnings can also be changed to errors:: - - python -Wd -Werror - -.. note:: Not all deprecated features raise :exc:`DeprecationWarning` when - used. For example, the :meth:`~pymongo.collection.Collection.find` options - renamed in PyMongo 3.0 do not raise :exc:`DeprecationWarning` when used in - PyMongo 2.x. See also `Removed features with no migration path`_. - -CRUD API --------- - -Changes to find() and find_one() -................................ - -"spec" renamed "filter" -~~~~~~~~~~~~~~~~~~~~~~~ - -The `spec` option has been renamed to `filter`. Code like this:: - - >>> cursor = collection.find(spec={"a": 1}) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find(filter={"a": 1}) - -or this with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}) - -"fields" renamed "projection" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `fields` option has been renamed to `projection`. Code like this:: - - >>> cursor = collection.find({"a": 1}, fields={"_id": False}) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, projection={"_id": False}) - -or this with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, {"_id": False}) - -"partial" renamed "allow_partial_results" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `partial` option has been renamed to `allow_partial_results`. Code like -this:: - - >>> cursor = collection.find({"a": 1}, partial=True) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, allow_partial_results=True) - -"timeout" replaced by "no_cursor_timeout" -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `timeout` option has been replaced by `no_cursor_timeout`. Code like this:: - - >>> cursor = collection.find({"a": 1}, timeout=False) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> cursor = collection.find({"a": 1}, no_cursor_timeout=True) - -"network_timeout" is removed -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The `network_timeout` option has been removed. This option was always the -wrong solution for timing out long running queries and should never be used -in production. Starting with **MongoDB 2.6** you can use the $maxTimeMS query -modifier. Code like this:: - - # Set a 5 second select() timeout. - >>> cursor = collection.find({"a": 1}, network_timeout=5) - -can be changed to this with PyMongo 2.9 or later:: - - # Set a 5 second (5000 millisecond) server side query timeout. - >>> cursor = collection.find({"a": 1}, modifiers={"$maxTimeMS": 5000}) - -or with PyMongo 3.5 or later: - - >>> cursor = collection.find({"a": 1}, max_time_ms=5000) - -or with any version of PyMongo: - -.. doctest:: - - >>> cursor = collection.find({"$query": {"a": 1}, "$maxTimeMS": 5000}) - -.. seealso:: `$maxTimeMS - `_ - -Tailable cursors -~~~~~~~~~~~~~~~~ - -The `tailable` and `await_data` options have been replaced by `cursor_type`. -Code like this:: - - >>> cursor = collection.find({"a": 1}, tailable=True) - >>> cursor = collection.find({"a": 1}, tailable=True, await_data=True) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo import CursorType - >>> cursor = collection.find({"a": 1}, cursor_type=CursorType.TAILABLE) - >>> cursor = collection.find({"a": 1}, cursor_type=CursorType.TAILABLE_AWAIT) - -Other removed options -~~~~~~~~~~~~~~~~~~~~~ - -The `slave_okay`, `read_preference`, `tag_sets`, -and `secondary_acceptable_latency_ms` options have been removed. See the `Read -Preferences`_ section for solutions. - -The aggregate method always returns a cursor -............................................ - -PyMongo 2.6 added an option to return an iterable cursor from -:meth:`~pymongo.collection.Collection.aggregate`. In PyMongo 3 -:meth:`~pymongo.collection.Collection.aggregate` always returns a cursor. Use -the `cursor` option for consistent behavior with PyMongo 2.9 and later: - -.. doctest:: - - >>> for result in collection.aggregate([], cursor={}): - ... pass - -Read Preferences ----------------- - -The "slave_okay" option is removed -.................................. - -The `slave_okay` option is removed from PyMongo's API. The -secondaryPreferred read preference provides the same behavior. -Code like this:: - - >>> client = MongoClient(slave_okay=True) - -can be changed to this with PyMongo 2.9 or newer: - -.. doctest:: - - >>> client = MongoClient(readPreference="secondaryPreferred") - -The "read_preference" attribute is immutable -............................................ - -Code like this:: - - >>> from pymongo import ReadPreference - >>> db = client.my_database - >>> db.read_preference = ReadPreference.SECONDARY - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> db = client.get_database("my_database", - ... read_preference=ReadPreference.SECONDARY) - -Code like this:: - - >>> cursor = collection.find({"a": 1}, - ... read_preference=ReadPreference.SECONDARY) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> coll2 = collection.with_options(read_preference=ReadPreference.SECONDARY) - >>> cursor = coll2.find({"a": 1}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -The "tag_sets" option and attribute are removed -............................................... - -The `tag_sets` MongoClient option is removed. The `read_preference` -option can be used instead. Code like this:: - - >>> client = MongoClient( - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{"dc": "ny"}, {"dc": "sf"}]) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.read_preferences import Secondary - >>> client = MongoClient(read_preference=Secondary([{"dc": "ny"}])) - -To change the tags sets for a Database or Collection, code like this:: - - >>> db = client.my_database - >>> db.read_preference = ReadPreference.SECONDARY - >>> db.tag_sets = [{"dc": "ny"}] - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> db = client.get_database("my_database", - ... read_preference=Secondary([{"dc": "ny"}])) - -Code like this:: - - >>> cursor = collection.find( - ... {"a": 1}, - ... read_preference=ReadPreference.SECONDARY, - ... tag_sets=[{"dc": "ny"}]) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.read_preferences import Secondary - >>> coll2 = collection.with_options( - ... read_preference=Secondary([{"dc": "ny"}])) - >>> cursor = coll2.find({"a": 1}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -The "secondary_acceptable_latency_ms" option and attribute are removed -...................................................................... - -PyMongo 2.x supports `secondary_acceptable_latency_ms` as an option to methods -throughout the driver, but mongos only supports a global latency option. -PyMongo 3.x has changed to match the behavior of mongos, allowing migration -from a single server, to a replica set, to a sharded cluster without a -surprising change in server selection behavior. A new option, -`localThresholdMS`, is available through MongoClient and should be used in -place of `secondaryAcceptableLatencyMS`. Code like this:: - - >>> client = MongoClient(readPreference="nearest", - ... secondaryAcceptableLatencyMS=100) - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client = MongoClient(readPreference="nearest", - ... localThresholdMS=100) - -Write Concern -------------- - -The "safe" option is removed -............................ - -In PyMongo 3 the `safe` option is removed from the entire API. -:class:`~pymongo.mongo_client.MongoClient` has always defaulted to acknowledged -write operations and continues to do so in PyMongo 3. - -The "write_concern" attribute is immutable -.......................................... - -The `write_concern` attribute is immutable in PyMongo 3. Code like this:: - - >>> client = MongoClient() - >>> client.write_concern = {"w": "majority"} - -can be changed to this with any version of PyMongo: - -.. doctest:: - - >>> client = MongoClient(w="majority") - -Code like this:: - - >>> db = client.my_database - >>> db.write_concern = {"w": "majority"} - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo import WriteConcern - >>> db = client.get_database("my_database", - ... write_concern=WriteConcern(w="majority")) - -The new CRUD API write methods do not accept write concern options. Code like -this:: - - >>> oid = collection.insert({"a": 2}, w="majority") - -can be changed to this with PyMongo 3 or later: - -.. doctest:: - - >>> from pymongo import WriteConcern - >>> coll2 = collection.with_options( - ... write_concern=WriteConcern(w="majority")) - >>> oid = coll2.insert_one({"a": 2}) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` - -Codec Options -------------- - -The "document_class" attribute is removed -......................................... - -Code like this:: - - >>> from bson.son import SON - >>> client = MongoClient() - >>> client.document_class = SON - -can be replaced by this in any version of PyMongo: - -.. doctest:: - - >>> from bson.son import SON - >>> client = MongoClient(document_class=SON) - -or to change the `document_class` for a :class:`~pymongo.database.Database` -with PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson.codec_options import CodecOptions - >>> from bson.son import SON - >>> db = client.get_database("my_database", CodecOptions(SON)) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` and - :meth:`~pymongo.collection.Collection.with_options` - -The "uuid_subtype" option and attribute are removed -................................................... - -Code like this:: - - >>> from bson.binary import JAVA_LEGACY - >>> db = client.my_database - >>> db.uuid_subtype = JAVA_LEGACY - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson.binary import JAVA_LEGACY - >>> from bson.codec_options import CodecOptions - >>> db = client.get_database("my_database", - ... CodecOptions(uuid_representation=JAVA_LEGACY)) - -.. seealso:: :meth:`~pymongo.database.Database.get_collection` and - :meth:`~pymongo.collection.Collection.with_options` - -MongoClient ------------ - -MongoClient connects asynchronously -................................... - -In PyMongo 3, the :class:`~pymongo.mongo_client.MongoClient` constructor no -longer blocks while connecting to the server or servers, and it no longer -raises :exc:`~pymongo.errors.ConnectionFailure` if they are unavailable, nor -:exc:`~pymongo.errors.ConfigurationError` if the user’s credentials are wrong. -Instead, the constructor returns immediately and launches the connection -process on background threads. The `connect` option is added to control whether -these threads are started immediately, or when the client is first used. - -For consistent behavior in PyMongo 2.x and PyMongo 3.x, code like this:: - - >>> from pymongo.errors import ConnectionFailure - >>> try: - ... client = MongoClient() - ... except ConnectionFailure: - ... print("Server not available") - >>> - -can be changed to this with PyMongo 2.9 or later: - -.. doctest:: - - >>> from pymongo.errors import ConnectionFailure - >>> client = MongoClient(connect=False) - >>> try: - ... client.admin.command("ping") - ... except ConnectionFailure: - ... print("Server not available") - >>> - -Any operation can be used to determine if the server is available. We choose -the "ping" command here because it is cheap and does not require auth, so -it is a simple way to check whether the server is available. - -The max_pool_size parameter is removed -...................................... - -PyMongo 3 replaced the max_pool_size parameter with support for the MongoDB URI -`maxPoolSize` option. Code like this:: - - >>> client = MongoClient(max_pool_size=10) - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client = MongoClient(maxPoolSize=10) - >>> client = MongoClient("mongodb://localhost:27017/?maxPoolSize=10") - -The "disconnect" method is removed -.................................. - -Code like this:: - - >>> client.disconnect() - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> client.close() - -The host and port attributes are removed -........................................ - -Code like this:: - - >>> host = client.host - >>> port = client.port - -can be replaced by this with PyMongo 2.9 or later: - -.. doctest:: - - >>> address = client.address - >>> host, port = address or (None, None) - -BSON ----- - -"as_class", "tz_aware", and "uuid_subtype" are removed -...................................................... - -The `as_class`, `tz_aware`, and `uuid_subtype` parameters have been -removed from the functions provided in :mod:`bson`. Furthermore, the -:func:`~bson.encode` and :func:`~bson.decode` functions have been added -as more performant alternatives to the :meth:`bson.BSON.encode` and -:meth:`bson.BSON.decode` methods. Code like this:: - - >>> from bson import BSON - >>> from bson.son import SON - >>> encoded = BSON.encode({"a": 1}, as_class=SON) - -can be replaced by this in PyMongo 2.9 or later: - -.. doctest:: - - >>> from bson import encode - >>> from bson.codec_options import CodecOptions - >>> from bson.son import SON - >>> encoded = encode({"a": 1}, codec_options=CodecOptions(SON)) - -Removed features with no migration path ---------------------------------------- - -MasterSlaveConnection is removed -................................ - -Master slave deployments are deprecated in MongoDB. Starting with MongoDB 3.0 -a replica set can have up to 50 members and that limit is likely to be -removed in later releases. We recommend migrating to replica sets instead. - -Requests are removed -.................... - -The client methods `start_request`, `in_request`, and `end_request` are -removed. Requests were designed to make read-your-writes consistency more -likely with the w=0 write concern. Additionally, a thread in a request used the -same member for all secondary reads in a replica set. To ensure -read-your-writes consistency in PyMongo 3.0, do not override the default write -concern with w=0, and do not override the default read preference of PRIMARY. - -The "compile_re" option is removed -.................................. - -In PyMongo 3 regular expressions are never compiled to Python match objects. - -The "use_greenlets" option is removed -..................................... - -The `use_greenlets` option was meant to allow use of PyMongo with Gevent -without the use of gevent.monkey.patch_threads(). This option caused a lot -of confusion and made it difficult to support alternative asyncio libraries -like Eventlet. Users of Gevent should use gevent.monkey.patch_all() instead. - -.. seealso:: :doc:`examples/gevent` diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index c8ac401a18..5acd3a5d12 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -25,7 +25,8 @@ completely migrated to PyMongo 4. Most of the key new methods and options from PyMongo 4.0 are backported in PyMongo 3.12 making migration much easier. .. note:: Users of PyMongo 2.X who wish to upgrade to 4.x must first upgrade - to PyMongo 3.x by following the :doc:`migrate-to-pymongo3`. + to PyMongo 3.x by following the `PyMongo 3 Migration Guide + `_. Python 3.6+ ----------- From 12a6af7ab6cfeb8004570cea6812327c7fd4b226 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 17 Nov 2021 12:31:59 -0800 Subject: [PATCH 0527/2111] PYTHON-2981 Stop using MongoClient.address for hashing and equality (#795) --- doc/changelog.rst | 3 +++ pymongo/mongo_client.py | 6 +++--- pymongo/monitor.py | 3 +-- pymongo/settings.py | 16 +++++++++++++--- pymongo/topology.py | 14 ++++++++++++++ test/test_client.py | 29 +++++++++++++++++++++++------ 6 files changed, 57 insertions(+), 14 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 222750f550..88c1b7cd20 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -177,6 +177,9 @@ Breaking Changes in 4.0 :exc:`~pymongo.errors.InvalidURI` exception when it encounters unescaped percent signs in username and password when parsing MongoDB URIs. +- Comparing two :class:`~pymongo.mongo_client.MongoClient` instances now + uses a set of immutable properties rather than + :attr:`~pymongo.mongo_client.MongoClient.address` which can change. - Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. - PyMongoCrypt 1.2.0 or later is now required for client side field level diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 1f8b781487..90e6a7706f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -748,9 +748,9 @@ def __init__( server_selector=options.server_selector, heartbeat_frequency=options.heartbeat_frequency, fqdn=fqdn, - srv_service_name=srv_service_name, direct_connection=options.direct_connection, load_balanced=options.load_balanced, + srv_service_name=srv_service_name, srv_max_hosts=srv_max_hosts ) @@ -1337,14 +1337,14 @@ def _retryable_write(self, retryable, func, session): def __eq__(self, other): if isinstance(other, self.__class__): - return self.address == other.address + return self._topology == other._topology return NotImplemented def __ne__(self, other): return not self == other def __hash__(self): - return hash(self.address) + return hash(self._topology) def _repr_helper(self): def option_repr(option, value): diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 37b894bd53..a383e272cd 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -299,7 +299,6 @@ def __init__(self, topology, topology_settings): self._settings = topology_settings self._seedlist = self._settings._seeds self._fqdn = self._settings.fqdn - self._srv_service_name = self._settings._srv_service_name def _run(self): seedlist = self._get_seedlist() @@ -319,7 +318,7 @@ def _get_seedlist(self): try: resolver = _SrvResolver(self._fqdn, self._settings.pool_options.connect_timeout, - self._srv_service_name) + self._settings.srv_service_name) seedlist, ttl = resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. diff --git a/pymongo/settings.py b/pymongo/settings.py index e9e28c13ac..d17b5e8b86 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -39,9 +39,9 @@ def __init__(self, heartbeat_frequency=common.HEARTBEAT_FREQUENCY, server_selector=None, fqdn=None, - srv_service_name=common.SRV_SERVICE_NAME, direct_connection=False, load_balanced=None, + srv_service_name=common.SRV_SERVICE_NAME, srv_max_hosts=0): """Represent MongoClient's configuration. @@ -62,11 +62,11 @@ def __init__(self, self._server_selection_timeout = server_selection_timeout self._server_selector = server_selector self._fqdn = fqdn - self._srv_service_name = srv_service_name self._heartbeat_frequency = heartbeat_frequency - self._srv_max_hosts = srv_max_hosts or 0 self._direct = direct_connection self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the @@ -131,6 +131,16 @@ def load_balanced(self): """True if the client was configured to connect to a load balancer.""" return self._load_balanced + @property + def srv_service_name(self): + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self): + """The srvMaxHosts.""" + return self._srv_max_hosts + def get_topology_type(self): if self.load_balanced: return TOPOLOGY_TYPE.LoadBalanced diff --git a/pymongo/topology.py b/pymongo/topology.py index c6a702fde5..9139c1492e 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -803,6 +803,20 @@ def __repr__(self): msg = 'CLOSED ' return '<%s %s%r>' % (self.__class__.__name__, msg, self._description) + def eq_props(self): + """The properties to use for MongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, + ts.srv_service_name) + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self): + return hash(self.eq_props()) + class _ErrorContext(object): """An error with context for SDAM error handling.""" diff --git a/test/test_client.py b/test/test_client.py index 34922d1097..8c89a45481 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -676,15 +676,32 @@ def test_init_disconnected_with_auth(self): self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - c = connected(rs_or_single_client()) + seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + c = rs_or_single_client(seed, connect=False) + self.addCleanup(c.close) self.assertEqual(client_context.client, c) - # Explicitly test inequality self.assertFalse(client_context.client != c) + c = rs_or_single_client('invalid.com', connect=False) + self.addCleanup(c.close) + self.assertNotEqual(client_context.client, c) + self.assertTrue(client_context.client != c) + # Seeds differ: + self.assertNotEqual(MongoClient('a', connect=False), + MongoClient('b', connect=False)) + # Same seeds but out of order still compares equal: + self.assertEqual(MongoClient(['a', 'b', 'c'], connect=False), + MongoClient(['c', 'a', 'b'], connect=False)) + def test_hashable(self): - c = connected(rs_or_single_client()) + seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + c = rs_or_single_client(seed, connect=False) + self.addCleanup(c.close) self.assertIn(c, {client_context.client}) + c = rs_or_single_client('invalid.com', connect=False) + self.addCleanup(c.close) + self.assertNotIn(c, {client_context.client}) def test_host_w_port(self): with self.assertRaises(ValueError): @@ -1635,19 +1652,19 @@ def test_service_name_from_kwargs(self): client = MongoClient( 'mongodb+srv://user:password@test22.test.build.10gen.cc', srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings._srv_service_name, + self.assertEqual(client._topology_settings.srv_service_name, 'customname') client = MongoClient( 'mongodb+srv://user:password@test22.test.build.10gen.cc' '/?srvServiceName=shouldbeoverriden', srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings._srv_service_name, + self.assertEqual(client._topology_settings.srv_service_name, 'customname') client = MongoClient( 'mongodb+srv://user:password@test22.test.build.10gen.cc' '/?srvServiceName=customname', connect=False) - self.assertEqual(client._topology_settings._srv_service_name, + self.assertEqual(client._topology_settings.srv_service_name, 'customname') @unittest.skipUnless( From 9cf88cfdc102a54019eae0f93f473cbebc5b1c3c Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 17 Nov 2021 17:37:05 -0800 Subject: [PATCH 0528/2111] PYTHON-2773 Mockupdb test failures (#796) --- test/mockupdb/operations.py | 45 ++----- test/mockupdb/test_auth_recovering_member.py | 2 +- test/mockupdb/test_cluster_time.py | 51 ++----- test/mockupdb/test_cursor_namespace.py | 4 +- test/mockupdb/test_handshake.py | 31 ++--- test/mockupdb/test_legacy_crud.py | 126 ------------------ test/mockupdb/test_list_indexes.py | 23 ---- test/mockupdb/test_max_staleness.py | 2 +- test/mockupdb/test_mixed_version_sharded.py | 2 +- .../mockupdb/test_mongos_command_read_mode.py | 33 ++--- test/mockupdb/test_op_msg.py | 66 +-------- test/mockupdb/test_op_msg_read_preference.py | 9 +- test/mockupdb/test_projection.py | 56 -------- test/mockupdb/test_query_read_pref_sharded.py | 45 +------ test/mockupdb/test_reset_and_request_check.py | 9 +- test/mockupdb/test_slave_okay_single.py | 6 +- test/mockupdb/test_starting_from_overflow.py | 76 ----------- 17 files changed, 62 insertions(+), 524 deletions(-) delete mode 100755 test/mockupdb/test_legacy_crud.py delete mode 100644 test/mockupdb/test_projection.py delete mode 100644 test/mockupdb/test_starting_from_overflow.py diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 2c8701ae83..9fb0ca16b6 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -15,6 +15,7 @@ from collections import namedtuple from mockupdb import * +from mockupdb import OpMsgReply from pymongo import ReadPreference __all__ = ['operations', 'upgrades'] @@ -51,11 +52,11 @@ sharded cluster (PYTHON-868). """ -not_master_reply_to_query = OpReply( +not_master_reply_to_query = OpMsgReply( {'$err': 'not master'}, flags=REPLY_FLAGS['QueryFailure']) -not_master_reply_to_command = OpReply(ok=0, errmsg='not master') +not_master_reply_to_command = OpMsgReply(ok=0, errmsg='not master') operations = [ Operation( @@ -76,20 +77,6 @@ reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='may-use-secondary', not_master=not_master_reply_to_command), - Operation( - 'mapreduce', - lambda client: client.db.collection.map_reduce( - 'function() {}', 'function() {}'), - reply={'result': {'db': 'db', 'collection': 'out_collection'}}, - op_type='must-use-primary', - not_master=not_master_reply_to_command), - Operation( - 'inline_mapreduce', - lambda client: client.db.collection.inline_map_reduce( - 'function() {}', 'function() {}', {'out': {'inline': 1}}), - reply={'results': []}, - op_type='may-use-secondary', - not_master=not_master_reply_to_command), Operation( 'options', lambda client: client.db.collection.options(), @@ -109,12 +96,6 @@ reply={'ok': 1}, op_type='always-use-secondary', not_master=OpReply(ok=0, errmsg='node is recovering')), - Operation( - 'listCollections', - lambda client: client.db.collection_names(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='must-use-primary', - not_master=not_master_reply_to_command), Operation( 'listIndexes', lambda client: client.db.collection.index_information(), @@ -130,19 +111,9 @@ ['name', 'function', 'old', 'new', 'wire_version']) upgrades = [ - Upgrade('index_information', - lambda client: client.db.collection.index_information(), - old=OpQuery(namespace='db.system.indexes'), - new=Command('listIndexes', 'collection', namespace='db'), - wire_version=3), - Upgrade('collection_names', - lambda client: client.db.collection_names(), - old=Command('aggregate', 'system.namespaces', namespace='db'), - new=Command('listCollections', namespace='db'), - wire_version=3), - Upgrade('options', - lambda client: client.db.collection.options(), - old=Command('aggregate', 'system.namespaces', namespace='db'), - new=Command('listCollections', namespace='db'), - wire_version=3), + Upgrade('estimated_document_count', + lambda client: client.db.collection.estimated_document_count(), + old=OpMsg('count', 'collection', namespace='db'), + new=OpMsg('aggregate', 'collection', namespace='db'), + wire_version=12), ] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py index 360c593a00..6fb983b37f 100755 --- a/test/mockupdb/test_auth_recovering_member.py +++ b/test/mockupdb/test_auth_recovering_member.py @@ -44,7 +44,7 @@ def test_auth_recovering_member(self): # error. If it raises AutoReconnect we know it actually tried the # server, and that's wrong. with self.assertRaises(ServerSelectionTimeoutError): - client.db.authenticate('user', 'password') + client.db.command("ping") if __name__ == '__main__': unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index fae6c3faae..858e32a0fa 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -19,8 +19,7 @@ from pymongo import (MongoClient, InsertOne, UpdateOne, - DeleteMany, - version_tuple) + DeleteMany) import unittest @@ -54,23 +53,6 @@ def cluster_time_conversation(self, callback, replies): reply['$clusterTime'] = {'clusterTime': cluster_time} request.reply(reply) - # Now test that no commands include $clusterTime with wire version 5, - # even though the isMaster reply still has $clusterTime. - server.cancel_responder(responder) - server.autoresponds('ismaster', - {'minWireVersion': 0, - 'maxWireVersion': 5, - '$clusterTime': {'clusterTime': cluster_time}}) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - with going(callback, client): - for reply in replies: - request = server.receives() - self.assertNotIn('$clusterTime', request) - request.reply(reply) - def test_command(self): def callback(client): client.db.command('ping') @@ -158,27 +140,16 @@ def test_monitor(self): request.reply(error) # PyMongo 3.11+ closes the monitoring connection on command errors. - if version_tuple >= (3, 11, -1): - # Fourth exchange: the Monitor closes the connection and runs the - # handshake on a new connection. - request = server.receives('ismaster') - # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn('$clusterTime', request) - - # Reply without $clusterTime. - reply.pop('$clusterTime') - request.reply(reply) - else: - # Fourth exchange: the Monitor retry attempt uses the clusterTime - # from the previous isMaster error. - request = server.receives('ismaster') - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) - - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - error['$clusterTime'] = {'clusterTime': cluster_time} - request.reply(error) + + # Fourth exchange: the Monitor closes the connection and runs the + # handshake on a new connection. + request = server.receives('ismaster') + # No $clusterTime in first ismaster, only in subsequent ones + self.assertNotIn('$clusterTime', request) + + # Reply without $clusterTime. + reply.pop('$clusterTime') + request.reply(reply) # Fifth exchange: the Monitor attempt uses the clusterTime from # the previous isMaster error. diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 600f7bca6d..10605601cf 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -15,7 +15,7 @@ """Test list_indexes with more than one batch.""" from mockupdb import going, MockupDB -from pymongo import MongoClient, version_tuple +from pymongo import MongoClient import unittest @@ -57,7 +57,6 @@ def op(): return list(self.client.test.collection.aggregate([])) self._test_cursor_namespace(op, 'aggregate') - @unittest.skipUnless(version_tuple >= (3, 11, -1), 'Fixed in pymongo 3.11') def test_find_cursor(self): def op(): return list(self.client.test.collection.find()) @@ -71,7 +70,6 @@ def op(): class TestKillCursorsNamespace(unittest.TestCase): @classmethod - @unittest.skipUnless(version_tuple >= (3, 12, -1), 'Fixed in pymongo 3.12') def setUpClass(cls): cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) cls.server.run() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 9dbdec3057..621f01728f 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -14,7 +14,7 @@ from mockupdb import MockupDB, OpReply, OpMsg, absent, Command, go -from pymongo import MongoClient, version as pymongo_version, version_tuple +from pymongo import MongoClient, version as pymongo_version from pymongo.errors import OperationFailure import unittest @@ -33,7 +33,6 @@ def _check_handshake_data(request): class TestHandshake(unittest.TestCase): - @unittest.skipUnless(version_tuple >= (3, 4), "requires PyMongo 3.4") def test_client_handshake_data(self): primary, secondary = MockupDB(), MockupDB() for server in primary, secondary: @@ -72,20 +71,14 @@ def test_client_handshake_data(self): primary.receives('ismaster', 1, client=absent).ok(error_response) secondary.receives('ismaster', 1, client=absent).ok(error_response) - # PyMongo 3.11+ closes the monitoring connection on command errors. - if version_tuple >= (3, 11, -1): - # The heartbeat retry (on a new connection) does have client data. - heartbeat = primary.receives('ismaster') - _check_handshake_data(heartbeat) - heartbeat.ok(primary_response) - - heartbeat = secondary.receives('ismaster') - _check_handshake_data(heartbeat) - heartbeat.ok(secondary_response) - else: - # The heartbeat retry has no client data after a command failure. - primary.receives('ismaster', 1, client=absent).ok(error_response) - secondary.receives('ismaster', 1, client=absent).ok(error_response) + # The heartbeat retry (on a new connection) does have client data. + heartbeat = primary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(primary_response) + + heartbeat = secondary.receives('ismaster') + _check_handshake_data(heartbeat) + heartbeat.ok(secondary_response) # Still no client data. primary.receives('ismaster', 1, client=absent).ok(primary_response) @@ -113,15 +106,11 @@ def test_client_handshake_data(self): request.ok(primary_response) else: # Command succeeds. - if version_tuple >= (3, 7): - request.assert_matches(OpMsg('whatever')) - else: - request.assert_matches(Command('whatever')) + request.assert_matches(OpMsg('whatever')) request.ok() assert future() return - @unittest.skipUnless(version_tuple >= (3, 11, -1), "requires PyMongo 3.11") def test_client_handshake_saslSupportedMechs(self): server = MockupDB() server.run() diff --git a/test/mockupdb/test_legacy_crud.py b/test/mockupdb/test_legacy_crud.py deleted file mode 100755 index 508313dbbd..0000000000 --- a/test/mockupdb/test_legacy_crud.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2017 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from bson.son import SON -from mockupdb import (MockupDB, going, OpInsert, OpMsg, absent, Command, - OP_MSG_FLAGS) -from pymongo import MongoClient, WriteConcern, version_tuple - -import unittest - - -class TestLegacyCRUD(unittest.TestCase): - def test_op_insert_manipulate_false(self): - # Test three aspects of legacy insert with manipulate=False: - # 1. The return value is None, [None], or [None, None] as appropriate. - # 2. _id is not set on the passed-in document object. - # 3. _id is not sent to server. - server = MockupDB(auto_ismaster=True) - server.run() - self.addCleanup(server.stop) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - coll = client.db.get_collection('coll', write_concern=WriteConcern(w=0)) - doc = {} - with going(coll.insert, doc, manipulate=False) as future: - if version_tuple >= (3, 7): - server.receives(OpMsg(SON([ - ("insert", coll.name), - ("ordered", True), - ("writeConcern", {"w": 0}), - ("documents", [{}])]), flags=OP_MSG_FLAGS['moreToCome'])) - else: - server.receives(OpInsert({'_id': absent})) - - self.assertFalse('_id' in doc) - self.assertIsNone(future()) - - docs = [{}] # One doc in a list. - with going(coll.insert, docs, manipulate=False) as future: - if version_tuple >= (3, 7): - # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. - request = server.receives() - request.assert_matches(OpMsg(SON([ - ("insert", coll.name), - ("ordered", True), - ("documents", [{}])]), flags=0)) - request.reply({"n": 1}) - else: - server.receives(OpInsert({'_id': absent})) - - self.assertFalse('_id' in docs[0]) - self.assertEqual(future(), [None]) - - docs = [{}, {}] # Two docs. - with going(coll.insert, docs, manipulate=False) as future: - if version_tuple >= (3, 7): - # PyMongo 3.7 ordered bulk w:0 writes use implicit w:1. - request = server.receives() - request.assert_matches(OpMsg(SON([ - ("insert", coll.name), - ("ordered", True), - ("documents", [{}, {}])]), flags=0)) - request.reply({"n": 2}) - else: - server.receives(OpInsert({'_id': absent}, {'_id': absent})) - - self.assertFalse('_id' in docs[0]) - self.assertFalse('_id' in docs[1]) - self.assertEqual(future(), [None, None]) - - def test_insert_command_manipulate_false(self): - # Test same three aspects as test_op_insert_manipulate_false does, - # with the "insert" command. - server = MockupDB(auto_ismaster={'maxWireVersion': 2}) - server.run() - self.addCleanup(server.stop) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - doc = {} - with going(client.db.coll.insert, doc, manipulate=False) as future: - r = server.receives(Command("insert", "coll", documents=[{}])) - # MockupDB doesn't understand "absent" in subdocuments yet. - self.assertFalse('_id' in r.doc['documents'][0]) - r.ok() - - self.assertFalse('_id' in doc) - self.assertIsNone(future()) - - docs = [{}] # One doc in a list. - with going(client.db.coll.insert, docs, manipulate=False) as future: - r = server.receives(Command("insert", "coll", documents=[{}])) - self.assertFalse('_id' in r.doc['documents'][0]) - r.ok() - - self.assertFalse('_id' in docs[0]) - self.assertEqual(future(), [None]) - - docs = [{}, {}] # Two docs. - with going(client.db.coll.insert, docs, manipulate=False) as future: - r = server.receives(Command("insert", "coll", documents=[{}, {}])) - self.assertFalse('_id' in r.doc['documents'][0]) - self.assertFalse('_id' in r.doc['documents'][1]) - r.ok() - - self.assertFalse('_id' in docs[0]) - self.assertFalse('_id' in docs[1]) - self.assertEqual(future(), [None, None]) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index 7483e80df2..b4787ff624 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -23,29 +23,6 @@ class TestListIndexes(unittest.TestCase): - def test_list_indexes_opquery(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 3}) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) - with going(client.test.collection.list_indexes) as cursor: - request = server.receives( - listIndexes='collection', namespace='test') - request.reply({'cursor': { - 'firstBatch': [{'name': 'index_0'}], - 'id': 123}}) - - with going(list, cursor()) as indexes: - request = server.receives(OpGetMore, - namespace='test.collection', - cursor_id=123) - - request.reply([{'name': 'index_1'}], cursor_id=0) - - self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) - for index_info in indexes(): - self.assertIsInstance(index_info, SON) def test_list_indexes_command(self): server = MockupDB(auto_ismaster={'maxWireVersion': 6}) diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py index 89d17a133f..9bd65a1764 100644 --- a/test/mockupdb/test_max_staleness.py +++ b/test/mockupdb/test_max_staleness.py @@ -21,7 +21,7 @@ class TestMaxStalenessMongos(unittest.TestCase): def test_mongos(self): mongos = MockupDB() - mongos.autoresponds('ismaster', maxWireVersion=5, + mongos.autoresponds('ismaster', maxWireVersion=6, ismaster=True, msg='isdbgrid') mongos.run() self.addCleanup(mongos.stop) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 9f57dd0f43..d13af3562b 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -21,7 +21,7 @@ except ImportError: from Queue import Queue -from mockupdb import MockupDB, go +from mockupdb import MockupDB, go, OpMsg from pymongo import MongoClient import unittest diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 1fe2ea5869..ccd40c2cd7 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -15,7 +15,7 @@ import itertools from bson import SON -from mockupdb import MockupDB, going +from mockupdb import MockupDB, going, OpMsg, go from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import (make_read_preference, read_pref_mode_from_name, @@ -29,7 +29,7 @@ class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): server = MockupDB() server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=5) + minWireVersion=2, maxWireVersion=6) self.addCleanup(server.stop) server.run() @@ -39,18 +39,16 @@ def test_aggregate(self): with going(collection.aggregate, []): command = server.receives(aggregate='collection', pipeline=[]) self.assertFalse(command.slave_ok, 'SlaveOkay set') - self.assertNotIn('$readPreference', command) command.ok(result=[{}]) secondary_collection = collection.with_options( read_preference=ReadPreference.SECONDARY) with going(secondary_collection.aggregate, []): - command = server.receives( - {'$query': SON([('aggregate', 'collection'), - ('pipeline', []), - ('cursor', {})]), - '$readPreference': {'mode': 'secondary'}}) + + command = server.receives(OpMsg({"aggregate": "collection", + "pipeline": [], + '$readPreference': {'mode': 'secondary'}})) command.ok(result=[{}]) self.assertTrue(command.slave_ok, 'SlaveOkay not set') @@ -61,37 +59,28 @@ def test(self): self.addCleanup(server.stop) server.run() server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=5) + minWireVersion=2, maxWireVersion=6) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(server.uri, read_preference=pref) self.addCleanup(client.close) - with going(operation.function, client) as future: + + with going(operation.function, client): request = server.receive() request.reply(operation.reply) - future() # No error. - if operation.op_type == 'always-use-secondary': self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get('$readPreference')) slave_ok = mode != 'primary' - self.assertIn('$query', request.doc) elif operation.op_type == 'must-use-primary': - self.assertNotIn('$readPreference', request) - self.assertNotIn('$query', request.doc) slave_ok = False elif operation.op_type == 'may-use-secondary': slave_ok = mode != 'primary' - if mode in ('primary', 'secondaryPreferred'): - self.assertNotIn('$readPreference', request) - self.assertNotIn('$query', request.doc) - else: - self.assertEqual(pref.document, - request.doc.get('$readPreference')) - self.assertIn('$query', request.doc) + self.assertEqual(pref.document, + request.doc.get('$readPreference')) else: self.fail('unrecognized op_type %r' % operation.op_type) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index dc574226bc..35e70cebfc 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -15,7 +15,7 @@ from collections import namedtuple from mockupdb import MockupDB, going, OpMsg, OpMsgReply, OP_MSG_FLAGS -from pymongo import MongoClient, WriteConcern, version_tuple +from pymongo import MongoClient, WriteConcern from pymongo.operations import InsertOne, UpdateOne, DeleteOne from pymongo.cursor import CursorType @@ -125,54 +125,6 @@ request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), reply=None), # Legacy methods - Operation( - 'insert', - lambda coll: coll.insert({}), - request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), - Operation( - 'insert-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert({}), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'insert-w0-argument', - lambda coll: coll.insert({}, w=0), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'update', - lambda coll: coll.update({"_id": 1}, {"new": 1}), - request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), - Operation( - 'update-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).update({"_id": 1}, {"new": 1}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'update-w0-argument', - lambda coll: coll.update({"_id": 1}, {"new": 1}, w=0), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'remove', - lambda coll: coll.remove({"_id": 1}), - request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), - Operation( - 'remove-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).remove({"_id": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), - Operation( - 'remove-w0-argument', - lambda coll: coll.remove({"_id": 1}, w=0), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), Operation( 'bulk_write_insert', lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), @@ -303,8 +255,7 @@ def _test_operation(self, op): replies = [op.reply] for expected_request in expected_requests: - request = self.server.receives() - request.assert_matches(expected_request) + request = self.server.receives(expected_request) reply = None if replies: reply = replies.pop(0) @@ -317,24 +268,21 @@ def _test_operation(self, op): future() # No error. -def operation_test(op, decorator): - @decorator() +def operation_test(op): def test(self): self._test_operation(op) return test -def create_tests(ops, decorator): +def create_tests(ops): for op in ops: test_name = "test_op_msg_%s" % (op.name,) - setattr(TestOpMsg, test_name, operation_test(op, decorator)) + setattr(TestOpMsg, test_name, operation_test(op)) -create_tests(operations, lambda: unittest.skipUnless( - version_tuple >= (3, 7), "requires PyMongo 3.7")) +create_tests(operations) -create_tests(operations_312, lambda: unittest.skipUnless( - version_tuple >= (3, 12), "requires PyMongo 3.12")) +create_tests(operations_312) if __name__ == '__main__': unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 925a00f6a5..ba359a5e05 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -16,7 +16,7 @@ import itertools from mockupdb import MockupDB, going, CommandBase -from pymongo import MongoClient, ReadPreference, version_tuple +from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import (make_read_preference, read_pref_mode_from_name, _MONGOS_MODES) @@ -31,8 +31,6 @@ class OpMsgReadPrefBase(unittest.TestCase): @classmethod def setUpClass(cls): super(OpMsgReadPrefBase, cls).setUpClass() - if version_tuple < (3, 7): - raise unittest.SkipTest("requires PyMongo 3.7") @classmethod def add_test(cls, mode, test_name, test): @@ -159,11 +157,10 @@ def test(self): expected_pref = pref else: self.fail('unrecognized op_type %r' % operation.op_type) - # For single mongod we send primaryPreferred instead of primary. - if expected_pref == ReadPreference.PRIMARY and self.single_mongod: + if (expected_pref == ReadPreference.PRIMARY and self.single_mongod + and operation.name != "command"): expected_pref = ReadPreference.PRIMARY_PREFERRED - with going(operation.function, client) as future: request = expected_server.receive() request.reply(operation.reply) diff --git a/test/mockupdb/test_projection.py b/test/mockupdb/test_projection.py deleted file mode 100644 index 0b74c22cbd..0000000000 --- a/test/mockupdb/test_projection.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2018-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""PyMongo shouldn't append projection fields to "find" command, PYTHON-1479.""" - -from bson import SON -from mockupdb import Command, MockupDB, OpQuery, going -from pymongo import MongoClient - -import unittest - - -class TestProjection(unittest.TestCase): - def test_projection(self): - q = {} - fields = {'foo': True} - - # OP_QUERY, - server = MockupDB(auto_ismaster=True, - min_wire_version=0, max_wire_version=3) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - cursor = client.test.collection.find(q, fields) - with going(next, cursor): - request = server.receives(OpQuery(q, fields=fields)) - request.reply([], cursor_id=0) - - # "find" command. - server = MockupDB(auto_ismaster=True, - min_wire_version=0, max_wire_version=4) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - cursor = client.test.collection.find(q, fields) - cmd = Command(SON([('find', 'collection'), ('filter', q), - ('projection', fields)])) - - with going(next, cursor): - request = server.receives(cmd) - request.ok(cursor={'id': 0, 'firstBatch': []}) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 033cdeff19..21813f7b8e 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -15,59 +15,18 @@ """Test PyMongo query and read preference with a sharded cluster.""" from bson import SON -from pymongo import MongoClient, version_tuple +from pymongo import MongoClient from pymongo.read_preferences import (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) -from mockupdb import MockupDB, going, Command, OpMsg +from mockupdb import MockupDB, going, OpMsg import unittest class TestQueryAndReadModeSharded(unittest.TestCase): - def test_query_and_read_mode_sharded_op_query(self): - server = MockupDB() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=5) - server.run() - self.addCleanup(server.stop) - - client = MongoClient(server.uri) - self.addCleanup(client.close) - - modes_without_query = ( - Primary(), - SecondaryPreferred(),) - - modes_with_query = ( - PrimaryPreferred(), - Secondary(), - Nearest(), - SecondaryPreferred([{'tag': 'value'}]),) - - find_command = SON([('find', 'test'), ('filter', {'a': 1})]) - for query in ({'a': 1}, {'$query': {'a': 1}},): - for mode in modes_with_query + modes_without_query: - collection = client.db.get_collection('test', - read_preference=mode) - cursor = collection.find(query.copy()) - with going(next, cursor): - request = server.receives() - if mode in modes_without_query: - # Filter is hoisted out of $query. - request.assert_matches(Command(find_command)) - self.assertFalse('$readPreference' in request) - else: - # Command is nested in $query. - request.assert_matches(Command( - SON([('$query', find_command), - ('$readPreference', mode.document)]))) - - request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) - - @unittest.skipUnless(version_tuple >= (3, 7), "requires PyMongo 3.7") def test_query_and_read_mode_sharded_op_msg(self): """Test OP_MSG sends non-primary $readPreference and never $query.""" server = MockupDB() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 27b55f3180..86c2085e39 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -18,7 +18,7 @@ from mockupdb import MockupDB, going, wait_until from pymongo.server_type import SERVER_TYPE from pymongo.errors import ConnectionFailure -from pymongo import MongoClient, version_tuple +from pymongo import MongoClient import unittest from operations import operations @@ -44,8 +44,7 @@ def responder(request): kwargs = {'socketTimeoutMS': 100} # Disable retryable reads when pymongo supports it. - if version_tuple[:3] >= (3, 9): - kwargs['retryReads'] = False + kwargs['retryReads'] = False self.client = MongoClient(self.server.uri, **kwargs) wait_until(lambda: self.client.nodes, 'connect to standalone') @@ -103,8 +102,9 @@ def _test_not_master(self, operation): with self.assertRaises(ConnectionFailure): with going(operation.function, self.client): - self.server.receives().replies(operation.not_master) + request = self.server.receives() before = self.ismaster_time + request.replies(operation.not_master) time.sleep(1) # Server is rediscovered. @@ -139,6 +139,7 @@ def generate_reset_tests(): test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) + generate_reset_tests() if __name__ == '__main__': diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 4a0725a869..83c0f925a4 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -67,11 +67,7 @@ def test(self): request = self.server.receive() request.reply(operation.reply) - self.assertEqual(topology_type_name(client), 'Single') - if slave_ok: - self.assertTrue(request.slave_ok, 'SlaveOkay not set') - else: - self.assertFalse(request.slave_ok, 'SlaveOkay set') + self.assertIn(topology_type_name(client), ['Sharded', 'Single']) return test diff --git a/test/mockupdb/test_starting_from_overflow.py b/test/mockupdb/test_starting_from_overflow.py deleted file mode 100644 index d94cab0ff3..0000000000 --- a/test/mockupdb/test_starting_from_overflow.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test that PyMongo ignores the startingFrom field, PYTHON-945.""" - -from mockupdb import going, MockupDB, OpGetMore, OpQuery, Command -from pymongo import MongoClient - -import unittest - - -class TestStartingFromOverflow(unittest.TestCase): - def test_query(self): - server = MockupDB(auto_ismaster=True, - min_wire_version=0, max_wire_version=3) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - cursor = client.test.collection.find() - with going(list, cursor) as docs: - request = server.receives(OpQuery) - request.reply({'a': 1}, cursor_id=123, starting_from=-7) - request = server.receives(OpGetMore, cursor_id=123) - request.reply({'a': 2}, starting_from=-3, cursor_id=0) - - self.assertEqual([{'a': 1}, {'a': 2}], docs()) - - def test_aggregate(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 3}) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - with going(client.test.collection.aggregate, []) as cursor: - request = server.receives(Command) - request.reply({'cursor': { - 'id': 123, - 'firstBatch': [{'a': 1}]}}) - - with going(list, cursor()) as docs: - request = server.receives(OpGetMore, cursor_id=123) - request.reply({'a': 2}, starting_from=-3, cursor_id=0) - - self.assertEqual([{'a': 1}, {'a': 2}], docs()) - - def test_find_command(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 4}) - server.run() - self.addCleanup(server.stop) - client = MongoClient(server.uri) - with going(list, client.test.collection.find()) as docs: - server.receives(Command).reply({'cursor': { - 'id': 123, - 'firstBatch': [{'a': 1}]}}) - - request = server.receives(Command("getMore", 123)) - request.reply({'cursor': { - 'id': 0, - 'nextBatch': [{'a': 2}]}}, - starting_from=-3) - - self.assertEqual([{'a': 1}, {'a': 2}], docs()) - - -if __name__ == '__main__': - unittest.main() From 5b8b09ac4f925d0cbaa9532aba8b66a572ccbd03 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 17 Nov 2021 18:18:41 -0800 Subject: [PATCH 0529/2111] PYTHON-3020 Properly mark server unknown after "not master" errors without a code (#797) Fix prefer-error-code SDAM test. --- .evergreen/run-mockupdb-tests.sh | 2 +- pymongo/topology.py | 4 +++- .../errors/prefer-error-code.json | 4 ++-- test/mockupdb/operations.py | 18 +++++++----------- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh index a0b67302a4..a76ed6316f 100755 --- a/.evergreen/run-mockupdb-tests.sh +++ b/.evergreen/run-mockupdb-tests.sh @@ -8,7 +8,7 @@ set -o errexit ${PYTHON_BINARY} setup.py clean createvirtualenv ${PYTHON_BINARY} mockuptests -trap "deactivate, rm -rf mockuptests" EXIT HUP +trap "deactivate; rm -rf mockuptests" EXIT HUP # Install PyMongo from git clone so mockup-tests don't # download it from pypi. diff --git a/pymongo/topology.py b/pymongo/topology.py index 9139c1492e..6f26cff617 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -633,7 +633,9 @@ def _handle_error(self, address, err_ctx): if hasattr(error, 'code'): err_code = error.code else: - err_code = error.details.get('code', -1) + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get('code', default) if err_code in helpers._NOT_PRIMARY_CODES: is_shutting_down = err_code in helpers._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. diff --git a/test/discovery_and_monitoring/errors/prefer-error-code.json b/test/discovery_and_monitoring/errors/prefer-error-code.json index 21d123f429..eb00b69613 100644 --- a/test/discovery_and_monitoring/errors/prefer-error-code.json +++ b/test/discovery_and_monitoring/errors/prefer-error-code.json @@ -52,7 +52,7 @@ } }, { - "description": "errmsg \"not writable primary\" gets ignored when error code exists", + "description": "errmsg \"not master\" gets ignored when error code exists", "applicationErrors": [ { "address": "a:27017", @@ -61,7 +61,7 @@ "type": "command", "response": { "ok": 0, - "errmsg": "not writable primary", + "errmsg": "not master", "code": 1 } } diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 9fb0ca16b6..47890f80ee 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -52,11 +52,7 @@ sharded cluster (PYTHON-868). """ -not_master_reply_to_query = OpMsgReply( - {'$err': 'not master'}, - flags=REPLY_FLAGS['QueryFailure']) - -not_master_reply_to_command = OpMsgReply(ok=0, errmsg='not master') +not_master_reply = OpMsgReply(ok=0, errmsg='not master') operations = [ Operation( @@ -64,31 +60,31 @@ lambda client: client.db.collection.find_one(), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='may-use-secondary', - not_master=not_master_reply_to_query), + not_master=not_master_reply), Operation( 'count', lambda client: client.db.collection.count_documents({}), reply={'n': 1}, op_type='may-use-secondary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'aggregate', lambda client: client.db.collection.aggregate([]), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='may-use-secondary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'options', lambda client: client.db.collection.options(), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='must-use-primary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'command', lambda client: client.db.command('foo'), reply={'ok': 1}, op_type='must-use-primary', # Ignores client's read preference. - not_master=not_master_reply_to_command), + not_master=not_master_reply), Operation( 'secondary command', lambda client: @@ -101,7 +97,7 @@ lambda client: client.db.collection.index_information(), reply={'cursor': {'id': 0, 'firstBatch': []}}, op_type='must-use-primary', - not_master=not_master_reply_to_command), + not_master=not_master_reply), ] From 79659063c592726a36c7a1770b5c9b8ae78d7b04 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Nov 2021 16:28:02 -0800 Subject: [PATCH 0530/2111] PYTHON-3021 Send primaryPreferred when connected to standalone servers (#799) --- pymongo/mongo_client.py | 2 +- test/mockupdb/test_op_msg_read_preference.py | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 90e6a7706f..dd9dd6d33a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1170,8 +1170,8 @@ def _socket_for_reads(self, read_preference, session): # for topology type Single." # Thread safe: if the type is single it cannot change. topology = self._get_topology() - single = topology.description.topology_type == TOPOLOGY_TYPE.Single server = self._select_server(read_preference, session) + single = topology.description.topology_type == TOPOLOGY_TYPE.Single with self._get_socket(server, session) as sock_info: secondary_ok = (single and not sock_info.is_mongos) or ( diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index ba359a5e05..6ecc229ea1 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -158,15 +158,12 @@ def test(self): else: self.fail('unrecognized op_type %r' % operation.op_type) # For single mongod we send primaryPreferred instead of primary. - if (expected_pref == ReadPreference.PRIMARY and self.single_mongod - and operation.name != "command"): + if expected_pref == ReadPreference.PRIMARY and self.single_mongod: expected_pref = ReadPreference.PRIMARY_PREFERRED - with going(operation.function, client) as future: + with going(operation.function, client): request = expected_server.receive() request.reply(operation.reply) - future() # No error. - self.assertEqual(expected_pref.document, request.doc.get('$readPreference')) self.assertNotIn('$query', request.doc) From cddae7ae938b3d6493435778223fb03240a6ccaf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Nov 2021 16:28:42 -0800 Subject: [PATCH 0531/2111] PYTHON-2919 Remove MongoDB 2.6-3.4 from performance testing (#798) --- .evergreen/perf.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 3079eb9b0e..70e83ff582 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -200,28 +200,6 @@ post: - func: "cleanup" tasks: - - name: "perf-3.0-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.0" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-3.4-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.4" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - name: "perf-3.6-standalone" tags: ["perf"] commands: @@ -273,8 +251,6 @@ buildvariants: batchtime: 10080 # 7 days run_on: centos6-perf tasks: - - name: "perf-3.0-standalone" - - name: "perf-3.4-standalone" - name: "perf-3.6-standalone" - name: "perf-4.0-standalone" - name: "perf-4.2-standalone" From 2af521ec03eab88ad5c9ebef0f73025c00489604 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 19 Nov 2021 12:15:23 -0800 Subject: [PATCH 0532/2111] PYTHON-2984 Fix retry behavior for bulk write writeConcernError (#800) --- pymongo/bulk.py | 8 +- pymongo/helpers.py | 21 +- .../{ => legacy}/bulkWrite-errorLabels.json | 0 .../{ => legacy}/bulkWrite-serverErrors.json | 8 +- .../{ => legacy}/bulkWrite.json | 0 .../{ => legacy}/deleteMany.json | 0 .../{ => legacy}/deleteOne-errorLabels.json | 0 .../{ => legacy}/deleteOne-serverErrors.json | 8 +- .../{ => legacy}/deleteOne.json | 0 .../findOneAndDelete-errorLabels.json | 0 .../findOneAndDelete-serverErrors.json | 8 +- .../{ => legacy}/findOneAndDelete.json | 0 .../findOneAndReplace-errorLabels.json | 0 .../findOneAndReplace-serverErrors.json | 8 +- .../{ => legacy}/findOneAndReplace.json | 0 .../findOneAndUpdate-errorLabels.json | 0 .../findOneAndUpdate-serverErrors.json | 8 +- .../{ => legacy}/findOneAndUpdate.json | 0 .../{ => legacy}/insertMany-errorLabels.json | 0 .../{ => legacy}/insertMany-serverErrors.json | 8 +- .../{ => legacy}/insertMany.json | 0 .../{ => legacy}/insertOne-errorLabels.json | 0 .../{ => legacy}/insertOne-serverErrors.json | 40 ++-- .../{ => legacy}/insertOne.json | 0 .../{ => legacy}/replaceOne-errorLabels.json | 0 .../{ => legacy}/replaceOne-serverErrors.json | 8 +- .../{ => legacy}/replaceOne.json | 0 .../{ => legacy}/updateMany.json | 0 .../{ => legacy}/updateOne-errorLabels.json | 0 .../{ => legacy}/updateOne-serverErrors.json | 8 +- .../{ => legacy}/updateOne.json | 0 .../unified/bulkWrite-serverErrors.json | 205 ++++++++++++++++++ .../unified/insertOne-serverErrors.json | 173 +++++++++++++++ test/test_retryable_writes.py | 2 +- test/test_retryable_writes_unified.py | 33 +++ test/transactions/legacy/error-labels.json | 8 +- .../legacy/mongos-recovery-token.json | 8 +- test/transactions/legacy/retryable-abort.json | 24 +- .../transactions/legacy/retryable-commit.json | 24 +- 39 files changed, 515 insertions(+), 95 deletions(-) rename test/retryable_writes/{ => legacy}/bulkWrite-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/bulkWrite-serverErrors.json (97%) rename test/retryable_writes/{ => legacy}/bulkWrite.json (100%) rename test/retryable_writes/{ => legacy}/deleteMany.json (100%) rename test/retryable_writes/{ => legacy}/deleteOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/deleteOne-serverErrors.json (95%) rename test/retryable_writes/{ => legacy}/deleteOne.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndDelete-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndDelete-serverErrors.json (95%) rename test/retryable_writes/{ => legacy}/findOneAndDelete.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndReplace-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndReplace-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/findOneAndReplace.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndUpdate-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/findOneAndUpdate-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/findOneAndUpdate.json (100%) rename test/retryable_writes/{ => legacy}/insertMany-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/insertMany-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/insertMany.json (100%) rename test/retryable_writes/{ => legacy}/insertOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/insertOne-serverErrors.json (97%) rename test/retryable_writes/{ => legacy}/insertOne.json (100%) rename test/retryable_writes/{ => legacy}/replaceOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/replaceOne-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/replaceOne.json (100%) rename test/retryable_writes/{ => legacy}/updateMany.json (100%) rename test/retryable_writes/{ => legacy}/updateOne-errorLabels.json (100%) rename test/retryable_writes/{ => legacy}/updateOne-serverErrors.json (96%) rename test/retryable_writes/{ => legacy}/updateOne.json (100%) create mode 100644 test/retryable_writes/unified/bulkWrite-serverErrors.json create mode 100644 test/retryable_writes/unified/insertOne-serverErrors.json create mode 100644 test/test_retryable_writes_unified.py diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 829b482c95..1bb8edf943 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -28,7 +28,7 @@ validate_is_document_type, validate_ok_for_replace, validate_ok_for_update) -from pymongo.helpers import _RETRYABLE_ERROR_CODES +from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc from pymongo.collation import validate_collation_or_none from pymongo.errors import (BulkWriteError, ConfigurationError, @@ -119,9 +119,9 @@ def _merge_command(run, full_result, offset, result): replacement["op"] = run.ops[idx] full_result["writeErrors"].append(replacement) - wc_error = result.get("writeConcernError") - if wc_error: - full_result["writeConcernErrors"].append(wc_error) + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) def _raise_bulk_write_error(full_result): diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 55d53d836e..a9d40d8103 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -188,6 +188,18 @@ def _raise_write_concern_error(error): error.get("errmsg"), error.get("code"), error) +def _get_wce_doc(result): + """Return the writeConcernError or None.""" + wce = result.get("writeConcernError") + if wce: + # The server reports errorLabels at the top level but it's more + # convenient to attach it to the writeConcernError doc itself. + error_labels = result.get("errorLabels") + if error_labels: + wce["errorLabels"] = error_labels + return wce + + def _check_write_command_response(result): """Backward compatibility helper for write command error handling. """ @@ -196,12 +208,9 @@ def _check_write_command_response(result): if write_errors: _raise_last_write_error(write_errors) - error = result.get("writeConcernError") - if error: - error_labels = result.get("errorLabels") - if error_labels: - error.update({'errorLabels': error_labels}) - _raise_write_concern_error(error) + wce = _get_wce_doc(result) + if wce: + _raise_write_concern_error(wce) def _fields_list_to_dict(fields, option_name): diff --git a/test/retryable_writes/bulkWrite-errorLabels.json b/test/retryable_writes/legacy/bulkWrite-errorLabels.json similarity index 100% rename from test/retryable_writes/bulkWrite-errorLabels.json rename to test/retryable_writes/legacy/bulkWrite-errorLabels.json diff --git a/test/retryable_writes/bulkWrite-serverErrors.json b/test/retryable_writes/legacy/bulkWrite-serverErrors.json similarity index 97% rename from test/retryable_writes/bulkWrite-serverErrors.json rename to test/retryable_writes/legacy/bulkWrite-serverErrors.json index 9d792ceafb..1e6cc74c05 100644 --- a/test/retryable_writes/bulkWrite-serverErrors.json +++ b/test/retryable_writes/legacy/bulkWrite-serverErrors.json @@ -119,12 +119,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/bulkWrite.json b/test/retryable_writes/legacy/bulkWrite.json similarity index 100% rename from test/retryable_writes/bulkWrite.json rename to test/retryable_writes/legacy/bulkWrite.json diff --git a/test/retryable_writes/deleteMany.json b/test/retryable_writes/legacy/deleteMany.json similarity index 100% rename from test/retryable_writes/deleteMany.json rename to test/retryable_writes/legacy/deleteMany.json diff --git a/test/retryable_writes/deleteOne-errorLabels.json b/test/retryable_writes/legacy/deleteOne-errorLabels.json similarity index 100% rename from test/retryable_writes/deleteOne-errorLabels.json rename to test/retryable_writes/legacy/deleteOne-errorLabels.json diff --git a/test/retryable_writes/deleteOne-serverErrors.json b/test/retryable_writes/legacy/deleteOne-serverErrors.json similarity index 95% rename from test/retryable_writes/deleteOne-serverErrors.json rename to test/retryable_writes/legacy/deleteOne-serverErrors.json index 4eab2fa296..a1a27838de 100644 --- a/test/retryable_writes/deleteOne-serverErrors.json +++ b/test/retryable_writes/legacy/deleteOne-serverErrors.json @@ -75,12 +75,12 @@ "failCommands": [ "delete" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/deleteOne.json b/test/retryable_writes/legacy/deleteOne.json similarity index 100% rename from test/retryable_writes/deleteOne.json rename to test/retryable_writes/legacy/deleteOne.json diff --git a/test/retryable_writes/findOneAndDelete-errorLabels.json b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json similarity index 100% rename from test/retryable_writes/findOneAndDelete-errorLabels.json rename to test/retryable_writes/legacy/findOneAndDelete-errorLabels.json diff --git a/test/retryable_writes/findOneAndDelete-serverErrors.json b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json similarity index 95% rename from test/retryable_writes/findOneAndDelete-serverErrors.json rename to test/retryable_writes/legacy/findOneAndDelete-serverErrors.json index 4c10861614..c18b63f456 100644 --- a/test/retryable_writes/findOneAndDelete-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json @@ -81,12 +81,12 @@ "failCommands": [ "findAndModify" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/findOneAndDelete.json b/test/retryable_writes/legacy/findOneAndDelete.json similarity index 100% rename from test/retryable_writes/findOneAndDelete.json rename to test/retryable_writes/legacy/findOneAndDelete.json diff --git a/test/retryable_writes/findOneAndReplace-errorLabels.json b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json similarity index 100% rename from test/retryable_writes/findOneAndReplace-errorLabels.json rename to test/retryable_writes/legacy/findOneAndReplace-errorLabels.json diff --git a/test/retryable_writes/findOneAndReplace-serverErrors.json b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json similarity index 96% rename from test/retryable_writes/findOneAndReplace-serverErrors.json rename to test/retryable_writes/legacy/findOneAndReplace-serverErrors.json index 64c69e2f6d..944a3af848 100644 --- a/test/retryable_writes/findOneAndReplace-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json @@ -85,12 +85,12 @@ "failCommands": [ "findAndModify" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/findOneAndReplace.json b/test/retryable_writes/legacy/findOneAndReplace.json similarity index 100% rename from test/retryable_writes/findOneAndReplace.json rename to test/retryable_writes/legacy/findOneAndReplace.json diff --git a/test/retryable_writes/findOneAndUpdate-errorLabels.json b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json similarity index 100% rename from test/retryable_writes/findOneAndUpdate-errorLabels.json rename to test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json diff --git a/test/retryable_writes/findOneAndUpdate-serverErrors.json b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json similarity index 96% rename from test/retryable_writes/findOneAndUpdate-serverErrors.json rename to test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json index 9f54604992..e83a610615 100644 --- a/test/retryable_writes/findOneAndUpdate-serverErrors.json +++ b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json @@ -86,12 +86,12 @@ "failCommands": [ "findAndModify" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/findOneAndUpdate.json b/test/retryable_writes/legacy/findOneAndUpdate.json similarity index 100% rename from test/retryable_writes/findOneAndUpdate.json rename to test/retryable_writes/legacy/findOneAndUpdate.json diff --git a/test/retryable_writes/insertMany-errorLabels.json b/test/retryable_writes/legacy/insertMany-errorLabels.json similarity index 100% rename from test/retryable_writes/insertMany-errorLabels.json rename to test/retryable_writes/legacy/insertMany-errorLabels.json diff --git a/test/retryable_writes/insertMany-serverErrors.json b/test/retryable_writes/legacy/insertMany-serverErrors.json similarity index 96% rename from test/retryable_writes/insertMany-serverErrors.json rename to test/retryable_writes/legacy/insertMany-serverErrors.json index 7b45b506c9..fe8dbf4a62 100644 --- a/test/retryable_writes/insertMany-serverErrors.json +++ b/test/retryable_writes/legacy/insertMany-serverErrors.json @@ -92,12 +92,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/insertMany.json b/test/retryable_writes/legacy/insertMany.json similarity index 100% rename from test/retryable_writes/insertMany.json rename to test/retryable_writes/legacy/insertMany.json diff --git a/test/retryable_writes/insertOne-errorLabels.json b/test/retryable_writes/legacy/insertOne-errorLabels.json similarity index 100% rename from test/retryable_writes/insertOne-errorLabels.json rename to test/retryable_writes/legacy/insertOne-errorLabels.json diff --git a/test/retryable_writes/insertOne-serverErrors.json b/test/retryable_writes/legacy/insertOne-serverErrors.json similarity index 97% rename from test/retryable_writes/insertOne-serverErrors.json rename to test/retryable_writes/legacy/insertOne-serverErrors.json index e8571f8cf9..5179a6ab75 100644 --- a/test/retryable_writes/insertOne-serverErrors.json +++ b/test/retryable_writes/legacy/insertOne-serverErrors.json @@ -761,12 +761,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -812,12 +812,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -863,12 +863,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -914,12 +914,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, @@ -965,12 +965,12 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/insertOne.json b/test/retryable_writes/legacy/insertOne.json similarity index 100% rename from test/retryable_writes/insertOne.json rename to test/retryable_writes/legacy/insertOne.json diff --git a/test/retryable_writes/replaceOne-errorLabels.json b/test/retryable_writes/legacy/replaceOne-errorLabels.json similarity index 100% rename from test/retryable_writes/replaceOne-errorLabels.json rename to test/retryable_writes/legacy/replaceOne-errorLabels.json diff --git a/test/retryable_writes/replaceOne-serverErrors.json b/test/retryable_writes/legacy/replaceOne-serverErrors.json similarity index 96% rename from test/retryable_writes/replaceOne-serverErrors.json rename to test/retryable_writes/legacy/replaceOne-serverErrors.json index 7457228cd7..6b35722e12 100644 --- a/test/retryable_writes/replaceOne-serverErrors.json +++ b/test/retryable_writes/legacy/replaceOne-serverErrors.json @@ -85,12 +85,12 @@ "failCommands": [ "update" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/replaceOne.json b/test/retryable_writes/legacy/replaceOne.json similarity index 100% rename from test/retryable_writes/replaceOne.json rename to test/retryable_writes/legacy/replaceOne.json diff --git a/test/retryable_writes/updateMany.json b/test/retryable_writes/legacy/updateMany.json similarity index 100% rename from test/retryable_writes/updateMany.json rename to test/retryable_writes/legacy/updateMany.json diff --git a/test/retryable_writes/updateOne-errorLabels.json b/test/retryable_writes/legacy/updateOne-errorLabels.json similarity index 100% rename from test/retryable_writes/updateOne-errorLabels.json rename to test/retryable_writes/legacy/updateOne-errorLabels.json diff --git a/test/retryable_writes/updateOne-serverErrors.json b/test/retryable_writes/legacy/updateOne-serverErrors.json similarity index 96% rename from test/retryable_writes/updateOne-serverErrors.json rename to test/retryable_writes/legacy/updateOne-serverErrors.json index 1160198019..cf274f57e0 100644 --- a/test/retryable_writes/updateOne-serverErrors.json +++ b/test/retryable_writes/legacy/updateOne-serverErrors.json @@ -86,12 +86,12 @@ "failCommands": [ "update" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/retryable_writes/updateOne.json b/test/retryable_writes/legacy/updateOne.json similarity index 100% rename from test/retryable_writes/updateOne.json rename to test/retryable_writes/legacy/updateOne.json diff --git a/test/retryable_writes/unified/bulkWrite-serverErrors.json b/test/retryable_writes/unified/bulkWrite-serverErrors.json new file mode 100644 index 0000000000..23cf2869a6 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite-serverErrors.json @@ -0,0 +1,205 @@ +{ + "description": "retryable-writes bulkWrite serverErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite succeeds after retryable writeConcernError in first batch", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 2 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3 + } + }, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": { + "_id": 2 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json b/test/retryable_writes/unified/insertOne-serverErrors.json new file mode 100644 index 0000000000..77245a8197 --- /dev/null +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -0,0 +1,173 @@ +{ + "description": "retryable-writes insertOne serverErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne succeeds after retryable writeConcernError", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index ffc93eb2fa..f3f09095d7 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -60,7 +60,7 @@ # Location of JSON test specifications. _TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes') + os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'legacy') class TestAllScenarios(SpecRunner): diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py new file mode 100644 index 0000000000..4e851de273 --- /dev/null +++ b/test/test_retryable_writes_unified.py @@ -0,0 +1,33 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'unified') + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/transactions/legacy/error-labels.json b/test/transactions/legacy/error-labels.json index a57f216b9b..0be19c731c 100644 --- a/test/transactions/legacy/error-labels.json +++ b/test/transactions/legacy/error-labels.json @@ -963,12 +963,12 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } }, diff --git a/test/transactions/legacy/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json index 02c2002f75..da4e9861d1 100644 --- a/test/transactions/legacy/mongos-recovery-token.json +++ b/test/transactions/legacy/mongos-recovery-token.json @@ -180,12 +180,12 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errmsg": "Replication is being shut down", - "errorLabels": [ - "RetryableWriteError" - ] + "errmsg": "Replication is being shut down" } } } diff --git a/test/transactions/legacy/retryable-abort.json b/test/transactions/legacy/retryable-abort.json index b712e80862..13cc7c88fb 100644 --- a/test/transactions/legacy/retryable-abort.json +++ b/test/transactions/legacy/retryable-abort.json @@ -1556,11 +1556,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1673,11 +1673,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1790,11 +1790,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1907,11 +1907,11 @@ "failCommands": [ "abortTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } diff --git a/test/transactions/legacy/retryable-commit.json b/test/transactions/legacy/retryable-commit.json index d83a1d9f52..49148c62d2 100644 --- a/test/transactions/legacy/retryable-commit.json +++ b/test/transactions/legacy/retryable-commit.json @@ -1855,11 +1855,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11600, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -1977,11 +1977,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 11602, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -2099,11 +2099,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 189, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } @@ -2221,11 +2221,11 @@ "failCommands": [ "commitTransaction" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, - "errorLabels": [ - "RetryableWriteError" - ], "errmsg": "Replication is being shut down" } } From 64a4f6e14167e35f4d8305a6c541395062a3cf6a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 23 Nov 2021 15:45:49 -0800 Subject: [PATCH 0533/2111] PYTHON-3024 Update estimatedDocumentCount test for Atlas Data Lake (#802) Migrate data lake testing to ubuntu 18. Ensure mongohouse downloads the right build via VARIANT. --- .evergreen/config.yml | 6 ++++-- test/data_lake/estimatedDocumentCount.json | 21 +++++++++++++++++++-- test/data_lake/getMore.json | 2 +- test/data_lake/listCollections.json | 2 +- test/data_lake/listDatabases.json | 2 +- test/data_lake/runCommand.json | 2 +- 6 files changed, 27 insertions(+), 8 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d4c95105bf..c785d8f14b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -310,7 +310,9 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh + # The mongohouse build script needs to be passed the VARIANT variable, see + # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 + VARIANT=ubuntu1804 bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec type: setup params: @@ -2439,7 +2441,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: - platform: ubuntu-16.04 + platform: ubuntu-18.04 python-version: ["3.6", "3.9"] auth: "auth" c-extensions: "*" diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json index d039a51f06..87b385208d 100644 --- a/test/data_lake/estimatedDocumentCount.json +++ b/test/data_lake/estimatedDocumentCount.json @@ -15,8 +15,25 @@ { "command_started_event": { "command": { - "count": "driverdata" - } + "aggregate": "driverdata", + "pipeline": [ + { + "$collStats": { + "count": {} + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": "$count" + } + } + } + ] + }, + "command_name": "aggregate", + "database_name": "test" } } ] diff --git a/test/data_lake/getMore.json b/test/data_lake/getMore.json index fa1deab4f3..e2e1d4788a 100644 --- a/test/data_lake/getMore.json +++ b/test/data_lake/getMore.json @@ -54,4 +54,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/test/data_lake/listCollections.json b/test/data_lake/listCollections.json index 8d8a8f6c1b..e419f7b3e9 100644 --- a/test/data_lake/listCollections.json +++ b/test/data_lake/listCollections.json @@ -22,4 +22,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/test/data_lake/listDatabases.json b/test/data_lake/listDatabases.json index f8ec9a0bf4..6458148e49 100644 --- a/test/data_lake/listDatabases.json +++ b/test/data_lake/listDatabases.json @@ -21,4 +21,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/test/data_lake/runCommand.json b/test/data_lake/runCommand.json index f72e863ba5..d81ff1a64b 100644 --- a/test/data_lake/runCommand.json +++ b/test/data_lake/runCommand.json @@ -28,4 +28,4 @@ ] } ] -} \ No newline at end of file +} From 37b5195eef2614215b865f20bfdf5dbd4133290d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 23 Nov 2021 16:15:52 -0800 Subject: [PATCH 0534/2111] PYTHON-2434 Automatically combine release wheels + sdist into one archive (#803) --- .evergreen/config.yml | 119 ++++++++++++++++++++++++++++++++++-------- RELEASE.rst | 37 +++++-------- 2 files changed, 109 insertions(+), 47 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c785d8f14b..93b37d504d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -870,6 +870,16 @@ functions: # Remove all Docker images docker rmi -f $(docker images -a -q) &> /dev/null || true + "build release": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + set -o xtrace + ${PREPARE_SHELL} + .evergreen/release.sh + "upload release": - command: archive.targz_pack params: @@ -882,12 +892,63 @@ functions: aws_key: ${aws_key} aws_secret: ${aws_secret} local_file: release-files.tgz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/release/${task_id}-${execution}-release-files.tar.gz + remote_file: ${UPLOAD_BUCKET}/release/${revision}/${task_id}-${execution}-release-files.tar.gz bucket: mciuploads permissions: public-read content_type: ${content_type|application/gzip} display_name: Release files + "download and merge releases": + - command: shell.exec + params: + silent: true + script: | + export AWS_ACCESS_KEY_ID=${aws_key} + export AWS_SECRET_ACCESS_KEY=${aws_secret} + + # Download all the task coverage files. + aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/release/${revision}/ release/ + - command: shell.exec + params: + shell: "bash" + script: | + set -o xtrace + ${PREPARE_SHELL} + # Combine releases into one directory. + ls -la release/ + mkdir releases + # Copy old manylinux release first since we want the newer manylinux + # wheels to override them. + mkdir old_manylinux + if mv release/*old_manylinux* old_manylinux; then + for REL in old_manylinux/*; do + tar zxvf $REL -C releases/ + done + fi + for REL in release/*; do + tar zxvf $REL -C releases/ + done + # Build source distribution. + cd src/ + /opt/python/3.6/bin/python3 setup.py sdist + cp dist/* ../releases + - command: archive.targz_pack + params: + target: "release-files-all.tgz" + source_dir: "releases/" + include: + - "*" + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: release-files-all.tgz + remote_file: ${UPLOAD_BUCKET}/release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/gzip} + display_name: Release files all + pre: - func: "fetch source" - func: "prepare resources" @@ -972,22 +1033,31 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release" - tags: ["release"] + - name: "release-mac" + tags: ["release_tag"] + run_on: macos-1014 + commands: + - func: "build release" + - func: "upload release" + + - name: "release-windows" + tags: ["release_tag"] + run_on: windows-64-vsMulti-small + commands: + - func: "build release" + - func: "upload release" + + - name: "release-manylinux" + tags: ["release_tag"] + run_on: ubuntu2004-large exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). commands: - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - .evergreen/release.sh + - func: "build release" - func: "upload release" - name: "release-old-manylinux" - tags: ["release"] + tags: ["release_tag"] + run_on: ubuntu2004-large exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). commands: - command: shell.exec @@ -1000,6 +1070,16 @@ tasks: .evergreen/build-manylinux.sh BUILD_WITH_TAG - func: "upload release" + - name: "release-combine" + tags: ["release_tag"] + run_on: ubuntu2004-small + depends_on: + - name: "*" + variant: ".release_tag" + patch_optional: true + commands: + - func: "download and merge releases" + # Standard test tasks {{{ - name: "mockupdb" @@ -2530,19 +2610,12 @@ buildvariants: tasks: - name: "load-balancer-test" -- matrix_name: "Release" - matrix_spec: - platform: [ubuntu-20.04, windows-64-vsMulti-small, macos-1014] - display_name: "Release ${platform}" +- name: Release + display_name: Release batchtime: 20160 # 14 days + tags: ["release_tag"] tasks: - - name: "release" - rules: - - if: - platform: ubuntu-20.04 - then: - add_tasks: - - name: "release-old-manylinux" + - ".release_tag" # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available diff --git a/RELEASE.rst b/RELEASE.rst index 220908084b..84b60d9b6a 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -55,48 +55,37 @@ Doing a Release 8. Push commit / tag, eg ``git push && git push --tags``. 9. Pushing a tag will trigger a release process in Evergreen which builds - wheels and eggs for manylinux, macOS, and Windows. Wait for these jobs to - complete and then download the "Release files" archive from each task. See: + wheels for manylinux, macOS, and Windows. Wait for the "release-combine" + task to complete and then download the "Release files all" archive. See: https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release - Unpack each downloaded archive so that we can upload the included files. For - the next steps let's assume we unpacked these files into the following paths:: + The contents should look like this:: - $ ls path/to/manylinux + $ ls path/to/archive + pymongo--cp310-cp310-macosx_10_9_universal2.whl + ... pymongo--cp38-cp38-manylinux2014_x86_64.whl ... - $ ls path/to/windows/ pymongo--cp38-cp38-win_amd64.whl ... - -10. Build the source distribution:: - - $ git clone git@github.com:mongodb/mongo-python-driver.git - $ cd mongo-python-driver - $ git checkout "" - $ python3 setup.py sdist - - This will create the following distribution:: - - $ ls dist pymongo-.tar.gz -11. Upload all the release packages to PyPI with twine:: +10. Upload all the release packages to PyPI with twine:: - $ python3 -m twine upload dist/*.tar.gz path/to/manylinux/* path/to/mac/* path/to/windows/* + $ python3 -m twine upload path/to/archive/* -12. Make sure the new version appears on https://pymongo.readthedocs.io/. If the +11. Make sure the new version appears on https://pymongo.readthedocs.io/. If the new version does not show up automatically, trigger a rebuild of "latest": https://readthedocs.org/projects/pymongo/builds/ -13. Bump the version number to .dev0 in setup.py/__init__.py, +12. Bump the version number to .dev0 in setup.py/__init__.py, commit, push. -14. Publish the release version in Jira. +13. Publish the release version in Jira. -15. Announce the release on: +14. Announce the release on: https://developer.mongodb.com/community/forums/c/community/release-notes/ -16. File a ticket for DOCSP highlighting changes in server version and Python +15. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: https://jira.mongodb.org/browse/DOCSP-13536 From 7de879a9fe8ba09e4b4762b19af220ceaf635434 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 23 Nov 2021 16:56:41 -0800 Subject: [PATCH 0535/2111] PYTHON-3018 Add docs for removed methods from Collection (#801) --- doc/migrate-to-pymongo4.rst | 2 ++ pymongo/collection.py | 7 +++++++ pymongo/database.py | 7 +++++++ pymongo/mongo_client.py | 16 ++++++++++------ 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5acd3a5d12..22071bd3bb 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -1,3 +1,5 @@ +.. _pymongo4-migration-guide: + PyMongo 4 Migration Guide ========================= diff --git a/pymongo/collection.py b/pymongo/collection.py index 8632204b81..774c290235 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -115,6 +115,13 @@ def __init__(self, database, name, create=False, codec_options=None, - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + .. versionchanged:: 3.6 Added ``session`` parameter. diff --git a/pymongo/database.py b/pymongo/database.py index c30d29bde4..dc8c13cbb0 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -69,6 +69,13 @@ def __init__(self, client, name, codec_options=None, read_preference=None, .. seealso:: The MongoDB documentation on `databases `_. + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + .. versionchanged:: 3.2 Added the read_concern option. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dd9dd6d33a..af159da521 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -504,12 +504,16 @@ def __init__( .. seealso:: The MongoDB documentation on `connections `_. .. versionchanged:: 4.0 - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` keyword - arguments. - The default for `uuidRepresentation` was changed from - ``pythonLegacy`` to ``unspecified``. - Added the ``srvServiceName`` and ``maxConnecting`` URI and keyword - argument. + + - Removed the fsync, unlock, is_locked, database_names, and + close_cursor methods. + See the :ref:`pymongo4-migration-guide`. + - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` + keyword arguments. + - The default for `uuidRepresentation` was changed from + ``pythonLegacy`` to ``unspecified``. + - Added the ``srvServiceName`` and ``maxConnecting`` URI and + keyword argument. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. From 2c28149a301bb33b3347bf38f8dbfe085215ca38 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 29 Nov 2021 10:31:12 -0800 Subject: [PATCH 0536/2111] BUMP 4.0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 0ec31e9e7e..608ddb818e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -53,7 +53,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 0, '.dev0') +version_tuple = (4, 0) def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 63a1df4955..9c5fcae8a1 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.0.dev0" +version = "4.0" f = open("README.rst") try: From e3d1d6f5b48101654a05493fd6eec7fe3fa014bd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 29 Nov 2021 10:32:30 -0800 Subject: [PATCH 0537/2111] BUMP 4.0.1.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 608ddb818e..4ac46672e7 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -53,7 +53,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 0) +version_tuple = (4, 0, 1, '.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 9c5fcae8a1..158eb9a42b 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.0" +version = "4.0.1.dev0" f = open("README.rst") try: From 046d789d9f25f1d9984e443837a634fe166b38ca Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 30 Nov 2021 15:02:40 -0800 Subject: [PATCH 0538/2111] PYTHON-2957 Support 'let' option for multiple CRUD commands (#804) --- doc/changelog.rst | 16 ++ pymongo/aggregation.py | 5 +- pymongo/collection.py | 107 ++++++-- pymongo/cursor.py | 11 +- test/crud/unified/aggregate-let.json | 103 -------- test/crud/unified/deleteMany-let.json | 201 +++++++++++++++ test/crud/unified/deleteOne-let.json | 191 +++++++++++++++ test/crud/unified/find-let.json | 148 +++++++++++ test/crud/unified/findOneAndDelete-let.json | 180 ++++++++++++++ test/crud/unified/findOneAndReplace-let.json | 197 +++++++++++++++ test/crud/unified/findOneAndUpdate-let.json | 217 +++++++++++++++++ test/crud/unified/updateMany-let.json | 243 +++++++++++++++++++ test/crud/unified/updateOne-let.json | 215 ++++++++++++++++ test/test_collection.py | 17 ++ 14 files changed, 1721 insertions(+), 130 deletions(-) create mode 100644 test/crud/unified/deleteMany-let.json create mode 100644 test/crud/unified/deleteOne-let.json create mode 100644 test/crud/unified/find-let.json create mode 100644 test/crud/unified/findOneAndDelete-let.json create mode 100644 test/crud/unified/findOneAndReplace-let.json create mode 100644 test/crud/unified/findOneAndUpdate-let.json create mode 100644 test/crud/unified/updateMany-let.json create mode 100644 test/crud/unified/updateOne-let.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 88c1b7cd20..192b456619 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,22 @@ Changelog ========= +Changes in Version 4.1 +---------------------- + +- :meth:`pymongo.collection.Collection.update_one`, + :meth:`pymongo.collection.Collection.update_many`, + :meth:`pymongo.collection.Collection.delete_one`, + :meth:`pymongo.collection.Collection.delete_many`, + :meth:`pymongo.collection.Collection.aggregate`, + :meth:`pymongo.collection.Collection.find_one_and_delete`, + :meth:`pymongo.collection.Collection.find_one_and_replace`, + :meth:`pymongo.collection.Collection.find_one_and_update`, + and :meth:`pymongo.collection.Collection.find` all support a new keyword + argument ``let`` which is a map of parameter names and values. Parameters + can then be accessed as variables in an aggregate expression context. + + Changes in Version 4.0 ---------------------- diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 2a34a05d3a..4a565ee134 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -30,7 +30,7 @@ class _AggregationCommand(object): :meth:`pymongo.database.Database.aggregate` instead. """ def __init__(self, target, cursor_class, pipeline, options, - explicit_session, user_fields=None, result_processor=None): + explicit_session, let=None, user_fields=None, result_processor=None): if "explain" in options: raise ConfigurationError("The explain option is not supported. " "Use Database.command instead.") @@ -44,6 +44,9 @@ def __init__(self, target, cursor_class, pipeline, options, self._performs_write = True common.validate_is_mapping('options', options) + if let: + common.validate_is_mapping("let", let) + options["let"] = let self._options = options # This is the batchSize that will be used for setting the initial diff --git a/pymongo/collection.py b/pymongo/collection.py index 774c290235..393c26aa5c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -593,7 +593,7 @@ def _update(self, sock_info, criteria, document, upsert=False, check_keys=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None, retryable_write=False): + hint=None, session=None, retryable_write=False, let=None): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) collation = validate_collation_or_none(collation) @@ -626,6 +626,9 @@ def _update(self, sock_info, criteria, document, upsert=False, command = SON([('update', self.name), ('ordered', ordered), ('updates', [update_doc])]) + if let: + common.validate_is_mapping("let", let) + command["let"] = let if not write_concern.is_server_default: command['writeConcern'] = write_concern.document @@ -663,7 +666,7 @@ def _update_retryable( check_keys=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None): + hint=None, session=None, let=None): """Internal update / replace helper.""" def _update(session, sock_info, retryable_write): return self._update( @@ -672,7 +675,7 @@ def _update(session, sock_info, retryable_write): write_concern=write_concern, op_id=op_id, ordered=ordered, bypass_doc_val=bypass_doc_val, collation=collation, array_filters=array_filters, hint=hint, session=session, - retryable_write=retryable_write) + retryable_write=retryable_write, let=let) return self.__database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, @@ -759,7 +762,7 @@ def replace_one(self, filter, replacement, upsert=False, def update_one(self, filter, update, upsert=False, bypass_document_validation=False, collation=None, array_filters=None, hint=None, - session=None): + session=None, let=None): """Update a single document matching the filter. >>> for doc in db.test.find(): @@ -802,10 +805,16 @@ def update_one(self, filter, update, upsert=False, MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -830,12 +839,12 @@ def update_one(self, filter, update, upsert=False, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, - hint=hint, session=session), + hint=hint, session=session, let=let), write_concern.acknowledged) def update_many(self, filter, update, upsert=False, array_filters=None, bypass_document_validation=False, collation=None, - hint=None, session=None): + hint=None, session=None, let=None): """Update one or more documents that match the filter. >>> for doc in db.test.find(): @@ -878,10 +887,16 @@ def update_many(self, filter, update, upsert=False, array_filters=None, MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -906,7 +921,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, - hint=hint, session=session), + hint=hint, session=session, let=let), write_concern.acknowledged) def drop(self, session=None): @@ -938,7 +953,8 @@ def drop(self, session=None): def _delete( self, sock_info, criteria, multi, write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None, retryable_write=False): + collation=None, hint=None, session=None, retryable_write=False, + let=None): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern @@ -965,6 +981,10 @@ def _delete( if not write_concern.is_server_default: command['writeConcern'] = write_concern.document + if let: + common.validate_is_document_type("let", let) + command["let"] = let + # Delete command. result = sock_info.command( self.__database.name, @@ -980,20 +1000,21 @@ def _delete( def _delete_retryable( self, criteria, multi, write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None): + collation=None, hint=None, session=None, let=None): """Internal delete helper.""" def _delete(session, sock_info, retryable_write): return self._delete( sock_info, criteria, multi, write_concern=write_concern, op_id=op_id, ordered=ordered, collation=collation, hint=hint, session=session, - retryable_write=retryable_write) + retryable_write=retryable_write, let=let) return self.__database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, _delete, session) - def delete_one(self, filter, collation=None, hint=None, session=None): + def delete_one(self, filter, collation=None, hint=None, session=None, + let=None): """Delete a single document matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1017,10 +1038,16 @@ def delete_one(self, filter, collation=None, hint=None, session=None): MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1034,10 +1061,11 @@ def delete_one(self, filter, collation=None, hint=None, session=None): self._delete_retryable( filter, False, write_concern=write_concern, - collation=collation, hint=hint, session=session), + collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def delete_many(self, filter, collation=None, hint=None, session=None): + def delete_many(self, filter, collation=None, hint=None, session=None, + let=None): """Delete one or more documents matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1061,10 +1089,16 @@ def delete_many(self, filter, collation=None, hint=None, session=None): MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1078,7 +1112,7 @@ def delete_many(self, filter, collation=None, hint=None, session=None): self._delete_retryable( filter, True, write_concern=write_concern, - collation=collation, hint=hint, session=session), + collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) def find_one(self, filter=None, *args, **kwargs): @@ -1889,15 +1923,16 @@ def options(self, session=None): return options def _aggregate(self, aggregation_command, pipeline, cursor_class, session, - explicit_session, **kwargs): + explicit_session, let=None, **kwargs): cmd = aggregation_command( - self, cursor_class, pipeline, kwargs, explicit_session, + self, cursor_class, pipeline, kwargs, explicit_session, let, user_fields={'cursor': {'firstBatch': 1}}) + return self.__database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), session, retryable=not cmd._performs_write) - def aggregate(self, pipeline, session=None, **kwargs): + def aggregate(self, pipeline, session=None, let=None, **kwargs): """Perform an aggregation using the aggregation framework on this collection. @@ -1944,6 +1979,8 @@ def aggregate(self, pipeline, session=None, **kwargs): A :class:`~pymongo.command_cursor.CommandCursor` over the result set. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 4.0 Removed the ``useCursor`` option. .. versionchanged:: 3.9 @@ -1973,6 +2010,7 @@ def aggregate(self, pipeline, session=None, **kwargs): CommandCursor, session=s, explicit_session=session is not None, + let=let, **kwargs) def aggregate_raw_batches(self, pipeline, session=None, **kwargs): @@ -2232,7 +2270,7 @@ def _write_concern_for_cmd(self, cmd, session): def __find_and_modify(self, filter, projection, sort, upsert=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, - **kwargs): + let=None, **kwargs): """Internal findAndModify helper.""" common.validate_is_mapping("filter", filter) @@ -2243,6 +2281,9 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) + if let: + common.validate_is_mapping("let", let) + cmd["let"] = let cmd.update(kwargs) if projection is not None: cmd["fields"] = helpers._fields_list_to_dict(projection, @@ -2290,7 +2331,7 @@ def _find_and_modify(session, sock_info, retryable_write): def find_one_and_delete(self, filter, projection=None, sort=None, hint=None, - session=None, **kwargs): + session=None, let=None, **kwargs): """Finds a single document and deletes it, returning the document. >>> db.test.count_documents({'x': 1}) @@ -2337,7 +2378,13 @@ def find_one_and_delete(self, filter, - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -2356,13 +2403,13 @@ def find_one_and_delete(self, filter, .. versionadded:: 3.0 """ kwargs['remove'] = True - return self.__find_and_modify(filter, projection, sort, + return self.__find_and_modify(filter, projection, sort, let=let, hint=hint, session=session, **kwargs) def find_one_and_replace(self, filter, replacement, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, - hint=None, session=None, **kwargs): + hint=None, session=None, let=None, **kwargs): """Finds a single document and replaces it, returning either the original or the replaced document. @@ -2412,10 +2459,16 @@ def find_one_and_replace(self, filter, replacement, MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.6 @@ -2436,14 +2489,14 @@ def find_one_and_replace(self, filter, replacement, common.validate_ok_for_replace(replacement) kwargs['update'] = replacement return self.__find_and_modify(filter, projection, - sort, upsert, return_document, + sort, upsert, return_document, let=let, hint=hint, session=session, **kwargs) def find_one_and_update(self, filter, update, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, - **kwargs): + let=None, **kwargs): """Finds a single document and updates it, returning either the original or the updated document. @@ -2533,10 +2586,16 @@ def find_one_and_update(self, filter, update, MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.9 @@ -2561,7 +2620,7 @@ def find_one_and_update(self, filter, update, kwargs['update'] = update return self.__find_and_modify(filter, projection, sort, upsert, return_document, - array_filters, hint=hint, + array_filters, hint=hint, let=let, session=session, **kwargs) def __iter__(self): diff --git a/pymongo/cursor.py b/pymongo/cursor.py index c38adaf377..e825edf8fd 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -24,7 +24,8 @@ from bson.code import Code from bson.son import SON from pymongo import helpers -from pymongo.common import validate_boolean, validate_is_mapping +from pymongo.common import (validate_boolean, validate_is_mapping, + validate_is_document_type) from pymongo.collation import validate_collation_or_none from pymongo.errors import (ConnectionFailure, InvalidOperation, @@ -140,7 +141,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=None, show_record_id=None, snapshot=None, comment=None, session=None, - allow_disk_use=None): + allow_disk_use=None, let=None): """Create a new cursor. Should not be called directly by application developers - see @@ -197,6 +198,10 @@ def __init__(self, collection, filter=None, projection=None, skip=0, if projection is not None: projection = helpers._fields_list_to_dict(projection, "projection") + if let: + validate_is_document_type("let", let) + + self.__let = let self.__spec = spec self.__projection = projection self.__skip = skip @@ -370,6 +375,8 @@ def __query_spec(self): operators["$explain"] = True if self.__hint: operators["$hint"] = self.__hint + if self.__let: + operators["let"] = self.__let if self.__comment: operators["$comment"] = self.__comment if self.__max_scan: diff --git a/test/crud/unified/aggregate-let.json b/test/crud/unified/aggregate-let.json index d3b76bd65a..039900920f 100644 --- a/test/crud/unified/aggregate-let.json +++ b/test/crud/unified/aggregate-let.json @@ -56,109 +56,6 @@ "minServerVersion": "5.0" } ], - "operations": [ - { - "name": "aggregate", - "object": "collection0", - "arguments": { - "pipeline": [ - { - "$match": { - "$expr": { - "$eq": [ - "$_id", - "$$id" - ] - } - } - }, - { - "$project": { - "_id": 0, - "x": "$$x", - "y": "$$y", - "rand": "$$rand" - } - } - ], - "let": { - "id": 1, - "x": "foo", - "y": { - "$literal": "bar" - }, - "rand": { - "$rand": {} - } - } - }, - "expectResult": [ - { - "x": "foo", - "y": "bar", - "rand": { - "$$type": "double" - } - } - ] - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$match": { - "$expr": { - "$eq": [ - "$_id", - "$$id" - ] - } - } - }, - { - "$project": { - "_id": 0, - "x": "$$x", - "y": "$$y", - "rand": "$$rand" - } - } - ], - "let": { - "id": 1, - "x": "foo", - "y": { - "$literal": "bar" - }, - "rand": { - "$rand": {} - } - } - } - } - } - ] - } - ] - }, - { - "description": "Aggregate with let option and dollar-prefixed $literal value", - "runOnRequirements": [ - { - "minServerVersion": "5.0", - "topologies": [ - "single", - "replicaset" - ] - } - ], "operations": [ { "name": "aggregate", diff --git a/test/crud/unified/deleteMany-let.json b/test/crud/unified/deleteMany-let.json new file mode 100644 index 0000000000..71bf26a013 --- /dev/null +++ b/test/crud/unified/deleteMany-let.json @@ -0,0 +1,201 @@ +{ + "description": "deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "let": { + "name": "name" + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "limit": 0 + } + ], + "let": { + "name": "name" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-let.json b/test/crud/unified/deleteOne-let.json new file mode 100644 index 0000000000..9718682235 --- /dev/null +++ b/test/crud/unified/deleteOne-let.json @@ -0,0 +1,191 @@ +{ + "description": "deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-let.json b/test/crud/unified/find-let.json new file mode 100644 index 0000000000..4e9c9c99f4 --- /dev/null +++ b/test/crud/unified/find-let.json @@ -0,0 +1,148 @@ +{ + "description": "find-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Find with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ] + }, + { + "description": "Find with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + }, + "expectError": { + "errorContains": "Unrecognized field 'let'", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "let": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-let.json b/test/crud/unified/findOneAndDelete-let.json new file mode 100644 index 0000000000..ba8e681c0e --- /dev/null +++ b/test/crud/unified/findOneAndDelete-let.json @@ -0,0 +1,180 @@ +{ + "description": "findOneAndDelete-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "remove": true, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-let.json b/test/crud/unified/findOneAndReplace-let.json new file mode 100644 index 0000000000..5e5de44b31 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-let.json @@ -0,0 +1,197 @@ +{ + "description": "findOneAndReplace-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "x" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "x" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": { + "x": "x" + }, + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-let.json b/test/crud/unified/findOneAndUpdate-let.json new file mode 100644 index 0000000000..74d7d0e58b --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-let.json @@ -0,0 +1,217 @@ +{ + "description": "findOneAndUpdate-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectError": { + "errorContains": "field 'let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json new file mode 100644 index 0000000000..b4a4ddd800 --- /dev/null +++ b/test/crud/unified/updateMany-let.json @@ -0,0 +1,243 @@ +{ + "description": "updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$name", + "$$name" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x", + "y": "$$y" + } + } + ], + "multi": true + } + ], + "let": { + "name": "name", + "x": "foo", + "y": { + "$literal": "bar" + } + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name", + "x": "foo", + "y": "bar" + }, + { + "_id": 3, + "name": "name", + "x": "foo", + "y": "bar" + } + ] + } + ] + }, + { + "description": "updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ], + "multi": true + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json new file mode 100644 index 0000000000..7b1cc4cf00 --- /dev/null +++ b/test/crud/unified/updateOne-let.json @@ -0,0 +1,215 @@ +{ + "description": "updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "id": 1, + "x": "foo" + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ] + } + ], + "let": { + "id": 1, + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "UpdateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": "$$x" + } + } + ], + "let": { + "x": "foo" + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$set": { + "x": "$$x" + } + } + ] + } + ], + "let": { + "x": "foo" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/test_collection.py b/test/test_collection.py index 79a2a907a6..4af2298ceb 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2178,6 +2178,23 @@ def test_bool(self): with self.assertRaises(NotImplementedError): bool(Collection(self.db, 'test')) + @client_context.require_version_min(5, 0, 0) + def test_helpers_with_let(self): + c = self.db.test + helpers = [(c.delete_many, ({}, {})), (c.delete_one, ({}, {})), + (c.find, ({})), (c.update_many, ({}, {'$inc': {'x': 3}})), + (c.update_one, ({}, {'$inc': {'x': 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([], {}))] + for let in [10, "str"]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, + "let must be an instance of dict"): + helper(*args, let=let) + for helper, args in helpers: + helper(*args, let={}) + if __name__ == "__main__": unittest.main() From 5ec4e6cc4cba641c98cb48eefb76b82c99d7ae82 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Dec 2021 13:45:50 -0800 Subject: [PATCH 0539/2111] PYTHON-3027 Fix server selection when topology type is Unknown (#806) --- pymongo/topology_description.py | 7 +-- test/mockupdb/test_rsghost.py | 52 +++++++++++++++++++ .../server_selection/Unknown/read/ghost.json | 18 +++++++ .../server_selection/Unknown/write/ghost.json | 18 +++++++ test/utils_selection_tests.py | 8 ++- 5 files changed, 98 insertions(+), 5 deletions(-) create mode 100644 test/mockupdb/test_rsghost.py create mode 100644 test/server_selection/server_selection/Unknown/read/ghost.json create mode 100644 test/server_selection/server_selection/Unknown/write/ghost.json diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index d0100ff8b9..4fe897dcef 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -263,9 +263,10 @@ def apply_selector(self, selector, address=None, custom_selector=None): selector.min_wire_version, common_wv)) - if self.topology_type in (TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Unknown): + if self.topology_type == TOPOLOGY_TYPE.Unknown: + return [] + elif self.topology_type in (TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced): # Ignore selectors for standalone and load balancer mode. return self.known_servers elif address: diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py new file mode 100644 index 0000000000..2f02503f54 --- /dev/null +++ b/test/mockupdb/test_rsghost.py @@ -0,0 +1,52 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test connections to RSGhost nodes.""" + +import datetime + +from mockupdb import going, MockupDB +from pymongo import MongoClient +from pymongo.errors import ServerSelectionTimeoutError + +import unittest + + +class TestRSGhost(unittest.TestCase): + + def test_rsghost(self): + rsother_response = { + 'ok': 1.0, 'ismaster': False, 'secondary': False, + 'info': 'Does not have a valid replica set config', + 'isreplicaset': True, 'maxBsonObjectSize': 16777216, + 'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 100000, + 'localTime': datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), + 'logicalSessionTimeoutMinutes': 30, 'connectionId': 3, + 'minWireVersion': 0, 'maxWireVersion': 15, 'readOnly': False} + server = MockupDB(auto_ismaster=rsother_response) + server.run() + self.addCleanup(server.stop) + # Default auto discovery yields a server selection timeout. + with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client: + with self.assertRaises(ServerSelectionTimeoutError): + client.test.command('ping') + # Direct connection succeeds. + with MongoClient(server.uri, directConnection=True) as client: + with going(client.test.command, 'ping'): + request = server.receives(ping=1) + request.reply() + + +if __name__ == '__main__': + unittest.main() diff --git a/test/server_selection/server_selection/Unknown/read/ghost.json b/test/server_selection/server_selection/Unknown/read/ghost.json new file mode 100644 index 0000000000..76d3d774e8 --- /dev/null +++ b/test/server_selection/server_selection/Unknown/read/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "read", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/server_selection/server_selection/Unknown/write/ghost.json b/test/server_selection/server_selection/Unknown/write/ghost.json new file mode 100644 index 0000000000..65caa4cd0a --- /dev/null +++ b/test/server_selection/server_selection/Unknown/write/ghost.json @@ -0,0 +1,18 @@ +{ + "topology_description": { + "type": "Unknown", + "servers": [ + { + "address": "a:27017", + "avg_rtt_ms": 5, + "type": "RSGhost" + } + ] + }, + "operation": "write", + "read_preference": { + "mode": "Nearest" + }, + "suitable_servers": [], + "in_latency_window": [] +} diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 0006f6f673..76125b6f15 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -63,7 +63,7 @@ def make_server_description(server, hosts): return ServerDescription(clean_node(server['address']), Hello({})) hello_response = {'ok': True, 'hosts': hosts} - if server_type != "Standalone" and server_type != "Mongos": + if server_type not in ("Standalone", "Mongos", "RSGhost"): hello_response['setName'] = "rs" if server_type == "RSPrimary": @@ -72,6 +72,10 @@ def make_server_description(server, hosts): hello_response['secondary'] = True elif server_type == "Mongos": hello_response['msg'] = 'isdbgrid' + elif server_type == "RSGhost": + hello_response['isreplicaset'] = True + elif server_type == "RSArbiter": + hello_response['arbiterOnly'] = True hello_response['lastWrite'] = { 'lastWriteDate': make_last_write_date(server) @@ -149,7 +153,7 @@ def create_topology(scenario_def, **kwargs): # Assert that descriptions match assert (scenario_def['topology_description']['type'] == - topology.description.topology_type_name) + topology.description.topology_type_name), topology.description.topology_type_name return topology From 44853ea9c3ffe9dba5e356687f0870a1a41f3d7c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 6 Dec 2021 11:26:36 -0800 Subject: [PATCH 0540/2111] PYTHON-3033 Fix typo in uuid docs (#808) --- doc/examples/uuid.rst | 3 ++- pymongo/mongo_client.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst index d4a77d4038..90ec71ebe2 100644 --- a/doc/examples/uuid.rst +++ b/doc/examples/uuid.rst @@ -1,3 +1,4 @@ + .. _handling-uuid-data-example: Handling UUID Data @@ -12,7 +13,7 @@ to MongoDB and retrieve them as native :class:`uuid.UUID` objects:: from uuid import uuid4 # use the 'standard' representation for cross-language compatibility. - client = MongoClient(uuid_representation=UuidRepresentation.STANDARD) + client = MongoClient(uuidRepresentation='standard') collection = client.get_database('uuid_db').get_collection('uuid_coll') # remove all documents from collection diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index af159da521..dae62e7605 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -325,9 +325,9 @@ def __init__( speed. 9 is best compression. Defaults to -1. - `uuidRepresentation`: The BSON representation to use when encoding from and decoding to instances of :class:`~uuid.UUID`. Valid - values are `pythonLegacy`, `javaLegacy`, `csharpLegacy`, `standard` - and `unspecified` (the default). New applications - should consider setting this to `standard` for cross language + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language compatibility. See :ref:`handling-uuid-data-example` for details. - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would From 70f7fe75426b76debfce787a0ee2eb398c27a1ce Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 6 Dec 2021 13:13:15 -0800 Subject: [PATCH 0541/2111] PYTHON-3028 $regex as a field name does not allow for non-string values (#807) --- bson/json_util.py | 2 +- test/test_json_util.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/bson/json_util.py b/bson/json_util.py index 0644874b44..ed67d9a36c 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -508,7 +508,7 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): def _parse_legacy_regex(doc): pattern = doc["$regex"] # Check if this is the $regex query operator. - if isinstance(pattern, Regex): + if not isinstance(pattern, (str, bytes)): return doc flags = 0 # PyMongo always adds $options but some other tools may not. diff --git a/test/test_json_util.py b/test/test_json_util.py index f28b75c9be..dbf4f1c26a 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -270,6 +270,15 @@ def test_regex(self): json_util.dumps(Regex('.*', re.M | re.X), json_options=LEGACY_JSON_OPTIONS)) + def test_regex_validation(self): + non_str_types = [10, {}, []] + docs = [{"$regex": i} for i in non_str_types] + for doc in docs: + self.assertEqual(doc, json_util.loads(json.dumps(doc))) + + doc = {"$regex": ""} + self.assertIsInstance(json_util.loads(json.dumps(doc)), Regex) + def test_minkey(self): self.round_trip({"m": MinKey()}) From f3a76a703bbd3ec02ce3e8ea4b1a4bdf28162b0b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Dec 2021 11:14:09 -0800 Subject: [PATCH 0542/2111] BUMP 4.1.0.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 4ac46672e7..5db9363f90 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -53,7 +53,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 0, 1, '.dev0') +version_tuple = (4, 1, 0, '.dev0') def get_version_string(): if isinstance(version_tuple[-1], str): diff --git a/setup.py b/setup.py index 158eb9a42b..464e33e082 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.0.1.dev0" +version = "4.1.0.dev0" f = open("README.rst") try: From e15464296876f5724649972b1f005af2b8ea03a9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Dec 2021 15:47:28 -0800 Subject: [PATCH 0543/2111] Removed references to outdated server versions (#812) --- bson/decimal128.py | 2 - doc/examples/authentication.rst | 19 +++------ doc/examples/bulk.rst | 5 --- pymongo/aggregation.py | 2 +- pymongo/client_session.py | 2 - pymongo/collection.py | 76 +++++++++++---------------------- pymongo/common.py | 2 +- pymongo/cursor.py | 9 ++-- pymongo/database.py | 5 +-- pymongo/mongo_client.py | 39 +++++++---------- pymongo/operations.py | 23 ++++------ pymongo/pool.py | 2 +- pymongo/results.py | 16 +------ pymongo/write_concern.py | 8 ++-- test/test_collection.py | 2 - test/test_cursor.py | 3 +- test/utils.py | 4 +- 17 files changed, 69 insertions(+), 150 deletions(-) diff --git a/bson/decimal128.py b/bson/decimal128.py index 528e0f9a35..ede728bbab 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -15,8 +15,6 @@ """Tools for working with the BSON decimal128 type. .. versionadded:: 3.4 - -.. note:: The Decimal128 BSON type requires MongoDB 3.4+. """ import decimal diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 1e0f133a5a..db2dbd3d1f 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -97,9 +97,8 @@ the "MongoDB Challenge-Response" protocol:: Default Authentication Mechanism -------------------------------- -If no mechanism is specified, PyMongo automatically uses MONGODB-CR when -connected to a pre-3.0 version of MongoDB, SCRAM-SHA-1 when connected to -MongoDB 3.0 through 3.6, and negotiates the mechanism to use (SCRAM-SHA-1 +If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 when connected +to MongoDB 3.6 and negotiates the mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. Default Database and "authSource" @@ -125,15 +124,12 @@ MONGODB-X509 ------------ .. versionadded:: 2.6 -The MONGODB-X509 mechanism authenticates a username derived from the -distinguished subject name of the X.509 certificate presented by the driver -during TLS/SSL negotiation. This authentication method requires the use of -TLS/SSL connections with certificate validation and is available in -MongoDB 2.6 and newer:: +The MONGODB-X509 mechanism authenticates via the X.509 certificate presented +by the driver during TLS/SSL negotiation. This authentication method requires +the use of TLS/SSL connections with certificate validation:: >>> from pymongo import MongoClient >>> client = MongoClient('example.com', - ... username="" ... authMechanism="MONGODB-X509", ... tls=True, ... tlsCertificateKeyFile='/path/to/client.pem', @@ -142,16 +138,13 @@ MongoDB 2.6 and newer:: MONGODB-X509 authenticates against the $external virtual database, so you do not have to specify a database in the URI:: - >>> uri = "mongodb://@example.com/?authMechanism=MONGODB-X509" + >>> uri = "mongodb://example.com/?authMechanism=MONGODB-X509" >>> client = MongoClient(uri, ... tls=True, ... tlsCertificateKeyFile='/path/to/client.pem', ... tlsCAFile='/path/to/ca.pem') >>> -.. versionchanged:: 3.4 - When connected to MongoDB >= 3.4 the username is no longer required. - .. _gssapi: GSSAPI (Kerberos) diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 9e8a57a803..23505268f0 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -74,11 +74,6 @@ of operations performed. 'writeConcernErrors': [], 'writeErrors': []} -.. warning:: ``nModified`` is only reported by MongoDB 2.6 and later. When - connected to an earlier server version, or in certain mixed version sharding - configurations, PyMongo omits this field from the results of a bulk - write operation. - The first write failure that occurs (e.g. duplicate key error) aborts the remaining operations, and PyMongo raises :class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attibute of diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 4a565ee134..f0be39e671 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -147,7 +147,7 @@ def get_cursor(self, session, server, sock_info, secondary_ok): if 'cursor' in result: cursor = result['cursor'] else: - # Pre-MongoDB 2.6 or unacknowledged write. Fake a cursor. + # Unacknowledged $out/$merge write. Fake a cursor. cursor = { "id": 0, "firstBatch": result.get("result", []), diff --git a/pymongo/client_session.py b/pymongo/client_session.py index f8071e5f2b..8c61623ae4 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -14,8 +14,6 @@ """Logical sessions for ordering sequential operations. -Requires MongoDB 3.6. - .. versionadded:: 3.6 Causally Consistent Reads diff --git a/pymongo/collection.py b/pymongo/collection.py index 393c26aa5c..092163c403 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -107,8 +107,7 @@ def __init__(self, database, name, create=False, codec_options=None, default) database.read_concern is used. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. If a collation is provided, - it will be passed to the create collection command. This option is - only supported on MongoDB 3.4 and above. + it will be passed to the create collection command. - `session` (optional): a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command @@ -209,8 +208,7 @@ def _command(self, sock_info, command, secondary_ok=False, - `read_concern` (optional) - An instance of :class:`~pymongo.read_concern.ReadConcern`. - `write_concern`: An instance of - :class:`~pymongo.write_concern.WriteConcern`. This option is only - valid for MongoDB 3.4 and above. + :class:`~pymongo.write_concern.WriteConcern`. - `collation` (optional) - An instance of :class:`~pymongo.collation.Collation`. - `session` (optional): a @@ -720,10 +718,9 @@ def replace_one(self, filter, replacement, upsert=False, match the filter. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. This option is only supported on MongoDB 3.2 and above. + ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -790,13 +787,11 @@ def update_one(self, filter, update, upsert=False, match the filter. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. This option is only supported on MongoDB 3.2 and above. + ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. This option is only - supported on MongoDB 3.6 and above. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -872,13 +867,11 @@ def update_many(self, filter, update, upsert=False, array_filters=None, match the filter. - `bypass_document_validation` (optional): If ``True``, allows the write to opt-out of document level validation. Default is - ``False``. This option is only supported on MongoDB 3.2 and above. + ``False``. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. This option is only - supported on MongoDB 3.6 and above. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -1028,8 +1021,7 @@ def delete_one(self, filter, collation=None, hint=None, session=None, :Parameters: - `filter`: A query that matches the document to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -1079,8 +1071,7 @@ def delete_many(self, filter, collation=None, hint=None, session=None, :Parameters: - `filter`: A query that matches the documents to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -1226,8 +1217,7 @@ def find(self, *args, **kwargs): - `batch_size` (optional): Limits the number of documents returned in a single batch. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `return_key` (optional): If True, return only the index keys in each document. - `show_record_id` (optional): If True, adds a field ``$recordId`` in @@ -1472,12 +1462,10 @@ def count_documents(self, filter, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow this operation to run, in milliseconds. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (string or list of tuples): The index to use. Specify either the index name as a string or the index specification as a list of tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - This option is only supported on MongoDB 3.6 and above. The :meth:`count_documents` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -1495,8 +1483,6 @@ def count_documents(self, filter, session=None, **kwargs): | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | +-------------+-------------------------------------+ - $expr requires MongoDB 3.6+ - :Parameters: - `filter` (required): A query document that selects which documents to count in the collection. Can be an empty document to count all @@ -1554,13 +1540,8 @@ def create_indexes(self, indexes, session=None, **kwargs): - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. - .. note:: `create_indexes` uses the `createIndexes`_ command - introduced in MongoDB **2.6** and cannot be used with earlier - versions. - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -1665,9 +1646,9 @@ def create_index(self, keys, session=None, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires MongoDB >=3.2. + a partial index. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. Requires MongoDB >= 3.4. + :class:`~pymongo.collation.Collation`. - `wildcardProjection`: Allows users to include or exclude specific field paths from a `wildcard index`_ using the {"$**" : 1} key pattern. Requires MongoDB >= 4.2. @@ -1683,8 +1664,7 @@ def create_index(self, keys, session=None, **kwargs): using the option will fail if a duplicate value is detected. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. :Parameters: - `keys`: a single key or a list of (key, direction) @@ -1733,8 +1713,7 @@ def drop_indexes(self, session=None, **kwargs): command (like maxTimeMS) can be passed as keyword arguments. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -1772,8 +1751,7 @@ def drop_index(self, index_or_name, session=None, **kwargs): command (like maxTimeMS) can be passed as keyword arguments. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword @@ -1946,8 +1924,7 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): example is included in the :ref:`aggregate-examples` documentation. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. :Parameters: - `pipeline`: a list of aggregation pipeline stages @@ -2060,8 +2037,6 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, :class:`~pymongo.change_stream.CollectionChangeStream` cursor which iterates over changes on this collection. - Introduced in MongoDB 3.6. - .. code-block:: python with db.collection.watch() as stream: @@ -2172,8 +2147,7 @@ def rename(self, new_name, session=None, **kwargs): (i.e. ``dropTarget=True``) .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation when using - MongoDB >= 3.4. + this collection is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2219,8 +2193,7 @@ def distinct(self, key, filter=None, session=None, **kwargs): - `maxTimeMS` (int): The maximum amount of time to allow the count command to run, in milliseconds. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only supported - on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. The :meth:`distinct` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -2576,8 +2549,7 @@ def find_one_and_update(self, filter, update, :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. This option is only - supported on MongoDB 3.6 and above. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to diff --git a/pymongo/common.py b/pymongo/common.py index 772f2f299b..14789c8109 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -102,7 +102,7 @@ # Default value for retryReads. RETRY_READS = True -# mongod/s 2.6 and above return code 59 when a command doesn't exist. +# The error code returned when a command doesn't exist. COMMAND_NOT_FOUND_CODES = (59,) # Error codes to ignore if GridFS calls createIndex on a secondary diff --git a/pymongo/cursor.py b/pymongo/cursor.py index e825edf8fd..3e78c2d97c 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -773,8 +773,7 @@ def sort(self, key_or_list, direction=None): ('field2', pymongo.DESCENDING)]): print(doc) - Beginning with MongoDB version 2.6, text search results can be - sorted by relevance:: + Text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, @@ -837,8 +836,8 @@ def distinct(self, key): def explain(self): """Returns an explain plan record for this cursor. - .. note:: Starting with MongoDB 3.2 :meth:`explain` uses - the default verbosity mode of the `explain command + .. note:: This method uses the default verbosity mode of the + `explain command `_, ``allPlansExecution``. To use a different verbosity use :meth:`~pymongo.database.Database.command` to run the explain @@ -944,8 +943,6 @@ def where(self, code): def collation(self, collation): """Adds a :class:`~pymongo.collation.Collation` to this query. - This option is only supported on MongoDB 3.4 and above. - Raises :exc:`TypeError` if `collation` is not an instance of :class:`~pymongo.collation.Collation` or a ``dict``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has diff --git a/pymongo/database.py b/pymongo/database.py index dc8c13cbb0..c7ed38b73f 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -321,8 +321,6 @@ def aggregate(self, pipeline, session=None, **kwargs): See the `aggregation pipeline`_ documentation for a list of stages that are supported. - Introduced in MongoDB 3.6. - .. code-block:: python # Lists all operations currently running on the server. @@ -716,8 +714,7 @@ def drop_collection(self, name_or_collection, session=None): :class:`~pymongo.client_session.ClientSession`. .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this database is automatically applied to this operation when using - MongoDB >= 3.4. + this database is automatically applied to this operation. .. versionchanged:: 3.6 Added ``session`` parameter. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dae62e7605..41e701706d 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -247,10 +247,9 @@ def __init__( between periodic server checks, or None to accept the default frequency of 10 seconds. - `appname`: (string or None) The name of the application that - created this MongoClient instance. MongoDB 3.4 and newer will - print this value in the server log upon establishing each - connection. It is also recorded in the slow query log and - profile collections. + created this MongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. - `driver`: (pair or None) A driver implemented on top of PyMongo can pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, version, and platform to the message printed in the server log when @@ -259,7 +258,7 @@ def __init__( :mod:`~pymongo.monitoring` for details. - `retryWrites`: (boolean) Whether supported write operations executed within this MongoClient will be retried once after a - network error on MongoDB 3.6+. Defaults to ``True``. + network error. Defaults to ``True``. The supported write operations are: - :meth:`~pymongo.collection.Collection.bulk_write`, as long as @@ -281,7 +280,7 @@ def __init__( https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst - `retryReads`: (boolean) Whether supported read operations executed within this MongoClient will be retried once after a - network error on MongoDB 3.6+. Defaults to ``True``. + network error. Defaults to ``True``. The supported read operations are: :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, @@ -315,9 +314,8 @@ def __init__( zlib support requires the Python standard library zlib module. zstd requires the `zstandard `_ package. By default no compression is used. Compression support - must also be enabled on the server. MongoDB 3.4+ supports snappy - compression. MongoDB 3.6 adds support for zlib. MongoDB 4.2 adds - support for zstd. + must also be enabled on the server. MongoDB 3.6+ supports snappy + and zlib compression. MongoDB 4.2+ adds support for zstd. - `zlibCompressionLevel`: (int) The zlib compression level to use when zlib is used as the wire protocol compressor. Supported values are -1 through 9. -1 tells the zlib library to use its default @@ -355,10 +353,8 @@ def __init__( will cause **write operations to wait indefinitely**. - `journal`: If ``True`` block until write operations have been committed to the journal. Cannot be used in combination with - `fsync`. Prior to MongoDB 2.6 this option was ignored if the server - was running without journaling. Starting with MongoDB 2.6 write - operations will fail with an exception if this option is used when - the server is running without journaling. + `fsync`. Write operations will fail with an exception if this + option is used when the server is running without journaling. - `fsync`: If ``True`` and the server is running without journaling, blocks until the server has synced all data files to disk. If the server is running with journaling, this acts the same as the `j` @@ -406,11 +402,9 @@ def __init__( - `authSource`: The database to authenticate on. Defaults to the database specified in the URI, if provided, or to "admin". - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. - If no mechanism is specified, PyMongo automatically uses MONGODB-CR - when connected to a pre-3.0 version of MongoDB, SCRAM-SHA-1 when - connected to MongoDB 3.0 through 3.6, and negotiates the mechanism - to use (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB - 4.0+. + If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 + when connected to MongoDB 3.6 and negotiates the mechanism to use + (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. - `authMechanismProperties`: Used to specify authentication mechanism specific options. To specify the service name for GSSAPI authentication pass authMechanismProperties='SERVICE_NAME:= 3.6, end all server sessions created by this client by - sending one or more endSessions commands. + End all server sessions created by this client by sending one or more + endSessions commands. Close all sockets in the connection pools and stop the monitor threads. @@ -1565,8 +1559,6 @@ def start_session(self, :class:`~pymongo.client_session.SessionOptions`. See the :mod:`~pymongo.client_session` module for details and examples. - Requires MongoDB 3.6. - A :class:`~pymongo.client_session.ClientSession` may only be used with the MongoClient that started it. :class:`ClientSession` instances are **not thread-safe or fork-safe**. They can only be used by one thread @@ -1722,8 +1714,7 @@ def drop_database(self, name_or_database, session=None): Added ``session`` parameter. .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of - this client is automatically applied to this operation when using - MongoDB >= 3.4. + this client is automatically applied to this operation. .. versionchanged:: 3.4 Apply this client's write concern automatically to this operation diff --git a/pymongo/operations.py b/pymongo/operations.py index b5d670e0ff..be6a959f5c 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -65,8 +65,7 @@ def __init__(self, filter, collation=None, hint=None): :Parameters: - `filter`: A query that matches the document to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -119,8 +118,7 @@ def __init__(self, filter, collation=None, hint=None): :Parameters: - `filter`: A query that matches the documents to delete. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -177,8 +175,7 @@ def __init__(self, filter, replacement, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -287,10 +284,9 @@ def __init__(self, filter, update, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -335,10 +331,9 @@ def __init__(self, filter, update, upsert=False, collation=None, - `upsert` (optional): If ``True``, perform an insert if no documents match the filter. - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. This option is only - supported on MongoDB 3.4 and above. + :class:`~pymongo.collation.Collation`. - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. Requires MongoDB 3.6+. + array elements an update should apply. - `hint` (optional): An index to use to support the query predicate specified either by its string name, or in the same format as passed to @@ -404,9 +399,9 @@ def __init__(self, keys, **kwargs): this collection after seconds. The indexed field must be a UTC datetime or the data will not expire. - `partialFilterExpression`: A document that specifies a filter for - a partial index. Requires MongoDB >= 3.2. + a partial index. - `collation`: An instance of :class:`~pymongo.collation.Collation` - that specifies the collation to use in MongoDB >= 3.4. + that specifies the collation to use. - `wildcardProjection`: Allows users to include or exclude specific field paths from a `wildcard index`_ using the { "$**" : 1} key pattern. Requires MongoDB >= 4.2. diff --git a/pymongo/pool.py b/pymongo/pool.py index f9b370c66e..84661c4879 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -873,7 +873,7 @@ def socket_closed(self): return self.socket_checker.socket_closed(self.sock) def send_cluster_time(self, command, session, client): - """Add cluster time for MongoDB >= 3.6.""" + """Add $clusterTime.""" if client: client._send_cluster_time(command, session) diff --git a/pymongo/results.py b/pymongo/results.py index a5025e9f48..0374803249 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -118,13 +118,7 @@ def matched_count(self): @property def modified_count(self): - """The number of documents modified. - - .. note:: modified_count is only reported by MongoDB 2.6 and later. - When connected to an earlier server version, or in certain mixed - version sharding configurations, this attribute will be set to - ``None``. - """ + """The number of documents modified. """ self._raise_if_unacknowledged("modified_count") return self.__raw_result.get("nModified") @@ -195,13 +189,7 @@ def matched_count(self): @property def modified_count(self): - """The number of documents modified. - - .. note:: modified_count is only reported by MongoDB 2.6 and later. - When connected to an earlier server version, or in certain mixed - version sharding configurations, this attribute will be set to - ``None``. - """ + """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") return self.__bulk_api_result.get("nModified") diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ebc997c0db..2075240f0a 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -33,11 +33,9 @@ class WriteConcern(object): to complete. If replication does not complete in the given timeframe, a timeout exception is raised. - `j`: If ``True`` block until write operations have been committed - to the journal. Cannot be used in combination with `fsync`. Prior - to MongoDB 2.6 this option was ignored if the server was running - without journaling. Starting with MongoDB 2.6 write operations will - fail with an exception if this option is used when the server is - running without journaling. + to the journal. Cannot be used in combination with `fsync`. Write + operations will fail with an exception if this option is used when + the server is running without journaling. - `fsync`: If ``True`` and the server is running without journaling, blocks until the server has synced all data files to disk. If the server is running with journaling, this acts the same as the `j` diff --git a/test/test_collection.py b/test/test_collection.py index 4af2298ceb..4a167bacb3 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -680,8 +680,6 @@ def test_options(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) result = db.test.options() - # mongos 2.2.x adds an $auth field when auth is enabled. - result.pop('$auth', None) self.assertEqual(result, {"capped": True, 'size': 4096}) db.drop_collection("test") diff --git a/test/test_cursor.py b/test/test_cursor.py index d56f9fc27d..8c27544b80 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -343,8 +343,7 @@ def test_explain(self): for _ in a: break b = a.explain() - # "cursor" pre MongoDB 2.7.6, "executionStats" post - self.assertTrue("cursor" in b or "executionStats" in b) + self.assertIn("executionStats", b) def test_explain_with_read_concern(self): # Do not add readConcern level to explain. diff --git a/test/utils.py b/test/utils.py index 5b6f9fd264..bdea5c69c2 100644 --- a/test/utils.py +++ b/test/utils.py @@ -928,7 +928,7 @@ def is_greenthread_patched(): def disable_replication(client): - """Disable replication on all secondaries, requires MongoDB 3.2.""" + """Disable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command('configureFailPoint', 'stopReplProducer', @@ -936,7 +936,7 @@ def disable_replication(client): def enable_replication(client): - """Enable replication on all secondaries, requires MongoDB 3.2.""" + """Enable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) secondary.admin.command('configureFailPoint', 'stopReplProducer', From bf992c20a63e8d235ff7b4021a0ba4246d1ddec5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Dec 2021 16:26:01 -0800 Subject: [PATCH 0544/2111] PYTHON-2554 Support aggregate $merge and $out executing on secondaries (#774) --- doc/changelog.rst | 6 + pymongo/aggregation.py | 12 +- pymongo/collection.py | 8 +- pymongo/mongo_client.py | 2 +- pymongo/read_preferences.py | 39 ++ pymongo/topology_description.py | 17 +- .../aggregate-write-readPreference.json | 460 ++++++++++++++++++ .../db-aggregate-write-readPreference.json | 446 +++++++++++++++++ test/test_read_preferences.py | 4 +- 9 files changed, 978 insertions(+), 16 deletions(-) create mode 100644 test/crud/unified/aggregate-write-readPreference.json create mode 100644 test/crud/unified/db-aggregate-write-readPreference.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 192b456619..062104bc8f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,8 @@ Changelog Changes in Version 4.1 ---------------------- +PyMongo 4.0 brings a number of improvements including: + - :meth:`pymongo.collection.Collection.update_one`, :meth:`pymongo.collection.Collection.update_many`, :meth:`pymongo.collection.Collection.delete_one`, @@ -15,6 +17,10 @@ Changes in Version 4.1 and :meth:`pymongo.collection.Collection.find` all support a new keyword argument ``let`` which is a map of parameter names and values. Parameters can then be accessed as variables in an aggregate expression context. +- :meth:`~pymongo.collection.Collection.aggregate` now supports + $merge and $out executing on secondaries on MongoDB >=5.0. + aggregate() now always obeys the collection's :attr:`read_preference` on + MongoDB >= 5.0. Changes in Version 4.0 diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index f0be39e671..a5a7abaed7 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -19,7 +19,7 @@ from pymongo import common from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import _AggWritePref, ReadPreference class _AggregationCommand(object): @@ -70,6 +70,7 @@ def __init__(self, target, cursor_class, pipeline, options, options.pop('collation', None)) self._max_await_time_ms = options.pop('maxAwaitTimeMS', None) + self._write_preference = None @property def _aggregation_target(self): @@ -97,9 +98,12 @@ def _process_result(self, result, session, server, sock_info, secondary_ok): result, session, server, sock_info, secondary_ok) def get_read_preference(self, session): - if self._performs_write: - return ReadPreference.PRIMARY - return self._target._read_preference_for(session) + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) + return pref def get_cursor(self, session, server, sock_info, secondary_ok): # Serialize command. diff --git a/pymongo/collection.py b/pymongo/collection.py index 092163c403..ea11875ce2 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1915,9 +1915,9 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): collection. The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Collection`, except when ``$out`` or ``$merge`` are used, in - which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` - is used. + :class:`Collection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. .. note:: This method does not support the 'explain' option. Please use :meth:`~pymongo.database.Database.command` instead. An @@ -1958,6 +1958,8 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): .. versionchanged:: 4.1 Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. .. versionchanged:: 4.0 Removed the ``useCursor`` option. .. versionchanged:: 3.9 diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 41e701706d..9c98e5d211 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1155,7 +1155,7 @@ def _secondaryok_for_server(self, read_preference, server, session): with self._get_socket(server, session) as sock_info: secondary_ok = (single and not sock_info.is_mongos) or ( - read_preference != ReadPreference.PRIMARY) + read_preference.mode != ReadPreference.PRIMARY.mode) yield sock_info, secondary_ok @contextlib.contextmanager diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index c60240822d..2471d5834c 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -424,6 +424,45 @@ def __call__(self, selection): self.max_staleness, selection)) +class _AggWritePref: + """Agg $out/$merge write preference. + + * If there are readable servers and there is any pre-5.0 server, use + primary read preference. + * Otherwise use `pref` read preference. + + :Parameters: + - `pref`: The read preference to use on MongoDB 5.0+. + """ + + __slots__ = ('pref', 'effective_pref') + + def __init__(self, pref): + self.pref = pref + self.effective_pref = ReadPreference.PRIMARY + + def selection_hook(self, topology_description): + common_wv = topology_description.common_wire_version + if (topology_description.has_readable_server( + ReadPreference.PRIMARY_PREFERRED) and + common_wv and common_wv < 13): + self.effective_pref = ReadPreference.PRIMARY + else: + self.effective_pref = self.pref + + def __call__(self, selection): + """Apply this read preference to a Selection.""" + return self.effective_pref(selection) + + def __repr__(self): + return "_AggWritePref(pref=%r)" % (self.pref,) + + # Proxy other calls to the effective_pref so that _AggWritePref can be + # used in place of an actual read preference. + def __getattr__(self, name): + return getattr(self.effective_pref, name) + + _ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 4fe897dcef..c13d00a64c 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -19,7 +19,7 @@ from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import ReadPreference, _AggWritePref from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE @@ -263,21 +263,24 @@ def apply_selector(self, selector, address=None, custom_selector=None): selector.min_wire_version, common_wv)) + if isinstance(selector, _AggWritePref): + selector.selection_hook(self) + if self.topology_type == TOPOLOGY_TYPE.Unknown: return [] elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): # Ignore selectors for standalone and load balancer mode. return self.known_servers - elif address: + if address: # Ignore selectors when explicit address is requested. description = self.server_descriptions().get(address) return [description] if description else [] - elif self.topology_type == TOPOLOGY_TYPE.Sharded: - # Ignore read preference. - selection = Selection.from_topology_description(self) - else: - selection = selector(Selection.from_topology_description(self)) + + selection = Selection.from_topology_description(self) + # Ignore read preference for sharded clusters. + if self.topology_type != TOPOLOGY_TYPE.Sharded: + selection = selector(selection) # Apply custom selector followed by localThresholdMS. if custom_selector is not None and selection: diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json new file mode 100644 index 0000000000..28327e8d83 --- /dev/null +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -0,0 +1,460 @@ +{ + "description": "aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + }, + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "coll1" + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "coll1" + } + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll1", + "databaseName": "db0", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json new file mode 100644 index 0000000000..269299e3c7 --- /dev/null +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -0,0 +1,446 @@ +{ + "description": "db-aggregate-write-readPreference", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "_yamlAnchors": { + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + }, + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0", + "databaseOptions": { + "readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + } + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [] + } + ], + "tests": [ + { + "description": "Database-level aggregate with $out includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $out omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99", + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$out": "coll0" + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge includes read preference for 5.0+ server", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "mode": "secondaryPreferred", + "maxStalenessSeconds": 600 + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Database-level aggregate with $merge omits read preference for pre-5.0 server", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "object": "database0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + }, + { + "$addFields": { + "_id": 1 + } + }, + { + "$project": { + "_id": 1 + } + }, + { + "$merge": { + "into": "coll0" + } + } + ], + "$readPreference": { + "mode": "primary" + }, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "w": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 18dbd0bee4..bbc89b9d14 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -433,7 +433,9 @@ def test_aggregate(self): [{'$project': {'_id': 1}}]) def test_aggregate_write(self): - self._test_coll_helper(False, self.c.pymongo_test.test, + # 5.0 servers support $out on secondaries. + secondary_ok = client_context.version.at_least(5, 0) + self._test_coll_helper(secondary_ok, self.c.pymongo_test.test, 'aggregate', [{'$project': {'_id': 1}}, {'$out': "agg_write_test"}]) From 1d7b9a80b91315f1af4b88577e20ab4ac9e55e8f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Dec 2021 16:41:10 -0800 Subject: [PATCH 0545/2111] PYTHON-3026 Fix Windows Python 3.6 tests (#813) --- .evergreen/utils.sh | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 8fc42506a5..55c549d3aa 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -28,12 +28,8 @@ createvirtualenv () { . $VENVPATH/bin/activate fi - PYVER=$(${PYTHON} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") - # pip fails to upgrade in a Python 3.6 venv on Windows. - if [ $PYVER != "3.6" -o "Windows_NT" != "$OS" ] ; then - python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel - fi + python -m pip install --upgrade pip + python -m pip install --upgrade setuptools wheel } # Usage: From 9f29e7313744278484d3731a7a4b563440a12978 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 13:02:32 -0800 Subject: [PATCH 0546/2111] PYTHON-2473 Add basic Github Actions testing (#815) --- .github/workflows/test-python.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/test-python.yml diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml new file mode 100644 index 0000000000..28ee689966 --- /dev/null +++ b/.github/workflows/test-python.yml @@ -0,0 +1,28 @@ +name: Python Tests + +on: + push: + pull_request: + +jobs: + build: + # supercharge/mongodb-github-action requires containers so we don't test other platforms + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-20.04] + python-version: ["3.6", "3.10", "pypy-3.8"] + name: CPython ${{ matrix.python-version }}-${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Start MongoDB + uses: supercharge/mongodb-github-action@1.7.0 + with: + mongodb-version: 4.4 + - name: Run tests + run: | + python setup.py test From 9deb1069f3e4289e2e59f2951faf3d987e3cb04c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 13:50:53 -0800 Subject: [PATCH 0547/2111] PYTHON-1643 Resync read write concern spec tests --- .../connection-string/read-concern.json | 18 ++++++++++ .../document/read-concern.json | 33 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/test/read_write_concern/connection-string/read-concern.json b/test/read_write_concern/connection-string/read-concern.json index dd2b792b29..1ecad8c268 100644 --- a/test/read_write_concern/connection-string/read-concern.json +++ b/test/read_write_concern/connection-string/read-concern.json @@ -24,6 +24,24 @@ "readConcern": { "level": "majority" } + }, + { + "description": "linearizable specified", + "uri": "mongodb://localhost/?readConcernLevel=linearizable", + "valid": true, + "warning": false, + "readConcern": { + "level": "linearizable" + } + }, + { + "description": "available specified", + "uri": "mongodb://localhost/?readConcernLevel=available", + "valid": true, + "warning": false, + "readConcern": { + "level": "available" + } } ] } diff --git a/test/read_write_concern/document/read-concern.json b/test/read_write_concern/document/read-concern.json index ef2bafdf55..187397dae5 100644 --- a/test/read_write_concern/document/read-concern.json +++ b/test/read_write_concern/document/read-concern.json @@ -28,6 +28,39 @@ "level": "local" }, "isServerDefault": false + }, + { + "description": "Linearizable", + "valid": true, + "readConcern": { + "level": "linearizable" + }, + "readConcernDocument": { + "level": "linearizable" + }, + "isServerDefault": false + }, + { + "description": "Snapshot", + "valid": true, + "readConcern": { + "level": "snapshot" + }, + "readConcernDocument": { + "level": "snapshot" + }, + "isServerDefault": false + }, + { + "description": "Available", + "valid": true, + "readConcern": { + "level": "available" + }, + "readConcernDocument": { + "level": "available" + }, + "isServerDefault": false } ] } From d504c1f399de2d2368e2b355d157e83f955b7c5c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 14:03:01 -0800 Subject: [PATCH 0548/2111] PYTHON-2086 Verify max set version and max election id on topologies in SDAM spec tests --- test/test_discovery_and_monitoring.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index c26c0df309..107168f294 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -196,6 +196,11 @@ def check_outcome(self, topology, outcome): self.assertEqual(topology_type_name(expected_topology_type), topology_type_name(topology.description.topology_type)) + self.assertEqual(outcome.get('maxSetVersion'), + topology.description.max_set_version) + self.assertEqual(outcome.get('maxElectionId'), + topology.description.max_election_id) + def create_test(scenario_def): def run_scenario(self): From 57ad29e4bdf5133dade02031386be26b70572690 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 16:29:20 -0800 Subject: [PATCH 0549/2111] PYTHON-2203 Resync auth spec tests --- test/auth/connection-string.json | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/test/auth/connection-string.json b/test/auth/connection-string.json index 5452912e87..2a37ae8df4 100644 --- a/test/auth/connection-string.json +++ b/test/auth/connection-string.json @@ -216,6 +216,18 @@ "mechanism_properties": null } }, + { + "description": "should recognize the mechanism with no username when auth source is explicitly specified (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, { "description": "should throw an exception if supplied a password (MONGODB-X509)", "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", @@ -362,7 +374,7 @@ "credential": null }, { - "description": "authSource without username doesn't create credential", + "description": "authSource without username doesn't create credential (default mechanism)", "uri": "mongodb://localhost/?authSource=foo", "valid": true, "credential": null @@ -389,6 +401,18 @@ "mechanism_properties": null } }, + { + "description": "should recognise the mechanism when auth source is explicitly specified (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, { "description": "should throw an exception if username and no password (MONGODB-AWS)", "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", From 7bd9bd7b471be89f0955ebb80d275c2cca8c024a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 17:55:26 -0800 Subject: [PATCH 0550/2111] PYTHON-2160 Stop using Google Groups email address (#818) --- THIRD-PARTY-NOTICES | 2 +- setup.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 4f2edb8660..28a340b3fb 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -4,7 +4,7 @@ be distributed under licenses different than the PyMongo software. In the event that we accidentally failed to list a required notice, please bring it to our attention through any of the ways detailed here: - mongodb-dev@googlegroups.com + https://jira.mongodb.org/projects/PYTHON The attached notices are provided for information only. diff --git a/setup.py b/setup.py index 464e33e082..7d1ad52dc7 100755 --- a/setup.py +++ b/setup.py @@ -315,7 +315,6 @@ def build_extension(self, ext): description="Python driver for MongoDB ", long_description=readme_content, author="The MongoDB Python Team", - author_email="mongodb-user@googlegroups.com", url="http://github.com/mongodb/mongo-python-driver", keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], From c94a3ad1dff4716f70989a46b126604e46e2e419 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Dec 2021 18:00:41 -0800 Subject: [PATCH 0551/2111] PYTHON-2585 Remove legacy multi-auth code (#816) --- pymongo/auth.py | 8 +-- pymongo/client_options.py | 17 ++---- pymongo/mongo_client.py | 10 +--- pymongo/monitor.py | 9 ++- pymongo/pool.py | 122 +++++++++++++------------------------- pymongo/server.py | 6 +- pymongo/topology.py | 4 +- test/pymongo_mocks.py | 4 +- test/test_auth.py | 20 ++----- test/test_auth_spec.py | 2 +- test/test_client.py | 29 +++++---- test/test_cmap.py | 4 +- test/test_pooling.py | 36 +++++------ test/utils.py | 2 +- 14 files changed, 106 insertions(+), 167 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index 17f3a32fe8..a2e206357c 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -195,7 +195,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Make local _hmac = hmac.HMAC - ctx = sock_info.auth_ctx.get(credentials) + ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): nonce, first_bare = ctx.scram_data res = ctx.speculative_authenticate @@ -424,7 +424,7 @@ def _authenticate_plain(credentials, sock_info): def _authenticate_x509(credentials, sock_info): """Authenticate using MONGODB-X509. """ - ctx = sock_info.auth_ctx.get(credentials) + ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): # MONGODB-X509 is done after the speculative auth step. return @@ -454,8 +454,8 @@ def _authenticate_mongo_cr(credentials, sock_info): def _authenticate_default(credentials, sock_info): if sock_info.max_wire_version >= 7: - if credentials in sock_info.negotiated_mechanisms: - mechs = sock_info.negotiated_mechanisms[credentials] + if sock_info.negotiated_mechs: + mechs = sock_info.negotiated_mechs else: source = credentials.source cmd = sock_info.hello_cmd() diff --git a/pymongo/client_options.py b/pymongo/client_options.py index f7dbf255bc..c2f5ae01cf 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -117,8 +117,9 @@ def _parse_ssl_options(options): return None, allow_invalid_hostnames -def _parse_pool_options(options): +def _parse_pool_options(username, password, database, options): """Parse connection pool options.""" + credentials = _parse_credentials(username, password, database, options) max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE) min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE) max_idle_time_seconds = options.get( @@ -151,7 +152,8 @@ def _parse_pool_options(options): compression_settings, max_connecting=max_connecting, server_api=server_api, - load_balanced=load_balanced) + load_balanced=load_balanced, + credentials=credentials) class ClientOptions(object): @@ -164,10 +166,7 @@ class ClientOptions(object): def __init__(self, username, password, database, options): self.__options = options - self.__codec_options = _parse_codec_options(options) - self.__credentials = _parse_credentials( - username, password, database, options) self.__direct_connection = options.get('directconnection') self.__local_threshold_ms = options.get( 'localthresholdms', common.LOCAL_THRESHOLD_MS) @@ -175,7 +174,8 @@ def __init__(self, username, password, database, options): # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. self.__server_selection_timeout = options.get( 'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT) - self.__pool_options = _parse_pool_options(options) + self.__pool_options = _parse_pool_options( + username, password, database, options) self.__read_preference = _parse_read_preference(options) self.__replica_set_name = options.get('replicaset') self.__write_concern = _parse_write_concern(options) @@ -205,11 +205,6 @@ def codec_options(self): """A :class:`~bson.codec_options.CodecOptions` instance.""" return self.__codec_options - @property - def _credentials(self): - """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" - return self.__credentials - @property def direct_connection(self): """Whether to connect to the deployment in 'Single' topology.""" diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 9c98e5d211..87c87c0241 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -729,11 +729,6 @@ def __init__( options.write_concern, options.read_concern) - self.__all_credentials = {} - creds = options._credentials - if creds: - self.__all_credentials[creds.source] = creds - self._topology_settings = TopologySettings( seeds=seeds, replica_set_name=options.replica_set_name, @@ -1090,8 +1085,7 @@ def _get_socket(self, server, session): if in_txn and session._pinned_connection: yield session._pinned_connection return - with server.get_socket( - self.__all_credentials, handler=err_handler) as sock_info: + with server.get_socket(handler=err_handler) as sock_info: # Pin this session to the selected server or connection. if (in_txn and server.description.server_type in ( SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer)): @@ -1535,7 +1529,7 @@ def _process_periodic_tasks(self): maintain connection pool parameters.""" try: self._process_kill_cursors() - self._topology.update_pool(self.__all_credentials) + self._topology.update_pool() except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: return diff --git a/pymongo/monitor.py b/pymongo/monitor.py index a383e272cd..039ec51942 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -246,7 +246,7 @@ def _check_once(self): if self._cancel_context and self._cancel_context.cancelled: self._reset_connection() - with self._pool.get_socket({}) as sock_info: + with self._pool.get_socket() as sock_info: self._cancel_context = sock_info.cancel_context response, round_trip_time = self._check_with_socket(sock_info) if not response.awaitable: @@ -275,11 +275,10 @@ def _check_with_socket(self, conn): response = conn._hello( cluster_time, self._server_description.topology_version, - self._settings.heartbeat_frequency, - None) + self._settings.heartbeat_frequency) else: # New connection handshake or polling hello (MongoDB <4.4). - response = conn._hello(cluster_time, None, None, None) + response = conn._hello(cluster_time, None, None) return response, time.monotonic() - start @@ -388,7 +387,7 @@ def _run(self): def _ping(self): """Run a "hello" command and return the RTT.""" - with self._pool.get_socket({}) as sock_info: + with self._pool.get_socket() as sock_info: if self._executor._stopped: raise Exception('_RttMonitor closed') start = time.monotonic() diff --git a/pymongo/pool.py b/pymongo/pool.py index 84661c4879..6fe9d024d6 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -275,7 +275,8 @@ class PoolOptions(object): '__ssl_context', '__tls_allow_invalid_hostnames', '__event_listeners', '__appname', '__driver', '__metadata', '__compression_settings', '__max_connecting', - '__pause_enabled', '__server_api', '__load_balanced') + '__pause_enabled', '__server_api', '__load_balanced', + '__credentials') def __init__(self, max_pool_size=MAX_POOL_SIZE, min_pool_size=MIN_POOL_SIZE, @@ -285,7 +286,8 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, tls_allow_invalid_hostnames=False, event_listeners=None, appname=None, driver=None, compression_settings=None, max_connecting=MAX_CONNECTING, - pause_enabled=True, server_api=None, load_balanced=None): + pause_enabled=True, server_api=None, load_balanced=None, + credentials=None): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds @@ -302,6 +304,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__pause_enabled = pause_enabled self.__server_api = server_api self.__load_balanced = load_balanced + self.__credentials = credentials self.__metadata = copy.deepcopy(_METADATA) if appname: self.__metadata['application'] = {'name': appname} @@ -325,6 +328,11 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__metadata['platform'] = "%s|%s" % ( _METADATA['platform'], driver.platform) + @property + def _credentials(self): + """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + @property def non_default_options(self): """The non-default options this pool was created with. @@ -457,25 +465,6 @@ def load_balanced(self): return self.__load_balanced -def _negotiate_creds(all_credentials): - """Return one credential that needs mechanism negotiation, if any. - """ - if all_credentials: - for creds in all_credentials.values(): - if creds.mechanism == 'DEFAULT' and creds.username: - return creds - return None - - -def _speculative_context(all_credentials): - """Return the _AuthContext to use for speculative auth, if any. - """ - if all_credentials and len(all_credentials) == 1: - creds = next(iter(all_credentials.values())) - return auth._AuthContext.from_credentials(creds) - return None - - class _CancellationContext(object): def __init__(self): self._cancelled = False @@ -504,7 +493,7 @@ def __init__(self, sock, pool, address, id): self.sock = sock self.address = address self.id = id - self.authset = set() + self.authed = set() self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False @@ -523,9 +512,8 @@ def __init__(self, sock, pool, address, id): self.compression_context = None self.socket_checker = SocketChecker() # Support for mechanism negotiation on the initial handshake. - # Maps credential to saslSupportedMechs. - self.negotiated_mechanisms = {} - self.auth_ctx = {} + self.negotiated_mechs = None + self.auth_ctx = None # The pool's generation changes with each reset() so we can close # sockets created before the last reset. @@ -567,11 +555,10 @@ def hello_cmd(self): else: return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) - def hello(self, all_credentials=None): - return self._hello(None, None, None, all_credentials) + def hello(self): + return self._hello(None, None, None) - def _hello(self, cluster_time, topology_version, - heartbeat_frequency, all_credentials): + def _hello(self, cluster_time, topology_version, heartbeat_frequency): cmd = self.hello_cmd() performing_handshake = not self.performed_handshake awaitable = False @@ -594,14 +581,15 @@ def _hello(self, cluster_time, topology_version, if not performing_handshake and cluster_time is not None: cmd['$clusterTime'] = cluster_time - # XXX: Simplify in PyMongo 4.0 when all_credentials is always a single - # unchangeable value per MongoClient. - creds = _negotiate_creds(all_credentials) + creds = self.opts._credentials if creds: - cmd['saslSupportedMechs'] = creds.source + '.' + creds.username - auth_ctx = _speculative_context(all_credentials) - if auth_ctx: - cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() + if creds.mechanism == 'DEFAULT' and creds.username: + cmd['saslSupportedMechs'] = creds.source + '.' + creds.username + auth_ctx = auth._AuthContext.from_credentials(creds) + if auth_ctx: + cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() + else: + auth_ctx = None doc = self.command('admin', cmd, publish_events=False, exhaust_allowed=awaitable) @@ -628,11 +616,11 @@ def _hello(self, cluster_time, topology_version, self.op_msg_enabled = True if creds: - self.negotiated_mechanisms[creds] = hello.sasl_supported_mechs + self.negotiated_mechs = hello.sasl_supported_mechs if auth_ctx: auth_ctx.parse_response(hello) if auth_ctx.speculate_succeeded(): - self.auth_ctx[auth_ctx.credentials] = auth_ctx + self.auth_ctx = auth_ctx if self.opts.load_balanced: if not hello.service_id: raise ConfigurationError( @@ -799,41 +787,21 @@ def write_command(self, request_id, msg): helpers._check_command_response(result, self.max_wire_version) return result - def check_auth(self, all_credentials): - """Update this socket's authentication. + def authenticate(self): + """Authenticate to the server if needed. - Log in or out to bring this socket's credentials up to date with - those provided. Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `all_credentials`: dict, maps auth source to MongoCredential. + Can raise ConnectionFailure or OperationFailure. """ - if all_credentials: - for credentials in all_credentials.values(): - if credentials not in self.authset: - self.authenticate(credentials) - # CMAP spec says to publish the ready event only after authenticating # the connection. if not self.ready: + creds = self.opts._credentials + if creds: + auth.authenticate(creds, self) self.ready = True if self.enabled_for_cmap: self.listeners.publish_connection_ready(self.address, self.id) - def authenticate(self, credentials): - """Log in to the server and store these credentials in `authset`. - - Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `credentials`: A MongoCredential. - """ - auth.authenticate(credentials, self) - self.authset.add(credentials) - # negotiated_mechanisms are no longer needed. - self.negotiated_mechanisms.pop(credentials, None) - self.auth_ctx.pop(credentials, None) - def validate_session(self, client, session): """Validate this session before use with client. @@ -1245,7 +1213,7 @@ def close(self): def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) - def remove_stale_sockets(self, reference_generation, all_credentials): + def remove_stale_sockets(self, reference_generation): """Removes stale sockets then adds new ones if pool is too small and has not been reset. The `reference_generation` argument specifies the `generation` at the point in time this operation was requested on the @@ -1281,7 +1249,7 @@ def remove_stale_sockets(self, reference_generation, all_credentials): return self._pending += 1 incremented = True - sock_info = self.connect(all_credentials) + sock_info = self.connect() with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. @@ -1300,7 +1268,7 @@ def remove_stale_sockets(self, reference_generation, all_credentials): self.requests -= 1 self.size_cond.notify() - def connect(self, all_credentials=None): + def connect(self): """Connect to Mongo and return a new SocketInfo. Can raise ConnectionFailure. @@ -1331,10 +1299,10 @@ def connect(self, all_credentials=None): sock_info = SocketInfo(sock, self, self.address, conn_id) try: if self.handshake: - sock_info.hello(all_credentials) + sock_info.hello() self.is_writable = sock_info.is_writable - sock_info.check_auth(all_credentials) + sock_info.authenticate() except BaseException: sock_info.close_socket(ConnectionClosedReason.ERROR) raise @@ -1342,7 +1310,7 @@ def connect(self, all_credentials=None): return sock_info @contextlib.contextmanager - def get_socket(self, all_credentials, handler=None): + def get_socket(self, handler=None): """Get a socket from the pool. Use with a "with" statement. Returns a :class:`SocketInfo` object wrapping a connected @@ -1350,25 +1318,20 @@ def get_socket(self, all_credentials, handler=None): This method should always be used in a with-statement:: - with pool.get_socket(credentials) as socket_info: + with pool.get_socket() as socket_info: socket_info.send_message(msg) data = socket_info.receive_message(op_code, request_id) - The socket is logged in or out as needed to match ``all_credentials`` - using the correct authentication mechanism for the server's wire - protocol version. - Can raise ConnectionFailure or OperationFailure. :Parameters: - - `all_credentials`: dict, maps auth source to MongoCredential. - `handler` (optional): A _MongoClientErrorHandler. """ listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) - sock_info = self._get_socket(all_credentials) + sock_info = self._get_socket() if self.enabled_for_cmap: listeners.publish_connection_checked_out( self.address, sock_info.id) @@ -1407,7 +1370,7 @@ def _raise_if_not_ready(self, emit_event): _raise_connection_failure( self.address, AutoReconnect('connection pool paused')) - def _get_socket(self, all_credentials): + def _get_socket(self): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. # See test.test_client:TestClient.test_fork for an example of @@ -1480,12 +1443,11 @@ def _get_socket(self, all_credentials): continue else: # We need to create a new connection try: - sock_info = self.connect(all_credentials) + sock_info = self.connect() finally: with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() - sock_info.check_auth(all_credentials) except BaseException: if sock_info: # We checked out a socket but authentication failed. diff --git a/pymongo/server.py b/pymongo/server.py index 0a487e8c41..2a0a7267b7 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -77,9 +77,9 @@ def run_operation(self, sock_info, operation, set_secondary_okay, listeners, Can raise ConnectionFailure, OperationFailure, etc. :Parameters: + - `sock_info` - A SocketInfo instance. - `operation`: A _Query or _GetMore object. - `set_secondary_okay`: Pass to operation.get_message. - - `all_credentials`: dict, maps auth source to MongoCredential. - `listeners`: Instance of _EventListeners or None. - `unpack_res`: A callable that decodes the wire protocol response. """ @@ -200,8 +200,8 @@ def run_operation(self, sock_info, operation, set_secondary_okay, listeners, return response - def get_socket(self, all_credentials, handler=None): - return self.pool.get_socket(all_credentials, handler) + def get_socket(self, handler=None): + return self.pool.get_socket(handler) @property def description(self): diff --git a/pymongo/topology.py b/pymongo/topology.py index 6f26cff617..021a1dee60 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -444,7 +444,7 @@ def data_bearing_servers(self): return self._description.known_servers return self._description.readable_servers - def update_pool(self, all_credentials): + def update_pool(self): # Remove any stale sockets and add new sockets if pool is too small. servers = [] with self._lock: @@ -456,7 +456,7 @@ def update_pool(self, all_credentials): for server, generation in servers: try: - server.pool.remove_stale_sockets(generation, all_credentials) + server.pool.remove_stale_sockets(generation) except PyMongoError as exc: ctx = _ErrorContext(exc, 0, generation, False, None) self.handle_error(server.description.address, ctx) diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 8b1ece8ad6..1494fbedcc 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -40,7 +40,7 @@ def __init__(self, client, pair, *args, **kwargs): Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) @contextlib.contextmanager - def get_socket(self, all_credentials, handler=None): + def get_socket(self, handler=None): client = self.client host_and_port = '%s:%s' % (self.mock_host, self.mock_port) if host_and_port in client.mock_down_hosts: @@ -51,7 +51,7 @@ def get_socket(self, all_credentials, handler=None): + client.mock_members + client.mock_mongoses), "bad host: %s" % host_and_port - with Pool.get_socket(self, all_credentials, handler) as sock_info: + with Pool.get_socket(self, handler) as sock_info: sock_info.mock_host = self.mock_host sock_info.mock_port = self.mock_port yield sock_info diff --git a/test/test_auth.py b/test/test_auth.py index d0724dce72..35f198574b 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -30,6 +30,7 @@ from pymongo.saslprep import HAVE_STRINGPREP from test import client_context, IntegrationTest, SkipTest, unittest, Version from test.utils import (delay, + get_pool, ignore_deprecations, single_client, rs_or_single_client, @@ -521,10 +522,12 @@ def test_scram_saslprep(self): def test_cache(self): client = single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) # Force authentication. client.admin.command('ping') - all_credentials = client._MongoClient__all_credentials - credentials = all_credentials.get('admin') cache = credentials.cache self.assertIsNotNone(cache) data = cache.data @@ -536,19 +539,6 @@ def test_cache(self): self.assertIsInstance(salt, bytes) self.assertIsInstance(iterations, int) - pool = next(iter(client._topology._servers.values()))._pool - with pool.get_socket(all_credentials) as sock_info: - authset = sock_info.authset - cached = set(all_credentials.values()) - self.assertEqual(len(cached), 1) - self.assertFalse(authset - cached) - self.assertFalse(cached - authset) - - sock_credentials = next(iter(authset)) - sock_cache = sock_credentials.cache - self.assertIsNotNone(sock_cache) - self.assertEqual(sock_cache.data, data) - def test_scram_threaded(self): coll = client_context.client.db.test diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 8bf0dcb21c..e78b4b209a 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -44,7 +44,7 @@ def run_test(self): self.assertRaises(Exception, MongoClient, uri, connect=False) else: client = MongoClient(uri, connect=False) - credentials = client._MongoClient__options._credentials + credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) else: diff --git a/test/test_client.py b/test/test_client.py index 8c89a45481..8db1cb5621 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -498,7 +498,7 @@ def test_max_idle_time_reaper_default(self): client = rs_or_single_client() server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) self.assertTrue(sock_info in server._pool.sockets) @@ -511,7 +511,7 @@ def test_max_idle_time_reaper_removes_stale_minPoolSize(self): minPoolSize=1) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, two # sockets could be created and checked into the pool. @@ -530,7 +530,7 @@ def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): maxPoolSize=1) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, # maxPoolSize=1 should prevent two sockets from being created. @@ -547,11 +547,11 @@ def test_max_idle_time_reaper_removes_stale(self): client = rs_or_single_client(maxIdleTimeMS=500) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info_one: + with server._pool.get_socket() as sock_info_one: pass # Assert that the pool does not close sockets prematurely. time.sleep(.300) - with server._pool.get_socket({}) as sock_info_two: + with server._pool.get_socket() as sock_info_two: pass self.assertIs(sock_info_one, sock_info_two) wait_until( @@ -574,7 +574,7 @@ def test_min_pool_size(self): "pool initialized with 10 sockets") # Assert that if a socket is closed, a new one takes its place - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: sock_info.close_socket(None) wait_until(lambda: 10 == len(server._pool.sockets), "a closed socket gets replaced from the pool") @@ -586,12 +586,12 @@ def test_max_idle_time_checkout(self): client = rs_or_single_client(maxIdleTimeMS=500) server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) time.sleep(1) # Sleep so that the socket becomes stale. - with server._pool.get_socket({}) as new_sock_info: + with server._pool.get_socket() as new_sock_info: self.assertNotEqual(sock_info, new_sock_info) self.assertEqual(1, len(server._pool.sockets)) self.assertFalse(sock_info in server._pool.sockets) @@ -601,11 +601,11 @@ def test_max_idle_time_checkout(self): client = rs_or_single_client() server = client._get_topology().select_server( readable_server_selector) - with server._pool.get_socket({}) as sock_info: + with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) time.sleep(1) - with server._pool.get_socket({}) as new_sock_info: + with server._pool.get_socket() as new_sock_info: self.assertEqual(sock_info, new_sock_info) self.assertEqual(1, len(server._pool.sockets)) @@ -1106,7 +1106,7 @@ def test_waitQueueTimeoutMS(self): def test_socketKeepAlive(self): pool = get_pool(self.client) - with pool.get_socket({}) as sock_info: + with pool.get_socket() as sock_info: keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) self.assertTrue(keepalive) @@ -1325,8 +1325,8 @@ def test_auth_network_error(self): socket_info = one(pool.sockets) socket_info.sock.close() - # SocketInfo.check_auth logs in with the new credential, but gets a - # socket.error. Should be reraised as AutoReconnect. + # SocketInfo.authenticate logs, but gets a socket.error. Should be + # reraised as AutoReconnect. self.assertRaises(AutoReconnect, c.test.collection.find_one) # No semaphore leak, the pool is allowed to make a new socket. @@ -1521,8 +1521,7 @@ def run(self): try: while True: for _ in range(10): - client._topology.update_pool( - client._MongoClient__all_credentials) + client._topology.update_pool() if generation != pool.gen.get_overall(): break finally: diff --git a/test/test_cmap.py b/test/test_cmap.py index d08cc24a59..20ed7f31ec 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -120,7 +120,7 @@ def wait_for_event(self, op): def check_out(self, op): """Run the 'checkOut' operation.""" label = op['label'] - with self.pool.get_socket({}) as sock_info: + with self.pool.get_socket() as sock_info: # Call 'pin_cursor' so we can hold the socket. sock_info.pin_cursor() if label: @@ -452,7 +452,7 @@ def test_close_leaves_pool_unpaused(self): self.assertEqual(1, listener.event_count(PoolClearedEvent)) self.assertEqual(PoolState.READY, pool.state) # Checking out a connection should succeed - with pool.get_socket({}): + with pool.get_socket(): pass diff --git a/test/test_pooling.py b/test/test_pooling.py index b8f3cf1908..4f0ac3584f 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -118,7 +118,7 @@ def run_mongo_thread(self): self.state = 'get_socket' # Call 'pin_cursor' so we can hold the socket. - with self.pool.get_socket({}) as sock: + with self.pool.get_socket() as sock: sock.pin_cursor() self.sock = sock @@ -196,10 +196,10 @@ def test_pool_reuses_open_socket(self): # Test Pool's _check_closed() method doesn't close a healthy socket. cx_pool = self.create_pool(max_pool_size=10) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: pass - with cx_pool.get_socket({}) as new_sock_info: + with cx_pool.get_socket() as new_sock_info: self.assertEqual(sock_info, new_sock_info) self.assertEqual(1, len(cx_pool.sockets)) @@ -208,11 +208,11 @@ def test_get_socket_and_exception(self): # get_socket() returns socket after a non-network error. cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) with self.assertRaises(ZeroDivisionError): - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: 1 / 0 # Socket was returned, not closed. - with cx_pool.get_socket({}) as new_sock_info: + with cx_pool.get_socket() as new_sock_info: self.assertEqual(sock_info, new_sock_info) self.assertEqual(1, len(cx_pool.sockets)) @@ -221,7 +221,7 @@ def test_pool_removes_closed_socket(self): # Test that Pool removes explicitly closed socket. cx_pool = self.create_pool() - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: # Use SocketInfo's API to close the socket. sock_info.close_socket(None) @@ -233,20 +233,20 @@ def test_pool_removes_dead_socket(self): cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: # Simulate a closed socket without telling the SocketInfo it's # closed. sock_info.sock.close() self.assertTrue(sock_info.socket_closed()) - with cx_pool.get_socket({}) as new_sock_info: + with cx_pool.get_socket() as new_sock_info: self.assertEqual(0, len(cx_pool.sockets)) self.assertNotEqual(sock_info, new_sock_info) self.assertEqual(1, len(cx_pool.sockets)) # Semaphore was released. - with cx_pool.get_socket({}): + with cx_pool.get_socket(): pass def test_socket_closed(self): @@ -290,7 +290,7 @@ def test_socket_checker(self): def test_return_socket_after_reset(self): pool = self.create_pool() - with pool.get_socket({}) as sock: + with pool.get_socket() as sock: self.assertEqual(pool.active_sockets, 1) self.assertEqual(pool.operation_count, 1) pool.reset() @@ -309,7 +309,7 @@ def test_pool_check(self): cx_pool._check_interval_seconds = 0 # Always check. self.addCleanup(cx_pool.close) - with cx_pool.get_socket({}) as sock_info: + with cx_pool.get_socket() as sock_info: # Simulate a closed socket without telling the SocketInfo it's # closed. sock_info.sock.close() @@ -317,12 +317,12 @@ def test_pool_check(self): # Swap pool's address with a bad one. address, cx_pool.address = cx_pool.address, ('foo.com', 1234) with self.assertRaises(AutoReconnect): - with cx_pool.get_socket({}): + with cx_pool.get_socket(): pass # Back to normal, semaphore was correctly released. cx_pool.address = address - with cx_pool.get_socket({}): + with cx_pool.get_socket(): pass def test_wait_queue_timeout(self): @@ -331,10 +331,10 @@ def test_wait_queue_timeout(self): max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) - with pool.get_socket({}) as sock_info: + with pool.get_socket() as sock_info: start = time.time() with self.assertRaises(ConnectionFailure): - with pool.get_socket({}): + with pool.get_socket(): pass duration = time.time() - start @@ -349,7 +349,7 @@ def test_no_wait_queue_timeout(self): self.addCleanup(pool.close) # Reach max_size. - with pool.get_socket({}) as s1: + with pool.get_socket() as s1: t = SocketGetter(self.c, pool) t.start() while t.state != 'get_socket': @@ -370,7 +370,7 @@ def test_checkout_more_than_max_pool_size(self): socks = [] for _ in range(2): # Call 'pin_cursor' so we can hold the socket. - with pool.get_socket({}) as sock: + with pool.get_socket() as sock: sock.pin_cursor() socks.append(sock) @@ -515,7 +515,7 @@ def test_max_pool_size_with_connection_failure(self): # socket from pool" instead of AutoReconnect. for i in range(2): with self.assertRaises(AutoReconnect) as context: - with test_pool.get_socket({}): + with test_pool.get_socket(): pass # Testing for AutoReconnect instead of ConnectionFailure, above, diff --git a/test/utils.py b/test/utils.py index bdea5c69c2..efc6e24879 100644 --- a/test/utils.py +++ b/test/utils.py @@ -269,7 +269,7 @@ def __init__(self, address, options, handshake=True): def stale_generation(self, gen, service_id): return self.gen.stale(gen, service_id) - def get_socket(self, all_credentials, handler=None): + def get_socket(self, handler=None): return MockSocketInfo() def return_socket(self, *args, **kwargs): From 797197e73bd18fc7c4076408e68aa745f8070c49 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Dec 2021 10:22:49 -0800 Subject: [PATCH 0552/2111] PYTHON-2763 Remove outdated check_keys and $clusterTime logic (#817) --- pymongo/_cmessagemodule.c | 105 ++++++-------------------------------- pymongo/collection.py | 20 +++----- pymongo/encryption.py | 10 +--- pymongo/message.py | 90 ++++++++++++++------------------ pymongo/network.py | 12 ++--- pymongo/pool.py | 5 +- 6 files changed, 68 insertions(+), 174 deletions(-) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 845c14bd54..517c0fb798 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -67,7 +67,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { struct module_state *state = GETSTATE(self); int request_id = rand(); - PyObject* cluster_time = NULL; unsigned int flags; char* collection_name = NULL; Py_ssize_t collection_name_length; @@ -79,18 +78,16 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; - unsigned char check_keys = 0; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "Iet#iiOOO&|b", + if (!PyArg_ParseTuple(args, "Iet#iiOOO&", &flags, "utf-8", &collection_name, &collection_name_length, &num_to_skip, &num_to_return, &query, &field_selector, - convert_codec_options, &options, - &check_keys)) { + convert_codec_options, &options)) { return NULL; } buffer = buffer_new(); @@ -104,29 +101,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { goto fail; } - /* Pop $clusterTime from dict and write it at the end, avoiding an error - * from the $-prefix and check_keys. - * - * If "dict" is a defaultdict we don't want to call PyMapping_GetItemString - * on it. That would **create** an _id where one didn't previously exist - * (PYTHON-871). - */ - if (PyDict_Check(query)) { - cluster_time = PyDict_GetItemString(query, "$clusterTime"); - if (cluster_time) { - /* PyDict_GetItemString returns a borrowed reference. */ - Py_INCREF(cluster_time); - if (-1 == PyMapping_DelItemString(query, "$clusterTime")) { - goto fail; - } - } - } else if (PyMapping_HasKeyString(query, "$clusterTime")) { - cluster_time = PyMapping_GetItemString(query, "$clusterTime"); - if (!cluster_time - || -1 == PyMapping_DelItemString(query, "$clusterTime")) { - goto fail; - } - } if (!buffer_write_int32(buffer, (int32_t)request_id) || !buffer_write_bytes(buffer, "\x00\x00\x00\x00\xd4\x07\x00\x00", 8) || !buffer_write_int32(buffer, (int32_t)flags) || @@ -138,37 +112,10 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { } begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, query, check_keys, &options, 1)) { + if (!write_dict(state->_cbson, buffer, query, 0, &options, 1)) { goto fail; } - /* back up a byte and write $clusterTime */ - if (cluster_time) { - int length; - char zero = 0; - - buffer_update_position(buffer, buffer_get_position(buffer) - 1); - if (!write_pair(state->_cbson, buffer, "$clusterTime", 12, cluster_time, - 0, &options, 1)) { - goto fail; - } - - if (!buffer_write_bytes(buffer, &zero, 1)) { - goto fail; - } - - length = buffer_get_position(buffer) - begin; - buffer_write_int32_at_position(buffer, begin, (int32_t)length); - - /* undo popping $clusterTime */ - if (-1 == PyMapping_SetItemString( - query, "$clusterTime", cluster_time)) { - goto fail; - } - - Py_CLEAR(cluster_time); - } - max_size = buffer_get_position(buffer) - begin; if (field_selector != Py_None) { @@ -196,7 +143,6 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { if (buffer) { buffer_free(buffer); } - Py_XDECREF(cluster_time); return result; } @@ -274,7 +220,6 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_ssize_t identifier_length = 0; PyObject* docs; PyObject* doc; - unsigned char check_keys = 0; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -283,15 +228,14 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { PyObject* result = NULL; PyObject* iterator = NULL; - /*flags, command, identifier, docs, check_keys, opts*/ - if (!PyArg_ParseTuple(args, "IOet#ObO&", + /*flags, command, identifier, docs, opts*/ + if (!PyArg_ParseTuple(args, "IOet#OO&", &flags, &command, "utf-8", &identifier, &identifier_length, &docs, - &check_keys, convert_codec_options, &options)) { return NULL; } @@ -340,8 +284,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { } while ((doc = PyIter_Next(iterator)) != NULL) { int encoded_doc_size = write_dict( - state->_cbson, buffer, doc, check_keys, - &options, 1); + state->_cbson, buffer, doc, 0, &options, 1); if (!encoded_doc_size) { Py_CLEAR(doc); goto fail; @@ -400,7 +343,7 @@ _set_document_too_large(int size, long max) { static int _batched_op_msg( - unsigned char op, unsigned char check_keys, unsigned char ack, + unsigned char op, unsigned char ack, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t options, buffer_t buffer, struct module_state *state) { @@ -471,16 +414,12 @@ _batched_op_msg( } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) goto fail; break; @@ -510,8 +449,7 @@ _batched_op_msg( int cur_size; int doc_too_large = 0; int unacked_doc_too_large = 0; - if (!write_dict(state->_cbson, buffer, doc, check_keys, - &options, 1)) { + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } cur_size = buffer_get_position(buffer) - cur_doc_begin; @@ -584,7 +522,6 @@ _batched_op_msg( static PyObject* _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; - unsigned char check_keys; unsigned char ack; PyObject* command; PyObject* docs; @@ -595,8 +532,8 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObbO&O", - &op, &command, &docs, &check_keys, &ack, + if (!PyArg_ParseTuple(args, "bOObO&O", + &op, &command, &docs, &ack, convert_codec_options, &options, &ctx)) { return NULL; @@ -611,7 +548,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { if (!_batched_op_msg( op, - check_keys, ack, command, docs, @@ -637,7 +573,6 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { static PyObject* _cbson_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; - unsigned char check_keys; unsigned char ack; int request_id; int position; @@ -650,8 +585,8 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObbO&O", - &op, &command, &docs, &check_keys, &ack, + if (!PyArg_ParseTuple(args, "bOObO&O", + &op, &command, &docs, &ack, convert_codec_options, &options, &ctx)) { return NULL; @@ -676,7 +611,6 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { if (!_batched_op_msg( op, - check_keys, ack, command, docs, @@ -707,7 +641,7 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { static int _batched_write_command( - char* ns, Py_ssize_t ns_len, unsigned char op, int check_keys, + char* ns, Py_ssize_t ns_len, unsigned char op, PyObject* command, PyObject* docs, PyObject* ctx, PyObject* to_publish, codec_options_t options, buffer_t buffer, struct module_state *state) { @@ -786,16 +720,12 @@ _batched_write_command( } case _UPDATE: { - /* MongoDB does key validation for update. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "updates\x00", 8)) goto fail; break; } case _DELETE: { - /* Never check keys in a delete command. */ - check_keys = 0; if (!buffer_write_bytes(buffer, "deletes\x00", 8)) goto fail; break; @@ -838,8 +768,7 @@ _batched_write_command( goto fail; } cur_doc_begin = buffer_get_position(buffer); - if (!write_dict(state->_cbson, buffer, doc, - check_keys, &options, 1)) { + if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } @@ -915,7 +844,6 @@ static PyObject* _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; - unsigned char check_keys; Py_ssize_t ns_len; PyObject* command; PyObject* docs; @@ -926,8 +854,8 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "et#bOObO&O", "utf-8", - &ns, &ns_len, &op, &command, &docs, &check_keys, + if (!PyArg_ParseTuple(args, "et#bOOO&O", "utf-8", + &ns, &ns_len, &op, &command, &docs, convert_codec_options, &options, &ctx)) { return NULL; @@ -945,7 +873,6 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { ns, ns_len, op, - check_keys, command, docs, ctx, diff --git a/pymongo/collection.py b/pymongo/collection.py index ea11875ce2..70c13c34f4 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -441,9 +441,7 @@ def bulk_write(self, requests, ordered=True, return BulkWriteResult({}, False) def _insert_one( - self, doc, ordered, - check_keys, write_concern, op_id, bypass_doc_val, - session): + self, doc, ordered, write_concern, op_id, bypass_doc_val, session): """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged @@ -462,7 +460,6 @@ def _insert_command(session, sock_info, retryable_write): command, write_concern=write_concern, codec_options=self.__write_response_codec_options, - check_keys=check_keys, session=session, client=self.__database.client, retryable_write=retryable_write) @@ -520,7 +517,7 @@ def insert_one(self, document, bypass_document_validation=False, write_concern = self._write_concern_for(session) return InsertOneResult( self._insert_one( - document, ordered=True, check_keys=False, + document, ordered=True, write_concern=write_concern, op_id=None, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) @@ -588,8 +585,7 @@ def gen(): return InsertManyResult(inserted_ids, write_concern.acknowledged) def _update(self, sock_info, criteria, document, upsert=False, - check_keys=False, multi=False, - write_concern=None, op_id=None, ordered=True, + multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None, retryable_write=False, let=None): """Internal update / replace helper.""" @@ -660,16 +656,14 @@ def _update(self, sock_info, criteria, document, upsert=False, return result def _update_retryable( - self, criteria, document, upsert=False, - check_keys=False, multi=False, + self, criteria, document, upsert=False, multi=False, write_concern=None, op_id=None, ordered=True, bypass_doc_val=False, collation=None, array_filters=None, hint=None, session=None, let=None): """Internal update / replace helper.""" def _update(session, sock_info, retryable_write): return self._update( - sock_info, criteria, document, upsert=upsert, - check_keys=check_keys, multi=multi, + sock_info, criteria, document, upsert=upsert, multi=multi, write_concern=write_concern, op_id=op_id, ordered=ordered, bypass_doc_val=bypass_doc_val, collation=collation, array_filters=array_filters, hint=hint, session=session, @@ -830,7 +824,7 @@ def update_one(self, filter, update, upsert=False, write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, check_keys=False, + filter, update, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, @@ -910,7 +904,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, check_keys=False, multi=True, + filter, update, upsert, multi=True, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 064ba48d51..4b08492ee9 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -301,30 +301,24 @@ def _get_internal_client(encrypter, mongo_client): opts._kms_providers, schema_map)) self._closed = False - def encrypt(self, database, cmd, check_keys, codec_options): + def encrypt(self, database, cmd, codec_options): """Encrypt a MongoDB command. :Parameters: - `database`: The database for this command. - `cmd`: A command document. - - `check_keys`: If True, check `cmd` for invalid keys. - `codec_options`: The CodecOptions to use while encoding `cmd`. :Returns: The encrypted command to execute. """ self._check_closed() - # Workaround for $clusterTime which is incompatible with - # check_keys. - cluster_time = check_keys and cmd.pop('$clusterTime', None) - encoded_cmd = _dict_to_bson(cmd, check_keys, codec_options) + encoded_cmd = _dict_to_bson(cmd, False, codec_options) with _wrap_encryption_errors(): encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. encrypt_cmd = _inflate_bson( encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - if cluster_time: - encrypt_cmd['$clusterTime'] = cluster_time return encrypt_cmd def decrypt(self, response): diff --git a/pymongo/message.py b/pymongo/message.py index 86a83f152e..bccf0a9f51 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -331,7 +331,7 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): spec = self.as_command(sock_info)[0] request_id, msg, size, _ = _op_msg( 0, spec, self.db, self.read_preference, - set_secondary_ok, False, self.codec_options, + set_secondary_ok, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -430,7 +430,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): flags = 0 request_id, msg, size, _ = _op_msg( flags, spec, self.db, None, - False, False, self.codec_options, + False, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -526,7 +526,7 @@ def __pack_message(operation, data): _pack_byte = struct.Struct(" max_cmd_size diff --git a/pymongo/network.py b/pymongo/network.py index 7ec6540dd4..10d71308f6 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -41,7 +41,7 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, read_preference, codec_options, session, client, check=True, allowable_errors=None, address=None, - check_keys=False, listeners=None, max_bson_size=None, + listeners=None, max_bson_size=None, read_concern=None, parse_write_concern_error=False, collation=None, @@ -65,7 +65,6 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - `address`: the (host, port) of `sock` - - `check_keys`: if True, check `spec` for invalid keys - `listeners`: An instance of :class:`~pymongo.monitoring.EventListeners` - `max_bson_size`: The maximum encoded bson size for this server - `read_concern`: The read concern for this command. @@ -107,16 +106,13 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, if (client and client._encrypter and not client._encrypter._bypass_auto_encryption): - spec = orig = client._encrypter.encrypt( - dbname, spec, check_keys, codec_options) - # We already checked the keys, no need to do it again. - check_keys = False + spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) if use_op_msg: flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 request_id, msg, size, max_doc_size = message._op_msg( - flags, spec, dbname, read_preference, secondary_ok, check_keys, + flags, spec, dbname, read_preference, secondary_ok, codec_options, ctx=compression_ctx) # If this is an unacknowledged write then make sure the encoded doc(s) # are small enough, otherwise rely on the server to return an error. @@ -125,7 +121,7 @@ def command(sock_info, dbname, spec, secondary_ok, is_mongos, message._raise_document_too_large(name, size, max_bson_size) else: request_id, msg, size = message._query( - flags, ns, 0, -1, spec, None, codec_options, check_keys, + flags, ns, 0, -1, spec, None, codec_options, compression_ctx) if (max_bson_size is not None diff --git a/pymongo/pool.py b/pymongo/pool.py index 6fe9d024d6..99e64d8b2b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -644,7 +644,7 @@ def _next_reply(self): def command(self, dbname, spec, secondary_ok=False, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, - allowable_errors=None, check_keys=False, + allowable_errors=None, read_concern=None, write_concern=None, parse_write_concern_error=False, @@ -665,7 +665,6 @@ def command(self, dbname, spec, secondary_ok=False, - `codec_options`: a CodecOptions instance - `check`: raise OperationFailure if there are errors - `allowable_errors`: errors to ignore if `check` is True - - `check_keys`: if True, check `spec` for invalid keys - `read_concern`: The read concern for this command. - `write_concern`: The write concern for this command. - `parse_write_concern_error`: Whether to parse the @@ -707,7 +706,7 @@ def command(self, dbname, spec, secondary_ok=False, return command(self, dbname, spec, secondary_ok, self.is_mongos, read_preference, codec_options, session, client, check, allowable_errors, - self.address, check_keys, listeners, + self.address, listeners, self.max_bson_size, read_concern, parse_write_concern_error=parse_write_concern_error, collation=collation, From a7891480d1799233451861550a5eab4265f251c1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 10 Dec 2021 13:34:18 -0600 Subject: [PATCH 0553/2111] PYTHON-2353 Update create_collection docs with more options (#820) --- pymongo/database.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index c7ed38b73f..33ae4038c8 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -277,15 +277,30 @@ def create_collection(self, name, codec_options=None, as keyword arguments to this method. Valid options include, but are not limited to: - - ``size``: desired initial size for the collection (in + - ``size`` (int): desired initial size for the collection (in bytes). For capped collections this size is the max size of the collection. - - ``capped``: if True, this is a capped collection - - ``max``: maximum number of objects if capped (optional) - - ``timeseries``: a document specifying configuration options for + - ``capped`` (bool): if True, this is a capped collection + - ``max`` (int): maximum number of objects if capped (optional) + - ``timeseries`` (dict): a document specifying configuration options for timeseries collections - - ``expireAfterSeconds``: the number of seconds after which a + - ``expireAfterSeconds`` (int): the number of seconds after which a document in a timeseries collection expires + - ``validator`` (dict): a document specifying validation rules or expressions + for the collection + - ``validationLevel`` (str): how strictly to apply the + validation rules to existing documents during an update. The default level + is "strict" + - ``validationAction`` (str): whether to "error" on invalid documents + (the default) or just "warn" about the violations but allow invalid + documents to be inserted + - ``indexOptionDefaults`` (dict): a document specifying a default configuration + for indexes when creating a collection + - ``viewOn`` (str): the name of the source collection or view from which + to create the view + - ``pipeline`` (list): a list of aggregation pipeline stages + - ``comment`` (str): a user-provided comment to attach to this command. + This option is only supported on MongoDB >= 4.4. .. versionchanged:: 3.11 This method is now supported inside multi-document transactions From b2f3c66575efdfbb9d6c8aee14eb69ed40fbf649 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 13 Dec 2021 14:41:25 -0800 Subject: [PATCH 0554/2111] PYTHON-2888 Migrate from json.send to perf.send (#819) Rename ops_per_sec to bytes_per_sec to better reflect the perf measurement. --- .evergreen/perf.yml | 3 +-- test/performance/perf_test.py | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 70e83ff582..8b3638d535 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -133,9 +133,8 @@ functions: file_location: src/report.json "send dashboard data": - - command: json.send + - command: perf.send params: - name: perf file: src/results.json "cleanup": diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index d84e67aca4..dab7138add 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -47,9 +47,7 @@ result_data = [] def tearDownModule(): - output = json.dumps({ - 'results': result_data - }, indent=4) + output = json.dumps(result_data, indent=4) if OUTPUT_FILE: with open(OUTPUT_FILE, 'w') as opf: opf.write(output) @@ -79,16 +77,22 @@ def setUp(self): def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) - result = self.data_size / median + bytes_per_sec = self.data_size / median print('Running %s. MEDIAN=%s' % (self.__class__.__name__, self.percentile(50))) result_data.append({ - 'name': name, - 'results': { - '1': { - 'ops_per_sec': result - } - } + 'info': { + 'test_name': name, + 'args': { + 'threads': 1, + }, + }, + 'metrics': [ + { + 'name': 'bytes_per_sec', + 'value': bytes_per_sec + }, + ] }) def before(self): From ff3a8b44dcc9b5fc972d859a0a1ba249cb579aaa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 13 Dec 2021 15:47:34 -0800 Subject: [PATCH 0555/2111] PYTHON-1864 PYTHON-2931 Spec complaint $readPreference (#809) Stop sending $readPreference to standalone servers. Stop sending $readPreference primary because it's the server default. Remove outdated secondary_ok flag. --- pymongo/aggregation.py | 13 ++--- pymongo/change_stream.py | 2 +- pymongo/collection.py | 35 ++++++------- pymongo/database.py | 20 ++++---- pymongo/message.py | 22 +++----- pymongo/mongo_client.py | 50 ++++++++----------- pymongo/network.py | 9 ++-- pymongo/pool.py | 9 ++-- pymongo/server.py | 4 +- .../aggregate-write-readPreference.json | 4 +- .../db-aggregate-write-readPreference.json | 4 +- .../mockupdb/test_mongos_command_read_mode.py | 7 ++- test/mockupdb/test_op_msg_read_preference.py | 23 ++++++--- test/mockupdb/test_query_read_pref_sharded.py | 13 ++--- test/test_cursor.py | 10 +++- test/test_read_preferences.py | 19 ++++--- 16 files changed, 123 insertions(+), 121 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index a5a7abaed7..8fb0225eb3 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -92,11 +92,6 @@ def _database(self): """The database against which the aggregation command is run.""" raise NotImplementedError - def _process_result(self, result, session, server, sock_info, secondary_ok): - if self._result_processor: - self._result_processor( - result, session, server, sock_info, secondary_ok) - def get_read_preference(self, session): if self._write_preference: return self._write_preference @@ -105,7 +100,7 @@ def get_read_preference(self, session): self._write_preference = pref = _AggWritePref(pref) return pref - def get_cursor(self, session, server, sock_info, secondary_ok): + def get_cursor(self, session, server, sock_info, read_preference): # Serialize command. cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) @@ -134,8 +129,7 @@ def get_cursor(self, session, server, sock_info, secondary_ok): result = sock_info.command( self._database.name, cmd, - secondary_ok, - self.get_read_preference(session), + read_preference, self._target.codec_options, parse_write_concern_error=True, read_concern=read_concern, @@ -145,7 +139,8 @@ def get_cursor(self, session, server, sock_info, secondary_ok): client=self._database.client, user_fields=self._user_fields) - self._process_result(result, session, server, sock_info, secondary_ok) + if self._result_processor: + self._result_processor(result, sock_info) # Extract cursor from result or mock/fake one if necessary. if 'cursor' in result: diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 00d049a838..54bf98d83e 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -148,7 +148,7 @@ def _aggregation_pipeline(self): full_pipeline.extend(self._pipeline) return full_pipeline - def _process_result(self, result, session, server, sock_info, secondary_ok): + def _process_result(self, result, sock_info): """Callback that caches the postBatchResumeToken or startAtOperationTime from a changeStream aggregate command response containing an empty batch of change documents. diff --git a/pymongo/collection.py b/pymongo/collection.py index 70c13c34f4..82e29f4061 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -186,7 +186,7 @@ def _socket_for_reads(self, session): def _socket_for_writes(self, session): return self.__database.client._socket_for_writes(session) - def _command(self, sock_info, command, secondary_ok=False, + def _command(self, sock_info, command, read_preference=None, codec_options=None, check=True, allowable_errors=None, read_concern=None, @@ -200,7 +200,6 @@ def _command(self, sock_info, command, secondary_ok=False, :Parameters: - `sock_info` - A SocketInfo instance. - `command` - The command itself, as a SON instance. - - `secondary_ok`: whether to set the secondaryOkay wire protocol bit. - `codec_options` (optional) - An instance of :class:`~bson.codec_options.CodecOptions`. - `check`: raise OperationFailure if there are errors @@ -226,7 +225,6 @@ def _command(self, sock_info, command, secondary_ok=False, return sock_info.command( self.__database.name, command, - secondary_ok, read_preference or self._read_preference_for(session), codec_options or self.codec_options, check, @@ -1356,14 +1354,14 @@ def find_raw_batches(self, *args, **kwargs): return RawBatchCursor(self, *args, **kwargs) - def _count_cmd(self, session, sock_info, secondary_ok, cmd, collation): + def _count_cmd(self, session, sock_info, read_preference, cmd, collation): """Internal count command helper.""" # XXX: "ns missing" checks can be removed when we drop support for # MongoDB 3.0, see SERVER-17051. res = self._command( sock_info, cmd, - secondary_ok, + read_preference=read_preference, allowable_errors=["ns missing"], codec_options=self.__write_response_codec_options, read_concern=self.read_concern, @@ -1374,12 +1372,12 @@ def _count_cmd(self, session, sock_info, secondary_ok, cmd, collation): return int(res["n"]) def _aggregate_one_result( - self, sock_info, secondary_ok, cmd, collation, session): + self, sock_info, read_preference, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" result = self._command( sock_info, cmd, - secondary_ok, + read_preference, allowable_errors=[26], # Ignore NamespaceNotFound. codec_options=self.__write_response_codec_options, read_concern=self.read_concern, @@ -1413,7 +1411,7 @@ def estimated_document_count(self, **kwargs): raise ConfigurationError( 'estimated_document_count does not support sessions') - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): if sock_info.max_wire_version >= 12: # MongoDB 4.9+ pipeline = [ @@ -1425,7 +1423,8 @@ def _cmd(session, server, sock_info, secondary_ok): ('cursor', {})]) cmd.update(kwargs) result = self._aggregate_one_result( - sock_info, secondary_ok, cmd, collation=None, session=session) + sock_info, read_preference, cmd, collation=None, + session=session) if not result: return 0 return int(result['n']) @@ -1433,7 +1432,8 @@ def _cmd(session, server, sock_info, secondary_ok): # MongoDB < 4.9 cmd = SON([('count', self.__name)]) cmd.update(kwargs) - return self._count_cmd(None, sock_info, secondary_ok, cmd, None) + return self._count_cmd( + None, sock_info, read_preference, cmd, collation=None) return self.__database.client._retryable_read( _cmd, self.read_preference, None) @@ -1506,9 +1506,9 @@ def count_documents(self, filter, session=None, **kwargs): collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): result = self._aggregate_one_result( - sock_info, secondary_ok, cmd, collation, session) + sock_info, read_preference, cmd, collation, session) if not result: return 0 return result['n'] @@ -1799,12 +1799,12 @@ def list_indexes(self, session=None): read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) with self.__database.client._tmp_session(session, False) as s: try: - cursor = self._command(sock_info, cmd, secondary_ok, - read_pref, + cursor = self._command(sock_info, cmd, + read_preference, codec_options, session=s)["cursor"] except OperationFailure as exc: @@ -2220,9 +2220,10 @@ def distinct(self, key, filter=None, session=None, **kwargs): kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop('collation', None)) cmd.update(kwargs) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): return self._command( - sock_info, cmd, secondary_ok, read_concern=self.read_concern, + sock_info, cmd, read_preference=read_preference, + read_concern=self.read_concern, collation=collation, session=session, user_fields={"values": 1})["values"] diff --git a/pymongo/database.py b/pymongo/database.py index 33ae4038c8..a6c1275126 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -492,7 +492,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - def _command(self, sock_info, command, secondary_ok=False, value=1, check=True, + def _command(self, sock_info, command, value=1, check=True, allowable_errors=None, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, write_concern=None, @@ -506,7 +506,6 @@ def _command(self, sock_info, command, secondary_ok=False, value=1, check=True, return sock_info.command( self.__name, command, - secondary_ok, read_preference, codec_options, check, @@ -605,8 +604,8 @@ def command(self, command, value=1, check=True, read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) with self.__client._socket_for_reads( - read_preference, session) as (sock_info, secondary_ok): - return self._command(sock_info, command, secondary_ok, value, + read_preference, session) as (sock_info, read_preference): + return self._command(sock_info, command, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) @@ -618,16 +617,15 @@ def _retryable_read_command(self, command, value=1, check=True, read_preference = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, secondary_ok): - return self._command(sock_info, command, secondary_ok, value, + def _cmd(session, server, sock_info, read_preference): + return self._command(sock_info, command, value, check, allowable_errors, read_preference, codec_options, session=session, **kwargs) return self.__client._retryable_read( _cmd, read_preference, session) - def _list_collections(self, sock_info, secondary_okay, session, - read_preference, **kwargs): + def _list_collections(self, sock_info, session, read_preference, **kwargs): """Internal listCollections helper.""" coll = self.get_collection( @@ -638,7 +636,7 @@ def _list_collections(self, sock_info, secondary_okay, session, with self.__client._tmp_session( session, close=False) as tmp_session: cursor = self._command( - sock_info, cmd, secondary_okay, + sock_info, cmd, read_preference=read_preference, session=tmp_session)["cursor"] cmd_cursor = CommandCursor( @@ -674,9 +672,9 @@ def list_collections(self, session=None, filter=None, **kwargs): read_pref = ((session and session._txn_read_preference()) or ReadPreference.PRIMARY) - def _cmd(session, server, sock_info, secondary_okay): + def _cmd(session, server, sock_info, read_preference): return self._list_collections( - sock_info, secondary_okay, session, read_preference=read_pref, + sock_info, session, read_preference=read_preference, **kwargs) return self.__client._retryable_read( diff --git a/pymongo/message.py b/pymongo/message.py index bccf0a9f51..2e09df457e 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -316,9 +316,9 @@ def as_command(self, sock_info): self._as_command = cmd, self.db return self._as_command - def get_message(self, set_secondary_ok, sock_info, use_cmd=False): + def get_message(self, read_preference, sock_info, use_cmd=False): """Get a query message, possibly setting the secondaryOk bit.""" - if set_secondary_ok: + if read_preference.mode: # Set the secondaryOk bit. flags = self.flags | 4 else: @@ -330,8 +330,7 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] request_id, msg, size, _ = _op_msg( - 0, spec, self.db, self.read_preference, - set_secondary_ok, self.codec_options, + 0, spec, self.db, read_preference, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -346,8 +345,7 @@ def get_message(self, set_secondary_ok, sock_info, use_cmd=False): ntoreturn = self.limit if sock_info.is_mongos: - spec = _maybe_add_read_preference(spec, - self.read_preference) + spec = _maybe_add_read_preference(spec, read_preference) return _query(flags, ns, self.ntoskip, ntoreturn, spec, None if use_cmd else self.fields, @@ -429,8 +427,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): else: flags = 0 request_id, msg, size, _ = _op_msg( - flags, spec, self.db, None, - False, self.codec_options, + flags, spec, self.db, None, self.codec_options, ctx=sock_info.compression_context) return request_id, msg, size @@ -572,16 +569,13 @@ def _op_msg_uncompressed(flags, command, identifier, docs, opts): _op_msg_uncompressed = _cmessage._op_msg -def _op_msg(flags, command, dbname, read_preference, secondary_ok, - opts, ctx=None): +def _op_msg(flags, command, dbname, read_preference, opts, ctx=None): """Get a OP_MSG message.""" command['$db'] = dbname # getMore commands do not send $readPreference. if read_preference is not None and "$readPreference" not in command: - if secondary_ok and not read_preference.mode: - command["$readPreference"] = ( - ReadPreference.PRIMARY_PREFERRED.document) - else: + # Only send $readPreference if it's not primary (the default). + if read_preference.mode: command["$readPreference"] = read_preference.document name = next(iter(command)) try: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 87c87c0241..a133c96a7f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1025,7 +1025,7 @@ def _end_sessions(self, session_ids): # another session. with self._socket_for_reads( ReadPreference.PRIMARY_PREFERRED, - None) as (sock_info, secondary_ok): + None) as (sock_info, read_pref): if not sock_info.supports_sessions: return @@ -1033,7 +1033,7 @@ def _end_sessions(self, session_ids): spec = SON([('endSessions', session_ids[i:i + common._MAX_END_SESSIONS])]) sock_info.command( - 'admin', spec, secondary_ok=secondary_ok, client=self) + 'admin', spec, read_preference=read_pref, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions # command. @@ -1136,39 +1136,33 @@ def _socket_for_writes(self, session): return self._get_socket(server, session) @contextlib.contextmanager - def _secondaryok_for_server(self, read_preference, server, session): + def _socket_from_server(self, read_preference, server, session): assert read_preference is not None, "read_preference must not be None" # Get a socket for a server matching the read preference, and yield - # sock_info, secondary_ok. Server Selection Spec: "SecondaryOK must - # be sent to mongods with topology type Single. If the server type is - # Mongos, follow the rules for passing read preference to mongos, even - # for topology type Single." + # sock_info with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. # Thread safe: if the type is single it cannot change. topology = self._get_topology() single = topology.description.topology_type == TOPOLOGY_TYPE.Single with self._get_socket(server, session) as sock_info: - secondary_ok = (single and not sock_info.is_mongos) or ( - read_preference.mode != ReadPreference.PRIMARY.mode) - yield sock_info, secondary_ok + if single: + if sock_info.is_repl: + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif sock_info.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield sock_info, read_preference - @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): assert read_preference is not None, "read_preference must not be None" - # Get a socket for a server matching the read preference, and yield - # sock_info, secondary_ok. Server Selection Spec: "SecondaryOK must be - # sent to mongods with topology type Single. If the server type is - # Mongos, follow the rules for passing read preference to mongos, even - # for topology type Single." - # Thread safe: if the type is single it cannot change. topology = self._get_topology() server = self._select_server(read_preference, session) - single = topology.description.topology_type == TOPOLOGY_TYPE.Single - - with self._get_socket(server, session) as sock_info: - secondary_ok = (single and not sock_info.is_mongos) or ( - read_preference != ReadPreference.PRIMARY) - yield sock_info, secondary_ok + return self._socket_from_server(read_preference, server, session) def _should_pin_cursor(self, session): return (self.__options.load_balanced and @@ -1195,9 +1189,9 @@ def _run_operation(self, operation, unpack_res, address=None): operation.sock_mgr.sock, operation, True, self._event_listeners, unpack_res) - def _cmd(session, server, sock_info, secondary_ok): + def _cmd(session, server, sock_info, read_preference): return server.run_operation( - sock_info, operation, secondary_ok, self._event_listeners, + sock_info, operation, read_preference, self._event_listeners, unpack_res) return self._retryable_read( @@ -1292,13 +1286,13 @@ def _retryable_read(self, func, read_pref, session, address=None, try: server = self._select_server( read_pref, session, address=address) - with self._secondaryok_for_server(read_pref, server, session) as ( - sock_info, secondary_ok): + with self._socket_from_server(read_pref, server, session) as ( + sock_info, read_pref): if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. raise last_error - return func(session, server, sock_info, secondary_ok) + return func(session, server, sock_info, read_pref) except ServerSelectionTimeoutError: if retrying: # The application may think the write was never attempted diff --git a/pymongo/network.py b/pymongo/network.py index 10d71308f6..a14e9924a4 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -38,7 +38,7 @@ _UNPACK_HEADER = struct.Struct(" max_bson_size + message._COMMAND_OVERHEAD): diff --git a/pymongo/pool.py b/pymongo/pool.py index 99e64d8b2b..88b0e09737 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -608,6 +608,10 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): self.supports_sessions = ( hello.logical_session_timeout_minutes is not None) self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther, SERVER_TYPE.RSGhost) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone self.is_mongos = hello.server_type == SERVER_TYPE.Mongos if performing_handshake and self.compression_settings: ctx = self.compression_settings.get_compression_context( @@ -641,7 +645,7 @@ def _next_reply(self): response_doc.pop('serviceId', None) return response_doc - def command(self, dbname, spec, secondary_ok=False, + def command(self, dbname, spec, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, allowable_errors=None, @@ -660,7 +664,6 @@ def command(self, dbname, spec, secondary_ok=False, :Parameters: - `dbname`: name of the database on which to run the command - `spec`: a command document as a dict, SON, or mapping object - - `secondary_ok`: whether to set the secondaryOkay wire protocol bit - `read_preference`: a read preference - `codec_options`: a CodecOptions instance - `check`: raise OperationFailure if there are errors @@ -703,7 +706,7 @@ def command(self, dbname, spec, secondary_ok=False, if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self, dbname, spec, secondary_ok, + return command(self, dbname, spec, self.is_mongos, read_preference, codec_options, session, client, check, allowable_errors, self.address, listeners, diff --git a/pymongo/server.py b/pymongo/server.py index 2a0a7267b7..8464cbbc6e 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -68,7 +68,7 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() - def run_operation(self, sock_info, operation, set_secondary_okay, listeners, + def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): """Run a _Query or _GetMore operation and return a Response object. @@ -95,7 +95,7 @@ def run_operation(self, sock_info, operation, set_secondary_okay, listeners, request_id = 0 else: message = operation.get_message( - set_secondary_okay, sock_info, use_cmd) + read_preference, sock_info, use_cmd) request_id, data, max_doc_size = self._split_message(message) if publish: diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json index 28327e8d83..bc887e83cb 100644 --- a/test/crud/unified/aggregate-write-readPreference.json +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -237,7 +237,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" @@ -425,7 +425,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json index 269299e3c7..2a81282de8 100644 --- a/test/crud/unified/db-aggregate-write-readPreference.json +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -222,7 +222,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" @@ -416,7 +416,7 @@ } ], "$readPreference": { - "mode": "primary" + "$$exists": false }, "readConcern": { "level": "local" diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index ccd40c2cd7..49aee27047 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -79,8 +79,11 @@ def test(self): slave_ok = False elif operation.op_type == 'may-use-secondary': slave_ok = mode != 'primary' - self.assertEqual(pref.document, - request.doc.get('$readPreference')) + actual_pref = request.doc.get('$readPreference') + if mode == 'primary': + self.assertIsNone(actual_pref) + else: + self.assertEqual(pref.document, actual_pref) else: self.fail('unrecognized op_type %r' % operation.op_type) diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 6ecc229ea1..d9adfe17eb 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -148,24 +148,31 @@ def test(self): expected_pref = ReadPreference.SECONDARY elif operation.op_type == 'must-use-primary': expected_server = self.primary - expected_pref = ReadPreference.PRIMARY + expected_pref = None elif operation.op_type == 'may-use-secondary': - if mode in ('primary', 'primaryPreferred'): + if mode == 'primary': expected_server = self.primary + expected_pref = None + elif mode == 'primaryPreferred': + expected_server = self.primary + expected_pref = pref else: expected_server = self.secondary - expected_pref = pref + expected_pref = pref else: self.fail('unrecognized op_type %r' % operation.op_type) - # For single mongod we send primaryPreferred instead of primary. - if expected_pref == ReadPreference.PRIMARY and self.single_mongod: - expected_pref = ReadPreference.PRIMARY_PREFERRED + # For single mongod we omit the read preference. + if self.single_mongod: + expected_pref = None with going(operation.function, client): request = expected_server.receive() request.reply(operation.reply) - self.assertEqual(expected_pref.document, - request.doc.get('$readPreference')) + actual_pref = request.doc.get('$readPreference') + if expected_pref: + self.assertEqual(expected_pref.document, actual_pref) + else: + self.assertIsNone(actual_pref) self.assertNotIn('$query', request.doc) return test diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 21813f7b8e..88dcdd8351 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -47,17 +47,18 @@ def test_query_and_read_mode_sharded_op_msg(self): SecondaryPreferred([{'tag': 'value'}]),) for query in ({'a': 1}, {'$query': {'a': 1}},): - for mode in read_prefs: + for pref in read_prefs: collection = client.db.get_collection('test', - read_preference=mode) + read_preference=pref) cursor = collection.find(query.copy()) with going(next, cursor): request = server.receives() # Command is not nested in $query. - request.assert_matches(OpMsg( - SON([('find', 'test'), - ('filter', {'a': 1}), - ('$readPreference', mode.document)]))) + expected_cmd = SON([('find', 'test'), + ('filter', {'a': 1})]) + if pref.mode: + expected_cmd['$readPreference'] = pref.document + request.assert_matches(OpMsg(expected_cmd)) request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) diff --git a/test/test_cursor.py b/test/test_cursor.py index 8c27544b80..8bea12228d 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -36,6 +36,7 @@ InvalidOperation, OperationFailure) from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern from test import (client_context, unittest, @@ -1257,7 +1258,9 @@ def test_getMore_does_not_send_readPreference(self): client = rs_or_single_client( event_listeners=[listener]) self.addCleanup(client.close) - coll = client[self.db.name].test + # We never send primary read preference so override the default. + coll = client[self.db.name].get_collection( + 'test', read_preference=ReadPreference.PRIMARY_PREFERRED) coll.delete_many({}) coll.insert_many([{} for _ in range(5)]) @@ -1267,7 +1270,10 @@ def test_getMore_does_not_send_readPreference(self): started = listener.results['started'] self.assertEqual(2, len(started)) self.assertEqual('find', started[0].command_name) - self.assertIn('$readPreference', started[0].command) + if client_context.is_rs or client_context.is_mongos: + self.assertIn('$readPreference', started[0].command) + else: + self.assertNotIn('$readPreference', started[0].command) self.assertEqual('getMore', started[1].command_name) self.assertNotIn('$readPreference', started[1].command) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index bbc89b9d14..a63df72545 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -309,17 +309,17 @@ def __init__(self, *args, **kwargs): def _socket_for_reads(self, read_preference, session): context = super(ReadPrefTester, self)._socket_for_reads( read_preference, session) - with context as (sock_info, secondary_ok): + with context as (sock_info, read_preference): self.record_a_read(sock_info.address) - yield sock_info, secondary_ok + yield sock_info, read_preference @contextlib.contextmanager - def _secondaryok_for_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._secondaryok_for_server( + def _socket_from_server(self, read_preference, server, session): + context = super(ReadPrefTester, self)._socket_from_server( read_preference, server, session) - with context as (sock_info, secondary_ok): + with context as (sock_info, read_preference): self.record_a_read(sock_info.address) - yield sock_info, secondary_ok + yield sock_info, read_preference def record_a_read(self, address): server = self._get_topology().select_server_by_address(address, 0) @@ -597,8 +597,11 @@ def test_send_hedge(self): started = listener.results['started'] self.assertEqual(len(started), 1, started) cmd = started[0].command - self.assertIn('$readPreference', cmd) - self.assertEqual(cmd['$readPreference'], pref.document) + if client_context.is_rs or client_context.is_mongos: + self.assertIn('$readPreference', cmd) + self.assertEqual(cmd['$readPreference'], pref.document) + else: + self.assertNotIn('$readPreference', cmd) def test_maybe_add_read_preference(self): From 68b818141a8b745e473ab95b4001ca739b05099d Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Mon, 13 Dec 2021 16:30:36 -0800 Subject: [PATCH 0556/2111] PYTHON-2903 Migrate testing from Amazon1 to Ubuntu 18.04 (#822) --- .evergreen/config.yml | 138 ++++++++++++------------------- .evergreen/run-mod-wsgi-tests.sh | 2 +- .evergreen/run-tests.sh | 3 +- .evergreen/utils.sh | 6 +- test/test_encryption.py | 10 +-- 5 files changed, 63 insertions(+), 96 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 93b37d504d..002774df42 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1770,6 +1770,7 @@ axes: run_on: ubuntu1804-small batchtime: 10080 # 7 days variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz python3_binary: "/opt/python/3.8/bin/python3" - id: ubuntu-20.04 display_name: "Ubuntu 20.04" @@ -2196,37 +2197,26 @@ buildvariants: tasks: - ".4.2" -- matrix_name: "tests-python-version-amazon1-test-ssl" +- matrix_name: "tests-python-version-ubuntu18-test-ssl" matrix_spec: - platform: awslinux - python-version: &amazon1-pythons ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + platform: ubuntu-18.04 + python-version: "*" auth-ssl: "*" coverage: "*" display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu20-ssl" - matrix_spec: - platform: ubuntu-20.04 - python-version: ["3.10"] - auth-ssl: "*" - display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" - tasks: - - ".latest" - - ".5.0" - - ".4.4" - - matrix_name: "tests-pyopenssl" matrix_spec: - platform: awslinux - python-version: ["3.6", "3.7", "3.8", "3.9"] + platform: ubuntu-18.04 + python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: awslinux - python-version: ["3.6", "3.8", "3.9"] + platform: ubuntu-18.04 + python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2236,19 +2226,6 @@ buildvariants: # Test standalone and sharded only on 5.0 and later. - '.5.0' -- matrix_name: "tests-pyopenssl-pypy" - matrix_spec: - platform: debian92 - python-version: ["pypy3.6", "pypy3.7"] - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" - tasks: - - '.replica_set' - # Test standalone and sharded only on 5.0 and later. - - '.5.0' - - matrix_name: "tests-pyopenssl-macOS" matrix_spec: platform: macos-1014 @@ -2270,10 +2247,10 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-amazon1-test-encryption" +- matrix_name: "tests-python-version-ubuntu18-test-encryption" matrix_spec: - platform: awslinux - python-version: ["3.6", "3.7", "3.8", "3.9"] + platform: ubuntu-18.04 + python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config @@ -2282,25 +2259,16 @@ buildvariants: display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions -- matrix_name: "tests-pypy-debian-test-encryption" - matrix_spec: - platform: debian92 - python-version: ["pypy3.6", "pypy3.7"] - auth-ssl: noauth-nossl - encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions - -- matrix_name: "tests-python-version-amazon1-without-c-extensions" +- matrix_name: "tests-python-version-ubuntu18-without-c-extensions" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: awslinux + - platform: ubuntu-18.04 python-version: ["pypy3.6", "pypy3.7"] c-extensions: "*" auth-ssl: "*" @@ -2310,9 +2278,8 @@ buildvariants: - matrix_name: "tests-python-version-ubuntu18-compression" matrix_spec: - # Ubuntu 16.04 images have libsnappy-dev installed, and provides OpenSSL 1.0.2 for testing Python 3.7 platform: ubuntu-18.04 - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + python-version: "*" c-extensions: "*" compression: "*" exclude_spec: @@ -2321,11 +2288,6 @@ buildvariants: python-version: ["pypy3.6", "pypy3.7"] c-extensions: "with-c-extensions" compression: "*" - # PYTHON-2365 Some tests fail with CPython 3.8+ and python-snappy - - platform: ubuntu-18.04 - python-version: ["3.8", "3.9"] - c-extensions: "*" - compression: ["snappy"] display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" @@ -2343,16 +2305,16 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-amazon1" +- matrix_name: "tests-python-version-green-framework-ubuntu18" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: awslinux - python-version: ["pypy3.6", "pypy3.7"] + - platform: ubuntu-18.04 + python-version: ["pypy3.6", "pypy3.7", "system-python3"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2374,12 +2336,13 @@ buildvariants: display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" tasks: *all-server-versions -- matrix_name: "tests-python-version-supports-openssl-110-test-ssl" +- matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: debian92 - python-version: *amazon1-pythons + platform: awslinux + # Python 3.10+ requires OpenSSL 1.1.1+ + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] auth-ssl: "*" - display_name: "${python-version} OpenSSL 1.1.0 ${platform} ${auth-ssl}" + display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: - ".latest" @@ -2392,16 +2355,16 @@ buildvariants: display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Storage engine tests on Amazon1 (x86_64) with Python 3.6. +# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.6. - matrix_name: "tests-storage-engines" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 storage-engine: "*" python-version: 3.6 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: awslinux + platform: ubuntu-18.04 storage-engine: ["inmemory"] python-version: "*" then: @@ -2414,7 +2377,7 @@ buildvariants: - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: awslinux + platform: ubuntu-18.04 storage-engine: ["mmapv1"] python-version: "*" then: @@ -2424,10 +2387,10 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on Amazon1 (x86_64) with Python 3.6. +# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.6. - matrix_name: "test-disableTestCommands" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 disableTestCommands: "*" python-version: "3.6" display_name: "Disable test commands ${python-version} ${platform}" @@ -2436,8 +2399,8 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" tasks: @@ -2454,12 +2417,12 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 python-version: ["3.6", "3.7", "3.8", "3.9"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ - - platform: awslinux + - platform: ubuntu-18.04 python-version: ["3.8", "3.9"] mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" @@ -2469,7 +2432,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 python-version: 3.6 display_name: "MockupDB Tests" tasks: @@ -2477,7 +2440,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: awslinux + platform: ubuntu-18.04 python-version: ["3.6"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -2486,7 +2449,7 @@ buildvariants: - name: "no-server" display_name: "No server test" run_on: - - amazon1-2018-test + - ubuntu1804-test tasks: - name: "no-server" expansions: @@ -2495,7 +2458,7 @@ buildvariants: - name: "Coverage Report" display_name: "Coverage Report" run_on: - - ubuntu1604-test + - ubuntu1804-test tasks: - name: "coverage-report" expansions: @@ -2503,18 +2466,23 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: - name: "atlas-connect" - matrix_name: "serverless" matrix_spec: - platform: awslinux - python-version: *amazon1-pythons + platform: ubuntu-18.04 + python-version: "*" auth-ssl: auth-ssl serverless: "*" + exclude_spec: + - platform: ubuntu-18.04 + python-version: ["system-python3"] + auth-ssl: auth-ssl + serverless: "*" display_name: "Serverless ${python-version} ${platform}" tasks: - "serverless_task_group" @@ -2522,7 +2490,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.9"] + python-version: ["3.6", "3.10"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2532,7 +2500,7 @@ buildvariants: - matrix_name: "versioned-api-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.9"] + python-version: ["3.6", "3.10"] auth: "auth" versionedApi: "*" display_name: "Versioned API ${versionedApi} ${python-version}" @@ -2557,7 +2525,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.6", "3.9"] + python-version-windows: ["3.6", "3.10"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2604,7 +2572,7 @@ buildvariants: platform: ubuntu-18.04 mongodb-version: ["5.0", "latest"] auth-ssl: "*" - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + python-version: "*" loadbalancer: "*" display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" tasks: diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh index 725023cc3a..03d72e9701 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -4,7 +4,7 @@ set -o errexit APACHE=$(command -v apache2 || command -v /usr/lib/apache2/mpm-prefork/apache2) || true if [ -n "$APACHE" ]; then - APACHE_CONFIG=apache22ubuntu1204.conf + APACHE_CONFIG=apache24ubuntu161404.conf else APACHE=$(command -v httpd) || true if [ -z "$APACHE" ]; then diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 3f4d6d9459..69550ec932 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -77,8 +77,7 @@ if [ -z "$PYTHON_BINARY" ]; then elif [ "$COMPRESSORS" = "snappy" ]; then createvirtualenv $PYTHON_BINARY snappytest trap "deactivate; rm -rf snappytest" EXIT HUP - # 0.5.2 has issues in pypy3(.5) - python -m pip install python-snappy==0.5.1 + python -m pip install python-snappy PYTHON=python elif [ "$COMPRESSORS" = "zstd" ]; then createvirtualenv $PYTHON_BINARY zstdtest diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 55c549d3aa..b7f65104e8 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -9,11 +9,11 @@ set -o xtrace createvirtualenv () { PYTHON=$1 VENVPATH=$2 - if $PYTHON -m venv -h>/dev/null; then + if $PYTHON -m virtualenv --version; then + VIRTUALENV="$PYTHON -m virtualenv" + elif $PYTHON -m venv -h>/dev/null; then # System virtualenv might not be compatible with the python3 on our path VIRTUALENV="$PYTHON -m venv" - elif $PYTHON -m virtualenv --version; then - VIRTUALENV="$PYTHON -m virtualenv" else echo "Cannot test without virtualenv" exit 1 diff --git a/test/test_encryption.py b/test/test_encryption.py index f77d3fffc7..88acadfbaf 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1820,15 +1820,15 @@ def setUp(self): # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ('certificate required|SSL handshake failed|' - 'KMS connection closed') + 'KMS connection closed|Connection reset by peer') + # On Python 3.10+ this error might be: + # EOF occurred in violation of protocol (_ssl.c:2384) + if sys.version_info[:2] >= (3, 10): + self.cert_error += '|EOF' # On Windows this error might be: # [WinError 10054] An existing connection was forcibly closed by the remote host if sys.platform == 'win32': self.cert_error += '|forcibly closed' - # On Windows Python 3.10+ this error might be: - # EOF occurred in violation of protocol (_ssl.c:2384) - if sys.version_info[:2] >= (3, 10): - self.cert_error += '|EOF' def test_01_aws(self): key = { From 0fc82d9c7bb19805df1483c5a283d37a3dab473d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Dec 2021 10:32:13 -0800 Subject: [PATCH 0557/2111] PYTHON-2763 Fix check_keys removal in encryption (#823) --- pymongo/message.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 2e09df457e..584528c2f2 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -309,10 +309,8 @@ def as_command(self, sock_info): sock_info.send_cluster_time(cmd, session, self.client) # Support auto encryption client = self.client - if (client._encrypter and - not client._encrypter._bypass_auto_encryption): - cmd = client._encrypter.encrypt( - self.db, cmd, False, self.codec_options) + if client._encrypter and not client._encrypter._bypass_auto_encryption: + cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) self._as_command = cmd, self.db return self._as_command @@ -407,10 +405,8 @@ def as_command(self, sock_info): sock_info.send_cluster_time(cmd, self.session, self.client) # Support auto encryption client = self.client - if (client._encrypter and - not client._encrypter._bypass_auto_encryption): - cmd = client._encrypter.encrypt( - self.db, cmd, False, self.codec_options) + if client._encrypter and not client._encrypter._bypass_auto_encryption: + cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) self._as_command = cmd, self.db return self._as_command From b502c44c06a351ed69e1c007377290cb58544429 Mon Sep 17 00:00:00 2001 From: Roberto Martinez <63836051+Pochetes@users.noreply.github.com> Date: Tue, 14 Dec 2021 14:24:05 -0500 Subject: [PATCH 0558/2111] Use quotes for pip install with extras (#824) --- doc/installation.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/installation.rst b/doc/installation.rst index a3d29c7f4f..9c9d80c7a1 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -37,44 +37,44 @@ GSSAPI authentication requires `pykerberos `_ on Windows. The correct dependency can be installed automatically along with PyMongo:: - $ python3 -m pip install pymongo[gssapi] + $ python3 -m pip install "pymongo[gssapi]" :ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws `_:: - $ python3 -m pip install pymongo[aws] + $ python3 -m pip install "pymongo[aws]" Support for mongodb+srv:// URIs requires `dnspython `_:: - $ python3 -m pip install pymongo[srv] + $ python3 -m pip install "pymongo[srv]" :ref:`OCSP` requires `PyOpenSSL `_, `requests `_ and `service_identity `_:: - $ python3 -m pip install pymongo[ocsp] + $ python3 -m pip install "pymongo[ocsp]" Wire protocol compression with snappy requires `python-snappy `_:: - $ python3 -m pip install pymongo[snappy] + $ python3 -m pip install "pymongo[snappy]" Wire protocol compression with zstandard requires `zstandard `_:: - $ python3 -m pip install pymongo[zstd] + $ python3 -m pip install "pymongo[zstd]" :ref:`Client-Side Field Level Encryption` requires `pymongocrypt `_:: - $ python3 -m pip install pymongo[encryption] + $ python3 -m pip install "pymongo[encryption]" You can install all dependencies automatically with the following command:: - $ python3 -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption] + $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption]" Installing from source ---------------------- From 3843cef3f2a67da32e6bfdbeecc0b888256a6d5d Mon Sep 17 00:00:00 2001 From: Roberto Martinez <63836051+Pochetes@users.noreply.github.com> Date: Tue, 14 Dec 2021 14:24:33 -0500 Subject: [PATCH 0559/2111] Use quotes for pip install with extras in README (#825) --- README.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.rst b/README.rst index f83ad70b10..390599a6cf 100644 --- a/README.rst +++ b/README.rst @@ -97,17 +97,17 @@ GSSAPI authentication requires `pykerberos `_ on Windows. The correct dependency can be installed automatically along with PyMongo:: - $ python -m pip install pymongo[gssapi] + $ python -m pip install "pymongo[gssapi]" MONGODB-AWS authentication requires `pymongo-auth-aws `_:: - $ python -m pip install pymongo[aws] + $ python -m pip install "pymongo[aws]" Support for mongodb+srv:// URIs requires `dnspython `_:: - $ python -m pip install pymongo[srv] + $ python -m pip install "pymongo[srv]" OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests @@ -116,27 +116,27 @@ OCSP (Online Certificate Status Protocol) requires `PyOpenSSL require `certifi `_:: - $ python -m pip install pymongo[ocsp] + $ python -m pip install "pymongo[ocsp]" Wire protocol compression with snappy requires `python-snappy `_:: - $ python -m pip install pymongo[snappy] + $ python -m pip install "pymongo[snappy]" Wire protocol compression with zstandard requires `zstandard `_:: - $ python -m pip install pymongo[zstd] + $ python -m pip install "pymongo[zstd]" Client-Side Field Level Encryption requires `pymongocrypt `_:: - $ python -m pip install pymongo[encryption] + $ python -m pip install "pymongo[encryption]" You can install all dependencies automatically with the following command:: - $ python -m pip install pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption] + $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption]" Additional dependencies are: From 2b53bf3b8590bedecd55036d9ccc9d0e7bcc1c99 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Tue, 14 Dec 2021 15:12:01 -0800 Subject: [PATCH 0560/2111] PYTHON-3042 Migrate OCSP testing to Ubuntu 20.04 (#826) --- .evergreen/config.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 002774df42..a265ba83bc 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2512,8 +2512,10 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: - platform: awslinux - python-version: ["3.6", "3.9", "pypy3.6", "pypy3.7"] + # OCSP stapling is not supported on Ubuntu 18.04. + # See https://jira.mongodb.org/browse/SERVER-51364. + platform: ubuntu-20.04 + python-version: ["3.6", "3.10", "pypy3.6", "pypy3.7"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" From 3886d0660e677e5a82d464a6ad0c72b869d41bbc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Dec 2021 15:12:49 -0800 Subject: [PATCH 0561/2111] PYTHON-3040 Remove duplicate srvMaxHosts tests --- .../replica-set/srvMaxHosts-invalid_integer.json | 7 ------- .../srv_seedlist/replica-set/srvMaxHosts-invalid_type.json | 7 ------- test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json | 7 ------- test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json | 7 ------- 4 files changed, 28 deletions(-) delete mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json delete mode 100644 test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json delete mode 100644 test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json delete mode 100644 test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json deleted file mode 100644 index 5ba1a3b540..0000000000 --- a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_integer.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=-1", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not greater than or equal to zero" -} diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json b/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json deleted file mode 100644 index 79e75b9b15..0000000000 --- a/test/srv_seedlist/replica-set/srvMaxHosts-invalid_type.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0&srvMaxHosts=foo", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not an integer" -} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json b/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json deleted file mode 100644 index 0939624fc3..0000000000 --- a/test/srv_seedlist/sharded/srvMaxHosts-invalid_integer.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=-1", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not greater than or equal to zero" -} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json b/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json deleted file mode 100644 index c228d26612..0000000000 --- a/test/srv_seedlist/sharded/srvMaxHosts-invalid_type.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=foo", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because srvMaxHosts is not an integer" -} From ee80ebab544c8093f326f3e20f6473e80a826f29 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Fri, 17 Dec 2021 12:10:35 -0800 Subject: [PATCH 0562/2111] PYTHON-3049 Test with PyPy 3.8 (#827) --- .evergreen/config.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a265ba83bc..44a075a727 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1923,6 +1923,10 @@ axes: display_name: "PyPy 3.7" variables: PYTHON_BINARY: "/opt/python/pypy3.7/bin/pypy3" + - id: "pypy3.8" + display_name: "PyPy 3.8" + variables: + PYTHON_BINARY: "/opt/python/pypy3.8/bin/pypy3" - id: "system-python3" display_name: "Python3" variables: @@ -2216,7 +2220,7 @@ buildvariants: # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7"] + python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2269,7 +2273,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7"] + python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2285,7 +2289,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7"] + python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" display_name: "${compression} ${c-extensions} ${python-version} ${platform}" @@ -2314,7 +2318,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "system-python3"] + python-version: ["pypy3.6", "pypy3.7", "pypy3.8", "system-python3"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2340,7 +2344,7 @@ buildvariants: matrix_spec: platform: awslinux # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7"] + python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7", "pypy3.8"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: @@ -2515,7 +2519,7 @@ buildvariants: # OCSP stapling is not supported on Ubuntu 18.04. # See https://jira.mongodb.org/browse/SERVER-51364. platform: ubuntu-20.04 - python-version: ["3.6", "3.10", "pypy3.6", "pypy3.7"] + python-version: ["3.6", "3.10", "pypy3.6", "pypy3.8"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" From c760f900f2e4109a247c2ffc8ad3549362007772 Mon Sep 17 00:00:00 2001 From: David Kim <50807669+DavidKimDY@users.noreply.github.com> Date: Mon, 20 Dec 2021 22:32:53 +0900 Subject: [PATCH 0563/2111] Edit simple typo in docs (#828) --- pymongo/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/server.py b/pymongo/server.py index 8464cbbc6e..cb9442d000 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -77,7 +77,7 @@ def run_operation(self, sock_info, operation, read_preference, listeners, Can raise ConnectionFailure, OperationFailure, etc. :Parameters: - - `sock_info` - A SocketInfo instance. + - `sock_info`: A SocketInfo instance. - `operation`: A _Query or _GetMore object. - `set_secondary_okay`: Pass to operation.get_message. - `listeners`: Instance of _EventListeners or None. From 52ed5a4135a76480e03e96eb0369c2c4eae0c3f7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Jan 2022 16:09:48 -0600 Subject: [PATCH 0564/2111] PYTHON-3052 Add Typings to PyMongo Itself (#829) --- .github/workflows/test-python.yml | 21 ++ bson/__init__.py | 201 ++++++++-------- bson/_helpers.py | 7 +- bson/binary.py | 40 +-- bson/code.py | 18 +- bson/codec_options.py | 63 ++--- bson/dbref.py | 24 +- bson/decimal128.py | 40 +-- bson/int64.py | 7 +- bson/json_util.py | 97 ++++---- bson/max_key.py | 19 +- bson/min_key.py | 19 +- bson/objectid.py | 48 ++-- bson/raw_bson.py | 32 +-- bson/regex.py | 17 +- bson/son.py | 62 ++--- bson/timestamp.py | 30 +-- bson/tz_util.py | 18 +- doc/changelog.rst | 4 +- gridfs/__init__.py | 98 ++++---- gridfs/grid_file.py | 227 +++++++++--------- mypy.ini | 11 + setup.py | 2 +- test/mockupdb/operations.py | 5 +- test/mockupdb/test_getmore_sharded.py | 5 +- test/mockupdb/test_mixed_version_sharded.py | 5 +- .../test_network_disconnect_primary.py | 5 +- test/mockupdb/test_slave_okay_sharded.py | 5 +- test/mod_wsgi_test/test_client.py | 14 +- test/performance/perf_test.py | 5 +- tools/clean.py | 2 +- 31 files changed, 617 insertions(+), 534 deletions(-) create mode 100644 mypy.ini diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 28ee689966..3ad5aa79fe 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -26,3 +26,24 @@ jobs: - name: Run tests run: | python setup.py test + + mypytest: + name: Run mypy + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.7', '3.10'] + fail-fast: false + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install -U pip mypy + pip install -e ".[zstd, srv]" + - name: Run mypy + run: | + mypy --install-types --non-interactive bson gridfs tools diff --git a/bson/__init__.py b/bson/__init__.py index 1efb1f7ff5..5be673cfc3 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -57,40 +57,42 @@ import calendar import datetime import itertools -import platform import re import struct import sys import uuid - -from codecs import (utf_8_decode as _utf_8_decode, - utf_8_encode as _utf_8_encode) +from codecs import utf_8_decode as _utf_8_decode # type: ignore +from codecs import utf_8_encode as _utf_8_encode # type: ignore from collections import abc as _abc +from typing import (TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, + Iterator, List, Mapping, MutableMapping, NoReturn, + Sequence, Tuple, Type, TypeVar, Union, cast) -from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, - OLD_UUID_SUBTYPE, - JAVA_LEGACY, CSHARP_LEGACY, STANDARD, - UUID_SUBTYPE) +from bson.binary import (ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, + OLD_UUID_SUBTYPE, STANDARD, UUID_SUBTYPE, Binary, + UuidRepresentation) from bson.code import Code -from bson.codec_options import ( - CodecOptions, DEFAULT_CODEC_OPTIONS, _raw_document_class) +from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, + _raw_document_class) from bson.dbref import DBRef from bson.decimal128 import Decimal128 -from bson.errors import (InvalidBSON, - InvalidDocument, - InvalidStringData) +from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData from bson.int64 import Int64 from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex -from bson.son import SON, RE_TYPE +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc +# Import RawBSONDocument for type-checking only to avoid circular dependency. +if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument + try: - from bson import _cbson + from bson import _cbson # type: ignore _USE_C = True except ImportError: _USE_C = False @@ -131,38 +133,38 @@ _UNPACK_TIMESTAMP_FROM = struct.Struct(" Tuple[Any, memoryview]: if isinstance(data, (bytes, bytearray)): return data, memoryview(data) view = memoryview(data) return view.tobytes(), view -def _raise_unknown_type(element_type, element_name): +def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON("Detected unknown BSON type %r for fieldname '%s'. Are " "you using the latest driver version?" % ( chr(element_type).encode(), element_name)) -def _get_int(data, view, position, dummy0, dummy1, dummy2): +def _get_int(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[int, int]: """Decode a BSON int32 to python int.""" return _UNPACK_INT_FROM(data, position)[0], position + 4 -def _get_c_string(data, view, position, opts): +def _get_c_string(data: Any, view: Any, position: int, opts: Any) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_float(data, view, position, dummy0, dummy1, dummy2): +def _get_float(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[float, int]: """Decode a BSON double to python float.""" return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 -def _get_string(data, view, position, obj_end, opts, dummy): +def _get_string(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] position += 4 @@ -175,7 +177,7 @@ def _get_string(data, view, position, obj_end, opts, dummy): opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_object_size(data, position, obj_end): +def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: """Validate and return a BSON document's size.""" try: obj_size = _UNPACK_INT_FROM(data, position)[0] @@ -192,7 +194,7 @@ def _get_object_size(data, position, obj_end): return obj_size, end -def _get_object(data, view, position, obj_end, opts, dummy): +def _get_object(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): @@ -211,7 +213,7 @@ def _get_object(data, view, position, obj_end, opts, dummy): return obj, position -def _get_array(data, view, position, obj_end, opts, element_name): +def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] end = position + size - 1 @@ -220,7 +222,7 @@ def _get_array(data, view, position, obj_end, opts, element_name): position += 4 end -= 1 - result = [] + result: List[Any] = [] # Avoid doing global and attribute lookups in the loop. append = result.append @@ -250,7 +252,7 @@ def _get_array(data, view, position, obj_end, opts, element_name): return result, position + 1 -def _get_binary(data, view, position, obj_end, opts, dummy1): +def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) position += 5 @@ -283,13 +285,13 @@ def _get_binary(data, view, position, obj_end, opts, dummy1): return value, end -def _get_oid(data, view, position, dummy0, dummy1, dummy2): +def _get_oid(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[ObjectId, int]: """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 return ObjectId(data[position:end]), end -def _get_boolean(data, view, position, dummy0, dummy1, dummy2): +def _get_boolean(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[bool, int]: """Decode a BSON true/false to python True/False.""" end = position + 1 boolean_byte = data[position:end] @@ -300,19 +302,19 @@ def _get_boolean(data, view, position, dummy0, dummy1, dummy2): raise InvalidBSON('invalid boolean value: %r' % boolean_byte) -def _get_date(data, view, position, dummy0, opts, dummy1): +def _get_date(data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any) -> Tuple[datetime.datetime, int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime( _UNPACK_LONG_FROM(data, position)[0], opts), position + 8 -def _get_code(data, view, position, obj_end, opts, element_name): +def _get_code(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) return Code(code), position -def _get_code_w_scope(data, view, position, obj_end, opts, element_name): +def _get_code_w_scope(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] code, position = _get_string( @@ -323,7 +325,7 @@ def _get_code_w_scope(data, view, position, obj_end, opts, element_name): return Code(code, scope), position -def _get_regex(data, view, position, dummy0, opts, dummy1): +def _get_regex(data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any) -> Tuple[Regex, int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) bson_flags, position = _get_c_string(data, view, position, opts) @@ -331,7 +333,7 @@ def _get_regex(data, view, position, dummy0, opts, dummy1): return bson_re, position -def _get_ref(data, view, position, obj_end, opts, element_name): +def _get_ref(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" collection, position = _get_string( data, view, position, obj_end, opts, element_name) @@ -339,18 +341,18 @@ def _get_ref(data, view, position, obj_end, opts, element_name): return DBRef(collection, oid), position -def _get_timestamp(data, view, position, dummy0, dummy1, dummy2): +def _get_timestamp(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Timestamp, int]: """Decode a BSON timestamp to bson.timestamp.Timestamp.""" inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) return Timestamp(timestamp, inc), position + 8 -def _get_int64(data, view, position, dummy0, dummy1, dummy2): +def _get_int64(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Int64, int]: """Decode a BSON int64 to bson.int64.Int64.""" return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 -def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): +def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Decimal128, int]: """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" end = position + 16 return Decimal128.from_bid(data[position:end]), end @@ -362,7 +364,7 @@ def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions -_ELEMENT_GETTER = { +_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]]= { ord(BSONNUM): _get_float, ord(BSONSTR): _get_string, ord(BSONOBJ): _get_object, @@ -387,10 +389,10 @@ def _get_decimal128(data, view, position, dummy0, dummy1, dummy2): if _USE_C: - def _element_to_dict(data, view, position, obj_end, opts): + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: return _cbson._element_to_dict(data, position, obj_end, opts) else: - def _element_to_dict(data, view, position, obj_end, opts): + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 @@ -410,12 +412,15 @@ def _element_to_dict(data, view, position, obj_end, opts): return element_name, value, position -def _raw_to_dict(data, position, obj_end, opts, result): +_T = TypeVar("_T", bound=MutableMapping[Any, Any]) + + +def _raw_to_dict(data: Any, position: int, obj_end: int, opts: Any, result: _T) -> _T: data, view = get_data_and_view(data) return _elements_to_dict(data, view, position, obj_end, opts, result) -def _elements_to_dict(data, view, position, obj_end, opts, result=None): +def _elements_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() @@ -428,7 +433,7 @@ def _elements_to_dict(data, view, position, obj_end, opts, result=None): return result -def _bson_to_dict(data, opts): +def _bson_to_dict(data: Any, opts: Any) -> Any: """Decode a BSON string to document_class.""" data, view = get_data_and_view(data) try: @@ -454,7 +459,7 @@ def _bson_to_dict(data, opts): _LIST_NAMES = tuple((str(i) + "\x00").encode('utf8') for i in range(1000)) -def gen_list_name(): +def gen_list_name() -> Generator[bytes, None, None]: """Generate "keys" for encoded lists in the sequence b"0\x00", b"1\x00", b"2\x00", ... @@ -469,7 +474,7 @@ def gen_list_name(): yield (str(next(counter)) + "\x00").encode('utf8') -def _make_c_string_check(string): +def _make_c_string_check(string: Union[str, bytes]) -> bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: @@ -485,10 +490,10 @@ def _make_c_string_check(string): if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") - return _utf_8_encode(string)[0] + b"\x00" + return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" -def _make_c_string(string): +def _make_c_string(string: Union[str, bytes]) -> bytes: """Make a 'C' string.""" if isinstance(string, bytes): try: @@ -498,30 +503,30 @@ def _make_c_string(string): raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: - return _utf_8_encode(string)[0] + b"\x00" + return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" -def _make_name(string): +def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") - return _utf_8_encode(string)[0] + b"\x00" + return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" -def _encode_float(name, value, dummy0, dummy1): +def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: """Encode a float.""" return b"\x01" + name + _PACK_FLOAT(value) -def _encode_bytes(name, value, dummy0, dummy1): +def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: """Encode a python bytes.""" # Python3 special case. Store 'bytes' as BSON binary subtype 0. return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -def _encode_mapping(name, value, check_keys, opts): +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): return b'\x03' + name + value.raw @@ -530,7 +535,7 @@ def _encode_mapping(name, value, check_keys, opts): return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_dbref(name, value, check_keys, opts): +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> bytes: """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 @@ -550,7 +555,7 @@ def _encode_dbref(name, value, check_keys, opts): return bytes(buf) -def _encode_list(name, value, check_keys, opts): +def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() data = b"".join([_name_value_to_bson(next(lname), item, @@ -559,48 +564,48 @@ def _encode_list(name, value, check_keys, opts): return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_text(name, value, dummy0, dummy1): +def _encode_text(name: bytes, value: str, dummy0: Any, dummy1: Any) -> bytes: """Encode a python str.""" value = _utf_8_encode(value)[0] - return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" + return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" # type: ignore -def _encode_binary(name, value, dummy0, dummy1): +def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.binary.Binary.""" subtype = value.subtype if subtype == 2: - value = _PACK_INT(len(value)) + value + value = _PACK_INT(len(value)) + value # type: ignore return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value -def _encode_uuid(name, value, dummy, opts): +def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: Any) -> bytes: """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation binval = Binary.from_uuid(value, uuid_representation=uuid_representation) return _encode_binary(name, binval, dummy, opts) -def _encode_objectid(name, value, dummy0, dummy1): +def _encode_objectid(name: bytes, value: ObjectId, dummy: Any, dummy1: Any) -> bytes: """Encode bson.objectid.ObjectId.""" return b"\x07" + name + value.binary -def _encode_bool(name, value, dummy0, dummy1): +def _encode_bool(name: bytes, value: bool, dummy0: Any, dummy1: Any) -> bytes: """Encode a python boolean (True/False).""" return b"\x08" + name + (value and b"\x01" or b"\x00") -def _encode_datetime(name, value, dummy0, dummy1): +def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: Any) -> bytes: """Encode datetime.datetime.""" millis = _datetime_to_millis(value) return b"\x09" + name + _PACK_LONG(millis) -def _encode_none(name, dummy0, dummy1, dummy2): +def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode python None.""" return b"\x0A" + name -def _encode_regex(name, value, dummy0, dummy1): +def _encode_regex(name: bytes, value: Regex, dummy0: Any, dummy1: Any) -> bytes: """Encode a python regex or bson.regex.Regex.""" flags = value.flags # Python 3 common case @@ -626,7 +631,7 @@ def _encode_regex(name, value, dummy0, dummy1): return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags -def _encode_code(name, value, dummy, opts): +def _encode_code(name: bytes, value: Code, dummy: Any, opts: Any) -> bytes: """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) @@ -637,7 +642,7 @@ def _encode_code(name, value, dummy, opts): return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope -def _encode_int(name, value, dummy0, dummy1): +def _encode_int(name: bytes, value: int, dummy0: Any, dummy1: Any) -> bytes: """Encode a python int.""" if -2147483648 <= value <= 2147483647: return b"\x10" + name + _PACK_INT(value) @@ -648,12 +653,12 @@ def _encode_int(name, value, dummy0, dummy1): raise OverflowError("BSON can only handle up to 8-byte ints") -def _encode_timestamp(name, value, dummy0, dummy1): +def _encode_timestamp(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.timestamp.Timestamp.""" return b"\x11" + name + _PACK_TIMESTAMP(value.inc, value.time) -def _encode_long(name, value, dummy0, dummy1): +def _encode_long(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: """Encode a python long (python 2.x)""" try: return b"\x12" + name + _PACK_LONG(value) @@ -661,17 +666,17 @@ def _encode_long(name, value, dummy0, dummy1): raise OverflowError("BSON can only handle up to 8-byte ints") -def _encode_decimal128(name, value, dummy0, dummy1): +def _encode_decimal128(name: bytes, value: Decimal128, dummy0: Any, dummy1: Any) -> bytes: """Encode bson.decimal128.Decimal128.""" return b"\x13" + name + value.bid -def _encode_minkey(name, dummy0, dummy1, dummy2): +def _encode_minkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode bson.min_key.MinKey.""" return b"\xFF" + name -def _encode_maxkey(name, dummy0, dummy1, dummy2): +def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode bson.max_key.MaxKey.""" return b"\x7F" + name @@ -726,14 +731,14 @@ def _encode_maxkey(name, dummy0, dummy1, dummy2): _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) -def _name_value_to_bson(name, value, check_keys, opts, - in_custom_call=False, - in_fallback_call=False): +def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, + in_custom_call: bool = False, + in_fallback_call: bool = False) -> bytes: """Encode a single name, value pair.""" # First see if the type is already cached. KeyError will only ever # happen once per subtype. try: - return _ENCODERS[type(value)](name, value, check_keys, opts) + return _ENCODERS[type(value)](name, value, check_keys, opts) # type: ignore except KeyError: pass @@ -745,7 +750,7 @@ def _name_value_to_bson(name, value, check_keys, opts, func = _MARKERS[marker] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func - return func(name, value, check_keys, opts) + return func(name, value, check_keys, opts) # type: ignore # Third, check if a type encoder is registered for this type. # Note that subtypes of registered custom types are not auto-encoded. @@ -765,7 +770,7 @@ def _name_value_to_bson(name, value, check_keys, opts, func = _ENCODERS[base] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func - return func(name, value, check_keys, opts) + return func(name, value, check_keys, opts) # type: ignore # As a last resort, try using the fallback encoder, if the user has # provided one. @@ -779,7 +784,7 @@ def _name_value_to_bson(name, value, check_keys, opts, "cannot encode object: %r, of type: %r" % (value, type(value))) -def _element_to_bson(key, value, check_keys, opts): +def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): raise InvalidDocument("documents must have only string keys, " @@ -794,10 +799,10 @@ def _element_to_bson(key, value, check_keys, opts): return _name_value_to_bson(name, value, check_keys, opts) -def _dict_to_bson(doc, check_keys, opts, top_level=True): +def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) -> bytes: """Encode a document to BSON.""" if _raw_document_class(doc): - return doc.raw + return cast(bytes, doc.raw) try: elements = [] if top_level and "_id" in doc: @@ -816,7 +821,7 @@ def _dict_to_bson(doc, check_keys, opts, top_level=True): _dict_to_bson = _cbson._dict_to_bson -def _millis_to_datetime(millis, opts): +def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: """Convert milliseconds since epoch UTC to datetime.""" diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) // 1000 @@ -832,10 +837,10 @@ def _millis_to_datetime(millis, opts): microseconds=micros) -def _datetime_to_millis(dtm): +def _datetime_to_millis(dtm: datetime.datetime) -> int: """Convert datetime to milliseconds since epoch UTC.""" if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() + dtm = dtm - dtm.utcoffset() # type: ignore return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) @@ -844,7 +849,11 @@ def _datetime_to_millis(dtm): "codec_options must be an instance of CodecOptions") -def encode(document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): +_DocumentIn = Mapping[str, Any] +_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] + + +def encode(document: _DocumentIn, check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). @@ -871,7 +880,7 @@ def encode(document, check_keys=False, codec_options=DEFAULT_CODEC_OPTIONS): return _dict_to_bson(document, check_keys, codec_options) -def decode(data, codec_options=DEFAULT_CODEC_OPTIONS): +def decode(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -903,7 +912,7 @@ def decode(data, codec_options=DEFAULT_CODEC_OPTIONS): return _bson_to_dict(data, codec_options) -def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): +def decode_all(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[_DocumentOut]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -967,7 +976,7 @@ def decode_all(data, codec_options=DEFAULT_CODEC_OPTIONS): decode_all = _cbson.decode_all -def _decode_selective(rawdoc, fields, codec_options): +def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: if _raw_document_class(codec_options.document_class): # If document_class is RawBSONDocument, use vanilla dictionary for # decoding command response. @@ -986,7 +995,7 @@ def _decode_selective(rawdoc, fields, codec_options): return doc -def _convert_raw_document_lists_to_streams(document): +def _convert_raw_document_lists_to_streams(document: Any) -> None: cursor = document.get('cursor') if cursor: for key in ('firstBatch', 'nextBatch'): @@ -996,7 +1005,7 @@ def _convert_raw_document_lists_to_streams(document): cursor[key] = [stream] -def _decode_all_selective(data, codec_options, fields): +def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) -> List[Any]: """Decode BSON data to a single document while using user-provided custom decoding logic. @@ -1033,7 +1042,7 @@ def _decode_all_selective(data, codec_options, fields): return [_decode_selective(_doc, fields, codec_options,)] -def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): +def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1066,7 +1075,7 @@ def decode_iter(data, codec_options=DEFAULT_CODEC_OPTIONS): yield _bson_to_dict(elements, codec_options) -def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): +def decode_file_iter(file_obj: BinaryIO, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1095,7 +1104,7 @@ def decode_file_iter(file_obj, codec_options=DEFAULT_CODEC_OPTIONS): yield _bson_to_dict(elements, codec_options) -def is_valid(bson): +def is_valid(bson: bytes) -> bool: """Check that the given string represents valid :class:`BSON` data. Raises :class:`TypeError` if `bson` is not an instance of @@ -1124,8 +1133,8 @@ class BSON(bytes): """ @classmethod - def encode(cls, document, check_keys=False, - codec_options=DEFAULT_CODEC_OPTIONS): + def encode(cls: Type["BSON"], document: _DocumentIn, check_keys: bool = False, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> "BSON": """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). @@ -1149,7 +1158,7 @@ def encode(cls, document, check_keys=False, """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options=DEFAULT_CODEC_OPTIONS): + def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: # type: ignore[override] """Decode this BSON data. By default, returns a BSON document represented as a Python @@ -1183,7 +1192,7 @@ def decode(self, codec_options=DEFAULT_CODEC_OPTIONS): return decode(self, codec_options) -def has_c(): +def has_c() -> bool: """Is the C extension installed? """ return _USE_C diff --git a/bson/_helpers.py b/bson/_helpers.py index 6449705eb2..2d89789586 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -15,14 +15,15 @@ """Setstate and getstate functions for objects with __slots__, allowing compatibility with default pickling protocol """ +from typing import Any, Mapping -def _setstate_slots(self, state): +def _setstate_slots(self: Any, state: Any) -> None: for slot, value in state.items(): setattr(self, slot, value) -def _mangle_name(name, prefix): +def _mangle_name(name: str, prefix: str) -> str: if name.startswith("__"): prefix = "_"+prefix else: @@ -30,7 +31,7 @@ def _mangle_name(name, prefix): return prefix + name -def _getstate_slots(self): +def _getstate_slots(self: Any) -> Mapping[Any, Any]: prefix = self.__class__.__name__ ret = dict() for name in self.__slots__: diff --git a/bson/binary.py b/bson/binary.py index 50cfbd8439..53d5419b49 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, Tuple, Type from uuid import UUID -from warnings import warn """Tools for representing BSON binary data. """ @@ -69,7 +69,7 @@ class UuidRepresentation: code. When decoding a BSON binary field with a UUID subtype, a :class:`~bson.binary.Binary` instance will be returned instead of a :class:`uuid.UUID` instance. - + See :ref:`unspecified-representation-details` for details. .. versionadded:: 3.11 @@ -81,7 +81,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`UUID_SUBTYPE`. - + See :ref:`standard-representation-details` for details. .. versionadded:: 3.11 @@ -93,7 +93,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`OLD_UUID_SUBTYPE`. - + See :ref:`python-legacy-representation-details` for details. .. versionadded:: 3.11 @@ -105,7 +105,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the Java driver's legacy byte order. - + See :ref:`java-legacy-representation-details` for details. .. versionadded:: 3.11 @@ -117,7 +117,7 @@ class UuidRepresentation: :class:`uuid.UUID` instances will automatically be encoded to and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the C# driver's legacy byte order. - + See :ref:`csharp-legacy-representation-details` for details. .. versionadded:: 3.11 @@ -153,11 +153,12 @@ class UuidRepresentation: """ ALL_UUID_SUBTYPES = (OLD_UUID_SUBTYPE, UUID_SUBTYPE) -ALL_UUID_REPRESENTATIONS = (UuidRepresentation.UNSPECIFIED, - UuidRepresentation.STANDARD, - UuidRepresentation.PYTHON_LEGACY, - UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY) +ALL_UUID_REPRESENTATIONS = ( + UuidRepresentation.UNSPECIFIED, + UuidRepresentation.STANDARD, + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY) UUID_REPRESENTATION_NAMES = { UuidRepresentation.UNSPECIFIED: 'UuidRepresentation.UNSPECIFIED', UuidRepresentation.STANDARD: 'UuidRepresentation.STANDARD', @@ -208,8 +209,9 @@ class Binary(bytes): """ _type_marker = 5 + __subtype: int - def __new__(cls, data, subtype=BINARY_SUBTYPE): + def __new__(cls: Type["Binary"], data: bytes, subtype: int = BINARY_SUBTYPE) -> "Binary": if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: @@ -220,7 +222,7 @@ def __new__(cls, data, subtype=BINARY_SUBTYPE): return self @classmethod - def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): + def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD) -> "Binary": """Create a BSON Binary object from a Python UUID. Creates a :class:`~bson.binary.Binary` object from a @@ -271,7 +273,7 @@ def from_uuid(cls, uuid, uuid_representation=UuidRepresentation.STANDARD): return cls(payload, subtype) - def as_uuid(self, uuid_representation=UuidRepresentation.STANDARD): + def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUID: """Create a Python UUID from this BSON Binary object. Decodes this binary object as a native :class:`uuid.UUID` instance @@ -316,19 +318,19 @@ def as_uuid(self, uuid_representation=UuidRepresentation.STANDARD): self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation])) @property - def subtype(self): + def subtype(self) -> int: """Subtype of this binary data. """ return self.__subtype - def __getnewargs__(self): + def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 data = super(Binary, self).__getnewargs__()[0] if not isinstance(data, bytes): data = data.encode('latin-1') return data, self.__subtype - def __eq__(self, other): + def __eq__(self, other : Any) -> bool: if isinstance(other, Binary): return ((self.__subtype, bytes(self)) == (other.subtype, bytes(other))) @@ -337,10 +339,10 @@ def __eq__(self, other): # subclass of str... return False - def __hash__(self): + def __hash__(self) -> int: return super(Binary, self).__hash__() ^ hash(self.__subtype) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): diff --git a/bson/code.py b/bson/code.py index 3d8b4904da..6f4b1838d8 100644 --- a/bson/code.py +++ b/bson/code.py @@ -16,6 +16,7 @@ """ from collections.abc import Mapping as _Mapping +from typing import Any, Mapping, Optional, Type, Union class Code(str): @@ -47,15 +48,16 @@ class Code(str): """ _type_marker = 13 + __scope: Union[Mapping[str, Any], None] - def __new__(cls, code, scope=None, **kwargs): + def __new__(cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> "Code": if not isinstance(code, str): raise TypeError("code must be an instance of str") self = str.__new__(cls, code) try: - self.__scope = code.scope + self.__scope = code.scope # type: ignore except AttributeError: self.__scope = None @@ -63,20 +65,20 @@ def __new__(cls, code, scope=None, **kwargs): if not isinstance(scope, _Mapping): raise TypeError("scope must be an instance of dict") if self.__scope is not None: - self.__scope.update(scope) + self.__scope.update(scope) # type: ignore else: self.__scope = scope if kwargs: if self.__scope is not None: - self.__scope.update(kwargs) + self.__scope.update(kwargs) # type: ignore else: self.__scope = kwargs return self @property - def scope(self): + def scope(self) -> Optional[Mapping[str, Any]]: """Scope dictionary for this instance or ``None``. """ return self.__scope @@ -84,12 +86,12 @@ def scope(self): def __repr__(self): return "Code(%s, %r)" % (str.__repr__(self), self.__scope) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Code): return (self.__scope, str(self)) == (other.__scope, str(other)) return False - __hash__ = None + __hash__: Any = None - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/codec_options.py b/bson/codec_options.py index 6fcffcc17a..81e79158b4 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -16,22 +16,26 @@ import abc import datetime -import warnings - from collections import namedtuple from collections.abc import MutableMapping as _MutableMapping +from typing import (TYPE_CHECKING, Any, Callable, Dict, Generic, Iterable, + MutableMapping, Optional, Type, TypeVar, Union, cast) + +from bson.binary import (ALL_UUID_REPRESENTATIONS, UUID_REPRESENTATION_NAMES, + UuidRepresentation) + +# Import RawBSONDocument for type-checking only to avoid circular dependency. +if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument -from bson.binary import (UuidRepresentation, - ALL_UUID_REPRESENTATIONS, - UUID_REPRESENTATION_NAMES) -def _abstractproperty(func): +def _abstractproperty(func: Callable[..., Any]) -> property: return property(abc.abstractmethod(func)) _RAW_BSON_DOCUMENT_MARKER = 101 -def _raw_document_class(document_class): +def _raw_document_class(document_class: Any) -> bool: """Determine if a document_class is a RawBSONDocument class.""" marker = getattr(document_class, '_type_marker', None) return marker == _RAW_BSON_DOCUMENT_MARKER @@ -47,12 +51,12 @@ class TypeEncoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ @_abstractproperty - def python_type(self): + def python_type(self) -> Any: """The Python type to be converted into something serializable.""" pass @abc.abstractmethod - def transform_python(self, value): + def transform_python(self, value: Any) -> Any: """Convert the given Python object into something serializable.""" pass @@ -67,12 +71,12 @@ class TypeDecoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ @_abstractproperty - def bson_type(self): + def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" pass @abc.abstractmethod - def transform_bson(self, value): + def transform_bson(self, value: Any) -> Any: """Convert the given BSON value into our own type.""" pass @@ -92,6 +96,9 @@ class TypeCodec(TypeEncoder, TypeDecoder): pass +_Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] +_Fallback = Callable[[Any], Any] + class TypeRegistry(object): """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after @@ -118,7 +125,7 @@ class TypeRegistry(object): :mod:`bson` can encode. See :ref:`fallback-encoder-callable` documentation for an example. """ - def __init__(self, type_codecs=None, fallback_encoder=None): + def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_encoder: Optional[_Fallback] = None) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder self._encoder_map = {} @@ -144,10 +151,10 @@ def __init__(self, type_codecs=None, fallback_encoder=None): TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec)) - def _validate_type_encoder(self, codec): + def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES for pytype in _BUILT_IN_TYPES: - if issubclass(codec.python_type, pytype): + if issubclass(cast(TypeCodec, codec).python_type, pytype): err_msg = ("TypeEncoders cannot change how built-in types are " "encoded (encoder %s transforms type %s)" % (codec, pytype)) @@ -158,7 +165,7 @@ def __repr__(self): self.__class__.__name__, self.__type_codecs, self._fallback_encoder)) - def __eq__(self, other): + def __eq__(self, other: Any) -> Any: if not isinstance(other, type(self)): return NotImplemented return ((self._decoder_map == other._decoder_map) and @@ -166,7 +173,7 @@ def __eq__(self, other): (self._fallback_encoder == other._fallback_encoder)) -_options_base = namedtuple( +_options_base = namedtuple( # type: ignore 'CodecOptions', ('document_class', 'tz_aware', 'uuid_representation', 'unicode_decode_error_handler', 'tzinfo', 'type_registry')) @@ -247,12 +254,12 @@ class CodecOptions(_options_base): retrieved from the server will be modified in the client application and stored back to the server. """ - - def __new__(cls, document_class=dict, - tz_aware=False, - uuid_representation=UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler="strict", - tzinfo=None, type_registry=None): + def __new__(cls: Type["CodecOptions"], document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: Optional[str] = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None) -> "CodecOptions": if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): raise TypeError("document_class must be dict, bson.son.SON, " @@ -263,7 +270,7 @@ def __new__(cls, document_class=dict, if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError("uuid_representation must be a value " "from bson.binary.UuidRepresentation") - if not isinstance(unicode_decode_error_handler, (str, None)): + if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore raise ValueError("unicode_decode_error_handler must be a string " "or None") if tzinfo is not None: @@ -283,7 +290,7 @@ def __new__(cls, document_class=dict, cls, (document_class, tz_aware, uuid_representation, unicode_decode_error_handler, tzinfo, type_registry)) - def _arguments_repr(self): + def _arguments_repr(self) -> str: """Representation of the arguments used to create this object.""" document_class_repr = ( 'dict' if self.document_class is dict @@ -299,7 +306,7 @@ def _arguments_repr(self): self.unicode_decode_error_handler, self.tzinfo, self.type_registry)) - def _options_dict(self): + def _options_dict(self) -> Dict[str, Any]: """Dictionary of the arguments used to create this object.""" # TODO: PYTHON-2442 use _asdict() instead return { @@ -313,7 +320,7 @@ def _options_dict(self): def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._arguments_repr()) - def with_options(self, **kwargs): + def with_options(self, **kwargs: Any) -> "CodecOptions": """Make a copy of this CodecOptions, overriding some options:: >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -330,10 +337,10 @@ def with_options(self, **kwargs): return CodecOptions(**opts) -DEFAULT_CODEC_OPTIONS = CodecOptions() +DEFAULT_CODEC_OPTIONS: CodecOptions = CodecOptions() -def _parse_codec_options(options): +def _parse_codec_options(options: Any) -> CodecOptions: """Parse BSON codec options.""" kwargs = {} for k in set(options) & {'document_class', 'tz_aware', diff --git a/bson/dbref.py b/bson/dbref.py index 24e97a6698..92a3a68367 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -15,9 +15,11 @@ """Tools for manipulating DBRefs (references to MongoDB documents).""" from copy import deepcopy +from typing import Any, Mapping, Optional -from bson.son import SON from bson._helpers import _getstate_slots, _setstate_slots +from bson.son import SON + class DBRef(object): """A reference to a document stored in MongoDB. @@ -28,7 +30,7 @@ class DBRef(object): # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 - def __init__(self, collection, id, database=None, _extra={}, **kwargs): + def __init__(self, collection: str, id: Any, database: Optional[str] = None, _extra: Mapping[str, Any] = {}, **kwargs: Any) -> None: """Initialize a new :class:`DBRef`. Raises :class:`TypeError` if `collection` or `database` is not @@ -58,32 +60,32 @@ def __init__(self, collection, id, database=None, _extra={}, **kwargs): self.__kwargs = kwargs @property - def collection(self): + def collection(self) -> str: """Get the name of this DBRef's collection. """ return self.__collection @property - def id(self): + def id(self) -> Any: """Get this DBRef's _id. """ return self.__id @property - def database(self): + def database(self) -> Optional[str]: """Get the name of this DBRef's database. Returns None if this DBRef doesn't specify a database. """ return self.__database - def __getattr__(self, key): + def __getattr__(self, key: Any) -> Any: try: return self.__kwargs[key] except KeyError: raise AttributeError(key) - def as_doc(self): + def as_doc(self) -> SON[str, Any]: """Get the SON document representation of this DBRef. Generally not needed by application developers @@ -103,7 +105,7 @@ def __repr__(self): return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): us = (self.__database, self.__collection, self.__id, self.__kwargs) @@ -112,15 +114,15 @@ def __eq__(self, other): return us == them return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: """Get a hash value for this :class:`DBRef`.""" return hash((self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items())))) - def __deepcopy__(self, memo): + def __deepcopy__(self, memo: Any) -> "DBRef": """Support function for `copy.deepcopy()`.""" return DBRef(deepcopy(self.__collection, memo), deepcopy(self.__id, memo), diff --git a/bson/decimal128.py b/bson/decimal128.py index ede728bbab..bbf5d326e4 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -19,8 +19,7 @@ import decimal import struct -import sys - +from typing import Any, Sequence, Tuple, Type, Union _PACK_64 = struct.Struct(" decimal.Context: """Returns an instance of :class:`decimal.Context` appropriate for working with IEEE-754 128-bit decimal floating point values. """ opts = _CTX_OPTIONS.copy() opts['traps'] = [] - return decimal.Context(**opts) + return decimal.Context(**opts) # type: ignore -def _decimal_to_128(value): +def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: """Converts a decimal.Decimal to BID (high bits, low bits). :Parameters: @@ -215,7 +215,7 @@ class Decimal128(object): _type_marker = 19 - def __init__(self, value): + def __init__(self, value: _VALUE_OPTIONS) -> None: if isinstance(value, (str, decimal.Decimal)): self.__high, self.__low = _decimal_to_128(value) elif isinstance(value, (list, tuple)): @@ -223,11 +223,11 @@ def __init__(self, value): raise ValueError('Invalid size for creation of Decimal128 ' 'from list or tuple. Must have exactly 2 ' 'elements.') - self.__high, self.__low = value + self.__high, self.__low = value # type: ignore else: raise TypeError("Cannot convert %r to Decimal128" % (value,)) - def to_decimal(self): + def to_decimal(self) -> decimal.Decimal: """Returns an instance of :class:`decimal.Decimal` for this :class:`Decimal128`. """ @@ -236,11 +236,11 @@ def to_decimal(self): sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: - return decimal.Decimal((sign, (), 'N')) + return decimal.Decimal((sign, (), 'N')) # type: ignore elif (high & _NAN) == _NAN: - return decimal.Decimal((sign, (), 'n')) + return decimal.Decimal((sign, (), 'n')) # type: ignore elif (high & _INF) == _INF: - return decimal.Decimal((sign, (), 'F')) + return decimal.Decimal((sign, (), 'F')) # type: ignore if (high & _EXPONENT_MASK) == _EXPONENT_MASK: exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS @@ -270,7 +270,7 @@ def to_decimal(self): return ctx.create_decimal((sign, digits, exponent)) @classmethod - def from_bid(cls, value): + def from_bid(cls: Type["Decimal128"], value: bytes) -> "Decimal128": """Create an instance of :class:`Decimal128` from Binary Integer Decimal string. @@ -282,14 +282,14 @@ def from_bid(cls, value): raise TypeError("value must be an instance of bytes") if len(value) != 16: raise ValueError("value must be exactly 16 bytes") - return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) + return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) # type: ignore @property - def bid(self): + def bid(self) -> bytes: """The Binary Integer Decimal (BID) encoding of this instance.""" return _PACK_64(self.__low) + _PACK_64(self.__high) - def __str__(self): + def __str__(self) -> str: dec = self.to_decimal() if dec.is_nan(): # Required by the drivers spec to match MongoDB behavior. @@ -299,16 +299,16 @@ def __str__(self): def __repr__(self): return "Decimal128('%s')" % (str(self),) - def __setstate__(self, value): + def __setstate__(self, value: Tuple[int, int]) -> None: self.__high, self.__low = value - def __getstate__(self): + def __getstate__(self) -> Tuple[int, int]: return self.__high, self.__low - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Decimal128): return self.bid == other.bid return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other diff --git a/bson/int64.py b/bson/int64.py index fb9bfe9143..f1424c8812 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -14,6 +14,9 @@ """A BSON wrapper for long (int in python3)""" +from typing import Any + + class Int64(int): """Representation of the BSON int64 type. @@ -28,8 +31,8 @@ class Int64(int): _type_marker = 18 - def __getstate__(self): + def __getstate__(self) -> Any: return {} - def __setstate__(self, state): + def __setstate__(self, state: Any) -> None: pass diff --git a/bson/json_util.py b/bson/json_util.py index ed67d9a36c..d7f501f120 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -92,11 +92,13 @@ import math import re import uuid +from typing import (Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, + cast) import bson -from bson import EPOCH_AWARE, RE_TYPE, SON -from bson.binary import (Binary, UuidRepresentation, ALL_UUID_SUBTYPES, - UUID_SUBTYPE) +from bson import EPOCH_AWARE +from bson.binary import (ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, + UuidRepresentation) from bson.code import Code from bson.codec_options import CodecOptions from bson.dbref import DBRef @@ -106,10 +108,10 @@ from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex +from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp from bson.tz_util import utc - _RE_OPT_TABLE = { "i": re.I, "l": re.L, @@ -246,11 +248,17 @@ class JSONOptions(CodecOptions): .. versionchanged:: 4.0 Changed default value of `tz_aware` to False. """ - - def __new__(cls, strict_number_long=None, - datetime_representation=None, - strict_uuid=None, json_mode=JSONMode.RELAXED, - *args, **kwargs): + json_mode: int + strict_number_long: bool + datetime_representation: int + strict_uuid: bool + + def __new__(cls: Type["JSONOptions"], + strict_number_long: Optional[bool] = None, + datetime_representation: Optional[int] = None, + strict_uuid: Optional[bool] = None, + json_mode: int = JSONMode.RELAXED, + *args: Any, **kwargs: Any) -> "JSONOptions": kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) @@ -261,7 +269,7 @@ def __new__(cls, strict_number_long=None, raise ValueError( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") - self = super(JSONOptions, cls).__new__(cls, *args, **kwargs) + self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): @@ -313,7 +321,7 @@ def __new__(cls, strict_number_long=None, self.strict_uuid = strict_uuid return self - def _arguments_repr(self): + def _arguments_repr(self) -> str: return ('strict_number_long=%r, ' 'datetime_representation=%r, ' 'strict_uuid=%r, json_mode=%r, %s' % ( @@ -323,7 +331,7 @@ def _arguments_repr(self): self.json_mode, super(JSONOptions, self)._arguments_repr())) - def _options_dict(self): + def _options_dict(self) -> Dict[Any, Any]: # TODO: PYTHON-2442 use _asdict() instead options_dict = super(JSONOptions, self)._options_dict() options_dict.update({ @@ -333,7 +341,7 @@ def _options_dict(self): 'json_mode': self.json_mode}) return options_dict - def with_options(self, **kwargs): + def with_options(self, **kwargs: Any) -> "JSONOptions": """ Make a copy of this JSONOptions, overriding some options:: @@ -354,7 +362,7 @@ def with_options(self, **kwargs): return JSONOptions(**opts) -LEGACY_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.LEGACY) +LEGACY_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.LEGACY) """:class:`JSONOptions` for encoding to PyMongo's legacy JSON format. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.LEGACY`. @@ -362,7 +370,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -CANONICAL_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.CANONICAL) +CANONICAL_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.CANONICAL) """:class:`JSONOptions` for Canonical Extended JSON. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.CANONICAL`. @@ -370,7 +378,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -RELAXED_JSON_OPTIONS = JSONOptions(json_mode=JSONMode.RELAXED) +RELAXED_JSON_OPTIONS: JSONOptions = JSONOptions(json_mode=JSONMode.RELAXED) """:class:`JSONOptions` for Relaxed Extended JSON. .. seealso:: The documentation for :const:`bson.json_util.JSONMode.RELAXED`. @@ -378,7 +386,7 @@ def with_options(self, **kwargs): .. versionadded:: 3.5 """ -DEFAULT_JSON_OPTIONS = RELAXED_JSON_OPTIONS +DEFAULT_JSON_OPTIONS: JSONOptions = RELAXED_JSON_OPTIONS """The default :class:`JSONOptions` for JSON encoding/decoding. The same as :const:`RELAXED_JSON_OPTIONS`. @@ -391,7 +399,7 @@ def with_options(self, **kwargs): """ -def dumps(obj, *args, **kwargs): +def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: """Helper function that wraps :func:`json.dumps`. Recursive function that handles all BSON types including @@ -413,7 +421,7 @@ def dumps(obj, *args, **kwargs): return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s, *args, **kwargs): +def loads(s: str, *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. @@ -440,7 +448,7 @@ def loads(s, *args, **kwargs): return json.loads(s, *args, **kwargs) -def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS): +def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: """Recursive helper method that converts BSON types so they can be converted into json. """ @@ -455,11 +463,11 @@ def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS): return obj -def object_pairs_hook(pairs, json_options=DEFAULT_JSON_OPTIONS): +def object_pairs_hook(pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: return object_hook(json_options.document_class(pairs), json_options) -def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): +def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if "$oid" in dct: return _parse_canonical_oid(dct) if (isinstance(dct.get('$ref'), str) and @@ -505,7 +513,7 @@ def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS): return dct -def _parse_legacy_regex(doc): +def _parse_legacy_regex(doc: Any) -> Any: pattern = doc["$regex"] # Check if this is the $regex query operator. if not isinstance(pattern, (str, bytes)): @@ -517,7 +525,7 @@ def _parse_legacy_regex(doc): return Regex(pattern, flags) -def _parse_legacy_uuid(doc, json_options): +def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) @@ -529,7 +537,7 @@ def _parse_legacy_uuid(doc, json_options): return uuid.UUID(doc["$uuid"]) -def _binary_or_uuid(data, subtype, json_options): +def _binary_or_uuid(data: Any, subtype: int, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: # special handling for UUID if subtype in ALL_UUID_SUBTYPES: uuid_representation = json_options.uuid_representation @@ -546,11 +554,11 @@ def _binary_or_uuid(data, subtype, json_options): return binary_value.as_uuid(uuid_representation) if subtype == 0: - return data + return cast(uuid.UUID, data) return Binary(data, subtype) -def _parse_legacy_binary(doc, json_options): +def _parse_legacy_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: if isinstance(doc["$type"], int): doc["$type"] = "%02x" % doc["$type"] subtype = int(doc["$type"], 16) @@ -560,7 +568,7 @@ def _parse_legacy_binary(doc, json_options): return _binary_or_uuid(data, subtype, json_options) -def _parse_canonical_binary(doc, json_options): +def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: binary = doc["$binary"] b64 = binary["base64"] subtype = binary["subType"] @@ -577,7 +585,7 @@ def _parse_canonical_binary(doc, json_options): return _binary_or_uuid(data, int(subtype, 16), json_options) -def _parse_canonical_datetime(doc, json_options): +def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.datetime: """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: @@ -636,14 +644,14 @@ def _parse_canonical_datetime(doc, json_options): return bson._millis_to_datetime(int(dtm), json_options) -def _parse_canonical_oid(doc): +def _parse_canonical_oid(doc: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: raise TypeError('Bad $oid, extra field(s): %s' % (doc,)) return ObjectId(doc['$oid']) -def _parse_canonical_symbol(doc): +def _parse_canonical_symbol(doc: Any) -> str: """Decode a JSON symbol to Python string.""" symbol = doc['$symbol'] if len(doc) != 1: @@ -651,7 +659,7 @@ def _parse_canonical_symbol(doc): return str(symbol) -def _parse_canonical_code(doc): +def _parse_canonical_code(doc: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: if key not in ('$code', '$scope'): @@ -659,7 +667,7 @@ def _parse_canonical_code(doc): return Code(doc['$code'], scope=doc.get('$scope')) -def _parse_canonical_regex(doc): +def _parse_canonical_regex(doc: Any) -> Regex: """Decode a JSON regex to bson.regex.Regex.""" regex = doc['$regularExpression'] if len(doc) != 1: @@ -674,13 +682,13 @@ def _parse_canonical_regex(doc): return Regex(regex['pattern'], opts) -def _parse_canonical_dbref(doc): +def _parse_canonical_dbref(doc: Any) -> DBRef: """Decode a JSON DBRef to bson.dbref.DBRef.""" return DBRef(doc.pop('$ref'), doc.pop('$id'), database=doc.pop('$db', None), **doc) -def _parse_canonical_dbpointer(doc): +def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" dbref = doc['$dbPointer'] if len(doc) != 1: @@ -702,7 +710,7 @@ def _parse_canonical_dbpointer(doc): raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,)) -def _parse_canonical_int32(doc): +def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" i_str = doc['$numberInt'] if len(doc) != 1: @@ -712,7 +720,7 @@ def _parse_canonical_int32(doc): return int(i_str) -def _parse_canonical_int64(doc): +def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" l_str = doc['$numberLong'] if len(doc) != 1: @@ -720,7 +728,7 @@ def _parse_canonical_int64(doc): return Int64(l_str) -def _parse_canonical_double(doc): +def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" d_str = doc['$numberDouble'] if len(doc) != 1: @@ -730,7 +738,7 @@ def _parse_canonical_double(doc): return float(d_str) -def _parse_canonical_decimal128(doc): +def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" d_str = doc['$numberDecimal'] if len(doc) != 1: @@ -740,7 +748,7 @@ def _parse_canonical_decimal128(doc): return Decimal128(d_str) -def _parse_canonical_minkey(doc): +def _parse_canonical_minkey(doc: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" if type(doc['$minKey']) is not int or doc['$minKey'] != 1: raise TypeError('$minKey value must be 1: %s' % (doc,)) @@ -749,7 +757,7 @@ def _parse_canonical_minkey(doc): return MinKey() -def _parse_canonical_maxkey(doc): +def _parse_canonical_maxkey(doc: Any) -> MaxKey: """Decode a JSON MaxKey to bson.max_key.MaxKey.""" if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1: raise TypeError('$maxKey value must be 1: %s', (doc,)) @@ -758,7 +766,7 @@ def _parse_canonical_maxkey(doc): return MaxKey() -def _encode_binary(data, subtype, json_options): +def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: if json_options.json_mode == JSONMode.LEGACY: return SON([ ('$binary', base64.b64encode(data).decode()), @@ -768,7 +776,7 @@ def _encode_binary(data, subtype, json_options): ('subType', "%02x" % subtype)])} -def default(obj, json_options=DEFAULT_JSON_OPTIONS): +def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: # We preserve key order when rendering SON, DBRef, etc. as JSON by # returning a SON for those types instead of a dict. if isinstance(obj, ObjectId): @@ -780,9 +788,10 @@ def default(obj, json_options=DEFAULT_JSON_OPTIONS): DatetimeRepresentation.ISO8601): if not obj.tzinfo: obj = obj.replace(tzinfo=utc) + assert obj.tzinfo is not None if obj >= EPOCH_AWARE: off = obj.tzinfo.utcoffset(obj) - if (off.days, off.seconds, off.microseconds) == (0, 0, 0): + if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore tz_string = 'Z' else: tz_string = obj.strftime('%z') diff --git a/bson/max_key.py b/bson/max_key.py index afd7fcb1b3..107dc9dec6 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -14,6 +14,7 @@ """Representation for the MongoDB internal MaxKey type. """ +from typing import Any class MaxKey(object): @@ -22,31 +23,31 @@ class MaxKey(object): _type_marker = 127 - def __getstate__(self): + def __getstate__(self) -> Any: return {} - def __setstate__(self, state): + def __setstate__(self, state: Any) -> None: pass - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __hash__(self): + def __hash__(self) -> int: return hash(self._type_marker) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, other): + def __le__(self, other: Any) -> bool: return isinstance(other, MaxKey) - def __lt__(self, dummy): + def __lt__(self, dummy: Any) -> bool: return False - def __ge__(self, dummy): + def __ge__(self, dummy: Any) -> bool: return True - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: return not isinstance(other, MaxKey) def __repr__(self): diff --git a/bson/min_key.py b/bson/min_key.py index bcb7f9e60f..5483eb6cf8 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -14,6 +14,7 @@ """Representation for the MongoDB internal MinKey type. """ +from typing import Any class MinKey(object): @@ -22,31 +23,31 @@ class MinKey(object): _type_marker = 255 - def __getstate__(self): + def __getstate__(self) -> Any: return {} - def __setstate__(self, state): + def __setstate__(self, state: Any) -> None: pass - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __hash__(self): + def __hash__(self) -> int: return hash(self._type_marker) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __le__(self, dummy): + def __le__(self, dummy: Any) -> bool: return True - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: return not isinstance(other, MinKey) - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: return isinstance(other, MinKey) - def __gt__(self, dummy): + def __gt__(self, dummy: Any) -> bool: return False def __repr__(self): diff --git a/bson/objectid.py b/bson/objectid.py index faf8910edc..baf1966bce 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -23,23 +23,22 @@ import struct import threading import time - from random import SystemRandom +from typing import Any, NoReturn, Optional, Type, Union from bson.errors import InvalidId from bson.tz_util import utc - _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid): +def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" " or a 24-character hex string" % oid) -def _random_bytes(): +def _random_bytes() -> bytes: """Get the 5-byte random field of an ObjectId.""" return os.urandom(5) @@ -59,7 +58,7 @@ class ObjectId(object): _type_marker = 7 - def __init__(self, oid=None): + def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: """Initialize a new ObjectId. An ObjectId is a 12-byte unique identifier consisting of: @@ -105,7 +104,7 @@ def __init__(self, oid=None): self.__validate(oid) @classmethod - def from_datetime(cls, generation_time): + def from_datetime(cls: Type["ObjectId"], generation_time: datetime.datetime) -> "ObjectId": """Create a dummy ObjectId instance with a specific generation time. This method is useful for doing range queries on a field @@ -132,15 +131,16 @@ def from_datetime(cls, generation_time): - `generation_time`: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - if generation_time.utcoffset() is not None: - generation_time = generation_time - generation_time.utcoffset() + offset = generation_time.utcoffset() + if offset is not None: + generation_time = generation_time - offset timestamp = calendar.timegm(generation_time.timetuple()) oid = struct.pack( ">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" return cls(oid) @classmethod - def is_valid(cls, oid): + def is_valid(cls: Type["ObjectId"], oid: Any) -> bool: """Checks if a `oid` string is valid or not. :Parameters: @@ -158,7 +158,7 @@ def is_valid(cls, oid): return False @classmethod - def _random(cls): + def _random(cls) -> bytes: """Generate a 5-byte random number once per process. """ pid = os.getpid() @@ -167,7 +167,7 @@ def _random(cls): cls.__random = _random_bytes() return cls.__random - def __generate(self): + def __generate(self) -> None: """Generate a new value for this ObjectId. """ @@ -184,7 +184,7 @@ def __generate(self): self.__id = oid - def __validate(self, oid): + def __validate(self, oid: Any) -> None: """Validate and use the given id for this ObjectId. Raises TypeError if id is not an instance of @@ -210,13 +210,13 @@ def __validate(self, oid): "not %s" % (type(oid),)) @property - def binary(self): + def binary(self) -> bytes: """12-byte binary representation of this ObjectId. """ return self.__id @property - def generation_time(self): + def generation_time(self) -> datetime.datetime: """A :class:`datetime.datetime` instance representing the time of generation for this :class:`ObjectId`. @@ -227,13 +227,13 @@ def generation_time(self): timestamp = struct.unpack(">I", self.__id[0:4])[0] return datetime.datetime.fromtimestamp(timestamp, utc) - def __getstate__(self): + def __getstate__(self) -> bytes: """return value of object for pickling. needed explicitly because __slots__() defined. """ return self.__id - def __setstate__(self, value): + def __setstate__(self, value: Any) -> None: """explicit state set from pickling """ # Provide backwards compatability with OIDs @@ -250,42 +250,42 @@ def __setstate__(self, value): else: self.__id = oid - def __str__(self): + def __str__(self) -> str: return binascii.hexlify(self.__id).decode() def __repr__(self): return "ObjectId('%s')" % (str(self),) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id == other.binary return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id != other.binary return NotImplemented - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id < other.binary return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id <= other.binary return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id > other.binary return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, ObjectId): return self.__id >= other.binary return NotImplemented - def __hash__(self): + def __hash__(self) -> int: """Get a hash value for this :class:`ObjectId`.""" return hash(self.__id) diff --git a/bson/raw_bson.py b/bson/raw_bson.py index bfe888b6b7..8a3b0cb4fb 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -52,14 +52,16 @@ """ from collections.abc import Mapping as _Mapping +from typing import Any, ItemsView, Iterator, Mapping, Optional, cast -from bson import _raw_to_dict, _get_object_size -from bson.codec_options import ( - DEFAULT_CODEC_OPTIONS as DEFAULT, _RAW_BSON_DOCUMENT_MARKER) +from bson import _get_object_size, _raw_to_dict +from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER +from bson.codec_options import DEFAULT_CODEC_OPTIONS as DEFAULT +from bson.codec_options import CodecOptions from bson.son import SON -class RawBSONDocument(_Mapping): +class RawBSONDocument(Mapping[str, Any]): """Representation for a MongoDB document that provides access to the raw BSON bytes that compose it. @@ -70,7 +72,7 @@ class RawBSONDocument(_Mapping): __slots__ = ('__raw', '__inflated_doc', '__codec_options') _type_marker = _RAW_BSON_DOCUMENT_MARKER - def __init__(self, bson_bytes, codec_options=None): + def __init__(self, bson_bytes: bytes, codec_options: Optional[CodecOptions] = None) -> None: """Create a new :class:`RawBSONDocument` :class:`RawBSONDocument` is a representation of a BSON document that @@ -105,7 +107,7 @@ class from the standard library so it can be used like a read-only `document_class` must be :class:`RawBSONDocument`. """ self.__raw = bson_bytes - self.__inflated_doc = None + self.__inflated_doc: Optional[Mapping[str, Any]] = None # Can't default codec_options to DEFAULT_RAW_BSON_OPTIONS in signature, # it refers to this class RawBSONDocument. if codec_options is None: @@ -119,16 +121,16 @@ class from the standard library so it can be used like a read-only _get_object_size(bson_bytes, 0, len(bson_bytes)) @property - def raw(self): + def raw(self) -> bytes: """The raw BSON bytes composing this document.""" return self.__raw - def items(self): + def items(self) -> ItemsView[str, Any]: """Lazily decode and iterate elements in this document.""" return self.__inflated.items() @property - def __inflated(self): + def __inflated(self) -> Mapping[str, Any]: if self.__inflated_doc is None: # We already validated the object's size when this document was # created, so no need to do that again. @@ -137,16 +139,16 @@ def __inflated(self): self.__raw, self.__codec_options) return self.__inflated_doc - def __getitem__(self, item): + def __getitem__(self, item: str) -> Any: return self.__inflated[item] - def __iter__(self): + def __iter__(self) -> Iterator[str]: return iter(self.__inflated) - def __len__(self): + def __len__(self) -> int: return len(self.__inflated) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, RawBSONDocument): return self.__raw == other.raw return NotImplemented @@ -156,7 +158,7 @@ def __repr__(self): % (self.raw, self.__codec_options)) -def _inflate_bson(bson_bytes, codec_options): +def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: """Inflates the top level fields of a BSON document. :Parameters: @@ -170,7 +172,7 @@ def _inflate_bson(bson_bytes, codec_options): bson_bytes, 4, len(bson_bytes)-1, codec_options, SON()) -DEFAULT_RAW_BSON_OPTIONS = DEFAULT.with_options(document_class=RawBSONDocument) +DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) """The default :class:`~bson.codec_options.CodecOptions` for :class:`RawBSONDocument`. """ diff --git a/bson/regex.py b/bson/regex.py index 5cf097f08c..454aca3cec 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -16,12 +16,13 @@ """ import re +from typing import Any, Pattern, Type, Union -from bson.son import RE_TYPE from bson._helpers import _getstate_slots, _setstate_slots +from bson.son import RE_TYPE -def str_flags_to_int(str_flags): +def str_flags_to_int(str_flags: str) -> int: flags = 0 if "i" in str_flags: flags |= re.IGNORECASE @@ -49,7 +50,7 @@ class Regex(object): _type_marker = 11 @classmethod - def from_native(cls, regex): + def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": """Convert a Python regular expression into a ``Regex`` instance. Note that in Python 3, a regular expression compiled from a @@ -80,7 +81,7 @@ def from_native(cls, regex): return Regex(regex.pattern, regex.flags) - def __init__(self, pattern, flags=0): + def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> None: """BSON regular expression data. This class is useful to store and retrieve regular expressions that are @@ -103,21 +104,21 @@ def __init__(self, pattern, flags=0): raise TypeError( "flags must be a string or int, not %s" % type(flags)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Regex): return self.pattern == other.pattern and self.flags == other.flags else: return NotImplemented - __hash__ = None + __hash__ = None # type: ignore - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): return "Regex(%r, %r)" % (self.pattern, self.flags) - def try_compile(self): + def try_compile(self) -> Pattern[Any]: """Compile this :class:`Regex` as a Python regular expression. .. warning:: diff --git a/bson/son.py b/bson/son.py index 5a3210fcdb..7207367f3d 100644 --- a/bson/son.py +++ b/bson/son.py @@ -20,29 +20,35 @@ import copy import re - from collections.abc import Mapping as _Mapping +from typing import (Any, Dict, Iterable, Iterator, List, Mapping, + Optional, Pattern, Tuple, Type, TypeVar, Union) # This sort of sucks, but seems to be as good as it gets... # This is essentially the same as re._pattern_type -RE_TYPE = type(re.compile("")) +RE_TYPE: Type[Pattern[Any]] = type(re.compile("")) + +_Key = TypeVar("_Key", bound=str) +_Value = TypeVar("_Value") +_T = TypeVar("_T") -class SON(dict): +class SON(Dict[_Key, _Value]): """SON data. A subclass of dict that maintains ordering of keys and provides a few extra niceties for dealing with SON. SON provides an API similar to collections.OrderedDict. """ + __keys: List[Any] - def __init__(self, data=None, **kwargs): + def __init__(self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, **kwargs: Any) -> None: self.__keys = [] dict.__init__(self) self.update(data) self.update(kwargs) - def __new__(cls, *args, **kwargs): + def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": instance = super(SON, cls).__new__(cls, *args, **kwargs) instance.__keys = [] return instance @@ -53,53 +59,53 @@ def __repr__(self): result.append("(%r, %r)" % (key, self[key])) return "SON([%s])" % ", ".join(result) - def __setitem__(self, key, value): + def __setitem__(self, key: _Key, value: _Value) -> None: if key not in self.__keys: self.__keys.append(key) dict.__setitem__(self, key, value) - def __delitem__(self, key): + def __delitem__(self, key: _Key) -> None: self.__keys.remove(key) dict.__delitem__(self, key) - def copy(self): - other = SON() + def copy(self) -> "SON[_Key, _Value]": + other: SON[_Key, _Value] = SON() other.update(self) return other # TODO this is all from UserDict.DictMixin. it could probably be made more # efficient. # second level definitions support higher levels - def __iter__(self): + def __iter__(self) -> Iterator[_Key]: for k in self.__keys: yield k - def has_key(self, key): + def has_key(self, key: _Key) -> bool: return key in self.__keys - def iterkeys(self): + def iterkeys(self) -> Iterator[_Key]: return self.__iter__() # fourth level uses definitions from lower levels - def itervalues(self): + def itervalues(self) -> Iterator[_Value]: for _, v in self.items(): yield v - def values(self): + def values(self) -> List[_Value]: # type: ignore[override] return [v for _, v in self.items()] - def clear(self): + def clear(self) -> None: self.__keys = [] super(SON, self).clear() - def setdefault(self, key, default=None): + def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[override] try: return self[key] except KeyError: self[key] = default return default - def pop(self, key, *args): + def pop(self, key: _Key, *args: Union[_Value, _T]) -> Union[_Value, _T]: if len(args) > 1: raise TypeError("pop expected at most 2 arguments, got "\ + repr(1 + len(args))) @@ -112,7 +118,7 @@ def pop(self, key, *args): del self[key] return value - def popitem(self): + def popitem(self) -> Tuple[_Key, _Value]: try: k, v = next(iter(self.items())) except StopIteration: @@ -120,7 +126,7 @@ def popitem(self): del self[k] return (k, v) - def update(self, other=None, **kwargs): + def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type: ignore[override] # Make progressively weaker assumptions about "other" if other is None: pass @@ -136,13 +142,13 @@ def update(self, other=None, **kwargs): if kwargs: self.update(kwargs) - def get(self, key, default=None): + def get(self, key: _Key, default: Optional[Union[_Value, _T]] = None) -> Union[_Value, _T, None]: # type: ignore[override] try: return self[key] except KeyError: return default - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: """Comparison to another SON is order-sensitive while comparison to a regular dictionary is order-insensitive. """ @@ -151,20 +157,20 @@ def __eq__(self, other): list(other.items()) return self.to_dict() == other - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __len__(self): + def __len__(self) -> int: return len(self.__keys) - def to_dict(self): + def to_dict(self) -> Dict[_Key, _Value]: """Convert a SON document to a normal Python dictionary instance. This is trickier than just *dict(...)* because it needs to be recursive. """ - def transform_value(value): + def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): @@ -176,11 +182,11 @@ def transform_value(value): return transform_value(dict(self)) - def __deepcopy__(self, memo): - out = SON() + def __deepcopy__(self, memo: Dict[int, "SON[_Key, _Value]"]) -> "SON[_Key, _Value]": + out: SON[_Key, _Value] = SON() val_id = id(self) if val_id in memo: - return memo.get(val_id) + return memo[val_id] memo[val_id] = out for k, v in self.items(): if not isinstance(v, RE_TYPE): diff --git a/bson/timestamp.py b/bson/timestamp.py index 69c061d2a5..93c7540fd0 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -17,9 +17,10 @@ import calendar import datetime +from typing import Any, Union -from bson.tz_util import utc from bson._helpers import _getstate_slots, _setstate_slots +from bson.tz_util import utc UPPERBOUND = 4294967296 @@ -34,7 +35,7 @@ class Timestamp(object): _type_marker = 17 - def __init__(self, time, inc): + def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: """Create a new :class:`Timestamp`. This class is only for use with the MongoDB opLog. If you need @@ -53,8 +54,9 @@ def __init__(self, time, inc): - `inc`: the incrementing counter """ if isinstance(time, datetime.datetime): - if time.utcoffset() is not None: - time = time - time.utcoffset() + offset = time.utcoffset() + if offset is not None: + time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): raise TypeError("time must be an instance of int") @@ -69,45 +71,45 @@ def __init__(self, time, inc): self.__inc = inc @property - def time(self): + def time(self) -> int: """Get the time portion of this :class:`Timestamp`. """ return self.__time @property - def inc(self): + def inc(self) -> int: """Get the inc portion of this :class:`Timestamp`. """ return self.__inc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.__time == other.time and self.__inc == other.inc) else: return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return hash(self.time) ^ hash(self.inc) - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __lt__(self, other): + def __lt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) < (other.time, other.inc) return NotImplemented - def __le__(self, other): + def __le__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) <= (other.time, other.inc) return NotImplemented - def __gt__(self, other): + def __gt__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) > (other.time, other.inc) return NotImplemented - def __ge__(self, other): + def __ge__(self, other: Any) -> bool: if isinstance(other, Timestamp): return (self.time, self.inc) >= (other.time, other.inc) return NotImplemented @@ -115,7 +117,7 @@ def __ge__(self, other): def __repr__(self): return "Timestamp(%s, %s)" % (self.__time, self.__inc) - def as_datetime(self): + def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding to the time portion of this :class:`Timestamp`. diff --git a/bson/tz_util.py b/bson/tz_util.py index 6ec918fb2b..43ae52ccff 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -14,10 +14,10 @@ """Timezone related utilities for BSON.""" -from datetime import (timedelta, - tzinfo) +from datetime import datetime, timedelta, tzinfo +from typing import Any, Optional, Tuple, Union -ZERO = timedelta(0) +ZERO: timedelta = timedelta(0) class FixedOffset(tzinfo): @@ -28,25 +28,25 @@ class FixedOffset(tzinfo): Defining __getinitargs__ enables pickling / copying. """ - def __init__(self, offset, name): + def __init__(self, offset: Union[float, timedelta], name: str) -> None: if isinstance(offset, timedelta): self.__offset = offset else: self.__offset = timedelta(minutes=offset) self.__name = name - def __getinitargs__(self): + def __getinitargs__(self) -> Tuple[timedelta, str]: return self.__offset, self.__name - def utcoffset(self, dt): + def utcoffset(self, dt: Optional[datetime]) -> timedelta: return self.__offset - def tzname(self, dt): + def tzname(self, dt: Optional[datetime]) -> str: return self.__name - def dst(self, dt): + def dst(self, dt: Optional[datetime]) -> timedelta: return ZERO -utc = FixedOffset(0, "UTC") +utc: FixedOffset = FixedOffset(0, "UTC") """Fixed offset timezone representing UTC.""" diff --git a/doc/changelog.rst b/doc/changelog.rst index 062104bc8f..4ff9cd781d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,7 +4,7 @@ Changelog Changes in Version 4.1 ---------------------- -PyMongo 4.0 brings a number of improvements including: +PyMongo 4.1 brings a number of improvements including: - :meth:`pymongo.collection.Collection.update_one`, :meth:`pymongo.collection.Collection.update_many`, @@ -21,6 +21,8 @@ PyMongo 4.0 brings a number of improvements including: $merge and $out executing on secondaries on MongoDB >=5.0. aggregate() now always obeys the collection's :attr:`read_preference` on MongoDB >= 5.0. +- :meth:`gridfs.GridOut.seek` now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. Changes in Version 4.0 diff --git a/gridfs/__init__.py b/gridfs/__init__.py index c36d921e8c..02c42d6eb6 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -21,25 +21,28 @@ """ from collections import abc +from typing import Any, List, Mapping, Optional, cast -from pymongo import (ASCENDING, - DESCENDING) +from bson.objectid import ObjectId +from gridfs.errors import NoFile +from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, GridIn, GridOut, + GridOutCursor, _clear_entity_type_registry, + _disallow_transactions) +from pymongo import ASCENDING, DESCENDING +from pymongo.client_session import ClientSession +from pymongo.collation import Collation +from pymongo.collection import Collection from pymongo.common import UNAUTHORIZED_CODES, validate_string from pymongo.database import Database from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.read_preferences import _ServerMode +from pymongo.write_concern import WriteConcern -from gridfs.errors import NoFile -from gridfs.grid_file import (GridIn, - GridOut, - GridOutCursor, - DEFAULT_CHUNK_SIZE, - _clear_entity_type_registry, - _disallow_transactions) class GridFS(object): """An instance of GridFS on top of a single Database. """ - def __init__(self, database, collection="fs"): + def __init__(self, database: Database, collection: str = "fs"): """Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of @@ -82,7 +85,7 @@ def __init__(self, database, collection="fs"): self.__files = self.__collection.files self.__chunks = self.__collection.chunks - def new_file(self, **kwargs): + def new_file(self, **kwargs: Any) -> GridIn: """Create a new file in GridFS. Returns a new :class:`~gridfs.grid_file.GridIn` instance to @@ -98,7 +101,7 @@ def new_file(self, **kwargs): """ return GridIn(self.__collection, **kwargs) - def put(self, data, **kwargs): + def put(self, data: Any, **kwargs: Any) -> Any: """Put data in GridFS as a new file. Equivalent to doing:: @@ -136,7 +139,7 @@ def put(self, data, **kwargs): return grid_file._id - def get(self, file_id, session=None): + def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Get a file from GridFS by ``"_id"``. Returns an instance of :class:`~gridfs.grid_file.GridOut`, @@ -156,7 +159,7 @@ def get(self, file_id, session=None): gout._ensure_file() return gout - def get_version(self, filename=None, version=-1, session=None, **kwargs): + def get_version(self, filename: Optional[str] = None, version: Optional[int] = -1, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: """Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches @@ -197,6 +200,8 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): _disallow_transactions(session) cursor = self.__files.find(query, session=session) + if version is None: + version = -1 if version < 0: skip = abs(version) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) @@ -209,7 +214,7 @@ def get_version(self, filename=None, version=-1, session=None, **kwargs): except StopIteration: raise NoFile("no version %d for filename %r" % (version, filename)) - def get_last_version(self, filename=None, session=None, **kwargs): + def get_last_version(self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: """Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. @@ -228,7 +233,7 @@ def get_last_version(self, filename=None, session=None, **kwargs): return self.get_version(filename=filename, session=session, **kwargs) # TODO add optional safe mode for chunk removal? - def delete(self, file_id, session=None): + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Delete a file from GridFS by ``"_id"``. Deletes all data belonging to the file with ``"_id"``: @@ -257,7 +262,7 @@ def delete(self, file_id, session=None): self.__files.delete_one({"_id": file_id}, session=session) self.__chunks.delete_many({"files_id": file_id}, session=session) - def list(self, session=None): + def list(self, session: Optional[ClientSession] = None) -> List[str]: """List the names of all files stored in this instance of :class:`GridFS`. @@ -278,7 +283,7 @@ def list(self, session=None): name for name in self.__files.distinct("filename", session=session) if name is not None] - def find_one(self, filter=None, session=None, *args, **kwargs): + def find_one(self, filter: Optional[Any] = None, session: Optional[ClientSession] = None, *args: Any, **kwargs: Any) -> Optional[GridOut]: """Get a single file from gridfs. All arguments to :meth:`find` are also valid arguments for @@ -311,7 +316,7 @@ def find_one(self, filter=None, session=None, *args, **kwargs): return None - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Query GridFS for files. Returns a cursor that iterates across files matching @@ -372,7 +377,7 @@ def find(self, *args, **kwargs): """ return GridOutCursor(self.__collection, *args, **kwargs) - def exists(self, document_or_id=None, session=None, **kwargs): + def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> bool: """Check if a file exists in this instance of :class:`GridFS`. The file to check for can be specified by the value of its @@ -422,9 +427,10 @@ def exists(self, document_or_id=None, session=None, **kwargs): class GridFSBucket(object): """An instance of GridFS on top of a single Database.""" - def __init__(self, db, bucket_name="fs", - chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None, - read_preference=None): + def __init__(self, db: Database, bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None) -> None: """Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of @@ -470,18 +476,19 @@ def __init__(self, db, bucket_name="fs", self._bucket_name = bucket_name self._collection = db[bucket_name] - self._chunks = self._collection.chunks.with_options( + self._chunks: Collection = self._collection.chunks.with_options( write_concern=write_concern, read_preference=read_preference) - self._files = self._collection.files.with_options( + self._files: Collection = self._collection.files.with_options( write_concern=write_concern, read_preference=read_preference) self._chunk_size_bytes = chunk_size_bytes - def open_upload_stream(self, filename, chunk_size_bytes=None, - metadata=None, session=None): + def open_upload_stream(self, filename: str, chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -528,8 +535,9 @@ def open_upload_stream(self, filename, chunk_size_bytes=None, return GridIn(self._collection, session=session, **opts) def open_upload_stream_with_id( - self, file_id, filename, chunk_size_bytes=None, metadata=None, - session=None): + self, file_id: Any, filename: str, chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -580,8 +588,10 @@ def open_upload_stream_with_id( return GridIn(self._collection, session=session, **opts) - def upload_from_stream(self, filename, source, chunk_size_bytes=None, - metadata=None, session=None): + def upload_from_stream(self, filename: str, source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> ObjectId: """Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads @@ -621,11 +631,12 @@ def upload_from_stream(self, filename, source, chunk_size_bytes=None, filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) - return gin._id + return cast(ObjectId, gin._id) - def upload_from_stream_with_id(self, file_id, filename, source, - chunk_size_bytes=None, metadata=None, - session=None): + def upload_from_stream_with_id(self, file_id: Any, filename: str, source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None) -> None: """Uploads a user file to a GridFS bucket with a custom file id. Reads the contents of the user file from `source` and uploads @@ -667,7 +678,7 @@ def upload_from_stream_with_id(self, file_id, filename, source, session=session) as gin: gin.write(source) - def open_download_stream(self, file_id, session=None): + def open_download_stream(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Opens a Stream from which the application can read the contents of the stored file specified by file_id. @@ -698,7 +709,7 @@ def open_download_stream(self, file_id, session=None): gout._ensure_file() return gout - def download_to_stream(self, file_id, destination, session=None): + def download_to_stream(self, file_id: Any, destination: Any, session: Optional[ClientSession] = None) -> None: """Downloads the contents of the stored file specified by file_id and writes the contents to `destination`. @@ -729,7 +740,7 @@ def download_to_stream(self, file_id, destination, session=None): for chunk in gout: destination.write(chunk) - def delete(self, file_id, session=None): + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Given an file_id, delete this stored file's files collection document and associated chunks from a GridFS bucket. @@ -758,7 +769,7 @@ def delete(self, file_id, session=None): raise NoFile( "no file could be deleted because none matched %s" % file_id) - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Find and return the files collection documents that match ``filter`` Returns a cursor that iterates across files matching @@ -806,7 +817,7 @@ def find(self, *args, **kwargs): """ return GridOutCursor(self._collection, *args, **kwargs) - def open_download_stream_by_name(self, filename, revision=-1, session=None): + def open_download_stream_by_name(self, filename: str, revision: int = -1, session: Optional[ClientSession] = None) -> GridOut: """Opens a Stream from which the application can read the contents of `filename` and optional `revision`. @@ -861,8 +872,9 @@ def open_download_stream_by_name(self, filename, revision=-1, session=None): raise NoFile( "no version %d for filename %r" % (revision, filename)) - def download_to_stream_by_name(self, filename, destination, revision=-1, - session=None): + def download_to_stream_by_name(self, filename: str, destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None) -> None: """Write the contents of `filename` (with optional `revision`) to `destination`. @@ -905,7 +917,7 @@ def download_to_stream_by_name(self, filename, destination, revision=-1, for chunk in gout: destination.write(chunk) - def rename(self, file_id, new_filename, session=None): + def rename(self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None) -> None: """Renames the stored file with the specified file_id. For example:: diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index fc01d88d24..9353a97a1c 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -17,32 +17,25 @@ import io import math import os +from typing import Any, Iterable, List, Mapping, Optional, cast -from bson.int64 import Int64 -from bson.son import SON from bson.binary import Binary +from bson.int64 import Int64 from bson.objectid import ObjectId +from bson.son import SON +from gridfs.errors import CorruptGridFile, FileExists, NoFile from pymongo import ASCENDING +from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.cursor import Cursor -from pymongo.errors import (ConfigurationError, - CursorNotFound, - DuplicateKeyError, - InvalidOperation, +from pymongo.errors import (ConfigurationError, CursorNotFound, + DuplicateKeyError, InvalidOperation, OperationFailure) from pymongo.read_preferences import ReadPreference -from gridfs.errors import CorruptGridFile, FileExists, NoFile - -try: - _SEEK_SET = os.SEEK_SET - _SEEK_CUR = os.SEEK_CUR - _SEEK_END = os.SEEK_END -# before 2.5 -except AttributeError: - _SEEK_SET = 0 - _SEEK_CUR = 1 - _SEEK_END = 2 +_SEEK_SET = os.SEEK_SET +_SEEK_CUR = os.SEEK_CUR +_SEEK_END = os.SEEK_END EMPTY = b"" NEWLN = b"\n" @@ -51,14 +44,14 @@ # Slightly under a power of 2, to work well with server's record allocations. DEFAULT_CHUNK_SIZE = 255 * 1024 -_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)]) -_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) +_C_INDEX: SON[str, Any] = SON([("files_id", ASCENDING), ("n", ASCENDING)]) +_F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) -def _grid_in_property(field_name, docstring, read_only=False, - closed_only=False): +def _grid_in_property(field_name: str, docstring: str, read_only: Optional[bool] = False, + closed_only: Optional[bool] = False) -> Any: """Create a GridIn property.""" - def getter(self): + def getter(self: Any) -> Any: if closed_only and not self._closed: raise AttributeError("can only get %r on a closed file" % field_name) @@ -67,7 +60,7 @@ def getter(self): return self._file.get(field_name, 0) return self._file.get(field_name, None) - def setter(self, value): + def setter(self: Any, value: Any) -> Any: if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) @@ -85,9 +78,9 @@ def setter(self, value): return property(getter, doc=docstring) -def _grid_out_property(field_name, docstring): +def _grid_out_property(field_name: str, docstring: str) -> Any: """Create a GridOut property.""" - def getter(self): + def getter(self: Any) -> Any: self._ensure_file() # Protect against PHP-237 @@ -99,13 +92,13 @@ def getter(self): return property(getter, doc=docstring) -def _clear_entity_type_registry(entity, **kwargs): +def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: """Clear the given database/collection object's type registry.""" codecopts = entity.codec_options.with_options(type_registry=None) return entity.with_options(codec_options=codecopts, **kwargs) -def _disallow_transactions(session): +def _disallow_transactions(session: Optional[ClientSession]) -> None: if session and session.in_transaction: raise InvalidOperation( 'GridFS does not support multi-document transactions') @@ -114,7 +107,7 @@ def _disallow_transactions(session): class GridIn(object): """Class to write data to GridFS. """ - def __init__(self, root_collection, session=None, **kwargs): + def __init__(self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any) -> None: """Write a file to GridFS Application developers should generally not need to @@ -150,7 +143,7 @@ def __init__(self, root_collection, session=None, **kwargs): - `session` (optional): a :class:`~pymongo.client_session.ClientSession` to use for all commands - - `**kwargs` (optional): file level options (see above) + - `**kwargs: Any` (optional): file level options (see above) .. versionchanged:: 4.0 Removed the `disable_md5` parameter. See @@ -197,7 +190,7 @@ def __init__(self, root_collection, session=None, **kwargs): object.__setattr__(self, "_closed", False) object.__setattr__(self, "_ensured_index", False) - def __create_index(self, collection, index_key, unique): + def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: @@ -209,14 +202,14 @@ def __create_index(self, collection, index_key, unique): collection.create_index( index_key.items(), unique=unique, session=self._session) - def __ensure_indexes(self): + def __ensure_indexes(self) -> None: if not object.__getattribute__(self, "_ensured_index"): _disallow_transactions(self._session) self.__create_index(self._coll.files, _F_INDEX, False) self.__create_index(self._coll.chunks, _C_INDEX, True) object.__setattr__(self, "_ensured_index", True) - def abort(self): + def abort(self) -> None: """Remove all chunks/files that may have been uploaded and close. """ self._coll.chunks.delete_many( @@ -226,33 +219,36 @@ def abort(self): object.__setattr__(self, "_closed", True) @property - def closed(self): + def closed(self) -> bool: """Is this file closed? """ return self._closed - _id = _grid_in_property("_id", "The ``'_id'`` value for this file.", + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) - filename = _grid_in_property("filename", "Name of this file.") - name = _grid_in_property("filename", "Alias for `filename`.") - content_type = _grid_in_property("contentType", "Mime-type for this file.") - length = _grid_in_property("length", "Length (in bytes) of this file.", + filename: Optional[str] = _grid_in_property("filename", "Name of this file.") + name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_in_property("contentType", "Mime-type for this file.") + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) - chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.", + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) - upload_date = _grid_in_property("uploadDate", + upload_date: datetime.datetime = _grid_in_property("uploadDate", "Date that this file was uploaded.", closed_only=True) - md5 = _grid_in_property("md5", "MD5 of the contents of this file " + md5: Optional[str] = _grid_in_property("md5", "MD5 of the contents of this file " "if an md5 sum was created.", closed_only=True) - def __getattr__(self, name): + _buffer: io.BytesIO + _closed: bool + + def __getattr__(self, name: str) -> Any: if name in self._file: return self._file[name] raise AttributeError("GridIn object has no attribute '%s'" % name) - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: @@ -266,7 +262,7 @@ def __setattr__(self, name, value): self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - def __flush_data(self, data): + def __flush_data(self, data: Any) -> None: """Flush `data` to a chunk. """ self.__ensure_indexes() @@ -285,14 +281,14 @@ def __flush_data(self, data): self._chunk_number += 1 self._position += len(data) - def __flush_buffer(self): + def __flush_buffer(self) -> None: """Flush the buffer contents out to a chunk. """ self.__flush_data(self._buffer.getvalue()) self._buffer.close() self._buffer = io.BytesIO() - def __flush(self): + def __flush(self) -> Any: """Flush the file to the database. """ try: @@ -306,11 +302,11 @@ def __flush(self): except DuplicateKeyError: self._raise_file_exists(self._id) - def _raise_file_exists(self, file_id): + def _raise_file_exists(self, file_id: Any) -> None: """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) - def close(self): + def close(self) -> None: """Flush the file and close it. A closed file cannot be written any more. Calling @@ -320,16 +316,16 @@ def close(self): self.__flush() object.__setattr__(self, "_closed", True) - def read(self, size=-1): + def read(self, size: Optional[int] = -1) -> None: raise io.UnsupportedOperation('read') - def readable(self): + def readable(self) -> bool: return False - def seekable(self): + def seekable(self)-> bool: return False - def write(self, data): + def write(self, data: Any) -> None: """Write data to the file. There is no return value. `data` can be either a string of bytes or a file-like object @@ -387,7 +383,7 @@ def write(self, data): to_write = read(self.chunk_size) self._buffer.write(to_write) - def writelines(self, sequence): + def writelines(self, sequence: Iterable[Any]) -> None: """Write a sequence of strings to the file. Does not add seperators. @@ -395,15 +391,15 @@ def writelines(self, sequence): for line in sequence: self.write(line) - def writeable(self): + def writeable(self) -> bool: return True - def __enter__(self): + def __enter__(self) -> "GridIn": """Support for the context manager protocol. """ return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Support for the context manager protocol. Close the file and allow exceptions to propagate. @@ -417,8 +413,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class GridOut(io.IOBase): """Class to read data out of GridFS. """ - def __init__(self, root_collection, file_id=None, file_document=None, - session=None): + def __init__(self, root_collection: Collection, file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None) -> None: """Read a file from GridFS Application developers should generally not need to @@ -469,20 +466,23 @@ def __init__(self, root_collection, file_id=None, file_document=None, self._file = file_document self._session = session - _id = _grid_out_property("_id", "The ``'_id'`` value for this file.") - filename = _grid_out_property("filename", "Name of this file.") - name = _grid_out_property("filename", "Alias for `filename`.") - content_type = _grid_out_property("contentType", "Mime-type for this file.") - length = _grid_out_property("length", "Length (in bytes) of this file.") - chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.") - upload_date = _grid_out_property("uploadDate", + _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _grid_out_property("filename", "Name of this file.") + name: str = _grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_out_property("contentType", "Mime-type for this file.") + length: int = _grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _grid_out_property("uploadDate", "Date that this file was first uploaded.") - aliases = _grid_out_property("aliases", "List of aliases for this file.") - metadata = _grid_out_property("metadata", "Metadata attached to this file.") - md5 = _grid_out_property("md5", "MD5 of the contents of this file " + aliases: Optional[List[str]] = _grid_out_property("aliases", "List of aliases for this file.") + metadata: Optional[Mapping[str, Any]] = _grid_out_property("metadata", "Metadata attached to this file.") + md5: Optional[str] = _grid_out_property("md5", "MD5 of the contents of this file " "if an md5 sum was created.") - def _ensure_file(self): + _file: Any + __chunk_iter: Any + + def _ensure_file(self) -> None: if not self._file: _disallow_transactions(self._session) self._file = self.__files.find_one({"_id": self.__file_id}, @@ -491,16 +491,16 @@ def _ensure_file(self): raise NoFile("no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id)) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: self._ensure_file() if name in self._file: return self._file[name] raise AttributeError("GridOut object has no attribute '%s'" % name) - def readable(self): + def readable(self) -> bool: return True - def readchunk(self): + def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ @@ -526,7 +526,7 @@ def readchunk(self): self.__buffer = EMPTY return chunk_data - def read(self, size=-1): + def read(self, size: int = -1) -> bytes: """Read at most `size` bytes from the file (less if there isn't enough data). @@ -572,7 +572,7 @@ def read(self, size=-1): data.seek(0) return data.read(size) - def readline(self, size=-1): + def readline(self, size: int = -1) -> bytes: # type: ignore[override] """Read one line or up to `size` bytes from the file. :Parameters: @@ -606,12 +606,12 @@ def readline(self, size=-1): data.seek(0) return data.read(size) - def tell(self): + def tell(self) -> int: """Return the current position of this file. """ return self.__position - def seek(self, pos, whence=_SEEK_SET): + def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. :Parameters: @@ -622,6 +622,10 @@ def seek(self, pos, whence=_SEEK_SET): positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative to the current position, :attr:`os.SEEK_END` (``2``) to seek relative to the file's end. + + .. versionchanged:: 4.1 + The method now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. """ if whence == _SEEK_SET: new_pos = pos @@ -637,18 +641,19 @@ def seek(self, pos, whence=_SEEK_SET): # Optimization, continue using the same buffer and chunk iterator. if new_pos == self.__position: - return + return new_pos self.__position = new_pos self.__buffer = EMPTY if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None + return new_pos - def seekable(self): + def seekable(self) -> bool: return True - def __iter__(self): + def __iter__(self) -> "GridOut": """Return an iterator over all of this file's data. The iterator will return lines (delimited by ``b'\\n'``) of @@ -669,46 +674,46 @@ def __iter__(self): """ return self - def close(self): + def close(self) -> None: """Make GridOut more generically file-like.""" if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None super().close() - def write(self, value): + def write(self, value: Any) -> None: raise io.UnsupportedOperation('write') - def writelines(self, lines): + def writelines(self, lines: Any) -> None: raise io.UnsupportedOperation('writelines') - def writable(self): + def writable(self) -> bool: return False - def __enter__(self): + def __enter__(self) -> "GridOut": """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Makes it possible to use :class:`GridOut` files with the context manager protocol. """ self.close() return False - def fileno(self): + def fileno(self) -> int: raise io.UnsupportedOperation('fileno') - def flush(self): + def flush(self) -> None: # GridOut is read-only, so flush does nothing. pass - def isatty(self): + def isatty(self) -> bool: return False - def truncate(self, size=None): + def truncate(self, size: Optional[int] = None) -> int: # See https://docs.python.org/3/library/io.html#io.IOBase.writable # for why truncate has to raise. raise io.UnsupportedOperation('truncate') @@ -716,7 +721,7 @@ def truncate(self, size=None): # Override IOBase.__del__ otherwise it will lead to __getattr__ on # __IOBase_closed which calls _ensure_file and potentially performs I/O. # We cannot do I/O in __del__ since it can lead to a deadlock. - def __del__(self): + def __del__(self) -> None: pass @@ -726,7 +731,7 @@ class _GridOutChunkIterator(object): Raises CorruptGridFile when encountering any truncated, missing, or extra chunk in a file. """ - def __init__(self, grid_out, chunks, session, next_chunk): + def __init__(self, grid_out: GridOut, chunks: Collection, session: Optional[ClientSession], next_chunk: Any) -> None: self._id = grid_out._id self._chunk_size = int(grid_out.chunk_size) self._length = int(grid_out.length) @@ -736,15 +741,17 @@ def __init__(self, grid_out, chunks, session, next_chunk): self._num_chunks = math.ceil(float(self._length) / self._chunk_size) self._cursor = None - def expected_chunk_length(self, chunk_n): + _cursor: Optional[Cursor] + + def expected_chunk_length(self, chunk_n: int) -> int: if chunk_n < self._num_chunks - 1: return self._chunk_size return self._length - (self._chunk_size * (self._num_chunks - 1)) - def __iter__(self): + def __iter__(self) -> "_GridOutChunkIterator": return self - def _create_cursor(self): + def _create_cursor(self) -> None: filter = {"files_id": self._id} if self._next_chunk > 0: filter["n"] = {"$gte": self._next_chunk} @@ -752,7 +759,7 @@ def _create_cursor(self): self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) - def _next_with_retry(self): + def _next_with_retry(self) -> Mapping[str, Any]: """Return the next chunk and retry once on CursorNotFound. We retry on CursorNotFound to maintain backwards compatibility in @@ -761,7 +768,7 @@ def _next_with_retry(self): """ if self._cursor is None: self._create_cursor() - + assert self._cursor is not None try: return self._cursor.next() except CursorNotFound: @@ -769,7 +776,7 @@ def _next_with_retry(self): self._create_cursor() return self._cursor.next() - def next(self): + def next(self) -> Mapping[str, Any]: try: chunk = self._next_with_retry() except StopIteration: @@ -804,20 +811,20 @@ def next(self): __next__ = next - def close(self): + def close(self) -> None: if self._cursor: self._cursor.close() self._cursor = None class GridOutIterator(object): - def __init__(self, grid_out, chunks, session): + def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) - def __iter__(self): + def __iter__(self) -> "GridOutIterator": return self - def next(self): + def next(self) -> bytes: chunk = self.__chunk_iter.next() return bytes(chunk["data"]) @@ -828,9 +835,13 @@ class GridOutCursor(Cursor): """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ - def __init__(self, collection, filter=None, skip=0, limit=0, - no_cursor_timeout=False, sort=None, batch_size=0, - session=None): + def __init__(self, collection: Collection, filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None) -> None: """Create a new cursor, similar to the normal :class:`~pymongo.cursor.Cursor`. @@ -852,7 +863,7 @@ def __init__(self, collection, filter=None, skip=0, limit=0, no_cursor_timeout=no_cursor_timeout, sort=sort, batch_size=batch_size, session=session) - def next(self): + def next(self) -> GridOut: """Get next GridOut object from cursor. """ _disallow_transactions(self.session) @@ -863,13 +874,13 @@ def next(self): __next__ = next - def add_option(self, *args, **kwargs): + def add_option(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args, **kwargs): + def remove_option(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError("Method does not exist for GridOutCursor") - def _clone_base(self, session): + def _clone_base(self, session: ClientSession) -> "GridOutCursor": """Creates an empty GridOutCursor for information to be copied into. """ return GridOutCursor(self.__root_collection, session=session) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000000..2646febb6f --- /dev/null +++ b/mypy.ini @@ -0,0 +1,11 @@ +[mypy] +disallow_subclassing_any = true +disallow_incomplete_defs = true +no_implicit_optional = true +strict_equality = true +warn_unused_configs = true +warn_unused_ignores = true +warn_redundant_casts = true + +[mypy-mockupdb] +ignore_missing_imports = True diff --git a/setup.py b/setup.py index 7d1ad52dc7..fde9ae1b3f 100755 --- a/setup.py +++ b/setup.py @@ -282,7 +282,7 @@ def build_extension(self, ext): 'snappy': ['python-snappy'], 'zstd': ['zstandard'], 'aws': ['pymongo-auth-aws<2.0.0'], - 'srv': ["dnspython>=1.16.0,<3.0.0"], + 'srv': ["dnspython>=1.16.0,<3.0.0"] } # GSSAPI extras diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 47890f80ee..138c059ac6 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -14,15 +14,14 @@ from collections import namedtuple -from mockupdb import * -from mockupdb import OpMsgReply +from mockupdb import OpMsgReply, OpMsg, OpReply from pymongo import ReadPreference __all__ = ['operations', 'upgrades'] Operation = namedtuple( - 'operation', + 'Operation', ['name', 'function', 'reply', 'op_type', 'not_master']) """Client operations on MongoDB. diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 2b3a1fd6ce..5461a13e35 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -15,10 +15,7 @@ """Test PyMongo cursor with a sharded cluster.""" from pymongo import MongoClient -try: - from queue import Queue -except ImportError: - from Queue import Queue +from queue import Queue from mockupdb import MockupDB, going diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index d13af3562b..2b6ea6a513 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -16,10 +16,7 @@ import time -try: - from queue import Queue -except ImportError: - from Queue import Queue +from queue import Queue from mockupdb import MockupDB, go, OpMsg from pymongo import MongoClient diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index 1df5febb78..bc29ce5f0f 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -try: - from queue import Queue -except ImportError: - from Queue import Queue +from queue import Queue from mockupdb import MockupDB, wait_until, OpReply, going, Future from pymongo.errors import ConnectionFailure diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 719de57553..63bb0fe303 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -23,10 +23,7 @@ from pymongo.read_preferences import make_read_preference from pymongo.read_preferences import read_pref_mode_from_name -try: - from queue import Queue -except ImportError: - from Queue import Queue +from queue import Queue from mockupdb import MockupDB, going from pymongo import MongoClient diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index 61cf8df674..f99ac0054e 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -21,18 +21,8 @@ from optparse import OptionParser -try: - from urllib2 import urlopen -except ImportError: - # Python 3. - from urllib.request import urlopen - - -try: - import thread -except ImportError: - # Python 3. - import _thread as thread +from urllib.request import urlopen +import _thread as thread def parse_args(): diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index dab7138add..b752453f13 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -19,12 +19,13 @@ import sys import tempfile import time +from typing import Any, List import warnings try: import simplejson as json except ImportError: - import json + import json # type: ignore sys.path[0:0] = [""] @@ -44,7 +45,7 @@ OUTPUT_FILE = os.environ.get('OUTPUT_FILE') -result_data = [] +result_data: List = [] def tearDownModule(): output = json.dumps(result_data, indent=4) diff --git a/tools/clean.py b/tools/clean.py index a5d383af4e..55896781a4 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -33,7 +33,7 @@ pass try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore sys.exit("could still import _cmessage") except ImportError: pass From c9229ace268379da61d18fac192f96f440a65fe4 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 18 Jan 2022 16:40:28 -0800 Subject: [PATCH 0565/2111] PYTHON-3061 Add 'let' option to ReplaceOptions (#832) --- doc/changelog.rst | 8 +- pymongo/collection.py | 13 +- test/crud/unified/replaceOne-let.json | 207 ++++++++++++++++++++++++++ 3 files changed, 222 insertions(+), 6 deletions(-) create mode 100644 test/crud/unified/replaceOne-let.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 4ff9cd781d..de38f188e4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -14,9 +14,11 @@ PyMongo 4.1 brings a number of improvements including: :meth:`pymongo.collection.Collection.find_one_and_delete`, :meth:`pymongo.collection.Collection.find_one_and_replace`, :meth:`pymongo.collection.Collection.find_one_and_update`, - and :meth:`pymongo.collection.Collection.find` all support a new keyword - argument ``let`` which is a map of parameter names and values. Parameters - can then be accessed as variables in an aggregate expression context. + :meth:`pymongo.collection.Collection.find`, + and :meth:`pymongo.collection.Collection.replace_one `all support a new + keyword argument ``let`` which is a map of parameter names and values. + Parameters can then be accessed as variables in an aggregate expression + context. - :meth:`~pymongo.collection.Collection.aggregate` now supports $merge and $out executing on secondaries on MongoDB >=5.0. aggregate() now always obeys the collection's :attr:`read_preference` on diff --git a/pymongo/collection.py b/pymongo/collection.py index 82e29f4061..0a8d011217 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -673,7 +673,7 @@ def _update(session, sock_info, retryable_write): def replace_one(self, filter, replacement, upsert=False, bypass_document_validation=False, collation=None, - hint=None, session=None): + hint=None, session=None, let=None): """Replace a single document matching the filter. >>> for doc in db.test.find({}): @@ -721,10 +721,16 @@ def replace_one(self, filter, replacement, upsert=False, MongoDB 4.2 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -738,14 +744,15 @@ def replace_one(self, filter, replacement, upsert=False, """ common.validate_is_mapping("filter", filter) common.validate_ok_for_replace(replacement) - + if let: + common.validate_is_mapping("let", let) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( filter, replacement, upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, hint=hint, session=session), + collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) def update_one(self, filter, update, upsert=False, diff --git a/test/crud/unified/replaceOne-let.json b/test/crud/unified/replaceOne-let.json new file mode 100644 index 0000000000..6cf8e15675 --- /dev/null +++ b/test/crud/unified/replaceOne-let.json @@ -0,0 +1,207 @@ +{ + "description": "replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "ReplaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": "foo" + }, + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": "foo" + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} From f074cfb696f7e44d4fb6fdd3c465a303cf4cca9c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 20 Jan 2022 08:18:13 -0600 Subject: [PATCH 0566/2111] PYTHON-2863 Allow hint for unacknowledged writes using OP_MSG when supported by the server (#830) --- pymongo/bulk.py | 93 ++++-- pymongo/collection.py | 17 +- ...kWrite-deleteMany-hint-unacknowledged.json | 269 +++++++++++++++ ...lkWrite-deleteOne-hint-unacknowledged.json | 265 +++++++++++++++ ...kWrite-replaceOne-hint-unacknowledged.json | 293 +++++++++++++++++ ...kWrite-updateMany-hint-unacknowledged.json | 305 ++++++++++++++++++ ...lkWrite-updateOne-hint-unacknowledged.json | 305 ++++++++++++++++++ .../deleteMany-hint-unacknowledged.json | 245 ++++++++++++++ .../deleteOne-hint-unacknowledged.json | 241 ++++++++++++++ .../findOneAndDelete-hint-unacknowledged.json | 225 +++++++++++++ ...findOneAndReplace-hint-unacknowledged.json | 248 ++++++++++++++ .../findOneAndUpdate-hint-unacknowledged.json | 253 +++++++++++++++ .../replaceOne-hint-unacknowledged.json | 269 +++++++++++++++ ...ged-bulkWrite-delete-hint-clientError.json | 193 ----------- ...ged-bulkWrite-update-hint-clientError.json | 284 ---------------- ...nowledged-deleteMany-hint-clientError.json | 149 --------- ...knowledged-deleteOne-hint-clientError.json | 133 -------- ...ged-findOneAndDelete-hint-clientError.json | 133 -------- ...ed-findOneAndReplace-hint-clientError.json | 139 -------- ...ged-findOneAndUpdate-hint-clientError.json | 143 -------- ...nowledged-replaceOne-hint-clientError.json | 143 -------- ...nowledged-updateMany-hint-clientError.json | 159 --------- ...knowledged-updateOne-hint-clientError.json | 147 --------- .../updateMany-hint-unacknowledged.json | 281 ++++++++++++++++ .../updateOne-hint-unacknowledged.json | 281 ++++++++++++++++ 25 files changed, 3549 insertions(+), 1664 deletions(-) create mode 100644 test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json create mode 100644 test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json create mode 100644 test/crud/unified/deleteMany-hint-unacknowledged.json create mode 100644 test/crud/unified/deleteOne-hint-unacknowledged.json create mode 100644 test/crud/unified/findOneAndDelete-hint-unacknowledged.json create mode 100644 test/crud/unified/findOneAndReplace-hint-unacknowledged.json create mode 100644 test/crud/unified/findOneAndUpdate-hint-unacknowledged.json create mode 100644 test/crud/unified/replaceOne-hint-unacknowledged.json delete mode 100644 test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-deleteMany-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-deleteOne-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-replaceOne-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-updateMany-hint-clientError.json delete mode 100644 test/crud/unified/unacknowledged-updateOne-hint-clientError.json create mode 100644 test/crud/unified/updateMany-hint-unacknowledged.json create mode 100644 test/crud/unified/updateOne-hint-unacknowledged.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 1bb8edf943..1921108a12 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -149,12 +149,14 @@ def __init__(self, collection, ordered, bypass_document_validation): self.bypass_doc_val = bypass_document_validation self.uses_collation = False self.uses_array_filters = False - self.uses_hint = False + self.uses_hint_update = False + self.uses_hint_delete = False self.is_retryable = True self.retrying = False self.started_retryable_write = False # Extra state so that we know where to pick up on a retry attempt. self.current_run = None + self.next_run = None @property def bulk_ctx_class(self): @@ -188,7 +190,7 @@ def add_update(self, selector, update, multi=False, upsert=False, self.uses_array_filters = True cmd['arrayFilters'] = array_filters if hint is not None: - self.uses_hint = True + self.uses_hint_update = True cmd['hint'] = hint if multi: # A bulk_write containing an update_many is not retryable. @@ -207,7 +209,7 @@ def add_replace(self, selector, replacement, upsert=False, self.uses_collation = True cmd['collation'] = collation if hint is not None: - self.uses_hint = True + self.uses_hint_update = True cmd['hint'] = hint self.ops.append((_UPDATE, cmd)) @@ -220,7 +222,7 @@ def add_delete(self, selector, limit, collation=None, hint=None): self.uses_collation = True cmd['collation'] = collation if hint is not None: - self.uses_hint = True + self.uses_hint_delete = True cmd['hint'] = hint if limit == _DELETE_ALL: # A bulk_write containing a delete_many is not retryable. @@ -254,25 +256,39 @@ def gen_unordered(self): yield run def _execute_command(self, generator, write_concern, session, - sock_info, op_id, retryable, full_result): + sock_info, op_id, retryable, full_result, + final_write_concern=None): db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners if not self.current_run: self.current_run = next(generator) + self.next_run = None run = self.current_run # sock_info.command validates the session, but we use # sock_info.write_command. sock_info.validate_session(client, session) + last_run = False + while run: + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( db_name, cmd_name, sock_info, op_id, listeners, session, run.op_type, self.collection.codec_options) while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + cmd = SON([(cmd_name, self.collection.name), ('ordered', self.ordered)]) if not write_concern.is_server_default: @@ -290,25 +306,31 @@ def _execute_command(self, generator, write_concern, session, sock_info.send_cluster_time(cmd, session, client) sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible in one command. - result, to_send = bwc.execute(cmd, ops, client) - - # Retryable writeConcernErrors halt the execution of this run. - wce = result.get('writeConcernError', {}) - if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: - # Synthesize the full bulk result without modifying the - # current one because this write operation may be retried. - full = copy.deepcopy(full_result) - _merge_command(run, full, run.idx_offset, result) - _raise_bulk_write_error(full) - - _merge_command(run, full_result, run.idx_offset, result) - # We're no longer in a retry once a command succeeds. - self.retrying = False - self.started_retryable_write = False - - if self.ordered and "writeErrors" in result: - break + if write_concern.acknowledged: + result, to_send = bwc.execute(cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. + wce = result.get('writeConcernError', {}) + if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = bwc.execute_unack(cmd, ops, client) + run.idx_offset += len(to_send) # We're supposed to continue if errors are @@ -316,7 +338,7 @@ def _execute_command(self, generator, write_concern, session, if self.ordered and full_result['writeErrors']: break # Reset our state - self.current_run = run = next(generator, None) + self.current_run = run = self.next_run def execute_command(self, generator, write_concern, session): """Execute using write commands. @@ -377,7 +399,7 @@ def execute_op_msg_no_results(self, sock_info, generator): run.idx_offset += len(to_send) self.current_run = run = next(generator, None) - def execute_command_no_results(self, sock_info, generator): + def execute_command_no_results(self, sock_info, generator, write_concern): """Execute write commands with OP_MSG and w=0 WriteConcern, ordered. """ full_result = { @@ -393,16 +415,16 @@ def execute_command_no_results(self, sock_info, generator): # Ordered bulk writes have to be acknowledged so that we stop # processing at the first error, even when the application # specified unacknowledged writeConcern. - write_concern = WriteConcern() + initial_write_concern = WriteConcern() op_id = _randint() try: self._execute_command( - generator, write_concern, None, - sock_info, op_id, False, full_result) + generator, initial_write_concern, None, + sock_info, op_id, False, full_result, write_concern) except OperationFailure: pass - def execute_no_results(self, sock_info, generator): + def execute_no_results(self, sock_info, generator, write_concern): """Execute all operations, returning no results (w=0). """ if self.uses_collation: @@ -411,16 +433,21 @@ def execute_no_results(self, sock_info, generator): if self.uses_array_filters: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged writes.') - if self.uses_hint: + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and sock_info.max_wire_version < 9: + raise ConfigurationError( + 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') + if unack and self.uses_hint_update and sock_info.max_wire_version < 8: raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: raise OperationFailure("Cannot set bypass_document_validation with" " unacknowledged write concern") if self.ordered: - return self.execute_command_no_results(sock_info, generator) + return self.execute_command_no_results(sock_info, generator, write_concern) return self.execute_op_msg_no_results(sock_info, generator) def execute(self, write_concern, session): @@ -443,6 +470,6 @@ def execute(self, write_concern, session): client = self.collection.database.client if not write_concern.acknowledged: with client._socket_for_writes(session) as sock_info: - self.execute_no_results(sock_info, generator) + self.execute_no_results(sock_info, generator, write_concern) else: return self.execute_command(generator, write_concern, session) diff --git a/pymongo/collection.py b/pymongo/collection.py index 0a8d011217..ecb82a2cac 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -608,9 +608,9 @@ def _update(self, sock_info, criteria, document, upsert=False, else: update_doc['arrayFilters'] = array_filters if hint is not None: - if not acknowledged: + if not acknowledged and sock_info.max_wire_version < 8: raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') if not isinstance(hint, str): hint = helpers._index_document(hint) update_doc['hint'] = hint @@ -961,9 +961,9 @@ def _delete( else: delete_doc['collation'] = collation if hint is not None: - if not acknowledged: + if not acknowledged and sock_info.max_wire_version < 9: raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') if not isinstance(hint, str): hint = helpers._index_document(hint) delete_doc['hint'] = hint @@ -2277,8 +2277,9 @@ def __find_and_modify(self, filter, projection, sort, upsert=None, write_concern = self._write_concern_for_cmd(cmd, session) def _find_and_modify(session, sock_info, retryable_write): + acknowledged = write_concern.acknowledged if array_filters is not None: - if not write_concern.acknowledged: + if not acknowledged: raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged ' 'writes.') @@ -2286,10 +2287,10 @@ def _find_and_modify(session, sock_info, retryable_write): if hint is not None: if sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint.') - if not write_concern.acknowledged: + 'Must be connected to MongoDB 4.2+ to use hint on find and modify commands.') + elif (not acknowledged and sock_info.max_wire_version < 9): raise ConfigurationError( - 'hint is unsupported for unacknowledged writes.') + 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands.') cmd['hint'] = hint if not write_concern.is_server_default: cmd['writeConcern'] = write_concern.document diff --git a/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..2dda9486e8 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "bulkWrite-deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..aadf6d9e99 --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-hint-unacknowledged.json @@ -0,0 +1,265 @@ +{ + "description": "bulkWrite-deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..e54cd704df --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-hint-unacknowledged.json @@ -0,0 +1,293 @@ +{ + "description": "bulkWrite-replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..87478918d2 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1345f6b536 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-hint-unacknowledged.json @@ -0,0 +1,305 @@ +{ + "description": "bulkWrite-updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-hint-unacknowledged.json b/test/crud/unified/deleteMany-hint-unacknowledged.json new file mode 100644 index 0000000000..ab7e9c7c09 --- /dev/null +++ b/test/crud/unified/deleteMany-hint-unacknowledged.json @@ -0,0 +1,245 @@ +{ + "description": "deleteMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteMany with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteMany with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 0 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-hint-unacknowledged.json b/test/crud/unified/deleteOne-hint-unacknowledged.json new file mode 100644 index 0000000000..1782f0f525 --- /dev/null +++ b/test/crud/unified/deleteOne-hint-unacknowledged.json @@ -0,0 +1,241 @@ +{ + "description": "deleteOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged deleteOne with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged deleteOne with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "limit": 1 + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-hint-unacknowledged.json b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json new file mode 100644 index 0000000000..077f9892b9 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-hint-unacknowledged.json @@ -0,0 +1,225 @@ +{ + "description": "findOneAndDelete-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndDelete with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "remove": true, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-hint-unacknowledged.json b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json new file mode 100644 index 0000000000..8228d8a2aa --- /dev/null +++ b/test/crud/unified/findOneAndReplace-hint-unacknowledged.json @@ -0,0 +1,248 @@ +{ + "description": "findOneAndReplace-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndReplace with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 111 + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json new file mode 100644 index 0000000000..d116a06d0d --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-hint-unacknowledged.json @@ -0,0 +1,253 @@ +{ + "description": "findOneAndUpdate-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error on pre-4.4 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint string on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged findOneAndUpdate with hint document on 4.4+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "$$type": [ + "string", + "object" + ] + }, + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-hint-unacknowledged.json b/test/crud/unified/replaceOne-hint-unacknowledged.json new file mode 100644 index 0000000000..5c5dec64f6 --- /dev/null +++ b/test/crud/unified/replaceOne-hint-unacknowledged.json @@ -0,0 +1,269 @@ +{ + "description": "replaceOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged replaceOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged replaceOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 111 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json b/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json deleted file mode 100644 index dca8108109..0000000000 --- a/test/crud/unified/unacknowledged-bulkWrite-delete-hint-clientError.json +++ /dev/null @@ -1,193 +0,0 @@ -{ - "description": "unacknowledged-bulkWrite-delete-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "BulkWrite_delete_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "BulkWrite_delete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged bulkWrite deleteOne with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteOne": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - } - }, - { - "deleteOne": { - "filter": { - "_id": 2 - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "BulkWrite_delete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - }, - { - "description": "Unacknowledged bulkWrite deleteMany with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteMany": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "hint": "_id_" - } - }, - { - "deleteMany": { - "filter": { - "_id": { - "$gte": 4 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "BulkWrite_delete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json b/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json deleted file mode 100644 index 22377b9ac1..0000000000 --- a/test/crud/unified/unacknowledged-bulkWrite-update-hint-clientError.json +++ /dev/null @@ -1,284 +0,0 @@ -{ - "description": "unacknowledged-bulkWrite-update-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "Bulkwrite_update_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged bulkWrite updateOne with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "updateOne": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - }, - { - "description": "Unacknowledged bulkWrite updateMany with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - } - }, - { - "updateMany": { - "filter": { - "_id": { - "$lt": 3 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - }, - { - "description": "Unacknowledged bulkWrite replaceOne with hints fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "replaceOne": { - "filter": { - "_id": 3 - }, - "replacement": { - "x": 333 - }, - "hint": "_id_" - } - }, - { - "replaceOne": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 444 - }, - "hint": { - "_id": 1 - } - } - } - ], - "ordered": true - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Bulkwrite_update_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json b/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json deleted file mode 100644 index 21776eae80..0000000000 --- a/test/crud/unified/unacknowledged-deleteMany-hint-clientError.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "description": "unacknowledged-deleteMany-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "DeleteMany_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "DeleteMany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged deleteMany with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteMany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - }, - { - "description": "Unacknowledged deleteMany with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteMany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json b/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json deleted file mode 100644 index 870c08339c..0000000000 --- a/test/crud/unified/unacknowledged-deleteOne-hint-clientError.json +++ /dev/null @@ -1,133 +0,0 @@ -{ - "description": "unacknowledged-deleteOne-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "DeleteOne_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "DeleteOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged deleteOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged deleteOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "DeleteOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json deleted file mode 100644 index a19cd77638..0000000000 --- a/test/crud/unified/unacknowledged-findOneAndDelete-hint-clientError.json +++ /dev/null @@ -1,133 +0,0 @@ -{ - "description": "unacknowledged-findOneAndDelete-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "findOneAndDelete_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "findOneAndDelete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged findOneAndDelete with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "findOneAndDelete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged findOneAndDelete with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 1 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "findOneAndDelete_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json deleted file mode 100644 index c60bfdef17..0000000000 --- a/test/crud/unified/unacknowledged-findOneAndReplace-hint-clientError.json +++ /dev/null @@ -1,139 +0,0 @@ -{ - "description": "unacknowledged-findOneAndReplace-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "FindOneAndReplace_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "FindOneAndReplace_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged findOneAndReplace with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndReplace_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged findOneAndReplace with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 33 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndReplace_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json b/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json deleted file mode 100644 index 506510a3c9..0000000000 --- a/test/crud/unified/unacknowledged-findOneAndUpdate-hint-clientError.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "description": "unacknowledged-findOneAndUpdate-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "FindOneAndUpdate_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "FindOneAndUpdate_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged findOneAndUpdate with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndUpdate_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged findOneAndUpdate with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "FindOneAndUpdate_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json b/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json deleted file mode 100644 index b4f4bed5f9..0000000000 --- a/test/crud/unified/unacknowledged-replaceOne-hint-clientError.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "description": "unacknowledged-replaceOne-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "ReplaceOne_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "ReplaceOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged ReplaceOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "ReplaceOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged ReplaceOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "ReplaceOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-updateMany-hint-clientError.json b/test/crud/unified/unacknowledged-updateMany-hint-clientError.json deleted file mode 100644 index 3087dc4dbc..0000000000 --- a/test/crud/unified/unacknowledged-updateMany-hint-clientError.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "description": "unacknowledged-updateMany-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "Updatemany_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "Updatemany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged updateMany with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Updatemany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - }, - { - "description": "Unacknowledged updateMany with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "Updatemany_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/unacknowledged-updateOne-hint-clientError.json b/test/crud/unified/unacknowledged-updateOne-hint-clientError.json deleted file mode 100644 index 208703c26f..0000000000 --- a/test/crud/unified/unacknowledged-updateOne-hint-clientError.json +++ /dev/null @@ -1,147 +0,0 @@ -{ - "description": "unacknowledged-updateOne-hint-clientError", - "schemaVersion": "1.1", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-v2" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "UpdateOne_hint", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - } - } - } - ], - "initialData": [ - { - "collectionName": "UpdateOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Unacknowledged updateOne with hint string fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": "_id_" - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "UpdateOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - }, - { - "description": "Unacknowledged updateOne with hint document fails with client-side error", - "operations": [ - { - "object": "collection0", - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "hint": { - "_id": 1 - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [] - } - ], - "outcome": [ - { - "collectionName": "UpdateOne_hint", - "databaseName": "crud-v2", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ] - } - ] -} diff --git a/test/crud/unified/updateMany-hint-unacknowledged.json b/test/crud/unified/updateMany-hint-unacknowledged.json new file mode 100644 index 0000000000..e83838aac2 --- /dev/null +++ b/test/crud/unified/updateMany-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateMany-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateMany with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateMany with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-hint-unacknowledged.json b/test/crud/unified/updateOne-hint-unacknowledged.json new file mode 100644 index 0000000000..859b0f92f9 --- /dev/null +++ b/test/crud/unified/updateOne-hint-unacknowledged.json @@ -0,0 +1,281 @@ +{ + "description": "updateOne-hint-unacknowledged", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "db0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Unacknowledged updateOne with hint string fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document fails with client-side error on pre-4.2 server", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint string on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": "_id_" + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + }, + { + "description": "Unacknowledged updateOne with hint document on 4.2+ server", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "hint": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + }, + "hint": { + "$$type": [ + "string", + "object" + ] + } + } + ], + "writeConcern": { + "w": 0 + } + } + } + } + ] + } + ] + } + ] +} From d6fc05ae49c8225f865b83c55ed7bfa33931611c Mon Sep 17 00:00:00 2001 From: Alexander Golin Date: Thu, 20 Jan 2022 17:03:43 -0500 Subject: [PATCH 0567/2111] Create CODEOWNERS (#834) --- .github/CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..15a41b6ce6 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +# Global owner for repo +* @blink1073 @juliusgeo @ShaneHarvey From b7c33debbf43cd2e2c65c66c646e1833db2ad01c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 21 Jan 2022 10:08:48 -0800 Subject: [PATCH 0568/2111] PYTHON-3046 Document support for backslashreplace and surrogateescape (#836) --- bson/codec_options.py | 3 +- pymongo/mongo_client.py | 3 +- test/test_bson.py | 71 +++++++++++++---------------------------- 3 files changed, 27 insertions(+), 50 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 81e79158b4..27df48de8a 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -233,7 +233,8 @@ class CodecOptions(_options_base): - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', and 'ignore'. Defaults to 'strict'. + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the timezone to/from which :class:`~datetime.datetime` objects should be encoded/decoded. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a133c96a7f..052ade3853 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -330,7 +330,8 @@ def __init__( - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', and 'ignore'. Defaults to 'strict'. + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. - `srvServiceName`: (string) The SRV service name to use for "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: diff --git a/test/test_bson.py b/test/test_bson.py index b91bc7f5fb..eb4f4e47c2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -994,57 +994,32 @@ def test_decode_all_defaults(self): def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) - # Test handling of bad key value. + # Test handling of bad key value, bad string value, and both. invalid_key = enc[:7] + b'\xe9' + enc[8:] - replaced_key = b'ke\xe9str'.decode('utf-8', 'replace') - ignored_key = b'ke\xe9str'.decode('utf-8', 'ignore') - - dec = decode(invalid_key, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: "foobar"}) - - dec = decode(invalid_key, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: "foobar"}) - - self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_key, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_key) - - # Test handing of bad string value. - invalid_val = BSON(enc[:18] + b'\xe9' + enc[19:]) - replaced_val = b'fo\xe9bar'.decode('utf-8', 'replace') - ignored_val = b'fo\xe9bar'.decode('utf-8', 'ignore') - - dec = decode(invalid_val, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {"keystr": replaced_val}) - - dec = decode(invalid_val, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {"keystr": ignored_val}) - - self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_val, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_val) - - # Test handing bad key + bad value. + invalid_val = enc[:18] + b'\xe9' + enc[19:] invalid_both = enc[:7] + b'\xe9' + enc[8:18] + b'\xe9' + enc[19:] - dec = decode(invalid_both, - CodecOptions(unicode_decode_error_handler="replace")) - self.assertEqual(dec, {replaced_key: replaced_val}) - - dec = decode(invalid_both, - CodecOptions(unicode_decode_error_handler="ignore")) - self.assertEqual(dec, {ignored_key: ignored_val}) - - self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions( - unicode_decode_error_handler="strict")) - self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid_both) + # Ensure that strict mode raises an error. + for invalid in [invalid_key, invalid_val, invalid_both]: + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions( + unicode_decode_error_handler="strict")) + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) + self.assertRaises(InvalidBSON, decode, invalid) + + # Test all other error handlers. + for handler in ['replace', 'backslashreplace', 'surrogateescape', + 'ignore']: + expected_key = b'ke\xe9str'.decode('utf-8', handler) + expected_val = b'fo\xe9bar'.decode('utf-8', handler) + doc = decode(invalid_key, + CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: "foobar"}) + doc = decode(invalid_val, + CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {"keystr": expected_val}) + doc = decode(invalid_both, + CodecOptions(unicode_decode_error_handler=handler)) + self.assertEqual(doc, {expected_key: expected_val}) # Test handling bad error mode. dec = decode(enc, From bdafc357331813222b1e677b66041dad1fc852a5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 21 Jan 2022 10:09:03 -0800 Subject: [PATCH 0569/2111] PYTHON-3041 Fix doc example for initializing a replica set (#835) --- doc/examples/high_availability.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index a5c252f8a3..19b48f7d01 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -52,11 +52,11 @@ At this point all of our nodes are up and running, but the set has yet to be initialized. Until the set is initialized no node will become the primary, and things are essentially "offline". -To initialize the set we need to connect to a single node and run the -initiate command:: +To initialize the set we need to connect directly to a single node and run the +initiate command using the ``directConnection`` option:: >>> from pymongo import MongoClient - >>> c = MongoClient('localhost', 27017) + >>> c = MongoClient('localhost', 27017, directConnection=True) .. note:: We could have connected to any of the other nodes instead, but only the node we initiate from is allowed to contain any @@ -81,15 +81,19 @@ The initial connection as made above is a special case for an uninitialized replica set. Normally we'll want to connect differently. A connection to a replica set can be made using the :meth:`~pymongo.mongo_client.MongoClient` constructor, specifying -one or more members of the set, along with the replica set name. Any of -the following connects to the replica set we just created:: +one or more members of the set and optionally the replica set name. +Any of the following connects to the replica set we just created:: + >>> MongoClient('localhost') + MongoClient(host=['localhost:27017'], ...) >>> MongoClient('localhost', replicaset='foo') MongoClient(host=['localhost:27017'], replicaset='foo', ...) >>> MongoClient('localhost:27018', replicaset='foo') MongoClient(['localhost:27018'], replicaset='foo', ...) >>> MongoClient('localhost', 27019, replicaset='foo') MongoClient(['localhost:27019'], replicaset='foo', ...) + >>> MongoClient('mongodb://localhost:27017,localhost:27018/') + MongoClient(['localhost:27017', 'localhost:27018'], ...) >>> MongoClient('mongodb://localhost:27017,localhost:27018/?replicaSet=foo') MongoClient(['localhost:27017', 'localhost:27018'], replicaset='foo', ...) From 4eeb685c5794cc4fdac6103d083415603f987c90 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 26 Jan 2022 11:57:36 -0800 Subject: [PATCH 0570/2111] PYTHON-3069 Require hello command + OP_MSG when 'loadBalanced=True' (#837) --- pymongo/pool.py | 5 ++- test/mockupdb/test_handshake.py | 62 ++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 88b0e09737..a0868c9916 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -550,7 +550,10 @@ def unpin(self): self.close_socket(ConnectionClosedReason.STALE) def hello_cmd(self): - if self.opts.server_api or self.hello_ok: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or versioned api mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True return SON([(HelloCompat.CMD, 1)]) else: return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 621f01728f..34028a637f 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -12,14 +12,58 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mockupdb import (MockupDB, OpReply, OpMsg, OpMsgReply, OpQuery, absent, + Command, go) -from mockupdb import MockupDB, OpReply, OpMsg, absent, Command, go from pymongo import MongoClient, version as pymongo_version from pymongo.errors import OperationFailure +from pymongo.server_api import ServerApi, ServerApiVersion +from bson.objectid import ObjectId import unittest +def test_hello_with_option(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req == None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get( + "loadBalanced") else {} + return r.reply(OpMsgReply(minWireVersion=0, maxWireVersion=13, + **kwargs, **load_balanced_kwargs)) + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". + k_map = {("apiVersion", "1"):("server_api", ServerApi( + ServerApiVersion.V1))} + client = MongoClient("mongodb://"+primary.address_string, + appname='my app', # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v + in kwargs.items()])) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). + self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + + def _check_handshake_data(request): assert 'client' in request data = request['client'] @@ -156,6 +200,22 @@ def test_client_handshake_saslSupportedMechs(self): future() return + def test_handshake_load_balanced(self): + test_hello_with_option(self, OpMsg, loadBalanced=True) + with self.assertRaisesRegex(AssertionError, "does not match"): + test_hello_with_option(self, Command, loadBalanced=True) + + def test_handshake_versioned_api(self): + test_hello_with_option(self, OpMsg, apiVersion="1") + with self.assertRaisesRegex(AssertionError, "does not match"): + test_hello_with_option(self, Command, apiVersion="1") + + def test_handshake_not_either(self): + # If we don't specify either option then it should be using + # OP_QUERY for the initial step of the handshake. + test_hello_with_option(self, Command) + with self.assertRaisesRegex(AssertionError, "does not match"): + test_hello_with_option(self, OpMsg) if __name__ == '__main__': unittest.main() From 308b4f4e08c9fe91210ee498adbbafe0c7ebebe4 Mon Sep 17 00:00:00 2001 From: Bernie Hackett Date: Wed, 26 Jan 2022 17:28:38 -0800 Subject: [PATCH 0571/2111] PYTHON-1596 Test on RHEL7 FIPS (#838) --- .evergreen/config.yml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 44a075a727..bf96f220ff 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1688,6 +1688,16 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run load-balancer" - func: "run tests" + + - name: "test-fips-standalone" + tags: ["fips"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" + - func: "run tests" # }}} - name: "coverage-report" tags: ["coverage"] @@ -1758,6 +1768,12 @@ axes: batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz + - id: rhel70-fips + display_name: "RHEL 7.0 FIPS" + run_on: rhel70-fips + batchtime: 10080 # 7 days + variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: ubuntu-16.04 display_name: "Ubuntu 16.04" run_on: ubuntu1604-test @@ -2157,6 +2173,16 @@ buildvariants: - ".4.0" - ".3.6" +- matrix_name: "tests-fips" + matrix_spec: + platform: + - rhel70-fips + auth: "auth" + ssl: "ssl" + display_name: "${platform} ${auth} ${ssl}" + tasks: + - "test-fips-standalone" + - matrix_name: "test-macos" matrix_spec: platform: From aa60c2a2c051d9191aacf853405666bdeaa8d93a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 1 Feb 2022 12:45:47 -0800 Subject: [PATCH 0572/2111] PYTHON-3071 [DevOps] Merge and improve resync_specs.sh (#839) --- .evergreen/resync-specs.sh | 145 +++++++++++++++++++++++++++++++++++++ CONTRIBUTING.rst | 20 +++++ 2 files changed, 165 insertions(+) create mode 100755 .evergreen/resync-specs.sh diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh new file mode 100755 index 0000000000..1d0742258b --- /dev/null +++ b/.evergreen/resync-specs.sh @@ -0,0 +1,145 @@ +#!/bin/bash +# exit when any command fails +set -e +PYMONGO=$(dirname "$(cd "$(dirname "$0")"; pwd)") +SPECS=${MDB_SPECS:-~/Work/specifications} + +help (){ + echo "Usage: resync_specs.sh [-bcsp] spec" + echo "Required arguments:" + echo " spec determines which folder the spec tests will be copied from." + echo "Optional flags:" + echo " -b is used to add a string to the blocklist for that next run. Can be used" + echo " any number of times on a single command to block multiple patterns." + echo " You can use any regex pattern (it is passed to 'grep -Ev')." + echo " -c is used to set a branch or commit that will be checked out in the" + echo " specifications repo before copying." + echo " -s is used to set a unique path to the specs repo for that specific" + echo " run." + echo "Notes:" + echo "You can export the environment variable MDB_SPECS to set the specs" + echo " repo similar to -s, but this will persist between runs until you " + echo "unset it." +} + +# Parse flag args +BRANCH='' +BLOCKLIST='.*\.yml' +while getopts 'b:c:s:' flag; do + case "${flag}" in + b) BLOCKLIST+="|$OPTARG" + ;; + c) BRANCH="${OPTARG}" + ;; + s) SPECS="${OPTARG}" + ;; + *) help; exit 0 + ;; + esac +done +shift $((OPTIND-1)) + +if [ -z $BRANCH ] +then + git -C $SPECS checkout $BRANCH +fi + +# Ensure the JSON files are up to date. +cd $SPECS/source +make +cd - +# cpjson unified-test-format/tests/invalid unified-test-format/invalid +# * param1: Path to spec tests dir in specifications repo +# * param2: Path to where the corresponding tests live in Python. +cpjson () { + find "$PYMONGO"/test/$2 -type f -delete + cd "$SPECS"/source/$1 + find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ + $PYMONGO/test/$2 + printf "\nIgnored files for ${PWD}" + printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ + <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ + sed -e '/^[0-9]/d' | sed -e 's|< ./||g' +} + +for spec in "$@" +do + case "$spec" in + bson*corpus) + cpjson bson-corpus/tests/ bson_corpus + ;; + max*staleness) + cpjson max-staleness/tests/ max_staleness + ;; + connection*string) + cpjson connection-string/tests/ connection_string/test + ;; + change*streams) + cpjson change-streams/tests/ change_streams/ + ;; + cmap|CMAP) + cpjson connection-monitoring-and-pooling/tests cmap + ;; + command*monitoring) + cpjson command-monitoring/tests command_monitoring + ;; + crud|CRUD) + cpjson crud/tests/ crud + ;; + load*balancer) + cpjson load-balancers/tests load_balancer + ;; + initial-dns-seedlist-discovery|srv_seedlist) + cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist + ;; + old_srv_seedlist) + cpjson initial-dns-seedlist-discovery/tests srv_seedlist + ;; + retryable*reads) + cpjson retryable-reads/tests/ retryable_reads + ;; + retryable*writes) + cpjson retryable-writes/tests/ retryable_writes + ;; + sdam|SDAM) + cpjson server-discovery-and-monitoring/tests/errors \ + discovery_and_monitoring/errors + cpjson server-discovery-and-monitoring/tests/rs \ + discovery_and_monitoring/rs + cpjson server-discovery-and-monitoring/tests/sharded \ + discovery_and_monitoring/sharded + cpjson server-discovery-and-monitoring/tests/single \ + discovery_and_monitoring/single + cpjson server-discovery-and-monitoring/tests/integration \ + discovery_and_monitoring_integration + cpjson server-discovery-and-monitoring/tests/load-balanced \ + discovery_and_monitoring/load-balanced + ;; + sdam*monitoring) + cpjson server-discovery-and-monitoring/tests/monitoring sdam_monitoring + ;; + server*selection) + cpjson server-selection/tests/ server_selection + ;; + sessions) + cpjson sessions/tests/ sessions + ;; + transactions|transactions-convenient-api) + cpjson transactions/tests/ transactions + cpjson transactions-convenient-api/tests/ transactions-convenient-api + ;; + unified) + cpjson unified-test-format/tests/ unified-test-format/ + ;; + uri|uri*options) + cpjson uri-options/tests uri_options + ;; + versioned-api) + cpjson versioned-api/tests versioned-api + ;; + *) + echo "Do not know how to resync spec tests for '${spec}'" + help + ;; + esac +done diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index cf451172a4..40dca00e0c 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -48,3 +48,23 @@ just make your changes to the inline documentation of the appropriate branch and submit a `pull request `_. You might also use the GitHub `Edit `_ button. + +Re-sync Spec Tests +----------------- + +If you would like to re-sync the copy of the specification tests in the +PyMongo repository with that which is inside the `specifications repo +`_, please +use the script provided in ``.evergreen/resync-specs.sh``.:: + + git clone git@github.com:mongodb/specifications.git + export MDB_SPECS=~/specifications + cd ~/mongo-python-driver/.evergreen + ./resync-specs.sh -b "connection-string*" crud bson-corpus + cd .. + +The ``-b`` flag adds as a regex pattern to block files you do not wish to +update in PyMongo. +This is primarily helpful if you are implementing a new feature in PyMongo +that has spec tests already implemented, or if you are attempting to +validate new spec tests in PyMongo. \ No newline at end of file From abfa0d35bcbc99107dfac71f58c4b1606dc7656a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Feb 2022 13:53:58 -0600 Subject: [PATCH 0573/2111] PYTHON-3075 bulk_write does not apply CodecOptions to upserted_ids result (#840) --- pymongo/message.py | 13 +++---- pymongo/pool.py | 4 +-- test/test_bulk.py | 76 +++++++++++++++++++++++++++++++++++++++++ test/test_encryption.py | 33 +++++++++++++++++- 4 files changed, 117 insertions(+), 9 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 584528c2f2..f632214a08 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -32,6 +32,7 @@ _decode_selective, _dict_to_bson, _make_c_string) +from bson import codec_options from bson.int64 import Int64 from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument) @@ -798,7 +799,7 @@ def write_command(self, cmd, request_id, msg, docs): self._start(cmd, request_id, docs) start = datetime.datetime.now() try: - reply = self.sock_info.write_command(request_id, msg) + reply = self.sock_info.write_command(request_id, msg, self.codec) if self.publish: duration = (datetime.datetime.now() - start) + duration self._succeed(request_id, reply, duration) @@ -866,7 +867,7 @@ def execute(self, cmd, docs, client): batched_cmd, to_send = self._batch_command(cmd, docs) result = self.sock_info.command( self.db_name, batched_cmd, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + codec_options=self.codec, session=self.session, client=client) return result, to_send @@ -1205,9 +1206,9 @@ def unpack_response(self, cursor_id=None, return bson._decode_all_selective( self.documents, codec_options, user_fields) - def command_response(self): + def command_response(self, codec_options): """Unpack a command response.""" - docs = self.unpack_response() + docs = self.unpack_response(codec_options=codec_options) assert self.number_returned == 1 return docs[0] @@ -1273,9 +1274,9 @@ def unpack_response(self, cursor_id=None, return bson._decode_all_selective( self.payload_document, codec_options, user_fields) - def command_response(self): + def command_response(self, codec_options): """Unpack a command response.""" - return self.unpack_response()[0] + return self.unpack_response(codec_options=codec_options)[0] def raw_command_response(self): """Return the bytes of the command response.""" diff --git a/pymongo/pool.py b/pymongo/pool.py index a0868c9916..70920d5b23 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -775,7 +775,7 @@ def unack_write(self, msg, max_doc_size): self._raise_if_not_writable(True) self.send_message(msg, max_doc_size) - def write_command(self, request_id, msg): + def write_command(self, request_id, msg, codec_options): """Send "insert" etc. command, returning response as a dict. Can raise ConnectionFailure or OperationFailure. @@ -786,7 +786,7 @@ def write_command(self, request_id, msg): """ self.send_message(msg, 0) reply = self.receive_message(request_id) - result = reply.command_response() + result = reply.command_response(codec_options) # Raises NotPrimaryError or OperationFailure. helpers._check_command_response(result, self.max_wire_version) diff --git a/test/test_bulk.py b/test/test_bulk.py index f93cd6c766..08740a437e 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -15,9 +15,13 @@ """Test the bulk API.""" import sys +import uuid +from bson.binary import UuidRepresentation +from bson.codec_options import CodecOptions sys.path[0:0] = [""] +from bson import Binary from bson.objectid import ObjectId from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, @@ -376,6 +380,78 @@ def test_client_generated_upsert_id(self): {'index': 2, '_id': 2}]}, result.bulk_api_result) + def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write([ + UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': uuids[0]}, + {'index': 1, '_id': uuids[1]}, + {'index': 2, '_id': uuids[2]}]}, + result.bulk_api_result) + + def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = coll.bulk_write([ + UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': uuids[0]}, + {'index': 1, '_id': uuids[1]}, + {'index': 2, '_id': uuids[2]}]}, + result.bulk_api_result) + + def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids = [ + {'f': Binary(bytes(i)), 'f2': uuid.uuid4()} + for i in range(3) + ] + + result = coll.bulk_write([ + UpdateOne({'_id': ids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': ids[2]}, {'_id': ids[2]}, upsert=True), + ]) + + # The `Binary` values are returned as `bytes` objects. + for _id in ids: + _id['f'] = bytes(_id['f']) + + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': ids[0]}, + {'index': 1, '_id': ids[1]}, + {'index': 2, '_id': ids[2]}]}, + result.bulk_api_result) + def test_single_ordered_batch(self): result = self.coll.bulk_write([ InsertOne({'a': 1}), diff --git a/test/test_encryption.py b/test/test_encryption.py index 88acadfbaf..8e47d44525 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -29,6 +29,7 @@ from bson import encode, json_util from bson.binary import (Binary, + UuidRepresentation, JAVA_LEGACY, STANDARD, UUID_SUBTYPE) @@ -50,13 +51,14 @@ ServerSelectionTimeoutError, WriteError) from pymongo.mongo_client import MongoClient -from pymongo.operations import InsertOne +from pymongo.operations import InsertOne, ReplaceOne, UpdateOne from pymongo.write_concern import WriteConcern from test import (unittest, CA_PEM, CLIENT_PEM, client_context, IntegrationTest, PyMongoTestCase) +from test.test_bulk import BulkTestBase from test.utils import (TestCreator, camel_to_snake_args, OvertCommandListener, @@ -313,6 +315,35 @@ def test_use_after_close(self): client.admin.command('ping') +class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): + + def test_upsert_uuid_standard_encrypte(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client.close) + + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encrypted_coll = client.pymongo_test.test + coll = encrypted_coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = coll.bulk_write([ + UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), + ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), + ]) + self.assertEqualResponse( + {'nMatched': 0, + 'nModified': 0, + 'nUpserted': 3, + 'nInserted': 0, + 'nRemoved': 0, + 'upserted': [{'index': 0, '_id': uuids[0]}, + {'index': 1, '_id': uuids[1]}, + {'index': 2, '_id': uuids[2]}]}, + result.bulk_api_result) + + class TestClientMaxWireVersion(IntegrationTest): @classmethod From dd6c140d438039e9f6df96cd3d4221f380a37e18 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Feb 2022 21:12:36 -0600 Subject: [PATCH 0574/2111] PYTHON-3060 Add typings to pymongo package (#831) --- bson/__init__.py | 19 +- bson/binary.py | 9 +- gridfs/grid_file.py | 4 +- mypy.ini | 22 +++ pymongo/__init__.py | 28 +-- pymongo/aggregation.py | 6 +- pymongo/auth.py | 14 +- pymongo/auth_aws.py | 7 +- pymongo/bulk.py | 22 +-- pymongo/change_stream.py | 70 ++++--- pymongo/client_options.py | 2 +- pymongo/client_session.py | 106 ++++++----- pymongo/collation.py | 31 ++-- pymongo/collection.py | 311 +++++++++++++++++++++----------- pymongo/command_cursor.py | 71 +++++--- pymongo/common.py | 146 +++++++-------- pymongo/compression_support.py | 7 +- pymongo/cursor.py | 189 +++++++++++-------- pymongo/database.py | 157 +++++++++++----- pymongo/driver_info.py | 3 +- pymongo/encryption.py | 63 ++++--- pymongo/encryption_options.py | 30 +-- pymongo/errors.py | 51 ++++-- pymongo/event_loggers.py | 46 +++-- pymongo/hello.py | 66 ++++--- pymongo/helpers.py | 14 +- pymongo/message.py | 29 ++- pymongo/mongo_client.py | 181 +++++++++++-------- pymongo/monitor.py | 10 +- pymongo/monitoring.py | 203 +++++++++++++-------- pymongo/network.py | 9 +- pymongo/ocsp_cache.py | 2 +- pymongo/ocsp_support.py | 61 +++---- pymongo/operations.py | 47 ++--- pymongo/periodic_executor.py | 18 +- pymongo/pool.py | 60 +++--- pymongo/pyopenssl_context.py | 41 ++--- pymongo/read_concern.py | 12 +- pymongo/read_preferences.py | 74 ++++---- pymongo/results.py | 58 +++--- pymongo/saslprep.py | 9 +- pymongo/server.py | 7 +- pymongo/server_description.py | 88 ++++----- pymongo/server_type.py | 19 +- pymongo/socket_checker.py | 12 +- pymongo/srv_resolver.py | 3 +- pymongo/ssl_context.py | 1 + pymongo/ssl_support.py | 8 +- pymongo/topology.py | 41 ++--- pymongo/topology_description.py | 99 ++++++---- pymongo/typings.py | 29 +++ pymongo/uri_parser.py | 40 ++-- pymongo/write_concern.py | 16 +- test/performance/perf_test.py | 2 +- test/test_cursor.py | 2 +- test/test_grid_file.py | 2 +- tools/clean.py | 2 +- 57 files changed, 1579 insertions(+), 1100 deletions(-) create mode 100644 pymongo/typings.py diff --git a/bson/__init__.py b/bson/__init__.py index 5be673cfc3..e518cd91c9 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -61,10 +61,10 @@ import struct import sys import uuid -from codecs import utf_8_decode as _utf_8_decode # type: ignore -from codecs import utf_8_encode as _utf_8_encode # type: ignore +from codecs import utf_8_decode as _utf_8_decode # type: ignore[attr-defined] +from codecs import utf_8_encode as _utf_8_encode # type: ignore[attr-defined] from collections import abc as _abc -from typing import (TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, +from typing import (IO, TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, Iterator, List, Mapping, MutableMapping, NoReturn, Sequence, Tuple, Type, TypeVar, Union, cast) @@ -88,11 +88,13 @@ # Import RawBSONDocument for type-checking only to avoid circular dependency. if TYPE_CHECKING: + from array import array + from mmap import mmap from bson.raw_bson import RawBSONDocument try: - from bson import _cbson # type: ignore + from bson import _cbson # type: ignore[attr-defined] _USE_C = True except ImportError: _USE_C = False @@ -851,6 +853,7 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _DocumentIn = Mapping[str, Any] _DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] +_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] def encode(document: _DocumentIn, check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> bytes: @@ -880,7 +883,7 @@ def encode(document: _DocumentIn, check_keys: bool = False, codec_options: Codec return _dict_to_bson(document, check_keys, codec_options) -def decode(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: +def decode(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -912,7 +915,7 @@ def decode(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> return _bson_to_dict(data, codec_options) -def decode_all(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[_DocumentOut]: +def decode_all(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[Dict[str, Any]]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -1075,7 +1078,7 @@ def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS yield _bson_to_dict(elements, codec_options) -def decode_file_iter(file_obj: BinaryIO, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: +def decode_file_iter(file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1158,7 +1161,7 @@ def encode(cls: Type["BSON"], document: _DocumentIn, check_keys: bool = False, """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentOut: # type: ignore[override] + def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: # type: ignore[override] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/bson/binary.py b/bson/binary.py index 53d5419b49..de44d48174 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Tuple, Type +from typing import Any, Tuple, Type, Union, TYPE_CHECKING from uuid import UUID """Tools for representing BSON binary data. @@ -57,6 +57,11 @@ """ +if TYPE_CHECKING: + from array import array as _array + from mmap import mmap as _mmap + + class UuidRepresentation: UNSPECIFIED = 0 """An unspecified UUID representation. @@ -211,7 +216,7 @@ class Binary(bytes): _type_marker = 5 __subtype: int - def __new__(cls: Type["Binary"], data: bytes, subtype: int = BINARY_SUBTYPE) -> "Binary": + def __new__(cls: Type["Binary"], data: Union[memoryview, bytes, "_mmap", "_array"], subtype: int = BINARY_SUBTYPE) -> "Binary": if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 9353a97a1c..686d328a3c 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -874,10 +874,10 @@ def next(self) -> GridOut: __next__ = next - def add_option(self, *args: Any, **kwargs: Any) -> None: + def add_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args: Any, **kwargs: Any) -> None: + def remove_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] raise NotImplementedError("Method does not exist for GridOutCursor") def _clone_base(self, session: ClientSession) -> "GridOutCursor": diff --git a/mypy.ini b/mypy.ini index 2646febb6f..926bf95745 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,11 +1,33 @@ [mypy] +check_untyped_defs = true disallow_subclassing_any = true disallow_incomplete_defs = true no_implicit_optional = true +pretty = true +show_error_context = true +show_error_codes = true strict_equality = true warn_unused_configs = true warn_unused_ignores = true warn_redundant_casts = true +[mypy-kerberos.*] +ignore_missing_imports = True + [mypy-mockupdb] ignore_missing_imports = True + +[mypy-pymongo_auth_aws.*] +ignore_missing_imports = True + +[mypy-pymongocrypt.*] +ignore_missing_imports = True + +[mypy-service_identity.*] +ignore_missing_imports = True + +[mypy-snappy.*] +ignore_missing_imports = True + +[mypy-winkerberos.*] +ignore_missing_imports = True diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 5db9363f90..54a962df57 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -14,6 +14,8 @@ """Python driver for MongoDB.""" +from typing import Tuple, Union + ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 @@ -53,35 +55,33 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple = (4, 1, 0, '.dev0') +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, '.dev0') -def get_version_string(): +def get_version_string() -> str: if isinstance(version_tuple[-1], str): return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1] return '.'.join(map(str, version_tuple)) -__version__ = version = get_version_string() +__version__: str = get_version_string() +version = __version__ + """Current version of PyMongo.""" from pymongo.collection import ReturnDocument -from pymongo.common import (MIN_SUPPORTED_WIRE_VERSION, - MAX_SUPPORTED_WIRE_VERSION) +from pymongo.common import (MAX_SUPPORTED_WIRE_VERSION, + MIN_SUPPORTED_WIRE_VERSION) from pymongo.cursor import CursorType from pymongo.mongo_client import MongoClient -from pymongo.operations import (IndexModel, - InsertOne, - DeleteOne, - DeleteMany, - UpdateOne, - UpdateMany, - ReplaceOne) +from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, + ReplaceOne, UpdateMany, UpdateOne) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -def has_c(): + +def has_c() -> bool: """Is the C extension installed?""" try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] return True except ImportError: return False diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 8fb0225eb3..b2e20e9ca5 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -15,11 +15,10 @@ """Perform aggregation operations on a collection or database.""" from bson.son import SON - from pymongo import common from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError -from pymongo.read_preferences import _AggWritePref, ReadPreference +from pymongo.read_preferences import ReadPreference, _AggWritePref class _AggregationCommand(object): @@ -37,7 +36,7 @@ def __init__(self, target, cursor_class, pipeline, options, self._target = target - common.validate_list('pipeline', pipeline) + pipeline = common.validate_list('pipeline', pipeline) self._pipeline = pipeline self._performs_write = False if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): @@ -82,7 +81,6 @@ def _cursor_namespace(self): """The namespace in which the aggregate command is run.""" raise NotImplementedError - @property def _cursor_collection(self, cursor_doc): """The Collection used for the aggregate command cursor.""" raise NotImplementedError diff --git a/pymongo/auth.py b/pymongo/auth.py index a2e206357c..34f1c7fc94 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -19,9 +19,9 @@ import hmac import os import socket - from base64 import standard_b64decode, standard_b64encode from collections import namedtuple +from typing import Callable, Mapping from urllib.parse import quote from bson.binary import Binary @@ -97,7 +97,7 @@ def __hash__(self): """Mechanism properties for GSSAPI authentication.""" -_AWSProperties = namedtuple('AWSProperties', ['aws_session_token']) +_AWSProperties = namedtuple('_AWSProperties', ['aws_session_token']) """Mechanism properties for MONGODB-AWS authentication.""" @@ -140,9 +140,9 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): properties = extra.get('authmechanismproperties', {}) aws_session_token = properties.get('AWS_SESSION_TOKEN') - props = _AWSProperties(aws_session_token=aws_session_token) + aws_props = _AWSProperties(aws_session_token=aws_session_token) # user can be None for temporary link-local EC2 credentials. - return MongoCredential(mech, '$external', user, passwd, props, None) + return MongoCredential(mech, '$external', user, passwd, aws_props, None) elif mech == 'PLAIN': source_database = source or database or '$external' return MongoCredential(mech, source_database, user, passwd, None, None) @@ -471,7 +471,7 @@ def _authenticate_default(credentials, sock_info): return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') -_AUTH_MAP = { +_AUTH_MAP: Mapping[str, Callable] = { 'GSSAPI': _authenticate_gssapi, 'MONGODB-CR': _authenticate_mongo_cr, 'MONGODB-X509': _authenticate_x509, @@ -532,7 +532,7 @@ def speculate_command(self): return cmd -_SPECULATIVE_AUTH_MAP = { +_SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = { 'MONGODB-X509': _X509Context, 'SCRAM-SHA-1': functools.partial(_ScramContext, mechanism='SCRAM-SHA-1'), 'SCRAM-SHA-256': functools.partial(_ScramContext, @@ -544,6 +544,6 @@ def speculate_command(self): def authenticate(credentials, sock_info): """Authenticate sock_info.""" mechanism = credentials.mechanism - auth_func = _AUTH_MAP.get(mechanism) + auth_func = _AUTH_MAP[mechanism] auth_func(credentials, sock_info) diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index ff07a12e7f..0233d192d4 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -16,12 +16,11 @@ try: import pymongo_auth_aws - from pymongo_auth_aws import (AwsCredential, - AwsSaslContext, + from pymongo_auth_aws import (AwsCredential, AwsSaslContext, PyMongoAuthAwsError) _HAVE_MONGODB_AWS = True except ImportError: - class AwsSaslContext(object): + class AwsSaslContext(object): # type: ignore def __init__(self, credentials): pass _HAVE_MONGODB_AWS = False @@ -32,7 +31,7 @@ def __init__(self, credentials): from pymongo.errors import ConfigurationError, OperationFailure -class _AwsSaslContext(AwsSaslContext): +class _AwsSaslContext(AwsSaslContext): # type: ignore # Dependency injection: def binary_type(self): """Return the bson.binary.Binary type.""" diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 1921108a12..8d343bb2c6 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -17,31 +17,23 @@ .. versionadded:: 2.7 """ import copy - from itertools import islice from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (validate_is_mapping, - validate_is_document_type, - validate_ok_for_replace, - validate_ok_for_update) -from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc from pymongo.collation import validate_collation_or_none -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure) -from pymongo.message import (_INSERT, _UPDATE, _DELETE, - _randint, - _BulkWriteContext, - _EncryptedBulkWriteContext) +from pymongo.common import (validate_is_document_type, validate_is_mapping, + validate_ok_for_replace, validate_ok_for_update) +from pymongo.errors import (BulkWriteError, ConfigurationError, + InvalidOperation, OperationFailure) +from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc +from pymongo.message import (_DELETE, _INSERT, _UPDATE, _BulkWriteContext, + _EncryptedBulkWriteContext, _randint) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern - _DELETE_ALL = 0 _DELETE_ONE = 1 diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 54bf98d83e..69446fdecf 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -15,21 +15,20 @@ """Watch changes on a collection, a database, or the entire cluster.""" import copy +from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, + Optional, Union) from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument - +from bson.timestamp import Timestamp from pymongo import common from pymongo.aggregation import (_CollectionAggregationCommand, _DatabaseAggregationCommand) from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor -from pymongo.errors import (ConnectionFailure, - CursorNotFound, - InvalidOperation, - OperationFailure, - PyMongoError) - +from pymongo.errors import (ConnectionFailure, CursorNotFound, + InvalidOperation, OperationFailure, PyMongoError) +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. @@ -55,7 +54,14 @@ ]) -class ChangeStream(object): +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.database import Database + from pymongo.mongo_client import MongoClient + + +class ChangeStream(Generic[_DocumentType]): """The internal abstract base class for change stream cursors. Should not be called directly by application developers. Use @@ -66,14 +72,22 @@ class ChangeStream(object): .. versionadded:: 3.6 .. seealso:: The MongoDB documentation on `changeStreams `_. """ - def __init__(self, target, pipeline, full_document, resume_after, - max_await_time_ms, batch_size, collation, - start_at_operation_time, session, start_after): + def __init__( + self, + target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]"], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional["ClientSession"], + start_after: Optional[Mapping[str, Any]], + ) -> None: if pipeline is None: pipeline = [] - elif not isinstance(pipeline, list): - raise TypeError("pipeline must be a list") - + pipeline = common.validate_list('pipeline', pipeline) common.validate_string_or_none('full_document', full_document) validate_collation_or_none(collation) common.validate_non_negative_integer_or_none("batchSize", batch_size) @@ -84,7 +98,7 @@ def __init__(self, target, pipeline, full_document, resume_after, self._decode_custom = True # Keep the type registry so that we support encoding custom types # in the pipeline. - self._target = target.with_options( + self._target = target.with_options( # type: ignore codec_options=target.codec_options.with_options( document_class=RawBSONDocument)) else: @@ -117,7 +131,7 @@ def _client(self): def _change_stream_options(self): """Return the options dict for the $changeStream pipeline stage.""" - options = {} + options: Dict[str, Any] = {} if self._full_document is not None: options['fullDocument'] = self._full_document @@ -144,7 +158,7 @@ def _command_options(self): def _aggregation_pipeline(self): """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() - full_pipeline = [{'$changeStream': options}] + full_pipeline: list = [{'$changeStream': options}] full_pipeline.extend(self._pipeline) return full_pipeline @@ -197,15 +211,15 @@ def _resume(self): pass self._cursor = self._create_cursor() - def close(self): + def close(self) -> None: """Close this ChangeStream.""" self._cursor.close() - def __iter__(self): + def __iter__(self) -> "ChangeStream[_DocumentType]": return self @property - def resume_token(self): + def resume_token(self) -> Optional[Mapping[str, Any]]: """The cached resume token that will be used to resume after the most recently returned change. @@ -213,7 +227,7 @@ def resume_token(self): """ return copy.deepcopy(self._resume_token) - def next(self): + def next(self) -> _DocumentType: """Advance the cursor. This method blocks until the next change document is returned or an @@ -255,7 +269,7 @@ def next(self): __next__ = next @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise @@ -265,7 +279,7 @@ def alive(self): """ return self._cursor.alive - def try_next(self): + def try_next(self) -> Optional[_DocumentType]: """Advance the cursor without blocking indefinitely. This method returns the next change document without waiting @@ -354,14 +368,14 @@ def try_next(self): return _bson_to_dict(change.raw, self._orig_codec_options) return change - def __enter__(self): + def __enter__(self) -> "ChangeStream": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() -class CollectionChangeStream(ChangeStream): +class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): """A change stream that watches changes on a single collection. Should not be called directly by application developers. Use @@ -378,7 +392,7 @@ def _client(self): return self._target.database.client -class DatabaseChangeStream(ChangeStream): +class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): """A change stream that watches changes on all collections in a database. Should not be called directly by application developers. Use @@ -395,7 +409,7 @@ def _client(self): return self._target.client -class ClusterChangeStream(DatabaseChangeStream): +class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): """A change stream that watches changes on all collections in the cluster. Should not be called directly by application developers. Use diff --git a/pymongo/client_options.py b/pymongo/client_options.py index c2f5ae01cf..14ef0f781e 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -15,9 +15,9 @@ """Tools to parse mongo client options.""" from bson.codec_options import _parse_codec_options +from pymongo import common from pymongo.auth import _build_credentials_tuple from pymongo.common import validate_boolean -from pymongo import common from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError from pymongo.monitoring import _EventListeners diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 8c61623ae4..3d4ad514e5 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -134,25 +134,23 @@ import collections import time import uuid - from collections.abc import Mapping as _Mapping +from typing import (TYPE_CHECKING, Any, Callable, ContextManager, Generic, + Mapping, Optional, TypeVar) from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from bson.timestamp import Timestamp - from pymongo.cursor import _SocketManager -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure, - PyMongoError, +from pymongo.errors import (ConfigurationError, ConnectionFailure, + InvalidOperation, OperationFailure, PyMongoError, WTimeoutError) from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _DocumentType from pymongo.write_concern import WriteConcern @@ -172,10 +170,12 @@ class SessionOptions(object): .. versionchanged:: 3.12 Added the ``snapshot`` parameter. """ - def __init__(self, - causal_consistency=None, - default_transaction_options=None, - snapshot=False): + def __init__( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional["TransactionOptions"] = None, + snapshot: Optional[bool] = False, + ) -> None: if snapshot: if causal_consistency: raise ConfigurationError('snapshot reads do not support ' @@ -194,12 +194,12 @@ def __init__(self, self._snapshot = snapshot @property - def causal_consistency(self): + def causal_consistency(self) -> bool: """Whether causal consistency is configured.""" return self._causal_consistency @property - def default_transaction_options(self): + def default_transaction_options(self) -> Optional["TransactionOptions"]: """The default TransactionOptions to use for transactions started on this session. @@ -208,7 +208,7 @@ def default_transaction_options(self): return self._default_transaction_options @property - def snapshot(self): + def snapshot(self) -> Optional[bool]: """Whether snapshot reads are configured. .. versionadded:: 3.12 @@ -243,8 +243,13 @@ class TransactionOptions(object): .. versionadded:: 3.7 """ - def __init__(self, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None + ) -> None: self._read_concern = read_concern self._write_concern = write_concern self._read_preference = read_preference @@ -274,23 +279,23 @@ def __init__(self, read_concern=None, write_concern=None, "max_commit_time_ms must be an integer or None") @property - def read_concern(self): + def read_concern(self) -> Optional[ReadConcern]: """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" return self._read_concern @property - def write_concern(self): + def write_concern(self) -> Optional[WriteConcern]: """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" return self._write_concern @property - def read_preference(self): + def read_preference(self) -> Optional[_ServerMode]: """This transaction's :class:`~pymongo.read_preferences.ReadPreference`. """ return self._read_preference @property - def max_commit_time_ms(self): + def max_commit_time_ms(self) -> Optional[int]: """The maxTimeMS to use when running a commitTransaction command. .. versionadded:: 3.9 @@ -427,7 +432,13 @@ def _within_time_limit(start_time): return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT -class ClientSession(object): +_T = TypeVar("_T") + +if TYPE_CHECKING: + from pymongo.mongo_client import MongoClient + + +class ClientSession(Generic[_DocumentType]): """A session for ordering sequential operations. :class:`ClientSession` instances are **not thread-safe or fork-safe**. @@ -439,9 +450,11 @@ class ClientSession(object): :class:`ClientSession`, call :meth:`~pymongo.mongo_client.MongoClient.start_session`. """ - def __init__(self, client, server_session, options, implicit): + def __init__( + self, client: "MongoClient[_DocumentType]", server_session: Any, options: SessionOptions, implicit: bool + ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. - self._client = client + self._client: MongoClient[_DocumentType] = client self._server_session = server_session self._options = options self._cluster_time = None @@ -451,7 +464,7 @@ def __init__(self, client, server_session, options, implicit): self._implicit = implicit self._transaction = _Transaction(None, client) - def end_session(self): + def end_session(self) -> None: """Finish this session. If a transaction has started, abort it. It is an error to use the session after the session has ended. @@ -474,39 +487,39 @@ def _check_ended(self): if self._server_session is None: raise InvalidOperation("Cannot use ended session") - def __enter__(self): + def __enter__(self) -> "ClientSession[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self): + def client(self) -> "MongoClient[_DocumentType]": """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ return self._client @property - def options(self): + def options(self) -> SessionOptions: """The :class:`SessionOptions` this session was created with.""" return self._options @property - def session_id(self): + def session_id(self) -> Mapping[str, Any]: """A BSON document, the opaque server session identifier.""" self._check_ended() return self._server_session.session_id @property - def cluster_time(self): + def cluster_time(self) -> Optional[Mapping[str, Any]]: """The cluster time returned by the last operation executed in this session. """ return self._cluster_time @property - def operation_time(self): + def operation_time(self) -> Optional[Timestamp]: """The operation time returned by the last operation executed in this session. """ @@ -522,8 +535,14 @@ def _inherit_option(self, name, val): return val return getattr(self.client, name) - def with_transaction(self, callback, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def with_transaction( + self, + callback: Callable[["ClientSession"], _T], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: """Execute a callback in a transaction. This method starts a transaction on this session, executes ``callback`` @@ -649,8 +668,13 @@ def callback(session, custom_arg, custom_kwarg=None): # Commit succeeded. return ret - def start_transaction(self, read_concern=None, write_concern=None, - read_preference=None, max_commit_time_ms=None): + def start_transaction( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> ContextManager: """Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. @@ -685,7 +709,7 @@ def start_transaction(self, read_concern=None, write_concern=None, self._start_retryable_write() return _TransactionContext(self) - def commit_transaction(self): + def commit_transaction(self) -> None: """Commit a multi-statement transaction. .. versionadded:: 3.7 @@ -729,7 +753,7 @@ def commit_transaction(self): finally: self._transaction.state = _TxnState.COMMITTED - def abort_transaction(self): + def abort_transaction(self) -> None: """Abort a multi-statement transaction. .. versionadded:: 3.7 @@ -804,7 +828,7 @@ def _advance_cluster_time(self, cluster_time): if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: self._cluster_time = cluster_time - def advance_cluster_time(self, cluster_time): + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: """Update the cluster time for this session. :Parameters: @@ -827,7 +851,7 @@ def _advance_operation_time(self, operation_time): if operation_time > self._operation_time: self._operation_time = operation_time - def advance_operation_time(self, operation_time): + def advance_operation_time(self, operation_time: Timestamp) -> None: """Update the operation time for this session. :Parameters: @@ -856,12 +880,12 @@ def _process_response(self, reply): self._transaction.recovery_token = recovery_token @property - def has_ended(self): + def has_ended(self) -> bool: """True if this session is finished.""" return self._server_session is None @property - def in_transaction(self): + def in_transaction(self) -> bool: """True if this session has an active multi-statement transaction. .. versionadded:: 3.10 diff --git a/pymongo/collation.py b/pymongo/collation.py index 873d603336..e398264ac2 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -16,6 +16,7 @@ .. _collations: http://userguide.icu-project.org/collation/concepts """ +from typing import Any, Dict, Mapping, Optional, Union from pymongo import common @@ -151,18 +152,18 @@ class Collation(object): __slots__ = ("__document",) - def __init__(self, locale, - caseLevel=None, - caseFirst=None, - strength=None, - numericOrdering=None, - alternate=None, - maxVariable=None, - normalization=None, - backwards=None, - **kwargs): + def __init__(self, locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any) -> None: locale = common.validate_string('locale', locale) - self.__document = {'locale': locale} + self.__document: Dict[str, Any] = {'locale': locale} if caseLevel is not None: self.__document['caseLevel'] = common.validate_boolean( 'caseLevel', caseLevel) @@ -190,7 +191,7 @@ def __init__(self, locale, self.__document.update(kwargs) @property - def document(self): + def document(self) -> Dict[str, Any]: """The document representation of this collation. .. note:: @@ -204,16 +205,16 @@ def __repr__(self): return 'Collation(%s)' % ( ', '.join('%s=%r' % (key, document[key]) for key in document),) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): return self.document == other.document return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other -def validate_collation_or_none(value): +def validate_collation_or_none(value: Optional[Union[Mapping[str, Any], Collation]]) -> Optional[Dict[str, Any]]: if value is None: return None if isinstance(value, Collation): diff --git a/pymongo/collection.py b/pymongo/collection.py index ecb82a2cac..aa2d148fbe 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -14,44 +14,45 @@ """Collection level utilities for Mongo.""" -import datetime -import warnings - from collections import abc +from typing import (TYPE_CHECKING, Any, Generic, Iterable, List, Mapping, + MutableMapping, Optional, Sequence, Tuple, Union) from bson.code import Code +from bson.codec_options import CodecOptions from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument -from bson.codec_options import CodecOptions from bson.son import SON -from pymongo import (common, - helpers, - message) +from bson.timestamp import Timestamp +from pymongo import common, helpers, message from pymongo.aggregation import (_CollectionAggregationCommand, _CollectionRawAggregationCommand) from pymongo.bulk import _Bulk -from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.collation import validate_collation_or_none from pymongo.change_stream import CollectionChangeStream +from pymongo.collation import validate_collation_or_none +from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import (ConfigurationError, - InvalidName, - InvalidOperation, +from pymongo.errors import (ConfigurationError, InvalidName, InvalidOperation, OperationFailure) from pymongo.helpers import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import IndexModel -from pymongo.read_preferences import ReadPreference -from pymongo.results import (BulkWriteResult, - DeleteResult, - InsertOneResult, - InsertManyResult, - UpdateResult) +from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, + ReplaceOne, UpdateMany, UpdateOne) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import (BulkWriteResult, DeleteResult, InsertManyResult, + InsertOneResult, UpdateResult) +from pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} +_WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] +# Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] +_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_IndexKeyHint = Union[str, _IndexList] + + class ReturnDocument(object): """An enum used with :meth:`~pymongo.collection.Collection.find_one_and_replace` and @@ -65,13 +66,28 @@ class ReturnDocument(object): """Return the updated/replaced or inserted document.""" -class Collection(common.BaseObject): +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.database import Database + from pymongo.read_concern import ReadConcern + + +class Collection(common.BaseObject, Generic[_DocumentType]): """A Mongo collection. """ - def __init__(self, database, name, create=False, codec_options=None, - read_preference=None, write_concern=None, read_concern=None, - session=None, **kwargs): + def __init__( + self, + database: "Database[_DocumentType]", + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> None: """Get / create a Mongo collection. Raises :class:`TypeError` if `name` is not an instance of @@ -169,7 +185,7 @@ def __init__(self, database, name, create=False, codec_options=None, "null character") collation = validate_collation_or_none(kwargs.pop('collation', None)) - self.__database = database + self.__database: Database[_DocumentType] = database self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) if create or kwargs or collation: @@ -252,7 +268,7 @@ def __create(self, options, collation, session): write_concern=self._write_concern_for(session), collation=collation, session=session) - def __getattr__(self, name): + def __getattr__(self, name: str) -> "Collection[_DocumentType]": """Get a sub-collection of this collection by name. Raises InvalidName if an invalid collection name is used. @@ -268,7 +284,7 @@ def __getattr__(self, name): name, full_name, full_name)) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> "Collection[_DocumentType]": return Collection(self.__database, "%s.%s" % (self.__name, name), False, @@ -280,25 +296,25 @@ def __getitem__(self, name): def __repr__(self): return "Collection(%r, %r)" % (self.__database, self.__name) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): return (self.__database == other.database and self.__name == other.name) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: return hash((self.__database, self.__name)) - def __bool__(self): + def __bool__(self) -> bool: raise NotImplementedError("Collection objects do not implement truth " "value testing or bool(). Please compare " "with None instead: collection is not None") @property - def full_name(self): + def full_name(self) -> str: """The full name of this :class:`Collection`. The full name is of the form `database_name.collection_name`. @@ -306,19 +322,24 @@ def full_name(self): return self.__full_name @property - def name(self): + def name(self) -> str: """The name of this :class:`Collection`.""" return self.__name @property - def database(self): + def database(self) -> "Database[_DocumentType]": """The :class:`~pymongo.database.Database` that this :class:`Collection` is a part of. """ return self.__database - def with_options(self, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def with_options( + self, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> "Collection[_DocumentType]": """Get a clone of this collection changing the specified settings. >>> coll1.read_preference @@ -356,8 +377,13 @@ def with_options(self, codec_options=None, read_preference=None, write_concern or self.write_concern, read_concern or self.read_concern) - def bulk_write(self, requests, ordered=True, - bypass_document_validation=False, session=None): + def bulk_write( + self, + requests: Sequence[_WriteOp], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional["ClientSession"] = None + ) -> BulkWriteResult: """Send a batch of write operations to the server. Requests are passed as a list of write operation instances ( @@ -470,8 +496,10 @@ def _insert_command(session, sock_info, retryable_write): if not isinstance(doc, RawBSONDocument): return doc.get('_id') - def insert_one(self, document, bypass_document_validation=False, - session=None): + def insert_one(self, document: _DocumentIn, + bypass_document_validation: bool = False, + session: Optional["ClientSession"] = None + ) -> InsertOneResult: """Insert a single document. >>> db.test.count_documents({'x': 1}) @@ -520,8 +548,12 @@ def insert_one(self, document, bypass_document_validation=False, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged) - def insert_many(self, documents, ordered=True, - bypass_document_validation=False, session=None): + def insert_many(self, + documents: Iterable[_DocumentIn], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional["ClientSession"] = None + ) -> InsertManyResult: """Insert an iterable of documents. >>> db.test.count_documents({}) @@ -565,7 +597,7 @@ def insert_many(self, documents, ordered=True, or isinstance(documents, abc.Mapping) or not documents): raise TypeError("documents must be a non-empty list") - inserted_ids = [] + inserted_ids: List[ObjectId] = [] def gen(): """A generator that validates documents and handles _ids.""" for document in documents: @@ -671,9 +703,16 @@ def _update(session, sock_info, retryable_write): (write_concern or self.write_concern).acknowledged and not multi, _update, session) - def replace_one(self, filter, replacement, upsert=False, - bypass_document_validation=False, collation=None, - hint=None, session=None, let=None): + def replace_one(self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> UpdateResult: """Replace a single document matching the filter. >>> for doc in db.test.find({}): @@ -755,10 +794,17 @@ def replace_one(self, filter, replacement, upsert=False, collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def update_one(self, filter, update, upsert=False, - bypass_document_validation=False, - collation=None, array_filters=None, hint=None, - session=None, let=None): + def update_one(self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> UpdateResult: """Update a single document matching the filter. >>> for doc in db.test.find(): @@ -800,8 +846,8 @@ def update_one(self, filter, update, upsert=False, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -836,9 +882,17 @@ def update_one(self, filter, update, upsert=False, hint=hint, session=session, let=let), write_concern.acknowledged) - def update_many(self, filter, update, upsert=False, array_filters=None, - bypass_document_validation=False, collation=None, - hint=None, session=None, let=None): + def update_many(self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> UpdateResult: """Update one or more documents that match the filter. >>> for doc in db.test.find(): @@ -880,8 +934,8 @@ def update_many(self, filter, update, upsert=False, array_filters=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -916,7 +970,7 @@ def update_many(self, filter, update, upsert=False, array_filters=None, hint=hint, session=session, let=let), write_concern.acknowledged) - def drop(self, session=None): + def drop(self, session: Optional["ClientSession"] = None) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. :Parameters: @@ -1005,8 +1059,13 @@ def _delete(session, sock_info, retryable_write): (write_concern or self.write_concern).acknowledged and not multi, _delete, session) - def delete_one(self, filter, collation=None, hint=None, session=None, - let=None): + def delete_one(self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> DeleteResult: """Delete a single document matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1030,8 +1089,8 @@ def delete_one(self, filter, collation=None, hint=None, session=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -1055,8 +1114,13 @@ def delete_one(self, filter, collation=None, hint=None, session=None, collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def delete_many(self, filter, collation=None, hint=None, session=None, - let=None): + def delete_many(self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None + ) -> DeleteResult: """Delete one or more documents matching the filter. >>> db.test.count_documents({'x': 1}) @@ -1080,8 +1144,8 @@ def delete_many(self, filter, collation=None, hint=None, session=None, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). :Returns: @@ -1105,7 +1169,7 @@ def delete_many(self, filter, collation=None, hint=None, session=None, collation=collation, hint=hint, session=session, let=let), write_concern.acknowledged) - def find_one(self, filter=None, *args, **kwargs): + def find_one(self, filter: Optional[Any] = None, *args: Any, **kwargs: Any) -> Optional[_DocumentType]: """Get a single document from the database. All arguments to :meth:`find` are also valid arguments for @@ -1139,7 +1203,7 @@ def find_one(self, filter=None, *args, **kwargs): return result return None - def find(self, *args, **kwargs): + def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: """Query the database. The `filter` argument is a prototype document that all results @@ -1328,7 +1392,7 @@ def find(self, *args, **kwargs): """ return Cursor(self, *args, **kwargs) - def find_raw_batches(self, *args, **kwargs): + def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: """Query the database and retrieve batches of raw BSON. Similar to the :meth:`find` method but returns a @@ -1396,7 +1460,7 @@ def _aggregate_one_result( batch = result['cursor']['firstBatch'] return batch[0] if batch else None - def estimated_document_count(self, **kwargs): + def estimated_document_count(self, **kwargs: Any) -> int: """Get an estimate of the number of documents in this collection using collection metadata. @@ -1445,7 +1509,7 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read( _cmd, self.read_preference, None) - def count_documents(self, filter, session=None, **kwargs): + def count_documents(self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any) -> int: """Count the number of documents in this collection. .. note:: For a fast count of the total documents in a collection see @@ -1523,7 +1587,7 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read( _cmd, self._read_preference_for(session), session) - def create_indexes(self, indexes, session=None, **kwargs): + def create_indexes(self, indexes: Sequence[IndexModel], session: Optional["ClientSession"] = None, **kwargs: Any) -> List[str]: """Create one or more indexes on this collection. >>> from pymongo import IndexModel, ASCENDING, DESCENDING @@ -1598,7 +1662,7 @@ def gen_indexes(): session=session) return names - def create_index(self, keys, session=None, **kwargs): + def create_index(self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> str: """Creates an index on this collection. Takes either a single key or a list of (key, direction) pairs. @@ -1701,7 +1765,7 @@ def create_index(self, keys, session=None, **kwargs): index = IndexModel(keys, **kwargs) return self.__create_indexes([index], session, **cmd_options)[0] - def drop_indexes(self, session=None, **kwargs): + def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: """Drops all indexes on this collection. Can be used on non-existant collections or collections with no indexes. @@ -1727,7 +1791,7 @@ def drop_indexes(self, session=None, **kwargs): """ self.drop_index("*", session=session, **kwargs) - def drop_index(self, index_or_name, session=None, **kwargs): + def drop_index(self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: """Drops the specified index on this collection. Can be used on non-existant collections or collections with no @@ -1780,7 +1844,7 @@ def drop_index(self, index_or_name, session=None, **kwargs): write_concern=self._write_concern_for(session), session=session) - def list_indexes(self, session=None): + def list_indexes(self, session: Optional["ClientSession"] = None) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. >>> for index in db.test.list_indexes(): @@ -1829,7 +1893,7 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read( _cmd, read_pref, session) - def index_information(self, session=None): + def index_information(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: """Get information on this collection's indexes. Returns a dictionary where the keys are index names (as @@ -1863,7 +1927,7 @@ def index_information(self, session=None): info[index.pop("name")] = index return info - def options(self, session=None): + def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: """Get the options set on this collection. Returns a dictionary of options and their values - see @@ -1896,6 +1960,7 @@ def options(self, session=None): return {} options = result.get("options", {}) + assert options is not None if "create" in options: del options["create"] @@ -1911,7 +1976,7 @@ def _aggregate(self, aggregation_command, pipeline, cursor_class, session, cmd.get_cursor, cmd.get_read_preference(session), session, retryable=not cmd._performs_write) - def aggregate(self, pipeline, session=None, let=None, **kwargs): + def aggregate(self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this collection. @@ -1993,7 +2058,9 @@ def aggregate(self, pipeline, session=None, let=None, **kwargs): let=let, **kwargs) - def aggregate_raw_batches(self, pipeline, session=None, **kwargs): + def aggregate_raw_batches( + self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> RawBatchCursor[_DocumentType]: """Perform an aggregation and retrieve batches of raw BSON. Similar to the :meth:`aggregate` method but returns a @@ -2030,9 +2097,17 @@ def aggregate_raw_batches(self, pipeline, session=None, **kwargs): explicit_session=session is not None, **kwargs) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch(self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional["ClientSession"] = None, + start_after: Optional[Mapping[str, Any]] = None, + ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. Performs an aggregation with an implicit initial ``$changeStream`` @@ -2132,7 +2207,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, batch_size, collation, start_at_operation_time, session, start_after) - def rename(self, new_name, session=None, **kwargs): + def rename(self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any) -> MutableMapping[str, Any]: """Rename this collection. If operating in auth mode, client must be authorized as an @@ -2183,7 +2258,9 @@ def rename(self, new_name, session=None, **kwargs): parse_write_concern_error=True, session=s, client=self.__database.client) - def distinct(self, key, filter=None, session=None, **kwargs): + def distinct( + self, key: str, filter: Optional[Mapping[str, Any]] = None, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> List: """Get a list of distinct values for `key` among all documents in this collection. @@ -2283,7 +2360,7 @@ def _find_and_modify(session, sock_info, retryable_write): raise ConfigurationError( 'arrayFilters is unsupported for unacknowledged ' 'writes.') - cmd["arrayFilters"] = array_filters + cmd["arrayFilters"] = list(array_filters) if hint is not None: if sock_info.max_wire_version < 8: raise ConfigurationError( @@ -2307,9 +2384,15 @@ def _find_and_modify(session, sock_info, retryable_write): return self.__database.client._retryable_write( write_concern.acknowledged, _find_and_modify, session) - def find_one_and_delete(self, filter, - projection=None, sort=None, hint=None, - session=None, let=None, **kwargs): + def find_one_and_delete(self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and deletes it, returning the document. >>> db.test.count_documents({'x': 1}) @@ -2357,8 +2440,8 @@ def find_one_and_delete(self, filter, as keyword arguments (for example maxTimeMS can be used with recent server versions). - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). .. versionchanged:: 4.1 @@ -2384,10 +2467,18 @@ def find_one_and_delete(self, filter, return self.__find_and_modify(filter, projection, sort, let=let, hint=hint, session=session, **kwargs) - def find_one_and_replace(self, filter, replacement, - projection=None, sort=None, upsert=False, - return_document=ReturnDocument.BEFORE, - hint=None, session=None, let=None, **kwargs): + def find_one_and_replace(self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and replaces it, returning either the original or the replaced document. @@ -2438,8 +2529,8 @@ def find_one_and_replace(self, filter, replacement, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with @@ -2470,11 +2561,19 @@ def find_one_and_replace(self, filter, replacement, sort, upsert, return_document, let=let, hint=hint, session=session, **kwargs) - def find_one_and_update(self, filter, update, - projection=None, sort=None, upsert=False, - return_document=ReturnDocument.BEFORE, - array_filters=None, hint=None, session=None, - let=None, **kwargs): + def find_one_and_update(self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> _DocumentType: """Finds a single document and updates it, returning either the original or the updated document. @@ -2564,8 +2663,8 @@ def find_one_and_update(self, filter, update, - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with @@ -2600,15 +2699,15 @@ def find_one_and_update(self, filter, update, array_filters, hint=hint, let=let, session=session, **kwargs) - def __iter__(self): + def __iter__(self) -> "Collection[_DocumentType]": return self - def __next__(self): + def __next__(self) -> None: raise TypeError("'Collection' object is not iterable") next = __next__ - def __call__(self, *args, **kwargs): + def __call__(self, *args: Any, **kwargs: Any) -> None: """This is only here so that some API misusages are easier to debug. """ if "." not in self.__name: diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 21822ac61b..b7dbf7a8e7 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,28 +15,38 @@ """CommandCursor class to iterate over command results.""" from collections import deque +from typing import (TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, + Tuple) from bson import _convert_raw_document_lists_to_streams -from pymongo.cursor import _SocketManager, _CURSOR_CLOSED_ERRORS -from pymongo.errors import (ConnectionFailure, - InvalidOperation, +from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager +from pymongo.errors import (ConnectionFailure, InvalidOperation, OperationFailure) -from pymongo.message import (_CursorAddress, - _GetMore, - _RawBatchGetMore) +from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore from pymongo.response import PinnedResponse +from pymongo.typings import _DocumentType +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection -class CommandCursor(object): + +class CommandCursor(Generic[_DocumentType]): """A cursor / iterator over command cursors.""" _getmore_class = _GetMore - def __init__(self, collection, cursor_info, address, - batch_size=0, max_await_time_ms=None, session=None, - explicit_session=False): + def __init__(self, + collection: "Collection[_DocumentType]", + cursor_info: Mapping[str, Any], + address: Optional[Tuple[str, Optional[int]]], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional["ClientSession"] = None, + explicit_session: bool = False, + ) -> None: """Create a new command cursor.""" - self.__sock_mgr = None - self.__collection = collection + self.__sock_mgr: Any = None + self.__collection: Collection[_DocumentType] = collection self.__id = cursor_info['id'] self.__data = deque(cursor_info['firstBatch']) self.__postbatchresumetoken = cursor_info.get('postBatchResumeToken') @@ -60,7 +70,7 @@ def __init__(self, collection, cursor_info, address, and max_await_time_ms is not None): raise TypeError("max_await_time_ms must be an integer or None") - def __del__(self): + def __del__(self) -> None: self.__die() def __die(self, synchronous=False): @@ -92,12 +102,12 @@ def __end_session(self, synchronous): self.__session._end_session(lock=synchronous) self.__session = None - def close(self): + def close(self) -> None: """Explicitly close / kill this cursor. """ self.__die(True) - def batch_size(self, batch_size): + def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": """Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. @@ -222,7 +232,7 @@ def _refresh(self): return len(self.__data) @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? Even if :attr:`alive` is ``True``, :meth:`next` can raise @@ -239,12 +249,12 @@ def alive(self): return bool(len(self.__data) or (not self.__killed)) @property - def cursor_id(self): + def cursor_id(self) -> int: """Returns the id of the cursor.""" return self.__id @property - def address(self): + def address(self) -> Optional[Tuple[str, Optional[int]]]: """The (host, port) of the server used, or None. .. versionadded:: 3.0 @@ -252,18 +262,19 @@ def address(self): return self.__address @property - def session(self): + def session(self) -> Optional["ClientSession"]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 """ if self.__explicit_session: return self.__session + return None - def __iter__(self): + def __iter__(self) -> Iterator[_DocumentType]: return self - def next(self): + def next(self) -> _DocumentType: """Advance the cursor.""" # Block until a document is returnable. while self.alive: @@ -284,19 +295,25 @@ def _try_next(self, get_more_allowed): else: return None - def __enter__(self): + def __enter__(self) -> "CommandCursor[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() -class RawBatchCommandCursor(CommandCursor): +class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): _getmore_class = _RawBatchGetMore - def __init__(self, collection, cursor_info, address, - batch_size=0, max_await_time_ms=None, session=None, - explicit_session=False): + def __init__(self, + collection: "Collection[_DocumentType]", + cursor_info: Mapping[str, Any], + address: Optional[Tuple[str, Optional[int]]], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional["ClientSession"] = None, + explicit_session: bool = False, + ) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - diff --git a/pymongo/common.py b/pymongo/common.py index 14789c8109..fa2fe9bf11 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -17,8 +17,9 @@ import datetime import warnings - -from collections import abc, OrderedDict +from collections import OrderedDict, abc +from typing import (Any, Callable, Dict, List, Mapping, MutableMapping, + Optional, Sequence, Tuple, Type, Union, cast) from urllib.parse import unquote_plus from bson import SON @@ -29,18 +30,18 @@ from pymongo.compression_support import (validate_compressors, validate_zlib_compression_level) from pymongo.driver_info import DriverInfo -from pymongo.server_api import ServerApi from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode +from pymongo.server_api import ServerApi from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern -ORDERED_TYPES = (SON, OrderedDict) +ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024 ** 2) -MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE +MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 MAX_WRITE_BATCH_SIZE = 1000 @@ -85,13 +86,13 @@ MAX_CONNECTING = 2 # Default value for maxIdleTimeMS. -MAX_IDLE_TIME_MS = None +MAX_IDLE_TIME_MS: Optional[int] = None # Default value for maxIdleTimeMS in seconds. -MAX_IDLE_TIME_SEC = None +MAX_IDLE_TIME_SEC: Optional[int] = None # Default value for waitQueueTimeoutMS in seconds. -WAIT_QUEUE_TIMEOUT = None +WAIT_QUEUE_TIMEOUT: Optional[int] = None # Default value for localThresholdMS. LOCAL_THRESHOLD_MS = 15 @@ -103,10 +104,10 @@ RETRY_READS = True # The error code returned when a command doesn't exist. -COMMAND_NOT_FOUND_CODES = (59,) +COMMAND_NOT_FOUND_CODES: Sequence[int] = (59,) # Error codes to ignore if GridFS calls createIndex on a secondary -UNAUTHORIZED_CODES = (13, 16547, 16548) +UNAUTHORIZED_CODES: Sequence[int] = (13, 16547, 16548) # Maximum number of sessions to send in a single endSessions command. # From the driver sessions spec. @@ -116,7 +117,7 @@ SRV_SERVICE_NAME = "mongodb" -def partition_node(node): +def partition_node(node: str) -> Tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 @@ -128,7 +129,7 @@ def partition_node(node): return host, port -def clean_node(node): +def clean_node(node: str) -> Tuple[str, int]: """Split and normalize a node name from a hello response.""" host, port = partition_node(node) @@ -139,7 +140,7 @@ def clean_node(node): return host.lower(), port -def raise_config_error(key, dummy): +def raise_config_error(key: str, dummy: Any) -> None: """Raise ConfigurationError with the given key name.""" raise ConfigurationError("Unknown option %s" % (key,)) @@ -154,14 +155,14 @@ def raise_config_error(key, dummy): } -def validate_boolean(option, value): +def validate_boolean(option: str, value: Any) -> bool: """Validates that 'value' is True or False.""" if isinstance(value, bool): return value raise TypeError("%s must be True or False" % (option,)) -def validate_boolean_or_string(option, value): +def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): if value not in ('true', 'false'): @@ -171,7 +172,7 @@ def validate_boolean_or_string(option, value): return validate_boolean(option, value) -def validate_integer(option, value): +def validate_integer(option: str, value: Any) -> int: """Validates that 'value' is an integer (or basestring representation). """ if isinstance(value, int): @@ -185,7 +186,7 @@ def validate_integer(option, value): raise TypeError("Wrong type for %s, value must be an integer" % (option,)) -def validate_positive_integer(option, value): +def validate_positive_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer, which does not include 0. """ val = validate_integer(option, value) @@ -195,7 +196,7 @@ def validate_positive_integer(option, value): return val -def validate_non_negative_integer(option, value): +def validate_non_negative_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer or 0. """ val = validate_integer(option, value) @@ -205,7 +206,7 @@ def validate_non_negative_integer(option, value): return val -def validate_readable(option, value): +def validate_readable(option: str, value: Any) -> Optional[str]: """Validates that 'value' is file-like and readable. """ if value is None: @@ -217,7 +218,7 @@ def validate_readable(option, value): return value -def validate_positive_integer_or_none(option, value): +def validate_positive_integer_or_none(option: str, value: Any) -> Optional[int]: """Validate that 'value' is a positive integer or None. """ if value is None: @@ -225,7 +226,7 @@ def validate_positive_integer_or_none(option, value): return validate_positive_integer(option, value) -def validate_non_negative_integer_or_none(option, value): +def validate_non_negative_integer_or_none(option: str, value: Any) -> Optional[int]: """Validate that 'value' is a positive integer or 0 or None. """ if value is None: @@ -233,7 +234,7 @@ def validate_non_negative_integer_or_none(option, value): return validate_non_negative_integer(option, value) -def validate_string(option, value): +def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`. """ if isinstance(value, str): @@ -242,7 +243,7 @@ def validate_string(option, value): "str" % (option,)) -def validate_string_or_none(option, value): +def validate_string_or_none(option: str, value: Any) -> Optional[str]: """Validates that 'value' is an instance of `basestring` or `None`. """ if value is None: @@ -250,7 +251,7 @@ def validate_string_or_none(option, value): return validate_string(option, value) -def validate_int_or_basestring(option, value): +def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: """Validates that 'value' is an integer or string. """ if isinstance(value, int): @@ -264,7 +265,7 @@ def validate_int_or_basestring(option, value): "integer or a string" % (option,)) -def validate_non_negative_int_or_basestring(option, value): +def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: """Validates that 'value' is an integer or string. """ if isinstance(value, int): @@ -279,7 +280,7 @@ def validate_non_negative_int_or_basestring(option, value): "non negative integer or a string" % (option,)) -def validate_positive_float(option, value): +def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is positive. """ @@ -299,7 +300,7 @@ def validate_positive_float(option, value): return value -def validate_positive_float_or_zero(option, value): +def validate_positive_float_or_zero(option: str, value: Any) -> float: """Validates that 'value' is 0 or a positive float, or can be converted to 0 or a positive float. """ @@ -308,7 +309,7 @@ def validate_positive_float_or_zero(option, value): return validate_positive_float(option, value) -def validate_timeout_or_none(option, value): +def validate_timeout_or_none(option: str, value: Any) -> Optional[float]: """Validates a timeout specified in milliseconds returning a value in floating point seconds. """ @@ -317,7 +318,7 @@ def validate_timeout_or_none(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_timeout_or_zero(option, value): +def validate_timeout_or_zero(option: str, value: Any) -> float: """Validates a timeout specified in milliseconds returning a value in floating point seconds for the case where None is an error and 0 is valid. Setting the timeout to nothing in the URI string is a @@ -330,7 +331,7 @@ def validate_timeout_or_zero(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_timeout_or_none_or_zero(option, value): +def validate_timeout_or_none_or_zero(option: Any, value: Any) -> Optional[float]: """Validates a timeout specified in milliseconds returning a value in floating point seconds. value=0 and value="0" are treated the same as value=None which means unlimited timeout. @@ -340,7 +341,7 @@ def validate_timeout_or_none_or_zero(option, value): return validate_positive_float(option, value) / 1000.0 -def validate_max_staleness(option, value): +def validate_max_staleness(option: str, value: Any) -> int: """Validates maxStalenessSeconds according to the Max Staleness Spec.""" if value == -1 or value == "-1": # Default: No maximum staleness. @@ -348,7 +349,7 @@ def validate_max_staleness(option, value): return validate_positive_integer(option, value) -def validate_read_preference(dummy, value): +def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: """Validate a read preference. """ if not isinstance(value, _ServerMode): @@ -356,7 +357,7 @@ def validate_read_preference(dummy, value): return value -def validate_read_preference_mode(dummy, value): +def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: """Validate read preference mode for a MongoClient. .. versionchanged:: 3.5 @@ -368,7 +369,7 @@ def validate_read_preference_mode(dummy, value): return value -def validate_auth_mechanism(option, value): +def validate_auth_mechanism(option: str, value: Any) -> str: """Validate the authMechanism URI option. """ if value not in MECHANISMS: @@ -376,7 +377,7 @@ def validate_auth_mechanism(option, value): return value -def validate_uuid_representation(dummy, value): +def validate_uuid_representation(dummy: Any, value: Any) -> int: """Validate the uuid representation option selected in the URI. """ try: @@ -387,13 +388,13 @@ def validate_uuid_representation(dummy, value): "%s" % (value, tuple(_UUID_REPRESENTATIONS))) -def validate_read_preference_tags(name, value): +def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]]: """Parse readPreferenceTags if passed as a client kwarg. """ if not isinstance(value, list): value = [value] - tag_sets = [] + tag_sets: List = [] for tag_set in value: if tag_set == '': tag_sets.append({}) @@ -416,10 +417,10 @@ def validate_read_preference_tags(name, value): 'AWS_SESSION_TOKEN']) -def validate_auth_mechanism_properties(option, value): +def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" value = validate_string(option, value) - props = {} + props: Dict[str, Any] = {} for opt in value.split(','): try: key, val = opt.split(':') @@ -443,7 +444,7 @@ def validate_auth_mechanism_properties(option, value): return props -def validate_document_class(option, value): +def validate_document_class(option: str, value: Any) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: """Validate the document_class option.""" if not issubclass(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError("%s must be dict, bson.son.SON, " @@ -452,7 +453,7 @@ def validate_document_class(option, value): return value -def validate_type_registry(option, value): +def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): raise TypeError("%s must be an instance of %s" % ( @@ -460,21 +461,21 @@ def validate_type_registry(option, value): return value -def validate_list(option, value): +def validate_list(option: str, value: Any) -> List: """Validates that 'value' is a list.""" if not isinstance(value, list): raise TypeError("%s must be a list" % (option,)) return value -def validate_list_or_none(option, value): +def validate_list_or_none(option: Any, value: Any) -> Optional[List]: """Validates that 'value' is a list or None.""" if value is None: return value return validate_list(option, value) -def validate_list_or_mapping(option, value): +def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): raise TypeError("%s must either be a list or an instance of dict, " @@ -482,7 +483,7 @@ def validate_list_or_mapping(option, value): "collections.Mapping" % (option,)) -def validate_is_mapping(option, value): +def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): raise TypeError("%s must be an instance of dict, bson.son.SON, or " @@ -490,7 +491,7 @@ def validate_is_mapping(option, value): "collections.Mapping" % (option,)) -def validate_is_document_type(option, value): +def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError("%s must be an instance of dict, bson.son.SON, " @@ -499,7 +500,7 @@ def validate_is_document_type(option, value): "collections.MutableMapping" % (option,)) -def validate_appname_or_none(option, value): +def validate_appname_or_none(option: str, value: Any) -> Optional[str]: """Validate the appname option.""" if value is None: return value @@ -510,7 +511,7 @@ def validate_appname_or_none(option, value): return value -def validate_driver_or_none(option, value): +def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: """Validate the driver keyword arg.""" if value is None: return value @@ -519,7 +520,7 @@ def validate_driver_or_none(option, value): return value -def validate_server_api_or_none(option, value): +def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: """Validate the server_api keyword arg.""" if value is None: return value @@ -528,7 +529,7 @@ def validate_server_api_or_none(option, value): return value -def validate_is_callable_or_none(option, value): +def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: """Validates that 'value' is a callable.""" if value is None: return value @@ -537,7 +538,7 @@ def validate_is_callable_or_none(option, value): return value -def validate_ok_for_replace(replacement): +def validate_ok_for_replace(replacement: Mapping[str, Any]) -> None: """Validate a replacement document.""" validate_is_mapping("replacement", replacement) # Replacement can be {} @@ -547,7 +548,7 @@ def validate_ok_for_replace(replacement): raise ValueError('replacement can not include $ operators') -def validate_ok_for_update(update): +def validate_ok_for_update(update: Any) -> None: """Validate an update document.""" validate_list_or_mapping("update", update) # Update cannot be {}. @@ -563,7 +564,7 @@ def validate_ok_for_update(update): _UNICODE_DECODE_ERROR_HANDLERS = frozenset(['strict', 'replace', 'ignore']) -def validate_unicode_decode_error_handler(dummy, value): +def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: """Validate the Unicode decode error handler option of CodecOptions. """ if value not in _UNICODE_DECODE_ERROR_HANDLERS: @@ -573,7 +574,7 @@ def validate_unicode_decode_error_handler(dummy, value): return value -def validate_tzinfo(dummy, value): +def validate_tzinfo(dummy: Any, value: Any) -> Optional[datetime.tzinfo]: """Validate the tzinfo option """ if value is not None and not isinstance(value, datetime.tzinfo): @@ -581,7 +582,7 @@ def validate_tzinfo(dummy, value): return value -def validate_auto_encryption_opts_or_none(option, value): +def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[Any]: """Validate the driver keyword arg.""" if value is None: return value @@ -595,7 +596,7 @@ def validate_auto_encryption_opts_or_none(option, value): # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. -URI_OPTIONS_ALIAS_MAP = { +URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { 'tls': ['ssl'], } @@ -603,7 +604,7 @@ def validate_auto_encryption_opts_or_none(option, value): # are functions that validate user-input values for that option. If an option # alias uses a different validator than its public counterpart, it should be # included here as a key, value pair. -URI_OPTIONS_VALIDATOR_MAP = { +URI_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { 'appname': validate_appname_or_none, 'authmechanism': validate_auth_mechanism, 'authmechanismproperties': validate_auth_mechanism_properties, @@ -644,7 +645,7 @@ def validate_auto_encryption_opts_or_none(option, value): # Dictionary where keys are the names of URI options specific to pymongo, # and values are functions that validate user-input values for those options. -NONSPEC_OPTIONS_VALIDATOR_MAP = { +NONSPEC_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { 'connect': validate_boolean_or_string, 'driver': validate_driver_or_none, 'server_api': validate_server_api_or_none, @@ -661,7 +662,7 @@ def validate_auto_encryption_opts_or_none(option, value): # Dictionary where keys are the names of keyword-only options for the # MongoClient constructor, and values are functions that validate user-input # values for those options. -KW_VALIDATORS = { +KW_VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = { 'document_class': validate_document_class, 'type_registry': validate_type_registry, 'read_preference': validate_read_preference, @@ -677,14 +678,14 @@ def validate_auto_encryption_opts_or_none(option, value): # internally-used names of that URI option. Options with only one name # variant need not be included here. Options whose public and internal # names are the same need not be included here. -INTERNAL_URI_OPTION_NAME_MAP = { +INTERNAL_URI_OPTION_NAME_MAP: Dict[str, str] = { 'ssl': 'tls', } # Map from deprecated URI option names to a tuple indicating the method of # their deprecation and any additional information that may be needed to # construct the warning message. -URI_OPTIONS_DEPRECATION_MAP = { +URI_OPTIONS_DEPRECATION_MAP: Dict[str, Tuple[str, str]] = { # format: : (, ), # Supported values: # - 'renamed': should be the new option name. Note that case is @@ -704,11 +705,11 @@ def validate_auto_encryption_opts_or_none(option, value): URI_OPTIONS_VALIDATOR_MAP[optname]) # Map containing all URI option and keyword argument validators. -VALIDATORS = URI_OPTIONS_VALIDATOR_MAP.copy() +VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() VALIDATORS.update(KW_VALIDATORS) # List of timeout-related options. -TIMEOUT_OPTIONS = [ +TIMEOUT_OPTIONS: List[str] = [ 'connecttimeoutms', 'heartbeatfrequencyms', 'maxidletimems', @@ -722,7 +723,7 @@ def validate_auto_encryption_opts_or_none(option, value): _AUTH_OPTIONS = frozenset(['authmechanismproperties']) -def validate_auth_option(option, value): +def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: """Validate optional authentication parameters. """ lower, value = validate(option, value) @@ -732,7 +733,7 @@ def validate_auth_option(option, value): return option, value -def validate(option, value): +def validate(option: str, value: Any) -> Tuple[str, Any]: """Generic validation function. """ lower = option.lower() @@ -741,7 +742,7 @@ def validate(option, value): return option, value -def get_validated_options(options, warn=True): +def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> MutableMapping[str, Any]: """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. @@ -751,6 +752,7 @@ def get_validated_options(options, warn=True): invalid options will be ignored. Otherwise, invalid options will cause errors. """ + validated_options: MutableMapping[str, Any] if isinstance(options, _CaseInsensitiveDictionary): validated_options = _CaseInsensitiveDictionary() get_normed_key = lambda x: x @@ -794,8 +796,8 @@ class BaseObject(object): SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. """ - def __init__(self, codec_options, read_preference, write_concern, - read_concern): + def __init__(self, codec_options: CodecOptions, read_preference: _ServerMode, write_concern: WriteConcern, + read_concern: ReadConcern) -> None: if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of " @@ -819,14 +821,14 @@ def __init__(self, codec_options, read_preference, write_concern, self.__read_concern = read_concern @property - def codec_options(self): + def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. """ return self.__codec_options @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """Read only access to the :class:`~pymongo.write_concern.WriteConcern` of this instance. @@ -844,7 +846,7 @@ def _write_concern_for(self, session): return self.write_concern @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """Read only access to the read preference of this instance. .. versionchanged:: 3.0 @@ -861,7 +863,7 @@ def _read_preference_for(self, session): return self.__read_preference @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """Read only access to the :class:`~pymongo.read_concern.ReadConcern` of this instance. diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index d367595288..c9cc041aff 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -13,6 +13,7 @@ # limitations under the License. import warnings +from typing import Callable try: import snappy @@ -99,7 +100,7 @@ def get_compression_context(self, compressors): return ZstdContext() -def _zlib_no_compress(data): +def _zlib_no_compress(data, level=None): """Compress data with zlib level 0.""" cobj = zlib.compressobj(0) return b"".join([cobj.compress(data), cobj.flush()]) @@ -117,6 +118,8 @@ class ZlibContext(object): compressor_id = 2 def __init__(self, level): + self.compress: Callable[[bytes], bytes] + # Jython zlib.compress doesn't support -1 if level == -1: self.compress = zlib.compress @@ -124,7 +127,7 @@ def __init__(self, level): elif level == 0: self.compress = _zlib_no_compress else: - self.compress = lambda data: zlib.compress(data, level) + self.compresss = lambda data, _: zlib.compress(data, level) class ZstdContext(object): diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 3e78c2d97c..152acaca65 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -13,29 +13,26 @@ # limitations under the License. """Cursor class to iterate over Mongo query results.""" - import copy import threading import warnings - from collections import deque +from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterable, List, Mapping, + MutableMapping, Optional, Sequence, Tuple, Union, cast, overload) from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON from pymongo import helpers -from pymongo.common import (validate_boolean, validate_is_mapping, - validate_is_document_type) from pymongo.collation import validate_collation_or_none -from pymongo.errors import (ConnectionFailure, - InvalidOperation, +from pymongo.common import (validate_boolean, validate_is_document_type, + validate_is_mapping) +from pymongo.errors import (ConnectionFailure, InvalidOperation, OperationFailure) -from pymongo.message import (_CursorAddress, - _GetMore, - _RawBatchGetMore, - _Query, - _RawBatchQuery) +from pymongo.message import (_CursorAddress, _GetMore, _Query, + _RawBatchGetMore, _RawBatchQuery) from pymongo.response import PinnedResponse +from pymongo.typings import _CollationIn, _DocumentType # These errors mean that the server has already killed the cursor so there is # no need to send killCursors. @@ -126,22 +123,47 @@ def close(self): self.sock.unpin() self.sock = None +_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_Hint = Union[str, _Sort] + + +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection -class Cursor(object): + +class Cursor(Generic[_DocumentType]): """A cursor / iterator over Mongo query results. """ _query_class = _Query _getmore_class = _GetMore - def __init__(self, collection, filter=None, projection=None, skip=0, - limit=0, no_cursor_timeout=False, - cursor_type=CursorType.NON_TAILABLE, - sort=None, allow_partial_results=False, oplog_replay=False, - batch_size=0, - collation=None, hint=None, max_scan=None, max_time_ms=None, - max=None, min=None, return_key=None, show_record_id=None, - snapshot=None, comment=None, session=None, - allow_disk_use=None, let=None): + def __init__(self, + collection: "Collection[_DocumentType]", + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Any = None, + session: Optional["ClientSession"] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None + ) -> None: """Create a new cursor. Should not be called directly by application developers - see @@ -151,11 +173,12 @@ def __init__(self, collection, filter=None, projection=None, skip=0, """ # Initialize all attributes used in __del__ before possibly raising # an error to avoid attribute errors during garbage collection. - self.__collection = collection - self.__id = None + self.__collection: Collection[_DocumentType] = collection + self.__id: Any = None self.__exhaust = False - self.__sock_mgr = None + self.__sock_mgr: Any = None self.__killed = False + self.__session: Optional["ClientSession"] if session: self.__session = session @@ -164,10 +187,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__session = None self.__explicit_session = False - spec = filter - if spec is None: - spec = {} - + spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) if not isinstance(skip, int): raise TypeError("skip must be an instance of int") @@ -203,6 +223,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__let = let self.__spec = spec + self.__has_filter = filter is not None self.__projection = projection self.__skip = skip self.__limit = limit @@ -212,9 +233,9 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__explain = False self.__comment = comment self.__max_time_ms = max_time_ms - self.__max_await_time_ms = None - self.__max = max - self.__min = min + self.__max_await_time_ms: Optional[int] = None + self.__max: Optional[Union[SON[Any, Any], _Sort]] = max + self.__min: Optional[Union[SON[Any, Any], _Sort]] = min self.__collation = validate_collation_or_none(collation) self.__return_key = return_key self.__show_record_id = show_record_id @@ -239,7 +260,7 @@ def __init__(self, collection, filter=None, projection=None, skip=0, # it anytime we change __limit. self.__empty = False - self.__data = deque() + self.__data: deque = deque() self.__address = None self.__retrieved = 0 @@ -261,22 +282,22 @@ def __init__(self, collection, filter=None, projection=None, skip=0, self.__collname = collection.name @property - def collection(self): + def collection(self) -> "Collection[_DocumentType]": """The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating. """ return self.__collection @property - def retrieved(self): + def retrieved(self) -> int: """The number of documents retrieved so far. """ return self.__retrieved - def __del__(self): + def __del__(self) -> None: self.__die() - def rewind(self): + def rewind(self) -> "Cursor[_DocumentType]": """Rewind this cursor to its unevaluated state. Reset this cursor if it has been partially or completely evaluated. @@ -294,7 +315,7 @@ def rewind(self): return self - def clone(self): + def clone(self) -> "Cursor[_DocumentType]": """Get a clone of this cursor. Returns a new Cursor instance with options matching those that have @@ -318,7 +339,7 @@ def _clone(self, deepcopy=True, base=None): "batch_size", "max_scan", "query_flags", "collation", "empty", "show_record_id", "return_key", "allow_disk_use", - "snapshot", "exhaust") + "snapshot", "exhaust", "has_filter") data = dict((k, v) for k, v in self.__dict__.items() if k.startswith('_Cursor__') and k[9:] in values_to_clone) if deepcopy: @@ -360,7 +381,7 @@ def __die(self, synchronous=False): self.__session = None self.__sock_mgr = None - def close(self): + def close(self) -> None: """Explicitly close / kill this cursor. """ self.__die(True) @@ -397,7 +418,7 @@ def __query_spec(self): if operators: # Make a shallow copy so we can cleanly rewind or clone. - spec = self.__spec.copy() + spec = copy.copy(self.__spec) # Allow-listed commands must be wrapped in $query. if "$query" not in spec: @@ -429,7 +450,7 @@ def __check_okay_to_chain(self): if self.__retrieved or self.__id is not None: raise InvalidOperation("cannot set options after executing query") - def add_option(self, mask): + def add_option(self, mask: int) -> "Cursor[_DocumentType]": """Set arbitrary query flags using a bitmask. To set the tailable flag: @@ -450,7 +471,7 @@ def add_option(self, mask): self.__query_flags |= mask return self - def remove_option(self, mask): + def remove_option(self, mask: int) -> "Cursor[_DocumentType]": """Unset arbitrary query flags using a bitmask. To unset the tailable flag: @@ -466,7 +487,7 @@ def remove_option(self, mask): self.__query_flags &= ~mask return self - def allow_disk_use(self, allow_disk_use): + def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]": """Specifies whether MongoDB can use temporary disk files while processing a blocking sort operation. @@ -488,7 +509,7 @@ def allow_disk_use(self, allow_disk_use): self.__allow_disk_use = allow_disk_use return self - def limit(self, limit): + def limit(self, limit: int) -> "Cursor[_DocumentType]": """Limits the number of results to be returned by this cursor. Raises :exc:`TypeError` if `limit` is not an integer. Raises @@ -511,7 +532,7 @@ def limit(self, limit): self.__limit = limit return self - def batch_size(self, batch_size): + def batch_size(self, batch_size: int) -> "Cursor[_DocumentType]": """Limits the number of documents returned in one batch. Each batch requires a round trip to the server. It can be adjusted to optimize performance and limit data transfer. @@ -539,7 +560,7 @@ def batch_size(self, batch_size): self.__batch_size = batch_size return self - def skip(self, skip): + def skip(self, skip: int) -> "Cursor[_DocumentType]": """Skips the first `skip` results of this cursor. Raises :exc:`TypeError` if `skip` is not an integer. Raises @@ -560,7 +581,7 @@ def skip(self, skip): self.__skip = skip return self - def max_time_ms(self, max_time_ms): + def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]": """Specifies a time limit for a query operation. If the specified time is exceeded, the operation will be aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` @@ -581,7 +602,7 @@ def max_time_ms(self, max_time_ms): self.__max_time_ms = max_time_ms return self - def max_await_time_ms(self, max_await_time_ms): + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_DocumentType]": """Specifies a time limit for a getMore operation on a :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other types of cursor max_await_time_ms is ignored. @@ -609,6 +630,14 @@ def max_await_time_ms(self, max_await_time_ms): return self + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> "Cursor[_DocumentType]": + ... + def __getitem__(self, index): """Get a single document or a slice of documents from this cursor. @@ -691,7 +720,7 @@ def __getitem__(self, index): raise TypeError("index %r cannot be applied to Cursor " "instances" % index) - def max_scan(self, max_scan): + def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": """**DEPRECATED** - Limit the number of documents to scan when performing the query. @@ -711,7 +740,7 @@ def max_scan(self, max_scan): self.__max_scan = max_scan return self - def max(self, spec): + def max(self, spec: _Sort) -> "Cursor[_DocumentType]": """Adds ``max`` operator that specifies upper bound for specific index. When using ``max``, :meth:`~hint` should also be configured to ensure @@ -734,7 +763,7 @@ def max(self, spec): self.__max = SON(spec) return self - def min(self, spec): + def min(self, spec: _Sort) -> "Cursor[_DocumentType]": """Adds ``min`` operator that specifies lower bound for specific index. When using ``min``, :meth:`~hint` should also be configured to ensure @@ -757,7 +786,7 @@ def min(self, spec): self.__min = SON(spec) return self - def sort(self, key_or_list, direction=None): + def sort(self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None) -> "Cursor[_DocumentType]": """Sorts this cursor's results. Pass a field name and a direction, either @@ -803,7 +832,7 @@ def sort(self, key_or_list, direction=None): self.__ordering = helpers._index_document(keys) return self - def distinct(self, key): + def distinct(self, key: str) -> List: """Get a list of distinct values for `key` among all documents in the result set of this query. @@ -820,7 +849,7 @@ def distinct(self, key): .. seealso:: :meth:`pymongo.collection.Collection.distinct` """ - options = {} + options: Dict[str, Any] = {} if self.__spec: options["query"] = self.__spec if self.__max_time_ms is not None: @@ -833,7 +862,7 @@ def distinct(self, key): return self.__collection.distinct( key, session=self.__session, **options) - def explain(self): + def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. .. note:: This method uses the default verbosity mode of the @@ -863,7 +892,7 @@ def __set_hint(self, index): else: self.__hint = helpers._index_document(index) - def hint(self, index): + def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]": """Adds a 'hint', telling Mongo the proper index to use for the query. Judicious use of hints can greatly improve query @@ -888,7 +917,7 @@ def hint(self, index): self.__set_hint(index) return self - def comment(self, comment): + def comment(self, comment: Any) -> "Cursor[_DocumentType]": """Adds a 'comment' to the cursor. http://docs.mongodb.org/manual/reference/operator/comment/ @@ -903,7 +932,7 @@ def comment(self, comment): self.__comment = comment return self - def where(self, code): + def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]": """Adds a `$where`_ clause to this query. The `code` argument must be an instance of :class:`basestring` @@ -937,10 +966,18 @@ def where(self, code): if not isinstance(code, Code): code = Code(code) - self.__spec["$where"] = code + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: Dict[str, Any] + if self.__has_filter: + spec = dict(self.__spec) + else: + spec = cast(Dict, self.__spec) + spec["$where"] = code + self.__spec = spec return self - def collation(self, collation): + def collation(self, collation: Optional[_CollationIn]) -> "Cursor[_DocumentType]": """Adds a :class:`~pymongo.collation.Collation` to this query. Raises :exc:`TypeError` if `collation` is not an instance of @@ -1106,7 +1143,7 @@ def _refresh(self): return len(self.__data) @property - def alive(self): + def alive(self) -> bool: """Does this cursor have the potential to return more data? This is mostly useful with `tailable cursors @@ -1128,7 +1165,7 @@ def alive(self): return bool(len(self.__data) or (not self.__killed)) @property - def cursor_id(self): + def cursor_id(self) -> Optional[int]: """Returns the id of the cursor .. versionadded:: 2.2 @@ -1136,7 +1173,7 @@ def cursor_id(self): return self.__id @property - def address(self): + def address(self) -> Optional[Tuple[str, Any]]: """The (host, port) of the server used, or None. .. versionchanged:: 3.0 @@ -1145,18 +1182,19 @@ def address(self): return self.__address @property - def session(self): + def session(self) -> Optional["ClientSession"]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 """ if self.__explicit_session: return self.__session + return None - def __iter__(self): + def __iter__(self) -> "Cursor[_DocumentType]": return self - def next(self): + def next(self) -> _DocumentType: """Advance the cursor.""" if self.__empty: raise StopIteration @@ -1167,20 +1205,20 @@ def next(self): __next__ = next - def __enter__(self): + def __enter__(self) -> "Cursor[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __copy__(self): + def __copy__(self) -> "Cursor[_DocumentType]": """Support function for `copy.copy()`. .. versionadded:: 2.4 """ return self._clone(deepcopy=False) - def __deepcopy__(self, memo): + def __deepcopy__(self, memo: Any) -> Any: """Support function for `copy.deepcopy()`. .. versionadded:: 2.4 @@ -1193,6 +1231,7 @@ def _deepcopy(self, x, memo=None): Regular expressions cannot be deep copied but as they are immutable we don't have to copy them when cloning. """ + y: Any if not hasattr(x, 'items'): y, is_list, iterator = [], True, enumerate(x) else: @@ -1220,13 +1259,13 @@ def _deepcopy(self, x, memo=None): return y -class RawBatchCursor(Cursor): +class RawBatchCursor(Cursor, Generic[_DocumentType]): """A cursor / iterator over raw batches of BSON data from a query result.""" _query_class = _RawBatchQuery _getmore_class = _RawBatchGetMore - def __init__(self, *args, **kwargs): + def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - @@ -1235,7 +1274,7 @@ def __init__(self, *args, **kwargs): .. seealso:: The MongoDB documentation on `cursors `_. """ - super(RawBatchCursor, self).__init__(*args, **kwargs) + super(RawBatchCursor, self).__init__(collection, *args, **kwargs) def _unpack_response(self, response, cursor_id, codec_options, user_fields=None, legacy_response=False): @@ -1247,7 +1286,7 @@ def _unpack_response(self, response, cursor_id, codec_options, _convert_raw_document_lists_to_streams(raw_response[0]) return raw_response - def explain(self): + def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. .. seealso:: The MongoDB documentation on `explain `_. @@ -1255,5 +1294,5 @@ def explain(self): clone = self._clone(deepcopy=True, base=Cursor(self.collection)) return clone.explain() - def __getitem__(self, index): + def __getitem__(self, index: Any) -> "Cursor[_DocumentType]": raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/database.py b/pymongo/database.py index a6c1275126..4f5f931352 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,18 +13,21 @@ # limitations under the License. """Database level operations.""" +from typing import (TYPE_CHECKING, Any, Dict, Generic, List, Mapping, MutableMapping, Optional, + Sequence, Union) -from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.dbref import DBRef from bson.son import SON +from bson.timestamp import Timestamp from pymongo import common from pymongo.aggregation import _DatabaseAggregationCommand from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.errors import (CollectionInvalid, - InvalidName) -from pymongo.read_preferences import ReadPreference +from pymongo.errors import CollectionInvalid, InvalidName +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline def _check_name(name): @@ -39,12 +42,24 @@ def _check_name(name): "character %r" % invalid_char) -class Database(common.BaseObject): +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.mongo_client import MongoClient + from pymongo.read_concern import ReadConcern + from pymongo.write_concern import WriteConcern + + +class Database(common.BaseObject, Generic[_DocumentType]): """A Mongo database. """ - - def __init__(self, client, name, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def __init__(self, + client: "MongoClient[_DocumentType]", + name: str, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> None: """Get a database by client and name. Raises :class:`TypeError` if `name` is not an instance of @@ -104,20 +119,24 @@ def __init__(self, client, name, codec_options=None, read_preference=None, _check_name(name) self.__name = name - self.__client = client + self.__client: MongoClient[_DocumentType] = client @property - def client(self): + def client(self) -> "MongoClient[_DocumentType]": """The client instance for this :class:`Database`.""" return self.__client @property - def name(self): + def name(self) -> str: """The name of this :class:`Database`.""" return self.__name - def with_options(self, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def with_options(self, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. >>> db1.read_preference @@ -156,22 +175,22 @@ def with_options(self, codec_options=None, read_preference=None, write_concern or self.write_concern, read_concern or self.read_concern) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, Database): return (self.__client == other.client and self.__name == other.name) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: return hash((self.__client, self.__name)) def __repr__(self): return "Database(%r, %r)" % (self.__client, self.__name) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. @@ -185,7 +204,7 @@ def __getattr__(self, name): " collection, use database[%r]." % (name, name, name)) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> "Collection[_DocumentType]": """Get a collection of this database by name. Raises InvalidName if an invalid collection name is used. @@ -195,8 +214,13 @@ def __getitem__(self, name): """ return Collection(self, name) - def get_collection(self, name, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def get_collection(self, + name: str, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -238,9 +262,15 @@ def get_collection(self, name, codec_options=None, read_preference=None, self, name, False, codec_options, read_preference, write_concern, read_concern) - def create_collection(self, name, codec_options=None, - read_preference=None, write_concern=None, - read_concern=None, session=None, **kwargs): + def create_collection(self, + name: str, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional["WriteConcern"] = None, + read_concern: Optional["ReadConcern"] = None, + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this database. @@ -286,20 +316,20 @@ def create_collection(self, name, codec_options=None, timeseries collections - ``expireAfterSeconds`` (int): the number of seconds after which a document in a timeseries collection expires - - ``validator`` (dict): a document specifying validation rules or expressions + - ``validator`` (dict): a document specifying validation rules or expressions for the collection - - ``validationLevel`` (str): how strictly to apply the + - ``validationLevel`` (str): how strictly to apply the validation rules to existing documents during an update. The default level is "strict" - ``validationAction`` (str): whether to "error" on invalid documents - (the default) or just "warn" about the violations but allow invalid + (the default) or just "warn" about the violations but allow invalid documents to be inserted - ``indexOptionDefaults`` (dict): a document specifying a default configuration for indexes when creating a collection - - ``viewOn`` (str): the name of the source collection or view from which + - ``viewOn`` (str): the name of the source collection or view from which to create the view - ``pipeline`` (list): a list of aggregation pipeline stages - - ``comment`` (str): a user-provided comment to attach to this command. + - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. .. versionchanged:: 3.11 @@ -330,7 +360,11 @@ def create_collection(self, name, codec_options=None, read_preference, write_concern, read_concern, session=s, **kwargs) - def aggregate(self, pipeline, session=None, **kwargs): + def aggregate(self, + pipeline: _Pipeline, + session: Optional["ClientSession"] = None, + **kwargs: Any + ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. See the `aggregation pipeline`_ documentation for a list of stages @@ -400,9 +434,17 @@ def aggregate(self, pipeline, session=None, **kwargs): cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch(self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional["ClientSession"] = None, + start_after: Optional[Mapping[str, Any]] = None, + ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. Performs an aggregation with an implicit initial ``$changeStream`` @@ -515,9 +557,16 @@ def _command(self, sock_info, command, value=1, check=True, session=s, client=self.__client) - def command(self, command, value=1, check=True, - allowable_errors=None, read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): + def command(self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions] = DEFAULT_CODEC_OPTIONS, + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> Dict[str, Any]: """Issue a MongoDB command. Send command `command` to the database and return the @@ -648,7 +697,11 @@ def _list_collections(self, sock_info, session, read_preference, **kwargs): cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - def list_collections(self, session=None, filter=None, **kwargs): + def list_collections(self, + session: Optional["ClientSession"] = None, + filter: Optional[Mapping[str, Any]] = None, + **kwargs: Any + ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the collections of this database. :Parameters: @@ -680,7 +733,11 @@ def _cmd(session, server, sock_info, read_preference): return self.__client._retryable_read( _cmd, read_pref, session) - def list_collection_names(self, session=None, filter=None, **kwargs): + def list_collection_names(self, + session: Optional["ClientSession"] = None, + filter: Optional[Mapping[str, Any]] = None, + **kwargs: Any + ) -> List[str]: """Get a list of all the collection names in this database. For example, to list all non-system collections:: @@ -717,7 +774,10 @@ def list_collection_names(self, session=None, filter=None, **kwargs): return [result["name"] for result in self.list_collections(session=session, **kwargs)] - def drop_collection(self, name_or_collection, session=None): + def drop_collection(self, + name_or_collection: Union[str, Collection], + session: Optional["ClientSession"] = None + ) -> Dict[str, Any]: """Drop a collection. :Parameters: @@ -752,9 +812,13 @@ def drop_collection(self, name_or_collection, session=None): parse_write_concern_error=True, session=session) - def validate_collection(self, name_or_collection, - scandata=False, full=False, session=None, - background=None): + def validate_collection(self, + name_or_collection: Union[str, Collection], + scandata: bool = False, + full: bool = False, + session: Optional["ClientSession"] = None, + background: Optional[bool] = None, + ) -> Dict[str, Any]: """Validate a collection. Returns a dict of validation info. Raises CollectionInvalid if @@ -827,20 +891,23 @@ def validate_collection(self, name_or_collection, return result - def __iter__(self): + def __iter__(self) -> "Database[_DocumentType]": return self - def __next__(self): + def __next__(self) -> "Database[_DocumentType]": raise TypeError("'Database' object is not iterable") next = __next__ - def __bool__(self): + def __bool__(self) -> bool: raise NotImplementedError("Database objects do not implement truth " "value testing or bool(). Please compare " "with None instead: database is not None") - def dereference(self, dbref, session=None, **kwargs): + def dereference(self, dbref: DBRef, + session: Optional["ClientSession"] = None, + **kwargs: Any + ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 5e0843e4df..1bb599af37 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -15,6 +15,7 @@ """Advanced options for MongoDB drivers implemented on top of PyMongo.""" from collections import namedtuple +from typing import Optional class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): @@ -26,7 +27,7 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be None to accept PyMongo's default. """ - def __new__(cls, name, version=None, platform=None): + def __new__(cls, name: str, version: Optional[str] = None, platform: Optional[str] = None) -> "DriverInfo": self = super(DriverInfo, cls).__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4b08492ee9..b076f490f4 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -15,15 +15,16 @@ """Support for explicit client-side field level encryption.""" import contextlib -import os -import subprocess import uuid import weakref +from typing import Any, Mapping, Optional, Sequence try: from pymongocrypt.auto_encrypter import AutoEncrypter from pymongocrypt.errors import MongoCryptError - from pymongocrypt.explicit_encrypter import ExplicitEncrypter + from pymongocrypt.explicit_encrypter import ( + ExplicitEncrypter + ) from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback _HAVE_PYMONGOCRYPT = True @@ -32,29 +33,22 @@ MongoCryptCallback = object from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary from bson.codec_options import CodecOptions -from bson.binary import (Binary, - STANDARD, - UUID_SUBTYPE) from bson.errors import BSONError -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, - RawBSONDocument, +from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson) from bson.son import SON - -from pymongo.errors import (ConfigurationError, - EncryptionError, - InvalidOperation, - ServerSelectionTimeoutError) +from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts +from pymongo.errors import (ConfigurationError, EncryptionError, + InvalidOperation, ServerSelectionTimeoutError) from pymongo.mongo_client import MongoClient -from pymongo.pool import _configured_socket, PoolOptions +from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern from pymongo.ssl_support import get_ssl_context from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern -from pymongo.daemon import _spawn_daemon - _HTTPS_PORT = 443 _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. @@ -80,9 +74,10 @@ def _wrap_encryption_errors(): raise EncryptionError(exc) -class _EncryptionIO(MongoCryptCallback): +class _EncryptionIO(MongoCryptCallback): # type: ignore def __init__(self, client, key_vault_coll, mongocryptd_client, opts): """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any # Use a weak ref to break reference cycle. if client is not None: self.client_ref = weakref.ref(client) @@ -355,11 +350,17 @@ class Algorithm(object): "AEAD_AES_256_CBC_HMAC_SHA_512-Random") + class ClientEncryption(object): """Explicit client-side field level encryption.""" - def __init__(self, kms_providers, key_vault_namespace, key_vault_client, - codec_options, kms_tls_options=None): + def __init__(self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None + ) -> None: """Explicit client-side field level encryption. The ClientEncryption class encapsulates explicit operations on a key @@ -449,12 +450,15 @@ def __init__(self, kms_providers, key_vault_namespace, key_vault_client, opts = AutoEncryptionOpts(kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options) - self._io_callbacks = _EncryptionIO(None, key_vault_coll, None, opts) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(None, key_vault_coll, None, opts) self._encryption = ExplicitEncrypter( self._io_callbacks, MongoCryptOptions(kms_providers, None)) - def create_data_key(self, kms_provider, master_key=None, - key_alt_names=None): + def create_data_key(self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None + ) -> Binary: """Create and insert a new data key into the key vault collection. :Parameters: @@ -526,7 +530,12 @@ def create_data_key(self, kms_provider, master_key=None, kms_provider, master_key=master_key, key_alt_names=key_alt_names) - def encrypt(self, value, algorithm, key_id=None, key_alt_name=None): + def encrypt(self, + value: Any, + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None + ) -> Binary: """Encrypt a BSON value with a given key and algorithm. Note that exactly one of ``key_id`` or ``key_alt_name`` must be @@ -557,7 +566,7 @@ def encrypt(self, value, algorithm, key_id=None, key_alt_name=None): doc, algorithm, key_id=key_id, key_alt_name=key_alt_name) return decode(encrypted_doc)['v'] - def decrypt(self, value): + def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. :Parameters: @@ -578,17 +587,17 @@ def decrypt(self, value): return decode(decrypted_doc, codec_options=self._codec_options)['v'] - def __enter__(self): + def __enter__(self) -> "ClientEncryption": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() def _check_closed(self): if self._encryption is None: raise InvalidOperation("Cannot use closed ClientEncryption") - def close(self): + def close(self) -> None: """Release resources. Note that using this class in a with-statement will automatically call diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index d0c2d5ce72..21a13f6a5e 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -15,6 +15,7 @@ """Support for automatic client-side field level encryption.""" import copy +from typing import TYPE_CHECKING, Any, List, Mapping, Optional try: import pymongocrypt @@ -25,19 +26,25 @@ from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options +if TYPE_CHECKING: + from pymongo.mongo_client import MongoClient + class AutoEncryptionOpts(object): """Options to configure automatic client-side field level encryption.""" - def __init__(self, kms_providers, key_vault_namespace, - key_vault_client=None, - schema_map=None, - bypass_auto_encryption=False, - mongocryptd_uri='mongodb://localhost:27020', - mongocryptd_bypass_spawn=False, - mongocryptd_spawn_path='mongocryptd', - mongocryptd_spawn_args=None, - kms_tls_options=None): + def __init__(self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: Optional["MongoClient"] = None, + schema_map: Optional[Mapping[str, Any]] = None, + bypass_auto_encryption: Optional[bool] = False, + mongocryptd_uri: str = 'mongodb://localhost:27020', + mongocryptd_bypass_spawn: bool = False, + mongocryptd_spawn_path: str = 'mongocryptd', + mongocryptd_spawn_args: Optional[List[str]] = None, + kms_tls_options: Optional[Mapping[str, Any]] = None + ) -> None: """Options to configure automatic client-side field level encryption. Automatic client-side field level encryption requires MongoDB 4.2 @@ -152,8 +159,9 @@ def __init__(self, kms_providers, key_vault_namespace, self._mongocryptd_uri = mongocryptd_uri self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn self._mongocryptd_spawn_path = mongocryptd_spawn_path - self._mongocryptd_spawn_args = (copy.copy(mongocryptd_spawn_args) or - ['--idleShutdownTimeoutSecs=60']) + if mongocryptd_spawn_args is None: + mongocryptd_spawn_args = ['--idleShutdownTimeoutSecs=60'] + self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): raise TypeError('mongocryptd_spawn_args must be a list') if not any('idleShutdownTimeoutSecs' in s diff --git a/pymongo/errors.py b/pymongo/errors.py index 0ee35827a7..89c45730c9 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -13,6 +13,8 @@ # limitations under the License. """Exceptions raised by PyMongo.""" +from typing import (Any, Iterable, List, Mapping, Optional, Sequence, Tuple, + Union) from bson.errors import * @@ -23,18 +25,21 @@ try: from ssl import CertificateError as _CertificateError except ImportError: - class _CertificateError(ValueError): + class _CertificateError(ValueError): # type: ignore pass class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" - def __init__(self, message='', error_labels=None): + def __init__(self, + message: str = '', + error_labels: Optional[Iterable[str]] = None + ) -> None: super(PyMongoError, self).__init__(message) self._message = message self._error_labels = set(error_labels or []) - def has_error_label(self, label): + def has_error_label(self, label: str) -> bool: """Return True if this error contains the given label. .. versionadded:: 3.7 @@ -70,10 +75,17 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. """ - def __init__(self, message='', errors=None): + errors: Union[Mapping[str, Any], Sequence] + details: Union[Mapping[str, Any], Sequence] + + def __init__(self, + message: str = '', + errors: Optional[Union[Mapping[str, Any], Sequence]] = None + ) -> None: error_labels = None - if errors is not None and isinstance(errors, dict): - error_labels = errors.get('errorLabels') + if errors is not None: + if isinstance(errors, dict): + error_labels = errors.get('errorLabels') super(AutoReconnect, self).__init__(message, error_labels) self.errors = self.details = errors or [] @@ -109,7 +121,10 @@ class NotPrimaryError(AutoReconnect): .. versionadded:: 3.12 """ - def __init__(self, message='', errors=None): + def __init__(self, + message: str = '', + errors: Optional[Union[Mapping[str, Any], List]] = None + ) -> None: super(NotPrimaryError, self).__init__( _format_detailed_error(message, errors), errors=errors) @@ -139,7 +154,12 @@ class OperationFailure(PyMongoError): The :attr:`details` attribute. """ - def __init__(self, error, code=None, details=None, max_wire_version=None): + def __init__(self, + error: str, + code: Optional[int] = None, + details: Optional[Mapping[str, Any]] = None, + max_wire_version: Optional[int] = None, + ) -> None: error_labels = None if details is not None: error_labels = details.get('errorLabels') @@ -154,13 +174,13 @@ def _max_wire_version(self): return self.__max_wire_version @property - def code(self): + def code(self) -> Optional[int]: """The error code returned by the server, if any. """ return self.__code @property - def details(self): + def details(self) -> Optional[Mapping[str, Any]]: """The complete error document returned by the server. Depending on the error that occurred, the error document @@ -225,14 +245,17 @@ class BulkWriteError(OperationFailure): .. versionadded:: 2.7 """ - def __init__(self, results): + details: Mapping[str, Any] + + def __init__(self, results: Mapping[str, Any]) -> None: super(BulkWriteError, self).__init__( "batch op errors occurred", 65, results) - def __reduce__(self): + def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) + class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -264,12 +287,12 @@ class EncryptionError(PyMongoError): .. versionadded:: 3.9 """ - def __init__(self, cause): + def __init__(self, cause: Exception) -> None: super(EncryptionError, self).__init__(str(cause)) self.__cause = cause @property - def cause(self): + def cause(self) -> Exception: """The exception that caused this encryption or decryption error.""" return self.__cause diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 7d5501c372..f0857f8f45 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -26,8 +26,6 @@ ``MongoClient(event_listeners=[CommandLogger()])`` """ - - import logging from pymongo import monitoring @@ -42,18 +40,18 @@ class CommandLogger(monitoring.CommandListener): logs them at the `INFO` severity level using :mod:`logging`. .. versionadded:: 3.11 """ - def started(self, event): + def started(self, event: monitoring.CommandStartedEvent) -> None: logging.info("Command {0.command_name} with request id " "{0.request_id} started on server " "{0.connection_id}".format(event)) - def succeeded(self, event): + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "succeeded in {0.duration_micros} " "microseconds".format(event)) - def failed(self, event): + def failed(self, event: monitoring.CommandFailedEvent) -> None: logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "failed in {0.duration_micros} " @@ -70,11 +68,11 @@ class ServerLogger(monitoring.ServerListener): .. versionadded:: 3.11 """ - def opened(self, event): + def opened(self, event: monitoring.ServerOpeningEvent) -> None: logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) - def description_changed(self, event): + def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: previous_server_type = event.previous_description.server_type new_server_type = event.new_description.server_type if new_server_type != previous_server_type: @@ -84,7 +82,7 @@ def description_changed(self, event): "{0.previous_description.server_type_name} to " "{0.new_description.server_type_name}".format(event)) - def closed(self, event): + def closed(self, event: monitoring.ServerClosedEvent) -> None: logging.warning("Server {0.server_address} removed from topology " "{0.topology_id}".format(event)) @@ -99,17 +97,17 @@ class HeartbeatLogger(monitoring.ServerHeartbeatListener): .. versionadded:: 3.11 """ - def started(self, event): + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: logging.info("Heartbeat sent to server " "{0.connection_id}".format(event)) - def succeeded(self, event): + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: # The reply.document attribute was added in PyMongo 3.4. logging.info("Heartbeat to server {0.connection_id} " "succeeded with reply " "{0.reply.document}".format(event)) - def failed(self, event): + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: logging.warning("Heartbeat to server {0.connection_id} " "failed with error {0.reply}".format(event)) @@ -124,11 +122,11 @@ class TopologyLogger(monitoring.TopologyListener): .. versionadded:: 3.11 """ - def opened(self, event): + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: logging.info("Topology with id {0.topology_id} " "opened".format(event)) - def description_changed(self, event): + def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: logging.info("Topology description updated for " "topology id {0.topology_id}".format(event)) previous_topology_type = event.previous_description.topology_type @@ -146,7 +144,7 @@ def description_changed(self, event): if not event.new_description.has_readable_server(): logging.warning("No readable servers available.") - def closed(self, event): + def closed(self, event: monitoring.TopologyClosedEvent) -> None: logging.info("Topology with id {0.topology_id} " "closed".format(event)) @@ -168,43 +166,43 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): .. versionadded:: 3.11 """ - def pool_created(self, event): + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: logging.info("[pool {0.address}] pool created".format(event)) def pool_ready(self, event): logging.info("[pool {0.address}] pool ready".format(event)) - def pool_cleared(self, event): + def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: logging.info("[pool {0.address}] pool cleared".format(event)) - def pool_closed(self, event): + def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: logging.info("[pool {0.address}] pool closed".format(event)) - def connection_created(self, event): + def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection created".format(event)) - def connection_ready(self, event): + def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection setup succeeded".format(event)) - def connection_closed(self, event): + def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection closed, reason: " "{0.reason}".format(event)) - def connection_check_out_started(self, event): + def connection_check_out_started(self, event: monitoring.ConnectionCheckOutStartedEvent) -> None: logging.info("[pool {0.address}] connection check out " "started".format(event)) - def connection_check_out_failed(self, event): + def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: logging.info("[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event)) - def connection_checked_out(self, event): + def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection checked out of pool".format(event)) - def connection_checked_in(self, event): + def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: logging.info("[pool {0.address}][conn #{0.connection_id}] " "connection checked into pool".format(event)) diff --git a/pymongo/hello.py b/pymongo/hello.py index 0ad06e9619..ba09d80e32 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -14,10 +14,15 @@ """Helpers for the 'hello' and legacy hello commands.""" +import copy +import datetime import itertools +from typing import Any, Generic, List, Mapping, Optional, Set, Tuple +from bson.objectid import ObjectId from pymongo import common from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _DocumentType class HelloCompat: @@ -56,7 +61,7 @@ def _get_server_type(doc): return SERVER_TYPE.Standalone -class Hello(object): +class Hello(Generic[_DocumentType]): """Parse a hello response from the server. .. versionadded:: 3.12 @@ -64,9 +69,9 @@ class Hello(object): __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', '_awaitable') - def __init__(self, doc, awaitable=False): + def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: self._server_type = _get_server_type(doc) - self._doc = doc + self._doc: _DocumentType = doc self._is_writable = self._server_type in ( SERVER_TYPE.RSPrimary, SERVER_TYPE.Standalone, @@ -79,19 +84,19 @@ def __init__(self, doc, awaitable=False): self._awaitable = awaitable @property - def document(self): + def document(self) -> _DocumentType: """The complete hello command response document. .. versionadded:: 3.4 """ - return self._doc.copy() + return copy.copy(self._doc) @property - def server_type(self): + def server_type(self) -> int: return self._server_type @property - def all_hosts(self): + def all_hosts(self) -> Set[Tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return set(map(common.clean_node, itertools.chain( self._doc.get('hosts', []), @@ -99,12 +104,12 @@ def all_hosts(self): self._doc.get('arbiters', [])))) @property - def tags(self): + def tags(self) -> Mapping[str, Any]: """Replica set member tags or empty dict.""" return self._doc.get('tags', {}) @property - def primary(self): + def primary(self) -> Optional[Tuple[str, int]]: """This server's opinion about who the primary is, or None.""" if self._doc.get('primary'): return common.partition_node(self._doc['primary']) @@ -112,70 +117,71 @@ def primary(self): return None @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self._doc.get('setName') @property - def max_bson_size(self): + def max_bson_size(self) -> int: return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) @property - def max_message_size(self): + def max_message_size(self) -> int: return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) @property - def max_write_batch_size(self): + def max_write_batch_size(self) -> int: return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) @property - def min_wire_version(self): + def min_wire_version(self) -> int: return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) @property - def max_wire_version(self): + def max_wire_version(self) -> int: return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) @property - def set_version(self): + def set_version(self) -> Optional[int]: return self._doc.get('setVersion') @property - def election_id(self): + def election_id(self) -> Optional[ObjectId]: return self._doc.get('electionId') @property - def cluster_time(self): + def cluster_time(self) -> Optional[Mapping[str, Any]]: return self._doc.get('$clusterTime') @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: return self._doc.get('logicalSessionTimeoutMinutes') @property - def is_writable(self): + def is_writable(self) -> bool: return self._is_writable @property - def is_readable(self): + def is_readable(self) -> bool: return self._is_readable @property - def me(self): + def me(self) -> Optional[Tuple[str, int]]: me = self._doc.get('me') if me: return common.clean_node(me) + return None @property - def last_write_date(self): + def last_write_date(self) -> Optional[datetime.datetime]: return self._doc.get('lastWrite', {}).get('lastWriteDate') @property - def compressors(self): + def compressors(self) -> Optional[List[str]]: return self._doc.get('compression') @property - def sasl_supported_mechs(self): + def sasl_supported_mechs(self) -> List[str]: """Supported authentication mechanisms for the current user. For example:: @@ -187,22 +193,22 @@ def sasl_supported_mechs(self): return self._doc.get('saslSupportedMechs', []) @property - def speculative_authenticate(self): + def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: """The speculativeAuthenticate field.""" return self._doc.get('speculativeAuthenticate') @property - def topology_version(self): + def topology_version(self) -> Optional[Mapping[str, Any]]: return self._doc.get('topologyVersion') @property - def awaitable(self): + def awaitable(self) -> bool: return self._awaitable @property - def service_id(self): + def service_id(self) -> Optional[ObjectId]: return self._doc.get('serviceId') @property - def hello_ok(self): + def hello_ok(self) -> bool: return self._doc.get('helloOk', False) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index a9d40d8103..b2726dca6b 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -16,18 +16,14 @@ import sys import traceback - from collections import abc +from typing import Any from bson.son import SON from pymongo import ASCENDING -from pymongo.errors import (CursorNotFound, - DuplicateKeyError, - ExecutionTimeout, - NotPrimaryError, - OperationFailure, - WriteError, - WriteConcernError, +from pymongo.errors import (CursorNotFound, DuplicateKeyError, + ExecutionTimeout, NotPrimaryError, + OperationFailure, WriteConcernError, WriteError, WTimeoutError) from pymongo.hello import HelloCompat @@ -95,7 +91,7 @@ def _index_document(index_list): if not len(index_list): raise ValueError("key_or_list must not be the empty list") - index = SON() + index: SON[str, Any] = SON() for (key, value) in index_list: if not isinstance(key, str): raise TypeError( diff --git a/pymongo/message.py b/pymongo/message.py index f632214a08..ac6000cfd2 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -23,8 +23,8 @@ import datetime import random import struct - from io import BytesIO as _BytesIO +from typing import Any import bson from bson import (CodecOptions, @@ -32,30 +32,24 @@ _decode_selective, _dict_to_bson, _make_c_string) -from bson import codec_options from bson.int64 import Int64 -from bson.raw_bson import (_inflate_bson, DEFAULT_RAW_BSON_OPTIONS, - RawBSONDocument) +from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, + _inflate_bson) from bson.son import SON try: - from pymongo import _cmessage + from pymongo import _cmessage # type: ignore[attr-defined] _use_c = True except ImportError: _use_c = False -from pymongo.errors import (ConfigurationError, - CursorNotFound, - DocumentTooLarge, - ExecutionTimeout, - InvalidOperation, - NotPrimaryError, - OperationFailure, - ProtocolError) +from pymongo.errors import (ConfigurationError, CursorNotFound, + DocumentTooLarge, ExecutionTimeout, + InvalidOperation, NotPrimaryError, + OperationFailure, ProtocolError) from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern - MAX_INT32 = 2147483647 MIN_INT32 = -2147483648 @@ -457,6 +451,7 @@ def use_command(self, sock_info): class _CursorAddress(tuple): """The server address (host, port) of a cursor, with namespace property.""" + __namespace: Any def __new__(cls, address, namespace): self = tuple.__new__(cls, address) @@ -762,6 +757,7 @@ def unack_write(self, cmd, request_id, msg, max_doc_size, docs): """A proxy for SocketInfo.unack_write that handles event publishing. """ if self.publish: + assert self.start_time is not None duration = datetime.datetime.now() - self.start_time cmd = self._start(cmd, request_id, docs) start = datetime.datetime.now() @@ -777,6 +773,7 @@ def unack_write(self, cmd, request_id, msg, max_doc_size, docs): self._succeed(request_id, reply, duration) except Exception as exc: if self.publish: + assert self.start_time is not None duration = (datetime.datetime.now() - start) + duration if isinstance(exc, OperationFailure): failure = _convert_write_result( @@ -795,6 +792,7 @@ def write_command(self, cmd, request_id, msg, docs): """A proxy for SocketInfo.write_command that handles event publishing. """ if self.publish: + assert self.start_time is not None duration = datetime.datetime.now() - self.start_time self._start(cmd, request_id, docs) start = datetime.datetime.now() @@ -1171,7 +1169,8 @@ def raw_response(self, cursor_id=None, user_fields=None): if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: - raise ExecutionTimeout(error_object.get("$err"), + default_msg = "operation exceeded time limit" + raise ExecutionTimeout(error_object.get("$err", default_msg), error_object.get("code"), error_object) raise OperationFailure("database error: %s" % diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 052ade3853..975fc87610 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -34,46 +34,41 @@ import contextlib import threading import weakref - from collections import defaultdict +from typing import (TYPE_CHECKING, Any, Dict, FrozenSet, Generic, List, + Mapping, Optional, Sequence, Set, Tuple, Type, Union, cast) -from bson.codec_options import DEFAULT_CODEC_OPTIONS +import bson +from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, + TypeRegistry) from bson.son import SON -from pymongo import (common, - database, - helpers, - message, - periodic_executor, - uri_parser, - client_session) -from pymongo.change_stream import ClusterChangeStream +from bson.timestamp import Timestamp +from pymongo import (client_session, common, database, helpers, message, + periodic_executor, uri_parser) +from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor -from pymongo.errors import (AutoReconnect, - BulkWriteError, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - NotPrimaryError, - OperationFailure, - PyMongoError, +from pymongo.errors import (AutoReconnect, BulkWriteError, ConfigurationError, + ConnectionFailure, InvalidOperation, + NotPrimaryError, OperationFailure, PyMongoError, ServerSelectionTimeoutError) from pymongo.pool import ConnectionClosedReason -from pymongo.read_preferences import ReadPreference +from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.topology import (Topology, - _ErrorContext) -from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.settings import TopologySettings -from pymongo.uri_parser import (_handle_option_deprecations, - _handle_security_options, - _normalize_options, - _check_options) -from pymongo.write_concern import DEFAULT_WRITE_CONCERN +from pymongo.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.uri_parser import (_check_options, _handle_option_deprecations, + _handle_security_options, _normalize_options) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + from pymongo.read_concern import ReadConcern -class MongoClient(common.BaseObject): +class MongoClient(common.BaseObject, Generic[_DocumentType]): """ A client-side representation of a MongoDB cluster. @@ -89,15 +84,15 @@ class MongoClient(common.BaseObject): # No host/port; these are retrieved from TopologySettings. _constructor_args = ('document_class', 'tz_aware', 'connect') - def __init__( - self, - host=None, - port=None, - document_class=dict, - tz_aware=None, - connect=None, - type_registry=None, - **kwargs): + def __init__(self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Type[_DocumentType] = dict, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: """Client for a MongoDB instance, a replica set, or a set of mongoses. The client object is thread-safe and has connection-pooling built in. @@ -621,7 +616,7 @@ def __init__( client.__my_database__ """ - self.__init_kwargs = {'host': host, + self.__init_kwargs: Dict[str, Any] = {'host': host, 'port': port, 'document_class': document_class, 'tz_aware': tz_aware, @@ -722,7 +717,7 @@ def __init__( self.__default_database_name = dbase self.__lock = threading.Lock() - self.__kill_cursors_queue = [] + self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners super(MongoClient, self).__init__(options.codec_options, @@ -765,7 +760,7 @@ def target(): # We strongly reference the executor and it weakly references us via # this closure. When the client is freed, stop the executor soon. - self_ref = weakref.ref(self, executor.close) + self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor if connect: @@ -798,9 +793,17 @@ def _server_property(self, attr_name): return getattr(server.description, attr_name) - def watch(self, pipeline=None, full_document=None, resume_after=None, - max_await_time_ms=None, batch_size=None, collation=None, - start_at_operation_time=None, session=None, start_after=None): + def watch(self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. Performs an aggregation with an implicit initial ``$changeStream`` @@ -891,7 +894,7 @@ def watch(self, pipeline=None, full_document=None, resume_after=None, start_after) @property - def topology_description(self): + def topology_description(self) -> TopologyDescription: """The description of the connected MongoDB deployment. >>> client.topology_description @@ -913,7 +916,7 @@ def topology_description(self): return self._topology.description @property - def address(self): + def address(self) -> Optional[Tuple[str, int]]: """(host, port) of the current standalone, primary, or mongos, or None. Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if @@ -940,7 +943,7 @@ def address(self): return self._server_property('address') @property - def primary(self): + def primary(self) -> Optional[Tuple[str, int]]: """The (host, port) of the current primary of the replica set. Returns ``None`` if this client is not connected to a replica set, @@ -953,7 +956,7 @@ def primary(self): return self._topology.get_primary() @property - def secondaries(self): + def secondaries(self) -> Set[Tuple[str, int]]: """The secondary members known to this client. A sequence of (host, port) pairs. Empty if this client is not @@ -966,7 +969,7 @@ def secondaries(self): return self._topology.get_secondaries() @property - def arbiters(self): + def arbiters(self) -> Set[Tuple[str, int]]: """Arbiters in the replica set. A sequence of (host, port) pairs. Empty if this client is not @@ -976,7 +979,7 @@ def arbiters(self): return self._topology.get_arbiters() @property - def is_primary(self): + def is_primary(self) -> bool: """If this client is connected to a server that can accept writes. True if the current server is a standalone, mongos, or the primary of @@ -987,7 +990,7 @@ def is_primary(self): return self._server_property('is_writable') @property - def is_mongos(self): + def is_mongos(self) -> bool: """If this client is connected to mongos. If the client is not connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available.. @@ -995,7 +998,7 @@ def is_mongos(self): return self._server_property('server_type') == SERVER_TYPE.Mongos @property - def nodes(self): + def nodes(self) -> FrozenSet[Tuple[str, Optional[int]]]: """Set of all currently connected servers. .. warning:: When connected to a replica set the value of :attr:`nodes` @@ -1009,7 +1012,7 @@ def nodes(self): return frozenset(s.address for s in description.known_servers) @property - def options(self): + def options(self) -> ClientOptions: """The configuration options for this client. :Returns: @@ -1040,7 +1043,7 @@ def _end_sessions(self, session_ids): # command. pass - def close(self): + def close(self) -> None: """Cleanup client resources and disconnect from MongoDB. End all server sessions created by this client by sending one or more @@ -1214,7 +1217,7 @@ def _retry_with_session(self, retryable, func, session, bulk): def _retry_internal(self, retryable, func, session, bulk): """Internal retryable write helper.""" max_wire_version = 0 - last_error = None + last_error: Optional[Exception] = None retrying = False def is_retrying(): @@ -1239,6 +1242,7 @@ def is_retrying(): if is_retrying(): # A retry is not possible because this server does # not support sessions raise the last error. + assert last_error is not None raise last_error retryable = False return func(session, sock_info, retryable) @@ -1247,6 +1251,7 @@ def is_retrying(): # The application may think the write was never attempted # if we raise ServerSelectionTimeoutError on the retry # attempt. Raise the original exception instead. + assert last_error is not None raise last_error # A ServerSelectionTimeoutError error indicates that there may # be a persistent outage. Attempting to retry in this case will @@ -1280,7 +1285,7 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable = (retryable and self.options.retry_reads and not (session and session.in_transaction)) - last_error = None + last_error: Optional[Exception] = None retrying = False while True: @@ -1292,6 +1297,7 @@ def _retryable_read(self, func, read_pref, session, address=None, if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. + assert last_error is not None raise last_error return func(session, server, sock_info, read_pref) except ServerSelectionTimeoutError: @@ -1299,6 +1305,7 @@ def _retryable_read(self, func, read_pref, session, address=None, # The application may think the write was never attempted # if we raise ServerSelectionTimeoutError on the retry # attempt. Raise the original exception instead. + assert last_error is not None raise last_error # A ServerSelectionTimeoutError error indicates that there may # be a persistent outage. Attempting to retry in this case will @@ -1322,15 +1329,15 @@ def _retryable_write(self, retryable, func, session): with self._tmp_session(session) as s: return self._retry_with_session(retryable, func, s, None) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return self._topology == other._topology return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __hash__(self): + def __hash__(self) -> int: return hash(self._topology) def _repr_helper(self): @@ -1366,7 +1373,7 @@ def option_repr(option, value): def __repr__(self): return ("MongoClient(%s)" % (self._repr_helper(),)) - def __getattr__(self, name): + def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. Raises :class:`~pymongo.errors.InvalidName` if an invalid @@ -1381,7 +1388,7 @@ def __getattr__(self, name): " database, use client[%r]." % (name, name, name)) return self.__getitem__(name) - def __getitem__(self, name): + def __getitem__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. Raises :class:`~pymongo.errors.InvalidName` if an invalid @@ -1539,9 +1546,10 @@ def __start_session(self, implicit, **kwargs): self, server_session, opts, implicit) def start_session(self, - causal_consistency=None, - default_transaction_options=None, - snapshot=False): + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession[_DocumentType]: """Start a logical session. This method takes the same parameters as @@ -1630,7 +1638,9 @@ def _process_response(self, reply, session): if session is not None: session._process_response(reply) - def server_info(self, session=None): + def server_info(self, + session: Optional[client_session.ClientSession] = None + ) -> Dict[str, Any]: """Get information about the MongoDB server we're connected to. :Parameters: @@ -1644,7 +1654,10 @@ def server_info(self, session=None): read_preference=ReadPreference.PRIMARY, session=session) - def list_databases(self, session=None, **kwargs): + def list_databases(self, + session: Optional[client_session.ClientSession] = None, + **kwargs: Any + ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the databases of the connected server. :Parameters: @@ -1673,7 +1686,9 @@ def list_databases(self, session=None, **kwargs): } return CommandCursor(admin["$cmd"], cursor, None) - def list_database_names(self, session=None): + def list_database_names(self, + session: Optional[client_session.ClientSession] = None + ) -> List[str]: """Get a list of the names of all databases on the connected server. :Parameters: @@ -1685,7 +1700,10 @@ def list_database_names(self, session=None): return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] - def drop_database(self, name_or_database, session=None): + def drop_database(self, + name_or_database: Union[str, database.Database], + session: Optional[client_session.ClientSession] = None + ) -> None: """Drop a database. Raises :class:`TypeError` if `name_or_database` is not an instance of @@ -1727,8 +1745,13 @@ def drop_database(self, name_or_database, session=None): parse_write_concern_error=True, session=session) - def get_default_database(self, default=None, codec_options=None, - read_preference=None, write_concern=None, read_concern=None): + def get_default_database(self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> database.Database[_DocumentType]: """Get the database named in the MongoDB connection URI. >>> uri = 'mongodb://host/my_database' @@ -1773,12 +1796,18 @@ def get_default_database(self, default=None, codec_options=None, raise ConfigurationError( 'No default database name defined or provided.') + name = cast(str, self.__default_database_name or default) return database.Database( - self, self.__default_database_name or default, codec_options, + self, name, codec_options, read_preference, write_concern, read_concern) - def get_database(self, name=None, codec_options=None, read_preference=None, - write_concern=None, read_concern=None): + def get_database(self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional["ReadConcern"] = None, + ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. @@ -1838,16 +1867,16 @@ def _database_default_options(self, name): read_preference=ReadPreference.PRIMARY, write_concern=DEFAULT_WRITE_CONCERN) - def __enter__(self): + def __enter__(self) -> "MongoClient[_DocumentType]": return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __iter__(self): + def __iter__(self) -> "MongoClient[_DocumentType]": return self - def __next__(self): + def __next__(self) -> None: raise TypeError("'MongoClient' object is not iterable") next = __next__ diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 039ec51942..388ba61687 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -18,10 +18,10 @@ import threading import time import weakref +from typing import Any, Mapping, cast from pymongo import common, periodic_executor -from pymongo.errors import (NotPrimaryError, - OperationFailure, +from pymongo.errors import (NotPrimaryError, OperationFailure, _OperationCancelled) from pymongo.hello import Hello from pymongo.periodic_executor import _shutdown_executors @@ -50,7 +50,7 @@ def target(): monitor = self_ref() if monitor is None: return False # Stop the executor. - monitor._run() + monitor._run() # type:ignore[attr-defined] return True executor = periodic_executor.PeriodicExecutor( @@ -214,8 +214,8 @@ def _check_server(self): return self._check_once() except (OperationFailure, NotPrimaryError) as exc: # Update max cluster time even when hello fails. - self._topology.receive_cluster_time( - exc.details.get('$clusterTime')) + details = cast(Mapping[str, Any], exc.details) + self._topology.receive_cluster_time(details.get('$clusterTime')) raise except ReferenceError: raise diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index b877e19a23..6f57200a3b 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -180,12 +180,21 @@ def connection_checked_in(self, event): handler first. """ +import datetime from collections import abc, namedtuple +from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional -from pymongo.hello import HelloCompat +from bson.objectid import ObjectId +from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _handle_exception +from pymongo.typings import _Address -_Listeners = namedtuple('Listeners', +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +_Listeners = namedtuple('_Listeners', ('command_listeners', 'server_listeners', 'server_heartbeat_listeners', 'topology_listeners', 'cmap_listeners')) @@ -193,6 +202,9 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) +_DocumentOut = Mapping[str, Any] + + class _EventListener(object): """Abstract base class for all event listeners.""" @@ -204,7 +216,7 @@ class CommandListener(_EventListener): and `CommandFailedEvent`. """ - def started(self, event): + def started(self, event: "CommandStartedEvent") -> None: """Abstract method to handle a `CommandStartedEvent`. :Parameters: @@ -212,7 +224,7 @@ def started(self, event): """ raise NotImplementedError - def succeeded(self, event): + def succeeded(self, event: "CommandSucceededEvent") -> None: """Abstract method to handle a `CommandSucceededEvent`. :Parameters: @@ -220,7 +232,7 @@ def succeeded(self, event): """ raise NotImplementedError - def failed(self, event): + def failed(self, event: "CommandFailedEvent") -> None: """Abstract method to handle a `CommandFailedEvent`. :Parameters: @@ -245,7 +257,7 @@ class ConnectionPoolListener(_EventListener): .. versionadded:: 3.9 """ - def pool_created(self, event): + def pool_created(self, event: "PoolCreatedEvent") -> None: """Abstract method to handle a :class:`PoolCreatedEvent`. Emitted when a Connection Pool is created. @@ -255,7 +267,7 @@ def pool_created(self, event): """ raise NotImplementedError - def pool_ready(self, event): + def pool_ready(self, event: "PoolReadyEvent") -> None: """Abstract method to handle a :class:`PoolReadyEvent`. Emitted when a Connection Pool is marked ready. @@ -267,7 +279,7 @@ def pool_ready(self, event): """ raise NotImplementedError - def pool_cleared(self, event): + def pool_cleared(self, event: "PoolClearedEvent") -> None: """Abstract method to handle a `PoolClearedEvent`. Emitted when a Connection Pool is cleared. @@ -277,7 +289,7 @@ def pool_cleared(self, event): """ raise NotImplementedError - def pool_closed(self, event): + def pool_closed(self, event: "PoolClosedEvent") -> None: """Abstract method to handle a `PoolClosedEvent`. Emitted when a Connection Pool is closed. @@ -287,7 +299,7 @@ def pool_closed(self, event): """ raise NotImplementedError - def connection_created(self, event): + def connection_created(self, event: "ConnectionCreatedEvent") -> None: """Abstract method to handle a :class:`ConnectionCreatedEvent`. Emitted when a Connection Pool creates a Connection object. @@ -297,7 +309,7 @@ def connection_created(self, event): """ raise NotImplementedError - def connection_ready(self, event): + def connection_ready(self, event: "ConnectionReadyEvent") -> None: """Abstract method to handle a :class:`ConnectionReadyEvent`. Emitted when a Connection has finished its setup, and is now ready to @@ -308,7 +320,7 @@ def connection_ready(self, event): """ raise NotImplementedError - def connection_closed(self, event): + def connection_closed(self, event: "ConnectionClosedEvent") -> None: """Abstract method to handle a :class:`ConnectionClosedEvent`. Emitted when a Connection Pool closes a Connection. @@ -318,7 +330,7 @@ def connection_closed(self, event): """ raise NotImplementedError - def connection_check_out_started(self, event): + def connection_check_out_started(self, event: "ConnectionCheckOutStartedEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. Emitted when the driver starts attempting to check out a connection. @@ -328,7 +340,7 @@ def connection_check_out_started(self, event): """ raise NotImplementedError - def connection_check_out_failed(self, event): + def connection_check_out_failed(self, event: "ConnectionCheckOutFailedEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. Emitted when the driver's attempt to check out a connection fails. @@ -338,7 +350,7 @@ def connection_check_out_failed(self, event): """ raise NotImplementedError - def connection_checked_out(self, event): + def connection_checked_out(self, event: "ConnectionCheckedOutEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. Emitted when the driver successfully checks out a Connection. @@ -348,7 +360,7 @@ def connection_checked_out(self, event): """ raise NotImplementedError - def connection_checked_in(self, event): + def connection_checked_in(self, event: "ConnectionCheckedInEvent") -> None: """Abstract method to handle a :class:`ConnectionCheckedInEvent`. Emitted when the driver checks in a Connection back to the Connection @@ -369,7 +381,7 @@ class ServerHeartbeatListener(_EventListener): .. versionadded:: 3.3 """ - def started(self, event): + def started(self, event: "ServerHeartbeatStartedEvent") -> None: """Abstract method to handle a `ServerHeartbeatStartedEvent`. :Parameters: @@ -377,7 +389,7 @@ def started(self, event): """ raise NotImplementedError - def succeeded(self, event): + def succeeded(self, event: "ServerHeartbeatSucceededEvent") -> None: """Abstract method to handle a `ServerHeartbeatSucceededEvent`. :Parameters: @@ -385,7 +397,7 @@ def succeeded(self, event): """ raise NotImplementedError - def failed(self, event): + def failed(self, event: "ServerHeartbeatFailedEvent") -> None: """Abstract method to handle a `ServerHeartbeatFailedEvent`. :Parameters: @@ -402,7 +414,7 @@ class TopologyListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event): + def opened(self, event: "TopologyOpenedEvent") -> None: """Abstract method to handle a `TopologyOpenedEvent`. :Parameters: @@ -410,7 +422,7 @@ def opened(self, event): """ raise NotImplementedError - def description_changed(self, event): + def description_changed(self, event: "TopologyDescriptionChangedEvent") -> None: """Abstract method to handle a `TopologyDescriptionChangedEvent`. :Parameters: @@ -418,7 +430,7 @@ def description_changed(self, event): """ raise NotImplementedError - def closed(self, event): + def closed(self, event: "TopologyClosedEvent") -> None: """Abstract method to handle a `TopologyClosedEvent`. :Parameters: @@ -435,7 +447,7 @@ class ServerListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event): + def opened(self, event: "ServerOpeningEvent") -> None: """Abstract method to handle a `ServerOpeningEvent`. :Parameters: @@ -443,7 +455,7 @@ def opened(self, event): """ raise NotImplementedError - def description_changed(self, event): + def description_changed(self, event: "ServerDescriptionChangedEvent") -> None: """Abstract method to handle a `ServerDescriptionChangedEvent`. :Parameters: @@ -451,7 +463,7 @@ def description_changed(self, event): """ raise NotImplementedError - def closed(self, event): + def closed(self, event: "ServerClosedEvent") -> None: """Abstract method to handle a `ServerClosedEvent`. :Parameters: @@ -478,7 +490,7 @@ def _validate_event_listeners(option, listeners): return listeners -def register(listener): +def register(listener: _EventListener) -> None: """Register a global event listener. :Parameters: @@ -525,8 +537,14 @@ class _CommandEvent(object): __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") - def __init__(self, command_name, request_id, connection_id, operation_id, - service_id=None): + def __init__( + self, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: self.__cmd_name = command_name self.__rqst_id = request_id self.__conn_id = connection_id @@ -534,22 +552,22 @@ def __init__(self, command_name, request_id, connection_id, operation_id, self.__service_id = service_id @property - def command_name(self): + def command_name(self) -> str: """The command name.""" return self.__cmd_name @property - def request_id(self): + def request_id(self) -> int: """The request id for this operation.""" return self.__rqst_id @property - def connection_id(self): + def connection_id(self) -> _Address: """The address (host, port) of the server this command was sent to.""" return self.__conn_id @property - def service_id(self): + def service_id(self) -> Optional[ObjectId]: """The service_id this command was sent to, or ``None``. .. versionadded:: 3.12 @@ -557,7 +575,7 @@ def service_id(self): return self.__service_id @property - def operation_id(self): + def operation_id(self) -> Optional[int]: """An id for this series of events or None.""" return self.__op_id @@ -576,28 +594,36 @@ class CommandStartedEvent(_CommandEvent): """ __slots__ = ("__cmd", "__db") - def __init__(self, command, database_name, *args, service_id=None): + def __init__( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: if not command: raise ValueError("%r is not a valid command" % (command,)) # Command name must be first key. command_name = next(iter(command)) super(CommandStartedEvent, self).__init__( - command_name, *args, service_id=service_id) + command_name, request_id, connection_id, operation_id, service_id=service_id) cmd_name, cmd_doc = command_name.lower(), command[command_name] if (cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command)): - self.__cmd = {} + self.__cmd: Mapping[str, Any] = {} else: self.__cmd = command self.__db = database_name @property - def command(self): + def command(self) -> _DocumentOut: """The command document.""" return self.__cmd @property - def database_name(self): + def database_name(self) -> str: """The name of the database this command was run against.""" return self.__db @@ -625,8 +651,16 @@ class CommandSucceededEvent(_CommandEvent): """ __slots__ = ("__duration_micros", "__reply") - def __init__(self, duration, reply, command_name, - request_id, connection_id, operation_id, service_id=None): + def __init__( + self, + duration: datetime.timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: super(CommandSucceededEvent, self).__init__( command_name, request_id, connection_id, operation_id, service_id=service_id) @@ -634,17 +668,17 @@ def __init__(self, duration, reply, command_name, cmd_name = command_name.lower() if (cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply)): - self.__reply = {} + self.__reply: Mapping[str, Any] = {} else: self.__reply = reply @property - def duration_micros(self): + def duration_micros(self) -> int: """The duration of this operation in microseconds.""" return self.__duration_micros @property - def reply(self): + def reply(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__reply @@ -672,18 +706,27 @@ class CommandFailedEvent(_CommandEvent): """ __slots__ = ("__duration_micros", "__failure") - def __init__(self, duration, failure, *args, service_id=None): - super(CommandFailedEvent, self).__init__(*args, service_id=service_id) + def __init__( + self, + duration: datetime.timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + ) -> None: + super(CommandFailedEvent, self).__init__(command_name, request_id, connection_id, operation_id, service_id=service_id) self.__duration_micros = _to_micros(duration) self.__failure = failure @property - def duration_micros(self): + def duration_micros(self) -> int: """The duration of this operation in microseconds.""" return self.__duration_micros @property - def failure(self): + def failure(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__failure @@ -700,11 +743,11 @@ class _PoolEvent(object): """Base class for pool events.""" __slots__ = ("__address",) - def __init__(self, address): + def __init__(self, address: _Address) -> None: self.__address = address @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server the pool is attempting to connect to. """ @@ -725,12 +768,12 @@ class PoolCreatedEvent(_PoolEvent): """ __slots__ = ("__options",) - def __init__(self, address, options): + def __init__(self, address: _Address, options: Dict[str, Any]) -> None: super(PoolCreatedEvent, self).__init__(address) self.__options = options @property - def options(self): + def options(self) -> Dict[str, Any]: """Any non-default pool options that were set on this Connection Pool. """ return self.__options @@ -764,12 +807,12 @@ class PoolClearedEvent(_PoolEvent): """ __slots__ = ("__service_id",) - def __init__(self, address, service_id=None): + def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: super(PoolClearedEvent, self).__init__(address) self.__service_id = service_id @property - def service_id(self): + def service_id(self) -> Optional[ObjectId]: """Connections with this service_id are cleared. When service_id is ``None``, all connections in the pool are cleared. @@ -839,19 +882,19 @@ class _ConnectionEvent(object): """Private base class for some connection events.""" __slots__ = ("__address", "__connection_id") - def __init__(self, address, connection_id): + def __init__(self, address: _Address, connection_id: int) -> None: self.__address = address self.__connection_id = connection_id @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server this connection is attempting to connect to. """ return self.__address @property - def connection_id(self): + def connection_id(self) -> int: """The ID of the Connection.""" return self.__connection_id @@ -958,19 +1001,19 @@ class ConnectionCheckOutFailedEvent(object): """ __slots__ = ("__address", "__reason") - def __init__(self, address, reason): + def __init__(self, address: _Address, reason: str) -> None: self.__address = address self.__reason = reason @property - def address(self): + def address(self) -> _Address: """The address (host, port) pair of the server this connection is attempting to connect to. """ return self.__address @property - def reason(self): + def reason(self) -> str: """A reason explaining why connection check out failed. The reason must be one of the strings from the @@ -1014,17 +1057,17 @@ class _ServerEvent(object): __slots__ = ("__server_address", "__topology_id") - def __init__(self, server_address, topology_id): + def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: self.__server_address = server_address self.__topology_id = topology_id @property - def server_address(self): + def server_address(self) -> _Address: """The address (host, port) pair of the server""" return self.__server_address @property - def topology_id(self): + def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id @@ -1041,19 +1084,19 @@ class ServerDescriptionChangedEvent(_ServerEvent): __slots__ = ('__previous_description', '__new_description') - def __init__(self, previous_description, new_description, *args): + def __init__(self, previous_description: "ServerDescription", new_description: "ServerDescription", *args: Any) -> None: super(ServerDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self): + def previous_description(self) -> "ServerDescription": """The previous :class:`~pymongo.server_description.ServerDescription`.""" return self.__previous_description @property - def new_description(self): + def new_description(self) -> "ServerDescription": """The new :class:`~pymongo.server_description.ServerDescription`.""" return self.__new_description @@ -1087,11 +1130,11 @@ class TopologyEvent(object): __slots__ = ('__topology_id') - def __init__(self, topology_id): + def __init__(self, topology_id: ObjectId) -> None: self.__topology_id = topology_id @property - def topology_id(self): + def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id @@ -1108,19 +1151,19 @@ class TopologyDescriptionChangedEvent(TopologyEvent): __slots__ = ('__previous_description', '__new_description') - def __init__(self, previous_description, new_description, *args): + def __init__(self, previous_description: "TopologyDescription", new_description: "TopologyDescription", *args: Any) -> None: super(TopologyDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self): + def previous_description(self) -> "TopologyDescription": """The previous :class:`~pymongo.topology_description.TopologyDescription`.""" return self.__previous_description @property - def new_description(self): + def new_description(self) -> "TopologyDescription": """The new :class:`~pymongo.topology_description.TopologyDescription`.""" return self.__new_description @@ -1154,11 +1197,11 @@ class _ServerHeartbeatEvent(object): __slots__ = ('__connection_id') - def __init__(self, connection_id): + def __init__(self, connection_id: _Address) -> None: self.__connection_id = connection_id @property - def connection_id(self): + def connection_id(self) -> _Address: """The address (host, port) of the server this heartbeat was sent to.""" return self.__connection_id @@ -1184,24 +1227,24 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): __slots__ = ('__duration', '__reply', '__awaited') - def __init__(self, duration, reply, connection_id, awaited=False): + def __init__(self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False) -> None: super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply self.__awaited = awaited @property - def duration(self): + def duration(self) -> float: """The duration of this heartbeat in microseconds.""" return self.__duration @property - def reply(self): + def reply(self) -> Hello: """An instance of :class:`~pymongo.hello.Hello`.""" return self.__reply @property - def awaited(self): + def awaited(self) -> bool: """Whether the heartbeat was awaited. If true, then :meth:`duration` reflects the sum of the round trip time @@ -1225,24 +1268,24 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): __slots__ = ('__duration', '__reply', '__awaited') - def __init__(self, duration, reply, connection_id, awaited=False): + def __init__(self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False) -> None: super(ServerHeartbeatFailedEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply self.__awaited = awaited @property - def duration(self): + def duration(self) -> float: """The duration of this heartbeat in microseconds.""" return self.__duration @property - def reply(self): + def reply(self) -> Exception: """A subclass of :exc:`Exception`.""" return self.__reply @property - def awaited(self): + def awaited(self) -> bool: """Whether the heartbeat was awaited. If true, then :meth:`duration` reflects the sum of the round trip time diff --git a/pymongo/network.py b/pymongo/network.py index a14e9924a4..48e5084e31 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -20,21 +20,16 @@ import struct import time - from bson import _decode_all_selective - from pymongo import helpers, message from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.compression_support import decompress, _NO_COMPRESSION -from pymongo.errors import (NotPrimaryError, - OperationFailure, - ProtocolError, +from pymongo.compression_support import _NO_COMPRESSION, decompress +from pymongo.errors import (NotPrimaryError, OperationFailure, ProtocolError, _OperationCancelled) from pymongo.message import _UNPACK_REPLY, _OpMsg from pymongo.monitoring import _is_speculative_authenticate from pymongo.socket_checker import _errno_from_exception - _UNPACK_HEADER = struct.Struct(" None: """Create an InsertOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -43,21 +45,25 @@ def _add_to_bulk(self, bulkobj): def __repr__(self): return "InsertOne(%r)" % (self._doc,) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return other._doc == self._doc return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other +_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_IndexKeyHint = Union[str, _IndexList] + + class DeleteOne(object): """Represents a delete_one operation.""" __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None, hint=None): + def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Create a DeleteOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -95,13 +101,13 @@ def _add_to_bulk(self, bulkobj): def __repr__(self): return "DeleteOne(%r, %r)" % (self._filter, self._collation) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ((other._filter, other._collation) == (self._filter, self._collation)) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other @@ -110,7 +116,7 @@ class DeleteMany(object): __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter, collation=None, hint=None): + def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Create a DeleteMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -148,13 +154,13 @@ def _add_to_bulk(self, bulkobj): def __repr__(self): return "DeleteMany(%r, %r)" % (self._filter, self._collation) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ((other._filter, other._collation) == (self._filter, self._collation)) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other @@ -163,8 +169,8 @@ class ReplaceOne(object): __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - def __init__(self, filter, replacement, upsert=False, collation=None, - hint=None): + def __init__(self, filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None) -> None: """Create a ReplaceOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -207,7 +213,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_replace(self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ( (other._filter, other._doc, other._upsert, other._collation, @@ -215,7 +221,7 @@ def __eq__(self, other): self._collation, other._hint)) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): @@ -241,7 +247,6 @@ def __init__(self, filter, doc, upsert, collation, array_filters, hint): if not isinstance(hint, str): hint = helpers._index_document(hint) - self._filter = filter self._doc = doc self._upsert = upsert @@ -272,8 +277,8 @@ class UpdateOne(_UpdateOp): __slots__ = () - def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None, hint=None): + def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Represents an update_one operation. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -319,8 +324,8 @@ class UpdateMany(_UpdateOp): __slots__ = () - def __init__(self, filter, update, upsert=False, collation=None, - array_filters=None, hint=None): + def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: """Create an UpdateMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -366,7 +371,7 @@ class IndexModel(object): __slots__ = ("__document",) - def __init__(self, keys, **kwargs): + def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: """Create an Index instance. For use with :meth:`~pymongo.collection.Collection.create_indexes`. @@ -437,7 +442,7 @@ def __init__(self, keys, **kwargs): self.__document['collation'] = collation @property - def document(self): + def document(self) -> Dict[str, Any]: """An index document suitable for passing to the createIndexes command. """ diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index e1690ee9b1..36e094c4cb 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -17,6 +17,7 @@ import threading import time import weakref +from typing import Any, Optional class PeriodicExecutor(object): @@ -41,7 +42,7 @@ def __init__(self, interval, min_interval, target, name=None): self._min_interval = min_interval self._target = target self._stopped = False - self._thread = None + self._thread: Optional[threading.Thread] = None self._name = name self._skip_sleep = False @@ -52,7 +53,7 @@ def __repr__(self): return '<%s(name=%s) object at 0x%x>' % ( self.__class__.__name__, self._name, id(self)) - def open(self): + def open(self) -> None: """Start. Multiple calls have no effect. Not safe to call from multiple threads at once. @@ -64,13 +65,14 @@ def open(self): # join should not block indefinitely because there is no # other work done outside the while loop in self._run. try: + assert self._thread is not None self._thread.join() except ReferenceError: # Thread terminated. pass self._thread_will_exit = False self._stopped = False - started = False + started: Any = False try: started = self._thread and self._thread.is_alive() except ReferenceError: @@ -84,7 +86,7 @@ def open(self): _register_executor(self) thread.start() - def close(self, dummy=None): + def close(self, dummy: Any = None) -> None: """Stop. To restart, call open(). The dummy parameter allows an executor's close method to be a weakref @@ -92,7 +94,7 @@ def close(self, dummy=None): """ self._stopped = True - def join(self, timeout=None): + def join(self, timeout: Optional[int] = None) -> None: if self._thread is not None: try: self._thread.join(timeout) @@ -100,14 +102,14 @@ def join(self, timeout=None): # Thread already terminated, or not yet started. pass - def wake(self): + def wake(self) -> None: """Execute the target function soon.""" self._event = True - def update_interval(self, new_interval): + def update_interval(self, new_interval: int) -> None: self._interval = new_interval - def skip_sleep(self): + def skip_sleep(self) -> None: self._skip_sleep = True def __should_stop(self): diff --git a/pymongo/pool.py b/pymongo/pool.py index 70920d5b23..c53c9f4736 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -18,50 +18,38 @@ import ipaddress import os import platform -import ssl import socket +import ssl import sys import threading import time import weakref +from typing import Any from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON -from pymongo import auth, helpers, __version__ +from pymongo import __version__, auth, helpers from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (MAX_BSON_SIZE, - MAX_CONNECTING, - MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, - MAX_POOL_SIZE, - MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, - MIN_POOL_SIZE, - ORDERED_TYPES, +from pymongo.common import (MAX_BSON_SIZE, MAX_CONNECTING, MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, MAX_POOL_SIZE, MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, MIN_POOL_SIZE, ORDERED_TYPES, WAIT_QUEUE_TIMEOUT) -from pymongo.errors import (AutoReconnect, - _CertificateError, - ConnectionFailure, - ConfigurationError, - InvalidOperation, - DocumentTooLarge, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError) -from pymongo.hello import HelloCompat, Hello +from pymongo.errors import (AutoReconnect, ConfigurationError, + ConnectionFailure, DocumentTooLarge, + InvalidOperation, NetworkTimeout, NotPrimaryError, + OperationFailure, PyMongoError, _CertificateError) +from pymongo.hello import Hello, HelloCompat from pymongo.monitoring import (ConnectionCheckOutFailedReason, ConnectionClosedReason) -from pymongo.network import (command, - receive_message) +from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import ( - SSLError as _SSLError, - HAS_SNI as _HAVE_SNI, - IPADDR_SAFE as _IPADDR_SAFE) +from pymongo.ssl_support import HAS_SNI as _HAVE_SNI +from pymongo.ssl_support import IPADDR_SAFE as _IPADDR_SAFE +from pymongo.ssl_support import SSLError as _SSLError + # For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are # not permitted for SNI hostname. @@ -73,7 +61,7 @@ def is_ip_address(address): return False try: - from fcntl import fcntl, F_GETFD, F_SETFD, FD_CLOEXEC + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl def _set_non_inheritable_non_atomic(fd): """Set the close-on-exec flag on the given file descriptor.""" flags = fcntl(fd, F_GETFD) @@ -82,7 +70,7 @@ def _set_non_inheritable_non_atomic(fd): # Windows, various platforms we don't claim to support # (Jython, IronPython, ...), systems that don't provide # everything we need from fcntl, etc. - def _set_non_inheritable_non_atomic(dummy): + def _set_non_inheritable_non_atomic(fd): """Dummy function for platforms that don't provide fcntl.""" pass @@ -145,7 +133,7 @@ def _set_keepalive_times(sock): _set_tcp_option(sock, 'TCP_KEEPINTVL', _MAX_TCP_KEEPINTVL) _set_tcp_option(sock, 'TCP_KEEPCNT', _MAX_TCP_KEEPCNT) -_METADATA = SON([ +_METADATA: SON[str, Any] = SON([ ('driver', SON([('name', 'PyMongo'), ('version', __version__)])), ]) @@ -205,7 +193,7 @@ def _set_keepalive_times(sock): if platform.python_implementation().startswith('PyPy'): _METADATA['platform'] = ' '.join( (platform.python_implementation(), - '.'.join(map(str, sys.pypy_version_info)), + '.'.join(map(str, sys.pypy_version_info)), # type: ignore '(Python %s)' % '.'.join(map(str, sys.version_info)))) elif sys.platform.startswith('java'): _METADATA['platform'] = ' '.join( @@ -688,7 +676,7 @@ def command(self, dbname, spec, session = _validate_session_write_concern(session, write_concern) # Ensure command name remains in first place. - if not isinstance(spec, ORDERED_TYPES): + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] spec = SON(spec) if not (write_concern is None or write_concern.acknowledged or @@ -1088,7 +1076,7 @@ def __init__(self, address, options, handshake=True): # LIFO pool. Sockets are ordered on idle time. Sockets claimed # and returned to pool from the left side. Stale sockets removed # from the right side. - self.sockets = collections.deque() + self.sockets: collections.deque = collections.deque() self.lock = threading.Lock() self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. @@ -1165,8 +1153,8 @@ def _reset(self, close, pause=True, service_id=None): if service_id is None: sockets, self.sockets = self.sockets, collections.deque() else: - discard = collections.deque() - keep = collections.deque() + discard: collections.deque = collections.deque() + keep: collections.deque = collections.deque() for sock_info in self.sockets: if sock_info.service_id == service_id: discard.append(sock_info) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index f7c53a59e5..c5a5f0936d 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -20,29 +20,28 @@ import ssl as _stdlibssl import sys as _sys import time as _time - from errno import EINTR as _EINTR - from ipaddress import ip_address as _ip_address -from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate -from OpenSSL import crypto as _crypto, SSL as _SSL -from service_identity.pyopenssl import ( - verify_hostname as _verify_hostname, - verify_ip_address as _verify_ip_address) +from cryptography.x509 import \ + load_der_x509_certificate as _load_der_x509_certificate +from OpenSSL import SSL as _SSL +from OpenSSL import crypto as _crypto from service_identity import ( - CertificateError as _SICertificateError, - VerificationError as _SIVerificationError) - -from pymongo.errors import ( - _CertificateError, - ConfigurationError as _ConfigurationError) -from pymongo.ocsp_support import ( - _load_trusted_ca_certs, - _ocsp_callback) + CertificateError as _SICertificateError +) +from service_identity import VerificationError as _SIVerificationError +from service_identity.pyopenssl import ( # + verify_hostname as _verify_hostname +) +from service_identity.pyopenssl import verify_ip_address as _verify_ip_address + +from pymongo.errors import ConfigurationError as _ConfigurationError +from pymongo.errors import _CertificateError from pymongo.ocsp_cache import _OCSPCache -from pymongo.socket_checker import ( - _errno_from_exception, SocketChecker as _SocketChecker) +from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback +from pymongo.socket_checker import SocketChecker as _SocketChecker +from pymongo.socket_checker import _errno_from_exception try: import certifi @@ -132,7 +131,7 @@ def recv(self, *args, **kwargs): def recv_into(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + return self._call(super(_sslConn, self).recv_into, *args, **kwargs) # type: ignore except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -147,7 +146,7 @@ def sendall(self, buf, flags=0): while total_sent < total_length: try: sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags) + super(_sslConn, self).send, view[total_sent:], flags) # type: ignore # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. @@ -296,7 +295,7 @@ def _load_wincerts(self, store): """Attempt to load CA certs from Windows trust store.""" cert_store = self._ctx.get_cert_store() oid = _stdlibssl.Purpose.SERVER_AUTH.oid - for cert, encoding, trust in _stdlibssl.enum_certificates(store): + for cert, encoding, trust in _stdlibssl.enum_certificates(store): # type: ignore if encoding == "x509_asn": if trust is True or oid in trust: cert_store.add_cert( diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index 7e9cc4485c..aaf67ef5a6 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -14,6 +14,8 @@ """Tools for working with read concerns.""" +from typing import Any, Dict, Optional + class ReadConcern(object): """ReadConcern @@ -29,7 +31,7 @@ class ReadConcern(object): """ - def __init__(self, level=None): + def __init__(self, level: Optional[str] = None) -> None: if level is None or isinstance(level, str): self.__level = level else: @@ -37,18 +39,18 @@ def __init__(self, level=None): 'level must be a string or None.') @property - def level(self): + def level(self) -> Optional[str]: """The read concern level.""" return self.__level @property - def ok_for_legacy(self): + def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with old wire protocol versions.""" return self.level is None or self.level == 'local' @property - def document(self): + def document(self) -> Dict[str, Any]: """The document representation of this read concern. .. note:: @@ -60,7 +62,7 @@ def document(self): doc['level'] = self.level return doc - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ReadConcern): return self.document == other.document return NotImplemented diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 2471d5834c..cc1317fb88 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -15,13 +15,13 @@ """Utilities for choosing which member of a replica set to read from.""" from collections import abc +from typing import Any, Dict, Mapping, Optional, Sequence from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError from pymongo.server_selectors import (member_with_tags_server_selector, secondary_with_tags_server_selector) - _PRIMARY = 0 _PRIMARY_PREFERRED = 1 _SECONDARY = 2 @@ -44,9 +44,9 @@ def _validate_tag_sets(tag_sets): if tag_sets is None: return tag_sets - if not isinstance(tag_sets, list): + if not isinstance(tag_sets, (list, tuple)): raise TypeError(( - "Tag sets %r invalid, must be a list") % (tag_sets,)) + "Tag sets %r invalid, must be a sequence") % (tag_sets,)) if len(tag_sets) == 0: raise ValueError(( "Tag sets %r invalid, must be None or contain at least one set of" @@ -59,7 +59,7 @@ def _validate_tag_sets(tag_sets): "bson.son.SON or other type that inherits from " "collection.Mapping" % (tags,)) - return tag_sets + return list(tag_sets) def _invalid_max_staleness_msg(max_staleness): @@ -93,6 +93,10 @@ def _validate_hedge(hedge): return hedge +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] + + class _ServerMode(object): """Base class for all read preferences. """ @@ -100,7 +104,7 @@ class _ServerMode(object): __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") - def __init__(self, mode, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, mode: int, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: self.__mongos_mode = _MONGOS_MODES[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) @@ -108,22 +112,22 @@ def __init__(self, mode, tag_sets=None, max_staleness=-1, hedge=None): self.__hedge = _validate_hedge(hedge) @property - def name(self): + def name(self) -> str: """The name of this read preference. """ return self.__class__.__name__ @property - def mongos_mode(self): + def mongos_mode(self) -> str: """The mongos mode of this read preference. """ return self.__mongos_mode @property - def document(self): + def document(self) -> Dict[str, Any]: """Read preference as a document. """ - doc = {'mode': self.__mongos_mode} + doc: Dict[str, Any] = {'mode': self.__mongos_mode} if self.__tag_sets not in (None, [{}]): doc['tags'] = self.__tag_sets if self.__max_staleness != -1: @@ -133,13 +137,13 @@ def document(self): return doc @property - def mode(self): + def mode(self) -> int: """The mode of this read preference instance. """ return self.__mode @property - def tag_sets(self): + def tag_sets(self) -> _TagSets: """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to read only from members whose ``dc`` tag has the value ``"ny"``. To specify a priority-order for tag sets, provide a list of @@ -154,14 +158,14 @@ def tag_sets(self): return list(self.__tag_sets) if self.__tag_sets else [{}] @property - def max_staleness(self): + def max_staleness(self) -> int: """The maximum estimated length of time (in seconds) a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations, or -1 for no maximum.""" return self.__max_staleness @property - def hedge(self): + def hedge(self) -> Optional[_Hedge]: """The read preference ``hedge`` parameter. A dictionary that configures how the server will perform hedged reads. @@ -185,7 +189,7 @@ def hedge(self): return self.__hedge @property - def min_wire_version(self): + def min_wire_version(self) -> int: """The wire protocol version the server must support. Some read preferences impose version requirements on all servers (e.g. @@ -201,7 +205,7 @@ def __repr__(self): return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( self.name, self.__tag_sets, self.__max_staleness, self.__hedge) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): return (self.mode == other.mode and self.tag_sets == other.tag_sets and @@ -209,7 +213,7 @@ def __eq__(self, other): self.hedge == other.hedge) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __getstate__(self): @@ -243,17 +247,17 @@ class Primary(_ServerMode): __slots__ = () - def __init__(self): + def __init__(self) -> None: super(Primary, self).__init__(_PRIMARY) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to a Selection.""" return selection.primary_selection def __repr__(self): return "Primary()" - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): return other.mode == _PRIMARY return NotImplemented @@ -289,11 +293,11 @@ class PrimaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(PrimaryPreferred, self).__init__( _PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" if selection.primary: return selection.primary_selection @@ -329,11 +333,11 @@ class Secondary(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(Secondary, self).__init__( _SECONDARY, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return secondary_with_tags_server_selector( self.tag_sets, @@ -370,11 +374,11 @@ class SecondaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(SecondaryPreferred, self).__init__( _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" secondaries = secondary_with_tags_server_selector( self.tag_sets, @@ -412,11 +416,11 @@ class Nearest(_ServerMode): __slots__ = () - def __init__(self, tag_sets=None, max_staleness=-1, hedge=None): + def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: super(Nearest, self).__init__( _NEAREST, tag_sets, max_staleness, hedge) - def __call__(self, selection): + def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return member_with_tags_server_selector( self.tag_sets, @@ -467,7 +471,7 @@ def __getattr__(self, name): Secondary, SecondaryPreferred, Nearest) -def make_read_preference(mode, tag_sets, max_staleness=-1): +def make_read_preference(mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): raise ConfigurationError("Read preference primary " @@ -476,7 +480,7 @@ def make_read_preference(mode, tag_sets, max_staleness=-1): raise ConfigurationError("Read preference primary cannot be " "combined with maxStalenessSeconds") return Primary() - return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) + return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore _MODES = ( @@ -545,7 +549,7 @@ class ReadPreference(object): NEAREST = Nearest() -def read_pref_mode_from_name(name): +def read_pref_mode_from_name(name: str) -> int: """Get the read preference mode from mongos/uri name. """ return _MONGOS_MODES.index(name) @@ -553,10 +557,12 @@ def read_pref_mode_from_name(name): class MovingAverage(object): """Tracks an exponentially-weighted moving average.""" - def __init__(self): + average: Optional[float] + + def __init__(self) -> None: self.average = None - def add_sample(self, sample): + def add_sample(self, sample: float) -> None: if sample < 0: # Likely system time change while waiting for hello response # and not using time.monotonic. Ignore it, the next one will @@ -569,9 +575,9 @@ def add_sample(self, sample): # average with alpha = 0.2. self.average = 0.8 * self.average + 0.2 * sample - def get(self): + def get(self) -> Optional[float]: """Get the calculated average, or None if no samples yet.""" return self.average - def reset(self): + def reset(self) -> None: self.average = None diff --git a/pymongo/results.py b/pymongo/results.py index 0374803249..637bf73b0f 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -13,6 +13,7 @@ # limitations under the License. """Result class definitions.""" +from typing import Any, Dict, List, Mapping, Optional, Sequence, cast from pymongo.errors import InvalidOperation @@ -22,7 +23,7 @@ class _WriteResult(object): __slots__ = ("__acknowledged",) - def __init__(self, acknowledged): + def __init__(self, acknowledged: bool) -> None: self.__acknowledged = acknowledged def _raise_if_unacknowledged(self, property_name): @@ -34,7 +35,7 @@ def _raise_if_unacknowledged(self, property_name): "error." % (property_name,)) @property - def acknowledged(self): + def acknowledged(self) -> bool: """Is this the result of an acknowledged write operation? The :attr:`acknowledged` attribute will be ``False`` when using @@ -59,12 +60,12 @@ class InsertOneResult(_WriteResult): __slots__ = ("__inserted_id", "__acknowledged") - def __init__(self, inserted_id, acknowledged): + def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id super(InsertOneResult, self).__init__(acknowledged) @property - def inserted_id(self): + def inserted_id(self) -> Any: """The inserted document's _id.""" return self.__inserted_id @@ -75,12 +76,12 @@ class InsertManyResult(_WriteResult): __slots__ = ("__inserted_ids", "__acknowledged") - def __init__(self, inserted_ids, acknowledged): + def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids super(InsertManyResult, self).__init__(acknowledged) @property - def inserted_ids(self): + def inserted_ids(self) -> List: """A list of _ids of the inserted documents, in the order provided. .. note:: If ``False`` is passed for the `ordered` parameter to @@ -99,17 +100,17 @@ class UpdateResult(_WriteResult): __slots__ = ("__raw_result", "__acknowledged") - def __init__(self, raw_result, acknowledged): + def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result super(UpdateResult, self).__init__(acknowledged) @property - def raw_result(self): + def raw_result(self) -> Dict[str, Any]: """The raw result document returned by the server.""" return self.__raw_result @property - def matched_count(self): + def matched_count(self) -> int: """The number of documents matched for this update.""" self._raise_if_unacknowledged("matched_count") if self.upserted_id is not None: @@ -117,13 +118,13 @@ def matched_count(self): return self.__raw_result.get("n", 0) @property - def modified_count(self): + def modified_count(self) -> int: """The number of documents modified. """ self._raise_if_unacknowledged("modified_count") - return self.__raw_result.get("nModified") + return cast(int, self.__raw_result.get("nModified")) @property - def upserted_id(self): + def upserted_id(self) -> Any: """The _id of the inserted document if an upsert took place. Otherwise ``None``. """ @@ -137,17 +138,17 @@ class DeleteResult(_WriteResult): __slots__ = ("__raw_result", "__acknowledged") - def __init__(self, raw_result, acknowledged): + def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result super(DeleteResult, self).__init__(acknowledged) @property - def raw_result(self): + def raw_result(self) -> Dict[str, Any]: """The raw result document returned by the server.""" return self.__raw_result @property - def deleted_count(self): + def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") return self.__raw_result.get("n", 0) @@ -158,7 +159,7 @@ class BulkWriteResult(_WriteResult): __slots__ = ("__bulk_api_result", "__acknowledged") - def __init__(self, bulk_api_result, acknowledged): + def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: """Create a BulkWriteResult instance. :Parameters: @@ -171,44 +172,45 @@ def __init__(self, bulk_api_result, acknowledged): super(BulkWriteResult, self).__init__(acknowledged) @property - def bulk_api_result(self): + def bulk_api_result(self) -> Dict[str, Any]: """The raw bulk API result.""" return self.__bulk_api_result @property - def inserted_count(self): + def inserted_count(self) -> int: """The number of documents inserted.""" self._raise_if_unacknowledged("inserted_count") - return self.__bulk_api_result.get("nInserted") + return cast(int, self.__bulk_api_result.get("nInserted")) @property - def matched_count(self): + def matched_count(self) -> int: """The number of documents matched for an update.""" self._raise_if_unacknowledged("matched_count") - return self.__bulk_api_result.get("nMatched") + return cast(int, self.__bulk_api_result.get("nMatched")) @property - def modified_count(self): + def modified_count(self) -> int: """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") - return self.__bulk_api_result.get("nModified") + return cast(int, self.__bulk_api_result.get("nModified")) @property - def deleted_count(self): + def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") - return self.__bulk_api_result.get("nRemoved") + return cast(int, self.__bulk_api_result.get("nRemoved")) @property - def upserted_count(self): + def upserted_count(self) -> int: """The number of documents upserted.""" self._raise_if_unacknowledged("upserted_count") - return self.__bulk_api_result.get("nUpserted") + return cast(int, self.__bulk_api_result.get("nUpserted")) @property - def upserted_ids(self): + def upserted_ids(self) -> Optional[Dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: return dict((upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"]) + return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 08a780c055..99445b06f0 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -13,13 +13,13 @@ # limitations under the License. """An implementation of RFC4013 SASLprep.""" - +from typing import Any, Optional try: import stringprep except ImportError: HAVE_STRINGPREP = False - def saslprep(data): + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """SASLprep dummy""" if isinstance(data, str): raise TypeError( @@ -29,6 +29,7 @@ def saslprep(data): else: HAVE_STRINGPREP = True import unicodedata + # RFC4013 section 2.3 prohibited output. _PROHIBITED = ( # A strict reading of RFC 4013 requires table c12 here, but @@ -44,7 +45,7 @@ def saslprep(data): stringprep.in_table_c8, stringprep.in_table_c9) - def saslprep(data, prohibit_unassigned_code_points=True): + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """An implementation of RFC4013 SASLprep. :Parameters: @@ -60,6 +61,8 @@ def saslprep(data, prohibit_unassigned_code_points=True): :Returns: The SASLprep'ed version of `data`. """ + prohibited: Any + if not isinstance(data, str): return data diff --git a/pymongo/server.py b/pymongo/server.py index cb9442d000..74093b05ed 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -17,11 +17,10 @@ from datetime import datetime from bson import _decode_all_selective - from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers import _check_command_response from pymongo.message import _convert_exception, _OpMsg -from pymongo.response import Response, PinnedResponse +from pymongo.response import PinnedResponse, Response from pymongo.server_type import SERVER_TYPE _CURSOR_DOC_FIELDS = {'cursor': {'firstBatch': 1, 'nextBatch': 1}} @@ -59,6 +58,8 @@ def close(self): Reconnect with open(). """ if self._publish: + assert self._listener is not None + assert self._events is not None self._events.put((self._listener.publish_server_closed, (self._description.address, self._topology_id))) self._monitor.close() @@ -169,6 +170,8 @@ def run_operation(self, sock_info, operation, read_preference, listeners, docs = _decode_all_selective( decrypted, operation.codec_options, user_fields) + response: Response + if client._should_pin_cursor(operation.session) or operation.exhaust: sock_info.pin_cursor() if isinstance(reply, _OpMsg): diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 2cbf6d63cd..0a9b799165 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -15,10 +15,13 @@ """Represent one server the driver is connected to.""" import time +from typing import Any, Dict, Mapping, Optional, Set, Tuple, cast from bson import EPOCH_NAIVE -from pymongo.server_type import SERVER_TYPE +from bson.objectid import ObjectId from pymongo.hello import Hello +from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _Address class ServerDescription(object): @@ -41,11 +44,12 @@ class ServerDescription(object): '_topology_version') def __init__( - self, - address, - hello=None, - round_trip_time=None, - error=None): + self, + address: _Address, + hello: Optional[Hello] = None, + round_trip_time: Optional[float] = None, + error: Optional[Exception] = None, + ) -> None: self._address = address if not hello: hello = Hello({}) @@ -72,9 +76,11 @@ def __init__( self._error = error self._topology_version = hello.topology_version if error: - if hasattr(error, 'details') and isinstance(error.details, dict): - self._topology_version = error.details.get('topologyVersion') + details = getattr(error, 'details', None) + if isinstance(details, dict): + self._topology_version = details.get('topologyVersion') + self._last_write_date: Optional[float] if hello.last_write_date: # Convert from datetime to seconds. delta = hello.last_write_date - EPOCH_NAIVE @@ -83,17 +89,17 @@ def __init__( self._last_write_date = None @property - def address(self): + def address(self) -> _Address: """The address (host, port) of this server.""" return self._address @property - def server_type(self): + def server_type(self) -> int: """The type of this server.""" return self._server_type @property - def server_type_name(self): + def server_type_name(self) -> str: """The server type as a human readable string. .. versionadded:: 3.4 @@ -101,78 +107,78 @@ def server_type_name(self): return SERVER_TYPE._fields[self._server_type] @property - def all_hosts(self): + def all_hosts(self) -> Set[Tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return self._all_hosts @property - def tags(self): + def tags(self) -> Mapping[str, Any]: return self._tags @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self._replica_set_name @property - def primary(self): + def primary(self) -> Optional[Tuple[str, int]]: """This server's opinion about who the primary is, or None.""" return self._primary @property - def max_bson_size(self): + def max_bson_size(self) -> int: return self._max_bson_size @property - def max_message_size(self): + def max_message_size(self) -> int: return self._max_message_size @property - def max_write_batch_size(self): + def max_write_batch_size(self) -> int: return self._max_write_batch_size @property - def min_wire_version(self): + def min_wire_version(self) -> int: return self._min_wire_version @property - def max_wire_version(self): + def max_wire_version(self) -> int: return self._max_wire_version @property - def set_version(self): + def set_version(self) -> Optional[int]: return self._set_version @property - def election_id(self): + def election_id(self) -> Optional[ObjectId]: return self._election_id @property - def cluster_time(self): + def cluster_time(self)-> Optional[Mapping[str, Any]]: return self._cluster_time @property - def election_tuple(self): + def election_tuple(self) -> Tuple[Optional[int], Optional[ObjectId]]: return self._set_version, self._election_id @property - def me(self): + def me(self) -> Optional[Tuple[str, int]]: return self._me @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: return self._ls_timeout_minutes @property - def last_write_date(self): + def last_write_date(self) -> Optional[float]: return self._last_write_date @property - def last_update_time(self): + def last_update_time(self) -> float: return self._last_update_time @property - def round_trip_time(self): + def round_trip_time(self) -> Optional[float]: """The current average latency or None.""" # This override is for unittesting only! if self._address in self._host_to_round_trip_time: @@ -181,28 +187,28 @@ def round_trip_time(self): return self._round_trip_time @property - def error(self): + def error(self) -> Optional[Exception]: """The last error attempting to connect to the server, or None.""" return self._error @property - def is_writable(self): + def is_writable(self) -> bool: return self._is_writable @property - def is_readable(self): + def is_readable(self) -> bool: return self._is_readable @property - def mongos(self): + def mongos(self) -> bool: return self._server_type == SERVER_TYPE.Mongos @property - def is_server_type_known(self): + def is_server_type_known(self) -> bool: return self.server_type != SERVER_TYPE.Unknown @property - def retryable_writes_supported(self): + def retryable_writes_supported(self) -> bool: """Checks if this server supports retryable writes.""" return (( self._ls_timeout_minutes is not None and @@ -210,20 +216,20 @@ def retryable_writes_supported(self): or self._server_type == SERVER_TYPE.LoadBalancer) @property - def retryable_reads_supported(self): + def retryable_reads_supported(self) -> bool: """Checks if this server supports retryable writes.""" return self._max_wire_version >= 6 @property - def topology_version(self): + def topology_version(self) -> Optional[Mapping[str, Any]]: return self._topology_version - def to_unknown(self, error=None): + def to_unknown(self, error: Optional[Exception] = None) -> "ServerDescription": unknown = ServerDescription(self.address, error=error) unknown._topology_version = self.topology_version return unknown - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, ServerDescription): return ((self._address == other.address) and (self._server_type == other.server_type) and @@ -242,7 +248,7 @@ def __eq__(self, other): return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): @@ -254,4 +260,4 @@ def __repr__(self): self.round_trip_time, errmsg) # For unittesting only. Use under no circumstances! - _host_to_round_trip_time = {} + _host_to_round_trip_time: Dict = {} diff --git a/pymongo/server_type.py b/pymongo/server_type.py index 101f9dba4c..ee53b6b97d 100644 --- a/pymongo/server_type.py +++ b/pymongo/server_type.py @@ -14,10 +14,19 @@ """Type codes for MongoDB servers.""" -from collections import namedtuple +from typing import NamedTuple -SERVER_TYPE = namedtuple('ServerType', - ['Unknown', 'Mongos', 'RSPrimary', 'RSSecondary', - 'RSArbiter', 'RSOther', 'RSGhost', - 'Standalone', 'LoadBalancer'])(*range(9)) +class _ServerType(NamedTuple): + Unknown: int + Mongos: int + RSPrimary: int + RSSecondary: int + RSArbiter: int + RSOther: int + RSGhost: int + Standalone: int + LoadBalancer: int + + +SERVER_TYPE = _ServerType(*range(9)) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 48f168be48..9eb3d5f084 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -16,7 +16,9 @@ import errno import select +import socket import sys +from typing import Any, Optional # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 @@ -34,17 +36,19 @@ def _errno_from_exception(exc): class SocketChecker(object): - def __init__(self): + def __init__(self) -> None: + self._poller: Optional[select.poll] if _HAVE_POLL: self._poller = select.poll() else: self._poller = None - def select(self, sock, read=False, write=False, timeout=0): + def select(self, sock: Any, read: bool = False, write: bool = False, timeout: int = 0) -> bool: """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. """ + res: Any while True: try: if self._poller: @@ -74,12 +78,12 @@ def select(self, sock, read=False, write=False, timeout=0): # ready: subsets of the first three arguments. Return # True if any of the lists are not empty. return any(res) - except (_SelectError, IOError) as exc: + except (_SelectError, IOError) as exc: # type: ignore if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue raise - def socket_closed(self, sock): + def socket_closed(self, sock: Any) -> bool: """Return True if we know socket has been closed, False otherwise. """ try: diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 69e075aec4..d9ee7b7c8a 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -26,6 +26,7 @@ from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError + # dnspython can return bytes or str from various parts # of its API depending on version. We always want str. def maybe_decode(text): @@ -38,7 +39,7 @@ def maybe_decode(text): def _resolve(*args, **kwargs): if hasattr(resolver, 'resolve'): # dnspython >= 2 - return resolver.resolve(*args, **kwargs) + return resolver.resolve(*args, **kwargs) # type: ignore # dnspython 1.X return resolver.query(*args, **kwargs) diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 2f35676f87..e546105141 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -32,6 +32,7 @@ SSLError = _ssl.SSLError from ssl import SSLContext + if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): from ssl import VERIFY_CRL_CHECK_LEAF # Python 3.7 uses OpenSSL's hostname matching implementation diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 5826f95801..b3428197b7 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -24,7 +24,7 @@ import pymongo.pyopenssl_context as _ssl except ImportError: try: - import pymongo.ssl_context as _ssl + import pymongo.ssl_context as _ssl # type: ignore[no-redef] except ImportError: HAVE_SSL = False @@ -74,7 +74,7 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, raise ConfigurationError( "tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - ctx.verify_flags = getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0) + setattr(ctx, 'verify_flags', getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) @@ -83,11 +83,11 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, ctx.verify_mode = verify_mode return ctx else: - class SSLError(Exception): + class SSLError(Exception): # type: ignore pass HAS_SNI = False IPADDR_SAFE = False - def get_ssl_context(*dummy): + def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" raise ConfigurationError("The ssl module is not available.") diff --git a/pymongo/topology.py b/pymongo/topology.py index 021a1dee60..b2d31ed314 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -21,35 +21,27 @@ import time import warnings import weakref +from typing import Any -from pymongo import (common, - helpers, - periodic_executor) +from pymongo import common, helpers, periodic_executor from pymongo.client_session import _ServerSessionPool -from pymongo.errors import (ConnectionFailure, - ConfigurationError, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError, - WriteError, - InvalidOperation) +from pymongo.errors import (ConfigurationError, ConnectionFailure, + InvalidOperation, NetworkTimeout, NotPrimaryError, + OperationFailure, PyMongoError, + ServerSelectionTimeoutError, WriteError) from pymongo.hello import Hello from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (any_server_selector, +from pymongo.server_selectors import (Selection, any_server_selector, arbiter_server_selector, - secondary_server_selector, readable_server_selector, - writable_server_selector, - Selection) -from pymongo.topology_description import (updated_topology_description, - _updated_topology_description_srv_polling, - TopologyDescription, - SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE) + secondary_server_selector, + writable_server_selector) +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE, TopologyDescription, + _updated_topology_description_srv_polling, updated_topology_description) def process_events_queue(queue_ref): @@ -80,12 +72,13 @@ def __init__(self, topology_settings): # Create events queue if there are publishers. self._events = None - self.__events_executor = None + self.__events_executor: Any = None if self._publish_server or self._publish_tp: self._events = queue.Queue(maxsize=100) if self._publish_tp: + assert self._events is not None self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) self._settings = topology_settings @@ -99,6 +92,7 @@ def __init__(self, topology_settings): self._description = topology_description if self._publish_tp: + assert self._events is not None initial_td = TopologyDescription(TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings) self._events.put(( @@ -107,6 +101,7 @@ def __init__(self, topology_settings): for seed in topology_settings.seeds: if self._publish_server: + assert self._events is not None self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) @@ -296,6 +291,7 @@ def _process_change(self, server_description, reset_pool=False): suppress_event = ((self._publish_server or self._publish_tp) and sd_old == server_description) if self._publish_server and not suppress_event: + assert self._events is not None self._events.put(( self._listeners.publish_server_description_changed, (sd_old, server_description, @@ -306,6 +302,7 @@ def _process_change(self, server_description, reset_pool=False): self._receive_cluster_time_no_lock(server_description.cluster_time) if self._publish_tp and not suppress_event: + assert self._events is not None self._events.put(( self._listeners.publish_topology_description_changed, (td_old, self._description, self._topology_id))) @@ -354,6 +351,7 @@ def _process_srv_update(self, seedlist): self._update_servers() if self._publish_tp: + assert self._events is not None self._events.put(( self._listeners.publish_topology_description_changed, (td_old, self._description, self._topology_id))) @@ -485,6 +483,7 @@ def close(self): # Publish only after releasing the lock. if self._publish_tp: + assert self._events is not None self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) if self._publish_server or self._publish_tp: diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index c13d00a64c..241ef5afbe 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -14,34 +14,48 @@ """Represent a deployment of MongoDB servers.""" -from collections import namedtuple from random import sample +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference, _AggWritePref +from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE +from pymongo.typings import _Address # Enumeration for various kinds of MongoDB cluster topologies. -TOPOLOGY_TYPE = namedtuple('TopologyType', [ - 'Single', 'ReplicaSetNoPrimary', 'ReplicaSetWithPrimary', 'Sharded', - 'Unknown', 'LoadBalanced'])(*range(6)) +class _TopologyType(NamedTuple): + Single: int + ReplicaSetNoPrimary: int + ReplicaSetWithPrimary: int + Sharded: int + Unknown: int + LoadBalanced: int + + +TOPOLOGY_TYPE = _TopologyType(*range(6)) # Topologies compatible with SRV record polling. -SRV_POLLING_TOPOLOGIES = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) +SRV_POLLING_TOPOLOGIES: Tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) + + +_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] class TopologyDescription(object): - def __init__(self, - topology_type, - server_descriptions, - replica_set_name, - max_set_version, - max_election_id, - topology_settings): + def __init__( + self, + topology_type: int, + server_descriptions: Dict[_Address, ServerDescription], + replica_set_name: Optional[str], + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], + topology_settings: Any, + ) -> None: """Representation of a deployment of MongoDB servers. :Parameters: @@ -81,7 +95,7 @@ def __init__(self, for s in readable_servers): self._ls_timeout_minutes = None else: - self._ls_timeout_minutes = min(s.logical_session_timeout_minutes + self._ls_timeout_minutes = min(s.logical_session_timeout_minutes # type: ignore for s in readable_servers) def _init_incompatible_err(self): @@ -104,23 +118,23 @@ def _init_incompatible_err(self): if server_too_new: self._incompatible_err = ( - "Server at %s:%d requires wire version %d, but this " + "Server at %s:%d requires wire version %d, but this " # type: ignore "version of PyMongo only supports up to %d." - % (s.address[0], s.address[1], + % (s.address[0], s.address[1] or 0, s.min_wire_version, common.MAX_SUPPORTED_WIRE_VERSION)) elif server_too_old: self._incompatible_err = ( - "Server at %s:%d reports wire version %d, but this " + "Server at %s:%d reports wire version %d, but this " # type: ignore "version of PyMongo requires at least %d (MongoDB %s)." - % (s.address[0], s.address[1], + % (s.address[0], s.address[1] or 0, s.max_wire_version, common.MIN_SUPPORTED_WIRE_VERSION, common.MIN_SUPPORTED_SERVER_VERSION)) break - def check_compatible(self): + def check_compatible(self) -> None: """Raise ConfigurationError if any server is incompatible. A server is incompatible if its wire protocol version range does not @@ -129,15 +143,15 @@ def check_compatible(self): if self._incompatible_err: raise ConfigurationError(self._incompatible_err) - def has_server(self, address): + def has_server(self, address: _Address) -> bool: return address in self._server_descriptions - def reset_server(self, address): + def reset_server(self, address: _Address) -> "TopologyDescription": """A copy of this description, with one server marked Unknown.""" unknown_sd = self._server_descriptions[address].to_unknown() return updated_topology_description(self, unknown_sd) - def reset(self): + def reset(self) -> "TopologyDescription": """A copy of this description, with all servers marked Unknown.""" if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary @@ -156,18 +170,18 @@ def reset(self): self._max_election_id, self._topology_settings) - def server_descriptions(self): + def server_descriptions(self) -> Dict[_Address, ServerDescription]: """Dict of (address, :class:`~pymongo.server_description.ServerDescription`).""" return self._server_descriptions.copy() @property - def topology_type(self): + def topology_type(self) -> int: """The type of this topology.""" return self._topology_type @property - def topology_type_name(self): + def topology_type_name(self) -> str: """The topology type as a human readable string. .. versionadded:: 3.4 @@ -175,44 +189,44 @@ def topology_type_name(self): return TOPOLOGY_TYPE._fields[self._topology_type] @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """The replica set name.""" return self._replica_set_name @property - def max_set_version(self): + def max_set_version(self) -> Optional[int]: """Greatest setVersion seen from a primary, or None.""" return self._max_set_version @property - def max_election_id(self): + def max_election_id(self) -> Optional[ObjectId]: """Greatest electionId seen from a primary, or None.""" return self._max_election_id @property - def logical_session_timeout_minutes(self): + def logical_session_timeout_minutes(self) -> Optional[int]: """Minimum logical session timeout, or None.""" return self._ls_timeout_minutes @property - def known_servers(self): + def known_servers(self) -> List[ServerDescription]: """List of Servers of types besides Unknown.""" return [s for s in self._server_descriptions.values() if s.is_server_type_known] @property - def has_known_servers(self): + def has_known_servers(self) -> bool: """Whether there are any Servers of types besides Unknown.""" return any(s for s in self._server_descriptions.values() if s.is_server_type_known) @property - def readable_servers(self): + def readable_servers(self) -> List[ServerDescription]: """List of readable Servers.""" return [s for s in self._server_descriptions.values() if s.is_readable] @property - def common_wire_version(self): + def common_wire_version(self) -> Optional[int]: """Minimum of all servers' max wire versions, or None.""" servers = self.known_servers if servers: @@ -221,11 +235,11 @@ def common_wire_version(self): return None @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self._topology_settings.heartbeat_frequency @property - def srv_max_hosts(self): + def srv_max_hosts(self) -> int: return self._topology_settings._srv_max_hosts def _apply_local_threshold(self, selection): @@ -238,7 +252,12 @@ def _apply_local_threshold(self, selection): return [s for s in selection.server_descriptions if (s.round_trip_time - fastest) <= threshold] - def apply_selector(self, selector, address=None, custom_selector=None): + def apply_selector( + self, + selector: Any, + address: Optional[_Address] = None, + custom_selector: Optional[_ServerSelector] = None + ) -> List[ServerDescription]: """List of servers matching the provided selector(s). :Parameters: @@ -288,7 +307,7 @@ def apply_selector(self, selector, address=None, custom_selector=None): custom_selector(selection.server_descriptions)) return self._apply_local_threshold(selection) - def has_readable_server(self, read_preference=ReadPreference.PRIMARY): + def has_readable_server(self, read_preference: _ServerMode =ReadPreference.PRIMARY) -> bool: """Does this topology have any readable servers available matching the given read preference? @@ -305,7 +324,7 @@ def has_readable_server(self, read_preference=ReadPreference.PRIMARY): common.validate_read_preference("read_preference", read_preference) return any(self.apply_selector(read_preference)) - def has_writable_server(self): + def has_writable_server(self) -> bool: """Does this topology have a writable server available? .. note:: When connected directly to a single server this method @@ -336,7 +355,9 @@ def __repr__(self): } -def updated_topology_description(topology_description, server_description): +def updated_topology_description( + topology_description: TopologyDescription, server_description: ServerDescription +) -> "TopologyDescription": """Return an updated copy of a TopologyDescription. :Parameters: diff --git a/pymongo/typings.py b/pymongo/typings.py new file mode 100644 index 0000000000..ae5aec3213 --- /dev/null +++ b/pymongo/typings.py @@ -0,0 +1,29 @@ +# Copyright 2022-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by PyMongo""" +from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, MutableMapping, Optional, + Tuple, Type, TypeVar, Union) + +if TYPE_CHECKING: + from bson.raw_bson import RawBSONDocument + from pymongo.collation import Collation + + +# Common Shared Types. +_Address = Tuple[str, Optional[int]] +_CollationIn = Union[Mapping[str, Any], "Collation"] +_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] +_Pipeline = List[Mapping[str, Any]] +_DocumentType = TypeVar('_DocumentType', Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any]) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 8c43d51770..c213f4217c 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -15,20 +15,19 @@ """Tools to parse and validate a MongoDB URI.""" import re -import warnings import sys - +import warnings +from typing import (Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, + Union, cast) from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options -from pymongo.common import ( - SRV_SERVICE_NAME, - get_validated_options, INTERNAL_URI_OPTION_NAME_MAP, - URI_OPTIONS_DEPRECATION_MAP, _CaseInsensitiveDictionary) +from pymongo.common import (INTERNAL_URI_OPTION_NAME_MAP, SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, get_validated_options) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver - SCHEME = 'mongodb://' SCHEME_LEN = len(SCHEME) SRV_SCHEME = 'mongodb+srv://' @@ -52,7 +51,7 @@ def _unquoted_percent(s): return True return False -def parse_userinfo(userinfo): +def parse_userinfo(userinfo: str) -> Tuple[str, str]: """Validates the format of user information in a MongoDB URI. Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", "]", "@") as per RFC 3986 must be escaped. @@ -76,7 +75,7 @@ def parse_userinfo(userinfo): return unquote_plus(user), unquote_plus(passwd) -def parse_ipv6_literal_host(entity, default_port): +def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> Tuple[str, Optional[Union[str, int]]]: """Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where @@ -98,7 +97,7 @@ def parse_ipv6_literal_host(entity, default_port): return entity[1: i], entity[i + 2:] -def parse_host(entity, default_port=DEFAULT_PORT): +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple[str, Optional[int]]: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port @@ -111,7 +110,7 @@ def parse_host(entity, default_port=DEFAULT_PORT): specified in entity. """ host = entity - port = default_port + port: Optional[Union[str, int]] = default_port if entity[0] == '[': host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): @@ -279,7 +278,7 @@ def _normalize_options(options): return options -def validate_options(opts, warn=False): +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: """Validates and normalizes options passed in a MongoDB URI. Returns a new dictionary of validated and normalized options. If warn is @@ -295,7 +294,7 @@ def validate_options(opts, warn=False): return get_validated_options(opts, warn) -def split_options(opts, validate=True, warn=False, normalize=True): +def split_options(opts: str, validate: bool = True, warn: bool = False, normalize: bool = True) -> MutableMapping[str, Any]: """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. @@ -340,7 +339,7 @@ def split_options(opts, validate=True, warn=False, normalize=True): return options -def split_hosts(hosts, default_port=DEFAULT_PORT): +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[Tuple[str, Optional[int]]]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. @@ -393,9 +392,16 @@ def _check_options(nodes, options): 'Cannot specify replicaSet with loadBalanced=true') -def parse_uri(uri, default_port=DEFAULT_PORT, validate=True, warn=False, - normalize=True, connect_timeout=None, srv_service_name=None, - srv_max_hosts=None): +def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None +) -> Dict[str, Any]: """Parse and validate a MongoDB URI. Returns a dict of the form:: diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 2075240f0a..5168948ee3 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -14,6 +14,8 @@ """Tools for working with write concerns.""" +from typing import Any, Dict, Optional, Union + from pymongo.errors import ConfigurationError @@ -45,8 +47,8 @@ class WriteConcern(object): __slots__ = ("__document", "__acknowledged", "__server_default") - def __init__(self, w=None, wtimeout=None, j=None, fsync=None): - self.__document = {} + def __init__(self, w: Optional[Union[int, str]] = None, wtimeout: Optional[int] = None, j: Optional[bool] = None, fsync: Optional[bool] = None) -> None: + self.__document: Dict[str, Any] = {} self.__acknowledged = True if wtimeout is not None: @@ -84,12 +86,12 @@ def __init__(self, w=None, wtimeout=None, j=None, fsync=None): self.__server_default = not self.__document @property - def is_server_default(self): + def is_server_default(self) -> bool: """Does this WriteConcern match the server default.""" return self.__server_default @property - def document(self): + def document(self) -> Dict[str, Any]: """The document representation of this write concern. .. note:: @@ -99,7 +101,7 @@ def document(self): return self.__document.copy() @property - def acknowledged(self): + def acknowledged(self) -> bool: """If ``True`` write operations will wait for acknowledgement before returning. """ @@ -109,12 +111,12 @@ def __repr__(self): return ("WriteConcern(%s)" % ( ", ".join("%s=%s" % kvt for kvt in self.__document.items()),)) - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): return self.__document == other.document return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: if isinstance(other, WriteConcern): return self.__document != other.document return NotImplemented diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index b752453f13..84c6baf60d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -25,7 +25,7 @@ try: import simplejson as json except ImportError: - import json # type: ignore + import json # type: ignore[no-redef] sys.path[0:0] = [""] diff --git a/test/test_cursor.py b/test/test_cursor.py index 8bea12228d..0b8ba049c2 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -889,7 +889,7 @@ def test_clone(self): # Every attribute should be the same. cursor2 = cursor.clone() - self.assertEqual(cursor.__dict__, cursor2.__dict__) + self.assertDictEqual(cursor.__dict__, cursor2.__dict__) # Shallow copies can so can mutate cursor2 = copy.copy(cursor) diff --git a/test/test_grid_file.py b/test/test_grid_file.py index a53e40c4c9..6d7cc7ba3b 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -238,7 +238,7 @@ def test_grid_out_cursor_options(self): cursor_dict.pop('_Cursor__session') cursor_clone_dict = cursor_clone.__dict__.copy() cursor_clone_dict.pop('_Cursor__session') - self.assertEqual(cursor_dict, cursor_clone_dict) + self.assertDictEqual(cursor_dict, cursor_clone_dict) self.assertRaises(NotImplementedError, cursor.add_option, 0) self.assertRaises(NotImplementedError, cursor.remove_option, 0) diff --git a/tools/clean.py b/tools/clean.py index 55896781a4..53729d6406 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -33,7 +33,7 @@ pass try: - from pymongo import _cmessage # type: ignore + from pymongo import _cmessage # type: ignore[attr-defined] sys.exit("could still import _cmessage") except ImportError: pass From 51691246e9b2ef8446f3716c9ba7bab1a9f4e1ad Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 3 Feb 2022 15:25:14 -0800 Subject: [PATCH 0575/2111] PYTHON-2858 Use OP_MSG to authenticate if server supports OP_MSG (#843) --- test/mockupdb/test_handshake.py | 39 ++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 34028a637f..c15aaff9b8 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -50,7 +50,7 @@ def respond(r): appname='my app', # For _check_handshake_data() **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()])) - + self.addCleanup(client.close) # We have an autoresponder luckily, so no need for `go()`. @@ -217,5 +217,42 @@ def test_handshake_not_either(self): with self.assertRaisesRegex(AssertionError, "does not match"): test_hello_with_option(self, OpMsg) + def test_handshake_max_wire(self): + server = MockupDB() + primary_response = {"hello": 1, "ok": 1, + "minWireVersion": 0, "maxWireVersion": 6} + self.found_auth_msg = False + + def responder(request): + if request.matches(OpMsg, saslStart=1): + self.found_auth_msg = True + # Immediately closes the connection with + # OperationFailure: Server returned an invalid nonce. + request.reply(OpMsgReply(**primary_response, + **{'payload': + b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' + b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' + b'tXdF9r,' + b's=4dcxugMJq2P4hQaDbGXZR8uR3ei' + b'PHrSmh4uhkg==,i=15000', + "saslSupportedMechs": [ + "SCRAM-SHA-1"]})) + else: + return request.reply(**primary_response) + + server.autoresponds(responder) + self.addCleanup(server.stop) + server.run() + client = MongoClient(server.uri, + username='username', + password='password', + ) + self.addCleanup(client.close) + self.assertRaises(OperationFailure, client.db.collection.find_one, + {"a": 1}) + self.assertTrue(self.found_auth_msg, "Could not find authentication " + "command with correct protocol") + + if __name__ == '__main__': unittest.main() From 561ee7cf77fcbdefb9e2f46691f2b2ba4c65198b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Feb 2022 17:33:16 -0800 Subject: [PATCH 0576/2111] PYTHON-3110 Remove use of example.com in CSFLE tests (#848) --- test/test_encryption.py | 51 ++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 8e47d44525..af4165f1d1 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1205,8 +1205,8 @@ def setUp(self): kms_tls_options=KMS_TLS_OPTS) kms_providers_invalid = copy.deepcopy(kms_providers) - kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'example.com:443' - kms_providers_invalid['gcp']['endpoint'] = 'example.com:443' + kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'doesnotexist.invalid:443' + kms_providers_invalid['gcp']['endpoint'] = 'doesnotexist.invalid:443' kms_providers_invalid['kmip']['endpoint'] = 'doesnotexist.local:5698' self.client_encryption_invalid = ClientEncryption( kms_providers=kms_providers_invalid, @@ -1214,7 +1214,8 @@ def setUp(self): key_vault_client=client_context.client, codec_options=OPTS, kms_tls_options=KMS_TLS_OPTS) - self._kmip_host_error = '' + self._kmip_host_error = None + self._invalid_host_error = None def tearDown(self): self.client_encryption.close() @@ -1295,9 +1296,9 @@ def test_06_aws_endpoint_invalid_host(self): "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "example.com" + "endpoint": "doesnotexist.invalid" } - with self.assertRaisesRegex(EncryptionError, 'parse error'): + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): self.client_encryption.create_data_key( 'aws', master_key=master_key) @@ -1309,8 +1310,8 @@ def test_07_azure(self): self.run_test_expected_success('azure', master_key) # The full error should be something like: - # "Invalid JSON in KMS response. HTTP status=404. Error: Got parse error at '<', position 0: 'SPECIAL_EXPECTED'" - with self.assertRaisesRegex(EncryptionError, 'parse error'): + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): self.client_encryption_invalid.create_data_key( 'azure', master_key=master_key) @@ -1326,8 +1327,8 @@ def test_08_gcp_valid_endpoint(self): self.run_test_expected_success('gcp', master_key) # The full error should be something like: - # "Invalid JSON in KMS response. HTTP status=404. Error: Got parse error at '<', position 0: 'SPECIAL_EXPECTED'" - with self.assertRaisesRegex(EncryptionError, 'parse error'): + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): self.client_encryption_invalid.create_data_key( 'gcp', master_key=master_key) @@ -1339,7 +1340,7 @@ def test_09_gcp_invalid_endpoint(self): "location": "global", "keyRing": "key-ring-csfle", "keyName": "key-name-csfle", - "endpoint": "example.com:443"} + "endpoint": "doesnotexist.invalid:443"} # The full error should be something like: # "Invalid KMS response, no access_token returned. HTTP status=200" @@ -1347,22 +1348,30 @@ def test_09_gcp_invalid_endpoint(self): self.client_encryption.create_data_key( 'gcp', master_key=master_key) - def kmip_host_error(self): - if self._kmip_host_error: - return self._kmip_host_error + def dns_error(self, host, port): # The full error should be something like: # "[Errno 8] nodename nor servname provided, or not known" - try: - socket.getaddrinfo('doesnotexist.local', 5698, socket.AF_INET, - socket.SOCK_STREAM) - except Exception as exc: - self._kmip_host_error = re.escape(str(exc)) - return self._kmip_host_error + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error( + 'doesnotexist.invalid', 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error('doesnotexist.local', 5698) + return self._kmip_host_error def test_10_kmip_invalid_endpoint(self): key = {'keyId': '1'} self.run_test_expected_success('kmip', key) - with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): self.client_encryption_invalid.create_data_key('kmip', key) def test_11_kmip_master_key_endpoint(self): @@ -1379,7 +1388,7 @@ def test_11_kmip_master_key_endpoint(self): def test_12_kmip_master_key_invalid_endpoint(self): key = {'keyId': '1', 'endpoint': 'doesnotexist.local:5698'} - with self.assertRaisesRegex(EncryptionError, self.kmip_host_error()): + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): self.client_encryption.create_data_key('kmip', key) From f4cef373283a95b00bc7b78626ae8fda23a472ed Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 7 Feb 2022 19:33:41 -0600 Subject: [PATCH 0577/2111] PYTHON-3064 Add typings to test package (#844) --- .github/workflows/test-python.yml | 3 +- bson/son.py | 2 +- mypy.ini | 10 ++ pymongo/socket_checker.py | 5 +- pymongo/srv_resolver.py | 2 +- pymongo/typings.py | 4 +- test/__init__.py | 29 ++++-- test/auth_aws/test_auth_aws.py | 1 + test/mockupdb/test_cursor_namespace.py | 6 ++ test/mockupdb/test_getmore_sharded.py | 2 +- test/mockupdb/test_handshake.py | 4 +- test/mockupdb/test_mixed_version_sharded.py | 4 +- test/mockupdb/test_op_msg.py | 2 + test/mockupdb/test_op_msg_read_preference.py | 5 +- test/mockupdb/test_reset_and_request_check.py | 12 ++- test/mockupdb/test_slave_okay_sharded.py | 2 +- test/performance/perf_test.py | 9 ++ test/test_auth.py | 6 +- test/test_binary.py | 6 ++ test/test_bson.py | 24 +++-- test/test_bulk.py | 91 ++++++++++--------- test/test_change_stream.py | 40 ++++++++ test/test_client.py | 16 ++-- test/test_cmap.py | 8 +- test/test_code.py | 3 +- test/test_collation.py | 6 ++ test/test_collection.py | 53 ++++++----- test/test_command_monitoring_legacy.py | 2 + test/test_common.py | 10 +- ...nnections_survive_primary_stepdown_spec.py | 6 +- test/test_crud_v1.py | 6 +- test/test_cursor.py | 20 ++-- test/test_custom_types.py | 43 +++++++-- test/test_database.py | 23 +++-- test/test_dbref.py | 7 +- test/test_decimal128.py | 1 + test/test_discovery_and_monitoring.py | 2 + test/test_encryption.py | 39 +++++--- test/test_examples.py | 7 ++ test/test_grid_file.py | 3 + test/test_gridfs.py | 35 +++++-- test/test_gridfs_bucket.py | 6 +- test/test_gridfs_spec.py | 3 + test/test_json_util.py | 4 +- test/test_max_staleness.py | 2 +- test/test_monitor.py | 2 +- test/test_monitoring.py | 14 ++- test/test_objectid.py | 4 +- test/test_ocsp_cache.py | 12 ++- test/test_raw_bson.py | 3 + test/test_read_concern.py | 1 + test/test_read_preferences.py | 22 +++-- test/test_read_write_concern_spec.py | 17 ++-- test/test_retryable_writes.py | 5 + test/test_sdam_monitoring_spec.py | 5 + test/test_server_selection.py | 5 +- test/test_server_selection_in_window.py | 2 +- test/test_session.py | 16 +++- test/test_srv_polling.py | 16 ++-- test/test_ssl.py | 34 ++++--- test/test_streaming_protocol.py | 2 +- test/test_transactions.py | 18 ++-- test/test_uri_parser.py | 4 +- test/test_write_concern.py | 2 +- test/unified_format.py | 9 +- test/utils.py | 27 ++---- test/utils_spec_runner.py | 9 +- 67 files changed, 542 insertions(+), 261 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 3ad5aa79fe..ca1845e2cd 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -46,4 +46,5 @@ jobs: pip install -e ".[zstd, srv]" - name: Run mypy run: | - mypy --install-types --non-interactive bson gridfs tools + mypy --install-types --non-interactive bson gridfs tools pymongo + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index test diff --git a/bson/son.py b/bson/son.py index 7207367f3d..bb39644637 100644 --- a/bson/son.py +++ b/bson/son.py @@ -28,7 +28,7 @@ # This is essentially the same as re._pattern_type RE_TYPE: Type[Pattern[Any]] = type(re.compile("")) -_Key = TypeVar("_Key", bound=str) +_Key = TypeVar("_Key") _Value = TypeVar("_Value") _T = TypeVar("_T") diff --git a/mypy.ini b/mypy.ini index 926bf95745..91b1121cd5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,6 +11,9 @@ warn_unused_configs = true warn_unused_ignores = true warn_redundant_casts = true +[mypy-gevent.*] +ignore_missing_imports = True + [mypy-kerberos.*] ignore_missing_imports = True @@ -29,5 +32,12 @@ ignore_missing_imports = True [mypy-snappy.*] ignore_missing_imports = True +[mypy-test.*] +allow_redefinition = true +allow_untyped_globals = true + [mypy-winkerberos.*] ignore_missing_imports = True + +[mypy-xmlrunner.*] +ignore_missing_imports = True diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 9eb3d5f084..42db7b9373 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -16,9 +16,8 @@ import errno import select -import socket import sys -from typing import Any, Optional +from typing import Any, Optional, Union # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 @@ -43,7 +42,7 @@ def __init__(self) -> None: else: self._poller = None - def select(self, sock: Any, read: bool = False, write: bool = False, timeout: int = 0) -> bool: + def select(self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0) -> bool: """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index d9ee7b7c8a..989e79131c 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -39,7 +39,7 @@ def maybe_decode(text): def _resolve(*args, **kwargs): if hasattr(resolver, 'resolve'): # dnspython >= 2 - return resolver.resolve(*args, **kwargs) # type: ignore + return resolver.resolve(*args, **kwargs) # dnspython 1.X return resolver.query(*args, **kwargs) diff --git a/pymongo/typings.py b/pymongo/typings.py index ae5aec3213..767eed36c5 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -14,7 +14,7 @@ """Type aliases used by PyMongo""" from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, MutableMapping, Optional, - Tuple, Type, TypeVar, Union) + Sequence, Tuple, Type, TypeVar, Union) if TYPE_CHECKING: from bson.raw_bson import RawBSONDocument @@ -25,5 +25,5 @@ _Address = Tuple[str, Optional[int]] _CollationIn = Union[Mapping[str, Any], "Collation"] _DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] -_Pipeline = List[Mapping[str, Any]] +_Pipeline = Sequence[Mapping[str, Any]] _DocumentType = TypeVar('_DocumentType', Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any]) diff --git a/test/__init__.py b/test/__init__.py index ab53b7fdc5..c02eb97949 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -40,6 +40,7 @@ from contextlib import contextmanager from functools import wraps +from typing import Dict, no_type_check from unittest import SkipTest import pymongo @@ -48,7 +49,9 @@ from bson.son import SON from pymongo import common, message from pymongo.common import partition_node +from pymongo.database import Database from pymongo.hello import HelloCompat +from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl from pymongo.uri_parser import parse_uri @@ -86,7 +89,7 @@ os.path.join(CERT_PATH, 'client.pem')) CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem')) -TLS_OPTIONS = dict(tls=True) +TLS_OPTIONS: Dict = dict(tls=True) if CLIENT_PEM: TLS_OPTIONS['tlsCertificateKeyFile'] = CLIENT_PEM if CA_PEM: @@ -102,13 +105,13 @@ # Remove after PYTHON-2712 from pymongo import pool pool._MOCK_SERVICE_ID = True - res = parse_uri(SINGLE_MONGOS_LB_URI) + res = parse_uri(SINGLE_MONGOS_LB_URI or "") host, port = res['nodelist'][0] db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd elif TEST_SERVERLESS: TEST_LOADBALANCER = True - res = parse_uri(SINGLE_MONGOS_LB_URI) + res = parse_uri(SINGLE_MONGOS_LB_URI or "") host, port = res['nodelist'][0] db_user = res['username'] or db_user db_pwd = res['password'] or db_pwd @@ -184,6 +187,7 @@ def enable(self): def __enter__(self): self.enable() + @no_type_check def disable(self): common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval @@ -224,6 +228,8 @@ def _all_users(db): class ClientContext(object): + client: MongoClient + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI def __init__(self): @@ -247,9 +253,9 @@ def __init__(self): self.tls = False self.tlsCertificateKeyFile = False self.server_is_resolvable = is_server_resolvable() - self.default_client_options = {} + self.default_client_options: Dict = {} self.sessions_enabled = False - self.client = None + self.client = None # type: ignore self.conn_lock = threading.Lock() self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER @@ -340,6 +346,7 @@ def _init_client(self): try: self.cmd_line = self.client.admin.command('getCmdLineOpts') except pymongo.errors.OperationFailure as e: + assert e.details is not None msg = e.details.get('errmsg', '') if e.code == 13 or 'unauthorized' in msg or 'login' in msg: # Unauthorized. @@ -418,6 +425,7 @@ def _init_client(self): else: self.server_parameters = self.client.admin.command( 'getParameter', '*') + assert self.cmd_line is not None if 'enableTestCommands=1' in self.cmd_line['argv']: self.test_commands_enabled = True elif 'parsed' in self.cmd_line: @@ -436,7 +444,8 @@ def _init_client(self): self.mongoses.append(address) if not self.serverless: # Check for another mongos on the next port. - next_address = address[0], address[1] + 1 + assert address is not None + next_address = address[0], address[1] + 1 mongos_client = self._connect( *next_address, **self.default_client_options) if mongos_client: @@ -496,6 +505,7 @@ def _check_user_provided(self): try: return db_user in _all_users(client.admin) except pymongo.errors.OperationFailure as e: + assert e.details is not None msg = e.details.get('errmsg', '') if e.code == 18 or 'auth fails' in msg: # Auth failed. @@ -505,6 +515,7 @@ def _check_user_provided(self): def _server_started_with_auth(self): # MongoDB >= 2.0 + assert self.cmd_line is not None if 'parsed' in self.cmd_line: parsed = self.cmd_line['parsed'] # MongoDB >= 2.6 @@ -525,6 +536,7 @@ def _server_started_with_ipv6(self): if not socket.has_ipv6: return False + assert self.cmd_line is not None if 'parsed' in self.cmd_line: if not self.cmd_line['parsed'].get('net', {}).get('ipv6'): return False @@ -932,6 +944,9 @@ def fail_point(self, command_args): class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" + client: MongoClient + db: Database + credentials: Dict[str, str] @classmethod @client_context.require_connection @@ -1073,7 +1088,7 @@ def run(self, test): if HAVE_XML: - class PymongoXMLTestRunner(XMLTestRunner): + class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] def run(self, test): setup() result = super(PymongoXMLTestRunner, self).run(test) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 0522201097..f096d0569a 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -26,6 +26,7 @@ class TestAuthAWS(unittest.TestCase): + uri: str @classmethod def setUpClass(cls): diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 10605601cf..a52e2fb4e7 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -21,6 +21,9 @@ class TestCursorNamespace(unittest.TestCase): + server: MockupDB + client: MongoClient + @classmethod def setUpClass(cls): cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) @@ -69,6 +72,9 @@ def op(): class TestKillCursorsNamespace(unittest.TestCase): + server: MockupDB + client: MongoClient + @classmethod def setUpClass(cls): cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 5461a13e35..0d91583378 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -27,7 +27,7 @@ def test_getmore_sharded(self): servers = [MockupDB(), MockupDB()] # Collect queries to either server in one queue. - q = Queue() + q: Queue = Queue() for server in servers: server.subscribe(q.put) server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index c15aaff9b8..29313de8c2 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -48,7 +48,7 @@ def respond(r): ServerApiVersion.V1))} client = MongoClient("mongodb://"+primary.address_string, appname='my app', # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v + **dict([k_map.get((k, v), (k, v)) for k, v # type: ignore[arg-type] in kwargs.items()])) self.addCleanup(client.close) @@ -58,7 +58,7 @@ def respond(r): # We do this checking here rather than in the autoresponder `respond()` # because it runs in another Python thread so there are some funky things - # with error handling within that thread, and we want to be able to use + # with error handling within that thread, and we want to be able to use # self.assertRaises(). self.handshake_req.assert_matches(protocol(hello, **kwargs)) _check_handshake_data(self.handshake_req) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 2b6ea6a513..c3af907404 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -30,7 +30,7 @@ def setup_server(self, upgrade): self.mongos_old, self.mongos_new = MockupDB(), MockupDB() # Collect queries to either server in one queue. - self.q = Queue() + self.q: Queue = Queue() for server in self.mongos_old, self.mongos_new: server.subscribe(self.q.put) server.autoresponds('getlasterror') @@ -59,7 +59,7 @@ def create_mixed_version_sharded_test(upgrade): def test(self): self.setup_server(upgrade) start = time.time() - servers_used = set() + servers_used: set = set() while len(servers_used) < 2: go(upgrade.function, self.client) request = self.q.get(timeout=1) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index 35e70cebfc..78397a3336 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -233,6 +233,8 @@ class TestOpMsg(unittest.TestCase): + server: MockupDB + client: MongoClient @classmethod def setUpClass(cls): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index d9adfe17eb..eb3a14fa01 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -14,6 +14,7 @@ import copy import itertools +from typing import Any from mockupdb import MockupDB, going, CommandBase from pymongo import MongoClient, ReadPreference @@ -27,6 +28,8 @@ class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False + primary: MockupDB + secondary: MockupDB @classmethod def setUpClass(cls): @@ -142,7 +145,7 @@ def test(self): tag_sets=None) client = self.setup_client(read_preference=pref) - + expected_pref: Any if operation.op_type == 'always-use-secondary': expected_server = self.secondary expected_pref = ReadPreference.SECONDARY diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 86c2085e39..48f9486544 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -27,7 +27,7 @@ class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): super(TestResetAndRequestCheck, self).__init__(*args, **kwargs) - self.ismaster_time = 0 + self.ismaster_time = 0.0 self.client = None self.server = None @@ -45,7 +45,7 @@ def responder(request): kwargs = {'socketTimeoutMS': 100} # Disable retryable reads when pymongo supports it. kwargs['retryReads'] = False - self.client = MongoClient(self.server.uri, **kwargs) + self.client = MongoClient(self.server.uri, **kwargs) # type: ignore wait_until(lambda: self.client.nodes, 'connect to standalone') def tearDown(self): @@ -56,6 +56,8 @@ def _test_disconnect(self, operation): # Application operation fails. Test that client resets server # description and does *not* schedule immediate check. self.setup_server() + assert self.server is not None + assert self.client is not None # Network error on application operation. with self.assertRaises(ConnectionFailure): @@ -81,6 +83,8 @@ def _test_timeout(self, operation): # Application operation times out. Test that client does *not* reset # server description and does *not* schedule immediate check. self.setup_server() + assert self.server is not None + assert self.client is not None with self.assertRaises(ConnectionFailure): with going(operation.function, self.client): @@ -91,6 +95,7 @@ def _test_timeout(self, operation): # Server is *not* Unknown. topology = self.client._topology server = topology.select_server_by_address(self.server.address, 0) + assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time @@ -99,6 +104,8 @@ def _test_timeout(self, operation): def _test_not_master(self, operation): # Application operation gets a "not master" error. self.setup_server() + assert self.server is not None + assert self.client is not None with self.assertRaises(ConnectionFailure): with going(operation.function, self.client): @@ -110,6 +117,7 @@ def _test_not_master(self, operation): # Server is rediscovered. topology = self.client._topology server = topology.select_server_by_address(self.server.address, 0) + assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 63bb0fe303..07e05bfece 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -37,7 +37,7 @@ def setup_server(self): self.mongos1, self.mongos2 = MockupDB(), MockupDB() # Collect queries to either server in one queue. - self.q = Queue() + self.q: Queue = Queue() for server in self.mongos1, self.mongos2: server.subscribe(self.q.put) server.run() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 84c6baf60d..7effa1c1ee 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -67,6 +67,10 @@ def __exit__(self, *args): class PerformanceTest(object): + dataset: Any + data_size: Any + do_task: Any + fail: Any @classmethod def setUpClass(cls): @@ -386,6 +390,7 @@ def mp_map(map_func, files): def insert_json_file(filename): + assert proc_client is not None with open(filename, 'r') as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) @@ -398,11 +403,13 @@ def insert_json_file_with_file_id(filename): doc = json.loads(line) doc['file'] = filename documents.append(doc) + assert proc_client is not None coll = proc_client.perftest.corpus coll.insert_many(documents) def read_json_file(filename): + assert proc_client is not None coll = proc_client.perftest.corpus temp = tempfile.TemporaryFile(mode='w') try: @@ -414,6 +421,7 @@ def read_json_file(filename): def insert_gridfs_file(filename): + assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) with open(filename, 'rb') as gfile: @@ -421,6 +429,7 @@ def insert_gridfs_file(filename): def read_gridfs_file(filename): + assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) temp = tempfile.TemporaryFile() diff --git a/test/test_auth.py b/test/test_auth.py index 35f198574b..5b4ef0c51f 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -76,6 +76,8 @@ def run(self): class TestGSSAPI(unittest.TestCase): + mech_properties: str + service_realm_required: bool @classmethod def setUpClass(cls): @@ -116,6 +118,7 @@ def test_credentials_hashing(self): @ignore_deprecations def test_gssapi_simple(self): + assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: uri = ('mongodb://%s:%s@%s:%d/?authMechanism=' 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), @@ -264,6 +267,8 @@ def test_sasl_plain(self): authMechanism='PLAIN') client.ldap.test.find_one() + assert SASL_USER is not None + assert SASL_PASS is not None uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' 'authSource=%s' % (quote_plus(SASL_USER), quote_plus(SASL_PASS), @@ -540,7 +545,6 @@ def test_cache(self): self.assertIsInstance(iterations, int) def test_scram_threaded(self): - coll = client_context.client.db.test coll.drop() coll.insert_one({'_id': 1}) diff --git a/test/test_binary.py b/test/test_binary.py index e6b681fc51..4bbda0c9d4 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -41,6 +41,8 @@ class TestBinary(unittest.TestCase): + csharp_data: bytes + java_data: bytes @classmethod def setUpClass(cls): @@ -354,6 +356,8 @@ def test_buffer_protocol(self): class TestUuidSpecExplicitCoding(unittest.TestCase): + uuid: uuid.UUID + @classmethod def setUpClass(cls): super(TestUuidSpecExplicitCoding, cls).setUpClass() @@ -457,6 +461,8 @@ def test_decoding_4(self): class TestUuidSpecImplicitCoding(IntegrationTest): + uuid: uuid.UUID + @classmethod def setUpClass(cls): super(TestUuidSpecImplicitCoding, cls).setUpClass() diff --git a/test/test_bson.py b/test/test_bson.py index eb4f4e47c2..7052042ca8 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -186,7 +186,7 @@ def test_encode_then_decode_any_mapping_legacy(self): decoder=lambda *args: BSON(args[0]).decode(*args[1:])) def test_encoding_defaultdict(self): - dct = collections.defaultdict(dict, [('foo', 'bar')]) + dct = collections.defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] encode(dct) self.assertEqual(dct, collections.defaultdict(dict, [('foo', 'bar')])) @@ -302,7 +302,7 @@ def test_basic_decode(self): def test_decode_all_buffer_protocol(self): docs = [{'foo': 'bar'}, {}] - bs = b"".join(map(encode, docs)) + bs = b"".join(map(encode, docs)) # type: ignore[arg-type] self.assertEqual(docs, decode_all(bytearray(bs))) self.assertEqual(docs, decode_all(memoryview(bs))) self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1])) @@ -530,7 +530,9 @@ def test_large_datetime_truncation(self): def test_aware_datetime(self): aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) - as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc) + offset = aware.utcoffset() + assert offset is not None + as_utc = (aware - offset).replace(tzinfo=utc) self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), as_utc) after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))[ @@ -591,7 +593,9 @@ def test_local_datetime(self): def test_naive_decode(self): aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) - naive_utc = (aware - aware.utcoffset()).replace(tzinfo=None) + offset = aware.utcoffset() + assert offset is not None + naive_utc = (aware - offset).replace(tzinfo=None) self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), naive_utc) after = decode(encode({"date": aware}))["date"] self.assertEqual(None, after.tzinfo) @@ -603,9 +607,9 @@ def test_dst(self): @unittest.skip('Disabled due to http://bugs.python.org/issue25222') def test_bad_encode(self): - evil_list = {'a': []} + evil_list: dict = {'a': []} evil_list['a'].append(evil_list) - evil_dict = {} + evil_dict: dict = {} evil_dict['a'] = evil_dict for evil_data in [evil_dict, evil_list]: self.assertRaises(Exception, encode, evil_data) @@ -1039,8 +1043,8 @@ def round_trip_pickle(self, obj, pickled_with_older): def test_regex_pickling(self): reg = Regex(".?") - pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' + pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' + b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' b'\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag' b's\x94K\x00ub.') self.round_trip_pickle(reg, pickled_with_3) @@ -1083,8 +1087,8 @@ def test_minkey_pickling(self): def test_maxkey_pickling(self): maxk = MaxKey() - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' + pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' + b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' b'\x81\x94.') self.round_trip_pickle(maxk, pickled_with_3) diff --git a/test/test_bulk.py b/test/test_bulk.py index 08740a437e..a895dfddc3 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -16,13 +16,15 @@ import sys import uuid -from bson.binary import UuidRepresentation -from bson.codec_options import CodecOptions + +from pymongo.mongo_client import MongoClient sys.path[0:0] = [""] -from bson import Binary +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions from bson.objectid import ObjectId +from pymongo.collection import Collection from pymongo.common import partition_node from pymongo.errors import (BulkWriteError, ConfigurationError, @@ -40,6 +42,8 @@ class BulkTestBase(IntegrationTest): + coll: Collection + coll_w0: Collection @classmethod def setUpClass(cls): @@ -280,6 +284,7 @@ def test_upsert(self): upsert=True)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.upserted_count) + assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) @@ -341,11 +346,11 @@ def test_bulk_write_invalid_arguments(self): # The requests argument must be a list. generator = (InsertOne({}) for _ in range(10)) with self.assertRaises(TypeError): - self.coll.bulk_write(generator) + self.coll.bulk_write(generator) # type: ignore[arg-type] # Document is not wrapped in a bulk write operation. with self.assertRaises(TypeError): - self.coll.bulk_write([{}]) + self.coll.bulk_write([{}]) # type: ignore[list-item] def test_upsert_large(self): big = 'a' * (client_context.max_bson_size - 37) @@ -425,7 +430,7 @@ def test_upsert_uuid_unspecified(self): def test_upsert_uuid_standard_subdocuments(self): options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) coll = self.coll.with_options(codec_options=options) - ids = [ + ids: list = [ {'f': Binary(bytes(i)), 'f2': uuid.uuid4()} for i in range(3) ] @@ -472,7 +477,7 @@ def test_single_ordered_batch(self): def test_single_error_ordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), InsertOne({'b': 3, 'a': 2}), @@ -506,7 +511,7 @@ def test_single_error_ordered_batch(self): def test_multiple_error_ordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), UpdateOne({'b': 3}, {'$set': {'a': 2}}, upsert=True), @@ -542,7 +547,7 @@ def test_multiple_error_ordered_batch(self): result) def test_single_unordered_batch(self): - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 1}, {'$set': {'b': 1}}), UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), @@ -564,7 +569,7 @@ def test_single_unordered_batch(self): def test_single_error_unordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), InsertOne({'b': 3, 'a': 2}), @@ -599,7 +604,7 @@ def test_single_error_unordered_batch(self): def test_multiple_error_unordered_batch(self): self.coll.create_index('a', unique=True) self.addCleanup(self.coll.drop_index, [('a', 1)]) - requests = [ + requests: list = [ InsertOne({'b': 1, 'a': 1}), UpdateOne({'b': 2}, {'$set': {'a': 3}}, upsert=True), UpdateOne({'b': 3}, {'$set': {'a': 4}}, upsert=True), @@ -662,7 +667,7 @@ def test_large_inserts_ordered(self): self.coll.delete_many({}) big = 'x' * (1024 * 1024 * 4) - result = self.coll.bulk_write([ + write_result = self.coll.bulk_write([ InsertOne({'a': 1, 'big': big}), InsertOne({'a': 2, 'big': big}), InsertOne({'a': 3, 'big': big}), @@ -671,7 +676,7 @@ def test_large_inserts_ordered(self): InsertOne({'a': 6, 'big': big}), ]) - self.assertEqual(6, result.inserted_count) + self.assertEqual(6, write_result.inserted_count) self.assertEqual(6, self.coll.count_documents({})) def test_large_inserts_unordered(self): @@ -685,12 +690,12 @@ def test_large_inserts_unordered(self): try: self.coll.bulk_write(requests, ordered=False) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) + self.assertEqual(2, details['nInserted']) self.coll.delete_many({}) @@ -741,7 +746,7 @@ def tearDown(self): self.coll.delete_many({}) def test_no_results_ordered_success(self): - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'a': 2}), @@ -755,7 +760,7 @@ def test_no_results_ordered_success(self): 'removed {"_id": 1}') def test_no_results_ordered_failure(self): - requests = [ + requests: list = [ InsertOne({'_id': 1}), UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'_id': 2}), @@ -771,7 +776,7 @@ def test_no_results_ordered_failure(self): self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) def test_no_results_unordered_success(self): - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'a': 2}), @@ -785,7 +790,7 @@ def test_no_results_unordered_success(self): 'removed {"_id": 1}') def test_no_results_unordered_failure(self): - requests = [ + requests: list = [ InsertOne({'_id': 1}), UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), InsertOne({'_id': 2}), @@ -832,13 +837,15 @@ def test_no_remove(self): class TestBulkWriteConcern(BulkTestBase): + w: Optional[int] + secondary: MongoClient @classmethod def setUpClass(cls): super(TestBulkWriteConcern, cls).setUpClass() cls.w = client_context.w cls.secondary = None - if cls.w > 1: + if cls.w is not None and cls.w > 1: for member in client_context.hello['hosts']: if member != client_context.hello['primary']: cls.secondary = single_client(*partition_node(member)) @@ -886,7 +893,7 @@ def test_write_concern_failure_ordered(self): try: self.cause_wtimeout(requests, ordered=True) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") @@ -899,13 +906,13 @@ def test_write_concern_failure_ordered(self): 'nRemoved': 0, 'upserted': [], 'writeErrors': []}, - result) + details) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 0) + self.assertTrue(len(details['writeConcernErrors']) > 0) - failed = result['writeConcernErrors'][0] + failed = details['writeConcernErrors'][0] self.assertEqual(64, failed['code']) self.assertTrue(isinstance(failed['errmsg'], str)) @@ -924,7 +931,7 @@ def test_write_concern_failure_ordered(self): try: self.cause_wtimeout(requests, ordered=True) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") @@ -941,10 +948,10 @@ def test_write_concern_failure_ordered(self): 'code': 11000, 'errmsg': '...', 'op': {'_id': '...', 'a': 1}}]}, - result) + details) - self.assertTrue(len(result['writeConcernErrors']) > 1) - failed = result['writeErrors'][0] + self.assertTrue(len(details['writeConcernErrors']) > 1) + failed = details['writeErrors'][0] self.assertTrue("duplicate" in failed['errmsg']) @client_context.require_replica_set @@ -966,17 +973,17 @@ def test_write_concern_failure_unordered(self): try: self.cause_wtimeout(requests, ordered=False) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(0, len(result['writeErrors'])) + self.assertEqual(2, details['nInserted']) + self.assertEqual(1, details['nUpserted']) + self.assertEqual(0, len(details['writeErrors'])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) + self.assertTrue(len(details['writeConcernErrors']) > 1) self.coll.delete_many({}) self.coll.create_index('a', unique=True) @@ -984,7 +991,7 @@ def test_write_concern_failure_unordered(self): # Fail due to write concern support as well # as duplicate key error on unordered batch. - requests = [ + requests: list = [ InsertOne({'a': 1}), UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), InsertOne({'a': 1}), @@ -993,29 +1000,29 @@ def test_write_concern_failure_unordered(self): try: self.cause_wtimeout(requests, ordered=False) except BulkWriteError as exc: - result = exc.details + details = exc.details self.assertEqual(exc.code, 65) else: self.fail("Error not raised") - self.assertEqual(2, result['nInserted']) - self.assertEqual(1, result['nUpserted']) - self.assertEqual(1, len(result['writeErrors'])) + self.assertEqual(2, details['nInserted']) + self.assertEqual(1, details['nUpserted']) + self.assertEqual(1, len(details['writeErrors'])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(result['writeConcernErrors']) > 1) + self.assertTrue(len(details['writeConcernErrors']) > 1) - failed = result['writeErrors'][0] + failed = details['writeErrors'][0] self.assertEqual(2, failed['index']) self.assertEqual(11000, failed['code']) self.assertTrue(isinstance(failed['errmsg'], str)) self.assertEqual(1, failed['op']['a']) - failed = result['writeConcernErrors'][0] + failed = details['writeConcernErrors'][0] self.assertEqual(64, failed['code']) self.assertTrue(isinstance(failed['errmsg'], str)) - upserts = result['upserted'] + upserts = details['upserted'] self.assertEqual(1, len(upserts)) self.assertEqual(1, upserts[0]['index']) self.assertTrue(upserts[0].get('_id')) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index a49f6972b2..655b99e801 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -24,6 +24,7 @@ import uuid from itertools import product +from typing import no_type_check sys.path[0:0] = [''] @@ -121,6 +122,7 @@ def kill_change_stream_cursor(self, change_stream): class APITestsMixin(object): + @no_type_check def test_watch(self): with self.change_stream( [{'$project': {'foo': 0}}], full_document='updateLookup', @@ -145,6 +147,7 @@ def test_watch(self): with self.change_stream(resume_after=resume_token): pass + @no_type_check def test_try_next(self): # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options( @@ -161,6 +164,7 @@ def test_try_next(self): wait_until(lambda: stream.try_next() is not None, "get change from try_next") + @no_type_check def test_try_next_runs_one_getmore(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) @@ -216,6 +220,7 @@ def test_try_next_runs_one_getmore(self): set(["getMore"])) self.assertIsNone(stream.try_next()) + @no_type_check def test_batch_size_is_honored(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) @@ -245,6 +250,7 @@ def test_batch_size_is_honored(self): self.assertEqual(expected[key], cmd[key]) # $changeStream.startAtOperationTime was added in 4.0.0. + @no_type_check @client_context.require_version_min(4, 0, 0) def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() @@ -258,6 +264,7 @@ def test_start_at_operation_time(self): for i in range(ndocs): cs.next() + @no_type_check def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") results = listener.results @@ -273,12 +280,14 @@ def _test_full_pipeline(self, expected_cs_stage): {'$project': {'foo': 0}}], command.command['pipeline']) + @no_type_check def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ self._test_full_pipeline({}) + @no_type_check def test_iteration(self): with self.change_stream(batch_size=2) as change_stream: num_inserted = 10 @@ -292,6 +301,7 @@ def test_iteration(self): break self._test_invalidate_stops_iteration(change_stream) + @no_type_check def _test_next_blocks(self, change_stream): inserted_doc = {'_id': ObjectId()} changes = [] @@ -311,18 +321,21 @@ def _test_next_blocks(self, change_stream): self.assertEqual(changes[0]['operationType'], 'insert') self.assertEqual(changes[0]['fullDocument'], inserted_doc) + @no_type_check def test_next_blocks(self): """Test that next blocks until a change is readable""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: self._test_next_blocks(change_stream) + @no_type_check def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( [{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream: self._test_next_blocks(change_stream) + @no_type_check def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" # Use a short await time to speed up the test. @@ -338,6 +351,7 @@ def iterate_cursor(): t.join(3) self.assertFalse(t.is_alive()) + @no_type_check def test_unknown_full_document(self): """Must rely on the server to raise an error on unknown fullDocument. """ @@ -347,6 +361,7 @@ def test_unknown_full_document(self): except OperationFailure: pass + @no_type_check def test_change_operations(self): """Test each operation type.""" expected_ns = {'db': self.watched_collection().database.name, @@ -393,6 +408,7 @@ def test_change_operations(self): # Invalidate. self._test_get_invalidate_event(change_stream) + @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after(self): resume_token = self.get_resume_token(invalidate=True) @@ -408,6 +424,7 @@ def test_start_after(self): self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 2}) + @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -427,6 +444,7 @@ def test_start_after_resume_process_with_changes(self): self.assertEqual(change['operationType'], 'insert') self.assertEqual(change['fullDocument'], {'_id': 3}) + @no_type_check @client_context.require_no_mongos # Remove after SERVER-41196 @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_without_changes(self): @@ -444,12 +462,14 @@ def test_start_after_resume_process_without_changes(self): class ProseSpecTestsMixin(object): + @no_type_check def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener + @no_type_check def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): self.watched_collection().insert_many( [{"data": k} for k in range(batch_size)]) @@ -485,6 +505,7 @@ def _get_expected_resume_token(self, stream, listener, response = listener.results['succeeded'][-1].reply return response['cursor']['postBatchResumeToken'] + @no_type_check def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. @@ -497,6 +518,7 @@ def _test_raises_error_on_missing_id(self, expected_exception): with self.assertRaises(StopIteration): next(change_stream) + @no_type_check def _test_update_resume_token(self, expected_rt_getter): """ChangeStream must continuously track the last seen resumeToken.""" client, listener = self._client_with_listener("aggregate", "getMore") @@ -536,6 +558,7 @@ def test_raises_error_on_missing_id_418minus(self): self._test_raises_error_on_missing_id(InvalidOperation) # Prose test no. 3 + @no_type_check def test_resume_on_error(self): with self.change_stream() as change_stream: self.insert_one_and_check(change_stream, {'_id': 1}) @@ -544,6 +567,7 @@ def test_resume_on_error(self): self.insert_one_and_check(change_stream, {'_id': 2}) # Prose test no. 4 + @no_type_check @client_context.require_failCommand_fail_point def test_no_resume_attempt_if_aggregate_command_fails(self): # Set non-retryable error on aggregate command. @@ -568,6 +592,7 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): # each operation which ensure compliance with this prose test. # Prose test no. 7 + @no_type_check def test_initial_empty_batch(self): with self.change_stream() as change_stream: # The first batch should be empty. @@ -579,6 +604,7 @@ def test_initial_empty_batch(self): self.assertEqual(cursor_id, change_stream._cursor.cursor_id) # Prose test no. 8 + @no_type_check def test_kill_cursors(self): def raise_error(): raise ServerSelectionTimeoutError('mock error') @@ -591,6 +617,7 @@ def raise_error(): self.insert_one_and_check(change_stream, {'_id': 2}) # Prose test no. 9 + @no_type_check @client_context.require_version_min(4, 0, 0) @client_context.require_version_max(4, 0, 7) def test_start_at_operation_time_caching(self): @@ -619,6 +646,7 @@ def test_start_at_operation_time_caching(self): # This test is identical to prose test no. 3. # Prose test no. 11 + @no_type_check @client_context.require_version_min(4, 0, 7) def test_resumetoken_empty_batch(self): client, listener = self._client_with_listener("getMore") @@ -631,6 +659,7 @@ def test_resumetoken_empty_batch(self): response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 + @no_type_check @client_context.require_version_min(4, 0, 7) def test_resumetoken_exhausted_batch(self): client, listener = self._client_with_listener("getMore") @@ -643,6 +672,7 @@ def test_resumetoken_exhausted_batch(self): response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 + @no_type_check @client_context.require_version_max(4, 0, 7) def test_resumetoken_empty_batch_legacy(self): resume_point = self.get_resume_token() @@ -659,6 +689,7 @@ def test_resumetoken_empty_batch_legacy(self): self.assertEqual(resume_token, resume_point) # Prose test no. 12 + @no_type_check @client_context.require_version_max(4, 0, 7) def test_resumetoken_exhausted_batch_legacy(self): # Resume token is _id of last change. @@ -673,6 +704,7 @@ def test_resumetoken_exhausted_batch_legacy(self): self.assertEqual(change_stream.resume_token, change["_id"]) # Prose test no. 13 + @no_type_check def test_resumetoken_partially_iterated_batch(self): # When batch has been iterated up to but not including the last element. # Resume token should be _id of previous change document. @@ -686,6 +718,7 @@ def test_resumetoken_partially_iterated_batch(self): self.assertEqual(resume_token, change["_id"]) + @no_type_check def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): # When the batch is not empty and hasn't been iterated at all. # Resume token should be same as the resume option used. @@ -704,17 +737,20 @@ def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): self.assertEqual(resume_token, resume_point) # Prose test no. 14 + @no_type_check @client_context.require_no_mongos def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): self._test_resumetoken_uniterated_nonempty_batch("resume_after") # Prose test no. 14 + @no_type_check @client_context.require_no_mongos @client_context.require_version_min(4, 1, 1) def test_resumetoken_uniterated_nonempty_batch_startafter(self): self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 17 + @no_type_check @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. @@ -735,6 +771,7 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 18 + @no_type_check @client_context.require_version_min(4, 1, 1) def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. @@ -757,6 +794,8 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): + dbs: list + @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_no_mmap @@ -1045,6 +1084,7 @@ def test_read_concern(self): class TestAllLegacyScenarios(IntegrationTest): RUN_ON_LOAD_BALANCER = True + listener: AllowListEventListener @classmethod @client_context.require_connection diff --git a/test/test_client.py b/test/test_client.py index 8db1cb5621..9ca9989052 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -28,6 +28,8 @@ import threading import warnings +from typing import no_type_check, Type + sys.path[0:0] = [""] from bson import encode @@ -99,6 +101,7 @@ class ClientUnitTest(unittest.TestCase): """MongoClient tests that don't require a server.""" + client: MongoClient @classmethod @client_context.require_connection @@ -341,7 +344,7 @@ def transform_python(self, value): return int(value) # Ensure codec options are passed in correctly - document_class = SON + document_class: Type[SON] = SON type_registry = TypeRegistry([MyFloatAsIntEncoder()]) tz_aware = True uuid_representation_label = 'javaLegacy' @@ -614,7 +617,7 @@ def test_constants(self): port are not overloaded. """ host, port = client_context.host, client_context.port - kwargs = client_context.default_client_options.copy() + kwargs: dict = client_context.default_client_options.copy() if client_context.auth_enabled: kwargs['username'] = db_user kwargs['password'] = db_pwd @@ -1111,6 +1114,7 @@ def test_socketKeepAlive(self): socket.SO_KEEPALIVE) self.assertTrue(keepalive) + @no_type_check def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware='foo') @@ -1140,7 +1144,7 @@ def test_ipv6(self): uri = "mongodb://%s[::1]:%d" % (auth_str, client_context.port) if client_context.is_rs: - uri += '/?replicaSet=' + client_context.replica_set_name + uri += '/?replicaSet=' + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) @@ -1379,7 +1383,7 @@ def init(self, *args): heartbeat_times.append(time.time()) try: - ServerHeartbeatStartedEvent.__init__ = init + ServerHeartbeatStartedEvent.__init__ = init # type: ignore listener = HeartbeatStartedListener() uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( client_context.host, client_context.port) @@ -1394,7 +1398,7 @@ def init(self, *args): client.close() finally: - ServerHeartbeatStartedEvent.__init__ = old_init + ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore def test_small_heartbeat_frequency_ms(self): uri = "mongodb://example/?heartbeatFrequencyMS=499" @@ -1847,7 +1851,7 @@ def test(collection): lazy_client_trial(reset, delete_one, test, self._get_client) def test_find_one(self): - results = [] + results: list = [] def reset(collection): collection.drop() diff --git a/test/test_cmap.py b/test/test_cmap.py index 20ed7f31ec..bfc600f19f 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -213,11 +213,11 @@ def set_fail_point(self, command_args): def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" - self.logs = [] + self.logs: list = [] self.assertEqual(scenario_def['version'], 1) self.assertIn(scenario_def['style'], ['unit', 'integration']) self.listener = CMAPListener() - self._ops = [] + self._ops: list = [] # Configure the fail point before creating the client. if 'failPoint' in test: @@ -259,9 +259,9 @@ def run_scenario(self, scenario_def, test): self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. - self.targets = dict() + self.targets: dict = dict() # Map of label names to Connection objects - self.labels = dict() + self.labels: dict = dict() def cleanup(): for t in self.targets.values(): diff --git a/test/test_code.py b/test/test_code.py index c5e190f363..1c4b5be1fe 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -17,6 +17,7 @@ """Tests for the Code wrapper.""" import sys + sys.path[0:0] = [""] from bson.code import Code @@ -35,7 +36,7 @@ def test_read_only(self): c = Code("blah") def set_c(): - c.scope = 5 + c.scope = 5 # type: ignore self.assertRaises(AttributeError, set_c) def test_code(self): diff --git a/test/test_collation.py b/test/test_collation.py index f0139b4a22..9c4f4f6576 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -17,6 +17,8 @@ import functools import warnings +from typing import Any + from pymongo.collation import ( Collation, CollationCaseFirst, CollationStrength, CollationAlternate, @@ -78,6 +80,10 @@ def test_constructor(self): class TestCollation(IntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation + @classmethod @client_context.require_connection def setUpClass(cls): diff --git a/test/test_collection.py b/test/test_collection.py index 4a167bacb3..3d4a107aa9 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -20,8 +20,11 @@ import re import sys -from codecs import utf_8_decode +from codecs import utf_8_decode # type: ignore from collections import defaultdict +from typing import no_type_check + +from pymongo.database import Database sys.path[0:0] = [""] @@ -66,6 +69,7 @@ class TestCollectionNoConnect(unittest.TestCase): """Test Collection features on a client that does not connect. """ + db: Database @classmethod def setUpClass(cls): @@ -116,11 +120,12 @@ def test_iteration(self): class TestCollection(IntegrationTest): + w: int @classmethod def setUpClass(cls): super(TestCollection, cls).setUpClass() - cls.w = client_context.w + cls.w = client_context.w # type: ignore @classmethod def tearDownClass(cls): @@ -726,7 +731,7 @@ def test_insert_many(self): db = self.db db.test.drop() - docs = [{} for _ in range(5)] + docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertTrue(isinstance(result.inserted_ids, list)) @@ -759,7 +764,7 @@ def test_insert_many(self): db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) - docs = [{} for _ in range(5)] + docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertFalse(result.acknowledged) @@ -792,11 +797,11 @@ def test_insert_many_invalid(self): with self.assertRaisesRegex( TypeError, "documents must be a non-empty list"): - db.test.insert_many(1) + db.test.insert_many(1) # type: ignore[arg-type] with self.assertRaisesRegex( TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) + db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) # type: ignore[arg-type] def test_delete_one(self): self.db.test.drop() @@ -1064,7 +1069,7 @@ def test_bypass_document_validation_bulk_write(self): db_w0 = self.db.client.get_database( self.db.name, write_concern=WriteConcern(w=0)) - ops = [InsertOne({"a": -10}), + ops: list = [InsertOne({"a": -10}), InsertOne({"a": -11}), InsertOne({"a": -12}), UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), @@ -1087,7 +1092,7 @@ def test_bypass_document_validation_bulk_write(self): def test_find_by_default_dct(self): db = self.db db.test.insert_one({'foo': 'bar'}) - dct = defaultdict(dict, [('foo', 'bar')]) + dct = defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] self.assertIsNotNone(db.test.find_one(dct)) self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')])) @@ -1117,6 +1122,7 @@ def test_find_w_fields(self): doc = next(db.test.find({}, ["mike"])) self.assertFalse("extra thing" in doc) + @no_type_check def test_fields_specifier_as_dict(self): db = self.db db.test.delete_many({}) @@ -1333,7 +1339,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 1})) self.assertEqual(0, db.test.count_documents({"x": 1})) - self.assertEqual(db.test.find_one(id1)["y"], 1) + self.assertEqual(db.test.find_one(id1)["y"], 1) # type: ignore replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) result = db.test.replace_one({"y": 1}, replacement, True) @@ -1344,7 +1350,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"y": 1})) - self.assertEqual(db.test.find_one(id1)["z"], 1) + self.assertEqual(db.test.find_one(id1)["z"], 1) # type: ignore result = db.test.replace_one({"x": 2}, {"y": 2}, True) self.assertTrue(isinstance(result, UpdateResult)) @@ -1377,7 +1383,7 @@ def test_update_one(self): self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 6) + self.assertEqual(db.test.find_one(id1)["x"], 6) # type: ignore id2 = db.test.insert_one({"x": 1}).inserted_id result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) @@ -1386,8 +1392,8 @@ def test_update_one(self): self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 7) - self.assertEqual(db.test.find_one(id2)["x"], 1) + self.assertEqual(db.test.find_one(id1)["x"], 7) # type: ignore + self.assertEqual(db.test.find_one(id2)["x"], 1) # type: ignore result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) self.assertTrue(isinstance(result, UpdateResult)) @@ -1587,12 +1593,12 @@ def test_aggregation_cursor(self): # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) - self.assertEqual(5, len(cursor._CommandCursor__data)) + self.assertEqual(5, len(cursor._CommandCursor__data)) # type: ignore # Force a getMore - cursor._CommandCursor__data.clear() + cursor._CommandCursor__data.clear() # type: ignore next(cursor) # batchSize - 1 - self.assertEqual(4, len(cursor._CommandCursor__data)) + self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore # Exhaust the cursor. There shouldn't be any errors. for doc in cursor: pass @@ -1679,6 +1685,7 @@ def test_rename(self): with self.write_concern_collection() as coll: coll.rename('foo') + @no_type_check def test_find_one(self): db = self.db db.drop_collection("test") @@ -1973,17 +1980,17 @@ def __getattr__(self, name): bad = BadGetAttr([('foo', 'bar')]) c.insert_one({'bad': bad}) - self.assertEqual('bar', c.find_one()['bad']['foo']) + self.assertEqual('bar', c.find_one()['bad']['foo']) # type: ignore def test_array_filters_validation(self): # array_filters must be a list. c = self.db.test with self.assertRaises(TypeError): - c.update_one({}, {'$set': {'a': 1}}, array_filters={}) + c.update_one({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.update_many({}, {'$set': {'a': 1}}, array_filters={}) + c.update_many({}, {'$set': {'a': 1}}, array_filters={} ) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) + c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) @@ -2158,7 +2165,7 @@ def test_find_regex(self): c.drop() c.insert_one({'r': re.compile('.*')}) - self.assertTrue(isinstance(c.find_one()['r'], Regex)) + self.assertTrue(isinstance(c.find_one()['r'], Regex)) # type: ignore for doc in c.find(): self.assertTrue(isinstance(doc['r'], Regex)) @@ -2189,9 +2196,9 @@ def test_helpers_with_let(self): for helper, args in helpers: with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): - helper(*args, let=let) + helper(*args, let=let) # type: ignore for helper, args in helpers: - helper(*args, let={}) + helper(*args, let={}) # type: ignore if __name__ == "__main__": diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index 7ff80d75e5..a05dbd9668 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -43,6 +43,8 @@ def camel_to_snake(camel): class TestAllScenarios(unittest.TestCase): + listener: EventListener + client: MongoClient @classmethod @client_context.require_connection diff --git a/test/test_common.py b/test/test_common.py index dcd618c509..7d7a26c278 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -50,13 +50,13 @@ def test_uuid_representation(self): "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) legacy_opts = coll.codec_options coll.insert_one({'uu': uu}) - self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) + self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) # type: ignore coll = self.db.get_collection( "uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) self.assertEqual(None, coll.find_one({'uu': uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) + self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) # type: ignore # Test count_documents self.assertEqual(0, coll.count_documents({'uu': uu})) @@ -81,9 +81,9 @@ def test_uuid_representation(self): coll.update_one({'_id': uu}, {'$set': {'i': 2}}) coll = self.db.get_collection( "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({'_id': uu})['i']) + self.assertEqual(1, coll.find_one({'_id': uu})['i']) # type: ignore coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - self.assertEqual(2, coll.find_one({'_id': uu})['i']) + self.assertEqual(2, coll.find_one({'_id': uu})['i']) # type: ignore # Test Cursor.distinct self.assertEqual([2], coll.find({'_id': uu}).distinct('i')) @@ -98,7 +98,7 @@ def test_uuid_representation(self): "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) self.assertEqual(2, coll.find_one_and_update({'_id': uu}, {'$set': {'i': 5}})['i']) - self.assertEqual(5, coll.find_one({'_id': uu})['i']) + self.assertEqual(5, coll.find_one({'_id': uu})['i']) # type: ignore # Test command self.assertEqual(5, self.db.command( diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 894b14becd..e683974b03 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -20,6 +20,7 @@ from bson import SON from pymongo import monitoring +from pymongo.collection import Collection from pymongo.errors import NotPrimaryError from pymongo.write_concern import WriteConcern @@ -33,6 +34,9 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): + listener: CMAPListener + coll: Collection + @classmethod @client_context.require_replica_set def setUpClass(cls): @@ -111,7 +115,7 @@ def run_scenario(self, error_code, retry, pool_status_checker): # Insert record and verify failure. with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) - self.assertEqual(exc.exception.details['code'], error_code) + self.assertEqual(exc.exception.details['code'], error_code) # type: ignore # Retry before CMAPListener assertion if retry_before=True. if retry: self.coll.insert_one({"test": 1}) diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 5a63e030fe..4399d9f223 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -53,7 +53,7 @@ def check_result(self, expected_result, result): # SPEC-869: Only BulkWriteResult has upserted_count. if (prop == "upserted_count" and not isinstance(result, BulkWriteResult)): - if result.upserted_id is not None: + if result.upserted_id is not None: # type: ignore upserted_count = 1 else: upserted_count = 0 @@ -69,14 +69,14 @@ def check_result(self, expected_result, result): ids = expected_result[res] if isinstance(ids, dict): ids = [ids[str(i)] for i in range(len(ids))] - self.assertEqual(ids, result.inserted_ids, msg) + self.assertEqual(ids, result.inserted_ids, msg) # type: ignore elif prop == "upserted_ids": # Convert indexes from strings to integers. ids = expected_result[res] expected_ids = {} for str_index in ids: expected_ids[int(str_index)] = ids[str_index] - self.assertEqual(expected_ids, result.upserted_ids, msg) + self.assertEqual(expected_ids, result.upserted_ids, msg) # type: ignore else: self.assertEqual( getattr(result, prop), expected_result[res], msg) diff --git a/test/test_cursor.py b/test/test_cursor.py index 0b8ba049c2..f741b8b0cc 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -57,7 +57,7 @@ def test_deepcopy_cursor_littered_with_regexes(self): re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) cursor2 = copy.deepcopy(cursor) - self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) + self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore def test_add_remove_option(self): cursor = self.db.test.find() @@ -149,9 +149,9 @@ def test_allow_disk_use(self): self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz') cursor = coll.find().allow_disk_use(True) - self.assertEqual(True, cursor._Cursor__allow_disk_use) + self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore cursor = coll.find().allow_disk_use(False) - self.assertEqual(False, cursor._Cursor__allow_disk_use) + self.assertEqual(False, cursor._Cursor__allow_disk_use) # type: ignore def test_max_time_ms(self): db = self.db @@ -165,15 +165,15 @@ def test_max_time_ms(self): coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) - self.assertEqual(999, cursor._Cursor__max_time_ms) + self.assertEqual(999, cursor._Cursor__max_time_ms) # type: ignore cursor = coll.find().max_time_ms(10).max_time_ms(1000) - self.assertEqual(1000, cursor._Cursor__max_time_ms) + self.assertEqual(1000, cursor._Cursor__max_time_ms) # type: ignore cursor = coll.find().max_time_ms(999) c2 = cursor.clone() - self.assertEqual(999, c2._Cursor__max_time_ms) - self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) - self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) + self.assertEqual(999, c2._Cursor__max_time_ms) # type: ignore + self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) # type: ignore + self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) # type: ignore self.assertTrue(coll.find_one(max_time_ms=1000)) @@ -889,7 +889,7 @@ def test_clone(self): # Every attribute should be the same. cursor2 = cursor.clone() - self.assertDictEqual(cursor.__dict__, cursor2.__dict__) + self.assertEqual(cursor.__dict__, cursor2.__dict__) # Shallow copies can so can mutate cursor2 = copy.copy(cursor) @@ -1025,7 +1025,7 @@ def test_properties(self): self.assertEqual(self.db.test, self.db.test.find().collection) def set_coll(): - self.db.test.find().collection = "hello" + self.db.test.find().collection = "hello" # type: ignore self.assertRaises(AttributeError, set_coll) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 5db208ab7e..eee47b9d2b 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -21,6 +21,7 @@ from collections import OrderedDict from decimal import Decimal from random import random +from typing import Any, Tuple, Type, no_type_check sys.path[0:0] = [""] @@ -127,6 +128,7 @@ def transform_bson(self, value): class CustomBSONTypeTests(object): + @no_type_check def roundtrip(self, doc): bsonbytes = encode(doc, codec_options=self.codecopts) rt_document = decode(bsonbytes, codec_options=self.codecopts) @@ -139,6 +141,7 @@ def test_encode_decode_roundtrip(self): self.roundtrip({'average': [[Decimal('56.47')]]}) self.roundtrip({'average': [{'b': Decimal('56.47')}]}) + @no_type_check def test_decode_all(self): documents = [] for dec in range(3): @@ -151,12 +154,14 @@ def test_decode_all(self): self.assertEqual( decode_all(bsonstream, self.codecopts), documents) + @no_type_check def test__bson_to_dict(self): document = {'average': Decimal('56.47')} rawbytes = encode(document, codec_options=self.codecopts) decoded_document = _bson_to_dict(rawbytes, self.codecopts) self.assertEqual(document, decoded_document) + @no_type_check def test__dict_to_bson(self): document = {'average': Decimal('56.47')} rawbytes = encode(document, codec_options=self.codecopts) @@ -172,12 +177,14 @@ def _generate_multidocument_bson_stream(self): bsonstream += encode(doc) return edocs, bsonstream + @no_type_check def test_decode_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() for expected_doc, decoded_doc in zip( expected, decode_iter(bson_data, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) + @no_type_check def test_decode_file_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() fileobj = tempfile.TemporaryFile() @@ -293,6 +300,15 @@ def test_type_checks(self): class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + @classmethod def setUpClass(cls): class TypeA(object): @@ -378,6 +394,10 @@ def test_infinite_loop_exceeds_max_recursion_depth(self): class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + @classmethod def setUpClass(cls): class MyIntType(object): @@ -466,32 +486,32 @@ class MyIntDecoder(TypeDecoder): def transform_bson(self, value): return self.types[0](value) - codec_instances = [MyIntDecoder(), MyIntEncoder()] + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] type_registry = TypeRegistry(codec_instances) self.assertEqual( type_registry._encoder_map, - {MyIntEncoder.python_type: codec_instances[1].transform_python}) + {MyIntEncoder.python_type: codec_instances[1].transform_python}) # type: ignore self.assertEqual( type_registry._decoder_map, - {MyIntDecoder.bson_type: codec_instances[0].transform_bson}) + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}) # type: ignore def test_initialize_fail(self): err_msg = ("Expected an instance of TypeEncoder, TypeDecoder, " "or TypeCodec, got .* instead") with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(self.codecs) + TypeRegistry(self.codecs) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([type('AnyType', (object,), {})()]) err_msg = "fallback_encoder %r is not a callable" % (True,) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([], True) + TypeRegistry([], True) # type: ignore[arg-type] err_msg = "fallback_encoder %r is not a callable" % ('hello',) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(fallback_encoder='hello') + TypeRegistry(fallback_encoder='hello') # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] @@ -525,7 +545,7 @@ def run_test(base, attrs): if pytype in [bool, type(None), RE_TYPE,]: continue - class MyType(pytype): + class MyType(pytype): # type: ignore pass attrs.update({'python_type': MyType, 'transform_python': lambda x: x}) @@ -598,7 +618,7 @@ def test_aggregate_w_custom_type_decoder(self): test = db.get_collection( 'test', codec_options=UNINT_DECODER_CODECOPTS) - pipeline = [ + pipeline: list = [ {'$match': {'status': 'complete'}}, {'$group': {'_id': "$status", 'total_qty': {"$sum": "$qty"}}},] result = test.aggregate(pipeline) @@ -680,15 +700,18 @@ def test_grid_out_custom_opts(self): class ChangeStreamsWCustomTypesTestMixin(object): + @no_type_check def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) + @no_type_check def insert_and_check(self, change_stream, insert_doc, expected_doc): self.input_target.insert_one(insert_doc) change = next(change_stream) self.assertEqual(change['fullDocument'], expected_doc) + @no_type_check def kill_change_stream_cursor(self, change_stream): # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor @@ -696,6 +719,7 @@ def kill_change_stream_cursor(self, change_stream): client = self.input_target.database.client client._close_cursor_now(cursor.cursor_id, address) + @no_type_check def test_simple(self): codecopts = CodecOptions(type_registry=TypeRegistry([ UndecipherableIntEncoder(), UppercaseTextDecoder()])) @@ -718,6 +742,7 @@ def test_simple(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + @no_type_check def test_custom_type_in_pipeline(self): codecopts = CodecOptions(type_registry=TypeRegistry([ UndecipherableIntEncoder(), UppercaseTextDecoder()])) @@ -741,6 +766,7 @@ def test_custom_type_in_pipeline(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + @no_type_check def test_break_resume_token(self): # Get one document from a change stream to determine resumeToken type. self.create_targets() @@ -766,6 +792,7 @@ def test_break_resume_token(self): self.kill_change_stream_cursor(change_stream) self.insert_and_check(change_stream, docs[2], docs[2]) + @no_type_check def test_document_class(self): def run_test(doc_cls): codecopts = CodecOptions(type_registry=TypeRegistry([ diff --git a/test/test_database.py b/test/test_database.py index 4adccc1b58..096eb5b979 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -17,6 +17,7 @@ import datetime import re import sys +from typing import Any, List, Mapping sys.path[0:0] = [""] @@ -57,6 +58,7 @@ class TestDatabaseNoConnect(unittest.TestCase): """Test Database features on a client that does not connect. """ + client: MongoClient @classmethod def setUpClass(cls): @@ -143,7 +145,7 @@ def test_create_collection(self): test = db.create_collection("test") self.assertTrue("test" in db.list_collection_names()) test.insert_one({"hello": "world"}) - self.assertEqual(db.test.find_one()["hello"], "world") + self.assertEqual(db.test.find_one()["hello"], "world") # type: ignore db.drop_collection("test.foo") db.create_collection("test.foo") @@ -198,6 +200,7 @@ def test_list_collection_names_filter(self): self.assertNotIn("nameOnly", results["started"][0].command) # Should send nameOnly (except on 2.6). + filter: Any for filter in (None, {}, {'name': {'$in': ['capped', 'non_capped']}}): results.clear() names = db.list_collection_names(filter=filter) @@ -225,7 +228,7 @@ def test_list_collections(self): self.assertTrue("$" not in coll) # Duplicate check. - coll_cnt = {} + coll_cnt: dict = {} for coll in colls: try: # Found duplicate. @@ -233,7 +236,7 @@ def test_list_collections(self): self.assertTrue(False) except KeyError: coll_cnt[coll] = 1 - coll_cnt = {} + coll_cnt: dict = {} # Checking if is there any collection which don't exists. if (len(set(colls) - set(["test","test.mike"])) == 0 or @@ -466,6 +469,7 @@ def test_insert_find_one(self): self.assertEqual(None, db.test.find_one({"hello": "test"})) b = db.test.find_one() + assert b is not None b["hello"] = "mike" db.test.replace_one({"_id": b["_id"]}, b) @@ -482,12 +486,12 @@ def test_long(self): db = self.client.pymongo_test db.test.drop() db.test.insert_one({"x": 9223372036854775807}) - retrieved = db.test.find_one()['x'] + retrieved = db.test.find_one()['x'] # type: ignore self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) db.test.delete_many({}) db.test.insert_one({"x": Int64(1)}) - retrieved = db.test.find_one()['x'] + retrieved = db.test.find_one()['x'] # type: ignore self.assertEqual(Int64(1), retrieved) self.assertIsInstance(retrieved, Int64) @@ -509,8 +513,8 @@ def test_delete(self): length += 1 self.assertEqual(length, 2) - db.test.delete_one(db.test.find_one()) - db.test.delete_one(db.test.find_one()) + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] + db.test.delete_one(db.test.find_one()) # type: ignore[arg-type] self.assertEqual(db.test.find_one(), None) db.test.insert_one({"x": 1}) @@ -625,7 +629,7 @@ def test_with_options(self): 'read_preference': ReadPreference.PRIMARY, 'write_concern': WriteConcern(w=1), 'read_concern': ReadConcern(level="local")} - db2 = db1.with_options(**newopts) + db2 = db1.with_options(**newopts) # type: ignore[arg-type] for opt in newopts: self.assertEqual( getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) @@ -633,7 +637,7 @@ def test_with_options(self): class TestDatabaseAggregation(IntegrationTest): def setUp(self): - self.pipeline = [{"$listLocalSessions": {}}, + self.pipeline: List[Mapping[str, Any]] = [{"$listLocalSessions": {}}, {"$limit": 1}, {"$addFields": {"dummy": "dummy field"}}, {"$project": {"_id": 0, "dummy": 1}}] @@ -648,6 +652,7 @@ def test_database_aggregation(self): @client_context.require_no_mongos def test_database_aggregation_fake_cursor(self): coll_name = "test_output" + write_stage: dict if client_context.version < (4, 3): db_name = "admin" write_stage = {"$out": coll_name} diff --git a/test/test_dbref.py b/test/test_dbref.py index 964947351e..348b1d14de 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -16,6 +16,7 @@ import pickle import sys +from typing import Any sys.path[0:0] = [""] from bson import encode, decode @@ -44,10 +45,10 @@ def test_read_only(self): a = DBRef("coll", ObjectId()) def foo(): - a.collection = "blah" + a.collection = "blah" # type: ignore[misc] def bar(): - a.id = "aoeu" + a.id = "aoeu" # type: ignore[misc] self.assertEqual("coll", a.collection) a.id @@ -136,6 +137,7 @@ def test_dbref_hash(self): # https://github.com/mongodb/specifications/blob/master/source/dbref.rst#test-plan class TestDBRefSpec(unittest.TestCase): def test_decoding_1_2_3(self): + doc: Any for doc in [ # 1, Valid documents MUST be decoded to a DBRef: {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, @@ -183,6 +185,7 @@ def test_decoding_4_5(self): self.assertIsInstance(dbref, dict) def test_encoding_1_2(self): + doc: Any for doc in [ # 1, Encoding DBRefs with basic fields: {"$ref": "coll0", "$id": ObjectId("60a6fe9a54f4180c86309efa")}, diff --git a/test/test_decimal128.py b/test/test_decimal128.py index 4ff25935dd..3988a4559a 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -35,6 +35,7 @@ def test_round_trip(self): b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') coll.insert_one({'dec128': dec128}) doc = coll.find_one({'dec128': dec128}) + assert doc is not None self.assertIsNotNone(doc) self.assertEqual(doc['dec128'], dec128) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 107168f294..c3a50709ac 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -364,10 +364,12 @@ def _event_count(self, event): def marked_unknown(e): return (isinstance(e, monitoring.ServerDescriptionChangedEvent) and not e.new_description.is_server_type_known) + assert self.server_listener is not None return len(self.server_listener.matching(marked_unknown)) # Only support CMAP events for now. self.assertTrue(event.startswith('Pool') or event.startswith('Conn')) event_type = getattr(monitoring, event) + assert self.pool_listener is not None return self.pool_listener.event_count(event_type) def assert_event_count(self, event, count): diff --git a/test/test_encryption.py b/test/test_encryption.py index af4165f1d1..966d9b5815 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -25,6 +25,10 @@ import traceback import uuid +from typing import Any + +from pymongo.collection import Collection + sys.path[0:0] = [""] from bson import encode, json_util @@ -126,6 +130,7 @@ def test_init_kms_tls_options(self): with self.assertRaisesRegex( TypeError, r'kms_tls_options\["kmip"\] must be a dict'): AutoEncryptionOpts({}, 'k.d', kms_tls_options={'kmip': 1}) + tls_opts: Any for tls_opts in [ {'kmip': {'tls': True, 'tlsInsecure': True}}, {'kmip': {'tls': True, 'tlsAllowInvalidCertificates': True}}, @@ -138,6 +143,7 @@ def test_init_kms_tls_options(self): AutoEncryptionOpts({}, 'k.d', kms_tls_options={ 'kmip': {'tlsCAFile': 'does-not-exist'}}) # Success cases: + tls_opts: Any for tls_opts in [None, {}]: opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) self.assertEqual(opts._kms_ssl_contexts, {}) @@ -432,14 +438,14 @@ def test_validation(self): msg = 'value to decrypt must be a bson.binary.Binary with subtype 6' with self.assertRaisesRegex(TypeError, msg): - client_encryption.decrypt('str') + client_encryption.decrypt('str') # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.decrypt(Binary(b'123')) msg = 'key_id must be a bson.binary.Binary with subtype 4' algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt('str', algo, key_id=uuid.uuid4()) + client_encryption.encrypt('str', algo, key_id=uuid.uuid4()) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt('str', algo, key_id=Binary(b'123')) @@ -459,7 +465,7 @@ def test_bson_errors(self): def test_codec_options(self): with self.assertRaisesRegex(TypeError, 'codec_options must be'): ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None) + KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None) # type: ignore[arg-type] opts = CodecOptions(uuid_representation=JAVA_LEGACY) client_encryption_legacy = ClientEncryption( @@ -708,6 +714,10 @@ def create_key_vault(vault, *data_keys): class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): + client_encrypted: MongoClient + client_encryption: ClientEncryption + listener: OvertCommandListener + vault: Any KMS_PROVIDERS = ALL_KMS_PROVIDERS @@ -776,7 +786,7 @@ def setUp(self): def run_test(self, provider_name): # Create data key. - master_key = self.MASTER_KEYS[provider_name] + master_key: Any = self.MASTER_KEYS[provider_name] datakey_id = self.client_encryption.create_data_key( provider_name, master_key=master_key, key_alt_names=['%s_altname' % (provider_name,)]) @@ -798,7 +808,7 @@ def run_test(self, provider_name): {'_id': provider_name, 'value': encrypted}) doc_decrypted = self.client_encrypted.db.coll.find_one( {'_id': provider_name}) - self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,)) + self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,)) # type: ignore # Encrypt by key_alt_name. encrypted_altname = self.client_encryption.encrypt( @@ -985,7 +995,7 @@ def _test_corpus(self, opts): self.addCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json')) - corpus_copied = SON() + corpus_copied: SON = SON() for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp', @@ -1021,7 +1031,7 @@ def _test_corpus(self, opts): try: encrypted_val = client_encryption.encrypt( - value['value'], algo, **kwargs) + value['value'], algo, **kwargs) # type: ignore[arg-type] if not value['allowed']: self.fail('encrypt should have failed: %r: %r' % ( key, value)) @@ -1082,6 +1092,10 @@ def test_corpus_local_schema(self): class TestBsonSizeBatches(EncryptionIntegrationTest): """Prose tests for BSON size limits and batch splitting.""" + coll: Collection + coll_encrypted: Collection + client_encrypted: MongoClient + listener: OvertCommandListener @classmethod def setUpClass(cls): @@ -1397,6 +1411,7 @@ class AzureGCPEncryptionTestMixin(object): KMS_PROVIDER_MAP = None KEYVAULT_DB = 'keyvault' KEYVAULT_COLL = 'datakeys' + client: MongoClient def setUp(self): keyvault = self.client.get_database( @@ -1406,7 +1421,7 @@ def setUp(self): def _test_explicit(self, expectation): client_encryption = ClientEncryption( - self.KMS_PROVIDER_MAP, + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, OPTS) @@ -1426,7 +1441,7 @@ def _test_automatic(self, expectation_extjson, payload): keyvault_namespace = '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) encryption_opts = AutoEncryptionOpts( - self.KMS_PROVIDER_MAP, + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] keyvault_namespace, schema_map=self.SCHEMA_MAP) @@ -1818,7 +1833,7 @@ class TestKmsTLSOptions(EncryptionIntegrationTest): def setUp(self): super(TestKmsTLSOptions, self).setUp() # 1, create client with only tlsCAFile. - providers = copy.deepcopy(ALL_KMS_PROVIDERS) + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8002' providers['gcp']['endpoint'] = '127.0.0.1:8002' kms_tls_opts_ca_only = { @@ -1840,7 +1855,7 @@ def setUp(self): kms_tls_options=kms_tls_opts) self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. - providers = copy.deepcopy(providers) + providers: dict = copy.deepcopy(providers) providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8000' providers['gcp']['endpoint'] = '127.0.0.1:8000' providers['kmip']['endpoint'] = '127.0.0.1:8000' @@ -1849,7 +1864,7 @@ def setUp(self): kms_tls_options=kms_tls_opts_ca_only) self.addCleanup(self.client_encryption_expired.close) # 3, update endpoints to invalid host. - providers = copy.deepcopy(providers) + providers: dict = copy.deepcopy(providers) providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8001' providers['gcp']['endpoint'] = '127.0.0.1:8001' providers['kmip']['endpoint'] = '127.0.0.1:8001' diff --git a/test/test_examples.py b/test/test_examples.py index dcf9dd2de3..ed12c8bcc1 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -890,6 +890,7 @@ def update_employee_info(session): update_employee_info(session) employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') @@ -916,6 +917,7 @@ def run_transaction_with_retry(txn_func, session): run_transaction_with_retry(update_employee_info, session) employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') @@ -954,6 +956,7 @@ def _insert_employee_retry_commit(session): run_transaction_with_retry(_insert_employee_retry_commit, session) employee = employees.find_one({"employee": 4}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Active') @@ -1021,6 +1024,7 @@ def update_employee_info(session): # End Transactions Retry Example 3 employee = employees.find_one({"employee": 3}) + assert employee is not None self.assertIsNotNone(employee) self.assertEqual(employee['status'], 'Inactive') @@ -1089,6 +1093,9 @@ def test_causal_consistency(self): 'start': current_date}, session=s1) # End Causal Consistency Example 1 + assert s1.cluster_time is not None + assert s1.operation_time is not None + # Start Causal Consistency Example 2 with client.start_session(causal_consistency=True) as s2: s2.advance_cluster_time(s1.cluster_time) diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 6d7cc7ba3b..2208e97b42 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -24,6 +24,8 @@ from io import BytesIO +from pymongo.database import Database + sys.path[0:0] = [""] from bson.objectid import ObjectId @@ -47,6 +49,7 @@ class TestGridFileNoConnect(unittest.TestCase): """Test GridFile features on a client that does not connect. """ + db: Database @classmethod def setUpClass(cls): diff --git a/test/test_gridfs.py b/test/test_gridfs.py index d7d5a74e5f..3d8a7d8f6b 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -27,6 +27,7 @@ sys.path[0:0] = [""] from bson.binary import Binary +from pymongo.database import Database from pymongo.mongo_client import MongoClient from pymongo.errors import (ConfigurationError, NotPrimaryError, @@ -78,6 +79,7 @@ def run(self): class TestGridfsNoConnect(unittest.TestCase): + db: Database @classmethod def setUpClass(cls): @@ -89,6 +91,8 @@ def test_gridfs(self): class TestGridfs(IntegrationTest): + fs: gridfs.GridFS + alt: gridfs.GridFS @classmethod def setUpClass(cls): @@ -152,6 +156,7 @@ def test_empty_file(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) @@ -213,7 +218,7 @@ def test_threaded_reads(self): self.fs.put(b"hello", _id="test") threads = [] - results = [] + results: list = [] for i in range(10): threads.append(JustRead(self.fs, 10, results)) threads[i].start() @@ -396,6 +401,7 @@ def test_missing_length_iter(self): # Test fix that guards against PHP-237 self.fs.put(b"", filename="empty") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) f = self.fs.get_last_version(filename="empty") @@ -447,23 +453,32 @@ def test_delete_not_initialized(self): # but will still call __del__. cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ with self.assertRaises(TypeError): - cursor.__init__(self.db.fs.files, {}, {"_id": True}) + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore cursor.__del__() # no error def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) id1 = self.fs.put(b'test1', filename='file1') - self.assertEqual(b'test1', self.fs.find_one().read()) + res = self.fs.find_one() + assert res is not None + self.assertEqual(b'test1', res.read()) id2 = self.fs.put(b'test2', filename='file2', meta='data') - self.assertEqual(b'test1', self.fs.find_one(id1).read()) - self.assertEqual(b'test2', self.fs.find_one(id2).read()) - - self.assertEqual(b'test1', - self.fs.find_one({'filename': 'file1'}).read()) - - self.assertEqual('data', self.fs.find_one(id2).meta) + res1 = self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b'test1', res1.read()) + res2 = self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b'test2', res2.read()) + + res3 = self.fs.find_one({'filename': 'file1'}) + assert res3 is not None + self.assertEqual(b'test1', res3.read()) + + res4 = self.fs.find_one(id2) + assert res4 is not None + self.assertEqual('data', res4.meta) def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 499643f673..53f94991d3 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -77,6 +77,8 @@ def run(self): class TestGridfs(IntegrationTest): + fs: gridfs.GridFSBucket + alt: gridfs.GridFSBucket @classmethod def setUpClass(cls): @@ -123,6 +125,7 @@ def test_empty_file(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) raw = self.db.fs.files.find_one() + assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) @@ -208,7 +211,7 @@ def test_threaded_reads(self): self.fs.upload_from_stream("test", b"hello") threads = [] - results = [] + results: list = [] for i in range(10): threads.append(JustRead(self.fs, 10, results)) threads[i].start() @@ -322,6 +325,7 @@ def test_missing_length_iter(self): # Test fix that guards against PHP-237 self.fs.upload_from_stream("empty", b"") doc = self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None doc.pop("length") self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) fstr = self.fs.open_download_stream_by_name("empty") diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 86449db370..057a7b4841 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -55,6 +55,9 @@ def camel_to_snake(camel): class TestAllScenarios(IntegrationTest): + fs: gridfs.GridFSBucket + str_to_cmd: dict + @classmethod def setUpClass(cls): super(TestAllScenarios, cls).setUpClass() diff --git a/test/test_json_util.py b/test/test_json_util.py index dbf4f1c26a..16c7d96a2f 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -20,6 +20,8 @@ import sys import uuid +from typing import Any, List, MutableMapping + sys.path[0:0] = [""] from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON @@ -466,7 +468,7 @@ def test_cursor(self): db = self.db db.drop_collection("test") - docs = [ + docs: List[MutableMapping[str, Any]] = [ {'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code("function x() { return 1; }")}, diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 1fd82884f1..5c484fe334 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -35,7 +35,7 @@ 'max_staleness') -class TestAllScenarios(create_selection_tests(_TEST_PATH)): +class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass diff --git a/test/test_monitor.py b/test/test_monitor.py index 61e2057b52..ed0d4543f8 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -59,7 +59,7 @@ def test_cleanup_executors_on_client_del(self): # Each executor stores a weakref to itself in _EXECUTORS. executor_refs = [ - (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] # type: ignore del executors del client diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 0d925b04bf..4e513c5c69 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -16,6 +16,7 @@ import datetime import sys import time +from typing import Any import warnings sys.path[0:0] = [""] @@ -43,6 +44,7 @@ class TestCommandMonitoring(IntegrationTest): + listener: EventListener @classmethod @client_context.require_connection @@ -754,7 +756,7 @@ def test_non_bulk_writes(self): # delete_one self.listener.results.clear() - res = coll.delete_one({'x': 3}) + res2 = coll.delete_one({'x': 3}) results = self.listener.results started = results['started'][0] succeeded = results['succeeded'][0] @@ -1091,6 +1093,8 @@ def test_sensitive_commands(self): class TestGlobalListener(IntegrationTest): + listener: EventListener + saved_listeners: Any @classmethod @client_context.require_connection @@ -1167,13 +1171,13 @@ def test_server_heartbeat_event_repr(self): "") delta = 0.1 event = monitoring.ServerHeartbeatSucceededEvent( - delta, {'ok': 1}, connection_id) + delta, {'ok': 1}, connection_id) # type: ignore[arg-type] self.assertEqual( repr(event), "") event = monitoring.ServerHeartbeatFailedEvent( - delta, 'ERROR', connection_id) + delta, 'ERROR', connection_id) # type: ignore[arg-type] self.assertEqual( repr(event), "") event = monitoring.ServerDescriptionChangedEvent( - 'PREV', 'NEW', server_address, topology_id) + 'PREV', 'NEW', server_address, topology_id) # type: ignore[arg-type] self.assertEqual( repr(event), "") event = monitoring.TopologyDescriptionChangedEvent( - 'PREV', 'NEW', topology_id) + 'PREV', 'NEW', topology_id) # type: ignore[arg-type] self.assertEqual( repr(event), " Date: Tue, 8 Feb 2022 10:01:46 -0800 Subject: [PATCH 0578/2111] PYTHON-3043 Test mod_wsgi with Python 3.10 (#850) --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index bf96f220ff..d681815c12 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2448,12 +2448,12 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.7", "3.8", "3.9"] + python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ - platform: ubuntu-18.04 - python-version: ["3.8", "3.9"] + python-version: ["3.8", "3.9", "3.10"] mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: From 5578999a90e439fbca06fc0ffc98f4d04e96f7b4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 9 Feb 2022 06:44:28 -0600 Subject: [PATCH 0579/2111] PYTHON-1834 Use a code formatter (#852) --- bson/__init__.py | 382 ++-- bson/_helpers.py | 4 +- bson/binary.py | 58 +- bson/code.py | 10 +- bson/codec_options.py | 201 +- bson/dbref.py | 49 +- bson/decimal128.py | 56 +- bson/errors.py | 15 +- bson/int64.py | 1 + bson/json_util.py | 313 ++- bson/max_key.py | 1 + bson/min_key.py | 1 + bson/objectid.py | 32 +- bson/raw_bson.py | 14 +- bson/regex.py | 8 +- bson/son.py | 39 +- bson/timestamp.py | 12 +- doc/conf.py | 91 +- green_framework_test.py | 32 +- gridfs/__init__.py | 202 +- gridfs/grid_file.py | 251 +-- pymongo/__init__.py | 23 +- pymongo/aggregation.py | 58 +- pymongo/auth.py | 376 ++-- pymongo/auth_aws.py | 54 +- pymongo/bulk.py | 228 ++- pymongo/change_stream.py | 141 +- pymongo/client_options.py | 166 +- pymongo/client_session.py | 205 +- pymongo/collation.py | 80 +- pymongo/collection.py | 944 +++++---- pymongo/command_cursor.py | 109 +- pymongo/common.py | 439 ++--- pymongo/compression_support.py | 15 +- pymongo/cursor.py | 358 ++-- pymongo/daemon.py | 43 +- pymongo/database.py | 312 +-- pymongo/driver_info.py | 13 +- pymongo/encryption.py | 154 +- pymongo/encryption_options.py | 24 +- pymongo/errors.py | 55 +- pymongo/event_loggers.py | 117 +- pymongo/hello.py | 93 +- pymongo/helpers.py | 128 +- pymongo/max_staleness_selectors.py | 24 +- pymongo/message.py | 730 +++---- pymongo/mongo_client.py | 456 +++-- pymongo/monitor.py | 77 +- pymongo/monitoring.py | 362 ++-- pymongo/network.py | 136 +- pymongo/ocsp_cache.py | 20 +- pymongo/ocsp_support.py | 99 +- pymongo/operations.py | 150 +- pymongo/periodic_executor.py | 5 +- pymongo/pool.py | 606 +++--- pymongo/pyopenssl_context.py | 70 +- pymongo/read_concern.py | 11 +- pymongo/read_preferences.py | 203 +- pymongo/response.py | 21 +- pymongo/results.py | 23 +- pymongo/saslprep.py | 17 +- pymongo/server.py | 76 +- pymongo/server_api.py | 16 +- pymongo/server_description.py | 88 +- pymongo/server_selectors.py | 49 +- pymongo/settings.py | 47 +- pymongo/socket_checker.py | 12 +- pymongo/srv_resolver.py | 31 +- pymongo/ssl_support.py | 24 +- pymongo/topology.py | 294 +-- pymongo/topology_description.py | 209 +- pymongo/typings.py | 20 +- pymongo/uri_parser.py | 327 ++-- pymongo/write_concern.py | 14 +- setup.py | 170 +- test/__init__.py | 560 +++--- test/atlas/test_connection.py | 57 +- test/auth_aws/test_auth_aws.py | 15 +- test/crud_v2_format.py | 10 +- test/mockupdb/operations.py | 94 +- test/mockupdb/test_auth_recovering_member.py | 31 +- test/mockupdb/test_cluster_time.py | 116 +- test/mockupdb/test_cursor_namespace.py | 92 +- test/mockupdb/test_getmore_sharded.py | 24 +- test/mockupdb/test_handshake.py | 195 +- test/mockupdb/test_initial_ismaster.py | 16 +- test/mockupdb/test_list_indexes.py | 30 +- test/mockupdb/test_max_staleness.py | 30 +- test/mockupdb/test_mixed_version_sharded.py | 42 +- .../mockupdb/test_mongos_command_read_mode.py | 80 +- .../test_network_disconnect_primary.py | 63 +- test/mockupdb/test_op_msg.py | 290 +-- test/mockupdb/test_op_msg_read_preference.py | 89 +- test/mockupdb/test_query_read_pref_sharded.py | 44 +- test/mockupdb/test_reset_and_request_check.py | 42 +- test/mockupdb/test_rsghost.py | 35 +- test/mockupdb/test_slave_okay_rs.py | 36 +- test/mockupdb/test_slave_okay_sharded.py | 46 +- test/mockupdb/test_slave_okay_single.py | 53 +- test/mod_wsgi_test/test_client.py | 87 +- test/ocsp/test_ocsp.py | 25 +- test/performance/perf_test.py | 213 +-- test/pymongo_mocks.py | 100 +- test/qcheck.py | 60 +- test/test_auth.py | 575 +++--- test/test_auth_spec.py | 70 +- test/test_binary.py | 364 ++-- test/test_bson.py | 811 ++++---- test/test_bson_corpus.py | 156 +- test/test_bulk.py | 1033 +++++----- test/test_change_stream.py | 615 +++--- test/test_client.py | 999 +++++----- test/test_client_context.py | 48 +- test/test_cmap.py | 280 ++- test/test_code.py | 19 +- test/test_collation.py | 267 ++- test/test_collection.py | 1190 ++++++------ test/test_collection_management.py | 4 +- test/test_command_monitoring_legacy.py | 182 +- test/test_command_monitoring_unified.py | 14 +- test/test_common.py | 120 +- ...nnections_survive_primary_stepdown_spec.py | 44 +- test/test_create_entities.py | 71 +- test/test_crud_unified.py | 7 +- test/test_crud_v1.py | 129 +- test/test_cursor.py | 696 ++++--- test/test_custom_types.py | 549 +++--- test/test_data_lake.py | 46 +- test/test_database.py | 277 +-- test/test_dbref.py | 95 +- test/test_decimal128.py | 28 +- test/test_discovery_and_monitoring.py | 305 ++- test/test_dns.py | 116 +- test/test_encryption.py | 1684 +++++++++-------- test/test_errors.py | 40 +- test/test_examples.py | 928 +++++---- test/test_grid_file.py | 152 +- test/test_gridfs.py | 169 +- test/test_gridfs_bucket.py | 267 ++- test/test_gridfs_spec.py | 123 +- test/test_heartbeat_monitoring.py | 59 +- test/test_json_util.py | 429 +++-- test/test_load_balancer.py | 37 +- test/test_max_staleness.py | 41 +- test/test_mongos_load_balancing.py | 73 +- test/test_monitor.py | 27 +- test/test_monitoring.py | 1136 +++++------ test/test_objectid.py | 54 +- test/test_ocsp_cache.py | 32 +- test/test_pooling.py | 101 +- test/test_pymongo.py | 7 +- test/test_raw_bson.py | 128 +- test/test_read_concern.py | 72 +- test/test_read_preferences.py | 472 ++--- test/test_read_write_concern_spec.py | 272 ++- test/test_replica_set_reconfig.py | 136 +- test/test_retryable_reads.py | 126 +- test/test_retryable_writes.py | 446 ++--- test/test_retryable_writes_unified.py | 3 +- test/test_saslprep.py | 5 +- test/test_sdam_monitoring_spec.py | 232 +-- test/test_server.py | 9 +- test/test_server_description.py | 136 +- test/test_server_selection.py | 150 +- test/test_server_selection_in_window.py | 60 +- test/test_server_selection_rtt.py | 18 +- test/test_session.py | 639 +++---- test/test_sessions_unified.py | 3 +- test/test_son.py | 71 +- test/test_srv_polling.py | 146 +- test/test_ssl.py | 533 +++--- test/test_streaming_protocol.py | 145 +- test/test_threads.py | 19 +- test/test_timestamp.py | 9 +- test/test_topology.py | 810 ++++---- test/test_transactions.py | 284 +-- test/test_transactions_unified.py | 3 +- test/test_unified_format.py | 73 +- test/test_uri_parser.py | 728 ++++--- test/test_uri_spec.py | 137 +- test/test_versioned_api.py | 48 +- test/test_write_concern.py | 9 +- test/unicode/test_utf8.py | 13 +- test/unified_format.py | 870 ++++----- test/utils.py | 387 ++-- test/utils_selection_tests.py | 146 +- test/utils_spec_runner.py | 314 ++- test/version.py | 16 +- tools/clean.py | 2 + tools/fail_if_no_c.py | 1 + tools/ocsptest.py | 33 +- 191 files changed, 18246 insertions(+), 16796 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index e518cd91c9..9431909f9c 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -64,16 +64,39 @@ from codecs import utf_8_decode as _utf_8_decode # type: ignore[attr-defined] from codecs import utf_8_encode as _utf_8_encode # type: ignore[attr-defined] from collections import abc as _abc -from typing import (IO, TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Generator, - Iterator, List, Mapping, MutableMapping, NoReturn, - Sequence, Tuple, Type, TypeVar, Union, cast) - -from bson.binary import (ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, - OLD_UUID_SUBTYPE, STANDARD, UUID_SUBTYPE, Binary, - UuidRepresentation) +from typing import ( + IO, + TYPE_CHECKING, + Any, + BinaryIO, + Callable, + Dict, + Generator, + Iterator, + List, + Mapping, + MutableMapping, + NoReturn, + Sequence, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from bson.binary import ( + ALL_UUID_SUBTYPES, + CSHARP_LEGACY, + JAVA_LEGACY, + OLD_UUID_SUBTYPE, + STANDARD, + UUID_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code -from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, - _raw_document_class) +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, _raw_document_class from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -90,11 +113,13 @@ if TYPE_CHECKING: from array import array from mmap import mmap + from bson.raw_bson import RawBSONDocument try: from bson import _cbson # type: ignore[attr-defined] + _USE_C = True except ImportError: _USE_C = False @@ -104,27 +129,27 @@ EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) -BSONNUM = b"\x01" # Floating point -BSONSTR = b"\x02" # UTF-8 string -BSONOBJ = b"\x03" # Embedded document -BSONARR = b"\x04" # Array -BSONBIN = b"\x05" # Binary -BSONUND = b"\x06" # Undefined -BSONOID = b"\x07" # ObjectId -BSONBOO = b"\x08" # Boolean -BSONDAT = b"\x09" # UTC Datetime -BSONNUL = b"\x0A" # Null -BSONRGX = b"\x0B" # Regex -BSONREF = b"\x0C" # DBRef -BSONCOD = b"\x0D" # Javascript code -BSONSYM = b"\x0E" # Symbol -BSONCWS = b"\x0F" # Javascript code with scope -BSONINT = b"\x10" # 32bit int -BSONTIM = b"\x11" # Timestamp -BSONLON = b"\x12" # 64bit int -BSONDEC = b"\x13" # Decimal128 -BSONMIN = b"\xFF" # Min key -BSONMAX = b"\x7F" # Max key +BSONNUM = b"\x01" # Floating point +BSONSTR = b"\x02" # UTF-8 string +BSONOBJ = b"\x03" # Embedded document +BSONARR = b"\x04" # Array +BSONBIN = b"\x05" # Binary +BSONUND = b"\x06" # Undefined +BSONOID = b"\x07" # ObjectId +BSONBOO = b"\x08" # Boolean +BSONDAT = b"\x09" # UTC Datetime +BSONNUL = b"\x0A" # Null +BSONRGX = b"\x0B" # Regex +BSONREF = b"\x0C" # DBRef +BSONCOD = b"\x0D" # Javascript code +BSONSYM = b"\x0E" # Symbol +BSONCWS = b"\x0F" # Javascript code with scope +BSONINT = b"\x10" # 32bit int +BSONTIM = b"\x11" # Timestamp +BSONLON = b"\x12" # 64bit int +BSONDEC = b"\x13" # Decimal128 +BSONMIN = b"\xFF" # Min key +BSONMAX = b"\x7F" # Max key _UNPACK_FLOAT_FROM = struct.Struct(" Tuple[Any, memoryview]: def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" - raise InvalidBSON("Detected unknown BSON type %r for fieldname '%s'. Are " - "you using the latest driver version?" % ( - chr(element_type).encode(), element_name)) + raise InvalidBSON( + "Detected unknown BSON type %r for fieldname '%s'. Are " + "you using the latest driver version?" % (chr(element_type).encode(), element_name) + ) -def _get_int(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[int, int]: +def _get_int( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[int, int]: """Decode a BSON int32 to python int.""" return _UNPACK_INT_FROM(data, position)[0], position + 4 @@ -157,16 +185,19 @@ def _get_int(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dumm def _get_c_string(data: Any, view: Any, position: int, opts: Any) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) - return _utf_8_decode(view[position:end], - opts.unicode_decode_error_handler, True)[0], end + 1 + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 -def _get_float(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[float, int]: +def _get_float( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[float, int]: """Decode a BSON double to python float.""" return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 -def _get_string(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[str, int]: +def _get_string( + data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any +) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] position += 4 @@ -175,8 +206,7 @@ def _get_string(data: Any, view: Any, position: int, obj_end: int, opts: Any, du end = position + length - 1 if data[end] != 0: raise InvalidBSON("invalid end of string") - return _utf_8_decode(view[position:end], - opts.unicode_decode_error_handler, True)[0], end + 1 + return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: @@ -196,26 +226,30 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: return obj_size, end -def _get_object(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any) -> Tuple[Any, int]: +def _get_object( + data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any +) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) if _raw_document_class(opts.document_class): - return (opts.document_class(data[position:end + 1], opts), - position + obj_size) + return (opts.document_class(data[position : end + 1], opts), position + obj_size) obj = _elements_to_dict(data, view, position + 4, end, opts) position += obj_size # If DBRef validation fails, return a normal doc. - if (isinstance(obj.get('$ref'), str) and - "$id" in obj and - isinstance(obj.get('$db'), (str, type(None)))): - return (DBRef(obj.pop("$ref"), obj.pop("$id", None), - obj.pop("$db", None), obj), position) + if ( + isinstance(obj.get("$ref"), str) + and "$id" in obj + and isinstance(obj.get("$db"), (str, type(None))) + ): + return (DBRef(obj.pop("$ref"), obj.pop("$id", None), obj.pop("$db", None), obj), position) return obj, position -def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Any, int]: +def _get_array( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] end = position + size - 1 @@ -235,10 +269,11 @@ def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, ele while position < end: element_type = data[position] # Just skip the keys. - position = index(b'\x00', position) + 1 + position = index(b"\x00", position) + 1 try: value, position = getter[element_type]( - data, view, position, obj_end, opts, element_name) + data, view, position, obj_end, opts, element_name + ) except KeyError: _raise_unknown_type(element_type, element_name) @@ -250,11 +285,13 @@ def _get_array(data: Any, view: Any, position: int, obj_end: int, opts: Any, ele append(value) if position != end + 1: - raise InvalidBSON('bad array length') + raise InvalidBSON("bad array length") return result, position + 1 -def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any) -> Tuple[Union[Binary, uuid.UUID], int]: +def _get_binary( + data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any +) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) position += 5 @@ -266,15 +303,17 @@ def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, du length = length2 end = position + length if length < 0 or end > obj_end: - raise InvalidBSON('bad binary object length') + raise InvalidBSON("bad binary object length") # Convert UUID subtypes to native UUIDs. if subtype in ALL_UUID_SUBTYPES: uuid_rep = opts.uuid_representation binary_value = Binary(data[position:end], subtype) - if ((uuid_rep == UuidRepresentation.UNSPECIFIED) or - (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) or - (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD)): + if ( + (uuid_rep == UuidRepresentation.UNSPECIFIED) + or (subtype == UUID_SUBTYPE and uuid_rep != STANDARD) + or (subtype == OLD_UUID_SUBTYPE and uuid_rep == STANDARD) + ): return binary_value, end return binary_value.as_uuid(uuid_rep), end @@ -287,47 +326,57 @@ def _get_binary(data: Any, view: Any, position: int, obj_end: int, opts: Any, du return value, end -def _get_oid(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[ObjectId, int]: +def _get_oid( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[ObjectId, int]: """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 return ObjectId(data[position:end]), end -def _get_boolean(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[bool, int]: +def _get_boolean( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[bool, int]: """Decode a BSON true/false to python True/False.""" end = position + 1 boolean_byte = data[position:end] - if boolean_byte == b'\x00': + if boolean_byte == b"\x00": return False, end - elif boolean_byte == b'\x01': + elif boolean_byte == b"\x01": return True, end - raise InvalidBSON('invalid boolean value: %r' % boolean_byte) + raise InvalidBSON("invalid boolean value: %r" % boolean_byte) -def _get_date(data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any) -> Tuple[datetime.datetime, int]: +def _get_date( + data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any +) -> Tuple[datetime.datetime, int]: """Decode a BSON datetime to python datetime.datetime.""" - return _millis_to_datetime( - _UNPACK_LONG_FROM(data, position)[0], opts), position + 8 + return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 -def _get_code(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: +def _get_code( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) return Code(code), position -def _get_code_w_scope(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[Code, int]: +def _get_code_w_scope( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] - code, position = _get_string( - data, view, position + 4, code_end, opts, element_name) + code, position = _get_string(data, view, position + 4, code_end, opts, element_name) scope, position = _get_object(data, view, position, code_end, opts, element_name) if position != code_end: - raise InvalidBSON('scope outside of javascript code boundaries') + raise InvalidBSON("scope outside of javascript code boundaries") return Code(code, scope), position -def _get_regex(data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any) -> Tuple[Regex, int]: +def _get_regex( + data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any +) -> Tuple[Regex, int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) bson_flags, position = _get_c_string(data, view, position, opts) @@ -335,26 +384,33 @@ def _get_regex(data: Any, view: Any, position: int, dummy0: Any, opts: Any, dumm return bson_re, position -def _get_ref(data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str) -> Tuple[DBRef, int]: +def _get_ref( + data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str +) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" - collection, position = _get_string( - data, view, position, obj_end, opts, element_name) + collection, position = _get_string(data, view, position, obj_end, opts, element_name) oid, position = _get_oid(data, view, position, obj_end, opts, element_name) return DBRef(collection, oid), position -def _get_timestamp(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Timestamp, int]: +def _get_timestamp( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Timestamp, int]: """Decode a BSON timestamp to bson.timestamp.Timestamp.""" inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) return Timestamp(timestamp, inc), position + 8 -def _get_int64(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Int64, int]: +def _get_int64( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Int64, int]: """Decode a BSON int64 to bson.int64.Int64.""" return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 -def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any) -> Tuple[Decimal128, int]: +def _get_decimal128( + data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any +) -> Tuple[Decimal128, int]: """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" end = position + 16 return Decimal128.from_bid(data[position:end]), end @@ -366,7 +422,7 @@ def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: An # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions -_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]]= { +_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]] = { ord(BSONNUM): _get_float, ord(BSONSTR): _get_string, ord(BSONOBJ): _get_object, @@ -387,22 +443,26 @@ def _get_decimal128(data: Any, view: Any, position: int, dummy0: Any, dummy1: An ord(BSONLON): _get_int64, ord(BSONDEC): _get_decimal128, ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), - ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w)} + ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w), +} if _USE_C: + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: return _cbson._element_to_dict(data, position, obj_end, opts) + else: + def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 element_name, position = _get_c_string(data, view, position, opts) try: - value, position = _ELEMENT_GETTER[element_type](data, view, position, - obj_end, opts, - element_name) + value, position = _ELEMENT_GETTER[element_type]( + data, view, position, obj_end, opts, element_name + ) except KeyError: _raise_unknown_type(element_type, element_name) @@ -422,7 +482,9 @@ def _raw_to_dict(data: Any, position: int, obj_end: int, opts: Any, result: _T) return _elements_to_dict(data, view, position, obj_end, opts, result) -def _elements_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None) -> Any: +def _elements_to_dict( + data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None +) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() @@ -431,7 +493,7 @@ def _elements_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: A key, value, position = _element_to_dict(data, view, position, obj_end, opts) result[key] = value if position != obj_end: - raise InvalidBSON('bad object or element length') + raise InvalidBSON("bad object or element length") return result @@ -449,6 +511,8 @@ def _bson_to_dict(data: Any, opts: Any) -> Any: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) + + if _USE_C: _bson_to_dict = _cbson._bson_to_dict @@ -458,7 +522,7 @@ def _bson_to_dict(data: Any, opts: Any) -> Any: _PACK_LENGTH_SUBTYPE = struct.Struct(" Generator[bytes, None, None]: @@ -473,25 +537,22 @@ def gen_list_name() -> Generator[bytes, None, None]: counter = itertools.count(1000) while True: - yield (str(next(counter)) + "\x00").encode('utf8') + yield (str(next(counter)) + "\x00").encode("utf8") def _make_c_string_check(string: Union[str, bytes]) -> bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -502,8 +563,7 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " - "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -512,8 +572,7 @@ def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " - "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -531,9 +590,8 @@ def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): - return b'\x03' + name + value.raw - data = b"".join([_element_to_bson(key, val, check_keys, opts) - for key, val in value.items()]) + return b"\x03" + name + value.raw + data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in value.items()]) return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" @@ -542,27 +600,22 @@ def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> byt buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 - buf += _name_value_to_bson(b"$ref\x00", - value.collection, check_keys, opts) - buf += _name_value_to_bson(b"$id\x00", - value.id, check_keys, opts) + buf += _name_value_to_bson(b"$ref\x00", value.collection, check_keys, opts) + buf += _name_value_to_bson(b"$id\x00", value.id, check_keys, opts) if value.database is not None: - buf += _name_value_to_bson( - b"$db\x00", value.database, check_keys, opts) + buf += _name_value_to_bson(b"$db\x00", value.database, check_keys, opts) for key, val in value._DBRef__kwargs.items(): buf += _element_to_bson(key, val, check_keys, opts) buf += b"\x00" - buf[begin:begin + 4] = _PACK_INT(len(buf) - begin) + buf[begin : begin + 4] = _PACK_INT(len(buf) - begin) return bytes(buf) def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() - data = b"".join([_name_value_to_bson(next(lname), item, - check_keys, opts) - for item in value]) + data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) return b"\x04" + name + _PACK_INT(len(data) + 5) + data + b"\x00" @@ -586,6 +639,7 @@ def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: Any) -> bytes: binval = Binary.from_uuid(value, uuid_representation=uuid_representation) return _encode_binary(name, binval, dummy, opts) + def _encode_objectid(name: bytes, value: ObjectId, dummy: Any, dummy1: Any) -> bytes: """Encode bson.objectid.ObjectId.""" return b"\x07" + name + value.binary @@ -733,9 +787,14 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) -def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, - in_custom_call: bool = False, - in_fallback_call: bool = False) -> bytes: +def _name_value_to_bson( + name: bytes, + value: Any, + check_keys: bool, + opts: Any, + in_custom_call: bool = False, + in_fallback_call: bool = False, +) -> bytes: """Encode a single name, value pair.""" # First see if the type is already cached. KeyError will only ever # happen once per subtype. @@ -760,8 +819,8 @@ def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, custom_encoder = opts.type_registry._encoder_map.get(type(value)) if custom_encoder is not None: return _name_value_to_bson( - name, custom_encoder(value), check_keys, opts, - in_custom_call=True) + name, custom_encoder(value), check_keys, opts, in_custom_call=True + ) # Fourth, test each base type. This will only happen once for # a subtype of a supported base type. Unlike in the C-extensions, this @@ -779,18 +838,16 @@ def _name_value_to_bson(name: bytes, value: Any, check_keys: bool, opts: Any, fallback_encoder = opts.type_registry._fallback_encoder if not in_fallback_call and fallback_encoder is not None: return _name_value_to_bson( - name, fallback_encoder(value), check_keys, opts, - in_fallback_call=True) + name, fallback_encoder(value), check_keys, opts, in_fallback_call=True + ) - raise InvalidDocument( - "cannot encode object: %r, of type: %r" % (value, type(value))) + raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, " - "key was %r" % (key,)) + raise InvalidDocument("documents must have only string keys, " "key was %r" % (key,)) if check_keys: if key.startswith("$"): raise InvalidDocument("key %r must not start with '$'" % (key,)) @@ -808,17 +865,17 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) try: elements = [] if top_level and "_id" in doc: - elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], - check_keys, opts)) + elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) for key, value in doc.items(): if not top_level or key != "_id": - elements.append(_element_to_bson(key, value, - check_keys, opts)) + elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" + + if _USE_C: _dict_to_bson = _cbson._dict_to_bson @@ -829,26 +886,22 @@ def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: seconds = (millis - diff) // 1000 micros = diff * 1000 if opts.tz_aware: - dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, - microseconds=micros) + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) if opts.tzinfo: dt = dt.astimezone(opts.tzinfo) return dt else: - return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, - microseconds=micros) + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) def _datetime_to_millis(dtm: datetime.datetime) -> int: """Convert datetime to milliseconds since epoch UTC.""" if dtm.utcoffset() is not None: dtm = dtm - dtm.utcoffset() # type: ignore - return int(calendar.timegm(dtm.timetuple()) * 1000 + - dtm.microsecond // 1000) + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) -_CODEC_OPTIONS_TYPE_ERROR = TypeError( - "codec_options must be an instance of CodecOptions") +_CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") _DocumentIn = Mapping[str, Any] @@ -856,7 +909,11 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] -def encode(document: _DocumentIn, check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> bytes: +def encode( + document: _DocumentIn, + check_keys: bool = False, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, +) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). @@ -883,7 +940,9 @@ def encode(document: _DocumentIn, check_keys: bool = False, codec_options: Codec return _dict_to_bson(document, check_keys, codec_options) -def decode(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: +def decode( + data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> Dict[str, Any]: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -915,7 +974,9 @@ def decode(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OP return _bson_to_dict(data, codec_options) -def decode_all(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> List[Dict[str, Any]]: +def decode_all( + data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> List[Dict[str, Any]]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -957,14 +1018,10 @@ def decode_all(data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODE raise InvalidBSON("bad eoo") if use_raw: docs.append( - codec_options.document_class( - data[position:obj_end + 1], codec_options)) + codec_options.document_class(data[position : obj_end + 1], codec_options) + ) else: - docs.append(_elements_to_dict(data, - view, - position + 4, - obj_end, - codec_options)) + docs.append(_elements_to_dict(data, view, position + 4, obj_end, codec_options)) position += obj_size return docs except InvalidBSON: @@ -999,9 +1056,9 @@ def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[A def _convert_raw_document_lists_to_streams(document: Any) -> None: - cursor = document.get('cursor') + cursor = document.get("cursor") if cursor: - for key in ('firstBatch', 'nextBatch'): + for key in ("firstBatch", "nextBatch"): batch = cursor.get(key) if batch: stream = b"".join(doc.raw for doc in batch) @@ -1039,13 +1096,23 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - # Decode documents for internal use. from bson.raw_bson import RawBSONDocument + internal_codec_options = codec_options.with_options( - document_class=RawBSONDocument, type_registry=None) + document_class=RawBSONDocument, type_registry=None + ) _doc = _bson_to_dict(data, internal_codec_options) - return [_decode_selective(_doc, fields, codec_options,)] - - -def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: + return [ + _decode_selective( + _doc, + fields, + codec_options, + ) + ] + + +def decode_iter( + data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> Iterator[_DocumentOut]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1072,13 +1139,15 @@ def decode_iter(data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS end = len(data) - 1 while position < end: obj_size = _UNPACK_INT_FROM(data, position)[0] - elements = data[position:position + obj_size] + elements = data[position : position + obj_size] position += obj_size yield _bson_to_dict(elements, codec_options) -def decode_file_iter(file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Iterator[_DocumentOut]: +def decode_file_iter( + file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS +) -> Iterator[_DocumentOut]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1136,8 +1205,12 @@ class BSON(bytes): """ @classmethod - def encode(cls: Type["BSON"], document: _DocumentIn, check_keys: bool = False, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> "BSON": + def encode( + cls: Type["BSON"], + document: _DocumentIn, + check_keys: bool = False, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + ) -> "BSON": """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). @@ -1196,6 +1269,5 @@ def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[st def has_c() -> bool: - """Is the C extension installed? - """ + """Is the C extension installed?""" return _USE_C diff --git a/bson/_helpers.py b/bson/_helpers.py index 2d89789586..ee3b0f1099 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -25,7 +25,7 @@ def _setstate_slots(self: Any, state: Any) -> None: def _mangle_name(name: str, prefix: str) -> str: if name.startswith("__"): - prefix = "_"+prefix + prefix = "_" + prefix else: prefix = "" return prefix + name @@ -37,5 +37,5 @@ def _getstate_slots(self: Any) -> Mapping[Any, Any]: for name in self.__slots__: mangled_name = _mangle_name(name, prefix) if hasattr(self, mangled_name): - ret[mangled_name] = getattr(self, mangled_name) + ret[mangled_name] = getattr(self, mangled_name) return ret diff --git a/bson/binary.py b/bson/binary.py index de44d48174..e20bf87af3 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Tuple, Type, Union, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Tuple, Type, Union from uuid import UUID """Tools for representing BSON binary data. @@ -163,13 +163,15 @@ class UuidRepresentation: UuidRepresentation.STANDARD, UuidRepresentation.PYTHON_LEGACY, UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY) + UuidRepresentation.CSHARP_LEGACY, +) UUID_REPRESENTATION_NAMES = { - UuidRepresentation.UNSPECIFIED: 'UuidRepresentation.UNSPECIFIED', - UuidRepresentation.STANDARD: 'UuidRepresentation.STANDARD', - UuidRepresentation.PYTHON_LEGACY: 'UuidRepresentation.PYTHON_LEGACY', - UuidRepresentation.JAVA_LEGACY: 'UuidRepresentation.JAVA_LEGACY', - UuidRepresentation.CSHARP_LEGACY: 'UuidRepresentation.CSHARP_LEGACY'} + UuidRepresentation.UNSPECIFIED: "UuidRepresentation.UNSPECIFIED", + UuidRepresentation.STANDARD: "UuidRepresentation.STANDARD", + UuidRepresentation.PYTHON_LEGACY: "UuidRepresentation.PYTHON_LEGACY", + UuidRepresentation.JAVA_LEGACY: "UuidRepresentation.JAVA_LEGACY", + UuidRepresentation.CSHARP_LEGACY: "UuidRepresentation.CSHARP_LEGACY", +} MD5_SUBTYPE = 5 """BSON binary subtype for an MD5 hash. @@ -216,7 +218,11 @@ class Binary(bytes): _type_marker = 5 __subtype: int - def __new__(cls: Type["Binary"], data: Union[memoryview, bytes, "_mmap", "_array"], subtype: int = BINARY_SUBTYPE) -> "Binary": + def __new__( + cls: Type["Binary"], + data: Union[memoryview, bytes, "_mmap", "_array"], + subtype: int = BINARY_SUBTYPE, + ) -> "Binary": if not isinstance(subtype, int): raise TypeError("subtype must be an instance of int") if subtype >= 256 or subtype < 0: @@ -227,7 +233,9 @@ def __new__(cls: Type["Binary"], data: Union[memoryview, bytes, "_mmap", "_array return self @classmethod - def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD) -> "Binary": + def from_uuid( + cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD + ) -> "Binary": """Create a BSON Binary object from a Python UUID. Creates a :class:`~bson.binary.Binary` object from a @@ -251,8 +259,9 @@ def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRe raise TypeError("uuid must be an instance of uuid.UUID") if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value " - "from bson.binary.UuidRepresentation") + raise ValueError( + "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + ) if uuid_representation == UuidRepresentation.UNSPECIFIED: raise ValueError( @@ -261,7 +270,8 @@ def from_uuid(cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRe "converted to bson.Binary instances using " "bson.Binary.from_uuid() or a different UuidRepresentation " "can be configured. See the documentation for " - "UuidRepresentation for more information.") + "UuidRepresentation for more information." + ) subtype = OLD_UUID_SUBTYPE if uuid_representation == UuidRepresentation.PYTHON_LEGACY: @@ -296,12 +306,12 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI .. versionadded:: 3.11 """ if self.subtype not in ALL_UUID_SUBTYPES: - raise ValueError("cannot decode subtype %s as a uuid" % ( - self.subtype,)) + raise ValueError("cannot decode subtype %s as a uuid" % (self.subtype,)) if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value from " - "bson.binary.UuidRepresentation") + raise ValueError( + "uuid_representation must be a value from " "bson.binary.UuidRepresentation" + ) if uuid_representation == UuidRepresentation.UNSPECIFIED: raise ValueError("uuid_representation cannot be UNSPECIFIED") @@ -319,26 +329,26 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI if self.subtype == UUID_SUBTYPE: return UUID(bytes=self) - raise ValueError("cannot decode subtype %s to %s" % ( - self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation])) + raise ValueError( + "cannot decode subtype %s to %s" + % (self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation]) + ) @property def subtype(self) -> int: - """Subtype of this binary data. - """ + """Subtype of this binary data.""" return self.__subtype def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 data = super(Binary, self).__getnewargs__()[0] if not isinstance(data, bytes): - data = data.encode('latin-1') + data = data.encode("latin-1") return data, self.__subtype - def __eq__(self, other : Any) -> bool: + def __eq__(self, other: Any) -> bool: if isinstance(other, Binary): - return ((self.__subtype, bytes(self)) == - (other.subtype, bytes(other))) + return (self.__subtype, bytes(self)) == (other.subtype, bytes(other)) # We don't return NotImplemented here because if we did then # Binary("foo") == "foo" would return True, since Binary is a # subclass of str... diff --git a/bson/code.py b/bson/code.py index 6f4b1838d8..b732e82469 100644 --- a/bson/code.py +++ b/bson/code.py @@ -50,7 +50,12 @@ class Code(str): _type_marker = 13 __scope: Union[Mapping[str, Any], None] - def __new__(cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> "Code": + def __new__( + cls: Type["Code"], + code: Union[str, "Code"], + scope: Optional[Mapping[str, Any]] = None, + **kwargs: Any + ) -> "Code": if not isinstance(code, str): raise TypeError("code must be an instance of str") @@ -79,8 +84,7 @@ def __new__(cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping @property def scope(self) -> Optional[Mapping[str, Any]]: - """Scope dictionary for this instance or ``None``. - """ + """Scope dictionary for this instance or ``None``.""" return self.__scope def __repr__(self): diff --git a/bson/codec_options.py b/bson/codec_options.py index 27df48de8a..b43a0275d8 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -18,11 +18,26 @@ import datetime from collections import namedtuple from collections.abc import MutableMapping as _MutableMapping -from typing import (TYPE_CHECKING, Any, Callable, Dict, Generic, Iterable, - MutableMapping, Optional, Type, TypeVar, Union, cast) - -from bson.binary import (ALL_UUID_REPRESENTATIONS, UUID_REPRESENTATION_NAMES, - UuidRepresentation) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generic, + Iterable, + MutableMapping, + Optional, + Type, + TypeVar, + Union, + cast, +) + +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + UUID_REPRESENTATION_NAMES, + UuidRepresentation, +) # Import RawBSONDocument for type-checking only to avoid circular dependency. if TYPE_CHECKING: @@ -32,12 +47,13 @@ def _abstractproperty(func: Callable[..., Any]) -> property: return property(abc.abstractmethod(func)) + _RAW_BSON_DOCUMENT_MARKER = 101 def _raw_document_class(document_class: Any) -> bool: """Determine if a document_class is a RawBSONDocument class.""" - marker = getattr(document_class, '_type_marker', None) + marker = getattr(document_class, "_type_marker", None) return marker == _RAW_BSON_DOCUMENT_MARKER @@ -50,6 +66,7 @@ class TypeEncoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ + @_abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" @@ -70,6 +87,7 @@ class TypeDecoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ + @_abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" @@ -93,12 +111,14 @@ class TypeCodec(TypeEncoder, TypeDecoder): See :ref:`custom-type-type-codec` documentation for an example. """ + pass _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] + class TypeRegistry(object): """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after @@ -125,7 +145,12 @@ class TypeRegistry(object): :mod:`bson` can encode. See :ref:`fallback-encoder-callable` documentation for an example. """ - def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_encoder: Optional[_Fallback] = None) -> None: + + def __init__( + self, + type_codecs: Optional[Iterable[_Codec]] = None, + fallback_encoder: Optional[_Fallback] = None, + ) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder self._encoder_map = {} @@ -133,8 +158,7 @@ def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_enco if self._fallback_encoder is not None: if not callable(fallback_encoder): - raise TypeError("fallback_encoder %r is not a callable" % ( - fallback_encoder)) + raise TypeError("fallback_encoder %r is not a callable" % (fallback_encoder)) for codec in self.__type_codecs: is_valid_codec = False @@ -147,36 +171,49 @@ def __init__(self, type_codecs: Optional[Iterable[_Codec]] = None, fallback_enco self._decoder_map[codec.bson_type] = codec.transform_bson if not is_valid_codec: raise TypeError( - "Expected an instance of %s, %s, or %s, got %r instead" % ( - TypeEncoder.__name__, TypeDecoder.__name__, - TypeCodec.__name__, codec)) + "Expected an instance of %s, %s, or %s, got %r instead" + % (TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec) + ) def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES + for pytype in _BUILT_IN_TYPES: if issubclass(cast(TypeCodec, codec).python_type, pytype): - err_msg = ("TypeEncoders cannot change how built-in types are " - "encoded (encoder %s transforms type %s)" % - (codec, pytype)) + err_msg = ( + "TypeEncoders cannot change how built-in types are " + "encoded (encoder %s transforms type %s)" % (codec, pytype) + ) raise TypeError(err_msg) def __repr__(self): - return ('%s(type_codecs=%r, fallback_encoder=%r)' % ( - self.__class__.__name__, self.__type_codecs, - self._fallback_encoder)) + return "%s(type_codecs=%r, fallback_encoder=%r)" % ( + self.__class__.__name__, + self.__type_codecs, + self._fallback_encoder, + ) def __eq__(self, other: Any) -> Any: if not isinstance(other, type(self)): return NotImplemented - return ((self._decoder_map == other._decoder_map) and - (self._encoder_map == other._encoder_map) and - (self._fallback_encoder == other._fallback_encoder)) + return ( + (self._decoder_map == other._decoder_map) + and (self._encoder_map == other._encoder_map) + and (self._fallback_encoder == other._fallback_encoder) + ) _options_base = namedtuple( # type: ignore - 'CodecOptions', - ('document_class', 'tz_aware', 'uuid_representation', - 'unicode_decode_error_handler', 'tzinfo', 'type_registry')) + "CodecOptions", + ( + "document_class", + "tz_aware", + "uuid_representation", + "unicode_decode_error_handler", + "tzinfo", + "type_registry", + ), +) class CodecOptions(_options_base): @@ -255,32 +292,35 @@ class CodecOptions(_options_base): retrieved from the server will be modified in the client application and stored back to the server. """ - def __new__(cls: Type["CodecOptions"], document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, - tz_aware: bool = False, - uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: Optional[str] = "strict", - tzinfo: Optional[datetime.tzinfo] = None, - type_registry: Optional[TypeRegistry] = None) -> "CodecOptions": - if not (issubclass(document_class, _MutableMapping) or - _raw_document_class(document_class)): - raise TypeError("document_class must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.abc.MutableMapping") + + def __new__( + cls: Type["CodecOptions"], + document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: Optional[str] = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + ) -> "CodecOptions": + if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "sublass of collections.abc.MutableMapping" + ) if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError("uuid_representation must be a value " - "from bson.binary.UuidRepresentation") + raise ValueError( + "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + ) if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore - raise ValueError("unicode_decode_error_handler must be a string " - "or None") + raise ValueError("unicode_decode_error_handler must be a string " "or None") if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError( - "tzinfo must be an instance of datetime.tzinfo") + raise TypeError("tzinfo must be an instance of datetime.tzinfo") if not tz_aware: - raise ValueError( - "cannot specify tzinfo without also setting tz_aware=True") + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") type_registry = type_registry or TypeRegistry() @@ -288,38 +328,53 @@ def __new__(cls: Type["CodecOptions"], document_class: Union[Type[MutableMapping raise TypeError("type_registry must be an instance of TypeRegistry") return tuple.__new__( - cls, (document_class, tz_aware, uuid_representation, - unicode_decode_error_handler, tzinfo, type_registry)) + cls, + ( + document_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + ), + ) def _arguments_repr(self) -> str: """Representation of the arguments used to create this object.""" - document_class_repr = ( - 'dict' if self.document_class is dict - else repr(self.document_class)) - - uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation, - self.uuid_representation) - - return ('document_class=%s, tz_aware=%r, uuid_representation=%s, ' - 'unicode_decode_error_handler=%r, tzinfo=%r, ' - 'type_registry=%r' % - (document_class_repr, self.tz_aware, uuid_rep_repr, - self.unicode_decode_error_handler, self.tzinfo, - self.type_registry)) + document_class_repr = "dict" if self.document_class is dict else repr(self.document_class) + + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) + + return ( + "document_class=%s, tz_aware=%r, uuid_representation=%s, " + "unicode_decode_error_handler=%r, tzinfo=%r, " + "type_registry=%r" + % ( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + ) + ) def _options_dict(self) -> Dict[str, Any]: """Dictionary of the arguments used to create this object.""" # TODO: PYTHON-2442 use _asdict() instead return { - 'document_class': self.document_class, - 'tz_aware': self.tz_aware, - 'uuid_representation': self.uuid_representation, - 'unicode_decode_error_handler': self.unicode_decode_error_handler, - 'tzinfo': self.tzinfo, - 'type_registry': self.type_registry} + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + } def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self._arguments_repr()) + return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) def with_options(self, **kwargs: Any) -> "CodecOptions": """Make a copy of this CodecOptions, overriding some options:: @@ -344,12 +399,16 @@ def with_options(self, **kwargs: Any) -> "CodecOptions": def _parse_codec_options(options: Any) -> CodecOptions: """Parse BSON codec options.""" kwargs = {} - for k in set(options) & {'document_class', 'tz_aware', - 'uuidrepresentation', - 'unicode_decode_error_handler', 'tzinfo', - 'type_registry'}: - if k == 'uuidrepresentation': - kwargs['uuid_representation'] = options[k] + for k in set(options) & { + "document_class", + "tz_aware", + "uuidrepresentation", + "unicode_decode_error_handler", + "tzinfo", + "type_registry", + }: + if k == "uuidrepresentation": + kwargs["uuid_representation"] = options[k] else: kwargs[k] = options[k] return CodecOptions(**kwargs) diff --git a/bson/dbref.py b/bson/dbref.py index 92a3a68367..773c95f59d 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -22,15 +22,22 @@ class DBRef(object): - """A reference to a document stored in MongoDB. - """ + """A reference to a document stored in MongoDB.""" + __slots__ = "__collection", "__id", "__database", "__kwargs" __getstate__ = _getstate_slots __setstate__ = _setstate_slots # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen. _type_marker = 100 - def __init__(self, collection: str, id: Any, database: Optional[str] = None, _extra: Mapping[str, Any] = {}, **kwargs: Any) -> None: + def __init__( + self, + collection: str, + id: Any, + database: Optional[str] = None, + _extra: Mapping[str, Any] = {}, + **kwargs: Any + ) -> None: """Initialize a new :class:`DBRef`. Raises :class:`TypeError` if `collection` or `database` is not @@ -61,14 +68,12 @@ def __init__(self, collection: str, id: Any, database: Optional[str] = None, _ex @property def collection(self) -> str: - """Get the name of this DBRef's collection. - """ + """Get the name of this DBRef's collection.""" return self.__collection @property def id(self) -> Any: - """Get this DBRef's _id. - """ + """Get this DBRef's _id.""" return self.__id @property @@ -90,27 +95,22 @@ def as_doc(self) -> SON[str, Any]: Generally not needed by application developers """ - doc = SON([("$ref", self.collection), - ("$id", self.id)]) + doc = SON([("$ref", self.collection), ("$id", self.id)]) if self.database is not None: doc["$db"] = self.database doc.update(self.__kwargs) return doc def __repr__(self): - extra = "".join([", %s=%r" % (k, v) - for k, v in self.__kwargs.items()]) + extra = "".join([", %s=%r" % (k, v) for k, v in self.__kwargs.items()]) if self.database is None: return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, - self.database, extra) + return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): - us = (self.__database, self.__collection, - self.__id, self.__kwargs) - them = (other.__database, other.__collection, - other.__id, other.__kwargs) + us = (self.__database, self.__collection, self.__id, self.__kwargs) + them = (other.__database, other.__collection, other.__id, other.__kwargs) return us == them return NotImplemented @@ -119,12 +119,15 @@ def __ne__(self, other: Any) -> bool: def __hash__(self) -> int: """Get a hash value for this :class:`DBRef`.""" - return hash((self.__collection, self.__id, self.__database, - tuple(sorted(self.__kwargs.items())))) + return hash( + (self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items()))) + ) def __deepcopy__(self, memo: Any) -> "DBRef": """Support function for `copy.deepcopy()`.""" - return DBRef(deepcopy(self.__collection, memo), - deepcopy(self.__id, memo), - deepcopy(self.__database, memo), - deepcopy(self.__kwargs, memo)) + return DBRef( + deepcopy(self.__collection, memo), + deepcopy(self.__id, memo), + deepcopy(self.__database, memo), + deepcopy(self.__kwargs, memo), + ) diff --git a/bson/decimal128.py b/bson/decimal128.py index bbf5d326e4..ab2d1a24ac 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -31,8 +31,8 @@ _MAX_DIGITS = 34 _INF = 0x7800000000000000 -_NAN = 0x7c00000000000000 -_SNAN = 0x7e00000000000000 +_NAN = 0x7C00000000000000 +_SNAN = 0x7E00000000000000 _SIGN = 0x8000000000000000 _NINF = (_INF + _SIGN, 0) @@ -43,16 +43,14 @@ _PSNAN = (_SNAN, 0) _CTX_OPTIONS = { - 'prec': _MAX_DIGITS, - 'rounding': decimal.ROUND_HALF_EVEN, - 'Emin': _EXPONENT_MIN, - 'Emax': _EXPONENT_MAX, - 'capitals': 1, - 'flags': [], - 'traps': [decimal.InvalidOperation, - decimal.Overflow, - decimal.Inexact], - 'clamp': 1 + "prec": _MAX_DIGITS, + "rounding": decimal.ROUND_HALF_EVEN, + "Emin": _EXPONENT_MIN, + "Emax": _EXPONENT_MAX, + "capitals": 1, + "flags": [], + "traps": [decimal.InvalidOperation, decimal.Overflow, decimal.Inexact], + "clamp": 1, } _DEC128_CTX = decimal.Context(**_CTX_OPTIONS.copy()) # type: ignore @@ -64,7 +62,7 @@ def create_decimal128_context() -> decimal.Context: for working with IEEE-754 128-bit decimal floating point values. """ opts = _CTX_OPTIONS.copy() - opts['traps'] = [] + opts["traps"] = [] return decimal.Context(**opts) # type: ignore @@ -105,9 +103,9 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: biased_exponent = exponent + _EXPONENT_BIAS if high >> 49 == 1: - high = high & 0x7fffffffffff + high = high & 0x7FFFFFFFFFFF high |= _EXPONENT_MASK - high |= (biased_exponent & 0x3fff) << 47 + high |= (biased_exponent & 0x3FFF) << 47 else: high |= biased_exponent << 49 @@ -211,7 +209,8 @@ class Decimal128(object): >>> Decimal('NaN') == Decimal('NaN') False """ - __slots__ = ('__high', '__low') + + __slots__ = ("__high", "__low") _type_marker = 19 @@ -220,9 +219,11 @@ def __init__(self, value: _VALUE_OPTIONS) -> None: self.__high, self.__low = _decimal_to_128(value) elif isinstance(value, (list, tuple)): if len(value) != 2: - raise ValueError('Invalid size for creation of Decimal128 ' - 'from list or tuple. Must have exactly 2 ' - 'elements.') + raise ValueError( + "Invalid size for creation of Decimal128 " + "from list or tuple. Must have exactly 2 " + "elements." + ) self.__high, self.__low = value # type: ignore else: raise TypeError("Cannot convert %r to Decimal128" % (value,)) @@ -236,25 +237,25 @@ def to_decimal(self) -> decimal.Decimal: sign = 1 if (high & _SIGN) else 0 if (high & _SNAN) == _SNAN: - return decimal.Decimal((sign, (), 'N')) # type: ignore + return decimal.Decimal((sign, (), "N")) # type: ignore elif (high & _NAN) == _NAN: - return decimal.Decimal((sign, (), 'n')) # type: ignore + return decimal.Decimal((sign, (), "n")) # type: ignore elif (high & _INF) == _INF: - return decimal.Decimal((sign, (), 'F')) # type: ignore + return decimal.Decimal((sign, (), "F")) # type: ignore if (high & _EXPONENT_MASK) == _EXPONENT_MASK: - exponent = ((high & 0x1fffe00000000000) >> 47) - _EXPONENT_BIAS + exponent = ((high & 0x1FFFE00000000000) >> 47) - _EXPONENT_BIAS return decimal.Decimal((sign, (0,), exponent)) else: - exponent = ((high & 0x7fff800000000000) >> 49) - _EXPONENT_BIAS + exponent = ((high & 0x7FFF800000000000) >> 49) - _EXPONENT_BIAS arr = bytearray(15) - mask = 0x00000000000000ff + mask = 0x00000000000000FF for i in range(14, 6, -1): arr[i] = (low & mask) >> ((14 - i) << 3) mask = mask << 8 - mask = 0x00000000000000ff + mask = 0x00000000000000FF for i in range(6, 0, -1): arr[i] = (high & mask) >> ((6 - i) << 3) mask = mask << 8 @@ -263,8 +264,7 @@ def to_decimal(self) -> decimal.Decimal: arr[0] = (high & mask) >> 48 # cdecimal only accepts a tuple for digits. - digits = tuple( - int(digit) for digit in str(int.from_bytes(arr, 'big'))) + digits = tuple(int(digit) for digit in str(int.from_bytes(arr, "big"))) with decimal.localcontext(_DEC128_CTX) as ctx: return ctx.create_decimal((sign, digits, exponent)) diff --git a/bson/errors.py b/bson/errors.py index 9bdb741371..7333b27b58 100644 --- a/bson/errors.py +++ b/bson/errors.py @@ -16,25 +16,20 @@ class BSONError(Exception): - """Base class for all BSON exceptions. - """ + """Base class for all BSON exceptions.""" class InvalidBSON(BSONError): - """Raised when trying to create a BSON object from invalid data. - """ + """Raised when trying to create a BSON object from invalid data.""" class InvalidStringData(BSONError): - """Raised when trying to encode a string containing non-UTF8 data. - """ + """Raised when trying to encode a string containing non-UTF8 data.""" class InvalidDocument(BSONError): - """Raised when trying to create a BSON object from an invalid document. - """ + """Raised when trying to create a BSON object from an invalid document.""" class InvalidId(BSONError): - """Raised when trying to create an ObjectId from invalid data. - """ + """Raised when trying to create an ObjectId from invalid data.""" diff --git a/bson/int64.py b/bson/int64.py index f1424c8812..ed4dfa5661 100644 --- a/bson/int64.py +++ b/bson/int64.py @@ -27,6 +27,7 @@ class Int64(int): :Parameters: - `value`: the numeric value to represent """ + __slots__ = () _type_marker = 18 diff --git a/bson/json_util.py b/bson/json_util.py index d7f501f120..3cdf701f70 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -92,13 +92,11 @@ import math import re import uuid -from typing import (Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, - cast) +from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, cast import bson from bson import EPOCH_AWARE -from bson.binary import (ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, - UuidRepresentation) +from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions from bson.dbref import DBRef @@ -248,64 +246,64 @@ class JSONOptions(CodecOptions): .. versionchanged:: 4.0 Changed default value of `tz_aware` to False. """ + json_mode: int strict_number_long: bool datetime_representation: int strict_uuid: bool - def __new__(cls: Type["JSONOptions"], - strict_number_long: Optional[bool] = None, - datetime_representation: Optional[int] = None, - strict_uuid: Optional[bool] = None, - json_mode: int = JSONMode.RELAXED, - *args: Any, **kwargs: Any) -> "JSONOptions": + def __new__( + cls: Type["JSONOptions"], + strict_number_long: Optional[bool] = None, + datetime_representation: Optional[int] = None, + strict_uuid: Optional[bool] = None, + json_mode: int = JSONMode.RELAXED, + *args: Any, + **kwargs: Any + ) -> "JSONOptions": kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) - if datetime_representation not in (DatetimeRepresentation.LEGACY, - DatetimeRepresentation.NUMBERLONG, - DatetimeRepresentation.ISO8601, - None): + if datetime_representation not in ( + DatetimeRepresentation.LEGACY, + DatetimeRepresentation.NUMBERLONG, + DatetimeRepresentation.ISO8601, + None, + ): raise ValueError( "JSONOptions.datetime_representation must be one of LEGACY, " - "NUMBERLONG, or ISO8601 from DatetimeRepresentation.") + "NUMBERLONG, or ISO8601 from DatetimeRepresentation." + ) self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) - if json_mode not in (JSONMode.LEGACY, - JSONMode.RELAXED, - JSONMode.CANONICAL): + if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " - "or CANONICAL from JSONMode.") + "or CANONICAL from JSONMode." + ) self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: if strict_number_long: - raise ValueError( - "Cannot specify strict_number_long=True with" - " JSONMode.RELAXED") - if datetime_representation not in (None, - DatetimeRepresentation.ISO8601): + raise ValueError("Cannot specify strict_number_long=True with" " JSONMode.RELAXED") + if datetime_representation not in (None, DatetimeRepresentation.ISO8601): raise ValueError( "datetime_representation must be DatetimeRepresentation." - "ISO8601 or omitted with JSONMode.RELAXED") + "ISO8601 or omitted with JSONMode.RELAXED" + ) if strict_uuid not in (None, True): - raise ValueError( - "Cannot specify strict_uuid=False with JSONMode.RELAXED") + raise ValueError("Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = False self.datetime_representation = DatetimeRepresentation.ISO8601 self.strict_uuid = True elif self.json_mode == JSONMode.CANONICAL: if strict_number_long not in (None, True): - raise ValueError( - "Cannot specify strict_number_long=False with" - " JSONMode.RELAXED") - if datetime_representation not in ( - None, DatetimeRepresentation.NUMBERLONG): + raise ValueError("Cannot specify strict_number_long=False with" " JSONMode.RELAXED") + if datetime_representation not in (None, DatetimeRepresentation.NUMBERLONG): raise ValueError( "datetime_representation must be DatetimeRepresentation." - "NUMBERLONG or omitted with JSONMode.RELAXED") + "NUMBERLONG or omitted with JSONMode.RELAXED" + ) if strict_uuid not in (None, True): - raise ValueError( - "Cannot specify strict_uuid=False with JSONMode.RELAXED") + raise ValueError("Cannot specify strict_uuid=False with JSONMode.RELAXED") self.strict_number_long = True self.datetime_representation = DatetimeRepresentation.NUMBERLONG self.strict_uuid = True @@ -322,23 +320,30 @@ def __new__(cls: Type["JSONOptions"], return self def _arguments_repr(self) -> str: - return ('strict_number_long=%r, ' - 'datetime_representation=%r, ' - 'strict_uuid=%r, json_mode=%r, %s' % ( - self.strict_number_long, - self.datetime_representation, - self.strict_uuid, - self.json_mode, - super(JSONOptions, self)._arguments_repr())) + return ( + "strict_number_long=%r, " + "datetime_representation=%r, " + "strict_uuid=%r, json_mode=%r, %s" + % ( + self.strict_number_long, + self.datetime_representation, + self.strict_uuid, + self.json_mode, + super(JSONOptions, self)._arguments_repr(), + ) + ) def _options_dict(self) -> Dict[Any, Any]: # TODO: PYTHON-2442 use _asdict() instead options_dict = super(JSONOptions, self)._options_dict() - options_dict.update({ - 'strict_number_long': self.strict_number_long, - 'datetime_representation': self.datetime_representation, - 'strict_uuid': self.strict_uuid, - 'json_mode': self.json_mode}) + options_dict.update( + { + "strict_number_long": self.strict_number_long, + "datetime_representation": self.datetime_representation, + "strict_uuid": self.strict_uuid, + "json_mode": self.json_mode, + } + ) return options_dict def with_options(self, **kwargs: Any) -> "JSONOptions": @@ -355,8 +360,7 @@ def with_options(self, **kwargs: Any) -> "JSONOptions": .. versionadded:: 3.12 """ opts = self._options_dict() - for opt in ('strict_number_long', 'datetime_representation', - 'strict_uuid', 'json_mode'): + for opt in ("strict_number_long", "datetime_representation", "strict_uuid", "json_mode"): opts[opt] = kwargs.get(opt, getattr(self, opt)) opts.update(kwargs) return JSONOptions(**opts) @@ -443,8 +447,7 @@ def loads(s: str, *args: Any, **kwargs: Any) -> Any: Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) - kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook( - pairs, json_options) + kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) @@ -452,10 +455,9 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> """Recursive helper method that converts BSON types so they can be converted into json. """ - if hasattr(obj, 'items'): - return SON(((k, _json_convert(v, json_options)) - for k, v in obj.items())) - elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes)): + if hasattr(obj, "items"): + return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): return list((_json_convert(v, json_options) for v in obj)) try: return default(obj, json_options) @@ -463,16 +465,20 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> return obj -def object_pairs_hook(pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: +def object_pairs_hook( + pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS +) -> Any: return object_hook(json_options.document_class(pairs), json_options) def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if "$oid" in dct: return _parse_canonical_oid(dct) - if (isinstance(dct.get('$ref'), str) and - "$id" in dct and - isinstance(dct.get('$db'), (str, type(None)))): + if ( + isinstance(dct.get("$ref"), str) + and "$id" in dct + and isinstance(dct.get("$db"), (str, type(None))) + ): return _parse_canonical_dbref(dct) if "$date" in dct: return _parse_canonical_datetime(dct, json_options) @@ -528,9 +534,9 @@ def _parse_legacy_regex(doc: Any) -> Any: def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: - raise TypeError('Bad $uuid, extra field(s): %s' % (doc,)) + raise TypeError("Bad $uuid, extra field(s): %s" % (doc,)) if not isinstance(doc["$uuid"], str): - raise TypeError('$uuid must be a string: %s' % (doc,)) + raise TypeError("$uuid must be a string: %s" % (doc,)) if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: return Binary.from_uuid(uuid.UUID(doc["$uuid"])) else: @@ -562,7 +568,7 @@ def _parse_legacy_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, u if isinstance(doc["$type"], int): doc["$type"] = "%02x" % doc["$type"] subtype = int(doc["$type"], 16) - if subtype >= 0xffffff80: # Handle mongoexport values + if subtype >= 0xFFFFFF80: # Handle mongoexport values subtype = int(doc["$type"][6:], 16) data = base64.b64decode(doc["$binary"].encode()) return _binary_or_uuid(data, subtype, json_options) @@ -573,13 +579,13 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary b64 = binary["base64"] subtype = binary["subType"] if not isinstance(b64, str): - raise TypeError('$binary base64 must be a string: %s' % (doc,)) + raise TypeError("$binary base64 must be a string: %s" % (doc,)) if not isinstance(subtype, str) or len(subtype) > 2: - raise TypeError('$binary subType must be a string at most 2 ' - 'characters: %s' % (doc,)) + raise TypeError("$binary subType must be a string at most 2 " "characters: %s" % (doc,)) if len(binary) != 2: - raise TypeError('$binary must include only "base64" and "subType" ' - 'components: %s' % (doc,)) + raise TypeError( + '$binary must include only "base64" and "subType" ' "components: %s" % (doc,) + ) data = base64.b64decode(b64.encode()) return _binary_or_uuid(data, int(subtype, 16), json_options) @@ -589,46 +595,46 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: - raise TypeError('Bad $date, extra field(s): %s' % (doc,)) + raise TypeError("Bad $date, extra field(s): %s" % (doc,)) # mongoexport 2.6 and newer if isinstance(dtm, str): # Parse offset - if dtm[-1] == 'Z': + if dtm[-1] == "Z": dt = dtm[:-1] - offset = 'Z' - elif dtm[-6] in ('+', '-') and dtm[-3] == ':': + offset = "Z" + elif dtm[-6] in ("+", "-") and dtm[-3] == ":": # (+|-)HH:MM dt = dtm[:-6] offset = dtm[-6:] - elif dtm[-5] in ('+', '-'): + elif dtm[-5] in ("+", "-"): # (+|-)HHMM dt = dtm[:-5] offset = dtm[-5:] - elif dtm[-3] in ('+', '-'): + elif dtm[-3] in ("+", "-"): # (+|-)HH dt = dtm[:-3] offset = dtm[-3:] else: dt = dtm - offset = '' + offset = "" # Parse the optional factional seconds portion. - dot_index = dt.rfind('.') + dot_index = dt.rfind(".") microsecond = 0 if dot_index != -1: microsecond = int(float(dt[dot_index:]) * 1000000) dt = dt[:dot_index] - aware = datetime.datetime.strptime( - dt, "%Y-%m-%dT%H:%M:%S").replace(microsecond=microsecond, - tzinfo=utc) + aware = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S").replace( + microsecond=microsecond, tzinfo=utc + ) - if offset and offset != 'Z': + if offset and offset != "Z": if len(offset) == 6: - hours, minutes = offset[1:].split(':') - secs = (int(hours) * 3600 + int(minutes) * 60) + hours, minutes = offset[1:].split(":") + secs = int(hours) * 3600 + int(minutes) * 60 elif len(offset) == 5: - secs = (int(offset[1:3]) * 3600 + int(offset[3:]) * 60) + secs = int(offset[1:3]) * 3600 + int(offset[3:]) * 60 elif len(offset) == 3: secs = int(offset[1:3]) * 3600 if offset[0] == "-": @@ -647,133 +653,130 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d def _parse_canonical_oid(doc: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: - raise TypeError('Bad $oid, extra field(s): %s' % (doc,)) - return ObjectId(doc['$oid']) + raise TypeError("Bad $oid, extra field(s): %s" % (doc,)) + return ObjectId(doc["$oid"]) def _parse_canonical_symbol(doc: Any) -> str: """Decode a JSON symbol to Python string.""" - symbol = doc['$symbol'] + symbol = doc["$symbol"] if len(doc) != 1: - raise TypeError('Bad $symbol, extra field(s): %s' % (doc,)) + raise TypeError("Bad $symbol, extra field(s): %s" % (doc,)) return str(symbol) def _parse_canonical_code(doc: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: - if key not in ('$code', '$scope'): - raise TypeError('Bad $code, extra field(s): %s' % (doc,)) - return Code(doc['$code'], scope=doc.get('$scope')) + if key not in ("$code", "$scope"): + raise TypeError("Bad $code, extra field(s): %s" % (doc,)) + return Code(doc["$code"], scope=doc.get("$scope")) def _parse_canonical_regex(doc: Any) -> Regex: """Decode a JSON regex to bson.regex.Regex.""" - regex = doc['$regularExpression'] + regex = doc["$regularExpression"] if len(doc) != 1: - raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,)) + raise TypeError("Bad $regularExpression, extra field(s): %s" % (doc,)) if len(regex) != 2: - raise TypeError('Bad $regularExpression must include only "pattern"' - 'and "options" components: %s' % (doc,)) - opts = regex['options'] + raise TypeError( + 'Bad $regularExpression must include only "pattern"' + 'and "options" components: %s' % (doc,) + ) + opts = regex["options"] if not isinstance(opts, str): - raise TypeError('Bad $regularExpression options, options must be ' - 'string, was type %s' % (type(opts))) - return Regex(regex['pattern'], opts) + raise TypeError( + "Bad $regularExpression options, options must be " "string, was type %s" % (type(opts)) + ) + return Regex(regex["pattern"], opts) def _parse_canonical_dbref(doc: Any) -> DBRef: """Decode a JSON DBRef to bson.dbref.DBRef.""" - return DBRef(doc.pop('$ref'), doc.pop('$id'), - database=doc.pop('$db', None), **doc) + return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc) def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" - dbref = doc['$dbPointer'] + dbref = doc["$dbPointer"] if len(doc) != 1: - raise TypeError('Bad $dbPointer, extra field(s): %s' % (doc,)) + raise TypeError("Bad $dbPointer, extra field(s): %s" % (doc,)) if isinstance(dbref, DBRef): dbref_doc = dbref.as_doc() # DBPointer must not contain $db in its value. if dbref.database is not None: - raise TypeError( - 'Bad $dbPointer, extra field $db: %s' % (dbref_doc,)) + raise TypeError("Bad $dbPointer, extra field $db: %s" % (dbref_doc,)) if not isinstance(dbref.id, ObjectId): - raise TypeError( - 'Bad $dbPointer, $id must be an ObjectId: %s' % (dbref_doc,)) + raise TypeError("Bad $dbPointer, $id must be an ObjectId: %s" % (dbref_doc,)) if len(dbref_doc) != 2: - raise TypeError( - 'Bad $dbPointer, extra field(s) in DBRef: %s' % (dbref_doc,)) + raise TypeError("Bad $dbPointer, extra field(s) in DBRef: %s" % (dbref_doc,)) return dbref else: - raise TypeError('Bad $dbPointer, expected a DBRef: %s' % (doc,)) + raise TypeError("Bad $dbPointer, expected a DBRef: %s" % (doc,)) def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" - i_str = doc['$numberInt'] + i_str = doc["$numberInt"] if len(doc) != 1: - raise TypeError('Bad $numberInt, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberInt, extra field(s): %s" % (doc,)) if not isinstance(i_str, str): - raise TypeError('$numberInt must be string: %s' % (doc,)) + raise TypeError("$numberInt must be string: %s" % (doc,)) return int(i_str) def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" - l_str = doc['$numberLong'] + l_str = doc["$numberLong"] if len(doc) != 1: - raise TypeError('Bad $numberLong, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberLong, extra field(s): %s" % (doc,)) return Int64(l_str) def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" - d_str = doc['$numberDouble'] + d_str = doc["$numberDouble"] if len(doc) != 1: - raise TypeError('Bad $numberDouble, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberDouble, extra field(s): %s" % (doc,)) if not isinstance(d_str, str): - raise TypeError('$numberDouble must be string: %s' % (doc,)) + raise TypeError("$numberDouble must be string: %s" % (doc,)) return float(d_str) def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" - d_str = doc['$numberDecimal'] + d_str = doc["$numberDecimal"] if len(doc) != 1: - raise TypeError('Bad $numberDecimal, extra field(s): %s' % (doc,)) + raise TypeError("Bad $numberDecimal, extra field(s): %s" % (doc,)) if not isinstance(d_str, str): - raise TypeError('$numberDecimal must be string: %s' % (doc,)) + raise TypeError("$numberDecimal must be string: %s" % (doc,)) return Decimal128(d_str) def _parse_canonical_minkey(doc: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" - if type(doc['$minKey']) is not int or doc['$minKey'] != 1: - raise TypeError('$minKey value must be 1: %s' % (doc,)) + if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: + raise TypeError("$minKey value must be 1: %s" % (doc,)) if len(doc) != 1: - raise TypeError('Bad $minKey, extra field(s): %s' % (doc,)) + raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) return MinKey() def _parse_canonical_maxkey(doc: Any) -> MaxKey: """Decode a JSON MaxKey to bson.max_key.MaxKey.""" - if type(doc['$maxKey']) is not int or doc['$maxKey'] != 1: - raise TypeError('$maxKey value must be 1: %s', (doc,)) + if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: + raise TypeError("$maxKey value must be 1: %s", (doc,)) if len(doc) != 1: - raise TypeError('Bad $minKey, extra field(s): %s' % (doc,)) + raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) return MaxKey() def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: if json_options.json_mode == JSONMode.LEGACY: - return SON([ - ('$binary', base64.b64encode(data).decode()), - ('$type', "%02x" % subtype)]) - return {'$binary': SON([ - ('base64', base64.b64encode(data).decode()), - ('subType', "%02x" % subtype)])} + return SON([("$binary", base64.b64encode(data).decode()), ("$type", "%02x" % subtype)]) + return { + "$binary": SON([("base64", base64.b64encode(data).decode()), ("subType", "%02x" % subtype)]) + } def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: @@ -784,25 +787,24 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if isinstance(obj, DBRef): return _json_convert(obj.as_doc(), json_options=json_options) if isinstance(obj, datetime.datetime): - if (json_options.datetime_representation == - DatetimeRepresentation.ISO8601): + if json_options.datetime_representation == DatetimeRepresentation.ISO8601: if not obj.tzinfo: obj = obj.replace(tzinfo=utc) assert obj.tzinfo is not None if obj >= EPOCH_AWARE: off = obj.tzinfo.utcoffset(obj) if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore - tz_string = 'Z' + tz_string = "Z" else: - tz_string = obj.strftime('%z') + tz_string = obj.strftime("%z") millis = int(obj.microsecond / 1000) fracsecs = ".%03d" % (millis,) if millis else "" - return {"$date": "%s%s%s" % ( - obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)} + return { + "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) + } millis = bson._datetime_to_millis(obj) - if (json_options.datetime_representation == - DatetimeRepresentation.LEGACY): + if json_options.datetime_representation == DatetimeRepresentation.LEGACY: return {"$date": millis} return {"$date": {"$numberLong": str(millis)}} if json_options.strict_number_long and isinstance(obj, Int64): @@ -824,11 +826,10 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: if isinstance(obj.pattern, str): pattern = obj.pattern else: - pattern = obj.pattern.decode('utf-8') + pattern = obj.pattern.decode("utf-8") if json_options.json_mode == JSONMode.LEGACY: return SON([("$regex", pattern), ("$options", flags)]) - return {'$regularExpression': SON([("pattern", pattern), - ("options", flags)])} + return {"$regularExpression": SON([("pattern", pattern), ("options", flags)])} if isinstance(obj, MinKey): return {"$minKey": 1} if isinstance(obj, MaxKey): @@ -837,18 +838,15 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])} if isinstance(obj, Code): if obj.scope is None: - return {'$code': str(obj)} - return SON([ - ('$code', str(obj)), - ('$scope', _json_convert(obj.scope, json_options))]) + return {"$code": str(obj)} + return SON([("$code", str(obj)), ("$scope", _json_convert(obj.scope, json_options))]) if isinstance(obj, Binary): return _encode_binary(obj, obj.subtype, json_options) if isinstance(obj, bytes): return _encode_binary(obj, 0, json_options) if isinstance(obj, uuid.UUID): if json_options.strict_uuid: - binval = Binary.from_uuid( - obj, uuid_representation=json_options.uuid_representation) + binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) return _encode_binary(binval, binval.subtype, json_options) else: return {"$uuid": obj.hex} @@ -856,19 +854,18 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: return {"$numberDecimal": str(obj)} if isinstance(obj, bool): return obj - if (json_options.json_mode == JSONMode.CANONICAL and - isinstance(obj, int)): - if -2 ** 31 <= obj < 2 ** 31: - return {'$numberInt': str(obj)} - return {'$numberLong': str(obj)} + if json_options.json_mode == JSONMode.CANONICAL and isinstance(obj, int): + if -(2**31) <= obj < 2**31: + return {"$numberInt": str(obj)} + return {"$numberLong": str(obj)} if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float): if math.isnan(obj): - return {'$numberDouble': 'NaN'} + return {"$numberDouble": "NaN"} elif math.isinf(obj): - representation = 'Infinity' if obj > 0 else '-Infinity' - return {'$numberDouble': representation} + representation = "Infinity" if obj > 0 else "-Infinity" + return {"$numberDouble": representation} elif json_options.json_mode == JSONMode.CANONICAL: # repr() will return the shortest string guaranteed to produce the # original value, when float() is called on it. - return {'$numberDouble': str(repr(obj))} + return {"$numberDouble": str(repr(obj))} raise TypeError("%r is not JSON serializable" % obj) diff --git a/bson/max_key.py b/bson/max_key.py index 107dc9dec6..b4f38d072e 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -19,6 +19,7 @@ class MaxKey(object): """MongoDB internal MaxKey type.""" + __slots__ = () _type_marker = 127 diff --git a/bson/min_key.py b/bson/min_key.py index 5483eb6cf8..babc655e43 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -19,6 +19,7 @@ class MinKey(object): """MongoDB internal MinKey type.""" + __slots__ = () _type_marker = 255 diff --git a/bson/objectid.py b/bson/objectid.py index baf1966bce..9ad3ed60be 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -35,7 +35,8 @@ def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" - " or a 24-character hex string" % oid) + " or a 24-character hex string" % oid + ) def _random_bytes() -> bytes: @@ -44,8 +45,7 @@ def _random_bytes() -> bytes: class ObjectId(object): - """A MongoDB ObjectId. - """ + """A MongoDB ObjectId.""" _pid = os.getpid() @@ -54,7 +54,7 @@ class ObjectId(object): __random = _random_bytes() - __slots__ = ('__id',) + __slots__ = ("__id",) _type_marker = 7 @@ -131,12 +131,11 @@ def from_datetime(cls: Type["ObjectId"], generation_time: datetime.datetime) -> - `generation_time`: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - offset = generation_time.utcoffset() + offset = generation_time.utcoffset() if offset is not None: generation_time = generation_time - offset timestamp = calendar.timegm(generation_time.timetuple()) - oid = struct.pack( - ">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" + oid = struct.pack(">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" return cls(oid) @classmethod @@ -159,8 +158,7 @@ def is_valid(cls: Type["ObjectId"], oid: Any) -> bool: @classmethod def _random(cls) -> bytes: - """Generate a 5-byte random number once per process. - """ + """Generate a 5-byte random number once per process.""" pid = os.getpid() if pid != cls._pid: cls._pid = pid @@ -168,8 +166,7 @@ def _random(cls) -> bytes: return cls.__random def __generate(self) -> None: - """Generate a new value for this ObjectId. - """ + """Generate a new value for this ObjectId.""" # 4 bytes current time oid = struct.pack(">I", int(time.time())) @@ -206,13 +203,13 @@ def __validate(self, oid: Any) -> None: else: _raise_invalid_id(oid) else: - raise TypeError("id must be an instance of (bytes, str, ObjectId), " - "not %s" % (type(oid),)) + raise TypeError( + "id must be an instance of (bytes, str, ObjectId), " "not %s" % (type(oid),) + ) @property def binary(self) -> bytes: - """12-byte binary representation of this ObjectId. - """ + """12-byte binary representation of this ObjectId.""" return self.__id @property @@ -234,8 +231,7 @@ def __getstate__(self) -> bytes: return self.__id def __setstate__(self, value: Any) -> None: - """explicit state set from pickling - """ + """explicit state set from pickling""" # Provide backwards compatability with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): @@ -246,7 +242,7 @@ def __setstate__(self, value: Any) -> None: # In python 3.x this has to be converted to `bytes` # by encoding latin-1. if isinstance(oid, str): - self.__id = oid.encode('latin-1') + self.__id = oid.encode("latin-1") else: self.__id = oid diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 8a3b0cb4fb..c102b367a2 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -69,7 +69,7 @@ class RawBSONDocument(Mapping[str, Any]): RawBSONDocument decode its bytes. """ - __slots__ = ('__raw', '__inflated_doc', '__codec_options') + __slots__ = ("__raw", "__inflated_doc", "__codec_options") _type_marker = _RAW_BSON_DOCUMENT_MARKER def __init__(self, bson_bytes: bytes, codec_options: Optional[CodecOptions] = None) -> None: @@ -115,7 +115,8 @@ class from the standard library so it can be used like a read-only elif codec_options.document_class is not RawBSONDocument: raise TypeError( "RawBSONDocument cannot use CodecOptions with document " - "class %s" % (codec_options.document_class, )) + "class %s" % (codec_options.document_class,) + ) self.__codec_options = codec_options # Validate the bson object size. _get_object_size(bson_bytes, 0, len(bson_bytes)) @@ -135,8 +136,7 @@ def __inflated(self) -> Mapping[str, Any]: # We already validated the object's size when this document was # created, so no need to do that again. # Use SON to preserve ordering of elements. - self.__inflated_doc = _inflate_bson( - self.__raw, self.__codec_options) + self.__inflated_doc = _inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc def __getitem__(self, item: str) -> Any: @@ -154,8 +154,7 @@ def __eq__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return ("RawBSONDocument(%r, codec_options=%r)" - % (self.raw, self.__codec_options)) + return "RawBSONDocument(%r, codec_options=%r)" % (self.raw, self.__codec_options) def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: @@ -168,8 +167,7 @@ def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any must be :class:`RawBSONDocument`. """ # Use SON to preserve ordering of elements. - return _raw_to_dict( - bson_bytes, 4, len(bson_bytes)-1, codec_options, SON()) + return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON()) DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) diff --git a/bson/regex.py b/bson/regex.py index 454aca3cec..317c65049f 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -42,6 +42,7 @@ def str_flags_to_int(str_flags: str) -> int: class Regex(object): """BSON regular expression data.""" + __slots__ = ("pattern", "flags") __getstate__ = _getstate_slots @@ -75,9 +76,7 @@ def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": .. _PCRE: http://www.pcre.org/ """ if not isinstance(regex, RE_TYPE): - raise TypeError( - "regex must be a compiled regular expression, not %s" - % type(regex)) + raise TypeError("regex must be a compiled regular expression, not %s" % type(regex)) return Regex(regex.pattern, regex.flags) @@ -101,8 +100,7 @@ def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> No elif isinstance(flags, int): self.flags = flags else: - raise TypeError( - "flags must be a string or int, not %s" % type(flags)) + raise TypeError("flags must be a string or int, not %s" % type(flags)) def __eq__(self, other: Any) -> bool: if isinstance(other, Regex): diff --git a/bson/son.py b/bson/son.py index bb39644637..e4238b4058 100644 --- a/bson/son.py +++ b/bson/son.py @@ -21,8 +21,20 @@ import copy import re from collections.abc import Mapping as _Mapping -from typing import (Any, Dict, Iterable, Iterator, List, Mapping, - Optional, Pattern, Tuple, Type, TypeVar, Union) +from typing import ( + Any, + Dict, + Iterable, + Iterator, + List, + Mapping, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, +) # This sort of sucks, but seems to be as good as it gets... # This is essentially the same as re._pattern_type @@ -40,9 +52,14 @@ class SON(Dict[_Key, _Value]): few extra niceties for dealing with SON. SON provides an API similar to collections.OrderedDict. """ + __keys: List[Any] - def __init__(self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, **kwargs: Any) -> None: + def __init__( + self, + data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, + **kwargs: Any + ) -> None: self.__keys = [] dict.__init__(self) self.update(data) @@ -107,8 +124,7 @@ def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[over def pop(self, key: _Key, *args: Union[_Value, _T]) -> Union[_Value, _T]: if len(args) > 1: - raise TypeError("pop expected at most 2 arguments, got "\ - + repr(1 + len(args))) + raise TypeError("pop expected at most 2 arguments, got " + repr(1 + len(args))) try: value = self[key] except KeyError: @@ -122,7 +138,7 @@ def popitem(self) -> Tuple[_Key, _Value]: try: k, v = next(iter(self.items())) except StopIteration: - raise KeyError('container is empty') + raise KeyError("container is empty") del self[k] return (k, v) @@ -130,10 +146,10 @@ def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type # Make progressively weaker assumptions about "other" if other is None: pass - elif hasattr(other, 'items'): + elif hasattr(other, "items"): for k, v in other.items(): self[k] = v - elif hasattr(other, 'keys'): + elif hasattr(other, "keys"): for k in other.keys(): self[k] = other[k] else: @@ -153,8 +169,7 @@ def __eq__(self, other: Any) -> bool: regular dictionary is order-insensitive. """ if isinstance(other, SON): - return len(self) == len(other) and list(self.items()) == \ - list(other.items()) + return len(self) == len(other) and list(self.items()) == list(other.items()) return self.to_dict() == other def __ne__(self, other: Any) -> bool: @@ -174,9 +189,7 @@ def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): - return dict([ - (k, transform_value(v)) - for k, v in value.items()]) + return dict([(k, transform_value(v)) for k, v in value.items()]) else: return value diff --git a/bson/timestamp.py b/bson/timestamp.py index 93c7540fd0..a333b9fa3e 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -26,8 +26,8 @@ class Timestamp(object): - """MongoDB internal timestamps used in the opLog. - """ + """MongoDB internal timestamps used in the opLog.""" + __slots__ = ("__time", "__inc") __getstate__ = _getstate_slots @@ -72,19 +72,17 @@ def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: @property def time(self) -> int: - """Get the time portion of this :class:`Timestamp`. - """ + """Get the time portion of this :class:`Timestamp`.""" return self.__time @property def inc(self) -> int: - """Get the inc portion of this :class:`Timestamp`. - """ + """Get the inc portion of this :class:`Timestamp`.""" return self.__inc def __eq__(self, other: Any) -> bool: if isinstance(other, Timestamp): - return (self.__time == other.time and self.__inc == other.inc) + return self.__time == other.time and self.__inc == other.inc else: return NotImplemented diff --git a/doc/conf.py b/doc/conf.py index facb74f470..47debcf14c 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -4,8 +4,10 @@ # # This file is execfile()d with the current directory set to its containing dir. -import sys, os -sys.path[0:0] = [os.path.abspath('..')] +import os +import sys + +sys.path[0:0] = [os.path.abspath("..")] import pymongo @@ -13,21 +15,26 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', - 'sphinx.ext.todo', 'sphinx.ext.intersphinx'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.coverage", + "sphinx.ext.todo", + "sphinx.ext.intersphinx", +] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'PyMongo' -copyright = 'MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc' +project = "PyMongo" +copyright = "MongoDB, Inc. 2008-present. MongoDB, Mongo, and the leaf logo are registered trademarks of MongoDB, Inc" html_show_sphinx = False # The version info for the project you're documenting, acts as replacement for @@ -44,31 +51,31 @@ # List of directories, relative to source directory, that shouldn't be searched # for source files. -exclude_trees = ['_build'] +exclude_trees = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for extensions ---------------------------------------------------- -autoclass_content = 'init' +autoclass_content = "init" -doctest_path = [os.path.abspath('..')] +doctest_path = [os.path.abspath("..")] -doctest_test_doctest_blocks = '' +doctest_test_doctest_blocks = "" doctest_global_setup = """ from pymongo.mongo_client import MongoClient @@ -82,91 +89,87 @@ # Theme gratefully vendored from CPython source. html_theme = "pydoctheme" html_theme_path = ["."] -html_theme_options = { - 'collapsiblesidebar': True, - 'googletag': False -} +html_theme_options = {"collapsiblesidebar": True, "googletag": False} # Additional static files. -html_static_path = ['static'] +html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'PyMongo' + release.replace('.', '_') +htmlhelp_basename = "PyMongo" + release.replace(".", "_") # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'PyMongo.tex', 'PyMongo Documentation', - 'Michael Dirolf', 'manual'), + ("index", "PyMongo.tex", "PyMongo Documentation", "Michael Dirolf", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_use_modindex = True +# latex_use_modindex = True intersphinx_mapping = { - 'gevent': ('http://www.gevent.org/', None), - 'py': ('https://docs.python.org/3/', None), + "gevent": ("http://www.gevent.org/", None), + "py": ("https://docs.python.org/3/", None), } diff --git a/green_framework_test.py b/green_framework_test.py index baffe21b15..610845a9f6 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -21,30 +21,35 @@ def run_gevent(): """Prepare to run tests with Gevent. Can raise ImportError.""" from gevent import monkey + monkey.patch_all() def run_eventlet(): """Prepare to run tests with Eventlet. Can raise ImportError.""" import eventlet + # https://github.com/eventlet/eventlet/issues/401 eventlet.sleep() eventlet.monkey_patch() FRAMEWORKS = { - 'gevent': run_gevent, - 'eventlet': run_eventlet, + "gevent": run_gevent, + "eventlet": run_eventlet, } def list_frameworks(): """Tell the user what framework names are valid.""" - sys.stdout.write("""Testable frameworks: %s + sys.stdout.write( + """Testable frameworks: %s Note that membership in this list means the framework can be tested with PyMongo, not necessarily that it is officially supported. -""" % ", ".join(sorted(FRAMEWORKS))) +""" + % ", ".join(sorted(FRAMEWORKS)) + ) def run(framework_name, *args): @@ -53,7 +58,7 @@ def run(framework_name, *args): FRAMEWORKS[framework_name]() # Run the tests. - sys.argv[:] = ['setup.py', 'test'] + list(args) + sys.argv[:] = ["setup.py", "test"] + list(args) import setup @@ -62,11 +67,13 @@ def main(): usage = """python %s FRAMEWORK_NAME Test PyMongo with a variety of greenlet-based monkey-patching frameworks. See -python %s --help-frameworks.""" % (sys.argv[0], sys.argv[0]) +python %s --help-frameworks.""" % ( + sys.argv[0], + sys.argv[0], + ) try: - opts, args = getopt.getopt( - sys.argv[1:], "h", ["help", "help-frameworks"]) + opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "help-frameworks"]) except getopt.GetoptError as err: print(str(err)) print(usage) @@ -87,13 +94,14 @@ def main(): sys.exit(1) if args[0] not in FRAMEWORKS: - print('%r is not a testable framework.\n' % args[0]) + print("%r is not a testable framework.\n" % args[0]) list_frameworks() sys.exit(1) - run(args[0], # Framework name. - *args[1:]) # Command line args to setup.py, like what test to run. + run( + args[0], *args[1:] # Framework name. + ) # Command line args to setup.py, like what test to run. -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 02c42d6eb6..22b28af1a7 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -25,9 +25,14 @@ from bson.objectid import ObjectId from gridfs.errors import NoFile -from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, GridIn, GridOut, - GridOutCursor, _clear_entity_type_registry, - _disallow_transactions) +from gridfs.grid_file import ( + DEFAULT_CHUNK_SIZE, + GridIn, + GridOut, + GridOutCursor, + _clear_entity_type_registry, + _disallow_transactions, +) from pymongo import ASCENDING, DESCENDING from pymongo.client_session import ClientSession from pymongo.collation import Collation @@ -40,8 +45,8 @@ class GridFS(object): - """An instance of GridFS on top of a single Database. - """ + """An instance of GridFS on top of a single Database.""" + def __init__(self, database: Database, collection: str = "fs"): """Create a new instance of :class:`GridFS`. @@ -78,8 +83,7 @@ def __init__(self, database: Database, collection: str = "fs"): database = _clear_entity_type_registry(database) if not database.write_concern.acknowledged: - raise ConfigurationError('database must use ' - 'acknowledged write_concern') + raise ConfigurationError("database must use " "acknowledged write_concern") self.__collection = database[collection] self.__files = self.__collection.files @@ -159,7 +163,13 @@ def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: gout._ensure_file() return gout - def get_version(self, filename: Optional[str] = None, version: Optional[int] = -1, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: + def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[ClientSession] = None, + **kwargs: Any + ) -> GridOut: """Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches @@ -201,7 +211,7 @@ def get_version(self, filename: Optional[str] = None, version: Optional[int] = - _disallow_transactions(session) cursor = self.__files.find(query, session=session) if version is None: - version = -1 + version = -1 if version < 0: skip = abs(version) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) @@ -209,12 +219,13 @@ def get_version(self, filename: Optional[str] = None, version: Optional[int] = - cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) try: doc = next(cursor) - return GridOut( - self.__collection, file_document=doc, session=session) + return GridOut(self.__collection, file_document=doc, session=session) except StopIteration: raise NoFile("no version %d for filename %r" % (version, filename)) - def get_last_version(self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> GridOut: + def get_last_version( + self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any + ) -> GridOut: """Get the most recent version of a file in GridFS by ``"filename"`` or metadata fields. @@ -280,10 +291,16 @@ def list(self, session: Optional[ClientSession] = None) -> List[str]: # With an index, distinct includes documents with no filename # as None. return [ - name for name in self.__files.distinct("filename", session=session) - if name is not None] - - def find_one(self, filter: Optional[Any] = None, session: Optional[ClientSession] = None, *args: Any, **kwargs: Any) -> Optional[GridOut]: + name for name in self.__files.distinct("filename", session=session) if name is not None + ] + + def find_one( + self, + filter: Optional[Any] = None, + session: Optional[ClientSession] = None, + *args: Any, + **kwargs: Any + ) -> Optional[GridOut]: """Get a single file from gridfs. All arguments to :meth:`find` are also valid arguments for @@ -377,7 +394,12 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """ return GridOutCursor(self.__collection, *args, **kwargs) - def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> bool: + def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[ClientSession] = None, + **kwargs: Any + ) -> bool: """Check if a file exists in this instance of :class:`GridFS`. The file to check for can be specified by the value of its @@ -427,10 +449,14 @@ def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientS class GridFSBucket(object): """An instance of GridFS on top of a single Database.""" - def __init__(self, db: Database, bucket_name: str = "fs", - chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None) -> None: + def __init__( + self, + db: Database, + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: """Create a new instance of :class:`GridFSBucket`. Raises :exc:`TypeError` if `database` is not an instance of @@ -472,23 +498,27 @@ def __init__(self, db: Database, bucket_name: str = "fs", wtc = write_concern if write_concern is not None else db.write_concern if not wtc.acknowledged: - raise ConfigurationError('write concern must be acknowledged') + raise ConfigurationError("write concern must be acknowledged") self._bucket_name = bucket_name self._collection = db[bucket_name] self._chunks: Collection = self._collection.chunks.with_options( - write_concern=write_concern, - read_preference=read_preference) + write_concern=write_concern, read_preference=read_preference + ) self._files: Collection = self._collection.files.with_options( - write_concern=write_concern, - read_preference=read_preference) + write_concern=write_concern, read_preference=read_preference + ) self._chunk_size_bytes = chunk_size_bytes - def open_upload_stream(self, filename: str, chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> GridIn: + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -526,18 +556,25 @@ def open_upload_stream(self, filename: str, chunk_size_bytes: Optional[int] = No """ validate_string("filename", filename) - opts = {"filename": filename, - "chunk_size": (chunk_size_bytes if chunk_size_bytes - is not None else self._chunk_size_bytes)} + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } if metadata is not None: opts["metadata"] = metadata return GridIn(self._collection, session=session, **opts) def open_upload_stream_with_id( - self, file_id: Any, filename: str, chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> GridIn: + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: """Opens a Stream that the application can write the contents of the file to. @@ -579,19 +616,26 @@ def open_upload_stream_with_id( """ validate_string("filename", filename) - opts = {"_id": file_id, - "filename": filename, - "chunk_size": (chunk_size_bytes if chunk_size_bytes - is not None else self._chunk_size_bytes)} + opts = { + "_id": file_id, + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } if metadata is not None: opts["metadata"] = metadata return GridIn(self._collection, session=session, **opts) - def upload_from_stream(self, filename: str, source: Any, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> ObjectId: + def upload_from_stream( + self, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> ObjectId: """Uploads a user file to a GridFS bucket. Reads the contents of the user file from `source` and uploads @@ -627,16 +671,20 @@ def upload_from_stream(self, filename: str, source: Any, .. versionchanged:: 3.6 Added ``session`` parameter. """ - with self.open_upload_stream( - filename, chunk_size_bytes, metadata, session=session) as gin: + with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin: gin.write(source) return cast(ObjectId, gin._id) - def upload_from_stream_with_id(self, file_id: Any, filename: str, source: Any, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None) -> None: + def upload_from_stream_with_id( + self, + file_id: Any, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> None: """Uploads a user file to a GridFS bucket with a custom file id. Reads the contents of the user file from `source` and uploads @@ -674,11 +722,13 @@ def upload_from_stream_with_id(self, file_id: Any, filename: str, source: Any, Added ``session`` parameter. """ with self.open_upload_stream_with_id( - file_id, filename, chunk_size_bytes, metadata, - session=session) as gin: + file_id, filename, chunk_size_bytes, metadata, session=session + ) as gin: gin.write(source) - def open_download_stream(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: + def open_download_stream( + self, file_id: Any, session: Optional[ClientSession] = None + ) -> GridOut: """Opens a Stream from which the application can read the contents of the stored file specified by file_id. @@ -709,7 +759,9 @@ def open_download_stream(self, file_id: Any, session: Optional[ClientSession] = gout._ensure_file() return gout - def download_to_stream(self, file_id: Any, destination: Any, session: Optional[ClientSession] = None) -> None: + def download_to_stream( + self, file_id: Any, destination: Any, session: Optional[ClientSession] = None + ) -> None: """Downloads the contents of the stored file specified by file_id and writes the contents to `destination`. @@ -766,8 +818,7 @@ def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: res = self._files.delete_one({"_id": file_id}, session=session) self._chunks.delete_many({"files_id": file_id}, session=session) if not res.deleted_count: - raise NoFile( - "no file could be deleted because none matched %s" % file_id) + raise NoFile("no file could be deleted because none matched %s" % file_id) def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Find and return the files collection documents that match ``filter`` @@ -817,7 +868,9 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """ return GridOutCursor(self._collection, *args, **kwargs) - def open_download_stream_by_name(self, filename: str, revision: int = -1, session: Optional[ClientSession] = None) -> GridOut: + def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[ClientSession] = None + ) -> GridOut: """Opens a Stream from which the application can read the contents of `filename` and optional `revision`. @@ -866,15 +919,17 @@ def open_download_stream_by_name(self, filename: str, revision: int = -1, sessio cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) try: grid_file = next(cursor) - return GridOut( - self._collection, file_document=grid_file, session=session) + return GridOut(self._collection, file_document=grid_file, session=session) except StopIteration: - raise NoFile( - "no version %d for filename %r" % (revision, filename)) - - def download_to_stream_by_name(self, filename: str, destination: Any, - revision: int = -1, - session: Optional[ClientSession] = None) -> None: + raise NoFile("no version %d for filename %r" % (revision, filename)) + + def download_to_stream_by_name( + self, + filename: str, + destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None, + ) -> None: """Write the contents of `filename` (with optional `revision`) to `destination`. @@ -912,12 +967,13 @@ def download_to_stream_by_name(self, filename: str, destination: Any, .. versionchanged:: 3.6 Added ``session`` parameter. """ - with self.open_download_stream_by_name( - filename, revision, session=session) as gout: + with self.open_download_stream_by_name(filename, revision, session=session) as gout: for chunk in gout: destination.write(chunk) - def rename(self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None) -> None: + def rename( + self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None + ) -> None: """Renames the stored file with the specified file_id. For example:: @@ -940,9 +996,11 @@ def rename(self, file_id: Any, new_filename: str, session: Optional[ClientSessio Added ``session`` parameter. """ _disallow_transactions(session) - result = self._files.update_one({"_id": file_id}, - {"$set": {"filename": new_filename}}, - session=session) + result = self._files.update_one( + {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session + ) if not result.matched_count: - raise NoFile("no files could be renamed %r because none " - "matched file_id %i" % (new_filename, file_id)) + raise NoFile( + "no files could be renamed %r because none " + "matched file_id %i" % (new_filename, file_id) + ) diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 686d328a3c..93a97158ae 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -28,9 +28,13 @@ from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.cursor import Cursor -from pymongo.errors import (ConfigurationError, CursorNotFound, - DuplicateKeyError, InvalidOperation, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) from pymongo.read_preferences import ReadPreference _SEEK_SET = os.SEEK_SET @@ -48,30 +52,36 @@ _F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) -def _grid_in_property(field_name: str, docstring: str, read_only: Optional[bool] = False, - closed_only: Optional[bool] = False) -> Any: +def _grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: """Create a GridIn property.""" + def getter(self: Any) -> Any: if closed_only and not self._closed: - raise AttributeError("can only get %r on a closed file" % - field_name) + raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 - if field_name == 'length': + if field_name == "length": return self._file.get(field_name, 0) return self._file.get(field_name, None) def setter(self: Any, value: Any) -> Any: if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, - {"$set": {field_name: value}}) + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) self._file[field_name] = value if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: - docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and " - "can only be read after :meth:`close` " - "has been called.") + docstring = "%s\n\n%s" % ( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) if not read_only and not closed_only: return property(getter, setter, doc=docstring) @@ -80,11 +90,12 @@ def setter(self: Any, value: Any) -> Any: def _grid_out_property(field_name: str, docstring: str) -> Any: """Create a GridOut property.""" + def getter(self: Any) -> Any: self._ensure_file() # Protect against PHP-237 - if field_name == 'length': + if field_name == "length": return self._file.get(field_name, 0) return self._file.get(field_name, None) @@ -100,14 +111,15 @@ def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: def _disallow_transactions(session: Optional[ClientSession]) -> None: if session and session.in_transaction: - raise InvalidOperation( - 'GridFS does not support multi-document transactions') + raise InvalidOperation("GridFS does not support multi-document transactions") class GridIn(object): - """Class to write data to GridFS. - """ - def __init__(self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any) -> None: + """Class to write data to GridFS.""" + + def __init__( + self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any + ) -> None: """Write a file to GridFS Application developers should generally not need to @@ -160,12 +172,10 @@ def __init__(self, root_collection: Collection, session: Optional[ClientSession] :attr:`~pymongo.collection.Collection.write_concern` """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an " "instance of Collection") if not root_collection.write_concern.acknowledged: - raise ConfigurationError('root_collection must use ' - 'acknowledged write_concern') + raise ConfigurationError("root_collection must use " "acknowledged write_concern") _disallow_transactions(session) # Handle alternative naming @@ -174,8 +184,7 @@ def __init__(self, root_collection: Collection, session: Optional[ClientSession] if "chunk_size" in kwargs: kwargs["chunkSize"] = kwargs.pop("chunk_size") - coll = _clear_entity_type_registry( - root_collection, read_preference=ReadPreference.PRIMARY) + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) # Defaults kwargs["_id"] = kwargs.get("_id", ObjectId()) @@ -194,13 +203,14 @@ def __create_index(self, collection: Collection, index_key: Any, unique: bool) - doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: - index_keys = [index_spec['key'] for index_spec in - collection.list_indexes(session=self._session)] + index_keys = [ + index_spec["key"] + for index_spec in collection.list_indexes(session=self._session) + ] except OperationFailure: index_keys = [] if index_key not in index_keys: - collection.create_index( - index_key.items(), unique=unique, session=self._session) + collection.create_index(index_key.items(), unique=unique, session=self._session) def __ensure_indexes(self) -> None: if not object.__getattribute__(self, "_ensured_index"): @@ -210,35 +220,28 @@ def __ensure_indexes(self) -> None: object.__setattr__(self, "_ensured_index", True) def abort(self) -> None: - """Remove all chunks/files that may have been uploaded and close. - """ - self._coll.chunks.delete_many( - {"files_id": self._file['_id']}, session=self._session) - self._coll.files.delete_one( - {"_id": self._file['_id']}, session=self._session) + """Remove all chunks/files that may have been uploaded and close.""" + self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) object.__setattr__(self, "_closed", True) @property def closed(self) -> bool: - """Is this file closed? - """ + """Is this file closed?""" return self._closed - _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", - read_only=True) + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) filename: Optional[str] = _grid_in_property("filename", "Name of this file.") name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") content_type: Optional[str] = _grid_in_property("contentType", "Mime-type for this file.") - length: int = _grid_in_property("length", "Length (in bytes) of this file.", - closed_only=True) - chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", - read_only=True) - upload_date: datetime.datetime = _grid_in_property("uploadDate", - "Date that this file was uploaded.", - closed_only=True) - md5: Optional[str] = _grid_in_property("md5", "MD5 of the contents of this file " - "if an md5 sum was created.", - closed_only=True) + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _grid_in_property( + "md5", "MD5 of the contents of this file " "if an md5 sum was created.", closed_only=True + ) _buffer: io.BytesIO _closed: bool @@ -259,46 +262,39 @@ def __setattr__(self, name: str, value: Any) -> None: # them now. self._file[name] = value if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, - {"$set": {name: value}}) + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) def __flush_data(self, data: Any) -> None: - """Flush `data` to a chunk. - """ + """Flush `data` to a chunk.""" self.__ensure_indexes() if not data: return - assert(len(data) <= self.chunk_size) + assert len(data) <= self.chunk_size - chunk = {"files_id": self._file["_id"], - "n": self._chunk_number, - "data": Binary(data)} + chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} try: self._chunks.insert_one(chunk, session=self._session) except DuplicateKeyError: - self._raise_file_exists(self._file['_id']) + self._raise_file_exists(self._file["_id"]) self._chunk_number += 1 self._position += len(data) def __flush_buffer(self) -> None: - """Flush the buffer contents out to a chunk. - """ + """Flush the buffer contents out to a chunk.""" self.__flush_data(self._buffer.getvalue()) self._buffer.close() self._buffer = io.BytesIO() def __flush(self) -> Any: - """Flush the file to the database. - """ + """Flush the file to the database.""" try: self.__flush_buffer() # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) self._file["uploadDate"] = datetime.datetime.utcnow() - return self._coll.files.insert_one( - self._file, session=self._session) + return self._coll.files.insert_one(self._file, session=self._session) except DuplicateKeyError: self._raise_file_exists(self._id) @@ -317,12 +313,12 @@ def close(self) -> None: object.__setattr__(self, "_closed", True) def read(self, size: Optional[int] = -1) -> None: - raise io.UnsupportedOperation('read') + raise io.UnsupportedOperation("read") def readable(self) -> bool: return False - def seekable(self)-> bool: + def seekable(self) -> bool: return False def write(self, data: Any) -> None: @@ -360,8 +356,7 @@ def write(self, data: Any) -> None: try: data = data.encode(self.encoding) except AttributeError: - raise TypeError("must specify an encoding for file in " - "order to write str") + raise TypeError("must specify an encoding for file in " "order to write str") read = io.BytesIO(data).read if self._buffer.tell() > 0: @@ -395,8 +390,7 @@ def writeable(self) -> bool: return True def __enter__(self) -> "GridIn": - """Support for the context manager protocol. - """ + """Support for the context manager protocol.""" return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: @@ -411,11 +405,15 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: class GridOut(io.IOBase): - """Class to read data out of GridFS. - """ - def __init__(self, root_collection: Collection, file_id: Optional[int] = None, - file_document: Optional[Any] = None, - session: Optional[ClientSession] = None) -> None: + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: Collection, + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None, + ) -> None: """Read a file from GridFS Application developers should generally not need to @@ -449,8 +447,7 @@ def __init__(self, root_collection: Collection, file_id: Optional[int] = None, from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " - "instance of Collection") + raise TypeError("root_collection must be an " "instance of Collection") _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) @@ -472,12 +469,16 @@ def __init__(self, root_collection: Collection, file_id: Optional[int] = None, content_type: Optional[str] = _grid_out_property("contentType", "Mime-type for this file.") length: int = _grid_out_property("length", "Length (in bytes) of this file.") chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") - upload_date: datetime.datetime = _grid_out_property("uploadDate", - "Date that this file was first uploaded.") + upload_date: datetime.datetime = _grid_out_property( + "uploadDate", "Date that this file was first uploaded." + ) aliases: Optional[List[str]] = _grid_out_property("aliases", "List of aliases for this file.") - metadata: Optional[Mapping[str, Any]] = _grid_out_property("metadata", "Metadata attached to this file.") - md5: Optional[str] = _grid_out_property("md5", "MD5 of the contents of this file " - "if an md5 sum was created.") + metadata: Optional[Mapping[str, Any]] = _grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _grid_out_property( + "md5", "MD5 of the contents of this file " "if an md5 sum was created." + ) _file: Any __chunk_iter: Any @@ -485,11 +486,11 @@ def __init__(self, root_collection: Collection, file_id: Optional[int] = None, def _ensure_file(self) -> None: if not self._file: _disallow_transactions(self._session) - self._file = self.__files.find_one({"_id": self.__file_id}, - session=self._session) + self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) if not self._file: - raise NoFile("no file in gridfs collection %r with _id %r" % - (self.__files, self.__file_id)) + raise NoFile( + "no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id) + ) def __getattr__(self, name: str) -> Any: self._ensure_file() @@ -514,10 +515,11 @@ def readchunk(self) -> bytes: chunk_number = int((received + self.__position) / chunk_size) if self.__chunk_iter is None: self.__chunk_iter = _GridOutChunkIterator( - self, self.__chunks, self._session, chunk_number) + self, self.__chunks, self._session, chunk_number + ) chunk = self.__chunk_iter.next() - chunk_data = chunk["data"][self.__position % chunk_size:] + chunk_data = chunk["data"][self.__position % chunk_size :] if not chunk_data: raise CorruptGridFile("truncated chunk") @@ -607,8 +609,7 @@ def readline(self, size: int = -1) -> bytes: # type: ignore[override] return data.read(size) def tell(self) -> int: - """Return the current position of this file. - """ + """Return the current position of this file.""" return self.__position def seek(self, pos: int, whence: int = _SEEK_SET) -> int: @@ -682,10 +683,10 @@ def close(self) -> None: super().close() def write(self, value: Any) -> None: - raise io.UnsupportedOperation('write') + raise io.UnsupportedOperation("write") def writelines(self, lines: Any) -> None: - raise io.UnsupportedOperation('writelines') + raise io.UnsupportedOperation("writelines") def writable(self) -> bool: return False @@ -704,7 +705,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: return False def fileno(self) -> int: - raise io.UnsupportedOperation('fileno') + raise io.UnsupportedOperation("fileno") def flush(self) -> None: # GridOut is read-only, so flush does nothing. @@ -716,7 +717,7 @@ def isatty(self) -> bool: def truncate(self, size: Optional[int] = None) -> int: # See https://docs.python.org/3/library/io.html#io.IOBase.writable # for why truncate has to raise. - raise io.UnsupportedOperation('truncate') + raise io.UnsupportedOperation("truncate") # Override IOBase.__del__ otherwise it will lead to __getattr__ on # __IOBase_closed which calls _ensure_file and potentially performs I/O. @@ -731,7 +732,14 @@ class _GridOutChunkIterator(object): Raises CorruptGridFile when encountering any truncated, missing, or extra chunk in a file. """ - def __init__(self, grid_out: GridOut, chunks: Collection, session: Optional[ClientSession], next_chunk: Any) -> None: + + def __init__( + self, + grid_out: GridOut, + chunks: Collection, + session: Optional[ClientSession], + next_chunk: Any, + ) -> None: self._id = grid_out._id self._chunk_size = int(grid_out.chunk_size) self._length = int(grid_out.length) @@ -756,8 +764,7 @@ def _create_cursor(self) -> None: if self._next_chunk > 0: filter["n"] = {"$gte": self._next_chunk} _disallow_transactions(self._session) - self._cursor = self._chunks.find(filter, sort=[("n", 1)], - session=self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) def _next_with_retry(self) -> Mapping[str, Any]: """Return the next chunk and retry once on CursorNotFound. @@ -788,7 +795,8 @@ def next(self) -> Mapping[str, Any]: self.close() raise CorruptGridFile( "Missing chunk: expected chunk #%d but found " - "chunk with n=%d" % (self._next_chunk, chunk["n"])) + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) if chunk["n"] >= self._num_chunks: # According to spec, ignore extra chunks if they are empty. @@ -796,15 +804,16 @@ def next(self) -> Mapping[str, Any]: self.close() raise CorruptGridFile( "Extra chunk found: expected %d chunks but found " - "chunk with n=%d" % (self._num_chunks, chunk["n"])) + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) expected_length = self.expected_chunk_length(chunk["n"]) if len(chunk["data"]) != expected_length: self.close() raise CorruptGridFile( "truncated chunk #%d: expected chunk length to be %d but " - "found chunk with length %d" % ( - chunk["n"], expected_length, len(chunk["data"]))) + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) self._next_chunk += 1 return chunk @@ -835,13 +844,18 @@ class GridOutCursor(Cursor): """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ - def __init__(self, collection: Collection, filter: Optional[Mapping[str, Any]] = None, - skip: int = 0, - limit: int = 0, - no_cursor_timeout: bool = False, - sort: Optional[Any] = None, - batch_size: int = 0, - session: Optional[ClientSession] = None) -> None: + + def __init__( + self, + collection: Collection, + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None, + ) -> None: """Create a new cursor, similar to the normal :class:`~pymongo.cursor.Cursor`. @@ -859,18 +873,22 @@ def __init__(self, collection: Collection, filter: Optional[Mapping[str, Any]] = self.__root_collection = collection super(GridOutCursor, self).__init__( - collection.files, filter, skip=skip, limit=limit, - no_cursor_timeout=no_cursor_timeout, sort=sort, - batch_size=batch_size, session=session) + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) def next(self) -> GridOut: - """Get next GridOut object from cursor. - """ + """Get next GridOut object from cursor.""" _disallow_transactions(self.session) # Work around "super is not iterable" issue in Python 3.x next_file = super(GridOutCursor, self).next() - return GridOut(self.__root_collection, file_document=next_file, - session=self.session) + return GridOut(self.__root_collection, file_document=next_file, session=self.session) __next__ = next @@ -881,6 +899,5 @@ def remove_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[over raise NotImplementedError("Method does not exist for GridOutCursor") def _clone_base(self, session: ClientSession) -> "GridOutCursor": - """Creates an empty GridOutCursor for information to be copied into. - """ + """Creates an empty GridOutCursor for information to be copied into.""" return GridOutCursor(self.__root_collection, session=session) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 54a962df57..f8baa91971 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,12 +55,14 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, '.dev0') +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, ".dev0") + def get_version_string() -> str: if isinstance(version_tuple[-1], str): - return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1] - return '.'.join(map(str, version_tuple)) + return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] + return ".".join(map(str, version_tuple)) + __version__: str = get_version_string() version = __version__ @@ -68,12 +70,18 @@ def get_version_string() -> str: """Current version of PyMongo.""" from pymongo.collection import ReturnDocument -from pymongo.common import (MAX_SUPPORTED_WIRE_VERSION, - MIN_SUPPORTED_WIRE_VERSION) +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType from pymongo.mongo_client import MongoClient -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, - ReplaceOne, UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -82,6 +90,7 @@ def has_c() -> bool: """Is the C extension installed?""" try: from pymongo import _cmessage # type: ignore[attr-defined] + return True except ImportError: return False diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index b2e20e9ca5..2b8cafe7cb 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -28,21 +28,32 @@ class _AggregationCommand(object): :meth:`pymongo.collection.Collection.aggregate`, or :meth:`pymongo.database.Database.aggregate` instead. """ - def __init__(self, target, cursor_class, pipeline, options, - explicit_session, let=None, user_fields=None, result_processor=None): + + def __init__( + self, + target, + cursor_class, + pipeline, + options, + explicit_session, + let=None, + user_fields=None, + result_processor=None, + ): if "explain" in options: - raise ConfigurationError("The explain option is not supported. " - "Use Database.command instead.") + raise ConfigurationError( + "The explain option is not supported. " "Use Database.command instead." + ) self._target = target - pipeline = common.validate_list('pipeline', pipeline) + pipeline = common.validate_list("pipeline", pipeline) self._pipeline = pipeline self._performs_write = False if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): self._performs_write = True - common.validate_is_mapping('options', options) + common.validate_is_mapping("options", options) if let: common.validate_is_mapping("let", let) options["let"] = let @@ -51,7 +62,8 @@ def __init__(self, target, cursor_class, pipeline, options, # This is the batchSize that will be used for setting the initial # batchSize for the cursor, as well as the subsequent getMores. self._batch_size = common.validate_non_negative_integer_or_none( - "batchSize", self._options.pop("batchSize", None)) + "batchSize", self._options.pop("batchSize", None) + ) # If the cursor option is already specified, avoid overriding it. self._options.setdefault("cursor", {}) @@ -65,10 +77,9 @@ def __init__(self, target, cursor_class, pipeline, options, self._user_fields = user_fields self._result_processor = result_processor - self._collation = validate_collation_or_none( - options.pop('collation', None)) + self._collation = validate_collation_or_none(options.pop("collation", None)) - self._max_await_time_ms = options.pop('maxAwaitTimeMS', None) + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) self._write_preference = None @property @@ -100,17 +111,16 @@ def get_read_preference(self, session): def get_cursor(self, session, server, sock_info, read_preference): # Serialize command. - cmd = SON([("aggregate", self._aggregation_target), - ("pipeline", self._pipeline)]) + cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) cmd.update(self._options) # Apply this target's read concern if: # readConcern has not been specified as a kwarg and either # - server version is >= 4.2 or # - server version is >= 3.2 and pipeline doesn't use $out - if (('readConcern' not in cmd) and - (not self._performs_write or - (sock_info.max_wire_version >= 8))): + if ("readConcern" not in cmd) and ( + not self._performs_write or (sock_info.max_wire_version >= 8) + ): read_concern = self._target.read_concern else: read_concern = None @@ -118,7 +128,7 @@ def get_cursor(self, session, server, sock_info, read_preference): # Apply this target's write concern if: # writeConcern has not been specified as a kwarg and pipeline doesn't # perform a write operation - if 'writeConcern' not in cmd and self._performs_write: + if "writeConcern" not in cmd and self._performs_write: write_concern = self._target._write_concern_for(session) else: write_concern = None @@ -135,14 +145,15 @@ def get_cursor(self, session, server, sock_info, read_preference): collation=self._collation, session=session, client=self._database.client, - user_fields=self._user_fields) + user_fields=self._user_fields, + ) if self._result_processor: self._result_processor(result, sock_info) # Extract cursor from result or mock/fake one if necessary. - if 'cursor' in result: - cursor = result['cursor'] + if "cursor" in result: + cursor = result["cursor"] else: # Unacknowledged $out/$merge write. Fake a cursor. cursor = { @@ -153,16 +164,19 @@ def get_cursor(self, session, server, sock_info, read_preference): # Create and return cursor instance. cmd_cursor = self._cursor_class( - self._cursor_collection(cursor), cursor, sock_info.address, + self._cursor_collection(cursor), + cursor, + sock_info.address, batch_size=self._batch_size or 0, max_await_time_ms=self._max_await_time_ms, - session=session, explicit_session=self._explicit_session) + session=session, + explicit_session=self._explicit_session, + ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor class _CollectionAggregationCommand(_AggregationCommand): - @property def _aggregation_target(self): return self._target.name diff --git a/pymongo/auth.py b/pymongo/auth.py index 34f1c7fc94..0a4e7e7324 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -34,7 +34,8 @@ _USE_PRINCIPAL = False try: import winkerberos as kerberos - if tuple(map(int, kerberos.__version__.split('.')[:2])) >= (0, 5): + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): _USE_PRINCIPAL = True except ImportError: try: @@ -44,21 +45,24 @@ MECHANISMS = frozenset( - ['GSSAPI', - 'MONGODB-CR', - 'MONGODB-X509', - 'MONGODB-AWS', - 'PLAIN', - 'SCRAM-SHA-1', - 'SCRAM-SHA-256', - 'DEFAULT']) + [ + "GSSAPI", + "MONGODB-CR", + "MONGODB-X509", + "MONGODB-AWS", + "PLAIN", + "SCRAM-SHA-1", + "SCRAM-SHA-256", + "DEFAULT", + ] +) """The authentication mechanisms supported by PyMongo.""" class _Cache(object): __slots__ = ("data",) - _hash_val = hash('_Cache') + _hash_val = hash("_Cache") def __init__(self): self.data = None @@ -78,80 +82,69 @@ def __hash__(self): return self._hash_val - MongoCredential = namedtuple( - 'MongoCredential', - ['mechanism', - 'source', - 'username', - 'password', - 'mechanism_properties', - 'cache']) + "MongoCredential", + ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], +) """A hashable namedtuple of values used for authentication.""" -GSSAPIProperties = namedtuple('GSSAPIProperties', - ['service_name', - 'canonicalize_host_name', - 'service_realm']) +GSSAPIProperties = namedtuple( + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] +) """Mechanism properties for GSSAPI authentication.""" -_AWSProperties = namedtuple('_AWSProperties', ['aws_session_token']) +_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) """Mechanism properties for MONGODB-AWS authentication.""" def _build_credentials_tuple(mech, source, user, passwd, extra, database): - """Build and return a mechanism specific credentials tuple. - """ - if mech not in ('MONGODB-X509', 'MONGODB-AWS') and user is None: + """Build and return a mechanism specific credentials tuple.""" + if mech not in ("MONGODB-X509", "MONGODB-AWS") and user is None: raise ConfigurationError("%s requires a username." % (mech,)) - if mech == 'GSSAPI': - if source is not None and source != '$external': - raise ValueError( - "authentication source must be $external or None for GSSAPI") - properties = extra.get('authmechanismproperties', {}) - service_name = properties.get('SERVICE_NAME', 'mongodb') - canonicalize = properties.get('CANONICALIZE_HOST_NAME', False) - service_realm = properties.get('SERVICE_REALM') - props = GSSAPIProperties(service_name=service_name, - canonicalize_host_name=canonicalize, - service_realm=service_realm) + if mech == "GSSAPI": + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for GSSAPI") + properties = extra.get("authmechanismproperties", {}) + service_name = properties.get("SERVICE_NAME", "mongodb") + canonicalize = properties.get("CANONICALIZE_HOST_NAME", False) + service_realm = properties.get("SERVICE_REALM") + props = GSSAPIProperties( + service_name=service_name, + canonicalize_host_name=canonicalize, + service_realm=service_realm, + ) # Source is always $external. - return MongoCredential(mech, '$external', user, passwd, props, None) - elif mech == 'MONGODB-X509': + return MongoCredential(mech, "$external", user, passwd, props, None) + elif mech == "MONGODB-X509": if passwd is not None: - raise ConfigurationError( - "Passwords are not supported by MONGODB-X509") - if source is not None and source != '$external': - raise ValueError( - "authentication source must be " - "$external or None for MONGODB-X509") + raise ConfigurationError("Passwords are not supported by MONGODB-X509") + if source is not None and source != "$external": + raise ValueError("authentication source must be " "$external or None for MONGODB-X509") # Source is always $external, user can be None. - return MongoCredential(mech, '$external', user, None, None, None) - elif mech == 'MONGODB-AWS': + return MongoCredential(mech, "$external", user, None, None, None) + elif mech == "MONGODB-AWS": if user is not None and passwd is None: + raise ConfigurationError("username without a password is not supported by MONGODB-AWS") + if source is not None and source != "$external": raise ConfigurationError( - "username without a password is not supported by MONGODB-AWS") - if source is not None and source != '$external': - raise ConfigurationError( - "authentication source must be " - "$external or None for MONGODB-AWS") + "authentication source must be " "$external or None for MONGODB-AWS" + ) - properties = extra.get('authmechanismproperties', {}) - aws_session_token = properties.get('AWS_SESSION_TOKEN') + properties = extra.get("authmechanismproperties", {}) + aws_session_token = properties.get("AWS_SESSION_TOKEN") aws_props = _AWSProperties(aws_session_token=aws_session_token) # user can be None for temporary link-local EC2 credentials. - return MongoCredential(mech, '$external', user, passwd, aws_props, None) - elif mech == 'PLAIN': - source_database = source or database or '$external' + return MongoCredential(mech, "$external", user, passwd, aws_props, None) + elif mech == "PLAIN": + source_database = source or database or "$external" return MongoCredential(mech, source_database, user, passwd, None, None) else: - source_database = source or database or 'admin' + source_database = source or database or "admin" if passwd is None: raise ConfigurationError("A password is required.") - return MongoCredential( - mech, source_database, user, passwd, None, _Cache()) + return MongoCredential(mech, source_database, user, passwd, None, _Cache()) def _xor(fir, sec): @@ -170,18 +163,22 @@ def _authenticate_scram_start(credentials, mechanism): nonce = standard_b64encode(os.urandom(32)) first_bare = b"n=" + user + b",r=" + nonce - cmd = SON([('saslStart', 1), - ('mechanism', mechanism), - ('payload', Binary(b"n,," + first_bare)), - ('autoAuthorize', 1), - ('options', {'skipEmptyExchange': True})]) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", mechanism), + ("payload", Binary(b"n,," + first_bare)), + ("autoAuthorize", 1), + ("options", {"skipEmptyExchange": True}), + ] + ) return nonce, first_bare, cmd def _authenticate_scram(credentials, sock_info, mechanism): """Authenticate using SCRAM.""" username = credentials.username - if mechanism == 'SCRAM-SHA-256': + if mechanism == "SCRAM-SHA-256": digest = "sha256" digestmod = hashlib.sha256 data = saslprep(credentials.password).encode("utf-8") @@ -200,17 +197,16 @@ def _authenticate_scram(credentials, sock_info, mechanism): nonce, first_bare = ctx.scram_data res = ctx.speculative_authenticate else: - nonce, first_bare, cmd = _authenticate_scram_start( - credentials, mechanism) + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) res = sock_info.command(source, cmd) - server_first = res['payload'] + server_first = res["payload"] parsed = _parse_scram_response(server_first) - iterations = int(parsed[b'i']) + iterations = int(parsed[b"i"]) if iterations < 4096: raise OperationFailure("Server returned an invalid iteration count.") - salt = parsed[b's'] - rnonce = parsed[b'r'] + salt = parsed[b"s"] + rnonce = parsed[b"r"] if not rnonce.startswith(nonce): raise OperationFailure("Server returned an invalid nonce.") @@ -223,8 +219,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): # Salt and / or iterations could change for a number of different # reasons. Either changing invalidates the cache. if not client_key or salt != csalt or iterations != citerations: - salted_pass = hashlib.pbkdf2_hmac( - digest, data, standard_b64decode(salt), iterations) + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() cache.data = (client_key, server_key, salt, iterations) @@ -234,32 +229,38 @@ def _authenticate_scram(credentials, sock_info, mechanism): client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) client_final = b",".join((without_proof, client_proof)) - server_sig = standard_b64encode( - _hmac(server_key, auth_msg, digestmod).digest()) + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) - cmd = SON([('saslContinue', 1), - ('conversationId', res['conversationId']), - ('payload', Binary(client_final))]) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", res["conversationId"]), + ("payload", Binary(client_final)), + ] + ) res = sock_info.command(source, cmd) - parsed = _parse_scram_response(res['payload']) - if not hmac.compare_digest(parsed[b'v'], server_sig): + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): raise OperationFailure("Server returned an invalid signature.") # A third empty challenge may be required if the server does not support # skipEmptyExchange: SERVER-44857. - if not res['done']: - cmd = SON([('saslContinue', 1), - ('conversationId', res['conversationId']), - ('payload', Binary(b''))]) + if not res["done"]: + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", res["conversationId"]), + ("payload", Binary(b"")), + ] + ) res = sock_info.command(source, cmd) - if not res['done']: - raise OperationFailure('SASL conversation failed to complete.') + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") def _password_digest(username, password): - """Get a password digest to use for authentication. - """ + """Get a password digest to use for authentication.""" if not isinstance(password, str): raise TypeError("password must be an instance of str") if len(password) == 0: @@ -269,17 +270,16 @@ def _password_digest(username, password): md5hash = hashlib.md5() data = "%s:mongo:%s" % (username, password) - md5hash.update(data.encode('utf-8')) + md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() def _auth_key(nonce, username, password): - """Get an auth key to use for authentication. - """ + """Get an auth key to use for authentication.""" digest = _password_digest(username, password) md5hash = hashlib.md5() data = "%s%s%s" % (nonce, username, digest) - md5hash.update(data.encode('utf-8')) + md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() @@ -287,7 +287,8 @@ def _canonicalize_hostname(hostname): """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( - hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME)[0] + hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME + )[0] try: name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) @@ -298,11 +299,11 @@ def _canonicalize_hostname(hostname): def _authenticate_gssapi(credentials, sock_info): - """Authenticate using GSSAPI. - """ + """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: - raise ConfigurationError('The "kerberos" module must be ' - 'installed to use GSSAPI authentication.') + raise ConfigurationError( + 'The "kerberos" module must be ' "installed to use GSSAPI authentication." + ) try: username = credentials.username @@ -313,9 +314,9 @@ def _authenticate_gssapi(credentials, sock_info): host = sock_info.address[0] if props.canonicalize_host_name: host = _canonicalize_hostname(host) - service = props.service_name + '@' + host + service = props.service_name + "@" + host if props.service_realm is not None: - service = service + '@' + props.service_realm + service = service + "@" + props.service_realm if password is not None: if _USE_PRINCIPAL: @@ -324,81 +325,88 @@ def _authenticate_gssapi(credentials, sock_info): # by WinKerberos) doesn't support +. principal = ":".join((quote(username), quote(password))) result, ctx = kerberos.authGSSClientInit( - service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) else: - if '@' in username: - user, domain = username.split('@', 1) + if "@" in username: + user, domain = username.split("@", 1) else: user, domain = username, None result, ctx = kerberos.authGSSClientInit( - service, gssflags=kerberos.GSS_C_MUTUAL_FLAG, - user=user, domain=domain, password=password) + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) else: - result, ctx = kerberos.authGSSClientInit( - service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) if result != kerberos.AUTH_GSS_COMPLETE: - raise OperationFailure('Kerberos context failed to initialize.') + raise OperationFailure("Kerberos context failed to initialize.") try: # pykerberos uses a weird mix of exceptions and return values # to indicate errors. # 0 == continue, 1 == complete, -1 == error # Only authGSSClientStep can return 0. - if kerberos.authGSSClientStep(ctx, '') != 0: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos " "failure in step function.") # Start a SASL conversation with mongod/s # Note: pykerberos deals with base64 encoded byte strings. # Since mongo accepts base64 strings as the payload we don't # have to use bson.binary.Binary. payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslStart', 1), - ('mechanism', 'GSSAPI'), - ('payload', payload), - ('autoAuthorize', 1)]) - response = sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "GSSAPI"), + ("payload", payload), + ("autoAuthorize", 1), + ] + ) + response = sock_info.command("$external", cmd) # Limit how many times we loop to catch protocol / library issues for _ in range(10): - result = kerberos.authGSSClientStep(ctx, - str(response['payload'])) + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) if result == -1: - raise OperationFailure('Unknown kerberos ' - 'failure in step function.') + raise OperationFailure("Unknown kerberos " "failure in step function.") - payload = kerberos.authGSSClientResponse(ctx) or '' + payload = kerberos.authGSSClientResponse(ctx) or "" - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - response = sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", response["conversationId"]), + ("payload", payload), + ] + ) + response = sock_info.command("$external", cmd) if result == kerberos.AUTH_GSS_COMPLETE: break else: - raise OperationFailure('Kerberos ' - 'authentication failed to complete.') + raise OperationFailure("Kerberos " "authentication failed to complete.") # Once the security context is established actually authenticate. # See RFC 4752, Section 3.1, last two paragraphs. - if kerberos.authGSSClientUnwrap(ctx, - str(response['payload'])) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Unwrap step.') + if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos " "failure during GSS_Unwrap step.") - if kerberos.authGSSClientWrap(ctx, - kerberos.authGSSClientResponse(ctx), - username) != 1: - raise OperationFailure('Unknown kerberos ' - 'failure during GSS_Wrap step.') + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos " "failure during GSS_Wrap step.") payload = kerberos.authGSSClientResponse(ctx) - cmd = SON([('saslContinue', 1), - ('conversationId', response['conversationId']), - ('payload', payload)]) - sock_info.command('$external', cmd) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", response["conversationId"]), + ("payload", payload), + ] + ) + sock_info.command("$external", cmd) finally: kerberos.authGSSClientClean(ctx) @@ -408,47 +416,45 @@ def _authenticate_gssapi(credentials, sock_info): def _authenticate_plain(credentials, sock_info): - """Authenticate using SASL PLAIN (RFC 4616) - """ + """Authenticate using SASL PLAIN (RFC 4616)""" source = credentials.source username = credentials.username password = credentials.password - payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8') - cmd = SON([('saslStart', 1), - ('mechanism', 'PLAIN'), - ('payload', Binary(payload)), - ('autoAuthorize', 1)]) + payload = ("\x00%s\x00%s" % (username, password)).encode("utf-8") + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "PLAIN"), + ("payload", Binary(payload)), + ("autoAuthorize", 1), + ] + ) sock_info.command(source, cmd) def _authenticate_x509(credentials, sock_info): - """Authenticate using MONGODB-X509. - """ + """Authenticate using MONGODB-X509.""" ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): # MONGODB-X509 is done after the speculative auth step. return cmd = _X509Context(credentials).speculate_command() - sock_info.command('$external', cmd) + sock_info.command("$external", cmd) def _authenticate_mongo_cr(credentials, sock_info): - """Authenticate using MONGODB-CR. - """ + """Authenticate using MONGODB-CR.""" source = credentials.source username = credentials.username password = credentials.password # Get a nonce - response = sock_info.command(source, {'getnonce': 1}) - nonce = response['nonce'] + response = sock_info.command(source, {"getnonce": 1}) + nonce = response["nonce"] key = _auth_key(nonce, username, password) # Actually authenticate - query = SON([('authenticate', 1), - ('user', username), - ('nonce', nonce), - ('key', key)]) + query = SON([("authenticate", 1), ("user", username), ("nonce", nonce), ("key", key)]) sock_info.command(source, query) @@ -459,29 +465,27 @@ def _authenticate_default(credentials, sock_info): else: source = credentials.source cmd = sock_info.hello_cmd() - cmd['saslSupportedMechs'] = source + '.' + credentials.username - mechs = sock_info.command( - source, cmd, publish_events=False).get( - 'saslSupportedMechs', []) - if 'SCRAM-SHA-256' in mechs: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-256') + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = sock_info.command(source, cmd, publish_events=False).get( + "saslSupportedMechs", [] + ) + if "SCRAM-SHA-256" in mechs: + return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-256") else: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') + return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-1") else: - return _authenticate_scram(credentials, sock_info, 'SCRAM-SHA-1') + return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-1") _AUTH_MAP: Mapping[str, Callable] = { - 'GSSAPI': _authenticate_gssapi, - 'MONGODB-CR': _authenticate_mongo_cr, - 'MONGODB-X509': _authenticate_x509, - 'MONGODB-AWS': _authenticate_aws, - 'PLAIN': _authenticate_plain, - 'SCRAM-SHA-1': functools.partial( - _authenticate_scram, mechanism='SCRAM-SHA-1'), - 'SCRAM-SHA-256': functools.partial( - _authenticate_scram, mechanism='SCRAM-SHA-256'), - 'DEFAULT': _authenticate_default, + "GSSAPI": _authenticate_gssapi, + "MONGODB-CR": _authenticate_mongo_cr, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, } @@ -514,10 +518,9 @@ def __init__(self, credentials, mechanism): self.mechanism = mechanism def speculate_command(self): - nonce, first_bare, cmd = _authenticate_scram_start( - self.credentials, self.mechanism) + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) # The 'db' field is included only on the speculative command. - cmd['db'] = self.credentials.source + cmd["db"] = self.credentials.source # Save for later use. self.scram_data = (nonce, first_bare) return cmd @@ -525,19 +528,17 @@ def speculate_command(self): class _X509Context(_AuthContext): def speculate_command(self): - cmd = SON([('authenticate', 1), - ('mechanism', 'MONGODB-X509')]) + cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")]) if self.credentials.username is not None: - cmd['user'] = self.credentials.username + cmd["user"] = self.credentials.username return cmd _SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = { - 'MONGODB-X509': _X509Context, - 'SCRAM-SHA-1': functools.partial(_ScramContext, mechanism='SCRAM-SHA-1'), - 'SCRAM-SHA-256': functools.partial(_ScramContext, - mechanism='SCRAM-SHA-256'), - 'DEFAULT': functools.partial(_ScramContext, mechanism='SCRAM-SHA-256'), + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), } @@ -546,4 +547,3 @@ def authenticate(credentials, sock_info): mechanism = credentials.mechanism auth_func = _AUTH_MAP[mechanism] auth_func(credentials, sock_info) - diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index 0233d192d4..4b2af35ea4 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -16,13 +16,15 @@ try: import pymongo_auth_aws - from pymongo_auth_aws import (AwsCredential, AwsSaslContext, - PyMongoAuthAwsError) + from pymongo_auth_aws import AwsCredential, AwsSaslContext, PyMongoAuthAwsError + _HAVE_MONGODB_AWS = True except ImportError: + class AwsSaslContext(object): # type: ignore def __init__(self, credentials): pass + _HAVE_MONGODB_AWS = False import bson @@ -47,38 +49,46 @@ def bson_decode(self, data): def _authenticate_aws(credentials, sock_info): - """Authenticate using MONGODB-AWS. - """ + """Authenticate using MONGODB-AWS.""" if not _HAVE_MONGODB_AWS: raise ConfigurationError( "MONGODB-AWS authentication requires pymongo-auth-aws: " - "install with: python -m pip install 'pymongo[aws]'") + "install with: python -m pip install 'pymongo[aws]'" + ) if sock_info.max_wire_version < 9: - raise ConfigurationError( - "MONGODB-AWS authentication requires MongoDB version 4.4 or later") + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") try: - ctx = _AwsSaslContext(AwsCredential( - credentials.username, credentials.password, - credentials.mechanism_properties.aws_session_token)) + ctx = _AwsSaslContext( + AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) client_payload = ctx.step(None) - client_first = SON([('saslStart', 1), - ('mechanism', 'MONGODB-AWS'), - ('payload', client_payload)]) - server_first = sock_info.command('$external', client_first) + client_first = SON( + [("saslStart", 1), ("mechanism", "MONGODB-AWS"), ("payload", client_payload)] + ) + server_first = sock_info.command("$external", client_first) res = server_first # Limit how many times we loop to catch protocol / library issues for _ in range(10): - client_payload = ctx.step(res['payload']) - cmd = SON([('saslContinue', 1), - ('conversationId', server_first['conversationId']), - ('payload', client_payload)]) - res = sock_info.command('$external', cmd) - if res['done']: + client_payload = ctx.step(res["payload"]) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", server_first["conversationId"]), + ("payload", client_payload), + ] + ) + res = sock_info.command("$external", cmd) + if res["done"]: # SASL complete. break except PyMongoAuthAwsError as exc: # Convert to OperationFailure and include pymongo-auth-aws version. - raise OperationFailure('%s (pymongo-auth-aws version %s)' % ( - exc, pymongo_auth_aws.__version__)) + raise OperationFailure( + "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) + ) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 8d343bb2c6..e043e09fdd 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -24,13 +24,27 @@ from bson.son import SON from pymongo.client_session import _validate_session_write_concern from pymongo.collation import validate_collation_or_none -from pymongo.common import (validate_is_document_type, validate_is_mapping, - validate_ok_for_replace, validate_ok_for_update) -from pymongo.errors import (BulkWriteError, ConfigurationError, - InvalidOperation, OperationFailure) +from pymongo.common import ( + validate_is_document_type, + validate_is_mapping, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc -from pymongo.message import (_DELETE, _INSERT, _UPDATE, _BulkWriteContext, - _EncryptedBulkWriteContext, _randint) +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _EncryptedBulkWriteContext, + _randint, +) from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -42,15 +56,14 @@ _UNKNOWN_ERROR = 8 _WRITE_CONCERN_ERROR = 64 -_COMMANDS = ('insert', 'update', 'delete') +_COMMANDS = ("insert", "update", "delete") class _Run(object): - """Represents a batch of write operations. - """ + """Represents a batch of write operations.""" + def __init__(self, op_type): - """Initialize a new Run object. - """ + """Initialize a new Run object.""" self.op_type = op_type self.index_map = [] self.ops = [] @@ -77,8 +90,7 @@ def add(self, original_index, operation): def _merge_command(run, full_result, offset, result): - """Merge a write command result into the full bulk result. - """ + """Merge a write command result into the full bulk result.""" affected = result.get("n", 0) if run.op_type == _INSERT: @@ -95,7 +107,7 @@ def _merge_command(run, full_result, offset, result): doc["index"] = run.index(doc["index"] + offset) full_result["upserted"].extend(upserted) full_result["nUpserted"] += n_upserted - full_result["nMatched"] += (affected - n_upserted) + full_result["nMatched"] += affected - n_upserted else: full_result["nMatched"] += affected full_result["nModified"] += result["nModified"] @@ -117,24 +129,22 @@ def _merge_command(run, full_result, offset, result): def _raise_bulk_write_error(full_result): - """Raise a BulkWriteError from the full bulk api result. - """ + """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: - full_result["writeErrors"].sort( - key=lambda error: error["index"]) + full_result["writeErrors"].sort(key=lambda error: error["index"]) raise BulkWriteError(full_result) class _Bulk(object): - """The private guts of the bulk write API. - """ + """The private guts of the bulk write API.""" + def __init__(self, collection, ordered, bypass_document_validation): - """Initialize a _Bulk instance. - """ + """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( - unicode_decode_error_handler='replace', - document_class=dict)) + unicode_decode_error_handler="replace", document_class=dict + ) + ) self.ordered = ordered self.ops = [] self.executed = False @@ -159,63 +169,64 @@ def bulk_ctx_class(self): return _BulkWriteContext def add_insert(self, document): - """Add an insert document to the list of ops. - """ + """Add an insert document to the list of ops.""" validate_is_document_type("document", document) # Generate ObjectId client side. - if not (isinstance(document, RawBSONDocument) or '_id' in document): - document['_id'] = ObjectId() + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() self.ops.append((_INSERT, document)) - def add_update(self, selector, update, multi=False, upsert=False, - collation=None, array_filters=None, hint=None): - """Create an update document and add it to the list of ops. - """ + def add_update( + self, + selector, + update, + multi=False, + upsert=False, + collation=None, + array_filters=None, + hint=None, + ): + """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd = SON([('q', selector), ('u', update), - ('multi', multi), ('upsert', upsert)]) + cmd = SON([("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if array_filters is not None: self.uses_array_filters = True - cmd['arrayFilters'] = array_filters + cmd["arrayFilters"] = array_filters if hint is not None: self.uses_hint_update = True - cmd['hint'] = hint + cmd["hint"] = hint if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append((_UPDATE, cmd)) - def add_replace(self, selector, replacement, upsert=False, - collation=None, hint=None): - """Create a replace document and add it to the list of ops. - """ + def add_replace(self, selector, replacement, upsert=False, collation=None, hint=None): + """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) - cmd = SON([('q', selector), ('u', replacement), - ('multi', False), ('upsert', upsert)]) + cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if hint is not None: self.uses_hint_update = True - cmd['hint'] = hint + cmd["hint"] = hint self.ops.append((_UPDATE, cmd)) def add_delete(self, selector, limit, collation=None, hint=None): - """Create a delete document and add it to the list of ops. - """ - cmd = SON([('q', selector), ('limit', limit)]) + """Create a delete document and add it to the list of ops.""" + cmd = SON([("q", selector), ("limit", limit)]) collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True - cmd['collation'] = collation + cmd["collation"] = collation if hint is not None: self.uses_hint_delete = True - cmd['hint'] = hint + cmd["hint"] = hint if limit == _DELETE_ALL: # A bulk_write containing a delete_many is not retryable. self.is_retryable = False @@ -247,9 +258,17 @@ def gen_unordered(self): if run.ops: yield run - def _execute_command(self, generator, write_concern, session, - sock_info, op_id, retryable, full_result, - final_write_concern=None): + def _execute_command( + self, + generator, + write_concern, + session, + sock_info, + op_id, + retryable, + full_result, + final_write_concern=None, + ): db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -272,8 +291,15 @@ def _execute_command(self, generator, write_concern, session, cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd_name, sock_info, op_id, listeners, session, - run.op_type, self.collection.codec_options) + db_name, + cmd_name, + sock_info, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) while run.idx_offset < len(run.ops): # If this is the last possible operation, use the @@ -281,20 +307,18 @@ def _execute_command(self, generator, write_concern, session, if last_run and (len(run.ops) - run.idx_offset) == 1: write_concern = final_write_concern or write_concern - cmd = SON([(cmd_name, self.collection.name), - ('ordered', self.ordered)]) + cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) if not write_concern.is_server_default: - cmd['writeConcern'] = write_concern.document + cmd["writeConcern"] = write_concern.document if self.bypass_doc_val: - cmd['bypassDocumentValidation'] = True + cmd["bypassDocumentValidation"] = True if session: # Start a new retryable write unless one was already # started for this command. if retryable and not self.started_retryable_write: session._start_retryable_write() self.started_retryable_write = True - session._apply_to(cmd, retryable, ReadPreference.PRIMARY, - sock_info) + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info) sock_info.send_cluster_time(cmd, session, client) sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) @@ -304,8 +328,8 @@ def _execute_command(self, generator, write_concern, session, result, to_send = bwc.execute(cmd, ops, client) # Retryable writeConcernErrors halt the execution of this run. - wce = result.get('writeConcernError', {}) - if wce.get('code', 0) in _RETRYABLE_ERROR_CODES: + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: # Synthesize the full bulk result without modifying the # current one because this write operation may be retried. full = copy.deepcopy(full_result) @@ -327,14 +351,13 @@ def _execute_command(self, generator, write_concern, session, # We're supposed to continue if errors are # at the write concern level (e.g. wtimeout) - if self.ordered and full_result['writeErrors']: + if self.ordered and full_result["writeErrors"]: break # Reset our state self.current_run = run = self.next_run def execute_command(self, generator, write_concern, session): - """Execute using write commands. - """ + """Execute using write commands.""" # nModified is only reported for write commands, not legacy ops. full_result = { "writeErrors": [], @@ -350,21 +373,19 @@ def execute_command(self, generator, write_concern, session): def retryable_bulk(session, sock_info, retryable): self._execute_command( - generator, write_concern, session, sock_info, op_id, - retryable, full_result) + generator, write_concern, session, sock_info, op_id, retryable, full_result + ) client = self.collection.database.client with client._tmp_session(session) as s: - client._retry_with_session( - self.is_retryable, retryable_bulk, s, self) + client._retry_with_session(self.is_retryable, retryable_bulk, s, self) if full_result["writeErrors"] or full_result["writeConcernErrors"]: _raise_bulk_write_error(full_result) return full_result def execute_op_msg_no_results(self, sock_info, generator): - """Execute write commands with OP_MSG and w=0 writeConcern, unordered. - """ + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -377,13 +398,24 @@ def execute_op_msg_no_results(self, sock_info, generator): while run: cmd_name = _COMMANDS[run.op_type] bwc = self.bulk_ctx_class( - db_name, cmd_name, sock_info, op_id, listeners, None, - run.op_type, self.collection.codec_options) + db_name, + cmd_name, + sock_info, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) while run.idx_offset < len(run.ops): - cmd = SON([(cmd_name, self.collection.name), - ('ordered', False), - ('writeConcern', {'w': 0})]) + cmd = SON( + [ + (cmd_name, self.collection.name), + ("ordered", False), + ("writeConcern", {"w": 0}), + ] + ) sock_info.add_server_api(cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible. @@ -392,8 +424,7 @@ def execute_op_msg_no_results(self, sock_info, generator): self.current_run = run = next(generator, None) def execute_command_no_results(self, sock_info, generator, write_concern): - """Execute write commands with OP_MSG and w=0 WriteConcern, ordered. - """ + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" full_result = { "writeErrors": [], "writeConcernErrors": [], @@ -411,45 +442,50 @@ def execute_command_no_results(self, sock_info, generator, write_concern): op_id = _randint() try: self._execute_command( - generator, initial_write_concern, None, - sock_info, op_id, False, full_result, write_concern) + generator, + initial_write_concern, + None, + sock_info, + op_id, + False, + full_result, + write_concern, + ) except OperationFailure: pass def execute_no_results(self, sock_info, generator, write_concern): - """Execute all operations, returning no results (w=0). - """ + """Execute all operations, returning no results (w=0).""" if self.uses_collation: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") if self.uses_array_filters: - raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged writes.') + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") # Guard against unsupported unacknowledged writes. unack = write_concern and not write_concern.acknowledged if unack and self.uses_hint_delete and sock_info.max_wire_version < 9: raise ConfigurationError( - 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) if unack and self.uses_hint_update and sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: - raise OperationFailure("Cannot set bypass_document_validation with" - " unacknowledged write concern") + raise OperationFailure( + "Cannot set bypass_document_validation with" " unacknowledged write concern" + ) if self.ordered: return self.execute_command_no_results(sock_info, generator, write_concern) return self.execute_op_msg_no_results(sock_info, generator) def execute(self, write_concern, session): - """Execute operations. - """ + """Execute operations.""" if not self.ops: - raise InvalidOperation('No operations to execute') + raise InvalidOperation("No operations to execute") if self.executed: - raise InvalidOperation('Bulk operations can ' - 'only be executed once.') + raise InvalidOperation("Bulk operations can " "only be executed once.") self.executed = True write_concern = write_concern or self.collection.write_concern session = _validate_session_write_concern(session, write_concern) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 69446fdecf..a35c9cb844 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -15,43 +15,51 @@ """Watch changes on a collection, a database, or the entire cluster.""" import copy -from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, - Optional, Union) +from typing import TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, Optional, Union from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp from pymongo import common -from pymongo.aggregation import (_CollectionAggregationCommand, - _DatabaseAggregationCommand) +from pymongo.aggregation import ( + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor -from pymongo.errors import (ConnectionFailure, CursorNotFound, - InvalidOperation, OperationFailure, PyMongoError) +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) from pymongo.typings import _CollationIn, _DocumentType, _Pipeline # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. -_RESUMABLE_GETMORE_ERRORS = frozenset([ - 6, # HostUnreachable - 7, # HostNotFound - 89, # NetworkTimeout - 91, # ShutdownInProgress - 189, # PrimarySteppedDown - 262, # ExceededTimeLimit - 9001, # SocketException - 10107, # NotWritablePrimary - 11600, # InterruptedAtShutdown - 11602, # InterruptedDueToReplStateChange - 13435, # NotPrimaryNoSecondaryOk - 13436, # NotPrimaryOrSecondary - 63, # StaleShardVersion - 150, # StaleEpoch - 13388, # StaleConfig - 234, # RetryChangeStream - 133, # FailedToSatisfyReadPreference - 216, # ElectionInProgress -]) +_RESUMABLE_GETMORE_ERRORS = frozenset( + [ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotWritablePrimary + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + 216, # ElectionInProgress + ] +) if TYPE_CHECKING: @@ -72,9 +80,12 @@ class ChangeStream(Generic[_DocumentType]): .. versionadded:: 3.6 .. seealso:: The MongoDB documentation on `changeStreams `_. """ + def __init__( self, - target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]"], + target: Union[ + "MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]" + ], pipeline: Optional[_Pipeline], full_document: Optional[str], resume_after: Optional[Mapping[str, Any]], @@ -87,8 +98,8 @@ def __init__( ) -> None: if pipeline is None: pipeline = [] - pipeline = common.validate_list('pipeline', pipeline) - common.validate_string_or_none('full_document', full_document) + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) validate_collation_or_none(collation) common.validate_non_negative_integer_or_none("batchSize", batch_size) @@ -99,8 +110,8 @@ def __init__( # Keep the type registry so that we support encoding custom types # in the pipeline. self._target = target.with_options( # type: ignore - codec_options=target.codec_options.with_options( - document_class=RawBSONDocument)) + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) else: self._target = target @@ -126,24 +137,24 @@ def _aggregation_command_class(self): @property def _client(self): """The client against which the aggregation commands for - this ChangeStream will be run. """ + this ChangeStream will be run.""" raise NotImplementedError def _change_stream_options(self): """Return the options dict for the $changeStream pipeline stage.""" options: Dict[str, Any] = {} if self._full_document is not None: - options['fullDocument'] = self._full_document + options["fullDocument"] = self._full_document resume_token = self.resume_token if resume_token is not None: if self._uses_start_after: - options['startAfter'] = resume_token + options["startAfter"] = resume_token else: - options['resumeAfter'] = resume_token + options["resumeAfter"] = resume_token if self._start_at_operation_time is not None: - options['startAtOperationTime'] = self._start_at_operation_time + options["startAtOperationTime"] = self._start_at_operation_time return options def _command_options(self): @@ -158,7 +169,7 @@ def _command_options(self): def _aggregation_pipeline(self): """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() - full_pipeline: list = [{'$changeStream': options}] + full_pipeline: list = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline @@ -170,38 +181,43 @@ def _process_result(self, result, sock_info): This is implemented as a callback because we need access to the wire version in order to determine whether to cache this value. """ - if not result['cursor']['firstBatch']: - if 'postBatchResumeToken' in result['cursor']: - self._resume_token = result['cursor']['postBatchResumeToken'] - elif (self._start_at_operation_time is None and - self._uses_resume_after is False and - self._uses_start_after is False and - sock_info.max_wire_version >= 7): + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and sock_info.max_wire_version >= 7 + ): self._start_at_operation_time = result.get("operationTime") # PYTHON-2181: informative error on missing operationTime. if self._start_at_operation_time is None: raise OperationFailure( "Expected field 'operationTime' missing from command " - "response : %r" % (result, )) + "response : %r" % (result,) + ) def _run_aggregation_cmd(self, session, explicit_session): """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ cmd = self._aggregation_command_class( - self._target, CommandCursor, self._aggregation_pipeline(), - self._command_options(), explicit_session, - result_processor=self._process_result) + self._target, + CommandCursor, + self._aggregation_pipeline(), + self._command_options(), + explicit_session, + result_processor=self._process_result, + ) return self._client._retryable_read( - cmd.get_cursor, self._target._read_preference_for(session), - session) + cmd.get_cursor, self._target._read_preference_for(session), session + ) def _create_cursor(self): with self._client._tmp_session(self._session, close=False) as s: - return self._run_aggregation_cmd( - session=s, - explicit_session=self._session is not None) + return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) def _resume(self): """Reestablish this change stream after a resumable error.""" @@ -321,10 +337,9 @@ def try_next(self) -> Optional[_DocumentType]: except OperationFailure as exc: if exc._max_wire_version is None: raise - is_resumable = ((exc._max_wire_version >= 9 and - exc.has_error_label("ResumableChangeStreamError")) or - (exc._max_wire_version < 9 and - exc.code in _RESUMABLE_GETMORE_ERRORS)) + is_resumable = ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) if not is_resumable: raise self._resume() @@ -343,17 +358,16 @@ def try_next(self) -> Optional[_DocumentType]: # Else, changes are available. try: - resume_token = change['_id'] + resume_token = change["_id"] except KeyError: self.close() raise InvalidOperation( - "Cannot provide resume functionality when the resume " - "token is missing.") + "Cannot provide resume functionality when the resume " "token is missing." + ) # If this is the last change document from the current batch, cache the # postBatchResumeToken. - if (not self._cursor._has_next() and - self._cursor._post_batch_resume_token): + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: resume_token = self._cursor._post_batch_resume_token # Hereafter, don't use startAfter; instead use resumeAfter. @@ -383,6 +397,7 @@ class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + @property def _aggregation_command_class(self): return _CollectionAggregationCommand @@ -400,6 +415,7 @@ class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + @property def _aggregation_command_class(self): return _DatabaseAggregationCommand @@ -417,6 +433,7 @@ class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + def _change_stream_options(self): options = super(ClusterChangeStream, self)._change_stream_options() options["allChangesForCluster"] = True diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 14ef0f781e..4987601d5c 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -23,8 +23,7 @@ from pymongo.monitoring import _EventListeners from pymongo.pool import PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name) +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern @@ -32,63 +31,69 @@ def _parse_credentials(username, password, database, options): """Parse authentication credentials.""" - mechanism = options.get('authmechanism', 'DEFAULT' if username else None) - source = options.get('authsource') + mechanism = options.get("authmechanism", "DEFAULT" if username else None) + source = options.get("authsource") if username or mechanism: - return _build_credentials_tuple( - mechanism, source, username, password, options, database) + return _build_credentials_tuple(mechanism, source, username, password, options, database) return None def _parse_read_preference(options): """Parse read preference options.""" - if 'read_preference' in options: - return options['read_preference'] + if "read_preference" in options: + return options["read_preference"] - name = options.get('readpreference', 'primary') + name = options.get("readpreference", "primary") mode = read_pref_mode_from_name(name) - tags = options.get('readpreferencetags') - max_staleness = options.get('maxstalenessseconds', -1) + tags = options.get("readpreferencetags") + max_staleness = options.get("maxstalenessseconds", -1) return make_read_preference(mode, tags, max_staleness) def _parse_write_concern(options): """Parse write concern options.""" - concern = options.get('w') - wtimeout = options.get('wtimeoutms') - j = options.get('journal') - fsync = options.get('fsync') + concern = options.get("w") + wtimeout = options.get("wtimeoutms") + j = options.get("journal") + fsync = options.get("fsync") return WriteConcern(concern, wtimeout, j, fsync) def _parse_read_concern(options): """Parse read concern options.""" - concern = options.get('readconcernlevel') + concern = options.get("readconcernlevel") return ReadConcern(concern) def _parse_ssl_options(options): """Parse ssl options.""" - use_tls = options.get('tls') + use_tls = options.get("tls") if use_tls is not None: - validate_boolean('tls', use_tls) + validate_boolean("tls", use_tls) - certfile = options.get('tlscertificatekeyfile') - passphrase = options.get('tlscertificatekeyfilepassword') - ca_certs = options.get('tlscafile') - crlfile = options.get('tlscrlfile') - allow_invalid_certificates = options.get('tlsallowinvalidcertificates', False) - allow_invalid_hostnames = options.get('tlsallowinvalidhostnames', False) - disable_ocsp_endpoint_check = options.get('tlsdisableocspendpointcheck', False) + certfile = options.get("tlscertificatekeyfile") + passphrase = options.get("tlscertificatekeyfilepassword") + ca_certs = options.get("tlscafile") + crlfile = options.get("tlscrlfile") + allow_invalid_certificates = options.get("tlsallowinvalidcertificates", False) + allow_invalid_hostnames = options.get("tlsallowinvalidhostnames", False) + disable_ocsp_endpoint_check = options.get("tlsdisableocspendpointcheck", False) enabled_tls_opts = [] - for opt in ('tlscertificatekeyfile', 'tlscertificatekeyfilepassword', - 'tlscafile', 'tlscrlfile'): + for opt in ( + "tlscertificatekeyfile", + "tlscertificatekeyfilepassword", + "tlscafile", + "tlscrlfile", + ): # Any non-null value of these options implies tls=True. if opt in options and options[opt]: enabled_tls_opts.append(opt) - for opt in ('tlsallowinvalidcertificates', 'tlsallowinvalidhostnames', - 'tlsdisableocspendpointcheck'): + for opt in ( + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", + ): # A value of False for these options implies tls=True. if opt in options and not options[opt]: enabled_tls_opts.append(opt) @@ -99,10 +104,11 @@ def _parse_ssl_options(options): use_tls = True elif not use_tls: # Error since tls is explicitly disabled but a tls option is set. - raise ConfigurationError("TLS has not been enabled but the " - "following tls parameters have been set: " - "%s. Please set `tls=True` or remove." - % ', '.join(enabled_tls_opts)) + raise ConfigurationError( + "TLS has not been enabled but the " + "following tls parameters have been set: " + "%s. Please set `tls=True` or remove." % ", ".join(enabled_tls_opts) + ) if use_tls: ctx = get_ssl_context( @@ -112,7 +118,8 @@ def _parse_ssl_options(options): crlfile, allow_invalid_certificates, allow_invalid_hostnames, - disable_ocsp_endpoint_check) + disable_ocsp_endpoint_check, + ) return ctx, allow_invalid_hostnames return None, allow_invalid_hostnames @@ -120,40 +127,42 @@ def _parse_ssl_options(options): def _parse_pool_options(username, password, database, options): """Parse connection pool options.""" credentials = _parse_credentials(username, password, database, options) - max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE) - min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE) - max_idle_time_seconds = options.get( - 'maxidletimems', common.MAX_IDLE_TIME_SEC) + max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) + min_pool_size = options.get("minpoolsize", common.MIN_POOL_SIZE) + max_idle_time_seconds = options.get("maxidletimems", common.MAX_IDLE_TIME_SEC) if max_pool_size is not None and min_pool_size > max_pool_size: raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") - connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT) - socket_timeout = options.get('sockettimeoutms') - wait_queue_timeout = options.get( - 'waitqueuetimeoutms', common.WAIT_QUEUE_TIMEOUT) - event_listeners = options.get('event_listeners') - appname = options.get('appname') - driver = options.get('driver') - server_api = options.get('server_api') + connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) + socket_timeout = options.get("sockettimeoutms") + wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) + event_listeners = options.get("event_listeners") + appname = options.get("appname") + driver = options.get("driver") + server_api = options.get("server_api") compression_settings = CompressionSettings( - options.get('compressors', []), - options.get('zlibcompressionlevel', -1)) + options.get("compressors", []), options.get("zlibcompressionlevel", -1) + ) ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) - load_balanced = options.get('loadbalanced') - max_connecting = options.get('maxconnecting', common.MAX_CONNECTING) - return PoolOptions(max_pool_size, - min_pool_size, - max_idle_time_seconds, - connect_timeout, socket_timeout, - wait_queue_timeout, - ssl_context, tls_allow_invalid_hostnames, - _EventListeners(event_listeners), - appname, - driver, - compression_settings, - max_connecting=max_connecting, - server_api=server_api, - load_balanced=load_balanced, - credentials=credentials) + load_balanced = options.get("loadbalanced") + max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) + return PoolOptions( + max_pool_size, + min_pool_size, + max_idle_time_seconds, + connect_timeout, + socket_timeout, + wait_queue_timeout, + ssl_context, + tls_allow_invalid_hostnames, + _EventListeners(event_listeners), + appname, + driver, + compression_settings, + max_connecting=max_connecting, + server_api=server_api, + load_balanced=load_balanced, + credentials=credentials, + ) class ClientOptions(object): @@ -167,28 +176,25 @@ class ClientOptions(object): def __init__(self, username, password, database, options): self.__options = options self.__codec_options = _parse_codec_options(options) - self.__direct_connection = options.get('directconnection') - self.__local_threshold_ms = options.get( - 'localthresholdms', common.LOCAL_THRESHOLD_MS) + self.__direct_connection = options.get("directconnection") + self.__local_threshold_ms = options.get("localthresholdms", common.LOCAL_THRESHOLD_MS) # self.__server_selection_timeout is in seconds. Must use full name for # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. self.__server_selection_timeout = options.get( - 'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT) - self.__pool_options = _parse_pool_options( - username, password, database, options) + "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT + ) + self.__pool_options = _parse_pool_options(username, password, database, options) self.__read_preference = _parse_read_preference(options) - self.__replica_set_name = options.get('replicaset') + self.__replica_set_name = options.get("replicaset") self.__write_concern = _parse_write_concern(options) self.__read_concern = _parse_read_concern(options) - self.__connect = options.get('connect') - self.__heartbeat_frequency = options.get( - 'heartbeatfrequencyms', common.HEARTBEAT_FREQUENCY) - self.__retry_writes = options.get('retrywrites', common.RETRY_WRITES) - self.__retry_reads = options.get('retryreads', common.RETRY_READS) - self.__server_selector = options.get( - 'server_selector', any_server_selector) - self.__auto_encryption_opts = options.get('auto_encryption_opts') - self.__load_balanced = options.get('loadbalanced') + self.__connect = options.get("connect") + self.__heartbeat_frequency = options.get("heartbeatfrequencyms", common.HEARTBEAT_FREQUENCY) + self.__retry_writes = options.get("retrywrites", common.RETRY_WRITES) + self.__retry_reads = options.get("retryreads", common.RETRY_READS) + self.__server_selector = options.get("server_selector", any_server_selector) + self.__auto_encryption_opts = options.get("auto_encryption_opts") + self.__load_balanced = options.get("loadbalanced") @property def _options(self): diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 3d4ad514e5..44381c0241 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -135,17 +135,30 @@ import time import uuid from collections.abc import Mapping as _Mapping -from typing import (TYPE_CHECKING, Any, Callable, ContextManager, Generic, - Mapping, Optional, TypeVar) +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Generic, + Mapping, + Optional, + TypeVar, +) from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from bson.timestamp import Timestamp from pymongo.cursor import _SocketManager -from pymongo.errors import (ConfigurationError, ConnectionFailure, - InvalidOperation, OperationFailure, PyMongoError, - WTimeoutError) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) from pymongo.helpers import _RETRYABLE_ERROR_CODES from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode @@ -170,6 +183,7 @@ class SessionOptions(object): .. versionchanged:: 3.12 Added the ``snapshot`` parameter. """ + def __init__( self, causal_consistency: Optional[bool] = None, @@ -178,8 +192,7 @@ def __init__( ) -> None: if snapshot: if causal_consistency: - raise ConfigurationError('snapshot reads do not support ' - 'causal_consistency=True') + raise ConfigurationError("snapshot reads do not support " "causal_consistency=True") causal_consistency = False elif causal_consistency is None: causal_consistency = True @@ -188,8 +201,9 @@ def __init__( if not isinstance(default_transaction_options, TransactionOptions): raise TypeError( "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: %r" % - (default_transaction_options,)) + "pymongo.client_session.TransactionOptions, not: %r" + % (default_transaction_options,) + ) self._default_transaction_options = default_transaction_options self._snapshot = snapshot @@ -243,12 +257,13 @@ class TransactionOptions(object): .. versionadded:: 3.7 """ + def __init__( self, read_concern: Optional[ReadConcern] = None, write_concern: Optional[WriteConcern] = None, read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None + max_commit_time_ms: Optional[int] = None, ) -> None: self._read_concern = read_concern self._write_concern = write_concern @@ -256,27 +271,31 @@ def __init__( self._max_commit_time_ms = max_commit_time_ms if read_concern is not None: if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of " - "pymongo.read_concern.ReadConcern, not: %r" % - (read_concern,)) + raise TypeError( + "read_concern must be an instance of " + "pymongo.read_concern.ReadConcern, not: %r" % (read_concern,) + ) if write_concern is not None: if not isinstance(write_concern, WriteConcern): - raise TypeError("write_concern must be an instance of " - "pymongo.write_concern.WriteConcern, not: %r" % - (write_concern,)) + raise TypeError( + "write_concern must be an instance of " + "pymongo.write_concern.WriteConcern, not: %r" % (write_concern,) + ) if not write_concern.acknowledged: raise ConfigurationError( "transactions do not support unacknowledged write concern" - ": %r" % (write_concern,)) + ": %r" % (write_concern,) + ) if read_preference is not None: if not isinstance(read_preference, _ServerMode): - raise TypeError("%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." % (read_preference,)) + raise TypeError( + "%r is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." % (read_preference,) + ) if max_commit_time_ms is not None: if not isinstance(max_commit_time_ms, int): - raise TypeError( - "max_commit_time_ms must be an integer or None") + raise TypeError("max_commit_time_ms must be an integer or None") @property def read_concern(self) -> Optional[ReadConcern]: @@ -290,8 +309,7 @@ def write_concern(self) -> Optional[WriteConcern]: @property def read_preference(self) -> Optional[_ServerMode]: - """This transaction's :class:`~pymongo.read_preferences.ReadPreference`. - """ + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" return self._read_preference @property @@ -319,14 +337,15 @@ def _validate_session_write_concern(session, write_concern): return None else: raise ConfigurationError( - 'Explicit sessions are incompatible with ' - 'unacknowledged write concern: %r' % ( - write_concern,)) + "Explicit sessions are incompatible with " + "unacknowledged write concern: %r" % (write_concern,) + ) return session class _TransactionContext(object): """Internal transaction context manager for start_transaction.""" + def __init__(self, session): self.__session = session @@ -352,6 +371,7 @@ class _TxnState(object): class _Transaction(object): """Internal class to hold transaction information in a ClientSession.""" + def __init__(self, opts, client): self.opts = opts self.state = _TxnState.NONE @@ -415,10 +435,12 @@ def _max_time_expired_error(exc): # From the transactions spec, all the retryable writes errors plus # WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset([ - 64, # WriteConcernFailed - 50, # MaxTimeMSExpired -]) +_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset( + [ + 64, # WriteConcernFailed + 50, # MaxTimeMSExpired + ] +) # From the Convenient API for Transactions spec, with_transaction must # halt retries after 120 seconds. @@ -450,8 +472,13 @@ class ClientSession(Generic[_DocumentType]): :class:`ClientSession`, call :meth:`~pymongo.mongo_client.MongoClient.start_session`. """ + def __init__( - self, client: "MongoClient[_DocumentType]", server_session: Any, options: SessionOptions, implicit: bool + self, + client: "MongoClient[_DocumentType]", + server_session: Any, + options: SessionOptions, + implicit: bool, ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. self._client: MongoClient[_DocumentType] = client @@ -630,17 +657,17 @@ def callback(session, custom_arg, custom_kwarg=None): """ start_time = time.monotonic() while True: - self.start_transaction( - read_concern, write_concern, read_preference, - max_commit_time_ms) + self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) try: ret = callback(self) except Exception as exc: if self.in_transaction: self.abort_transaction() - if (isinstance(exc, PyMongoError) and - exc.has_error_label("TransientTransactionError") and - _within_time_limit(start_time)): + if ( + isinstance(exc, PyMongoError) + and exc.has_error_label("TransientTransactionError") + and _within_time_limit(start_time) + ): # Retry the entire transaction. continue raise @@ -653,14 +680,17 @@ def callback(session, custom_arg, custom_kwarg=None): try: self.commit_transaction() except PyMongoError as exc: - if (exc.has_error_label("UnknownTransactionCommitResult") - and _within_time_limit(start_time) - and not _max_time_expired_error(exc)): + if ( + exc.has_error_label("UnknownTransactionCommitResult") + and _within_time_limit(start_time) + and not _max_time_expired_error(exc) + ): # Retry the commit. continue - if (exc.has_error_label("TransientTransactionError") and - _within_time_limit(start_time)): + if exc.has_error_label("TransientTransactionError") and _within_time_limit( + start_time + ): # Retry the entire transaction. break raise @@ -687,23 +717,22 @@ def start_transaction( self._check_ended() if self.options.snapshot: - raise InvalidOperation("Transactions are not supported in " - "snapshot sessions") + raise InvalidOperation("Transactions are not supported in " "snapshot sessions") if self.in_transaction: raise InvalidOperation("Transaction already in progress") read_concern = self._inherit_option("read_concern", read_concern) write_concern = self._inherit_option("write_concern", write_concern) - read_preference = self._inherit_option( - "read_preference", read_preference) + read_preference = self._inherit_option("read_preference", read_preference) if max_commit_time_ms is None: opts = self.options.default_transaction_options if opts: max_commit_time_ms = opts.max_commit_time_ms self._transaction.opts = TransactionOptions( - read_concern, write_concern, read_preference, max_commit_time_ms) + read_concern, write_concern, read_preference, max_commit_time_ms + ) self._transaction.reset() self._transaction.state = _TxnState.STARTING self._start_retryable_write() @@ -723,8 +752,7 @@ def commit_transaction(self) -> None: self._transaction.state = _TxnState.COMMITTED_EMPTY return elif state is _TxnState.ABORTED: - raise InvalidOperation( - "Cannot call commitTransaction after calling abortTransaction") + raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction") elif state is _TxnState.COMMITTED: # We're explicitly retrying the commit, move the state back to # "in progress" so that in_transaction returns true. @@ -770,8 +798,7 @@ def abort_transaction(self) -> None: elif state is _TxnState.ABORTED: raise InvalidOperation("Cannot call abortTransaction twice") elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): - raise InvalidOperation( - "Cannot call abortTransaction after calling commitTransaction") + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") try: self._finish_transaction_with_retry("abortTransaction") @@ -788,8 +815,10 @@ def _finish_transaction_with_retry(self, command_name): :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". """ + def func(session, sock_info, retryable): return self._finish_transaction(sock_info, command_name) + return self._client._retry_internal(True, func, self, None) def _finish_transaction(self, sock_info, command_name): @@ -799,7 +828,7 @@ def _finish_transaction(self, sock_info, command_name): cmd = SON([(command_name, 1)]) if command_name == "commitTransaction": if opts.max_commit_time_ms: - cmd['maxTimeMS'] = opts.max_commit_time_ms + cmd["maxTimeMS"] = opts.max_commit_time_ms # Transaction spec says that after the initial commit attempt, # subsequent commitTransaction commands should be upgraded to use @@ -811,14 +840,11 @@ def _finish_transaction(self, sock_info, command_name): wc = WriteConcern(**wc_doc) if self._transaction.recovery_token: - cmd['recoveryToken'] = self._transaction.recovery_token + cmd["recoveryToken"] = self._transaction.recovery_token return self._client.admin._command( - sock_info, - cmd, - session=self, - write_concern=wc, - parse_write_concern_error=True) + sock_info, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) def _advance_cluster_time(self, cluster_time): """Internal cluster time helper.""" @@ -837,8 +863,7 @@ def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: another `ClientSession` instance. """ if not isinstance(cluster_time, _Mapping): - raise TypeError( - "cluster_time must be a subclass of collections.Mapping") + raise TypeError("cluster_time must be a subclass of collections.Mapping") if not isinstance(cluster_time.get("clusterTime"), Timestamp): raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) @@ -860,22 +885,21 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: another `ClientSession` instance. """ if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance " - "of bson.timestamp.Timestamp") + raise TypeError("operation_time must be an instance " "of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) def _process_response(self, reply): """Process a response to a command that was run with this session.""" - self._advance_cluster_time(reply.get('$clusterTime')) - self._advance_operation_time(reply.get('operationTime')) + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) if self._options.snapshot and self._snapshot_time is None: - if 'cursor' in reply: - ct = reply['cursor'].get('atClusterTime') + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") else: - ct = reply.get('atClusterTime') + ct = reply.get("atClusterTime") self._snapshot_time = ct if self.in_transaction and self._transaction.sharded: - recovery_token = reply.get('recoveryToken') + recovery_token = reply.get("recoveryToken") if recovery_token: self._transaction.recovery_token = recovery_token @@ -894,8 +918,7 @@ def in_transaction(self) -> bool: @property def _starting_transaction(self): - """True if this session is starting a multi-statement transaction. - """ + """True if this session is starting a multi-statement transaction.""" return self._transaction.starting() @property @@ -931,58 +954,56 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): self._update_read_concern(command, sock_info) self._server_session.last_use = time.monotonic() - command['lsid'] = self._server_session.session_id + command["lsid"] = self._server_session.session_id if is_retryable: - command['txnNumber'] = self._server_session.transaction_id + command["txnNumber"] = self._server_session.transaction_id return if self.in_transaction: if read_preference != ReadPreference.PRIMARY: raise InvalidOperation( - 'read preference in a transaction must be primary, not: ' - '%r' % (read_preference,)) + "read preference in a transaction must be primary, not: " + "%r" % (read_preference,) + ) if self._transaction.state == _TxnState.STARTING: # First command begins a new transaction. self._transaction.state = _TxnState.IN_PROGRESS - command['startTransaction'] = True + command["startTransaction"] = True if self._transaction.opts.read_concern: rc = self._transaction.opts.read_concern.document if rc: - command['readConcern'] = rc + command["readConcern"] = rc self._update_read_concern(command, sock_info) - command['txnNumber'] = self._server_session.transaction_id - command['autocommit'] = False + command["txnNumber"] = self._server_session.transaction_id + command["autocommit"] = False def _start_retryable_write(self): self._check_ended() self._server_session.inc_transaction_id() def _update_read_concern(self, cmd, sock_info): - if (self.options.causal_consistency - and self.operation_time is not None): - cmd.setdefault('readConcern', {})[ - 'afterClusterTime'] = self.operation_time + if self.options.causal_consistency and self.operation_time is not None: + cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time if self.options.snapshot: if sock_info.max_wire_version < 13: - raise ConfigurationError( - 'Snapshot reads require MongoDB 5.0 or later') - rc = cmd.setdefault('readConcern', {}) - rc['level'] = 'snapshot' + raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") + rc = cmd.setdefault("readConcern", {}) + rc["level"] = "snapshot" if self._snapshot_time is not None: - rc['atClusterTime'] = self._snapshot_time + rc["atClusterTime"] = self._snapshot_time def __copy__(self): - raise TypeError('A ClientSession cannot be copied, create a new session instead') + raise TypeError("A ClientSession cannot be copied, create a new session instead") class _ServerSession(object): def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. - self.session_id = {'id': Binary(uuid.uuid4().bytes, 4)} + self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} self.last_use = time.monotonic() self._transaction_id = 0 self.dirty = False @@ -1016,6 +1037,7 @@ class _ServerSessionPool(collections.deque): This class is not thread-safe, access it while holding the Topology lock. """ + def __init__(self, *args, **kwargs): super(_ServerSessionPool, self).__init__(*args, **kwargs) self.generation = 0 @@ -1056,8 +1078,7 @@ def return_server_session(self, server_session, session_timeout_minutes): def return_server_session_no_lock(self, server_session): # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. - if (server_session.generation == self.generation and - not server_session.dirty): + if server_session.generation == self.generation and not server_session.dirty: self.appendleft(server_session) def _clear_stale(self, session_timeout_minutes): diff --git a/pymongo/collation.py b/pymongo/collation.py index e398264ac2..aef480b932 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -49,10 +49,10 @@ class CollationAlternate(object): :class:`~pymongo.collation.Collation`. """ - NON_IGNORABLE = 'non-ignorable' + NON_IGNORABLE = "non-ignorable" """Spaces and punctuation are treated as base characters.""" - SHIFTED = 'shifted' + SHIFTED = "shifted" """Spaces and punctuation are *not* considered base characters. Spaces and punctuation are distinguished regardless when the @@ -68,10 +68,10 @@ class CollationMaxVariable(object): :class:`~pymongo.collation.Collation`. """ - PUNCT = 'punct' + PUNCT = "punct" """Both punctuation and spaces are ignored.""" - SPACE = 'space' + SPACE = "space" """Spaces alone are ignored.""" @@ -81,13 +81,13 @@ class CollationCaseFirst(object): :class:`~pymongo.collation.Collation`. """ - UPPER = 'upper' + UPPER = "upper" """Sort uppercase characters first.""" - LOWER = 'lower' + LOWER = "lower" """Sort lowercase characters first.""" - OFF = 'off' + OFF = "off" """Default for locale or collation strength.""" @@ -152,42 +152,41 @@ class Collation(object): __slots__ = ("__document",) - def __init__(self, locale: str, - caseLevel: Optional[bool] = None, - caseFirst: Optional[str] = None, - strength: Optional[int] = None, - numericOrdering: Optional[bool] = None, - alternate: Optional[str] = None, - maxVariable: Optional[str] = None, - normalization: Optional[bool] = None, - backwards: Optional[bool] = None, - **kwargs: Any) -> None: - locale = common.validate_string('locale', locale) - self.__document: Dict[str, Any] = {'locale': locale} + def __init__( + self, + locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any + ) -> None: + locale = common.validate_string("locale", locale) + self.__document: Dict[str, Any] = {"locale": locale} if caseLevel is not None: - self.__document['caseLevel'] = common.validate_boolean( - 'caseLevel', caseLevel) + self.__document["caseLevel"] = common.validate_boolean("caseLevel", caseLevel) if caseFirst is not None: - self.__document['caseFirst'] = common.validate_string( - 'caseFirst', caseFirst) + self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) if strength is not None: - self.__document['strength'] = common.validate_integer( - 'strength', strength) + self.__document["strength"] = common.validate_integer("strength", strength) if numericOrdering is not None: - self.__document['numericOrdering'] = common.validate_boolean( - 'numericOrdering', numericOrdering) + self.__document["numericOrdering"] = common.validate_boolean( + "numericOrdering", numericOrdering + ) if alternate is not None: - self.__document['alternate'] = common.validate_string( - 'alternate', alternate) + self.__document["alternate"] = common.validate_string("alternate", alternate) if maxVariable is not None: - self.__document['maxVariable'] = common.validate_string( - 'maxVariable', maxVariable) + self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) if normalization is not None: - self.__document['normalization'] = common.validate_boolean( - 'normalization', normalization) + self.__document["normalization"] = common.validate_boolean( + "normalization", normalization + ) if backwards is not None: - self.__document['backwards'] = common.validate_boolean( - 'backwards', backwards) + self.__document["backwards"] = common.validate_boolean("backwards", backwards) self.__document.update(kwargs) @property @@ -202,8 +201,7 @@ def document(self) -> Dict[str, Any]: def __repr__(self): document = self.document - return 'Collation(%s)' % ( - ', '.join('%s=%r' % (key, document[key]) for key in document),) + return "Collation(%s)" % (", ".join("%s=%r" % (key, document[key]) for key in document),) def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): @@ -214,13 +212,13 @@ def __ne__(self, other: Any) -> bool: return not self == other -def validate_collation_or_none(value: Optional[Union[Mapping[str, Any], Collation]]) -> Optional[Dict[str, Any]]: +def validate_collation_or_none( + value: Optional[Union[Mapping[str, Any], Collation]] +) -> Optional[Dict[str, Any]]: if value is None: return None if isinstance(value, Collation): return value.document if isinstance(value, dict): return value - raise TypeError( - 'collation must be a dict, an instance of collation.Collation, ' - 'or None.') + raise TypeError("collation must be a dict, an instance of collation.Collation, " "or None.") diff --git a/pymongo/collection.py b/pymongo/collection.py index aa2d148fbe..b17bb61f34 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -15,8 +15,19 @@ """Collection level utilities for Mongo.""" from collections import abc -from typing import (TYPE_CHECKING, Any, Generic, Iterable, List, Mapping, - MutableMapping, Optional, Sequence, Tuple, Union) +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Union, +) from bson.code import Code from bson.codec_options import CodecOptions @@ -25,26 +36,44 @@ from bson.son import SON from bson.timestamp import Timestamp from pymongo import common, helpers, message -from pymongo.aggregation import (_CollectionAggregationCommand, - _CollectionRawAggregationCommand) +from pymongo.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) from pymongo.bulk import _Bulk from pymongo.change_stream import CollectionChangeStream from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import (ConfigurationError, InvalidName, InvalidOperation, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) from pymongo.helpers import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, InsertOne, - ReplaceOne, UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.results import (BulkWriteResult, DeleteResult, InsertManyResult, - InsertOneResult, UpdateResult) +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) from pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline from pymongo.write_concern import WriteConcern -_FIND_AND_MODIFY_DOC_FIELDS = {'value': 1} +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} _WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] @@ -58,6 +87,7 @@ class ReturnDocument(object): :meth:`~pymongo.collection.Collection.find_one_and_replace` and :meth:`~pymongo.collection.Collection.find_one_and_update`. """ + BEFORE = False """Return the original document before it was updated/replaced, or ``None`` if no document matches the query. @@ -73,8 +103,7 @@ class ReturnDocument(object): class Collection(common.BaseObject, Generic[_DocumentType]): - """A Mongo collection. - """ + """A Mongo collection.""" def __init__( self, @@ -166,24 +195,21 @@ def __init__( codec_options or database.codec_options, read_preference or database.read_preference, write_concern or database.write_concern, - read_concern or database.read_concern) + read_concern or database.read_concern, + ) if not isinstance(name, str): raise TypeError("name must be an instance of str") if not name or ".." in name: raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or - name.startswith("$cmd")): - raise InvalidName("collection names must not " - "contain '$': %r" % name) + if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): + raise InvalidName("collection names must not " "contain '$': %r" % name) if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start " - "or end with '.': %r" % name) + raise InvalidName("collection names must not start " "or end with '.': %r" % name) if "\x00" in name: - raise InvalidName("collection names must not contain the " - "null character") - collation = validate_collation_or_none(kwargs.pop('collation', None)) + raise InvalidName("collection names must not contain the " "null character") + collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__database: Database[_DocumentType] = database self.__name = name @@ -192,25 +218,30 @@ def __init__( self.__create(kwargs, collation, session) self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler='replace', - document_class=dict) + unicode_decode_error_handler="replace", document_class=dict + ) def _socket_for_reads(self, session): - return self.__database.client._socket_for_reads( - self._read_preference_for(session), session) + return self.__database.client._socket_for_reads(self._read_preference_for(session), session) def _socket_for_writes(self, session): return self.__database.client._socket_for_writes(session) - def _command(self, sock_info, command, - read_preference=None, - codec_options=None, check=True, allowable_errors=None, - read_concern=None, - write_concern=None, - collation=None, - session=None, - retryable_write=False, - user_fields=None): + def _command( + self, + sock_info, + command, + read_preference=None, + codec_options=None, + check=True, + allowable_errors=None, + read_concern=None, + write_concern=None, + collation=None, + session=None, + retryable_write=False, + user_fields=None, + ): """Internal command helper. :Parameters: @@ -252,11 +283,11 @@ def _command(self, sock_info, command, session=s, client=self.__database.client, retryable_write=retryable_write, - user_fields=user_fields) + user_fields=user_fields, + ) def __create(self, options, collation, session): - """Sends a create command with the given options. - """ + """Sends a create command with the given options.""" cmd = SON([("create", self.__name)]) if options: if "size" in options: @@ -264,9 +295,13 @@ def __create(self, options, collation, session): cmd.update(options) with self._socket_for_writes(session) as sock_info: self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), - collation=collation, session=session) + collation=collation, + session=session, + ) def __getattr__(self, name: str) -> "Collection[_DocumentType]": """Get a sub-collection of this collection by name. @@ -276,30 +311,31 @@ def __getattr__(self, name: str) -> "Collection[_DocumentType]": :Parameters: - `name`: the name of the collection to get """ - if name.startswith('_'): + if name.startswith("_"): full_name = "%s.%s" % (self.__name, name) raise AttributeError( "Collection has no attribute %r. To access the %s" - " collection, use database['%s']." % ( - name, full_name, full_name)) + " collection, use database['%s']." % (name, full_name, full_name) + ) return self.__getitem__(name) def __getitem__(self, name: str) -> "Collection[_DocumentType]": - return Collection(self.__database, - "%s.%s" % (self.__name, name), - False, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern) + return Collection( + self.__database, + "%s.%s" % (self.__name, name), + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) def __repr__(self): return "Collection(%r, %r)" % (self.__database, self.__name) def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): - return (self.__database == other.database and - self.__name == other.name) + return self.__database == other.database and self.__name == other.name return NotImplemented def __ne__(self, other: Any) -> bool: @@ -309,9 +345,11 @@ def __hash__(self) -> int: return hash((self.__database, self.__name)) def __bool__(self) -> bool: - raise NotImplementedError("Collection objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: collection is not None") + raise NotImplementedError( + "Collection objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) @property def full_name(self) -> str: @@ -369,20 +407,22 @@ def with_options( default) the :attr:`read_concern` of this :class:`Collection` is used. """ - return Collection(self.__database, - self.__name, - False, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern) + return Collection( + self.__database, + self.__name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) def bulk_write( self, requests: Sequence[_WriteOp], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None + session: Optional["ClientSession"] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -464,20 +504,17 @@ def bulk_write( return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) - def _insert_one( - self, doc, ordered, write_concern, op_id, bypass_doc_val, session): + def _insert_one(self, doc, ordered, write_concern, op_id, bypass_doc_val, session): """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - command = SON([('insert', self.name), - ('ordered', ordered), - ('documents', [doc])]) + command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document + command["writeConcern"] = write_concern.document def _insert_command(session, sock_info, retryable_write): if bypass_doc_val: - command['bypassDocumentValidation'] = True + command["bypassDocumentValidation"] = True result = sock_info.command( self.__database.name, @@ -486,19 +523,21 @@ def _insert_command(session, sock_info, retryable_write): codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write) + retryable_write=retryable_write, + ) _check_write_command_response(result) - self.__database.client._retryable_write( - acknowledged, _insert_command, session) + self.__database.client._retryable_write(acknowledged, _insert_command, session) if not isinstance(doc, RawBSONDocument): - return doc.get('_id') + return doc.get("_id") - def insert_one(self, document: _DocumentIn, + def insert_one( + self, + document: _DocumentIn, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None + session: Optional["ClientSession"] = None, ) -> InsertOneResult: """Insert a single document. @@ -543,16 +582,22 @@ def insert_one(self, document: _DocumentIn, write_concern = self._write_concern_for(session) return InsertOneResult( self._insert_one( - document, ordered=True, - write_concern=write_concern, op_id=None, - bypass_doc_val=bypass_document_validation, session=session), - write_concern.acknowledged) + document, + ordered=True, + write_concern=write_concern, + op_id=None, + bypass_doc_val=bypass_document_validation, + session=session, + ), + write_concern.acknowledged, + ) - def insert_many(self, + def insert_many( + self, documents: Iterable[_DocumentIn], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None + session: Optional["ClientSession"] = None, ) -> InsertManyResult: """Insert an iterable of documents. @@ -593,11 +638,14 @@ def insert_many(self, .. versionadded:: 3.0 """ - if (not isinstance(documents, abc.Iterable) - or isinstance(documents, abc.Mapping) - or not documents): + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): raise TypeError("documents must be a non-empty list") inserted_ids: List[ObjectId] = [] + def gen(): """A generator that validates documents and handles _ids.""" for document in documents: @@ -614,51 +662,59 @@ def gen(): blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) - def _update(self, sock_info, criteria, document, upsert=False, - multi=False, write_concern=None, op_id=None, ordered=True, - bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None, retryable_write=False, let=None): + def _update( + self, + sock_info, + criteria, + document, + upsert=False, + multi=False, + write_concern=None, + op_id=None, + ordered=True, + bypass_doc_val=False, + collation=None, + array_filters=None, + hint=None, + session=None, + retryable_write=False, + let=None, + ): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) collation = validate_collation_or_none(collation) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - update_doc = SON([('q', criteria), - ('u', document), - ('multi', multi), - ('upsert', upsert)]) + update_doc = SON([("q", criteria), ("u", document), ("multi", multi), ("upsert", upsert)]) if collation is not None: if not acknowledged: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") else: - update_doc['collation'] = collation + update_doc["collation"] = collation if array_filters is not None: if not acknowledged: - raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged writes.') + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") else: - update_doc['arrayFilters'] = array_filters + update_doc["arrayFilters"] = array_filters if hint is not None: if not acknowledged and sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands.') + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) if not isinstance(hint, str): hint = helpers._index_document(hint) - update_doc['hint'] = hint + update_doc["hint"] = hint - command = SON([('update', self.name), - ('ordered', ordered), - ('updates', [update_doc])]) + command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) if let: common.validate_is_mapping("let", let) command["let"] = let if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document + command["writeConcern"] = write_concern.document # Update command. if bypass_doc_val: - command['bypassDocumentValidation'] = True + command["bypassDocumentValidation"] = True # The command result has to be published for APM unmodified # so we make a shallow copy here before adding updatedExisting. @@ -669,41 +725,66 @@ def _update(self, sock_info, criteria, document, upsert=False, codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write).copy() + retryable_write=retryable_write, + ).copy() _check_write_command_response(result) # Add the updatedExisting field for compatibility. - if result.get('n') and 'upserted' not in result: - result['updatedExisting'] = True + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True else: - result['updatedExisting'] = False + result["updatedExisting"] = False # MongoDB >= 2.6.0 returns the upsert _id in an array # element. Break it out for backward compatibility. - if 'upserted' in result: - result['upserted'] = result['upserted'][0]['_id'] + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] if not acknowledged: return None return result def _update_retryable( - self, criteria, document, upsert=False, multi=False, - write_concern=None, op_id=None, ordered=True, - bypass_doc_val=False, collation=None, array_filters=None, - hint=None, session=None, let=None): + self, + criteria, + document, + upsert=False, + multi=False, + write_concern=None, + op_id=None, + ordered=True, + bypass_doc_val=False, + collation=None, + array_filters=None, + hint=None, + session=None, + let=None, + ): """Internal update / replace helper.""" + def _update(session, sock_info, retryable_write): return self._update( - sock_info, criteria, document, upsert=upsert, multi=multi, - write_concern=write_concern, op_id=op_id, ordered=ordered, - bypass_doc_val=bypass_doc_val, collation=collation, - array_filters=array_filters, hint=hint, session=session, - retryable_write=retryable_write, let=let) + sock_info, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _update, session) + (write_concern or self.write_concern).acknowledged and not multi, _update, session + ) - def replace_one(self, + def replace_one( + self, filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, @@ -711,7 +792,7 @@ def replace_one(self, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> UpdateResult: """Replace a single document matching the filter. @@ -788,13 +869,21 @@ def replace_one(self, write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, replacement, upsert, + filter, + replacement, + upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) - def update_one(self, + def update_one( + self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, @@ -803,7 +892,7 @@ def update_one(self, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> UpdateResult: """Update a single document matching the filter. @@ -870,19 +959,27 @@ def update_one(self, """ common.validate_is_mapping("filter", filter) common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) + common.validate_list_or_none("array_filters", array_filters) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, + filter, + update, + upsert, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, array_filters=array_filters, - hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) - def update_many(self, + def update_many( + self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, @@ -891,7 +988,7 @@ def update_many(self, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> UpdateResult: """Update one or more documents that match the filter. @@ -958,17 +1055,25 @@ def update_many(self, """ common.validate_is_mapping("filter", filter) common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) + common.validate_list_or_none("array_filters", array_filters) write_concern = self._write_concern_for(session) return UpdateResult( self._update_retryable( - filter, update, upsert, multi=True, + filter, + update, + upsert, + multi=True, write_concern=write_concern, bypass_doc_val=bypass_document_validation, - collation=collation, array_filters=array_filters, - hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) def drop(self, session: Optional["ClientSession"] = None) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. @@ -993,39 +1098,46 @@ def drop(self, session: Optional["ClientSession"] = None) -> None: self.codec_options, self.read_preference, self.write_concern, - self.read_concern) + self.read_concern, + ) dbo.drop_collection(self.__name, session=session) def _delete( - self, sock_info, criteria, multi, - write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None, retryable_write=False, - let=None): + self, + sock_info, + criteria, + multi, + write_concern=None, + op_id=None, + ordered=True, + collation=None, + hint=None, + session=None, + retryable_write=False, + let=None, + ): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged - delete_doc = SON([('q', criteria), - ('limit', int(not multi))]) + delete_doc = SON([("q", criteria), ("limit", int(not multi))]) collation = validate_collation_or_none(collation) if collation is not None: if not acknowledged: - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") else: - delete_doc['collation'] = collation + delete_doc["collation"] = collation if hint is not None: if not acknowledged and sock_info.max_wire_version < 9: raise ConfigurationError( - 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands.') + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) if not isinstance(hint, str): hint = helpers._index_document(hint) - delete_doc['hint'] = hint - command = SON([('delete', self.name), - ('ordered', ordered), - ('deletes', [delete_doc])]) + delete_doc["hint"] = hint + command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) if not write_concern.is_server_default: - command['writeConcern'] = write_concern.document + command["writeConcern"] = write_concern.document if let: common.validate_is_document_type("let", let) @@ -1039,32 +1151,51 @@ def _delete( codec_options=self.__write_response_codec_options, session=session, client=self.__database.client, - retryable_write=retryable_write) + retryable_write=retryable_write, + ) _check_write_command_response(result) return result def _delete_retryable( - self, criteria, multi, - write_concern=None, op_id=None, ordered=True, - collation=None, hint=None, session=None, let=None): + self, + criteria, + multi, + write_concern=None, + op_id=None, + ordered=True, + collation=None, + hint=None, + session=None, + let=None, + ): """Internal delete helper.""" + def _delete(session, sock_info, retryable_write): return self._delete( - sock_info, criteria, multi, - write_concern=write_concern, op_id=op_id, ordered=ordered, - collation=collation, hint=hint, session=session, - retryable_write=retryable_write, let=let) + sock_info, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + ) return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _delete, session) + (write_concern or self.write_concern).acknowledged and not multi, _delete, session + ) - def delete_one(self, + def delete_one( + self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> DeleteResult: """Delete a single document matching the filter. @@ -1109,17 +1240,24 @@ def delete_one(self, write_concern = self._write_concern_for(session) return DeleteResult( self._delete_retryable( - filter, False, + filter, + False, write_concern=write_concern, - collation=collation, hint=hint, session=session, let=let), - write_concern.acknowledged) + collation=collation, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) - def delete_many(self, + def delete_many( + self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None + let: Optional[Mapping[str, Any]] = None, ) -> DeleteResult: """Delete one or more documents matching the filter. @@ -1164,12 +1302,20 @@ def delete_many(self, write_concern = self._write_concern_for(session) return DeleteResult( self._delete_retryable( - filter, True, + filter, + True, write_concern=write_concern, - collation=collation, hint=hint, session=session, let=let), - write_concern.acknowledged) - - def find_one(self, filter: Optional[Any] = None, *args: Any, **kwargs: Any) -> Optional[_DocumentType]: + collation=collation, + hint=hint, + session=session, + let=let, + ), + write_concern.acknowledged, + ) + + def find_one( + self, filter: Optional[Any] = None, *args: Any, **kwargs: Any + ) -> Optional[_DocumentType]: """Get a single document from the database. All arguments to :meth:`find` are also valid arguments for @@ -1194,8 +1340,7 @@ def find_one(self, filter: Optional[Any] = None, *args: Any, **kwargs: Any) -> O >>> collection.find_one(max_time_ms=100) """ - if (filter is not None and not - isinstance(filter, abc.Mapping)): + if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} cursor = self.find(filter, *args, **kwargs) @@ -1420,8 +1565,7 @@ def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_Documen """ # OP_MSG is required to support encryption. if self.__database.client._encrypter: - raise InvalidOperation( - "find_raw_batches does not support auto encryption") + raise InvalidOperation("find_raw_batches does not support auto encryption") return RawBatchCursor(self, *args, **kwargs) @@ -1437,13 +1581,13 @@ def _count_cmd(self, session, sock_info, read_preference, cmd, collation): codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation, - session=session) + session=session, + ) if res.get("errmsg", "") == "ns missing": return 0 return int(res["n"]) - def _aggregate_one_result( - self, sock_info, read_preference, cmd, collation, session): + def _aggregate_one_result(self, sock_info, read_preference, cmd, collation, session): """Internal helper to run an aggregate that returns a single result.""" result = self._command( sock_info, @@ -1453,11 +1597,12 @@ def _aggregate_one_result( codec_options=self.__write_response_codec_options, read_concern=self.read_concern, collation=collation, - session=session) + session=session, + ) # cursor will not be present for NamespaceNotFound errors. - if 'cursor' not in result: + if "cursor" not in result: return None - batch = result['cursor']['firstBatch'] + batch = result["cursor"]["firstBatch"] return batch[0] if batch else None def estimated_document_count(self, **kwargs: Any) -> int: @@ -1478,38 +1623,35 @@ def estimated_document_count(self, **kwargs: Any) -> int: .. versionadded:: 3.7 """ - if 'session' in kwargs: - raise ConfigurationError( - 'estimated_document_count does not support sessions') + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") def _cmd(session, server, sock_info, read_preference): if sock_info.max_wire_version >= 12: # MongoDB 4.9+ pipeline = [ - {'$collStats': {'count': {}}}, - {'$group': {'_id': 1, 'n': {'$sum': '$count'}}}, + {"$collStats": {"count": {}}}, + {"$group": {"_id": 1, "n": {"$sum": "$count"}}}, ] - cmd = SON([('aggregate', self.__name), - ('pipeline', pipeline), - ('cursor', {})]) + cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) cmd.update(kwargs) result = self._aggregate_one_result( - sock_info, read_preference, cmd, collation=None, - session=session) + sock_info, read_preference, cmd, collation=None, session=session + ) if not result: return 0 - return int(result['n']) + return int(result["n"]) else: # MongoDB < 4.9 - cmd = SON([('count', self.__name)]) + cmd = SON([("count", self.__name)]) cmd.update(kwargs) - return self._count_cmd( - None, sock_info, read_preference, cmd, collation=None) + return self._count_cmd(None, sock_info, read_preference, cmd, collation=None) - return self.__database.client._retryable_read( - _cmd, self.read_preference, None) + return self.__database.client._retryable_read(_cmd, self.read_preference, None) - def count_documents(self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any) -> int: + def count_documents( + self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any + ) -> int: """Count the number of documents in this collection. .. note:: For a fast count of the total documents in a collection see @@ -1563,31 +1705,34 @@ def count_documents(self, filter: Mapping[str, Any], session: Optional["ClientSe .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere """ - pipeline = [{'$match': filter}] - if 'skip' in kwargs: - pipeline.append({'$skip': kwargs.pop('skip')}) - if 'limit' in kwargs: - pipeline.append({'$limit': kwargs.pop('limit')}) - pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}}) - cmd = SON([('aggregate', self.__name), - ('pipeline', pipeline), - ('cursor', {})]) + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) def _cmd(session, server, sock_info, read_preference): - result = self._aggregate_one_result( - sock_info, read_preference, cmd, collation, session) + result = self._aggregate_one_result(sock_info, read_preference, cmd, collation, session) if not result: return 0 - return result['n'] + return result["n"] return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) + _cmd, self._read_preference_for(session), session + ) - def create_indexes(self, indexes: Sequence[IndexModel], session: Optional["ClientSession"] = None, **kwargs: Any) -> List[str]: + def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional["ClientSession"] = None, + **kwargs: Any, + ) -> List[str]: """Create one or more indexes on this collection. >>> from pymongo import IndexModel, ASCENDING, DESCENDING @@ -1619,7 +1764,7 @@ def create_indexes(self, indexes: Sequence[IndexModel], session: Optional["Clien .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ """ - common.validate_list('indexes', indexes) + common.validate_list("indexes", indexes) return self.__create_indexes(indexes, session, **kwargs) def __create_indexes(self, indexes, session, **kwargs): @@ -1641,28 +1786,33 @@ def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of " - "pymongo.operations.IndexModel" % (index,)) + "%r is not an instance of " "pymongo.operations.IndexModel" % (index,) + ) document = index.document names.append(document["name"]) yield document - cmd = SON([('createIndexes', self.name), - ('indexes', list(gen_indexes()))]) + cmd = SON([("createIndexes", self.name), ("indexes", list(gen_indexes()))]) cmd.update(kwargs) - if 'commitQuorum' in kwargs and not supports_quorum: + if "commitQuorum" in kwargs and not supports_quorum: raise ConfigurationError( "Must be connected to MongoDB 4.4+ to use the " - "commitQuorum option for createIndexes") + "commitQuorum option for createIndexes" + ) self._command( - sock_info, cmd, read_preference=ReadPreference.PRIMARY, + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, write_concern=self._write_concern_for(session), - session=session) + session=session, + ) return names - def create_index(self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> str: + def create_index( + self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> str: """Creates an index on this collection. Takes either a single key or a list of (key, direction) pairs. @@ -1791,7 +1941,9 @@ def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) """ self.drop_index("*", session=session, **kwargs) - def drop_index(self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: + def drop_index( + self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> None: """Drops the specified index on this collection. Can be used on non-existant collections or collections with no @@ -1837,14 +1989,18 @@ def drop_index(self, index_or_name: _IndexKeyHint, session: Optional["ClientSess cmd = SON([("dropIndexes", self.__name), ("index", name)]) cmd.update(kwargs) with self._socket_for_writes(session) as sock_info: - self._command(sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - session=session) - - def list_indexes(self, session: Optional["ClientSession"] = None) -> CommandCursor[MutableMapping[str, Any]]: + self._command( + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) + + def list_indexes( + self, session: Optional["ClientSession"] = None + ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. >>> for index in db.test.list_indexes(): @@ -1865,35 +2021,35 @@ def list_indexes(self, session: Optional["ClientSession"] = None) -> CommandCurs .. versionadded:: 3.0 """ codec_options = CodecOptions(SON) - coll = self.with_options(codec_options=codec_options, - read_preference=ReadPreference.PRIMARY) - read_pref = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + coll = self.with_options( + codec_options=codec_options, read_preference=ReadPreference.PRIMARY + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) with self.__database.client._tmp_session(session, False) as s: try: - cursor = self._command(sock_info, cmd, - read_preference, - codec_options, - session=s)["cursor"] + cursor = self._command( + sock_info, cmd, read_preference, codec_options, session=s + )["cursor"] except OperationFailure as exc: # Ignore NamespaceNotFound errors to match the behavior # of reading from *.system.indexes. if exc.code != 26: raise - cursor = {'id': 0, 'firstBatch': []} + cursor = {"id": 0, "firstBatch": []} cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=s, - explicit_session=session is not None) + coll, cursor, sock_info.address, session=s, explicit_session=session is not None + ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - return self.__database.client._retryable_read( - _cmd, read_pref, session) + return self.__database.client._retryable_read(_cmd, read_pref, session) - def index_information(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: + def index_information( + self, session: Optional["ClientSession"] = None + ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. Returns a dictionary where the keys are index names (as @@ -1947,9 +2103,9 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s self.codec_options, self.read_preference, self.write_concern, - self.read_concern) - cursor = dbo.list_collections( - session=session, filter={"name": self.__name}) + self.read_concern, + ) + cursor = dbo.list_collections(session=session, filter={"name": self.__name}) result = None for doc in cursor: @@ -1966,17 +2122,40 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s return options - def _aggregate(self, aggregation_command, pipeline, cursor_class, session, - explicit_session, let=None, **kwargs): + def _aggregate( + self, + aggregation_command, + pipeline, + cursor_class, + session, + explicit_session, + let=None, + **kwargs, + ): cmd = aggregation_command( - self, cursor_class, pipeline, kwargs, explicit_session, let, - user_fields={'cursor': {'firstBatch': 1}}) + self, + cursor_class, + pipeline, + kwargs, + explicit_session, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) return self.__database.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(session), session, - retryable=not cmd._performs_write) + cmd.get_cursor, + cmd.get_read_preference(session), + session, + retryable=not cmd._performs_write, + ) - def aggregate(self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> CommandCursor[_DocumentType]: + def aggregate( + self, + pipeline: _Pipeline, + session: Optional["ClientSession"] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this collection. @@ -2050,13 +2229,15 @@ def aggregate(self, pipeline: _Pipeline, session: Optional["ClientSession"] = No https://docs.mongodb.com/manual/reference/command/aggregate """ with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate(_CollectionAggregationCommand, - pipeline, - CommandCursor, - session=s, - explicit_session=session is not None, - let=let, - **kwargs) + return self._aggregate( + _CollectionAggregationCommand, + pipeline, + CommandCursor, + session=s, + explicit_session=session is not None, + let=let, + **kwargs, + ) def aggregate_raw_batches( self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any @@ -2086,18 +2267,20 @@ def aggregate_raw_batches( """ # OP_MSG is required to support encryption. if self.__database.client._encrypter: - raise InvalidOperation( - "aggregate_raw_batches does not support auto encryption") + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate(_CollectionRawAggregationCommand, - pipeline, - RawBatchCommandCursor, - session=s, - explicit_session=session is not None, - **kwargs) - - def watch(self, + return self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + explicit_session=session is not None, + **kwargs, + ) + + def watch( + self, pipeline: Optional[_Pipeline] = None, full_document: Optional[str] = None, resume_after: Optional[Mapping[str, Any]] = None, @@ -2203,11 +2386,21 @@ def watch(self, https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst """ return CollectionChangeStream( - self, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) - - def rename(self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any) -> MutableMapping[str, Any]: + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + ) + + def rename( + self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any + ) -> MutableMapping[str, Any]: """Rename this collection. If operating in auth mode, client must be authorized as an @@ -2253,13 +2446,20 @@ def rename(self, new_name: str, session: Optional["ClientSession"] = None, **kwa with self._socket_for_writes(session) as sock_info: with self.__database.client._tmp_session(session) as s: return sock_info.command( - 'admin', cmd, + "admin", + cmd, write_concern=write_concern, parse_write_concern_error=True, - session=s, client=self.__database.client) + session=s, + client=self.__database.client, + ) def distinct( - self, key: str, filter: Optional[Mapping[str, Any]] = None, session: Optional["ClientSession"] = None, **kwargs: Any + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional["ClientSession"] = None, + **kwargs: Any, ) -> List: """Get a list of distinct values for `key` among all documents in this collection. @@ -2296,52 +2496,64 @@ def distinct( """ if not isinstance(key, str): raise TypeError("key must be an instance of str") - cmd = SON([("distinct", self.__name), - ("key", key)]) + cmd = SON([("distinct", self.__name), ("key", key)]) if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") kwargs["query"] = filter - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) + def _cmd(session, server, sock_info, read_preference): return self._command( - sock_info, cmd, read_preference=read_preference, + sock_info, + cmd, + read_preference=read_preference, read_concern=self.read_concern, - collation=collation, session=session, - user_fields={"values": 1})["values"] + collation=collation, + session=session, + user_fields={"values": 1}, + )["values"] return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session) + _cmd, self._read_preference_for(session), session + ) def _write_concern_for_cmd(self, cmd, session): - raw_wc = cmd.get('writeConcern') + raw_wc = cmd.get("writeConcern") if raw_wc is not None: return WriteConcern(**raw_wc) else: return self._write_concern_for(session) - def __find_and_modify(self, filter, projection, sort, upsert=None, - return_document=ReturnDocument.BEFORE, - array_filters=None, hint=None, session=None, - let=None, **kwargs): + def __find_and_modify( + self, + filter, + projection, + sort, + upsert=None, + return_document=ReturnDocument.BEFORE, + array_filters=None, + hint=None, + session=None, + let=None, + **kwargs, + ): """Internal findAndModify helper.""" common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): - raise ValueError("return_document must be " - "ReturnDocument.BEFORE or ReturnDocument.AFTER") - collation = validate_collation_or_none(kwargs.pop('collation', None)) - cmd = SON([("findAndModify", self.__name), - ("query", filter), - ("new", return_document)]) + raise ValueError( + "return_document must be " "ReturnDocument.BEFORE or ReturnDocument.AFTER" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) if let: common.validate_is_mapping("let", let) cmd["let"] = let cmd.update(kwargs) if projection is not None: - cmd["fields"] = helpers._fields_list_to_dict(projection, - "projection") + cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") if sort is not None: cmd["sort"] = helpers._index_document(sort) if upsert is not None: @@ -2358,33 +2570,41 @@ def _find_and_modify(session, sock_info, retryable_write): if array_filters is not None: if not acknowledged: raise ConfigurationError( - 'arrayFilters is unsupported for unacknowledged ' - 'writes.') + "arrayFilters is unsupported for unacknowledged " "writes." + ) cmd["arrayFilters"] = list(array_filters) if hint is not None: if sock_info.max_wire_version < 8: raise ConfigurationError( - 'Must be connected to MongoDB 4.2+ to use hint on find and modify commands.') - elif (not acknowledged and sock_info.max_wire_version < 9): + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." + ) + elif not acknowledged and sock_info.max_wire_version < 9: raise ConfigurationError( - 'Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands.') - cmd['hint'] = hint + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." + ) + cmd["hint"] = hint if not write_concern.is_server_default: - cmd['writeConcern'] = write_concern.document - out = self._command(sock_info, cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=write_concern, - collation=collation, session=session, - retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS) + cmd["writeConcern"] = write_concern.document + out = self._command( + sock_info, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) _check_write_command_response(out) return out.get("value") return self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, session) + write_concern.acknowledged, _find_and_modify, session + ) - def find_one_and_delete(self, + def find_one_and_delete( + self, filter: Mapping[str, Any], projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, sort: Optional[_IndexList] = None, @@ -2463,11 +2683,13 @@ def find_one_and_delete(self, Added the `collation` option. .. versionadded:: 3.0 """ - kwargs['remove'] = True - return self.__find_and_modify(filter, projection, sort, let=let, - hint=hint, session=session, **kwargs) + kwargs["remove"] = True + return self.__find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) - def find_one_and_replace(self, + def find_one_and_replace( + self, filter: Mapping[str, Any], replacement: Mapping[str, Any], projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, @@ -2556,12 +2778,21 @@ def find_one_and_replace(self, .. versionadded:: 3.0 """ common.validate_ok_for_replace(replacement) - kwargs['update'] = replacement - return self.__find_and_modify(filter, projection, - sort, upsert, return_document, let=let, - hint=hint, session=session, **kwargs) + kwargs["update"] = replacement + return self.__find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) - def find_one_and_update(self, + def find_one_and_update( + self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, @@ -2692,12 +2923,20 @@ def find_one_and_update(self, .. versionadded:: 3.0 """ common.validate_ok_for_update(update) - common.validate_list_or_none('array_filters', array_filters) - kwargs['update'] = update - return self.__find_and_modify(filter, projection, - sort, upsert, return_document, - array_filters, hint=hint, let=let, - session=session, **kwargs) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + return self.__find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) def __iter__(self) -> "Collection[_DocumentType]": return self @@ -2708,15 +2947,16 @@ def __next__(self) -> None: next = __next__ def __call__(self, *args: Any, **kwargs: Any) -> None: - """This is only here so that some API misusages are easier to debug. - """ + """This is only here so that some API misusages are easier to debug.""" if "." not in self.__name: - raise TypeError("'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % - self.__name) - raise TypeError("'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists." % - self.__name.split(".")[-1]) + raise TypeError( + "'Collection' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self.__name + ) + raise TypeError( + "'Collection' object is not callable. If you meant to " + "call the '%s' method on a 'Collection' object it is " + "failing because no such method exists." % self.__name.split(".")[-1] + ) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index b7dbf7a8e7..d7a37766b2 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,13 +15,11 @@ """CommandCursor class to iterate over command results.""" from collections import deque -from typing import (TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, - Tuple) +from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, Tuple from bson import _convert_raw_document_lists_to_streams from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager -from pymongo.errors import (ConnectionFailure, InvalidOperation, - OperationFailure) +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore from pymongo.response import PinnedResponse from pymongo.typings import _DocumentType @@ -33,9 +31,11 @@ class CommandCursor(Generic[_DocumentType]): """A cursor / iterator over command cursors.""" + _getmore_class = _GetMore - def __init__(self, + def __init__( + self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], address: Optional[Tuple[str, Optional[int]]], @@ -47,15 +47,15 @@ def __init__(self, """Create a new command cursor.""" self.__sock_mgr: Any = None self.__collection: Collection[_DocumentType] = collection - self.__id = cursor_info['id'] - self.__data = deque(cursor_info['firstBatch']) - self.__postbatchresumetoken = cursor_info.get('postBatchResumeToken') + self.__id = cursor_info["id"] + self.__data = deque(cursor_info["firstBatch"]) + self.__postbatchresumetoken = cursor_info.get("postBatchResumeToken") self.__address = address self.__batch_size = batch_size self.__max_await_time_ms = max_await_time_ms self.__session = session self.__explicit_session = explicit_session - self.__killed = (self.__id == 0) + self.__killed = self.__id == 0 if self.__killed: self.__end_session(True) @@ -66,22 +66,19 @@ def __init__(self, self.batch_size(batch_size) - if (not isinstance(max_await_time_ms, int) - and max_await_time_ms is not None): + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") def __del__(self) -> None: self.__die() def __die(self, synchronous=False): - """Closes this cursor. - """ + """Closes this cursor.""" already_killed = self.__killed self.__killed = True if self.__id and not already_killed: cursor_id = self.__id - address = _CursorAddress( - self.__address, self.__ns) + address = _CursorAddress(self.__address, self.__ns) else: # Skip killCursors. cursor_id = 0 @@ -92,7 +89,8 @@ def __die(self, synchronous=False): address, self.__sock_mgr, self.__session, - self.__explicit_session) + self.__explicit_session, + ) if not self.__explicit_session: self.__session = None self.__sock_mgr = None @@ -103,8 +101,7 @@ def __end_session(self, synchronous): self.__session = None def close(self) -> None: - """Explicitly close / kill this cursor. - """ + """Explicitly close / kill this cursor.""" self.__die(True) def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": @@ -157,12 +154,12 @@ def _maybe_pin_connection(self, sock_info): self.__sock_mgr = sock_mgr def __send_message(self, operation): - """Send a getmore message and handle the response. - """ + """Send a getmore message and handle the response.""" client = self.__collection.database.client try: response = client._run_operation( - operation, self._unpack_response, address=self.__address) + operation, self._unpack_response, address=self.__address + ) except OperationFailure as exc: if exc.code in _CURSOR_CLOSED_ERRORS: # Don't send killCursors because the cursor is already closed. @@ -182,13 +179,12 @@ def __send_message(self, operation): if isinstance(response, PinnedResponse): if not self.__sock_mgr: - self.__sock_mgr = _SocketManager(response.socket_info, - response.more_to_come) + self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come) if response.from_command: - cursor = response.docs[0]['cursor'] - documents = cursor['nextBatch'] - self.__postbatchresumetoken = cursor.get('postBatchResumeToken') - self.__id = cursor['id'] + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self.__postbatchresumetoken = cursor.get("postBatchResumeToken") + self.__id = cursor["id"] else: documents = response.docs self.__id = response.data.cursor_id @@ -197,10 +193,10 @@ def __send_message(self, operation): self.close() self.__data = deque(documents) - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.unpack_response(cursor_id, codec_options, user_fields, - legacy_response) + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) def _refresh(self): """Refreshes the cursor with more data from the server. @@ -213,19 +209,23 @@ def _refresh(self): return len(self.__data) if self.__id: # Get More - dbname, collname = self.__ns.split('.', 1) + dbname, collname = self.__ns.split(".", 1) read_pref = self.__collection._read_preference_for(self.session) self.__send_message( - self._getmore_class(dbname, - collname, - self.__batch_size, - self.__id, - self.__collection.codec_options, - read_pref, - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, False)) + self._getmore_class( + dbname, + collname, + self.__batch_size, + self.__id, + self.__collection.codec_options, + read_pref, + self.__session, + self.__collection.database.client, + self.__max_await_time_ms, + self.__sock_mgr, + False, + ) + ) else: # Cursor id is zero nothing else to return self.__die(True) @@ -305,7 +305,8 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): _getmore_class = _RawBatchGetMore - def __init__(self, + def __init__( + self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], address: Optional[Tuple[str, Optional[int]]], @@ -322,15 +323,21 @@ def __init__(self, .. seealso:: The MongoDB documentation on `cursors `_. """ - assert not cursor_info.get('firstBatch') + assert not cursor_info.get("firstBatch") super(RawBatchCommandCursor, self).__init__( - collection, cursor_info, address, batch_size, - max_await_time_ms, session, explicit_session) - - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - raw_response = response.raw_response( - cursor_id, user_fields=user_fields) + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + explicit_session, + ) + + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + raw_response = response.raw_response(cursor_id, user_fields=user_fields) if not legacy_response: # OP_MSG returns firstBatch/nextBatch documents as a BSON array # Re-assemble the array of documents into a document stream diff --git a/pymongo/common.py b/pymongo/common.py index fa2fe9bf11..769b277cf3 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -18,8 +18,20 @@ import datetime import warnings from collections import OrderedDict, abc -from typing import (Any, Callable, Dict, List, Mapping, MutableMapping, - Optional, Sequence, Tuple, Type, Union, cast) +from typing import ( + Any, + Callable, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) from urllib.parse import unquote_plus from bson import SON @@ -27,8 +39,10 @@ from bson.codec_options import CodecOptions, TypeRegistry from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS -from pymongo.compression_support import (validate_compressors, - validate_zlib_compression_level) +from pymongo.compression_support import ( + validate_compressors, + validate_zlib_compression_level, +) from pymongo.driver_info import DriverInfo from pymongo.errors import ConfigurationError from pymongo.monitoring import _validate_event_listeners @@ -40,7 +54,7 @@ ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. -MAX_BSON_SIZE = 16 * (1024 ** 2) +MAX_BSON_SIZE = 16 * (1024**2) MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 @@ -121,10 +135,10 @@ def partition_node(node: str) -> Tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 - idx = node.rfind(':') + idx = node.rfind(":") if idx != -1: - host, port = node[:idx], int(node[idx + 1:]) - if host.startswith('['): + host, port = node[:idx], int(node[idx + 1 :]) + if host.startswith("["): host = host[1:-1] return host, port @@ -147,11 +161,11 @@ def raise_config_error(key: str, dummy: Any) -> None: # Mapping of URI uuid representation options to valid subtypes. _UUID_REPRESENTATIONS = { - 'unspecified': UuidRepresentation.UNSPECIFIED, - 'standard': UuidRepresentation.STANDARD, - 'pythonLegacy': UuidRepresentation.PYTHON_LEGACY, - 'javaLegacy': UuidRepresentation.JAVA_LEGACY, - 'csharpLegacy': UuidRepresentation.CSHARP_LEGACY + "unspecified": UuidRepresentation.UNSPECIFIED, + "standard": UuidRepresentation.STANDARD, + "pythonLegacy": UuidRepresentation.PYTHON_LEGACY, + "javaLegacy": UuidRepresentation.JAVA_LEGACY, + "csharpLegacy": UuidRepresentation.CSHARP_LEGACY, } @@ -165,95 +179,81 @@ def validate_boolean(option: str, value: Any) -> bool: def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): - if value not in ('true', 'false'): - raise ValueError("The value of %s must be " - "'true' or 'false'" % (option,)) - return value == 'true' + if value not in ("true", "false"): + raise ValueError("The value of %s must be " "'true' or 'false'" % (option,)) + return value == "true" return validate_boolean(option, value) def validate_integer(option: str, value: Any) -> int: - """Validates that 'value' is an integer (or basestring representation). - """ + """Validates that 'value' is an integer (or basestring representation).""" if isinstance(value, int): return value elif isinstance(value, str): try: return int(value) except ValueError: - raise ValueError("The value of %s must be " - "an integer" % (option,)) + raise ValueError("The value of %s must be " "an integer" % (option,)) raise TypeError("Wrong type for %s, value must be an integer" % (option,)) def validate_positive_integer(option: str, value: Any) -> int: - """Validate that 'value' is a positive integer, which does not include 0. - """ + """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be " - "a positive integer" % (option,)) + raise ValueError("The value of %s must be " "a positive integer" % (option,)) return val def validate_non_negative_integer(option: str, value: Any) -> int: - """Validate that 'value' is a positive integer or 0. - """ + """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be " - "a non negative integer" % (option,)) + raise ValueError("The value of %s must be " "a non negative integer" % (option,)) return val def validate_readable(option: str, value: Any) -> Optional[str]: - """Validates that 'value' is file-like and readable. - """ + """Validates that 'value' is file-like and readable.""" if value is None: return value # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) - open(value, 'r').close() + open(value, "r").close() return value def validate_positive_integer_or_none(option: str, value: Any) -> Optional[int]: - """Validate that 'value' is a positive integer or None. - """ + """Validate that 'value' is a positive integer or None.""" if value is None: return value return validate_positive_integer(option, value) def validate_non_negative_integer_or_none(option: str, value: Any) -> Optional[int]: - """Validate that 'value' is a positive integer or 0 or None. - """ + """Validate that 'value' is a positive integer or 0 or None.""" if value is None: return value return validate_non_negative_integer(option, value) def validate_string(option: str, value: Any) -> str: - """Validates that 'value' is an instance of `str`. - """ + """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an instance of " - "str" % (option,)) + raise TypeError("Wrong type for %s, value must be an instance of " "str" % (option,)) def validate_string_or_none(option: str, value: Any) -> Optional[str]: - """Validates that 'value' is an instance of `basestring` or `None`. - """ + """Validates that 'value' is an instance of `basestring` or `None`.""" if value is None: return value return validate_string(option, value) def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: - """Validates that 'value' is an integer or string. - """ + """Validates that 'value' is an integer or string.""" if isinstance(value, int): return value elif isinstance(value, str): @@ -261,13 +261,11 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an " - "integer or a string" % (option,)) + raise TypeError("Wrong type for %s, value must be an " "integer or a string" % (option,)) def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: - """Validates that 'value' is an integer or string. - """ + """Validates that 'value' is an integer or string.""" if isinstance(value, int): return value elif isinstance(value, str): @@ -276,13 +274,14 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in except ValueError: return value return validate_non_negative_integer(option, val) - raise TypeError("Wrong type for %s, value must be an " - "non negative integer or a string" % (option,)) + raise TypeError( + "Wrong type for %s, value must be an " "non negative integer or a string" % (option,) + ) def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is - positive. + positive. """ errmsg = "%s must be an integer or float" % (option,) try: @@ -295,8 +294,7 @@ def validate_positive_float(option: str, value: Any) -> float: # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and " - "less than one billion" % (option,)) + raise ValueError("%s must be greater than 0 and " "less than one billion" % (option,)) return value @@ -325,7 +323,7 @@ def validate_timeout_or_zero(option: str, value: Any) -> float: config error. """ if value is None: - raise ConfigurationError("%s cannot be None" % (option, )) + raise ConfigurationError("%s cannot be None" % (option,)) if value == 0 or value == "0": return 0 return validate_positive_float(option, value) / 1000.0 @@ -350,8 +348,7 @@ def validate_max_staleness(option: str, value: Any) -> int: def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: - """Validate a read preference. - """ + """Validate a read preference.""" if not isinstance(value, _ServerMode): raise TypeError("%r is not a read preference." % (value,)) return value @@ -370,33 +367,32 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: def validate_auth_mechanism(option: str, value: Any) -> str: - """Validate the authMechanism URI option. - """ + """Validate the authMechanism URI option.""" if value not in MECHANISMS: raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) return value def validate_uuid_representation(dummy: Any, value: Any) -> int: - """Validate the uuid representation option selected in the URI. - """ + """Validate the uuid representation option selected in the URI.""" try: return _UUID_REPRESENTATIONS[value] except KeyError: - raise ValueError("%s is an invalid UUID representation. " - "Must be one of " - "%s" % (value, tuple(_UUID_REPRESENTATIONS))) + raise ValueError( + "%s is an invalid UUID representation. " + "Must be one of " + "%s" % (value, tuple(_UUID_REPRESENTATIONS)) + ) def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]]: - """Parse readPreferenceTags if passed as a client kwarg. - """ + """Parse readPreferenceTags if passed as a client kwarg.""" if not isinstance(value, list): value = [value] tag_sets: List = [] for tag_set in value: - if tag_set == '': + if tag_set == "": tag_sets.append({}) continue try: @@ -406,37 +402,41 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] tags[unquote_plus(key)] = unquote_plus(val) tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid " - "value for %s" % (tag_set, name)) + raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) return tag_sets -_MECHANISM_PROPS = frozenset(['SERVICE_NAME', - 'CANONICALIZE_HOST_NAME', - 'SERVICE_REALM', - 'AWS_SESSION_TOKEN']) +_MECHANISM_PROPS = frozenset( + ["SERVICE_NAME", "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN"] +) def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" value = validate_string(option, value) props: Dict[str, Any] = {} - for opt in value.split(','): + for opt in value.split(","): try: - key, val = opt.split(':') + key, val = opt.split(":") except ValueError: # Try not to leak the token. - if 'AWS_SESSION_TOKEN' in opt: - opt = ('AWS_SESSION_TOKEN:, did you forget ' - 'to percent-escape the token with quote_plus?') - raise ValueError("auth mechanism properties must be " - "key:value pairs like SERVICE_NAME:" - "mongodb, not %s." % (opt,)) + if "AWS_SESSION_TOKEN" in opt: + opt = ( + "AWS_SESSION_TOKEN:, did you forget " + "to percent-escape the token with quote_plus?" + ) + raise ValueError( + "auth mechanism properties must be " + "key:value pairs like SERVICE_NAME:" + "mongodb, not %s." % (opt,) + ) if key not in _MECHANISM_PROPS: - raise ValueError("%s is not a supported auth " - "mechanism property. Must be one of " - "%s." % (key, tuple(_MECHANISM_PROPS))) - if key == 'CANONICALIZE_HOST_NAME': + raise ValueError( + "%s is not a supported auth " + "mechanism property. Must be one of " + "%s." % (key, tuple(_MECHANISM_PROPS)) + ) + if key == "CANONICALIZE_HOST_NAME": props[key] = validate_boolean_or_string(key, val) else: props[key] = unquote_plus(val) @@ -444,20 +444,23 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Uni return props -def validate_document_class(option: str, value: Any) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: +def validate_document_class( + option: str, value: Any +) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: """Validate the document_class option.""" if not issubclass(value, (abc.MutableMapping, RawBSONDocument)): - raise TypeError("%s must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping" % (option,)) + raise TypeError( + "%s must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "sublass of collections.MutableMapping" % (option,) + ) return value def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): - raise TypeError("%s must be an instance of %s" % ( - option, TypeRegistry)) + raise TypeError("%s must be an instance of %s" % (option, TypeRegistry)) return value @@ -478,26 +481,32 @@ def validate_list_or_none(option: Any, value: Any) -> Optional[List]: def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): - raise TypeError("%s must either be a list or an instance of dict, " - "bson.son.SON, or any other type that inherits from " - "collections.Mapping" % (option,)) + raise TypeError( + "%s must either be a list or an instance of dict, " + "bson.son.SON, or any other type that inherits from " + "collections.Mapping" % (option,) + ) def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): - raise TypeError("%s must be an instance of dict, bson.son.SON, or " - "any other type that inherits from " - "collections.Mapping" % (option,)) + raise TypeError( + "%s must be an instance of dict, bson.son.SON, or " + "any other type that inherits from " + "collections.Mapping" % (option,) + ) def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): - raise TypeError("%s must be an instance of dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or " - "a type that inherits from " - "collections.MutableMapping" % (option,)) + raise TypeError( + "%s must be an instance of dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or " + "a type that inherits from " + "collections.MutableMapping" % (option,) + ) def validate_appname_or_none(option: str, value: Any) -> Optional[str]: @@ -506,7 +515,7 @@ def validate_appname_or_none(option: str, value: Any) -> Optional[str]: return value validate_string(option, value) # We need length in bytes, so encode utf8 first. - if len(value.encode('utf-8')) > 128: + if len(value.encode("utf-8")) > 128: raise ValueError("%s must be <= 128 bytes" % (option,)) return value @@ -544,8 +553,8 @@ def validate_ok_for_replace(replacement: Mapping[str, Any]) -> None: # Replacement can be {} if replacement and not isinstance(replacement, RawBSONDocument): first = next(iter(replacement)) - if first.startswith('$'): - raise ValueError('replacement can not include $ operators') + if first.startswith("$"): + raise ValueError("replacement can not include $ operators") def validate_ok_for_update(update: Any) -> None: @@ -553,30 +562,30 @@ def validate_ok_for_update(update: Any) -> None: validate_list_or_mapping("update", update) # Update cannot be {}. if not update: - raise ValueError('update cannot be empty') + raise ValueError("update cannot be empty") is_document = not isinstance(update, list) first = next(iter(update)) - if is_document and not first.startswith('$'): - raise ValueError('update only works with $ operators') + if is_document and not first.startswith("$"): + raise ValueError("update only works with $ operators") -_UNICODE_DECODE_ERROR_HANDLERS = frozenset(['strict', 'replace', 'ignore']) +_UNICODE_DECODE_ERROR_HANDLERS = frozenset(["strict", "replace", "ignore"]) def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: - """Validate the Unicode decode error handler option of CodecOptions. - """ + """Validate the Unicode decode error handler option of CodecOptions.""" if value not in _UNICODE_DECODE_ERROR_HANDLERS: - raise ValueError("%s is an invalid Unicode decode error handler. " - "Must be one of " - "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS))) + raise ValueError( + "%s is an invalid Unicode decode error handler. " + "Must be one of " + "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) + ) return value def validate_tzinfo(dummy: Any, value: Any) -> Optional[datetime.tzinfo]: - """Validate the tzinfo option - """ + """Validate the tzinfo option""" if value is not None and not isinstance(value, datetime.tzinfo): raise TypeError("%s must be an instance of datetime.tzinfo" % value) return value @@ -587,9 +596,9 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A if value is None: return value from pymongo.encryption_options import AutoEncryptionOpts + if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % ( - option,)) + raise TypeError("%s must be an instance of AutoEncryptionOpts" % (option,)) return value @@ -597,7 +606,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { - 'tls': ['ssl'], + "tls": ["ssl"], } # Dictionary where keys are the names of URI options, and values @@ -605,73 +614,73 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # alias uses a different validator than its public counterpart, it should be # included here as a key, value pair. URI_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { - 'appname': validate_appname_or_none, - 'authmechanism': validate_auth_mechanism, - 'authmechanismproperties': validate_auth_mechanism_properties, - 'authsource': validate_string, - 'compressors': validate_compressors, - 'connecttimeoutms': validate_timeout_or_none_or_zero, - 'directconnection': validate_boolean_or_string, - 'heartbeatfrequencyms': validate_timeout_or_none, - 'journal': validate_boolean_or_string, - 'localthresholdms': validate_positive_float_or_zero, - 'maxidletimems': validate_timeout_or_none, - 'maxconnecting': validate_positive_integer, - 'maxpoolsize': validate_non_negative_integer_or_none, - 'maxstalenessseconds': validate_max_staleness, - 'readconcernlevel': validate_string_or_none, - 'readpreference': validate_read_preference_mode, - 'readpreferencetags': validate_read_preference_tags, - 'replicaset': validate_string_or_none, - 'retryreads': validate_boolean_or_string, - 'retrywrites': validate_boolean_or_string, - 'loadbalanced': validate_boolean_or_string, - 'serverselectiontimeoutms': validate_timeout_or_zero, - 'sockettimeoutms': validate_timeout_or_none_or_zero, - 'tls': validate_boolean_or_string, - 'tlsallowinvalidcertificates': validate_boolean_or_string, - 'tlsallowinvalidhostnames': validate_boolean_or_string, - 'tlscafile': validate_readable, - 'tlscertificatekeyfile': validate_readable, - 'tlscertificatekeyfilepassword': validate_string_or_none, - 'tlsdisableocspendpointcheck': validate_boolean_or_string, - 'tlsinsecure': validate_boolean_or_string, - 'w': validate_non_negative_int_or_basestring, - 'wtimeoutms': validate_non_negative_integer, - 'zlibcompressionlevel': validate_zlib_compression_level, - 'srvservicename': validate_string, - 'srvmaxhosts': validate_non_negative_integer + "appname": validate_appname_or_none, + "authmechanism": validate_auth_mechanism, + "authmechanismproperties": validate_auth_mechanism_properties, + "authsource": validate_string, + "compressors": validate_compressors, + "connecttimeoutms": validate_timeout_or_none_or_zero, + "directconnection": validate_boolean_or_string, + "heartbeatfrequencyms": validate_timeout_or_none, + "journal": validate_boolean_or_string, + "localthresholdms": validate_positive_float_or_zero, + "maxidletimems": validate_timeout_or_none, + "maxconnecting": validate_positive_integer, + "maxpoolsize": validate_non_negative_integer_or_none, + "maxstalenessseconds": validate_max_staleness, + "readconcernlevel": validate_string_or_none, + "readpreference": validate_read_preference_mode, + "readpreferencetags": validate_read_preference_tags, + "replicaset": validate_string_or_none, + "retryreads": validate_boolean_or_string, + "retrywrites": validate_boolean_or_string, + "loadbalanced": validate_boolean_or_string, + "serverselectiontimeoutms": validate_timeout_or_zero, + "sockettimeoutms": validate_timeout_or_none_or_zero, + "tls": validate_boolean_or_string, + "tlsallowinvalidcertificates": validate_boolean_or_string, + "tlsallowinvalidhostnames": validate_boolean_or_string, + "tlscafile": validate_readable, + "tlscertificatekeyfile": validate_readable, + "tlscertificatekeyfilepassword": validate_string_or_none, + "tlsdisableocspendpointcheck": validate_boolean_or_string, + "tlsinsecure": validate_boolean_or_string, + "w": validate_non_negative_int_or_basestring, + "wtimeoutms": validate_non_negative_integer, + "zlibcompressionlevel": validate_zlib_compression_level, + "srvservicename": validate_string, + "srvmaxhosts": validate_non_negative_integer, } # Dictionary where keys are the names of URI options specific to pymongo, # and values are functions that validate user-input values for those options. NONSPEC_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { - 'connect': validate_boolean_or_string, - 'driver': validate_driver_or_none, - 'server_api': validate_server_api_or_none, - 'fsync': validate_boolean_or_string, - 'minpoolsize': validate_non_negative_integer, - 'tlscrlfile': validate_readable, - 'tz_aware': validate_boolean_or_string, - 'unicode_decode_error_handler': validate_unicode_decode_error_handler, - 'uuidrepresentation': validate_uuid_representation, - 'waitqueuemultiple': validate_non_negative_integer_or_none, - 'waitqueuetimeoutms': validate_timeout_or_none, + "connect": validate_boolean_or_string, + "driver": validate_driver_or_none, + "server_api": validate_server_api_or_none, + "fsync": validate_boolean_or_string, + "minpoolsize": validate_non_negative_integer, + "tlscrlfile": validate_readable, + "tz_aware": validate_boolean_or_string, + "unicode_decode_error_handler": validate_unicode_decode_error_handler, + "uuidrepresentation": validate_uuid_representation, + "waitqueuemultiple": validate_non_negative_integer_or_none, + "waitqueuetimeoutms": validate_timeout_or_none, } # Dictionary where keys are the names of keyword-only options for the # MongoClient constructor, and values are functions that validate user-input # values for those options. KW_VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = { - 'document_class': validate_document_class, - 'type_registry': validate_type_registry, - 'read_preference': validate_read_preference, - 'event_listeners': _validate_event_listeners, - 'tzinfo': validate_tzinfo, - 'username': validate_string_or_none, - 'password': validate_string_or_none, - 'server_selector': validate_is_callable_or_none, - 'auto_encryption_opts': validate_auto_encryption_opts_or_none, + "document_class": validate_document_class, + "type_registry": validate_type_registry, + "read_preference": validate_read_preference, + "event_listeners": _validate_event_listeners, + "tzinfo": validate_tzinfo, + "username": validate_string_or_none, + "password": validate_string_or_none, + "server_selector": validate_is_callable_or_none, + "auto_encryption_opts": validate_auto_encryption_opts_or_none, } # Dictionary where keys are any URI option name, and values are the @@ -679,7 +688,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # variant need not be included here. Options whose public and internal # names are the same need not be included here. INTERNAL_URI_OPTION_NAME_MAP: Dict[str, str] = { - 'ssl': 'tls', + "ssl": "tls", } # Map from deprecated URI option names to a tuple indicating the method of @@ -701,8 +710,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A for optname, aliases in URI_OPTIONS_ALIAS_MAP.items(): for alias in aliases: if alias not in URI_OPTIONS_VALIDATOR_MAP: - URI_OPTIONS_VALIDATOR_MAP[alias] = ( - URI_OPTIONS_VALIDATOR_MAP[optname]) + URI_OPTIONS_VALIDATOR_MAP[alias] = URI_OPTIONS_VALIDATOR_MAP[optname] # Map containing all URI option and keyword argument validators. VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() @@ -710,39 +718,38 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # List of timeout-related options. TIMEOUT_OPTIONS: List[str] = [ - 'connecttimeoutms', - 'heartbeatfrequencyms', - 'maxidletimems', - 'maxstalenessseconds', - 'serverselectiontimeoutms', - 'sockettimeoutms', - 'waitqueuetimeoutms', + "connecttimeoutms", + "heartbeatfrequencyms", + "maxidletimems", + "maxstalenessseconds", + "serverselectiontimeoutms", + "sockettimeoutms", + "waitqueuetimeoutms", ] -_AUTH_OPTIONS = frozenset(['authmechanismproperties']) +_AUTH_OPTIONS = frozenset(["authmechanismproperties"]) def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: - """Validate optional authentication parameters. - """ + """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError('Unknown ' - 'authentication option: %s' % (option,)) + raise ConfigurationError("Unknown " "authentication option: %s" % (option,)) return option, value def validate(option: str, value: Any) -> Tuple[str, Any]: - """Generic validation function. - """ + """Generic validation function.""" lower = option.lower() validator = VALIDATORS.get(lower, raise_config_error) value = validator(option, value) return option, value -def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> MutableMapping[str, Any]: +def get_validated_options( + options: Mapping[str, Any], warn: bool = True +) -> MutableMapping[str, Any]: """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. @@ -765,8 +772,7 @@ def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> Muta for opt, value in options.items(): normed_key = get_normed_key(opt) try: - validator = URI_OPTIONS_VALIDATOR_MAP.get( - normed_key, raise_config_error) + validator = URI_OPTIONS_VALIDATOR_MAP.get(normed_key, raise_config_error) value = validator(opt, value) except (ValueError, TypeError, ConfigurationError) as exc: if warn: @@ -779,14 +785,7 @@ def get_validated_options(options: Mapping[str, Any], warn: bool = True) -> Muta # List of write-concern-related options. -WRITE_CONCERN_OPTIONS = frozenset([ - 'w', - 'wtimeout', - 'wtimeoutms', - 'fsync', - 'j', - 'journal' -]) +WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) class BaseObject(object): @@ -796,28 +795,38 @@ class BaseObject(object): SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB. """ - def __init__(self, codec_options: CodecOptions, read_preference: _ServerMode, write_concern: WriteConcern, - read_concern: ReadConcern) -> None: + def __init__( + self, + codec_options: CodecOptions, + read_preference: _ServerMode, + write_concern: WriteConcern, + read_concern: ReadConcern, + ) -> None: if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of " - "bson.codec_options.CodecOptions") + raise TypeError( + "codec_options must be an instance of " "bson.codec_options.CodecOptions" + ) self.__codec_options = codec_options if not isinstance(read_preference, _ServerMode): - raise TypeError("%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." % (read_preference,)) + raise TypeError( + "%r is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." % (read_preference,) + ) self.__read_preference = read_preference if not isinstance(write_concern, WriteConcern): - raise TypeError("write_concern must be an instance of " - "pymongo.write_concern.WriteConcern") + raise TypeError( + "write_concern must be an instance of " "pymongo.write_concern.WriteConcern" + ) self.__write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of " - "pymongo.read_concern.ReadConcern") + raise TypeError( + "read_concern must be an instance of " "pymongo.read_concern.ReadConcern" + ) self.__read_concern = read_concern @property @@ -838,8 +847,7 @@ def write_concern(self) -> WriteConcern: return self.__write_concern def _write_concern_for(self, session): - """Read only access to the write concern of this instance or session. - """ + """Read only access to the write concern of this instance or session.""" # Override this operation's write concern with the transaction's. if session and session.in_transaction: return DEFAULT_WRITE_CONCERN @@ -855,8 +863,7 @@ def read_preference(self) -> _ServerMode: return self.__read_preference def _read_preference_for(self, session): - """Read only access to the read preference of this instance or session. - """ + """Read only access to the read preference of this instance or session.""" # Override this operation's read preference with the transaction's. if session: return session._txn_read_preference() or self.__read_preference diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index c9cc041aff..72cc232867 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -17,6 +17,7 @@ try: import snappy + _HAVE_SNAPPY = True except ImportError: # python-snappy isn't available. @@ -24,6 +25,7 @@ try: import zlib + _HAVE_ZLIB = True except ImportError: # Python built without zlib support. @@ -31,6 +33,7 @@ try: from zstandard import ZstdCompressor, ZstdDecompressor + _HAVE_ZSTD = True except ImportError: _HAVE_ZSTD = False @@ -59,17 +62,20 @@ def validate_compressors(dummy, value): compressors.remove(compressor) warnings.warn( "Wire protocol compression with snappy is not available. " - "You must install the python-snappy module for snappy support.") + "You must install the python-snappy module for snappy support." + ) elif compressor == "zlib" and not _HAVE_ZLIB: compressors.remove(compressor) warnings.warn( "Wire protocol compression with zlib is not available. " - "The zlib module is not available.") + "The zlib module is not available." + ) elif compressor == "zstd" and not _HAVE_ZSTD: compressors.remove(compressor) warnings.warn( "Wire protocol compression with zstandard is not available. " - "You must install the zstandard module for zstandard support.") + "You must install the zstandard module for zstandard support." + ) return compressors @@ -79,8 +85,7 @@ def validate_zlib_compression_level(option, value): except: raise TypeError("%s must be an integer, not %r." % (option, value)) if level < -1 or level > 9: - raise ValueError( - "%s must be between -1 and 9, not %d." % (option, level)) + raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) return level diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 152acaca65..ba9e5956f2 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -17,52 +17,74 @@ import threading import warnings from collections import deque -from typing import (TYPE_CHECKING, Any, Dict, Generic, Iterable, List, Mapping, - MutableMapping, Optional, Sequence, Tuple, Union, cast, overload) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + Iterable, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Union, + cast, + overload, +) from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON from pymongo import helpers from pymongo.collation import validate_collation_or_none -from pymongo.common import (validate_boolean, validate_is_document_type, - validate_is_mapping) -from pymongo.errors import (ConnectionFailure, InvalidOperation, - OperationFailure) -from pymongo.message import (_CursorAddress, _GetMore, _Query, - _RawBatchGetMore, _RawBatchQuery) +from pymongo.common import ( + validate_boolean, + validate_is_document_type, + validate_is_mapping, +) +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import ( + _CursorAddress, + _GetMore, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) from pymongo.response import PinnedResponse from pymongo.typings import _CollationIn, _DocumentType # These errors mean that the server has already killed the cursor so there is # no need to send killCursors. -_CURSOR_CLOSED_ERRORS = frozenset([ - 43, # CursorNotFound - 50, # MaxTimeMSExpired - 175, # QueryPlanKilled - 237, # CursorKilled - - # On a tailable cursor, the following errors mean the capped collection - # rolled over. - # MongoDB 2.6: - # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} - 28617, - # MongoDB 3.0: - # {'$err': 'getMore executor error: UnknownError no details available', - # 'code': 17406, 'ok': 0} - 17406, - # MongoDB 3.2 + 3.4: - # {'ok': 0.0, 'errmsg': 'GetMore command executor error: - # CappedPositionLost: CollectionScan died due to failure to restore - # tailable cursor position. Last seen record id: RecordId(3)', - # 'code': 96} - 96, - # MongoDB 3.6+: - # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to - # restore tailable cursor position. Last seen record id: RecordId(3)"', - # 'code': 136, 'codeName': 'CappedPositionLost'} - 136, -]) +_CURSOR_CLOSED_ERRORS = frozenset( + [ + 43, # CursorNotFound + 50, # MaxTimeMSExpired + 175, # QueryPlanKilled + 237, # CursorKilled + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, + ] +) _QUERY_OPTIONS = { "tailable_cursor": 2, @@ -71,7 +93,8 @@ "no_timeout": 16, "await_data": 32, "exhaust": 64, - "partial": 128} + "partial": 128, +} class CursorType(object): @@ -104,8 +127,8 @@ class CursorType(object): class _SocketManager(object): - """Used with exhaust cursors to ensure the socket is returned. - """ + """Used with exhaust cursors to ensure the socket is returned.""" + def __init__(self, sock, more_to_come): self.sock = sock self.more_to_come = more_to_come @@ -116,13 +139,13 @@ def update_exhaust(self, more_to_come): self.more_to_come = more_to_come def close(self): - """Return this instance's socket to the connection pool. - """ + """Return this instance's socket to the connection pool.""" if not self.closed: self.closed = True self.sock.unpin() self.sock = None + _Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] _Hint = Union[str, _Sort] @@ -133,12 +156,13 @@ def close(self): class Cursor(Generic[_DocumentType]): - """A cursor / iterator over Mongo query results. - """ + """A cursor / iterator over Mongo query results.""" + _query_class = _Query _getmore_class = _GetMore - def __init__(self, + def __init__( + self, collection: "Collection[_DocumentType]", filter: Optional[Mapping[str, Any]] = None, projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, @@ -162,7 +186,7 @@ def __init__(self, comment: Any = None, session: Optional["ClientSession"] = None, allow_disk_use: Optional[bool] = None, - let: Optional[bool] = None + let: Optional[bool] = None, ) -> None: """Create a new cursor. @@ -195,15 +219,22 @@ def __init__(self, raise TypeError("limit must be an instance of int") validate_boolean("no_cursor_timeout", no_cursor_timeout) if no_cursor_timeout and not self.__explicit_session: - warnings.warn("use an explicit session with no_cursor_timeout=True " - "otherwise the cursor may still timeout after " - "30 minutes, for more info see " - "https://docs.mongodb.com/v4.4/reference/method/" - "cursor.noCursorTimeout/" - "#session-idle-timeout-overrides-nocursortimeout", - UserWarning, stacklevel=2) - if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE, - CursorType.TAILABLE_AWAIT, CursorType.EXHAUST): + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://docs.mongodb.com/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): raise ValueError("not a valid value for cursor_type") validate_boolean("allow_partial_results", allow_partial_results) validate_boolean("oplog_replay", oplog_replay) @@ -246,8 +277,7 @@ def __init__(self, # Exhaust cursor support if cursor_type == CursorType.EXHAUST: if self.__collection.database.client.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') + raise InvalidOperation("Exhaust cursors are " "not supported by mongos") if limit: raise InvalidOperation("Can't use limit and exhaust together.") self.__exhaust = True @@ -290,8 +320,7 @@ def collection(self) -> "Collection[_DocumentType]": @property def retrieved(self) -> int: - """The number of documents retrieved so far. - """ + """The number of documents retrieved so far.""" return self.__retrieved def __del__(self) -> None: @@ -333,28 +362,47 @@ def _clone(self, deepcopy=True, base=None): else: base = self._clone_base(None) - values_to_clone = ("spec", "projection", "skip", "limit", - "max_time_ms", "max_await_time_ms", "comment", - "max", "min", "ordering", "explain", "hint", - "batch_size", "max_scan", - "query_flags", "collation", "empty", - "show_record_id", "return_key", "allow_disk_use", - "snapshot", "exhaust", "has_filter") - data = dict((k, v) for k, v in self.__dict__.items() - if k.startswith('_Cursor__') and k[9:] in values_to_clone) + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + ) + data = dict( + (k, v) + for k, v in self.__dict__.items() + if k.startswith("_Cursor__") and k[9:] in values_to_clone + ) if deepcopy: data = self._deepcopy(data) base.__dict__.update(data) return base def _clone_base(self, session): - """Creates an empty Cursor object for information to be copied into. - """ + """Creates an empty Cursor object for information to be copied into.""" return self.__class__(self.__collection, session=session) def __die(self, synchronous=False): - """Closes this cursor. - """ + """Closes this cursor.""" try: already_killed = self.__killed except AttributeError: @@ -364,8 +412,7 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: cursor_id = self.__id - address = _CursorAddress( - self.__address, "%s.%s" % (self.__dbname, self.__collname)) + address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname)) else: # Skip killCursors. cursor_id = 0 @@ -376,19 +423,18 @@ def __die(self, synchronous=False): address, self.__sock_mgr, self.__session, - self.__explicit_session) + self.__explicit_session, + ) if not self.__explicit_session: self.__session = None self.__sock_mgr = None def close(self) -> None: - """Explicitly close / kill this cursor. - """ + """Explicitly close / kill this cursor.""" self.__die(True) def __query_spec(self): - """Get the spec to use for a query. - """ + """Get the spec to use for a query.""" operators = {} if self.__ordering: operators["$orderby"] = self.__ordering @@ -437,16 +483,15 @@ def __query_spec(self): # that breaks commands like count and find_and_modify. # Checking spec.keys()[0] covers the case that the spec # was passed as an instance of SON or OrderedDict. - elif ("query" in self.__spec and - (len(self.__spec) == 1 or - next(iter(self.__spec)) == "query")): + elif "query" in self.__spec and ( + len(self.__spec) == 1 or next(iter(self.__spec)) == "query" + ): return SON({"$query": self.__spec}) return self.__spec def __check_okay_to_chain(self): - """Check if it is okay to chain more options onto this cursor. - """ + """Check if it is okay to chain more options onto this cursor.""" if self.__retrieved or self.__id is not None: raise InvalidOperation("cannot set options after executing query") @@ -464,8 +509,7 @@ def add_option(self, mask: int) -> "Cursor[_DocumentType]": if self.__limit: raise InvalidOperation("Can't use limit and exhaust together.") if self.__collection.database.client.is_mongos: - raise InvalidOperation('Exhaust cursors are ' - 'not supported by mongos') + raise InvalidOperation("Exhaust cursors are " "not supported by mongos") self.__exhaust = True self.__query_flags |= mask @@ -503,7 +547,7 @@ def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]": .. versionadded:: 3.11 """ if not isinstance(allow_disk_use, bool): - raise TypeError('allow_disk_use must be a bool') + raise TypeError("allow_disk_use must be a bool") self.__check_okay_to_chain() self.__allow_disk_use = allow_disk_use @@ -594,8 +638,7 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]": :Parameters: - `max_time_ms`: the time limit after which the operation is aborted """ - if (not isinstance(max_time_ms, int) - and max_time_ms is not None): + if not isinstance(max_time_ms, int) and max_time_ms is not None: raise TypeError("max_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -619,8 +662,7 @@ def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_Docume .. versionadded:: 3.2 """ - if (not isinstance(max_await_time_ms, int) - and max_await_time_ms is not None): + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") self.__check_okay_to_chain() @@ -688,15 +730,15 @@ def __getitem__(self, index): skip = 0 if index.start is not None: if index.start < 0: - raise IndexError("Cursor instances do not support " - "negative indices") + raise IndexError("Cursor instances do not support " "negative indices") skip = index.start if index.stop is not None: limit = index.stop - skip if limit < 0: - raise IndexError("stop index must be greater than start " - "index for slice %r" % index) + raise IndexError( + "stop index must be greater than start " "index for slice %r" % index + ) if limit == 0: self.__empty = True else: @@ -708,8 +750,7 @@ def __getitem__(self, index): if isinstance(index, int): if index < 0: - raise IndexError("Cursor instances do not support negative " - "indices") + raise IndexError("Cursor instances do not support negative " "indices") clone = self.clone() clone.skip(index + self.__skip) clone.limit(-1) # use a hard limit @@ -717,8 +758,7 @@ def __getitem__(self, index): for doc in clone: return doc raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor " - "instances" % index) + raise TypeError("index %r cannot be applied to Cursor " "instances" % index) def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": """**DEPRECATED** - Limit the number of documents to scan when @@ -786,7 +826,9 @@ def min(self, spec: _Sort) -> "Cursor[_DocumentType]": self.__min = SON(spec) return self - def sort(self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None) -> "Cursor[_DocumentType]": + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> "Cursor[_DocumentType]": """Sorts this cursor's results. Pass a field name and a direction, either @@ -853,14 +895,13 @@ def distinct(self, key: str) -> List: if self.__spec: options["query"] = self.__spec if self.__max_time_ms is not None: - options['maxTimeMS'] = self.__max_time_ms + options["maxTimeMS"] = self.__max_time_ms if self.__comment: - options['comment'] = self.__comment + options["comment"] = self.__comment if self.__collation is not None: - options['collation'] = self.__collation + options["collation"] = self.__collation - return self.__collection.distinct( - key, session=self.__session, **options) + return self.__collection.distinct(key, session=self.__session, **options) def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. @@ -1005,12 +1046,12 @@ def __send_message(self, operation): client = self.__collection.database.client # OP_MSG is required to support exhaust cursors with encryption. if client._encrypter and self.__exhaust: - raise InvalidOperation( - "exhaust cursors do not support auto encryption") + raise InvalidOperation("exhaust cursors do not support auto encryption") try: response = client._run_operation( - operation, self._unpack_response, address=self.__address) + operation, self._unpack_response, address=self.__address + ) except OperationFailure as exc: if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: # Don't send killCursors because the cursor is already closed. @@ -1020,8 +1061,10 @@ def __send_message(self, operation): # due to capped collection roll over. Setting # self.__killed to True ensures Cursor.alive will be # False. No need to re-raise. - if (exc.code in _CURSOR_CLOSED_ERRORS and - self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]): + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): return raise except ConnectionFailure: @@ -1036,23 +1079,22 @@ def __send_message(self, operation): self.__address = response.address if isinstance(response, PinnedResponse): if not self.__sock_mgr: - self.__sock_mgr = _SocketManager(response.socket_info, - response.more_to_come) + self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come) cmd_name = operation.name docs = response.docs if response.from_command: if cmd_name != "explain": - cursor = docs[0]['cursor'] - self.__id = cursor['id'] - if cmd_name == 'find': - documents = cursor['firstBatch'] + cursor = docs[0]["cursor"] + self.__id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] # Update the namespace used for future getMore commands. - ns = cursor.get('ns') + ns = cursor.get("ns") if ns: - self.__dbname, self.__collname = ns.split('.', 1) + self.__dbname, self.__collname = ns.split(".", 1) else: - documents = cursor['nextBatch'] + documents = cursor["nextBatch"] self.__data = deque(documents) self.__retrieved += len(documents) else: @@ -1072,16 +1114,15 @@ def __send_message(self, operation): if self.__limit and self.__id and self.__limit <= self.__retrieved: self.close() - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - return response.unpack_response(cursor_id, codec_options, user_fields, - legacy_response) + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) def _read_preference(self): if self.__read_preference is None: # Save the read preference for getMore commands. - self.__read_preference = self.__collection._read_preference_for( - self.session) + self.__read_preference = self.__collection._read_preference_for(self.session) return self.__read_preference def _refresh(self): @@ -1101,23 +1142,26 @@ def _refresh(self): if (self.__min or self.__max) and not self.__hint: raise InvalidOperation( "Passing a 'hint' is required when using the min/max query" - " option to ensure the query utilizes the correct index") - q = self._query_class(self.__query_flags, - self.__collection.database.name, - self.__collection.name, - self.__skip, - self.__query_spec(), - self.__projection, - self.__codec_options, - self._read_preference(), - self.__limit, - self.__batch_size, - self.__read_concern, - self.__collation, - self.__session, - self.__collection.database.client, - self.__allow_disk_use, - self.__exhaust) + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self.__query_flags, + self.__collection.database.name, + self.__collection.name, + self.__skip, + self.__query_spec(), + self.__projection, + self.__codec_options, + self._read_preference(), + self.__limit, + self.__batch_size, + self.__read_concern, + self.__collation, + self.__session, + self.__collection.database.client, + self.__allow_disk_use, + self.__exhaust, + ) self.__send_message(q) elif self.__id: # Get More if self.__limit: @@ -1127,17 +1171,19 @@ def _refresh(self): else: limit = self.__batch_size # Exhaust cursors don't send getMore messages. - g = self._getmore_class(self.__dbname, - self.__collname, - limit, - self.__id, - self.__codec_options, - self._read_preference(), - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - self.__exhaust) + g = self._getmore_class( + self.__dbname, + self.__collname, + limit, + self.__id, + self.__codec_options, + self._read_preference(), + self.__session, + self.__collection.database.client, + self.__max_await_time_ms, + self.__sock_mgr, + self.__exhaust, + ) self.__send_message(g) return len(self.__data) @@ -1232,7 +1278,7 @@ def _deepcopy(self, x, memo=None): don't have to copy them when cloning. """ y: Any - if not hasattr(x, 'items'): + if not hasattr(x, "items"): y, is_list, iterator = [], True, enumerate(x) else: y, is_list, iterator = {}, False, x.items() @@ -1276,10 +1322,10 @@ def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs """ super(RawBatchCursor, self).__init__(collection, *args, **kwargs) - def _unpack_response(self, response, cursor_id, codec_options, - user_fields=None, legacy_response=False): - raw_response = response.raw_response( - cursor_id, user_fields=user_fields) + def _unpack_response( + self, response, cursor_id, codec_options, user_fields=None, legacy_response=False + ): + raw_response = response.raw_response(cursor_id, user_fields=user_fields) if not legacy_response: # OP_MSG returns firstBatch/nextBatch documents as a BSON array # Re-assemble the array of documents into a document stream diff --git a/pymongo/daemon.py b/pymongo/daemon.py index f0253547d9..53141751ac 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -24,7 +24,6 @@ import sys import warnings - # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) @@ -53,23 +52,29 @@ def _silence_resource_warning(popen): popen.returncode = 0 -if sys.platform == 'win32': +if sys.platform == "win32": # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. - _DETACHED_PROCESS = getattr(subprocess, 'DETACHED_PROCESS', 0x00000008) + _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) def _spawn_daemon(args): """Spawn a daemon process (Windows).""" try: - with open(os.devnull, 'r+b') as devnull: + with open(os.devnull, "r+b") as devnull: popen = subprocess.Popen( args, creationflags=_DETACHED_PROCESS, - stdin=devnull, stderr=devnull, stdout=devnull) + stdin=devnull, + stderr=devnull, + stdout=devnull, + ) _silence_resource_warning(popen) except FileNotFoundError as exc: - warnings.warn(f'Failed to start {args[0]}: is it on your $PATH?\n' - f'Original exception: {exc}', RuntimeWarning, - stacklevel=2) + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) + else: # On Unix we spawn the daemon process with a double Popen. # 1) The first Popen runs this file as a Python script using the current @@ -85,16 +90,16 @@ def _spawn_daemon(args): def _spawn(args): """Spawn the process and silence stdout/stderr.""" try: - with open(os.devnull, 'r+b') as devnull: + with open(os.devnull, "r+b") as devnull: return subprocess.Popen( - args, - close_fds=True, - stdin=devnull, stderr=devnull, stdout=devnull) + args, close_fds=True, stdin=devnull, stderr=devnull, stdout=devnull + ) except FileNotFoundError as exc: - warnings.warn(f'Failed to start {args[0]}: is it on your $PATH?\n' - f'Original exception: {exc}', RuntimeWarning, - stacklevel=2) - + warnings.warn( + f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + RuntimeWarning, + stacklevel=2, + ) def _spawn_daemon_double_popen(args): """Spawn a daemon process using a double subprocess.Popen.""" @@ -105,7 +110,6 @@ def _spawn_daemon_double_popen(args): # processes. _popen_wait(temp_proc, _WAIT_TIMEOUT) - def _spawn_daemon(args): """Spawn a daemon process (Unix).""" # "If Python is unable to retrieve the real path to its executable, @@ -123,10 +127,9 @@ def _spawn_daemon(args): # until the main application exits. _spawn(args) - - if __name__ == '__main__': + if __name__ == "__main__": # Attempt to start a new session to decouple from the parent. - if hasattr(os, 'setsid'): + if hasattr(os, "setsid"): try: os.setsid() except OSError: diff --git a/pymongo/database.py b/pymongo/database.py index 4f5f931352..675db132f7 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,8 +13,18 @@ # limitations under the License. """Database level operations.""" -from typing import (TYPE_CHECKING, Any, Dict, Generic, List, Mapping, MutableMapping, Optional, - Sequence, Union) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, +) from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions from bson.dbref import DBRef @@ -31,15 +41,13 @@ def _check_name(name): - """Check if a database name is valid. - """ + """Check if a database name is valid.""" if not name: raise InvalidName("database name cannot be the empty string") - for invalid_char in [' ', '.', '$', '/', '\\', '\x00', '"']: + for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: if invalid_char in name: - raise InvalidName("database names cannot contain the " - "character %r" % invalid_char) + raise InvalidName("database names cannot contain the " "character %r" % invalid_char) if TYPE_CHECKING: @@ -50,9 +58,10 @@ def _check_name(name): class Database(common.BaseObject, Generic[_DocumentType]): - """A Mongo database. - """ - def __init__(self, + """A Mongo database.""" + + def __init__( + self, client: "MongoClient[_DocumentType]", name: str, codec_options: Optional[CodecOptions] = None, @@ -110,12 +119,13 @@ def __init__(self, codec_options or client.codec_options, read_preference or client.read_preference, write_concern or client.write_concern, - read_concern or client.read_concern) + read_concern or client.read_concern, + ) if not isinstance(name, str): raise TypeError("name must be an instance of str") - if name != '$external': + if name != "$external": _check_name(name) self.__name = name @@ -131,7 +141,8 @@ def name(self) -> str: """The name of this :class:`Database`.""" return self.__name - def with_options(self, + def with_options( + self, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, @@ -168,17 +179,18 @@ def with_options(self, .. versionadded:: 3.8 """ - return Database(self.client, - self.__name, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern) + return Database( + self.client, + self.__name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) def __eq__(self, other: Any) -> bool: if isinstance(other, Database): - return (self.__client == other.client and - self.__name == other.name) + return self.__client == other.client and self.__name == other.name return NotImplemented def __ne__(self, other: Any) -> bool: @@ -198,10 +210,11 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: :Parameters: - `name`: the name of the collection to get """ - if name.startswith('_'): + if name.startswith("_"): raise AttributeError( "Database has no attribute %r. To access the %s" - " collection, use database[%r]." % (name, name, name)) + " collection, use database[%r]." % (name, name, name) + ) return self.__getitem__(name) def __getitem__(self, name: str) -> "Collection[_DocumentType]": @@ -214,7 +227,8 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": """ return Collection(self, name) - def get_collection(self, + def get_collection( + self, name: str, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -259,10 +273,11 @@ def get_collection(self, used. """ return Collection( - self, name, False, codec_options, read_preference, - write_concern, read_concern) + self, name, False, codec_options, read_preference, write_concern, read_concern + ) - def create_collection(self, + def create_collection( + self, name: str, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -351,19 +366,25 @@ def create_collection(self, with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. - if ((not s or not s.in_transaction) and - name in self.list_collection_names( - filter={"name": name}, session=s)): + if (not s or not s.in_transaction) and name in self.list_collection_names( + filter={"name": name}, session=s + ): raise CollectionInvalid("collection %s already exists" % name) - return Collection(self, name, True, codec_options, - read_preference, write_concern, - read_concern, session=s, **kwargs) + return Collection( + self, + name, + True, + codec_options, + read_preference, + write_concern, + read_concern, + session=s, + **kwargs, + ) - def aggregate(self, - pipeline: _Pipeline, - session: Optional["ClientSession"] = None, - **kwargs: Any + def aggregate( + self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. @@ -428,13 +449,19 @@ def aggregate(self, """ with self.client._tmp_session(session, close=False) as s: cmd = _DatabaseAggregationCommand( - self, CommandCursor, pipeline, kwargs, session is not None, - user_fields={'cursor': {'firstBatch': 1}}) + self, + CommandCursor, + pipeline, + kwargs, + session is not None, + user_fields={"cursor": {"firstBatch": 1}}, + ) return self.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(s), s, - retryable=not cmd._performs_write) + cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write + ) - def watch(self, + def watch( + self, pipeline: Optional[_Pipeline] = None, full_document: Optional[str] = None, resume_after: Optional[Mapping[str, Any]] = None, @@ -530,15 +557,32 @@ def watch(self, https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst """ return DatabaseChangeStream( - self, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) - - def _command(self, sock_info, command, value=1, check=True, - allowable_errors=None, read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, - write_concern=None, - parse_write_concern_error=False, session=None, **kwargs): + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + ) + + def _command( + self, + sock_info, + command, + value=1, + check=True, + allowable_errors=None, + read_preference=ReadPreference.PRIMARY, + codec_options=DEFAULT_CODEC_OPTIONS, + write_concern=None, + parse_write_concern_error=False, + session=None, + **kwargs, + ): """Internal command helper.""" if isinstance(command, str): command = SON([(command, value)]) @@ -555,9 +599,11 @@ def _command(self, sock_info, command, value=1, check=True, write_concern=write_concern, parse_write_concern_error=parse_write_concern_error, session=s, - client=self.__client) + client=self.__client, + ) - def command(self, + def command( + self, command: Union[str, MutableMapping[str, Any]], value: Any = 1, check: bool = True, @@ -650,57 +696,78 @@ def command(self, .. seealso:: The MongoDB documentation on `commands `_. """ if read_preference is None: - read_preference = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) - with self.__client._socket_for_reads( - read_preference, session) as (sock_info, read_preference): - return self._command(sock_info, command, value, - check, allowable_errors, read_preference, - codec_options, session=session, **kwargs) - - def _retryable_read_command(self, command, value=1, check=True, - allowable_errors=None, read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, session=None, **kwargs): + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + with self.__client._socket_for_reads(read_preference, session) as ( + sock_info, + read_preference, + ): + return self._command( + sock_info, + command, + value, + check, + allowable_errors, + read_preference, + codec_options, + session=session, + **kwargs, + ) + + def _retryable_read_command( + self, + command, + value=1, + check=True, + allowable_errors=None, + read_preference=None, + codec_options=DEFAULT_CODEC_OPTIONS, + session=None, + **kwargs, + ): """Same as command but used for retryable read commands.""" if read_preference is None: - read_preference = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY def _cmd(session, server, sock_info, read_preference): - return self._command(sock_info, command, value, - check, allowable_errors, read_preference, - codec_options, session=session, **kwargs) + return self._command( + sock_info, + command, + value, + check, + allowable_errors, + read_preference, + codec_options, + session=session, + **kwargs, + ) - return self.__client._retryable_read( - _cmd, read_preference, session) + return self.__client._retryable_read(_cmd, read_preference, session) def _list_collections(self, sock_info, session, read_preference, **kwargs): """Internal listCollections helper.""" - coll = self.get_collection( - "$cmd", read_preference=read_preference) - cmd = SON([("listCollections", 1), - ("cursor", {})]) + coll = self.get_collection("$cmd", read_preference=read_preference) + cmd = SON([("listCollections", 1), ("cursor", {})]) cmd.update(kwargs) - with self.__client._tmp_session( - session, close=False) as tmp_session: + with self.__client._tmp_session(session, close=False) as tmp_session: cursor = self._command( - sock_info, cmd, - read_preference=read_preference, - session=tmp_session)["cursor"] + sock_info, cmd, read_preference=read_preference, session=tmp_session + )["cursor"] cmd_cursor = CommandCursor( coll, cursor, sock_info.address, session=tmp_session, - explicit_session=session is not None) + explicit_session=session is not None, + ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - def list_collections(self, - session: Optional["ClientSession"] = None, - filter: Optional[Mapping[str, Any]] = None, - **kwargs: Any + def list_collections( + self, + session: Optional["ClientSession"] = None, + filter: Optional[Mapping[str, Any]] = None, + **kwargs: Any, ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the collections of this database. @@ -721,22 +788,21 @@ def list_collections(self, .. versionadded:: 3.6 """ if filter is not None: - kwargs['filter'] = filter - read_pref = ((session and session._txn_read_preference()) - or ReadPreference.PRIMARY) + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY def _cmd(session, server, sock_info, read_preference): return self._list_collections( - sock_info, session, read_preference=read_preference, - **kwargs) + sock_info, session, read_preference=read_preference, **kwargs + ) - return self.__client._retryable_read( - _cmd, read_pref, session) + return self.__client._retryable_read(_cmd, read_pref, session) - def list_collection_names(self, + def list_collection_names( + self, session: Optional["ClientSession"] = None, filter: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> List[str]: """Get a list of all the collection names in this database. @@ -771,12 +837,10 @@ def list_collection_names(self, if not filter or (len(filter) == 1 and "name" in filter): kwargs["nameOnly"] = True - return [result["name"] - for result in self.list_collections(session=session, **kwargs)] + return [result["name"] for result in self.list_collections(session=session, **kwargs)] - def drop_collection(self, - name_or_collection: Union[str, Collection], - session: Optional["ClientSession"] = None + def drop_collection( + self, name_or_collection: Union[str, Collection], session: Optional["ClientSession"] = None ) -> Dict[str, Any]: """Drop a collection. @@ -806,13 +870,17 @@ def drop_collection(self, with self.__client._socket_for_writes(session) as sock_info: return self._command( - sock_info, 'drop', value=name, - allowable_errors=['ns not found', 26], + sock_info, + "drop", + value=name, + allowable_errors=["ns not found", 26], write_concern=self._write_concern_for(session), parse_write_concern_error=True, - session=session) + session=session, + ) - def validate_collection(self, + def validate_collection( + self, name_or_collection: Union[str, Collection], scandata: bool = False, full: bool = False, @@ -853,12 +921,9 @@ def validate_collection(self, name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or " - "Collection") + raise TypeError("name_or_collection must be an instance of str or " "Collection") - cmd = SON([("validate", name), - ("scandata", scandata), - ("full", full)]) + cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) if background is not None: cmd["background"] = background @@ -875,10 +940,8 @@ def validate_collection(self, for _, res in result["raw"].items(): if "result" in res: info = res["result"] - if (info.find("exception") != -1 or - info.find("corrupt") != -1): - raise CollectionInvalid("%s invalid: " - "%s" % (name, info)) + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid("%s invalid: " "%s" % (name, info)) elif not res.get("valid", False): valid = False break @@ -900,13 +963,14 @@ def __next__(self) -> "Database[_DocumentType]": next = __next__ def __bool__(self) -> bool: - raise NotImplementedError("Database objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: database is not None") - - def dereference(self, dbref: DBRef, - session: Optional["ClientSession"] = None, - **kwargs: Any + raise NotImplementedError( + "Database objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + def dereference( + self, dbref: DBRef, session: Optional["ClientSession"] = None, **kwargs: Any ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. @@ -931,8 +995,8 @@ def dereference(self, dbref: DBRef, if not isinstance(dbref, DBRef): raise TypeError("cannot dereference a %s" % type(dbref)) if dbref.database is not None and dbref.database != self.__name: - raise ValueError("trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, - self.__name)) - return self[dbref.collection].find_one( - {"_id": dbref.id}, session=session, **kwargs) + raise ValueError( + "trying to dereference a DBRef that points to " + "another database (%r not %r)" % (dbref.database, self.__name) + ) + return self[dbref.collection].find_one({"_id": dbref.id}, session=session, **kwargs) diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 1bb599af37..53fbfd3428 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -18,7 +18,7 @@ from typing import Optional -class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): +class DriverInfo(namedtuple("DriverInfo", ["name", "version", "platform"])): """Info about a driver wrapping PyMongo. The MongoDB server logs PyMongo's name, version, and platform whenever @@ -27,11 +27,16 @@ class DriverInfo(namedtuple('DriverInfo', ['name', 'version', 'platform'])): like 'MyDriver', '1.2.3', 'some platform info'. Any of these strings may be None to accept PyMongo's default. """ - def __new__(cls, name: str, version: Optional[str] = None, platform: Optional[str] = None) -> "DriverInfo": + + def __new__( + cls, name: str, version: Optional[str] = None, platform: Optional[str] = None + ) -> "DriverInfo": self = super(DriverInfo, cls).__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): - raise TypeError("Wrong type for DriverInfo %s option, value " - "must be an instance of str" % (key,)) + raise TypeError( + "Wrong type for DriverInfo %s option, value " + "must be an instance of str" % (key,) + ) return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py index b076f490f4..4a6653f959 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -22,11 +22,10 @@ try: from pymongocrypt.auto_encrypter import AutoEncrypter from pymongocrypt.errors import MongoCryptError - from pymongocrypt.explicit_encrypter import ( - ExplicitEncrypter - ) + from pymongocrypt.explicit_encrypter import ExplicitEncrypter from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback + _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False @@ -36,13 +35,16 @@ from bson.binary import STANDARD, UUID_SUBTYPE, Binary from bson.codec_options import CodecOptions from bson.errors import BSONError -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, - _inflate_bson) +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts -from pymongo.errors import (ConfigurationError, EncryptionError, - InvalidOperation, ServerSelectionTimeoutError) +from pymongo.errors import ( + ConfigurationError, + EncryptionError, + InvalidOperation, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern @@ -57,8 +59,7 @@ _DATA_KEY_OPTS = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, - uuid_representation=STANDARD) +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD) @contextlib.contextmanager @@ -85,8 +86,9 @@ def __init__(self, client, key_vault_coll, mongocryptd_client, opts): self.client_ref = None self.key_vault_coll = key_vault_coll.with_options( codec_options=_KEY_VAULT_OPTS, - read_concern=ReadConcern(level='majority'), - write_concern=WriteConcern(w='majority')) + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ) self.mongocryptd_client = mongocryptd_client self.opts = opts self._spawned = False @@ -108,16 +110,19 @@ def kms_request(self, kms_context): # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. ctx = get_ssl_context( - None, # certfile - None, # passphrase - None, # ca_certs - None, # crlfile + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile False, # allow_invalid_certificates False, # allow_invalid_hostnames - False) # disable_ocsp_endpoint_check - opts = PoolOptions(connect_timeout=_KMS_CONNECT_TIMEOUT, - socket_timeout=_KMS_CONNECT_TIMEOUT, - ssl_context=ctx) + False, + ) # disable_ocsp_endpoint_check + opts = PoolOptions( + connect_timeout=_KMS_CONNECT_TIMEOUT, + socket_timeout=_KMS_CONNECT_TIMEOUT, + ssl_context=ctx, + ) host, port = parse_host(endpoint, _HTTPS_PORT) conn = _configured_socket((host, port), opts) try: @@ -125,7 +130,7 @@ def kms_request(self, kms_context): while kms_context.bytes_needed > 0: data = conn.recv(kms_context.bytes_needed) if not data: - raise OSError('KMS connection closed') + raise OSError("KMS connection closed") kms_context.feed(data) finally: conn.close() @@ -143,8 +148,7 @@ def collection_info(self, database, filter): :Returns: The first document from the listCollections command response as BSON. """ - with self.client_ref()[database].list_collections( - filter=RawBSONDocument(filter)) as cursor: + with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: for doc in cursor: return _dict_to_bson(doc, False, _DATA_KEY_OPTS) @@ -155,7 +159,7 @@ def spawn(self): successfully. """ self._spawned = True - args = [self.opts._mongocryptd_spawn_path or 'mongocryptd'] + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] args.extend(self.opts._mongocryptd_spawn_args) _spawn_daemon(args) @@ -176,15 +180,15 @@ def mark_command(self, database, cmd): inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) try: res = self.mongocryptd_client[database].command( - inflated_cmd, - codec_options=DEFAULT_RAW_BSON_OPTIONS) + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) except ServerSelectionTimeoutError: if self.opts._mongocryptd_bypass_spawn: raise self.spawn() res = self.mongocryptd_client[database].command( - inflated_cmd, - codec_options=DEFAULT_RAW_BSON_OPTIONS) + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) return res.raw def fetch_keys(self, filter): @@ -210,9 +214,9 @@ def insert_data_key(self, data_key): The _id of the inserted data key document. """ raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) - data_key_id = raw_doc.get('_id') + data_key_id = raw_doc.get("_id") if not isinstance(data_key_id, uuid.UUID): - raise TypeError('data_key _id must be a UUID') + raise TypeError("data_key _id must be a UUID") self.key_vault_coll.insert_one(raw_doc) return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE) @@ -247,6 +251,7 @@ class _Encrypter(object): This class is used to support automatic encryption and decryption of MongoDB commands.""" + def __init__(self, client, opts): """Create a _Encrypter for a client. @@ -268,8 +273,7 @@ def _get_internal_client(encrypter, mongo_client): # Else - limited pool size, use an internal client. if encrypter._internal_client is not None: return encrypter._internal_client - internal_client = mongo_client._duplicate( - minPoolSize=0, auto_encryption_opts=None) + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) encrypter._internal_client = internal_client return internal_client @@ -283,17 +287,17 @@ def _get_internal_client(encrypter, mongo_client): else: metadata_client = _get_internal_client(self, client) - db, coll = opts._key_vault_namespace.split('.', 1) + db, coll = opts._key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] mongocryptd_client = MongoClient( - opts._mongocryptd_uri, connect=False, - serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS) + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) - io_callbacks = _EncryptionIO( - metadata_client, key_vault_coll, mongocryptd_client, opts) - self._auto_encrypter = AutoEncrypter(io_callbacks, MongoCryptOptions( - opts._kms_providers, schema_map)) + io_callbacks = _EncryptionIO(metadata_client, key_vault_coll, mongocryptd_client, opts) + self._auto_encrypter = AutoEncrypter( + io_callbacks, MongoCryptOptions(opts._kms_providers, schema_map) + ) self._closed = False def encrypt(self, database, cmd, codec_options): @@ -312,8 +316,7 @@ def encrypt(self, database, cmd, codec_options): with _wrap_encryption_errors(): encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. - encrypt_cmd = _inflate_bson( - encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + encrypt_cmd = _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) return encrypt_cmd def decrypt(self, response): @@ -344,22 +347,21 @@ def close(self): class Algorithm(object): """An enum that defines the supported encryption algorithms.""" - AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = ( - "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") - AEAD_AES_256_CBC_HMAC_SHA_512_Random = ( - "AEAD_AES_256_CBC_HMAC_SHA_512-Random") + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" class ClientEncryption(object): """Explicit client-side field level encryption.""" - def __init__(self, + def __init__( + self, kms_providers: Mapping[str, Any], key_vault_namespace: str, key_vault_client: MongoClient, codec_options: CodecOptions, - kms_tls_options: Optional[Mapping[str, Any]] = None + kms_tls_options: Optional[Mapping[str, Any]] = None, ) -> None: """Explicit client-side field level encryption. @@ -434,30 +436,37 @@ def __init__(self, raise ConfigurationError( "client-side field level encryption requires the pymongocrypt " "library: install a compatible version with: " - "python -m pip install 'pymongo[encryption]'") + "python -m pip install 'pymongo[encryption]'" + ) if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of " - "bson.codec_options.CodecOptions") + raise TypeError( + "codec_options must be an instance of " "bson.codec_options.CodecOptions" + ) self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client self._codec_options = codec_options - db, coll = key_vault_namespace.split('.', 1) + db, coll = key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] - opts = AutoEncryptionOpts(kms_providers, key_vault_namespace, - kms_tls_options=kms_tls_options) - self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(None, key_vault_coll, None, opts) + opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options + ) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( + None, key_vault_coll, None, opts + ) self._encryption = ExplicitEncrypter( - self._io_callbacks, MongoCryptOptions(kms_providers, None)) + self._io_callbacks, MongoCryptOptions(kms_providers, None) + ) - def create_data_key(self, + def create_data_key( + self, kms_provider: str, master_key: Optional[Mapping[str, Any]] = None, - key_alt_names: Optional[Sequence[str]] = None + key_alt_names: Optional[Sequence[str]] = None, ) -> Binary: """Create and insert a new data key into the key vault collection. @@ -527,14 +536,15 @@ def create_data_key(self, self._check_closed() with _wrap_encryption_errors(): return self._encryption.create_data_key( - kms_provider, master_key=master_key, - key_alt_names=key_alt_names) + kms_provider, master_key=master_key, key_alt_names=key_alt_names + ) - def encrypt(self, + def encrypt( + self, value: Any, algorithm: str, key_id: Optional[Binary] = None, - key_alt_name: Optional[str] = None + key_alt_name: Optional[str] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -554,17 +564,17 @@ def encrypt(self, The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. """ self._check_closed() - if (key_id is not None and not ( - isinstance(key_id, Binary) and - key_id.subtype == UUID_SUBTYPE)): - raise TypeError( - 'key_id must be a bson.binary.Binary with subtype 4') + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") - doc = encode({'v': value}, codec_options=self._codec_options) + doc = encode({"v": value}, codec_options=self._codec_options) with _wrap_encryption_errors(): encrypted_doc = self._encryption.encrypt( - doc, algorithm, key_id=key_id, key_alt_name=key_alt_name) - return decode(encrypted_doc)['v'] + doc, algorithm, key_id=key_id, key_alt_name=key_alt_name + ) + return decode(encrypted_doc)["v"] def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. @@ -578,14 +588,12 @@ def decrypt(self, value: Binary) -> Any: """ self._check_closed() if not (isinstance(value, Binary) and value.subtype == 6): - raise TypeError( - 'value to decrypt must be a bson.binary.Binary with subtype 6') + raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") with _wrap_encryption_errors(): - doc = encode({'v': value}) + doc = encode({"v": value}) decrypted_doc = self._encryption.decrypt(doc) - return decode(decrypted_doc, - codec_options=self._codec_options)['v'] + return decode(decrypted_doc, codec_options=self._codec_options)["v"] def __enter__(self) -> "ClientEncryption": return self diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 21a13f6a5e..c206b4c8b5 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -19,6 +19,7 @@ try: import pymongocrypt + _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False @@ -27,23 +28,24 @@ from pymongo.uri_parser import _parse_kms_tls_options if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient + from pymongo.mongo_client import MongoClient class AutoEncryptionOpts(object): """Options to configure automatic client-side field level encryption.""" - def __init__(self, + def __init__( + self, kms_providers: Mapping[str, Any], key_vault_namespace: str, key_vault_client: Optional["MongoClient"] = None, schema_map: Optional[Mapping[str, Any]] = None, bypass_auto_encryption: Optional[bool] = False, - mongocryptd_uri: str = 'mongodb://localhost:27020', + mongocryptd_uri: str = "mongodb://localhost:27020", mongocryptd_bypass_spawn: bool = False, - mongocryptd_spawn_path: str = 'mongocryptd', + mongocryptd_spawn_path: str = "mongocryptd", mongocryptd_spawn_args: Optional[List[str]] = None, - kms_tls_options: Optional[Mapping[str, Any]] = None + kms_tls_options: Optional[Mapping[str, Any]] = None, ) -> None: """Options to configure automatic client-side field level encryption. @@ -149,7 +151,8 @@ def __init__(self, raise ConfigurationError( "client side encryption requires the pymongocrypt library: " "install a compatible version with: " - "python -m pip install 'pymongo[encryption]'") + "python -m pip install 'pymongo[encryption]'" + ) self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace @@ -160,12 +163,11 @@ def __init__(self, self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn self._mongocryptd_spawn_path = mongocryptd_spawn_path if mongocryptd_spawn_args is None: - mongocryptd_spawn_args = ['--idleShutdownTimeoutSecs=60'] + mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): - raise TypeError('mongocryptd_spawn_args must be a list') - if not any('idleShutdownTimeoutSecs' in s - for s in self._mongocryptd_spawn_args): - self._mongocryptd_spawn_args.append('--idleShutdownTimeoutSecs=60') + raise TypeError("mongocryptd_spawn_args must be a list") + if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): + self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) diff --git a/pymongo/errors.py b/pymongo/errors.py index 89c45730c9..a98a5a7fb8 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -13,8 +13,7 @@ # limitations under the License. """Exceptions raised by PyMongo.""" -from typing import (Any, Iterable, List, Mapping, Optional, Sequence, Tuple, - Union) +from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Union from bson.errors import * @@ -25,16 +24,15 @@ try: from ssl import CertificateError as _CertificateError except ImportError: + class _CertificateError(ValueError): # type: ignore pass class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" - def __init__(self, - message: str = '', - error_labels: Optional[Iterable[str]] = None - ) -> None: + + def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: super(PyMongoError, self).__init__(message) self._message = message self._error_labels = set(error_labels or []) @@ -75,17 +73,17 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. """ + errors: Union[Mapping[str, Any], Sequence] details: Union[Mapping[str, Any], Sequence] - def __init__(self, - message: str = '', - errors: Optional[Union[Mapping[str, Any], Sequence]] = None + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence]] = None ) -> None: error_labels = None if errors is not None: if isinstance(errors, dict): - error_labels = errors.get('errorLabels') + error_labels = errors.get("errorLabels") super(AutoReconnect, self).__init__(message, error_labels) self.errors = self.details = errors or [] @@ -121,12 +119,13 @@ class NotPrimaryError(AutoReconnect): .. versionadded:: 3.12 """ - def __init__(self, - message: str = '', - errors: Optional[Union[Mapping[str, Any], List]] = None + + def __init__( + self, message: str = "", errors: Optional[Union[Mapping[str, Any], List]] = None ) -> None: super(NotPrimaryError, self).__init__( - _format_detailed_error(message, errors), errors=errors) + _format_detailed_error(message, errors), errors=errors + ) class ServerSelectionTimeoutError(AutoReconnect): @@ -143,8 +142,7 @@ class ServerSelectionTimeoutError(AutoReconnect): class ConfigurationError(PyMongoError): - """Raised when something is incorrectly configured. - """ + """Raised when something is incorrectly configured.""" class OperationFailure(PyMongoError): @@ -154,7 +152,8 @@ class OperationFailure(PyMongoError): The :attr:`details` attribute. """ - def __init__(self, + def __init__( + self, error: str, code: Optional[int] = None, details: Optional[Mapping[str, Any]] = None, @@ -162,9 +161,10 @@ def __init__(self, ) -> None: error_labels = None if details is not None: - error_labels = details.get('errorLabels') + error_labels = details.get("errorLabels") super(OperationFailure, self).__init__( - _format_detailed_error(error, details), error_labels=error_labels) + _format_detailed_error(error, details), error_labels=error_labels + ) self.__code = code self.__details = details self.__max_wire_version = max_wire_version @@ -175,8 +175,7 @@ def _max_wire_version(self): @property def code(self) -> Optional[int]: - """The error code returned by the server, if any. - """ + """The error code returned by the server, if any.""" return self.__code @property @@ -192,7 +191,6 @@ def details(self) -> Optional[Mapping[str, Any]]: return self.__details - class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is invalidated on the server. @@ -245,17 +243,16 @@ class BulkWriteError(OperationFailure): .. versionadded:: 2.7 """ + details: Mapping[str, Any] def __init__(self, results: Mapping[str, Any]) -> None: - super(BulkWriteError, self).__init__( - "batch op errors occurred", 65, results) + super(BulkWriteError, self).__init__("batch op errors occurred", 65, results) def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) - class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -273,8 +270,8 @@ class InvalidURI(ConfigurationError): class DocumentTooLarge(InvalidDocument): - """Raised when an encoded document is too large for the connected server. - """ + """Raised when an encoded document is too large for the connected server.""" + pass @@ -298,6 +295,6 @@ def cause(self) -> Exception: class _OperationCancelled(AutoReconnect): - """Internal error raised when a socket operation is cancelled. - """ + """Internal error raised when a socket operation is cancelled.""" + pass diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index f0857f8f45..0b92d9fa2b 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -40,22 +40,29 @@ class CommandLogger(monitoring.CommandListener): logs them at the `INFO` severity level using :mod:`logging`. .. versionadded:: 3.11 """ + def started(self, event: monitoring.CommandStartedEvent) -> None: - logging.info("Command {0.command_name} with request id " - "{0.request_id} started on server " - "{0.connection_id}".format(event)) + logging.info( + "Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event) + ) def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: - logging.info("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "succeeded in {0.duration_micros} " - "microseconds".format(event)) + logging.info( + "Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event) + ) def failed(self, event: monitoring.CommandFailedEvent) -> None: - logging.info("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "failed in {0.duration_micros} " - "microseconds".format(event)) + logging.info( + "Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event) + ) class ServerLogger(monitoring.ServerListener): @@ -68,9 +75,9 @@ class ServerLogger(monitoring.ServerListener): .. versionadded:: 3.11 """ + def opened(self, event: monitoring.ServerOpeningEvent) -> None: - logging.info("Server {0.server_address} added to topology " - "{0.topology_id}".format(event)) + logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: previous_server_type = event.previous_description.server_type @@ -80,11 +87,13 @@ def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) - logging.info( "Server {0.server_address} changed type from " "{0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event)) + "{0.new_description.server_type_name}".format(event) + ) def closed(self, event: monitoring.ServerClosedEvent) -> None: - logging.warning("Server {0.server_address} removed from topology " - "{0.topology_id}".format(event)) + logging.warning( + "Server {0.server_address} removed from topology " "{0.topology_id}".format(event) + ) class HeartbeatLogger(monitoring.ServerHeartbeatListener): @@ -97,19 +106,22 @@ class HeartbeatLogger(monitoring.ServerHeartbeatListener): .. versionadded:: 3.11 """ + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: - logging.info("Heartbeat sent to server " - "{0.connection_id}".format(event)) + logging.info("Heartbeat sent to server " "{0.connection_id}".format(event)) def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: # The reply.document attribute was added in PyMongo 3.4. - logging.info("Heartbeat to server {0.connection_id} " - "succeeded with reply " - "{0.reply.document}".format(event)) + logging.info( + "Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event) + ) def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: - logging.warning("Heartbeat to server {0.connection_id} " - "failed with error {0.reply}".format(event)) + logging.warning( + "Heartbeat to server {0.connection_id} " "failed with error {0.reply}".format(event) + ) class TopologyLogger(monitoring.TopologyListener): @@ -122,13 +134,14 @@ class TopologyLogger(monitoring.TopologyListener): .. versionadded:: 3.11 """ + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: - logging.info("Topology with id {0.topology_id} " - "opened".format(event)) + logging.info("Topology with id {0.topology_id} " "opened".format(event)) def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: - logging.info("Topology description updated for " - "topology id {0.topology_id}".format(event)) + logging.info( + "Topology description updated for " "topology id {0.topology_id}".format(event) + ) previous_topology_type = event.previous_description.topology_type new_topology_type = event.new_description.topology_type if new_topology_type != previous_topology_type: @@ -136,7 +149,8 @@ def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) logging.info( "Topology {0.topology_id} changed type from " "{0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event)) + "{0.new_description.topology_type_name}".format(event) + ) # The has_writable_server and has_readable_server methods # were added in PyMongo 3.4. if not event.new_description.has_writable_server(): @@ -145,8 +159,7 @@ def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) logging.warning("No readable servers available.") def closed(self, event: monitoring.TopologyClosedEvent) -> None: - logging.info("Topology with id {0.topology_id} " - "closed".format(event)) + logging.info("Topology with id {0.topology_id} " "closed".format(event)) class ConnectionPoolLogger(monitoring.ConnectionPoolListener): @@ -166,6 +179,7 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): .. versionadded:: 3.11 """ + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: logging.info("[pool {0.address}] pool created".format(event)) @@ -179,30 +193,41 @@ def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: logging.info("[pool {0.address}] pool closed".format(event)) def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection created".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " "connection created".format(event) + ) def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection setup succeeded".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection setup succeeded".format(event) + ) def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection closed, reason: " - "{0.reason}".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection closed, reason: " + "{0.reason}".format(event) + ) - def connection_check_out_started(self, event: monitoring.ConnectionCheckOutStartedEvent) -> None: - logging.info("[pool {0.address}] connection check out " - "started".format(event)) + def connection_check_out_started( + self, event: monitoring.ConnectionCheckOutStartedEvent + ) -> None: + logging.info("[pool {0.address}] connection check out " "started".format(event)) def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: - logging.info("[pool {0.address}] connection check out " - "failed, reason: {0.reason}".format(event)) + logging.info( + "[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event) + ) def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection checked out of pool".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection checked out of pool".format(event) + ) def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: - logging.info("[pool {0.address}][conn #{0.connection_id}] " - "connection checked into pool".format(event)) + logging.info( + "[pool {0.address}][conn #{0.connection_id}] " + "connection checked into pool".format(event) + ) diff --git a/pymongo/hello.py b/pymongo/hello.py index ba09d80e32..92e9b426c0 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -26,36 +26,36 @@ class HelloCompat: - CMD = 'hello' - LEGACY_CMD = 'ismaster' - PRIMARY = 'isWritablePrimary' - LEGACY_PRIMARY = 'ismaster' - LEGACY_ERROR = 'not master' + CMD = "hello" + LEGACY_CMD = "ismaster" + PRIMARY = "isWritablePrimary" + LEGACY_PRIMARY = "ismaster" + LEGACY_ERROR = "not master" def _get_server_type(doc): """Determine the server type from a hello response.""" - if not doc.get('ok'): + if not doc.get("ok"): return SERVER_TYPE.Unknown - if doc.get('serviceId'): + if doc.get("serviceId"): return SERVER_TYPE.LoadBalancer - elif doc.get('isreplicaset'): + elif doc.get("isreplicaset"): return SERVER_TYPE.RSGhost - elif doc.get('setName'): - if doc.get('hidden'): + elif doc.get("setName"): + if doc.get("hidden"): return SERVER_TYPE.RSOther elif doc.get(HelloCompat.PRIMARY): return SERVER_TYPE.RSPrimary elif doc.get(HelloCompat.LEGACY_PRIMARY): return SERVER_TYPE.RSPrimary - elif doc.get('secondary'): + elif doc.get("secondary"): return SERVER_TYPE.RSSecondary - elif doc.get('arbiterOnly'): + elif doc.get("arbiterOnly"): return SERVER_TYPE.RSArbiter else: return SERVER_TYPE.RSOther - elif doc.get('msg') == 'isdbgrid': + elif doc.get("msg") == "isdbgrid": return SERVER_TYPE.Mongos else: return SERVER_TYPE.Standalone @@ -66,8 +66,8 @@ class Hello(Generic[_DocumentType]): .. versionadded:: 3.12 """ - __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable', - '_awaitable') + + __slots__ = ("_doc", "_server_type", "_is_writable", "_is_readable", "_awaitable") def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: self._server_type = _get_server_type(doc) @@ -76,11 +76,10 @@ def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: SERVER_TYPE.RSPrimary, SERVER_TYPE.Standalone, SERVER_TYPE.Mongos, - SERVER_TYPE.LoadBalancer) + SERVER_TYPE.LoadBalancer, + ) - self._is_readable = ( - self.server_type == SERVER_TYPE.RSSecondary - or self._is_writable) + self._is_readable = self.server_type == SERVER_TYPE.RSSecondary or self._is_writable self._awaitable = awaitable @property @@ -98,64 +97,70 @@ def server_type(self) -> int: @property def all_hosts(self) -> Set[Tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" - return set(map(common.clean_node, itertools.chain( - self._doc.get('hosts', []), - self._doc.get('passives', []), - self._doc.get('arbiters', [])))) + return set( + map( + common.clean_node, + itertools.chain( + self._doc.get("hosts", []), + self._doc.get("passives", []), + self._doc.get("arbiters", []), + ), + ) + ) @property def tags(self) -> Mapping[str, Any]: """Replica set member tags or empty dict.""" - return self._doc.get('tags', {}) + return self._doc.get("tags", {}) @property def primary(self) -> Optional[Tuple[str, int]]: """This server's opinion about who the primary is, or None.""" - if self._doc.get('primary'): - return common.partition_node(self._doc['primary']) + if self._doc.get("primary"): + return common.partition_node(self._doc["primary"]) else: return None @property def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" - return self._doc.get('setName') + return self._doc.get("setName") @property def max_bson_size(self) -> int: - return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE) + return self._doc.get("maxBsonObjectSize", common.MAX_BSON_SIZE) @property def max_message_size(self) -> int: - return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size) + return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) @property def max_write_batch_size(self) -> int: - return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE) + return self._doc.get("maxWriteBatchSize", common.MAX_WRITE_BATCH_SIZE) @property def min_wire_version(self) -> int: - return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION) + return self._doc.get("minWireVersion", common.MIN_WIRE_VERSION) @property def max_wire_version(self) -> int: - return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION) + return self._doc.get("maxWireVersion", common.MAX_WIRE_VERSION) @property def set_version(self) -> Optional[int]: - return self._doc.get('setVersion') + return self._doc.get("setVersion") @property def election_id(self) -> Optional[ObjectId]: - return self._doc.get('electionId') + return self._doc.get("electionId") @property def cluster_time(self) -> Optional[Mapping[str, Any]]: - return self._doc.get('$clusterTime') + return self._doc.get("$clusterTime") @property def logical_session_timeout_minutes(self) -> Optional[int]: - return self._doc.get('logicalSessionTimeoutMinutes') + return self._doc.get("logicalSessionTimeoutMinutes") @property def is_writable(self) -> bool: @@ -167,18 +172,18 @@ def is_readable(self) -> bool: @property def me(self) -> Optional[Tuple[str, int]]: - me = self._doc.get('me') + me = self._doc.get("me") if me: return common.clean_node(me) return None @property def last_write_date(self) -> Optional[datetime.datetime]: - return self._doc.get('lastWrite', {}).get('lastWriteDate') + return self._doc.get("lastWrite", {}).get("lastWriteDate") @property def compressors(self) -> Optional[List[str]]: - return self._doc.get('compression') + return self._doc.get("compression") @property def sasl_supported_mechs(self) -> List[str]: @@ -190,16 +195,16 @@ def sasl_supported_mechs(self) -> List[str]: ["SCRAM-SHA-1", "SCRAM-SHA-256"] """ - return self._doc.get('saslSupportedMechs', []) + return self._doc.get("saslSupportedMechs", []) @property def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: """The speculativeAuthenticate field.""" - return self._doc.get('speculativeAuthenticate') + return self._doc.get("speculativeAuthenticate") @property def topology_version(self) -> Optional[Mapping[str, Any]]: - return self._doc.get('topologyVersion') + return self._doc.get("topologyVersion") @property def awaitable(self) -> bool: @@ -207,8 +212,8 @@ def awaitable(self) -> bool: @property def service_id(self) -> Optional[ObjectId]: - return self._doc.get('serviceId') + return self._doc.get("serviceId") @property def hello_ok(self) -> bool: - return self._doc.get('helloOk', False) + return self._doc.get("helloOk", False) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index b2726dca6b..f12c1e1655 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -21,36 +21,51 @@ from bson.son import SON from pymongo import ASCENDING -from pymongo.errors import (CursorNotFound, DuplicateKeyError, - ExecutionTimeout, NotPrimaryError, - OperationFailure, WriteConcernError, WriteError, - WTimeoutError) +from pymongo.errors import ( + CursorNotFound, + DuplicateKeyError, + ExecutionTimeout, + NotPrimaryError, + OperationFailure, + WriteConcernError, + WriteError, + WTimeoutError, +) from pymongo.hello import HelloCompat # From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES = frozenset([ - 11600, # InterruptedAtShutdown - 91, # ShutdownInProgress -]) +_SHUTDOWN_CODES = frozenset( + [ + 11600, # InterruptedAtShutdown + 91, # ShutdownInProgress + ] +) # From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). -_NOT_PRIMARY_CODES = frozenset([ - 10058, # LegacyNotPrimary <=3.2 "not primary" error code - 10107, # NotWritablePrimary - 13435, # NotPrimaryNoSecondaryOk - 11602, # InterruptedDueToReplStateChange - 13436, # NotPrimaryOrSecondary - 189, # PrimarySteppedDown -]) | _SHUTDOWN_CODES +_NOT_PRIMARY_CODES = ( + frozenset( + [ + 10058, # LegacyNotPrimary <=3.2 "not primary" error code + 10107, # NotWritablePrimary + 13435, # NotPrimaryNoSecondaryOk + 11602, # InterruptedDueToReplStateChange + 13436, # NotPrimaryOrSecondary + 189, # PrimarySteppedDown + ] + ) + | _SHUTDOWN_CODES +) # From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset([ - 7, # HostNotFound - 6, # HostUnreachable - 89, # NetworkTimeout - 9001, # SocketException - 262, # ExceededTimeLimit -]) +_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset( + [ + 7, # HostNotFound + 6, # HostUnreachable + 89, # NetworkTimeout + 9001, # SocketException + 262, # ExceededTimeLimit + ] +) def _gen_index_name(keys): @@ -71,8 +86,9 @@ def _index_list(key_or_list, direction=None): if isinstance(key_or_list, abc.ItemsView): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): - raise TypeError("if no direction is specified, " - "key_or_list must be an instance of list") + raise TypeError( + "if no direction is specified, " "key_or_list must be an instance of list" + ) return key_or_list @@ -82,44 +98,44 @@ def _index_document(index_list): Takes a list of (key, direction) pairs. """ if isinstance(index_list, abc.Mapping): - raise TypeError("passing a dict to sort/create_index/hint is not " - "allowed - use a list of tuples instead. did you " - "mean %r?" % list(index_list.items())) + raise TypeError( + "passing a dict to sort/create_index/hint is not " + "allowed - use a list of tuples instead. did you " + "mean %r?" % list(index_list.items()) + ) elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, " - "not: " + repr(index_list)) + raise TypeError("must use a list of (key, direction) pairs, " "not: " + repr(index_list)) if not len(index_list): raise ValueError("key_or_list must not be the empty list") index: SON[str, Any] = SON() for (key, value) in index_list: if not isinstance(key, str): - raise TypeError( - "first item in each key pair must be an instance of str") + raise TypeError("first item in each key pair must be an instance of str") if not isinstance(value, (str, int, abc.Mapping)): - raise TypeError("second item in each key pair must be 1, -1, " - "'2d', or another valid MongoDB index specifier.") + raise TypeError( + "second item in each key pair must be 1, -1, " + "'2d', or another valid MongoDB index specifier." + ) index[key] = value return index -def _check_command_response(response, max_wire_version, - allowable_errors=None, - parse_write_concern_error=False): - """Check the response to a command for errors. - """ +def _check_command_response( + response, max_wire_version, allowable_errors=None, parse_write_concern_error=False +): + """Check the response to a command for errors.""" if "ok" not in response: # Server didn't recognize our message as a command. - raise OperationFailure(response.get("$err"), - response.get("code"), - response, - max_wire_version) + raise OperationFailure( + response.get("$err"), response.get("code"), response, max_wire_version + ) - if parse_write_concern_error and 'writeConcernError' in response: + if parse_write_concern_error and "writeConcernError" in response: _error = response["writeConcernError"] _labels = response.get("errorLabels") if _labels: - _error.update({'errorLabels': _labels}) + _error.update({"errorLabels": _labels}) _raise_write_concern_error(_error) if response["ok"]: @@ -176,12 +192,10 @@ def _raise_last_write_error(write_errors): def _raise_write_concern_error(error): - if "errInfo" in error and error["errInfo"].get('wtimeout'): + if "errInfo" in error and error["errInfo"].get("wtimeout"): # Make sure we raise WTimeoutError - raise WTimeoutError( - error.get("errmsg"), error.get("code"), error) - raise WriteConcernError( - error.get("errmsg"), error.get("code"), error) + raise WTimeoutError(error.get("errmsg"), error.get("code"), error) + raise WriteConcernError(error.get("errmsg"), error.get("code"), error) def _get_wce_doc(result): @@ -197,8 +211,7 @@ def _get_wce_doc(result): def _check_write_command_response(result): - """Backward compatibility helper for write command error handling. - """ + """Backward compatibility helper for write command error handling.""" # Prefer write errors over write concern errors write_errors = result.get("writeErrors") if write_errors: @@ -223,12 +236,12 @@ def _fields_list_to_dict(fields, option_name): if isinstance(fields, (abc.Sequence, abc.Set)): if not all(isinstance(field, str) for field in fields): - raise TypeError("%s must be a list of key names, each an " - "instance of str" % (option_name,)) + raise TypeError( + "%s must be a list of key names, each an " "instance of str" % (option_name,) + ) return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or " - "list of key names" % (option_name,)) + raise TypeError("%s must be a mapping or " "list of key names" % (option_name,)) def _handle_exception(): @@ -240,8 +253,7 @@ def _handle_exception(): if sys.stderr: einfo = sys.exc_info() try: - traceback.print_exception(einfo[0], einfo[1], einfo[2], - None, sys.stderr) + traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) except IOError: pass finally: diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 6bc2fe7232..28b0bb615e 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -30,28 +30,27 @@ from pymongo.errors import ConfigurationError from pymongo.server_type import SERVER_TYPE - # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. IDLE_WRITE_PERIOD = 10 SMALLEST_MAX_STALENESS = 90 -def _validate_max_staleness(max_staleness, - heartbeat_frequency): +def _validate_max_staleness(max_staleness, heartbeat_frequency): # We checked for max staleness -1 before this, it must be positive here. if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: raise ConfigurationError( "maxStalenessSeconds must be at least heartbeatFrequencyMS +" " %d seconds. maxStalenessSeconds is set to %d," - " heartbeatFrequencyMS is set to %d." % ( - IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000)) + " heartbeatFrequencyMS is set to %d." + % (IDLE_WRITE_PERIOD, max_staleness, heartbeat_frequency * 1000) + ) if max_staleness < SMALLEST_MAX_STALENESS: raise ConfigurationError( "maxStalenessSeconds must be at least %d. " - "maxStalenessSeconds is set to %d." % ( - SMALLEST_MAX_STALENESS, max_staleness)) + "maxStalenessSeconds is set to %d." % (SMALLEST_MAX_STALENESS, max_staleness) + ) def _with_primary(max_staleness, selection): @@ -63,9 +62,10 @@ def _with_primary(max_staleness, selection): if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. staleness = ( - (s.last_update_time - s.last_write_date) - - (primary.last_update_time - primary.last_write_date) + - selection.heartbeat_frequency) + (s.last_update_time - s.last_write_date) + - (primary.last_update_time - primary.last_write_date) + + selection.heartbeat_frequency + ) if staleness <= max_staleness: sds.append(s) @@ -88,9 +88,7 @@ def _no_primary(max_staleness, selection): for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. - staleness = (smax.last_write_date - - s.last_write_date + - selection.heartbeat_frequency) + staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency if staleness <= max_staleness: sds.append(s) diff --git a/pymongo/message.py b/pymongo/message.py index ac6000cfd2..18cf0a6bf3 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -27,25 +27,27 @@ from typing import Any import bson -from bson import (CodecOptions, - encode, - _decode_selective, - _dict_to_bson, - _make_c_string) +from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode from bson.int64 import Int64 -from bson.raw_bson import (DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, - _inflate_bson) +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON try: from pymongo import _cmessage # type: ignore[attr-defined] + _use_c = True except ImportError: _use_c = False -from pymongo.errors import (ConfigurationError, CursorNotFound, - DocumentTooLarge, ExecutionTimeout, - InvalidOperation, NotPrimaryError, - OperationFailure, ProtocolError) +from pymongo.errors import ( + ConfigurationError, + CursorNotFound, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + ProtocolError, +) from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -60,26 +62,21 @@ _UPDATE = 1 _DELETE = 2 -_EMPTY = b'' -_BSONOBJ = b'\x03' -_ZERO_8 = b'\x00' -_ZERO_16 = b'\x00\x00' -_ZERO_32 = b'\x00\x00\x00\x00' -_ZERO_64 = b'\x00\x00\x00\x00\x00\x00\x00\x00' -_SKIPLIM = b'\x00\x00\x00\x00\xff\xff\xff\xff' +_EMPTY = b"" +_BSONOBJ = b"\x03" +_ZERO_8 = b"\x00" +_ZERO_16 = b"\x00\x00" +_ZERO_32 = b"\x00\x00\x00\x00" +_ZERO_64 = b"\x00\x00\x00\x00\x00\x00\x00\x00" +_SKIPLIM = b"\x00\x00\x00\x00\xff\xff\xff\xff" _OP_MAP = { - _INSERT: b'\x04documents\x00\x00\x00\x00\x00', - _UPDATE: b'\x04updates\x00\x00\x00\x00\x00', - _DELETE: b'\x04deletes\x00\x00\x00\x00\x00', -} -_FIELD_MAP = { - 'insert': 'documents', - 'update': 'updates', - 'delete': 'deletes' + _INSERT: b"\x04documents\x00\x00\x00\x00\x00", + _UPDATE: b"\x04updates\x00\x00\x00\x00\x00", + _DELETE: b"\x04deletes\x00\x00\x00\x00\x00", } +_FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions( - unicode_decode_error_handler='replace') +_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions(unicode_decode_error_handler="replace") def _randint(): @@ -96,9 +93,7 @@ def _maybe_add_read_preference(spec, read_preference): # for maximum backwards compatibility, don't add $readPreference for # secondaryPreferred unless tags or maxStalenessSeconds are in use (setting # the secondaryOkay bit has the same effect). - if mode and ( - mode != ReadPreference.SECONDARY_PREFERRED.mode or - len(document) > 1): + if mode and (mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): if "$query" not in spec: spec = SON([("$query", spec)]) spec["$readPreference"] = document @@ -107,8 +102,7 @@ def _maybe_add_read_preference(spec, read_preference): def _convert_exception(exception): """Convert an Exception into a failure document for publishing.""" - return {'errmsg': str(exception), - 'errtype': exception.__class__.__name__} + return {"errmsg": str(exception), "errtype": exception.__class__.__name__} def _convert_write_result(operation, command, result): @@ -121,21 +115,17 @@ def _convert_write_result(operation, command, result): if errmsg: # The write was successful on at least the primary so don't return. if result.get("wtimeout"): - res["writeConcernError"] = {"errmsg": errmsg, - "code": 64, - "errInfo": {"wtimeout": True}} + res["writeConcernError"] = {"errmsg": errmsg, "code": 64, "errInfo": {"wtimeout": True}} else: # The write failed. - error = {"index": 0, - "code": result.get("code", 8), - "errmsg": errmsg} + error = {"index": 0, "code": result.get("code", 8), "errmsg": errmsg} if "errInfo" in result: error["errInfo"] = result["errInfo"] res["writeErrors"] = [error] return res if operation == "insert": # GLE result for insert is always 0 in most MongoDB versions. - res["n"] = len(command['documents']) + res["n"] = len(command["documents"]) elif operation == "update": if "upserted" in result: res["upserted"] = [{"index": 0, "_id": result["upserted"]}] @@ -144,102 +134,149 @@ def _convert_write_result(operation, command, result): elif result.get("updatedExisting") is False and affected == 1: # If _id is in both the update document *and* the query spec # the update document _id takes precedence. - update = command['updates'][0] + update = command["updates"][0] _id = update["u"].get("_id", update["q"].get("_id")) res["upserted"] = [{"index": 0, "_id": _id}] return res -_OPTIONS = SON([ - ('tailable', 2), - ('oplogReplay', 8), - ('noCursorTimeout', 16), - ('awaitData', 32), - ('allowPartialResults', 128)]) - - -_MODIFIERS = SON([ - ('$query', 'filter'), - ('$orderby', 'sort'), - ('$hint', 'hint'), - ('$comment', 'comment'), - ('$maxScan', 'maxScan'), - ('$maxTimeMS', 'maxTimeMS'), - ('$max', 'max'), - ('$min', 'min'), - ('$returnKey', 'returnKey'), - ('$showRecordId', 'showRecordId'), - ('$showDiskLoc', 'showRecordId'), # <= MongoDb 3.0 - ('$snapshot', 'snapshot')]) - - -def _gen_find_command(coll, spec, projection, skip, limit, batch_size, options, - read_concern, collation=None, session=None, - allow_disk_use=None): +_OPTIONS = SON( + [ + ("tailable", 2), + ("oplogReplay", 8), + ("noCursorTimeout", 16), + ("awaitData", 32), + ("allowPartialResults", 128), + ] +) + + +_MODIFIERS = SON( + [ + ("$query", "filter"), + ("$orderby", "sort"), + ("$hint", "hint"), + ("$comment", "comment"), + ("$maxScan", "maxScan"), + ("$maxTimeMS", "maxTimeMS"), + ("$max", "max"), + ("$min", "min"), + ("$returnKey", "returnKey"), + ("$showRecordId", "showRecordId"), + ("$showDiskLoc", "showRecordId"), # <= MongoDb 3.0 + ("$snapshot", "snapshot"), + ] +) + + +def _gen_find_command( + coll, + spec, + projection, + skip, + limit, + batch_size, + options, + read_concern, + collation=None, + session=None, + allow_disk_use=None, +): """Generate a find command document.""" - cmd = SON([('find', coll)]) - if '$query' in spec: - cmd.update([(_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) - for key, val in spec.items()]) - if '$explain' in cmd: - cmd.pop('$explain') - if '$readPreference' in cmd: - cmd.pop('$readPreference') + cmd = SON([("find", coll)]) + if "$query" in spec: + cmd.update( + [ + (_MODIFIERS[key], val) if key in _MODIFIERS else (key, val) + for key, val in spec.items() + ] + ) + if "$explain" in cmd: + cmd.pop("$explain") + if "$readPreference" in cmd: + cmd.pop("$readPreference") else: - cmd['filter'] = spec + cmd["filter"] = spec if projection: - cmd['projection'] = projection + cmd["projection"] = projection if skip: - cmd['skip'] = skip + cmd["skip"] = skip if limit: - cmd['limit'] = abs(limit) + cmd["limit"] = abs(limit) if limit < 0: - cmd['singleBatch'] = True + cmd["singleBatch"] = True if batch_size: - cmd['batchSize'] = batch_size + cmd["batchSize"] = batch_size if read_concern.level and not (session and session.in_transaction): - cmd['readConcern'] = read_concern.document + cmd["readConcern"] = read_concern.document if collation: - cmd['collation'] = collation + cmd["collation"] = collation if allow_disk_use is not None: - cmd['allowDiskUse'] = allow_disk_use + cmd["allowDiskUse"] = allow_disk_use if options: - cmd.update([(opt, True) - for opt, val in _OPTIONS.items() - if options & val]) + cmd.update([(opt, True) for opt, val in _OPTIONS.items() if options & val]) return cmd def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms): """Generate a getMore command document.""" - cmd = SON([('getMore', cursor_id), - ('collection', coll)]) + cmd = SON([("getMore", cursor_id), ("collection", coll)]) if batch_size: - cmd['batchSize'] = batch_size + cmd["batchSize"] = batch_size if max_await_time_ms is not None: - cmd['maxTimeMS'] = max_await_time_ms + cmd["maxTimeMS"] = max_await_time_ms return cmd class _Query(object): """A query operation.""" - __slots__ = ('flags', 'db', 'coll', 'ntoskip', 'spec', - 'fields', 'codec_options', 'read_preference', 'limit', - 'batch_size', 'name', 'read_concern', 'collation', - 'session', 'client', 'allow_disk_use', '_as_command', - 'exhaust') + __slots__ = ( + "flags", + "db", + "coll", + "ntoskip", + "spec", + "fields", + "codec_options", + "read_preference", + "limit", + "batch_size", + "name", + "read_concern", + "collation", + "session", + "client", + "allow_disk_use", + "_as_command", + "exhaust", + ) # For compatibility with the _GetMore class. sock_mgr = None cursor_id = None - def __init__(self, flags, db, coll, ntoskip, spec, fields, - codec_options, read_preference, limit, - batch_size, read_concern, collation, session, client, - allow_disk_use, exhaust): + def __init__( + self, + flags, + db, + coll, + ntoskip, + spec, + fields, + codec_options, + read_preference, + limit, + batch_size, + read_concern, + collation, + session, + client, + allow_disk_use, + exhaust, + ): self.flags = flags self.db = db self.coll = coll @@ -255,7 +292,7 @@ def __init__(self, flags, db, coll, ntoskip, spec, fields, self.session = session self.client = client self.allow_disk_use = allow_disk_use - self.name = 'find' + self.name = "find" self._as_command = None self.exhaust = exhaust @@ -271,10 +308,10 @@ def use_command(self, sock_info): use_find_cmd = True elif not self.read_concern.ok_for_legacy: raise ConfigurationError( - 'read concern level of %s is not valid ' - 'with a max wire version of %d.' - % (self.read_concern.level, - sock_info.max_wire_version)) + "read concern level of %s is not valid " + "with a max wire version of %d." + % (self.read_concern.level, sock_info.max_wire_version) + ) sock_info.validate_session(self.client, self.session) return use_find_cmd @@ -286,14 +323,23 @@ def as_command(self, sock_info): if self._as_command is not None: return self._as_command - explain = '$explain' in self.spec + explain = "$explain" in self.spec cmd = _gen_find_command( - self.coll, self.spec, self.fields, self.ntoskip, - self.limit, self.batch_size, self.flags, self.read_concern, - self.collation, self.session, self.allow_disk_use) + self.coll, + self.spec, + self.fields, + self.ntoskip, + self.limit, + self.batch_size, + self.flags, + self.read_concern, + self.collation, + self.session, + self.allow_disk_use, + ) if explain: - self.name = 'explain' - cmd = SON([('explain', cmd)]) + self.name = "explain" + cmd = SON([("explain", cmd)]) session = self.session sock_info.add_server_api(cmd) if session: @@ -323,8 +369,13 @@ def get_message(self, read_preference, sock_info, use_cmd=False): if use_cmd: spec = self.as_command(sock_info)[0] request_id, msg, size, _ = _op_msg( - 0, spec, self.db, read_preference, self.codec_options, - ctx=sock_info.compression_context) + 0, + spec, + self.db, + read_preference, + self.codec_options, + ctx=sock_info.compression_context, + ) return request_id, msg, size # OP_QUERY treats ntoreturn of -1 and 1 the same, return @@ -340,23 +391,52 @@ def get_message(self, read_preference, sock_info, use_cmd=False): if sock_info.is_mongos: spec = _maybe_add_read_preference(spec, read_preference) - return _query(flags, ns, self.ntoskip, ntoreturn, - spec, None if use_cmd else self.fields, - self.codec_options, ctx=sock_info.compression_context) + return _query( + flags, + ns, + self.ntoskip, + ntoreturn, + spec, + None if use_cmd else self.fields, + self.codec_options, + ctx=sock_info.compression_context, + ) class _GetMore(object): """A getmore operation.""" - __slots__ = ('db', 'coll', 'ntoreturn', 'cursor_id', 'max_await_time_ms', - 'codec_options', 'read_preference', 'session', 'client', - 'sock_mgr', '_as_command', 'exhaust') - - name = 'getMore' - - def __init__(self, db, coll, ntoreturn, cursor_id, codec_options, - read_preference, session, client, max_await_time_ms, - sock_mgr, exhaust): + __slots__ = ( + "db", + "coll", + "ntoreturn", + "cursor_id", + "max_await_time_ms", + "codec_options", + "read_preference", + "session", + "client", + "sock_mgr", + "_as_command", + "exhaust", + ) + + name = "getMore" + + def __init__( + self, + db, + coll, + ntoreturn, + cursor_id, + codec_options, + read_preference, + session, + client, + max_await_time_ms, + sock_mgr, + exhaust, + ): self.db = db self.coll = coll self.ntoreturn = ntoreturn @@ -390,9 +470,9 @@ def as_command(self, sock_info): if self._as_command is not None: return self._as_command - cmd = _gen_get_more_command(self.cursor_id, self.coll, - self.ntoreturn, - self.max_await_time_ms) + cmd = _gen_get_more_command( + self.cursor_id, self.coll, self.ntoreturn, self.max_await_time_ms + ) if self.session: self.session._apply_to(cmd, False, self.read_preference, sock_info) @@ -418,8 +498,8 @@ def get_message(self, dummy0, sock_info, use_cmd=False): else: flags = 0 request_id, msg, size, _ = _op_msg( - flags, spec, self.db, None, self.codec_options, - ctx=sock_info.compression_context) + flags, spec, self.db, None, self.codec_options, ctx=sock_info.compression_context + ) return request_id, msg, size return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) @@ -451,6 +531,7 @@ def use_command(self, sock_info): class _CursorAddress(tuple): """The server address (host, port) of a cursor, with namespace property.""" + __namespace: Any def __new__(cls, address, namespace): @@ -470,8 +551,7 @@ def __hash__(self): def __eq__(self, other): if isinstance(other, _CursorAddress): - return (tuple(self) == tuple(other) - and self.namespace == other.namespace) + return tuple(self) == tuple(other) and self.namespace == other.namespace return NotImplemented def __ne__(self, other): @@ -481,19 +561,21 @@ def __ne__(self, other): _pack_compression_header = struct.Struct(" max_message_size)) + doc_too_large = idx == 0 and (new_message_size > max_message_size) # When OP_MSG is used unacknowleged we have to check # document size client side or applications won't be notified. # Otherwise we let the server deal with documents that are too large # since ordered=False causes those documents to be skipped instead of # halting the bulk write operation. - unacked_doc_too_large = (not ack and (doc_length > max_bson_size)) + unacked_doc_too_large = not ack and (doc_length > max_bson_size) if doc_too_large or unacked_doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] - _raise_document_too_large( - write_op, len(value), max_bson_size) + _raise_document_too_large(write_op, len(value), max_bson_size) # We have enough data, return this batch. if new_message_size > max_message_size: break @@ -966,37 +1074,31 @@ def _batched_op_msg_impl( return to_send, length -def _encode_batched_op_msg( - operation, command, docs, ack, opts, ctx): +def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx): """Encode the next batched insert, update, or delete operation as OP_MSG. """ buf = _BytesIO() - to_send, _ = _batched_op_msg_impl( - operation, command, docs, ack, opts, ctx, buf) + to_send, _ = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) return buf.getvalue(), to_send + + if _use_c: _encode_batched_op_msg = _cmessage._encode_batched_op_msg -def _batched_op_msg_compressed( - operation, command, docs, ack, opts, ctx): +def _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx): """Create the next batched insert, update, or delete operation with OP_MSG, compressed. """ - data, to_send = _encode_batched_op_msg( - operation, command, docs, ack, opts, ctx) + data, to_send = _encode_batched_op_msg(operation, command, docs, ack, opts, ctx) - request_id, msg = _compress( - 2013, - data, - ctx.sock_info.compression_context) + request_id, msg = _compress(2013, data, ctx.sock_info.compression_context) return request_id, msg, to_send -def _batched_op_msg( - operation, command, docs, ack, opts, ctx): +def _batched_op_msg(operation, command, docs, ack, opts, ctx): """OP_MSG implementation entry point.""" buf = _BytesIO() @@ -1005,8 +1107,7 @@ def _batched_op_msg( # responseTo, opCode buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") - to_send, length = _batched_op_msg_impl( - operation, command, docs, ack, opts, ctx, buf) + to_send, length = _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf) # Header - request id and message length buf.seek(4) @@ -1016,45 +1117,42 @@ def _batched_op_msg( buf.write(_pack_int(length)) return request_id, buf.getvalue(), to_send + + if _use_c: _batched_op_msg = _cmessage._batched_op_msg -def _do_batched_op_msg( - namespace, operation, command, docs, opts, ctx): +def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx): """Create the next batched insert, update, or delete operation using OP_MSG. """ - command['$db'] = namespace.split('.', 1)[0] - if 'writeConcern' in command: - ack = bool(command['writeConcern'].get('w', 1)) + command["$db"] = namespace.split(".", 1)[0] + if "writeConcern" in command: + ack = bool(command["writeConcern"].get("w", 1)) else: ack = True if ctx.sock_info.compression_context: - return _batched_op_msg_compressed( - operation, command, docs, ack, opts, ctx) - return _batched_op_msg( - operation, command, docs, ack, opts, ctx) + return _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx) + return _batched_op_msg(operation, command, docs, ack, opts, ctx) # End OP_MSG ----------------------------------------------------- -def _encode_batched_write_command( - namespace, operation, command, docs, opts, ctx): - """Encode the next batched insert, update, or delete command. - """ +def _encode_batched_write_command(namespace, operation, command, docs, opts, ctx): + """Encode the next batched insert, update, or delete command.""" buf = _BytesIO() - to_send, _ = _batched_write_command_impl( - namespace, operation, command, docs, opts, ctx, buf) + to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf) return buf.getvalue(), to_send + + if _use_c: _encode_batched_write_command = _cmessage._encode_batched_write_command -def _batched_write_command_impl( - namespace, operation, command, docs, opts, ctx, buf): +def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf): """Create a batched OP_QUERY write command.""" max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size @@ -1066,7 +1164,7 @@ def _batched_write_command_impl( # No options buf.write(_ZERO_32) # Namespace as C string - buf.write(namespace.encode('utf8')) + buf.write(namespace.encode("utf8")) buf.write(_ZERO_8) # Skip: 0, Limit: -1 buf.write(_SKIPLIM) @@ -1082,7 +1180,7 @@ def _batched_write_command_impl( try: buf.write(_OP_MAP[operation]) except KeyError: - raise InvalidOperation('Unknown command') + raise InvalidOperation("Unknown command") # Where to write list document length list_start = buf.tell() - 4 @@ -1090,18 +1188,16 @@ def _batched_write_command_impl( idx = 0 for doc in docs: # Encode the current operation - key = str(idx).encode('utf8') + key = str(idx).encode("utf8") value = _dict_to_bson(doc, False, opts) # Is there enough room to add this document? max_cmd_size accounts for # the two trailing null bytes. doc_too_large = len(value) > max_cmd_size if doc_too_large: write_op = list(_FIELD_MAP.keys())[operation] - _raise_document_too_large( - write_op, len(value), max_bson_size) - enough_data = (idx >= 1 and - (buf.tell() + len(key) + len(value)) >= max_split_size) - enough_documents = (idx >= max_write_batch_size) + _raise_document_too_large(write_op, len(value), max_bson_size) + enough_data = idx >= 1 and (buf.tell() + len(key) + len(value)) >= max_split_size + enough_documents = idx >= max_write_batch_size if enough_data or enough_documents: break buf.write(_BSONOBJ) @@ -1170,20 +1266,25 @@ def raw_response(self, cursor_id=None, user_fields=None): raise NotPrimaryError(error_object["$err"], error_object) elif error_object.get("code") == 50: default_msg = "operation exceeded time limit" - raise ExecutionTimeout(error_object.get("$err", default_msg), - error_object.get("code"), - error_object) - raise OperationFailure("database error: %s" % - error_object.get("$err"), - error_object.get("code"), - error_object) + raise ExecutionTimeout( + error_object.get("$err", default_msg), error_object.get("code"), error_object + ) + raise OperationFailure( + "database error: %s" % error_object.get("$err"), + error_object.get("code"), + error_object, + ) if self.documents: return [self.documents] return [] - def unpack_response(self, cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, legacy_response=False): + def unpack_response( + self, + cursor_id=None, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + user_fields=None, + legacy_response=False, + ): """Unpack a response from the database and decode the BSON document(s). Check the response for errors and unpack, returning a dictionary @@ -1202,8 +1303,7 @@ def unpack_response(self, cursor_id=None, self.raw_response(cursor_id) if legacy_response: return bson.decode_all(self.documents, codec_options) - return bson._decode_all_selective( - self.documents, codec_options, user_fields) + return bson._decode_all_selective(self.documents, codec_options, user_fields) def command_response(self, codec_options): """Unpack a command response.""" @@ -1254,13 +1354,17 @@ def raw_response(self, cursor_id=None, user_fields={}): user_fields is used to determine which fields must not be decoded """ inflated_response = _decode_selective( - RawBSONDocument(self.payload_document), user_fields, - DEFAULT_RAW_BSON_OPTIONS) + RawBSONDocument(self.payload_document), user_fields, DEFAULT_RAW_BSON_OPTIONS + ) return [inflated_response] - def unpack_response(self, cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, legacy_response=False): + def unpack_response( + self, + cursor_id=None, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + user_fields=None, + legacy_response=False, + ): """Unpack a OP_MSG command response. :Parameters: @@ -1270,8 +1374,7 @@ def unpack_response(self, cursor_id=None, """ # If _OpMsg is in-use, this cannot be a legacy response. assert not legacy_response - return bson._decode_all_selective( - self.payload_document, codec_options, user_fields) + return bson._decode_all_selective(self.payload_document, codec_options, user_fields) def command_response(self, codec_options): """Unpack a command response.""" @@ -1292,17 +1395,12 @@ def unpack(cls, msg): flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError( - "Unsupported OP_MSG flag checksumPresent: " - "0x%x" % (flags,)) + raise ProtocolError("Unsupported OP_MSG flag checksumPresent: " "0x%x" % (flags,)) if flags ^ cls.MORE_TO_COME: - raise ProtocolError( - "Unsupported OP_MSG flags: 0x%x" % (flags,)) + raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) if first_payload_type != 0: - raise ProtocolError( - "Unsupported OP_MSG payload type: " - "0x%x" % (first_payload_type,)) + raise ProtocolError("Unsupported OP_MSG payload type: " "0x%x" % (first_payload_type,)) if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 975fc87610..3fa2946c7c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -35,23 +35,50 @@ import threading import weakref from collections import defaultdict -from typing import (TYPE_CHECKING, Any, Dict, FrozenSet, Generic, List, - Mapping, Optional, Sequence, Set, Tuple, Type, Union, cast) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + Generic, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, + Type, + Union, + cast, +) import bson -from bson.codec_options import (DEFAULT_CODEC_OPTIONS, CodecOptions, - TypeRegistry) +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.son import SON from bson.timestamp import Timestamp -from pymongo import (client_session, common, database, helpers, message, - periodic_executor, uri_parser) +from pymongo import ( + client_session, + common, + database, + helpers, + message, + periodic_executor, + uri_parser, +) from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor -from pymongo.errors import (AutoReconnect, BulkWriteError, ConfigurationError, - ConnectionFailure, InvalidOperation, - NotPrimaryError, OperationFailure, PyMongoError, - ServerSelectionTimeoutError) +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, +) from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector @@ -60,8 +87,12 @@ from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription from pymongo.typings import _CollationIn, _DocumentType, _Pipeline -from pymongo.uri_parser import (_check_options, _handle_option_deprecations, - _handle_security_options, _normalize_options) +from pymongo.uri_parser import ( + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, +) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern if TYPE_CHECKING: @@ -78,13 +109,15 @@ class MongoClient(common.BaseObject, Generic[_DocumentType]): resources related to this, including background threads for monitoring, and connection pools. """ + HOST = "localhost" PORT = 27017 # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. - _constructor_args = ('document_class', 'tz_aware', 'connect') + _constructor_args = ("document_class", "tz_aware", "connect") - def __init__(self, + def __init__( + self, host: Optional[Union[str, Sequence[str]]] = None, port: Optional[int] = None, document_class: Type[_DocumentType] = dict, @@ -616,13 +649,15 @@ def __init__(self, client.__my_database__ """ - self.__init_kwargs: Dict[str, Any] = {'host': host, - 'port': port, - 'document_class': document_class, - 'tz_aware': tz_aware, - 'connect': connect, - 'type_registry': type_registry, - **kwargs} + self.__init_kwargs: Dict[str, Any] = { + "host": host, + "port": port, + "document_class": document_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } if host is None: host = self.HOST @@ -635,13 +670,13 @@ def __init__(self, # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. - pool_class = kwargs.pop('_pool_class', None) - monitor_class = kwargs.pop('_monitor_class', None) - condition_class = kwargs.pop('_condition_class', None) + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts['document_class'] = document_class + keyword_opts["document_class"] = document_class seeds = set() username = None @@ -652,8 +687,7 @@ def __init__(self, srv_service_name = keyword_opts.get("srvservicename") srv_max_hosts = keyword_opts.get("srvmaxhosts") if len([h for h in host if "/" in h]) > 1: - raise ConfigurationError("host must not contain multiple MongoDB " - "URIs") + raise ConfigurationError("host must not contain multiple MongoDB " "URIs") for entity in host: # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' # it must be a URI, @@ -663,12 +697,18 @@ def __init__(self, timeout = keyword_opts.get("connecttimeoutms") if timeout is not None: timeout = common.validate_timeout_or_none_or_zero( - keyword_opts.cased_key("connecttimeoutms"), timeout) + keyword_opts.cased_key("connecttimeoutms"), timeout + ) res = uri_parser.parse_uri( - entity, port, validate=True, warn=True, normalize=False, + entity, + port, + validate=True, + warn=True, + normalize=False, connect_timeout=timeout, srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts) + srv_max_hosts=srv_max_hosts, + ) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -682,19 +722,20 @@ def __init__(self, # Add options with named keyword arguments to the parsed kwarg options. if type_registry is not None: - keyword_opts['type_registry'] = type_registry + keyword_opts["type_registry"] = type_registry if tz_aware is None: - tz_aware = opts.get('tz_aware', False) + tz_aware = opts.get("tz_aware", False) if connect is None: - connect = opts.get('connect', True) - keyword_opts['tz_aware'] = tz_aware - keyword_opts['connect'] = connect + connect = opts.get("connect", True) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect # Handle deprecated options in kwarg options. keyword_opts = _handle_option_deprecations(keyword_opts) # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary(dict(common.validate( - keyword_opts.cased_key(k), v) for k, v in keyword_opts.items())) + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) # Override connection string options with kwarg options. opts.update(keyword_opts) @@ -712,18 +753,19 @@ def __init__(self, # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self.__options = options = ClientOptions( - username, password, dbase, opts) + self.__options = options = ClientOptions(username, password, dbase, opts) self.__default_database_name = dbase self.__lock = threading.Lock() self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners - super(MongoClient, self).__init__(options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern) + super(MongoClient, self).__init__( + options.codec_options, + options.read_preference, + options.write_concern, + options.read_concern, + ) self._topology_settings = TopologySettings( seeds=seeds, @@ -740,7 +782,7 @@ def __init__(self, direct_connection=options.direct_connection, load_balanced=options.load_balanced, srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts + srv_max_hosts=srv_max_hosts, ) self._topology = Topology(self._topology_settings) @@ -756,7 +798,8 @@ def target(): interval=common.KILL_CURSOR_FREQUENCY, min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, - name="pymongo_kill_cursors_thread") + name="pymongo_kill_cursors_thread", + ) # We strongly reference the executor and it weakly references us via # this closure. When the client is freed, stop the executor soon. @@ -769,8 +812,8 @@ def target(): self._encrypter = None if self.__options.auto_encryption_opts: from pymongo.encryption import _Encrypter - self._encrypter = _Encrypter( - self, self.__options.auto_encryption_opts) + + self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) def _duplicate(self, **kwargs): args = self.__init_kwargs.copy() @@ -788,12 +831,12 @@ def _server_property(self, attr_name): the server may change. In such cases, store a local reference to a ServerDescription first, then use its properties. """ - server = self._topology.select_server( - writable_server_selector) + server = self._topology.select_server(writable_server_selector) return getattr(server.description, attr_name) - def watch(self, + def watch( + self, pipeline: Optional[_Pipeline] = None, full_document: Optional[str] = None, resume_after: Optional[Mapping[str, Any]] = None, @@ -889,9 +932,17 @@ def watch(self, https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst """ return ClusterChangeStream( - self.admin, pipeline, full_document, resume_after, max_await_time_ms, - batch_size, collation, start_at_operation_time, session, - start_after) + self.admin, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + ) @property def topology_description(self) -> TopologyDescription: @@ -916,7 +967,7 @@ def topology_description(self) -> TopologyDescription: return self._topology.description @property - def address(self) -> Optional[Tuple[str, int]]: + def address(self) -> Optional[Tuple[str, int]]: """(host, port) of the current standalone, primary, or mongos, or None. Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if @@ -930,17 +981,22 @@ def address(self) -> Optional[Tuple[str, int]]: .. versionadded:: 3.0 """ topology_type = self._topology._description.topology_type - if (topology_type == TOPOLOGY_TYPE.Sharded and - len(self.topology_description.server_descriptions()) > 1): + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): raise InvalidOperation( 'Cannot use "address" property when load balancing among' - ' mongoses, use "nodes" instead.') - if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded): + ' mongoses, use "nodes" instead.' + ) + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded, + ): return None - return self._server_property('address') + return self._server_property("address") @property def primary(self) -> Optional[Tuple[str, int]]: @@ -987,7 +1043,7 @@ def is_primary(self) -> bool: connection is established or raise ServerSelectionTimeoutError if no server is available. """ - return self._server_property('is_writable') + return self._server_property("is_writable") @property def is_mongos(self) -> bool: @@ -995,7 +1051,7 @@ def is_mongos(self) -> bool: connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available.. """ - return self._server_property('server_type') == SERVER_TYPE.Mongos + return self._server_property("server_type") == SERVER_TYPE.Mongos @property def nodes(self) -> FrozenSet[Tuple[str, Optional[int]]]: @@ -1027,17 +1083,16 @@ def _end_sessions(self, session_ids): try: # Use SocketInfo.command directly to avoid implicitly creating # another session. - with self._socket_for_reads( - ReadPreference.PRIMARY_PREFERRED, - None) as (sock_info, read_pref): + with self._socket_for_reads(ReadPreference.PRIMARY_PREFERRED, None) as ( + sock_info, + read_pref, + ): if not sock_info.supports_sessions: return for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = SON([('endSessions', - session_ids[i:i + common._MAX_END_SESSIONS])]) - sock_info.command( - 'admin', spec, read_preference=read_pref, client=self) + spec = SON([("endSessions", session_ids[i : i + common._MAX_END_SESSIONS])]) + sock_info.command("admin", spec, read_preference=read_pref, client=self) except PyMongoError: # Drivers MUST ignore any errors returned by the endSessions # command. @@ -1091,16 +1146,20 @@ def _get_socket(self, server, session): return with server.get_socket(handler=err_handler) as sock_info: # Pin this session to the selected server or connection. - if (in_txn and server.description.server_type in ( - SERVER_TYPE.Mongos, SERVER_TYPE.LoadBalancer)): + if in_txn and server.description.server_type in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ): session._pin(server, sock_info) err_handler.contribute_socket(sock_info) - if (self._encrypter and - not self._encrypter._bypass_auto_encryption and - sock_info.max_wire_version < 8): + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and sock_info.max_wire_version < 8 + ): raise ConfigurationError( - 'Auto-encryption requires a minimum MongoDB version ' - 'of 4.2') + "Auto-encryption requires a minimum MongoDB version " "of 4.2" + ) yield sock_info def _select_server(self, server_selector, session, address=None): @@ -1123,8 +1182,7 @@ def _select_server(self, server_selector, session, address=None): # We're running a getMore or this session is pinned to a mongos. server = topology.select_server_by_address(address) if not server: - raise AutoReconnect('server %s:%d no longer available' - % address) + raise AutoReconnect("server %s:%d no longer available" % address) else: server = topology.select_server(server_selector) return server @@ -1169,8 +1227,7 @@ def _socket_for_reads(self, read_preference, session): return self._socket_from_server(read_preference, server, session) def _should_pin_cursor(self, session): - return (self.__options.load_balanced and - not (session and session.in_transaction)) + return self.__options.load_balanced and not (session and session.in_transaction) def _run_operation(self, operation, unpack_res, address=None): """Run a _Query/_GetMore operation and return a Response. @@ -1183,24 +1240,28 @@ def _run_operation(self, operation, unpack_res, address=None): """ if operation.sock_mgr: server = self._select_server( - operation.read_preference, operation.session, address=address) + operation.read_preference, operation.session, address=address + ) with operation.sock_mgr.lock: - with _MongoClientErrorHandler( - self, server, operation.session) as err_handler: + with _MongoClientErrorHandler(self, server, operation.session) as err_handler: err_handler.contribute_socket(operation.sock_mgr.sock) return server.run_operation( - operation.sock_mgr.sock, operation, True, - self._event_listeners, unpack_res) + operation.sock_mgr.sock, operation, True, self._event_listeners, unpack_res + ) def _cmd(session, server, sock_info, read_preference): return server.run_operation( - sock_info, operation, read_preference, self._event_listeners, - unpack_res) + sock_info, operation, read_preference, self._event_listeners, unpack_res + ) return self._retryable_read( - _cmd, operation.read_preference, operation.session, - address=address, retryable=isinstance(operation, message._Query)) + _cmd, + operation.read_preference, + operation.session, + address=address, + retryable=isinstance(operation, message._Query), + ) def _retry_with_session(self, retryable, func, session, bulk): """Execute an operation with at most one consecutive retries @@ -1210,8 +1271,9 @@ def _retry_with_session(self, retryable, func, session, bulk): Re-raises any exception thrown by func(). """ - retryable = (retryable and self.options.retry_writes - and session and not session.in_transaction) + retryable = ( + retryable and self.options.retry_writes and session and not session.in_transaction + ) return self._retry_internal(retryable, func, session, bulk) def _retry_internal(self, retryable, func, session, bulk): @@ -1222,6 +1284,7 @@ def _retry_internal(self, retryable, func, session, bulk): def is_retrying(): return bulk.retrying if bulk else retrying + # Increment the transaction id up front to ensure any retry attempt # will use the proper txnNumber, even if server or socket selection # fails before the command can be sent. @@ -1234,8 +1297,8 @@ def is_retrying(): try: server = self._select_server(writable_server_selector, session) supports_session = ( - session is not None and - server.description.retryable_writes_supported) + session is not None and server.description.retryable_writes_supported + ) with self._get_socket(server, session) as sock_info: max_wire_version = sock_info.max_wire_version if retryable and not supports_session: @@ -1273,8 +1336,7 @@ def is_retrying(): retrying = True last_error = exc - def _retryable_read(self, func, read_pref, session, address=None, - retryable=True): + def _retryable_read(self, func, read_pref, session, address=None, retryable=True): """Execute an operation with at most one consecutive retries Returns func()'s return value on success. On error retries the same @@ -1282,18 +1344,16 @@ def _retryable_read(self, func, read_pref, session, address=None, Re-raises any exception thrown by func(). """ - retryable = (retryable and - self.options.retry_reads - and not (session and session.in_transaction)) + retryable = ( + retryable and self.options.retry_reads and not (session and session.in_transaction) + ) last_error: Optional[Exception] = None retrying = False while True: try: - server = self._select_server( - read_pref, session, address=address) - with self._socket_from_server(read_pref, server, session) as ( - sock_info, read_pref): + server = self._select_server(read_pref, session, address=address) + with self._socket_from_server(read_pref, server, session) as (sock_info, read_pref): if retrying and not retryable: # A retry is not possible because this server does # not support retryable reads, raise the last error. @@ -1343,35 +1403,38 @@ def __hash__(self) -> int: def _repr_helper(self): def option_repr(option, value): """Fix options whose __repr__ isn't usable in a constructor.""" - if option == 'document_class': + if option == "document_class": if value is dict: - return 'document_class=dict' + return "document_class=dict" else: - return 'document_class=%s.%s' % (value.__module__, - value.__name__) + return "document_class=%s.%s" % (value.__module__, value.__name__) if option in common.TIMEOUT_OPTIONS and value is not None: return "%s=%s" % (option, int(value * 1000)) - return '%s=%r' % (option, value) + return "%s=%r" % (option, value) # Host first... - options = ['host=%r' % [ - '%s:%d' % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds]] + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] # ... then everything in self._constructor_args... options.extend( - option_repr(key, self.__options._options[key]) - for key in self._constructor_args) + option_repr(key, self.__options._options[key]) for key in self._constructor_args + ) # ... then everything else. options.extend( option_repr(key, self.__options._options[key]) for key in self.__options._options - if key not in set(self._constructor_args) - and key != 'username' and key != 'password') - return ', '.join(options) + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) def __repr__(self): - return ("MongoClient(%s)" % (self._repr_helper(),)) + return "MongoClient(%s)" % (self._repr_helper(),) def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. @@ -1382,10 +1445,11 @@ def __getattr__(self, name: str) -> database.Database[_DocumentType]: :Parameters: - `name`: the name of the database to get """ - if name.startswith('_'): + if name.startswith("_"): raise AttributeError( "MongoClient has no attribute %r. To access the %s" - " database, use client[%r]." % (name, name, name)) + " database, use client[%r]." % (name, name, name) + ) return self.__getitem__(name) def __getitem__(self, name: str) -> database.Database[_DocumentType]: @@ -1399,8 +1463,9 @@ def __getitem__(self, name: str) -> database.Database[_DocumentType]: """ return database.Database(self, name) - def _cleanup_cursor(self, locks_allowed, cursor_id, address, sock_mgr, - session, explicit_session): + def _cleanup_cursor( + self, locks_allowed, cursor_id, address, sock_mgr, session, explicit_session + ): """Cleanup a cursor from cursor.close() or __del__. This method handles cleanup for Cursors/CommandCursors including any @@ -1421,12 +1486,9 @@ def _cleanup_cursor(self, locks_allowed, cursor_id, address, sock_mgr, # If this is an exhaust cursor and we haven't completely # exhausted the result set we *must* close the socket # to stop the server from sending more data. - sock_mgr.sock.close_socket( - ConnectionClosedReason.ERROR) + sock_mgr.sock.close_socket(ConnectionClosedReason.ERROR) else: - self._close_cursor_now( - cursor_id, address, session=session, - sock_mgr=sock_mgr) + self._close_cursor_now(cursor_id, address, session=session, sock_mgr=sock_mgr) if sock_mgr: sock_mgr.close() else: @@ -1440,8 +1502,7 @@ def _close_cursor_soon(self, cursor_id, address, sock_mgr=None): """Request that a cursor and/or connection be cleaned up soon.""" self.__kill_cursors_queue.append((address, cursor_id, sock_mgr)) - def _close_cursor_now(self, cursor_id, address=None, session=None, - sock_mgr=None): + def _close_cursor_now(self, cursor_id, address=None, session=None, sock_mgr=None): """Send a kill cursors message with the given id. The cursor is closed synchronously on the current thread. @@ -1453,11 +1514,9 @@ def _close_cursor_now(self, cursor_id, address=None, session=None, if sock_mgr: with sock_mgr.lock: # Cursor is pinned to LB outside of a transaction. - self._kill_cursor_impl( - [cursor_id], address, session, sock_mgr.sock) + self._kill_cursor_impl([cursor_id], address, session, sock_mgr.sock) else: - self._kill_cursors( - [cursor_id], address, self._get_topology(), session) + self._kill_cursors([cursor_id], address, self._get_topology(), session) except PyMongoError: # Make another attempt to kill the cursor later. self._close_cursor_soon(cursor_id, address) @@ -1477,8 +1536,8 @@ def _kill_cursors(self, cursor_ids, address, topology, session): def _kill_cursor_impl(self, cursor_ids, address, session, sock_info): namespace = address.namespace - db, coll = namespace.split('.', 1) - spec = SON([('killCursors', coll), ('cursors', cursor_ids)]) + db, coll = namespace.split(".", 1) + spec = SON([("killCursors", coll), ("cursors", cursor_ids)]) sock_info.command(db, spec, session=session, client=self) def _process_kill_cursors(self): @@ -1500,11 +1559,9 @@ def _process_kill_cursors(self): for address, cursor_id, sock_mgr in pinned_cursors: try: - self._cleanup_cursor(True, cursor_id, address, sock_mgr, - None, False) + self._cleanup_cursor(True, cursor_id, address, sock_mgr, None, False) except Exception as exc: - if (isinstance(exc, InvalidOperation) - and self._topology._closed): + if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it # can be caught in _process_periodic_tasks raise @@ -1516,11 +1573,9 @@ def _process_kill_cursors(self): topology = self._get_topology() for address, cursor_ids in address_to_cursor_ids.items(): try: - self._kill_cursors( - cursor_ids, address, topology, session=None) + self._kill_cursors(cursor_ids, address, topology, session=None) except Exception as exc: - if (isinstance(exc, InvalidOperation) and - self._topology._closed): + if isinstance(exc, InvalidOperation) and self._topology._closed: raise else: helpers._handle_exception() @@ -1542,10 +1597,10 @@ def __start_session(self, implicit, **kwargs): # Raises ConfigurationError if sessions are not supported. server_session = self._get_server_session() opts = client_session.SessionOptions(**kwargs) - return client_session.ClientSession( - self, server_session, opts, implicit) + return client_session.ClientSession(self, server_session, opts, implicit) - def start_session(self, + def start_session( + self, causal_consistency: Optional[bool] = None, default_transaction_options: Optional[client_session.TransactionOptions] = None, snapshot: Optional[bool] = False, @@ -1571,7 +1626,8 @@ def start_session(self, False, causal_consistency=causal_consistency, default_transaction_options=default_transaction_options, - snapshot=snapshot) + snapshot=snapshot, + ) def _get_server_session(self): """Internal: start or resume a _ServerSession.""" @@ -1624,23 +1680,21 @@ def _send_cluster_time(self, command, session): topology_time = self._topology.max_cluster_time() session_time = session.cluster_time if session else None if topology_time and session_time: - if topology_time['clusterTime'] > session_time['clusterTime']: + if topology_time["clusterTime"] > session_time["clusterTime"]: cluster_time = topology_time else: cluster_time = session_time else: cluster_time = topology_time or session_time if cluster_time: - command['$clusterTime'] = cluster_time + command["$clusterTime"] = cluster_time def _process_response(self, reply, session): - self._topology.receive_cluster_time(reply.get('$clusterTime')) + self._topology.receive_cluster_time(reply.get("$clusterTime")) if session is not None: session._process_response(reply) - def server_info(self, - session: Optional[client_session.ClientSession] = None - ) -> Dict[str, Any]: + def server_info(self, session: Optional[client_session.ClientSession] = None) -> Dict[str, Any]: """Get information about the MongoDB server we're connected to. :Parameters: @@ -1650,13 +1704,12 @@ def server_info(self, .. versionchanged:: 3.6 Added ``session`` parameter. """ - return self.admin.command("buildinfo", - read_preference=ReadPreference.PRIMARY, - session=session) + return self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ) - def list_databases(self, - session: Optional[client_session.ClientSession] = None, - **kwargs: Any + def list_databases( + self, session: Optional[client_session.ClientSession] = None, **kwargs: Any ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the databases of the connected server. @@ -1686,8 +1739,8 @@ def list_databases(self, } return CommandCursor(admin["$cmd"], cursor, None) - def list_database_names(self, - session: Optional[client_session.ClientSession] = None + def list_database_names( + self, session: Optional[client_session.ClientSession] = None ) -> List[str]: """Get a list of the names of all databases on the connected server. @@ -1697,12 +1750,12 @@ def list_database_names(self, .. versionadded:: 3.6 """ - return [doc["name"] - for doc in self.list_databases(session, nameOnly=True)] + return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] - def drop_database(self, + def drop_database( + self, name_or_database: Union[str, database.Database], - session: Optional[client_session.ClientSession] = None + session: Optional[client_session.ClientSession] = None, ) -> None: """Drop a database. @@ -1733,8 +1786,7 @@ def drop_database(self, name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance " - "of str or a Database") + raise TypeError("name_or_database must be an instance " "of str or a Database") with self._socket_for_writes(session) as sock_info: self[name]._command( @@ -1743,9 +1795,11 @@ def drop_database(self, read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), parse_write_concern_error=True, - session=session) + session=session, + ) - def get_default_database(self, + def get_default_database( + self, default: Optional[str] = None, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -1793,15 +1847,15 @@ def get_default_database(self, Deprecated, use :meth:`get_database` instead. """ if self.__default_database_name is None and default is None: - raise ConfigurationError( - 'No default database name defined or provided.') + raise ConfigurationError("No default database name defined or provided.") name = cast(str, self.__default_database_name or default) return database.Database( - self, name, codec_options, - read_preference, write_concern, read_concern) + self, name, codec_options, read_preference, write_concern, read_concern + ) - def get_database(self, + def get_database( + self, name: Optional[str] = None, codec_options: Optional[CodecOptions] = None, read_preference: Optional[_ServerMode] = None, @@ -1853,19 +1907,21 @@ def get_database(self, """ if name is None: if self.__default_database_name is None: - raise ConfigurationError('No default database defined') + raise ConfigurationError("No default database defined") name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, - write_concern, read_concern) + self, name, codec_options, read_preference, write_concern, read_concern + ) def _database_default_options(self, name): """Get a Database instance with the default settings.""" return self.get_database( - name, codec_options=DEFAULT_CODEC_OPTIONS, + name, + codec_options=DEFAULT_CODEC_OPTIONS, read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN) + write_concern=DEFAULT_WRITE_CONCERN, + ) def __enter__(self) -> "MongoClient[_DocumentType]": return self @@ -1887,7 +1943,7 @@ def _retryable_error_doc(exc): if isinstance(exc, BulkWriteError): # Check the last writeConcernError to determine if this # BulkWriteError is retryable. - wces = exc.details['writeConcernErrors'] + wces = exc.details["writeConcernErrors"] wce = wces[-1] if wces else None return wce if isinstance(exc, (NotPrimaryError, OperationFailure)): @@ -1898,18 +1954,18 @@ def _retryable_error_doc(exc): def _add_retryable_write_error(exc, max_wire_version): doc = _retryable_error_doc(exc) if doc: - code = doc.get('code', 0) + code = doc.get("code", 0) # retryWrites on MMAPv1 should raise an actionable error. - if (code == 20 and - str(exc).startswith("Transaction numbers")): + if code == 20 and str(exc).startswith("Transaction numbers"): errmsg = ( "This MongoDB deployment does not support " "retryable writes. Please add retryWrites=false " - "to your connection string.") + "to your connection string." + ) raise OperationFailure(errmsg, code, exc.details) if max_wire_version >= 9: # In MongoDB 4.4+, the server reports the error labels. - for label in doc.get('errorLabels', []): + for label in doc.get("errorLabels", []): exc._add_error_label(label) else: if code in helpers._RETRYABLE_ERROR_CODES: @@ -1917,16 +1973,23 @@ def _add_retryable_write_error(exc, max_wire_version): # Connection errors are always retryable except NotPrimaryError which is # handled above. - if (isinstance(exc, ConnectionFailure) and - not isinstance(exc, NotPrimaryError)): + if isinstance(exc, ConnectionFailure) and not isinstance(exc, NotPrimaryError): exc._add_error_label("RetryableWriteError") class _MongoClientErrorHandler(object): """Handle errors raised when executing an operation.""" - __slots__ = ('client', 'server_address', 'session', 'max_wire_version', - 'sock_generation', 'completed_handshake', 'service_id', - 'handled') + + __slots__ = ( + "client", + "server_address", + "session", + "max_wire_version", + "sock_generation", + "completed_handshake", + "service_id", + "handled", + ) def __init__(self, client, server, session): self.client = client @@ -1960,13 +2023,18 @@ def handle(self, exc_type, exc_val): self.session._server_session.mark_dirty() if issubclass(exc_type, PyMongoError): - if (exc_val.has_error_label("TransientTransactionError") or - exc_val.has_error_label("RetryableWriteError")): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): self.session._unpin() err_ctx = _ErrorContext( - exc_val, self.max_wire_version, self.sock_generation, - self.completed_handshake, self.service_id) + exc_val, + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) self.client._topology.handle_error(self.server_address, err_ctx) def __enter__(self): diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 388ba61687..844ad02262 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -21,8 +21,7 @@ from typing import Any, Mapping, cast from pymongo import common, periodic_executor -from pymongo.errors import (NotPrimaryError, OperationFailure, - _OperationCancelled) +from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage @@ -54,10 +53,8 @@ def target(): return True executor = periodic_executor.PeriodicExecutor( - interval=interval, - min_interval=min_interval, - target=target, - name=name) + interval=interval, min_interval=min_interval, target=target, name=name + ) self._executor = executor @@ -101,12 +98,7 @@ def request_check(self): class Monitor(MonitorBase): - def __init__( - self, - server_description, - topology, - pool, - topology_settings): + def __init__(self, server_description, topology, pool, topology_settings): """Class to monitor a MongoDB server on a background thread. Pass an initial ServerDescription, a Topology, a Pool, and @@ -119,7 +111,8 @@ def __init__( topology, "pymongo_server_monitor_thread", topology_settings.heartbeat_frequency, - common.MIN_HEARTBEAT_INTERVAL) + common.MIN_HEARTBEAT_INTERVAL, + ) self._server_description = server_description self._pool = pool self._settings = topology_settings @@ -128,8 +121,10 @@ def __init__( self._publish = pub and self._listeners.enabled_for_server_heartbeat self._cancel_context = None self._rtt_monitor = _RttMonitor( - topology, topology_settings, topology._create_pool_for_monitor( - server_description.address)) + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) self.heartbeater = None def cancel_check(self): @@ -179,7 +174,8 @@ def _run(self): _sanitize(exc) # Already closed the connection, wait for the next check. self._server_description = ServerDescription( - self._server_description.address, error=exc) + self._server_description.address, error=exc + ) if prev_sd.is_server_type_known: # Immediately retry since we've already waited 500ms to # discover that we've been cancelled. @@ -187,11 +183,14 @@ def _run(self): return # Update the Topology and clear the server pool on error. - self._topology.on_change(self._server_description, - reset_pool=self._server_description.error) - - if (self._server_description.is_server_type_known and - self._server_description.topology_version): + self._topology.on_change( + self._server_description, reset_pool=self._server_description.error + ) + + if ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): self._start_rtt_monitor() # Immediately check for the next streaming response. self._executor.skip_sleep() @@ -215,7 +214,7 @@ def _check_server(self): except (OperationFailure, NotPrimaryError) as exc: # Update max cluster time even when hello fails. details = cast(Mapping[str, Any], exc.details) - self._topology.receive_cluster_time(details.get('$clusterTime')) + self._topology.receive_cluster_time(details.get("$clusterTime")) raise except ReferenceError: raise @@ -226,8 +225,7 @@ def _check_server(self): duration = time.monotonic() - start if self._publish: awaited = sd.is_server_type_known and sd.topology_version - self._listeners.publish_server_heartbeat_failed( - address, duration, error, awaited) + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) self._reset_connection() if isinstance(error, _OperationCancelled): raise @@ -252,11 +250,11 @@ def _check_once(self): if not response.awaitable: self._rtt_monitor.add_sample(round_trip_time) - sd = ServerDescription(address, response, - self._rtt_monitor.average()) + sd = ServerDescription(address, response, self._rtt_monitor.average()) if self._publish: self._listeners.publish_server_heartbeat_succeeded( - address, round_trip_time, response, response.awaitable) + address, round_trip_time, response, response.awaitable + ) return sd def _check_with_socket(self, conn): @@ -269,13 +267,13 @@ def _check_with_socket(self, conn): if conn.more_to_come: # Read the next streaming hello (MongoDB 4.4+). response = Hello(conn._next_reply(), awaitable=True) - elif (conn.performed_handshake and - self._server_description.topology_version): + elif conn.performed_handshake and self._server_description.topology_version: # Initiate streaming hello (MongoDB 4.4+). response = conn._hello( cluster_time, self._server_description.topology_version, - self._settings.heartbeat_frequency) + self._settings.heartbeat_frequency, + ) else: # New connection handshake or polling hello (MongoDB <4.4). response = conn._hello(cluster_time, None, None) @@ -294,7 +292,8 @@ def __init__(self, topology, topology_settings): topology, "pymongo_srv_polling_thread", common.MIN_SRV_RESCAN_INTERVAL, - topology_settings.heartbeat_frequency) + topology_settings.heartbeat_frequency, + ) self._settings = topology_settings self._seedlist = self._settings._seeds self._fqdn = self._settings.fqdn @@ -315,9 +314,11 @@ def _get_seedlist(self): Returns a list of ServerDescriptions. """ try: - resolver = _SrvResolver(self._fqdn, - self._settings.pool_options.connect_timeout, - self._settings.srv_service_name) + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) seedlist, ttl = resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. @@ -330,8 +331,7 @@ def _get_seedlist(self): self.request_check() return None else: - self._executor.update_interval( - max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) return seedlist @@ -345,7 +345,8 @@ def __init__(self, topology, topology_settings, pool): topology, "pymongo_server_rtt_thread", topology_settings.heartbeat_frequency, - common.MIN_HEARTBEAT_INTERVAL) + common.MIN_HEARTBEAT_INTERVAL, + ) self._pool = pool self._moving_average = MovingAverage() @@ -389,7 +390,7 @@ def _ping(self): """Run a "hello" command and return the RTT.""" with self._pool.get_socket() as sock_info: if self._executor._stopped: - raise Exception('_RttMonitor closed') + raise Exception("_RttMonitor closed") start = time.monotonic() sock_info.hello() return time.monotonic() - start diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 6f57200a3b..6a3ed6d07e 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -194,10 +194,16 @@ def connection_checked_in(self, event): from pymongo.topology_description import TopologyDescription -_Listeners = namedtuple('_Listeners', - ('command_listeners', 'server_listeners', - 'server_heartbeat_listeners', 'topology_listeners', - 'cmap_listeners')) +_Listeners = namedtuple( + "_Listeners", + ( + "command_listeners", + "server_listeners", + "server_heartbeat_listeners", + "topology_listeners", + "cmap_listeners", + ), +) _LISTENERS = _Listeners([], [], [], [], []) @@ -483,10 +489,12 @@ def _validate_event_listeners(option, listeners): raise TypeError("%s must be a list or tuple" % (option,)) for listener in listeners: if not isinstance(listener, _EventListener): - raise TypeError("Listeners for %s must be either a " - "CommandListener, ServerHeartbeatListener, " - "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (option,)) + raise TypeError( + "Listeners for %s must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." % (option,) + ) return listeners @@ -499,10 +507,12 @@ def register(listener: _EventListener) -> None: :class:`TopologyListener`, or :class:`ConnectionPoolListener`. """ if not isinstance(listener, _EventListener): - raise TypeError("Listeners for %s must be either a " - "CommandListener, ServerHeartbeatListener, " - "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (listener,)) + raise TypeError( + "Listeners for %s must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." % (listener,) + ) if isinstance(listener, CommandListener): _LISTENERS.command_listeners.append(listener) if isinstance(listener, ServerHeartbeatListener): @@ -514,19 +524,32 @@ def register(listener: _EventListener) -> None: if isinstance(listener, ConnectionPoolListener): _LISTENERS.cmap_listeners.append(listener) + # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). _SENSITIVE_COMMANDS = set( - ["authenticate", "saslstart", "saslcontinue", "getnonce", "createuser", - "updateuser", "copydbgetnonce", "copydbsaslstart", "copydb"]) + [ + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", + ] +) # The "hello" command is also deemed sensitive when attempting speculative # authentication. def _is_speculative_authenticate(command_name, doc): - if (command_name.lower() in ('hello', HelloCompat.LEGACY_CMD) and - 'speculativeAuthenticate' in doc): + if ( + command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) + and "speculativeAuthenticate" in doc + ): return True return False @@ -534,8 +557,7 @@ def _is_speculative_authenticate(command_name, doc): class _CommandEvent(object): """Base class for command events.""" - __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", - "__service_id") + __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") def __init__( self, @@ -592,6 +614,7 @@ class CommandStartedEvent(_CommandEvent): - `operation_id`: An optional identifier for a series of related events. - `service_id`: The service_id this command was sent to, or ``None``. """ + __slots__ = ("__cmd", "__db") def __init__( @@ -608,10 +631,10 @@ def __init__( # Command name must be first key. command_name = next(iter(command)) super(CommandStartedEvent, self).__init__( - command_name, request_id, connection_id, operation_id, service_id=service_id) + command_name, request_id, connection_id, operation_id, service_id=service_id + ) cmd_name, cmd_doc = command_name.lower(), command[command_name] - if (cmd_name in _SENSITIVE_COMMANDS or - _is_speculative_authenticate(cmd_name, command)): + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): self.__cmd: Mapping[str, Any] = {} else: self.__cmd = command @@ -628,12 +651,14 @@ def database_name(self) -> str: return self.__db def __repr__(self): - return ( - "<%s %s db: %r, command: %r, operation_id: %s, " - "service_id: %s>") % ( - self.__class__.__name__, self.connection_id, - self.database_name, self.command_name, self.operation_id, - self.service_id) + return ("<%s %s db: %r, command: %r, operation_id: %s, " "service_id: %s>") % ( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.service_id, + ) class CommandSucceededEvent(_CommandEvent): @@ -649,6 +674,7 @@ class CommandSucceededEvent(_CommandEvent): - `operation_id`: An optional identifier for a series of related events. - `service_id`: The service_id this command was sent to, or ``None``. """ + __slots__ = ("__duration_micros", "__reply") def __init__( @@ -662,12 +688,11 @@ def __init__( service_id: Optional[ObjectId] = None, ) -> None: super(CommandSucceededEvent, self).__init__( - command_name, request_id, connection_id, operation_id, - service_id=service_id) + command_name, request_id, connection_id, operation_id, service_id=service_id + ) self.__duration_micros = _to_micros(duration) cmd_name = command_name.lower() - if (cmd_name in _SENSITIVE_COMMANDS or - _is_speculative_authenticate(cmd_name, reply)): + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): self.__reply: Mapping[str, Any] = {} else: self.__reply = reply @@ -683,12 +708,14 @@ def reply(self) -> _DocumentOut: return self.__reply def __repr__(self): - return ( - "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "service_id: %s>") % ( - self.__class__.__name__, self.connection_id, - self.command_name, self.operation_id, self.duration_micros, - self.service_id) + return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, " "service_id: %s>") % ( + self.__class__.__name__, + self.connection_id, + self.command_name, + self.operation_id, + self.duration_micros, + self.service_id, + ) class CommandFailedEvent(_CommandEvent): @@ -704,6 +731,7 @@ class CommandFailedEvent(_CommandEvent): - `operation_id`: An optional identifier for a series of related events. - `service_id`: The service_id this command was sent to, or ``None``. """ + __slots__ = ("__duration_micros", "__failure") def __init__( @@ -716,7 +744,9 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, ) -> None: - super(CommandFailedEvent, self).__init__(command_name, request_id, connection_id, operation_id, service_id=service_id) + super(CommandFailedEvent, self).__init__( + command_name, request_id, connection_id, operation_id, service_id=service_id + ) self.__duration_micros = _to_micros(duration) self.__failure = failure @@ -733,14 +763,21 @@ def failure(self) -> _DocumentOut: def __repr__(self): return ( "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "failure: %r, service_id: %s>") % ( - self.__class__.__name__, self.connection_id, self.command_name, - self.operation_id, self.duration_micros, self.failure, - self.service_id) + "failure: %r, service_id: %s>" + ) % ( + self.__class__.__name__, + self.connection_id, + self.command_name, + self.operation_id, + self.duration_micros, + self.failure, + self.service_id, + ) class _PoolEvent(object): """Base class for pool events.""" + __slots__ = ("__address",) def __init__(self, address: _Address) -> None: @@ -754,7 +791,7 @@ def address(self) -> _Address: return self.__address def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__address) + return "%s(%r)" % (self.__class__.__name__, self.__address) class PoolCreatedEvent(_PoolEvent): @@ -766,6 +803,7 @@ class PoolCreatedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = ("__options",) def __init__(self, address: _Address, options: Dict[str, Any]) -> None: @@ -774,13 +812,11 @@ def __init__(self, address: _Address, options: Dict[str, Any]) -> None: @property def options(self) -> Dict[str, Any]: - """Any non-default pool options that were set on this Connection Pool. - """ + """Any non-default pool options that were set on this Connection Pool.""" return self.__options def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.address, self.__options) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__options) class PoolReadyEvent(_PoolEvent): @@ -792,6 +828,7 @@ class PoolReadyEvent(_PoolEvent): .. versionadded:: 4.0 """ + __slots__ = () @@ -805,6 +842,7 @@ class PoolClearedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = ("__service_id",) def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: @@ -822,8 +860,7 @@ def service_id(self) -> Optional[ObjectId]: return self.__service_id def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.address, self.__service_id) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__service_id) class PoolClosedEvent(_PoolEvent): @@ -835,6 +872,7 @@ class PoolClosedEvent(_PoolEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -845,17 +883,17 @@ class ConnectionClosedReason(object): .. versionadded:: 3.9 """ - STALE = 'stale' + STALE = "stale" """The pool was cleared, making the connection no longer valid.""" - IDLE = 'idle' + IDLE = "idle" """The connection became stale by being idle for too long (maxIdleTimeMS). """ - ERROR = 'error' + ERROR = "error" """The connection experienced an error, making it no longer valid.""" - POOL_CLOSED = 'poolClosed' + POOL_CLOSED = "poolClosed" """The pool was closed, making the connection no longer valid.""" @@ -866,13 +904,13 @@ class ConnectionCheckOutFailedReason(object): .. versionadded:: 3.9 """ - TIMEOUT = 'timeout' + TIMEOUT = "timeout" """The connection check out attempt exceeded the specified timeout.""" - POOL_CLOSED = 'poolClosed' + POOL_CLOSED = "poolClosed" """The pool was previously closed, and cannot provide new connections.""" - CONN_ERROR = 'connectionError' + CONN_ERROR = "connectionError" """The connection check out attempt experienced an error while setting up a new connection. """ @@ -880,6 +918,7 @@ class ConnectionCheckOutFailedReason(object): class _ConnectionEvent(object): """Private base class for some connection events.""" + __slots__ = ("__address", "__connection_id") def __init__(self, address: _Address, connection_id: int) -> None: @@ -899,8 +938,7 @@ def connection_id(self) -> int: return self.__connection_id def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.__address, self.__connection_id) + return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__connection_id) class ConnectionCreatedEvent(_ConnectionEvent): @@ -916,6 +954,7 @@ class ConnectionCreatedEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -929,6 +968,7 @@ class ConnectionReadyEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -943,6 +983,7 @@ class ConnectionClosedEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = ("__reason",) def __init__(self, address, connection_id, reason): @@ -959,9 +1000,12 @@ def reason(self): return self.__reason def __repr__(self): - return '%s(%r, %r, %r)' % ( - self.__class__.__name__, self.address, self.connection_id, - self.__reason) + return "%s(%r, %r, %r)" % ( + self.__class__.__name__, + self.address, + self.connection_id, + self.__reason, + ) class ConnectionCheckOutStartedEvent(object): @@ -973,6 +1017,7 @@ class ConnectionCheckOutStartedEvent(object): .. versionadded:: 3.9 """ + __slots__ = ("__address",) def __init__(self, address): @@ -986,7 +1031,7 @@ def address(self): return self.__address def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self.__address) + return "%s(%r)" % (self.__class__.__name__, self.__address) class ConnectionCheckOutFailedEvent(object): @@ -999,6 +1044,7 @@ class ConnectionCheckOutFailedEvent(object): .. versionadded:: 3.9 """ + __slots__ = ("__address", "__reason") def __init__(self, address: _Address, reason: str) -> None: @@ -1022,8 +1068,7 @@ def reason(self) -> str: return self.__reason def __repr__(self): - return '%s(%r, %r)' % ( - self.__class__.__name__, self.__address, self.__reason) + return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__reason) class ConnectionCheckedOutEvent(_ConnectionEvent): @@ -1036,6 +1081,7 @@ class ConnectionCheckedOutEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -1049,6 +1095,7 @@ class ConnectionCheckedInEvent(_ConnectionEvent): .. versionadded:: 3.9 """ + __slots__ = () @@ -1073,7 +1120,10 @@ def topology_id(self) -> ObjectId: def __repr__(self): return "<%s %s topology_id: %s>" % ( - self.__class__.__name__, self.server_address, self.topology_id) + self.__class__.__name__, + self.server_address, + self.topology_id, + ) class ServerDescriptionChangedEvent(_ServerEvent): @@ -1082,9 +1132,14 @@ class ServerDescriptionChangedEvent(_ServerEvent): .. versionadded:: 3.3 """ - __slots__ = ('__previous_description', '__new_description') + __slots__ = ("__previous_description", "__new_description") - def __init__(self, previous_description: "ServerDescription", new_description: "ServerDescription", *args: Any) -> None: + def __init__( + self, + previous_description: "ServerDescription", + new_description: "ServerDescription", + *args: Any + ) -> None: super(ServerDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @@ -1103,8 +1158,11 @@ def new_description(self) -> "ServerDescription": def __repr__(self): return "<%s %s changed from: %s, to: %s>" % ( - self.__class__.__name__, self.server_address, - self.previous_description, self.new_description) + self.__class__.__name__, + self.server_address, + self.previous_description, + self.new_description, + ) class ServerOpeningEvent(_ServerEvent): @@ -1128,7 +1186,7 @@ class ServerClosedEvent(_ServerEvent): class TopologyEvent(object): """Base class for topology description events.""" - __slots__ = ('__topology_id') + __slots__ = "__topology_id" def __init__(self, topology_id: ObjectId) -> None: self.__topology_id = topology_id @@ -1139,8 +1197,7 @@ def topology_id(self) -> ObjectId: return self.__topology_id def __repr__(self): - return "<%s topology_id: %s>" % ( - self.__class__.__name__, self.topology_id) + return "<%s topology_id: %s>" % (self.__class__.__name__, self.topology_id) class TopologyDescriptionChangedEvent(TopologyEvent): @@ -1149,9 +1206,14 @@ class TopologyDescriptionChangedEvent(TopologyEvent): .. versionadded:: 3.3 """ - __slots__ = ('__previous_description', '__new_description') + __slots__ = ("__previous_description", "__new_description") - def __init__(self, previous_description: "TopologyDescription", new_description: "TopologyDescription", *args: Any) -> None: + def __init__( + self, + previous_description: "TopologyDescription", + new_description: "TopologyDescription", + *args: Any + ) -> None: super(TopologyDescriptionChangedEvent, self).__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @@ -1170,8 +1232,11 @@ def new_description(self) -> "TopologyDescription": def __repr__(self): return "<%s topology_id: %s changed from: %s, to: %s>" % ( - self.__class__.__name__, self.topology_id, - self.previous_description, self.new_description) + self.__class__.__name__, + self.topology_id, + self.previous_description, + self.new_description, + ) class TopologyOpenedEvent(TopologyEvent): @@ -1195,7 +1260,7 @@ class TopologyClosedEvent(TopologyEvent): class _ServerHeartbeatEvent(object): """Base class for server heartbeat events.""" - __slots__ = ('__connection_id') + __slots__ = "__connection_id" def __init__(self, connection_id: _Address) -> None: self.__connection_id = connection_id @@ -1225,9 +1290,11 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply', '__awaited') + __slots__ = ("__duration", "__reply", "__awaited") - def __init__(self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False) -> None: + def __init__( + self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + ) -> None: super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply @@ -1255,8 +1322,12 @@ def awaited(self) -> bool: def __repr__(self): return "<%s %s duration: %s, awaited: %s, reply: %s>" % ( - self.__class__.__name__, self.connection_id, - self.duration, self.awaited, self.reply) + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): @@ -1266,9 +1337,11 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ('__duration', '__reply', '__awaited') + __slots__ = ("__duration", "__reply", "__awaited") - def __init__(self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False) -> None: + def __init__( + self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False + ) -> None: super(ServerHeartbeatFailedEvent, self).__init__(connection_id) self.__duration = duration self.__reply = reply @@ -1296,8 +1369,12 @@ def awaited(self) -> bool: def __repr__(self): return "<%s %s duration: %s, awaited: %s, reply: %r>" % ( - self.__class__.__name__, self.connection_id, - self.duration, self.awaited, self.reply) + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) class _EventListeners(object): @@ -1308,6 +1385,7 @@ class _EventListeners(object): :Parameters: - `listeners`: A list of event listeners. """ + def __init__(self, listeners): self.__command_listeners = _LISTENERS.command_listeners[:] self.__server_listeners = _LISTENERS.server_listeners[:] @@ -1329,8 +1407,7 @@ def __init__(self, listeners): self.__cmap_listeners.append(lst) self.__enabled_for_commands = bool(self.__command_listeners) self.__enabled_for_server = bool(self.__server_listeners) - self.__enabled_for_server_heartbeat = bool( - self.__server_heartbeat_listeners) + self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) self.__enabled_for_topology = bool(self.__topology_listeners) self.__enabled_for_cmap = bool(self.__cmap_listeners) @@ -1361,15 +1438,17 @@ def enabled_for_cmap(self): def event_listeners(self): """List of registered event listeners.""" - return (self.__command_listeners + - self.__server_heartbeat_listeners + - self.__server_listeners + - self.__topology_listeners + - self.__cmap_listeners) - - def publish_command_start(self, command, database_name, - request_id, connection_id, op_id=None, - service_id=None): + return ( + self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + + self.__cmap_listeners + ) + + def publish_command_start( + self, command, database_name, request_id, connection_id, op_id=None, service_id=None + ): """Publish a CommandStartedEvent to all command listeners. :Parameters: @@ -1385,18 +1464,25 @@ def publish_command_start(self, command, database_name, if op_id is None: op_id = request_id event = CommandStartedEvent( - command, database_name, request_id, connection_id, op_id, - service_id=service_id) + command, database_name, request_id, connection_id, op_id, service_id=service_id + ) for subscriber in self.__command_listeners: try: subscriber.started(event) except Exception: _handle_exception() - def publish_command_success(self, duration, reply, command_name, - request_id, connection_id, op_id=None, - service_id=None, - speculative_hello=False): + def publish_command_success( + self, + duration, + reply, + command_name, + request_id, + connection_id, + op_id=None, + service_id=None, + speculative_hello=False, + ): """Publish a CommandSucceededEvent to all command listeners. :Parameters: @@ -1417,17 +1503,24 @@ def publish_command_success(self, duration, reply, command_name, # speculativeAuthenticate. reply = {} event = CommandSucceededEvent( - duration, reply, command_name, request_id, connection_id, op_id, - service_id) + duration, reply, command_name, request_id, connection_id, op_id, service_id + ) for subscriber in self.__command_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_command_failure(self, duration, failure, command_name, - request_id, connection_id, op_id=None, - service_id=None): + def publish_command_failure( + self, + duration, + failure, + command_name, + request_id, + connection_id, + op_id=None, + service_id=None, + ): """Publish a CommandFailedEvent to all command listeners. :Parameters: @@ -1444,8 +1537,8 @@ def publish_command_failure(self, duration, failure, command_name, if op_id is None: op_id = request_id event = CommandFailedEvent( - duration, failure, command_name, request_id, connection_id, op_id, - service_id=service_id) + duration, failure, command_name, request_id, connection_id, op_id, service_id=service_id + ) for subscriber in self.__command_listeners: try: subscriber.failed(event) @@ -1466,8 +1559,7 @@ def publish_server_heartbeat_started(self, connection_id): except Exception: _handle_exception() - def publish_server_heartbeat_succeeded(self, connection_id, duration, - reply, awaited): + def publish_server_heartbeat_succeeded(self, connection_id, duration, reply, awaited): """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. @@ -1477,17 +1569,15 @@ def publish_server_heartbeat_succeeded(self, connection_id, duration, resolution for the platform. - `reply`: The command reply. - `awaited`: True if the response was awaited. - """ - event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, - awaited) + """ + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.succeeded(event) except Exception: _handle_exception() - def publish_server_heartbeat_failed(self, connection_id, duration, reply, - awaited): + def publish_server_heartbeat_failed(self, connection_id, duration, reply, awaited): """Publish a ServerHeartbeatFailedEvent to all server heartbeat listeners. @@ -1497,9 +1587,8 @@ def publish_server_heartbeat_failed(self, connection_id, duration, reply, resolution for the platform. - `reply`: The command reply. - `awaited`: True if the response was awaited. - """ - event = ServerHeartbeatFailedEvent(duration, reply, connection_id, - awaited) + """ + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.failed(event) @@ -1536,9 +1625,9 @@ def publish_server_closed(self, server_address, topology_id): except Exception: _handle_exception() - def publish_server_description_changed(self, previous_description, - new_description, server_address, - topology_id): + def publish_server_description_changed( + self, previous_description, new_description, server_address, topology_id + ): """Publish a ServerDescriptionChangedEvent to all server listeners. :Parameters: @@ -1548,9 +1637,9 @@ def publish_server_description_changed(self, previous_description, - `topology_id`: A unique identifier for the topology this server is a part of. """ - event = ServerDescriptionChangedEvent(previous_description, - new_description, server_address, - topology_id) + event = ServerDescriptionChangedEvent( + previous_description, new_description, server_address, topology_id + ) for subscriber in self.__server_listeners: try: subscriber.description_changed(event) @@ -1585,8 +1674,9 @@ def publish_topology_closed(self, topology_id): except Exception: _handle_exception() - def publish_topology_description_changed(self, previous_description, - new_description, topology_id): + def publish_topology_description_changed( + self, previous_description, new_description, topology_id + ): """Publish a TopologyDescriptionChangedEvent to all topology listeners. :Parameters: @@ -1595,8 +1685,7 @@ def publish_topology_description_changed(self, previous_description, - `topology_id`: A unique identifier for the topology this server is a part of. """ - event = TopologyDescriptionChangedEvent(previous_description, - new_description, topology_id) + event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) for subscriber in self.__topology_listeners: try: subscriber.description_changed(event) @@ -1604,8 +1693,7 @@ def publish_topology_description_changed(self, previous_description, _handle_exception() def publish_pool_created(self, address, options): - """Publish a :class:`PoolCreatedEvent` to all pool listeners. - """ + """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" event = PoolCreatedEvent(address, options) for subscriber in self.__cmap_listeners: try: @@ -1614,8 +1702,7 @@ def publish_pool_created(self, address, options): _handle_exception() def publish_pool_ready(self, address): - """Publish a :class:`PoolReadyEvent` to all pool listeners. - """ + """Publish a :class:`PoolReadyEvent` to all pool listeners.""" event = PoolReadyEvent(address) for subscriber in self.__cmap_listeners: try: @@ -1624,8 +1711,7 @@ def publish_pool_ready(self, address): _handle_exception() def publish_pool_cleared(self, address, service_id): - """Publish a :class:`PoolClearedEvent` to all pool listeners. - """ + """Publish a :class:`PoolClearedEvent` to all pool listeners.""" event = PoolClearedEvent(address, service_id) for subscriber in self.__cmap_listeners: try: @@ -1634,8 +1720,7 @@ def publish_pool_cleared(self, address, service_id): _handle_exception() def publish_pool_closed(self, address): - """Publish a :class:`PoolClosedEvent` to all pool listeners. - """ + """Publish a :class:`PoolClosedEvent` to all pool listeners.""" event = PoolClosedEvent(address) for subscriber in self.__cmap_listeners: try: @@ -1655,8 +1740,7 @@ def publish_connection_created(self, address, connection_id): _handle_exception() def publish_connection_ready(self, address, connection_id): - """Publish a :class:`ConnectionReadyEvent` to all connection listeners. - """ + """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" event = ConnectionReadyEvent(address, connection_id) for subscriber in self.__cmap_listeners: try: diff --git a/pymongo/network.py b/pymongo/network.py index 48e5084e31..db952af731 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -24,8 +24,12 @@ from pymongo import helpers, message from pymongo.common import MAX_MESSAGE_SIZE from pymongo.compression_support import _NO_COMPRESSION, decompress -from pymongo.errors import (NotPrimaryError, OperationFailure, ProtocolError, - _OperationCancelled) +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, + ProtocolError, + _OperationCancelled, +) from pymongo.message import _UNPACK_REPLY, _OpMsg from pymongo.monitoring import _is_speculative_authenticate from pymongo.socket_checker import _errno_from_exception @@ -33,18 +37,29 @@ _UNPACK_HEADER = struct.Struct(" max_bson_size): + if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: message._raise_document_too_large(name, size, max_bson_size) else: request_id, msg, size = message._query( - 0, ns, 0, -1, spec, None, codec_options, compression_ctx) + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) - if (max_bson_size is not None - and size > max_bson_size + message._COMMAND_OVERHEAD): - message._raise_document_too_large( - name, size, max_bson_size + message._COMMAND_OVERHEAD) + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) if publish: encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start(orig, dbname, request_id, address, - service_id=sock_info.service_id) + listeners.publish_command_start( + orig, dbname, request_id, address, service_id=sock_info.service_id + ) start = datetime.datetime.now() try: @@ -137,15 +150,19 @@ def command(sock_info, dbname, spec, is_mongos, reply = receive_message(sock_info, request_id) sock_info.more_to_come = reply.more_to_come unpacked_docs = reply.unpack_response( - codec_options=codec_options, user_fields=user_fields) + codec_options=codec_options, user_fields=user_fields + ) response_doc = unpacked_docs[0] if client: client._process_response(response_doc, session) if check: helpers._check_command_response( - response_doc, sock_info.max_wire_version, allowable_errors, - parse_write_concern_error=parse_write_concern_error) + response_doc, + sock_info.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) except Exception as exc: if publish: duration = (datetime.datetime.now() - start) + encoding_duration @@ -154,25 +171,31 @@ def command(sock_info, dbname, spec, is_mongos, else: failure = message._convert_exception(exc) listeners.publish_command_failure( - duration, failure, name, request_id, address, - service_id=sock_info.service_id) + duration, failure, name, request_id, address, service_id=sock_info.service_id + ) raise if publish: duration = (datetime.datetime.now() - start) + encoding_duration listeners.publish_command_success( - duration, response_doc, name, request_id, address, + duration, + response_doc, + name, + request_id, + address, service_id=sock_info.service_id, - speculative_hello=speculative_hello) + speculative_hello=speculative_hello, + ) if client and client._encrypter and reply: decrypted = client._encrypter.decrypt(reply.raw_command_response()) - response_doc = _decode_all_selective(decrypted, codec_options, - user_fields)[0] + response_doc = _decode_all_selective(decrypted, codec_options, user_fields)[0] return response_doc + _UNPACK_COMPRESSION_HEADER = struct.Struct(" max_message_size: - raise ProtocolError("Message length (%r) is larger than server max " - "message size (%r)" % (length, max_message_size)) + raise ProtocolError( + "Message length (%r) is larger than server max " + "message size (%r)" % (length, max_message_size) + ) if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - _receive_data_on_socket(sock_info, 9, deadline)) - data = decompress( - _receive_data_on_socket(sock_info, length - 25, deadline), - compressor_id) + _receive_data_on_socket(sock_info, 9, deadline) + ) + data = decompress(_receive_data_on_socket(sock_info, length - 25, deadline), compressor_id) else: data = _receive_data_on_socket(sock_info, length - 16, deadline) try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected " - "%r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError("Got opcode %r but expected " "%r" % (op_code, _UNPACK_REPLY.keys())) return unpack_reply(data) @@ -222,7 +246,7 @@ def wait_for_read(sock_info, deadline): sock = sock_info.sock while True: # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, 'pending') and sock.pending() > 0: + if hasattr(sock, "pending") and sock.pending() > 0: readable = True else: # Wait up to 500ms for the socket to become readable and then @@ -231,15 +255,15 @@ def wait_for_read(sock_info, deadline): timeout = max(min(deadline - time.monotonic(), _POLL_TIMEOUT), 0.001) else: timeout = _POLL_TIMEOUT - readable = sock_info.socket_checker.select( - sock, read=True, timeout=timeout) + readable = sock_info.socket_checker.select(sock, read=True, timeout=timeout) if context.cancelled: - raise _OperationCancelled('hello cancelled') + raise _OperationCancelled("hello cancelled") if readable: return if deadline and time.monotonic() > deadline: raise socket.timeout("timed out") + def _receive_data_on_socket(sock_info, length, deadline): buf = bytearray(length) mv = memoryview(buf) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 0db1014774..24507260ed 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -21,9 +21,11 @@ class _OCSPCache(object): """A cache for OCSP responses.""" - CACHE_KEY_TYPE = namedtuple('OcspResponseCacheKey', # type: ignore - ['hash_algorithm', 'issuer_name_hash', - 'issuer_key_hash', 'serial_number']) + + CACHE_KEY_TYPE = namedtuple( # type: ignore + "OcspResponseCacheKey", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) def __init__(self): self._data = {} @@ -35,7 +37,8 @@ def _get_cache_key(self, ocsp_request): hash_algorithm=ocsp_request.hash_algorithm.name.lower(), issuer_name_hash=ocsp_request.issuer_name_hash, issuer_key_hash=ocsp_request.issuer_key_hash, - serial_number=ocsp_request.serial_number) + serial_number=ocsp_request.serial_number, + ) def __setitem__(self, key, value): """Add/update a cache entry. @@ -56,15 +59,13 @@ def __setitem__(self, key, value): return # Do nothing if the response is invalid. - if not (value.this_update <= _datetime.utcnow() - < value.next_update): + if not (value.this_update <= _datetime.utcnow() < value.next_update): return # Cache new response OR update cached response if new response # has longer validity. cached_value = self._data.get(cache_key, None) - if (cached_value is None or - cached_value.next_update < value.next_update): + if cached_value is None or cached_value.next_update < value.next_update: self._data[cache_key] = value def __getitem__(self, item): @@ -79,8 +80,7 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if (value.this_update <= _datetime.utcnow() < - value.next_update): + if value.this_update <= _datetime.utcnow() < value.next_update: return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index a24fcc5730..369055ea8d 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -20,35 +20,30 @@ from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend -from cryptography.hazmat.primitives.asymmetric.dsa import \ - DSAPublicKey as _DSAPublicKey +from cryptography.hazmat.primitives.asymmetric.dsa import DSAPublicKey as _DSAPublicKey from cryptography.hazmat.primitives.asymmetric.ec import ECDSA as _ECDSA -from cryptography.hazmat.primitives.asymmetric.ec import \ - EllipticCurvePublicKey as _EllipticCurvePublicKey -from cryptography.hazmat.primitives.asymmetric.padding import \ - PKCS1v15 as _PKCS1v15 -from cryptography.hazmat.primitives.asymmetric.rsa import \ - RSAPublicKey as _RSAPublicKey +from cryptography.hazmat.primitives.asymmetric.ec import ( + EllipticCurvePublicKey as _EllipticCurvePublicKey, +) +from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 as _PKCS1v15 +from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey as _RSAPublicKey from cryptography.hazmat.primitives.hashes import SHA1 as _SHA1 from cryptography.hazmat.primitives.hashes import Hash as _Hash from cryptography.hazmat.primitives.serialization import Encoding as _Encoding -from cryptography.hazmat.primitives.serialization import \ - PublicFormat as _PublicFormat -from cryptography.x509 import \ - AuthorityInformationAccess as _AuthorityInformationAccess +from cryptography.hazmat.primitives.serialization import PublicFormat as _PublicFormat +from cryptography.x509 import AuthorityInformationAccess as _AuthorityInformationAccess from cryptography.x509 import ExtendedKeyUsage as _ExtendedKeyUsage from cryptography.x509 import ExtensionNotFound as _ExtensionNotFound from cryptography.x509 import TLSFeature as _TLSFeature from cryptography.x509 import TLSFeatureType as _TLSFeatureType -from cryptography.x509 import \ - load_pem_x509_certificate as _load_pem_x509_certificate +from cryptography.x509 import load_pem_x509_certificate as _load_pem_x509_certificate from cryptography.x509.ocsp import OCSPCertStatus as _OCSPCertStatus from cryptography.x509.ocsp import OCSPRequestBuilder as _OCSPRequestBuilder from cryptography.x509.ocsp import OCSPResponseStatus as _OCSPResponseStatus -from cryptography.x509.ocsp import \ - load_der_ocsp_response as _load_der_ocsp_response -from cryptography.x509.oid import \ - AuthorityInformationAccessOID as _AuthorityInformationAccessOID +from cryptography.x509.ocsp import load_der_ocsp_response as _load_der_ocsp_response +from cryptography.x509.oid import ( + AuthorityInformationAccessOID as _AuthorityInformationAccessOID, +) from cryptography.x509.oid import ExtendedKeyUsageOID as _ExtendedKeyUsageOID from requests import post as _post from requests.exceptions import RequestException as _RequestException @@ -61,21 +56,20 @@ _LOGGER = _logging.getLogger(__name__) _CERT_REGEX = _re.compile( - b'-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+', - _re.DOTALL) + b"-----BEGIN CERTIFICATE[^\r\n]+.+?-----END CERTIFICATE[^\r\n]+", _re.DOTALL +) def _load_trusted_ca_certs(cafile): """Parse the tlsCAFile into a list of certificates.""" - with open(cafile, 'rb') as f: + with open(cafile, "rb") as f: data = f.read() # Load all the certs in the file. trusted_ca_certs = [] backend = _default_backend() for cert_data in _re.findall(_CERT_REGEX, data): - trusted_ca_certs.append( - _load_pem_x509_certificate(cert_data, backend)) + trusted_ca_certs.append(_load_pem_x509_certificate(cert_data, backend)) return trusted_ca_certs @@ -127,14 +121,11 @@ def _public_key_hash(cert): # (excluding the tag and length fields)" # https://stackoverflow.com/a/46309453/600498 if isinstance(public_key, _RSAPublicKey): - pbytes = public_key.public_bytes( - _Encoding.DER, _PublicFormat.PKCS1) + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.PKCS1) elif isinstance(public_key, _EllipticCurvePublicKey): - pbytes = public_key.public_bytes( - _Encoding.X962, _PublicFormat.UncompressedPoint) + pbytes = public_key.public_bytes(_Encoding.X962, _PublicFormat.UncompressedPoint) else: - pbytes = public_key.public_bytes( - _Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) + pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) digest = _Hash(_SHA1(), backend=_default_backend()) digest.update(pbytes) return digest.finalize() @@ -142,16 +133,18 @@ def _public_key_hash(cert): def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): return [ - cert for cert in certificates - if _public_key_hash(cert) == responder_key_hash and - cert.issuer == issuer.subject] + cert + for cert in certificates + if _public_key_hash(cert) == responder_key_hash and cert.issuer == issuer.subject + ] def _get_certs_by_name(certificates, issuer, responder_name): return [ - cert for cert in certificates - if cert.subject == responder_name and - cert.issuer == issuer.subject] + cert + for cert in certificates + if cert.subject == responder_name and cert.issuer == issuer.subject + ] def _verify_response_signature(issuer, response): @@ -189,10 +182,11 @@ def _verify_response_signature(issuer, response): _LOGGER.debug("Delegate not authorized for OCSP signing") return 0 if not _verify_signature( - issuer.public_key(), - responder_cert.signature, - responder_cert.signature_hash_algorithm, - responder_cert.tbs_certificate_bytes): + issuer.public_key(), + responder_cert.signature, + responder_cert.signature_hash_algorithm, + responder_cert.tbs_certificate_bytes, + ): _LOGGER.debug("Delegate signature verification failed") return 0 # RFC6960, Section 3.2, Number 2 @@ -200,7 +194,8 @@ def _verify_response_signature(issuer, response): responder_cert.public_key(), response.signature, response.signature_hash_algorithm, - response.tbs_response_bytes) + response.tbs_response_bytes, + ) if not ret: _LOGGER.debug("Response signature verification failed") return ret @@ -244,8 +239,9 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): response = _post( uri, data=ocsp_request.public_bytes(_Encoding.DER), - headers={'Content-Type': 'application/ocsp-request'}, - timeout=5) + headers={"Content-Type": "application/ocsp-request"}, + timeout=5, + ) except _RequestException as exc: _LOGGER.debug("HTTP request failed: %s", exc) return None @@ -253,8 +249,7 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): _LOGGER.debug("HTTP request returned %d", response.status_code) return None ocsp_response = _load_der_ocsp_response(response.content) - _LOGGER.debug( - "OCSP response status: %r", ocsp_response.response_status) + _LOGGER.debug("OCSP response status: %r", ocsp_response.response_status) if ocsp_response.response_status != _OCSPResponseStatus.SUCCESSFUL: return None # RFC6960, Section 3.2, Number 1. Only relevant if we need to @@ -298,7 +293,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): ocsp_response_cache = user_data.ocsp_response_cache # No stapled OCSP response - if ocsp_bytes == b'': + if ocsp_bytes == b"": _LOGGER.debug("Peer did not staple an OCSP response") if must_staple: _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") @@ -313,9 +308,11 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No authority access information, soft fail") # No stapled OCSP response, no responder URI, soft fail. return 1 - uris = [desc.access_location.value - for desc in ext.value - if desc.access_method == _AuthorityInformationAccessOID.OCSP] + uris = [ + desc.access_location.value + for desc in ext.value + if desc.access_method == _AuthorityInformationAccessOID.OCSP + ] if not uris: _LOGGER.debug("No OCSP URI, soft fail") # No responder URI, soft fail. @@ -328,8 +325,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): # successful, valid responses with a certificate status of REVOKED. for uri in uris: _LOGGER.debug("Trying %s", uri) - response = _get_ocsp_response( - cert, issuer, uri, ocsp_response_cache) + response = _get_ocsp_response(cert, issuer, uri, ocsp_response_cache) if response is None: # The endpoint didn't respond in time, or the response was # unsuccessful or didn't match the request, or the response @@ -349,8 +345,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No issuer cert?") return 0 response = _load_der_ocsp_response(ocsp_bytes) - _LOGGER.debug( - "OCSP response status: %r", response.response_status) + _LOGGER.debug("OCSP response status: %r", response.response_status) # This happens in _request_ocsp when there is no stapled response so # we know if we can compare serial numbers for the request and response. if response.response_status != _OCSPResponseStatus.SUCCESSFUL: diff --git a/pymongo/operations.py b/pymongo/operations.py index d07f027e24..8f264c48c2 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -63,7 +63,12 @@ class DeleteOne(object): __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a DeleteOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -95,16 +100,14 @@ def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation, - hint=self._hint) + bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) def __repr__(self): return "DeleteOne(%r, %r)" % (self._filter, self._collation) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ((other._filter, other._collation) == - (self._filter, self._collation)) + return (other._filter, other._collation) == (self._filter, self._collation) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -116,7 +119,12 @@ class DeleteMany(object): __slots__ = ("_filter", "_collation", "_hint") - def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a DeleteMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -148,16 +156,14 @@ def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation, - hint=self._hint) + bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) def __repr__(self): return "DeleteMany(%r, %r)" % (self._filter, self._collation) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ((other._filter, other._collation) == - (self._filter, self._collation)) + return (other._filter, other._collation) == (self._filter, self._collation) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -169,8 +175,14 @@ class ReplaceOne(object): __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - def __init__(self, filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a ReplaceOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -210,15 +222,19 @@ def __init__(self, filter: Mapping[str, Any], replacement: Mapping[str, Any], up def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_replace(self._filter, self._doc, self._upsert, - collation=self._collation, hint=self._hint) + bulkobj.add_replace( + self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint + ) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return ( - (other._filter, other._doc, other._upsert, other._collation, - other._hint) == (self._filter, self._doc, self._upsert, - self._collation, other._hint)) + return (other._filter, other._doc, other._upsert, other._collation, other._hint) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + other._hint, + ) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -226,15 +242,19 @@ def __ne__(self, other: Any) -> bool: def __repr__(self): return "%s(%r, %r, %r, %r, %r)" % ( - self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation, self._hint) + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + ) class _UpdateOp(object): """Private base class for update operations.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", - "_hint") + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") def __init__(self, filter, doc, upsert, collation, array_filters, hint): if filter is not None: @@ -257,10 +277,20 @@ def __init__(self, filter, doc, upsert, collation, array_filters, hint): def __eq__(self, other): if type(other) == type(self): return ( - (other._filter, other._doc, other._upsert, other._collation, - other._array_filters, other._hint) == - (self._filter, self._doc, self._upsert, self._collation, - self._array_filters, self._hint)) + other._filter, + other._doc, + other._upsert, + other._collation, + other._array_filters, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) return NotImplemented def __ne__(self, other): @@ -268,8 +298,14 @@ def __ne__(self, other): def __repr__(self): return "%s(%r, %r, %r, %r, %r, %r)" % ( - self.__class__.__name__, self._filter, self._doc, self._upsert, - self._collation, self._array_filters, self._hint) + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) class UpdateOne(_UpdateOp): @@ -277,8 +313,15 @@ class UpdateOne(_UpdateOp): __slots__ = () - def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, - array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Represents an update_one operation. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -308,15 +351,19 @@ def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _ .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateOne, self).__init__(filter, update, upsert, collation, - array_filters, hint) + super(UpdateOne, self).__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update(self._filter, self._doc, False, self._upsert, - collation=self._collation, - array_filters=self._array_filters, - hint=self._hint) + bulkobj.add_update( + self._filter, + self._doc, + False, + self._upsert, + collation=self._collation, + array_filters=self._array_filters, + hint=self._hint, + ) class UpdateMany(_UpdateOp): @@ -324,8 +371,15 @@ class UpdateMany(_UpdateOp): __slots__ = () - def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, collation: Optional[_CollationIn] = None, - array_filters: Optional[List[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None) -> None: + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create an UpdateMany instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -355,15 +409,19 @@ def __init__(self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _ .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateMany, self).__init__(filter, update, upsert, collation, - array_filters, hint) + super(UpdateMany, self).__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update(self._filter, self._doc, True, self._upsert, - collation=self._collation, - array_filters=self._array_filters, - hint=self._hint) + bulkobj.add_update( + self._filter, + self._doc, + True, + self._upsert, + collation=self._collation, + array_filters=self._array_filters, + hint=self._hint, + ) class IndexModel(object): @@ -436,10 +494,10 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: if "name" not in kwargs: kwargs["name"] = _gen_index_name(keys) kwargs["key"] = _index_document(keys) - collation = validate_collation_or_none(kwargs.pop('collation', None)) + collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__document = kwargs if collation is not None: - self.__document['collation'] = collation + self.__document["collation"] = collation @property def document(self) -> Dict[str, Any]: diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 36e094c4cb..5bb08ec23f 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -22,7 +22,7 @@ class PeriodicExecutor(object): def __init__(self, interval, min_interval, target, name=None): - """"Run a target function periodically on a background thread. + """ "Run a target function periodically on a background thread. If the target's return value is false, the executor stops. @@ -50,8 +50,7 @@ def __init__(self, interval, min_interval, target, name=None): self._lock = threading.Lock() def __repr__(self): - return '<%s(name=%s) object at 0x%x>' % ( - self.__class__.__name__, self._name, id(self)) + return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) def open(self) -> None: """Start. Multiple calls have no effect. diff --git a/pymongo/pool.py b/pymongo/pool.py index c53c9f4736..d616408ef8 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -30,17 +30,32 @@ from bson.son import SON from pymongo import __version__, auth, helpers from pymongo.client_session import _validate_session_write_concern -from pymongo.common import (MAX_BSON_SIZE, MAX_CONNECTING, MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, MAX_POOL_SIZE, MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, MIN_POOL_SIZE, ORDERED_TYPES, - WAIT_QUEUE_TIMEOUT) -from pymongo.errors import (AutoReconnect, ConfigurationError, - ConnectionFailure, DocumentTooLarge, - InvalidOperation, NetworkTimeout, NotPrimaryError, - OperationFailure, PyMongoError, _CertificateError) +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, + MAX_POOL_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + MIN_POOL_SIZE, + ORDERED_TYPES, + WAIT_QUEUE_TIMEOUT, +) +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + DocumentTooLarge, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + _CertificateError, +) from pymongo.hello import Hello, HelloCompat -from pymongo.monitoring import (ConnectionCheckOutFailedReason, - ConnectionClosedReason) +from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command @@ -60,12 +75,15 @@ def is_ip_address(address): except (ValueError, UnicodeError): return False + try: from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + def _set_non_inheritable_non_atomic(fd): """Set the close-on-exec flag on the given file descriptor.""" flags = fcntl(fd, F_GETFD) fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + except ImportError: # Windows, various platforms we don't claim to support # (Jython, IronPython, ...), systems that don't provide @@ -74,11 +92,12 @@ def _set_non_inheritable_non_atomic(fd): """Dummy function for platforms that don't provide fcntl.""" pass + _MAX_TCP_KEEPIDLE = 120 _MAX_TCP_KEEPINTVL = 10 _MAX_TCP_KEEPCNT = 9 -if sys.platform == 'win32': +if sys.platform == "win32": try: import _winreg as winreg except ImportError: @@ -96,8 +115,8 @@ def _query(key, name, default): try: with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters") as key: + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) except OSError: @@ -108,13 +127,12 @@ def _query(key, name, default): def _set_keepalive_times(sock): idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, - _MAX_TCP_KEEPINTVL * 1000) - if (idle_ms < _WINDOWS_TCP_IDLE_MS or - interval_ms < _WINDOWS_TCP_INTERVAL_MS): - sock.ioctl(socket.SIO_KEEPALIVE_VALS, - (1, idle_ms, interval_ms)) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + else: + def _set_tcp_option(sock, tcp_option, max_value): if hasattr(socket, tcp_option): sockopt = getattr(socket, tcp_option) @@ -129,88 +147,106 @@ def _set_tcp_option(sock, tcp_option, max_value): pass def _set_keepalive_times(sock): - _set_tcp_option(sock, 'TCP_KEEPIDLE', _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, 'TCP_KEEPINTVL', _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, 'TCP_KEEPCNT', _MAX_TCP_KEEPCNT) + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) -_METADATA: SON[str, Any] = SON([ - ('driver', SON([('name', 'PyMongo'), ('version', __version__)])), -]) -if sys.platform.startswith('linux'): +_METADATA: SON[str, Any] = SON( + [ + ("driver", SON([("name", "PyMongo"), ("version", __version__)])), + ] +) + +if sys.platform.startswith("linux"): # platform.linux_distribution was deprecated in Python 3.5 # and removed in Python 3.8. Starting in Python 3.5 it # raises DeprecationWarning # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 _name = platform.system() - _METADATA['os'] = SON([ - ('type', _name), - ('name', _name), - ('architecture', platform.machine()), - # Kernel version (e.g. 4.4.0-17-generic). - ('version', platform.release()) - ]) -elif sys.platform == 'darwin': - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', platform.system()), - ('architecture', platform.machine()), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. - ('version', platform.mac_ver()[0]) - ]) -elif sys.platform == 'win32': - _METADATA['os'] = SON([ - ('type', platform.system()), - # "Windows XP", "Windows 7", "Windows 10", etc. - ('name', ' '.join((platform.system(), platform.release()))), - ('architecture', platform.machine()), - # Windows patch level (e.g. 5.1.2600-SP3) - ('version', '-'.join(platform.win32_ver()[1:3])) - ]) -elif sys.platform.startswith('java'): + _METADATA["os"] = SON( + [ + ("type", _name), + ("name", _name), + ("architecture", platform.machine()), + # Kernel version (e.g. 4.4.0-17-generic). + ("version", platform.release()), + ] + ) +elif sys.platform == "darwin": + _METADATA["os"] = SON( + [ + ("type", platform.system()), + ("name", platform.system()), + ("architecture", platform.machine()), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + ("version", platform.mac_ver()[0]), + ] + ) +elif sys.platform == "win32": + _METADATA["os"] = SON( + [ + ("type", platform.system()), + # "Windows XP", "Windows 7", "Windows 10", etc. + ("name", " ".join((platform.system(), platform.release()))), + ("architecture", platform.machine()), + # Windows patch level (e.g. 5.1.2600-SP3) + ("version", "-".join(platform.win32_ver()[1:3])), + ] + ) +elif sys.platform.startswith("java"): _name, _ver, _arch = platform.java_ver()[-1] - _METADATA['os'] = SON([ - # Linux, Windows 7, Mac OS X, etc. - ('type', _name), - ('name', _name), - # x86, x86_64, AMD64, etc. - ('architecture', _arch), - # Linux kernel version, OSX version, etc. - ('version', _ver) - ]) + _METADATA["os"] = SON( + [ + # Linux, Windows 7, Mac OS X, etc. + ("type", _name), + ("name", _name), + # x86, x86_64, AMD64, etc. + ("architecture", _arch), + # Linux kernel version, OSX version, etc. + ("version", _ver), + ] + ) else: # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) - _aliased = platform.system_alias( - platform.system(), platform.release(), platform.version()) - _METADATA['os'] = SON([ - ('type', platform.system()), - ('name', ' '.join([part for part in _aliased[:2] if part])), - ('architecture', platform.machine()), - ('version', _aliased[2]) - ]) - -if platform.python_implementation().startswith('PyPy'): - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(map(str, sys.pypy_version_info)), # type: ignore - '(Python %s)' % '.'.join(map(str, sys.version_info)))) -elif sys.platform.startswith('java'): - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(map(str, sys.version_info)), - '(%s)' % ' '.join((platform.system(), platform.release())))) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = SON( + [ + ("type", platform.system()), + ("name", " ".join([part for part in _aliased[:2] if part])), + ("architecture", platform.machine()), + ("version", _aliased[2]), + ] + ) + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) else: - _METADATA['platform'] = ' '.join( - (platform.python_implementation(), - '.'.join(map(str, sys.version_info)))) + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) # If the first getaddrinfo call of this interpreter's life is on a thread, # while the main thread holds the import lock, getaddrinfo deadlocks trying # to import the IDNA codec. Import it here, where presumably we're on the # main thread, to avoid the deadlock. See PYTHON-607. -'foo'.encode('idna') +"foo".encode("idna") # Remove after PYTHON-2712 _MOCK_SERVICE_ID = False @@ -221,14 +257,14 @@ def _raise_connection_failure(address, error, msg_prefix=None): host, port = address # If connecting to a Unix socket, port will be None. if port is not None: - msg = '%s:%d: %s' % (host, port, error) + msg = "%s:%d: %s" % (host, port, error) else: - msg = '%s: %s' % (host, error) + msg = "%s: %s" % (host, error) if msg_prefix: msg = msg_prefix + msg if isinstance(error, socket.timeout): raise NetworkTimeout(msg) from error - elif isinstance(error, _SSLError) and 'timed out' in str(error): + elif isinstance(error, _SSLError) and "timed out" in str(error): # Eventlet does not distinguish TLS network timeouts from other # SSLErrors (https://github.com/eventlet/eventlet/issues/692). # Luckily, we can work around this limitation because the phrase @@ -256,26 +292,47 @@ class PoolOptions(object): """ - __slots__ = ('__max_pool_size', '__min_pool_size', - '__max_idle_time_seconds', - '__connect_timeout', '__socket_timeout', - '__wait_queue_timeout', - '__ssl_context', '__tls_allow_invalid_hostnames', - '__event_listeners', '__appname', '__driver', '__metadata', - '__compression_settings', '__max_connecting', - '__pause_enabled', '__server_api', '__load_balanced', - '__credentials') - - def __init__(self, max_pool_size=MAX_POOL_SIZE, - min_pool_size=MIN_POOL_SIZE, - max_idle_time_seconds=MAX_IDLE_TIME_SEC, connect_timeout=None, - socket_timeout=None, wait_queue_timeout=WAIT_QUEUE_TIMEOUT, - ssl_context=None, - tls_allow_invalid_hostnames=False, - event_listeners=None, appname=None, driver=None, - compression_settings=None, max_connecting=MAX_CONNECTING, - pause_enabled=True, server_api=None, load_balanced=None, - credentials=None): + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size=MAX_POOL_SIZE, + min_pool_size=MIN_POOL_SIZE, + max_idle_time_seconds=MAX_IDLE_TIME_SEC, + connect_timeout=None, + socket_timeout=None, + wait_queue_timeout=WAIT_QUEUE_TIMEOUT, + ssl_context=None, + tls_allow_invalid_hostnames=False, + event_listeners=None, + appname=None, + driver=None, + compression_settings=None, + max_connecting=MAX_CONNECTING, + pause_enabled=True, + server_api=None, + load_balanced=None, + credentials=None, + ): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size self.__max_idle_time_seconds = max_idle_time_seconds @@ -295,7 +352,7 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, self.__credentials = credentials self.__metadata = copy.deepcopy(_METADATA) if appname: - self.__metadata['application'] = {'name': appname} + self.__metadata["application"] = {"name": appname} # Combine the "driver" MongoClient option with PyMongo's info, like: # { @@ -307,14 +364,17 @@ def __init__(self, max_pool_size=MAX_POOL_SIZE, # } if driver: if driver.name: - self.__metadata['driver']['name'] = "%s|%s" % ( - _METADATA['driver']['name'], driver.name) + self.__metadata["driver"]["name"] = "%s|%s" % ( + _METADATA["driver"]["name"], + driver.name, + ) if driver.version: - self.__metadata['driver']['version'] = "%s|%s" % ( - _METADATA['driver']['version'], driver.version) + self.__metadata["driver"]["version"] = "%s|%s" % ( + _METADATA["driver"]["version"], + driver.version, + ) if driver.platform: - self.__metadata['platform'] = "%s|%s" % ( - _METADATA['platform'], driver.platform) + self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) @property def _credentials(self): @@ -329,15 +389,15 @@ def non_default_options(self): """ opts = {} if self.__max_pool_size != MAX_POOL_SIZE: - opts['maxPoolSize'] = self.__max_pool_size + opts["maxPoolSize"] = self.__max_pool_size if self.__min_pool_size != MIN_POOL_SIZE: - opts['minPoolSize'] = self.__min_pool_size + opts["minPoolSize"] = self.__min_pool_size if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: - opts['maxIdleTimeMS'] = self.__max_idle_time_seconds * 1000 + opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: - opts['waitQueueTimeoutMS'] = self.__wait_queue_timeout * 1000 + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 if self.__max_connecting != MAX_CONNECTING: - opts['maxConnecting'] = self.__max_connecting + opts["maxConnecting"] = self.__max_connecting return opts @property @@ -383,14 +443,12 @@ def max_idle_time_seconds(self): @property def connect_timeout(self): - """How long a connection can take to be opened before timing out. - """ + """How long a connection can take to be opened before timing out.""" return self.__connect_timeout @property def socket_timeout(self): - """How long a send or receive on a socket can take before timing out. - """ + """How long a send or receive on a socket can take before timing out.""" return self.__socket_timeout @property @@ -402,32 +460,27 @@ def wait_queue_timeout(self): @property def _ssl_context(self): - """An SSLContext instance or None. - """ + """An SSLContext instance or None.""" return self.__ssl_context @property def tls_allow_invalid_hostnames(self): - """If True skip ssl.match_hostname. - """ + """If True skip ssl.match_hostname.""" return self.__tls_allow_invalid_hostnames @property def _event_listeners(self): - """An instance of pymongo.monitoring._EventListeners. - """ + """An instance of pymongo.monitoring._EventListeners.""" return self.__event_listeners @property def appname(self): - """The application name, for sending with hello in server handshake. - """ + """The application name, for sending with hello in server handshake.""" return self.__appname @property def driver(self): - """Driver name and version, for sending with hello in handshake. - """ + """Driver name and version, for sending with hello in handshake.""" return self.__driver @property @@ -436,20 +489,17 @@ def _compression_settings(self): @property def metadata(self): - """A dict of metadata about the application, driver, os, and platform. - """ + """A dict of metadata about the application, driver, os, and platform.""" return self.__metadata.copy() @property def server_api(self): - """A pymongo.server_api.ServerApi or None. - """ + """A pymongo.server_api.ServerApi or None.""" return self.__server_api @property def load_balanced(self): - """True if this Pool is configured in load balanced mode. - """ + """True if this Pool is configured in load balanced mode.""" return self.__load_balanced @@ -476,6 +526,7 @@ class SocketInfo(object): - `address`: the server's (host, port) - `id`: the id of this socket in it's pool """ + def __init__(self, sock, pool, address, id): self.pool_ref = weakref.ref(pool) self.sock = sock @@ -544,7 +595,7 @@ def hello_cmd(self): self.op_msg_enabled = True return SON([(HelloCompat.CMD, 1)]) else: - return SON([(HelloCompat.LEGACY_CMD, 1), ('helloOk', True)]) + return SON([(HelloCompat.LEGACY_CMD, 1), ("helloOk", True)]) def hello(self): return self._hello(None, None, None) @@ -555,58 +606,58 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): awaitable = False if performing_handshake: self.performed_handshake = True - cmd['client'] = self.opts.metadata + cmd["client"] = self.opts.metadata if self.compression_settings: - cmd['compression'] = self.compression_settings.compressors + cmd["compression"] = self.compression_settings.compressors if self.opts.load_balanced: - cmd['loadBalanced'] = True + cmd["loadBalanced"] = True elif topology_version is not None: - cmd['topologyVersion'] = topology_version - cmd['maxAwaitTimeMS'] = int(heartbeat_frequency*1000) + cmd["topologyVersion"] = topology_version + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) awaitable = True # If connect_timeout is None there is no timeout. if self.opts.connect_timeout: - self.sock.settimeout( - self.opts.connect_timeout + heartbeat_frequency) + self.sock.settimeout(self.opts.connect_timeout + heartbeat_frequency) if not performing_handshake and cluster_time is not None: - cmd['$clusterTime'] = cluster_time + cmd["$clusterTime"] = cluster_time creds = self.opts._credentials if creds: - if creds.mechanism == 'DEFAULT' and creds.username: - cmd['saslSupportedMechs'] = creds.source + '.' + creds.username + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." + creds.username auth_ctx = auth._AuthContext.from_credentials(creds) if auth_ctx: - cmd['speculativeAuthenticate'] = auth_ctx.speculate_command() + cmd["speculativeAuthenticate"] = auth_ctx.speculate_command() else: auth_ctx = None - doc = self.command('admin', cmd, publish_events=False, - exhaust_allowed=awaitable) + doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) # PYTHON-2712 will remove this topologyVersion fallback logic. if self.opts.load_balanced and _MOCK_SERVICE_ID: - process_id = doc.get('topologyVersion', {}).get('processId') - doc.setdefault('serviceId', process_id) + process_id = doc.get("topologyVersion", {}).get("processId") + doc.setdefault("serviceId", process_id) if not self.opts.load_balanced: - doc.pop('serviceId', None) + doc.pop("serviceId", None) hello = Hello(doc, awaitable=awaitable) self.is_writable = hello.is_writable self.max_wire_version = hello.max_wire_version self.max_bson_size = hello.max_bson_size self.max_message_size = hello.max_message_size self.max_write_batch_size = hello.max_write_batch_size - self.supports_sessions = ( - hello.logical_session_timeout_minutes is not None) + self.supports_sessions = hello.logical_session_timeout_minutes is not None self.hello_ok = hello.hello_ok self.is_repl = hello.server_type in ( - SERVER_TYPE.RSPrimary, SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther, SERVER_TYPE.RSGhost) + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) self.is_standalone = hello.server_type == SERVER_TYPE.Standalone self.is_mongos = hello.server_type == SERVER_TYPE.Mongos if performing_handshake and self.compression_settings: - ctx = self.compression_settings.get_compression_context( - hello.compressors) + ctx = self.compression_settings.get_compression_context(hello.compressors) self.compression_context = ctx self.op_msg_enabled = True @@ -619,8 +670,9 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): if self.opts.load_balanced: if not hello.service_id: raise ConfigurationError( - 'Driver attempted to initialize in load balancing mode,' - ' but the server does not support this mode') + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) self.service_id = hello.service_id self.generation = self.pool_gen.get(self.service_id) return hello @@ -633,23 +685,28 @@ def _next_reply(self): helpers._check_command_response(response_doc, self.max_wire_version) # Remove after PYTHON-2712. if not self.opts.load_balanced: - response_doc.pop('serviceId', None) + response_doc.pop("serviceId", None) return response_doc - def command(self, dbname, spec, - read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, check=True, - allowable_errors=None, - read_concern=None, - write_concern=None, - parse_write_concern_error=False, - collation=None, - session=None, - client=None, - retryable_write=False, - publish_events=True, - user_fields=None, - exhaust_allowed=False): + def command( + self, + dbname, + spec, + read_preference=ReadPreference.PRIMARY, + codec_options=DEFAULT_CODEC_OPTIONS, + check=True, + allowable_errors=None, + read_concern=None, + write_concern=None, + parse_write_concern_error=False, + collation=None, + session=None, + client=None, + retryable_write=False, + publish_events=True, + user_fields=None, + exhaust_allowed=False, + ): """Execute a command or raise an error. :Parameters: @@ -679,36 +736,43 @@ def command(self, dbname, spec, if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] spec = SON(spec) - if not (write_concern is None or write_concern.acknowledged or - collation is None): - raise ConfigurationError( - 'Collation is unsupported for unacknowledged writes.') - if (write_concern and - not write_concern.is_server_default): - spec['writeConcern'] = write_concern.document + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if write_concern and not write_concern.is_server_default: + spec["writeConcern"] = write_concern.document self.add_server_api(spec) if session: - session._apply_to(spec, retryable_write, read_preference, - self) + session._apply_to(spec, retryable_write, read_preference, self) self.send_cluster_time(spec, session, client) listeners = self.listeners if publish_events else None unacknowledged = write_concern and not write_concern.acknowledged if self.op_msg_enabled: self._raise_if_not_writable(unacknowledged) try: - return command(self, dbname, spec, - self.is_mongos, read_preference, codec_options, - session, client, check, allowable_errors, - self.address, listeners, - self.max_bson_size, read_concern, - parse_write_concern_error=parse_write_concern_error, - collation=collation, - compression_ctx=self.compression_context, - use_op_msg=self.op_msg_enabled, - unacknowledged=unacknowledged, - user_fields=user_fields, - exhaust_allowed=exhaust_allowed) + return command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + ) except (OperationFailure, NotPrimaryError): raise # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. @@ -720,12 +784,11 @@ def send_message(self, message, max_doc_size): If a network exception is raised, the socket is closed. """ - if (self.max_bson_size is not None - and max_doc_size > self.max_bson_size): + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: raise DocumentTooLarge( "BSON document too large (%d bytes) - the connected server " - "supports BSON document sizes up to %d bytes." % - (max_doc_size, self.max_bson_size)) + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) try: self.sock.sendall(message) @@ -748,8 +811,7 @@ def _raise_if_not_writable(self, unacknowledged): """ if unacknowledged and not self.is_writable: # Write won't succeed, bail as if we'd received a not primary error. - raise NotPrimaryError("not primary", { - "ok": 0, "errmsg": "not primary", "code": 10107}) + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) def unack_write(self, msg, max_doc_size): """Send unack OP_MSG. @@ -803,8 +865,8 @@ def validate_session(self, client, session): if session: if session._client is not client: raise InvalidOperation( - 'Can only use session with the MongoClient that' - ' started it') + "Can only use session with the MongoClient that" " started it" + ) def close_socket(self, reason): """Close this connection with a reason.""" @@ -812,8 +874,7 @@ def close_socket(self, reason): return self._close_socket() if reason and self.enabled_for_cmap: - self.listeners.publish_connection_closed( - self.address, self.id, reason) + self.listeners.publish_connection_closed(self.address, self.id, reason) def _close_socket(self): """Close this connection.""" @@ -893,7 +954,7 @@ def __repr__(self): return "SocketInfo(%s)%s at %s" % ( repr(self.sock), self.closed and " CLOSED" or "", - id(self) + id(self), ) @@ -907,10 +968,9 @@ def _create_connection(address, options): host, port = address # Check if dealing with a unix domain socket - if host.endswith('.sock'): + if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported " - "on this system") + raise ConnectionFailure("UNIX-sockets are not supported " "on this system") sock = socket.socket(socket.AF_UNIX) # SOCK_CLOEXEC not supported for Unix sockets. _set_non_inheritable_non_atomic(sock.fileno()) @@ -925,7 +985,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != 'localhost': + if socket.has_ipv6 and host != "localhost": family = socket.AF_UNSPEC err = None @@ -935,8 +995,7 @@ def _create_connection(address, options): # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 # all file descriptors are created non-inheritable. See PEP 446. try: - sock = socket.socket( - af, socktype | getattr(socket, 'SOCK_CLOEXEC', 0), proto) + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) except socket.error: # Can SOCK_CLOEXEC be defined even if the kernel doesn't support # it? @@ -961,7 +1020,7 @@ def _create_connection(address, options): # host with an OS/kernel or Python interpreter that doesn't # support IPv6. The test case is Jython2.5.1 which doesn't # support IPv6 at all. - raise socket.error('getaddrinfo failed') + raise socket.error("getaddrinfo failed") def _configured_socket(address, options): @@ -999,9 +1058,11 @@ def _configured_socket(address, options): # failures alike. Permanent handshake failures, like protocol # mismatch, will be turned into ServerSelectionTimeoutErrors later. _raise_connection_failure(address, exc, "SSL handshake failed: ") - if (ssl_context.verify_mode and not - getattr(ssl_context, "check_hostname", False) and - not options.tls_allow_invalid_hostnames): + if ( + ssl_context.verify_mode + and not getattr(ssl_context, "check_hostname", False) + and not options.tls_allow_invalid_hostnames + ): try: ssl.match_hostname(sock.getpeercert(), hostname=host) except _CertificateError: @@ -1016,6 +1077,7 @@ class _PoolClosedError(PyMongoError): """Internal error raised when a thread tries to get a connection from a closed pool. """ + pass @@ -1094,9 +1156,10 @@ def __init__(self, address, options, handshake=True): self.handshake = handshake # Don't publish events in Monitor pools. self.enabled_for_cmap = ( - self.handshake and - self.opts._event_listeners is not None and - self.opts._event_listeners.enabled_for_cmap) + self.handshake + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) # The first portion of the wait queue. # Enforces: maxPoolSize @@ -1105,7 +1168,7 @@ def __init__(self, address, options, handshake=True): self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: - self.max_pool_size = float('inf') + self.max_pool_size = float("inf") # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue @@ -1114,7 +1177,8 @@ def __init__(self, address, options, handshake=True): self._pending = 0 if self.enabled_for_cmap: self.opts._event_listeners.publish_pool_created( - self.address, self.opts.non_default_options) + self.address, self.opts.non_default_options + ) # Similar to active_sockets but includes threads in the wait queue. self.operation_count = 0 # Retain references to pinned connections to prevent the CPython GC @@ -1141,8 +1205,7 @@ def _reset(self, close, pause=True, service_id=None): with self.size_cond: if self.closed: return - if (self.opts.pause_enabled and pause and - not self.opts.load_balanced): + if self.opts.pause_enabled and pause and not self.opts.load_balanced: old_state, self.state = self.state, PoolState.PAUSED self.gen.inc(service_id) newpid = os.getpid() @@ -1180,8 +1243,7 @@ def _reset(self, close, pause=True, service_id=None): listeners.publish_pool_closed(self.address) else: if old_state != PoolState.PAUSED and self.enabled_for_cmap: - listeners.publish_pool_cleared(self.address, - service_id=service_id) + listeners.publish_pool_cleared(self.address, service_id=service_id) for sock_info in sockets: sock_info.close_socket(ConnectionClosedReason.STALE) @@ -1219,16 +1281,17 @@ def remove_stale_sockets(self, reference_generation): if self.opts.max_idle_time_seconds is not None: with self.lock: - while (self.sockets and - self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds): + while ( + self.sockets + and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): sock_info = self.sockets.pop() sock_info.close_socket(ConnectionClosedReason.IDLE) while True: with self.size_cond: # There are enough sockets in the pool. - if (len(self.sockets) + self.active_sockets >= - self.opts.min_pool_size): + if len(self.sockets) + self.active_sockets >= self.opts.min_pool_size: return if self.requests >= self.opts.min_pool_size: return @@ -1282,7 +1345,8 @@ def connect(self): except BaseException as error: if self.enabled_for_cmap: listeners.publish_connection_closed( - self.address, conn_id, ConnectionClosedReason.ERROR) + self.address, conn_id, ConnectionClosedReason.ERROR + ) if isinstance(error, (IOError, OSError, _SSLError)): _raise_connection_failure(self.address, error) @@ -1326,8 +1390,7 @@ def get_socket(self, handler=None): sock_info = self._get_socket() if self.enabled_for_cmap: - listeners.publish_connection_checked_out( - self.address, sock_info.id) + listeners.publish_connection_checked_out(self.address, sock_info.id) try: yield sock_info except: @@ -1359,9 +1422,9 @@ def _raise_if_not_ready(self, emit_event): if self.state != PoolState.READY: if self.enabled_for_cmap and emit_event: self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) - _raise_connection_failure( - self.address, AutoReconnect('connection pool paused')) + self.address, ConnectionCheckOutFailedReason.CONN_ERROR + ) + _raise_connection_failure(self.address, AutoReconnect("connection pool paused")) def _get_socket(self): """Get or create a SocketInfo. Can raise ConnectionFailure.""" @@ -1374,10 +1437,11 @@ def _get_socket(self): if self.closed: if self.enabled_for_cmap: self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.POOL_CLOSED) + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED + ) raise _PoolClosedError( - 'Attempted to check out a connection from closed connection ' - 'pool') + "Attempted to check out a connection from closed connection " "pool" + ) with self.lock: self.operation_count += 1 @@ -1414,13 +1478,11 @@ def _get_socket(self): # to be checked back into the pool. with self._max_connecting_cond: self._raise_if_not_ready(emit_event=False) - while not (self.sockets or - self._pending < self._max_connecting): + while not (self.sockets or self._pending < self._max_connecting): if not _cond_wait(self._max_connecting_cond, deadline): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. - if (self.sockets or - self._pending < self._max_connecting): + if self.sockets or self._pending < self._max_connecting: self._max_connecting_cond.notify() emitted_event = True self._raise_wait_queue_timeout() @@ -1453,7 +1515,8 @@ def _get_socket(self): if self.enabled_for_cmap and not emitted_event: self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR) + self.address, ConnectionCheckOutFailedReason.CONN_ERROR + ) raise sock_info.active = True @@ -1483,14 +1546,13 @@ def return_socket(self, sock_info): # CMAP requires the closed event be emitted after the check in. if self.enabled_for_cmap: listeners.publish_connection_closed( - self.address, sock_info.id, - ConnectionClosedReason.ERROR) + self.address, sock_info.id, ConnectionClosedReason.ERROR + ) else: with self.lock: # Hold the lock to ensure this section does not race with # Pool.reset(). - if self.stale_generation(sock_info.generation, - sock_info.service_id): + if self.stale_generation(sock_info.generation, sock_info.service_id): sock_info.close_socket(ConnectionClosedReason.STALE) else: sock_info.update_last_checkin_time() @@ -1525,14 +1587,16 @@ def _perished(self, sock_info): """ idle_time_seconds = sock_info.idle_time_seconds() # If socket is idle, open a new one. - if (self.opts.max_idle_time_seconds is not None and - idle_time_seconds > self.opts.max_idle_time_seconds): + if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): sock_info.close_socket(ConnectionClosedReason.IDLE) return True - if (self._check_interval_seconds is not None and ( - 0 == self._check_interval_seconds or - idle_time_seconds > self._check_interval_seconds)): + if self._check_interval_seconds is not None and ( + 0 == self._check_interval_seconds or idle_time_seconds > self._check_interval_seconds + ): if sock_info.socket_closed(): sock_info.close_socket(ConnectionClosedReason.ERROR) return True @@ -1547,20 +1611,28 @@ def _raise_wait_queue_timeout(self): listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.TIMEOUT) + self.address, ConnectionCheckOutFailedReason.TIMEOUT + ) if self.opts.load_balanced: other_ops = self.active_sockets - self.ncursors - self.ntxns raise ConnectionFailure( - 'Timeout waiting for connection from the connection pool. ' - 'maxPoolSize: %s, connections in use by cursors: %s, ' - 'connections in use by transactions: %s, connections in use ' - 'by other operations: %s, wait_queue_timeout: %s' % ( - self.opts.max_pool_size, self.ncursors, self.ntxns, - other_ops, self.opts.wait_queue_timeout)) + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: %s, connections in use by cursors: %s, " + "connections in use by transactions: %s, connections in use " + "by other operations: %s, wait_queue_timeout: %s" + % ( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + self.opts.wait_queue_timeout, + ) + ) raise ConnectionFailure( - 'Timed out while checking out a connection from connection pool. ' - 'maxPoolSize: %s, wait_queue_timeout: %s' % ( - self.opts.max_pool_size, self.opts.wait_queue_timeout)) + "Timed out while checking out a connection from connection pool. " + "maxPoolSize: %s, wait_queue_timeout: %s" + % (self.opts.max_pool_size, self.opts.wait_queue_timeout) + ) def __del__(self): # Avoid ResourceWarnings in Python 3 diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index c5a5f0936d..d42cafb084 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -23,17 +23,12 @@ from errno import EINTR as _EINTR from ipaddress import ip_address as _ip_address -from cryptography.x509 import \ - load_der_x509_certificate as _load_der_x509_certificate +from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate from OpenSSL import SSL as _SSL from OpenSSL import crypto as _crypto -from service_identity import ( - CertificateError as _SICertificateError -) +from service_identity import CertificateError as _SICertificateError from service_identity import VerificationError as _SIVerificationError -from service_identity.pyopenssl import ( # - verify_hostname as _verify_hostname -) +from service_identity.pyopenssl import verify_hostname as _verify_hostname from service_identity.pyopenssl import verify_ip_address as _verify_ip_address from pymongo.errors import ConfigurationError as _ConfigurationError @@ -45,6 +40,7 @@ try: import certifi + _HAVE_CERTIFI = True except ImportError: _HAVE_CERTIFI = False @@ -69,11 +65,11 @@ _VERIFY_MAP = { _stdlibssl.CERT_NONE: _SSL.VERIFY_NONE, _stdlibssl.CERT_OPTIONAL: _SSL.VERIFY_PEER, - _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT + _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_REVERSE_VERIFY_MAP = dict( - (value, key) for key, value in _VERIFY_MAP.items()) +_REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) + def _is_ip_address(address): try: @@ -82,22 +78,21 @@ def _is_ip_address(address): except (ValueError, UnicodeError): return False + # According to the docs for Connection.send it can raise # WantX509LookupError and should be retried. -_RETRY_ERRORS = ( - _SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +_RETRY_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) def _ragged_eof(exc): """Return True if the OpenSSL.SSL.SysCallError is a ragged EOF.""" - return exc.args == (-1, 'Unexpected EOF') + return exc.args == (-1, "Unexpected EOF") # https://github.com/pyca/pyopenssl/issues/168 # https://github.com/pyca/pyopenssl/issues/176 # https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets class _sslConn(_SSL.Connection): - def __init__(self, ctx, sock, suppress_ragged_eofs): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs @@ -111,8 +106,7 @@ def _call(self, call, *args, **kwargs): try: return call(*args, **kwargs) except _RETRY_ERRORS: - self.socket_checker.select( - self, True, True, timeout) + self.socket_checker.select(self, True, True, timeout) if timeout and _time.monotonic() - start > timeout: raise _socket.timeout("timed out") continue @@ -146,7 +140,8 @@ def sendall(self, buf, flags=0): while total_sent < total_length: try: sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags) # type: ignore + super(_sslConn, self).send, view[total_sent:], flags # type: ignore + ) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. @@ -163,6 +158,7 @@ def sendall(self, buf, flags=0): class _CallbackData(object): """Data class which is passed to the OCSP callback.""" + def __init__(self): self.trusted_ca_certs = None self.check_ocsp_endpoint = None @@ -174,7 +170,7 @@ class SSLContext(object): context. """ - __slots__ = ('_protocol', '_ctx', '_callback_data', '_check_hostname') + __slots__ = ("_protocol", "_ctx", "_callback_data", "_check_hostname") def __init__(self, protocol): self._protocol = protocol @@ -186,8 +182,7 @@ def __init__(self, protocol): # side configuration and wrap_socket tries to support both client and # server side sockets. self._callback_data.check_ocsp_endpoint = True - self._ctx.set_ocsp_client_callback( - callback=_ocsp_callback, data=self._callback_data) + self._ctx.set_ocsp_client_callback(callback=_ocsp_callback, data=self._callback_data) @property def protocol(self): @@ -205,12 +200,14 @@ def __get_verify_mode(self): def __set_verify_mode(self, value): """Setter for verify_mode.""" + def _cb(connobj, x509obj, errnum, errdepth, retcode): # It seems we don't need to do anything here. Twisted doesn't, # and OpenSSL's SSL_CTX_set_verify let's you pass NULL # for the callback option. It's weird that PyOpenSSL requires # this. return retcode + self._ctx.set_verify(_VERIFY_MAP[value], _cb) verify_mode = property(__get_verify_mode, __set_verify_mode) @@ -233,8 +230,7 @@ def __set_check_ocsp_endpoint(self, value): raise TypeError("check_ocsp must be True or False") self._callback_data.check_ocsp_endpoint = value - check_ocsp_endpoint = property(__get_check_ocsp_endpoint, - __set_check_ocsp_endpoint) + check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint) def __get_options(self): # Calling set_options adds the option to the existing bitmask and @@ -262,11 +258,13 @@ def load_cert_chain(self, certfile, keyfile=None, password=None): # https://github.com/python/cpython/blob/v3.8.0/Modules/_ssl.c#L3930-L3971 # Password callback MUST be set first or it will be ignored. if password: + def _pwcb(max_length, prompt_twice, user_data): # XXX:We could check the password length against what OpenSSL # tells us is the max, but we can't raise an exception, so... # warn? - return password.encode('utf-8') + return password.encode("utf-8") + self._ctx.set_passwd_cb(_pwcb) self._ctx.use_certificate_chain_file(certfile) self._ctx.use_privatekey_file(keyfile or certfile) @@ -289,7 +287,8 @@ def _load_certifi(self): "tlsAllowInvalidCertificates is False but no system " "CA certificates could be loaded. Please install the " "certifi package, or provide a path to a CA file using " - "the tlsCAFile option") + "the tlsCAFile option" + ) def _load_wincerts(self, store): """Attempt to load CA certs from Windows trust store.""" @@ -299,8 +298,8 @@ def _load_wincerts(self, store): if encoding == "x509_asn": if trust is True or oid in trust: cert_store.add_cert( - _crypto.X509.from_cryptography( - _load_der_x509_certificate(cert))) + _crypto.X509.from_cryptography(_load_der_x509_certificate(cert)) + ) def load_default_certs(self): """A PyOpenSSL version of load_default_certs from CPython.""" @@ -309,7 +308,7 @@ def load_default_certs(self): # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths if _sys.platform == "win32": try: - for storename in ('CA', 'ROOT'): + for storename in ("CA", "ROOT"): self._load_wincerts(storename) except PermissionError: # Fall back to certifi @@ -325,10 +324,15 @@ def set_default_verify_paths(self): # but not that same as CPython's. self._ctx.set_default_verify_paths() - def wrap_socket(self, sock, server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, session=None): + def wrap_socket( + self, + sock, + server_side=False, + do_handshake_on_connect=True, + suppress_ragged_eofs=True, + server_hostname=None, + session=None, + ): """Wrap an existing Python socket sock and return a TLS socket object. """ @@ -342,7 +346,7 @@ def wrap_socket(self, sock, server_side=False, if server_hostname and not _is_ip_address(server_hostname): # XXX: Do this in a callback registered with # SSLContext.set_info_callback? See Twisted for an example. - ssl_conn.set_tlsext_host_name(server_hostname.encode('idna')) + ssl_conn.set_tlsext_host_name(server_hostname.encode("idna")) if self.verify_mode != _stdlibssl.CERT_NONE: # Request a stapled OCSP response. ssl_conn.request_ocsp() diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index aaf67ef5a6..dfb3930ab0 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -35,8 +35,7 @@ def __init__(self, level: Optional[str] = None) -> None: if level is None or isinstance(level, str): self.__level = level else: - raise TypeError( - 'level must be a string or None.') + raise TypeError("level must be a string or None.") @property def level(self) -> Optional[str]: @@ -47,7 +46,7 @@ def level(self) -> Optional[str]: def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with old wire protocol versions.""" - return self.level is None or self.level == 'local' + return self.level is None or self.level == "local" @property def document(self) -> Dict[str, Any]: @@ -59,7 +58,7 @@ def document(self) -> Dict[str, Any]: """ doc = {} if self.__level: - doc['level'] = self.level + doc["level"] = self.level return doc def __eq__(self, other: Any) -> bool: @@ -69,8 +68,8 @@ def __eq__(self, other: Any) -> bool: def __repr__(self): if self.level: - return 'ReadConcern(%s)' % self.level - return 'ReadConcern()' + return "ReadConcern(%s)" % self.level + return "ReadConcern()" DEFAULT_READ_CONCERN = ReadConcern() diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index cc1317fb88..02a2e88bf0 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -19,8 +19,10 @@ from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError -from pymongo.server_selectors import (member_with_tags_server_selector, - secondary_with_tags_server_selector) +from pymongo.server_selectors import ( + member_with_tags_server_selector, + secondary_with_tags_server_selector, +) _PRIMARY = 0 _PRIMARY_PREFERRED = 1 @@ -30,41 +32,40 @@ _MONGOS_MODES = ( - 'primary', - 'primaryPreferred', - 'secondary', - 'secondaryPreferred', - 'nearest', + "primary", + "primaryPreferred", + "secondary", + "secondaryPreferred", + "nearest", ) def _validate_tag_sets(tag_sets): - """Validate tag sets for a MongoClient. - """ + """Validate tag sets for a MongoClient.""" if tag_sets is None: return tag_sets if not isinstance(tag_sets, (list, tuple)): - raise TypeError(( - "Tag sets %r invalid, must be a sequence") % (tag_sets,)) + raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) if len(tag_sets) == 0: - raise ValueError(( - "Tag sets %r invalid, must be None or contain at least one set of" - " tags") % (tag_sets,)) + raise ValueError( + ("Tag sets %r invalid, must be None or contain at least one set of" " tags") + % (tag_sets,) + ) for tags in tag_sets: if not isinstance(tags, abc.Mapping): raise TypeError( "Tag set %r invalid, must be an instance of dict, " "bson.son.SON or other type that inherits from " - "collection.Mapping" % (tags,)) + "collection.Mapping" % (tags,) + ) return list(tag_sets) def _invalid_max_staleness_msg(max_staleness): - return ("maxStalenessSeconds must be a positive integer, not %s" % - max_staleness) + return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness # Some duplication with common.py to avoid import cycle. @@ -98,13 +99,17 @@ def _validate_hedge(hedge): class _ServerMode(object): - """Base class for all read preferences. - """ + """Base class for all read preferences.""" - __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", - "__hedge") + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") - def __init__(self, mode: int, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: + def __init__( + self, + mode: int, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: self.__mongos_mode = _MONGOS_MODES[mode] self.__mode = mode self.__tag_sets = _validate_tag_sets(tag_sets) @@ -113,33 +118,29 @@ def __init__(self, mode: int, tag_sets: Optional[_TagSets] = None, max_staleness @property def name(self) -> str: - """The name of this read preference. - """ + """The name of this read preference.""" return self.__class__.__name__ @property def mongos_mode(self) -> str: - """The mongos mode of this read preference. - """ + """The mongos mode of this read preference.""" return self.__mongos_mode @property def document(self) -> Dict[str, Any]: - """Read preference as a document. - """ - doc: Dict[str, Any] = {'mode': self.__mongos_mode} + """Read preference as a document.""" + doc: Dict[str, Any] = {"mode": self.__mongos_mode} if self.__tag_sets not in (None, [{}]): - doc['tags'] = self.__tag_sets + doc["tags"] = self.__tag_sets if self.__max_staleness != -1: - doc['maxStalenessSeconds'] = self.__max_staleness + doc["maxStalenessSeconds"] = self.__max_staleness if self.__hedge not in (None, {}): - doc['hedge'] = self.__hedge + doc["hedge"] = self.__hedge return doc @property def mode(self) -> int: - """The mode of this read preference instance. - """ + """The mode of this read preference instance.""" return self.__mode @property @@ -203,14 +204,20 @@ def min_wire_version(self) -> int: def __repr__(self): return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( - self.name, self.__tag_sets, self.__max_staleness, self.__hedge) + self.name, + self.__tag_sets, + self.__max_staleness, + self.__hedge, + ) def __eq__(self, other: Any) -> bool: if isinstance(other, _ServerMode): - return (self.mode == other.mode and - self.tag_sets == other.tag_sets and - self.max_staleness == other.max_staleness and - self.hedge == other.hedge) + return ( + self.mode == other.mode + and self.tag_sets == other.tag_sets + and self.max_staleness == other.max_staleness + and self.hedge == other.hedge + ) return NotImplemented def __ne__(self, other: Any) -> bool: @@ -221,18 +228,20 @@ def __getstate__(self): Needed explicitly because __slots__() defined. """ - return {'mode': self.__mode, - 'tag_sets': self.__tag_sets, - 'max_staleness': self.__max_staleness, - 'hedge': self.__hedge} + return { + "mode": self.__mode, + "tag_sets": self.__tag_sets, + "max_staleness": self.__max_staleness, + "hedge": self.__hedge, + } def __setstate__(self, value): """Restore from pickling.""" - self.__mode = value['mode'] + self.__mode = value["mode"] self.__mongos_mode = _MONGOS_MODES[self.__mode] - self.__tag_sets = _validate_tag_sets(value['tag_sets']) - self.__max_staleness = _validate_max_staleness(value['max_staleness']) - self.__hedge = _validate_hedge(value['hedge']) + self.__tag_sets = _validate_tag_sets(value["tag_sets"]) + self.__max_staleness = _validate_max_staleness(value["max_staleness"]) + self.__hedge = _validate_hedge(value["hedge"]) class Primary(_ServerMode): @@ -293,9 +302,13 @@ class PrimaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: - super(PrimaryPreferred, self).__init__( - _PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -303,9 +316,8 @@ def __call__(self, selection: Any) -> Any: return selection.primary_selection else: return secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class Secondary(_ServerMode): @@ -333,16 +345,19 @@ class Secondary(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: - super(Secondary, self).__init__( - _SECONDARY, tag_sets, max_staleness, hedge) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class SecondaryPreferred(_ServerMode): @@ -374,16 +389,21 @@ class SecondaryPreferred(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: super(SecondaryPreferred, self).__init__( - _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) + _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge + ) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" secondaries = secondary_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) if secondaries: return secondaries @@ -416,16 +436,19 @@ class Nearest(_ServerMode): __slots__ = () - def __init__(self, tag_sets: Optional[_TagSets] = None, max_staleness: int = -1, hedge: Optional[_Hedge] = None) -> None: - super(Nearest, self).__init__( - _NEAREST, tag_sets, max_staleness, hedge) + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" return member_with_tags_server_selector( - self.tag_sets, - max_staleness_selectors.select( - self.max_staleness, selection)) + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) class _AggWritePref: @@ -439,7 +462,7 @@ class _AggWritePref: - `pref`: The read preference to use on MongoDB 5.0+. """ - __slots__ = ('pref', 'effective_pref') + __slots__ = ("pref", "effective_pref") def __init__(self, pref): self.pref = pref @@ -447,9 +470,11 @@ def __init__(self, pref): def selection_hook(self, topology_description): common_wv = topology_description.common_wire_version - if (topology_description.has_readable_server( - ReadPreference.PRIMARY_PREFERRED) and - common_wv and common_wv < 13): + if ( + topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) + and common_wv + and common_wv < 13 + ): self.effective_pref = ReadPreference.PRIMARY else: self.effective_pref = self.pref @@ -467,28 +492,29 @@ def __getattr__(self, name): return getattr(self.effective_pref, name) -_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, - Secondary, SecondaryPreferred, Nearest) +_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) -def make_read_preference(mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1) -> _ServerMode: +def make_read_preference( + mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 +) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): - raise ConfigurationError("Read preference primary " - "cannot be combined with tags") + raise ConfigurationError("Read preference primary " "cannot be combined with tags") if max_staleness != -1: - raise ConfigurationError("Read preference primary cannot be " - "combined with maxStalenessSeconds") + raise ConfigurationError( + "Read preference primary cannot be " "combined with maxStalenessSeconds" + ) return Primary() return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore _MODES = ( - 'PRIMARY', - 'PRIMARY_PREFERRED', - 'SECONDARY', - 'SECONDARY_PREFERRED', - 'NEAREST', + "PRIMARY", + "PRIMARY_PREFERRED", + "SECONDARY", + "SECONDARY_PREFERRED", + "NEAREST", ) @@ -542,6 +568,7 @@ class ReadPreference(object): - ``NEAREST``: Read from any shard member. """ + PRIMARY = Primary() PRIMARY_PREFERRED = PrimaryPreferred() SECONDARY = Secondary() @@ -550,13 +577,13 @@ class ReadPreference(object): def read_pref_mode_from_name(name: str) -> int: - """Get the read preference mode from mongos/uri name. - """ + """Get the read preference mode from mongos/uri name.""" return _MONGOS_MODES.index(name) class MovingAverage(object): """Tracks an exponentially-weighted moving average.""" + average: Optional[float] def __init__(self) -> None: diff --git a/pymongo/response.py b/pymongo/response.py index 3094399da6..1369eac4e0 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -16,11 +16,9 @@ class Response(object): - __slots__ = ('_data', '_address', '_request_id', '_duration', - '_from_command', '_docs') + __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") - def __init__(self, data, address, request_id, duration, from_command, - docs): + def __init__(self, data, address, request_id, duration, from_command, docs): """Represent a response from the server. :Parameters: @@ -69,10 +67,11 @@ def docs(self): class PinnedResponse(Response): - __slots__ = ('_socket_info', '_more_to_come') + __slots__ = ("_socket_info", "_more_to_come") - def __init__(self, data, address, socket_info, request_id, duration, - from_command, docs, more_to_come): + def __init__( + self, data, address, socket_info, request_id, duration, from_command, docs, more_to_come + ): """Represent a response to an exhaust cursor's initial query. :Parameters: @@ -87,11 +86,9 @@ def __init__(self, data, address, socket_info, request_id, duration, - `more_to_come`: Bool indicating whether cursor is ready to be exhausted. """ - super(PinnedResponse, self).__init__(data, - address, - request_id, - duration, - from_command, docs) + super(PinnedResponse, self).__init__( + data, address, request_id, duration, from_command, docs + ) self._socket_info = socket_info self._more_to_come = more_to_come diff --git a/pymongo/results.py b/pymongo/results.py index 637bf73b0f..127f574184 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -29,10 +29,12 @@ def __init__(self, acknowledged: bool) -> None: def _raise_if_unacknowledged(self, property_name): """Raise an exception on property access if unacknowledged.""" if not self.__acknowledged: - raise InvalidOperation("A value for %s is not available when " - "the write is unacknowledged. Check the " - "acknowledged attribute to avoid this " - "error." % (property_name,)) + raise InvalidOperation( + "A value for %s is not available when " + "the write is unacknowledged. Check the " + "acknowledged attribute to avoid this " + "error." % (property_name,) + ) @property def acknowledged(self) -> bool: @@ -55,8 +57,7 @@ def acknowledged(self) -> bool: class InsertOneResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_one`. - """ + """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" __slots__ = ("__inserted_id", "__acknowledged") @@ -71,8 +72,7 @@ def inserted_id(self) -> Any: class InsertManyResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_many`. - """ + """The return type for :meth:`~pymongo.collection.Collection.insert_many`.""" __slots__ = ("__inserted_ids", "__acknowledged") @@ -119,7 +119,7 @@ def matched_count(self) -> int: @property def modified_count(self) -> int: - """The number of documents modified. """ + """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") return cast(int, self.__raw_result.get("nModified")) @@ -211,6 +211,7 @@ def upserted_ids(self) -> Optional[Dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: - return dict((upsert["index"], upsert["_id"]) - for upsert in self.bulk_api_result["upserted"]) + return dict( + (upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"] + ) return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 99445b06f0..b96d6fcb56 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -19,13 +19,16 @@ import stringprep except ImportError: HAVE_STRINGPREP = False + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """SASLprep dummy""" if isinstance(data, str): raise TypeError( "The stringprep module is not available. Usernames and " - "passwords must be instances of bytes.") + "passwords must be instances of bytes." + ) return data + else: HAVE_STRINGPREP = True import unicodedata @@ -43,7 +46,8 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) stringprep.in_table_c6, stringprep.in_table_c7, stringprep.in_table_c8, - stringprep.in_table_c9) + stringprep.in_table_c9, + ) def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: """An implementation of RFC4013 SASLprep. @@ -78,12 +82,12 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) in_table_c12 = stringprep.in_table_c12 in_table_b1 = stringprep.in_table_b1 data = "".join( - ["\u0020" if in_table_c12(elt) else elt - for elt in data if not in_table_b1(elt)]) + ["\u0020" if in_table_c12(elt) else elt for elt in data if not in_table_b1(elt)] + ) # RFC3454 section 2, step 2 - Normalize # RFC4013 section 2.2 normalization - data = unicodedata.ucd_3_2_0.normalize('NFKC', data) + data = unicodedata.ucd_3_2_0.normalize("NFKC", data) in_table_d1 = stringprep.in_table_d1 if in_table_d1(data[0]): @@ -104,7 +108,6 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi for char in data: if any(in_table(char) for in_table in prohibited): - raise ValueError( - "SASLprep: failed prohibited character check") + raise ValueError("SASLprep: failed prohibited character check") return data diff --git a/pymongo/server.py b/pymongo/server.py index 74093b05ed..be1e7da89c 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -23,12 +23,13 @@ from pymongo.response import PinnedResponse, Response from pymongo.server_type import SERVER_TYPE -_CURSOR_DOC_FIELDS = {'cursor': {'firstBatch': 1, 'nextBatch': 1}} +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} class Server(object): - def __init__(self, server_description, pool, monitor, topology_id=None, - listeners=None, events=None): + def __init__( + self, server_description, pool, monitor, topology_id=None, listeners=None, events=None + ): """Represent one MongoDB server.""" self._description = server_description self._pool = pool @@ -60,8 +61,12 @@ def close(self): if self._publish: assert self._listener is not None assert self._events is not None - self._events.put((self._listener.publish_server_closed, - (self._description.address, self._topology_id))) + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) self._monitor.close() self._pool.reset_without_pause() @@ -69,8 +74,7 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() - def run_operation(self, sock_info, operation, read_preference, listeners, - unpack_res): + def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -90,20 +94,18 @@ def run_operation(self, sock_info, operation, read_preference, listeners, start = datetime.now() use_cmd = operation.use_command(sock_info) - more_to_come = (operation.sock_mgr - and operation.sock_mgr.more_to_come) + more_to_come = operation.sock_mgr and operation.sock_mgr.more_to_come if more_to_come: request_id = 0 else: - message = operation.get_message( - read_preference, sock_info, use_cmd) + message = operation.get_message(read_preference, sock_info, use_cmd) request_id, data, max_doc_size = self._split_message(message) if publish: cmd, dbn = operation.as_command(sock_info) listeners.publish_command_start( - cmd, dbn, request_id, sock_info.address, - service_id=sock_info.service_id) + cmd, dbn, request_id, sock_info.address, service_id=sock_info.service_id + ) start = datetime.now() try: @@ -120,10 +122,13 @@ def run_operation(self, sock_info, operation, read_preference, listeners, else: user_fields = None legacy_response = True - docs = unpack_res(reply, operation.cursor_id, - operation.codec_options, - legacy_response=legacy_response, - user_fields=user_fields) + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) if use_cmd: first = docs[0] operation.client._process_response(first, operation.session) @@ -136,9 +141,13 @@ def run_operation(self, sock_info, operation, read_preference, listeners, else: failure = _convert_exception(exc) listeners.publish_command_failure( - duration, failure, operation.name, - request_id, sock_info.address, - service_id=sock_info.service_id) + duration, + failure, + operation.name, + request_id, + sock_info.address, + service_id=sock_info.service_id, + ) raise if publish: @@ -150,25 +159,26 @@ def run_operation(self, sock_info, operation, read_preference, listeners, elif operation.name == "explain": res = docs[0] if docs else {} else: - res = {"cursor": {"id": reply.cursor_id, - "ns": operation.namespace()}, - "ok": 1} + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} if operation.name == "find": res["cursor"]["firstBatch"] = docs else: res["cursor"]["nextBatch"] = docs listeners.publish_command_success( - duration, res, operation.name, request_id, - sock_info.address, service_id=sock_info.service_id) + duration, + res, + operation.name, + request_id, + sock_info.address, + service_id=sock_info.service_id, + ) # Decrypt response. client = operation.client if client and client._encrypter: if use_cmd: - decrypted = client._encrypter.decrypt( - reply.raw_command_response()) - docs = _decode_all_selective( - decrypted, operation.codec_options, user_fields) + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) response: Response @@ -191,7 +201,8 @@ def run_operation(self, sock_info, operation, read_preference, listeners, request_id=request_id, from_command=use_cmd, docs=docs, - more_to_come=more_to_come) + more_to_come=more_to_come, + ) else: response = Response( data=reply, @@ -199,7 +210,8 @@ def run_operation(self, sock_info, operation, read_preference, listeners, duration=duration, request_id=request_id, from_command=use_cmd, - docs=docs) + docs=docs, + ) return response @@ -233,4 +245,4 @@ def _split_message(self, message): return request_id, data, 0 def __repr__(self): - return '<%s %r>' % (self.__class__.__name__, self._description) + return "<%s %r>" % (self.__class__.__name__, self._description) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 4a1b925ca9..110406366a 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -97,6 +97,7 @@ class ServerApiVersion: class ServerApi(object): """MongoDB Versioned API.""" + def __init__(self, version, strict=None, deprecation_errors=None): """Options to configure MongoDB Versioned API. @@ -116,12 +117,13 @@ def __init__(self, version, strict=None, deprecation_errors=None): if strict is not None and not isinstance(strict, bool): raise TypeError( "Wrong type for ServerApi strict, value must be an instance " - "of bool, not %s" % (type(strict),)) - if (deprecation_errors is not None and - not isinstance(deprecation_errors, bool)): + "of bool, not %s" % (type(strict),) + ) + if deprecation_errors is not None and not isinstance(deprecation_errors, bool): raise TypeError( "Wrong type for ServerApi deprecation_errors, value must be " - "an instance of bool, not %s" % (type(deprecation_errors),)) + "an instance of bool, not %s" % (type(deprecation_errors),) + ) self._version = version self._strict = strict self._deprecation_errors = deprecation_errors @@ -161,8 +163,8 @@ def _add_to_command(cmd, server_api): """ if not server_api: return - cmd['apiVersion'] = server_api.version + cmd["apiVersion"] = server_api.version if server_api.strict is not None: - cmd['apiStrict'] = server_api.strict + cmd["apiStrict"] = server_api.strict if server_api.deprecation_errors is not None: - cmd['apiDeprecationErrors'] = server_api.deprecation_errors + cmd["apiDeprecationErrors"] = server_api.deprecation_errors diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 0a9b799165..6b2a71df0b 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -35,13 +35,30 @@ class ServerDescription(object): """ __slots__ = ( - '_address', '_server_type', '_all_hosts', '_tags', '_replica_set_name', - '_primary', '_max_bson_size', '_max_message_size', - '_max_write_batch_size', '_min_wire_version', '_max_wire_version', - '_round_trip_time', '_me', '_is_writable', '_is_readable', - '_ls_timeout_minutes', '_error', '_set_version', '_election_id', - '_cluster_time', '_last_write_date', '_last_update_time', - '_topology_version') + "_address", + "_server_type", + "_all_hosts", + "_tags", + "_replica_set_name", + "_primary", + "_max_bson_size", + "_max_message_size", + "_max_write_batch_size", + "_min_wire_version", + "_max_wire_version", + "_round_trip_time", + "_me", + "_is_writable", + "_is_readable", + "_ls_timeout_minutes", + "_error", + "_set_version", + "_election_id", + "_cluster_time", + "_last_write_date", + "_last_update_time", + "_topology_version", + ) def __init__( self, @@ -76,9 +93,9 @@ def __init__( self._error = error self._topology_version = hello.topology_version if error: - details = getattr(error, 'details', None) + details = getattr(error, "details", None) if isinstance(details, dict): - self._topology_version = details.get('topologyVersion') + self._topology_version = details.get("topologyVersion") self._last_write_date: Optional[float] if hello.last_write_date: @@ -154,7 +171,7 @@ def election_id(self) -> Optional[ObjectId]: return self._election_id @property - def cluster_time(self)-> Optional[Mapping[str, Any]]: + def cluster_time(self) -> Optional[Mapping[str, Any]]: return self._cluster_time @property @@ -210,10 +227,10 @@ def is_server_type_known(self) -> bool: @property def retryable_writes_supported(self) -> bool: """Checks if this server supports retryable writes.""" - return (( - self._ls_timeout_minutes is not None and - self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary)) - or self._server_type == SERVER_TYPE.LoadBalancer) + return ( + self._ls_timeout_minutes is not None + and self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + ) or self._server_type == SERVER_TYPE.LoadBalancer @property def retryable_reads_supported(self) -> bool: @@ -224,27 +241,28 @@ def retryable_reads_supported(self) -> bool: def topology_version(self) -> Optional[Mapping[str, Any]]: return self._topology_version - def to_unknown(self, error: Optional[Exception] = None) -> "ServerDescription": + def to_unknown(self, error: Optional[Exception] = None) -> "ServerDescription": unknown = ServerDescription(self.address, error=error) unknown._topology_version = self.topology_version return unknown def __eq__(self, other: Any) -> bool: if isinstance(other, ServerDescription): - return ((self._address == other.address) and - (self._server_type == other.server_type) and - (self._min_wire_version == other.min_wire_version) and - (self._max_wire_version == other.max_wire_version) and - (self._me == other.me) and - (self._all_hosts == other.all_hosts) and - (self._tags == other.tags) and - (self._replica_set_name == other.replica_set_name) and - (self._set_version == other.set_version) and - (self._election_id == other.election_id) and - (self._primary == other.primary) and - (self._ls_timeout_minutes == - other.logical_session_timeout_minutes) and - (self._error == other.error)) + return ( + (self._address == other.address) + and (self._server_type == other.server_type) + and (self._min_wire_version == other.min_wire_version) + and (self._max_wire_version == other.max_wire_version) + and (self._me == other.me) + and (self._all_hosts == other.all_hosts) + and (self._tags == other.tags) + and (self._replica_set_name == other.replica_set_name) + and (self._set_version == other.set_version) + and (self._election_id == other.election_id) + and (self._primary == other.primary) + and (self._ls_timeout_minutes == other.logical_session_timeout_minutes) + and (self._error == other.error) + ) return NotImplemented @@ -252,12 +270,16 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - errmsg = '' + errmsg = "" if self.error: - errmsg = ', error=%r' % (self.error,) + errmsg = ", error=%r" % (self.error,) return "<%s %s server_type: %s, rtt: %s%s>" % ( - self.__class__.__name__, self.address, self.server_type_name, - self.round_trip_time, errmsg) + self.__class__.__name__, + self.address, + self.server_type_name, + self.round_trip_time, + errmsg, + ) # For unittesting only. Use under no circumstances! _host_to_round_trip_time: Dict = {} diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index cc18450ad8..313566cb83 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -29,32 +29,28 @@ def from_topology_description(cls, topology_description): primary = sd break - return Selection(topology_description, - topology_description.known_servers, - topology_description.common_wire_version, - primary) - - def __init__(self, - topology_description, - server_descriptions, - common_wire_version, - primary): + return Selection( + topology_description, + topology_description.known_servers, + topology_description.common_wire_version, + primary, + ) + + def __init__(self, topology_description, server_descriptions, common_wire_version, primary): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version def with_server_descriptions(self, server_descriptions): - return Selection(self.topology_description, - server_descriptions, - self.common_wire_version, - self.primary) + return Selection( + self.topology_description, server_descriptions, self.common_wire_version, self.primary + ) def secondary_with_max_last_write_date(self): secondaries = secondary_server_selector(self) if secondaries.server_descriptions: - return max(secondaries.server_descriptions, - key=lambda sd: sd.last_write_date) + return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) @property def primary_selection(self): @@ -82,30 +78,31 @@ def any_server_selector(selection): def readable_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions if s.is_readable]) + [s for s in selection.server_descriptions if s.is_readable] + ) def writable_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions if s.is_writable]) + [s for s in selection.server_descriptions if s.is_writable] + ) def secondary_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions - if s.server_type == SERVER_TYPE.RSSecondary]) + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary] + ) def arbiter_server_selector(selection): return selection.with_server_descriptions( - [s for s in selection.server_descriptions - if s.server_type == SERVER_TYPE.RSArbiter]) + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter] + ) def writable_preferred_server_selector(selection): """Like PrimaryPreferred but doesn't use tags or latency.""" - return (writable_server_selector(selection) or - secondary_server_selector(selection)) + return writable_server_selector(selection) or secondary_server_selector(selection) def apply_single_tag_set(tag_set, selection): @@ -116,6 +113,7 @@ def apply_single_tag_set(tag_set, selection): The empty tag set {} matches any server. """ + def tags_match(server_tags): for key, value in tag_set.items(): if key not in server_tags or server_tags[key] != value: @@ -124,7 +122,8 @@ def tags_match(server_tags): return True return selection.with_server_descriptions( - [s for s in selection.server_descriptions if tags_match(s.tags)]) + [s for s in selection.server_descriptions if tags_match(s.tags)] + ) def apply_tag_sets(tag_sets, selection): diff --git a/pymongo/settings.py b/pymongo/settings.py index d17b5e8b86..2bd2527cdf 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -27,32 +27,35 @@ class TopologySettings(object): - def __init__(self, - seeds=None, - replica_set_name=None, - pool_class=None, - pool_options=None, - monitor_class=None, - condition_class=None, - local_threshold_ms=LOCAL_THRESHOLD_MS, - server_selection_timeout=SERVER_SELECTION_TIMEOUT, - heartbeat_frequency=common.HEARTBEAT_FREQUENCY, - server_selector=None, - fqdn=None, - direct_connection=False, - load_balanced=None, - srv_service_name=common.SRV_SERVICE_NAME, - srv_max_hosts=0): + def __init__( + self, + seeds=None, + replica_set_name=None, + pool_class=None, + pool_options=None, + monitor_class=None, + condition_class=None, + local_threshold_ms=LOCAL_THRESHOLD_MS, + server_selection_timeout=SERVER_SELECTION_TIMEOUT, + heartbeat_frequency=common.HEARTBEAT_FREQUENCY, + server_selector=None, + fqdn=None, + direct_connection=False, + load_balanced=None, + srv_service_name=common.SRV_SERVICE_NAME, + srv_max_hosts=0, + ): """Represent MongoClient's configuration. Take a list of (host, port) pairs and optional replica set name. """ if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: raise ConfigurationError( - "heartbeatFrequencyMS cannot be less than %d" % ( - common.MIN_HEARTBEAT_INTERVAL * 1000,)) + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) - self._seeds = seeds or [('localhost', 27017)] + self._seeds = seeds or [("localhost", 27017)] self._replica_set_name = replica_set_name self._pool_class = pool_class or pool.Pool self._pool_options = pool_options or PoolOptions() @@ -71,7 +74,7 @@ def __init__(self, self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. - self._stack = ''.join(traceback.format_stack()) + self._stack = "".join(traceback.format_stack()) @property def seeds(self): @@ -153,6 +156,4 @@ def get_topology_type(self): def get_server_descriptions(self): """Initial dict of (address, ServerDescription) for all seeds.""" - return dict([ - (address, ServerDescription(address)) - for address in self.seeds]) + return dict([(address, ServerDescription(address)) for address in self.seeds]) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 42db7b9373..70c12f0699 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -21,12 +21,12 @@ # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 -_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith('java') +_HAVE_POLL = hasattr(select, "poll") and not sys.platform.startswith("java") _SelectError = getattr(select, "error", OSError) def _errno_from_exception(exc): - if hasattr(exc, 'errno'): + if hasattr(exc, "errno"): return exc.errno if exc.args: return exc.args[0] @@ -34,7 +34,6 @@ def _errno_from_exception(exc): class SocketChecker(object): - def __init__(self) -> None: self._poller: Optional[select.poll] if _HAVE_POLL: @@ -42,7 +41,9 @@ def __init__(self) -> None: else: self._poller = None - def select(self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0) -> bool: + def select( + self, sock: Any, read: bool = False, write: bool = False, timeout: Optional[float] = 0 + ) -> bool: """Select for reads or writes with a timeout in seconds (or None). Returns True if the socket is readable/writable, False on timeout. @@ -83,8 +84,7 @@ def select(self, sock: Any, read: bool = False, write: bool = False, timeout: Op raise def socket_closed(self, sock: Any) -> bool: - """Return True if we know socket has been closed, False otherwise. - """ + """Return True if we know socket has been closed, False otherwise.""" try: return self.select(sock, read=True) except (RuntimeError, KeyError): diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 989e79131c..fe2dd49aa0 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -19,6 +19,7 @@ try: from dns import resolver + _HAVE_DNSPYTHON = True except ImportError: _HAVE_DNSPYTHON = False @@ -37,19 +38,21 @@ def maybe_decode(text): # PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. def _resolve(*args, **kwargs): - if hasattr(resolver, 'resolve'): + if hasattr(resolver, "resolve"): # dnspython >= 2 return resolver.resolve(*args, **kwargs) # dnspython 1.X return resolver.query(*args, **kwargs) + _INVALID_HOST_MSG = ( "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " - "Did you mean to use 'mongodb://'?") + "Did you mean to use 'mongodb://'?" +) + class _SrvResolver(object): - def __init__(self, fqdn, - connect_timeout, srv_service_name, srv_max_hosts=0): + def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT @@ -71,23 +74,21 @@ def __init__(self, fqdn, def get_options(self): try: - results = _resolve(self.__fqdn, 'TXT', - lifetime=self.__connect_timeout) + results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): # No TXT records return None except Exception as exc: raise ConfigurationError(str(exc)) if len(results) > 1: - raise ConfigurationError('Only one TXT record is supported') - return ( - b'&'.join([b''.join(res.strings) for res in results])).decode( - 'utf-8') + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") def _resolve_uri(self, encapsulate_errors): try: - results = _resolve('_' + self.__srv + '._tcp.' + self.__fqdn, - 'SRV', lifetime=self.__connect_timeout) + results = _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) except Exception as exc: if not encapsulate_errors: # Raise the original error. @@ -101,13 +102,13 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): # Construct address tuples nodes = [ - (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) - for res in results] + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) for res in results + ] # Validate hosts for node in nodes: try: - nlist = node[0].split(".")[1:][-self.__slen:] + nlist = node[0].split(".")[1:][-self.__slen :] except Exception: raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) if self.__plist != nlist: diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index b3428197b7..7b5417fefa 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -36,13 +36,20 @@ # import the ssl module even if we're only using it for this purpose. import ssl as _stdlibssl from ssl import CERT_NONE, CERT_REQUIRED + HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) SSLError = _ssl.SSLError - def get_ssl_context(certfile, passphrase, ca_certs, crlfile, - allow_invalid_certificates, allow_invalid_hostnames, - disable_ocsp_endpoint_check): + def get_ssl_context( + certfile, + passphrase, + ca_certs, + crlfile, + allow_invalid_certificates, + allow_invalid_hostnames, + disable_ocsp_endpoint_check, + ): """Create and return an SSLContext object.""" verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) @@ -67,14 +74,12 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, try: ctx.load_cert_chain(certfile, None, passphrase) except _ssl.SSLError as exc: - raise ConfigurationError( - "Private key doesn't match certificate: %s" % (exc,)) + raise ConfigurationError("Private key doesn't match certificate: %s" % (exc,)) if crlfile is not None: if _ssl.IS_PYOPENSSL: - raise ConfigurationError( - "tlsCRLFile cannot be used with PyOpenSSL") + raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - setattr(ctx, 'verify_flags', getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) + setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) @@ -82,9 +87,12 @@ def get_ssl_context(certfile, passphrase, ca_certs, crlfile, ctx.load_default_certs() ctx.verify_mode = verify_mode return ctx + else: + class SSLError(Exception): # type: ignore pass + HAS_SNI = False IPADDR_SAFE = False diff --git a/pymongo/topology.py b/pymongo/topology.py index b2d31ed314..6134b8201b 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -25,23 +25,37 @@ from pymongo import common, helpers, periodic_executor from pymongo.client_session import _ServerSessionPool -from pymongo.errors import (ConfigurationError, ConnectionFailure, - InvalidOperation, NetworkTimeout, NotPrimaryError, - OperationFailure, PyMongoError, - ServerSelectionTimeoutError, WriteError) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) from pymongo.hello import Hello from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (Selection, any_server_selector, - arbiter_server_selector, - readable_server_selector, - secondary_server_selector, - writable_server_selector) +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + readable_server_selector, + secondary_server_selector, + writable_server_selector, +) from pymongo.topology_description import ( - SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE, TopologyDescription, - _updated_topology_description_srv_polling, updated_topology_description) + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) def process_events_queue(queue_ref): @@ -63,6 +77,7 @@ def process_events_queue(queue_ref): class Topology(object): """Monitor a topology of one or more servers.""" + def __init__(self, topology_settings): self._topology_id = topology_settings._topology_id self._listeners = topology_settings._pool_options._event_listeners @@ -79,8 +94,7 @@ def __init__(self, topology_settings): if self._publish_tp: assert self._events is not None - self._events.put((self._listeners.publish_topology_opened, - (self._topology_id,))) + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) self._settings = topology_settings topology_description = TopologyDescription( topology_settings.get_topology_type(), @@ -88,22 +102,26 @@ def __init__(self, topology_settings): topology_settings.replica_set_name, None, None, - topology_settings) + topology_settings, + ) self._description = topology_description if self._publish_tp: assert self._events is not None - initial_td = TopologyDescription(TOPOLOGY_TYPE.Unknown, {}, None, - None, None, self._settings) - self._events.put(( - self._listeners.publish_topology_description_changed, - (initial_td, self._description, self._topology_id))) + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) for seed in topology_settings.seeds: if self._publish_server: assert self._events is not None - self._events.put((self._listeners.publish_server_opened, - (seed, self._topology_id))) + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) # Store the seed list to help diagnose errors in _error_message(). self._seed_addresses = list(topology_description.server_descriptions()) @@ -117,6 +135,7 @@ def __init__(self, topology_settings): self._session_pool = _ServerSessionPool() if self._publish_server or self._publish_tp: + def target(): return process_events_queue(weak) @@ -124,7 +143,8 @@ def target(): interval=common.EVENTS_QUEUE_FREQUENCY, min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, - name="pymongo_events_thread") + name="pymongo_events_thread", + ) # We strongly reference the executor and it weakly references # the queue via this closure. When the topology is freed, stop @@ -134,8 +154,7 @@ def target(): executor.open() self._srv_monitor = None - if (self._settings.fqdn is not None and - not self._settings.load_balanced): + if self._settings.fqdn is not None and not self._settings.load_balanced: self._srv_monitor = SrvMonitor(self, self._settings) def open(self): @@ -158,7 +177,8 @@ def open(self): "MongoClient opened before fork. Create MongoClient only " "after forking. See PyMongo's documentation for details: " "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe") + "is-pymongo-fork-safe" + ) with self._lock: # Reset the session pool to avoid duplicate sessions in # the child process. @@ -167,10 +187,7 @@ def open(self): with self._lock: self._ensure_opened() - def select_servers(self, - selector, - server_selection_timeout=None, - address=None): + def select_servers(self, selector, server_selection_timeout=None, address=None): """Return a list of Servers matching selector, or time out. :Parameters: @@ -192,25 +209,25 @@ def select_servers(self, server_timeout = server_selection_timeout with self._lock: - server_descriptions = self._select_servers_loop( - selector, server_timeout, address) + server_descriptions = self._select_servers_loop(selector, server_timeout, address) - return [self.get_server_by_address(sd.address) - for sd in server_descriptions] + return [self.get_server_by_address(sd.address) for sd in server_descriptions] def _select_servers_loop(self, selector, timeout, address): """select_servers() guts. Hold the lock when calling this.""" now = time.monotonic() end_time = now + timeout server_descriptions = self._description.apply_selector( - selector, address, custom_selector=self._settings.server_selector) + selector, address, custom_selector=self._settings.server_selector + ) while not server_descriptions: # No suitable servers. if timeout == 0 or now > end_time: raise ServerSelectionTimeoutError( - "%s, Timeout: %ss, Topology Description: %r" % - (self._error_message(selector), timeout, self.description)) + "%s, Timeout: %ss, Topology Description: %r" + % (self._error_message(selector), timeout, self.description) + ) self._ensure_opened() self._request_check_all() @@ -223,19 +240,15 @@ def _select_servers_loop(self, selector, timeout, address): self._description.check_compatible() now = time.monotonic() server_descriptions = self._description.apply_selector( - selector, address, - custom_selector=self._settings.server_selector) + selector, address, custom_selector=self._settings.server_selector + ) self._description.check_compatible() return server_descriptions - def select_server(self, - selector, - server_selection_timeout=None, - address=None): + def select_server(self, selector, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" - servers = self.select_servers( - selector, server_selection_timeout, address) + servers = self.select_servers(selector, server_selection_timeout, address) if len(servers) == 1: return servers[0] server1, server2 = random.sample(servers, 2) @@ -244,8 +257,7 @@ def select_server(self, else: return server2 - def select_server_by_address(self, address, - server_selection_timeout=None): + def select_server_by_address(self, address, server_selection_timeout=None): """Return a Server for "address", reconnecting if necessary. If the server's type is not known, request an immediate check of all @@ -263,9 +275,7 @@ def select_server_by_address(self, address, Raises exc:`ServerSelectionTimeoutError` after `server_selection_timeout` if no matching servers are found. """ - return self.select_server(any_server_selector, - server_selection_timeout, - address) + return self.select_server(any_server_selector, server_selection_timeout, address) def _process_change(self, server_description, reset_pool=False): """Process a new ServerDescription on an opened topology. @@ -278,24 +288,24 @@ def _process_change(self, server_description, reset_pool=False): # This is a stale hello response. Ignore it. return - new_td = updated_topology_description( - self._description, server_description) + new_td = updated_topology_description(self._description, server_description) # CMAP: Ensure the pool is "ready" when the server is selectable. - if (server_description.is_readable - or (server_description.is_server_type_known and - new_td.topology_type == TOPOLOGY_TYPE.Single)): + if server_description.is_readable or ( + server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single + ): server = self._servers.get(server_description.address) if server: server.pool.ready() - suppress_event = ((self._publish_server or self._publish_tp) - and sd_old == server_description) + suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description if self._publish_server and not suppress_event: assert self._events is not None - self._events.put(( - self._listeners.publish_server_description_changed, - (sd_old, server_description, - server_description.address, self._topology_id))) + self._events.put( + ( + self._listeners.publish_server_description_changed, + (sd_old, server_description, server_description.address, self._topology_id), + ) + ) self._description = new_td self._update_servers() @@ -303,16 +313,20 @@ def _process_change(self, server_description, reset_pool=False): if self._publish_tp and not suppress_event: assert self._events is not None - self._events.put(( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id))) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) # Shutdown SRV polling for unsupported cluster types. # This is only applicable if the old topology was Unknown, and the # new one is something other than Unknown or Sharded. - if self._srv_monitor and (td_old.topology_type == TOPOLOGY_TYPE.Unknown - and self._description.topology_type not in - SRV_POLLING_TOPOLOGIES): + if self._srv_monitor and ( + td_old.topology_type == TOPOLOGY_TYPE.Unknown + and self._description.topology_type not in SRV_POLLING_TOPOLOGIES + ): self._srv_monitor.close() # Clear the pool from a failed heartbeat. @@ -336,8 +350,7 @@ def on_change(self, server_description, reset_pool=False): # once. Check if it's still in the description or if some state- # change removed it. E.g., we got a host list from the primary # that didn't include this server. - if (self._opened and - self._description.has_server(server_description.address)): + if self._opened and self._description.has_server(server_description.address): self._process_change(server_description, reset_pool) def _process_srv_update(self, seedlist): @@ -345,16 +358,18 @@ def _process_srv_update(self, seedlist): Hold the lock when calling this. """ td_old = self._description - self._description = _updated_topology_description_srv_polling( - self._description, seedlist) + self._description = _updated_topology_description_srv_polling(self._description, seedlist) self._update_servers() if self._publish_tp: assert self._events is not None - self._events.put(( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id))) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) def on_srv_update(self, seedlist): """Process a new list of nodes obtained from scanning SRV records.""" @@ -391,8 +406,10 @@ def _get_replica_set_members(self, selector): # Implemented here in Topology instead of MongoClient, so it can lock. with self._lock: topology_type = self._description.topology_type - if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary): + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): return set() return set([sd.address for sd in selector(self._new_selection())]) @@ -418,9 +435,10 @@ def _receive_cluster_time_no_lock(self, cluster_time): # value of the clusterTime embedded field." if cluster_time: # ">" uses bson.timestamp.Timestamp's comparison operator. - if (not self._max_cluster_time - or cluster_time['clusterTime'] > - self._max_cluster_time['clusterTime']): + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): self._max_cluster_time = cluster_time def receive_cluster_time(self, cluster_time): @@ -449,8 +467,7 @@ def update_pool(self): # Only update pools for data-bearing servers. for sd in self.data_bearing_servers(): server = self._servers[sd.address] - servers.append((server, - server.pool.gen.get_overall())) + servers.append((server, server.pool.gen.get_overall())) for server, generation in servers: try: @@ -463,7 +480,7 @@ def update_pool(self): def close(self): """Clear pools and terminate monitors. Topology does not reopen on demand. Any further operations will raise - :exc:`~.errors.InvalidOperation`. """ + :exc:`~.errors.InvalidOperation`.""" with self._lock: for server in self._servers.values(): server.close() @@ -484,8 +501,7 @@ def close(self): # Publish only after releasing the lock. if self._publish_tp: assert self._events is not None - self._events.put((self._listeners.publish_topology_closed, - (self._topology_id,))) + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) if self._publish_server or self._publish_tp: self.__events_executor.close() @@ -506,19 +522,16 @@ def _check_session_support(self): if self._description.topology_type == TOPOLOGY_TYPE.Single: if not self._description.has_known_servers: self._select_servers_loop( - any_server_selector, - self._settings.server_selection_timeout, - None) + any_server_selector, self._settings.server_selection_timeout, None + ) elif not self._description.readable_servers: self._select_servers_loop( - readable_server_selector, - self._settings.server_selection_timeout, - None) + readable_server_selector, self._settings.server_selection_timeout, None + ) session_timeout = self._description.logical_session_timeout_minutes if session_timeout is None: - raise ConfigurationError( - "Sessions are not supported by this MongoDB deployment") + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") return session_timeout def get_server_session(self): @@ -529,15 +542,15 @@ def get_server_session(self): session_timeout = self._check_session_support() else: # Sessions never time out in load balanced mode. - session_timeout = float('inf') + session_timeout = float("inf") return self._session_pool.get_server_session(session_timeout) def return_server_session(self, server_session, lock): if lock: with self._lock: self._session_pool.return_server_session( - server_session, - self._description.logical_session_timeout_minutes) + server_session, self._description.logical_session_timeout_minutes + ) else: # Called from a __del__ method, can't use a lock. self._session_pool.return_server_session_no_lock(server_session) @@ -566,16 +579,17 @@ def _ensure_opened(self): self.__events_executor.open() # Start the SRV polling thread. - if self._srv_monitor and (self.description.topology_type in - SRV_POLLING_TOPOLOGIES): + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): self._srv_monitor.open() if self._settings.load_balanced: # Emit initial SDAM events for load balancer mode. - self._process_change(ServerDescription( - self._seed_addresses[0], - Hello({'ok': 1, 'serviceId': self._topology_id, - 'maxWireVersion': 13}))) + self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) # Ensure that the monitors are open. for server in self._servers.values(): @@ -587,8 +601,7 @@ def _is_stale_error(self, address, err_ctx): # Another thread removed this server from the topology. return True - if server._pool.stale_generation( - err_ctx.sock_generation, err_ctx.service_id): + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): # This is an outdated error from a previous pool version. return True @@ -596,9 +609,9 @@ def _is_stale_error(self, address, err_ctx): cur_tv = server.description.topology_version error = err_ctx.error error_tv = None - if error and hasattr(error, 'details'): + if error and hasattr(error, "details"): if isinstance(error.details, dict): - error_tv = error.details.get('topologyVersion') + error_tv = error.details.get("topologyVersion") return _is_stale_error_topology_version(cur_tv, error_tv) @@ -610,8 +623,7 @@ def _handle_error(self, address, err_ctx): error = err_ctx.error exc_type = type(error) service_id = err_ctx.service_id - if (issubclass(exc_type, NetworkTimeout) and - err_ctx.completed_handshake): + if issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake: # The socket has been closed. Don't reset the server. # Server Discovery And Monitoring Spec: "When an application # operation fails because of any network error besides a socket @@ -629,12 +641,12 @@ def _handle_error(self, address, err_ctx): # as Unknown and request an immediate check of the server. # Otherwise, we clear the connection pool, mark the server as # Unknown and request an immediate check of the server. - if hasattr(error, 'code'): + if hasattr(error, "code"): err_code = error.code else: # Default error code if one does not exist. default = 10107 if isinstance(error, NotPrimaryError) else None - err_code = error.details.get('code', default) + err_code = error.details.get("code", default) if err_code in helpers._NOT_PRIMARY_CODES: is_shutting_down = err_code in helpers._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. @@ -687,7 +699,8 @@ def _update_servers(self): server_description=sd, topology=self, pool=self._create_pool_for_monitor(address), - topology_settings=self._settings) + topology_settings=self._settings, + ) weak = None if self._publish_server: @@ -698,7 +711,8 @@ def _update_servers(self): monitor=monitor, topology_id=self._topology_id, listeners=self._listeners, - events=weak) + events=weak, + ) self._servers[address] = server server.open() @@ -709,8 +723,7 @@ def _update_servers(self): self._servers[address].description = sd # Update is_writable value of the pool, if it changed. if was_writable != sd.is_writable: - self._servers[address].pool.update_is_writable( - sd.is_writable) + self._servers[address].pool.update_is_writable(sd.is_writable) for address, server in list(self._servers.items()): if not self._description.has_server(address): @@ -738,8 +751,7 @@ def _create_pool_for_monitor(self, address): server_api=options.server_api, ) - return self._settings.pool_class(address, monitor_pool_options, - handshake=False) + return self._settings.pool_class(address, monitor_pool_options, handshake=False) def _error_message(self, selector): """Format an error message if server selection fails. @@ -748,22 +760,23 @@ def _error_message(self, selector): """ is_replica_set = self._description.topology_type in ( TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) if is_replica_set: - server_plural = 'replica set members' + server_plural = "replica set members" elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: - server_plural = 'mongoses' + server_plural = "mongoses" else: - server_plural = 'servers' + server_plural = "servers" if self._description.known_servers: # We've connected, but no servers match the selector. if selector is writable_server_selector: if is_replica_set: - return 'No primary available for writes' + return "No primary available for writes" else: - return 'No %s available for writes' % server_plural + return "No %s available for writes" % server_plural else: return 'No %s match selector "%s"' % (server_plural, selector) else: @@ -773,9 +786,11 @@ def _error_message(self, selector): if is_replica_set: # We removed all servers because of the wrong setName? return 'No %s available for replica set name "%s"' % ( - server_plural, self._settings.replica_set_name) + server_plural, + self._settings.replica_set_name, + ) else: - return 'No %s available' % server_plural + return "No %s available" % server_plural # 1 or more servers, all Unknown. Are they unknown for one reason? error = servers[0].error @@ -783,32 +798,29 @@ def _error_message(self, selector): if same: if error is None: # We're still discovering. - return 'No %s found yet' % server_plural + return "No %s found yet" % server_plural - if (is_replica_set and not - set(addresses).intersection(self._seed_addresses)): + if is_replica_set and not set(addresses).intersection(self._seed_addresses): # We replaced our seeds with new hosts but can't reach any. return ( - 'Could not reach any servers in %s. Replica set is' - ' configured with internal hostnames or IPs?' % - addresses) + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) return str(error) else: - return ','.join(str(server.error) for server in servers - if server.error) + return ",".join(str(server.error) for server in servers if server.error) def __repr__(self): - msg = '' + msg = "" if not self._opened: - msg = 'CLOSED ' - return '<%s %s%r>' % (self.__class__.__name__, msg, self._description) + msg = "CLOSED " + return "<%s %s%r>" % (self.__class__.__name__, msg, self._description) def eq_props(self): """The properties to use for MongoClient/Topology equality checks.""" ts = self._settings - return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, - ts.srv_service_name) + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) def __eq__(self, other): if isinstance(other, self.__class__): @@ -821,8 +833,8 @@ def __hash__(self): class _ErrorContext(object): """An error with context for SDAM error handling.""" - def __init__(self, error, max_wire_version, sock_generation, - completed_handshake, service_id): + + def __init__(self, error, max_wire_version, sock_generation, completed_handshake, service_id): self.error = error self.max_wire_version = max_wire_version self.sock_generation = sock_generation @@ -834,9 +846,9 @@ def _is_stale_error_topology_version(current_tv, error_tv): """Return True if the error's topologyVersion is <= current.""" if current_tv is None or error_tv is None: return False - if current_tv['processId'] != error_tv['processId']: + if current_tv["processId"] != error_tv["processId"]: return False - return current_tv['counter'] >= error_tv['counter'] + return current_tv["counter"] >= error_tv["counter"] def _is_stale_server_description(current_sd, new_sd): @@ -844,6 +856,6 @@ def _is_stale_server_description(current_sd, new_sd): current_tv, new_tv = current_sd.topology_version, new_sd.topology_version if current_tv is None or new_tv is None: return False - if current_tv['processId'] != new_tv['processId']: + if current_tv["processId"] != new_tv["processId"]: return False - return current_tv['counter'] > new_tv['counter'] + return current_tv["counter"] > new_tv["counter"] diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 241ef5afbe..b3dd60680f 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -91,12 +91,12 @@ def __init__( readable_servers = self.readable_servers if not readable_servers: self._ls_timeout_minutes = None - elif any(s.logical_session_timeout_minutes is None - for s in readable_servers): + elif any(s.logical_session_timeout_minutes is None for s in readable_servers): self._ls_timeout_minutes = None else: - self._ls_timeout_minutes = min(s.logical_session_timeout_minutes # type: ignore - for s in readable_servers) + self._ls_timeout_minutes = min( # type: ignore[type-var] + s.logical_session_timeout_minutes for s in readable_servers + ) def _init_incompatible_err(self): """Internal compatibility check for non-load balanced topologies.""" @@ -109,28 +109,39 @@ def _init_incompatible_err(self): server_too_new = ( # Server too new. s.min_wire_version is not None - and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION) + and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION + ) server_too_old = ( # Server too old. s.max_wire_version is not None - and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION) + and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION + ) if server_too_new: self._incompatible_err = ( "Server at %s:%d requires wire version %d, but this " # type: ignore "version of PyMongo only supports up to %d." - % (s.address[0], s.address[1] or 0, - s.min_wire_version, common.MAX_SUPPORTED_WIRE_VERSION)) + % ( + s.address[0], + s.address[1] or 0, + s.min_wire_version, + common.MAX_SUPPORTED_WIRE_VERSION, + ) + ) elif server_too_old: self._incompatible_err = ( "Server at %s:%d reports wire version %d, but this " # type: ignore "version of PyMongo requires at least %d (MongoDB %s)." - % (s.address[0], s.address[1] or 0, - s.max_wire_version, - common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION)) + % ( + s.address[0], + s.address[1] or 0, + s.max_wire_version, + common.MIN_SUPPORTED_WIRE_VERSION, + common.MIN_SUPPORTED_SERVER_VERSION, + ) + ) break @@ -159,8 +170,7 @@ def reset(self) -> "TopologyDescription": topology_type = self._topology_type # The default ServerDescription's type is Unknown. - sds = dict((address, ServerDescription(address)) - for address in self._server_descriptions) + sds = dict((address, ServerDescription(address)) for address in self._server_descriptions) return TopologyDescription( topology_type, @@ -168,7 +178,8 @@ def reset(self) -> "TopologyDescription": self._replica_set_name, self._max_set_version, self._max_election_id, - self._topology_settings) + self._topology_settings, + ) def server_descriptions(self) -> Dict[_Address, ServerDescription]: """Dict of (address, @@ -211,14 +222,12 @@ def logical_session_timeout_minutes(self) -> Optional[int]: @property def known_servers(self) -> List[ServerDescription]: """List of Servers of types besides Unknown.""" - return [s for s in self._server_descriptions.values() - if s.is_server_type_known] + return [s for s in self._server_descriptions.values() if s.is_server_type_known] @property def has_known_servers(self) -> bool: """Whether there are any Servers of types besides Unknown.""" - return any(s for s in self._server_descriptions.values() - if s.is_server_type_known) + return any(s for s in self._server_descriptions.values() if s.is_server_type_known) @property def readable_servers(self) -> List[ServerDescription]: @@ -246,17 +255,17 @@ def _apply_local_threshold(self, selection): if not selection: return [] # Round trip time in seconds. - fastest = min( - s.round_trip_time for s in selection.server_descriptions) + fastest = min(s.round_trip_time for s in selection.server_descriptions) threshold = self._topology_settings.local_threshold_ms / 1000.0 - return [s for s in selection.server_descriptions - if (s.round_trip_time - fastest) <= threshold] + return [ + s for s in selection.server_descriptions if (s.round_trip_time - fastest) <= threshold + ] def apply_selector( self, selector: Any, address: Optional[_Address] = None, - custom_selector: Optional[_ServerSelector] = None + custom_selector: Optional[_ServerSelector] = None, ) -> List[ServerDescription]: """List of servers matching the provided selector(s). @@ -273,22 +282,20 @@ def apply_selector( .. versionadded:: 3.4 """ - if getattr(selector, 'min_wire_version', 0): + if getattr(selector, "min_wire_version", 0): common_wv = self.common_wire_version if common_wv and common_wv < selector.min_wire_version: raise ConfigurationError( "%s requires min wire version %d, but topology's min" - " wire version is %d" % (selector, - selector.min_wire_version, - common_wv)) + " wire version is %d" % (selector, selector.min_wire_version, common_wv) + ) if isinstance(selector, _AggWritePref): selector.selection_hook(self) if self.topology_type == TOPOLOGY_TYPE.Unknown: return [] - elif self.topology_type in (TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced): + elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): # Ignore selectors for standalone and load balancer mode. return self.known_servers if address: @@ -304,10 +311,11 @@ def apply_selector( # Apply custom selector followed by localThresholdMS. if custom_selector is not None and selection: selection = selection.with_server_descriptions( - custom_selector(selection.server_descriptions)) + custom_selector(selection.server_descriptions) + ) return self._apply_local_threshold(selection) - def has_readable_server(self, read_preference: _ServerMode =ReadPreference.PRIMARY) -> bool: + def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: """Does this topology have any readable servers available matching the given read preference? @@ -336,11 +344,13 @@ def has_writable_server(self) -> bool: def __repr__(self): # Sort the servers by address. - servers = sorted(self._server_descriptions.values(), - key=lambda sd: sd.address) + servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) return "<%s id: %s, topology_type: %s, servers: %r>" % ( - self.__class__.__name__, self._topology_settings._topology_id, - self.topology_type_name, servers) + self.__class__.__name__, + self._topology_settings._topology_id, + self.topology_type_name, + servers, + ) # If topology type is Unknown and we receive a hello response, what should @@ -386,12 +396,12 @@ def updated_topology_description( if topology_type == TOPOLOGY_TYPE.Single: # Set server type to Unknown if replica set name does not match. - if (set_name is not None and - set_name != server_description.replica_set_name): + if set_name is not None and set_name != server_description.replica_set_name: error = ConfigurationError( "client is configured to connect to a replica set named " - "'%s' but this node belongs to a set named '%s'" % ( - set_name, server_description.replica_set_name)) + "'%s' but this node belongs to a set named '%s'" + % (set_name, server_description.replica_set_name) + ) sds[address] = server_description.to_unknown(error=error) # Single type never changes. return TopologyDescription( @@ -400,7 +410,8 @@ def updated_topology_description( set_name, max_set_version, max_election_id, - topology_description._topology_settings) + topology_description._topology_settings, + ) if topology_type == TOPOLOGY_TYPE.Unknown: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): @@ -421,21 +432,14 @@ def updated_topology_description( sds.pop(address) elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, - set_name, - max_set_version, - max_election_id) = _update_rs_from_primary(sds, - set_name, - server_description, - max_set_version, - max_election_id) - - elif server_type in ( - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther): + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): topology_type, set_name = _update_rs_no_primary_from_member( - sds, set_name, server_description) + sds, set_name, server_description + ) elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): @@ -443,33 +447,26 @@ def updated_topology_description( topology_type = _check_has_primary(sds) elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, - set_name, - max_set_version, - max_election_id) = _update_rs_from_primary(sds, - set_name, - server_description, - max_set_version, - max_election_id) - - elif server_type in ( - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther): - topology_type = _update_rs_with_primary_from_member( - sds, set_name, server_description) + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description) else: # Server type is Unknown or RSGhost: did we just lose the primary? topology_type = _check_has_primary(sds) # Return updated copy. - return TopologyDescription(topology_type, - sds, - set_name, - max_set_version, - max_election_id, - topology_description._topology_settings) + return TopologyDescription( + topology_type, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) def _updated_topology_description_srv_polling(topology_description, seedlist): @@ -487,7 +484,6 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): if set(sds.keys()) == set(seedlist): return topology_description - # Remove SDs corresponding to servers no longer part of the SRV record. for address in list(sds.keys()): if address not in seedlist: @@ -510,15 +506,13 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): topology_description.replica_set_name, topology_description.max_set_version, topology_description.max_election_id, - topology_description._topology_settings) + topology_description._topology_settings, + ) def _update_rs_from_primary( - sds, - replica_set_name, - server_description, - max_set_version, - max_election_id): + sds, replica_set_name, server_description, max_set_version, max_election_id +): """Update topology description from a primary's hello response. Pass in a dict of ServerDescriptions, current replica set name, the @@ -535,35 +529,33 @@ def _update_rs_from_primary( # We found a primary but it doesn't have the replica_set_name # provided by the user. sds.pop(server_description.address) - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) max_election_tuple = max_set_version, max_election_id if None not in server_description.election_tuple: - if (None not in max_election_tuple and - max_election_tuple > server_description.election_tuple): + if ( + None not in max_election_tuple + and max_election_tuple > server_description.election_tuple + ): # Stale primary, set to type Unknown. sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) max_election_id = server_description.election_id - if (server_description.set_version is not None and - (max_set_version is None or - server_description.set_version > max_set_version)): + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): max_set_version = server_description.set_version # We've heard from the primary. Is it the same primary as before? for server in sds.values(): - if (server.server_type is SERVER_TYPE.RSPrimary - and server.address != server_description.address): + if ( + server.server_type is SERVER_TYPE.RSPrimary + and server.address != server_description.address + ): # Reset old primary's type to Unknown. sds[server.address] = server.to_unknown() @@ -582,16 +574,10 @@ def _update_rs_from_primary( # If the host list differs from the seed list, we may not have a primary # after all. - return (_check_has_primary(sds), - replica_set_name, - max_set_version, - max_election_id) + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) -def _update_rs_with_primary_from_member( - sds, - replica_set_name, - server_description): +def _update_rs_with_primary_from_member(sds, replica_set_name, server_description): """RS with known primary. Process a response from a non-primary. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -603,18 +589,14 @@ def _update_rs_with_primary_from_member( if replica_set_name != server_description.replica_set_name: sds.pop(server_description.address) - elif (server_description.me and - server_description.address != server_description.me): + elif server_description.me and server_description.address != server_description.me: sds.pop(server_description.address) # Had this member been the primary? return _check_has_primary(sds) -def _update_rs_no_primary_from_member( - sds, - replica_set_name, - server_description): +def _update_rs_no_primary_from_member(sds, replica_set_name, server_description): """RS without known primary. Update from a non-primary's response. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -636,8 +618,7 @@ def _update_rs_no_primary_from_member( if address not in sds: sds[address] = ServerDescription(address) - if (server_description.me and - server_description.address != server_description.me): + if server_description.me and server_description.address != server_description.me: sds.pop(server_description.address) return topology_type, replica_set_name diff --git a/pymongo/typings.py b/pymongo/typings.py index 767eed36c5..263b591e24 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -13,8 +13,20 @@ # limitations under the License. """Type aliases used by PyMongo""" -from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, MutableMapping, Optional, - Sequence, Tuple, Type, TypeVar, Union) +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) if TYPE_CHECKING: from bson.raw_bson import RawBSONDocument @@ -26,4 +38,6 @@ _CollationIn = Union[Mapping[str, Any], "Collation"] _DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentType = TypeVar('_DocumentType', Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any]) +_DocumentType = TypeVar( + "_DocumentType", Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any] +) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index c213f4217c..76c6e4d513 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -17,20 +17,33 @@ import re import sys import warnings -from typing import (Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, - Union, cast) +from typing import ( + Any, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Tuple, + Union, + cast, +) from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options -from pymongo.common import (INTERNAL_URI_OPTION_NAME_MAP, SRV_SERVICE_NAME, - URI_OPTIONS_DEPRECATION_MAP, - _CaseInsensitiveDictionary, get_validated_options) +from pymongo.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver -SCHEME = 'mongodb://' +SCHEME = "mongodb://" SCHEME_LEN = len(SCHEME) -SRV_SCHEME = 'mongodb+srv://' +SRV_SCHEME = "mongodb+srv://" SRV_SCHEME_LEN = len(SRV_SCHEME) DEFAULT_PORT = 27017 @@ -43,14 +56,15 @@ def _unquoted_percent(s): and '%E2%85%A8' but cannot have unquoted percent like '%foo'. """ for i in range(len(s)): - if s[i] == '%': - sub = s[i:i+3] + if s[i] == "%": + sub = s[i : i + 3] # If unquoting yields the same string this means there was an # unquoted %. if unquote_plus(sub) == sub: return True return False + def parse_userinfo(userinfo: str) -> Tuple[str, str]: """Validates the format of user information in a MongoDB URI. Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", @@ -62,10 +76,11 @@ def parse_userinfo(userinfo: str) -> Tuple[str, str]: :Paramaters: - `userinfo`: A string of the form : """ - if ('@' in userinfo or userinfo.count(':') > 1 or - _unquoted_percent(userinfo)): - raise InvalidURI("Username and password must be escaped according to " - "RFC 3986, use urllib.parse.quote_plus") + if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): + raise InvalidURI( + "Username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. @@ -75,7 +90,9 @@ def parse_userinfo(userinfo: str) -> Tuple[str, str]: return unquote_plus(user), unquote_plus(passwd) -def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> Tuple[str, Optional[Union[str, int]]]: +def parse_ipv6_literal_host( + entity: str, default_port: Optional[int] +) -> Tuple[str, Optional[Union[str, int]]]: """Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where @@ -87,17 +104,19 @@ def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> Tuple[s - `default_port`: The port number to use when one wasn't specified in entity. """ - if entity.find(']') == -1: - raise ValueError("an IPv6 address literal must be " - "enclosed in '[' and ']' according " - "to RFC 2732.") - i = entity.find(']:') + if entity.find("]") == -1: + raise ValueError( + "an IPv6 address literal must be " "enclosed in '[' and ']' according " "to RFC 2732." + ) + i = entity.find("]:") if i == -1: return entity[1:-1], default_port - return entity[1: i], entity[i + 2:] + return entity[1:i], entity[i + 2 :] -def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple[str, Optional[int]]: +def parse_host( + entity: str, default_port: Optional[int] = DEFAULT_PORT +) -> Tuple[str, Optional[int]]: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port @@ -111,21 +130,22 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple """ host = entity port: Optional[Union[str, int]] = default_port - if entity[0] == '[': + if entity[0] == "[": host, port = parse_ipv6_literal_host(entity, default_port) elif entity.endswith(".sock"): return entity, default_port - elif entity.find(':') != -1: - if entity.count(':') > 1: - raise ValueError("Reserved characters such as ':' must be " - "escaped according RFC 2396. An IPv6 " - "address literal must be enclosed in '[' " - "and ']' according to RFC 2732.") - host, port = host.split(':', 1) + elif entity.find(":") != -1: + if entity.count(":") > 1: + raise ValueError( + "Reserved characters such as ':' must be " + "escaped according RFC 2396. An IPv6 " + "address literal must be enclosed in '[' " + "and ']' according to RFC 2732." + ) + host, port = host.split(":", 1) if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535: %s" - % (port,)) + raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,)) port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: @@ -139,7 +159,8 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> Tuple _IMPLICIT_TLSINSECURE_OPTS = { "tlsallowinvalidcertificates", "tlsallowinvalidhostnames", - "tlsdisableocspendpointcheck"} + "tlsdisableocspendpointcheck", +} def _parse_options(opts, delim): @@ -149,12 +170,12 @@ def _parse_options(opts, delim): options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") - if key.lower() == 'readpreferencetags': + if key.lower() == "readpreferencetags": options.setdefault(key, []).append(value) else: if key in options: warnings.warn("Duplicate URI option '%s'." % (key,)) - if key.lower() == 'authmechanismproperties': + if key.lower() == "authmechanismproperties": val = value else: val = unquote_plus(value) @@ -172,49 +193,47 @@ def _handle_security_options(options): MongoDB URI options. """ # Implicitly defined options must not be explicitly specified. - tlsinsecure = options.get('tlsinsecure') + tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: if opt in options: - err_msg = ("URI options %s and %s cannot be specified " - "simultaneously.") - raise InvalidURI(err_msg % ( - options.cased_key('tlsinsecure'), options.cased_key(opt))) + err_msg = "URI options %s and %s cannot be specified " "simultaneously." + raise InvalidURI( + err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) + ) # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. - tlsallowinvalidcerts = options.get('tlsallowinvalidcertificates') + tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") if tlsallowinvalidcerts is not None: - if 'tlsdisableocspendpointcheck' in options: - err_msg = ("URI options %s and %s cannot be specified " - "simultaneously.") - raise InvalidURI(err_msg % ( - 'tlsallowinvalidcertificates', options.cased_key( - 'tlsdisableocspendpointcheck'))) + if "tlsdisableocspendpointcheck" in options: + err_msg = "URI options %s and %s cannot be specified " "simultaneously." + raise InvalidURI( + err_msg + % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) + ) if tlsallowinvalidcerts is True: - options['tlsdisableocspendpointcheck'] = True + options["tlsdisableocspendpointcheck"] = True # Handle co-occurence of CRL and OCSP-related options. - tlscrlfile = options.get('tlscrlfile') + tlscrlfile = options.get("tlscrlfile") if tlscrlfile is not None: - for opt in ('tlsinsecure', 'tlsallowinvalidcertificates', - 'tlsdisableocspendpointcheck'): + for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): if options.get(opt) is True: - err_msg = ("URI option %s=True cannot be specified when " - "CRL checking is enabled.") + err_msg = "URI option %s=True cannot be specified when " "CRL checking is enabled." raise InvalidURI(err_msg % (opt,)) - if 'ssl' in options and 'tls' in options: + if "ssl" in options and "tls" in options: + def truth_value(val): - if val in ('true', 'false'): - return val == 'true' + if val in ("true", "false"): + return val == "true" if isinstance(val, bool): return val return val - if truth_value(options.get('ssl')) != truth_value(options.get('tls')): - err_msg = ("Can not specify conflicting values for URI options %s " - "and %s.") - raise InvalidURI(err_msg % ( - options.cased_key('ssl'), options.cased_key('tls'))) + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s " "and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) return options @@ -231,26 +250,30 @@ def _handle_option_deprecations(options): for optname in list(options): if optname in URI_OPTIONS_DEPRECATION_MAP: mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] - if mode == 'renamed': + if mode == "renamed": newoptname = message if newoptname in options: - warn_msg = ("Deprecated option '%s' ignored in favor of " - "'%s'.") + warn_msg = "Deprecated option '%s' ignored in favor of " "'%s'." warnings.warn( - warn_msg % (options.cased_key(optname), - options.cased_key(newoptname)), - DeprecationWarning, stacklevel=2) + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) options.pop(optname) continue warn_msg = "Option '%s' is deprecated, use '%s' instead." warnings.warn( warn_msg % (options.cased_key(optname), newoptname), - DeprecationWarning, stacklevel=2) - elif mode == 'removed': + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": warn_msg = "Option '%s' is deprecated. %s." warnings.warn( warn_msg % (options.cased_key(optname), message), - DeprecationWarning, stacklevel=2) + DeprecationWarning, + stacklevel=2, + ) return options @@ -264,7 +287,7 @@ def _normalize_options(options): MongoDB URI options. """ # Expand the tlsInsecure option. - tlsinsecure = options.get('tlsinsecure') + tlsinsecure = options.get("tlsinsecure") if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: # Implicit options are logically the same as tlsInsecure. @@ -294,7 +317,9 @@ def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapp return get_validated_options(opts, warn) -def split_options(opts: str, validate: bool = True, warn: bool = False, normalize: bool = True) -> MutableMapping[str, Any]: +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: """Takes the options portion of a MongoDB URI, validates each option and returns the options in a dictionary. @@ -332,14 +357,15 @@ def split_options(opts: str, validate: bool = True, warn: bool = False, normaliz if validate: options = validate_options(options, warn) - if options.get('authsource') == '': - raise InvalidURI( - "the authSource database cannot be an empty string") + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") return options -def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[Tuple[str, Optional[int]]]: +def split_hosts( + hosts: str, default_port: Optional[int] = DEFAULT_PORT +) -> List[Tuple[str, Optional[int]]]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. @@ -353,13 +379,12 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[ for a host. """ nodes = [] - for entity in hosts.split(','): + for entity in hosts.split(","): if not entity: - raise ConfigurationError("Empty host " - "(or extra comma in host list).") + raise ConfigurationError("Empty host " "(or extra comma in host list).") port = default_port # Unix socket entities don't have ports - if entity.endswith('.sock'): + if entity.endswith(".sock"): port = None nodes.append(parse_host(entity, port)) return nodes @@ -367,29 +392,25 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[ # Prohibited characters in database name. DB names also can't have ".", but for # backward-compat we allow "db.collection" in URI. -_BAD_DB_CHARS = re.compile('[' + re.escape(r'/ "$') + ']') +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") _ALLOWED_TXT_OPTS = frozenset( - ['authsource', 'authSource', 'replicaset', 'replicaSet', 'loadbalanced', - 'loadBalanced']) + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) def _check_options(nodes, options): # Ensure directConnection was not True if there are multiple seeds. - if len(nodes) > 1 and options.get('directconnection'): - raise ConfigurationError( - 'Cannot specify multiple hosts with directConnection=true') + if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") - if options.get('loadbalanced'): + if options.get("loadbalanced"): if len(nodes) > 1: - raise ConfigurationError( - 'Cannot specify multiple hosts with loadBalanced=true') - if options.get('directconnection'): - raise ConfigurationError( - 'Cannot specify directConnection=true with loadBalanced=true') - if options.get('replicaset'): - raise ConfigurationError( - 'Cannot specify replicaSet with loadBalanced=true') + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") def parse_uri( @@ -400,7 +421,7 @@ def parse_uri( normalize: bool = True, connect_timeout: Optional[float] = None, srv_service_name: Optional[str] = None, - srv_max_hosts: Optional[int] = None + srv_max_hosts: Optional[int] = None, ) -> Dict[str, Any]: """Parse and validate a MongoDB URI. @@ -460,14 +481,16 @@ def parse_uri( python_path = sys.executable or "python" raise ConfigurationError( 'The "dnspython" module must be ' - 'installed to use mongodb+srv:// URIs. ' - 'To fix this error install pymongo with the srv extra:\n ' - '%s -m pip install "pymongo[srv]"' % (python_path)) + "installed to use mongodb+srv:// URIs. " + "To fix this error install pymongo with the srv extra:\n " + '%s -m pip install "pymongo[srv]"' % (python_path) + ) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: - raise InvalidURI("Invalid URI scheme: URI must " - "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME)) + raise InvalidURI( + "Invalid URI scheme: URI must " "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) + ) if not scheme_free: raise InvalidURI("Must provide at least one hostname or IP.") @@ -478,21 +501,20 @@ def parse_uri( collection = None options = _CaseInsensitiveDictionary() - host_part, _, path_part = scheme_free.partition('/') + host_part, _, path_part = scheme_free.partition("/") if not host_part: host_part = path_part path_part = "" - if not path_part and '?' in host_part: - raise InvalidURI("A '/' is required between " - "the host list and any options.") + if not path_part and "?" in host_part: + raise InvalidURI("A '/' is required between " "the host list and any options.") if path_part: - dbase, _, opts = path_part.partition('?') + dbase, _, opts = path_part.partition("?") if dbase: dbase = unquote_plus(dbase) - if '.' in dbase: - dbase, collection = dbase.split('.', 1) + if "." in dbase: + dbase, collection = dbase.split(".", 1) if _BAD_DB_CHARS.search(dbase): raise InvalidURI('Bad database name "%s"' % dbase) else: @@ -502,77 +524,74 @@ def parse_uri( options.update(split_options(opts, validate, warn, normalize)) if srv_service_name is None: srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) - if '@' in host_part: - userinfo, _, hosts = host_part.rpartition('@') + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") user, passwd = parse_userinfo(userinfo) else: hosts = host_part - if '/' in hosts: - raise InvalidURI("Any '/' in a unix domain socket must be" - " percent-encoded: %s" % host_part) + if "/" in hosts: + raise InvalidURI( + "Any '/' in a unix domain socket must be" " percent-encoded: %s" % host_part + ) hosts = unquote_plus(hosts) fqdn = None srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: - if options.get('directConnection'): + if options.get("directConnection"): raise ConfigurationError( - "Cannot specify directConnection=true with " - "%s URIs" % (SRV_SCHEME,)) + "Cannot specify directConnection=true with " "%s URIs" % (SRV_SCHEME,) + ) nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI( - "%s URIs must include one, " - "and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI("%s URIs must include one, " "and only one, hostname" % (SRV_SCHEME,)) fqdn, port = nodes[0] if port is not None: - raise InvalidURI( - "%s URIs must not include a port number" % (SRV_SCHEME,)) + raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, - srv_max_hosts) + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) nodes = dns_resolver.get_hosts() dns_options = dns_resolver.get_options() if dns_options: - parsed_dns_options = split_options( - dns_options, validate, warn, normalize) + parsed_dns_options = split_options(dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are " - "supported from DNS") + "Only authSource, replicaSet, and loadBalanced are " "supported from DNS" + ) for opt, val in parsed_dns_options.items(): if opt not in options: options[opt] = val if options.get("loadBalanced") and srv_max_hosts: - raise InvalidURI( - "You cannot specify loadBalanced with srvMaxHosts") + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") if options.get("replicaSet") and srv_max_hosts: raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") if "tls" not in options and "ssl" not in options: - options["tls"] = True if validate else 'true' + options["tls"] = True if validate else "true" elif not is_srv and options.get("srvServiceName") is not None: - raise ConfigurationError("The srvServiceName option is only allowed " - "with 'mongodb+srv://' URIs") + raise ConfigurationError( + "The srvServiceName option is only allowed " "with 'mongodb+srv://' URIs" + ) elif not is_srv and srv_max_hosts: - raise ConfigurationError("The srvMaxHosts option is only allowed " - "with 'mongodb+srv://' URIs") + raise ConfigurationError( + "The srvMaxHosts option is only allowed " "with 'mongodb+srv://' URIs" + ) else: nodes = split_hosts(hosts, default_port=default_port) _check_options(nodes, options) return { - 'nodelist': nodes, - 'username': user, - 'password': passwd, - 'database': dbase, - 'collection': collection, - 'options': options, - 'fqdn': fqdn + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, } @@ -581,37 +600,39 @@ def _parse_kms_tls_options(kms_tls_options): if not kms_tls_options: return {} if not isinstance(kms_tls_options, dict): - raise TypeError('kms_tls_options must be a dict') + raise TypeError("kms_tls_options must be a dict") contexts = {} for provider, opts in kms_tls_options.items(): if not isinstance(opts, dict): raise TypeError(f'kms_tls_options["{provider}"] must be a dict') - opts.setdefault('tls', True) + opts.setdefault("tls", True) opts = _CaseInsensitiveDictionary(opts) opts = _handle_security_options(opts) opts = _normalize_options(opts) opts = validate_options(opts) ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) if ssl_context is None: - raise ConfigurationError('TLS is required for KMS providers') + raise ConfigurationError("TLS is required for KMS providers") if allow_invalid_hostnames: - raise ConfigurationError('Insecure TLS options prohibited') - - for n in ['tlsInsecure', - 'tlsAllowInvalidCertificates', - 'tlsAllowInvalidHostnames', - 'tlsDisableOCSPEndpointCheck', - 'tlsDisableCertificateRevocationCheck']: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableOCSPEndpointCheck", + "tlsDisableCertificateRevocationCheck", + ]: if n in opts: - raise ConfigurationError( - f'Insecure TLS options prohibited: {n}') + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") contexts[provider] = ssl_context return contexts -if __name__ == '__main__': +if __name__ == "__main__": import pprint import sys + try: pprint.pprint(parse_uri(sys.argv[1])) except InvalidURI as exc: diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 5168948ee3..fea912d569 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -47,7 +47,13 @@ class WriteConcern(object): __slots__ = ("__document", "__acknowledged", "__server_default") - def __init__(self, w: Optional[Union[int, str]] = None, wtimeout: Optional[int] = None, j: Optional[bool] = None, fsync: Optional[bool] = None) -> None: + def __init__( + self, + w: Optional[Union[int, str]] = None, + wtimeout: Optional[int] = None, + j: Optional[bool] = None, + fsync: Optional[bool] = None, + ) -> None: self.__document: Dict[str, Any] = {} self.__acknowledged = True @@ -67,8 +73,7 @@ def __init__(self, w: Optional[Union[int, str]] = None, wtimeout: Optional[int] if not isinstance(fsync, bool): raise TypeError("fsync must be True or False") if j and fsync: - raise ConfigurationError("Can't set both j " - "and fsync at the same time") + raise ConfigurationError("Can't set both j " "and fsync at the same time") self.__document["fsync"] = fsync if w == 0 and j is True: @@ -108,8 +113,7 @@ def acknowledged(self) -> bool: return self.__acknowledged def __repr__(self): - return ("WriteConcern(%s)" % ( - ", ".join("%s=%s" % kvt for kvt in self.__document.items()),)) + return "WriteConcern(%s)" % (", ".join("%s=%s" % kvt for kvt in self.__document.items()),) def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): diff --git a/setup.py b/setup.py index fde9ae1b3f..5dbbdde22b 100755 --- a/setup.py +++ b/setup.py @@ -4,7 +4,6 @@ import sys import warnings - if sys.version_info[:2] < (3, 6): raise RuntimeError("Python version >= 3.6 required.") @@ -15,8 +14,8 @@ except ImportError: pass -from setuptools import setup, __version__ as _setuptools_version - +from setuptools import __version__ as _setuptools_version +from setuptools import setup if sys.version_info[:2] < (3, 10): from distutils.cmd import Command @@ -54,13 +53,14 @@ # generated by distutils for Apple provided pythons, allowing C extension # builds to complete without error. The inspiration comes from older # versions of distutils.sysconfig.get_config_vars. -if sys.platform == 'darwin' and 'clang' in platform.python_compiler().lower(): +if sys.platform == "darwin" and "clang" in platform.python_compiler().lower(): from distutils.sysconfig import get_config_vars + res = get_config_vars() - for key in ('CFLAGS', 'PY_CFLAGS'): + for key in ("CFLAGS", "PY_CFLAGS"): if key in res: flags = res[key] - flags = re.sub('-mno-fused-madd', '', flags) + flags = re.sub("-mno-fused-madd", "", flags) res[key] = flags @@ -69,11 +69,9 @@ class test(Command): user_options = [ ("test-module=", "m", "Discover tests in specified module"), - ("test-suite=", "s", - "Test suite to run (e.g. 'some_module.test_suite')"), + ("test-suite=", "s", "Test suite to run (e.g. 'some_module.test_suite')"), ("failfast", "f", "Stop running tests on first failure or error"), - ("xunit-output=", "x", - "Generate a results directory with XUnit XML format") + ("xunit-output=", "x", "Generate a results directory with XUnit XML format"), ] def initialize_options(self): @@ -84,44 +82,42 @@ def initialize_options(self): def finalize_options(self): if self.test_suite is None and self.test_module is None: - self.test_module = 'test' + self.test_module = "test" elif self.test_module is not None and self.test_suite is not None: - raise Exception( - "You may specify a module or suite, but not both" - ) + raise Exception("You may specify a module or suite, but not both") def run(self): # Installing required packages, running egg_info and build_ext are # part of normal operation for setuptools.command.test.test if self.distribution.install_requires: - self.distribution.fetch_build_eggs( - self.distribution.install_requires) + self.distribution.fetch_build_eggs(self.distribution.install_requires) if self.distribution.tests_require: self.distribution.fetch_build_eggs(self.distribution.tests_require) if self.xunit_output: self.distribution.fetch_build_eggs(["unittest-xml-reporting"]) - self.run_command('egg_info') - build_ext_cmd = self.reinitialize_command('build_ext') + self.run_command("egg_info") + build_ext_cmd = self.reinitialize_command("build_ext") build_ext_cmd.inplace = 1 - self.run_command('build_ext') + self.run_command("build_ext") # Construct a TextTestRunner directly from the unittest imported from # test, which creates a TestResult that supports the 'addSkip' method. # setuptools will by default create a TextTestRunner that uses the old # TestResult class. - from test import unittest, PymongoTestRunner, test_cases + from test import PymongoTestRunner, test_cases, unittest + if self.test_suite is None: all_tests = unittest.defaultTestLoader.discover(self.test_module) suite = unittest.TestSuite() - suite.addTests(sorted(test_cases(all_tests), - key=lambda x: x.__module__)) + suite.addTests(sorted(test_cases(all_tests), key=lambda x: x.__module__)) else: - suite = unittest.defaultTestLoader.loadTestsFromName( - self.test_suite) + suite = unittest.defaultTestLoader.loadTestsFromName(self.test_suite) if self.xunit_output: from test import PymongoXMLTestRunner - runner = PymongoXMLTestRunner(verbosity=2, failfast=self.failfast, - output=self.xunit_output) + + runner = PymongoXMLTestRunner( + verbosity=2, failfast=self.failfast, output=self.xunit_output + ) else: runner = PymongoTestRunner(verbosity=2, failfast=self.failfast) result = runner.run(suite) @@ -132,8 +128,7 @@ class doc(Command): description = "generate or test documentation" - user_options = [("test", "t", - "run doctests instead of generating documentation")] + user_options = [("test", "t", "run doctests instead of generating documentation")] boolean_options = ["test"] @@ -146,16 +141,13 @@ def finalize_options(self): def run(self): if not _HAVE_SPHINX: - raise RuntimeError( - "You must install Sphinx to build or test the documentation.") + raise RuntimeError("You must install Sphinx to build or test the documentation.") if self.test: - path = os.path.join( - os.path.abspath('.'), "doc", "_build", "doctest") + path = os.path.join(os.path.abspath("."), "doc", "_build", "doctest") mode = "doctest" else: - path = os.path.join( - os.path.abspath('.'), "doc", "_build", version) + path = os.path.join(os.path.abspath("."), "doc", "_build", version) mode = "html" try: @@ -168,7 +160,7 @@ def run(self): # sphinx.main calls sys.exit when sphinx.build_main exists. # Call build_main directly so we can check status and print # the full path to the built docs. - if hasattr(sphinx, 'build_main'): + if hasattr(sphinx, "build_main"): status = sphinx.build_main(sphinx_args) else: status = sphinx.main(sphinx_args) @@ -176,8 +168,9 @@ def run(self): if status: raise RuntimeError("documentation step '%s' failed" % (mode,)) - sys.stdout.write("\nDocumentation step '%s' performed, results here:\n" - " %s/\n" % (mode, path)) + sys.stdout.write( + "\nDocumentation step '%s' performed, results here:\n" " %s/\n" % (mode, path) + ) class custom_build_ext(build_ext): @@ -234,11 +227,14 @@ def run(self): build_ext.run(self) except Exception: e = sys.exc_info()[1] - sys.stdout.write('%s\n' % str(e)) - warnings.warn(self.warning_message % ("Extension modules", - "There was an issue with " - "your platform configuration" - " - see above.")) + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "Extension modules", + "There was an issue with " "your platform configuration" " - see above.", + ) + ) def build_extension(self, ext): name = ext.name @@ -246,68 +242,75 @@ def build_extension(self, ext): build_ext.build_extension(self, ext) except Exception: e = sys.exc_info()[1] - sys.stdout.write('%s\n' % str(e)) - warnings.warn(self.warning_message % ("The %s extension " - "module" % (name,), - "The output above " - "this warning shows how " - "the compilation " - "failed.")) - -ext_modules = [Extension('bson._cbson', - include_dirs=['bson'], - sources=['bson/_cbsonmodule.c', - 'bson/time64.c', - 'bson/buffer.c', - 'bson/encoding_helpers.c']), - Extension('pymongo._cmessage', - include_dirs=['bson'], - sources=['pymongo/_cmessagemodule.c', - 'bson/buffer.c'])] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "The %s extension " "module" % (name,), + "The output above " "this warning shows how " "the compilation " "failed.", + ) + ) + + +ext_modules = [ + Extension( + "bson._cbson", + include_dirs=["bson"], + sources=[ + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + "bson/encoding_helpers.c", + ], + ), + Extension( + "pymongo._cmessage", + include_dirs=["bson"], + sources=["pymongo/_cmessagemodule.c", "bson/buffer.c"], + ), +] # PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced # a related feature we need. 17.2.0 fixes a bug # in set_default_verify_paths we should really avoid. # service_identity 18.1.0 introduced support for IP addr matching. pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] -if sys.platform in ('win32', 'darwin'): +if sys.platform in ("win32", "darwin"): # Fallback to certifi on Windows if we can't load CA certs from the system # store and just use certifi on macOS. # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - pyopenssl_reqs.append('certifi') + pyopenssl_reqs.append("certifi") extras_require = { - 'encryption': ['pymongocrypt>=1.2.0,<2.0.0'], - 'ocsp': pyopenssl_reqs, - 'snappy': ['python-snappy'], - 'zstd': ['zstandard'], - 'aws': ['pymongo-auth-aws<2.0.0'], - 'srv': ["dnspython>=1.16.0,<3.0.0"] + "encryption": ["pymongocrypt>=1.2.0,<2.0.0"], + "ocsp": pyopenssl_reqs, + "snappy": ["python-snappy"], + "zstd": ["zstandard"], + "aws": ["pymongo-auth-aws<2.0.0"], + "srv": ["dnspython>=1.16.0,<3.0.0"], } # GSSAPI extras -if sys.platform == 'win32': - extras_require['gssapi'] = ["winkerberos>=0.5.0"] +if sys.platform == "win32": + extras_require["gssapi"] = ["winkerberos>=0.5.0"] else: - extras_require['gssapi'] = ["pykerberos"] + extras_require["gssapi"] = ["pykerberos"] -extra_opts = { - "packages": ["bson", "pymongo", "gridfs"] -} +extra_opts = {"packages": ["bson", "pymongo", "gridfs"]} if "--no_ext" in sys.argv: sys.argv.remove("--no_ext") -elif (sys.platform.startswith("java") or - sys.platform == "cli" or - "PyPy" in sys.version): - sys.stdout.write(""" +elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: + sys.stdout.write( + """ *****************************************************\n The optional C extensions are currently not supported\n by this python implementation.\n *****************************************************\n -""") +""" + ) else: - extra_opts['ext_modules'] = ext_modules + extra_opts["ext_modules"] = ext_modules setup( name="pymongo", @@ -336,10 +339,9 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.10", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database"], - cmdclass={"build_ext": custom_build_ext, - "doc": doc, - "test": test}, + "Topic :: Database", + ], + cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, extras_require=extras_require, **extra_opts ) diff --git a/test/__init__.py b/test/__init__.py index c02eb97949..32220cfff3 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -27,6 +27,7 @@ try: from xmlrunner import XMLTestRunner + HAVE_XML = True # ValueError is raised when version 3+ is installed on Jython 2.7. except (ImportError, ValueError): @@ -34,18 +35,19 @@ try: import ipaddress + HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False from contextlib import contextmanager from functools import wraps +from test.version import Version from typing import Dict, no_type_check from unittest import SkipTest import pymongo import pymongo.errors - from bson.son import SON from pymongo import common, message from pymongo.common import partition_node @@ -55,7 +57,6 @@ from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl from pymongo.uri_parser import parse_uri -from test.version import Version if HAVE_SSL: import ssl @@ -64,36 +65,34 @@ # Enable the fault handler to dump the traceback of each running thread # after a segfault. import faulthandler + faulthandler.enable() except ImportError: pass # Enable debug output for uncollectable objects. PyPy does not have set_debug. -if hasattr(gc, 'set_debug'): +if hasattr(gc, "set_debug"): gc.set_debug( - gc.DEBUG_UNCOLLECTABLE | - getattr(gc, 'DEBUG_OBJECTS', 0) | - getattr(gc, 'DEBUG_INSTANCES', 0)) + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) # The host and port of a single mongod or mongos, or the seed host # for a replica set. -host = os.environ.get("DB_IP", 'localhost') +host = os.environ.get("DB_IP", "localhost") port = int(os.environ.get("DB_PORT", 27017)) db_user = os.environ.get("DB_USER", "user") db_pwd = os.environ.get("DB_PASSWORD", "password") -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.environ.get('CLIENT_PEM', - os.path.join(CERT_PATH, 'client.pem')) -CA_PEM = os.environ.get('CA_PEM', os.path.join(CERT_PATH, 'ca.pem')) +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) TLS_OPTIONS: Dict = dict(tls=True) if CLIENT_PEM: - TLS_OPTIONS['tlsCertificateKeyFile'] = CLIENT_PEM + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM if CA_PEM: - TLS_OPTIONS['tlsCAFile'] = CA_PEM + TLS_OPTIONS["tlsCAFile"] = CA_PEM COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") @@ -104,20 +103,21 @@ if TEST_LOADBALANCER: # Remove after PYTHON-2712 from pymongo import pool + pool._MOCK_SERVICE_ID = True res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res['nodelist'][0] - db_user = res['username'] or db_user - db_pwd = res['password'] or db_pwd + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd elif TEST_SERVERLESS: TEST_LOADBALANCER = True res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res['nodelist'][0] - db_user = res['username'] or db_user - db_pwd = res['password'] or db_pwd - TLS_OPTIONS = {'tls': True} + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + TLS_OPTIONS = {"tls": True} # Spec says serverless tests must be run with compression. - COMPRESSORS = COMPRESSORS or 'zlib' + COMPRESSORS = COMPRESSORS or "zlib" def is_server_resolvable(): @@ -126,7 +126,7 @@ def is_server_resolvable(): socket.setdefaulttimeout(1) try: try: - socket.gethostbyname('server') + socket.gethostbyname("server") return True except socket.error: return False @@ -135,22 +135,23 @@ def is_server_resolvable(): def _create_user(authdb, user, pwd=None, roles=None, **kwargs): - cmd = SON([('createUser', user)]) + cmd = SON([("createUser", user)]) # X509 doesn't use a password if pwd: - cmd['pwd'] = pwd - cmd['roles'] = roles or ['root'] + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] cmd.update(**kwargs) return authdb.command(cmd) class client_knobs(object): def __init__( - self, - heartbeat_frequency=None, - min_heartbeat_interval=None, - kill_cursor_frequency=None, - events_queue_frequency=None): + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): self.heartbeat_frequency = heartbeat_frequency self.min_heartbeat_interval = min_heartbeat_interval self.kill_cursor_frequency = kill_cursor_frequency @@ -182,7 +183,7 @@ def enable(self): common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency self._enabled = True # Store the allocation traceback to catch non-disabled client_knobs. - self._stack = ''.join(traceback.format_stack()) + self._stack = "".join(traceback.format_stack()) def __enter__(self): self.enable() @@ -204,6 +205,7 @@ def make_wrapper(f): def wrap(*args, **kwargs): with self: return f(*args, **kwargs) + return wrap return make_wrapper(func) @@ -211,20 +213,23 @@ def wrap(*args, **kwargs): def __del__(self): if self._enabled: msg = ( - 'ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, ' - 'MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, ' - 'EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s' % ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, " + "MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, " + "EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s" + % ( common.HEARTBEAT_FREQUENCY, common.MIN_HEARTBEAT_INTERVAL, common.KILL_CURSOR_FREQUENCY, common.EVENTS_QUEUE_FREQUENCY, - self._stack)) + self._stack, + ) + ) self.disable() raise Exception(msg) def _all_users(db): - return set(u['user'] for u in db.command('usersInfo').get('users', [])) + return set(u["user"] for u in db.command("usersInfo").get("users", [])) class ClientContext(object): @@ -273,10 +278,10 @@ def client_options(self): """Return the MongoClient options for creating a duplicate client.""" opts = client_context.default_client_options.copy() if client_context.auth_enabled: - opts['username'] = db_user - opts['password'] = db_pwd + opts["username"] = db_user + opts["password"] = db_pwd if self.replica_set_name: - opts['replicaSet'] = self.replica_set_name + opts["replicaSet"] = self.replica_set_name return opts @property @@ -287,29 +292,26 @@ def hello(self): def _connect(self, host, port, **kwargs): # Jython takes a long time to connect. - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): timeout_ms = 10000 else: timeout_ms = 5000 kwargs.update(self.default_client_options) - client = pymongo.MongoClient( - host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) + client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) try: try: client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - 'connected client %r, but legacy hello failed: %s' % ( - client, exc)) + "connected client %r, but legacy hello failed: %s" % (client, exc) + ) else: - self.connection_attempts.append( - 'successfully connected client %r' % (client,)) + self.connection_attempts.append("successfully connected client %r" % (client,)) # If connected, then return client with default timeout return pymongo.MongoClient(host, port, **kwargs) except pymongo.errors.ConnectionFailure as exc: - self.connection_attempts.append( - 'failed to connect client %r: %s' % (client, exc)) + self.connection_attempts.append("failed to connect client %r: %s" % (client, exc)) return None finally: client.close() @@ -320,12 +322,11 @@ def _init_client(self): if self.client is not None: # Return early when connected to dataLake as mongohoused does not # support the getCmdLineOpts command and is tested without TLS. - build_info = self.client.admin.command('buildInfo') - if 'dataLake' in build_info: + build_info = self.client.admin.command("buildInfo") + if "dataLake" in build_info: self.is_data_lake = True self.auth_enabled = True - self.client = self._connect( - host, port, username=db_user, password=db_pwd) + self.client = self._connect(host, port, username=db_user, password=db_pwd) self.connected = True return @@ -344,11 +345,11 @@ def _init_client(self): self.auth_enabled = True else: try: - self.cmd_line = self.client.admin.command('getCmdLineOpts') + self.cmd_line = self.client.admin.command("getCmdLineOpts") except pymongo.errors.OperationFailure as e: assert e.details is not None - msg = e.details.get('errmsg', '') - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: # Unauthorized. self.auth_enabled = True else: @@ -363,26 +364,30 @@ def _init_client(self): _create_user(self.client.admin, db_user, db_pwd) self.client = self._connect( - host, port, username=db_user, password=db_pwd, + host, + port, + username=db_user, + password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options) + **self.default_client_options + ) # May not have this if OperationFailure was raised earlier. - self.cmd_line = self.client.admin.command('getCmdLineOpts') + self.cmd_line = self.client.admin.command("getCmdLineOpts") if self.serverless: self.server_status = {} else: - self.server_status = self.client.admin.command('serverStatus') + self.server_status = self.client.admin.command("serverStatus") if self.storage_engine == "mmapv1": # MMAPv1 does not support retryWrites=True. - self.default_client_options['retryWrites'] = False + self.default_client_options["retryWrites"] = False hello = self.hello - self.sessions_enabled = 'logicalSessionTimeoutMinutes' in hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello - if 'setName' in hello: - self.replica_set_name = str(hello['setName']) + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) self.is_rs = True if self.auth_enabled: # It doesn't matter which member we use as the seed here. @@ -392,23 +397,19 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options) + **self.default_client_options + ) else: self.client = pymongo.MongoClient( - host, - port, - replicaSet=self.replica_set_name, - **self.default_client_options) + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) # Get the authoritative hello result from the primary. self._hello = None hello = self.hello - nodes = [partition_node(node.lower()) - for node in hello.get('hosts', [])] - nodes.extend([partition_node(node.lower()) - for node in hello.get('passives', [])]) - nodes.extend([partition_node(node.lower()) - for node in hello.get('arbiters', [])]) + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) self.nodes = set(nodes) else: self.nodes = set([(host, port)]) @@ -417,40 +418,38 @@ def _init_client(self): if self.serverless: self.server_parameters = { - 'requireApiVersion': False, - 'enableTestCommands': True, + "requireApiVersion": False, + "enableTestCommands": True, } self.test_commands_enabled = True self.has_ipv6 = False else: - self.server_parameters = self.client.admin.command( - 'getParameter', '*') + self.server_parameters = self.client.admin.command("getParameter", "*") assert self.cmd_line is not None - if 'enableTestCommands=1' in self.cmd_line['argv']: + if "enableTestCommands=1" in self.cmd_line["argv"]: self.test_commands_enabled = True - elif 'parsed' in self.cmd_line: - params = self.cmd_line['parsed'].get('setParameter', []) - if 'enableTestCommands=1' in params: + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: self.test_commands_enabled = True else: - params = self.cmd_line['parsed'].get('setParameter', {}) - if params.get('enableTestCommands') == '1': + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": self.test_commands_enabled = True self.has_ipv6 = self._server_started_with_ipv6() - self.is_mongos = (self.hello.get('msg') == 'isdbgrid') + self.is_mongos = self.hello.get("msg") == "isdbgrid" if self.is_mongos: address = self.client.address self.mongoses.append(address) if not self.serverless: # Check for another mongos on the next port. assert address is not None - next_address = address[0], address[1] + 1 - mongos_client = self._connect( - *next_address, **self.default_client_options) + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) if mongos_client: hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) - if hello.get('msg') == 'isdbgrid': + if hello.get("msg") == "isdbgrid": self.mongoses.append(next_address) def init(self): @@ -459,7 +458,7 @@ def init(self): self._init_client() def connection_attempt_info(self): - return '\n'.join(self.connection_attempts) + return "\n".join(self.connection_attempts) @property def host(self): @@ -496,18 +495,20 @@ def storage_engine(self): def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" client = pymongo.MongoClient( - host, port, + host, + port, username=db_user, password=db_pwd, serverSelectionTimeoutMS=100, - **self.default_client_options) + **self.default_client_options + ) try: return db_user in _all_users(client.admin) except pymongo.errors.OperationFailure as e: assert e.details is not None - msg = e.details.get('errmsg', '') - if e.code == 18 or 'auth fails' in msg: + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: # Auth failed. return False else: @@ -516,32 +517,31 @@ def _check_user_provided(self): def _server_started_with_auth(self): # MongoDB >= 2.0 assert self.cmd_line is not None - if 'parsed' in self.cmd_line: - parsed = self.cmd_line['parsed'] + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] + if "security" in parsed: + security = parsed["security"] # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' + if "authorization" in security: + return security["authorization"] == "enabled" # < rc3 - return (security.get('auth', False) or - bool(security.get('keyFile'))) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy - argv = self.cmd_line['argv'] - return '--auth' in argv or '--keyFile' in argv + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv def _server_started_with_ipv6(self): if not socket.has_ipv6: return False assert self.cmd_line is not None - if 'parsed' in self.cmd_line: - if not self.cmd_line['parsed'].get('net', {}).get('ipv6'): + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): return False else: - if '--ipv6' not in self.cmd_line['argv']: + if "--ipv6" not in self.cmd_line["argv"]: return False # The server was started with --ipv6. Is there an IPv6 route to it? @@ -561,101 +561,107 @@ def wrap(*args, **kwargs): self.init() # Always raise SkipTest if we can't connect to MongoDB if not self.connected: - raise SkipTest( - "Cannot connect to MongoDB on %s" % (self.pair,)) + raise SkipTest("Cannot connect to MongoDB on %s" % (self.pair,)) if condition(): return f(*args, **kwargs) raise SkipTest(msg) + return wrap if func is None: + def decorate(f): return make_wrapper(f) + return decorate return make_wrapper(func) def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): - kwargs['writeConcern'] = {'w': self.w} + kwargs["writeConcern"] = {"w": self.w} return _create_user(self.client[dbname], user, pwd, roles, **kwargs) def drop_user(self, dbname, user): - self.client[dbname].command( - 'dropUser', user, writeConcern={'w': self.w}) + self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) def require_connection(self, func): """Run a test only if we can connect to MongoDB.""" return self._require( lambda: True, # _require checks if we're connected "Cannot connect to MongoDB on %s" % (self.pair,), - func=func) + func=func, + ) def require_data_lake(self, func): """Run a test only if we are connected to Atlas Data Lake.""" return self._require( lambda: self.is_data_lake, "Not connected to Atlas Data Lake on %s" % (self.pair,), - func=func) + func=func, + ) def require_no_mmap(self, func): """Run a test only if the server is not using the MMAPv1 storage engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters. """ + run regardless of storage engine on sharded clusters.""" + def is_not_mmap(): if self.is_mongos: return True - return self.storage_engine != 'mmapv1' + return self.storage_engine != "mmapv1" - return self._require( - is_not_mmap, "Storage engine must not be MMAPv1", func=func) + return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) - return self._require(lambda: self.version >= other_version, - "Server version must be at least %s" - % str(other_version)) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) def require_version_max(self, *ver): """Run a test only if the server version is at most ``version``.""" other_version = Version(*ver) - return self._require(lambda: self.version <= other_version, - "Server version must be at most %s" - % str(other_version)) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" - return self._require(lambda: self.auth_enabled, - "Authentication is not enabled on the server", - func=func) + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" - return self._require(lambda: not self.auth_enabled, - "Authentication must not be enabled on the server", - func=func) + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) def require_replica_set(self, func): """Run a test only if the client is connected to a replica set.""" - return self._require(lambda: self.is_rs, - "Not connected to a replica set", - func=func) + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) def require_secondaries_count(self, count): """Run a test only if the client is connected to a replica set that has `count` secondaries. """ + def sec_count(): return 0 if not self.client else len(self.client.secondaries) - return self._require(lambda: sec_count() >= count, - "Not enough secondaries available") + + return self._require(lambda: sec_count() >= count, "Not enough secondaries available") @property def supports_secondary_read_pref(self): if self.has_secondaries: return True if self.is_mongos: - shard = self.client.config.shards.find_one()['host'] - num_members = shard.count(',') + 1 + shard = self.client.config.shards.find_one()["host"] + num_members = shard.count(",") + 1 return num_members > 1 return False @@ -663,90 +669,94 @@ def require_secondary_read_pref(self): """Run a test only if the client is connected to a cluster that supports secondary read preference """ - return self._require(lambda: self.supports_secondary_read_pref, - "This cluster does not support secondary read " - "preference") + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read " "preference", + ) def require_no_replica_set(self, func): """Run a test if the client is *not* connected to a replica set.""" return self._require( - lambda: not self.is_rs, - "Connected to a replica set, not a standalone mongod", - func=func) + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) def require_ipv6(self, func): """Run a test only if the client can connect to a server via IPv6.""" - return self._require(lambda: self.has_ipv6, - "No IPv6", - func=func) + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) def require_no_mongos(self, func): """Run a test only if the client is not connected to a mongos.""" - return self._require(lambda: not self.is_mongos, - "Must be connected to a mongod, not a mongos", - func=func) + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) def require_mongos(self, func): """Run a test only if the client is connected to a mongos.""" - return self._require(lambda: self.is_mongos, - "Must be connected to a mongos", - func=func) + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) def require_multiple_mongoses(self, func): """Run a test only if the client is connected to a sharded cluster that has 2 mongos nodes.""" - return self._require(lambda: len(self.mongoses) > 1, - "Must have multiple mongoses available", - func=func) + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) def require_standalone(self, func): """Run a test only if the client is connected to a standalone.""" - return self._require(lambda: not (self.is_mongos or self.is_rs), - "Must be connected to a standalone", - func=func) + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) def require_no_standalone(self, func): """Run a test only if the client is not connected to a standalone.""" - return self._require(lambda: self.is_mongos or self.is_rs, - "Must be connected to a replica set or mongos", - func=func) + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) def require_load_balancer(self, func): """Run a test only if the client is connected to a load balancer.""" - return self._require(lambda: self.load_balancer, - "Must be connected to a load balancer", - func=func) + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) def require_no_load_balancer(self, func): - """Run a test only if the client is not connected to a load balancer. - """ - return self._require(lambda: not self.load_balancer, - "Must not be connected to a load balancer", - func=func) + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) def is_topology_type(self, topologies): - unknown = set(topologies) - {'single', 'replicaset', 'sharded', - 'sharded-replicaset', 'load-balanced'} + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "sharded-replicaset", + "load-balanced", + } if unknown: - raise AssertionError('Unknown topologies: %r' % (unknown,)) + raise AssertionError("Unknown topologies: %r" % (unknown,)) if self.load_balancer: - if 'load-balanced' in topologies: + if "load-balanced" in topologies: return True return False - if 'single' in topologies and not (self.is_mongos or self.is_rs): + if "single" in topologies and not (self.is_mongos or self.is_rs): return True - if 'replicaset' in topologies and self.is_rs: + if "replicaset" in topologies and self.is_rs: return True - if 'sharded' in topologies and self.is_mongos: + if "sharded" in topologies and self.is_mongos: return True - if 'sharded-replicaset' in topologies and self.is_mongos: + if "sharded-replicaset" in topologies and self.is_mongos: shards = list(client_context.client.config.shards.find()) for shard in shards: # For a 3-member RS-backed sharded cluster, shard['host'] # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' # Otherwise it will be 'ip1:port1' - host_spec = shard['host'] - if not len(host_spec.split('/')) > 1: + host_spec = shard["host"] + if not len(host_spec.split("/")) > 1: return False return True return False @@ -755,76 +765,80 @@ def require_cluster_type(self, topologies=[]): """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies are 'single', 'replicaset', and 'sharded'.""" + def _is_valid_topology(): return self.is_topology_type(topologies) - return self._require( - _is_valid_topology, - "Cluster type not in %s" % (topologies)) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) def require_test_commands(self, func): """Run a test only if the server has test commands enabled.""" - return self._require(lambda: self.test_commands_enabled, - "Test commands must be enabled", - func=func) + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) def require_failCommand_fail_point(self, func): """Run a test only if the server supports the failCommand fail point.""" - return self._require(lambda: self.supports_failCommand_fail_point, - "failCommand fail point must be supported", - func=func) + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) def require_failCommand_appName(self, func): """Run a test only if the server supports the failCommand appName.""" # SERVER-47195 - return self._require(lambda: (self.test_commands_enabled and - self.version >= (4, 4, -1)), - "failCommand appName must be supported", - func=func) + return self._require( + lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + "failCommand appName must be supported", + func=func, + ) def require_failCommand_blockConnection(self, func): - """Run a test only if the server supports failCommand blockConnection. - """ + """Run a test only if the server supports failCommand blockConnection.""" return self._require( - lambda: (self.test_commands_enabled and ( - (not self.is_mongos and self.version >= (4, 2, 9)) or - (self.is_mongos and self.version >= (4, 4)))), + lambda: ( + self.test_commands_enabled + and ( + (not self.is_mongos and self.version >= (4, 2, 9)) + or (self.is_mongos and self.version >= (4, 4)) + ) + ), "failCommand blockConnection is not supported", - func=func) + func=func, + ) def require_tls(self, func): """Run a test only if the client can connect over TLS.""" - return self._require(lambda: self.tls, - "Must be able to connect via TLS", - func=func) + return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func) def require_no_tls(self, func): """Run a test only if the client can connect over TLS.""" - return self._require(lambda: not self.tls, - "Must be able to connect without TLS", - func=func) + return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func) def require_tlsCertificateKeyFile(self, func): """Run a test only if the client can connect with tlsCertificateKeyFile.""" - return self._require(lambda: self.tlsCertificateKeyFile, - "Must be able to connect with tlsCertificateKeyFile", - func=func) + return self._require( + lambda: self.tlsCertificateKeyFile, + "Must be able to connect with tlsCertificateKeyFile", + func=func, + ) def require_server_resolvable(self, func): """Run a test only if the hostname 'server' is resolvable.""" - return self._require(lambda: self.server_is_resolvable, - "No hosts entry for 'server'. Cannot validate " - "hostname in the certificate", - func=func) + return self._require( + lambda: self.server_is_resolvable, + "No hosts entry for 'server'. Cannot validate " "hostname in the certificate", + func=func, + ) def require_sessions(self, func): """Run a test only if the deployment supports sessions.""" - return self._require(lambda: self.sessions_enabled, - "Sessions not supported", - func=func) + return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) def supports_retryable_writes(self): - if self.storage_engine == 'mmapv1': + if self.storage_engine == "mmapv1": return False if not self.sessions_enabled: return False @@ -832,12 +846,14 @@ def supports_retryable_writes(self): def require_retryable_writes(self, func): """Run a test only if the deployment supports retryable writes.""" - return self._require(self.supports_retryable_writes, - "This server does not support retryable writes", - func=func) + return self._require( + self.supports_retryable_writes, + "This server does not support retryable writes", + func=func, + ) def supports_transactions(self): - if self.storage_engine == 'mmapv1': + if self.storage_engine == "mmapv1": return False if self.version.at_least(4, 1, 8): @@ -853,28 +869,28 @@ def require_transactions(self, func): *Might* because this does not test the storage engine or FCV. """ - return self._require(self.supports_transactions, - "Transactions are not supported", - func=func) + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) def require_no_api_version(self, func): """Skip this test when testing with requireApiVersion.""" - return self._require(lambda: not MONGODB_API_VERSION, - "This test does not work with requireApiVersion", - func=func) + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) def mongos_seeds(self): - return ','.join('%s:%s' % address for address in self.mongoses) + return ",".join("%s:%s" % address for address in self.mongoses) @property def supports_failCommand_fail_point(self): """Does the server support the failCommand fail point?""" if self.is_mongos: - return (self.version.at_least(4, 1, 5) and - self.test_commands_enabled) + return self.version.at_least(4, 1, 5) and self.test_commands_enabled else: - return (self.version.at_least(4, 0) and - self.test_commands_enabled) + return self.version.at_least(4, 0) and self.test_commands_enabled @property def requires_hint_with_min_max_queries(self): @@ -884,11 +900,11 @@ def requires_hint_with_min_max_queries(self): @property def max_bson_size(self): - return self.hello['maxBsonObjectSize'] + return self.hello["maxBsonObjectSize"] @property def max_write_batch_size(self): - return self.hello['maxWriteBatchSize'] + return self.hello["maxWriteBatchSize"] # Reusable client context @@ -897,13 +913,13 @@ def max_write_batch_size(self): def sanitize_cmd(cmd): cp = cmd.copy() - cp.pop('$clusterTime', None) - cp.pop('$db', None) - cp.pop('$readPreference', None) - cp.pop('lsid', None) + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) if MONGODB_API_VERSION: # Versioned api parameters - cp.pop('apiVersion', None) + cp.pop("apiVersion", None) # OP_MSG encoding may move the payload type one field to the # end of the command. Do the same here. name = next(iter(cp)) @@ -918,8 +934,8 @@ def sanitize_cmd(cmd): def sanitize_reply(reply): cp = reply.copy() - cp.pop('$clusterTime', None) - cp.pop('operationTime', None) + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) return cp @@ -932,18 +948,20 @@ def assertEqualReply(self, expected, actual, msg=None): @contextmanager def fail_point(self, command_args): - cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on = SON([("configureFailPoint", "failCommand")]) cmd_on.update(command_args) client_context.client.admin.command(cmd_on) try: yield finally: client_context.client.admin.command( - 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') + "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" + client: MongoClient db: Database credentials: Dict[str, str] @@ -951,16 +969,14 @@ class IntegrationTest(PyMongoTestCase): @classmethod @client_context.require_connection def setUpClass(cls): - if (client_context.load_balancer and - not getattr(cls, 'RUN_ON_LOAD_BALANCER', False)): - raise SkipTest('this test does not support load balancers') - if (client_context.serverless and - not getattr(cls, 'RUN_ON_SERVERLESS', False)): - raise SkipTest('this test does not support serverless') + if client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + if client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): + raise SkipTest("this test does not support serverless") cls.client = client_context.client cls.db = cls.client.pymongo_test if client_context.auth_enabled: - cls.credentials = {'username': db_user, 'password': db_pwd} + cls.credentials = {"username": db_user, "password": db_pwd} else: cls.credentials = {} @@ -996,9 +1012,7 @@ def setUpClass(cls): def setUp(self): super(MockClientTest, self).setUp() - self.client_knobs = client_knobs( - heartbeat_frequency=0.001, - min_heartbeat_interval=0.001) + self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) self.client_knobs.enable() @@ -1017,9 +1031,9 @@ def _get_executors(topology): executors = [] for server in topology._servers.values(): # Some MockMonitor do not have an _executor. - if hasattr(server._monitor, '_executor'): + if hasattr(server._monitor, "_executor"): executors.append(server._monitor._executor) - if hasattr(server._monitor, '_rtt_monitor'): + if hasattr(server._monitor, "_rtt_monitor"): executors.append(server._monitor._rtt_monitor._executor) executors.append(topology._Topology__events_executor) if topology._srv_monitor: @@ -1031,14 +1045,17 @@ def _get_executors(topology): def all_executors_stopped(topology): running = [e for e in _get_executors(topology) if not e._stopped] if running: - print(' Topology %s has THREADS RUNNING: %s, created at: %s' % ( - topology, running, topology._settings._stack)) + print( + " Topology %s has THREADS RUNNING: %s, created at: %s" + % (topology, running, topology._settings._stack) + ) return False return True def print_unclosed_clients(): from pymongo.topology import Topology + processed = set() # Call collect to manually cleanup any would-be gc'd clients to avoid # false positives. @@ -1058,11 +1075,11 @@ def print_unclosed_clients(): def teardown(): garbage = [] for g in gc.garbage: - garbage.append('GARBAGE: %r' % (g,)) - garbage.append(' gc.get_referents: %r' % (gc.get_referents(g),)) - garbage.append(' gc.get_referrers: %r' % (gc.get_referrers(g),)) + garbage.append("GARBAGE: %r" % (g,)) + garbage.append(" gc.get_referents: %r" % (gc.get_referents(g),)) + garbage.append(" gc.get_referrers: %r" % (gc.get_referrers(g),)) if garbage: - assert False, '\n'.join(garbage) + assert False, "\n".join(garbage) c = client_context.client if c: if not client_context.is_data_lake: @@ -1075,7 +1092,7 @@ def teardown(): c.close() # Jython does not support gc.get_objects. - if not sys.platform.startswith('java'): + if not sys.platform.startswith("java"): print_unclosed_clients() @@ -1088,6 +1105,7 @@ def run(self, test): if HAVE_XML: + class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] def run(self, test): setup() @@ -1118,17 +1136,21 @@ def clear_warning_registry(): class SystemCertsPatcher(object): def __init__(self, ca_certs): - if (ssl.OPENSSL_VERSION.lower().startswith('libressl') and - sys.platform == 'darwin' and not _ssl.IS_PYOPENSSL): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): raise SkipTest( "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable.") - self.original_certs = os.environ.get('SSL_CERT_FILE') + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") # Tell OpenSSL where CA certificates live. - os.environ['SSL_CERT_FILE'] = ca_certs + os.environ["SSL_CERT_FILE"] = ca_certs def disable(self): if self.original_certs is None: - os.environ.pop('SSL_CERT_FILE') + os.environ.pop("SSL_CERT_FILE") else: - os.environ['SSL_CERT_FILE'] = self.original_certs + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 1ad84068ed..cad2b10683 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -17,7 +17,6 @@ import os import sys import unittest - from collections import defaultdict sys.path[0:0] = [""] @@ -27,6 +26,7 @@ try: import dns + HAS_DNS = True except ImportError: HAS_DNS = False @@ -57,59 +57,59 @@ def connect(uri): raise Exception("Must set env variable to test.") client = pymongo.MongoClient(uri) # No TLS error - client.admin.command('ping') + client.admin.command("ping") # No auth error client.test.test.count_documents({}) class TestAtlasConnect(unittest.TestCase): - @unittest.skipUnless(HAS_SNI, 'Free tier requires SNI support') + @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") def test_free_tier(self): - connect(URIS['ATLAS_FREE']) + connect(URIS["ATLAS_FREE"]) def test_replica_set(self): - connect(URIS['ATLAS_REPL']) + connect(URIS["ATLAS_REPL"]) def test_sharded_cluster(self): - connect(URIS['ATLAS_SHRD']) + connect(URIS["ATLAS_SHRD"]) def test_tls_11(self): - connect(URIS['ATLAS_TLS11']) + connect(URIS["ATLAS_TLS11"]) def test_tls_12(self): - connect(URIS['ATLAS_TLS12']) + connect(URIS["ATLAS_TLS12"]) def test_serverless(self): - connect(URIS['ATLAS_SERVERLESS']) + connect(URIS["ATLAS_SERVERLESS"]) def connect_srv(self, uri): connect(uri) - self.assertIn('mongodb+srv://', uri) + self.assertIn("mongodb+srv://", uri) - @unittest.skipUnless(HAS_SNI, 'Free tier requires SNI support') - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_free_tier(self): - self.connect_srv(URIS['ATLAS_SRV_FREE']) + self.connect_srv(URIS["ATLAS_SRV_FREE"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_replica_set(self): - self.connect_srv(URIS['ATLAS_SRV_REPL']) + self.connect_srv(URIS["ATLAS_SRV_REPL"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_sharded_cluster(self): - self.connect_srv(URIS['ATLAS_SRV_SHRD']) + self.connect_srv(URIS["ATLAS_SRV_SHRD"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_11(self): - self.connect_srv(URIS['ATLAS_SRV_TLS11']) + self.connect_srv(URIS["ATLAS_SRV_TLS11"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_12(self): - self.connect_srv(URIS['ATLAS_SRV_TLS12']) + self.connect_srv(URIS["ATLAS_SRV_TLS12"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, 'SRV requires dnspython') + @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_serverless(self): - self.connect_srv(URIS['ATLAS_SRV_SERVERLESS']) + self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) def test_uniqueness(self): """Ensure that we don't accidentally duplicate the test URIs.""" @@ -117,11 +117,12 @@ def test_uniqueness(self): for name, uri in URIS.items(): if uri: uri_to_names[uri].append(name) - duplicates = [names for names in uri_to_names.values() - if len(names) > 1] - self.assertFalse(duplicates, 'Error: the following env variables have ' - 'duplicate values: %s' % (duplicates,)) + duplicates = [names for names in uri_to_names.values() if len(names) > 1] + self.assertFalse( + duplicates, + "Error: the following env variables have " "duplicate values: %s" % (duplicates,), + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index f096d0569a..750d18c4fe 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -30,21 +30,22 @@ class TestAuthAWS(unittest.TestCase): @classmethod def setUpClass(cls): - cls.uri = os.environ['MONGODB_URI'] + cls.uri = os.environ["MONGODB_URI"] def test_should_fail_without_credentials(self): - if '@' not in self.uri: - self.skipTest('MONGODB_URI already has no credentials') + if "@" not in self.uri: + self.skipTest("MONGODB_URI already has no credentials") - hosts = ['%s:%s' % addr for addr in parse_uri(self.uri)['nodelist']] + hosts = ["%s:%s" % addr for addr in parse_uri(self.uri)["nodelist"]] self.assertTrue(hosts) with MongoClient(hosts) as client: with self.assertRaises(OperationFailure): client.aws.test.find_one() def test_should_fail_incorrect_credentials(self): - with MongoClient(self.uri, username='fake', password='fake', - authMechanism='MONGODB-AWS') as client: + with MongoClient( + self.uri, username="fake", password="fake", authMechanism="MONGODB-AWS" + ) as client: with self.assertRaises(OperationFailure): client.get_database().test.find_one() @@ -53,5 +54,5 @@ def test_connect_uri(self): client.get_database().test.find_one() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py index dbdea40d46..4118dfef9f 100644 --- a/test/crud_v2_format.py +++ b/test/crud_v2_format.py @@ -33,22 +33,22 @@ def allowable_errors(self, op): def get_scenario_db_name(self, scenario_def): """Crud spec says database_name is optional.""" - return scenario_def.get('database_name', self.TEST_DB) + return scenario_def.get("database_name", self.TEST_DB) def get_scenario_coll_name(self, scenario_def): """Crud spec says collection_name is optional.""" - return scenario_def.get('collection_name', self.TEST_COLLECTION) + return scenario_def.get("collection_name", self.TEST_COLLECTION) def get_object_name(self, op): """Crud spec says object is optional and defaults to 'collection'.""" - return op.get('object', 'collection') + return op.get("object", "collection") def get_outcome_coll_name(self, outcome, collection): """Crud spec says outcome has an optional 'collection.name'.""" - return outcome['collection'].get('name', collection.name) + return outcome["collection"].get("name", collection.name) def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" # PYTHON-1935 Only create the collection if there is data to insert. - if scenario_def['data']: + if scenario_def["data"]: super(TestCrudV2, self).setup_scenario(scenario_def) diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 138c059ac6..efb9e5084e 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -14,15 +14,14 @@ from collections import namedtuple -from mockupdb import OpMsgReply, OpMsg, OpReply +from mockupdb import OpMsg, OpMsgReply, OpReply + from pymongo import ReadPreference -__all__ = ['operations', 'upgrades'] +__all__ = ["operations", "upgrades"] -Operation = namedtuple( - 'Operation', - ['name', 'function', 'reply', 'op_type', 'not_master']) +Operation = namedtuple("Operation", ["name", "function", "reply", "op_type", "not_master"]) """Client operations on MongoDB. Each has a human-readable name, a function that actually executes a test, and @@ -51,64 +50,71 @@ sharded cluster (PYTHON-868). """ -not_master_reply = OpMsgReply(ok=0, errmsg='not master') +not_master_reply = OpMsgReply(ok=0, errmsg="not master") operations = [ Operation( - 'find_one', + "find_one", lambda client: client.db.collection.find_one(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='may-use-secondary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( - 'count', + "count", lambda client: client.db.collection.count_documents({}), - reply={'n': 1}, - op_type='may-use-secondary', - not_master=not_master_reply), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( - 'aggregate', + "aggregate", lambda client: client.db.collection.aggregate([]), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='may-use-secondary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( - 'options', + "options", lambda client: client.db.collection.options(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='must-use-primary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), Operation( - 'command', - lambda client: client.db.command('foo'), - reply={'ok': 1}, - op_type='must-use-primary', # Ignores client's read preference. - not_master=not_master_reply), + "command", + lambda client: client.db.command("foo"), + reply={"ok": 1}, + op_type="must-use-primary", # Ignores client's read preference. + not_master=not_master_reply, + ), Operation( - 'secondary command', - lambda client: - client.db.command('foo', read_preference=ReadPreference.SECONDARY), - reply={'ok': 1}, - op_type='always-use-secondary', - not_master=OpReply(ok=0, errmsg='node is recovering')), + "secondary command", + lambda client: client.db.command("foo", read_preference=ReadPreference.SECONDARY), + reply={"ok": 1}, + op_type="always-use-secondary", + not_master=OpReply(ok=0, errmsg="node is recovering"), + ), Operation( - 'listIndexes', + "listIndexes", lambda client: client.db.collection.index_information(), - reply={'cursor': {'id': 0, 'firstBatch': []}}, - op_type='must-use-primary', - not_master=not_master_reply), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), ] _ops_by_name = dict([(op.name, op) for op in operations]) -Upgrade = namedtuple('Upgrade', - ['name', 'function', 'old', 'new', 'wire_version']) +Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) upgrades = [ - Upgrade('estimated_document_count', - lambda client: client.db.collection.estimated_document_count(), - old=OpMsg('count', 'collection', namespace='db'), - new=OpMsg('aggregate', 'collection', namespace='db'), - wire_version=12), + Upgrade( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + old=OpMsg("count", "collection", namespace="db"), + new=OpMsg("aggregate", "collection", namespace="db"), + wire_version=12, + ), ] diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py index 6fb983b37f..33d33da24c 100755 --- a/test/mockupdb/test_auth_recovering_member.py +++ b/test/mockupdb/test_auth_recovering_member.py @@ -12,31 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest + from mockupdb import MockupDB + from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError -import unittest - class TestAuthRecoveringMember(unittest.TestCase): def test_auth_recovering_member(self): # Test that we don't attempt auth against a recovering RS member. server = MockupDB() - server.autoresponds('ismaster', { - 'minWireVersion': 2, - 'maxWireVersion': 6, - 'ismaster': False, - 'secondary': False, - 'setName': 'rs'}) + server.autoresponds( + "ismaster", + { + "minWireVersion": 2, + "maxWireVersion": 6, + "ismaster": False, + "secondary": False, + "setName": "rs", + }, + ) server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri, - replicaSet='rs', - serverSelectionTimeoutMS=100, - socketTimeoutMS=100) + client = MongoClient( + server.uri, replicaSet="rs", serverSelectionTimeoutMS=100, socketTimeoutMS=100 + ) self.addCleanup(client.close) @@ -46,5 +50,6 @@ def test_auth_recovering_member(self): with self.assertRaises(ServerSelectionTimeoutError): client.db.command("ping") -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index 858e32a0fa..e6d8c2126c 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -14,15 +14,13 @@ """Test $clusterTime handling.""" -from bson import Timestamp -from mockupdb import going, MockupDB -from pymongo import (MongoClient, - InsertOne, - UpdateOne, - DeleteMany) - import unittest +from mockupdb import MockupDB, going + +from bson import Timestamp +from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne + class TestClusterTime(unittest.TestCase): def cluster_time_conversation(self, callback, replies): @@ -31,10 +29,13 @@ def cluster_time_conversation(self, callback, replies): # First test all commands include $clusterTime with wire version 6. responder = server.autoresponds( - 'ismaster', - {'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': {'clusterTime': cluster_time}}) + "ismaster", + { + "minWireVersion": 0, + "maxWireVersion": 6, + "$clusterTime": {"clusterTime": cluster_time}, + }, + ) server.run() self.addCleanup(server.stop) @@ -45,39 +46,35 @@ def cluster_time_conversation(self, callback, replies): with going(callback, client): for reply in replies: request = server.receives() - self.assertIn('$clusterTime', request) - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - reply['$clusterTime'] = {'clusterTime': cluster_time} + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} request.reply(reply) def test_command(self): def callback(client): - client.db.command('ping') - client.db.command('ping') + client.db.command("ping") + client.db.command("ping") - self.cluster_time_conversation(callback, [{'ok': 1}] * 2) + self.cluster_time_conversation(callback, [{"ok": 1}] * 2) def test_bulk(self): def callback(client): - client.db.collection.bulk_write([ - InsertOne({}), - InsertOne({}), - UpdateOne({}, {'$inc': {'x': 1}}), - DeleteMany({})]) + client.db.collection.bulk_write( + [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] + ) self.cluster_time_conversation( callback, - [{'ok': 1, 'nInserted': 2}, - {'ok': 1, 'nModified': 1}, - {'ok': 1, 'nDeleted': 2}]) + [{"ok": 1, "nInserted": 2}, {"ok": 1, "nModified": 1}, {"ok": 1, "nDeleted": 2}], + ) batches = [ - {'cursor': {'id': 123, 'firstBatch': [{'a': 1}]}}, - {'cursor': {'id': 123, 'nextBatch': [{'a': 2}]}}, - {'cursor': {'id': 0, 'nextBatch': [{'a': 3}]}}] + {"cursor": {"id": 123, "firstBatch": [{"a": 1}]}}, + {"cursor": {"id": 123, "nextBatch": [{"a": 2}]}}, + {"cursor": {"id": 0, "nextBatch": [{"a": 3}]}}, + ] def test_cursor(self): def callback(client): @@ -95,13 +92,15 @@ def test_explain(self): def callback(client): client.db.collection.find().explain() - self.cluster_time_conversation(callback, [{'ok': 1}]) + self.cluster_time_conversation(callback, [{"ok": 1}]) def test_monitor(self): cluster_time = Timestamp(0, 0) - reply = {'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': {'clusterTime': cluster_time}} + reply = { + "minWireVersion": 0, + "maxWireVersion": 6, + "$clusterTime": {"clusterTime": cluster_time}, + } server = MockupDB() server.run() @@ -110,55 +109,52 @@ def test_monitor(self): client = MongoClient(server.uri, heartbeatFrequencyMS=500) self.addCleanup(client.close) - request = server.receives('ismaster') + request = server.receives("ismaster") # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn('$clusterTime', request) + self.assertNotIn("$clusterTime", request) request.ok(reply) # Next exchange: client returns first clusterTime, we send the second. - request = server.receives('ismaster') - self.assertIn('$clusterTime', request) - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - reply['$clusterTime'] = {'clusterTime': cluster_time} + request = server.receives("ismaster") + self.assertIn("$clusterTime", request) + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + reply["$clusterTime"] = {"clusterTime": cluster_time} request.reply(reply) # Third exchange: client returns second clusterTime. - request = server.receives('ismaster') - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) + request = server.receives("ismaster") + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) # Return command error with a new clusterTime. - cluster_time = Timestamp(cluster_time.time, - cluster_time.inc + 1) - error = {'ok': 0, - 'code': 211, - 'errmsg': 'Cache Reader No keys found for HMAC ...', - '$clusterTime': {'clusterTime': cluster_time}} + cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) + error = { + "ok": 0, + "code": 211, + "errmsg": "Cache Reader No keys found for HMAC ...", + "$clusterTime": {"clusterTime": cluster_time}, + } request.reply(error) # PyMongo 3.11+ closes the monitoring connection on command errors. # Fourth exchange: the Monitor closes the connection and runs the # handshake on a new connection. - request = server.receives('ismaster') + request = server.receives("ismaster") # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn('$clusterTime', request) + self.assertNotIn("$clusterTime", request) # Reply without $clusterTime. - reply.pop('$clusterTime') + reply.pop("$clusterTime") request.reply(reply) # Fifth exchange: the Monitor attempt uses the clusterTime from # the previous isMaster error. - request = server.receives('ismaster') - self.assertEqual(request['$clusterTime']['clusterTime'], - cluster_time) + request = server.receives("ismaster") + self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) request.reply(reply) client.close() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index a52e2fb4e7..10788ac0f9 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -14,11 +14,12 @@ """Test list_indexes with more than one batch.""" -from mockupdb import going, MockupDB -from pymongo import MongoClient - import unittest +from mockupdb import MockupDB, going + +from pymongo import MongoClient + class TestCursorNamespace(unittest.TestCase): server: MockupDB @@ -26,7 +27,7 @@ class TestCursorNamespace(unittest.TestCase): @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) cls.server.run() cls.client = MongoClient(cls.server.uri) @@ -37,38 +38,43 @@ def tearDownClass(cls): def _test_cursor_namespace(self, cursor_op, command): with going(cursor_op) as docs: - request = self.server.receives( - **{command: 'collection', 'namespace': 'test'}) + request = self.server.receives(**{command: "collection", "namespace": "test"}) # Respond with a different namespace. - request.reply({'cursor': { - 'firstBatch': [{'doc': 1}], - 'id': 123, - 'ns': 'different_db.different.coll'}}) + request.reply( + { + "cursor": { + "firstBatch": [{"doc": 1}], + "id": 123, + "ns": "different_db.different.coll", + } + } + ) # Client uses the namespace we returned. request = self.server.receives( - getMore=123, namespace='different_db', - collection='different.coll') + getMore=123, namespace="different_db", collection="different.coll" + ) - request.reply({'cursor': { - 'nextBatch': [{'doc': 2}], - 'id': 0}}) + request.reply({"cursor": {"nextBatch": [{"doc": 2}], "id": 0}}) - self.assertEqual([{'doc': 1}, {'doc': 2}], docs()) + self.assertEqual([{"doc": 1}, {"doc": 2}], docs()) def test_aggregate_cursor(self): def op(): return list(self.client.test.collection.aggregate([])) - self._test_cursor_namespace(op, 'aggregate') + + self._test_cursor_namespace(op, "aggregate") def test_find_cursor(self): def op(): return list(self.client.test.collection.find()) - self._test_cursor_namespace(op, 'find') + + self._test_cursor_namespace(op, "find") def test_list_indexes(self): def op(): return list(self.client.test.collection.list_indexes()) - self._test_cursor_namespace(op, 'listIndexes') + + self._test_cursor_namespace(op, "listIndexes") class TestKillCursorsNamespace(unittest.TestCase): @@ -77,7 +83,7 @@ class TestKillCursorsNamespace(unittest.TestCase): @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) cls.server.run() cls.client = MongoClient(cls.server.uri) @@ -88,39 +94,47 @@ def tearDownClass(cls): def _test_killCursors_namespace(self, cursor_op, command): with going(cursor_op): - request = self.server.receives( - **{command: 'collection', 'namespace': 'test'}) + request = self.server.receives(**{command: "collection", "namespace": "test"}) # Respond with a different namespace. - request.reply({'cursor': { - 'firstBatch': [{'doc': 1}], - 'id': 123, - 'ns': 'different_db.different.coll'}}) + request.reply( + { + "cursor": { + "firstBatch": [{"doc": 1}], + "id": 123, + "ns": "different_db.different.coll", + } + } + ) # Client uses the namespace we returned for killCursors. - request = self.server.receives(**{ - 'killCursors': 'different.coll', - 'cursors': [123], - '$db': 'different_db'}) - request.reply({ - 'ok': 1, - 'cursorsKilled': [123], - 'cursorsNotFound': [], - 'cursorsAlive': [], - 'cursorsUnknown': []}) + request = self.server.receives( + **{"killCursors": "different.coll", "cursors": [123], "$db": "different_db"} + ) + request.reply( + { + "ok": 1, + "cursorsKilled": [123], + "cursorsNotFound": [], + "cursorsAlive": [], + "cursorsUnknown": [], + } + ) def test_aggregate_killCursor(self): def op(): cursor = self.client.test.collection.aggregate([], batchSize=1) next(cursor) cursor.close() - self._test_killCursors_namespace(op, 'aggregate') + + self._test_killCursors_namespace(op, "aggregate") def test_find_killCursor(self): def op(): cursor = self.client.test.collection.find(batch_size=1) next(cursor) cursor.close() - self._test_killCursors_namespace(op, 'find') + + self._test_killCursors_namespace(op, "find") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 0d91583378..5f5400ab07 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -13,13 +13,12 @@ # limitations under the License. """Test PyMongo cursor with a sharded cluster.""" -from pymongo import MongoClient - +import unittest from queue import Queue from mockupdb import MockupDB, going -import unittest +from pymongo import MongoClient class TestGetmoreSharded(unittest.TestCase): @@ -30,20 +29,22 @@ def test_getmore_sharded(self): q: Queue = Queue() for server in servers: server.subscribe(q.put) - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) server.run() self.addCleanup(server.stop) - client = MongoClient('mongodb://%s:%d,%s:%d' % ( - servers[0].host, servers[0].port, - servers[1].host, servers[1].port)) + client = MongoClient( + "mongodb://%s:%d,%s:%d" + % (servers[0].host, servers[0].port, servers[1].host, servers[1].port) + ) self.addCleanup(client.close) collection = client.db.collection cursor = collection.find() with going(next, cursor): query = q.get(timeout=1) - query.replies({'cursor': {'id': 123, 'firstBatch': [{}]}}) + query.replies({"cursor": {"id": 123, "firstBatch": [{}]}}) # 10 batches, all getMores go to same server. for i in range(1, 10): @@ -51,9 +52,8 @@ def test_getmore_sharded(self): getmore = q.get(timeout=1) self.assertEqual(query.server, getmore.server) cursor_id = 123 if i < 9 else 0 - getmore.replies({'cursor': {'id': cursor_id, - 'nextBatch': [{}]}}) + getmore.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 29313de8c2..c9799fa21e 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mockupdb import (MockupDB, OpReply, OpMsg, OpMsgReply, OpQuery, absent, - Command, go) +import unittest + +from mockupdb import Command, MockupDB, OpMsg, OpMsgReply, OpQuery, OpReply, absent, go -from pymongo import MongoClient, version as pymongo_version +from bson.objectid import ObjectId +from pymongo import MongoClient +from pymongo import version as pymongo_version from pymongo.errors import OperationFailure from pymongo.server_api import ServerApi, ServerApiVersion -from bson.objectid import ObjectId - -import unittest def test_hello_with_option(self, protocol, **kwargs): @@ -30,26 +30,28 @@ def test_hello_with_option(self, protocol, **kwargs): primary = MockupDB() # Set up a custom handler to save the first request from the driver. self.handshake_req = None + def respond(r): # Only save the very first request from the driver. if self.handshake_req == None: self.handshake_req = r - load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get( - "loadBalanced") else {} - return r.reply(OpMsgReply(minWireVersion=0, maxWireVersion=13, - **kwargs, **load_balanced_kwargs)) + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + primary.autoresponds(respond) primary.run() self.addCleanup(primary.stop) # We need a special dict because MongoClient uses "server_api" and all # of the commands use "apiVersion". - k_map = {("apiVersion", "1"):("server_api", ServerApi( - ServerApiVersion.V1))} - client = MongoClient("mongodb://"+primary.address_string, - appname='my app', # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v # type: ignore[arg-type] - in kwargs.items()])) + k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] + ) self.addCleanup(client.close) @@ -65,15 +67,15 @@ def respond(r): def _check_handshake_data(request): - assert 'client' in request - data = request['client'] + assert "client" in request + data = request["client"] - assert data['application'] == {'name': 'my app'} - assert data['driver'] == {'name': 'PyMongo', 'version': pymongo_version} + assert data["application"] == {"name": "my app"} + assert data["driver"] == {"name": "PyMongo", "version": pymongo_version} # Keep it simple, just check these fields exist. - assert 'os' in data - assert 'platform' in data + assert "os" in data + assert "platform" in data class TestHandshake(unittest.TestCase): @@ -84,63 +86,66 @@ def test_client_handshake_data(self): self.addCleanup(server.stop) hosts = [server.address_string for server in (primary, secondary)] - primary_response = OpReply('ismaster', True, - setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) - error_response = OpReply( - 0, errmsg='Cache Reader No keys found for HMAC ...', code=211) - - secondary_response = OpReply('ismaster', False, - setName='rs', hosts=hosts, - secondary=True, - minWireVersion=2, maxWireVersion=6) - - client = MongoClient(primary.uri, - replicaSet='rs', - appname='my app', - heartbeatFrequencyMS=500) # Speed up the test. + primary_response = OpReply( + "ismaster", True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) + error_response = OpReply(0, errmsg="Cache Reader No keys found for HMAC ...", code=211) + + secondary_response = OpReply( + "ismaster", + False, + setName="rs", + hosts=hosts, + secondary=True, + minWireVersion=2, + maxWireVersion=6, + ) + + client = MongoClient( + primary.uri, replicaSet="rs", appname="my app", heartbeatFrequencyMS=500 + ) # Speed up the test. self.addCleanup(client.close) # New monitoring sockets send data during handshake. - heartbeat = primary.receives('ismaster') + heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) - heartbeat = secondary.receives('ismaster') + heartbeat = secondary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(secondary_response) # Subsequent heartbeats have no client data. - primary.receives('ismaster', 1, client=absent).ok(error_response) - secondary.receives('ismaster', 1, client=absent).ok(error_response) + primary.receives("ismaster", 1, client=absent).ok(error_response) + secondary.receives("ismaster", 1, client=absent).ok(error_response) # The heartbeat retry (on a new connection) does have client data. - heartbeat = primary.receives('ismaster') + heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) - heartbeat = secondary.receives('ismaster') + heartbeat = secondary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(secondary_response) # Still no client data. - primary.receives('ismaster', 1, client=absent).ok(primary_response) - secondary.receives('ismaster', 1, client=absent).ok(secondary_response) + primary.receives("ismaster", 1, client=absent).ok(primary_response) + secondary.receives("ismaster", 1, client=absent).ok(secondary_response) # After a disconnect, next ismaster has client data again. - primary.receives('ismaster', 1, client=absent).hangup() - heartbeat = primary.receives('ismaster') + primary.receives("ismaster", 1, client=absent).hangup() + heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) - secondary.autoresponds('ismaster', secondary_response) + secondary.autoresponds("ismaster", secondary_response) # Start a command, so the client opens an application socket. - future = go(client.db.command, 'whatever') + future = go(client.db.command, "whatever") for request in primary: - if request.matches(Command('ismaster')): + if request.matches(Command("ismaster")): if request.client_port == heartbeat.client_port: # This is the monitor again, keep going. request.ok(primary_response) @@ -150,7 +155,7 @@ def test_client_handshake_data(self): request.ok(primary_response) else: # Command succeeds. - request.assert_matches(OpMsg('whatever')) + request.assert_matches(OpMsg("whatever")) request.ok() assert future() return @@ -160,40 +165,42 @@ def test_client_handshake_saslSupportedMechs(self): server.run() self.addCleanup(server.stop) - primary_response = OpReply('ismaster', True, - minWireVersion=2, maxWireVersion=6) - client = MongoClient(server.uri, - username='username', - password='password') + primary_response = OpReply("ismaster", True, minWireVersion=2, maxWireVersion=6) + client = MongoClient(server.uri, username="username", password="password") self.addCleanup(client.close) # New monitoring sockets send data during handshake. - heartbeat = server.receives('ismaster') + heartbeat = server.receives("ismaster") heartbeat.ok(primary_response) - future = go(client.db.command, 'whatever') + future = go(client.db.command, "whatever") for request in server: - if request.matches('ismaster'): + if request.matches("ismaster"): if request.client_port == heartbeat.client_port: # This is the monitor again, keep going. request.ok(primary_response) else: # Handshaking a new application socket should send # saslSupportedMechs and speculativeAuthenticate. - self.assertEqual(request['saslSupportedMechs'], - 'admin.username') - self.assertIn( - 'saslStart', request['speculativeAuthenticate']) - auth = {'conversationId': 1, 'done': False, - 'payload': b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' - b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' - b'tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei' - b'PHrSmh4uhkg==,i=15000'} - request.ok('ismaster', True, - saslSupportedMechs=['SCRAM-SHA-256'], - speculativeAuthenticate=auth, - minWireVersion=2, maxWireVersion=6) + self.assertEqual(request["saslSupportedMechs"], "admin.username") + self.assertIn("saslStart", request["speculativeAuthenticate"]) + auth = { + "conversationId": 1, + "done": False, + "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r,s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + } + request.ok( + "ismaster", + True, + saslSupportedMechs=["SCRAM-SHA-256"], + speculativeAuthenticate=auth, + minWireVersion=2, + maxWireVersion=6, + ) # Authentication should immediately fail with: # OperationFailure: Server returned an invalid nonce. with self.assertRaises(OperationFailure): @@ -219,8 +226,7 @@ def test_handshake_not_either(self): def test_handshake_max_wire(self): server = MockupDB() - primary_response = {"hello": 1, "ok": 1, - "minWireVersion": 0, "maxWireVersion": 6} + primary_response = {"hello": 1, "ok": 1, "minWireVersion": 0, "maxWireVersion": 6} self.found_auth_msg = False def responder(request): @@ -228,31 +234,36 @@ def responder(request): self.found_auth_msg = True # Immediately closes the connection with # OperationFailure: Server returned an invalid nonce. - request.reply(OpMsgReply(**primary_response, - **{'payload': - b'r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0' - b'1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky' - b'tXdF9r,' - b's=4dcxugMJq2P4hQaDbGXZR8uR3ei' - b'PHrSmh4uhkg==,i=15000', - "saslSupportedMechs": [ - "SCRAM-SHA-1"]})) + request.reply( + OpMsgReply( + **primary_response, + **{ + "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r," + b"s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + "saslSupportedMechs": ["SCRAM-SHA-1"], + } + ) + ) else: return request.reply(**primary_response) server.autoresponds(responder) self.addCleanup(server.stop) server.run() - client = MongoClient(server.uri, - username='username', - password='password', - ) + client = MongoClient( + server.uri, + username="username", + password="password", + ) self.addCleanup(client.close) - self.assertRaises(OperationFailure, client.db.collection.find_one, - {"a": 1}) - self.assertTrue(self.found_auth_msg, "Could not find authentication " - "command with correct protocol") + self.assertRaises(OperationFailure, client.db.collection.find_one, {"a": 1}) + self.assertTrue( + self.found_auth_msg, "Could not find authentication " "command with correct protocol" + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py index c67fcbf9e1..155ae6152e 100644 --- a/test/mockupdb/test_initial_ismaster.py +++ b/test/mockupdb/test_initial_ismaster.py @@ -13,11 +13,11 @@ # limitations under the License. import time +import unittest from mockupdb import MockupDB, wait_until -from pymongo import MongoClient -import unittest +from pymongo import MongoClient class TestInitialIsMaster(unittest.TestCase): @@ -32,15 +32,13 @@ def test_initial_ismaster(self): # A single ismaster is enough for the client to be connected. self.assertFalse(client.nodes) - server.receives('ismaster').ok(ismaster=True, - minWireVersion=2, maxWireVersion=6) - wait_until(lambda: client.nodes, - 'update nodes', timeout=1) + server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + wait_until(lambda: client.nodes, "update nodes", timeout=1) # At least 10 seconds before next heartbeat. - server.receives('ismaster').ok(ismaster=True, - minWireVersion=2, maxWireVersion=6) + server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) self.assertGreaterEqual(time.time() - start, 10) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index b4787ff624..2bdbd7b910 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -14,42 +14,34 @@ """Test list_indexes with more than one batch.""" -from bson import SON +import unittest -from mockupdb import going, MockupDB, OpGetMore -from pymongo import MongoClient +from mockupdb import MockupDB, OpGetMore, going -import unittest +from bson import SON +from pymongo import MongoClient class TestListIndexes(unittest.TestCase): - def test_list_indexes_command(self): - server = MockupDB(auto_ismaster={'maxWireVersion': 6}) + server = MockupDB(auto_ismaster={"maxWireVersion": 6}) server.run() self.addCleanup(server.stop) client = MongoClient(server.uri) self.addCleanup(client.close) with going(client.test.collection.list_indexes) as cursor: - request = server.receives( - listIndexes='collection', namespace='test') - request.reply({'cursor': { - 'firstBatch': [{'name': 'index_0'}], - 'id': 123}}) + request = server.receives(listIndexes="collection", namespace="test") + request.reply({"cursor": {"firstBatch": [{"name": "index_0"}], "id": 123}}) with going(list, cursor()) as indexes: - request = server.receives(getMore=123, - namespace='test', - collection='collection') + request = server.receives(getMore=123, namespace="test", collection="collection") - request.reply({'cursor': { - 'nextBatch': [{'name': 'index_1'}], - 'id': 0}}) + request.reply({"cursor": {"nextBatch": [{"name": "index_1"}], "id": 0}}) - self.assertEqual([{'name': 'index_0'}, {'name': 'index_1'}], indexes()) + self.assertEqual([{"name": "index_0"}, {"name": "index_1"}], indexes()) for index_info in indexes(): self.assertIsInstance(index_info, SON) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py index 9bd65a1764..02efb6a718 100644 --- a/test/mockupdb/test_max_staleness.py +++ b/test/mockupdb/test_max_staleness.py @@ -12,33 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest + from mockupdb import MockupDB, going -from pymongo import MongoClient -import unittest +from pymongo import MongoClient class TestMaxStalenessMongos(unittest.TestCase): def test_mongos(self): mongos = MockupDB() - mongos.autoresponds('ismaster', maxWireVersion=6, - ismaster=True, msg='isdbgrid') + mongos.autoresponds("ismaster", maxWireVersion=6, ismaster=True, msg="isdbgrid") mongos.run() self.addCleanup(mongos.stop) # No maxStalenessSeconds. - uri = 'mongodb://localhost:%d/?readPreference=secondary' % mongos.port + uri = "mongodb://localhost:%d/?readPreference=secondary" % mongos.port client = MongoClient(uri) self.addCleanup(client.close) with going(client.db.coll.find_one) as future: request = mongos.receives() - self.assertNotIn( - 'maxStalenessSeconds', - request.doc['$readPreference']) + self.assertNotIn("maxStalenessSeconds", request.doc["$readPreference"]) self.assertTrue(request.slave_okay) - request.ok(cursor={'firstBatch': [], 'id': 0}) + request.ok(cursor={"firstBatch": [], "id": 0}) # find_one succeeds with no result. self.assertIsNone(future()) @@ -46,22 +44,22 @@ def test_mongos(self): # Set maxStalenessSeconds to 1. Client has no minimum with mongos, # we let mongos enforce the 90-second minimum and return an error: # SERVER-27146. - uri = 'mongodb://localhost:%d/?readPreference=secondary' \ - '&maxStalenessSeconds=1' % mongos.port + uri = ( + "mongodb://localhost:%d/?readPreference=secondary" + "&maxStalenessSeconds=1" % mongos.port + ) client = MongoClient(uri) self.addCleanup(client.close) with going(client.db.coll.find_one) as future: request = mongos.receives() - self.assertEqual( - 1, - request.doc['$readPreference']['maxStalenessSeconds']) + self.assertEqual(1, request.doc["$readPreference"]["maxStalenessSeconds"]) self.assertTrue(request.slave_okay) - request.ok(cursor={'firstBatch': [], 'id': 0}) + request.ok(cursor={"firstBatch": [], "id": 0}) self.assertIsNone(future()) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index c3af907404..ce91794ee4 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -15,15 +15,14 @@ """Test PyMongo with a mixed-version cluster.""" import time - +import unittest from queue import Queue -from mockupdb import MockupDB, go, OpMsg -from pymongo import MongoClient - -import unittest +from mockupdb import MockupDB, OpMsg, go from operations import upgrades +from pymongo import MongoClient + class TestMixedVersionSharded(unittest.TestCase): def setup_server(self, upgrade): @@ -33,25 +32,29 @@ def setup_server(self, upgrade): self.q: Queue = Queue() for server in self.mongos_old, self.mongos_new: server.subscribe(self.q.put) - server.autoresponds('getlasterror') + server.autoresponds("getlasterror") server.run() self.addCleanup(server.stop) # Max wire version is too old for the upgraded operation. - self.mongos_old.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - maxWireVersion=upgrade.wire_version - 1) + self.mongos_old.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version - 1 + ) # Up-to-date max wire version. - self.mongos_new.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - maxWireVersion=upgrade.wire_version) + self.mongos_new.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version + ) - self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos_old.address_string, - self.mongos_new.address_string) + self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongos_old.address_string, + self.mongos_new.address_string, + ) self.client = MongoClient(self.mongoses_uri) def tearDown(self): - if hasattr(self, 'client') and self.client: + if hasattr(self, "client") and self.client: self.client.close() @@ -64,23 +67,24 @@ def test(self): go(upgrade.function, self.client) request = self.q.get(timeout=1) servers_used.add(request.server) - request.assert_matches(upgrade.old - if request.server is self.mongos_old - else upgrade.new) + request.assert_matches( + upgrade.old if request.server is self.mongos_old else upgrade.new + ) if time.time() > start + 10: - self.fail('never used both mongoses') + self.fail("never used both mongoses") + return test def generate_mixed_version_sharded_tests(): for upgrade in upgrades: test = create_mixed_version_sharded_test(upgrade) - test_name = 'test_%s' % upgrade.name.replace(' ', '_') + test_name = "test_%s" % upgrade.name.replace(" ", "_") test.__name__ = test_name setattr(TestMixedVersionSharded, test_name, test) generate_mixed_version_sharded_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 49aee27047..d2c3bfc1b0 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -13,23 +13,26 @@ # limitations under the License. import itertools +import unittest + +from mockupdb import MockupDB, OpMsg, go, going +from operations import operations from bson import SON -from mockupdb import MockupDB, going, OpMsg, go from pymongo import MongoClient, ReadPreference -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name, - _MONGOS_MODES) - -import unittest -from operations import operations +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): server = MockupDB() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) self.addCleanup(server.stop) server.run() @@ -37,20 +40,25 @@ def test_aggregate(self): self.addCleanup(client.close) collection = client.test.collection with going(collection.aggregate, []): - command = server.receives(aggregate='collection', pipeline=[]) - self.assertFalse(command.slave_ok, 'SlaveOkay set') + command = server.receives(aggregate="collection", pipeline=[]) + self.assertFalse(command.slave_ok, "SlaveOkay set") command.ok(result=[{}]) - secondary_collection = collection.with_options( - read_preference=ReadPreference.SECONDARY) + secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY) with going(secondary_collection.aggregate, []): - command = server.receives(OpMsg({"aggregate": "collection", - "pipeline": [], - '$readPreference': {'mode': 'secondary'}})) + command = server.receives( + OpMsg( + { + "aggregate": "collection", + "pipeline": [], + "$readPreference": {"mode": "secondary"}, + } + ) + ) command.ok(result=[{}]) - self.assertTrue(command.slave_ok, 'SlaveOkay not set') + self.assertTrue(command.slave_ok, "SlaveOkay not set") def create_mongos_read_mode_test(mode, operation): @@ -58,11 +66,11 @@ def test(self): server = MockupDB() self.addCleanup(server.stop) server.run() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(server.uri, read_preference=pref) self.addCleanup(client.close) @@ -71,26 +79,25 @@ def test(self): request = server.receive() request.reply(operation.reply) - if operation.op_type == 'always-use-secondary': - self.assertEqual(ReadPreference.SECONDARY.document, - request.doc.get('$readPreference')) - slave_ok = mode != 'primary' - elif operation.op_type == 'must-use-primary': + if operation.op_type == "always-use-secondary": + self.assertEqual(ReadPreference.SECONDARY.document, request.doc.get("$readPreference")) + slave_ok = mode != "primary" + elif operation.op_type == "must-use-primary": slave_ok = False - elif operation.op_type == 'may-use-secondary': - slave_ok = mode != 'primary' - actual_pref = request.doc.get('$readPreference') - if mode == 'primary': + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" + actual_pref = request.doc.get("$readPreference") + if mode == "primary": self.assertIsNone(actual_pref) else: self.assertEqual(pref.document, actual_pref) else: - self.fail('unrecognized op_type %r' % operation.op_type) + self.fail("unrecognized op_type %r" % operation.op_type) if slave_ok: - self.assertTrue(request.slave_ok, 'SlaveOkay not set') + self.assertTrue(request.slave_ok, "SlaveOkay not set") else: - self.assertFalse(request.slave_ok, 'SlaveOkay set') + self.assertFalse(request.slave_ok, "SlaveOkay set") return test @@ -100,12 +107,11 @@ def generate_mongos_read_mode_tests(): for entry in matrix: mode, operation = entry - if mode == 'primary' and operation.op_type == 'always-use-secondary': + if mode == "primary" and operation.op_type == "always-use-secondary": # Skip something like command('foo', read_preference=SECONDARY). continue test = create_mongos_read_mode_test(mode, operation) - test_name = 'test_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), mode) + test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestMongosCommandReadMode, test_name, test) @@ -113,5 +119,5 @@ def generate_mongos_read_mode_tests(): generate_mongos_read_mode_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index bc29ce5f0f..dcf5256fac 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest from queue import Queue -from mockupdb import MockupDB, wait_until, OpReply, going, Future +from mockupdb import Future, MockupDB, OpReply, going, wait_until + +from pymongo import MongoClient from pymongo.errors import ConnectionFailure from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo import MongoClient - -import unittest class TestNetworkDisconnectPrimary(unittest.TestCase): @@ -33,52 +33,53 @@ def test_network_disconnect_primary(self): self.addCleanup(server.stop) hosts = [server.address_string for server in servers] - primary_response = OpReply(ismaster=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) - primary.autoresponds('ismaster', primary_response) + primary_response = OpReply( + ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) + primary.autoresponds("ismaster", primary_response) secondary.autoresponds( - 'ismaster', - ismaster=False, secondary=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) - - client = MongoClient(primary.uri, replicaSet='rs') + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=6, + ) + + client = MongoClient(primary.uri, replicaSet="rs") self.addCleanup(client.close) - wait_until(lambda: client.primary == primary.address, - 'discover primary') + wait_until(lambda: client.primary == primary.address, "discover primary") topology = client._topology - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) # Open a socket in the application pool (calls ismaster). - with going(client.db.command, 'buildinfo'): - primary.receives('buildinfo').ok() + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").ok() # The primary hangs replying to ismaster. ismaster_future = Future() - primary.autoresponds('ismaster', - lambda r: r.ok(ismaster_future.result())) + primary.autoresponds("ismaster", lambda r: r.ok(ismaster_future.result())) # Network error on application operation. with self.assertRaises(ConnectionFailure): - with going(client.db.command, 'buildinfo'): - primary.receives('buildinfo').hangup() + with going(client.db.command, "buildinfo"): + primary.receives("buildinfo").hangup() # Topology type is updated. - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, topology.description.topology_type) # Let ismasters through again. ismaster_future.set_result(primary_response) # Demand a primary. - with going(client.db.command, 'buildinfo'): - wait_until(lambda: client.primary == primary.address, - 'rediscover primary') - primary.receives('buildinfo').ok() + with going(client.db.command, "buildinfo"): + wait_until(lambda: client.primary == primary.address, "rediscover primary") + primary.receives("buildinfo").ok() + + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, topology.description.topology_type) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - topology.description.topology_type) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index 78397a3336..da7ff3d33e 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -12,223 +12,248 @@ # See the License for the specific language governing permissions and # limitations under the License. +import unittest from collections import namedtuple -from mockupdb import MockupDB, going, OpMsg, OpMsgReply, OP_MSG_FLAGS +from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going + from pymongo import MongoClient, WriteConcern -from pymongo.operations import InsertOne, UpdateOne, DeleteOne from pymongo.cursor import CursorType +from pymongo.operations import DeleteOne, InsertOne, UpdateOne -import unittest - - -Operation = namedtuple( - 'Operation', - ['name', 'function', 'request', 'reply']) +Operation = namedtuple("Operation", ["name", "function", "request", "reply"]) operations = [ Operation( - 'find_one', + "find_one", lambda coll: coll.find_one({}), request=OpMsg({"find": "coll"}, flags=0), - reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, + ), Operation( - 'aggregate', + "aggregate", lambda coll: coll.aggregate([]), request=OpMsg({"aggregate": "coll"}, flags=0), - reply={'ok': 1, 'cursor': {'firstBatch': [], 'id': 0}}), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, + ), Operation( - 'insert_one', + "insert_one", lambda coll: coll.insert_one({}), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), + reply={"ok": 1, "n": 1}, + ), Operation( - 'insert_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert_one({}), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "insert_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_one({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'insert_many', + "insert_many", lambda coll: coll.insert_many([{}, {}, {}]), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 3}), + reply={"ok": 1, "n": 3}, + ), Operation( - 'insert_many-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]), + "insert_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 3}), + reply={"ok": 1, "n": 3}, + ), Operation( - 'insert_many-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).insert_many( - [{}, {}, {}], ordered=False), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "insert_many-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'replace_one', + "replace_one", lambda coll: coll.replace_one({"_id": 1}, {"new": 1}), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), Operation( - 'replace_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).replace_one({"_id": 1}, - {"new": 1}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).replace_one( + {"_id": 1}, {"new": 1} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'update_one', + "update_one", lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), Operation( - 'replace_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).update_one({"_id": 1}, - {"$set": {"new": 1}}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_one( + {"_id": 1}, {"$set": {"new": 1}} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'update_many', + "update_many", lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 1, 'nModified': 1}), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), Operation( - 'update_many-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).update_many({"_id": 1}, - {"$set": {"new": 1}}), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "update_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_many( + {"_id": 1}, {"$set": {"new": 1}} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'delete_one', + "delete_one", lambda coll: coll.delete_one({"a": 1}), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), + reply={"ok": 1, "n": 1}, + ), Operation( - 'delete_one-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).delete_one({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "delete_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'delete_many', + "delete_many", lambda coll: coll.delete_many({"a": 1}), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 1}), + reply={"ok": 1, "n": 1}, + ), Operation( - 'delete_many-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).delete_many({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "delete_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), # Legacy methods Operation( - 'bulk_write_insert', + "bulk_write_insert", lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_insert-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([InsertOne({}), - InsertOne({})]), + "bulk_write_insert-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne({}), InsertOne({})] + ), request=OpMsg({"insert": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_insert-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})], ordered=False), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "bulk_write_insert-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne({}), InsertOne({})], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'bulk_write_update', - lambda coll: coll.bulk_write([ - UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + "bulk_write_update", + lambda coll: coll.bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 2, 'nModified': 2}), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), Operation( - 'bulk_write_update-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ + "bulk_write_update-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}})]), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), request=OpMsg({"update": "coll"}, flags=0), - reply={'ok': 1, 'n': 2, 'nModified': 2}), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), Operation( - 'bulk_write_update-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ + "bulk_write_update-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}})], ordered=False), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ], + ordered=False, + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), Operation( - 'bulk_write_delete', - lambda coll: coll.bulk_write([ - DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + "bulk_write_delete", + lambda coll: coll.bulk_write([DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_delete-w0', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ - DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + "bulk_write_delete-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})] + ), request=OpMsg({"delete": "coll"}, flags=0), - reply={'ok': 1, 'n': 2}), + reply={"ok": 1, "n": 2}, + ), Operation( - 'bulk_write_delete-w0-unordered', - lambda coll: coll.with_options( - write_concern=WriteConcern(w=0)).bulk_write([ - DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS['moreToCome']), - reply=None), + "bulk_write_delete-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False + ), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), ] operations_312 = [ Operation( - 'find_raw_batches', + "find_raw_batches", lambda coll: list(coll.find_raw_batches({})), request=[ OpMsg({"find": "coll"}, flags=0), OpMsg({"getMore": 7}, flags=0), ], reply=[ - {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, - ]), + {"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), Operation( - 'aggregate_raw_batches', + "aggregate_raw_batches", lambda coll: list(coll.aggregate_raw_batches([])), request=[ OpMsg({"aggregate": "coll"}, flags=0), OpMsg({"getMore": 7}, flags=0), ], reply=[ - {'ok': 1, 'cursor': {'firstBatch': [], 'id': 7}}, - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, - ]), + {"ok": 1, "cursor": {"firstBatch": [], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), Operation( - 'find_exhaust_cursor', + "find_exhaust_cursor", lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), request=[ OpMsg({"find": "coll"}, flags=0), OpMsg({"getMore": 7}, flags=1 << 16), ], reply=[ - OpMsgReply( - {'ok': 1, 'cursor': {'firstBatch': [{}], 'id': 7}}, flags=0), - OpMsgReply( - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), - OpMsgReply( - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 7}}, flags=2), - OpMsgReply( - {'ok': 1, 'cursor': {'nextBatch': [{}], 'id': 0}}, flags=0), - ]), + OpMsgReply({"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, flags=0), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, flags=0), + ], + ), ] @@ -273,6 +298,7 @@ def _test_operation(self, op): def operation_test(op): def test(self): self._test_operation(op) + return test @@ -286,5 +312,5 @@ def create_tests(ops): create_tests(operations_312) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index eb3a14fa01..b8d1348b97 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -14,17 +14,19 @@ import copy import itertools +import unittest from typing import Any -from mockupdb import MockupDB, going, CommandBase -from pymongo import MongoClient, ReadPreference -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name, - _MONGOS_MODES) - -import unittest +from mockupdb import CommandBase, MockupDB, going from operations import operations +from pymongo import MongoClient, ReadPreference +from pymongo.read_preferences import ( + _MONGOS_MODES, + make_read_preference, + read_pref_mode_from_name, +) + class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False @@ -40,22 +42,20 @@ def add_test(cls, mode, test_name, test): setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, - read_preference=read_preference) + client = MongoClient(self.primary.uri, read_preference=read_preference) self.addCleanup(client.close) return client class TestOpMsgMongos(OpMsgReadPrefBase): - @classmethod def setUpClass(cls): super(TestOpMsgMongos, cls).setUpClass() auto_ismaster = { - 'ismaster': True, - 'msg': 'isdbgrid', # Mongos. - 'minWireVersion': 2, - 'maxWireVersion': 6, + "ismaster": True, + "msg": "isdbgrid", # Mongos. + "minWireVersion": 2, + "maxWireVersion": 6, } cls.primary = MockupDB(auto_ismaster=auto_ismaster) cls.primary.run() @@ -68,7 +68,6 @@ def tearDownClass(cls): class TestOpMsgReplicaSet(OpMsgReadPrefBase): - @classmethod def setUpClass(cls): super(TestOpMsgReplicaSet, cls).setUpClass() @@ -76,21 +75,20 @@ def setUpClass(cls): for server in cls.primary, cls.secondary: server.run() - hosts = [server.address_string - for server in (cls.primary, cls.secondary)] + hosts = [server.address_string for server in (cls.primary, cls.secondary)] primary_ismaster = { - 'ismaster': True, - 'setName': 'rs', - 'hosts': hosts, - 'minWireVersion': 2, - 'maxWireVersion': 6, + "ismaster": True, + "setName": "rs", + "hosts": hosts, + "minWireVersion": 2, + "maxWireVersion": 6, } - cls.primary.autoresponds(CommandBase('ismaster'), primary_ismaster) + cls.primary.autoresponds(CommandBase("ismaster"), primary_ismaster) secondary_ismaster = copy.copy(primary_ismaster) - secondary_ismaster['ismaster'] = False - secondary_ismaster['secondary'] = True - cls.secondary.autoresponds(CommandBase('ismaster'), secondary_ismaster) + secondary_ismaster["ismaster"] = False + secondary_ismaster["secondary"] = True + cls.secondary.autoresponds(CommandBase("ismaster"), secondary_ismaster) @classmethod def tearDownClass(cls): @@ -102,18 +100,15 @@ def tearDownClass(cls): def add_test(cls, mode, test_name, test): # Skip nearest tests since we don't know if we will select the primary # or secondary. - if mode != 'nearest': + if mode != "nearest": setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, - replicaSet='rs', - read_preference=read_preference) + client = MongoClient(self.primary.uri, replicaSet="rs", read_preference=read_preference) # Run a command on a secondary to discover the topology. This ensures # that secondaryPreferred commands will select the secondary. - client.admin.command('ismaster', - read_preference=ReadPreference.SECONDARY) + client.admin.command("ismaster", read_preference=ReadPreference.SECONDARY) self.addCleanup(client.close) return client @@ -125,9 +120,9 @@ class TestOpMsgSingle(OpMsgReadPrefBase): def setUpClass(cls): super(TestOpMsgSingle, cls).setUpClass() auto_ismaster = { - 'ismaster': True, - 'minWireVersion': 2, - 'maxWireVersion': 6, + "ismaster": True, + "minWireVersion": 2, + "maxWireVersion": 6, } cls.primary = MockupDB(auto_ismaster=auto_ismaster) cls.primary.run() @@ -141,29 +136,28 @@ def tearDownClass(cls): def create_op_msg_read_mode_test(mode, operation): def test(self): - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = self.setup_client(read_preference=pref) expected_pref: Any - if operation.op_type == 'always-use-secondary': + if operation.op_type == "always-use-secondary": expected_server = self.secondary expected_pref = ReadPreference.SECONDARY - elif operation.op_type == 'must-use-primary': + elif operation.op_type == "must-use-primary": expected_server = self.primary expected_pref = None - elif operation.op_type == 'may-use-secondary': - if mode == 'primary': + elif operation.op_type == "may-use-secondary": + if mode == "primary": expected_server = self.primary expected_pref = None - elif mode == 'primaryPreferred': + elif mode == "primaryPreferred": expected_server = self.primary expected_pref = pref else: expected_server = self.secondary expected_pref = pref else: - self.fail('unrecognized op_type %r' % operation.op_type) + self.fail("unrecognized op_type %r" % operation.op_type) # For single mongod we omit the read preference. if self.single_mongod: expected_pref = None @@ -171,12 +165,12 @@ def test(self): request = expected_server.receive() request.reply(operation.reply) - actual_pref = request.doc.get('$readPreference') + actual_pref = request.doc.get("$readPreference") if expected_pref: self.assertEqual(expected_pref.document, actual_pref) else: self.assertIsNone(actual_pref) - self.assertNotIn('$query', request.doc) + self.assertNotIn("$query", request.doc) return test @@ -187,8 +181,7 @@ def generate_op_msg_read_mode_tests(): for entry in matrix: mode, operation = entry test = create_op_msg_read_mode_test(mode, operation) - test_name = 'test_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), mode) + test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) test.__name__ = test_name for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: cls.add_test(mode, test_name, test) @@ -197,5 +190,5 @@ def generate_op_msg_read_mode_tests(): generate_op_msg_read_mode_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 88dcdd8351..7ad4f2afc8 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -14,24 +14,28 @@ """Test PyMongo query and read preference with a sharded cluster.""" +import unittest + +from mockupdb import MockupDB, OpMsg, going + from bson import SON from pymongo import MongoClient -from pymongo.read_preferences import (Primary, - PrimaryPreferred, - Secondary, - SecondaryPreferred, - Nearest) -from mockupdb import MockupDB, going, OpMsg - -import unittest +from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + Secondary, + SecondaryPreferred, +) class TestQueryAndReadModeSharded(unittest.TestCase): def test_query_and_read_mode_sharded_op_msg(self): """Test OP_MSG sends non-primary $readPreference and never $query.""" server = MockupDB() - server.autoresponds('ismaster', ismaster=True, msg='isdbgrid', - minWireVersion=2, maxWireVersion=6) + server.autoresponds( + "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + ) server.run() self.addCleanup(server.stop) @@ -44,24 +48,26 @@ def test_query_and_read_mode_sharded_op_msg(self): PrimaryPreferred(), Secondary(), Nearest(), - SecondaryPreferred([{'tag': 'value'}]),) + SecondaryPreferred([{"tag": "value"}]), + ) - for query in ({'a': 1}, {'$query': {'a': 1}},): + for query in ( + {"a": 1}, + {"$query": {"a": 1}}, + ): for pref in read_prefs: - collection = client.db.get_collection('test', - read_preference=pref) + collection = client.db.get_collection("test", read_preference=pref) cursor = collection.find(query.copy()) with going(next, cursor): request = server.receives() # Command is not nested in $query. - expected_cmd = SON([('find', 'test'), - ('filter', {'a': 1})]) + expected_cmd = SON([("find", "test"), ("filter", {"a": 1})]) if pref.mode: - expected_cmd['$readPreference'] = pref.document + expected_cmd["$readPreference"] = pref.document request.assert_matches(OpMsg(expected_cmd)) - request.replies({'cursor': {'id': 0, 'firstBatch': [{}]}}) + request.replies({"cursor": {"id": 0, "firstBatch": [{}]}}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 48f9486544..778be3d5ca 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time import itertools +import time +import unittest from mockupdb import MockupDB, going, wait_until -from pymongo.server_type import SERVER_TYPE -from pymongo.errors import ConnectionFailure -from pymongo import MongoClient - -import unittest from operations import operations +from pymongo import MongoClient +from pymongo.errors import ConnectionFailure +from pymongo.server_type import SERVER_TYPE + class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): @@ -38,18 +38,18 @@ def responder(request): self.ismaster_time = time.time() return request.ok(ismaster=True, minWireVersion=2, maxWireVersion=6) - self.server.autoresponds('ismaster', responder) + self.server.autoresponds("ismaster", responder) self.server.run() self.addCleanup(self.server.stop) - kwargs = {'socketTimeoutMS': 100} + kwargs = {"socketTimeoutMS": 100} # Disable retryable reads when pymongo supports it. - kwargs['retryReads'] = False + kwargs["retryReads"] = False self.client = MongoClient(self.server.uri, **kwargs) # type: ignore - wait_until(lambda: self.client.nodes, 'connect to standalone') + wait_until(lambda: self.client.nodes, "connect to standalone") def tearDown(self): - if hasattr(self, 'client') and self.client: + if hasattr(self, "client") and self.client: self.client.close() def _test_disconnect(self, operation): @@ -73,11 +73,11 @@ def _test_disconnect(self, operation): after = time.time() # Demand a reconnect. - with going(self.client.db.command, 'buildinfo'): - self.server.receives('buildinfo').ok() + with going(self.client.db.command, "buildinfo"): + self.server.receives("buildinfo").ok() last = self.ismaster_time - self.assertGreaterEqual(last, after, 'called ismaster before needed') + self.assertGreaterEqual(last, after, "called ismaster before needed") def _test_timeout(self, operation): # Application operation times out. Test that client does *not* reset @@ -99,7 +99,7 @@ def _test_timeout(self, operation): self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time - self.assertEqual(after, before, 'unneeded ismaster call') + self.assertEqual(after, before, "unneeded ismaster call") def _test_not_master(self, operation): # Application operation gets a "not master" error. @@ -121,7 +121,7 @@ def _test_not_master(self, operation): self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) after = self.ismaster_time - self.assertGreater(after, before, 'ismaster not called') + self.assertGreater(after, before, "ismaster not called") def create_reset_test(operation, test_method): @@ -133,9 +133,9 @@ def test(self): def generate_reset_tests(): test_methods = [ - (TestResetAndRequestCheck._test_disconnect, 'test_disconnect'), - (TestResetAndRequestCheck._test_timeout, 'test_timeout'), - (TestResetAndRequestCheck._test_not_master, 'test_not_master'), + (TestResetAndRequestCheck._test_disconnect, "test_disconnect"), + (TestResetAndRequestCheck._test_timeout, "test_timeout"), + (TestResetAndRequestCheck._test_not_master, "test_not_master"), ] matrix = itertools.product(operations, test_methods) @@ -143,12 +143,12 @@ def generate_reset_tests(): for entry in matrix: operation, (test_method, name) = entry test = create_reset_test(operation, test_method) - test_name = '%s_%s' % (name, operation.name.replace(' ', '_')) + test_name = "%s_%s" % (name, operation.name.replace(" ", "_")) test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) generate_reset_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py index 2f02503f54..354399728d 100644 --- a/test/mockupdb/test_rsghost.py +++ b/test/mockupdb/test_rsghost.py @@ -15,38 +15,45 @@ """Test connections to RSGhost nodes.""" import datetime +import unittest + +from mockupdb import MockupDB, going -from mockupdb import going, MockupDB from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError -import unittest - class TestRSGhost(unittest.TestCase): - def test_rsghost(self): rsother_response = { - 'ok': 1.0, 'ismaster': False, 'secondary': False, - 'info': 'Does not have a valid replica set config', - 'isreplicaset': True, 'maxBsonObjectSize': 16777216, - 'maxMessageSizeBytes': 48000000, 'maxWriteBatchSize': 100000, - 'localTime': datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), - 'logicalSessionTimeoutMinutes': 30, 'connectionId': 3, - 'minWireVersion': 0, 'maxWireVersion': 15, 'readOnly': False} + "ok": 1.0, + "ismaster": False, + "secondary": False, + "info": "Does not have a valid replica set config", + "isreplicaset": True, + "maxBsonObjectSize": 16777216, + "maxMessageSizeBytes": 48000000, + "maxWriteBatchSize": 100000, + "localTime": datetime.datetime(2021, 11, 30, 0, 53, 4, 99000), + "logicalSessionTimeoutMinutes": 30, + "connectionId": 3, + "minWireVersion": 0, + "maxWireVersion": 15, + "readOnly": False, + } server = MockupDB(auto_ismaster=rsother_response) server.run() self.addCleanup(server.stop) # Default auto discovery yields a server selection timeout. with MongoClient(server.uri, serverSelectionTimeoutMS=250) as client: with self.assertRaises(ServerSelectionTimeoutError): - client.test.command('ping') + client.test.command("ping") # Direct connection succeeds. with MongoClient(server.uri, directConnection=True) as client: - with going(client.test.command, 'ping'): + with going(client.test.command, "ping"): request = server.receives(ping=1) request.reply() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 5ff6fced4e..5a162c08e3 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -17,12 +17,13 @@ Just make sure SlaveOkay is *not* set on primary reads. """ -from mockupdb import MockupDB, going -from pymongo import MongoClient - import unittest + +from mockupdb import MockupDB, going from operations import operations +from pymongo import MongoClient + class TestSlaveOkayRS(unittest.TestCase): def setup_server(self): @@ -31,24 +32,27 @@ def setup_server(self): server.run() self.addCleanup(server.stop) - hosts = [server.address_string - for server in (self.primary, self.secondary)] + hosts = [server.address_string for server in (self.primary, self.secondary)] self.primary.autoresponds( - 'ismaster', - ismaster=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) + "ismaster", ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ) self.secondary.autoresponds( - 'ismaster', - ismaster=False, secondary=True, setName='rs', hosts=hosts, - minWireVersion=2, maxWireVersion=6) + "ismaster", + ismaster=False, + secondary=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=6, + ) def create_slave_ok_rs_test(operation): def test(self): self.setup_server() - assert not operation.op_type == 'always-use-secondary' + assert not operation.op_type == "always-use-secondary" - client = MongoClient(self.primary.uri, replicaSet='rs') + client = MongoClient(self.primary.uri, replicaSet="rs") self.addCleanup(client.close) with going(operation.function, client): request = self.primary.receive() @@ -63,11 +67,11 @@ def generate_slave_ok_rs_tests(): for operation in operations: # Don't test secondary operations with MockupDB, the server enforces the # SlaveOkay bit so integration tests prove we set it. - if operation.op_type == 'always-use-secondary': + if operation.op_type == "always-use-secondary": continue test = create_slave_ok_rs_test(operation) - test_name = 'test_%s' % operation.name.replace(' ', '_') + test_name = "test_%s" % operation.name.replace(" ", "_") test.__name__ = test_name setattr(TestSlaveOkayRS, test_name, test) @@ -75,5 +79,5 @@ def generate_slave_ok_rs_tests(): generate_slave_ok_rs_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 07e05bfece..52c643b417 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -19,18 +19,15 @@ - A direct connection to a mongos. """ import itertools - -from pymongo.read_preferences import make_read_preference -from pymongo.read_preferences import read_pref_mode_from_name - +import unittest from queue import Queue from mockupdb import MockupDB, going -from pymongo import MongoClient - -import unittest from operations import operations +from pymongo import MongoClient +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name + class TestSlaveOkaySharded(unittest.TestCase): def setup_server(self): @@ -42,27 +39,29 @@ def setup_server(self): server.subscribe(self.q.put) server.run() self.addCleanup(server.stop) - server.autoresponds('ismaster', minWireVersion=2, maxWireVersion=6, - ismaster=True, msg='isdbgrid') + server.autoresponds( + "ismaster", minWireVersion=2, maxWireVersion=6, ismaster=True, msg="isdbgrid" + ) - self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos1.address_string, - self.mongos2.address_string) + self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongos1.address_string, + self.mongos2.address_string, + ) def create_slave_ok_sharded_test(mode, operation): def test(self): self.setup_server() - if operation.op_type == 'always-use-secondary': + if operation.op_type == "always-use-secondary": slave_ok = True - elif operation.op_type == 'may-use-secondary': - slave_ok = mode != 'primary' - elif operation.op_type == 'must-use-primary': + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" + elif operation.op_type == "must-use-primary": slave_ok = False else: - assert False, 'unrecognized op_type %r' % operation.op_type + assert False, "unrecognized op_type %r" % operation.op_type - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(self.mongoses_uri, read_preference=pref) self.addCleanup(client.close) @@ -71,22 +70,21 @@ def test(self): request.reply(operation.reply) if slave_ok: - self.assertTrue(request.slave_ok, 'SlaveOkay not set') + self.assertTrue(request.slave_ok, "SlaveOkay not set") else: - self.assertFalse(request.slave_ok, 'SlaveOkay set') + self.assertFalse(request.slave_ok, "SlaveOkay set") return test def generate_slave_ok_sharded_tests(): - modes = 'primary', 'secondary', 'nearest' + modes = "primary", "secondary", "nearest" matrix = itertools.product(modes, operations) for entry in matrix: mode, operation = entry test = create_slave_ok_sharded_test(mode, operation) - test_name = 'test_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), mode) + test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestSlaveOkaySharded, test_name, test) @@ -94,5 +92,5 @@ def generate_slave_ok_sharded_tests(): generate_slave_ok_sharded_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 83c0f925a4..98cd1f2706 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -20,16 +20,15 @@ """ import itertools +import unittest from mockupdb import MockupDB, going +from operations import operations + from pymongo import MongoClient -from pymongo.read_preferences import (make_read_preference, - read_pref_mode_from_name) +from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE -import unittest -from operations import operations - def topology_type_name(client): topology_type = client._topology._description.topology_type @@ -46,20 +45,19 @@ def setUp(self): def create_slave_ok_single_test(mode, server_type, ismaster, operation): def test(self): ismaster_with_version = ismaster.copy() - ismaster_with_version['minWireVersion'] = 2 - ismaster_with_version['maxWireVersion'] = 6 - self.server.autoresponds('ismaster', **ismaster_with_version) - if operation.op_type == 'always-use-secondary': + ismaster_with_version["minWireVersion"] = 2 + ismaster_with_version["maxWireVersion"] = 6 + self.server.autoresponds("ismaster", **ismaster_with_version) + if operation.op_type == "always-use-secondary": slave_ok = True - elif operation.op_type == 'may-use-secondary': - slave_ok = mode != 'primary' or server_type != 'mongos' - elif operation.op_type == 'must-use-primary': - slave_ok = server_type != 'mongos' + elif operation.op_type == "may-use-secondary": + slave_ok = mode != "primary" or server_type != "mongos" + elif operation.op_type == "must-use-primary": + slave_ok = server_type != "mongos" else: - assert False, 'unrecognized op_type %r' % operation.op_type + assert False, "unrecognized op_type %r" % operation.op_type - pref = make_read_preference(read_pref_mode_from_name(mode), - tag_sets=None) + pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(self.server.uri, read_preference=pref) self.addCleanup(client.close) @@ -67,27 +65,30 @@ def test(self): request = self.server.receive() request.reply(operation.reply) - self.assertIn(topology_type_name(client), ['Sharded', 'Single']) + self.assertIn(topology_type_name(client), ["Sharded", "Single"]) return test def generate_slave_ok_single_tests(): - modes = 'primary', 'secondary', 'nearest' + modes = "primary", "secondary", "nearest" server_types = [ - ('standalone', {'ismaster': True}), - ('slave', {'ismaster': False}), - ('mongos', {'ismaster': True, 'msg': 'isdbgrid'})] + ("standalone", {"ismaster": True}), + ("slave", {"ismaster": False}), + ("mongos", {"ismaster": True, "msg": "isdbgrid"}), + ] matrix = itertools.product(modes, server_types, operations) for entry in matrix: mode, (server_type, ismaster), operation = entry - test = create_slave_ok_single_test(mode, server_type, ismaster, - operation) + test = create_slave_ok_single_test(mode, server_type, ismaster, operation) - test_name = 'test_%s_%s_with_mode_%s' % ( - operation.name.replace(' ', '_'), server_type, mode) + test_name = "test_%s_%s_with_mode_%s" % ( + operation.name.replace(" ", "_"), + server_type, + mode, + ) test.__name__ = test_name setattr(TestSlaveOkaySingle, test_name, test) @@ -96,5 +97,5 @@ def generate_slave_ok_single_tests(): generate_slave_ok_single_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index f99ac0054e..bfdae9e824 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -15,42 +15,58 @@ """Test client for mod_wsgi application, see bug PYTHON-353. """ +import _thread as thread import sys import threading import time - from optparse import OptionParser - from urllib.request import urlopen -import _thread as thread def parse_args(): - parser = OptionParser("""usage: %prog [options] mode url + parser = OptionParser( + """usage: %prog [options] mode url - mode:\tparallel or serial""") + mode:\tparallel or serial""" + ) # Should be enough that any connection leak will exhaust available file # descriptors. parser.add_option( - "-n", "--nrequests", type="int", - dest="nrequests", default=50 * 1000, - help="Number of times to GET the URL, in total") + "-n", + "--nrequests", + type="int", + dest="nrequests", + default=50 * 1000, + help="Number of times to GET the URL, in total", + ) parser.add_option( - "-t", "--nthreads", type="int", - dest="nthreads", default=100, - help="Number of threads with mode 'parallel'") + "-t", + "--nthreads", + type="int", + dest="nthreads", + default=100, + help="Number of threads with mode 'parallel'", + ) parser.add_option( - "-q", "--quiet", - action="store_false", dest="verbose", default=True, - help="Don't print status messages to stdout") + "-q", + "--quiet", + action="store_false", + dest="verbose", + default=True, + help="Don't print status messages to stdout", + ) parser.add_option( - "-c", "--continue", - action="store_true", dest="continue_", default=False, - help="Continue after HTTP errors") + "-c", + "--continue", + action="store_true", + dest="continue_", + default=False, + help="Continue after HTTP errors", + ) try: options, (mode, url) = parser.parse_args() @@ -58,7 +74,7 @@ def parse_args(): parser.print_usage() sys.exit(1) - if mode not in ('parallel', 'serial'): + if mode not in ("parallel", "serial"): parser.print_usage() sys.exit(1) @@ -107,18 +123,22 @@ def run(self): def main(options, mode, url): start_time = time.time() errors = 0 - if mode == 'parallel': + if mode == "parallel": nrequests_per_thread = options.nrequests // options.nthreads if options.verbose: - print ( - 'Getting %s %s times total in %s threads, ' - '%s times per thread' % ( - url, nrequests_per_thread * options.nthreads, - options.nthreads, nrequests_per_thread)) + print( + "Getting %s %s times total in %s threads, " + "%s times per thread" + % ( + url, + nrequests_per_thread * options.nthreads, + options.nthreads, + nrequests_per_thread, + ) + ) threads = [ - URLGetterThread(options, url, nrequests_per_thread) - for _ in range(options.nthreads) + URLGetterThread(options, url, nrequests_per_thread) for _ in range(options.nthreads) ] for t in threads: @@ -130,14 +150,11 @@ def main(options, mode, url): errors = sum([t.errors for t in threads]) nthreads_with_errors = len([t for t in threads if t.errors]) if nthreads_with_errors: - print('%d threads had errors! %d errors in total' % ( - nthreads_with_errors, errors)) + print("%d threads had errors! %d errors in total" % (nthreads_with_errors, errors)) else: - assert mode == 'serial' + assert mode == "serial" if options.verbose: - print('Getting %s %s times in one thread' % ( - url, options.nrequests - )) + print("Getting %s %s times in one thread" % (url, options.nrequests)) for i in range(1, options.nrequests + 1): try: @@ -153,16 +170,16 @@ def main(options, mode, url): print(i) if errors: - print('%d errors!' % errors) + print("%d errors!" % errors) if options.verbose: - print('Completed in %.2f seconds' % (time.time() - start_time)) + print("Completed in %.2f seconds" % (time.time() - start_time)) if errors: # Failure sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": options, mode, url = parse_args() main(options, mode, url) diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index 07197e73b6..cce846feac 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -22,19 +22,17 @@ sys.path[0:0] = [""] import pymongo - from pymongo.errors import ServerSelectionTimeoutError - CA_FILE = os.environ.get("CA_FILE") -OCSP_TLS_SHOULD_SUCCEED = (os.environ.get('OCSP_TLS_SHOULD_SUCCEED') == 'true') +OCSP_TLS_SHOULD_SUCCEED = os.environ.get("OCSP_TLS_SHOULD_SUCCEED") == "true" # Enable logs in this format: # 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response -FORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s' +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) -if sys.platform == 'win32': +if sys.platform == "win32": # The non-stapled OCSP endpoint check is slow on Windows. TIMEOUT_MS = 5000 else: @@ -42,15 +40,17 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" - "&tlsCAFile=%s&%s") % (TIMEOUT_MS, CA_FILE, options) + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" "&tlsCAFile=%s&%s") % ( + TIMEOUT_MS, + CA_FILE, + options, + ) print(uri) client = pymongo.MongoClient(uri) - client.admin.command('ping') + client.admin.command("ping") class TestOCSP(unittest.TestCase): - def test_tls_insecure(self): # Should always succeed options = "tls=true&tlsInsecure=true" @@ -65,12 +65,11 @@ def test_tls(self): options = "tls=true" if not OCSP_TLS_SHOULD_SUCCEED: self.assertRaisesRegex( - ServerSelectionTimeoutError, - "invalid status response", - _connect, options) + ServerSelectionTimeoutError, "invalid status response", _connect, options + ) else: _connect(options) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 7effa1c1ee..3cb4b5d5d1 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -19,8 +19,8 @@ import sys import tempfile import time -from typing import Any, List import warnings +from typing import Any, List try: import simplejson as json @@ -29,28 +29,30 @@ sys.path[0:0] = [""] +from test import client_context, host, port, unittest + from bson import decode, encode from bson.json_util import loads from gridfs import GridFSBucket from pymongo import MongoClient -from test import client_context, host, port, unittest NUM_ITERATIONS = 100 MAX_ITERATION_TIME = 300 NUM_DOCS = 10000 -TEST_PATH = os.environ.get('TEST_PATH', os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('data'))) +TEST_PATH = os.environ.get( + "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) +) -OUTPUT_FILE = os.environ.get('OUTPUT_FILE') +OUTPUT_FILE = os.environ.get("OUTPUT_FILE") result_data: List = [] + def tearDownModule(): output = json.dumps(result_data, indent=4) if OUTPUT_FILE: - with open(OUTPUT_FILE, 'w') as opf: + with open(OUTPUT_FILE, "w") as opf: opf.write(output) else: print(output) @@ -83,22 +85,20 @@ def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) bytes_per_sec = self.data_size / median - print('Running %s. MEDIAN=%s' % (self.__class__.__name__, - self.percentile(50))) - result_data.append({ - 'info': { - 'test_name': name, - 'args': { - 'threads': 1, + print("Running %s. MEDIAN=%s" % (self.__class__.__name__, self.percentile(50))) + result_data.append( + { + "info": { + "test_name": name, + "args": { + "threads": 1, + }, }, - }, - 'metrics': [ - { - 'name': 'bytes_per_sec', - 'value': bytes_per_sec - }, - ] - }) + "metrics": [ + {"name": "bytes_per_sec", "value": bytes_per_sec}, + ], + } + ) def before(self): pass @@ -107,12 +107,12 @@ def after(self): pass def percentile(self, percentile): - if hasattr(self, 'results'): + if hasattr(self, "results"): sorted_results = sorted(self.results) percentile_index = int(len(sorted_results) * percentile / 100) - 1 return sorted_results[percentile_index] else: - self.fail('Test execution failed') + self.fail("Test execution failed") def runTest(self): results = [] @@ -120,7 +120,7 @@ def runTest(self): self.max_iterations = NUM_ITERATIONS for i in range(NUM_ITERATIONS): if time.monotonic() - start > MAX_ITERATION_TIME: - warnings.warn('Test timed out, completed %s iterations.' % i) + warnings.warn("Test timed out, completed %s iterations." % i) break self.before() with Timer() as timer: @@ -135,9 +135,7 @@ def runTest(self): class BsonEncodingTest(PerformanceTest): def setUp(self): # Location of test data. - with open( - os.path.join(TEST_PATH, - os.path.join('extended_bson', self.dataset))) as data: + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: self.document = loads(data.read()) def do_task(self): @@ -148,9 +146,7 @@ def do_task(self): class BsonDecodingTest(PerformanceTest): def setUp(self): # Location of test data. - with open( - os.path.join(TEST_PATH, - os.path.join('extended_bson', self.dataset))) as data: + with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: self.document = encode(json.loads(data.read())) def do_task(self): @@ -159,41 +155,42 @@ def do_task(self): class TestFlatEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'flat_bson.json' + dataset = "flat_bson.json" data_size = 75310000 class TestFlatDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'flat_bson.json' + dataset = "flat_bson.json" data_size = 75310000 class TestDeepEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'deep_bson.json' + dataset = "deep_bson.json" data_size = 19640000 class TestDeepDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'deep_bson.json' + dataset = "deep_bson.json" data_size = 19640000 class TestFullEncoding(BsonEncodingTest, unittest.TestCase): - dataset = 'full_bson.json' + dataset = "full_bson.json" data_size = 57340000 class TestFullDecoding(BsonDecodingTest, unittest.TestCase): - dataset = 'full_bson.json' + dataset = "full_bson.json" data_size = 57340000 # SINGLE-DOC BENCHMARKS class TestRunCommand(PerformanceTest, unittest.TestCase): data_size = 160000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def do_task(self): command = self.client.perftest.command @@ -205,29 +202,29 @@ class TestDocument(PerformanceTest): def setUp(self): # Location of test data. with open( - os.path.join( - TEST_PATH, os.path.join( - 'single_and_multi_document', self.dataset)), 'r') as data: + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)), "r" + ) as data: self.document = json.loads(data.read()) self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def tearDown(self): super(TestDocument, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def after(self): - self.client.perftest.drop_collection('corpus') + self.client.perftest.drop_collection("corpus") class TestFindOneByID(TestDocument, unittest.TestCase): data_size = 16220000 + def setUp(self): - self.dataset = 'tweet.json' + self.dataset = "tweet.json" super(TestFindOneByID, self).setUp() documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -238,7 +235,7 @@ def setUp(self): def do_task(self): find_one = self.corpus.find_one for _id in self.inserted_ids: - find_one({'_id': _id}) + find_one({"_id": _id}) def before(self): pass @@ -249,8 +246,9 @@ def after(self): class TestSmallDocInsertOne(TestDocument, unittest.TestCase): data_size = 2750000 + def setUp(self): - self.dataset = 'small_doc.json' + self.dataset = "small_doc.json" super(TestSmallDocInsertOne, self).setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -263,8 +261,9 @@ def do_task(self): class TestLargeDocInsertOne(TestDocument, unittest.TestCase): data_size = 27310890 + def setUp(self): - self.dataset = 'large_doc.json' + self.dataset = "large_doc.json" super(TestLargeDocInsertOne, self).setUp() self.documents = [self.document.copy() for _ in range(10)] @@ -278,14 +277,13 @@ def do_task(self): # MULTI-DOC BENCHMARKS class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): data_size = 16220000 + def setUp(self): - self.dataset = 'tweet.json' + self.dataset = "tweet.json" super(TestFindManyAndEmptyCursor, self).setUp() for _ in range(10): - self.client.perftest.command( - 'insert', 'corpus', - documents=[self.document] * 1000) + self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) self.corpus = self.client.perftest.corpus def do_task(self): @@ -300,13 +298,14 @@ def after(self): class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): data_size = 2750000 + def setUp(self): - self.dataset = 'small_doc.json' + self.dataset = "small_doc.json" super(TestSmallDocBulkInsert, self).setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def do_task(self): self.corpus.insert_many(self.documents, ordered=True) @@ -314,13 +313,14 @@ def do_task(self): class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): data_size = 27310890 + def setUp(self): - self.dataset = 'large_doc.json' + self.dataset = "large_doc.json" super(TestLargeDocBulkInsert, self).setUp() self.documents = [self.document.copy() for _ in range(10)] def before(self): - self.corpus = self.client.perftest.create_collection('corpus') + self.corpus = self.client.perftest.create_collection("corpus") def do_task(self): self.corpus.insert_many(self.documents, ordered=True) @@ -328,47 +328,48 @@ def do_task(self): class TestGridFsUpload(PerformanceTest, unittest.TestCase): data_size = 52428800 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") gridfs_path = os.path.join( - TEST_PATH, - os.path.join('single_and_multi_document', 'gridfs_large.bin')) - with open(gridfs_path, 'rb') as data: + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: self.document = data.read() self.bucket = GridFSBucket(self.client.perftest) def tearDown(self): super(TestGridFsUpload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.bucket.upload_from_stream('init', b'x') + self.bucket.upload_from_stream("init", b"x") def do_task(self): - self.bucket.upload_from_stream('gridfstest', self.document) + self.bucket.upload_from_stream("gridfstest", self.document) class TestGridFsDownload(PerformanceTest, unittest.TestCase): data_size = 52428800 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") gridfs_path = os.path.join( - TEST_PATH, - os.path.join('single_and_multi_document', 'gridfs_large.bin')) + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) self.bucket = GridFSBucket(self.client.perftest) - with open(gridfs_path, 'rb') as gfile: - self.uploaded_id = self.bucket.upload_from_stream( - 'gridfstest', gfile) + with open(gridfs_path, "rb") as gfile: + self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) def tearDown(self): super(TestGridFsDownload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") def do_task(self): self.bucket.open_download_stream(self.uploaded_id).read() @@ -391,17 +392,17 @@ def mp_map(map_func, files): def insert_json_file(filename): assert proc_client is not None - with open(filename, 'r') as data: + with open(filename, "r") as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) def insert_json_file_with_file_id(filename): documents = [] - with open(filename, 'r') as data: + with open(filename, "r") as data: for line in data: doc = json.loads(line) - doc['file'] = filename + doc["file"] = filename documents.append(doc) assert proc_client is not None coll = proc_client.perftest.corpus @@ -411,11 +412,11 @@ def insert_json_file_with_file_id(filename): def read_json_file(filename): assert proc_client is not None coll = proc_client.perftest.corpus - temp = tempfile.TemporaryFile(mode='w') + temp = tempfile.TemporaryFile(mode="w") try: temp.writelines( - [json.dumps(doc) + '\n' for - doc in coll.find({'file': filename}, {'_id': False})]) + [json.dumps(doc) + "\n" for doc in coll.find({"file": filename}, {"_id": False})] + ) finally: temp.close() @@ -424,7 +425,7 @@ def insert_gridfs_file(filename): assert proc_client is not None bucket = GridFSBucket(proc_client.perftest) - with open(filename, 'rb') as gfile: + with open(filename, "rb") as gfile: bucket.upload_from_stream(filename, gfile) @@ -441,41 +442,39 @@ def read_gridfs_file(filename): class TestJsonMultiImport(PerformanceTest, unittest.TestCase): data_size = 565000000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.client.perftest.command({'create': 'corpus'}) + self.client.perftest.command({"create": "corpus"}) self.corpus = self.client.perftest.corpus - ldjson_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'ldjson_multi')) - self.files = [os.path.join( - ldjson_path, s) for s in os.listdir(ldjson_path)] + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] def do_task(self): mp_map(insert_json_file, self.files) def after(self): - self.client.perftest.drop_collection('corpus') + self.client.perftest.drop_collection("corpus") def tearDown(self): super(TestJsonMultiImport, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") class TestJsonMultiExport(PerformanceTest, unittest.TestCase): data_size = 565000000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') - self.client.perfest.corpus.create_index('file') + self.client.drop_database("perftest") + self.client.perfest.corpus.create_index("file") - ldjson_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'ldjson_multi')) - self.files = [os.path.join( - ldjson_path, s) for s in os.listdir(ldjson_path)] + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] mp_map(insert_json_file_with_file_id, self.files) @@ -484,48 +483,46 @@ def do_task(self): def tearDown(self): super(TestJsonMultiExport, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") class TestGridFsMultiFileUpload(PerformanceTest, unittest.TestCase): data_size = 262144000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") def before(self): - self.client.perftest.drop_collection('fs.files') - self.client.perftest.drop_collection('fs.chunks') + self.client.perftest.drop_collection("fs.files") + self.client.perftest.drop_collection("fs.chunks") self.bucket = GridFSBucket(self.client.perftest) - gridfs_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'gridfs_multi')) - self.files = [os.path.join( - gridfs_path, s) for s in os.listdir(gridfs_path)] + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] def do_task(self): mp_map(insert_gridfs_file, self.files) def tearDown(self): super(TestGridFsMultiFileUpload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") class TestGridFsMultiFileDownload(PerformanceTest, unittest.TestCase): data_size = 262144000 + def setUp(self): self.client = client_context.client - self.client.drop_database('perftest') + self.client.drop_database("perftest") bucket = GridFSBucket(self.client.perftest) - gridfs_path = os.path.join( - TEST_PATH, os.path.join('parallel', 'gridfs_multi')) - self.files = [os.path.join( - gridfs_path, s) for s in os.listdir(gridfs_path)] + gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi")) + self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)] for fname in self.files: - with open(fname, 'rb') as gfile: + with open(fname, "rb") as gfile: bucket.upload_from_stream(fname, gfile) def do_task(self): @@ -533,7 +530,7 @@ def do_task(self): def tearDown(self): super(TestGridFsMultiFileDownload, self).tearDown() - self.client.drop_database('perftest') + self.client.drop_database("perftest") if __name__ == "__main__": diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 1494fbedcc..580c5da993 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -15,19 +15,17 @@ """Tools for mocking parts of PyMongo to test other parts.""" import contextlib -from functools import partial import weakref +from functools import partial +from test import client_context -from pymongo import common -from pymongo import MongoClient +from pymongo import MongoClient, common from pymongo.errors import AutoReconnect, NetworkTimeout from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import Pool from pymongo.server_description import ServerDescription -from test import client_context - class MockPool(Pool): def __init__(self, client, pair, *args, **kwargs): @@ -42,14 +40,13 @@ def __init__(self, client, pair, *args, **kwargs): @contextlib.contextmanager def get_socket(self, handler=None): client = self.client - host_and_port = '%s:%s' % (self.mock_host, self.mock_port) + host_and_port = "%s:%s" % (self.mock_host, self.mock_port) if host_and_port in client.mock_down_hosts: - raise AutoReconnect('mock error') + raise AutoReconnect("mock error") assert host_and_port in ( - client.mock_standalones - + client.mock_members - + client.mock_mongoses), "bad host: %s" % host_and_port + client.mock_standalones + client.mock_members + client.mock_mongoses + ), ("bad host: %s" % host_and_port) with Pool.get_socket(self, handler) as sock_info: sock_info.mock_host = self.mock_host @@ -79,34 +76,31 @@ def close(self): class MockMonitor(Monitor): - def __init__( - self, - client, - server_description, - topology, - pool, - topology_settings): + def __init__(self, client, server_description, topology, pool, topology_settings): # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it # to avoid cycles. self.client = weakref.proxy(client) - Monitor.__init__( - self, - server_description, - topology, - pool, - topology_settings) + Monitor.__init__(self, server_description, topology, pool, topology_settings) def _check_once(self): client = self.client address = self._server_description.address - response, rtt = client.mock_hello('%s:%d' % address) + response, rtt = client.mock_hello("%s:%d" % address) return ServerDescription(address, Hello(response), rtt) class MockClient(MongoClient): def __init__( - self, standalones, members, mongoses, hello_hosts=None, - arbiters=None, down_hosts=None, *args, **kwargs): + self, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs + ): """A MongoClient connected to the default server, with a mock topology. standalones, members, mongoses, arbiters, and down_hosts determine the @@ -144,8 +138,8 @@ def __init__( # Hostname -> round trip time self.mock_rtts = {} - kwargs['_pool_class'] = partial(MockPool, self) - kwargs['_monitor_class'] = partial(MockMonitor, self) + kwargs["_pool_class"] = partial(MockPool, self) + kwargs["_monitor_class"] = partial(MockMonitor, self) client_options = client_context.default_client_options.copy() client_options.update(kwargs) @@ -175,53 +169,57 @@ def mock_hello(self, host): max_wire_version = common.MAX_SUPPORTED_WIRE_VERSION max_write_batch_size = self.mock_max_write_batch_sizes.get( - host, common.MAX_WRITE_BATCH_SIZE) + host, common.MAX_WRITE_BATCH_SIZE + ) rtt = self.mock_rtts.get(host, 0) # host is like 'a:1'. if host in self.mock_down_hosts: - raise NetworkTimeout('mock timeout') + raise NetworkTimeout("mock timeout") elif host in self.mock_standalones: response = { - 'ok': 1, + "ok": 1, HelloCompat.LEGACY_CMD: True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } elif host in self.mock_members: - primary = (host == self.mock_primary) + primary = host == self.mock_primary # Simulate a replica set member. response = { - 'ok': 1, + "ok": 1, HelloCompat.LEGACY_CMD: primary, - 'secondary': not primary, - 'setName': 'rs', - 'hosts': self.mock_hello_hosts, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'maxWriteBatchSize': max_write_batch_size} + "secondary": not primary, + "setName": "rs", + "hosts": self.mock_hello_hosts, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } if self.mock_primary: - response['primary'] = self.mock_primary + response["primary"] = self.mock_primary if host in self.mock_arbiters: - response['arbiterOnly'] = True - response['secondary'] = False + response["arbiterOnly"] = True + response["secondary"] = False elif host in self.mock_mongoses: response = { - 'ok': 1, + "ok": 1, HelloCompat.LEGACY_CMD: True, - 'minWireVersion': min_wire_version, - 'maxWireVersion': max_wire_version, - 'msg': 'isdbgrid', - 'maxWriteBatchSize': max_write_batch_size} + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "msg": "isdbgrid", + "maxWriteBatchSize": max_write_batch_size, + } else: # In test_internal_ips(), we try to connect to a host listed # in hello['hosts'] but not publicly accessible. - raise AutoReconnect('Unknown host: %s' % host) + raise AutoReconnect("Unknown host: %s" % host) return response, rtt diff --git a/test/qcheck.py b/test/qcheck.py index 57e0940b72..4cce7b5bc8 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -83,9 +83,7 @@ def gen_unichar(): def gen_unicode(gen_length): - return lambda: "".join([x for x in - gen_list(gen_unichar(), gen_length)() if - x not in ".$"]) + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) def gen_list(generator, gen_length): @@ -93,22 +91,24 @@ def gen_list(generator, gen_length): def gen_datetime(): - return lambda: datetime.datetime(random.randint(1970, 2037), - random.randint(1, 12), - random.randint(1, 28), - random.randint(0, 23), - random.randint(0, 59), - random.randint(0, 59), - random.randint(0, 999) * 1000) + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) def gen_dict(gen_key, gen_value, gen_length): - def a_dict(gen_key, gen_value, length): result = {} for _ in range(length): result[gen_key()] = gen_value() return result + return lambda: a_dict(gen_key, gen_value, gen_length()) @@ -128,6 +128,7 @@ def gen_flags(): flags = flags | re.VERBOSE return flags + return lambda: re.compile(pattern(), gen_flags()) @@ -142,15 +143,17 @@ def gen_dbref(): def gen_mongo_value(depth, ref): - choices = [gen_unicode(gen_range(0, 50)), - gen_printable_string(gen_range(0, 50)), - my_map(gen_string(gen_range(0, 1000)), bytes), - gen_int(), - gen_float(), - gen_boolean(), - gen_datetime(), - gen_objectid(), - lift(None)] + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] if ref: choices.append(gen_dbref()) if depth > 0: @@ -164,9 +167,10 @@ def gen_mongo_list(depth, ref): def gen_mongo_dict(depth, ref=True): - return my_map(gen_dict(gen_unicode(gen_range(0, 20)), - gen_mongo_value(depth - 1, ref), - gen_range(0, 10)), SON) + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) def simplify(case): # TODO this is a hack @@ -236,8 +240,10 @@ def check_unittest(test, predicate, generator): counter_examples = check(predicate, generator) if counter_examples: failures = len(counter_examples) - message = "\n".join([" -> %s" % f for f in - counter_examples[:examples]]) - message = ("found %d counter examples, displaying first %d:\n%s" % - (failures, min(failures, examples), message)) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) test.fail(message) diff --git a/test/test_auth.py b/test/test_auth.py index 5b4ef0c51f..5abdbef3dc 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -17,42 +17,44 @@ import os import sys import threading - from urllib.parse import quote_plus sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, Version, client_context, unittest +from test.utils import ( + AllowListEventListener, + delay, + get_pool, + ignore_deprecations, + rs_or_single_client, + rs_or_single_client_noauth, + single_client, + single_client_noauth, +) + from pymongo import MongoClient, monitoring from pymongo.auth import HAVE_KERBEROS, _build_credentials_tuple from pymongo.errors import OperationFailure from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP -from test import client_context, IntegrationTest, SkipTest, unittest, Version -from test.utils import (delay, - get_pool, - ignore_deprecations, - single_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client_noauth, - AllowListEventListener) # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. -GSSAPI_HOST = os.environ.get('GSSAPI_HOST') -GSSAPI_PORT = int(os.environ.get('GSSAPI_PORT', '27017')) -GSSAPI_PRINCIPAL = os.environ.get('GSSAPI_PRINCIPAL') -GSSAPI_SERVICE_NAME = os.environ.get('GSSAPI_SERVICE_NAME', 'mongodb') -GSSAPI_CANONICALIZE = os.environ.get('GSSAPI_CANONICALIZE', 'false') -GSSAPI_SERVICE_REALM = os.environ.get('GSSAPI_SERVICE_REALM') -GSSAPI_PASS = os.environ.get('GSSAPI_PASS') -GSSAPI_DB = os.environ.get('GSSAPI_DB', 'test') - -SASL_HOST = os.environ.get('SASL_HOST') -SASL_PORT = int(os.environ.get('SASL_PORT', '27017')) -SASL_USER = os.environ.get('SASL_USER') -SASL_PASS = os.environ.get('SASL_PASS') -SASL_DB = os.environ.get('SASL_DB', '$external') +GSSAPI_HOST = os.environ.get("GSSAPI_HOST") +GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) +GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL") +GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb") +GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false") +GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM") +GSSAPI_PASS = os.environ.get("GSSAPI_PASS") +GSSAPI_DB = os.environ.get("GSSAPI_DB", "test") + +SASL_HOST = os.environ.get("SASL_HOST") +SASL_PORT = int(os.environ.get("SASL_PORT", "27017")) +SASL_USER = os.environ.get("SASL_USER") +SASL_PASS = os.environ.get("SASL_PASS") +SASL_DB = os.environ.get("SASL_DB", "$external") class AutoAuthenticateThread(threading.Thread): @@ -71,7 +73,7 @@ def __init__(self, collection): self.success = False def run(self): - assert self.collection.find_one({'$where': delay(1)}) is not None + assert self.collection.find_one({"$where": delay(1)}) is not None self.success = True @@ -82,36 +84,33 @@ class TestGSSAPI(unittest.TestCase): @classmethod def setUpClass(cls): if not HAVE_KERBEROS: - raise SkipTest('Kerberos module not available.') + raise SkipTest("Kerberos module not available.") if not GSSAPI_HOST or not GSSAPI_PRINCIPAL: - raise SkipTest( - 'Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI') + raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI") cls.service_realm_required = ( - GSSAPI_SERVICE_REALM is not None and - GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL) - mech_properties = 'SERVICE_NAME:%s' % (GSSAPI_SERVICE_NAME,) - mech_properties += ( - ',CANONICALIZE_HOST_NAME:%s' % (GSSAPI_CANONICALIZE,)) + GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL + ) + mech_properties = "SERVICE_NAME:%s" % (GSSAPI_SERVICE_NAME,) + mech_properties += ",CANONICALIZE_HOST_NAME:%s" % (GSSAPI_CANONICALIZE,) if GSSAPI_SERVICE_REALM is not None: - mech_properties += ',SERVICE_REALM:%s' % (GSSAPI_SERVICE_REALM,) + mech_properties += ",SERVICE_REALM:%s" % (GSSAPI_SERVICE_REALM,) cls.mech_properties = mech_properties def test_credentials_hashing(self): # GSSAPI credentials are properly hashed. - creds0 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', {}, None) + creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) creds1 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'A'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) creds2 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'A'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) creds3 = _build_credentials_tuple( - 'GSSAPI', None, 'user', 'pass', - {'authmechanismproperties': {'SERVICE_NAME': 'B'}}, None) + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) self.assertEqual(1, len(set([creds1, creds2]))) self.assertEqual(3, len(set([creds0, creds1, creds2, creds3]))) @@ -120,24 +119,28 @@ def test_credentials_hashing(self): def test_gssapi_simple(self): assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), - GSSAPI_PASS, - GSSAPI_HOST, - GSSAPI_PORT)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=" "GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) else: - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'GSSAPI' % (quote_plus(GSSAPI_PRINCIPAL), - GSSAPI_HOST, - GSSAPI_PORT)) + uri = "mongodb://%s@%s:%d/?authMechanism=" "GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) if not self.service_realm_required: # Without authMechanismProperties. - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI') + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) client[GSSAPI_DB].collection.find_one() @@ -146,60 +149,68 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Authenticate with authMechanismProperties. - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. - mech_uri = uri + '&authMechanismProperties=%s' % (self.mech_properties,) + mech_uri = uri + "&authMechanismProperties=%s" % (self.mech_properties,) client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: if not self.service_realm_required: # Without authMechanismProperties - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) client[GSSAPI_DB].list_collection_names() - uri = uri + '&replicaSet=%s' % (str(set_name),) + uri = uri + "&replicaSet=%s" % (str(set_name),) client = MongoClient(uri) client[GSSAPI_DB].list_collection_names() # With authMechanismProperties - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties, - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) client[GSSAPI_DB].list_collection_names() - mech_uri = mech_uri + '&replicaSet=%s' % (str(set_name),) + mech_uri = mech_uri + "&replicaSet=%s" % (str(set_name),) client = MongoClient(mech_uri) client[GSSAPI_DB].list_collection_names() @ignore_deprecations def test_gssapi_threaded(self): - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) # Authentication succeeded? client.server_info() @@ -213,7 +224,7 @@ def test_gssapi_threaded(self): if not collection.count_documents({}): try: collection.drop() - collection.insert_one({'_id': 1}) + collection.insert_one({"_id": 1}) except OperationFailure: raise SkipTest("User must be able to write.") @@ -226,15 +237,17 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: - client = MongoClient(GSSAPI_HOST, - GSSAPI_PORT, - username=GSSAPI_PRINCIPAL, - password=GSSAPI_PASS, - authMechanism='GSSAPI', - authMechanismProperties=self.mech_properties, - replicaSet=set_name) + client = MongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) # Succeeded? client.server_info() @@ -250,101 +263,109 @@ def test_gssapi_threaded(self): class TestSASLPlain(unittest.TestCase): - @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: - raise SkipTest('Must set SASL_HOST, ' - 'SASL_USER, and SASL_PASS to test SASL') + raise SkipTest("Must set SASL_HOST, " "SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - client = MongoClient(SASL_HOST, - SASL_PORT, - username=SASL_USER, - password=SASL_PASS, - authSource=SASL_DB, - authMechanism='PLAIN') + client = MongoClient( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() assert SASL_USER is not None assert SASL_PASS is not None - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, SASL_DB)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) client = MongoClient(uri) client.ldap.test.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get('setName') + set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") if set_name: - client = MongoClient(SASL_HOST, - SASL_PORT, - replicaSet=set_name, - username=SASL_USER, - password=SASL_PASS, - authSource=SASL_DB, - authMechanism='PLAIN') + client = MongoClient( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) client.ldap.test.find_one() - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s;replicaSet=%s' % (quote_plus(SASL_USER), - quote_plus(SASL_PASS), - SASL_HOST, SASL_PORT, - SASL_DB, str(set_name))) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) client = MongoClient(uri) client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): def auth_string(user, password): - uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;' - 'authSource=%s' % (quote_plus(user), - quote_plus(password), - SASL_HOST, SASL_PORT, SASL_DB)) + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) return uri - bad_user = MongoClient(auth_string('not-user', SASL_PASS)) - bad_pwd = MongoClient(auth_string(SASL_USER, 'not-pwd')) + bad_user = MongoClient(auth_string("not-user", SASL_PASS)) + bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, 'ping') - self.assertRaises(OperationFailure, bad_pwd.admin.command, 'ping') + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") class TestSCRAMSHA1(IntegrationTest): - @client_context.require_auth def setUp(self): super(TestSCRAMSHA1, self).setUp() - client_context.create_user( - 'pymongo_test', 'user', 'pass', roles=['userAdmin', 'readWrite']) + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) def tearDown(self): - client_context.drop_user('pymongo_test', 'user') + client_context.drop_user("pymongo_test", "user") super(TestSCRAMSHA1, self).tearDown() def test_scram_sha1(self): host, port = client_context.host, client_context.port client = rs_or_single_client_noauth( - 'mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' - % (host, port)) - client.pymongo_test.command('dbstats') + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + client.pymongo_test.command("dbstats") if client_context.is_rs: - uri = ('mongodb://user:pass' - '@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1' - '&replicaSet=%s' % (host, port, - client_context.replica_set_name)) + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, client_context.replica_set_name) + ) client = single_client_noauth(uri) - client.pymongo_test.command('dbstats') - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - db.command('dbstats') + client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") # https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation class TestSCRAM(IntegrationTest): - @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): @@ -362,114 +383,118 @@ def tearDown(self): def test_scram_skip_empty_exchange(self): listener = AllowListEventListener("saslStart", "saslContinue") client_context.create_user( - 'testscram', 'sha256', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram', - event_listeners=[listener]) - client.testscram.command('dbstats') + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + client.testscram.command("dbstats") if client_context.version < (4, 4, -1): # Assert we sent the skipEmptyExchange option. - first_event = listener.results['started'][0] - self.assertEqual(first_event.command_name, 'saslStart') - self.assertEqual( - first_event.command['options'], {'skipEmptyExchange': True}) + first_event = listener.results["started"][0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) # Assert the third exchange was skipped on servers that support it. # Note that the first exchange occurs on the connection handshake. started = listener.started_command_names() if client_context.version.at_least(4, 4, -1): - self.assertEqual(started, ['saslContinue']) + self.assertEqual(started, ["saslContinue"]) else: - self.assertEqual( - started, ['saslStart', 'saslContinue', 'saslContinue']) + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) def test_scram(self): # Step 1: create users client_context.create_user( - 'testscram', 'sha1', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-1']) + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) client_context.create_user( - 'testscram', 'sha256', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client_context.create_user( - 'testscram', 'both', 'pwd', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-1', 'SCRAM-SHA-256']) + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) # Step 2: verify auth success cases - client = rs_or_single_client_noauth( - username='sha1', password='pwd', authSource='testscram') - client.testscram.command('dbstats') + client = rs_or_single_client_noauth(username="sha1", password="pwd", authSource="testscram") + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha1', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-1') - client.testscram.command('dbstats') + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram') - client.testscram.command('dbstats') + username="sha256", password="pwd", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 client = rs_or_single_client_noauth( - username='both', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-1') - client.testscram.command('dbstats') + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='both', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") self.listener.results.clear() client = rs_or_single_client_noauth( - username='both', password='pwd', authSource='testscram', - event_listeners=[self.listener]) - client.testscram.command('dbstats') + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + client.testscram.command("dbstats") if client_context.version.at_least(4, 4, -1): # Speculative authentication in 4.4+ sends saslStart with the # handshake. - self.assertEqual(self.listener.results['started'], []) + self.assertEqual(self.listener.results["started"], []) else: - started = self.listener.results['started'][0] - self.assertEqual(started.command.get('mechanism'), 'SCRAM-SHA-256') + started = self.listener.results["started"][0] + self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions client = rs_or_single_client_noauth( - username='sha1', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-256') + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) with self.assertRaises(OperationFailure): - client.testscram.command('dbstats') + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='sha256', password='pwd', authSource='testscram', - authMechanism='SCRAM-SHA-1') + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) with self.assertRaises(OperationFailure): - client.testscram.command('dbstats') + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='not-a-user', password='pwd', authSource='testscram') + username="not-a-user", password="pwd", authSource="testscram" + ) with self.assertRaises(OperationFailure): - client.testscram.command('dbstats') + client.testscram.command("dbstats") if client_context.is_rs: host, port = client_context.host, client_context.port - uri = ('mongodb://both:pwd@%s:%d/testscram' - '?replicaSet=%s' % (host, port, - client_context.replica_set_name)) + uri = "mongodb://both:pwd@%s:%d/testscram" "?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - client.testscram.command('dbstats') - db = client.get_database( - 'testscram', read_preference=ReadPreference.SECONDARY) - db.command('dbstats') + client.testscram.command("dbstats") + db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) + db.command("dbstats") - @unittest.skipUnless(HAVE_STRINGPREP, 'Cannot test without stringprep') + @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep") def test_scram_saslprep(self): # Step 4: test SASLprep host, port = client_context.host, client_context.port @@ -478,52 +503,59 @@ def test_scram_saslprep(self): # becomes 'IX'. SASLprep is only supported when the standard # library provides stringprep. client_context.create_user( - 'testscram', '\u2168', '\u2163', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client_context.create_user( - 'testscram', 'IX', 'IX', roles=['dbOwner'], - mechanisms=['SCRAM-SHA-256']) + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) client = rs_or_single_client_noauth( - username='\u2168', password='\u2163', authSource='testscram') - client.testscram.command('dbstats') + username="\u2168", password="\u2163", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='\u2168', password='\u2163', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='\u2168', password='IV', authSource='testscram') - client.testscram.command('dbstats') + username="\u2168", password="IV", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='IX', password='I\u00ADX', authSource='testscram') - client.testscram.command('dbstats') + username="IX", password="I\u00ADX", authSource="testscram" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='IX', password='I\u00ADX', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - username='IX', password='IX', authSource='testscram', - authMechanism='SCRAM-SHA-256') - client.testscram.command('dbstats') + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + client.testscram.command("dbstats") client = rs_or_single_client_noauth( - 'mongodb://\u2168:\u2163@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://\u2168:IV@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth("mongodb://\u2168:IV@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") - client = rs_or_single_client_noauth( - 'mongodb://IX:I\u00ADX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') - client = rs_or_single_client_noauth( - 'mongodb://IX:IX@%s:%d/testscram' % (host, port)) - client.testscram.command('dbstats') + client = rs_or_single_client_noauth("mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") + client = rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) + client.testscram.command("dbstats") def test_cache(self): client = single_client() @@ -532,7 +564,7 @@ def test_cache(self): self.assertIsNotNone(cache) self.assertIsNone(cache.data) # Force authentication. - client.admin.command('ping') + client.admin.command("ping") cache = credentials.cache self.assertIsNotNone(cache) data = cache.data @@ -547,7 +579,7 @@ def test_cache(self): def test_scram_threaded(self): coll = client_context.client.db.test coll.drop() - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate coll = rs_or_single_client().db.test @@ -562,71 +594,68 @@ def test_scram_threaded(self): class TestAuthURIOptions(IntegrationTest): - @client_context.require_auth def setUp(self): super(TestAuthURIOptions, self).setUp() - client_context.create_user('admin', 'admin', 'pass') - client_context.create_user( - 'pymongo_test', 'user', 'pass', ['userAdmin', 'readWrite']) + client_context.create_user("admin", "admin", "pass") + client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): - client_context.drop_user('pymongo_test', 'user') - client_context.drop_user('admin', 'admin') + client_context.drop_user("pymongo_test", "user") + client_context.drop_user("admin", "admin") super(TestAuthURIOptions, self).tearDown() def test_uri_options(self): # Test default to admin host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth( - 'mongodb://admin:pass@%s:%d' % (host, port)) - self.assertTrue(client.admin.command('dbstats')) + client = rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + self.assertTrue(client.admin.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://admin:pass@%s:%d/?replicaSet=%s' % ( - host, port, client_context.replica_set_name)) + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - self.assertTrue(client.admin.command('dbstats')) - db = client.get_database( - 'admin', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertTrue(client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test explicit database - uri = 'mongodb://user:pass@%s:%d/pymongo_test' % (host, port) + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) + self.assertRaises(OperationFailure, client.admin.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s' % ( - host, port, client_context.replica_set_name)) + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + client_context.replica_set_name, + ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.admin.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertRaises(OperationFailure, client.admin.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) # Test authSource - uri = ('mongodb://user:pass@%s:%d' - '/pymongo_test2?authSource=pymongo_test' % (host, port)) + uri = "mongodb://user:pass@%s:%d" "/pymongo_test2?authSource=pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) + self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: - uri = ('mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=' - '%s;authSource=pymongo_test' % ( - host, port, client_context.replica_set_name)) + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) + ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, - client.pymongo_test2.command, 'dbstats') - self.assertTrue(client.pymongo_test.command('dbstats')) - db = client.get_database( - 'pymongo_test', read_preference=ReadPreference.SECONDARY) - self.assertTrue(db.command('dbstats')) + self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + self.assertTrue(client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(db.command("dbstats")) if __name__ == "__main__": diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index e78b4b209a..9f2fa374ac 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -21,12 +21,11 @@ sys.path[0:0] = [""] -from pymongo import MongoClient from test import unittest +from pymongo import MongoClient -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'auth') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") class TestAuthSpec(unittest.TestCase): @@ -34,11 +33,10 @@ class TestAuthSpec(unittest.TestCase): def create_test(test_case): - def run_test(self): - uri = test_case['uri'] - valid = test_case['valid'] - credential = test_case.get('credential') + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") if not valid: self.assertRaises(Exception, MongoClient, uri, connect=False) @@ -49,39 +47,34 @@ def run_test(self): self.assertIsNone(credentials) else: self.assertIsNotNone(credentials) - self.assertEqual(credentials.username, credential['username']) - self.assertEqual(credentials.password, credential['password']) - self.assertEqual(credentials.source, credential['source']) - if credential['mechanism'] is not None: - self.assertEqual( - credentials.mechanism, credential['mechanism']) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) else: - self.assertEqual(credentials.mechanism, 'DEFAULT') - expected = credential['mechanism_properties'] + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] if expected is not None: actual = credentials.mechanism_properties for key, val in expected.items(): - if 'SERVICE_NAME' in expected: - self.assertEqual( - actual.service_name, expected['SERVICE_NAME']) - elif 'CANONICALIZE_HOST_NAME' in expected: - self.assertEqual( - actual.canonicalize_host_name, - expected['CANONICALIZE_HOST_NAME']) - elif 'SERVICE_REALM' in expected: + if "SERVICE_NAME" in expected: + self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) + elif "CANONICALIZE_HOST_NAME" in expected: self.assertEqual( - actual.service_realm, - expected['SERVICE_REALM']) - elif 'AWS_SESSION_TOKEN' in expected: + actual.canonicalize_host_name, expected["CANONICALIZE_HOST_NAME"] + ) + elif "SERVICE_REALM" in expected: + self.assertEqual(actual.service_realm, expected["SERVICE_REALM"]) + elif "AWS_SESSION_TOKEN" in expected: self.assertEqual( - actual.aws_session_token, - expected['AWS_SESSION_TOKEN']) + actual.aws_session_token, expected["AWS_SESSION_TOKEN"] + ) else: - self.fail('Unhandled property: %s' % (key,)) + self.fail("Unhandled property: %s" % (key,)) else: - if credential['mechanism'] == 'MONGODB-AWS': - self.assertIsNone( - credentials.mechanism_properties.aws_session_token) + if credential["mechanism"] == "MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) else: self.assertIsNone(credentials.mechanism_properties) @@ -89,19 +82,16 @@ def run_test(self): def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as auth_tests: - test_cases = json.load(auth_tests)['tests'] + test_cases = json.load(auth_tests)["tests"] for test_case in test_cases: - if test_case.get('optional', False): + if test_case.get("optional", False): continue test_method = create_test(test_case) - name = str(test_case['description'].lower().replace(' ', '_')) - setattr( - TestAuthSpec, - 'test_%s_%s' % (test_suffix, name), - test_method) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, "test_%s_%s" % (test_suffix, name), test_method) create_tests() diff --git a/test/test_binary.py b/test/test_binary.py index 4bbda0c9d4..6352e93d2c 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -25,20 +25,18 @@ sys.path[0:0] = [""] -import bson +from test import IntegrationTest, client_context, unittest +from test.utils import ignore_deprecations +import bson from bson import decode, encode from bson.binary import * from bson.codec_options import CodecOptions from bson.son import SON - from pymongo.common import validate_uuid_representation from pymongo.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import ignore_deprecations - class TestBinary(unittest.TestCase): csharp_data: bytes @@ -48,37 +46,39 @@ class TestBinary(unittest.TestCase): def setUpClass(cls): # Generated by the Java driver from_java = ( - b'bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu' - b'Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND' - b'ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+' - b'XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1' - b'aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR' - b'jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA' - b'AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z' - b'DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf' - b'aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx' - b'29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My' - b'1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB' - b'W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp' - b'bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc' - b'0MQAA') + b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" + b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" + b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" + b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" + b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" + b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" + b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" + b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" + b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" + b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" + b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" + b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" + b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" + b"0MQAA" + ) cls.java_data = base64.b64decode(from_java) # Generated by the .net driver from_csharp = ( - b'ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl' - b'iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2' - b'ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V' - b'pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl' - b'AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A' - b'ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z' - b'oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU' - b'zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn' - b'dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA' - b'CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT' - b'QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP' - b'MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00' - b'ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=') + b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" + b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" + b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" + b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" + b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" + b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" + b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" + b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" + b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" + b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" + b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" + b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" + b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" + ) cls.csharp_data = base64.b64decode(from_csharp) def test_binary(self): @@ -124,20 +124,15 @@ def test_equality(self): def test_repr(self): one = Binary(b"hello world") - self.assertEqual(repr(one), - "Binary(%s, 0)" % (repr(b"hello world"),)) + self.assertEqual(repr(one), "Binary(%s, 0)" % (repr(b"hello world"),)) two = Binary(b"hello world", 2) - self.assertEqual(repr(two), - "Binary(%s, 2)" % (repr(b"hello world"),)) + self.assertEqual(repr(two), "Binary(%s, 2)" % (repr(b"hello world"),)) three = Binary(b"\x08\xFF") - self.assertEqual(repr(three), - "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(three), "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) four = Binary(b"\x08\xFF", 2) - self.assertEqual(repr(four), - "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(four), "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) five = Binary(b"test", 100) - self.assertEqual(repr(five), - "Binary(%s, 100)" % (repr(b"test"),)) + self.assertEqual(repr(five), "Binary(%s, 100)" % (repr(b"test"),)) def test_hash(self): one = Binary(b"hello world") @@ -152,9 +147,11 @@ def test_uuid_subtype_4(self): expected_bin = Binary(expected_uuid.bytes, 4) doc = {"uuid": expected_bin} encoded = encode(doc) - for uuid_rep in (UuidRepresentation.PYTHON_LEGACY, - UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY): + for uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + ): opts = CodecOptions(uuid_representation=uuid_rep) self.assertEqual(expected_bin, decode(encoded, opts)["uuid"]) opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) @@ -165,39 +162,39 @@ def test_legacy_java_uuid(self): data = self.java_data docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=STANDARD)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) @client_context.require_connection @@ -205,21 +202,19 @@ def test_legacy_java_uuid_roundtrip(self): data = self.java_data docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) - client_context.client.pymongo_test.drop_collection('java_uuid') + client_context.client.pymongo_test.drop_collection("java_uuid") db = client_context.client.pymongo_test - coll = db.get_collection( - 'java_uuid', CodecOptions(uuid_representation=JAVA_LEGACY)) + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) coll.insert_many(docs) self.assertEqual(5, coll.count_documents({})) for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - coll = db.get_collection( - 'java_uuid', CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - client_context.client.pymongo_test.drop_collection('java_uuid') + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("java_uuid") def test_legacy_csharp_uuid(self): data = self.csharp_data @@ -227,39 +222,39 @@ def test_legacy_csharp_uuid(self): # Test decoding docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) for d in docs: - self.assertNotEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) for d in docs: - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) # Test encoding - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=PYTHON_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=STANDARD)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=STANDARD)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=JAVA_LEGACY)) for doc in docs] + ) self.assertNotEqual(data, encoded) - encoded = b''.join([ - encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) - for doc in docs]) + encoded = b"".join( + [encode(doc, False, CodecOptions(uuid_representation=CSHARP_LEGACY)) for doc in docs] + ) self.assertEqual(data, encoded) @client_context.require_connection @@ -267,29 +262,25 @@ def test_legacy_csharp_uuid_roundtrip(self): data = self.csharp_data docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) - client_context.client.pymongo_test.drop_collection('csharp_uuid') + client_context.client.pymongo_test.drop_collection("csharp_uuid") db = client_context.client.pymongo_test - coll = db.get_collection( - 'csharp_uuid', CodecOptions(uuid_representation=CSHARP_LEGACY)) + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) coll.insert_many(docs) self.assertEqual(5, coll.count_documents({})) for d in coll.find(): - self.assertEqual(d['newguid'], uuid.UUID(d['newguidstring'])) + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - coll = db.get_collection( - 'csharp_uuid', CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) for d in coll.find(): - self.assertNotEqual(d['newguid'], d['newguidstring']) - client_context.client.pymongo_test.drop_collection('csharp_uuid') + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("csharp_uuid") def test_uri_to_uuid(self): uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" client = MongoClient(uri, connect=False) - self.assertEqual( - client.pymongo_test.test.codec_options.uuid_representation, - CSHARP_LEGACY) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) @client_context.require_connection def test_uuid_queries(self): @@ -298,37 +289,39 @@ def test_uuid_queries(self): coll.drop() uu = uuid.uuid4() - coll.insert_one({'uuid': Binary(uu.bytes, 3)}) + coll.insert_one({"uuid": Binary(uu.bytes, 3)}) self.assertEqual(1, coll.count_documents({})) # Test regular UUID queries (using subtype 4). coll = db.get_collection( - "test", CodecOptions( - uuid_representation=UuidRepresentation.STANDARD)) - self.assertEqual(0, coll.count_documents({'uuid': uu})) - coll.insert_one({'uuid': uu}) + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, coll.count_documents({"uuid": uu})) + coll.insert_one({"uuid": uu}) self.assertEqual(2, coll.count_documents({})) - docs = list(coll.find({'uuid': uu})) + docs = list(coll.find({"uuid": uu})) self.assertEqual(1, len(docs)) - self.assertEqual(uu, docs[0]['uuid']) + self.assertEqual(uu, docs[0]["uuid"]) # Test both. uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) - predicate = {'uuid': {'$in': [uu, uu_legacy]}} + predicate = {"uuid": {"$in": [uu, uu_legacy]}} self.assertEqual(2, coll.count_documents(predicate)) docs = list(coll.find(predicate)) self.assertEqual(2, len(docs)) coll.drop() def test_pickle(self): - b1 = Binary(b'123', 2) + b1 = Binary(b"123", 2) # For testing backwards compatibility with pre-2.4 pymongo - p = (b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" - b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" - b"\x05K\x02sb.") + p = ( + b"\x80\x03cbson.binary\nBinary\nq\x00C\x03123q\x01\x85q" + b"\x02\x81q\x03}q\x04X\x10\x00\x00\x00_Binary__subtypeq" + b"\x05K\x02sb." + ) - if not sys.version.startswith('3.0'): + if not sys.version.startswith("3.0"): self.assertEqual(b1, pickle.loads(p)) for proto in range(pickle.HIGHEST_PROTOCOL + 1): @@ -344,15 +337,15 @@ def test_pickle(self): self.assertEqual(uul, pickle.loads(pickle.dumps(uul, proto))) def test_buffer_protocol(self): - b0 = Binary(b'123', 2) + b0 = Binary(b"123", 2) - self.assertEqual(b0, Binary(memoryview(b'123'), 2)) - self.assertEqual(b0, Binary(bytearray(b'123'), 2)) - with mmap.mmap(-1, len(b'123')) as mm: - mm.write(b'123') + self.assertEqual(b0, Binary(memoryview(b"123"), 2)) + self.assertEqual(b0, Binary(bytearray(b"123"), 2)) + with mmap.mmap(-1, len(b"123")) as mm: + mm.write(b"123") mm.seek(0) self.assertEqual(b0, Binary(mm, 2)) - self.assertEqual(b0, Binary(array.array('B', b'123'), 2)) + self.assertEqual(b0, Binary(array.array("B", b"123"), 2)) class TestUuidSpecExplicitCoding(unittest.TestCase): @@ -370,40 +363,37 @@ def _hex_to_bytes(hexstring): # Explicit encoding prose test #1 def test_encoding_1(self): obj = Binary.from_uuid(self.uuid) - expected_obj = Binary( - self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + expected_obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) self.assertEqual(obj, expected_obj) - def _test_encoding_w_uuid_rep( - self, uuid_rep, expected_hexstring, expected_subtype): + def _test_encoding_w_uuid_rep(self, uuid_rep, expected_hexstring, expected_subtype): obj = Binary.from_uuid(self.uuid, uuid_rep) - expected_obj = Binary( - self._hex_to_bytes(expected_hexstring), expected_subtype) + expected_obj = Binary(self._hex_to_bytes(expected_hexstring), expected_subtype) self.assertEqual(obj, expected_obj) # Explicit encoding prose test #2 def test_encoding_2(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.STANDARD, - "00112233445566778899AABBCCDDEEFF", 4) + UuidRepresentation.STANDARD, "00112233445566778899AABBCCDDEEFF", 4 + ) # Explicit encoding prose test #3 def test_encoding_3(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.JAVA_LEGACY, - "7766554433221100FFEEDDCCBBAA9988", 3) + UuidRepresentation.JAVA_LEGACY, "7766554433221100FFEEDDCCBBAA9988", 3 + ) # Explicit encoding prose test #4 def test_encoding_4(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.CSHARP_LEGACY, - "33221100554477668899AABBCCDDEEFF", 3) + UuidRepresentation.CSHARP_LEGACY, "33221100554477668899AABBCCDDEEFF", 3 + ) # Explicit encoding prose test #5 def test_encoding_5(self): self._test_encoding_w_uuid_rep( - UuidRepresentation.PYTHON_LEGACY, - "00112233445566778899AABBCCDDEEFF", 3) + UuidRepresentation.PYTHON_LEGACY, "00112233445566778899AABBCCDDEEFF", 3 + ) # Explicit encoding prose test #6 def test_encoding_6(self): @@ -412,17 +402,18 @@ def test_encoding_6(self): # Explicit decoding prose test #1 def test_decoding_1(self): - obj = Binary( - self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) + obj = Binary(self._hex_to_bytes("00112233445566778899AABBCCDDEEFF"), 4) # Case i: self.assertEqual(obj.as_uuid(), self.uuid) # Case ii: self.assertEqual(obj.as_uuid(UuidRepresentation.STANDARD), self.uuid) # Cases iii-vi: - for uuid_rep in (UuidRepresentation.JAVA_LEGACY, - UuidRepresentation.CSHARP_LEGACY, - UuidRepresentation.PYTHON_LEGACY): + for uuid_rep in ( + UuidRepresentation.JAVA_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.PYTHON_LEGACY, + ): with self.assertRaises(ValueError): obj.as_uuid(uuid_rep) @@ -433,31 +424,29 @@ def _test_decoding_legacy(self, hexstring, uuid_rep): with self.assertRaises(ValueError): obj.as_uuid() # Cases ii-iii: - for rep in (UuidRepresentation.STANDARD, - UuidRepresentation.UNSPECIFIED): + for rep in (UuidRepresentation.STANDARD, UuidRepresentation.UNSPECIFIED): with self.assertRaises(ValueError): obj.as_uuid(rep) # Case iv: - self.assertEqual(obj.as_uuid(uuid_rep), - self.uuid) + self.assertEqual(obj.as_uuid(uuid_rep), self.uuid) # Explicit decoding prose test #2 def test_decoding_2(self): self._test_decoding_legacy( - "7766554433221100FFEEDDCCBBAA9988", - UuidRepresentation.JAVA_LEGACY) + "7766554433221100FFEEDDCCBBAA9988", UuidRepresentation.JAVA_LEGACY + ) # Explicit decoding prose test #3 def test_decoding_3(self): self._test_decoding_legacy( - "33221100554477668899AABBCCDDEEFF", - UuidRepresentation.CSHARP_LEGACY) + "33221100554477668899AABBCCDDEEFF", UuidRepresentation.CSHARP_LEGACY + ) # Explicit decoding prose test #4 def test_decoding_4(self): self._test_decoding_legacy( - "00112233445566778899AABBCCDDEEFF", - UuidRepresentation.PYTHON_LEGACY) + "00112233445566778899AABBCCDDEEFF", UuidRepresentation.PYTHON_LEGACY + ) class TestUuidSpecImplicitCoding(IntegrationTest): @@ -474,95 +463,90 @@ def _hex_to_bytes(hexstring): def _get_coll_w_uuid_rep(self, uuid_rep): codec_options = self.client.codec_options.with_options( - uuid_representation=validate_uuid_representation(None, uuid_rep)) + uuid_representation=validate_uuid_representation(None, uuid_rep) + ) coll = self.db.get_collection( - 'pymongo_test', codec_options=codec_options, - write_concern=WriteConcern("majority")) + "pymongo_test", codec_options=codec_options, write_concern=WriteConcern("majority") + ) return coll def _test_encoding(self, uuid_rep, expected_hexstring, expected_subtype): coll = self._get_coll_w_uuid_rep(uuid_rep) coll.delete_many({}) - coll.insert_one({'_id': self.uuid}) + coll.insert_one({"_id": self.uuid}) self.assertTrue( - coll.find_one({"_id": Binary( - self._hex_to_bytes(expected_hexstring), expected_subtype)})) + coll.find_one({"_id": Binary(self._hex_to_bytes(expected_hexstring), expected_subtype)}) + ) # Implicit encoding prose test #1 def test_encoding_1(self): - self._test_encoding( - "javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3) + self._test_encoding("javaLegacy", "7766554433221100FFEEDDCCBBAA9988", 3) # Implicit encoding prose test #2 def test_encoding_2(self): - self._test_encoding( - "csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3) + self._test_encoding("csharpLegacy", "33221100554477668899AABBCCDDEEFF", 3) # Implicit encoding prose test #3 def test_encoding_3(self): - self._test_encoding( - "pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3) + self._test_encoding("pythonLegacy", "00112233445566778899AABBCCDDEEFF", 3) # Implicit encoding prose test #4 def test_encoding_4(self): - self._test_encoding( - "standard", "00112233445566778899AABBCCDDEEFF", 4) + self._test_encoding("standard", "00112233445566778899AABBCCDDEEFF", 4) # Implicit encoding prose test #5 def test_encoding_5(self): with self.assertRaises(ValueError): - self._test_encoding( - "unspecifed", "dummy", -1) - - def _test_decoding(self, client_uuid_representation_string, - legacy_field_uuid_representation, - expected_standard_field_value, - expected_legacy_field_value): + self._test_encoding("unspecifed", "dummy", -1) + + def _test_decoding( + self, + client_uuid_representation_string, + legacy_field_uuid_representation, + expected_standard_field_value, + expected_legacy_field_value, + ): coll = self._get_coll_w_uuid_rep(client_uuid_representation_string) coll.drop() standard_val = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) legacy_val = Binary.from_uuid(self.uuid, legacy_field_uuid_representation) - coll.insert_one({'standard': standard_val, 'legacy': legacy_val}) + coll.insert_one({"standard": standard_val, "legacy": legacy_val}) doc = coll.find_one() - self.assertEqual(doc['standard'], expected_standard_field_value) - self.assertEqual(doc['legacy'], expected_legacy_field_value) + self.assertEqual(doc["standard"], expected_standard_field_value) + self.assertEqual(doc["legacy"], expected_legacy_field_value) # Implicit decoding prose test #1 def test_decoding_1(self): - standard_binary = Binary.from_uuid( - self.uuid, UuidRepresentation.STANDARD) + standard_binary = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) self._test_decoding( - "javaLegacy", UuidRepresentation.JAVA_LEGACY, - standard_binary, self.uuid) + "javaLegacy", UuidRepresentation.JAVA_LEGACY, standard_binary, self.uuid + ) self._test_decoding( - "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, - standard_binary, self.uuid) + "csharpLegacy", UuidRepresentation.CSHARP_LEGACY, standard_binary, self.uuid + ) self._test_decoding( - "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, - standard_binary, self.uuid) + "pythonLegacy", UuidRepresentation.PYTHON_LEGACY, standard_binary, self.uuid + ) # Implicit decoding pose test #2 def test_decoding_2(self): - legacy_binary = Binary.from_uuid( - self.uuid, UuidRepresentation.PYTHON_LEGACY) - self._test_decoding( - "standard", UuidRepresentation.PYTHON_LEGACY, - self.uuid, legacy_binary) + legacy_binary = Binary.from_uuid(self.uuid, UuidRepresentation.PYTHON_LEGACY) + self._test_decoding("standard", UuidRepresentation.PYTHON_LEGACY, self.uuid, legacy_binary) # Implicit decoding pose test #3 def test_decoding_3(self): - expected_standard_value = Binary.from_uuid( - self.uuid, UuidRepresentation.STANDARD) - for legacy_uuid_rep in (UuidRepresentation.PYTHON_LEGACY, - UuidRepresentation.CSHARP_LEGACY, - UuidRepresentation.JAVA_LEGACY): - expected_legacy_value = Binary.from_uuid( - self.uuid, legacy_uuid_rep) + expected_standard_value = Binary.from_uuid(self.uuid, UuidRepresentation.STANDARD) + for legacy_uuid_rep in ( + UuidRepresentation.PYTHON_LEGACY, + UuidRepresentation.CSHARP_LEGACY, + UuidRepresentation.JAVA_LEGACY, + ): + expected_legacy_value = Binary.from_uuid(self.uuid, legacy_uuid_rep) self._test_decoding( - "unspecified", legacy_uuid_rep, - expected_standard_value, expected_legacy_value) + "unspecified", legacy_uuid_rep, expected_standard_value, expected_legacy_value + ) if __name__ == "__main__": diff --git a/test/test_bson.py b/test/test_bson.py index 7052042ca8..f8f587567d 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -21,44 +21,43 @@ import datetime import mmap import os +import pickle import re import sys import tempfile import uuid -import pickle - -from collections import abc, OrderedDict +from collections import OrderedDict, abc from io import BytesIO sys.path[0:0] = [""] +from test import qcheck, unittest +from test.utils import ExceptionCatchingThread + import bson -from bson import (BSON, - decode, - decode_all, - decode_file_iter, - decode_iter, - encode, - EPOCH_AWARE, - is_valid, - Regex) +from bson import ( + BSON, + EPOCH_AWARE, + Regex, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, + is_valid, +) from bson.binary import Binary, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions +from bson.dbref import DBRef +from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 +from bson.max_key import MaxKey +from bson.min_key import MinKey from bson.objectid import ObjectId -from bson.dbref import DBRef from bson.son import SON from bson.timestamp import Timestamp -from bson.errors import (InvalidBSON, - InvalidDocument) -from bson.max_key import MaxKey -from bson.min_key import MinKey -from bson.tz_util import (FixedOffset, - utc) - -from test import qcheck, unittest -from test.utils import ExceptionCatchingThread +from bson.tz_util import FixedOffset, utc class NotADict(abc.MutableMapping): @@ -95,7 +94,6 @@ def __repr__(self): class DSTAwareTimezone(datetime.tzinfo): - def __init__(self, offset, name, dst_start_month, dst_end_month): self.__offset = offset self.__dst_start_month = dst_start_month @@ -121,11 +119,10 @@ class TestBSON(unittest.TestCase): def assertInvalid(self, data): self.assertRaises(InvalidBSON, decode, data) - def check_encode_then_decode(self, doc_class=dict, decoder=decode, - encoder=encode): + def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): # Work around http://bugs.jython.org/issue1728 - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): doc_class = SON def helper(doc): @@ -134,8 +131,7 @@ def helper(doc): helper({}) helper({"test": "hello"}) - self.assertTrue(isinstance(decoder(encoder( - {"hello": "world"}))["hello"], str)) + self.assertTrue(isinstance(decoder(encoder({"hello": "world"}))["hello"], str)) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) @@ -148,9 +144,8 @@ def helper(doc): helper({"a binary": Binary(b"test", 128)}) helper({"a binary": Binary(b"test", 254)}) helper({"another binary": Binary(b"test", 2)}) - helper(SON([('test dst', datetime.datetime(1993, 4, 4, 2))])) - helper(SON([('test negative dst', - datetime.datetime(1, 1, 1, 1, 1, 1))])) + helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))])) + helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))])) helper({"big float": float(10000000000)}) helper({"ref": DBRef("coll", 5)}) helper({"ref": DBRef("coll", 5, foo="bar", bar=4)}) @@ -160,14 +155,12 @@ def helper(doc): helper({"foo": MinKey()}) helper({"foo": MaxKey()}) helper({"$field": Code("function(){ return true; }")}) - helper({"$field": Code("return function(){ return x; }", scope={'x': False})}) + helper({"$field": Code("return function(){ return x; }", scope={"x": False})}) def encode_then_decode(doc): - return doc_class(doc) == decoder(encode(doc), CodecOptions( - document_class=doc_class)) + return doc_class(doc) == decoder(encode(doc), CodecOptions(document_class=doc_class)) - qcheck.check_unittest(self, encode_then_decode, - qcheck.gen_mongo_dict(3)) + qcheck.check_unittest(self, encode_then_decode, qcheck.gen_mongo_dict(3)) def test_encode_then_decode(self): self.check_encode_then_decode() @@ -177,18 +170,20 @@ def test_encode_then_decode_any_mapping(self): def test_encode_then_decode_legacy(self): self.check_encode_then_decode( - encoder=BSON.encode, - decoder=lambda *args: BSON(args[0]).decode(*args[1:])) + encoder=BSON.encode, decoder=lambda *args: BSON(args[0]).decode(*args[1:]) + ) def test_encode_then_decode_any_mapping_legacy(self): self.check_encode_then_decode( - doc_class=NotADict, encoder=BSON.encode, - decoder=lambda *args: BSON(args[0]).decode(*args[1:])) + doc_class=NotADict, + encoder=BSON.encode, + decoder=lambda *args: BSON(args[0]).decode(*args[1:]), + ) def test_encoding_defaultdict(self): - dct = collections.defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] + dct = collections.defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] encode(dct) - self.assertEqual(dct, collections.defaultdict(dict, [('foo', 'bar')])) + self.assertEqual(dct, collections.defaultdict(dict, [("foo", "bar")])) def test_basic_validation(self): self.assertRaises(TypeError, is_valid, 100) @@ -209,117 +204,132 @@ def test_basic_validation(self): self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12") self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00") self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" - b"\x04\x00\x00\x00bar\x00\x00") - self.assertInvalid(b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" - b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00") - self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c" - b"\x00\x00\x00\x08bar\x00\x01\x00\x00") - self.assertInvalid(b"\x1c\x00\x00\x00\x03foo\x00" - b"\x12\x00\x00\x00\x02bar\x00" - b"\x05\x00\x00\x00baz\x00\x00\x00") - self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" - b"\x04\x00\x00\x00abc\xff\x00") - - def test_bad_string_lengths(self): - self.assertInvalid( - b"\x0c\x00\x00\x00\x02\x00" - b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" b"\x04\x00\x00\x00bar\x00\x00") self.assertInvalid( - b"\x12\x00\x00\x00\x02\x00" - b"\xff\xff\xff\xfffoobar\x00\x00") + b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" + ) self.assertInvalid( - b"\x0c\x00\x00\x00\x0e\x00" - b"\x00\x00\x00\x00\x00\x00") + b"\x15\x00\x00\x00\x03foo\x00\x0c" b"\x00\x00\x00\x08bar\x00\x01\x00\x00" + ) self.assertInvalid( - b"\x12\x00\x00\x00\x0e\x00" - b"\xff\xff\xff\xfffoobar\x00\x00") + b"\x1c\x00\x00\x00\x03foo\x00" + b"\x12\x00\x00\x00\x02bar\x00" + b"\x05\x00\x00\x00baz\x00\x00\x00" + ) + self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" b"\x04\x00\x00\x00abc\xff\x00") + + def test_bad_string_lengths(self): + self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00" b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x02\x00" b"\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00" b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00" b"\xff\xff\xff\xfffoobar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x0c\x00" - b"\x00\x00\x00\x00\x00RY\xb5j" - b"\xfa[\xd8A\xd6X]\x99\x00") + b"\x18\x00\x00\x00\x0c\x00" b"\x00\x00\x00\x00\x00RY\xb5j" b"\xfa[\xd8A\xd6X]\x99\x00" + ) self.assertInvalid( b"\x1e\x00\x00\x00\x0c\x00" b"\xff\xff\xff\xfffoobar\x00" - b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\r\x00" - b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid( - b"\x0c\x00\x00\x00\r\x00" - b"\xff\xff\xff\xff\x00\x00") + b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + ) + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\xff\xff\xff\xff\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x00\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\x01\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\xff\xff" b"\xff\xff\x00\x0c\x00\x00" b"\x00\x02\x00\x01\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x01\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\x00\x00\x00" - b"\x00\x00\x00\x00") + b"\x00\x00\x00\x00" + ) self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x01\x00" b"\x00\x00\x00\x0c\x00\x00" b"\x00\x02\x00\xff\xff\xff" - b"\xff\x00\x00\x00") + b"\xff\x00\x00\x00" + ) def test_random_data_is_not_bson(self): - qcheck.check_unittest(self, qcheck.isnt(is_valid), - qcheck.gen_string(qcheck.gen_range(0, 40))) + qcheck.check_unittest( + self, qcheck.isnt(is_valid), qcheck.gen_string(qcheck.gen_range(0, 40)) + ) def test_basic_decode(self): - self.assertEqual({"test": "hello world"}, - decode(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" - b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" - b"\x72\x6C\x64\x00\x00")) - self.assertEqual([{"test": "hello world"}, {}], - decode_all(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00")) - self.assertEqual([{"test": "hello world"}, {}], - list(decode_iter( - b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00"))) - self.assertEqual([{"test": "hello world"}, {}], - list(decode_file_iter(BytesIO( - b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00")))) + self.assertEqual( + {"test": "hello world"}, + decode( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C" + b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F" + b"\x72\x6C\x64\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + decode_all( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_iter( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ), + ) + self.assertEqual( + [{"test": "hello world"}, {}], + list( + decode_file_iter( + BytesIO( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ) + ) + ), + ) def test_decode_all_buffer_protocol(self): - docs = [{'foo': 'bar'}, {}] + docs = [{"foo": "bar"}, {}] bs = b"".join(map(encode, docs)) # type: ignore[arg-type] self.assertEqual(docs, decode_all(bytearray(bs))) self.assertEqual(docs, decode_all(memoryview(bs))) - self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1])) - self.assertEqual(docs, decode_all(array.array('B', bs))) + self.assertEqual(docs, decode_all(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(docs, decode_all(array.array("B", bs))) with mmap.mmap(-1, len(bs)) as mm: mm.write(bs) mm.seek(0) self.assertEqual(docs, decode_all(mm)) def test_decode_buffer_protocol(self): - doc = {'foo': 'bar'} + doc = {"foo": "bar"} bs = encode(doc) self.assertEqual(doc, decode(bs)) self.assertEqual(doc, decode(bytearray(bs))) self.assertEqual(doc, decode(memoryview(bs))) - self.assertEqual(doc, decode(memoryview(b'1' + bs + b'1')[1:-1])) - self.assertEqual(doc, decode(array.array('B', bs))) + self.assertEqual(doc, decode(memoryview(b"1" + bs + b"1")[1:-1])) + self.assertEqual(doc, decode(array.array("B", bs))) with mmap.mmap(-1, len(bs)) as mm: mm.write(bs) mm.seek(0) @@ -329,8 +339,7 @@ def test_invalid_decodes(self): # Invalid object size (not enough bytes in document for even # an object size of first object. # NOTE: decode_all and decode_iter don't care, not sure if they should? - self.assertRaises(InvalidBSON, list, - decode_file_iter(BytesIO(b"\x1B"))) + self.assertRaises(InvalidBSON, list, decode_file_iter(BytesIO(b"\x1B"))) bad_bsons = [ # An object size that's too small to even include the object size, @@ -338,21 +347,27 @@ def test_invalid_decodes(self): b"\x01\x00\x00\x00\x00", # One object, but with object size listed smaller than it is in the # data. - (b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\x00"), + ( + b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\x00" + ), # One object, missing the EOO at the end. - (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00"), + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00" + ), # One object, sized correctly, with a spot for an EOO, but the EOO # isn't 0x00. - (b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" - b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" - b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" - b"\x05\x00\x00\x00\xFF"), + ( + b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74" + b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C" + b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00" + b"\x05\x00\x00\x00\xFF" + ), ] for i, data in enumerate(bad_bsons): msg = "bad_bson[{}]".format(i) @@ -371,14 +386,17 @@ def test_invalid_decodes(self): def test_invalid_field_name(self): # Decode a truncated field with self.assertRaises(InvalidBSON) as ctx: - decode(b'\x0b\x00\x00\x00\x02field\x00') + decode(b"\x0b\x00\x00\x00\x02field\x00") # Assert that the InvalidBSON error message is not empty. self.assertTrue(str(ctx.exception)) def test_data_timestamp(self): - self.assertEqual({"test": Timestamp(4, 20)}, - decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" - b"\x00\x00\x00\x04\x00\x00\x00\x00")) + self.assertEqual( + {"test": Timestamp(4, 20)}, + decode( + b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" b"\x00\x00\x00\x04\x00\x00\x00\x00" + ), + ) def test_basic_encode(self): self.assertRaises(TypeError, encode, 100) @@ -388,83 +406,102 @@ def test_basic_encode(self): self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00")) self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00") - self.assertEqual(encode({"test": "hello world"}), - b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" - b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" - b"\x64\x00\x00") - self.assertEqual(encode({"mike": 100}), - b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" - b"\x00\x00\x00") - self.assertEqual(encode({"hello": 1.5}), - b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" - b"\x00\x00\x00\x00\x00\xF8\x3F\x00") - self.assertEqual(encode({"true": True}), - b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00") - self.assertEqual(encode({"false": False}), - b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" - b"\x00") - self.assertEqual(encode({"empty": []}), - b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" - b"\x00\x00\x00\x00\x00") - self.assertEqual(encode({"none": {}}), - b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" - b"\x00\x00\x00\x00") - self.assertEqual(encode({"test": Binary(b"test", 0)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - b"\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": Binary(b"test", 2)}), - b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" - b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": Binary(b"test", 128)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" - b"\x00\x00\x80\x74\x65\x73\x74\x00") - self.assertEqual(encode({"test": None}), - b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") - self.assertEqual(encode({"date": datetime.datetime(2007, 1, 8, - 0, 30, 11)}), - b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" - b"\x1C\xFF\x0F\x01\x00\x00\x00") - self.assertEqual(encode({"regex": re.compile(b"a*b", - re.IGNORECASE)}), - b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" - b"\x2A\x62\x00\x69\x00\x00") - self.assertEqual(encode({"$where": Code("test")}), - b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" - b"\x00\x00") - self.assertEqual(encode({"$field": - Code("function(){ return true;}", scope=None)}), - b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" - b"function(){ return true;}\x00\x00") - self.assertEqual(encode({"$field": - Code("return function(){ return x; }", - scope={'x': False})}), - b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" - b"\x00\x00return function(){ return x; }\x00\t\x00" - b"\x00\x00\x08x\x00\x00\x00\x00") + self.assertEqual( + encode({"test": "hello world"}), + b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00" + b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C" + b"\x64\x00\x00", + ) + self.assertEqual( + encode({"mike": 100}), + b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" b"\x00\x00\x00", + ) + self.assertEqual( + encode({"hello": 1.5}), + b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" b"\x00\x00\x00\x00\x00\xF8\x3F\x00", + ) + self.assertEqual( + encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00" + ) + self.assertEqual( + encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" b"\x00" + ) + self.assertEqual( + encode({"empty": []}), + b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" b"\x00\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"none": {}}), + b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" b"\x00\x00\x00\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 0)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 2)}), + b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00" + b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00", + ) + self.assertEqual( + encode({"test": Binary(b"test", 128)}), + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x80\x74\x65\x73\x74\x00", + ) + self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") + self.assertEqual( + encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), + b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" b"\x1C\xFF\x0F\x01\x00\x00\x00", + ) + self.assertEqual( + encode({"regex": re.compile(b"a*b", re.IGNORECASE)}), + b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" b"\x2A\x62\x00\x69\x00\x00", + ) + self.assertEqual( + encode({"$where": Code("test")}), + b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" b"\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("function(){ return true;}", scope=None)}), + b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" b"function(){ return true;}\x00\x00", + ) + self.assertEqual( + encode({"$field": Code("return function(){ return x; }", scope={"x": False})}), + b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00" + b"\x00\x00return function(){ return x; }\x00\t\x00" + b"\x00\x00\x08x\x00\x00\x00\x00", + ) unicode_empty_scope = Code("function(){ return 'héllo';}", {}) - self.assertEqual(encode({'$field': unicode_empty_scope}), - b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" - b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" - b"\x00\x00\x00\x00\x00") + self.assertEqual( + encode({"$field": unicode_empty_scope}), + b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00" + b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05" + b"\x00\x00\x00\x00\x00", + ) a = ObjectId(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B") - self.assertEqual(encode({"oid": a}), - b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" - b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00") - self.assertEqual(encode({"ref": DBRef("coll", a)}), - b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" - b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" - b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" - b"\x00") + self.assertEqual( + encode({"oid": a}), + b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02" + b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00", + ) + self.assertEqual( + encode({"ref": DBRef("coll", a)}), + b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02" + b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00" + b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00" + b"\x00", + ) def test_unknown_type(self): # Repr value differs with major python version - part = "type %r for fieldname 'foo'" % (b'\x14',) + part = "type %r for fieldname 'foo'" % (b"\x14",) docs = [ - b'\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00', - (b'\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140' - b'\x00\x01\x00\x00\x00\x00\x00'), - (b' \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00' - b'\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00')] + b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", + (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140" b"\x00\x01\x00\x00\x00\x00\x00"), + ( + b" \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00" + b"\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00" + ), + ] for bs in docs: try: decode(bs) @@ -481,21 +518,19 @@ def test_dbpointer(self): # not support creation of the DBPointer type, but will decode # DBPointer to DBRef. - bs = (b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" - b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00") + bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" - self.assertEqual({'': DBRef('', ObjectId('5259b56afa5bd841d6585d99'))}, - decode(bs)) + self.assertEqual({"": DBRef("", ObjectId("5259b56afa5bd841d6585d99"))}, decode(bs)) def test_bad_dbref(self): - ref_only = {'ref': {'$ref': 'collection'}} - id_only = {'ref': {'$id': ObjectId()}} + ref_only = {"ref": {"$ref": "collection"}} + id_only = {"ref": {"$id": ObjectId()}} self.assertEqual(ref_only, decode(encode(ref_only))) self.assertEqual(id_only, decode(encode(id_only))) def test_bytes_as_keys(self): - doc = {b"foo": 'bar'} + doc = {b"foo": "bar"} # Since `bytes` are stored as Binary you can't use them # as keys in python 3.x. Using binary data as a key makes # no sense in BSON anyway and little sense in python. @@ -528,15 +563,12 @@ def test_large_datetime_truncation(self): self.assertEqual(dt2.second, dt1.second) def test_aware_datetime(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) offset = aware.utcoffset() assert offset is not None as_utc = (aware - offset).replace(tzinfo=utc) - self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), - as_utc) - after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))[ - "date"] + self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), as_utc) + after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))["date"] self.assertEqual(utc, after.tzinfo) self.assertEqual(as_utc, after) @@ -545,54 +577,47 @@ def test_local_datetime(self): tz = DSTAwareTimezone(60, "sixty-minutes", 4, 7) # It's not DST. - local = datetime.datetime(year=2025, month=12, hour=2, day=1, - tzinfo=tz) + local = datetime.datetime(year=2025, month=12, hour=2, day=1, tzinfo=tz) options = CodecOptions(tz_aware=True, tzinfo=tz) # Encode with this timezone, then decode to UTC. - encoded = encode({'date': local}, codec_options=options) - self.assertEqual(local.replace(hour=1, tzinfo=None), - decode(encoded)['date']) + encoded = encode({"date": local}, codec_options=options) + self.assertEqual(local.replace(hour=1, tzinfo=None), decode(encoded)["date"]) # It's DST. - local = datetime.datetime(year=2025, month=4, hour=1, day=1, - tzinfo=tz) - encoded = encode({'date': local}, codec_options=options) - self.assertEqual(local.replace(month=3, day=31, hour=23, tzinfo=None), - decode(encoded)['date']) + local = datetime.datetime(year=2025, month=4, hour=1, day=1, tzinfo=tz) + encoded = encode({"date": local}, codec_options=options) + self.assertEqual( + local.replace(month=3, day=31, hour=23, tzinfo=None), decode(encoded)["date"] + ) # Encode UTC, then decode in a different timezone. - encoded = encode({'date': local.replace(tzinfo=utc)}) - decoded = decode(encoded, options)['date'] + encoded = encode({"date": local.replace(tzinfo=utc)}) + decoded = decode(encoded, options)["date"] self.assertEqual(local.replace(hour=3), decoded) self.assertEqual(tz, decoded.tzinfo) # Test round-tripping. self.assertEqual( - local, decode(encode( - {'date': local}, codec_options=options), options)['date']) + local, decode(encode({"date": local}, codec_options=options), options)["date"] + ) # Test around the Unix Epoch. epochs = ( EPOCH_AWARE, - EPOCH_AWARE.astimezone(FixedOffset(120, 'one twenty')), - EPOCH_AWARE.astimezone(FixedOffset(-120, 'minus one twenty')) + EPOCH_AWARE.astimezone(FixedOffset(120, "one twenty")), + EPOCH_AWARE.astimezone(FixedOffset(-120, "minus one twenty")), ) utc_co = CodecOptions(tz_aware=True) for epoch in epochs: - doc = {'epoch': epoch} + doc = {"epoch": epoch} # We always retrieve datetimes in UTC unless told to do otherwise. - self.assertEqual( - EPOCH_AWARE, - decode(encode(doc), codec_options=utc_co)['epoch']) + self.assertEqual(EPOCH_AWARE, decode(encode(doc), codec_options=utc_co)["epoch"]) # Round-trip the epoch. local_co = CodecOptions(tz_aware=True, tzinfo=epoch.tzinfo) - self.assertEqual( - epoch, - decode(encode(doc), codec_options=local_co)['epoch']) + self.assertEqual(epoch, decode(encode(doc), codec_options=local_co)["epoch"]) def test_naive_decode(self): - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) offset = aware.utcoffset() assert offset is not None naive_utc = (aware - offset).replace(tzinfo=None) @@ -605,32 +630,30 @@ def test_dst(self): d = {"x": datetime.datetime(1993, 4, 4, 2)} self.assertEqual(d, decode(encode(d))) - @unittest.skip('Disabled due to http://bugs.python.org/issue25222') + @unittest.skip("Disabled due to http://bugs.python.org/issue25222") def test_bad_encode(self): - evil_list: dict = {'a': []} - evil_list['a'].append(evil_list) + evil_list: dict = {"a": []} + evil_list["a"].append(evil_list) evil_dict: dict = {} - evil_dict['a'] = evil_dict + evil_dict["a"] = evil_dict for evil_data in [evil_dict, evil_list]: self.assertRaises(Exception, encode, evil_data) def test_overflow(self): self.assertTrue(encode({"x": 9223372036854775807})) - self.assertRaises(OverflowError, encode, - {"x": 9223372036854775808}) + self.assertRaises(OverflowError, encode, {"x": 9223372036854775808}) self.assertTrue(encode({"x": -9223372036854775808})) - self.assertRaises(OverflowError, encode, - {"x": -9223372036854775809}) + self.assertRaises(OverflowError, encode, {"x": -9223372036854775809}) def test_small_long_encode_decode(self): - encoded1 = encode({'x': 256}) - decoded1 = decode(encoded1)['x'] + encoded1 = encode({"x": 256}) + decoded1 = decode(encoded1)["x"] self.assertEqual(256, decoded1) self.assertEqual(type(256), type(decoded1)) - encoded2 = encode({'x': Int64(256)}) - decoded2 = decode(encoded2)['x'] + encoded2 = encode({"x": Int64(256)}) + decoded2 = decode(encoded2)["x"] expected = Int64(256) self.assertEqual(expected, decoded2) self.assertEqual(type(expected), type(decoded2)) @@ -638,18 +661,16 @@ def test_small_long_encode_decode(self): self.assertNotEqual(type(decoded1), type(decoded2)) def test_tuple(self): - self.assertEqual({"tuple": [1, 2]}, - decode(encode({"tuple": (1, 2)}))) + self.assertEqual({"tuple": [1, 2]}, decode(encode({"tuple": (1, 2)}))) def test_uuid(self): id = uuid.uuid4() # The default uuid_representation is UNSPECIFIED - with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): - bson.decode_all(encode({'uuid': id})) + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(encode({"uuid": id})) opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - transformed_id = decode(encode({"id": id}, codec_options=opts), - codec_options=opts)["id"] + transformed_id = decode(encode({"id": id}, codec_options=opts), codec_options=opts)["id"] self.assertTrue(isinstance(transformed_id, uuid.UUID)) self.assertEqual(id, transformed_id) self.assertNotEqual(uuid.uuid4(), transformed_id) @@ -666,7 +687,7 @@ def test_uuid_legacy(self): # The C extension was segfaulting on unicode RegExs, so we have this test # that doesn't really test anything but the lack of a segfault. def test_unicode_regex(self): - regex = re.compile('revisi\xf3n') + regex = re.compile("revisi\xf3n") decode(encode({"regex": regex})) def test_non_string_keys(self): @@ -677,12 +698,12 @@ def test_utf8(self): self.assertEqual(w, decode(encode(w))) # b'a\xe9' == "aé".encode("iso-8859-1") - iso8859_bytes = b'a\xe9' + iso8859_bytes = b"a\xe9" y = {"hello": iso8859_bytes} # Stored as BSON binary subtype 0. out = decode(encode(y)) - self.assertTrue(isinstance(out['hello'], bytes)) - self.assertEqual(out['hello'], iso8859_bytes) + self.assertTrue(isinstance(out["hello"], bytes)) + self.assertEqual(out["hello"], iso8859_bytes) def test_null_character(self): doc = {"a": "\x00"} @@ -694,28 +715,27 @@ def test_null_character(self): self.assertRaises(InvalidDocument, encode, {b"\x00": "a"}) self.assertRaises(InvalidDocument, encode, {"\x00": "a"}) - self.assertRaises(InvalidDocument, encode, - {"a": re.compile(b"ab\x00c")}) - self.assertRaises(InvalidDocument, encode, - {"a": re.compile("ab\x00c")}) + self.assertRaises(InvalidDocument, encode, {"a": re.compile(b"ab\x00c")}) + self.assertRaises(InvalidDocument, encode, {"a": re.compile("ab\x00c")}) def test_move_id(self): - self.assertEqual(b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" - b"\x02a\x00\x02\x00\x00\x00a\x00\x00", - encode(SON([("a", "a"), ("_id", "a")]))) - - self.assertEqual(b"\x2c\x00\x00\x00" - b"\x02_id\x00\x02\x00\x00\x00b\x00" - b"\x03b\x00" - b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" - b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", - encode(SON([("b", - SON([("a", "a"), ("_id", "a")])), - ("_id", "b")]))) + self.assertEqual( + b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00" + b"\x02a\x00\x02\x00\x00\x00a\x00\x00", + encode(SON([("a", "a"), ("_id", "a")])), + ) + + self.assertEqual( + b"\x2c\x00\x00\x00" + b"\x02_id\x00\x02\x00\x00\x00b\x00" + b"\x03b\x00" + b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00" + b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00", + encode(SON([("b", SON([("a", "a"), ("_id", "a")])), ("_id", "b")])), + ) def test_dates(self): - doc = {"early": datetime.datetime(1686, 5, 5), - "late": datetime.datetime(2086, 5, 5)} + doc = {"early": datetime.datetime(1686, 5, 5), "late": datetime.datetime(2086, 5, 5)} try: self.assertEqual(doc, decode(encode(doc))) except ValueError: @@ -728,15 +748,12 @@ def test_dates(self): def test_custom_class(self): self.assertIsInstance(decode(encode({})), dict) self.assertNotIsInstance(decode(encode({})), SON) - self.assertIsInstance( - decode(encode({}), CodecOptions(document_class=SON)), SON) + self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) - self.assertEqual( - 1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) + self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) x = encode({"x": [{"y": 1}]}) - self.assertIsInstance( - decode(x, CodecOptions(document_class=SON))["x"][0], SON) + self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) def test_subclasses(self): # make sure we can serialize subclasses of native Python types. @@ -749,9 +766,7 @@ class _myfloat(float): class _myunicode(str): pass - d = {'a': _myint(42), 'b': _myfloat(63.9), - 'c': _myunicode('hello world') - } + d = {"a": _myint(42), "b": _myfloat(63.9), "c": _myunicode("hello world")} d2 = decode(encode(d)) for key, value in d2.items(): orig_value = d[key] @@ -761,65 +776,60 @@ class _myunicode(str): def test_ordered_dict(self): d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) - self.assertEqual( - d, decode(encode(d), CodecOptions(document_class=OrderedDict))) + self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) def test_bson_regex(self): # Invalid Python regex, though valid PCRE. - bson_re1 = Regex(r'[\w-\.]') - self.assertEqual(r'[\w-\.]', bson_re1.pattern) + bson_re1 = Regex(r"[\w-\.]") + self.assertEqual(r"[\w-\.]", bson_re1.pattern) self.assertEqual(0, bson_re1.flags) - doc1 = {'r': bson_re1} + doc1 = {"r": bson_re1} doc1_bson = ( - b'\x11\x00\x00\x00' # document length - b'\x0br\x00[\\w-\\.]\x00\x00' # r: regex - b'\x00') # document terminator + b"\x11\x00\x00\x00" b"\x0br\x00[\\w-\\.]\x00\x00" b"\x00" # document length # r: regex + ) # document terminator self.assertEqual(doc1_bson, encode(doc1)) self.assertEqual(doc1, decode(doc1_bson)) # Valid Python regex, with flags. - re2 = re.compile('.*', re.I | re.M | re.S | re.U | re.X) - bson_re2 = Regex('.*', re.I | re.M | re.S | re.U | re.X) + re2 = re.compile(".*", re.I | re.M | re.S | re.U | re.X) + bson_re2 = Regex(".*", re.I | re.M | re.S | re.U | re.X) - doc2_with_re = {'r': re2} - doc2_with_bson_re = {'r': bson_re2} + doc2_with_re = {"r": re2} + doc2_with_bson_re = {"r": bson_re2} doc2_bson = ( - b"\x11\x00\x00\x00" # document length - b"\x0br\x00.*\x00imsux\x00" # r: regex - b"\x00") # document terminator + b"\x11\x00\x00\x00" b"\x0br\x00.*\x00imsux\x00" b"\x00" # document length # r: regex + ) # document terminator self.assertEqual(doc2_bson, encode(doc2_with_re)) self.assertEqual(doc2_bson, encode(doc2_with_bson_re)) - self.assertEqual(re2.pattern, decode(doc2_bson)['r'].pattern) - self.assertEqual(re2.flags, decode(doc2_bson)['r'].flags) + self.assertEqual(re2.pattern, decode(doc2_bson)["r"].pattern) + self.assertEqual(re2.flags, decode(doc2_bson)["r"].flags) def test_regex_from_native(self): - self.assertEqual('.*', Regex.from_native(re.compile('.*')).pattern) - self.assertEqual(0, Regex.from_native(re.compile(b'')).flags) + self.assertEqual(".*", Regex.from_native(re.compile(".*")).pattern) + self.assertEqual(0, Regex.from_native(re.compile(b"")).flags) - regex = re.compile(b'', re.I | re.L | re.M | re.S | re.X) - self.assertEqual( - re.I | re.L | re.M | re.S | re.X, - Regex.from_native(regex).flags) + regex = re.compile(b"", re.I | re.L | re.M | re.S | re.X) + self.assertEqual(re.I | re.L | re.M | re.S | re.X, Regex.from_native(regex).flags) - unicode_regex = re.compile('', re.U) + unicode_regex = re.compile("", re.U) self.assertEqual(re.U, Regex.from_native(unicode_regex).flags) def test_regex_hash(self): - self.assertRaises(TypeError, hash, Regex('hello')) + self.assertRaises(TypeError, hash, Regex("hello")) def test_regex_comparison(self): - re1 = Regex('a') - re2 = Regex('b') + re1 = Regex("a") + re2 = Regex("b") self.assertNotEqual(re1, re2) - re1 = Regex('a', re.I) - re2 = Regex('a', re.M) + re1 = Regex("a", re.I) + re2 = Regex("a", re.M) self.assertNotEqual(re1, re2) - re1 = Regex('a', re.I) - re2 = Regex('a', re.I) + re1 = Regex("a", re.I) + re2 = Regex("a", re.I) self.assertEqual(re1, re2) def test_exception_wrapping(self): @@ -827,13 +837,12 @@ def test_exception_wrapping(self): # the final exception always matches InvalidBSON. # {'s': '\xff'}, will throw attempting to decode utf-8. - bad_doc = b'\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00' + bad_doc = b"\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00" with self.assertRaises(InvalidBSON) as context: decode_all(bad_doc) - self.assertIn("codec can't decode byte 0xff", - str(context.exception)) + self.assertIn("codec can't decode byte 0xff", str(context.exception)) def test_minkey_maxkey_comparison(self): # MinKey's <, <=, >, >=, !=, and ==. @@ -907,29 +916,25 @@ def test_timestamp_comparison(self): self.assertFalse(Timestamp(1, 0) > Timestamp(1, 0)) def test_timestamp_highorder_bits(self): - doc = {'a': Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} - doc_bson = (b'\x10\x00\x00\x00' - b'\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff' - b'\x00') + doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} + doc_bson = b"\x10\x00\x00\x00" b"\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff" b"\x00" self.assertEqual(doc_bson, encode(doc)) self.assertEqual(doc, decode(doc_bson)) def test_bad_id_keys(self): - self.assertRaises(InvalidDocument, encode, - {"_id": {"$bad": 123}}, True) - self.assertRaises(InvalidDocument, encode, - {"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}, True) - encode({"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}) + self.assertRaises(InvalidDocument, encode, {"_id": {"$bad": 123}}, True) + self.assertRaises( + InvalidDocument, encode, {"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}, True + ) + encode({"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}) def test_bson_encode_thread_safe(self): - def target(i): for j in range(1000): - my_int = type('MyInt_%s_%s' % (i, j), (int,), {}) - bson.encode({'my_int': my_int()}) + my_int = type("MyInt_%s_%s" % (i, j), (int,), {}) + bson.encode({"my_int": my_int()}) - threads = [ExceptionCatchingThread(target=target, args=(i,)) - for i in range(3)] + threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)] for t in threads: t.start() @@ -947,11 +952,11 @@ def __init__(self, val): def __repr__(self): return repr(self.val) - self.assertEqual('1', repr(Wrapper(1))) + self.assertEqual("1", repr(Wrapper(1))) with self.assertRaisesRegex( - InvalidDocument, - "cannot encode object: 1, of type: " + repr(Wrapper)): - encode({'t': Wrapper(1)}) + InvalidDocument, "cannot encode object: 1, of type: " + repr(Wrapper) + ): + encode({"t": Wrapper(1)}) class TestCodecOptions(unittest.TestCase): @@ -969,69 +974,67 @@ def test_uuid_representation(self): self.assertRaises(ValueError, CodecOptions, uuid_representation=2) def test_tzinfo(self): - self.assertRaises(TypeError, CodecOptions, tzinfo='pacific') - tz = FixedOffset(42, 'forty-two') + self.assertRaises(TypeError, CodecOptions, tzinfo="pacific") + tz = FixedOffset(42, "forty-two") self.assertRaises(ValueError, CodecOptions, tzinfo=tz) self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo) def test_codec_options_repr(self): - r = ("CodecOptions(document_class=dict, tz_aware=False, " - "uuid_representation=UuidRepresentation.UNSPECIFIED, " - "unicode_decode_error_handler='strict', " - "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None))") + r = ( + "CodecOptions(document_class=dict, tz_aware=False, " + "uuid_representation=UuidRepresentation.UNSPECIFIED, " + "unicode_decode_error_handler='strict', " + "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " + "fallback_encoder=None))" + ) self.assertEqual(r, repr(CodecOptions())) def test_decode_all_defaults(self): # Test decode_all()'s default document_class is dict and tz_aware is # False. - doc = {'sub_document': {}, - 'dt': datetime.datetime.utcnow()} + doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} decoded = bson.decode_all(bson.encode(doc))[0] - self.assertIsInstance(decoded['sub_document'], dict) - self.assertIsNone(decoded['dt'].tzinfo) + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) # The default uuid_representation is UNSPECIFIED - with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): - bson.decode_all(bson.encode({'uuid': uuid.uuid4()})) + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): + bson.decode_all(bson.encode({"uuid": uuid.uuid4()})) def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) # Test handling of bad key value, bad string value, and both. - invalid_key = enc[:7] + b'\xe9' + enc[8:] - invalid_val = enc[:18] + b'\xe9' + enc[19:] - invalid_both = enc[:7] + b'\xe9' + enc[8:18] + b'\xe9' + enc[19:] + invalid_key = enc[:7] + b"\xe9" + enc[8:] + invalid_val = enc[:18] + b"\xe9" + enc[19:] + invalid_both = enc[:7] + b"\xe9" + enc[8:18] + b"\xe9" + enc[19:] # Ensure that strict mode raises an error. for invalid in [invalid_key, invalid_val, invalid_both]: - self.assertRaises(InvalidBSON, decode, invalid, CodecOptions( - unicode_decode_error_handler="strict")) + self.assertRaises( + InvalidBSON, decode, invalid, CodecOptions(unicode_decode_error_handler="strict") + ) self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) self.assertRaises(InvalidBSON, decode, invalid) # Test all other error handlers. - for handler in ['replace', 'backslashreplace', 'surrogateescape', - 'ignore']: - expected_key = b'ke\xe9str'.decode('utf-8', handler) - expected_val = b'fo\xe9bar'.decode('utf-8', handler) - doc = decode(invalid_key, - CodecOptions(unicode_decode_error_handler=handler)) + for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: + expected_key = b"ke\xe9str".decode("utf-8", handler) + expected_val = b"fo\xe9bar".decode("utf-8", handler) + doc = decode(invalid_key, CodecOptions(unicode_decode_error_handler=handler)) self.assertEqual(doc, {expected_key: "foobar"}) - doc = decode(invalid_val, - CodecOptions(unicode_decode_error_handler=handler)) + doc = decode(invalid_val, CodecOptions(unicode_decode_error_handler=handler)) self.assertEqual(doc, {"keystr": expected_val}) - doc = decode(invalid_both, - CodecOptions(unicode_decode_error_handler=handler)) + doc = decode(invalid_both, CodecOptions(unicode_decode_error_handler=handler)) self.assertEqual(doc, {expected_key: expected_val}) # Test handling bad error mode. - dec = decode(enc, - CodecOptions(unicode_decode_error_handler="junk")) + dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) self.assertEqual(dec, {"keystr": "foobar"}) - self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions( - unicode_decode_error_handler="junk")) + self.assertRaises( + InvalidBSON, decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk") + ) def round_trip_pickle(self, obj, pickled_with_older): pickled_with_older_obj = pickle.loads(pickled_with_older) @@ -1043,61 +1046,75 @@ def round_trip_pickle(self, obj, pickled_with_older): def test_regex_pickling(self): reg = Regex(".?") - pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}' - b'\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag' - b's\x94K\x00ub.') + pickled_with_3 = ( + b"\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag" + b"s\x94K\x00ub." + ) self.round_trip_pickle(reg, pickled_with_3) def test_timestamp_pickling(self): ts = Timestamp(0, 1) - pickled_with_3 = (b'\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)' - b'\x81\x94}\x94(' - b'\x8c\x10_Timestamp__time\x94K\x00\x8c' - b'\x0f_Timestamp__inc\x94K\x01ub.') + pickled_with_3 = ( + b"\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)" + b"\x81\x94}\x94(" + b"\x8c\x10_Timestamp__time\x94K\x00\x8c" + b"\x0f_Timestamp__inc\x94K\x01ub." + ) self.round_trip_pickle(ts, pickled_with_3) def test_dbref_pickling(self): dbr = DBRef("foo", 5) - pickled_with_3 = (b'\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}' - b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94' - b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database' - b'\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub.') + pickled_with_3 = ( + b"\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub." + ) self.round_trip_pickle(dbr, pickled_with_3) - dbr = DBRef("foo", 5, database='db', kwargs1=None) - pickled_with_3 = (b'\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}' - b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94' - b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database' - b'\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94' - b'\x8c\x07kwargs1\x94Nsub.') + dbr = DBRef("foo", 5, database="db", kwargs1=None) + pickled_with_3 = ( + b"\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}" + b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94" + b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database" + b"\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94" + b"\x8c\x07kwargs1\x94Nsub." + ) self.round_trip_pickle(dbr, pickled_with_3) def test_minkey_pickling(self): mink = MinKey() - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)' - b'\x81\x94.') + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)" + b"\x81\x94." + ) self.round_trip_pickle(mink, pickled_with_3) def test_maxkey_pickling(self): maxk = MaxKey() - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c' - b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)' - b'\x81\x94.') + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c" + b"\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)" + b"\x81\x94." + ) self.round_trip_pickle(maxk, pickled_with_3) def test_int64_pickling(self): i64 = Int64(9) - pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n' - b'bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94' - b'\x81\x94.') + pickled_with_3 = ( + b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n" + b"bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94" + b"\x81\x94." + ) self.round_trip_pickle(i64, pickled_with_3) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index cbb702e405..4a46276573 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -21,54 +21,52 @@ import json import os import sys - from decimal import DecimalException sys.path[0:0] = [""] +from test import unittest + from bson import decode, encode, json_util from bson.binary import STANDARD from bson.codec_options import CodecOptions -from bson.decimal128 import Decimal128 from bson.dbref import DBRef +from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidId from bson.json_util import JSONMode from bson.son import SON -from test import unittest - -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'bson_corpus') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bson_corpus") _TESTS_TO_SKIP = { # Python cannot decode dates after year 9999. - 'Y10K', + "Y10K", } _NON_PARSE_ERRORS = { # {"$date": } is our legacy format which we still need to parse. - 'Bad $date (number, not string or hash)', + "Bad $date (number, not string or hash)", # This variant of $numberLong may have been generated by an old version # of mongoexport. - 'Bad $numberLong (number, not string)', + "Bad $numberLong (number, not string)", # Python's UUID constructor is very permissive. - '$uuid invalid value--misplaced hyphens', + "$uuid invalid value--misplaced hyphens", # We parse Regex flags with extra characters, including nulls. - 'Null byte in $regularExpression options', + "Null byte in $regularExpression options", } _IMPLCIT_LOSSY_TESTS = { # JSON decodes top-level $ref+$id as a DBRef but BSON doesn't. - 'Document with key names similar to those of a DBRef' + "Document with key names similar to those of a DBRef" } _DEPRECATED_BSON_TYPES = { # Symbol - '0x0E': str, + "0x0E": str, # Undefined - '0x06': type(None), + "0x06": type(None), # DBPointer - '0x0C': DBRef + "0x0C": DBRef, } @@ -78,27 +76,23 @@ # We normally encode UUID as binary subtype 0x03, # but we'll need to encode to subtype 0x04 for one of the tests. codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD) -json_options_uuid_04 = json_util.JSONOptions(json_mode=JSONMode.CANONICAL, - uuid_representation=STANDARD) +json_options_uuid_04 = json_util.JSONOptions( + json_mode=JSONMode.CANONICAL, uuid_representation=STANDARD +) json_options_iso8601 = json_util.JSONOptions( - datetime_representation=json_util.DatetimeRepresentation.ISO8601, - json_mode=JSONMode.LEGACY) -to_extjson = functools.partial(json_util.dumps, - json_options=json_util.CANONICAL_JSON_OPTIONS) -to_extjson_uuid_04 = functools.partial(json_util.dumps, - json_options=json_options_uuid_04) -to_extjson_iso8601 = functools.partial(json_util.dumps, - json_options=json_options_iso8601) -to_relaxed_extjson = functools.partial( - json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) -to_bson_uuid_04 = functools.partial(encode, - codec_options=codec_options_uuid_04) + datetime_representation=json_util.DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY +) +to_extjson = functools.partial(json_util.dumps, json_options=json_util.CANONICAL_JSON_OPTIONS) +to_extjson_uuid_04 = functools.partial(json_util.dumps, json_options=json_options_uuid_04) +to_extjson_iso8601 = functools.partial(json_util.dumps, json_options=json_options_iso8601) +to_relaxed_extjson = functools.partial(json_util.dumps, json_options=json_util.RELAXED_JSON_OPTIONS) +to_bson_uuid_04 = functools.partial(encode, codec_options=codec_options_uuid_04) to_bson = functools.partial(encode, codec_options=codec_options) decode_bson = functools.partial(decode, codec_options=codec_options_no_tzaware) decode_extjson = functools.partial( json_util.loads, - json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, - document_class=SON)) + json_options=json_util.JSONOptions(json_mode=JSONMode.CANONICAL, document_class=SON), +) loads = functools.partial(json.loads, object_pairs_hook=SON) @@ -113,65 +107,62 @@ def assertJsonEqual(self, first, second, msg=None): def create_test(case_spec): - bson_type = case_spec['bson_type'] + bson_type = case_spec["bson_type"] # Test key is absent when testing top-level documents. - test_key = case_spec.get('test_key') - deprecated = case_spec.get('deprecated') + test_key = case_spec.get("test_key") + deprecated = case_spec.get("deprecated") def run_test(self): - for valid_case in case_spec.get('valid', []): - description = valid_case['description'] + for valid_case in case_spec.get("valid", []): + description = valid_case["description"] if description in _TESTS_TO_SKIP: continue # Special case for testing encoding UUID as binary subtype 0x04. - if description.startswith('subtype 0x04'): + if description.startswith("subtype 0x04"): encode_extjson = to_extjson_uuid_04 encode_bson = to_bson_uuid_04 else: encode_extjson = to_extjson encode_bson = to_bson - cB = binascii.unhexlify(valid_case['canonical_bson'].encode('utf8')) - cEJ = valid_case['canonical_extjson'] - rEJ = valid_case.get('relaxed_extjson') - dEJ = valid_case.get('degenerate_extjson') + cB = binascii.unhexlify(valid_case["canonical_bson"].encode("utf8")) + cEJ = valid_case["canonical_extjson"] + rEJ = valid_case.get("relaxed_extjson") + dEJ = valid_case.get("degenerate_extjson") if description in _IMPLCIT_LOSSY_TESTS: - valid_case.setdefault('lossy', True) - lossy = valid_case.get('lossy') + valid_case.setdefault("lossy", True) + lossy = valid_case.get("lossy") # BSON double, use lowercase 'e+' to match Python's encoding - if bson_type == '0x01': - cEJ = cEJ.replace('E+', 'e+') + if bson_type == "0x01": + cEJ = cEJ.replace("E+", "e+") decoded_bson = decode_bson(cB) if not lossy: # Make sure we can parse the legacy (default) JSON format. legacy_json = json_util.dumps( - decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS) - self.assertEqual( - decode_extjson(legacy_json), decoded_bson, description) + decoded_bson, json_options=json_util.LEGACY_JSON_OPTIONS + ) + self.assertEqual(decode_extjson(legacy_json), decoded_bson, description) if deprecated: - if 'converted_bson' in valid_case: - converted_bson = binascii.unhexlify( - valid_case['converted_bson'].encode('utf8')) + if "converted_bson" in valid_case: + converted_bson = binascii.unhexlify(valid_case["converted_bson"].encode("utf8")) self.assertEqual(encode_bson(decoded_bson), converted_bson) self.assertJsonEqual( - encode_extjson(decode_bson(converted_bson)), - valid_case['converted_extjson']) + encode_extjson(decode_bson(converted_bson)), valid_case["converted_extjson"] + ) # Make sure we can decode the type. self.assertEqual(decoded_bson, decode_extjson(cEJ)) if test_key is not None: - self.assertIsInstance(decoded_bson[test_key], - _DEPRECATED_BSON_TYPES[bson_type]) + self.assertIsInstance(decoded_bson[test_key], _DEPRECATED_BSON_TYPES[bson_type]) continue # Jython can't handle NaN with a payload from # struct.(un)pack if endianness is specified in the format string. - if not (sys.platform.startswith("java") and - description == 'NaN with payload'): + if not (sys.platform.startswith("java") and description == "NaN with payload"): # Test round-tripping canonical bson. self.assertEqual(encode_bson(decoded_bson), cB, description) self.assertJsonEqual(encode_extjson(decoded_bson), cEJ) @@ -183,8 +174,8 @@ def run_test(self): self.assertEqual(encode_bson(decoded_json), cB) # Test round-tripping degenerate bson. - if 'degenerate_bson' in valid_case: - dB = binascii.unhexlify(valid_case['degenerate_bson'].encode('utf8')) + if "degenerate_bson" in valid_case: + dB = binascii.unhexlify(valid_case["degenerate_bson"].encode("utf8")) self.assertEqual(encode_bson(decode_bson(dB)), cB) # Test round-tripping degenerate extended json. @@ -200,53 +191,48 @@ def run_test(self): decoded_json = decode_extjson(rEJ) self.assertJsonEqual(to_relaxed_extjson(decoded_json), rEJ) - for decode_error_case in case_spec.get('decodeErrors', []): + for decode_error_case in case_spec.get("decodeErrors", []): with self.assertRaises(InvalidBSON): - decode_bson( - binascii.unhexlify(decode_error_case['bson'].encode('utf8'))) + decode_bson(binascii.unhexlify(decode_error_case["bson"].encode("utf8"))) - for parse_error_case in case_spec.get('parseErrors', []): - description = parse_error_case['description'] + for parse_error_case in case_spec.get("parseErrors", []): + description = parse_error_case["description"] if description in _NON_PARSE_ERRORS: - decode_extjson(parse_error_case['string']) + decode_extjson(parse_error_case["string"]) continue - if bson_type == '0x13': - self.assertRaises( - DecimalException, Decimal128, parse_error_case['string']) - elif bson_type == '0x00': + if bson_type == "0x13": + self.assertRaises(DecimalException, Decimal128, parse_error_case["string"]) + elif bson_type == "0x00": try: - doc = decode_extjson(parse_error_case['string']) + doc = decode_extjson(parse_error_case["string"]) # Null bytes are validated when encoding to BSON. - if 'Null' in description: + if "Null" in description: to_bson(doc) - raise AssertionError('exception not raised for test ' - 'case: ' + description) - except (ValueError, KeyError, TypeError, InvalidId, - InvalidDocument): + raise AssertionError("exception not raised for test " "case: " + description) + except (ValueError, KeyError, TypeError, InvalidId, InvalidDocument): pass - elif bson_type == '0x05': + elif bson_type == "0x05": try: - decode_extjson(parse_error_case['string']) - raise AssertionError('exception not raised for test ' - 'case: ' + description) + decode_extjson(parse_error_case["string"]) + raise AssertionError("exception not raised for test " "case: " + description) except (TypeError, ValueError): pass else: - raise AssertionError('cannot test parseErrors for type ' + - bson_type) + raise AssertionError("cannot test parseErrors for type " + bson_type) + return run_test def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) - with codecs.open(filename, encoding='utf-8') as bson_test_file: + with codecs.open(filename, encoding="utf-8") as bson_test_file: test_method = create_test(json.load(bson_test_file)) - setattr(TestBSONCorpus, 'test_' + test_suffix, test_method) + setattr(TestBSONCorpus, "test_" + test_suffix, test_method) create_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_bulk.py b/test/test_bulk.py index a895dfddc3..fae1c7e201 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -21,24 +21,27 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + remove_all_users, + rs_or_single_client_noauth, + single_client, + wait_until, +) + from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.collection import Collection from pymongo.common import partition_node -from pymongo.errors import (BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) from pymongo.operations import * from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (remove_all_users, - rs_or_single_client_noauth, - single_client, - wait_until) class BulkTestBase(IntegrationTest): @@ -58,87 +61,91 @@ def setUp(self): def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" for key, value in expected.items(): - if key == 'nModified': - self.assertEqual(value, actual['nModified']) - elif key == 'upserted': + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": expected_upserts = value - actual_upserts = actual['upserted'] + actual_upserts = actual["upserted"] self.assertEqual( - len(expected_upserts), len(actual_upserts), - 'Expected %d elements in "upserted", got %d' % ( - len(expected_upserts), len(actual_upserts))) + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) for e, a in zip(expected_upserts, actual_upserts): self.assertEqualUpsert(e, a) - elif key == 'writeErrors': + elif key == "writeErrors": expected_errors = value - actual_errors = actual['writeErrors'] + actual_errors = actual["writeErrors"] self.assertEqual( - len(expected_errors), len(actual_errors), - 'Expected %d elements in "writeErrors", got %d' % ( - len(expected_errors), len(actual_errors))) + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) for e, a in zip(expected_errors, actual_errors): self.assertEqualWriteError(e, a) else: self.assertEqual( - actual.get(key), value, - '%r value of %r does not match expected %r' % - (key, actual.get(key), value)) + actual.get(key), + value, + "%r value of %r does not match expected %r" % (key, actual.get(key), value), + ) def assertEqualUpsert(self, expected, actual): """Compare bulk.execute()['upserts'] to expected value. Like: {'index': 0, '_id': ObjectId()} """ - self.assertEqual(expected['index'], actual['index']) - if expected['_id'] == '...': + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": # Unspecified value. - self.assertTrue('_id' in actual) + self.assertTrue("_id" in actual) else: - self.assertEqual(expected['_id'], actual['_id']) + self.assertEqual(expected["_id"], actual["_id"]) def assertEqualWriteError(self, expected, actual): """Compare bulk.execute()['writeErrors'] to expected value. Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} """ - self.assertEqual(expected['index'], actual['index']) - self.assertEqual(expected['code'], actual['code']) - if expected['errmsg'] == '...': + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue('errmsg' in actual) + self.assertTrue("errmsg" in actual) else: - self.assertEqual(expected['errmsg'], actual['errmsg']) + self.assertEqual(expected["errmsg"], actual["errmsg"]) - expected_op = expected['op'].copy() - actual_op = actual['op'].copy() - if expected_op.get('_id') == '...': + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": # Unspecified _id. - self.assertTrue('_id' in actual_op) - actual_op.pop('_id') - expected_op.pop('_id') + self.assertTrue("_id" in actual_op) + actual_op.pop("_id") + expected_op.pop("_id") self.assertEqual(expected_op, actual_op) class TestBulk(BulkTestBase): - def test_empty(self): self.assertRaises(InvalidOperation, self.coll.bulk_write, []) def test_insert(self): expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } result = self.coll.bulk_write([InsertOne({})]) @@ -149,14 +156,14 @@ def test_insert(self): def _test_update_many(self, update): expected = { - 'nMatched': 2, - 'nModified': 2, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 2, + "nModified": 2, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -166,11 +173,11 @@ def _test_update_many(self, update): self.assertTrue(result.modified_count in (2, None)) def test_update_many(self): - self._test_update_many({'$set': {'foo': 'bar'}}) + self._test_update_many({"$set": {"foo": "bar"}}) @client_context.require_version_min(4, 1, 11) def test_update_many_pipeline(self): - self._test_update_many([{'$set': {'foo': 'bar'}}]) + self._test_update_many([{"$set": {"foo": "bar"}}]) def test_array_filters_validation(self): self.assertRaises(TypeError, UpdateMany, {}, {}, array_filters={}) @@ -178,23 +185,21 @@ def test_array_filters_validation(self): def test_array_filters_unacknowledged(self): coll = self.coll_w0 - update_one = UpdateOne( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) - update_many = UpdateMany( - {}, {'$set': {'y.$[i].b': 5}}, array_filters=[{'i.b': 1}]) + update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) self.assertRaises(ConfigurationError, coll.bulk_write, [update_one]) self.assertRaises(ConfigurationError, coll.bulk_write, [update_many]) def _test_update_one(self, update): expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -205,28 +210,28 @@ def _test_update_one(self, update): self.assertTrue(result.modified_count in (1, None)) def test_update_one(self): - self._test_update_one({'$set': {'foo': 'bar'}}) + self._test_update_one({"$set": {"foo": "bar"}}) @client_context.require_version_min(4, 1, 11) def test_update_one_pipeline(self): - self._test_update_one([{'$set': {'foo': 'bar'}}]) + self._test_update_one([{"$set": {"foo": "bar"}}]) def test_replace_one(self): expected = { - 'nMatched': 1, - 'nModified': 1, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) - result = self.coll.bulk_write([ReplaceOne({}, {'foo': 'bar'})]) + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (1, None)) @@ -234,14 +239,14 @@ def test_replace_one(self): def test_remove(self): # Test removing all documents, ordered. expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 2, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 2, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } self.coll.insert_many([{}, {}]) @@ -253,14 +258,14 @@ def test_remove_one(self): # Test removing one document, empty selector. self.coll.insert_many([{}, {}]) expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 0, - 'nRemoved': 1, - 'upserted': [], - 'writeErrors': [], - 'writeConcernErrors': [] + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 1, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], } result = self.coll.bulk_write([DeleteOne({})]) @@ -271,24 +276,22 @@ def test_remove_one(self): def test_upsert(self): expected = { - 'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}] + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], } - result = self.coll.bulk_write([ReplaceOne({}, - {'foo': 'bar'}, - upsert=True)]) + result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.upserted_count) assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) - self.assertEqual(self.coll.count_documents({'foo': 'bar'}), 1) + self.assertEqual(self.coll.count_documents({"foo": "bar"}), 1) def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. @@ -311,23 +314,23 @@ def test_bulk_max_message_size(self): # Generate a list of documents such that the first batched OP_MSG is # as close as possible to the 48MB limit. docs = [ - {'_id': 1, 'l': 's' * _16_MB}, - {'_id': 2, 'l': 's' * _16_MB}, - {'_id': 3, 'l': 's' * (_16_MB - 10000)}, + {"_id": 1, "l": "s" * _16_MB}, + {"_id": 2, "l": "s" * _16_MB}, + {"_id": 3, "l": "s" * (_16_MB - 10000)}, ] # Fill in the remaining ~10000 bytes with small documents. for i in range(4, 10000): - docs.append({'_id': i}) + docs.append({"_id": i}) result = self.coll.insert_many(docs) self.assertEqual(len(docs), len(result.inserted_ids)) def test_generator_insert(self): def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} result = self.coll.insert_many(gen()) self.assertEqual(5, len(result.inserted_ids)) @@ -353,134 +356,166 @@ def test_bulk_write_invalid_arguments(self): self.coll.bulk_write([{}]) # type: ignore[list-item] def test_upsert_large(self): - big = 'a' * (client_context.max_bson_size - 37) - result = self.coll.bulk_write([ - UpdateOne({'x': 1}, {'$set': {'s': big}}, upsert=True)]) + big = "a" * (client_context.max_bson_size - 37) + result = self.coll.bulk_write([UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)]) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': '...'}]}, - result.bulk_api_result) - - self.assertEqual(1, self.coll.count_documents({'x': 1})) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, self.coll.count_documents({"x": 1})) def test_client_generated_upsert_id(self): - result = self.coll.bulk_write([ - UpdateOne({'_id': 0}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': 1}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': 2}, {'_id': 2}, upsert=True), - ]) + result = self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': 0}, - {'index': 1, '_id': 1}, - {'index': 2, '_id': 2}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) def test_upsert_uuid_standard(self): options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) coll = self.coll.with_options(codec_options=options) uuids = [uuid.uuid4() for _ in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': uuids[0]}, - {'index': 1, '_id': uuids[1]}, - {'index': 2, '_id': uuids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) def test_upsert_uuid_unspecified(self): options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) coll = self.coll.with_options(codec_options=options) uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': uuids[0]}, - {'index': 1, '_id': uuids[1]}, - {'index': 2, '_id': uuids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) def test_upsert_uuid_standard_subdocuments(self): options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) coll = self.coll.with_options(codec_options=options) - ids: list = [ - {'f': Binary(bytes(i)), 'f2': uuid.uuid4()} - for i in range(3) - ] + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': ids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': ids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': ids[2]}, {'_id': ids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) # The `Binary` values are returned as `bytes` objects. for _id in ids: - _id['f'] = bytes(_id['f']) + _id["f"] = bytes(_id["f"]) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': ids[0]}, - {'index': 1, '_id': ids[1]}, - {'index': 2, '_id': ids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) def test_single_ordered_batch(self): - result = self.coll.bulk_write([ - InsertOne({'a': 1}), - UpdateOne({'a': 1}, {'$set': {'b': 1}}), - UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), - InsertOne({'a': 3}), - DeleteOne({'a': 3}), - ]) + result = self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}]}, - result.bulk_api_result) + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) def test_single_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - InsertOne({'b': 3, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), ] try: self.coll.bulk_write(requests) @@ -491,33 +526,41 @@ def test_single_error_ordered_batch(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_multiple_error_ordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - UpdateOne({'b': 3}, {'$set': {'a': 2}}, upsert=True), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - InsertOne({'b': 4, 'a': 3}), - InsertOne({'b': 5, 'a': 1}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), ] try: @@ -529,50 +572,61 @@ def test_multiple_error_ordered_batch(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_single_unordered_batch(self): requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 1}, {'$set': {'b': 1}}), - UpdateOne({'a': 2}, {'$set': {'b': 2}}, upsert=True), - InsertOne({'a': 3}), - DeleteOne({'a': 3}), + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), ] result = self.coll.bulk_write(requests, ordered=False) self.assertEqualResponse( - {'nMatched': 1, - 'nModified': 1, - 'nUpserted': 1, - 'nInserted': 2, - 'nRemoved': 1, - 'upserted': [{'index': 2, '_id': '...'}], - 'writeErrors': [], - 'writeConcernErrors': []}, - result.bulk_api_result) + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) def test_single_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 1}}, upsert=True), - InsertOne({'b': 3, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), ] try: @@ -584,33 +638,41 @@ def test_single_error_unordered_batch(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 1, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 2}, - 'u': {'$set': {'a': 1}}, - 'multi': False, - 'upsert': True}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) def test_multiple_error_unordered_batch(self): - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) requests: list = [ - InsertOne({'b': 1, 'a': 1}), - UpdateOne({'b': 2}, {'$set': {'a': 3}}, upsert=True), - UpdateOne({'b': 3}, {'$set': {'a': 4}}, upsert=True), - UpdateOne({'b': 4}, {'$set': {'a': 3}}, upsert=True), - InsertOne({'b': 5, 'a': 2}), - InsertOne({'b': 6, 'a': 1}), + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), ] try: @@ -623,35 +685,43 @@ def test_multiple_error_unordered_batch(self): # Assume the update at index 1 runs before the update at index 3, # although the spec does not require it. Same for inserts. self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 2, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [ - {'index': 1, '_id': '...'}, - {'index': 2, '_id': '...'}], - 'writeConcernErrors': [], - 'writeErrors': [ - {'index': 3, - 'code': 11000, - 'errmsg': '...', - 'op': {'q': {'b': 4}, - 'u': {'$set': {'a': 3}}, - 'multi': False, - 'upsert': True}}, - {'index': 5, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'b': 6, 'a': 1}}]}, - result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) def test_large_inserts_ordered(self): - big = 'x' * client_context.max_bson_size + big = "x" * client_context.max_bson_size requests = [ - InsertOne({'b': 1, 'a': 1}), - InsertOne({'big': big}), - InsertOne({'b': 2, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), ] try: @@ -662,29 +732,31 @@ def test_large_inserts_ordered(self): else: self.fail("Error not raised") - self.assertEqual(1, result['nInserted']) + self.assertEqual(1, result["nInserted"]) self.coll.delete_many({}) - big = 'x' * (1024 * 1024 * 4) - write_result = self.coll.bulk_write([ - InsertOne({'a': 1, 'big': big}), - InsertOne({'a': 2, 'big': big}), - InsertOne({'a': 3, 'big': big}), - InsertOne({'a': 4, 'big': big}), - InsertOne({'a': 5, 'big': big}), - InsertOne({'a': 6, 'big': big}), - ]) + big = "x" * (1024 * 1024 * 4) + write_result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) self.assertEqual(6, write_result.inserted_count) self.assertEqual(6, self.coll.count_documents({})) def test_large_inserts_unordered(self): - big = 'x' * client_context.max_bson_size + big = "x" * client_context.max_bson_size requests = [ - InsertOne({'b': 1, 'a': 1}), - InsertOne({'big': big}), - InsertOne({'b': 2, 'a': 2}), + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), ] try: @@ -695,26 +767,28 @@ def test_large_inserts_unordered(self): else: self.fail("Error not raised") - self.assertEqual(2, details['nInserted']) + self.assertEqual(2, details["nInserted"]) self.coll.delete_many({}) - big = 'x' * (1024 * 1024 * 4) - result = self.coll.bulk_write([ - InsertOne({'a': 1, 'big': big}), - InsertOne({'a': 2, 'big': big}), - InsertOne({'a': 3, 'big': big}), - InsertOne({'a': 4, 'big': big}), - InsertOne({'a': 5, 'big': big}), - InsertOne({'a': 6, 'big': big}), - ], ordered=False) + big = "x" * (1024 * 1024 * 4) + result = self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ], + ordered=False, + ) self.assertEqual(6, result.inserted_count) self.assertEqual(6, self.coll.count_documents({})) class BulkAuthorizationTestBase(BulkTestBase): - @classmethod @client_context.require_auth @client_context.require_no_api_version @@ -723,117 +797,112 @@ def setUpClass(cls): def setUp(self): super(BulkAuthorizationTestBase, self).setUp() - client_context.create_user( - self.db.name, 'readonly', 'pw', ['read']) + client_context.create_user(self.db.name, "readonly", "pw", ["read"]) self.db.command( - 'createRole', 'noremove', - privileges=[{ - 'actions': ['insert', 'update', 'find'], - 'resource': {'db': 'pymongo_test', 'collection': 'test'} - }], - roles=[]) - - client_context.create_user(self.db.name, 'noremove', 'pw', ['noremove']) + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) def tearDown(self): - self.db.command('dropRole', 'noremove') + self.db.command("dropRole", "noremove") remove_all_users(self.db) class TestBulkUnacknowledged(BulkTestBase): - def tearDown(self): self.coll.delete_many({}) def test_no_results_ordered_success(self): requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'a': 2}), - DeleteOne({'a': 1}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') + wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') def test_no_results_ordered_failure(self): requests: list = [ - InsertOne({'_id': 1}), - UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'_id': 2}), + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), # Fails with duplicate key error. - InsertOne({'_id': 1}), + InsertOne({"_id": 1}), # Should not be executed since the batch is ordered. - DeleteOne({'_id': 1}), + DeleteOne({"_id": 1}), ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: 3 == self.coll.count_documents({}), - 'insert 3 documents') - self.assertEqual({'_id': 1}, self.coll.find_one({'_id': 1})) + wait_until(lambda: 3 == self.coll.count_documents({}), "insert 3 documents") + self.assertEqual({"_id": 1}, self.coll.find_one({"_id": 1})) def test_no_results_unordered_success(self): requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'a': 2}), - DeleteOne({'a': 1}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') + wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') def test_no_results_unordered_failure(self): requests: list = [ - InsertOne({'_id': 1}), - UpdateOne({'_id': 3}, {'$set': {'b': 1}}, upsert=True), - InsertOne({'_id': 2}), + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), # Fails with duplicate key error. - InsertOne({'_id': 1}), + InsertOne({"_id": 1}), # Should be executed since the batch is unordered. - DeleteOne({'_id': 1}), + DeleteOne({"_id": 1}), ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), - 'insert 2 documents') - wait_until(lambda: self.coll.find_one({'_id': 1}) is None, - 'removed {"_id": 1}') + wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") + wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') class TestBulkAuthorization(BulkAuthorizationTestBase): - def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth(username='readonly', password='pw', - authSource='pymongo_test') + cli = rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) coll = cli.pymongo_test.test coll.find_one() - self.assertRaises(OperationFailure, coll.bulk_write, - [InsertOne({'x': 1})]) + self.assertRaises(OperationFailure, coll.bulk_write, [InsertOne({"x": 1})]) def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth(username='noremove', password='pw', - authSource='pymongo_test') + cli = rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) coll = cli.pymongo_test.test coll.find_one() requests = [ - InsertOne({'x': 1}), - ReplaceOne({'x': 2}, {'x': 2}, upsert=True), - DeleteMany({}), # Prohibited. - InsertOne({'x': 3}), # Never attempted. + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. ] self.assertRaises(OperationFailure, coll.bulk_write, requests) - self.assertEqual(set([1, 2]), set(self.coll.distinct('x'))) + self.assertEqual(set([1, 2]), set(self.coll.distinct("x"))) class TestBulkWriteConcern(BulkTestBase): @@ -846,8 +915,8 @@ def setUpClass(cls): cls.w = client_context.w cls.secondary = None if cls.w is not None and cls.w > 1: - for member in client_context.hello['hosts']: - if member != client_context.hello['primary']: + for member in client_context.hello["hosts"]: + if member != client_context.hello["primary"]: cls.secondary = single_client(*partition_node(member)) break @@ -862,32 +931,23 @@ def cause_wtimeout(self, requests, ordered): # Use the rsSyncApplyStop failpoint to pause replication on a # secondary which will cause a wtimeout error. - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='alwaysOn') + self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn") try: - coll = self.coll.with_options( - write_concern=WriteConcern(w=self.w, wtimeout=1)) + coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1)) return coll.bulk_write(requests, ordered=ordered) finally: - self.secondary.admin.command('configureFailPoint', - 'rsSyncApplyStop', - mode='off') + self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_ordered(self): # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) - result = coll_ww.bulk_write([ - DeleteOne({"something": "that does no exist"})]) + result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})]) self.assertTrue(result.acknowledged) - requests = [ - InsertOne({'a': 1}), - InsertOne({'a': 2}) - ] + requests = [InsertOne({"a": 1}), InsertOne({"a": 2})] # Replication wtimeout is a 'soft' error. # It shouldn't stop batch processing. try: @@ -899,34 +959,37 @@ def test_write_concern_failure_ordered(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 0, - 'nInserted': 2, - 'nRemoved': 0, - 'upserted': [], - 'writeErrors': []}, - details) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + }, + details, + ) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details['writeConcernErrors']) > 0) + self.assertTrue(len(details["writeConcernErrors"]) > 0) - failed = details['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) # Fail due to write concern support as well # as duplicate key error on ordered batch. requests = [ - InsertOne({'a': 1}), - ReplaceOne({'a': 3}, {'b': 1}, upsert=True), - InsertOne({'a': 1}), - InsertOne({'a': 2}), + InsertOne({"a": 1}), + ReplaceOne({"a": 3}, {"b": 1}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), ] try: self.cause_wtimeout(requests, ordered=True) @@ -937,36 +1000,36 @@ def test_write_concern_failure_ordered(self): self.fail("Error not raised") self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 1, - 'nInserted': 1, - 'nRemoved': 0, - 'upserted': [{'index': 1, '_id': '...'}], - 'writeErrors': [ - {'index': 2, - 'code': 11000, - 'errmsg': '...', - 'op': {'_id': '...', 'a': 1}}]}, - details) - - self.assertTrue(len(details['writeConcernErrors']) > 1) - failed = details['writeErrors'][0] - self.assertTrue("duplicate" in failed['errmsg']) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 1, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}], + "writeErrors": [ + {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}} + ], + }, + details, + ) + + self.assertTrue(len(details["writeConcernErrors"]) > 1) + failed = details["writeErrors"][0] + self.assertTrue("duplicate" in failed["errmsg"]) @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_unordered(self): # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) - result = coll_ww.bulk_write([ - DeleteOne({"something": "that does no exist"})], ordered=False) + result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})], ordered=False) self.assertTrue(result.acknowledged) requests = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), - InsertOne({'a': 2}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 2}), ] # Replication wtimeout is a 'soft' error. # It shouldn't stop batch processing. @@ -978,24 +1041,24 @@ def test_write_concern_failure_unordered(self): else: self.fail("Error not raised") - self.assertEqual(2, details['nInserted']) - self.assertEqual(1, details['nUpserted']) - self.assertEqual(0, len(details['writeErrors'])) + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(0, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details['writeConcernErrors']) > 1) + self.assertTrue(len(details["writeConcernErrors"]) > 1) self.coll.delete_many({}) - self.coll.create_index('a', unique=True) - self.addCleanup(self.coll.drop_index, [('a', 1)]) + self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) # Fail due to write concern support as well # as duplicate key error on unordered batch. requests: list = [ - InsertOne({'a': 1}), - UpdateOne({'a': 3}, {'$set': {'a': 3, 'b': 1}}, upsert=True), - InsertOne({'a': 1}), - InsertOne({'a': 2}), + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), ] try: self.cause_wtimeout(requests, ordered=False) @@ -1005,27 +1068,27 @@ def test_write_concern_failure_unordered(self): else: self.fail("Error not raised") - self.assertEqual(2, details['nInserted']) - self.assertEqual(1, details['nUpserted']) - self.assertEqual(1, len(details['writeErrors'])) + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(1, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details['writeConcernErrors']) > 1) + self.assertTrue(len(details["writeConcernErrors"]) > 1) - failed = details['writeErrors'][0] - self.assertEqual(2, failed['index']) - self.assertEqual(11000, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) - self.assertEqual(1, failed['op']['a']) + failed = details["writeErrors"][0] + self.assertEqual(2, failed["index"]) + self.assertEqual(11000, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertEqual(1, failed["op"]["a"]) - failed = details['writeConcernErrors'][0] - self.assertEqual(64, failed['code']) - self.assertTrue(isinstance(failed['errmsg'], str)) + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) - upserts = details['upserted'] + upserts = details["upserted"] self.assertEqual(1, len(upserts)) - self.assertEqual(1, upserts[0]['index']) - self.assertTrue(upserts[0].get('_id')) + self.assertEqual(1, upserts[0]["index"]) + self.assertTrue(upserts[0].get("_id")) if __name__ == "__main__": diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 655b99e801..73768fd0f6 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -14,40 +14,42 @@ """Test the change_stream module.""" -import random import os +import random import re -import sys import string +import sys import threading import time import uuid - from itertools import product from typing import no_type_check -sys.path[0:0] = [''] +sys.path[0:0] = [""] -from bson import ObjectId, SON, Timestamp, encode, json_util -from bson.binary import (ALL_UUID_REPRESENTATIONS, - Binary, - STANDARD, - PYTHON_LEGACY) +from test import IntegrationTest, client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import ( + AllowListEventListener, + EventListener, + rs_or_single_client, + wait_until, +) + +from bson import SON, ObjectId, Timestamp, encode, json_util +from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument - from pymongo import MongoClient from pymongo.command_cursor import CommandCursor -from pymongo.errors import (InvalidOperation, OperationFailure, - ServerSelectionTimeoutError) +from pymongo.errors import ( + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, +) from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.unified_format import generate_test_classes -from test.utils import ( - EventListener, AllowListEventListener, rs_or_single_client, wait_until) - class TestChangeStreamBase(IntegrationTest): RUN_ON_LOAD_BALANCER = True @@ -70,7 +72,7 @@ def client_with_listener(self, *commands): def watched_collection(self, *args, **kwargs): """Return a collection that is watched by self.change_stream().""" # Construct a unique collection for each test. - collname = '.'.join(self.id().rsplit('.', 2)[1:]) + collname = ".".join(self.id().rsplit(".", 2)[1:]) return self.db.get_collection(collname, *args, **kwargs) def generate_invalidate_event(self, change_stream): @@ -81,27 +83,25 @@ def generate_unique_collnames(self, numcolls): """Generate numcolls collection names unique to a test.""" collnames = [] for idx in range(1, numcolls + 1): - collnames.append(self.id() + '_' + str(idx)) + collnames.append(self.id() + "_" + str(idx)) return collnames def get_resume_token(self, invalidate=False): """Get a resume token to use for starting a change stream.""" # Ensure targeted collection exists before starting. - coll = self.watched_collection(write_concern=WriteConcern('majority')) + coll = self.watched_collection(write_concern=WriteConcern("majority")) coll.insert_one({}) if invalidate: - with self.change_stream( - [{'$match': {'operationType': 'invalidate'}}]) as cs: + with self.change_stream([{"$match": {"operationType": "invalidate"}}]) as cs: if isinstance(cs._target, MongoClient): - self.skipTest( - "cluster-level change streams cannot be invalidated") + self.skipTest("cluster-level change streams cannot be invalidated") self.generate_invalidate_event(cs) - return cs.next()['_id'] + return cs.next()["_id"] else: with self.change_stream() as cs: - coll.insert_one({'data': 1}) - return cs.next()['_id'] + coll.insert_one({"data": 1}) + return cs.next()["_id"] def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most @@ -125,18 +125,18 @@ class APITestsMixin(object): @no_type_check def test_watch(self): with self.change_stream( - [{'$project': {'foo': 0}}], full_document='updateLookup', - max_await_time_ms=1000, batch_size=100) as change_stream: - self.assertEqual([{'$project': {'foo': 0}}], - change_stream._pipeline) - self.assertEqual('updateLookup', change_stream._full_document) + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) self.assertEqual(1000, change_stream._max_await_time_ms) self.assertEqual(100, change_stream._batch_size) self.assertIsInstance(change_stream._cursor, CommandCursor) - self.assertEqual( - 1000, change_stream._cursor._CommandCursor__max_await_time_ms) - self.watched_collection( - write_concern=WriteConcern("majority")).insert_one({}) + self.assertEqual(1000, change_stream._cursor._CommandCursor__max_await_time_ms) + self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) _ = change_stream.next() resume_token = change_stream.resume_token with self.assertRaises(TypeError): @@ -150,37 +150,33 @@ def test_watch(self): @no_type_check def test_try_next(self): # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() coll.insert_one({}) self.addCleanup(coll.drop) with self.change_stream(max_await_time_ms=250) as stream: - self.assertIsNone(stream.try_next()) # No changes initially. - coll.insert_one({}) # Generate a change. + self.assertIsNone(stream.try_next()) # No changes initially. + coll.insert_one({}) # Generate a change. # On sharded clusters, even majority-committed changes only show # up once an event that sorts after it shows up on the other # shard. So, we wait on try_next to eventually return changes. - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") + wait_until(lambda: stream.try_next() is not None, "get change from try_next") @no_type_check def test_try_next_runs_one_getmore(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. - client.admin.command('ping') + client.admin.command("ping") listener.results.clear() # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() # Create the watched collection before starting the change stream to # skip any "create" events. - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) self.addCleanup(coll.drop) - with self.change_stream_with_client( - client, max_await_time_ms=250) as stream: + with self.change_stream_with_client(client, max_await_time_ms=250) as stream: self.assertEqual(listener.started_command_names(), ["aggregate"]) listener.results.clear() @@ -194,9 +190,8 @@ def test_try_next_runs_one_getmore(self): listener.results.clear() # Get at least one change before resuming. - coll.insert_one({'_id': 2}) - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") + coll.insert_one({"_id": 2}) + wait_until(lambda: stream.try_next() is not None, "get change from try_next") listener.results.clear() # Cause the next request to initiate the resume process. @@ -208,16 +203,13 @@ def test_try_next_runs_one_getmore(self): # - resume with aggregate command # - no results, return immediately without another getMore self.assertIsNone(stream.try_next()) - self.assertEqual( - listener.started_command_names(), ["getMore", "aggregate"]) + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) listener.results.clear() # Stream still works after a resume. - coll.insert_one({'_id': 3}) - wait_until(lambda: stream.try_next() is not None, - "get change from try_next") - self.assertEqual(set(listener.started_command_names()), - set(["getMore"])) + coll.insert_one({"_id": 3}) + wait_until(lambda: stream.try_next() is not None, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), set(["getMore"])) self.assertIsNone(stream.try_next()) @no_type_check @@ -225,27 +217,25 @@ def test_batch_size_is_honored(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. - client.admin.command('ping') + client.admin.command("ping") listener.results.clear() # ChangeStreams only read majority committed data so use w:majority. - coll = self.watched_collection().with_options( - write_concern=WriteConcern("majority")) + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() # Create the watched collection before starting the change stream to # skip any "create" events. - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) self.addCleanup(coll.drop) # Expected batchSize. - expected = {'batchSize': 23} - with self.change_stream_with_client( - client, max_await_time_ms=250, batch_size=23) as stream: + expected = {"batchSize": 23} + with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: # Confirm that batchSize is honored for initial batch. - cmd = listener.results['started'][0].command - self.assertEqual(cmd['cursor'], expected) + cmd = listener.results["started"][0].command + self.assertEqual(cmd["cursor"], expected) listener.results.clear() # Confirm that batchSize is honored by getMores. self.assertIsNone(stream.try_next()) - cmd = listener.results['started'][0].command + cmd = listener.results["started"][0].command key = next(iter(expected)) self.assertEqual(expected[key], cmd[key]) @@ -255,8 +245,7 @@ def test_batch_size_is_honored(self): def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() - coll = self.watched_collection( - write_concern=WriteConcern("majority")) + coll = self.watched_collection(write_concern=WriteConcern("majority")) ndocs = 3 coll.insert_many([{"data": i} for i in range(ndocs)]) @@ -268,17 +257,16 @@ def test_start_at_operation_time(self): def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") results = listener.results - with self.change_stream_with_client( - client, [{'$project': {'foo': 0}}]) as _: + with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: pass - self.assertEqual(1, len(results['started'])) - command = results['started'][0] - self.assertEqual('aggregate', command.command_name) - self.assertEqual([ - {'$changeStream': expected_cs_stage}, - {'$project': {'foo': 0}}], - command.command['pipeline']) + self.assertEqual(1, len(results["started"])) + command = results["started"][0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) @no_type_check def test_full_pipeline(self): @@ -291,11 +279,10 @@ def test_full_pipeline(self): def test_iteration(self): with self.change_stream(batch_size=2) as change_stream: num_inserted = 10 - self.watched_collection().insert_many( - [{} for _ in range(num_inserted)]) + self.watched_collection().insert_many([{} for _ in range(num_inserted)]) inserts_received = 0 for change in change_stream: - self.assertEqual(change['operationType'], 'insert') + self.assertEqual(change["operationType"], "insert") inserts_received += 1 if inserts_received == num_inserted: break @@ -303,10 +290,9 @@ def test_iteration(self): @no_type_check def _test_next_blocks(self, change_stream): - inserted_doc = {'_id': ObjectId()} + inserted_doc = {"_id": ObjectId()} changes = [] - t = threading.Thread( - target=lambda: changes.append(change_stream.next())) + t = threading.Thread(target=lambda: changes.append(change_stream.next())) t.start() # Sleep for a bit to prove that the call to next() blocks. time.sleep(1) @@ -318,8 +304,8 @@ def _test_next_blocks(self, change_stream): t.join(30) self.assertFalse(t.is_alive()) self.assertEqual(1, len(changes)) - self.assertEqual(changes[0]['operationType'], 'insert') - self.assertEqual(changes[0]['fullDocument'], inserted_doc) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) @no_type_check def test_next_blocks(self): @@ -332,7 +318,8 @@ def test_next_blocks(self): def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( - [{'$changeStream': {}}], maxAwaitTimeMS=250) as change_stream: + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: self._test_next_blocks(change_stream) @no_type_check @@ -340,9 +327,11 @@ def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" # Use a short await time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: + def iterate_cursor(): for _ in change_stream: pass + t = threading.Thread(target=iterate_cursor) t.start() self.watched_collection().insert_one({}) @@ -353,10 +342,9 @@ def iterate_cursor(): @no_type_check def test_unknown_full_document(self): - """Must rely on the server to raise an error on unknown fullDocument. - """ + """Must rely on the server to raise an error on unknown fullDocument.""" try: - with self.change_stream(full_document='notValidatedByPyMongo'): + with self.change_stream(full_document="notValidatedByPyMongo"): pass except OperationFailure: pass @@ -364,47 +352,46 @@ def test_unknown_full_document(self): @no_type_check def test_change_operations(self): """Test each operation type.""" - expected_ns = {'db': self.watched_collection().database.name, - 'coll': self.watched_collection().name} + expected_ns = { + "db": self.watched_collection().database.name, + "coll": self.watched_collection().name, + } with self.change_stream() as change_stream: # Insert. - inserted_doc = {'_id': ObjectId(), 'foo': 'bar'} + inserted_doc = {"_id": ObjectId(), "foo": "bar"} self.watched_collection().insert_one(inserted_doc) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], expected_ns) - self.assertEqual(change['fullDocument'], inserted_doc) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) # Update. - update_spec = {'$set': {'new': 1}, '$unset': {'foo': 1}} + update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}} self.watched_collection().update_one(inserted_doc, update_spec) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'update') - self.assertEqual(change['ns'], expected_ns) - self.assertNotIn('fullDocument', change) - - expected_update_description = { - 'updatedFields': {'new': 1}, - 'removedFields': ['foo']} + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "update") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + + expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} if client_context.version.at_least(4, 5, 0): - expected_update_description['truncatedArrays'] = [] - self.assertEqual(expected_update_description, - change['updateDescription']) + expected_update_description["truncatedArrays"] = [] + self.assertEqual(expected_update_description, change["updateDescription"]) # Replace. - self.watched_collection().replace_one({'new': 1}, {'foo': 'bar'}) + self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'replace') - self.assertEqual(change['ns'], expected_ns) - self.assertEqual(change['fullDocument'], inserted_doc) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "replace") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) # Delete. - self.watched_collection().delete_one({'foo': 'bar'}) + self.watched_collection().delete_one({"foo": "bar"}) change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'delete') - self.assertEqual(change['ns'], expected_ns) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "delete") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) # Invalidate. self._test_get_invalidate_event(change_stream) @@ -419,30 +406,29 @@ def test_start_after(self): # start_after can resume after invalidate. with self.change_stream(start_after=resume_token) as change_stream: - self.watched_collection().insert_one({'_id': 2}) + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) @no_type_check @client_context.require_version_min(4, 1, 1) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) - with self.change_stream(start_after=resume_token, - max_await_time_ms=250) as change_stream: - self.watched_collection().insert_one({'_id': 2}) + with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) - self.watched_collection().insert_one({'_id': 3}) + self.watched_collection().insert_one({"_id": 3}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 3}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 3}) @no_type_check @client_context.require_no_mongos # Remove after SERVER-41196 @@ -450,15 +436,14 @@ def test_start_after_resume_process_with_changes(self): def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) - with self.change_stream(start_after=resume_token, - max_await_time_ms=250) as change_stream: + with self.change_stream(start_after=resume_token, max_await_time_ms=250) as change_stream: self.assertIsNone(change_stream.try_next()) self.kill_change_stream_cursor(change_stream) - self.watched_collection().insert_one({'_id': 2}) + self.watched_collection().insert_one({"_id": 2}) change = change_stream.next() - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['fullDocument'], {'_id': 2}) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) class ProseSpecTestsMixin(object): @@ -471,46 +456,42 @@ def _client_with_listener(self, *commands): @no_type_check def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): - self.watched_collection().insert_many( - [{"data": k} for k in range(batch_size)]) + self.watched_collection().insert_many([{"data": k} for k in range(batch_size)]) for _ in range(batch_size): change = next(change_stream) return change - def _get_expected_resume_token_legacy(self, stream, - listener, previous_change=None): + def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None.""" if previous_change is None: - agg_cmd = listener.results['started'][0] + agg_cmd = listener.results["started"][0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") - return previous_change['_id'] + return previous_change["_id"] - def _get_expected_resume_token(self, stream, listener, - previous_change=None): + def _get_expected_resume_token(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes listener is a AllowListEventListener that listens for aggregate and getMore commands.""" if previous_change is None or stream._cursor._has_next(): - token = self._get_expected_resume_token_legacy( - stream, listener, previous_change) + token = self._get_expected_resume_token_legacy(stream, listener, previous_change) if token is not None: return token - response = listener.results['succeeded'][-1].reply - return response['cursor']['postBatchResumeToken'] + response = listener.results["succeeded"][-1].reply + return response["cursor"]["postBatchResumeToken"] @no_type_check def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ - with self.change_stream([{'$project': {'_id': 0}}]) as change_stream: + with self.change_stream([{"$project": {"_id": 0}}]) as change_stream: self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) @@ -522,17 +503,17 @@ def _test_raises_error_on_missing_id(self, expected_exception): def _test_update_resume_token(self, expected_rt_getter): """ChangeStream must continuously track the last seen resumeToken.""" client, listener = self._client_with_listener("aggregate", "getMore") - coll = self.watched_collection(write_concern=WriteConcern('majority')) + coll = self.watched_collection(write_concern=WriteConcern("majority")) with self.change_stream_with_client(client) as change_stream: self.assertEqual( - change_stream.resume_token, - expected_rt_getter(change_stream, listener)) + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) for _ in range(3): coll.insert_one({}) change = next(change_stream) self.assertEqual( - change_stream.resume_token, - expected_rt_getter(change_stream, listener, change)) + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) # Prose test no. 1 @client_context.require_version_min(4, 0, 7) @@ -561,18 +542,17 @@ def test_raises_error_on_missing_id_418minus(self): @no_type_check def test_resume_on_error(self): with self.change_stream() as change_stream: - self.insert_one_and_check(change_stream, {'_id': 1}) + self.insert_one_and_check(change_stream, {"_id": 1}) # Cause a cursor not found error on the next getMore. self.kill_change_stream_cursor(change_stream) - self.insert_one_and_check(change_stream, {'_id': 2}) + self.insert_one_and_check(change_stream, {"_id": 2}) # Prose test no. 4 @no_type_check @client_context.require_failCommand_fail_point def test_no_resume_attempt_if_aggregate_command_fails(self): # Set non-retryable error on aggregate command. - fail_point = {'mode': {'times': 1}, - 'data': {'errorCode': 2, 'failCommands': ['aggregate']}} + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} client, listener = self._client_with_listener("aggregate", "getMore") with self.fail_point(fail_point): try: @@ -581,9 +561,8 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): pass # Driver should have attempted aggregate command only once. - self.assertEqual(len(listener.results['started']), 1) - self.assertEqual(listener.results['started'][0].command_name, - 'aggregate') + self.assertEqual(len(listener.results["started"]), 1) + self.assertEqual(listener.results["started"][0].command_name, "aggregate") # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED @@ -607,14 +586,15 @@ def test_initial_empty_batch(self): @no_type_check def test_kill_cursors(self): def raise_error(): - raise ServerSelectionTimeoutError('mock error') + raise ServerSelectionTimeoutError("mock error") + with self.change_stream() as change_stream: - self.insert_one_and_check(change_stream, {'_id': 1}) + self.insert_one_and_check(change_stream, {"_id": 1}) # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor self.kill_change_stream_cursor(change_stream) cursor.close = raise_error - self.insert_one_and_check(change_stream, {'_id': 2}) + self.insert_one_and_check(change_stream, {"_id": 2}) # Prose test no. 9 @no_type_check @@ -626,21 +606,21 @@ def test_start_at_operation_time_caching(self): with self.change_stream_with_client(client) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results['started'][-1].command - self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get( - "startAtOperationTime")) + cmd = listener.results["started"][-1].command + self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) # Case 2: change stream started with startAtOperationTime listener.results.clear() optime = self.get_start_at_operation_time() - with self.change_stream_with_client( - client, start_at_operation_time=optime) as cs: + with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results['started'][-1].command - self.assertEqual(cmd["pipeline"][0]["$changeStream"].get( - "startAtOperationTime"), optime, str([k.command for k in - listener.results['started']])) + cmd = listener.results["started"][-1].command + self.assertEqual( + cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), + optime, + str([k.command for k in listener.results["started"]]), + ) # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. @@ -654,9 +634,8 @@ def test_resumetoken_empty_batch(self): self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token - response = listener.results['succeeded'][0].reply - self.assertEqual(resume_token, - response["cursor"]["postBatchResumeToken"]) + response = listener.results["succeeded"][0].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 @no_type_check @@ -667,9 +646,8 @@ def test_resumetoken_exhausted_batch(self): self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token - response = listener.results['succeeded'][-1].reply - self.assertEqual(resume_token, - response["cursor"]["postBatchResumeToken"]) + response = listener.results["succeeded"][-1].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 @no_type_check @@ -696,7 +674,7 @@ def test_resumetoken_exhausted_batch_legacy(self): with self.change_stream() as change_stream: change = self._populate_and_exhaust_change_stream(change_stream) self.assertEqual(change_stream.resume_token, change["_id"]) - resume_point = change['_id'] + resume_point = change["_id"] # Resume token is _id of last change even if resumeAfter is specified. with self.change_stream(resume_after=resume_point) as change_stream: @@ -709,9 +687,9 @@ def test_resumetoken_partially_iterated_batch(self): # When batch has been iterated up to but not including the last element. # Resume token should be _id of previous change document. with self.change_stream() as change_stream: - self.watched_collection( - write_concern=WriteConcern('majority')).insert_many( - [{"data": k} for k in range(3)]) + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"data": k} for k in range(3)] + ) for _ in range(2): change = next(change_stream) resume_token = change_stream.resume_token @@ -725,13 +703,12 @@ def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): resume_point = self.get_resume_token() # Insert some documents so that firstBatch isn't empty. - self.watched_collection( - write_concern=WriteConcern("majority")).insert_many( - [{'a': 1}, {'b': 2}, {'c': 3}]) + self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"a": 1}, {"b": 2}, {"c": 3}] + ) # Resume token should be same as the resume option. - with self.change_stream( - **{resume_option: resume_point}) as change_stream: + with self.change_stream(**{resume_option: resume_point}) as change_stream: self.assertTrue(change_stream._cursor._has_next()) resume_token = change_stream.resume_token self.assertEqual(resume_token, resume_point) @@ -757,18 +734,15 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") - with self.change_stream_with_client( - client, start_after=resume_point) as change_stream: + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes - change_stream.try_next() # No changes + change_stream.try_next() # No changes self.kill_change_stream_cursor(change_stream) - change_stream.try_next() # Resume attempt + change_stream.try_next() # Resume attempt - response = listener.results['started'][-1] - self.assertIsNone( - response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) - self.assertIsNotNone( - response.command["pipeline"][0]["$changeStream"].get("startAfter")) + response = listener.results["started"][-1] + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 18 @no_type_check @@ -778,19 +752,16 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): resume_point = self.get_resume_token() client, listener = self._client_with_listener("aggregate") - with self.change_stream_with_client( - client, start_after=resume_point) as change_stream: + with self.change_stream_with_client(client, start_after=resume_point) as change_stream: self.assertFalse(change_stream._cursor._has_next()) # No changes self.watched_collection().insert_one({}) - next(change_stream) # Changes + next(change_stream) # Changes self.kill_change_stream_cursor(change_stream) - change_stream.try_next() # Resume attempt + change_stream.try_next() # Resume attempt - response = listener.results['started'][-1] - self.assertIsNotNone( - response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) - self.assertIsNone( - response.command["pipeline"][0]["$changeStream"].get("startAfter")) + response = listener.results["started"][-1] + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @@ -828,10 +799,9 @@ def _insert_and_check(self, change_stream, db, collname, doc): coll = db[collname] coll.insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], {'db': db.name, - 'coll': collname}) - self.assertEqual(change['fullDocument'], doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) def insert_one_and_check(self, change_stream, doc): db = random.choice(self.dbs) @@ -842,22 +812,20 @@ def test_simple(self): collnames = self.generate_unique_collnames(3) with self.change_stream() as change_stream: for db, collname in product(self.dbs, collnames): - self._insert_and_check( - change_stream, db, collname, {'_id': collname} - ) + self._insert_and_check(change_stream, db, collname, {"_id": collname}) def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.client.admin.aggregate( - [{'$changeStream': {'allChangesForCluster': True}}], - maxAwaitTimeMS=250) as change_stream: + [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250 + ) as change_stream: self._test_next_blocks(change_stream) def test_full_pipeline(self): """$changeStream must be the first stage in a change stream pipeline sent to the server. """ - self._test_full_pipeline({'allChangesForCluster': True}) + self._test_full_pipeline({"allChangesForCluster": True}) class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @@ -883,22 +851,22 @@ def _test_get_invalidate_event(self, change_stream): change = change_stream.next() # 4.1+ returns "drop" events for each collection in dropped database # and a "dropDatabase" event for the database itself. - if change['operationType'] == 'drop': - self.assertTrue(change['_id']) + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) for _ in range(len(dropped_colls)): - ns = change['ns'] - self.assertEqual(ns['db'], change_stream._target.name) - self.assertIn(ns['coll'], dropped_colls) + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) change = change_stream.next() - self.assertEqual(change['operationType'], 'dropDatabase') - self.assertTrue(change['_id']) - self.assertEqual(change['ns'], {'db': change_stream._target.name}) + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) # Get next change. change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'invalidate') - self.assertNotIn('ns', change) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) # The ChangeStream should be dead. with self.assertRaises(StopIteration): change_stream.next() @@ -908,10 +876,9 @@ def _test_invalidate_stops_iteration(self, change_stream): change_stream._client.drop_database(self.db.name) # Check drop and dropDatabase events. for change in change_stream: - self.assertIn(change['operationType'], ( - 'drop', 'dropDatabase', 'invalidate')) + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) # Last change must be invalidate. - self.assertEqual(change['operationType'], 'invalidate') + self.assertEqual(change["operationType"], "invalidate") # Change stream must not allow further iteration. with self.assertRaises(StopIteration): change_stream.next() @@ -922,10 +889,9 @@ def _insert_and_check(self, change_stream, collname, doc): coll = self.db[collname] coll.insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual(change['ns'], {'db': self.db.name, - 'coll': collname}) - self.assertEqual(change['fullDocument'], doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) def insert_one_and_check(self, change_stream, doc): self._insert_and_check(change_stream, self.id(), doc) @@ -935,26 +901,21 @@ def test_simple(self): with self.change_stream() as change_stream: for collname in collnames: self._insert_and_check( - change_stream, collname, - {'_id': Binary.from_uuid(uuid.uuid4())}) + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) def test_isolation(self): # Ensure inserts to other dbs don't show up in our ChangeStream. other_db = self.client.pymongo_test_temp - self.assertNotEqual( - other_db, self.db, msg="Isolation must be tested on separate DBs") + self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs") collname = self.id() with self.change_stream() as change_stream: - other_db[collname].insert_one( - {'_id': Binary.from_uuid(uuid.uuid4())}) - self._insert_and_check( - change_stream, collname, - {'_id': Binary.from_uuid(uuid.uuid4())}) + other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())}) + self._insert_and_check(change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())}) self.client.drop_database(other_db) -class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, - ProseSpecTestsMixin): +class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod @client_context.require_version_min(3, 5, 11) @client_context.require_no_mmap @@ -968,8 +929,11 @@ def setUp(self): self.watched_collection().insert_one({}) def change_stream_with_client(self, client, *args, **kwargs): - return client[self.db.name].get_collection( - self.watched_collection().name).watch(*args, **kwargs) + return ( + client[self.db.name] + .get_collection(self.watched_collection().name) + .watch(*args, **kwargs) + ) def generate_invalidate_event(self, change_stream): # Dropping the collection invalidates the change stream. @@ -979,9 +943,9 @@ def _test_invalidate_stops_iteration(self, change_stream): self.generate_invalidate_event(change_stream) # Check drop and dropDatabase events. for change in change_stream: - self.assertIn(change['operationType'], ('drop', 'invalidate')) + self.assertIn(change["operationType"], ("drop", "invalidate")) # Last change must be invalidate. - self.assertEqual(change['operationType'], 'invalidate') + self.assertEqual(change["operationType"], "invalidate") # Change stream must not allow further iteration. with self.assertRaises(StopIteration): change_stream.next() @@ -993,17 +957,18 @@ def _test_get_invalidate_event(self, change_stream): change_stream._target.drop() change = change_stream.next() # 4.1+ returns a "drop" change document. - if change['operationType'] == 'drop': - self.assertTrue(change['_id']) - self.assertEqual(change['ns'], { - 'db': change_stream._target.database.name, - 'coll': change_stream._target.name}) + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + self.assertEqual( + change["ns"], + {"db": change_stream._target.database.name, "coll": change_stream._target.name}, + ) # Last change should be invalidate. change = change_stream.next() - self.assertTrue(change['_id']) - self.assertEqual(change['operationType'], 'invalidate') - self.assertNotIn('ns', change) - self.assertNotIn('fullDocument', change) + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) # The ChangeStream should be dead. with self.assertRaises(StopIteration): change_stream.next() @@ -1011,38 +976,36 @@ def _test_get_invalidate_event(self, change_stream): def insert_one_and_check(self, change_stream, doc): self.watched_collection().insert_one(doc) change = next(change_stream) - self.assertEqual(change['operationType'], 'insert') + self.assertEqual(change["operationType"], "insert") self.assertEqual( - change['ns'], {'db': self.watched_collection().database.name, - 'coll': self.watched_collection().name}) - self.assertEqual(change['fullDocument'], doc) + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) def test_raw(self): """Test with RawBSONDocument.""" - raw_coll = self.watched_collection( - codec_options=DEFAULT_RAW_BSON_OPTIONS) + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) with raw_coll.watch() as change_stream: - raw_doc = RawBSONDocument(encode({'_id': 1})) + raw_doc = RawBSONDocument(encode({"_id": 1})) self.watched_collection().insert_one(raw_doc) change = next(change_stream) self.assertIsInstance(change, RawBSONDocument) - self.assertEqual(change['operationType'], 'insert') - self.assertEqual( - change['ns']['db'], self.watched_collection().database.name) - self.assertEqual( - change['ns']['coll'], self.watched_collection().name) - self.assertEqual(change['fullDocument'], raw_doc) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" for uuid_representation in ALL_UUID_REPRESENTATIONS: for id_subtype in (STANDARD, PYTHON_LEGACY): options = self.watched_collection().codec_options.with_options( - uuid_representation=uuid_representation) + uuid_representation=uuid_representation + ) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: - coll.insert_one( - {'_id': Binary(uuid.uuid4().bytes, id_subtype)}) + coll.insert_one({"_id": Binary(uuid.uuid4().bytes, id_subtype)}) _ = change_stream.next() resume_token = change_stream.resume_token @@ -1051,12 +1014,12 @@ def test_uuid_representations(self): def test_document_id_order(self): """Test with document _ids that need their order preserved.""" - random_keys = random.sample(string.ascii_letters, - len(string.ascii_letters)) - random_doc = {'_id': SON([(key, key) for key in random_keys])} + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + random_doc = {"_id": SON([(key, key) for key in random_keys])} for document_class in (dict, SON, RawBSONDocument): options = self.watched_collection().codec_options.with_options( - document_class=document_class) + document_class=document_class + ) coll = self.watched_collection(codec_options=options) with coll.watch() as change_stream: coll.insert_one(random_doc) @@ -1072,12 +1035,12 @@ def test_document_id_order(self): def test_read_concern(self): """Test readConcern is not validated by the driver.""" # Read concern 'local' is not allowed for $changeStream. - coll = self.watched_collection(read_concern=ReadConcern('local')) + coll = self.watched_collection(read_concern=ReadConcern("local")) with self.assertRaises(OperationFailure): coll.watch() # Does not error. - coll = self.watched_collection(read_concern=ReadConcern('majority')) + coll = self.watched_collection(read_concern=ReadConcern("majority")) with coll.watch(): pass @@ -1103,10 +1066,13 @@ def setUp(self): self.listener.results.clear() def setUpCluster(self, scenario_dict): - assets = [(scenario_dict["database_name"], - scenario_dict["collection_name"]), - (scenario_dict.get("database2_name", "db2"), - scenario_dict.get("collection2_name", "coll2"))] + assets = [ + (scenario_dict["database_name"], scenario_dict["collection_name"]), + ( + scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"), + ), + ] for db, coll in assets: self.client.drop_database(db) self.client[db].create_collection(coll) @@ -1118,12 +1084,15 @@ def setFailPoint(self, scenario_dict): elif not client_context.test_commands_enabled: self.skipTest("Test commands must be enabled") - fail_cmd = SON([('configureFailPoint', 'failCommand')]) + fail_cmd = SON([("configureFailPoint", "failCommand")]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) self.addCleanup( client_context.client.admin.command, - 'configureFailPoint', fail_cmd['configureFailPoint'], mode='off') + "configureFailPoint", + fail_cmd["configureFailPoint"], + mode="off", + ) def assert_list_contents_are_subset(self, superlist, sublist): """Check that each element in sublist is a subset of the corresponding @@ -1143,7 +1112,7 @@ def assert_dict_is_subset(self, superdict, subdict): exempt_fields = ["documentKey", "_id", "getMore"] for key, value in subdict.items(): if key not in superdict: - self.fail('Key %s not found in %s' % (key, superdict)) + self.fail("Key %s not found in %s" % (key, superdict)) if isinstance(value, dict): self.assert_dict_is_subset(superdict[key], value) continue @@ -1169,14 +1138,13 @@ def tearDown(self): self.listener.results.clear() -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'change_streams') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() def get_change_stream(client, scenario_def, test): @@ -1207,12 +1175,11 @@ def run_operation(client, operation): # Apply specified operations opname = camel_to_snake(operation["name"]) arguments = operation.get("arguments", {}) - if opname == 'rename': + if opname == "rename": # Special case for rename operation. - arguments = {'new_name': arguments["to"]} - cmd = getattr(client.get_database( - operation["database"]).get_collection( - operation["collection"]), opname + arguments = {"new_name": arguments["to"]} + cmd = getattr( + client.get_database(operation["database"]).get_collection(operation["collection"]), opname ) return cmd(**arguments) @@ -1224,15 +1191,12 @@ def run_scenario(self): self.setFailPoint(test) is_error = test["result"].get("error", False) try: - with get_change_stream( - self.client, scenario_def, test - ) as change_stream: + with get_change_stream(self.client, scenario_def, test) as change_stream: for operation in test["operations"]: # Run specified operations run_operation(self.client, operation) num_expected_changes = len(test["result"].get("success", [])) - changes = [ - change_stream.next() for _ in range(num_expected_changes)] + changes = [change_stream.next() for _ in range(num_expected_changes)] # Run a next() to induce an error if one is expected and # there are no changes. if is_error and not changes: @@ -1266,7 +1230,7 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')): + for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): dirname = os.path.split(dirpath)[-1] for filename in filenames: @@ -1275,31 +1239,25 @@ def create_tests(): test_type = os.path.splitext(filename)[0] - for test in scenario_def['tests']: + for test in scenario_def["tests"]: new_test = create_test(scenario_def, test) new_test = client_context.require_no_mmap(new_test) - if 'minServerVersion' in test: - min_ver = tuple( - int(elt) for - elt in test['minServerVersion'].split('.')) - new_test = client_context.require_version_min(*min_ver)( - new_test) - if 'maxServerVersion' in test: - max_ver = tuple( - int(elt) for - elt in test['maxServerVersion'].split('.')) - new_test = client_context.require_version_max(*max_ver)( - new_test) - - topologies = test['topology'] - new_test = client_context.require_cluster_type(topologies)( - new_test) - - test_name = 'test_%s_%s_%s' % ( + if "minServerVersion" in test: + min_ver = tuple(int(elt) for elt in test["minServerVersion"].split(".")) + new_test = client_context.require_version_min(*min_ver)(new_test) + if "maxServerVersion" in test: + max_ver = tuple(int(elt) for elt in test["maxServerVersion"].split(".")) + new_test = client_context.require_version_max(*max_ver)(new_test) + + topologies = test["topology"] + new_test = client_context.require_cluster_type(topologies)(new_test) + + test_name = "test_%s_%s_%s" % ( dirname, test_type.replace("-", "_"), - str(test['description'].replace(" ", "_"))) + str(test["description"].replace(" ", "_")), + ) new_test.__name__ = test_name setattr(TestAllLegacyScenarios, new_test.__name__, new_test) @@ -1308,10 +1266,13 @@ def create_tests(): create_tests() -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'unified'), - module=__name__,)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 9ca9989052..0487161b1e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -14,6 +14,7 @@ """Test the mongo_client module.""" +import _thread as thread import contextlib import copy import datetime @@ -23,108 +24,116 @@ import socket import struct import sys -import time -import _thread as thread import threading +import time import warnings - -from typing import no_type_check, Type +from typing import Type, no_type_check sys.path[0:0] = [""] +from test import ( + HAVE_IPADDRESS, + IntegrationTest, + MockClientTest, + SkipTest, + client_context, + client_knobs, + db_pwd, + db_user, + unittest, +) +from test.pymongo_mocks import MockClient +from test.utils import ( + NTHREADS, + CMAPListener, + FunctionCallRecorder, + assertRaisesExactly, + connected, + delay, + get_pool, + gevent_monkey_patched, + is_greenthread_patched, + lazy_client_trial, + one, + remove_all_users, + rs_client, + rs_or_single_client, + rs_or_single_client_noauth, + single_client, + wait_until, +) + +import pymongo from bson import encode from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry from bson.son import SON from bson.tz_util import utc -import pymongo from pymongo import event_loggers, message, monitoring from pymongo.client_options import ClientOptions from pymongo.command_cursor import CommandCursor -from pymongo.common import CONNECT_TIMEOUT, _UUID_REPRESENTATIONS +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD from pymongo.cursor import Cursor, CursorType from pymongo.database import Database from pymongo.driver_info import DriverInfo -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure, - InvalidName, - InvalidURI, - NetworkTimeout, - OperationFailure, - ServerSelectionTimeoutError, - WriteConcernError, - InvalidOperation) +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + InvalidName, + InvalidOperation, + InvalidURI, + NetworkTimeout, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient -from pymongo.monitoring import (ServerHeartbeatListener, - ServerHeartbeatStartedEvent) -from pymongo.pool import SocketInfo, _METADATA, PoolOptions +from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.pool import _METADATA, PoolOptions, SocketInfo from pymongo.read_preferences import ReadPreference from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (readable_server_selector, - writable_server_selector) +from pymongo.server_selectors import readable_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.topology import _ErrorContext -from pymongo.topology_description import TopologyDescription, _updated_topology_description_srv_polling +from pymongo.topology_description import ( + TopologyDescription, + _updated_topology_description_srv_polling, +) from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - SkipTest, - unittest, - IntegrationTest, - db_pwd, - db_user, - MockClientTest, - HAVE_IPADDRESS) -from test.pymongo_mocks import MockClient -from test.utils import (assertRaisesExactly, - connected, - CMAPListener, - delay, - FunctionCallRecorder, - get_pool, - gevent_monkey_patched, - is_greenthread_patched, - lazy_client_trial, - NTHREADS, - one, - remove_all_users, - rs_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, - wait_until) class ClientUnitTest(unittest.TestCase): """MongoClient tests that don't require a server.""" + client: MongoClient @classmethod @client_context.require_connection def setUpClass(cls): - cls.client = rs_or_single_client(connect=False, - serverSelectionTimeoutMS=100) + cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) @classmethod def tearDownClass(cls): cls.client.close() def test_keyword_arg_defaults(self): - client = MongoClient(socketTimeoutMS=None, - connectTimeoutMS=20000, - waitQueueTimeoutMS=None, - replicaSet=None, - read_preference=ReadPreference.PRIMARY, - ssl=False, - tlsCertificateKeyFile=None, - tlsAllowInvalidCertificates=True, - tlsCAFile=None, - connect=False, - serverSelectionTimeoutMS=12000) + client = MongoClient( + socketTimeoutMS=None, + connectTimeoutMS=20000, + waitQueueTimeoutMS=None, + replicaSet=None, + read_preference=ReadPreference.PRIMARY, + ssl=False, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, + tlsCAFile=None, + connect=False, + serverSelectionTimeoutMS=12000, + ) options = client._MongoClient__options pool_opts = options.pool_options @@ -138,19 +147,17 @@ def test_keyword_arg_defaults(self): self.assertAlmostEqual(12, client.options.server_selection_timeout) def test_connect_timeout(self): - client = MongoClient(connect=False, connectTimeoutMS=None, - socketTimeoutMS=None) + client = MongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) pool_opts = client._MongoClient__options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = MongoClient(connect=False, connectTimeoutMS=0, - socketTimeoutMS=0) + client = MongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) pool_opts = client._MongoClient__options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) client = MongoClient( - 'mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0', - connect=False) + "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False + ) pool_opts = client._MongoClient__options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) @@ -168,18 +175,9 @@ def test_max_pool_size_zero(self): MongoClient(maxPoolSize=0) def test_uri_detection(self): - self.assertRaises( - ConfigurationError, - MongoClient, - "/foo") - self.assertRaises( - ConfigurationError, - MongoClient, - "://") - self.assertRaises( - ConfigurationError, - MongoClient, - "foo/") + self.assertRaises(ConfigurationError, MongoClient, "/foo") + self.assertRaises(ConfigurationError, MongoClient, "://") + self.assertRaises(ConfigurationError, MongoClient, "foo/") def test_get_db(self): def make_db(base, name): @@ -199,15 +197,14 @@ def make_db(base, name): def test_get_database(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - db = self.client.get_database( - 'foo', codec_options, ReadPreference.SECONDARY, write_concern) - self.assertEqual('foo', db.name) + db = self.client.get_database("foo", codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) def test_getattr(self): - self.assertTrue(isinstance(self.client['_does_not_exist'], Database)) + self.assertTrue(isinstance(self.client["_does_not_exist"], Database)) with self.assertRaises(AttributeError) as context: self.client._does_not_exist @@ -215,8 +212,7 @@ def test_getattr(self): # Message should be: # "AttributeError: MongoClient has no attribute '_does_not_exist'. To # access the _does_not_exist database, use client['_does_not_exist']". - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): def iterate(): @@ -225,108 +221,111 @@ def iterate(): self.assertRaises(TypeError, iterate) def test_get_default_database(self): - c = rs_or_single_client("mongodb://%s:%d/foo" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + c = rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_default_database()) # Test that default doesn't override the URI value. - self.assertEqual(Database(c, 'foo'), c.get_default_database('bar')) + self.assertEqual(Database(c, "foo"), c.get_default_database("bar")) codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - db = c.get_default_database( - None, codec_options, ReadPreference.SECONDARY, write_concern) - self.assertEqual('foo', db.name) + db = c.get_default_database(None, codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database('foo')) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_default_database("foo")) def test_get_default_database_error(self): # URI with no database. - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) self.assertRaises(ConfigurationError, c.get_default_database) def test_get_default_database_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % ( - client_context.host, client_context.port) + uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) c = rs_or_single_client(uri, connect=False) - self.assertEqual(Database(c, 'foo'), c.get_default_database()) + self.assertEqual(Database(c, "foo"), c.get_default_database()) def test_get_database_default(self): - c = rs_or_single_client("mongodb://%s:%d/foo" % (client_context.host, - client_context.port), - connect=False) - self.assertEqual(Database(c, 'foo'), c.get_database()) + c = rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + ) + self.assertEqual(Database(c, "foo"), c.get_database()) def test_get_database_default_error(self): # URI with no database. - c = rs_or_single_client("mongodb://%s:%d/" % (client_context.host, - client_context.port), - connect=False) + c = rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + ) self.assertRaises(ConfigurationError, c.get_database) def test_get_database_default_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % ( - client_context.host, client_context.port) + uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) c = rs_or_single_client(uri, connect=False) - self.assertEqual(Database(c, 'foo'), c.get_database()) + self.assertEqual(Database(c, "foo"), c.get_database()) def test_primary_read_pref_with_tags(self): # No tags allowed with "primary". with self.assertRaises(ConfigurationError): - MongoClient('mongodb://host/?readpreferencetags=dc:east') + MongoClient("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient('mongodb://host/?' - 'readpreference=primary&readpreferencetags=dc:east') + MongoClient("mongodb://host/?" "readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): c = rs_or_single_client( - "mongodb://host", connect=False, - readpreference=ReadPreference.NEAREST.mongos_mode) + "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode + ) self.assertEqual(c.read_preference, ReadPreference.NEAREST) def test_metadata(self): metadata = copy.deepcopy(_METADATA) - metadata['application'] = {'name': 'foobar'} - client = MongoClient( - "mongodb://foo:27017/?appname=foobar&connect=false") + metadata["application"] = {"name": "foobar"} + client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - client = MongoClient('foo', 27017, appname='foobar', connect=False) + client = MongoClient("foo", 27017, appname="foobar", connect=False) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) # No error - MongoClient(appname='x' * 128) - self.assertRaises(ValueError, MongoClient, appname='x' * 129) + MongoClient(appname="x" * 128) + self.assertRaises(ValueError, MongoClient, appname="x" * 129) # Bad "driver" options. - self.assertRaises(TypeError, DriverInfo, 'Foo', 1, 'a') - self.assertRaises(TypeError, DriverInfo, version="1", platform='a') + self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") + self.assertRaises(TypeError, DriverInfo, version="1", platform="a") self.assertRaises(TypeError, DriverInfo) self.assertRaises(TypeError, MongoClient, driver=1) - self.assertRaises(TypeError, MongoClient, driver='abc') - self.assertRaises(TypeError, MongoClient, driver=('Foo', '1', 'a')) + self.assertRaises(TypeError, MongoClient, driver="abc") + self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) # Test appending to driver info. - metadata['driver']['name'] = 'PyMongo|FooDriver' - metadata['driver']['version'] = '%s|1.2.3' % ( - _METADATA['driver']['version'],) - client = MongoClient('foo', 27017, appname='foobar', - driver=DriverInfo('FooDriver', '1.2.3', None), connect=False) + metadata["driver"]["name"] = "PyMongo|FooDriver" + metadata["driver"]["version"] = "%s|1.2.3" % (_METADATA["driver"]["version"],) + client = MongoClient( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", None), + connect=False, + ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - metadata['platform'] = '%s|FooPlatform' % ( - _METADATA['platform'],) - client = MongoClient('foo', 27017, appname='foobar', - driver=DriverInfo('FooDriver', '1.2.3', 'FooPlatform'), connect=False) + metadata["platform"] = "%s|FooPlatform" % (_METADATA["platform"],) + client = MongoClient( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), + connect=False, + ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) @@ -334,12 +333,14 @@ def test_kwargs_codec_options(self): class MyFloatType(object): def __init__(self, x): self.__x = x + @property def x(self): return self.__x class MyFloatAsIntEncoder(TypeEncoder): python_type = MyFloatType + def transform_python(self, value): return int(value) @@ -347,8 +348,8 @@ def transform_python(self, value): document_class: Type[SON] = SON type_registry = TypeRegistry([MyFloatAsIntEncoder()]) tz_aware = True - uuid_representation_label = 'javaLegacy' - unicode_decode_error_handler = 'ignore' + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" tzinfo = utc c = MongoClient( document_class=document_class, @@ -357,63 +358,62 @@ def transform_python(self, value): uuidrepresentation=uuid_representation_label, unicode_decode_error_handler=unicode_decode_error_handler, tzinfo=tzinfo, - connect=False + connect=False, ) self.assertEqual(c.codec_options.document_class, document_class) self.assertEqual(c.codec_options.type_registry, type_registry) self.assertEqual(c.codec_options.tz_aware, tz_aware) self.assertEqual( - c.codec_options.uuid_representation, - _UUID_REPRESENTATIONS[uuid_representation_label]) - self.assertEqual( - c.codec_options.unicode_decode_error_handler, - unicode_decode_error_handler) + c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual(c.codec_options.tzinfo, tzinfo) def test_uri_codec_options(self): # Ensure codec options are passed in correctly - uuid_representation_label = 'javaLegacy' - unicode_decode_error_handler = 'ignore' - uri = ("mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" - "%s&unicode_decode_error_handler=%s" % ( - client_context.host, - client_context.port, - uuid_representation_label, - unicode_decode_error_handler)) + uuid_representation_label = "javaLegacy" + unicode_decode_error_handler = "ignore" + uri = ( + "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" + "%s&unicode_decode_error_handler=%s" + % ( + client_context.host, + client_context.port, + uuid_representation_label, + unicode_decode_error_handler, + ) + ) c = MongoClient(uri, connect=False) self.assertEqual(c.codec_options.tz_aware, True) self.assertEqual( - c.codec_options.uuid_representation, - _UUID_REPRESENTATIONS[uuid_representation_label]) - self.assertEqual( - c.codec_options.unicode_decode_error_handler, - unicode_decode_error_handler) + c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + ) + self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. - uri = ("mongodb://localhost/?ssl=true&replicaSet=name" - "&readPreference=primary") - c = MongoClient(uri, ssl=False, replicaSet="newname", - readPreference="secondaryPreferred") + uri = "mongodb://localhost/?ssl=true&replicaSet=name" "&readPreference=primary" + c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") clopts = c._MongoClient__options opts = clopts._options - self.assertEqual(opts['tls'], False) + self.assertEqual(opts["tls"], False) self.assertEqual(clopts.replica_set_name, "newname") - self.assertEqual( - clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) + self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) - @unittest.skipUnless( - _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. from pymongo.srv_resolver import _resolve + patched_resolver = FunctionCallRecorder(_resolve) pymongo.srv_resolver._resolve = patched_resolver + def reset_resolver(): pymongo.srv_resolver._resolve = _resolve + self.addCleanup(reset_resolver) # Setup. @@ -427,7 +427,7 @@ def test_scenario(args, kwargs, expected_value): patched_resolver.reset() MongoClient(*args, **kwargs) for _, kw in patched_resolver.call_list(): - self.assertAlmostEqual(kw['lifetime'], expected_value) + self.assertAlmostEqual(kw["lifetime"], expected_value) # No timeout specified. test_scenario((base_uri,), {}, CONNECT_TIMEOUT) @@ -436,7 +436,7 @@ def test_scenario(args, kwargs, expected_value): test_scenario((uri_with_timeout,), {}, expected_uri_value) # Timeout only specified in keyword arguments. - kwarg = {'connectTimeoutMS': connectTimeoutMS} + kwarg = {"connectTimeoutMS": connectTimeoutMS} test_scenario((base_uri,), kwarg, expected_kw_value) # Timeout specified in both kwargs and connection string. @@ -445,23 +445,27 @@ def test_scenario(args, kwargs, expected_value): def test_uri_security_options(self): # Ensure that we don't silently override security-related options. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?ssl=true', tls=False, - connect=False) + MongoClient("mongodb://localhost/?ssl=true", tls=False, connect=False) # Matching SSL and TLS options should not cause errors. - c = MongoClient('mongodb://localhost/?ssl=false', tls=False, - connect=False) - self.assertEqual(c._MongoClient__options._options['tls'], False) + c = MongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c._MongoClient__options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, tlsAllowInvalidHostnames=True) + MongoClient( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidHostnames=True, + ) # Conflicting legacy tlsInsecure options should also raise an error. with self.assertRaises(InvalidURI): - MongoClient('mongodb://localhost/?tlsInsecure=true', - connect=False, tlsAllowInvalidCertificates=False) + MongoClient( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidCertificates=False, + ) # Conflicting kwargs should raise InvalidURI with self.assertRaises(InvalidURI): @@ -470,11 +474,13 @@ def test_uri_security_options(self): def test_event_listeners(self): c = MongoClient(event_listeners=[], connect=False) self.assertEqual(c.options.event_listeners, []) - listeners = [event_loggers.CommandLogger(), - event_loggers.HeartbeatLogger(), - event_loggers.ServerLogger(), - event_loggers.TopologyLogger(), - event_loggers.ConnectionPoolLogger()] + listeners = [ + event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger(), + ] c = MongoClient(event_listeners=listeners, connect=False) self.assertEqual(c.options.event_listeners, listeners) @@ -491,16 +497,19 @@ def test_client_options(self): class TestClient(IntegrationTest): def test_multiple_uris(self): with self.assertRaises(ConfigurationError): - MongoClient(host=['mongodb+srv://cluster-a.abc12.mongodb.net', - 'mongodb+srv://cluster-b.abc12.mongodb.net', - 'mongodb+srv://cluster-c.abc12.mongodb.net']) + MongoClient( + host=[ + "mongodb+srv://cluster-a.abc12.mongodb.net", + "mongodb+srv://cluster-b.abc12.mongodb.net", + "mongodb+srv://cluster-c.abc12.mongodb.net", + ] + ) def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove sockets when maxIdleTimeMS not set client = rs_or_single_client() - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) @@ -510,89 +519,78 @@ def test_max_idle_time_reaper_default(self): def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one - client = rs_or_single_client(maxIdleTimeMS=500, - minPoolSize=1) - server = client._get_topology().select_server( - readable_server_selector) + client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, two # sockets could be created and checked into the pool. self.assertGreaterEqual(len(server._pool.sockets), 1) - wait_until(lambda: sock_info not in server._pool.sockets, - "remove stale socket") - wait_until(lambda: 1 <= len(server._pool.sockets), - "replace stale socket") + wait_until(lambda: sock_info not in server._pool.sockets, "remove stale socket") + wait_until(lambda: 1 <= len(server._pool.sockets), "replace stale socket") client.close() def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new sockets. - client = rs_or_single_client(maxIdleTimeMS=500, - minPoolSize=1, - maxPoolSize=1) - server = client._get_topology().select_server( - readable_server_selector) + client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass # When the reaper runs at the same time as the get_socket, # maxPoolSize=1 should prevent two sockets from being created. self.assertEqual(1, len(server._pool.sockets)) - wait_until(lambda: sock_info not in server._pool.sockets, - "remove stale socket") - wait_until(lambda: 1 == len(server._pool.sockets), - "replace stale socket") + wait_until(lambda: sock_info not in server._pool.sockets, "remove stale socket") + wait_until(lambda: 1 == len(server._pool.sockets), "replace stale socket") client.close() def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info_one: pass # Assert that the pool does not close sockets prematurely. - time.sleep(.300) + time.sleep(0.300) with server._pool.get_socket() as sock_info_two: pass self.assertIs(sock_info_one, sock_info_two) wait_until( lambda: 0 == len(server._pool.sockets), - "stale socket reaped and new one NOT added to the pool") + "stale socket reaped and new one NOT added to the pool", + ) client.close() def test_min_pool_size(self): - with client_knobs(kill_cursor_frequency=.1): + with client_knobs(kill_cursor_frequency=0.1): client = rs_or_single_client() - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) self.assertEqual(0, len(server._pool.sockets)) # Assert that pool started up at minPoolSize client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server( - readable_server_selector) - wait_until(lambda: 10 == len(server._pool.sockets), - "pool initialized with 10 sockets") + server = client._get_topology().select_server(readable_server_selector) + wait_until(lambda: 10 == len(server._pool.sockets), "pool initialized with 10 sockets") # Assert that if a socket is closed, a new one takes its place with server._pool.get_socket() as sock_info: sock_info.close_socket(None) - wait_until(lambda: 10 == len(server._pool.sockets), - "a closed socket gets replaced from the pool") + wait_until( + lambda: 10 == len(server._pool.sockets), + "a closed socket gets replaced from the pool", + ) self.assertFalse(sock_info in server._pool.sockets) def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) - time.sleep(1) # Sleep so that the socket becomes stale. + time.sleep(1) # Sleep so that the socket becomes stale. with server._pool.get_socket() as new_sock_info: self.assertNotEqual(sock_info, new_sock_info) @@ -602,8 +600,7 @@ def test_max_idle_time_checkout(self): # Test that sockets are reused if maxIdleTimeMS is not set. client = rs_or_single_client() - server = client._get_topology().select_server( - readable_server_selector) + server = client._get_topology().select_server(readable_server_selector) with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) @@ -619,15 +616,14 @@ def test_constants(self): host, port = client_context.host, client_context.port kwargs: dict = client_context.default_client_options.copy() if client_context.auth_enabled: - kwargs['username'] = db_user - kwargs['password'] = db_pwd + kwargs["username"] = db_user + kwargs["password"] = db_pwd # Set bad defaults. MongoClient.HOST = "somedomainthatdoesntexist.org" MongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): - connected(MongoClient(serverSelectionTimeoutMS=10, - **kwargs)) + connected(MongoClient(serverSelectionTimeoutMS=10, **kwargs)) # Override the defaults. No error. connected(MongoClient(host, port, **kwargs)) @@ -660,7 +656,7 @@ def test_init_disconnected(self): self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) self.assertIsNone(c.address) # PYTHON-2981 - c.admin.command('ping') # connect + c.admin.command("ping") # connect if client_context.is_rs: # The primary's host and port are from the replica set config. self.assertIsNotNone(c.address) @@ -668,66 +664,68 @@ def test_init_disconnected(self): self.assertEqual(c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" - c = MongoClient(bad_host, port, connectTimeoutMS=1, - serverSelectionTimeoutMS=10) + c = MongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = MongoClient(uri, connectTimeoutMS=1, - serverSelectionTimeoutMS=10) + c = MongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertEqual(client_context.client, c) # Explicitly test inequality self.assertFalse(client_context.client != c) - c = rs_or_single_client('invalid.com', connect=False) + c = rs_or_single_client("invalid.com", connect=False) self.addCleanup(c.close) self.assertNotEqual(client_context.client, c) self.assertTrue(client_context.client != c) # Seeds differ: - self.assertNotEqual(MongoClient('a', connect=False), - MongoClient('b', connect=False)) + self.assertNotEqual(MongoClient("a", connect=False), MongoClient("b", connect=False)) # Same seeds but out of order still compares equal: - self.assertEqual(MongoClient(['a', 'b', 'c'], connect=False), - MongoClient(['c', 'a', 'b'], connect=False)) + self.assertEqual( + MongoClient(["a", "b", "c"], connect=False), MongoClient(["c", "a", "b"], connect=False) + ) def test_hashable(self): - seed = '%s:%s' % list(self.client._topology_settings.seeds)[0] + seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertIn(c, {client_context.client}) - c = rs_or_single_client('invalid.com', connect=False) + c = rs_or_single_client("invalid.com", connect=False) self.addCleanup(c.close) self.assertNotIn(c, {client_context.client}) def test_host_w_port(self): with self.assertRaises(ValueError): - connected(MongoClient("%s:1234567" % (client_context.host,), - connectTimeoutMS=1, - serverSelectionTimeoutMS=10)) + connected( + MongoClient( + "%s:1234567" % (client_context.host,), + connectTimeoutMS=1, + serverSelectionTimeoutMS=10, + ) + ) def test_repr(self): # Used to test 'eval' below. import bson client = MongoClient( - 'mongodb://localhost:27017,localhost:27018/?replicaSet=replset' - '&connectTimeoutMS=12345&w=1&wtimeoutms=100', - connect=False, document_class=SON) + "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" + "&connectTimeoutMS=12345&w=1&wtimeoutms=100", + connect=False, + document_class=SON, + ) the_repr = repr(client) - self.assertIn('MongoClient(host=', the_repr) + self.assertIn("MongoClient(host=", the_repr) self.assertIn( - "document_class=bson.son.SON, " - "tz_aware=False, " - "connect=False, ", - the_repr) + "document_class=bson.son.SON, " "tz_aware=False, " "connect=False, ", the_repr + ) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("w=1", the_repr) @@ -735,20 +733,18 @@ def test_repr(self): self.assertEqual(eval(the_repr), client) - client = MongoClient("localhost:27017,localhost:27018", - replicaSet='replset', - connectTimeoutMS=12345, - socketTimeoutMS=None, - w=1, - wtimeoutms=100, - connect=False) + client = MongoClient( + "localhost:27017,localhost:27018", + replicaSet="replset", + connectTimeoutMS=12345, + socketTimeoutMS=None, + w=1, + wtimeoutms=100, + connect=False, + ) the_repr = repr(client) - self.assertIn('MongoClient(host=', the_repr) - self.assertIn( - "document_class=dict, " - "tz_aware=False, " - "connect=False, ", - the_repr) + self.assertIn("MongoClient(host=", the_repr) + self.assertIn("document_class=dict, " "tz_aware=False, " "connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("sockettimeoutms=None", the_repr) @@ -758,11 +754,10 @@ def test_repr(self): self.assertEqual(eval(the_repr), client) def test_getters(self): - wait_until(lambda: client_context.nodes == self.client.nodes, - "find all nodes") + wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") def test_list_databases(self): - cmd_docs = self.client.admin.command('listDatabases')['databases'] + cmd_docs = self.client.admin.command("listDatabases")["databases"] cursor = self.client.list_databases() self.assertIsInstance(cursor, CommandCursor) helper_docs = list(cursor) @@ -809,7 +804,7 @@ def test_drop_database(self): if client_context.is_rs: wc_client = rs_or_single_client(w=len(client_context.nodes) + 1) with self.assertRaises(WriteConcernError): - wc_client.drop_database('pymongo_test2') + wc_client.drop_database("pymongo_test2") self.client.drop_database(self.client.pymongo_test2) dbs = self.client.list_database_names() @@ -823,7 +818,7 @@ def test_close(self): self.assertRaises(InvalidOperation, coll.count_documents, {}) def test_close_kills_cursors(self): - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): # We can't figure out how to make this test reliable with Jython. raise SkipTest("Can't test with Jython") test_client = rs_or_single_client() @@ -868,7 +863,7 @@ def test_close_stops_kill_cursors_thread(self): self.assertTrue(client._kill_cursors_executor._stopped) # Reusing the closed client should raise an InvalidOperation error. - self.assertRaises(InvalidOperation, client.admin.command, 'ping') + self.assertRaises(InvalidOperation, client.admin.command, "ping") # Thread is still stopped. self.assertTrue(client._kill_cursors_executor._stopped) @@ -882,7 +877,7 @@ def test_uri_connect_option(self): self.assertFalse(kc_thread and kc_thread.is_alive()) # Using the client should open topology and start the thread. - client.admin.command('ping') + client.admin.command("ping") self.assertTrue(client._topology._opened) kc_thread = client._kill_cursors_executor._thread self.assertTrue(kc_thread and kc_thread.is_alive()) @@ -921,16 +916,13 @@ def test_auth_from_uri(self): self.addCleanup(client_context.drop_user, "admin", "admin") self.addCleanup(remove_all_users, self.client.pymongo_test) - client_context.create_user( - "pymongo_test", "user", "pass", roles=['userAdmin', 'readWrite']) + client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) with self.assertRaises(OperationFailure): - connected(rs_or_single_client_noauth( - "mongodb://a:b@%s:%d" % (host, port))) + connected(rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) # No error. - connected(rs_or_single_client_noauth( - "mongodb://admin:pass@%s:%d" % (host, port))) + connected(rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) @@ -938,21 +930,21 @@ def test_auth_from_uri(self): connected(rs_or_single_client_noauth(uri)) # No error. - connected(rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port))) + connected( + rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) + ) # Auth with lazy connection. rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), - connect=False).pymongo_test.test.find_one() + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ).pymongo_test.test.find_one() # Wrong password. bad_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), - connect=False) + "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False + ) - self.assertRaises(OperationFailure, - bad_client.pymongo_test.test.find_one) + self.assertRaises(OperationFailure, bad_client.pymongo_test.test.find_one) @client_context.require_auth def test_username_and_password(self): @@ -971,26 +963,23 @@ def test_username_and_password(self): c.server_info() with self.assertRaises(OperationFailure): - rs_or_single_client_noauth( - username="ad min", password="foo").server_info() + rs_or_single_client_noauth(username="ad min", password="foo").server_info() @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), - connect=False) + "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), connect=False + ) - assertRaisesExactly( - OperationFailure, lazy_client.test.collection.find_one) + assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) @client_context.require_no_tls def test_unix_socket(self): if not hasattr(socket, "AF_UNIX"): raise SkipTest("UNIX-sockets are not supported on this system") - mongodb_socket = '/tmp/mongodb-%d.sock' % (client_context.port,) - encoded_socket = ( - '%2Ftmp%2F' + 'mongodb-%d.sock' % (client_context.port,)) + mongodb_socket = "/tmp/mongodb-%d.sock" % (client_context.port,) + encoded_socket = "%2Ftmp%2F" + "mongodb-%d.sock" % (client_context.port,) if not os.access(mongodb_socket, os.R_OK): raise SkipTest("Socket file is not accessible") @@ -1006,8 +995,9 @@ def test_unix_socket(self): # Confirm it fails with a missing socket. self.assertRaises( ConnectionFailure, - connected, MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", - serverSelectionTimeoutMS=100)) + connected, + MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100), + ) def test_document_class(self): c = self.client @@ -1029,7 +1019,8 @@ def test_timeouts(self): connectTimeoutMS=10500, socketTimeoutMS=10500, maxIdleTimeMS=10500, - serverSelectionTimeoutMS=10500) + serverSelectionTimeoutMS=10500, + ) self.assertEqual(10.5, get_pool(client).opts.connect_timeout) self.assertEqual(10.5, get_pool(client).opts.socket_timeout) self.assertEqual(10.5, get_pool(client).opts.max_idle_time_seconds) @@ -1046,14 +1037,11 @@ def test_socket_timeout_ms_validation(self): c = connected(rs_or_single_client(socketTimeoutMS=0)) self.assertEqual(None, get_pool(c).opts.socket_timeout) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=-1) + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=-1) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS=1e10) + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=1e10) - self.assertRaises(ValueError, - rs_or_single_client, socketTimeoutMS='foo') + self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS="foo") def test_socket_timeout(self): no_timeout = self.client @@ -1069,6 +1057,7 @@ def test_socket_timeout(self): def get_x(db): doc = next(db.test.find().where(where_func)) return doc["x"] + self.assertEqual(1, get_x(no_timeout.pymongo_test)) self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test) @@ -1079,28 +1068,23 @@ def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=0, connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) - self.assertRaises(ValueError, MongoClient, - serverSelectionTimeoutMS="foo", connect=False) - self.assertRaises(ValueError, MongoClient, - serverSelectionTimeoutMS=-1, connect=False) - self.assertRaises(ConfigurationError, MongoClient, - serverSelectionTimeoutMS=None, connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) + self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS=-1, connect=False) + self.assertRaises( + ConfigurationError, MongoClient, serverSelectionTimeoutMS=None, connect=False + ) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) # Test invalid timeout in URI ignored and set to default. - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) - client = MongoClient( - 'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): @@ -1110,13 +1094,12 @@ def test_waitQueueTimeoutMS(self): def test_socketKeepAlive(self): pool = get_pool(self.client) with pool.get_socket() as sock_info: - keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, - socket.SO_KEEPALIVE) + keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) self.assertTrue(keepalive) @no_type_check def test_tz_aware(self): - self.assertRaises(ValueError, MongoClient, tz_aware='foo') + self.assertRaises(ValueError, MongoClient, tz_aware="foo") aware = rs_or_single_client(tz_aware=True) naive = self.client @@ -1129,7 +1112,8 @@ def test_tz_aware(self): self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) self.assertEqual( aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"]) + naive.pymongo_test.test.find_one()["x"], + ) @client_context.require_ipv6 def test_ipv6(self): @@ -1144,7 +1128,7 @@ def test_ipv6(self): uri = "mongodb://%s[::1]:%d" % (auth_str, client_context.port) if client_context.is_rs: - uri += '/?replicaSet=' + (client_context.replica_set_name or "") + uri += "/?replicaSet=" + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) @@ -1174,7 +1158,7 @@ def test_contextlib(self): client.pymongo_test.test.find_one() def test_interrupt_signal(self): - if sys.platform.startswith('java'): + if sys.platform.startswith("java"): # We can't figure out how to raise an exception on a thread that's # blocked on a socket, whether that's the main thread or a worker, # without simply killing the whole thread in Jython. This suggests @@ -1191,8 +1175,8 @@ def test_interrupt_signal(self): where = delay(1.5) # Need exactly 1 document so find() will execute its $where clause once - db.drop_collection('foo') - db.foo.insert_one({'_id': 1}) + db.drop_collection("foo") + db.foo.insert_one({"_id": 1}) old_signal_handler = None try: @@ -1203,7 +1187,8 @@ def test_interrupt_signal(self): # sock.recv(): TypeError: 'int' object is not callable # We don't know what causes this, so we hack around it. - if sys.platform == 'win32': + if sys.platform == "win32": + def interrupter(): # Raises KeyboardInterrupt in the main thread time.sleep(0.25) @@ -1222,7 +1207,7 @@ def sigalarm(num, frame): raised = False try: # Will be interrupted by a KeyboardInterrupt. - next(db.foo.find({'$where': where})) + next(db.foo.find({"$where": where})) except KeyboardInterrupt: raised = True @@ -1233,10 +1218,7 @@ def sigalarm(num, frame): # Raises AssertionError due to PYTHON-294 -- Mongo's response to # the previous find() is still waiting to be read on the socket, # so the request id's don't match. - self.assertEqual( - {'_id': 1}, - next(db.foo.find()) - ) + self.assertEqual({"_id": 1}, next(db.foo.find())) finally: if old_signal_handler: signal.signal(signal.SIGALRM, old_signal_handler) @@ -1253,10 +1235,8 @@ def test_operation_failure(self): self.assertGreaterEqual(socket_count, 1) old_sock_info = next(iter(pool.sockets)) client.pymongo_test.test.drop() - client.pymongo_test.test.insert_one({'_id': 'foo'}) - self.assertRaises( - OperationFailure, - client.pymongo_test.test.insert_one, {'_id': 'foo'}) + client.pymongo_test.test.insert_one({"_id": "foo"}) + self.assertRaises(OperationFailure, client.pymongo_test.test.insert_one, {"_id": "foo"}) self.assertEqual(socket_count, len(pool.sockets)) new_sock_info = next(iter(pool.sockets)) @@ -1268,27 +1248,26 @@ def test_lazy_connect_w0(self): # Use a separate collection to avoid races where we're still # completing an operation on a collection while the next test begins. - client_context.client.drop_database('test_lazy_connect_w0') - self.addCleanup( - client_context.client.drop_database, 'test_lazy_connect_w0') + client_context.client.drop_database("test_lazy_connect_w0") + self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") client = rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.insert_one({}) wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, - "find one document") + lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, "find one document" + ) client = rs_or_single_client(connect=False, w=0) - client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}}) + client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) wait_until( - lambda: client.test_lazy_connect_w0.test.find_one().get('x') == 1, - "update one document") + lambda: client.test_lazy_connect_w0.test.find_one().get("x") == 1, "update one document" + ) client = rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.delete_one({}) wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, - "delete one document") + lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, "delete one document" + ) @client_context.require_no_mongos def test_exhaust_network_error(self): @@ -1320,9 +1299,7 @@ def test_auth_network_error(self): # when authenticating a new socket with cached credentials. # Get a client with one socket so we detect if it's leaked. - c = connected(rs_or_single_client(maxPoolSize=1, - waitQueueTimeoutMS=1, - retryReads=False)) + c = connected(rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False)) # Cause a network error on the actual socket. pool = get_pool(c) @@ -1338,8 +1315,7 @@ def test_auth_network_error(self): @client_context.require_no_replica_set def test_connect_to_standalone_using_replica_set_name(self): - client = single_client(replicaSet='anything', - serverSelectionTimeoutMS=100) + client = single_client(replicaSet="anything", serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): client.test.test.find_one() @@ -1350,16 +1326,24 @@ def test_stale_getmore(self): # the topology before the getMore message is sent. Test that # MongoClient._run_operation_with_response handles the error. with self.assertRaises(AutoReconnect): - client = rs_client(connect=False, - serverSelectionTimeoutMS=100) + client = rs_client(connect=False, serverSelectionTimeoutMS=100) client._run_operation( - operation=message._GetMore('pymongo_test', 'collection', - 101, 1234, client.codec_options, - ReadPreference.PRIMARY, - None, client, None, None, False), - unpack_res=Cursor( - client.pymongo_test.collection)._unpack_response, - address=('not-a-member', 27017)) + operation=message._GetMore( + "pymongo_test", + "collection", + 101, + 1234, + client.codec_options, + ReadPreference.PRIMARY, + None, + client, + None, + None, + False, + ), + unpack_res=Cursor(client.pymongo_test.collection)._unpack_response, + address=("not-a-member", 27017), + ) def test_heartbeat_frequency_ms(self): class HeartbeatStartedListener(ServerHeartbeatListener): @@ -1386,15 +1370,17 @@ def init(self, *args): ServerHeartbeatStartedEvent.__init__ = init # type: ignore listener = HeartbeatStartedListener() uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % ( - client_context.host, client_context.port) + client_context.host, + client_context.port, + ) client = single_client(uri, event_listeners=[listener]) - wait_until(lambda: len(listener.results) >= 2, - "record two ServerHeartbeatStartedEvents") + wait_until( + lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" + ) # Default heartbeatFrequencyMS is 10 sec. Check the interval was # closer to 0.5 sec with heartbeatFrequencyMS configured. - self.assertAlmostEqual( - heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) + self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) client.close() finally: @@ -1405,7 +1391,7 @@ def test_small_heartbeat_frequency_ms(self): with self.assertRaises(ConfigurationError) as context: MongoClient(uri) - self.assertIn('heartbeatFrequencyMS', str(context.exception)) + self.assertIn("heartbeatFrequencyMS", str(context.exception)) def test_compression(self): def compression_settings(client): @@ -1415,16 +1401,16 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, 4) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017" client = MongoClient(uri, connect=False) @@ -1439,7 +1425,7 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=foobar,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) # According to the connection string spec, unsupported values @@ -1447,12 +1433,12 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zlib']) + self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) if not _HAVE_SNAPPY: @@ -1464,11 +1450,11 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=snappy" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['snappy']) + self.assertEqual(opts.compressors, ["snappy"]) uri = "mongodb://localhost:27017/?compressors=snappy,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['snappy', 'zlib']) + self.assertEqual(opts.compressors, ["snappy", "zlib"]) if not _HAVE_ZSTD: uri = "mongodb://localhost:27017/?compressors=zstd" @@ -1479,11 +1465,11 @@ def compression_settings(client): uri = "mongodb://localhost:27017/?compressors=zstd" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zstd']) + self.assertEqual(opts.compressors, ["zstd"]) uri = "mongodb://localhost:27017/?compressors=zstd,zlib" client = MongoClient(uri, connect=False) opts = compression_settings(client) - self.assertEqual(opts.compressors, ['zstd', 'zlib']) + self.assertEqual(opts.compressors, ["zstd", "zlib"]) options = client_context.default_client_options if "compressors" in options and "zlib" in options["compressors"]: @@ -1495,7 +1481,7 @@ def compression_settings(client): def test_reset_during_update_pool(self): client = rs_or_single_client(minPoolSize=10) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) generation = pool.gen.get_overall() @@ -1511,9 +1497,8 @@ def stop(self): def run(self): while self.running: - exc = AutoReconnect('mock pool error') - ctx = _ErrorContext( - exc, 0, pool.gen.get_overall(), False, None) + exc = AutoReconnect("mock pool error") + ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) client._topology.handle_error(pool.address, ctx) time.sleep(0.001) @@ -1531,17 +1516,17 @@ def run(self): finally: t.stop() t.join() - client.admin.command('ping') + client.admin.command("ping") def test_background_connections_do_not_hold_locks(self): min_pool_size = 10 client = rs_or_single_client( - serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, - connect=False) + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False + ) self.addCleanup(client.close) # Create a single connection in the pool. - client.admin.command('ping') + client.admin.command("ping") # Cause new connections stall for a few seconds. pool = get_pool(client) @@ -1553,15 +1538,15 @@ def stall_connect(*args, **kwargs): pool.connect = stall_connect # Un-patch Pool.connect to break the cyclic reference. - self.addCleanup(delattr, pool, 'connect') + self.addCleanup(delattr, pool, "connect") # Wait for the background thread to start creating connections - wait_until(lambda: len(pool.sockets) > 1, 'start creating connections') + wait_until(lambda: len(pool.sockets) > 1, "start creating connections") # Assert that application operations do not block. for _ in range(10): start = time.monotonic() - client.admin.command('ping') + client.admin.command("ping") total = time.monotonic() - start # Each ping command should not take more than 2 seconds self.assertLess(total, 2) @@ -1570,28 +1555,27 @@ def stall_connect(*args, **kwargs): def test_direct_connection(self): # direct_connection=True should result in Single topology. client = rs_or_single_client(directConnection=True) - client.admin.command('ping') + client.admin.command("ping") self.assertEqual(len(client.nodes), 1) - self.assertEqual(client._topology_settings.get_topology_type(), - TOPOLOGY_TYPE.Single) + self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) client.close() # direct_connection=False should result in RS topology. client = rs_or_single_client(directConnection=False) - client.admin.command('ping') + client.admin.command("ping") self.assertGreaterEqual(len(client.nodes), 1) - self.assertIn(client._topology_settings.get_topology_type(), - [TOPOLOGY_TYPE.ReplicaSetNoPrimary, - TOPOLOGY_TYPE.ReplicaSetWithPrimary]) + self.assertIn( + client._topology_settings.get_topology_type(), + [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], + ) client.close() # directConnection=True, should error with multiple hosts as a list. with self.assertRaises(ConfigurationError): - MongoClient(['host1', 'host2'], directConnection=True) + MongoClient(["host1", "host2"], directConnection=True) - @unittest.skipIf(sys.platform.startswith('java'), - 'Jython does not support gc.get_objects') - @unittest.skipIf('PyPy' in sys.version, 'PYTHON-2927 fails often on PyPy') + @unittest.skipIf(sys.platform.startswith("java"), "Jython does not support gc.get_objects") + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") def test_continuous_network_errors(self): def server_description_count(): i = 0 @@ -1602,12 +1586,12 @@ def server_description_count(): except ReferenceError: pass return i + gc.collect() with client_knobs(min_heartbeat_interval=0.003): client = MongoClient( - 'invalid:27017', - heartbeatFrequencyMS=3, - serverSelectionTimeoutMS=100) + "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=100 + ) initial_count = server_description_count() self.addCleanup(client.close) with self.assertRaises(ServerSelectionTimeoutError): @@ -1623,15 +1607,15 @@ def server_description_count(): def test_network_error_message(self): client = single_client(retryReads=False) self.addCleanup(client.close) - client.admin.command('ping') # connect - with self.fail_point({'mode': {'times': 1}, - 'data': {'closeConnection': True, - 'failCommands': ['find']}}): - expected = '%s:%s: ' % client.address + client.admin.command("ping") # connect + with self.fail_point( + {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} + ): + expected = "%s:%s: " % client.address with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) - @unittest.skipIf('PyPy' in sys.version, 'PYTHON-2938 could fail on PyPy') + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") def test_process_periodic_tasks(self): client = rs_or_single_client() coll = client.db.collection @@ -1643,49 +1627,45 @@ def test_process_periodic_tasks(self): client.close() # Add cursor to kill cursors queue del cursor - wait_until(lambda: client._MongoClient__kill_cursors_queue, - "waited for cursor to be added to queue") + wait_until( + lambda: client._MongoClient__kill_cursors_queue, + "waited for cursor to be added to queue", + ) client._process_periodic_tasks() # This must not raise or print any exceptions with self.assertRaises(InvalidOperation): coll.insert_many([{} for _ in range(5)]) - @unittest.skipUnless( - _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_service_name_from_kwargs(self): client = MongoClient( - 'mongodb+srv://user:password@test22.test.build.10gen.cc', - srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings.srv_service_name, - 'customname') + "mongodb+srv://user:password@test22.test.build.10gen.cc", + srvServiceName="customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") client = MongoClient( - 'mongodb+srv://user:password@test22.test.build.10gen.cc' - '/?srvServiceName=shouldbeoverriden', - srvServiceName='customname', connect=False) - self.assertEqual(client._topology_settings.srv_service_name, - 'customname') + "mongodb+srv://user:password@test22.test.build.10gen.cc" + "/?srvServiceName=shouldbeoverriden", + srvServiceName="customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") client = MongoClient( - 'mongodb+srv://user:password@test22.test.build.10gen.cc' - '/?srvServiceName=customname', - connect=False) - self.assertEqual(client._topology_settings.srv_service_name, - 'customname') - - @unittest.skipUnless( - _HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") + "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=customname", + connect=False, + ) + self.assertEqual(client._topology_settings.srv_service_name, "customname") + + @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_srv_max_hosts_kwarg(self): + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + self.assertEqual(len(client.topology_description.server_descriptions()), 1) client = MongoClient( - 'mongodb+srv://test1.test.build.10gen.cc/') - self.assertGreater( - len(client.topology_description.server_descriptions()), 1) - client = MongoClient( - 'mongodb+srv://test1.test.build.10gen.cc/', srvmaxhosts=1) - self.assertEqual( - len(client.topology_description.server_descriptions()), 1) - client = MongoClient( - 'mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1', - srvmaxhosts=2) - self.assertEqual( - len(client.topology_description.server_descriptions()), 2) + "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 + ) + self.assertEqual(len(client.topology_description.server_descriptions()), 2) class TestExhaustCursor(IntegrationTest): @@ -1708,8 +1688,8 @@ def test_exhaust_query_server_error(self): # This will cause OperationFailure in all mongo versions since # the value for $orderby must be a document. cursor = collection.find( - SON([('$query', {}), ('$orderby', True)]), - cursor_type=CursorType.EXHAUST) + SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST + ) self.assertRaises(OperationFailure, cursor.next) self.assertFalse(sock_info.closed) @@ -1743,8 +1723,8 @@ def receive_message(request_id): SocketInfo.receive_message(sock_info, request_id) # responseFlags bit 1 is QueryFailure. - msg = struct.pack('= count, - 'find %s %s event(s)' % (count, event), timeout=timeout) + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + wait_until( + lambda: self.listener.event_count(event) >= count, + "find %s %s event(s)" % (count, event), + timeout=timeout, + ) def check_out(self, op): """Run the 'checkOut' operation.""" - label = op['label'] + label = op["label"] with self.pool.get_socket() as sock_info: # Call 'pin_cursor' so we can hold the socket. sock_info.pin_cursor() @@ -130,7 +130,7 @@ def check_out(self, op): def check_in(self, op): """Run the 'checkIn' operation.""" - label = op['connection'] + label = op["connection"] sock_info = self.labels[label] self.pool.return_socket(sock_info) @@ -148,8 +148,8 @@ def close(self, op): def run_operation(self, op): """Run a single operation in a test.""" - op_name = camel_to_snake(op['name']) - thread = op['thread'] + op_name = camel_to_snake(op["name"]) + thread = op["thread"] meth = getattr(self, op_name) if thread: self.targets[thread].schedule(lambda: meth(op)) @@ -164,9 +164,9 @@ def run_operations(self, ops): def check_object(self, actual, expected): """Assert that the actual object matches the expected object.""" - self.assertEqual(type(actual), OBJECT_TYPES[expected['type']]) + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) for attr, expected_val in expected.items(): - if attr == 'type': + if attr == "type": continue c2s = camel_to_snake(attr) actual_val = getattr(actual, c2s) @@ -182,62 +182,60 @@ def check_event(self, actual, expected): def actual_events(self, ignore): """Return all the non-ignored events.""" ignore = tuple(OBJECT_TYPES[name] for name in ignore) - return [event for event in self.listener.events - if not isinstance(event, ignore)] + return [event for event in self.listener.events if not isinstance(event, ignore)] def check_events(self, events, ignore): """Check the events of a test.""" actual_events = self.actual_events(ignore) for actual, expected in zip(actual_events, events): - self.logs.append('Checking event actual: %r vs expected: %r' % ( - actual, expected)) + self.logs.append("Checking event actual: %r vs expected: %r" % (actual, expected)) self.check_event(actual, expected) if len(events) > len(actual_events): - self.fail('missing events: %r' % (events[len(actual_events):],)) + self.fail("missing events: %r" % (events[len(actual_events) :],)) def check_error(self, actual, expected): - message = expected.pop('message') + message = expected.pop("message") self.check_object(actual, expected) self.assertIn(message, str(actual)) def _set_fail_point(self, client, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) + cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) client.admin.command(cmd) def set_fail_point(self, command_args): if not client_context.supports_failCommand_fail_point: - self.skipTest('failCommand fail point must be supported') + self.skipTest("failCommand fail point must be supported") self._set_fail_point(self.client, command_args) def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" self.logs: list = [] - self.assertEqual(scenario_def['version'], 1) - self.assertIn(scenario_def['style'], ['unit', 'integration']) + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) self.listener = CMAPListener() self._ops: list = [] # Configure the fail point before creating the client. - if 'failPoint' in test: - fp = test['failPoint'] + if "failPoint" in test: + fp = test["failPoint"] self.set_fail_point(fp) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) - - opts = test['poolOptions'].copy() - opts['event_listeners'] = [self.listener] - opts['_monitor_class'] = DummyMonitor - opts['connect'] = False + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False # Support backgroundThreadIntervalMS, default to 50ms. - interval = opts.pop('backgroundThreadIntervalMS', 50) + interval = opts.pop("backgroundThreadIntervalMS", 50) if interval < 0: kill_cursor_frequency = 99999999 else: - kill_cursor_frequency = interval/1000.0 - with client_knobs(kill_cursor_frequency=kill_cursor_frequency, - min_heartbeat_interval=.05): + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): client = single_client(**opts) # Update the SD to a known type because the DummyMonitor will not. # Note we cannot simply call topology.on_change because that would @@ -245,10 +243,10 @@ def run_scenario(self, scenario_def, test): # PoolReadyEvents. Instead, update the initial state before # opening the Topology. td = client_context.client._topology.description - sd = td.server_descriptions()[(client_context.host, - client_context.port)] + sd = td.server_descriptions()[(client_context.host, client_context.port)] client._topology._description = updated_topology_description( - client._topology._description, sd) + client._topology._description, sd + ) # When backgroundThreadIntervalMS is negative we do not start the # background thread to ensure it never runs. if interval < 0: @@ -274,37 +272,37 @@ def cleanup(): self.addCleanup(cleanup) try: - if test['error']: + if test["error"]: with self.assertRaises(PyMongoError) as ctx: - self.run_operations(test['operations']) - self.check_error(ctx.exception, test['error']) + self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) else: - self.run_operations(test['operations']) + self.run_operations(test["operations"]) - self.check_events(test['events'], test['ignore']) + self.check_events(test["events"], test["ignore"]) except Exception: # Print the events after a test failure. - print('\nFailed test: %r' % (test['description'],)) - print('Operations:') + print("\nFailed test: %r" % (test["description"],)) + print("Operations:") for op in self._ops: print(op) - print('Threads:') + print("Threads:") print(self.targets) - print('Connections:') + print("Connections:") print(self.labels) - print('Events:') + print("Events:") for event in self.listener.events: print(event) - print('Log:') + print("Log:") for log in self.logs: print(log) raise POOL_OPTIONS = { - 'maxPoolSize': 50, - 'minPoolSize': 1, - 'maxIdleTimeMS': 10000, - 'waitQueueTimeoutMS': 10000 + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, } # @@ -319,11 +317,10 @@ def test_1_client_connection_pool_options(self): def test_2_all_client_pools_have_same_options(self): client = rs_or_single_client(**self.POOL_OPTIONS) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") # Discover at least one secondary. if client_context.has_secondaries: - client.admin.command( - 'ping', read_preference=ReadPreference.SECONDARY) + client.admin.command("ping", read_preference=ReadPreference.SECONDARY) pools = get_pools(client) pool_opts = pools[0].opts @@ -332,9 +329,8 @@ def test_2_all_client_pools_have_same_options(self): self.assertEqual(pool.opts, pool_opts) def test_3_uri_connection_pool_options(self): - opts = '&'.join(['%s=%s' % (k, v) - for k, v in self.POOL_OPTIONS.items()]) - uri = 'mongodb://%s/?%s' % (client_context.pair, opts) + opts = "&".join(["%s=%s" % (k, v) for k, v in self.POOL_OPTIONS.items()]) + uri = "mongodb://%s/?%s" % (client_context.pair, opts) client = rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts @@ -347,18 +343,16 @@ def test_4_subscribe_to_events(self): self.assertEqual(listener.event_count(PoolCreatedEvent), 1) # Creates a new connection. - client.admin.command('ping') - self.assertEqual( - listener.event_count(ConnectionCheckOutStartedEvent), 1) + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) # Uses the existing connection. - client.admin.command('ping') - self.assertEqual( - listener.event_count(ConnectionCheckOutStartedEvent), 2) + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) @@ -373,49 +367,44 @@ def test_5_check_out_fails_connection_error(self): pool = get_pool(client) def mock_connect(*args, **kwargs): - raise ConnectionFailure('connect failed') + raise ConnectionFailure("connect failed") + pool.connect = mock_connect # Un-patch Pool.connect to break the cyclic reference. - self.addCleanup(delattr, pool, 'connect') + self.addCleanup(delattr, pool, "connect") # Attempt to create a new connection. - with self.assertRaisesRegex(ConnectionFailure, 'connect failed'): - client.admin.command('ping') + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + client.admin.command("ping") self.assertIsInstance(listener.events[0], PoolCreatedEvent) self.assertIsInstance(listener.events[1], PoolReadyEvent) - self.assertIsInstance(listener.events[2], - ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[3], - ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) self.assertIsInstance(listener.events[4], PoolClearedEvent) failed_event = listener.events[3] - self.assertEqual( - failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) def test_5_check_out_fails_auth_error(self): listener = CMAPListener() client = single_client_noauth( - username="notauser", password="fail", - event_listeners=[listener]) + username="notauser", password="fail", event_listeners=[listener] + ) self.addCleanup(client.close) # Attempt to create a new connection. - with self.assertRaisesRegex(OperationFailure, 'failed'): - client.admin.command('ping') + with self.assertRaisesRegex(OperationFailure, "failed"): + client.admin.command("ping") self.assertIsInstance(listener.events[0], PoolCreatedEvent) self.assertIsInstance(listener.events[1], PoolReadyEvent) - self.assertIsInstance(listener.events[2], - ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) # Error happens here. self.assertIsInstance(listener.events[4], ConnectionClosedEvent) - self.assertIsInstance(listener.events[5], - ConnectionCheckOutFailedEvent) - self.assertEqual(listener.events[5].reason, - ConnectionCheckOutFailedReason.CONN_ERROR) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) # # Extra non-spec tests @@ -426,13 +415,13 @@ def assertRepr(self, obj): self.assertEqual(repr(new_obj), repr(obj)) def test_events_repr(self): - host = ('localhost', 27017) + host = ("localhost", 27017) self.assertRepr(ConnectionCheckedInEvent(host, 1)) self.assertRepr(ConnectionCheckedOutEvent(host, 1)) - self.assertRepr(ConnectionCheckOutFailedEvent( - host, ConnectionCheckOutFailedReason.POOL_CLOSED)) - self.assertRepr(ConnectionClosedEvent( - host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr( + ConnectionCheckOutFailedEvent(host, ConnectionCheckOutFailedReason.POOL_CLOSED) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) self.assertRepr(ConnectionCreatedEvent(host, 1)) self.assertRepr(ConnectionReadyEvent(host, 1)) self.assertRepr(ConnectionCheckOutStartedEvent(host)) @@ -446,7 +435,7 @@ def test_close_leaves_pool_unpaused(self): # test_threads.TestThreads.test_client_disconnect listener = CMAPListener() client = single_client(event_listeners=[listener]) - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) client.close() self.assertEqual(1, listener.event_count(PoolClearedEvent)) @@ -464,7 +453,6 @@ def run_scenario(self): class CMAPTestCreator(TestCreator): - def tests(self, scenario_def): """Extract the tests from a spec file. diff --git a/test/test_code.py b/test/test_code.py index 1c4b5be1fe..9ff305e39a 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -20,9 +20,10 @@ sys.path[0:0] = [""] -from bson.code import Code from test import unittest +from bson.code import Code + class TestCode(unittest.TestCase): def test_types(self): @@ -37,6 +38,7 @@ def test_read_only(self): def set_c(): c.scope = 5 # type: ignore + self.assertRaises(AttributeError, set_c) def test_code(self): @@ -47,15 +49,15 @@ def test_code(self): self.assertTrue(isinstance(a_code, Code)) self.assertFalse(isinstance(a_string, Code)) self.assertIsNone(a_code.scope) - with_scope = Code('hello world', {'my_var': 5}) - self.assertEqual({'my_var': 5}, with_scope.scope) - empty_scope = Code('hello world', {}) + with_scope = Code("hello world", {"my_var": 5}) + self.assertEqual({"my_var": 5}, with_scope.scope) + empty_scope = Code("hello world", {}) self.assertEqual({}, empty_scope.scope) - another_scope = Code(with_scope, {'new_var': 42}) + another_scope = Code(with_scope, {"new_var": 42}) self.assertEqual(str(with_scope), str(another_scope)) - self.assertEqual({'new_var': 42, 'my_var': 5}, another_scope.scope) + self.assertEqual({"new_var": 42, "my_var": 5}, another_scope.scope) # No error. - Code('héllø world¡') + Code("héllø world¡") def test_repr(self): c = Code("hello world", {}) @@ -98,8 +100,7 @@ def test_scope_preserved(self): def test_scope_kwargs(self): self.assertEqual({"a": 1}, Code("", a=1).scope) self.assertEqual({"a": 1}, Code("", {"a": 2}, a=1).scope) - self.assertEqual({"a": 1, "b": 2, "c": 3}, - Code("", {"b": 2}, a=1, c=3).scope) + self.assertEqual({"a": 1, "b": 2, "c": 3}, Code("", {"b": 2}, a=1, c=3).scope) if __name__ == "__main__": diff --git a/test/test_collation.py b/test/test_collation.py index 9c4f4f6576..d8410a9de4 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -16,42 +16,48 @@ import functools import warnings - +from test import IntegrationTest, client_context, unittest +from test.utils import EventListener, rs_or_single_client from typing import Any from pymongo.collation import ( Collation, - CollationCaseFirst, CollationStrength, CollationAlternate, - CollationMaxVariable) + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) from pymongo.errors import ConfigurationError -from pymongo.operations import (DeleteMany, DeleteOne, IndexModel, ReplaceOne, - UpdateMany, UpdateOne) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.write_concern import WriteConcern -from test import client_context, IntegrationTest, unittest -from test.utils import EventListener, rs_or_single_client class TestCollationObject(unittest.TestCase): - def test_constructor(self): self.assertRaises(TypeError, Collation, locale=42) # Fill in a locale to test the other options. - _Collation = functools.partial(Collation, 'en_US') + _Collation = functools.partial(Collation, "en_US") # No error. _Collation(caseFirst=CollationCaseFirst.UPPER) - self.assertRaises(TypeError, _Collation, caseLevel='true') - self.assertRaises(ValueError, _Collation, strength='six') - self.assertRaises(TypeError, _Collation, - numericOrdering='true') + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") self.assertRaises(TypeError, _Collation, alternate=5) self.assertRaises(TypeError, _Collation, maxVariable=2) - self.assertRaises(TypeError, _Collation, normalization='false') - self.assertRaises(TypeError, _Collation, backwards='true') + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") # No errors. - Collation('en_US', future_option='bar', another_option=42) + Collation("en_US", future_option="bar", another_option=42) collation = Collation( - 'en_US', + "en_US", caseLevel=True, caseFirst=CollationCaseFirst.UPPER, strength=CollationStrength.QUATERNARY, @@ -59,24 +65,27 @@ def test_constructor(self): alternate=CollationAlternate.SHIFTED, maxVariable=CollationMaxVariable.SPACE, normalization=True, - backwards=True) - - self.assertEqual({ - 'locale': 'en_US', - 'caseLevel': True, - 'caseFirst': 'upper', - 'strength': 4, - 'numericOrdering': True, - 'alternate': 'shifted', - 'maxVariable': 'space', - 'normalization': True, - 'backwards': True - }, collation.document) - - self.assertEqual({ - 'locale': 'en_US', - 'backwards': True - }, Collation('en_US', backwards=True).document) + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) class TestCollation(IntegrationTest): @@ -91,7 +100,7 @@ def setUpClass(cls): cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test - cls.collation = Collation('en_US') + cls.collation = Collation("en_US") cls.warn_context = warnings.catch_warnings() cls.warn_context.__enter__() warnings.simplefilter("ignore", DeprecationWarning) @@ -108,38 +117,33 @@ def tearDown(self): super(TestCollation, self).tearDown() def last_command_started(self): - return self.listener.results['started'][-1].command + return self.listener.results["started"][-1].command def assertCollationInLastCommand(self): - self.assertEqual( - self.collation.document, - self.last_command_started()['collation']) + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) def test_create_collection(self): self.db.test.drop() - self.db.create_collection('test', collation=self.collation) + self.db.create_collection("test", collation=self.collation) self.assertCollationInLastCommand() # Test passing collation as a dict as well. self.db.test.drop() self.listener.results.clear() - self.db.create_collection('test', collation=self.collation.document) + self.db.create_collection("test", collation=self.collation.document) self.assertCollationInLastCommand() def test_index_model(self): - model = IndexModel([('a', 1), ('b', -1)], collation=self.collation) - self.assertEqual(self.collation.document, model.document['collation']) + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) def test_create_index(self): - self.db.test.create_index('foo', collation=self.collation) - ci_cmd = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - ci_cmd['indexes'][0]['collation']) + self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) def test_aggregate(self): - self.db.test.aggregate([{'$group': {'_id': 42}}], - collation=self.collation) + self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) self.assertCollationInLastCommand() def test_count_documents(self): @@ -147,15 +151,15 @@ def test_count_documents(self): self.assertCollationInLastCommand() def test_distinct(self): - self.db.test.distinct('foo', collation=self.collation) + self.db.test.distinct("foo", collation=self.collation) self.assertCollationInLastCommand() self.listener.results.clear() - self.db.test.find(collation=self.collation).distinct('foo') + self.db.test.find(collation=self.collation).distinct("foo") self.assertCollationInLastCommand() def test_find_command(self): - self.db.test.insert_one({'is this thing on?': True}) + self.db.test.insert_one({"is this thing on?": True}) self.listener.results.clear() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() @@ -165,127 +169,118 @@ def test_explain_command(self): self.db.test.find(collation=self.collation).explain() # The collation should be part of the explained command. self.assertEqual( - self.collation.document, - self.last_command_started()['explain']['collation']) + self.collation.document, self.last_command_started()["explain"]["collation"] + ) def test_delete(self): - self.db.test.delete_one({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) + self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) self.listener.results.clear() - self.db.test.delete_many({'foo': 42}, collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['deletes'][0]['collation']) + self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) def test_update(self): - self.db.test.replace_one({'foo': 42}, {'foo': 43}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) self.listener.results.clear() - self.db.test.update_one({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) self.listener.results.clear() - self.db.test.update_many({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) - command = self.listener.results['started'][0].command - self.assertEqual( - self.collation.document, - command['updates'][0]['collation']) + self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.results["started"][0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) def test_find_and(self): - self.db.test.find_one_and_delete({'foo': 42}, collation=self.collation) + self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) self.assertCollationInLastCommand() self.listener.results.clear() - self.db.test.find_one_and_update({'foo': 42}, {'$set': {'foo': 43}}, - collation=self.collation) + self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) self.assertCollationInLastCommand() self.listener.results.clear() - self.db.test.find_one_and_replace({'foo': 42}, {'foo': 43}, - collation=self.collation) + self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) self.assertCollationInLastCommand() def test_bulk_write(self): - self.db.test.collection.bulk_write([ - DeleteOne({'noCollation': 42}), - DeleteMany({'noCollation': 42}), - DeleteOne({'foo': 42}, collation=self.collation), - DeleteMany({'foo': 42}, collation=self.collation), - ReplaceOne({'noCollation': 24}, {'bar': 42}), - UpdateOne({'noCollation': 84}, {'$set': {'bar': 10}}, upsert=True), - UpdateMany({'noCollation': 45}, {'$set': {'bar': 42}}), - ReplaceOne({'foo': 24}, {'foo': 42}, collation=self.collation), - UpdateOne({'foo': 84}, {'$set': {'foo': 10}}, upsert=True, - collation=self.collation), - UpdateMany({'foo': 45}, {'$set': {'foo': 42}}, - collation=self.collation) - ]) - - delete_cmd = self.listener.results['started'][0].command - update_cmd = self.listener.results['started'][1].command + self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.results["started"][0].command + update_cmd = self.listener.results["started"][1].command def check_ops(ops): for op in ops: - if 'noCollation' in op['q']: - self.assertNotIn('collation', op) + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) else: - self.assertEqual(self.collation.document, - op['collation']) + self.assertEqual(self.collation.document, op["collation"]) - check_ops(delete_cmd['deletes']) - check_ops(update_cmd['updates']) + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) def test_indexes_same_keys_different_collations(self): self.db.test.drop() - usa_collation = Collation('en_US') - ja_collation = Collation('ja') - self.db.test.create_indexes([ - IndexModel('fieldname', collation=usa_collation), - IndexModel('fieldname', name='japanese_version', - collation=ja_collation), - IndexModel('fieldname', name='simple') - ]) + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) indexes = self.db.test.index_information() - self.assertEqual(usa_collation.document['locale'], - indexes['fieldname_1']['collation']['locale']) - self.assertEqual(ja_collation.document['locale'], - indexes['japanese_version']['collation']['locale']) - self.assertNotIn('collation', indexes['simple']) - self.db.test.drop_index('fieldname_1') + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + self.db.test.drop_index("fieldname_1") indexes = self.db.test.index_information() - self.assertIn('japanese_version', indexes) - self.assertIn('simple', indexes) - self.assertNotIn('fieldname', indexes) + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) def test_unacknowledged_write(self): unacknowledged = WriteConcern(w=0) - collection = self.db.get_collection( - 'test', write_concern=unacknowledged) + collection = self.db.get_collection("test", write_concern=unacknowledged) with self.assertRaises(ConfigurationError): collection.update_one( - {'hello': 'world'}, {'$set': {'hello': 'moon'}}, - collation=self.collation) - update_one = UpdateOne({'hello': 'world'}, {'$set': {'hello': 'moon'}}, - collation=self.collation) + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) with self.assertRaises(ConfigurationError): collection.bulk_write([update_one]) def test_cursor_collation(self): - self.db.test.insert_one({'hello': 'world'}) + self.db.test.insert_one({"hello": "world"}) next(self.db.test.find().collation(self.collation)) self.assertCollationInLastCommand() diff --git a/test/test_collection.py b/test/test_collection.py index 3d4a107aa9..f81c2c2645 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -19,7 +19,6 @@ import contextlib import re import sys - from codecs import utf_8_decode # type: ignore from collections import defaultdict from typing import no_type_check @@ -28,47 +27,57 @@ sys.path[0:0] = [""] +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + get_pool, + is_mongos, + rs_or_single_client, + single_client, + wait_until, +) + from bson import encode -from bson.raw_bson import RawBSONDocument -from bson.regex import Regex from bson.codec_options import CodecOptions from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex from bson.son import SON from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT from pymongo.bulk import BulkWriteError from pymongo.collection import Collection, ReturnDocument from pymongo.command_cursor import CommandCursor from pymongo.cursor import CursorType -from pymongo.errors import (ConfigurationError, - DocumentTooLarge, - DuplicateKeyError, - ExecutionTimeout, - InvalidDocument, - InvalidName, - InvalidOperation, - OperationFailure, - WriteConcernError) +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command from pymongo.mongo_client import MongoClient from pymongo.operations import * from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference -from pymongo.results import (InsertOneResult, - InsertManyResult, - UpdateResult, - DeleteResult) +from pymongo.results import ( + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) from pymongo.write_concern import WriteConcern -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import (get_pool, is_mongos, - rs_or_single_client, single_client, - wait_until, EventListener, - IMPOSSIBLE_WRITE_CONCERN) class TestCollectionNoConnect(unittest.TestCase): - """Test Collection features on a client that does not connect. - """ + """Test Collection features on a client that does not connect.""" + db: Database @classmethod @@ -95,7 +104,7 @@ def make_col(base, name): def test_getattr(self): coll = self.db.test - self.assertTrue(isinstance(coll['_does_not_exist'], Collection)) + self.assertTrue(isinstance(coll["_does_not_exist"], Collection)) with self.assertRaises(AttributeError) as context: coll._does_not_exist @@ -104,8 +113,7 @@ def test_getattr(self): # "AttributeError: Collection has no attribute '_does_not_exist'. To # access the test._does_not_exist collection, use # database['test._does_not_exist']." - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) coll2 = coll.with_options(write_concern=WriteConcern(w=0)) self.assertEqual(coll2.write_concern, WriteConcern(w=0)) @@ -143,8 +151,8 @@ def write_concern_collection(self): with self.assertRaises(WriteConcernError): # Unsatisfiable write concern. yield Collection( - self.db, 'test', - write_concern=WriteConcern(w=len(client_context.nodes) + 1)) + self.db, "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) else: yield self.db.test @@ -163,33 +171,33 @@ def test_create(self): db = client_context.client.pymongo_test db.create_test_no_wc.drop() wait_until( - lambda: 'create_test_no_wc' not in db.list_collection_names(), - 'drop create_test_no_wc collection') - Collection(db, name='create_test_no_wc', create=True) + lambda: "create_test_no_wc" not in db.list_collection_names(), + "drop create_test_no_wc collection", + ) + Collection(db, name="create_test_no_wc", create=True) wait_until( - lambda: 'create_test_no_wc' in db.list_collection_names(), - 'create create_test_no_wc collection') + lambda: "create_test_no_wc" in db.list_collection_names(), + "create create_test_no_wc collection", + ) # SERVER-33317 - if (not client_context.is_mongos or not - client_context.version.at_least(3, 7, 0)): + if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): with self.assertRaises(OperationFailure): Collection( - db, name='create-test-wc', - write_concern=IMPOSSIBLE_WRITE_CONCERN, - create=True) + db, name="create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN, create=True + ) def test_drop_nonexistent_collection(self): - self.db.drop_collection('test') - self.assertFalse('test' in self.db.list_collection_names()) + self.db.drop_collection("test") + self.assertFalse("test" in self.db.list_collection_names()) # No exception - self.db.drop_collection('test') + self.db.drop_collection("test") def test_create_indexes(self): db = self.db - self.assertRaises(TypeError, db.test.create_indexes, 'foo') - self.assertRaises(TypeError, db.test.create_indexes, ['foo']) + self.assertRaises(TypeError, db.test.create_indexes, "foo") + self.assertRaises(TypeError, db.test.create_indexes, ["foo"]) self.assertRaises(TypeError, IndexModel, 5) self.assertRaises(ValueError, IndexModel, []) @@ -198,8 +206,7 @@ def test_create_indexes(self): self.assertEqual(len(db.test.index_information()), 1) db.test.create_indexes([IndexModel("hello")]) - db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)])]) + db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) # Tuple instead of list. db.test.create_indexes([IndexModel((("world", ASCENDING),))]) @@ -207,9 +214,9 @@ def test_create_indexes(self): self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - names = db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)], - name="hello_world")]) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) self.assertEqual(names, ["hello_world"]) db.test.drop_indexes() @@ -219,37 +226,35 @@ def test_create_indexes(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) - names = db.test.create_indexes([IndexModel([("hello", DESCENDING), - ("world", ASCENDING)]), - IndexModel("hello")]) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) info = db.test.index_information() for name in names: self.assertTrue(name in info) db.test.drop() - db.test.insert_one({'a': 1}) - db.test.insert_one({'a': 1}) - self.assertRaises( - DuplicateKeyError, - db.test.create_indexes, - [IndexModel('a', unique=True)]) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + self.assertRaises(DuplicateKeyError, db.test.create_indexes, [IndexModel("a", unique=True)]) with self.write_concern_collection() as coll: - coll.create_indexes([IndexModel('hello')]) + coll.create_indexes([IndexModel("hello")]) @client_context.require_version_max(4, 3, -1) def test_create_indexes_commitQuorum_requires_44(self): db = self.db with self.assertRaisesRegex( - ConfigurationError, - 'Must be connected to MongoDB 4\.4\+ to use the commitQuorum ' - 'option for createIndexes'): - db.coll.create_indexes([IndexModel('a')], commitQuorum="majority") + ConfigurationError, + "Must be connected to MongoDB 4\.4\+ to use the commitQuorum " + "option for createIndexes", + ): + db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") @client_context.require_no_standalone @client_context.require_version_min(4, 4, -1) def test_create_indexes_commitQuorum(self): - self.db.coll.create_indexes([IndexModel('a')], commitQuorum="majority") + self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") def test_create_index(self): db = self.db @@ -271,8 +276,7 @@ def test_create_index(self): self.assertEqual(len(db.test.index_information()), 4) db.test.drop_indexes() - ix = db.test.create_index([("hello", DESCENDING), - ("world", ASCENDING)], name="hello_world") + ix = db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") self.assertEqual(ix, "hello_world") db.test.drop_indexes() @@ -286,13 +290,12 @@ def test_create_index(self): self.assertTrue("hello_-1_world_1" in db.test.index_information()) db.test.drop() - db.test.insert_one({'a': 1}) - db.test.insert_one({'a': 1}) - self.assertRaises( - DuplicateKeyError, db.test.create_index, 'a', unique=True) + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + self.assertRaises(DuplicateKeyError, db.test.create_index, "a", unique=True) with self.write_concern_collection() as coll: - coll.create_index([('hello', DESCENDING)]) + coll.create_index([("hello", DESCENDING)]) def test_drop_index(self): db = self.db @@ -321,31 +324,22 @@ def test_drop_index(self): self.assertTrue("hello_1" in db.test.index_information()) with self.write_concern_collection() as coll: - coll.drop_index('hello_1') + coll.drop_index("hello_1") @client_context.require_no_mongos @client_context.require_test_commands def test_index_management_max_time_ms(self): coll = self.db.test - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: + self.assertRaises(ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) self.assertRaises( - ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, - coll.create_indexes, - [IndexModel("foo")], - maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) + ExecutionTimeout, coll.create_indexes, [IndexModel("foo")], maxTimeMS=1 + ) + self.assertRaises(ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) + self.assertRaises(ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_list_indexes(self): db = self.db @@ -362,16 +356,15 @@ def map_indexes(indexes): db.test.create_index("hello") indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 2) - self.assertEqual(map_indexes(indexes)["hello_1"]["key"], - SON([("hello", ASCENDING)])) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], - unique=True) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 3) index_map = map_indexes(indexes) - self.assertEqual(index_map["hello_-1_world_1"]["key"], - SON([("hello", DESCENDING), ("world", ASCENDING)])) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) # List indexes on a collection that does not exist. @@ -391,26 +384,23 @@ def test_index_info(self): db.test.create_index("hello") self.assertEqual(len(db.test.index_information()), 2) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) - db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], - unique=True) - self.assertEqual(db.test.index_information()["hello_1"]["key"], - [("hello", ASCENDING)]) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) self.assertEqual(len(db.test.index_information()), 3) - self.assertEqual([("hello", DESCENDING), ("world", ASCENDING)], - db.test.index_information()["hello_-1_world_1"]["key"] - ) self.assertEqual( - True, db.test.index_information()["hello_-1_world_1"]["unique"]) + [("hello", DESCENDING), ("world", ASCENDING)], + db.test.index_information()["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, db.test.index_information()["hello_-1_world_1"]["unique"]) def test_index_geo2d(self): db = self.db db.test.drop_indexes() - self.assertEqual('loc_2d', db.test.create_index([("loc", GEO2D)])) - index_info = db.test.index_information()['loc_2d'] - self.assertEqual([('loc', '2d')], index_info['key']) + self.assertEqual("loc_2d", db.test.create_index([("loc", GEO2D)])) + index_info = db.test.index_information()["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) # geoSearch was deprecated in 4.4 and removed in 5.0 @client_context.require_version_max(4, 5) @@ -418,35 +408,29 @@ def test_index_geo2d(self): def test_index_haystack(self): db = self.db db.test.drop() - _id = db.test.insert_one({ - "pos": {"long": 34.2, "lat": 33.3}, - "type": "restaurant" - }).inserted_id - db.test.insert_one({ - "pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant" - }) - db.test.insert_one({ - "pos": {"long": 59.1, "lat": 87.2}, "type": "office" - }) - db.test.create_index( - [("pos", "geoHaystack"), ("type", ASCENDING)], - bucketSize=1 - ) - - results = db.command(SON([ - ("geoSearch", "test"), - ("near", [33, 33]), - ("maxDistance", 6), - ("search", {"type": "restaurant"}), - ("limit", 30), - ]))['results'] + _id = db.test.insert_one( + {"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"} + ).inserted_id + db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + )["results"] self.assertEqual(2, len(results)) - self.assertEqual({ - "_id": _id, - "pos": {"long": 34.2, "lat": 33.3}, - "type": "restaurant" - }, results[0]) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) @client_context.require_no_mongos def test_index_text(self): @@ -456,38 +440,33 @@ def test_index_text(self): index_info = db.test.index_information()["t_text"] self.assertTrue("weights" in index_info) - db.test.insert_many([ - {'t': 'spam eggs and spam'}, - {'t': 'spam'}, - {'t': 'egg sausage and bacon'}]) + db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] + ) # MongoDB 2.6 text search. Create 'score' field in projection. - cursor = db.test.find( - {'$text': {'$search': 'spam'}}, - {'score': {'$meta': 'textScore'}}) + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) + cursor.sort([("score", {"$meta": "textScore"})]) results = list(cursor) - self.assertTrue(results[0]['score'] >= results[1]['score']) + self.assertTrue(results[0]["score"] >= results[1]["score"]) db.test.drop_indexes() def test_index_2dsphere(self): db = self.db db.test.drop_indexes() - self.assertEqual("geo_2dsphere", - db.test.create_index([("geo", GEOSPHERE)])) + self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) for dummy, info in db.test.index_information().items(): - field, idx_type = info['key'][0] - if field == 'geo' and idx_type == '2dsphere': + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": break else: self.fail("2dsphere index not found.") - poly = {"type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} query = {"geo": {"$within": {"$geometry": poly}}} # This query will error without a 2dsphere index. @@ -497,12 +476,11 @@ def test_index_2dsphere(self): def test_index_hashed(self): db = self.db db.test.drop_indexes() - self.assertEqual("a_hashed", - db.test.create_index([("a", HASHED)])) + self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) for dummy, info in db.test.index_information().items(): - field, idx_type = info['key'][0] - if field == 'a' and idx_type == 'hashed': + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": break else: self.fail("hashed index not found.") @@ -512,25 +490,25 @@ def test_index_hashed(self): def test_index_sparse(self): db = self.db db.test.drop_indexes() - db.test.create_index([('key', ASCENDING)], sparse=True) - self.assertTrue(db.test.index_information()['key_1']['sparse']) + db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue(db.test.index_information()["key_1"]["sparse"]) def test_index_background(self): db = self.db db.test.drop_indexes() - db.test.create_index([('keya', ASCENDING)]) - db.test.create_index([('keyb', ASCENDING)], background=False) - db.test.create_index([('keyc', ASCENDING)], background=True) - self.assertFalse('background' in db.test.index_information()['keya_1']) - self.assertFalse(db.test.index_information()['keyb_1']['background']) - self.assertTrue(db.test.index_information()['keyc_1']['background']) + db.test.create_index([("keya", ASCENDING)]) + db.test.create_index([("keyb", ASCENDING)], background=False) + db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertFalse("background" in db.test.index_information()["keya_1"]) + self.assertFalse(db.test.index_information()["keyb_1"]["background"]) + self.assertTrue(db.test.index_information()["keyc_1"]["background"]) def _drop_dups_setup(self, db): - db.drop_collection('test') - db.test.insert_one({'i': 1}) - db.test.insert_one({'i': 2}) - db.test.insert_one({'i': 2}) # duplicate - db.test.insert_one({'i': 3}) + db.drop_collection("test") + db.test.insert_one({"i": 1}) + db.test.insert_one({"i": 2}) + db.test.insert_one({"i": 2}) # duplicate + db.test.insert_one({"i": 3}) def test_index_dont_drop_dups(self): # Try *not* dropping duplicates @@ -539,11 +517,8 @@ def test_index_dont_drop_dups(self): # There's a duplicate def test_create(): - db.test.create_index( - [('i', ASCENDING)], - unique=True, - dropDups=False - ) + db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + self.assertRaises(DuplicateKeyError, test_create) # Duplicate wasn't dropped @@ -554,12 +529,12 @@ def test_create(): # Get the plan dynamically because the explain format will change. def get_plan_stage(self, root, stage): - if root.get('stage') == stage: + if root.get("stage") == stage: return root elif "inputStage" in root: - return self.get_plan_stage(root['inputStage'], stage) + return self.get_plan_stage(root["inputStage"], stage) elif "inputStages" in root: - for i in root['inputStages']: + for i in root["inputStages"]: stage = self.get_plan_stage(i, stage) if stage: return stage @@ -567,8 +542,8 @@ def get_plan_stage(self, root, stage): # queryPlan (and slotBasedPlan) are new in 5.0. return self.get_plan_stage(root["queryPlan"], stage) elif "shards" in root: - for i in root['shards']: - stage = self.get_plan_stage(i['winningPlan'], stage) + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) if stage: return stage return {} @@ -578,52 +553,52 @@ def test_index_filter(self): db.drop_collection("test") # Test bad filter spec on create. - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression=5) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={"x": {"$asdasd": 3}}) - self.assertRaises(OperationFailure, db.test.create_index, "x", - partialFilterExpression={"$and": 5}) - - self.assertEqual("x_1", db.test.create_index( - [('x', ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}})) + self.assertRaises(OperationFailure, db.test.create_index, "x", partialFilterExpression=5) + self.assertRaises( + OperationFailure, + db.test.create_index, + "x", + partialFilterExpression={"x": {"$asdasd": 3}}, + ) + self.assertRaises( + OperationFailure, db.test.create_index, "x", partialFilterExpression={"$and": 5} + ) + + self.assertEqual( + "x_1", + db.test.create_index([("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}), + ) db.test.insert_one({"x": 5, "a": 2}) db.test.insert_one({"x": 6, "a": 1}) # Operations that use the partial index. explain = db.test.find({"x": 6, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) explain = db.test.find({"x": {"$gt": 1}, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) explain = db.test.find({"x": 6, "a": {"$lte": 1}}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'IXSCAN') - self.assertEqual("x_1", stage.get('indexName')) - self.assertTrue(stage.get('isPartial')) + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) # Operations that do not use the partial index. explain = db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) explain = db.test.find({"x": 6}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) # Test drop_indexes. db.test.drop_index("x_1") explain = db.test.find({"x": 6, "a": 1}).explain() - stage = self.get_plan_stage(explain['queryPlanner']['winningPlan'], - 'COLLSCAN') + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") self.assertNotEqual({}, stage) def test_field_selection(self): @@ -685,7 +660,7 @@ def test_options(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) result = db.test.options() - self.assertEqual(result, {"capped": True, 'size': 4096}) + self.assertEqual(result, {"capped": True, "size": 4096}) db.drop_collection("test") def test_insert_one(self): @@ -710,19 +685,16 @@ def test_insert_one(self): self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) self.assertEqual(2, db.test.count_documents({})) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertTrue(isinstance(result.inserted_id, ObjectId)) self.assertEqual(document["_id"], result.inserted_id) self.assertFalse(result.acknowledged) # The insert failed duplicate key... - wait_until(lambda: 2 == db.test.count_documents({}), - 'forcing duplicate key error') + wait_until(lambda: 2 == db.test.count_documents({}), "forcing duplicate key error") - document = RawBSONDocument( - encode({'_id': ObjectId(), 'foo': 'bar'})) + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) result = db.test.insert_one(document) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(result.inserted_id, None) @@ -740,7 +712,7 @@ def test_insert_many(self): _id = doc["_id"] self.assertTrue(isinstance(_id, ObjectId)) self.assertTrue(_id in result.inserted_ids) - self.assertEqual(1, db.test.count_documents({'_id': _id})) + self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [{"_id": i} for i in range(5)] @@ -755,15 +727,13 @@ def test_insert_many(self): self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) - docs = [RawBSONDocument(encode({"_id": i + 5})) - for i in range(5)] + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) self.assertTrue(isinstance(result.inserted_ids, list)) self.assertEqual([], result.inserted_ids) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) self.assertTrue(isinstance(result, InsertManyResult)) @@ -775,11 +745,11 @@ def test_insert_many_generator(self): coll.delete_many({}) def gen(): - yield {'a': 1, 'b': 1} - yield {'a': 1, 'b': 2} - yield {'a': 2, 'b': 3} - yield {'a': 3, 'b': 5} - yield {'a': 5, 'b': 8} + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} result = coll.insert_many(gen()) self.assertEqual(5, len(result.inserted_ids)) @@ -787,21 +757,17 @@ def gen(): def test_insert_many_invalid(self): db = self.db - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): db.test.insert_many({}) - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): db.test.insert_many([]) - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): db.test.insert_many(1) # type: ignore[arg-type] - with self.assertRaisesRegex( - TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({'_id': 2}))) # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) # type: ignore[arg-type] def test_delete_one(self): self.db.test.drop() @@ -822,13 +788,12 @@ def test_delete_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, self.db.test.count_documents({})) - db = self.db.client.get_database(self.db.name, - write_concern=WriteConcern(w=0)) + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_one({"z": 1}) self.assertTrue(isinstance(result, DeleteResult)) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until(lambda: 0 == db.test.count_documents({}), 'delete 1 documents') + wait_until(lambda: 0 == db.test.count_documents({}), "delete 1 documents") def test_delete_many(self): self.db.test.drop() @@ -844,25 +809,20 @@ def test_delete_many(self): self.assertTrue(result.acknowledged) self.assertEqual(0, self.db.test.count_documents({"x": 1})) - db = self.db.client.get_database(self.db.name, - write_concern=WriteConcern(w=0)) + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_many({"y": 1}) self.assertTrue(isinstance(result, DeleteResult)) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until( - lambda: 0 == db.test.count_documents({}), 'delete 2 documents') + wait_until(lambda: 0 == db.test.count_documents({}), "delete 2 documents") def test_command_document_too_large(self): - large = '*' * (client_context.max_bson_size + _COMMAND_OVERHEAD) + large = "*" * (client_context.max_bson_size + _COMMAND_OVERHEAD) coll = self.db.test - self.assertRaises( - DocumentTooLarge, coll.insert_one, {'data': large}) + self.assertRaises(DocumentTooLarge, coll.insert_one, {"data": large}) # update_one and update_many are the same - self.assertRaises( - DocumentTooLarge, coll.replace_one, {}, {'data': large}) - self.assertRaises( - DocumentTooLarge, coll.delete_one, {'data': large}) + self.assertRaises(DocumentTooLarge, coll.replace_one, {}, {"data": large}) + self.assertRaises(DocumentTooLarge, coll.delete_one, {"data": large}) def test_write_large_document(self): max_size = client_context.max_bson_size @@ -871,42 +831,38 @@ def test_write_large_document(self): half_str = "x" * half_size self.assertEqual(max_size, 16777216) - self.assertRaises(OperationFailure, self.db.test.insert_one, - {"foo": max_str}) - self.assertRaises(OperationFailure, self.db.test.replace_one, - {}, {"foo": max_str}, upsert=True) - self.assertRaises(OperationFailure, self.db.test.insert_many, - [{"x": 1}, {"foo": max_str}]) + self.assertRaises(OperationFailure, self.db.test.insert_one, {"foo": max_str}) + self.assertRaises( + OperationFailure, self.db.test.replace_one, {}, {"foo": max_str}, upsert=True + ) + self.assertRaises(OperationFailure, self.db.test.insert_many, [{"x": 1}, {"foo": max_str}]) self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) self.db.test.insert_one({"bar": "x"}) # Use w=0 here to test legacy doc size checking in all server versions unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) - self.assertRaises(DocumentTooLarge, unack_coll.replace_one, - {"bar": "x"}, {"bar": "x" * (max_size - 14)}) + self.assertRaises( + DocumentTooLarge, unack_coll.replace_one, {"bar": "x"}, {"bar": "x" * (max_size - 14)} + ) self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) def test_insert_bypass_document_validation(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$exists": True}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test insert_one - self.assertRaises(OperationFailure, db.test.insert_one, - {"_id": 1, "x": 100}) - result = db.test.insert_one({"_id": 1, "x": 100}, - bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.insert_one, {"_id": 1, "x": 100}) + result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(1, result.inserted_id) - result = db.test.insert_one({"_id":2, "a":0}) + result = db.test.insert_one({"_id": 2, "a": 0}) self.assertTrue(isinstance(result, InsertOneResult)) self.assertEqual(2, result.inserted_id) db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1}), - "find w:0 inserted document") + wait_until(lambda: db_w0.test.find_one({"y": 1}), "find w:0 inserted document") # Test insert_many docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] @@ -931,25 +887,25 @@ def test_insert_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) - self.assertRaises(OperationFailure, db_w0.test.insert_many, - [{"x": 1}, {"x": 2}], - bypass_document_validation=True) + self.assertRaises( + OperationFailure, + db_w0.test.insert_many, + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) def test_replace_bypass_document_validation(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$exists": True}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test replace_one db.test.insert_one({"a": 101}) - self.assertRaises(OperationFailure, db.test.replace_one, - {"a": 101}, {"y": 1}) + self.assertRaises(OperationFailure, db.test.replace_one, {"a": 101}, {"y": 1}) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"a": 101})) - db.test.replace_one({"a": 101}, {"y": 1}, - bypass_document_validation=True) + db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"a": 101})) self.assertEqual(1, db.test.count_documents({"y": 1})) db.test.replace_one({"y": 1}, {"a": 102}) @@ -958,123 +914,107 @@ def test_replace_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": 102})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.replace_one, - {"y": 1}, {"x": 101}) + self.assertRaises(OperationFailure, db.test.replace_one, {"y": 1}, {"x": 101}) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"y": 1})) - db.test.replace_one({"y": 1}, {"x": 101}, - bypass_document_validation=True) + db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"x": 101})) - db.test.replace_one({"x": 101}, {"a": 103}, - bypass_document_validation=False) + db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"a": 103})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - db_w0.test.replace_one({"y": 1}, {"x": 1}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"x": 1}), - "find w:0 replaced document") + db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") def test_update_bypass_document_validation(self): db = self.db db.test.drop() db.test.insert_one({"z": 5}) - db.command(SON([("collMod", "test"), - ("validator", {"z": {"$gte": 0}})])) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) + db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test update_one - self.assertRaises(OperationFailure, db.test.update_one, - {"z": 5}, {"$inc": {"z": -10}}) + self.assertRaises(OperationFailure, db.test.update_one, {"z": 5}, {"$inc": {"z": -10}}) self.assertEqual(0, db.test.count_documents({"z": -5})) self.assertEqual(1, db.test.count_documents({"z": 5})) - db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, - bypass_document_validation=True) + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) self.assertEqual(0, db.test.count_documents({"z": 5})) self.assertEqual(1, db.test.count_documents({"z": -5})) - db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, - bypass_document_validation=False) + db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"z": -5})) - db.test.insert_one({"z": -10}, - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_one, - {"z": -10}, {"$inc": {"z": 1}}) + db.test.insert_one({"z": -10}, bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_one, {"z": -10}, {"$inc": {"z": 1}}) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": -10})) - db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, - bypass_document_validation=True) + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) self.assertEqual(1, db.test.count_documents({"z": -9})) self.assertEqual(0, db.test.count_documents({"z": -10})) - db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, - bypass_document_validation=False) + db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": 0})) db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) - db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), - "find w:0 updated document") + db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), "find w:0 updated document") # Test update_many db.test.insert_many([{"z": i} for i in range(3, 101)]) - db.test.insert_one({"y": 0}, - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, {}, - {"$inc": {"z": -100}}) + db.test.insert_one({"y": 0}, bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": -100}}) self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100})) - db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}}, - bypass_document_validation=True) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) - db.test.update_many({"z": {"$gt": -50}}, {"$inc": {"z": 100}}, - bypass_document_validation=False) + db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}})) - db.test.insert_many([{"z": -i} for i in range(50)], - bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, - {}, {"$inc": {"z": 1}}) + db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": 1}}) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}})) - db.test.update_many({"z": {"$gte": 0}}, {"$inc": {"z": -100}}, - bypass_document_validation=True) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) self.assertEqual(150, db.test.count_documents({"z": {"$lte": 0}})) - db.test.update_many({"z": {"$lte": 0}}, {"$inc": {"z": 100}}, - bypass_document_validation=False) + db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) self.assertEqual(150, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) - db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, - bypass_document_validation=True) + db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) wait_until( - lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, - "find w:0 updated documents") + lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, "find w:0 updated documents" + ) def test_bypass_document_validation_bulk_write(self): db = self.db db.test.drop() db.create_collection("test", validator={"a": {"$gte": 0}}) - db_w0 = self.db.client.get_database( - self.db.name, write_concern=WriteConcern(w=0)) - - ops: list = [InsertOne({"a": -10}), - InsertOne({"a": -11}), - InsertOne({"a": -12}), - UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), - UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), - ReplaceOne({"a": {"$lte": -10}}, {"a": -1})] + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] db.test.bulk_write(ops, bypass_document_validation=True) self.assertEqual(3, db.test.count_documents({})) @@ -1086,22 +1026,22 @@ def test_bypass_document_validation_bulk_write(self): for op in ops: self.assertRaises(BulkWriteError, db.test.bulk_write, [op]) - self.assertRaises(OperationFailure, db_w0.test.bulk_write, ops, - bypass_document_validation=True) + self.assertRaises( + OperationFailure, db_w0.test.bulk_write, ops, bypass_document_validation=True + ) def test_find_by_default_dct(self): db = self.db - db.test.insert_one({'foo': 'bar'}) - dct = defaultdict(dict, [('foo', 'bar')]) # type: ignore[arg-type] + db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] self.assertIsNotNone(db.test.find_one(dct)) - self.assertEqual(dct, defaultdict(dict, [('foo', 'bar')])) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) def test_find_w_fields(self): db = self.db db.test.delete_many({}) - db.test.insert_one({"x": 1, "mike": "awesome", - "extra thing": "abcdefghijklmnopqrstuvwxyz"}) + db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) self.assertEqual(1, db.test.count_documents({})) doc = next(db.test.find({})) self.assertTrue("x" in doc) @@ -1130,9 +1070,7 @@ def test_fields_specifier_as_dict(self): db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) self.assertEqual([1, 2, 3], db.test.find_one()["x"]) - self.assertEqual([2, 3], - db.test.find_one( - projection={"x": {"$slice": -2}})["x"]) + self.assertEqual([2, 3], db.test.find_one(projection={"x": {"$slice": -2}})["x"]) self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) @@ -1146,14 +1084,10 @@ def test_find_w_regex(self): db.test.insert_one({"x": "hello_test"}) self.assertEqual(len(list(db.test.find())), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello.*")}))), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("ello")}))), 4) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello$")}))), 0) - self.assertEqual(len(list(db.test.find({"x": - re.compile("^hello_mi.*$")}))), 2) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello.*")}))), 4) + self.assertEqual(len(list(db.test.find({"x": re.compile("ello")}))), 4) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello$")}))), 0) + self.assertEqual(len(list(db.test.find({"x": re.compile("^hello_mi.*$")}))), 2) def test_id_can_be_anything(self): db = self.db @@ -1217,83 +1151,74 @@ def test_write_error_text_handling(self): db.test.create_index("text", unique=True) # Test workaround for SERVER-24007 - data = (b'a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83' - b'\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83') + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) text = utf_8_decode(data, None, True) db.test.insert_one({"text": text}) # Should raise DuplicateKeyError, not InvalidBSON - self.assertRaises(DuplicateKeyError, - db.test.insert_one, - {"text": text}) + self.assertRaises(DuplicateKeyError, db.test.insert_one, {"text": text}) - self.assertRaises(DuplicateKeyError, - db.test.replace_one, - {"_id": ObjectId()}, - {"text": text}, - upsert=True) + self.assertRaises( + DuplicateKeyError, db.test.replace_one, {"_id": ObjectId()}, {"text": text}, upsert=True + ) # Should raise BulkWriteError, not InvalidBSON - self.assertRaises(BulkWriteError, - db.test.insert_many, - [{"text": text}]) + self.assertRaises(BulkWriteError, db.test.insert_many, [{"text": text}]) def test_write_error_unicode(self): coll = self.db.test self.addCleanup(coll.drop) - coll.create_index('a', unique=True) - coll.insert_one({'a': 'unicode \U0001f40d'}) - with self.assertRaisesRegex( - DuplicateKeyError, - 'E11000 duplicate key error') as ctx: - coll.insert_one({'a': 'unicode \U0001f40d'}) + coll.create_index("a", unique=True) + coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + coll.insert_one({"a": "unicode \U0001f40d"}) # Once more for good measure. - self.assertIn('E11000 duplicate key error', - str(ctx.exception)) + self.assertIn("E11000 duplicate key error", str(ctx.exception)) def test_wtimeout(self): # Ensure setting wtimeout doesn't disable write concern altogether. # See SERVER-12596. collection = self.db.test collection.drop() - collection.insert_one({'_id': 1}) + collection.insert_one({"_id": 1}) - coll = collection.with_options( - write_concern=WriteConcern(w=1, wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1}) + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) - coll = collection.with_options( - write_concern=WriteConcern(wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {'_id': 1}) + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) def test_error_code(self): try: @@ -1319,16 +1244,13 @@ def test_index_on_subfield(self): db.test.insert_one({"hello": {"a": 4, "b": 5}}) db.test.insert_one({"hello": {"a": 7, "b": 2}}) - self.assertRaises(DuplicateKeyError, - db.test.insert_one, - {"hello": {"a": 4, "b": 10}}) + self.assertRaises(DuplicateKeyError, db.test.insert_one, {"hello": {"a": 4, "b": 10}}) def test_replace_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.replace_one({}, {"$set": {"x": 1}})) + self.assertRaises(ValueError, lambda: db.test.replace_one({}, {"$set": {"x": 1}})) id1 = db.test.insert_one({"x": 1}).inserted_id result = db.test.replace_one({"x": 1}, {"y": 1}) @@ -1360,8 +1282,7 @@ def test_replace_one(self): self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 2})) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.replace_one({"x": 0}, {"y": 0}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1373,8 +1294,7 @@ def test_update_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.update_one({}, {"x": 1})) + self.assertRaises(ValueError, lambda: db.test.update_one({}, {"x": 1})) id1 = db.test.insert_one({"x": 5}).inserted_id result = db.test.update_one({}, {"$inc": {"x": 1}}) @@ -1402,8 +1322,7 @@ def test_update_one(self): self.assertTrue(isinstance(result.upserted_id, ObjectId)) self.assertTrue(result.acknowledged) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1415,8 +1334,7 @@ def test_update_many(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, - lambda: db.test.update_many({}, {"x": 1})) + self.assertRaises(ValueError, lambda: db.test.update_many({}, {"x": 1})) db.test.insert_one({"x": 4, "y": 3}) db.test.insert_one({"x": 5, "y": 5}) @@ -1445,8 +1363,7 @@ def test_update_many(self): self.assertTrue(isinstance(result.upserted_id, ObjectId)) self.assertTrue(result.acknowledged) - db = db.client.get_database(db.name, - write_concern=WriteConcern(w=0)) + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) self.assertTrue(isinstance(result, UpdateResult)) self.assertRaises(InvalidOperation, lambda: result.matched_count) @@ -1459,28 +1376,28 @@ def test_update_check_keys(self): self.assertTrue(self.db.test.insert_one({"hello": "world"})) # Modify shouldn't check keys... - self.assertTrue(self.db.test.update_one({"hello": "world"}, - {"$set": {"foo.bar": "baz"}}, - upsert=True)) + self.assertTrue( + self.db.test.update_one({"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True) + ) # I know this seems like testing the server but I'd like to be notified # by CI if the server's behavior changes here. doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) - self.assertRaises(OperationFailure, self.db.test.update_one, - {"hello": "world"}, doc, upsert=True) + self.assertRaises( + OperationFailure, self.db.test.update_one, {"hello": "world"}, doc, upsert=True + ) # This is going to cause keys to be checked and raise InvalidDocument. # That's OK assuming the server's behavior in the previous assert # doesn't change. If the behavior changes checking the first key for # '$' in update won't be good enough anymore. doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) - self.assertRaises(OperationFailure, self.db.test.replace_one, - {"hello": "world"}, doc, upsert=True) + self.assertRaises( + OperationFailure, self.db.test.replace_one, {"hello": "world"}, doc, upsert=True + ) # Replace with empty document - self.assertNotEqual(0, - self.db.test.replace_one( - {"hello": "world"}, {}).matched_count) + self.assertNotEqual(0, self.db.test.replace_one({"hello": "world"}, {}).matched_count) def test_acknowledged_delete(self): db = self.db @@ -1514,10 +1431,9 @@ def test_count_documents(self): self.assertEqual(db.test.count_documents({}), 0) db.test.insert_many([{}, {}]) self.assertEqual(db.test.count_documents({}), 2) - db.test.insert_many([{'foo': 'bar'}, {'foo': 'baz'}]) - self.assertEqual(db.test.count_documents({'foo': 'bar'}), 1) - self.assertEqual( - db.test.count_documents({'foo': re.compile(r'ba.*')}), 2) + db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) def test_estimated_document_count(self): db = self.db @@ -1533,39 +1449,37 @@ def test_estimated_document_count(self): def test_aggregate(self): db = self.db db.drop_collection("test") - db.test.insert_one({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} result = db.test.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) - self.assertEqual([{'foo': [1, 2]}], list(result)) + self.assertEqual([{"foo": [1, 2]}], list(result)) # Test write concern. with self.write_concern_collection() as coll: - coll.aggregate([{'$out': 'output-collection'}]) + coll.aggregate([{"$out": "output-collection"}]) def test_aggregate_raw_bson(self): db = self.db db.drop_collection("test") - db.test.insert_one({'foo': [1, 2]}) + db.test.insert_one({"foo": [1, 2]}) self.assertRaises(TypeError, db.test.aggregate, "wow") pipeline = {"$project": {"_id": False, "foo": True}} - coll = db.get_collection( - 'test', - codec_options=CodecOptions(document_class=RawBSONDocument)) + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) result = coll.aggregate([pipeline]) self.assertTrue(isinstance(result, CommandCursor)) first_result = next(result) self.assertIsInstance(first_result, RawBSONDocument) - self.assertEqual([1, 2], list(first_result['foo'])) + self.assertEqual([1, 2], list(first_result["foo"])) def test_aggregation_cursor_validation(self): db = self.db - projection = {'$project': {'_id': '$_id'}} + projection = {"$project": {"_id": "$_id"}} cursor = db.test.aggregate([projection], cursor={}) self.assertTrue(isinstance(cursor, CommandCursor)) @@ -1576,20 +1490,17 @@ def test_aggregation_cursor(self): db = self.client.get_database( db.name, read_preference=ReadPreference.SECONDARY, - write_concern=WriteConcern(w=self.w)) + write_concern=WriteConcern(w=self.w), + ) for collection_size in (10, 1000): db.drop_collection("test") - db.test.insert_many([{'_id': i} for i in range(collection_size)]) + db.test.insert_many([{"_id": i} for i in range(collection_size)]) expected_sum = sum(range(collection_size)) # Use batchSize to ensure multiple getMore messages - cursor = db.test.aggregate( - [{'$project': {'_id': '$_id'}}], - batchSize=5) + cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) - self.assertEqual( - expected_sum, - sum(doc['_id'] for doc in cursor)) + self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor)) # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) @@ -1607,7 +1518,7 @@ def test_aggregation_cursor_alive(self): self.db.test.delete_many({}) self.db.test.insert_many([{} for _ in range(3)]) self.addCleanup(self.db.test.delete_many, {}) - cursor = self.db.test.aggregate(pipeline=[], cursor={'batchSize': 2}) + cursor = self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) n = 0 while True: cursor.next() @@ -1621,15 +1532,14 @@ def test_aggregation_cursor_alive(self): def test_large_limit(self): db = self.db db.drop_collection("test_large_limit") - db.test_large_limit.create_index([('x', 1)]) + db.test_large_limit.create_index([("x", 1)]) my_str = "mongomongo" * 1000 - db.test_large_limit.insert_many( - {"x": i, "y": my_str} for i in range(2000)) + db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) i = 0 y = 0 - for doc in db.test_large_limit.find(limit=1900).sort([('x', 1)]): + for doc in db.test_large_limit.find(limit=1900).sort([("x", 1)]): i += 1 y += doc["x"] @@ -1683,7 +1593,7 @@ def test_rename(self): db.foo.rename("test", dropTarget=True) with self.write_concern_collection() as coll: - coll.rename('foo') + coll.rename("foo") @no_type_check def test_find_one(self): @@ -1696,8 +1606,7 @@ def test_find_one(self): self.assertEqual(db.test.find_one(_id), db.test.find_one()) self.assertEqual(db.test.find_one(None), db.test.find_one()) self.assertEqual(db.test.find_one({}), db.test.find_one()) - self.assertEqual(db.test.find_one({"hello": "world"}), - db.test.find_one()) + self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) self.assertTrue("hello" in db.test.find_one(projection=["hello"])) self.assertTrue("hello" not in db.test.find_one(projection=["foo"])) @@ -1711,8 +1620,7 @@ def test_find_one(self): self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) - self.assertEqual(["_id"], list(db.test.find_one(projection={'_id': - True}))) + self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": True}))) self.assertTrue("hello" in list(db.test.find_one(projection={}))) self.assertTrue("hello" in list(db.test.find_one(projection=[]))) @@ -1765,16 +1673,13 @@ def test_cursor_timeout(self): def test_exhaust(self): if is_mongos(self.db.client): - self.assertRaises(InvalidOperation, - self.db.test.find, - cursor_type=CursorType.EXHAUST) + self.assertRaises(InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST) return # Limit is incompatible with exhaust. - self.assertRaises(InvalidOperation, - self.db.test.find, - cursor_type=CursorType.EXHAUST, - limit=5) + self.assertRaises( + InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST, limit=5 + ) cur = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertRaises(InvalidOperation, cur.limit, 5) cur = self.db.test.find(limit=5) @@ -1785,7 +1690,7 @@ def test_exhaust(self): self.db.drop_collection("test") # Insert enough documents to require more than one batch - self.db.test.insert_many([{'i': i} for i in range(150)]) + self.db.test.insert_many([{"i": i} for i in range(150)]) client = rs_or_single_client(maxPoolSize=1) self.addCleanup(client.close) @@ -1807,8 +1712,7 @@ def test_exhaust(self): # If the Cursor instance is discarded before being completely iterated # and the socket has pending data (more_to_come=True) we have to close # and discard the socket. - cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, - batch_size=2) + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) if client_context.version.at_least(4, 2): # On 4.2+ we use OP_MSG which only sets more_to_come=True after the # first getMore. @@ -1817,12 +1721,12 @@ def test_exhaust(self): else: next(cur) self.assertEqual(0, len(pool.sockets)) - if sys.platform.startswith('java') or 'PyPy' in sys.version: + if sys.platform.startswith("java") or "PyPy" in sys.version: # Don't wait for GC or use gc.collect(), it's unreliable. cur.close() cur = None # Wait until the background thread returns the socket. - wait_until(lambda: pool.active_sockets == 0, 'return socket') + wait_until(lambda: pool.active_sockets == 0, "return socket") # The socket should be discarded. self.assertEqual(0, len(pool.sockets)) @@ -1837,11 +1741,11 @@ def test_distinct(self): self.assertEqual([1, 2, 3], distinct) - distinct = test.find({'a': {'$gt': 1}}).distinct("a") + distinct = test.find({"a": {"$gt": 1}}).distinct("a") distinct.sort() self.assertEqual([2, 3], distinct) - distinct = test.distinct('a', {'a': {'$gt': 1}}) + distinct = test.distinct("a", {"a": {"$gt": 1}}) distinct.sort() self.assertEqual([2, 3], distinct) @@ -1862,19 +1766,15 @@ def test_query_on_query_field(self): self.db.test.insert_one({"query": "foo"}) self.db.test.insert_one({"bar": "foo"}) - self.assertEqual(1, - self.db.test.count_documents({"query": {"$ne": None}})) - self.assertEqual(1, - len(list(self.db.test.find({"query": {"$ne": None}}))) - ) + self.assertEqual(1, self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len(list(self.db.test.find({"query": {"$ne": None}})))) def test_min_query(self): self.db.drop_collection("test") self.db.test.insert_many([{"x": 1}, {"x": 2}]) self.db.test.create_index("x") - cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, - hint="x_1") + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") docs = list(cursor) self.assertEqual(1, len(docs)) @@ -1891,24 +1791,30 @@ def test_numerous_inserts(self): def test_insert_many_large_batch(self): # Tests legacy insert. db = self.client.test_insert_large_batch - self.addCleanup(self.client.drop_database, 'test_insert_large_batch') + self.addCleanup(self.client.drop_database, "test_insert_large_batch") max_bson_size = client_context.max_bson_size # Write commands are limited to 16MB + 16k per batch - big_string = 'x' * int(max_bson_size / 2) + big_string = "x" * int(max_bson_size / 2) # Batch insert that requires 2 batches. - successful_insert = [{'x': big_string}, {'x': big_string}, - {'x': big_string}, {'x': big_string}] + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] db.collection_0.insert_many(successful_insert) self.assertEqual(4, db.collection_0.count_documents({})) db.collection_0.drop() # Test that inserts fail after first error. - insert_second_fails = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id2', 'x': big_string}] + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] with self.assertRaises(BulkWriteError): db.collection_1.insert_many(insert_second_fails) @@ -1918,25 +1824,27 @@ def test_insert_many_large_batch(self): db.collection_1.drop() # 2 batches, 2nd insert fails, unacknowledged, ordered. - unack_coll = db.collection_2.with_options( - write_concern=WriteConcern(w=0)) + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) unack_coll.insert_many(insert_second_fails) - wait_until(lambda: 1 == db.collection_2.count_documents({}), - 'insert 1 document', timeout=60) + wait_until( + lambda: 1 == db.collection_2.count_documents({}), "insert 1 document", timeout=60 + ) db.collection_2.drop() # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are # dupes. Acknowledged, unordered. - insert_two_failures = [{'_id': 'id0', 'x': big_string}, - {'_id': 'id0', 'x': big_string}, - {'_id': 'id1', 'x': big_string}, - {'_id': 'id1', 'x': big_string}] + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] with self.assertRaises(OperationFailure) as context: db.collection_3.insert_many(insert_two_failures, ordered=False) - self.assertIn('id1', str(context.exception)) + self.assertIn("id1", str(context.exception)) # Only the first and third documents should be inserted. self.assertEqual(2, db.collection_3.count_documents({})) @@ -1944,13 +1852,13 @@ def test_insert_many_large_batch(self): db.collection_3.drop() # 2 batches, 2 errors, unacknowledged, unordered. - unack_coll = db.collection_4.with_options( - write_concern=WriteConcern(w=0)) + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) unack_coll.insert_many(insert_two_failures, ordered=False) # Only the first and third documents are inserted. - wait_until(lambda: 2 == db.collection_4.count_documents({}), - 'insert 2 documents', timeout=60) + wait_until( + lambda: 2 == db.collection_4.count_documents({}), "insert 2 documents", timeout=60 + ) db.collection_4.drop() @@ -1978,224 +1886,246 @@ class BadGetAttr(dict): def __getattr__(self, name): pass - bad = BadGetAttr([('foo', 'bar')]) - c.insert_one({'bad': bad}) - self.assertEqual('bar', c.find_one()['bad']['foo']) # type: ignore + bad = BadGetAttr([("foo", "bar")]) + c.insert_one({"bad": bad}) + self.assertEqual("bar", c.find_one()["bad"]["foo"]) # type: ignore def test_array_filters_validation(self): # array_filters must be a list. c = self.db.test with self.assertRaises(TypeError): - c.update_one({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] + c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.update_many({}, {'$set': {'a': 1}}, array_filters={} ) # type: ignore[arg-type] + c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {'$set': {'a': 1}}, array_filters={}) # type: ignore[arg-type] + c.find_one_and_update({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) with self.assertRaises(ConfigurationError): - c_w0.update_one({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) with self.assertRaises(ConfigurationError): - c_w0.update_many({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) with self.assertRaises(ConfigurationError): - c_w0.find_one_and_update({}, {'$set': {'y.$[i].b': 5}}, - array_filters=[{'i.b': 1}]) + c_w0.find_one_and_update({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) def test_find_one_and(self): c = self.db.test c.drop() - c.insert_one({'_id': 1, 'i': 1}) - - self.assertEqual({'_id': 1, 'i': 1}, - c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_delete({'_id': 1})) - self.assertEqual(None, c.find_one({'_id': 1})) - - self.assertEqual(None, - c.find_one_and_update({'_id': 1}, {'$inc': {'i': 1}})) - self.assertEqual({'_id': 1, 'i': 1}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER, - upsert=True)) - self.assertEqual({'_id': 1, 'i': 2}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - return_document=ReturnDocument.AFTER)) - - self.assertEqual({'_id': 1, 'i': 3}, - c.find_one_and_replace( - {'_id': 1}, {'i': 3, 'j': 1}, - projection=['i'], - return_document=ReturnDocument.AFTER)) - self.assertEqual({'i': 4}, - c.find_one_and_update( - {'_id': 1}, {'$inc': {'i': 1}}, - projection={'i': 1, '_id': 0}, - return_document=ReturnDocument.AFTER)) + c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual({"_id": 1, "i": 1}, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, c.find_one({"_id": 1})) + + self.assertEqual(None, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) c.drop() for j in range(5): - c.insert_one({'j': j, 'i': 0}) + c.insert_one({"j": j, "i": 0}) - sort = [('j', DESCENDING)] - self.assertEqual(4, c.find_one_and_update({}, - {'$inc': {'i': 1}}, - sort=sort)['j']) + sort = [("j", DESCENDING)] + self.assertEqual(4, c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort)["j"]) def test_find_one_and_write_concern(self): listener = EventListener() db = single_client(event_listeners=[listener])[self.db.name] # non-default WriteConcern. - c_w0 = db.get_collection( - 'test', write_concern=WriteConcern(w=0)) + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. - c_default = db.get_collection('test', write_concern=WriteConcern()) + c_default = db.get_collection("test", write_concern=WriteConcern()) results = listener.results # Authenticate the client and throw out auth commands from the listener. - db.command('ping') + db.command("ping") results.clear() - c_w0.find_one_and_update( - {'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) + c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) results.clear() - c_w0.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) + c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) results.clear() - c_w0.find_one_and_delete({'_id': 1}) - self.assertEqual( - {'w': 0}, results['started'][0].command['writeConcern']) + c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) results.clear() # Test write concern errors. if client_context.is_rs: c_wc_error = db.get_collection( - 'test', - write_concern=WriteConcern( - w=len(client_context.nodes) + 1)) + "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_update, - {'_id': 1}, {'$set': {'foo': 'bar'}}) + {"_id": 1}, + {"$set": {"foo": "bar"}}, + ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_replace, - {'w': 0}, results['started'][0].command['writeConcern']) + {"w": 0}, + results["started"][0].command["writeConcern"], + ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_delete, - {'w': 0}, results['started'][0].command['writeConcern']) + {"w": 0}, + results["started"][0].command["writeConcern"], + ) results.clear() - c_default.find_one_and_update({'_id': 1}, {'$set': {'foo': 'bar'}}) - self.assertNotIn('writeConcern', results['started'][0].command) + c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", results["started"][0].command) results.clear() - c_default.find_one_and_replace({'_id': 1}, {'foo': 'bar'}) - self.assertNotIn('writeConcern', results['started'][0].command) + c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", results["started"][0].command) results.clear() - c_default.find_one_and_delete({'_id': 1}) - self.assertNotIn('writeConcern', results['started'][0].command) + c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", results["started"][0].command) results.clear() def test_find_with_nested(self): c = self.db.test c.drop() - c.insert_many([{'i': i} for i in range(5)]) # [0, 1, 2, 3, 4] + c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] self.assertEqual( [2], - [i['i'] for i in c.find({ - '$and': [ + [ + i["i"] + for i in c.find( { - # This clause gives us [1,2,4] - '$or': [ - {'i': {'$lte': 2}}, - {'i': {'$gt': 3}}, - ], - }, - { - # This clause gives us [2,3] - '$or': [ - {'i': 2}, - {'i': 3}, + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, ] - }, - ] - })] + } + ) + ], ) self.assertEqual( [0, 1, 2], - [i['i'] for i in c.find({ - '$or': [ - { - # This clause gives us [2] - '$and': [ - {'i': {'$gte': 2}}, - {'i': {'$lt': 3}}, - ], - }, + [ + i["i"] + for i in c.find( { - # This clause gives us [0,1] - '$and': [ - {'i': {'$gt': -100}}, - {'i': {'$lt': 2}}, + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, ] - }, - ] - })] + } + ) + ], ) def test_find_regex(self): c = self.db.test c.drop() - c.insert_one({'r': re.compile('.*')}) + c.insert_one({"r": re.compile(".*")}) - self.assertTrue(isinstance(c.find_one()['r'], Regex)) # type: ignore + self.assertTrue(isinstance(c.find_one()["r"], Regex)) # type: ignore for doc in c.find(): - self.assertTrue(isinstance(doc['r'], Regex)) + self.assertTrue(isinstance(doc["r"], Regex)) def test_find_command_generation(self): - cmd = _gen_find_command('coll', {'$query': {'foo': 1}, '$dumb': 2}, - None, 0, 0, 0, None, DEFAULT_READ_CONCERN, - None, None) + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) self.assertEqual( - cmd.to_dict(), - SON([('find', 'coll'), - ('$dumb', 2), - ('filter', {'foo': 1})]).to_dict()) + cmd.to_dict(), SON([("find", "coll"), ("$dumb", 2), ("filter", {"foo": 1})]).to_dict() + ) def test_bool(self): with self.assertRaises(NotImplementedError): - bool(Collection(self.db, 'test')) + bool(Collection(self.db, "test")) @client_context.require_version_min(5, 0, 0) def test_helpers_with_let(self): c = self.db.test - helpers = [(c.delete_many, ({}, {})), (c.delete_one, ({}, {})), - (c.find, ({})), (c.update_many, ({}, {'$inc': {'x': 3}})), - (c.update_one, ({}, {'$inc': {'x': 3}})), - (c.find_one_and_delete, ({}, {})), - (c.find_one_and_replace, ({}, {})), - (c.aggregate, ([], {}))] + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (c.find, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([], {})), + ] for let in [10, "str"]: for helper, args in helpers: - with self.assertRaisesRegex(TypeError, - "let must be an instance of dict"): + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): helper(*args, let=let) # type: ignore for helper, args in helpers: helper(*args, let={}) # type: ignore diff --git a/test/test_collection_management.py b/test/test_collection_management.py index 342e612583..c5e29eda8a 100644 --- a/test/test_collection_management.py +++ b/test/test_collection_management.py @@ -20,12 +20,10 @@ sys.path[0:0] = [""] from test import unittest - from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'collection_management') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "collection_management") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index a05dbd9668..ed3d516f97 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -20,26 +20,28 @@ sys.path[0:0] = [""] -import pymongo +from test import client_context, unittest +from test.utils import ( + EventListener, + parse_read_preference, + rs_or_single_client, + wait_until, +) -from pymongo import MongoClient +import pymongo from bson import json_util +from pymongo import MongoClient from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern -from test import unittest, client_context -from test.utils import (rs_or_single_client, wait_until, EventListener, - parse_read_preference) # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'command_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() class TestAllScenarios(unittest.TestCase): @@ -61,9 +63,9 @@ def tearDown(self): def format_actual_results(results): - started = results['started'] - succeeded = results['succeeded'] - failed = results['failed'] + started = results["started"] + succeeded = results["succeeded"] + failed = results["failed"] msg = "\nStarted: %r" % (started[0].command if len(started) else None,) msg += "\nSucceeded: %r" % (succeeded[0].reply if len(succeeded) else None,) msg += "\nFailed: %r" % (failed[0].failure if len(failed) else None,) @@ -72,51 +74,51 @@ def format_actual_results(results): def create_test(scenario_def, test): def run_scenario(self): - dbname = scenario_def['database_name'] - collname = scenario_def['collection_name'] + dbname = scenario_def["database_name"] + collname = scenario_def["collection_name"] coll = self.client[dbname][collname] coll.drop() - coll.insert_many(scenario_def['data']) + coll.insert_many(scenario_def["data"]) self.listener.results.clear() - name = camel_to_snake(test['operation']['name']) - if 'read_preference' in test['operation']: - coll = coll.with_options(read_preference=parse_read_preference( - test['operation']['read_preference'])) - if 'collectionOptions' in test['operation']: - colloptions = test['operation']['collectionOptions'] - if 'writeConcern' in colloptions: - concern = colloptions['writeConcern'] - coll = coll.with_options( - write_concern=WriteConcern(**concern)) - - test_args = test['operation']['arguments'] - if 'options' in test_args: - options = test_args.pop('options') + name = camel_to_snake(test["operation"]["name"]) + if "read_preference" in test["operation"]: + coll = coll.with_options( + read_preference=parse_read_preference(test["operation"]["read_preference"]) + ) + if "collectionOptions" in test["operation"]: + colloptions = test["operation"]["collectionOptions"] + if "writeConcern" in colloptions: + concern = colloptions["writeConcern"] + coll = coll.with_options(write_concern=WriteConcern(**concern)) + + test_args = test["operation"]["arguments"] + if "options" in test_args: + options = test_args.pop("options") test_args.update(options) args = {} for arg in test_args: args[camel_to_snake(arg)] = test_args[arg] - if name == 'count': - self.skipTest('PyMongo does not support count') - elif name == 'bulk_write': + if name == "count": + self.skipTest("PyMongo does not support count") + elif name == "bulk_write": bulk_args = [] - for request in args['requests']: - opname = request['name'] + for request in args["requests"]: + opname = request["name"] klass = opname[0:1].upper() + opname[1:] - arg = getattr(pymongo, klass)(**request['arguments']) + arg = getattr(pymongo, klass)(**request["arguments"]) bulk_args.append(arg) try: - coll.bulk_write(bulk_args, args.get('ordered', True)) + coll.bulk_write(bulk_args, args.get("ordered", True)) except OperationFailure: pass - elif name == 'find': - if 'sort' in args: - args['sort'] = list(args['sort'].items()) - if 'hint' in args: - args['hint'] = list(args['hint'].items()) - for arg in 'skip', 'limit': + elif name == "find": + if "sort" in args: + args["sort"] = list(args["sort"].items()) + if "hint" in args: + args["hint"] = list(args["hint"].items()) + for arg in "skip", "limit": if arg in args: args[arg] = int(args[arg]) try: @@ -131,73 +133,73 @@ def run_scenario(self): pass res = self.listener.results - for expectation in test['expectations']: + for expectation in test["expectations"]: event_type = next(iter(expectation)) if event_type == "command_started_event": - event = res['started'][0] if len(res['started']) else None + event = res["started"][0] if len(res["started"]) else None if event is not None: # The tests substitute 42 for any number other than 0. - if (event.command_name == 'getMore' - and event.command['getMore']): - event.command['getMore'] = 42 - elif event.command_name == 'killCursors': - event.command['cursors'] = [42] - elif event.command_name == 'update': + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = 42 + elif event.command_name == "killCursors": + event.command["cursors"] = [42] + elif event.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into # expectations. - updates = expectation[event_type]['command'][ - 'updates'] + updates = expectation[event_type]["command"]["updates"] for update in updates: - update.setdefault('upsert', False) - update.setdefault('multi', False) + update.setdefault("upsert", False) + update.setdefault("multi", False) elif event_type == "command_succeeded_event": - event = ( - res['succeeded'].pop(0) if len(res['succeeded']) else None) + event = res["succeeded"].pop(0) if len(res["succeeded"]) else None if event is not None: reply = event.reply # The tests substitute 42 for any number other than 0, # and "" for any error message. - if 'writeErrors' in reply: - for doc in reply['writeErrors']: + if "writeErrors" in reply: + for doc in reply["writeErrors"]: # Remove any new fields the server adds. The tests # only have index, code, and errmsg. - diff = set(doc) - set(['index', 'code', 'errmsg']) + diff = set(doc) - set(["index", "code", "errmsg"]) for field in diff: doc.pop(field) - doc['code'] = 42 - doc['errmsg'] = "" - elif 'cursor' in reply: - if reply['cursor']['id']: - reply['cursor']['id'] = 42 - elif event.command_name == 'killCursors': + doc["code"] = 42 + doc["errmsg"] = "" + elif "cursor" in reply: + if reply["cursor"]["id"]: + reply["cursor"]["id"] = 42 + elif event.command_name == "killCursors": # Make the tests continue to pass when the killCursors # command is actually in use. - if 'cursorsKilled' in reply: - reply.pop('cursorsKilled') - reply['cursorsUnknown'] = [42] + if "cursorsKilled" in reply: + reply.pop("cursorsKilled") + reply["cursorsUnknown"] = [42] # Found succeeded event. Pop related started event. - res['started'].pop(0) + res["started"].pop(0) elif event_type == "command_failed_event": - event = res['failed'].pop(0) if len(res['failed']) else None + event = res["failed"].pop(0) if len(res["failed"]) else None if event is not None: # Found failed event. Pop related started event. - res['started'].pop(0) + res["started"].pop(0) else: self.fail("Unknown event type") if event is None: - event_name = event_type.split('_')[1] + event_name = event_type.split("_")[1] self.fail( "Expected %s event for %s command. Actual " - "results:%s" % ( + "results:%s" + % ( event_name, - expectation[event_type]['command_name'], - format_actual_results(res))) + expectation[event_type]["command_name"], + format_actual_results(res), + ) + ) for attr, expected in expectation[event_type].items(): - if 'options' in expected: - options = expected.pop('options') + if "options" in expected: + options = expected.pop("options") expected.update(options) actual = getattr(event, attr) if isinstance(expected, dict): @@ -210,35 +212,33 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, 'legacy')): + for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): dirname = os.path.split(dirpath)[-1] for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) - assert bool(scenario_def.get('tests')), "tests cannot be empty" + assert bool(scenario_def.get("tests")), "tests cannot be empty" # Construct test from scenario. - for test in scenario_def['tests']: + for test in scenario_def["tests"]: new_test = create_test(scenario_def, test) if "ignore_if_server_version_greater_than" in test: version = test["ignore_if_server_version_greater_than"] - ver = tuple(int(elt) for elt in version.split('.')) - new_test = client_context.require_version_max(*ver)( - new_test) + ver = tuple(int(elt) for elt in version.split(".")) + new_test = client_context.require_version_max(*ver)(new_test) if "ignore_if_server_version_less_than" in test: version = test["ignore_if_server_version_less_than"] - ver = tuple(int(elt) for elt in version.split('.')) - new_test = client_context.require_version_min(*ver)( - new_test) + ver = tuple(int(elt) for elt in version.split(".")) + new_test = client_context.require_version_min(*ver)(new_test) if "ignore_if_topology_type" in test: types = set(test["ignore_if_topology_type"]) if "sharded" in types: - new_test = client_context.require_no_mongos(None)( - new_test) + new_test = client_context.require_no_mongos(None)(new_test) - test_name = 'test_%s_%s_%s' % ( + test_name = "test_%s_%s_%s" % ( dirname, os.path.splitext(filename)[0], - str(test['description'].replace(" ", "_"))) + str(test["description"].replace(" ", "_")), + ) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_command_monitoring_unified.py b/test/test_command_monitoring_unified.py index 9390c9fec6..46e1e4724c 100644 --- a/test/test_command_monitoring_unified.py +++ b/test/test_command_monitoring_unified.py @@ -22,16 +22,16 @@ from test import unittest from test.unified_format import generate_test_classes - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'command_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'unified'), - module=__name__,)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) if __name__ == "__main__": diff --git a/test/test_common.py b/test/test_common.py index 7d7a26c278..ff50878ea1 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -19,13 +19,14 @@ sys.path[0:0] = [""] -from bson.binary import Binary, PYTHON_LEGACY, STANDARD, UuidRepresentation +from test import IntegrationTest, client_context, unittest +from test.utils import connected, rs_or_single_client, single_client + +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import connected, rs_or_single_client, single_client @client_context.require_connection @@ -34,81 +35,79 @@ def setUpModule(): class TestCommon(IntegrationTest): - def test_uuid_representation(self): coll = self.db.uuid coll.drop() # Test property - self.assertEqual(UuidRepresentation.UNSPECIFIED, - coll.codec_options.uuid_representation) + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) # Test basic query uu = uuid.uuid4() # Insert as binary subtype 3 - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) legacy_opts = coll.codec_options - coll.insert_one({'uu': uu}) - self.assertEqual(uu, coll.find_one({'uu': uu})['uu']) # type: ignore - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) + coll.insert_one({"uu": uu}) + self.assertEqual(uu, coll.find_one({"uu": uu})["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) - self.assertEqual(None, coll.find_one({'uu': uu})) + self.assertEqual(None, coll.find_one({"uu": uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uul, coll.find_one({'uu': uul})['uu']) # type: ignore + self.assertEqual(uul, coll.find_one({"uu": uul})["uu"]) # type: ignore # Test count_documents - self.assertEqual(0, coll.count_documents({'uu': uu})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.count_documents({'uu': uu})) + self.assertEqual(0, coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.count_documents({"uu": uu})) # Test delete - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - coll.delete_one({'uu': uu}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.delete_one({"uu": uu}) self.assertEqual(1, coll.count_documents({})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - coll.delete_one({'uu': uu}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + coll.delete_one({"uu": uu}) self.assertEqual(0, coll.count_documents({})) # Test update_one - coll.insert_one({'_id': uu, 'i': 1}) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({'_id': uu})['i']) # type: ignore - coll.update_one({'_id': uu}, {'$set': {'i': 2}}) - self.assertEqual(2, coll.find_one({'_id': uu})['i']) # type: ignore + coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, coll.find_one({"_id": uu})["i"]) # type: ignore + coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, coll.find_one({"_id": uu})["i"]) # type: ignore # Test Cursor.distinct - self.assertEqual([2], coll.find({'_id': uu}).distinct('i')) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=STANDARD)) - self.assertEqual([], coll.find({'_id': uu}).distinct('i')) + self.assertEqual([2], coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], coll.find({"_id": uu}).distinct("i")) # Test findAndModify - self.assertEqual(None, coll.find_one_and_update({'_id': uu}, - {'$set': {'i': 5}})) - coll = self.db.get_collection( - "uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(2, coll.find_one_and_update({'_id': uu}, - {'$set': {'i': 5}})['i']) - self.assertEqual(5, coll.find_one({'_id': uu})['i']) # type: ignore + self.assertEqual(None, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})["i"]) + self.assertEqual(5, coll.find_one({"_id": uu})["i"]) # type: ignore # Test command - self.assertEqual(5, self.db.command( - 'findAndModify', 'uuid', - update={'$set': {'i': 6}}, - query={'_id': uu}, codec_options=legacy_opts)['value']['i']) - self.assertEqual(6, self.db.command( - 'findAndModify', 'uuid', - update={'$set': {'i': 7}}, - query={'_id': Binary.from_uuid(uu, PYTHON_LEGACY)})['value']['i']) + self.assertEqual( + 5, + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + )["value"]["i"], + ) + self.assertEqual( + 6, + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + )["value"]["i"], + ) def test_write_concern(self): c = rs_or_single_client(connect=False) @@ -119,7 +118,7 @@ def test_write_concern(self): self.assertEqual(wc, c.write_concern) # Can we override back to the server default? - db = c.get_database('pymongo_test', write_concern=WriteConcern()) + db = c.get_database("pymongo_test", write_concern=WriteConcern()) self.assertEqual(db.write_concern, WriteConcern()) db = c.pymongo_test @@ -128,7 +127,7 @@ def test_write_concern(self): self.assertEqual(wc, coll.write_concern) cwc = WriteConcern(j=True) - coll = db.get_collection('test', write_concern=cwc) + coll = db.get_collection("test", write_concern=cwc) self.assertEqual(cwc, coll.write_concern) self.assertEqual(wc, db.write_concern) @@ -149,21 +148,22 @@ def test_mongo_client(self): self.assertTrue(new_coll.insert_one(doc)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client("mongodb://%s/" % (pair,), - replicaSet=client_context.replica_set_name) + m = rs_or_single_client( + "mongodb://%s/" % (pair,), replicaSet=client_context.replica_set_name + ) coll = m.pymongo_test.write_concern_test self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client("mongodb://%s/?w=0" % (pair,), - replicaSet=client_context.replica_set_name) + m = rs_or_single_client( + "mongodb://%s/?w=0" % (pair,), replicaSet=client_context.replica_set_name + ) coll = m.pymongo_test.write_concern_test coll.insert_one(doc) # Equality tests direct = connected(single_client(w=0)) - direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), - **self.credentials)) + direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index e683974b03..fd9f126551 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -18,20 +18,20 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + CMAPListener, + ensure_all_connected, + repl_set_step_down, + rs_or_single_client, +) + from bson import SON from pymongo import monitoring from pymongo.collection import Collection from pymongo.errors import NotPrimaryError from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (CMAPListener, - ensure_all_connected, - repl_set_step_down, - rs_or_single_client) - class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): listener: CMAPListener @@ -42,9 +42,9 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): def setUpClass(cls): super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() cls.listener = CMAPListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener], - retryWrites=False, - heartbeatFrequencyMS=500) + cls.client = rs_or_single_client( + event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + ) # Ensure connections to all servers in replica set. This is to test # that the is_writable flag is properly updated for sockets that @@ -52,10 +52,8 @@ def setUpClass(cls): ensure_all_connected(cls.client) cls.listener.reset() - cls.db = cls.client.get_database( - "step-down", write_concern=WriteConcern("majority")) - cls.coll = cls.db.get_collection( - "step-down", write_concern=WriteConcern("majority")) + cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) + cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) @classmethod def tearDownClass(cls): @@ -73,17 +71,15 @@ def set_fail_point(self, command_args): self.client.admin.command(cmd) def verify_pool_cleared(self): - self.assertEqual( - self.listener.event_count(monitoring.PoolClearedEvent), 1) + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1) def verify_pool_not_cleared(self): - self.assertEqual( - self.listener.event_count(monitoring.PoolClearedEvent), 0) + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0) @client_context.require_version_min(4, 2, -1) def test_get_more_iteration(self): # Insert 5 documents with WC majority. - self.coll.insert_many([{'data': k} for k in range(5)]) + self.coll.insert_many([{"data": k} for k in range(5)]) # Start a find operation and retrieve first batch of results. batch_size = 2 cursor = self.coll.find(batch_size=batch_size) @@ -108,14 +104,14 @@ def test_get_more_iteration(self): def run_scenario(self, error_code, retry, pool_status_checker): # Set fail point. - self.set_fail_point({"mode": {"times": 1}, - "data": {"failCommands": ["insert"], - "errorCode": error_code}}) + self.set_fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}} + ) self.addCleanup(self.set_fail_point, {"mode": "off"}) # Insert record and verify failure. with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) - self.assertEqual(exc.exception.details['code'], error_code) # type: ignore + self.assertEqual(exc.exception.details["code"], error_code) # type: ignore # Retry before CMAPListener assertion if retry_before=True. if retry: self.coll.insert_one({"test": 1}) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index b82b730aef..ad0ac9347e 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import unittest - from test.unified_format import UnifiedSpecTestMixinV1 @@ -26,23 +25,18 @@ def test_store_events_as_entities(self): { "client": { "id": "client0", - "storeEventsAsEntities": [ - { - "id": "events1", - "events": [ - "PoolCreatedEvent", - ] - } - ] + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], } }, ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] + "tests": [{"description": "foo", "operations": []}], } self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() @@ -63,27 +57,18 @@ def test_store_all_others_as_entities(self): { "client": { "id": "client0", - "uriOptions": { - "retryReads": True - }, - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "dat" + "uriOptions": {"retryReads": True}, } }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, { "collection": { "id": "collection0", "database": "database0", - "collectionName": "dat" + "collectionName": "dat", } - } + }, ], - "tests": [ { "description": "test loops", @@ -99,33 +84,21 @@ def test_store_all_others_as_entities(self): "numIterations": 5, "operations": [ { - "name": "insertOne", - "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "x": 44 - } - } - + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, }, { "name": "insertOne", "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "x": 44 - } - } - - } - ] - } + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + ], + }, } - ] + ], } - ] + ], } self.scenario_runner.TEST_SPEC = spec diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py index a435c1caa1..cc9a521b3b 100644 --- a/test/test_crud_unified.py +++ b/test/test_crud_unified.py @@ -20,16 +20,13 @@ sys.path[0:0] = [""] from test import unittest - from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'crud', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "unified") # Generate unified tests. -globals().update(generate_test_classes( - TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) +globals().update(generate_test_classes(TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) if __name__ == "__main__": unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 4399d9f223..c23ce28061 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -19,26 +19,32 @@ sys.path[0:0] = [""] -from pymongo import operations, WriteConcern +from test import IntegrationTest, client_context, unittest +from test.utils import ( + TestCreator, + camel_to_snake, + camel_to_snake_args, + camel_to_upper_camel, + drop_collections, +) + +from pymongo import WriteConcern, operations from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor from pymongo.errors import PyMongoError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.read_concern import ReadConcern -from pymongo.results import _WriteResult, BulkWriteResult -from pymongo.operations import (InsertOne, - DeleteOne, - DeleteMany, - ReplaceOne, - UpdateOne, - UpdateMany) - -from test import client_context, unittest, IntegrationTest -from test.utils import (camel_to_snake, camel_to_upper_camel, - camel_to_snake_args, drop_collections, TestCreator) +from pymongo.results import BulkWriteResult, _WriteResult # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'crud', 'v1') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "v1") class TestAllScenarios(IntegrationTest): @@ -51,8 +57,7 @@ def check_result(self, expected_result, result): prop = camel_to_snake(res) msg = "%s : %r != %r" % (prop, expected_result, result) # SPEC-869: Only BulkWriteResult has upserted_count. - if (prop == "upserted_count" - and not isinstance(result, BulkWriteResult)): + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: # type: ignore upserted_count = 1 else: @@ -61,8 +66,7 @@ def check_result(self, expected_result, result): elif prop == "inserted_ids": # BulkWriteResult does not have inserted_ids. if isinstance(result, BulkWriteResult): - self.assertEqual(len(expected_result[res]), - result.inserted_count) + self.assertEqual(len(expected_result[res]), result.inserted_count) else: # InsertManyResult may be compared to [id1] from the # crud spec or {"0": id1} from the retryable write spec. @@ -78,8 +82,7 @@ def check_result(self, expected_result, result): expected_ids[int(str_index)] = ids[str_index] self.assertEqual(expected_ids, result.upserted_ids, msg) # type: ignore else: - self.assertEqual( - getattr(result, prop), expected_result[res], msg) + self.assertEqual(getattr(result, prop), expected_result[res], msg) else: self.assertEqual(result, expected_result) @@ -87,16 +90,16 @@ def check_result(self, expected_result, result): def run_operation(collection, test): # Convert command from CamelCase to pymongo.collection method. - operation = camel_to_snake(test['operation']['name']) + operation = camel_to_snake(test["operation"]["name"]) cmd = getattr(collection, operation) # Convert arguments to snake_case and handle special cases. - arguments = test['operation']['arguments'] + arguments = test["operation"]["arguments"] options = arguments.pop("options", {}) for option_name in options: arguments[camel_to_snake(option_name)] = options[option_name] - if operation == 'count': - raise unittest.SkipTest('PyMongo does not support count') + if operation == "count": + raise unittest.SkipTest("PyMongo does not support count") if operation == "bulk_write": # Parse each request into a bulk write model. requests = [] @@ -137,15 +140,15 @@ def create_test(scenario_def, test, name): def run_scenario(self): # Cleanup state and load data (if provided). drop_collections(self.db) - data = scenario_def.get('data') + data = scenario_def.get("data") if data: - self.db.test.with_options( - write_concern=WriteConcern(w="majority")).insert_many( - scenario_def['data']) + self.db.test.with_options(write_concern=WriteConcern(w="majority")).insert_many( + scenario_def["data"] + ) # Run operations and check results or errors. - expected_result = test.get('outcome', {}).get('result') - expected_error = test.get('outcome', {}).get('error') + expected_result = test.get("outcome", {}).get("result") + expected_error = test.get("outcome", {}).get("error") if expected_error is True: with self.assertRaises(PyMongoError): run_operation(self.db.test, test) @@ -155,16 +158,15 @@ def run_scenario(self): check_result(self, expected_result, result) # Assert final state is expected. - expected_c = test['outcome'].get('collection') + expected_c = test["outcome"].get("collection") if expected_c is not None: - expected_name = expected_c.get('name') + expected_name = expected_c.get("name") if expected_name is not None: db_coll = self.db[expected_name] else: db_coll = self.db.test - db_coll = db_coll.with_options( - read_concern=ReadConcern(level="local")) - self.assertEqual(list(db_coll.find()), expected_c['data']) + db_coll = db_coll.with_options(read_concern=ReadConcern(level="local")) + self.assertEqual(list(db_coll.find()), expected_c["data"]) return run_scenario @@ -175,53 +177,68 @@ def run_scenario(self): class TestWriteOpsComparison(unittest.TestCase): def test_InsertOneEquals(self): - self.assertEqual(InsertOne({'foo': 42}), InsertOne({'foo': 42})) + self.assertEqual(InsertOne({"foo": 42}), InsertOne({"foo": 42})) def test_InsertOneNotEquals(self): - self.assertNotEqual(InsertOne({'foo': 42}), InsertOne({'foo': 23})) + self.assertNotEqual(InsertOne({"foo": 42}), InsertOne({"foo": 23})) def test_DeleteOneEquals(self): - self.assertEqual(DeleteOne({'foo': 42}), DeleteOne({'foo': 42})) + self.assertEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 42})) def test_DeleteOneNotEquals(self): - self.assertNotEqual(DeleteOne({'foo': 42}), DeleteOne({'foo': 23})) + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 23})) def test_DeleteManyEquals(self): - self.assertEqual(DeleteMany({'foo': 42}), DeleteMany({'foo': 42})) + self.assertEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 42})) def test_DeleteManyNotEquals(self): - self.assertNotEqual(DeleteMany({'foo': 42}), DeleteMany({'foo': 23})) + self.assertNotEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 23})) def test_DeleteOneNotEqualsDeleteMany(self): - self.assertNotEqual(DeleteOne({'foo': 42}), DeleteMany({'foo': 42})) + self.assertNotEqual(DeleteOne({"foo": 42}), DeleteMany({"foo": 42})) def test_ReplaceOneEquals(self): - self.assertEqual(ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False), - ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False)) + self.assertEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ) def test_ReplaceOneNotEquals(self): - self.assertNotEqual(ReplaceOne({'foo': 42}, {'bar': 42}, upsert=False), - ReplaceOne({'foo': 42}, {'bar': 42}, upsert=True)) + self.assertNotEqual( + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=False), + ReplaceOne({"foo": 42}, {"bar": 42}, upsert=True), + ) def test_UpdateOneEquals(self): - self.assertEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateOne({'foo': 42}, {'$set': {'bar': 42}})) + self.assertEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + ) def test_UpdateOneNotEquals(self): - self.assertNotEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateOne({'foo': 42}, {'$set': {'bar': 23}})) + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateOne({"foo": 42}, {"$set": {"bar": 23}}), + ) def test_UpdateManyEquals(self): - self.assertEqual(UpdateMany({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 42}})) + self.assertEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) def test_UpdateManyNotEquals(self): - self.assertNotEqual(UpdateMany({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 23}})) + self.assertNotEqual( + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 23}}), + ) def test_UpdateOneNotEqualsUpdateMany(self): - self.assertNotEqual(UpdateOne({'foo': 42}, {'$set': {'bar': 42}}), - UpdateMany({'foo': 42}, {'$set': {'bar': 42}})) + self.assertNotEqual( + UpdateOne({"foo": 42}, {"$set": {"bar": 42}}), + UpdateMany({"foo": 42}, {"$set": {"bar": 42}}), + ) + if __name__ == "__main__": unittest.main() diff --git a/test/test_cursor.py b/test/test_cursor.py index f741b8b0cc..7a80b003df 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -19,42 +19,47 @@ import random import re import sys -import time import threading +import time sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + ignore_deprecations, + rs_or_single_client, +) + from bson import decode_all from bson.code import Code from bson.son import SON -from pymongo import (ASCENDING, - DESCENDING) +from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType -from pymongo.errors import (ConfigurationError, - ExecutionTimeout, - InvalidOperation, - OperationFailure) +from pymongo.errors import ( + ConfigurationError, + ExecutionTimeout, + InvalidOperation, + OperationFailure, +) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (EventListener, - OvertCommandListener, - ignore_deprecations, - rs_or_single_client, - AllowListEventListener) class TestCursor(IntegrationTest): def test_deepcopy_cursor_littered_with_regexes(self): - cursor = self.db.test.find({ - "x": re.compile("^hmmm.*"), - "y": [re.compile("^hmm.*")], - "z": {"a": [re.compile("^hm.*")]}, - re.compile("^key.*"): {"a": [re.compile("^hm.*")]}}) + cursor = self.db.test.find( + { + "x": re.compile("^hmmm.*"), + "y": [re.compile("^hmm.*")], + "z": {"a": [re.compile("^hm.*")]}, + re.compile("^key.*"): {"a": [re.compile("^hm.*")]}, + } + ) cursor2 = copy.deepcopy(cursor) self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore @@ -65,19 +70,15 @@ def test_add_remove_option(self): cursor.add_option(2) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.add_option(128) - cursor2 = self.db.test.find( - cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) self.assertEqual(162, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(162, cursor._Cursor__query_flags) cursor.add_option(128) @@ -86,13 +87,11 @@ def test_add_remove_option(self): cursor.remove_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertEqual(2, cursor._Cursor__query_flags) cursor.remove_option(32) @@ -102,8 +101,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(no_cursor_timeout=True) self.assertEqual(16, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(16) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(16) self.assertEqual(0, cursor._Cursor__query_flags) @@ -111,8 +109,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) self.assertEqual(34, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(34) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(32) self.assertEqual(2, cursor._Cursor__query_flags) @@ -120,8 +117,7 @@ def test_add_remove_option(self): cursor = self.db.test.find(allow_partial_results=True) self.assertEqual(128, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(128) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) cursor.remove_option(128) self.assertEqual(0, cursor._Cursor__query_flags) @@ -134,8 +130,7 @@ def test_add_remove_option_exhaust(self): cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) self.assertEqual(64, cursor._Cursor__query_flags) cursor2 = self.db.test.find().add_option(64) - self.assertEqual(cursor._Cursor__query_flags, - cursor2._Cursor__query_flags) + self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) self.assertTrue(cursor._Cursor__exhaust) cursor.remove_option(64) self.assertEqual(0, cursor._Cursor__query_flags) @@ -146,7 +141,7 @@ def test_allow_disk_use(self): db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz') + self.assertRaises(TypeError, coll.find().allow_disk_use, "baz") cursor = coll.find().allow_disk_use(True) self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore @@ -157,7 +152,7 @@ def test_max_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().max_time_ms, 'foo') + self.assertRaises(TypeError, coll.find().max_time_ms, "foo") coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) @@ -178,12 +173,9 @@ def test_max_time_ms(self): self.assertTrue(coll.find_one(max_time_ms=1000)) client = self.client - if (not client_context.is_mongos - and client_context.test_commands_enabled): + if not client_context.is_mongos and client_context.test_commands_enabled: # Cursor parses server timeout error in response to initial query. - client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: cursor = coll.find().max_time_ms(1) try: @@ -192,19 +184,16 @@ def test_max_time_ms(self): pass else: self.fail("ExecutionTimeout not raised") - self.assertRaises(ExecutionTimeout, - coll.find_one, max_time_ms=1) + self.assertRaises(ExecutionTimeout, coll.find_one, max_time_ms=1) finally: - client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.create_collection("pymongo_test", capped=True, size=4096) - self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo') + self.assertRaises(TypeError, coll.find().max_await_time_ms, "foo") coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) @@ -222,95 +211,91 @@ def test_max_await_time_ms(self): self.assertEqual(None, cursor._Cursor__max_await_time_ms) # If cursor is tailable_await and timeout is set - cursor = coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) self.assertEqual(99, cursor._Cursor__max_await_time_ms) - cursor = coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms( - 10).max_await_time_ms(90) + cursor = ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_await_time_ms(10) + .max_await_time_ms(90) + ) self.assertEqual(90, cursor._Cursor__max_await_time_ms) - listener = AllowListEventListener('find', 'getMore') - coll = rs_or_single_client( - event_listeners=[listener])[self.db.name].pymongo_test + listener = AllowListEventListener("find", "getMore") + coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test results = listener.results # Tailable_await defaults. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) # find - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertFalse("maxTimeMS" in results["started"][0].command) # getMore - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertFalse("maxTimeMS" in results["started"][1].command) results.clear() # Tailable_await with max_await_time_ms set. - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertEqual("find", results["started"][0].command_name) + self.assertFalse("maxTimeMS" in results["started"][0].command) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertTrue('maxTimeMS' in results['started'][1].command) - self.assertEqual(99, results['started'][1].command['maxTimeMS']) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertTrue("maxTimeMS" in results["started"][1].command) + self.assertEqual(99, results["started"][1].command["maxTimeMS"]) results.clear() # Tailable_await with max_time_ms - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) results.clear() # Tailable_await with both max_time_ms and max_await_time_ms - list(coll.find( - cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms( - 99).max_await_time_ms(99)) + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertTrue('maxTimeMS' in results['started'][1].command) - self.assertEqual(99, results['started'][1].command['maxTimeMS']) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertTrue("maxTimeMS" in results["started"][1].command) + self.assertEqual(99, results["started"][1].command["maxTimeMS"]) results.clear() # Non tailable_await with max_await_time_ms list(coll.find(batch_size=1).max_await_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertFalse('maxTimeMS' in results['started'][0].command) + self.assertEqual("find", results["started"][0].command_name) + self.assertFalse("maxTimeMS" in results["started"][0].command) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) results.clear() # Non tailable_await with max_time_ms list(coll.find(batch_size=1).max_time_ms(99)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) # Non tailable_await with both max_time_ms and max_await_time_ms list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) # find - self.assertEqual('find', results['started'][0].command_name) - self.assertTrue('maxTimeMS' in results['started'][0].command) - self.assertEqual(99, results['started'][0].command['maxTimeMS']) + self.assertEqual("find", results["started"][0].command_name) + self.assertTrue("maxTimeMS" in results["started"][0].command) + self.assertEqual(99, results["started"][0].command["maxTimeMS"]) # getMore - self.assertEqual('getMore', results['started'][1].command_name) - self.assertFalse('maxTimeMS' in results['started'][1].command) + self.assertEqual("getMore", results["started"][1].command_name) + self.assertFalse("maxTimeMS" in results["started"][1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -322,9 +307,7 @@ def test_max_time_ms_getmore(self): # Send initial query before turning on failpoint. next(cursor) - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: try: # Iterate up to first getmore. @@ -334,9 +317,7 @@ def test_max_time_ms_getmore(self): else: self.fail("ExecutionTimeout not raised") finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_explain(self): a = self.db.test.find() @@ -351,10 +332,9 @@ def test_explain_with_read_concern(self): listener = AllowListEventListener("explain") client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - coll = client.pymongo_test.test.with_options( - read_concern=ReadConcern(level="local")) + coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) - started = listener.results['started'] + started = listener.results["started"] self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) @@ -365,23 +345,26 @@ def test_hint(self): db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("num", ASCENDING)]).explain) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain, + ) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, + ) spec = [("num", DESCENDING)] index = db.test.create_index(spec) first = next(db.test.find()) - self.assertEqual(0, first.get('num')) + self.assertEqual(0, first.get("num")) first = next(db.test.find().hint(spec)) - self.assertEqual(99, first.get('num')) - self.assertRaises(OperationFailure, - db.test.find({"num": 17, "foo": 17}) - .hint([("foo", ASCENDING)]).explain) + self.assertEqual(99, first.get("num")) + self.assertRaises( + OperationFailure, + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, + ) a = db.test.find({"num": 17}) a.hint(spec) @@ -395,11 +378,11 @@ def test_hint_by_name(self): db.test.insert_many([{"i": i} for i in range(100)]) - db.test.create_index([('i', DESCENDING)], name='fooindex') + db.test.create_index([("i", DESCENDING)], name="fooindex") first = next(db.test.find()) - self.assertEqual(0, first.get('i')) - first = next(db.test.find().hint('fooindex')) - self.assertEqual(99, first.get('i')) + self.assertEqual(0, first.get("i")) + first = next(db.test.find().hint("fooindex")) + self.assertEqual(99, first.get("i")) def test_limit(self): db = self.db @@ -702,8 +685,7 @@ def test_sort(self): self.assertRaises(TypeError, db.test.find().sort, 5) self.assertRaises(ValueError, db.test.find().sort, []) self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING) - self.assertRaises(TypeError, db.test.find().sort, - [("hello", DESCENDING)], DESCENDING) + self.assertRaises(TypeError, db.test.find().sort, [("hello", DESCENDING)], DESCENDING) db.test.drop() @@ -724,8 +706,7 @@ def test_sort(self): self.assertEqual(desc, expect) desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])] self.assertEqual(desc, expect) - desc = [i["x"] for i in - db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] + desc = [i["x"] for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] self.assertEqual(desc, expect) expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] @@ -736,9 +717,9 @@ def test_sort(self): for (a, b) in shuffled: db.test.insert_one({"a": a, "b": b}) - result = [(i["a"], i["b"]) for i in - db.test.find().sort([("b", DESCENDING), - ("a", ASCENDING)])] + result = [ + (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) + ] self.assertEqual(result, expected) a = db.test.find() @@ -758,42 +739,34 @@ def test_where(self): db.test.insert_many([{"x": i} for i in range(10)]) - self.assertEqual(3, len(list(db.test.find().where('this.x < 3')))) - self.assertEqual(3, - len(list(db.test.find().where(Code('this.x < 3'))))) + self.assertEqual(3, len(list(db.test.find().where("this.x < 3")))) + self.assertEqual(3, len(list(db.test.find().where(Code("this.x < 3"))))) - code_with_scope = Code('this.x < i', {"i": 3}) + code_with_scope = Code("this.x < i", {"i": 3}) if client_context.version.at_least(4, 3, 3): # MongoDB 4.4 removed support for Code with scope. with self.assertRaises(OperationFailure): list(db.test.find().where(code_with_scope)) - code_with_empty_scope = Code('this.x < 3', {}) + code_with_empty_scope = Code("this.x < 3", {}) with self.assertRaises(OperationFailure): list(db.test.find().where(code_with_empty_scope)) else: - self.assertEqual( - 3, len(list(db.test.find().where(code_with_scope)))) + self.assertEqual(3, len(list(db.test.find().where(code_with_scope)))) self.assertEqual(10, len(list(db.test.find()))) - self.assertEqual([0, 1, 2], - [a["x"] for a in - db.test.find().where('this.x < 3')]) - self.assertEqual([], - [a["x"] for a in - db.test.find({"x": 5}).where('this.x < 3')]) - self.assertEqual([5], - [a["x"] for a in - db.test.find({"x": 5}).where('this.x > 3')]) - - cursor = db.test.find().where('this.x < 3').where('this.x > 7') + self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where("this.x < 3")]) + self.assertEqual([], [a["x"] for a in db.test.find({"x": 5}).where("this.x < 3")]) + self.assertEqual([5], [a["x"] for a in db.test.find({"x": 5}).where("this.x > 3")]) + + cursor = db.test.find().where("this.x < 3").where("this.x > 7") self.assertEqual([8, 9], [a["x"] for a in cursor]) a = db.test.find() - b = a.where('this.x > 3') + b = a.where("this.x > 3") for _ in a: break - self.assertRaises(InvalidOperation, a.where, 'this.x < 3') + self.assertRaises(InvalidOperation, a.where, "this.x < 3") def test_rewind(self): self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -866,26 +839,28 @@ def test_clone(self): self.assertNotEqual(cursor, cursor.clone()) # Just test attributes - cursor = self.db.test.find({"x": re.compile("^hello.*")}, - projection={'_id': False}, - skip=1, - no_cursor_timeout=True, - cursor_type=CursorType.TAILABLE_AWAIT, - sort=[("x", 1)], - allow_partial_results=True, - oplog_replay=True, - batch_size=123, - collation={'locale': 'en_US'}, - hint=[("_id", 1)], - max_scan=100, - max_time_ms=1000, - return_key=True, - show_record_id=True, - snapshot=True, - allow_disk_use=True).limit(2) - cursor.min([('a', 1)]).max([('b', 3)]) + cursor = self.db.test.find( + {"x": re.compile("^hello.*")}, + projection={"_id": False}, + skip=1, + no_cursor_timeout=True, + cursor_type=CursorType.TAILABLE_AWAIT, + sort=[("x", 1)], + allow_partial_results=True, + oplog_replay=True, + batch_size=123, + collation={"locale": "en_US"}, + hint=[("_id", 1)], + max_scan=100, + max_time_ms=1000, + return_key=True, + show_record_id=True, + snapshot=True, + allow_disk_use=True, + ).limit(2) + cursor.min([("a", 1)]).max([("b", 3)]) cursor.add_option(128) - cursor.comment('hi!') + cursor.comment("hi!") # Every attribute should be the same. cursor2 = cursor.clone() @@ -893,17 +868,17 @@ def test_clone(self): # Shallow copies can so can mutate cursor2 = copy.copy(cursor) - cursor2._Cursor__projection['cursor2'] = False - self.assertTrue('cursor2' in cursor._Cursor__projection) + cursor2._Cursor__projection["cursor2"] = False + self.assertTrue("cursor2" in cursor._Cursor__projection) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) - cursor3._Cursor__projection['cursor3'] = False - self.assertFalse('cursor3' in cursor._Cursor__projection) + cursor3._Cursor__projection["cursor3"] = False + self.assertFalse("cursor3" in cursor._Cursor__projection) cursor4 = cursor.clone() - cursor4._Cursor__projection['cursor4'] = False - self.assertFalse('cursor4' in cursor._Cursor__projection) + cursor4._Cursor__projection["cursor4"] = False + self.assertFalse("cursor4" in cursor._Cursor__projection) # Test memo when deepcopying queries query = {"hello": "world"} @@ -912,14 +887,12 @@ def test_clone(self): cursor2 = copy.deepcopy(cursor) - self.assertNotEqual(id(cursor._Cursor__spec), - id(cursor2._Cursor__spec)) - self.assertEqual(id(cursor2._Cursor__spec['reflexive']), - id(cursor2._Cursor__spec)) + self.assertNotEqual(id(cursor._Cursor__spec), id(cursor2._Cursor__spec)) + self.assertEqual(id(cursor2._Cursor__spec["reflexive"]), id(cursor2._Cursor__spec)) self.assertEqual(len(cursor2._Cursor__spec), 2) # Ensure hints are cloned as the correct type - cursor = self.db.test.find().hint([('z', 1), ("a", 1)]) + cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) @@ -947,46 +920,38 @@ def test_getitem_slice_index(self): self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) for a, b in zip(count(0), self.db.test.find()): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(100, len(list(self.db.test.find()[0:]))) for a, b in zip(count(0), self.db.test.find()[0:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(80, len(list(self.db.test.find()[20:]))) for a, b in zip(count(20), self.db.test.find()[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) for a, b in zip(count(99), self.db.test.find()[99:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) for i in self.db.test.find()[1000:]: self.fail() self.assertEqual(5, len(list(self.db.test.find()[20:25]))) - self.assertEqual(5, len(list( - self.db.test.find()[20:25]))) + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) for a, b in zip(count(20), self.db.test.find()[20:25]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) for a, b in zip(count(20), self.db.test.find()[40:45][20:]): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find()[40:45].limit(0).skip(20)) - ) - ) - for a, b in zip(count(20), - self.db.test.find()[40:45].limit(0).skip(20)): - self.assertEqual(a, b['i']) - - self.assertEqual(80, - len(list(self.db.test.find().limit(10).skip(40)[20:])) - ) - for a, b in zip(count(20), - self.db.test.find().limit(10).skip(40)[20:]): - self.assertEqual(a, b['i']) + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): + self.assertEqual(a, b["i"]) self.assertEqual(1, len(list(self.db.test.find()[:1]))) self.assertEqual(5, len(list(self.db.test.find()[:5]))) @@ -995,10 +960,7 @@ def test_getitem_slice_index(self): self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) self.assertEqual(0, len(list(self.db.test.find()[10:10]))) self.assertEqual(0, len(list(self.db.test.find()[:0]))) - self.assertEqual(80, - len(list(self.db.test.find()[10:10].limit(0).skip(20)) - ) - ) + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) @@ -1006,17 +968,16 @@ def test_getitem_numeric_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) - self.assertEqual(0, self.db.test.find()[0]['i']) - self.assertEqual(50, self.db.test.find()[50]['i']) - self.assertEqual(50, self.db.test.find().skip(50)[0]['i']) - self.assertEqual(50, self.db.test.find().skip(49)[1]['i']) - self.assertEqual(50, self.db.test.find()[50]['i']) - self.assertEqual(99, self.db.test.find()[99]['i']) + self.assertEqual(0, self.db.test.find()[0]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(50, self.db.test.find().skip(50)[0]["i"]) + self.assertEqual(50, self.db.test.find().skip(49)[1]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(99, self.db.test.find()[99]["i"]) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) - self.assertRaises(IndexError, - lambda x: self.db.test.find().skip(50)[x], 50) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) def test_len(self): self.assertRaises(TypeError, len, self.db.test.find()) @@ -1032,7 +993,7 @@ def set_coll(): def test_get_more(self): db = self.db db.drop_collection("test") - db.test.insert_many([{'i': i} for i in range(10)]) + db.test.insert_many([{"i": i} for i in range(10)]) self.assertEqual(10, len(list(db.test.find().batch_size(5)))) def test_tailable(self): @@ -1075,8 +1036,10 @@ def test_tailable(self): self.assertEqual(3, db.test.count_documents({})) # __getitem__(index) - for cursor in (db.test.find(cursor_type=CursorType.TAILABLE), - db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)): + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): self.assertEqual(4, cursor[0]["x"]) self.assertEqual(5, cursor[1]["x"]) self.assertEqual(6, cursor[2]["x"]) @@ -1106,6 +1069,7 @@ def iterate_cursor(): while cursor.alive: for doc in cursor: pass + t = threading.Thread(target=iterate_cursor) t.start() time.sleep(1) @@ -1114,12 +1078,10 @@ def iterate_cursor(): t.join(3) self.assertFalse(t.is_alive()) - def test_distinct(self): self.db.drop_collection("test") - self.db.test.insert_many( - [{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + self.db.test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a") distinct.sort() @@ -1145,8 +1107,7 @@ def test_max_scan(self): self.assertEqual(100, len(list(self.db.test.find()))) self.assertEqual(50, len(list(self.db.test.find().max_scan(50)))) - self.assertEqual(50, len(list(self.db.test.find() - .max_scan(90).max_scan(50)))) + self.assertEqual(50, len(list(self.db.test.find().max_scan(90).max_scan(50)))) def test_with_statement(self): self.db.drop_collection("test") @@ -1165,28 +1126,32 @@ def test_with_statement(self): @client_context.require_no_mongos def test_comment(self): self.client.drop_database(self.db) - self.db.command('profile', 2) # Profile ALL commands. + self.db.command("profile", 2) # Profile ALL commands. try: - list(self.db.test.find().comment('foo')) + list(self.db.test.find().comment("foo")) count = self.db.system.profile.count_documents( - {'ns': 'pymongo_test.test', 'op': 'query', - 'command.comment': 'foo'}) + {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} + ) self.assertEqual(count, 1) - self.db.test.find().comment('foo').distinct('type') + self.db.test.find().comment("foo").distinct("type") count = self.db.system.profile.count_documents( - {'ns': 'pymongo_test.test', 'op': 'command', - 'command.distinct': 'test', - 'command.comment': 'foo'}) + { + "ns": "pymongo_test.test", + "op": "command", + "command.distinct": "test", + "command.comment": "foo", + } + ) self.assertEqual(count, 1) finally: - self.db.command('profile', 0) # Turn off profiling. + self.db.command("profile", 0) # Turn off profiling. self.db.system.profile.drop() self.db.test.insert_many([{}, {}]) cursor = self.db.test.find() next(cursor) - self.assertRaises(InvalidOperation, cursor.comment, 'hello') + self.assertRaises(InvalidOperation, cursor.comment, "hello") def test_alive(self): self.db.test.delete_many({}) @@ -1230,8 +1195,7 @@ def assertCursorKilled(): self.assertEqual(1, len(results["started"])) self.assertEqual("killCursors", results["started"][0].command_name) self.assertEqual(1, len(results["succeeded"])) - self.assertEqual("killCursors", - results["succeeded"][0].command_name) + self.assertEqual("killCursors", results["succeeded"][0].command_name) assertCursorKilled() results.clear() @@ -1254,37 +1218,37 @@ def test_delete_not_initialized(self): cursor.__del__() # no error def test_getMore_does_not_send_readPreference(self): - listener = AllowListEventListener('find', 'getMore') - client = rs_or_single_client( - event_listeners=[listener]) + listener = AllowListEventListener("find", "getMore") + client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) # We never send primary read preference so override the default. coll = client[self.db.name].get_collection( - 'test', read_preference=ReadPreference.PRIMARY_PREFERRED) + "test", read_preference=ReadPreference.PRIMARY_PREFERRED + ) coll.delete_many({}) coll.insert_many([{} for _ in range(5)]) self.addCleanup(coll.drop) list(coll.find(batch_size=3)) - started = listener.results['started'] + started = listener.results["started"] self.assertEqual(2, len(started)) - self.assertEqual('find', started[0].command_name) + self.assertEqual("find", started[0].command_name) if client_context.is_rs or client_context.is_mongos: - self.assertIn('$readPreference', started[0].command) + self.assertIn("$readPreference", started[0].command) else: - self.assertNotIn('$readPreference', started[0].command) - self.assertEqual('getMore', started[1].command_name) - self.assertNotIn('$readPreference', started[1].command) + self.assertNotIn("$readPreference", started[0].command) + self.assertEqual("getMore", started[1].command_name) + self.assertNotIn("$readPreference", started[1].command) class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.find_raw_batches().sort('_id')) + batches = list(c.find_raw_batches().sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1292,24 +1256,27 @@ def test_find_raw(self): def test_find_raw_transaction(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): - batches = list(client[self.db.name].test.find_raw_batches( - session=session).sort('_id')) - cmd = listener.results['started'][0] - self.assertEqual(cmd.command_name, 'find') - self.assertIn('$clusterTime', cmd.command) - self.assertEqual(cmd.command['startTransaction'], True) - self.assertEqual(cmd.command['txnNumber'], 1) + batches = list( + client[self.db.name].test.find_raw_batches(session=session).sort("_id") + ) + cmd = listener.results["started"][0] + self.assertEqual(cmd.command_name, "find") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results['succeeded'][-1] - self.assertEqual(last_cmd.reply['$clusterTime']['clusterTime'], - session.cluster_time['clusterTime']) + last_cmd = listener.results["succeeded"][-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1319,47 +1286,42 @@ def test_find_raw_transaction(self): def test_find_raw_retryable_reads(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) - with self.fail_point({ - 'mode': {'times': 1}, 'data': {'failCommands': ['find'], - 'closeConnection': True}}): - batches = list( - client[self.db.name].test.find_raw_batches().sort('_id')) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + batches = list(client[self.db.name].test.find_raw_batches().sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results['started']), 2) - for cmd in listener.results['started']: - self.assertEqual(cmd.command_name, 'find') + self.assertEqual(len(listener.results["started"]), 2) + for cmd in listener.results["started"]: + self.assertEqual(cmd.command_name, "find") @client_context.require_version_min(5, 0, 0) @client_context.require_no_standalone def test_find_raw_snapshot_reads(self): - c = self.db.get_collection( - "test", write_concern=WriteConcern(w="majority")) + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: - db.test.distinct('x', {}, session=session) - batches = list(db.test.find_raw_batches( - session=session).sort('_id')) + db.test.distinct("x", {}, session=session) + batches = list(db.test.find_raw_batches(session=session).sort("_id")) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results['started'][1].command - self.assertEqual(find_cmd['readConcern']['level'], 'snapshot') - self.assertIsNotNone(find_cmd['readConcern']['atClusterTime']) + find_cmd = listener.results["started"][1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) def test_explain(self): c = self.db.test @@ -1384,13 +1346,13 @@ def test_clone(self): def test_exhaust(self): c = self.db.test c.drop() - c.insert_many({'_id': i} for i in range(200)) - result = b''.join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) - self.assertEqual([{'_id': i} for i in range(200)], decode_all(result)) + c.insert_many({"_id": i} for i in range(200)) + result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) + self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) def test_server_error(self): with self.assertRaises(OperationFailure) as exc: - next(self.db.test.find_raw_batches({'x': {'$bad': 1}})) + next(self.db.test.find_raw_batches({"x": {"$bad": 1}})) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) @@ -1400,12 +1362,11 @@ def test_get_item(self): self.db.test.find_raw_batches()[0] def test_collation(self): - next(self.db.test.find_raw_batches(collation=Collation('en_US'))) + next(self.db.test.find_raw_batches(collation=Collation("en_US"))) - @client_context.require_no_mmap # MMAPv1 does not support read concern + @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): - self.db.get_collection( - "test", write_concern=WriteConcern(w="majority")).insert_one({}) + self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one({}) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) @@ -1414,7 +1375,7 @@ def test_monitoring(self): client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() - c.insert_many([{'_id': i} for i in range(10)]) + c.insert_many([{"_id": i} for i in range(10)]) listener.results.clear() cursor = c.find_raw_batches(batch_size=4) @@ -1422,19 +1383,18 @@ def test_monitoring(self): # First raw batch of 4 documents. next(cursor) - started = listener.results['started'][0] - succeeded = listener.results['succeeded'][0] - self.assertEqual(0, len(listener.results['failed'])) - self.assertEqual('find', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('find', succeeded.command_name) + started = listener.results["started"][0] + succeeded = listener.results["succeeded"][0] + self.assertEqual(0, len(listener.results["failed"])) + self.assertEqual("find", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("find", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") # The batch is a list of one raw bytes object. self.assertEqual(len(csr["firstBatch"]), 1) - self.assertEqual(decode_all(csr["firstBatch"][0]), - [{'_id': i} for i in range(0, 4)]) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(0, 4)]) listener.results.clear() @@ -1442,17 +1402,16 @@ def test_monitoring(self): next(cursor) try: results = listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertEqual('getMore', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getMore', succeeded.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) - self.assertEqual(decode_all(csr["nextBatch"][0]), - [{'_id': i} for i in range(4, 8)]) + self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) finally: # Finish the cursor. tuple(cursor) @@ -1466,9 +1425,9 @@ def setUpClass(cls): def test_aggregate_raw(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.aggregate_raw_batches([{'$sort': {'_id': 1}}])) + batches = list(c.aggregate_raw_batches([{"$sort": {"_id": 1}}])) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1476,24 +1435,29 @@ def test_aggregate_raw(self): def test_aggregate_raw_transaction(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): - batches = list(client[self.db.name].test.aggregate_raw_batches( - [{'$sort': {'_id': 1}}], session=session)) - cmd = listener.results['started'][0] - self.assertEqual(cmd.command_name, 'aggregate') - self.assertIn('$clusterTime', cmd.command) - self.assertEqual(cmd.command['startTransaction'], True) - self.assertEqual(cmd.command['txnNumber'], 1) + batches = list( + client[self.db.name].test.aggregate_raw_batches( + [{"$sort": {"_id": 1}}], session=session + ) + ) + cmd = listener.results["started"][0] + self.assertEqual(cmd.command_name, "aggregate") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results['succeeded'][-1] - self.assertEqual(last_cmd.reply['$clusterTime']['clusterTime'], - session.cluster_time['clusterTime']) + last_cmd = listener.results["succeeded"][-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1502,62 +1466,63 @@ def test_aggregate_raw_transaction(self): def test_aggregate_raw_retryable_reads(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) - with self.fail_point({ - 'mode': {'times': 1}, 'data': {'failCommands': ['aggregate'], - 'closeConnection': True}}): - batches = list(client[self.db.name].test.aggregate_raw_batches( - [{'$sort': {'_id': 1}}])) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} + ): + batches = list(client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}])) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results['started']), 3) - cmds = listener.results['started'] - self.assertEqual(cmds[0].command_name, 'aggregate') - self.assertEqual(cmds[1].command_name, 'aggregate') + self.assertEqual(len(listener.results["started"]), 3) + cmds = listener.results["started"] + self.assertEqual(cmds[0].command_name, "aggregate") + self.assertEqual(cmds[1].command_name, "aggregate") @client_context.require_version_min(5, 0, -1) @client_context.require_no_standalone def test_aggregate_raw_snapshot_reads(self): - c = self.db.get_collection( - "test", write_concern=WriteConcern(w="majority")) + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], - retryReads=True) + client = rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: - db.test.distinct('x', {}, session=session) - batches = list(db.test.aggregate_raw_batches( - [{'$sort': {'_id': 1}}], session=session)) + db.test.distinct("x", {}, session=session) + batches = list(db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session)) self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results['started'][1].command - self.assertEqual(find_cmd['readConcern']['level'], 'snapshot') - self.assertIsNotNone(find_cmd['readConcern']['atClusterTime']) + find_cmd = listener.results["started"][1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) def test_server_error(self): c = self.db.test c.drop() - docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)] + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - c.insert_one({'_id': 10, 'x': 'not a number'}) + c.insert_one({"_id": 10, "x": "not a number"}) with self.assertRaises(OperationFailure) as exc: - list(self.db.test.aggregate_raw_batches([{ - '$sort': {'_id': 1}, - }, { - '$project': {'x': {'$multiply': [2, '$x']}} - }], batchSize=4)) + list( + self.db.test.aggregate_raw_batches( + [ + { + "$sort": {"_id": 1}, + }, + {"$project": {"x": {"$multiply": [2, "$x"]}}}, + ], + batchSize=4, + ) + ) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) @@ -1567,25 +1532,25 @@ def test_get_item(self): self.db.test.aggregate_raw_batches([])[0] def test_collation(self): - next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US'))) + next(self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) def test_monitoring(self): listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() - c.insert_many([{'_id': i} for i in range(10)]) + c.insert_many([{"_id": i} for i in range(10)]) listener.results.clear() - cursor = c.aggregate_raw_batches([{'$sort': {'_id': 1}}], batchSize=4) + cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) # Start cursor, no initial batch. - started = listener.results['started'][0] - succeeded = listener.results['succeeded'][0] - self.assertEqual(0, len(listener.results['failed'])) - self.assertEqual('aggregate', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('aggregate', succeeded.command_name) + started = listener.results["started"][0] + succeeded = listener.results["succeeded"][0] + self.assertEqual(0, len(listener.results["failed"])) + self.assertEqual("aggregate", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("aggregate", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") @@ -1597,18 +1562,17 @@ def test_monitoring(self): n = 0 for batch in cursor: results = listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertEqual('getMore', started.command_name) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getMore', succeeded.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) csr = succeeded.reply["cursor"] self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(len(csr["nextBatch"]), 1) self.assertEqual(csr["nextBatch"][0], batch) - self.assertEqual(decode_all(batch), - [{'_id': i} for i in range(n, min(n + 4, 10))]) + self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) n += 4 listener.results.clear() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index eee47b9d2b..4659a62e62 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -17,7 +17,6 @@ import datetime import sys import tempfile - from collections import OrderedDict from decimal import Decimal from random import random @@ -25,32 +24,37 @@ sys.path[0:0] = [""] -from bson import (Decimal128, - decode, - decode_all, - decode_file_iter, - decode_iter, - encode, - RE_TYPE, - _BUILT_IN_TYPES, - _dict_to_bson, - _bson_to_dict) -from bson.codec_options import (CodecOptions, TypeCodec, TypeDecoder, - TypeEncoder, TypeRegistry) +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import rs_client + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) from bson.errors import InvalidDocument from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument - from gridfs import GridIn, GridOut - from pymongo.collection import ReturnDocument from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import rs_client - class DecimalEncoder(TypeEncoder): @property @@ -74,8 +78,7 @@ class DecimalCodec(DecimalDecoder, DecimalEncoder): pass -DECIMAL_CODECOPTS = CodecOptions( - type_registry=TypeRegistry([DecimalCodec()])) +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) class UndecipherableInt64Type(object): @@ -91,39 +94,55 @@ def __eq__(self, other): class UndecipherableIntDecoder(TypeDecoder): bson_type = Int64 + def transform_bson(self, value): return UndecipherableInt64Type(value) class UndecipherableIntEncoder(TypeEncoder): python_type = UndecipherableInt64Type + def transform_python(self, value): return Int64(value.value) UNINT_DECODER_CODECOPTS = CodecOptions( - type_registry=TypeRegistry([UndecipherableIntDecoder(), ])) + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) -UNINT_CODECOPTS = CodecOptions(type_registry=TypeRegistry( - [UndecipherableIntDecoder(), UndecipherableIntEncoder()])) +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) class UppercaseTextDecoder(TypeDecoder): bson_type = str + def transform_bson(self, value): return value.upper() -UPPERSTR_DECODER_CODECOPTS = CodecOptions(type_registry=TypeRegistry( - [UppercaseTextDecoder(),])) +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) def type_obfuscating_decoder_factory(rt_type): class ResumeTokenToNanDecoder(TypeDecoder): bson_type = rt_type + def transform_bson(self, value): return "NaN" + return ResumeTokenToNanDecoder @@ -135,43 +154,42 @@ def roundtrip(self, doc): self.assertEqual(doc, rt_document) def test_encode_decode_roundtrip(self): - self.roundtrip({'average': Decimal('56.47')}) - self.roundtrip({'average': {'b': Decimal('56.47')}}) - self.roundtrip({'average': [Decimal('56.47')]}) - self.roundtrip({'average': [[Decimal('56.47')]]}) - self.roundtrip({'average': [{'b': Decimal('56.47')}]}) + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) @no_type_check def test_decode_all(self): documents = [] for dec in range(3): - documents.append({'average': Decimal('56.4%s' % (dec,))}) + documents.append({"average": Decimal("56.4%s" % (dec,))}) bsonstream = bytes() for doc in documents: bsonstream += encode(doc, codec_options=self.codecopts) - self.assertEqual( - decode_all(bsonstream, self.codecopts), documents) + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) @no_type_check def test__bson_to_dict(self): - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} rawbytes = encode(document, codec_options=self.codecopts) decoded_document = _bson_to_dict(rawbytes, self.codecopts) self.assertEqual(document, decoded_document) @no_type_check def test__dict_to_bson(self): - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} rawbytes = encode(document, codec_options=self.codecopts) encoded_document = _dict_to_bson(document, False, self.codecopts) self.assertEqual(encoded_document, rawbytes) def _generate_multidocument_bson_stream(self): inp_num = [str(random() * 100)[:4] for _ in range(10)] - docs = [{'n': Decimal128(dec)} for dec in inp_num] - edocs = [{'n': Decimal(dec)} for dec in inp_num] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] bsonstream = b"" for doc in docs: bsonstream += encode(doc) @@ -180,8 +198,7 @@ def _generate_multidocument_bson_stream(self): @no_type_check def test_decode_iter(self): expected, bson_data = self._generate_multidocument_bson_stream() - for expected_doc, decoded_doc in zip( - expected, decode_iter(bson_data, self.codecopts)): + for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) @no_type_check @@ -191,26 +208,24 @@ def test_decode_file_iter(self): fileobj.write(bson_data) fileobj.seek(0) - for expected_doc, decoded_doc in zip( - expected, decode_file_iter(fileobj, self.codecopts)): + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): self.assertEqual(expected_doc, decoded_doc) fileobj.close() -class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, - unittest.TestCase): +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): @classmethod def setUpClass(cls): cls.codecopts = DECIMAL_CODECOPTS -class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, - unittest.TestCase): +class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): @classmethod def setUpClass(cls): codec_options = CodecOptions( - type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder()))) + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) cls.codecopts = codec_options @@ -221,29 +236,29 @@ def _get_codec_options(self, fallback_encoder): def test_simple(self): codecopts = self._get_codec_options(lambda x: Decimal128(x)) - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} bsonbytes = encode(document, codec_options=codecopts) - exp_document = {'average': Decimal128('56.47')} + exp_document = {"average": Decimal128("56.47")} exp_bsonbytes = encode(exp_document) self.assertEqual(bsonbytes, exp_bsonbytes) def test_erroring_fallback_encoder(self): - codecopts = self._get_codec_options(lambda _: 1/0) + codecopts = self._get_codec_options(lambda _: 1 / 0) # fallback converter should not be invoked when encoding known types. encode( - {'a': 1, 'b': Decimal128('1.01'), 'c': {'arr': ['abc', 3.678]}}, - codec_options=codecopts) + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) # expect an error when encoding a custom type. - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} with self.assertRaises(ZeroDivisionError): encode(document, codec_options=codecopts) def test_noop_fallback_encoder(self): codecopts = self._get_codec_options(lambda x: x) - document = {'average': Decimal('56.47')} + document = {"average": Decimal("56.47")} with self.assertRaises(InvalidDocument): encode(document, codec_options=codecopts) @@ -253,8 +268,9 @@ def fallback_encoder(value): return Decimal128(value) except: raise TypeError("cannot encode type %s" % (type(value))) + codecopts = self._get_codec_options(fallback_encoder) - document = {'average': Decimal} + document = {"average": Decimal} with self.assertRaises(TypeError): encode(document, codec_options=codecopts) @@ -262,8 +278,9 @@ def fallback_encoder(value): class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): msg = "Can't instantiate abstract class" + def run_test(base, attrs, fail): - codec = type('testcodec', (base,), attrs) + codec = type("testcodec", (base,), attrs) if fail: with self.assertRaisesRegex(TypeError, msg): codec() @@ -273,24 +290,46 @@ def run_test(base, attrs, fail): class MyType(object): pass - run_test(TypeEncoder, {'python_type': MyType,}, fail=True) - run_test(TypeEncoder, {'transform_python': lambda s, x: x}, fail=True) - run_test(TypeEncoder, {'transform_python': lambda s, x: x, - 'python_type': MyType}, fail=False) - - run_test(TypeDecoder, {'bson_type': Decimal128, }, fail=True) - run_test(TypeDecoder, {'transform_bson': lambda s, x: x}, fail=True) - run_test(TypeDecoder, {'transform_bson': lambda s, x: x, - 'bson_type': Decimal128}, fail=False) - - run_test(TypeCodec, {'bson_type': Decimal128, - 'python_type': MyType}, fail=True) - run_test(TypeCodec, {'transform_bson': lambda s, x: x, - 'transform_python': lambda s, x: x}, fail=True) - run_test(TypeCodec, {'python_type': MyType, - 'transform_python': lambda s, x: x, - 'transform_bson': lambda s, x: x, - 'bson_type': Decimal128}, fail=False) + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) def test_type_checks(self): self.assertTrue(issubclass(TypeCodec, TypeEncoder)) @@ -332,6 +371,7 @@ def fallback_encoder_A2BSON(value): # transforms B into something encodable class B2BSON(TypeEncoder): python_type = TypeB + def transform_python(self, value): return value.value @@ -340,6 +380,7 @@ def transform_python(self, value): # BSON-encodable. class A2B(TypeEncoder): python_type = TypeA + def transform_python(self, value): return TypeB(value.value) @@ -348,6 +389,7 @@ def transform_python(self, value): # BSON-encodable. class B2A(TypeEncoder): python_type = TypeB + def transform_python(self, value): return TypeA(value.value) @@ -360,37 +402,37 @@ def transform_python(self, value): cls.A2B = A2B def test_encode_fallback_then_custom(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B)) - testdoc = {'x': self.TypeA(123)} - expected_bytes = encode({'x': 123}) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) - self.assertEqual(encode(testdoc, codec_options=codecopts), - expected_bytes) + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) def test_encode_custom_then_fallback(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON)) - testdoc = {'x': self.TypeB(123)} - expected_bytes = encode({'x': 123}) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) - self.assertEqual(encode(testdoc, codec_options=codecopts), - expected_bytes) + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) def test_chaining_encoders_fails(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.A2B(), self.B2BSON()])) + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) with self.assertRaises(InvalidDocument): - encode({'x': self.TypeA(123)}, codec_options=codecopts) + encode({"x": self.TypeA(123)}, codec_options=codecopts) def test_infinite_loop_exceeds_max_recursion_depth(self): - codecopts = CodecOptions(type_registry=TypeRegistry( - [self.B2A()], fallback_encoder=self.fallback_encoder_A2B)) + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) # Raises max recursion depth exceeded error with self.assertRaises(RuntimeError): - encode({'x': self.TypeA(100)}, codec_options=codecopts) + encode({"x": self.TypeA(100)}, codec_options=codecopts) class TestTypeRegistry(unittest.TestCase): @@ -449,29 +491,34 @@ def fallback_encoder(value): def test_simple(self): codec_instances = [codec() for codec in self.codecs] + def assert_proper_initialization(type_registry, codec_instances): - self.assertEqual(type_registry._encoder_map, { - self.types[0]: codec_instances[0].transform_python, - self.types[1]: codec_instances[1].transform_python}) - self.assertEqual(type_registry._decoder_map, { - int: codec_instances[0].transform_bson, - str: codec_instances[1].transform_bson}) self.assertEqual( - type_registry._fallback_encoder, self.fallback_encoder) + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) type_registry = TypeRegistry(codec_instances, self.fallback_encoder) assert_proper_initialization(type_registry, codec_instances) type_registry = TypeRegistry( - fallback_encoder=self.fallback_encoder, type_codecs=codec_instances) + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) assert_proper_initialization(type_registry, codec_instances) # Ensure codec list held by the type registry doesn't change if we # mutate the initial list. codec_instances_copy = list(codec_instances) codec_instances.pop(0) - self.assertListEqual( - type_registry._TypeRegistry__type_codecs, codec_instances_copy) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) def test_simple_separate_codecs(self): class MyIntEncoder(TypeEncoder): @@ -491,72 +538,83 @@ def transform_bson(self, value): self.assertEqual( type_registry._encoder_map, - {MyIntEncoder.python_type: codec_instances[1].transform_python}) # type: ignore + {MyIntEncoder.python_type: codec_instances[1].transform_python}, # type: ignore[has-type] + ) self.assertEqual( - type_registry._decoder_map, - {MyIntDecoder.bson_type: codec_instances[0].transform_bson}) # type: ignore + type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} # type: ignore[has-type] + ) def test_initialize_fail(self): - err_msg = ("Expected an instance of TypeEncoder, TypeDecoder, " - "or TypeCodec, got .* instead") + err_msg = ( + "Expected an instance of TypeEncoder, TypeDecoder, " "or TypeCodec, got .* instead" + ) with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(self.codecs) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([type('AnyType', (object,), {})()]) + TypeRegistry([type("AnyType", (object,), {})()]) err_msg = "fallback_encoder %r is not a callable" % (True,) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry([], True) # type: ignore[arg-type] + TypeRegistry([], True) # type: ignore[arg-type] - err_msg = "fallback_encoder %r is not a callable" % ('hello',) + err_msg = "fallback_encoder %r is not a callable" % ("hello",) with self.assertRaisesRegex(TypeError, err_msg): - TypeRegistry(fallback_encoder='hello') # type: ignore[arg-type] + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) - r = ("TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % ( - codec_instances, None)) + r = "TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % (codec_instances, None) self.assertEqual(r, repr(type_registry)) def test_type_registry_eq(self): codec_instances = [codec() for codec in self.codecs] - self.assertEqual( - TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) codec_instances_2 = [codec() for codec in self.codecs] - self.assertNotEqual( - TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) def test_builtin_types_override_fails(self): def run_test(base, attrs): - msg = (r"TypeEncoders cannot change how built-in types " - r"are encoded \(encoder .* transforms type .*\)") + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) for pytype in _BUILT_IN_TYPES: - attrs.update({'python_type': pytype, - 'transform_python': lambda x: x}) - codec = type('testcodec', (base, ), attrs) + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) codec_instance = codec() with self.assertRaisesRegex(TypeError, msg): - TypeRegistry([codec_instance,]) + TypeRegistry( + [ + codec_instance, + ] + ) # Test only some subtypes as not all can be subclassed. - if pytype in [bool, type(None), RE_TYPE,]: + if pytype in [ + bool, + type(None), + RE_TYPE, + ]: continue class MyType(pytype): # type: ignore pass - attrs.update({'python_type': MyType, - 'transform_python': lambda x: x}) - codec = type('testcodec', (base, ), attrs) + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) codec_instance = codec() with self.assertRaisesRegex(TypeError, msg): - TypeRegistry([codec_instance,]) + TypeRegistry( + [ + codec_instance, + ] + ) run_test(TypeEncoder, {}) - run_test(TypeCodec, {'bson_type': Decimal128, - 'transform_bson': lambda x: x}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) class TestCollectionWCustomType(IntegrationTest): @@ -568,115 +626,127 @@ def tearDown(self): def test_command_errors_w_custom_type_decoder(self): db = self.db - test_doc = {'_id': 1, 'data': 'a'} - test = db.get_collection('test', - codec_options=UNINT_DECODER_CODECOPTS) + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) result = test.insert_one(test_doc) - self.assertEqual(result.inserted_id, test_doc['_id']) + self.assertEqual(result.inserted_id, test_doc["_id"]) with self.assertRaises(DuplicateKeyError): test.insert_one(test_doc) def test_find_w_custom_type_decoder(self): db = self.db - input_docs = [ - {'x': Int64(k)} for k in [1, 2, 3]] + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] for doc in input_docs: db.test.insert_one(doc) - test = db.get_collection( - 'test', codec_options=UNINT_DECODER_CODECOPTS) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) for doc in test.find({}, batch_size=1): - self.assertIsInstance(doc['x'], UndecipherableInt64Type) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) def test_find_w_custom_type_decoder_and_document_class(self): def run_test(doc_cls): db = self.db - input_docs = [ - {'x': Int64(k)} for k in [1, 2, 3]] + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] for doc in input_docs: db.test.insert_one(doc) - test = db.get_collection('test', codec_options=CodecOptions( - type_registry=TypeRegistry([UndecipherableIntDecoder()]), - document_class=doc_cls)) + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) for doc in test.find({}, batch_size=1): self.assertIsInstance(doc, doc_cls) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) for doc_cls in [RawBSONDocument, OrderedDict]: run_test(doc_cls) def test_aggregate_w_custom_type_decoder(self): db = self.db - db.test.insert_many([ - {'status': 'in progress', 'qty': Int64(1)}, - {'status': 'complete', 'qty': Int64(10)}, - {'status': 'in progress', 'qty': Int64(1)}, - {'status': 'complete', 'qty': Int64(10)}, - {'status': 'in progress', 'qty': Int64(1)},]) - test = db.get_collection( - 'test', codec_options=UNINT_DECODER_CODECOPTS) + db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) pipeline: list = [ - {'$match': {'status': 'complete'}}, - {'$group': {'_id': "$status", 'total_qty': {"$sum": "$qty"}}},] + {"$match": {"status": "complete"}}, + {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] result = test.aggregate(pipeline) res = list(result)[0] - self.assertEqual(res['_id'], 'complete') - self.assertIsInstance(res['total_qty'], UndecipherableInt64Type) - self.assertEqual(res['total_qty'].value, 20) + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) def test_distinct_w_custom_type(self): self.db.drop_collection("test") - test = self.db.get_collection('test', codec_options=UNINT_CODECOPTS) + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) values = [ UndecipherableInt64Type(1), UndecipherableInt64Type(2), UndecipherableInt64Type(3), - {"b": UndecipherableInt64Type(3)}] + {"b": UndecipherableInt64Type(3)}, + ] test.insert_many({"a": val} for val in values) self.assertEqual(values, test.distinct("a")) def test_find_one_and__w_custom_type_decoder(self): db = self.db - c = db.get_collection('test', codec_options=UNINT_DECODER_CODECOPTS) - c.insert_one({'_id': 1, 'x': Int64(1)}) - - doc = c.find_one_and_update({'_id': 1}, {'$inc': {'x': 1}}, - return_document=ReturnDocument.AFTER) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 2) - - doc = c.find_one_and_replace({'_id': 1}, {'x': Int64(3), 'y': True}, - return_document=ReturnDocument.AFTER) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 3) - self.assertEqual(doc['y'], True) - - doc = c.find_one_and_delete({'y': True}) - self.assertEqual(doc['_id'], 1) - self.assertIsInstance(doc['x'], UndecipherableInt64Type) - self.assertEqual(doc['x'].value, 3) + c = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) self.assertIsNone(c.find_one()) class TestGridFileCustomType(IntegrationTest): def setUp(self): - self.db.drop_collection('fs.files') - self.db.drop_collection('fs.chunks') + self.db.drop_collection("fs.files") + self.db.drop_collection("fs.chunks") def test_grid_out_custom_opts(self): db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) - one = GridIn(db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 'red', "bar": 'blue'}, bar=3, - baz="hello") + one = GridIn( + db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) one.write(b"hello world") one.close() @@ -690,12 +760,21 @@ def test_grid_out_custom_opts(self): self.assertEqual(1000, two.chunk_size) self.assertTrue(isinstance(two.upload_date, datetime.datetime)) self.assertEqual(["foo"], two.aliases) - self.assertEqual({"foo": 'red', "bar": 'blue'}, two.metadata) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) self.assertEqual(None, two.md5) - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) @@ -705,11 +784,10 @@ def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) @no_type_check - def insert_and_check(self, change_stream, insert_doc, - expected_doc): + def insert_and_check(self, change_stream, insert_doc, expected_doc): self.input_target.insert_one(insert_doc) change = next(change_stream) - self.assertEqual(change['fullDocument'], expected_doc) + self.assertEqual(change["fullDocument"], expected_doc) @no_type_check def kill_change_stream_cursor(self, change_stream): @@ -721,18 +799,21 @@ def kill_change_stream_cursor(self, change_stream): @no_type_check def test_simple(self): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UndecipherableIntEncoder(), UppercaseTextDecoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) self.create_targets(codec_options=codecopts) input_docs = [ - {'_id': UndecipherableInt64Type(1), 'data': 'hello'}, - {'_id': 2, 'data': 'world'}, - {'_id': UndecipherableInt64Type(3), 'data': '!'},] + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] expected_docs = [ - {'_id': 1, 'data': 'HELLO'}, - {'_id': 2, 'data': 'WORLD'}, - {'_id': 3, 'data': '!'},] + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] change_stream = self.change_stream() @@ -744,22 +825,22 @@ def test_simple(self): @no_type_check def test_custom_type_in_pipeline(self): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UndecipherableIntEncoder(), UppercaseTextDecoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) self.create_targets(codec_options=codecopts) input_docs = [ - {'_id': UndecipherableInt64Type(1), 'data': 'hello'}, - {'_id': 2, 'data': 'world'}, - {'_id': UndecipherableInt64Type(3), 'data': '!'}] - expected_docs = [ - {'_id': 2, 'data': 'WORLD'}, - {'_id': 3, 'data': '!'}] + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] # UndecipherableInt64Type should be encoded with the TypeRegistry. change_stream = self.change_stream( - [{'$match': {'documentKey._id': { - '$gte': UndecipherableInt64Type(2)}}}]) + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) self.input_target.insert_one(input_docs[0]) self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) @@ -773,17 +854,17 @@ def test_break_resume_token(self): change_stream = self.change_stream() self.input_target.insert_one({"data": "test"}) change = next(change_stream) - resume_token_decoder = type_obfuscating_decoder_factory( - type(change['_id']['_data'])) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) # Custom-decoding the resumeToken type breaks resume tokens. - codecopts = CodecOptions(type_registry=TypeRegistry([ - resume_token_decoder(), UndecipherableIntEncoder()])) + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) # Re-create targets, change stream and proceed. self.create_targets(codec_options=codecopts) - docs = [{'_id': 1}, {'_id': 2}, {'_id': 3}] + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] change_stream = self.change_stream() self.insert_and_check(change_stream, docs[0], docs[0]) @@ -795,27 +876,27 @@ def test_break_resume_token(self): @no_type_check def test_document_class(self): def run_test(doc_cls): - codecopts = CodecOptions(type_registry=TypeRegistry([ - UppercaseTextDecoder(), UndecipherableIntEncoder()]), - document_class=doc_cls) + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) self.create_targets(codec_options=codecopts) change_stream = self.change_stream() - doc = {'a': UndecipherableInt64Type(101), 'b': 'xyz'} + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} self.input_target.insert_one(doc) change = next(change_stream) self.assertIsInstance(change, doc_cls) - self.assertEqual(change['fullDocument']['a'], 101) - self.assertEqual(change['fullDocument']['b'], 'XYZ') + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") for doc_cls in [OrderedDict, RawBSONDocument]: run_test(doc_cls) -class TestCollectionChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_no_mmap @client_context.require_no_standalone @@ -827,16 +908,14 @@ def tearDown(self): self.input_target.drop() def create_targets(self, *args, **kwargs): - self.watched_target = self.db.get_collection( - 'test', *args, **kwargs) + self.watched_target = self.db.get_collection("test", *args, **kwargs) self.input_target = self.watched_target # Ensure the collection exists and is empty. self.input_target.insert_one({}) self.input_target.delete_many({}) -class TestDatabaseChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) @client_context.require_no_mmap @@ -850,15 +929,13 @@ def tearDown(self): self.client.drop_database(self.watched_target) def create_targets(self, *args, **kwargs): - self.watched_target = self.client.get_database( - self.db.name, *args, **kwargs) + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) self.input_target = self.watched_target.test # Insert a record to ensure db, coll are created. - self.input_target.insert_one({'data': 'dummy'}) + self.input_target.insert_one({"data": "dummy"}) -class TestClusterChangeStreamsWCustomTypes( - IntegrationTest, ChangeStreamsWCustomTypesTestMixin): +class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) @client_context.require_no_mmap @@ -872,15 +949,15 @@ def tearDown(self): self.client.drop_database(self.db) def create_targets(self, *args, **kwargs): - codec_options = kwargs.pop('codec_options', None) + codec_options = kwargs.pop("codec_options", None) if codec_options: - kwargs['type_registry'] = codec_options.type_registry - kwargs['document_class'] = codec_options.document_class + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class self.watched_target = rs_client(*args, **kwargs) self.addCleanup(self.watched_target.close) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. - self.input_target.insert_one({'data': 'dummy'}) + self.input_target.insert_one({"data": "dummy"}) if __name__ == "__main__": diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 2954efe651..863b3a4f59 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -19,33 +19,37 @@ sys.path[0:0] = [""] -from pymongo.auth import MECHANISMS -from test import client_context, unittest, IntegrationTest +from test import IntegrationTest, client_context, unittest from test.crud_v2_format import TestCrudV2 from test.utils import ( - rs_client_noauth, rs_or_single_client, OvertCommandListener, TestCreator) + OvertCommandListener, + TestCreator, + rs_client_noauth, + rs_or_single_client, +) +from pymongo.auth import MECHANISMS # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "data_lake") +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") class TestDataLakeMustConnect(IntegrationTest): def test_connected_to_data_lake(self): - data_lake = os.environ.get('DATA_LAKE') + data_lake = os.environ.get("DATA_LAKE") if not data_lake: - self.skipTest('DATA_LAKE is not set') + self.skipTest("DATA_LAKE is not set") - self.assertTrue(client_context.is_data_lake, - 'client context.is_data_lake must be True when ' - 'DATA_LAKE is set') + self.assertTrue( + client_context.is_data_lake, + "client context.is_data_lake must be True when " "DATA_LAKE is set", + ) class TestDataLakeProse(IntegrationTest): # Default test database and collection names. - TEST_DB = 'test' - TEST_COLLECTION = 'driverdata' + TEST_DB = "test" + TEST_COLLECTION = "driverdata" @classmethod @client_context.require_data_lake @@ -56,8 +60,7 @@ def setUpClass(cls): def test_1(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) - cursor = client[self.TEST_DB][self.TEST_COLLECTION].find( - {}, batch_size=2) + cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) next(cursor) # find command assertions @@ -69,13 +72,12 @@ def test_1(self): # killCursors command assertions cursor.close() started = listener.results["started"][-1] - self.assertEqual(started.command_name, 'killCursors') + self.assertEqual(started.command_name, "killCursors") succeeded = listener.results["succeeded"][-1] - self.assertEqual(succeeded.command_name, 'killCursors') + self.assertEqual(succeeded.command_name, "killCursors") self.assertIn(cursor_id, started.command["cursors"]) - target_ns = ".".join([started.command['$db'], - started.command['killCursors']]) + target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) self.assertEqual(cursor_ns, target_ns) self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) @@ -83,19 +85,19 @@ def test_1(self): # Test no auth def test_2(self): client = rs_client_noauth() - client.admin.command('ping') + client.admin.command("ping") # Test with auth def test_3(self): - for mechanism in ['SCRAM-SHA-1', 'SCRAM-SHA-256']: + for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: client = rs_or_single_client(authMechanism=mechanism) client[self.TEST_DB][self.TEST_COLLECTION].find_one() class DataLakeTestSpec(TestCrudV2): # Default test database and collection names. - TEST_DB = 'test' - TEST_COLLECTION = 'driverdata' + TEST_DB = "test" + TEST_COLLECTION = "driverdata" @classmethod @client_context.require_data_lake diff --git a/test/test_database.py b/test/test_database.py index 096eb5b979..9a08d971db 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -21,43 +21,44 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.test_custom_types import DECIMAL_CODECOPTS +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + DeprecationFilter, + OvertCommandListener, + ignore_deprecations, + rs_or_single_client, + server_started_with_auth, + wait_until, +) + from bson.codec_options import CodecOptions -from bson.int64 import Int64 -from bson.regex import Regex from bson.dbref import DBRef +from bson.int64 import Int64 from bson.objectid import ObjectId +from bson.regex import Regex from bson.son import SON -from pymongo import (auth, - helpers) +from pymongo import auth, helpers from pymongo.collection import Collection from pymongo.database import Database -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - ExecutionTimeout, - InvalidName, - OperationFailure, - WriteConcernError) +from pymongo.errors import ( + CollectionInvalid, + ConfigurationError, + ExecutionTimeout, + InvalidName, + OperationFailure, + WriteConcernError, +) from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - SkipTest, - unittest, - IntegrationTest) -from test.utils import (ignore_deprecations, - rs_or_single_client, - server_started_with_auth, - wait_until, - DeprecationFilter, - IMPOSSIBLE_WRITE_CONCERN, - OvertCommandListener) -from test.test_custom_types import DECIMAL_CODECOPTS class TestDatabaseNoConnect(unittest.TestCase): - """Test Database features on a client that does not connect. - """ + """Test Database features on a client that does not connect.""" + client: MongoClient @classmethod @@ -69,18 +70,17 @@ def test_name(self): self.assertRaises(InvalidName, Database, self.client, "my db") self.assertRaises(InvalidName, Database, self.client, 'my"db') self.assertRaises(InvalidName, Database, self.client, "my\x00db") - self.assertRaises(InvalidName, Database, - self.client, "my\u0000db") + self.assertRaises(InvalidName, Database, self.client, "my\u0000db") self.assertEqual("name", Database(self.client, "name").name) def test_get_collection(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) - read_concern = ReadConcern('majority') + read_concern = ReadConcern("majority") coll = self.client.pymongo_test.get_collection( - 'foo', codec_options, ReadPreference.SECONDARY, write_concern, - read_concern) - self.assertEqual('foo', coll.name) + "foo", codec_options, ReadPreference.SECONDARY, write_concern, read_concern + ) + self.assertEqual("foo", coll.name) self.assertEqual(codec_options, coll.codec_options) self.assertEqual(ReadPreference.SECONDARY, coll.read_preference) self.assertEqual(write_concern, coll.write_concern) @@ -88,7 +88,7 @@ def test_get_collection(self): def test_getattr(self): db = self.client.pymongo_test - self.assertTrue(isinstance(db['_does_not_exist'], Collection)) + self.assertTrue(isinstance(db["_does_not_exist"], Collection)) with self.assertRaises(AttributeError) as context: db._does_not_exist @@ -96,24 +96,19 @@ def test_getattr(self): # Message should be: "AttributeError: Database has no attribute # '_does_not_exist'. To access the _does_not_exist collection, # use database['_does_not_exist']". - self.assertIn("has no attribute '_does_not_exist'", - str(context.exception)) + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): self.assertRaises(TypeError, next, self.client.pymongo_test) class TestDatabase(IntegrationTest): - def test_equality(self): - self.assertNotEqual(Database(self.client, "test"), - Database(self.client, "mike")) - self.assertEqual(Database(self.client, "test"), - Database(self.client, "test")) + self.assertNotEqual(Database(self.client, "test"), Database(self.client, "mike")) + self.assertEqual(Database(self.client, "test"), Database(self.client, "test")) # Explicitly test inequality - self.assertFalse(Database(self.client, "test") != - Database(self.client, "test")) + self.assertFalse(Database(self.client, "test") != Database(self.client, "test")) def test_hashable(self): self.assertIn(self.client.test, {Database(self.client, "test")}) @@ -126,9 +121,10 @@ def test_get_coll(self): self.assertEqual(db.test.mike, db["test.mike"]) def test_repr(self): - self.assertEqual(repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, - repr("pymongo_test"))) + self.assertEqual( + repr(Database(self.client, "pymongo_test")), + "Database(%r, %s)" % (self.client, repr("pymongo_test")), + ) def test_create_collection(self): db = Database(self.client, "pymongo_test") @@ -165,7 +161,8 @@ def test_list_collection_names(self): db.systemcoll.test.insert_one({}) no_system_collections = db.list_collection_names( - filter={"name": {"$regex": r"^(?!system\.)"}}) + filter={"name": {"$regex": r"^(?!system\.)"}} + ) for coll in no_system_collections: self.assertTrue(not coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) @@ -192,8 +189,7 @@ def test_list_collection_names_filter(self): self.addCleanup(client.drop_database, db.name) # Should not send nameOnly. - for filter in ({'options.capped': True}, - {'options.capped': True, 'name': 'capped'}): + for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): results.clear() names = db.list_collection_names(filter=filter) self.assertEqual(names, ["capped"]) @@ -201,7 +197,7 @@ def test_list_collection_names_filter(self): # Should send nameOnly (except on 2.6). filter: Any - for filter in (None, {}, {'name': {'$in': ['capped', 'non_capped']}}): + for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): results.clear() names = db.list_collection_names(filter=filter) self.assertIn("capped", names) @@ -239,8 +235,10 @@ def test_list_collections(self): coll_cnt: dict = {} # Checking if is there any collection which don't exists. - if (len(set(colls) - set(["test","test.mike"])) == 0 or - len(set(colls) - set(["test","test.mike","system.indexes"])) == 0): + if ( + len(set(colls) - set(["test", "test.mike"])) == 0 + or len(set(colls) - set(["test", "test.mike", "system.indexes"])) == 0 + ): self.assertTrue(True) else: self.assertTrue(False) @@ -254,7 +252,7 @@ def test_list_collections(self): db.drop_collection("test") db.create_collection("test", capped=True, size=4096) - results = db.list_collections(filter={'options.capped': True}) + results = db.list_collections(filter={"options.capped": True}) colls = [result["name"] for result in results] # Checking only capped collections are present @@ -277,8 +275,10 @@ def test_list_collections(self): coll_cnt = {} # Checking if is there any collection which don't exists. - if (len(set(colls) - set(["test"])) == 0 or - len(set(colls) - set(["test","system.indexes"])) == 0): + if ( + len(set(colls) - set(["test"])) == 0 + or len(set(colls) - set(["test", "system.indexes"])) == 0 + ): self.assertTrue(True) else: self.assertTrue(False) @@ -287,13 +287,13 @@ def test_list_collections(self): def test_list_collection_names_single_socket(self): client = rs_or_single_client(maxPoolSize=1) - client.drop_database('test_collection_names_single_socket') + client.drop_database("test_collection_names_single_socket") db = client.test_collection_names_single_socket for i in range(200): db.create_collection(str(i)) db.list_collection_names() # Must not hang. - client.drop_database('test_collection_names_single_socket') + client.drop_database("test_collection_names_single_socket") def test_drop_collection(self): db = Database(self.client, "pymongo_test") @@ -325,10 +325,9 @@ def test_drop_collection(self): db.drop_collection(db.test.doesnotexist) if client_context.is_rs: - db_wc = Database(self.client, 'pymongo_test', - write_concern=IMPOSSIBLE_WRITE_CONCERN) + db_wc = Database(self.client, "pymongo_test", write_concern=IMPOSSIBLE_WRITE_CONCERN) with self.assertRaises(WriteConcernError): - db_wc.drop_collection('test') + db_wc.drop_collection("test") def test_validate_collection(self): db = self.client.pymongo_test @@ -338,10 +337,8 @@ def test_validate_collection(self): db.test.insert_one({"dummy": "object"}) - self.assertRaises(OperationFailure, db.validate_collection, - "test.doesnotexist") - self.assertRaises(OperationFailure, db.validate_collection, - db.test.doesnotexist) + self.assertRaises(OperationFailure, db.validate_collection, "test.doesnotexist") + self.assertRaises(OperationFailure, db.validate_collection, db.test.doesnotexist) self.assertTrue(db.validate_collection("test")) self.assertTrue(db.validate_collection(db.test)) @@ -357,10 +354,9 @@ def test_validate_collection_background(self): coll = db.test self.assertTrue(db.validate_collection(coll, background=False)) # The inMemory storage engine does not support background=True. - if client_context.storage_engine != 'inMemory': + if client_context.storage_engine != "inMemory": self.assertTrue(db.validate_collection(coll, background=True)) - self.assertTrue( - db.validate_collection(coll, scandata=True, background=True)) + self.assertTrue(db.validate_collection(coll, scandata=True, background=True)) # The server does not support background=True with full=True. # Assert that we actually send the background option by checking # that this combination fails. @@ -381,24 +377,25 @@ def test_command(self): def test_command_with_regex(self): db = self.client.pymongo_test db.test.drop() - db.test.insert_one({'r': re.compile('.*')}) - db.test.insert_one({'r': Regex('.*')}) + db.test.insert_one({"r": re.compile(".*")}) + db.test.insert_one({"r": Regex(".*")}) - result = db.command('aggregate', 'test', pipeline=[], cursor={}) - for doc in result['cursor']['firstBatch']: - self.assertTrue(isinstance(doc['r'], Regex)) + result = db.command("aggregate", "test", pipeline=[], cursor={}) + for doc in result["cursor"]["firstBatch"]: + self.assertTrue(isinstance(doc["r"], Regex)) def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, 5) self.assertRaises(TypeError, auth._password_digest, True) self.assertRaises(TypeError, auth._password_digest, None) - self.assertTrue(isinstance(auth._password_digest("mike", "password"), - str)) - self.assertEqual(auth._password_digest("mike", "password"), - "cd7e45b3b2767dc2fa9b6b548457ed00") - self.assertEqual(auth._password_digest("Gustave", "Dor\xe9"), - "81e0e2364499209f466e75926a162d73") + self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) + self.assertEqual( + auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" + ) + self.assertEqual( + auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73" + ) def test_id_ordering(self): # PyMongo attempts to have _id show up first @@ -409,11 +406,11 @@ def test_id_ordering(self): # with hash randomization enabled (e.g. tox). db = self.client.pymongo_test db.test.drop() - db.test.insert_one(SON([("hello", "world"), - ("_id", 5)])) + db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON)) + "pymongo_test", codec_options=CodecOptions(document_class=SON) + ) cursor = db.test.find() for x in cursor: for (k, v) in x.items(): @@ -432,10 +429,8 @@ def test_deref(self): obj = {"x": True} key = db.test.insert_one(obj).inserted_id self.assertEqual(obj, db.dereference(DBRef("test", key))) - self.assertEqual(obj, - db.dereference(DBRef("test", key, "pymongo_test"))) - self.assertRaises(ValueError, - db.dereference, DBRef("test", key, "foo")) + self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) + self.assertRaises(ValueError, db.dereference, DBRef("test", key, "foo")) self.assertEqual(None, db.dereference(DBRef("test", 4))) obj = {"_id": 4} @@ -448,10 +443,11 @@ def test_deref_kwargs(self): db.test.insert_one({"_id": 4, "foo": "bar"}) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON)) - self.assertEqual(SON([("foo", "bar")]), - db.dereference(DBRef("test", 4), - projection={"_id": False})) + "pymongo_test", codec_options=CodecOptions(document_class=SON) + ) + self.assertEqual( + SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) + ) # TODO some of these tests belong in the collection level testing. def test_insert_find_one(self): @@ -486,12 +482,12 @@ def test_long(self): db = self.client.pymongo_test db.test.drop() db.test.insert_one({"x": 9223372036854775807}) - retrieved = db.test.find_one()['x'] # type: ignore + retrieved = db.test.find_one()["x"] # type: ignore self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) db.test.delete_many({}) db.test.insert_one({"x": Int64(1)}) - retrieved = db.test.find_one()['x'] # type: ignore + retrieved = db.test.find_one()["x"] # type: ignore self.assertEqual(Int64(1), retrieved) self.assertIsInstance(retrieved, Int64) @@ -533,11 +529,10 @@ def test_command_response_without_ok(self): # Sometimes (SERVER-10891) the server's response to a badly-formatted # command document will have no 'ok' field. We should raise # OperationFailure instead of KeyError. - self.assertRaises(OperationFailure, - helpers._check_command_response, {}, None) + self.assertRaises(OperationFailure, helpers._check_command_response, {}, None) try: - helpers._check_command_response({'$err': 'foo'}, None) + helpers._check_command_response({"$err": "foo"}, None) except OperationFailure as e: self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") else: @@ -545,64 +540,59 @@ def test_command_response_without_ok(self): def test_mongos_response(self): error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {'ok': 0, 'errmsg': 'inner'}}} + "ok": 0, + "errmsg": "outer", + "raw": {"shard0/host0,host1": {"ok": 0, "errmsg": "inner"}}, + } with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document, None) - self.assertIn('inner', str(context.exception)) + self.assertIn("inner", str(context.exception)) # If a shard has no primary and you run a command like dbstats, which # cannot be run on a secondary, mongos's response includes empty "raw" # errors. See SERVER-15428. - error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {}}} + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document, None) - self.assertIn('outer', str(context.exception)) + self.assertIn("outer", str(context.exception)) # Raw error has ok: 0 but no errmsg. Not a known case, but test it. - error_document = { - 'ok': 0, - 'errmsg': 'outer', - 'raw': {'shard0/host0,host1': {'ok': 0}}} + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} with self.assertRaises(OperationFailure) as context: helpers._check_command_response(error_document, None) - self.assertIn('outer', str(context.exception)) + self.assertIn("outer", str(context.exception)) @client_context.require_test_commands @client_context.require_no_mongos def test_command_max_time_ms(self): - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="alwaysOn") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: db = self.client.pymongo_test - db.command('count', 'test') - self.assertRaises(ExecutionTimeout, db.command, - 'count', 'test', maxTimeMS=1) - pipeline = [{'$project': {'name': 1, 'count': 1}}] + db.command("count", "test") + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) + pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. - db.command('aggregate', 'test', pipeline=pipeline, cursor={}) - self.assertRaises(ExecutionTimeout, db.command, - 'aggregate', 'test', - pipeline=pipeline, cursor={}, maxTimeMS=1) + db.command("aggregate", "test", pipeline=pipeline, cursor={}) + self.assertRaises( + ExecutionTimeout, + db.command, + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) # Collection helper. db.test.aggregate(pipeline=pipeline) - self.assertRaises(ExecutionTimeout, - db.test.aggregate, pipeline, maxTimeMS=1) + self.assertRaises(ExecutionTimeout, db.test.aggregate, pipeline, maxTimeMS=1) finally: - self.client.admin.command("configureFailPoint", - "maxTimeAlwaysTimeOut", - mode="off") + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") def test_with_options(self): codec_options = DECIMAL_CODECOPTS @@ -611,13 +601,22 @@ def test_with_options(self): read_concern = ReadConcern(level="majority") # List of all options to compare. - allopts = ['name', 'client', 'codec_options', - 'read_preference', 'write_concern', 'read_concern'] + allopts = [ + "name", + "client", + "codec_options", + "read_preference", + "write_concern", + "read_concern", + ] db1 = self.client.get_database( - 'with_options_test', codec_options=codec_options, - read_preference=read_preference, write_concern=write_concern, - read_concern=read_concern) + "with_options_test", + codec_options=codec_options, + read_preference=read_preference, + write_concern=write_concern, + read_concern=read_concern, + ) # Case 1: swap no options db2 = db1.with_options() @@ -625,22 +624,25 @@ def test_with_options(self): self.assertEqual(getattr(db1, opt), getattr(db2, opt)) # Case 2: swap all options - newopts = {'codec_options': CodecOptions(), - 'read_preference': ReadPreference.PRIMARY, - 'write_concern': WriteConcern(w=1), - 'read_concern': ReadConcern(level="local")} + newopts = { + "codec_options": CodecOptions(), + "read_preference": ReadPreference.PRIMARY, + "write_concern": WriteConcern(w=1), + "read_concern": ReadConcern(level="local"), + } db2 = db1.with_options(**newopts) # type: ignore[arg-type] for opt in newopts: - self.assertEqual( - getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) + self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) class TestDatabaseAggregation(IntegrationTest): def setUp(self): - self.pipeline: List[Mapping[str, Any]] = [{"$listLocalSessions": {}}, - {"$limit": 1}, - {"$addFields": {"dummy": "dummy field"}}, - {"$project": {"_id": 0, "dummy": 1}}] + self.pipeline: List[Mapping[str, Any]] = [ + {"$listLocalSessions": {}}, + {"$limit": 1}, + {"$addFields": {"dummy": "dummy field"}}, + {"$project": {"_id": 0, "dummy": 1}}, + ] self.result = {"dummy": "dummy field"} self.admin = self.client.admin @@ -660,8 +662,7 @@ def test_database_aggregation_fake_cursor(self): # SERVER-43287 disallows writing with $out to the admin db, use # $merge instead. db_name = "pymongo_test" - write_stage = { - "$merge": {"into": {"db": db_name, "coll": coll_name}}} + write_stage = {"$merge": {"into": {"db": db_name, "coll": coll_name}}} output_coll = self.client[db_name][coll_name] output_coll.drop() self.addCleanup(output_coll.drop) diff --git a/test/test_dbref.py b/test/test_dbref.py index 348b1d14de..8e98bd8ce5 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -17,14 +17,15 @@ import pickle import sys from typing import Any + sys.path[0:0] = [""] -from bson import encode, decode -from bson.dbref import DBRef -from bson.objectid import ObjectId +from copy import deepcopy from test import unittest -from copy import deepcopy +from bson import decode, encode +from bson.dbref import DBRef +from bson.objectid import ObjectId class TestDBRef(unittest.TestCase): @@ -57,53 +58,45 @@ def bar(): self.assertRaises(AttributeError, bar) def test_repr(self): - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef('coll', ObjectId('1234567890abcdef12345678'))") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" - % (repr('coll'),) - ) - self.assertEqual(repr(DBRef("coll", 5, foo="bar")), - "DBRef('coll', 5, foo='bar')") - self.assertEqual(repr(DBRef("coll", - ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " - "'foo')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef('coll', ObjectId('1234567890abcdef12345678'))", + ) + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), + "DBRef(%s, ObjectId('1234567890abcdef12345678'))" % (repr("coll"),), + ) + self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") + self.assertEqual( + repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), + "DBRef('coll', ObjectId('1234567890abcdef12345678'), " "'foo')", + ) def test_equality(self): obj_id = ObjectId("1234567890abcdef12345678") - self.assertEqual(DBRef('foo', 5), DBRef('foo', 5)) + self.assertEqual(DBRef("foo", 5), DBRef("foo", 5)) self.assertEqual(DBRef("coll", obj_id), DBRef("coll", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef("coll", obj_id, "foo")) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", obj_id, "foo")) self.assertNotEqual(DBRef("coll", obj_id), DBRef("col", obj_id)) - self.assertNotEqual(DBRef("coll", obj_id), - DBRef("coll", ObjectId(b"123456789011"))) + self.assertNotEqual(DBRef("coll", obj_id), DBRef("coll", ObjectId(b"123456789011"))) self.assertNotEqual(DBRef("coll", obj_id), 4) - self.assertNotEqual(DBRef("coll", obj_id, "foo"), - DBRef("coll", obj_id, "bar")) + self.assertNotEqual(DBRef("coll", obj_id, "foo"), DBRef("coll", obj_id, "bar")) # Explicitly test inequality - self.assertFalse(DBRef('foo', 5) != DBRef('foo', 5)) + self.assertFalse(DBRef("foo", 5) != DBRef("foo", 5)) self.assertFalse(DBRef("coll", obj_id) != DBRef("coll", obj_id)) - self.assertFalse(DBRef("coll", obj_id, "foo") != - DBRef("coll", obj_id, "foo")) + self.assertFalse(DBRef("coll", obj_id, "foo") != DBRef("coll", obj_id, "foo")) def test_kwargs(self): - self.assertEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="bar")) + self.assertEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="bar")) self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5)) - self.assertNotEqual(DBRef("coll", 5, foo="bar"), - DBRef("coll", 5, foo="baz")) + self.assertNotEqual(DBRef("coll", 5, foo="bar"), DBRef("coll", 5, foo="baz")) self.assertEqual("bar", DBRef("coll", 5, foo="bar").foo) - self.assertRaises(AttributeError, getattr, - DBRef("coll", 5, foo="bar"), "bar") + self.assertRaises(AttributeError, getattr, DBRef("coll", 5, foo="bar"), "bar") def test_deepcopy(self): - a = DBRef('coll', 'asdf', 'db', x=[1]) + a = DBRef("coll", "asdf", "db", x=[1]) b = deepcopy(a) self.assertEqual(a, b) @@ -116,19 +109,19 @@ def test_deepcopy(self): self.assertEqual(b.x, [2]) def test_pickling(self): - dbr = DBRef('coll', 5, foo='bar') + dbr = DBRef("coll", 5, foo="bar") for protocol in [0, 1, 2, -1]: pkl = pickle.dumps(dbr, protocol=protocol) dbr2 = pickle.loads(pkl) self.assertEqual(dbr, dbr2) def test_dbref_hash(self): - dbref_1a = DBRef('collection', 'id', 'database') - dbref_1b = DBRef('collection', 'id', 'database') + dbref_1a = DBRef("collection", "id", "database") + dbref_1b = DBRef("collection", "id", "database") self.assertEqual(hash(dbref_1a), hash(dbref_1b)) - dbref_2a = DBRef('collection', 'id', 'database', custom='custom') - dbref_2b = DBRef('collection', 'id', 'database', custom='custom') + dbref_2a = DBRef("collection", "id", "database", custom="custom") + dbref_2b = DBRef("collection", "id", "database", custom="custom") self.assertEqual(hash(dbref_2a), hash(dbref_2b)) self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) @@ -158,12 +151,12 @@ def test_decoding_1_2_3(self): {"foo": 1, "$ref": "coll0", "$id": 1, "$db": "db0", "bar": 1}, ]: with self.subTest(doc=doc): - decoded = decode(encode({'dbref': doc})) - dbref = decoded['dbref'] + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] self.assertIsInstance(dbref, DBRef) - self.assertEqual(dbref.collection, doc['$ref']) - self.assertEqual(dbref.id, doc['$id']) - self.assertEqual(dbref.database, doc.get('$db')) + self.assertEqual(dbref.collection, doc["$ref"]) + self.assertEqual(dbref.id, doc["$id"]) + self.assertEqual(dbref.database, doc.get("$db")) for extra in set(doc.keys()) - {"$ref", "$id", "$db"}: self.assertEqual(getattr(dbref, extra), doc[extra]) @@ -180,8 +173,8 @@ def test_decoding_4_5(self): {"$ref": "coll0", "$id": 1, "$db": 1}, ]: with self.subTest(doc=doc): - decoded = decode(encode({'dbref': doc})) - dbref = decoded['dbref'] + decoded = decode(encode({"dbref": doc})) + dbref = decoded["dbref"] self.assertIsInstance(dbref, dict) def test_encoding_1_2(self): @@ -201,9 +194,9 @@ def test_encoding_1_2(self): ]: with self.subTest(doc=doc): # Decode the test input to a DBRef via a BSON roundtrip. - encoded_doc = encode({'dbref': doc}) + encoded_doc = encode({"dbref": doc}) decoded = decode(encoded_doc) - dbref = decoded['dbref'] + dbref = decoded["dbref"] self.assertIsInstance(dbref, DBRef) # Encode the DBRef. encoded_dbref = encode(decoded) @@ -224,9 +217,9 @@ def test_encoding_3(self): ]: with self.subTest(doc=doc): # Decode the test input to a DBRef via a BSON roundtrip. - encoded_doc = encode({'dbref': doc}) + encoded_doc = encode({"dbref": doc}) decoded = decode(encoded_doc) - dbref = decoded['dbref'] + dbref = decoded["dbref"] self.assertIsInstance(dbref, DBRef) # Encode the DBRef. encoded_dbref = encode(decoded) diff --git a/test/test_decimal128.py b/test/test_decimal128.py index 3988a4559a..b46f94f594 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -16,41 +16,39 @@ import pickle import sys - from decimal import Decimal sys.path[0:0] = [""] -from bson.decimal128 import Decimal128, create_decimal128_context from test import client_context, unittest -class TestDecimal128(unittest.TestCase): +from bson.decimal128 import Decimal128, create_decimal128_context + +class TestDecimal128(unittest.TestCase): @client_context.require_connection def test_round_trip(self): coll = client_context.client.pymongo_test.test coll.drop() - dec128 = Decimal128.from_bid( - b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') - coll.insert_one({'dec128': dec128}) - doc = coll.find_one({'dec128': dec128}) + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") + coll.insert_one({"dec128": dec128}) + doc = coll.find_one({"dec128": dec128}) assert doc is not None self.assertIsNotNone(doc) - self.assertEqual(doc['dec128'], dec128) + self.assertEqual(doc["dec128"], dec128) def test_pickle(self): - dec128 = Decimal128.from_bid( - b'\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0') + dec128 = Decimal128.from_bid(b"\x00@cR\xbf\xc6\x01\x00\x00\x00\x00\x00\x00\x00\x1c0") for protocol in range(pickle.HIGHEST_PROTOCOL + 1): pkl = pickle.dumps(dec128, protocol=protocol) self.assertEqual(dec128, pickle.loads(pkl)) def test_special(self): - dnan = Decimal('NaN') - dnnan = Decimal('-NaN') - dsnan = Decimal('sNaN') - dnsnan = Decimal('-sNaN') + dnan = Decimal("NaN") + dnnan = Decimal("-NaN") + dsnan = Decimal("sNaN") + dnsnan = Decimal("-sNaN") dnan128 = Decimal128(dnan) dnnan128 = Decimal128(dnnan) dsnan128 = Decimal128(dsnan) @@ -70,5 +68,5 @@ def test_decimal128_context(self): self.assertEqual("0E-6176", str(ctx.copy().create_decimal("1E-6177"))) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index c3a50709ac..51b168b0a0 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -21,40 +21,41 @@ sys.path[0:0] = [""] -from bson import json_util, Timestamp -from pymongo import (common, - monitoring) -from pymongo.errors import (AutoReconnect, - ConfigurationError, - NetworkTimeout, - NotPrimaryError, - OperationFailure) -from pymongo.helpers import (_check_command_response, - _check_write_command_response) +from test import IntegrationTest, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import ( + CMAPListener, + HeartbeatEventListener, + TestCreator, + assertion_context, + client_context, + get_pool, + rs_or_single_client, + server_name_to_type, + single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner, SpecRunnerThread + +from bson import Timestamp, json_util +from pymongo import common, monitoring +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) from pymongo.hello import Hello, HelloCompat -from pymongo.server_description import ServerDescription, SERVER_TYPE +from pymongo.helpers import _check_command_response, _check_write_command_response +from pymongo.server_description import SERVER_TYPE, ServerDescription from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.uri_parser import parse_uri -from test import unittest, IntegrationTest -from test.utils import (assertion_context, - CMAPListener, - client_context, - get_pool, - HeartbeatEventListener, - server_name_to_type, - rs_or_single_client, - single_client, - TestCreator, - wait_until) -from test.utils_spec_runner import SpecRunner, SpecRunnerThread -from test.pymongo_mocks import DummyMonitor - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'discovery_and_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") def create_mock_topology(uri, monitor_class=DummyMonitor): @@ -62,19 +63,20 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): replica_set_name = None direct_connection = None load_balanced = None - if 'replicaset' in parsed_uri['options']: - replica_set_name = parsed_uri['options']['replicaset'] - if 'directConnection' in parsed_uri['options']: - direct_connection = parsed_uri['options']['directConnection'] - if 'loadBalanced' in parsed_uri['options']: - load_balanced = parsed_uri['options']['loadBalanced'] + if "replicaset" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaset"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] topology_settings = TopologySettings( - parsed_uri['nodelist'], + parsed_uri["nodelist"], replica_set_name=replica_set_name, monitor_class=monitor_class, direct_connection=direct_connection, - load_balanced=load_balanced) + load_balanced=load_balanced, + ) c = Topology(topology_settings) c.open() @@ -82,43 +84,42 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): def got_hello(topology, server_address, hello_response): - server_description = ServerDescription( - server_address, Hello(hello_response), 0) + server_description = ServerDescription(server_address, Hello(hello_response), 0) topology.on_change(server_description) def got_app_error(topology, app_error): - server_address = common.partition_node(app_error['address']) + server_address = common.partition_node(app_error["address"]) server = topology.get_server_by_address(server_address) - error_type = app_error['type'] - generation = app_error.get( - 'generation', server.pool.gen.get_overall()) - when = app_error['when'] - max_wire_version = app_error['maxWireVersion'] + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] # XXX: We could get better test coverage by mocking the errors on the # Pool/SocketInfo. try: - if error_type == 'command': - _check_command_response(app_error['response'], max_wire_version) - _check_write_command_response(app_error['response']) - elif error_type == 'network': - raise AutoReconnect('mock non-timeout network error') - elif error_type == 'timeout': - raise NetworkTimeout('mock network timeout error') + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") else: - raise AssertionError('unknown error type: %s' % (error_type,)) + raise AssertionError("unknown error type: %s" % (error_type,)) assert False except (AutoReconnect, NotPrimaryError, OperationFailure) as e: - if when == 'beforeHandshakeCompletes': + if when == "beforeHandshakeCompletes": completed_handshake = False - elif when == 'afterHandshakeCompletes': + elif when == "afterHandshakeCompletes": completed_handshake = True else: - assert False, 'Unknown when field %s' % (when,) + assert False, "Unknown when field %s" % (when,) topology.handle_error( - server_address, _ErrorContext(e, max_wire_version, generation, - completed_handshake, None)) + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) def get_type(topology, hostname): @@ -139,14 +140,12 @@ def server_type_name(server_type): def check_outcome(self, topology, outcome): - expected_servers = outcome['servers'] + expected_servers = outcome["servers"] # Check weak equality before proceeding. - self.assertEqual( - len(topology.description.server_descriptions()), - len(expected_servers)) + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) - if outcome.get('compatible') is False: + if outcome.get("compatible") is False: with self.assertRaises(ConfigurationError): topology.description.check_compatible() else: @@ -160,64 +159,58 @@ def check_outcome(self, topology, outcome): self.assertTrue(topology.has_server(node)) actual_server = topology.get_server_by_address(node) actual_server_description = actual_server.description - expected_server_type = server_name_to_type(expected_server['type']) + expected_server_type = server_name_to_type(expected_server["type"]) self.assertEqual( server_type_name(expected_server_type), - server_type_name(actual_server_description.server_type)) + server_type_name(actual_server_description.server_type), + ) - self.assertEqual( - expected_server.get('setName'), - actual_server_description.replica_set_name) + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) - self.assertEqual( - expected_server.get('setVersion'), - actual_server_description.set_version) + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) - self.assertEqual( - expected_server.get('electionId'), - actual_server_description.election_id) + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) self.assertEqual( - expected_server.get('topologyVersion'), - actual_server_description.topology_version) + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) - expected_pool = expected_server.get('pool') + expected_pool = expected_server.get("pool") if expected_pool: - self.assertEqual( - expected_pool.get('generation'), - actual_server.pool.gen.get_overall()) + self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) - self.assertEqual(outcome['setName'], topology.description.replica_set_name) - self.assertEqual(outcome.get('logicalSessionTimeoutMinutes'), - topology.description.logical_session_timeout_minutes) + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) - expected_topology_type = getattr(TOPOLOGY_TYPE, outcome['topologyType']) - self.assertEqual(topology_type_name(expected_topology_type), - topology_type_name(topology.description.topology_type)) + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) - self.assertEqual(outcome.get('maxSetVersion'), - topology.description.max_set_version) - self.assertEqual(outcome.get('maxElectionId'), - topology.description.max_election_id) + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) def create_test(scenario_def): def run_scenario(self): - c = create_mock_topology(scenario_def['uri']) + c = create_mock_topology(scenario_def["uri"]) - for i, phase in enumerate(scenario_def['phases']): + for i, phase in enumerate(scenario_def["phases"]): # Including the phase description makes failures easier to debug. - description = phase.get('description', str(i)) - with assertion_context('phase: %s' % (description,)): - for response in phase.get('responses', []): - got_hello( - c, common.partition_node(response[0]), response[1]) + description = phase.get("description", str(i)) + with assertion_context("phase: %s" % (description,)): + for response in phase.get("responses", []): + got_hello(c, common.partition_node(response[0]), response[1]) - for app_error in phase.get('applicationErrors', []): + for app_error in phase.get("applicationErrors", []): got_app_error(c, app_error) - check_outcome(self, c, phase['outcome']) + check_outcome(self, c, phase["outcome"]) return run_scenario @@ -232,8 +225,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -244,17 +236,16 @@ def create_tests(): class TestClusterTimeComparison(unittest.TestCase): def test_cluster_time_comparison(self): - t = create_mock_topology('mongodb://host') + t = create_mock_topology("mongodb://host") def send_cluster_time(time, inc, should_update): old = t.max_cluster_time() - new = {'clusterTime': Timestamp(time, inc)} - got_hello(t, - ('host', 27017), - {'ok': 1, - 'minWireVersion': 0, - 'maxWireVersion': 6, - '$clusterTime': new}) + new = {"clusterTime": Timestamp(time, inc)} + got_hello( + t, + ("host", 27017), + {"ok": 1, "minWireVersion": 0, "maxWireVersion": 6, "$clusterTime": new}, + ) actual = t.max_cluster_time() if should_update: @@ -270,7 +261,6 @@ def send_cluster_time(time, inc, should_update): class TestIgnoreStaleErrors(IntegrationTest): - def test_ignore_stale_connection_errors(self): N_THREADS = 5 barrier = threading.Barrier(N_THREADS, timeout=30) @@ -278,22 +268,22 @@ def test_ignore_stale_connection_errors(self): self.addCleanup(client.close) # Wait for initial discovery. - client.admin.command('ping') + client.admin.command("ping") pool = get_pool(client) starting_generation = pool.gen.get_overall() - wait_until(lambda: len(pool.sockets) == N_THREADS, 'created sockets') + wait_until(lambda: len(pool.sockets) == N_THREADS, "created sockets") def mock_command(*args, **kwargs): # Synchronize all threads to ensure they use the same generation. barrier.wait() - raise AutoReconnect('mock SocketInfo.command error') + raise AutoReconnect("mock SocketInfo.command error") for sock in pool.sockets: sock.command = mock_command def insert_command(i): try: - client.test.command('insert', 'test', documents=[{'i': i}]) + client.test.command("insert", "test", documents=[{"i": i}]) except AutoReconnect as exc: pass @@ -306,11 +296,10 @@ def insert_command(i): t.join() # Expect a single pool reset for the network error - self.assertEqual( - starting_generation+1, pool.gen.get_overall()) + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) # Server should be selectable. - client.admin.command('ping') + client.admin.command("ping") class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): @@ -322,52 +311,52 @@ class TestPoolManagement(IntegrationTest): def test_pool_unpause(self): # This test implements the prose test "Connection Pool Management" listener = CMAPHeartbeatListener() - client = single_client(appName="SDAMPoolManagementTest", - heartbeatFrequencyMS=500, - event_listeners=[listener]) + client = single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) self.addCleanup(client.close) # Assert that ConnectionPoolReadyEvent occurs after the first # ServerHeartbeatSucceededEvent. listener.wait_for_event(monitoring.PoolReadyEvent, 1) pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] - hb_succeeded = listener.events_by_type( - monitoring.ServerHeartbeatSucceededEvent)[0] - self.assertGreater( - listener.events.index(pool_ready), - listener.events.index(hb_succeeded)) + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) listener.reset() fail_hello = { - 'mode': {'times': 2}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'errorCode': 1234, - 'appName': 'SDAMPoolManagementTest', + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", }, } with self.fail_point(fail_hello): listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) listener.wait_for_event(monitoring.PoolClearedEvent, 1) - listener.wait_for_event( - monitoring.ServerHeartbeatSucceededEvent, 1) + listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) listener.wait_for_event(monitoring.PoolReadyEvent, 1) class TestIntegration(SpecRunner): # Location of JSON test specifications. TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'discovery_and_monitoring_integration') + os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring_integration" + ) def _event_count(self, event): - if event == 'ServerMarkedUnknownEvent': + if event == "ServerMarkedUnknownEvent": + def marked_unknown(e): - return (isinstance(e, monitoring.ServerDescriptionChangedEvent) - and not e.new_description.is_server_type_known) + return ( + isinstance(e, monitoring.ServerDescriptionChangedEvent) + and not e.new_description.is_server_type_known + ) + assert self.server_listener is not None return len(self.server_listener.matching(marked_unknown)) # Only support CMAP events for now. - self.assertTrue(event.startswith('Pool') or event.startswith('Conn')) + self.assertTrue(event.startswith("Pool") or event.startswith("Conn")) event_type = getattr(monitoring, event) assert self.pool_listener is not None return self.pool_listener.event_count(event_type) @@ -377,50 +366,48 @@ def assert_event_count(self, event, count): Assert the given event was published exactly `count` times. """ - self.assertEqual(self._event_count(event), count, - 'expected %s not %r' % (count, event)) + self.assertEqual(self._event_count(event), count, "expected %s not %r" % (count, event)) def wait_for_event(self, event, count): """Run the waitForEvent test operation. Wait for a number of events to be published, or fail. """ - wait_until(lambda: self._event_count(event) >= count, - 'find %s %s event(s)' % (count, event)) + wait_until( + lambda: self._event_count(event) >= count, "find %s %s event(s)" % (count, event) + ) def configure_fail_point(self, fail_point): - """Run the configureFailPoint test operation. - """ + """Run the configureFailPoint test operation.""" self.set_fail_point(fail_point) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fail_point['configureFailPoint'], - 'mode': 'off'}) + self.addCleanup( + self.set_fail_point, + {"configureFailPoint": fail_point["configureFailPoint"], "mode": "off"}, + ) def run_admin_command(self, command, **kwargs): - """Run the runAdminCommand test operation. - """ + """Run the runAdminCommand test operation.""" self.client.admin.command(command, **kwargs) def record_primary(self): - """Run the recordPrimary test operation. - """ + """Run the recordPrimary test operation.""" self._previous_primary = self.scenario_client.primary def wait_for_primary_change(self, timeout_ms): - """Run the waitForPrimaryChange test operation. - """ + """Run the waitForPrimaryChange test operation.""" + def primary_changed(): primary = self.scenario_client.primary if primary is None: return False return primary != self._previous_primary - timeout = timeout_ms/1000.0 - wait_until(primary_changed, 'change primary', timeout=timeout) + + timeout = timeout_ms / 1000.0 + wait_until(primary_changed, "change primary", timeout=timeout) def wait(self, ms): - """Run the "wait" test operation. - """ - time.sleep(ms/1000.0) + """Run the "wait" test operation.""" + time.sleep(ms / 1000.0) def start_thread(self, name): """Run the 'startThread' thread operation.""" @@ -431,8 +418,7 @@ def start_thread(self, name): def run_on_thread(self, sessions, collection, name, operation): """Run the 'runOnThread' operation.""" thread = self.targets[name] - thread.schedule(lambda: self._run_op( - sessions, collection, operation, False)) + thread.schedule(lambda: self._run_op(sessions, collection, operation, False)) def wait_for_thread(self, name): """Run the 'waitForThread' operation.""" @@ -441,8 +427,7 @@ def wait_for_thread(self, name): thread.join(60) if thread.exc: raise thread.exc - self.assertFalse( - thread.is_alive(), 'Thread %s is still running' % (name,)) + self.assertFalse(thread.is_alive(), "Thread %s is still running" % (name,)) def create_spec_test(scenario_def, test, name): diff --git a/test/test_dns.py b/test/test_dns.py index 8404c2aa69..d47e115f41 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -21,18 +21,20 @@ sys.path[0:0] = [""] +from test import client_context, unittest +from test.utils import wait_until + from pymongo.common import validate_read_preference_tags -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.errors import ConfigurationError from pymongo.mongo_client import MongoClient +from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.uri_parser import parse_uri, split_hosts -from test import client_context, unittest -from test.utils import wait_until class TestDNSRepl(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'srv_seedlist', 'replica-set') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "replica-set" + ) load_balanced = False @client_context.require_replica_set @@ -41,8 +43,9 @@ def setUp(self): class TestDNSLoadBalanced(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'srv_seedlist', 'load-balanced') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "load-balanced" + ) load_balanced = True @client_context.require_load_balancer @@ -51,8 +54,7 @@ def setUp(self): class TestDNSSharded(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'srv_seedlist', 'sharded') + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "sharded") load_balanced = False @client_context.require_mongos @@ -61,77 +63,74 @@ def setUp(self): def create_test(test_case): - def run_test(self): if not _HAVE_DNSPYTHON: raise unittest.SkipTest("DNS tests require the dnspython module") - uri = test_case['uri'] - seeds = test_case.get('seeds') - num_seeds = test_case.get('numSeeds', len(seeds or [])) - hosts = test_case.get('hosts') + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") num_hosts = test_case.get("numHosts", len(hosts or [])) - options = test_case.get('options', {}) - if 'ssl' in options: - options['tls'] = options.pop('ssl') - parsed_options = test_case.get('parsed_options') + options = test_case.get("options", {}) + if "ssl" in options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. - needs_tls = not (options and (options.get('ssl') == False or - options.get('tls') == False)) + needs_tls = not (options and (options.get("ssl") == False or options.get("tls") == False)) if needs_tls and not client_context.tls: - self.skipTest('this test requires a TLS cluster') + self.skipTest("this test requires a TLS cluster") if not needs_tls and client_context.tls: - self.skipTest('this test requires a non-TLS cluster') + self.skipTest("this test requires a non-TLS cluster") if seeds: - seeds = split_hosts(','.join(seeds)) + seeds = split_hosts(",".join(seeds)) if hosts: - hosts = frozenset(split_hosts(','.join(hosts))) + hosts = frozenset(split_hosts(",".join(hosts))) if seeds or num_seeds: result = parse_uri(uri, validate=True) if seeds is not None: - self.assertEqual(sorted(result['nodelist']), sorted(seeds)) + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) if num_seeds is not None: - self.assertEqual(len(result['nodelist']), num_seeds) + self.assertEqual(len(result["nodelist"]), num_seeds) if options: - opts = result['options'] - if 'readpreferencetags' in opts: + opts = result["options"] + if "readpreferencetags" in opts: rpts = validate_read_preference_tags( - 'readPreferenceTags', opts.pop('readpreferencetags')) - opts['readPreferenceTags'] = rpts - self.assertEqual(result['options'], options) + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) if parsed_options: for opt, expected in parsed_options.items(): - if opt == 'user': - self.assertEqual(result['username'], expected) - elif opt == 'password': - self.assertEqual(result['password'], expected) - elif opt == 'auth_database' or opt == 'db': - self.assertEqual(result['database'], expected) + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) hostname = next(iter(client_context.client.nodes))[0] # The replica set members must be configured as 'localhost'. - if hostname == 'localhost': + if hostname == "localhost": copts = client_context.default_client_options.copy() # Remove tls since SRV parsing should add it automatically. - copts.pop('tls', None) + copts.pop("tls", None) if client_context.tls: # Our test certs don't support the SRV hosts used in these # tests. - copts['tlsAllowInvalidHostnames'] = True + copts["tlsAllowInvalidHostnames"] = True client = MongoClient(uri, **copts) if num_seeds is not None: - self.assertEqual(len(client._topology_settings.seeds), - num_seeds) + self.assertEqual(len(client._topology_settings.seeds), num_seeds) if hosts is not None: - wait_until( - lambda: hosts == client.nodes, - 'match test hosts to client nodes') + wait_until(lambda: hosts == client.nodes, "match test hosts to client nodes") if num_hosts is not None: - wait_until(lambda: num_hosts == len(client.nodes), - "wait to connect to num_hosts") + wait_until( + lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" + ) # XXX: we should block until SRV poller runs at least once # and re-run these assertions. else: @@ -146,11 +145,11 @@ def run_test(self): def create_tests(cls): - for filename in glob.glob(os.path.join(cls.TEST_PATH, '*.json')): + for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as dns_test_file: test_method = create_test(json.load(dns_test_file)) - setattr(cls, 'test_' + test_suffix, test_method) + setattr(cls, "test_" + test_suffix, test_method) create_tests(TestDNSRepl) @@ -159,26 +158,33 @@ def create_tests(cls): class TestParsingErrors(unittest.TestCase): - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module") def test_invalid_host(self): self.assertRaisesRegex( ConfigurationError, "Invalid URI host: mongodb is not", - MongoClient, "mongodb+srv://mongodb") + MongoClient, + "mongodb+srv://mongodb", + ) self.assertRaisesRegex( ConfigurationError, "Invalid URI host: mongodb.com is not", - MongoClient, "mongodb+srv://mongodb.com") + MongoClient, + "mongodb+srv://mongodb.com", + ) self.assertRaisesRegex( ConfigurationError, "Invalid URI host: an IP address is not", - MongoClient, "mongodb+srv://127.0.0.1") + MongoClient, + "mongodb+srv://127.0.0.1", + ) self.assertRaisesRegex( ConfigurationError, "Invalid URI host: an IP address is not", - MongoClient, "mongodb+srv://[::1]") + MongoClient, + "mongodb+srv://[::1]", + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_encryption.py b/test/test_encryption.py index 966d9b5815..31c3dd2bcd 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -18,150 +18,147 @@ import copy import os import re -import ssl import socket +import ssl import sys import textwrap import traceback import uuid - from typing import Any from pymongo.collection import Collection sys.path[0:0] = [""] +from test import ( + CA_PEM, + CLIENT_PEM, + IntegrationTest, + PyMongoTestCase, + client_context, + unittest, +) +from test.test_bulk import BulkTestBase +from test.utils import ( + AllowListEventListener, + OvertCommandListener, + TestCreator, + TopologyEventListener, + camel_to_snake_args, + rs_or_single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner + from bson import encode, json_util -from bson.binary import (Binary, - UuidRepresentation, - JAVA_LEGACY, - STANDARD, - UUID_SUBTYPE) +from bson.binary import JAVA_LEGACY, STANDARD, UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON - from pymongo import encryption from pymongo.cursor import CursorType -from pymongo.encryption import (Algorithm, - ClientEncryption) -from pymongo.encryption_options import AutoEncryptionOpts, _HAVE_PYMONGOCRYPT -from pymongo.errors import (BulkWriteError, - ConfigurationError, - EncryptionError, - InvalidOperation, - OperationFailure, - ServerSelectionTimeoutError, - WriteError) +from pymongo.encryption import Algorithm, ClientEncryption +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + EncryptionError, + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, + WriteError, +) from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne, ReplaceOne, UpdateOne from pymongo.write_concern import WriteConcern -from test import (unittest, CA_PEM, CLIENT_PEM, - client_context, - IntegrationTest, - PyMongoTestCase) -from test.test_bulk import BulkTestBase -from test.utils import (TestCreator, - camel_to_snake_args, - OvertCommandListener, - TopologyEventListener, - AllowListEventListener, - rs_or_single_client, - wait_until) -from test.utils_spec_runner import SpecRunner - def get_client_opts(client): return client._MongoClient__options -KMS_PROVIDERS = {'local': {'key': b'\x00'*96}} +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} class TestAutoEncryptionOpts(PyMongoTestCase): - @unittest.skipIf(_HAVE_PYMONGOCRYPT, 'pymongocrypt is installed') + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): with self.assertRaises(ConfigurationError): - AutoEncryptionOpts({}, 'keyvault.datakeys') + AutoEncryptionOpts({}, "keyvault.datakeys") - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init(self): - opts = AutoEncryptionOpts({}, 'keyvault.datakeys') + opts = AutoEncryptionOpts({}, "keyvault.datakeys") self.assertEqual(opts._kms_providers, {}) - self.assertEqual(opts._key_vault_namespace, 'keyvault.datakeys') + self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys") self.assertEqual(opts._key_vault_client, None) self.assertEqual(opts._schema_map, None) self.assertEqual(opts._bypass_auto_encryption, False) - self.assertEqual(opts._mongocryptd_uri, 'mongodb://localhost:27020') + self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020") self.assertEqual(opts._mongocryptd_bypass_spawn, False) - self.assertEqual(opts._mongocryptd_spawn_path, 'mongocryptd') - self.assertEqual( - opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) + self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) self.assertEqual(opts._kms_ssl_contexts, {}) - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_spawn_args(self): # User can override idleShutdownTimeoutSecs opts = AutoEncryptionOpts( - {}, 'keyvault.datakeys', - mongocryptd_spawn_args=['--idleShutdownTimeoutSecs=88']) - self.assertEqual( - opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=88']) + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"] + ) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"]) # idleShutdownTimeoutSecs is added by default - opts = AutoEncryptionOpts( - {}, 'keyvault.datakeys', mongocryptd_spawn_args=[]) - self.assertEqual( - opts._mongocryptd_spawn_args, ['--idleShutdownTimeoutSecs=60']) + opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[]) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) # Also added when other options are given opts = AutoEncryptionOpts( - {}, 'keyvault.datakeys', - mongocryptd_spawn_args=['--quiet', '--port=27020']) + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"] + ) self.assertEqual( opts._mongocryptd_spawn_args, - ['--quiet', '--port=27020', '--idleShutdownTimeoutSecs=60']) + ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"], + ) - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_kms_tls_options(self): # Error cases: - with self.assertRaisesRegex( - TypeError, r'kms_tls_options\["kmip"\] must be a dict'): - AutoEncryptionOpts({}, 'k.d', kms_tls_options={'kmip': 1}) + with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) tls_opts: Any for tls_opts in [ - {'kmip': {'tls': True, 'tlsInsecure': True}}, - {'kmip': {'tls': True, 'tlsAllowInvalidCertificates': True}}, - {'kmip': {'tls': True, 'tlsAllowInvalidHostnames': True}}, - {'kmip': {'tls': True, 'tlsDisableOCSPEndpointCheck': True}}]: - with self.assertRaisesRegex( - ConfigurationError, 'Insecure TLS options prohibited'): - opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) + {"kmip": {"tls": True, "tlsInsecure": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, + {"kmip": {"tls": True, "tlsDisableOCSPEndpointCheck": True}}, + ]: + with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) with self.assertRaises(FileNotFoundError): - AutoEncryptionOpts({}, 'k.d', kms_tls_options={ - 'kmip': {'tlsCAFile': 'does-not-exist'}}) + AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}}) # Success cases: tls_opts: Any for tls_opts in [None, {}]: - opts = AutoEncryptionOpts({}, 'k.d', kms_tls_options=tls_opts) + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) self.assertEqual(opts._kms_ssl_contexts, {}) - opts = AutoEncryptionOpts( - {}, 'k.d', kms_tls_options={'kmip': {'tls': True}, 'aws': {}}) - ctx = opts._kms_ssl_contexts['kmip'] + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) + ctx = opts._kms_ssl_contexts["kmip"] # On < 3.7 we check hostnames manually. if sys.version_info[:2] >= (3, 7): self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) - ctx = opts._kms_ssl_contexts['aws'] + ctx = opts._kms_ssl_contexts["aws"] if sys.version_info[:2] >= (3, 7): self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) opts = AutoEncryptionOpts( - {}, 'k.d', kms_tls_options={'kmip': { - 'tlsCAFile': CA_PEM, 'tlsCertificateKeyFile': CLIENT_PEM}}) - ctx = opts._kms_ssl_contexts['kmip'] + {}, + "k.d", + kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, + ) + ctx = opts._kms_ssl_contexts["kmip"] if sys.version_info[:2] >= (3, 7): self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) @@ -177,9 +174,9 @@ def test_default(self): self.addCleanup(client.close) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_kwargs(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = MongoClient(auto_encryption_opts=opts, connect=False) self.addCleanup(client.close) self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) @@ -189,7 +186,7 @@ class EncryptionIntegrationTest(IntegrationTest): """Base class for encryption integration tests.""" @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): super(EncryptionIntegrationTest, cls).setUpClass() @@ -204,16 +201,14 @@ def assertBinaryUUID(self, val): # Location of JSON test files. -BASE = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'client-side-encryption') -SPEC_PATH = os.path.join(BASE, 'spec') +BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") +SPEC_PATH = os.path.join(BASE, "spec") OPTS = CodecOptions(uuid_representation=STANDARD) # Use SON to preserve the order of fields while parsing json. Use tz_aware # =False to match how CodecOptions decodes dates. -JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD, - tz_aware=False) +JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD, tz_aware=False) def read(*paths): @@ -230,38 +225,39 @@ def bson_data(*paths): class TestClientSimple(EncryptionIntegrationTest): - def _test_auto_encrypt(self, opts): client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) # Create the encrypted field's data key. key_vault = create_key_vault( - self.client.keyvault.datakeys, - json_data('custom', 'key-document-local.json')) + self.client.keyvault.datakeys, json_data("custom", "key-document-local.json") + ) self.addCleanup(key_vault.drop) # Collection.insert_one/insert_many auto encrypts. - docs = [{'_id': 0, 'ssn': '000'}, - {'_id': 1, 'ssn': '111'}, - {'_id': 2, 'ssn': '222'}, - {'_id': 3, 'ssn': '333'}, - {'_id': 4, 'ssn': '444'}, - {'_id': 5, 'ssn': '555'}] + docs = [ + {"_id": 0, "ssn": "000"}, + {"_id": 1, "ssn": "111"}, + {"_id": 2, "ssn": "222"}, + {"_id": 3, "ssn": "333"}, + {"_id": 4, "ssn": "444"}, + {"_id": 5, "ssn": "555"}, + ] encrypted_coll = client.pymongo_test.test encrypted_coll.insert_one(docs[0]) encrypted_coll.insert_many(docs[1:3]) unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) unack.insert_one(docs[3]) unack.insert_many(docs[4:], ordered=False) - wait_until(lambda: self.db.test.count_documents({}) == len(docs), - 'insert documents with w=0') + wait_until( + lambda: self.db.test.count_documents({}) == len(docs), "insert documents with w=0" + ) # Database.command auto decrypts. - res = client.pymongo_test.command( - 'find', 'test', filter={'ssn': '000'}) - decrypted_docs = res['cursor']['firstBatch'] - self.assertEqual(decrypted_docs, [{'_id': 0, 'ssn': '000'}]) + res = client.pymongo_test.command("find", "test", filter={"ssn": "000"}) + decrypted_docs = res["cursor"]["firstBatch"] + self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) # Collection.find auto decrypts. decrypted_docs = list(encrypted_coll.find()) @@ -280,51 +276,48 @@ def _test_auto_encrypt(self, opts): self.assertEqual(decrypted_docs, docs) # Collection.distinct auto decrypts. - decrypted_ssns = encrypted_coll.distinct('ssn') - self.assertEqual(set(decrypted_ssns), set(d['ssn'] for d in docs)) + decrypted_ssns = encrypted_coll.distinct("ssn") + self.assertEqual(set(decrypted_ssns), set(d["ssn"] for d in docs)) # Make sure the field is actually encrypted. for encrypted_doc in self.db.test.find(): - self.assertIsInstance(encrypted_doc['_id'], int) - self.assertEncrypted(encrypted_doc['ssn']) + self.assertIsInstance(encrypted_doc["_id"], int) + self.assertEncrypted(encrypted_doc["ssn"]) # Attempt to encrypt an unencodable object. with self.assertRaises(BSONError): - encrypted_coll.insert_one({'unencodeable': object()}) + encrypted_coll.insert_one({"unencodeable": object()}) def test_auto_encrypt(self): # Configure the encrypted field via jsonSchema. - json_schema = json_data('custom', 'schema.json') + json_schema = json_data("custom", "schema.json") create_with_schema(self.db.test, json_schema) self.addCleanup(self.db.test.drop) - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") self._test_auto_encrypt(opts) def test_auto_encrypt_local_schema_map(self): # Configure the encrypted field via the local schema_map option. - schemas = {'pymongo_test.test': json_data('custom', 'schema.json')} - opts = AutoEncryptionOpts( - KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas) + schemas = {"pymongo_test.test": json_data("custom", "schema.json")} + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas) self._test_auto_encrypt(opts) def test_use_after_close(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") client.close() - with self.assertRaisesRegex(InvalidOperation, - 'Cannot use MongoClient after close'): - client.admin.command('ping') + with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): + client.admin.command("ping") class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): - def test_upsert_uuid_standard_encrypte(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) @@ -332,126 +325,131 @@ def test_upsert_uuid_standard_encrypte(self): encrypted_coll = client.pymongo_test.test coll = encrypted_coll.with_options(codec_options=options) uuids = [uuid.uuid4() for _ in range(3)] - result = coll.bulk_write([ - UpdateOne({'_id': uuids[0]}, {'$set': {'a': 0}}, upsert=True), - ReplaceOne({'a': 1}, {'_id': uuids[1]}, upsert=True), - # This is just here to make the counts right in all cases. - ReplaceOne({'_id': uuids[2]}, {'_id': uuids[2]}, upsert=True), - ]) + result = coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) self.assertEqualResponse( - {'nMatched': 0, - 'nModified': 0, - 'nUpserted': 3, - 'nInserted': 0, - 'nRemoved': 0, - 'upserted': [{'index': 0, '_id': uuids[0]}, - {'index': 1, '_id': uuids[1]}, - {'index': 2, '_id': uuids[2]}]}, - result.bulk_api_result) + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) class TestClientMaxWireVersion(IntegrationTest): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): super(TestClientMaxWireVersion, cls).setUpClass() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - msg = 'Auto-encryption requires a minimum MongoDB version of 4.2' + msg = "Auto-encryption requires a minimum MongoDB version of 4.2" with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.insert_one({}) with self.assertRaisesRegex(ConfigurationError, msg): - client.admin.command('ping') + client.admin.command("ping") with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.find_one({}) with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.bulk_write([InsertOne({})]) def test_raise_unsupported_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, 'keyvault.datakeys') + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) - msg = 'find_raw_batches does not support auto encryption' + msg = "find_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.find_raw_batches({}) - msg = 'aggregate_raw_batches does not support auto encryption' + msg = "aggregate_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.aggregate_raw_batches([]) if client_context.is_mongos: - msg = 'Exhaust cursors are not supported by mongos' + msg = "Exhaust cursors are not supported by mongos" else: - msg = 'exhaust cursors do not support auto encryption' + msg = "exhaust cursors do not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): next(client.test.test.find(cursor_type=CursorType.EXHAUST)) class TestExplicitSimple(EncryptionIntegrationTest): - def test_encrypt_decrypt(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) # Use standard UUID representation. - key_vault = client_context.client.keyvault.get_collection( - 'datakeys', codec_options=OPTS) + key_vault = client_context.client.keyvault.get_collection("datakeys", codec_options=OPTS) self.addCleanup(key_vault.drop) # Create the encrypted field's data key. - key_id = client_encryption.create_data_key( - 'local', key_alt_names=['name']) + key_id = client_encryption.create_data_key("local", key_alt_names=["name"]) self.assertBinaryUUID(key_id) - self.assertTrue(key_vault.find_one({'_id': key_id})) + self.assertTrue(key_vault.find_one({"_id": key_id})) # Create an unused data key to make sure filtering works. - unused_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['unused']) + unused_key_id = client_encryption.create_data_key("local", key_alt_names=["unused"]) self.assertBinaryUUID(unused_key_id) - self.assertTrue(key_vault.find_one({'_id': unused_key_id})) + self.assertTrue(key_vault.find_one({"_id": unused_key_id})) - doc = {'_id': 0, 'ssn': '000'} + doc = {"_id": 0, "ssn": "000"} encrypted_ssn = client_encryption.encrypt( - doc['ssn'], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=key_id) + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) # Ensure encryption via key_alt_name for the same key produces the # same output. encrypted_ssn2 = client_encryption.encrypt( - doc['ssn'], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='name') + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name" + ) self.assertEqual(encrypted_ssn, encrypted_ssn2) # Test decryption. decrypted_ssn = client_encryption.decrypt(encrypted_ssn) - self.assertEqual(decrypted_ssn, doc['ssn']) + self.assertEqual(decrypted_ssn, doc["ssn"]) def test_validation(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) - msg = 'value to decrypt must be a bson.binary.Binary with subtype 6' + msg = "value to decrypt must be a bson.binary.Binary with subtype 6" with self.assertRaisesRegex(TypeError, msg): - client_encryption.decrypt('str') # type: ignore[arg-type] + client_encryption.decrypt("str") # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): - client_encryption.decrypt(Binary(b'123')) + client_encryption.decrypt(Binary(b"123")) - msg = 'key_id must be a bson.binary.Binary with subtype 4' + msg = "key_id must be a bson.binary.Binary with subtype 4" algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt('str', algo, key_id=uuid.uuid4()) # type: ignore[arg-type] + client_encryption.encrypt("str", algo, key_id=uuid.uuid4()) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt('str', algo, key_id=Binary(b'123')) + client_encryption.encrypt("str", algo, key_id=Binary(b"123")) def test_bson_errors(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) # Attempt to encrypt an unencodable object. @@ -460,37 +458,40 @@ def test_bson_errors(self): client_encryption.encrypt( unencodable_value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE)) + key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE), + ) def test_codec_options(self): - with self.assertRaisesRegex(TypeError, 'codec_options must be'): + with self.assertRaisesRegex(TypeError, "codec_options must be"): ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, None) # type: ignore[arg-type] + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None # type: ignore[arg-type] + ) opts = CodecOptions(uuid_representation=JAVA_LEGACY) client_encryption_legacy = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, opts) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts + ) self.addCleanup(client_encryption_legacy.close) # Create the encrypted field's data key. - key_id = client_encryption_legacy.create_data_key('local') + key_id = client_encryption_legacy.create_data_key("local") # Encrypt a UUID with JAVA_LEGACY codec options. value = uuid.uuid4() encrypted_legacy = client_encryption_legacy.encrypt( - value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=key_id) - decrypted_value_legacy = client_encryption_legacy.decrypt( - encrypted_legacy) + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_value_legacy = client_encryption_legacy.decrypt(encrypted_legacy) self.assertEqual(decrypted_value_legacy, value) # Encrypt the same UUID with STANDARD codec options. client_encryption = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( - value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=key_id) + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) decrypted_standard = client_encryption.decrypt(encrypted_standard) self.assertEqual(decrypted_standard, value) @@ -498,163 +499,160 @@ def test_codec_options(self): self.assertNotEqual(encrypted_standard, encrypted_legacy) # Test that codec_options is applied during decryption. self.assertEqual( - client_encryption_legacy.decrypt(encrypted_standard), - Binary.from_uuid(value)) - self.assertNotEqual( - client_encryption.decrypt(encrypted_legacy), value) + client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value) + ) + self.assertNotEqual(client_encryption.decrypt(encrypted_legacy), value) def test_close(self): client_encryption = ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', client_context.client, OPTS) + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) client_encryption.close() # Close can be called multiple times. client_encryption.close() algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic - msg = 'Cannot use closed ClientEncryption' + msg = "Cannot use closed ClientEncryption" with self.assertRaisesRegex(InvalidOperation, msg): - client_encryption.create_data_key('local') + client_encryption.create_data_key("local") with self.assertRaisesRegex(InvalidOperation, msg): - client_encryption.encrypt('val', algo, key_alt_name='name') + client_encryption.encrypt("val", algo, key_alt_name="name") with self.assertRaisesRegex(InvalidOperation, msg): - client_encryption.decrypt(Binary(b'', 6)) + client_encryption.decrypt(Binary(b"", 6)) def test_with_statement(self): with ClientEncryption( - KMS_PROVIDERS, 'keyvault.datakeys', - client_context.client, OPTS) as client_encryption: + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + ) as client_encryption: pass - with self.assertRaisesRegex( - InvalidOperation, 'Cannot use closed ClientEncryption'): - client_encryption.create_data_key('local') + with self.assertRaisesRegex(InvalidOperation, "Cannot use closed ClientEncryption"): + client_encryption.create_data_key("local") # Spec tests AWS_CREDS = { - 'accessKeyId': os.environ.get('FLE_AWS_KEY', ''), - 'secretAccessKey': os.environ.get('FLE_AWS_SECRET', '') + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), } AWS_TEMP_CREDS = { - 'accessKeyId': os.environ.get('CSFLE_AWS_TEMP_ACCESS_KEY_ID', ''), - 'secretAccessKey': os.environ.get('CSFLE_AWS_TEMP_SECRET_ACCESS_KEY', ''), - 'sessionToken': os.environ.get('CSFLE_AWS_TEMP_SESSION_TOKEN', '') + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), + "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), } AWS_TEMP_NO_SESSION_CREDS = { - 'accessKeyId': os.environ.get('CSFLE_AWS_TEMP_ACCESS_KEY_ID', ''), - 'secretAccessKey': os.environ.get('CSFLE_AWS_TEMP_SECRET_ACCESS_KEY', '') + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } AZURE_CREDS = { - 'tenantId': os.environ.get('FLE_AZURE_TENANTID', ''), - 'clientId': os.environ.get('FLE_AZURE_CLIENTID', ''), - 'clientSecret': os.environ.get('FLE_AZURE_CLIENTSECRET', '')} + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} GCP_CREDS = { - 'email': os.environ.get('FLE_GCP_EMAIL', ''), - 'privateKey': os.environ.get('FLE_GCP_PRIVATEKEY', '')} + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} -KMIP = {'endpoint': os.environ.get('FLE_KMIP_ENDPOINT', 'localhost:5698')} -KMS_TLS_OPTS = {'kmip': {'tlsCAFile': CA_PEM, - 'tlsCertificateKeyFile': CLIENT_PEM}} +KMIP = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} +KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} class TestSpec(SpecRunner): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 'pymongocrypt is not installed') + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): super(TestSpec, cls).setUpClass() def parse_auto_encrypt_opts(self, opts): """Parse clientOptions.autoEncryptOpts.""" opts = camel_to_snake_args(opts) - kms_providers = opts['kms_providers'] - if 'aws' in kms_providers: - kms_providers['aws'] = AWS_CREDS + kms_providers = opts["kms_providers"] + if "aws" in kms_providers: + kms_providers["aws"] = AWS_CREDS if not any(AWS_CREDS.values()): - self.skipTest('AWS environment credentials are not set') - if 'awsTemporary' in kms_providers: - kms_providers['aws'] = AWS_TEMP_CREDS - del kms_providers['awsTemporary'] + self.skipTest("AWS environment credentials are not set") + if "awsTemporary" in kms_providers: + kms_providers["aws"] = AWS_TEMP_CREDS + del kms_providers["awsTemporary"] if not any(AWS_TEMP_CREDS.values()): - self.skipTest('AWS Temp environment credentials are not set') - if 'awsTemporaryNoSessionToken' in kms_providers: - kms_providers['aws'] = AWS_TEMP_NO_SESSION_CREDS - del kms_providers['awsTemporaryNoSessionToken'] + self.skipTest("AWS Temp environment credentials are not set") + if "awsTemporaryNoSessionToken" in kms_providers: + kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers["awsTemporaryNoSessionToken"] if not any(AWS_TEMP_NO_SESSION_CREDS.values()): - self.skipTest('AWS Temp environment credentials are not set') - if 'azure' in kms_providers: - kms_providers['azure'] = AZURE_CREDS + self.skipTest("AWS Temp environment credentials are not set") + if "azure" in kms_providers: + kms_providers["azure"] = AZURE_CREDS if not any(AZURE_CREDS.values()): - self.skipTest('Azure environment credentials are not set') - if 'gcp' in kms_providers: - kms_providers['gcp'] = GCP_CREDS + self.skipTest("Azure environment credentials are not set") + if "gcp" in kms_providers: + kms_providers["gcp"] = GCP_CREDS if not any(AZURE_CREDS.values()): - self.skipTest('GCP environment credentials are not set') - if 'kmip' in kms_providers: - kms_providers['kmip'] = KMIP - opts['kms_tls_options'] = KMS_TLS_OPTS - if 'key_vault_namespace' not in opts: - opts['key_vault_namespace'] = 'keyvault.datakeys' + self.skipTest("GCP environment credentials are not set") + if "kmip" in kms_providers: + kms_providers["kmip"] = KMIP + opts["kms_tls_options"] = KMS_TLS_OPTS + if "key_vault_namespace" not in opts: + opts["key_vault_namespace"] = "keyvault.datakeys" opts = dict(opts) return AutoEncryptionOpts(**opts) def parse_client_options(self, opts): """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop('autoEncryptOpts') + encrypt_opts = opts.pop("autoEncryptOpts") if encrypt_opts: - opts['auto_encryption_opts'] = self.parse_auto_encrypt_opts( - encrypt_opts) + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) return super(TestSpec, self).parse_client_options(opts) def get_object_name(self, op): """Default object is collection.""" - return op.get('object', 'collection') + return op.get("object", "collection") def maybe_skip_scenario(self, test): super(TestSpec, self).maybe_skip_scenario(test) - desc = test['description'].lower() - if 'type=symbol' in desc: - self.skipTest('PyMongo does not support the symbol type') + desc = test["description"].lower() + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") def setup_scenario(self, scenario_def): """Override a test's setup.""" - key_vault_data = scenario_def['key_vault_data'] + key_vault_data = scenario_def["key_vault_data"] if key_vault_data: coll = client_context.client.get_database( - 'keyvault', - write_concern=WriteConcern(w='majority'), - codec_options=OPTS)['datakeys'] + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] coll.drop() coll.insert_many(key_vault_data) db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority'), - codec_options=OPTS) + db_name, write_concern=WriteConcern(w="majority"), codec_options=OPTS + ) coll = db[coll_name] coll.drop() - json_schema = scenario_def['json_schema'] + json_schema = scenario_def["json_schema"] if json_schema: db.create_collection( - coll_name, - validator={'$jsonSchema': json_schema}, codec_options=OPTS) + coll_name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) else: db.create_collection(coll_name) - if scenario_def['data']: + if scenario_def["data"]: # Load data. - coll.insert_many(scenario_def['data']) + coll.insert_many(scenario_def["data"]) def allowable_errors(self, op): """Override expected error classes.""" errors = super(TestSpec, self).allowable_errors(op) # An updateOne test expects encryption to error when no $ operator # appears but pymongo raises a client side ValueError in this case. - if op['name'] == 'updateOne': + if op["name"] == "updateOne": errors += (ValueError,) return errors @@ -673,40 +671,36 @@ def run_scenario(self): # Prose Tests LOCAL_MASTER_KEY = base64.b64decode( - b'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ' - b'5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk') + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) ALL_KMS_PROVIDERS = { - 'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'kmip': KMIP, - 'local': {'key': LOCAL_MASTER_KEY}} - -LOCAL_KEY_ID = Binary( - base64.b64decode(b'LOCALAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) -AWS_KEY_ID = Binary( - base64.b64decode(b'AWSAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) -AZURE_KEY_ID = Binary( - base64.b64decode(b'AZUREAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) -GCP_KEY_ID = Binary( - base64.b64decode(b'GCPAAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) -KMIP_KEY_ID = Binary( - base64.b64decode(b'KMIPAAAAAAAAAAAAAAAAAA=='), UUID_SUBTYPE) + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP, + "local": {"key": LOCAL_MASTER_KEY}, +} + +LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) def create_with_schema(coll, json_schema): """Create and return a Collection with a jsonSchema.""" - coll.with_options(write_concern=WriteConcern(w='majority')).drop() + coll.with_options(write_concern=WriteConcern(w="majority")).drop() return coll.database.create_collection( - coll.name, validator={'$jsonSchema': json_schema}, codec_options=OPTS) + coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) def create_key_vault(vault, *data_keys): """Create the key vault collection with optional data keys.""" - vault = vault.with_options( - write_concern=WriteConcern(w='majority'), - codec_options=OPTS) + vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS) vault.drop() if data_keys: vault.insert_many(data_keys) @@ -722,27 +716,29 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): KMS_PROVIDERS = ALL_KMS_PROVIDERS MASTER_KEYS = { - 'aws': { - 'region': 'us-east-1', - 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-' - '4bd9-9f25-e30687b580d0'}, - 'azure': { - 'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net', - 'keyName': 'key-name-csfle'}, - 'gcp': { - 'projectId': 'devprod-drivers', - 'location': 'global', - 'keyRing': 'key-ring-csfle', - 'keyName': 'key-name-csfle'}, - 'kmip': {}, - 'local': None + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-" "4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": None, } @classmethod - @unittest.skipUnless(any([all(AWS_CREDS.values()), - all(AZURE_CREDS.values()), - all(GCP_CREDS.values())]), - 'No environment credentials are set') + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) def setUpClass(cls): super(TestDataKeyDoubleEncryption, cls).setUpClass() cls.listener = OvertCommandListener() @@ -759,20 +755,21 @@ def setUpClass(cls): "encrypt": { "keyId": "/placeholder", "bsonType": "string", - "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", } } - } + }, } } opts = AutoEncryptionOpts( - cls.KMS_PROVIDERS, 'keyvault.datakeys', schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS) + cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS + ) cls.client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + auto_encryption_opts=opts, uuidRepresentation="standard" + ) cls.client_encryption = ClientEncryption( - cls.KMS_PROVIDERS, 'keyvault.datakeys', cls.client, OPTS, - kms_tls_options=KMS_TLS_OPTS) + cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS + ) @classmethod def tearDownClass(cls): @@ -788,96 +785,98 @@ def run_test(self, provider_name): # Create data key. master_key: Any = self.MASTER_KEYS[provider_name] datakey_id = self.client_encryption.create_data_key( - provider_name, master_key=master_key, - key_alt_names=['%s_altname' % (provider_name,)]) + provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)] + ) self.assertBinaryUUID(datakey_id) - cmd = self.listener.results['started'][-1] - self.assertEqual('insert', cmd.command_name) - self.assertEqual({'w': 'majority'}, cmd.command.get('writeConcern')) - docs = list(self.vault.find({'_id': datakey_id})) + cmd = self.listener.results["started"][-1] + self.assertEqual("insert", cmd.command_name) + self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) + docs = list(self.vault.find({"_id": datakey_id})) self.assertEqual(len(docs), 1) - self.assertEqual(docs[0]['masterKey']['provider'], provider_name) + self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) # Encrypt by key_id. encrypted = self.client_encryption.encrypt( - 'hello %s' % (provider_name,), + "hello %s" % (provider_name,), Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=datakey_id) + key_id=datakey_id, + ) self.assertEncrypted(encrypted) - self.client_encrypted.db.coll.insert_one( - {'_id': provider_name, 'value': encrypted}) - doc_decrypted = self.client_encrypted.db.coll.find_one( - {'_id': provider_name}) - self.assertEqual(doc_decrypted['value'], 'hello %s' % (provider_name,)) # type: ignore + self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) + doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name}) + self.assertEqual(doc_decrypted["value"], "hello %s" % (provider_name,)) # type: ignore # Encrypt by key_alt_name. encrypted_altname = self.client_encryption.encrypt( - 'hello %s' % (provider_name,), + "hello %s" % (provider_name,), Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='%s_altname' % (provider_name,)) + key_alt_name="%s_altname" % (provider_name,), + ) self.assertEqual(encrypted_altname, encrypted) # Explicitly encrypting an auto encrypted field. - msg = (r'Cannot encrypt element of type binData because schema ' - r'requires that type is one of: \[ string \]') + msg = ( + r"Cannot encrypt element of type binData because schema " + r"requires that type is one of: \[ string \]" + ) with self.assertRaisesRegex(EncryptionError, msg): - self.client_encrypted.db.coll.insert_one( - {'encrypted_placeholder': encrypted}) + self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) def test_data_key_local(self): - self.run_test('local') + self.run_test("local") - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_data_key_aws(self): - self.run_test('aws') + self.run_test("aws") - @unittest.skipUnless(any(AZURE_CREDS.values()), - 'Azure environment credentials are not set') + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") def test_data_key_azure(self): - self.run_test('azure') + self.run_test("azure") - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") def test_data_key_gcp(self): - self.run_test('gcp') + self.run_test("gcp") def test_data_key_kmip(self): - self.run_test('kmip') + self.run_test("kmip") class TestExternalKeyVault(EncryptionIntegrationTest): - @staticmethod def kms_providers(): - return {'local': {'key': LOCAL_MASTER_KEY}} + return {"local": {"key": LOCAL_MASTER_KEY}} def _test_external_key_vault(self, with_external_key_vault): self.client.db.coll.drop() vault = create_key_vault( self.client.keyvault.datakeys, - json_data('corpus', 'corpus-key-local.json'), - json_data('corpus', 'corpus-key-aws.json')) + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + ) self.addCleanup(vault.drop) # Configure the encrypted field via the local schema_map option. - schemas = {'db.coll': json_data('external', 'external-schema.json')} + schemas = {"db.coll": json_data("external", "external-schema.json")} if with_external_key_vault: - key_vault_client = rs_or_single_client( - username='fake-user', password='fake-pwd') + key_vault_client = rs_or_single_client(username="fake-user", password="fake-pwd") self.addCleanup(key_vault_client.close) else: key_vault_client = client_context.client opts = AutoEncryptionOpts( - self.kms_providers(), 'keyvault.datakeys', schema_map=schemas, - key_vault_client=key_vault_client) + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + key_vault_client=key_vault_client, + ) client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + auto_encryption_opts=opts, uuidRepresentation="standard" + ) self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'keyvault.datakeys', key_vault_client, OPTS) + self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS + ) self.addCleanup(client_encryption.close) if with_external_key_vault: @@ -896,14 +895,15 @@ def _test_external_key_vault(self, with_external_key_vault): client_encryption.encrypt( "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=LOCAL_KEY_ID) + key_id=LOCAL_KEY_ID, + ) # AuthenticationFailed error. self.assertIsInstance(ctx.exception.cause, OperationFailure) self.assertEqual(ctx.exception.cause.code, 18) else: client_encryption.encrypt( - "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=LOCAL_KEY_ID) + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID + ) def test_external_key_vault_1(self): self._test_external_key_vault(True) @@ -913,31 +913,28 @@ def test_external_key_vault_2(self): class TestViews(EncryptionIntegrationTest): - @staticmethod def kms_providers(): - return {'local': {'key': LOCAL_MASTER_KEY}} + return {"local": {"key": LOCAL_MASTER_KEY}} def test_views_are_prohibited(self): self.client.db.view.drop() - self.client.db.create_collection('view', viewOn='coll') + self.client.db.create_collection("view", viewOn="coll") self.addCleanup(self.client.db.view.drop) - opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys') + opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + auto_encryption_opts=opts, uuidRepresentation="standard" + ) self.addCleanup(client_encrypted.close) - with self.assertRaisesRegex( - EncryptionError, 'cannot auto encrypt a view'): + with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): client_encrypted.db.view.insert_one({}) class TestCorpus(EncryptionIntegrationTest): - @classmethod - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUpClass(cls): super(TestCorpus, cls).setUpClass() @@ -948,141 +945,158 @@ def kms_providers(): @staticmethod def fix_up_schema(json_schema): """Remove deprecated symbol/dbPointer types from json schema.""" - for key in list(json_schema['properties']): - if '_symbol_' in key or '_dbPointer_' in key: - del json_schema['properties'][key] + for key in list(json_schema["properties"]): + if "_symbol_" in key or "_dbPointer_" in key: + del json_schema["properties"][key] return json_schema @staticmethod def fix_up_curpus(corpus): """Disallow deprecated symbol/dbPointer types from corpus test.""" for key in corpus: - if '_symbol_' in key or '_dbPointer_' in key: - corpus[key]['allowed'] = False + if "_symbol_" in key or "_dbPointer_" in key: + corpus[key]["allowed"] = False return corpus @staticmethod def fix_up_curpus_encrypted(corpus_encrypted, corpus): """Fix the expected values for deprecated symbol/dbPointer types.""" for key in corpus_encrypted: - if '_symbol_' in key or '_dbPointer_' in key: + if "_symbol_" in key or "_dbPointer_" in key: corpus_encrypted[key] = copy.deepcopy(corpus[key]) return corpus_encrypted def _test_corpus(self, opts): # Drop and create the collection 'db.coll' with jsonSchema. coll = create_with_schema( - self.client.db.coll, - self.fix_up_schema(json_data('corpus', 'corpus-schema.json'))) + self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json")) + ) self.addCleanup(coll.drop) vault = create_key_vault( self.client.keyvault.datakeys, - json_data('corpus', 'corpus-key-local.json'), - json_data('corpus', 'corpus-key-aws.json'), - json_data('corpus', 'corpus-key-azure.json'), - json_data('corpus', 'corpus-key-gcp.json'), - json_data('corpus', 'corpus-key-kmip.json')) + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + json_data("corpus", "corpus-key-azure.json"), + json_data("corpus", "corpus-key-gcp.json"), + json_data("corpus", "corpus-key-kmip.json"), + ) self.addCleanup(vault.drop) client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation='standard') + auto_encryption_opts=opts, uuidRepresentation="standard" + ) self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( - self.kms_providers(), 'keyvault.datakeys', client_context.client, - OPTS, kms_tls_options=KMS_TLS_OPTS) + self.kms_providers(), + "keyvault.datakeys", + client_context.client, + OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) self.addCleanup(client_encryption.close) - corpus = self.fix_up_curpus(json_data('corpus', 'corpus.json')) + corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) corpus_copied: SON = SON() for key, value in corpus.items(): corpus_copied[key] = copy.deepcopy(value) - if key in ('_id', 'altname_aws', 'altname_azure', 'altname_gcp', - 'altname_local', 'altname_kmip'): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): continue - if value['method'] == 'auto': + if value["method"] == "auto": continue - if value['method'] == 'explicit': - identifier = value['identifier'] - self.assertIn(identifier, ('id', 'altname')) - kms = value['kms'] - self.assertIn(kms, ('local', 'aws', 'azure', 'gcp', 'kmip')) - if identifier == 'id': - if kms == 'local': + if value["method"] == "explicit": + identifier = value["identifier"] + self.assertIn(identifier, ("id", "altname")) + kms = value["kms"] + self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) + if identifier == "id": + if kms == "local": kwargs = dict(key_id=LOCAL_KEY_ID) - elif kms == 'aws': + elif kms == "aws": kwargs = dict(key_id=AWS_KEY_ID) - elif kms == 'azure': + elif kms == "azure": kwargs = dict(key_id=AZURE_KEY_ID) - elif kms == 'gcp': + elif kms == "gcp": kwargs = dict(key_id=GCP_KEY_ID) else: kwargs = dict(key_id=KMIP_KEY_ID) else: kwargs = dict(key_alt_name=kms) - self.assertIn(value['algo'], ('det', 'rand')) - if value['algo'] == 'det': - algo = (Algorithm. - AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + self.assertIn(value["algo"], ("det", "rand")) + if value["algo"] == "det": + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic else: algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random try: encrypted_val = client_encryption.encrypt( - value['value'], algo, **kwargs) # type: ignore[arg-type] - if not value['allowed']: - self.fail('encrypt should have failed: %r: %r' % ( - key, value)) - corpus_copied[key]['value'] = encrypted_val + value["value"], algo, **kwargs # type: ignore[arg-type] + ) + if not value["allowed"]: + self.fail("encrypt should have failed: %r: %r" % (key, value)) + corpus_copied[key]["value"] = encrypted_val except Exception: - if value['allowed']: + if value["allowed"]: tb = traceback.format_exc() - self.fail('encrypt failed: %r: %r, traceback: %s' % ( - key, value, tb)) + self.fail("encrypt failed: %r: %r, traceback: %s" % (key, value, tb)) client_encrypted.db.coll.insert_one(corpus_copied) corpus_decrypted = client_encrypted.db.coll.find_one() self.assertEqual(corpus_decrypted, corpus) - corpus_encrypted_expected = self.fix_up_curpus_encrypted(json_data( - 'corpus', 'corpus-encrypted.json'), corpus) + corpus_encrypted_expected = self.fix_up_curpus_encrypted( + json_data("corpus", "corpus-encrypted.json"), corpus + ) corpus_encrypted_actual = coll.find_one() for key, value in corpus_encrypted_actual.items(): - if key in ('_id', 'altname_aws', 'altname_azure', - 'altname_gcp', 'altname_local', 'altname_kmip'): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): continue - if value['algo'] == 'det': - self.assertEqual( - value['value'], corpus_encrypted_expected[key]['value'], - key) - elif value['algo'] == 'rand' and value['allowed']: - self.assertNotEqual( - value['value'], corpus_encrypted_expected[key]['value'], - key) - - if value['allowed']: - decrypt_actual = client_encryption.decrypt(value['value']) + if value["algo"] == "det": + self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + elif value["algo"] == "rand" and value["allowed"]: + self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + + if value["allowed"]: + decrypt_actual = client_encryption.decrypt(value["value"]) decrypt_expected = client_encryption.decrypt( - corpus_encrypted_expected[key]['value']) + corpus_encrypted_expected[key]["value"] + ) self.assertEqual(decrypt_actual, decrypt_expected, key) else: - self.assertEqual(value['value'], corpus[key]['value'], key) + self.assertEqual(value["value"], corpus[key]["value"], key) def test_corpus(self): - opts = AutoEncryptionOpts(self.kms_providers(), 'keyvault.datakeys', - kms_tls_options=KMS_TLS_OPTS) + opts = AutoEncryptionOpts( + self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS + ) self._test_corpus(opts) def test_corpus_local_schema(self): # Configure the encrypted field via the local schema_map option. - schemas = {'db.coll': self.fix_up_schema( - json_data('corpus', 'corpus-schema.json'))} + schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))} opts = AutoEncryptionOpts( - self.kms_providers(), 'keyvault.datakeys', schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS) + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS, + ) self._test_corpus(opts) @@ -1092,6 +1106,7 @@ def test_corpus_local_schema(self): class TestBsonSizeBatches(EncryptionIntegrationTest): """Prose tests for BSON size limits and batch splitting.""" + coll: Collection coll_encrypted: Collection client_encrypted: MongoClient @@ -1104,24 +1119,26 @@ def setUpClass(cls): cls.coll = db.coll cls.coll.drop() # Configure the encrypted 'db.coll' collection via jsonSchema. - json_schema = json_data('limits', 'limits-schema.json') + json_schema = json_data("limits", "limits-schema.json") db.create_collection( - 'coll', validator={'$jsonSchema': json_schema}, codec_options=OPTS, - write_concern=WriteConcern(w='majority')) + "coll", + validator={"$jsonSchema": json_schema}, + codec_options=OPTS, + write_concern=WriteConcern(w="majority"), + ) # Create the key vault. coll = client_context.client.get_database( - 'keyvault', - write_concern=WriteConcern(w='majority'), - codec_options=OPTS)['datakeys'] + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] coll.drop() - coll.insert_one(json_data('limits', 'limits-key.json')) + coll.insert_one(json_data("limits", "limits-key.json")) - opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, 'keyvault.datakeys') + opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") cls.listener = OvertCommandListener() cls.client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, event_listeners=[cls.listener]) + auto_encryption_opts=opts, event_listeners=[cls.listener] + ) cls.coll_encrypted = cls.client_encrypted.db.coll @classmethod @@ -1131,103 +1148,96 @@ def tearDownClass(cls): super(TestBsonSizeBatches, cls).tearDownClass() def test_01_insert_succeeds_under_2MiB(self): - doc = {'_id': 'over_2mib_under_16mib', 'unencrypted': 'a' * _2_MiB} + doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'over_2mib_under_16mib_bulk' + doc["_id"] = "over_2mib_under_16mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_02_insert_succeeds_over_2MiB_post_encryption(self): - doc = {'_id': 'encryption_exceeds_2mib', - 'unencrypted': 'a' * ((2**21) - 2000)} - doc.update(json_data('limits', 'limits-doc.json')) + doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)} + doc.update(json_data("limits", "limits-doc.json")) self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'encryption_exceeds_2mib_bulk' + doc["_id"] = "encryption_exceeds_2mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_03_bulk_batch_split(self): - doc1 = {'_id': 'over_2mib_1', 'unencrypted': 'a' * _2_MiB} - doc2 = {'_id': 'over_2mib_2', 'unencrypted': 'a' * _2_MiB} + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual( - self.listener.started_command_names(), ['insert', 'insert']) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) def test_04_bulk_batch_split(self): - limits_doc = json_data('limits', 'limits-doc.json') - doc1 = {'_id': 'encryption_exceeds_2mib_1', - 'unencrypted': 'a' * (_2_MiB - 2000)} + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} doc1.update(limits_doc) - doc2 = {'_id': 'encryption_exceeds_2mib_2', - 'unencrypted': 'a' * (_2_MiB - 2000)} + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} doc2.update(limits_doc) self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual( - self.listener.started_command_names(), ['insert', 'insert']) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) def test_05_insert_succeeds_just_under_16MiB(self): - doc = {'_id': 'under_16mib', 'unencrypted': 'a' * (_16_MiB - 2000)} + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'under_16mib_bulk' + doc["_id"] = "under_16mib_bulk" self.coll_encrypted.bulk_write([InsertOne(doc)]) def test_06_insert_fails_over_16MiB(self): - limits_doc = json_data('limits', 'limits-doc.json') - doc = {'_id': 'encryption_exceeds_16mib', - 'unencrypted': 'a' * (_16_MiB - 2000)} + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} doc.update(limits_doc) - with self.assertRaisesRegex(WriteError, 'object to insert too large'): + with self.assertRaisesRegex(WriteError, "object to insert too large"): self.coll_encrypted.insert_one(doc) # Same with bulk_write. - doc['_id'] = 'encryption_exceeds_16mib_bulk' + doc["_id"] = "encryption_exceeds_16mib_bulk" with self.assertRaises(BulkWriteError) as ctx: self.coll_encrypted.bulk_write([InsertOne(doc)]) - err = ctx.exception.details['writeErrors'][0] - self.assertEqual(2, err['code']) - self.assertIn('object to insert too large', err['errmsg']) + err = ctx.exception.details["writeErrors"][0] + self.assertEqual(2, err["code"]) + self.assertIn("object to insert too large", err["errmsg"]) class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" @classmethod - @unittest.skipUnless(any([all(AWS_CREDS.values()), - all(AZURE_CREDS.values()), - all(GCP_CREDS.values())]), - 'No environment credentials are set') + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) def setUpClass(cls): super(TestCustomEndpoint, cls).setUpClass() def setUp(self): - kms_providers = {'aws': AWS_CREDS, - 'azure': AZURE_CREDS, - 'gcp': GCP_CREDS, - 'kmip': KMIP} + kms_providers = {"aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, "kmip": KMIP} self.client_encryption = ClientEncryption( kms_providers=kms_providers, - key_vault_namespace='keyvault.datakeys', + key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS) + kms_tls_options=KMS_TLS_OPTS, + ) kms_providers_invalid = copy.deepcopy(kms_providers) - kms_providers_invalid['azure']['identityPlatformEndpoint'] = 'doesnotexist.invalid:443' - kms_providers_invalid['gcp']['endpoint'] = 'doesnotexist.invalid:443' - kms_providers_invalid['kmip']['endpoint'] = 'doesnotexist.local:5698' + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" self.client_encryption_invalid = ClientEncryption( kms_providers=kms_providers_invalid, - key_vault_namespace='keyvault.datakeys', + key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS) + kms_tls_options=KMS_TLS_OPTS, + ) self._kmip_host_error = None self._invalid_host_error = None @@ -1236,131 +1246,134 @@ def tearDown(self): self.client_encryption_invalid.close() def run_test_expected_success(self, provider_name, master_key): - data_key_id = self.client_encryption.create_data_key( - provider_name, master_key=master_key) + data_key_id = self.client_encryption.create_data_key(provider_name, master_key=master_key) encrypted = self.client_encryption.encrypt( - 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) - self.assertEqual('test', self.client_encryption.decrypt(encrypted)) + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption.decrypt(encrypted)) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_01_aws_region_key(self): self.run_test_expected_success( - 'aws', - {"region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0")}) + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_02_aws_region_key_endpoint(self): self.run_test_expected_success( - 'aws', - {"region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com"}) - - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_03_aws_region_key_endpoint_port(self): self.run_test_expected_success( - 'aws', - {"region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:443"}) - - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_04_aws_endpoint_invalid_port(self): master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:12345" + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:12345", } with self.assertRaises(EncryptionError) as ctx: - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + self.client_encryption.create_data_key("aws", master_key=master_key) self.assertIsInstance(ctx.exception.cause, socket.error) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-2.amazonaws.com" + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-2.amazonaws.com", } # The full error should be something like: # "Credential should be scoped to a valid region, not 'us-east-1'" # but we only check for "us-east-1" to avoid breaking on slight # changes to AWS' error message. - with self.assertRaisesRegex(EncryptionError, 'us-east-1'): - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + with self.assertRaisesRegex(EncryptionError, "us-east-1"): + self.client_encryption.create_data_key("aws", master_key=master_key) - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_06_aws_endpoint_invalid_host(self): master_key = { "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "doesnotexist.invalid" + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "doesnotexist.invalid", } with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): - self.client_encryption.create_data_key( - 'aws', master_key=master_key) + self.client_encryption.create_data_key("aws", master_key=master_key) - @unittest.skipUnless(any(AZURE_CREDS.values()), - 'Azure environment credentials are not set') + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") def test_07_azure(self): - master_key = {'keyVaultEndpoint': 'key-vault-csfle.vault.azure.net', - 'keyName': 'key-name-csfle'} - self.run_test_expected_success('azure', master_key) + master_key = { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + } + self.run_test_expected_success("azure", master_key) # The full error should be something like: # "[Errno 8] nodename nor servname provided, or not known" with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): - self.client_encryption_invalid.create_data_key( - 'azure', master_key=master_key) + self.client_encryption_invalid.create_data_key("azure", master_key=master_key) - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") def test_08_gcp_valid_endpoint(self): master_key = { "projectId": "devprod-drivers", "location": "global", "keyRing": "key-ring-csfle", "keyName": "key-name-csfle", - "endpoint": "cloudkms.googleapis.com:443"} - self.run_test_expected_success('gcp', master_key) + "endpoint": "cloudkms.googleapis.com:443", + } + self.run_test_expected_success("gcp", master_key) # The full error should be something like: # "[Errno 8] nodename nor servname provided, or not known" with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): - self.client_encryption_invalid.create_data_key( - 'gcp', master_key=master_key) + self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") def test_09_gcp_invalid_endpoint(self): master_key = { "projectId": "devprod-drivers", "location": "global", "keyRing": "key-ring-csfle", "keyName": "key-name-csfle", - "endpoint": "doesnotexist.invalid:443"} + "endpoint": "doesnotexist.invalid:443", + } # The full error should be something like: # "Invalid KMS response, no access_token returned. HTTP status=200" with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): - self.client_encryption.create_data_key( - 'gcp', master_key=master_key) + self.client_encryption.create_data_key("gcp", master_key=master_key) def dns_error(self, host, port): # The full error should be something like: @@ -1372,96 +1385,93 @@ def dns_error(self, host, port): @property def invalid_host_error(self): if self._invalid_host_error is None: - self._invalid_host_error = self.dns_error( - 'doesnotexist.invalid', 443) + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) return self._invalid_host_error @property def kmip_host_error(self): if self._kmip_host_error is None: - self._kmip_host_error = self.dns_error('doesnotexist.local', 5698) + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) return self._kmip_host_error def test_10_kmip_invalid_endpoint(self): - key = {'keyId': '1'} - self.run_test_expected_success('kmip', key) + key = {"keyId": "1"} + self.run_test_expected_success("kmip", key) with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): - self.client_encryption_invalid.create_data_key('kmip', key) + self.client_encryption_invalid.create_data_key("kmip", key) def test_11_kmip_master_key_endpoint(self): - key = {'keyId': '1', 'endpoint': KMIP['endpoint']} - self.run_test_expected_success('kmip', key) + key = {"keyId": "1", "endpoint": KMIP["endpoint"]} + self.run_test_expected_success("kmip", key) # Override invalid endpoint: - data_key_id = self.client_encryption_invalid.create_data_key( - 'kmip', master_key=key) + data_key_id = self.client_encryption_invalid.create_data_key("kmip", master_key=key) encrypted = self.client_encryption_invalid.encrypt( - 'test', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) - self.assertEqual( - 'test', self.client_encryption_invalid.decrypt(encrypted)) + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", self.client_encryption_invalid.decrypt(encrypted)) def test_12_kmip_master_key_invalid_endpoint(self): - key = {'keyId': '1', 'endpoint': 'doesnotexist.local:5698'} + key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): - self.client_encryption.create_data_key('kmip', key) + self.client_encryption.create_data_key("kmip", key) class AzureGCPEncryptionTestMixin(object): DEK = None KMS_PROVIDER_MAP = None - KEYVAULT_DB = 'keyvault' - KEYVAULT_COLL = 'datakeys' + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" client: MongoClient def setUp(self): - keyvault = self.client.get_database( - self.KEYVAULT_DB).get_collection( - self.KEYVAULT_COLL) + keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) create_key_vault(keyvault, self.DEK) def _test_explicit(self, expectation): client_encryption = ClientEncryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] - '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, - OPTS) + OPTS, + ) self.addCleanup(client_encryption.close) ciphertext = client_encryption.encrypt( - 'string0', + "string0", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary.from_uuid(self.DEK['_id'], STANDARD)) + key_id=Binary.from_uuid(self.DEK["_id"], STANDARD), + ) self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) - self.assertEqual(client_encryption.decrypt(ciphertext), 'string0') + self.assertEqual(client_encryption.decrypt(ciphertext), "string0") def _test_automatic(self, expectation_extjson, payload): encrypted_db = "db" encrypted_coll = "coll" - keyvault_namespace = '.'.join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) encryption_opts = AutoEncryptionOpts( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] keyvault_namespace, - schema_map=self.SCHEMA_MAP) + schema_map=self.SCHEMA_MAP, + ) - insert_listener = AllowListEventListener('insert') + insert_listener = AllowListEventListener("insert") client = rs_or_single_client( - auto_encryption_opts=encryption_opts, - event_listeners=[insert_listener]) + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) self.addCleanup(client.close) coll = client.get_database(encrypted_db).get_collection( - encrypted_coll, codec_options=OPTS, - write_concern=WriteConcern("majority")) + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) coll.drop() - expected_document = json_util.loads( - expectation_extjson, json_options=JSON_OPTS) + expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) coll.insert_one(payload) - event = insert_listener.results['started'][0] - inserted_doc = event.command['documents'][0] + event = insert_listener.results["started"][0] + inserted_doc = event.command["documents"][0] for key, value in expected_document.items(): self.assertEqual(value, inserted_doc[key]) @@ -1471,108 +1481,112 @@ def _test_automatic(self, expectation_extjson, payload): self.assertEqual(output_doc[key], value) -class TestAzureEncryption(AzureGCPEncryptionTestMixin, - EncryptionIntegrationTest): +class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(any(AZURE_CREDS.values()), - 'Azure environment credentials are not set') + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") def setUpClass(cls): - cls.KMS_PROVIDER_MAP = {'azure': AZURE_CREDS} - cls.DEK = json_data(BASE, 'custom', 'azure-dek.json') - cls.SCHEMA_MAP = json_data(BASE, 'custom', 'azure-gcp-schema.json') + cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + cls.DEK = json_data(BASE, "custom", "azure-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") super(TestAzureEncryption, cls).setUpClass() def test_explicit(self): return self._test_explicit( - 'AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==') + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) def test_automatic(self): - expected_document_extjson = textwrap.dedent(""" + expected_document_extjson = textwrap.dedent( + """ {"secret_azure": { "$binary": { "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", "subType": "06"} - }}""") - return self._test_automatic( - expected_document_extjson, {"secret_azure": "string0"}) + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) -class TestGCPEncryption(AzureGCPEncryptionTestMixin, - EncryptionIntegrationTest): +class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod - @unittest.skipUnless(any(GCP_CREDS.values()), - 'GCP environment credentials are not set') + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") def setUpClass(cls): - cls.KMS_PROVIDER_MAP = {'gcp': GCP_CREDS} - cls.DEK = json_data(BASE, 'custom', 'gcp-dek.json') - cls.SCHEMA_MAP = json_data(BASE, 'custom', 'azure-gcp-schema.json') + cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + cls.DEK = json_data(BASE, "custom", "gcp-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") super(TestGCPEncryption, cls).setUpClass() def test_explicit(self): return self._test_explicit( - 'ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==') + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) def test_automatic(self): - expected_document_extjson = textwrap.dedent(""" + expected_document_extjson = textwrap.dedent( + """ {"secret_gcp": { "$binary": { "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", "subType": "06"} - }}""") - return self._test_automatic( - expected_document_extjson, {"secret_gcp": "string0"}) + }}""" + ) + return self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests class TestDeadlockProse(EncryptionIntegrationTest): def setUp(self): self.client_test = rs_or_single_client( - maxPoolSize=1, readConcernLevel='majority', w='majority', - uuidRepresentation='standard') + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) self.addCleanup(self.client_test.close) self.client_keyvault_listener = OvertCommandListener() self.client_keyvault = rs_or_single_client( - maxPoolSize=1, readConcernLevel='majority', w='majority', - event_listeners=[self.client_keyvault_listener]) + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) self.addCleanup(self.client_keyvault.close) self.client_test.keyvault.datakeys.drop() self.client_test.db.coll.drop() - self.client_test.keyvault.datakeys.insert_one( - json_data('external', 'external-key.json')) + self.client_test.keyvault.datakeys.insert_one(json_data("external", "external-key.json")) _ = self.client_test.db.create_collection( - 'coll', validator={'$jsonSchema': json_data( - 'external', 'external-schema.json')}, - codec_options=OPTS) + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) client_encryption = ClientEncryption( - kms_providers={'local': {'key': LOCAL_MASTER_KEY}}, - key_vault_namespace='keyvault.datakeys', - key_vault_client=self.client_test, codec_options=OPTS) + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=self.client_test, + codec_options=OPTS, + ) self.ciphertext = client_encryption.encrypt( - 'string0', Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='local') + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) client_encryption.close() self.client_listener = OvertCommandListener() self.topology_listener = TopologyEventListener() - self.optargs = ({'local': {'key': LOCAL_MASTER_KEY}}, 'keyvault.datakeys') + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") def _run_test(self, max_pool_size, auto_encryption_opts): client_encrypted = rs_or_single_client( - readConcernLevel='majority', - w='majority', + readConcernLevel="majority", + w="majority", maxPoolSize=max_pool_size, auto_encryption_opts=auto_encryption_opts, - event_listeners=[self.client_listener, self.topology_listener]) + event_listeners=[self.client_listener, self.topology_listener], + ) if auto_encryption_opts._bypass_auto_encryption == True: - self.client_test.db.coll.insert_one( - {"_id": 0, "encrypted": self.ciphertext}) + self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) elif auto_encryption_opts._bypass_auto_encryption == False: - client_encrypted.db.coll.insert_one( - {"_id": 0, "encrypted": "string0"}) + client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) else: raise RuntimeError("bypass_auto_encryption must be a bool") @@ -1582,162 +1596,170 @@ def _run_test(self, max_pool_size, auto_encryption_opts): self.addCleanup(client_encrypted.close) def test_case_1(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=None)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 4) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'find') - self.assertEqual(cev[1].database_name, 'keyvault') - self.assertEqual(cev[2].command_name, 'insert') - self.assertEqual(cev[2].database_name, 'db') - self.assertEqual(cev[3].command_name, 'find') - self.assertEqual(cev[3].database_name, 'db') + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") - self.assertEqual(len(self.topology_listener.results['opened']), 2) + self.assertEqual(len(self.topology_listener.results["opened"]), 2) def test_case_2(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 3) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'insert') - self.assertEqual(cev[1].database_name, 'db') - self.assertEqual(cev[2].command_name, 'find') - self.assertEqual(cev[2].database_name, 'db') - - cev = self.client_keyvault_listener.results['started'] + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 2) + self.assertEqual(len(self.topology_listener.results["opened"]), 2) def test_case_3(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=None)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 2) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'find') - self.assertEqual(cev[1].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 2) + self.assertEqual(len(self.topology_listener.results["opened"]), 2) def test_case_4(self): - self._run_test(max_pool_size=1, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results['started'] + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_5(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=None)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 5) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'listCollections') - self.assertEqual(cev[1].database_name, 'keyvault') - self.assertEqual(cev[2].command_name, 'find') - self.assertEqual(cev[2].database_name, 'keyvault') - self.assertEqual(cev[3].command_name, 'insert') - self.assertEqual(cev[3].database_name, 'db') - self.assertEqual(cev[4].command_name, 'find') - self.assertEqual(cev[4].database_name, 'db') - - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_6(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=False, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 3) - self.assertEqual(cev[0].command_name, 'listCollections') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'insert') - self.assertEqual(cev[1].database_name, 'db') - self.assertEqual(cev[2].command_name, 'find') - self.assertEqual(cev[2].database_name, 'db') - - cev = self.client_keyvault_listener.results['started'] + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_7(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=None)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 2) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') - self.assertEqual(cev[1].command_name, 'find') - self.assertEqual(cev[1].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) def test_case_8(self): - self._run_test(max_pool_size=None, - auto_encryption_opts=AutoEncryptionOpts( - *self.optargs, - bypass_auto_encryption=True, - key_vault_client=self.client_keyvault)) + self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) - cev = self.client_listener.results['started'] + cev = self.client_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'db') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results['started'] + cev = self.client_keyvault_listener.results["started"] self.assertEqual(len(cev), 1) - self.assertEqual(cev[0].command_name, 'find') - self.assertEqual(cev[0].database_name, 'keyvault') + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") - self.assertEqual(len(self.topology_listener.results['opened']), 1) + self.assertEqual(len(self.topology_listener.results["opened"]), 1) # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd @@ -1746,220 +1768,210 @@ def test_mongocryptd_bypass_spawn(self): # Lower the mongocryptd timeout to reduce the test run time. self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + def reset_timeout(): encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + self.addCleanup(reset_timeout) # Configure the encrypted field via the local schema_map option. - schemas = {'db.coll': json_data('external', 'external-schema.json')} + schemas = {"db.coll": json_data("external", "external-schema.json")} opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, - 'keyvault.datakeys', + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", schema_map=schemas, mongocryptd_bypass_spawn=True, - mongocryptd_uri='mongodb://localhost:27027/', + mongocryptd_uri="mongodb://localhost:27027/", mongocryptd_spawn_args=[ - '--pidfilepath=bypass-spawning-mongocryptd.pid', - '--port=27027'] + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], ) client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) - with self.assertRaisesRegex(EncryptionError, 'Timeout'): - client_encrypted.db.coll.insert_one({'encrypted': 'test'}) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + client_encrypted.db.coll.insert_one({"encrypted": "test"}) def test_bypassAutoEncryption(self): opts = AutoEncryptionOpts( - {'local': {'key': LOCAL_MASTER_KEY}}, - 'keyvault.datakeys', + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", bypass_auto_encryption=True, mongocryptd_spawn_args=[ - '--pidfilepath=bypass-spawning-mongocryptd.pid', - '--port=27027'] + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], ) client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) client_encrypted.db.coll.insert_one({"unencrypted": "test"}) # Validate that mongocryptd was not spawned: - mongocryptd_client = MongoClient( - 'mongodb://localhost:27027/?serverSelectionTimeoutMS=500') + mongocryptd_client = MongoClient("mongodb://localhost:27027/?serverSelectionTimeoutMS=500") with self.assertRaises(ServerSelectionTimeoutError): - mongocryptd_client.admin.command('ping') + mongocryptd_client.admin.command("ping") # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): super(TestKmsTLSProse, self).setUp() self.patch_system_certs(CA_PEM) self.client_encrypted = ClientEncryption( - {'aws': AWS_CREDS}, 'keyvault.datakeys', self.client, OPTS) + {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS + ) self.addCleanup(self.client_encrypted.close) def test_invalid_kms_certificate_expired(self): key = { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8000", + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8000", } # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encrypted.create_data_key('aws', master_key=key) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encrypted.create_data_key("aws", master_key=key) def test_invalid_hostname_in_kms_certificate(self): key = { - "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" - "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8001", + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:8001", } # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encrypted.create_data_key('aws', master_key=key) + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encrypted.create_data_key("aws", master_key=key) # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests class TestKmsTLSOptions(EncryptionIntegrationTest): - @unittest.skipUnless(any(AWS_CREDS.values()), - 'AWS environment credentials are not set') + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): super(TestKmsTLSOptions, self).setUp() # 1, create client with only tlsCAFile. providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) - providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8002' - providers['gcp']['endpoint'] = '127.0.0.1:8002' + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002" + providers["gcp"]["endpoint"] = "127.0.0.1:8002" kms_tls_opts_ca_only = { - 'aws': {'tlsCAFile': CA_PEM}, - 'azure': {'tlsCAFile': CA_PEM}, - 'gcp': {'tlsCAFile': CA_PEM}, - 'kmip': {'tlsCAFile': CA_PEM}, + "aws": {"tlsCAFile": CA_PEM}, + "azure": {"tlsCAFile": CA_PEM}, + "gcp": {"tlsCAFile": CA_PEM}, + "kmip": {"tlsCAFile": CA_PEM}, } self.client_encryption_no_client_cert = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts_ca_only) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) self.addCleanup(self.client_encryption_no_client_cert.close) # 2, same providers as above but with tlsCertificateKeyFile. kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) for p in kms_tls_opts: - kms_tls_opts[p]['tlsCertificateKeyFile'] = CLIENT_PEM + kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM self.client_encryption_with_tls = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. providers: dict = copy.deepcopy(providers) - providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8000' - providers['gcp']['endpoint'] = '127.0.0.1:8000' - providers['kmip']['endpoint'] = '127.0.0.1:8000' + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8000" + providers["gcp"]["endpoint"] = "127.0.0.1:8000" + providers["kmip"]["endpoint"] = "127.0.0.1:8000" self.client_encryption_expired = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts_ca_only) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) self.addCleanup(self.client_encryption_expired.close) # 3, update endpoints to invalid host. providers: dict = copy.deepcopy(providers) - providers['azure']['identityPlatformEndpoint'] = '127.0.0.1:8001' - providers['gcp']['endpoint'] = '127.0.0.1:8001' - providers['kmip']['endpoint'] = '127.0.0.1:8001' + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8001" + providers["gcp"]["endpoint"] = "127.0.0.1:8001" + providers["kmip"]["endpoint"] = "127.0.0.1:8001" self.client_encryption_invalid_hostname = ClientEncryption( - providers, 'keyvault.datakeys', self.client, OPTS, - kms_tls_options=kms_tls_opts_ca_only) + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) self.addCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) - self.cert_error = ('certificate required|SSL handshake failed|' - 'KMS connection closed|Connection reset by peer') + self.cert_error = ( + "certificate required|SSL handshake failed|" + "KMS connection closed|Connection reset by peer" + ) # On Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) if sys.version_info[:2] >= (3, 10): - self.cert_error += '|EOF' + self.cert_error += "|EOF" # On Windows this error might be: # [WinError 10054] An existing connection was forcibly closed by the remote host - if sys.platform == 'win32': - self.cert_error += '|forcibly closed' + if sys.platform == "win32": + self.cert_error += "|forcibly closed" def test_01_aws(self): key = { - 'region': 'us-east-1', - 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', - 'endpoint': '127.0.0.1:8002', + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:8002", } with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('aws', key) + self.client_encryption_no_client_cert.create_data_key("aws", key) # "parse error" here means that the TLS handshake succeeded. - with self.assertRaisesRegex(EncryptionError, 'parse error'): - self.client_encryption_with_tls.create_data_key('aws', key) + with self.assertRaisesRegex(EncryptionError, "parse error"): + self.client_encryption_with_tls.create_data_key("aws", key) # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) - key['endpoint'] = '127.0.0.1:8000' - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('aws', key) + key["endpoint"] = "127.0.0.1:8000" + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("aws", key) # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - key['endpoint'] = '127.0.0.1:8001' - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key('aws', key) + key["endpoint"] = "127.0.0.1:8001" + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("aws", key) def test_02_azure(self): - key = {'keyVaultEndpoint': 'doesnotexist.local', 'keyName': 'foo'} + key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('azure', key) + self.client_encryption_no_client_cert.create_data_key("azure", key) # "HTTP status=404" here means that the TLS handshake succeeded. - with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): - self.client_encryption_with_tls.create_data_key('azure', key) + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_tls.create_data_key("azure", key) # Expired cert error. - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('azure', key) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key( - 'azure', key) + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("azure", key) def test_03_gcp(self): - key = {'projectId': 'foo', 'location': 'bar', 'keyRing': 'baz', - 'keyName': 'foo'} + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('gcp', key) + self.client_encryption_no_client_cert.create_data_key("gcp", key) # "HTTP status=404" here means that the TLS handshake succeeded. - with self.assertRaisesRegex(EncryptionError, 'HTTP status=404'): - self.client_encryption_with_tls.create_data_key('gcp', key) + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_tls.create_data_key("gcp", key) # Expired cert error. - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('gcp', key) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key('gcp', key) + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("gcp", key) def test_04_kmip(self): # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): - self.client_encryption_no_client_cert.create_data_key('kmip') - self.client_encryption_with_tls.create_data_key('kmip') + self.client_encryption_no_client_cert.create_data_key("kmip") + self.client_encryption_with_tls.create_data_key("kmip") # Expired cert error. - with self.assertRaisesRegex( - EncryptionError, 'expired|certificate verify failed'): - self.client_encryption_expired.create_data_key('kmip') + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. - with self.assertRaisesRegex( - EncryptionError, 'IP address mismatch|wronghost'): - self.client_encryption_invalid_hostname.create_data_key('kmip') + with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + self.client_encryption_invalid_hostname.create_data_key("kmip") if __name__ == "__main__": diff --git a/test/test_errors.py b/test/test_errors.py index 53c55f8167..8a225b6548 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -18,12 +18,14 @@ sys.path[0:0] = [""] -from pymongo.errors import (BulkWriteError, - EncryptionError, - NotPrimaryError, - OperationFailure) -from test import (PyMongoTestCase, - unittest) +from test import PyMongoTestCase, unittest + +from pymongo.errors import ( + BulkWriteError, + EncryptionError, + NotPrimaryError, + OperationFailure, +) class TestErrors(PyMongoTestCase): @@ -36,8 +38,7 @@ def test_not_primary_error(self): self.assertIn("full error", traceback.format_exc()) def test_operation_failure(self): - exc = OperationFailure("operation failure test", 10, - {"errmsg": "error"}) + exc = OperationFailure("operation failure test", 10, {"errmsg": "error"}) self.assertIn("full error", str(exc)) try: raise exc @@ -45,26 +46,26 @@ def test_operation_failure(self): self.assertIn("full error", traceback.format_exc()) def _test_unicode_strs(self, exc): - if sys.implementation.name == 'pypy' and sys.implementation.version < (7, 3, 7): + if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): # PyPy used to display unicode in repr differently. - self.assertEqual("unicode \U0001f40d, full error: {" - "'errmsg': 'unicode \\U0001f40d'}", str(exc)) + self.assertEqual( + "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \\U0001f40d'}", str(exc) + ) else: - self.assertEqual("unicode \U0001f40d, full error: {" - "'errmsg': 'unicode \U0001f40d'}", str(exc)) + self.assertEqual( + "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \U0001f40d'}", str(exc) + ) try: raise exc except Exception: self.assertIn("full error", traceback.format_exc()) def test_unicode_strs_operation_failure(self): - exc = OperationFailure('unicode \U0001f40d', 10, - {"errmsg": 'unicode \U0001f40d'}) + exc = OperationFailure("unicode \U0001f40d", 10, {"errmsg": "unicode \U0001f40d"}) self._test_unicode_strs(exc) def test_unicode_strs_not_primary_error(self): - exc = NotPrimaryError('unicode \U0001f40d', - {"errmsg": 'unicode \U0001f40d'}) + exc = NotPrimaryError("unicode \U0001f40d", {"errmsg": "unicode \U0001f40d"}) self._test_unicode_strs(exc) def assertPyMongoErrorEqual(self, exc1, exc2): @@ -84,7 +85,7 @@ def test_pickle_NotPrimaryError(self): self.assertPyMongoErrorEqual(exc, pickle.loads(pickle.dumps(exc))) def test_pickle_OperationFailure(self): - exc = OperationFailure('error', code=5, details={}, max_wire_version=7) + exc = OperationFailure("error", code=5, details={}, max_wire_version=7) self.assertOperationFailureEqual(exc, pickle.loads(pickle.dumps(exc))) def test_pickle_BulkWriteError(self): @@ -93,8 +94,7 @@ def test_pickle_BulkWriteError(self): self.assertIn("batch op errors occurred", str(exc)) def test_pickle_EncryptionError(self): - cause = OperationFailure('error', code=5, details={}, - max_wire_version=7) + cause = OperationFailure("error", code=5, details={}, max_wire_version=7) exc = EncryptionError(cause) exc2 = pickle.loads(pickle.dumps(exc)) self.assertPyMongoErrorEqual(exc, exc2) diff --git a/test/test_examples.py b/test/test_examples.py index ed12c8bcc1..7354ac5be2 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -20,6 +20,9 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import rs_client + import pymongo from pymongo.errors import ConnectionFailure, OperationFailure from pymongo.read_concern import ReadConcern @@ -27,9 +30,6 @@ from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import rs_client - class TestSampleShellCommands(IntegrationTest): @classmethod @@ -51,10 +51,13 @@ def test_first_three_examples(self): # Start Example 1 db.inventory.insert_one( - {"item": "canvas", - "qty": 100, - "tags": ["cotton"], - "size": {"h": 28, "w": 35.5, "uom": "cm"}}) + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) # End Example 1 self.assertEqual(db.inventory.count_documents({}), 1) @@ -66,19 +69,28 @@ def test_first_three_examples(self): self.assertEqual(len(list(cursor)), 1) # Start Example 3 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "tags": ["blank", "red"], - "size": {"h": 14, "w": 21, "uom": "cm"}}, - {"item": "mat", - "qty": 85, - "tags": ["gray"], - "size": {"h": 27.9, "w": 35.5, "uom": "cm"}}, - {"item": "mousepad", - "qty": 25, - "tags": ["gel", "blue"], - "size": {"h": 19, "w": 22.85, "uom": "cm"}}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": "mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) # End Example 3 self.assertEqual(db.inventory.count_documents({}), 4) @@ -87,26 +99,40 @@ def test_query_top_level_fields(self): db = self.db # Start Example 6 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "A"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 6 self.assertEqual(db.inventory.count_documents({}), 5) @@ -136,16 +162,15 @@ def test_query_top_level_fields(self): self.assertEqual(len(list(cursor)), 1) # Start Example 12 - cursor = db.inventory.find( - {"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) + cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) # End Example 12 self.assertEqual(len(list(cursor)), 3) # Start Example 13 - cursor = db.inventory.find({ - "status": "A", - "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]}) + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) # End Example 13 self.assertEqual(len(list(cursor)), 2) @@ -157,39 +182,51 @@ def test_query_embedded_documents(self): # Subdocument key order matters in a few of these examples so we have # to use bson.son.SON instead of a Python dict. from bson.son import SON - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), - "status": "A"}, - {"item": "paper", - "qty": 100, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), - "status": "A"}]) + + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "status": "A", + }, + ] + ) # End Example 14 # Start Example 15 - cursor = db.inventory.find( - {"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) # End Example 15 self.assertEqual(len(list(cursor)), 1) # Start Example 16 - cursor = db.inventory.find( - {"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) # End Example 16 self.assertEqual(len(list(cursor)), 0) @@ -207,8 +244,7 @@ def test_query_embedded_documents(self): self.assertEqual(len(list(cursor)), 4) # Start Example 19 - cursor = db.inventory.find( - {"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) # End Example 19 self.assertEqual(len(list(cursor)), 1) @@ -217,27 +253,20 @@ def test_query_arrays(self): db = self.db # Start Example 20 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "tags": ["blank", "red"], - "dim_cm": [14, 21]}, - {"item": "notebook", - "qty": 50, - "tags": ["red", "blank"], - "dim_cm": [14, 21]}, - {"item": "paper", - "qty": 100, - "tags": ["red", "blank", "plain"], - "dim_cm": [14, 21]}, - {"item": "planner", - "qty": 75, - "tags": ["blank", "red"], - "dim_cm": [22.85, 30]}, - {"item": "postcard", - "qty": 45, - "tags": ["blue"], - "dim_cm": [10, 15.25]}]) + db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) # End Example 20 # Start Example 21 @@ -271,8 +300,7 @@ def test_query_arrays(self): self.assertEqual(len(list(cursor)), 4) # Start Example 26 - cursor = db.inventory.find( - {"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) # End Example 26 self.assertEqual(len(list(cursor)), 1) @@ -296,64 +324,74 @@ def test_query_array_of_documents(self): # Subdocument key order matters in a few of these examples so we have # to use bson.son.SON instead of a Python dict. from bson.son import SON - db.inventory.insert_many([ - {"item": "journal", - "instock": [ - SON([("warehouse", "A"), ("qty", 5)]), - SON([("warehouse", "C"), ("qty", 15)])]}, - {"item": "notebook", - "instock": [ - SON([("warehouse", "C"), ("qty", 5)])]}, - {"item": "paper", - "instock": [ - SON([("warehouse", "A"), ("qty", 60)]), - SON([("warehouse", "B"), ("qty", 15)])]}, - {"item": "planner", - "instock": [ - SON([("warehouse", "A"), ("qty", 40)]), - SON([("warehouse", "B"), ("qty", 5)])]}, - {"item": "postcard", - "instock": [ - SON([("warehouse", "B"), ("qty", 15)]), - SON([("warehouse", "C"), ("qty", 35)])]}]) + + db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + SON([("warehouse", "A"), ("qty", 5)]), + SON([("warehouse", "C"), ("qty", 15)]), + ], + }, + {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + { + "item": "paper", + "instock": [ + SON([("warehouse", "A"), ("qty", 60)]), + SON([("warehouse", "B"), ("qty", 15)]), + ], + }, + { + "item": "planner", + "instock": [ + SON([("warehouse", "A"), ("qty", 40)]), + SON([("warehouse", "B"), ("qty", 5)]), + ], + }, + { + "item": "postcard", + "instock": [ + SON([("warehouse", "B"), ("qty", 15)]), + SON([("warehouse", "C"), ("qty", 35)]), + ], + }, + ] + ) # End Example 29 # Start Example 30 - cursor = db.inventory.find( - {"instock": SON([("warehouse", "A"), ("qty", 5)])}) + cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) # End Example 30 self.assertEqual(len(list(cursor)), 1) # Start Example 31 - cursor = db.inventory.find( - {"instock": SON([("qty", 5), ("warehouse", "A")])}) + cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) # End Example 31 self.assertEqual(len(list(cursor)), 0) # Start Example 32 - cursor = db.inventory.find({'instock.0.qty': {"$lte": 20}}) + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) # End Example 32 self.assertEqual(len(list(cursor)), 3) # Start Example 33 - cursor = db.inventory.find({'instock.qty': {"$lte": 20}}) + cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) # End Example 33 self.assertEqual(len(list(cursor)), 5) # Start Example 34 - cursor = db.inventory.find( - {"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) # End Example 34 self.assertEqual(len(list(cursor)), 1) # Start Example 35 - cursor = db.inventory.find( - {"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) # End Example 35 self.assertEqual(len(list(cursor)), 3) @@ -365,8 +403,7 @@ def test_query_array_of_documents(self): self.assertEqual(len(list(cursor)), 4) # Start Example 37 - cursor = db.inventory.find( - {"instock.qty": 5, "instock.warehouse": "A"}) + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) # End Example 37 self.assertEqual(len(list(cursor)), 2) @@ -400,29 +437,40 @@ def test_projection(self): db = self.db # Start Example 42 - db.inventory.insert_many([ - {"item": "journal", - "status": "A", - "size": {"h": 14, "w": 21, "uom": "cm"}, - "instock": [{"warehouse": "A", "qty": 5}]}, - {"item": "notebook", - "status": "A", - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "instock": [{"warehouse": "C", "qty": 5}]}, - {"item": "paper", - "status": "D", - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "instock": [{"warehouse": "A", "qty": 60}]}, - {"item": "planner", - "status": "D", - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "instock": [{"warehouse": "A", "qty": 40}]}, - {"item": "postcard", - "status": "A", - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "instock": [ - {"warehouse": "B", "qty": 15}, - {"warehouse": "C", "qty": 35}]}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) # End Example 42 # Start Example 43 @@ -432,8 +480,7 @@ def test_projection(self): self.assertEqual(len(list(cursor)), 3) # Start Example 44 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) # End Example 44 for doc in cursor: @@ -444,8 +491,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 45 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) # End Example 45 for doc in cursor: @@ -456,8 +502,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 46 - cursor = db.inventory.find( - {"status": "A"}, {"status": 0, "instock": 0}) + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) # End Example 46 for doc in cursor: @@ -468,8 +513,7 @@ def test_projection(self): self.assertFalse("instock" in doc) # Start Example 47 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) # End Example 47 for doc in cursor: @@ -478,10 +522,10 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertTrue("size" in doc) self.assertFalse("instock" in doc) - size = doc['size'] - self.assertTrue('uom' in size) - self.assertFalse('h' in size) - self.assertFalse('w' in size) + size = doc["size"] + self.assertTrue("uom" in size) + self.assertFalse("h" in size) + self.assertFalse("w" in size) # Start Example 48 cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) @@ -493,14 +537,13 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertTrue("size" in doc) self.assertTrue("instock" in doc) - size = doc['size'] - self.assertFalse('uom' in size) - self.assertTrue('h' in size) - self.assertTrue('w' in size) + size = doc["size"] + self.assertFalse("uom" in size) + self.assertTrue("h" in size) + self.assertTrue("w" in size) # Start Example 49 - cursor = db.inventory.find( - {"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) # End Example 49 for doc in cursor: @@ -509,14 +552,14 @@ def test_projection(self): self.assertTrue("status" in doc) self.assertFalse("size" in doc) self.assertTrue("instock" in doc) - for subdoc in doc['instock']: - self.assertFalse('warehouse' in subdoc) - self.assertTrue('qty' in subdoc) + for subdoc in doc["instock"]: + self.assertFalse("warehouse" in subdoc) + self.assertTrue("qty" in subdoc) # Start Example 50 cursor = db.inventory.find( - {"status": "A"}, - {"item": 1, "status": 1, "instock": {"$slice": -1}}) + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) # End Example 50 for doc in cursor: @@ -531,54 +574,77 @@ def test_update_and_replace(self): db = self.db # Start Example 51 - db.inventory.insert_many([ - {"item": "canvas", - "qty": 100, - "size": {"h": 28, "w": 35.5, "uom": "cm"}, - "status": "A"}, - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "mat", - "qty": 85, - "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, - "status": "A"}, - {"item": "mousepad", - "qty": 25, - "size": {"h": 19, "w": 22.85, "uom": "cm"}, - "status": "P"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "P"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}, - {"item": "sketchbook", - "qty": 80, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "sketch pad", - "qty": 95, - "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 51 # Start Example 52 db.inventory.update_one( {"item": "paper"}, - {"$set": {"size.uom": "cm", "status": "P"}, - "$currentDate": {"lastModified": True}}) + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) # End Example 52 for doc in db.inventory.find({"item": "paper"}): @@ -589,8 +655,8 @@ def test_update_and_replace(self): # Start Example 53 db.inventory.update_many( {"qty": {"$lt": 50}}, - {"$set": {"size.uom": "in", "status": "P"}, - "$currentDate": {"lastModified": True}}) + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) # End Example 53 for doc in db.inventory.find({"qty": {"$lt": 50}}): @@ -601,10 +667,11 @@ def test_update_and_replace(self): # Start Example 54 db.inventory.replace_one( {"item": "paper"}, - {"item": "paper", - "instock": [ - {"warehouse": "A", "qty": 60}, - {"warehouse": "B", "qty": 40}]}) + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) # End Example 54 for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): @@ -617,27 +684,40 @@ def test_delete(self): db = self.db # Start Example 55 - db.inventory.insert_many([ - {"item": "journal", - "qty": 25, - "size": {"h": 14, "w": 21, "uom": "cm"}, - "status": "A"}, - {"item": "notebook", - "qty": 50, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "P"}, - {"item": "paper", - "qty": 100, - "size": {"h": 8.5, "w": 11, "uom": "in"}, - "status": "D"}, - {"item": "planner", - "qty": 75, - "size": {"h": 22.85, "w": 30, "uom": "cm"}, - "status": "D"}, - {"item": "postcard", - "qty": 45, - "size": {"h": 10, "w": 15.25, "uom": "cm"}, - "status": "A"}]) + db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) # End Example 55 self.assertEqual(db.inventory.count_documents({}), 5) @@ -682,7 +762,7 @@ def insert_docs(): # End Changestream Example 1 # Start Changestream Example 2 - cursor = db.inventory.watch(full_document='updateLookup') + cursor = db.inventory.watch(full_document="updateLookup") document = next(cursor) # End Changestream Example 2 @@ -694,8 +774,8 @@ def insert_docs(): # Start Changestream Example 4 pipeline = [ - {'$match': {'fullDocument.username': 'alice'}}, - {'$addFields': {'newField': 'this is an added field!'}} + {"$match": {"fullDocument.username": "alice"}}, + {"$addFields": {"newField": "this is an added field!"}}, ] cursor = db.inventory.watch(pipeline=pipeline) document = next(cursor) @@ -708,83 +788,77 @@ def test_aggregate_examples(self): db = self.db # Start Aggregation Example 1 - db.sales.aggregate([ - {"$match": {"items.fruit": "banana"}}, - {"$sort": {"date": 1}} - ]) + db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}]) # End Aggregation Example 1 # Start Aggregation Example 2 - db.sales.aggregate([ - {"$unwind": "$items"}, - {"$match": {"items.fruit": "banana"}}, - {"$group": { - "_id": {"day": {"$dayOfWeek": "$date"}}, - "count": {"$sum": "$items.quantity"}} - }, - {"$project": { - "dayOfWeek": "$_id.day", - "numberSold": "$count", - "_id": 0} - }, - {"$sort": {"numberSold": 1}} - ]) + db.sales.aggregate( + [ + {"$unwind": "$items"}, + {"$match": {"items.fruit": "banana"}}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "count": {"$sum": "$items.quantity"}, + } + }, + {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}}, + {"$sort": {"numberSold": 1}}, + ] + ) # End Aggregation Example 2 # Start Aggregation Example 3 - db.sales.aggregate([ - {"$unwind": "$items"}, - {"$group": { - "_id": {"day": {"$dayOfWeek": "$date"}}, - "items_sold": {"$sum": "$items.quantity"}, - "revenue": { - "$sum": { - "$multiply": [ - "$items.quantity", "$items.price"] - } + db.sales.aggregate( + [ + {"$unwind": "$items"}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "items_sold": {"$sum": "$items.quantity"}, + "revenue": {"$sum": {"$multiply": ["$items.quantity", "$items.price"]}}, } - } - }, - {"$project": { - "day": "$_id.day", - "revenue": 1, - "items_sold": 1, - "discount": { - "$cond": { - "if": {"$lte": ["$revenue", 250]}, - "then": 25, - "else": 0 - } + }, + { + "$project": { + "day": "$_id.day", + "revenue": 1, + "items_sold": 1, + "discount": { + "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0} + }, } - } - } - ]) + }, + ] + ) # End Aggregation Example 3 # Start Aggregation Example 4 - db.air_alliances.aggregate([ - {"$lookup": { - "from": "air_airlines", - "let": {"constituents": "$airlines"}, - "pipeline": [ - {"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}} - ], - "as": "airlines" - } - }, - {"$project": { - "_id": 0, - "name": 1, - "airlines": { - "$filter": { - "input": "$airlines", - "as": "airline", - "cond": {"$eq": ["$$airline.country", "Canada"]} - } + db.air_alliances.aggregate( + [ + { + "$lookup": { + "from": "air_airlines", + "let": {"constituents": "$airlines"}, + "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}], + "as": "airlines", } - } - } - ]) + }, + { + "$project": { + "_id": 0, + "name": 1, + "airlines": { + "$filter": { + "input": "$airlines", + "as": "airline", + "cond": {"$eq": ["$$airline.country", "Canada"]}, + } + }, + } + }, + ] + ) # End Aggregation Example 4 def test_commands(self): @@ -809,7 +883,7 @@ def test_index_management(self): # Start Index Example 1 db.restaurants.create_index( [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)], - partialFilterExpression={"rating": {"$gt": 5}} + partialFilterExpression={"rating": {"$gt": 5}}, ) # End Index Example 1 @@ -823,18 +897,14 @@ def test_misc(self): # 2. Tunable consistency controls collection = client.my_database.my_collection with client.start_session() as session: - collection.insert_one({'_id': 1}, session=session) - collection.update_one( - {'_id': 1}, {"$set": {"a": 1}}, session=session) + collection.insert_one({"_id": 1}, session=session) + collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) for doc in collection.find({}, session=session): pass # 3. Exploiting the power of arrays collection = client.test.array_updates_test - collection.update_one( - {'_id': 1}, - {"$set": {"a.$[i].b": 2}}, - array_filters=[{"i.b": 0}]) + collection.update_one({"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}]) class TestTransactionExamples(IntegrationTest): @@ -848,8 +918,7 @@ def test_transactions(self): employees = client.hr.employees events = client.reporting.events employees.insert_one({"employee": 3, "status": "Active"}) - events.insert_one( - {"employee": 3, "status": {"new": "Active", "old": None}}) + events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}}) # Start Transactions Intro Example 1 @@ -858,15 +927,14 @@ def update_employee_info(session): events_coll = session.client.reporting.events with session.start_transaction( - read_concern=ReadConcern("snapshot"), - write_concern=WriteConcern(w="majority")): + read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority") + ): employees_coll.update_one( - {"employee": 3}, {"$set": {"status": "Inactive"}}, - session=session) + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) events_coll.insert_one( - {"employee": 3, "status": { - "new": "Inactive", "old": "Active"}}, - session=session) + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) while True: try: @@ -876,14 +944,15 @@ def update_employee_info(session): break except (ConnectionFailure, OperationFailure) as exc: # Can retry commit - if exc.has_error_label( - "UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + if exc.has_error_label("UnknownTransactionCommitResult"): + print( + "UnknownTransactionCommitResult, retrying " "commit operation ..." + ) continue else: print("Error during commit ...") raise + # End Transactions Intro Example 1 with client.start_session() as session: @@ -892,7 +961,7 @@ def update_employee_info(session): employee = employees.find_one({"employee": 3}) assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Inactive') + self.assertEqual(employee["status"], "Inactive") # Start Transactions Retry Example 1 def run_transaction_with_retry(txn_func, session): @@ -901,16 +970,15 @@ def run_transaction_with_retry(txn_func, session): txn_func(session) # performs transaction break except (ConnectionFailure, OperationFailure) as exc: - print("Transaction aborted. Caught exception during " - "transaction.") + print("Transaction aborted. Caught exception during " "transaction.") # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying" - "transaction ...") + print("TransientTransactionError, retrying" "transaction ...") continue else: raise + # End Transactions Retry Example 1 with client.start_session() as session: @@ -919,7 +987,7 @@ def run_transaction_with_retry(txn_func, session): employee = employees.find_one({"employee": 3}) assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Inactive') + self.assertEqual(employee["status"], "Inactive") # Start Transactions Retry Example 2 def commit_with_retry(session): @@ -932,23 +1000,21 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + print("UnknownTransactionCommitResult, retrying " "commit operation ...") continue else: print("Error during commit ...") raise + # End Transactions Retry Example 2 # Test commit_with_retry from the previous examples def _insert_employee_retry_commit(session): with session.start_transaction(): - employees.insert_one( - {"employee": 4, "status": "Active"}, - session=session) + employees.insert_one({"employee": 4, "status": "Active"}, session=session) events.insert_one( - {"employee": 4, "status": {"new": "Active", "old": None}}, - session=session) + {"employee": 4, "status": {"new": "Active", "old": None}}, session=session + ) commit_with_retry(session) @@ -958,7 +1024,7 @@ def _insert_employee_retry_commit(session): employee = employees.find_one({"employee": 4}) assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Active') + self.assertEqual(employee["status"], "Active") # Start Transactions Retry Example 3 @@ -970,8 +1036,7 @@ def run_transaction_with_retry(txn_func, session): except (ConnectionFailure, OperationFailure) as exc: # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying " - "transaction ...") + print("TransientTransactionError, retrying " "transaction ...") continue else: raise @@ -986,8 +1051,7 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " - "commit operation ...") + print("UnknownTransactionCommitResult, retrying " "commit operation ...") continue else: print("Error during commit ...") @@ -1000,16 +1064,16 @@ def update_employee_info(session): events_coll = session.client.reporting.events with session.start_transaction( - read_concern=ReadConcern("snapshot"), - write_concern=WriteConcern(w="majority"), - read_preference=ReadPreference.PRIMARY): + read_concern=ReadConcern("snapshot"), + write_concern=WriteConcern(w="majority"), + read_preference=ReadPreference.PRIMARY, + ): employees_coll.update_one( - {"employee": 3}, {"$set": {"status": "Inactive"}}, - session=session) + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) events_coll.insert_one( - {"employee": 3, "status": { - "new": "Inactive", "old": "Active"}}, - session=session) + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) commit_with_retry(session) @@ -1026,7 +1090,7 @@ def update_employee_info(session): employee = employees.find_one({"employee": 3}) assert employee is not None self.assertIsNotNone(employee) - self.assertEqual(employee['status'], 'Inactive') + self.assertEqual(employee["status"], "Inactive") MongoClient = lambda _: rs_client() uriString = None @@ -1042,10 +1106,8 @@ def update_employee_info(session): wc_majority = WriteConcern("majority", wtimeout=1000) # Prereq: Create collections. - client.get_database( - "mydb1", write_concern=wc_majority).foo.insert_one({'abc': 0}) - client.get_database( - "mydb2", write_concern=wc_majority).bar.insert_one({'xyz': 0}) + client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0}) + client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0}) # Step 1: Define the callback that specifies the sequence of operations to perform inside the transactions. def callback(session): @@ -1053,16 +1115,18 @@ def callback(session): collection_two = session.client.mydb2.bar # Important:: You must pass the session to the operations. - collection_one.insert_one({'abc': 1}, session=session) - collection_two.insert_one({'xyz': 999}, session=session) + collection_one.insert_one({"abc": 1}, session=session) + collection_two.insert_one({"xyz": 999}, session=session) # Step 2: Start a client session. with client.start_session() as session: # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). session.with_transaction( - callback, read_concern=ReadConcern('local'), + callback, + read_concern=ReadConcern("local"), write_concern=wc_majority, - read_preference=ReadPreference.PRIMARY) + read_preference=ReadPreference.PRIMARY, + ) # End Transactions withTxn API Example 1 @@ -1073,24 +1137,26 @@ class TestCausalConsistencyExamples(IntegrationTest): def test_causal_consistency(self): # Causal consistency examples client = self.client - self.addCleanup(client.drop_database, 'test') - client.test.drop_collection('items') - client.test.items.insert_one({ - 'sku': "111", 'name': 'Peanuts', - 'start':datetime.datetime.today()}) + self.addCleanup(client.drop_database, "test") + client.test.drop_collection("items") + client.test.items.insert_one( + {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()} + ) # Start Causal Consistency Example 1 with client.start_session(causal_consistency=True) as s1: current_date = datetime.datetime.today() items = client.get_database( - 'test', read_concern=ReadConcern('majority'), - write_concern=WriteConcern('majority', wtimeout=1000)).items + "test", + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items items.update_one( - {'sku': "111", 'end': None}, - {'$set': {'end': current_date}}, session=s1) + {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1 + ) items.insert_one( - {'sku': "nuts-111", 'name': "Pecans", - 'start': current_date}, session=s1) + {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1 + ) # End Causal Consistency Example 1 assert s1.cluster_time is not None @@ -1102,10 +1168,12 @@ def test_causal_consistency(self): s2.advance_operation_time(s1.operation_time) items = client.get_database( - 'test', read_preference=ReadPreference.SECONDARY, - read_concern=ReadConcern('majority'), - write_concern=WriteConcern('majority', wtimeout=1000)).items - for item in items.find({'end': None}, session=s2): + "test", + read_preference=ReadPreference.SECONDARY, + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + for item in items.find({"end": None}, session=s2): print(item) # End Causal Consistency Example 2 @@ -1114,35 +1182,33 @@ class TestVersionedApiExamples(IntegrationTest): @client_context.require_version_min(4, 7) def test_versioned_api(self): # Versioned API examples - MongoClient = lambda _, server_api: rs_client( - server_api=server_api, connect=False) + MongoClient = lambda _, server_api: rs_client(server_api=server_api, connect=False) uri = None # Start Versioned API Example 1 from pymongo.server_api import ServerApi + client = MongoClient(uri, server_api=ServerApi("1")) # End Versioned API Example 1 # Start Versioned API Example 2 - client = MongoClient( - uri, server_api=ServerApi("1", strict=True)) + client = MongoClient(uri, server_api=ServerApi("1", strict=True)) # End Versioned API Example 2 # Start Versioned API Example 3 - client = MongoClient( - uri, server_api=ServerApi("1", strict=False)) + client = MongoClient(uri, server_api=ServerApi("1", strict=False)) # End Versioned API Example 3 # Start Versioned API Example 4 - client = MongoClient( - uri, server_api=ServerApi("1", deprecation_errors=True)) + client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 @client_context.require_version_min(4, 7) def test_versioned_api_migration(self): # SERVER-58785 - if (client_context.is_topology_type(["sharded"]) and - not client_context.version.at_least(5, 0, 2)): + if client_context.is_topology_type(["sharded"]) and not client_context.version.at_least( + 5, 0, 2 + ): self.skipTest("This test needs MongoDB 5.0.2 or newer") client = rs_client(server_api=ServerApi("1", strict=True)) @@ -1151,22 +1217,74 @@ def test_versioned_api_migration(self): # Start Versioned API Example 5 def strptime(s): return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") - client.db.sales.insert_many([ - {"_id": 1, "item": "abc", "price": 10, "quantity": 2, "date": strptime("2021-01-01T08:00:00Z")}, - {"_id": 2, "item": "jkl", "price": 20, "quantity": 1, "date": strptime("2021-02-03T09:00:00Z")}, - {"_id": 3, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-03T09:05:00Z")}, - {"_id": 4, "item": "abc", "price": 10, "quantity": 10, "date": strptime("2021-02-15T08:00:00Z")}, - {"_id": 5, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T09:05:00Z")}, - {"_id": 6, "item": "xyz", "price": 5, "quantity": 5, "date": strptime("2021-02-15T12:05:10Z")}, - {"_id": 7, "item": "xyz", "price": 5, "quantity": 10, "date": strptime("2021-02-15T14:12:12Z")}, - {"_id": 8, "item": "abc", "price": 10, "quantity": 5, "date": strptime("2021-03-16T20:20:13Z")} - ]) + + client.db.sales.insert_many( + [ + { + "_id": 1, + "item": "abc", + "price": 10, + "quantity": 2, + "date": strptime("2021-01-01T08:00:00Z"), + }, + { + "_id": 2, + "item": "jkl", + "price": 20, + "quantity": 1, + "date": strptime("2021-02-03T09:00:00Z"), + }, + { + "_id": 3, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-03T09:05:00Z"), + }, + { + "_id": 4, + "item": "abc", + "price": 10, + "quantity": 10, + "date": strptime("2021-02-15T08:00:00Z"), + }, + { + "_id": 5, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T09:05:00Z"), + }, + { + "_id": 6, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-15T12:05:10Z"), + }, + { + "_id": 7, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T14:12:12Z"), + }, + { + "_id": 8, + "item": "abc", + "price": 10, + "quantity": 5, + "date": strptime("2021-03-16T20:20:13Z"), + }, + ] + ) # End Versioned API Example 5 with self.assertRaisesRegex( - OperationFailure, "Provided apiStrict:true, but the command " - "count is not in API Version 1"): - client.db.command('count', 'sales', query={}) + OperationFailure, + "Provided apiStrict:true, but the command " "count is not in API Version 1", + ): + client.db.command("count", "sales", query={}) # Start Versioned API Example 6 # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} # End Versioned API Example 6 diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 2208e97b42..27d82e242b 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -21,34 +21,34 @@ import io import sys import zipfile - from io import BytesIO from pymongo.database import Database sys.path[0:0] = [""] +from test import IntegrationTest, qcheck, unittest +from test.utils import EventListener, rs_or_single_client + from bson.objectid import ObjectId from gridfs import GridFS -from gridfs.grid_file import (DEFAULT_CHUNK_SIZE, - _SEEK_CUR, - _SEEK_END, - GridIn, - GridOut, - GridOutCursor) from gridfs.errors import NoFile +from gridfs.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + GridIn, + GridOut, + GridOutCursor, +) from pymongo import MongoClient from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress -from test import (IntegrationTest, - unittest, - qcheck) -from test.utils import rs_or_single_client, EventListener class TestGridFileNoConnect(unittest.TestCase): - """Test GridFile features on a client that does not connect. - """ + """Test GridFile features on a client that does not connect.""" + db: Database @classmethod @@ -58,9 +58,17 @@ def setUpClass(cls): def test_grid_in_custom_opts(self): self.assertRaises(TypeError, GridIn, "foo") - a = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") + a = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) self.assertEqual(5, a._id) self.assertEqual("my_file", a.filename) @@ -73,15 +81,13 @@ def test_grid_in_custom_opts(self): self.assertEqual("hello", a.baz) self.assertRaises(AttributeError, getattr, a, "mike") - b = GridIn(self.db.fs, - content_type="text/html", chunk_size=1000, baz=100) + b = GridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) self.assertEqual("text/html", b.content_type) self.assertEqual(1000, b.chunk_size) self.assertEqual(100, b.baz) class TestGridFile(IntegrationTest): - def setUp(self): self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) @@ -226,30 +232,48 @@ def test_grid_out_default_opts(self): self.assertEqual(None, b.metadata) self.assertEqual(None, b.md5) - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, b, attr, 5) def test_grid_out_cursor_options(self): - self.assertRaises(TypeError, GridOutCursor.__init__, self.db.fs, {}, - projection={"filename": 1}) + self.assertRaises( + TypeError, GridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) cursor = GridOutCursor(self.db.fs, {}) cursor_clone = cursor.clone() cursor_dict = cursor.__dict__.copy() - cursor_dict.pop('_Cursor__session') + cursor_dict.pop("_Cursor__session") cursor_clone_dict = cursor_clone.__dict__.copy() - cursor_clone_dict.pop('_Cursor__session') + cursor_clone_dict.pop("_Cursor__session") self.assertDictEqual(cursor_dict, cursor_clone_dict) self.assertRaises(NotImplementedError, cursor.add_option, 0) self.assertRaises(NotImplementedError, cursor.remove_option, 0) def test_grid_out_custom_opts(self): - one = GridIn(self.db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, aliases=["foo"], - metadata={"foo": 1, "bar": 2}, bar=3, baz="hello") + one = GridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) one.write(b"hello world") one.close() @@ -267,8 +291,17 @@ def test_grid_out_custom_opts(self): self.assertEqual(3, two.bar) self.assertEqual(None, two.md5) - for attr in ["_id", "name", "content_type", "length", "chunk_size", - "upload_date", "aliases", "metadata", "md5"]: + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: self.assertRaises(AttributeError, setattr, two, attr, 5) def test_grid_out_file_document(self): @@ -279,8 +312,7 @@ def test_grid_out_file_document(self): two = GridOut(self.db.fs, file_document=self.db.fs.files.find_one()) self.assertEqual(b"foo bar", two.read()) - three = GridOut(self.db.fs, 5, - file_document=self.db.fs.files.find_one()) + three = GridOut(self.db.fs, 5, file_document=self.db.fs.files.find_one()) self.assertEqual(b"foo bar", three.read()) four = GridOut(self.db.fs, file_document={}) @@ -307,8 +339,7 @@ def test_write_file_like(self): five.write(buffer) five.write(b" and mongodb") five.close() - self.assertEqual(b"hello world and mongodb", - GridOut(self.db.fs, five._id).read()) + self.assertEqual(b"hello world and mongodb", GridOut(self.db.fs, five._id).read()) def test_write_lines(self): a = GridIn(self.db.fs) @@ -338,7 +369,7 @@ def test_closed(self): self.assertTrue(g.closed) def test_multi_chunk_file(self): - random_string = b'a' * (DEFAULT_CHUNK_SIZE + 1000) + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) f = GridIn(self.db.fs) f.write(random_string) @@ -372,8 +403,7 @@ def helper(data): self.assertEqual(data, g.read(10) + g.read(10)) return True - qcheck.check_unittest(self, helper, - qcheck.gen_string(qcheck.gen_range(0, 20))) + qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) def test_seek(self): f = GridIn(self.db.fs, chunkSize=3) @@ -431,10 +461,14 @@ def test_multiple_reads(self): def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) - f.write((b"""Hello world, + f.write( + ( + b"""Hello world, How are you? Hope all is well. -Bye""")) +Bye""" + ) + ) f.close() # Try read(), then readline(). @@ -463,10 +497,14 @@ def test_readline(self): def test_readlines(self): f = GridIn(self.db.fs, chunkSize=5) - f.write((b"""Hello world, + f.write( + ( + b"""Hello world, How are you? Hope all is well. -Bye""")) +Bye""" + ) + ) f.close() # Try read(), then readlines(). @@ -486,13 +524,13 @@ def test_readlines(self): # Only readlines(). g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], - g.readlines()) + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines() + ) g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], - g.readlines(0)) + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines(0) + ) g = GridOut(self.db.fs, f._id) self.assertEqual([b"Hello world,\n"], g.readlines(1)) @@ -542,14 +580,13 @@ def test_iterator(self): self.assertEqual([b"hello world"], list(g)) def test_read_unaligned_buffer_size(self): - in_data = (b"This is a text that doesn't " - b"quite fit in a single 16-byte chunk.") + in_data = b"This is a text that doesn't " b"quite fit in a single 16-byte chunk." f = GridIn(self.db.fs, chunkSize=16) f.write(in_data) f.close() g = GridOut(self.db.fs, f._id) - out_data = b'' + out_data = b"" while 1: s = g.read(13) if not s: @@ -559,7 +596,7 @@ def test_read_unaligned_buffer_size(self): self.assertEqual(in_data, out_data) def test_readchunk(self): - in_data = b'a' * 10 + in_data = b"a" * 10 f = GridIn(self.db.fs, chunkSize=3) f.write(in_data) f.close() @@ -639,13 +676,12 @@ def test_context_manager(self): self.assertEqual(contents, outfile.read()) def test_prechunked_string(self): - def write_me(s, chunk_size): buf = BytesIO(s) infile = GridIn(self.db.fs) while True: to_write = buf.read(chunk_size) - if to_write == b'': + if to_write == b"": break infile.write(to_write) infile.close() @@ -655,7 +691,7 @@ def write_me(s, chunk_size): data = outfile.read() self.assertEqual(s, data) - s = b'x' * DEFAULT_CHUNK_SIZE * 4 + s = b"x" * DEFAULT_CHUNK_SIZE * 4 # Test with default chunk size write_me(s, DEFAULT_CHUNK_SIZE) # Multiple @@ -667,7 +703,7 @@ def test_grid_out_lazy_connect(self): fs = self.db.fs outfile = GridOut(fs, file_id=-1) self.assertRaises(NoFile, outfile.read) - self.assertRaises(NoFile, getattr, outfile, 'filename') + self.assertRaises(NoFile, getattr, outfile, "filename") infile = GridIn(fs, filename=1) infile.close() @@ -680,11 +716,10 @@ def test_grid_out_lazy_connect(self): outfile.readchunk() def test_grid_in_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=10) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = GridIn(fs, file_id=-1, chunk_size=1) - self.assertRaises(ServerSelectionTimeoutError, infile.write, b'data') + self.assertRaises(ServerSelectionTimeoutError, infile.write, b"data") self.assertRaises(ServerSelectionTimeoutError, infile.close) def test_unacknowledged(self): @@ -696,7 +731,7 @@ def test_survive_cursor_not_found(self): # By default the find command returns 101 documents in the first batch. # Use 102 batches to cause a single getMore. chunk_size = 1024 - data = b'd' * (102 * chunk_size) + data = b"d" * (102 * chunk_size) listener = EventListener() client = rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test @@ -711,7 +746,8 @@ def test_survive_cursor_not_found(self): # readchunk(). client._close_cursor_now( outfile._GridOut__chunk_iter._cursor.cursor_id, - _CursorAddress(client.address, db.fs.chunks.full_name)) + _CursorAddress(client.address, db.fs.chunks.full_name), + ) # Read the rest of the file without error. self.assertEqual(len(outfile.read()), len(data) - chunk_size) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 3d8a7d8f6b..ec88dcd488 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -21,33 +21,28 @@ import sys import threading import time - from io import BytesIO sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, one, rs_client, rs_or_single_client, single_client + +import gridfs from bson.binary import Binary +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file import GridOutCursor from pymongo.database import Database +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient -from pymongo.errors import (ConfigurationError, - NotPrimaryError, - ServerSelectionTimeoutError) from pymongo.read_preferences import ReadPreference -import gridfs -from gridfs.errors import CorruptGridFile, FileExists, NoFile -from gridfs.grid_file import GridOutCursor -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (joinall, - one, - rs_client, - rs_or_single_client, - single_client) class JustWrite(threading.Thread): - def __init__(self, fs, n): threading.Thread.__init__(self) self.fs = fs @@ -62,7 +57,6 @@ def run(self): class JustRead(threading.Thread): - def __init__(self, fs, n, results): threading.Thread.__init__(self) self.fs = fs @@ -101,8 +95,9 @@ def setUpClass(cls): cls.alt = gridfs.GridFS(cls.db, "alt") def setUp(self): - self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, - self.db.alt.files, self.db.alt.chunks) + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) def test_basic(self): oid = self.fs.put(b"hello world") @@ -146,8 +141,7 @@ def test_list(self): self.fs.put(b"foo", filename="test") self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.fs.list())) + self.assertEqual(set(["mike", "test", "hello world"]), set(self.fs.list())) def test_empty_file(self): oid = self.fs.put(b"") @@ -164,9 +158,8 @@ def test_empty_file(self): self.assertNotIn("md5", raw) def test_corrupt_chunk(self): - files_id = self.fs.put(b'foobar') - self.db.fs.chunks.update_one({'files_id': files_id}, - {'$set': {'data': Binary(b'foo', 0)}}) + files_id = self.fs.put(b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.get(files_id) self.assertRaises(CorruptGridFile, out.read) @@ -184,12 +177,18 @@ def test_put_ensures_index(self): files.drop() self.fs.put(b"junk") - self.assertTrue(any( - info.get('key') == [('files_id', 1), ('n', 1)] - for info in chunks.index_information().values())) - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in chunks.index_information().values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) def test_alt_collection(self): oid = self.alt.put(b"hello world") @@ -211,8 +210,7 @@ def test_alt_collection(self): self.alt.put(b"foo", filename="test") self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), - set(self.alt.list())) + self.assertEqual(set(["mike", "test", "hello world"]), set(self.alt.list())) def test_threaded_reads(self): self.fs.put(b"hello", _id="test") @@ -225,10 +223,7 @@ def test_threaded_reads(self): joinall(threads) - self.assertEqual( - 100 * [b'hello'], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): threads = [] @@ -242,10 +237,7 @@ def test_threaded_writes(self): self.assertEqual(f.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.count_documents({'filename': 'test'}) - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): one = self.fs.put(b"foo", filename="test") @@ -316,30 +308,25 @@ def test_get_version_with_metadata(self): three = self.fs.put(b"baz", filename="test", author="author2") self.assertEqual( - b"foo", - self.fs.get_version( - filename="test", author="author1", version=-2).read()) - self.assertEqual( - b"bar", self.fs.get_version( - filename="test", author="author1", version=-1).read()) - self.assertEqual( - b"foo", self.fs.get_version( - filename="test", author="author1", version=0).read()) + b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read() + ) self.assertEqual( - b"bar", self.fs.get_version( - filename="test", author="author1", version=1).read()) + b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read() + ) self.assertEqual( - b"baz", self.fs.get_version( - filename="test", author="author2", version=0).read()) + b"foo", self.fs.get_version(filename="test", author="author1", version=0).read() + ) self.assertEqual( - b"baz", self.fs.get_version(filename="test", version=-1).read()) + b"bar", self.fs.get_version(filename="test", author="author1", version=1).read() + ) self.assertEqual( - b"baz", self.fs.get_version(filename="test", version=2).read()) + b"baz", self.fs.get_version(filename="test", author="author2", version=0).read() + ) + self.assertEqual(b"baz", self.fs.get_version(filename="test", version=-1).read()) + self.assertEqual(b"baz", self.fs.get_version(filename="test", version=2).read()) - self.assertRaises( - NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises( - NoFile, self.fs.get_version, filename="test", author="author1", version=2) + self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") + self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) self.fs.delete(one) self.fs.delete(two) @@ -359,7 +346,7 @@ def test_file_exists(self): one.close() two = self.fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b'x' * 262146) + self.assertRaises(FileExists, two.write, b"x" * 262146) def test_exists(self): oid = self.fs.put(b"hello") @@ -414,8 +401,7 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(f)) def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=10) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db gfs = gridfs.GridFS(db) self.assertRaises(ServerSelectionTimeoutError, gfs.list) @@ -435,8 +421,7 @@ def test_gridfs_find(self): files = self.db.fs.files self.assertEqual(3, files.count_documents({"filename": "two"})) self.assertEqual(4, files.count_documents({})) - cursor = self.fs.find( - no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) gout = next(cursor) self.assertEqual(b"test1", gout.read()) cursor.rewind() @@ -459,35 +444,34 @@ def test_delete_not_initialized(self): def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) - id1 = self.fs.put(b'test1', filename='file1') + id1 = self.fs.put(b"test1", filename="file1") res = self.fs.find_one() assert res is not None - self.assertEqual(b'test1', res.read()) + self.assertEqual(b"test1", res.read()) - id2 = self.fs.put(b'test2', filename='file2', meta='data') + id2 = self.fs.put(b"test2", filename="file2", meta="data") res1 = self.fs.find_one(id1) assert res1 is not None - self.assertEqual(b'test1', res1.read()) + self.assertEqual(b"test1", res1.read()) res2 = self.fs.find_one(id2) assert res2 is not None - self.assertEqual(b'test2', res2.read()) + self.assertEqual(b"test2", res2.read()) - res3 = self.fs.find_one({'filename': 'file1'}) + res3 = self.fs.find_one({"filename": "file1"}) assert res3 is not None - self.assertEqual(b'test1', res3.read()) + self.assertEqual(b"test1", res3.read()) res4 = self.fs.find_one(id2) assert res4 is not None - self.assertEqual('data', res4.meta) + self.assertEqual("data", res4.meta) def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. - data = b'data' - self.fs.put(data, filename='f') - self.db.fs.files.update_one({'filename': 'f'}, - {'$set': {'chunkSize': 100.0}}) + data = b"data" + self.fs.put(data, filename="f") + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.get_version('f').read()) + self.assertEqual(data, self.fs.get_version("f").read()) def test_unacknowledged(self): # w=0 is prohibited. @@ -509,7 +493,6 @@ def test_md5(self): class TestGridfsReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): @@ -517,51 +500,47 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - client_context.client.drop_database('gfsreplica') + client_context.client.drop_database("gfsreplica") def test_gridfs_replica_set(self): - rsc = rs_client( - w=client_context.w, - read_preference=ReadPreference.SECONDARY) + rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - fs = gridfs.GridFS(rsc.gfsreplica, 'gfsreplicatest') + fs = gridfs.GridFS(rsc.gfsreplica, "gfsreplicatest") gin = fs.new_file() self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) - oid = fs.put(b'foo') + oid = fs.put(b"foo") content = fs.get(oid).read() - self.assertEqual(b'foo', content) + self.assertEqual(b"foo", content) def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) # Should detect it's connected to secondary and not attempt to # create index - fs = gridfs.GridFS(secondary_connection.gfsreplica, 'gfssecondarytest') + fs = gridfs.GridFS(secondary_connection.gfsreplica, "gfssecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, fs.put, b'foo') + self.assertRaises(NotPrimaryError, fs.put, b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. secondary_host, secondary_port = one(self.client.secondaries) client = single_client( - secondary_host, - secondary_port, - read_preference=ReadPreference.SECONDARY, - connect=False) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - fs = gridfs.GridFS(client.gfsreplica, 'gfssecondarylazytest') + fs = gridfs.GridFS(client.gfsreplica, "gfssecondarylazytest") # Connects, doesn't create index. self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotPrimaryError, fs.put, 'data') + self.assertRaises(NotPrimaryError, fs.put, "data") if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 53f94991d3..8b0a9a3936 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -20,32 +20,26 @@ import itertools import threading import time - from io import BytesIO +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, one, rs_client, rs_or_single_client, single_client +import gridfs from bson.binary import Binary from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON -import gridfs -from gridfs.errors import NoFile, CorruptGridFile -from pymongo.errors import (ConfigurationError, - NotPrimaryError, - ServerSelectionTimeoutError) +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference -from test import (client_context, - unittest, - IntegrationTest) -from test.utils import (joinall, - one, - rs_client, - rs_or_single_client, - single_client) class JustWrite(threading.Thread): - def __init__(self, gfs, num): threading.Thread.__init__(self) self.gfs = gfs @@ -60,7 +54,6 @@ def run(self): class JustRead(threading.Thread): - def __init__(self, gfs, num, results): threading.Thread.__init__(self) self.gfs = gfs @@ -84,18 +77,16 @@ class TestGridfs(IntegrationTest): def setUpClass(cls): super(TestGridfs, cls).setUpClass() cls.fs = gridfs.GridFSBucket(cls.db) - cls.alt = gridfs.GridFSBucket( - cls.db, bucket_name="alt") + cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") def setUp(self): - self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, - self.db.alt.files, self.db.alt.chunks) + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) def test_basic(self): - oid = self.fs.upload_from_stream("test_filename", - b"hello world") - self.assertEqual(b"hello world", - self.fs.open_download_stream(oid).read()) + oid = self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) @@ -108,9 +99,7 @@ def test_multi_chunk_delete(self): self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) gfs = gridfs.GridFSBucket(self.db) - oid = gfs.upload_from_stream("test_filename", - b"hello", - chunk_size_bytes=1) + oid = gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(5, self.db.fs.chunks.count_documents({})) gfs.delete(oid) @@ -118,8 +107,7 @@ def test_multi_chunk_delete(self): self.assertEqual(0, self.db.fs.chunks.count_documents({})) def test_empty_file(self): - oid = self.fs.upload_from_stream("test_filename", - b"") + oid = self.fs.upload_from_stream("test_filename", b"") self.assertEqual(b"", self.fs.open_download_stream(oid).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -133,10 +121,8 @@ def test_empty_file(self): self.assertNotIn("md5", raw) def test_corrupt_chunk(self): - files_id = self.fs.upload_from_stream("test_filename", - b'foobar') - self.db.fs.chunks.update_one({'files_id': files_id}, - {'$set': {'data': Binary(b'foo', 0)}}) + files_id = self.fs.upload_from_stream("test_filename", b"foobar") + self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.open_download_stream(files_id) self.assertRaises(CorruptGridFile, out.read) @@ -154,37 +140,45 @@ def test_upload_ensures_index(self): files.drop() self.fs.upload_from_stream("filename", b"junk") - self.assertTrue(any( - info.get('key') == [('files_id', 1), ('n', 1)] - for info in chunks.index_information().values())) - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in chunks.index_information().values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) def test_ensure_index_shell_compat(self): files = self.db.fs.files - for i, j in itertools.combinations_with_replacement( - [1, 1.0, Int64(1)], 2): + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): # Create the index with different numeric types (as might be done # from the mongo shell). - shell_index = [('filename', i), ('uploadDate', j)] - self.db.command('createIndexes', files.name, - indexes=[{'key': SON(shell_index), - 'name': 'filename_1.0_uploadDate_1.0'}]) + shell_index = [("filename", i), ("uploadDate", j)] + self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) # No error. self.fs.upload_from_stream("filename", b"data") - self.assertTrue(any( - info.get('key') == [('filename', 1), ('uploadDate', 1)] - for info in files.index_information().values())) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in files.index_information().values() + ) + ) files.drop() def test_alt_collection(self): - oid = self.alt.upload_from_stream("test_filename", - b"hello world") - self.assertEqual(b"hello world", - self.alt.open_download_stream(oid).read()) + oid = self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", self.alt.open_download_stream(oid).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) @@ -194,18 +188,17 @@ def test_alt_collection(self): self.assertEqual(0, self.db.alt.chunks.count_documents({})) self.assertRaises(NoFile, self.alt.open_download_stream, "foo") - self.alt.upload_from_stream("foo", - b"hello world") - self.assertEqual(b"hello world", - self.alt.open_download_stream_by_name("foo").read()) + self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual(b"hello world", self.alt.open_download_stream_by_name("foo").read()) self.alt.upload_from_stream("mike", b"") self.alt.upload_from_stream("test", b"foo") self.alt.upload_from_stream("hello world", b"") - self.assertEqual(set(["mike", "test", "hello world", "foo"]), - set(k["filename"] for k in list( - self.db.alt.files.find()))) + self.assertEqual( + set(["mike", "test", "hello world", "foo"]), + set(k["filename"] for k in list(self.db.alt.files.find())), + ) def test_threaded_reads(self): self.fs.upload_from_stream("test", b"hello") @@ -218,10 +211,7 @@ def test_threaded_reads(self): joinall(threads) - self.assertEqual( - 100 * [b'hello'], - results - ) + self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): threads = [] @@ -235,10 +225,7 @@ def test_threaded_writes(self): self.assertEqual(fstr.read(), b"hello") # Should have created 100 versions of 'test' file - self.assertEqual( - 100, - self.db.fs.files.count_documents({'filename': 'test'}) - ) + self.assertEqual(100, self.db.fs.files.count_documents({"filename": "test"})) def test_get_last_version(self): one = self.fs.upload_from_stream("test", b"foo") @@ -250,17 +237,13 @@ def test_get_last_version(self): two = two._id three = self.fs.upload_from_stream("test", b"baz") - self.assertEqual(b"baz", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(three) - self.assertEqual(b"bar", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(two) - self.assertEqual(b"foo", - self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test").read()) self.fs.delete(one) - self.assertRaises(NoFile, - self.fs.open_download_stream_by_name, "test") + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test") def test_get_version(self): self.fs.upload_from_stream("test", b"foo") @@ -270,56 +253,41 @@ def test_get_version(self): self.fs.upload_from_stream("test", b"baz") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name( - "test", revision=0).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name( - "test", revision=1).read()) - self.assertEqual(b"baz", self.fs.open_download_stream_by_name( - "test", revision=2).read()) - - self.assertEqual(b"baz", self.fs.open_download_stream_by_name( - "test", revision=-1).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name( - "test", revision=-2).read()) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name( - "test", revision=-3).read()) - - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "test", revision=3) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "test", revision=-4) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=0).read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=1).read()) + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=2).read()) + + self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=-1).read()) + self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=-2).read()) + self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=-3).read()) + + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=3) + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=-4) def test_upload_from_stream(self): - oid = self.fs.upload_from_stream("test_file", - BytesIO(b"hello world"), - chunk_size_bytes=1) + oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", - self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) def test_upload_from_stream_with_id(self): oid = ObjectId() - self.fs.upload_from_stream_with_id(oid, - "test_file_custom_id", - BytesIO(b"custom id"), - chunk_size_bytes=1) - self.assertEqual(b"custom id", - self.fs.open_download_stream(oid).read()) + self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) def test_open_upload_stream(self): gin = self.fs.open_upload_stream("from_stream") gin.write(b"from stream") gin.close() - self.assertEqual(b"from stream", - self.fs.open_download_stream(gin._id).read()) + self.assertEqual(b"from stream", self.fs.open_download_stream(gin._id).read()) def test_open_upload_stream_with_id(self): oid = ObjectId() gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") gin.write(b"from stream with custom id") gin.close() - self.assertEqual(b"from stream with custom id", - self.fs.open_download_stream(oid).read()) + self.assertEqual(b"from stream with custom id", self.fs.open_download_stream(oid).read()) def test_missing_length_iter(self): # Test fix that guards against PHP-237 @@ -338,16 +306,15 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(fstr)) def test_gridfs_lazy_connect(self): - client = MongoClient('badhost', connect=False, - serverSelectionTimeoutMS=0) + client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0) cdb = client.db gfs = gridfs.GridFSBucket(cdb) self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0) gfs = gridfs.GridFSBucket(cdb) self.assertRaises( - ServerSelectionTimeoutError, - gfs.upload_from_stream, "test", b"") # Still no connection. + ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b"" + ) # Still no connection. def test_gridfs_find(self): self.fs.upload_from_stream("two", b"test2") @@ -361,8 +328,8 @@ def test_gridfs_find(self): self.assertEqual(3, files.count_documents({"filename": "two"})) self.assertEqual(4, files.count_documents({})) cursor = self.fs.find( - {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], - skip=1, limit=2) + {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 + ) gout = next(cursor) self.assertEqual(b"test1", gout.read()) cursor.rewind() @@ -376,13 +343,11 @@ def test_gridfs_find(self): def test_grid_in_non_int_chunksize(self): # Lua, and perhaps other buggy GridFS clients, store size as a float. - data = b'data' - self.fs.upload_from_stream('f', data) - self.db.fs.files.update_one({'filename': 'f'}, - {'$set': {'chunkSize': 100.0}}) + data = b"data" + self.fs.upload_from_stream("f", data) + self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, - self.fs.open_download_stream_by_name('f').read()) + self.assertEqual(data, self.fs.open_download_stream_by_name("f").read()) def test_unacknowledged(self): # w=0 is prohibited. @@ -390,29 +355,23 @@ def test_unacknowledged(self): gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test) def test_rename(self): - _id = self.fs.upload_from_stream("first_name", b'testing') - self.assertEqual(b'testing', self.fs.open_download_stream_by_name( - "first_name").read()) + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", self.fs.open_download_stream_by_name("first_name").read()) self.fs.rename(_id, "second_name") - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, - "first_name") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name( - "second_name").read()) + self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") + self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) def test_abort(self): - gin = self.fs.open_upload_stream("test_filename", - chunk_size_bytes=5) + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) gin.write(b"test1") gin.write(b"test2") gin.write(b"test3") - self.assertEqual(3, self.db.fs.chunks.count_documents( - {"files_id": gin._id})) + self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) gin.abort() self.assertTrue(gin.closed) self.assertRaises(ValueError, gin.write, b"test4") - self.assertEqual(0, self.db.fs.chunks.count_documents( - {"files_id": gin._id})) + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) def test_download_to_stream(self): file1 = BytesIO(b"hello world") @@ -429,9 +388,7 @@ def test_download_to_stream(self): self.db.drop_collection("fs.files") self.db.drop_collection("fs.chunks") file1.seek(0) - oid = self.fs.upload_from_stream("many_chunks", - file1, - chunk_size_bytes=1) + oid = self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) file2 = BytesIO() self.fs.download_to_stream(oid, file2) @@ -482,7 +439,6 @@ def test_md5(self): class TestGridfsBucketReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): @@ -490,52 +446,43 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - client_context.client.drop_database('gfsbucketreplica') + client_context.client.drop_database("gfsbucketreplica") def test_gridfs_replica_set(self): - rsc = rs_client( - w=client_context.w, - read_preference=ReadPreference.SECONDARY) + rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) - gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, 'gfsbucketreplicatest') - oid = gfs.upload_from_stream("test_filename", b'foo') + gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") + oid = gfs.upload_from_stream("test_filename", b"foo") content = gfs.open_download_stream(oid).read() - self.assertEqual(b'foo', content) + self.assertEqual(b"foo", content) def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) secondary_connection = single_client( - secondary_host, secondary_port, - read_preference=ReadPreference.SECONDARY) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) # Should detect it's connected to secondary and not attempt to # create index - gfs = gridfs.GridFSBucket( - secondary_connection.gfsbucketreplica, 'gfsbucketsecondarytest') + gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, - "test_filename", b'foo') + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. secondary_host, secondary_port = one(self.client.secondaries) client = single_client( - secondary_host, - secondary_port, - read_preference=ReadPreference.SECONDARY, - connect=False) + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) # Still no connection. - gfs = gridfs.GridFSBucket( - client.gfsbucketreplica, 'gfsbucketsecondarylazytest') + gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, gfs.open_download_stream_by_name, - "test_filename") - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, - "test_filename", b'data') + self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") + self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"data") if __name__ == "__main__": diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 057a7b4841..3c6f6b76c4 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -19,39 +19,35 @@ import os import re import sys - from json import loads sys.path[0:0] = [""] +from test import IntegrationTest, unittest + +import gridfs from bson import Binary from bson.int64 import Int64 from bson.json_util import object_hook -import gridfs -from gridfs.errors import NoFile, CorruptGridFile -from test import (unittest, - IntegrationTest) +from gridfs.errors import CorruptGridFile, NoFile # Commands. -_COMMANDS = {"delete": lambda coll, doc: [coll.delete_many(d["q"]) - for d in doc['deletes']], - "insert": lambda coll, doc: coll.insert_many(doc['documents']), - "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) - for u in doc['updates']] - } +_COMMANDS = { + "delete": lambda coll, doc: [coll.delete_many(d["q"]) for d in doc["deletes"]], + "insert": lambda coll, doc: coll.insert_many(doc["documents"]), + "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) for u in doc["updates"]], +} # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'gridfs') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. Special case for _id. if camel == "id": return "file_id" - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() class TestAllScenarios(IntegrationTest): @@ -66,23 +62,25 @@ def setUpClass(cls): "upload": cls.fs.upload_from_stream, "download": cls.fs.open_download_stream, "delete": cls.fs.delete, - "download_by_name": cls.fs.open_download_stream_by_name} + "download_by_name": cls.fs.open_download_stream_by_name, + } def init_db(self, data, test): - self.cleanup_colls(self.db.fs.files, self.db.fs.chunks, - self.db.expected.files, self.db.expected.chunks) + self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.expected.files, self.db.expected.chunks + ) # Read in data. - if data['files']: - self.db.fs.files.insert_many(data['files']) - self.db.expected.files.insert_many(data['files']) - if data['chunks']: - self.db.fs.chunks.insert_many(data['chunks']) - self.db.expected.chunks.insert_many(data['chunks']) + if data["files"]: + self.db.fs.files.insert_many(data["files"]) + self.db.expected.files.insert_many(data["files"]) + if data["chunks"]: + self.db.fs.chunks.insert_many(data["chunks"]) + self.db.expected.chunks.insert_many(data["chunks"]) # Make initial modifications. if "arrange" in test: - for cmd in test['arrange'].get('data', []): + for cmd in test["arrange"].get("data", []): for key in cmd.keys(): if key in _COMMANDS: coll = self.db.get_collection(cmd[key]) @@ -90,11 +88,11 @@ def init_db(self, data, test): def init_expected_db(self, test, result): # Modify outcome DB. - for cmd in test['assert'].get('data', []): + for cmd in test["assert"].get("data", []): for key in cmd.keys(): if key in _COMMANDS: # Replace wildcards in inserts. - for doc in cmd.get('documents', []): + for doc in cmd.get("documents", []): keylist = doc.keys() for dockey in copy.deepcopy(list(keylist)): if "result" in str(doc[dockey]): @@ -107,8 +105,8 @@ def init_expected_db(self, test, result): coll = self.db.get_collection(cmd[key]) _COMMANDS[key](coll, cmd) - if test['assert'].get('result') == "&result": - test['assert']['result'] = result + if test["assert"].get("result") == "&result": + test["assert"]["result"] = result def sorted_list(self, coll, ignore_id): to_sort = [] @@ -129,30 +127,28 @@ def create_test(scenario_def): def run_scenario(self): # Run tests. - self.assertTrue(scenario_def['tests'], "tests cannot be empty") - for test in scenario_def['tests']: - self.init_db(scenario_def['data'], test) + self.assertTrue(scenario_def["tests"], "tests cannot be empty") + for test in scenario_def["tests"]: + self.init_db(scenario_def["data"], test) # Run GridFs Operation. - operation = self.str_to_cmd[test['act']['operation']] - args = test['act']['arguments'] + operation = self.str_to_cmd[test["act"]["operation"]] + args = test["act"]["arguments"] extra_opts = args.pop("options", {}) if "contentType" in extra_opts: - extra_opts["metadata"] = { - "contentType": extra_opts.pop("contentType")} + extra_opts["metadata"] = {"contentType": extra_opts.pop("contentType")} args.update(extra_opts) - converted_args = dict((camel_to_snake(c), v) - for c, v in args.items()) + converted_args = dict((camel_to_snake(c), v) for c, v in args.items()) - expect_error = test['assert'].get("error", False) + expect_error = test["assert"].get("error", False) result = None error = None try: result = operation(**converted_args) - if 'download' in test['act']['operation']: + if "download" in test["act"]["operation"]: result = Binary(result.read()) except Exception as exc: if not expect_error: @@ -162,47 +158,51 @@ def run_scenario(self): self.init_expected_db(test, result) # Asserts. - errors = {"FileNotFound": NoFile, - "ChunkIsMissing": CorruptGridFile, - "ExtraChunk": CorruptGridFile, - "ChunkIsWrongSize": CorruptGridFile, - "RevisionNotFound": NoFile} + errors = { + "FileNotFound": NoFile, + "ChunkIsMissing": CorruptGridFile, + "ExtraChunk": CorruptGridFile, + "ChunkIsWrongSize": CorruptGridFile, + "RevisionNotFound": NoFile, + } if expect_error: self.assertIsNotNone(error) - self.assertIsInstance(error, errors[test['assert']['error']], - test['description']) + self.assertIsInstance(error, errors[test["assert"]["error"]], test["description"]) else: self.assertIsNone(error) - if 'result' in test['assert']: - if test['assert']['result'] == 'void': - test['assert']['result'] = None - self.assertEqual(result, test['assert'].get('result')) + if "result" in test["assert"]: + if test["assert"]["result"] == "void": + test["assert"]["result"] = None + self.assertEqual(result, test["assert"].get("result")) - if 'data' in test['assert']: + if "data" in test["assert"]: # Create alphabetized list self.assertEqual( set(self.sorted_list(self.db.fs.chunks, True)), - set(self.sorted_list(self.db.expected.chunks, True))) + set(self.sorted_list(self.db.expected.chunks, True)), + ) self.assertEqual( set(self.sorted_list(self.db.fs.files, False)), - set(self.sorted_list(self.db.expected.files, False))) + set(self.sorted_list(self.db.expected.files, False)), + ) return run_scenario + def _object_hook(dct): - if 'length' in dct: - dct['length'] = Int64(dct['length']) + if "length" in dct: + dct["length"] = Int64(dct["length"]) return object_hook(dct) + def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = loads( - scenario_stream.read(), object_hook=_object_hook) + scenario_def = loads(scenario_stream.read(), object_hook=_object_hook) # Because object_hook is already defined by bson.json_util, # and everything is named 'data' @@ -210,7 +210,7 @@ def str2hex(jsn): for key, val in jsn.items(): if key in ("data", "source", "result"): if "$hex" in val: - jsn[key] = Binary(bytes.fromhex(val['$hex'])) + jsn[key] = Binary(bytes.fromhex(val["$hex"])) if isinstance(jsn[key], dict): str2hex(jsn[key]) if isinstance(jsn[key], list): @@ -221,8 +221,7 @@ def str2hex(jsn): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s' % ( - os.path.splitext(filename)[0]) + test_name = "test_%s" % (os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 6941e6bd84..cd4a875e9e 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -19,21 +19,21 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_knobs, unittest +from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until + from pymongo.errors import ConnectionFailure from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor -from test import unittest, client_knobs, IntegrationTest -from test.utils import (HeartbeatEventListener, MockPool, single_client, - wait_until) class TestHeartbeatMonitoring(IntegrationTest): - def create_mock_monitor(self, responses, uri, expected_results): listener = HeartbeatEventListener() - with client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1, - events_queue_frequency=0.1): + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + class MockMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if isinstance(responses[1], Exception): @@ -41,27 +41,21 @@ def _check_with_socket(self, *args, **kwargs): return Hello(responses[1]), 99 m = single_client( - h=uri, - event_listeners=(listener,), - _monitor_class=MockMonitor, - _pool_class=MockPool) + h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool + ) expected_len = len(expected_results) # Wait for *at least* expected_len number of results. The # monitor thread may run multiple times during the execution # of this test. - wait_until( - lambda: len(listener.events) >= expected_len, - "publish all events") + wait_until(lambda: len(listener.events) >= expected_len, "publish all events") try: # zip gives us len(expected_results) pairs. for expected, actual in zip(expected_results, listener.events): - self.assertEqual(expected, - actual.__class__.__name__) - self.assertEqual(actual.connection_id, - responses[0]) - if expected != 'ServerHeartbeatStartedEvent': + self.assertEqual(expected, actual.__class__.__name__) + self.assertEqual(actual.connection_id, responses[0]) + if expected != "ServerHeartbeatStartedEvent": if isinstance(actual.reply, Hello): self.assertEqual(actual.duration, 99) self.assertEqual(actual.reply._doc, responses[1]) @@ -72,28 +66,25 @@ def _check_with_socket(self, *args, **kwargs): m.close() def test_standalone(self): - responses = (('a', 27017), - { - HelloCompat.LEGACY_CMD: True, - "maxWireVersion": 4, - "minWireVersion": 0, - "ok": 1 - }) + responses = ( + ("a", 27017), + {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1}, + ) uri = "mongodb://a:27017" - expected_results = ['ServerHeartbeatStartedEvent', - 'ServerHeartbeatSucceededEvent'] + expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"] self.create_mock_monitor(responses, uri, expected_results) def test_standalone_error(self): - responses = (('a', 27017), - ConnectionFailure("SPECIAL MESSAGE")) + responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE")) uri = "mongodb://a:27017" # _check_with_socket failing results in a second attempt. - expected_results = ['ServerHeartbeatStartedEvent', - 'ServerHeartbeatFailedEvent', - 'ServerHeartbeatStartedEvent', - 'ServerHeartbeatFailedEvent'] + expected_results = [ + "ServerHeartbeatStartedEvent", + "ServerHeartbeatFailedEvent", + "ServerHeartbeatStartedEvent", + "ServerHeartbeatFailedEvent", + ] self.create_mock_monitor(responses, uri, expected_results) diff --git a/test/test_json_util.py b/test/test_json_util.py index 16c7d96a2f..203542e822 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -19,21 +19,30 @@ import re import sys import uuid - from typing import Any, List, MutableMapping sys.path[0:0] = [""] -from bson import json_util, EPOCH_AWARE, EPOCH_NAIVE, SON -from bson.json_util import (DatetimeRepresentation, - JSONMode, - JSONOptions, - LEGACY_JSON_OPTIONS) -from bson.binary import (ALL_UUID_REPRESENTATIONS, Binary, MD5_SUBTYPE, - USER_DEFINED_SUBTYPE, UuidRepresentation, STANDARD) +from test import IntegrationTest, unittest + +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, json_util +from bson.binary import ( + ALL_UUID_REPRESENTATIONS, + MD5_SUBTYPE, + STANDARD, + USER_DEFINED_SUBTYPE, + Binary, + UuidRepresentation, +) from bson.code import Code from bson.dbref import DBRef from bson.int64 import Int64 +from bson.json_util import ( + LEGACY_JSON_OPTIONS, + DatetimeRepresentation, + JSONMode, + JSONOptions, +) from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId @@ -41,14 +50,12 @@ from bson.timestamp import Timestamp from bson.tz_util import FixedOffset, utc -from test import unittest, IntegrationTest - - STRICT_JSON_OPTIONS = JSONOptions( strict_number_long=True, datetime_representation=DatetimeRepresentation.ISO8601, strict_uuid=True, - json_mode=JSONMode.LEGACY) + json_mode=JSONMode.LEGACY, +) class TestJsonUtil(unittest.TestCase): @@ -63,15 +70,13 @@ def test_basic(self): def test_json_options_with_options(self): opts = JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG, - json_mode=JSONMode.LEGACY) - self.assertEqual( - opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts.datetime_representation, DatetimeRepresentation.NUMBERLONG) opts2 = opts.with_options( - datetime_representation=DatetimeRepresentation.ISO8601, - json_mode=JSONMode.LEGACY) - self.assertEqual( - opts2.datetime_representation, DatetimeRepresentation.ISO8601) + datetime_representation=DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY + ) + self.assertEqual(opts2.datetime_representation, DatetimeRepresentation.ISO8601) opts = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) self.assertEqual(opts.strict_number_long, True) @@ -79,16 +84,12 @@ def test_json_options_with_options(self): self.assertEqual(opts2.strict_number_long, False) opts = json_util.CANONICAL_JSON_OPTIONS - self.assertNotEqual( - opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) - opts2 = opts.with_options( - uuid_representation=UuidRepresentation.JAVA_LEGACY) - self.assertEqual( - opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertNotEqual(opts.uuid_representation, UuidRepresentation.JAVA_LEGACY) + opts2 = opts.with_options(uuid_representation=UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY) self.assertEqual(opts2.document_class, dict) opts3 = opts2.with_options(document_class=SON) - self.assertEqual( - opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) + self.assertEqual(opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY) self.assertEqual(opts3.document_class, SON) def test_objectid(self): @@ -102,41 +103,42 @@ def test_dbref(self): # Check order. self.assertEqual( '{"$ref": "collection", "$id": 1, "$db": "db"}', - json_util.dumps(DBRef('collection', 1, 'db'))) + json_util.dumps(DBRef("collection", 1, "db")), + ) def test_datetime(self): - tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options( - tz_aware=True) + tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options(tz_aware=True) # only millis, not micros - self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, - 191000, utc)}, json_options=tz_aware_opts) - self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, - 49, 45, 191000)}) - - for jsn in ['{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', - '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00"}}', - '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', - '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', - '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}' - ]: - self.assertEqual(EPOCH_AWARE, json_util.loads( - jsn, json_options=tz_aware_opts)["dt"]) + self.round_trip( + {"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000, utc)}, + json_options=tz_aware_opts, + ) + self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000)}) + + for jsn in [ + '{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', + '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}', + '{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}', + ]: + self.assertEqual(EPOCH_AWARE, json_util.loads(jsn, json_options=tz_aware_opts)["dt"]) self.assertEqual(EPOCH_NAIVE, json_util.loads(jsn)["dt"]) dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc) @@ -149,84 +151,99 @@ def test_datetime(self): pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)} post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)} self.assertEqual( - '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch)) + '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch) + ) self.assertEqual( - '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch)) + '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch) + ) self.assertEqual( '{"dt": {"$date": -62135593138990}}', - json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": 63075661010}}', - json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS), + ) number_long_options = JSONOptions( - datetime_representation=DatetimeRepresentation.NUMBERLONG, - json_mode=JSONMode.LEGACY) + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "63075661010"}}}', - json_util.dumps(post_epoch, json_options=number_long_options)) + json_util.dumps(post_epoch, json_options=number_long_options), + ) self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch, json_options=number_long_options)) + json_util.dumps(pre_epoch, json_options=number_long_options), + ) # ISO8601 mode assumes naive datetimes are UTC pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)} - post_epoch_naive = { - "dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} + post_epoch_naive = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)} self.assertEqual( '{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', - json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) self.assertEqual( '{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', - json_util.dumps(post_epoch_naive, - json_options=STRICT_JSON_OPTIONS)) + json_util.dumps(post_epoch_naive, json_options=STRICT_JSON_OPTIONS), + ) # Test tz_aware and tzinfo options self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( - '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=tz_aware_opts)["dt"]) + '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', json_options=tz_aware_opts + )["dt"], + ) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=JSONOptions(tz_aware=True, - tzinfo=utc))["dt"]) + json_options=JSONOptions(tz_aware=True, tzinfo=utc), + )["dt"], + ) self.assertEqual( datetime.datetime(1972, 1, 1, 1, 1, 1, 10000), json_util.loads( '{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', - json_options=JSONOptions(tz_aware=False))["dt"]) - self.round_trip(pre_epoch_naive, json_options=JSONOptions( - tz_aware=False)) + json_options=JSONOptions(tz_aware=False), + )["dt"], + ) + self.round_trip(pre_epoch_naive, json_options=JSONOptions(tz_aware=False)) # Test a non-utc timezone - pacific = FixedOffset(-8 * 60, 'US/Pacific') - aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, - pacific)} + pacific = FixedOffset(-8 * 60, "US/Pacific") + aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, pacific)} self.assertEqual( '{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}', - json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS)) - self.round_trip(aware_datetime, json_options=JSONOptions( - json_mode=JSONMode.LEGACY, - tz_aware=True, tzinfo=pacific)) - self.round_trip(aware_datetime, json_options=JSONOptions( - datetime_representation=DatetimeRepresentation.ISO8601, - json_mode=JSONMode.LEGACY, - tz_aware=True, tzinfo=pacific)) + json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions(json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific), + ) + self.round_trip( + aware_datetime, + json_options=JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + json_mode=JSONMode.LEGACY, + tz_aware=True, + tzinfo=pacific, + ), + ) def test_regex_object_hook(self): # Extended JSON format regular expression. - pat = 'a*b' + pat = "a*b" json_re = '{"$regex": "%s", "$options": "u"}' % pat loaded = json_util.object_hook(json.loads(json_re)) self.assertTrue(isinstance(loaded, Regex)) @@ -234,9 +251,7 @@ def test_regex_object_hook(self): self.assertEqual(re.U, loaded.flags) def test_regex(self): - for regex_instance in ( - re.compile("a*b", re.IGNORECASE), - Regex("a*b", re.IGNORECASE)): + for regex_instance in (re.compile("a*b", re.IGNORECASE), Regex("a*b", re.IGNORECASE)): res = self.round_tripped({"r": regex_instance})["r"] self.assertEqual("a*b", res.pattern) @@ -244,33 +259,34 @@ def test_regex(self): self.assertEqual("a*b", res.pattern) self.assertEqual(re.IGNORECASE, res.flags) - unicode_options = re.I|re.M|re.S|re.U|re.X + unicode_options = re.I | re.M | re.S | re.U | re.X regex = re.compile("a*b", unicode_options) res = self.round_tripped({"r": regex})["r"] self.assertEqual(unicode_options, res.flags) # Some tools may not add $options if no flags are set. - res = json_util.loads('{"r": {"$regex": "a*b"}}')['r'] + res = json_util.loads('{"r": {"$regex": "a*b"}}')["r"] self.assertEqual(0, res.flags) self.assertEqual( - Regex('.*', 'ilm'), - json_util.loads( - '{"r": {"$regex": ".*", "$options": "ilm"}}')['r']) + Regex(".*", "ilm"), json_util.loads('{"r": {"$regex": ".*", "$options": "ilm"}}')["r"] + ) # Check order. self.assertEqual( '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', - json_util.dumps(Regex('.*', re.M | re.X))) + json_util.dumps(Regex(".*", re.M | re.X)), + ) self.assertEqual( '{"$regularExpression": {"pattern": ".*", "options": "mx"}}', - json_util.dumps(re.compile(b'.*', re.M | re.X))) + json_util.dumps(re.compile(b".*", re.M | re.X)), + ) self.assertEqual( '{"$regex": ".*", "$options": "mx"}', - json_util.dumps(Regex('.*', re.M | re.X), - json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(Regex(".*", re.M | re.X), json_options=LEGACY_JSON_OPTIONS), + ) def test_regex_validation(self): non_str_types = [10, {}, []] @@ -297,87 +313,94 @@ def test_timestamp(self): def test_uuid_default(self): # Cannot directly encode native UUIDs with the default # uuid_representation. - doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} - with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'): + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} + with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): json_util.dumps(doc) legacy_jsn = '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}' - expected = {'uuid': Binary( - b'\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y', 4)} + expected = {"uuid": Binary(b"\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y", 4)} self.assertEqual(json_util.loads(legacy_jsn), expected) def test_uuid(self): - doc = {'uuid': uuid.UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')} + doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")} uuid_legacy_opts = LEGACY_JSON_OPTIONS.with_options( - uuid_representation=UuidRepresentation.PYTHON_LEGACY) + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ) self.round_trip(doc, json_options=uuid_legacy_opts) self.assertEqual( '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}', - json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS)) + json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS), + ) self.assertEqual( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( - doc, json_options=STRICT_JSON_OPTIONS.with_options( - uuid_representation=UuidRepresentation.PYTHON_LEGACY))) - self.assertEqual( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + doc, + json_options=STRICT_JSON_OPTIONS.with_options( + uuid_representation=UuidRepresentation.PYTHON_LEGACY + ), + ), + ) + self.assertEqual( + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_util.dumps( - doc, json_options=JSONOptions( - strict_uuid=True, json_mode=JSONMode.LEGACY, - uuid_representation=STANDARD))) - self.assertEqual( - doc, json_util.loads( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', - json_options=uuid_legacy_opts)) - for uuid_representation in (set(ALL_UUID_REPRESENTATIONS) - - {UuidRepresentation.UNSPECIFIED}): + doc, + json_options=JSONOptions( + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=STANDARD + ), + ), + ) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + json_options=uuid_legacy_opts, + ), + ) + for uuid_representation in set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}: options = JSONOptions( - strict_uuid=True, json_mode=JSONMode.LEGACY, - uuid_representation=uuid_representation) + strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=uuid_representation + ) self.round_trip(doc, json_options=options) # Ignore UUID representation when decoding BSON binary subtype 4. - self.assertEqual(doc, json_util.loads( - '{"uuid": ' - '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', - json_options=options)) + self.assertEqual( + doc, + json_util.loads( + '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + json_options=options, + ), + ) def test_uuid_uuid_rep_unspecified(self): _uuid = uuid.uuid4() options = JSONOptions( strict_uuid=True, json_mode=JSONMode.LEGACY, - uuid_representation=UuidRepresentation.UNSPECIFIED) + uuid_representation=UuidRepresentation.UNSPECIFIED, + ) # Cannot directly encode native UUIDs with UNSPECIFIED. - doc = {'uuid': _uuid} + doc = {"uuid": _uuid} with self.assertRaises(ValueError): json_util.dumps(doc, json_options=options) # All UUID subtypes are decoded as Binary with UNSPECIFIED. # subtype 3 - doc = {'uuid': Binary(_uuid.bytes, subtype=3)} + doc = {"uuid": Binary(_uuid.bytes, subtype=3)} ext_json_str = json_util.dumps(doc) - self.assertEqual( - doc, json_util.loads(ext_json_str, json_options=options)) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) # subtype 4 - doc = {'uuid': Binary(_uuid.bytes, subtype=4)} + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} ext_json_str = json_util.dumps(doc) - self.assertEqual( - doc, json_util.loads(ext_json_str, json_options=options)) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) # $uuid-encoded fields - doc = {'uuid': Binary(_uuid.bytes, subtype=4)} - ext_json_str = json_util.dumps({'uuid': _uuid}, - json_options=LEGACY_JSON_OPTIONS) - self.assertEqual( - doc, json_util.loads(ext_json_str, json_options=options)) + doc = {"uuid": Binary(_uuid.bytes, subtype=4)} + ext_json_str = json_util.dumps({"uuid": _uuid}, json_options=LEGACY_JSON_OPTIONS) + self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options)) def test_binary(self): bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"} md5_type_dict = { - "md5": Binary(b' n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac', - MD5_SUBTYPE)} + "md5": Binary(b" n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac", MD5_SUBTYPE) + } custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)} self.round_trip(bin_type_dict) @@ -385,43 +408,47 @@ def test_binary(self): self.round_trip(custom_type_dict) # Binary with subtype 0 is decoded into bytes in Python 3. - bin = json_util.loads( - '{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')['bin'] + bin = json_util.loads('{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')["bin"] self.assertEqual(type(bin), bytes) # PYTHON-443 ensure old type formats are supported - json_bin_dump = json_util.dumps(bin_type_dict, - json_options=LEGACY_JSON_OPTIONS) + json_bin_dump = json_util.dumps(bin_type_dict, json_options=LEGACY_JSON_OPTIONS) self.assertIn('"$type": "00"', json_bin_dump) - self.assertEqual(bin_type_dict, - json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}')) - json_bin_dump = json_util.dumps(md5_type_dict, - json_options=LEGACY_JSON_OPTIONS) + self.assertEqual( + bin_type_dict, json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}') + ) + json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS) # Check order. self.assertEqual( - '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' - + ' "$type": "05"}}', - json_bin_dump) + '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' + ' "$type": "05"}}', json_bin_dump + ) - self.assertEqual(md5_type_dict, - json_util.loads('{"md5": {"$type": 5, "$binary":' - ' "IG43GK8JL9HRL4DK53HMrA=="}}')) + self.assertEqual( + md5_type_dict, + json_util.loads('{"md5": {"$type": 5, "$binary":' ' "IG43GK8JL9HRL4DK53HMrA=="}}'), + ) - json_bin_dump = json_util.dumps(custom_type_dict, - json_options=LEGACY_JSON_OPTIONS) + json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS) self.assertIn('"$type": "80"', json_bin_dump) - self.assertEqual(custom_type_dict, - json_util.loads('{"custom": {"$type": 128, "$binary":' - ' "aGVsbG8="}}')) + self.assertEqual( + custom_type_dict, + json_util.loads('{"custom": {"$type": 128, "$binary":' ' "aGVsbG8="}}'), + ) # Handle mongoexport where subtype >= 128 - self.assertEqual(128, - json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 128, + json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' ' "aGVsbG8="}}')[ + "custom" + ].subtype, + ) - self.assertEqual(255, - json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' - ' "aGVsbG8="}}')['custom'].subtype) + self.assertEqual( + 255, + json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' ' "aGVsbG8="}}')[ + "custom" + ].subtype, + ) def test_code(self): self.round_trip({"code": Code("function x() { return 1; }")}) @@ -433,34 +460,30 @@ def test_code(self): # Check order. self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res) - no_scope = Code('function() {}') - self.assertEqual( - '{"$code": "function() {}"}', json_util.dumps(no_scope)) + no_scope = Code("function() {}") + self.assertEqual('{"$code": "function() {}"}', json_util.dumps(no_scope)) def test_undefined(self): jsn = '{"name": {"$undefined": true}}' - self.assertIsNone(json_util.loads(jsn)['name']) + self.assertIsNone(json_util.loads(jsn)["name"]) def test_numberlong(self): jsn = '{"weight": {"$numberLong": "65535"}}' - self.assertEqual(json_util.loads(jsn)['weight'], - Int64(65535)) - self.assertEqual(json_util.dumps({"weight": Int64(65535)}), - '{"weight": 65535}') - json_options = JSONOptions(strict_number_long=True, - json_mode=JSONMode.LEGACY) - self.assertEqual(json_util.dumps({"weight": Int64(65535)}, - json_options=json_options), - jsn) + self.assertEqual(json_util.loads(jsn)["weight"], Int64(65535)) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') + json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) + self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) def test_loads_document_class(self): # document_class dict should always work - self.assertEqual({"foo": "bar"}, json_util.loads( - '{"foo": "bar"}', - json_options=JSONOptions(document_class=dict))) - self.assertEqual(SON([("foo", "bar"), ("b", 1)]), json_util.loads( - '{"foo": "bar", "b": 1}', - json_options=JSONOptions(document_class=SON))) + self.assertEqual( + {"foo": "bar"}, + json_util.loads('{"foo": "bar"}', json_options=JSONOptions(document_class=dict)), + ) + self.assertEqual( + SON([("foo", "bar"), ("b", 1)]), + json_util.loads('{"foo": "bar", "b": 1}', json_options=JSONOptions(document_class=SON)), + ) class TestJsonUtilRoundtrip(IntegrationTest): @@ -469,12 +492,11 @@ def test_cursor(self): db.drop_collection("test") docs: List[MutableMapping[str, Any]] = [ - {'foo': [1, 2]}, - {'bar': {'hello': 'world'}}, - {'code': Code("function x() { return 1; }")}, - {'bin': Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, - {'dbref': {'_ref': DBRef('simple', - ObjectId('509b8db456c02c5ab7e63c34'))}} + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, ] db.test.insert_many(docs) @@ -482,5 +504,6 @@ def test_cursor(self): for doc in docs: self.assertTrue(doc in reloaded_docs) + if __name__ == "__main__": unittest.main() diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 247072c7bd..547cf327d3 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -21,16 +21,12 @@ sys.path[0:0] = [""] -from test import unittest, IntegrationTest, client_context -from test.utils import (ExceptionCatchingThread, - get_pool, - rs_client, - wait_until) +from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes +from test.utils import ExceptionCatchingThread, get_pool, rs_client, wait_until # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'load_balancer') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "load_balancer") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) @@ -45,7 +41,7 @@ def test_connections_are_only_returned_once(self): nconns = len(pool.sockets) self.db.test.find_one({}) self.assertEqual(len(pool.sockets), nconns) - list(self.db.test.aggregate([{'$limit': 1}])) + list(self.db.test.aggregate([{"$limit": 1}])) self.assertEqual(len(pool.sockets), nconns) @client_context.require_load_balancer @@ -68,6 +64,7 @@ def create_resource(coll): cursor = coll.find({}, batch_size=3) next(cursor) return cursor + self._test_no_gc_deadlock(create_resource) @client_context.require_failCommand_fail_point @@ -76,6 +73,7 @@ def create_resource(coll): cursor = coll.aggregate([], batchSize=3) next(cursor) return cursor + self._test_no_gc_deadlock(create_resource) def _test_no_gc_deadlock(self, create_resource): @@ -87,15 +85,11 @@ def _test_no_gc_deadlock(self, create_resource): self.assertEqual(pool.active_sockets, 0) # Cause the initial find attempt to fail to induce a reference cycle. args = { - "mode": { - "times": 1 - }, + "mode": {"times": 1}, "data": { - "failCommands": [ - "find", "aggregate" - ], - "closeConnection": True, - } + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, } with self.fail_point(args): resource = create_resource(coll) @@ -104,7 +98,7 @@ def _test_no_gc_deadlock(self, create_resource): thread = PoolLocker(pool) thread.start() - self.assertTrue(thread.locked.wait(5), 'timed out') + self.assertTrue(thread.locked.wait(5), "timed out") # Garbage collect the resource while the pool is locked to ensure we # don't deadlock. del resource @@ -116,7 +110,7 @@ def _test_no_gc_deadlock(self, create_resource): self.assertFalse(thread.is_alive()) self.assertIsNone(thread.exc) - wait_until(lambda: pool.active_sockets == 0, 'return socket') + wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. coll.delete_many({}) @@ -133,7 +127,7 @@ def test_session_gc(self): thread = PoolLocker(pool) thread.start() - self.assertTrue(thread.locked.wait(5), 'timed out') + self.assertTrue(thread.locked.wait(5), "timed out") # Garbage collect the session while the pool is locked to ensure we # don't deadlock. del session @@ -145,7 +139,7 @@ def test_session_gc(self): self.assertFalse(thread.is_alive()) self.assertIsNone(thread.exc) - wait_until(lambda: pool.active_sockets == 0, 'return socket') + wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. client[self.db.name].test.delete_many({}) @@ -164,8 +158,7 @@ def lock_pool(self): # Wait for the unlock flag. unlock_pool = self.unlock.wait(10) if not unlock_pool: - raise Exception('timed out waiting for unlock signal:' - ' deadlock?') + raise Exception("timed out waiting for unlock signal:" " deadlock?") if __name__ == "__main__": diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 5c484fe334..4c17701133 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -21,18 +21,16 @@ sys.path[0:0] = [""] -from pymongo import MongoClient -from pymongo.errors import ConfigurationError -from pymongo.server_selectors import writable_server_selector - from test import client_context, unittest from test.utils import rs_or_single_client from test.utils_selection_tests import create_selection_tests +from pymongo import MongoClient +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'max_staleness') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "max_staleness") class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore @@ -54,26 +52,21 @@ def test_max_staleness(self): with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?readPreference=primary&" - "maxStalenessSeconds=120") + MongoClient("mongodb://a/?readPreference=primary&" "maxStalenessSeconds=120") client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&" - "maxStalenessSeconds=-1") + client = MongoClient("mongodb://host/?readPreference=primary&" "maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&" - "maxStalenessSeconds=120") + client = MongoClient("mongodb://host/?readPreference=secondary&" "maxStalenessSeconds=120") self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" - "maxStalenessSeconds=1") + client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" - "maxStalenessSeconds=-1") + client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") @@ -85,15 +78,15 @@ def test_max_staleness(self): def test_max_staleness_float(self): with self.assertRaises(TypeError) as ctx: - rs_or_single_client(maxStalenessSeconds=1.5, - readPreference="nearest") + rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") self.assertIn("must be an integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5" - "&readPreference=nearest") + client = MongoClient( + "mongodb://host/?maxStalenessSeconds=1.5" "&readPreference=nearest" + ) # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -102,15 +95,13 @@ def test_max_staleness_float(self): def test_max_staleness_zero(self): # Zero is too small. with self.assertRaises(ValueError) as ctx: - rs_or_single_client(maxStalenessSeconds=0, - readPreference="nearest") + rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") self.assertIn("must be a positive integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0" - "&readPreference=nearest") + client = MongoClient("mongodb://host/?maxStalenessSeconds=0" "&readPreference=nearest") # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index c110b8b10c..e39940f56b 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -19,12 +19,13 @@ sys.path[0:0] = [""] +from test import MockClientTest, client_context, unittest +from test.pymongo_mocks import MockClient +from test.utils import connected, wait_until + from pymongo.errors import AutoReconnect, InvalidOperation from pymongo.server_selectors import writable_server_selector from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, MockClientTest -from test.pymongo_mocks import MockClient -from test.utils import connected, wait_until @client_context.require_connection @@ -34,14 +35,13 @@ def setUpModule(): class SimpleOp(threading.Thread): - def __init__(self, client): super(SimpleOp, self).__init__() self.client = client self.passed = False def run(self): - self.client.db.command('ping') + self.client.db.command("ping") self.passed = True # No exception raised. @@ -58,26 +58,27 @@ def do_simple_op(client, nthreads): def writable_addresses(topology): - return set(server.description.address for server in - topology.select_servers(writable_server_selector)) + return set( + server.description.address for server in topology.select_servers(writable_server_selector) + ) class TestMongosLoadBalancing(MockClientTest): - def mock_client(self, **kwargs): mock_client = MockClient( standalones=[], members=[], - mongoses=['a:1', 'b:2', 'c:3'], - host='a:1,b:2,c:3', + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", connect=False, - **kwargs) + **kwargs + ) self.addCleanup(mock_client.close) # Latencies in seconds. - mock_client.mock_rtts['a:1'] = 0.020 - mock_client.mock_rtts['b:2'] = 0.025 - mock_client.mock_rtts['c:3'] = 0.045 + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 return mock_client def test_lazy_connect(self): @@ -90,15 +91,15 @@ def test_lazy_connect(self): # Trigger initial connection. do_simple_op(client, nthreads) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") def test_failover(self): nthreads = 10 client = connected(self.mock_client(localThresholdMS=0.001)) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") # Our chosen mongos goes down. - client.kill_host('a:1') + client.kill_host("a:1") # Trigger failover to higher-latency nodes. AutoReconnect should be # raised at most once in each thread. @@ -106,10 +107,10 @@ def test_failover(self): def f(): try: - client.db.command('ping') + client.db.command("ping") except AutoReconnect: # Second attempt succeeds. - client.db.command('ping') + client.db.command("ping") passed.append(True) @@ -128,34 +129,34 @@ def f(): def test_local_threshold(self): client = connected(self.mock_client(localThresholdMS=30)) self.assertEqual(30, client.options.local_threshold_ms) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") topology = client._topology # All are within a 30-ms latency window, see self.mock_client(). - self.assertEqual(set([('a', 1), ('b', 2), ('c', 3)]), - writable_addresses(topology)) + self.assertEqual(set([("a", 1), ("b", 2), ("c", 3)]), writable_addresses(topology)) # No error - client.admin.command('ping') + client.admin.command("ping") client = connected(self.mock_client(localThresholdMS=0)) self.assertEqual(0, client.options.local_threshold_ms) # No error - client.db.command('ping') + client.db.command("ping") # Our chosen mongos goes down. - client.kill_host('%s:%s' % next(iter(client.nodes))) + client.kill_host("%s:%s" % next(iter(client.nodes))) try: - client.db.command('ping') + client.db.command("ping") except: pass # We eventually connect to a new mongos. def connect_to_new_mongos(): try: - return client.db.command('ping') + return client.db.command("ping") except AutoReconnect: pass - wait_until(connect_to_new_mongos, 'connect to a new mongos') + + wait_until(connect_to_new_mongos, "connect to a new mongos") def test_load_balancing(self): # Although the server selection JSON tests already prove that @@ -163,25 +164,25 @@ def test_load_balancing(self): # test of discovering servers' round trip times and configuring # localThresholdMS. client = connected(self.mock_client()) - wait_until(lambda: len(client.nodes) == 3, 'connect to all mongoses') + wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") # Prohibited for topology type Sharded. with self.assertRaises(InvalidOperation): client.address topology = client._topology - self.assertEqual(TOPOLOGY_TYPE.Sharded, - topology.description.topology_type) + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) # a and b are within the 15-ms latency window, see self.mock_client(). - self.assertEqual(set([('a', 1), ('b', 2)]), - writable_addresses(topology)) + self.assertEqual(set([("a", 1), ("b", 2)]), writable_addresses(topology)) - client.mock_rtts['a:1'] = 0.045 + client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. - wait_until(lambda: set([('b', 2)]) == writable_addresses(topology), - 'discover server "a" is too far') + wait_until( + lambda: set([("b", 2)]) == writable_addresses(topology), + 'discover server "a" is too far', + ) if __name__ == "__main__": diff --git a/test/test_monitor.py b/test/test_monitor.py index ed0d4543f8..85cfb0bc40 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -20,13 +20,15 @@ sys.path[0:0] = [""] -from pymongo.periodic_executor import _EXECUTORS +from test import IntegrationTest, unittest +from test.utils import ( + ServerAndTopologyEventListener, + connected, + single_client, + wait_until, +) -from test import unittest, IntegrationTest -from test.utils import (connected, - ServerAndTopologyEventListener, - single_client, - wait_until) +from pymongo.periodic_executor import _EXECUTORS def unregistered(ref): @@ -58,16 +60,13 @@ def test_cleanup_executors_on_client_del(self): self.assertEqual(len(executors), 4) # Each executor stores a weakref to itself in _EXECUTORS. - executor_refs = [ - (r, r()._name) for r in _EXECUTORS.copy() if r() in executors] # type: ignore + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] del executors del client for ref, name in executor_refs: - wait_until(partial(unregistered, ref), - 'unregister executor: %s' % (name,), - timeout=5) + wait_until(partial(unregistered, ref), "unregister executor: %s" % (name,), timeout=5) def test_cleanup_executors_on_client_close(self): client = create_client() @@ -77,9 +76,9 @@ def test_cleanup_executors_on_client_close(self): client.close() for executor in executors: - wait_until(lambda: executor._stopped, - 'closed executor: %s' % (executor._name,), - timeout=5) + wait_until( + lambda: executor._stopped, "closed executor: %s" % (executor._name,), timeout=5 + ) if __name__ == "__main__": diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 4e513c5c69..1adb2983e4 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -16,31 +16,28 @@ import datetime import sys import time -from typing import Any import warnings +from typing import Any sys.path[0:0] = [""] +from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest +from test.utils import ( + EventListener, + get_pool, + rs_or_single_client, + single_client, + wait_until, +) + from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON -from pymongo import CursorType, monitoring, InsertOne, UpdateOne, DeleteOne +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring from pymongo.command_cursor import CommandCursor -from pymongo.errors import (AutoReconnect, - NotPrimaryError, - OperationFailure) +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - sanitize_cmd, - unittest) -from test.utils import (EventListener, - get_pool, - rs_or_single_client, - single_client, - wait_until) class TestCommandMonitoring(IntegrationTest): @@ -51,9 +48,7 @@ class TestCommandMonitoring(IntegrationTest): def setUpClass(cls): super(TestCommandMonitoring, cls).setUpClass() cls.listener = EventListener() - cls.client = rs_or_single_client( - event_listeners=[cls.listener], - retryWrites=False) + cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) @classmethod def tearDownClass(cls): @@ -65,107 +60,93 @@ def tearDown(self): super(TestCommandMonitoring, self).tearDown() def test_started_simple(self): - self.client.pymongo_test.command('ping') + self.client.pymongo_test.command("ping") results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ping', 1)]), started.command) - self.assertEqual('ping', started.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_succeeded_simple(self): - self.client.pymongo_test.command('ping') + self.client.pymongo_test.command("ping") results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertEqual('ping', succeeded.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertEqual("ping", succeeded.command_name) self.assertEqual(self.client.address, succeeded.connection_id) - self.assertEqual(1, succeeded.reply.get('ok')) + self.assertEqual(1, succeeded.reply.get("ok")) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertTrue(isinstance(succeeded.duration_micros, int)) def test_failed_simple(self): try: - self.client.pymongo_test.command('oops!') + self.client.pymongo_test.command("oops!") except OperationFailure: pass results = self.listener.results - started = results['started'][0] - failed = results['failed'][0] - self.assertEqual(0, len(results['succeeded'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) - self.assertEqual('oops!', failed.command_name) + started = results["started"][0] + failed = results["failed"][0] + self.assertEqual(0, len(results["succeeded"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("oops!", failed.command_name) self.assertEqual(self.client.address, failed.connection_id) - self.assertEqual(0, failed.failure.get('ok')) + self.assertEqual(0, failed.failure.get("ok")) self.assertTrue(isinstance(failed.request_id, int)) self.assertTrue(isinstance(failed.duration_micros, int)) def test_find_one(self): self.client.pymongo_test.test.find_one() results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('limit', 1), - ('singleBatch', True)]), - started.command) - self.assertEqual('find', started.command_name) + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) def test_find_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) self.listener.results.clear() - cursor = self.client.pymongo_test.test.find( - projection={'_id': False}, - batch_size=4) + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 4)]), - started.command) - self.assertEqual('find', started.command_name) + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] @@ -179,24 +160,21 @@ def test_find_and_get_more(self): next(cursor) try: results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 4)]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) + self.assertEqual("getMore", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] @@ -208,32 +186,28 @@ def test_find_and_get_more(self): tuple(cursor) def test_find_with_explain(self): - cmd = SON([('explain', SON([('find', 'test'), - ('filter', {})]))]) + cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({}) self.listener.results.clear() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) res = coll.find().explain() results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(cmd, started.command) - self.assertEqual('explain', started.command_name) + self.assertEqual("explain", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('explain', succeeded.command_name) + self.assertEqual("explain", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(res, succeeded.reply) @@ -241,34 +215,31 @@ def test_find_with_explain(self): def _test_find_options(self, query, expected_cmd): coll = self.client.pymongo_test.test coll.drop() - coll.create_index('x') - coll.insert_many([{'x': i} for i in range(5)]) + coll.create_index("x") + coll.insert_many([{"x": i} for i in range(5)]) # Test that we publish the unwrapped command. self.listener.results.clear() if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) cursor = coll.find(**query) next(cursor) try: results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(expected_cmd, started.command) - self.assertEqual('find', started.command_name) + self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(self.client.address, succeeded.connection_id) finally: @@ -276,125 +247,128 @@ def _test_find_options(self, query, expected_cmd): tuple(cursor) def test_find_options(self): - query = dict(filter={}, - hint=[('x', 1)], - max_time_ms=10000, - max={'x': 10}, - min={'x': -10}, - return_key=True, - show_record_id=True, - projection={'x': False}, - skip=1, - no_cursor_timeout=True, - sort=[('_id', 1)], - allow_partial_results=True, - comment='this is a test', - batch_size=2) - - cmd = dict(find='test', - filter={}, - hint=SON([('x', 1)]), - comment='this is a test', - maxTimeMS=10000, - max={'x': 10}, - min={'x': -10}, - returnKey=True, - showRecordId=True, - sort=SON([('_id', 1)]), - projection={'x': False}, - skip=1, - batchSize=2, - noCursorTimeout=True, - allowPartialResults=True) + query = dict( + filter={}, + hint=[("x", 1)], + max_time_ms=10000, + max={"x": 10}, + min={"x": -10}, + return_key=True, + show_record_id=True, + projection={"x": False}, + skip=1, + no_cursor_timeout=True, + sort=[("_id", 1)], + allow_partial_results=True, + comment="this is a test", + batch_size=2, + ) + + cmd = dict( + find="test", + filter={}, + hint=SON([("x", 1)]), + comment="this is a test", + maxTimeMS=10000, + max={"x": 10}, + min={"x": -10}, + returnKey=True, + showRecordId=True, + sort=SON([("_id", 1)]), + projection={"x": False}, + skip=1, + batchSize=2, + noCursorTimeout=True, + allowPartialResults=True, + ) if client_context.version < (4, 1, 0, -1): - query['max_scan'] = 10 - cmd['maxScan'] = 10 + query["max_scan"] = 10 + cmd["maxScan"] = 10 self._test_find_options(query, cmd) @client_context.require_version_max(3, 7, 2) def test_find_snapshot(self): # Test "snapshot" parameter separately, can't combine with "sort". - query = dict(filter={}, - snapshot=True) + query = dict(filter={}, snapshot=True) - cmd = dict(find='test', - filter={}, - snapshot=True) + cmd = dict(find="test", filter={}, snapshot=True) self._test_find_options(query, cmd) def test_command_and_get_more(self): self.client.pymongo_test.test.drop() - self.client.pymongo_test.test.insert_many( - [{'x': 1} for _ in range(10)]) + self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) self.listener.results.clear() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: - coll = coll.with_options( - read_preference=ReadPreference.PRIMARY_PREFERRED) - cursor = coll.aggregate( - [{'$project': {'_id': False, 'x': 1}}], batchSize=4) + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('aggregate', 'test'), - ('pipeline', [{'$project': {'_id': False, 'x': 1}}]), - ('cursor', {'batchSize': 4})]), - started.command) - self.assertEqual('aggregate', started.command_name) + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('aggregate', succeeded.command_name) + self.assertEqual("aggregate", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) - expected_cursor = {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'firstBatch': [{'x': 1} for _ in range(4)]} - self.assertEqualCommand(expected_cursor, succeeded.reply.get('cursor')) + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) self.listener.results.clear() next(cursor) try: results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 4)]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('getMore', succeeded.command_name) + self.assertEqual("getMore", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { - 'cursor': {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'nextBatch': [{'x': 1} for _ in range(4)]}, - 'ok': 1.0} + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } self.assertEqualReply(expected_result, succeeded.reply) finally: # Exhaust the cursor to avoid kill cursors. @@ -411,23 +385,20 @@ def test_get_more_failure(self): except Exception: pass results = self.listener.results - started = results['started'][0] - self.assertEqual(0, len(results['succeeded'])) - failed = results['failed'][0] - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + self.assertEqual(0, len(results["succeeded"])) + failed = results["failed"][0] + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( - SON([('getMore', cursor_id), - ('collection', 'test')]), - started.command) - self.assertEqual('getMore', started.command_name) + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertTrue(isinstance(failed.duration_micros, int)) - self.assertEqual('getMore', failed.command_name) + self.assertEqual("getMore", failed.command_name) self.assertTrue(isinstance(failed.request_id, int)) self.assertEqual(cursor.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) @@ -438,7 +409,7 @@ def test_not_primary_error(self): address = next(iter(client_context.client.secondaries)) client = single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. - client.admin.command('ping') + client.admin.command("ping") self.listener.results.clear() error = None try: @@ -446,16 +417,14 @@ def test_not_primary_error(self): except NotPrimaryError as exc: error = exc.errors results = self.listener.results - started = results['started'][0] - failed = results['failed'][0] - self.assertEqual(0, len(results['succeeded'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue( - isinstance(failed, monitoring.CommandFailedEvent)) - self.assertEqual('findAndModify', failed.command_name) + started = results["started"][0] + failed = results["failed"][0] + self.assertEqual(0, len(results["succeeded"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("findAndModify", failed.command_name) self.assertEqual(address, failed.connection_id) - self.assertEqual(0, failed.failure.get('ok')) + self.assertEqual(0, failed.failure.get("ok")) self.assertTrue(isinstance(failed.request_id, int)) self.assertTrue(isinstance(failed.duration_micros, int)) self.assertEqual(error, failed.failure) @@ -466,60 +435,62 @@ def test_exhaust(self): self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) self.listener.results.clear() cursor = self.client.pymongo_test.test.find( - projection={'_id': False}, - batch_size=5, - cursor_type=CursorType.EXHAUST) + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) next(cursor) cursor_id = cursor.cursor_id results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('find', 'test'), - ('filter', {}), - ('projection', {'_id': False}), - ('batchSize', 5)]), started.command) - self.assertEqual('find', started.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('find', succeeded.command_name) + self.assertEqual("find", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { - 'cursor': {'id': cursor_id, - 'ns': 'pymongo_test.test', - 'firstBatch': [{} for _ in range(5)]}, - 'ok': 1} + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 1, + } self.assertEqualReply(expected_result, succeeded.reply) self.listener.results.clear() tuple(cursor) results = self.listener.results - self.assertEqual(0, len(results['failed'])) - for event in results['started']: + self.assertEqual(0, len(results["failed"])) + for event in results["started"]: self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('getMore', cursor_id), - ('collection', 'test'), - ('batchSize', 5)]), event.command) - self.assertEqual('getMore', event.command_name) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) self.assertEqual(cursor.address, event.connection_id) - self.assertEqual('pymongo_test', event.database_name) + self.assertEqual("pymongo_test", event.database_name) self.assertTrue(isinstance(event.request_id, int)) - for event in results['succeeded']: - self.assertTrue( - isinstance(event, monitoring.CommandSucceededEvent)) + for event in results["succeeded"]: + self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(event.duration_micros, int)) - self.assertEqual('getMore', event.command_name) + self.assertEqual("getMore", event.command_name) self.assertTrue(isinstance(event.request_id, int)) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. - self.assertEqual(0, results['succeeded'][-1].reply['cursor']['id']) + self.assertEqual(0, results["succeeded"][-1].reply["cursor"]["id"]) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): @@ -532,30 +503,30 @@ def test_kill_cursors(self): cursor.close() time.sleep(2) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) # There could be more than one cursor_id here depending on # when the thread last ran. - self.assertIn(cursor_id, started.command['cursors']) - self.assertEqual('killCursors', started.command_name) + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) self.assertIs(type(started.connection_id), tuple) self.assertEqual(cursor.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(succeeded.duration_micros, int)) - self.assertEqual('killCursors', succeeded.command_name) + self.assertEqual("killCursors", succeeded.command_name) self.assertTrue(isinstance(succeeded.request_id, int)) self.assertIs(type(succeeded.connection_id), tuple) self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on # when the thread last ran. - self.assertTrue(cursor_id in succeeded.reply['cursorsUnknown'] - or cursor_id in succeeded.reply['cursorsKilled']) + self.assertTrue( + cursor_id in succeeded.reply["cursorsUnknown"] + or cursor_id in succeeded.reply["cursorsKilled"] + ) def test_non_bulk_writes(self): coll = self.client.pymongo_test.test @@ -563,18 +534,22 @@ def test_non_bulk_writes(self): self.listener.results.clear() # Implied write concern insert_one - res = coll.insert_one({'x': 1}) + res = coll.insert_one({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}])]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -583,25 +558,29 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # Unacknowledged insert_one self.listener.results.clear() coll = coll.with_options(write_concern=WriteConcern(w=0)) - res = coll.insert_one({'x': 1}) + res = coll.insert_one({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}]), - ('writeConcern', {'w': 0})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -609,24 +588,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertEqualReply(succeeded.reply, {'ok': 1}) + self.assertEqualReply(succeeded.reply, {"ok": 1}) # Explicit write concern insert_one self.listener.results.clear() coll = coll.with_options(write_concern=WriteConcern(w=1)) - res = coll.insert_one({'x': 1}) + res = coll.insert_one({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': res.inserted_id, 'x': 1}]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -635,25 +618,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # delete_many self.listener.results.clear() - res = coll.delete_many({'x': 1}) + res = coll.delete_many({"x": 1}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 1}), - ('limit', 0)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -662,28 +648,41 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(res.deleted_count, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) # replace_one self.listener.results.clear() oid = ObjectId() - res = coll.replace_one({'_id': oid}, {'_id': oid, 'x': 1}, upsert=True) + res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': oid}), - ('u', {'_id': oid, 'x': 1}), - ('multi', False), - ('upsert', True)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -692,28 +691,41 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) - self.assertEqual([{'index': 0, '_id': oid}], reply.get('upserted')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) # update_one self.listener.results.clear() - res = coll.update_one({'x': 1}, {'$inc': {'x': 1}}) + res = coll.update_one({"x": 1}, {"$inc": {"x": 1}}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 1}), - ('u', {'$inc': {'x': 1}}), - ('multi', False), - ('upsert', False)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -722,27 +734,40 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # update_many self.listener.results.clear() - res = coll.update_many({'x': 2}, {'$inc': {'x': 1}}) + res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'x': 2}), - ('u', {'$inc': {'x': 1}}), - ('multi', True), - ('upsert', False)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('update', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -751,25 +776,28 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) # delete_one self.listener.results.clear() - res2 = coll.delete_one({'x': 3}) + res2 = coll.delete_one({"x": 3}) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'x': 3}), - ('limit', 1)])]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('delete', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -778,30 +806,34 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(1, reply.get('n')) + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) self.assertEqual(0, coll.count_documents({})) # write errors - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) try: self.listener.results.clear() - coll.insert_one({'_id': 1}) + coll.insert_one({"_id": 1}) except OperationFailure: pass results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': 1}]), - ('writeConcern', {'w': 1})]) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('insert', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -810,14 +842,14 @@ def test_non_bulk_writes(self): self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) reply = succeeded.reply - self.assertEqual(1, reply.get('ok')) - self.assertEqual(0, reply.get('n')) - errors = reply.get('writeErrors') + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") self.assertIsInstance(errors, list) error = errors[0] - self.assertEqual(0, error.get('index')) - self.assertIsInstance(error.get('code'), int) - self.assertIsInstance(error.get('errmsg'), str) + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) def test_insert_many(self): # This always uses the bulk API. @@ -825,13 +857,13 @@ def test_insert_many(self): coll.drop() self.listener.results.clear() - big = 'x' * (1024 * 1024 * 4) - docs = [{'_id': i, 'big': big} for i in range(6)] + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] coll.insert_many(docs) results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) documents = [] count = 0 operation_id = started[0].operation_id @@ -839,13 +871,12 @@ def test_insert_many(self): for start, succeed in zip(started, succeeded): self.assertIsInstance(start, monitoring.CommandStartedEvent) cmd = sanitize_cmd(start.command) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -856,8 +887,8 @@ def test_insert_many(self): self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) reply = succeed.reply - self.assertEqual(1, reply.get('ok')) - count += reply.get('n', 0) + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) self.assertEqual(documents, docs) self.assertEqual(6, count) @@ -868,27 +899,26 @@ def test_insert_many_unacknowledged(self): self.listener.results.clear() # Force two batches on legacy servers. - big = 'x' * (1024 * 1024 * 12) - docs = [{'_id': i, 'big': big} for i in range(6)] + big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] unack_coll.insert_many(docs) results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) documents = [] operation_id = started[0].operation_id self.assertIsInstance(operation_id, int) for start, succeed in zip(started, succeeded): self.assertIsInstance(start, monitoring.CommandStartedEvent) cmd = sanitize_cmd(start.command) - cmd.pop('writeConcern', None) - self.assertEqual(['insert', 'ordered', 'documents'], - list(cmd.keys())) - self.assertEqual(coll.name, cmd['insert']) - self.assertIs(True, cmd['ordered']) - documents.extend(cmd['documents']) - self.assertEqual('pymongo_test', start.database_name) - self.assertEqual('insert', start.command_name) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -898,29 +928,32 @@ def test_insert_many_unacknowledged(self): self.assertEqual(start.connection_id, succeed.connection_id) self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - self.assertEqual(1, succeed.reply.get('ok')) + self.assertEqual(1, succeed.reply.get("ok")) self.assertEqual(documents, docs) - wait_until(lambda: coll.count_documents({}) == 6, - 'insert documents with w=0') + wait_until(lambda: coll.count_documents({}) == 6, "insert documents with w=0") def test_bulk_write(self): coll = self.client.pymongo_test.test coll.drop() self.listener.results.clear() - coll.bulk_write([InsertOne({'_id': 1}), - UpdateOne({'_id': 1}, {'$set': {'x': 1}}), - DeleteOne({'_id': 1})]) + coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) self.assertEqual(3, len(pairs)) for start, succeed in pairs: self.assertIsInstance(start, monitoring.CommandStartedEvent) - self.assertEqual('pymongo_test', start.database_name) + self.assertEqual("pymongo_test", start.database_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -931,21 +964,35 @@ def test_bulk_write(self): self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - expected = SON([('insert', coll.name), - ('ordered', True), - ('documents', [{'_id': 1}])]) + expected = SON([("insert", coll.name), ("ordered", True), ("documents", [{"_id": 1}])]) self.assertEqualCommand(expected, started[0].command) - expected = SON([('update', coll.name), - ('ordered', True), - ('updates', [SON([('q', {'_id': 1}), - ('u', {'$set': {'x': 1}}), - ('multi', False), - ('upsert', False)])])]) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) self.assertEqualCommand(expected, started[1].command) - expected = SON([('delete', coll.name), - ('ordered', True), - ('deletes', [SON([('q', {'_id': 1}), - ('limit', 1)])])]) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) self.assertEqualCommand(expected, started[2].command) @client_context.require_failCommand_fail_point @@ -954,23 +1001,23 @@ def test_bulk_write_command_network_error(self): self.listener.results.clear() insert_network_error = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'closeConnection': True, + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, }, } with self.fail_point(insert_network_error): with self.assertRaises(AutoReconnect): - coll.bulk_write([InsertOne({'_id': 1})]) - failed = self.listener.results['failed'] + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.results["failed"] self.assertEqual(1, len(failed)) event = failed[0] - self.assertEqual(event.command_name, 'insert') + self.assertEqual(event.command_name, "insert") self.assertIsInstance(event.failure, dict) - self.assertEqual(event.failure['errtype'], 'AutoReconnect') - self.assertTrue(event.failure['errmsg']) + self.assertEqual(event.failure["errtype"], "AutoReconnect") + self.assertTrue(event.failure["errmsg"]) @client_context.require_failCommand_fail_point def test_bulk_write_command_error(self): @@ -978,24 +1025,24 @@ def test_bulk_write_command_error(self): self.listener.results.clear() insert_command_error = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'closeConnection': False, - 'errorCode': 10107, # Not primary + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": False, + "errorCode": 10107, # Not primary }, } with self.fail_point(insert_command_error): with self.assertRaises(NotPrimaryError): - coll.bulk_write([InsertOne({'_id': 1})]) - failed = self.listener.results['failed'] + coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.results["failed"] self.assertEqual(1, len(failed)) event = failed[0] - self.assertEqual(event.command_name, 'insert') + self.assertEqual(event.command_name, "insert") self.assertIsInstance(event.failure, dict) - self.assertEqual(event.failure['code'], 10107) - self.assertTrue(event.failure['errmsg']) + self.assertEqual(event.failure["code"], 10107) + self.assertTrue(event.failure["errmsg"]) def test_write_errors(self): coll = self.client.pymongo_test.test @@ -1003,23 +1050,27 @@ def test_write_errors(self): self.listener.results.clear() try: - coll.bulk_write([InsertOne({'_id': 1}), - InsertOne({'_id': 1}), - InsertOne({'_id': 1}), - DeleteOne({'_id': 1})], - ordered=False) + coll.bulk_write( + [ + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + DeleteOne({"_id": 1}), + ], + ordered=False, + ) except OperationFailure: pass results = self.listener.results - started = results['started'] - succeeded = results['succeeded'] - self.assertEqual(0, len(results['failed'])) + started = results["started"] + succeeded = results["succeeded"] + self.assertEqual(0, len(results["failed"])) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) errors = [] for start, succeed in pairs: self.assertIsInstance(start, monitoring.CommandStartedEvent) - self.assertEqual('pymongo_test', start.database_name) + self.assertEqual("pymongo_test", start.database_name) self.assertIsInstance(start.request_id, int) self.assertEqual(self.client.address, start.connection_id) self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) @@ -1029,11 +1080,11 @@ def test_write_errors(self): self.assertEqual(start.connection_id, succeed.connection_id) self.assertEqual(start.operation_id, operation_id) self.assertEqual(succeed.operation_id, operation_id) - if 'writeErrors' in succeed.reply: - errors.extend(succeed.reply['writeErrors']) + if "writeErrors" in succeed.reply: + errors.extend(succeed.reply["writeErrors"]) self.assertEqual(2, len(errors)) - fields = set(['index', 'code', 'errmsg']) + fields = set(["index", "code", "errmsg"]) for error in errors: self.assertTrue(fields.issubset(set(error))) @@ -1043,14 +1094,14 @@ def test_first_batch_helper(self): self.listener.results.clear() tuple(self.client.pymongo_test.test.list_indexes()) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) - expected = SON([('listIndexes', 'test'), ('cursor', {})]) + expected = SON([("listIndexes", "test"), ("cursor", {})]) self.assertEqualCommand(expected, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('listIndexes', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -1058,8 +1109,8 @@ def test_first_batch_helper(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue('cursor' in succeeded.reply) - self.assertTrue('ok' in succeeded.reply) + self.assertTrue("cursor" in succeeded.reply) + self.assertTrue("ok" in succeeded.reply) self.listener.results.clear() @@ -1068,20 +1119,19 @@ def test_sensitive_commands(self): self.listener.results.clear() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start( - cmd, "pymongo_test", 12345, self.client.address) + listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( - delta, {'nonce': 'e474f4561c5eb40b', 'ok': 1.0}, - "getnonce", 12345, self.client.address) + delta, {"nonce": "e474f4561c5eb40b", "ok": 1.0}, "getnonce", 12345, self.client.address + ) results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) - self.assertEqual('pymongo_test', started.database_name) - self.assertEqual('getnonce', started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getnonce", started.command_name) self.assertIsInstance(started.request_id, int) self.assertEqual(self.client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) @@ -1106,7 +1156,7 @@ def setUpClass(cls): monitoring.register(cls.listener) cls.client = single_client() # Get one (authenticated) socket in the pool. - cls.client.pymongo_test.command('ping') + cls.client.pymongo_test.command("ping") @classmethod def tearDownClass(cls): @@ -1119,107 +1169,109 @@ def setUp(self): self.listener.results.clear() def test_simple(self): - self.client.pymongo_test.command('ping') + self.client.pymongo_test.command("ping") results = self.listener.results - started = results['started'][0] - succeeded = results['succeeded'][0] - self.assertEqual(0, len(results['failed'])) - self.assertTrue( - isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue( - isinstance(started, monitoring.CommandStartedEvent)) - self.assertEqualCommand(SON([('ping', 1)]), started.command) - self.assertEqual('ping', started.command_name) + started = results["started"][0] + succeeded = results["succeeded"][0] + self.assertEqual(0, len(results["failed"])) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) - self.assertEqual('pymongo_test', started.database_name) + self.assertEqual("pymongo_test", started.database_name) self.assertTrue(isinstance(started.request_id, int)) class TestEventClasses(unittest.TestCase): - def test_command_event_repr(self): - request_id, connection_id, operation_id = 1, ('localhost', 27017), 2 + request_id, connection_id, operation_id = 1, ("localhost", 27017), 2 event = monitoring.CommandStartedEvent( - {'ping': 1}, 'admin', request_id, connection_id, operation_id) + {"ping": 1}, "admin", request_id, connection_id, operation_id + ) self.assertEqual( repr(event), "") + "command: 'ping', operation_id: 2, service_id: None>", + ) delta = datetime.timedelta(milliseconds=100) event = monitoring.CommandSucceededEvent( - delta, {'ok': 1}, 'ping', request_id, connection_id, - operation_id) + delta, {"ok": 1}, "ping", request_id, connection_id, operation_id + ) self.assertEqual( repr(event), "") + "service_id: None>", + ) event = monitoring.CommandFailedEvent( - delta, {'ok': 0}, 'ping', request_id, connection_id, - operation_id) + delta, {"ok": 0}, "ping", request_id, connection_id, operation_id + ) self.assertEqual( repr(event), "") + "failure: {'ok': 0}, service_id: None>", + ) def test_server_heartbeat_event_repr(self): - connection_id = ('localhost', 27017) + connection_id = ("localhost", 27017) event = monitoring.ServerHeartbeatStartedEvent(connection_id) - self.assertEqual( - repr(event), - "") + self.assertEqual(repr(event), "") delta = 0.1 event = monitoring.ServerHeartbeatSucceededEvent( - delta, {'ok': 1}, connection_id) # type: ignore[arg-type] + delta, {"ok": 1}, connection_id # type: ignore[arg-type] + ) self.assertEqual( repr(event), "") + "duration: 0.1, awaited: False, reply: {'ok': 1}>", + ) event = monitoring.ServerHeartbeatFailedEvent( - delta, 'ERROR', connection_id) # type: ignore[arg-type] + delta, "ERROR", connection_id # type: ignore[arg-type] + ) self.assertEqual( repr(event), "") + "duration: 0.1, awaited: False, reply: 'ERROR'>", + ) def test_server_event_repr(self): - server_address = ('localhost', 27017) - topology_id = ObjectId('000000000000000000000001') + server_address = ("localhost", 27017) + topology_id = ObjectId("000000000000000000000001") event = monitoring.ServerOpeningEvent(server_address, topology_id) self.assertEqual( repr(event), - "") + "", + ) event = monitoring.ServerDescriptionChangedEvent( - 'PREV', 'NEW', server_address, topology_id) # type: ignore[arg-type] + "PREV", "NEW", server_address, topology_id # type: ignore[arg-type] + ) self.assertEqual( repr(event), - "") + "", + ) event = monitoring.ServerClosedEvent(server_address, topology_id) self.assertEqual( repr(event), - "") + "", + ) def test_topology_event_repr(self): - topology_id = ObjectId('000000000000000000000001') + topology_id = ObjectId("000000000000000000000001") event = monitoring.TopologyOpenedEvent(topology_id) - self.assertEqual( - repr(event), - "") + self.assertEqual(repr(event), "") event = monitoring.TopologyDescriptionChangedEvent( - 'PREV', 'NEW', topology_id) # type: ignore[arg-type] + "PREV", "NEW", topology_id # type: ignore[arg-type] + ) self.assertEqual( repr(event), "") + "changed from: PREV, to: NEW>", + ) event = monitoring.TopologyClosedEvent(topology_id) - self.assertEqual( - repr(event), - "") + self.assertEqual(repr(event), "") if __name__ == "__main__": diff --git a/test/test_objectid.py b/test/test_objectid.py index 26ffe2e22c..bb1af865c0 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -21,13 +21,13 @@ sys.path[0:0] = [""] -from bson.errors import InvalidId -from bson.objectid import ObjectId, _MAX_COUNTER_VALUE -from bson.tz_util import (FixedOffset, - utc) from test import SkipTest, unittest from test.utils import oid_generated_on_process +from bson.errors import InvalidId +from bson.objectid import _MAX_COUNTER_VALUE, ObjectId +from bson.tz_util import FixedOffset, utc + def oid(x): return ObjectId() @@ -57,29 +57,28 @@ def test_from_hex(self): self.assertRaises(InvalidId, ObjectId, "123456789012123456789G12") def test_repr_str(self): - self.assertEqual(repr(ObjectId("1234567890abcdef12345678")), - "ObjectId('1234567890abcdef12345678')") - self.assertEqual(str(ObjectId("1234567890abcdef12345678")), - "1234567890abcdef12345678") - self.assertEqual(str(ObjectId(b"123456789012")), - "313233343536373839303132") - self.assertEqual(ObjectId("1234567890abcdef12345678").binary, - b'\x124Vx\x90\xab\xcd\xef\x124Vx') - self.assertEqual(str(ObjectId(b'\x124Vx\x90\xab\xcd\xef\x124Vx')), - "1234567890abcdef12345678") + self.assertEqual( + repr(ObjectId("1234567890abcdef12345678")), "ObjectId('1234567890abcdef12345678')" + ) + self.assertEqual(str(ObjectId("1234567890abcdef12345678")), "1234567890abcdef12345678") + self.assertEqual(str(ObjectId(b"123456789012")), "313233343536373839303132") + self.assertEqual( + ObjectId("1234567890abcdef12345678").binary, b"\x124Vx\x90\xab\xcd\xef\x124Vx" + ) + self.assertEqual( + str(ObjectId(b"\x124Vx\x90\xab\xcd\xef\x124Vx")), "1234567890abcdef12345678" + ) def test_equality(self): a = ObjectId() self.assertEqual(a, ObjectId(a)) - self.assertEqual(ObjectId(b"123456789012"), - ObjectId(b"123456789012")) + self.assertEqual(ObjectId(b"123456789012"), ObjectId(b"123456789012")) self.assertNotEqual(ObjectId(), ObjectId()) self.assertNotEqual(ObjectId(b"123456789012"), b"123456789012") # Explicitly test inequality self.assertFalse(a != ObjectId(a)) - self.assertFalse(ObjectId(b"123456789012") != - ObjectId(b"123456789012")) + self.assertFalse(ObjectId(b"123456789012") != ObjectId(b"123456789012")) def test_binary_str_equivalence(self): a = ObjectId() @@ -95,7 +94,7 @@ def test_generation_time(self): self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) def test_from_datetime(self): - if 'PyPy 1.8.0' in sys.version: + if "PyPy 1.8.0" in sys.version: # See https://bugs.pypy.org/issue1092 raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") d = datetime.datetime.utcnow() @@ -104,8 +103,7 @@ def test_from_datetime(self): self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) self.assertEqual("0" * 16, str(oid)[8:]) - aware = datetime.datetime(1993, 4, 4, 2, - tzinfo=FixedOffset(555, "SomeZone")) + aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone")) offset = aware.utcoffset() assert offset is not None as_utc = (aware - offset).replace(tzinfo=utc) @@ -126,7 +124,8 @@ def test_pickle_backwards_compatability(self): b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" b"object\np2\nNtp3\nRp4\n" b"(dp5\nS'_ObjectId__id'\np6\n" - b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb.") + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np7\nsb." + ) # We also test against a hardcoded "New" pickle format so that we # make sure we're backward compatible with the current version in @@ -135,11 +134,12 @@ def test_pickle_backwards_compatability(self): b"ccopy_reg\n_reconstructor\np0\n" b"(cbson.objectid\nObjectId\np1\nc__builtin__\n" b"object\np2\nNtp3\nRp4\n" - b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb.") + b"S'M\\x9afV\\x13v\\xc0\\x0b\\x88\\x00\\x00\\x00'\np5\nb." + ) # Have to load using 'latin-1' since these were pickled in python2.x. - oid_1_9 = pickle.loads(pickled_with_1_9, encoding='latin-1') - oid_1_10 = pickle.loads(pickled_with_1_10, encoding='latin-1') + oid_1_9 = pickle.loads(pickled_with_1_9, encoding="latin-1") + oid_1_10 = pickle.loads(pickled_with_1_10, encoding="latin-1") self.assertEqual(oid_1_9, ObjectId("4d9a66561376c00b88000000")) self.assertEqual(oid_1_9, oid_1_10) @@ -189,9 +189,7 @@ def generate_objectid_with_timestamp(timestamp): oid.generation_time except (OverflowError, ValueError): continue - self.assertEqual( - oid.generation_time, - datetime.datetime(*exp_datetime_args, tzinfo=utc)) + self.assertEqual(oid.generation_time, datetime.datetime(*exp_datetime_args, tzinfo=utc)) def test_random_regenerated_on_pid_change(self): # Test that change of pid triggers new random number generation. diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 04fa06dfa1..0e6777a9f9 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -14,20 +14,20 @@ """Test the pymongo ocsp_support module.""" +import random +import sys from collections import namedtuple from datetime import datetime, timedelta from os import urandom -import random -import sys from time import sleep - from typing import Any sys.path[0:0] = [""] -from pymongo.ocsp_cache import _OCSPCache from test import unittest +from pymongo.ocsp_cache import _OCSPCache + class TestOcspCache(unittest.TestCase): MockHashAlgorithm: Any @@ -36,20 +36,20 @@ class TestOcspCache(unittest.TestCase): @classmethod def setUpClass(cls): - cls.MockHashAlgorithm = namedtuple( # type: ignore - "MockHashAlgorithm", ['name']) + cls.MockHashAlgorithm = namedtuple("MockHashAlgorithm", ["name"]) # type: ignore cls.MockOcspRequest = namedtuple( # type: ignore - "MockOcspRequest", ['hash_algorithm', 'issuer_name_hash', - 'issuer_key_hash', 'serial_number']) + "MockOcspRequest", + ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], + ) cls.MockOcspResponse = namedtuple( # type: ignore - "MockOcspResponse", ["this_update", "next_update"]) + "MockOcspResponse", ["this_update", "next_update"] + ) def setUp(self): self.cache = _OCSPCache() def _create_mock_request(self): - hash_algorithm = self.MockHashAlgorithm( - random.choice(['sha1', 'md5', 'sha256'])) + hash_algorithm = self.MockHashAlgorithm(random.choice(["sha1", "md5", "sha256"])) issuer_name_hash = urandom(8) issuer_key_hash = urandom(8) serial_number = random.randint(0, 10**10) @@ -57,19 +57,17 @@ def _create_mock_request(self): hash_algorithm=hash_algorithm, issuer_name_hash=issuer_name_hash, issuer_key_hash=issuer_key_hash, - serial_number=serial_number) + serial_number=serial_number, + ) - def _create_mock_response(self, this_update_delta_seconds, - next_update_delta_seconds): + def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): now = datetime.utcnow() this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) else: next_update = None - return self.MockOcspResponse( - this_update=this_update, - next_update=next_update) + return self.MockOcspResponse(this_update=this_update, next_update=next_update) def _add_mock_cache_entry(self, mock_request, mock_response): key = self.cache._get_cache_key(mock_request) diff --git a/test/test_pooling.py b/test/test_pooling.py index 4f0ac3584f..07dbc3643d 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -21,29 +21,25 @@ import threading import time -from bson.son import SON from bson.codec_options import DEFAULT_CODEC_OPTIONS - +from bson.son import SON from pymongo import MongoClient, message -from pymongo.errors import (AutoReconnect, - ConnectionFailure, - DuplicateKeyError) +from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import delay, get_pool, joinall, rs_or_single_client + from pymongo.pool import Pool, PoolOptions from pymongo.socket_checker import SocketChecker -from test import client_context, IntegrationTest, unittest -from test.utils import (get_pool, - joinall, - delay, - rs_or_single_client) @client_context.require_connection def setUpModule(): pass + N = 10 DB = "pymongo-pooling-tests" @@ -62,6 +58,7 @@ def gc_collect_until_done(threads, timeout=60): class MongoThread(threading.Thread): """A thread that uses a MongoClient.""" + def __init__(self, client): super(MongoThread, self).__init__() self.daemon = True # Don't hang whole test if thread hangs. @@ -108,21 +105,22 @@ class SocketGetter(MongoThread): Checks out a socket and holds it forever. Used in test_no_wait_queue_timeout. """ + def __init__(self, client, pool): super(SocketGetter, self).__init__(client) - self.state = 'init' + self.state = "init" self.pool = pool self.sock = None def run_mongo_thread(self): - self.state = 'get_socket' + self.state = "get_socket" # Call 'pin_cursor' so we can hold the socket. with self.pool.get_socket() as sock: sock.pin_cursor() self.sock = sock - self.state = 'sock' + self.state = "sock" def __del__(self): if self.sock: @@ -162,16 +160,12 @@ def tearDown(self): self.c.close() super(_TestPoolingBase, self).tearDown() - def create_pool( - self, - pair=(client_context.host, client_context.port), - *args, - **kwargs): + def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): # Start the pool with the correct ssl options. pool_options = client_context.client._topology_settings.pool_options - kwargs['ssl_context'] = pool_options._ssl_context - kwargs['tls_allow_invalid_hostnames'] = pool_options.tls_allow_invalid_hostnames - kwargs['server_api'] = pool_options.server_api + kwargs["ssl_context"] = pool_options._ssl_context + kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames + kwargs["server_api"] = pool_options.server_api pool = Pool(pair, PoolOptions(*args, **kwargs)) pool.ready() return pool @@ -180,11 +174,9 @@ def create_pool( class TestPooling(_TestPoolingBase): def test_max_pool_size_validation(self): host, port = client_context.host, client_context.port - self.assertRaises( - ValueError, MongoClient, host=host, port=port, maxPoolSize=-1) + self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize=-1) - self.assertRaises( - ValueError, MongoClient, host=host, port=port, maxPoolSize='foo') + self.assertRaises(ValueError, MongoClient, host=host, port=port, maxPoolSize="foo") c = MongoClient(host=host, port=port, maxPoolSize=100, connect=False) self.assertEqual(c.options.pool_options.max_pool_size, 100) @@ -264,27 +256,27 @@ def test_socket_checker(self): # Socket has nothing to read. self.assertFalse(socket_checker.select(s, read=True)) self.assertFalse(socket_checker.select(s, read=True, timeout=0)) - self.assertFalse(socket_checker.select(s, read=True, timeout=.05)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0.05)) # Socket is writable. self.assertTrue(socket_checker.select(s, write=True, timeout=None)) self.assertTrue(socket_checker.select(s, write=True)) self.assertTrue(socket_checker.select(s, write=True, timeout=0)) - self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) # Make the socket readable _, msg, _ = message._query( - 0, 'admin.$cmd', 0, -1, SON([('ping', 1)]), None, - DEFAULT_CODEC_OPTIONS) + 0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS + ) s.sendall(msg) # Block until the socket is readable. self.assertTrue(socket_checker.select(s, read=True, timeout=None)) self.assertTrue(socket_checker.select(s, read=True)) self.assertTrue(socket_checker.select(s, read=True, timeout=0)) - self.assertTrue(socket_checker.select(s, read=True, timeout=.05)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0.05)) # Socket is still writable. self.assertTrue(socket_checker.select(s, write=True, timeout=None)) self.assertTrue(socket_checker.select(s, write=True)) self.assertTrue(socket_checker.select(s, write=True, timeout=0)) - self.assertTrue(socket_checker.select(s, write=True, timeout=.05)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) s.close() self.assertTrue(socket_checker.socket_closed(s)) @@ -303,9 +295,7 @@ def test_return_socket_after_reset(self): def test_pool_check(self): # Test that Pool recovers from two connection failures in a row. # This exercises code at the end of Pool._check(). - cx_pool = self.create_pool(max_pool_size=1, - connect_timeout=1, - wait_queue_timeout=1) + cx_pool = self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. self.addCleanup(cx_pool.close) @@ -315,7 +305,7 @@ def test_pool_check(self): sock_info.sock.close() # Swap pool's address with a bad one. - address, cx_pool.address = cx_pool.address, ('foo.com', 1234) + address, cx_pool.address = cx_pool.address, ("foo.com", 1234) with self.assertRaises(AutoReconnect): with cx_pool.get_socket(): pass @@ -327,8 +317,7 @@ def test_pool_check(self): def test_wait_queue_timeout(self): wait_queue_timeout = 2 # Seconds - pool = self.create_pool( - max_pool_size=1, wait_queue_timeout=wait_queue_timeout) + pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) with pool.get_socket() as sock_info: @@ -340,8 +329,8 @@ def test_wait_queue_timeout(self): duration = time.time() - start self.assertTrue( abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % ( - duration, wait_queue_timeout)) + "Waited %.2f seconds for a socket, expected %f" % (duration, wait_queue_timeout), + ) def test_no_wait_queue_timeout(self): # Verify get_socket() with no wait_queue_timeout blocks forever. @@ -352,16 +341,16 @@ def test_no_wait_queue_timeout(self): with pool.get_socket() as s1: t = SocketGetter(self.c, pool) t.start() - while t.state != 'get_socket': + while t.state != "get_socket": time.sleep(0.1) time.sleep(1) - self.assertEqual(t.state, 'get_socket') + self.assertEqual(t.state, "get_socket") - while t.state != 'sock': + while t.state != "sock": time.sleep(0.1) - self.assertEqual(t.state, 'sock') + self.assertEqual(t.state, "sock") self.assertEqual(t.sock, s1) def test_checkout_more_than_max_pool_size(self): @@ -381,7 +370,7 @@ def test_checkout_more_than_max_pool_size(self): threads.append(t) time.sleep(1) for t in threads: - self.assertEqual(t.state, 'get_socket') + self.assertEqual(t.state, "get_socket") for socket_info in socks: socket_info.close_socket(None) @@ -394,7 +383,8 @@ def test_maxConnecting(self): # Run 50 short running operations def find_one(): - docs.append(client.test.test.find_one({'$where': delay(0.001)})) + docs.append(client.test.test.find_one({"$where": delay(0.001)})) + threads = [threading.Thread(target=find_one) for _ in range(50)] for thread in threads: thread.start() @@ -443,7 +433,7 @@ def test_max_pool_size(self): def f(): for _ in range(5): - collection.find_one({'$where': delay(0.1)}) + collection.find_one({"$where": delay(0.1)}) assert len(cx_pool.sockets) <= max_pool_size with lock: @@ -476,7 +466,7 @@ def test_max_pool_size_none(self): def f(): for _ in range(5): - collection.find_one({'$where': delay(0.1)}) + collection.find_one({"$where": delay(0.1)}) with lock: self.n_passed += 1 @@ -489,25 +479,21 @@ def f(): joinall(threads) self.assertEqual(nthreads, self.n_passed) self.assertTrue(len(cx_pool.sockets) > 1) - self.assertEqual(cx_pool.max_pool_size, float('inf')) - + self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): c = rs_or_single_client(maxPoolSize=0) self.addCleanup(c.close) pool = get_pool(c) - self.assertEqual(pool.max_pool_size, float('inf')) + self.assertEqual(pool.max_pool_size, float("inf")) def test_max_pool_size_with_connection_failure(self): # The pool acquires its semaphore before attempting to connect; ensure # it releases the semaphore on connection failure. test_pool = Pool( - ('somedomainthatdoesntexist.org', 27017), - PoolOptions( - max_pool_size=1, - connect_timeout=1, - socket_timeout=1, - wait_queue_timeout=1)) + ("somedomainthatdoesntexist.org", 27017), + PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1), + ) test_pool.ready() # First call to get_socket fails; if pool doesn't release its semaphore @@ -521,8 +507,7 @@ def test_max_pool_size_with_connection_failure(self): # Testing for AutoReconnect instead of ConnectionFailure, above, # is sufficient right *now* to catch a semaphore leak. But that # seems error-prone, so check the message too. - self.assertNotIn('waiting for socket from pool', - str(context.exception)) + self.assertNotIn("waiting for socket from pool", str(context.exception)) if __name__ == "__main__": diff --git a/test/test_pymongo.py b/test/test_pymongo.py index 780a4beb8b..7ec32e16a6 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -15,17 +15,18 @@ """Test the pymongo module itself.""" import sys + sys.path[0:0] = [""] -import pymongo from test import unittest +import pymongo + class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient - self.assertEqual(pymongo.MongoClient, - pymongo.mongo_client.MongoClient) + self.assertEqual(pymongo.MongoClient, pymongo.mongo_client.MongoClient) if __name__ == "__main__": diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index 90ada05c6f..a27af6e217 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -18,15 +18,16 @@ sys.path[0:0] = [""] +from test import client_context, unittest +from test.test_client import IntegrationTest +from test.utils import rs_or_single_client + from bson import decode, encode -from bson.binary import Binary, JAVA_LEGACY, UuidRepresentation +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import InvalidBSON -from bson.raw_bson import RawBSONDocument, DEFAULT_RAW_BSON_OPTIONS +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from bson.son import SON -from test import client_context, unittest -from test.utils import rs_or_single_client -from test.test_client import IntegrationTest class TestRawBSONDocument(IntegrationTest): @@ -35,9 +36,9 @@ class TestRawBSONDocument(IntegrationTest): # 'name': 'Sherlock', # 'addresses': [{'street': 'Baker Street'}]} bson_string = ( - b'Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t' - b'\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e' - b'\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00' + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" ) document = RawBSONDocument(bson_string) @@ -52,10 +53,10 @@ def tearDown(self): self.client.pymongo_test.test_raw.drop() def test_decode(self): - self.assertEqual('Sherlock', self.document['name']) - first_address = self.document['addresses'][0] + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] self.assertIsInstance(first_address, RawBSONDocument) - self.assertEqual('Baker Street', first_address['street']) + self.assertEqual("Baker Street", first_address["street"]) def test_raw(self): self.assertEqual(self.bson_string, self.document.raw) @@ -63,44 +64,45 @@ def test_raw(self): def test_empty_doc(self): doc = RawBSONDocument(encode({})) with self.assertRaises(KeyError): - doc['does-not-exist'] + doc["does-not-exist"] def test_invalid_bson_sequence(self): - bson_byte_sequence = encode({'a': 1})+encode({}) - with self.assertRaisesRegex(InvalidBSON, 'invalid object length'): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): RawBSONDocument(bson_byte_sequence) def test_invalid_bson_eoo(self): - invalid_bson_eoo = encode({'a': 1})[:-1] + b'\x01' - with self.assertRaisesRegex(InvalidBSON, 'bad eoo'): + invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): RawBSONDocument(invalid_bson_eoo) @client_context.require_connection def test_round_trip(self): db = self.client.get_database( - 'pymongo_test', - codec_options=CodecOptions(document_class=RawBSONDocument)) + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) db.test_raw.insert_one(self.document) - result = db.test_raw.find_one(self.document['_id']) + result = db.test_raw.find_one(self.document["_id"]) assert result is not None self.assertIsInstance(result, RawBSONDocument) self.assertEqual(dict(self.document.items()), dict(result.items())) @client_context.require_connection def test_round_trip_raw_uuid(self): - coll = self.client.get_database('pymongo_test').test_raw + coll = self.client.get_database("pymongo_test").test_raw uid = uuid.uuid4() - doc = {'_id': 1, - 'bin4': Binary(uid.bytes, 4), - 'bin3': Binary(uid.bytes, 3)} + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} raw = RawBSONDocument(encode(doc)) coll.insert_one(raw) self.assertEqual(coll.find_one(), doc) uuid_coll = coll.with_options( codec_options=coll.codec_options.with_options( - uuid_representation=UuidRepresentation.STANDARD)) - self.assertEqual(uuid_coll.find_one(), - {'_id': 1, 'bin4': uid, 'bin3': Binary(uid.bytes, 3)}) + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) # Test that the raw bytes haven't changed. raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) @@ -111,44 +113,46 @@ def test_with_codec_options(self): # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} # encoded with JAVA_LEGACY uuid representation. bson_string = ( - b'-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02' - b'\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM' - b'\x01\x00\x00\x00' + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" ) document = RawBSONDocument( bson_string, - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument)) + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) - self.assertEqual(uuid.UUID('026fab8f-975f-4965-9fbf-85ad874c60ff'), - document['_id']) + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) @client_context.require_connection def test_round_trip_codec_options(self): doc = { - 'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), - '_id': uuid.UUID('026fab8f-975f-4965-9fbf-85ad874c60ff') + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), } db = self.client.pymongo_test coll = db.get_collection( - 'test_raw', - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY)) + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) coll.insert_one(doc) - raw_java_legacy = CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument) - coll = db.get_collection('test_raw', codec_options=raw_java_legacy) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) self.assertEqual( - RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), - coll.find_one()) + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), coll.find_one() + ) @client_context.require_connection def test_raw_bson_document_embedded(self): - doc = {'embedded': self.document} + doc = {"embedded": self.document} db = self.client.pymongo_test db.test_raw.insert_one(doc) result = db.test_raw.find_one() assert result is not None - self.assertEqual(decode(self.document.raw), result['embedded']) + self.assertEqual(decode(self.document.raw), result["embedded"]) # Make sure that CodecOptions are preserved. # {'embedded': [ @@ -157,40 +161,46 @@ def test_raw_bson_document_embedded(self): # ]} # encoded with JAVA_LEGACY uuid representation. bson_string = ( - b'D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00' - b'\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00' - b'\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00' - b'\x00' + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" ) rbd = RawBSONDocument( bson_string, - codec_options=CodecOptions(uuid_representation=JAVA_LEGACY, - document_class=RawBSONDocument)) + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) db.test_raw.drop() db.test_raw.insert_one(rbd) - result = db.get_collection('test_raw', codec_options=CodecOptions( - uuid_representation=JAVA_LEGACY)).find_one() + result = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() assert result is not None - self.assertEqual(rbd['embedded'][0]['_id'], - result['embedded'][0]['_id']) + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) @client_context.require_connection def test_write_response_raw_bson(self): coll = self.client.get_database( - 'pymongo_test', - codec_options=CodecOptions(document_class=RawBSONDocument)).test_raw + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw # No Exceptions raised while handling write response. coll.insert_one(self.document) coll.delete_one(self.document) coll.insert_many([self.document]) coll.delete_many(self.document) - coll.update_one(self.document, {'$set': {'a': 'b'}}, upsert=True) - coll.update_many(self.document, {'$set': {'b': 'c'}}) + coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + coll.update_many(self.document, {"$set": {"b": "c"}}) def test_preserve_key_ordering(self): - keyvaluepairs = [('a', 1), ('b', 2), ('c', 3),] + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) for rkey, elt in zip(rawdoc, keyvaluepairs): diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 1d21db8900..d5df682fba 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -14,13 +14,13 @@ """Test the read_concern module.""" +from test import IntegrationTest, client_context +from test.utils import OvertCommandListener, rs_or_single_client, single_client + from bson.son import SON from pymongo.errors import OperationFailure from pymongo.read_concern import ReadConcern -from test import client_context, IntegrationTest -from test.utils import single_client, rs_or_single_client, OvertCommandListener - class TestReadConcern(IntegrationTest): listener: OvertCommandListener @@ -32,12 +32,12 @@ def setUpClass(cls): cls.listener = OvertCommandListener() cls.client = single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test - client_context.client.pymongo_test.create_collection('coll') + client_context.client.pymongo_test.create_collection("coll") @classmethod def tearDownClass(cls): cls.client.close() - client_context.client.pymongo_test.drop_collection('coll') + client_context.client.pymongo_test.drop_collection("coll") super(TestReadConcern, cls).tearDownClass() def tearDown(self): @@ -49,25 +49,23 @@ def test_read_concern(self): self.assertIsNone(rc.level) self.assertTrue(rc.ok_for_legacy) - rc = ReadConcern('majority') - self.assertEqual('majority', rc.level) + rc = ReadConcern("majority") + self.assertEqual("majority", rc.level) self.assertFalse(rc.ok_for_legacy) - rc = ReadConcern('local') - self.assertEqual('local', rc.level) + rc = ReadConcern("local") + self.assertEqual("local", rc.level) self.assertTrue(rc.ok_for_legacy) self.assertRaises(TypeError, ReadConcern, 42) def test_read_concern_uri(self): - uri = 'mongodb://%s/?readConcernLevel=majority' % ( - client_context.pair,) + uri = "mongodb://%s/?readConcernLevel=majority" % (client_context.pair,) client = rs_or_single_client(uri, connect=False) - self.assertEqual(ReadConcern('majority'), client.read_concern) + self.assertEqual(ReadConcern("majority"), client.read_concern) def test_invalid_read_concern(self): - coll = self.db.get_collection( - 'coll', read_concern=ReadConcern('unknown')) + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) # We rely on the server to validate read concern. with self.assertRaises(OperationFailure): coll.find_one() @@ -75,46 +73,46 @@ def test_invalid_read_concern(self): def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.find({'field': 'value'})) - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + tuple(coll.find({"field": "value"})) + self.assertNotIn("readConcern", self.listener.results["started"][0].command) self.listener.results.clear() # Explicitly set readConcern to 'local'. - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.find({'field': 'value'})) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.find({"field": "value"})) self.assertEqualCommand( - SON([('find', 'coll'), - ('filter', {'field': 'value'}), - ('readConcern', {'level': 'local'})]), - self.listener.results['started'][0].command) + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.results["started"][0].command, + ) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.aggregate([{'$match': {'field': 'value'}}])) - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + tuple(coll.aggregate([{"$match": {"field": "value"}}])) + self.assertNotIn("readConcern", self.listener.results["started"][0].command) self.listener.results.clear() # Explicitly set readConcern to 'local'. - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.aggregate([{'$match': {'field': 'value'}}])) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.aggregate([{"$match": {"field": "value"}}])) self.assertEqual( - {'level': 'local'}, - self.listener.results['started'][0].command['readConcern']) + {"level": "local"}, self.listener.results["started"][0].command["readConcern"] + ) def test_aggregate_out(self): - coll = self.db.get_collection('coll', read_concern=ReadConcern('local')) - tuple(coll.aggregate([{'$match': {'field': 'value'}}, - {'$out': 'output_collection'}])) + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + tuple(coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])) # Aggregate with $out supports readConcern MongoDB 4.2 onwards. if client_context.version >= (4, 1): - self.assertIn('readConcern', - self.listener.results['started'][0].command) + self.assertIn("readConcern", self.listener.results["started"][0].command) else: - self.assertNotIn('readConcern', - self.listener.results['started'][0].command) + self.assertNotIn("readConcern", self.listener.results["started"][0].command) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 69f61f94e8..ae2fa8bcee 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -22,52 +22,56 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + OvertCommandListener, + connected, + one, + rs_client, + single_client, + wait_until, +) +from test.version import Version + from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure from pymongo.message import _maybe_add_read_preference from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import (ReadPreference, MovingAverage, - Primary, PrimaryPreferred, - Secondary, SecondaryPreferred, - Nearest) +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) from pymongo.server_description import ServerDescription -from pymongo.server_selectors import readable_server_selector, Selection +from pymongo.server_selectors import Selection, readable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern -from test import (SkipTest, - client_context, - IntegrationTest, - unittest) -from test.utils import (connected, - one, - OvertCommandListener, - rs_client, - single_client, - wait_until) -from test.version import Version - class TestSelections(IntegrationTest): - @client_context.require_connection def test_bool(self): client = single_client() wait_until(lambda: client.address, "discover primary") - selection = Selection.from_topology_description( - client._topology.description) + selection = Selection.from_topology_description(client._topology.description) self.assertTrue(selection) self.assertFalse(selection.with_server_descriptions([])) class TestReadPreferenceObjects(unittest.TestCase): - prefs = [Primary(), - PrimaryPreferred(), - Secondary(), - Nearest(tag_sets=[{'a': 1}, {'b': 2}]), - SecondaryPreferred(max_staleness=30)] + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] def test_pickle(self): for pref in self.prefs: @@ -83,7 +87,6 @@ def test_deepcopy(self): class TestReadPreferencesBase(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): @@ -94,47 +97,41 @@ def setUp(self): # Insert some data so we can use cursors in read_from_which_host self.client.pymongo_test.test.drop() self.client.get_database( - "pymongo_test", - write_concern=WriteConcern(w=client_context.w)).test.insert_many( - [{'_id': i} for i in range(10)]) + "pymongo_test", write_concern=WriteConcern(w=client_context.w) + ).test.insert_many([{"_id": i} for i in range(10)]) self.addCleanup(self.client.pymongo_test.test.drop) def read_from_which_host(self, client): - """Do a find() on the client and return which host was used - """ + """Do a find() on the client and return which host was used""" cursor = client.pymongo_test.test.find() next(cursor) return cursor.address def read_from_which_kind(self, client): """Do a find() on the client and return 'primary' or 'secondary' - depending on which the client used. + depending on which the client used. """ address = self.read_from_which_host(client) if address == client.primary: - return 'primary' + return "primary" elif address in client.secondaries: - return 'secondary' + return "secondary" else: self.fail( - 'Cursor used address %s, expected either primary ' - '%s or secondaries %s' % ( - address, client.primary, client.secondaries)) + "Cursor used address %s, expected either primary " + "%s or secondaries %s" % (address, client.primary, client.secondaries) + ) def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) - wait_until( - lambda: len(c.nodes - c.arbiters) == client_context.w, - "discovered all nodes") + wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) - self.assertEqual(expected, used, 'Cursor used %s, expected %s' % ( - used, expected)) + self.assertEqual(expected, used, "Cursor used %s, expected %s" % (used, expected)) class TestSingleSecondaryOk(TestReadPreferencesBase): - def test_reads_from_secondary(self): host, port = next(iter(self.client.secondaries)) @@ -167,62 +164,53 @@ def test_reads_from_secondary(self): class TestReadPreferences(TestReadPreferencesBase): - def test_mode_validation(self): - for mode in (ReadPreference.PRIMARY, - ReadPreference.PRIMARY_PREFERRED, - ReadPreference.SECONDARY, - ReadPreference.SECONDARY_PREFERRED, - ReadPreference.NEAREST): - self.assertEqual( - mode, - rs_client(read_preference=mode).read_preference) - - self.assertRaises( - TypeError, - rs_client, read_preference='foo') + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual(mode, rs_client(read_preference=mode).read_preference) + + self.assertRaises(TypeError, rs_client, read_preference="foo") def test_tag_sets_validation(self): S = Secondary(tag_sets=[{}]) - self.assertEqual( - [{}], - rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{}], rs_client(read_preference=S).read_preference.tag_sets) - S = Secondary(tag_sets=[{'k': 'v'}]) - self.assertEqual( - [{'k': 'v'}], - rs_client(read_preference=S).read_preference.tag_sets) + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual([{"k": "v"}], rs_client(read_preference=S).read_preference.tag_sets) - S = Secondary(tag_sets=[{'k': 'v'}, {}]) - self.assertEqual( - [{'k': 'v'}, {}], - rs_client(read_preference=S).read_preference.tag_sets) + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual([{"k": "v"}, {}], rs_client(read_preference=S).read_preference.tag_sets) self.assertRaises(ValueError, Secondary, tag_sets=[]) # One dict not ok, must be a list of dicts - self.assertRaises(TypeError, Secondary, tag_sets={'k': 'v'}) + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) - self.assertRaises(TypeError, Secondary, tag_sets='foo') + self.assertRaises(TypeError, Secondary, tag_sets="foo") - self.assertRaises(TypeError, Secondary, tag_sets=['foo']) + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) def test_threshold_validation(self): - self.assertEqual(17, rs_client( - localThresholdMS=17, connect=False).options.local_threshold_ms) + self.assertEqual( + 17, rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms + ) - self.assertEqual(42, rs_client( - localThresholdMS=42, connect=False).options.local_threshold_ms) + self.assertEqual( + 42, rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms + ) - self.assertEqual(666, rs_client( - localThresholdMS=666, connect=False).options.local_threshold_ms) + self.assertEqual( + 666, rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms + ) - self.assertEqual(0, rs_client( - localThresholdMS=0, connect=False).options.local_threshold_ms) + self.assertEqual(0, rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms) - self.assertRaises(ValueError, - rs_client, - localthresholdms=-1) + self.assertRaises(ValueError, rs_client, localthresholdms=-1) def test_zero_latency(self): ping_times: set = set() @@ -232,11 +220,8 @@ def test_zero_latency(self): for ping_time, host in zip(ping_times, self.client.nodes): ServerDescription._host_to_round_trip_time[host] = ping_time try: - client = connected( - rs_client(readPreference='nearest', localThresholdMS=0)) - wait_until( - lambda: client.nodes == self.client.nodes, - "discovered all nodes") + client = connected(rs_client(readPreference="nearest", localThresholdMS=0)) + wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes") host = self.read_from_which_host(client) for _ in range(5): self.assertEqual(host, self.read_from_which_host(client)) @@ -244,33 +229,25 @@ def test_zero_latency(self): ServerDescription._host_to_round_trip_time.clear() def test_primary(self): - self.assertReadsFrom( - 'primary', read_preference=ReadPreference.PRIMARY) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises( - ConfigurationError, - rs_client, tag_sets=[{'dc': 'ny'}]) + self.assertRaises(ConfigurationError, rs_client, tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): - self.assertReadsFrom( - 'primary', read_preference=ReadPreference.PRIMARY_PREFERRED) + self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) def test_secondary(self): - self.assertReadsFrom( - 'secondary', read_preference=ReadPreference.SECONDARY) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) def test_secondary_preferred(self): - self.assertReadsFrom( - 'secondary', read_preference=ReadPreference.SECONDARY_PREFERRED) + self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) def test_nearest(self): # With high localThresholdMS, expect to read from any # member - c = rs_client( - read_preference=ReadPreference.NEAREST, - localThresholdMS=10000) # 10 seconds + c = rs_client(read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds data_members = {self.client.primary} | self.client.secondaries @@ -286,16 +263,16 @@ def test_nearest(self): i += 1 not_used = data_members.difference(used) - latencies = ', '.join( - '%s: %dms' % (server.description.address, - server.description.round_trip_time) - for server in c._get_topology().select_servers( - readable_server_selector)) + latencies = ", ".join( + "%s: %dms" % (server.description.address, server.description.round_trip_time) + for server in c._get_topology().select_servers(readable_server_selector) + ) self.assertFalse( not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies)) + " but didn't use %s\nlatencies: %s" % (not_used, latencies), + ) class ReadPrefTester(MongoClient): @@ -307,16 +284,14 @@ def __init__(self, *args, **kwargs): @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): - context = super(ReadPrefTester, self)._socket_for_reads( - read_preference, session) + context = super(ReadPrefTester, self)._socket_for_reads(read_preference, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @contextlib.contextmanager def _socket_from_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._socket_from_server( - read_preference, server, session) + context = super(ReadPrefTester, self)._socket_from_server(read_preference, server, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @@ -325,12 +300,13 @@ def record_a_read(self, address): server = self._get_topology().select_server_by_address(address, 0) self.has_read_from.add(server) + _PREF_MAP = [ (Primary, SERVER_TYPE.RSPrimary), (PrimaryPreferred, SERVER_TYPE.RSPrimary), (Secondary, SERVER_TYPE.RSSecondary), (SecondaryPreferred, SERVER_TYPE.RSSecondary), - (Nearest, 'any') + (Nearest, "any"), ] @@ -345,16 +321,18 @@ def setUpClass(cls): cls.c = ReadPrefTester( client_context.pair, # Ignore round trip times, to test ReadPreference modes only. - localThresholdMS=1000*1000) + localThresholdMS=1000 * 1000, + ) cls.client_version = Version.from_client(cls.c) # mapReduce fails if the collection does not exist. coll = cls.c.pymongo_test.get_collection( - 'test', write_concern=WriteConcern(w=client_context.w)) + "test", write_concern=WriteConcern(w=client_context.w) + ) coll.insert_one({}) @classmethod def tearDownClass(cls): - cls.c.drop_database('pymongo_test') + cls.c.drop_database("pymongo_test") cls.c.close() def executed_on_which_server(self, client, fn, *args, **kwargs): @@ -366,12 +344,13 @@ def executed_on_which_server(self, client, fn, *args, **kwargs): def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): server = self.executed_on_which_server(client, fn, *args, **kwargs) - self.assertEqual(SERVER_TYPE._fields[server_type], - SERVER_TYPE._fields[server.description.server_type]) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) def _test_fn(self, server_type, fn): for _ in range(10): - if server_type == 'any': + if server_type == "any": used = set() for _ in range(1000): server = self.executed_on_which_server(self.c, fn) @@ -381,13 +360,9 @@ def _test_fn(self, server_type, fn): break assert self.c.primary is not None - unused = self.c.secondaries.union( - set([self.c.primary]) - ).difference(used) + unused = self.c.secondaries.union(set([self.c.primary])).difference(used) if unused: - self.fail( - "Some members not used for NEAREST: %s" % ( - unused)) + self.fail("Some members not used for NEAREST: %s" % (unused)) else: self.assertExecutedOn(server_type, self.c, fn) @@ -408,8 +383,7 @@ def test_command(self): # Test that the generic command helper obeys the read preference # passed to it. for mode, server_type in _PREF_MAP: - func = lambda: self.c.pymongo_test.command('dbStats', - read_preference=mode()) + func = lambda: self.c.pymongo_test.command("dbStats", read_preference=mode()) self._test_fn(server_type, func) def test_create_collection(self): @@ -417,30 +391,33 @@ def test_create_collection(self): # the collection already exists. self._test_primary_helper( lambda: self.c.pymongo_test.create_collection( - 'some_collection%s' % random.randint(0, sys.maxsize))) + "some_collection%s" % random.randint(0, sys.maxsize) + ) + ) def test_count_documents(self): - self._test_coll_helper( - True, self.c.pymongo_test.test, 'count_documents', {}) + self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) def test_estimated_document_count(self): - self._test_coll_helper( - True, self.c.pymongo_test.test, 'estimated_document_count') + self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") def test_distinct(self): - self._test_coll_helper(True, self.c.pymongo_test.test, 'distinct', 'a') + self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") def test_aggregate(self): - self._test_coll_helper(True, self.c.pymongo_test.test, - 'aggregate', - [{'$project': {'_id': 1}}]) + self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) def test_aggregate_write(self): # 5.0 servers support $out on secondaries. secondary_ok = client_context.version.at_least(5, 0) - self._test_coll_helper(secondary_ok, self.c.pymongo_test.test, - 'aggregate', - [{'$project': {'_id': 1}}, {'$out': "agg_write_test"}]) + self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) class TestMovingAverage(unittest.TestCase): @@ -456,77 +433,48 @@ def test_moving_average(self): class TestMongosAndReadPreference(IntegrationTest): - def test_read_preference_document(self): pref = Primary() - self.assertEqual( - pref.document, - {'mode': 'primary'}) + self.assertEqual(pref.document, {"mode": "primary"}) pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( pref.document, - {'mode': 'primaryPreferred'}) - pref = PrimaryPreferred(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'primaryPreferred', 'tags': [{'dc': 'sf'}]}) - pref = PrimaryPreferred( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'primaryPreferred', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( - pref.document, - {'mode': 'secondary'}) - pref = Secondary(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'secondary', 'tags': [{'dc': 'sf'}]}) - pref = Secondary( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'secondary', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( pref.document, - {'mode': 'secondaryPreferred'}) - pref = SecondaryPreferred(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'secondaryPreferred', 'tags': [{'dc': 'sf'}]}) - pref = SecondaryPreferred( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'secondaryPreferred', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30) self.assertEqual( - pref.document, - {'mode': 'nearest'}) - pref = Nearest(tag_sets=[{'dc': 'sf'}]) - self.assertEqual( - pref.document, - {'mode': 'nearest', 'tags': [{'dc': 'sf'}]}) - pref = Nearest( - tag_sets=[{'dc': 'sf'}], max_staleness=30) - self.assertEqual( - pref.document, - {'mode': 'nearest', - 'tags': [{'dc': 'sf'}], - 'maxStalenessSeconds': 30}) + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) with self.assertRaises(TypeError): # Float is prohibited. @@ -540,72 +488,67 @@ def test_read_preference_document(self): def test_read_preference_document_hedge(self): cases = { - 'primaryPreferred': PrimaryPreferred, - 'secondary': Secondary, - 'secondaryPreferred': SecondaryPreferred, - 'nearest': Nearest, + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, } for mode, cls in cases.items(): with self.assertRaises(TypeError): cls(hedge=[]) # type: ignore pref = cls(hedge={}) - self.assertEqual(pref.document, {'mode': mode}) + self.assertEqual(pref.document, {"mode": mode}) out = _maybe_add_read_preference({}, pref) if cls == SecondaryPreferred: # SecondaryPreferred without hedge doesn't add $readPreference. self.assertEqual(out, {}) else: - self.assertEqual( - out, - SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {'enabled': True} + hedge = {"enabled": True} pref = cls(hedge=hedge) - self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {'enabled': False} + hedge = {"enabled": False} pref = cls(hedge=hedge) - self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {'enabled': False, 'extra': 'option'} + hedge = {"enabled": False, "extra": "option"} pref = cls(hedge=hedge) - self.assertEqual(pref.document, {'mode': mode, 'hedge': hedge}) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) def test_send_hedge(self): cases = { - 'primaryPreferred': PrimaryPreferred, - 'secondaryPreferred': SecondaryPreferred, - 'nearest': Nearest, + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, } if client_context.supports_secondary_read_pref: - cases['secondary'] = Secondary + cases["secondary"] = Secondary listener = OvertCommandListener() client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) - client.admin.command('ping') + client.admin.command("ping") for mode, cls in cases.items(): - pref = cls(hedge={'enabled': True}) - coll = client.test.get_collection('test', read_preference=pref) + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) listener.reset() coll.find_one() - started = listener.results['started'] + started = listener.results["started"] self.assertEqual(len(started), 1, started) cmd = started[0].command if client_context.is_rs or client_context.is_mongos: - self.assertIn('$readPreference', cmd) - self.assertEqual(cmd['$readPreference'], pref.document) + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) else: - self.assertNotIn('$readPreference', cmd) + self.assertNotIn("$readPreference", cmd) def test_maybe_add_read_preference(self): @@ -615,72 +558,74 @@ def test_maybe_add_read_preference(self): pref = PrimaryPreferred() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = PrimaryPreferred(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = Secondary() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = Secondary(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) # SecondaryPreferred without tag_sets or max_staleness doesn't add # $readPreference pref = SecondaryPreferred() out = _maybe_add_read_preference({}, pref) self.assertEqual(out, {}) - pref = SecondaryPreferred(tag_sets=[{'dc': 'nyc'}]) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = SecondaryPreferred(max_staleness=120) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) pref = Nearest() out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) - pref = Nearest(tag_sets=[{'dc': 'nyc'}]) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference({}, pref) - self.assertEqual( - out, SON([("$query", {}), ("$readPreference", pref.document)])) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) pref = Nearest() out = _maybe_add_read_preference(criteria, pref) self.assertEqual( out, - SON([("$query", {}), - ("$orderby", SON([("_id", 1)])), - ("$readPreference", pref.document)])) - pref = Nearest(tag_sets=[{'dc': 'nyc'}]) + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) out = _maybe_add_read_preference(criteria, pref) self.assertEqual( out, - SON([("$query", {}), - ("$orderby", SON([("_id", 1)])), - ("$readPreference", pref.document)])) + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) @client_context.require_mongos def test_mongos(self): res = client_context.client.config.shards.find_one() assert res is not None - shard = res['host'] - num_members = shard.count(',') + 1 + shard = res["host"] + num_members = shard.count(",") + 1 if num_members == 1: raise SkipTest("Need a replica set shard to test.") coll = client_context.client.pymongo_test.get_collection( - "test", - write_concern=WriteConcern(w=num_members)) + "test", write_concern=WriteConcern(w=num_members) + ) coll.drop() res = coll.insert_many([{} for _ in range(5)]) first_id = res.inserted_ids[0] @@ -688,11 +633,7 @@ def test_mongos(self): # Note - this isn't a perfect test since there's no way to # tell what shard member a query ran on. - for pref in (Primary(), - PrimaryPreferred(), - Secondary(), - SecondaryPreferred(), - Nearest()): + for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): qcoll = coll.with_options(read_preference=pref) results = list(qcoll.find().sort([("_id", 1)])) self.assertEqual(first_id, results[0]["_id"]) @@ -705,12 +646,14 @@ def test_mongos(self): def test_mongos_max_staleness(self): # Sanity check that we're sending maxStalenessSeconds coll = client_context.client.pymongo_test.get_collection( - "test", read_preference=SecondaryPreferred(max_staleness=120)) + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) # No error coll.find_one() coll = client_context.client.pymongo_test.get_collection( - "test", read_preference=SecondaryPreferred(max_staleness=10)) + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) try: coll.find_one() except OperationFailure as exc: @@ -719,14 +662,14 @@ def test_mongos_max_staleness(self): self.fail("mongos accepted invalid staleness") coll = single_client( - readPreference='secondaryPreferred', - maxStalenessSeconds=120).pymongo_test.test + readPreference="secondaryPreferred", maxStalenessSeconds=120 + ).pymongo_test.test # No error coll.find_one() coll = single_client( - readPreference='secondaryPreferred', - maxStalenessSeconds=10).pymongo_test.test + readPreference="secondaryPreferred", maxStalenessSeconds=10 + ).pymongo_test.test try: coll.find_one() except OperationFailure as exc: @@ -734,5 +677,6 @@ def test_mongos_max_staleness(self): else: self.fail("mongos accepted invalid staleness") + if __name__ == "__main__": unittest.main() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 13bc83a023..4dfc8f068c 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -21,33 +21,33 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + EventListener, + TestCreator, + disable_replication, + enable_replication, + rs_or_single_client, +) +from test.utils_spec_runner import SpecRunner + from pymongo import DESCENDING -from pymongo.errors import (BulkWriteError, - ConfigurationError, - WTimeoutError, - WriteConcernError, - WriteError) +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern -from test import (client_context, - IntegrationTest, - unittest) -from test.utils import (EventListener, - disable_replication, - enable_replication, - rs_or_single_client, - TestCreator) -from test.utils_spec_runner import SpecRunner - -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'read_write_concern') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "read_write_concern") class TestReadWriteConcernSpec(IntegrationTest): - def test_omit_default_read_write_concern(self): listener = EventListener() # Client with default readConcern and writeConcern @@ -63,85 +63,87 @@ def test_omit_default_read_write_concern(self): def rename_and_drop(): # Ensure collection exists. collection.insert_one({}) - collection.rename('collection2') + collection.rename("collection2") client.pymongo_test.collection2.drop() def insert_command_default_write_concern(): collection.database.command( - 'insert', 'collection', documents=[{}], - write_concern=WriteConcern()) + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) ops = [ - ('aggregate', lambda: list(collection.aggregate([]))), - ('find', lambda: list(collection.find())), - ('insert_one', lambda: collection.insert_one({})), - ('update_one', - lambda: collection.update_one({}, {'$set': {'x': 1}})), - ('update_many', - lambda: collection.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: collection.delete_one({})), - ('delete_many', lambda: collection.delete_many({})), - ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('rename_and_drop', rename_and_drop), - ('command', insert_command_default_write_concern) + ("aggregate", lambda: list(collection.aggregate([]))), + ("find", lambda: list(collection.find())), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), ] for name, f in ops: listener.results.clear() f() - self.assertGreaterEqual(len(listener.results['started']), 1) - for i, event in enumerate(listener.results['started']): + self.assertGreaterEqual(len(listener.results["started"]), 1) + for i, event in enumerate(listener.results["started"]): self.assertNotIn( - 'readConcern', event.command, - "%s sent default readConcern with %s" % ( - name, event.command_name)) + "readConcern", + event.command, + "%s sent default readConcern with %s" % (name, event.command_name), + ) self.assertNotIn( - 'writeConcern', event.command, - "%s sent default writeConcern with %s" % ( - name, event.command_name)) + "writeConcern", + event.command, + "%s sent default writeConcern with %s" % (name, event.command_name), + ) def assertWriteOpsRaise(self, write_concern, expected_exception): wc = write_concern.document # Set socket timeout to avoid indefinite stalls - client = rs_or_single_client( - w=wc['w'], wTimeoutMS=wc['wtimeout'], socketTimeoutMS=30000) - db = client.get_database('pymongo_test') + client = rs_or_single_client(w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000) + db = client.get_database("pymongo_test") coll = db.test def insert_command(): coll.database.command( - 'insert', 'new_collection', documents=[{}], + "insert", + "new_collection", + documents=[{}], writeConcern=write_concern.document, - parse_write_concern_error=True) + parse_write_concern_error=True, + ) ops = [ - ('insert_one', lambda: coll.insert_one({})), - ('insert_many', lambda: coll.insert_many([{}, {}])), - ('update_one', lambda: coll.update_one({}, {'$set': {'x': 1}})), - ('update_many', lambda: coll.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: coll.delete_one({})), - ('delete_many', lambda: coll.delete_many({})), - ('bulk_write', lambda: coll.bulk_write([InsertOne({})])), - ('command', insert_command), - ('aggregate', lambda: coll.aggregate([{'$out': 'out'}])), + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + ("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), # SERVER-46668 Delete all the documents in the collection to # workaround a hang in createIndexes. - ('delete_many', lambda: coll.delete_many({})), - ('create_index', lambda: coll.create_index([('a', DESCENDING)])), - ('create_indexes', lambda: coll.create_indexes([IndexModel('b')])), - ('drop_index', lambda: coll.drop_index([('a', DESCENDING)])), - ('create', lambda: db.create_collection('new')), - ('rename', lambda: coll.rename('new')), - ('drop', lambda: db.new.drop()), + ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), ] # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. if client_context.version[:2] != (3, 6): - ops.append(('drop_database', lambda: client.drop_database(db))) + ops.append(("drop_database", lambda: client.drop_database(db))) for name, f in ops: # Ensure insert_many and bulk_write still raise BulkWriteError. - if name in ('insert_many', 'bulk_write'): + if name in ("insert_many", "bulk_write"): expected = BulkWriteError else: expected = expected_exception @@ -150,25 +152,25 @@ def insert_command(): if expected == BulkWriteError: bulk_result = cm.exception.details assert bulk_result is not None - wc_errors = bulk_result['writeConcernErrors'] + wc_errors = bulk_result["writeConcernErrors"] self.assertTrue(wc_errors) @client_context.require_replica_set def test_raise_write_concern_error(self): - self.addCleanup(client_context.client.drop_database, 'pymongo_test') + self.addCleanup(client_context.client.drop_database, "pymongo_test") assert client_context.w is not None self.assertWriteOpsRaise( - WriteConcern(w=client_context.w+1, wtimeout=1), WriteConcernError) + WriteConcern(w=client_context.w + 1, wtimeout=1), WriteConcernError + ) @client_context.require_secondaries_count(1) @client_context.require_test_commands def test_raise_wtimeout(self): - self.addCleanup(client_context.client.drop_database, 'pymongo_test') + self.addCleanup(client_context.client.drop_database, "pymongo_test") self.addCleanup(enable_replication, client_context.client) # Disable replication to guarantee a wtimeout error. disable_replication(client_context.client) - self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), - WTimeoutError) + self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) @client_context.require_failCommand_fail_point def test_error_includes_errInfo(self): @@ -176,21 +178,12 @@ def test_error_includes_errInfo(self): "code": 100, "codeName": "UnsatisfiableWriteConcern", "errmsg": "Not enough data-bearing nodes", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, } cause_wce = { "configureFailPoint": "failCommand", "mode": {"times": 2}, - "data": { - "failCommands": ["insert"], - "writeConcernError": expected_wce - }, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, } with self.fail_point(cause_wce): # Write concern error on insert includes errInfo. @@ -202,10 +195,15 @@ def test_error_includes_errInfo(self): with self.assertRaises(BulkWriteError) as ctx: self.db.test.bulk_write([InsertOne({})]) expected_details = { - 'writeErrors': [], - 'writeConcernErrors': [expected_wce], - 'nInserted': 1, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, - 'nRemoved': 0, 'upserted': []} + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } self.assertEqual(ctx.exception.details, expected_details) @client_context.require_version_min(4, 9) @@ -218,15 +216,14 @@ def test_write_error_details_exposes_errinfo(self): validator = {"x": {"$type": "string"}} db.create_collection("test", validator=validator) with self.assertRaises(WriteError) as ctx: - db.test.insert_one({'x': 1}) + db.test.insert_one({"x": 1}) self.assertEqual(ctx.exception.code, 121) self.assertIsNotNone(ctx.exception.details) assert ctx.exception.details is not None - self.assertIsNotNone(ctx.exception.details.get('errInfo')) - for event in listener.results['succeeded']: - if event.command_name == 'insert': - self.assertEqual( - event.reply['writeErrors'][0], ctx.exception.details) + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.results["succeeded"]: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) break else: self.fail("Couldn't find insert event.") @@ -235,77 +232,58 @@ def test_write_error_details_exposes_errinfo(self): def normalize_write_concern(concern): result = {} for key in concern: - if key.lower() == 'wtimeoutms': - result['wtimeout'] = concern[key] - elif key == 'journal': - result['j'] = concern[key] + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] else: result[key] = concern[key] return result def create_connection_string_test(test_case): - def run_test(self): - uri = test_case['uri'] - valid = test_case['valid'] - warning = test_case['warning'] + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] if not valid: if warning is False: - self.assertRaises( - (ConfigurationError, ValueError), - MongoClient, - uri, - connect=False) + self.assertRaises((ConfigurationError, ValueError), MongoClient, uri, connect=False) else: with warnings.catch_warnings(): - warnings.simplefilter('error', UserWarning) - self.assertRaises( - UserWarning, - MongoClient, - uri, - connect=False) + warnings.simplefilter("error", UserWarning) + self.assertRaises(UserWarning, MongoClient, uri, connect=False) else: client = MongoClient(uri, connect=False) - if 'writeConcern' in test_case: + if "writeConcern" in test_case: document = client.write_concern.document - self.assertEqual( - document, - normalize_write_concern(test_case['writeConcern'])) - if 'readConcern' in test_case: + self.assertEqual(document, normalize_write_concern(test_case["writeConcern"])) + if "readConcern" in test_case: document = client.read_concern.document - self.assertEqual(document, test_case['readConcern']) + self.assertEqual(document, test_case["readConcern"]) return run_test def create_document_test(test_case): - def run_test(self): - valid = test_case['valid'] + valid = test_case["valid"] - if 'writeConcern' in test_case: - normalized = normalize_write_concern(test_case['writeConcern']) + if "writeConcern" in test_case: + normalized = normalize_write_concern(test_case["writeConcern"]) if not valid: - self.assertRaises( - (ConfigurationError, ValueError), - WriteConcern, - **normalized) + self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized) else: write_concern = WriteConcern(**normalized) - self.assertEqual( - write_concern.document, test_case['writeConcernDocument']) - self.assertEqual( - write_concern.acknowledged, test_case['isAcknowledged']) - self.assertEqual( - write_concern.is_server_default, test_case['isServerDefault']) - if 'readConcern' in test_case: + self.assertEqual(write_concern.document, test_case["writeConcernDocument"]) + self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) + self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) + if "readConcern" in test_case: # Any string for 'level' is equaly valid - read_concern = ReadConcern(**test_case['readConcern']) - self.assertEqual(read_concern.document, test_case['readConcernDocument']) - self.assertEqual( - not bool(read_concern.level), test_case['isServerDefault']) + read_concern = ReadConcern(**test_case["readConcern"]) + self.assertEqual(read_concern.document, test_case["readConcernDocument"]) + self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) return run_test @@ -314,25 +292,26 @@ def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): dirname = os.path.split(dirpath)[-1] - if dirname == 'operation': + if dirname == "operation": # This directory is tested by TestOperations. continue - elif dirname == 'connection-string': + elif dirname == "connection-string": create_test = create_connection_string_test else: create_test = create_document_test for filename in filenames: with open(os.path.join(dirpath, filename)) as test_stream: - test_cases = json.load(test_stream)['tests'] + test_cases = json.load(test_stream)["tests"] fname = os.path.splitext(filename)[0] for test_case in test_cases: new_test = create_test(test_case) - test_name = 'test_%s_%s_%s' % ( - dirname.replace('-', '_'), - fname.replace('-', '_'), - str(test_case['description'].lower().replace(' ', '_'))) + test_name = "test_%s_%s_%s" % ( + dirname.replace("-", "_"), + fname.replace("-", "_"), + str(test_case["description"].lower().replace(" ", "_")), + ) new_test.__name__ = test_name setattr(TestReadWriteConcernSpec, new_test.__name__, new_test) @@ -343,11 +322,11 @@ def create_tests(): class TestOperation(SpecRunner): # Location of JSON test specifications. - TEST_PATH = os.path.join(_TEST_PATH, 'operation') + TEST_PATH = os.path.join(_TEST_PATH, "operation") def get_outcome_coll_name(self, outcome, collection): """Spec says outcome has an optional 'collection.name'.""" - return outcome['collection'].get('name', collection.name) + return outcome["collection"].get("name", collection.name) def create_operation_test(scenario_def, test, name): @@ -358,10 +337,9 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator( - create_operation_test, TestOperation, TestOperation.TEST_PATH) +test_creator = TestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) test_creator.create_tests() -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index f19a32ea4e..898be99d4d 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -18,12 +18,13 @@ sys.path[0:0] = [""] -from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError -from pymongo import ReadPreference -from test import unittest, client_context, client_knobs, MockClientTest +from test import MockClientTest, client_context, client_knobs, unittest from test.pymongo_mocks import MockClient from test.utils import wait_until +from pymongo import ReadPreference +from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError + @client_context.require_connection @client_context.require_no_load_balancer @@ -38,54 +39,53 @@ class TestSecondaryBecomesStandalone(MockClientTest): def test_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs', + host="a:1,b:2,c:3", + replicaSet="rs", serverSelectionTimeoutMS=100, - connect=False) + connect=False, + ) self.addCleanup(c.close) # C is brought up as a standalone. - c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") # Fail over. - c.kill_host('a:1') - c.kill_host('b:2') + c.kill_host("a:1") + c.kill_host("b:2") with self.assertRaises(ServerSelectionTimeoutError): - c.db.command('ping') + c.db.command("ping") self.assertEqual(c.address, None) # Client can still discover the primary node - c.revive_host('a:1') - wait_until(lambda: c.address is not None, 'connect to primary') - self.assertEqual(c.address, ('a', 1)) + c.revive_host("a:1") + wait_until(lambda: c.address is not None, "connect to primary") + self.assertEqual(c.address, ("a", 1)) def test_replica_set_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) self.addCleanup(c.close) - wait_until(lambda: ('b', 2) in c.secondaries, - 'discover host "b"') + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') - wait_until(lambda: ('c', 3) in c.secondaries, - 'discover host "c"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is brought up as a standalone. - c.mock_members.remove('c:3') - c.mock_standalones.append('c:3') + c.mock_members.remove("c:3") + c.mock_standalones.append("c:3") - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'update the list of secondaries') + wait_until(lambda: set([("b", 2)]) == c.secondaries, "update the list of secondaries") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) class TestSecondaryRemoved(MockClientTest): @@ -94,21 +94,21 @@ class TestSecondaryRemoved(MockClientTest): def test_replica_set_client(self): c = MockClient( standalones=[], - members=['a:1', 'b:2', 'c:3'], + members=["a:1", "b:2", "c:3"], mongoses=[], - host='a:1,b:2,c:3', - replicaSet='rs') + host="a:1,b:2,c:3", + replicaSet="rs", + ) self.addCleanup(c.close) - wait_until(lambda: ('b', 2) in c.secondaries, 'discover host "b"') - wait_until(lambda: ('c', 3) in c.secondaries, 'discover host "c"') + wait_until(lambda: ("b", 2) in c.secondaries, 'discover host "b"') + wait_until(lambda: ("c", 3) in c.secondaries, 'discover host "c"') # C is removed. - c.mock_hello_hosts.remove('c:3') - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'update list of secondaries') + c.mock_hello_hosts.remove("c:3") + wait_until(lambda: set([("b", 2)]) == c.secondaries, "update list of secondaries") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) class TestSocketError(MockClientTest): @@ -117,21 +117,22 @@ def test_socket_error_marks_member_down(self): with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], - members=['a:1', 'b:2'], + members=["a:1", "b:2"], mongoses=[], - host='a:1', - replicaSet='rs', - serverSelectionTimeoutMS=100) + host="a:1", + replicaSet="rs", + serverSelectionTimeoutMS=100, + ) self.addCleanup(c.close) - wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # b now raises socket.error. - c.mock_down_hosts.append('b:2') + c.mock_down_hosts.append("b:2") self.assertRaises( ConnectionFailure, - c.db.collection.with_options( - read_preference=ReadPreference.SECONDARY).find_one) + c.db.collection.with_options(read_preference=ReadPreference.SECONDARY).find_one, + ) self.assertEqual(1, len(c.nodes)) @@ -139,51 +140,44 @@ def test_socket_error_marks_member_down(self): class TestSecondaryAdded(MockClientTest): def test_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) self.addCleanup(c.close) - wait_until(lambda: len(c.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(c.nodes) == 2, "discover both nodes") # MongoClient connects to primary by default. - self.assertEqual(c.address, ('a', 1)) - self.assertEqual(set([('a', 1), ('b', 2)]), c.nodes) + self.assertEqual(c.address, ("a", 1)) + self.assertEqual(set([("a", 1), ("b", 2)]), c.nodes) # C is added. - c.mock_members.append('c:3') - c.mock_hello_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") - c.db.command('ping') + c.db.command("ping") - self.assertEqual(c.address, ('a', 1)) + self.assertEqual(c.address, ("a", 1)) - wait_until(lambda: set([('a', 1), ('b', 2), ('c', 3)]) == c.nodes, - 'reconnect to both secondaries') + wait_until( + lambda: set([("a", 1), ("b", 2), ("c", 3)]) == c.nodes, "reconnect to both secondaries" + ) def test_replica_set_client(self): c = MockClient( - standalones=[], - members=['a:1', 'b:2'], - mongoses=[], - host='a:1', - replicaSet='rs') + standalones=[], members=["a:1", "b:2"], mongoses=[], host="a:1", replicaSet="rs" + ) self.addCleanup(c.close) - wait_until(lambda: ('a', 1) == c.primary, 'discover the primary') - wait_until(lambda: set([('b', 2)]) == c.secondaries, - 'discover the secondary') + wait_until(lambda: ("a", 1) == c.primary, "discover the primary") + wait_until(lambda: set([("b", 2)]) == c.secondaries, "discover the secondary") # C is added. - c.mock_members.append('c:3') - c.mock_hello_hosts.append('c:3') + c.mock_members.append("c:3") + c.mock_hello_hosts.append("c:3") - wait_until(lambda: set([('b', 2), ('c', 3)]) == c.secondaries, - 'discover the new secondary') + wait_until(lambda: set([("b", 2), ("c", 3)]) == c.secondaries, "discover the new secondary") - self.assertEqual(('a', 1), c.primary) + self.assertEqual(("a", 1), c.primary) if __name__ == "__main__": diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index c4c093f66f..808477a8c0 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -21,28 +21,32 @@ sys.path[0:0] = [""] -from pymongo.mongo_client import MongoClient -from pymongo.monitoring import (ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutFailedReason, - PoolClearedEvent) -from pymongo.write_concern import WriteConcern - -from test import (client_context, - client_knobs, - IntegrationTest, - PyMongoTestCase, - unittest) -from test.utils import (CMAPListener, - OvertCommandListener, - rs_or_single_client, - TestCreator) +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + client_knobs, + unittest, +) +from test.utils import ( + CMAPListener, + OvertCommandListener, + TestCreator, + rs_or_single_client, +) from test.utils_spec_runner import SpecRunner +from pymongo.mongo_client import MongoClient +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.write_concern import WriteConcern # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_reads') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads") class TestClientOptions(PyMongoTestCase): @@ -57,9 +61,9 @@ def test_kwargs(self): self.assertEqual(client.options.retry_reads, False) def test_uri(self): - client = MongoClient('mongodb://h/?retryReads=true', connect=False) + client = MongoClient("mongodb://h/?retryReads=true", connect=False) self.assertEqual(client.options.retry_reads, True) - client = MongoClient('mongodb://h/?retryReads=false', connect=False) + client = MongoClient("mongodb://h/?retryReads=false", connect=False) self.assertEqual(client.options.retry_reads, False) @@ -76,51 +80,49 @@ def setUpClass(cls): def maybe_skip_scenario(self, test): super(TestSpec, self).maybe_skip_scenario(test) - skip_names = [ - 'listCollectionObjects', 'listIndexNames', 'listDatabaseObjects'] + skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] for name in skip_names: - if name.lower() in test['description'].lower(): - self.skipTest('PyMongo does not support %s' % (name,)) + if name.lower() in test["description"].lower(): + self.skipTest("PyMongo does not support %s" % (name,)) # Serverless does not support $out and collation. if client_context.serverless: - for operation in test['operations']: - if operation['name'] == 'aggregate': - for stage in operation['arguments']['pipeline']: + for operation in test["operations"]: + if operation["name"] == "aggregate": + for stage in operation["arguments"]["pipeline"]: if "$out" in stage: - self.skipTest( - "MongoDB Serverless does not support $out") - if "collation" in operation['arguments']: - self.skipTest( - "MongoDB Serverless does not support collations") + self.skipTest("MongoDB Serverless does not support $out") + if "collation" in operation["arguments"]: + self.skipTest("MongoDB Serverless does not support collations") # Skip changeStream related tests on MMAPv1 and serverless. - test_name = self.id().rsplit('.')[-1] - if 'changestream' in test_name.lower(): - if client_context.storage_engine == 'mmapv1': + test_name = self.id().rsplit(".")[-1] + if "changestream" in test_name.lower(): + if client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support change streams.") if client_context.serverless: self.skipTest("Serverless does not support change streams.") def get_scenario_coll_name(self, scenario_def): """Override a test's collection name to support GridFS tests.""" - if 'bucket_name' in scenario_def: - return scenario_def['bucket_name'] + if "bucket_name" in scenario_def: + return scenario_def["bucket_name"] return super(TestSpec, self).get_scenario_coll_name(scenario_def) def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" - if 'bucket_name' in scenario_def: + if "bucket_name" in scenario_def: db_name = self.get_scenario_db_name(scenario_def) db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority')) + db_name, write_concern=WriteConcern(w="majority") + ) # Create a bucket for the retryable reads GridFS tests. client_context.client.drop_database(db_name) - if scenario_def['data']: - data = scenario_def['data'] + if scenario_def["data"]: + data = scenario_def["data"] # Load data. - db['fs.chunks'].insert_many(data['fs.chunks']) - db['fs.files'].insert_many(data['fs.files']) + db["fs.chunks"].insert_many(data["fs.chunks"]) + db["fs.files"].insert_many(data["fs.files"]) else: super(TestSpec, self).setup_scenario(scenario_def) @@ -155,25 +157,23 @@ class TestPoolPausedError(IntegrationTest): RUN_ON_SERVERLESS = False @client_context.require_failCommand_blockConnection - @client_knobs(heartbeat_frequency=.05, min_heartbeat_interval=.05) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client( - maxPoolSize=1, - event_listeners=[cmap_listener, cmd_listener]) + client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() cmd_listener.reset() threads = [FindThread(client.pymongo_test.test) for _ in range(2)] fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['find'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91, + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, }, } with self.fail_point(fail_command): @@ -192,29 +192,25 @@ def test_pool_paused_error_is_retryable(self): break # Via CMAP monitoring, assert that the first check out succeeds. - cmap_events = cmap_listener.events_by_type(( - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - PoolClearedEvent)) + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) msg = pprint.pformat(cmap_listener.events) self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) - self.assertIsInstance( - cmap_events[2], ConnectionCheckOutFailedEvent, msg) - self.assertEqual(cmap_events[2].reason, - ConnectionCheckOutFailedReason.CONN_ERROR, - msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results['started'] + started = cmd_listener.results["started"] msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results['succeeded'] + succeeded = cmd_listener.results["succeeded"] self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results['failed'] + failed = cmd_listener.results["failed"] self.assertEqual(1, len(failed), msg) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index c4a401428a..0eb863f4cf 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -22,45 +22,46 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, client_knobs, unittest +from test.utils import ( + CMAPListener, + DeprecationFilter, + OvertCommandListener, + TestCreator, + rs_or_single_client, +) +from test.utils_spec_runner import SpecRunner +from test.version import Version + from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument from bson.son import SON - - -from pymongo.errors import (ConnectionFailure, - OperationFailure, - ServerSelectionTimeoutError, - WriteConcernError) +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) from pymongo.mongo_client import MongoClient -from pymongo.monitoring import (ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutFailedReason, - PoolClearedEvent) -from pymongo.operations import (InsertOne, - DeleteMany, - DeleteOne, - ReplaceOne, - UpdateMany, - UpdateOne) +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest, - SkipTest, - unittest) -from test.utils import (CMAPListener, - DeprecationFilter, - OvertCommandListener, - rs_or_single_client, - TestCreator) -from test.utils_spec_runner import SpecRunner -from test.version import Version - # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'legacy') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") class TestAllScenarios(SpecRunner): @@ -68,23 +69,23 @@ class TestAllScenarios(SpecRunner): RUN_ON_SERVERLESS = True def get_object_name(self, op): - return op.get('object', 'collection') + return op.get("object", "collection") def get_scenario_db_name(self, scenario_def): - return scenario_def.get('database_name', 'pymongo_test') + return scenario_def.get("database_name", "pymongo_test") def get_scenario_coll_name(self, scenario_def): - return scenario_def.get('collection_name', 'test') + return scenario_def.get("collection_name", "test") def run_test_ops(self, sessions, collection, test): # Transform retryable writes spec format into transactions. - operation = test['operation'] - outcome = test['outcome'] - if 'error' in outcome: - operation['error'] = outcome['error'] - if 'result' in outcome: - operation['result'] = outcome['result'] - test['operations'] = [operation] + operation = test["operation"] + outcome = test["outcome"] + if "error" in outcome: + operation["error"] = outcome["error"] + if "result" in outcome: + operation["result"] = outcome["result"] + test["operations"] = [operation] super(TestAllScenarios, self).run_test_ops(sessions, collection, test) @@ -96,6 +97,7 @@ def run_scenario(self): return run_scenario + test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() @@ -103,31 +105,36 @@ def run_scenario(self): def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), - (coll.bulk_write, [[InsertOne({}), - InsertOne({})]], {'ordered': False}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), (coll.bulk_write, [[ReplaceOne({}, {})]], {}), (coll.bulk_write, [[ReplaceOne({}, {}), ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), - UpdateOne({}, {'$set': {'a': 1}})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateOne({}, {"$set": {"a": 1}})]], + {}, + ), (coll.bulk_write, [[DeleteOne({})]], {}), (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {'$set': {'a': 1}}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), (coll.delete_one, [{}], {}), - (coll.find_one_and_replace, [{}, {'a': 3}], {}), - (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), + (coll.find_one_and_replace, [{}, {"a": 3}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), (coll.find_one_and_delete, [{}, {}], {}), ] def non_retryable_single_statement_ops(coll): return [ - (coll.bulk_write, [[UpdateOne({}, {'$set': {'a': 1}}), - UpdateMany({}, {'$set': {'a': 1}})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), - (coll.update_many, [{}, {'$set': {'a': 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), (coll.delete_many, [{}], {}), ] @@ -155,8 +162,7 @@ class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): def setUpClass(cls): super(TestRetryableWritesMMAPv1, cls).setUpClass() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.client = rs_or_single_client(retryWrites=True) cls.db = cls.client.pymongo_test @@ -169,14 +175,15 @@ def tearDownClass(cls): @client_context.require_no_standalone def test_actionable_error_message(self): - if client_context.storage_engine != 'mmapv1': - raise SkipTest('This cluster is not running MMAPv1') - - expected_msg = ("This MongoDB deployment does not support retryable " - "writes. Please add retryWrites=false to your " - "connection string.") - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): + if client_context.storage_engine != "mmapv1": + raise SkipTest("This cluster is not running MMAPv1") + + expected_msg = ( + "This MongoDB deployment does not support retryable " + "writes. Please add retryWrites=false to your " + "connection string." + ) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): with self.assertRaisesRegex(OperationFailure, expected_msg): method(*args, **kwargs) @@ -190,12 +197,10 @@ class TestRetryableWrites(IgnoreDeprecationsTest): def setUpClass(cls): super(TestRetryableWrites, cls).setUpClass() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.listener = OvertCommandListener() - cls.client = rs_or_single_client( - retryWrites=True, event_listeners=[cls.listener]) + cls.client = rs_or_single_client(retryWrites=True, event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @classmethod @@ -206,117 +211,123 @@ def tearDownClass(cls): def setUp(self): if client_context.is_rs and client_context.test_commands_enabled: - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', 'alwaysOn')])) + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) def tearDown(self): if client_context.is_rs and client_context.test_commands_enabled: - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', 'off')])) + self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=False, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) self.addCleanup(client.close) - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) listener.results.clear() method(*args, **kwargs) - for event in listener.results['started']: + for event in listener.results["started"]: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + "%s sent txnNumber with %s" % (msg, event.command_name), + ) @client_context.require_no_standalone def test_supported_single_statement_supported_cluster(self): - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) self.listener.results.clear() method(*args, **kwargs) - commands_started = self.listener.results['started'] - self.assertEqual(len(self.listener.results['succeeded']), 1, msg) + commands_started = self.listener.results["started"] + self.assertEqual(len(self.listener.results["succeeded"]), 1, msg) first_attempt = commands_started[0] self.assertIn( - 'lsid', first_attempt.command, - '%s sent no lsid with %s' % (msg, first_attempt.command_name)) - initial_session_id = first_attempt.command['lsid'] + "lsid", + first_attempt.command, + "%s sent no lsid with %s" % (msg, first_attempt.command_name), + ) + initial_session_id = first_attempt.command["lsid"] self.assertIn( - 'txnNumber', first_attempt.command, - '%s sent no txnNumber with %s' % ( - msg, first_attempt.command_name)) + "txnNumber", + first_attempt.command, + "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + ) # There should be no retry when the failpoint is not active. - if (client_context.is_mongos or - not client_context.test_commands_enabled): + if client_context.is_mongos or not client_context.test_commands_enabled: self.assertEqual(len(commands_started), 1) continue - initial_transaction_id = first_attempt.command['txnNumber'] + initial_transaction_id = first_attempt.command["txnNumber"] retry_attempt = commands_started[1] self.assertIn( - 'lsid', retry_attempt.command, - '%s sent no lsid with %s' % (msg, first_attempt.command_name)) - self.assertEqual( - retry_attempt.command['lsid'], initial_session_id, msg) + "lsid", + retry_attempt.command, + "%s sent no lsid with %s" % (msg, first_attempt.command_name), + ) + self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) self.assertIn( - 'txnNumber', retry_attempt.command, - '%s sent no txnNumber with %s' % ( - msg, first_attempt.command_name)) - self.assertEqual(retry_attempt.command['txnNumber'], - initial_transaction_id, msg) + "txnNumber", + retry_attempt.command, + "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + ) + self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) def test_supported_single_statement_unsupported_cluster(self): if client_context.is_rs or client_context.is_mongos: - raise SkipTest('This cluster supports retryable writes') + raise SkipTest("This cluster supports retryable writes") - for method, args, kwargs in retryable_single_statement_ops( - self.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) self.listener.results.clear() method(*args, **kwargs) - for event in self.listener.results['started']: + for event in self.listener.results["started"]: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + "%s sent txnNumber with %s" % (msg, event.command_name), + ) def test_unsupported_single_statement(self): coll = self.db.retryable_write_test coll.insert_many([{}, {}]) coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) - for method, args, kwargs in (non_retryable_single_statement_ops(coll) + - retryable_single_statement_ops(coll_w0)): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) self.listener.results.clear() method(*args, **kwargs) - started_events = self.listener.results['started'] - self.assertEqual(len(self.listener.results['succeeded']), - len(started_events), msg) - self.assertEqual(len(self.listener.results['failed']), 0, msg) + started_events = self.listener.results["started"] + self.assertEqual(len(self.listener.results["succeeded"]), len(started_events), msg) + self.assertEqual(len(self.listener.results["failed"]), 0, msg) for event in started_events: self.assertNotIn( - 'txnNumber', event.command, - '%s sent txnNumber with %s' % (msg, event.command_name)) + "txnNumber", + event.command, + "%s sent txnNumber with %s" % (msg, event.command_name), + ) def test_server_selection_timeout_not_retried(self): """A ServerSelectionTimeoutError is not retried.""" listener = OvertCommandListener() client = MongoClient( - 'somedomainthatdoesntexist.org', + "somedomainthatdoesntexist.org", serverSelectionTimeoutMS=1, - retryWrites=True, event_listeners=[listener]) - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) listener.results.clear() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 0, msg) + self.assertEqual(len(listener.results["started"]), 0, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -325,8 +336,7 @@ def test_retry_timeout_raises_original_error(self): original error. """ listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -335,43 +345,44 @@ def mock_select_server(*args, **kwargs): server = select_server(*args, **kwargs) def raise_error(*args, **kwargs): - raise ServerSelectionTimeoutError( - 'No primary available for writes') + raise ServerSelectionTimeoutError("No primary available for writes") + # Raise ServerSelectionTimeout on the retry attempt. topology.select_server = raise_error return server - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) listener.results.clear() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 1, msg) + self.assertEqual(len(listener.results["started"]), 1, msg) @client_context.require_replica_set @client_context.require_test_commands def test_batch_splitting(self): """Test retry succeeds after failures during batch splitting.""" - large = 's' * 1024 * 1024 * 15 + large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) self.listener.results.clear() - bulk_result = coll.bulk_write([ - InsertOne({'_id': 1, 'l': large}), - InsertOne({'_id': 2, 'l': large}), - InsertOne({'_id': 3, 'l': large}), - UpdateOne({'_id': 1, 'l': large}, - {'$unset': {'l': 1}, '$inc': {'count': 1}}), - UpdateOne({'_id': 2, 'l': large}, {'$set': {'foo': 'bar'}}), - DeleteOne({'l': large}), - DeleteOne({'l': large})]) + bulk_result = coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) # Each command should fail and be retried. # With OP_MSG 3 inserts are one batch. 2 updates another. # 2 deletes a third. - self.assertEqual(len(self.listener.results['started']), 6) - self.assertEqual(coll.find_one(), {'_id': 1, 'count': 1}) + self.assertEqual(len(self.listener.results["started"]), 6) + self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) # Assert the final result expected_result = { "writeErrors": [], @@ -389,42 +400,51 @@ def test_batch_splitting(self): @client_context.require_test_commands def test_batch_splitting_retry_fails(self): """Test retry fails during batch splitting.""" - large = 's' * 1024 * 1024 * 15 + large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.client.admin.command(SON([ - ('configureFailPoint', 'onPrimaryTransactionalWrite'), - ('mode', {'skip': 3}), # The number of _documents_ to skip. - ('data', {'failBeforeCommitExceptionCode': 1})])) + self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. + ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) self.listener.results.clear() with self.client.start_session() as session: initial_txn = session._server_session._transaction_id try: - coll.bulk_write([InsertOne({'_id': 1, 'l': large}), - InsertOne({'_id': 2, 'l': large}), - InsertOne({'_id': 3, 'l': large}), - InsertOne({'_id': 4, 'l': large})], - session=session) + coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) except ConnectionFailure: pass else: self.fail("bulk_write should have failed") - started = self.listener.results['started'] + started = self.listener.results["started"] self.assertEqual(len(started), 3) - self.assertEqual(len(self.listener.results['succeeded']), 1) + self.assertEqual(len(self.listener.results["succeeded"]), 1) expected_txn = Int64(initial_txn + 1) - self.assertEqual(started[0].command['txnNumber'], expected_txn) - self.assertEqual(started[0].command['lsid'], session.session_id) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) expected_txn = Int64(initial_txn + 2) - self.assertEqual(started[1].command['txnNumber'], expected_txn) - self.assertEqual(started[1].command['lsid'], session.session_id) - started[1].command.pop('$clusterTime') - started[2].command.pop('$clusterTime') + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") self.assertEqual(started[1].command, started[2].command) final_txn = session._server_session._transaction_id self.assertEqual(final_txn, expected_txn) - self.assertEqual(coll.find_one(projection={'_id': True}), {'_id': 1}) + self.assertEqual(coll.find_one(projection={"_id": True}), {"_id": 1}) class TestWriteConcernError(IntegrationTest): @@ -439,20 +459,18 @@ class TestWriteConcernError(IntegrationTest): def setUpClass(cls): super(TestWriteConcernError, cls).setUpClass() cls.fail_insert = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 2}, - 'data': { - 'failCommands': ['insert'], - 'writeConcernError': { - 'code': 91, - 'errmsg': 'Replication is being shut down'}, - }} + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } @client_context.require_version_min(4, 0) def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) # Ensure collection exists. client.pymongo_test.testcoll.insert_one({}) @@ -460,14 +478,13 @@ def test_RetryableWriteError_error_label(self): with self.fail_point(self.fail_insert): with self.assertRaises(WriteConcernError) as cm: client.pymongo_test.testcoll.insert_one({}) - self.assertTrue(cm.exception.has_error_label( - 'RetryableWriteError')) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) if client_context.version >= Version(4, 4): # In MongoDB 4.4+ we rely on the server returning the error label. self.assertIn( - 'RetryableWriteError', - listener.results['succeeded'][-1].reply['errorLabels']) + "RetryableWriteError", listener.results["succeeded"][-1].reply["errorLabels"] + ) @client_context.require_version_min(4, 4) def test_RetryableWriteError_error_label_RawBSONDocument(self): @@ -476,13 +493,18 @@ def test_RetryableWriteError_error_label_RawBSONDocument(self): with self.client.start_session() as s: s._start_retryable_write() result = self.client.pymongo_test.command( - 'insert', 'testcoll', documents=[{'_id': 1}], - txnNumber=s._server_session.transaction_id, session=s, + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._server_session.transaction_id, + session=s, codec_options=DEFAULT_CODEC_OPTIONS.with_options( - document_class=RawBSONDocument)) + document_class=RawBSONDocument + ), + ) - self.assertIn('writeConcernError', result) - self.assertIn('RetryableWriteError', result['errorLabels']) + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) class InsertThread(threading.Thread): @@ -504,26 +526,24 @@ class TestPoolPausedError(IntegrationTest): @client_context.require_failCommand_blockConnection @client_context.require_retryable_writes - @client_knobs(heartbeat_frequency=.05, min_heartbeat_interval=.05) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client( - maxPoolSize=1, - event_listeners=[cmap_listener, cmd_listener]) + client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() cmd_listener.reset() threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] fail_command = { - 'mode': {'times': 1}, - 'data': { - 'failCommands': ['insert'], - 'blockConnection': True, - 'blockTimeMS': 1000, - 'errorCode': 91, - 'errorLabels': ['RetryableWriteError'], + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], }, } with self.fail_point(fail_command): @@ -541,29 +561,25 @@ def test_pool_paused_error_is_retryable(self): break # Via CMAP monitoring, assert that the first check out succeeds. - cmap_events = cmap_listener.events_by_type(( - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - PoolClearedEvent)) + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) msg = pprint.pformat(cmap_listener.events) self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) - self.assertIsInstance( - cmap_events[2], ConnectionCheckOutFailedEvent, msg) - self.assertEqual(cmap_events[2].reason, - ConnectionCheckOutFailedReason.CONN_ERROR, - msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results['started'] + started = cmd_listener.results["started"] msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results['succeeded'] + succeeded = cmd_listener.results["succeeded"] self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results['failed'] + failed = cmd_listener.results["failed"] self.assertEqual(1, len(failed), msg) @@ -576,8 +592,7 @@ def test_increment_transaction_id_without_sending_command(self): the first attempt fails before sending the command. """ listener = OvertCommandListener() - client = rs_or_single_client( - retryWrites=True, event_listeners=[listener]) + client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -586,28 +601,27 @@ def raise_connection_err_select_server(*args, **kwargs): # Raise ConnectionFailure on the first attempt and perform # normal selection on the retry attempt. topology.select_server = select_server - raise ConnectionFailure('Connection refused') + raise ConnectionFailure("Connection refused") - for method, args, kwargs in retryable_single_statement_ops( - client.db.retryable_write_test): + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): listener.results.clear() topology.select_server = raise_connection_err_select_server with client.start_session() as session: kwargs = copy.deepcopy(kwargs) - kwargs['session'] = session - msg = '%s(*%r, **%r)' % (method.__name__, args, kwargs) + kwargs["session"] = session + msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) initial_txn_id = session._server_session.transaction_id # Each operation should fail on the first attempt and succeed # on the second. method(*args, **kwargs) - self.assertEqual(len(listener.results['started']), 1, msg) - retry_cmd = listener.results['started'][0].command - sent_txn_id = retry_cmd['txnNumber'] + self.assertEqual(len(listener.results["started"]), 1, msg) + retry_cmd = listener.results["started"][0].command + sent_txn_id = retry_cmd["txnNumber"] final_txn_id = session._server_session.transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) self.assertEqual(sent_txn_id, final_txn_id, msg) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py index 4e851de273..4e97c14d4b 100644 --- a/test/test_retryable_writes_unified.py +++ b/test/test_retryable_writes_unified.py @@ -23,8 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'retryable_writes', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_saslprep.py b/test/test_saslprep.py index c694224a6c..1dd4727181 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -16,11 +16,12 @@ sys.path[0:0] = [""] -from pymongo.saslprep import saslprep from test import unittest -class TestSASLprep(unittest.TestCase): +from pymongo.saslprep import saslprep + +class TestSASLprep(unittest.TestCase): def test_saslprep(self): try: import stringprep diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index e7a8a7ef05..fee751fbdc 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -21,44 +21,41 @@ sys.path[0:0] = [""] -from pymongo import MongoClient +from test import IntegrationTest, client_context, client_knobs, unittest +from test.utils import ( + ServerAndTopologyEventListener, + rs_or_single_client, + server_name_to_type, + wait_until, +) + from bson.json_util import object_hook -from pymongo import monitoring +from pymongo import MongoClient, monitoring from pymongo.collection import Collection from pymongo.common import clean_node -from pymongo.errors import (ConnectionFailure, - NotPrimaryError) +from pymongo.errors import ConnectionFailure, NotPrimaryError from pymongo.hello import Hello from pymongo.monitor import Monitor from pymongo.server_description import ServerDescription from pymongo.topology_description import TOPOLOGY_TYPE -from test import unittest, client_context, client_knobs, IntegrationTest -from test.utils import (ServerAndTopologyEventListener, - server_name_to_type, - rs_or_single_client, - wait_until) # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - 'sdam_monitoring') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sdam_monitoring") def compare_server_descriptions(expected, actual): - if ((not expected['address'] == "%s:%s" % actual.address) or - (not server_name_to_type(expected['type']) == - actual.server_type)): + if (not expected["address"] == "%s:%s" % actual.address) or ( + not server_name_to_type(expected["type"]) == actual.server_type + ): return False - expected_hosts = set( - expected['arbiters'] + expected['passives'] + expected['hosts']) + expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) return expected_hosts == set("%s:%s" % s for s in actual.all_hosts) def compare_topology_descriptions(expected, actual): - if not (TOPOLOGY_TYPE.__getattribute__( - expected['topologyType']) == actual.topology_type): + if not (TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) == actual.topology_type): return False - expected = expected['servers'] + expected = expected["servers"] actual = actual.server_descriptions() if len(expected) != len(actual): return False @@ -81,70 +78,74 @@ def compare_events(expected_dict, actual): if expected_type == "server_opening_event": if not isinstance(actual, monitoring.ServerOpeningEvent): - return False, "Expected ServerOpeningEvent, got %s" % ( - actual.__class__) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, - "ServerOpeningEvent published with wrong address (expected" - " %s, got %s" % (expected['address'], - actual.server_address)) + return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) + if not expected["address"] == "%s:%s" % actual.server_address: + return ( + False, + "ServerOpeningEvent published with wrong address (expected" + " %s, got %s" % (expected["address"], actual.server_address), + ) elif expected_type == "server_description_changed_event": if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): - return (False, - "Expected ServerDescriptionChangedEvent, got %s" % ( - actual.__class__)) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, "ServerDescriptionChangedEvent has wrong address" - " (expected %s, got %s" % (expected['address'], - actual.server_address)) + return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) + if not expected["address"] == "%s:%s" % actual.server_address: + return ( + False, + "ServerDescriptionChangedEvent has wrong address" + " (expected %s, got %s" % (expected["address"], actual.server_address), + ) + if not compare_server_descriptions(expected["newDescription"], actual.new_description): + return (False, "New ServerDescription incorrect in" " ServerDescriptionChangedEvent") if not compare_server_descriptions( - expected['newDescription'], actual.new_description): - return (False, "New ServerDescription incorrect in" - " ServerDescriptionChangedEvent") - if not compare_server_descriptions(expected['previousDescription'], - actual.previous_description): - return (False, "Previous ServerDescription incorrect in" - " ServerDescriptionChangedEvent") + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous ServerDescription incorrect in" " ServerDescriptionChangedEvent", + ) elif expected_type == "server_closed_event": if not isinstance(actual, monitoring.ServerClosedEvent): - return False, "Expected ServerClosedEvent, got %s" % ( - actual.__class__) - if not expected['address'] == "%s:%s" % actual.server_address: - return (False, "ServerClosedEvent published with wrong address" - " (expected %s, got %s" % (expected['address'], - actual.server_address)) + return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) + if not expected["address"] == "%s:%s" % actual.server_address: + return ( + False, + "ServerClosedEvent published with wrong address" + " (expected %s, got %s" % (expected["address"], actual.server_address), + ) elif expected_type == "topology_opening_event": if not isinstance(actual, monitoring.TopologyOpenedEvent): - return False, "Expected TopologyOpeningEvent, got %s" % ( - actual.__class__) + return False, "Expected TopologyOpeningEvent, got %s" % (actual.__class__) elif expected_type == "topology_description_changed_event": if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): - return (False, "Expected TopologyDescriptionChangedEvent," - " got %s" % (actual.__class__)) - if not compare_topology_descriptions(expected['newDescription'], - actual.new_description): - return (False, "New TopologyDescription incorrect in " - "TopologyDescriptionChangedEvent") + return ( + False, + "Expected TopologyDescriptionChangedEvent," " got %s" % (actual.__class__), + ) + if not compare_topology_descriptions(expected["newDescription"], actual.new_description): + return ( + False, + "New TopologyDescription incorrect in " "TopologyDescriptionChangedEvent", + ) if not compare_topology_descriptions( - expected['previousDescription'], - actual.previous_description): - return (False, "Previous TopologyDescription incorrect in" - " TopologyDescriptionChangedEvent") + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous TopologyDescription incorrect in" " TopologyDescriptionChangedEvent", + ) elif expected_type == "topology_closed_event": if not isinstance(actual, monitoring.TopologyClosedEvent): - return False, "Expected TopologyClosedEvent, got %s" % ( - actual.__class__) + return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) else: - return False, "Incorrect event: expected %s, actual %s" % ( - expected_type, actual) + return False, "Incorrect event: expected %s, actual %s" % (expected_type, actual) return True, "" @@ -152,12 +153,10 @@ def compare_events(expected_dict, actual): def compare_multiple_events(i, expected_results, actual_results): events_in_a_row = [] j = i - while(j < len(expected_results) and isinstance( - actual_results[j], - actual_results[i].__class__)): + while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__): events_in_a_row.append(actual_results[j]) j += 1 - message = '' + message = "" for event in events_in_a_row: for k in range(i, j): passed, message = compare_events(expected_results[k], event) @@ -166,11 +165,10 @@ def compare_multiple_events(i, expected_results, actual_results): break else: return i, False, message - return j, True, '' + return j, True, "" class TestAllScenarios(IntegrationTest): - def setUp(self): super(TestAllScenarios, self).setUp() self.all_listener = ServerAndTopologyEventListener() @@ -184,51 +182,60 @@ def run_scenario(self): def _run_scenario(self): class NoopMonitor(Monitor): """Override the _run method to do nothing.""" + def _run(self): time.sleep(0.05) - m = MongoClient(host=scenario_def['uri'], port=27017, - event_listeners=[self.all_listener], - _monitor_class=NoopMonitor) + m = MongoClient( + host=scenario_def["uri"], + port=27017, + event_listeners=[self.all_listener], + _monitor_class=NoopMonitor, + ) topology = m._get_topology() try: - for phase in scenario_def['phases']: - for (source, response) in phase.get('responses', []): + for phase in scenario_def["phases"]: + for (source, response) in phase.get("responses", []): source_address = clean_node(source) - topology.on_change(ServerDescription( - address=source_address, - hello=Hello(response), - round_trip_time=0)) + topology.on_change( + ServerDescription( + address=source_address, hello=Hello(response), round_trip_time=0 + ) + ) - expected_results = phase['outcome']['events'] + expected_results = phase["outcome"]["events"] expected_len = len(expected_results) wait_until( lambda: len(self.all_listener.results) >= expected_len, - "publish all events", timeout=15) + "publish all events", + timeout=15, + ) # Wait some time to catch possible lagging extra events. time.sleep(0.5) i = 0 while i < expected_len: - result = self.all_listener.results[i] if len( - self.all_listener.results) > i else None + result = ( + self.all_listener.results[i] if len(self.all_listener.results) > i else None + ) # The order of ServerOpening/ClosedEvents doesn't matter - if isinstance(result, (monitoring.ServerOpeningEvent, - monitoring.ServerClosedEvent)): + if isinstance( + result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent) + ): i, passed, message = compare_multiple_events( - i, expected_results, self.all_listener.results) + i, expected_results, self.all_listener.results + ) self.assertTrue(passed, message) else: - self.assertTrue( - *compare_events(expected_results[i], result)) + self.assertTrue(*compare_events(expected_results[i], result)) i += 1 # Assert no extra events. extra_events = self.all_listener.results[expected_len:] if extra_events: - self.fail('Extra events %r' % (extra_events,)) + self.fail("Extra events %r" % (extra_events,)) self.all_listener.reset() finally: @@ -241,11 +248,10 @@ def create_tests(): for dirpath, _, filenames in os.walk(_TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json.load( - scenario_stream, object_hook=object_hook) + scenario_def = json.load(scenario_stream, object_hook=object_hook) # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s' % (os.path.splitext(filename)[0],) + test_name = "test_%s" % (os.path.splitext(filename)[0],) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -269,7 +275,8 @@ def setUpClass(cls): cls.listener = ServerAndTopologyEventListener() retry_writes = client_context.supports_transactions() cls.test_client = rs_or_single_client( - event_listeners=[cls.listener], retryWrites=retry_writes) + event_listeners=[cls.listener], retryWrites=retry_writes + ) cls.coll = cls.test_client[cls.client.db.name].test cls.coll.insert_one({}) @@ -287,12 +294,12 @@ def _test_app_error(self, fail_command_opts, expected_error): # Test that an application error causes a ServerDescriptionChangedEvent # to be published. - data = {'failCommands': ['insert']} + data = {"failCommands": ["insert"]} data.update(fail_command_opts) fail_insert = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1}, - 'data': data, + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, } with self.fail_point(fail_insert): if self.test_client.options.retry_writes: @@ -306,43 +313,48 @@ def marked_unknown(event): return ( isinstance(event, monitoring.ServerDescriptionChangedEvent) and event.server_address == address - and not event.new_description.is_server_type_known) + and not event.new_description.is_server_type_known + ) def discovered_node(event): return ( isinstance(event, monitoring.ServerDescriptionChangedEvent) and event.server_address == address and not event.previous_description.is_server_type_known - and event.new_description.is_server_type_known) + and event.new_description.is_server_type_known + ) def marked_unknown_and_rediscovered(): - return (len(self.listener.matching(marked_unknown)) >= 1 and - len(self.listener.matching(discovered_node)) >= 1) + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) # Topology events are published asynchronously - wait_until(marked_unknown_and_rediscovered, 'rediscover node') + wait_until(marked_unknown_and_rediscovered, "rediscover node") # Expect a single ServerDescriptionChangedEvent for the network error. marked_unknown_events = self.listener.matching(marked_unknown) self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) - self.assertIsInstance( - marked_unknown_events[0].new_description.error, expected_error) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) def test_network_error_publishes_events(self): - self._test_app_error({'closeConnection': True}, ConnectionFailure) + self._test_app_error({"closeConnection": True}, ConnectionFailure) # In 4.4+, not primary errors from failCommand don't cause SDAM state # changes because topologyVersion is not incremented. @client_context.require_version_max(4, 3) def test_not_primary_error_publishes_events(self): - self._test_app_error({'errorCode': 10107, 'closeConnection': False, - 'errorLabels': ['RetryableWriteError']}, - NotPrimaryError) + self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) def test_shutdown_error_publishes_events(self): - self._test_app_error({'errorCode': 91, 'closeConnection': False, - 'errorLabels': ['RetryableWriteError']}, - NotPrimaryError) + self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) if __name__ == "__main__": diff --git a/test/test_server.py b/test/test_server.py index e4996d2e09..064d77d024 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -18,18 +18,19 @@ sys.path[0:0] = [""] +from test import unittest + from pymongo.hello import Hello from pymongo.server import Server from pymongo.server_description import ServerDescription -from test import unittest class TestServer(unittest.TestCase): def test_repr(self): - hello = Hello({'ok': 1}) - sd = ServerDescription(('localhost', 27017), hello) + hello = Hello({"ok": 1}) + sd = ServerDescription(("localhost", 27017), hello) server = Server(sd, pool=object(), monitor=object()) - self.assertTrue('Standalone' in str(server)) + self.assertTrue("Standalone" in str(server)) if __name__ == "__main__": diff --git a/test/test_server_description.py b/test/test_server_description.py index 23d6c8f377..1562711375 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -18,14 +18,15 @@ sys.path[0:0] = [""] -from bson.objectid import ObjectId +from test import unittest + from bson.int64 import Int64 -from pymongo.server_type import SERVER_TYPE +from bson.objectid import ObjectId from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription -from test import unittest +from pymongo.server_type import SERVER_TYPE -address = ('localhost', 27017) +address = ("localhost", 27017) def parse_hello_response(doc): @@ -42,82 +43,88 @@ def test_unknown(self): self.assertFalse(s.is_readable) def test_mongos(self): - s = parse_hello_response({'ok': 1, 'msg': 'isdbgrid'}) + s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) self.assertEqual(SERVER_TYPE.Mongos, s.server_type) - self.assertEqual('Mongos', s.server_type_name) + self.assertEqual("Mongos", s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_primary(self): - s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) - self.assertEqual('RSPrimary', s.server_type_name) + self.assertEqual("RSPrimary", s.server_type_name) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_secondary(self): s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: False, 'secondary': True, 'setName': 'rs'}) + {"ok": 1, HelloCompat.LEGACY_CMD: False, "secondary": True, "setName": "rs"} + ) self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) - self.assertEqual('RSSecondary', s.server_type_name) + self.assertEqual("RSSecondary", s.server_type_name) self.assertFalse(s.is_writable) self.assertTrue(s.is_readable) def test_arbiter(self): s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: False, 'arbiterOnly': True, 'setName': 'rs'}) + {"ok": 1, HelloCompat.LEGACY_CMD: False, "arbiterOnly": True, "setName": "rs"} + ) self.assertEqual(SERVER_TYPE.RSArbiter, s.server_type) - self.assertEqual('RSArbiter', s.server_type_name) + self.assertEqual("RSArbiter", s.server_type_name) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_other(self): - s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: False, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) - self.assertEqual('RSOther', s.server_type_name) + self.assertEqual("RSOther", s.server_type_name) - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'hidden': True, - 'setName': 'rs'}) + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hidden": True, + "setName": "rs", + } + ) self.assertEqual(SERVER_TYPE.RSOther, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_ghost(self): - s = parse_hello_response({'ok': 1, 'isreplicaset': True}) + s = parse_hello_response({"ok": 1, "isreplicaset": True}) self.assertEqual(SERVER_TYPE.RSGhost, s.server_type) - self.assertEqual('RSGhost', s.server_type_name) + self.assertEqual("RSGhost", s.server_type_name) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_fields(self): - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'primary': 'a:27017', - 'tags': {'a': 'foo', 'b': 'baz'}, - 'maxMessageSizeBytes': 1, - 'maxBsonObjectSize': 2, - 'maxWriteBatchSize': 3, - 'minWireVersion': 4, - 'maxWireVersion': 5, - 'setName': 'rs'}) + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "primary": "a:27017", + "tags": {"a": "foo", "b": "baz"}, + "maxMessageSizeBytes": 1, + "maxBsonObjectSize": 2, + "maxWriteBatchSize": 3, + "minWireVersion": 4, + "maxWireVersion": 5, + "setName": "rs", + } + ) self.assertEqual(SERVER_TYPE.RSSecondary, s.server_type) - self.assertEqual(('a', 27017), s.primary) - self.assertEqual({'a': 'foo', 'b': 'baz'}, s.tags) + self.assertEqual(("a", 27017), s.primary) + self.assertEqual({"a": "foo", "b": "baz"}, s.tags) self.assertEqual(1, s.max_message_size) self.assertEqual(2, s.max_bson_size) self.assertEqual(3, s.max_write_batch_size) @@ -125,55 +132,57 @@ def test_fields(self): self.assertEqual(5, s.max_wire_version) def test_default_max_message_size(self): - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'maxBsonObjectSize': 2}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "maxBsonObjectSize": 2}) # Twice max_bson_size. self.assertEqual(4, s.max_message_size) def test_standalone(self): - s = parse_hello_response({'ok': 1, HelloCompat.LEGACY_CMD: True}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) # Mongod started with --slave. # master-slave replication was removed in MongoDB 4.0. - s = parse_hello_response({'ok': 1, HelloCompat.LEGACY_CMD: False}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: False}) self.assertEqual(SERVER_TYPE.Standalone, s.server_type) self.assertTrue(s.is_writable) self.assertTrue(s.is_readable) def test_ok_false(self): - s = parse_hello_response({'ok': 0, HelloCompat.LEGACY_CMD: True}) + s = parse_hello_response({"ok": 0, HelloCompat.LEGACY_CMD: True}) self.assertEqual(SERVER_TYPE.Unknown, s.server_type) self.assertFalse(s.is_writable) self.assertFalse(s.is_readable) def test_all_hosts(self): - s = parse_hello_response({ - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'hosts': ['a'], - 'passives': ['b:27018'], - 'arbiters': ['c'] - }) + s = parse_hello_response( + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "hosts": ["a"], + "passives": ["b:27018"], + "arbiters": ["c"], + } + ) - self.assertEqual( - [('a', 27017), ('b', 27018), ('c', 27017)], - sorted(s.all_hosts)) + self.assertEqual([("a", 27017), ("b", 27018), ("c", 27017)], sorted(s.all_hosts)) def test_repr(self): - s = parse_hello_response({'ok': 1, 'msg': 'isdbgrid'}) - self.assertEqual(repr(s), - "") + s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) + self.assertEqual( + repr(s), "" + ) def test_topology_version(self): - topology_version = {'processId': ObjectId(), 'counter': Int64('0')} + topology_version = {"processId": ObjectId(), "counter": Int64("0")} s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs', - 'topologyVersion': topology_version}) + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "topologyVersion": topology_version, + } + ) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) self.assertEqual(topology_version, s.topology_version) @@ -185,8 +194,7 @@ def test_topology_version(self): def test_topology_version_not_present(self): # No topologyVersion field. - s = parse_hello_response( - {'ok': 1, HelloCompat.LEGACY_CMD: True, 'setName': 'rs'}) + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs"}) self.assertEqual(SERVER_TYPE.RSPrimary, s.server_type) self.assertEqual(None, s.topology_version) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 955736709d..a80d5f13d9 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -17,8 +17,7 @@ import os import sys -from pymongo import MongoClient -from pymongo import ReadPreference +from pymongo import MongoClient, ReadPreference from pymongo.errors import ServerSelectionTimeoutError from pymongo.hello import HelloCompat from pymongo.server_selectors import writable_server_selector @@ -27,22 +26,30 @@ sys.path[0:0] = [""] -from test import client_context, unittest, IntegrationTest -from test.utils import (rs_or_single_client, wait_until, EventListener, - FunctionCallRecorder) +from test import IntegrationTest, client_context, unittest +from test.utils import ( + EventListener, + FunctionCallRecorder, + rs_or_single_client, + wait_until, +) from test.utils_selection_tests import ( - create_selection_tests, get_addresses, get_topology_settings_dict, - make_server_description) - + create_selection_tests, + get_addresses, + get_topology_settings_dict, + make_server_description, +) # Location of JSON test specifications. _TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), - os.path.join('server_selection', 'server_selection')) + os.path.join("server_selection", "server_selection"), +) class SelectionStoreSelector(object): """No-op selector that keeps track of what was passed to it.""" + def __init__(self): self.selection = None @@ -51,7 +58,6 @@ def __call__(self, selection): return selection - class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass @@ -67,37 +73,33 @@ def custom_selector(servers): # Initialize client with appropriate listeners. listener = EventListener() - client = rs_or_single_client( - server_selector=custom_selector, event_listeners=[listener]) + client = rs_or_single_client(server_selector=custom_selector, event_listeners=[listener]) self.addCleanup(client.close) - coll = client.get_database( - 'testdb', read_preference=ReadPreference.NEAREST).coll - self.addCleanup(client.drop_database, 'testdb') + coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll + self.addCleanup(client.drop_database, "testdb") # Wait the node list to be fully populated. def all_hosts_started(): - return (len(client.admin.command(HelloCompat.LEGACY_CMD)['hosts']) == - len(client._topology._description.readable_servers)) + return len(client.admin.command(HelloCompat.LEGACY_CMD)["hosts"]) == len( + client._topology._description.readable_servers + ) - wait_until(all_hosts_started, 'receive heartbeat from all hosts') - expected_port = max([ - n.address[1] - for n in client._topology._description.readable_servers]) + wait_until(all_hosts_started, "receive heartbeat from all hosts") + expected_port = max([n.address[1] for n in client._topology._description.readable_servers]) # Insert 1 record and access it 10 times. - coll.insert_one({'name': 'John Doe'}) + coll.insert_one({"name": "John Doe"}) for _ in range(10): - coll.find_one({'name': 'John Doe'}) + coll.find_one({"name": "John Doe"}) # Confirm all find commands are run against appropriate host. - for command in listener.results['started']: - if command.command_name == 'find': - self.assertEqual( - command.connection_id[1], expected_port) + for command in listener.results["started"]: + if command.command_name == "find": + self.assertEqual(command.connection_id[1], expected_port) def test_invalid_server_selector(self): # Client initialization must fail if server_selector is not callable. - for selector_candidate in [list(), 10, 'string', {}]: + for selector_candidate in [list(), 10, "string", {}]: with self.assertRaisesRegex(ValueError, "must be a callable"): MongoClient(connect=False, server_selector=selector_candidate) @@ -112,13 +114,13 @@ def test_selector_called(self): mongo_client = rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection self.addCleanup(mongo_client.close) - self.addCleanup(mongo_client.drop_database, 'testdb') + self.addCleanup(mongo_client.drop_database, "testdb") # Do N operations and test selector is called at least N times. - test_collection.insert_one({'age': 20, 'name': 'John'}) - test_collection.insert_one({'age': 31, 'name': 'Jane'}) - test_collection.update_one({'name': 'Jane'}, {'$set': {'age': 21}}) - test_collection.find_one({'name': 'Roe'}) + test_collection.insert_one({"age": 20, "name": "John"}) + test_collection.insert_one({"age": 31, "name": "Jane"}) + test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) + test_collection.find_one({"name": "Roe"}) self.assertGreaterEqual(selector.call_count, 4) @client_context.require_replica_set @@ -126,34 +128,26 @@ def test_latency_threshold_application(self): selector = SelectionStoreSelector() scenario_def: dict = { - 'topology_description': { - 'type': 'ReplicaSetWithPrimary', 'servers': [ - {'address': 'b:27017', - 'avg_rtt_ms': 10000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'c:27017', - 'avg_rtt_ms': 20000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'a:27017', - 'avg_rtt_ms': 30000, - 'type': 'RSPrimary', - 'tag': {}}, - ]}} + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}}, + ], + } + } # Create & populate Topology such that all but one server is too slow. - rtt_times = [srv['avg_rtt_ms'] for srv in - scenario_def['topology_description']['servers']] + rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]] min_rtt_idx = rtt_times.index(min(rtt_times)) - seeds, hosts = get_addresses( - scenario_def["topology_description"]["servers"]) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) settings = get_topology_settings_dict( - heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, - server_selector=selector) + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) topology = Topology(TopologySettings(**settings)) topology.open() - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) @@ -161,52 +155,40 @@ def test_latency_threshold_application(self): # prior to custom server selection logic kicking in. server = topology.select_server(ReadPreference.NEAREST) assert selector.selection is not None - self.assertEqual( - len(selector.selection), - len(topology.description.server_descriptions())) + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) # Ensure proper filtering based on latency after custom selection. - self.assertEqual( - server.description.address, seeds[min_rtt_idx]) + self.assertEqual(server.description.address, seeds[min_rtt_idx]) @client_context.require_replica_set def test_server_selector_bypassed(self): selector = FunctionCallRecorder(lambda x: x) scenario_def = { - 'topology_description': { - 'type': 'ReplicaSetNoPrimary', 'servers': [ - {'address': 'b:27017', - 'avg_rtt_ms': 10000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'c:27017', - 'avg_rtt_ms': 20000, - 'type': 'RSSecondary', - 'tag': {}}, - {'address': 'a:27017', - 'avg_rtt_ms': 30000, - 'type': 'RSSecondary', - 'tag': {}}, - ]}} + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } # Create & populate Topology such that no server is writeable. - seeds, hosts = get_addresses( - scenario_def["topology_description"]["servers"]) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) settings = get_topology_settings_dict( - heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, - server_selector=selector) + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) topology = Topology(TopologySettings(**settings)) topology.open() - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) # Invoke server selection and assert no calls to our custom selector. - with self.assertRaisesRegex( - ServerSelectionTimeoutError, 'No primary available for writes'): - topology.select_server( - writable_server_selector, server_selection_timeout=0.1) + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + topology.select_server(writable_server_selector, server_selection_timeout=0.1) self.assertEqual(selector.call_count, 0) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index a0cbcd5f4c..4b24d0d7b0 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -16,18 +16,17 @@ import os import threading +from test import IntegrationTest, client_context, unittest +from test.utils import OvertCommandListener, TestCreator, rs_client, wait_until +from test.utils_selection_tests import create_topology from pymongo.common import clean_node from pymongo.read_preferences import ReadPreference -from test import client_context, IntegrationTest, unittest -from test.utils_selection_tests import create_topology -from test.utils import TestCreator, rs_client, OvertCommandListener, wait_until - # Location of JSON test specifications. TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('server_selection', 'in_window')) + os.path.dirname(os.path.realpath(__file__)), os.path.join("server_selection", "in_window") +) class TestAllScenarios(unittest.TestCase): @@ -35,28 +34,27 @@ def run_scenario(self, scenario_def): topology = create_topology(scenario_def) # Update mock operation_count state: - for mock in scenario_def['mocked_topology_state']: - address = clean_node(mock['address']) + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) server = topology.get_server_by_address(address) - server.pool.operation_count = mock['operation_count'] + server.pool.operation_count = mock["operation_count"] pref = ReadPreference.NEAREST - counts = dict((address, 0) for address in - topology._description.server_descriptions()) + counts = dict((address, 0) for address in topology._description.server_descriptions()) # Number of times to repeat server selection - iterations = scenario_def['iterations'] + iterations = scenario_def["iterations"] for _ in range(iterations): server = topology.select_server(pref, server_selection_timeout=0) counts[server.description.address] += 1 # Verify expected_frequencies - outcome = scenario_def['outcome'] - tolerance = outcome['tolerance'] - expected_frequencies = outcome['expected_frequencies'] + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] for host_str, freq in expected_frequencies.items(): address = clean_node(host_str) - actual_freq = float(counts[address])/iterations + actual_freq = float(counts[address]) / iterations if freq == 0: # Should be exactly 0. self.assertEqual(actual_freq, 0) @@ -112,7 +110,7 @@ def frequencies(self, client, listener): for thread in threads: self.assertTrue(thread.passed) - events = listener.results['started'] + events = listener.results["started"] self.assertEqual(len(events), N_FINDS * N_THREADS) nodes = client.nodes self.assertEqual(len(nodes), 2) @@ -120,7 +118,7 @@ def frequencies(self, client, listener): for event in events: freqs[event.connection_id] += 1 for address in freqs: - freqs[address] = freqs[address]/float(len(events)) + freqs[address] = freqs[address] / float(len(events)) return freqs @client_context.require_failCommand_appName @@ -129,21 +127,23 @@ def test_load_balancing(self): listener = OvertCommandListener() # PYTHON-2584: Use a large localThresholdMS to avoid the impact of # varying RTTs. - client = rs_client(client_context.mongos_seeds(), - appName='loadBalancingTest', - event_listeners=[listener], - localThresholdMS=10000) + client = rs_client( + client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener], + localThresholdMS=10000, + ) self.addCleanup(client.close) - wait_until(lambda: len(client.nodes) == 2, 'discover both nodes') + wait_until(lambda: len(client.nodes) == 2, "discover both nodes") # Delay find commands on delay_finds = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 10000}, - 'data': { - 'failCommands': ['find'], - 'blockConnection': True, - 'blockTimeMS': 500, - 'appName': 'loadBalancingTest', + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", }, } with self.fail_point(delay_finds): diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index f914e03030..d2d8768809 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -21,11 +21,11 @@ sys.path[0:0] = [""] from test import unittest + from pymongo.read_preferences import MovingAverage # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'server_selection/rtt') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection/rtt") class TestAllScenarios(unittest.TestCase): @@ -36,14 +36,13 @@ def create_test(scenario_def): def run_scenario(self): moving_average = MovingAverage() - if scenario_def['avg_rtt_ms'] != "NULL": - moving_average.add_sample(scenario_def['avg_rtt_ms']) + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) - if scenario_def['new_rtt_ms'] != "NULL": - moving_average.add_sample(scenario_def['new_rtt_ms']) + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) - self.assertAlmostEqual(moving_average.get(), - scenario_def['new_avg_rtt']) + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) return run_scenario @@ -58,8 +57,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_session.py b/test/test_session.py index 98eccbae36..5a242d6c69 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -18,7 +18,6 @@ import os import sys import time - from io import BytesIO from typing import Set @@ -26,40 +25,36 @@ sys.path[0:0] = [""] +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import EventListener, TestCreator, rs_or_single_client, wait_until +from test.utils_spec_runner import SpecRunner + from bson import DBRef from gridfs import GridFS, GridFSBucket -from pymongo import ASCENDING, InsertOne, IndexModel, monitoring +from pymongo import ASCENDING, IndexModel, InsertOne, monitoring from pymongo.common import _MAX_END_SESSIONS -from pymongo.errors import (ConfigurationError, - InvalidOperation, - OperationFailure) +from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure from pymongo.read_concern import ReadConcern -from test import IntegrationTest, client_context, unittest, SkipTest -from test.utils import (rs_or_single_client, - EventListener, - TestCreator, - wait_until) -from test.utils_spec_runner import SpecRunner + # Ignore auth commands like saslStart, so we can assert lsid is in all commands. class SessionTestListener(EventListener): def started(self, event): - if not event.command_name.startswith('sasl'): + if not event.command_name.startswith("sasl"): super(SessionTestListener, self).started(event) def succeeded(self, event): - if not event.command_name.startswith('sasl'): + if not event.command_name.startswith("sasl"): super(SessionTestListener, self).succeeded(event) def failed(self, event): - if not event.command_name.startswith('sasl'): + if not event.command_name.startswith("sasl"): super(SessionTestListener, self).failed(event) def first_command_started(self): - assert len(self.results['started']) >= 1, ( - "No command-started events") + assert len(self.results["started"]) >= 1, "No command-started events" - return self.results['started'][0] + return self.results["started"][0] def session_ids(client): @@ -92,20 +87,21 @@ def setUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() self.client = rs_or_single_client( - event_listeners=[self.listener, self.session_checker_listener]) + event_listeners=[self.listener, self.session_checker_listener] + ) self.addCleanup(self.client.close) self.db = self.client.pymongo_test - self.initial_lsids = set(s['id'] for s in session_ids(self.client)) + self.initial_lsids = set(s["id"] for s in session_ids(self.client)) def tearDown(self): """All sessions used in the test must be returned to the pool.""" - self.client.drop_database('pymongo_test') + self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() - for event in self.session_checker_listener.results['started']: - if 'lsid' in event.command: - used_lsids.add(event.command['lsid']['id']) + for event in self.session_checker_listener.results["started"]: + if "lsid" in event.command: + used_lsids.add(event.command["lsid"]["id"]) - current_lsids = set(s['id'] for s in session_ids(self.client)) + current_lsids = set(s["id"] for s in session_ids(self.client)) self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): @@ -120,21 +116,21 @@ def _test_ops(self, client, *ops): # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s f(*args, **kw) self.assertGreaterEqual(s._server_session.last_use, start) - self.assertGreaterEqual(len(listener.results['started']), 1) - for event in listener.results['started']: + self.assertGreaterEqual(len(listener.results["started"]), 1) + for event in listener.results["started"]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + "lsid" in event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) self.assertEqual( s.session_id, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - f.__name__, event.command_name)) + event.command["lsid"], + "%s sent wrong lsid with %s" % (f.__name__, event.command_name), + ) self.assertFalse(s.has_ended) @@ -147,35 +143,35 @@ def _test_ops(self, client, *ops): # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s with self.assertRaisesRegex( - InvalidOperation, - 'Can only use session with the MongoClient' - ' that started it'): + InvalidOperation, "Can only use session with the MongoClient" " that started it" + ): f(*args, **kw) # No explicit session. for f, args, kw in ops: listener.results.clear() f(*args, **kw) - self.assertGreaterEqual(len(listener.results['started']), 1) + self.assertGreaterEqual(len(listener.results["started"]), 1) lsids = [] - for event in listener.results['started']: + for event in listener.results["started"]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + "lsid" in event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) - lsids.append(event.command['lsid']) + lsids.append(event.command["lsid"]) - if not (sys.platform.startswith('java') or 'PyPy' in sys.version): + if not (sys.platform.startswith("java") or "PyPy" in sys.version): # Server session was returned to pool. Ignore interpreters with # non-deterministic GC. for lsid in lsids: self.assertIn( - lsid, session_ids(client), - "%s did not return implicit session to pool" % ( - f.__name__,)) + lsid, + session_ids(client), + "%s did not return implicit session to pool" % (f.__name__,), + ) def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. @@ -215,31 +211,28 @@ def test_end_sessions(self): listener = SessionTestListener() client = rs_or_single_client(event_listeners=[listener]) # Start many sessions. - sessions = [client.start_session() - for _ in range(_MAX_END_SESSIONS + 1)] + sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] for s in sessions: s.end_session() # Closing the client should end all sessions and clear the pool. - self.assertEqual(len(client._topology._session_pool), - _MAX_END_SESSIONS + 1) + self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) client.close() self.assertEqual(len(client._topology._session_pool), 0) - end_sessions = [e for e in listener.results['started'] - if e.command_name == 'endSessions'] + end_sessions = [e for e in listener.results["started"] if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. listener.results.clear() client.close() - self.assertEqual(len(listener.results['started']), 0) + self.assertEqual(len(listener.results["started"]), 0) def test_client(self): client = self.client ops: list = [ (client.server_info, [], {}), (client.list_database_names, [], {}), - (client.drop_database, ['pymongo_test'], {}), + (client.drop_database, ["pymongo_test"], {}), ] self._test_ops(client, *ops) @@ -248,12 +241,12 @@ def test_database(self): client = self.client db = client.pymongo_test ops: list = [ - (db.command, ['ping'], {}), - (db.create_collection, ['collection'], {}), + (db.command, ["ping"], {}), + (db.create_collection, ["collection"], {}), (db.list_collection_names, [], {}), - (db.validate_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), - (db.dereference, [DBRef('collection', 1)], {}), + (db.validate_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + (db.dereference, [DBRef("collection", 1)], {}), ] self._test_ops(client, *ops) @@ -266,19 +259,19 @@ def collection_write_ops(coll): (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {'$set': {'a': 1}}], {}), - (coll.update_many, [{}, {'$set': {'a': 1}}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), (coll.delete_one, [{}], {}), (coll.delete_many, [{}], {}), (coll.find_one_and_replace, [{}, {}], {}), - (coll.find_one_and_update, [{}, {'$set': {'a': 1}}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), (coll.find_one_and_delete, [{}, {}], {}), - (coll.rename, ['collection2'], {}), + (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. - (coll.database.drop_collection, ['collection2'], {}), - (coll.create_indexes, [[IndexModel('a')]], {}), - (coll.create_index, ['a'], {}), - (coll.drop_index, ['a_1'], {}), + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] @@ -289,15 +282,17 @@ def test_collection(self): # Test some collection methods - the rest are in test_cursor. ops = self.collection_write_ops(coll) - ops.extend([ - (coll.distinct, ['a'], {}), - (coll.find_one, [], {}), - (coll.count_documents, [{}], {}), - (coll.list_indexes, [], {}), - (coll.index_information, [], {}), - (coll.options, [], {}), - (coll.aggregate, [[]], {}), - ]) + ops.extend( + [ + (coll.distinct, ["a"], {}), + (coll.find_one, [], {}), + (coll.count_documents, [{}], {}), + (coll.list_indexes, [], {}), + (coll.index_information, [], {}), + (coll.options, [], {}), + (coll.aggregate, [[]], {}), + ] + ) self._test_ops(client, *ops) @@ -335,29 +330,28 @@ def test_cursor(self): # Test all cursor methods. ops = [ - ('find', lambda session: list(coll.find(session=session))), - ('getitem', lambda session: coll.find(session=session)[0]), - ('distinct', - lambda session: coll.find(session=session).distinct('a')), - ('explain', lambda session: coll.find(session=session).explain()), + ("find", lambda session: list(coll.find(session=session))), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), ] for name, f in ops: with client.start_session() as s: listener.results.clear() f(session=s) - self.assertGreaterEqual(len(listener.results['started']), 1) - for event in listener.results['started']: + self.assertGreaterEqual(len(listener.results["started"]), 1) + for event in listener.results["started"]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) + "lsid" in event.command, + "%s sent no lsid with %s" % (name, event.command_name), + ) self.assertEqual( s.session_id, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - name, event.command_name)) + event.command["lsid"], + "%s sent wrong lsid with %s" % (name, event.command_name), + ) with self.assertRaisesRegex(InvalidOperation, "ended session"): f(session=s) @@ -368,67 +362,64 @@ def test_cursor(self): f(session=None) event0 = listener.first_command_started() self.assertTrue( - 'lsid' in event0.command, - "%s sent no lsid with %s" % ( - name, event0.command_name)) + "lsid" in event0.command, "%s sent no lsid with %s" % (name, event0.command_name) + ) - lsid = event0.command['lsid'] + lsid = event0.command["lsid"] - for event in listener.results['started'][1:]: + for event in listener.results["started"][1:]: self.assertTrue( - 'lsid' in event.command, - "%s sent no lsid with %s" % ( - name, event.command_name)) + "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) + ) self.assertEqual( lsid, - event.command['lsid'], - "%s sent wrong lsid with %s" % ( - name, event.command_name)) + event.command["lsid"], + "%s sent wrong lsid with %s" % (name, event.command_name), + ) def test_gridfs(self): client = self.client fs = GridFS(client.pymongo_test) def new_file(session=None): - grid_file = fs.new_file(_id=1, filename='f', session=session) + grid_file = fs.new_file(_id=1, filename="f", session=session) # 1 MB, 5 chunks, to test that each chunk is fetched with same lsid. - grid_file.write(b'a' * 1048576) + grid_file.write(b"a" * 1048576) grid_file.close() def find(session=None): - files = list(fs.find({'_id': 1}, session=session)) + files = list(fs.find({"_id": 1}, session=session)) for f in files: f.read() self._test_ops( client, (new_file, [], {}), - (fs.put, [b'data'], {}), + (fs.put, [b"data"], {}), (lambda session=None: fs.get(1, session=session).read(), [], {}), - (lambda session=None: fs.get_version('f', session=session).read(), - [], {}), - (lambda session=None: - fs.get_last_version('f', session=session).read(), [], {}), + (lambda session=None: fs.get_version("f", session=session).read(), [], {}), + (lambda session=None: fs.get_last_version("f", session=session).read(), [], {}), (fs.list, [], {}), (fs.find_one, [1], {}), (lambda session=None: list(fs.find(session=session)), [], {}), (fs.exists, [1], {}), (find, [], {}), - (fs.delete, [1], {})) + (fs.delete, [1], {}), + ) def test_gridfs_bucket(self): client = self.client bucket = GridFSBucket(client.pymongo_test) def upload(session=None): - stream = bucket.open_upload_stream('f', session=session) - stream.write(b'a' * 1048576) + stream = bucket.open_upload_stream("f", session=session) + stream.write(b"a" * 1048576) stream.close() def upload_with_id(session=None): - stream = bucket.open_upload_stream_with_id(1, 'f1', session=session) - stream.write(b'a' * 1048576) + stream = bucket.open_upload_stream_with_id(1, "f1", session=session) + stream.write(b"a" * 1048576) stream.close() def open_download_stream(session=None): @@ -436,11 +427,11 @@ def open_download_stream(session=None): stream.read() def open_download_stream_by_name(session=None): - stream = bucket.open_download_stream_by_name('f', session=session) + stream = bucket.open_download_stream_by_name("f", session=session) stream.read() def find(session=None): - files = list(bucket.find({'_id': 1}, session=session)) + files = list(bucket.find({"_id": 1}, session=session)) for f in files: f.read() @@ -450,17 +441,18 @@ def find(session=None): client, (upload, [], {}), (upload_with_id, [], {}), - (bucket.upload_from_stream, ['f', b'data'], {}), - (bucket.upload_from_stream_with_id, [2, 'f', b'data'], {}), + (bucket.upload_from_stream, ["f", b"data"], {}), + (bucket.upload_from_stream_with_id, [2, "f", b"data"], {}), (open_download_stream, [], {}), (open_download_stream_by_name, [], {}), (bucket.download_to_stream, [1, sio], {}), - (bucket.download_to_stream_by_name, ['f', sio], {}), + (bucket.download_to_stream_by_name, ["f", sio], {}), (find, [], {}), - (bucket.rename, [1, 'f2'], {}), + (bucket.rename, [1, "f2"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), - (bucket.delete, [2], {})) + (bucket.delete, [2], {}), + ) def test_gridfsbucket_cursor(self): client = self.client @@ -468,7 +460,7 @@ def test_gridfsbucket_cursor(self): for file_id in 1, 2: stream = bucket.open_upload_stream_with_id(file_id, str(file_id)) - stream.write(b'a' * 1048576) + stream.write(b"a" * 1048576) stream.close() with client.start_session() as s: @@ -518,10 +510,7 @@ def test_aggregate(self): coll = client.pymongo_test.collection def agg(session=None): - list(coll.aggregate( - [], - batchSize=2, - session=session)) + list(coll.aggregate([], batchSize=2, session=session)) # With empty collection. self._test_ops(client, (agg, [], {})) @@ -553,11 +542,11 @@ def test_aggregate_error(self): listener.results.clear() with self.assertRaises(OperationFailure): - coll.aggregate([{'$badOperation': {'bar': 1}}]) + coll.aggregate([{"$badOperation": {"bar": 1}}]) event = listener.first_command_started() - self.assertEqual(event.command_name, 'aggregate') - lsid = event.command['lsid'] + self.assertEqual(event.command_name, "aggregate") + lsid = event.command["lsid"] # Session was returned to pool despite error. self.assertIn(lsid, session_ids(client)) @@ -568,7 +557,7 @@ def _test_cursor_helper(self, create_cursor, close_cursor): cursor = create_cursor(coll, None) next(cursor) # Session is "owned" by cursor. - session = getattr(cursor, '_%s__session' % cursor.__class__.__name__) + session = getattr(cursor, "_%s__session" % cursor.__class__.__name__) self.assertIsNotNone(session) lsid = session.session_id next(cursor) @@ -591,45 +580,46 @@ def _test_cursor_helper(self, create_cursor, close_cursor): def test_cursor_close(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: cursor.close()) + lambda coll, session: coll.find(session=session), lambda cursor: cursor.close() + ) def test_command_cursor_close(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.close()) + lambda coll, session: coll.aggregate([], session=session), lambda cursor: cursor.close() + ) def test_cursor_del(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: cursor.__del__()) + lambda coll, session: coll.find(session=session), lambda cursor: cursor.__del__() + ) def test_command_cursor_del(self): self._test_cursor_helper( lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.__del__()) + lambda cursor: cursor.__del__(), + ) def test_cursor_exhaust(self): self._test_cursor_helper( - lambda coll, session: coll.find(session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.find(session=session), lambda cursor: list(cursor) + ) def test_command_cursor_exhaust(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.aggregate([], session=session), lambda cursor: list(cursor) + ) def test_cursor_limit_reached(self): self._test_cursor_helper( - lambda coll, session: coll.find(limit=4, batch_size=2, - session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.find(limit=4, batch_size=2, session=session), + lambda cursor: list(cursor), + ) def test_command_cursor_limit_reached(self): self._test_cursor_helper( - lambda coll, session: coll.aggregate([], batchSize=900, - session=session), - lambda cursor: list(cursor)) + lambda coll, session: coll.aggregate([], batchSize=900, session=session), + lambda cursor: list(cursor), + ) def _test_unacknowledged_ops(self, client, *ops): listener = client.options.event_listeners[0] @@ -640,23 +630,23 @@ def _test_unacknowledged_ops(self, client, *ops): # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) - kw['session'] = s + kw["session"] = s with self.assertRaises( - ConfigurationError, - msg="%s did not raise ConfigurationError" % ( - f.__name__,)): + ConfigurationError, msg="%s did not raise ConfigurationError" % (f.__name__,) + ): f(*args, **kw) - if f.__name__ == 'create_collection': + if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results['started'].pop(0) - self.assertEqual('listCollections', event.command_name) - self.assertIn('lsid', event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) + event = listener.results["started"].pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) # Should not run any command before raising an error. - self.assertFalse(listener.results['started'], - "%s sent command" % (f.__name__,)) + self.assertFalse(listener.results["started"], "%s sent command" % (f.__name__,)) self.assertTrue(s.has_ended) @@ -664,20 +654,22 @@ def _test_unacknowledged_ops(self, client, *ops): for f, args, kw in ops: listener.results.clear() f(*args, **kw) - self.assertGreaterEqual(len(listener.results['started']), 1) + self.assertGreaterEqual(len(listener.results["started"]), 1) - if f.__name__ == 'create_collection': + if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results['started'].pop(0) - self.assertEqual('listCollections', event.command_name) - self.assertIn('lsid', event.command, - "%s sent no lsid with %s" % ( - f.__name__, event.command_name)) - - for event in listener.results['started']: - self.assertNotIn('lsid', event.command, - "%s sent lsid with %s" % ( - f.__name__, event.command_name)) + event = listener.results["started"].pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + "%s sent no lsid with %s" % (f.__name__, event.command_name), + ) + + for event in listener.results["started"]: + self.assertNotIn( + "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) + ) def test_unacknowledged_writes(self): # Ensure the collection exists. @@ -688,8 +680,8 @@ def test_unacknowledged_writes(self): coll = db.test_unacked_writes ops: list = [ (client.drop_database, [db.name], {}), - (db.create_collection, ['collection'], {}), - (db.drop_collection, ['collection'], {}), + (db.create_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), ] ops.extend(self.collection_write_ops(coll)) self._test_unacknowledged_ops(client, *ops) @@ -705,21 +697,17 @@ def drop_db(): return False raise - wait_until(drop_db, 'dropped database after w=0 writes') + wait_until(drop_db, "dropped database after w=0 writes") def test_snapshot_incompatible_with_causal_consistency(self): - with self.client.start_session(causal_consistency=False, - snapshot=False): + with self.client.start_session(causal_consistency=False, snapshot=False): pass - with self.client.start_session(causal_consistency=False, - snapshot=True): + with self.client.start_session(causal_consistency=False, snapshot=True): pass - with self.client.start_session(causal_consistency=True, - snapshot=False): + with self.client.start_session(causal_consistency=True, snapshot=False): pass with self.assertRaises(ConfigurationError): - with self.client.start_session(causal_consistency=True, - snapshot=True): + with self.client.start_session(causal_consistency=True, snapshot=True): pass def test_session_not_copyable(self): @@ -727,6 +715,7 @@ def test_session_not_copyable(self): with client.start_session() as s: self.assertRaises(TypeError, lambda: copy.copy(s)) + class TestCausalConsistency(unittest.TestCase): listener: SessionTestListener client: MongoClient @@ -751,33 +740,32 @@ def test_core(self): self.assertIsNone(sess.operation_time) self.listener.results.clear() self.client.pymongo_test.test.find_one(session=sess) - started = self.listener.results['started'][0] + started = self.listener.results["started"][0] cmd = started.command - self.assertIsNone(cmd.get('readConcern')) + self.assertIsNone(cmd.get("readConcern")) op_time = sess.operation_time self.assertIsNotNone(op_time) - succeeded = self.listener.results['succeeded'][0] + succeeded = self.listener.results["succeeded"][0] reply = succeeded.reply - self.assertEqual(op_time, reply.get('operationTime')) + self.assertEqual(op_time, reply.get("operationTime")) # No explicit session self.client.pymongo_test.test.insert_one({}) self.assertEqual(sess.operation_time, op_time) self.listener.results.clear() try: - self.client.pymongo_test.command('doesntexist', session=sess) + self.client.pymongo_test.command("doesntexist", session=sess) except: pass - failed = self.listener.results['failed'][0] - failed_op_time = failed.failure.get('operationTime') + failed = self.listener.results["failed"][0] + failed_op_time = failed.failure.get("operationTime") # Some older builds of MongoDB 3.5 / 3.6 return None for # operationTime when a command fails. Make sure we don't # change operation_time to None. if failed_op_time is None: self.assertIsNotNone(sess.operation_time) else: - self.assertEqual( - sess.operation_time, failed_op_time) + self.assertEqual(sess.operation_time, failed_op_time) with self.client.start_session() as sess2: self.assertIsNone(sess2.cluster_time) @@ -805,36 +793,32 @@ def _test_reads(self, op, exception=None): op(coll, sess) else: op(coll, sess) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertEqual(operation_time, act) @client_context.require_no_standalone def test_reads(self): # Make sure the collection exists. self.client.pymongo_test.test.insert_one({}) + self._test_reads(lambda coll, session: list(coll.aggregate([], session=session))) + self._test_reads(lambda coll, session: list(coll.find({}, session=session))) + self._test_reads(lambda coll, session: coll.find_one({}, session=session)) + self._test_reads(lambda coll, session: coll.count_documents({}, session=session)) + self._test_reads(lambda coll, session: coll.distinct("foo", session=session)) self._test_reads( - lambda coll, session: list(coll.aggregate([], session=session))) - self._test_reads( - lambda coll, session: list(coll.find({}, session=session))) - self._test_reads( - lambda coll, session: coll.find_one({}, session=session)) - self._test_reads( - lambda coll, session: coll.count_documents({}, session=session)) - self._test_reads( - lambda coll, session: coll.distinct('foo', session=session)) - self._test_reads( - lambda coll, session: list(coll.aggregate_raw_batches( - [], session=session))) - self._test_reads( - lambda coll, session: list(coll.find_raw_batches( - {}, session=session))) + lambda coll, session: list(coll.aggregate_raw_batches([], session=session)) + ) + self._test_reads(lambda coll, session: list(coll.find_raw_batches({}, session=session))) self.assertRaises( ConfigurationError, self._test_reads, - lambda coll, session: coll.estimated_document_count( - session=session)) + lambda coll, session: coll.estimated_document_count(session=session), + ) def _test_writes(self, op): coll = self.client.pymongo_test.test @@ -844,50 +828,46 @@ def _test_writes(self, op): self.assertIsNotNone(operation_time) self.listener.results.clear() coll.find_one({}, session=sess) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertEqual(operation_time, act) @client_context.require_no_standalone def test_writes(self): + self._test_writes(lambda coll, session: coll.bulk_write([InsertOne({})], session=session)) + self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) + self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) self._test_writes( - lambda coll, session: coll.bulk_write( - [InsertOne({})], session=session)) - self._test_writes( - lambda coll, session: coll.insert_one({}, session=session)) + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) self._test_writes( - lambda coll, session: coll.insert_many([{}], session=session)) + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) self._test_writes( - lambda coll, session: coll.replace_one( - {'_id': 1}, {'x': 1}, session=session)) + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_writes(lambda coll, session: coll.delete_one({}, session=session)) + self._test_writes(lambda coll, session: coll.delete_many({}, session=session)) self._test_writes( - lambda coll, session: coll.update_one( - {}, {'$set': {'X': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.update_many( - {}, {'$set': {'x': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.delete_one({}, session=session)) - self._test_writes( - lambda coll, session: coll.delete_many({}, session=session)) - self._test_writes( - lambda coll, session: coll.find_one_and_replace( - {'x': 1}, {'y': 1}, session=session)) + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) self._test_writes( lambda coll, session: coll.find_one_and_update( - {'y': 1}, {'$set': {'x': 1}}, session=session)) - self._test_writes( - lambda coll, session: coll.find_one_and_delete( - {'x': 1}, session=session)) - self._test_writes( - lambda coll, session: coll.create_index("foo", session=session)) + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + self._test_writes(lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session)) + self._test_writes(lambda coll, session: coll.create_index("foo", session=session)) self._test_writes( lambda coll, session: coll.create_indexes( - [IndexModel([("bar", ASCENDING)])], session=session)) - self._test_writes( - lambda coll, session: coll.drop_index("foo_1", session=session)) - self._test_writes( - lambda coll, session: coll.drop_indexes(session=session)) + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_writes(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_writes(lambda coll, session: coll.drop_indexes(session=session)) def _test_no_read_concern(self, op): coll = self.client.pymongo_test.test @@ -897,61 +877,56 @@ def _test_no_read_concern(self, op): self.assertIsNotNone(operation_time) self.listener.results.clear() op(coll, sess) - rc = self.listener.results['started'][0].command.get( - 'readConcern') + rc = self.listener.results["started"][0].command.get("readConcern") self.assertIsNone(rc) @client_context.require_no_standalone def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: coll.bulk_write( - [InsertOne({})], session=session)) - self._test_no_read_concern( - lambda coll, session: coll.insert_one({}, session=session)) + lambda coll, session: coll.bulk_write([InsertOne({})], session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.insert_many([{}], session=session)) self._test_no_read_concern( - lambda coll, session: coll.insert_many([{}], session=session)) + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) self._test_no_read_concern( - lambda coll, session: coll.replace_one( - {'_id': 1}, {'x': 1}, session=session)) + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) self._test_no_read_concern( - lambda coll, session: coll.update_one( - {}, {'$set': {'X': 1}}, session=session)) + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.delete_one({}, session=session)) + self._test_no_read_concern(lambda coll, session: coll.delete_many({}, session=session)) self._test_no_read_concern( - lambda coll, session: coll.update_many( - {}, {'$set': {'x': 1}}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.delete_one({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.delete_many({}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.find_one_and_replace( - {'x': 1}, {'y': 1}, session=session)) + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) self._test_no_read_concern( lambda coll, session: coll.find_one_and_update( - {'y': 1}, {'$set': {'x': 1}}, session=session)) - self._test_no_read_concern( - lambda coll, session: coll.find_one_and_delete( - {'x': 1}, session=session)) + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) self._test_no_read_concern( - lambda coll, session: coll.create_index("foo", session=session)) + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + self._test_no_read_concern(lambda coll, session: coll.create_index("foo", session=session)) self._test_no_read_concern( lambda coll, session: coll.create_indexes( - [IndexModel([("bar", ASCENDING)])], session=session)) - self._test_no_read_concern( - lambda coll, session: coll.drop_index("foo_1", session=session)) - self._test_no_read_concern( - lambda coll, session: coll.drop_indexes(session=session)) + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + self._test_no_read_concern(lambda coll, session: coll.drop_index("foo_1", session=session)) + self._test_no_read_concern(lambda coll, session: coll.drop_indexes(session=session)) # Not a write, but explain also doesn't support readConcern. - self._test_no_read_concern( - lambda coll, session: coll.find({}, session=session).explain()) + self._test_no_read_concern(lambda coll, session: coll.find({}, session=session).explain()) @client_context.require_no_standalone @client_context.require_version_max(4, 1, 0) def test_aggregate_out_does_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: list( - coll.aggregate([{"$out": "aggout"}], session=session))) + lambda coll, session: list(coll.aggregate([{"$out": "aggout"}], session=session)) + ) @client_context.require_no_standalone def test_get_more_does_not_include_read_concern(self): @@ -965,17 +940,20 @@ def test_get_more_does_not_include_read_concern(self): next(cursor) self.listener.results.clear() list(cursor) - started = self.listener.results['started'][0] - self.assertEqual(started.command_name, 'getMore') - self.assertIsNone(started.command.get('readConcern')) + started = self.listener.results["started"][0] + self.assertEqual(started.command_name, "getMore") + self.assertIsNone(started.command.get("readConcern")) def test_session_not_causal(self): with self.client.start_session(causal_consistency=False) as s: self.client.pymongo_test.test.insert_one({}, session=s) self.listener.results.clear() self.client.pymongo_test.test.find_one({}, session=s) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertIsNone(act) @client_context.require_standalone @@ -984,8 +962,11 @@ def test_server_not_causal(self): self.client.pymongo_test.test.insert_one({}, session=s) self.listener.results.clear() self.client.pymongo_test.test.find_one({}, session=s) - act = self.listener.results['started'][0].command.get( - 'readConcern', {}).get('afterClusterTime') + act = ( + self.listener.results["started"][0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) self.assertIsNone(act) @client_context.require_no_standalone @@ -996,28 +977,25 @@ def test_read_concern(self): coll.insert_one({}, session=s) self.listener.results.clear() coll.find_one({}, session=s) - read_concern = self.listener.results['started'][0].command.get( - 'readConcern') + read_concern = self.listener.results["started"][0].command.get("readConcern") self.assertIsNotNone(read_concern) - self.assertIsNone(read_concern.get('level')) - self.assertIsNotNone(read_concern.get('afterClusterTime')) + self.assertIsNone(read_concern.get("level")) + self.assertIsNotNone(read_concern.get("afterClusterTime")) coll = coll.with_options(read_concern=ReadConcern("majority")) self.listener.results.clear() coll.find_one({}, session=s) - read_concern = self.listener.results['started'][0].command.get( - 'readConcern') + read_concern = self.listener.results["started"][0].command.get("readConcern") self.assertIsNotNone(read_concern) - self.assertEqual(read_concern.get('level'), 'majority') - self.assertIsNotNone(read_concern.get('afterClusterTime')) + self.assertEqual(read_concern.get("level"), "majority") + self.assertIsNotNone(read_concern.get("afterClusterTime")) @client_context.require_no_standalone def test_cluster_time_with_server_support(self): self.client.pymongo_test.test.insert_one({}) self.listener.results.clear() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results['started'][0].command.get( - '$clusterTime') + after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") self.assertIsNotNone(after_cluster_time) @client_context.require_standalone @@ -1025,22 +1003,20 @@ def test_cluster_time_no_server_support(self): self.client.pymongo_test.test.insert_one({}) self.listener.results.clear() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results['started'][0].command.get( - '$clusterTime') + after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") self.assertIsNone(after_cluster_time) class TestClusterTime(IntegrationTest): def setUp(self): super(TestClusterTime, self).setUp() - if '$clusterTime' not in client_context.hello: - raise SkipTest('$clusterTime not supported') + if "$clusterTime" not in client_context.hello: + raise SkipTest("$clusterTime not supported") def test_cluster_time(self): listener = SessionTestListener() # Prevent heartbeats from updating $clusterTime between operations. - client = rs_or_single_client(event_listeners=[listener], - heartbeatFrequencyMS=999999) + client = rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). @@ -1051,7 +1027,7 @@ def test_cluster_time(self): def rename_and_drop(): # Ensure collection exists. collection.insert_one({}) - collection.rename('collection2') + collection.rename("collection2") client.pymongo_test.collection2.drop() def insert_and_find(): @@ -1074,22 +1050,19 @@ def insert_and_aggregate(): ops = [ # Tests from Driver Sessions Spec. - ('ping', lambda: client.admin.command('ping')), - ('aggregate', lambda: list(collection.aggregate([]))), - ('find', lambda: list(collection.find())), - ('insert_one', lambda: collection.insert_one({})), - + ("ping", lambda: client.admin.command("ping")), + ("aggregate", lambda: list(collection.aggregate([]))), + ("find", lambda: list(collection.find())), + ("insert_one", lambda: collection.insert_one({})), # Additional PyMongo tests. - ('insert_and_find', insert_and_find), - ('insert_and_aggregate', insert_and_aggregate), - ('update_one', - lambda: collection.update_one({}, {'$set': {'x': 1}})), - ('update_many', - lambda: collection.update_many({}, {'$set': {'x': 1}})), - ('delete_one', lambda: collection.delete_one({})), - ('delete_many', lambda: collection.delete_many({})), - ('bulk_write', lambda: collection.bulk_write([InsertOne({})])), - ('rename_and_drop', rename_and_drop), + ("insert_and_find", insert_and_find), + ("insert_and_aggregate", insert_and_aggregate), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), ] for name, f in ops: @@ -1100,48 +1073,48 @@ def insert_and_aggregate(): collection.insert_one({}) f() - self.assertGreaterEqual(len(listener.results['started']), 1) - for i, event in enumerate(listener.results['started']): + self.assertGreaterEqual(len(listener.results["started"]), 1) + for i, event in enumerate(listener.results["started"]): self.assertTrue( - '$clusterTime' in event.command, - "%s sent no $clusterTime with %s" % ( - f.__name__, event.command_name)) + "$clusterTime" in event.command, + "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), + ) if i > 0: - succeeded = listener.results['succeeded'][i - 1] + succeeded = listener.results["succeeded"][i - 1] self.assertTrue( - '$clusterTime' in succeeded.reply, - "%s received no $clusterTime with %s" % ( - f.__name__, succeeded.command_name)) + "$clusterTime" in succeeded.reply, + "%s received no $clusterTime with %s" + % (f.__name__, succeeded.command_name), + ) self.assertTrue( - event.command['$clusterTime']['clusterTime'] >= - succeeded.reply['$clusterTime']['clusterTime'], - "%s sent wrong $clusterTime with %s" % ( - f.__name__, event.command_name)) + event.command["$clusterTime"]["clusterTime"] + >= succeeded.reply["$clusterTime"]["clusterTime"], + "%s sent wrong $clusterTime with %s" % (f.__name__, event.command_name), + ) class TestSpec(SpecRunner): RUN_ON_SERVERLESS = True # Location of JSON test specifications. - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'sessions', 'legacy') + TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "legacy") def last_two_command_events(self): """Return the last two command started events.""" - started_events = self.listener.results['started'][-2:] + started_events = self.listener.results["started"][-2:] self.assertEqual(2, len(started_events)) return started_events def assert_same_lsid_on_last_two_commands(self): """Run the assertSameLsidOnLastTwoCommands test operation.""" event1, event2 = self.last_two_command_events() - self.assertEqual(event1.command['lsid'], event2.command['lsid']) + self.assertEqual(event1.command["lsid"], event2.command["lsid"]) def assert_different_lsid_on_last_two_commands(self): """Run the assertDifferentLsidOnLastTwoCommands test operation.""" event1, event2 = self.last_two_command_events() - self.assertNotEqual(event1.command['lsid'], event2.command['lsid']) + self.assertNotEqual(event1.command["lsid"], event2.command["lsid"]) def assert_session_dirty(self, session): """Run the assertSessionDirty test operation. diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py index fe25536e7e..2320d52718 100644 --- a/test/test_sessions_unified.py +++ b/test/test_sessions_unified.py @@ -23,8 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'sessions', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_son.py b/test/test_son.py index edddd6b8b8..69beb81439 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -21,9 +21,11 @@ sys.path[0:0] = [""] -from bson.son import SON -from test import unittest from collections import OrderedDict +from test import unittest + +from bson.son import SON + class TestSON(unittest.TestCase): def test_ordered_dict(self): @@ -31,9 +33,9 @@ def test_ordered_dict(self): a1["hello"] = "world" a1["mike"] = "awesome" a1["hello_"] = "mike" - self.assertEqual(list(a1.items()), [("hello", "world"), - ("mike", "awesome"), - ("hello_", "mike")]) + self.assertEqual( + list(a1.items()), [("hello", "world"), ("mike", "awesome"), ("hello_", "mike")] + ) b2 = SON({"hello": "world"}) self.assertEqual(b2["hello"], "world") @@ -41,38 +43,28 @@ def test_ordered_dict(self): def test_equality(self): a1 = SON({"hello": "world"}) - b2 = SON((('hello', 'world'), ('mike', 'awesome'), ('hello_', 'mike'))) + b2 = SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike"))) self.assertEqual(a1, SON({"hello": "world"})) - self.assertEqual(b2, SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertEqual(b2, dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertEqual(b2, dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) self.assertNotEqual(a1, b2) - self.assertNotEqual(b2, SON((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) - self.assertFalse(b2 != SON((('hello', 'world'), - ('mike', 'awesome'), - ('hello_', 'mike')))) - self.assertFalse(b2 != dict((('hello_', 'mike'), - ('mike', 'awesome'), - ('hello', 'world')))) + self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) + self.assertFalse(b2 != dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) # Embedded SON. - d4 = SON([('blah', {'foo': SON()})]) - self.assertEqual(d4, {'blah': {'foo': {}}}) - self.assertEqual(d4, {'blah': {'foo': SON()}}) - self.assertNotEqual(d4, {'blah': {'foo': []}}) + d4 = SON([("blah", {"foo": SON()})]) + self.assertEqual(d4, {"blah": {"foo": {}}}) + self.assertEqual(d4, {"blah": {"foo": SON()}}) + self.assertNotEqual(d4, {"blah": {"foo": []}}) # Original data unaffected. - self.assertEqual(SON, d4['blah']['foo'].__class__) + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_to_dict(self): a1 = SON() @@ -89,19 +81,17 @@ def test_to_dict(self): self.assertEqual(dict, d4.to_dict()["blah"]["foo"].__class__) # Original data unaffected. - self.assertEqual(SON, d4['blah']['foo'].__class__) + self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_pickle(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) for protocol in range(pickle.HIGHEST_PROTOCOL + 1): - pickled = pickle.loads(pickle.dumps(complex_son, - protocol=protocol)) - self.assertEqual(pickled['son'], pickled['list'][0]) - self.assertEqual(pickled['son'], pickled['list'][1]) + pickled = pickle.loads(pickle.dumps(complex_son, protocol=protocol)) + self.assertEqual(pickled["son"], pickled["list"][0]) + self.assertEqual(pickled["son"], pickled["list"][1]) def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo @@ -109,16 +99,16 @@ def test_pickle_backwards_compatability(self): pickled_with_2_1_1 = ( "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb.").encode('utf8') + "S'_SON__keys'\np7\n(lp8\nsb." + ).encode("utf8") son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) def test_copying(self): simple_son = SON([]) - complex_son = SON([('son', simple_son), - ('list', [simple_son, simple_son])]) + complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) regex_son = SON([("x", re.compile("^hello.*"))]) - reflexive_son = SON([('son', simple_son)]) + reflexive_son = SON([("son", simple_son)]) reflexive_son["reflexive"] = reflexive_son simple_son1 = copy.copy(simple_son) @@ -196,8 +186,10 @@ def test_keys(self): try: d - i().keys() except TypeError: - self.fail("SON().keys() is not returning an object compatible " - "with %s objects" % (str(i))) + self.fail( + "SON().keys() is not returning an object compatible " + "with %s objects" % (str(i)) + ) # Test to verify correctness d = SON({"k": "v"}).keys() for i in [OrderedDict, dict]: @@ -205,5 +197,6 @@ def test_keys(self): for i in [OrderedDict, dict]: self.assertEqual(d - i({"k": 0}).keys(), set()) + if __name__ == "__main__": unittest.main() diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 64581d83b7..6c240d7a78 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -15,29 +15,31 @@ """Run the SRV support tests.""" import sys - from time import sleep from typing import Any sys.path[0:0] = [""] -import pymongo +from test import client_knobs, unittest +from test.utils import FunctionCallRecorder, wait_until +import pymongo from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.mongo_client import MongoClient -from test import client_knobs, unittest -from test.utils import wait_until, FunctionCallRecorder - +from pymongo.srv_resolver import _HAVE_DNSPYTHON WAIT_TIME = 0.1 class SrvPollingKnobs(object): - def __init__(self, ttl_time=None, min_srv_rescan_interval=None, - nodelist_callback=None, - count_resolver_calls=False): + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): self.ttl_time = ttl_time self.min_srv_rescan_interval = min_srv_rescan_interval self.nodelist_callback = nodelist_callback @@ -48,8 +50,7 @@ def __init__(self, ttl_time=None, min_srv_rescan_interval=None, def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = \ - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval @@ -88,18 +89,20 @@ class TestSrvPolling(unittest.TestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), - ("localhost.test.build.10gen.cc", 27018)] + ("localhost.test.build.10gen.cc", 27018), + ] CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" def setUp(self): if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython " - "module") + raise unittest.SkipTest("SRV polling tests require the dnspython " "module") # Patch timeouts to ensure short rescan SRV interval. self.client_knobs = client_knobs( - heartbeat_frequency=WAIT_TIME, min_heartbeat_interval=WAIT_TIME, - events_queue_frequency=WAIT_TIME) + heartbeat_frequency=WAIT_TIME, + min_heartbeat_interval=WAIT_TIME, + events_queue_frequency=WAIT_TIME, + ) self.client_knobs.enable() def tearDown(self): @@ -112,13 +115,14 @@ def assert_nodelist_change(self, expected_nodelist, client): """Check if the client._topology eventually sees all nodes in the expected_nodelist. """ + def predicate(): nodelist = self.get_nodelist(client) if set(expected_nodelist) == set(nodelist): return True return False - wait_until(predicate, "see expected nodelist", - timeout=100*WAIT_TIME) + + wait_until(predicate, "see expected nodelist", timeout=100 * WAIT_TIME) def assert_nodelist_nochange(self, expected_nodelist, client): """Check if the client._topology ever deviates from seeing all nodes @@ -126,20 +130,23 @@ def assert_nodelist_nochange(self, expected_nodelist, client): (WAIT_TIME * 10) seconds. Also check that the resolver is called at least once. """ - sleep(WAIT_TIME*10) + sleep(WAIT_TIME * 10) nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): msg = "Client nodelist %s changed unexpectedly (expected %s)" raise self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore - 1, "resolver was never called") + 1, + "resolver was never called", + ) return True def run_scenario(self, dns_response, expect_change): if callable(dns_response): dns_resolver_response = dns_response else: + def dns_resolver_response(): return dns_response @@ -153,34 +160,29 @@ def dns_resolver_response(): expected_response = self.BASE_SRV_RESPONSE # Patch timeouts to ensure short test running times. - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING) self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. with SrvPollingKnobs( - nodelist_callback=dns_resolver_response, - count_resolver_calls=count_resolver_calls): + nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls + ): assertion_method(expected_response, client) def test_addition(self): response = self.BASE_SRV_RESPONSE[:] - response.append( - ("localhost.test.build.10gen.cc", 27019)) + response.append(("localhost.test.build.10gen.cc", 27019)) self.run_scenario(response, True) def test_removal(self): response = self.BASE_SRV_RESPONSE[:] - response.remove( - ("localhost.test.build.10gen.cc", 27018)) + response.remove(("localhost.test.build.10gen.cc", 27018)) self.run_scenario(response, True) def test_replace_one(self): response = self.BASE_SRV_RESPONSE[:] - response.remove( - ("localhost.test.build.10gen.cc", 27018)) - response.append( - ("localhost.test.build.10gen.cc", 27019)) + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) self.run_scenario(response, True) def test_replace_both_with_one(self): @@ -188,15 +190,20 @@ def test_replace_both_with_one(self): self.run_scenario(response, True) def test_replace_both_with_two(self): - response = [("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] self.run_scenario(response, True) def test_dns_failures(self): from dns import exception + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + def response_callback(*args): raise exc("DNS Failure!") + self.run_scenario(response_callback, False) def test_dns_record_lookup_empty(self): @@ -207,89 +214,95 @@ def _test_recover_from_initial(self, initial_callback): # Construct a valid final response callback distinct from base. response_final = self.BASE_SRV_RESPONSE[:] response_final.pop() + def final_callback(): return response_final with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - nodelist_callback=initial_callback, - count_resolver_calls=True): + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): # Client uses unpatched method to get initial nodelist client = MongoClient(self.CONNECTION_STRING) # Invalid DNS resolver response should not change nodelist. self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, - nodelist_callback=final_callback): + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): # Nodelist should reflect new valid DNS resolver response. self.assert_nodelist_change(response_final, client) def test_recover_from_initially_empty_seedlist(self): def empty_seedlist(): return [] + self._test_recover_from_initial(empty_seedlist) def test_recover_from_initially_erroring_seedlist(self): def erroring_seedlist(): raise ConfigurationError + self._test_recover_from_initial(erroring_seedlist) def test_10_all_dns_selected(self): - response = [("localhost.test.build.10gen.cc", 27017), - ("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] def nodelist_callback(): return response - with SrvPollingKnobs(ttl_time=WAIT_TIME, - min_srv_rescan_interval=WAIT_TIME): + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=0) self.addCleanup(client.close) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) def test_11_all_dns_selected(self): - response = [("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27020)] + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] def nodelist_callback(): return response - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) self.addCleanup(client.close) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) def test_12_new_dns_randomly_selected(self): - response = [("localhost.test.build.10gen.cc", 27020), - ("localhost.test.build.10gen.cc", 27019), - ("localhost.test.build.10gen.cc", 27017)] + response = [ + ("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017), + ] def nodelist_callback(): return response - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) self.addCleanup(client.close) with SrvPollingKnobs(nodelist_callback=nodelist_callback): - sleep(2*common.MIN_SRV_RESCAN_INTERVAL) - final_topology = set( - client.topology_description.server_descriptions()) - self.assertIn(("localhost.test.build.10gen.cc", 27017), - final_topology) + sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set(client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) self.assertEqual(len(final_topology), 2) def test_does_not_flipflop(self): - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=1) self.addCleanup(client.close) old = set(client.topology_description.server_descriptions()) - sleep(4*WAIT_TIME) + sleep(4 * WAIT_TIME) new = set(client.topology_description.server_descriptions()) self.assertSetEqual(old, new) @@ -297,20 +310,19 @@ def test_srv_service_name(self): # Construct a valid final response callback distinct from base. response = [ ("localhost.test.build.10gen.cc.", 27019), - ("localhost.test.build.10gen.cc.", 27020) + ("localhost.test.build.10gen.cc.", 27020), ] def nodelist_callback(): return response - with SrvPollingKnobs( - ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient( - "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" - "=customname") + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" "=customname" + ) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index 25a646a998..7629c1fd88 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -21,26 +21,21 @@ sys.path[0:0] = [""] +from test import HAVE_IPADDRESS, IntegrationTest, SkipTest, client_context, unittest +from test.utils import ( + EventListener, + cat_files, + connected, + ignore_deprecations, + remove_all_users, +) from urllib.parse import quote_plus from pymongo import MongoClient, ssl_support -from pymongo.errors import (ConfigurationError, - ConnectionFailure, - OperationFailure) +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure from pymongo.hello import HelloCompat -from pymongo.ssl_support import HAVE_SSL, get_ssl_context, _ssl +from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context from pymongo.write_concern import WriteConcern -from test import (IntegrationTest, - client_context, - SkipTest, - unittest, - HAVE_IPADDRESS) -from test.utils import (EventListener, - cat_files, - connected, - ignore_deprecations, - remove_all_users) - _HAVE_PYOPENSSL = False try: @@ -48,9 +43,12 @@ import OpenSSL import requests import service_identity + # Ensure service_identity>=18.1 is installed from service_identity.pyopenssl import verify_ip_address + from pymongo.ocsp_support import _load_trusted_ca_certs + _HAVE_PYOPENSSL = True except ImportError: _load_trusted_ca_certs = None # type: ignore @@ -59,15 +57,13 @@ if HAVE_SSL: import ssl -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'certificates') -CLIENT_PEM = os.path.join(CERT_PATH, 'client.pem') -CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, 'password_protected.pem') -CA_PEM = os.path.join(CERT_PATH, 'ca.pem') -CA_BUNDLE_PEM = os.path.join(CERT_PATH, 'trusted-ca.pem') -CRL_PEM = os.path.join(CERT_PATH, 'crl.pem') -MONGODB_X509_USERNAME = ( - "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client") +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" _PY37PLUS = sys.version_info[:2] >= (3, 7) @@ -83,27 +79,24 @@ class TestClientSSL(unittest.TestCase): - - @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what " - "happens without it.") + @unittest.skipIf( + HAVE_SSL, "The ssl module is available, can't test what " "happens without it." + ) def test_no_ssl_module(self): # Explicit - self.assertRaises(ConfigurationError, - MongoClient, ssl=True) + self.assertRaises(ConfigurationError, MongoClient, ssl=True) # Implied - self.assertRaises(ConfigurationError, - MongoClient, tlsCertificateKeyFile=CLIENT_PEM) + self.assertRaises(ConfigurationError, MongoClient, tlsCertificateKeyFile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") @ignore_deprecations def test_config_ssl(self): # Tests various ssl configurations - self.assertRaises(ValueError, MongoClient, ssl='foo') - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCertificateKeyFile=CLIENT_PEM) + self.assertRaises(ValueError, MongoClient, ssl="foo") + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) self.assertRaises(TypeError, MongoClient, ssl=0) self.assertRaises(TypeError, MongoClient, ssl=5.5) self.assertRaises(TypeError, MongoClient, ssl=[]) @@ -113,30 +106,20 @@ def test_config_ssl(self): self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=[]) # Test invalid combinations - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCertificateKeyFile=CLIENT_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCAFile=CA_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsCRLFile=CRL_PEM) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsAllowInvalidCertificates=False) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsAllowInvalidHostnames=False) - self.assertRaises(ConfigurationError, - MongoClient, - tls=False, - tlsDisableOCSPEndpointCheck=False) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsAllowInvalidCertificates=False + ) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, MongoClient, tls=False, tlsDisableOCSPEndpointCheck=False + ) @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") def test_use_pyopenssl_when_available(self): @@ -153,10 +136,11 @@ class TestSSL(IntegrationTest): def assertClientWorks(self, client): coll = client.pymongo_test.ssl_test.with_options( - write_concern=WriteConcern(w=client_context.w)) + write_concern=WriteConcern(w=client_context.w) + ) coll.drop() - coll.insert_one({'ssl': True}) - self.assertTrue(coll.find_one()['ssl']) + coll.insert_one({"ssl": True}) + self.assertTrue(coll.find_one()["ssl"]) coll.drop() @classmethod @@ -185,30 +169,38 @@ def test_tlsCertificateKeyFilePassword(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - if not hasattr(ssl, 'SSLContext') and not _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, - 'localhost', + "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, tlsCertificateKeyFilePassword="qwerty", tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100) + serverSelectionTimeoutMS=100, + ) else: - connected(MongoClient('localhost', - ssl=True, - tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, - tlsCertificateKeyFilePassword="qwerty", - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=5000, - **self.credentials)) # type: ignore - - uri_fmt = ("mongodb://localhost/?ssl=true" - "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" - "&tlsCAFile=%s&serverSelectionTimeoutMS=5000") - connected(MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), - **self.credentials)) # type: ignore + connected( + MongoClient( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + connected( + MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) @client_context.require_tlsCertificateKeyFile @client_context.require_no_auth @@ -221,16 +213,21 @@ def test_cert_ssl_implicitly_set(self): # # test that setting tlsCertificateKeyFile causes ssl to be set to True - client = MongoClient(client_context.host, client_context.port, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + client = MongoClient( + client_context.host, + client_context.port, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) response = client.admin.command(HelloCompat.LEGACY_CMD) - if 'setName' in response: - client = MongoClient(client_context.pair, - replicaSet=response['setName'], - w=len(response['hosts']), - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + if "setName" in response: + client = MongoClient( + client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) self.assertClientWorks(client) @@ -243,33 +240,41 @@ def test_cert_ssl_validation(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - client = MongoClient('localhost', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM) + client = MongoClient( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) response = client.admin.command(HelloCompat.LEGACY_CMD) - if 'setName' in response: - if response['primary'].split(":")[0] != 'localhost': - raise SkipTest("No hosts in the replicaset for 'localhost'. " - "Cannot validate hostname in the certificate") - - client = MongoClient('localhost', - replicaSet=response['setName'], - w=len(response['hosts']), - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. " + "Cannot validate hostname in the certificate" + ) + + client = MongoClient( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) self.assertClientWorks(client) if HAVE_IPADDRESS: - client = MongoClient('127.0.0.1', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM) + client = MongoClient( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) self.assertClientWorks(client) @client_context.require_tlsCertificateKeyFile @@ -281,9 +286,11 @@ def test_cert_ssl_uri_support(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - uri_fmt = ("mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" - "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false") - client = MongoClient(uri_fmt % (CLIENT_PEM, 'true', CA_PEM)) + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = MongoClient(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) self.assertClientWorks(client) @client_context.require_tlsCertificateKeyFile @@ -309,81 +316,107 @@ def test_cert_ssl_validation_hostname_matching(self): response = self.client.admin.command(HelloCompat.LEGACY_CMD) with self.assertRaises(ConnectionFailure): - connected(MongoClient('server', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore - - connected(MongoClient('server', - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore - - if 'setName' in response: + connected( + MongoClient( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) + + connected( + MongoClient( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) + + if "setName" in response: with self.assertRaises(ConnectionFailure): - connected(MongoClient('server', - replicaSet=response['setName'], - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore - - connected(MongoClient('server', - replicaSet=response['setName'], - ssl=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsAllowInvalidCertificates=False, - tlsCAFile=CA_PEM, - tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=500, - **self.credentials)) # type: ignore + connected( + MongoClient( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) + + connected( + MongoClient( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials # type: ignore[arg-type] + ) + ) @client_context.require_tlsCertificateKeyFile @ignore_deprecations def test_tlsCRLFile_support(self): - if not hasattr(ssl, 'VERIFY_CRL_CHECK_LEAF') or _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, MongoClient, - 'localhost', + "localhost", ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100) + serverSelectionTimeoutMS=100, + ) else: - connected(MongoClient('localhost', - ssl=True, - tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=100, + **self.credentials # type: ignore[arg-type] + ) + ) with self.assertRaises(ConnectionFailure): - connected(MongoClient('localhost', - ssl=True, - tlsCAFile=CA_PEM, - tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore - - uri_fmt = ("mongodb://localhost/?ssl=true&" - "tlsCAFile=%s&serverSelectionTimeoutMS=100") - connected(MongoClient(uri_fmt % (CA_PEM,), - **self.credentials)) # type: ignore - - uri_fmt = ("mongodb://localhost/?ssl=true&tlsCRLFile=%s" - "&tlsCAFile=%s&serverSelectionTimeoutMS=100") + connected( + MongoClient( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=100, + **self.credentials # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&" "tlsCAFile=%s&serverSelectionTimeoutMS=100" + connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=100" + ) with self.assertRaises(ConnectionFailure): - connected(MongoClient(uri_fmt % (CRL_PEM, CA_PEM), - **self.credentials)) # type: ignore + connected( + MongoClient(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable @@ -398,37 +431,39 @@ def test_validation_with_system_ca_certs(self): self.patch_system_certs(CA_PEM) with self.assertRaises(ConnectionFailure): # Server cert is verified but hostname matching fails - connected(MongoClient('server', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient("server", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + ) # Server cert is verified. Disable hostname matching. - connected(MongoClient('server', - ssl=True, - tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=100, + **self.credentials # type: ignore[arg-type] + ) + ) # Server cert and hostname are verified. - connected(MongoClient('localhost', - ssl=True, - serverSelectionTimeoutMS=100, - **self.credentials)) # type: ignore + connected( + MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + ) # Server cert and hostname are verified. connected( MongoClient( - 'mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100', - **self.credentials)) # type: ignore + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100", **self.credentials # type: ignore[arg-type] + ) + ) def test_system_certs_config_error(self): ctx = get_ssl_context(None, None, None, None, True, True, False) - if ((sys.platform != "win32" - and hasattr(ctx, "set_default_verify_paths")) - or hasattr(ctx, "load_default_certs")): - raise SkipTest( - "Can't test when system CA certificates are loadable.") + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") ssl_support: Any have_certifi = ssl_support.HAVE_CERTIFI @@ -457,8 +492,7 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, - False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) @@ -492,18 +526,24 @@ def test_wincertstore(self): @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port - self.addCleanup(remove_all_users, client_context.client['$external']) + self.addCleanup(remove_all_users, client_context.client["$external"]) # Give x509 user all necessary privileges. - client_context.create_user('$external', MONGODB_X509_USERNAME, roles=[ - {'role': 'readWriteAnyDatabase', 'db': 'admin'}, - {'role': 'userAdminAnyDatabase', 'db': 'admin'}]) + client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) noauth = MongoClient( client_context.pair, ssl=True, tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + tlsCertificateKeyFile=CLIENT_PEM, + ) self.addCleanup(noauth.close) with self.assertRaises(OperationFailure): @@ -512,11 +552,12 @@ def test_mongodb_x509_auth(self): listener = EventListener() auth = MongoClient( client_context.pair, - authMechanism='MONGODB-X509', + authMechanism="MONGODB-X509", ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, - event_listeners=[listener]) + event_listeners=[listener], + ) self.addCleanup(auth.close) # No error @@ -524,64 +565,73 @@ def test_mongodb_x509_auth(self): names = listener.started_command_names() if client_context.version.at_least(4, 4, -1): # Speculative auth skips the authenticate command. - self.assertEqual(names, ['find']) + self.assertEqual(names, ["find"]) else: - self.assertEqual(names, ['authenticate', 'find']) - - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) - client = MongoClient(uri, - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = MongoClient( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() - uri = 'mongodb://%s:%d/?authMechanism=MONGODB-X509' % (host, port) - client = MongoClient(uri, - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = MongoClient( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus("not the username"), host, port)) + uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) bad_client = MongoClient( - uri, ssl=True, tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() bad_client = MongoClient( - client_context.pair, - username="not the username", - authMechanism='MONGODB-X509', - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CLIENT_PEM) + client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() # Invalid certificate (using CA certificate as client certificate) - uri = ('mongodb://%s@%s:%d/?authMechanism=' - 'MONGODB-X509' % ( - quote_plus(MONGODB_X509_USERNAME), host, port)) + uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) try: - connected(MongoClient(uri, - ssl=True, - tlsAllowInvalidCertificates=True, - tlsCertificateKeyFile=CA_PEM, - serverSelectionTimeoutMS=100)) + connected( + MongoClient( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=100, + ) + ) except (ConnectionFailure, ConfigurationError): pass else: @@ -596,15 +646,14 @@ def remove(path): except OSError: pass - temp_ca_bundle = os.path.join(CERT_PATH, 'trusted-ca-bundle.pem') + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") self.addCleanup(remove, temp_ca_bundle) # Add the CA cert file to the bundle. cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) - with MongoClient('localhost', - tls=True, - tlsCertificateKeyFile=CLIENT_PEM, - tlsCAFile=temp_ca_bundle) as client: - self.assertTrue(client.admin.command('ping')) + with MongoClient( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(client.admin.command("ping")) if __name__ == "__main__": diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 096da04cf1..72df717901 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -19,18 +19,18 @@ sys.path[0:0] = [""] +from test import IntegrationTest, client_context, unittest +from test.utils import ( + HeartbeatEventListener, + ServerEventListener, + rs_or_single_client, + single_client, + wait_until, +) + from pymongo import monitoring from pymongo.hello import HelloCompat -from test import (client_context, - IntegrationTest, - unittest) -from test.utils import (HeartbeatEventListener, - rs_or_single_client, - single_client, - ServerEventListener, - wait_until) - class TestStreamingProtocol(IntegrationTest): @client_context.require_failCommand_appName @@ -38,33 +38,40 @@ def test_failCommand_streaming(self): listener = ServerEventListener() hb_listener = HeartbeatEventListener() client = rs_or_single_client( - event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, - appName='failingHeartbeatTest') + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") address = client.address listener.reset() fail_hello = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 4}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'closeConnection': False, - 'errorCode': 10107, - 'appName': 'failingHeartbeatTest', + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", }, } with self.fail_point(fail_hello): + def _marked_unknown(event): - return (event.server_address == address - and not event.new_description.is_server_type_known) + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) def _discovered_node(event): - return (event.server_address == address - and not event.previous_description.is_server_type_known - and event.new_description.is_server_type_known) + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) def marked_unknown(): return len(listener.matching(_marked_unknown)) >= 1 @@ -73,11 +80,11 @@ def rediscovered(): return len(listener.matching(_discovered_node)) >= 1 # Topology events are published asynchronously - wait_until(marked_unknown, 'mark node unknown') - wait_until(rediscovered, 'rediscover node') + wait_until(marked_unknown, "mark node unknown") + wait_until(rediscovered, "rediscover node") # Server should be selectable. - client.admin.command('ping') + client.admin.command("ping") @client_context.require_failCommand_appName def test_streaming_rtt(self): @@ -86,45 +93,46 @@ def test_streaming_rtt(self): # On Windows, RTT can actually be 0.0 because time.time() only has # 1-15 millisecond resolution. We need to delay the initial hello # to ensure that RTT is never zero. - name = 'streamingRttTest' + name = "streamingRttTest" delay_hello: dict = { - 'configureFailPoint': 'failCommand', - 'mode': {'times': 1000}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'blockConnection': True, - 'blockTimeMS': 20, + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, # This can be uncommented after SERVER-49220 is fixed. # 'appName': name, }, } with self.fail_point(delay_hello): client = rs_or_single_client( - event_listeners=[listener, hb_listener], - heartbeatFrequencyMS=500, - appName=name) + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") address = client.address - delay_hello['data']['blockTimeMS'] = 500 - delay_hello['data']['appName'] = name + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name with self.fail_point(delay_hello): + def rtt_exceeds_250_ms(): # XXX: Add a public TopologyDescription getter to MongoClient? topology = client._topology sd = topology.description.server_descriptions()[address] return sd.round_trip_time > 0.250 - wait_until(rtt_exceeds_250_ms, 'exceed 250ms RTT') + wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") # Server should be selectable. - client.admin.command('ping') + client.admin.command("ping") def changed_event(event): - return (event.server_address == address and isinstance( - event, monitoring.ServerDescriptionChangedEvent)) + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) # There should only be one event published, for the initial discovery. events = listener.matching(changed_event) @@ -137,21 +145,21 @@ def test_monitor_waits_after_server_check_error(self): # This test implements: # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks fail_hello = { - 'mode': {'times': 5}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'errorCode': 1234, - 'appName': 'SDAMMinHeartbeatFrequencyTest', + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", }, } with self.fail_point(fail_hello): start = time.time() client = single_client( - appName='SDAMMinHeartbeatFrequencyTest', - serverSelectionTimeoutMS=5000) + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") duration = time.time() - start # Explanation of the expected events: # 0ms: run configureFailPoint @@ -172,11 +180,13 @@ def test_monitor_waits_after_server_check_error(self): def test_heartbeat_awaited_flag(self): hb_listener = HeartbeatEventListener() client = single_client( - event_listeners=[hb_listener], heartbeatFrequencyMS=500, - appName='heartbeatEventAwaitedFlag') + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) self.addCleanup(client.close) # Force a connection. - client.admin.command('ping') + client.admin.command("ping") def hb_succeeded(event): return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) @@ -185,18 +195,17 @@ def hb_failed(event): return isinstance(event, monitoring.ServerHeartbeatFailedEvent) fail_heartbeat = { - 'mode': {'times': 2}, - 'data': { - 'failCommands': [HelloCompat.LEGACY_CMD, 'hello'], - 'closeConnection': True, - 'appName': 'heartbeatEventAwaitedFlag', + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", }, } with self.fail_point(fail_heartbeat): - wait_until(lambda: hb_listener.matching(hb_failed), - "published failed event") + wait_until(lambda: hb_listener.matching(hb_failed), "published failed event") # Reconnect. - client.admin.command('ping') + client.admin.command("ping") hb_succeeded_events = hb_listener.matching(hb_succeeded) hb_failed_events = hb_listener.matching(hb_failed) @@ -205,10 +214,12 @@ def hb_failed(event): # Depending on thread scheduling, the failed heartbeat could occur on # the second or third check. events = [type(e) for e in hb_listener.events[:4]] - if events == [monitoring.ServerHeartbeatStartedEvent, - monitoring.ServerHeartbeatSucceededEvent, - monitoring.ServerHeartbeatStartedEvent, - monitoring.ServerHeartbeatFailedEvent]: + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: self.assertFalse(hb_succeeded_events[1].awaited) else: self.assertTrue(hb_succeeded_events[1].awaited) diff --git a/test/test_threads.py b/test/test_threads.py index a3cde207a2..064008ee32 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -15,12 +15,8 @@ """Test that pymongo is thread safe.""" import threading - -from test import (client_context, - IntegrationTest, - unittest) -from test.utils import rs_or_single_client -from test.utils import joinall +from test import IntegrationTest, client_context, unittest +from test.utils import joinall, rs_or_single_client @client_context.require_connection @@ -29,7 +25,6 @@ def setUpModule(): class AutoAuthenticateThreads(threading.Thread): - def __init__(self, collection, num): threading.Thread.__init__(self) self.coll = collection @@ -39,14 +34,13 @@ def __init__(self, collection, num): def run(self): for i in range(self.num): - self.coll.insert_one({'num': i}) - self.coll.find_one({'num': i}) + self.coll.insert_one({"num": i}) + self.coll.find_one({"num": i}) self.success = True class SaveAndFind(threading.Thread): - def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection @@ -63,7 +57,6 @@ def run(self): class Insert(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection @@ -87,7 +80,6 @@ def run(self): class Update(threading.Thread): - def __init__(self, collection, n, expect_exception): threading.Thread.__init__(self) self.collection = collection @@ -100,8 +92,7 @@ def run(self): error = True try: - self.collection.update_one({"test": "unique"}, - {"$set": {"test": "update"}}) + self.collection.update_one({"test": "unique"}, {"$set": {"test": "update"}}) error = False except: if not self.expect_exception: diff --git a/test/test_timestamp.py b/test/test_timestamp.py index bb3358121c..3602fe2808 100644 --- a/test/test_timestamp.py +++ b/test/test_timestamp.py @@ -14,15 +14,17 @@ """Tests for the Timestamp class.""" -import datetime -import sys import copy +import datetime import pickle +import sys + sys.path[0:0] = [""] +from test import unittest + from bson.timestamp import Timestamp from bson.tz_util import utc -from test import unittest class TestTimestamp(unittest.TestCase): @@ -78,5 +80,6 @@ def test_repr(self): t = Timestamp(0, 0) self.assertEqual(repr(t), "Timestamp(0, 0)") + if __name__ == "__main__": unittest.main() diff --git a/test/test_topology.py b/test/test_topology.py index a309d622ab..d7bae9229f 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -18,27 +18,23 @@ sys.path[0:0] = [""] -from bson.objectid import ObjectId +from test import client_knobs, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, wait_until +from bson.objectid import ObjectId from pymongo import common -from pymongo.errors import (AutoReconnect, - ConfigurationError, - ConnectionFailure) +from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure from pymongo.hello import Hello, HelloCompat from pymongo.monitor import Monitor from pymongo.pool import PoolOptions from pymongo.read_preferences import ReadPreference, Secondary from pymongo.server_description import ServerDescription -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.settings import TopologySettings -from pymongo.topology import (_ErrorContext, - Topology) +from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE -from test import client_knobs, unittest -from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool, wait_until class SetNameDiscoverySettings(TopologySettings): @@ -46,20 +42,20 @@ def get_topology_type(self): return TOPOLOGY_TYPE.ReplicaSetNoPrimary -address = ('a', 27017) +address = ("a", 27017) def create_mock_topology( - seeds=None, - replica_set_name=None, - monitor_class=DummyMonitor, - direct_connection=False): - partitioned_seeds = list(map(common.partition_node, seeds or ['a'])) + seeds=None, replica_set_name=None, monitor_class=DummyMonitor, direct_connection=False +): + partitioned_seeds = list(map(common.partition_node, seeds or ["a"])) topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, pool_class=MockPool, - monitor_class=monitor_class, direct_connection=direct_connection) + monitor_class=monitor_class, + direct_connection=direct_connection, + ) t = Topology(topology_settings) t.open() @@ -67,8 +63,7 @@ def create_mock_topology( def got_hello(topology, server_address, hello_response): - server_description = ServerDescription( - server_address, Hello(hello_response), 0) + server_description = ServerDescription(server_address, Hello(hello_response), 0) topology.on_change(server_description) @@ -108,7 +103,7 @@ def test_timeout_configuration(self): t.open() # Get the default server. - server = t.get_server_by_address(('localhost', 27017)) + server = t.get_server_by_address(("localhost", 27017)) # The pool for application operations obeys our settings. self.assertEqual(1, server._pool.opts.connect_timeout) @@ -127,55 +122,53 @@ def test_timeout_configuration(self): class TestSingleServerTopology(TopologyTest): def test_direct_connection(self): for server_type, hello_response in [ - (SERVER_TYPE.RSPrimary, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.RSSecondary, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.Mongos, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'msg': 'isdbgrid', - 'maxWireVersion': 6}), - - (SERVER_TYPE.RSArbiter, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'arbiterOnly': True, - 'hosts': ['a'], - 'setName': 'rs', - 'maxWireVersion': 6}), - - (SERVER_TYPE.Standalone, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'maxWireVersion': 6}), - + ( + SERVER_TYPE.RSPrimary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + ( + SERVER_TYPE.RSSecondary, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + ( + SERVER_TYPE.Mongos, + {"ok": 1, HelloCompat.LEGACY_CMD: True, "msg": "isdbgrid", "maxWireVersion": 6}, + ), + ( + SERVER_TYPE.RSArbiter, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "arbiterOnly": True, + "hosts": ["a"], + "setName": "rs", + "maxWireVersion": 6, + }, + ), + (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: True, "maxWireVersion": 6}), # A "slave" in a master-slave deployment. # This replication type was removed in MongoDB # 4.0. - (SERVER_TYPE.Standalone, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'maxWireVersion': 6}), + (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: False, "maxWireVersion": 6}), ]: t = create_mock_topology(direct_connection=True) # Can't select a server while the only server is of type Unknown. - with self.assertRaisesRegex(ConnectionFailure, - 'No servers found yet'): - t.select_servers(any_server_selector, - server_selection_timeout=0) + with self.assertRaisesRegex(ConnectionFailure, "No servers found yet"): + t.select_servers(any_server_selector, server_selection_timeout=0) got_hello(t, address, hello_response) @@ -189,12 +182,13 @@ def test_direct_connection(self): # Topology type single is always readable and writable regardless # of server type or state. - self.assertEqual(t.description.topology_type_name, 'Single') + self.assertEqual(t.description.topology_type_name, "Single") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) self.assertTrue(t.description.has_readable_server(Secondary())) - self.assertTrue(t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'does-not-exist'}]))) + self.assertTrue( + t.description.has_readable_server(Secondary(tag_sets=[{"tag": "does-not-exist"}])) + ) def test_reopen(self): t = create_mock_topology() @@ -206,7 +200,7 @@ def test_reopen(self): def test_unavailable_seed(self): t = create_mock_topology() disconnected(t, address) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) def test_round_trip_time(self): round_trip_time = 125 @@ -215,10 +209,9 @@ def test_round_trip_time(self): class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if available: - return (Hello({'ok': 1, 'maxWireVersion': 6}), - round_trip_time) + return (Hello({"ok": 1, "maxWireVersion": 6}), round_trip_time) else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) @@ -237,14 +230,13 @@ def _check_with_socket(self, *args, **kwargs): def raises_err(): try: - t.select_server(writable_server_selector, - server_selection_timeout=0.1) + t.select_server(writable_server_selector, server_selection_timeout=0.1) except ConnectionFailure: return True else: return False - wait_until(raises_err, 'discover server is down') + wait_until(raises_err, "discover server is down") self.assertIsNone(s.description.round_trip_time) # Bring it back, RTT is now 20 milliseconds. @@ -254,8 +246,10 @@ def raises_err(): def new_average(): # We reset the average to the most recent measurement. description = s.description - return (description.round_trip_time is not None - and round(abs(20 - description.round_trip_time), 7) == 0) + return ( + description.round_trip_time is not None + and round(abs(20 - description.round_trip_time), 7) == 0 + ) tries = 0 while not new_average(): @@ -267,275 +261,289 @@ def new_average(): class TestMultiServerTopology(TopologyTest): def test_readable_writable(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) - self.assertEqual( - t.description.topology_type_name, 'ReplicaSetWithPrimary') + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertFalse( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) - - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': False, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": False, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) - self.assertEqual( - t.description.topology_type_name, 'ReplicaSetNoPrimary') + self.assertEqual(t.description.topology_type_name, "ReplicaSetNoPrimary") self.assertFalse(t.description.has_writable_server()) self.assertFalse(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertFalse( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) - - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'tags': {'tag': 'exists'}}) - - self.assertEqual( - t.description.topology_type_name, 'ReplicaSetWithPrimary') + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertFalse(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) + + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "tags": {"tag": "exists"}, + }, + ) + + self.assertEqual(t.description.topology_type_name, "ReplicaSetWithPrimary") self.assertTrue(t.description.has_writable_server()) self.assertTrue(t.description.has_readable_server()) - self.assertTrue( - t.description.has_readable_server(Secondary())) - self.assertTrue( - t.description.has_readable_server( - Secondary(tag_sets=[{'tag': 'exists'}]))) + self.assertTrue(t.description.has_readable_server(Secondary())) + self.assertTrue(t.description.has_readable_server(Secondary(tag_sets=[{"tag": "exists"}]))) def test_close(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertTrue(get_monitor(t, 'a').opened) - self.assertTrue(get_monitor(t, 'b').opened) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertTrue(get_monitor(t, "a").opened) + self.assertTrue(get_monitor(t, "b").opened) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) t.close() self.assertEqual(2, len(t.description.server_descriptions())) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertFalse(get_monitor(t, 'a').opened) - self.assertFalse(get_monitor(t, 'b').opened) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) # A closed topology should not be updated when receiving a hello. - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b', 'c']}) + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b", "c"]}, + ) self.assertEqual(2, len(t.description.server_descriptions())) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertFalse(get_monitor(t, 'a').opened) - self.assertFalse(get_monitor(t, 'b').opened) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertFalse(get_monitor(t, "a").opened) + self.assertFalse(get_monitor(t, "b").opened) # Server c should not have been added. - self.assertEqual(None, get_server(t, 'c')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) + self.assertEqual(None, get_server(t, "c")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) def test_handle_error(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True, None) - t.handle_error(('a', 27017), errctx) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, - t.description.topology_type) - - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b']}) - - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) - - t.handle_error(('b', 27017), errctx) - self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a')) - self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b')) - self.assertEqual('rs', t.description.replica_set_name) - self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, - t.description.topology_type) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + }, + ) + + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("a", 27017), errctx) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary, t.description.topology_type) + + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"]}, + ) + + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) + + t.handle_error(("b", 27017), errctx) + self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, "a")) + self.assertEqual(SERVER_TYPE.Unknown, get_type(t, "b")) + self.assertEqual("rs", t.description.replica_set_name) + self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary, t.description.topology_type) def test_handle_error_removed_server(self): - t = create_mock_topology(replica_set_name='rs') + t = create_mock_topology(replica_set_name="rs") # No error resetting a server not in the TopologyDescription. - errctx = _ErrorContext(AutoReconnect('mock'), 0, 0, True, None) - t.handle_error(('b', 27017), errctx) + errctx = _ErrorContext(AutoReconnect("mock"), 0, 0, True, None) + t.handle_error(("b", 27017), errctx) # Server was *not* added as type Unknown. - self.assertFalse(t.has_server(('b', 27017))) + self.assertFalse(t.has_server(("b", 27017))) def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], - pool_class=MockPool, - monitor_class=DummyMonitor) + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + ) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetWithPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) # Another response from the primary. Tests the code that processes # primary response when topology type is already ReplicaSetWithPrimary. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) # No change. - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetWithPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetWithPrimary) def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], - pool_class=MockPool, - monitor_class=DummyMonitor) + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + ) t = Topology(topology_settings) self.assertEqual(t.description.replica_set_name, None) - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) t.open() - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) - self.assertEqual(t.description.replica_set_name, 'rs') - self.assertEqual(t.description.topology_type, - TOPOLOGY_TYPE.ReplicaSetNoPrimary) + self.assertEqual(t.description.replica_set_name, "rs") + self.assertEqual(t.description.topology_type, TOPOLOGY_TYPE.ReplicaSetNoPrimary) def test_wire_version(self): - t = create_mock_topology(replica_set_name='rs') + t = create_mock_topology(replica_set_name="rs") t.description.check_compatible() # No error. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) # Use defaults. server = t.get_server_by_address(address) self.assertEqual(server.description.min_wire_version, 0) self.assertEqual(server.description.max_wire_version, 0) - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 1, - 'maxWireVersion': 6}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 1, + "maxWireVersion": 6, + }, + ) self.assertEqual(server.description.min_wire_version, 1) self.assertEqual(server.description.max_wire_version, 6) t.select_servers(any_server_selector) # Incompatible. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 21, - 'maxWireVersion': 22}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 21, + "maxWireVersion": 22, + }, + ) try: t.select_servers(any_server_selector) @@ -544,19 +552,24 @@ def test_wire_version(self): self.assertEqual( str(e), "Server at a:27017 requires wire version 21, but this version " - "of PyMongo only supports up to %d." - % (common.MAX_SUPPORTED_WIRE_VERSION,)) + "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), + ) else: - self.fail('No error with incompatible wire version') + self.fail("No error with incompatible wire version") # Incompatible. - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a'], - 'minWireVersion': 0, - 'maxWireVersion': 0}) + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a"], + "minWireVersion": 0, + "maxWireVersion": 0, + }, + ) try: t.select_servers(any_server_selector) @@ -566,57 +579,72 @@ def test_wire_version(self): str(e), "Server at a:27017 reports wire version 0, but this version " "of PyMongo requires at least %d (MongoDB %s)." - % (common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION)) + % (common.MIN_SUPPORTED_WIRE_VERSION, common.MIN_SUPPORTED_SERVER_VERSION), + ) else: - self.fail('No error with incompatible wire version') + self.fail("No error with incompatible wire version") def test_max_write_batch_size(self): - t = create_mock_topology(seeds=['a', 'b'], replica_set_name='rs') + t = create_mock_topology(seeds=["a", "b"], replica_set_name="rs") def write_batch_size(): s = t.select_server(writable_server_selector) return s.description.max_write_batch_size - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 1}) - - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 2}) + got_hello( + t, + ("a", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 1, + }, + ) + + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 2, + }, + ) # Uses primary's max batch size. self.assertEqual(1, write_batch_size()) # b becomes primary. - got_hello(t, ('b', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'b'], - 'maxWireVersion': 6, - 'maxWriteBatchSize': 2}) + got_hello( + t, + ("b", 27017), + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "setName": "rs", + "hosts": ["a", "b"], + "maxWireVersion": 6, + "maxWriteBatchSize": 2, + }, + ) self.assertEqual(2, write_batch_size()) def test_topology_repr(self): - t = create_mock_topology(replica_set_name='rs') + t = create_mock_topology(replica_set_name="rs") self.addCleanup(t.close) - got_hello(t, ('a', 27017), { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a', 'c', 'b']}) + got_hello( + t, + ("a", 27017), + {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "c", "b"]}, + ) self.assertEqual( repr(t.description), ", " "]>" % (t._topology_id,)) + " rtt: None>]>" % (t._topology_id,), + ) def test_unexpected_load_balancer(self): # Note: This behavior should not be reachable in practice but we # should handle it gracefully nonetheless. See PYTHON-2791. # Load balancers are included in topology with a single seed. - t = create_mock_topology(seeds=['a']) - mock_lb_response = {'ok': 1, 'msg': 'isdbgrid', - 'serviceId': ObjectId(), 'maxWireVersion': 13} - got_hello(t, ('a', 27017), mock_lb_response) + t = create_mock_topology(seeds=["a"]) + mock_lb_response = { + "ok": 1, + "msg": "isdbgrid", + "serviceId": ObjectId(), + "maxWireVersion": 13, + } + got_hello(t, ("a", 27017), mock_lb_response) sds = t.description.server_descriptions() - self.assertIn(('a', 27017), sds) - self.assertEqual(sds[('a', 27017)].server_type_name, 'LoadBalancer') - self.assertEqual(t.description.topology_type_name, 'Single') + self.assertIn(("a", 27017), sds) + self.assertEqual(sds[("a", 27017)].server_type_name, "LoadBalancer") + self.assertEqual(t.description.topology_type_name, "Single") self.assertTrue(t.description.has_writable_server()) # Load balancers are removed from a topology with multiple seeds. - t = create_mock_topology(seeds=['a', 'b']) - got_hello(t, ('a', 27017), mock_lb_response) - self.assertNotIn(('a', 27017), t.description.server_descriptions()) - self.assertEqual(t.description.topology_type_name, 'Unknown') + t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), mock_lb_response) + self.assertNotIn(("a", 27017), t.description.server_descriptions()) + self.assertEqual(t.description.topology_type_name, "Unknown") def wait_for_primary(topology): @@ -663,7 +696,7 @@ def get_primary(): except ConnectionFailure: return None - return wait_until(get_primary, 'find primary') + return wait_until(get_primary, "find primary") class TestTopologyErrors(TopologyTest): @@ -677,9 +710,9 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): hello_count[0] += 1 if hello_count[0] == 1: - return Hello({'ok': 1, 'maxWireVersion': 6}), 0 + return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect('mock monitor error') + raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) @@ -699,17 +732,15 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): hello_count[0] += 1 if hello_count[0] in (1, 3): - return Hello({'ok': 1, 'maxWireVersion': 6}), 0 + return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect( - 'mock monitor error #%s' % (hello_count[0],)) + raise AutoReconnect("mock monitor error #%s" % (hello_count[0],)) t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) server = wait_for_primary(t) self.assertEqual(1, hello_count[0]) - self.assertEqual(SERVER_TYPE.Standalone, - server.description.server_type) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) # Second hello call, server is marked Unknown, then the monitor # immediately runs a retry (third hello). @@ -718,12 +749,11 @@ def _check_with_socket(self, *args, **kwargs): # after the failed check triggered by request_check_all. Wait until # the server becomes known again. server = t.select_server(writable_server_selector, 0.250) - self.assertEqual(SERVER_TYPE.Standalone, - server.description.server_type) + self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) self.assertEqual(3, hello_count[0]) def test_internal_monitor_error(self): - exception = AssertionError('internal error') + exception = AssertionError("internal error") class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): @@ -731,9 +761,8 @@ def _check_with_socket(self, *args, **kwargs): t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) - with self.assertRaisesRegex(ConnectionFailure, 'internal error'): - t.select_server(any_server_selector, - server_selection_timeout=0.5) + with self.assertRaisesRegex(ConnectionFailure, "internal error"): + t.select_server(any_server_selector, server_selection_timeout=0.5) class TestServerSelectionErrors(TopologyTest): @@ -744,69 +773,80 @@ def assertMessage(self, message, topology, selector=any_server_selector): self.assertIn(message, str(context.exception)) def test_no_primary(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'rs', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "rs", + "hosts": ["a"], + }, + ) - self.assertMessage('No replica set members match selector "Primary()"', - t, ReadPreference.PRIMARY) + self.assertMessage( + 'No replica set members match selector "Primary()"', t, ReadPreference.PRIMARY + ) - self.assertMessage('No primary available for writes', - t, writable_server_selector) + self.assertMessage("No primary available for writes", t, writable_server_selector) def test_no_secondary(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: True, - 'setName': 'rs', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, address, {"ok": 1, HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"]} + ) self.assertMessage( - 'No replica set members match selector' + "No replica set members match selector" ' "Secondary(tag_sets=None, max_staleness=-1, hedge=None)"', - t, ReadPreference.SECONDARY) + t, + ReadPreference.SECONDARY, + ) self.assertMessage( "No replica set members match selector" " \"Secondary(tag_sets=[{'dc': 'ny'}], max_staleness=-1, " - "hedge=None)\"", - t, Secondary(tag_sets=[{'dc': 'ny'}])) + 'hedge=None)"', + t, + Secondary(tag_sets=[{"dc": "ny"}]), + ) def test_bad_replica_set_name(self): - t = create_mock_topology(replica_set_name='rs') - got_hello(t, address, { - 'ok': 1, - HelloCompat.LEGACY_CMD: False, - 'secondary': True, - 'setName': 'wrong', - 'hosts': ['a']}) + t = create_mock_topology(replica_set_name="rs") + got_hello( + t, + address, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "secondary": True, + "setName": "wrong", + "hosts": ["a"], + }, + ) - self.assertMessage( - 'No replica set members available for replica set name "rs"', t) + self.assertMessage('No replica set members available for replica set name "rs"', t) def test_multiple_standalones(self): # Standalones are removed from a topology with multiple seeds. - t = create_mock_topology(seeds=['a', 'b']) - got_hello(t, ('a', 27017), {'ok': 1}) - got_hello(t, ('b', 27017), {'ok': 1}) - self.assertMessage('No servers available', t) + t = create_mock_topology(seeds=["a", "b"]) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No servers available", t) def test_no_mongoses(self): # Standalones are removed from a topology with multiple seeds. - t = create_mock_topology(seeds=['a', 'b']) + t = create_mock_topology(seeds=["a", "b"]) # Discover a mongos and change topology type to Sharded. - got_hello(t, ('a', 27017), {'ok': 1, 'msg': 'isdbgrid'}) + got_hello(t, ("a", 27017), {"ok": 1, "msg": "isdbgrid"}) # Oops, both servers are standalone now. Remove them. - got_hello(t, ('a', 27017), {'ok': 1}) - got_hello(t, ('b', 27017), {'ok': 1}) - self.assertMessage('No mongoses available', t) + got_hello(t, ("a", 27017), {"ok": 1}) + got_hello(t, ("b", 27017), {"ok": 1}) + self.assertMessage("No mongoses available", t) if __name__ == "__main__": diff --git a/test/test_transactions.py b/test/test_transactions.py index 169a2ad03d..34dbbba34b 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -16,35 +16,38 @@ import os import sys - from io import BytesIO sys.path[0:0] = [""] -from pymongo import client_session, WriteConcern +from test import client_context, unittest +from test.utils import ( + OvertCommandListener, + TestCreator, + rs_client, + single_client, + wait_until, +) +from test.utils_spec_runner import SpecRunner + +from gridfs import GridFS, GridFSBucket +from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions -from pymongo.errors import (CollectionInvalid, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure) +from pymongo.errors import ( + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, +) from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from gridfs import GridFS, GridFSBucket - -from test import unittest, client_context -from test.utils import (rs_client, single_client, - wait_until, OvertCommandListener, - TestCreator) -from test.utils_spec_runner import SpecRunner - # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions', 'legacy') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "legacy") -_TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG') +_TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. With 2 mongoses, @@ -59,7 +62,7 @@ def setUpClass(cls): super(TransactionsBase, cls).setUpClass() if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(single_client('%s:%s' % address)) + cls.mongos_clients.append(single_client("%s:%s" % address)) @classmethod def tearDownClass(cls): @@ -69,14 +72,17 @@ def tearDownClass(cls): def maybe_skip_scenario(self, test): super(TransactionsBase, self).maybe_skip_scenario(test) - if ('secondary' in self.id() and - not client_context.is_mongos and - not client_context.has_secondaries): - raise unittest.SkipTest('No secondaries') + if ( + "secondary" in self.id() + and not client_context.is_mongos + and not client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") class TestTransactions(TransactionsBase): RUN_ON_SERVERLESS = True + @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() @@ -85,23 +91,23 @@ def test_transaction_options_validation(self): self.assertIsNone(default_options.read_preference) self.assertIsNone(default_options.max_commit_time_ms) # No error when valid options are provided. - TransactionOptions(read_concern=ReadConcern(), - write_concern=WriteConcern(), - read_preference=ReadPreference.PRIMARY, - max_commit_time_ms=10000) + TransactionOptions( + read_concern=ReadConcern(), + write_concern=WriteConcern(), + read_preference=ReadPreference.PRIMARY, + max_commit_time_ms=10000, + ) with self.assertRaisesRegex(TypeError, "read_concern must be "): TransactionOptions(read_concern={}) # type: ignore with self.assertRaisesRegex(TypeError, "write_concern must be "): TransactionOptions(write_concern={}) # type: ignore with self.assertRaisesRegex( - ConfigurationError, - "transactions do not support unacknowledged write concern"): + ConfigurationError, "transactions do not support unacknowledged write concern" + ): TransactionOptions(write_concern=WriteConcern(w=0)) - with self.assertRaisesRegex( - TypeError, "is not valid for read_preference"): + with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): TransactionOptions(read_preference={}) # type: ignore - with self.assertRaisesRegex( - TypeError, "max_commit_time_ms must be an integer or None"): + with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): TransactionOptions(max_commit_time_ms="10000") # type: ignore @client_context.require_transactions @@ -115,16 +121,11 @@ def test_transaction_write_concern_override(self): with client.start_session() as s: with s.start_transaction(write_concern=WriteConcern(w=1)): self.assertTrue(coll.insert_one({}, session=s).acknowledged) - self.assertTrue(coll.insert_many( - [{}, {}], session=s).acknowledged) - self.assertTrue(coll.bulk_write( - [InsertOne({})], session=s).acknowledged) - self.assertTrue(coll.replace_one( - {}, {}, session=s).acknowledged) - self.assertTrue(coll.update_one( - {}, {"$set": {"a": 1}}, session=s).acknowledged) - self.assertTrue(coll.update_many( - {}, {"$set": {"a": 1}}, session=s).acknowledged) + self.assertTrue(coll.insert_many([{}, {}], session=s).acknowledged) + self.assertTrue(coll.bulk_write([InsertOne({})], session=s).acknowledged) + self.assertTrue(coll.replace_one({}, {}, session=s).acknowledged) + self.assertTrue(coll.update_one({}, {"$set": {"a": 1}}, session=s).acknowledged) + self.assertTrue(coll.update_many({}, {"$set": {"a": 1}}, session=s).acknowledged) self.assertTrue(coll.delete_one({}, session=s).acknowledged) self.assertTrue(coll.delete_many({}, session=s).acknowledged) coll.find_one_and_delete({}, session=s) @@ -133,27 +134,29 @@ def test_transaction_write_concern_override(self): unsupported_txn_writes: list = [ (client.drop_database, [db.name], {}), - (db.drop_collection, ['collection'], {}), + (db.drop_collection, ["collection"], {}), (coll.drop, [], {}), - (coll.rename, ['collection2'], {}), + (coll.rename, ["collection2"], {}), # Drop collection2 between tests of "rename", above. - (coll.database.drop_collection, ['collection2'], {}), - (coll.create_indexes, [[IndexModel('a')]], {}), - (coll.create_index, ['a'], {}), - (coll.drop_index, ['a_1'], {}), + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), (coll.drop_indexes, [], {}), (coll.aggregate, [[{"$out": "aggout"}]], {}), ] # Creating a collection in a transaction requires MongoDB 4.4+. if client_context.version < (4, 3, 4): - unsupported_txn_writes.extend([ - (db.create_collection, ['collection'], {}), - ]) + unsupported_txn_writes.extend( + [ + (db.create_collection, ["collection"], {}), + ] + ) for op in unsupported_txn_writes: op, args, kwargs = op with client.start_session() as s: - kwargs['session'] = s + kwargs["session"] = s s.start_transaction(write_concern=WriteConcern(w=1)) with self.assertRaises(OperationFailure): op(*args, **kwargs) @@ -164,8 +167,7 @@ def test_transaction_write_concern_override(self): def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), - localThresholdMS=1000) + client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. @@ -193,8 +195,7 @@ def test_unpin_for_next_transaction(self): def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), - localThresholdMS=1000) + client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. @@ -255,46 +256,71 @@ def gridfs_find(*args, **kwargs): return gfs.find(*args, **kwargs).next() def gridfs_open_upload_stream(*args, **kwargs): - bucket.open_upload_stream(*args, **kwargs).write(b'1') + bucket.open_upload_stream(*args, **kwargs).write(b"1") gridfs_ops = [ - (gfs.put, (b'123',)), + (gfs.put, (b"123",)), (gfs.get, (1,)), - (gfs.get_version, ('name',)), - (gfs.get_last_version, ('name',)), - (gfs.delete, (1, )), + (gfs.get_version, ("name",)), + (gfs.get_last_version, ("name",)), + (gfs.delete, (1,)), (gfs.list, ()), (gfs.find_one, ()), (gridfs_find, ()), (gfs.exists, ()), - (gridfs_open_upload_stream, ('name',)), - (bucket.upload_from_stream, ('name', b'data',)), - (bucket.download_to_stream, (1, BytesIO(),)), - (bucket.download_to_stream_by_name, ('name', BytesIO(),)), + (gridfs_open_upload_stream, ("name",)), + ( + bucket.upload_from_stream, + ( + "name", + b"data", + ), + ), + ( + bucket.download_to_stream, + ( + 1, + BytesIO(), + ), + ), + ( + bucket.download_to_stream_by_name, + ( + "name", + BytesIO(), + ), + ), (bucket.delete, (1,)), (bucket.find, ()), (bucket.open_download_stream, (1,)), - (bucket.open_download_stream_by_name, ('name',)), - (bucket.rename, (1, 'new-name',)), + (bucket.open_download_stream_by_name, ("name",)), + ( + bucket.rename, + ( + 1, + "new-name", + ), + ), ] with client.start_session() as s, s.start_transaction(): for op, args in gridfs_ops: with self.assertRaisesRegex( - InvalidOperation, - 'GridFS does not support multi-document transactions', + InvalidOperation, + "GridFS does not support multi-document transactions", ): op(*args, session=s) # type: ignore # Require 4.2+ for large (16MB+) transactions. @client_context.require_version_min(4, 2) @client_context.require_transactions - @unittest.skipIf(sys.platform == 'win32', - 'Our Windows machines are too slow to pass this test') + @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") def test_transaction_starts_with_batched_write(self): - if 'PyPy' in sys.version and client_context.tls: - self.skipTest('PYTHON-2937 PyPy is so slow sending large ' - 'messages over TLS that this test fails') + if "PyPy" in sys.version and client_context.tls: + self.skipTest( + "PYTHON-2937 PyPy is so slow sending large " + "messages over TLS that this test fails" + ) # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() @@ -304,27 +330,29 @@ def test_transaction_starts_with_batched_write(self): listener.reset() self.addCleanup(client.close) self.addCleanup(coll.drop) - large_str = '\0'*(10*1024*1024) - ops = [InsertOne({'a': large_str}) for _ in range(10)] + large_str = "\0" * (10 * 1024 * 1024) + ops = [InsertOne({"a": large_str}) for _ in range(10)] with client.start_session() as session: with session.start_transaction(): coll.bulk_write(ops, session=session) # Assert commands were constructed properly. - self.assertEqual(['insert', 'insert', 'insert', 'commitTransaction'], - listener.started_command_names()) - first_cmd = listener.results['started'][0].command - self.assertTrue(first_cmd['startTransaction']) - lsid = first_cmd['lsid'] - txn_number = first_cmd['txnNumber'] - for event in listener.results['started'][1:]: - self.assertNotIn('startTransaction', event.command) - self.assertEqual(lsid, event.command['lsid']) - self.assertEqual(txn_number, event.command['txnNumber']) + self.assertEqual( + ["insert", "insert", "insert", "commitTransaction"], listener.started_command_names() + ) + first_cmd = listener.results["started"][0].command + self.assertTrue(first_cmd["startTransaction"]) + lsid = first_cmd["lsid"] + txn_number = first_cmd["txnNumber"] + for event in listener.results["started"][1:]: + self.assertNotIn("startTransaction", event.command) + self.assertEqual(lsid, event.command["lsid"]) + self.assertEqual(txn_number, event.command["txnNumber"]) self.assertEqual(10, coll.count_documents({})) class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" + def __init__(self, mock_timeout): self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT self.mock_timeout = mock_timeout @@ -338,15 +366,18 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(TransactionsBase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), - 'transactions-convenient-api') + TEST_PATH = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api" + ) @client_context.require_transactions def test_callback_raises_custom_error(self): - class _MyException(Exception):pass + class _MyException(Exception): + pass def raise_error(_): raise _MyException() + with self.client.start_session() as s: with self.assertRaises(_MyException): s.with_transaction(raise_error) @@ -354,17 +385,19 @@ def raise_error(_): @client_context.require_transactions def test_callback_returns_value(self): def callback(_): - return 'Foo' + return "Foo" + with self.client.start_session() as s: - self.assertEqual(s.with_transaction(callback), 'Foo') + self.assertEqual(s.with_transaction(callback), "Foo") self.db.test.insert_one({}) def callback2(session): self.db.test.insert_one({}, session=session) - return 'Foo' + return "Foo" + with self.client.start_session() as s: - self.assertEqual(s.with_transaction(callback2), 'Foo') + self.assertEqual(s.with_transaction(callback2), "Foo") @client_context.require_transactions def test_callback_not_retried_after_timeout(self): @@ -376,13 +409,13 @@ def test_callback_not_retried_after_timeout(self): def callback(session): coll.insert_one({}, session=session) err: dict = { - 'ok': 0, - 'errmsg': 'Transaction 7819 has been aborted.', - 'code': 251, - 'codeName': 'NoSuchTransaction', - 'errorLabels': ['TransientTransactionError'], + "ok": 0, + "errmsg": "Transaction 7819 has been aborted.", + "code": 251, + "codeName": "NoSuchTransaction", + "errorLabels": ["TransientTransactionError"], } - raise OperationFailure(err['errmsg'], err['code'], err) + raise OperationFailure(err["errmsg"], err["code"], err) # Create the collection. coll.insert_one({}) @@ -392,8 +425,7 @@ def callback(session): with self.assertRaises(OperationFailure): s.with_transaction(callback) - self.assertEqual(listener.started_command_names(), - ['insert', 'abortTransaction']) + self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) @client_context.require_test_commands @client_context.require_transactions @@ -408,14 +440,17 @@ def callback(session): # Create the collection. coll.insert_one({}) - self.set_fail_point({ - 'configureFailPoint': 'failCommand', 'mode': {'times': 1}, - 'data': { - 'failCommands': ['commitTransaction'], - 'errorCode': 251, # NoSuchTransaction - }}) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': 'failCommand', 'mode': 'off'}) + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["commitTransaction"], + "errorCode": 251, # NoSuchTransaction + }, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) listener.results.clear() with client.start_session() as s: @@ -423,8 +458,7 @@ def callback(session): with self.assertRaises(OperationFailure): s.with_transaction(callback) - self.assertEqual(listener.started_command_names(), - ['insert', 'commitTransaction']) + self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) @client_context.require_test_commands @client_context.require_transactions @@ -439,13 +473,14 @@ def callback(session): # Create the collection. coll.insert_one({}) - self.set_fail_point({ - 'configureFailPoint': 'failCommand', 'mode': {'times': 2}, - 'data': { - 'failCommands': ['commitTransaction'], - 'closeConnection': True}}) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': 'failCommand', 'mode': 'off'}) + self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, + } + ) + self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) listener.results.clear() with client.start_session() as s: @@ -455,8 +490,9 @@ def callback(session): # One insert for the callback and two commits (includes the automatic # retry). - self.assertEqual(listener.started_command_names(), - ['insert', 'commitTransaction', 'commitTransaction']) + self.assertEqual( + listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] + ) # Tested here because this supports Motor's convenient transactions API. @client_context.require_transactions @@ -489,6 +525,7 @@ def test_in_transaction_property(self): # Using a callback def callback(session): self.assertTrue(session.in_transaction) + with client.start_session() as s: self.assertFalse(s.in_transaction) s.with_transaction(callback) @@ -508,8 +545,9 @@ def run_scenario(self): test_creator.create_tests() -TestCreator(create_test, TestTransactionsConvenientAPI, - TestTransactionsConvenientAPI.TEST_PATH).create_tests() +TestCreator( + create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH +).create_tests() if __name__ == "__main__": diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py index 37e8d06153..4f3aa233fa 100644 --- a/test/test_transactions_unified.py +++ b/test/test_transactions_unified.py @@ -23,8 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'transactions', 'unified') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_unified_format.py b/test/test_unified_format.py index 74770b6f3a..e36959a224 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -17,35 +17,39 @@ sys.path[0:0] = [""] -from bson import ObjectId - from test import unittest -from test.unified_format import generate_test_classes, MatchEvaluatorUtil +from test.unified_format import MatchEvaluatorUtil, generate_test_classes +from bson import ObjectId -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'unified-test-format') +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unified-test-format") -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'valid-pass'), - module=__name__, - class_name_prefix='UnifiedTestFormat', - expected_failures=[ - 'Client side error in command starting transaction', # PYTHON-1894 - ], - RUN_ON_SERVERLESS=False)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + RUN_ON_SERVERLESS=False, + ) +) -globals().update(generate_test_classes( - os.path.join(_TEST_PATH, 'valid-fail'), - module=__name__, - class_name_prefix='UnifiedTestFormat', - bypass_test_generation_errors=True, - expected_failures=[ - '.*', # All tests expected to fail - ], - RUN_ON_SERVERLESS=False)) +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + RUN_ON_SERVERLESS=False, + ) +) class TestMatchEvaluatorUtil(unittest.TestCase): @@ -53,22 +57,27 @@ def setUp(self): self.match_evaluator = MatchEvaluatorUtil(self) def test_unsetOrMatches(self): - spec = {'$$unsetOrMatches': {'y': {'$$unsetOrMatches': 2}}} - for actual in [{}, {'y': 2}, None]: + spec = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: self.match_evaluator.match_result(spec, actual) - spec = {'x': {'$$unsetOrMatches': {'y': {'$$unsetOrMatches': 2}}}} - for actual in [{}, {'x': {}}, {'x': {'y': 2}}]: + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: self.match_evaluator.match_result(spec, actual) def test_type(self): self.match_evaluator.match_result( - {'operationType': 'insert', - 'ns': {'db': 'change-stream-tests', 'coll': 'test'}, - 'fullDocument': {'_id': {'$$type': 'objectId'}, 'x': 1}}, - {'operationType': 'insert', - 'fullDocument': {'_id': ObjectId('5fc93511ac93941052098f0c'), 'x': 1}, - 'ns': {'db': 'change-stream-tests', 'coll': 'test'}}) + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) if __name__ == "__main__": diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index ed5291d716..cfe21169fd 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -21,444 +21,431 @@ sys.path[0:0] = [""] +from test import unittest + from bson.binary import JAVA_LEGACY from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.uri_parser import (parse_userinfo, - split_hosts, - split_options, - parse_uri) -from test import unittest +from pymongo.uri_parser import parse_uri, parse_userinfo, split_hosts, split_options class TestURI(unittest.TestCase): - def test_validate_userinfo(self): - self.assertRaises(InvalidURI, parse_userinfo, - 'foo@') - self.assertRaises(InvalidURI, parse_userinfo, - ':password') - self.assertRaises(InvalidURI, parse_userinfo, - 'fo::o:p@ssword') - self.assertRaises(InvalidURI, parse_userinfo, ':') - self.assertTrue(parse_userinfo('user:password')) - self.assertEqual(('us:r', 'p@ssword'), - parse_userinfo('us%3Ar:p%40ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us+er:p+ssword')) - self.assertEqual(('us er', 'p ssword'), - parse_userinfo('us%20er:p%20ssword')) - self.assertEqual(('us+er', 'p+ssword'), - parse_userinfo('us%2Ber:p%2Bssword')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM')) - self.assertEqual(('dev1@FOO.COM', ''), - parse_userinfo('dev1%40FOO.COM:')) + self.assertRaises(InvalidURI, parse_userinfo, "foo@") + self.assertRaises(InvalidURI, parse_userinfo, ":password") + self.assertRaises(InvalidURI, parse_userinfo, "fo::o:p@ssword") + self.assertRaises(InvalidURI, parse_userinfo, ":") + self.assertTrue(parse_userinfo("user:password")) + self.assertEqual(("us:r", "p@ssword"), parse_userinfo("us%3Ar:p%40ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us+er:p+ssword")) + self.assertEqual(("us er", "p ssword"), parse_userinfo("us%20er:p%20ssword")) + self.assertEqual(("us+er", "p+ssword"), parse_userinfo("us%2Ber:p%2Bssword")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM")) + self.assertEqual(("dev1@FOO.COM", ""), parse_userinfo("dev1%40FOO.COM:")) def test_split_hosts(self): - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,') - self.assertRaises(ConfigurationError, split_hosts, - ',localhost:27017') - self.assertRaises(ConfigurationError, split_hosts, - 'localhost:27017,,localhost:27018') - self.assertEqual([('localhost', 27017), ('example.com', 27017)], - split_hosts('localhost,example.com')) - self.assertEqual([('localhost', 27018), ('example.com', 27019)], - split_hosts('localhost:27018,example.com:27019')) - self.assertEqual([('/tmp/mongodb-27017.sock', None)], - split_hosts('/tmp/mongodb-27017.sock')) - self.assertEqual([('/tmp/mongodb-27017.sock', None), - ('example.com', 27017)], - split_hosts('/tmp/mongodb-27017.sock,' - 'example.com:27017')) - self.assertEqual([('example.com', 27017), - ('/tmp/mongodb-27017.sock', None)], - split_hosts('example.com:27017,' - '/tmp/mongodb-27017.sock')) - self.assertRaises(ValueError, split_hosts, '::1', 27017) - self.assertRaises(ValueError, split_hosts, '[::1:27017') - self.assertRaises(ValueError, split_hosts, '::1') - self.assertRaises(ValueError, split_hosts, '::1]:27017') - self.assertEqual([('::1', 27017)], split_hosts('[::1]:27017')) - self.assertEqual([('::1', 27017)], split_hosts('[::1]')) + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,") + self.assertRaises(ConfigurationError, split_hosts, ",localhost:27017") + self.assertRaises(ConfigurationError, split_hosts, "localhost:27017,,localhost:27018") + self.assertEqual( + [("localhost", 27017), ("example.com", 27017)], split_hosts("localhost,example.com") + ) + self.assertEqual( + [("localhost", 27018), ("example.com", 27019)], + split_hosts("localhost:27018,example.com:27019"), + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None)], split_hosts("/tmp/mongodb-27017.sock") + ) + self.assertEqual( + [("/tmp/mongodb-27017.sock", None), ("example.com", 27017)], + split_hosts("/tmp/mongodb-27017.sock," "example.com:27017"), + ) + self.assertEqual( + [("example.com", 27017), ("/tmp/mongodb-27017.sock", None)], + split_hosts("example.com:27017," "/tmp/mongodb-27017.sock"), + ) + self.assertRaises(ValueError, split_hosts, "::1", 27017) + self.assertRaises(ValueError, split_hosts, "[::1:27017") + self.assertRaises(ValueError, split_hosts, "::1") + self.assertRaises(ValueError, split_hosts, "::1]:27017") + self.assertEqual([("::1", 27017)], split_hosts("[::1]:27017")) + self.assertEqual([("::1", 27017)], split_hosts("[::1]")) def test_split_options(self): - self.assertRaises(ConfigurationError, split_options, 'foo') - self.assertRaises(ConfigurationError, split_options, 'foo=bar;foo') - self.assertTrue(split_options('ssl=true')) - self.assertTrue(split_options('connect=true')) - self.assertTrue(split_options('tlsAllowInvalidHostnames=false')) + self.assertRaises(ConfigurationError, split_options, "foo") + self.assertRaises(ConfigurationError, split_options, "foo=bar;foo") + self.assertTrue(split_options("ssl=true")) + self.assertTrue(split_options("connect=true")) + self.assertTrue(split_options("tlsAllowInvalidHostnames=false")) # Test Invalid URI options that should throw warnings. with warnings.catch_warnings(): - warnings.filterwarnings('error') - self.assertRaises(Warning, split_options, - 'foo=bar', warn=True) - self.assertRaises(Warning, split_options, - 'socketTimeoutMS=foo', warn=True) - self.assertRaises(Warning, split_options, - 'socketTimeoutMS=0.0', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=0.0', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=1e100000', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=-1e100000', warn=True) - self.assertRaises(Warning, split_options, - 'ssl=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connect=foo', warn=True) - self.assertRaises(Warning, split_options, - 'tlsAllowInvalidHostnames=foo', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=inf', warn=True) - self.assertRaises(Warning, split_options, - 'connectTimeoutMS=-inf', warn=True) - self.assertRaises(Warning, split_options, 'wtimeoutms=foo', - warn=True) - self.assertRaises(Warning, split_options, 'wtimeoutms=5.5', - warn=True) - self.assertRaises(Warning, split_options, 'fsync=foo', - warn=True) - self.assertRaises(Warning, split_options, 'fsync=5.5', - warn=True) - self.assertRaises(Warning, - split_options, 'authMechanism=foo', - warn=True) + warnings.filterwarnings("error") + self.assertRaises(Warning, split_options, "foo=bar", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "socketTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=0.0", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=1e100000", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-1e100000", warn=True) + self.assertRaises(Warning, split_options, "ssl=foo", warn=True) + self.assertRaises(Warning, split_options, "connect=foo", warn=True) + self.assertRaises(Warning, split_options, "tlsAllowInvalidHostnames=foo", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=inf", warn=True) + self.assertRaises(Warning, split_options, "connectTimeoutMS=-inf", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=foo", warn=True) + self.assertRaises(Warning, split_options, "wtimeoutms=5.5", warn=True) + self.assertRaises(Warning, split_options, "fsync=foo", warn=True) + self.assertRaises(Warning, split_options, "fsync=5.5", warn=True) + self.assertRaises(Warning, split_options, "authMechanism=foo", warn=True) # Test invalid options with warn=False. - self.assertRaises(ConfigurationError, split_options, 'foo=bar') - self.assertRaises(ValueError, split_options, 'socketTimeoutMS=foo') - self.assertRaises(ValueError, split_options, 'socketTimeoutMS=0.0') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=foo') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=0.0') - self.assertRaises(ValueError, split_options, - 'connectTimeoutMS=1e100000') - self.assertRaises(ValueError, split_options, - 'connectTimeoutMS=-1e100000') - self.assertRaises(ValueError, split_options, 'ssl=foo') - self.assertRaises(ValueError, split_options, 'connect=foo') - self.assertRaises(ValueError, split_options, 'tlsAllowInvalidHostnames=foo') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=inf') - self.assertRaises(ValueError, split_options, 'connectTimeoutMS=-inf') - self.assertRaises(ValueError, split_options, 'wtimeoutms=foo') - self.assertRaises(ValueError, split_options, 'wtimeoutms=5.5') - self.assertRaises(ValueError, split_options, 'fsync=foo') - self.assertRaises(ValueError, split_options, 'fsync=5.5') - self.assertRaises(ValueError, - split_options, 'authMechanism=foo') + self.assertRaises(ConfigurationError, split_options, "foo=bar") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "socketTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=0.0") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=1e100000") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-1e100000") + self.assertRaises(ValueError, split_options, "ssl=foo") + self.assertRaises(ValueError, split_options, "connect=foo") + self.assertRaises(ValueError, split_options, "tlsAllowInvalidHostnames=foo") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=inf") + self.assertRaises(ValueError, split_options, "connectTimeoutMS=-inf") + self.assertRaises(ValueError, split_options, "wtimeoutms=foo") + self.assertRaises(ValueError, split_options, "wtimeoutms=5.5") + self.assertRaises(ValueError, split_options, "fsync=foo") + self.assertRaises(ValueError, split_options, "fsync=5.5") + self.assertRaises(ValueError, split_options, "authMechanism=foo") # Test splitting options works when valid. - self.assertTrue(split_options('socketTimeoutMS=300')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.3}, - split_options('socketTimeoutMS=300')) - self.assertEqual({'sockettimeoutms': 0.0001}, - split_options('socketTimeoutMS=0.1')) - self.assertEqual({'connecttimeoutms': 0.3}, - split_options('connectTimeoutMS=300')) - self.assertEqual({'connecttimeoutms': 0.0001}, - split_options('connectTimeoutMS=0.1')) - self.assertTrue(split_options('connectTimeoutMS=300')) - self.assertTrue(isinstance(split_options('w=5')['w'], int)) - self.assertTrue(isinstance(split_options('w=5.5')['w'], str)) - self.assertTrue(split_options('w=foo')) - self.assertTrue(split_options('w=majority')) - self.assertTrue(split_options('wtimeoutms=500')) - self.assertEqual({'fsync': True}, split_options('fsync=true')) - self.assertEqual({'fsync': False}, split_options('fsync=false')) - self.assertEqual({'authmechanism': 'GSSAPI'}, - split_options('authMechanism=GSSAPI')) - self.assertEqual({'authmechanism': 'MONGODB-CR'}, - split_options('authMechanism=MONGODB-CR')) - self.assertEqual({'authmechanism': 'SCRAM-SHA-1'}, - split_options('authMechanism=SCRAM-SHA-1')) - self.assertEqual({'authsource': 'foobar'}, - split_options('authSource=foobar')) - self.assertEqual({'maxpoolsize': 50}, split_options('maxpoolsize=50')) + self.assertTrue(split_options("socketTimeoutMS=300")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.3}, split_options("socketTimeoutMS=300")) + self.assertEqual({"sockettimeoutms": 0.0001}, split_options("socketTimeoutMS=0.1")) + self.assertEqual({"connecttimeoutms": 0.3}, split_options("connectTimeoutMS=300")) + self.assertEqual({"connecttimeoutms": 0.0001}, split_options("connectTimeoutMS=0.1")) + self.assertTrue(split_options("connectTimeoutMS=300")) + self.assertTrue(isinstance(split_options("w=5")["w"], int)) + self.assertTrue(isinstance(split_options("w=5.5")["w"], str)) + self.assertTrue(split_options("w=foo")) + self.assertTrue(split_options("w=majority")) + self.assertTrue(split_options("wtimeoutms=500")) + self.assertEqual({"fsync": True}, split_options("fsync=true")) + self.assertEqual({"fsync": False}, split_options("fsync=false")) + self.assertEqual({"authmechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) + self.assertEqual({"authmechanism": "MONGODB-CR"}, split_options("authMechanism=MONGODB-CR")) + self.assertEqual( + {"authmechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") + ) + self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) + self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foobar.com") self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") - self.assertRaises(ValueError, - parse_uri, "mongodb://::1", 27017) + self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) orig: dict = { - 'nodelist': [("localhost", 27017)], - 'username': None, - 'password': None, - 'database': None, - 'collection': None, - 'options': {}, - 'fqdn': None + "nodelist": [("localhost", 27017)], + "username": None, + "password": None, + "database": None, + "collection": None, + "options": {}, + "fqdn": None, } res: dict = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost")) - res.update({'username': 'fred', 'password': 'foobar'}) + res.update({"username": "fred", "password": "foobar"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost")) - res.update({'database': 'baz'}) + res.update({"database": "baz"}) self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/baz")) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017," - "example2.com:27017")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + self.assertEqual(res, parse_uri("mongodb://example1.com:27017," "example2.com:27017")) res = copy.deepcopy(orig) - res['nodelist'] = [("localhost", 27017), - ("localhost", 27018), - ("localhost", 27019)] - self.assertEqual(res, - parse_uri("mongodb://localhost," - "localhost:27018,localhost:27019")) + res["nodelist"] = [("localhost", 27017), ("localhost", 27018), ("localhost", 27019)] + self.assertEqual(res, parse_uri("mongodb://localhost," "localhost:27018,localhost:27019")) res = copy.deepcopy(orig) - res['database'] = 'foo' + res["database"] = "foo" self.assertEqual(res, parse_uri("mongodb://localhost/foo")) res = copy.deepcopy(orig) self.assertEqual(res, parse_uri("mongodb://localhost/")) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, parse_uri("mongodb://" - "localhost/test.yield_historical.in")) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual(res, parse_uri("mongodb://" "localhost/test.yield_historical.in")) - res.update({'username': 'fred', 'password': 'foobar'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + self.assertEqual( + res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") + ) res = copy.deepcopy(orig) - res['nodelist'] = [("example1.com", 27017), ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://example1.com:27017,example2.com" - ":27017/test.yield_historical.in")) + res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri( + "mongodb://example1.com:27017,example2.com" ":27017/test.yield_historical.in" + ), + ) # Test socket path without escaped characters. - self.assertRaises(InvalidURI, parse_uri, - "mongodb:///tmp/mongodb-27017.sock") + self.assertRaises(InvalidURI, parse_uri, "mongodb:///tmp/mongodb-27017.sock") # Test with escaped characters. res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017), - ("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, - parse_uri("mongodb://example2.com," - "%2Ftmp%2Fmongodb-27017.sock")) + res["nodelist"] = [("example2.com", 27017), ("/tmp/mongodb-27017.sock", None)] + self.assertEqual(res, parse_uri("mongodb://example2.com," "%2Ftmp%2Fmongodb-27017.sock")) res = copy.deepcopy(orig) - res['nodelist'] = [("shoe.sock.pants.co.uk", 27017), - ("/tmp/mongodb-27017.sock", None)] - res['database'] = "nethers_db" - self.assertEqual(res, - parse_uri("mongodb://shoe.sock.pants.co.uk," - "%2Ftmp%2Fmongodb-27017.sock/nethers_db")) + res["nodelist"] = [("shoe.sock.pants.co.uk", 27017), ("/tmp/mongodb-27017.sock", None)] + res["database"] = "nethers_db" + self.assertEqual( + res, + parse_uri("mongodb://shoe.sock.pants.co.uk," "%2Ftmp%2Fmongodb-27017.sock/nethers_db"), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock," - "example2.com:27017" - "/test.yield_historical.in")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017" + "/test.yield_historical.in" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None), - ("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock," - "example2.com:27017/test.yield_historical" - ".sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None), ("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27017.sock," + "example2.com:27017/test.yield_historical" + ".sock" + ), + ) res = copy.deepcopy(orig) - res['nodelist'] = [("example2.com", 27017)] - res.update({'database': 'test', 'collection': 'yield_historical.sock'}) - self.assertEqual(res, - parse_uri("mongodb://example2.com:27017" - "/test.yield_historical.sock")) + res["nodelist"] = [("example2.com", 27017)] + res.update({"database": "test", "collection": "yield_historical.sock"}) + self.assertEqual( + res, parse_uri("mongodb://example2.com:27017" "/test.yield_historical.sock") + ) res = copy.deepcopy(orig) - res['nodelist'] = [("/tmp/mongodb-27017.sock", None)] - res.update({'database': 'test', 'collection': 'mongodb-27017.sock'}) - self.assertEqual(res, - parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" - "/test.mongodb-27017.sock")) + res["nodelist"] = [("/tmp/mongodb-27017.sock", None)] + res.update({"database": "test", "collection": "mongodb-27017.sock"}) + self.assertEqual( + res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" "/test.mongodb-27017.sock") + ) res = copy.deepcopy(orig) - res['nodelist'] = [('/tmp/mongodb-27020.sock', None), - ("::1", 27017), - ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), - ("192.168.0.212", 27019), - ("localhost", 27018)] - self.assertEqual(res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27020.sock" - ",[::1]:27017,[2001:0db8:" - "85a3:0000:0000:8a2e:0370:7334]," - "192.168.0.212:27019,localhost", - 27018)) + res["nodelist"] = [ + ("/tmp/mongodb-27020.sock", None), + ("::1", 27017), + ("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 27018), + ("192.168.0.212", 27019), + ("localhost", 27018), + ] + self.assertEqual( + res, + parse_uri( + "mongodb://%2Ftmp%2Fmongodb-27020.sock" + ",[::1]:27017,[2001:0db8:" + "85a3:0000:0000:8a2e:0370:7334]," + "192.168.0.212:27019,localhost", + 27018, + ), + ) res = copy.deepcopy(orig) - res.update({'username': 'fred', 'password': 'foobar'}) - res.update({'database': 'test', 'collection': 'yield_historical.in'}) - self.assertEqual(res, - parse_uri("mongodb://fred:foobar@localhost/" - "test.yield_historical.in")) + res.update({"username": "fred", "password": "foobar"}) + res.update({"database": "test", "collection": "yield_historical.in"}) + self.assertEqual( + res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") + ) res = copy.deepcopy(orig) - res['database'] = 'test' - res['collection'] = 'name/with "delimiters' - self.assertEqual( - res, parse_uri("mongodb://localhost/test.name/with \"delimiters")) + res["database"] = "test" + res["collection"] = 'name/with "delimiters' + self.assertEqual(res, parse_uri('mongodb://localhost/test.name/with "delimiters')) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode - } - self.assertEqual(res, parse_uri( - "mongodb://localhost/?readPreference=secondary")) + res["options"] = {"readpreference": ReadPreference.SECONDARY.mongos_mode} + self.assertEqual(res, parse_uri("mongodb://localhost/?readPreference=secondary")) # Various authentication tests res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = 'password' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR"} + res["username"] = "user" + res["password"] = "password" + self.assertEqual( + res, parse_uri("mongodb://user:password@localhost/" "?authMechanism=MONGODB-CR") + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR', 'authsource': 'bar'} - res['username'] = 'user' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user:password@localhost/foo" - "?authSource=bar;authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR", "authsource": "bar"} + res["username"] = "user" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user:password@localhost/foo" "?authSource=bar;authMechanism=MONGODB-CR" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'MONGODB-CR'} - res['username'] = 'user' - res['password'] = '' - self.assertEqual(res, - parse_uri("mongodb://user:@localhost/" - "?authMechanism=MONGODB-CR")) + res["options"] = {"authmechanism": "MONGODB-CR"} + res["username"] = "user" + res["password"] = "" + self.assertEqual(res, parse_uri("mongodb://user:@localhost/" "?authMechanism=MONGODB-CR")) res = copy.deepcopy(orig) - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password" "@localhost/foo")) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authmechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri("mongodb://user%40domain.com:password" "@localhost/foo?authMechanism=GSSAPI"), + ) res = copy.deepcopy(orig) - res['options'] = {'authmechanism': 'GSSAPI'} - res['username'] = 'user@domain.com' - res['password'] = '' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com" - "@localhost/foo?authMechanism=GSSAPI")) + res["options"] = {"authmechanism": "GSSAPI"} + res["username"] = "user@domain.com" + res["password"] = "" + res["database"] = "foo" + self.assertEqual( + res, parse_uri("mongodb://user%40domain.com" "@localhost/foo?authMechanism=GSSAPI") + ) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'} - ] + res["options"] = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + ], } - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website" + ), + ) res = copy.deepcopy(orig) - res['options'] = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', 'use': 'website'}, - {'dc': 'east', 'use': 'website'}, - {} - ] + res["options"] = { + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", "use": "website"}, + {"dc": "east", "use": "website"}, + {}, + ], } - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,use:website&" - "readpreferencetags=dc:east,use:website&" - "readpreferencetags=")) + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west,use:website&" + "readpreferencetags=dc:east,use:website&" + "readpreferencetags=" + ), + ) res = copy.deepcopy(orig) - res['options'] = {'uuidrepresentation': JAVA_LEGACY} - res['username'] = 'user@domain.com' - res['password'] = 'password' - res['database'] = 'foo' - self.assertEqual(res, - parse_uri("mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=" - "javaLegacy")) + res["options"] = {"uuidrepresentation": JAVA_LEGACY} + res["username"] = "user@domain.com" + res["password"] = "password" + res["database"] = "foo" + self.assertEqual( + res, + parse_uri( + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=" + "javaLegacy" + ), + ) with warnings.catch_warnings(): - warnings.filterwarnings('error') - self.assertRaises(Warning, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption", - warn=True) - self.assertRaises(ValueError, parse_uri, - "mongodb://user%40domain.com:password" - "@localhost/foo?uuidrepresentation=notAnOption") + warnings.filterwarnings("error") + self.assertRaises( + Warning, + parse_uri, + "mongodb://user%40domain.com:password" + "@localhost/foo?uuidrepresentation=notAnOption", + warn=True, + ) + self.assertRaises( + ValueError, + parse_uri, + "mongodb://user%40domain.com:password" "@localhost/foo?uuidrepresentation=notAnOption", + ) def test_parse_ssl_paths(self): # Turn off "validate" since these paths don't exist on filesystem. self.assertEqual( - {'collection': None, - 'database': None, - 'nodelist': [('/MongoDB.sock', None)], - 'options': {'tlsCertificateKeyFile': '/a/b'}, - 'password': 'foo/bar', - 'username': 'jesse', - 'fqdn': None}, + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "/a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b', - validate=False)) + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=/a/b", + validate=False, + ), + ) self.assertEqual( - {'collection': None, - 'database': None, - 'nodelist': [('/MongoDB.sock', None)], - 'options': {'tlsCertificateKeyFile': 'a/b'}, - 'password': 'foo/bar', - 'username': 'jesse', - 'fqdn': None}, + { + "collection": None, + "database": None, + "nodelist": [("/MongoDB.sock", None)], + "options": {"tlsCertificateKeyFile": "a/b"}, + "password": "foo/bar", + "username": "jesse", + "fqdn": None, + }, parse_uri( - 'mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b', - validate=False)) + "mongodb://jesse:foo%2Fbar@%2FMongoDB.sock/?tlsCertificateKeyFile=a/b", + validate=False, + ), + ) def test_tlsinsecure_simple(self): # check that tlsInsecure is expanded correctly. @@ -467,59 +454,68 @@ def test_tlsinsecure_simple(self): res = { "tlsAllowInvalidHostnames": True, "tlsAllowInvalidCertificates": True, - "tlsInsecure": True, 'tlsDisableOCSPEndpointCheck': True} + "tlsInsecure": True, + "tlsDisableOCSPEndpointCheck": True, + } self.assertEqual(res, parse_uri(uri)["options"]) def test_normalize_options(self): # check that options are converted to their internal names correctly. - uri = ("mongodb://example.com/?ssl=true&appname=myapp") + uri = "mongodb://example.com/?ssl=true&appname=myapp" res = {"tls": True, "appname": "myapp"} self.assertEqual(res, parse_uri(uri)["options"]) def test_unquote_after_parsing(self): quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" unquoted_val = "val!@#$%^&*()_+,: etc" - uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" - "&authMechanismProperties=AWS_SESSION_TOKEN:"+quoted_val) + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + quoted_val + ) res = parse_uri(uri) options = { - 'authmechanism': 'MONGODB-AWS', - 'authmechanismproperties': { - 'AWS_SESSION_TOKEN': unquoted_val}} - self.assertEqual(options, res['options']) - - uri = (("mongodb://localhost/foo?readpreference=secondary&" - "readpreferencetags=dc:west,"+quoted_val+":"+quoted_val+"&" - "readpreferencetags=dc:east,use:"+quoted_val)) + "authmechanism": "MONGODB-AWS", + "authmechanismproperties": {"AWS_SESSION_TOKEN": unquoted_val}, + } + self.assertEqual(options, res["options"]) + + uri = ( + "mongodb://localhost/foo?readpreference=secondary&" + "readpreferencetags=dc:west," + quoted_val + ":" + quoted_val + "&" + "readpreferencetags=dc:east,use:" + quoted_val + ) res = parse_uri(uri) options = { - 'readpreference': ReadPreference.SECONDARY.mongos_mode, - 'readpreferencetags': [ - {'dc': 'west', unquoted_val: unquoted_val}, - {'dc': 'east', 'use': unquoted_val} - ] + "readpreference": ReadPreference.SECONDARY.mongos_mode, + "readpreferencetags": [ + {"dc": "west", unquoted_val: unquoted_val}, + {"dc": "east", "use": unquoted_val}, + ], } - self.assertEqual(options, res['options']) + self.assertEqual(options, res["options"]) def test_redact_AWS_SESSION_TOKEN(self): unquoted_colon = "token:" - uri = ("mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" - "&authMechanismProperties=AWS_SESSION_TOKEN:"+unquoted_colon) + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + unquoted_colon + ) with self.assertRaisesRegex( - ValueError, - 'auth mechanism properties must be key:value pairs like ' - 'SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:' - ', did you forget to percent-escape the token with ' - 'quote_plus?'): + ValueError, + "auth mechanism properties must be key:value pairs like " + "SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:" + ", did you forget to percent-escape the token with " + "quote_plus?", + ): parse_uri(uri) def test_special_chars(self): user = "user@ /9+:?~!$&'()*+,;=" pwd = "pwd@ /9+:?~!$&'()*+,;=" - uri = 'mongodb://%s:%s@localhost' % (quote_plus(user), quote_plus(pwd)) + uri = "mongodb://%s:%s@localhost" % (quote_plus(user), quote_plus(pwd)) res = parse_uri(uri) - self.assertEqual(user, res['username']) - self.assertEqual(pwd, res['password']) + self.assertEqual(user, res["username"]) + self.assertEqual(pwd, res["password"]) if __name__ == "__main__": diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 59457b57ac..d12abf3b91 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -22,19 +22,18 @@ sys.path[0:0] = [""] +from test import clear_warning_registry, unittest + from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate from pymongo.compression_support import _HAVE_SNAPPY from pymongo.srv_resolver import _HAVE_DNSPYTHON -from pymongo.uri_parser import parse_uri, SRV_SCHEME -from test import clear_warning_registry, unittest - +from pymongo.uri_parser import SRV_SCHEME, parse_uri CONN_STRING_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join('connection_string', 'test')) + os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") +) -URI_OPTIONS_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'uri_options') +URI_OPTIONS_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "uri_options") TEST_DESC_SKIP_LIST = [ "Valid options specific to single-threaded drivers are parsed correctly", @@ -64,7 +63,8 @@ "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", - "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error"] + "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", +] class TestAllScenarios(unittest.TestCase): @@ -73,8 +73,7 @@ def setUp(self): def get_error_message_template(expected, artefact): - return "%s %s for test '%s'" % ( - "Expected" if expected else "Unexpected", artefact, "%s") + return "%s %s for test '%s'" % ("Expected" if expected else "Unexpected", artefact, "%s") def run_scenario_in_dir(target_workdir): @@ -84,91 +83,107 @@ def modified_test_scenario(*args, **kwargs): os.chdir(target_workdir) func(*args, **kwargs) os.chdir(original_workdir) + return modified_test_scenario + return workdir_context_decorator def create_test(test, test_workdir): def run_scenario(self): - compressors = (test.get('options') or {}).get('compressors', []) - if 'snappy' in compressors and not _HAVE_SNAPPY: - self.skipTest('This test needs the snappy module.') - if test['uri'].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: + compressors = (test.get("options") or {}).get("compressors", []) + if "snappy" in compressors and not _HAVE_SNAPPY: + self.skipTest("This test needs the snappy module.") + if test["uri"].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: self.skipTest("This test needs dnspython package.") valid = True warning = False with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter('always') + warnings.simplefilter("always") try: - options = parse_uri(test['uri'], warn=True) + options = parse_uri(test["uri"], warn=True) except Exception: valid = False else: warning = len(ctx) > 0 - expected_valid = test.get('valid', True) + expected_valid = test.get("valid", True) self.assertEqual( - valid, expected_valid, get_error_message_template( - not expected_valid, "error") % test['description']) + valid, + expected_valid, + get_error_message_template(not expected_valid, "error") % test["description"], + ) if expected_valid: - expected_warning = test.get('warning', False) + expected_warning = test.get("warning", False) self.assertEqual( - warning, expected_warning, get_error_message_template( - expected_warning, "warning") % test['description']) + warning, + expected_warning, + get_error_message_template(expected_warning, "warning") % test["description"], + ) # Compare hosts and port. - if test['hosts'] is not None: + if test["hosts"] is not None: self.assertEqual( - len(test['hosts']), len(options['nodelist']), - "Incorrect number of hosts parsed from URI") - - for exp, actual in zip(test['hosts'], - options['nodelist']): - self.assertEqual(exp['host'], actual[0], - "Expected host %s but got %s" - % (exp['host'], actual[0])) - if exp['port'] is not None: - self.assertEqual(exp['port'], actual[1], - "Expected port %s but got %s" - % (exp['port'], actual)) + len(test["hosts"]), + len(options["nodelist"]), + "Incorrect number of hosts parsed from URI", + ) + + for exp, actual in zip(test["hosts"], options["nodelist"]): + self.assertEqual( + exp["host"], actual[0], "Expected host %s but got %s" % (exp["host"], actual[0]) + ) + if exp["port"] is not None: + self.assertEqual( + exp["port"], + actual[1], + "Expected port %s but got %s" % (exp["port"], actual), + ) # Compare auth options. - auth = test['auth'] + auth = test["auth"] if auth is not None: - auth['database'] = auth.pop('db') # db == database + auth["database"] = auth.pop("db") # db == database # Special case for PyMongo's collection parsing. - if options.get('collection') is not None: - options['database'] += "." + options['collection'] + if options.get("collection") is not None: + options["database"] += "." + options["collection"] for elm in auth: if auth[elm] is not None: # We have to do this because while the spec requires # "+"->"+", unquote_plus does "+"->" " options[elm] = options[elm].replace(" ", "+") - self.assertEqual(auth[elm], options[elm], - "Expected %s but got %s" - % (auth[elm], options[elm])) + self.assertEqual( + auth[elm], + options[elm], + "Expected %s but got %s" % (auth[elm], options[elm]), + ) # Compare URI options. err_msg = "For option %s expected %s but got %s" - if test['options']: - opts = options['options'] - for opt in test['options']: + if test["options"]: + opts = options["options"] + for opt in test["options"]: lopt = opt.lower() optname = INTERNAL_URI_OPTION_NAME_MAP.get(lopt, lopt) if opts.get(optname) is not None: - if opts[optname] == test['options'][opt]: - expected_value = test['options'][opt] + if opts[optname] == test["options"][opt]: + expected_value = test["options"][opt] else: - expected_value = validate( - lopt, test['options'][opt])[1] + expected_value = validate(lopt, test["options"][opt])[1] self.assertEqual( - opts[optname], expected_value, - err_msg % (opt, expected_value, opts[optname],)) + opts[optname], + expected_value, + err_msg + % ( + opt, + expected_value, + opts[optname], + ), + ) else: - self.fail( - "Missing expected option %s" % (opt,)) + self.fail("Missing expected option %s" % (opt,)) return run_scenario_in_dir(test_workdir)(run_scenario) @@ -176,27 +191,29 @@ def run_scenario(self): def create_tests(test_path): for dirpath, _, filenames in os.walk(test_path): dirname = os.path.split(dirpath) - dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] for filename in filenames: - if not filename.endswith('.json'): + if not filename.endswith(".json"): # skip everything that is not a test specification continue json_path = os.path.join(dirpath, filename) with open(json_path, encoding="utf-8") as scenario_stream: scenario_def = json.load(scenario_stream) - for testcase in scenario_def['tests']: - dsc = testcase['description'] + for testcase in scenario_def["tests"]: + dsc = testcase["description"] if dsc in TEST_DESC_SKIP_LIST: print("Skipping test '%s'" % dsc) continue testmethod = create_test(testcase, dirpath) - testname = 'test_%s_%s_%s' % ( - dirname, os.path.splitext(filename)[0], - str(dsc).replace(' ', '_')) + testname = "test_%s_%s_%s" % ( + dirname, + os.path.splitext(filename)[0], + str(dsc).replace(" ", "_"), + ) testmethod.__name__ = testname setattr(TestAllScenarios, testmethod.__name__, testmethod) diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 44fc89ac73..a2fd059d21 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -17,16 +17,14 @@ sys.path[0:0] = [""] -from pymongo.mongo_client import MongoClient -from pymongo.server_api import ServerApi, ServerApiVersion - -from test import client_context, IntegrationTest, unittest +from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes from test.utils import OvertCommandListener, rs_or_single_client +from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi, ServerApiVersion -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), 'versioned-api') +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "versioned-api") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) @@ -38,38 +36,38 @@ class TestServerApi(IntegrationTest): def test_server_api_defaults(self): api = ServerApi(ServerApiVersion.V1) - self.assertEqual(api.version, '1') + self.assertEqual(api.version, "1") self.assertIsNone(api.strict) self.assertIsNone(api.deprecation_errors) def test_server_api_explicit_false(self): - api = ServerApi('1', strict=False, deprecation_errors=False) - self.assertEqual(api.version, '1') + api = ServerApi("1", strict=False, deprecation_errors=False) + self.assertEqual(api.version, "1") self.assertFalse(api.strict) self.assertFalse(api.deprecation_errors) def test_server_api_strict(self): - api = ServerApi('1', strict=True, deprecation_errors=True) - self.assertEqual(api.version, '1') + api = ServerApi("1", strict=True, deprecation_errors=True) + self.assertEqual(api.version, "1") self.assertTrue(api.strict) self.assertTrue(api.deprecation_errors) def test_server_api_validation(self): with self.assertRaises(ValueError): - ServerApi('2') + ServerApi("2") with self.assertRaises(TypeError): - ServerApi('1', strict='not-a-bool') + ServerApi("1", strict="not-a-bool") with self.assertRaises(TypeError): - ServerApi('1', deprecation_errors='not-a-bool') + ServerApi("1", deprecation_errors="not-a-bool") with self.assertRaises(TypeError): - MongoClient(server_api='not-a-ServerApi') + MongoClient(server_api="not-a-ServerApi") def assertServerApi(self, event): - self.assertIn('apiVersion', event.command) - self.assertEqual(event.command['apiVersion'], '1') + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") def assertNoServerApi(self, event): - self.assertNotIn('apiVersion', event.command) + self.assertNotIn("apiVersion", event.command) def assertServerApiInAllCommands(self, events): for event in events: @@ -78,22 +76,20 @@ def assertServerApiInAllCommands(self, events): @client_context.require_version_min(4, 7) def test_command_options(self): listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi('1'), - event_listeners=[listener]) + client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.insert_many([{} for _ in range(100)]) self.addCleanup(coll.delete_many, {}) list(coll.find(batch_size=25)) - client.admin.command('ping') - self.assertServerApiInAllCommands(listener.results['started']) + client.admin.command("ping") + self.assertServerApiInAllCommands(listener.results["started"]) @client_context.require_version_min(4, 7) @client_context.require_transactions def test_command_options_txn(self): listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi('1'), - event_listeners=[listener]) + client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.insert_many([{} for _ in range(100)]) @@ -103,8 +99,8 @@ def test_command_options_txn(self): with client.start_session() as s, s.start_transaction(): coll.insert_many([{} for _ in range(100)], session=s) list(coll.find(batch_size=25, session=s)) - client.test.command('find', 'test', session=s) - self.assertServerApiInAllCommands(listener.results['started']) + client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.results["started"]) if __name__ == "__main__": diff --git a/test/test_write_concern.py b/test/test_write_concern.py index f0ea690fb3..02c562a348 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -22,7 +22,6 @@ class TestWriteConcern(unittest.TestCase): - def test_invalid(self): # Can't use fsync and j options together self.assertRaises(ConfigurationError, WriteConcern, j=True, fsync=True) @@ -41,9 +40,7 @@ def test_equality_to_none(self): self.assertTrue(concern != None) # noqa def test_equality_compatible_type(self): - class _FakeWriteConcern(object): - def __init__(self, **document): self.document = document @@ -66,9 +63,9 @@ def __ne__(self, other): self.assertNotEqual(WriteConcern(wtimeout=42), _FakeWriteConcern(wtimeout=2000)) def test_equality_incompatible_type(self): - _fake_type = collections.namedtuple('NotAWriteConcern', ['document']) # type: ignore - self.assertNotEqual(WriteConcern(j=True), _fake_type({'j': True})) + _fake_type = collections.namedtuple("NotAWriteConcern", ["document"]) # type: ignore + self.assertNotEqual(WriteConcern(j=True), _fake_type({"j": True})) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py index 65738d5c04..7ce2936b7a 100644 --- a/test/unicode/test_utf8.py +++ b/test/unicode/test_utf8.py @@ -2,9 +2,11 @@ sys.path[0:0] = [""] +from test import unittest + from bson import encode from bson.errors import InvalidStringData -from test import unittest + class TestUTF8(unittest.TestCase): @@ -12,18 +14,19 @@ class TestUTF8(unittest.TestCase): # legal utf-8 if the first byte is 0xf4 (244) def _assert_same_utf8_validation(self, data): try: - data.decode('utf-8') - py_is_legal = True + data.decode("utf-8") + py_is_legal = True except UnicodeDecodeError: py_is_legal = False try: - encode({'x': data}) - bson_is_legal = True + encode({"x": data}) + bson_is_legal = True except InvalidStringData: bson_is_legal = False self.assertEqual(py_is_legal, bson_is_legal, data) + if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 9c38c47863..ba1d063694 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -25,49 +25,65 @@ import sys import time import types - from collections import abc +from test import IntegrationTest, client_context, unittest +from test.utils import ( + CMAPListener, + camel_to_snake, + camel_to_snake_args, + get_pool, + parse_collection_options, + parse_spec_options, + prepare_spec_arguments, + rs_or_single_client, + single_client, + snake_to_camel, +) +from test.version import Version from typing import Any -from bson import json_util, Code, Decimal128, DBRef, SON, Int64, MaxKey, MinKey +from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util from bson.binary import Binary from bson.objectid import ObjectId -from bson.regex import Regex, RE_TYPE - +from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket - from pymongo import ASCENDING, MongoClient -from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.change_stream import ChangeStream +from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection from pymongo.database import Database from pymongo.errors import ( - BulkWriteError, ConnectionFailure, ConfigurationError, InvalidOperation, - NotPrimaryError, PyMongoError) + BulkWriteError, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + PyMongoError, +) from pymongo.monitoring import ( - CommandFailedEvent, CommandListener, CommandStartedEvent, - CommandSucceededEvent, _SENSITIVE_COMMANDS, PoolCreatedEvent, - PoolReadyEvent, PoolClearedEvent, PoolClosedEvent, ConnectionCreatedEvent, - ConnectionReadyEvent, ConnectionClosedEvent, - ConnectionCheckOutStartedEvent, ConnectionCheckOutFailedEvent, - ConnectionCheckedOutEvent, ConnectionCheckedInEvent) + _SENSITIVE_COMMANDS, + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern -from test import client_context, unittest, IntegrationTest -from test.utils import ( - camel_to_snake, get_pool, rs_or_single_client, single_client, - snake_to_camel, CMAPListener) - -from test.version import Version -from test.utils import ( - camel_to_snake_args, parse_collection_options, parse_spec_options, - prepare_spec_arguments) - - JSON_OPTS = json_util.JSONOptions(tz_aware=False) IS_INTERRUPTED = False @@ -87,14 +103,13 @@ def with_metaclass(meta, *bases): # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(type): - def __new__(cls, name, this_bases, d): if sys.version_info[:2] >= (3, 7): # This version introduced PEP 560 that requires a bit # of extra care (we mimic what is done by __build_class__). resolved_bases = types.resolve_bases(bases) if resolved_bases is not bases: - d['__orig_bases__'] = bases + d["__orig_bases__"] = bases else: resolved_bases = bases return meta(name, resolved_bases, d) @@ -102,40 +117,38 @@ def __new__(cls, name, this_bases, d): @classmethod def __prepare__(cls, name, this_bases): return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) + + return type.__new__(metaclass, "temporary_class", (), {}) def is_run_on_requirement_satisfied(requirement): topology_satisfied = True - req_topologies = requirement.get('topologies') + req_topologies = requirement.get("topologies") if req_topologies: - topology_satisfied = client_context.is_topology_type( - req_topologies) + topology_satisfied = client_context.is_topology_type(req_topologies) server_version = Version(*client_context.version[:3]) min_version_satisfied = True - req_min_server_version = requirement.get('minServerVersion') + req_min_server_version = requirement.get("minServerVersion") if req_min_server_version: - min_version_satisfied = Version.from_string( - req_min_server_version) <= server_version + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version max_version_satisfied = True - req_max_server_version = requirement.get('maxServerVersion') + req_max_server_version = requirement.get("maxServerVersion") if req_max_server_version: - max_version_satisfied = Version.from_string( - req_max_server_version) >= server_version + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version - serverless = requirement.get('serverless') + serverless = requirement.get("serverless") if serverless == "require": serverless_satisfied = client_context.serverless elif serverless == "forbid": serverless_satisfied = not client_context.serverless - else: # unset or "allow" + else: # unset or "allow" serverless_satisfied = True params_satisfied = True - params = requirement.get('serverParameters') + params = requirement.get("serverParameters") if params: for param, val in params.items(): if param not in client_context.server_parameters: @@ -144,16 +157,21 @@ def is_run_on_requirement_satisfied(requirement): params_satisfied = False auth_satisfied = True - req_auth = requirement.get('auth') + req_auth = requirement.get("auth") if req_auth is not None: if req_auth: auth_satisfied = client_context.auth_enabled else: auth_satisfied = not client_context.auth_enabled - return (topology_satisfied and min_version_satisfied and - max_version_satisfied and serverless_satisfied and - params_satisfied and auth_satisfied) + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and serverless_satisfied + and params_satisfied + and auth_satisfied + ) def parse_collection_or_database_options(options): @@ -161,15 +179,15 @@ def parse_collection_or_database_options(options): def parse_bulk_write_result(result): - upserted_ids = {str(int_idx): result.upserted_ids[int_idx] - for int_idx in result.upserted_ids} + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} return { - 'deletedCount': result.deleted_count, - 'insertedCount': result.inserted_count, - 'matchedCount': result.matched_count, - 'modifiedCount': result.modified_count, - 'upsertedCount': result.upserted_count, - 'upsertedIds': upserted_ids} + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "upsertedIds": upserted_ids, + } def parse_bulk_write_error_result(error): @@ -179,6 +197,7 @@ def parse_bulk_write_error_result(error): class NonLazyCursor(object): """A find cursor proxy that creates the remote cursor when initialized.""" + def __init__(self, find_cursor): self.find_cursor = find_cursor # Create the server side cursor. @@ -196,8 +215,9 @@ def close(self): class EventListenerUtil(CMAPListener, CommandListener): - def __init__(self, observe_events, ignore_commands, - observe_sensitive_commands, store_events, entity_map): + def __init__( + self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map + ): self._event_types = set(name.lower() for name in observe_events) if observe_sensitive_commands: self._observe_sensitive_commands = True @@ -205,7 +225,7 @@ def __init__(self, observe_events, ignore_commands, else: self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) - self._ignore_commands.add('configurefailpoint') + self._ignore_commands.add("configurefailpoint") self._event_mapping = collections.defaultdict(list) self.entity_map = entity_map if store_events: @@ -218,20 +238,22 @@ def __init__(self, observe_events, ignore_commands, super(EventListenerUtil, self).__init__() def get_events(self, event_type): - if event_type == 'command': - return [e for e in self.events if 'Command' in type(e).__name__] - return [e for e in self.events if 'Command' not in type(e).__name__] + if event_type == "command": + return [e for e in self.events if "Command" in type(e).__name__] + return [e for e in self.events if "Command" not in type(e).__name__] def add_event(self, event): event_name = type(event).__name__.lower() if event_name in self._event_types: super(EventListenerUtil, self).add_event(event) for id in self._event_mapping[event_name]: - self.entity_map[id].append({ - "name": type(event).__name__, - "observedAt": time.time(), - "description": repr(event) - }) + self.entity_map[id].append( + { + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event), + } + ) def _command_event(self, event): if event.command_name.lower() not in self._ignore_commands: @@ -260,6 +282,7 @@ def failed(self, event): class EntityMapUtil(object): """Utility class that implements an entity map as per the unified test format specification.""" + def __init__(self, test_class): self._entities = {} self._listeners = {} @@ -276,102 +299,100 @@ def __getitem__(self, item): try: return self._entities[item] except KeyError: - self.test.fail('Could not find entity named %s in map' % ( - item,)) + self.test.fail("Could not find entity named %s in map" % (item,)) def __setitem__(self, key, value): if not isinstance(key, str): - self.test.fail( - 'Expected entity name of type str, got %s' % (type(key))) + self.test.fail("Expected entity name of type str, got %s" % (type(key))) if key in self._entities: - self.test.fail('Entity named %s already in map' % (key,)) + self.test.fail("Entity named %s already in map" % (key,)) self._entities[key] = value def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: self.test.fail( - "Entity spec %s did not contain exactly one top-level key" % ( - entity_spec,)) + "Entity spec %s did not contain exactly one top-level key" % (entity_spec,) + ) entity_type, spec = next(iter(entity_spec.items())) - if entity_type == 'client': + if entity_type == "client": kwargs: dict = {} - observe_events = spec.get('observeEvents', []) - ignore_commands = spec.get('ignoreCommandMonitoringEvents', []) - observe_sensitive_commands = spec.get( - 'observeSensitiveCommands', False) + observe_events = spec.get("observeEvents", []) + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) ignore_commands = [cmd.lower() for cmd in ignore_commands] listener = EventListenerUtil( - observe_events, ignore_commands, + observe_events, + ignore_commands, observe_sensitive_commands, - spec.get("storeEventsAsEntities"), self) - self._listeners[spec['id']] = listener - kwargs['event_listeners'] = [listener] - if spec.get('useMultipleMongoses'): + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): if client_context.load_balancer or client_context.serverless: - kwargs['h'] = client_context.MULTI_MONGOS_LB_URI + kwargs["h"] = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: - kwargs['h'] = client_context.mongos_seeds() - kwargs.update(spec.get('uriOptions', {})) - server_api = spec.get('serverApi') + kwargs["h"] = client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") if server_api: - kwargs['server_api'] = ServerApi( - server_api['version'], strict=server_api.get('strict'), - deprecation_errors=server_api.get('deprecationErrors')) + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) if uri: - kwargs['h'] = uri + kwargs["h"] = uri client = rs_or_single_client(**kwargs) - self[spec['id']] = client + self[spec["id"]] = client self.test.addCleanup(client.close) return - elif entity_type == 'database': - client = self[spec['client']] + elif entity_type == "database": + client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - 'Expected entity %s to be of type MongoClient, got %s' % ( - spec['client'], type(client))) - options = parse_collection_or_database_options( - spec.get('databaseOptions', {})) - self[spec['id']] = client.get_database( - spec['databaseName'], **options) + "Expected entity %s to be of type MongoClient, got %s" + % (spec["client"], type(client)) + ) + options = parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) return - elif entity_type == 'collection': - database = self[spec['database']] + elif entity_type == "collection": + database = self[spec["database"]] if not isinstance(database, Database): self.test.fail( - 'Expected entity %s to be of type Database, got %s' % ( - spec['database'], type(database))) - options = parse_collection_or_database_options( - spec.get('collectionOptions', {})) - self[spec['id']] = database.get_collection( - spec['collectionName'], **options) + "Expected entity %s to be of type Database, got %s" + % (spec["database"], type(database)) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) return - elif entity_type == 'session': - client = self[spec['client']] + elif entity_type == "session": + client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - 'Expected entity %s to be of type MongoClient, got %s' % ( - spec['client'], type(client))) - opts = camel_to_snake_args(spec.get('sessionOptions', {})) - if 'default_transaction_options' in opts: - txn_opts = parse_spec_options( - opts['default_transaction_options']) + "Expected entity %s to be of type MongoClient, got %s" + % (spec["client"], type(client)) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) txn_opts = TransactionOptions(**txn_opts) opts = copy.deepcopy(opts) - opts['default_transaction_options'] = txn_opts + opts["default_transaction_options"] = txn_opts session = client.start_session(**dict(opts)) - self[spec['id']] = session - self._session_lsids[spec['id']] = copy.deepcopy(session.session_id) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) self.test.addCleanup(session.end_session) return - elif entity_type == 'bucket': + elif entity_type == "bucket": # TODO: implement the 'bucket' entity type - self.test.skipTest( - 'GridFS is not currently supported (PYTHON-2459)') - self.test.fail( - 'Unable to create entity of unknown type %s' % (entity_type,)) + self.test.skipTest("GridFS is not currently supported (PYTHON-2459)") + self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: @@ -381,13 +402,12 @@ def get_listener_for_client(self, client_name): client = self[client_name] if not isinstance(client, MongoClient): self.test.fail( - 'Expected entity %s to be of type MongoClient, got %s' % ( - client_name, type(client))) + "Expected entity %s to be of type MongoClient, got %s" % (client_name, type(client)) + ) listener = self._listeners.get(client_name) if not listener: - self.test.fail( - 'No listeners configured for client %s' % (client_name,)) + self.test.fail("No listeners configured for client %s" % (client_name,)) return listener @@ -395,8 +415,9 @@ def get_lsid_for_session(self, session_name): session = self[session_name] if not isinstance(session, ClientSession): self.test.fail( - 'Expected entity %s to be of type ClientSession, got %s' % ( - session_name, type(session))) + "Expected entity %s to be of type ClientSession, got %s" + % (session_name, type(session)) + ) try: return session.session_id @@ -413,32 +434,33 @@ def get_lsid_for_session(self, session_name): BSON_TYPE_ALIAS_MAP = { # https://docs.mongodb.com/manual/reference/operator/query/type/ # https://pymongo.readthedocs.io/en/stable/api/bson/index.html - 'double': (float,), - 'string': (str,), - 'object': (abc.Mapping,), - 'array': (abc.MutableSequence,), - 'binData': binary_types, - 'undefined': (type(None),), - 'objectId': (ObjectId,), - 'bool': (bool,), - 'date': (datetime.datetime,), - 'null': (type(None),), - 'regex': (Regex, RE_TYPE), - 'dbPointer': (DBRef,), - 'javascript': (unicode_type, Code), - 'symbol': (unicode_type,), - 'javascriptWithScope': (unicode_type, Code), - 'int': (int,), - 'long': (Int64,), - 'decimal': (Decimal128,), - 'maxKey': (MaxKey,), - 'minKey': (MinKey,), + "double": (float,), + "string": (str,), + "object": (abc.Mapping,), + "array": (abc.MutableSequence,), + "binData": binary_types, + "undefined": (type(None),), + "objectId": (ObjectId,), + "bool": (bool,), + "date": (datetime.datetime,), + "null": (type(None),), + "regex": (Regex, RE_TYPE), + "dbPointer": (DBRef,), + "javascript": (unicode_type, Code), + "symbol": (unicode_type,), + "javascriptWithScope": (unicode_type, Code), + "int": (int,), + "long": (Int64,), + "decimal": (Decimal128,), + "maxKey": (MaxKey,), + "minKey": (MinKey,), } class MatchEvaluatorUtil(object): """Utility class that implements methods for evaluating matches as per the unified test format specification.""" + def __init__(self, test_class): self.test = test_class @@ -448,19 +470,18 @@ def _operation_exists(self, spec, actual, key_to_compare): elif spec is False: self.test.assertNotIn(key_to_compare, actual) else: - self.test.fail( - 'Expected boolean value for $$exists operator, got %s' % ( - spec,)) + self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) def __type_alias_to_type(self, alias): if alias not in BSON_TYPE_ALIAS_MAP: - self.test.fail('Unrecognized BSON type alias %s' % (alias,)) + self.test.fail("Unrecognized BSON type alias %s" % (alias,)) return BSON_TYPE_ALIAS_MAP[alias] def _operation_type(self, spec, actual, key_to_compare): if isinstance(spec, abc.MutableSequence): - permissible_types = tuple([ - t for alias in spec for t in self.__type_alias_to_type(alias)]) + permissible_types = tuple( + [t for alias in spec for t in self.__type_alias_to_type(alias)] + ) else: permissible_types = self.__type_alias_to_type(spec) value = actual[key_to_compare] if key_to_compare else actual @@ -481,7 +502,7 @@ def _operation_unsetOrMatches(self, spec, actual, key_to_compare): if key_to_compare not in actual: # we add a dummy value for the compared key to pass map size check - actual[key_to_compare] = 'dummyValue' + actual[key_to_compare] = "dummyValue" return self.match_result(spec, actual[key_to_compare], in_recursive_call=True) @@ -489,19 +510,16 @@ def _operation_sessionLsid(self, spec, actual, key_to_compare): expected_lsid = self.test.entity_map.get_lsid_for_session(spec) self.test.assertEqual(expected_lsid, actual[key_to_compare]) - def _evaluate_special_operation(self, opname, spec, actual, - key_to_compare): - method_name = '_operation_%s' % (opname.strip('$'),) + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): + method_name = "_operation_%s" % (opname.strip("$"),) try: method = getattr(self, method_name) except AttributeError: - self.test.fail( - 'Unsupported special matching operator %s' % (opname,)) + self.test.fail("Unsupported special matching operator %s" % (opname,)) else: method(spec, actual, key_to_compare) - def _evaluate_if_special_operation(self, expectation, actual, - key_to_compare=None): + def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): """Returns True if a special operation is evaluated, False otherwise. If the ``expectation`` map contains a single key, value pair we check it for a special operation. @@ -515,7 +533,7 @@ def _evaluate_if_special_operation(self, expectation, actual, is_special_op, opname, spec = False, False, False if key_to_compare is not None: - if key_to_compare.startswith('$$'): + if key_to_compare.startswith("$$"): is_special_op = True opname = key_to_compare spec = expectation[key_to_compare] @@ -524,20 +542,18 @@ def _evaluate_if_special_operation(self, expectation, actual, nested = expectation[key_to_compare] if isinstance(nested, abc.Mapping) and len(nested) == 1: opname, spec = next(iter(nested.items())) - if opname.startswith('$$'): + if opname.startswith("$$"): is_special_op = True elif len(expectation) == 1: opname, spec = next(iter(expectation.items())) - if opname.startswith('$$'): + if opname.startswith("$$"): is_special_op = True key_to_compare = None if is_special_op: self._evaluate_special_operation( - opname=opname, - spec=spec, - actual=actual, - key_to_compare=key_to_compare) + opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare + ) return True return False @@ -557,37 +573,33 @@ def _match_document(self, expectation, actual, is_root): if not is_root: expected_keys = set(expectation.keys()) for key, value in expectation.items(): - if value == {'$$exists': False}: + if value == {"$$exists": False}: expected_keys.remove(key) self.test.assertEqual(expected_keys, set(actual.keys())) - def match_result(self, expectation, actual, - in_recursive_call=False): + def match_result(self, expectation, actual, in_recursive_call=False): if isinstance(expectation, abc.Mapping): - return self._match_document( - expectation, actual, is_root=not in_recursive_call) + return self._match_document(expectation, actual, is_root=not in_recursive_call) if isinstance(expectation, abc.MutableSequence): self.test.assertIsInstance(actual, abc.MutableSequence) for e, a in zip(expectation, actual): if isinstance(e, abc.Mapping): - self._match_document( - e, a, is_root=not in_recursive_call) + self._match_document(e, a, is_root=not in_recursive_call) else: self.match_result(e, a, in_recursive_call=True) return # account for flexible numerics in element-wise comparison - if (isinstance(expectation, int) or - isinstance(expectation, float)): + if isinstance(expectation, int) or isinstance(expectation, float): self.test.assertEqual(expectation, actual) else: self.test.assertIsInstance(actual, type(expectation)) self.test.assertEqual(expectation, actual) def assertHasServiceId(self, spec, actual): - if 'hasServiceId' in spec: - if spec.get('hasServiceId'): + if "hasServiceId" in spec: + if spec.get("hasServiceId"): self.test.assertIsNotNone(actual.service_id) self.test.assertIsInstance(actual.service_id, ObjectId) else: @@ -597,85 +609,83 @@ def match_event(self, event_type, expectation, actual): name, spec = next(iter(expectation.items())) # every command event has the commandName field - if event_type == 'command': - command_name = spec.get('commandName') + if event_type == "command": + command_name = spec.get("commandName") if command_name: self.test.assertEqual(command_name, actual.command_name) - if name == 'commandStartedEvent': + if name == "commandStartedEvent": self.test.assertIsInstance(actual, CommandStartedEvent) - command = spec.get('command') - database_name = spec.get('databaseName') + command = spec.get("command") + database_name = spec.get("databaseName") if command: - if actual.command_name == 'update': + if actual.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into expectations. - for update in command.get('updates', []): - update.setdefault('upsert', False) - update.setdefault('multi', False) + for update in command.get("updates", []): + update.setdefault("upsert", False) + update.setdefault("multi", False) self.match_result(command, actual.command) if database_name: - self.test.assertEqual( - database_name, actual.database_name) + self.test.assertEqual(database_name, actual.database_name) self.assertHasServiceId(spec, actual) - elif name == 'commandSucceededEvent': + elif name == "commandSucceededEvent": self.test.assertIsInstance(actual, CommandSucceededEvent) - reply = spec.get('reply') + reply = spec.get("reply") if reply: self.match_result(reply, actual.reply) self.assertHasServiceId(spec, actual) - elif name == 'commandFailedEvent': + elif name == "commandFailedEvent": self.test.assertIsInstance(actual, CommandFailedEvent) self.assertHasServiceId(spec, actual) - elif name == 'poolCreatedEvent': + elif name == "poolCreatedEvent": self.test.assertIsInstance(actual, PoolCreatedEvent) - elif name == 'poolReadyEvent': + elif name == "poolReadyEvent": self.test.assertIsInstance(actual, PoolReadyEvent) - elif name == 'poolClearedEvent': + elif name == "poolClearedEvent": self.test.assertIsInstance(actual, PoolClearedEvent) self.assertHasServiceId(spec, actual) - elif name == 'poolClosedEvent': + elif name == "poolClosedEvent": self.test.assertIsInstance(actual, PoolClosedEvent) - elif name == 'connectionCreatedEvent': + elif name == "connectionCreatedEvent": self.test.assertIsInstance(actual, ConnectionCreatedEvent) - elif name == 'connectionReadyEvent': + elif name == "connectionReadyEvent": self.test.assertIsInstance(actual, ConnectionReadyEvent) - elif name == 'connectionClosedEvent': + elif name == "connectionClosedEvent": self.test.assertIsInstance(actual, ConnectionClosedEvent) - if 'reason' in spec: - self.test.assertEqual(actual.reason, spec['reason']) - elif name == 'connectionCheckOutStartedEvent': + if "reason" in spec: + self.test.assertEqual(actual.reason, spec["reason"]) + elif name == "connectionCheckOutStartedEvent": self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) - elif name == 'connectionCheckOutFailedEvent': + elif name == "connectionCheckOutFailedEvent": self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) - if 'reason' in spec: - self.test.assertEqual(actual.reason, spec['reason']) - elif name == 'connectionCheckedOutEvent': + if "reason" in spec: + self.test.assertEqual(actual.reason, spec["reason"]) + elif name == "connectionCheckedOutEvent": self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) - elif name == 'connectionCheckedInEvent': + elif name == "connectionCheckedInEvent": self.test.assertIsInstance(actual, ConnectionCheckedInEvent) else: - self.test.fail( - 'Unsupported event type %s' % (name,)) + self.test.fail("Unsupported event type %s" % (name,)) def coerce_result(opname, result): """Convert a pymongo result into the spec's result format.""" - if hasattr(result, 'acknowledged') and not result.acknowledged: - return {'acknowledged': False} - if opname == 'bulkWrite': + if hasattr(result, "acknowledged") and not result.acknowledged: + return {"acknowledged": False} + if opname == "bulkWrite": return parse_bulk_write_result(result) - if opname == 'insertOne': - return {'insertedId': result.inserted_id} - if opname == 'insertMany': + if opname == "insertOne": + return {"insertedId": result.inserted_id} + if opname == "insertMany": return {idx: _id for idx, _id in enumerate(result.inserted_ids)} - if opname in ('deleteOne', 'deleteMany'): - return {'deletedCount': result.deleted_count} - if opname in ('updateOne', 'updateMany', 'replaceOne'): + if opname in ("deleteOne", "deleteMany"): + return {"deletedCount": result.deleted_count} + if opname in ("updateOne", "updateMany", "replaceOne"): return { - 'matchedCount': result.matched_count, - 'modifiedCount': result.modified_count, - 'upsertedCount': 0 if result.upserted_id is None else 1, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": 0 if result.upserted_id is None else 1, } return result @@ -689,7 +699,8 @@ class UnifiedSpecTestMixinV1(IntegrationTest): Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string('1.5') + + SCHEMA_VERSION = Version.from_string("1.5") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -707,12 +718,13 @@ def should_run_on(run_on_spec): def insert_initial_data(self, initial_data): for collection_data in initial_data: - coll_name = collection_data['collectionName'] - db_name = collection_data['databaseName'] - documents = collection_data['documents'] + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + documents = collection_data["documents"] coll = self.client.get_database(db_name).get_collection( - coll_name, write_concern=WriteConcern(w="majority")) + coll_name, write_concern=WriteConcern(w="majority") + ) coll.drop() if len(documents) > 0: @@ -720,56 +732,54 @@ def insert_initial_data(self, initial_data): else: # ensure collection exists result = coll.insert_one({}) - coll.delete_one({'_id': result.inserted_id}) + coll.delete_one({"_id": result.inserted_id}) @classmethod def setUpClass(cls): # super call creates internal client cls.client super(UnifiedSpecTestMixinV1, cls).setUpClass() # process file-level runOnRequirements - run_on_spec = cls.TEST_SPEC.get('runOnRequirements', []) + run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) if not cls.should_run_on(run_on_spec): - raise unittest.SkipTest( - '%s runOnRequirements not satisfied' % (cls.__name__,)) + raise unittest.SkipTest("%s runOnRequirements not satisfied" % (cls.__name__,)) # add any special-casing for skipping tests here - if client_context.storage_engine == 'mmapv1': - if 'retryable-writes' in cls.TEST_SPEC['description']: - raise unittest.SkipTest( - "MMAPv1 does not support retryWrites=True") + if client_context.storage_engine == "mmapv1": + if "retryable-writes" in cls.TEST_SPEC["description"]: + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") def setUp(self): super(UnifiedSpecTestMixinV1, self).setUp() # process schemaVersion # note: we check major schema version during class generation # note: we do this here because we cannot run assertions in setUpClass - version = Version.from_string(self.TEST_SPEC['schemaVersion']) + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) self.assertLessEqual( - version, self.SCHEMA_VERSION, - 'expected schema version %s or lower, got %s' % ( - self.SCHEMA_VERSION, version)) + version, + self.SCHEMA_VERSION, + "expected schema version %s or lower, got %s" % (self.SCHEMA_VERSION, version), + ) # initialize internals self.match_evaluator = MatchEvaluatorUtil(self) def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if client_context.storage_engine == 'mmapv1': - if 'Dirty explicit session is discarded' in spec['description']: - raise unittest.SkipTest( - "MMAPv1 does not support retryWrites=True") - elif 'Client side error in command starting transaction' in spec['description']: + if client_context.storage_engine == "mmapv1": + if "Dirty explicit session is discarded" in spec["description"]: + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + elif "Client side error in command starting transaction" in spec["description"]: raise unittest.SkipTest("Implement PYTHON-1894") def process_error(self, exception, spec): - is_error = spec.get('isError') - is_client_error = spec.get('isClientError') - error_contains = spec.get('errorContains') - error_code = spec.get('errorCode') - error_code_name = spec.get('errorCodeName') - error_labels_contain = spec.get('errorLabelsContain') - error_labels_omit = spec.get('errorLabelsOmit') - expect_result = spec.get('expectResult') + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") if is_error: # already satisfied because exception was raised @@ -792,75 +802,72 @@ def process_error(self, exception, spec): self.assertIn(error_contains.lower(), errmsg) if error_code: - self.assertEqual( - error_code, exception.details.get('code')) + self.assertEqual(error_code, exception.details.get("code")) if error_code_name: - self.assertEqual( - error_code_name, exception.details.get('codeName')) + self.assertEqual(error_code_name, exception.details.get("codeName")) if error_labels_contain: - labels = [err_label for err_label in error_labels_contain - if exception.has_error_label(err_label)] + labels = [ + err_label + for err_label in error_labels_contain + if exception.has_error_label(err_label) + ] self.assertEqual(labels, error_labels_contain) if error_labels_omit: for err_label in error_labels_omit: if exception.has_error_label(err_label): - self.fail("Exception '%s' unexpectedly had label '%s'" % ( - exception, err_label)) + self.fail("Exception '%s' unexpectedly had label '%s'" % (exception, err_label)) if expect_result: if isinstance(exception, BulkWriteError): - result = parse_bulk_write_error_result( - exception) + result = parse_bulk_write_error_result(exception) self.match_evaluator.match_result(expect_result, result) else: - self.fail("expectResult can only be specified with %s " - "exceptions" % (BulkWriteError,)) + self.fail( + "expectResult can only be specified with %s " "exceptions" % (BulkWriteError,) + ) def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): - self.fail('Operation %s not supported for entity ' - 'of type %s' % (opname, type(target))) + self.fail( + "Operation %s not supported for entity " "of type %s" % (opname, type(target)) + ) def __entityOperation_createChangeStream(self, target, *args, **kwargs): - if client_context.storage_engine == 'mmapv1': + if client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support change streams") - self.__raise_if_unsupported( - 'createChangeStream', target, MongoClient, Database, Collection) + self.__raise_if_unsupported("createChangeStream", target, MongoClient, Database, Collection) stream = target.watch(*args, **kwargs) self.addCleanup(stream.close) return stream def _clientOperation_createChangeStream(self, target, *args, **kwargs): - return self.__entityOperation_createChangeStream( - target, *args, **kwargs) + return self.__entityOperation_createChangeStream(target, *args, **kwargs) def _databaseOperation_createChangeStream(self, target, *args, **kwargs): - return self.__entityOperation_createChangeStream( - target, *args, **kwargs) + return self.__entityOperation_createChangeStream(target, *args, **kwargs) def _collectionOperation_createChangeStream(self, target, *args, **kwargs): - return self.__entityOperation_createChangeStream( - target, *args, **kwargs) + return self.__entityOperation_createChangeStream(target, *args, **kwargs) def _databaseOperation_runCommand(self, target, **kwargs): - self.__raise_if_unsupported('runCommand', target, Database) + self.__raise_if_unsupported("runCommand", target, Database) # Ensure the first key is the command name. - ordered_command = SON([(kwargs.pop('command_name'), 1)]) - ordered_command.update(kwargs['command']) - kwargs['command'] = ordered_command + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command return target.command(**kwargs) def _databaseOperation_listCollections(self, target, *args, **kwargs): - if 'batch_size' in kwargs: - kwargs['cursor'] = {'batchSize': kwargs.pop('batch_size')} + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} cursor = target.list_collections(*args, **kwargs) return list(cursor) def __entityOperation_aggregate(self, target, *args, **kwargs): - self.__raise_if_unsupported('aggregate', target, Database, Collection) + self.__raise_if_unsupported("aggregate", target, Database, Collection) return list(target.aggregate(*args, **kwargs)) def _databaseOperation_aggregate(self, target, *args, **kwargs): @@ -870,86 +877,84 @@ def _collectionOperation_aggregate(self, target, *args, **kwargs): return self.__entityOperation_aggregate(target, *args, **kwargs) def _collectionOperation_find(self, target, *args, **kwargs): - self.__raise_if_unsupported('find', target, Collection) + self.__raise_if_unsupported("find", target, Collection) find_cursor = target.find(*args, **kwargs) return list(find_cursor) def _collectionOperation_createFindCursor(self, target, *args, **kwargs): - self.__raise_if_unsupported('find', target, Collection) - if 'filter' not in kwargs: + self.__raise_if_unsupported("find", target, Collection) + if "filter" not in kwargs: self.fail('createFindCursor requires a "filter" argument') cursor = NonLazyCursor(target.find(*args, **kwargs)) self.addCleanup(cursor.close) return cursor def _collectionOperation_listIndexes(self, target, *args, **kwargs): - if 'batch_size' in kwargs: - self.skipTest('PyMongo does not support batch_size for ' - 'list_indexes') + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for " "list_indexes") return target.list_indexes(*args, **kwargs) def _sessionOperation_withTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == 'mmapv1': - self.skipTest('MMAPv1 does not support document-level locking') - self.__raise_if_unsupported('withTransaction', target, ClientSession) + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("withTransaction", target, ClientSession) return target.with_transaction(*args, **kwargs) def _sessionOperation_startTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == 'mmapv1': - self.skipTest('MMAPv1 does not support document-level locking') - self.__raise_if_unsupported('startTransaction', target, ClientSession) + if client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) - def _changeStreamOperation_iterateUntilDocumentOrError(self, target, - *args, **kwargs): - self.__raise_if_unsupported( - 'iterateUntilDocumentOrError', target, ChangeStream) + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): - self.__raise_if_unsupported( - 'iterateUntilDocumentOrError', target, NonLazyCursor) + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) return next(target) def _cursor_close(self, target, *args, **kwargs): - self.__raise_if_unsupported('close', target, NonLazyCursor) + self.__raise_if_unsupported("close", target, NonLazyCursor) return target.close() def run_entity_operation(self, spec): - target = self.entity_map[spec['object']] - opname = spec['name'] - opargs = spec.get('arguments') - expect_error = spec.get('expectError') - save_as_entity = spec.get('saveResultAsEntity') - expect_result = spec.get('expectResult') - ignore = spec.get('ignoreResultAndError') + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") if ignore and (expect_error or save_as_entity or expect_result): raise ValueError( - 'ignoreResultAndError is incompatible with saveResultAsEntity' - ', expectError, and expectResult') + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) if opargs: arguments = parse_spec_options(copy.deepcopy(opargs)) - prepare_spec_arguments(spec, arguments, camel_to_snake(opname), - self.entity_map, self.run_operations) + prepare_spec_arguments( + spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations + ) else: arguments = tuple() if isinstance(target, MongoClient): - method_name = '_clientOperation_%s' % (opname,) + method_name = "_clientOperation_%s" % (opname,) elif isinstance(target, Database): - method_name = '_databaseOperation_%s' % (opname,) + method_name = "_databaseOperation_%s" % (opname,) elif isinstance(target, Collection): - method_name = '_collectionOperation_%s' % (opname,) + method_name = "_collectionOperation_%s" % (opname,) elif isinstance(target, ChangeStream): - method_name = '_changeStreamOperation_%s' % (opname,) + method_name = "_changeStreamOperation_%s" % (opname,) elif isinstance(target, NonLazyCursor): - method_name = '_cursor_%s' % (opname,) + method_name = "_cursor_%s" % (opname,) elif isinstance(target, ClientSession): - method_name = '_sessionOperation_%s' % (opname,) + method_name = "_sessionOperation_%s" % (opname,) elif isinstance(target, GridFSBucket): raise NotImplementedError else: - method_name = 'doesNotExist' + method_name = "doesNotExist" try: method = getattr(self, method_name) @@ -957,8 +962,7 @@ def run_entity_operation(self, spec): try: cmd = getattr(target, camel_to_snake(opname)) except AttributeError: - self.fail('Unsupported operation %s on entity %s' % ( - opname, target)) + self.fail("Unsupported operation %s on entity %s" % (opname, target)) else: cmd = functools.partial(method, target) @@ -974,8 +978,9 @@ def run_entity_operation(self, spec): raise else: if expect_error: - self.fail('Excepted error %s but "%s" succeeded: %s' % ( - expect_error, opname, result)) + self.fail( + 'Excepted error %s but "%s" succeeded: %s' % (expect_error, opname, result) + ) if expect_result: actual = coerce_result(opname, result) @@ -986,42 +991,43 @@ def run_entity_operation(self, spec): def __set_fail_point(self, client, command_args): if not client_context.test_commands_enabled: - self.skipTest('Test commands must be enabled') + self.skipTest("Test commands must be enabled") - cmd_on = SON([('configureFailPoint', 'failCommand')]) + cmd_on = SON([("configureFailPoint", "failCommand")]) cmd_on.update(command_args) client.admin.command(cmd_on) self.addCleanup( - client.admin.command, - 'configureFailPoint', cmd_on['configureFailPoint'], mode='off') + client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) def _testOperation_failPoint(self, spec): self.__set_fail_point( - client=self.entity_map[spec['client']], - command_args=spec['failPoint']) + client=self.entity_map[spec["client"]], command_args=spec["failPoint"] + ) def _testOperation_targetedFailPoint(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] if not session._pinned_address: - self.fail("Cannot use targetedFailPoint operation with unpinned " - "session %s" % (spec['session'],)) + self.fail( + "Cannot use targetedFailPoint operation with unpinned " + "session %s" % (spec["session"],) + ) - client = single_client('%s:%s' % session._pinned_address) + client = single_client("%s:%s" % session._pinned_address) self.addCleanup(client.close) - self.__set_fail_point( - client=client, command_args=spec['failPoint']) + self.__set_fail_point(client=client, command_args=spec["failPoint"]) def _testOperation_assertSessionTransactionState(self, spec): - session = self.entity_map[spec['session']] - expected_state = getattr(_TxnState, spec['state'].upper()) + session = self.entity_map[spec["session"]] + expected_state = getattr(_TxnState, spec["state"].upper()) self.assertEqual(expected_state, session._transaction.state) def _testOperation_assertSessionPinned(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] self.assertIsNotNone(session._transaction.pinned_address) def _testOperation_assertSessionUnpinned(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] self.assertIsNone(session._pinned_address) self.assertIsNone(session._transaction.pinned_address) @@ -1031,61 +1037,61 @@ def __get_last_two_command_lsids(self, listener): if isinstance(event, CommandStartedEvent): cmd_started_events.append(event) if len(cmd_started_events) < 2: - self.fail('Needed 2 CommandStartedEvents to compare lsids, ' - 'got %s' % (len(cmd_started_events))) - return tuple([e.command['lsid'] for e in cmd_started_events][:2]) + self.fail( + "Needed 2 CommandStartedEvents to compare lsids, " + "got %s" % (len(cmd_started_events)) + ) + return tuple([e.command["lsid"] for e in cmd_started_events][:2]) def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): - listener = self.entity_map.get_listener_for_client(spec['client']) + listener = self.entity_map.get_listener_for_client(spec["client"]) self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): - listener = self.entity_map.get_listener_for_client(spec['client']) + listener = self.entity_map.get_listener_for_client(spec["client"]) self.assertEqual(*self.__get_last_two_command_lsids(listener)) def _testOperation_assertSessionDirty(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] self.assertTrue(session._server_session.dirty) def _testOperation_assertSessionNotDirty(self, spec): - session = self.entity_map[spec['session']] + session = self.entity_map[spec["session"]] return self.assertFalse(session._server_session.dirty) def _testOperation_assertCollectionExists(self, spec): - database_name = spec['databaseName'] - collection_name = spec['collectionName'] - collection_name_list = list( - self.client.get_database(database_name).list_collection_names()) + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) self.assertIn(collection_name, collection_name_list) def _testOperation_assertCollectionNotExists(self, spec): - database_name = spec['databaseName'] - collection_name = spec['collectionName'] - collection_name_list = list( - self.client.get_database(database_name).list_collection_names()) + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list(self.client.get_database(database_name).list_collection_names()) self.assertNotIn(collection_name, collection_name_list) def _testOperation_assertIndexExists(self, spec): - collection = self.client[spec['databaseName']][spec['collectionName']] - index_names = [idx['name'] for idx in collection.list_indexes()] - self.assertIn(spec['indexName'], index_names) + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) def _testOperation_assertIndexNotExists(self, spec): - collection = self.client[spec['databaseName']][spec['collectionName']] + collection = self.client[spec["databaseName"]][spec["collectionName"]] for index in collection.list_indexes(): - self.assertNotEqual(spec['indexName'], index['name']) + self.assertNotEqual(spec["indexName"], index["name"]) def _testOperation_assertNumberConnectionsCheckedOut(self, spec): - client = self.entity_map[spec['client']] + client = self.entity_map[spec["client"]] pool = get_pool(client) - self.assertEqual(spec['connections'], pool.active_sockets) + self.assertEqual(spec["connections"], pool.active_sockets) def _testOperation_loop(self, spec): - failure_key = spec.get('storeFailuresAsEntity') - error_key = spec.get('storeErrorsAsEntity') - successes_key = spec.get('storeSuccessesAsEntity') - iteration_key = spec.get('storeIterationsAsEntity') - iteration_limiter_key = spec.get('numIterations') + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") for i in [failure_key, error_key]: if i: self.entity_map[i] = [] @@ -1114,37 +1120,34 @@ def _testOperation_loop(self, spec): key = error_key or failure_key if not key: raise - self.entity_map[key].append({ - "error": str(exc), - "time": time.time(), - "type": type(exc).__name__ - }) + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) def run_special_operation(self, spec): - opname = spec['name'] - method_name = '_testOperation_%s' % (opname,) + opname = spec["name"] + method_name = "_testOperation_%s" % (opname,) try: method = getattr(self, method_name) except AttributeError: - self.fail('Unsupported special test operation %s' % (opname,)) + self.fail("Unsupported special test operation %s" % (opname,)) else: - method(spec['arguments']) + method(spec["arguments"]) def run_operations(self, spec): for op in spec: - if op['object'] == 'testRunner': + if op["object"] == "testRunner": self.run_special_operation(op) else: self.run_entity_operation(op) - def check_events(self, spec): for event_spec in spec: - client_name = event_spec['client'] - events = event_spec['events'] + client_name = event_spec["client"] + events = event_spec["events"] # Valid types: 'command', 'cmap' - event_type = event_spec.get('eventType', 'command') - assert event_type in ('command', 'cmap') + event_type = event_spec.get("eventType", "command") + assert event_type in ("command", "cmap") listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) @@ -1153,68 +1156,64 @@ def check_events(self, spec): continue if len(events) > len(actual_events): - self.fail('Expected to see %s events, got %s' % ( - len(events), len(actual_events))) + self.fail("Expected to see %s events, got %s" % (len(events), len(actual_events))) for idx, expected_event in enumerate(events): - self.match_evaluator.match_event( - event_type, expected_event, actual_events[idx]) + self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) def verify_outcome(self, spec): for collection_data in spec: - coll_name = collection_data['collectionName'] - db_name = collection_data['databaseName'] - expected_documents = collection_data['documents'] + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + expected_documents = collection_data["documents"] coll = self.client.get_database(db_name).get_collection( coll_name, read_preference=ReadPreference.PRIMARY, - read_concern=ReadConcern(level='local')) + read_concern=ReadConcern(level="local"), + ) if expected_documents: - sorted_expected_documents = sorted( - expected_documents, key=lambda doc: doc['_id']) - actual_documents = list( - coll.find({}, sort=[('_id', ASCENDING)])) - self.assertListEqual(sorted_expected_documents, - actual_documents) + sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) + actual_documents = list(coll.find({}, sort=[("_id", ASCENDING)])) + self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) # process test-level runOnRequirements - run_on_spec = spec.get('runOnRequirements', []) + run_on_spec = spec.get("runOnRequirements", []) if not self.should_run_on(run_on_spec): - raise unittest.SkipTest('runOnRequirements not satisfied') + raise unittest.SkipTest("runOnRequirements not satisfied") # process skipReason - skip_reason = spec.get('skipReason', None) + skip_reason = spec.get("skipReason", None) if skip_reason is not None: - raise unittest.SkipTest('%s' % (skip_reason,)) + raise unittest.SkipTest("%s" % (skip_reason,)) # process createEntities self.entity_map = EntityMapUtil(self) - self.entity_map.create_entities_from_spec( - self.TEST_SPEC.get('createEntities', []), uri=uri) + self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) # process initialData - self.insert_initial_data(self.TEST_SPEC.get('initialData', [])) + self.insert_initial_data(self.TEST_SPEC.get("initialData", [])) # process operations - self.run_operations(spec['operations']) + self.run_operations(spec["operations"]) # process expectEvents - if 'expectEvents' in spec: - expect_events = spec['expectEvents'] - self.assertTrue(expect_events, 'expectEvents must be non-empty') + if "expectEvents" in spec: + expect_events = spec["expectEvents"] + self.assertTrue(expect_events, "expectEvents must be non-empty") self.check_events(expect_events) # process outcome - self.verify_outcome(spec.get('outcome', [])) + self.verify_outcome(spec.get("outcome", [])) class UnifiedSpecTestMeta(type): """Metaclass for generating test classes.""" + TEST_SPEC: Any EXPECTED_FAILURES: Any @@ -1224,12 +1223,12 @@ def __init__(cls, *args, **kwargs): def create_test(spec): def test_case(self): self.run_scenario(spec) + return test_case - for test_spec in cls.TEST_SPEC['tests']: - description = test_spec['description'] - test_name = 'test_%s' % (description.strip('. '). - replace(' ', '_').replace('.', '_'),) + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_%s" % (description.strip(". ").replace(" ", "_").replace(".", "_"),) test_method = create_test(copy.deepcopy(test_spec)) test_method.__name__ = str(test_name) @@ -1248,13 +1247,18 @@ def test_case(self): _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { - KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES} + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} -def generate_test_classes(test_path, module=__name__, class_name_prefix='', - expected_failures=[], - bypass_test_generation_errors=False, - **kwargs): +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], + bypass_test_generation_errors=False, + **kwargs +): """Method for generating test classes. Returns a dictionary where keys are the names of test classes and values are the test class objects.""" test_klasses = {} @@ -1263,9 +1267,11 @@ def test_base_class_factory(test_spec): """Utility that creates the base class to use for test generation. This is needed to ensure that cls.TEST_SPEC is appropriately set when the metaclass __init__ is invoked.""" + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore TEST_SPEC = test_spec EXPECTED_FAILURES = expected_failures + return SpecTestBase for dirpath, _, filenames in os.walk(test_path): @@ -1277,30 +1283,34 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore # Use tz_aware=False to match how CodecOptions decodes # dates. opts = json_util.JSONOptions(tz_aware=False) - scenario_def = json_util.loads( - scenario_stream.read(), json_options=opts) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) test_type = os.path.splitext(filename)[0] - snake_class_name = 'Test%s_%s_%s' % ( - class_name_prefix, dirname.replace('-', '_'), - test_type.replace('-', '_').replace('.', '_')) + snake_class_name = "Test%s_%s_%s" % ( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) class_name = snake_to_camel(snake_class_name) try: - schema_version = Version.from_string( - scenario_def['schemaVersion']) - mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get( - schema_version[0]) + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) if mixin_class is None: raise ValueError( - "test file '%s' has unsupported schemaVersion '%s'" % ( - fpath, schema_version)) - module_dict = {'__module__': module} + "test file '%s' has unsupported schemaVersion '%s'" + % (fpath, schema_version) + ) + module_dict = {"__module__": module} module_dict.update(kwargs) test_klasses[class_name] = type( class_name, - (mixin_class, test_base_class_factory(scenario_def),), - module_dict) + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) except Exception: if bypass_test_generation_errors: continue diff --git a/test/utils.py b/test/utils.py index b0b0c87c47..2c50797266 100644 --- a/test/utils.py +++ b/test/utils.py @@ -26,16 +26,14 @@ import time import unittest import warnings - from collections import abc, defaultdict from functools import partial +from test import client_context, db_pwd, db_user from bson import json_util from bson.objectid import ObjectId from bson.son import SON - -from pymongo import (MongoClient, - monitoring, operations, read_preferences) +from pymongo import MongoClient, monitoring, operations, read_preferences from pymongo.collection import ReturnDocument from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat @@ -43,16 +41,10 @@ from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import (any_server_selector, - writable_server_selector) +from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.write_concern import WriteConcern from pymongo.uri_parser import parse_uri - -from test import (client_context, - db_user, - db_pwd) - +from pymongo.write_concern import WriteConcern IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) @@ -83,8 +75,7 @@ def matching(self, matcher): def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" - wait_until(lambda: self.event_count(event) >= count, - 'find %s %s event(s)' % (count, event)) + wait_until(lambda: self.event_count(event) >= count, "find %s %s event(s)" % (count, event)) class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): @@ -123,22 +114,21 @@ def pool_closed(self, event): class EventListener(monitoring.CommandListener): - def __init__(self): self.results = defaultdict(list) def started(self, event): - self.results['started'].append(event) + self.results["started"].append(event) def succeeded(self, event): - self.results['succeeded'].append(event) + self.results["succeeded"].append(event) def failed(self, event): - self.results['failed'].append(event) + self.results["failed"].append(event) def started_command_names(self): """Return list of command names started.""" - return [event.command_name for event in self.results['started']] + return [event.command_name for event in self.results["started"]] def reset(self): """Reset the state of this listener.""" @@ -150,13 +140,13 @@ def __init__(self): self.results = defaultdict(list) def closed(self, event): - self.results['closed'].append(event) + self.results["closed"].append(event) def description_changed(self, event): - self.results['description_changed'].append(event) + self.results["description_changed"].append(event) def opened(self, event): - self.results['opened'].append(event) + self.results["opened"].append(event) def reset(self): """Reset the state of this listener.""" @@ -164,7 +154,6 @@ def reset(self): class AllowListEventListener(EventListener): - def __init__(self, *commands): self.commands = set(commands) super(AllowListEventListener, self).__init__() @@ -184,6 +173,7 @@ def failed(self, event): class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" + def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) @@ -221,13 +211,13 @@ def reset(self): self.results = [] -class ServerEventListener(_ServerEventListener, - monitoring.ServerListener): +class ServerEventListener(_ServerEventListener, monitoring.ServerListener): """Listens to Server events.""" -class ServerAndTopologyEventListener(ServerEventListener, # type: ignore - monitoring.TopologyListener): +class ServerAndTopologyEventListener( # type: ignore[misc] + ServerEventListener, monitoring.TopologyListener +): """Listens to Server and Topology events.""" @@ -300,6 +290,7 @@ def remove_stale_sockets(self, *args, **kwargs): class ScenarioDict(dict): """Dict that returns {} for any unknown key, recursively.""" + def __init__(self, data): def convert(v): if isinstance(v, abc.Mapping): @@ -322,6 +313,7 @@ def __getitem__(self, item): class CompareType(object): """Class that compares equal to any object of the given type.""" + def __init__(self, type): self.type = type @@ -335,6 +327,7 @@ def __ne__(self, other): class FunctionCallRecorder(object): """Utility class to wrap a callable and record its invocations.""" + def __init__(self, function): self._function = function self._call_list = [] @@ -359,6 +352,7 @@ def call_count(self): class TestCreator(object): """Class to create test cases from specifications.""" + def __init__(self, create_test, test_class, test_path): """Create a TestCreator object. @@ -372,7 +366,7 @@ def __init__(self, create_test, test_class, test_path): test case. - `test_path`: path to the directory containing the JSON files with the test specifications. - """ + """ self._create_test = create_test self._test_class = test_class self.test_path = test_path @@ -380,67 +374,63 @@ def __init__(self, create_test, test_class, test_path): def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a test case.""" - if 'minServerVersion' in scenario_def: - min_ver = tuple( - int(elt) for - elt in scenario_def['minServerVersion'].split('.')) + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) if min_ver is not None: method = client_context.require_version_min(*min_ver)(method) - if 'maxServerVersion' in scenario_def: - max_ver = tuple( - int(elt) for - elt in scenario_def['maxServerVersion'].split('.')) + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) - if 'serverless' in scenario_def: - serverless = scenario_def['serverless'] + if "serverless" in scenario_def: + serverless = scenario_def["serverless"] if serverless == "require": serverless_satisfied = client_context.serverless elif serverless == "forbid": serverless_satisfied = not client_context.serverless - else: # unset or "allow" + else: # unset or "allow" serverless_satisfied = True method = unittest.skipUnless( - serverless_satisfied, - "Serverless requirement not satisfied")(method) + serverless_satisfied, "Serverless requirement not satisfied" + )(method) return method @staticmethod def valid_topology(run_on_req): return client_context.is_topology_type( - run_on_req.get('topology', ['single', 'replicaset', 'sharded', - 'load-balanced'])) + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) @staticmethod def min_server_version(run_on_req): - version = run_on_req.get('minServerVersion') + version = run_on_req.get("minServerVersion") if version: - min_ver = tuple(int(elt) for elt in version.split('.')) + min_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version >= min_ver return True @staticmethod def max_server_version(run_on_req): - version = run_on_req.get('maxServerVersion') + version = run_on_req.get("maxServerVersion") if version: - max_ver = tuple(int(elt) for elt in version.split('.')) + max_ver = tuple(int(elt) for elt in version.split(".")) return client_context.version <= max_ver return True @staticmethod def valid_auth_enabled(run_on_req): - if 'authEnabled' in run_on_req: - if run_on_req['authEnabled']: + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: return client_context.auth_enabled return not client_context.auth_enabled return True @staticmethod def serverless_ok(run_on_req): - serverless = run_on_req['serverless'] + serverless = run_on_req["serverless"] if serverless == "require": return client_context.serverless elif serverless == "forbid": @@ -449,30 +439,31 @@ def serverless_ok(run_on_req): return True def should_run_on(self, scenario_def): - run_on = scenario_def.get('runOn', []) + run_on = scenario_def.get("runOn", []) if not run_on: # Always run these tests. return True for req in run_on: - if (self.valid_topology(req) and - self.min_server_version(req) and - self.max_server_version(req) and - self.valid_auth_enabled(req) and - self.serverless_ok(req)): + if ( + self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + and self.serverless_ok(req) + ): return True return False def ensure_run_on(self, scenario_def, method): """Test modifier that enforces a 'runOn' on a test case.""" return client_context._require( - lambda: self.should_run_on(scenario_def), - "runOn not satisfied", - method) + lambda: self.should_run_on(scenario_def), "runOn not satisfied", method + ) def tests(self, scenario_def): """Allow CMAP spec test to override the location of test.""" - return scenario_def['tests'] + return scenario_def["tests"] def create_tests(self): for dirpath, _, filenames in os.walk(self.test_path): @@ -484,25 +475,22 @@ def create_tests(self): # dates. opts = json_util.JSONOptions(tz_aware=False) scenario_def = ScenarioDict( - json_util.loads(scenario_stream.read(), - json_options=opts)) + json_util.loads(scenario_stream.read(), json_options=opts) + ) test_type = os.path.splitext(filename)[0] # Construct test from scenario. for test_def in self.tests(scenario_def): - test_name = 'test_%s_%s_%s' % ( + test_name = "test_%s_%s_%s" % ( dirname, - test_type.replace("-", "_").replace('.', '_'), - str(test_def['description'].replace(" ", "_").replace( - '.', '_'))) + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) - new_test = self._create_test( - scenario_def, test_def, test_name) - new_test = self._ensure_min_max_server_version( - scenario_def, new_test) - new_test = self.ensure_run_on( - scenario_def, new_test) + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) new_test.__name__ = test_name setattr(self._test_class, new_test.__name__, new_test) @@ -514,35 +502,36 @@ def _connection_string(h): return "mongodb://%s" % (str(h),) -def _mongo_client(host, port, authenticate=True, directConnection=None, - **kwargs): +def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): """Create a new client over SSL/TLS if necessary.""" host = host or client_context.host port = port or client_context.port client_options: dict = client_context.default_client_options.copy() if client_context.replica_set_name and not directConnection: - client_options['replicaSet'] = client_context.replica_set_name + client_options["replicaSet"] = client_context.replica_set_name if directConnection is not None: - client_options['directConnection'] = directConnection + client_options["directConnection"] = directConnection client_options.update(kwargs) uri = _connection_string(host) if client_context.auth_enabled and authenticate: # Only add the default username or password if one is not provided. res = parse_uri(uri) - if (not res['username'] and not res['password'] and - 'username' not in client_options and - 'password' not in client_options): - client_options['username'] = db_user - client_options['password'] = db_pwd + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd return MongoClient(uri, port, **client_options) def single_client_noauth(h=None, p=None, **kwargs): """Make a direct connection. Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, - directConnection=True, **kwargs) + return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) def single_client(h=None, p=None, **kwargs): @@ -585,17 +574,16 @@ def ensure_all_connected(client): that are configured on the client. """ hello = client.admin.command(HelloCompat.LEGACY_CMD) - if 'setName' not in hello: + if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") - target_host_list = set(hello['hosts']) - connected_host_list = set([hello['me']]) - admindb = client.get_database('admin') + target_host_list = set(hello["hosts"]) + connected_host_list = set([hello["me"]]) + admindb = client.get_database("admin") # Run hello until we have connected to each host at least once. while connected_host_list != target_host_list: - hello = admindb.command(HelloCompat.LEGACY_CMD, - read_preference=ReadPreference.SECONDARY) + hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) connected_host_list.update([hello["me"]]) @@ -612,19 +600,19 @@ def oid_generated_on_process(oid): def delay(sec): - return '''function() { sleep(%f * 1000); return true; }''' % sec + return """function() { sleep(%f * 1000); return true; }""" % sec def get_command_line(client): - command_line = client.admin.command('getCmdLineOpts') - assert command_line['ok'] == 1, "getCmdLineOpts() failed" + command_line = client.admin.command("getCmdLineOpts") + assert command_line["ok"] == 1, "getCmdLineOpts() failed" return command_line def camel_to_snake(camel): # Regex to convert CamelCase to snake_case. - snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower() + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() def camel_to_upper_camel(camel): @@ -640,21 +628,18 @@ def camel_to_snake_args(arguments): def snake_to_camel(snake): # Regex to convert snake_case to lowerCamelCase. - return re.sub(r'_([a-z])', lambda m: m.group(1).upper(), snake) + return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) def parse_collection_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) return opts @@ -666,11 +651,11 @@ def server_started_with_option(client, cmdline_opt, config_opt): - `config_opt`: The config file option (i.e. nojournal) """ command_line = get_command_line(client) - if 'parsed' in command_line: - parsed = command_line['parsed'] + if "parsed" in command_line: + parsed = command_line["parsed"] if config_opt in parsed: return parsed[config_opt] - argv = command_line['argv'] + argv = command_line["argv"] return cmdline_opt in argv @@ -678,39 +663,37 @@ def server_started_with_auth(client): try: command_line = get_command_line(client) except OperationFailure as e: - msg = e.details.get('errmsg', '') # type: ignore - if e.code == 13 or 'unauthorized' in msg or 'login' in msg: + msg = e.details.get("errmsg", "") # type: ignore + if e.code == 13 or "unauthorized" in msg or "login" in msg: # Unauthorized. return True raise # MongoDB >= 2.0 - if 'parsed' in command_line: - parsed = command_line['parsed'] + if "parsed" in command_line: + parsed = command_line["parsed"] # MongoDB >= 2.6 - if 'security' in parsed: - security = parsed['security'] + if "security" in parsed: + security = parsed["security"] # >= rc3 - if 'authorization' in security: - return security['authorization'] == 'enabled' + if "authorization" in security: + return security["authorization"] == "enabled" # < rc3 - return security.get('auth', False) or bool(security.get('keyFile')) - return parsed.get('auth', False) or bool(parsed.get('keyFile')) + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) # Legacy - argv = command_line['argv'] - return '--auth' in argv or '--keyFile' in argv + argv = command_line["argv"] + return "--auth" in argv or "--keyFile" in argv def drop_collections(db): # Drop all non-system collections in this database. - for coll in db.list_collection_names( - filter={"name": {"$regex": r"^(?!system\.)"}}): + for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): db.drop_collection(coll) def remove_all_users(db): - db.command("dropAllUsersFromDatabase", 1, - writeConcern={"w": client_context.w}) + db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) def joinall(threads): @@ -726,7 +709,7 @@ def connected(client): # Ignore warning that ping is always routed to primary even # if client's read preference isn't PRIMARY. warnings.simplefilter("ignore", UserWarning) - client.admin.command('ping') # Force connection. + client.admin.command("ping") # Force connection. return client @@ -745,7 +728,7 @@ def wait_until(predicate, success_description, timeout=10): Returns the predicate's first true value. """ start = time.time() - interval = min(float(timeout)/100, 0.1) + interval = min(float(timeout) / 100, 0.1) while True: retval = predicate() if retval: @@ -759,17 +742,17 @@ def wait_until(predicate, success_description, timeout=10): def repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" - cmd = SON([('replSetStepDown', 1)]) + cmd = SON([("replSetStepDown", 1)]) cmd.update(kwargs) # Unfreeze a secondary to ensure a speedy election. - client.admin.command( - 'replSetFreeze', 0, read_preference=ReadPreference.SECONDARY) + client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) client.admin.command(cmd) + def is_mongos(client): res = client.admin.command(HelloCompat.LEGACY_CMD) - return res.get('msg', '') == 'isdbgrid' + return res.get("msg", "") == "isdbgrid" def assertRaisesExactly(cls, fn, *args, **kwargs): @@ -781,8 +764,7 @@ def assertRaisesExactly(cls, fn, *args, **kwargs): try: fn(*args, **kwargs) except Exception as e: - assert e.__class__ == cls, "got %s, expected %s" % ( - e.__class__.__name__, cls.__name__) + assert e.__class__ == cls, "got %s, expected %s" % (e.__class__.__name__, cls.__name__) else: raise AssertionError("%s not raised" % cls) @@ -797,6 +779,7 @@ def _ignore_deprecations(): def ignore_deprecations(wrapped=None): """A context manager or a decorator.""" if wrapped: + @functools.wraps(wrapped) def wrapper(*args, **kwargs): with _ignore_deprecations(): @@ -809,7 +792,6 @@ def wrapper(*args, **kwargs): class DeprecationFilter(object): - def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() @@ -831,9 +813,7 @@ def get_pool(client): def get_pools(client): """Get all pools.""" - return [ - server.pool for server in - client._get_topology().select_servers(any_server_selector)] + return [server.pool for server in client._get_topology().select_servers(any_server_selector)] # Constants for run_threads and lazy_client_trial. @@ -900,7 +880,9 @@ def gevent_monkey_patched(): warnings.simplefilter("ignore", ImportWarning) try: import socket + import gevent.socket + return socket.socket is gevent.socket.socket except ImportError: return False @@ -909,8 +891,8 @@ def gevent_monkey_patched(): def eventlet_monkey_patched(): """Check if eventlet's monkey patching is active.""" import threading - return (threading.current_thread.__module__ == - 'eventlet.green.threading') + + return threading.current_thread.__module__ == "eventlet.green.threading" def is_greenthread_patched(): @@ -921,20 +903,19 @@ def disable_replication(client): """Disable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) - secondary.admin.command('configureFailPoint', 'stopReplProducer', - mode='alwaysOn') + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") def enable_replication(client): """Enable replication on all secondaries.""" for host, port in client.secondaries: secondary = single_client(host, port) - secondary.admin.command('configureFailPoint', 'stopReplProducer', - mode='off') + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" + def __init__(self, *args, **kwargs): self.exc = None super(ExceptionCatchingThread, self).__init__(*args, **kwargs) @@ -949,13 +930,14 @@ def run(self): def parse_read_preference(pref): # Make first letter lowercase to match read_pref's modes. - mode_string = pref.get('mode', 'primary') + mode_string = pref.get("mode", "primary") mode_string = mode_string[:1].lower() + mode_string[1:] mode = read_preferences.read_pref_mode_from_name(mode_string) - max_staleness = pref.get('maxStalenessSeconds', -1) - tag_sets = pref.get('tag_sets') + max_staleness = pref.get("maxStalenessSeconds", -1) + tag_sets = pref.get("tag_sets") return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness) + mode, tag_sets=tag_sets, max_staleness=max_staleness + ) def server_name_to_type(name): @@ -963,16 +945,16 @@ def server_name_to_type(name): # Special case, some tests in the spec include the PossiblePrimary # type, but only single-threaded drivers need that type. We call # possible primaries Unknown. - if name == 'PossiblePrimary': + if name == "PossiblePrimary": return SERVER_TYPE.Unknown return getattr(SERVER_TYPE, name) def cat_files(dest, *sources): """Cat multiple files into dest.""" - with open(dest, 'wb') as fdst: + with open(dest, "wb") as fdst: for src in sources: - with open(src, 'rb') as fsrc: + with open(src, "rb") as fsrc: shutil.copyfileobj(fsrc, fdst) @@ -982,65 +964,61 @@ def assertion_context(msg): try: yield except AssertionError as exc: - msg = '%s (%s)' % (exc, msg) + msg = "%s (%s)" % (exc, msg) exc_type, exc_val, exc_tb = sys.exc_info() assert exc_type is not None raise exc_type(exc_val).with_traceback(exc_tb) def parse_spec_options(opts): - if 'readPreference' in opts: - opts['read_preference'] = parse_read_preference( - opts.pop('readPreference')) + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - if 'writeConcern' in opts: - opts['write_concern'] = WriteConcern( - **dict(opts.pop('writeConcern'))) + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - if 'readConcern' in opts: - opts['read_concern'] = ReadConcern( - **dict(opts.pop('readConcern'))) + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - if 'maxTimeMS' in opts: - opts['max_time_ms'] = opts.pop('maxTimeMS') + if "maxTimeMS" in opts: + opts["max_time_ms"] = opts.pop("maxTimeMS") - if 'maxCommitTimeMS' in opts: - opts['max_commit_time_ms'] = opts.pop('maxCommitTimeMS') + if "maxCommitTimeMS" in opts: + opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - if 'hint' in opts: - hint = opts.pop('hint') + if "hint" in opts: + hint = opts.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) - opts['hint'] = hint + opts["hint"] = hint # Properly format 'hint' arguments for the Bulk API tests. - if 'requests' in opts: - reqs = opts.pop('requests') + if "requests" in opts: + reqs = opts.pop("requests") for req in reqs: - if 'name' in req: + if "name" in req: # CRUD v2 format - args = req.pop('arguments', {}) - if 'hint' in args: - hint = args.pop('hint') + args = req.pop("arguments", {}) + if "hint" in args: + hint = args.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) - args['hint'] = hint - req['arguments'] = args + args["hint"] = hint + req["arguments"] = args else: # Unified test format bulk_model, spec = next(iter(req.items())) - if 'hint' in spec: - hint = spec.pop('hint') + if "hint" in spec: + hint = spec.pop("hint") if not isinstance(hint, str): hint = list(hint.items()) - spec['hint'] = hint - opts['requests'] = reqs + spec["hint"] = hint + opts["requests"] = reqs return dict(opts) -def prepare_spec_arguments(spec, arguments, opname, entity_map, - with_txn_callback): +def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) # PyMongo accepts sort as list of tuples. @@ -1051,8 +1029,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) # Aggregate uses "batchSize", while find uses batch_size. - elif ((arg_name == "batchSize" or arg_name == "allowDiskUse") and - opname == "aggregate"): + elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": continue # Requires boolean returnDocument. elif arg_name == "returnDocument": @@ -1061,7 +1038,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, # Parse each request into a bulk write model. requests = [] for request in arguments["requests"]: - if 'name' in request: + if "name" in request: # CRUD v2 format bulk_model = camel_to_upper_camel(request["name"]) bulk_class = getattr(operations, bulk_model) @@ -1074,39 +1051,37 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, requests.append(bulk_class(**dict(bulk_arguments))) arguments["requests"] = requests elif arg_name == "session": - arguments['session'] = entity_map[arguments['session']] - elif (opname in ('command', 'run_admin_command') and - arg_name == 'command'): + arguments["session"] = entity_map[arguments["session"]] + elif opname in ("command", "run_admin_command") and arg_name == "command": # Ensure the first key is the command name. - ordered_command = SON([(spec['command_name'], 1)]) - ordered_command.update(arguments['command']) - arguments['command'] = ordered_command - elif opname == 'open_download_stream' and arg_name == 'id': - arguments['file_id'] = arguments.pop(arg_name) - elif opname != 'find' and c2s == 'max_time_ms': + ordered_command = SON([(spec["command_name"], 1)]) + ordered_command.update(arguments["command"]) + arguments["command"] = ordered_command + elif opname == "open_download_stream" and arg_name == "id": + arguments["file_id"] = arguments.pop(arg_name) + elif opname != "find" and c2s == "max_time_ms": # find is the only method that accepts snake_case max_time_ms. # All other methods take kwargs which must use the server's # camelCase maxTimeMS. See PYTHON-1855. - arguments['maxTimeMS'] = arguments.pop('max_time_ms') - elif opname == 'with_transaction' and arg_name == 'callback': - if 'operations' in arguments[arg_name]: + arguments["maxTimeMS"] = arguments.pop("max_time_ms") + elif opname == "with_transaction" and arg_name == "callback": + if "operations" in arguments[arg_name]: # CRUD v2 format - callback_ops = arguments[arg_name]['operations'] + callback_ops = arguments[arg_name]["operations"] else: # Unified test format callback_ops = arguments[arg_name] - arguments['callback'] = lambda _: with_txn_callback( - copy.deepcopy(callback_ops)) - elif opname == 'drop_collection' and arg_name == 'collection': - arguments['name_or_collection'] = arguments.pop(arg_name) - elif opname == 'create_collection': - if arg_name == 'collection': - arguments['name'] = arguments.pop(arg_name) + arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) + elif opname == "drop_collection" and arg_name == "collection": + arguments["name_or_collection"] = arguments.pop(arg_name) + elif opname == "create_collection": + if arg_name == "collection": + arguments["name"] = arguments.pop(arg_name) # Any other arguments to create_collection are passed through # **kwargs. - elif opname == 'create_index' and arg_name == 'keys': - arguments['keys'] = list(arguments.pop(arg_name).items()) - elif opname == 'drop_index' and arg_name == 'name': - arguments['index_or_name'] = arguments.pop(arg_name) + elif opname == "create_index" and arg_name == "keys": + arguments["keys"] = list(arguments.pop(arg_name).items()) + elif opname == "drop_index" and arg_name == "name": + arguments["index_or_name"] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 76125b6f15..e693fc25f0 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -20,37 +20,37 @@ sys.path[0:0] = [""] +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import MockPool, parse_read_preference + from bson import json_util -from pymongo.common import clean_node, HEARTBEAT_FREQUENCY +from pymongo.common import HEARTBEAT_FREQUENCY, clean_node from pymongo.errors import AutoReconnect, ConfigurationError from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription -from pymongo.settings import TopologySettings from pymongo.server_selectors import writable_server_selector +from pymongo.settings import TopologySettings from pymongo.topology import Topology -from test import unittest -from test.utils import MockPool, parse_read_preference -from test.pymongo_mocks import DummyMonitor def get_addresses(server_list): seeds = [] hosts = [] for server in server_list: - seeds.append(clean_node(server['address'])) - hosts.append(server['address']) + seeds.append(clean_node(server["address"])) + hosts.append(server["address"]) return seeds, hosts def make_last_write_date(server): epoch = datetime.datetime.utcfromtimestamp(0) - millis = server.get('lastWrite', {}).get('lastWriteDate') + millis = server.get("lastWrite", {}).get("lastWriteDate") if millis: diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) / 1000 micros = diff * 1000 - return epoch + datetime.timedelta( - seconds=seconds, microseconds=micros) + return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) else: # "Unknown" server. return epoch @@ -58,61 +58,59 @@ def make_last_write_date(server): def make_server_description(server, hosts): """Make a ServerDescription from server info in a JSON test.""" - server_type = server['type'] + server_type = server["type"] if server_type in ("Unknown", "PossiblePrimary"): - return ServerDescription(clean_node(server['address']), Hello({})) + return ServerDescription(clean_node(server["address"]), Hello({})) - hello_response = {'ok': True, 'hosts': hosts} + hello_response = {"ok": True, "hosts": hosts} if server_type not in ("Standalone", "Mongos", "RSGhost"): - hello_response['setName'] = "rs" + hello_response["setName"] = "rs" if server_type == "RSPrimary": hello_response[HelloCompat.LEGACY_CMD] = True elif server_type == "RSSecondary": - hello_response['secondary'] = True + hello_response["secondary"] = True elif server_type == "Mongos": - hello_response['msg'] = 'isdbgrid' + hello_response["msg"] = "isdbgrid" elif server_type == "RSGhost": - hello_response['isreplicaset'] = True + hello_response["isreplicaset"] = True elif server_type == "RSArbiter": - hello_response['arbiterOnly'] = True + hello_response["arbiterOnly"] = True - hello_response['lastWrite'] = { - 'lastWriteDate': make_last_write_date(server) - } + hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} - for field in 'maxWireVersion', 'tags', 'idleWritePeriodMillis': + for field in "maxWireVersion", "tags", "idleWritePeriodMillis": if field in server: hello_response[field] = server[field] - hello_response.setdefault('maxWireVersion', 6) + hello_response.setdefault("maxWireVersion", 6) # Sets _last_update_time to now. - sd = ServerDescription(clean_node(server['address']), - Hello(hello_response), - round_trip_time=server['avg_rtt_ms'] / 1000.0) + sd = ServerDescription( + clean_node(server["address"]), + Hello(hello_response), + round_trip_time=server["avg_rtt_ms"] / 1000.0, + ) - if 'lastUpdateTime' in server: - sd._last_update_time = server['lastUpdateTime'] / 1000.0 # ms to sec. + if "lastUpdateTime" in server: + sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. return sd def get_topology_type_name(scenario_def): - td = scenario_def['topology_description'] - name = td['type'] - if name == 'Unknown': + td = scenario_def["topology_description"] + name = td["type"] + if name == "Unknown": # PyMongo never starts a topology in type Unknown. - return 'Sharded' if len(td['servers']) > 1 else 'Single' + return "Sharded" if len(td["servers"]) > 1 else "Single" else: return name def get_topology_settings_dict(**kwargs): settings = dict( - monitor_class=DummyMonitor, - heartbeat_frequency=HEARTBEAT_FREQUENCY, - pool_class=MockPool + monitor_class=DummyMonitor, heartbeat_frequency=HEARTBEAT_FREQUENCY, pool_class=MockPool ) settings.update(kwargs) return settings @@ -120,25 +118,20 @@ def get_topology_settings_dict(**kwargs): def create_topology(scenario_def, **kwargs): # Initialize topologies. - if 'heartbeatFrequencyMS' in scenario_def: - frequency = int(scenario_def['heartbeatFrequencyMS']) / 1000.0 + if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 else: frequency = HEARTBEAT_FREQUENCY - seeds, hosts = get_addresses( - scenario_def['topology_description']['servers']) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) topology_type = get_topology_type_name(scenario_def) - if topology_type == 'LoadBalanced': - kwargs.setdefault('load_balanced', True) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) # Force topology description to ReplicaSet - elif topology_type in ['ReplicaSetNoPrimary', 'ReplicaSetWithPrimary']: - kwargs.setdefault('replica_set_name', 'rs') - settings = get_topology_settings_dict( - heartbeat_frequency=frequency, - seeds=seeds, - **kwargs - ) + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) # "Eligible servers" is defined in the server selection spec as # the set of servers matching both the ReadPreference's mode @@ -147,21 +140,21 @@ def create_topology(scenario_def, **kwargs): topology.open() # Update topologies with server descriptions. - for server in scenario_def['topology_description']['servers']: + for server in scenario_def["topology_description"]["servers"]: server_description = make_server_description(server, hosts) topology.on_change(server_description) # Assert that descriptions match - assert (scenario_def['topology_description']['type'] == - topology.description.topology_type_name), topology.description.topology_type_name + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name return topology def create_test(scenario_def): def run_scenario(self): - _, hosts = get_addresses( - scenario_def['topology_description']['servers']) + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) # "Eligible servers" is defined in the server selection spec as # the set of servers matching both the ReadPreference's mode # and tag sets. @@ -170,16 +163,15 @@ def run_scenario(self): # "In latency window" is defined in the server selection # spec as the subset of suitable_servers that falls within the # allowable latency window. - top_suitable = create_topology( - scenario_def, local_threshold_ms=1000000) + top_suitable = create_topology(scenario_def, local_threshold_ms=1000000) # Create server selector. if scenario_def.get("operation") == "write": pref = writable_server_selector else: # Make first letter lowercase to match read_pref's modes. - pref_def = scenario_def['read_preference'] - if scenario_def.get('error'): + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): with self.assertRaises((ConfigurationError, ValueError)): # Error can be raised when making Read Pref or selecting. pref = parse_read_preference(pref_def) @@ -189,35 +181,33 @@ def run_scenario(self): pref = parse_read_preference(pref_def) # Select servers. - if not scenario_def.get('suitable_servers'): + if not scenario_def.get("suitable_servers"): with self.assertRaises(AutoReconnect): top_suitable.select_server(pref, server_selection_timeout=0) return - if not scenario_def['in_latency_window']: + if not scenario_def["in_latency_window"]: with self.assertRaises(AutoReconnect): top_latency.select_server(pref, server_selection_timeout=0) return - actual_suitable_s = top_suitable.select_servers( - pref, server_selection_timeout=0) - actual_latency_s = top_latency.select_servers( - pref, server_selection_timeout=0) + actual_suitable_s = top_suitable.select_servers(pref, server_selection_timeout=0) + actual_latency_s = top_latency.select_servers(pref, server_selection_timeout=0) expected_suitable_servers = {} - for server in scenario_def['suitable_servers']: + for server in scenario_def["suitable_servers"]: server_description = make_server_description(server, hosts) - expected_suitable_servers[server['address']] = server_description + expected_suitable_servers[server["address"]] = server_description actual_suitable_servers = {} for s in actual_suitable_s: - actual_suitable_servers["%s:%d" % (s.description.address[0], - s.description.address[1])] = s.description + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description - self.assertEqual(len(actual_suitable_servers), - len(expected_suitable_servers)) + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) for k, actual in actual_suitable_servers.items(): expected = expected_suitable_servers[k] self.assertEqual(expected.address, actual.address) @@ -227,18 +217,17 @@ def run_scenario(self): self.assertEqual(expected.all_hosts, actual.all_hosts) expected_latency_servers = {} - for server in scenario_def['in_latency_window']: + for server in scenario_def["in_latency_window"]: server_description = make_server_description(server, hosts) - expected_latency_servers[server['address']] = server_description + expected_latency_servers[server["address"]] = server_description actual_latency_servers = {} for s in actual_latency_s: - actual_latency_servers["%s:%d" % - (s.description.address[0], - s.description.address[1])] = s.description + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description - self.assertEqual(len(actual_latency_servers), - len(expected_latency_servers)) + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) for k, actual in actual_latency_servers.items(): expected = expected_latency_servers[k] self.assertEqual(expected.address, actual.address) @@ -256,7 +245,7 @@ class TestAllScenarios(unittest.TestCase): for dirpath, _, filenames in os.walk(test_dir): dirname = os.path.split(dirpath) - dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1] + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] for filename in filenames: if os.path.splitext(filename)[1] != ".json": @@ -266,8 +255,7 @@ class TestAllScenarios(unittest.TestCase): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = 'test_%s_%s' % ( - dirname, os.path.splitext(filename)[0]) + test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 8a53a365db..4a71fef328 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -16,41 +16,36 @@ import functools import threading - from collections import abc +from test import IntegrationTest, client_context, client_knobs +from test.utils import ( + CMAPListener, + CompareType, + EventListener, + OvertCommandListener, + ServerAndTopologyEventListener, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + rs_client, +) from typing import List from bson import decode, encode from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON - from gridfs import GridFSBucket - from pymongo import client_session from pymongo.command_cursor import CommandCursor from pymongo.cursor import Cursor -from pymongo.errors import (BulkWriteError, - OperationFailure, - PyMongoError) +from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.results import _WriteResult, BulkWriteResult +from pymongo.results import BulkWriteResult, _WriteResult from pymongo.write_concern import WriteConcern -from test import (client_context, - client_knobs, - IntegrationTest) -from test.utils import (EventListener, camel_to_snake, - camel_to_snake_args, - CompareType, - CMAPListener, - OvertCommandListener, - parse_spec_options, - prepare_spec_arguments, - rs_client, - ServerAndTopologyEventListener) - class SpecRunnerThread(threading.Thread): def __init__(self, name): @@ -74,7 +69,7 @@ def stop(self): def run(self): while not self.stopped or self.ops: - if not self. ops: + if not self.ops: with self.cond: self.cond.wait(10) if self.ops: @@ -97,8 +92,7 @@ def setUpClass(cls): cls.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, - min_heartbeat_interval=0.1) + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @classmethod @@ -115,7 +109,7 @@ def setUp(self): self.maxDiff = None def _set_fail_point(self, client, command_args): - cmd = SON([('configureFailPoint', 'failCommand')]) + cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) client.admin.command(cmd) @@ -132,7 +126,7 @@ def targeted_fail_point(self, session, fail_point): clients = {c.address: c for c in self.mongos_clients} client = clients[session._pinned_address] self._set_fail_point(client, fail_point) - self.addCleanup(self.set_fail_point, {'mode': 'off'}) + self.addCleanup(self.set_fail_point, {"mode": "off"}) def assert_session_pinned(self, session): """Run the assertSessionPinned test operation. @@ -162,12 +156,12 @@ def assert_collection_not_exists(self, database, collection): def assert_index_exists(self, database, collection, index): """Run the assertIndexExists test operation.""" coll = self.client[database][collection] - self.assertIn(index, [doc['name'] for doc in coll.list_indexes()]) + self.assertIn(index, [doc["name"] for doc in coll.list_indexes()]) def assert_index_not_exists(self, database, collection, index): """Run the assertIndexNotExists test operation.""" coll = self.client[database][collection] - self.assertNotIn(index, [doc['name'] for doc in coll.list_indexes()]) + self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()]) def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] @@ -176,14 +170,14 @@ def assertErrorLabelsContain(self, exc, expected_labels): def assertErrorLabelsOmit(self, exc, omit_labels): for label in omit_labels: self.assertFalse( - exc.has_error_label(label), - msg='error labels should not contain %s' % (label,)) + exc.has_error_label(label), msg="error labels should not contain %s" % (label,) + ) def kill_all_sessions(self): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: try: - client.admin.command('killAllSessions', []) + client.admin.command("killAllSessions", []) except OperationFailure: # "operation was interrupted" by killing the command's # own session. @@ -205,8 +199,7 @@ def check_result(self, expected_result, result): for res in expected_result: prop = camel_to_snake(res) # SPEC-869: Only BulkWriteResult has upserted_count. - if (prop == "upserted_count" - and not isinstance(result, BulkWriteResult)): + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: upserted_count = 1 else: @@ -215,8 +208,7 @@ def check_result(self, expected_result, result): elif prop == "inserted_ids": # BulkWriteResult does not have inserted_ids. if isinstance(result, BulkWriteResult): - self.assertEqual(len(expected_result[res]), - result.inserted_count) + self.assertEqual(len(expected_result[res]), result.inserted_count) else: # InsertManyResult may be compared to [id1] from the # crud spec or {"0": id1} from the retryable write spec. @@ -233,8 +225,7 @@ def check_result(self, expected_result, result): expected_ids[int(str_index)] = ids[str_index] self.assertEqual(expected_ids, result.upserted_ids, prop) else: - self.assertEqual( - getattr(result, prop), expected_result[res], prop) + self.assertEqual(getattr(result, prop), expected_result[res], prop) return True else: @@ -245,7 +236,7 @@ def get_object_name(self, op): Transaction spec says 'object' is required. """ - return op['object'] + return op["object"] @staticmethod def parse_options(opts): @@ -253,54 +244,54 @@ def parse_options(opts): def run_operation(self, sessions, collection, operation): original_collection = collection - name = camel_to_snake(operation['name']) - if name == 'run_command': - name = 'command' - elif name == 'download_by_name': - name = 'open_download_stream_by_name' - elif name == 'download': - name = 'open_download_stream' - elif name == 'map_reduce': - self.skipTest('PyMongo does not support mapReduce') - elif name == 'count': - self.skipTest('PyMongo does not support count') + name = camel_to_snake(operation["name"]) + if name == "run_command": + name = "command" + elif name == "download_by_name": + name = "open_download_stream_by_name" + elif name == "download": + name = "open_download_stream" + elif name == "map_reduce": + self.skipTest("PyMongo does not support mapReduce") + elif name == "count": + self.skipTest("PyMongo does not support count") database = collection.database collection = database.get_collection(collection.name) - if 'collectionOptions' in operation: + if "collectionOptions" in operation: collection = collection.with_options( - **self.parse_options(operation['collectionOptions'])) + **self.parse_options(operation["collectionOptions"]) + ) object_name = self.get_object_name(operation) - if object_name == 'gridfsbucket': + if object_name == "gridfsbucket": # Only create the GridFSBucket when we need it (for the gridfs # retryable reads tests). obj = GridFSBucket(database, bucket_name=collection.name) else: objects = { - 'client': database.client, - 'database': database, - 'collection': collection, - 'testRunner': self + "client": database.client, + "database": database, + "collection": collection, + "testRunner": self, } objects.update(sessions) obj = objects[object_name] # Combine arguments with options and handle special cases. - arguments = operation.get('arguments', {}) + arguments = operation.get("arguments", {}) arguments.update(arguments.pop("options", {})) self.parse_options(arguments) cmd = getattr(obj, name) with_txn_callback = functools.partial( - self.run_operations, sessions, original_collection, - in_with_transaction=True) - prepare_spec_arguments(operation, arguments, name, sessions, - with_txn_callback) + self.run_operations, sessions, original_collection, in_with_transaction=True + ) + prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback) - if name == 'run_on_thread': - args = {'sessions': sessions, 'collection': collection} + if name == "run_on_thread": + args = {"sessions": sessions, "collection": collection} args.update(arguments) arguments = args result = cmd(**dict(arguments)) @@ -313,10 +304,10 @@ def run_operation(self, sessions, collection, operation): if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: # Read from the primary to ensure causal consistency. out = collection.database.get_collection( - arguments["pipeline"][-1]["$out"], - read_preference=ReadPreference.PRIMARY) + arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY + ) return out.find() - if 'download' in name: + if "download" in name: result = Binary(result.read()) if isinstance(result, Cursor) or isinstance(result, CommandCursor): @@ -329,10 +320,9 @@ def allowable_errors(self, op): return (PyMongoError,) def _run_op(self, sessions, collection, op, in_with_transaction): - expected_result = op.get('result') + expected_result = op.get("result") if expect_error(op): - with self.assertRaises(self.allowable_errors(op), - msg=op['name']) as context: + with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: self.run_operation(sessions, collection, op.copy()) if expect_error_message(expected_result): @@ -340,19 +330,17 @@ def _run_op(self, sessions, collection, op, in_with_transaction): errmsg = str(context.exception.details).lower() else: errmsg = str(context.exception).lower() - self.assertIn(expected_result['errorContains'].lower(), - errmsg) + self.assertIn(expected_result["errorContains"].lower(), errmsg) if expect_error_code(expected_result): - self.assertEqual(expected_result['errorCodeName'], - context.exception.details.get('codeName')) + self.assertEqual( + expected_result["errorCodeName"], context.exception.details.get("codeName") + ) if expect_error_labels_contain(expected_result): self.assertErrorLabelsContain( - context.exception, - expected_result['errorLabelsContain']) + context.exception, expected_result["errorLabelsContain"] + ) if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit( - context.exception, - expected_result['errorLabelsOmit']) + self.assertErrorLabelsOmit(context.exception, expected_result["errorLabelsOmit"]) # Reraise the exception if we're in the with_transaction # callback. @@ -360,65 +348,61 @@ def _run_op(self, sessions, collection, op, in_with_transaction): raise context.exception else: result = self.run_operation(sessions, collection, op.copy()) - if 'result' in op: - if op['name'] == 'runCommand': + if "result" in op: + if op["name"] == "runCommand": self.check_command_result(expected_result, result) else: self.check_result(expected_result, result) - def run_operations(self, sessions, collection, ops, - in_with_transaction=False): + def run_operations(self, sessions, collection, ops, in_with_transaction=False): for op in ops: self._run_op(sessions, collection, op, in_with_transaction) # TODO: factor with test_command_monitoring.py def check_events(self, test, listener, session_ids): res = listener.results - if not len(test['expectations']): + if not len(test["expectations"]): return # Give a nicer message when there are missing or extra events - cmds = decode_raw([event.command for event in res['started']]) - self.assertEqual( - len(res['started']), len(test['expectations']), cmds) - for i, expectation in enumerate(test['expectations']): + cmds = decode_raw([event.command for event in res["started"]]) + self.assertEqual(len(res["started"]), len(test["expectations"]), cmds) + for i, expectation in enumerate(test["expectations"]): event_type = next(iter(expectation)) - event = res['started'][i] + event = res["started"][i] # The tests substitute 42 for any number other than 0. - if (event.command_name == 'getMore' - and event.command['getMore']): - event.command['getMore'] = Int64(42) - elif event.command_name == 'killCursors': - event.command['cursors'] = [Int64(42)] - elif event.command_name == 'update': + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = Int64(42) + elif event.command_name == "killCursors": + event.command["cursors"] = [Int64(42)] + elif event.command_name == "update": # TODO: remove this once PYTHON-1744 is done. # Add upsert and multi fields back into expectations. - updates = expectation[event_type]['command']['updates'] + updates = expectation[event_type]["command"]["updates"] for update in updates: - update.setdefault('upsert', False) - update.setdefault('multi', False) + update.setdefault("upsert", False) + update.setdefault("multi", False) # Replace afterClusterTime: 42 with actual afterClusterTime. - expected_cmd = expectation[event_type]['command'] - expected_read_concern = expected_cmd.get('readConcern') + expected_cmd = expectation[event_type]["command"] + expected_read_concern = expected_cmd.get("readConcern") if expected_read_concern is not None: - time = expected_read_concern.get('afterClusterTime') + time = expected_read_concern.get("afterClusterTime") if time == 42: - actual_time = event.command.get( - 'readConcern', {}).get('afterClusterTime') + actual_time = event.command.get("readConcern", {}).get("afterClusterTime") if actual_time is not None: - expected_read_concern['afterClusterTime'] = actual_time + expected_read_concern["afterClusterTime"] = actual_time - recovery_token = expected_cmd.get('recoveryToken') + recovery_token = expected_cmd.get("recoveryToken") if recovery_token == 42: - expected_cmd['recoveryToken'] = CompareType(dict) + expected_cmd["recoveryToken"] = CompareType(dict) # Replace lsid with a name like "session0" to match test. - if 'lsid' in event.command: + if "lsid" in event.command: for name, lsid in session_ids.items(): - if event.command['lsid'] == lsid: - event.command['lsid'] = name + if event.command["lsid"] == lsid: + event.command["lsid"] = name break for attr, expected in expectation[event_type].items(): @@ -428,28 +412,27 @@ def check_events(self, test, listener, session_ids): for key, val in expected.items(): if val is None: if key in actual: - self.fail("Unexpected key [%s] in %r" % ( - key, actual)) + self.fail("Unexpected key [%s] in %r" % (key, actual)) elif key not in actual: - self.fail("Expected key [%s] in %r" % ( - key, actual)) + self.fail("Expected key [%s] in %r" % (key, actual)) else: - self.assertEqual(val, decode_raw(actual[key]), - "Key [%s] in %s" % (key, actual)) + self.assertEqual( + val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) + ) else: self.assertEqual(actual, expected) def maybe_skip_scenario(self, test): - if test.get('skipReason'): - self.skipTest(test.get('skipReason')) + if test.get("skipReason"): + self.skipTest(test.get("skipReason")) def get_scenario_db_name(self, scenario_def): """Allow subclasses to override a test's database name.""" - return scenario_def['database_name'] + return scenario_def["database_name"] def get_scenario_coll_name(self, scenario_def): """Allow subclasses to override a test's collection name.""" - return scenario_def['collection_name'] + return scenario_def["collection_name"] def get_outcome_coll_name(self, outcome, collection): """Allow subclasses to override outcome collection.""" @@ -458,7 +441,7 @@ def get_outcome_coll_name(self, outcome, collection): def run_test_ops(self, sessions, collection, test): """Added to allow retryable writes spec to override a test's operation.""" - self.run_operations(sessions, collection, test['operations']) + self.run_operations(sessions, collection, test["operations"]) def parse_client_options(self, opts): """Allow encryption spec to override a clientOptions parsing.""" @@ -470,14 +453,13 @@ def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w='majority')) + db = client_context.client.get_database(db_name, write_concern=WriteConcern(w="majority")) coll = db[coll_name] coll.drop() db.create_collection(coll_name) - if scenario_def['data']: + if scenario_def["data"]: # Load data. - coll.insert_many(scenario_def['data']) + coll.insert_many(scenario_def["data"]) def run_scenario(self, scenario_def, test): self.maybe_skip_scenario(test) @@ -495,22 +477,22 @@ def run_scenario(self, scenario_def, test): c[database_name][collection_name].distinct("x") # Configure the fail point before creating the client. - if 'failPoint' in test: - fp = test['failPoint'] + if "failPoint" in test: + fp = test["failPoint"] self.set_fail_point(fp) - self.addCleanup(self.set_fail_point, { - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) listener = OvertCommandListener() pool_listener = CMAPListener() server_listener = ServerAndTopologyEventListener() # Create a new client, to avoid interference from pooled sessions. - client_options = self.parse_client_options(test['clientOptions']) + client_options = self.parse_client_options(test["clientOptions"]) # MMAPv1 does not support retryable writes. - if (client_options.get('retryWrites') is True and - client_context.storage_engine == 'mmapv1'): + if client_options.get("retryWrites") is True and client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support retryWrites=True") - use_multi_mongos = test['useMultipleMongoses'] + use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: if client_context.load_balancer or client_context.serverless: @@ -518,9 +500,8 @@ def run_scenario(self, scenario_def, test): elif client_context.is_mongos: host = client_context.mongos_seeds() client = rs_client( - h=host, - event_listeners=[listener, pool_listener, server_listener], - **client_options) + h=host, event_listeners=[listener, pool_listener, server_listener], **client_options + ) self.scenario_client = client self.listener = listener self.pool_listener = pool_listener @@ -536,13 +517,12 @@ def run_scenario(self, scenario_def, test): # the running server version. if not client_context.sessions_enabled: break - session_name = 'session%d' % i - opts = camel_to_snake_args(test['sessionOptions'][session_name]) - if 'default_transaction_options' in opts: - txn_opts = self.parse_options( - opts['default_transaction_options']) + session_name = "session%d" % i + opts = camel_to_snake_args(test["sessionOptions"][session_name]) + if "default_transaction_options" in opts: + txn_opts = self.parse_options(opts["default_transaction_options"]) txn_opts = client_session.TransactionOptions(**txn_opts) - opts['default_transaction_options'] = txn_opts + opts["default_transaction_options"] = txn_opts s = client.start_session(**dict(opts)) @@ -560,74 +540,74 @@ def run_scenario(self, scenario_def, test): self.check_events(test, listener, session_ids) # Disable fail points. - if 'failPoint' in test: - fp = test['failPoint'] - self.set_fail_point({ - 'configureFailPoint': fp['configureFailPoint'], 'mode': 'off'}) + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point({"configureFailPoint": fp["configureFailPoint"], "mode": "off"}) # Assert final state is expected. - outcome = test['outcome'] - expected_c = outcome.get('collection') + outcome = test["outcome"] + expected_c = outcome.get("collection") if expected_c is not None: - outcome_coll_name = self.get_outcome_coll_name( - outcome, collection) + outcome_coll_name = self.get_outcome_coll_name(outcome, collection) # Read from the primary with local read concern to ensure causal # consistency. - outcome_coll = client_context.client[ - collection.database.name].get_collection( + outcome_coll = client_context.client[collection.database.name].get_collection( outcome_coll_name, read_preference=ReadPreference.PRIMARY, - read_concern=ReadConcern('local')) - actual_data = list(outcome_coll.find(sort=[('_id', 1)])) + read_concern=ReadConcern("local"), + ) + actual_data = list(outcome_coll.find(sort=[("_id", 1)])) # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. - self.assertEqual(wrap_types(expected_c['data']), actual_data) + self.assertEqual(wrap_types(expected_c["data"]), actual_data) def expect_any_error(op): if isinstance(op, dict): - return op.get('error') + return op.get("error") return False def expect_error_message(expected_result): if isinstance(expected_result, dict): - return isinstance(expected_result['errorContains'], str) + return isinstance(expected_result["errorContains"], str) return False def expect_error_code(expected_result): if isinstance(expected_result, dict): - return expected_result['errorCodeName'] + return expected_result["errorCodeName"] return False def expect_error_labels_contain(expected_result): if isinstance(expected_result, dict): - return expected_result['errorLabelsContain'] + return expected_result["errorLabelsContain"] return False def expect_error_labels_omit(expected_result): if isinstance(expected_result, dict): - return expected_result['errorLabelsOmit'] + return expected_result["errorLabelsOmit"] return False def expect_error(op): - expected_result = op.get('result') - return (expect_any_error(op) or - expect_error_message(expected_result) - or expect_error_code(expected_result) - or expect_error_labels_contain(expected_result) - or expect_error_labels_omit(expected_result)) + expected_result = op.get("result") + return ( + expect_any_error(op) + or expect_error_message(expected_result) + or expect_error_code(expected_result) + or expect_error_labels_contain(expected_result) + or expect_error_labels_omit(expected_result) + ) def end_sessions(sessions): @@ -639,13 +619,13 @@ def end_sessions(sessions): def decode_raw(val): """Decode RawBSONDocuments in the given container.""" if isinstance(val, (list, abc.Mapping)): - return decode(encode({'v': val}))['v'] + return decode(encode({"v": val}))["v"] return val TYPES = { - 'binData': Binary, - 'long': Int64, + "binData": Binary, + "long": Int64, } @@ -654,7 +634,7 @@ def wrap_types(val): if isinstance(val, list): return [wrap_types(v) for v in val] if isinstance(val, abc.Mapping): - typ = val.get('$$type') + typ = val.get("$$type") if typ: return CompareType(TYPES[typ]) d = {} diff --git a/test/version.py b/test/version.py index 3348060bfc..e102db7111 100644 --- a/test/version.py +++ b/test/version.py @@ -16,7 +16,6 @@ class Version(tuple): - def __new__(cls, *version): padded_version = cls._padded(version, 4) return super(Version, cls).__new__(cls, tuple(padded_version)) @@ -43,16 +42,15 @@ def from_string(cls, version_string): version_string = version_string[0:-1] mod = -1 # Deal with '-rcX' substrings - if '-rc' in version_string: - version_string = version_string[0:version_string.find('-rc')] + if "-rc" in version_string: + version_string = version_string[0 : version_string.find("-rc")] mod = -1 # Deal with git describe generated substrings - elif '-' in version_string: - version_string = version_string[0:version_string.find('-')] + elif "-" in version_string: + version_string = version_string[0 : version_string.find("-")] mod = -1 bump_patch_level = True - version = [int(part) for part in version_string.split(".")] version = cls._padded(version, 3) # Make from_string and from_version_array agree. For example: @@ -77,9 +75,9 @@ def from_version_array(cls, version_array): @classmethod def from_client(cls, client): info = client.server_info() - if 'versionArray' in info: - return cls.from_version_array(info['versionArray']) - return cls.from_string(info['version']) + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) def at_least(self, *other_version): return self >= Version(*other_version) diff --git a/tools/clean.py b/tools/clean.py index 53729d6406..7196b00e90 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -34,12 +34,14 @@ try: from pymongo import _cmessage # type: ignore[attr-defined] + sys.exit("could still import _cmessage") except ImportError: pass try: from bson import _cbson + sys.exit("could still import _cbson") except ImportError: pass diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index e6fd83a36b..a2d4954789 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -18,6 +18,7 @@ """ import sys + sys.path[0:0] = [""] import bson diff --git a/tools/ocsptest.py b/tools/ocsptest.py index 149da000ba..14df8a8fe3 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -21,18 +21,20 @@ # Enable logs in this format: # 2020-06-08 23:49:35,982 DEBUG ocsp_support Peer did not staple an OCSP response -FORMAT = '%(asctime)s %(levelname)s %(module)s %(message)s' +FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) + def check_ocsp(host, port, capath): ctx = get_ssl_context( - None, # certfile - None, # passphrase + None, # certfile + None, # passphrase capath, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False) # disable_ocsp_endpoint_check + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, + ) # disable_ocsp_endpoint_check # Ensure we're using pyOpenSSL. assert isinstance(ctx, SSLContext) @@ -44,18 +46,15 @@ def check_ocsp(host, port, capath): finally: s.close() + def main(): - parser = argparse.ArgumentParser( - description='Debug OCSP') - parser.add_argument( - '--host', type=str, required=True, help="Host to connect to") - parser.add_argument( - '-p', '--port', type=int, default=443, help="Port to connect to") - parser.add_argument( - '--ca_file', type=str, default=None, help="CA file for host") + parser = argparse.ArgumentParser(description="Debug OCSP") + parser.add_argument("--host", type=str, required=True, help="Host to connect to") + parser.add_argument("-p", "--port", type=int, default=443, help="Port to connect to") + parser.add_argument("--ca_file", type=str, default=None, help="CA file for host") args = parser.parse_args() check_ocsp(args.host, args.port, args.ca_file) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() From b7057ecf9f5f1591ffa0ba2b5d716cc8f1ad3068 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 9 Feb 2022 14:24:23 -0600 Subject: [PATCH 0580/2111] PYTHON-1834 (cont) Add pre-commit config (#853) --- .git-blame-ignore-revs | 2 ++ .github/workflows/test-python.yml | 12 +++++++++ .pre-commit-config.yaml | 31 ++++++++++++++++++++++++ CONTRIBUTING.rst | 19 ++++++++++++++- THIRD-PARTY-NOTICES | 1 - doc/api/pymongo/event_loggers.rst | 2 +- doc/api/pymongo/topology_description.rst | 1 - doc/atlas.rst | 1 - doc/examples/server_selection.rst | 2 +- doc/migrate-to-pymongo4.rst | 2 +- doc/tools.rst | 16 ++++++------ test/certificates/ca.pem | 2 +- 12 files changed, 75 insertions(+), 16 deletions(-) create mode 100644 .git-blame-ignore-revs create mode 100644 .pre-commit-config.yaml diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..8f02673e41 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,2 @@ +# Initial pre-commit reformat +5578999a90e439fbca06fc0ffc98f4d04e96f7b4 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index ca1845e2cd..651f863d89 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -5,6 +5,18 @@ on: pull_request: jobs: + + pre-commit: + name: pre-commit + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - uses: pre-commit/action@v2.0.0 + with: + extra_args: --all-files --hook-stage=manual + build: # supercharge/mongodb-github-action requires containers so we don't test other platforms runs-on: ${{ matrix.os }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..39062bbdf5 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.4.0 + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-toml + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + exclude: WHEEL + exclude_types: [json] + - id: forbid-new-submodules + - id: trailing-whitespace + exclude: .patch + exclude_types: [json] + +- repo: https://github.com/psf/black + rev: 22.1.0 + hooks: + - id: black + files: \.py$ + args: [--line-length=100] + +- repo: https://github.com/PyCQA/isort + rev: 5.7.0 + hooks: + - id: isort + files: \.py$ + args: [--profile=black] diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 40dca00e0c..bbc22954a0 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -38,6 +38,23 @@ General Guidelines from the cmd line to run the test suite). - Add yourself to doc/contributors.rst :) +Running Linters +--------------- + +PyMongo uses `pre-commit `_ +for managing linting of the codebase. +``pre-commit`` performs various checks on all files in PyMongo and uses tools +that help follow a consistent code style within the codebase. + +To set up ``pre-commit`` locally, run:: + + pip install pre-commit + pre-commit install + +To run ``pre-commit`` manually, run:: + + pre-commit run --all-files + Documentation ------------- @@ -67,4 +84,4 @@ The ``-b`` flag adds as a regex pattern to block files you do not wish to update in PyMongo. This is primarily helpful if you are implementing a new feature in PyMongo that has spec tests already implemented, or if you are attempting to -validate new spec tests in PyMongo. \ No newline at end of file +validate new spec tests in PyMongo. diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 28a340b3fb..a307b30432 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -94,4 +94,3 @@ supplied in this file in the creation of products supporting the Unicode Standard, and to make copies of this file in any form for internal or external distribution as long as this notice remains attached. - diff --git a/doc/api/pymongo/event_loggers.rst b/doc/api/pymongo/event_loggers.rst index f79bfb2345..9be0779c20 100644 --- a/doc/api/pymongo/event_loggers.rst +++ b/doc/api/pymongo/event_loggers.rst @@ -4,4 +4,4 @@ .. automodule:: pymongo.event_loggers :synopsis: A collection of simple listeners for monitoring driver events. - :members: \ No newline at end of file + :members: diff --git a/doc/api/pymongo/topology_description.rst b/doc/api/pymongo/topology_description.rst index 8141507df7..24353db2a9 100644 --- a/doc/api/pymongo/topology_description.rst +++ b/doc/api/pymongo/topology_description.rst @@ -7,4 +7,3 @@ .. autoclass:: pymongo.topology_description.TopologyDescription() :members: - diff --git a/doc/atlas.rst b/doc/atlas.rst index 0a64b294ce..6100e9d3c5 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -41,4 +41,3 @@ Connections to Atlas require TLS/SSL. .. _homebrew: https://brew.sh/ .. _macports: https://www.macports.org/ .. _requests: https://pypi.python.org/pypi/requests - diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index 28659c133e..fc436c0cd7 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -105,4 +105,4 @@ list of known hosts. As an example, for a 3-member replica set with a all available secondaries. -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ \ No newline at end of file +.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 22071bd3bb..b993e32f4e 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -950,4 +950,4 @@ Additional BSON classes implement ``__slots__`` :class:`~bson.max_key.MaxKey`, :class:`~bson.timestamp.Timestamp`, :class:`~bson.regex.Regex`, and :class:`~bson.dbref.DBRef` now implement ``__slots__`` to reduce memory usage. This means that their attributes are fixed, and new -attributes cannot be added to the object at runtime. \ No newline at end of file +attributes cannot be added to the object at runtime. diff --git a/doc/tools.rst b/doc/tools.rst index 65b38c16a8..304a1eaf5c 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -47,14 +47,14 @@ Humongolus possible. The code is available for download `at GitHub `_. Tutorials and usage examples are also available at GitHub. - + MincePy - `MincePy `_ is an - object-document mapper (ODM) designed to make any Python object storable - and queryable in a MongoDB database. It is designed with machine learning - and big-data computational and experimental science applications in mind - but is entirely general and can be useful to anyone looking to organise, - share, or process large amounts data with as little change to their current + `MincePy `_ is an + object-document mapper (ODM) designed to make any Python object storable + and queryable in a MongoDB database. It is designed with machine learning + and big-data computational and experimental science applications in mind + but is entirely general and can be useful to anyone looking to organise, + share, or process large amounts data with as little change to their current workflow as possible. Ming @@ -80,7 +80,7 @@ MotorEngine It implements the same modeling APIs to be data-portable, meaning that a model defined in MongoEngine can be read in MotorEngine. The source is `available on GitHub `_. - + uMongo `uMongo `_ is a Python MongoDB ODM. Its inception comes from two needs: the lack of async ODM and the diff --git a/test/certificates/ca.pem b/test/certificates/ca.pem index 6ac86cfcc1..24beea2d48 100644 --- a/test/certificates/ca.pem +++ b/test/certificates/ca.pem @@ -18,4 +18,4 @@ gT564CmvkUat8uXPz6olOCdwkMpJ9Sj62i0mpgXJdBfxKQ6TZ9yGz6m3jannjZpN LchB7xSAEWtqUgvNusq0dApJsf4n7jZ+oBZVaQw2+tzaMfaLqHgMwcu1FzA8UKCD sxCgIsZUs8DdxaD418Ot6nPfheOTqe24n+TTa+Z6O0W0QtnofJBx7tmAo1aEc57i 77s89pfwIJetpIlhzNSMKurCAocFCJMJLAASJFuu6dyDvPo= ------END CERTIFICATE----- \ No newline at end of file +-----END CERTIFICATE----- From ddb661444220474bcb448a7adbd13ef9220a588a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 9 Feb 2022 15:12:02 -0800 Subject: [PATCH 0581/2111] PYTHON-2682 Add support for the comment field to all helpers (#847) --- pymongo/aggregation.py | 3 + pymongo/bulk.py | 5 +- pymongo/change_stream.py | 5 +- pymongo/collection.py | 252 ++++++++- pymongo/cursor.py | 2 +- pymongo/database.py | 70 ++- pymongo/mongo_client.py | 41 +- .../unified/change-streams.json | 146 +++++- test/crud/unified/aggregate.json | 280 ++++++++++ test/crud/unified/bulkWrite-comment.json | 494 ++++++++++++++++++ test/crud/unified/deleteMany-comment.json | 244 +++++++++ test/crud/unified/deleteOne-comment.json | 242 +++++++++ test/crud/unified/find-comment.json | 298 +++++++++++ .../unified/findOneAndDelete-comment.json | 211 ++++++++ .../unified/findOneAndReplace-comment.json | 234 +++++++++ .../unified/findOneAndUpdate-comment.json | 228 ++++++++ test/crud/unified/insertMany-comment.json | 225 ++++++++ test/crud/unified/insertOne-comment.json | 219 ++++++++ test/crud/unified/replaceOne-comment.json | 229 ++++++++ test/crud/unified/updateMany-comment.json | 244 +++++++++ test/crud/unified/updateOne-comment.json | 241 +++++++++ test/test_comment.py | 183 +++++++ 22 files changed, 4048 insertions(+), 48 deletions(-) create mode 100644 test/crud/unified/bulkWrite-comment.json create mode 100644 test/crud/unified/deleteMany-comment.json create mode 100644 test/crud/unified/deleteOne-comment.json create mode 100644 test/crud/unified/find-comment.json create mode 100644 test/crud/unified/findOneAndDelete-comment.json create mode 100644 test/crud/unified/findOneAndReplace-comment.json create mode 100644 test/crud/unified/findOneAndUpdate-comment.json create mode 100644 test/crud/unified/insertMany-comment.json create mode 100644 test/crud/unified/insertOne-comment.json create mode 100644 test/crud/unified/replaceOne-comment.json create mode 100644 test/crud/unified/updateMany-comment.json create mode 100644 test/crud/unified/updateOne-comment.json create mode 100644 test/test_comment.py diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 2b8cafe7cb..51be0dfa81 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -39,6 +39,7 @@ def __init__( let=None, user_fields=None, result_processor=None, + comment=None, ): if "explain" in options: raise ConfigurationError( @@ -57,6 +58,8 @@ def __init__( if let: common.validate_is_mapping("let", let) options["let"] = let + if comment is not None: + options["comment"] = comment self._options = options # This is the batchSize that will be used for setting the initial diff --git a/pymongo/bulk.py b/pymongo/bulk.py index e043e09fdd..fae55a5c10 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -138,13 +138,14 @@ def _raise_bulk_write_error(full_result): class _Bulk(object): """The private guts of the bulk write API.""" - def __init__(self, collection, ordered, bypass_document_validation): + def __init__(self, collection, ordered, bypass_document_validation, comment=None): """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) ) + self.comment = comment self.ordered = ordered self.ops = [] self.executed = False @@ -308,6 +309,8 @@ def _execute_command( write_concern = final_write_concern or write_concern cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) + if self.comment: + cmd["comment"] = self.comment if not write_concern.is_server_default: cmd["writeConcern"] = write_concern.document if self.bypass_doc_val: diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index a35c9cb844..50f6f72b73 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -95,6 +95,7 @@ def __init__( start_at_operation_time: Optional[Timestamp], session: Optional["ClientSession"], start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, ) -> None: if pipeline is None: pipeline = [] @@ -125,7 +126,7 @@ def __init__( self._collation = collation self._start_at_operation_time = start_at_operation_time self._session = session - + self._comment = comment # Initialize cursor. self._cursor = self._create_cursor() @@ -209,8 +210,8 @@ def _run_aggregation_cmd(self, session, explicit_session): self._command_options(), explicit_session, result_processor=self._process_result, + comment=self._comment, ) - return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), session ) diff --git a/pymongo/collection.py b/pymongo/collection.py index b17bb61f34..df8db3f106 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -423,6 +423,7 @@ def bulk_write( ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -472,6 +473,8 @@ def bulk_write( ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.results.BulkWriteResult`. @@ -481,6 +484,9 @@ def bulk_write( .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -491,7 +497,7 @@ def bulk_write( """ common.validate_list("requests", requests) - blk = _Bulk(self, ordered, bypass_document_validation) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) for request in requests: try: request._add_to_bulk(blk) @@ -504,11 +510,15 @@ def bulk_write( return BulkWriteResult(bulk_api_result, True) return BulkWriteResult({}, False) - def _insert_one(self, doc, ordered, write_concern, op_id, bypass_doc_val, session): + def _insert_one( + self, doc, ordered, write_concern, op_id, bypass_doc_val, session, comment=None + ): """Internal helper for inserting a single document.""" write_concern = write_concern or self.write_concern acknowledged = write_concern.acknowledged command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) + if comment is not None: + command["comment"] = comment if not write_concern.is_server_default: command["writeConcern"] = write_concern.document @@ -538,6 +548,7 @@ def insert_one( document: _DocumentIn, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> InsertOneResult: """Insert a single document. @@ -558,6 +569,8 @@ def insert_one( ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.InsertOneResult`. @@ -567,6 +580,9 @@ def insert_one( .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -588,6 +604,7 @@ def insert_one( op_id=None, bypass_doc_val=bypass_document_validation, session=session, + comment=comment, ), write_concern.acknowledged, ) @@ -598,6 +615,7 @@ def insert_many( ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> InsertManyResult: """Insert an iterable of documents. @@ -621,6 +639,8 @@ def insert_many( ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.results.InsertManyResult`. @@ -630,6 +650,9 @@ def insert_many( .. note:: `bypass_document_validation` requires server version **>= 3.2** + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -657,7 +680,7 @@ def gen(): yield (message._INSERT, document) write_concern = self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_document_validation) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) blk.ops = [doc for doc in gen()] blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) @@ -679,6 +702,7 @@ def _update( session=None, retryable_write=False, let=None, + comment=None, ): """Internal update / replace helper.""" common.validate_boolean("upsert", upsert) @@ -704,7 +728,6 @@ def _update( if not isinstance(hint, str): hint = helpers._index_document(hint) update_doc["hint"] = hint - command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) if let: common.validate_is_mapping("let", let) @@ -712,6 +735,8 @@ def _update( if not write_concern.is_server_default: command["writeConcern"] = write_concern.document + if comment is not None: + command["comment"] = comment # Update command. if bypass_doc_val: command["bypassDocumentValidation"] = True @@ -757,6 +782,7 @@ def _update_retryable( hint=None, session=None, let=None, + comment=None, ): """Internal update / replace helper.""" @@ -777,6 +803,7 @@ def _update(session, sock_info, retryable_write): session=session, retryable_write=retryable_write, let=let, + comment=comment, ) return self.__database.client._retryable_write( @@ -793,6 +820,7 @@ def replace_one( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> UpdateResult: """Replace a single document matching the filter. @@ -845,12 +873,14 @@ def replace_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). - + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -878,6 +908,7 @@ def replace_one( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -893,6 +924,7 @@ def update_one( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> UpdateResult: """Update a single document matching the filter. @@ -938,12 +970,15 @@ def update_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -974,6 +1009,7 @@ def update_one( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -989,6 +1025,7 @@ def update_many( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> UpdateResult: """Update one or more documents that match the filter. @@ -1034,12 +1071,15 @@ def update_many( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.UpdateResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.9 @@ -1071,22 +1111,32 @@ def update_many( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) - def drop(self, session: Optional["ClientSession"] = None) -> None: + def drop( + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + ) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. The following two calls are equivalent: >>> db.foo.drop() >>> db.drop_collection("foo") + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.7 :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. @@ -1100,7 +1150,7 @@ def drop(self, session: Optional["ClientSession"] = None) -> None: self.write_concern, self.read_concern, ) - dbo.drop_collection(self.__name, session=session) + dbo.drop_collection(self.__name, session=session, comment=comment) def _delete( self, @@ -1115,6 +1165,7 @@ def _delete( session=None, retryable_write=False, let=None, + comment=None, ): """Internal delete helper.""" common.validate_is_mapping("filter", criteria) @@ -1143,6 +1194,9 @@ def _delete( common.validate_is_document_type("let", let) command["let"] = let + if comment is not None: + command["comment"] = comment + # Delete command. result = sock_info.command( self.__database.name, @@ -1167,6 +1221,7 @@ def _delete_retryable( hint=None, session=None, let=None, + comment=None, ): """Internal delete helper.""" @@ -1183,6 +1238,7 @@ def _delete(session, sock_info, retryable_write): session=session, retryable_write=retryable_write, let=let, + comment=comment, ) return self.__database.client._retryable_write( @@ -1196,6 +1252,7 @@ def delete_one( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> DeleteResult: """Delete a single document matching the filter. @@ -1223,12 +1280,15 @@ def delete_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1247,6 +1307,7 @@ def delete_one( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -1258,6 +1319,7 @@ def delete_many( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> DeleteResult: """Delete one or more documents matching the filter. @@ -1285,12 +1347,15 @@ def delete_many( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: - An instance of :class:`~pymongo.results.DeleteResult`. .. versionchanged:: 4.1 Added ``let`` parameter. + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``hint`` parameter. .. versionchanged:: 3.6 @@ -1309,6 +1374,7 @@ def delete_many( hint=hint, session=session, let=let, + comment=comment, ), write_concern.acknowledged, ) @@ -1339,10 +1405,10 @@ def find_one( are the same as the arguments to :meth:`find`. >>> collection.find_one(max_time_ms=100) + """ if filter is not None and not isinstance(filter, abc.Mapping): filter = {"_id": filter} - cursor = self.find(filter, *args, **kwargs) for result in cursor.limit(-1): return result @@ -1566,7 +1632,6 @@ def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_Documen # OP_MSG is required to support encryption. if self.__database.client._encrypter: raise InvalidOperation("find_raw_batches does not support auto encryption") - return RawBatchCursor(self, *args, **kwargs) def _count_cmd(self, session, sock_info, read_preference, cmd, collation): @@ -1605,7 +1670,7 @@ def _aggregate_one_result(self, sock_info, read_preference, cmd, collation, sess batch = result["cursor"]["firstBatch"] return batch[0] if batch else None - def estimated_document_count(self, **kwargs: Any) -> int: + def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: """Get an estimate of the number of documents in this collection using collection metadata. @@ -1619,12 +1684,17 @@ def estimated_document_count(self, **kwargs: Any) -> int: operation to run, in milliseconds. :Parameters: + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. + .. versionadded:: 3.7 """ if "session" in kwargs: raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment def _cmd(session, server, sock_info, read_preference): if sock_info.max_wire_version >= 12: @@ -1650,7 +1720,11 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read(_cmd, self.read_preference, None) def count_documents( - self, filter: Mapping[str, Any], session: Optional["ClientSession"] = None, **kwargs: Any + self, + filter: Mapping[str, Any], + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> int: """Count the number of documents in this collection. @@ -1696,8 +1770,11 @@ def count_documents( documents. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. + .. versionadded:: 3.7 .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ @@ -1710,6 +1787,8 @@ def count_documents( pipeline.append({"$skip": kwargs.pop("skip")}) if "limit" in kwargs: pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) if "hint" in kwargs and not isinstance(kwargs["hint"], str): @@ -1731,6 +1810,7 @@ def create_indexes( self, indexes: Sequence[IndexModel], session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> List[str]: """Create one or more indexes on this collection. @@ -1747,9 +1827,14 @@ def create_indexes( instances. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. + + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. @@ -1765,6 +1850,8 @@ def create_indexes( .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ """ common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment return self.__create_indexes(indexes, session, **kwargs) def __create_indexes(self, indexes, session, **kwargs): @@ -1811,7 +1898,11 @@ def gen_indexes(): return names def create_index( - self, keys: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + self, + keys: _IndexKeyHint, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> str: """Creates an index on this collection. @@ -1886,10 +1977,14 @@ def create_index( pairs specifying the index to create - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + arguments + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword - arguments + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.11 Added the ``hidden`` option. .. versionchanged:: 3.6 @@ -1912,10 +2007,17 @@ def create_index( cmd_options = {} if "maxTimeMS" in kwargs: cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") + if comment is not None: + cmd_options["comment"] = comment index = IndexModel(keys, **kwargs) return self.__create_indexes([index], session, **cmd_options)[0] - def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) -> None: + def drop_indexes( + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: """Drops all indexes on this collection. Can be used on non-existant collections or collections with no indexes. @@ -1924,9 +2026,14 @@ def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + arguments + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. @@ -1939,10 +2046,16 @@ def drop_indexes(self, session: Optional["ClientSession"] = None, **kwargs: Any) when connected to MongoDB >= 3.4. """ + if comment is not None: + kwargs["comment"] = comment self.drop_index("*", session=session, **kwargs) def drop_index( - self, index_or_name: _IndexKeyHint, session: Optional["ClientSession"] = None, **kwargs: Any + self, + index_or_name: _IndexKeyHint, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> None: """Drops the specified index on this collection. @@ -1964,12 +2077,17 @@ def drop_index( - `index_or_name`: index (or name of index) to drop - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. + .. versionchanged:: 3.6 Added ``session`` parameter. Added support for arbitrary keyword arguments. @@ -1988,6 +2106,8 @@ def drop_index( cmd = SON([("dropIndexes", self.__name), ("index", name)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment with self._socket_for_writes(session) as sock_info: self._command( sock_info, @@ -1999,7 +2119,9 @@ def drop_index( ) def list_indexes( - self, session: Optional["ClientSession"] = None + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. @@ -2011,10 +2133,15 @@ def list_indexes( :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2028,6 +2155,9 @@ def list_indexes( def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) + if comment is not None: + cmd["comment"] = comment + with self.__database.client._tmp_session(session, False) as s: try: cursor = self._command( @@ -2048,7 +2178,9 @@ def _cmd(session, server, sock_info, read_preference): return self.__database.client._retryable_read(_cmd, read_pref, session) def index_information( - self, session: Optional["ClientSession"] = None + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. @@ -2071,11 +2203,16 @@ def index_information( :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. """ - cursor = self.list_indexes(session=session) + cursor = self.list_indexes(session=session, comment=comment) info = {} for index in cursor: index["key"] = list(index["key"].items()) @@ -2083,7 +2220,11 @@ def index_information( info[index.pop("name")] = index return info - def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[str, Any]: + def options( + self, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: """Get the options set on this collection. Returns a dictionary of options and their values - see @@ -2094,6 +2235,8 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2105,7 +2248,9 @@ def options(self, session: Optional["ClientSession"] = None) -> MutableMapping[s self.write_concern, self.read_concern, ) - cursor = dbo.list_collections(session=session, filter={"name": self.__name}) + cursor = dbo.list_collections( + session=session, filter={"name": self.__name}, comment=comment + ) result = None for doc in cursor: @@ -2130,8 +2275,11 @@ def _aggregate( session, explicit_session, let=None, + comment=None, **kwargs, ): + if comment is not None: + kwargs["comment"] = comment cmd = aggregation_command( self, cursor_class, @@ -2154,6 +2302,7 @@ def aggregate( pipeline: _Pipeline, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> CommandCursor[_DocumentType]: """Perform an aggregation using the aggregation framework on this @@ -2196,12 +2345,16 @@ def aggregate( fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. ``"$$var"``). This option is only supported on MongoDB >= 5.0. + - `comment` (optional): A user-provided comment to attach to this + command. + :Returns: A :class:`~pymongo.command_cursor.CommandCursor` over the result set. .. versionchanged:: 4.1 + Added ``comment`` parameter. Added ``let`` parameter. Support $merge and $out executing on secondaries according to the collection's :attr:`read_preference`. @@ -2228,6 +2381,7 @@ def aggregate( .. _aggregate command: https://docs.mongodb.com/manual/reference/command/aggregate """ + with self.__database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionAggregationCommand, @@ -2236,11 +2390,16 @@ def aggregate( session=s, explicit_session=session is not None, let=let, + comment=comment, **kwargs, ) def aggregate_raw_batches( - self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any + self, + pipeline: _Pipeline, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> RawBatchCursor[_DocumentType]: """Perform an aggregation and retrieve batches of raw BSON. @@ -2268,7 +2427,8 @@ def aggregate_raw_batches( # OP_MSG is required to support encryption. if self.__database.client._encrypter: raise InvalidOperation("aggregate_raw_batches does not support auto encryption") - + if comment is not None: + kwargs["comment"] = comment with self.__database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionRawAggregationCommand, @@ -2290,6 +2450,7 @@ def watch( start_at_operation_time: Optional[Timestamp] = None, session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. @@ -2368,10 +2529,16 @@ def watch( - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -2396,10 +2563,15 @@ def watch( start_at_operation_time, session, start_after, + comment=comment, ) def rename( - self, new_name: str, session: Optional["ClientSession"] = None, **kwargs: Any + self, + new_name: str, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> MutableMapping[str, Any]: """Rename this collection. @@ -2413,6 +2585,8 @@ def rename( - `new_name`: new name for this collection - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional arguments to the rename command may be passed as keyword arguments to this helper method (i.e. ``dropTarget=True``) @@ -2441,6 +2615,8 @@ def rename( new_name = "%s.%s" % (self.__database.name, new_name) cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment write_concern = self._write_concern_for_cmd(cmd, session) with self._socket_for_writes(session) as sock_info: @@ -2459,6 +2635,7 @@ def distinct( key: str, filter: Optional[Mapping[str, Any]] = None, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> List: """Get a list of distinct values for `key` among all documents @@ -2485,6 +2662,8 @@ def distinct( from which to retrieve the distinct values. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): See list of options above. .. versionchanged:: 3.6 @@ -2503,6 +2682,8 @@ def distinct( kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment def _cmd(session, server, sock_info, read_preference): return self._command( @@ -2611,6 +2792,7 @@ def find_one_and_delete( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> _DocumentType: """Finds a single document and deletes it, returning the document. @@ -2656,13 +2838,15 @@ def find_one_and_delete( on MongoDB 4.4 and above. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - `let` (optional): Map of parameter names and values. Values must be constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). .. versionchanged:: 4.1 Added ``let`` parameter. @@ -2684,6 +2868,8 @@ def find_one_and_delete( .. versionadded:: 3.0 """ kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment return self.__find_and_modify( filter, projection, sort, let=let, hint=hint, session=session, **kwargs ) @@ -2699,6 +2885,7 @@ def find_one_and_replace( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> _DocumentType: """Finds a single document and replaces it, returning either the @@ -2754,11 +2941,13 @@ def find_one_and_replace( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). - .. versionchanged:: 4.1 + Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. @@ -2779,6 +2968,8 @@ def find_one_and_replace( """ common.validate_ok_for_replace(replacement) kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment return self.__find_and_modify( filter, projection, @@ -2803,6 +2994,7 @@ def find_one_and_update( hint: Optional[_IndexKeyHint] = None, session: Optional["ClientSession"] = None, let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> _DocumentType: """Finds a single document and updates it, returning either the @@ -2897,12 +3089,12 @@ def find_one_and_update( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). - .. versionchanged:: 4.1 - Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.9 @@ -2925,6 +3117,8 @@ def find_one_and_update( common.validate_ok_for_update(update) common.validate_list_or_none("array_filters", array_filters) kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment return self.__find_and_modify( filter, projection, diff --git a/pymongo/cursor.py b/pymongo/cursor.py index ba9e5956f2..be4b998d31 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -183,7 +183,7 @@ def __init__( return_key: Optional[bool] = None, show_record_id: Optional[bool] = None, snapshot: Optional[bool] = None, - comment: Any = None, + comment: Optional[Any] = None, session: Optional["ClientSession"] = None, allow_disk_use: Optional[bool] = None, let: Optional[bool] = None, diff --git a/pymongo/database.py b/pymongo/database.py index 675db132f7..e6633ed230 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -471,6 +471,7 @@ def watch( start_at_operation_time: Optional[Timestamp] = None, session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. @@ -542,10 +543,15 @@ def watch( - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -567,6 +573,7 @@ def watch( start_at_operation_time, session, start_after, + comment=comment, ) def _command( @@ -611,6 +618,7 @@ def command( read_preference: Optional[_ServerMode] = None, codec_options: Optional[CodecOptions] = DEFAULT_CODEC_OPTIONS, session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> Dict[str, Any]: """Issue a MongoDB command. @@ -665,9 +673,12 @@ def command( instance. - `session` (optional): A :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): additional keyword arguments will be added to the command document before it is sent + .. note:: :meth:`command` does **not** obey this Database's :attr:`read_preference` or :attr:`codec_options`. You must use the `read_preference` and `codec_options` parameters instead. @@ -695,6 +706,9 @@ def command( .. seealso:: The MongoDB documentation on `commands `_. """ + if comment is not None: + kwargs["comment"] = comment + if read_preference is None: read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY with self.__client._socket_for_reads(read_preference, session) as ( @@ -767,6 +781,7 @@ def list_collections( self, session: Optional["ClientSession"] = None, filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the collections of this database. @@ -776,12 +791,15 @@ def list_collections( :class:`~pymongo.client_session.ClientSession`. - `filter` (optional): A query document to filter the list of collections returned from the listCollections command. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): Optional parameters of the `listCollections command `_ can be passed as keyword arguments to this method. The supported options differ by server version. + :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. @@ -790,6 +808,8 @@ def list_collections( if filter is not None: kwargs["filter"] = filter read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment def _cmd(session, server, sock_info, read_preference): return self._list_collections( @@ -802,6 +822,7 @@ def list_collection_names( self, session: Optional["ClientSession"] = None, filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, **kwargs: Any, ) -> List[str]: """Get a list of all the collection names in this database. @@ -816,19 +837,25 @@ def list_collection_names( :class:`~pymongo.client_session.ClientSession`. - `filter` (optional): A query document to filter the list of collections returned from the listCollections command. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): Optional parameters of the `listCollections command `_ can be passed as keyword arguments to this method. The supported options differ by server version. + .. versionchanged:: 3.8 Added the ``filter`` and ``**kwargs`` parameters. .. versionadded:: 3.6 """ + if comment is not None: + kwargs["comment"] = comment if filter is None: kwargs["nameOnly"] = True + else: # The enumerate collections spec states that "drivers MUST NOT set # nameOnly if a filter specifies any keys other than name." @@ -840,7 +867,10 @@ def list_collection_names( return [result["name"] for result in self.list_collections(session=session, **kwargs)] def drop_collection( - self, name_or_collection: Union[str, Collection], session: Optional["ClientSession"] = None + self, + name_or_collection: Union[str, Collection], + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, ) -> Dict[str, Any]: """Drop a collection. @@ -849,10 +879,16 @@ def drop_collection( collection object itself - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + .. note:: The :attr:`~pymongo.database.Database.write_concern` of this database is automatically applied to this operation. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -868,11 +904,14 @@ def drop_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") + command = SON([("drop", name)]) + if comment is not None: + command["comment"] = comment + with self.__client._socket_for_writes(session) as sock_info: return self._command( sock_info, - "drop", - value=name, + command, allowable_errors=["ns not found", 26], write_concern=self._write_concern_for(session), parse_write_concern_error=True, @@ -886,6 +925,7 @@ def validate_collection( full: bool = False, session: Optional["ClientSession"] = None, background: Optional[bool] = None, + comment: Optional[Any] = None, ) -> Dict[str, Any]: """Validate a collection. @@ -907,6 +947,11 @@ def validate_collection( :class:`~pymongo.client_session.ClientSession`. - `background` (optional): A boolean flag that determines whether the command runs in the background. Requires MongoDB 4.4+. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.11 Added ``background`` parameter. @@ -922,8 +967,10 @@ def validate_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str or " "Collection") - cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) + if comment is not None: + cmd["comment"] = comment + if background is not None: cmd["background"] = background @@ -970,7 +1017,11 @@ def __bool__(self) -> bool: ) def dereference( - self, dbref: DBRef, session: Optional["ClientSession"] = None, **kwargs: Any + self, + dbref: DBRef, + session: Optional["ClientSession"] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> Optional[_DocumentType]: """Dereference a :class:`~bson.dbref.DBRef`, getting the document it points to. @@ -985,10 +1036,15 @@ def dereference( - `dbref`: the reference - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): any additional keyword arguments are the same as the arguments to :meth:`~pymongo.collection.Collection.find`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. """ @@ -999,4 +1055,6 @@ def dereference( "trying to dereference a DBRef that points to " "another database (%r not %r)" % (dbref.database, self.__name) ) - return self[dbref.collection].find_one({"_id": dbref.id}, session=session, **kwargs) + return self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 3fa2946c7c..6b0d55601f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -846,6 +846,7 @@ def watch( start_at_operation_time: Optional[Timestamp] = None, session: Optional[client_session.ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. @@ -917,10 +918,15 @@ def watch( - `start_after` (optional): The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. + - `comment` (optional): A user-provided comment to attach to this + command. :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.9 Added the ``start_after`` parameter. @@ -942,6 +948,7 @@ def watch( start_at_operation_time, session, start_after, + comment=comment, ) @property @@ -1709,19 +1716,25 @@ def server_info(self, session: Optional[client_session.ClientSession] = None) -> ) def list_databases( - self, session: Optional[client_session.ClientSession] = None, **kwargs: Any + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, ) -> CommandCursor[Dict[str, Any]]: """Get a cursor over the databases of the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. - `**kwargs` (optional): Optional parameters of the `listDatabases command `_ can be passed as keyword arguments to this method. The supported options differ by server version. + :Returns: An instance of :class:`~pymongo.command_cursor.CommandCursor`. @@ -1729,6 +1742,8 @@ def list_databases( """ cmd = SON([("listDatabases", 1)]) cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment admin = self._database_default_options("admin") res = admin._retryable_read_command(cmd, session=session) # listDatabases doesn't return a cursor (yet). Fake one. @@ -1740,22 +1755,30 @@ def list_databases( return CommandCursor(admin["$cmd"], cursor, None) def list_database_names( - self, session: Optional[client_session.ClientSession] = None + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, ) -> List[str]: """Get a list of the names of all databases on the connected server. :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionadded:: 3.6 """ - return [doc["name"] for doc in self.list_databases(session, nameOnly=True)] + return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] def drop_database( self, name_or_database: Union[str, database.Database], session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, ) -> None: """Drop a database. @@ -1769,6 +1792,11 @@ def drop_database( database to drop - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -1791,7 +1819,7 @@ def drop_database( with self._socket_for_writes(session) as sock_info: self[name]._command( sock_info, - "dropDatabase", + {"dropDatabase": 1, "comment": comment}, read_preference=ReadPreference.PRIMARY, write_concern=self._write_concern_for(session), parse_write_concern_error=True, @@ -1837,6 +1865,11 @@ def get_default_database( :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the default) the :attr:`read_concern` of this :class:`MongoClient` is used. + - `comment` (optional): A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. .. versionchanged:: 3.8 Undeprecated. Added the ``default``, ``codec_options``, diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index adaf00de2d..4aea9a4aa1 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -1,10 +1,21 @@ { "description": "change-streams", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], "createEntities": [ { "client": { - "id": "client0" + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -34,10 +45,7 @@ "description": "Test array truncation", "runOnRequirements": [ { - "minServerVersion": "4.7", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.7" } ], "operations": [ @@ -111,6 +119,134 @@ } } ] + }, + { + "description": "Test with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with document comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "name": "test1" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "name": "test1" + } + } + } + } + ] + } + ] + }, + { + "description": "Test with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json index dcdad761e8..f6da8ff32f 100644 --- a/test/crud/unified/aggregate.json +++ b/test/crud/unified/aggregate.json @@ -161,6 +161,286 @@ ] } ] + }, + { + "description": "aggregate with a string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "object": "collection0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with a document comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment does not set comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": "comment" + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/bulkWrite-comment.json b/test/crud/unified/bulkWrite-comment.json new file mode 100644 index 0000000000..fac9644543 --- /dev/null +++ b/test/crud/unified/bulkWrite-comment.json @@ -0,0 +1,494 @@ +{ + "description": "bulkWrite-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v2" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "BulkWrite_comment" + } + } + ], + "initialData": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + } + } + ], + "ordered": true, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "update": "BulkWrite_comment", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "_id": 1, + "x": "replaced" + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": "updated" + } + } + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "BulkWrite_comment", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + } + ], + "ordered": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": "replaced" + }, + { + "_id": 2, + "x": "updated" + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": "inserted" + } + ] + } + ] + }, + { + "description": "BulkWrite with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 5, + "x": "inserted" + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": "replaced" + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "updated" + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "BulkWrite_comment", + "documents": [ + { + "_id": 5, + "x": "inserted" + } + ], + "ordered": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "BulkWrite_comment", + "databaseName": "crud-v2", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-comment.json b/test/crud/unified/deleteMany-comment.json new file mode 100644 index 0000000000..ea6a8524d9 --- /dev/null +++ b/test/crud/unified/deleteMany-comment.json @@ -0,0 +1,244 @@ +{ + "description": "deleteMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ], + "tests": [ + { + "description": "deleteMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "deleteMany with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name2" + }, + { + "_id": 3, + "name": "name3" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-comment.json b/test/crud/unified/deleteOne-comment.json new file mode 100644 index 0000000000..37f356ec6f --- /dev/null +++ b/test/crud/unified/deleteOne-comment.json @@ -0,0 +1,242 @@ +{ + "description": "deleteOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + }, + { + "description": "deleteOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2, + "name": "name" + }, + { + "_id": 3, + "name": "name" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find-comment.json b/test/crud/unified/find-comment.json new file mode 100644 index 0000000000..6000bb0172 --- /dev/null +++ b/test/crud/unified/find-comment.json @@ -0,0 +1,298 @@ +{ + "description": "find-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "find with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "find with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with document comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99", + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment does not set comment on getMore", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-comment.json b/test/crud/unified/findOneAndDelete-comment.json new file mode 100644 index 0000000000..6853b9cc2d --- /dev/null +++ b/test/crud/unified/findOneAndDelete-comment.json @@ -0,0 +1,211 @@ +{ + "description": "findOneAndDelete-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndDelete with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "remove": true, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-comment.json b/test/crud/unified/findOneAndReplace-comment.json new file mode 100644 index 0000000000..f817bb6937 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-comment.json @@ -0,0 +1,234 @@ +{ + "description": "findOneAndReplace-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 5 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "findOneAndReplace with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 5 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": { + "x": 5 + }, + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-comment.json b/test/crud/unified/findOneAndUpdate-comment.json new file mode 100644 index 0000000000..6dec5b39ee --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-comment.json @@ -0,0 +1,228 @@ +{ + "description": "findOneAndUpdate-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + }, + "expectResult": { + "_id": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate with comment - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2.0", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll0", + "query": { + "_id": 1 + }, + "update": [ + { + "$set": { + "x": 5 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany-comment.json b/test/crud/unified/insertMany-comment.json new file mode 100644 index 0000000000..7e835e8011 --- /dev/null +++ b/test/crud/unified/insertMany-comment.json @@ -0,0 +1,225 @@ +{ + "description": "insertMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertMany with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-comment.json b/test/crud/unified/insertOne-comment.json new file mode 100644 index 0000000000..a9f735ab6c --- /dev/null +++ b/test/crud/unified/insertOne-comment.json @@ -0,0 +1,219 @@ +{ + "description": "insertOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "insertOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "insertOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-comment.json b/test/crud/unified/replaceOne-comment.json new file mode 100644 index 0000000000..02fe90a44d --- /dev/null +++ b/test/crud/unified/replaceOne-comment.json @@ -0,0 +1,229 @@ +{ + "description": "replaceOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 22 + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 22 + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-comment.json b/test/crud/unified/updateMany-comment.json new file mode 100644 index 0000000000..26abd92ed4 --- /dev/null +++ b/test/crud/unified/updateMany-comment.json @@ -0,0 +1,244 @@ +{ + "description": "updateMany-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateMany with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + }, + "multi": true + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-comment.json b/test/crud/unified/updateOne-comment.json new file mode 100644 index 0000000000..9b3b71d395 --- /dev/null +++ b/test/crud/unified/updateOne-comment.json @@ -0,0 +1,241 @@ +{ + "description": "updateOne-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": { + "key": "value" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + } + } + ], + "comment": { + "key": "value" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with comment - pre 4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 22 + } + }, + "comment": "comment" + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 22 + } + } + } + ], + "comment": "comment" + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/test_comment.py b/test/test_comment.py new file mode 100644 index 0000000000..1c0e741621 --- /dev/null +++ b/test/test_comment.py @@ -0,0 +1,183 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the keyword argument 'comment' in various helpers.""" + +import inspect +import sys +from collections import defaultdict +from typing import Any, Union + +sys.path[0:0] = [""] + +from test import IntegrationTest, SkipTest, client_context, unittest +from test.utils import EventListener, rs_or_single_client + +from bson.dbref import DBRef +from pymongo.collection import Collection +from pymongo.command_cursor import CommandCursor +from pymongo.database import Database +from pymongo.mongo_client import MongoClient +from pymongo.operations import IndexModel +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + + +class Empty(object): + def __getattr__(self, item): + try: + self.__dict__[item] + except KeyError: + return self.empty + + def empty(self, *args, **kwargs): + return Empty() + + +class TestComment(IntegrationTest): + def _test_ops(self, helpers, already_supported, listener, db=Empty(), coll=Empty()): + results = listener.results + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + results.clear() + kwargs = {"comment": cc} + if h == coll.rename: + tmp = db.get_collection("temp_temp_temp").drop() + destruct_coll = db.get_collection("test_temp") + destruct_coll.insert_one({}) + maybe_cursor = destruct_coll.rename(*args, **kwargs) + destruct_coll.drop() + elif h == db.validate_collection: + coll = db.get_collection("test") + coll.insert_one({}) + maybe_cursor = db.validate_collection(*args, **kwargs) + else: + coll.create_index("a") + maybe_cursor = h(*args, **kwargs) + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, Union[Any, None] + ) + if isinstance(maybe_cursor, CommandCursor): + maybe_cursor.close() + tested = False + # For some reason collection.list_indexes creates two commands and the first + # one doesn't contain 'comment'. + for i in results["started"]: + if cc == i.command.get("comment", ""): + self.assertEqual(cc, i.command["comment"]) + tested = True + self.assertTrue(tested) + if h not in [coll.aggregate_raw_batches]: + self.assertIn( + "`comment` (optional):", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + results.clear() + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_database_helpers(self): + listener = EventListener() + db = rs_or_single_client(event_listeners=[listener]).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), + (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + self._test_ops(helpers, already_supported, listener, db=db, coll=db.get_collection("test")) + + @client_context.require_version_min(4, 7, -1) + @client_context.require_replica_set + def test_client_helpers(self): + listener = EventListener() + cli = rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + self._test_ops(helpers, already_supported, listener) + + @client_context.require_version_min(4, 7, -1) + def test_collection_helpers(self): + listener = EventListener() + db = rs_or_single_client(event_listeners=[listener])[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + self._test_ops(helpers, already_supported, listener, coll=coll, db=db) + + +if __name__ == "__main__": + unittest.main() From cbc7cc33e54756fa797ea94883815c10ae788002 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Feb 2022 18:54:46 -0600 Subject: [PATCH 0582/2111] PYTHON-3073 Copy the unit tests from pymongo-stubs into pymongo (#859) --- .github/workflows/test-python.yml | 3 +- test/mypy_fails/insert_many_dict.py | 6 ++ test/mypy_fails/insert_one_list.py | 6 ++ test/test_bson.py | 10 +++ test/test_mypy.py | 125 ++++++++++++++++++++++++++++ 5 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 test/mypy_fails/insert_many_dict.py create mode 100644 test/mypy_fails/insert_one_list.py create mode 100644 test/test_mypy.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 651f863d89..4b5f762786 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -37,6 +37,7 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | + pip install mypy python setup.py test mypytest: @@ -59,4 +60,4 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index test + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --exclude "test/mypy_fails/*.*" test diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py new file mode 100644 index 0000000000..6e8acb67b4 --- /dev/null +++ b/test/mypy_fails/insert_many_dict.py @@ -0,0 +1,6 @@ +from pymongo import MongoClient + +client = MongoClient() +client.test.test.insert_many( + {"a": 1} +) # error: Dict entry 0 has incompatible type "str": "int"; expected "Mapping[str, Any]": "int" diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py new file mode 100644 index 0000000000..7a26a3ff79 --- /dev/null +++ b/test/mypy_fails/insert_one_list.py @@ -0,0 +1,6 @@ +from pymongo import MongoClient + +client = MongoClient() +client.test.test.insert_one( + [{}] +) # error: Argument 1 to "insert_one" of "Collection" has incompatible type "List[Dict[, ]]"; expected "Mapping[str, Any]" diff --git a/test/test_bson.py b/test/test_bson.py index f8f587567d..46aa6e5d9a 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1117,6 +1117,16 @@ def test_int64_pickling(self): ) self.round_trip_pickle(i64, pickled_with_3) + def test_bson_encode_decode(self) -> None: + doc = {"_id": ObjectId()} + encoded = bson.encode(doc) + decoded = bson.decode(encoded) + encoded = bson.encode(decoded) + decoded = bson.decode(encoded) + # Documents returned from decode are mutable. + decoded["new_field"] = 1 + self.assertTrue(decoded["_id"].generation_time) + if __name__ == "__main__": unittest.main() diff --git a/test/test_mypy.py b/test/test_mypy.py new file mode 100644 index 0000000000..0f1498c64b --- /dev/null +++ b/test/test_mypy.py @@ -0,0 +1,125 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that each file in mypy_fails/ actually fails mypy, and test some +sample client code that uses PyMongo typings.""" + +import os +import sys +import unittest +from typing import Any, Dict, Iterable, List + +try: + from mypy import api +except ImportError: + api = None + +from bson.son import SON +from pymongo.collection import Collection +from pymongo.errors import ServerSelectionTimeoutError +from pymongo.mongo_client import MongoClient +from pymongo.operations import InsertOne + +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") + + +def get_tests() -> Iterable[str]: + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + yield os.path.join(dirpath, filename) + + +class TestMypyFails(unittest.TestCase): + def ensure_mypy_fails(self, filename: str) -> None: + if api is None: + raise unittest.SkipTest("Mypy is not installed") + stdout, stderr, exit_status = api.run([filename]) + self.assertTrue(exit_status, msg=stdout) + + def test_mypy_failures(self) -> None: + for filename in get_tests(): + with self.subTest(filename=filename): + self.ensure_mypy_fails(filename) + + +class TestPymongo(unittest.TestCase): + client: MongoClient + coll: Collection + + @classmethod + def setUpClass(cls) -> None: + cls.client = MongoClient(serverSelectionTimeoutMS=250, directConnection=False) + cls.coll = cls.client.test.test + try: + cls.client.admin.command("ping") + except ServerSelectionTimeoutError as exc: + raise unittest.SkipTest(f"Could not connect to MongoDB: {exc}") + + @classmethod + def tearDownClass(cls) -> None: + cls.client.close() + + def test_insert_find(self) -> None: + doc = {"my": "doc"} + coll2 = self.client.test.test2 + result = self.coll.insert_one(doc) + self.assertEqual(result.inserted_id, doc["_id"]) + retreived = self.coll.find_one({"_id": doc["_id"]}) + if retreived: + # Documents returned from find are mutable. + retreived["new_field"] = 1 + result2 = coll2.insert_one(retreived) + self.assertEqual(result2.inserted_id, result.inserted_id) + + def test_cursor_iterable(self) -> None: + def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: + return list(iterable) + + self.coll.insert_one({}) + cursor = self.coll.find() + docs = to_list(cursor) + self.assertTrue(docs) + + def test_bulk_write(self) -> None: + self.coll.insert_one({}) + requests = [InsertOne({})] + result = self.coll.bulk_write(requests) + self.assertTrue(result.acknowledged) + + def test_aggregate_pipeline(self) -> None: + coll3 = self.client.test.test3 + coll3.insert_many( + [ + {"x": 1, "tags": ["dog", "cat"]}, + {"x": 2, "tags": ["cat"]}, + {"x": 2, "tags": ["mouse", "cat", "dog"]}, + {"x": 3, "tags": []}, + ] + ) + + class mydict(Dict[str, Any]): + pass + + result = coll3.aggregate( + [ + mydict({"$unwind": "$tags"}), + {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, + {"$sort": SON([("count", -1), ("_id", -1)])}, + ] + ) + self.assertTrue(len(list(result))) + + +if __name__ == "__main__": + unittest.main() From c47557bc63cd60d5f709da3ec6fbdaa9fb783c7e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Feb 2022 18:59:26 -0600 Subject: [PATCH 0583/2111] PYTHON-3062 Make Regex generic (#860) --- bson/regex.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bson/regex.py b/bson/regex.py index 317c65049f..3e98477198 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -16,7 +16,7 @@ """ import re -from typing import Any, Pattern, Type, Union +from typing import Any, Generic, Pattern, Type, TypeVar, Union from bson._helpers import _getstate_slots, _setstate_slots from bson.son import RE_TYPE @@ -40,7 +40,10 @@ def str_flags_to_int(str_flags: str) -> int: return flags -class Regex(object): +_T = TypeVar("_T", str, bytes) + + +class Regex(Generic[_T]): """BSON regular expression data.""" __slots__ = ("pattern", "flags") @@ -51,7 +54,7 @@ class Regex(object): _type_marker = 11 @classmethod - def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": + def from_native(cls: Type["Regex"], regex: "Pattern[_T]") -> "Regex[_T]": """Convert a Python regular expression into a ``Regex`` instance. Note that in Python 3, a regular expression compiled from a @@ -80,7 +83,7 @@ def from_native(cls: Type["Regex"], regex: Pattern[Any]) -> "Regex": return Regex(regex.pattern, regex.flags) - def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> None: + def __init__(self, pattern: _T, flags: Union[str, int] = 0) -> None: """BSON regular expression data. This class is useful to store and retrieve regular expressions that are @@ -93,7 +96,7 @@ def __init__(self, pattern: Union[str, bytes], flags: Union[str, int] = 0) -> No """ if not isinstance(pattern, (str, bytes)): raise TypeError("pattern must be a string, not %s" % type(pattern)) - self.pattern = pattern + self.pattern: _T = pattern if isinstance(flags, str): self.flags = str_flags_to_int(flags) @@ -116,7 +119,7 @@ def __ne__(self, other: Any) -> bool: def __repr__(self): return "Regex(%r, %r)" % (self.pattern, self.flags) - def try_compile(self) -> Pattern[Any]: + def try_compile(self) -> "Pattern[_T]": """Compile this :class:`Regex` as a Python regular expression. .. warning:: From 0700a84432f07c7f1294e79b850b10da8accc017 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Feb 2022 06:32:01 -0600 Subject: [PATCH 0584/2111] PYTHON-1834 Add shellcheck (#858) --- .evergreen/build-manylinux.sh | 2 +- .evergreen/build-windows.sh | 4 ++-- .evergreen/release.sh | 2 +- .evergreen/run-mod-wsgi-tests.sh | 2 +- .evergreen/run-mongodb-aws-test.sh | 2 +- .evergreen/run-tests.sh | 17 +++++++++-------- .pre-commit-config.yaml | 9 +++++++++ .readthedocs.yaml | 1 + doc/conf.py | 1 + doc/docs-requirements.txt | 1 + pymongo/collection.py | 2 +- 11 files changed, 28 insertions(+), 15 deletions(-) diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 602a8e1e6c..a9a7238cb2 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -27,7 +27,7 @@ fi for image in "${images[@]}"; do docker pull $image - docker run --rm -v `pwd`:/src $image /src/.evergreen/build-manylinux-internal.sh + docker run --rm -v "`pwd`:/src" $image /src/.evergreen/build-manylinux-internal.sh done ls dist diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 97c7940769..3a33558cc9 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -9,8 +9,8 @@ mkdir -p validdist mv dist/* validdist || true for VERSION in 36 37 38 39 310; do - _pythons=(C:/Python/Python${VERSION}/python.exe \ - C:/Python/32/Python${VERSION}/python.exe) + _pythons=("C:/Python/Python${VERSION}/python.exe" \ + "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do rm -rf build $PYTHON setup.py bdist_wheel diff --git a/.evergreen/release.sh b/.evergreen/release.sh index 759786b934..1fdd459ad9 100755 --- a/.evergreen/release.sh +++ b/.evergreen/release.sh @@ -1,6 +1,6 @@ #!/bin/bash -ex -if [ $(uname -s) = "Darwin" ]; then +if [ "$(uname -s)" = "Darwin" ]; then .evergreen/build-mac.sh elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin .evergreen/build-windows.sh diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh index 03d72e9701..9a167895f8 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/run-mod-wsgi-tests.sh @@ -23,7 +23,7 @@ export PYTHONHOME=/opt/python/$PYTHON_VERSION cd .. $APACHE -k start -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG} -trap "$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}" EXIT HUP +trap '$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}' EXIT HUP set +e wget -t 1 -T 10 -O - "http://localhost:8080${PROJECT_DIRECTORY}" diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index e51c12d609..9a33507cc8 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -40,7 +40,7 @@ fi set -x # Workaround macOS python 3.9 incompatibility with system virtualenv. -if [ $(uname -s) = "Darwin" ]; then +if [ "$(uname -s)" = "Darwin" ]; then VIRTUALENV="/Library/Frameworks/Python.framework/Versions/3.9/bin/python3 -m virtualenv" else VIRTUALENV=$(command -v virtualenv) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 69550ec932..7b9d051bd7 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -67,9 +67,9 @@ fi if [ -z "$PYTHON_BINARY" ]; then # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a # system python3 doesn't exist or exists but is older than 3.6. - if is_python_36 $(command -v python3); then + if is_python_36 "$(command -v python3)"; then PYTHON=$(command -v python3) - elif is_python_36 $(command -v /opt/mongodbtoolchain/v2/bin/python3); then + elif is_python_36 "$(command -v /opt/mongodbtoolchain/v2/bin/python3)"; then PYTHON=$(command -v /opt/mongodbtoolchain/v2/bin/python3) else echo "Cannot test without python3.6+ installed!" @@ -119,20 +119,21 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE=$(pwd)/libmongocrypt/nocrypto if [ -f "${BASE}/lib/libmongocrypt.so" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so + PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so elif [ -f "${BASE}/lib/libmongocrypt.dylib" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib + PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib elif [ -f "${BASE}/bin/mongocrypt.dll" ]; then PYMONGOCRYPT_LIB=${BASE}/bin/mongocrypt.dll # libmongocrypt's windows dll is not marked executable. chmod +x $PYMONGOCRYPT_LIB - export PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) + PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) elif [ -f "${BASE}/lib64/libmongocrypt.so" ]; then - export PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so + PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so else echo "Cannot find libmongocrypt shared object file" exit 1 fi + export PYMONGOCRYPT_LIB # TODO: Test with 'pip install pymongocrypt' git clone --branch master https://github.com/mongodb/libmongocrypt.git libmongocrypt_git @@ -175,7 +176,7 @@ $PYTHON -c 'import sys; print(sys.version)' # Only cover CPython. PyPy reports suspiciously low coverage. PYTHON_IMPL=$($PYTHON -c "import platform; print(platform.python_implementation())") COVERAGE_ARGS="" -if [ -n "$COVERAGE" -a $PYTHON_IMPL = "CPython" ]; then +if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then if $PYTHON -m coverage --version; then echo "INFO: coverage is installed, running tests with coverage..." COVERAGE_ARGS="-m coverage run --branch" @@ -186,7 +187,7 @@ fi $PYTHON setup.py clean if [ -z "$GREEN_FRAMEWORK" ]; then - if [ -z "$C_EXTENSIONS" -a $PYTHON_IMPL = "CPython" ]; then + if [ -z "$C_EXTENSIONS" ] && [ "$PYTHON_IMPL" = "CPython" ]; then # Fail if the C extensions fail to build. # This always sets 0 for exit status, even if the build fails, due diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39062bbdf5..b20ad7ae55 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,3 +29,12 @@ repos: - id: isort files: \.py$ args: [--profile=black] + +# We use the Python version instead of the original version which seems to require Docker +# https://github.com/koalaman/shellcheck-precommit +- repo: https://github.com/shellcheck-py/shellcheck-py + rev: v0.8.0.1 + hooks: + - id: shellcheck + name: shellcheck + args: ["--severity=warning"] diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 358e7502f3..e2956c122b 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -8,6 +8,7 @@ version: 2 # Build documentation in the doc/ directory with Sphinx sphinx: configuration: doc/conf.py + fail_on_warning: true # Set the version of Python and requirements required to build the docs. python: diff --git a/doc/conf.py b/doc/conf.py index 47debcf14c..3f74a11d60 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -21,6 +21,7 @@ "sphinx.ext.coverage", "sphinx.ext.todo", "sphinx.ext.intersphinx", + "sphinxcontrib.shellcheck", ] # Add any paths that contain templates here, relative to this directory. diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt index ce5d1abf36..455a47d217 100644 --- a/doc/docs-requirements.txt +++ b/doc/docs-requirements.txt @@ -1,3 +1,4 @@ Sphinx~=4.2 sphinx_rtd_theme~=0.5 readthedocs-sphinx-search~=0.1 +sphinxcontrib-shellcheck~=1.1 diff --git a/pymongo/collection.py b/pymongo/collection.py index df8db3f106..a61c905d29 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2947,7 +2947,7 @@ def find_one_and_replace( as keyword arguments (for example maxTimeMS can be used with recent server versions). - + .. versionchanged:: 4.1 Added ``let`` parameter. .. versionchanged:: 3.11 Added the ``hint`` option. From 80314255d7fb10769a083fcbee3d613a7263bd92 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Feb 2022 12:41:29 -0600 Subject: [PATCH 0585/2111] PYTHON-3092 Add Type Discovery Files (#863) --- MANIFEST.in | 3 +++ bson/py.typed | 2 ++ gridfs/py.typed | 2 ++ pymongo/py.typed | 2 ++ 4 files changed, 9 insertions(+) create mode 100644 bson/py.typed create mode 100644 gridfs/py.typed create mode 100644 pymongo/py.typed diff --git a/MANIFEST.in b/MANIFEST.in index d017d16ab0..726c631e89 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,3 +12,6 @@ include tools/README.rst recursive-include test *.pem recursive-include test *.py recursive-include bson *.h +include bson/py.typed +include gridfs/py.typed +include pymongo/py.typed diff --git a/bson/py.typed b/bson/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/bson/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/gridfs/py.typed b/gridfs/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/gridfs/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". diff --git a/pymongo/py.typed b/pymongo/py.typed new file mode 100644 index 0000000000..0f4057061a --- /dev/null +++ b/pymongo/py.typed @@ -0,0 +1,2 @@ +# PEP-561 Support File. +# "Package maintainers who wish to support type checking of their code MUST add a marker file named py.typed to their package supporting typing". From 405c11dc2ccc36edd0cdac69056a58c0dec66c43 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Feb 2022 12:43:02 -0600 Subject: [PATCH 0586/2111] PYTHON-3109 Test against latest rapid releases (#862) --- .evergreen/config.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d681815c12..d17054169f 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1258,6 +1258,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-rapid-standalone" + tags: ["rapid", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-rapid-replica_set" + tags: ["rapid", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-rapid-sharded_cluster" + tags: ["rapid", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "rapid" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-serverless" tags: ["serverless"] commands: @@ -2138,6 +2165,7 @@ buildvariants: auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: &all-server-versions + - ".rapid" - ".latest" - ".5.0" - ".4.4" From 341d489f38ad51620fab50bfc7c3f8c1227fefee Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 14 Feb 2022 11:26:14 -0800 Subject: [PATCH 0587/2111] PYTHON-3088 Update load balancer tests to support dedicated load balancer port (#866) --- .evergreen/config.yml | 6 ++++++ pymongo/pool.py | 7 ------- test/__init__.py | 4 ---- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d17054169f..2a65324300 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -292,6 +292,7 @@ functions: DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ + LOAD_BALANCER=${LOAD_BALANCER} \ bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update @@ -457,6 +458,7 @@ functions: fi if [ -n "${test_loadbalancer}" ]; then export TEST_LOADBALANCER=1 + export LOAD_BALANCER=1 export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" fi @@ -1712,8 +1714,12 @@ tasks: commands: - func: "bootstrap mongo-orchestration" vars: + VERSION: "latest" TOPOLOGY: "sharded_cluster" + LOAD_BALANCER: true - func: "run load-balancer" + vars: + LOAD_BALANCER: true - func: "run tests" - name: "test-fips-standalone" diff --git a/pymongo/pool.py b/pymongo/pool.py index d616408ef8..61945e2d5b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -248,9 +248,6 @@ def _set_keepalive_times(sock): # main thread, to avoid the deadlock. See PYTHON-607. "foo".encode("idna") -# Remove after PYTHON-2712 -_MOCK_SERVICE_ID = False - def _raise_connection_failure(address, error, msg_prefix=None): """Convert a socket.error to ConnectionFailure and raise it.""" @@ -633,10 +630,6 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): auth_ctx = None doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) - # PYTHON-2712 will remove this topologyVersion fallback logic. - if self.opts.load_balanced and _MOCK_SERVICE_ID: - process_id = doc.get("topologyVersion", {}).get("processId") - doc.setdefault("serviceId", process_id) if not self.opts.load_balanced: doc.pop("serviceId", None) hello = Hello(doc, awaitable=awaitable) diff --git a/test/__init__.py b/test/__init__.py index 32220cfff3..d75c011547 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -101,10 +101,6 @@ SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") if TEST_LOADBALANCER: - # Remove after PYTHON-2712 - from pymongo import pool - - pool._MOCK_SERVICE_ID = True res = parse_uri(SINGLE_MONGOS_LB_URI or "") host, port = res["nodelist"][0] db_user = res["username"] or db_user From 2db512f5d509e52ed0bc9cde55acd028dc81e022 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Feb 2022 16:14:36 -0600 Subject: [PATCH 0588/2111] PYTHON-3078 Remove Use of Unsupported NoReturn Type Class (#864) --- .evergreen/config.yml | 2 +- bson/__init__.py | 3 +-- bson/objectid.py | 4 ++-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2a65324300..8edc43df20 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1729,7 +1729,7 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" - PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" + PYTHON_BINARY: "/opt/mongodbtoolchain/v2/bin/python3" - func: "run tests" # }}} - name: "coverage-report" diff --git a/bson/__init__.py b/bson/__init__.py index 9431909f9c..d9124d1b32 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,7 +76,6 @@ List, Mapping, MutableMapping, - NoReturn, Sequence, Tuple, Type, @@ -167,7 +166,7 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: return view.tobytes(), view -def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: +def _raise_unknown_type(element_type: int, element_name: str) -> None: """Unknown type helper.""" raise InvalidBSON( "Detected unknown BSON type %r for fieldname '%s'. Are " diff --git a/bson/objectid.py b/bson/objectid.py index 9ad3ed60be..7413fd497b 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -24,7 +24,7 @@ import threading import time from random import SystemRandom -from typing import Any, NoReturn, Optional, Type, Union +from typing import Any, Optional, Type, Union from bson.errors import InvalidId from bson.tz_util import utc @@ -32,7 +32,7 @@ _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid: str) -> NoReturn: +def _raise_invalid_id(oid: str) -> None: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" " or a 24-character hex string" % oid From 9482019a537bdf3493122f5c9ec8167df0f15e02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Feb 2022 15:40:36 -0800 Subject: [PATCH 0589/2111] Add resync-syncs workarounds for incomplete spec work (#873) --- .evergreen/resync-specs.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 1d0742258b..bf20f23037 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -79,6 +79,7 @@ do ;; cmap|CMAP) cpjson connection-monitoring-and-pooling/tests cmap + rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 ;; command*monitoring) cpjson command-monitoring/tests command_monitoring @@ -127,6 +128,7 @@ do transactions|transactions-convenient-api) cpjson transactions/tests/ transactions cpjson transactions-convenient-api/tests/ transactions-convenient-api + rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 ;; unified) cpjson unified-test-format/tests/ unified-test-format/ From 09f8aa9928e9763e790a7fd6e30bccb701efea9d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Feb 2022 15:49:39 -0800 Subject: [PATCH 0590/2111] PYTHON-3072 Use _Address in more places (#871) --- pymongo/command_cursor.py | 8 ++++---- pymongo/mongo_client.py | 4 ++-- pymongo/uri_parser.py | 9 +++------ 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index d7a37766b2..2adc389baf 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -22,7 +22,7 @@ from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore from pymongo.response import PinnedResponse -from pymongo.typings import _DocumentType +from pymongo.typings import _Address, _DocumentType if TYPE_CHECKING: from pymongo.client_session import ClientSession @@ -38,7 +38,7 @@ def __init__( self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], - address: Optional[Tuple[str, Optional[int]]], + address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, @@ -254,7 +254,7 @@ def cursor_id(self) -> int: return self.__id @property - def address(self) -> Optional[Tuple[str, Optional[int]]]: + def address(self) -> Optional[_Address]: """The (host, port) of the server used, or None. .. versionadded:: 3.0 @@ -309,7 +309,7 @@ def __init__( self, collection: "Collection[_DocumentType]", cursor_info: Mapping[str, Any], - address: Optional[Tuple[str, Optional[int]]], + address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 6b0d55601f..e9fa932ff1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -86,7 +86,7 @@ from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _Address, _CollationIn, _DocumentType, _Pipeline from pymongo.uri_parser import ( _check_options, _handle_option_deprecations, @@ -1061,7 +1061,7 @@ def is_mongos(self) -> bool: return self._server_property("server_type") == SERVER_TYPE.Mongos @property - def nodes(self) -> FrozenSet[Tuple[str, Optional[int]]]: + def nodes(self) -> FrozenSet[_Address]: """Set of all currently connected servers. .. warning:: When connected to a replica set the value of :attr:`nodes` diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 76c6e4d513..3417c4954e 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -40,6 +40,7 @@ ) from pymongo.errors import ConfigurationError, InvalidURI from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver +from pymongo.typings import _Address SCHEME = "mongodb://" SCHEME_LEN = len(SCHEME) @@ -114,9 +115,7 @@ def parse_ipv6_literal_host( return entity[1:i], entity[i + 2 :] -def parse_host( - entity: str, default_port: Optional[int] = DEFAULT_PORT -) -> Tuple[str, Optional[int]]: +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: """Validates a host string Returns a 2-tuple of host followed by port where port is default_port @@ -363,9 +362,7 @@ def split_options( return options -def split_hosts( - hosts: str, default_port: Optional[int] = DEFAULT_PORT -) -> List[Tuple[str, Optional[int]]]: +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[_Address]: """Takes a string of the form host1[:port],host2[:port]... and splits it into (host, port) tuples. If [:port] isn't present the default_port is used. From 7a8f6b344240a90c4922541c20b22571e42b4fb6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Feb 2022 17:11:12 -0800 Subject: [PATCH 0591/2111] PYTHON-2147 Use verified peer cert chain in OCSP when available (#877) --- pymongo/ocsp_support.py | 10 ++++++++-- pymongo/pyopenssl_context.py | 4 +++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 369055ea8d..56d18a29bf 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -275,12 +275,18 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("No peer cert?") return 0 cert = cert.to_cryptography() - chain = conn.get_peer_cert_chain() + # Use the verified chain when available (pyopenssl>=20.0). + if hasattr(conn, "get_verified_chain"): + chain = conn.get_verified_chain() + trusted_ca_certs = None + else: + chain = conn.get_peer_cert_chain() + trusted_ca_certs = user_data.trusted_ca_certs if not chain: _LOGGER.debug("No peer cert chain?") return 0 chain = [cer.to_cryptography() for cer in chain] - issuer = _get_issuer_cert(cert, chain, user_data.trusted_ca_certs) + issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 ext = _get_extension(cert, _TLSFeature) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index d42cafb084..9e4c5cab40 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -276,7 +276,9 @@ def load_verify_locations(self, cafile=None, capath=None): ssl.CERT_NONE. """ self._ctx.load_verify_locations(cafile, capath) - self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) + # Manually load the CA certs when get_verified_chain is not available (pyopenssl<20). + if not hasattr(_SSL.Connection, "get_verified_chain"): + self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) def _load_certifi(self): """Attempt to load CA certs from certifi.""" From 1a90e477cf0024c47d24e0c2cfe38442ed9ae222 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Feb 2022 19:29:21 -0600 Subject: [PATCH 0592/2111] PYTHON-3131 Test Failure - test_mypy on macos + auth (#875) --- test/test_mypy.py | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/test/test_mypy.py b/test/test_mypy.py index 0f1498c64b..5b9746f723 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -25,10 +25,10 @@ except ImportError: api = None +from test import IntegrationTest + from bson.son import SON from pymongo.collection import Collection -from pymongo.errors import ServerSelectionTimeoutError -from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -53,22 +53,13 @@ def test_mypy_failures(self) -> None: self.ensure_mypy_fails(filename) -class TestPymongo(unittest.TestCase): - client: MongoClient +class TestPymongo(IntegrationTest): coll: Collection @classmethod - def setUpClass(cls) -> None: - cls.client = MongoClient(serverSelectionTimeoutMS=250, directConnection=False) + def setUpClass(cls): + super().setUpClass() cls.coll = cls.client.test.test - try: - cls.client.admin.command("ping") - except ServerSelectionTimeoutError as exc: - raise unittest.SkipTest(f"Could not connect to MongoDB: {exc}") - - @classmethod - def tearDownClass(cls) -> None: - cls.client.close() def test_insert_find(self) -> None: doc = {"my": "doc"} From a0fe7c03af08adde0c893071e1664b43570b9841 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Feb 2022 06:44:08 -0600 Subject: [PATCH 0593/2111] PYTHON-3120 Set up flake8 linting (#868) --- .flake8 | 30 +++++++ .github/workflows/test-python.yml | 4 + .pre-commit-config.yaml | 25 +++++- bson/__init__.py | 20 ++--- bson/binary.py | 4 +- bson/codec_options.py | 6 +- bson/dbref.py | 4 +- bson/json_util.py | 12 ++- bson/objectid.py | 2 +- bson/raw_bson.py | 3 +- bson/tz_util.py | 2 +- doc/conf.py | 2 +- green_framework_test.py | 4 +- gridfs/__init__.py | 7 +- gridfs/grid_file.py | 17 ++-- pymongo/__init__.py | 19 ++-- pymongo/aggregation.py | 2 +- pymongo/auth.py | 16 ++-- pymongo/bulk.py | 5 +- pymongo/change_stream.py | 4 +- pymongo/client_session.py | 6 +- pymongo/collation.py | 2 +- pymongo/collection.py | 13 ++- pymongo/command_cursor.py | 2 +- pymongo/common.py | 39 ++++---- pymongo/compression_support.py | 2 +- pymongo/cursor.py | 13 ++- pymongo/daemon.py | 4 +- pymongo/database.py | 6 +- pymongo/encryption.py | 6 +- pymongo/encryption_options.py | 3 +- pymongo/errors.py | 2 +- pymongo/event_loggers.py | 88 ++++++++----------- pymongo/helpers.py | 10 +-- pymongo/message.py | 18 ++-- pymongo/mongo_client.py | 9 +- pymongo/monitoring.py | 6 +- pymongo/network.py | 8 +- pymongo/periodic_executor.py | 2 +- pymongo/pool.py | 18 ++-- pymongo/pyopenssl_context.py | 4 +- pymongo/read_preferences.py | 7 +- pymongo/results.py | 2 +- pymongo/server.py | 1 - pymongo/server_description.py | 2 +- pymongo/socket_checker.py | 2 +- pymongo/ssl_context.py | 4 +- pymongo/ssl_support.py | 4 +- pymongo/typings.py | 2 - pymongo/uri_parser.py | 45 ++++------ pymongo/write_concern.py | 2 +- setup.py | 15 ++-- test/__init__.py | 13 ++- test/atlas/test_connection.py | 4 +- test/mockupdb/test_cluster_time.py | 2 +- test/mockupdb/test_handshake.py | 4 +- test/mockupdb/test_list_indexes.py | 2 +- test/mockupdb/test_mixed_version_sharded.py | 2 +- .../mockupdb/test_mongos_command_read_mode.py | 3 +- .../test_network_disconnect_primary.py | 1 - test/mockupdb/test_slave_okay_single.py | 12 +-- test/mod_wsgi_test/mod_wsgi_test.wsgi | 4 +- test/ocsp/test_ocsp.py | 2 +- test/test_auth.py | 19 ++-- test/test_binary.py | 2 - test/test_bson.py | 64 ++++++-------- test/test_bson_corpus.py | 4 +- test/test_client.py | 25 ++---- test/test_client_context.py | 2 +- test/test_cmap.py | 2 - test/test_collection.py | 8 +- test/test_command_monitoring_legacy.py | 7 +- test/test_comment.py | 15 ++-- test/test_crud_v1.py | 2 +- test/test_cursor.py | 11 +-- test/test_custom_types.py | 4 +- test/test_data_lake.py | 4 +- test/test_database.py | 7 +- test/test_dbref.py | 2 +- test/test_discovery_and_monitoring.py | 2 +- test/test_dns.py | 2 +- test/test_encryption.py | 28 +++--- test/test_errors.py | 4 +- test/test_examples.py | 16 ++-- test/test_grid_file.py | 2 +- test/test_gridfs_bucket.py | 2 +- test/test_heartbeat_monitoring.py | 1 - test/test_json_util.py | 18 ++-- test/test_load_balancer.py | 2 +- test/test_max_staleness.py | 16 ++-- test/test_monitoring.py | 17 ++-- test/test_mypy.py | 1 - test/test_pooling.py | 2 +- test/test_saslprep.py | 2 +- test/test_sdam_monitoring_spec.py | 10 +-- test/test_server_description.py | 2 +- test/test_session.py | 2 +- test/test_son.py | 4 +- test/test_srv_polling.py | 4 +- test/test_ssl.py | 20 ++--- test/test_threads.py | 2 +- test/test_uri_parser.py | 46 ++++------ test/unified_format.py | 10 +-- tools/clean.py | 8 +- tools/fail_if_no_c.py | 4 +- 105 files changed, 454 insertions(+), 535 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..e5bc58921a --- /dev/null +++ b/.flake8 @@ -0,0 +1,30 @@ +[flake8] +max-line-length = 100 +enable-extensions = G +extend-ignore = + G200, G202, + # black adds spaces around ':' + E203, + # E501 line too long (let black handle line length) + E501 + # B305 `.next()` is not a thing on Python 3 + B305 +per-file-ignores = + # E402 module level import not at top of file + pymongo/__init__.py: E402 + + # G004 Logging statement uses f-string + pymongo/event_loggers.py: G004 + + # E402 module level import not at top of file + # B011 Do not call assert False since python -O removes these calls + # F405 'Foo' may be undefined, or defined from star imports + # E741 ambiguous variable name + # B007 Loop control variable 'foo' not used within the loop body + # F403 'from foo import *' used; unable to detect undefined names + # B001 Do not use bare `except:` + # E722 do not use bare 'except' + # E731 do not assign a lambda expression, use a def + # F811 redefinition of unused 'foo' from line XXX + # F841 local variable 'foo' is assigned to but never used + test/*: E402, B011, F405, E741, B007, F403, B001, E722, E731, F811, F841 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 4b5f762786..046915b04a 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -31,6 +31,8 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: 'setup.py' - name: Start MongoDB uses: supercharge/mongodb-github-action@1.7.0 with: @@ -53,6 +55,8 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: 'setup.py' - name: Install dependencies run: | python -m pip install -U pip mypy diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b20ad7ae55..5c1e92f5b7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + rev: v4.1.0 hooks: - id: check-added-large-files - id: check-case-conflict @@ -24,17 +24,36 @@ repos: args: [--line-length=100] - repo: https://github.com/PyCQA/isort - rev: 5.7.0 + rev: 5.10.1 hooks: - id: isort files: \.py$ args: [--profile=black] +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.9.2 + hooks: + - id: flake8 + additional_dependencies: [ + 'flake8-bugbear==20.1.4', + 'flake8-logging-format==0.6.0', + 'flake8-implicit-str-concat==0.2.0', + ] + # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.8.0.1 + rev: v0.8.0.4 hooks: - id: shellcheck name: shellcheck args: ["--severity=warning"] + +- repo: https://github.com/sirosen/check-jsonschema + rev: 0.11.0 + hooks: + - id: check-jsonschema + name: "Check GitHub Workflows" + files: ^\.github/workflows/ + types: [yaml] + args: ["--schemafile", "https://json.schemastore.org/github-workflow"] diff --git a/bson/__init__.py b/bson/__init__.py index d9124d1b32..a287db1801 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -84,7 +84,7 @@ cast, ) -from bson.binary import ( +from bson.binary import ( # noqa: F401 ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, @@ -513,7 +513,7 @@ def _bson_to_dict(data: Any, opts: Any) -> Any: if _USE_C: - _bson_to_dict = _cbson._bson_to_dict + _bson_to_dict = _cbson._bson_to_dict # noqa: F811 _PACK_FLOAT = struct.Struct(" bytes: """Make a 'C' string, checking for embedded NUL characters.""" if isinstance(string, bytes): if b"\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) else: if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -562,7 +562,7 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) + raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) else: return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -571,7 +571,7 @@ def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" # Keys can only be text in python 3. if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not " "contain a NUL character") + raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" @@ -846,7 +846,7 @@ def _name_value_to_bson( def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, " "key was %r" % (key,)) + raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) if check_keys: if key.startswith("$"): raise InvalidDocument("key %r must not start with '$'" % (key,)) @@ -876,7 +876,7 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) if _USE_C: - _dict_to_bson = _cbson._dict_to_bson + _dict_to_bson = _cbson._dict_to_bson # noqa: F811 def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: @@ -1032,7 +1032,7 @@ def decode_all( if _USE_C: - decode_all = _cbson.decode_all + decode_all = _cbson.decode_all # noqa: F811 def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: diff --git a/bson/binary.py b/bson/binary.py index e20bf87af3..93c43ee40c 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -260,7 +260,7 @@ def from_uuid( if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( - "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if uuid_representation == UuidRepresentation.UNSPECIFIED: @@ -310,7 +310,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( - "uuid_representation must be a value from " "bson.binary.UuidRepresentation" + "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if uuid_representation == UuidRepresentation.UNSPECIFIED: diff --git a/bson/codec_options.py b/bson/codec_options.py index b43a0275d8..8e5f97df30 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -23,12 +23,10 @@ Any, Callable, Dict, - Generic, Iterable, MutableMapping, Optional, Type, - TypeVar, Union, cast, ) @@ -312,10 +310,10 @@ def __new__( raise TypeError("tz_aware must be True or False") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( - "uuid_representation must be a value " "from bson.binary.UuidRepresentation" + "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore - raise ValueError("unicode_decode_error_handler must be a string " "or None") + raise ValueError("unicode_decode_error_handler must be a string or None") if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): raise TypeError("tzinfo must be an instance of datetime.tzinfo") diff --git a/bson/dbref.py b/bson/dbref.py index 773c95f59d..7849435f23 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -35,7 +35,7 @@ def __init__( collection: str, id: Any, database: Optional[str] = None, - _extra: Mapping[str, Any] = {}, + _extra: Optional[Mapping[str, Any]] = None, **kwargs: Any ) -> None: """Initialize a new :class:`DBRef`. @@ -63,7 +63,7 @@ def __init__( self.__collection = collection self.__id = id self.__database = database - kwargs.update(_extra) + kwargs.update(_extra or {}) self.__kwargs = kwargs @property diff --git a/bson/json_util.py b/bson/json_util.py index 3cdf701f70..99dbc62609 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -283,7 +283,7 @@ def __new__( self.json_mode = json_mode if self.json_mode == JSONMode.RELAXED: if strict_number_long: - raise ValueError("Cannot specify strict_number_long=True with" " JSONMode.RELAXED") + raise ValueError("Cannot specify strict_number_long=True with JSONMode.RELAXED") if datetime_representation not in (None, DatetimeRepresentation.ISO8601): raise ValueError( "datetime_representation must be DatetimeRepresentation." @@ -296,7 +296,7 @@ def __new__( self.strict_uuid = True elif self.json_mode == JSONMode.CANONICAL: if strict_number_long not in (None, True): - raise ValueError("Cannot specify strict_number_long=False with" " JSONMode.RELAXED") + raise ValueError("Cannot specify strict_number_long=False with JSONMode.RELAXED") if datetime_representation not in (None, DatetimeRepresentation.NUMBERLONG): raise ValueError( "datetime_representation must be DatetimeRepresentation." @@ -581,11 +581,9 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary if not isinstance(b64, str): raise TypeError("$binary base64 must be a string: %s" % (doc,)) if not isinstance(subtype, str) or len(subtype) > 2: - raise TypeError("$binary subType must be a string at most 2 " "characters: %s" % (doc,)) + raise TypeError("$binary subType must be a string at most 2 characters: %s" % (doc,)) if len(binary) != 2: - raise TypeError( - '$binary must include only "base64" and "subType" ' "components: %s" % (doc,) - ) + raise TypeError('$binary must include only "base64" and "subType" components: %s' % (doc,)) data = base64.b64decode(b64.encode()) return _binary_or_uuid(data, int(subtype, 16), json_options) @@ -686,7 +684,7 @@ def _parse_canonical_regex(doc: Any) -> Regex: opts = regex["options"] if not isinstance(opts, str): raise TypeError( - "Bad $regularExpression options, options must be " "string, was type %s" % (type(opts)) + "Bad $regularExpression options, options must be string, was type %s" % (type(opts)) ) return Regex(regex["pattern"], opts) diff --git a/bson/objectid.py b/bson/objectid.py index 7413fd497b..24d25d0377 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -204,7 +204,7 @@ def __validate(self, oid: Any) -> None: _raise_invalid_id(oid) else: raise TypeError( - "id must be an instance of (bytes, str, ObjectId), " "not %s" % (type(oid),) + "id must be an instance of (bytes, str, ObjectId), not %s" % (type(oid),) ) @property diff --git a/bson/raw_bson.py b/bson/raw_bson.py index c102b367a2..ca7207f0a2 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -51,8 +51,7 @@ overhead of decoding or encoding BSON. """ -from collections.abc import Mapping as _Mapping -from typing import Any, ItemsView, Iterator, Mapping, Optional, cast +from typing import Any, ItemsView, Iterator, Mapping, Optional from bson import _get_object_size, _raw_to_dict from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER diff --git a/bson/tz_util.py b/bson/tz_util.py index 43ae52ccff..8106c77b40 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -15,7 +15,7 @@ """Timezone related utilities for BSON.""" from datetime import datetime, timedelta, tzinfo -from typing import Any, Optional, Tuple, Union +from typing import Optional, Tuple, Union ZERO: timedelta = timedelta(0) diff --git a/doc/conf.py b/doc/conf.py index 3f74a11d60..c2f97dabfe 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -9,7 +9,7 @@ sys.path[0:0] = [os.path.abspath("..")] -import pymongo +import pymongo # noqa # -- General configuration ----------------------------------------------------- diff --git a/green_framework_test.py b/green_framework_test.py index 610845a9f6..d638d9b014 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -59,7 +59,7 @@ def run(framework_name, *args): # Run the tests. sys.argv[:] = ["setup.py", "test"] + list(args) - import setup + import setup # noqa def main(): @@ -87,7 +87,7 @@ def main(): list_frameworks() sys.exit() else: - assert False, "unhandled option" + raise AssertionError("unhandled option") if not args: print(usage) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 22b28af1a7..73425a9e53 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -35,11 +35,10 @@ ) from pymongo import ASCENDING, DESCENDING from pymongo.client_session import ClientSession -from pymongo.collation import Collation from pymongo.collection import Collection -from pymongo.common import UNAUTHORIZED_CODES, validate_string +from pymongo.common import validate_string from pymongo.database import Database -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.errors import ConfigurationError from pymongo.read_preferences import _ServerMode from pymongo.write_concern import WriteConcern @@ -83,7 +82,7 @@ def __init__(self, database: Database, collection: str = "fs"): database = _clear_entity_type_registry(database) if not database.write_concern.acknowledged: - raise ConfigurationError("database must use " "acknowledged write_concern") + raise ConfigurationError("database must use acknowledged write_concern") self.__collection = database[collection] self.__files = self.__collection.files diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 93a97158ae..b290fc68b0 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -17,7 +17,7 @@ import io import math import os -from typing import Any, Iterable, List, Mapping, Optional, cast +from typing import Any, Iterable, List, Mapping, Optional from bson.binary import Binary from bson.int64 import Int64 @@ -172,10 +172,10 @@ def __init__( :attr:`~pymongo.collection.Collection.write_concern` """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") if not root_collection.write_concern.acknowledged: - raise ConfigurationError("root_collection must use " "acknowledged write_concern") + raise ConfigurationError("root_collection must use acknowledged write_concern") _disallow_transactions(session) # Handle alternative naming @@ -240,7 +240,7 @@ def closed(self) -> bool: "uploadDate", "Date that this file was uploaded.", closed_only=True ) md5: Optional[str] = _grid_in_property( - "md5", "MD5 of the contents of this file " "if an md5 sum was created.", closed_only=True + "md5", "MD5 of the contents of this file if an md5 sum was created.", closed_only=True ) _buffer: io.BytesIO @@ -356,7 +356,7 @@ def write(self, data: Any) -> None: try: data = data.encode(self.encoding) except AttributeError: - raise TypeError("must specify an encoding for file in " "order to write str") + raise TypeError("must specify an encoding for file in order to write str") read = io.BytesIO(data).read if self._buffer.tell() > 0: @@ -365,7 +365,7 @@ def write(self, data: Any) -> None: if space: try: to_write = read(space) - except: + except BaseException: self.abort() raise self._buffer.write(to_write) @@ -447,7 +447,7 @@ def __init__( from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an " "instance of Collection") + raise TypeError("root_collection must be an instance of Collection") _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) @@ -477,7 +477,7 @@ def __init__( "metadata", "Metadata attached to this file." ) md5: Optional[str] = _grid_out_property( - "md5", "MD5 of the contents of this file " "if an md5 sum was created." + "md5", "MD5 of the contents of this file if an md5 sum was created." ) _file: Any @@ -886,7 +886,6 @@ def __init__( def next(self) -> GridOut: """Get next GridOut object from cursor.""" _disallow_transactions(self.session) - # Work around "super is not iterable" issue in Python 3.x next_file = super(GridOutCursor, self).next() return GridOut(self.__root_collection, file_document=next_file, session=self.session) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index f8baa91971..9581068036 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -69,11 +69,14 @@ def get_version_string() -> str: """Current version of PyMongo.""" -from pymongo.collection import ReturnDocument -from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION -from pymongo.cursor import CursorType -from pymongo.mongo_client import MongoClient -from pymongo.operations import ( +from pymongo.collection import ReturnDocument # noqa: F401 +from pymongo.common import ( # noqa: F401 + MAX_SUPPORTED_WIRE_VERSION, + MIN_SUPPORTED_WIRE_VERSION, +) +from pymongo.cursor import CursorType # noqa: F401 +from pymongo.mongo_client import MongoClient # noqa: F401 +from pymongo.operations import ( # noqa: F401 DeleteMany, DeleteOne, IndexModel, @@ -82,14 +85,14 @@ def get_version_string() -> str: UpdateMany, UpdateOne, ) -from pymongo.read_preferences import ReadPreference -from pymongo.write_concern import WriteConcern +from pymongo.read_preferences import ReadPreference # noqa: F401 +from pymongo.write_concern import WriteConcern # noqa: F401 def has_c() -> bool: """Is the C extension installed?""" try: - from pymongo import _cmessage # type: ignore[attr-defined] + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 return True except ImportError: diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 51be0dfa81..e190fefc56 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -43,7 +43,7 @@ def __init__( ): if "explain" in options: raise ConfigurationError( - "The explain option is not supported. " "Use Database.command instead." + "The explain option is not supported. Use Database.command instead." ) self._target = target diff --git a/pymongo/auth.py b/pymongo/auth.py index 0a4e7e7324..3d259335b0 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -121,7 +121,7 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): if passwd is not None: raise ConfigurationError("Passwords are not supported by MONGODB-X509") if source is not None and source != "$external": - raise ValueError("authentication source must be " "$external or None for MONGODB-X509") + raise ValueError("authentication source must be $external or None for MONGODB-X509") # Source is always $external, user can be None. return MongoCredential(mech, "$external", user, None, None, None) elif mech == "MONGODB-AWS": @@ -129,7 +129,7 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): raise ConfigurationError("username without a password is not supported by MONGODB-AWS") if source is not None and source != "$external": raise ConfigurationError( - "authentication source must be " "$external or None for MONGODB-AWS" + "authentication source must be $external or None for MONGODB-AWS" ) properties = extra.get("authmechanismproperties", {}) @@ -302,7 +302,7 @@ def _authenticate_gssapi(credentials, sock_info): """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: raise ConfigurationError( - 'The "kerberos" module must be ' "installed to use GSSAPI authentication." + 'The "kerberos" module must be installed to use GSSAPI authentication.' ) try: @@ -351,7 +351,7 @@ def _authenticate_gssapi(credentials, sock_info): # 0 == continue, 1 == complete, -1 == error # Only authGSSClientStep can return 0. if kerberos.authGSSClientStep(ctx, "") != 0: - raise OperationFailure("Unknown kerberos " "failure in step function.") + raise OperationFailure("Unknown kerberos failure in step function.") # Start a SASL conversation with mongod/s # Note: pykerberos deals with base64 encoded byte strings. @@ -372,7 +372,7 @@ def _authenticate_gssapi(credentials, sock_info): for _ in range(10): result = kerberos.authGSSClientStep(ctx, str(response["payload"])) if result == -1: - raise OperationFailure("Unknown kerberos " "failure in step function.") + raise OperationFailure("Unknown kerberos failure in step function.") payload = kerberos.authGSSClientResponse(ctx) or "" @@ -388,15 +388,15 @@ def _authenticate_gssapi(credentials, sock_info): if result == kerberos.AUTH_GSS_COMPLETE: break else: - raise OperationFailure("Kerberos " "authentication failed to complete.") + raise OperationFailure("Kerberos authentication failed to complete.") # Once the security context is established actually authenticate. # See RFC 4752, Section 3.1, last two paragraphs. if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: - raise OperationFailure("Unknown kerberos " "failure during GSS_Unwrap step.") + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: - raise OperationFailure("Unknown kerberos " "failure during GSS_Wrap step.") + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") payload = kerberos.authGSSClientResponse(ctx) cmd = SON( diff --git a/pymongo/bulk.py b/pymongo/bulk.py index fae55a5c10..c736bd7d6f 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -26,7 +26,6 @@ from pymongo.collation import validate_collation_or_none from pymongo.common import ( validate_is_document_type, - validate_is_mapping, validate_ok_for_replace, validate_ok_for_update, ) @@ -476,7 +475,7 @@ def execute_no_results(self, sock_info, generator, write_concern): # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: raise OperationFailure( - "Cannot set bypass_document_validation with" " unacknowledged write concern" + "Cannot set bypass_document_validation with unacknowledged write concern" ) if self.ordered: @@ -488,7 +487,7 @@ def execute(self, write_concern, session): if not self.ops: raise InvalidOperation("No operations to execute") if self.executed: - raise InvalidOperation("Bulk operations can " "only be executed once.") + raise InvalidOperation("Bulk operations can only be executed once.") self.executed = True write_concern = write_concern or self.collection.write_concern session = _validate_session_write_concern(session, write_concern) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 50f6f72b73..d054046bda 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -15,7 +15,7 @@ """Watch changes on a collection, a database, or the entire cluster.""" import copy -from typing import TYPE_CHECKING, Any, Dict, Generic, Iterator, Mapping, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument @@ -363,7 +363,7 @@ def try_next(self) -> Optional[_DocumentType]: except KeyError: self.close() raise InvalidOperation( - "Cannot provide resume functionality when the resume " "token is missing." + "Cannot provide resume functionality when the resume token is missing." ) # If this is the last change document from the current batch, cache the diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 44381c0241..4cf41b2c70 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -192,7 +192,7 @@ def __init__( ) -> None: if snapshot: if causal_consistency: - raise ConfigurationError("snapshot reads do not support " "causal_consistency=True") + raise ConfigurationError("snapshot reads do not support causal_consistency=True") causal_consistency = False elif causal_consistency is None: causal_consistency = True @@ -717,7 +717,7 @@ def start_transaction( self._check_ended() if self.options.snapshot: - raise InvalidOperation("Transactions are not supported in " "snapshot sessions") + raise InvalidOperation("Transactions are not supported in snapshot sessions") if self.in_transaction: raise InvalidOperation("Transaction already in progress") @@ -885,7 +885,7 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: another `ClientSession` instance. """ if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance " "of bson.timestamp.Timestamp") + raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) def _process_response(self, reply): diff --git a/pymongo/collation.py b/pymongo/collation.py index aef480b932..5bc73c07c8 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -221,4 +221,4 @@ def validate_collation_or_none( return value.document if isinstance(value, dict): return value - raise TypeError("collation must be a dict, an instance of collation.Collation, " "or None.") + raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") diff --git a/pymongo/collection.py b/pymongo/collection.py index a61c905d29..8de1fbeeaa 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -29,7 +29,6 @@ Union, ) -from bson.code import Code from bson.codec_options import CodecOptions from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument @@ -204,11 +203,11 @@ def __init__( if not name or ".." in name: raise InvalidName("collection names cannot be empty") if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): - raise InvalidName("collection names must not " "contain '$': %r" % name) + raise InvalidName("collection names must not contain '$': %r" % name) if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start " "or end with '.': %r" % name) + raise InvalidName("collection names must not start or end with '.': %r" % name) if "\x00" in name: - raise InvalidName("collection names must not contain the " "null character") + raise InvalidName("collection names must not contain the null character") collation = validate_collation_or_none(kwargs.pop("collation", None)) self.__database: Database[_DocumentType] = database @@ -1873,7 +1872,7 @@ def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of " "pymongo.operations.IndexModel" % (index,) + "%r is not an instance of pymongo.operations.IndexModel" % (index,) ) document = index.document names.append(document["name"]) @@ -2725,7 +2724,7 @@ def __find_and_modify( common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): raise ValueError( - "return_document must be " "ReturnDocument.BEFORE or ReturnDocument.AFTER" + "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) @@ -2751,7 +2750,7 @@ def _find_and_modify(session, sock_info, retryable_write): if array_filters is not None: if not acknowledged: raise ConfigurationError( - "arrayFilters is unsupported for unacknowledged " "writes." + "arrayFilters is unsupported for unacknowledged writes." ) cmd["arrayFilters"] = list(array_filters) if hint is not None: diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 2adc389baf..d10e23f957 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,7 +15,7 @@ """CommandCursor class to iterate over command results.""" from collections import deque -from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional, Tuple +from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional from bson import _convert_raw_document_lists_to_streams from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager diff --git a/pymongo/common.py b/pymongo/common.py index 769b277cf3..5255468b5a 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -30,7 +30,6 @@ Tuple, Type, Union, - cast, ) from urllib.parse import unquote_plus @@ -180,7 +179,7 @@ def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): if value not in ("true", "false"): - raise ValueError("The value of %s must be " "'true' or 'false'" % (option,)) + raise ValueError("The value of %s must be 'true' or 'false'" % (option,)) return value == "true" return validate_boolean(option, value) @@ -193,7 +192,7 @@ def validate_integer(option: str, value: Any) -> int: try: return int(value) except ValueError: - raise ValueError("The value of %s must be " "an integer" % (option,)) + raise ValueError("The value of %s must be an integer" % (option,)) raise TypeError("Wrong type for %s, value must be an integer" % (option,)) @@ -201,7 +200,7 @@ def validate_positive_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be " "a positive integer" % (option,)) + raise ValueError("The value of %s must be a positive integer" % (option,)) return val @@ -209,7 +208,7 @@ def validate_non_negative_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be " "a non negative integer" % (option,)) + raise ValueError("The value of %s must be a non negative integer" % (option,)) return val @@ -242,7 +241,7 @@ def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an instance of " "str" % (option,)) + raise TypeError("Wrong type for %s, value must be an instance of str" % (option,)) def validate_string_or_none(option: str, value: Any) -> Optional[str]: @@ -261,7 +260,7 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an " "integer or a string" % (option,)) + raise TypeError("Wrong type for %s, value must be an integer or a string" % (option,)) def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: @@ -275,7 +274,7 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in return value return validate_non_negative_integer(option, val) raise TypeError( - "Wrong type for %s, value must be an " "non negative integer or a string" % (option,) + "Wrong type for %s, value must be an non negative integer or a string" % (option,) ) @@ -294,7 +293,7 @@ def validate_positive_float(option: str, value: Any) -> float: # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and " "less than one billion" % (option,)) + raise ValueError("%s must be greater than 0 and less than one billion" % (option,)) return value @@ -402,7 +401,7 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] tags[unquote_plus(key)] = unquote_plus(val) tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) + raise ValueError("%r not a valid value for %s" % (tag_set, name)) return tag_sets @@ -735,7 +734,7 @@ def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError("Unknown " "authentication option: %s" % (option,)) + raise ConfigurationError("Unknown authentication option: %s" % (option,)) return option, value @@ -762,12 +761,12 @@ def get_validated_options( validated_options: MutableMapping[str, Any] if isinstance(options, _CaseInsensitiveDictionary): validated_options = _CaseInsensitiveDictionary() - get_normed_key = lambda x: x - get_setter_key = lambda x: options.cased_key(x) + get_normed_key = lambda x: x # noqa: E731 + get_setter_key = lambda x: options.cased_key(x) # noqa: E731 else: validated_options = {} - get_normed_key = lambda x: x.lower() - get_setter_key = lambda x: x + get_normed_key = lambda x: x.lower() # noqa: E731 + get_setter_key = lambda x: x # noqa: E731 for opt, value in options.items(): normed_key = get_normed_key(opt) @@ -804,9 +803,7 @@ def __init__( ) -> None: if not isinstance(codec_options, CodecOptions): - raise TypeError( - "codec_options must be an instance of " "bson.codec_options.CodecOptions" - ) + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self.__codec_options = codec_options if not isinstance(read_preference, _ServerMode): @@ -819,14 +816,12 @@ def __init__( if not isinstance(write_concern, WriteConcern): raise TypeError( - "write_concern must be an instance of " "pymongo.write_concern.WriteConcern" + "write_concern must be an instance of pymongo.write_concern.WriteConcern" ) self.__write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError( - "read_concern must be an instance of " "pymongo.read_concern.ReadConcern" - ) + raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern @property diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 72cc232867..ed7021494f 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -82,7 +82,7 @@ def validate_compressors(dummy, value): def validate_zlib_compression_level(option, value): try: level = int(value) - except: + except Exception: raise TypeError("%s must be an integer, not %r." % (option, value)) if level < -1 or level > 9: raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index be4b998d31..02f1905df3 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -25,7 +25,6 @@ Iterable, List, Mapping, - MutableMapping, Optional, Sequence, Tuple, @@ -277,7 +276,7 @@ def __init__( # Exhaust cursor support if cursor_type == CursorType.EXHAUST: if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are " "not supported by mongos") + raise InvalidOperation("Exhaust cursors are not supported by mongos") if limit: raise InvalidOperation("Can't use limit and exhaust together.") self.__exhaust = True @@ -509,7 +508,7 @@ def add_option(self, mask: int) -> "Cursor[_DocumentType]": if self.__limit: raise InvalidOperation("Can't use limit and exhaust together.") if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are " "not supported by mongos") + raise InvalidOperation("Exhaust cursors are not supported by mongos") self.__exhaust = True self.__query_flags |= mask @@ -730,14 +729,14 @@ def __getitem__(self, index): skip = 0 if index.start is not None: if index.start < 0: - raise IndexError("Cursor instances do not support " "negative indices") + raise IndexError("Cursor instances do not support negative indices") skip = index.start if index.stop is not None: limit = index.stop - skip if limit < 0: raise IndexError( - "stop index must be greater than start " "index for slice %r" % index + "stop index must be greater than start index for slice %r" % index ) if limit == 0: self.__empty = True @@ -750,7 +749,7 @@ def __getitem__(self, index): if isinstance(index, int): if index < 0: - raise IndexError("Cursor instances do not support negative " "indices") + raise IndexError("Cursor instances do not support negative indices") clone = self.clone() clone.skip(index + self.__skip) clone.limit(-1) # use a hard limit @@ -758,7 +757,7 @@ def __getitem__(self, index): for doc in clone: return doc raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor " "instances" % index) + raise TypeError("index %r cannot be applied to Cursor instances" % index) def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": """**DEPRECATED** - Limit the number of documents to scan when diff --git a/pymongo/daemon.py b/pymongo/daemon.py index 53141751ac..4fdf147a59 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -70,7 +70,7 @@ def _spawn_daemon(args): _silence_resource_warning(popen) except FileNotFoundError as exc: warnings.warn( - f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", RuntimeWarning, stacklevel=2, ) @@ -96,7 +96,7 @@ def _spawn(args): ) except FileNotFoundError as exc: warnings.warn( - f"Failed to start {args[0]}: is it on your $PATH?\n" f"Original exception: {exc}", + f"Failed to start {args[0]}: is it on your $PATH?\nOriginal exception: {exc}", RuntimeWarning, stacklevel=2, ) diff --git a/pymongo/database.py b/pymongo/database.py index e6633ed230..f92dbc8aed 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -47,7 +47,7 @@ def _check_name(name): for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: if invalid_char in name: - raise InvalidName("database names cannot contain the " "character %r" % invalid_char) + raise InvalidName("database names cannot contain the character %r" % invalid_char) if TYPE_CHECKING: @@ -966,7 +966,7 @@ def validate_collection( name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or " "Collection") + raise TypeError("name_or_collection must be an instance of str or Collection") cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) if comment is not None: cmd["comment"] = comment @@ -988,7 +988,7 @@ def validate_collection( if "result" in res: info = res["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: " "%s" % (name, info)) + raise CollectionInvalid("%s invalid: %s" % (name, info)) elif not res.get("valid", False): valid = False break diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4a6653f959..9616ac89cd 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -21,7 +21,7 @@ try: from pymongocrypt.auto_encrypter import AutoEncrypter - from pymongocrypt.errors import MongoCryptError + from pymongocrypt.errors import MongoCryptError # noqa: F401 from pymongocrypt.explicit_encrypter import ExplicitEncrypter from pymongocrypt.mongocrypt import MongoCryptOptions from pymongocrypt.state_machine import MongoCryptCallback @@ -440,9 +440,7 @@ def __init__( ) if not isinstance(codec_options, CodecOptions): - raise TypeError( - "codec_options must be an instance of " "bson.codec_options.CodecOptions" - ) + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c206b4c8b5..2ac12bc4b4 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -14,11 +14,10 @@ """Support for automatic client-side field level encryption.""" -import copy from typing import TYPE_CHECKING, Any, List, Mapping, Optional try: - import pymongocrypt + import pymongocrypt # noqa: F401 _HAVE_PYMONGOCRYPT = True except ImportError: diff --git a/pymongo/errors.py b/pymongo/errors.py index a98a5a7fb8..4a167383ca 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -15,7 +15,7 @@ """Exceptions raised by PyMongo.""" from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Union -from bson.errors import * +from bson.errors import InvalidDocument try: # CPython 3.7+ diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 0b92d9fa2b..248dfb17bd 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -43,25 +43,25 @@ class CommandLogger(monitoring.CommandListener): def started(self, event: monitoring.CommandStartedEvent) -> None: logging.info( - "Command {0.command_name} with request id " - "{0.request_id} started on server " - "{0.connection_id}".format(event) + f"Command {event.command_name} with request id " + f"{event.request_id} started on server " + f"{event.connection_id}" ) def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: logging.info( - "Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "succeeded in {0.duration_micros} " - "microseconds".format(event) + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"succeeded in {event.duration_micros} " + "microseconds" ) def failed(self, event: monitoring.CommandFailedEvent) -> None: logging.info( - "Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "failed in {0.duration_micros} " - "microseconds".format(event) + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"failed in {event.duration_micros} " + "microseconds" ) @@ -77,7 +77,7 @@ class ServerLogger(monitoring.ServerListener): """ def opened(self, event: monitoring.ServerOpeningEvent) -> None: - logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) + logging.info(f"Server {event.server_address} added to topology {event.topology_id}") def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: previous_server_type = event.previous_description.server_type @@ -85,15 +85,13 @@ def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) - if new_server_type != previous_server_type: # server_type_name was added in PyMongo 3.4 logging.info( - "Server {0.server_address} changed type from " - "{0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event) + f"Server {event.server_address} changed type from " + f"{event.previous_description.server_type_name} to " + f"{event.new_description.server_type_name}" ) def closed(self, event: monitoring.ServerClosedEvent) -> None: - logging.warning( - "Server {0.server_address} removed from topology " "{0.topology_id}".format(event) - ) + logging.warning(f"Server {event.server_address} removed from topology {event.topology_id}") class HeartbeatLogger(monitoring.ServerHeartbeatListener): @@ -108,19 +106,19 @@ class HeartbeatLogger(monitoring.ServerHeartbeatListener): """ def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: - logging.info("Heartbeat sent to server " "{0.connection_id}".format(event)) + logging.info(f"Heartbeat sent to server {event.connection_id}") def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: # The reply.document attribute was added in PyMongo 3.4. logging.info( - "Heartbeat to server {0.connection_id} " + f"Heartbeat to server {event.connection_id} " "succeeded with reply " - "{0.reply.document}".format(event) + f"{event.reply.document}" ) def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: logging.warning( - "Heartbeat to server {0.connection_id} " "failed with error {0.reply}".format(event) + f"Heartbeat to server {event.connection_id} failed with error {event.reply}" ) @@ -136,20 +134,18 @@ class TopologyLogger(monitoring.TopologyListener): """ def opened(self, event: monitoring.TopologyOpenedEvent) -> None: - logging.info("Topology with id {0.topology_id} " "opened".format(event)) + logging.info(f"Topology with id {event.topology_id} opened") def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: - logging.info( - "Topology description updated for " "topology id {0.topology_id}".format(event) - ) + logging.info(f"Topology description updated for topology id {event.topology_id}") previous_topology_type = event.previous_description.topology_type new_topology_type = event.new_description.topology_type if new_topology_type != previous_topology_type: # topology_type_name was added in PyMongo 3.4 logging.info( - "Topology {0.topology_id} changed type from " - "{0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event) + f"Topology {event.topology_id} changed type from " + f"{event.previous_description.topology_type_name} to " + f"{event.new_description.topology_type_name}" ) # The has_writable_server and has_readable_server methods # were added in PyMongo 3.4. @@ -159,7 +155,7 @@ def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) logging.warning("No readable servers available.") def closed(self, event: monitoring.TopologyClosedEvent) -> None: - logging.info("Topology with id {0.topology_id} " "closed".format(event)) + logging.info(f"Topology with id {event.topology_id} closed") class ConnectionPoolLogger(monitoring.ConnectionPoolListener): @@ -181,53 +177,45 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): """ def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: - logging.info("[pool {0.address}] pool created".format(event)) + logging.info(f"[pool {event.address}] pool created") def pool_ready(self, event): - logging.info("[pool {0.address}] pool ready".format(event)) + logging.info(f"[pool {event.address}] pool ready") def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: - logging.info("[pool {0.address}] pool cleared".format(event)) + logging.info(f"[pool {event.address}] pool cleared") def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: - logging.info("[pool {0.address}] pool closed".format(event)) + logging.info(f"[pool {event.address}] pool closed") def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: - logging.info( - "[pool {0.address}][conn #{0.connection_id}] " "connection created".format(event) - ) + logging.info(f"[pool {event.address}][conn #{event.connection_id}] connection created") def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection setup succeeded".format(event) + f"[pool {event.address}][conn #{event.connection_id}] connection setup succeeded" ) def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection closed, reason: " - "{0.reason}".format(event) + f"[pool {event.address}][conn #{event.connection_id}] " + f'connection closed, reason: "{event.reason}"' ) def connection_check_out_started( self, event: monitoring.ConnectionCheckOutStartedEvent ) -> None: - logging.info("[pool {0.address}] connection check out " "started".format(event)) + logging.info(f"[pool {event.address}] connection check out started") def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: - logging.info( - "[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event) - ) + logging.info(f"[pool {event.address}] connection check out failed, reason: {event.reason}") def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection checked out of pool".format(event) + f"[pool {event.address}][conn #{event.connection_id}] connection checked out of pool" ) def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: logging.info( - "[pool {0.address}][conn #{0.connection_id}] " - "connection checked into pool".format(event) + f"[pool {event.address}][conn #{event.connection_id}] connection checked into pool" ) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index f12c1e1655..8311aafa8f 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -86,9 +86,7 @@ def _index_list(key_or_list, direction=None): if isinstance(key_or_list, abc.ItemsView): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): - raise TypeError( - "if no direction is specified, " "key_or_list must be an instance of list" - ) + raise TypeError("if no direction is specified, key_or_list must be an instance of list") return key_or_list @@ -104,7 +102,7 @@ def _index_document(index_list): "mean %r?" % list(index_list.items()) ) elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, " "not: " + repr(index_list)) + raise TypeError("must use a list of (key, direction) pairs, not: " + repr(index_list)) if not len(index_list): raise ValueError("key_or_list must not be the empty list") @@ -237,11 +235,11 @@ def _fields_list_to_dict(fields, option_name): if isinstance(fields, (abc.Sequence, abc.Set)): if not all(isinstance(field, str) for field in fields): raise TypeError( - "%s must be a list of key names, each an " "instance of str" % (option_name,) + "%s must be a list of key names, each an instance of str" % (option_name,) ) return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or " "list of key names" % (option_name,)) + raise TypeError("%s must be a mapping or list of key names" % (option_name,)) def _handle_exception(): diff --git a/pymongo/message.py b/pymongo/message.py index 18cf0a6bf3..92d59c3ebd 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -638,7 +638,7 @@ def _op_msg_uncompressed(flags, command, identifier, docs, opts): if _use_c: - _op_msg_uncompressed = _cmessage._op_msg + _op_msg_uncompressed = _cmessage._op_msg # noqa: F811 def _op_msg(flags, command, dbname, read_preference, opts, ctx=None): @@ -712,7 +712,7 @@ def _query_uncompressed( if _use_c: - _query_uncompressed = _cmessage._query_message + _query_uncompressed = _cmessage._query_message # noqa: F811 def _query( @@ -754,7 +754,7 @@ def _get_more_uncompressed(collection_name, num_to_return, cursor_id): if _use_c: - _get_more_uncompressed = _cmessage._get_more_message + _get_more_uncompressed = _cmessage._get_more_message # noqa: F811 def _get_more(collection_name, num_to_return, cursor_id, ctx=None): @@ -1085,7 +1085,7 @@ def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx): if _use_c: - _encode_batched_op_msg = _cmessage._encode_batched_op_msg + _encode_batched_op_msg = _cmessage._encode_batched_op_msg # noqa: F811 def _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx): @@ -1120,7 +1120,7 @@ def _batched_op_msg(operation, command, docs, ack, opts, ctx): if _use_c: - _batched_op_msg = _cmessage._batched_op_msg + _batched_op_msg = _cmessage._batched_op_msg # noqa: F811 def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx): @@ -1149,7 +1149,7 @@ def _encode_batched_write_command(namespace, operation, command, docs, opts, ctx if _use_c: - _encode_batched_write_command = _cmessage._encode_batched_write_command + _encode_batched_write_command = _cmessage._encode_batched_write_command # noqa: F811 def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf): @@ -1348,7 +1348,7 @@ def __init__(self, flags, payload_document): self.flags = flags self.payload_document = payload_document - def raw_response(self, cursor_id=None, user_fields={}): + def raw_response(self, cursor_id=None, user_fields={}): # noqa: B006 """ cursor_id is ignored user_fields is used to determine which fields must not be decoded @@ -1395,12 +1395,12 @@ def unpack(cls, msg): flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError("Unsupported OP_MSG flag checksumPresent: " "0x%x" % (flags,)) + raise ProtocolError("Unsupported OP_MSG flag checksumPresent: 0x%x" % (flags,)) if flags ^ cls.MORE_TO_COME: raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) if first_payload_type != 0: - raise ProtocolError("Unsupported OP_MSG payload type: " "0x%x" % (first_payload_type,)) + raise ProtocolError("Unsupported OP_MSG payload type: 0x%x" % (first_payload_type,)) if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e9fa932ff1..9414d71962 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -52,7 +52,6 @@ cast, ) -import bson from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.son import SON from bson.timestamp import Timestamp @@ -687,7 +686,7 @@ def __init__( srv_service_name = keyword_opts.get("srvservicename") srv_max_hosts = keyword_opts.get("srvmaxhosts") if len([h for h in host if "/" in h]) > 1: - raise ConfigurationError("host must not contain multiple MongoDB " "URIs") + raise ConfigurationError("host must not contain multiple MongoDB URIs") for entity in host: # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' # it must be a URI, @@ -1165,7 +1164,7 @@ def _get_socket(self, server, session): and sock_info.max_wire_version < 8 ): raise ConfigurationError( - "Auto-encryption requires a minimum MongoDB version " "of 4.2" + "Auto-encryption requires a minimum MongoDB version of 4.2" ) yield sock_info @@ -1229,7 +1228,7 @@ def _socket_from_server(self, read_preference, server, session): def _socket_for_reads(self, read_preference, session): assert read_preference is not None, "read_preference must not be None" - topology = self._get_topology() + _ = self._get_topology() server = self._select_server(read_preference, session) return self._socket_from_server(read_preference, server, session) @@ -1814,7 +1813,7 @@ def drop_database( name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance " "of str or a Database") + raise TypeError("name_or_database must be an instance of str or a Database") with self._socket_for_writes(session) as sock_info: self[name]._command( diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 6a3ed6d07e..4798542dc7 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -633,7 +633,7 @@ def __init__( super(CommandStartedEvent, self).__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) - cmd_name, cmd_doc = command_name.lower(), command[command_name] + cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): self.__cmd: Mapping[str, Any] = {} else: @@ -651,7 +651,7 @@ def database_name(self) -> str: return self.__db def __repr__(self): - return ("<%s %s db: %r, command: %r, operation_id: %s, " "service_id: %s>") % ( + return ("<%s %s db: %r, command: %r, operation_id: %s, service_id: %s>") % ( self.__class__.__name__, self.connection_id, self.database_name, @@ -708,7 +708,7 @@ def reply(self) -> _DocumentOut: return self.__reply def __repr__(self): - return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, " "service_id: %s>") % ( + return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, service_id: %s>") % ( self.__class__.__name__, self.connection_id, self.command_name, diff --git a/pymongo/network.py b/pymongo/network.py index db952af731..01dca0b835 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -210,10 +210,10 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): # No request_id for exhaust cursor "getMore". if request_id is not None: if request_id != response_to: - raise ProtocolError("Got response id %r but expected " "%r" % (response_to, request_id)) + raise ProtocolError("Got response id %r but expected %r" % (response_to, request_id)) if length <= 16: raise ProtocolError( - "Message length (%r) not longer than standard " "message header size (16)" % (length,) + "Message length (%r) not longer than standard message header size (16)" % (length,) ) if length > max_message_size: raise ProtocolError( @@ -231,7 +231,7 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected " "%r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError("Got opcode %r but expected %r" % (op_code, _UNPACK_REPLY.keys())) return unpack_reply(data) @@ -272,7 +272,7 @@ def _receive_data_on_socket(sock_info, length, deadline): try: wait_for_read(sock_info, deadline) chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) - except (IOError, OSError) as exc: + except (IOError, OSError) as exc: # noqa: B014 if _errno_from_exception(exc) == errno.EINTR: continue raise diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 5bb08ec23f..2c3727a7a3 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -124,7 +124,7 @@ def _run(self): if not self._target(): self._stopped = True break - except: + except BaseException: with self._lock: self._stopped = True self._thread_will_exit = True diff --git a/pymongo/pool.py b/pymongo/pool.py index 61945e2d5b..09709ffbf4 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -72,7 +72,7 @@ def is_ip_address(address): try: ipaddress.ip_address(address) return True - except (ValueError, UnicodeError): + except (ValueError, UnicodeError): # noqa: B014 return False @@ -857,9 +857,7 @@ def validate_session(self, client, session): """ if session: if session._client is not client: - raise InvalidOperation( - "Can only use session with the MongoClient that" " started it" - ) + raise InvalidOperation("Can only use session with the MongoClient that started it") def close_socket(self, reason): """Close this connection with a reason.""" @@ -963,7 +961,7 @@ def _create_connection(address, options): # Check if dealing with a unix domain socket if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported " "on this system") + raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) # SOCK_CLOEXEC not supported for Unix sockets. _set_non_inheritable_non_atomic(sock.fileno()) @@ -1045,7 +1043,7 @@ def _configured_socket(address, options): # Raise _CertificateError directly like we do after match_hostname # below. raise - except (IOError, OSError, _SSLError) as exc: + except (IOError, OSError, _SSLError) as exc: # noqa: B014 sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -1246,8 +1244,8 @@ def update_is_writable(self, is_writable): """ self.is_writable = is_writable with self.lock: - for socket in self.sockets: - socket.update_is_writable(self.is_writable) + for _socket in self.sockets: + _socket.update_is_writable(self.is_writable) def reset(self, service_id=None): self._reset(close=False, service_id=service_id) @@ -1386,7 +1384,7 @@ def get_socket(self, handler=None): listeners.publish_connection_checked_out(self.address, sock_info.id) try: yield sock_info - except: + except BaseException: # Exception in caller. Ensure the connection gets returned. # Note that when pinned is True, the session owns the # connection and it is responsible for checking the connection @@ -1433,7 +1431,7 @@ def _get_socket(self): self.address, ConnectionCheckOutFailedReason.POOL_CLOSED ) raise _PoolClosedError( - "Attempted to check out a connection from closed connection " "pool" + "Attempted to check out a connection from closed connection pool" ) with self.lock: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 9e4c5cab40..eae38daef8 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -75,7 +75,7 @@ def _is_ip_address(address): try: _ip_address(address) return True - except (ValueError, UnicodeError): + except (ValueError, UnicodeError): # noqa: B014 return False @@ -145,7 +145,7 @@ def sendall(self, buf, flags=0): # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. - except (IOError, OSError) as exc: + except (IOError, OSError) as exc: # noqa: B014 if _errno_from_exception(exc) == _EINTR: continue raise diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 02a2e88bf0..5ce2fbafcc 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -49,8 +49,7 @@ def _validate_tag_sets(tag_sets): raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) if len(tag_sets) == 0: raise ValueError( - ("Tag sets %r invalid, must be None or contain at least one set of" " tags") - % (tag_sets,) + ("Tag sets %r invalid, must be None or contain at least one set of tags") % (tag_sets,) ) for tags in tag_sets: @@ -500,10 +499,10 @@ def make_read_preference( ) -> _ServerMode: if mode == _PRIMARY: if tag_sets not in (None, [{}]): - raise ConfigurationError("Read preference primary " "cannot be combined with tags") + raise ConfigurationError("Read preference primary cannot be combined with tags") if max_staleness != -1: raise ConfigurationError( - "Read preference primary cannot be " "combined with maxStalenessSeconds" + "Read preference primary cannot be combined with maxStalenessSeconds" ) return Primary() return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore diff --git a/pymongo/results.py b/pymongo/results.py index 127f574184..1cbb614bf3 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -13,7 +13,7 @@ # limitations under the License. """Result class definitions.""" -from typing import Any, Dict, List, Mapping, Optional, Sequence, cast +from typing import Any, Dict, List, Optional, cast from pymongo.errors import InvalidOperation diff --git a/pymongo/server.py b/pymongo/server.py index be1e7da89c..f26f473c32 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -21,7 +21,6 @@ from pymongo.helpers import _check_command_response from pymongo.message import _convert_exception, _OpMsg from pymongo.response import PinnedResponse, Response -from pymongo.server_type import SERVER_TYPE _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 6b2a71df0b..47e27c531b 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -15,7 +15,7 @@ """Represent one server the driver is connected to.""" import time -from typing import Any, Dict, Mapping, Optional, Set, Tuple, cast +from typing import Any, Dict, Mapping, Optional, Set, Tuple from bson import EPOCH_NAIVE from bson.objectid import ObjectId diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 70c12f0699..420953db2e 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -17,7 +17,7 @@ import errno import select import sys -from typing import Any, Optional, Union +from typing import Any, Optional # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index e546105141..148bef936d 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -31,10 +31,10 @@ # Base Exception class SSLError = _ssl.SSLError -from ssl import SSLContext +from ssl import SSLContext # noqa: F401,E402 if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): - from ssl import VERIFY_CRL_CHECK_LEAF + from ssl import VERIFY_CRL_CHECK_LEAF # noqa: F401 # Python 3.7 uses OpenSSL's hostname matching implementation # making it the obvious version to start using SSLConext.check_hostname. # Python 3.6 might have been a good version, but it suffers diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 7b5417fefa..06ef7ef185 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -34,7 +34,7 @@ # CPython ssl module constants to configure certificate verification # at a high level. This is legacy behavior, but requires us to # import the ssl module even if we're only using it for this purpose. - import ssl as _stdlibssl + import ssl as _stdlibssl # noqa from ssl import CERT_NONE, CERT_REQUIRED HAS_SNI = _ssl.HAS_SNI @@ -79,7 +79,7 @@ def get_ssl_context( if _ssl.IS_PYOPENSSL: raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) + setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) # noqa ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) diff --git a/pymongo/typings.py b/pymongo/typings.py index 263b591e24..19d92b2381 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -17,13 +17,11 @@ TYPE_CHECKING, Any, Dict, - List, Mapping, MutableMapping, Optional, Sequence, Tuple, - Type, TypeVar, Union, ) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 3417c4954e..fa44dd8569 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -15,19 +15,8 @@ """Tools to parse and validate a MongoDB URI.""" import re -import sys import warnings -from typing import ( - Any, - Dict, - List, - Mapping, - MutableMapping, - Optional, - Tuple, - Union, - cast, -) +from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union from urllib.parse import unquote_plus from pymongo.client_options import _parse_ssl_options @@ -107,7 +96,7 @@ def parse_ipv6_literal_host( """ if entity.find("]") == -1: raise ValueError( - "an IPv6 address literal must be " "enclosed in '[' and ']' according " "to RFC 2732." + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." ) i = entity.find("]:") if i == -1: @@ -196,7 +185,7 @@ def _handle_security_options(options): if tlsinsecure is not None: for opt in _IMPLICIT_TLSINSECURE_OPTS: if opt in options: - err_msg = "URI options %s and %s cannot be specified " "simultaneously." + err_msg = "URI options %s and %s cannot be specified simultaneously." raise InvalidURI( err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) ) @@ -205,7 +194,7 @@ def _handle_security_options(options): tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") if tlsallowinvalidcerts is not None: if "tlsdisableocspendpointcheck" in options: - err_msg = "URI options %s and %s cannot be specified " "simultaneously." + err_msg = "URI options %s and %s cannot be specified simultaneously." raise InvalidURI( err_msg % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) @@ -218,7 +207,7 @@ def _handle_security_options(options): if tlscrlfile is not None: for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): if options.get(opt) is True: - err_msg = "URI option %s=True cannot be specified when " "CRL checking is enabled." + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." raise InvalidURI(err_msg % (opt,)) if "ssl" in options and "tls" in options: @@ -231,7 +220,7 @@ def truth_value(val): return val if truth_value(options.get("ssl")) != truth_value(options.get("tls")): - err_msg = "Can not specify conflicting values for URI options %s " "and %s." + err_msg = "Can not specify conflicting values for URI options %s and %s." raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) return options @@ -252,7 +241,7 @@ def _handle_option_deprecations(options): if mode == "renamed": newoptname = message if newoptname in options: - warn_msg = "Deprecated option '%s' ignored in favor of " "'%s'." + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." warnings.warn( warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), DeprecationWarning, @@ -378,7 +367,7 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[ nodes = [] for entity in hosts.split(","): if not entity: - raise ConfigurationError("Empty host " "(or extra comma in host list).") + raise ConfigurationError("Empty host (or extra comma in host list).") port = default_port # Unix socket entities don't have ports if entity.endswith(".sock"): @@ -486,7 +475,7 @@ def parse_uri( scheme_free = uri[SRV_SCHEME_LEN:] else: raise InvalidURI( - "Invalid URI scheme: URI must " "begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) + "Invalid URI scheme: URI must begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) ) if not scheme_free: @@ -504,7 +493,7 @@ def parse_uri( path_part = "" if not path_part and "?" in host_part: - raise InvalidURI("A '/' is required between " "the host list and any options.") + raise InvalidURI("A '/' is required between the host list and any options.") if path_part: dbase, _, opts = path_part.partition("?") @@ -528,9 +517,7 @@ def parse_uri( hosts = host_part if "/" in hosts: - raise InvalidURI( - "Any '/' in a unix domain socket must be" " percent-encoded: %s" % host_part - ) + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) hosts = unquote_plus(hosts) fqdn = None @@ -538,11 +525,11 @@ def parse_uri( if is_srv: if options.get("directConnection"): raise ConfigurationError( - "Cannot specify directConnection=true with " "%s URIs" % (SRV_SCHEME,) + "Cannot specify directConnection=true with %s URIs" % (SRV_SCHEME,) ) nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI("%s URIs must include one, " "and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI("%s URIs must include one, and only one, hostname" % (SRV_SCHEME,)) fqdn, port = nodes[0] if port is not None: raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) @@ -557,7 +544,7 @@ def parse_uri( parsed_dns_options = split_options(dns_options, validate, warn, normalize) if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are " "supported from DNS" + "Only authSource, replicaSet, and loadBalanced are supported from DNS" ) for opt, val in parsed_dns_options.items(): if opt not in options: @@ -570,11 +557,11 @@ def parse_uri( options["tls"] = True if validate else "true" elif not is_srv and options.get("srvServiceName") is not None: raise ConfigurationError( - "The srvServiceName option is only allowed " "with 'mongodb+srv://' URIs" + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" ) elif not is_srv and srv_max_hosts: raise ConfigurationError( - "The srvMaxHosts option is only allowed " "with 'mongodb+srv://' URIs" + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" ) else: nodes = split_hosts(hosts, default_port=default_port) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index fea912d569..ced71d0488 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -73,7 +73,7 @@ def __init__( if not isinstance(fsync, bool): raise TypeError("fsync must be True or False") if j and fsync: - raise ConfigurationError("Can't set both j " "and fsync at the same time") + raise ConfigurationError("Can't set both j and fsync at the same time") self.__document["fsync"] = fsync if w == 0 and j is True: diff --git a/setup.py b/setup.py index 5dbbdde22b..699ced1f85 100755 --- a/setup.py +++ b/setup.py @@ -10,11 +10,10 @@ # Hack to silence atexit traceback in some Python versions try: - import multiprocessing + import multiprocessing # noqa: F401 except ImportError: pass -from setuptools import __version__ as _setuptools_version from setuptools import setup if sys.version_info[:2] < (3, 10): @@ -41,7 +40,7 @@ try: try: readme_content = f.read() - except: + except BaseException: readme_content = "" finally: f.close() @@ -152,7 +151,7 @@ def run(self): try: os.makedirs(path) - except: + except BaseException: pass sphinx_args = ["-E", "-b", mode, "doc", path] @@ -169,7 +168,7 @@ def run(self): raise RuntimeError("documentation step '%s' failed" % (mode,)) sys.stdout.write( - "\nDocumentation step '%s' performed, results here:\n" " %s/\n" % (mode, path) + "\nDocumentation step '%s' performed, results here:\n %s/\n" % (mode, path) ) @@ -232,7 +231,7 @@ def run(self): self.warning_message % ( "Extension modules", - "There was an issue with " "your platform configuration" " - see above.", + "There was an issue with your platform configuration - see above.", ) ) @@ -246,8 +245,8 @@ def build_extension(self, ext): warnings.warn( self.warning_message % ( - "The %s extension " "module" % (name,), - "The output above " "this warning shows how " "the compilation " "failed.", + "The %s extension module" % (name,), + "The output above this warning shows how the compilation failed.", ) ) diff --git a/test/__init__.py b/test/__init__.py index d75c011547..be0825025a 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -20,7 +20,6 @@ import socket import sys import threading -import time import traceback import unittest import warnings @@ -34,7 +33,7 @@ HAVE_XML = False try: - import ipaddress + import ipaddress # noqa HAVE_IPADDRESS = True except ImportError: @@ -667,7 +666,7 @@ def require_secondary_read_pref(self): """ return self._require( lambda: self.supports_secondary_read_pref, - "This cluster does not support secondary read " "preference", + "This cluster does not support secondary read preference", ) def require_no_replica_set(self, func): @@ -757,7 +756,7 @@ def is_topology_type(self, topologies): return True return False - def require_cluster_type(self, topologies=[]): + def require_cluster_type(self, topologies=[]): # noqa """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies are 'single', 'replicaset', and 'sharded'.""" @@ -825,7 +824,7 @@ def require_server_resolvable(self, func): """Run a test only if the hostname 'server' is resolvable.""" return self._require( lambda: self.server_is_resolvable, - "No hosts entry for 'server'. Cannot validate " "hostname in the certificate", + "No hosts entry for 'server'. Cannot validate hostname in the certificate", func=func, ) @@ -1125,9 +1124,9 @@ def test_cases(suite): # Helper method to workaround https://bugs.python.org/issue21724 def clear_warning_registry(): """Clear the __warningregistry__ for all modules.""" - for name, module in list(sys.modules.items()): + for _, module in list(sys.modules.items()): if hasattr(module, "__warningregistry__"): - setattr(module, "__warningregistry__", {}) + setattr(module, "__warningregistry__", {}) # noqa class SystemCertsPatcher(object): diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index cad2b10683..a1eb97edee 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -25,7 +25,7 @@ from pymongo.ssl_support import HAS_SNI try: - import dns + import dns # noqa HAS_DNS = True except ImportError: @@ -120,7 +120,7 @@ def test_uniqueness(self): duplicates = [names for names in uri_to_names.values() if len(names) > 1] self.assertFalse( duplicates, - "Error: the following env variables have " "duplicate values: %s" % (duplicates,), + "Error: the following env variables have duplicate values: %s" % (duplicates,), ) diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index e6d8c2126c..cb06a129d2 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -28,7 +28,7 @@ def cluster_time_conversation(self, callback, replies): server = MockupDB() # First test all commands include $clusterTime with wire version 6. - responder = server.autoresponds( + _ = server.autoresponds( "ismaster", { "minWireVersion": 0, diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index c9799fa21e..39188e8ad0 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -33,7 +33,7 @@ def test_hello_with_option(self, protocol, **kwargs): def respond(r): # Only save the very first request from the driver. - if self.handshake_req == None: + if self.handshake_req is None: self.handshake_req = r load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} return r.reply( @@ -261,7 +261,7 @@ def responder(request): self.addCleanup(client.close) self.assertRaises(OperationFailure, client.db.collection.find_one, {"a": 1}) self.assertTrue( - self.found_auth_msg, "Could not find authentication " "command with correct protocol" + self.found_auth_msg, "Could not find authentication command with correct protocol" ) diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index 2bdbd7b910..20764e6e5a 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -16,7 +16,7 @@ import unittest -from mockupdb import MockupDB, OpGetMore, going +from mockupdb import MockupDB, going from bson import SON from pymongo import MongoClient diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index ce91794ee4..d5fb9913cc 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -18,7 +18,7 @@ import unittest from queue import Queue -from mockupdb import MockupDB, OpMsg, go +from mockupdb import MockupDB, go from operations import upgrades from pymongo import MongoClient diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index d2c3bfc1b0..b7f8532e38 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -15,10 +15,9 @@ import itertools import unittest -from mockupdb import MockupDB, OpMsg, go, going +from mockupdb import MockupDB, OpMsg, going from operations import operations -from bson import SON from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( _MONGOS_MODES, diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index dcf5256fac..ea13a3b042 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -13,7 +13,6 @@ # limitations under the License. import unittest -from queue import Queue from mockupdb import Future, MockupDB, OpReply, going, wait_until diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 98cd1f2706..07cd6c7448 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -48,15 +48,9 @@ def test(self): ismaster_with_version["minWireVersion"] = 2 ismaster_with_version["maxWireVersion"] = 6 self.server.autoresponds("ismaster", **ismaster_with_version) - if operation.op_type == "always-use-secondary": - slave_ok = True - elif operation.op_type == "may-use-secondary": - slave_ok = mode != "primary" or server_type != "mongos" - elif operation.op_type == "must-use-primary": - slave_ok = server_type != "mongos" - else: - assert False, "unrecognized op_type %r" % operation.op_type - + self.assertIn( + operation.op_type, ("always-use-secondary", "may-use-secondary", "must-use-primary") + ) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) client = MongoClient(self.server.uri, read_preference=pref) diff --git a/test/mod_wsgi_test/mod_wsgi_test.wsgi b/test/mod_wsgi_test/mod_wsgi_test.wsgi index bfd1c4bab0..7c7b24cb70 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.wsgi +++ b/test/mod_wsgi_test/mod_wsgi_test.wsgi @@ -25,7 +25,7 @@ repository_path = os.path.normpath(os.path.join(this_path, '..', '..')) sys.path.insert(0, repository_path) import pymongo -from pymongo.hello import HelloCompat +from pymongo.hello import HelloCompat # noqa from pymongo.mongo_client import MongoClient client = MongoClient() @@ -33,7 +33,7 @@ collection = client.test.test ndocs = 20 collection.drop() collection.insert_many([{'i': i} for i in range(ndocs)]) -client.close() # Discard main thread's request socket. +client.close() # Discard main thread's request socket. client = MongoClient() collection = client.test.test diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index cce846feac..a0770afefa 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -40,7 +40,7 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s" "&tlsCAFile=%s&%s") % ( + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s&tlsCAFile=%s&%s") % ( TIMEOUT_MS, CA_FILE, options, diff --git a/test/test_auth.py b/test/test_auth.py index 5abdbef3dc..69ed27bda0 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -21,11 +21,10 @@ sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, Version, client_context, unittest +from test import IntegrationTest, SkipTest, client_context, unittest from test.utils import ( AllowListEventListener, delay, - get_pool, ignore_deprecations, rs_or_single_client, rs_or_single_client_noauth, @@ -119,14 +118,14 @@ def test_credentials_hashing(self): def test_gssapi_simple(self): assert GSSAPI_PRINCIPAL is not None if GSSAPI_PASS is not None: - uri = "mongodb://%s:%s@%s:%d/?authMechanism=" "GSSAPI" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( quote_plus(GSSAPI_PRINCIPAL), GSSAPI_PASS, GSSAPI_HOST, GSSAPI_PORT, ) else: - uri = "mongodb://%s@%s:%d/?authMechanism=" "GSSAPI" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( quote_plus(GSSAPI_PRINCIPAL), GSSAPI_HOST, GSSAPI_PORT, @@ -266,7 +265,7 @@ class TestSASLPlain(unittest.TestCase): @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: - raise SkipTest("Must set SASL_HOST, " "SASL_USER, and SASL_PASS to test SASL") + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): @@ -282,7 +281,7 @@ def test_sasl_plain(self): assert SASL_USER is not None assert SASL_PASS is not None - uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( quote_plus(SASL_USER), quote_plus(SASL_PASS), SASL_HOST, @@ -305,7 +304,7 @@ def test_sasl_plain(self): ) client.ldap.test.find_one() - uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s;replicaSet=%s" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( quote_plus(SASL_USER), quote_plus(SASL_PASS), SASL_HOST, @@ -318,7 +317,7 @@ def test_sasl_plain(self): def test_sasl_plain_bad_credentials(self): def auth_string(user, password): - uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;" "authSource=%s" % ( + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( quote_plus(user), quote_plus(password), SASL_HOST, @@ -484,7 +483,7 @@ def test_scram(self): if client_context.is_rs: host, port = client_context.host, client_context.port - uri = "mongodb://both:pwd@%s:%d/testscram" "?replicaSet=%s" % ( + uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % ( host, port, client_context.replica_set_name, @@ -641,7 +640,7 @@ def test_uri_options(self): self.assertTrue(db.command("dbstats")) # Test authSource - uri = "mongodb://user:pass@%s:%d" "/pymongo_test2?authSource=pymongo_test" % (host, port) + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) diff --git a/test/test_binary.py b/test/test_binary.py index 6352e93d2c..7d0ef2ce2e 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -19,14 +19,12 @@ import copy import mmap import pickle -import platform import sys import uuid sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ignore_deprecations import bson from bson import decode, encode diff --git a/test/test_bson.py b/test/test_bson.py index 46aa6e5d9a..9bf8df897a 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -204,35 +204,33 @@ def test_basic_validation(self): self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12") self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00") self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" b"\x04\x00\x00\x00bar\x00\x00") + self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00\x04\x00\x00\x00bar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" - ) - self.assertInvalid( - b"\x15\x00\x00\x00\x03foo\x00\x0c" b"\x00\x00\x00\x08bar\x00\x01\x00\x00" + b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00" ) + self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c\x00\x00\x00\x08bar\x00\x01\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x03foo\x00" b"\x12\x00\x00\x00\x02bar\x00" b"\x05\x00\x00\x00baz\x00\x00\x00" ) - self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" b"\x04\x00\x00\x00abc\xff\x00") + self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00\x04\x00\x00\x00abc\xff\x00") def test_bad_string_lengths(self): - self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00" b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x12\x00\x00\x00\x02\x00" b"\xff\xff\xff\xfffoobar\x00\x00") - self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00" b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00" b"\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x02\x00\xff\xff\xff\xfffoobar\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00\xff\xff\xff\xfffoobar\x00\x00") self.assertInvalid( - b"\x18\x00\x00\x00\x0c\x00" b"\x00\x00\x00\x00\x00RY\xb5j" b"\xfa[\xd8A\xd6X]\x99\x00" + b"\x18\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" ) self.assertInvalid( b"\x1e\x00\x00\x00\x0c\x00" b"\xff\xff\xff\xfffoobar\x00" b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" ) - self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\x00\x00\x00\x00\x00\x00") - self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\xff\xff\xff\xff\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00") + self.assertInvalid(b"\x0c\x00\x00\x00\r\x00\xff\xff\xff\xff\x00\x00") self.assertInvalid( b"\x1c\x00\x00\x00\x0f\x00" b"\x15\x00\x00\x00\x00\x00" @@ -393,9 +391,7 @@ def test_invalid_field_name(self): def test_data_timestamp(self): self.assertEqual( {"test": Timestamp(4, 20)}, - decode( - b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" b"\x00\x00\x00\x04\x00\x00\x00\x00" - ), + decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00"), ) def test_basic_encode(self): @@ -414,29 +410,29 @@ def test_basic_encode(self): ) self.assertEqual( encode({"mike": 100}), - b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" b"\x00\x00\x00", + b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00\x00\x00\x00", ) self.assertEqual( encode({"hello": 1.5}), - b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" b"\x00\x00\x00\x00\x00\xF8\x3F\x00", + b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00\x00\x00\x00\x00\x00\xF8\x3F\x00", ) self.assertEqual( encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00" ) self.assertEqual( - encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" b"\x00" + encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00\x00" ) self.assertEqual( encode({"empty": []}), - b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" b"\x00\x00\x00\x00\x00", + b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05\x00\x00\x00\x00\x00", ) self.assertEqual( encode({"none": {}}), - b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" b"\x00\x00\x00\x00", + b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00\x00\x00\x00\x00", ) self.assertEqual( encode({"test": Binary(b"test", 0)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x00\x74\x65\x73\x74\x00", + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x00\x74\x65\x73\x74\x00", ) self.assertEqual( encode({"test": Binary(b"test", 2)}), @@ -445,24 +441,24 @@ def test_basic_encode(self): ) self.assertEqual( encode({"test": Binary(b"test", 128)}), - b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x80\x74\x65\x73\x74\x00", + b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x80\x74\x65\x73\x74\x00", ) self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") self.assertEqual( encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), - b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" b"\x1C\xFF\x0F\x01\x00\x00\x00", + b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE\x1C\xFF\x0F\x01\x00\x00\x00", ) self.assertEqual( encode({"regex": re.compile(b"a*b", re.IGNORECASE)}), - b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" b"\x2A\x62\x00\x69\x00\x00", + b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61\x2A\x62\x00\x69\x00\x00", ) self.assertEqual( encode({"$where": Code("test")}), - b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" b"\x00\x00", + b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test\x00\x00", ) self.assertEqual( encode({"$field": Code("function(){ return true;}", scope=None)}), - b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" b"function(){ return true;}\x00\x00", + b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00function(){ return true;}\x00\x00", ) self.assertEqual( encode({"$field": Code("return function(){ return x; }", scope={"x": False})}), @@ -496,7 +492,7 @@ def test_unknown_type(self): part = "type %r for fieldname 'foo'" % (b"\x14",) docs = [ b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", - (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140" b"\x00\x01\x00\x00\x00\x00\x00"), + (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140\x00\x01\x00\x00\x00\x00\x00"), ( b" \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00" b"\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00" @@ -518,7 +514,7 @@ def test_dbpointer(self): # not support creation of the DBPointer type, but will decode # DBPointer to DBRef. - bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" + bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00" self.assertEqual({"": DBRef("", ObjectId("5259b56afa5bd841d6585d99"))}, decode(bs)) @@ -785,9 +781,7 @@ def test_bson_regex(self): self.assertEqual(0, bson_re1.flags) doc1 = {"r": bson_re1} - doc1_bson = ( - b"\x11\x00\x00\x00" b"\x0br\x00[\\w-\\.]\x00\x00" b"\x00" # document length # r: regex - ) # document terminator + doc1_bson = b"\x11\x00\x00\x00\x0br\x00[\\w-\\.]\x00\x00\x00" # document length # r: regex # document terminator self.assertEqual(doc1_bson, encode(doc1)) self.assertEqual(doc1, decode(doc1_bson)) @@ -798,9 +792,7 @@ def test_bson_regex(self): doc2_with_re = {"r": re2} doc2_with_bson_re = {"r": bson_re2} - doc2_bson = ( - b"\x11\x00\x00\x00" b"\x0br\x00.*\x00imsux\x00" b"\x00" # document length # r: regex - ) # document terminator + doc2_bson = b"\x11\x00\x00\x00\x0br\x00.*\x00imsux\x00\x00" # document length # r: regex # document terminator self.assertEqual(doc2_bson, encode(doc2_with_re)) self.assertEqual(doc2_bson, encode(doc2_with_bson_re)) @@ -917,7 +909,7 @@ def test_timestamp_comparison(self): def test_timestamp_highorder_bits(self): doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)} - doc_bson = b"\x10\x00\x00\x00" b"\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff" b"\x00" + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" self.assertEqual(doc_bson, encode(doc)) self.assertEqual(doc, decode(doc_bson)) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 4a46276573..4f8fc7413a 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -208,13 +208,13 @@ def run_test(self): # Null bytes are validated when encoding to BSON. if "Null" in description: to_bson(doc) - raise AssertionError("exception not raised for test " "case: " + description) + raise AssertionError("exception not raised for test case: " + description) except (ValueError, KeyError, TypeError, InvalidId, InvalidDocument): pass elif bson_type == "0x05": try: decode_extjson(parse_error_case["string"]) - raise AssertionError("exception not raised for test " "case: " + description) + raise AssertionError("exception not raised for test case: " + description) except (TypeError, ValueError): pass else: diff --git a/test/test_client.py b/test/test_client.py index 0487161b1e..9f01c1c054 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -26,7 +26,6 @@ import sys import threading import time -import warnings from typing import Type, no_type_check sys.path[0:0] = [""] @@ -88,7 +87,6 @@ ServerSelectionTimeoutError, WriteConcernError, ) -from pymongo.hello import HelloCompat from pymongo.mongo_client import MongoClient from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent from pymongo.pool import _METADATA, PoolOptions, SocketInfo @@ -99,10 +97,7 @@ from pymongo.settings import TOPOLOGY_TYPE from pymongo.srv_resolver import _HAVE_DNSPYTHON from pymongo.topology import _ErrorContext -from pymongo.topology_description import ( - TopologyDescription, - _updated_topology_description_srv_polling, -) +from pymongo.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern @@ -279,7 +274,7 @@ def test_primary_read_pref_with_tags(self): MongoClient("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient("mongodb://host/?" "readpreference=primary&readpreferencetags=dc:east") + MongoClient("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): c = rs_or_single_client( @@ -394,7 +389,7 @@ def test_uri_codec_options(self): def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. - uri = "mongodb://localhost/?ssl=true&replicaSet=name" "&readPreference=primary" + uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") clopts = c._MongoClient__options opts = clopts._options @@ -590,7 +585,7 @@ def test_max_idle_time_checkout(self): with server._pool.get_socket() as sock_info: pass self.assertEqual(1, len(server._pool.sockets)) - time.sleep(1) # Sleep so that the socket becomes stale. + time.sleep(1) # Sleep so that the socket becomes stale. with server._pool.get_socket() as new_sock_info: self.assertNotEqual(sock_info, new_sock_info) @@ -712,7 +707,7 @@ def test_host_w_port(self): def test_repr(self): # Used to test 'eval' below. - import bson + import bson # noqa: F401 client = MongoClient( "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" @@ -723,9 +718,7 @@ def test_repr(self): the_repr = repr(client) self.assertIn("MongoClient(host=", the_repr) - self.assertIn( - "document_class=bson.son.SON, " "tz_aware=False, " "connect=False, ", the_repr - ) + self.assertIn("document_class=bson.son.SON, tz_aware=False, connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("w=1", the_repr) @@ -744,7 +737,7 @@ def test_repr(self): ) the_repr = repr(client) self.assertIn("MongoClient(host=", the_repr) - self.assertIn("document_class=dict, " "tz_aware=False, " "connect=False, ", the_repr) + self.assertIn("document_class=dict, tz_aware=False, connect=False, ", the_repr) self.assertIn("connecttimeoutms=12345", the_repr) self.assertIn("replicaset='replset'", the_repr) self.assertIn("sockettimeoutms=None", the_repr) @@ -1651,7 +1644,7 @@ def test_service_name_from_kwargs(self): ) self.assertEqual(client._topology_settings.srv_service_name, "customname") client = MongoClient( - "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=customname", + "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", connect=False, ) self.assertEqual(client._topology_settings.srv_service_name, "customname") @@ -1864,7 +1857,7 @@ def test_discover_primary(self): # Fail over. c.kill_host("a:1") c.mock_primary = "b:2" - wait_until(lambda: c.address == ("b", 2), "wait for server " "address to be " "updated") + wait_until(lambda: c.address == ("b", 2), "wait for server address to be updated") # a:1 not longer in nodes. self.assertLess(len(c.nodes), 3) diff --git a/test/test_client_context.py b/test/test_client_context.py index b3eb711087..9ee5b96d61 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -49,7 +49,7 @@ def test_enableTestCommands_is_disabled(self): self.assertFalse( client_context.test_commands_enabled, - "enableTestCommands must be disabled when " "PYMONGO_DISABLE_TEST_COMMANDS is set.", + "enableTestCommands must be disabled when PYMONGO_DISABLE_TEST_COMMANDS is set.", ) def test_setdefaultencoding_worked(self): diff --git a/test/test_cmap.py b/test/test_cmap.py index b79f36b803..a2a1d8d214 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -16,7 +16,6 @@ import os import sys -import threading import time sys.path[0:0] = [""] @@ -25,7 +24,6 @@ from test.pymongo_mocks import DummyMonitor from test.utils import ( CMAPListener, - OvertCommandListener, TestCreator, camel_to_snake, client_context, diff --git a/test/test_collection.py b/test/test_collection.py index f81c2c2645..d9f51f530d 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -246,8 +246,7 @@ def test_create_indexes_commitQuorum_requires_44(self): db = self.db with self.assertRaisesRegex( ConfigurationError, - "Must be connected to MongoDB 4\.4\+ to use the commitQuorum " - "option for createIndexes", + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", ): db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") @@ -1511,7 +1510,7 @@ def test_aggregation_cursor(self): # batchSize - 1 self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore # Exhaust the cursor. There shouldn't be any errors. - for doc in cursor: + for _doc in cursor: pass def test_aggregation_cursor_alive(self): @@ -1898,7 +1897,8 @@ def test_array_filters_validation(self): with self.assertRaises(TypeError): c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] with self.assertRaises(TypeError): - c.find_one_and_update({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + update = {"$set": {"a": 1}} + c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index ed3d516f97..5d9f2fe3ee 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -21,12 +21,7 @@ sys.path[0:0] = [""] from test import client_context, unittest -from test.utils import ( - EventListener, - parse_read_preference, - rs_or_single_client, - wait_until, -) +from test.utils import EventListener, parse_read_preference, rs_or_single_client import pymongo from bson import json_util diff --git a/test/test_comment.py b/test/test_comment.py index 1c0e741621..c83428fd70 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -16,23 +16,16 @@ import inspect import sys -from collections import defaultdict from typing import Any, Union sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest +from test import IntegrationTest, client_context, unittest from test.utils import EventListener, rs_or_single_client from bson.dbref import DBRef -from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.database import Database -from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel -from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference -from pymongo.write_concern import WriteConcern class Empty(object): @@ -47,7 +40,9 @@ def empty(self, *args, **kwargs): class TestComment(IntegrationTest): - def _test_ops(self, helpers, already_supported, listener, db=Empty(), coll=Empty()): + def _test_ops( + self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 + ): results = listener.results for h, args in helpers: c = "testing comment with " + h.__name__ @@ -56,7 +51,7 @@ def _test_ops(self, helpers, already_supported, listener, db=Empty(), coll=Empty results.clear() kwargs = {"comment": cc} if h == coll.rename: - tmp = db.get_collection("temp_temp_temp").drop() + _ = db.get_collection("temp_temp_temp").drop() destruct_coll = db.get_collection("test_temp") destruct_coll.insert_one({}) maybe_cursor = destruct_coll.rename(*args, **kwargs) diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index c23ce28061..ca4b84c26d 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -19,7 +19,7 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import IntegrationTest, unittest from test.utils import ( TestCreator, camel_to_snake, diff --git a/test/test_cursor.py b/test/test_cursor.py index 7a80b003df..5b4efcd391 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -39,12 +39,7 @@ from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType -from pymongo.errors import ( - ConfigurationError, - ExecutionTimeout, - InvalidOperation, - OperationFailure, -) +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -355,7 +350,7 @@ def test_hint(self): ) spec = [("num", DESCENDING)] - index = db.test.create_index(spec) + _ = db.test.create_index(spec) first = next(db.test.find()) self.assertEqual(0, first.get("num")) @@ -763,7 +758,7 @@ def test_where(self): self.assertEqual([8, 9], [a["x"] for a in cursor]) a = db.test.find() - b = a.where("this.x > 3") + _ = a.where("this.x > 3") for _ in a: break self.assertRaises(InvalidOperation, a.where, "this.x < 3") diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 4659a62e62..a7073cde93 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -545,9 +545,7 @@ def transform_bson(self, value): ) def test_initialize_fail(self): - err_msg = ( - "Expected an instance of TypeEncoder, TypeDecoder, " "or TypeCodec, got .* instead" - ) + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(self.codecs) # type: ignore[arg-type] diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 863b3a4f59..fbf79994d3 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -28,8 +28,6 @@ rs_or_single_client, ) -from pymongo.auth import MECHANISMS - # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") @@ -42,7 +40,7 @@ def test_connected_to_data_lake(self): self.assertTrue( client_context.is_data_lake, - "client context.is_data_lake must be True when " "DATA_LAKE is set", + "client context.is_data_lake must be True when DATA_LAKE is set", ) diff --git a/test/test_database.py b/test/test_database.py index 9a08d971db..8844046ad1 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -14,22 +14,18 @@ """Test the database module.""" -import datetime import re import sys from typing import Any, List, Mapping sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest +from test import IntegrationTest, client_context, unittest from test.test_custom_types import DECIMAL_CODECOPTS from test.utils import ( IMPOSSIBLE_WRITE_CONCERN, - DeprecationFilter, OvertCommandListener, - ignore_deprecations, rs_or_single_client, - server_started_with_auth, wait_until, ) @@ -44,7 +40,6 @@ from pymongo.database import Database from pymongo.errors import ( CollectionInvalid, - ConfigurationError, ExecutionTimeout, InvalidName, OperationFailure, diff --git a/test/test_dbref.py b/test/test_dbref.py index 8e98bd8ce5..281aef473f 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -69,7 +69,7 @@ def test_repr(self): self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") self.assertEqual( repr(DBRef("coll", ObjectId("1234567890abcdef12345678"), "foo")), - "DBRef('coll', ObjectId('1234567890abcdef12345678'), " "'foo')", + "DBRef('coll', ObjectId('1234567890abcdef12345678'), 'foo')", ) def test_equality(self): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 51b168b0a0..d17a0d4166 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -284,7 +284,7 @@ def mock_command(*args, **kwargs): def insert_command(i): try: client.test.command("insert", "test", documents=[{"i": i}]) - except AutoReconnect as exc: + except AutoReconnect: pass threads = [] diff --git a/test/test_dns.py b/test/test_dns.py index d47e115f41..352d05376a 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -77,7 +77,7 @@ def run_test(self): options["tls"] = options.pop("ssl") parsed_options = test_case.get("parsed_options") # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. - needs_tls = not (options and (options.get("ssl") == False or options.get("tls") == False)) + needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) if needs_tls and not client_context.tls: self.skipTest("this test requires a TLS cluster") if not needs_tls and client_context.tls: diff --git a/test/test_encryption.py b/test/test_encryption.py index 31c3dd2bcd..f63127a7be 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -718,7 +718,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): MASTER_KEYS = { "aws": { "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-" "4bd9-9f25-e30687b580d0", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", }, "azure": { "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", @@ -1259,7 +1259,7 @@ def test_01_aws_region_key(self): { "region": "us-east-1", "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" ), }, ) @@ -1271,7 +1271,7 @@ def test_02_aws_region_key_endpoint(self): { "region": "us-east-1", "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" ), "endpoint": "kms.us-east-1.amazonaws.com", }, @@ -1284,7 +1284,7 @@ def test_03_aws_region_key_endpoint_port(self): { "region": "us-east-1", "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" ), "endpoint": "kms.us-east-1.amazonaws.com:443", }, @@ -1294,9 +1294,7 @@ def test_03_aws_region_key_endpoint_port(self): def test_04_aws_endpoint_invalid_port(self): master_key = { "region": "us-east-1", - "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - ), + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:12345", } with self.assertRaises(EncryptionError) as ctx: @@ -1307,9 +1305,7 @@ def test_04_aws_endpoint_invalid_port(self): def test_05_aws_endpoint_wrong_region(self): master_key = { "region": "us-east-1", - "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - ), + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-2.amazonaws.com", } # The full error should be something like: @@ -1323,9 +1319,7 @@ def test_05_aws_endpoint_wrong_region(self): def test_06_aws_endpoint_invalid_host(self): master_key = { "region": "us-east-1", - "key": ( - "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0" - ), + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "doesnotexist.invalid", } with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): @@ -1583,9 +1577,9 @@ def _run_test(self, max_pool_size, auto_encryption_opts): event_listeners=[self.client_listener, self.topology_listener], ) - if auto_encryption_opts._bypass_auto_encryption == True: + if auto_encryption_opts._bypass_auto_encryption is True: self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) - elif auto_encryption_opts._bypass_auto_encryption == False: + elif auto_encryption_opts._bypass_auto_encryption is False: client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) else: raise RuntimeError("bypass_auto_encryption must be a bool") @@ -1825,7 +1819,7 @@ def setUp(self): def test_invalid_kms_certificate_expired(self): key = { "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", "endpoint": "mongodb://127.0.0.1:8000", } # Some examples: @@ -1837,7 +1831,7 @@ def test_invalid_kms_certificate_expired(self): def test_invalid_hostname_in_kms_certificate(self): key = { "region": "us-east-1", - "key": "arn:aws:kms:us-east-1:579766882180:key/" "89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", "endpoint": "mongodb://127.0.0.1:8001", } # Some examples: diff --git a/test/test_errors.py b/test/test_errors.py index 8a225b6548..747da48472 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -49,11 +49,11 @@ def _test_unicode_strs(self, exc): if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): # PyPy used to display unicode in repr differently. self.assertEqual( - "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \\U0001f40d'}", str(exc) + "unicode \U0001f40d, full error: {'errmsg': 'unicode \\U0001f40d'}", str(exc) ) else: self.assertEqual( - "unicode \U0001f40d, full error: {" "'errmsg': 'unicode \U0001f40d'}", str(exc) + "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) ) try: raise exc diff --git a/test/test_examples.py b/test/test_examples.py index 7354ac5be2..ccb48307e4 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -945,9 +945,7 @@ def update_employee_info(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print( - "UnknownTransactionCommitResult, retrying " "commit operation ..." - ) + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -970,11 +968,11 @@ def run_transaction_with_retry(txn_func, session): txn_func(session) # performs transaction break except (ConnectionFailure, OperationFailure) as exc: - print("Transaction aborted. Caught exception during " "transaction.") + print("Transaction aborted. Caught exception during transaction.") # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying" "transaction ...") + print("TransientTransactionError, retrying transaction ...") continue else: raise @@ -1000,7 +998,7 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " "commit operation ...") + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -1036,7 +1034,7 @@ def run_transaction_with_retry(txn_func, session): except (ConnectionFailure, OperationFailure) as exc: # If transient error, retry the whole transaction if exc.has_error_label("TransientTransactionError"): - print("TransientTransactionError, retrying " "transaction ...") + print("TransientTransactionError, retrying transaction ...") continue else: raise @@ -1051,7 +1049,7 @@ def commit_with_retry(session): except (ConnectionFailure, OperationFailure) as exc: # Can retry commit if exc.has_error_label("UnknownTransactionCommitResult"): - print("UnknownTransactionCommitResult, retrying " "commit operation ...") + print("UnknownTransactionCommitResult, retrying commit operation ...") continue else: print("Error during commit ...") @@ -1282,7 +1280,7 @@ def strptime(s): with self.assertRaisesRegex( OperationFailure, - "Provided apiStrict:true, but the command " "count is not in API Version 1", + "Provided apiStrict:true, but the command count is not in API Version 1", ): client.db.command("count", "sales", query={}) # Start Versioned API Example 6 diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 27d82e242b..b9fdeacef7 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -580,7 +580,7 @@ def test_iterator(self): self.assertEqual([b"hello world"], list(g)) def test_read_unaligned_buffer_size(self): - in_data = b"This is a text that doesn't " b"quite fit in a single 16-byte chunk." + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." f = GridIn(self.db.fs, chunkSize=16) f.write(in_data) f.close() diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 8b0a9a3936..d9bf0cf058 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -399,7 +399,7 @@ def test_download_to_stream(self): def test_download_to_stream_by_name(self): file1 = BytesIO(b"hello world") # Test with one chunk. - oid = self.fs.upload_from_stream("one_chunk", file1) + _ = self.fs.upload_from_stream("one_chunk", file1) self.assertEqual(1, self.db.fs.chunks.count_documents({})) file2 = BytesIO() self.fs.download_to_stream_by_name("one_chunk", file2) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index cd4a875e9e..a14ab9a3a7 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -15,7 +15,6 @@ """Test the monitoring of the server heartbeats.""" import sys -import threading sys.path[0:0] = [""] diff --git a/test/test_json_util.py b/test/test_json_util.py index 203542e822..ee5b7abb49 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -331,7 +331,7 @@ def test_uuid(self): json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS), ) self.assertEqual( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_util.dumps( doc, json_options=STRICT_JSON_OPTIONS.with_options( @@ -340,7 +340,7 @@ def test_uuid(self): ), ) self.assertEqual( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_util.dumps( doc, json_options=JSONOptions( @@ -351,7 +351,7 @@ def test_uuid(self): self.assertEqual( doc, json_util.loads( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}', json_options=uuid_legacy_opts, ), ) @@ -364,7 +364,7 @@ def test_uuid(self): self.assertEqual( doc, json_util.loads( - '{"uuid": ' '{"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', + '{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}', json_options=options, ), ) @@ -420,32 +420,32 @@ def test_binary(self): json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS) # Check order. self.assertEqual( - '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==",' + ' "$type": "05"}}', json_bin_dump + '{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==", "$type": "05"}}', json_bin_dump ) self.assertEqual( md5_type_dict, - json_util.loads('{"md5": {"$type": 5, "$binary":' ' "IG43GK8JL9HRL4DK53HMrA=="}}'), + json_util.loads('{"md5": {"$type": 5, "$binary": "IG43GK8JL9HRL4DK53HMrA=="}}'), ) json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS) self.assertIn('"$type": "80"', json_bin_dump) self.assertEqual( custom_type_dict, - json_util.loads('{"custom": {"$type": 128, "$binary":' ' "aGVsbG8="}}'), + json_util.loads('{"custom": {"$type": 128, "$binary": "aGVsbG8="}}'), ) # Handle mongoexport where subtype >= 128 self.assertEqual( 128, - json_util.loads('{"custom": {"$type": "ffffff80", "$binary":' ' "aGVsbG8="}}')[ + json_util.loads('{"custom": {"$type": "ffffff80", "$binary": "aGVsbG8="}}')[ "custom" ].subtype, ) self.assertEqual( 255, - json_util.loads('{"custom": {"$type": "ffffffff", "$binary":' ' "aGVsbG8="}}')[ + json_util.loads('{"custom": {"$type": "ffffffff", "$binary": "aGVsbG8="}}')[ "custom" ].subtype, ) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 547cf327d3..378ae33e03 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -158,7 +158,7 @@ def lock_pool(self): # Wait for the unlock flag. unlock_pool = self.unlock.wait(10) if not unlock_pool: - raise Exception("timed out waiting for unlock signal:" " deadlock?") + raise Exception("timed out waiting for unlock signal: deadlock?") if __name__ == "__main__": diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 4c17701133..799083f3b4 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -52,21 +52,21 @@ def test_max_staleness(self): with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?readPreference=primary&" "maxStalenessSeconds=120") + MongoClient("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&" "maxStalenessSeconds=-1") + client = MongoClient("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&" "maxStalenessSeconds=120") + client = MongoClient("mongodb://host/?readPreference=secondary&maxStalenessSeconds=120") self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=1") + client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&" "maxStalenessSeconds=-1") + client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") @@ -84,9 +84,7 @@ def test_max_staleness_float(self): with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient( - "mongodb://host/?maxStalenessSeconds=1.5" "&readPreference=nearest" - ) + client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest") # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -101,7 +99,7 @@ def test_max_staleness_zero(self): with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0" "&readPreference=nearest") + client = MongoClient("mongodb://host/?maxStalenessSeconds=0&readPreference=nearest") # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 1adb2983e4..0b8200c019 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -16,19 +16,12 @@ import datetime import sys import time -import warnings from typing import Any sys.path[0:0] = [""] from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest -from test.utils import ( - EventListener, - get_pool, - rs_or_single_client, - single_client, - wait_until, -) +from test.utils import EventListener, rs_or_single_client, single_client, wait_until from bson.int64 import Int64 from bson.objectid import ObjectId @@ -781,7 +774,7 @@ def test_non_bulk_writes(self): # delete_one self.listener.results.clear() - res2 = coll.delete_one({"x": 3}) + _ = coll.delete_one({"x": 3}) results = self.listener.results started = results["started"][0] succeeded = results["succeeded"][0] @@ -1242,19 +1235,19 @@ def test_server_event_repr(self): event = monitoring.ServerOpeningEvent(server_address, topology_id) self.assertEqual( repr(event), - "", + "", ) event = monitoring.ServerDescriptionChangedEvent( "PREV", "NEW", server_address, topology_id # type: ignore[arg-type] ) self.assertEqual( repr(event), - "", + "", ) event = monitoring.ServerClosedEvent(server_address, topology_id) self.assertEqual( repr(event), - "", + "", ) def test_topology_event_repr(self): diff --git a/test/test_mypy.py b/test/test_mypy.py index 5b9746f723..36fe2ed424 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -16,7 +16,6 @@ sample client code that uses PyMongo typings.""" import os -import sys import unittest from typing import Any, Dict, Iterable, List diff --git a/test/test_pooling.py b/test/test_pooling.py index 07dbc3643d..00b947f10a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -320,7 +320,7 @@ def test_wait_queue_timeout(self): pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) - with pool.get_socket() as sock_info: + with pool.get_socket(): start = time.time() with self.assertRaises(ConnectionFailure): with pool.get_socket(): diff --git a/test/test_saslprep.py b/test/test_saslprep.py index 1dd4727181..c07870dad6 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -24,7 +24,7 @@ class TestSASLprep(unittest.TestCase): def test_saslprep(self): try: - import stringprep + import stringprep # noqa except ImportError: self.assertRaises(TypeError, saslprep, "anything...") # Bytes strings are ignored. diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index fee751fbdc..d7b3744399 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -98,13 +98,13 @@ def compare_events(expected_dict, actual): ) if not compare_server_descriptions(expected["newDescription"], actual.new_description): - return (False, "New ServerDescription incorrect in" " ServerDescriptionChangedEvent") + return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent") if not compare_server_descriptions( expected["previousDescription"], actual.previous_description ): return ( False, - "Previous ServerDescription incorrect in" " ServerDescriptionChangedEvent", + "Previous ServerDescription incorrect in ServerDescriptionChangedEvent", ) elif expected_type == "server_closed_event": @@ -125,19 +125,19 @@ def compare_events(expected_dict, actual): if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): return ( False, - "Expected TopologyDescriptionChangedEvent," " got %s" % (actual.__class__), + "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__), ) if not compare_topology_descriptions(expected["newDescription"], actual.new_description): return ( False, - "New TopologyDescription incorrect in " "TopologyDescriptionChangedEvent", + "New TopologyDescription incorrect in TopologyDescriptionChangedEvent", ) if not compare_topology_descriptions( expected["previousDescription"], actual.previous_description ): return ( False, - "Previous TopologyDescription incorrect in" " TopologyDescriptionChangedEvent", + "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent", ) elif expected_type == "topology_closed_event": diff --git a/test/test_server_description.py b/test/test_server_description.py index 1562711375..bb49141d2f 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -170,7 +170,7 @@ def test_all_hosts(self): def test_repr(self): s = parse_hello_response({"ok": 1, "msg": "isdbgrid"}) self.assertEqual( - repr(s), "" + repr(s), "" ) def test_topology_version(self): diff --git a/test/test_session.py b/test/test_session.py index 5a242d6c69..b7aa65a19d 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -145,7 +145,7 @@ def _test_ops(self, client, *ops): kw = copy.copy(kw) kw["session"] = s with self.assertRaisesRegex( - InvalidOperation, "Can only use session with the MongoClient" " that started it" + InvalidOperation, "Can only use session with the MongoClient that started it" ): f(*args, **kw) diff --git a/test/test_son.py b/test/test_son.py index 69beb81439..5c1f43594d 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -154,8 +154,8 @@ def test_contains_has(self): self.assertIn(1, test_son) self.assertTrue(2 in test_son, "in failed") self.assertFalse(22 in test_son, "in succeeded when it shouldn't") - self.assertTrue(test_son.has_key(2), "has_key failed") - self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") + self.assertTrue(test_son.has_key(2), "has_key failed") # noqa + self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") # noqa def test_clears(self): """ diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 6c240d7a78..0b54171dc9 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -96,7 +96,7 @@ class TestSrvPolling(unittest.TestCase): def setUp(self): if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython " "module") + raise unittest.SkipTest("SRV polling tests require the dnspython module") # Patch timeouts to ensure short rescan SRV interval. self.client_knobs = client_knobs( heartbeat_frequency=WAIT_TIME, @@ -318,7 +318,7 @@ def nodelist_callback(): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = MongoClient( - "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName" "=customname" + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" ) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) diff --git a/test/test_ssl.py b/test/test_ssl.py index 7629c1fd88..0c45275fac 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -40,12 +40,12 @@ _HAVE_PYOPENSSL = False try: # All of these must be available to use PyOpenSSL - import OpenSSL - import requests - import service_identity + import OpenSSL # noqa + import requests # noqa + import service_identity # noqa # Ensure service_identity>=18.1 is installed - from service_identity.pyopenssl import verify_ip_address + from service_identity.pyopenssl import verify_ip_address # noqa from pymongo.ocsp_support import _load_trusted_ca_certs @@ -79,9 +79,7 @@ class TestClientSSL(unittest.TestCase): - @unittest.skipIf( - HAVE_SSL, "The ssl module is available, can't test what " "happens without it." - ) + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") def test_no_ssl_module(self): # Explicit self.assertRaises(ConfigurationError, MongoClient, ssl=True) @@ -406,7 +404,7 @@ def test_tlsCRLFile_support(self): ) ) - uri_fmt = "mongodb://localhost/?ssl=true&" "tlsCAFile=%s&serverSelectionTimeoutMS=100" + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=100" connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore uri_fmt = ( @@ -569,7 +567,7 @@ def test_mongodb_x509_auth(self): else: self.assertEqual(names, ["authenticate", "find"]) - uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( quote_plus(MONGODB_X509_USERNAME), host, port, @@ -589,7 +587,7 @@ def test_mongodb_x509_auth(self): # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match - uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( quote_plus("not the username"), host, port, @@ -617,7 +615,7 @@ def test_mongodb_x509_auth(self): bad_client.pymongo_test.test.find_one() # Invalid certificate (using CA certificate as client certificate) - uri = "mongodb://%s@%s:%d/?authMechanism=" "MONGODB-X509" % ( + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( quote_plus(MONGODB_X509_USERNAME), host, port, diff --git a/test/test_threads.py b/test/test_threads.py index 064008ee32..2c73de52e7 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -16,7 +16,7 @@ import threading from test import IntegrationTest, client_context, unittest -from test.utils import joinall, rs_or_single_client +from test.utils import joinall @client_context.require_connection diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index cfe21169fd..4fa288df44 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -59,11 +59,11 @@ def test_split_hosts(self): ) self.assertEqual( [("/tmp/mongodb-27017.sock", None), ("example.com", 27017)], - split_hosts("/tmp/mongodb-27017.sock," "example.com:27017"), + split_hosts("/tmp/mongodb-27017.sock,example.com:27017"), ) self.assertEqual( [("example.com", 27017), ("/tmp/mongodb-27017.sock", None)], - split_hosts("example.com:27017," "/tmp/mongodb-27017.sock"), + split_hosts("example.com:27017,/tmp/mongodb-27017.sock"), ) self.assertRaises(ValueError, split_hosts, "::1", 27017) self.assertRaises(ValueError, split_hosts, "[::1:27017") @@ -168,11 +168,11 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] - self.assertEqual(res, parse_uri("mongodb://example1.com:27017," "example2.com:27017")) + self.assertEqual(res, parse_uri("mongodb://example1.com:27017,example2.com:27017")) res = copy.deepcopy(orig) res["nodelist"] = [("localhost", 27017), ("localhost", 27018), ("localhost", 27019)] - self.assertEqual(res, parse_uri("mongodb://localhost," "localhost:27018,localhost:27019")) + self.assertEqual(res, parse_uri("mongodb://localhost,localhost:27018,localhost:27019")) res = copy.deepcopy(orig) res["database"] = "foo" @@ -182,21 +182,17 @@ def test_parse_uri(self): self.assertEqual(res, parse_uri("mongodb://localhost/")) res.update({"database": "test", "collection": "yield_historical.in"}) - self.assertEqual(res, parse_uri("mongodb://" "localhost/test.yield_historical.in")) + self.assertEqual(res, parse_uri("mongodb://localhost/test.yield_historical.in")) res.update({"username": "fred", "password": "foobar"}) - self.assertEqual( - res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") - ) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) res["nodelist"] = [("example1.com", 27017), ("example2.com", 27017)] res.update({"database": "test", "collection": "yield_historical.in"}) self.assertEqual( res, - parse_uri( - "mongodb://example1.com:27017,example2.com" ":27017/test.yield_historical.in" - ), + parse_uri("mongodb://example1.com:27017,example2.com:27017/test.yield_historical.in"), ) # Test socket path without escaped characters. @@ -205,14 +201,14 @@ def test_parse_uri(self): # Test with escaped characters. res = copy.deepcopy(orig) res["nodelist"] = [("example2.com", 27017), ("/tmp/mongodb-27017.sock", None)] - self.assertEqual(res, parse_uri("mongodb://example2.com," "%2Ftmp%2Fmongodb-27017.sock")) + self.assertEqual(res, parse_uri("mongodb://example2.com,%2Ftmp%2Fmongodb-27017.sock")) res = copy.deepcopy(orig) res["nodelist"] = [("shoe.sock.pants.co.uk", 27017), ("/tmp/mongodb-27017.sock", None)] res["database"] = "nethers_db" self.assertEqual( res, - parse_uri("mongodb://shoe.sock.pants.co.uk," "%2Ftmp%2Fmongodb-27017.sock/nethers_db"), + parse_uri("mongodb://shoe.sock.pants.co.uk,%2Ftmp%2Fmongodb-27017.sock/nethers_db"), ) res = copy.deepcopy(orig) @@ -242,15 +238,13 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["nodelist"] = [("example2.com", 27017)] res.update({"database": "test", "collection": "yield_historical.sock"}) - self.assertEqual( - res, parse_uri("mongodb://example2.com:27017" "/test.yield_historical.sock") - ) + self.assertEqual(res, parse_uri("mongodb://example2.com:27017/test.yield_historical.sock")) res = copy.deepcopy(orig) res["nodelist"] = [("/tmp/mongodb-27017.sock", None)] res.update({"database": "test", "collection": "mongodb-27017.sock"}) self.assertEqual( - res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock" "/test.mongodb-27017.sock") + res, parse_uri("mongodb://%2Ftmp%2Fmongodb-27017.sock/test.mongodb-27017.sock") ) res = copy.deepcopy(orig) @@ -275,9 +269,7 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res.update({"username": "fred", "password": "foobar"}) res.update({"database": "test", "collection": "yield_historical.in"}) - self.assertEqual( - res, parse_uri("mongodb://fred:foobar@localhost/" "test.yield_historical.in") - ) + self.assertEqual(res, parse_uri("mongodb://fred:foobar@localhost/test.yield_historical.in")) res = copy.deepcopy(orig) res["database"] = "test" @@ -294,7 +286,7 @@ def test_parse_uri(self): res["username"] = "user" res["password"] = "password" self.assertEqual( - res, parse_uri("mongodb://user:password@localhost/" "?authMechanism=MONGODB-CR") + res, parse_uri("mongodb://user:password@localhost/?authMechanism=MONGODB-CR") ) res = copy.deepcopy(orig) @@ -305,7 +297,7 @@ def test_parse_uri(self): self.assertEqual( res, parse_uri( - "mongodb://user:password@localhost/foo" "?authSource=bar;authMechanism=MONGODB-CR" + "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=MONGODB-CR" ), ) @@ -313,13 +305,13 @@ def test_parse_uri(self): res["options"] = {"authmechanism": "MONGODB-CR"} res["username"] = "user" res["password"] = "" - self.assertEqual(res, parse_uri("mongodb://user:@localhost/" "?authMechanism=MONGODB-CR")) + self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=MONGODB-CR")) res = copy.deepcopy(orig) res["username"] = "user@domain.com" res["password"] = "password" res["database"] = "foo" - self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password" "@localhost/foo")) + self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password@localhost/foo")) res = copy.deepcopy(orig) res["options"] = {"authmechanism": "GSSAPI"} @@ -328,7 +320,7 @@ def test_parse_uri(self): res["database"] = "foo" self.assertEqual( res, - parse_uri("mongodb://user%40domain.com:password" "@localhost/foo?authMechanism=GSSAPI"), + parse_uri("mongodb://user%40domain.com:password@localhost/foo?authMechanism=GSSAPI"), ) res = copy.deepcopy(orig) @@ -337,7 +329,7 @@ def test_parse_uri(self): res["password"] = "" res["database"] = "foo" self.assertEqual( - res, parse_uri("mongodb://user%40domain.com" "@localhost/foo?authMechanism=GSSAPI") + res, parse_uri("mongodb://user%40domain.com@localhost/foo?authMechanism=GSSAPI") ) res = copy.deepcopy(orig) @@ -410,7 +402,7 @@ def test_parse_uri(self): self.assertRaises( ValueError, parse_uri, - "mongodb://user%40domain.com:password" "@localhost/foo?uuidrepresentation=notAnOption", + "mongodb://user%40domain.com:password@localhost/foo?uuidrepresentation=notAnOption", ) def test_parse_ssl_paths(self): diff --git a/test/unified_format.py b/test/unified_format.py index ba1d063694..6f1e386932 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -826,14 +826,12 @@ def process_error(self, exception, spec): self.match_evaluator.match_result(expect_result, result) else: self.fail( - "expectResult can only be specified with %s " "exceptions" % (BulkWriteError,) + "expectResult can only be specified with %s exceptions" % (BulkWriteError,) ) def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): - self.fail( - "Operation %s not supported for entity " "of type %s" % (opname, type(target)) - ) + self.fail("Operation %s not supported for entity of type %s" % (opname, type(target))) def __entityOperation_createChangeStream(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": @@ -891,7 +889,7 @@ def _collectionOperation_createFindCursor(self, target, *args, **kwargs): def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: - self.skipTest("PyMongo does not support batch_size for " "list_indexes") + self.skipTest("PyMongo does not support batch_size for list_indexes") return target.list_indexes(*args, **kwargs) def _sessionOperation_withTransaction(self, target, *args, **kwargs): @@ -1255,7 +1253,7 @@ def generate_test_classes( test_path, module=__name__, class_name_prefix="", - expected_failures=[], + expected_failures=[], # noqa: B006 bypass_test_generation_errors=False, **kwargs ): diff --git a/tools/clean.py b/tools/clean.py index 7196b00e90..0ea31fc3d9 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -23,24 +23,24 @@ try: os.remove("pymongo/_cmessage.so") os.remove("bson/_cbson.so") -except: +except BaseException: pass try: os.remove("pymongo/_cmessage.pyd") os.remove("bson/_cbson.pyd") -except: +except BaseException: pass try: - from pymongo import _cmessage # type: ignore[attr-defined] + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 sys.exit("could still import _cmessage") except ImportError: pass try: - from bson import _cbson + from bson import _cbson # noqa: F401 sys.exit("could still import _cbson") except ImportError: diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index a2d4954789..6cb82eed57 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -21,8 +21,8 @@ sys.path[0:0] = [""] -import bson -import pymongo +import bson # noqa: E402 +import pymongo # noqa: E402 if not pymongo.has_c() or not bson.has_c(): sys.exit("could not load C extensions") From f5eec45250781a3cd3369dc233d97f0272e905a2 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 17 Feb 2022 16:23:23 -0800 Subject: [PATCH 0594/2111] PYTHON-3111 Rename "Versioned API" to "Stable API" in documentation (#867) --- .evergreen/config.yml | 2 +- .evergreen/resync-specs.sh | 2 +- doc/api/pymongo/server_api.rst | 4 ++-- doc/changelog.rst | 2 +- pymongo/database.py | 2 +- pymongo/mongo_client.py | 6 +++--- pymongo/pool.py | 2 +- pymongo/server_api.py | 14 +++++++------- test/__init__.py | 2 +- 9 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8edc43df20..9bfd57c805 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2561,7 +2561,7 @@ buildvariants: tasks: - name: atlas-data-lake-tests -- matrix_name: "versioned-api-tests" +- matrix_name: "stable-api-tests" matrix_spec: platform: ubuntu-18.04 python-version: ["3.6", "3.10"] diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index bf20f23037..d1bc26a91c 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -136,7 +136,7 @@ do uri|uri*options) cpjson uri-options/tests uri_options ;; - versioned-api) + stable-api) cpjson versioned-api/tests versioned-api ;; *) diff --git a/doc/api/pymongo/server_api.rst b/doc/api/pymongo/server_api.rst index d961d07f1a..de74411aa4 100644 --- a/doc/api/pymongo/server_api.rst +++ b/doc/api/pymongo/server_api.rst @@ -1,8 +1,8 @@ -:mod:`server_api` -- Support for MongoDB Versioned API +:mod:`server_api` -- Support for MongoDB Stable API ====================================================== .. automodule:: pymongo.server_api - :synopsis: Support for MongoDB Versioned API + :synopsis: Support for MongoDB Stable API .. autoclass:: pymongo.server_api.ServerApi :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index de38f188e4..7dd57d5329 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -258,7 +258,7 @@ Notable improvements .................... - Added support for MongoDB 5.0. -- Support for MongoDB Versioned API, see :class:`~pymongo.server_api.ServerApi`. +- Support for MongoDB Stable API, see :class:`~pymongo.server_api.ServerApi`. - Support for snapshot reads on secondaries (see :ref:`snapshot-reads-ref`). - Support for Azure and GCP KMS providers for client side field level encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`, diff --git a/pymongo/database.py b/pymongo/database.py index f92dbc8aed..f43f18d017 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -686,7 +686,7 @@ def command( .. note:: :meth:`command` does **not** apply any custom TypeDecoders when decoding the command response. - .. note:: If this client has been configured to use MongoDB Versioned + .. note:: If this client has been configured to use MongoDB Stable API (see :ref:`versioned-api-ref`), then :meth:`command` will automactically add API versioning options to the given command. Explicitly adding API versioning options in the command and diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 9414d71962..4965b5e439 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -515,12 +515,12 @@ def __init__( - ``bypass_auto_encrpytion=False`` is passed to :class:`~pymongo.encryption_options.AutoEncryptionOpts` - | **Versioned API options:** - | (If not set explicitly, Versioned API will not be enabled.) + | **Stable API options:** + | (If not set explicitly, Stable API will not be enabled.) - `server_api`: A :class:`~pymongo.server_api.ServerApi` which configures this - client to use Versioned API. See :ref:`versioned-api-ref` for + client to use Stable API. See :ref:`versioned-api-ref` for details. .. seealso:: The MongoDB documentation on `connections `_. diff --git a/pymongo/pool.py b/pymongo/pool.py index 09709ffbf4..4750163718 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -587,7 +587,7 @@ def unpin(self): def hello_cmd(self): # Handshake spec requires us to use OP_MSG+hello command for the - # initial handshake in load balanced or versioned api mode. + # initial handshake in load balanced or stable API mode. if self.opts.server_api or self.hello_ok or self.opts.load_balanced: self.op_msg_enabled = True return SON([(HelloCompat.CMD, 1)]) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 110406366a..e92d6e6179 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -12,11 +12,11 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -"""Support for MongoDB Versioned API. +"""Support for MongoDB Stable API. .. _versioned-api-ref: -MongoDB Versioned API +MongoDB Stable API ===================== Starting in MongoDB 5.0, applications can specify the server API version @@ -27,9 +27,9 @@ Declaring an API Version ```````````````````````` -.. attention:: Versioned API requires MongoDB >=5.0. +.. attention:: Stable API requires MongoDB >=5.0. -To configure MongoDB Versioned API, pass the ``server_api`` keyword option to +To configure MongoDB Stable API, pass the ``server_api`` keyword option to :class:`~pymongo.mongo_client.MongoClient`:: >>> from pymongo.mongo_client import MongoClient @@ -44,7 +44,7 @@ :meth:`~pymongo.database.Database.command` helper. .. note:: Declaring an API version on the - :class:`~pymongo.mongo_client.MongoClient` **and** specifying versioned + :class:`~pymongo.mongo_client.MongoClient` **and** specifying stable API options in :meth:`~pymongo.database.Database.command` command document is not supported and will lead to undefined behaviour. @@ -96,10 +96,10 @@ class ServerApiVersion: class ServerApi(object): - """MongoDB Versioned API.""" + """MongoDB Stable API.""" def __init__(self, version, strict=None, deprecation_errors=None): - """Options to configure MongoDB Versioned API. + """Options to configure MongoDB Stable API. :Parameters: - `version`: The API version string. Must be one of the values in diff --git a/test/__init__.py b/test/__init__.py index be0825025a..b2906481e9 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -913,7 +913,7 @@ def sanitize_cmd(cmd): cp.pop("$readPreference", None) cp.pop("lsid", None) if MONGODB_API_VERSION: - # Versioned api parameters + # Stable API parameters cp.pop("apiVersion", None) # OP_MSG encoding may move the payload type one field to the # end of the command. Do the same here. From e6b65860f59b432487fcdb385fde663345ce2917 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 17 Feb 2022 17:13:25 -0800 Subject: [PATCH 0595/2111] PYTHON-3136 [DevOps] Resync-specs.sh removes ignored files from working tree (#878) --- .evergreen/resync-specs.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index d1bc26a91c..3042fd543b 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -56,10 +56,14 @@ cpjson () { cd "$SPECS"/source/$1 find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ $PYMONGO/test/$2 - printf "\nIgnored files for ${PWD}" - printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ + printf "\nIgnored files for ${PWD}\n" + IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ - sed -e '/^[0-9]/d' | sed -e 's|< ./||g' + sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )" + printf "%s\n" $IGNORED_FILES + cd "$PYMONGO"/test/$2 + printf "%s\n" $IGNORED_FILES | xargs git checkout master + } for spec in "$@" From dce5072dd1f100947565555d4eafb748239f4385 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 18 Feb 2022 10:43:07 -0800 Subject: [PATCH 0596/2111] PYTHON-3137 Handle falsey values for "let" parameter (#881) --- pymongo/aggregation.py | 2 +- pymongo/collection.py | 8 ++++---- pymongo/cursor.py | 2 +- test/test_collection.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index e190fefc56..84ecffe5fb 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -55,7 +55,7 @@ def __init__( self._performs_write = True common.validate_is_mapping("options", options) - if let: + if let is not None: common.validate_is_mapping("let", let) options["let"] = let if comment is not None: diff --git a/pymongo/collection.py b/pymongo/collection.py index 8de1fbeeaa..65bd1c54e7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -728,7 +728,7 @@ def _update( hint = helpers._index_document(hint) update_doc["hint"] = hint command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) - if let: + if let is not None: common.validate_is_mapping("let", let) command["let"] = let if not write_concern.is_server_default: @@ -893,7 +893,7 @@ def replace_one( """ common.validate_is_mapping("filter", filter) common.validate_ok_for_replace(replacement) - if let: + if let is not None: common.validate_is_mapping("let", let) write_concern = self._write_concern_for(session) return UpdateResult( @@ -1189,7 +1189,7 @@ def _delete( if not write_concern.is_server_default: command["writeConcern"] = write_concern.document - if let: + if let is not None: common.validate_is_document_type("let", let) command["let"] = let @@ -2728,7 +2728,7 @@ def __find_and_modify( ) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) - if let: + if let is not None: common.validate_is_mapping("let", let) cmd["let"] = let cmd.update(kwargs) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 02f1905df3..a2ccdf5860 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -248,7 +248,7 @@ def __init__( if projection is not None: projection = helpers._fields_list_to_dict(projection, "projection") - if let: + if let is not None: validate_is_document_type("let", let) self.__let = let diff --git a/test/test_collection.py b/test/test_collection.py index d9f51f530d..47636b495f 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -2123,7 +2123,7 @@ def test_helpers_with_let(self): (c.find_one_and_replace, ({}, {})), (c.aggregate, ([], {})), ] - for let in [10, "str"]: + for let in [10, "str", [], False]: for helper, args in helpers: with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): helper(*args, let=let) # type: ignore From 52ff8c2e90cb90b3f734404400941c7b05c13c85 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 18 Feb 2022 10:43:56 -0800 Subject: [PATCH 0597/2111] PYTHON-3068 Support 'let' option in BulkWriteOptions (#874) --- pymongo/bulk.py | 8 +- pymongo/collection.py | 8 +- .../unified/bulkWrite-deleteMany-let.json | 200 ++++++++++++++ .../crud/unified/bulkWrite-deleteOne-let.json | 200 ++++++++++++++ .../unified/bulkWrite-replaceOne-let.json | 214 +++++++++++++++ .../unified/bulkWrite-updateMany-let.json | 243 +++++++++++++++++ .../crud/unified/bulkWrite-updateOne-let.json | 247 ++++++++++++++++++ 7 files changed, 1118 insertions(+), 2 deletions(-) create mode 100644 test/crud/unified/bulkWrite-deleteMany-let.json create mode 100644 test/crud/unified/bulkWrite-deleteOne-let.json create mode 100644 test/crud/unified/bulkWrite-replaceOne-let.json create mode 100644 test/crud/unified/bulkWrite-updateMany-let.json create mode 100644 test/crud/unified/bulkWrite-updateOne-let.json diff --git a/pymongo/bulk.py b/pymongo/bulk.py index c736bd7d6f..9055e40e98 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -22,6 +22,7 @@ from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON +from pymongo import common from pymongo.client_session import _validate_session_write_concern from pymongo.collation import validate_collation_or_none from pymongo.common import ( @@ -137,13 +138,16 @@ def _raise_bulk_write_error(full_result): class _Bulk(object): """The private guts of the bulk write API.""" - def __init__(self, collection, ordered, bypass_document_validation, comment=None): + def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) self.comment = comment self.ordered = ordered self.ops = [] @@ -314,6 +318,8 @@ def _execute_command( cmd["writeConcern"] = write_concern.document if self.bypass_doc_val: cmd["bypassDocumentValidation"] = True + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let if session: # Start a new retryable write unless one was already # started for this command. diff --git a/pymongo/collection.py b/pymongo/collection.py index 65bd1c54e7..bfe2007d5a 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -423,6 +423,7 @@ def bulk_write( bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, + let: Optional[Mapping] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -474,6 +475,10 @@ def bulk_write( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. + - `let` (optional): Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). :Returns: An instance of :class:`~pymongo.results.BulkWriteResult`. @@ -485,6 +490,7 @@ def bulk_write( .. versionchanged:: 4.1 Added ``comment`` parameter. + Added ``let`` parameter. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -496,7 +502,7 @@ def bulk_write( """ common.validate_list("requests", requests) - blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) for request in requests: try: request._add_to_bulk(blk) diff --git a/test/crud/unified/bulkWrite-deleteMany-let.json b/test/crud/unified/bulkWrite-deleteMany-let.json new file mode 100644 index 0000000000..c16161e4bc --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteMany-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 0 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Bulk Write deleteMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-deleteOne-let.json b/test/crud/unified/bulkWrite-deleteOne-let.json new file mode 100644 index 0000000000..29ac34d3dc --- /dev/null +++ b/test/crud/unified/bulkWrite-deleteOne-let.json @@ -0,0 +1,200 @@ +{ + "description": "BulkWrite deleteOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite deleteOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Bulk Write deleteOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'delete.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll0", + "deletes": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "limit": 1 + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json new file mode 100644 index 0000000000..bdd1c27a0b --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -0,0 +1,214 @@ +{ + "description": "BulkWrite replaceOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Bulk Write replaceOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "replacement": { + "x": 3 + } + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": { + "x": 3 + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json new file mode 100644 index 0000000000..6d437e9011 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -0,0 +1,243 @@ +{ + "description": "BulkWrite updateMany-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateMany with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 21 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "Bulk Write updateMany with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 21 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 21 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json new file mode 100644 index 0000000000..e248779da3 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -0,0 +1,247 @@ +{ + "description": "BulkWrite updateOne-let", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with let option", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 22 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + }, + { + "description": "Bulk Write updateOne with let option unsupported (server-side error)", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.9" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "update": [ + { + "$set": { + "x": 22 + } + } + ] + } + } + ], + "let": { + "id": 1 + } + }, + "expectError": { + "errorContains": "'update.let' is an unknown field", + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "$expr": { + "$eq": [ + "$_id", + "$$id" + ] + } + }, + "u": [ + { + "$set": { + "x": 22 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "let": { + "id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 20 + }, + { + "_id": 2, + "x": 21 + } + ] + } + ] + } + ] +} From 2f13a51cd42811d7364b789cbe45fd262afdfe61 Mon Sep 17 00:00:00 2001 From: Arie Bovenberg Date: Tue, 22 Feb 2022 19:27:16 +0100 Subject: [PATCH 0598/2111] PYTHON-3124 Remove overlapping slots from _WriteResult subclasses (#884) --- doc/contributors.rst | 1 + pymongo/results.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 22cbee3215..4275209781 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -91,3 +91,4 @@ The following is a list of people who have contributed to - Khanh Nguyen (KN99HN) - Henri Froese (henrifroese) - Ishmum Jawad Khan (ishmum123) +- Arie Bovenberg (ariebovenberg) diff --git a/pymongo/results.py b/pymongo/results.py index 1cbb614bf3..5803900398 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -59,7 +59,7 @@ def acknowledged(self) -> bool: class InsertOneResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" - __slots__ = ("__inserted_id", "__acknowledged") + __slots__ = ("__inserted_id",) def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id @@ -74,7 +74,7 @@ def inserted_id(self) -> Any: class InsertManyResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.insert_many`.""" - __slots__ = ("__inserted_ids", "__acknowledged") + __slots__ = ("__inserted_ids",) def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids @@ -98,7 +98,7 @@ class UpdateResult(_WriteResult): :meth:`~pymongo.collection.Collection.replace_one`. """ - __slots__ = ("__raw_result", "__acknowledged") + __slots__ = ("__raw_result",) def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result @@ -136,7 +136,7 @@ class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` and :meth:`~pymongo.collection.Collection.delete_many`""" - __slots__ = ("__raw_result", "__acknowledged") + __slots__ = ("__raw_result",) def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result @@ -157,7 +157,7 @@ def deleted_count(self) -> int: class BulkWriteResult(_WriteResult): """An object wrapper for bulk API write results.""" - __slots__ = ("__bulk_api_result", "__acknowledged") + __slots__ = ("__bulk_api_result",) def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: """Create a BulkWriteResult instance. From 2141621194715836325f3ce3b6a10e16712d909d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 22 Feb 2022 14:45:56 -0800 Subject: [PATCH 0599/2111] PYTHON-3088 Test rapid releases with load balancers (#885) PYTHON-3088 [v3.13] Update load balancer tests to support dedicated load balancer port (#870) (cherry picked from commit 341d489) --- .evergreen/config.yml | 9 +++++---- pymongo/pool.py | 5 ----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9bfd57c805..2e3c12f3f8 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1714,12 +1714,9 @@ tasks: commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "latest" TOPOLOGY: "sharded_cluster" LOAD_BALANCER: true - func: "run load-balancer" - vars: - LOAD_BALANCER: true - func: "run tests" - name: "test-fips-standalone" @@ -1937,6 +1934,10 @@ axes: display_name: "MongoDB latest" variables: VERSION: "latest" + - id: "rapid" + display_name: "MongoDB rapid" + variables: + VERSION: "rapid" # Choice of Python runtime version - id: python-version @@ -2636,7 +2637,7 @@ buildvariants: - matrix_name: "load-balancer" matrix_spec: platform: ubuntu-18.04 - mongodb-version: ["5.0", "latest"] + mongodb-version: ["rapid", "latest"] auth-ssl: "*" python-version: "*" loadbalancer: "*" diff --git a/pymongo/pool.py b/pymongo/pool.py index 4750163718..c7bd21fc8f 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -630,8 +630,6 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): auth_ctx = None doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) - if not self.opts.load_balanced: - doc.pop("serviceId", None) hello = Hello(doc, awaitable=awaitable) self.is_writable = hello.is_writable self.max_wire_version = hello.max_wire_version @@ -676,9 +674,6 @@ def _next_reply(self): unpacked_docs = reply.unpack_response() response_doc = unpacked_docs[0] helpers._check_command_response(response_doc, self.max_wire_version) - # Remove after PYTHON-2712. - if not self.opts.load_balanced: - response_doc.pop("serviceId", None) return response_doc def command( From 8496ed4b3d9deccac490a5904d92dca3adb19f17 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 23 Feb 2022 11:52:55 -0800 Subject: [PATCH 0600/2111] PYTHON-3142 Stop using $where in test_maxConnecting (#886) --- test/test_pooling.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/test_pooling.py b/test/test_pooling.py index 00b947f10a..923c89d83b 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -378,12 +378,14 @@ def test_checkout_more_than_max_pool_size(self): def test_maxConnecting(self): client = rs_or_single_client() self.addCleanup(client.close) + self.client.test.test.insert_one({}) + self.addCleanup(self.client.test.test.delete_many, {}) pool = get_pool(client) docs = [] # Run 50 short running operations def find_one(): - docs.append(client.test.test.find_one({"$where": delay(0.001)})) + docs.append(client.test.test.find_one({})) threads = [threading.Thread(target=find_one) for _ in range(50)] for thread in threads: @@ -394,9 +396,8 @@ def find_one(): self.assertEqual(len(docs), 50) self.assertLessEqual(len(pool.sockets), 50) # TLS and auth make connection establishment more expensive than - # the artificially delayed query which leads to more threads - # hitting maxConnecting. The end result is fewer total connections - # and better latency. + # the query which leads to more threads hitting maxConnecting. + # The end result is fewer total connections and better latency. if client_context.tls and client_context.auth_enabled: self.assertLessEqual(len(pool.sockets), 30) else: From 6fb8d7afe854bfe277e46c9fd3bcfc150db9b1e1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 23 Feb 2022 14:00:16 -0800 Subject: [PATCH 0601/2111] PYTHON-3129 Re-sync CRUD spec tests (#887) --- test/crud/unified/bulkWrite-deleteMany-let.json | 4 ++-- test/crud/unified/bulkWrite-deleteOne-let.json | 2 +- test/crud/unified/bulkWrite-replaceOne-let.json | 2 +- test/crud/unified/bulkWrite-updateMany-let.json | 2 +- test/crud/unified/bulkWrite-updateOne-let.json | 2 +- test/crud/unified/updateMany-let.json | 2 +- test/crud/unified/updateOne-let.json | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/crud/unified/bulkWrite-deleteMany-let.json b/test/crud/unified/bulkWrite-deleteMany-let.json index c16161e4bc..45c20ea49a 100644 --- a/test/crud/unified/bulkWrite-deleteMany-let.json +++ b/test/crud/unified/bulkWrite-deleteMany-let.json @@ -115,11 +115,11 @@ ] }, { - "description": "Bulk Write deleteMany with let option unsupported (server-side error)", + "description": "BulkWrite deleteMany with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", - "maxServerVersion": "4.9" + "maxServerVersion": "4.4.99" } ], "operations": [ diff --git a/test/crud/unified/bulkWrite-deleteOne-let.json b/test/crud/unified/bulkWrite-deleteOne-let.json index 29ac34d3dc..f3268163cb 100644 --- a/test/crud/unified/bulkWrite-deleteOne-let.json +++ b/test/crud/unified/bulkWrite-deleteOne-let.json @@ -115,7 +115,7 @@ ] }, { - "description": "Bulk Write deleteOne with let option unsupported (server-side error)", + "description": "BulkWrite deleteOne with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json index bdd1c27a0b..df4eafe62f 100644 --- a/test/crud/unified/bulkWrite-replaceOne-let.json +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -124,7 +124,7 @@ ] }, { - "description": "Bulk Write replaceOne with let option unsupported (server-side error)", + "description": "BulkWrite replaceOne with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "4.2", diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json index 6d437e9011..3cc8da4c53 100644 --- a/test/crud/unified/bulkWrite-updateMany-let.json +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -139,7 +139,7 @@ ] }, { - "description": "Bulk Write updateMany with let option unsupported (server-side error)", + "description": "BulkWrite updateMany with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json index e248779da3..2a3e4f79dc 100644 --- a/test/crud/unified/bulkWrite-updateOne-let.json +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -141,7 +141,7 @@ ] }, { - "description": "Bulk Write updateOne with let option unsupported (server-side error)", + "description": "BulkWrite updateOne with let option unsupported (server-side error)", "runOnRequirements": [ { "minServerVersion": "3.6.0", diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json index b4a4ddd800..8a19ac0933 100644 --- a/test/crud/unified/updateMany-let.json +++ b/test/crud/unified/updateMany-let.json @@ -158,7 +158,7 @@ "description": "updateMany with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.4.99" } ], diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json index 7b1cc4cf00..8237bef7e8 100644 --- a/test/crud/unified/updateOne-let.json +++ b/test/crud/unified/updateOne-let.json @@ -136,7 +136,7 @@ "description": "UpdateOne with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.4.99" } ], From f8f34b043843125f162d2b79307c9df68c16b51b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 23 Feb 2022 14:00:43 -0800 Subject: [PATCH 0602/2111] PYTHON-2951 Test that handshake errors are retryable (#846) --- .../{ => legacy}/aggregate-merge.json | 0 .../{ => legacy}/aggregate-serverErrors.json | 0 .../{ => legacy}/aggregate.json | 0 ...angeStreams-client.watch-serverErrors.json | 0 .../changeStreams-client.watch.json | 0 ...ngeStreams-db.coll.watch-serverErrors.json | 0 .../changeStreams-db.coll.watch.json | 0 .../changeStreams-db.watch-serverErrors.json | 0 .../{ => legacy}/changeStreams-db.watch.json | 0 .../{ => legacy}/count-serverErrors.json | 0 test/retryable_reads/{ => legacy}/count.json | 0 .../countDocuments-serverErrors.json | 0 .../{ => legacy}/countDocuments.json | 0 .../{ => legacy}/distinct-serverErrors.json | 0 .../{ => legacy}/distinct.json | 0 .../estimatedDocumentCount-4.9.json | 0 .../estimatedDocumentCount-pre4.9.json | 0 ...timatedDocumentCount-serverErrors-4.9.json | 0 ...atedDocumentCount-serverErrors-pre4.9.json | 0 .../{ => legacy}/find-serverErrors.json | 0 test/retryable_reads/{ => legacy}/find.json | 0 .../{ => legacy}/findOne-serverErrors.json | 0 .../retryable_reads/{ => legacy}/findOne.json | 0 .../gridfs-download-serverErrors.json | 0 .../{ => legacy}/gridfs-download.json | 0 .../gridfs-downloadByName-serverErrors.json | 0 .../{ => legacy}/gridfs-downloadByName.json | 0 .../listCollectionNames-serverErrors.json | 0 .../{ => legacy}/listCollectionNames.json | 0 .../listCollectionObjects-serverErrors.json | 0 .../{ => legacy}/listCollectionObjects.json | 0 .../listCollections-serverErrors.json | 0 .../{ => legacy}/listCollections.json | 0 .../listDatabaseNames-serverErrors.json | 0 .../{ => legacy}/listDatabaseNames.json | 0 .../listDatabaseObjects-serverErrors.json | 0 .../{ => legacy}/listDatabaseObjects.json | 0 .../listDatabases-serverErrors.json | 0 .../{ => legacy}/listDatabases.json | 0 .../listIndexNames-serverErrors.json | 0 .../{ => legacy}/listIndexNames.json | 0 .../listIndexes-serverErrors.json | 0 .../{ => legacy}/listIndexes.json | 0 .../{ => legacy}/mapReduce.json | 0 .../unified/handshakeError.json | 257 ++++++++++++++++ .../unified/handshakeError.json | 279 ++++++++++++++++++ test/test_retryable_reads.py | 2 +- test/test_retryable_reads_unified.py | 32 ++ .../unified/retryable-abort-handshake.json | 204 +++++++++++++ .../unified/retryable-commit-handshake.json | 211 +++++++++++++ test/unified_format.py | 3 +- 51 files changed, 985 insertions(+), 3 deletions(-) rename test/retryable_reads/{ => legacy}/aggregate-merge.json (100%) rename test/retryable_reads/{ => legacy}/aggregate-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/aggregate.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-client.watch-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-client.watch.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.coll.watch-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.coll.watch.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.watch-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/changeStreams-db.watch.json (100%) rename test/retryable_reads/{ => legacy}/count-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/count.json (100%) rename test/retryable_reads/{ => legacy}/countDocuments-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/countDocuments.json (100%) rename test/retryable_reads/{ => legacy}/distinct-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/distinct.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-4.9.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-pre4.9.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-serverErrors-4.9.json (100%) rename test/retryable_reads/{ => legacy}/estimatedDocumentCount-serverErrors-pre4.9.json (100%) rename test/retryable_reads/{ => legacy}/find-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/find.json (100%) rename test/retryable_reads/{ => legacy}/findOne-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/findOne.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-download-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-download.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-downloadByName-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/gridfs-downloadByName.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionNames-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionNames.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionObjects-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listCollectionObjects.json (100%) rename test/retryable_reads/{ => legacy}/listCollections-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listCollections.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseNames-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseNames.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseObjects-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listDatabaseObjects.json (100%) rename test/retryable_reads/{ => legacy}/listDatabases-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listDatabases.json (100%) rename test/retryable_reads/{ => legacy}/listIndexNames-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listIndexNames.json (100%) rename test/retryable_reads/{ => legacy}/listIndexes-serverErrors.json (100%) rename test/retryable_reads/{ => legacy}/listIndexes.json (100%) rename test/retryable_reads/{ => legacy}/mapReduce.json (100%) create mode 100644 test/retryable_reads/unified/handshakeError.json create mode 100644 test/retryable_writes/unified/handshakeError.json create mode 100644 test/test_retryable_reads_unified.py create mode 100644 test/transactions/unified/retryable-abort-handshake.json create mode 100644 test/transactions/unified/retryable-commit-handshake.json diff --git a/test/retryable_reads/aggregate-merge.json b/test/retryable_reads/legacy/aggregate-merge.json similarity index 100% rename from test/retryable_reads/aggregate-merge.json rename to test/retryable_reads/legacy/aggregate-merge.json diff --git a/test/retryable_reads/aggregate-serverErrors.json b/test/retryable_reads/legacy/aggregate-serverErrors.json similarity index 100% rename from test/retryable_reads/aggregate-serverErrors.json rename to test/retryable_reads/legacy/aggregate-serverErrors.json diff --git a/test/retryable_reads/aggregate.json b/test/retryable_reads/legacy/aggregate.json similarity index 100% rename from test/retryable_reads/aggregate.json rename to test/retryable_reads/legacy/aggregate.json diff --git a/test/retryable_reads/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json similarity index 100% rename from test/retryable_reads/changeStreams-client.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json diff --git a/test/retryable_reads/changeStreams-client.watch.json b/test/retryable_reads/legacy/changeStreams-client.watch.json similarity index 100% rename from test/retryable_reads/changeStreams-client.watch.json rename to test/retryable_reads/legacy/changeStreams-client.watch.json diff --git a/test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json similarity index 100% rename from test/retryable_reads/changeStreams-db.coll.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json diff --git a/test/retryable_reads/changeStreams-db.coll.watch.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch.json similarity index 100% rename from test/retryable_reads/changeStreams-db.coll.watch.json rename to test/retryable_reads/legacy/changeStreams-db.coll.watch.json diff --git a/test/retryable_reads/changeStreams-db.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json similarity index 100% rename from test/retryable_reads/changeStreams-db.watch-serverErrors.json rename to test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json diff --git a/test/retryable_reads/changeStreams-db.watch.json b/test/retryable_reads/legacy/changeStreams-db.watch.json similarity index 100% rename from test/retryable_reads/changeStreams-db.watch.json rename to test/retryable_reads/legacy/changeStreams-db.watch.json diff --git a/test/retryable_reads/count-serverErrors.json b/test/retryable_reads/legacy/count-serverErrors.json similarity index 100% rename from test/retryable_reads/count-serverErrors.json rename to test/retryable_reads/legacy/count-serverErrors.json diff --git a/test/retryable_reads/count.json b/test/retryable_reads/legacy/count.json similarity index 100% rename from test/retryable_reads/count.json rename to test/retryable_reads/legacy/count.json diff --git a/test/retryable_reads/countDocuments-serverErrors.json b/test/retryable_reads/legacy/countDocuments-serverErrors.json similarity index 100% rename from test/retryable_reads/countDocuments-serverErrors.json rename to test/retryable_reads/legacy/countDocuments-serverErrors.json diff --git a/test/retryable_reads/countDocuments.json b/test/retryable_reads/legacy/countDocuments.json similarity index 100% rename from test/retryable_reads/countDocuments.json rename to test/retryable_reads/legacy/countDocuments.json diff --git a/test/retryable_reads/distinct-serverErrors.json b/test/retryable_reads/legacy/distinct-serverErrors.json similarity index 100% rename from test/retryable_reads/distinct-serverErrors.json rename to test/retryable_reads/legacy/distinct-serverErrors.json diff --git a/test/retryable_reads/distinct.json b/test/retryable_reads/legacy/distinct.json similarity index 100% rename from test/retryable_reads/distinct.json rename to test/retryable_reads/legacy/distinct.json diff --git a/test/retryable_reads/estimatedDocumentCount-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-4.9.json diff --git a/test/retryable_reads/estimatedDocumentCount-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-serverErrors-4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json diff --git a/test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json similarity index 100% rename from test/retryable_reads/estimatedDocumentCount-serverErrors-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json diff --git a/test/retryable_reads/find-serverErrors.json b/test/retryable_reads/legacy/find-serverErrors.json similarity index 100% rename from test/retryable_reads/find-serverErrors.json rename to test/retryable_reads/legacy/find-serverErrors.json diff --git a/test/retryable_reads/find.json b/test/retryable_reads/legacy/find.json similarity index 100% rename from test/retryable_reads/find.json rename to test/retryable_reads/legacy/find.json diff --git a/test/retryable_reads/findOne-serverErrors.json b/test/retryable_reads/legacy/findOne-serverErrors.json similarity index 100% rename from test/retryable_reads/findOne-serverErrors.json rename to test/retryable_reads/legacy/findOne-serverErrors.json diff --git a/test/retryable_reads/findOne.json b/test/retryable_reads/legacy/findOne.json similarity index 100% rename from test/retryable_reads/findOne.json rename to test/retryable_reads/legacy/findOne.json diff --git a/test/retryable_reads/gridfs-download-serverErrors.json b/test/retryable_reads/legacy/gridfs-download-serverErrors.json similarity index 100% rename from test/retryable_reads/gridfs-download-serverErrors.json rename to test/retryable_reads/legacy/gridfs-download-serverErrors.json diff --git a/test/retryable_reads/gridfs-download.json b/test/retryable_reads/legacy/gridfs-download.json similarity index 100% rename from test/retryable_reads/gridfs-download.json rename to test/retryable_reads/legacy/gridfs-download.json diff --git a/test/retryable_reads/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json similarity index 100% rename from test/retryable_reads/gridfs-downloadByName-serverErrors.json rename to test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json diff --git a/test/retryable_reads/gridfs-downloadByName.json b/test/retryable_reads/legacy/gridfs-downloadByName.json similarity index 100% rename from test/retryable_reads/gridfs-downloadByName.json rename to test/retryable_reads/legacy/gridfs-downloadByName.json diff --git a/test/retryable_reads/listCollectionNames-serverErrors.json b/test/retryable_reads/legacy/listCollectionNames-serverErrors.json similarity index 100% rename from test/retryable_reads/listCollectionNames-serverErrors.json rename to test/retryable_reads/legacy/listCollectionNames-serverErrors.json diff --git a/test/retryable_reads/listCollectionNames.json b/test/retryable_reads/legacy/listCollectionNames.json similarity index 100% rename from test/retryable_reads/listCollectionNames.json rename to test/retryable_reads/legacy/listCollectionNames.json diff --git a/test/retryable_reads/listCollectionObjects-serverErrors.json b/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json similarity index 100% rename from test/retryable_reads/listCollectionObjects-serverErrors.json rename to test/retryable_reads/legacy/listCollectionObjects-serverErrors.json diff --git a/test/retryable_reads/listCollectionObjects.json b/test/retryable_reads/legacy/listCollectionObjects.json similarity index 100% rename from test/retryable_reads/listCollectionObjects.json rename to test/retryable_reads/legacy/listCollectionObjects.json diff --git a/test/retryable_reads/listCollections-serverErrors.json b/test/retryable_reads/legacy/listCollections-serverErrors.json similarity index 100% rename from test/retryable_reads/listCollections-serverErrors.json rename to test/retryable_reads/legacy/listCollections-serverErrors.json diff --git a/test/retryable_reads/listCollections.json b/test/retryable_reads/legacy/listCollections.json similarity index 100% rename from test/retryable_reads/listCollections.json rename to test/retryable_reads/legacy/listCollections.json diff --git a/test/retryable_reads/listDatabaseNames-serverErrors.json b/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json similarity index 100% rename from test/retryable_reads/listDatabaseNames-serverErrors.json rename to test/retryable_reads/legacy/listDatabaseNames-serverErrors.json diff --git a/test/retryable_reads/listDatabaseNames.json b/test/retryable_reads/legacy/listDatabaseNames.json similarity index 100% rename from test/retryable_reads/listDatabaseNames.json rename to test/retryable_reads/legacy/listDatabaseNames.json diff --git a/test/retryable_reads/listDatabaseObjects-serverErrors.json b/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json similarity index 100% rename from test/retryable_reads/listDatabaseObjects-serverErrors.json rename to test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json diff --git a/test/retryable_reads/listDatabaseObjects.json b/test/retryable_reads/legacy/listDatabaseObjects.json similarity index 100% rename from test/retryable_reads/listDatabaseObjects.json rename to test/retryable_reads/legacy/listDatabaseObjects.json diff --git a/test/retryable_reads/listDatabases-serverErrors.json b/test/retryable_reads/legacy/listDatabases-serverErrors.json similarity index 100% rename from test/retryable_reads/listDatabases-serverErrors.json rename to test/retryable_reads/legacy/listDatabases-serverErrors.json diff --git a/test/retryable_reads/listDatabases.json b/test/retryable_reads/legacy/listDatabases.json similarity index 100% rename from test/retryable_reads/listDatabases.json rename to test/retryable_reads/legacy/listDatabases.json diff --git a/test/retryable_reads/listIndexNames-serverErrors.json b/test/retryable_reads/legacy/listIndexNames-serverErrors.json similarity index 100% rename from test/retryable_reads/listIndexNames-serverErrors.json rename to test/retryable_reads/legacy/listIndexNames-serverErrors.json diff --git a/test/retryable_reads/listIndexNames.json b/test/retryable_reads/legacy/listIndexNames.json similarity index 100% rename from test/retryable_reads/listIndexNames.json rename to test/retryable_reads/legacy/listIndexNames.json diff --git a/test/retryable_reads/listIndexes-serverErrors.json b/test/retryable_reads/legacy/listIndexes-serverErrors.json similarity index 100% rename from test/retryable_reads/listIndexes-serverErrors.json rename to test/retryable_reads/legacy/listIndexes-serverErrors.json diff --git a/test/retryable_reads/listIndexes.json b/test/retryable_reads/legacy/listIndexes.json similarity index 100% rename from test/retryable_reads/listIndexes.json rename to test/retryable_reads/legacy/listIndexes.json diff --git a/test/retryable_reads/mapReduce.json b/test/retryable_reads/legacy/mapReduce.json similarity index 100% rename from test/retryable_reads/mapReduce.json rename to test/retryable_reads/legacy/mapReduce.json diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json new file mode 100644 index 0000000000..2cf1d173f8 --- /dev/null +++ b/test/retryable_reads/unified/handshakeError.json @@ -0,0 +1,257 @@ +{ + "description": "retryable reads handshake failures", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "find succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 2 + } + }, + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ] + }, + { + "description": "find succeeds after retryable handshake network error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 2 + } + }, + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json new file mode 100644 index 0000000000..6d6b4ac491 --- /dev/null +++ b/test/retryable_writes/unified/handshakeError.json @@ -0,0 +1,279 @@ +{ + "description": "retryable writes handshake failures", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne succeeds after retryable handshake error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after retryable handshake error ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 808477a8c0..01fe6901ae 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -46,7 +46,7 @@ from pymongo.write_concern import WriteConcern # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads") +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") class TestClientOptions(PyMongoTestCase): diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py new file mode 100644 index 0000000000..6bf4157763 --- /dev/null +++ b/test/test_retryable_reads_unified.py @@ -0,0 +1,32 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Reads unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/transactions/unified/retryable-abort-handshake.json b/test/transactions/unified/retryable-abort-handshake.json new file mode 100644 index 0000000000..4ad56e2f2f --- /dev/null +++ b/test/transactions/unified/retryable-abort-handshake.json @@ -0,0 +1,204 @@ +{ + "description": "retryable abortTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "AbortTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-handshake.json b/test/transactions/unified/retryable-commit-handshake.json new file mode 100644 index 0000000000..d9315a8fc6 --- /dev/null +++ b/test/transactions/unified/retryable-commit-handshake.json @@ -0,0 +1,211 @@ +{ + "description": "retryable commitTransaction on handshake errors", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionCheckOutStartedEvent" + ], + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-handshake-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "CommitTransaction succeeds after handshake network error", + "skipReason": "DRIVERS-2032: Pinned servers need to be checked if they are still selectable", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "saslContinue", + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session1" + }, + "expectError": { + "isError": true + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "startTransaction": true + }, + "commandName": "insert", + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-handshake-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-handshake-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 6f1e386932..5bf98c5451 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1153,8 +1153,7 @@ def check_events(self, spec): self.assertEqual(actual_events, []) continue - if len(events) > len(actual_events): - self.fail("Expected to see %s events, got %s" % (len(events), len(actual_events))) + self.assertGreaterEqual(len(actual_events), len(events), actual_events) for idx, expected_event in enumerate(events): self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) From a3f0f9158814ba0d4881bedc2cb78be52986ddfa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 25 Feb 2022 10:36:05 -0800 Subject: [PATCH 0603/2111] PYTHON-3123 Convert sessions spec tests to unified test format (#888) Create implicit session _before_ starting a retryable read. --- pymongo/collection.py | 45 +- .../driver-sessions-dirty-session-errors.json | 969 ++++++++++++++++++ .../driver-sessions-server-support.json | 256 +++++ .../sessions/legacy/dirty-session-errors.json | 671 ------------ test/sessions/legacy/server-support.json | 181 ---- ...t-sessions-not-supported-client-error.json | 0 ...t-sessions-not-supported-server-error.json | 0 .../snapshot-sessions-unsupported-ops.json | 0 .../{unified => }/snapshot-sessions.json | 0 test/test_session.py | 53 +- test/test_sessions_unified.py | 2 +- 11 files changed, 1251 insertions(+), 926 deletions(-) create mode 100644 test/sessions/driver-sessions-dirty-session-errors.json create mode 100644 test/sessions/driver-sessions-server-support.json delete mode 100644 test/sessions/legacy/dirty-session-errors.json delete mode 100644 test/sessions/legacy/server-support.json rename test/sessions/{unified => }/snapshot-sessions-not-supported-client-error.json (100%) rename test/sessions/{unified => }/snapshot-sessions-not-supported-server-error.json (100%) rename test/sessions/{unified => }/snapshot-sessions-unsupported-ops.json (100%) rename test/sessions/{unified => }/snapshot-sessions.json (100%) diff --git a/pymongo/collection.py b/pymongo/collection.py index bfe2007d5a..46916f98f8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1720,9 +1720,9 @@ def _cmd(session, server, sock_info, read_preference): # MongoDB < 4.9 cmd = SON([("count", self.__name)]) cmd.update(kwargs) - return self._count_cmd(None, sock_info, read_preference, cmd, collation=None) + return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) - return self.__database.client._retryable_read(_cmd, self.read_preference, None) + return self._retryable_non_cursor_read(_cmd, None) def count_documents( self, @@ -1807,9 +1807,13 @@ def _cmd(session, server, sock_info, read_preference): return 0 return result["n"] - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session - ) + return self._retryable_non_cursor_read(_cmd, session) + + def _retryable_non_cursor_read(self, func, session): + """Non-cursor read helper to handle implicit session creation.""" + client = self.__database.client + with client._tmp_session(session) as s: + return client._retryable_read(func, self._read_preference_for(s), s) def create_indexes( self, @@ -2157,30 +2161,31 @@ def list_indexes( codec_options=codec_options, read_preference=ReadPreference.PRIMARY ) read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + explicit_session = session is not None def _cmd(session, server, sock_info, read_preference): cmd = SON([("listIndexes", self.__name), ("cursor", {})]) if comment is not None: cmd["comment"] = comment - with self.__database.client._tmp_session(session, False) as s: - try: - cursor = self._command( - sock_info, cmd, read_preference, codec_options, session=s - )["cursor"] - except OperationFailure as exc: - # Ignore NamespaceNotFound errors to match the behavior - # of reading from *.system.indexes. - if exc.code != 26: - raise - cursor = {"id": 0, "firstBatch": []} + try: + cursor = self._command( + sock_info, cmd, read_preference, codec_options, session=session + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=s, explicit_session=session is not None + coll, cursor, sock_info.address, session=session, explicit_session=explicit_session ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor - return self.__database.client._retryable_read(_cmd, read_pref, session) + with self.__database.client._tmp_session(session, False) as s: + return self.__database.client._retryable_read(_cmd, read_pref, s) def index_information( self, @@ -2701,9 +2706,7 @@ def _cmd(session, server, sock_info, read_preference): user_fields={"values": 1}, )["values"] - return self.__database.client._retryable_read( - _cmd, self._read_preference_for(session), session - ) + return self._retryable_non_cursor_read(_cmd, session) def _write_concern_for_cmd(self, cmd, session): raw_wc = cmd.get("writeConcern") diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json new file mode 100644 index 0000000000..88a9171db1 --- /dev/null +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -0,0 +1,969 @@ +{ + "description": "driver-sessions-dirty-session-errors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Dirty explicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 2 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "Dirty explicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "assertSessionDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (insert)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1 + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (findAndModify)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1 + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": false, + "lsid": { + "$$type": "object" + }, + "txnNumber": 1, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Dirty implicit session is discarded (read not returning cursor)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": 1 + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertDifferentLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "lsid": { + "$$type": "object" + } + }, + "commandName": "aggregate", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/driver-sessions-server-support.json b/test/sessions/driver-sessions-server-support.json new file mode 100644 index 0000000000..55312b32eb --- /dev/null +++ b/test/sessions/driver-sessions-server-support.json @@ -0,0 +1,256 @@ +{ + "description": "driver-sessions-server-support", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "session-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Server supports explicit sessions", + "operations": [ + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "assertSessionNotDirty", + "object": "testRunner", + "arguments": { + "session": "session0" + } + }, + { + "name": "endSession", + "object": "session0" + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$sessionLsid": "session0" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Server supports implicit sessions", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": -1 + } + }, + "expectResult": [] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client0" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$type": "object" + } + }, + "commandName": "insert", + "databaseName": "session-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": -1 + }, + "lsid": { + "$$type": "object" + } + }, + "commandName": "find", + "databaseName": "session-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "session-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/sessions/legacy/dirty-session-errors.json b/test/sessions/legacy/dirty-session-errors.json deleted file mode 100644 index 77f71c7623..0000000000 --- a/test/sessions/legacy/dirty-session-errors.json +++ /dev/null @@ -1,671 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "session-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - } - ], - "tests": [ - { - "description": "Dirty explicit session is discarded", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - }, - { - "description": "Dirty explicit session is discarded (non-bulk write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 1 - } - }, - { - "name": "assertSessionDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "txnNumber": { - "$numberLong": "1" - } - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (non-bulk write)", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "txnNumber": { - "$numberLong": "1" - }, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "x": 1 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (read)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ] - }, - "error": true - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "Dirty implicit session is discarded (non-cursor returning read)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertDifferentLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/sessions/legacy/server-support.json b/test/sessions/legacy/server-support.json deleted file mode 100644 index 967c9143fd..0000000000 --- a/test/sessions/legacy/server-support.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6.0" - } - ], - "database_name": "session-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - } - ], - "tests": [ - { - "description": "Server supports explicit sessions", - "operations": [ - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "assertSessionNotDirty", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "endSession", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0" - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - }, - "lsid": "session0" - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Server supports implicit sessions", - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": -1 - } - }, - "result": [] - }, - { - "name": "assertSameLsidOnLastTwoCommands", - "object": "testRunner" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "session-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": -1 - } - }, - "command_name": "find", - "database_name": "session-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/sessions/unified/snapshot-sessions-not-supported-client-error.json b/test/sessions/snapshot-sessions-not-supported-client-error.json similarity index 100% rename from test/sessions/unified/snapshot-sessions-not-supported-client-error.json rename to test/sessions/snapshot-sessions-not-supported-client-error.json diff --git a/test/sessions/unified/snapshot-sessions-not-supported-server-error.json b/test/sessions/snapshot-sessions-not-supported-server-error.json similarity index 100% rename from test/sessions/unified/snapshot-sessions-not-supported-server-error.json rename to test/sessions/snapshot-sessions-not-supported-server-error.json diff --git a/test/sessions/unified/snapshot-sessions-unsupported-ops.json b/test/sessions/snapshot-sessions-unsupported-ops.json similarity index 100% rename from test/sessions/unified/snapshot-sessions-unsupported-ops.json rename to test/sessions/snapshot-sessions-unsupported-ops.json diff --git a/test/sessions/unified/snapshot-sessions.json b/test/sessions/snapshot-sessions.json similarity index 100% rename from test/sessions/unified/snapshot-sessions.json rename to test/sessions/snapshot-sessions.json diff --git a/test/test_session.py b/test/test_session.py index b7aa65a19d..ec39bb2411 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -15,7 +15,6 @@ """Test the client_session module.""" import copy -import os import sys import time from io import BytesIO @@ -26,8 +25,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import EventListener, TestCreator, rs_or_single_client, wait_until -from test.utils_spec_runner import SpecRunner +from test.utils import EventListener, rs_or_single_client, wait_until from bson import DBRef from gridfs import GridFS, GridFSBucket @@ -1095,54 +1093,5 @@ def insert_and_aggregate(): ) -class TestSpec(SpecRunner): - RUN_ON_SERVERLESS = True - # Location of JSON test specifications. - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "legacy") - - def last_two_command_events(self): - """Return the last two command started events.""" - started_events = self.listener.results["started"][-2:] - self.assertEqual(2, len(started_events)) - return started_events - - def assert_same_lsid_on_last_two_commands(self): - """Run the assertSameLsidOnLastTwoCommands test operation.""" - event1, event2 = self.last_two_command_events() - self.assertEqual(event1.command["lsid"], event2.command["lsid"]) - - def assert_different_lsid_on_last_two_commands(self): - """Run the assertDifferentLsidOnLastTwoCommands test operation.""" - event1, event2 = self.last_two_command_events() - self.assertNotEqual(event1.command["lsid"], event2.command["lsid"]) - - def assert_session_dirty(self, session): - """Run the assertSessionDirty test operation. - - Assert that the given session is dirty. - """ - self.assertIsNotNone(session._server_session) - self.assertTrue(session._server_session.dirty) - - def assert_session_not_dirty(self, session): - """Run the assertSessionNotDirty test operation. - - Assert that the given session is not dirty. - """ - self.assertIsNotNone(session._server_session) - self.assertFalse(session._server_session.dirty) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_test, TestSpec, TestSpec.TEST_PATH) -test_creator.create_tests() - if __name__ == "__main__": unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py index 2320d52718..8a6b8bc9bf 100644 --- a/test/test_sessions_unified.py +++ b/test/test_sessions_unified.py @@ -23,7 +23,7 @@ from test.unified_format import generate_test_classes # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions", "unified") +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) From 0672d2d1c3092cacdaa695151a8c6a306c9d60d5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Feb 2022 17:02:53 -0600 Subject: [PATCH 0604/2111] PYTHON-3141 Add slotscheck to pre-commit checks (#890) --- .pre-commit-config.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5c1e92f5b7..2fc5100787 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,6 +34,7 @@ repos: rev: 3.9.2 hooks: - id: flake8 + files: \.py$ additional_dependencies: [ 'flake8-bugbear==20.1.4', 'flake8-logging-format==0.6.0', @@ -57,3 +58,11 @@ repos: files: ^\.github/workflows/ types: [yaml] args: ["--schemafile", "https://json.schemastore.org/github-workflow"] + +- repo: https://github.com/ariebovenberg/slotscheck + rev: v0.14.0 + hooks: + - id: slotscheck + files: \.py$ + exclude: "^(test|tools)/" + stages: [manual] From 782c5517e09a532de4c2f68d7776b0caed2062cb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Mar 2022 14:10:57 -0600 Subject: [PATCH 0605/2111] PYTHON-3146 Test Failure - Could not import extension sphinxcontrib.shellcheck (#889) --- doc/conf.py | 11 ++++++++++- doc/examples/bulk.rst | 2 +- doc/examples/geo.rst | 2 +- doc/faq.rst | 7 +------ 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index c2f97dabfe..714e6121d4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -21,9 +21,18 @@ "sphinx.ext.coverage", "sphinx.ext.todo", "sphinx.ext.intersphinx", - "sphinxcontrib.shellcheck", ] + +# Add optional extensions +try: + import sphinxcontrib.shellcheck # noqa + + extensions += ["sphinxcontrib.shellcheck"] +except ImportError: + pass + + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 23505268f0..23367dd2c5 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -145,7 +145,7 @@ and fourth operations succeed. 'index': 0,... 'op': {'_id': 1}}, {'code': 11000, - 'errmsg': '...E11000...duplicate key error...', + 'errmsg': '...', 'index': 2,... 'op': {'_id': 3}}]} diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 9fe62f910b..2234a20757 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -36,7 +36,7 @@ insert a couple of example locations: >>> result = db.places.insert_many([{"loc": [2, 5]}, ... {"loc": [30, 5]}, ... {"loc": [1, 2]}, - ... {"loc": [4, 4]}]) # doctest: +ELLIPSIS + ... {"loc": [4, 4]}]) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] diff --git a/doc/faq.rst b/doc/faq.rst index c2a6fc7f7f..44c1c9a981 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -264,12 +264,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=, - tz_aware=False, - uuid_representation=UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler='strict', - tzinfo=None, type_registry=TypeRegistry(type_codecs=[], - fallback_encoder=None)) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with From b737b843e974d9524fdbfbc8d18e0004b7743715 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 1 Mar 2022 15:44:05 -0800 Subject: [PATCH 0606/2111] PYTHON-2956 Drivers should check out an implicit session only after checking out a connection (#876) --- pymongo/client_session.py | 23 ++++++++++++- pymongo/mongo_client.py | 9 ++++- pymongo/topology.py | 16 +++++---- test/test_session.py | 69 +++++++++++++++++++++++++++++++++++++-- 4 files changed, 106 insertions(+), 11 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 4cf41b2c70..20d36fb062 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -947,9 +947,16 @@ def _txn_read_preference(self): return self._transaction.opts.read_preference return None + def _materialize(self): + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session() + if old.started_retryable_write: + self._server_session.inc_transaction_id() + def _apply_to(self, command, is_retryable, read_preference, sock_info): self._check_ended() - + self._materialize() if self.options.snapshot: self._update_read_concern(command, sock_info) @@ -1000,6 +1007,20 @@ def __copy__(self): raise TypeError("A ClientSession cannot be copied, create a new session instead") +class _EmptyServerSession: + __slots__ = "dirty", "started_retryable_write" + + def __init__(self): + self.dirty = False + self.started_retryable_write = False + + def mark_dirty(self): + self.dirty = True + + def inc_transaction_id(self): + self.started_retryable_write = True + + class _ServerSession(object): def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4965b5e439..4ac4a5ba8f 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -66,6 +66,7 @@ ) from pymongo.change_stream import ChangeStream, ClusterChangeStream from pymongo.client_options import ClientOptions +from pymongo.client_session import _EmptyServerSession from pymongo.command_cursor import CommandCursor from pymongo.errors import ( AutoReconnect, @@ -1601,7 +1602,11 @@ def _process_periodic_tasks(self): def __start_session(self, implicit, **kwargs): # Raises ConfigurationError if sessions are not supported. - server_session = self._get_server_session() + if implicit: + self._topology._check_implicit_session_support() + server_session = _EmptyServerSession() + else: + server_session = self._get_server_session() opts = client_session.SessionOptions(**kwargs) return client_session.ClientSession(self, server_session, opts, implicit) @@ -1641,6 +1646,8 @@ def _get_server_session(self): def _return_server_session(self, server_session, lock): """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return return self._topology.return_server_session(server_session, lock) def _ensure_session(self, session=None): diff --git a/pymongo/topology.py b/pymongo/topology.py index 6134b8201b..03e0d4ee17 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -514,8 +514,15 @@ def pop_all_sessions(self): with self._lock: return self._session_pool.pop_all() + def _check_implicit_session_support(self): + with self._lock: + self._check_session_support() + def _check_session_support(self): - """Internal check for session support on non-load balanced clusters.""" + """Internal check for session support on clusters.""" + if self._settings.load_balanced: + # Sessions never time out in load balanced mode. + return float("inf") session_timeout = self._description.logical_session_timeout_minutes if session_timeout is None: # Maybe we need an initial scan? Can raise ServerSelectionError. @@ -537,12 +544,7 @@ def _check_session_support(self): def get_server_session(self): """Start or resume a server session, or raise ConfigurationError.""" with self._lock: - # Sessions are always supported in load balanced mode. - if not self._settings.load_balanced: - session_timeout = self._check_session_support() - else: - # Sessions never time out in load balanced mode. - session_timeout = float("inf") + session_timeout = self._check_session_support() return self._session_pool.get_server_session(session_timeout) def return_server_session(self, server_session, lock): diff --git a/test/test_session.py b/test/test_session.py index ec39bb2411..53609c70cb 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -18,20 +18,28 @@ import sys import time from io import BytesIO -from typing import Set +from typing import Any, Callable, List, Set, Tuple from pymongo.mongo_client import MongoClient sys.path[0:0] = [""] from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import EventListener, rs_or_single_client, wait_until +from test.utils import ( + EventListener, + ExceptionCatchingThread, + rs_or_single_client, + wait_until, +) from bson import DBRef from gridfs import GridFS, GridFSBucket from pymongo import ASCENDING, IndexModel, InsertOne, monitoring +from pymongo.command_cursor import CommandCursor from pymongo.common import _MAX_END_SESSIONS +from pymongo.cursor import Cursor from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import UpdateOne from pymongo.read_concern import ReadConcern @@ -171,6 +179,63 @@ def _test_ops(self, client, *ops): "%s did not return implicit session to pool" % (f.__name__,), ) + def test_implicit_sessions_checkout(self): + # "To confirm that implicit sessions only allocate their server session after a + # successful connection checkout" test from Driver Sessions Spec. + succeeded = False + failures = 0 + for _ in range(5): + listener = EventListener() + client = rs_or_single_client( + event_listeners=[listener], maxPoolSize=1, retryWrites=True + ) + cursor = client.db.test.find({}) + ops: List[Tuple[Callable, List[Any]]] = [ + (client.db.test.find_one, [{"_id": 1}]), + (client.db.test.delete_one, [{}]), + (client.db.test.update_one, [{}, {"$set": {"x": 2}}]), + (client.db.test.bulk_write, [[UpdateOne({}, {"$set": {"x": 2}})]]), + (client.db.test.find_one_and_delete, [{}]), + (client.db.test.find_one_and_update, [{}, {"$set": {"x": 1}}]), + (client.db.test.find_one_and_replace, [{}, {}]), + (client.db.test.aggregate, [[{"$limit": 1}]]), + (client.db.test.find, []), + (client.server_info, [{}]), + (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), + (cursor.distinct, ["_id"]), + (client.db.list_collections, []), + ] + threads = [] + listener.results.clear() + + def thread_target(op, *args): + res = op(*args) + if isinstance(res, (Cursor, CommandCursor)): + list(res) + + for op, args in ops: + threads.append( + ExceptionCatchingThread( + target=thread_target, args=[op, *args], name=op.__name__ + ) + ) + threads[-1].start() + self.assertEqual(len(threads), len(ops)) + for thread in threads: + thread.join() + self.assertIsNone(thread.exc) + client.close() + lsid_set = set() + for i in listener.results["started"]: + if i.command.get("lsid"): + lsid_set.add(i.command.get("lsid")["id"]) + if len(lsid_set) == 1: + succeeded = True + else: + failures += 1 + print(failures) + self.assertTrue(succeeded) + def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. a = self.client.start_session() From a61ea0660a0fa2aa9e6f67384e88ed60df803229 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Mar 2022 13:10:15 -0600 Subject: [PATCH 0607/2111] PYTHON-3090 Clean up Database Command Typing (#879) --- .github/workflows/test-python.yml | 2 +- bson/codec_options.py | 3 +- mypy.ini | 5 +- pymongo/database.py | 7 +-- pymongo/encryption.py | 2 +- pymongo/mongo_client.py | 13 +++-- pymongo/typings.py | 6 +-- test/mypy_fails/raw_bson_document.py | 13 +++++ test/mypy_fails/typedict_client.py | 18 +++++++ test/test_client.py | 2 +- test/test_mypy.py | 81 +++++++++++++++++++++++++++- 11 files changed, 131 insertions(+), 21 deletions(-) create mode 100644 test/mypy_fails/raw_bson_document.py create mode 100644 test/mypy_fails/typedict_client.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 046915b04a..5fda9b8817 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -64,4 +64,4 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --exclude "test/mypy_fails/*.*" test + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test diff --git a/bson/codec_options.py b/bson/codec_options.py index 8e5f97df30..b4436dfdb8 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -115,6 +115,7 @@ class TypeCodec(TypeEncoder, TypeDecoder): _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] +_DocumentClass = Union[Type[MutableMapping], Type["RawBSONDocument"]] class TypeRegistry(object): @@ -293,7 +294,7 @@ class CodecOptions(_options_base): def __new__( cls: Type["CodecOptions"], - document_class: Union[Type[MutableMapping], Type["RawBSONDocument"]] = dict, + document_class: _DocumentClass = dict, tz_aware: bool = False, uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler: Optional[str] = "strict", diff --git a/mypy.ini b/mypy.ini index 91b1121cd5..9b1348472c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,9 +32,8 @@ ignore_missing_imports = True [mypy-snappy.*] ignore_missing_imports = True -[mypy-test.*] -allow_redefinition = true -allow_untyped_globals = true +[mypy-test.test_mypy] +warn_unused_ignores = false [mypy-winkerberos.*] ignore_missing_imports = True diff --git a/pymongo/database.py b/pymongo/database.py index f43f18d017..c2f2eb4bc0 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -24,6 +24,7 @@ Optional, Sequence, Union, + cast, ) from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions @@ -37,7 +38,7 @@ from pymongo.command_cursor import CommandCursor from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentOut, _DocumentType, _Pipeline def _check_name(name): @@ -620,7 +621,7 @@ def command( session: Optional["ClientSession"] = None, comment: Optional[Any] = None, **kwargs: Any, - ) -> Dict[str, Any]: + ) -> _DocumentOut: """Issue a MongoDB command. Send command `command` to the database and return the @@ -974,7 +975,7 @@ def validate_collection( if background is not None: cmd["background"] = background - result = self.command(cmd, session=session) + result = cast(dict, self.command(cmd, session=session)) valid = True # Pre 1.9 results diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 9616ac89cd..502c83e47b 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -290,7 +290,7 @@ def _get_internal_client(encrypter, mongo_client): db, coll = opts._key_vault_namespace.split(".", 1) key_vault_coll = key_vault_client[db][coll] - mongocryptd_client = MongoClient( + mongocryptd_client: MongoClient = MongoClient( opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4ac4a5ba8f..cd9067f463 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -120,7 +120,7 @@ def __init__( self, host: Optional[Union[str, Sequence[str]]] = None, port: Optional[int] = None, - document_class: Type[_DocumentType] = dict, + document_class: Optional[Type[_DocumentType]] = None, tz_aware: Optional[bool] = None, connect: Optional[bool] = None, type_registry: Optional[TypeRegistry] = None, @@ -652,7 +652,7 @@ def __init__( self.__init_kwargs: Dict[str, Any] = { "host": host, "port": port, - "document_class": document_class, + "document_class": document_class or dict, "tz_aware": tz_aware, "connect": connect, "type_registry": type_registry, @@ -676,7 +676,7 @@ def __init__( # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts["document_class"] = document_class + keyword_opts["document_class"] = document_class or dict seeds = set() username = None @@ -1717,8 +1717,11 @@ def server_info(self, session: Optional[client_session.ClientSession] = None) -> .. versionchanged:: 3.6 Added ``session`` parameter. """ - return self.admin.command( - "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + return cast( + dict, + self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), ) def list_databases( diff --git a/pymongo/typings.py b/pymongo/typings.py index 19d92b2381..14e059a8f0 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -16,7 +16,6 @@ from typing import ( TYPE_CHECKING, Any, - Dict, Mapping, MutableMapping, Optional, @@ -36,6 +35,5 @@ _CollationIn = Union[Mapping[str, Any], "Collation"] _DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentType = TypeVar( - "_DocumentType", Mapping[str, Any], MutableMapping[str, Any], Dict[str, Any] -) +_DocumentOut = _DocumentIn +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py new file mode 100644 index 0000000000..427140dfac --- /dev/null +++ b/test/mypy_fails/raw_bson_document.py @@ -0,0 +1,13 @@ +from bson.raw_bson import RawBSONDocument +from pymongo import MongoClient + +client = MongoClient(document_class=RawBSONDocument) +coll = client.test.test +doc = {"my": "doc"} +coll.insert_one(doc) +retreived = coll.find_one({"_id": doc["_id"]}) +assert retreived is not None +assert len(retreived.raw) > 0 +retreived[ + "foo" +] = "bar" # error: Unsupported target for indexed assignment ("RawBSONDocument") [index] diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py new file mode 100644 index 0000000000..24dd84ee28 --- /dev/null +++ b/test/mypy_fails/typedict_client.py @@ -0,0 +1,18 @@ +from typing import TypedDict + +from pymongo import MongoClient + + +class Movie(TypedDict): + name: str + year: int + + +client: MongoClient[Movie] = MongoClient() +coll = client.test.test +retreived = coll.find_one({"_id": "foo"}) +assert retreived is not None +assert retreived["year"] == 1 +assert ( + retreived["name"] == 2 +) # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap] diff --git a/test/test_client.py b/test/test_client.py index 9f01c1c054..29a5b0f1d5 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -709,7 +709,7 @@ def test_repr(self): # Used to test 'eval' below. import bson # noqa: F401 - client = MongoClient( + client = MongoClient( # type: ignore[type-var] "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" "&connectTimeoutMS=12345&w=1&wtimeoutms=100", connect=False, diff --git a/test/test_mypy.py b/test/test_mypy.py index 36fe2ed424..55794e138e 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -17,17 +17,32 @@ import os import unittest -from typing import Any, Dict, Iterable, List +from typing import TYPE_CHECKING, Any, Dict, Iterable, List + +try: + from typing import TypedDict # type: ignore[attr-defined] + + # Not available in Python 3.6 and Python 3.7 + class Movie(TypedDict): # type: ignore[misc] + name: str + year: int + +except ImportError: + TypeDict = None + try: from mypy import api except ImportError: - api = None + api = None # type: ignore[assignment] from test import IntegrationTest +from test.utils import rs_or_single_client +from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.collection import Collection +from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -48,6 +63,8 @@ def ensure_mypy_fails(self, filename: str) -> None: def test_mypy_failures(self) -> None: for filename in get_tests(): + if filename == "typeddict_client.py" and TypedDict is None: + continue with self.subTest(filename=filename): self.ensure_mypy_fails(filename) @@ -87,6 +104,66 @@ def test_bulk_write(self) -> None: result = self.coll.bulk_write(requests) self.assertTrue(result.acknowledged) + def test_command(self) -> None: + result = self.client.admin.command("ping") + items = result.items() + + def test_list_collections(self) -> None: + cursor = self.client.test.list_collections() + value = cursor.next() + items = value.items() + + def test_list_databases(self) -> None: + cursor = self.client.list_databases() + value = cursor.next() + value.items() + + def test_default_document_type(self) -> None: + client = rs_or_single_client() + coll = client.test.test + doc = {"my": "doc"} + coll.insert_one(doc) + retreived = coll.find_one({"_id": doc["_id"]}) + assert retreived is not None + retreived["a"] = 1 + + def test_explicit_document_type(self) -> None: + if not TYPE_CHECKING: + raise unittest.SkipTest("Do not use raw MongoClient") + client: MongoClient[Dict[str, Any]] = MongoClient() + coll = client.test.test + retreived = coll.find_one({"_id": "foo"}) + assert retreived is not None + retreived["a"] = 1 + + def test_typeddict_document_type(self) -> None: + if not TYPE_CHECKING: + raise unittest.SkipTest("Do not use raw MongoClient") + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + retreived = coll.find_one({"_id": "foo"}) + assert retreived is not None + assert retreived["year"] == 1 + assert retreived["name"] == "a" + + def test_raw_bson_document_type(self) -> None: + if not TYPE_CHECKING: + raise unittest.SkipTest("Do not use raw MongoClient") + client = MongoClient(document_class=RawBSONDocument) + coll = client.test.test + retreived = coll.find_one({"_id": "foo"}) + assert retreived is not None + assert len(retreived.raw) > 0 + + def test_son_document_type(self) -> None: + if not TYPE_CHECKING: + raise unittest.SkipTest("Do not use raw MongoClient") + client = MongoClient(document_class=SON[str, Any]) + coll = client.test.test + retreived = coll.find_one({"_id": "foo"}) + assert retreived is not None + retreived["a"] = 1 + def test_aggregate_pipeline(self) -> None: coll3 = self.client.test.test3 coll3.insert_many( From 671d1e622c03c6ba8453be8929a15d514f20abaf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 3 Mar 2022 12:47:36 -0800 Subject: [PATCH 0608/2111] PYTHON-3147 Fix pip install in MONGODB-AWS auth tests (#892) --- .evergreen/run-mongodb-aws-ecs-test.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index e7bcf1cda5..3484f41f43 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -23,9 +23,13 @@ set -o xtrace if command -v virtualenv ; then VIRTUALENV=$(command -v virtualenv) else + if ! python3 -m pip --version ; then + echo "Installing pip..." + apt-get update + apt install python3-pip -y + fi echo "Installing virtualenv..." - apt install python3-pip -y - pip3 install --user virtualenv + python3 -m pip install --user virtualenv VIRTUALENV='python3 -m virtualenv' fi From f081297a8634abb77f85a8f06c552c76a51f1120 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 4 Mar 2022 17:29:33 -0800 Subject: [PATCH 0609/2111] PYTHON-3159 Fix typo in zlib compression support (#894) --- pymongo/compression_support.py | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index ed7021494f..c9632a43d3 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -13,7 +13,6 @@ # limitations under the License. import warnings -from typing import Callable try: import snappy @@ -105,12 +104,6 @@ def get_compression_context(self, compressors): return ZstdContext() -def _zlib_no_compress(data, level=None): - """Compress data with zlib level 0.""" - cobj = zlib.compressobj(0) - return b"".join([cobj.compress(data), cobj.flush()]) - - class SnappyContext(object): compressor_id = 1 @@ -123,16 +116,10 @@ class ZlibContext(object): compressor_id = 2 def __init__(self, level): - self.compress: Callable[[bytes], bytes] - - # Jython zlib.compress doesn't support -1 - if level == -1: - self.compress = zlib.compress - # Jython zlib.compress also doesn't support 0 - elif level == 0: - self.compress = _zlib_no_compress - else: - self.compresss = lambda data, _: zlib.compress(data, level) + self.level = level + + def compress(self, data: bytes) -> bytes: + return zlib.compress(data, self.level) class ZstdContext(object): From 225d131c2d3f6f0b4c46c130abb3e1452010ad40 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 9 Mar 2022 11:13:18 -0800 Subject: [PATCH 0610/2111] PYTHON-2970 Prioritize electionId over setVersion for stale primary check (#845) --- doc/changelog.rst | 15 ++ pymongo/topology_description.py | 29 ++-- .../rs/electionId_precedence_setVersion.json | 92 +++++++++++ .../rs/null_election_id.json | 30 ++-- .../rs/secondary_ignore_ok_0.json | 2 +- .../rs/set_version_can_rollback.json | 149 ++++++++++++++++++ ...tversion_equal_max_without_electionid.json | 84 ++++++++++ ...on_greaterthan_max_without_electionid.json | 84 ++++++++++ .../rs/setversion_without_electionid.json | 12 +- .../rs/use_setversion_without_electionid.json | 32 ++-- test/test_discovery_and_monitoring.py | 2 + 11 files changed, 481 insertions(+), 50 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json create mode 100644 test/discovery_and_monitoring/rs/set_version_can_rollback.json create mode 100644 test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json create mode 100644 test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 7dd57d5329..61e2b659ec 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -26,6 +26,21 @@ PyMongo 4.1 brings a number of improvements including: - :meth:`gridfs.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. +Bug fixes +......... + +- Fixed a bug where the client could be unable to discover the new primary + after a simultaneous replica set election and reconfig (`PYTHON-2970`_). + +.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 + +Issues Resolved +............... + +See the `PyMongo 4.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30619 Changes in Version 4.0 ---------------------- diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b3dd60680f..9f718376ef 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -17,6 +17,7 @@ from random import sample from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError @@ -531,24 +532,16 @@ def _update_rs_from_primary( sds.pop(server_description.address) return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - max_election_tuple = max_set_version, max_election_id - if None not in server_description.election_tuple: - if ( - None not in max_election_tuple - and max_election_tuple > server_description.election_tuple - ): - - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - max_election_id = server_description.election_id - - if server_description.set_version is not None and ( - max_set_version is None or server_description.set_version > max_set_version - ): - - max_set_version = server_description.set_version + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe >= max_election_safe: + max_election_id, max_set_version = new_election_tuple + else: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id # We've heard from the primary. Is it the same primary as before? for server in sds.values(): diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..a7b49e2b97 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 62120e8448..8eb519595a 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -123,16 +123,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, @@ -174,16 +177,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index 4c1cb011a5..ee9519930b 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "New primary", + "description": "Secondary ignored when ok is zero", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json new file mode 100644 index 0000000000..28ecbeefca --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,149 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "_comment": "Response from new primary with newer election Id", + "responses": [ + [ + "b:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "_comment": "Response from stale primary", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..91e84d4fa0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..b15fd5c1a7 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 2f68287f1d..f59c162ae1 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion is ignored if there is no electionId", + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -63,14 +63,14 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, "electionId": null }, "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, + "type": "Unknown", + "setName": null, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 421ff57c8d..6dd753d5d8 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -71,20 +71,23 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -115,22 +118,25 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { - "$oid": "000000000000000000000001" + "$oid": "000000000000000000000002" } } } diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index d17a0d4166..a97eb65432 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -220,6 +220,8 @@ def create_tests(): dirname = os.path.split(dirpath)[-1] for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json_util.loads(scenario_stream.read()) From 087950d869096cf44a797f6c402985a73ffec16e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Mar 2022 15:49:11 -0500 Subject: [PATCH 0611/2111] PYTHON-3164 Outdated link from PyMongo docs to community forum (#895) --- .github/workflows/test-python.yml | 18 ++++++++++++++++++ bson/binary.py | 2 +- doc/atlas.rst | 2 +- doc/changelog.rst | 2 +- doc/conf.py | 7 +++++++ doc/developer/periodic_executor.rst | 2 +- doc/examples/high_availability.rst | 2 +- doc/examples/mod_wsgi.rst | 6 +++--- doc/examples/tailable.rst | 2 +- doc/examples/tls.rst | 2 +- doc/faq.rst | 2 +- doc/index.rst | 2 +- doc/migrate-to-pymongo4.rst | 7 +++---- doc/tools.rst | 20 ++++++-------------- pymongo/change_stream.py | 2 +- pymongo/collection.py | 8 ++++---- pymongo/database.py | 2 +- pymongo/mongo_client.py | 2 +- pymongo/operations.py | 2 +- 19 files changed, 54 insertions(+), 38 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 5fda9b8817..8eec9d9bf1 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -65,3 +65,21 @@ jobs: run: | mypy --install-types --non-interactive bson gridfs tools pymongo mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + + linkcheck: + name: Check Links + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + cache: 'pip' + cache-dependency-path: 'setup.py' + - name: Install dependencies + run: | + python -m pip install -U pip + python -m pip install sphinx + - name: Check links + run: | + cd doc + make linkcheck diff --git a/bson/binary.py b/bson/binary.py index 93c43ee40c..a270eae8d2 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -208,7 +208,7 @@ class Binary(bytes): - `data`: the binary data to represent. Can be any bytes-like type that implements the buffer protocol. - `subtype` (optional): the `binary subtype - `_ + `_ to use .. versionchanged:: 3.9 diff --git a/doc/atlas.rst b/doc/atlas.rst index 6100e9d3c5..6685cf9fb8 100644 --- a/doc/atlas.rst +++ b/doc/atlas.rst @@ -35,7 +35,7 @@ Connections to Atlas require TLS/SSL. You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ diff --git a/doc/changelog.rst b/doc/changelog.rst index 61e2b659ec..73e2ea9ba4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1072,7 +1072,7 @@ Changes and Deprecations: - Deprecated the MongoClient option `socketKeepAlive`. It now defaults to true and disabling it is not recommended, see `does TCP keepalive time affect MongoDB Deployments? - `_ + `_ - Deprecated :meth:`~pymongo.collection.Collection.initialize_ordered_bulk_op`, :meth:`~pymongo.collection.Collection.initialize_unordered_bulk_op`, and :class:`~pymongo.bulk.BulkOperationBuilder`. Use diff --git a/doc/conf.py b/doc/conf.py index 714e6121d4..a5c5be2694 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -80,6 +80,13 @@ # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] +# Options for link checking +# The anchors on the rendered markdown page are created after the fact, +# so this link results in a 404. +linkcheck_ignore = [ + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check" +] + # -- Options for extensions ---------------------------------------------------- autoclass_content = "init" diff --git a/doc/developer/periodic_executor.rst b/doc/developer/periodic_executor.rst index 6327cfd835..9cb0ce0eb9 100644 --- a/doc/developer/periodic_executor.rst +++ b/doc/developer/periodic_executor.rst @@ -106,7 +106,7 @@ Thus the current design of periodic executors is surprisingly simple: they do a simple `time.sleep` for a half-second, check if it is time to wake or terminate, and sleep again. -.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#requesting-an-immediate-check +.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check .. _PYTHON-863: https://jira.mongodb.org/browse/PYTHON-863 diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index 19b48f7d01..efd7a66cc6 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -308,7 +308,7 @@ milliseconds of the closest member's ping time. replica set *through* a mongos. The equivalent is the localThreshold_ command line option. -.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption--localThreshold +.. _localThreshold: https://docs.mongodb.com/manual/reference/program/mongos/#std-option-mongos.--localThreshold .. _health-monitoring: diff --git a/doc/examples/mod_wsgi.rst b/doc/examples/mod_wsgi.rst index 832d779fd8..96d6ce892f 100644 --- a/doc/examples/mod_wsgi.rst +++ b/doc/examples/mod_wsgi.rst @@ -3,7 +3,7 @@ PyMongo and mod_wsgi ==================== -To run your application under `mod_wsgi `_, +To run your application under `mod_wsgi `_, follow these guidelines: * Run ``mod_wsgi`` in daemon mode with the ``WSGIDaemonProcess`` directive. @@ -48,9 +48,9 @@ interpreter. Python C extensions in general have issues running in multiple Python sub interpreters. These difficulties are explained in the documentation for -`Py_NewInterpreter `_ +`Py_NewInterpreter `_ and in the `Multiple Python Sub Interpreters -`_ +`_ section of the ``mod_wsgi`` documentation. Beginning with PyMongo 2.7, the C extension for BSON detects when it is running diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst index 482b049c56..1242e9ddf5 100644 --- a/doc/examples/tailable.rst +++ b/doc/examples/tailable.rst @@ -5,7 +5,7 @@ By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for `capped collections `_ you may use a `tailable cursor -`_ +`_ that remains open after the client exhausts the results in the initial cursor. The following is a basic example of using a tailable cursor to tail the oplog diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 03ac63a633..f6920ad278 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -32,7 +32,7 @@ MongoDB. You can read more about TLS versions and their security implications here: - ``_ + ``_ .. _python.org: https://www.python.org/downloads/ .. _homebrew: https://brew.sh/ diff --git a/doc/faq.rst b/doc/faq.rst index 44c1c9a981..a7f7c87bdd 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -314,7 +314,7 @@ when it is serialized to BSON and used as a query. Thus you can create a subdocument that exactly matches the subdocument in the collection. .. seealso:: `MongoDB Manual entry on subdocument matching - `_. + `_. What does *CursorNotFound* cursor id not valid at server mean? -------------------------------------------------------------- diff --git a/doc/index.rst b/doc/index.rst index da05bf80ae..8fd357b4cd 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -54,7 +54,7 @@ everything you need to know to use **PyMongo**. Getting Help ------------ If you're having trouble or have questions about PyMongo, ask your question on -our `MongoDB Community Forum `_. +our `MongoDB Community Forum `_. You may also want to consider a `commercial support subscription `_. Once you get an answer, it'd be great if you could work it back into this diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index b993e32f4e..6fcbdf5011 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -88,8 +88,7 @@ The socketKeepAlive parameter is removed Removed the ``socketKeepAlive`` keyword argument to :class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP -keepalive. For more information see: -https://docs.mongodb.com/manual/faq/diagnostics/#does-tcp-keepalive-time-affect-mongodb-deployments +keepalive. For more information see the `documentation `_. Renamed URI options ................... @@ -545,8 +544,8 @@ Can be changed to this:: .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ -.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center -.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere +.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ +.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ Collection.initialize_ordered_bulk_op and initialize_unordered_bulk_op is removed ................................................................................. diff --git a/doc/tools.rst b/doc/tools.rst index 304a1eaf5c..69ee64448b 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -34,8 +34,7 @@ PyMODM libraries to target platforms like Django. At the same time, PyMODM is powerful enough to be used for developing applications on its own. Complete documentation is available on `readthedocs - `_ in addition to a `Gitter channel - `_ for discussing the project. + `_. Humongolus `Humongolus `_ is a lightweight ORM @@ -72,7 +71,7 @@ MongoEngine documents and query collections using syntax inspired by the Django ORM. The code is available on `GitHub `_; for more information, see - the `tutorial `_. + the `tutorial `_. MotorEngine `MotorEngine `_ is a port of @@ -122,10 +121,10 @@ Framework Tools This section lists tools and adapters that have been designed to work with various Python frameworks and libraries. -* `Djongo `_ is a connector for using +* `Djongo `_ is a connector for using Django with MongoDB as the database backend. Use the Django Admin GUI to add and modify documents in MongoDB. - The `Djongo Source Code `_ is hosted on GitHub + The `Djongo Source Code `_ is hosted on GitHub and the `Djongo package `_ is on pypi. * `Django MongoDB Engine `_ is a MongoDB @@ -138,24 +137,17 @@ various Python frameworks and libraries. `_ is a MongoDB backend for Django, an `example: `_. - For more information ``_ + For more information see ``_ * `mongodb_beaker `_ is a - project to enable using MongoDB as a backend for `beaker's - `_ caching / session system. + project to enable using MongoDB as a backend for `beakers `_ caching / session system. `The source is on GitHub `_. * `Log4Mongo `_ is a flexible Python logging handler that can store logs in MongoDB using normal and capped collections. * `MongoLog `_ is a Python logging handler that stores logs in MongoDB using a capped collection. -* `c5t `_ is a content-management system - using TurboGears and MongoDB. * `rod.recipe.mongodb `_ is a ZC Buildout recipe for downloading and installing MongoDB. -* `repoze-what-plugins-mongodb - `_ is a project - working to support a plugin for using MongoDB as a backend for - :mod:`repoze.what`. * `mongobox `_ is a tool to run a sandboxed MongoDB instance from within a python app. * `Flask-MongoAlchemy `_ Add diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index d054046bda..db33999788 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -78,7 +78,7 @@ class ChangeStream(Generic[_DocumentType]): :meth:`pymongo.mongo_client.MongoClient.watch` instead. .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. """ def __init__( diff --git a/pymongo/collection.py b/pymongo/collection.py index 46916f98f8..ad75fb760c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1784,8 +1784,8 @@ def count_documents( .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere + .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ + .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ """ pipeline = [{"$match": filter}] if "skip" in kwargs: @@ -2011,7 +2011,7 @@ def create_index( .. seealso:: The MongoDB documentation on `indexes `_. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core + .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ """ cmd_options = {} if "maxTimeMS" in kwargs: @@ -2557,7 +2557,7 @@ def watch( .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/database.py b/pymongo/database.py index c2f2eb4bc0..934b502191 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -558,7 +558,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index cd9067f463..ee89279812 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -932,7 +932,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/operations.py b/pymongo/operations.py index 8f264c48c2..e528f2a2df 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -488,7 +488,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: Added the ``partialFilterExpression`` option to support partial indexes. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core + .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ """ keys = _index_list(keys) if "name" not in kwargs: From 9ada6543d58714a48e42daeb60ff7d95b0ae3f17 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Mar 2022 14:52:47 -0700 Subject: [PATCH 0612/2111] PYTHON-3174 Remove noisy running Topology check for main test client (#898) --- test/__init__.py | 30 ++++++++++++++---------------- test/test_client.py | 11 +++++++++++ 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index b2906481e9..c432b26098 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -286,13 +286,8 @@ def hello(self): return self._hello def _connect(self, host, port, **kwargs): - # Jython takes a long time to connect. - if sys.platform.startswith("java"): - timeout_ms = 10000 - else: - timeout_ms = 5000 kwargs.update(self.default_client_options) - client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=timeout_ms, **kwargs) + client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=5000, **kwargs) try: try: client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? @@ -1037,21 +1032,26 @@ def _get_executors(topology): return [e for e in executors if e is not None] -def all_executors_stopped(topology): +def print_running_topology(topology): running = [e for e in _get_executors(topology) if not e._stopped] if running: print( - " Topology %s has THREADS RUNNING: %s, created at: %s" - % (topology, running, topology._settings._stack) + "WARNING: found Topology with running threads:\n" + " Threads: %s\n" + " Topology: %s\n" + " Creation traceback:\n%s" % (running, topology, topology._settings._stack) ) - return False - return True -def print_unclosed_clients(): +def print_running_clients(): from pymongo.topology import Topology processed = set() + # Avoid false positives on the main test client. + # XXX: Can be removed after PYTHON-1634 or PYTHON-1896. + c = client_context.client + if c: + processed.add(c._topology._topology_id) # Call collect to manually cleanup any would-be gc'd clients to avoid # false positives. gc.collect() @@ -1061,7 +1061,7 @@ def print_unclosed_clients(): # Avoid printing the same Topology multiple times. if obj._topology_id in processed: continue - all_executors_stopped(obj) + print_running_topology(obj) processed.add(obj._topology_id) except ReferenceError: pass @@ -1086,9 +1086,7 @@ def teardown(): c.drop_database("pymongo_test_bernie") c.close() - # Jython does not support gc.get_objects. - if not sys.platform.startswith("java"): - print_unclosed_clients() + print_running_clients() class PymongoTestRunner(unittest.TextTestRunner): diff --git a/test/test_client.py b/test/test_client.py index 29a5b0f1d5..a0d6e22d53 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -759,6 +759,7 @@ def test_list_databases(self): for doc in helper_docs: self.assertIs(type(doc), dict) client = rs_or_single_client(document_class=SON) + self.addCleanup(client.close) for doc in client.list_databases(): self.assertIs(type(doc), dict) @@ -979,6 +980,7 @@ def test_unix_socket(self): uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. client = rs_or_single_client(uri) + self.addCleanup(client.close) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() self.assertTrue("pymongo_test" in dbs) @@ -1002,6 +1004,7 @@ def test_document_class(self): self.assertFalse(isinstance(db.test.find_one(), SON)) c = rs_or_single_client(document_class=SON) + self.addCleanup(c.close) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) @@ -1040,6 +1043,7 @@ def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + self.addCleanup(timeout.close) no_timeout.pymongo_test.drop_collection("test") no_timeout.pymongo_test.test.insert_one({"x": 1}) @@ -1095,6 +1099,7 @@ def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware="foo") aware = rs_or_single_client(tz_aware=True) + self.addCleanup(aware.close) naive = self.client aware.pymongo_test.drop_collection("test") @@ -1124,6 +1129,7 @@ def test_ipv6(self): uri += "/?replicaSet=" + (client_context.replica_set_name or "") client = rs_or_single_client_noauth(uri) + self.addCleanup(client.close) client.pymongo_test.test.insert_one({"dummy": "object"}) client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) @@ -1222,6 +1228,7 @@ def test_operation_failure(self): # to avoid race conditions caused by replica set failover or idle # socket reaping. client = single_client() + self.addCleanup(client.close) client.pymongo_test.test.find_one() pool = get_pool(client) socket_count = len(pool.sockets) @@ -1245,18 +1252,21 @@ def test_lazy_connect_w0(self): self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.insert_one({}) wait_until( lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, "find one document" ) client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) wait_until( lambda: client.test_lazy_connect_w0.test.find_one().get("x") == 1, "update one document" ) client = rs_or_single_client(connect=False, w=0) + self.addCleanup(client.close) client.test_lazy_connect_w0.test.delete_one({}) wait_until( lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, "delete one document" @@ -1267,6 +1277,7 @@ def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. client = rs_or_single_client(maxPoolSize=1, retryReads=False) + self.addCleanup(client.close) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. From 474420b2e5b8318c58f596a9f5b4d3ed6a871ccd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Mar 2022 05:56:07 -0500 Subject: [PATCH 0613/2111] PYTHON-3085 Audit consistent and correct types for _DocumentOut (#893) --- .evergreen/config.yml | 2 +- .github/workflows/test-python.yml | 2 + README.rst | 2 +- bson/__init__.py | 111 +++++++------ bson/_cbsonmodule.c | 2 +- bson/codec_options.py | 63 ++++---- bson/codec_options.pyi | 100 ++++++++++++ doc/changelog.rst | 6 +- doc/examples/tls.rst | 2 +- doc/faq.rst | 2 +- doc/installation.rst | 4 +- doc/migrate-to-pymongo4.rst | 2 +- doc/python3.rst | 18 +-- pymongo/collection.py | 2 +- pymongo/database.py | 13 +- pymongo/encryption.py | 4 +- pymongo/message.py | 8 +- pymongo/mongo_client.py | 5 +- pymongo/monitoring.py | 11 +- setup.py | 6 +- test/test_binary.py | 20 +-- test/test_bson.py | 23 ++- test/test_bson_corpus.py | 4 +- test/test_custom_types.py | 4 +- test/test_mypy.py | 249 ++++++++++++++++++++++++++---- 25 files changed, 495 insertions(+), 170 deletions(-) create mode 100644 bson/codec_options.pyi diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2e3c12f3f8..ef60eaf7d7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1726,7 +1726,7 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" - PYTHON_BINARY: "/opt/mongodbtoolchain/v2/bin/python3" + PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" - func: "run tests" # }}} - name: "coverage-report" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 8eec9d9bf1..ba9b99e06b 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -64,6 +64,8 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo + # Test overshadowed codec_options.py file + mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test linkcheck: diff --git a/README.rst b/README.rst index 390599a6cf..fedb9e14d4 100644 --- a/README.rst +++ b/README.rst @@ -88,7 +88,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 3.6+ and PyPy3.6+. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. Optional dependencies: diff --git a/bson/__init__.py b/bson/__init__.py index a287db1801..343fbecb25 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,6 +76,7 @@ List, Mapping, MutableMapping, + Optional, Sequence, Tuple, Type, @@ -95,7 +96,12 @@ UuidRepresentation, ) from bson.code import Code -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, _raw_document_class +from bson.codec_options import ( + DEFAULT_CODEC_OPTIONS, + CodecOptions, + _DocumentType, + _raw_document_class, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -108,13 +114,11 @@ from bson.timestamp import Timestamp from bson.tz_util import utc -# Import RawBSONDocument for type-checking only to avoid circular dependency. +# Import some modules for type-checking only. if TYPE_CHECKING: from array import array from mmap import mmap - from bson.raw_bson import RawBSONDocument - try: from bson import _cbson # type: ignore[attr-defined] @@ -181,7 +185,7 @@ def _get_int( return _UNPACK_INT_FROM(data, position)[0], position + 4 -def _get_c_string(data: Any, view: Any, position: int, opts: Any) -> Tuple[str, int]: +def _get_c_string(data: Any, view: Any, position: int, opts: CodecOptions) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 @@ -195,7 +199,7 @@ def _get_float( def _get_string( - data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy: Any ) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] @@ -226,7 +230,7 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: def _get_object( - data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy: Any ) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) @@ -247,7 +251,7 @@ def _get_object( def _get_array( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] @@ -289,7 +293,7 @@ def _get_array( def _get_binary( - data: Any, view: Any, position: int, obj_end: int, opts: Any, dummy1: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy1: Any ) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) @@ -347,14 +351,14 @@ def _get_boolean( def _get_date( - data: Any, view: Any, position: int, dummy0: int, opts: Any, dummy1: Any + data: Any, view: Any, position: int, dummy0: int, opts: CodecOptions, dummy1: Any ) -> Tuple[datetime.datetime, int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 def _get_code( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) @@ -362,7 +366,7 @@ def _get_code( def _get_code_w_scope( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] @@ -374,7 +378,7 @@ def _get_code_w_scope( def _get_regex( - data: Any, view: Any, position: int, dummy0: Any, opts: Any, dummy1: Any + data: Any, view: Any, position: int, dummy0: Any, opts: CodecOptions, dummy1: Any ) -> Tuple[Regex, int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) @@ -384,7 +388,7 @@ def _get_regex( def _get_ref( - data: Any, view: Any, position: int, obj_end: int, opts: Any, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str ) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" collection, position = _get_string(data, view, position, obj_end, opts, element_name) @@ -448,12 +452,16 @@ def _get_decimal128( if _USE_C: - def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: + def _element_to_dict( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + ) -> Any: return _cbson._element_to_dict(data, position, obj_end, opts) else: - def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: Any) -> Any: + def _element_to_dict( + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + ) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 @@ -476,13 +484,13 @@ def _element_to_dict(data: Any, view: Any, position: int, obj_end: int, opts: An _T = TypeVar("_T", bound=MutableMapping[Any, Any]) -def _raw_to_dict(data: Any, position: int, obj_end: int, opts: Any, result: _T) -> _T: +def _raw_to_dict(data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T) -> _T: data, view = get_data_and_view(data) return _elements_to_dict(data, view, position, obj_end, opts, result) def _elements_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: Any, result: Any = None + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, result: Any = None ) -> Any: """Decode a BSON document into result.""" if result is None: @@ -496,7 +504,7 @@ def _elements_to_dict( return result -def _bson_to_dict(data: Any, opts: Any) -> Any: +def _bson_to_dict(data: Any, opts: CodecOptions) -> Any: """Decode a BSON string to document_class.""" data, view = get_data_and_view(data) try: @@ -586,7 +594,7 @@ def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> bytes: +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): return b"\x03" + name + value.raw @@ -594,7 +602,7 @@ def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: Any) -> byt return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> bytes: +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOptions) -> bytes: """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 @@ -611,7 +619,7 @@ def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: Any) -> byt return bytes(buf) -def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) -> bytes: +def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: CodecOptions) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) @@ -620,8 +628,8 @@ def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: Any) def _encode_text(name: bytes, value: str, dummy0: Any, dummy1: Any) -> bytes: """Encode a python str.""" - value = _utf_8_encode(value)[0] - return b"\x02" + name + _PACK_INT(len(value) + 1) + value + b"\x00" # type: ignore + bvalue = _utf_8_encode(value)[0] + return b"\x02" + name + _PACK_INT(len(bvalue) + 1) + bvalue + b"\x00" def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> bytes: @@ -632,7 +640,7 @@ def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> byte return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value -def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: Any) -> bytes: +def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: CodecOptions) -> bytes: """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation binval = Binary.from_uuid(value, uuid_representation=uuid_representation) @@ -686,7 +694,7 @@ def _encode_regex(name: bytes, value: Regex, dummy0: Any, dummy1: Any) -> bytes: return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags -def _encode_code(name: bytes, value: Code, dummy: Any, opts: Any) -> bytes: +def _encode_code(name: bytes, value: Code, dummy: Any, opts: CodecOptions) -> bytes: """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) @@ -790,7 +798,7 @@ def _name_value_to_bson( name: bytes, value: Any, check_keys: bool, - opts: Any, + opts: CodecOptions, in_custom_call: bool = False, in_fallback_call: bool = False, ) -> bytes: @@ -843,7 +851,7 @@ def _name_value_to_bson( raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) -def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes: +def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) @@ -857,7 +865,7 @@ def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: Any) -> bytes return _name_value_to_bson(name, value, check_keys, opts) -def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) -> bytes: +def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: bool = True) -> bytes: """Encode a document to BSON.""" if _raw_document_class(doc): return cast(bytes, doc.raw) @@ -879,7 +887,7 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: Any, top_level: bool = True) _dict_to_bson = _cbson._dict_to_bson # noqa: F811 -def _millis_to_datetime(millis: int, opts: Any) -> datetime.datetime: +def _millis_to_datetime(millis: int, opts: CodecOptions) -> datetime.datetime: """Convert milliseconds since epoch UTC to datetime.""" diff = ((millis % 1000) + 1000) % 1000 seconds = (millis - diff) // 1000 @@ -904,7 +912,6 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _DocumentIn = Mapping[str, Any] -_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] _ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] @@ -940,8 +947,8 @@ def encode( def decode( - data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> Dict[str, Any]: + data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> _DocumentType: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -967,15 +974,16 @@ def decode( .. versionadded:: 3.9 """ - if not isinstance(codec_options, CodecOptions): + opts: CodecOptions = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _bson_to_dict(data, codec_options) + return _bson_to_dict(data, opts) def decode_all( - data: _ReadableBuffer, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> List[Dict[str, Any]]: + data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> List[_DocumentType]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -998,15 +1006,16 @@ def decode_all( Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. """ + opts = codec_options or DEFAULT_CODEC_OPTIONS data, view = get_data_and_view(data) - if not isinstance(codec_options, CodecOptions): + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR data_len = len(data) - docs = [] + docs: List[_DocumentType] = [] position = 0 end = data_len - 1 - use_raw = _raw_document_class(codec_options.document_class) + use_raw = _raw_document_class(opts.document_class) try: while position < end: obj_size = _UNPACK_INT_FROM(data, position)[0] @@ -1017,10 +1026,10 @@ def decode_all( raise InvalidBSON("bad eoo") if use_raw: docs.append( - codec_options.document_class(data[position : obj_end + 1], codec_options) + opts.document_class(data[position : obj_end + 1], codec_options) # type: ignore ) else: - docs.append(_elements_to_dict(data, view, position + 4, obj_end, codec_options)) + docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts)) position += obj_size return docs except InvalidBSON: @@ -1110,8 +1119,8 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - def decode_iter( - data: bytes, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> Iterator[_DocumentOut]: + data: bytes, codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> Iterator[_DocumentType]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1131,7 +1140,8 @@ def decode_iter( .. versionadded:: 2.8 """ - if not isinstance(codec_options, CodecOptions): + opts = codec_options or DEFAULT_CODEC_OPTIONS + if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR position = 0 @@ -1141,12 +1151,12 @@ def decode_iter( elements = data[position : position + obj_size] position += obj_size - yield _bson_to_dict(elements, codec_options) + yield _bson_to_dict(elements, opts) def decode_file_iter( - file_obj: Union[BinaryIO, IO], codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS -) -> Iterator[_DocumentOut]: + file_obj: Union[BinaryIO, IO], codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> Iterator[_DocumentType]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1163,6 +1173,7 @@ def decode_file_iter( .. versionadded:: 2.8 """ + opts = codec_options or DEFAULT_CODEC_OPTIONS while True: # Read size of next object. size_data = file_obj.read(4) @@ -1172,7 +1183,7 @@ def decode_file_iter( raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 elements = size_data + file_obj.read(max(0, obj_size)) - yield _bson_to_dict(elements, codec_options) + yield _bson_to_dict(elements, opts) def is_valid(bson: bytes) -> bool: @@ -1233,7 +1244,7 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> Dict[str, Any]: # type: ignore[override] + def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 93610f7c58..8100e951cf 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -2600,7 +2600,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { if (!PyArg_ParseTuple(args, "O|O", &bson, &options_obj)) { return NULL; } - if (PyTuple_GET_SIZE(args) < 2) { + if ((PyTuple_GET_SIZE(args) < 2) || (options_obj == Py_None)) { if (!default_codec_options(GETSTATE(self), &options)) { return NULL; } diff --git a/bson/codec_options.py b/bson/codec_options.py index b4436dfdb8..4eaff59ea7 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -16,17 +16,17 @@ import abc import datetime -from collections import namedtuple from collections.abc import MutableMapping as _MutableMapping from typing import ( - TYPE_CHECKING, Any, Callable, Dict, Iterable, - MutableMapping, + Mapping, + NamedTuple, Optional, Type, + TypeVar, Union, cast, ) @@ -37,10 +37,6 @@ UuidRepresentation, ) -# Import RawBSONDocument for type-checking only to avoid circular dependency. -if TYPE_CHECKING: - from bson.raw_bson import RawBSONDocument - def _abstractproperty(func: Callable[..., Any]) -> property: return property(abc.abstractmethod(func)) @@ -115,7 +111,7 @@ class TypeCodec(TypeEncoder, TypeDecoder): _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -_DocumentClass = Union[Type[MutableMapping], Type["RawBSONDocument"]] +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) class TypeRegistry(object): @@ -152,8 +148,8 @@ def __init__( ) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder - self._encoder_map = {} - self._decoder_map = {} + self._encoder_map: Dict[Any, Any] = {} + self._decoder_map: Dict[Any, Any] = {} if self._fallback_encoder is not None: if not callable(fallback_encoder): @@ -202,20 +198,16 @@ def __eq__(self, other: Any) -> Any: ) -_options_base = namedtuple( # type: ignore - "CodecOptions", - ( - "document_class", - "tz_aware", - "uuid_representation", - "unicode_decode_error_handler", - "tzinfo", - "type_registry", - ), -) +class _BaseCodecOptions(NamedTuple): + document_class: Type[Mapping[str, Any]] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: str + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry -class CodecOptions(_options_base): +class CodecOptions(_BaseCodecOptions): """Encapsulates options used encoding and / or decoding BSON. The `document_class` option is used to define a custom type for use @@ -250,7 +242,7 @@ class CodecOptions(_options_base): See :doc:`/examples/datetimes` for examples using the `tz_aware` and `tzinfo` options. - See :doc:`examples/uuid` for examples using the `uuid_representation` + See :doc:`/examples/uuid` for examples using the `uuid_representation` option. :Parameters: @@ -294,18 +286,27 @@ class CodecOptions(_options_base): def __new__( cls: Type["CodecOptions"], - document_class: _DocumentClass = dict, + document_class: Optional[Type[Mapping[str, Any]]] = None, tz_aware: bool = False, uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: Optional[str] = "strict", + unicode_decode_error_handler: str = "strict", tzinfo: Optional[datetime.tzinfo] = None, type_registry: Optional[TypeRegistry] = None, ) -> "CodecOptions": - if not (issubclass(document_class, _MutableMapping) or _raw_document_class(document_class)): + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. + is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) # type: ignore[union-attr] + if not (is_mapping or _raw_document_class(doc_class)): raise TypeError( "document_class must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.abc.MutableMapping" + "subclass of collections.abc.MutableMapping" ) if not isinstance(tz_aware, bool): raise TypeError("tz_aware must be True or False") @@ -313,8 +314,8 @@ def __new__( raise ValueError( "uuid_representation must be a value from bson.binary.UuidRepresentation" ) - if not isinstance(unicode_decode_error_handler, (str, None)): # type: ignore - raise ValueError("unicode_decode_error_handler must be a string or None") + if not isinstance(unicode_decode_error_handler, str): + raise ValueError("unicode_decode_error_handler must be a string") if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): raise TypeError("tzinfo must be an instance of datetime.tzinfo") @@ -329,7 +330,7 @@ def __new__( return tuple.__new__( cls, ( - document_class, + doc_class, tz_aware, uuid_representation, unicode_decode_error_handler, @@ -392,7 +393,7 @@ def with_options(self, **kwargs: Any) -> "CodecOptions": return CodecOptions(**opts) -DEFAULT_CODEC_OPTIONS: CodecOptions = CodecOptions() +DEFAULT_CODEC_OPTIONS = CodecOptions() def _parse_codec_options(options: Any) -> CodecOptions: diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi new file mode 100644 index 0000000000..9d5f5c2656 --- /dev/null +++ b/bson/codec_options.pyi @@ -0,0 +1,100 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Workaround for https://bugs.python.org/issue43923. +Ideally we would have done this with a single class, but +generic subclasses *must* take a parameter, and prior to Python 3.9 +or in Python 3.7 and 3.8 with `from __future__ import annotations`, +you get the error: "TypeError: 'type' object is not subscriptable". +""" + +import datetime +import abc +from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union + + +class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def python_type(self) -> Any: ... + @abc.abstractmethod + def transform_python(self, value: Any) -> Any: ... + +class TypeDecoder(abc.ABC, metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def bson_type(self) -> Any: ... + @abc.abstractmethod + def transform_bson(self, value: Any) -> Any: ... + +class TypeCodec(TypeEncoder, TypeDecoder, metaclass=abc.ABCMeta): ... + +Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] +Fallback = Callable[[Any], Any] + +class TypeRegistry: + _decoder_map: Dict[Any, Any] + _encoder_map: Dict[Any, Any] + _fallback_encoder: Optional[Fallback] + + def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... + def __eq__(self, other: Any) -> Any: ... + + +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) + + +class CodecOptions(Tuple, Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + + def __new__( + cls: Type[CodecOptions], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + ) -> CodecOptions[_DocumentType]: ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... + + def _arguments_repr(self) -> str: ... + + def _options_dict(self) -> Dict[Any, Any]: ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable) -> CodecOptions[_DocumentType]: ... + + def _asdict(self) -> Dict[str, Any]: ... + + def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... + + _source: str + _fields: Tuple[str] + + +DEFAULT_CODEC_OPTIONS: CodecOptions[MutableMapping[str, Any]] +_RAW_BSON_DOCUMENT_MARKER: int + +def _raw_document_class(document_class: Any) -> bool: ... + +def _parse_codec_options(options: Any) -> CodecOptions: ... diff --git a/doc/changelog.rst b/doc/changelog.rst index 73e2ea9ba4..d326c24b32 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -26,6 +26,10 @@ PyMongo 4.1 brings a number of improvements including: - :meth:`gridfs.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. +Breaking Changes in 4.1 +....................... +- Removed support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. + Bug fixes ......... @@ -57,7 +61,7 @@ before upgrading from PyMongo 3.x. Breaking Changes in 4.0 ....................... -- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6+ is now required. +- Removed support for Python 2.7, 3.4, and 3.5. Python 3.6.2+ is now required. - The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, :class:`~bson.json_util.JSONOptions`, and :class:`~pymongo.mongo_client.MongoClient` has been changed from diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index f6920ad278..9c3c2c829c 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -181,7 +181,7 @@ server's certificate:: This often occurs because OpenSSL does not have access to the system's root certificates or the certificates are out of date. Linux users should ensure that they have the latest root certificate updates installed from -their Linux vendor. macOS users using Python 3.6.0 or newer downloaded +their Linux vendor. macOS users using Python 3.6.2 or newer downloaded from python.org `may have to run a script included with python `_ to install root certificates:: diff --git a/doc/faq.rst b/doc/faq.rst index a7f7c87bdd..0d045f7629 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -145,7 +145,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.6+ and PyPy3.6+. See the :doc:`python3` for details. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index 9c9d80c7a1..4f14b31125 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.6+ and PyPy3.6+. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. Optional dependencies: @@ -133,7 +133,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.6+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.6.2+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 6fcbdf5011..6d290dd51b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -34,7 +34,7 @@ Python 3.6+ ----------- PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to -upgrade to 4.x must first upgrade to Python 3.6+. Users upgrading from +upgrade to 4.x must first upgrade to Python 3.6.2+. Users upgrading from Python 2 should consult the :doc:`python3`. Enable Deprecation Warnings diff --git a/doc/python3.rst b/doc/python3.rst index e001c55c8e..c14224166a 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -6,7 +6,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.6+ and PyPy3.6+. +PyMongo supports CPython 3.6.2+ and PyPy3.6+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- @@ -20,8 +20,8 @@ with subtype 0. For example, let's insert a :class:`bytes` instance using Python 3 then read it back. Notice the byte string is decoded back to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pymongo >>> c = pymongo.MongoClient() @@ -49,8 +49,8 @@ decoded to :class:`~bson.binary.Binary` with subtype 0. For example, let's decode a JSON binary subtype 0 using Python 3. Notice the byte string is decoded to :class:`bytes`:: - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from bson.json_util import loads >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') @@ -86,8 +86,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: >>> pickle.dumps(oid) 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - Python 3.6.1 (v3.6.1:69c0db5050, Mar 21 2017, 01:21:04) - [GCC 4.9.3] on linux + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') @@ -97,8 +97,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 you must use ``protocol <= 2``:: - Python 3.6.5 (default, Jun 21 2018, 15:09:09) - [GCC 7.3.0] on linux + Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) + [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> from bson.objectid import ObjectId diff --git a/pymongo/collection.py b/pymongo/collection.py index ad75fb760c..dc344b640f 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2156,7 +2156,7 @@ def list_indexes( .. versionadded:: 3.0 """ - codec_options = CodecOptions(SON) + codec_options: CodecOptions = CodecOptions(SON) coll = self.with_options( codec_options=codec_options, read_preference=ReadPreference.PRIMARY ) diff --git a/pymongo/database.py b/pymongo/database.py index 934b502191..6f2d0fd5cc 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -23,6 +23,7 @@ MutableMapping, Optional, Sequence, + TypeVar, Union, cast, ) @@ -38,7 +39,7 @@ from pymongo.command_cursor import CommandCursor from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentOut, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline def _check_name(name): @@ -58,6 +59,9 @@ def _check_name(name): from pymongo.write_concern import WriteConcern +_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) + + class Database(common.BaseObject, Generic[_DocumentType]): """A Mongo database.""" @@ -617,11 +621,11 @@ def command( check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_preference: Optional[_ServerMode] = None, - codec_options: Optional[CodecOptions] = DEFAULT_CODEC_OPTIONS, + codec_options: "Optional[CodecOptions[_CodecDocumentType]]" = None, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, **kwargs: Any, - ) -> _DocumentOut: + ) -> _CodecDocumentType: """Issue a MongoDB command. Send command `command` to the database and return the @@ -707,6 +711,7 @@ def command( .. seealso:: The MongoDB documentation on `commands `_. """ + opts = codec_options or DEFAULT_CODEC_OPTIONS if comment is not None: kwargs["comment"] = comment @@ -723,7 +728,7 @@ def command( check, allowable_errors, read_preference, - codec_options, + opts, session=session, **kwargs, ) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 502c83e47b..1e06f7062d 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -56,7 +56,7 @@ _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. _MONGOCRYPTD_TIMEOUT_MS = 10000 -_DATA_KEY_OPTS = CodecOptions(document_class=SON, uuid_representation=STANDARD) +_DATA_KEY_OPTS: CodecOptions = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD) @@ -572,7 +572,7 @@ def encrypt( encrypted_doc = self._encryption.encrypt( doc, algorithm, key_id=key_id, key_alt_name=key_alt_name ) - return decode(encrypted_doc)["v"] + return decode(encrypted_doc)["v"] # type: ignore[index] def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. diff --git a/pymongo/message.py b/pymongo/message.py index 92d59c3ebd..58f71629d6 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,7 +24,7 @@ import random import struct from io import BytesIO as _BytesIO -from typing import Any +from typing import Any, Dict import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode @@ -76,7 +76,9 @@ } _FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UNICODE_REPLACE_CODEC_OPTIONS = CodecOptions(unicode_decode_error_handler="replace") +_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Dict[str, Any]]" = CodecOptions( + unicode_decode_error_handler="replace" +) def _randint(): @@ -1259,7 +1261,7 @@ def raw_response(self, cursor_id=None, user_fields=None): errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: - error_object = bson.BSON(self.documents).decode() + error_object: dict = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ee89279812..4231db95ae 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -649,10 +649,11 @@ def __init__( client.__my_database__ """ + doc_class = document_class or dict self.__init_kwargs: Dict[str, Any] = { "host": host, "port": port, - "document_class": document_class or dict, + "document_class": doc_class, "tz_aware": tz_aware, "connect": connect, "type_registry": type_registry, @@ -676,7 +677,7 @@ def __init__( # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts["document_class"] = document_class or dict + keyword_opts["document_class"] = doc_class seeds = set() username = None diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 4798542dc7..ad604f3f16 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -182,12 +182,12 @@ def connection_checked_in(self, event): import datetime from collections import abc, namedtuple -from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional from bson.objectid import ObjectId from pymongo.hello import Hello, HelloCompat from pymongo.helpers import _handle_exception -from pymongo.typings import _Address +from pymongo.typings import _Address, _DocumentOut if TYPE_CHECKING: from pymongo.server_description import ServerDescription @@ -208,9 +208,6 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) -_DocumentOut = Mapping[str, Any] - - class _EventListener(object): """Abstract base class for all event listeners.""" @@ -635,7 +632,7 @@ def __init__( ) cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): - self.__cmd: Mapping[str, Any] = {} + self.__cmd: _DocumentOut = {} else: self.__cmd = command self.__db = database_name @@ -693,7 +690,7 @@ def __init__( self.__duration_micros = _to_micros(duration) cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): - self.__reply: Mapping[str, Any] = {} + self.__reply: _DocumentOut = {} else: self.__reply = reply diff --git a/setup.py b/setup.py index 699ced1f85..5bae7dc211 100755 --- a/setup.py +++ b/setup.py @@ -4,8 +4,8 @@ import sys import warnings -if sys.version_info[:2] < (3, 6): - raise RuntimeError("Python version >= 3.6 required.") +if sys.version_info[:3] < (3, 6, 2): + raise RuntimeError("Python version >= 3.6.2 required.") # Hack to silence atexit traceback in some Python versions @@ -321,7 +321,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=3.6", + python_requires=">=3.6.2", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", diff --git a/test/test_binary.py b/test/test_binary.py index 7d0ef2ce2e..65abdca796 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -158,19 +158,19 @@ def test_uuid_subtype_4(self): def test_legacy_java_uuid(self): # Test decoding data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) @@ -198,7 +198,7 @@ def test_legacy_java_uuid(self): @client_context.require_connection def test_legacy_java_uuid_roundtrip(self): data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) client_context.client.pymongo_test.drop_collection("java_uuid") db = client_context.client.pymongo_test @@ -218,19 +218,19 @@ def test_legacy_csharp_uuid(self): data = self.csharp_data # Test decoding - docs = bson.decode_all(data, CodecOptions(SON, False, PYTHON_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, STANDARD)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, STANDARD)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, JAVA_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) for d in docs: self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) @@ -258,7 +258,7 @@ def test_legacy_csharp_uuid(self): @client_context.require_connection def test_legacy_csharp_uuid_roundtrip(self): data = self.csharp_data - docs = bson.decode_all(data, CodecOptions(SON, False, CSHARP_LEGACY)) + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) client_context.client.pymongo_test.drop_collection("csharp_uuid") db = client_context.client.pymongo_test diff --git a/test/test_bson.py b/test/test_bson.py index 9bf8df897a..b0dce7db4e 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -744,12 +744,12 @@ def test_dates(self): def test_custom_class(self): self.assertIsInstance(decode(encode({})), dict) self.assertNotIsInstance(decode(encode({})), SON) - self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) + self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON) # type: ignore[type-var] - self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) + self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"]) # type: ignore[type-var] x = encode({"x": [{"y": 1}]}) - self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) + self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON) # type: ignore[type-var] def test_subclasses(self): # make sure we can serialize subclasses of native Python types. @@ -772,7 +772,7 @@ class _myunicode(str): def test_ordered_dict(self): d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)]) - self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) + self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict))) # type: ignore[type-var] def test_bson_regex(self): # Invalid Python regex, though valid PCRE. @@ -954,7 +954,7 @@ def __repr__(self): class TestCodecOptions(unittest.TestCase): def test_document_class(self): self.assertRaises(TypeError, CodecOptions, document_class=object) - self.assertIs(SON, CodecOptions(document_class=SON).document_class) + self.assertIs(SON, CodecOptions(document_class=SON).document_class) # type: ignore[type-var] def test_tz_aware(self): self.assertRaises(TypeError, CodecOptions, tz_aware=1) @@ -993,6 +993,19 @@ def test_decode_all_defaults(self): with self.assertRaisesRegex(ValueError, "cannot encode native uuid"): bson.decode_all(bson.encode({"uuid": uuid.uuid4()})) + def test_decode_all_no_options(self): + # Test decode_all()'s default document_class is dict and tz_aware is + # False. + doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} + + decoded = bson.decode_all(bson.encode(doc), None)[0] + self.assertIsInstance(decoded["sub_document"], dict) + self.assertIsNone(decoded["dt"].tzinfo) + + doc2 = {"id": Binary.from_uuid(uuid.uuid4())} + decoded = bson.decode_all(bson.encode(doc2), None)[0] + self.assertIsInstance(decoded["id"], Binary) + def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 4f8fc7413a..193a6dff3d 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -71,8 +71,8 @@ # Need to set tz_aware=True in order to use "strict" dates in extended JSON. -codec_options = CodecOptions(tz_aware=True, document_class=SON) -codec_options_no_tzaware = CodecOptions(document_class=SON) +codec_options: CodecOptions = CodecOptions(tz_aware=True, document_class=SON) +codec_options_no_tzaware: CodecOptions = CodecOptions(document_class=SON) # We normally encode UUID as binary subtype 0x03, # but we'll need to encode to subtype 0x04 for one of the tests. codec_options_uuid_04 = codec_options._replace(uuid_representation=STANDARD) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index a7073cde93..e11b5ebe00 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -538,10 +538,10 @@ def transform_bson(self, value): self.assertEqual( type_registry._encoder_map, - {MyIntEncoder.python_type: codec_instances[1].transform_python}, # type: ignore[has-type] + {MyIntEncoder.python_type: codec_instances[1].transform_python}, ) self.assertEqual( - type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} # type: ignore[has-type] + type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} ) def test_initialize_fail(self): diff --git a/test/test_mypy.py b/test/test_mypy.py index 55794e138e..6cf3eb2c87 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -16,8 +16,9 @@ sample client code that uses PyMongo typings.""" import os +import tempfile import unittest -from typing import TYPE_CHECKING, Any, Dict, Iterable, List +from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List try: from typing import TypedDict # type: ignore[attr-defined] @@ -39,6 +40,7 @@ class Movie(TypedDict): # type: ignore[misc] from test import IntegrationTest from test.utils import rs_or_single_client +from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.collection import Collection @@ -54,6 +56,15 @@ def get_tests() -> Iterable[str]: yield os.path.join(dirpath, filename) +def only_type_check(func): + def inner(*args, **kwargs): + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + func(*args, **kwargs) + + return inner + + class TestMypyFails(unittest.TestCase): def ensure_mypy_fails(self, filename: str) -> None: if api is None: @@ -105,7 +116,7 @@ def test_bulk_write(self) -> None: self.assertTrue(result.acknowledged) def test_command(self) -> None: - result = self.client.admin.command("ping") + result: Dict = self.client.admin.command("ping") items = result.items() def test_list_collections(self) -> None: @@ -127,18 +138,154 @@ def test_default_document_type(self) -> None: assert retreived is not None retreived["a"] = 1 + def test_aggregate_pipeline(self) -> None: + coll3 = self.client.test.test3 + coll3.insert_many( + [ + {"x": 1, "tags": ["dog", "cat"]}, + {"x": 2, "tags": ["cat"]}, + {"x": 2, "tags": ["mouse", "cat", "dog"]}, + {"x": 3, "tags": []}, + ] + ) + + class mydict(Dict[str, Any]): + pass + + result = coll3.aggregate( + [ + mydict({"$unwind": "$tags"}), + {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, + {"$sort": SON([("count", -1), ("_id", -1)])}, + ] + ) + self.assertTrue(len(list(result))) + + +class TestDecode(unittest.TestCase): + def test_bson_decode(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + rt_document: Dict[str, Any] = decode(bsonbytes) + assert rt_document["_id"] == 1 + rt_document["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options = CodecOptions(document_class=MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options) + rt_document2 = decode(bsonbytes2, codec_options=codec_options) + assert rt_document2.foo() == "bar" + + codec_options2 = CodecOptions(document_class=RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options2) + rt_document3 = decode(bsonbytes2, codec_options=codec_options2) + assert rt_document3.raw + + def test_bson_decode_all(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: List[Dict[str, Any]] = decode_all(bsonbytes) + assert rt_documents[0]["_id"] == 1 + rt_documents[0]["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_all(bsonbytes2, codec_options2) + assert rt_documents2[0].foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_all(bsonbytes3, codec_options3) + assert rt_documents3[0].raw + + def test_bson_decode_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: Iterator[Dict[str, Any]] = decode_iter(bsonbytes) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_iter(bsonbytes2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_iter(bsonbytes3, codec_options3) + assert next(rt_documents3).raw + + def make_tempfile(self, content: bytes) -> Any: + fileobj = tempfile.TemporaryFile() + fileobj.write(content) + fileobj.seek(0) + self.addCleanup(fileobj.close) + return fileobj + + def test_bson_decode_file_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + fileobj = self.make_tempfile(bsonbytes) + rt_documents: Iterator[Dict[str, Any]] = decode_file_iter(fileobj) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + fileobj2 = self.make_tempfile(bsonbytes2) + rt_documents2 = decode_file_iter(fileobj2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + fileobj3 = self.make_tempfile(bsonbytes3) + rt_documents3 = decode_file_iter(fileobj3, codec_options3) + assert next(rt_documents3).raw + + +class TestDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + coll = client.test.test + retreived = coll.find_one({"_id": "foo"}) + assert retreived is not None + retreived["a"] = 1 + + @only_type_check def test_explicit_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client: MongoClient[Dict[str, Any]] = MongoClient() coll = client.test.test retreived = coll.find_one({"_id": "foo"}) assert retreived is not None retreived["a"] = 1 + @only_type_check def test_typeddict_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client: MongoClient[Movie] = MongoClient() coll = client.test.test retreived = coll.find_one({"_id": "foo"}) @@ -146,46 +293,88 @@ def test_typeddict_document_type(self) -> None: assert retreived["year"] == 1 assert retreived["name"] == "a" + @only_type_check def test_raw_bson_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client = MongoClient(document_class=RawBSONDocument) coll = client.test.test retreived = coll.find_one({"_id": "foo"}) assert retreived is not None assert len(retreived.raw) > 0 + @only_type_check def test_son_document_type(self) -> None: - if not TYPE_CHECKING: - raise unittest.SkipTest("Do not use raw MongoClient") client = MongoClient(document_class=SON[str, Any]) coll = client.test.test retreived = coll.find_one({"_id": "foo"}) assert retreived is not None retreived["a"] = 1 - def test_aggregate_pipeline(self) -> None: - coll3 = self.client.test.test3 - coll3.insert_many( - [ - {"x": 1, "tags": ["dog", "cat"]}, - {"x": 2, "tags": ["cat"]}, - {"x": 2, "tags": ["mouse", "cat", "dog"]}, - {"x": 3, "tags": []}, - ] - ) - class mydict(Dict[str, Any]): - pass +class TestCommandDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + result: Dict = client.admin.command("ping") + result["a"] = 1 - result = coll3.aggregate( - [ - mydict({"$unwind": "$tags"}), - {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - {"$sort": SON([("count", -1), ("_id", -1)])}, - ] - ) - self.assertTrue(len(list(result))) + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Dict[str, Any]] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Movie] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + assert result["year"] == 1 + assert result["name"] == "a" + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options = CodecOptions(RawBSONDocument) + result = client.admin.command("ping", codec_options=codec_options) + assert len(result.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + codec_options = CodecOptions(SON[str, Any]) + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + +class TestCodecOptionsDocumentType(unittest.TestCase): + def test_default(self) -> None: + options: CodecOptions = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_explicit_document_type(self) -> None: + options: CodecOptions[Dict[str, Any]] = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_typeddict_document_type(self) -> None: + options: CodecOptions[Movie] = CodecOptions() + # Suppress: Cannot instantiate type "Type[Movie]". + obj = options.document_class(name="a", year=1) # type: ignore[misc] + assert obj["year"] == 1 + assert obj["name"] == "a" + + def test_raw_bson_document_type(self) -> None: + options = CodecOptions(RawBSONDocument) + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" + obj = options.document_class(doc_bson) + assert len(obj.raw) > 0 + + def test_son_document_type(self) -> None: + options = CodecOptions(SON[str, Any]) + obj = options.document_class() + obj["a"] = 1 if __name__ == "__main__": From 0a6e7bc38760842d5b99363168ba13447ba03799 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Mar 2022 09:36:31 -0700 Subject: [PATCH 0614/2111] PYTHON-3174 Don't reinit client_context.client (#899) --- test/test_raw_bson.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index a27af6e217..d82e5104c0 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -20,7 +20,6 @@ from test import client_context, unittest from test.test_client import IntegrationTest -from test.utils import rs_or_single_client from bson import decode, encode from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation @@ -42,12 +41,6 @@ class TestRawBSONDocument(IntegrationTest): ) document = RawBSONDocument(bson_string) - @classmethod - def setUpClass(cls): - super(TestRawBSONDocument, cls).setUpClass() - client_context.client = rs_or_single_client() - cls.client = client_context.client - def tearDown(self): if client_context.connected: self.client.pymongo_test.test_raw.drop() From 648a87e22867c49c23baf5caff982b8df8a735c3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Mar 2022 12:32:00 -0700 Subject: [PATCH 0615/2111] PYTHON-3173 Skip version API test for count (#902) --- test/test_examples.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test_examples.py b/test/test_examples.py index ccb48307e4..f38e540507 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1201,6 +1201,7 @@ def test_versioned_api(self): client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 + @unittest.skip("PYTHON-3167 count has been added to API version 1") @client_context.require_version_min(4, 7) def test_versioned_api_migration(self): # SERVER-58785 From b3604a81d30cf9d67ea94a7f314b5602f08b46a1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Mar 2022 15:26:10 -0500 Subject: [PATCH 0616/2111] PYTHON-3171 Add usage of NoReturn annotation (#901) --- bson/__init__.py | 3 ++- bson/objectid.py | 4 ++-- gridfs/grid_file.py | 18 +++++++++--------- pymongo/bulk.py | 3 ++- pymongo/client_session.py | 5 +++-- pymongo/collection.py | 7 ++++--- pymongo/command_cursor.py | 4 ++-- pymongo/common.py | 3 ++- pymongo/cursor.py | 3 ++- pymongo/database.py | 5 +++-- pymongo/helpers.py | 6 +++--- pymongo/message.py | 4 ++-- pymongo/mongo_client.py | 3 ++- pymongo/pool.py | 8 +++++--- 14 files changed, 43 insertions(+), 33 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 343fbecb25..11a87bbe79 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -76,6 +76,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, Tuple, @@ -170,7 +171,7 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: return view.tobytes(), view -def _raise_unknown_type(element_type: int, element_name: str) -> None: +def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON( "Detected unknown BSON type %r for fieldname '%s'. Are " diff --git a/bson/objectid.py b/bson/objectid.py index 24d25d0377..c174b47327 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -24,7 +24,7 @@ import threading import time from random import SystemRandom -from typing import Any, Optional, Type, Union +from typing import Any, NoReturn, Optional, Type, Union from bson.errors import InvalidId from bson.tz_util import utc @@ -32,7 +32,7 @@ _MAX_COUNTER_VALUE = 0xFFFFFF -def _raise_invalid_id(oid: str) -> None: +def _raise_invalid_id(oid: str) -> NoReturn: raise InvalidId( "%r is not a valid ObjectId, it must be a 12-byte input" " or a 24-character hex string" % oid diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index b290fc68b0..5d63d5c653 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -17,7 +17,7 @@ import io import math import os -from typing import Any, Iterable, List, Mapping, Optional +from typing import Any, Iterable, List, Mapping, NoReturn, Optional from bson.binary import Binary from bson.int64 import Int64 @@ -298,7 +298,7 @@ def __flush(self) -> Any: except DuplicateKeyError: self._raise_file_exists(self._id) - def _raise_file_exists(self, file_id: Any) -> None: + def _raise_file_exists(self, file_id: Any) -> NoReturn: """Raise a FileExists exception for the given file_id.""" raise FileExists("file with _id %r already exists" % file_id) @@ -312,7 +312,7 @@ def close(self) -> None: self.__flush() object.__setattr__(self, "_closed", True) - def read(self, size: Optional[int] = -1) -> None: + def read(self, size: int = -1) -> NoReturn: raise io.UnsupportedOperation("read") def readable(self) -> bool: @@ -682,10 +682,10 @@ def close(self) -> None: self.__chunk_iter = None super().close() - def write(self, value: Any) -> None: + def write(self, value: Any) -> NoReturn: raise io.UnsupportedOperation("write") - def writelines(self, lines: Any) -> None: + def writelines(self, lines: Any) -> NoReturn: raise io.UnsupportedOperation("writelines") def writable(self) -> bool: @@ -704,7 +704,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: self.close() return False - def fileno(self) -> int: + def fileno(self) -> NoReturn: raise io.UnsupportedOperation("fileno") def flush(self) -> None: @@ -714,7 +714,7 @@ def flush(self) -> None: def isatty(self) -> bool: return False - def truncate(self, size: Optional[int] = None) -> int: + def truncate(self, size: Optional[int] = None) -> NoReturn: # See https://docs.python.org/3/library/io.html#io.IOBase.writable # for why truncate has to raise. raise io.UnsupportedOperation("truncate") @@ -891,10 +891,10 @@ def next(self) -> GridOut: __next__ = next - def add_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: raise NotImplementedError("Method does not exist for GridOutCursor") - def remove_option(self, *args: Any, **kwargs: Any) -> None: # type: ignore[override] + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: raise NotImplementedError("Method does not exist for GridOutCursor") def _clone_base(self, session: ClientSession) -> "GridOutCursor": diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 9055e40e98..44923f73df 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -18,6 +18,7 @@ """ import copy from itertools import islice +from typing import Any, NoReturn from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument @@ -128,7 +129,7 @@ def _merge_command(run, full_result, offset, result): full_result["writeConcernErrors"].append(wce) -def _raise_bulk_write_error(full_result): +def _raise_bulk_write_error(full_result: Any) -> NoReturn: """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: full_result["writeErrors"].sort(key=lambda error: error["index"]) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 20d36fb062..a0c269cb8d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -142,6 +142,7 @@ ContextManager, Generic, Mapping, + NoReturn, Optional, TypeVar, ) @@ -422,7 +423,7 @@ def __del__(self): self.sock_mgr = None -def _reraise_with_unknown_commit(exc): +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: """Re-raise an exception with the UnknownTransactionCommitResult label.""" exc._add_error_label("UnknownTransactionCommitResult") raise @@ -1003,7 +1004,7 @@ def _update_read_concern(self, cmd, sock_info): if self._snapshot_time is not None: rc["atClusterTime"] = self._snapshot_time - def __copy__(self): + def __copy__(self) -> NoReturn: raise TypeError("A ClientSession cannot be copied, create a new session instead") diff --git a/pymongo/collection.py b/pymongo/collection.py index dc344b640f..d0ebd9311a 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -23,6 +23,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, Tuple, @@ -343,7 +344,7 @@ def __ne__(self, other: Any) -> bool: def __hash__(self) -> int: return hash((self.__database, self.__name)) - def __bool__(self) -> bool: + def __bool__(self) -> NoReturn: raise NotImplementedError( "Collection objects do not implement truth " "value testing or bool(). Please compare " @@ -3143,12 +3144,12 @@ def find_one_and_update( def __iter__(self) -> "Collection[_DocumentType]": return self - def __next__(self) -> None: + def __next__(self) -> NoReturn: raise TypeError("'Collection' object is not iterable") next = __next__ - def __call__(self, *args: Any, **kwargs: Any) -> None: + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: """This is only here so that some API misusages are easier to debug.""" if "." not in self.__name: raise TypeError( diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index d10e23f957..0bd99f0bbb 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -15,7 +15,7 @@ """CommandCursor class to iterate over command results.""" from collections import deque -from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, Optional +from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, NoReturn, Optional from bson import _convert_raw_document_lists_to_streams from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager @@ -344,5 +344,5 @@ def _unpack_response( _convert_raw_document_lists_to_streams(raw_response[0]) return raw_response - def __getitem__(self, index): + def __getitem__(self, index: int) -> NoReturn: raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/common.py b/pymongo/common.py index 5255468b5a..669e12ead7 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -25,6 +25,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, Tuple, @@ -153,7 +154,7 @@ def clean_node(node: str) -> Tuple[str, int]: return host.lower(), port -def raise_config_error(key: str, dummy: Any) -> None: +def raise_config_error(key: str, dummy: Any) -> NoReturn: """Raise ConfigurationError with the given key name.""" raise ConfigurationError("Unknown option %s" % (key,)) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index a2ccdf5860..9f6f0898b4 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -25,6 +25,7 @@ Iterable, List, Mapping, + NoReturn, Optional, Sequence, Tuple, @@ -1339,5 +1340,5 @@ def explain(self) -> _DocumentType: clone = self._clone(deepcopy=True, base=Cursor(self.collection)) return clone.explain() - def __getitem__(self, index: Any) -> "Cursor[_DocumentType]": + def __getitem__(self, index: Any) -> NoReturn: raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/database.py b/pymongo/database.py index 6f2d0fd5cc..17cba06b65 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -21,6 +21,7 @@ List, Mapping, MutableMapping, + NoReturn, Optional, Sequence, TypeVar, @@ -1010,12 +1011,12 @@ def validate_collection( def __iter__(self) -> "Database[_DocumentType]": return self - def __next__(self) -> "Database[_DocumentType]": + def __next__(self) -> NoReturn: raise TypeError("'Database' object is not iterable") next = __next__ - def __bool__(self) -> bool: + def __bool__(self) -> NoReturn: raise NotImplementedError( "Database objects do not implement truth " "value testing or bool(). Please compare " diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 8311aafa8f..60b69424a2 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -17,7 +17,7 @@ import sys import traceback from collections import abc -from typing import Any +from typing import Any, List, NoReturn from bson.son import SON from pymongo import ASCENDING @@ -180,7 +180,7 @@ def _check_command_response( raise OperationFailure(errmsg, code, response, max_wire_version) -def _raise_last_write_error(write_errors): +def _raise_last_write_error(write_errors: List[Any]) -> NoReturn: # If the last batch had multiple errors only report # the last error to emulate continue_on_error. error = write_errors[-1] @@ -189,7 +189,7 @@ def _raise_last_write_error(write_errors): raise WriteError(error.get("errmsg"), error.get("code"), error) -def _raise_write_concern_error(error): +def _raise_write_concern_error(error: Any) -> NoReturn: if "errInfo" in error and error["errInfo"].get("wtimeout"): # Make sure we raise WTimeoutError raise WTimeoutError(error.get("errmsg"), error.get("code"), error) diff --git a/pymongo/message.py b/pymongo/message.py index 58f71629d6..6aa8e4e7f9 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,7 +24,7 @@ import random import struct from io import BytesIO as _BytesIO -from typing import Any, Dict +from typing import Any, Dict, NoReturn import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode @@ -991,7 +991,7 @@ def max_split_size(self): return _MAX_SPLIT_SIZE_ENC -def _raise_document_too_large(operation, doc_size, max_size): +def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> NoReturn: """Internal helper for raising DocumentTooLarge.""" if operation == "insert": raise DocumentTooLarge( diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4231db95ae..280818ce00 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -43,6 +43,7 @@ Generic, List, Mapping, + NoReturn, Optional, Sequence, Set, @@ -1975,7 +1976,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: def __iter__(self) -> "MongoClient[_DocumentType]": return self - def __next__(self) -> None: + def __next__(self) -> NoReturn: raise TypeError("'MongoClient' object is not iterable") next = __next__ diff --git a/pymongo/pool.py b/pymongo/pool.py index c7bd21fc8f..1aaae4067f 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -24,7 +24,7 @@ import threading import time import weakref -from typing import Any +from typing import Any, NoReturn, Optional from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -249,7 +249,9 @@ def _set_keepalive_times(sock): "foo".encode("idna") -def _raise_connection_failure(address, error, msg_prefix=None): +def _raise_connection_failure( + address: Any, error: Exception, msg_prefix: Optional[str] = None +) -> NoReturn: """Convert a socket.error to ConnectionFailure and raise it.""" host, port = address # If connecting to a Unix socket, port will be None. @@ -1593,7 +1595,7 @@ def _perished(self, sock_info): return False - def _raise_wait_queue_timeout(self): + def _raise_wait_queue_timeout(self) -> NoReturn: listeners = self.opts._event_listeners if self.enabled_for_cmap: listeners.publish_connection_check_out_failed( From da81c69644a3d8245c4b60a92d9ce39ff0a2e8ba Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Mar 2022 15:26:45 -0500 Subject: [PATCH 0617/2111] PYTHON-3157 Update Release Documentation to Include Github Releases (#900) --- RELEASE.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/RELEASE.rst b/RELEASE.rst index 84b60d9b6a..ad18446a0f 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -89,3 +89,9 @@ Doing a Release 15. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: https://jira.mongodb.org/browse/DOCSP-13536 + +16. Create a GitHub Release for the tag using + https://github.com/mongodb/mongo-python-driver/releases/new. + The title should be "PyMongo X.Y.Z", and the description should contain + a link to the release notes on the the community forum, e.g. + "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457." From 861d79537fee9dd80be02a4ffdbadc5897acfd7d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Mar 2022 14:52:06 -0700 Subject: [PATCH 0618/2111] PYTHON-3180 Use server v3 toolchain in perf tests (#905) --- .evergreen/run-perf-tests.sh | 2 +- .evergreen/run-tests.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index d2a913c824..bc447a9569 100644 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -13,7 +13,7 @@ cd .. export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data" export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" -MTCBIN=/opt/mongodbtoolchain/v2/bin +MTCBIN=/opt/mongodbtoolchain/v3/bin VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python3" $VIRTUALENV pyperftest diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 7b9d051bd7..ade267d2b1 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -69,8 +69,8 @@ if [ -z "$PYTHON_BINARY" ]; then # system python3 doesn't exist or exists but is older than 3.6. if is_python_36 "$(command -v python3)"; then PYTHON=$(command -v python3) - elif is_python_36 "$(command -v /opt/mongodbtoolchain/v2/bin/python3)"; then - PYTHON=$(command -v /opt/mongodbtoolchain/v2/bin/python3) + elif is_python_36 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + PYTHON=$(command -v /opt/mongodbtoolchain/v3/bin/python3) else echo "Cannot test without python3.6+ installed!" fi From 9562a81903fd4520127137573292b47a24df1459 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 25 Mar 2022 23:47:18 +0000 Subject: [PATCH 0619/2111] PYTHON-3119 getMore helper should explicitly send inherited comment (#904) --- .gitignore | 1 + pymongo/aggregation.py | 1 + pymongo/collection.py | 7 +- pymongo/command_cursor.py | 5 + pymongo/cursor.py | 1 + pymongo/database.py | 1 + pymongo/message.py | 15 +- pymongo/mongo_client.py | 2 +- .../unified/change-streams.json | 179 ++++++++++++++++++ test/crud/unified/aggregate.json | 125 +++++++++++- .../unified/bulkWrite-updateMany-let.json | 2 +- .../crud/unified/bulkWrite-updateOne-let.json | 2 +- test/crud/unified/find-comment.json | 109 ++++++++++- test/test_client.py | 1 + 14 files changed, 440 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index de435d109e..f7ad6563ff 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ pymongo.egg-info/ .tox mongocryptd.pid .idea/ +.nova/ diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 84ecffe5fb..62fe4bd055 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -174,6 +174,7 @@ def get_cursor(self, session, server, sock_info, read_preference): max_await_time_ms=self._max_await_time_ms, session=session, explicit_session=self._explicit_session, + comment=self._options.get("comment"), ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor diff --git a/pymongo/collection.py b/pymongo/collection.py index d0ebd9311a..3de1210522 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2180,7 +2180,12 @@ def _cmd(session, server, sock_info, read_preference): raise cursor = {"id": 0, "firstBatch": []} cmd_cursor = CommandCursor( - coll, cursor, sock_info.address, session=session, explicit_session=explicit_session + coll, + cursor, + sock_info.address, + session=session, + explicit_session=explicit_session, + comment=cmd.get("comment"), ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 0bd99f0bbb..6f3f244419 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -43,6 +43,7 @@ def __init__( max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, explicit_session: bool = False, + comment: Any = None, ) -> None: """Create a new command cursor.""" self.__sock_mgr: Any = None @@ -56,6 +57,7 @@ def __init__( self.__session = session self.__explicit_session = explicit_session self.__killed = self.__id == 0 + self.__comment = comment if self.__killed: self.__end_session(True) @@ -224,6 +226,7 @@ def _refresh(self): self.__max_await_time_ms, self.__sock_mgr, False, + self.__comment, ) ) else: # Cursor id is zero nothing else to return @@ -314,6 +317,7 @@ def __init__( max_await_time_ms: Optional[int] = None, session: Optional["ClientSession"] = None, explicit_session: bool = False, + comment: Any = None, ) -> None: """Create a new cursor / iterator over raw batches of BSON data. @@ -332,6 +336,7 @@ def __init__( max_await_time_ms, session, explicit_session, + comment, ) def _unpack_response( diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 9f6f0898b4..350cc255bb 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1183,6 +1183,7 @@ def _refresh(self): self.__max_await_time_ms, self.__sock_mgr, self.__exhaust, + self.__comment, ) self.__send_message(g) diff --git a/pymongo/database.py b/pymongo/database.py index 17cba06b65..d3d1b274fd 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -780,6 +780,7 @@ def _list_collections(self, sock_info, session, read_preference, **kwargs): sock_info.address, session=tmp_session, explicit_session=session is not None, + comment=cmd.get("comment"), ) cmd_cursor._maybe_pin_connection(sock_info) return cmd_cursor diff --git a/pymongo/message.py b/pymongo/message.py index 6aa8e4e7f9..1fdf0ece35 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -222,13 +222,15 @@ def _gen_find_command( return cmd -def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms): +def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms, comment, sock_info): """Generate a getMore command document.""" cmd = SON([("getMore", cursor_id), ("collection", coll)]) if batch_size: cmd["batchSize"] = batch_size if max_await_time_ms is not None: cmd["maxTimeMS"] = max_await_time_ms + if comment is not None and sock_info.max_wire_version >= 9: + cmd["comment"] = comment return cmd @@ -421,6 +423,7 @@ class _GetMore(object): "sock_mgr", "_as_command", "exhaust", + "comment", ) name = "getMore" @@ -438,6 +441,7 @@ def __init__( max_await_time_ms, sock_mgr, exhaust, + comment, ): self.db = db self.coll = coll @@ -451,6 +455,7 @@ def __init__( self.sock_mgr = sock_mgr self._as_command = None self.exhaust = exhaust + self.comment = comment def namespace(self): return "%s.%s" % (self.db, self.coll) @@ -473,9 +478,13 @@ def as_command(self, sock_info): return self._as_command cmd = _gen_get_more_command( - self.cursor_id, self.coll, self.ntoreturn, self.max_await_time_ms + self.cursor_id, + self.coll, + self.ntoreturn, + self.max_await_time_ms, + self.comment, + sock_info, ) - if self.session: self.session._apply_to(cmd, False, self.read_preference, sock_info) sock_info.add_server_api(cmd) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 280818ce00..83295fccc9 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1763,7 +1763,7 @@ def list_databases( "firstBatch": res["databases"], "ns": "admin.$cmd", } - return CommandCursor(admin["$cmd"], cursor, None) + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) def list_database_names( self, diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 4aea9a4aa1..5fd2544ce0 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -247,6 +247,185 @@ ] } ] + }, + { + "description": "Test that comment is set on getMore", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": { + "key": "value" + } + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "key": "value" + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test that comment is not set on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "comment": "comment" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$changeStream": {} + } + ], + "comment": "comment" + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collection0", + "documents": [ + { + "_id": 1, + "a": 1 + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection0", + "comment": { + "$$exists": false + } + }, + "commandName": "getMore", + "databaseName": "database0" + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json index f6da8ff32f..0cbfb4e6e9 100644 --- a/test/crud/unified/aggregate.json +++ b/test/crud/unified/aggregate.json @@ -327,10 +327,131 @@ ] }, { - "description": "aggregate with comment does not set comment on getMore", + "description": "aggregate with comment sets comment on getMore", "runOnRequirements": [ { - "minServerVersion": "3.6.0" + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "cursor": { + "batchSize": 2 + }, + "comment": { + "content": "test" + } + }, + "commandName": "aggregate", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "content": "test" + } + }, + "commandName": "getMore", + "databaseName": "aggregate-tests" + } + } + ] + } + ] + }, + { + "description": "aggregate with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" } ], "operations": [ diff --git a/test/crud/unified/bulkWrite-updateMany-let.json b/test/crud/unified/bulkWrite-updateMany-let.json index 3cc8da4c53..fbeba1a607 100644 --- a/test/crud/unified/bulkWrite-updateMany-let.json +++ b/test/crud/unified/bulkWrite-updateMany-let.json @@ -142,7 +142,7 @@ "description": "BulkWrite updateMany with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.9" } ], diff --git a/test/crud/unified/bulkWrite-updateOne-let.json b/test/crud/unified/bulkWrite-updateOne-let.json index 2a3e4f79dc..96783c782f 100644 --- a/test/crud/unified/bulkWrite-updateOne-let.json +++ b/test/crud/unified/bulkWrite-updateOne-let.json @@ -144,7 +144,7 @@ "description": "BulkWrite updateOne with let option unsupported (server-side error)", "runOnRequirements": [ { - "minServerVersion": "3.6.0", + "minServerVersion": "4.2.0", "maxServerVersion": "4.9" } ], diff --git a/test/crud/unified/find-comment.json b/test/crud/unified/find-comment.json index 6000bb0172..600a3723f1 100644 --- a/test/crud/unified/find-comment.json +++ b/test/crud/unified/find-comment.json @@ -195,10 +195,115 @@ ] }, { - "description": "find with comment does not set comment on getMore", + "description": "find with comment sets comment on getMore", "runOnRequirements": [ { - "minServerVersion": "3.6" + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0", + "batchSize": 2, + "comment": { + "key": "value" + } + } + } + } + ] + } + ] + }, + { + "description": "find with comment does not set comment on getMore - pre 4.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" } ], "operations": [ diff --git a/test/test_client.py b/test/test_client.py index a0d6e22d53..7a66792873 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1344,6 +1344,7 @@ def test_stale_getmore(self): None, None, False, + None, ), unpack_res=Cursor(client.pymongo_test.collection)._unpack_response, address=("not-a-member", 27017), From e325b24b78e431cb889c5902d00b8f4af2c700c3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Mar 2022 12:18:26 -0500 Subject: [PATCH 0620/2111] PYTHON-3127 Snapshot Query Examples for the Manual (#907) --- test/test_examples.py | 87 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 86 insertions(+), 1 deletion(-) diff --git a/test/test_examples.py b/test/test_examples.py index f38e540507..b7b70463ac 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import rs_client +from test.utils import rs_client, wait_until import pymongo from pymongo.errors import ConnectionFailure, OperationFailure @@ -1297,5 +1297,90 @@ def strptime(s): # End Versioned API Example 8 +class TestSnapshotQueryExamples(IntegrationTest): + @client_context.require_version_min(5, 0) + def test_snapshot_query(self): + client = self.client + + if not client_context.is_topology_type(["replicaset", "sharded"]): + self.skipTest("Must be a sharded or replicaset") + + self.addCleanup(client.drop_database, "pets") + db = client.pets + db.drop_collection("cats") + db.drop_collection("dogs") + db.cats.insert_one({"name": "Whiskers", "color": "white", "age": 10, "adoptable": True}) + db.dogs.insert_one({"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True}) + wait_until(lambda: self.check_for_snapshot(db.cats), "success") + wait_until(lambda: self.check_for_snapshot(db.dogs), "success") + + # Start Snapshot Query Example 1 + + db = client.pets + with client.start_session(snapshot=True) as s: + adoptablePetsCount = db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], session=s + ).next()["adoptableCatsCount"] + + adoptablePetsCount += db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], session=s + ).next()["adoptableDogsCount"] + + print(adoptablePetsCount) + + # End Snapshot Query Example 1 + db = client.retail + self.addCleanup(client.drop_database, "retail") + db.drop_collection("sales") + + saleDate = datetime.datetime.now() + db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) + wait_until(lambda: self.check_for_snapshot(db.sales), "success") + + # Start Snapshot Query Example 2 + db = client.retail + with client.start_session(snapshot=True) as s: + total = db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ).next()["totalDailySales"] + + # End Snapshot Query Example 2 + + def check_for_snapshot(self, collection): + """Wait for snapshot reads to become available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + with self.client.start_session(snapshot=True) as s: + try: + with collection.aggregate([], session=s): + pass + return True + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + if __name__ == "__main__": unittest.main() From 72d8900c3612fc2ab838c2afe60c0e2680fde741 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Mar 2022 13:48:58 -0500 Subject: [PATCH 0621/2111] PYTHON-3058 Bump maxWireVersion for MongoDB 5.2 (#908) --- pymongo/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 669e12ead7..9007bbdfd2 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 14 +MAX_SUPPORTED_WIRE_VERSION = 15 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 From 75fa14d19bf3a592df2ff120dd412ad1fb565f02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 28 Mar 2022 15:09:53 -0700 Subject: [PATCH 0622/2111] PYTHON-3084 MongoClient/Database/Collection should not implement Iterable (#909) --- pymongo/collection.py | 4 ++-- pymongo/database.py | 4 ++-- pymongo/mongo_client.py | 4 ++-- test/test_client.py | 26 +++++++++++++++++++++----- test/test_collection.py | 23 +++++++++++++++++++++-- test/test_database.py | 23 +++++++++++++++++++++-- 6 files changed, 69 insertions(+), 15 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index 3de1210522..f382628aa8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -3146,8 +3146,8 @@ def find_one_and_update( **kwargs, ) - def __iter__(self) -> "Collection[_DocumentType]": - return self + # See PYTHON-3084. + __iter__ = None def __next__(self) -> NoReturn: raise TypeError("'Collection' object is not iterable") diff --git a/pymongo/database.py b/pymongo/database.py index d3d1b274fd..b5770b0db9 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1009,8 +1009,8 @@ def validate_collection( return result - def __iter__(self) -> "Database[_DocumentType]": - return self + # See PYTHON-3084. + __iter__ = None def __next__(self) -> NoReturn: raise TypeError("'Database' object is not iterable") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 83295fccc9..8781cb1f01 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1973,8 +1973,8 @@ def __enter__(self) -> "MongoClient[_DocumentType]": def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def __iter__(self) -> "MongoClient[_DocumentType]": - return self + # See PYTHON-3084. + __iter__ = None def __next__(self) -> NoReturn: raise TypeError("'MongoClient' object is not iterable") diff --git a/test/test_client.py b/test/test_client.py index 7a66792873..5958ff6d52 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -26,7 +26,7 @@ import sys import threading import time -from typing import Type, no_type_check +from typing import Iterable, Type, no_type_check sys.path[0:0] = [""] @@ -210,10 +210,26 @@ def test_getattr(self): self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): - def iterate(): - [a for a in self.client] - - self.assertRaises(TypeError, iterate) + client = self.client + if "PyPy" in sys.version: + msg = "'NoneType' object is not callable" + else: + msg = "'MongoClient' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in client: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = client[0] + # next fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = next(client) + # .next() fails + with self.assertRaisesRegex(TypeError, "'MongoClient' object is not iterable"): + _ = client.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(client, Iterable) def test_get_default_database(self): c = rs_or_single_client( diff --git a/test/test_collection.py b/test/test_collection.py index 47636b495f..6319321045 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -21,7 +21,7 @@ import sys from codecs import utf_8_decode # type: ignore from collections import defaultdict -from typing import no_type_check +from typing import Iterable, no_type_check from pymongo.database import Database @@ -124,7 +124,26 @@ def test_getattr(self): self.assertEqual(coll2.write_concern, coll4.write_concern) def test_iteration(self): - self.assertRaises(TypeError, next, self.db) + coll = self.db.coll + if "PyPy" in sys.version: + msg = "'NoneType' object is not callable" + else: + msg = "'Collection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) class TestCollection(IntegrationTest): diff --git a/test/test_database.py b/test/test_database.py index 8844046ad1..58cbe54335 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -16,7 +16,7 @@ import re import sys -from typing import Any, List, Mapping +from typing import Any, Iterable, List, Mapping sys.path[0:0] = [""] @@ -94,7 +94,26 @@ def test_getattr(self): self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): - self.assertRaises(TypeError, next, self.client.pymongo_test) + db = self.client.pymongo_test + if "PyPy" in sys.version: + msg = "'NoneType' object is not callable" + else: + msg = "'Database' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in db: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = db[0] + # next fails + with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + _ = next(db) + # .next() fails + with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + _ = db.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(db, Iterable) class TestDatabase(IntegrationTest): From c15fce0b3c1dab22e4434365b697ad38d0f23c5a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 28 Mar 2022 15:23:00 -0700 Subject: [PATCH 0623/2111] PYTHON-3138 copydb was removed in MongoDB 4.2 (#910) --- doc/examples/copydb.rst | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst index 5cf5c66ded..27f1912c6e 100644 --- a/doc/examples/copydb.rst +++ b/doc/examples/copydb.rst @@ -1,8 +1,37 @@ Copying a Database ================== -To copy a database within a single mongod process, or between mongod -servers, simply connect to the target mongod and use the +MongoDB >= 4.2 +-------------- + +Starting in MongoDB version 4.2, the server removes the deprecated ``copydb`` command. +As an alternative, users can use ``mongodump`` and ``mongorestore`` (with the ``mongorestore`` +options ``--nsFrom`` and ``--nsTo``). + +For example, to copy the ``test`` database from a local instance running on the +default port 27017 to the ``examples`` database on the same instance, you can: + +#. Use ``mongodump`` to dump the test database to an archive ``mongodump-test-db``:: + + mongodump --archive="mongodump-test-db" --db=test + +#. Use ``mongorestore`` with ``--nsFrom`` and ``--nsTo`` to restore (with database name change) + from the archive:: + + mongorestore --archive="mongodump-test-db" --nsFrom='test.*' --nsTo='examples.*' + +Include additional options as necessary, such as to specify the uri or host, username, +password and authentication database. + +For more info about using ``mongodump`` and ``mongorestore`` see the `Copy a Database`_ example +in the official ``mongodump`` documentation. + +MongoDB <= 4.0 +-------------- + +When using MongoDB <= 4.0, it is possible to use the deprecated ``copydb`` command +to copy a database. To copy a database within a single ``mongod`` process, or +between ``mongod`` servers, connect to the target ``mongod`` and use the :meth:`~pymongo.database.Database.command` method:: >>> from pymongo import MongoClient @@ -39,3 +68,6 @@ but it has been removed. .. _copyDatabase function in the mongo shell: http://docs.mongodb.org/manual/reference/method/db.copyDatabase/ + +.. _Copy a Database: + https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database From d8c2b315b0ae9fb7e260f0224293605d249c460f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Mar 2022 14:59:33 -0500 Subject: [PATCH 0624/2111] PYTHON-3185 Pre-Commit Needs an Upgrade (#911) --- .pre-commit-config.yaml | 4 ++-- doc/changelog.rst | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2fc5100787..8b6671d41d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,7 +17,7 @@ repos: exclude_types: [json] - repo: https://github.com/psf/black - rev: 22.1.0 + rev: 22.3.0 hooks: - id: black files: \.py$ @@ -51,7 +51,7 @@ repos: args: ["--severity=warning"] - repo: https://github.com/sirosen/check-jsonschema - rev: 0.11.0 + rev: 0.14.1 hooks: - id: check-jsonschema name: "Check GitHub Workflows" diff --git a/doc/changelog.rst b/doc/changelog.rst index d326c24b32..0bacb1bb79 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -2894,7 +2894,7 @@ highlights is `here - added support for :class:`~pymongo.cursor.Cursor.max_scan`. - raise :class:`~gridfs.errors.FileExists` exception when creating a duplicate GridFS file. -- use `y2038 `_ for time handling in +- use `y2038 `_ for time handling in the C extension - eliminates 2038 problems when extension is installed. - added `sort` parameter to From a4bba9dd5c60842c5cd69900a552f7c1288e5149 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 29 Mar 2022 13:45:27 -0700 Subject: [PATCH 0625/2111] Revert "PYTHON-2970 Prioritize electionId over setVersion for stale primary check (#845)" This reverts commit 225d131c2d3f6f0b4c46c130abb3e1452010ad40. --- doc/changelog.rst | 41 ++--- pymongo/topology_description.py | 29 ++-- .../rs/electionId_precedence_setVersion.json | 92 ----------- .../rs/null_election_id.json | 30 ++-- .../rs/secondary_ignore_ok_0.json | 2 +- .../rs/set_version_can_rollback.json | 149 ------------------ ...tversion_equal_max_without_electionid.json | 84 ---------- ...on_greaterthan_max_without_electionid.json | 84 ---------- .../rs/setversion_without_electionid.json | 12 +- .../rs/use_setversion_without_electionid.json | 32 ++-- 10 files changed, 67 insertions(+), 488 deletions(-) delete mode 100644 test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json delete mode 100644 test/discovery_and_monitoring/rs/set_version_can_rollback.json delete mode 100644 test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json delete mode 100644 test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 0bacb1bb79..d263d4534e 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,40 +4,33 @@ Changelog Changes in Version 4.1 ---------------------- +.. warning:: PyMongo 4.1 drops support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. + PyMongo 4.1 brings a number of improvements including: -- :meth:`pymongo.collection.Collection.update_one`, - :meth:`pymongo.collection.Collection.update_many`, - :meth:`pymongo.collection.Collection.delete_one`, - :meth:`pymongo.collection.Collection.delete_many`, - :meth:`pymongo.collection.Collection.aggregate`, - :meth:`pymongo.collection.Collection.find_one_and_delete`, - :meth:`pymongo.collection.Collection.find_one_and_replace`, - :meth:`pymongo.collection.Collection.find_one_and_update`, - :meth:`pymongo.collection.Collection.find`, - and :meth:`pymongo.collection.Collection.replace_one `all support a new - keyword argument ``let`` which is a map of parameter names and values. +- Added support for the ``let`` parameter to + :meth:`~pymongo.collection.Collection.update_one`, + :meth:`~pymongo.collection.Collection.update_many`, + :meth:`~pymongo.collection.Collection.delete_one`, + :meth:`~pymongo.collection.Collection.delete_many`, + :meth:`~pymongo.collection.Collection.replace_one`, + :meth:`~pymongo.collection.Collection.aggregate`, + :meth:`~pymongo.collection.Collection.find_one_and_delete`, + :meth:`~pymongo.collection.Collection.find_one_and_replace`, + :meth:`~pymongo.collection.Collection.find_one_and_update`, + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, + and :meth:`~pymongo.collection.Collection.bulk_write`. + ``let`` is a map of parameter names and values. Parameters can then be accessed as variables in an aggregate expression context. - :meth:`~pymongo.collection.Collection.aggregate` now supports $merge and $out executing on secondaries on MongoDB >=5.0. aggregate() now always obeys the collection's :attr:`read_preference` on MongoDB >= 5.0. -- :meth:`gridfs.GridOut.seek` now returns the new position in the file, to +- :meth:`gridfs.grid_file.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. -Breaking Changes in 4.1 -....................... -- Removed support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. - -Bug fixes -......... - -- Fixed a bug where the client could be unable to discover the new primary - after a simultaneous replica set election and reconfig (`PYTHON-2970`_). - -.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 - Issues Resolved ............... diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 9f718376ef..b3dd60680f 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -17,7 +17,6 @@ from random import sample from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple -from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError @@ -532,16 +531,24 @@ def _update_rs_from_primary( sds.pop(server_description.address) return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - new_election_tuple = server_description.election_id, server_description.set_version - max_election_tuple = max_election_id, max_set_version - new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) - max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) - if new_election_safe >= max_election_safe: - max_election_id, max_set_version = new_election_tuple - else: - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_tuple = max_set_version, max_election_id + if None not in server_description.election_tuple: + if ( + None not in max_election_tuple + and max_election_tuple > server_description.election_tuple + ): + + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) + + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + + max_set_version = server_description.set_version # We've heard from the primary. Is it the same primary as before? for server in sds.values(): diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json deleted file mode 100644 index a7b49e2b97..0000000000 --- a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "description": "ElectionId is considered higher precedence than setVersion", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ], - [ - "b:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ], - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "setVersion": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1, - "maxElectionId": { - "$oid": "000000000000000000000002" - } - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 8eb519595a..62120e8448 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -123,18 +123,15 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, - "setVersion": null, - "electionId": null - }, - "b:27017": { "type": "RSPrimary", "setName": "rs", "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null }, "c:27017": { "type": "Unknown", @@ -177,18 +174,15 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, - "setVersion": null, - "electionId": null - }, - "b:27017": { "type": "RSPrimary", "setName": "rs", "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null }, "c:27017": { "type": "Unknown", diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index ee9519930b..4c1cb011a5 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "Secondary ignored when ok is zero", + "description": "New primary", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json deleted file mode 100644 index 28ecbeefca..0000000000 --- a/test/discovery_and_monitoring/rs/set_version_can_rollback.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "description": "Set version rolls back after new primary with higher election Id", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "hello": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - } - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 2, - "maxElectionId": { - "$oid": "000000000000000000000001" - } - } - }, - { - "_comment": "Response from new primary with newer election Id", - "responses": [ - [ - "b:27017", - { - "ok": 1, - "hello": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1, - "maxElectionId": { - "$oid": "000000000000000000000002" - } - } - }, - { - "_comment": "Response from stale primary", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "hello": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "electionId": { - "$oid": "000000000000000000000001" - }, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1, - "maxElectionId": { - "$oid": "000000000000000000000002" - } - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json deleted file mode 100644 index 91e84d4fa0..0000000000 --- a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1 - } - }, - { - "responses": [ - [ - "b:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1 - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json deleted file mode 100644 index b15fd5c1a7..0000000000 --- a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 1, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 1 - } - }, - { - "responses": [ - [ - "b:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017", - "b:27017" - ], - "setName": "rs", - "setVersion": 2, - "minWireVersion": 0, - "maxWireVersion": 6 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2, - "electionId": null - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs", - "maxSetVersion": 2 - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index f59c162ae1..2f68287f1d 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", + "description": "setVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -63,14 +63,14 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2, + "type": "Unknown", + "setName": null, "electionId": null }, "b:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 6dd753d5d8..421ff57c8d 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -71,23 +71,20 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000001" - } - }, - "b:27017": { "type": "Unknown", "setName": null, "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 1, + "maxSetVersion": 2, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -118,25 +115,22 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": { - "$oid": "000000000000000000000002" - } - }, - "b:27017": { "type": "Unknown", "setName": null, "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 1, + "maxSetVersion": 2, "maxElectionId": { - "$oid": "000000000000000000000002" + "$oid": "000000000000000000000001" } } } From 1d30802f8c7f997d1de482e537f9d88bc85655e3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Mar 2022 18:03:42 -0500 Subject: [PATCH 0626/2111] PYTHON-3074 Add documentation for type hints (#906) --- doc/changelog.rst | 1 + doc/examples/index.rst | 1 + doc/examples/type_hints.rst | 243 ++++++++++++++++++++++++++++ doc/index.rst | 3 + pymongo/common.py | 10 +- test/mypy_fails/insert_many_dict.py | 2 +- test/mypy_fails/insert_one_list.py | 2 +- test/test_mypy.py | 3 + 8 files changed, 262 insertions(+), 3 deletions(-) create mode 100644 doc/examples/type_hints.rst diff --git a/doc/changelog.rst b/doc/changelog.rst index d263d4534e..ab895fad51 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,6 +8,7 @@ Changes in Version 4.1 PyMongo 4.1 brings a number of improvements including: +- Type Hinting support (formerly provided by ``pymongo-stubs``). See :doc:`examples/type_hints` for more information. - Added support for the ``let`` parameter to :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.update_many`, diff --git a/doc/examples/index.rst b/doc/examples/index.rst index f8828cdfd7..6cdeafc201 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -31,5 +31,6 @@ MongoDB, you can start it like so: server_selection tailable tls + type_hints encryption uuid diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst new file mode 100644 index 0000000000..029761bc75 --- /dev/null +++ b/doc/examples/type_hints.rst @@ -0,0 +1,243 @@ + +.. _type_hints-example: + +Type Hints +=========== + +As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python +type checkers can easily find bugs before they reveal themselves in your code. + +If your IDE is configured to use type hints, +it can suggest more appropriate completions and highlight errors in your code. +Some examples include `PyCharm`_, `Sublime Text`_, and `Visual Studio Code`_. + +You can also use the `mypy`_ tool from your command line or in Continuous Integration tests. + +All of the public APIs in PyMongo are fully type hinted, and +several of them support generic parameters for the +type of document object returned when decoding BSON documents. + +Due to `limitations in mypy`_, the default +values for generic document types are not yet provided (they will eventually be ``Dict[str, any]``). + +For a larger set of examples that use types, see the PyMongo `test_mypy module`_. + +If you would like to opt out of using the provided types, add the following to +your `mypy config`_: :: + + [mypy-pymongo] + follow_imports = False + + +Basic Usage +----------- + +Note that a type for :class:`~pymongo.mongo_client.MongoClient` must be specified. Here we use the +default, unspecified document type: + +.. doctest:: + + >>> from pymongo import MongoClient + >>> client: MongoClient = MongoClient() + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) + >>> retrieved = collection.find_one({"x": 1}) + >>> assert isinstance(retrieved, dict) + +For a more accurate typing for document type you can use: + +.. doctest:: + + >>> from typing import Any, Dict + >>> from pymongo import MongoClient + >>> client: MongoClient[Dict[str, Any]] = MongoClient() + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) + >>> retrieved = collection.find_one({"x": 1}) + >>> assert isinstance(retrieved, dict) + +Typed Client +------------ + +:class:`~pymongo.mongo_client.MongoClient` is generic on the document type used to decode BSON documents. + +You can specify a :class:`~bson.raw_bson.RawBSONDocument` document type: + +.. doctest:: + + >>> from pymongo import MongoClient + >>> from bson.raw_bson import RawBSONDocument + >>> client = MongoClient(document_class=RawBSONDocument) + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) + >>> result = collection.find_one({"x": 1}) + >>> assert isinstance(result, RawBSONDocument) + +Subclasses of :py:class:`collections.abc.Mapping` can also be used, such as :class:`~bson.son.SON`: + +.. doctest:: + + >>> from bson import SON + >>> from pymongo import MongoClient + >>> client = MongoClient(document_class=SON[str, int]) + >>> collection = client.test.test + >>> inserted = collection.insert_one({"x": 1, "y": 2 }) + >>> result = collection.find_one({"x": 1}) + >>> assert result is not None + >>> assert result["x"] == 1 + +Note that when using :class:`~bson.son.SON`, the key and value types must be given, e.g. ``SON[str, Any]``. + + +Typed Collection +---------------- + +You can use :py:class:`~typing.TypedDict` when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`: + +.. doctest:: + + >>> from typing import TypedDict + >>> from pymongo import MongoClient, Collection + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 }) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> assert result["year"] == 1993 + +Typed Database +-------------- + +While less common, you could specify that the documents in an entire database +match a well-defined shema using :py:class:`~typing.TypedDict`. + + +.. doctest:: + + >>> from typing import TypedDict + >>> from pymongo import MongoClient, Database + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... + >>> client: MongoClient = MongoClient() + >>> db: Database[Movie] = client.test + >>> collection = db.test + >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 }) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> assert result["year"] == 1993 + +Typed Command +------------- +When using the :meth:`~pymongo.database.Database.command`, you can specify the document type by providing a custom :class:`~bson.codec_options.CodecOptions`: + +.. doctest:: + + >>> from pymongo import MongoClient + >>> from bson.raw_bson import RawBSONDocument + >>> from bson import CodecOptions + >>> client: MongoClient = MongoClient() + >>> options = CodecOptions(RawBSONDocument) + >>> result = client.admin.command("ping", codec_options=options) + >>> assert isinstance(result, RawBSONDocument) + +Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` are also supported. +For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``. + +Typed BSON Decoding +------------------- +You can specify the document type returned by :mod:`bson` decoding functions by providing :class:`~bson.codec_options.CodecOptions`: + +.. doctest:: + + >>> from typing import Any, Dict + >>> from bson import CodecOptions, encode, decode + >>> class MyDict(Dict[str, Any]): + ... def foo(self): + ... return "bar" + ... + >>> options = CodecOptions(document_class=MyDict) + >>> doc = {"x": 1, "y": 2 } + >>> bsonbytes = encode(doc, codec_options=options) + >>> rt_document = decode(bsonbytes, codec_options=options) + >>> assert rt_document.foo() == "bar" + +:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` are also supported. +For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``. + + +Troubleshooting +--------------- + +Client Type Annotation +~~~~~~~~~~~~~~~~~~~~~~ +If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the followig ``mypy`` error:: + + from pymongo import MongoClient + client = MongoClient() # error: Need type annotation for "client" + +The solution is to annotate the type as ``client: MongoClient`` or ``client: MongoClient[Dict[str, Any]]``. See `Basic Usage`_. + +Incompatible Types +~~~~~~~~~~~~~~~~~~ +If you use the generic form of :class:`~pymongo.mongo_client.MongoClient` you +may encounter a ``mypy`` error like:: + + from pymongo import MongoClient + + client: MongoClient = MongoClient() + client.test.test.insert_many( + {"a": 1} + ) # error: Dict entry 0 has incompatible type "str": "int"; + # expected "Mapping[str, Any]": "int" + + +The solution is to use ``client: MongoClient[Dict[str, Any]]`` as used in +`Basic Usage`_ . + +Actual Type Errors +~~~~~~~~~~~~~~~~~~ + +Other times ``mypy`` will catch an actual error, like the following code:: + + from pymongo import MongoClient + from typing import Mapping + client: MongoClient = MongoClient() + client.test.test.insert_one( + [{}] + ) # error: Argument 1 to "insert_one" of "Collection" has + # incompatible type "List[Dict[, ]]"; + # expected "Mapping[str, Any]" + +In this case the solution is to use ``insert_one({})``, passing a document instead of a list. + +Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocument`, which is read-only.:: + + from bson.raw_bson import RawBSONDocument + from pymongo import MongoClient + + client = MongoClient(document_class=RawBSONDocument) + coll = client.test.test + doc = {"my": "doc"} + coll.insert_one(doc) + retreived = coll.find_one({"_id": doc["_id"]}) + assert retreived is not None + assert len(retreived.raw) > 0 + retreived[ + "foo" + ] = "bar" # error: Unsupported target for indexed assignment + # ("RawBSONDocument") [index] + +.. _PyCharm: https://www.jetbrains.com/help/pycharm/type-hinting-in-product.html +.. _Visual Studio Code: https://code.visualstudio.com/docs/languages/python +.. _Sublime Text: https://github.com/sublimelsp/LSP-pyright +.. _type hints: https://docs.python.org/3/library/typing.html +.. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html +.. _limitations in mypy: https://github.com/python/mypy/issues/3737 +.. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html +.. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py diff --git a/doc/index.rst b/doc/index.rst index 8fd357b4cd..b6e510ad33 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -28,6 +28,9 @@ everything you need to know to use **PyMongo**. :doc:`examples/encryption` Using PyMongo with client side encryption. +:doc:`examples/type_hints` + Using PyMongo with type hints. + :doc:`faq` Some questions that come up often. diff --git a/pymongo/common.py b/pymongo/common.py index 9007bbdfd2..5a6ffbd369 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -448,7 +448,15 @@ def validate_document_class( option: str, value: Any ) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: """Validate the document_class option.""" - if not issubclass(value, (abc.MutableMapping, RawBSONDocument)): + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. + is_mapping = False + try: + is_mapping = issubclass(value, abc.MutableMapping) + except TypeError: + if hasattr(value, "__origin__"): + is_mapping = issubclass(value.__origin__, abc.MutableMapping) + if not is_mapping and not issubclass(value, RawBSONDocument): raise TypeError( "%s must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py index 6e8acb67b4..7cbabc28f0 100644 --- a/test/mypy_fails/insert_many_dict.py +++ b/test/mypy_fails/insert_many_dict.py @@ -1,6 +1,6 @@ from pymongo import MongoClient -client = MongoClient() +client: MongoClient = MongoClient() client.test.test.insert_many( {"a": 1} ) # error: Dict entry 0 has incompatible type "str": "int"; expected "Mapping[str, Any]": "int" diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py index 7a26a3ff79..12079ffc6d 100644 --- a/test/mypy_fails/insert_one_list.py +++ b/test/mypy_fails/insert_one_list.py @@ -1,6 +1,6 @@ from pymongo import MongoClient -client = MongoClient() +client: MongoClient = MongoClient() client.test.test.insert_one( [{}] ) # error: Argument 1 to "insert_one" of "Collection" has incompatible type "List[Dict[, ]]"; expected "Mapping[str, Any]" diff --git a/test/test_mypy.py b/test/test_mypy.py index 6cf3eb2c87..12a6cffbe6 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -309,6 +309,9 @@ def test_son_document_type(self) -> None: assert retreived is not None retreived["a"] = 1 + def test_son_document_type_runtime(self) -> None: + client = MongoClient(document_class=SON[str, Any], connect=False) + class TestCommandDocumentType(unittest.TestCase): @only_type_check From c58950a8d4fd3d1238f08944d5d3e04bde6f1e46 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Mar 2022 14:29:46 -0700 Subject: [PATCH 0627/2111] PYTHON-3186 Avoid SDAM heartbeat timeouts on AWS Lambda (#912) Poll monitor socket with timeout=0 one last time after timeout expires. This avoids heartbeat timeouts and connection churn on Lambda and other FaaS envs. --- pymongo/network.py | 11 +++++- test/__init__.py | 23 +++++++++-- test/sigstop_sigcont.py | 85 +++++++++++++++++++++++++++++++++++++++++ test/test_client.py | 34 +++++++++++++++++ 4 files changed, 148 insertions(+), 5 deletions(-) create mode 100644 test/sigstop_sigcont.py diff --git a/pymongo/network.py b/pymongo/network.py index 01dca0b835..df08158b2f 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -244,6 +244,7 @@ def wait_for_read(sock_info, deadline): # Only Monitor connections can be cancelled. if context: sock = sock_info.sock + timed_out = False while True: # SSLSocket can have buffered data which won't be caught by select. if hasattr(sock, "pending") and sock.pending() > 0: @@ -252,7 +253,13 @@ def wait_for_read(sock_info, deadline): # Wait up to 500ms for the socket to become readable and then # check for cancellation. if deadline: - timeout = max(min(deadline - time.monotonic(), _POLL_TIMEOUT), 0.001) + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) else: timeout = _POLL_TIMEOUT readable = sock_info.socket_checker.select(sock, read=True, timeout=timeout) @@ -260,7 +267,7 @@ def wait_for_read(sock_info, deadline): raise _OperationCancelled("hello cancelled") if readable: return - if deadline and time.monotonic() > deadline: + if timed_out: raise socket.timeout("timed out") diff --git a/test/__init__.py b/test/__init__.py index c432b26098..ee6e3ca509 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -44,6 +44,7 @@ from test.version import Version from typing import Dict, no_type_check from unittest import SkipTest +from urllib.parse import quote_plus import pymongo import pymongo.errors @@ -279,6 +280,22 @@ def client_options(self): opts["replicaSet"] = self.replica_set_name return opts + @property + def uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + return f"mongodb://{auth_part}{self.pair}/?{opts_part}" + @property def hello(self): if not self._hello: @@ -359,7 +376,7 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options + **self.default_client_options, ) # May not have this if OperationFailure was raised earlier. @@ -387,7 +404,7 @@ def _init_client(self): username=db_user, password=db_pwd, replicaSet=self.replica_set_name, - **self.default_client_options + **self.default_client_options, ) else: self.client = pymongo.MongoClient( @@ -490,7 +507,7 @@ def _check_user_provided(self): username=db_user, password=db_pwd, serverSelectionTimeoutMS=100, - **self.default_client_options + **self.default_client_options, ) try: diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py new file mode 100644 index 0000000000..ef4730f0bf --- /dev/null +++ b/test/sigstop_sigcont.py @@ -0,0 +1,85 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Used by test_client.TestClient.test_sigstop_sigcont.""" + +import logging +import sys + +sys.path[0:0] = [""] + +from pymongo import monitoring +from pymongo.mongo_client import MongoClient + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """Log events until the listener is closed.""" + + def __init__(self): + self.closed = False + + def close(self): + self.closed = True + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + if self.closed: + return + logging.info("%s", event) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + if self.closed: + return + logging.warning("%s", event) + + +def main(uri: str) -> None: + heartbeat_logger = HeartbeatLogger() + client = MongoClient( + uri, + event_listeners=[heartbeat_logger], + heartbeatFrequencyMS=500, + connectTimeoutMS=500, + ) + client.admin.command("ping") + logging.info("TEST STARTED") + # test_sigstop_sigcont will SIGSTOP and SIGCONT this process in this loop. + while True: + try: + data = input('Type "q" to quit: ') + except EOFError: + break + if data == "q": + break + client.admin.command("ping") + logging.info("TEST COMPLETED") + heartbeat_logger.close() + client.close() + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("unknown or missing options") + print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") + exit(1) + + # Enable logs in this format: + # 2022-03-30 12:40:55,582 INFO + FORMAT = "%(asctime)s %(levelname)s %(message)s" + logging.basicConfig(format=FORMAT, level=logging.INFO) + main(sys.argv[1]) diff --git a/test/test_client.py b/test/test_client.py index 5958ff6d52..40f276a9db 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -23,6 +23,7 @@ import signal import socket import struct +import subprocess import sys import threading import time @@ -1688,6 +1689,39 @@ def test_srv_max_hosts_kwarg(self): ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) + @unittest.skipIf( + client_context.load_balancer or client_context.serverless, + "loadBalanced clients do not run SDAM", + ) + @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + def test_sigstop_sigcont(self): + test_dir = os.path.dirname(os.path.realpath(__file__)) + script = os.path.join(test_dir, "sigstop_sigcont.py") + p = subprocess.Popen( + [sys.executable, script, client_context.uri], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + self.addCleanup(p.wait, timeout=1) + self.addCleanup(p.kill) + time.sleep(1) + # Stop the child, sleep for twice the streaming timeout + # (heartbeatFrequencyMS + connectTimeoutMS), and restart. + os.kill(p.pid, signal.SIGSTOP) + time.sleep(2) + os.kill(p.pid, signal.SIGCONT) + time.sleep(0.5) + # Tell the script to exit gracefully. + outs, _ = p.communicate(input=b"q\n", timeout=10) + self.assertTrue(outs) + log_output = outs.decode("utf-8") + self.assertIn("TEST STARTED", log_output) + self.assertIn("ServerHeartbeatStartedEvent", log_output) + self.assertIn("ServerHeartbeatSucceededEvent", log_output) + self.assertIn("TEST COMPLETED", log_output) + self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From 3179eab91d1bcb6d54527444114ab810ca772c9f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Mar 2022 18:57:12 -0700 Subject: [PATCH 0628/2111] Improve the changelog for 4.1 (#915) --- doc/changelog.rst | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index ab895fad51..28c467a299 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,7 +8,9 @@ Changes in Version 4.1 PyMongo 4.1 brings a number of improvements including: -- Type Hinting support (formerly provided by ``pymongo-stubs``). See :doc:`examples/type_hints` for more information. +- Type Hinting support (formerly provided by `pymongo-stubs`_). See :doc:`examples/type_hints` for more information. +- Added support for the ``comment`` parameter to all helpers. For example see + :meth:`~pymongo.collection.Collection.insert_one`. - Added support for the ``let`` parameter to :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.update_many`, @@ -31,6 +33,16 @@ PyMongo 4.1 brings a number of improvements including: MongoDB >= 5.0. - :meth:`gridfs.grid_file.GridOut.seek` now returns the new position in the file, to conform to the behavior of :meth:`io.IOBase.seek`. +- Improved reuse of implicit sessions (`PYTHON-2956`_). + +Bug fixes +......... + +- Fixed bug that would cause SDAM heartbeat timeouts and connection churn on + AWS Lambda and other FaaS environments (`PYTHON-3186`_). +- Fixed bug where :class:`~pymongo.mongo_client.MongoClient`, + :class:`~pymongo.database.Database`, and :class:`~pymongo.collection.Collection` + mistakenly implemented :class:`typing.Iterable` (`PYTHON-3084`_). Issues Resolved ............... @@ -39,6 +51,10 @@ See the `PyMongo 4.1 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30619 +.. _PYTHON-2956: https://jira.mongodb.org/browse/PYTHON-2956 +.. _PYTHON-3084: https://jira.mongodb.org/browse/PYTHON-3084 +.. _PYTHON-3186: https://jira.mongodb.org/browse/PYTHON-3186 +.. _pymongo-stubs: https://github.com/mongodb-labs/pymongo-stubs Changes in Version 4.0 ---------------------- From 484058e18d95793a95d3d7ad5a4e7f7e3f6161d9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 30 Mar 2022 21:50:22 -0700 Subject: [PATCH 0629/2111] PYTHON-3160 Fix MMAPv1 tests (#914) --- .../driver-sessions-dirty-session-errors.json | 1 - test/test_session.py | 10 ++++----- test/unified_format.py | 21 ++++++++++++++++++- 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json index 88a9171db1..361ea83d7b 100644 --- a/test/sessions/driver-sessions-dirty-session-errors.json +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -448,7 +448,6 @@ "name": "insertOne", "object": "collection0", "arguments": { - "session": "session0", "document": { "_id": 2 } diff --git a/test/test_session.py b/test/test_session.py index 53609c70cb..e6f15de6bf 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -183,12 +183,11 @@ def test_implicit_sessions_checkout(self): # "To confirm that implicit sessions only allocate their server session after a # successful connection checkout" test from Driver Sessions Spec. succeeded = False + lsid_set = set() failures = 0 for _ in range(5): listener = EventListener() - client = rs_or_single_client( - event_listeners=[listener], maxPoolSize=1, retryWrites=True - ) + client = rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -225,7 +224,7 @@ def thread_target(op, *args): thread.join() self.assertIsNone(thread.exc) client.close() - lsid_set = set() + lsid_set.clear() for i in listener.results["started"]: if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) @@ -233,8 +232,7 @@ def thread_target(op, *args): succeeded = True else: failures += 1 - print(failures) - self.assertTrue(succeeded) + self.assertTrue(succeeded, lsid_set) def test_pool_lifo(self): # "Pool is LIFO" test from Driver Sessions Spec. diff --git a/test/unified_format.py b/test/unified_format.py index 5bf98c5451..adfd0cac0a 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -766,11 +766,30 @@ def setUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here if client_context.storage_engine == "mmapv1": - if "Dirty explicit session is discarded" in spec["description"]: + if ( + "Dirty explicit session is discarded" in spec["description"] + or "Dirty implicit session is discarded" in spec["description"] + ): raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") elif "Client side error in command starting transaction" in spec["description"]: raise unittest.SkipTest("Implement PYTHON-1894") + # Some tests need to be skipped based on the operations they try to run. + for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if client_context.storage_engine == "mmapv1": + if name == "createChangeStream": + self.skipTest("MMAPv1 does not support change streams") + if name == "withTransaction" or name == "startTransaction": + self.skipTest("MMAPv1 does not support document-level locking") + if not client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + def process_error(self, exception, spec): is_error = spec.get("isError") is_client_error = spec.get("isClientError") From 113d66dc181795d4182f4d94e7c0a9c8a8ef733e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 31 Mar 2022 04:31:05 -0500 Subject: [PATCH 0630/2111] PYTHON-2406 Clean up of tools documentation page (#913) --- doc/tools.rst | 56 +++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/doc/tools.rst b/doc/tools.rst index 69ee64448b..e88b57ee69 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -26,26 +26,13 @@ needs. Even if you eventually come to the decision to use one of these layers, the time spent working directly with the driver will have increased your understanding of how MongoDB actually works. -PyMODM - `PyMODM `_ is an ORM-like framework on top - of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick - to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it - provides simple, extensible functionality that can be leveraged by other - libraries to target platforms like Django. At the same time, PyMODM is - powerful enough to be used for developing applications on its own. Complete - documentation is available on `readthedocs - `_. - -Humongolus - `Humongolus `_ is a lightweight ORM - framework for Python and MongoDB. The name comes from the combination of - MongoDB and `Homunculus `_ (the - concept of a miniature though fully formed human body). Humongolus allows - you to create models/schemas with robust validation. It attempts to be as - pythonic as possible and exposes the pymongo cursor objects whenever - possible. The code is available for download - `at GitHub `_. Tutorials and usage - examples are also available at GitHub. +MongoEngine + `MongoEngine `_ is another ORM-like + layer on top of PyMongo. It allows you to define schemas for + documents and query collections using syntax inspired by the Django + ORM. The code is available on `GitHub + `_; for more information, see + the `tutorial `_. MincePy `MincePy `_ is an @@ -65,14 +52,6 @@ Ming `_ for more details. -MongoEngine - `MongoEngine `_ is another ORM-like - layer on top of PyMongo. It allows you to define schemas for - documents and query collections using syntax inspired by the Django - ORM. The code is available on `GitHub - `_; for more information, see - the `tutorial `_. - MotorEngine `MotorEngine `_ is a port of MongoEngine to Motor, for asynchronous access with Tornado. @@ -91,6 +70,16 @@ uMongo No longer maintained """""""""""""""""""" +PyMODM + `PyMODM `_ is an ORM-like framework on top + of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick + to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it + provides simple, extensible functionality that can be leveraged by other + libraries to target platforms like Django. At the same time, PyMODM is + powerful enough to be used for developing applications on its own. Complete + documentation is available on `readthedocs + `_. + MongoKit The `MongoKit `_ framework is an ORM-like layer on top of PyMongo. There is also a MongoKit @@ -116,6 +105,17 @@ Manga Django ORM, but Pymongo's query language is maintained. The source `is on GitHub `_. +Humongolus + `Humongolus `_ is a lightweight ORM + framework for Python and MongoDB. The name comes from the combination of + MongoDB and `Homunculus `_ (the + concept of a miniature though fully formed human body). Humongolus allows + you to create models/schemas with robust validation. It attempts to be as + pythonic as possible and exposes the pymongo cursor objects whenever + possible. The code is available for download + `at GitHub `_. Tutorials and usage + examples are also available at GitHub. + Framework Tools --------------- This section lists tools and adapters that have been designed to work with From 1d6914f749baa6e538a7c6f327eb626d6c97a206 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 31 Mar 2022 12:25:45 -0700 Subject: [PATCH 0631/2111] PYTHON-3191 Fix test_sigstop_sigcont with Versioned API (#916) --- test/__init__.py | 1 + test/sigstop_sigcont.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/test/__init__.py b/test/__init__.py index ee6e3ca509..3800c7890e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -284,6 +284,7 @@ def client_options(self): def uri(self): """Return the MongoClient URI for creating a duplicate client.""" opts = client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI opts_parts = [] for opt, val in opts.items(): strval = str(val) diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py index ef4730f0bf..87b4f62038 100644 --- a/test/sigstop_sigcont.py +++ b/test/sigstop_sigcont.py @@ -15,12 +15,19 @@ """Used by test_client.TestClient.test_sigstop_sigcont.""" import logging +import os import sys sys.path[0:0] = [""] from pymongo import monitoring from pymongo.mongo_client import MongoClient +from pymongo.server_api import ServerApi + +SERVER_API = None +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +if MONGODB_API_VERSION: + SERVER_API = ServerApi(MONGODB_API_VERSION) class HeartbeatLogger(monitoring.ServerHeartbeatListener): @@ -55,6 +62,7 @@ def main(uri: str) -> None: event_listeners=[heartbeat_logger], heartbeatFrequencyMS=500, connectTimeoutMS=500, + server_api=SERVER_API, ) client.admin.command("ping") logging.info("TEST STARTED") From a809b3c005392017bd9d4de41bb286bd26bb42b8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 31 Mar 2022 16:11:20 -0500 Subject: [PATCH 0632/2111] PYTHON-3190 Test Failure - doctests failing cannot import name 'TypedDict' (#917) --- .evergreen/config.yml | 2 +- doc/examples/type_hints.rst | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ef60eaf7d7..a6d9375f26 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2506,7 +2506,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6"] + python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index 029761bc75..6858e95290 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -92,12 +92,13 @@ Note that when using :class:`~bson.son.SON`, the key and value types must be giv Typed Collection ---------------- -You can use :py:class:`~typing.TypedDict` when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`: +You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`: .. doctest:: >>> from typing import TypedDict - >>> from pymongo import MongoClient, Collection + >>> from pymongo import MongoClient + >>> from pymongo.collection import Collection >>> class Movie(TypedDict): ... name: str ... year: int @@ -113,13 +114,14 @@ Typed Database -------------- While less common, you could specify that the documents in an entire database -match a well-defined shema using :py:class:`~typing.TypedDict`. +match a well-defined shema using :py:class:`~typing.TypedDict` (Python 3.8+). .. doctest:: >>> from typing import TypedDict - >>> from pymongo import MongoClient, Database + >>> from pymongo import MongoClient + >>> from pymongo.database import Database >>> class Movie(TypedDict): ... name: str ... year: int @@ -146,7 +148,7 @@ When using the :meth:`~pymongo.database.Database.command`, you can specify the d >>> result = client.admin.command("ping", codec_options=options) >>> assert isinstance(result, RawBSONDocument) -Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` are also supported. +Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported. For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``. Typed BSON Decoding @@ -167,7 +169,7 @@ You can specify the document type returned by :mod:`bson` decoding functions by >>> rt_document = decode(bsonbytes, codec_options=options) >>> assert rt_document.foo() == "bar" -:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` are also supported. +:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported. For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``. From b0fd5cbdf5733450a864642130a26816dada349c Mon Sep 17 00:00:00 2001 From: Duncan <52967253+dunkOnIT@users.noreply.github.com> Date: Fri, 1 Apr 2022 19:37:47 +0200 Subject: [PATCH 0633/2111] Improve docstrings for SON parameters (#919) --- pymongo/collection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index f382628aa8..9ab56dd41c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -246,7 +246,7 @@ def _command( :Parameters: - `sock_info` - A SocketInfo instance. - - `command` - The command itself, as a SON instance. + - `command` - The command itself, as a :class:`~bson.son.SON` instance. - `codec_options` (optional) - An instance of :class:`~bson.codec_options.CodecOptions`. - `check`: raise OperationFailure if there are errors @@ -1443,7 +1443,7 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: this :class:`Collection`. :Parameters: - - `filter` (optional): a SON object specifying elements which + - `filter` (optional): a :class:`~bson.son.SON` object specifying elements which must be present for a document to be included in the result set - `projection` (optional): a list of field names that should be From 6e99bf451503825577213ef148ec6b519a41257b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 4 Apr 2022 10:57:01 -0700 Subject: [PATCH 0634/2111] BUMP 4.1.0 --- gridfs/__init__.py | 6 +++--- pymongo/__init__.py | 2 +- pymongo/collection.py | 8 ++++---- setup.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 73425a9e53..5675e8f937 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -364,9 +364,9 @@ def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: are associated with that session. :Parameters: - - `filter` (optional): a SON object specifying elements which - must be present for a document to be included in the - result set + - `filter` (optional): A query document that selects which files + to include in the result set. Can be an empty document to include + all files. - `skip` (optional): the number of files to omit (from the start of the result set) when returning the results - `limit` (optional): the maximum number of results to diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 9581068036..69536d5e31 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0) def get_version_string() -> str: diff --git a/pymongo/collection.py b/pymongo/collection.py index 9ab56dd41c..d6e308b260 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1423,7 +1423,7 @@ def find_one( def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: """Query the database. - The `filter` argument is a prototype document that all results + The `filter` argument is a query document that all results must match. For example: >>> db.test.find({"hello": "world"}) @@ -1443,9 +1443,9 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: this :class:`Collection`. :Parameters: - - `filter` (optional): a :class:`~bson.son.SON` object specifying elements which - must be present for a document to be included in the - result set + - `filter` (optional): A query document that selects which documents + to include in the result set. Can be an empty document to include + all documents. - `projection` (optional): a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude. If `projection` is a list "_id" will diff --git a/setup.py b/setup.py index 5bae7dc211..d12918501d 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.1.0.dev0" +version = "4.1.0" f = open("README.rst") try: From 331600d4910c377d5b4b95825a93b932ce0c48b6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 4 Apr 2022 11:06:52 -0700 Subject: [PATCH 0635/2111] BUMP 4.2.0.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 69536d5e31..fd1309b6df 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://docs.mongodb.org/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index d12918501d..8a59e6d8d6 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.1.0" +version = "4.2.0.dev0" f = open("README.rst") try: From 821b5620f796250602b2edc97db14e2ae11eb0e2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 Apr 2022 13:07:06 -0700 Subject: [PATCH 0636/2111] PYTHON-3198 Fix NameError: name sys is not defined (#920) --- pymongo/uri_parser.py | 2 +- test/test_client.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index fa44dd8569..bfbf214bcb 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -15,6 +15,7 @@ """Tools to parse and validate a MongoDB URI.""" import re +import sys import warnings from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union from urllib.parse import unquote_plus @@ -615,7 +616,6 @@ def _parse_kms_tls_options(kms_tls_options): if __name__ == "__main__": import pprint - import sys try: pprint.pprint(parse_uri(sys.argv[1])) diff --git a/test/test_client.py b/test/test_client.py index 40f276a9db..59a8324d6e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1689,6 +1689,11 @@ def test_srv_max_hosts_kwarg(self): ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) + @unittest.skipIf(_HAVE_DNSPYTHON, "dnspython must not be installed") + def test_srv_no_dnspython_error(self): + with self.assertRaisesRegex(ConfigurationError, 'The "dnspython" module must be'): + MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + @unittest.skipIf( client_context.load_balancer or client_context.serverless, "loadBalanced clients do not run SDAM", From 01f983e8abfe8235afe224a5e4281f8175560604 Mon Sep 17 00:00:00 2001 From: Terence Honles Date: Wed, 6 Apr 2022 11:25:25 -0700 Subject: [PATCH 0637/2111] PYTHON-3214 Fix typing markers not being included in the distribution (#921) --- MANIFEST.in | 3 --- setup.py | 9 ++++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 726c631e89..d017d16ab0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,6 +12,3 @@ include tools/README.rst recursive-include test *.pem recursive-include test *.py recursive-include bson *.h -include bson/py.typed -include gridfs/py.typed -include pymongo/py.typed diff --git a/setup.py b/setup.py index 8a59e6d8d6..e8e5d37bfb 100755 --- a/setup.py +++ b/setup.py @@ -295,7 +295,14 @@ def build_extension(self, ext): else: extras_require["gssapi"] = ["pykerberos"] -extra_opts = {"packages": ["bson", "pymongo", "gridfs"]} +extra_opts = { + "packages": ["bson", "pymongo", "gridfs"], + "package_data": { + "bson": ["py.typed"], + "pymongo": ["py.typed"], + "gridfs": ["py.typed"], + }, +} if "--no_ext" in sys.argv: sys.argv.remove("--no_ext") From 49c3f9fdfd5925015293ad8ac3130370f249619e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Apr 2022 11:47:31 -0700 Subject: [PATCH 0638/2111] PYTHON-3215 Add Typing :: Typed trove classifier --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index e8e5d37bfb..8417178441 100755 --- a/setup.py +++ b/setup.py @@ -346,6 +346,7 @@ def build_extension(self, ext): "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", + "Typing :: Typed", ], cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, extras_require=extras_require, From 3cb16cae24dd427dfccdaa0ec5324a6ac6af7a8d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Apr 2022 12:09:47 -0700 Subject: [PATCH 0639/2111] PYTHON-3210 Remove flakey string assertion from invalid aws creds FLE test (#922) --- test/test_encryption.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index f63127a7be..987c02618f 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1310,9 +1310,9 @@ def test_05_aws_endpoint_wrong_region(self): } # The full error should be something like: # "Credential should be scoped to a valid region, not 'us-east-1'" - # but we only check for "us-east-1" to avoid breaking on slight + # but we only check for EncryptionError to avoid breaking on slight # changes to AWS' error message. - with self.assertRaisesRegex(EncryptionError, "us-east-1"): + with self.assertRaises(EncryptionError): self.client_encryption.create_data_key("aws", master_key=master_key) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") From 5ccbb4d6d8b4725aa0ae86e612aba3d438094dd5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Apr 2022 12:18:44 -0700 Subject: [PATCH 0640/2111] PYTHON-3216 Include codec_options.pyi in release distributions (#923) --- setup.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index 8417178441..dff4678d4b 100755 --- a/setup.py +++ b/setup.py @@ -295,14 +295,7 @@ def build_extension(self, ext): else: extras_require["gssapi"] = ["pykerberos"] -extra_opts = { - "packages": ["bson", "pymongo", "gridfs"], - "package_data": { - "bson": ["py.typed"], - "pymongo": ["py.typed"], - "gridfs": ["py.typed"], - }, -} +extra_opts = {} if "--no_ext" in sys.argv: sys.argv.remove("--no_ext") @@ -350,5 +343,11 @@ def build_extension(self, ext): ], cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, extras_require=extras_require, + packages=["bson", "pymongo", "gridfs"], + package_data={ + "bson": ["py.typed", "*.pyi"], + "pymongo": ["py.typed", "*.pyi"], + "gridfs": ["py.typed", "*.pyi"], + }, **extra_opts ) From dca72b7884f7940498c0898cdaf7b041bc6386db Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 12 Apr 2022 17:18:23 -0700 Subject: [PATCH 0641/2111] PYTHON-3222 Fix memory leak in cbson decode_all (#927) Add decode_all keyword arg for codec_options. Make decode_all show up in docs. --- bson/__init__.py | 67 +++++++++++++++++++++++---------------------- bson/_cbsonmodule.c | 43 ++++------------------------- doc/changelog.rst | 26 ++++++++++++++++++ test/test_bson.py | 9 ++++++ 4 files changed, 76 insertions(+), 69 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 11a87bbe79..70aa6ae86c 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -982,6 +982,40 @@ def decode( return _bson_to_dict(data, opts) +def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> List[_DocumentType]: + """Decode a BSON data to multiple documents.""" + data, view = get_data_and_view(data) + data_len = len(data) + docs: List[_DocumentType] = [] + position = 0 + end = data_len - 1 + use_raw = _raw_document_class(opts.document_class) + try: + while position < end: + obj_size = _UNPACK_INT_FROM(data, position)[0] + if data_len - position < obj_size: + raise InvalidBSON("invalid object size") + obj_end = position + obj_size - 1 + if data[obj_end] != 0: + raise InvalidBSON("bad eoo") + if use_raw: + docs.append(opts.document_class(data[position : obj_end + 1], opts)) # type: ignore + else: + docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts)) + position += obj_size + return docs + except InvalidBSON: + raise + except Exception: + # Change exception type to InvalidBSON but preserve traceback. + _, exc_value, exc_tb = sys.exc_info() + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) + + +if _USE_C: + _decode_all = _cbson._decode_all # noqa: F811 + + def decode_all( data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None ) -> List[_DocumentType]: @@ -1008,41 +1042,10 @@ def decode_all( `codec_options`. """ opts = codec_options or DEFAULT_CODEC_OPTIONS - data, view = get_data_and_view(data) if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - data_len = len(data) - docs: List[_DocumentType] = [] - position = 0 - end = data_len - 1 - use_raw = _raw_document_class(opts.document_class) - try: - while position < end: - obj_size = _UNPACK_INT_FROM(data, position)[0] - if data_len - position < obj_size: - raise InvalidBSON("invalid object size") - obj_end = position + obj_size - 1 - if data[obj_end] != 0: - raise InvalidBSON("bad eoo") - if use_raw: - docs.append( - opts.document_class(data[position : obj_end + 1], codec_options) # type: ignore - ) - else: - docs.append(_elements_to_dict(data, view, position + 4, obj_end, opts)) - position += obj_size - return docs - except InvalidBSON: - raise - except Exception: - # Change exception type to InvalidBSON but preserve traceback. - _, exc_value, exc_tb = sys.exc_info() - raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) - - -if _USE_C: - decode_all = _cbson.decode_all # noqa: F811 + return _decode_all(data, opts) # type: ignore[arg-type] def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 8100e951cf..1a296db527 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -53,7 +53,6 @@ struct module_state { PyObject* BSONInt64; PyObject* Decimal128; PyObject* Mapping; - PyObject* CodecOptions; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -344,8 +343,7 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || - _load_object(&state->Mapping, "collections.abc", "Mapping") || - _load_object(&state->CodecOptions, "bson.codec_options", "CodecOptions")) { + _load_object(&state->Mapping, "collections.abc", "Mapping")) { return 1; } /* Reload our REType hack too. */ @@ -498,26 +496,6 @@ int convert_codec_options(PyObject* options_obj, void* p) { return 1; } -/* Fill out a codec_options_t* with default options. - * - * Return 1 on success. - * Return 0 on failure. - */ -int default_codec_options(struct module_state* state, codec_options_t* options) { - PyObject* options_obj = NULL; - PyObject* codec_options_func = _get_object( - state->CodecOptions, "bson.codec_options", "CodecOptions"); - if (codec_options_func == NULL) { - return 0; - } - options_obj = PyObject_CallFunctionObjArgs(codec_options_func, NULL); - Py_DECREF(codec_options_func); - if (options_obj == NULL) { - return 0; - } - return convert_codec_options(options_obj, options); -} - void destroy_codec_options(codec_options_t* options) { Py_CLEAR(options->document_class); Py_CLEAR(options->tzinfo); @@ -2411,15 +2389,10 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OII|O&", &bson, &position, &max, + if (!PyArg_ParseTuple(args, "OIIO&", &bson, &position, &max, convert_codec_options, &options)) { return NULL; } - if (PyTuple_GET_SIZE(args) < 4) { - if (!default_codec_options(GETSTATE(self), &options)) { - return NULL; - } - } if (!PyBytes_Check(bson)) { PyErr_SetString(PyExc_TypeError, "argument to _element_to_dict must be a bytes object"); @@ -2594,17 +2567,13 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* dict; PyObject* result = NULL; codec_options_t options; - PyObject* options_obj; + PyObject* options_obj = NULL; Py_buffer view = {0}; - if (!PyArg_ParseTuple(args, "O|O", &bson, &options_obj)) { + if (!PyArg_ParseTuple(args, "OO", &bson, &options_obj)) { return NULL; } - if ((PyTuple_GET_SIZE(args) < 2) || (options_obj == Py_None)) { - if (!default_codec_options(GETSTATE(self), &options)) { - return NULL; - } - } else if (!convert_codec_options(options_obj, &options)) { + if (!convert_codec_options(options_obj, &options)) { return NULL; } @@ -2698,7 +2667,7 @@ static PyMethodDef _CBSONMethods[] = { "convert a dictionary to a string containing its BSON representation."}, {"_bson_to_dict", _cbson_bson_to_dict, METH_VARARGS, "convert a BSON string to a SON object."}, - {"decode_all", _cbson_decode_all, METH_VARARGS, + {"_decode_all", _cbson_decode_all, METH_VARARGS, "convert binary data to a sequence of documents."}, {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, diff --git a/doc/changelog.rst b/doc/changelog.rst index 28c467a299..1f8a146b37 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,32 @@ Changelog ========= +Changes in Version 4.1.1 +------------------------- + +Issues Resolved +............... + +Version 4.1.1 fixes a number of bugs: + +- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a + ``codec_options`` argument (`PYTHON-3222`_). +- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options`` + as a keyword argument (`PYTHON-3222`_). +- Fixed an oversight where type markers (py.typed files) were not included + in our release distributions (`PYTHON-3214`_). +- Fixed a bug where pymongo would raise a "NameError: name sys is not defined" + exception when attempting to parse a "mongodb+srv://" URI when the dnspython + dependency was not installed (`PYTHON-3198`_). + +See the `PyMongo 4.1.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3198: https://jira.mongodb.org/browse/PYTHON-3198 +.. _PYTHON-3214: https://jira.mongodb.org/browse/PYTHON-3214 +.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222 +.. _PyMongo 4.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33290 + Changes in Version 4.1 ---------------------- diff --git a/test/test_bson.py b/test/test_bson.py index b0dce7db4e..8ad65f3412 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1006,6 +1006,15 @@ def test_decode_all_no_options(self): decoded = bson.decode_all(bson.encode(doc2), None)[0] self.assertIsInstance(decoded["id"], Binary) + def test_decode_all_kwarg(self): + doc = {"a": uuid.uuid4()} + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encoded = encode(doc, codec_options=opts) + # Positional codec_options + self.assertEqual([doc], decode_all(encoded, opts)) + # Keyword codec_options + self.assertEqual([doc], decode_all(encoded, codec_options=opts)) + def test_unicode_decode_error_handler(self): enc = encode({"keystr": "foobar"}) From fd512d5c90220a37341aca068bc9e3f969c8eead Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 12 Apr 2022 17:43:10 -0700 Subject: [PATCH 0642/2111] PYTHON-3225 Stop testing delete on capped collections (#928) --- test/test_collection.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/test/test_collection.py b/test/test_collection.py index 6319321045..d1a3a6a980 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1420,16 +1420,7 @@ def test_update_check_keys(self): def test_acknowledged_delete(self): db = self.db db.drop_collection("test") - db.create_collection("test", capped=True, size=1000) - - db.test.insert_one({"x": 1}) - self.assertEqual(1, db.test.count_documents({})) - - # Can't remove from capped collection. - self.assertRaises(OperationFailure, db.test.delete_one, {"x": 1}) - db.drop_collection("test") - db.test.insert_one({"x": 1}) - db.test.insert_one({"x": 1}) + db.test.insert_many([{"x": 1}, {"x": 1}]) self.assertEqual(2, db.test.delete_many({}).deleted_count) self.assertEqual(0, db.test.delete_many({}).deleted_count) From a319075ba7d9a61bd93126b8e08be0ee0b0b667c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 13 Apr 2022 14:11:13 -0500 Subject: [PATCH 0643/2111] PYTHON-3197 Update docs.mongodb.com links in source, API & Reference documentation (#926) --- README.rst | 4 +-- doc/api/index.rst | 2 +- doc/changelog.rst | 12 ++++---- doc/examples/aggregation.rst | 2 +- doc/examples/copydb.rst | 2 +- doc/examples/high_availability.rst | 8 +++--- doc/examples/server_selection.rst | 4 +-- doc/examples/tailable.rst | 4 +-- doc/examples/tls.rst | 2 +- doc/faq.rst | 4 +-- doc/migrate-to-pymongo4.rst | 44 +++++++++++++++--------------- doc/tutorial.rst | 16 +++++------ pymongo/__init__.py | 8 +++--- pymongo/change_stream.py | 2 +- pymongo/collection.py | 18 ++++++------ pymongo/cursor.py | 12 ++++---- pymongo/database.py | 14 +++++----- pymongo/message.py | 2 +- pymongo/mongo_client.py | 4 +-- pymongo/operations.py | 2 +- pymongo/read_preferences.py | 2 +- test/unified_format.py | 2 +- 22 files changed, 85 insertions(+), 85 deletions(-) diff --git a/README.rst b/README.rst index fedb9e14d4..c3c3757289 100644 --- a/README.rst +++ b/README.rst @@ -13,7 +13,7 @@ database from Python. The ``bson`` package is an implementation of the `BSON format `_ for Python. The ``pymongo`` package is a native Python driver for MongoDB. The ``gridfs`` package is a `gridfs -`_ +`_ implementation on top of ``pymongo``. PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, and 5.0. @@ -63,7 +63,7 @@ Security Vulnerabilities If you’ve identified a security vulnerability in a driver or any other MongoDB project, please report it according to the `instructions here -`_. +`_. Installation ============ diff --git a/doc/api/index.rst b/doc/api/index.rst index 64c407fd04..30ae3608ca 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -6,7 +6,7 @@ interacting with MongoDB. :mod:`bson` is an implementation of the `BSON format `_, :mod:`pymongo` is a full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools for working with the `GridFS -`_ storage +`_ storage specification. .. toctree:: diff --git a/doc/changelog.rst b/doc/changelog.rst index 1f8a146b37..eee3e4a81d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -376,7 +376,7 @@ Deprecations .. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466 .. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690 .. _PYTHON-2472: https://jira.mongodb.org/browse/PYTHON-2472 -.. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ +.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/ Issues Resolved ............... @@ -548,7 +548,7 @@ Unavoidable breaking changes: now always raises the following error: ``InvalidOperation: GridFS does not support multi-document transactions`` -.. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ +.. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ Issues Resolved ............... @@ -896,7 +896,7 @@ Deprecations: - Deprecated :meth:`pymongo.collection.Collection.count` and :meth:`pymongo.cursor.Cursor.count`. These two methods use the `count` command and `may or may not be accurate - `_, + `_, depending on the options used and connected MongoDB topology. Use :meth:`~pymongo.collection.Collection.count_documents` instead. - Deprecated the snapshot option of :meth:`~pymongo.collection.Collection.find` @@ -1112,7 +1112,7 @@ Changes and Deprecations: - Deprecated the MongoClient option `socketKeepAlive`. It now defaults to true and disabling it is not recommended, see `does TCP keepalive time affect MongoDB Deployments? - `_ + `_ - Deprecated :meth:`~pymongo.collection.Collection.initialize_ordered_bulk_op`, :meth:`~pymongo.collection.Collection.initialize_unordered_bulk_op`, and :class:`~pymongo.bulk.BulkOperationBuilder`. Use @@ -2408,7 +2408,7 @@ Important New Features: - Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework - `_. + `_. - Support for legacy Java and C# byte order when encoding and decoding UUIDs. - Support for connecting directly to an arbiter. @@ -2416,7 +2416,7 @@ Important New Features: Starting with MongoDB 2.2 the getLastError command requires authentication when the server's `authentication features - `_ are enabled. + `_ are enabled. Changes to PyMongo were required to support this behavior change. Users of authentication must upgrade to PyMongo 2.3 (or newer) for "safe" write operations to function correctly. diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index 738b09485a..cdd82ff6fb 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -74,4 +74,4 @@ you can add computed fields, create new virtual sub-objects, and extract sub-fields into the top-level of results. .. seealso:: The full documentation for MongoDB's `aggregation framework - `_ + `_ diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst index 27f1912c6e..76d0c97a36 100644 --- a/doc/examples/copydb.rst +++ b/doc/examples/copydb.rst @@ -67,7 +67,7 @@ Versions of PyMongo before 3.0 included a ``copy_database`` helper method, but it has been removed. .. _copyDatabase function in the mongo shell: - http://docs.mongodb.org/manual/reference/method/db.copyDatabase/ + http://mongodb.com/docs/manual/reference/method/db.copyDatabase/ .. _Copy a Database: https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index efd7a66cc6..8f94aba074 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -4,7 +4,7 @@ High Availability and PyMongo PyMongo makes it easy to write highly available applications whether you use a `single replica set `_ or a `large sharded cluster -`_. +`_. Connecting to a Replica Set --------------------------- @@ -14,7 +14,7 @@ PyMongo makes working with `replica sets replica set and show how to handle both initialization and normal connections with PyMongo. -.. seealso:: The MongoDB documentation on `replication `_. +.. seealso:: The MongoDB documentation on `replication `_. Starting a Replica Set ~~~~~~~~~~~~~~~~~~~~~~ @@ -261,7 +261,7 @@ attributes: **Tag sets**: Replica-set members can be `tagged -`_ according to any +`_ according to any criteria you choose. By default, PyMongo ignores tags when choosing a member to read from, but your read preference can be configured with a ``tag_sets`` parameter. ``tag_sets`` must be a list of dictionaries, each @@ -308,7 +308,7 @@ milliseconds of the closest member's ping time. replica set *through* a mongos. The equivalent is the localThreshold_ command line option. -.. _localThreshold: https://docs.mongodb.com/manual/reference/program/mongos/#std-option-mongos.--localThreshold +.. _localThreshold: https://mongodb.com/docs/manual/reference/program/mongos/#std-option-mongos.--localThreshold .. _health-monitoring: diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index fc436c0cd7..be2172489e 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -19,7 +19,7 @@ to prefer servers running on ``localhost``. from pymongo import MongoClient -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ +.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ Example: Selecting Servers Running on ``localhost`` @@ -105,4 +105,4 @@ list of known hosts. As an example, for a 3-member replica set with a all available secondaries. -.. _server selection algorithm: https://docs.mongodb.com/manual/core/read-preference-mechanics/ +.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst index 1242e9ddf5..79458dc2ff 100644 --- a/doc/examples/tailable.rst +++ b/doc/examples/tailable.rst @@ -3,9 +3,9 @@ Tailable Cursors By default, MongoDB will automatically close a cursor when the client has exhausted all results in the cursor. However, for `capped collections -`_ you may +`_ you may use a `tailable cursor -`_ +`_ that remains open after the client exhausts the results in the initial cursor. The following is a basic example of using a tailable cursor to tail the oplog diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 9c3c2c829c..5a851e2530 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -3,7 +3,7 @@ TLS/SSL and PyMongo PyMongo supports connecting to MongoDB over TLS/SSL. This guide covers the configuration options supported by PyMongo. See `the server documentation -`_ to configure +`_ to configure MongoDB. .. warning:: Industry best practices recommend, and some regulations require, diff --git a/doc/faq.rst b/doc/faq.rst index 0d045f7629..06559ddb9b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -314,7 +314,7 @@ when it is serialized to BSON and used as a query. Thus you can create a subdocument that exactly matches the subdocument in the collection. .. seealso:: `MongoDB Manual entry on subdocument matching - `_. + `_. What does *CursorNotFound* cursor id not valid at server mean? -------------------------------------------------------------- @@ -468,7 +468,7 @@ How can I use something like Python's ``json`` module to encode my documents to ------------------------------------------------------------------------------------- :mod:`~bson.json_util` is PyMongo's built in, flexible tool for using Python's :mod:`json` module with BSON documents and `MongoDB Extended JSON -`_. The +`_. The :mod:`json` module won't work out of the box with all documents from PyMongo as PyMongo supports some special types (like :class:`~bson.objectid.ObjectId` and :class:`~bson.dbref.DBRef`) that are not supported in JSON. diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 6d290dd51b..5f75ed1760 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -88,7 +88,7 @@ The socketKeepAlive parameter is removed Removed the ``socketKeepAlive`` keyword argument to :class:`~pymongo.mongo_client.MongoClient`. PyMongo now always enables TCP -keepalive. For more information see the `documentation `_. +keepalive. For more information see the `documentation `_. Renamed URI options ................... @@ -138,7 +138,7 @@ instead. For example:: client.admin.command('fsync', lock=True) -.. _fsync command: https://docs.mongodb.com/manual/reference/command/fsync/ +.. _fsync command: https://mongodb.com/docs/manual/reference/command/fsync/ MongoClient.unlock is removed ............................. @@ -149,7 +149,7 @@ Removed :meth:`pymongo.mongo_client.MongoClient.unlock`. Run the client.admin.command('fsyncUnlock') -.. _fsyncUnlock command: https://docs.mongodb.com/manual/reference/command/fsyncUnlock/ +.. _fsyncUnlock command: https://mongodb.com/docs/manual/reference/command/fsyncUnlock/ MongoClient.is_locked is removed ................................ @@ -160,7 +160,7 @@ Removed :attr:`pymongo.mongo_client.MongoClient.is_locked`. Run the is_locked = client.admin.command('currentOp').get('fsyncLock') -.. _currentOp command: https://docs.mongodb.com/manual/reference/command/currentOp/ +.. _currentOp command: https://mongodb.com/docs/manual/reference/command/currentOp/ MongoClient.database_names is removed ..................................... @@ -196,7 +196,7 @@ can be changed to this:: max_message_size = doc['maxMessageSizeBytes'] max_write_batch_size = doc['maxWriteBatchSize'] -.. _hello command: https://docs.mongodb.com/manual/reference/command/hello/ +.. _hello command: https://mongodb.com/docs/manual/reference/command/hello/ MongoClient.event_listeners and other configuration option helpers are removed .............................................................................. @@ -309,7 +309,7 @@ can be changed to this:: ops = list(client.admin.aggregate([{'$currentOp': {}}])) -.. _$currentOp aggregation pipeline stage: https://docs.mongodb.com/manual/reference/operator/aggregation/currentOp/ +.. _$currentOp aggregation pipeline stage: https://mongodb.com/docs/manual/reference/operator/aggregation/currentOp/ Database.add_user is removed ............................ @@ -332,8 +332,8 @@ Or change roles:: db.command("updateUser", "user", roles=["readWrite"]) -.. _createUser command: https://docs.mongodb.com/manual/reference/command/createUser/ -.. _updateUser command: https://docs.mongodb.com/manual/reference/command/updateUser/ +.. _createUser command: https://mongodb.com/docs/manual/reference/command/createUser/ +.. _updateUser command: https://mongodb.com/docs/manual/reference/command/updateUser/ Database.remove_user is removed ............................... @@ -343,7 +343,7 @@ PyMongo 3.6. Use the `dropUser command`_ instead:: db.command("dropUser", "user") -.. _dropUser command: https://docs.mongodb.com/manual/reference/command/createUser/ +.. _dropUser command: https://mongodb.com/docs/manual/reference/command/createUser/ Database.profiling_level is removed ................................... @@ -358,7 +358,7 @@ Can be changed to this:: profile = db.command('profile', -1) level = profile['was'] -.. _profile command: https://docs.mongodb.com/manual/reference/command/profile/ +.. _profile command: https://mongodb.com/docs/manual/reference/command/profile/ Database.set_profiling_level is removed ....................................... @@ -384,7 +384,7 @@ Can be changed to this:: profiling_info = list(db['system.profile'].find()) -.. _'system.profile' collection: https://docs.mongodb.com/manual/reference/database-profiler/ +.. _'system.profile' collection: https://mongodb.com/docs/manual/reference/database-profiler/ Database.__bool__ raises NotImplementedError ............................................ @@ -542,10 +542,10 @@ Can be changed to this:: | | ``{'$geoWithin': {'$centerSphere': [[,], ]}}`` | +-------------+--------------------------------------------------------------+ -.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ -.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ -.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ -.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ +.. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ +.. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ +.. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ +.. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ Collection.initialize_ordered_bulk_op and initialize_unordered_bulk_op is removed ................................................................................. @@ -600,7 +600,7 @@ deprecated in PyMongo 3.5. MongoDB 4.2 removed the `group command`_. Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage instead. -.. _group command: https://docs.mongodb.com/manual/reference/command/group/ +.. _group command: https://mongodb.com/docs/manual/reference/command/group/ Collection.map_reduce and Collection.inline_map_reduce are removed .................................................................. @@ -611,10 +611,10 @@ Migrate to :meth:`~pymongo.collection.Collection.aggregate` or run the `mapReduce command`_ directly with :meth:`~pymongo.database.Database.command` instead. For more guidance on this migration see: -- https://docs.mongodb.com/manual/reference/map-reduce-to-aggregation-pipeline/ -- https://docs.mongodb.com/manual/reference/aggregation-commands-comparison/ +- https://mongodb.com/docs/manual/reference/map-reduce-to-aggregation-pipeline/ +- https://mongodb.com/docs/manual/reference/aggregation-commands-comparison/ -.. _mapReduce command: https://docs.mongodb.com/manual/reference/command/mapReduce/ +.. _mapReduce command: https://mongodb.com/docs/manual/reference/command/mapReduce/ Collection.ensure_index is removed .................................. @@ -651,7 +651,7 @@ can be changed to this:: >>> result = database.command('reIndex', 'my_collection') -.. _reIndex command: https://docs.mongodb.com/manual/reference/command/reIndex/ +.. _reIndex command: https://mongodb.com/docs/manual/reference/command/reIndex/ The modifiers parameter is removed .................................. @@ -865,7 +865,7 @@ Removed :meth:`pymongo.mongo_client.MongoClient.close_cursor` and with :meth:`pymongo.cursor.Cursor.close` or :meth:`pymongo.command_cursor.CommandCursor.close`. -.. _killCursors command: https://docs.mongodb.com/manual/reference/command/killCursors/ +.. _killCursors command: https://mongodb.com/docs/manual/reference/command/killCursors/ Database.eval, Database.system_js, and SystemJS are removed ........................................................... @@ -902,7 +902,7 @@ Collection.parallel_scan is removed Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2 removed the `parallelCollectionScan command`_. There is no replacement. -.. _parallelCollectionScan command: https://docs.mongodb.com/manual/reference/command/parallelCollectionScan/ +.. _parallelCollectionScan command: https://mongodb.com/docs/manual/reference/command/parallelCollectionScan/ pymongo.message helpers are removed ................................... diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 2ec6c44da8..55961241e8 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -22,7 +22,7 @@ should run without raising an exception: This tutorial also assumes that a MongoDB instance is running on the default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you +`_ MongoDB, you can start it like so: .. code-block:: bash @@ -56,7 +56,7 @@ Or use the MongoDB URI format: Getting a Database ------------------ A single instance of MongoDB can support multiple independent -`databases `_. When +`databases `_. When working with PyMongo you access databases using attribute style access on :class:`~pymongo.mongo_client.MongoClient` instances: @@ -74,7 +74,7 @@ instead: Getting a Collection -------------------- -A `collection `_ is a +A `collection `_ is a group of documents stored in MongoDB, and can be thought of as roughly the equivalent of a table in a relational database. Getting a collection in PyMongo works the same as getting a database: @@ -112,7 +112,7 @@ post: Note that documents can contain native Python types (like :class:`datetime.datetime` instances) which will be automatically converted to and from the appropriate `BSON -`_ types. +`_ types. .. todo:: link to table of Python <-> BSON types @@ -134,7 +134,7 @@ of ``"_id"`` must be unique across the collection. :meth:`~pymongo.collection.Collection.insert_one` returns an instance of :class:`~pymongo.results.InsertOneResult`. For more information on ``"_id"``, see the `documentation on _id -`_. +`_. After inserting the first document, the *posts* collection has actually been created on the server. We can verify this by listing all @@ -335,7 +335,7 @@ or just of those documents that match a specific query: Range Queries ------------- MongoDB supports many different types of `advanced queries -`_. As an +`_. As an example, lets perform a query where we limit results to posts older than a certain date, but also sort the results by author: @@ -366,7 +366,7 @@ Indexing Adding indexes can help accelerate certain queries and can also add additional functionality to querying and storing documents. In this example, we'll demonstrate how to create a `unique index -`_ on a key that rejects +`_ on a key that rejects documents whose value for that key already exists in the index. First, we'll need to create the index: @@ -404,4 +404,4 @@ the collection: Traceback (most recent call last): DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } -.. seealso:: The MongoDB documentation on `indexes `_ +.. seealso:: The MongoDB documentation on `indexes `_ diff --git a/pymongo/__init__.py b/pymongo/__init__.py index fd1309b6df..a47fd0a7b3 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -24,7 +24,7 @@ GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`_. -.. _geospatial index: http://docs.mongodb.org/manual/core/2d/ +.. _geospatial index: http://mongodb.com/docs/manual/core/2d/ """ GEOSPHERE = "2dsphere" @@ -32,7 +32,7 @@ .. versionadded:: 2.5 -.. _spherical geospatial index: http://docs.mongodb.org/manual/core/2dsphere/ +.. _spherical geospatial index: http://mongodb.com/docs/manual/core/2dsphere/ """ HASHED = "hashed" @@ -40,7 +40,7 @@ .. versionadded:: 2.5 -.. _hashed index: http://docs.mongodb.org/manual/core/index-hashed/ +.. _hashed index: http://mongodb.com/docs/manual/core/index-hashed/ """ TEXT = "text" @@ -52,7 +52,7 @@ .. versionadded:: 2.7.1 -.. _text index: http://docs.mongodb.org/manual/core/index-text/ +.. _text index: http://mongodb.com/docs/manual/core/index-text/ """ version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev0") diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index db33999788..b4bce8da59 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -78,7 +78,7 @@ class ChangeStream(Generic[_DocumentType]): :meth:`pymongo.mongo_client.MongoClient.watch` instead. .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. """ def __init__( diff --git a/pymongo/collection.py b/pymongo/collection.py index d6e308b260..79b745d355 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1475,7 +1475,7 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: marks the final document position. If more data is received iteration of the cursor will continue from the last document received. For details, see the `tailable cursor documentation - `_. + `_. - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result of this find call will be a tailable cursor with the await flag set. The server will wait for a few seconds after returning the @@ -1783,10 +1783,10 @@ def count_documents( .. versionadded:: 3.7 - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ - .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/ - .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/ + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ """ pipeline = [{"$match": filter}] if "skip" in kwargs: @@ -1857,7 +1857,7 @@ def create_indexes( when connected to MongoDB >= 3.4. .. versionadded:: 3.0 - .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/ + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ """ common.validate_list("indexes", indexes) if comment is not None: @@ -2012,7 +2012,7 @@ def create_index( .. seealso:: The MongoDB documentation on `indexes `_. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ """ cmd_options = {} if "maxTimeMS" in kwargs: @@ -2395,7 +2395,7 @@ def aggregate( .. seealso:: :doc:`/examples/aggregation` .. _aggregate command: - https://docs.mongodb.com/manual/reference/command/aggregate + https://mongodb.com/docs/manual/reference/command/aggregate """ with self.__database.client._tmp_session(session, close=False) as s: @@ -2563,7 +2563,7 @@ def watch( .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 350cc255bb..2a85f1d82a 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -223,7 +223,7 @@ def __init__( "use an explicit session with no_cursor_timeout=True " "otherwise the cursor may still timeout after " "30 minutes, for more info see " - "https://docs.mongodb.com/v4.4/reference/method/" + "https://mongodb.com/docs/v4.4/reference/method/" "cursor.noCursorTimeout/" "#session-idle-timeout-overrides-nocursortimeout", UserWarning, @@ -908,7 +908,7 @@ def explain(self) -> _DocumentType: .. note:: This method uses the default verbosity mode of the `explain command - `_, + `_, ``allPlansExecution``. To use a different verbosity use :meth:`~pymongo.database.Database.command` to run the explain command directly. @@ -961,7 +961,7 @@ def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]": def comment(self, comment: Any) -> "Cursor[_DocumentType]": """Adds a 'comment' to the cursor. - http://docs.mongodb.org/manual/reference/operator/comment/ + http://mongodb.com/docs/manual/reference/operator/comment/ :Parameters: - `comment`: A string to attach to the query to help interpret and @@ -1000,8 +1000,8 @@ def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]": :Parameters: - `code`: JavaScript expression to use as a filter - .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ - .. _$where: https://docs.mongodb.com/manual/reference/operator/query/where/ + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ """ self.__check_okay_to_chain() if not isinstance(code, Code): @@ -1194,7 +1194,7 @@ def alive(self) -> bool: """Does this cursor have the potential to return more data? This is mostly useful with `tailable cursors - `_ + `_ since they will stop iterating even though they *may* return more results in the future. diff --git a/pymongo/database.py b/pymongo/database.py index b5770b0db9..2156a5e972 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -367,7 +367,7 @@ def create_collection( Added the codec_options, read_preference, and write_concern options. .. _create collection command: - https://docs.mongodb.com/manual/reference/command/create + https://mongodb.com/docs/manual/reference/command/create """ with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not @@ -448,10 +448,10 @@ def aggregate( .. versionadded:: 3.9 .. _aggregation pipeline: - https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline .. _aggregate command: - https://docs.mongodb.com/manual/reference/command/aggregate + https://mongodb.com/docs/manual/reference/command/aggregate """ with self.client._tmp_session(session, close=False) as s: cmd = _DatabaseAggregationCommand( @@ -563,7 +563,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst @@ -803,7 +803,7 @@ def list_collections( command. - `**kwargs` (optional): Optional parameters of the `listCollections command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. @@ -849,7 +849,7 @@ def list_collection_names( command. - `**kwargs` (optional): Optional parameters of the `listCollections command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. @@ -967,7 +967,7 @@ def validate_collection( .. versionchanged:: 3.6 Added ``session`` parameter. - .. _validate command: https://docs.mongodb.com/manual/reference/command/validate/ + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ """ name = name_or_collection if isinstance(name, Collection): diff --git a/pymongo/message.py b/pymongo/message.py index 1fdf0ece35..de43d20c97 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -13,7 +13,7 @@ # limitations under the License. """Tools for creating `messages -`_ to be sent to +`_ to be sent to MongoDB. .. note:: This module is for internal use and is generally not needed by diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 8781cb1f01..1f1e4f725b 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -934,7 +934,7 @@ def watch( .. versionadded:: 3.7 - .. seealso:: The MongoDB documentation on `changeStreams `_. + .. seealso:: The MongoDB documentation on `changeStreams `_. .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst @@ -1741,7 +1741,7 @@ def list_databases( command. - `**kwargs` (optional): Optional parameters of the `listDatabases command - `_ + `_ can be passed as keyword arguments to this method. The supported options differ by server version. diff --git a/pymongo/operations.py b/pymongo/operations.py index e528f2a2df..84e8bf4d35 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -488,7 +488,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: Added the ``partialFilterExpression`` option to support partial indexes. - .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/ + .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ """ keys = _index_list(keys) if "name" not in kwargs: diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 5ce2fbafcc..ccb635bec0 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -153,7 +153,7 @@ def tag_sets(self) -> _TagSets: until it finds a set of tags with at least one matching member. .. seealso:: `Data-Center Awareness - `_ + `_ """ return list(self.__tag_sets) if self.__tag_sets else [{}] diff --git a/test/unified_format.py b/test/unified_format.py index adfd0cac0a..378fcc4759 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -432,7 +432,7 @@ def get_lsid_for_session(self, session_name): BSON_TYPE_ALIAS_MAP = { - # https://docs.mongodb.com/manual/reference/operator/query/type/ + # https://mongodb.com/docs/manual/reference/operator/query/type/ # https://pymongo.readthedocs.io/en/stable/api/bson/index.html "double": (float,), "string": (str,), From ff288faf39f113650f09be999201992c1a44c67d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Apr 2022 12:20:54 -0700 Subject: [PATCH 0644/2111] PYTHON-3221 Resync CSFLE spec tests (#929) --- .evergreen/resync-specs.sh | 6 ++++++ test/client-side-encryption/spec/badQueries.json | 4 ++-- test/client-side-encryption/spec/types.json | 12 ++++++------ test/test_encryption.py | 6 +----- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 3042fd543b..af4228d081 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -81,6 +81,12 @@ do change*streams) cpjson change-streams/tests/ change_streams/ ;; + client-side-encryption|csfle|fle) + cpjson client-side-encryption/tests/ client-side-encryption/spec + cpjson client-side-encryption/corpus/ client-side-encryption/corpus + cpjson client-side-encryption/external/ client-side-encryption/external + cpjson client-side-encryption/limits/ client-side-encryption/limits + ;; cmap|CMAP) cpjson connection-monitoring-and-pooling/tests cmap rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 diff --git a/test/client-side-encryption/spec/badQueries.json b/test/client-side-encryption/spec/badQueries.json index 824a53c00b..4968307ba3 100644 --- a/test/client-side-encryption/spec/badQueries.json +++ b/test/client-side-encryption/spec/badQueries.json @@ -1318,7 +1318,7 @@ } }, "result": { - "errorContains": "Cannot encrypt element of type array" + "errorContains": "Cannot encrypt element of type" } } ] @@ -1387,7 +1387,7 @@ } }, "result": { - "errorContains": "Cannot encrypt element of type array" + "errorContains": "Cannot encrypt element of type" } } ] diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/types.json index a070f8bff7..a6c6507e90 100644 --- a/test/client-side-encryption/spec/types.json +++ b/test/client-side-encryption/spec/types.json @@ -504,7 +504,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: double" + "errorContains": "element of type: double" } } ] @@ -551,7 +551,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: decimal" + "errorContains": "element of type: decimal" } } ] @@ -883,7 +883,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: javascriptWithScope" + "errorContains": "element of type: javascriptWithScope" } } ] @@ -928,7 +928,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: object" + "errorContains": "element of type: object" } } ] @@ -1547,7 +1547,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: array" + "errorContains": "element of type: array" } } ] @@ -1592,7 +1592,7 @@ } }, "result": { - "errorContains": "Cannot use deterministic encryption for element of type: bool" + "errorContains": "element of type: bool" } } ] diff --git a/test/test_encryption.py b/test/test_encryption.py index 987c02618f..ec854ff03a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -815,11 +815,7 @@ def run_test(self, provider_name): self.assertEqual(encrypted_altname, encrypted) # Explicitly encrypting an auto encrypted field. - msg = ( - r"Cannot encrypt element of type binData because schema " - r"requires that type is one of: \[ string \]" - ) - with self.assertRaisesRegex(EncryptionError, msg): + with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) def test_data_key_local(self): From 868b3f77f38cefd37d518c76435b8081233cbee1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 13 Apr 2022 20:03:37 +0000 Subject: [PATCH 0645/2111] PYTHON-3080 Add section to troubleshooting FAQ per driver with top SEO results (#918) --- doc/common-issues.rst | 98 +++++++++++++++++++++++++++++++++++++++++++ doc/examples/tls.rst | 1 + doc/index.rst | 4 ++ 3 files changed, 103 insertions(+) create mode 100644 doc/common-issues.rst diff --git a/doc/common-issues.rst b/doc/common-issues.rst new file mode 100644 index 0000000000..1571b985e0 --- /dev/null +++ b/doc/common-issues.rst @@ -0,0 +1,98 @@ +Frequently Encountered Issues +============================= + +Also see the :ref:`TLSErrors` section. + +.. contents:: + +Server reports wire version X, PyMongo requires Y +------------------------------------------------- + +When one attempts to connect to a <=3.4 version server, PyMongo will throw the following error:: + + >>> client.admin.command('ping') + ... + pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 5, but this version of PyMongo requires at least 6 (MongoDB 3.6). + +This is caused by the driver being too new for the server it is being run against. +To resolve this issue either upgrade your database to version >= 3.6 or downgrade to PyMongo 3.x which supports MongoDB >= 2.6. + + +'Cursor' object has no attribute '_Cursor__killed' +-------------------------------------------------- + +On versions of PyMongo <3.9, when supplying invalid arguments the constructor of Cursor, +there will be a TypeError raised, and an AttributeError printed to ``stderr``. The AttributeError is not relevant, +instead look at the TypeError for debugging information:: + + >>> coll.find(wrong=1) + Exception ignored in: + ... + AttributeError: 'Cursor' object has no attribute '_Cursor__killed' + ... + TypeError: __init__() got an unexpected keyword argument 'wrong' + +To fix this, make sure that you are supplying the correct keyword arguments. +In addition, you can also upgrade to PyMongo >=3.9, which will remove the spurious error. + + +MongoClient fails ConfigurationError +------------------------------------ + +This is a common issue stemming from using incorrect keyword argument names. + + >>> client = MongoClient(wrong=1) + ... + pymongo.errors.ConfigurationError: Unknown option wrong + +To fix this, check your spelling and make sure that the keyword argument you are specifying exists. + + +DeprecationWarning: count is deprecated +--------------------------------------- + +PyMongo no longer supports :meth:`pymongo.cursor.count`. +Instead, use :meth:`pymongo.collection.count_documents`:: + + >>> client = MongoClient() + >>> d = datetime.datetime(2009, 11, 12, 12) + >>> list(client.db.coll.find({"date": {"$lt": d}}, limit=2)) + [{'_id': ObjectId('6247b058cebb8b179b7039f8'), 'date': datetime.datetime(1, 1, 1, 0, 0)}, {'_id': ObjectId('6247b059cebb8b179b7039f9'), 'date': datetime.datetime(1, 1, 1, 0, 0)}] + >>> client.db.coll.count_documents({"date": {"$lt": d}}, limit=2) + 2 + +Note that this is NOT the same as ``Cursor.count_documents`` (which does not exist), +this is a method of the Collection class, so you must call it on a collection object +or you will receive the following error:: + + >>> Cursor(MongoClient().db.coll).count() + Traceback (most recent call last): + File "", line 1, in + AttributeError: 'Cursor' object has no attribute 'count' + >>> + +Timeout when accessing MongoDB from PyMongo with tunneling +---------------------------------------------------------- + +When attempting to connect to a replica set MongoDB instance over an SSH tunnel you +will receive the following error:: + + File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1560, in count + return self._count(cmd, collation, session) + File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1504, in _count + with self._socket_for_reads() as (sock_info, slave_ok): + File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py", line 17, in __enter__ + return self.gen.next() + File "/Library/Python/2.7/site-packages/pymongo/mongo_client.py", line 982, in _socket_for_reads + server = topology.select_server(read_preference) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 224, in select_server + address)) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 183, in select_servers + selector, server_timeout, address) + File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 199, in _select_servers_loop + self._error_message(selector)) + pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out + +This is due to the fact that PyMongo discovers replica set members using the response from the isMaster command which +then contains the address and ports of the other members. However, these addresses and ports will not be accessible through the SSH tunnel. Thus, this behavior is unsupported. +You can, however, connect directly to a single MongoDB node using the directConnection=True option with SSH tunneling. diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 5a851e2530..6dcb7a1759 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -167,6 +167,7 @@ handshake will only fail in this case if the response indicates that the certificate is revoked. Invalid or malformed responses will be ignored, favoring availability over maximum security. +.. _TLSErrors: Troubleshooting TLS Errors .......................... diff --git a/doc/index.rst b/doc/index.rst index b6e510ad33..b43f5cf580 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -54,6 +54,9 @@ everything you need to know to use **PyMongo**. :doc:`developer/index` Developer guide for contributors to PyMongo. +:doc:`common-issues` + Common issues encountered when using PyMongo. + Getting Help ------------ If you're having trouble or have questions about PyMongo, ask your question on @@ -124,3 +127,4 @@ Indices and tables python3 migrate-to-pymongo4 developer/index + common-issues From 8b3eaafb40b751e8cc78db1152c480eedd1c4c0f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Apr 2022 13:27:30 -0700 Subject: [PATCH 0646/2111] BUMP 4.1.1 --- doc/changelog.rst | 6 +++--- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index eee3e4a81d..0fe2300120 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,9 +4,6 @@ Changelog Changes in Version 4.1.1 ------------------------- -Issues Resolved -............... - Version 4.1.1 fixes a number of bugs: - Fixed a memory leak bug when calling :func:`~bson.decode_all` without a @@ -19,6 +16,9 @@ Version 4.1.1 fixes a number of bugs: exception when attempting to parse a "mongodb+srv://" URI when the dnspython dependency was not installed (`PYTHON-3198`_). +Issues Resolved +............... + See the `PyMongo 4.1.1 release notes in JIRA`_ for the list of resolved issues in this release. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a47fd0a7b3..8a4288a996 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 1, 1) def get_version_string() -> str: diff --git a/setup.py b/setup.py index dff4678d4b..9d804a06c8 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0.dev0" +version = "4.1.1" f = open("README.rst") try: From 109eaaff7b447e998c30b043d27346142905fc6e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 13 Apr 2022 13:28:42 -0700 Subject: [PATCH 0647/2111] BUMP 4.2.0.dev1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 8a4288a996..17c640b1fd 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 1, 1) +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev1") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 9d804a06c8..9e8e919e88 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.1.1" +version = "4.2.0.dev1" f = open("README.rst") try: From cfa2d990f056a815490883e1fa13b81371a00c20 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 18 Apr 2022 20:38:46 -0500 Subject: [PATCH 0648/2111] PYTHON-3228 _tmp_session should validate session input (#930) --- pymongo/mongo_client.py | 16 ++++++++++++++-- test/test_collection.py | 9 ++++++++- test/test_session.py | 2 +- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 1f1e4f725b..5c7e7cb176 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -97,8 +97,16 @@ from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern if TYPE_CHECKING: + import sys + from pymongo.read_concern import ReadConcern + if sys.version_info[:2] >= (3, 9): + from collections.abc import Generator + else: + # Deprecated since version 3.9: collections.abc.Generator now supports []. + from typing import Generator + class MongoClient(common.BaseObject, Generic[_DocumentType]): """ @@ -1666,9 +1674,13 @@ def _ensure_session(self, session=None): return None @contextlib.contextmanager - def _tmp_session(self, session, close=True): + def _tmp_session( + self, session: Optional[client_session.ClientSession], close: bool = True + ) -> "Generator[Optional[client_session.ClientSession[Any]], None, None]": """If provided session is None, lend a temporary session.""" - if session: + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError("'session' argument must be a ClientSession or None.") # Don't call end_session. yield session return diff --git a/test/test_collection.py b/test/test_collection.py index d1a3a6a980..bea2ed6ca6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1538,6 +1538,13 @@ def test_aggregation_cursor_alive(self): self.assertTrue(cursor.alive) + def test_invalid_session_parameter(self): + def try_invalid_session(): + with self.db.test.aggregate([], {}): # type:ignore + pass + + self.assertRaisesRegex(ValueError, "must be a ClientSession", try_invalid_session) + def test_large_limit(self): db = self.db db.drop_collection("test_large_limit") @@ -2131,7 +2138,7 @@ def test_helpers_with_let(self): (c.update_one, ({}, {"$inc": {"x": 3}})), (c.find_one_and_delete, ({}, {})), (c.find_one_and_replace, ({}, {})), - (c.aggregate, ([], {})), + (c.aggregate, ([],)), ] for let in [10, "str", [], False]: for helper, args in helpers: diff --git a/test/test_session.py b/test/test_session.py index e6f15de6bf..f22a2d5eab 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -199,7 +199,7 @@ def test_implicit_sessions_checkout(self): (client.db.test.find_one_and_replace, [{}, {}]), (client.db.test.aggregate, [[{"$limit": 1}]]), (client.db.test.find, []), - (client.server_info, [{}]), + (client.server_info, []), (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), (cursor.distinct, ["_id"]), (client.db.list_collections, []), From fe057cf5776348e1c2f3132fd2a395be1679a7e4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Apr 2022 12:01:26 -0700 Subject: [PATCH 0649/2111] PYTHON-3220 Add CSFLE spec test for auto encryption on a collection with no jsonSchema --- .../client-side-encryption/spec/noSchema.json | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 test/client-side-encryption/spec/noSchema.json diff --git a/test/client-side-encryption/spec/noSchema.json b/test/client-side-encryption/spec/noSchema.json new file mode 100644 index 0000000000..095434f886 --- /dev/null +++ b/test/client-side-encryption/spec/noSchema.json @@ -0,0 +1,67 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "unencrypted", + "tests": [ + { + "description": "Insert on an unencrypted collection", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} From a2606cfc03b73aba54dfe22eab20df989da0b077 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 22 Apr 2022 13:44:14 -0500 Subject: [PATCH 0650/2111] PYTHON-3194 Adopt doc8 checker (#931) --- .pre-commit-config.yaml | 10 ++++++++++ CONTRIBUTING.rst | 6 +++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b6671d41d..1fd86e0926 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,6 +40,7 @@ repos: 'flake8-logging-format==0.6.0', 'flake8-implicit-str-concat==0.2.0', ] + stages: [manual] # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit @@ -49,6 +50,14 @@ repos: - id: shellcheck name: shellcheck args: ["--severity=warning"] + stages: [manual] + +- repo: https://github.com/PyCQA/doc8 + rev: 0.11.1 + hooks: + - id: doc8 + args: [--max-line-length=200] + stages: [manual] - repo: https://github.com/sirosen/check-jsonschema rev: 0.14.1 @@ -58,6 +67,7 @@ repos: files: ^\.github/workflows/ types: [yaml] args: ["--schemafile", "https://json.schemastore.org/github-workflow"] + stages: [manual] - repo: https://github.com/ariebovenberg/slotscheck rev: v0.14.0 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index bbc22954a0..1a4423f3ef 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -55,6 +55,10 @@ To run ``pre-commit`` manually, run:: pre-commit run --all-files +To run a manual hook like `flake8` manually, run:: + + pre-commit run --all-files --hook-stage manual flake8 + Documentation ------------- @@ -67,7 +71,7 @@ You might also use the GitHub `Edit Date: Thu, 28 Apr 2022 11:07:46 -0500 Subject: [PATCH 0651/2111] PYTHON-3243 Pin version of mypy used (#932) --- .github/workflows/test-python.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index ba9b99e06b..1eea4ff166 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -39,7 +39,7 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | - pip install mypy + pip install mypy==0.942 python setup.py test mypytest: @@ -59,7 +59,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy + python -m pip install -U pip mypy==0.942 pip install -e ".[zstd, srv]" - name: Run mypy run: | From 05b55e88dfa1636511eeeac56d4a75593a76fbeb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 2 May 2022 06:21:44 -0700 Subject: [PATCH 0652/2111] PYTHON-3038 The doc should clarify that the resulting documents that are produced with upserts are constructed from both the filter and the update params (#933) --- pymongo/collection.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pymongo/collection.py b/pymongo/collection.py index 79b745d355..1d0eb1035e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -952,6 +952,19 @@ def update_one( {'x': 1, '_id': 1} {'x': 1, '_id': 2} + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. + + >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + :Parameters: - `filter`: A query that matches the document to update. - `update`: The modifications to apply. From 6e4e90a882e64274d7b3ce44971c83e5a0dbeb58 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 2 May 2022 16:32:05 -0700 Subject: [PATCH 0653/2111] PYTHON-3167 Revert to using the count command for estimated_document_count (#934) Resolves PYTHON-2885, PYTHON-3166, PYTHON-3224, and PYTHON-3219. --- .evergreen/resync-specs.sh | 42 +- doc/changelog.rst | 31 + pymongo/collection.py | 29 +- .../timeseries-collection.json | 16 - test/crud/unified/aggregate-allowdiskuse.json | 155 +++ test/crud/unified/bulkWrite-comment.json | 25 + .../unified/bulkWrite-replaceOne-let.json | 12 + test/crud/unified/countDocuments-comment.json | 208 ++++ test/crud/unified/deleteMany-comment.json | 1 + test/crud/unified/deleteOne-comment.json | 1 + .../estimatedDocumentCount-comment.json | 170 ++++ test/crud/unified/estimatedDocumentCount.json | 371 ++----- test/crud/unified/find-allowdiskuse.json | 4 +- test/crud/unified/insertMany-comment.json | 1 + test/crud/unified/insertOne-comment.json | 1 + test/crud/unified/replaceOne-comment.json | 19 + test/crud/unified/replaceOne-let.json | 12 + test/crud/unified/updateMany-comment.json | 16 +- test/crud/unified/updateMany-let.json | 10 +- test/crud/unified/updateOne-comment.json | 19 + test/crud/unified/updateOne-let.json | 16 +- test/data_lake/estimatedDocumentCount.json | 19 +- .../legacy/estimatedDocumentCount-4.9.json | 246 ----- ...timatedDocumentCount-serverErrors-4.9.json | 911 ------------------ ... estimatedDocumentCount-serverErrors.json} | 2 - ...re4.9.json => estimatedDocumentCount.json} | 2 - test/unified_format.py | 17 +- .../crud-api-version-1-strict.json | 26 +- test/versioned-api/crud-api-version-1.json | 28 +- 29 files changed, 847 insertions(+), 1563 deletions(-) create mode 100644 test/crud/unified/aggregate-allowdiskuse.json create mode 100644 test/crud/unified/countDocuments-comment.json create mode 100644 test/crud/unified/estimatedDocumentCount-comment.json delete mode 100644 test/retryable_reads/legacy/estimatedDocumentCount-4.9.json delete mode 100644 test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json rename test/retryable_reads/legacy/{estimatedDocumentCount-serverErrors-pre4.9.json => estimatedDocumentCount-serverErrors.json} (99%) rename test/retryable_reads/legacy/{estimatedDocumentCount-pre4.9.json => estimatedDocumentCount.json} (97%) diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index af4228d081..a98b091d59 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -68,17 +68,24 @@ cpjson () { for spec in "$@" do + # Match the spec dir name, the python test dir name, and/or common abbreviations. case "$spec" in - bson*corpus) + atlas-data-lake-testing|data_lake) + cpjson atlas-data-lake-testing/tests/ data_lake + ;; + bson-corpus|bson_corpus) cpjson bson-corpus/tests/ bson_corpus ;; - max*staleness) + max-staleness|max_staleness) cpjson max-staleness/tests/ max_staleness ;; - connection*string) + collection-management|collection_management) + cpjson collection-management/tests/ collection_management + ;; + connection-string|connection_string) cpjson connection-string/tests/ connection_string/test ;; - change*streams) + change-streams|change_streams) cpjson change-streams/tests/ change_streams/ ;; client-side-encryption|csfle|fle) @@ -87,32 +94,29 @@ do cpjson client-side-encryption/external/ client-side-encryption/external cpjson client-side-encryption/limits/ client-side-encryption/limits ;; - cmap|CMAP) + cmap|CMAP|connection-monitoring-and-pooling) cpjson connection-monitoring-and-pooling/tests cmap rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 ;; - command*monitoring) + apm|APM|command-monitoring|command_monitoring) cpjson command-monitoring/tests command_monitoring ;; crud|CRUD) cpjson crud/tests/ crud ;; - load*balancer) + load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; - initial-dns-seedlist-discovery|srv_seedlist) + srv|SRV|initial-dns-seedlist-discovery|srv_seedlist) cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist ;; - old_srv_seedlist) - cpjson initial-dns-seedlist-discovery/tests srv_seedlist - ;; - retryable*reads) + retryable-reads|retryable_reads) cpjson retryable-reads/tests/ retryable_reads ;; - retryable*writes) + retryable-writes|retryable_writes) cpjson retryable-writes/tests/ retryable_writes ;; - sdam|SDAM) + sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) cpjson server-discovery-and-monitoring/tests/errors \ discovery_and_monitoring/errors cpjson server-discovery-and-monitoring/tests/rs \ @@ -126,10 +130,10 @@ do cpjson server-discovery-and-monitoring/tests/load-balanced \ discovery_and_monitoring/load-balanced ;; - sdam*monitoring) + sdam-monitoring|sdam_monitoring) cpjson server-discovery-and-monitoring/tests/monitoring sdam_monitoring ;; - server*selection) + server-selection|server_selection) cpjson server-selection/tests/ server_selection ;; sessions) @@ -140,13 +144,13 @@ do cpjson transactions-convenient-api/tests/ transactions-convenient-api rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 ;; - unified) + unified|unified-test-format) cpjson unified-test-format/tests/ unified-test-format/ ;; - uri|uri*options) + uri|uri-options|uri_options) cpjson uri-options/tests uri_options ;; - stable-api) + stable-api|versioned-api) cpjson versioned-api/tests versioned-api ;; *) diff --git a/doc/changelog.rst b/doc/changelog.rst index 0fe2300120..3d2f7cadc4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,37 @@ Changelog ========= +Changes in Version 4.2 +---------------------- + +Bug fixes +......... + +- Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` + would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). + +Unavoidable breaking changes +............................ + +- :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses + the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, + the count command was not included in V1 of the :ref:`versioned-api-ref`. + Users of the Stable API with estimated_document_count are recommended to upgrade + their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` + to ``False`` to avoid encountering errors (`PYTHON-3167`_). + +.. _count: https://mongodb.com/docs/manual/reference/command/count/ + +Issues Resolved +............... + +See the `PyMongo 4.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 +.. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 +.. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 + Changes in Version 4.1.1 ------------------------- diff --git a/pymongo/collection.py b/pymongo/collection.py index 1d0eb1035e..0197198108 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1707,8 +1707,15 @@ def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) command. - `**kwargs` (optional): See list of options above. + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ """ if "session" in kwargs: raise ConfigurationError("estimated_document_count does not support sessions") @@ -1716,25 +1723,9 @@ def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) kwargs["comment"] = comment def _cmd(session, server, sock_info, read_preference): - if sock_info.max_wire_version >= 12: - # MongoDB 4.9+ - pipeline = [ - {"$collStats": {"count": {}}}, - {"$group": {"_id": 1, "n": {"$sum": "$count"}}}, - ] - cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) - cmd.update(kwargs) - result = self._aggregate_one_result( - sock_info, read_preference, cmd, collation=None, session=session - ) - if not result: - return 0 - return int(result["n"]) - else: - # MongoDB < 4.9 - cmd = SON([("count", self.__name)]) - cmd.update(kwargs) - return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) + cmd = SON([("count", self.__name)]) + cmd.update(kwargs) + return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) return self._retryable_non_cursor_read(_cmd, None) diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json index 99f642e597..b5638fd36e 100644 --- a/test/collection_management/timeseries-collection.json +++ b/test/collection_management/timeseries-collection.json @@ -82,14 +82,6 @@ "databaseName": "ts-tests" } }, - { - "commandStartedEvent": { - "command": { - "listCollections": 1 - }, - "databaseName": "ts-tests" - } - }, { "commandStartedEvent": { "command": { @@ -204,14 +196,6 @@ "databaseName": "ts-tests" } }, - { - "commandStartedEvent": { - "command": { - "listCollections": 1 - }, - "databaseName": "ts-tests" - } - }, { "commandStartedEvent": { "command": { diff --git a/test/crud/unified/aggregate-allowdiskuse.json b/test/crud/unified/aggregate-allowdiskuse.json new file mode 100644 index 0000000000..2e54175b8a --- /dev/null +++ b/test/crud/unified/aggregate-allowdiskuse.json @@ -0,0 +1,155 @@ +{ + "description": "aggregate-allowdiskuse", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Aggregate does not send allowDiskUse when value is not specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": { + "$$exists": false + } + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse false when false is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": false + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate sends allowDiskUse true when true is specified", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + } + ], + "allowDiskUse": true + }, + "commandName": "aggregate", + "databaseName": "crud-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-comment.json b/test/crud/unified/bulkWrite-comment.json index fac9644543..0b2addc850 100644 --- a/test/crud/unified/bulkWrite-comment.json +++ b/test/crud/unified/bulkWrite-comment.json @@ -150,6 +150,12 @@ "u": { "_id": 1, "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } }, { @@ -160,6 +166,12 @@ "$set": { "x": "updated" } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -317,6 +329,12 @@ "u": { "_id": 1, "x": "replaced" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } }, { @@ -327,6 +345,12 @@ "$set": { "x": "updated" } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -388,6 +412,7 @@ "description": "BulkWrite with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/bulkWrite-replaceOne-let.json b/test/crud/unified/bulkWrite-replaceOne-let.json index df4eafe62f..70f63837a8 100644 --- a/test/crud/unified/bulkWrite-replaceOne-let.json +++ b/test/crud/unified/bulkWrite-replaceOne-let.json @@ -95,6 +95,12 @@ }, "u": { "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -183,6 +189,12 @@ }, "u": { "x": 3 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/countDocuments-comment.json b/test/crud/unified/countDocuments-comment.json new file mode 100644 index 0000000000..e6c7ae8170 --- /dev/null +++ b/test/crud/unified/countDocuments-comment.json @@ -0,0 +1,208 @@ +{ + "description": "countDocuments-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "countDocuments-comments-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "countDocuments-comments-test", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with string comment", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": "comment" + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + }, + { + "description": "countDocuments with document comment on less than 4.4.0 - server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.3.99" + } + ], + "operations": [ + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll0", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "comment": { + "key": "value" + } + }, + "commandName": "aggregate", + "databaseName": "countDocuments-comments-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany-comment.json b/test/crud/unified/deleteMany-comment.json index ea6a8524d9..6abc5fd58a 100644 --- a/test/crud/unified/deleteMany-comment.json +++ b/test/crud/unified/deleteMany-comment.json @@ -175,6 +175,7 @@ "description": "deleteMany with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/deleteOne-comment.json b/test/crud/unified/deleteOne-comment.json index 37f356ec6f..0f42b086a3 100644 --- a/test/crud/unified/deleteOne-comment.json +++ b/test/crud/unified/deleteOne-comment.json @@ -177,6 +177,7 @@ "description": "deleteOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/estimatedDocumentCount-comment.json b/test/crud/unified/estimatedDocumentCount-comment.json new file mode 100644 index 0000000000..6c0adacc8f --- /dev/null +++ b/test/crud/unified/estimatedDocumentCount-comment.json @@ -0,0 +1,170 @@ +{ + "description": "estimatedDocumentCount-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "edc-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "edc-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "estimatedDocumentCount with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": "comment" + }, + "expectResult": 3 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": "comment" + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount with document comment - pre 4.4.14, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "estimatedDocumentCount", + "object": "collection0", + "arguments": { + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll0", + "comment": { + "key": "value" + } + }, + "commandName": "count", + "databaseName": "edc-comment-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json index bcd66ea954..1b650c1cb6 100644 --- a/test/crud/unified/estimatedDocumentCount.json +++ b/test/crud/unified/estimatedDocumentCount.json @@ -34,6 +34,13 @@ "database": "database0", "collectionName": "coll1" } + }, + { + "collection": { + "id": "collection0View", + "database": "database0", + "collectionName": "coll0view" + } } ], "initialData": [ @@ -58,12 +65,7 @@ ], "tests": [ { - "description": "estimatedDocumentCount uses $collStats on 4.9.0 or greater", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } - ], + "description": "estimatedDocumentCount always uses count", "operations": [ { "name": "estimatedDocumentCount", @@ -78,24 +80,9 @@ { "commandStartedEvent": { "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] + "count": "coll0" }, - "commandName": "aggregate", + "commandName": "count", "databaseName": "edc-tests" } } @@ -104,12 +91,7 @@ ] }, { - "description": "estimatedDocumentCount with maxTimeMS on 4.9.0 or greater", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } - ], + "description": "estimatedDocumentCount with maxTimeMS", "operations": [ { "name": "estimatedDocumentCount", @@ -127,25 +109,10 @@ { "commandStartedEvent": { "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ], + "count": "coll0", "maxTimeMS": 6000 }, - "commandName": "aggregate", + "commandName": "count", "databaseName": "edc-tests" } } @@ -154,12 +121,7 @@ ] }, { - "description": "estimatedDocumentCount on non-existent collection on 4.9.0 or greater", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } - ], + "description": "estimatedDocumentCount on non-existent collection", "operations": [ { "name": "estimatedDocumentCount", @@ -174,24 +136,9 @@ { "commandStartedEvent": { "command": { - "aggregate": "coll1", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] + "count": "coll1" }, - "commandName": "aggregate", + "commandName": "count", "databaseName": "edc-tests" } } @@ -200,78 +147,21 @@ ] }, { - "description": "estimatedDocumentCount errors correctly on 4.9.0 or greater--command error", + "description": "estimatedDocumentCount errors correctly--command error", "runOnRequirements": [ { - "minServerVersion": "4.9.0" - } - ], - "operations": [ - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "client0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 8 - } - } - } + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] }, { - "name": "estimatedDocumentCount", - "object": "collection0", - "expectError": { - "errorCode": 8 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "commandName": "aggregate", - "databaseName": "edc-tests" - } - } + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" ] } - ] - }, - { - "description": "estimatedDocumentCount errors correctly on 4.9.0 or greater--socket error", - "runOnRequirements": [ - { - "minServerVersion": "4.9.0" - } ], "operations": [ { @@ -286,9 +176,9 @@ }, "data": { "failCommands": [ - "aggregate" + "count" ], - "closeConnection": true + "errorCode": 8 } } } @@ -297,56 +187,10 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "isError": true + "errorCode": 8 } } ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "coll0", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "commandName": "aggregate", - "databaseName": "edc-tests" - } - } - ] - } - ] - }, - { - "description": "estimatedDocumentCount uses count on less than 4.9.0", - "runOnRequirements": [ - { - "maxServerVersion": "4.8.99" - } - ], - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection0", - "expectResult": 3 - } - ], "expectEvents": [ { "client": "client0", @@ -365,77 +209,10 @@ ] }, { - "description": "estimatedDocumentCount with maxTimeMS on less than 4.9.0", - "runOnRequirements": [ - { - "maxServerVersion": "4.8.99" - } - ], - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection0", - "arguments": { - "maxTimeMS": 6000 - }, - "expectResult": 3 - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "count": "coll0", - "maxTimeMS": 6000 - }, - "commandName": "count", - "databaseName": "edc-tests" - } - } - ] - } - ] - }, - { - "description": "estimatedDocumentCount on non-existent collection on less than 4.9.0", - "runOnRequirements": [ - { - "maxServerVersion": "4.8.99" - } - ], - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection1", - "expectResult": 0 - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "count": "coll1" - }, - "commandName": "count", - "databaseName": "edc-tests" - } - } - ] - } - ] - }, - { - "description": "estimatedDocumentCount errors correctly on less than 4.9.0--command error", + "description": "estimatedDocumentCount errors correctly--socket error", "runOnRequirements": [ { "minServerVersion": "4.0.0", - "maxServerVersion": "4.8.99", "topologies": [ "single", "replicaset" @@ -443,7 +220,6 @@ }, { "minServerVersion": "4.2.0", - "maxServerVersion": "4.8.99", "topologies": [ "sharded" ] @@ -464,7 +240,7 @@ "failCommands": [ "count" ], - "errorCode": 8 + "closeConnection": true } } } @@ -473,7 +249,7 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "errorCode": 8 + "isError": true } } ], @@ -495,50 +271,41 @@ ] }, { - "description": "estimatedDocumentCount errors correctly on less than 4.9.0--socket error", + "description": "estimatedDocumentCount works correctly on views", "runOnRequirements": [ { - "minServerVersion": "4.0.0", - "maxServerVersion": "4.8.99", - "topologies": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.2.0", - "maxServerVersion": "4.8.99", - "topologies": [ - "sharded" - ] + "minServerVersion": "3.4.0" } ], "operations": [ { - "name": "failPoint", - "object": "testRunner", + "name": "dropCollection", + "object": "database0", "arguments": { - "client": "client0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true + "collection": "coll0view" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } } - } + ] } }, { "name": "estimatedDocumentCount", - "object": "collection0", - "expectError": { - "isError": true - } + "object": "collection0View", + "expectResult": 2 } ], "expectEvents": [ @@ -548,7 +315,35 @@ { "commandStartedEvent": { "command": { - "count": "coll0" + "drop": "coll0view" + }, + "commandName": "drop", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "coll0view", + "viewOn": "coll0", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + }, + "commandName": "create", + "databaseName": "edc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll0view" }, "commandName": "count", "databaseName": "edc-tests" diff --git a/test/crud/unified/find-allowdiskuse.json b/test/crud/unified/find-allowdiskuse.json index 789bb7fbf1..eb238ab93a 100644 --- a/test/crud/unified/find-allowdiskuse.json +++ b/test/crud/unified/find-allowdiskuse.json @@ -32,7 +32,7 @@ ], "tests": [ { - "description": "Find does not send allowDiskuse when value is not specified", + "description": "Find does not send allowDiskUse when value is not specified", "operations": [ { "object": "collection0", @@ -61,7 +61,7 @@ ] }, { - "description": "Find sends allowDiskuse false when false is specified", + "description": "Find sends allowDiskUse false when false is specified", "operations": [ { "object": "collection0", diff --git a/test/crud/unified/insertMany-comment.json b/test/crud/unified/insertMany-comment.json index 7e835e8011..2b4c80b3f0 100644 --- a/test/crud/unified/insertMany-comment.json +++ b/test/crud/unified/insertMany-comment.json @@ -166,6 +166,7 @@ "description": "insertMany with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/insertOne-comment.json b/test/crud/unified/insertOne-comment.json index a9f735ab6c..dbd83d9f64 100644 --- a/test/crud/unified/insertOne-comment.json +++ b/test/crud/unified/insertOne-comment.json @@ -162,6 +162,7 @@ "description": "insertOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], diff --git a/test/crud/unified/replaceOne-comment.json b/test/crud/unified/replaceOne-comment.json index 02fe90a44d..88bee5d7b7 100644 --- a/test/crud/unified/replaceOne-comment.json +++ b/test/crud/unified/replaceOne-comment.json @@ -75,6 +75,12 @@ }, "u": { "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -137,6 +143,12 @@ }, "u": { "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -166,6 +178,7 @@ "description": "ReplaceOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], @@ -202,6 +215,12 @@ }, "u": { "x": 22 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/replaceOne-let.json b/test/crud/unified/replaceOne-let.json index 6cf8e15675..e7a7ee65a5 100644 --- a/test/crud/unified/replaceOne-let.json +++ b/test/crud/unified/replaceOne-let.json @@ -94,6 +94,12 @@ }, "u": { "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -176,6 +182,12 @@ }, "u": { "x": "foo" + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/updateMany-comment.json b/test/crud/unified/updateMany-comment.json index 26abd92ed4..88b8b67f5a 100644 --- a/test/crud/unified/updateMany-comment.json +++ b/test/crud/unified/updateMany-comment.json @@ -80,7 +80,10 @@ "x": 22 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "comment": "comment" @@ -147,7 +150,10 @@ "x": 22 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "comment": { @@ -176,6 +182,7 @@ "description": "UpdateMany with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], @@ -217,7 +224,10 @@ "x": 22 } }, - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "comment": "comment" diff --git a/test/crud/unified/updateMany-let.json b/test/crud/unified/updateMany-let.json index 8a19ac0933..cff3bd4c79 100644 --- a/test/crud/unified/updateMany-let.json +++ b/test/crud/unified/updateMany-let.json @@ -114,7 +114,10 @@ } } ], - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { @@ -207,7 +210,10 @@ } } ], - "multi": true + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { diff --git a/test/crud/unified/updateOne-comment.json b/test/crud/unified/updateOne-comment.json index 9b3b71d395..f4ee74db38 100644 --- a/test/crud/unified/updateOne-comment.json +++ b/test/crud/unified/updateOne-comment.json @@ -79,6 +79,12 @@ "$set": { "x": 22 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -145,6 +151,12 @@ "$set": { "x": 22 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], @@ -174,6 +186,7 @@ "description": "UpdateOne with comment - pre 4.4", "runOnRequirements": [ { + "minServerVersion": "3.4.0", "maxServerVersion": "4.2.99" } ], @@ -214,6 +227,12 @@ "$set": { "x": 22 } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false } } ], diff --git a/test/crud/unified/updateOne-let.json b/test/crud/unified/updateOne-let.json index 8237bef7e8..e43b979358 100644 --- a/test/crud/unified/updateOne-let.json +++ b/test/crud/unified/updateOne-let.json @@ -103,7 +103,13 @@ "x": "$$x" } } - ] + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { @@ -184,7 +190,13 @@ "x": "$$x" } } - ] + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "let": { diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json index 87b385208d..997a3ab3fc 100644 --- a/test/data_lake/estimatedDocumentCount.json +++ b/test/data_lake/estimatedDocumentCount.json @@ -15,24 +15,9 @@ { "command_started_event": { "command": { - "aggregate": "driverdata", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] + "count": "driverdata" }, - "command_name": "aggregate", + "command_name": "count", "database_name": "test" } } diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json deleted file mode 100644 index a4c46fc074..0000000000 --- a/test/retryable_reads/legacy/estimatedDocumentCount-4.9.json +++ /dev/null @@ -1,246 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9.0" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds on first attempt", - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json deleted file mode 100644 index 756b02b3a8..0000000000 --- a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-4.9.json +++ /dev/null @@ -1,911 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9.0" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json similarity index 99% rename from test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json index 0b9a2615d1..6bb128f5f3 100644 --- a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors-pre4.9.json +++ b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "4.0", - "maxServerVersion": "4.8.99", "topology": [ "single", "replicaset" @@ -10,7 +9,6 @@ }, { "minServerVersion": "4.1.7", - "maxServerVersion": "4.8.99", "topology": [ "sharded" ] diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json b/test/retryable_reads/legacy/estimatedDocumentCount.json similarity index 97% rename from test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json rename to test/retryable_reads/legacy/estimatedDocumentCount.json index 44be966ae7..8dfa15a2cd 100644 --- a/test/retryable_reads/legacy/estimatedDocumentCount-pre4.9.json +++ b/test/retryable_reads/legacy/estimatedDocumentCount.json @@ -2,7 +2,6 @@ "runOn": [ { "minServerVersion": "4.0", - "maxServerVersion": "4.8.99", "topology": [ "single", "replicaset" @@ -10,7 +9,6 @@ }, { "minServerVersion": "4.1.7", - "maxServerVersion": "4.8.99", "topology": [ "sharded" ] diff --git a/test/unified_format.py b/test/unified_format.py index 378fcc4759..459566d711 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -226,6 +226,7 @@ def __init__( self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add("configurefailpoint") + self.ignore_list_collections = False self._event_mapping = collections.defaultdict(list) self.entity_map = entity_map if store_events: @@ -256,7 +257,10 @@ def add_event(self, event): ) def _command_event(self, event): - if event.command_name.lower() not in self._ignore_commands: + if not ( + event.command_name.lower() in self._ignore_commands + or (self.ignore_list_collections and event.command_name == "listCollections") + ): self.add_event(event) def started(self, event): @@ -883,6 +887,17 @@ def _databaseOperation_listCollections(self, target, *args, **kwargs): cursor = target.list_collections(*args, **kwargs) return list(cursor) + def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. + for listener in target.client.options.event_listeners: + if isinstance(listener, EventListenerUtil): + listener.ignore_list_collections = True + ret = target.create_collection(*args, **kwargs) + for listener in target.client.options.event_listeners: + if isinstance(listener, EventListenerUtil): + listener.ignore_list_collections = False + return ret + def __entityOperation_aggregate(self, target, *args, **kwargs): self.__raise_if_unsupported("aggregate", target, Database, Collection) return list(target.aggregate(*args, **kwargs)) diff --git a/test/versioned-api/crud-api-version-1-strict.json b/test/versioned-api/crud-api-version-1-strict.json index 29a0ec4e3b..c1c8ecce01 100644 --- a/test/versioned-api/crud-api-version-1-strict.json +++ b/test/versioned-api/crud-api-version-1-strict.json @@ -613,6 +613,15 @@ }, { "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], "operations": [ { "name": "estimatedDocumentCount", @@ -627,22 +636,7 @@ { "commandStartedEvent": { "command": { - "aggregate": "test", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ], + "count": "test", "apiVersion": "1", "apiStrict": true, "apiDeprecationErrors": { diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index 1f135eea18..a387d0587e 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -604,7 +604,16 @@ ] }, { - "description": "estimatedDocumentCount appends declared API version on 4.9.0 or greater", + "description": "estimatedDocumentCount appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "5.0.9", + "maxServerVersion": "5.0.99" + }, + { + "minServerVersion": "5.3.2" + } + ], "operations": [ { "name": "estimatedDocumentCount", @@ -619,22 +628,7 @@ { "commandStartedEvent": { "command": { - "aggregate": "test", - "pipeline": [ - { - "$collStats": { - "count": {} - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": "$count" - } - } - } - ], + "count": "test", "apiVersion": "1", "apiStrict": { "$$unsetOrMatches": false From ede07f44dd0adbe9a664c7a19392d79e3cbea9f0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 3 May 2022 14:41:24 -0700 Subject: [PATCH 0654/2111] PYTHON-3250 Speed up majority writes in test suite (#936) --- test/__init__.py | 6 ++++++ test/test_encryption.py | 31 +++++++++++++++---------------- test/test_retryable_reads.py | 21 ++++++++++++--------- test/unified_format.py | 24 +++++++++++++----------- test/utils_spec_runner.py | 21 ++++++++++++++------- 5 files changed, 60 insertions(+), 43 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 3800c7890e..64c812c112 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1029,10 +1029,15 @@ def tearDown(self): super(MockClientTest, self).tearDown() +# Global knobs to speed up the test suite. +global_knobs = client_knobs(events_queue_frequency=0.05) + + def setup(): client_context.init() warnings.resetwarnings() warnings.simplefilter("always") + global_knobs.enable() def _get_executors(topology): @@ -1086,6 +1091,7 @@ def print_running_clients(): def teardown(): + global_knobs.disable() garbage = [] for g in gc.garbage: garbage.append("GARBAGE: %r" % (g,)) diff --git a/test/test_encryption.py b/test/test_encryption.py index ec854ff03a..366c406b03 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -24,7 +24,7 @@ import textwrap import traceback import uuid -from typing import Any +from typing import Any, Dict from pymongo.collection import Collection @@ -621,31 +621,30 @@ def maybe_skip_scenario(self, test): def setup_scenario(self, scenario_def): """Override a test's setup.""" key_vault_data = scenario_def["key_vault_data"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] if key_vault_data: - coll = client_context.client.get_database( - "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS - )["datakeys"] - coll.drop() + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) coll.insert_many(key_vault_data) db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w="majority"), codec_options=OPTS - ) + db = client_context.client.get_database(db_name, codec_options=OPTS) coll = db[coll_name] coll.drop() - json_schema = scenario_def["json_schema"] + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} if json_schema: - db.create_collection( - coll_name, validator={"$jsonSchema": json_schema}, codec_options=OPTS - ) - else: - db.create_collection(coll_name) + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + db.create_collection(coll_name, **kwargs) - if scenario_def["data"]: + if data: # Load data. - coll.insert_many(scenario_def["data"]) + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) def allowable_errors(self, op): """Override expected error classes.""" diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 01fe6901ae..2b8bc17c58 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -112,17 +112,20 @@ def get_scenario_coll_name(self, scenario_def): def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" if "bucket_name" in scenario_def: + data = scenario_def["data"] db_name = self.get_scenario_db_name(scenario_def) - db = client_context.client.get_database( - db_name, write_concern=WriteConcern(w="majority") - ) - # Create a bucket for the retryable reads GridFS tests. - client_context.client.drop_database(db_name) - if scenario_def["data"]: - data = scenario_def["data"] - # Load data. + db = client_context.client[db_name] + # Create a bucket for the retryable reads GridFS tests with as few + # majority writes as possible. + wc = WriteConcern(w="majority") + if data: + db["fs.chunks"].drop() + db["fs.files"].drop() db["fs.chunks"].insert_many(data["fs.chunks"]) - db["fs.files"].insert_many(data["fs.files"]) + db.get_collection("fs.files", write_concern=wc).insert_many(data["fs.files"]) + else: + db.get_collection("fs.chunks").drop() + db.get_collection("fs.files", write_concern=wc).drop() else: super(TestSpec, self).setup_scenario(scenario_def) diff --git a/test/unified_format.py b/test/unified_format.py index 459566d711..9edf499ece 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -721,22 +721,24 @@ def should_run_on(run_on_spec): return False def insert_initial_data(self, initial_data): - for collection_data in initial_data: + for i, collection_data in enumerate(initial_data): coll_name = collection_data["collectionName"] db_name = collection_data["databaseName"] documents = collection_data["documents"] - coll = self.client.get_database(db_name).get_collection( - coll_name, write_concern=WriteConcern(w="majority") - ) - coll.drop() - - if len(documents) > 0: - coll.insert_many(documents) + # Setup the collection with as few majority writes as possible. + db = self.client[db_name] + db.drop_collection(coll_name) + # Only use majority wc only on the final write. + if i == len(initial_data) - 1: + wc = WriteConcern(w="majority") + else: + wc = WriteConcern(w=1) + if documents: + db.get_collection(coll_name, write_concern=wc).insert_many(documents) else: - # ensure collection exists - result = coll.insert_one({}) - coll.delete_one({"_id": result.inserted_id}) + # Ensure collection exists + db.create_collection(coll_name, write_concern=wc) @classmethod def setUpClass(cls): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4a71fef328..4ae4d1bfb4 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -453,13 +453,20 @@ def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database(db_name, write_concern=WriteConcern(w="majority")) - coll = db[coll_name] - coll.drop() - db.create_collection(coll_name) - if scenario_def["data"]: - # Load data. - coll.insert_many(scenario_def["data"]) + documents = scenario_def["data"] + + # Setup the collection with as few majority writes as possible. + db = client_context.client.get_database(db_name) + coll_exists = bool(db.list_collection_names(filter={"name": coll_name})) + if coll_exists: + db[coll_name].delete_many({}) + # Only use majority wc only on the final write. + wc = WriteConcern(w="majority") + if documents: + db.get_collection(coll_name, write_concern=wc).insert_many(documents) + elif not coll_exists: + # Ensure collection exists. + db.create_collection(coll_name, write_concern=wc) def run_scenario(self, scenario_def, test): self.maybe_skip_scenario(test) From 252ed1cef67663e125ed07eee92ef8e096cc2ad0 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 3 May 2022 14:49:18 -0700 Subject: [PATCH 0655/2111] PYTHON-3247 Mitigate user issues caused by change in directConnection defaults in 4.x (#935) --- doc/changelog.rst | 9 +++++++++ doc/migrate-to-pymongo4.rst | 24 ++++++++++++++++++++++++ pymongo/mongo_client.py | 5 +++++ 3 files changed, 38 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3d2f7cadc4..97795fdfb9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -120,6 +120,15 @@ Changes in Version 4.0 .. warning:: PyMongo 4.0 drops support for MongoDB 2.6, 3.0, 3.2, and 3.4. +.. warning:: PyMongo 4.0 changes the default value of the ``directConnection`` URI option and + keyword argument to :class:`~pymongo.mongo_client.MongoClient` + to ``False`` instead of ``None``, allowing for the automatic + discovery of replica sets. This means that if you + want a direct connection to a single server you must pass + ``directConnection=True`` as a URI option or keyword argument. + For more details, see the relevant section of the PyMongo 4.x migration + guide: :ref:`pymongo4-migration-direct-connection`. + PyMongo 4.0 brings a number of improvements as well as some backward breaking changes. For example, all APIs deprecated in PyMongo 3.X have been removed. Be sure to read the changes listed below and the :doc:`migrate-to-pymongo4` diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5f75ed1760..d70d7b8a2c 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -65,6 +65,8 @@ get the same behavior. MongoClient ----------- +.. _pymongo4-migration-direct-connection: + ``directConnection`` defaults to False ...................................... @@ -74,6 +76,28 @@ allowing for the automatic discovery of replica sets. This means that if you want a direct connection to a single server you must pass ``directConnection=True`` as a URI option or keyword argument. +If you see any :exc:`~pymongo.errors.ServerSelectionTimeoutError`'s after upgrading from PyMongo 3 to 4.x, you likely +need to add ``directConnection=True`` when creating the client. +Here are some example errors: + +.. code-block:: + + pymongo.errors.ServerSelectionTimeoutError: mongo_node2: [Errno 8] nodename nor servname + provided, or not known,mongo_node1:27017 + +.. code-block:: + + ServerSelectionTimeoutError: No servers match selector "Primary()", Timeout: 30s, + Topology Description: ... + + +Additionally, the "isWritablePrimary" attribute of a hello command sent back by the server will +always be True if ``directConnection=False``:: + + >>> client.admin.command('hello')['isWritablePrimary'] + True + + The waitQueueMultiple parameter is removed .......................................... diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5c7e7cb176..6601c18aca 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -137,6 +137,11 @@ def __init__( ) -> None: """Client for a MongoDB instance, a replica set, or a set of mongoses. + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + The client object is thread-safe and has connection-pooling built in. If an operation fails because of a network error, :class:`~pymongo.errors.ConnectionFailure` is raised and the client From 9a829acf2e5a009e21012d7b381a65e54b0a0c02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 May 2022 11:12:58 -0700 Subject: [PATCH 0656/2111] PYTHON-3251 Make extra whitespace visible in invalid port exception (#937) --- pymongo/uri_parser.py | 2 +- test/test_uri_parser.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index bfbf214bcb..cd18c067e7 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -134,7 +134,7 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr host, port = host.split(":", 1) if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535: %s" % (port,)) + raise ValueError("Port must be an integer between 0 and 65535: %r" % (port,)) port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 4fa288df44..2f81e3b512 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -147,6 +147,10 @@ def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) + # Extra whitespace should be visible in error message. + with self.assertRaisesRegex(ValueError, "'27017 '"): + parse_uri("mongodb://localhost:27017 ") + orig: dict = { "nodelist": [("localhost", 27017)], "username": None, From 502effeebabd7092897273dc3a972e9f31160ec3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 May 2022 10:52:53 -0700 Subject: [PATCH 0657/2111] PYTHON-3167 Fix mockupdb tests for estimated_document_count (#938) --- test/mockupdb/operations.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index efb9e5084e..90d7f27c39 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -14,7 +14,7 @@ from collections import namedtuple -from mockupdb import OpMsg, OpMsgReply, OpReply +from mockupdb import OpMsgReply, OpReply from pymongo import ReadPreference @@ -61,12 +61,19 @@ not_master=not_master_reply, ), Operation( - "count", + "count_documents", lambda client: client.db.collection.count_documents({}), reply={"n": 1}, op_type="may-use-secondary", not_master=not_master_reply, ), + Operation( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), Operation( "aggregate", lambda client: client.db.collection.aggregate([]), @@ -109,12 +116,4 @@ Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) -upgrades = [ - Upgrade( - "estimated_document_count", - lambda client: client.db.collection.estimated_document_count(), - old=OpMsg("count", "collection", namespace="db"), - new=OpMsg("aggregate", "collection", namespace="db"), - wire_version=12, - ), -] +upgrades = [] From 75685c006c9184230af131a09611aa3e6e1ac649 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 May 2022 16:13:41 -0700 Subject: [PATCH 0658/2111] PYTHON-3235 Drop support for Python 3.6 (#939) --- .evergreen/build-mac.sh | 2 +- .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 1 - .evergreen/build-windows.sh | 2 +- .evergreen/config.yml | 77 ++++++++++++-------------- .evergreen/run-mongodb-aws-ecs-test.sh | 28 +++------- .evergreen/run-tests.sh | 8 +-- .evergreen/utils.sh | 10 ++-- .github/workflows/test-python.yml | 2 +- CONTRIBUTING.rst | 2 +- README.rst | 2 +- doc/changelog.rst | 2 + doc/examples/tls.rst | 2 +- doc/faq.rst | 2 +- doc/installation.rst | 4 +- doc/python3.rst | 18 +++--- pymongo/pool.py | 4 +- pymongo/pyopenssl_context.py | 1 - pymongo/ssl_context.py | 6 -- pymongo/ssl_support.py | 2 +- setup.py | 7 +-- test/test_mypy.py | 3 +- test/utils.py | 15 ++--- 23 files changed, 86 insertions(+), 116 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 5671ae6c6f..09950a592f 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.6 3.7 3.8 3.9 3.10; do +for VERSION in 3.7 3.8 3.9 3.10; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 1b74fc68e1..4fd43a67a3 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp36|cp37|cp38|cp39|cp310) ]]; then + if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index a9a7238cb2..cac435fb11 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -34,7 +34,6 @@ ls dist # Check for any unexpected files. unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp36*' -or \ -iname '*cp37*' -or \ -iname '*cp38*' -or \ -iname '*cp39*' -or \ diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 3a33558cc9..09f5e7f0b4 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 36 37 38 39 310; do +for VERSION in 37 38 39 310; do _pythons=("C:/Python/Python${VERSION}/python.exe" \ "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a6d9375f26..2576307364 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -135,8 +135,8 @@ functions: # Coverage combine merges (and removes) all the coverage files and # generates a new .coverage file in the current directory. ls -la coverage/ - /opt/python/3.6/bin/python3 -m coverage combine coverage/coverage.* - /opt/python/3.6/bin/python3 -m coverage html -d htmlcov + /opt/python/3.7/bin/python3 -m coverage combine coverage/coverage.* + /opt/python/3.7/bin/python3 -m coverage html -d htmlcov # Upload the resulting html coverage report. - command: shell.exec params: @@ -932,7 +932,7 @@ functions: done # Build source distribution. cd src/ - /opt/python/3.6/bin/python3 setup.py sdist + /opt/python/3.7/bin/python3 setup.py sdist cp dist/* ../releases - command: archive.targz_pack params: @@ -1324,7 +1324,7 @@ tasks: commands: - func: "run tests" vars: - PYTHON_BINARY: /opt/python/3.6/bin/python3 + PYTHON_BINARY: /opt/python/3.7/bin/python3 - name: "atlas-connect" tags: ["atlas-connect"] @@ -1945,10 +1945,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "3.6" - display_name: "Python 3.6" - variables: - PYTHON_BINARY: "/opt/python/3.6/bin/python3" - id: "3.7" display_name: "Python 3.7" variables: @@ -1965,10 +1961,6 @@ axes: display_name: "Python 3.10" variables: PYTHON_BINARY: "/opt/python/3.10/bin/python3" - - id: "pypy3.6" - display_name: "PyPy 3.6" - variables: - PYTHON_BINARY: "/opt/python/pypy3.6/bin/pypy3" - id: "pypy3.7" display_name: "PyPy 3.7" variables: @@ -1977,6 +1969,10 @@ axes: display_name: "PyPy 3.8" variables: PYTHON_BINARY: "/opt/python/pypy3.8/bin/pypy3" + + - id: python-version-mac + display_name: "Python" + values: - id: "system-python3" display_name: "Python3" variables: @@ -1985,10 +1981,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "3.6" - display_name: "Python 3.6" - variables: - PYTHON_BINARY: "C:/python/Python36/python.exe" - id: "3.7" display_name: "Python 3.7" variables: @@ -2009,10 +2001,6 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "3.6" - display_name: "32-bit Python 3.6" - variables: - PYTHON_BINARY: "C:/python/32/Python36/python.exe" - id: "3.7" display_name: "32-bit Python 3.7" variables: @@ -2281,7 +2269,7 @@ buildvariants: # Only test "noauth" with Python 3.7. exclude_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.8", "3.9", "3.10", "pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2334,7 +2322,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" coverage: "*" @@ -2350,7 +2338,7 @@ buildvariants: exclude_spec: # These interpreters are always tested without extensions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" display_name: "${compression} ${c-extensions} ${python-version} ${platform}" @@ -2379,7 +2367,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: ubuntu-18.04 - python-version: ["pypy3.6", "pypy3.7", "pypy3.8", "system-python3"] + python-version: ["pypy3.7", "pypy3.8"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2405,7 +2393,7 @@ buildvariants: matrix_spec: platform: awslinux # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.6", "3.7", "3.8", "3.9", "pypy3.6", "pypy3.7", "pypy3.8"] + python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: @@ -2420,12 +2408,12 @@ buildvariants: display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions -# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.6. +# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: platform: ubuntu-18.04 storage-engine: "*" - python-version: 3.6 + python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: @@ -2452,12 +2440,12 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.6. +# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.7. - matrix_name: "test-disableTestCommands" matrix_spec: platform: ubuntu-18.04 disableTestCommands: "*" - python-version: "3.6" + python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" tasks: - ".latest" @@ -2483,7 +2471,7 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ @@ -2498,7 +2486,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: platform: ubuntu-18.04 - python-version: 3.6 + python-version: 3.7 display_name: "MockupDB Tests" tasks: - name: "mockupdb" @@ -2543,11 +2531,6 @@ buildvariants: python-version: "*" auth-ssl: auth-ssl serverless: "*" - exclude_spec: - - platform: ubuntu-18.04 - python-version: ["system-python3"] - auth-ssl: auth-ssl - serverless: "*" display_name: "Serverless ${python-version} ${platform}" tasks: - "serverless_task_group" @@ -2555,7 +2538,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.10"] + python-version: ["3.7", "3.10"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2565,7 +2548,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: platform: ubuntu-18.04 - python-version: ["3.6", "3.10"] + python-version: ["3.7", "3.10"] auth: "auth" versionedApi: "*" display_name: "Versioned API ${versionedApi} ${python-version}" @@ -2580,7 +2563,7 @@ buildvariants: # OCSP stapling is not supported on Ubuntu 18.04. # See https://jira.mongodb.org/browse/SERVER-51364. platform: ubuntu-20.04 - python-version: ["3.6", "3.10", "pypy3.6", "pypy3.8"] + python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2592,7 +2575,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows-64-vsMulti-small - python-version-windows: ["3.6", "3.10"] + python-version-windows: ["3.7", "3.10"] mongodb-version: ["4.4", "5.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2616,14 +2599,24 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: [ubuntu-18.04, macos-1014] - python-version: ["system-python3"] + platform: [ubuntu-18.04] + python-version: ["3.7"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" +- matrix_name: "aws-auth-test-mac" + matrix_spec: + platform: [macos-1014] + python-version-mac: ["system-python3"] + display_name: "MONGODB-AWS Auth ${platform} ${python-version-mac}" + tasks: + - name: "aws-auth-test-4.4" + - name: "aws-auth-test-5.0" + - name: "aws-auth-test-latest" + - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 3484f41f43..83f3975e9e 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -20,32 +20,18 @@ fi # Now we can safely enable xtrace set -o xtrace -if command -v virtualenv ; then - VIRTUALENV=$(command -v virtualenv) -else - if ! python3 -m pip --version ; then - echo "Installing pip..." - apt-get update - apt install python3-pip -y - fi - echo "Installing virtualenv..." - python3 -m pip install --user virtualenv - VIRTUALENV='python3 -m virtualenv' -fi +# Install python3.7 with pip. +apt-get update +apt install python3.7 python3-pip -y authtest () { echo "Running MONGODB-AWS ECS authentication tests with $PYTHON" $PYTHON --version - - $VIRTUALENV -p $PYTHON --never-download venvaws - . venvaws/bin/activate - + $PYTHON -m pip install --upgrade wheel setuptools pip cd src - python -m pip install '.[aws]' - python test/auth_aws/test_auth_aws.py + $PYTHON -m pip install '.[aws]' + $PYTHON test/auth_aws/test_auth_aws.py cd - - deactivate - rm -rf venvaws } -PYTHON=$(command -v python3) authtest +PYTHON="python3.7" authtest diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index ade267d2b1..4a48b4a33b 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -66,13 +66,13 @@ fi if [ -z "$PYTHON_BINARY" ]; then # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a - # system python3 doesn't exist or exists but is older than 3.6. - if is_python_36 "$(command -v python3)"; then + # system python3 doesn't exist or exists but is older than 3.7. + if is_python_37 "$(command -v python3)"; then PYTHON=$(command -v python3) - elif is_python_36 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + elif is_python_37 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then PYTHON=$(command -v /opt/mongodbtoolchain/v3/bin/python3) else - echo "Cannot test without python3.6+ installed!" + echo "Cannot test without python3.7+ installed!" fi elif [ "$COMPRESSORS" = "snappy" ]; then createvirtualenv $PYTHON_BINARY snappytest diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index b7f65104e8..67fa272683 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -59,15 +59,15 @@ testinstall () { fi } -# Function that returns success if the provided Python binary is version 3.6 or later +# Function that returns success if the provided Python binary is version 3.7 or later # Usage: -# is_python_36 /path/to/python +# is_python_37 /path/to/python # * param1: Python binary -is_python_36() { +is_python_37() { if [ -z "$1" ]; then return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 6))"; then - # runs when sys.version_info[:2] >= (3, 6) + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 7))"; then + # runs when sys.version_info[:2] >= (3, 7) return 0 else return 1 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 1eea4ff166..89d9830e82 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -23,7 +23,7 @@ jobs: strategy: matrix: os: [ubuntu-20.04] - python-version: ["3.6", "3.10", "pypy-3.8"] + python-version: ["3.7", "3.10", "pypy-3.8"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v2 diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1a4423f3ef..b8bbad93f6 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,7 +19,7 @@ that might not be of interest or that has already been addressed. Supported Interpreters ---------------------- -PyMongo supports CPython 3.6+ and PyPy3.6+. Language +PyMongo supports CPython 3.7+ and PyPy3.7+. Language features not supported by all interpreters can not be used. Style Guide diff --git a/README.rst b/README.rst index c3c3757289..c301932643 100644 --- a/README.rst +++ b/README.rst @@ -88,7 +88,7 @@ is incompatible with PyMongo. Dependencies ============ -PyMongo supports CPython 3.6.2+ and PyPy3.6+. +PyMongo supports CPython 3.7+ and PyPy3.7+. Optional dependencies: diff --git a/doc/changelog.rst b/doc/changelog.rst index 97795fdfb9..7f002fb470 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,8 @@ Changelog Changes in Version 4.2 ---------------------- +.. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required. + Bug fixes ......... diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 6dcb7a1759..557ee7d9b9 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -182,7 +182,7 @@ server's certificate:: This often occurs because OpenSSL does not have access to the system's root certificates or the certificates are out of date. Linux users should ensure that they have the latest root certificate updates installed from -their Linux vendor. macOS users using Python 3.6.2 or newer downloaded +their Linux vendor. macOS users using Python 3.7 or newer downloaded from python.org `may have to run a script included with python `_ to install root certificates:: diff --git a/doc/faq.rst b/doc/faq.rst index 06559ddb9b..ca83f5de4c 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -145,7 +145,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.6.2+ and PyPy3.6+. See the :doc:`python3` for details. +PyMongo supports CPython 3.7+ and PyPy3.7+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index 4f14b31125..788faf46cc 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.6.2+ and PyPy3.6+. +PyMongo supports CPython 3.7+ and PyPy3.7+. Optional dependencies: @@ -133,7 +133,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.6.2+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.7+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/python3.rst b/doc/python3.rst index c14224166a..812bc33b35 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -6,7 +6,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.6.2+ and PyPy3.6+. +PyMongo supports CPython 3.7+ and PyPy3.7+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- @@ -20,8 +20,8 @@ with subtype 0. For example, let's insert a :class:`bytes` instance using Python 3 then read it back. Notice the byte string is decoded back to :class:`bytes`:: - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pymongo >>> c = pymongo.MongoClient() @@ -49,8 +49,8 @@ decoded to :class:`~bson.binary.Binary` with subtype 0. For example, let's decode a JSON binary subtype 0 using Python 3. Notice the byte string is decoded to :class:`bytes`:: - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> from bson.json_util import loads >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') @@ -86,8 +86,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: >>> pickle.dumps(oid) 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') @@ -97,8 +97,8 @@ Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 you must use ``protocol <= 2``:: - Python 3.6.8 (v3.6.8:3c6b436a57, Dec 24 2018, 02:04:31) - [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin + Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) + [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pickle >>> from bson.objectid import ObjectId diff --git a/pymongo/pool.py b/pymongo/pool.py index 1aaae4067f..13d0e78d1e 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -357,9 +357,9 @@ def __init__( # { # 'driver': { # 'name': 'PyMongo|MyDriver', - # 'version': '3.7.0|1.2.3', + # 'version': '4.2.0|1.2.3', # }, - # 'platform': 'CPython 3.6.0|MyPlatform' + # 'platform': 'CPython 3.7.0|MyPlatform' # } if driver: if driver.name: diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index eae38daef8..3736a4f381 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -55,7 +55,6 @@ # Always available HAS_SNI = True -CHECK_HOSTNAME_SAFE = True IS_PYOPENSSL = True # Base Exception class diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 148bef936d..4e997a439e 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -15,7 +15,6 @@ """A fake SSLContext implementation.""" import ssl as _ssl -import sys as _sys # PROTOCOL_TLS_CLIENT is Python 3.6+ PROTOCOL_SSLv23 = getattr(_ssl, "PROTOCOL_TLS_CLIENT", _ssl.PROTOCOL_SSLv23) @@ -35,8 +34,3 @@ if hasattr(_ssl, "VERIFY_CRL_CHECK_LEAF"): from ssl import VERIFY_CRL_CHECK_LEAF # noqa: F401 -# Python 3.7 uses OpenSSL's hostname matching implementation -# making it the obvious version to start using SSLConext.check_hostname. -# Python 3.6 might have been a good version, but it suffers -# from https://bugs.python.org/issue32185. -CHECK_HOSTNAME_SAFE = _sys.version_info[:2] >= (3, 7) diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 06ef7ef185..6adf629ad3 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -55,7 +55,7 @@ def get_ssl_context( ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) # SSLContext.check_hostname was added in CPython 3.4. if hasattr(ctx, "check_hostname"): - if _ssl.CHECK_HOSTNAME_SAFE and verify_mode != CERT_NONE: + if verify_mode != CERT_NONE: ctx.check_hostname = not allow_invalid_hostnames else: ctx.check_hostname = False diff --git a/setup.py b/setup.py index 9e8e919e88..40fb484ad1 100755 --- a/setup.py +++ b/setup.py @@ -4,8 +4,8 @@ import sys import warnings -if sys.version_info[:3] < (3, 6, 2): - raise RuntimeError("Python version >= 3.6.2 required.") +if sys.version_info[:3] < (3, 7): + raise RuntimeError("Python version >= 3.7 required.") # Hack to silence atexit traceback in some Python versions @@ -321,7 +321,7 @@ def build_extension(self, ext): keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], install_requires=[], license="Apache License, Version 2.0", - python_requires=">=3.6.2", + python_requires=">=3.7", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", @@ -331,7 +331,6 @@ def build_extension(self, ext): "Operating System :: POSIX", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", diff --git a/test/test_mypy.py b/test/test_mypy.py index 12a6cffbe6..07af61ed36 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -23,7 +23,7 @@ try: from typing import TypedDict # type: ignore[attr-defined] - # Not available in Python 3.6 and Python 3.7 + # Not available in Python 3.7 class Movie(TypedDict): # type: ignore[misc] name: str year: int @@ -131,6 +131,7 @@ def test_list_databases(self) -> None: def test_default_document_type(self) -> None: client = rs_or_single_client() + self.addCleanup(client.close) coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) diff --git a/test/utils.py b/test/utils.py index 2c50797266..9e8d6448d9 100644 --- a/test/utils.py +++ b/test/utils.py @@ -875,17 +875,14 @@ def lazy_client_trial(reset, target, test, get_client): def gevent_monkey_patched(): """Check if gevent's monkey patching is active.""" - # In Python 3.6 importing gevent.socket raises an ImportWarning. - with warnings.catch_warnings(): - warnings.simplefilter("ignore", ImportWarning) - try: - import socket + try: + import socket - import gevent.socket + import gevent.socket - return socket.socket is gevent.socket.socket - except ImportError: - return False + return socket.socket is gevent.socket.socket + except ImportError: + return False def eventlet_monkey_patched(): From aa16f1c5feb41833e634a5d1e841854d31b32ba1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 6 May 2022 12:02:36 -0700 Subject: [PATCH 0659/2111] PYTHON-3242 Test against MongoDB 6.0 (#940) --- .evergreen/config.yml | 107 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 94 insertions(+), 13 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2576307364..97d13654c0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1233,6 +1233,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-6.0-standalone" + tags: ["6.0", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-6.0-replica_set" + tags: ["6.0", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-6.0-sharded_cluster" + tags: ["6.0", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "6.0" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-latest-standalone" tags: ["latest", "standalone"] commands: @@ -1694,6 +1721,22 @@ tasks: - func: "run aws auth test with aws EC2 credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-6.0" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "6.0" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" + - name: "aws-auth-test-latest" commands: - func: "bootstrap mongo-orchestration" @@ -1709,6 +1752,21 @@ tasks: - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-rapid" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "rapid" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws ECS auth test" - name: load-balancer-test commands: @@ -1930,6 +1988,10 @@ axes: display_name: "MongoDB 5.0" variables: VERSION: "5.0" + - id: "6.0" + display_name: "MongoDB 6.0" + variables: + VERSION: "6.0" - id: "latest" display_name: "MongoDB latest" variables: @@ -2159,9 +2221,8 @@ buildvariants: - awslinux auth-ssl: "*" display_name: "${platform} ${auth-ssl}" - tasks: &all-server-versions - - ".rapid" - - ".latest" + tasks: + - ".6.0" - ".5.0" - ".4.4" - ".4.2" @@ -2176,8 +2237,8 @@ buildvariants: auth-ssl: "*" encryption: "*" display_name: "Encryption ${platform} ${auth-ssl}" - tasks: &encryption-server-versions - - ".latest" + tasks: + - ".6.0" - ".5.0" - ".4.4" - ".4.2" @@ -2222,6 +2283,7 @@ buildvariants: display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".6.0" - ".5.0" - ".4.4" - ".4.2" @@ -2236,7 +2298,13 @@ buildvariants: ssl: "nossl" encryption: "*" display_name: "Encryption ${platform} ${auth} ${ssl}" - tasks: *encryption-server-versions + tasks: &encryption-server-versions + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Test one server version (4.2) with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2257,7 +2325,15 @@ buildvariants: auth-ssl: "*" coverage: "*" display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" - tasks: *all-server-versions + tasks: &all-server-versions + - ".rapid" + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" + - ".3.6" - matrix_name: "tests-pyopenssl" matrix_spec: @@ -2397,7 +2473,7 @@ buildvariants: auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: - - ".latest" + - ".5.0" - matrix_name: "tests-windows-encryption" matrix_spec: @@ -2564,7 +2640,7 @@ buildvariants: # See https://jira.mongodb.org/browse/SERVER-51364. platform: ubuntu-20.04 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] - mongodb-version: ["4.4", "5.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" @@ -2576,7 +2652,7 @@ buildvariants: matrix_spec: platform: windows-64-vsMulti-small python-version-windows: ["3.7", "3.10"] - mongodb-version: ["4.4", "5.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" @@ -2588,7 +2664,7 @@ buildvariants: - matrix_name: "ocsp-test-macos" matrix_spec: platform: macos-1014 - mongodb-version: ["4.4", "5.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${mongodb-version}" @@ -2606,6 +2682,8 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-rapid" - matrix_name: "aws-auth-test-mac" matrix_spec: @@ -2616,7 +2694,8 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" - + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-rapid" - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] @@ -2626,11 +2705,13 @@ buildvariants: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - name: "aws-auth-test-latest" + - name: "aws-auth-test-6.0" + - name: "aws-auth-test-rapid" - matrix_name: "load-balancer" matrix_spec: platform: ubuntu-18.04 - mongodb-version: ["rapid", "latest"] + mongodb-version: ["rapid", "latest", "6.0"] auth-ssl: "*" python-version: "*" loadbalancer: "*" From 3e57bde2ee3a3f44180e7d388b8515366db1ffe8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 May 2022 13:53:30 -0700 Subject: [PATCH 0660/2111] PYTHON-3230 Migrate to newer zSeries, POWER8, and ARM platforms (#942) --- .evergreen/config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 97d13654c0..3f8955f40e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1882,17 +1882,17 @@ axes: batchtime: 10080 # 7 days variables: python3_binary: python3 - - id: ubuntu1804-zseries - display_name: "Ubuntu 18.04 (zSeries)" - run_on: ubuntu1804-zseries-small + - id: rhel83-zseries + display_name: "RHEL 8.3 (zSeries)" + run_on: rhel83-zseries-small batchtime: 10080 # 7 days - - id: ubuntu1804-power8 - display_name: "Ubuntu 18.04 (POWER8)" - run_on: ubuntu1804-power8-small + - id: rhel81-power8 + display_name: "RHEL 8.1 (POWER8)" + run_on: rhel81-power8-small batchtime: 10080 # 7 days - - id: ubuntu1804-arm64 - display_name: "Ubuntu 18.04 (ARM64)" - run_on: ubuntu1804-arm64-small + - id: rhel82-arm64 + display_name: "RHEL 8.2 (ARM64)" + run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz @@ -2306,17 +2306,17 @@ buildvariants: - ".4.2" - ".4.0" -# Test one server version (4.2) with zSeries, POWER8, and ARM. +# Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" matrix_spec: platform: - - ubuntu1804-zseries # Ubuntu 18 or RHEL 8.x? - - ubuntu1804-power8 # Ubuntu 18 or RHEL 7? - - ubuntu1804-arm64 + - rhel83-zseries # Added in 5.0.8 (SERVER-44074) + - rhel81-power8 # Added in 4.2.7 (SERVER-44072) + - rhel82-arm64 # Added in 4.4.2 (SERVER-48282) auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: - - ".4.2" + - ".6.0" - matrix_name: "tests-python-version-ubuntu18-test-ssl" matrix_spec: From 21ead3a7e5ff5cdc6fd4cad6f92d5efbb0899757 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 6 May 2022 16:48:02 -0500 Subject: [PATCH 0661/2111] PYTHON-3189 Change Stream event document missing to field for rename events (#924) --- .../unified/change-streams.json | 322 +++++++++++++++++- test/utils.py | 2 + 2 files changed, 317 insertions(+), 7 deletions(-) diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 5fd2544ce0..8bc0c956cd 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -3,6 +3,7 @@ "schemaVersion": "1.0", "runOnRequirements": [ { + "minServerVersion": "3.6", "topologies": [ "replicaset", "sharded-replicaset" @@ -167,7 +168,6 @@ "description": "Test with document comment - pre 4.4", "runOnRequirements": [ { - "minServerVersion": "3.6.0", "maxServerVersion": "4.2.99" } ], @@ -211,11 +211,6 @@ }, { "description": "Test with string comment", - "runOnRequirements": [ - { - "minServerVersion": "3.6.0" - } - ], "operations": [ { "name": "createChangeStream", @@ -343,7 +338,6 @@ "description": "Test that comment is not set on getMore - pre 4.4", "runOnRequirements": [ { - "minServerVersion": "3.6.0", "maxServerVersion": "4.3.99", "topologies": [ "replicaset" @@ -426,6 +420,320 @@ ] } ] + }, + { + "description": "to field is set in a rename change event", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + } + ] + }, + { + "description": "Test unknown operationType MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "addedInFutureMongoDBVersion", + "ns": 1 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "addedInFutureMongoDBVersion", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ] + }, + { + "description": "Test newField added in response MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": 1, + "ns": 1, + "newField": "newFieldValue" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "newFieldValue" + } + } + ] + }, + { + "description": "Test new structure in ns document MUST NOT err", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "maxServerVersion": "5.2" + }, + { + "minServerVersion": "6.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns.viewOn": "db.coll" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test modified structure in ns document MUST NOT err", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "operationType": "insert", + "ns": { + "db": "$ns.db", + "coll": "$ns.coll", + "viewOn": "db.coll" + } + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0", + "viewOn": "db.coll" + } + } + } + ] + }, + { + "description": "Test server error on projecting out _id", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280, + "errorCodeName": "ChangeStreamFatalError", + "errorLabelsContain": [ + "NonResumableChangeStreamError" + ] + } + } + ] + }, + { + "description": "Test projection in change stream returns expected fields", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "optype": "$operationType", + "ns": 1, + "newField": "value" + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "optype": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "newField": "value" + } + } + ] } ] } diff --git a/test/utils.py b/test/utils.py index 9e8d6448d9..8a79c97d93 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1080,5 +1080,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["keys"] = list(arguments.pop(arg_name).items()) elif opname == "drop_index" and arg_name == "name": arguments["index_or_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "to": + arguments["new_name"] = arguments.pop(arg_name) else: arguments[c2s] = arguments.pop(arg_name) From cbab615231487d514e6c37eb8a853624610b961f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 May 2022 12:01:36 -0700 Subject: [PATCH 0662/2111] PYTHON-3065 Ignore SRV polling update when topology is discovered to be a replica set (#943) --- pymongo/topology.py | 2 ++ pymongo/topology_description.py | 1 + 2 files changed, 3 insertions(+) diff --git a/pymongo/topology.py b/pymongo/topology.py index 03e0d4ee17..1d4c9a86a8 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -358,6 +358,8 @@ def _process_srv_update(self, seedlist): Hold the lock when calling this. """ td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return self._description = _updated_topology_description_srv_polling(self._description, seedlist) self._update_servers() diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b3dd60680f..b32a86e2d7 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -477,6 +477,7 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): - `seedlist`: a list of new seeds new ServerDescription that resulted from a hello call """ + assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES # Create a copy of the server descriptions. sds = topology_description.server_descriptions() From a1c33e0b84743b26a8f44a5fd67a60304b0c92cd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 May 2022 15:37:48 -0700 Subject: [PATCH 0663/2111] PYTHON-3257 Fix "connection pool paused" errors in child after fork (#944) --- pymongo/pool.py | 4 ++-- pymongo/topology.py | 30 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 13d0e78d1e..e2f9698212 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1420,7 +1420,7 @@ def _get_socket(self): # See test.test_client:TestClient.test_fork for an example of # what could go wrong otherwise if self.pid != os.getpid(): - self.reset() + self.reset_without_pause() if self.closed: if self.enabled_for_cmap: @@ -1526,7 +1526,7 @@ def return_socket(self, sock_info): if self.enabled_for_cmap: listeners.publish_connection_checked_in(self.address, sock_info.id) if self.pid != os.getpid(): - self.reset() + self.reset_without_pause() else: if self.closed: sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) diff --git a/pymongo/topology.py b/pymongo/topology.py index 1d4c9a86a8..4b5ff87bb5 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -169,20 +169,24 @@ def open(self): forking. """ + pid = os.getpid() if self._pid is None: - self._pid = os.getpid() - else: - if os.getpid() != self._pid: - warnings.warn( - "MongoClient opened before fork. Create MongoClient only " - "after forking. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) - with self._lock: - # Reset the session pool to avoid duplicate sessions in - # the child process. - self._session_pool.reset() + self._pid = pid + elif pid != self._pid: + self._pid = pid + warnings.warn( + "MongoClient opened before fork. Create MongoClient only " + "after forking. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe" + ) + with self._lock: + # Close servers and clear the pools. + for server in self._servers.values(): + server.close() + # Reset the session pool to avoid duplicate sessions in + # the child process. + self._session_pool.reset() with self._lock: self._ensure_opened() From a6241973385fbe59eb86d433a952e06d91f5ff79 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 May 2022 10:29:48 -0700 Subject: [PATCH 0664/2111] PYTHON-3260 Improve test_transaction_starts_with_batched_write and test_continuous_network_errors (#945) --- test/test_client.py | 7 +++---- test/test_transactions.py | 10 ++++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/test/test_client.py b/test/test_client.py index 59a8324d6e..3630cec06c 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1596,7 +1596,6 @@ def test_direct_connection(self): with self.assertRaises(ConfigurationError): MongoClient(["host1", "host2"], directConnection=True) - @unittest.skipIf(sys.platform.startswith("java"), "Jython does not support gc.get_objects") @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") def test_continuous_network_errors(self): def server_description_count(): @@ -1612,7 +1611,7 @@ def server_description_count(): gc.collect() with client_knobs(min_heartbeat_interval=0.003): client = MongoClient( - "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=100 + "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 ) initial_count = server_description_count() self.addCleanup(client.close) @@ -1622,8 +1621,8 @@ def server_description_count(): final_count = server_description_count() # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: - # AssertionError: 4 != 22 within 5 delta (18 difference) - self.assertAlmostEqual(initial_count, final_count, delta=10) + # AssertionError: 19 != 46 within 15 delta (27 difference) + self.assertAlmostEqual(initial_count, final_count, delta=15) @client_context.require_failCommand_fail_point def test_network_error_message(self): diff --git a/test/test_transactions.py b/test/test_transactions.py index 34dbbba34b..136a19baaa 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -30,6 +30,8 @@ ) from test.utils_spec_runner import SpecRunner +from bson import encode +from bson.raw_bson import RawBSONDocument from gridfs import GridFS, GridFSBucket from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions @@ -330,14 +332,14 @@ def test_transaction_starts_with_batched_write(self): listener.reset() self.addCleanup(client.close) self.addCleanup(coll.drop) - large_str = "\0" * (10 * 1024 * 1024) - ops = [InsertOne({"a": large_str}) for _ in range(10)] + large_str = "\0" * (1 * 1024 * 1024) + ops = [InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48)] with client.start_session() as session: with session.start_transaction(): coll.bulk_write(ops, session=session) # Assert commands were constructed properly. self.assertEqual( - ["insert", "insert", "insert", "commitTransaction"], listener.started_command_names() + ["insert", "insert", "commitTransaction"], listener.started_command_names() ) first_cmd = listener.results["started"][0].command self.assertTrue(first_cmd["startTransaction"]) @@ -347,7 +349,7 @@ def test_transaction_starts_with_batched_write(self): self.assertNotIn("startTransaction", event.command) self.assertEqual(lsid, event.command["lsid"]) self.assertEqual(txn_number, event.command["txnNumber"]) - self.assertEqual(10, coll.count_documents({})) + self.assertEqual(48, coll.count_documents({})) class PatchSessionTimeout(object): From a7579b02d24ad4af1b984317383bb8d68d1973fe Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 10 May 2022 11:05:59 -0700 Subject: [PATCH 0665/2111] PYTHON-3259 Improve migration guide for loads/JSONOptions/tz_aware (#946) --- bson/json_util.py | 5 +++++ doc/changelog.rst | 6 +++--- doc/migrate-to-pymongo4.rst | 24 +++++++++++++++++++++--- 3 files changed, 29 insertions(+), 6 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index 99dbc62609..369c3d5f4a 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -438,6 +438,11 @@ def loads(s: str, *args: Any, **kwargs: Any) -> Any: decoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. + .. versionchanged:: 4.0 + Now loads :class:`datetime.datetime` instances as naive by default. To + load timezone aware instances utilize the `json_options` parameter. + See :ref:`tz_aware_default_change` for an example. + .. versionchanged:: 3.5 Parses Relaxed and Canonical Extended JSON as well as PyMongo's legacy format. Now raises ``TypeError`` or ``ValueError`` when parsing JSON diff --git a/doc/changelog.rst b/doc/changelog.rst index 7f002fb470..5538467d0c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -256,9 +256,9 @@ Breaking Changes in 4.0 :class:`~bson.dbref.DBRef`. - The "tls" install extra is no longer necessary or supported and will be ignored by pip. -- ``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, - now defaults to ``False`` instead of ``True``. ``json_util.loads`` now - decodes datetime as naive by default. +- The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions` + now defaults to ``False`` instead of ``True``. :meth:`bson.json_util.loads` now + decodes datetime as naive by default. See :ref:`tz_aware_default_change` for more info. - ``directConnection`` URI option and keyword argument to :class:`~pymongo.mongo_client.MongoClient` defaults to ``False`` instead of ``None``, allowing for the automatic discovery of replica sets. This means that if you diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index d70d7b8a2c..eca479c7c7 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -253,12 +253,30 @@ can be changed to this:: client.options.pool_options.min_pool_size client.options.pool_options.max_idle_time_seconds +.. _tz_aware_default_change: + ``tz_aware`` defaults to ``False`` .................................. -``tz_aware``, an argument for :class:`~bson.json_util.JSONOptions`, -now defaults to ``False`` instead of ``True``. ``json_util.loads`` now -decodes datetime as naive by default. +The ``tz_aware`` argument to :class:`~bson.json_util.JSONOptions` +now defaults to ``False`` instead of ``True``. :meth:`bson.json_util.loads` +now decodes datetime as naive by default:: + + >>> from bson import json_util + >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}' + >>> json_util.loads(s) + {'dt': datetime.datetime(2022, 5, 9, 17, 54)} + +To retain the PyMongo 3 behavior set ``tz_aware=True``, for example:: + + >>> from bson import json_util + >>> opts = json_util.JSONOptions(tz_aware=True) + >>> s = '{"dt": {"$date": "2022-05-09T17:54:00Z"}}' + >>> json_util.loads(s, json_options=opts) + {'dt': datetime.datetime(2022, 5, 9, 17, 54, tzinfo=)} + +This change was made to match the default behavior of +:class:`~bson.codec_options.CodecOptions` and :class:`bson.decode`. MongoClient cannot execute operations after ``close()`` ....................................................... From e02eb287e896ff0e301e67648ba9398f6fe5d799 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 12 May 2022 15:42:06 -0500 Subject: [PATCH 0666/2111] PYTHON-3254 Bump maxWireVersion for MongoDB 6.0 (#948) --- pymongo/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/common.py b/pymongo/common.py index 5a6ffbd369..552faf94a2 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 15 +MAX_SUPPORTED_WIRE_VERSION = 17 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 From 89d3fd035519055c6fac963bd3047f661b1c9219 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 23 May 2022 12:44:44 -0700 Subject: [PATCH 0667/2111] PYTHON-3279 Don't link check flakey wiki.centos.org (#951) --- doc/changelog.rst | 4 ++-- doc/conf.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5538467d0c..f1085c4bff 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -755,8 +755,8 @@ Changes in Version 3.8.0 .. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install Python 2.7 or newer from `Red Hat Software Collections - `_. CentOS 6 users should install Python - 2.7 or newer from `SCL + `_. + CentOS 6 users should install Python 2.7 or newer from `SCL `_ .. warning:: PyMongo no longer supports PyPy3 versions older than 3.5. Users diff --git a/doc/conf.py b/doc/conf.py index a5c5be2694..7b1580de32 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -84,7 +84,8 @@ # The anchors on the rendered markdown page are created after the fact, # so this link results in a 404. linkcheck_ignore = [ - "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check" + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + r"https://wiki.centos.org/[\w/]*", ] # -- Options for extensions ---------------------------------------------------- From 9f191d6bb35ae3252dec267256c45694cd373685 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 25 May 2022 05:55:36 -0500 Subject: [PATCH 0668/2111] PYTHON-3283 Remove Generic Typing from the ClientSession Class (#952) --- pymongo/client_session.py | 12 +++++------- pymongo/mongo_client.py | 4 ++-- test/test_mypy.py | 9 +++++++++ 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index a0c269cb8d..7d70eb8f19 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -140,7 +140,6 @@ Any, Callable, ContextManager, - Generic, Mapping, NoReturn, Optional, @@ -164,7 +163,6 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE -from pymongo.typings import _DocumentType from pymongo.write_concern import WriteConcern @@ -461,7 +459,7 @@ def _within_time_limit(start_time): from pymongo.mongo_client import MongoClient -class ClientSession(Generic[_DocumentType]): +class ClientSession: """A session for ordering sequential operations. :class:`ClientSession` instances are **not thread-safe or fork-safe**. @@ -476,13 +474,13 @@ class ClientSession(Generic[_DocumentType]): def __init__( self, - client: "MongoClient[_DocumentType]", + client: "MongoClient", server_session: Any, options: SessionOptions, implicit: bool, ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. - self._client: MongoClient[_DocumentType] = client + self._client: MongoClient = client self._server_session = server_session self._options = options self._cluster_time = None @@ -515,14 +513,14 @@ def _check_ended(self): if self._server_session is None: raise InvalidOperation("Cannot use ended session") - def __enter__(self) -> "ClientSession[_DocumentType]": + def __enter__(self) -> "ClientSession": return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self) -> "MongoClient[_DocumentType]": + def client(self) -> "MongoClient": """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 6601c18aca..e1aa80e2f9 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1630,7 +1630,7 @@ def start_session( causal_consistency: Optional[bool] = None, default_transaction_options: Optional[client_session.TransactionOptions] = None, snapshot: Optional[bool] = False, - ) -> client_session.ClientSession[_DocumentType]: + ) -> client_session.ClientSession: """Start a logical session. This method takes the same parameters as @@ -1681,7 +1681,7 @@ def _ensure_session(self, session=None): @contextlib.contextmanager def _tmp_session( self, session: Optional[client_session.ClientSession], close: bool = True - ) -> "Generator[Optional[client_session.ClientSession[Any]], None, None]": + ) -> "Generator[Optional[client_session.ClientSession], None, None]": """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.ClientSession): diff --git a/test/test_mypy.py b/test/test_mypy.py index 07af61ed36..dfdcefbdb3 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -43,6 +43,7 @@ class Movie(TypedDict): # type: ignore[misc] from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument from bson.son import SON +from pymongo import ASCENDING from pymongo.collection import Collection from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne @@ -313,6 +314,14 @@ def test_son_document_type(self) -> None: def test_son_document_type_runtime(self) -> None: client = MongoClient(document_class=SON[str, Any], connect=False) + @only_type_check + def test_create_index(self) -> None: + client: MongoClient[Dict[str, str]] = MongoClient("test") + db = client.test + with client.start_session() as session: + index = db.test.create_index([("user_id", ASCENDING)], unique=True, session=session) + assert isinstance(index, str) + class TestCommandDocumentType(unittest.TestCase): @only_type_check From 78476d0217289e5a3fafb5c599a8a88558d87d92 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 26 May 2022 15:14:59 -0700 Subject: [PATCH 0669/2111] PYTHON-3187 Avoid tight poll() loop on pyopenssl connections (#953) --- pymongo/pool.py | 33 +++++++-------------------------- pymongo/pyopenssl_context.py | 15 +++++++++++++-- pymongo/ssl_support.py | 14 +++++--------- test/test_encryption.py | 10 +++------- test/test_ssl.py | 7 +------ 5 files changed, 29 insertions(+), 50 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index e2f9698212..d68ba238f2 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -15,7 +15,6 @@ import collections import contextlib import copy -import ipaddress import os import platform import socket @@ -61,20 +60,7 @@ from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import HAS_SNI as _HAVE_SNI -from pymongo.ssl_support import IPADDR_SAFE as _IPADDR_SAFE -from pymongo.ssl_support import SSLError as _SSLError - - -# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are -# not permitted for SNI hostname. -def is_ip_address(address): - try: - ipaddress.ip_address(address) - return True - except (ValueError, UnicodeError): # noqa: B014 - return False - +from pymongo.ssl_support import HAS_SNI, SSLError try: from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl @@ -263,7 +249,7 @@ def _raise_connection_failure( msg = msg_prefix + msg if isinstance(error, socket.timeout): raise NetworkTimeout(msg) from error - elif isinstance(error, _SSLError) and "timed out" in str(error): + elif isinstance(error, SSLError) and "timed out" in str(error): # Eventlet does not distinguish TLS network timeouts from other # SSLErrors (https://github.com/eventlet/eventlet/issues/692). # Luckily, we can work around this limitation because the phrase @@ -924,7 +910,7 @@ def _raise_connection_failure(self, error): reason = ConnectionClosedReason.ERROR self.close_socket(reason) # SSLError from PyOpenSSL inherits directly from Exception. - if isinstance(error, (IOError, OSError, _SSLError)): + if isinstance(error, (IOError, OSError, SSLError)): _raise_connection_failure(self.address, error) else: raise @@ -1024,14 +1010,9 @@ def _configured_socket(address, options): if ssl_context is not None: host = address[0] try: - # According to RFC6066, section 3, IPv4 and IPv6 literals are - # not permitted for SNI hostname. - # Previous to Python 3.7 wrap_socket would blindly pass - # IP addresses as SNI hostname. - # https://bugs.python.org/issue32185 # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. - if _HAVE_SNI and (not is_ip_address(host) or _IPADDR_SAFE): + if HAS_SNI: sock = ssl_context.wrap_socket(sock, server_hostname=host) else: sock = ssl_context.wrap_socket(sock) @@ -1040,7 +1021,7 @@ def _configured_socket(address, options): # Raise _CertificateError directly like we do after match_hostname # below. raise - except (IOError, OSError, _SSLError) as exc: # noqa: B014 + except (IOError, OSError, SSLError) as exc: # noqa: B014 sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -1048,7 +1029,7 @@ def _configured_socket(address, options): _raise_connection_failure(address, exc, "SSL handshake failed: ") if ( ssl_context.verify_mode - and not getattr(ssl_context, "check_hostname", False) + and not ssl_context.check_hostname and not options.tls_allow_invalid_hostnames ): try: @@ -1336,7 +1317,7 @@ def connect(self): self.address, conn_id, ConnectionClosedReason.ERROR ) - if isinstance(error, (IOError, OSError, _SSLError)): + if isinstance(error, (IOError, OSError, SSLError)): _raise_connection_failure(self.address, error) raise diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 3736a4f381..1a57ff4f2b 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -70,6 +70,8 @@ _REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) +# For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are +# not permitted for SNI hostname. def _is_ip_address(address): try: _ip_address(address) @@ -104,8 +106,17 @@ def _call(self, call, *args, **kwargs): while True: try: return call(*args, **kwargs) - except _RETRY_ERRORS: - self.socket_checker.select(self, True, True, timeout) + except _RETRY_ERRORS as exc: + if isinstance(exc, _SSL.WantReadError): + want_read = True + want_write = False + elif isinstance(exc, _SSL.WantWriteError): + want_read = False + want_write = True + else: + want_read = True + want_write = True + self.socket_checker.select(self, want_read, want_write, timeout) if timeout and _time.monotonic() - start > timeout: raise _socket.timeout("timed out") continue diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 6adf629ad3..d1381ce0e4 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -14,8 +14,6 @@ """Support for SSL in PyMongo.""" -import sys - from pymongo.errors import ConfigurationError HAVE_SSL = True @@ -38,7 +36,7 @@ from ssl import CERT_NONE, CERT_REQUIRED HAS_SNI = _ssl.HAS_SNI - IPADDR_SAFE = _ssl.IS_PYOPENSSL or sys.version_info[:2] >= (3, 7) + IPADDR_SAFE = True SSLError = _ssl.SSLError def get_ssl_context( @@ -53,12 +51,10 @@ def get_ssl_context( """Create and return an SSLContext object.""" verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) - # SSLContext.check_hostname was added in CPython 3.4. - if hasattr(ctx, "check_hostname"): - if verify_mode != CERT_NONE: - ctx.check_hostname = not allow_invalid_hostnames - else: - ctx.check_hostname = False + if verify_mode != CERT_NONE: + ctx.check_hostname = not allow_invalid_hostnames + else: + ctx.check_hostname = False if hasattr(ctx, "check_ocsp_endpoint"): ctx.check_ocsp_endpoint = not disable_ocsp_endpoint_check if hasattr(ctx, "options"): diff --git a/test/test_encryption.py b/test/test_encryption.py index 366c406b03..c0d278d577 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -145,13 +145,10 @@ def test_init_kms_tls_options(self): self.assertEqual(opts._kms_ssl_contexts, {}) opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) ctx = opts._kms_ssl_contexts["kmip"] - # On < 3.7 we check hostnames manually. - if sys.version_info[:2] >= (3, 7): - self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) ctx = opts._kms_ssl_contexts["aws"] - if sys.version_info[:2] >= (3, 7): - self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) opts = AutoEncryptionOpts( {}, @@ -159,8 +156,7 @@ def test_init_kms_tls_options(self): kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, ) ctx = opts._kms_ssl_contexts["kmip"] - if sys.version_info[:2] >= (3, 7): - self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) diff --git a/test/test_ssl.py b/test/test_ssl.py index 0c45275fac..9b58c2251b 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -65,8 +65,6 @@ CRL_PEM = os.path.join(CERT_PATH, "crl.pem") MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" -_PY37PLUS = sys.version_info[:2] >= (3, 7) - # To fully test this start a mongod instance (built with SSL support) like so: # mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ # --sslPEMKeyFile /path/to/pymongo/test/certificates/server.pem \ @@ -306,10 +304,7 @@ def test_cert_ssl_validation_hostname_matching(self): ctx = get_ssl_context(None, None, None, None, False, True, False) self.assertFalse(ctx.check_hostname) ctx = get_ssl_context(None, None, None, None, False, False, False) - if _PY37PLUS or _HAVE_PYOPENSSL: - self.assertTrue(ctx.check_hostname) - else: - self.assertFalse(ctx.check_hostname) + self.assertTrue(ctx.check_hostname) response = self.client.admin.command(HelloCompat.LEGACY_CMD) From f4fc742ff38110aa41f2a46b267319403d89f3b1 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 27 May 2022 12:34:22 -0700 Subject: [PATCH 0670/2111] PYTHON-3276 [pymongo] FLE 1.0 shared library (#947) --- .evergreen/config.yml | 71 +++++++++++++++++++++++++---------- .evergreen/run-tests.sh | 13 ++++++- pymongo/encryption.py | 9 ++++- pymongo/encryption_options.py | 13 ++++++- test/test_encryption.py | 16 ++++++++ 5 files changed, 99 insertions(+), 23 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3f8955f40e..c12d4167b7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -450,6 +450,9 @@ functions: export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi + if [ -n "${test_csfle}" ]; then + export TEST_CSFLE=1 + fi if [ -n "${test_pyopenssl}" ]; then export TEST_PYOPENSSL=1 fi @@ -1232,7 +1235,6 @@ tasks: VERSION: "5.0" TOPOLOGY: "sharded_cluster" - func: "run tests" - - name: "test-6.0-standalone" tags: ["6.0", "standalone"] commands: @@ -2161,6 +2163,14 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days + - id: "encryption_with_csfle" + display_name: "Encryption with CSFLE" + tags: ["encryption_tag", "csfle"] + variables: + test_encryption: true + test_csfle: true + batchtime: 10080 # 7 days + # Run pyopenssl tests? - id: pyopenssl @@ -2229,21 +2239,6 @@ buildvariants: - ".4.0" - ".3.6" -- matrix_name: "tests-all-encryption" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 with SSL. - - awslinux - auth-ssl: "*" - encryption: "*" - display_name: "Encryption ${platform} ${auth-ssl}" - tasks: - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - matrix_name: "tests-archlinux" matrix_spec: platform: @@ -2297,14 +2292,27 @@ buildvariants: auth: "auth" ssl: "nossl" encryption: "*" - display_name: "Encryption ${platform} ${auth} ${ssl}" + display_name: "${encryption} ${platform} ${auth} ${ssl}" tasks: &encryption-server-versions + - ".rapid" - ".latest" - ".6.0" - ".5.0" - ".4.4" - ".4.2" - ".4.0" + rules: &encryption-exclude-rules + - if: + platform: "*" + auth: "*" + ssl: "*" + encryption: [ "encryption_with_csfle" ] + then: + remove_tasks: + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2385,8 +2393,21 @@ buildvariants: # dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config # coverage: "*" encryption: "*" - display_name: "Encryption ${python-version} ${platform} ${auth-ssl}" + display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions + rules: + - if: + platform: "*" + python-version: "*" + auth-ssl: "*" + encryption: [ "encryption_with_csfle" ] + then: + remove_tasks: + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" + - matrix_name: "tests-python-version-ubuntu18-without-c-extensions" matrix_spec: @@ -2481,8 +2502,20 @@ buildvariants: python-version-windows: "*" auth-ssl: "*" encryption: "*" - display_name: "Encryption ${platform} ${python-version-windows} ${auth-ssl}" + display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions + rules: + - if: + platform: "*" + python-version-windows: "*" + auth-ssl: "*" + encryption: [ "encryption_with_csfle" ] + then: + remove_tasks: + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4a48b4a33b..96f42fa517 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -11,6 +11,7 @@ set -o errexit # Exit the script with error if any of the commands fail # COVERAGE If non-empty, run the test suite with coverage. # TEST_ENCRYPTION If non-empty, install pymongocrypt. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. +# TEST_CSFLE If non-empty, install CSFLE if [ -n "${SET_XTRACE_ON}" ]; then set -o xtrace @@ -27,6 +28,7 @@ COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} +TEST_CSFLE=${TEST_CSFLE:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} DATA_LAKE=${DATA_LAKE:-} @@ -153,7 +155,16 @@ if [ -z "$DATA_LAKE" ]; then else TEST_ARGS="-s test.test_data_lake" fi - +if [ -z $TEST_CSFLE ]; then + echo "CSFLE not being tested" +else + $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component csfle \ + --version latest --out ../csfle/ + export DYLD_FALLBACK_LIBRARY_PATH=../csfle/lib/:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=../csfle/lib:$LD_LIBRARY_PATH + export PATH=../csfle/bin:$PATH + TEST_ARGS="-s test.test_encryption" +fi # Don't download unittest-xml-reporting from pypi, which often fails. if $PYTHON -c "import xmlrunner"; then # The xunit output dir must be a Python style absolute path. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1e06f7062d..1a29131890 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -296,7 +296,14 @@ def _get_internal_client(encrypter, mongo_client): io_callbacks = _EncryptionIO(metadata_client, key_vault_coll, mongocryptd_client, opts) self._auto_encrypter = AutoEncrypter( - io_callbacks, MongoCryptOptions(opts._kms_providers, schema_map) + io_callbacks, + MongoCryptOptions( + opts._kms_providers, + schema_map, + csfle_path=opts._csfle_path, + csfle_required=opts._csfle_required, + bypass_encryption=opts._bypass_auto_encryption, + ), ) self._closed = False diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 2ac12bc4b4..0ce828ae4c 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -39,12 +39,14 @@ def __init__( key_vault_namespace: str, key_vault_client: Optional["MongoClient"] = None, schema_map: Optional[Mapping[str, Any]] = None, - bypass_auto_encryption: Optional[bool] = False, + bypass_auto_encryption: bool = False, mongocryptd_uri: str = "mongodb://localhost:27020", mongocryptd_bypass_spawn: bool = False, mongocryptd_spawn_path: str = "mongocryptd", mongocryptd_spawn_args: Optional[List[str]] = None, kms_tls_options: Optional[Mapping[str, Any]] = None, + csfle_path: Optional[str] = None, + csfle_required: bool = False, ) -> None: """Options to configure automatic client-side field level encryption. @@ -140,6 +142,12 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + - `csfle_path` (optional): Override the path to load the CSFLE library. + - `csfle_required` (optional): If 'true', refuse to continue encryption without a CSFLE + library + + .. versionchanged:: 4.2 + Added `csfle_path` and `csfle_required` parameters .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -152,7 +160,8 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) - + self._csfle_path = csfle_path + self._csfle_required = csfle_required self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client diff --git a/test/test_encryption.py b/test/test_encryption.py index c0d278d577..fc9d4eec3b 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -82,6 +82,18 @@ def get_client_opts(client): class TestAutoEncryptionOpts(PyMongoTestCase): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CSFLE"), "csfle is not installed") + def test_csfle(self): + # Test that we can pick up csfle automatically + client = MongoClient( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", csfle_required=True + ), + connect=False, + ) + self.addCleanup(client.close) + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): with self.assertRaises(ConfigurationError): @@ -1749,6 +1761,10 @@ def test_case_8(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CSFLE"), + "this prose test does not work when CSFLE is on a system dynamic library search path.", + ) def test_mongocryptd_bypass_spawn(self): # Lower the mongocryptd timeout to reduce the test run time. self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS From 62a630218179a77e662630a6799d7c3f267459c0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 1 Jun 2022 18:26:52 -0500 Subject: [PATCH 0671/2111] PYTHON-2683 Convert change stream spec tests to unified test format (#950) --- .../legacy/change-streams-errors.json | 153 -- .../change-streams-resume-allowlist.json | 1750 ------------ .../change-streams-resume-errorLabels.json | 1652 ------------ .../change_streams/legacy/change-streams.json | 795 ------ .../unified/change-streams-errors.json | 246 ++ .../change-streams-resume-allowlist.json | 2348 +++++++++++++++++ .../change-streams-resume-errorLabels.json | 2125 +++++++++++++++ .../unified/change-streams.json | 1101 +++++++- test/test_change_stream.py | 128 +- test/test_unified_format.py | 6 + ...ctedEventsForClient-ignoreExtraEvents.json | 151 ++ .../valid-pass/poc-change-streams.json | 43 +- test/unified_format.py | 27 +- 13 files changed, 6039 insertions(+), 4486 deletions(-) delete mode 100644 test/change_streams/legacy/change-streams-errors.json delete mode 100644 test/change_streams/legacy/change-streams-resume-allowlist.json delete mode 100644 test/change_streams/legacy/change-streams-resume-errorLabels.json delete mode 100644 test/change_streams/legacy/change-streams.json create mode 100644 test/change_streams/unified/change-streams-errors.json create mode 100644 test/change_streams/unified/change-streams-resume-allowlist.json create mode 100644 test/change_streams/unified/change-streams-resume-errorLabels.json create mode 100644 test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json diff --git a/test/change_streams/legacy/change-streams-errors.json b/test/change_streams/legacy/change-streams-errors.json deleted file mode 100644 index 7b3fa80689..0000000000 --- a/test/change_streams/legacy/change-streams-errors.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "collection2_name": "test2", - "database2_name": "change-stream-tests-2", - "tests": [ - { - "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "single" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [], - "expectations": null, - "result": { - "error": { - "code": 40573 - } - } - }, - { - "description": "Change Stream should error when an invalid aggregation stage is passed in", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [ - { - "$unsupported": "foo" - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - }, - { - "$unsupported": "foo" - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "error": { - "code": 40324 - } - } - }, - { - "description": "Change Stream should error when _id is projected out", - "minServerVersion": "4.1.11", - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [ - { - "$project": { - "_id": 0 - } - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "result": { - "error": { - "code": 280 - } - } - }, - { - "description": "change stream errors on ElectionInProgress", - "minServerVersion": "4.2", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 216, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "result": { - "error": { - "code": 216 - } - } - } - ] -} diff --git a/test/change_streams/legacy/change-streams-resume-allowlist.json b/test/change_streams/legacy/change-streams-resume-allowlist.json deleted file mode 100644 index baffc8fba9..0000000000 --- a/test/change_streams/legacy/change-streams-resume-allowlist.json +++ /dev/null @@ -1,1750 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "tests": [ - { - "description": "change stream resumes after a network error", - "minServerVersion": "4.2", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "closeConnection": true - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after HostUnreachable", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 6, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after HostNotFound", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 7, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NetworkTimeout", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 89, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ShutdownInProgress", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 91, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after PrimarySteppedDown", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 189, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ExceededTimeLimit", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 262, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after SocketException", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 9001, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotWritablePrimary", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 10107, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedAtShutdown", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 11600, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedDueToReplStateChange", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 11602, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryNoSecondaryOk", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 13435, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryOrSecondary", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 13436, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleShardVersion", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 63, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleEpoch", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 150, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after RetryChangeStream", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 234, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after FailedToSatisfyReadPreference", - "minServerVersion": "4.2", - "maxServerVersion": "4.2.99", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 133, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after CursorNotFound", - "minServerVersion": "4.2", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 43, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - } - ] -} diff --git a/test/change_streams/legacy/change-streams-resume-errorLabels.json b/test/change_streams/legacy/change-streams-resume-errorLabels.json deleted file mode 100644 index 2bac61d3b1..0000000000 --- a/test/change_streams/legacy/change-streams-resume-errorLabels.json +++ /dev/null @@ -1,1652 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "tests": [ - { - "description": "change stream resumes after HostUnreachable", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 6, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after HostNotFound", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 7, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NetworkTimeout", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 89, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ShutdownInProgress", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 91, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after PrimarySteppedDown", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 189, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after ExceededTimeLimit", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 262, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after SocketException", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 9001, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotWritablePrimary", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 10107, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedAtShutdown", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 11600, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after InterruptedDueToReplStateChange", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 11602, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryNoSecondaryOk", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 13435, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after NotPrimaryOrSecondary", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 13436, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleShardVersion", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 63, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after StaleEpoch", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 150, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after RetryChangeStream", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 234, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes after FailedToSatisfyReadPreference", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failGetMoreAfterCursorCheckout", - "mode": { - "times": 1 - }, - "data": { - "errorCode": 133, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream resumes if error contains ResumableChangeStreamError", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 50, - "closeConnection": false, - "errorLabels": [ - "ResumableChangeStreamError" - ] - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": 42, - "collection": "test" - }, - "command_name": "getMore", - "database_name": "change-stream-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "change stream does not resume if error does not contain ResumableChangeStreamError", - "minServerVersion": "4.3.1", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "getMore" - ], - "errorCode": 6, - "closeConnection": false - } - }, - "target": "collection", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "result": { - "error": { - "code": 6 - } - } - } - ] -} diff --git a/test/change_streams/legacy/change-streams.json b/test/change_streams/legacy/change-streams.json deleted file mode 100644 index 54b76af0a3..0000000000 --- a/test/change_streams/legacy/change-streams.json +++ /dev/null @@ -1,795 +0,0 @@ -{ - "collection_name": "test", - "database_name": "change-stream-tests", - "collection2_name": "test2", - "database2_name": "change-stream-tests-2", - "tests": [ - { - "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "The server returns change stream responses in the specified server response format", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectations": null, - "result": { - "success": [ - { - "_id": "42", - "documentKey": "42", - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Change Stream should allow valid aggregate pipeline stages", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [ - { - "$match": { - "fullDocument.z": 3 - } - } - ], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - }, - { - "$match": { - "fullDocument.z": { - "$numberInt": "3" - } - } - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", - "minServerVersion": "3.8.0", - "target": "database", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": { - "$numberInt": "1" - }, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test2" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", - "minServerVersion": "3.8.0", - "target": "client", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test2", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests-2", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "y": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "z": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": { - "$numberInt": "1" - }, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "command_name": "aggregate", - "database_name": "admin" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test2" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests-2", - "coll": "test" - }, - "fullDocument": { - "y": { - "$numberInt": "2" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "z": { - "$numberInt": "3" - } - } - } - ] - } - }, - { - "description": "Test insert, update, replace, and delete event types", - "minServerVersion": "3.6.0", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "updateOne", - "arguments": { - "filter": { - "x": 1 - }, - "update": { - "$set": { - "x": 2 - } - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "replaceOne", - "arguments": { - "filter": { - "x": 2 - }, - "replacement": { - "x": 3 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "deleteOne", - "arguments": { - "filter": { - "x": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "update", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "updateDescription": { - "updatedFields": { - "x": { - "$numberInt": "2" - } - } - } - }, - { - "operationType": "replace", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "3" - } - } - }, - { - "operationType": "delete", - "ns": { - "db": "change-stream-tests", - "coll": "test" - } - } - ] - } - }, - { - "description": "Test rename and invalidate event types", - "minServerVersion": "4.0.1", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "rename", - "arguments": { - "to": "test2" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "rename", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "to": { - "db": "change-stream-tests", - "coll": "test2" - } - }, - { - "operationType": "invalidate" - } - ] - } - }, - { - "description": "Test drop and invalidate event types", - "minServerVersion": "4.0.1", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": {}, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "drop" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "drop", - "ns": { - "db": "change-stream-tests", - "coll": "test" - } - }, - { - "operationType": "invalidate" - } - ] - } - }, - { - "description": "Test consecutive resume", - "minServerVersion": "4.1.7", - "target": "collection", - "topology": [ - "replicaset" - ], - "changeStreamPipeline": [], - "changeStreamOptions": { - "batchSize": 1 - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "getMore" - ], - "closeConnection": true - } - }, - "operations": [ - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 2 - } - } - }, - { - "database": "change-stream-tests", - "collection": "test", - "name": "insertOne", - "arguments": { - "document": { - "x": 3 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "cursor": { - "batchSize": 1 - }, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "command_name": "aggregate", - "database_name": "change-stream-tests" - } - } - ], - "result": { - "success": [ - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "1" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "2" - } - } - }, - { - "operationType": "insert", - "ns": { - "db": "change-stream-tests", - "coll": "test" - }, - "fullDocument": { - "x": { - "$numberInt": "3" - } - } - } - ] - } - } - ] -} diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json new file mode 100644 index 0000000000..4a413fce84 --- /dev/null +++ b/test/change_streams/unified/change-streams-errors.json @@ -0,0 +1,246 @@ +{ + "description": "change-streams-errors", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "expectError": { + "errorCode": 40573 + } + } + ] + }, + { + "description": "Change Stream should error when an invalid aggregation stage is passed in", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$unsupported": "foo" + } + ] + }, + "expectError": { + "errorCode": 40324 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$unsupported": "foo" + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should error when _id is projected out", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 280 + } + } + ] + }, + { + "description": "change stream errors on ElectionInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 216, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 216 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-allowlist.json b/test/change_streams/unified/change-streams-resume-allowlist.json new file mode 100644 index 0000000000..b4953ec736 --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-allowlist.json @@ -0,0 +1,2348 @@ +{ + "description": "change-streams-resume-allowlist", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after a network error", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostUnreachable", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after CursorNotFound", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 43, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json new file mode 100644 index 0000000000..c156b550ce --- /dev/null +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -0,0 +1,2125 @@ +{ + "description": "change-streams-resume-errorlabels", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + } + ], + "tests": [ + { + "description": "change stream resumes after HostUnreachable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after HostNotFound", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 7, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NetworkTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 89, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 91, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 189, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 262, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after SocketException", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 9001, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotWritablePrimary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 10107, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11600, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 11602, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryNoSecondaryOk", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13435, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after NotPrimaryOrSecondary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 13436, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleShardVersion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 63, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after StaleEpoch", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 150, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after RetryChangeStream", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 234, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes after FailedToSatisfyReadPreference", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failGetMoreAfterCursorCheckout", + "mode": { + "times": 1 + }, + "data": { + "errorCode": 133, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream resumes if error contains ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 50, + "closeConnection": false, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$exists": true + }, + "collection": "collection0" + }, + "commandName": "getMore", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "resumeAfter": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "change stream does not resume if error does not contain ResumableChangeStreamError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "errorCode": 6, + "closeConnection": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "errorCode": 6 + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 8bc0c956cd..572d2d6e97 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -1,13 +1,14 @@ { "description": "change-streams", - "schemaVersion": "1.0", + "schemaVersion": "1.7", "runOnRequirements": [ { "minServerVersion": "3.6", "topologies": [ "replicaset", "sharded-replicaset" - ] + ], + "serverless": "forbid" } ], "createEntities": [ @@ -16,7 +17,17 @@ "id": "client0", "observeEvents": [ "commandStartedEvent" - ] + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "globalClient", + "useMultipleMongoses": false } }, { @@ -32,6 +43,62 @@ "database": "database0", "collectionName": "collection0" } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "globalDatabase0", + "client": "globalClient", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "globalCollection0", + "database": "globalDatabase0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "globalDatabase1", + "client": "globalClient", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "globalCollection1", + "database": "globalDatabase1", + "collectionName": "collection1" + } + }, + { + "collection": { + "id": "globalDb1Collection0", + "database": "globalDatabase1", + "collectionName": "collection0" + } + }, + { + "collection": { + "id": "globalDb0Collection1", + "database": "globalDatabase0", + "collectionName": "collection1" + } } ], "initialData": [ @@ -557,7 +624,7 @@ "runOnRequirements": [ { "minServerVersion": "3.6", - "maxServerVersion": "5.2" + "maxServerVersion": "5.2.99" }, { "minServerVersion": "6.0" @@ -734,6 +801,1032 @@ } } ] + }, + { + "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "The server returns change stream responses in the specified server response format", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "_id": { + "$$exists": true + }, + "documentKey": { + "$$exists": true + }, + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + } + ] + }, + { + "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Change Stream should allow valid aggregate pipeline stages", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + }, + { + "$match": { + "fullDocument.z": 3 + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalDb0Collection1", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalDb1Collection0", + "arguments": { + "document": { + "y": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "z": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection1" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database1", + "coll": "collection0" + }, + "fullDocument": { + "y": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "z": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "Test insert, update, replace, and delete event types", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "updateOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 1 + }, + "update": { + "$set": { + "x": 2 + } + } + } + }, + { + "name": "replaceOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 2 + }, + "replacement": { + "x": 3 + } + } + }, + { + "name": "deleteOne", + "object": "globalCollection0", + "arguments": { + "filter": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "x": 2 + }, + "removedFields": [], + "truncatedArrays": { + "$$unsetOrMatches": { + "$$exists": true + } + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "replace", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "delete", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test rename and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection1" + } + }, + { + "name": "rename", + "object": "globalCollection0", + "arguments": { + "to": "collection1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "collection1" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test drop and invalidate event types", + "runOnRequirements": [ + { + "minServerVersion": "4.0.1", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "collection0" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "drop", + "ns": { + "db": "database0", + "coll": "collection0" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "invalidate" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test consecutive resume", + "runOnRequirements": [ + { + "minServerVersion": "4.1.7", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "globalClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore" + ], + "closeConnection": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "batchSize": 1 + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 2 + } + } + }, + { + "name": "insertOne", + "object": "globalCollection0", + "arguments": { + "document": { + "x": 3 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 1, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 2, + "_id": { + "$$exists": true + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "fullDocument": { + "x": 3, + "_id": { + "$$exists": true + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 1 + }, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "Test wallTime field is set in a change event", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0" + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "wallTime": { + "$$exists": true + } + } + } + ] } ] } diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 73768fd0f6..f3f206d965 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -16,7 +16,6 @@ import os import random -import re import string import sys import threading @@ -36,7 +35,7 @@ wait_until, ) -from bson import SON, ObjectId, Timestamp, encode, json_util +from bson import SON, ObjectId, Timestamp, encode from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient @@ -1141,131 +1140,6 @@ def tearDown(self): _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -def get_change_stream(client, scenario_def, test): - # Get target namespace on which to instantiate change stream - target = test["target"] - if target == "collection": - db = client.get_database(scenario_def["database_name"]) - cs_target = db.get_collection(scenario_def["collection_name"]) - elif target == "database": - cs_target = client.get_database(scenario_def["database_name"]) - elif target == "client": - cs_target = client - else: - raise ValueError("Invalid target in spec") - - # Construct change stream kwargs dict - cs_pipeline = test["changeStreamPipeline"] - options = test["changeStreamOptions"] - cs_options = {} - for key, value in options.items(): - cs_options[camel_to_snake(key)] = value - - # Create and return change stream - return cs_target.watch(pipeline=cs_pipeline, **cs_options) - - -def run_operation(client, operation): - # Apply specified operations - opname = camel_to_snake(operation["name"]) - arguments = operation.get("arguments", {}) - if opname == "rename": - # Special case for rename operation. - arguments = {"new_name": arguments["to"]} - cmd = getattr( - client.get_database(operation["database"]).get_collection(operation["collection"]), opname - ) - return cmd(**arguments) - - -def create_test(scenario_def, test): - def run_scenario(self): - # Set up - self.setUpCluster(scenario_def) - self.setFailPoint(test) - is_error = test["result"].get("error", False) - try: - with get_change_stream(self.client, scenario_def, test) as change_stream: - for operation in test["operations"]: - # Run specified operations - run_operation(self.client, operation) - num_expected_changes = len(test["result"].get("success", [])) - changes = [change_stream.next() for _ in range(num_expected_changes)] - # Run a next() to induce an error if one is expected and - # there are no changes. - if is_error and not changes: - change_stream.next() - - except OperationFailure as exc: - if not is_error: - raise - expected_code = test["result"]["error"]["code"] - self.assertEqual(exc.code, expected_code) - - else: - # Check for expected output from change streams - if test["result"].get("success"): - for change, expected_changes in zip(changes, test["result"]["success"]): - self.assert_dict_is_subset(change, expected_changes) - self.assertEqual(len(changes), len(test["result"]["success"])) - - finally: - # Check for expected events - results = self.listener.results - # Note: expectations may be missing, null, or a list of events. - # Extra events emitted by the test are intentionally ignored. - for idx, expectation in enumerate(test.get("expectations") or []): - for event_type, event_desc in expectation.items(): - results_key = event_type.split("_")[1] - event = results[results_key][idx] if len(results[results_key]) > idx else None - self.check_event(event, event_desc) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): - dirname = os.path.split(dirpath)[-1] - - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - - test_type = os.path.splitext(filename)[0] - - for test in scenario_def["tests"]: - new_test = create_test(scenario_def, test) - new_test = client_context.require_no_mmap(new_test) - - if "minServerVersion" in test: - min_ver = tuple(int(elt) for elt in test["minServerVersion"].split(".")) - new_test = client_context.require_version_min(*min_ver)(new_test) - if "maxServerVersion" in test: - max_ver = tuple(int(elt) for elt in test["maxServerVersion"].split(".")) - new_test = client_context.require_version_max(*max_ver)(new_test) - - topologies = test["topology"] - new_test = client_context.require_cluster_type(topologies)(new_test) - - test_name = "test_%s_%s_%s" % ( - dirname, - test_type.replace("-", "_"), - str(test["description"].replace(" ", "_")), - ) - - new_test.__name__ = test_name - setattr(TestAllLegacyScenarios, new_test.__name__, new_test) - - -create_tests() - - globals().update( generate_test_classes( os.path.join(_TEST_PATH, "unified"), diff --git a/test/test_unified_format.py b/test/test_unified_format.py index e36959a224..8a6e3da549 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -65,6 +65,12 @@ def test_unsetOrMatches(self): for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: self.match_evaluator.match_result(spec, actual) + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + def test_type(self): self.match_evaluator.match_result( { diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json new file mode 100644 index 0000000000..178b756c2c --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.json @@ -0,0 +1,151 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "ignoreExtraEvents can be set to false", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": false, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents can be set to true", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 2 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "ignoreExtraEvents defaults to false if unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 4 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": 4 + } + ] + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-change-streams.json b/test/unified-test-format/valid-pass/poc-change-streams.json index 2a2c41a682..50f0d06f08 100644 --- a/test/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/unified-test-format/valid-pass/poc-change-streams.json @@ -1,6 +1,11 @@ { "description": "poc-change-streams", - "schemaVersion": "1.0", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "createEntities": [ { "client": { @@ -89,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/unified_format.py b/test/unified_format.py index 9edf499ece..61c96d6021 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -470,9 +470,15 @@ def __init__(self, test_class): def _operation_exists(self, spec, actual, key_to_compare): if spec is True: - self.test.assertIn(key_to_compare, actual) + if key_to_compare is None: + assert actual is not None + else: + self.test.assertIn(key_to_compare, actual) elif spec is False: - self.test.assertNotIn(key_to_compare, actual) + if key_to_compare is None: + assert actual is None + else: + self.test.assertNotIn(key_to_compare, actual) else: self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) @@ -704,7 +710,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.5") + SCHEMA_VERSION = Version.from_string("1.7") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -1181,19 +1187,32 @@ def check_events(self, spec): events = event_spec["events"] # Valid types: 'command', 'cmap' event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + assert event_type in ("command", "cmap") listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + if len(events) == 0: self.assertEqual(actual_events, []) continue - self.assertGreaterEqual(len(actual_events), len(events), actual_events) + self.assertEqual(len(actual_events), len(events), actual_events) for idx, expected_event in enumerate(events): self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + def verify_outcome(self, spec): for collection_data in spec: coll_name = collection_data["collectionName"] From cf08d46ff943c515d4c191650f478fbe9f737b01 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 1 Jun 2022 16:48:08 -0700 Subject: [PATCH 0672/2111] PYTHON-3277 Rename csfle library to crypt_shared (#956) --- .evergreen/config.yml | 19 +++++++++---------- .evergreen/run-tests.sh | 31 ++++++++++++++++--------------- pymongo/encryption.py | 4 ++-- pymongo/encryption_options.py | 16 ++++++++-------- test/test_encryption.py | 13 +++++++------ 5 files changed, 42 insertions(+), 41 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c12d4167b7..721de7cc61 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -450,8 +450,8 @@ functions: export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 fi - if [ -n "${test_csfle}" ]; then - export TEST_CSFLE=1 + if [ -n "${test_crypt_shared}" ]; then + export TEST_CRYPT_SHARED=1 fi if [ -n "${test_pyopenssl}" ]; then export TEST_PYOPENSSL=1 @@ -2163,15 +2163,14 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days - - id: "encryption_with_csfle" - display_name: "Encryption with CSFLE" - tags: ["encryption_tag", "csfle"] + - id: "encryption_crypt_shared" + display_name: "Encryption shared lib" + tags: ["encryption_tag"] variables: test_encryption: true - test_csfle: true + test_crypt_shared: true batchtime: 10080 # 7 days - # Run pyopenssl tests? - id: pyopenssl display_name: "PyOpenSSL" @@ -2306,7 +2305,7 @@ buildvariants: platform: "*" auth: "*" ssl: "*" - encryption: [ "encryption_with_csfle" ] + encryption: [ "encryption_crypt_shared" ] then: remove_tasks: - ".5.0" @@ -2400,7 +2399,7 @@ buildvariants: platform: "*" python-version: "*" auth-ssl: "*" - encryption: [ "encryption_with_csfle" ] + encryption: [ "encryption_crypt_shared" ] then: remove_tasks: - ".5.0" @@ -2509,7 +2508,7 @@ buildvariants: platform: "*" python-version-windows: "*" auth-ssl: "*" - encryption: [ "encryption_with_csfle" ] + encryption: [ "encryption_crypt_shared" ] then: remove_tasks: - ".5.0" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 96f42fa517..5f5bda7dc1 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -11,7 +11,7 @@ set -o errexit # Exit the script with error if any of the commands fail # COVERAGE If non-empty, run the test suite with coverage. # TEST_ENCRYPTION If non-empty, install pymongocrypt. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# TEST_CSFLE If non-empty, install CSFLE +# TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. if [ -n "${SET_XTRACE_ON}" ]; then set -o xtrace @@ -28,9 +28,10 @@ COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} -TEST_CSFLE=${TEST_CSFLE:-} +TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} DATA_LAKE=${DATA_LAKE:-} +TEST_ARGS="" if [ -n "$COMPRESSORS" ]; then export COMPRESSORS=$COMPRESSORS @@ -148,23 +149,23 @@ if [ -n "$TEST_ENCRYPTION" ]; then # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh + + if [ -n "$TEST_CRYPT_SHARED" ]; then + echo "Testing CSFLE with crypt_shared lib" + $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ + --version latest --out ../crypt_shared/ + export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH + export PATH=../crypt_shared/bin:$PATH + fi + # Only run the encryption tests. + TEST_ARGS="-s test.test_encryption" fi -if [ -z "$DATA_LAKE" ]; then - TEST_ARGS="" -else +if [ -n "$DATA_LAKE" ]; then TEST_ARGS="-s test.test_data_lake" fi -if [ -z $TEST_CSFLE ]; then - echo "CSFLE not being tested" -else - $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component csfle \ - --version latest --out ../csfle/ - export DYLD_FALLBACK_LIBRARY_PATH=../csfle/lib/:$DYLD_FALLBACK_LIBRARY_PATH - export LD_LIBRARY_PATH=../csfle/lib:$LD_LIBRARY_PATH - export PATH=../csfle/bin:$PATH - TEST_ARGS="-s test.test_encryption" -fi + # Don't download unittest-xml-reporting from pypi, which often fails. if $PYTHON -c "import xmlrunner"; then # The xunit output dir must be a Python style absolute path. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 1a29131890..40f7d20f23 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -300,8 +300,8 @@ def _get_internal_client(encrypter, mongo_client): MongoCryptOptions( opts._kms_providers, schema_map, - csfle_path=opts._csfle_path, - csfle_required=opts._csfle_required, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, ), ) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 0ce828ae4c..cdb77c9707 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -45,8 +45,8 @@ def __init__( mongocryptd_spawn_path: str = "mongocryptd", mongocryptd_spawn_args: Optional[List[str]] = None, kms_tls_options: Optional[Mapping[str, Any]] = None, - csfle_path: Optional[str] = None, - csfle_required: bool = False, + crypt_shared_lib_path: Optional[str] = None, + crypt_shared_lib_required: bool = False, ) -> None: """Options to configure automatic client-side field level encryption. @@ -142,12 +142,12 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - - `csfle_path` (optional): Override the path to load the CSFLE library. - - `csfle_required` (optional): If 'true', refuse to continue encryption without a CSFLE - library + - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. + - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is + unable to load the crypt_shared library. .. versionchanged:: 4.2 - Added `csfle_path` and `csfle_required` parameters + Added `crypt_shared_lib_path` and `crypt_shared_lib_required` parameters .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -160,8 +160,8 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) - self._csfle_path = csfle_path - self._csfle_required = csfle_required + self._crypt_shared_lib_path = crypt_shared_lib_path + self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client diff --git a/test/test_encryption.py b/test/test_encryption.py index fc9d4eec3b..500c95af04 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -83,12 +83,12 @@ def get_client_opts(client): class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - @unittest.skipUnless(os.environ.get("TEST_CSFLE"), "csfle is not installed") - def test_csfle(self): - # Test that we can pick up csfle automatically + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically client = MongoClient( auto_encryption_opts=AutoEncryptionOpts( - KMS_PROVIDERS, "keyvault.datakeys", csfle_required=True + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True ), connect=False, ) @@ -1762,8 +1762,9 @@ def test_case_8(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): @unittest.skipIf( - os.environ.get("TEST_CSFLE"), - "this prose test does not work when CSFLE is on a system dynamic library search path.", + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", ) def test_mongocryptd_bypass_spawn(self): # Lower the mongocryptd timeout to reduce the test run time. From 09385be54977aa0c6e620f342a41316d748109d6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Jun 2022 10:55:15 -0700 Subject: [PATCH 0673/2111] PYTHON-2924 Improve test_load_balancing (#955) --- test/test_server_selection_in_window.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 4b24d0d7b0..cae2d7661b 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -17,7 +17,13 @@ import os import threading from test import IntegrationTest, client_context, unittest -from test.utils import OvertCommandListener, TestCreator, rs_client, wait_until +from test.utils import ( + OvertCommandListener, + TestCreator, + get_pool, + rs_client, + wait_until, +) from test.utils_selection_tests import create_topology from pymongo.common import clean_node @@ -98,11 +104,10 @@ def run(self): class TestProse(IntegrationTest): - def frequencies(self, client, listener): + def frequencies(self, client, listener, n_finds=10): coll = client.test.test - N_FINDS = 10 N_THREADS = 10 - threads = [FinderThread(coll, N_FINDS) for _ in range(N_THREADS)] + threads = [FinderThread(coll, n_finds) for _ in range(N_THREADS)] for thread in threads: thread.start() for thread in threads: @@ -111,7 +116,7 @@ def frequencies(self, client, listener): self.assertTrue(thread.passed) events = listener.results["started"] - self.assertEqual(len(events), N_FINDS * N_THREADS) + self.assertEqual(len(events), n_finds * N_THREADS) nodes = client.nodes self.assertEqual(len(nodes), 2) freqs = {address: 0.0 for address in nodes} @@ -131,10 +136,12 @@ def test_load_balancing(self): client_context.mongos_seeds(), appName="loadBalancingTest", event_listeners=[listener], - localThresholdMS=10000, + localThresholdMS=30000, + minPoolSize=10, ) self.addCleanup(client.close) wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + wait_until(lambda: len(get_pool(client).sockets) >= 10, "create 10 connections") # Delay find commands on delay_finds = { "configureFailPoint": "failCommand", @@ -153,7 +160,7 @@ def test_load_balancing(self): freqs = self.frequencies(client, listener) self.assertLessEqual(freqs[delayed_server], 0.25) listener.reset() - freqs = self.frequencies(client, listener) + freqs = self.frequencies(client, listener, n_finds=100) self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) From 154d8787c5cc4fe6d077f7bc60cbc6c0b689e693 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 3 Jun 2022 13:11:28 -0700 Subject: [PATCH 0674/2111] PYTHON-3245 Support explicit queryable encryption (#959) --- .evergreen/resync-specs.sh | 1 + .evergreen/run-tests.sh | 2 +- pymongo/encryption.py | 49 +++++- pymongo/encryption_options.py | 9 +- .../etc/data/encryptedFields.json | 33 ++++ .../etc/data/keys/key1-document.json | 30 ++++ .../etc/data/keys/key1-id.json | 6 + .../etc/data/keys/key2-document.json | 30 ++++ .../etc/data/keys/key2-id.json | 6 + test/test_encryption.py | 150 ++++++++++++++++-- 10 files changed, 298 insertions(+), 18 deletions(-) create mode 100644 test/client-side-encryption/etc/data/encryptedFields.json create mode 100644 test/client-side-encryption/etc/data/keys/key1-document.json create mode 100644 test/client-side-encryption/etc/data/keys/key1-id.json create mode 100644 test/client-side-encryption/etc/data/keys/key2-document.json create mode 100644 test/client-side-encryption/etc/data/keys/key2-id.json diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index a98b091d59..1177ebb04a 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -93,6 +93,7 @@ do cpjson client-side-encryption/corpus/ client-side-encryption/corpus cpjson client-side-encryption/external/ client-side-encryption/external cpjson client-side-encryption/limits/ client-side-encryption/limits + cpjson client-side-encryption/etc/data client-side-encryption/etc/data ;; cmap|CMAP|connection-monitoring-and-pooling) cpjson connection-monitoring-and-pooling/tests cmap diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 5f5bda7dc1..4367bad246 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -139,7 +139,7 @@ if [ -n "$TEST_ENCRYPTION" ]; then export PYMONGOCRYPT_LIB # TODO: Test with 'pip install pymongocrypt' - git clone --branch master https://github.com/mongodb/libmongocrypt.git libmongocrypt_git + git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git python -m pip install --prefer-binary -r .evergreen/test-encryption-requirements.txt python -m pip install ./libmongocrypt_git/bindings/python python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 40f7d20f23..71642aaa2a 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -15,6 +15,7 @@ """Support for explicit client-side field level encryption.""" import contextlib +import enum import uuid import weakref from typing import Any, Mapping, Optional, Sequence @@ -303,6 +304,7 @@ def _get_internal_client(encrypter, mongo_client): crypt_shared_lib_path=opts._crypt_shared_lib_path, crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, + bypass_query_analysis=opts._bypass_query_analysis, ), ) self._closed = False @@ -352,11 +354,33 @@ def close(self): self._internal_client = None -class Algorithm(object): +class Algorithm(str, enum.Enum): """An enum that defines the supported encryption algorithms.""" AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + + +class QueryType(enum.IntEnum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = 1 + """Used to encrypt a value for an equality query.""" class ClientEncryption(object): @@ -550,6 +574,9 @@ def encrypt( algorithm: str, key_id: Optional[Binary] = None, key_alt_name: Optional[str] = None, + index_key_id: Optional[Binary] = None, + query_type: Optional[int] = None, + contention_factor: Optional[int] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -564,20 +591,38 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. + - `index_key_id` (bytes): the index key id to use for Queryable Encryption. + - `query_type` (int): The query type to execute. See + :class:`QueryType` for valid options. + - `contention_factor` (int): The contention factor to use + when the algorithm is "Indexed". :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + + .. versionchanged:: 4.2 + Added the `index_key_id`, `query_type`, and `contention_factor` parameters. """ self._check_closed() if key_id is not None and not ( isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE ): raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + if index_key_id is not None and not ( + isinstance(index_key_id, Binary) and index_key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("index_key_id must be a bson.binary.Binary with subtype 4") doc = encode({"v": value}, codec_options=self._codec_options) with _wrap_encryption_errors(): encrypted_doc = self._encryption.encrypt( - doc, algorithm, key_id=key_id, key_alt_name=key_alt_name + doc, + algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + index_key_id=index_key_id, + query_type=query_type, + contention_factor=contention_factor, ) return decode(encrypted_doc)["v"] # type: ignore[index] diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index cdb77c9707..5acc55042a 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -47,6 +47,7 @@ def __init__( kms_tls_options: Optional[Mapping[str, Any]] = None, crypt_shared_lib_path: Optional[str] = None, crypt_shared_lib_required: bool = False, + bypass_query_analysis: bool = False, ) -> None: """Options to configure automatic client-side field level encryption. @@ -145,9 +146,14 @@ def __init__( - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is unable to load the crypt_shared library. + - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis of + outgoing commands. Set `bypass_query_analysis` to use explicit + encryption on indexed fields without the MongoDB Enterprise Advanced + licensed crypt_shared library. .. versionchanged:: 4.2 - Added `crypt_shared_lib_path` and `crypt_shared_lib_required` parameters + Added `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` + parameters. .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -179,3 +185,4 @@ def __init__( self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) + self._bypass_query_analysis = bypass_query_analysis diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json new file mode 100644 index 0000000000..2364590e4c --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -0,0 +1,33 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] +} diff --git a/test/client-side-encryption/etc/data/keys/key1-document.json b/test/client-side-encryption/etc/data/keys/key1-document.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key1-id.json b/test/client-side-encryption/etc/data/keys/key1-id.json new file mode 100644 index 0000000000..7d18f52ebb --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key1-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-document.json b/test/client-side-encryption/etc/data/keys/key2-document.json new file mode 100644 index 0000000000..a654d980ba --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/keys/key2-id.json b/test/client-side-encryption/etc/data/keys/key2-id.json new file mode 100644 index 0000000000..6e9b87bbc2 --- /dev/null +++ b/test/client-side-encryption/etc/data/keys/key2-id.json @@ -0,0 +1,6 @@ +{ + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 500c95af04..288c137c7e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -51,14 +51,14 @@ from test.utils_spec_runner import SpecRunner from bson import encode, json_util -from bson.binary import JAVA_LEGACY, STANDARD, UUID_SUBTYPE, Binary, UuidRepresentation +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON from pymongo import encryption from pymongo.cursor import CursorType -from pymongo.encryption import Algorithm, ClientEncryption +from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( BulkWriteError, @@ -212,11 +212,11 @@ def assertBinaryUUID(self, val): BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") SPEC_PATH = os.path.join(BASE, "spec") -OPTS = CodecOptions(uuid_representation=STANDARD) +OPTS = CodecOptions() # Use SON to preserve the order of fields while parsing json. Use tz_aware # =False to match how CodecOptions decodes dates. -JSON_OPTS = JSONOptions(document_class=SON, uuid_representation=STANDARD, tz_aware=False) +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) def read(*paths): @@ -324,7 +324,7 @@ def test_use_after_close(self): class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): - def test_upsert_uuid_standard_encrypte(self): + def test_upsert_uuid_standard_encrypt(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client.close) @@ -449,11 +449,19 @@ def test_validation(self): msg = "key_id must be a bson.binary.Binary with subtype 4" algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + uid = uuid.uuid4() with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, key_id=uuid.uuid4()) # type: ignore[arg-type] + client_encryption.encrypt("str", algo, key_id=uid) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) + msg = "index_key_id must be a bson.binary.Binary with subtype 4" + algo = Algorithm.INDEXED + with self.assertRaisesRegex(TypeError, msg): + client_encryption.encrypt("str", algo, index_key_id=uid) # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, msg): + client_encryption.encrypt("str", algo, index_key_id=Binary(b"123")) + def test_bson_errors(self): client_encryption = ClientEncryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS @@ -466,7 +474,7 @@ def test_bson_errors(self): client_encryption.encrypt( unencodable_value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary(uuid.uuid4().bytes, UUID_SUBTYPE), + key_id=Binary.from_uuid(uuid.uuid4()), ) def test_codec_options(self): @@ -475,7 +483,7 @@ def test_codec_options(self): KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None # type: ignore[arg-type] ) - opts = CodecOptions(uuid_representation=JAVA_LEGACY) + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) client_encryption_legacy = ClientEncryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) @@ -493,8 +501,9 @@ def test_codec_options(self): self.assertEqual(decrypted_value_legacy, value) # Encrypt the same UUID with STANDARD codec options. + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) client_encryption = ClientEncryption( - KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS + KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( @@ -986,9 +995,7 @@ def _test_corpus(self, opts): ) self.addCleanup(vault.drop) - client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, uuidRepresentation="standard" - ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) self.addCleanup(client_encrypted.close) client_encryption = ClientEncryption( @@ -1436,7 +1443,7 @@ def _test_explicit(self, expectation): ciphertext = client_encryption.encrypt( "string0", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=Binary.from_uuid(self.DEK["_id"], STANDARD), + key_id=self.DEK["_id"], ) self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) @@ -1972,9 +1979,124 @@ def test_04_kmip(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("kmip") +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +class TestExplicitQueryableEncryption(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(6, 0, -1) + def setUp(self): + super().setUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + self.client.drop_database(self.db) + self.db.command("create", self.encrypted_fields["escCollection"]) + self.db.command("create", self.encrypted_fields["eccCollection"]) + self.db.command("create", self.encrypted_fields["ecocCollection"]) + self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(self.encrypted_client.close) + + def test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedIndexed"], val) + + def test_02_insert_encrypted_indexed_and_find_contention(self): + val = "encrypted indexed value" + contention = 10 + for _ in range(contention): + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=contention + ) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + # Find without contention_factor non-deterministically returns 0-9 documents. + find_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertLessEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + # Find with contention_factor will return all 10 documents. + find_payload = self.client_encryption.encrypt( + val, + Algorithm.INDEXED, + self.key1_id, + query_type=QueryType.EQUALITY, + contention_factor=contention, + ) + docs = list( + self.encrypted_client[self.db.name].explicit_encryption.find( + {"encryptedIndexed": find_payload} + ) + ) + self.assertEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + def test_03_insert_encrypted_unindexed(self): + val = "encrypted unindexed value" + insert_payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"_id": 1, "encryptedUnindexed": insert_payload} + ) + + docs = list(self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1})) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedUnindexed"], val) + + def test_04_roundtrip_encrypted_indexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + def test_05_roundtrip_encrypted_unindexed(self): + val = "encrypted indexed value" + payload = self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + decrypted = self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + if __name__ == "__main__": unittest.main() From d98e44e27e4ecbbaee301649c1ecb6a41fc2e895 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 3 Jun 2022 13:43:47 -0700 Subject: [PATCH 0675/2111] PYTHON-3245 Fix docs for index_key_id (#960) --- pymongo/encryption.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 71642aaa2a..25d216d5b5 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -591,11 +591,12 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `index_key_id` (bytes): the index key id to use for Queryable Encryption. + - `index_key_id`: The index key id to use for Queryable Encryption. Must be + a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - `query_type` (int): The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): The contention factor to use - when the algorithm is "Indexed". + when the algorithm is :attr:`Algorithm.INDEXED`. :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. From 6b088ffa4e813272ffb25637dd05cc05fe42288f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 6 Jun 2022 09:33:31 -0700 Subject: [PATCH 0676/2111] PYTHON-3241 Add Queryable Encryption API to AutoEncryptionOpts (#957) --- pymongo/collection.py | 23 +- pymongo/common.py | 12 + pymongo/database.py | 117 +- pymongo/encryption.py | 6 + pymongo/encryption_options.py | 33 +- .../spec/{ => legacy}/aggregate.json | 0 .../spec/{ => legacy}/awsTemporary.json | 0 .../spec/{ => legacy}/azureKMS.json | 0 .../spec/{ => legacy}/badQueries.json | 0 .../spec/{ => legacy}/badSchema.json | 0 .../spec/{ => legacy}/basic.json | 0 .../spec/{ => legacy}/bulk.json | 0 .../{ => legacy}/bypassAutoEncryption.json | 0 .../spec/{ => legacy}/bypassedCommand.json | 0 .../spec/{ => legacy}/count.json | 0 .../spec/{ => legacy}/countDocuments.json | 0 .../spec/legacy/create-and-createIndexes.json | 115 + .../spec/{ => legacy}/delete.json | 0 .../spec/{ => legacy}/distinct.json | 0 .../spec/{ => legacy}/explain.json | 0 .../spec/{ => legacy}/find.json | 0 .../spec/{ => legacy}/findOneAndDelete.json | 0 .../spec/{ => legacy}/findOneAndReplace.json | 0 .../spec/{ => legacy}/findOneAndUpdate.json | 0 .../spec/legacy/fle2-BypassQueryAnalysis.json | 289 +++ .../spec/legacy/fle2-Compact.json | 232 ++ .../spec/legacy/fle2-CreateCollection.json | 2239 +++++++++++++++++ .../spec/legacy/fle2-DecryptExistingData.json | 148 ++ .../spec/legacy/fle2-Delete.json | 305 +++ ...EncryptedFields-vs-EncryptedFieldsMap.json | 217 ++ .../fle2-EncryptedFields-vs-jsonSchema.json | 304 +++ .../fle2-EncryptedFieldsMap-defaults.json | 105 + .../spec/legacy/fle2-FindOneAndUpdate.json | 602 +++++ .../spec/legacy/fle2-InsertFind-Indexed.json | 300 +++ .../legacy/fle2-InsertFind-Unindexed.json | 250 ++ .../spec/legacy/fle2-MissingKey.json | 118 + .../spec/legacy/fle2-NoEncryption.json | 86 + .../spec/legacy/fle2-Update.json | 610 +++++ ...e2-validatorAndPartialFieldExpression.json | 520 ++++ .../spec/{ => legacy}/gcpKMS.json | 0 .../spec/{ => legacy}/getMore.json | 0 .../spec/{ => legacy}/insert.json | 0 .../spec/{ => legacy}/keyAltName.json | 0 .../spec/{ => legacy}/kmipKMS.json | 0 .../spec/{ => legacy}/localKMS.json | 0 .../spec/{ => legacy}/localSchema.json | 0 .../{ => legacy}/malformedCiphertext.json | 0 .../spec/{ => legacy}/maxWireVersion.json | 0 .../spec/{ => legacy}/missingKey.json | 0 .../spec/{ => legacy}/noSchema.json | 0 .../spec/{ => legacy}/replaceOne.json | 0 .../spec/{ => legacy}/types.json | 0 .../spec/{ => legacy}/unsupportedCommand.json | 0 .../spec/{ => legacy}/updateMany.json | 0 .../spec/{ => legacy}/updateOne.json | 0 .../validatorAndPartialFieldExpression.json | 642 +++++ .../spec/unified/addKeyAltName.json | 603 +++++ .../createKey-kms_providers-invalid.json | 112 + .../spec/unified/createKey.json | 711 ++++++ .../spec/unified/deleteKey.json | 553 ++++ .../spec/unified/getKey.json | 313 +++ .../spec/unified/getKeyByAltName.json | 283 +++ .../spec/unified/getKeys.json | 260 ++ .../spec/unified/removeKeyAltName.json | 572 +++++ .../rewrapManyDataKey-decrypt_failure.json | 162 ++ .../rewrapManyDataKey-encrypt_failure.json | 250 ++ .../spec/unified/rewrapManyDataKey.json | 1373 ++++++++++ test/test_encryption.py | 17 +- test/utils.py | 18 +- test/utils_spec_runner.py | 27 +- 70 files changed, 12489 insertions(+), 38 deletions(-) rename test/client-side-encryption/spec/{ => legacy}/aggregate.json (100%) rename test/client-side-encryption/spec/{ => legacy}/awsTemporary.json (100%) rename test/client-side-encryption/spec/{ => legacy}/azureKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/badQueries.json (100%) rename test/client-side-encryption/spec/{ => legacy}/badSchema.json (100%) rename test/client-side-encryption/spec/{ => legacy}/basic.json (100%) rename test/client-side-encryption/spec/{ => legacy}/bulk.json (100%) rename test/client-side-encryption/spec/{ => legacy}/bypassAutoEncryption.json (100%) rename test/client-side-encryption/spec/{ => legacy}/bypassedCommand.json (100%) rename test/client-side-encryption/spec/{ => legacy}/count.json (100%) rename test/client-side-encryption/spec/{ => legacy}/countDocuments.json (100%) create mode 100644 test/client-side-encryption/spec/legacy/create-and-createIndexes.json rename test/client-side-encryption/spec/{ => legacy}/delete.json (100%) rename test/client-side-encryption/spec/{ => legacy}/distinct.json (100%) rename test/client-side-encryption/spec/{ => legacy}/explain.json (100%) rename test/client-side-encryption/spec/{ => legacy}/find.json (100%) rename test/client-side-encryption/spec/{ => legacy}/findOneAndDelete.json (100%) rename test/client-side-encryption/spec/{ => legacy}/findOneAndReplace.json (100%) rename test/client-side-encryption/spec/{ => legacy}/findOneAndUpdate.json (100%) create mode 100644 test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Compact.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-CreateCollection.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-MissingKey.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-NoEncryption.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json rename test/client-side-encryption/spec/{ => legacy}/gcpKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/getMore.json (100%) rename test/client-side-encryption/spec/{ => legacy}/insert.json (100%) rename test/client-side-encryption/spec/{ => legacy}/keyAltName.json (100%) rename test/client-side-encryption/spec/{ => legacy}/kmipKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/localKMS.json (100%) rename test/client-side-encryption/spec/{ => legacy}/localSchema.json (100%) rename test/client-side-encryption/spec/{ => legacy}/malformedCiphertext.json (100%) rename test/client-side-encryption/spec/{ => legacy}/maxWireVersion.json (100%) rename test/client-side-encryption/spec/{ => legacy}/missingKey.json (100%) rename test/client-side-encryption/spec/{ => legacy}/noSchema.json (100%) rename test/client-side-encryption/spec/{ => legacy}/replaceOne.json (100%) rename test/client-side-encryption/spec/{ => legacy}/types.json (100%) rename test/client-side-encryption/spec/{ => legacy}/unsupportedCommand.json (100%) rename test/client-side-encryption/spec/{ => legacy}/updateMany.json (100%) rename test/client-side-encryption/spec/{ => legacy}/updateOne.json (100%) create mode 100644 test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json create mode 100644 test/client-side-encryption/spec/unified/addKeyAltName.json create mode 100644 test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json create mode 100644 test/client-side-encryption/spec/unified/createKey.json create mode 100644 test/client-side-encryption/spec/unified/deleteKey.json create mode 100644 test/client-side-encryption/spec/unified/getKey.json create mode 100644 test/client-side-encryption/spec/unified/getKeyByAltName.json create mode 100644 test/client-side-encryption/spec/unified/getKeys.json create mode 100644 test/client-side-encryption/spec/unified/removeKeyAltName.json create mode 100644 test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json create mode 100644 test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json create mode 100644 test/client-side-encryption/spec/unified/rewrapManyDataKey.json diff --git a/pymongo/collection.py b/pymongo/collection.py index 0197198108..ffd883e939 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -35,7 +35,7 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from bson.timestamp import Timestamp -from pymongo import common, helpers, message +from pymongo import ASCENDING, common, helpers, message from pymongo.aggregation import ( _CollectionAggregationCommand, _CollectionRawAggregationCommand, @@ -44,6 +44,7 @@ from pymongo.change_stream import CollectionChangeStream from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor +from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name from pymongo.cursor import Cursor, RawBatchCursor from pymongo.errors import ( ConfigurationError, @@ -115,6 +116,7 @@ def __init__( write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -197,7 +199,6 @@ def __init__( write_concern or database.write_concern, read_concern or database.read_concern, ) - if not isinstance(name, str): raise TypeError("name must be an instance of str") @@ -215,7 +216,16 @@ def __init__( self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) if create or kwargs or collation: - self.__create(kwargs, collation, session) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self.__create(_esc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(_ecc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) + self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self.__create(name, kwargs, collation, session) self.__write_response_codec_options = self.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict @@ -286,9 +296,12 @@ def _command( user_fields=user_fields, ) - def __create(self, options, collation, session): + def __create(self, name, options, collation, session, encrypted_fields=None): """Sends a create command with the given options.""" - cmd = SON([("create", self.__name)]) + cmd = SON([("create", name)]) + if encrypted_fields: + cmd["encryptedFields"] = encrypted_fields + if options: if "size" in options: options["size"] = float(options["size"]) diff --git a/pymongo/common.py b/pymongo/common.py index 552faf94a2..4376654405 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -792,6 +792,18 @@ def get_validated_options( return validated_options +def _esc_coll_name(encrypted_fields, name): + return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc") + + +def _ecc_coll_name(encrypted_fields, name): + return encrypted_fields.get("eccCollection", f"enxcol_.{name}.ecc") + + +def _ecoc_coll_name(encrypted_fields, name): + return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc") + + # List of write-concern-related options. WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) diff --git a/pymongo/database.py b/pymongo/database.py index 2156a5e972..bb91196f2e 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -38,6 +38,7 @@ from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor +from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _Pipeline @@ -290,6 +291,7 @@ def create_collection( write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this @@ -321,6 +323,29 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - `encrypted_fields`: Document that describes the encrypted fields for Queryable + Encryption. + For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } } - `**kwargs` (optional): additional keyword arguments will be passed as options for the `create collection command`_ @@ -369,6 +394,17 @@ def create_collection( .. _create collection command: https://mongodb.com/docs/manual/reference/command/create """ + if ( + not encrypted_fields + and self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + ): + encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( + "%s.%s" % (self.name, name) + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. @@ -376,7 +412,6 @@ def create_collection( filter={"name": name}, session=s ): raise CollectionInvalid("collection %s already exists" % name) - return Collection( self, name, @@ -386,6 +421,7 @@ def create_collection( write_concern, read_concern, session=s, + encrypted_fields=encrypted_fields, **kwargs, ) @@ -874,11 +910,27 @@ def list_collection_names( return [result["name"] for result in self.list_collections(session=session, **kwargs)] + def _drop_helper(self, name, session=None, comment=None): + command = SON([("drop", name)]) + if comment is not None: + command["comment"] = comment + + with self.__client._socket_for_writes(session) as sock_info: + return self._command( + sock_info, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + def drop_collection( self, name_or_collection: Union[str, Collection], session: Optional["ClientSession"] = None, comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> Dict[str, Any]: """Drop a collection. @@ -889,6 +941,29 @@ def drop_collection( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. + - `encrypted_fields`: Document that describes the encrypted fields for Queryable + Encryption. + For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } .. note:: The :attr:`~pymongo.database.Database.write_concern` of @@ -911,20 +986,34 @@ def drop_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") - - command = SON([("drop", name)]) - if comment is not None: - command["comment"] = comment - - with self.__client._socket_for_writes(session) as sock_info: - return self._command( - sock_info, - command, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session, + full_name = "%s.%s" % (self.name, name) + if ( + not encrypted_fields + and self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + ): + encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( + full_name + ) + if not encrypted_fields and self.client.options.auto_encryption_opts: + colls = list( + self.list_collections(filter={"name": name}, session=session, comment=comment) ) + if colls and colls[0]["options"].get("encryptedFields"): + encrypted_fields = colls[0]["options"]["encryptedFields"] + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return self._drop_helper(name, session, comment) def validate_collection( self, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 25d216d5b5..a7a69dbe34 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -264,6 +264,11 @@ def __init__(self, client, opts): schema_map = None else: schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) self._bypass_auto_encryption = opts._bypass_auto_encryption self._internal_client = None @@ -304,6 +309,7 @@ def _get_internal_client(encrypter, mongo_client): crypt_shared_lib_path=opts._crypt_shared_lib_path, crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, bypass_query_analysis=opts._bypass_query_analysis, ), ) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 5acc55042a..eedc2ee23c 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -23,6 +23,7 @@ except ImportError: _HAVE_PYMONGOCRYPT = False +from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options @@ -48,6 +49,7 @@ def __init__( crypt_shared_lib_path: Optional[str] = None, crypt_shared_lib_required: bool = False, bypass_query_analysis: bool = False, + encrypted_fields_map: Optional[Mapping] = None, ) -> None: """Options to configure automatic client-side field level encryption. @@ -150,10 +152,33 @@ def __init__( outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. + - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that + described the encrypted fields for Queryable Encryption. For example:: + + { + "db.encryptedCollection": { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + } .. versionchanged:: 4.2 - Added `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` - parameters. + Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, + and `bypass_query_analysis` parameters. .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -166,6 +191,10 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) + if encrypted_fields_map: + validate_is_mapping("encrypted_fields_map", encrypted_fields_map) + self._encrypted_fields_map = encrypted_fields_map + self._bypass_query_analysis = bypass_query_analysis self._crypt_shared_lib_path = crypt_shared_lib_path self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers diff --git a/test/client-side-encryption/spec/aggregate.json b/test/client-side-encryption/spec/legacy/aggregate.json similarity index 100% rename from test/client-side-encryption/spec/aggregate.json rename to test/client-side-encryption/spec/legacy/aggregate.json diff --git a/test/client-side-encryption/spec/awsTemporary.json b/test/client-side-encryption/spec/legacy/awsTemporary.json similarity index 100% rename from test/client-side-encryption/spec/awsTemporary.json rename to test/client-side-encryption/spec/legacy/awsTemporary.json diff --git a/test/client-side-encryption/spec/azureKMS.json b/test/client-side-encryption/spec/legacy/azureKMS.json similarity index 100% rename from test/client-side-encryption/spec/azureKMS.json rename to test/client-side-encryption/spec/legacy/azureKMS.json diff --git a/test/client-side-encryption/spec/badQueries.json b/test/client-side-encryption/spec/legacy/badQueries.json similarity index 100% rename from test/client-side-encryption/spec/badQueries.json rename to test/client-side-encryption/spec/legacy/badQueries.json diff --git a/test/client-side-encryption/spec/badSchema.json b/test/client-side-encryption/spec/legacy/badSchema.json similarity index 100% rename from test/client-side-encryption/spec/badSchema.json rename to test/client-side-encryption/spec/legacy/badSchema.json diff --git a/test/client-side-encryption/spec/basic.json b/test/client-side-encryption/spec/legacy/basic.json similarity index 100% rename from test/client-side-encryption/spec/basic.json rename to test/client-side-encryption/spec/legacy/basic.json diff --git a/test/client-side-encryption/spec/bulk.json b/test/client-side-encryption/spec/legacy/bulk.json similarity index 100% rename from test/client-side-encryption/spec/bulk.json rename to test/client-side-encryption/spec/legacy/bulk.json diff --git a/test/client-side-encryption/spec/bypassAutoEncryption.json b/test/client-side-encryption/spec/legacy/bypassAutoEncryption.json similarity index 100% rename from test/client-side-encryption/spec/bypassAutoEncryption.json rename to test/client-side-encryption/spec/legacy/bypassAutoEncryption.json diff --git a/test/client-side-encryption/spec/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json similarity index 100% rename from test/client-side-encryption/spec/bypassedCommand.json rename to test/client-side-encryption/spec/legacy/bypassedCommand.json diff --git a/test/client-side-encryption/spec/count.json b/test/client-side-encryption/spec/legacy/count.json similarity index 100% rename from test/client-side-encryption/spec/count.json rename to test/client-side-encryption/spec/legacy/count.json diff --git a/test/client-side-encryption/spec/countDocuments.json b/test/client-side-encryption/spec/legacy/countDocuments.json similarity index 100% rename from test/client-side-encryption/spec/countDocuments.json rename to test/client-side-encryption/spec/legacy/countDocuments.json diff --git a/test/client-side-encryption/spec/legacy/create-and-createIndexes.json b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json new file mode 100644 index 0000000000..48638a97c8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/create-and-createIndexes.json @@ -0,0 +1,115 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "unencryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "unencryptedCollection", + "index": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/delete.json b/test/client-side-encryption/spec/legacy/delete.json similarity index 100% rename from test/client-side-encryption/spec/delete.json rename to test/client-side-encryption/spec/legacy/delete.json diff --git a/test/client-side-encryption/spec/distinct.json b/test/client-side-encryption/spec/legacy/distinct.json similarity index 100% rename from test/client-side-encryption/spec/distinct.json rename to test/client-side-encryption/spec/legacy/distinct.json diff --git a/test/client-side-encryption/spec/explain.json b/test/client-side-encryption/spec/legacy/explain.json similarity index 100% rename from test/client-side-encryption/spec/explain.json rename to test/client-side-encryption/spec/legacy/explain.json diff --git a/test/client-side-encryption/spec/find.json b/test/client-side-encryption/spec/legacy/find.json similarity index 100% rename from test/client-side-encryption/spec/find.json rename to test/client-side-encryption/spec/legacy/find.json diff --git a/test/client-side-encryption/spec/findOneAndDelete.json b/test/client-side-encryption/spec/legacy/findOneAndDelete.json similarity index 100% rename from test/client-side-encryption/spec/findOneAndDelete.json rename to test/client-side-encryption/spec/legacy/findOneAndDelete.json diff --git a/test/client-side-encryption/spec/findOneAndReplace.json b/test/client-side-encryption/spec/legacy/findOneAndReplace.json similarity index 100% rename from test/client-side-encryption/spec/findOneAndReplace.json rename to test/client-side-encryption/spec/legacy/findOneAndReplace.json diff --git a/test/client-side-encryption/spec/findOneAndUpdate.json b/test/client-side-encryption/spec/legacy/findOneAndUpdate.json similarity index 100% rename from test/client-side-encryption/spec/findOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/findOneAndUpdate.json diff --git a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..629faf189d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json @@ -0,0 +1,289 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "bypassQueryAnalysis": true + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2-Compact.json new file mode 100644 index 0000000000..46da99cbfc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Compact.json @@ -0,0 +1,232 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json new file mode 100644 index 0000000000..6836f40e04 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json @@ -0,0 +1,2239 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "state collections and index are created", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "default state collection names are applied", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "drop removes all state collections", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + }, + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "encryptedFieldsMap with cyclic entries does not loop", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + }, + "default.encryptedCollection.esc": { + "escCollection": "encryptedCollection", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "plaintextCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "plaintextCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "plaintextCollection" + }, + "command_name": "create", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json new file mode 100644 index 0000000000..c6d0bca0d1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json @@ -0,0 +1,148 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json new file mode 100644 index 0000000000..790e818295 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Delete.json @@ -0,0 +1,305 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..ea3eb4850c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,217 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "escCollection": "esc", + "eccCollection": "ecc", + "ecocCollection": "ecoc", + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..69abfa7cfb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,304 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": {}, + "bsonType": "object" + }, + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..030952e056 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,105 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "fields": [], + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc" + } + } + }, + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json new file mode 100644 index 0000000000..b8088515ca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json @@ -0,0 +1,602 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json new file mode 100644 index 0000000000..142cacf2fd --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json @@ -0,0 +1,300 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..1a75095907 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json @@ -0,0 +1,250 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "Query with an unindexed field fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "result": { + "errorContains": "Cannot query" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json new file mode 100644 index 0000000000..2db1cd7702 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json @@ -0,0 +1,118 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [], + "tests": [ + { + "description": "FLE2 encrypt fails with mising key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with mising key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json new file mode 100644 index 0000000000..e9dd586c26 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json @@ -0,0 +1,86 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "encrypted_fields": { + "fields": [] + }, + "tests": [ + { + "description": "insert with no encryption succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": "bar" + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json new file mode 100644 index 0000000000..66a291902a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Update.json @@ -0,0 +1,610 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "Update can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedIndexed": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "update" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..fab36f75a1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json @@ -0,0 +1,520 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/gcpKMS.json b/test/client-side-encryption/spec/legacy/gcpKMS.json similarity index 100% rename from test/client-side-encryption/spec/gcpKMS.json rename to test/client-side-encryption/spec/legacy/gcpKMS.json diff --git a/test/client-side-encryption/spec/getMore.json b/test/client-side-encryption/spec/legacy/getMore.json similarity index 100% rename from test/client-side-encryption/spec/getMore.json rename to test/client-side-encryption/spec/legacy/getMore.json diff --git a/test/client-side-encryption/spec/insert.json b/test/client-side-encryption/spec/legacy/insert.json similarity index 100% rename from test/client-side-encryption/spec/insert.json rename to test/client-side-encryption/spec/legacy/insert.json diff --git a/test/client-side-encryption/spec/keyAltName.json b/test/client-side-encryption/spec/legacy/keyAltName.json similarity index 100% rename from test/client-side-encryption/spec/keyAltName.json rename to test/client-side-encryption/spec/legacy/keyAltName.json diff --git a/test/client-side-encryption/spec/kmipKMS.json b/test/client-side-encryption/spec/legacy/kmipKMS.json similarity index 100% rename from test/client-side-encryption/spec/kmipKMS.json rename to test/client-side-encryption/spec/legacy/kmipKMS.json diff --git a/test/client-side-encryption/spec/localKMS.json b/test/client-side-encryption/spec/legacy/localKMS.json similarity index 100% rename from test/client-side-encryption/spec/localKMS.json rename to test/client-side-encryption/spec/legacy/localKMS.json diff --git a/test/client-side-encryption/spec/localSchema.json b/test/client-side-encryption/spec/legacy/localSchema.json similarity index 100% rename from test/client-side-encryption/spec/localSchema.json rename to test/client-side-encryption/spec/legacy/localSchema.json diff --git a/test/client-side-encryption/spec/malformedCiphertext.json b/test/client-side-encryption/spec/legacy/malformedCiphertext.json similarity index 100% rename from test/client-side-encryption/spec/malformedCiphertext.json rename to test/client-side-encryption/spec/legacy/malformedCiphertext.json diff --git a/test/client-side-encryption/spec/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json similarity index 100% rename from test/client-side-encryption/spec/maxWireVersion.json rename to test/client-side-encryption/spec/legacy/maxWireVersion.json diff --git a/test/client-side-encryption/spec/missingKey.json b/test/client-side-encryption/spec/legacy/missingKey.json similarity index 100% rename from test/client-side-encryption/spec/missingKey.json rename to test/client-side-encryption/spec/legacy/missingKey.json diff --git a/test/client-side-encryption/spec/noSchema.json b/test/client-side-encryption/spec/legacy/noSchema.json similarity index 100% rename from test/client-side-encryption/spec/noSchema.json rename to test/client-side-encryption/spec/legacy/noSchema.json diff --git a/test/client-side-encryption/spec/replaceOne.json b/test/client-side-encryption/spec/legacy/replaceOne.json similarity index 100% rename from test/client-side-encryption/spec/replaceOne.json rename to test/client-side-encryption/spec/legacy/replaceOne.json diff --git a/test/client-side-encryption/spec/types.json b/test/client-side-encryption/spec/legacy/types.json similarity index 100% rename from test/client-side-encryption/spec/types.json rename to test/client-side-encryption/spec/legacy/types.json diff --git a/test/client-side-encryption/spec/unsupportedCommand.json b/test/client-side-encryption/spec/legacy/unsupportedCommand.json similarity index 100% rename from test/client-side-encryption/spec/unsupportedCommand.json rename to test/client-side-encryption/spec/legacy/unsupportedCommand.json diff --git a/test/client-side-encryption/spec/updateMany.json b/test/client-side-encryption/spec/legacy/updateMany.json similarity index 100% rename from test/client-side-encryption/spec/updateMany.json rename to test/client-side-encryption/spec/legacy/updateMany.json diff --git a/test/client-side-encryption/spec/updateOne.json b/test/client-side-encryption/spec/legacy/updateOne.json similarity index 100% rename from test/client-side-encryption/spec/updateOne.json rename to test/client-side-encryption/spec/legacy/updateOne.json diff --git a/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..e07137ce15 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/validatorAndPartialFieldExpression.json @@ -0,0 +1,642 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json new file mode 100644 index 0000000000..7dc371143b --- /dev/null +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -0,0 +1,603 @@ +{ + "description": "addKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "add keyAltName to non-existent data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "new_key_alt_name" + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "new_key_alt_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with no keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add existing keyAltName to existing data key", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "add new keyAltName to data key with keyAltNames", + "operations": [ + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "addKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "another_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": "$keyAltNames" + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "another_name" + }, + { + "keyAltNames": "local_key" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$addToSet": { + "keyAltNames": "another_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json new file mode 100644 index 0000000000..b2c8d83e05 --- /dev/null +++ b/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json @@ -0,0 +1,112 @@ +{ + "description": "createKey-provider-invalid", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "create data key without required master key fields", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": {} + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key field", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "masterKey": { + "invalid": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + }, + { + "description": "create data key with invalid master key", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "invalid" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createKey.json b/test/client-side-encryption/spec/unified/createKey.json new file mode 100644 index 0000000000..adb3fff20d --- /dev/null +++ b/test/client-side-encryption/spec/unified/createKey.json @@ -0,0 +1,711 @@ +{ + "description": "createKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with AWS KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with Azure KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure", + "opts": { + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with GCP KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with KMIP KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with local KMS provider", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with no keyAltName", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$exists": false + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with single keyAltName", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "local_key" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with multiple keyAltNames", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc", + "def" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0, + "keyAltNames": 1 + } + }, + { + "$unwind": "$keyAltNames" + }, + { + "$sort": { + "keyAltNames": 1 + } + } + ] + }, + "expectResult": [ + { + "keyAltNames": "abc" + }, + { + "keyAltNames": "def" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": { + "$$type": "array" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "create datakey with custom key material", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with invalid custom key material (too short)", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyMaterial": { + "$binary": { + "base64": "a2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFsa2V5X21hdGVyaWFs", + "subType": "00" + } + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/deleteKey.json b/test/client-side-encryption/spec/unified/deleteKey.json new file mode 100644 index 0000000000..a3b2f98a50 --- /dev/null +++ b/test/client-side-encryption/spec/unified/deleteKey.json @@ -0,0 +1,553 @@ +{ + "description": "deleteKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "delete non-existent data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing AWS data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "delete existing local data key", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ] + } + ] + }, + { + "description": "delete existing data key twice", + "operations": [ + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "deleteKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "delete": "datakeys", + "deletes": [ + { + "q": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "limit": 1 + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json new file mode 100644 index 0000000000..f2f2c68113 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -0,0 +1,313 @@ +{ + "description": "getKey", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "AAAzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKey", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json new file mode 100644 index 0000000000..18ed2e1943 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -0,0 +1,283 @@ +{ + "description": "getKeyByAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "get non-existent data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "does_not_exist" + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "does_not_exist" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing AWS data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "aws_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "aws_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "get existing local data key", + "operations": [ + { + "name": "getKeyByAltName", + "object": "clientEncryption0", + "arguments": { + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "local_key" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKeys.json b/test/client-side-encryption/spec/unified/getKeys.json new file mode 100644 index 0000000000..bd07af3804 --- /dev/null +++ b/test/client-side-encryption/spec/unified/getKeys.json @@ -0,0 +1,260 @@ +{ + "description": "getKeys", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "getKeys with zero key documents", + "operations": [ + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with single key documents", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local", + "opts": { + "keyAltNames": [ + "abc" + ] + } + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyAltNames": [ + "abc" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "getKeys with many key documents", + "operations": [ + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "createKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local" + }, + "expectResult": { + "$$type": "binData" + } + }, + { + "name": "getKeys", + "object": "clientEncryption0", + "expectResult": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + }, + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$type": "int" + }, + "masterKey": { + "$$type": "object" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json new file mode 100644 index 0000000000..f94d9b02dc --- /dev/null +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -0,0 +1,572 @@ +{ + "description": "removeKeyAltName", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": {} + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "remove keyAltName from non-existent data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": null + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "AAAjYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "does_not_exist" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove non-existent keyAltName from existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "does_not_exist" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "does_not_exist" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "datakeys", + "databaseName": "keyvault", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ] + }, + { + "description": "remove an existing keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "_id": 0, + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "keyAltNames": [ + "local_key" + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "alternate_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "remove the last keyAltName from an existing data key", + "operations": [ + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "alternate_name" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "alternate_name", + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + }, + { + "name": "removeKeyAltName", + "object": "clientEncryption0", + "arguments": { + "id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltName": "local_key" + }, + "expectResult": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "alternate_name" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "findAndModify": "datakeys", + "query": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "update": { + "$pull": { + "keyAltNames": "local_key" + } + }, + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "updates": [ + { + "q": { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + } + }, + "u": { + "$unset": { + "keyAltNames": true + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json new file mode 100644 index 0000000000..4c7d4e8048 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-decrypt_failure.json @@ -0,0 +1,162 @@ +{ + "description": "rewrapManyDataKey-decrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap data key that fails during decryption due to invalid masterKey", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "local" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json new file mode 100644 index 0000000000..cd2d20c255 --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey-encrypt_failure.json @@ -0,0 +1,250 @@ +{ + "description": "rewrapManyDataKey-encrypt_failure", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap with invalid masterKey for AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-2:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-2" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "invalid-vault-csfle.vault.azure.net", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with invalid masterKey for GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {}, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "invalid-ring-csfle", + "keyName": "invalid-name-csfle" + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json new file mode 100644 index 0000000000..ed7568ca4d --- /dev/null +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -0,0 +1,1373 @@ +{ + "description": "rewrapManyDataKey-kms_providers", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEGkNTybTc7Eyif0f+qqE0lAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDB2j78AeuIQxcRh8cQIBEIB7vj9buHEaT7XHFIsKBJiyzZRmNnjvqMK5LSdzonKdx97jlqauvPvTDXSsdQDcspUs5oLrGmAXpbFResscxmbwZoKgUtWiuIOpeAcYuszCiMKt15s1WIMLDXUhYtfCmhRhekvgHnRAaK4HJMlGE+lKJXYI84E0b86Cd/g+", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip_key" + ], + "keyMaterial": { + "$binary": { + "base64": "VoI9J8HusQ3u2gT9i8Awgg/6W4/igvLwRzn3SRDGx0Dl/1ayDMubphOw0ONPVKfuvS6HL3e4gAoCJ/uEz2KLFTVsEqYCpMhfAhgXxm8Ena8vDcOkCzFX+euvN/N2ES3wpzAD18b3qIH0MbBwKJP82d5GQ4pVfGnPW8Ujp9aO1qC/s0EqNqYyzJ1SyzhV9lAjHHGIENYJx+bBrekg2EeZBA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "no keys to rewrap due to no filter matches", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": "no_matching_keys" + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": "no_matching_keys" + }, + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new AWS KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "opts": { + "provider": "aws", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new Azure KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "opts": { + "provider": "azure", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new GCP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "opts": { + "provider": "gcp", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new KMIP KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "opts": { + "provider": "kmip" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new local KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "opts": { + "provider": "local" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with current KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": {} + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "projection": { + "masterKey": 1 + }, + "sort": { + "keyAltNames": 1 + } + }, + "expectResult": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": {}, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "upsert": false, + "multi": false + } + ], + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 288c137c7e..f5c6127a25 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -210,7 +210,7 @@ def assertBinaryUUID(self, val): # Location of JSON test files. BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") -SPEC_PATH = os.path.join(BASE, "spec") +SPEC_PATH = os.path.join(BASE, "spec", "legacy") OPTS = CodecOptions() @@ -614,12 +614,13 @@ def parse_auto_encrypt_opts(self, opts): opts["kms_tls_options"] = KMS_TLS_OPTS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" + opts = dict(opts) return AutoEncryptionOpts(**opts) def parse_client_options(self, opts): """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop("autoEncryptOpts") + encrypt_opts = opts.pop("autoEncryptOpts", None) if encrypt_opts: opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) @@ -638,18 +639,18 @@ def maybe_skip_scenario(self, test): def setup_scenario(self, scenario_def): """Override a test's setup.""" key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] json_schema = scenario_def["json_schema"] data = scenario_def["data"] + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) if key_vault_data: - coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] - coll.delete_many({}) coll.insert_many(key_vault_data) db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) db = client_context.client.get_database(db_name, codec_options=OPTS) - coll = db[coll_name] - coll.drop() + coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) wc = WriteConcern(w="majority") kwargs: Dict[str, Any] = {} if json_schema: @@ -657,8 +658,8 @@ def setup_scenario(self, scenario_def): kwargs["codec_options"] = OPTS if not data: kwargs["write_concern"] = wc - db.create_collection(coll_name, **kwargs) - + db.create_collection(coll_name, **kwargs, encrypted_fields=encrypted_fields) + coll = db[coll_name] if data: # Load data. coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) diff --git a/test/utils.py b/test/utils.py index 8a79c97d93..03985772a0 100644 --- a/test/utils.py +++ b/test/utils.py @@ -174,15 +174,26 @@ def failed(self, event): class OvertCommandListener(EventListener): """A CommandListener that ignores sensitive commands.""" + ignore_list_collections = False + def started(self, event): + if self.ignore_list_collections and event.command_name.lower() == "listcollections": + self.ignore_list_collections = False + return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) def succeeded(self, event): + if self.ignore_list_collections and event.command_name.lower() == "listcollections": + self.ignore_list_collections = False + return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): + if self.ignore_list_collections and event.command_name.lower() == "listcollections": + self.ignore_list_collections = False + return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) @@ -983,6 +994,8 @@ def parse_spec_options(opts): if "maxCommitTimeMS" in opts: opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") + if "encryptedFields" in opts: + opts["encrypted_fields"] = opts.pop("encryptedFields") if "hint" in opts: hint = opts.pop("hint") if not isinstance(hint, str): @@ -1049,11 +1062,6 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["requests"] = requests elif arg_name == "session": arguments["session"] = entity_map[arguments["session"]] - elif opname in ("command", "run_admin_command") and arg_name == "command": - # Ensure the first key is the command name. - ordered_command = SON([(spec["command_name"], 1)]) - ordered_command.update(arguments["command"]) - arguments["command"] = ordered_command elif opname == "open_download_stream" and arg_name == "id": arguments["file_id"] = arguments.pop(arg_name) elif opname != "find" and c2s == "max_time_ms": diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4ae4d1bfb4..498a60220b 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -229,7 +229,19 @@ def check_result(self, expected_result, result): return True else: - self.assertEqual(result, expected_result) + + def _helper(expected_result, result): + if isinstance(expected_result, abc.Mapping): + for i in expected_result.keys(): + self.assertEqual(expected_result[i], result[i]) + + elif isinstance(expected_result, list): + for i, k in zip(expected_result, result): + _helper(i, k) + else: + self.assertEqual(expected_result, result) + + _helper(expected_result, result) def get_object_name(self, op): """Allow subclasses to override handling of 'object' @@ -294,8 +306,16 @@ def run_operation(self, sessions, collection, operation): args = {"sessions": sessions, "collection": collection} args.update(arguments) arguments = args - result = cmd(**dict(arguments)) + try: + if name == "create_collection" and ( + "encrypted" in operation["arguments"]["name"] + or "plaintext" in operation["arguments"]["name"] + ): + self.listener.ignore_list_collections = True + result = cmd(**dict(arguments)) + finally: + self.listener.ignore_list_collections = False # Cleanup open change stream cursors. if name == "watch": self.addCleanup(result.close) @@ -323,8 +343,7 @@ def _run_op(self, sessions, collection, op, in_with_transaction): expected_result = op.get("result") if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: - self.run_operation(sessions, collection, op.copy()) - + out = self.run_operation(sessions, collection, op.copy()) if expect_error_message(expected_result): if isinstance(context.exception, BulkWriteError): errmsg = str(context.exception.details).lower() From 890cd26e1a2de3c0024ca7a9e35cdc8add088b34 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 6 Jun 2022 15:36:52 -0400 Subject: [PATCH 0677/2111] PYTHON-3288 Implement client side operation timeout (#954) Add timeoutMS URI option and MongoClient keyword argument. Add provisional/beta pymongo.timeout() api to set a deadline for a block of operations. --- .evergreen/resync-specs.sh | 4 + doc/api/pymongo/index.rst | 2 + doc/changelog.rst | 7 + pymongo/__init__.py | 47 +- pymongo/_csot.py | 80 + pymongo/bulk.py | 2 + pymongo/client_options.py | 11 + pymongo/client_session.py | 3 +- pymongo/collection.py | 4 + pymongo/common.py | 26 +- pymongo/database.py | 16 +- pymongo/encryption.py | 13 +- pymongo/message.py | 20 +- pymongo/mongo_client.py | 21 +- pymongo/network.py | 31 +- pymongo/ocsp_support.py | 8 +- pymongo/pool.py | 65 +- pymongo/pyopenssl_context.py | 4 +- pymongo/ssl_context.py | 3 + pymongo/ssl_support.py | 2 + pymongo/topology.py | 24 +- test/csot/bulkWrite.json | 159 + test/csot/change-streams.json | 598 ++ test/csot/close-cursors.json | 239 + test/csot/command-execution.json | 260 + test/csot/convenient-transactions.json | 191 + test/csot/cursors.json | 113 + test/csot/deprecated-options.json | 7179 +++++++++++++++++ test/csot/error-transformations.json | 181 + test/csot/global-timeoutMS.json | 5830 +++++++++++++ test/csot/gridfs-advanced.json | 370 + test/csot/gridfs-delete.json | 270 + test/csot/gridfs-download.json | 344 + test/csot/gridfs-find.json | 182 + test/csot/gridfs-upload.json | 408 + test/csot/legacy-timeouts.json | 379 + test/csot/non-tailable-cursors.json | 541 ++ test/csot/override-collection-timeoutMS.json | 3498 ++++++++ test/csot/override-database-timeoutMS.json | 4622 +++++++++++ test/csot/override-operation-timeoutMS.json | 3577 ++++++++ test/csot/retryability-legacy-timeouts.json | 3042 +++++++ test/csot/retryability-timeoutMS.json | 5439 +++++++++++++ test/csot/sessions-inherit-timeoutMS.json | 311 + ...sessions-override-operation-timeoutMS.json | 315 + test/csot/sessions-override-timeoutMS.json | 311 + test/csot/tailable-awaitData.json | 422 + test/csot/tailable-non-awaitData.json | 312 + test/test_csot.py | 32 + test/test_discovery_and_monitoring.py | 3 +- .../legacy/error-labels-blockConnection.json | 159 + .../collectionData-additionalProperties.json | 3 +- ...ollectionData-collectionName-required.json | 3 +- .../collectionData-collectionName-type.json | 3 +- .../collectionData-createOptions-type.json | 39 + .../collectionData-databaseName-required.json | 3 +- .../collectionData-databaseName-type.json | 3 +- .../collectionData-documents-items.json | 3 +- .../collectionData-documents-required.json | 3 +- .../collectionData-documents-type.json | 3 +- ...ctionOrDatabaseOptions-timeoutMS-type.json | 27 + .../expectedError-isTimeoutError-type.json | 25 + ...ventsForClient-ignoreExtraEvents-type.json | 24 + .../collectionData-createOptions.json | 68 + .../valid-pass/createEntities-operation.json | 74 + .../valid-pass/entity-cursor-iterateOnce.json | 108 + .../valid-pass/matches-lte-operator.json | 78 + test/unified_format.py | 114 +- test/uri_options/connection-options.json | 34 +- test/uri_options/tls-options.json | 9 - test/utils.py | 18 + 70 files changed, 40245 insertions(+), 77 deletions(-) create mode 100644 pymongo/_csot.py create mode 100644 test/csot/bulkWrite.json create mode 100644 test/csot/change-streams.json create mode 100644 test/csot/close-cursors.json create mode 100644 test/csot/command-execution.json create mode 100644 test/csot/convenient-transactions.json create mode 100644 test/csot/cursors.json create mode 100644 test/csot/deprecated-options.json create mode 100644 test/csot/error-transformations.json create mode 100644 test/csot/global-timeoutMS.json create mode 100644 test/csot/gridfs-advanced.json create mode 100644 test/csot/gridfs-delete.json create mode 100644 test/csot/gridfs-download.json create mode 100644 test/csot/gridfs-find.json create mode 100644 test/csot/gridfs-upload.json create mode 100644 test/csot/legacy-timeouts.json create mode 100644 test/csot/non-tailable-cursors.json create mode 100644 test/csot/override-collection-timeoutMS.json create mode 100644 test/csot/override-database-timeoutMS.json create mode 100644 test/csot/override-operation-timeoutMS.json create mode 100644 test/csot/retryability-legacy-timeouts.json create mode 100644 test/csot/retryability-timeoutMS.json create mode 100644 test/csot/sessions-inherit-timeoutMS.json create mode 100644 test/csot/sessions-override-operation-timeoutMS.json create mode 100644 test/csot/sessions-override-timeoutMS.json create mode 100644 test/csot/tailable-awaitData.json create mode 100644 test/csot/tailable-non-awaitData.json create mode 100644 test/test_csot.py create mode 100644 test/transactions/legacy/error-labels-blockConnection.json create mode 100644 test/unified-test-format/invalid/collectionData-createOptions-type.json create mode 100644 test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json create mode 100644 test/unified-test-format/invalid/expectedError-isTimeoutError-type.json create mode 100644 test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json create mode 100644 test/unified-test-format/valid-pass/collectionData-createOptions.json create mode 100644 test/unified-test-format/valid-pass/createEntities-operation.json create mode 100644 test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json create mode 100644 test/unified-test-format/valid-pass/matches-lte-operator.json diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 1177ebb04a..4f5366098b 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -105,6 +105,9 @@ do crud|CRUD) cpjson crud/tests/ crud ;; + csot|CSOT|client-side-operations-timeout) + cpjson client-side-operations-timeout/tests csot + ;; load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; @@ -150,6 +153,7 @@ do ;; uri|uri-options|uri_options) cpjson uri-options/tests uri_options + cp "$SPECS"/source/uri-options/tests/*.pem $PYMONGO/test/uri_options ;; stable-api|versioned-api) cpjson versioned-api/tests versioned-api diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index 6e6e337950..a4e15b9878 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -22,6 +22,8 @@ The maximum wire protocol version PyMongo supports. + .. autofunction:: timeout + Sub-modules: .. toctree:: diff --git a/doc/changelog.rst b/doc/changelog.rst index f1085c4bff..5497b4f3e9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,13 @@ Changes in Version 4.2 .. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required. +PyMongo 4.2 brings a number of improvements including: + +- Support for MongoDB 6.0. +- Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout + to an entire block of pymongo operations. +- Beta support for Queryable Encryption with MongoDB 6.0. + Bug fixes ......... diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 17c640b1fd..bdb1ec97c1 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -14,7 +14,7 @@ """Python driver for MongoDB.""" -from typing import Tuple, Union +from typing import ContextManager, Optional, Tuple, Union ASCENDING = 1 """Ascending sort order.""" @@ -69,6 +69,7 @@ def get_version_string() -> str: """Current version of PyMongo.""" +from pymongo import _csot from pymongo.collection import ReturnDocument # noqa: F401 from pymongo.common import ( # noqa: F401 MAX_SUPPORTED_WIRE_VERSION, @@ -97,3 +98,47 @@ def has_c() -> bool: return True except ImportError: return False + + +def timeout(seconds: Optional[float]) -> ContextManager: + """**(Provisional)** Apply the given timeout for a block of operations. + + .. note:: :func:`~pymongo.timeout` is currently provisional. Backwards + incompatible changes may occur before becoming officially supported. + + Use :func:`~pymongo.timeout` in a with-statement:: + + with pymongo.timeout(5): + client.db.coll.insert_one({}) + client.db.coll2.insert_one({}) + + When the with-statement is entered, a deadline is set for the entire + block. When that deadline is exceeded, any blocking pymongo operation + will raise a timeout exception. For example:: + + try: + with pymongo.timeout(5): + client.db.coll.insert_one({}) + time.sleep(5) + # The deadline has now expired, the next operation will raise + # a timeout exception. + client.db.coll2.insert_one({}) + except (ServerSelectionTimeoutError, ExecutionTimeout, WTimeoutError, + NetworkTimeout) as exc: + print(f"block timed out: {exc!r}") + + :Parameters: + - `seconds`: A non-negative floating point number expressing seconds, or None. + + :Raises: + - :py:class:`ValueError`: When `seconds` is negative. + + .. versionadded:: 4.2 + """ + if not isinstance(seconds, (int, float, type(None))): + raise TypeError("timeout must be None, an int, or a float") + if seconds and seconds < 0: + raise ValueError("timeout cannot be negative") + if seconds is not None: + seconds = float(seconds) + return _csot._TimeoutContext(seconds) diff --git a/pymongo/_csot.py b/pymongo/_csot.py new file mode 100644 index 0000000000..4085562ca8 --- /dev/null +++ b/pymongo/_csot.py @@ -0,0 +1,80 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Internal helpers for CSOT.""" + +import time +from contextvars import ContextVar +from typing import Optional + +TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) +RTT: ContextVar[float] = ContextVar("RTT", default=0.0) +DEADLINE: ContextVar[float] = ContextVar("DEADLINE", default=float("inf")) + + +def get_timeout() -> Optional[float]: + return TIMEOUT.get(None) + + +def get_rtt() -> float: + return RTT.get() + + +def get_deadline() -> float: + return DEADLINE.get() + + +def set_rtt(rtt: float) -> None: + RTT.set(rtt) + + +def set_timeout(timeout: Optional[float]) -> None: + TIMEOUT.set(timeout) + DEADLINE.set(time.monotonic() + timeout if timeout else float("inf")) + + +def remaining() -> Optional[float]: + if not get_timeout(): + return None + return DEADLINE.get() - time.monotonic() + + +def clamp_remaining(max_timeout: float) -> float: + """Return the remaining timeout clamped to a max value.""" + timeout = remaining() + if timeout is None: + return max_timeout + return min(timeout, max_timeout) + + +class _TimeoutContext(object): + """Internal timeout context manager. + + Use :func:`pymongo.timeout` instead:: + + with client.timeout(0.5): + client.test.test.insert_one({}) + """ + + __slots__ = ("_timeout",) + + def __init__(self, timeout: Optional[float]): + self._timeout = timeout + + def __enter__(self): + set_timeout(self._timeout) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + set_timeout(None) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 44923f73df..7992383f67 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -330,6 +330,8 @@ def _execute_command( session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info) sock_info.send_cluster_time(cmd, session, client) sock_info.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + sock_info.apply_timeout(client, cmd) ops = islice(run.ops, run.idx_offset, None) # Run as many ops as possible in one command. diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 4987601d5c..6784e32848 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -14,6 +14,8 @@ """Tools to parse mongo client options.""" +from typing import Optional + from bson.codec_options import _parse_codec_options from pymongo import common from pymongo.auth import _build_credentials_tuple @@ -195,6 +197,7 @@ def __init__(self, username, password, database, options): self.__server_selector = options.get("server_selector", any_server_selector) self.__auto_encryption_opts = options.get("auto_encryption_opts") self.__load_balanced = options.get("loadbalanced") + self.__timeout = options.get("timeoutms") @property def _options(self): @@ -260,6 +263,14 @@ def read_concern(self): """A :class:`~pymongo.read_concern.ReadConcern` instance.""" return self.__read_concern + @property + def timeout(self) -> Optional[float]: + """The timeout. + + ..versionadded: 4.2 + """ + return self.__timeout + @property def retry_writes(self): """If this instance should retry supported write operations.""" diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 7d70eb8f19..3ff98a579f 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -150,6 +150,7 @@ from bson.int64 import Int64 from bson.son import SON from bson.timestamp import Timestamp +from pymongo import _csot from pymongo.cursor import _SocketManager from pymongo.errors import ( ConfigurationError, @@ -826,7 +827,7 @@ def _finish_transaction(self, sock_info, command_name): wc = opts.write_concern cmd = SON([(command_name, 1)]) if command_name == "commitTransaction": - if opts.max_commit_time_ms: + if opts.max_commit_time_ms and _csot.get_timeout() is None: cmd["maxTimeMS"] = opts.max_commit_time_ms # Transaction spec says that after the initial commit attempt, diff --git a/pymongo/collection.py b/pymongo/collection.py index ffd883e939..9f3f73198b 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -116,6 +116,7 @@ def __init__( write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + timeout: Optional[float] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> None: @@ -198,6 +199,7 @@ def __init__( read_preference or database.read_preference, write_concern or database.write_concern, read_concern or database.read_concern, + timeout if timeout is not None else database.timeout, ) if not isinstance(name, str): raise TypeError("name must be an instance of str") @@ -390,6 +392,7 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> "Collection[_DocumentType]": """Get a clone of this collection changing the specified settings. @@ -428,6 +431,7 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, + timeout=timeout if timeout is not None else self.timeout, ) def bulk_write( diff --git a/pymongo/common.py b/pymongo/common.py index 4376654405..858684bf05 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -339,6 +339,15 @@ def validate_timeout_or_none_or_zero(option: Any, value: Any) -> Optional[float] return validate_positive_float(option, value) / 1000.0 +def validate_timeoutms(option: Any, value: Any) -> Optional[float]: + """Validates a timeout specified in milliseconds returning + a value in floating point seconds. + """ + if value is None: + return None + return validate_positive_float_or_zero(option, value) / 1000.0 + + def validate_max_staleness(option: str, value: Any) -> int: """Validates maxStalenessSeconds according to the Max Staleness Spec.""" if value == -1 or value == "-1": @@ -658,6 +667,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "zlibcompressionlevel": validate_zlib_compression_level, "srvservicename": validate_string, "srvmaxhosts": validate_non_negative_integer, + "timeoutms": validate_timeoutms, } # Dictionary where keys are the names of URI options specific to pymongo, @@ -821,8 +831,8 @@ def __init__( read_preference: _ServerMode, write_concern: WriteConcern, read_concern: ReadConcern, + timeout: Optional[float], ) -> None: - if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") self.__codec_options = codec_options @@ -845,6 +855,12 @@ def __init__( raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern + if not isinstance(timeout, (int, float, type(None))): + raise TypeError("timeout must be None, an int, or a float") + if timeout and timeout < 0: + raise TypeError("timeout cannot be negative") + self.__timeout = float(timeout) if timeout else None + @property def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` @@ -894,6 +910,14 @@ def read_concern(self) -> ReadConcern: """ return self.__read_concern + @property + def timeout(self) -> Optional[float]: + """Read only access to the timeout of this instance. + + .. versionadded:: 4.2 + """ + return self.__timeout + class _CaseInsensitiveDictionary(abc.MutableMapping): def __init__(self, *args, **kwargs): diff --git a/pymongo/database.py b/pymongo/database.py index bb91196f2e..393f63c8c8 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -75,6 +75,7 @@ def __init__( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> None: """Get a database by client and name. @@ -127,6 +128,7 @@ def __init__( read_preference or client.read_preference, write_concern or client.write_concern, read_concern or client.read_concern, + timeout if timeout is not None else client.timeout, ) if not isinstance(name, str): @@ -154,6 +156,7 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. @@ -193,6 +196,7 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, + timeout if timeout is not None else self.timeout, ) def __eq__(self, other: Any) -> bool: @@ -241,6 +245,7 @@ def get_collection( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -280,7 +285,14 @@ def get_collection( used. """ return Collection( - self, name, False, codec_options, read_preference, write_concern, read_concern + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + timeout=timeout, ) def create_collection( @@ -291,6 +303,7 @@ def create_collection( write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, + timeout: Optional[float] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Collection[_DocumentType]: @@ -421,6 +434,7 @@ def create_collection( write_concern, read_concern, session=s, + timeout=timeout, encrypted_fields=encrypted_fields, **kwargs, ) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a7a69dbe34..a088bd2da8 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -16,6 +16,7 @@ import contextlib import enum +import socket import uuid import weakref from typing import Any, Mapping, Optional, Sequence @@ -38,6 +39,7 @@ from bson.errors import BSONError from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON +from pymongo import _csot from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import ( @@ -47,6 +49,7 @@ ServerSelectionTimeoutError, ) from pymongo.mongo_client import MongoClient +from pymongo.network import BLOCKING_IO_ERRORS from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern from pymongo.ssl_support import get_ssl_context @@ -119,9 +122,11 @@ def kms_request(self, kms_context): False, # allow_invalid_hostnames False, ) # disable_ocsp_endpoint_check + # CSOT: set timeout for socket creation. + connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) opts = PoolOptions( - connect_timeout=_KMS_CONNECT_TIMEOUT, - socket_timeout=_KMS_CONNECT_TIMEOUT, + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, ssl_context=ctx, ) host, port = parse_host(endpoint, _HTTPS_PORT) @@ -129,10 +134,14 @@ def kms_request(self, kms_context): try: conn.sendall(message) while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) data = conn.recv(kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") finally: conn.close() diff --git a/pymongo/message.py b/pymongo/message.py index de43d20c97..bcdedd7b48 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -300,6 +300,9 @@ def __init__( self._as_command = None self.exhaust = exhaust + def reset(self): + self._as_command = None + def namespace(self): return "%s.%s" % (self.db, self.coll) @@ -320,7 +323,7 @@ def use_command(self, sock_info): sock_info.validate_session(self.client, self.session) return use_find_cmd - def as_command(self, sock_info): + def as_command(self, sock_info, apply_timeout=False): """Return a find command document for this query.""" # We use the command twice: on the wire and for command monitoring. # Generate it once, for speed and to avoid repeating side-effects. @@ -356,6 +359,9 @@ def as_command(self, sock_info): client = self.client if client._encrypter and not client._encrypter._bypass_auto_encryption: cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) + # Support CSOT + if apply_timeout: + sock_info.apply_timeout(client, cmd) self._as_command = cmd, self.db return self._as_command @@ -371,7 +377,7 @@ def get_message(self, read_preference, sock_info, use_cmd=False): spec = self.spec if use_cmd: - spec = self.as_command(sock_info)[0] + spec = self.as_command(sock_info, apply_timeout=True)[0] request_id, msg, size, _ = _op_msg( 0, spec, @@ -457,6 +463,9 @@ def __init__( self.exhaust = exhaust self.comment = comment + def reset(self): + self._as_command = None + def namespace(self): return "%s.%s" % (self.db, self.coll) @@ -471,7 +480,7 @@ def use_command(self, sock_info): sock_info.validate_session(self.client, self.session) return use_cmd - def as_command(self, sock_info): + def as_command(self, sock_info, apply_timeout=False): """Return a getMore command document for this query.""" # See _Query.as_command for an explanation of this caching. if self._as_command is not None: @@ -493,6 +502,9 @@ def as_command(self, sock_info): client = self.client if client._encrypter and not client._encrypter._bypass_auto_encryption: cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) + # Support CSOT + if apply_timeout: + sock_info.apply_timeout(client, cmd=None) self._as_command = cmd, self.db return self._as_command @@ -503,7 +515,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): ctx = sock_info.compression_context if use_cmd: - spec = self.as_command(sock_info)[0] + spec = self.as_command(sock_info, apply_timeout=True)[0] if self.sock_mgr: flags = _OpMsg.EXHAUST_ALLOWED else: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e1aa80e2f9..7af4b167f1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -57,6 +57,7 @@ from bson.son import SON from bson.timestamp import Timestamp from pymongo import ( + _csot, client_session, common, database, @@ -260,6 +261,10 @@ def __init__( replaced. Defaults to `None` (no limit). - `maxConnecting` (optional): The maximum number of connections that each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. - `socketTimeoutMS`: (integer or None) Controls how long (in milliseconds) the driver will wait for a response after sending an ordinary (non-monitoring) database operation before concluding that @@ -540,6 +545,9 @@ def __init__( .. seealso:: The MongoDB documentation on `connections `_. + .. versionchanged:: 4.2 + Added the ``timeoutMS`` keyword argument. + .. versionchanged:: 4.0 - Removed the fsync, unlock, is_locked, database_names, and @@ -780,6 +788,7 @@ def __init__( options.read_preference, options.write_concern, options.read_concern, + options.timeout, ) self._topology_settings = TopologySettings( @@ -1273,6 +1282,7 @@ def _run_operation(self, operation, unpack_res, address=None): ) def _cmd(session, server, sock_info, read_preference): + operation.reset() # Reset op in case of retry. return server.run_operation( sock_info, operation, read_preference, self._event_listeners, unpack_res ) @@ -1303,6 +1313,7 @@ def _retry_internal(self, retryable, func, session, bulk): max_wire_version = 0 last_error: Optional[Exception] = None retrying = False + multiple_retries = _csot.get_timeout() is not None def is_retrying(): return bulk.retrying if bulk else retrying @@ -1350,7 +1361,7 @@ def is_retrying(): retryable_error = exc.has_error_label("RetryableWriteError") if retryable_error: session._unpin() - if is_retrying() or not retryable_error: + if not retryable_error or (is_retrying() and not multiple_retries): raise if bulk: bulk.retrying = True @@ -1371,6 +1382,7 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable=True ) last_error: Optional[Exception] = None retrying = False + multiple_retries = _csot.get_timeout() is not None while True: try: @@ -1394,12 +1406,12 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable=True # most likely be a waste of time. raise except ConnectionFailure as exc: - if not retryable or retrying: + if not retryable or (retrying and not multiple_retries): raise retrying = True last_error = exc except OperationFailure as exc: - if not retryable or retrying: + if not retryable or (retrying and not multiple_retries): raise if exc.code not in helpers._RETRYABLE_ERROR_CODES: raise @@ -1922,6 +1934,7 @@ def get_database( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, + timeout: Optional[float] = None, ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. @@ -1972,7 +1985,7 @@ def get_database( name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern + self, name, codec_options, read_preference, write_concern, read_concern, timeout ) def _database_default_options(self, name): diff --git a/pymongo/network.py b/pymongo/network.py index df08158b2f..3eac0d02d3 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -21,7 +21,7 @@ import time from bson import _decode_all_selective -from pymongo import helpers, message +from pymongo import _csot, helpers, message, ssl_support from pymongo.common import MAX_MESSAGE_SIZE from pymongo.compression_support import _NO_COMPRESSION, decompress from pymongo.errors import ( @@ -59,6 +59,7 @@ def command( unacknowledged=False, user_fields=None, exhaust_allowed=False, + write_concern=None, ): """Execute a command over the socket, or raise socket.error. @@ -115,6 +116,12 @@ def command( if client and client._encrypter and not client._encrypter._bypass_auto_encryption: spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) + # Support CSOT + if client: + sock_info.apply_timeout(client, spec, write_concern) + elif write_concern and not write_concern.is_server_default: + spec["writeConcern"] = write_concern.document + if use_op_msg: flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 @@ -198,11 +205,14 @@ def command( def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): """Receive a raw BSON message or raise socket.error.""" - timeout = sock_info.sock.gettimeout() - if timeout: - deadline = time.monotonic() + timeout + if _csot.get_timeout(): + deadline = _csot.get_deadline() else: - deadline = None + timeout = sock_info.sock.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None # Ignore the response's request id. length, _, response_to, op_code = _UNPACK_HEADER( _receive_data_on_socket(sock_info, 16, deadline) @@ -271,6 +281,10 @@ def wait_for_read(sock_info, deadline): raise socket.timeout("timed out") +# Errors raised by sockets (and TLS sockets) when in non-blocking mode. +BLOCKING_IO_ERRORS = (BlockingIOError,) + ssl_support.BLOCKING_IO_ERRORS + + def _receive_data_on_socket(sock_info, length, deadline): buf = bytearray(length) mv = memoryview(buf) @@ -278,7 +292,14 @@ def _receive_data_on_socket(sock_info, length, deadline): while bytes_read < length: try: wait_for_read(sock_info, deadline) + # CSOT: Update timeout. When the timeout has expired perform one + # final non-blocking recv. This helps avoid spurious timeouts when + # the response is actually already buffered on the client. + if _csot.get_timeout(): + sock_info.set_socket_timeout(max(deadline - time.monotonic(), 0)) chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") except (IOError, OSError) as exc: # noqa: B014 if _errno_from_exception(exc) == errno.EINTR: continue diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 56d18a29bf..94905d9f47 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -48,6 +48,8 @@ from requests import post as _post from requests.exceptions import RequestException as _RequestException +from pymongo import _csot + # Note: the functions in this module generally return 1 or 0. The reason # is simple. The entry point, ocsp_callback, is registered as a callback # with OpenSSL through PyOpenSSL. The callback must return 1 (success) or @@ -235,12 +237,16 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): ocsp_response = ocsp_response_cache[ocsp_request] _LOGGER.debug("Using cached OCSP response.") except KeyError: + # CSOT: use the configured timeout or 5 seconds, whichever is smaller. + # Note that request's timeout works differently and does not imply an absolute + # deadline: https://requests.readthedocs.io/en/stable/user/quickstart/#timeouts + timeout = max(_csot.clamp_remaining(5), 0.001) try: response = _post( uri, data=ocsp_request.public_bytes(_Encoding.DER), headers={"Content-Type": "application/ocsp-request"}, - timeout=5, + timeout=timeout, ) except _RequestException as exc: _LOGGER.debug("HTTP request failed: %s", exc) diff --git a/pymongo/pool.py b/pymongo/pool.py index d68ba238f2..8a1e72fc0d 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -27,7 +27,7 @@ from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON -from pymongo import __version__, auth, helpers +from pymongo import __version__, _csot, auth, helpers from pymongo.client_session import _validate_session_write_concern from pymongo.common import ( MAX_BSON_SIZE, @@ -46,6 +46,7 @@ ConfigurationError, ConnectionFailure, DocumentTooLarge, + ExecutionTimeout, InvalidOperation, NetworkTimeout, NotPrimaryError, @@ -557,6 +558,43 @@ def __init__(self, sock, pool, address, id): self.pinned_txn = False self.pinned_cursor = False self.active = False + self.last_timeout = self.opts.socket_timeout + + def set_socket_timeout(self, timeout): + """Cache last timeout to avoid duplicate calls to sock.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.sock.settimeout(timeout) + + def apply_timeout(self, client, cmd, write_concern=None): + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_socket_timeout(self.opts.socket_timeout) + + if cmd and write_concern and not write_concern.is_server_default: + cmd["writeConcern"] = write_concern.document + return None + # RTT validation. + rtt = _csot.get_rtt() + max_time_ms = timeout - rtt + if max_time_ms < 0: + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f}" + raise ExecutionTimeout( + errmsg, 50, {"ok": 0, "errmsg": errmsg, "code": 50}, self.max_wire_version + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + wc = write_concern.document if write_concern else {} + wc.pop("wtimeout", None) + if wc: + cmd["writeConcern"] = wc + self.set_socket_timeout(timeout) + return timeout def pin_txn(self): self.pinned_txn = True @@ -602,7 +640,7 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): awaitable = True # If connect_timeout is None there is no timeout. if self.opts.connect_timeout: - self.sock.settimeout(self.opts.connect_timeout + heartbeat_frequency) + self.set_socket_timeout(self.opts.connect_timeout + heartbeat_frequency) if not performing_handshake and cluster_time is not None: cmd["$clusterTime"] = cluster_time @@ -714,8 +752,6 @@ def command( if not (write_concern is None or write_concern.acknowledged or collation is None): raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - if write_concern and not write_concern.is_server_default: - spec["writeConcern"] = write_concern.document self.add_server_api(spec) if session: @@ -748,6 +784,7 @@ def command( unacknowledged=unacknowledged, user_fields=user_fields, exhaust_allowed=exhaust_allowed, + write_concern=write_concern, ) except (OperationFailure, NotPrimaryError): raise @@ -978,7 +1015,13 @@ def _create_connection(address, options): _set_non_inheritable_non_atomic(sock.fileno()) try: sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - sock.settimeout(options.connect_timeout) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) _set_keepalive_times(sock) sock.connect(sa) @@ -1416,7 +1459,9 @@ def _get_socket(self): self.operation_count += 1 # Get a free socket or create one. - if self.opts.wait_queue_timeout: + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: deadline = time.monotonic() + self.opts.wait_queue_timeout else: deadline = None @@ -1582,25 +1627,25 @@ def _raise_wait_queue_timeout(self) -> NoReturn: listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.TIMEOUT ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout if self.opts.load_balanced: other_ops = self.active_sockets - self.ncursors - self.ntxns raise ConnectionFailure( "Timeout waiting for connection from the connection pool. " "maxPoolSize: %s, connections in use by cursors: %s, " "connections in use by transactions: %s, connections in use " - "by other operations: %s, wait_queue_timeout: %s" + "by other operations: %s, timeout: %s" % ( self.opts.max_pool_size, self.ncursors, self.ntxns, other_ops, - self.opts.wait_queue_timeout, + timeout, ) ) raise ConnectionFailure( "Timed out while checking out a connection from connection pool. " - "maxPoolSize: %s, wait_queue_timeout: %s" - % (self.opts.max_pool_size, self.opts.wait_queue_timeout) + "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout) ) def __del__(self): diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 1a57ff4f2b..758a741b6f 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -82,7 +82,7 @@ def _is_ip_address(address): # According to the docs for Connection.send it can raise # WantX509LookupError and should be retried. -_RETRY_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +BLOCKING_IO_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) def _ragged_eof(exc): @@ -106,7 +106,7 @@ def _call(self, call, *args, **kwargs): while True: try: return call(*args, **kwargs) - except _RETRY_ERRORS as exc: + except BLOCKING_IO_ERRORS as exc: if isinstance(exc, _SSL.WantReadError): want_read = True want_write = False diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 4e997a439e..63970cb5e2 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -27,6 +27,9 @@ HAS_SNI = getattr(_ssl, "HAS_SNI", False) IS_PYOPENSSL = False +# Errors raised by SSL sockets when in non-blocking mode. +BLOCKING_IO_ERRORS = (_ssl.SSLWantReadError, _ssl.SSLWantWriteError) + # Base Exception class SSLError = _ssl.SSLError diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index d1381ce0e4..13c5315eee 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -38,6 +38,7 @@ HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = True SSLError = _ssl.SSLError + BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS def get_ssl_context( certfile, @@ -91,6 +92,7 @@ class SSLError(Exception): # type: ignore HAS_SNI = False IPADDR_SAFE = False + BLOCKING_IO_ERRORS = () # type: ignore def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" diff --git a/pymongo/topology.py b/pymongo/topology.py index 4b5ff87bb5..db832a8e55 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -23,7 +23,7 @@ import weakref from typing import Any -from pymongo import common, helpers, periodic_executor +from pymongo import _csot, common, helpers, periodic_executor from pymongo.client_session import _ServerSessionPool from pymongo.errors import ( ConfigurationError, @@ -191,6 +191,13 @@ def open(self): with self._lock: self._ensure_opened() + def get_server_selection_timeout(self): + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + return self._settings.server_selection_timeout + return timeout + def select_servers(self, selector, server_selection_timeout=None, address=None): """Return a list of Servers matching selector, or time out. @@ -208,7 +215,7 @@ def select_servers(self, selector, server_selection_timeout=None, address=None): `server_selection_timeout` if no matching servers are found. """ if server_selection_timeout is None: - server_timeout = self._settings.server_selection_timeout + server_timeout = self.get_server_selection_timeout() else: server_timeout = server_selection_timeout @@ -250,8 +257,7 @@ def _select_servers_loop(self, selector, timeout, address): self._description.check_compatible() return server_descriptions - def select_server(self, selector, server_selection_timeout=None, address=None): - """Like select_servers, but choose a random server if several match.""" + def _select_server(self, selector, server_selection_timeout=None, address=None): servers = self.select_servers(selector, server_selection_timeout, address) if len(servers) == 1: return servers[0] @@ -261,6 +267,12 @@ def select_server(self, selector, server_selection_timeout=None, address=None): else: return server2 + def select_server(self, selector, server_selection_timeout=None, address=None): + """Like select_servers, but choose a random server if several match.""" + server = self._select_server(selector, server_selection_timeout, address) + _csot.set_rtt(server.description.round_trip_time) + return server + def select_server_by_address(self, address, server_selection_timeout=None): """Return a Server for "address", reconnecting if necessary. @@ -535,11 +547,11 @@ def _check_session_support(self): if self._description.topology_type == TOPOLOGY_TYPE.Single: if not self._description.has_known_servers: self._select_servers_loop( - any_server_selector, self._settings.server_selection_timeout, None + any_server_selector, self.get_server_selection_timeout(), None ) elif not self._description.readable_servers: self._select_servers_loop( - readable_server_selector, self._settings.server_selection_timeout, None + readable_server_selector, self.get_server_selection_timeout(), None ) session_timeout = self._description.logical_session_timeout_minutes diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json new file mode 100644 index 0000000000..14d5b654f6 --- /dev/null +++ b/test/csot/bulkWrite.json @@ -0,0 +1,159 @@ +{ + "description": "timeoutMS behaves correctly for bulkWrite operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to entire bulkWrite, not individual commands", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert", + "update" + ], + "blockConnection": true, + "blockTimeMS": 120 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 1 + } + } + } + ], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/change-streams.json b/test/csot/change-streams.json new file mode 100644 index 0000000000..a8b2b7e170 --- /dev/null +++ b/test/csot/change-streams.json @@ -0,0 +1,598 @@ +{ + "description": "timeoutMS behaves correctly for change streams", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to initial aggregate", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 50 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 1050 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 20, + "batchSize": 2, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to full resume attempt in a next call", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 20 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "getMore", + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 12, + "errorCode": 7, + "errorLabels": [ + "ResumableChangeStreamError" + ] + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "change stream can be iterated again if previous iteration times out", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "maxAwaitTimeMS": 1, + "timeoutMS": 100 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 10 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/close-cursors.json b/test/csot/close-cursors.json new file mode 100644 index 0000000000..1361971c4c --- /dev/null +++ b/test/csot/close-cursors.json @@ -0,0 +1,239 @@ +{ + "description": "timeoutMS behaves correctly when closing cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore" + } + }, + { + "commandFailedEvent": { + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for close", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "killCursors" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "batchSize": 2, + "timeoutMS": 20 + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "close", + "object": "cursor", + "arguments": { + "timeoutMS": 40 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "killCursors" + } + }, + { + "commandSucceededEvent": { + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json new file mode 100644 index 0000000000..f51b09d2d7 --- /dev/null +++ b/test/csot/command-execution.json @@ -0,0 +1,260 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 20 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "regularCollection", + "database": "database", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl", + "collectionOptions": { + "timeoutMS": 60 + } + } + } + ] + } + }, + { + "name": "insertOne", + "object": "regularCollection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 60 + } + } + } + } + ] + } + ] + }, + { + "description": "command is not sent if RTT is greater than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "rttTooHighTest", + "blockConnection": true, + "blockTimeMS": 20 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "rttTooHighTest", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "regularCollection", + "database": "database", + "collectionName": "coll" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl", + "collectionOptions": { + "timeoutMS": 2 + } + } + } + ] + } + }, + { + "name": "insertOne", + "object": "regularCollection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json new file mode 100644 index 0000000000..0c8cc6edd9 --- /dev/null +++ b/test/csot/convenient-transactions.json @@ -0,0 +1,191 @@ +{ + "description": "timeoutMS behaves correctly for the withTransaction API", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback", + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session", + "timeoutMS": 100 + }, + "expectError": { + "isClientError": true + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "timeoutMS is not refreshed for each operation in the callback", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + }, + "session": "session" + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/cursors.json b/test/csot/cursors.json new file mode 100644 index 0000000000..36949d7509 --- /dev/null +++ b/test/csot/cursors.json @@ -0,0 +1,113 @@ +{ + "description": "tests for timeoutMS behavior that applies to all cursor types", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client" + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "find errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "collection aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "database aggregate errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [], + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listCollections errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "listIndexes errors if timeoutMode is set and timeoutMS is not", + "operations": [ + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json new file mode 100644 index 0000000000..0e2bdefd73 --- /dev/null +++ b/test/csot/deprecated-options.json @@ -0,0 +1,7179 @@ +{ + "description": "operations ignore deprected timeout options if timeoutMS is set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json new file mode 100644 index 0000000000..4d9e061c3b --- /dev/null +++ b/test/csot/error-transformations.json @@ -0,0 +1,181 @@ +{ + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "basic MaxTimeMSExpired error is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "write concern error MaxTimeMSExpired is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 50, + "errmsg": "maxTimeMS expired" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json new file mode 100644 index 0000000000..34854ac155 --- /dev/null +++ b/test/csot/global-timeoutMS.json @@ -0,0 +1,5830 @@ +{ + "description": "timeoutMS can be configured on a MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json new file mode 100644 index 0000000000..668b93f37a --- /dev/null +++ b/test/csot/gridfs-advanced.json @@ -0,0 +1,370 @@ +{ + "description": "timeoutMS behaves correctly for advanced GridFS API operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo", + "timeoutMS": 100 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to update during a rename", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "rename", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "newFilename": "foo" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be overridden for drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "arguments": { + "timeoutMS": 100 + } + } + ] + }, + { + "description": "timeoutMS applied to files collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop", + "databaseName": "test", + "command": { + "drop": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to chunks collection drop", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to drop as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "drop" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "drop", + "object": "bucket", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json new file mode 100644 index 0000000000..f458fa827c --- /dev/null +++ b/test/csot/gridfs-delete.json @@ -0,0 +1,270 @@ +{ + "description": "timeoutMS behaves correctly for GridFS delete operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for delete", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 100 + } + } + ] + }, + { + "description": "timeoutMS applied to delete against the files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to delete against the chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to entire delete, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "delete", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json new file mode 100644 index 0000000000..a3044a6d81 --- /dev/null +++ b/test/csot/gridfs-download.json @@ -0,0 +1,344 @@ +{ + "description": "timeoutMS behaves correctly for GridFS download operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for download", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + }, + "timeoutMS": 100 + } + } + ] + }, + { + "description": "timeoutMS applied to find to get files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find to get chunks", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to entire download, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "download", + "object": "bucket", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.chunks", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json new file mode 100644 index 0000000000..f75a279c01 --- /dev/null +++ b/test/csot/gridfs-find.json @@ -0,0 +1,182 @@ +{ + "description": "timeoutMS behaves correctly for GridFS find operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for a find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {}, + "timeoutMS": 100 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find command", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "bucket", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "fs.files", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json new file mode 100644 index 0000000000..b0daeb2e42 --- /dev/null +++ b/test/csot/gridfs-upload.json @@ -0,0 +1,408 @@ +{ + "description": "timeoutMS behaves correctly for GridFS upload operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "collection": { + "id": "filesCollection", + "database": "database", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "chunksCollection", + "database": "database", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for upload", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, + "timeoutMS": 1000 + } + } + ] + }, + { + "description": "timeoutMS applied to initial find on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for files collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to listIndexes on chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to index creation for chunks collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to chunk insertion", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to creation of files document", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "timeoutMS applied to upload as a whole, not individual parts", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "upload", + "object": "bucket", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/csot/legacy-timeouts.json b/test/csot/legacy-timeouts.json new file mode 100644 index 0000000000..3a2d2eaefb --- /dev/null +++ b/test/csot/legacy-timeouts.json @@ -0,0 +1,379 @@ +{ + "description": "legacy timeouts continue to work if timeoutMS is not set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "socketTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "socketTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "waitQueueTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "wTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "wTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + }, + "writeConcern": { + "wtimeout": 50000 + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS option is used directly as the maxTimeMS field on a command", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "maxTimeMS": 50000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": 50000 + } + } + } + ] + } + ] + }, + { + "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 1000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": 1000 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json new file mode 100644 index 0000000000..0a5448a6bb --- /dev/null +++ b/test/csot/non-tailable-cursors.json @@ -0,0 +1,541 @@ +{ + "description": "timeoutMS behaves correctly for non-tailable cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "collectionName": "aggregateOutputColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to find if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is unset", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMS": 20, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "timeoutMS": 20, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to find if timeoutMode is iteration", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "timeoutMS": 20, + "batchSize": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "iteration", + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $out errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$out": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "aggregate with $merge errors if timeoutMode is iteration", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$merge": "aggregateOutputColl" + } + ], + "timeoutMS": 100, + "timeoutMode": "iteration" + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + } + ] +} diff --git a/test/csot/override-collection-timeoutMS.json b/test/csot/override-collection-timeoutMS.json new file mode 100644 index 0000000000..7d2c663fc1 --- /dev/null +++ b/test/csot/override-collection-timeoutMS.json @@ -0,0 +1,3498 @@ +{ + "description": "timeoutMS can be overridden for a MongoCollection", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/override-database-timeoutMS.json b/test/csot/override-database-timeoutMS.json new file mode 100644 index 0000000000..9c1b77f903 --- /dev/null +++ b/test/csot/override-database-timeoutMS.json @@ -0,0 +1,4622 @@ +{ + "description": "timeoutMS can be overridden for a MongoDatabase", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isClientError": false, + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json new file mode 100644 index 0000000000..896b996ee8 --- /dev/null +++ b/test/csot/override-operation-timeoutMS.json @@ -0,0 +1,3577 @@ +{ + "description": "timeoutMS can be overridden for an operation", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 0, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json new file mode 100644 index 0000000000..cd2af7fab6 --- /dev/null +++ b/test/csot/retryability-legacy-timeouts.json @@ -0,0 +1,3042 @@ +{ + "description": "legacy timeouts behave correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "operation succeeds after one socket timeout - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 55 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json new file mode 100644 index 0000000000..438ba6b8d2 --- /dev/null +++ b/test/csot/retryability-timeoutMS.json @@ -0,0 +1,5439 @@ +{ + "description": "timeoutMS behaves correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 500, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 500, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json new file mode 100644 index 0000000000..8205c086bc --- /dev/null +++ b/test/csot/sessions-inherit-timeoutMS.json @@ -0,0 +1,311 @@ +{ + "description": "sessions inherit timeoutMS from their parent MongoClient", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 50 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-operation-timeoutMS.json b/test/csot/sessions-override-operation-timeoutMS.json new file mode 100644 index 0000000000..ff26de29f5 --- /dev/null +++ b/test/csot/sessions-override-operation-timeoutMS.json @@ -0,0 +1,315 @@ +{ + "description": "timeoutMS can be overridden for individual session operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 50, + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-override-timeoutMS.json b/test/csot/sessions-override-timeoutMS.json new file mode 100644 index 0000000000..1d3b8932af --- /dev/null +++ b/test/csot/sessions-override-timeoutMS.json @@ -0,0 +1,311 @@ +{ + "description": "timeoutMS can be overridden at the level of a ClientSession", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded-replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 50 + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json new file mode 100644 index 0000000000..6da85c7783 --- /dev/null +++ b/test/csot/tailable-awaitData.json @@ -0,0 +1,422 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 20, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 20, + "batchSize": 1, + "maxAwaitTimeMS": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 1 + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/tailable-non-awaitData.json b/test/csot/tailable-non-awaitData.json new file mode 100644 index 0000000000..34ee660963 --- /dev/null +++ b/test/csot/tailable-non-awaitData.json @@ -0,0 +1,312 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "error if timeoutMode is cursor_lifetime", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "timeoutMode": "cursorLifetime", + "cursorType": "tailable" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "timeoutMS applied to find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - success", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "timeoutMS": 20, + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/test_csot.py b/test/test_csot.py new file mode 100644 index 0000000000..5c7833467f --- /dev/null +++ b/test/test_csot.py @@ -0,0 +1,32 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" + +import os +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +# Location of JSON test specifications. +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index a97eb65432..39979c2d10 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -395,7 +395,7 @@ def record_primary(self): """Run the recordPrimary test operation.""" self._previous_primary = self.scenario_client.primary - def wait_for_primary_change(self, timeout_ms): + def wait_for_primary_change(self, timeout): """Run the waitForPrimaryChange test operation.""" def primary_changed(): @@ -404,7 +404,6 @@ def primary_changed(): return False return primary != self._previous_primary - timeout = timeout_ms / 1000.0 wait_until(primary_changed, "change primary", timeout=timeout) def wait(self, ms): diff --git a/test/transactions/legacy/error-labels-blockConnection.json b/test/transactions/legacy/error-labels-blockConnection.json new file mode 100644 index 0000000000..56b646f7ad --- /dev/null +++ b/test/transactions/legacy/error-labels-blockConnection.json @@ -0,0 +1,159 @@ +{ + "runOn": [ + { + "minServerVersion": "4.2", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "transaction-tests", + "collection_name": "test", + "data": [], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "clientOptions": { + "socketTimeoutMS": 100 + }, + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + }, + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "result": { + "insertedId": 1 + } + }, + { + "name": "commitTransaction", + "object": "session0", + "result": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": null, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": null + }, + "command_name": "insert", + "database_name": "transaction-tests" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": null + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + }, + { + "command_started_event": { + "command": { + "commitTransaction": 1, + "lsid": "session0", + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": null, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "command_name": "commitTransaction", + "database_name": "admin" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + } + ] + } + } + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-additionalProperties.json b/test/unified-test-format/invalid/collectionData-additionalProperties.json index 2d85093109..1f4ed4c154 100644 --- a/test/unified-test-format/invalid/collectionData-additionalProperties.json +++ b/test/unified-test-format/invalid/collectionData-additionalProperties.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-collectionName-required.json b/test/unified-test-format/invalid/collectionData-collectionName-required.json index 040dd86a1c..5426418c88 100644 --- a/test/unified-test-format/invalid/collectionData-collectionName-required.json +++ b/test/unified-test-format/invalid/collectionData-collectionName-required.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-collectionName-type.json b/test/unified-test-format/invalid/collectionData-collectionName-type.json index 676d822e5e..2a922de13e 100644 --- a/test/unified-test-format/invalid/collectionData-collectionName-type.json +++ b/test/unified-test-format/invalid/collectionData-collectionName-type.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-createOptions-type.json b/test/unified-test-format/invalid/collectionData-createOptions-type.json new file mode 100644 index 0000000000..5b78bbcbb6 --- /dev/null +++ b/test/unified-test-format/invalid/collectionData-createOptions-type.json @@ -0,0 +1,39 @@ +{ + "description": "collectionData-createOptions-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "foo" + } + } + ], + "initialData": [ + { + "collectionName": "foo", + "databaseName": "foo", + "createOptions": 0, + "documents": [] + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/collectionData-databaseName-required.json b/test/unified-test-format/invalid/collectionData-databaseName-required.json index 7548f9d5be..8417801390 100644 --- a/test/unified-test-format/invalid/collectionData-databaseName-required.json +++ b/test/unified-test-format/invalid/collectionData-databaseName-required.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-databaseName-type.json b/test/unified-test-format/invalid/collectionData-databaseName-type.json index ef719bbf6a..d3480e8034 100644 --- a/test/unified-test-format/invalid/collectionData-databaseName-type.json +++ b/test/unified-test-format/invalid/collectionData-databaseName-type.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-documents-items.json b/test/unified-test-format/invalid/collectionData-documents-items.json index 2916718d50..beb5af61c4 100644 --- a/test/unified-test-format/invalid/collectionData-documents-items.json +++ b/test/unified-test-format/invalid/collectionData-documents-items.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-documents-required.json b/test/unified-test-format/invalid/collectionData-documents-required.json index 7b8a7ead2a..4aadf9b159 100644 --- a/test/unified-test-format/invalid/collectionData-documents-required.json +++ b/test/unified-test-format/invalid/collectionData-documents-required.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionData-documents-type.json b/test/unified-test-format/invalid/collectionData-documents-type.json index 953cabae6e..9cbd3c164c 100644 --- a/test/unified-test-format/invalid/collectionData-documents-type.json +++ b/test/unified-test-format/invalid/collectionData-documents-type.json @@ -18,8 +18,7 @@ "collection": { "id": "collection0", "database": "database0", - "collectionName": "foo", - "foo": 0 + "collectionName": "foo" } } ], diff --git a/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json new file mode 100644 index 0000000000..088e9d1eb2 --- /dev/null +++ b/test/unified-test-format/invalid/collectionOrDatabaseOptions-timeoutMS-type.json @@ -0,0 +1,27 @@ +{ + "description": "collectionOrDatabaseOptions-timeoutMS-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "foo", + "databaseOptions": { + "timeoutMS": 4.5 + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json new file mode 100644 index 0000000000..5683911d0d --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-isTimeoutError-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-isTimeoutError-type", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "isTimeoutError": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json new file mode 100644 index 0000000000..965190664e --- /dev/null +++ b/test/unified-test-format/invalid/expectedEventsForClient-ignoreExtraEvents-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedEventsForClient-ignoreExtraEvents-type", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [], + "ignoreExtraEvents": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json new file mode 100644 index 0000000000..07ab66baa0 --- /dev/null +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -0,0 +1,68 @@ +{ + "description": "collectionData-createOptions", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "3.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0", + "createOptions": { + "capped": true, + "size": 512 + }, + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "collection is created with the correct options", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collStats", + "command": { + "collStats": "coll0", + "scale": 1 + } + }, + "expectResult": { + "capped": true, + "maxSize": 512 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/createEntities-operation.json b/test/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 0000000000..3fde42919d --- /dev/null +++ b/test/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 0000000000..88fc28e34e --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,108 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": "long" + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/matches-lte-operator.json b/test/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 0000000000..4de65c5838 --- /dev/null +++ b/test/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 61c96d6021..cdba80c23e 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -42,6 +42,7 @@ from test.version import Version from typing import Any +import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util from bson.binary import Binary from bson.objectid import ObjectId @@ -56,9 +57,13 @@ BulkWriteError, ConfigurationError, ConnectionFailure, + ExecutionTimeout, InvalidOperation, + NetworkTimeout, NotPrimaryError, PyMongoError, + ServerSelectionTimeoutError, + WriteConcernError, ) from pymongo.monitoring import ( _SENSITIVE_COMMANDS, @@ -198,11 +203,16 @@ def parse_bulk_write_error_result(error): class NonLazyCursor(object): """A find cursor proxy that creates the remote cursor when initialized.""" - def __init__(self, find_cursor): + def __init__(self, find_cursor, client): + self.client = client self.find_cursor = find_cursor # Create the server side cursor. self.first_result = next(find_cursor, None) + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + def __next__(self): if self.first_result is not None: first = self.first_result @@ -210,8 +220,12 @@ def __next__(self): return first return next(self.find_cursor) + # Added to support the iterateOnce operation. + try_next = __next__ + def close(self): self.find_cursor.close() + self.client = None class EventListenerUtil(CMAPListener, CommandListener): @@ -520,6 +534,11 @@ def _operation_sessionLsid(self, spec, actual, key_to_compare): expected_lsid = self.test.entity_map.get_lsid_for_session(spec) self.test.assertEqual(expected_lsid, actual[key_to_compare]) + def _operation_lte(self, spec, actual, key_to_compare): + if key_to_compare not in actual: + self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") + self.test.assertLessEqual(actual[key_to_compare], spec) + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): method_name = "_operation_%s" % (opname.strip("$"),) try: @@ -710,7 +729,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.7") + SCHEMA_VERSION = Version.from_string("1.9") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -730,6 +749,7 @@ def insert_initial_data(self, initial_data): for i, collection_data in enumerate(initial_data): coll_name = collection_data["collectionName"] db_name = collection_data["databaseName"] + opts = collection_data.get("createOptions", {}) documents = collection_data["documents"] # Setup the collection with as few majority writes as possible. @@ -741,10 +761,12 @@ def insert_initial_data(self, initial_data): else: wc = WriteConcern(w=1) if documents: + if opts: + db.create_collection(coll_name, **opts) db.get_collection(coll_name, write_concern=wc).insert_many(documents) else: # Ensure collection exists - db.create_collection(coll_name, write_concern=wc) + db.create_collection(coll_name, write_concern=wc, **opts) @classmethod def setUpClass(cls): @@ -782,9 +804,26 @@ def maybe_skip_test(self, spec): "Dirty explicit session is discarded" in spec["description"] or "Dirty implicit session is discarded" in spec["description"] ): - raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + self.skipTest("MMAPv1 does not support retryWrites=True") elif "Client side error in command starting transaction" in spec["description"]: - raise unittest.SkipTest("Implement PYTHON-1894") + self.skipTest("Implement PYTHON-1894") + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + if "csot" in class_name: + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if "tailable" in class_name: + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + if "socket timeout" in description: + self.skipTest("CSOT not implemented for socket timeouts") # Some tests need to be skipped based on the operations they try to run. for op in spec["operations"]: @@ -801,10 +840,21 @@ def maybe_skip_test(self, spec): if not client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + if name == "createEntities": + self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) + + def maybe_skip_entity(self, entities): + for entity in entities: + entity_type = next(iter(entity)) + if entity_type == "bucket": + self.skipTest("GridFS is not currently supported (PYTHON-2459)") def process_error(self, exception, spec): is_error = spec.get("isError") is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") error_contains = spec.get("errorContains") error_code = spec.get("errorCode") error_code_name = spec.get("errorCodeName") @@ -825,6 +875,15 @@ def process_error(self, exception, spec): else: self.assertNotIsInstance(exception, PyMongoError) + if is_timeout_error: + # TODO: PYTHON-3291 Implement error transformation. + if isinstance(exception, WriteConcernError): + self.assertEqual(exception.code, 50) + else: + self.assertIsInstance( + exception, (NetworkTimeout, ExecutionTimeout, ServerSelectionTimeoutError) + ) + if error_contains: if isinstance(exception, BulkWriteError): errmsg = str(exception.details).lower() @@ -925,15 +984,21 @@ def _collectionOperation_createFindCursor(self, target, *args, **kwargs): self.__raise_if_unsupported("find", target, Collection) if "filter" not in kwargs: self.fail('createFindCursor requires a "filter" argument') - cursor = NonLazyCursor(target.find(*args, **kwargs)) + cursor = NonLazyCursor(target.find(*args, **kwargs), target.database.client) self.addCleanup(cursor.close) return cursor + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: self.skipTest("PyMongo does not support batch_size for list_indexes") return target.list_indexes(*args, **kwargs) + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + def _sessionOperation_withTransaction(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": self.skipTest("MMAPv1 does not support document-level locking") @@ -946,13 +1011,21 @@ def _sessionOperation_startTransaction(self, target, *args, **kwargs): self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) + def _cursor_iterateOnce(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateOnce", target, NonLazyCursor, ChangeStream) + return target.try_next() + def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) - return next(target) + while target.alive: + try: + return next(target) + except StopIteration: + pass def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor) @@ -960,6 +1033,7 @@ def _cursor_close(self, target, *args, **kwargs): def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] + client = target opname = spec["name"] opargs = spec.get("arguments") expect_error = spec.get("expectError") @@ -977,20 +1051,26 @@ def run_entity_operation(self, spec): spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations ) else: - arguments = tuple() + arguments = {} if isinstance(target, MongoClient): method_name = "_clientOperation_%s" % (opname,) + client = target elif isinstance(target, Database): method_name = "_databaseOperation_%s" % (opname,) + client = target.client elif isinstance(target, Collection): method_name = "_collectionOperation_%s" % (opname,) + client = target.database.client elif isinstance(target, ChangeStream): method_name = "_changeStreamOperation_%s" % (opname,) + client = target._client elif isinstance(target, NonLazyCursor): method_name = "_cursor_%s" % (opname,) + client = target.client elif isinstance(target, ClientSession): method_name = "_sessionOperation_%s" % (opname,) + client = target._client elif isinstance(target, GridFSBucket): raise NotImplementedError else: @@ -1007,7 +1087,17 @@ def run_entity_operation(self, spec): cmd = functools.partial(method, target) try: - result = cmd(**dict(arguments)) + # TODO: PYTHON-3289 apply inherited timeout by default. + inherit_timeout = getattr(target, "timeout", None) + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments or inherit_timeout is not None: + timeout = arguments.pop("timeout", None) + if timeout is None: + timeout = inherit_timeout + with pymongo.timeout(timeout): + result = cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) except Exception as exc: # Ignore all operation errors but to avoid masking bugs don't # ignore things like TypeError and ValueError. @@ -1057,6 +1147,9 @@ def _testOperation_targetedFailPoint(self, spec): self.addCleanup(client.close) self.__set_fail_point(client=client, command_args=spec["failPoint"]) + def _testOperation_createEntities(self, spec): + self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec["session"]] expected_state = getattr(_TxnState, spec["state"].upper()) @@ -1245,6 +1338,7 @@ def run_scenario(self, spec, uri=None): raise unittest.SkipTest("%s" % (skip_reason,)) # process createEntities + self._uri = uri self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) # process initialData @@ -1309,7 +1403,7 @@ def generate_test_classes( class_name_prefix="", expected_failures=[], # noqa: B006 bypass_test_generation_errors=False, - **kwargs + **kwargs, ): """Method for generating test classes. Returns a dictionary where keys are the names of test classes and values are the test class objects.""" diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index 8bb05cc721..b2669b6cf1 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid connection and timeout options are parsed correctly", - "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500", + "uri": "mongodb://example.com/?appname=URI-OPTIONS-SPEC-TEST&connectTimeoutMS=20000&heartbeatFrequencyMS=5000&localThresholdMS=3000&maxIdleTimeMS=50000&replicaSet=uri-options-spec&retryWrites=true&serverSelectionTimeoutMS=15000&socketTimeoutMS=7500&timeoutMS=100", "valid": true, "warning": false, "hosts": null, @@ -16,7 +16,8 @@ "replicaSet": "uri-options-spec", "retryWrites": true, "serverSelectionTimeoutMS": 15000, - "socketTimeoutMS": 7500 + "socketTimeoutMS": 7500, + "timeoutMS": 100 } }, { @@ -238,6 +239,35 @@ "hosts": null, "auth": null, "options": {} + }, + { + "description": "timeoutMS=0", + "uri": "mongodb://example.com/?timeoutMS=0", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "timeoutMS": 0 + } + }, + { + "description": "Non-numeric timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "Too low timeoutMS causes a warning", + "uri": "mongodb://example.com/?timeoutMS=-2", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": {} } ] } diff --git a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json index edf6042943..8beaaddd86 100644 --- a/test/uri_options/tls-options.json +++ b/test/uri_options/tls-options.json @@ -44,15 +44,6 @@ "tlsAllowInvalidCertificates": true } }, - { - "description": "Invalid tlsAllowInvalidCertificates causes a warning", - "uri": "mongodb://example.com/?tlsAllowInvalidCertificates=invalid", - "valid": true, - "warning": true, - "hosts": null, - "auth": null, - "options": {} - }, { "description": "tlsAllowInvalidHostnames is parsed correctly", "uri": "mongodb://example.com/?tlsAllowInvalidHostnames=true", diff --git a/test/utils.py b/test/utils.py index 03985772a0..1aeb7571ab 100644 --- a/test/utils.py +++ b/test/utils.py @@ -35,6 +35,7 @@ from bson.son import SON from pymongo import MongoClient, monitoring, operations, read_preferences from pymongo.collection import ReturnDocument +from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS @@ -651,6 +652,9 @@ def parse_collection_options(opts): if "readConcern" in opts: opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 return opts @@ -988,6 +992,10 @@ def parse_spec_options(opts): if "readConcern" in opts: opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + if "timeoutMS" in opts: + assert isinstance(opts["timeoutMS"], int) + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + if "maxTimeMS" in opts: opts["max_time_ms"] = opts.pop("maxTimeMS") @@ -1041,6 +1049,8 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac # Aggregate uses "batchSize", while find uses batch_size. elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": continue + elif arg_name == "timeoutMode": + raise unittest.SkipTest("PyMongo does not support timeoutMode") # Requires boolean returnDocument. elif arg_name == "returnDocument": arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) @@ -1090,5 +1100,13 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["index_or_name"] = arguments.pop(arg_name) elif opname == "rename" and arg_name == "to": arguments["new_name"] = arguments.pop(arg_name) + elif arg_name == "cursorType": + cursor_type = arguments.pop(arg_name) + if cursor_type == "tailable": + arguments["cursor_type"] = CursorType.TAILABLE + elif cursor_type == "tailableAwait": + arguments["cursor_type"] = CursorType.TAILABLE + else: + assert False, f"Unsupported cursorType: {cursor_type}" else: arguments[c2s] = arguments.pop(arg_name) From 70cfe460639e66b00395567a2f441db705c7a1a9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jun 2022 14:29:07 -0400 Subject: [PATCH 0678/2111] PYTHON-3290 Support nested pymongo.timeout() calls (#962) --- pymongo/__init__.py | 12 ++++++++++++ pymongo/_csot.py | 25 +++++++++++++++---------- pymongo/topology.py | 3 ++- test/test_csot.py | 43 ++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 71 insertions(+), 12 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index bdb1ec97c1..9e877e9551 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -127,6 +127,18 @@ def timeout(seconds: Optional[float]) -> ContextManager: NetworkTimeout) as exc: print(f"block timed out: {exc!r}") + When nesting :func:`~pymongo.timeout`, the nested block overrides the + timeout. When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Uses the 10 second deadline. + coll.find_one() # Uses the original 5 second deadline. + :Parameters: - `seconds`: A non-negative floating point number expressing seconds, or None. diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 4085562ca8..f1601f75d2 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -15,8 +15,8 @@ """Internal helpers for CSOT.""" import time -from contextvars import ContextVar -from typing import Optional +from contextvars import ContextVar, Token +from typing import Optional, Tuple TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) @@ -39,11 +39,6 @@ def set_rtt(rtt: float) -> None: RTT.set(rtt) -def set_timeout(timeout: Optional[float]) -> None: - TIMEOUT.set(timeout) - DEADLINE.set(time.monotonic() + timeout if timeout else float("inf")) - - def remaining() -> Optional[float]: if not get_timeout(): return None @@ -67,14 +62,24 @@ class _TimeoutContext(object): client.test.test.insert_one({}) """ - __slots__ = ("_timeout",) + __slots__ = ("_timeout", "_tokens") def __init__(self, timeout: Optional[float]): self._timeout = timeout + self._tokens: Optional[Tuple[Token, Token, Token]] = None def __enter__(self): - set_timeout(self._timeout) + timeout_token = TIMEOUT.set(self._timeout) + deadline_token = DEADLINE.set( + time.monotonic() + self._timeout if self._timeout else float("inf") + ) + rtt_token = RTT.set(0.0) + self._tokens = (timeout_token, deadline_token, rtt_token) return self def __exit__(self, exc_type, exc_val, exc_tb): - set_timeout(None) + if self._tokens: + timeout_token, deadline_token, rtt_token = self._tokens + TIMEOUT.reset(timeout_token) + DEADLINE.reset(deadline_token) + RTT.reset(rtt_token) diff --git a/pymongo/topology.py b/pymongo/topology.py index db832a8e55..4e82a41228 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -270,7 +270,8 @@ def _select_server(self, selector, server_selection_timeout=None, address=None): def select_server(self, selector, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" server = self._select_server(selector, server_selection_timeout, address) - _csot.set_rtt(server.description.round_trip_time) + if _csot.get_timeout(): + _csot.set_rtt(server.description.round_trip_time) return server def select_server_by_address(self, address, server_selection_timeout=None): diff --git a/test/test_csot.py b/test/test_csot.py index 5c7833467f..d00f8c2916 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -19,14 +19,55 @@ sys.path[0:0] = [""] -from test import unittest +from test import IntegrationTest, unittest from test.unified_format import generate_test_classes +import pymongo +from pymongo import _csot + # Location of JSON test specifications. TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +class TestCSOT(IntegrationTest): + def test_timeout_nested(self): + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + with pymongo.timeout(15): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertGreater(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + with pymongo.timeout(5): + coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + if __name__ == "__main__": unittest.main() From 09b18244ccb80c58dc203208cf1ca04f7381f8f7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jun 2022 15:46:13 -0400 Subject: [PATCH 0679/2111] PYTHON-3293 Document Queryable Encryption API is in beta (#965) --- doc/changelog.rst | 3 ++- pymongo/collection.py | 15 ++++++++++++++- pymongo/database.py | 16 ++++++++++------ pymongo/encryption.py | 21 +++++++++++++++++---- pymongo/encryption_options.py | 12 ++++++++---- 5 files changed, 51 insertions(+), 16 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5497b4f3e9..c53ec2201a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,9 +9,10 @@ Changes in Version 4.2 PyMongo 4.2 brings a number of improvements including: - Support for MongoDB 6.0. +- Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking + changes may be made before the final release. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. -- Beta support for Queryable Encryption with MongoDB 6.0. Bug fixes ......... diff --git a/pymongo/collection.py b/pymongo/collection.py index 9f3f73198b..27550e0fb3 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -159,9 +159,14 @@ def __init__( - `session` (optional): a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. If provided it will be passed to the create collection command. - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 4.0 Removed the reindex, map_reduce, inline_map_reduce, parallel_scan, initialize_unordered_bulk_op, @@ -1156,6 +1161,7 @@ def drop( self, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> None: """Alias for :meth:`~pymongo.database.Database.drop_collection`. @@ -1164,12 +1170,17 @@ def drop( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. The following two calls are equivalent: >>> db.foo.drop() >>> db.drop_collection("foo") + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -1186,7 +1197,9 @@ def drop( self.write_concern, self.read_concern, ) - dbo.drop_collection(self.__name, session=session, comment=comment) + dbo.drop_collection( + self.__name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) def _delete( self, diff --git a/pymongo/database.py b/pymongo/database.py index 393f63c8c8..c9447c1a77 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -336,9 +336,8 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `encrypted_fields`: Document that describes the encrypted fields for Queryable - Encryption. - For example:: + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: { "escCollection": "enxcol_.encryptedCollection.esc", @@ -391,6 +390,9 @@ def create_collection( - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 3.11 This method is now supported inside multi-document transactions with MongoDB 4.4+. @@ -955,9 +957,8 @@ def drop_collection( :class:`~pymongo.client_session.ClientSession`. - `comment` (optional): A user-provided comment to attach to this command. - - `encrypted_fields`: Document that describes the encrypted fields for Queryable - Encryption. - For example:: + - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: { "escCollection": "enxcol_.encryptedCollection.esc", @@ -983,6 +984,9 @@ def drop_collection( .. note:: The :attr:`~pymongo.database.Database.write_concern` of this database is automatically applied to this operation. + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a088bd2da8..0a8bf69a38 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -379,17 +379,26 @@ class Algorithm(str, enum.Enum): INDEXED = "Indexed" """Indexed. + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. + .. versionadded:: 4.2 """ UNINDEXED = "Unindexed" """Unindexed. + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. + .. versionadded:: 4.2 """ class QueryType(enum.IntEnum): - """An enum that defines the supported values for explicit encryption query_type. + """**(BETA)** An enum that defines the supported values for explicit encryption query_type. + + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. .. versionadded:: 4.2 """ @@ -606,13 +615,17 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `index_key_id`: The index key id to use for Queryable Encryption. Must be + - `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. Must be a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - - `query_type` (int): The query type to execute. See + - `query_type` (int): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - - `contention_factor` (int): The contention factor to use + - `contention_factor` (int): **(BETA)** The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. + .. note:: `index_key_id`, `query_type`, and `contention_factor` are part of the + Queryable Encryption beta. Backwards-breaking changes may be made before the + final release. + :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index eedc2ee23c..c5e6f47837 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -148,12 +148,12 @@ def __init__( - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is unable to load the crypt_shared library. - - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis of - outgoing commands. Set `bypass_query_analysis` to use explicit + - `bypass_query_analysis` (optional): **(BETA)** If ``True``, disable automatic analysis + of outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. - - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that - described the encrypted fields for Queryable Encryption. For example:: + - `encrypted_fields_map`: **(BETA)** Map of collection namespace ("db.coll") to documents + that described the encrypted fields for Queryable Encryption. For example:: { "db.encryptedCollection": { @@ -176,6 +176,10 @@ def __init__( } } + .. note:: `bypass_query_analysis` and `encrypted_fields_map` are part of the + Queryable Encryption beta. Backwards-breaking changes may be made before the + final release. + .. versionchanged:: 4.2 Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` parameters. From 3e8487826a05ff9d891d57e62ac35206a9bb622e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 17:06:54 -0400 Subject: [PATCH 0680/2111] PYTHON-3294 Depend on PyMongoCrypt 1.3.0b0 tag for beta (#963) --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 40fb484ad1..655cc5ea0c 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,9 @@ def build_extension(self, ext): pyopenssl_reqs.append("certifi") extras_require = { - "encryption": ["pymongocrypt>=1.2.0,<2.0.0"], + "encryption": [ + "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@pymongocrypt-1.3.0b0#subdirectory=bindings/python" + ], "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 77ace9a988051a1967671e8ca4baa93c06ca98c6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 17:29:51 -0400 Subject: [PATCH 0681/2111] PYTHON-3299 Add Automatic Queryable Encryption Example to Docs (#964) --- doc/changelog.rst | 3 +- doc/conf.py | 1 + doc/examples/encryption.rst | 73 +++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c53ec2201a..b2fcb7fa24 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -10,7 +10,7 @@ PyMongo 4.2 brings a number of improvements including: - Support for MongoDB 6.0. - Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking - changes may be made before the final release. + changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. @@ -41,6 +41,7 @@ in this release. .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 +.. _Queryable Encryption: automatic-queryable-client-side-encryption Changes in Version 4.1.1 ------------------------- diff --git a/doc/conf.py b/doc/conf.py index 7b1580de32..ff330b59a4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -85,6 +85,7 @@ # so this link results in a 404. linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", ] diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index e86eb7733d..5568b0d741 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -336,6 +336,79 @@ data key and create a collection with the if __name__ == "__main__": main() +.. _automatic-queryable-client-side-encryption: + +Automatic Queryable Encryption (Beta) +````````````````````````````````````` + +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +You must have MongoDB 6.0rc8+ Enterprise to preview the capability. + +Until PyMongo 4.2 release is finalized, it can be installed using:: + + pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" + +Additionally, ``libmongocrypt`` must be installed from `source `_. + +Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + from bson.codec_options import CodecOptions + from pymongo import MongoClient + from pymongo.encryption import Algorithm, ClientEncryption, QueryType + from pymongo.encryption_options import AutoEncryptionOpts + + + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + key_vault_namespace = "keyvault.datakeys" + key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, CodecOptions() + ) + key_vault = key_vault_client["keyvault"]["datakeys"] + key_vault.drop() + key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) + key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) + + encrypted_fields_map = { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "eccCollection": "encryptedCollection.ecc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": key1_id, + "queries": [{"queryType": "equality"}], + }, + { + "path": "lastName", + "bsonType": "string", + "keyId": key2_id, + } + ] + } + } + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + client.default.drop_collection('encryptedCollection') + coll = client.default.create_collection('encryptedCollection') + coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) + docs = list(coll.find({"firstName": "Jane"})) + print(docs) + +In the above example, the ``firstName`` and ``lastName`` fields are +automatically encrypted and decrypted. + .. _explicit-client-side-encryption: Explicit Encryption From a6ae852c364ed373392b4e7cae2994d83b4514e4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Jun 2022 17:40:46 -0400 Subject: [PATCH 0682/2111] PYTHON-3290 Nested pymongo.timeout() calls only shorten the deadline (#966) --- pymongo/__init__.py | 7 ++++--- pymongo/_csot.py | 6 +++--- test/test_csot.py | 3 ++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 9e877e9551..801d466c2e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -127,8 +127,9 @@ def timeout(seconds: Optional[float]) -> ContextManager: NetworkTimeout) as exc: print(f"block timed out: {exc!r}") - When nesting :func:`~pymongo.timeout`, the nested block overrides the - timeout. When exiting the block, the previous deadline is restored:: + When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at most + the existing deadline. The deadline can only be shortened, not extended. + When exiting the block, the previous deadline is restored:: with pymongo.timeout(5): coll.find_one() # Uses the 5 second deadline. @@ -136,7 +137,7 @@ def timeout(seconds: Optional[float]) -> ContextManager: coll.find_one() # Uses the 3 second deadline. coll.find_one() # Uses the original 5 second deadline. with pymongo.timeout(10): - coll.find_one() # Uses the 10 second deadline. + coll.find_one() # Still uses the original 5 second deadline. coll.find_one() # Uses the original 5 second deadline. :Parameters: diff --git a/pymongo/_csot.py b/pymongo/_csot.py index f1601f75d2..ddd4e9233f 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -70,9 +70,9 @@ def __init__(self, timeout: Optional[float]): def __enter__(self): timeout_token = TIMEOUT.set(self._timeout) - deadline_token = DEADLINE.set( - time.monotonic() + self._timeout if self._timeout else float("inf") - ) + prev_deadline = DEADLINE.get() + next_deadline = time.monotonic() + self._timeout if self._timeout else float("inf") + deadline_token = DEADLINE.set(min(prev_deadline, next_deadline)) rtt_token = RTT.set(0.0) self._tokens = (timeout_token, deadline_token, rtt_token) return self diff --git a/test/test_csot.py b/test/test_csot.py index d00f8c2916..290851159d 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -43,10 +43,11 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_timeout(), 10) deadline_10 = _csot.get_deadline() + # Capped at the original 10 deadline. with pymongo.timeout(15): coll.find_one() self.assertEqual(_csot.get_timeout(), 15) - self.assertGreater(_csot.get_deadline(), deadline_10) + self.assertEqual(_csot.get_deadline(), deadline_10) # Should be reset to previous values self.assertEqual(_csot.get_timeout(), 10) From 83ade52b1b7eef0ed526a3180b507338dd6c74b2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 20:24:12 -0400 Subject: [PATCH 0683/2111] bump to 4.2.0b0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 801d466c2e..62139dac11 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev1") +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, "b0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 655cc5ea0c..0d77c7c720 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0.dev1" +version = "4.2.0b0" f = open("README.rst") try: From e59a11ef4bceaab4da2e81bfca0713e4a333c296 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jun 2022 20:24:59 -0400 Subject: [PATCH 0684/2111] back to dev version --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 62139dac11..30bfc2bdf7 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -55,7 +55,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, "b0") +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev2") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 0d77c7c720..a2df4fac67 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0b0" +version = "4.2.0.dev2" f = open("README.rst") try: From b8653b018d0f16f2f60640a633c832939fffccbe Mon Sep 17 00:00:00 2001 From: Atiab Bin Zakaria <61742543+atiabbz@users.noreply.github.com> Date: Thu, 9 Jun 2022 23:10:03 +0800 Subject: [PATCH 0685/2111] Remove extra period in `is_mongos` documentation (#967) --- pymongo/mongo_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7af4b167f1..5e4cf0d754 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1080,7 +1080,7 @@ def is_primary(self) -> bool: def is_mongos(self) -> bool: """If this client is connected to mongos. If the client is not connected, this will block until a connection is established or raise - ServerSelectionTimeoutError if no server is available.. + ServerSelectionTimeoutError if no server is available. """ return self._server_property("server_type") == SERVER_TYPE.Mongos From be3008aa11f51c692ab903e744d305e8e230d5df Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Mon, 13 Jun 2022 11:42:41 -0700 Subject: [PATCH 0686/2111] PYTHON-2110 Refactored some C to avoid symbol conflicts (#968) * Refactored to avoid symbol conflicts * Forgot a replacement * Found a symbol * Undid symbol replacement for PyInit__cmessage * Changed cbson too Co-authored-by: Ben Warner --- bson/_cbsonmodule.c | 90 +++++++++++++++---------------- bson/buffer.c | 14 ++--- bson/buffer.h | 14 ++--- bson/encoding_helpers.c | 2 +- bson/encoding_helpers.h | 2 +- bson/time64.c | 64 +++++++++++----------- bson/time64.h | 16 +++--- doc/contributors.rst | 1 + pymongo/_cmessagemodule.c | 108 +++++++++++++++++++------------------- 9 files changed, 156 insertions(+), 155 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 1a296db527..191ce9886f 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -153,7 +153,7 @@ static PyObject* datetime_from_millis(long long millis) { int microseconds = diff * 1000; Time64_T seconds = (millis - diff) / 1000; struct TM timeinfo; - gmtime64_r(&seconds, &timeinfo); + cbson_gmtime64_r(&seconds, &timeinfo); return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, timeinfo.tm_mon + 1, @@ -175,14 +175,14 @@ static long long millis_from_datetime(PyObject* datetime) { timeinfo.tm_min = PyDateTime_DATE_GET_MINUTE(datetime); timeinfo.tm_sec = PyDateTime_DATE_GET_SECOND(datetime); - millis = timegm64(&timeinfo) * 1000; + millis = cbson_timegm64(&timeinfo) * 1000; millis += PyDateTime_DATE_GET_MICROSECOND(datetime) / 1000; return millis; } /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { - if (buffer_write(buffer, data, size)) { + if (pymongo_buffer_write(buffer, data, size)) { return 0; } return 1; @@ -207,7 +207,7 @@ void buffer_write_int32_at_position(buffer_t buffer, int position, int32_t data) { uint32_t data_le = BSON_UINT32_TO_LE(data); - memcpy(buffer_get_buffer(buffer) + position, &data_le, 4); + memcpy(pymongo_buffer_get_buffer(buffer) + position, &data_le, 4); } static int write_unicode(buffer_t buffer, PyObject* py_string) { @@ -419,7 +419,7 @@ static long _type_marker(PyObject* object) { * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { registry->encoder_map = NULL; registry->decoder_map = NULL; registry->fallback_encoder = NULL; @@ -481,7 +481,7 @@ int convert_codec_options(PyObject* options_obj, void* p) { return 0; } - if (!convert_type_registry(type_registry_obj, + if (!cbson_convert_type_registry(type_registry_obj, &options->type_registry)) { return 0; } @@ -597,7 +597,7 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } - status = check_string((const unsigned char*)pattern_data, + status = cbson_check_string((const unsigned char*)pattern_data, pattern_length, check_utf8, 1); if (status == NOT_UTF_8) { PyObject* InvalidStringData = _error("InvalidStringData"); @@ -649,7 +649,7 @@ static int _write_regex_to_buffer( if (!buffer_write_bytes(buffer, flags, flags_length)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x0B; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0B; return 1; } @@ -687,7 +687,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, const char* data; int size; - *(buffer_get_buffer(buffer) + type_byte) = 0x05; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; subtype_object = PyObject_GetAttrString(value, "subtype"); if (!subtype_object) { return 0; @@ -750,7 +750,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x07; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; return 1; } case 11: @@ -772,15 +772,15 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (scope == Py_None) { Py_DECREF(scope); - *(buffer_get_buffer(buffer) + type_byte) = 0x0D; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0D; return write_string(buffer, value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x0F; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0F; - start_position = buffer_get_position(buffer); + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { Py_DECREF(scope); return 0; @@ -797,7 +797,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } Py_DECREF(scope); - length = buffer_get_position(buffer) - start_position; + length = pymongo_buffer_get_position(buffer) - start_position; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; @@ -834,7 +834,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x11; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x11; return 1; } case 18: @@ -849,7 +849,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!buffer_write_int64(buffer, (int64_t)ll)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return 1; } case 19: @@ -870,7 +870,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(pystring); - *(buffer_get_buffer(buffer) + type_byte) = 0x13; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x13; return 1; } case 100: @@ -885,7 +885,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } Py_DECREF(as_doc); - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return 1; } case 101: @@ -894,19 +894,19 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!write_raw_doc(buffer, value)) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return 1; } case 255: { /* MinKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0xFF; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0xFF; return 1; } case 127: { /* MaxKey */ - *(buffer_get_buffer(buffer) + type_byte) = 0x7F; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x7F; return 1; } } @@ -915,7 +915,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (PyBool_Check(value)) { const char c = (value == Py_True) ? 0x01 : 0x00; - *(buffer_get_buffer(buffer) + type_byte) = 0x08; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x08; return buffer_write_bytes(buffer, &c, 1); } else if (PyLong_Check(value)) { @@ -931,20 +931,20 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, "MongoDB can only handle up to 8-byte ints"); return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x12; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x10; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; return buffer_write_int32(buffer, (int32_t)int_value); } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); - *(buffer_get_buffer(buffer) + type_byte) = 0x01; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; return buffer_write_double(buffer, d); } else if (value == Py_None) { - *(buffer_get_buffer(buffer) + type_byte) = 0x0A; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x0A; return 1; } else if (PyDict_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); } else if (PyList_Check(value) || PyTuple_Check(value)) { Py_ssize_t items, i; @@ -953,11 +953,11 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, length; char zero = 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x04; - start_position = buffer_get_position(buffer); + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x04; + start_position = pymongo_buffer_get_position(buffer); /* save space for length */ - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { return 0; } @@ -972,7 +972,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } for(i = 0; i < items; i++) { - int list_type_byte = buffer_save_space(buffer, 1); + int list_type_byte = pymongo_buffer_save_space(buffer, 1); char name[16]; PyObject* item_value; @@ -999,7 +999,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - start_position; + length = pymongo_buffer_get_position(buffer) - start_position; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return 1; @@ -1012,7 +1012,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; if ((size = _downcast_and_check(PyBytes_GET_SIZE(value), 0)) == -1) return 0; - *(buffer_get_buffer(buffer) + type_byte) = 0x05; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; if (!buffer_write_int32(buffer, (int32_t)size)) { return 0; } @@ -1024,7 +1024,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } return 1; } else if (PyUnicode_Check(value)) { - *(buffer_get_buffer(buffer) + type_byte) = 0x02; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x02; return write_unicode(buffer, value); } else if (PyDateTime_Check(value)) { long long millis; @@ -1042,7 +1042,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else { millis = millis_from_datetime(value); } - *(buffer_get_buffer(buffer) + type_byte) = 0x09; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { return _write_regex_to_buffer(buffer, type_byte, value); @@ -1059,7 +1059,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (PyErr_Occurred()) { return 0; } - *(buffer_get_buffer(buffer) + type_byte) = 0x03; + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); } @@ -1189,7 +1189,7 @@ int write_pair(PyObject* self, buffer_t buffer, const char* name, int name_lengt return 1; } - type_byte = buffer_save_space(buffer, 1); + type_byte = pymongo_buffer_save_space(buffer, 1); if (type_byte == -1) { return 0; } @@ -1362,7 +1362,7 @@ int write_dict(PyObject* self, buffer_t buffer, } } - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { return 0; } @@ -1429,7 +1429,7 @@ int write_dict(PyObject* self, buffer_t buffer, if (!buffer_write_bytes(buffer, &zero, 1)) { return 0; } - length = buffer_get_position(buffer) - length_location; + length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)length); return length; @@ -1464,7 +1464,7 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { return raw_bson_document_bytes_obj; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { destroy_codec_options(&options); return NULL; @@ -1472,15 +1472,15 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { if (!write_dict(self, buffer, dict, check_keys, &options, top_level)) { destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); return NULL; } /* objectify buffer */ - result = Py_BuildValue("y#", buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer)); + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); return result; } diff --git a/bson/buffer.c b/bson/buffer.c index bb92ab3ee5..cc75202746 100644 --- a/bson/buffer.c +++ b/bson/buffer.c @@ -39,7 +39,7 @@ static void set_memory_error(void) { /* Allocate and return a new buffer. * Return NULL and sets MemoryError on allocation failure. */ -buffer_t buffer_new(void) { +buffer_t pymongo_buffer_new(void) { buffer_t buffer; buffer = (buffer_t)malloc(sizeof(struct buffer)); if (buffer == NULL) { @@ -61,7 +61,7 @@ buffer_t buffer_new(void) { /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer) { +int pymongo_buffer_free(buffer_t buffer) { if (buffer == NULL) { return 1; } @@ -122,7 +122,7 @@ static int buffer_assure_space(buffer_t buffer, int size) { /* Save `size` bytes from the current position in `buffer` (and grow if needed). * Return offset for writing, or -1 on failure. * Sets MemoryError or ValueError on failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size) { +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size) { int position = buffer->position; if (buffer_assure_space(buffer, size) != 0) { return -1; @@ -134,7 +134,7 @@ buffer_position buffer_save_space(buffer_t buffer, int size) { /* Write `size` bytes from `data` to `buffer` (and grow if needed). * Return non-zero on failure. * Sets MemoryError or ValueError on failure. */ -int buffer_write(buffer_t buffer, const char* data, int size) { +int pymongo_buffer_write(buffer_t buffer, const char* data, int size) { if (buffer_assure_space(buffer, size) != 0) { return 1; } @@ -144,14 +144,14 @@ int buffer_write(buffer_t buffer, const char* data, int size) { return 0; } -int buffer_get_position(buffer_t buffer) { +int pymongo_buffer_get_position(buffer_t buffer) { return buffer->position; } -char* buffer_get_buffer(buffer_t buffer) { +char* pymongo_buffer_get_buffer(buffer_t buffer) { return buffer->buffer; } -void buffer_update_position(buffer_t buffer, buffer_position new_position) { +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position) { buffer->position = new_position; } diff --git a/bson/buffer.h b/bson/buffer.h index 1485082d95..a78e34e4de 100644 --- a/bson/buffer.h +++ b/bson/buffer.h @@ -27,25 +27,25 @@ typedef int buffer_position; /* Allocate and return a new buffer. * Return NULL on allocation failure. */ -buffer_t buffer_new(void); +buffer_t pymongo_buffer_new(void); /* Free the memory allocated for `buffer`. * Return non-zero on failure. */ -int buffer_free(buffer_t buffer); +int pymongo_buffer_free(buffer_t buffer); /* Save `size` bytes from the current position in `buffer` (and grow if needed). * Return offset for writing, or -1 on allocation failure. */ -buffer_position buffer_save_space(buffer_t buffer, int size); +buffer_position pymongo_buffer_save_space(buffer_t buffer, int size); /* Write `size` bytes from `data` to `buffer` (and grow if needed). * Return non-zero on allocation failure. */ -int buffer_write(buffer_t buffer, const char* data, int size); +int pymongo_buffer_write(buffer_t buffer, const char* data, int size); /* Getters for the internals of a buffer_t. * Should try to avoid using these as much as possible * since they break the abstraction. */ -buffer_position buffer_get_position(buffer_t buffer); -char* buffer_get_buffer(buffer_t buffer); -void buffer_update_position(buffer_t buffer, buffer_position new_position); +buffer_position pymongo_buffer_get_position(buffer_t buffer); +char* pymongo_buffer_get_buffer(buffer_t buffer); +void pymongo_buffer_update_position(buffer_t buffer, buffer_position new_position); #endif diff --git a/bson/encoding_helpers.c b/bson/encoding_helpers.c index ea96810878..187ce6f3bd 100644 --- a/bson/encoding_helpers.c +++ b/bson/encoding_helpers.c @@ -87,7 +87,7 @@ static unsigned char isLegalUTF8(const unsigned char* source, int length) { return 1; } -result_t check_string(const unsigned char* string, const int length, +result_t cbson_check_string(const unsigned char* string, const int length, const char check_utf8, const char check_null) { int position = 0; /* By default we go character by character. Will be different for checking diff --git a/bson/encoding_helpers.h b/bson/encoding_helpers.h index b1a90fa510..a5fb75860f 100644 --- a/bson/encoding_helpers.h +++ b/bson/encoding_helpers.h @@ -23,7 +23,7 @@ typedef enum { HAS_NULL } result_t; -result_t check_string(const unsigned char* string, const int length, +result_t cbson_check_string(const unsigned char* string, const int length, const char check_utf8, const char check_null); #endif diff --git a/bson/time64.c b/bson/time64.c index bad6b51dc1..8d2886592e 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -29,13 +29,13 @@ THE SOFTWARE. /* Programmers who have available to them 64-bit time values as a 'long -long' type can use localtime64_r() and gmtime64_r() which correctly +long' type can use cbson_localtime64_r() and cbson_gmtime64_r() which correctly converts the time even on 32-bit systems. Whether you have 64-bit time values will depend on the operating system. -localtime64_r() is a 64-bit equivalent of localtime_r(). +cbson_localtime64_r() is a 64-bit equivalent of localtime_r(). -gmtime64_r() is a 64-bit equivalent of gmtime_r(). +cbson_gmtime64_r() is a 64-bit equivalent of gmtime_r(). */ @@ -158,7 +158,7 @@ static int is_exception_century(Year year) The result is like cmp. Ignores things like gmtoffset and dst */ -int cmp_date( const struct TM* left, const struct tm* right ) { +int cbson_cmp_date( const struct TM* left, const struct tm* right ) { if( left->tm_year > right->tm_year ) return 1; else if( left->tm_year < right->tm_year ) @@ -196,11 +196,11 @@ int cmp_date( const struct TM* left, const struct tm* right ) { /* Check if a date is safely inside a range. The intention is to check if its a few days inside. */ -int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { - if( cmp_date(date, min) == -1 ) +int cbson_date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) { + if( cbson_cmp_date(date, min) == -1 ) return 0; - if( cmp_date(date, max) == 1 ) + if( cbson_cmp_date(date, max) == 1 ) return 0; return 1; @@ -209,9 +209,9 @@ int date_in_safe_range( const struct TM* date, const struct tm* min, const struc /* timegm() is not in the C or POSIX spec, but it is such a useful extension I would be remiss in leaving it out. Also I need it - for localtime64() + for cbson_localtime64() */ -Time64_T timegm64(const struct TM *date) { +Time64_T cbson_timegm64(const struct TM *date) { Time64_T days = 0; Time64_T seconds = 0; Year year; @@ -376,7 +376,7 @@ static int safe_year(const Year year) } -void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { +void pymongo_copy_tm_to_TM64(const struct tm *src, struct TM *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -408,7 +408,7 @@ void copy_tm_to_TM64(const struct tm *src, struct TM *dest) { } -void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { +void cbson_copy_TM64_to_tm(const struct TM *src, struct tm *dest) { if( src == NULL ) { memset(dest, 0, sizeof(*dest)); } @@ -441,7 +441,7 @@ void copy_TM64_to_tm(const struct TM *src, struct tm *dest) { /* Simulate localtime_r() to the best of our ability */ -struct tm * fake_localtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_localtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = localtime(time); assert(result != NULL); @@ -458,7 +458,7 @@ struct tm * fake_localtime_r(const time_t *time, struct tm *result) { /* Simulate gmtime_r() to the best of our ability */ -struct tm * fake_gmtime_r(const time_t *time, struct tm *result) { +struct tm * cbson_fake_gmtime_r(const time_t *time, struct tm *result) { const struct tm *static_result = gmtime(time); assert(result != NULL); @@ -499,22 +499,22 @@ static Time64_T seconds_between_years(Year left_year, Year right_year) { } -Time64_T mktime64(const struct TM *input_date) { +Time64_T cbson_mktime64(const struct TM *input_date) { struct tm safe_date; struct TM date; Time64_T time; Year year = input_date->tm_year + 1900; - if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) + if( cbson_date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) ) { - copy_TM64_to_tm(input_date, &safe_date); + cbson_copy_TM64_to_tm(input_date, &safe_date); return (Time64_T)mktime(&safe_date); } /* Have to make the year safe in date else it won't fit in safe_date */ date = *input_date; date.tm_year = safe_year(year) - 1900; - copy_TM64_to_tm(&date, &safe_date); + cbson_copy_TM64_to_tm(&date, &safe_date); time = (Time64_T)mktime(&safe_date); @@ -526,11 +526,11 @@ Time64_T mktime64(const struct TM *input_date) { /* Because I think mktime() is a crappy name */ Time64_T timelocal64(const struct TM *date) { - return mktime64(date); + return cbson_mktime64(date); } -struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) +struct TM *cbson_gmtime64_r (const Time64_T *in_time, struct TM *p) { int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday; Time64_T v_tm_tday; @@ -549,7 +549,7 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) struct tm safe_date; GMTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, p); + pymongo_copy_tm_to_TM64(&safe_date, p); assert(check_tm(p)); return p; @@ -659,7 +659,7 @@ struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p) } -struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) +struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) { time_t safe_time; struct tm safe_date; @@ -678,15 +678,15 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) LOCALTIME_R(&safe_time, &safe_date); - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); assert(check_tm(local_tm)); return local_tm; } #endif - if( gmtime64_r(time, &gm_tm) == NULL ) { - TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time); + if( cbson_gmtime64_r(time, &gm_tm) == NULL ) { + TIME64_TRACE1("cbson_gmtime64_r returned null for %lld\n", *time); return NULL; } @@ -700,13 +700,13 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900; } - safe_time = (time_t)timegm64(&gm_tm); + safe_time = (time_t)cbson_timegm64(&gm_tm); if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) { TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time); return NULL; } - copy_tm_to_TM64(&safe_date, local_tm); + pymongo_copy_tm_to_TM64(&safe_date, local_tm); local_tm->tm_year = (int)orig_year; if( local_tm->tm_year != orig_year ) { @@ -751,14 +751,14 @@ struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm) } -int valid_tm_wday( const struct TM* date ) { +int cbson_valid_tm_wday( const struct TM* date ) { if( 0 <= date->tm_wday && date->tm_wday <= 6 ) return 1; else return 0; } -int valid_tm_mon( const struct TM* date ) { +int cbson_valid_tm_mon( const struct TM* date ) { if( 0 <= date->tm_mon && date->tm_mon <= 11 ) return 1; else @@ -767,15 +767,15 @@ int valid_tm_mon( const struct TM* date ) { /* Non-thread safe versions of the above */ -struct TM *localtime64(const Time64_T *time) { +struct TM *cbson_localtime64(const Time64_T *time) { #ifdef _MSC_VER _tzset(); #else tzset(); #endif - return localtime64_r(time, &Static_Return_Date); + return cbson_localtime64_r(time, &Static_Return_Date); } -struct TM *gmtime64(const Time64_T *time) { - return gmtime64_r(time, &Static_Return_Date); +struct TM *cbson_gmtime64(const Time64_T *time) { + return cbson_gmtime64_r(time, &Static_Return_Date); } diff --git a/bson/time64.h b/bson/time64.h index 61d9776926..6321eb307e 100644 --- a/bson/time64.h +++ b/bson/time64.h @@ -41,13 +41,13 @@ struct TM64 { /* Declare public functions */ -struct TM *gmtime64_r (const Time64_T *, struct TM *); -struct TM *localtime64_r (const Time64_T *, struct TM *); -struct TM *gmtime64 (const Time64_T *); -struct TM *localtime64 (const Time64_T *); +struct TM *cbson_gmtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_localtime64_r (const Time64_T *, struct TM *); +struct TM *cbson_gmtime64 (const Time64_T *); +struct TM *cbson_localtime64 (const Time64_T *); -Time64_T timegm64 (const struct TM *); -Time64_T mktime64 (const struct TM *); +Time64_T cbson_timegm64 (const struct TM *); +Time64_T cbson_mktime64 (const struct TM *); Time64_T timelocal64 (const struct TM *); @@ -55,12 +55,12 @@ Time64_T timelocal64 (const struct TM *); #ifdef HAS_LOCALTIME_R # define LOCALTIME_R(clock, result) localtime_r(clock, result) #else -# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result) +# define LOCALTIME_R(clock, result) cbson_fake_localtime_r(clock, result) #endif #ifdef HAS_GMTIME_R # define GMTIME_R(clock, result) gmtime_r(clock, result) #else -# define GMTIME_R(clock, result) fake_gmtime_r(clock, result) +# define GMTIME_R(clock, result) cbson_fake_gmtime_r(clock, result) #endif diff --git a/doc/contributors.rst b/doc/contributors.rst index 4275209781..7ab87f7790 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -92,3 +92,4 @@ The following is a list of people who have contributed to - Henri Froese (henrifroese) - Ishmum Jawad Khan (ishmum123) - Arie Bovenberg (ariebovenberg) +- Ben Warner (bcwarner) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 517c0fb798..2f03ce73e0 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -90,13 +90,13 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { convert_codec_options, &options)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { goto fail; } @@ -111,37 +111,37 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { goto fail; } - begin = buffer_get_position(buffer); + begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, query, 0, &options, 1)) { goto fail; } - max_size = buffer_get_position(buffer) - begin; + max_size = pymongo_buffer_get_position(buffer) - begin; if (field_selector != Py_None) { - begin = buffer_get_position(buffer); + begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, field_selector, 0, &options, 1)) { goto fail; } - cur_size = buffer_get_position(buffer) - begin; + cur_size = pymongo_buffer_get_position(buffer) - begin; max_size = (cur_size > max_size) ? cur_size : max_size; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ result = Py_BuildValue("iy#i", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), max_size); fail: PyMem_Free(collection_name); destroy_codec_options(&options); if (buffer) { - buffer_free(buffer); + pymongo_buffer_free(buffer); } return result; } @@ -165,13 +165,13 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { &cursor_id)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { goto fail; } @@ -188,18 +188,18 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { goto fail; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ result = Py_BuildValue("iy#", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer)); + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); fail: PyMem_Free(collection_name); if (buffer) { - buffer_free(buffer); + pymongo_buffer_free(buffer); } return result; } @@ -239,13 +239,13 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { convert_codec_options, &options)) { return NULL; } - buffer = buffer_new(); + buffer = pymongo_buffer_new(); if (!buffer) { goto fail; } // save space for message length - length_location = buffer_save_space(buffer, 4); + length_location = pymongo_buffer_save_space(buffer, 4); if (length_location == -1) { goto fail; } @@ -273,7 +273,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { goto fail; } /* save space for payload 0 length */ - payload_one_length_location = buffer_save_space(buffer, 4); + payload_one_length_location = pymongo_buffer_save_space(buffer, 4); /* C string identifier */ if (!buffer_write_bytes_ssize_t(buffer, identifier, identifier_length + 1)) { goto fail; @@ -295,26 +295,26 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_CLEAR(doc); } - payload_length = buffer_get_position(buffer) - payload_one_length_location; + payload_length = pymongo_buffer_get_position(buffer) - payload_one_length_location; buffer_write_int32_at_position( buffer, payload_one_length_location, (int32_t)payload_length); total_size += payload_length; } - message_length = buffer_get_position(buffer) - length_location; + message_length = pymongo_buffer_get_position(buffer) - length_location; buffer_write_int32_at_position( buffer, length_location, (int32_t)message_length); /* objectify buffer */ result = Py_BuildValue("iy#ii", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), total_size, max_doc_size); fail: Py_XDECREF(iterator); if (buffer) { - buffer_free(buffer); + pymongo_buffer_free(buffer); } PyMem_Free(identifier); destroy_codec_options(&options); @@ -400,7 +400,7 @@ _batched_op_msg( return 0; } /* Save space for size */ - size_location = buffer_save_space(buffer, 4); + size_location = pymongo_buffer_save_space(buffer, 4); if (size_location == -1) { return 0; } @@ -445,17 +445,17 @@ _batched_op_msg( return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int cur_doc_begin = buffer_get_position(buffer); + int cur_doc_begin = pymongo_buffer_get_position(buffer); int cur_size; int doc_too_large = 0; int unacked_doc_too_large = 0; if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } - cur_size = buffer_get_position(buffer) - cur_doc_begin; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; /* Does the first document exceed max_message_size? */ - doc_too_large = (idx == 0 && (buffer_get_position(buffer) > max_message_size)); + doc_too_large = (idx == 0 && (pymongo_buffer_get_position(buffer) > max_message_size)); /* When OP_MSG is used unacknowledged we have to check * document size client side or applications won't be notified. * Otherwise we let the server deal with documents that are too large @@ -483,12 +483,12 @@ _batched_op_msg( goto fail; } /* We have enough data, return this batch. */ - if (buffer_get_position(buffer) > max_message_size) { + if (pymongo_buffer_get_position(buffer) > max_message_size) { /* * Roll the existing buffer back to the beginning * of the last document encoded. */ - buffer_update_position(buffer, cur_doc_begin); + pymongo_buffer_update_position(buffer, cur_doc_begin); Py_CLEAR(doc); break; } @@ -508,7 +508,7 @@ _batched_op_msg( goto fail; } - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); length = position - size_location; buffer_write_int32_at_position(buffer, size_location, (int32_t)length); return 1; @@ -538,7 +538,7 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { &ctx)) { return NULL; } - if (!(buffer = buffer_new())) { + if (!(buffer = pymongo_buffer_new())) { destroy_codec_options(&options); return NULL; } @@ -560,12 +560,12 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { } result = Py_BuildValue("y#O", - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } @@ -591,12 +591,12 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { &ctx)) { return NULL; } - if (!(buffer = buffer_new())) { + if (!(buffer = pymongo_buffer_new())) { destroy_codec_options(&options); return NULL; } /* Save space for message length and request id */ - if ((buffer_save_space(buffer, 8)) == -1) { + if ((pymongo_buffer_save_space(buffer, 8)) == -1) { goto fail; } if (!buffer_write_bytes(buffer, @@ -623,16 +623,16 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { } request_id = rand(); - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); buffer_write_int32_at_position(buffer, 0, (int32_t)position); buffer_write_int32_at_position(buffer, 4, (int32_t)request_id); result = Py_BuildValue("iy#O", request_id, - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } @@ -702,14 +702,14 @@ _batched_write_command( } /* Position of command document length */ - cmd_len_loc = buffer_get_position(buffer); + cmd_len_loc = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, command, 0, &options, 0)) { return 0; } /* Write type byte for array */ - *(buffer_get_buffer(buffer) + (buffer_get_position(buffer) - 1)) = 0x4; + *(pymongo_buffer_get_buffer(buffer) + (pymongo_buffer_get_position(buffer) - 1)) = 0x4; switch (op) { case _INSERT: @@ -742,7 +742,7 @@ _batched_write_command( } /* Save space for list document */ - lst_len_loc = buffer_save_space(buffer, 4); + lst_len_loc = pymongo_buffer_save_space(buffer, 4); if (lst_len_loc == -1) { return 0; } @@ -757,7 +757,7 @@ _batched_write_command( return 0; } while ((doc = PyIter_Next(iterator)) != NULL) { - int sub_doc_begin = buffer_get_position(buffer); + int sub_doc_begin = pymongo_buffer_get_position(buffer); int cur_doc_begin; int cur_size; int enough_data = 0; @@ -767,7 +767,7 @@ _batched_write_command( !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { goto fail; } - cur_doc_begin = buffer_get_position(buffer); + cur_doc_begin = pymongo_buffer_get_position(buffer); if (!write_dict(state->_cbson, buffer, doc, 0, &options, 1)) { goto fail; } @@ -775,7 +775,7 @@ _batched_write_command( /* We have enough data, return this batch. * max_cmd_size accounts for the two trailing null bytes. */ - cur_size = buffer_get_position(buffer) - cur_doc_begin; + cur_size = pymongo_buffer_get_position(buffer) - cur_doc_begin; /* This single document is too large for the command. */ if (cur_size > max_cmd_size) { if (op == _INSERT) { @@ -797,13 +797,13 @@ _batched_write_command( goto fail; } enough_data = (idx >= 1 && - (buffer_get_position(buffer) > max_split_size)); + (pymongo_buffer_get_position(buffer) > max_split_size)); if (enough_data) { /* * Roll the existing buffer back to the beginning * of the last document encoded. */ - buffer_update_position(buffer, sub_doc_begin); + pymongo_buffer_update_position(buffer, sub_doc_begin); Py_CLEAR(doc); break; } @@ -827,7 +827,7 @@ _batched_write_command( goto fail; } - position = buffer_get_position(buffer); + position = pymongo_buffer_get_position(buffer); length = position - lst_len_loc - 1; buffer_write_int32_at_position(buffer, lst_len_loc, (int32_t)length); length = position - cmd_len_loc; @@ -860,7 +860,7 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { &ctx)) { return NULL; } - if (!(buffer = buffer_new())) { + if (!(buffer = pymongo_buffer_new())) { PyMem_Free(ns); destroy_codec_options(&options); return NULL; @@ -884,13 +884,13 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { } result = Py_BuildValue("y#O", - buffer_get_buffer(buffer), - (Py_ssize_t)buffer_get_position(buffer), + pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer), to_publish); fail: PyMem_Free(ns); destroy_codec_options(&options); - buffer_free(buffer); + pymongo_buffer_free(buffer); Py_XDECREF(to_publish); return result; } From 3f7231a1a2668c80b1dd82d18f079b387d881fa3 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Mon, 13 Jun 2022 16:04:30 -0700 Subject: [PATCH 0687/2111] PYTHON-3048 Fixed bug with incorrect validation of UTF-8 regex patterns (#970) --- THIRD-PARTY-NOTICES | 23 -------- bson/_cbsonmodule.c | 33 ++++++----- bson/encoding_helpers.c | 118 ---------------------------------------- bson/encoding_helpers.h | 29 ---------- doc/changelog.rst | 3 + setup.py | 7 +-- 6 files changed, 23 insertions(+), 190 deletions(-) delete mode 100644 bson/encoding_helpers.c delete mode 100644 bson/encoding_helpers.h diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index a307b30432..0b9fc738ed 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -71,26 +71,3 @@ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -3) License Notice for encoding_helpers.c ----------------------------------------- - -Portions Copyright 2001 Unicode, Inc. - -Disclaimer - -This source code is provided as is by Unicode, Inc. No claims are -made as to fitness for any particular purpose. No warranties of any -kind are expressed or implied. The recipient agrees to determine -applicability of information provided. If this file has been -purchased on magnetic or optical media from Unicode, Inc., the -sole remedy for any claim will be exchange of defective media -within 90 days of receipt. - -Limitations on Rights to Redistribute This Code - -Unicode, Inc. hereby grants the right to freely use the information -supplied in this file in the creation of products supporting the -Unicode Standard, and to make copies of this file in any form -for internal or external distribution as long as this notice -remains attached. diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 191ce9886f..da6a5cbda7 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -26,7 +26,6 @@ #include "buffer.h" #include "time64.h" -#include "encoding_helpers.h" #define _CBSON_MODULE #include "_cbsonmodule.h" @@ -553,12 +552,12 @@ static int _write_regex_to_buffer( PyObject* py_flags; PyObject* py_pattern; PyObject* encoded_pattern; + PyObject* decoded_pattern; long int_flags; char flags[FLAGS_SIZE]; char check_utf8 = 0; const char* pattern_data; int pattern_length, flags_length; - result_t status; /* * Both the builtin re type and our Regex class have attributes @@ -597,18 +596,8 @@ static int _write_regex_to_buffer( Py_DECREF(encoded_pattern); return 0; } - status = cbson_check_string((const unsigned char*)pattern_data, - pattern_length, check_utf8, 1); - if (status == NOT_UTF_8) { - PyObject* InvalidStringData = _error("InvalidStringData"); - if (InvalidStringData) { - PyErr_SetString(InvalidStringData, - "regex patterns must be valid UTF-8"); - Py_DECREF(InvalidStringData); - } - Py_DECREF(encoded_pattern); - return 0; - } else if (status == HAS_NULL) { + + if (strlen(pattern_data) != (size_t) pattern_length){ PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument) { PyErr_SetString(InvalidDocument, @@ -619,6 +608,22 @@ static int _write_regex_to_buffer( return 0; } + if (check_utf8) { + decoded_pattern = PyUnicode_DecodeUTF8(pattern_data, (Py_ssize_t) pattern_length, NULL); + if (decoded_pattern == NULL) { + PyErr_Clear(); + PyObject* InvalidStringData = _error("InvalidStringData"); + if (InvalidStringData) { + PyErr_SetString(InvalidStringData, + "regex patterns must be valid UTF-8"); + Py_DECREF(InvalidStringData); + } + Py_DECREF(encoded_pattern); + return 0; + } + Py_DECREF(decoded_pattern); + } + if (!buffer_write_bytes(buffer, pattern_data, pattern_length + 1)) { Py_DECREF(encoded_pattern); return 0; diff --git a/bson/encoding_helpers.c b/bson/encoding_helpers.c deleted file mode 100644 index 187ce6f3bd..0000000000 --- a/bson/encoding_helpers.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2009-2015 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "encoding_helpers.h" - -/* - * Portions Copyright 2001 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. - */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. - * The length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns 0. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ -static unsigned char isLegalUTF8(const unsigned char* source, int length) { - unsigned char a; - const unsigned char* srcptr = source + length; - switch (length) { - default: return 0; - /* Everything else falls through when "true"... */ - case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return 0; - case 2: if ((a = (*--srcptr)) > 0xBF) return 0; - switch (*source) { - /* no fall-through in this inner switch */ - case 0xE0: if (a < 0xA0) return 0; break; - case 0xF0: if (a < 0x90) return 0; break; - case 0xF4: if ((a > 0x8F) || (a < 0x80)) return 0; break; - default: if (a < 0x80) return 0; - } - case 1: if (*source >= 0x80 && *source < 0xC2) return 0; - if (*source > 0xF4) return 0; - } - return 1; -} - -result_t cbson_check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null) { - int position = 0; - /* By default we go character by character. Will be different for checking - * UTF-8 */ - int sequence_length = 1; - - if (!check_utf8 && !check_null) { - return VALID; - } - - while (position < length) { - if (check_null && *(string + position) == 0) { - return HAS_NULL; - } - if (check_utf8) { - sequence_length = trailingBytesForUTF8[*(string + position)] + 1; - if ((position + sequence_length) > length) { - return NOT_UTF_8; - } - if (!isLegalUTF8(string + position, sequence_length)) { - return NOT_UTF_8; - } - } - position += sequence_length; - } - - return VALID; -} diff --git a/bson/encoding_helpers.h b/bson/encoding_helpers.h deleted file mode 100644 index a5fb75860f..0000000000 --- a/bson/encoding_helpers.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2009-2015 MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ENCODING_HELPERS_H -#define ENCODING_HELPERS_H - -typedef enum { - VALID, - NOT_UTF_8, - HAS_NULL -} result_t; - -result_t cbson_check_string(const unsigned char* string, const int length, - const char check_utf8, const char check_null); - -#endif diff --git a/doc/changelog.rst b/doc/changelog.rst index b2fcb7fa24..f074a9d464 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -19,6 +19,8 @@ Bug fixes - Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). +- Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex` + objects (`PYTHON-3048`_). :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData`. Unavoidable breaking changes ............................ @@ -38,6 +40,7 @@ Issues Resolved See the `PyMongo 4.2 release notes in JIRA`_ for the list of resolved issues in this release. +.. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048 .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 diff --git a/setup.py b/setup.py index a2df4fac67..c6b32b9fba 100755 --- a/setup.py +++ b/setup.py @@ -255,12 +255,7 @@ def build_extension(self, ext): Extension( "bson._cbson", include_dirs=["bson"], - sources=[ - "bson/_cbsonmodule.c", - "bson/time64.c", - "bson/buffer.c", - "bson/encoding_helpers.c", - ], + sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], ), Extension( "pymongo._cmessage", From 98d393336411b7cd5ad4e184ca45192f76fb48e8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 13 Jun 2022 19:54:36 -0500 Subject: [PATCH 0688/2111] PYTHON-3253 Provide FLE 2.0 API example for docs team (#969) --- test/test_encryption.py | 92 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index f5c6127a25..e5a9666d2c 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2099,5 +2099,97 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) +class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): + # Queryable Encryption is not supported on Standalone topology. + @client_context.require_no_standalone + @client_context.require_version_min(6, 0, -1) + def setUp(self): + super().setUp() + + def test_queryable_encryption(self): + # MongoClient to use in testing that handles auth/tls/etc, + # and cleanup. + def MongoClient(**kwargs): + c = rs_or_single_client(**kwargs) + self.addCleanup(c.close) + return c + + # Drop data from prior test runs. + self.client.keyvault.datakeys.drop() + self.client.drop_database("docs_examples") + + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + # Create two data keys. + key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() + ) + key1_id = client_encryption.create_data_key("local") + key2_id = client_encryption.create_data_key("local") + + # Create an encryptedFieldsMap. + encrypted_fields_map = { + "docs_examples.encrypted": { + "fields": [ + { + "path": "encrypted_indexed", + "bsonType": "string", + "keyId": key1_id, + "queries": [ + { + "queryType": "equality", + }, + ], + }, + { + "path": "encrypted_unindexed", + "bsonType": "string", + "keyId": key2_id, + }, + ], + }, + } + + # Create an Queryable Encryption collection. + opts = AutoEncryptionOpts( + kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map + ) + encrypted_client = MongoClient(auto_encryption_opts=opts) + + # Create a Queryable Encryption collection "docs_examples.encrypted". + # Because docs_examples.encrypted is in encrypted_fields_map, it is + # created with Queryable Encryption support. + db = encrypted_client.docs_examples + encrypted_coll = db.create_collection("encrypted") + + # Auto encrypt an insert and find. + + # Encrypt an insert. + encrypted_coll.insert_one( + { + "_id": 1, + "encrypted_indexed": "indexed_value", + "encrypted_unindexed": "unindexed_value", + } + ) + + # Encrypt a find. + res = encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = MongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + client_encryption.close() + + if __name__ == "__main__": unittest.main() From 43c2062305d25a7c81fee27109ea30de57379690 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Wed, 15 Jun 2022 11:22:55 -0700 Subject: [PATCH 0689/2111] PYTHON-3093 Change streams support for user-facing PIT pre- and post-images (#972) --- pymongo/change_stream.py | 5 + pymongo/collection.py | 8 +- pymongo/database.py | 13 +- pymongo/mongo_client.py | 9 +- .../change-streams-pre_and_post_images.json | 827 ++++++++++++++++++ .../unified/change-streams.json | 63 +- .../createCollection-pre_and_post_images.json | 92 ++ .../modifyCollection-pre_and_post_images.json | 111 +++ test/unified_format.py | 2 + 9 files changed, 1076 insertions(+), 54 deletions(-) create mode 100644 test/change_streams/unified/change-streams-pre_and_post_images.json create mode 100644 test/collection_management/createCollection-pre_and_post_images.json create mode 100644 test/collection_management/modifyCollection-pre_and_post_images.json diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index b4bce8da59..d2d60e25a4 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -96,6 +96,7 @@ def __init__( session: Optional["ClientSession"], start_after: Optional[Mapping[str, Any]], comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> None: if pipeline is None: pipeline = [] @@ -118,6 +119,7 @@ def __init__( self._pipeline = copy.deepcopy(pipeline) self._full_document = full_document + self._full_document_before_change = full_document_before_change self._uses_start_after = start_after is not None self._uses_resume_after = resume_after is not None self._resume_token = copy.deepcopy(start_after or resume_after) @@ -147,6 +149,9 @@ def _change_stream_options(self): if self._full_document is not None: options["fullDocument"] = self._full_document + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + resume_token = self.resume_token if resume_token is not None: if self._uses_start_after: diff --git a/pymongo/collection.py b/pymongo/collection.py index 27550e0fb3..b43e06c2a4 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2501,6 +2501,7 @@ def watch( session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. @@ -2559,6 +2560,8 @@ def watch( updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. + - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events + may now result in a `fullDocumentBeforeChange` response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -2585,6 +2588,8 @@ def watch( :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -2613,7 +2618,8 @@ def watch( start_at_operation_time, session, start_after, - comment=comment, + comment, + full_document_before_change, ) def rename( diff --git a/pymongo/database.py b/pymongo/database.py index c9447c1a77..f764ade522 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -389,6 +389,8 @@ def create_collection( - ``pipeline`` (list): a list of aggregation pipeline stages - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. + - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for + enabling pre- and post-images. .. versionchanged:: 4.2 Added ``encrypted_fields`` parameter. @@ -530,6 +532,7 @@ def watch( session: Optional["ClientSession"] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. @@ -576,11 +579,13 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. + - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events + may now result in a `fullDocumentBeforeChange` response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -607,6 +612,9 @@ def watch( :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -631,7 +639,8 @@ def watch( start_at_operation_time, session, start_after, - comment=comment, + comment, + full_document_before_change, ) def _command( diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 5e4cf0d754..2a4b8a1d90 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -871,6 +871,7 @@ def watch( session: Optional[client_session.ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. @@ -922,6 +923,8 @@ def watch( updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred. + - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events + may now result in a `fullDocumentBeforeChange` response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token @@ -948,6 +951,9 @@ def watch( :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -972,7 +978,8 @@ def watch( start_at_operation_time, session, start_after, - comment=comment, + comment, + full_document_before_change, ) @property diff --git a/test/change_streams/unified/change-streams-pre_and_post_images.json b/test/change_streams/unified/change-streams-pre_and_post_images.json new file mode 100644 index 0000000000..8beefb2bc8 --- /dev/null +++ b/test/change_streams/unified/change-streams-pre_and_post_images.json @@ -0,0 +1,827 @@ +{ + "description": "change-streams-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "collMod", + "insert", + "update", + "getMore", + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "change-stream-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "change-stream-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocument": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocument:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocument": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocument": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "whenAvailable" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": null + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "whenAvailable" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:required with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "required" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "required" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + }, + { + "description": "fullDocumentBeforeChange:off with changeStreamPreAndPostImages disabled", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "fullDocumentBeforeChange": "off" + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "change-stream-tests", + "coll": "test" + }, + "updateDescription": { + "$$type": "object" + }, + "fullDocumentBeforeChange": { + "$$exists": false + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$changeStream": { + "fullDocumentBeforeChange": "off" + } + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 572d2d6e97..c8b60ed4e2 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -5,8 +5,7 @@ { "minServerVersion": "3.6", "topologies": [ - "replicaset", - "sharded-replicaset" + "replicaset" ], "serverless": "forbid" } @@ -314,10 +313,7 @@ "description": "Test that comment is set on getMore", "runOnRequirements": [ { - "minServerVersion": "4.4.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.4.0" } ], "operations": [ @@ -405,10 +401,7 @@ "description": "Test that comment is not set on getMore - pre 4.4", "runOnRequirements": [ { - "maxServerVersion": "4.3.99", - "topologies": [ - "replicaset" - ] + "maxServerVersion": "4.3.99" } ], "operations": [ @@ -806,10 +799,7 @@ "description": "$changeStream must be the first stage in a change stream pipeline sent to the server", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -882,10 +872,7 @@ "description": "The server returns change stream responses in the specified server response format", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -935,10 +922,7 @@ "description": "Executing a watch helper on a Collection results in notifications for changes to the specified collection", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -1023,10 +1007,7 @@ "description": "Change Stream should allow valid aggregate pipeline stages", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -1113,10 +1094,7 @@ "description": "Executing a watch helper on a Database results in notifications for changes to all collections in the specified database.", "runOnRequirements": [ { - "minServerVersion": "3.8.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.8.0" } ], "operations": [ @@ -1218,10 +1196,7 @@ "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ { - "minServerVersion": "3.8.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.8.0" } ], "operations": [ @@ -1342,10 +1317,7 @@ "description": "Test insert, update, replace, and delete event types", "runOnRequirements": [ { - "minServerVersion": "3.6.0", - "topologies": [ - "replicaset" - ] + "minServerVersion": "3.6.0" } ], "operations": [ @@ -1497,10 +1469,7 @@ "description": "Test rename and invalidate event types", "runOnRequirements": [ { - "minServerVersion": "4.0.1", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.0.1" } ], "operations": [ @@ -1577,10 +1546,7 @@ "description": "Test drop and invalidate event types", "runOnRequirements": [ { - "minServerVersion": "4.0.1", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.0.1" } ], "operations": [ @@ -1646,10 +1612,7 @@ "description": "Test consecutive resume", "runOnRequirements": [ { - "minServerVersion": "4.1.7", - "topologies": [ - "replicaset" - ] + "minServerVersion": "4.1.7" } ], "operations": [ diff --git a/test/collection_management/createCollection-pre_and_post_images.json b/test/collection_management/createCollection-pre_and_post_images.json new file mode 100644 index 0000000000..f488deacd8 --- /dev/null +++ b/test/collection_management/createCollection-pre_and_post_images.json @@ -0,0 +1,92 @@ +{ + "description": "createCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "createCollection with changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "papi-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + }, + "databaseName": "papi-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/collection_management/modifyCollection-pre_and_post_images.json b/test/collection_management/modifyCollection-pre_and_post_images.json new file mode 100644 index 0000000000..8026faeb17 --- /dev/null +++ b/test/collection_management/modifyCollection-pre_and_post_images.json @@ -0,0 +1,111 @@ +{ + "description": "modifyCollection-pre_and_post_images", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "papi-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "modifyCollection to changeStreamPreAndPostImages enabled", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "papi-tests", + "collectionName": "test" + } + }, + { + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "papi-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "changeStreamPreAndPostImages": { + "enabled": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "collMod": "test", + "changeStreamPreAndPostImages": { + "enabled": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index cdba80c23e..cb69882b2c 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -844,6 +844,8 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support timeoutMode") if name == "createEntities": self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) + if name == "modifyCollection": + self.skipTest("PyMongo does not support modifyCollection") def maybe_skip_entity(self, entities): for entity in entities: From dc21a083f47e23a953032610448f3caaaa34f496 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Jun 2022 12:25:11 -0700 Subject: [PATCH 0690/2111] PYTHON-3300 Add Explicit Queryable Encryption Example to Docs (#973) --- doc/examples/encryption.rst | 136 ++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 5568b0d741..941e1bd029 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -409,6 +409,142 @@ Automatic encryption in Queryable Encryption is configured with an ``encrypted_f In the above example, the ``firstName`` and ``lastName`` fields are automatically encrypted and decrypted. +Explicit Queryable Encryption (Beta) +```````````````````````````````````` + +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +You must have MongoDB 6.0rc8+ to preview the capability. + +Until PyMongo 4.2 release is finalized, it can be installed using:: + + pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" + +Additionally, ``libmongocrypt`` must be installed from `source `_. + +Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` +methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured +using an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, AutoEncryptionOpts, + ClientEncryption, QueryType) + + + def main(): + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # Set up the key vault (key_vault_namespace) for this example. + client = MongoClient() + key_vault = client[key_vault_db_name][key_vault_coll_name] + + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + client.codec_options) + + # Create a new data key for the encryptedField. + indexed_key_id = client_encryption.create_data_key( + 'local') + unindexed_key_id = client_encryption.create_data_key( + 'local') + + encrypted_fields = { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality" + } + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + } + ] + } + + opts = AutoEncryptionOpts( + {"local": {"key": local_master_key}}, + key_vault.full_name, + bypass_query_analysis=True, + key_vault_client=client, + ) + + # The MongoClient used to read/write application data. + encrypted_client = MongoClient(auto_encryption_opts=opts) + encrypted_client.drop_database("test") + db = encrypted_client.test + + # Create the collection with encrypted fields. + coll = db.create_collection("coll", encrypted_fields=encrypted_fields) + + # Create and encrypt an indexed and unindexed value. + val = "encrypted indexed value" + unindexed_val = "encrypted unindexed value" + insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id) + insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, + unindexed_key_id) + + # Insert the payloads. + coll.insert_one({ + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed + }) + + # Encrypt our find payload using QueryType.EQUALITY. + # The value of "data_key_id" must be the same as used to encrypt the values + # above. + find_payload = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY + ) + + # Find the document we inserted using the encrypted payload. + # The returned document is automatically decrypted. + doc = coll.find_one({"encryptedIndexed": find_payload}) + print('Returned document: %s' % (doc,)) + + # Cleanup resources. + client_encryption.close() + encrypted_client.close() + + + if __name__ == "__main__": + main() + .. _explicit-client-side-encryption: Explicit Encryption From f45f00b4e53fd92702b50757b9cbcf9a4458f6cb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Jun 2022 13:16:07 -0700 Subject: [PATCH 0691/2111] PYTHON-3300 Fix Explicit Queryable Encryption Example (#975) --- doc/examples/encryption.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 941e1bd029..0e349f48da 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -540,6 +540,7 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # Cleanup resources. client_encryption.close() encrypted_client.close() + client.close() if __name__ == "__main__": From 02a9df69f66cf34e3d3858e49152af2ad2c88bfd Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Jun 2022 13:16:22 -0700 Subject: [PATCH 0692/2111] PYTHON-3227 Clustered Indexes for all Collections (#971) --- pymongo/collection.py | 6 +- pymongo/database.py | 66 ++++--- .../clustered-indexes.json | 177 ++++++++++++++++++ test/test_encryption.py | 4 +- test/unified_format.py | 2 +- test/utils.py | 2 - 6 files changed, 223 insertions(+), 34 deletions(-) create mode 100644 test/collection_management/clustered-indexes.json diff --git a/pymongo/collection.py b/pymongo/collection.py index b43e06c2a4..afaef480cc 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -117,7 +117,6 @@ def __init__( read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, timeout: Optional[float] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -159,13 +158,11 @@ def __init__( - `session` (optional): a :class:`~pymongo.client_session.ClientSession` that is used with the create collection command - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. If provided it will be passed to the create collection command. - `**kwargs` (optional): additional keyword arguments will be passed as options for the create collection command .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. .. versionchanged:: 4.0 Removed the reindex, map_reduce, inline_map_reduce, @@ -222,6 +219,7 @@ def __init__( self.__database: Database[_DocumentType] = database self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) + encrypted_fields = kwargs.pop("encryptedFields", None) if create or kwargs or collation: if encrypted_fields: common.validate_is_mapping("encrypted_fields", encrypted_fields) diff --git a/pymongo/database.py b/pymongo/database.py index f764ade522..d3746b0c55 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -304,7 +304,6 @@ def create_collection( read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, timeout: Optional[float] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this @@ -336,28 +335,6 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. For example:: - - { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - - } } - `**kwargs` (optional): additional keyword arguments will be passed as options for the `create collection command`_ @@ -389,11 +366,42 @@ def create_collection( - ``pipeline`` (list): a list of aggregation pipeline stages - ``comment`` (str): a user-provided comment to attach to this command. This option is only supported on MongoDB >= 4.4. + - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + - ``clusteredIndex`` (dict): Document that specifies the clustered index + configuration. It must have the following form:: + + { + // key pattern must be {_id: 1} + key: , // required + unique: , // required, must be ‘true’ + name: , // optional, otherwise automatically generated + v: , // optional, must be ‘2’ if provided + } - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for enabling pre- and post-images. .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. .. versionchanged:: 3.11 This method is now supported inside multi-document transactions @@ -411,6 +419,7 @@ def create_collection( .. _create collection command: https://mongodb.com/docs/manual/reference/command/create """ + encrypted_fields = kwargs.get("encryptedFields") if ( not encrypted_fields and self.client.options.auto_encryption_opts @@ -419,8 +428,14 @@ def create_collection( encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( "%s.%s" % (self.name, name) ) + kwargs["encryptedFields"] = encrypted_fields + if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) + common.validate_is_mapping("encryptedFields", encrypted_fields) + + clustered_index = kwargs.get("clusteredIndex") + if clustered_index: + common.validate_is_mapping("clusteredIndex", clustered_index) with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not @@ -439,7 +454,6 @@ def create_collection( read_concern, session=s, timeout=timeout, - encrypted_fields=encrypted_fields, **kwargs, ) diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json new file mode 100644 index 0000000000..739d0fd8b6 --- /dev/null +++ b/test/collection_management/clustered-indexes.json @@ -0,0 +1,177 @@ +{ + "description": "clustered-indexes", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "5.3", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "ts-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "ts-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "createCollection with clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ] + }, + { + "description": "listCollections includes clusteredIndex", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listCollections", + "object": "database0", + "arguments": { + "filter": { + "name": { + "$eq": "test" + } + } + }, + "expectResult": [ + { + "name": "test", + "options": { + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index", + "v": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "listIndexes returns the index", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + } + }, + { + "name": "listIndexes", + "object": "collection0", + "expectResult": [ + { + "key": { + "_id": 1 + }, + "name": "test index", + "clustered": true, + "unique": true, + "v": { + "$$type": [ + "int", + "long" + ] + } + } + ] + } + ] + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index e5a9666d2c..209308aba6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -658,7 +658,9 @@ def setup_scenario(self, scenario_def): kwargs["codec_options"] = OPTS if not data: kwargs["write_concern"] = wc - db.create_collection(coll_name, **kwargs, encrypted_fields=encrypted_fields) + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) coll = db[coll_name] if data: # Load data. diff --git a/test/unified_format.py b/test/unified_format.py index cb69882b2c..001af4434c 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -996,7 +996,7 @@ def _collectionOperation_count(self, target, *args, **kwargs): def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: self.skipTest("PyMongo does not support batch_size for list_indexes") - return target.list_indexes(*args, **kwargs) + return list(target.list_indexes(*args, **kwargs)) def _collectionOperation_listIndexNames(self, target, *args, **kwargs): self.skipTest("PyMongo does not support list_index_names") diff --git a/test/utils.py b/test/utils.py index 1aeb7571ab..7071764b15 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1002,8 +1002,6 @@ def parse_spec_options(opts): if "maxCommitTimeMS" in opts: opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - if "encryptedFields" in opts: - opts["encrypted_fields"] = opts.pop("encryptedFields") if "hint" in opts: hint = opts.pop("hint") if not isinstance(hint, str): From 922e63d6e0b235d13ee81739aceecf0cb92a4dd3 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 16 Jun 2022 11:40:09 -0700 Subject: [PATCH 0693/2111] PYTHON-3093 Continuation of #972 (#976) --- pymongo/collection.py | 16 +++++++++------- pymongo/database.py | 16 +++++++++------- pymongo/mongo_client.py | 16 +++++++++------- 3 files changed, 27 insertions(+), 21 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index afaef480cc..0088388624 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2553,13 +2553,15 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events - may now result in a `fullDocumentBeforeChange` response field. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token diff --git a/pymongo/database.py b/pymongo/database.py index d3746b0c55..fcf1f3e36c 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -593,13 +593,15 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', 'whenAvailable', 'required'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events - may now result in a `fullDocumentBeforeChange` response field. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 2a4b8a1d90..4b20c2e5b7 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -918,13 +918,15 @@ def watch( pipeline stages are valid after a ``$changeStream`` stage, see the MongoDB documentation on change streams for the supported stages. - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup'. - When set to 'updateLookup', the change notification for partial - updates will include both a delta describing the changes to the - document, as well as a copy of the entire document that was - changed from some time after the change occurred. - - `full_document_before_change`: Allowed values: `whenAvailable` and `required`. Change events - may now result in a `fullDocumentBeforeChange` response field. + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + - `full_document_before_change`: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. - `resume_after` (optional): A resume token. If provided, the change stream will start returning changes that occur directly after the operation specified in the resume token. A resume token From 4ae93c49378d73e4af127ca65030a517fd814f34 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 16 Jun 2022 15:26:27 -0700 Subject: [PATCH 0694/2111] PYTHON-1552 Prevent uploading partial or corrupt GridFS files after an error occurs --- doc/migrate-to-pymongo4.rst | 11 +++++------ gridfs/__init__.py | 30 ++++++++++++------------------ gridfs/grid_file.py | 9 +++++++-- test/test_grid_file.py | 16 ++++++++++++++++ test/test_gridfs.py | 2 +- 5 files changed, 41 insertions(+), 27 deletions(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index eca479c7c7..5843a2261b 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -879,12 +879,11 @@ and store it with other file metadata. For example:: import hashlib my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream("test_file") - file_data = b'...' - sha356 = hashlib.sha256(file_data).hexdigest() - grid_in.write(file_data) - grid_in.sha356 = sha356 # Set the custom 'sha356' field - grid_in.close() + with fs.open_upload_stream("test_file") as grid_in: + file_data = b'...' + sha356 = hashlib.sha256(file_data).hexdigest() + grid_in.write(file_data) + grid_in.sha356 = sha356 # Set the custom 'sha356' field Note that for large files, the checksum may need to be computed in chunks to avoid the excessive memory needed to load the entire file at once. diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 5675e8f937..29d582cd21 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -109,11 +109,8 @@ def put(self, data: Any, **kwargs: Any) -> Any: Equivalent to doing:: - try: - f = new_file(**kwargs) + with fs.new_file(**kwargs) as f: f.write(data) - finally: - f.close() `data` can be either an instance of :class:`bytes` or a file-like object providing a :meth:`read` method. If an `encoding` keyword @@ -134,13 +131,10 @@ def put(self, data: Any, **kwargs: Any) -> Any: .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. """ - grid_file = GridIn(self.__collection, **kwargs) - try: - grid_file.write(data) - finally: - grid_file.close() - return grid_file._id + with GridIn(self.__collection, **kwargs) as grid_file: + grid_file.write(data) + return grid_file._id def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: """Get a file from GridFS by ``"_id"``. @@ -528,11 +522,11 @@ def open_upload_stream( my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream( + with fs.open_upload_stream( "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - grid_in.write("data I want to store!") - grid_in.close() # uploaded on close + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. @@ -584,13 +578,13 @@ def open_upload_stream_with_id( my_db = MongoClient().test fs = GridFSBucket(my_db) - grid_in = fs.open_upload_stream_with_id( + with fs.open_upload_stream_with_id( ObjectId(), "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - grid_in.write("data I want to store!") - grid_in.close() # uploaded on close + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close Returns an instance of :class:`~gridfs.grid_file.GridIn`. diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 5d63d5c653..cec7d57a22 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -396,9 +396,14 @@ def __enter__(self) -> "GridIn": def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Support for the context manager protocol. - Close the file and allow exceptions to propagate. + Close the file if no exceptions occur and allow exceptions to propagate. """ - self.close() + if exc_type is None: + # No exceptions happened. + self.close() + else: + # Something happened, at minimum mark as closed. + object.__setattr__(self, "_closed", True) # propagate exceptions return False diff --git a/test/test_grid_file.py b/test/test_grid_file.py index b9fdeacef7..8b46133a60 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -675,6 +675,22 @@ def test_context_manager(self): with GridOut(self.db.fs, infile._id) as outfile: self.assertEqual(contents, outfile.read()) + def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + with GridIn(self.db.fs, filename="important") as infile: + infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. + self.assertEqual( + self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + + self.assertIsNone(self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + def test_prechunked_string(self): def write_me(s, chunk_size): buf = BytesIO(s) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index ec88dcd488..35a574a1d9 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -540,7 +540,7 @@ def test_gridfs_secondary_lazy(self): # Connects, doesn't create index. self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotPrimaryError, fs.put, "data") + self.assertRaises(NotPrimaryError, fs.put, "data", encoding="utf-8") if __name__ == "__main__": From 3169f1fe314e448dea7126ba85ac0d2cd7ea836e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 21 Jun 2022 10:46:49 -0700 Subject: [PATCH 0695/2111] PYTHON-3310 Test Failure - query_type must be str or None, not: (#978) --- pymongo/encryption.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 0a8bf69a38..a49cf7df5a 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -394,7 +394,7 @@ class Algorithm(str, enum.Enum): """ -class QueryType(enum.IntEnum): +class QueryType(str, enum.Enum): """**(BETA)** An enum that defines the supported values for explicit encryption query_type. .. note:: Support for Queryable Encryption is in beta. @@ -403,7 +403,7 @@ class QueryType(enum.IntEnum): .. versionadded:: 4.2 """ - EQUALITY = 1 + EQUALITY = "equality" """Used to encrypt a value for an equality query.""" @@ -599,7 +599,7 @@ def encrypt( key_id: Optional[Binary] = None, key_alt_name: Optional[str] = None, index_key_id: Optional[Binary] = None, - query_type: Optional[int] = None, + query_type: Optional[str] = None, contention_factor: Optional[int] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -617,7 +617,7 @@ def encrypt( - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. Must be a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - - `query_type` (int): **(BETA)** The query type to execute. See + - `query_type` (str): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): **(BETA)** The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. From 1f7f46faa2d5a20f2f175499b161155f8bf9ae50 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 21 Jun 2022 15:30:17 -0700 Subject: [PATCH 0696/2111] PYTHON-3282 Add comment option tests for distinct helper (#979) --- test/crud/unified/distinct-comment.json | 178 ++++++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 test/crud/unified/distinct-comment.json diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json new file mode 100644 index 0000000000..0669d4f30a --- /dev/null +++ b/test/crud/unified/distinct-comment.json @@ -0,0 +1,178 @@ +{ + "description": "distinct-comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-comment-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-comment-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with document comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.14" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectResult": [ 11, 22, 33 ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with string comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": "comment" + }, + "expectResult": [ 11, 22, 33 ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": "comment" + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with document comment - pre 4.4, server error", + "runOnRequirements": [ + { + "minServerVersion": "3.6.0", + "maxServerVersion": "4.4.13" + } + ], + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": {}, + "comment": { + "key": "value" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": {}, + "comment": { + "key": "value" + } + }, + "commandName": "distinct", + "databaseName": "distinct-comment-tests" + } + } + ] + } + ] + } + ] +} From ae71872fa97b50fd85ef8efe4b8d0b2a362bd6f7 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 24 Jun 2022 10:52:09 -0700 Subject: [PATCH 0697/2111] PYTHON-3297 Test auto decryption occurs after CommandSucceeded events (#980) --- test/test_encryption.py | 78 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index 209308aba6..f2a02780b3 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -61,6 +61,7 @@ from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( + AutoReconnect, BulkWriteError, ConfigurationError, EncryptionError, @@ -1769,6 +1770,83 @@ def test_case_8(self): self.assertEqual(len(self.topology_listener.results["opened"]), 1) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +class TestDecryptProse(EncryptionIntegrationTest): + def setUp(self): + self.client = client_context.client + self.client.db.drop_collection("decryption_events") + self.client.keyvault.drop_collection("datakeys") + self.client.keyvault.datakeys.create_index( + "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + ) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = self.client_encryption.create_data_key("local") + self.cipher_text = self.client_encryption.encrypt( + "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + if self.cipher_text[-1] == 0: + self.malformed_cipher_text = self.cipher_text[:-1] + b"1" + else: + self.malformed_cipher_text = self.cipher_text[:-1] + b"0" + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = MongoClient( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + self.addCleanup(self.encrypted_client.close) + + def test_01_command_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.results["failed"]), 1) + for event in self.listener.results["failed"]: + self.assertEqual(event.failure["code"], 123) + + def test_02_network_error(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.results["failed"]), 1) + self.assertEqual(self.listener.results["failed"][0].command_name, "aggregate") + + def test_03_decrypt_error(self): + self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.results["succeeded"][0] + self.assertEqual(len(self.listener.results["failed"]), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text + ) + + def test_04_decrypt_success(self): + self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + next(self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.results["succeeded"][0] + self.assertEqual(len(self.listener.results["failed"]), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): @unittest.skipIf( From f2902902613b708c071a7c4a6d78cf23fb0f030b Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 29 Jun 2022 12:18:52 -0700 Subject: [PATCH 0698/2111] PYTHON-3097 Language specific examples for AWS Lambda (#984) --- test/auth_aws/test_auth_aws.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 750d18c4fe..a63e60718c 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -54,5 +54,37 @@ def test_connect_uri(self): client.get_database().test.find_one() +class TestAWSLambdaExamples(unittest.TestCase): + def test_shared_client(self): + # Start AWS Lambda Example 1 + import os + + from pymongo import MongoClient + + client = MongoClient(host=os.environ["MONGODB_URI"]) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 1 + + def test_IAM_auth(self): + # Start AWS Lambda Example 2 + import os + + from pymongo import MongoClient + + client = MongoClient( + host=os.environ["MONGODB_URI"], + authSource="$external", + authMechanism="MONGODB-AWS", + ) + + def lambda_handler(event, context): + return client.db.command("ping") + + # End AWS Lambda Example 2 + + if __name__ == "__main__": unittest.main() From 6ed38529e81df9cc39693a269574baeca9a35f4b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jun 2022 15:11:13 -0500 Subject: [PATCH 0699/2111] PYTHON-3286 Update expected FLE 2 find payloads in tests (#988) --- test/client-side-encryption/spec/legacy/fle2-Delete.json | 2 +- .../spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json | 2 +- .../spec/legacy/fle2-FindOneAndUpdate.json | 4 ++-- .../spec/legacy/fle2-InsertFind-Indexed.json | 2 +- test/client-side-encryption/spec/legacy/fle2-Update.json | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json index 790e818295..0e3e06396e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2-Delete.json @@ -225,7 +225,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json index 69abfa7cfb..1d3227ee7f 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json @@ -230,7 +230,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json index b8088515ca..b31438876f 100644 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json @@ -230,7 +230,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } @@ -490,7 +490,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json index 142cacf2fd..81a549590e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json @@ -226,7 +226,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json index 66a291902a..87830af32d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2-Update.json @@ -232,7 +232,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } @@ -496,7 +496,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BYkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcSY20AAAAAAAAAAAAA", + "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", "subType": "06" } } From bacaf7fa50479b79ceda0ad6f32cbe3ae0d2dec8 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 29 Jun 2022 13:11:25 -0700 Subject: [PATCH 0700/2111] PYTHON-3309 Explicit Queryable Encryption doc example needs to utilize index_key_Id (#986) --- doc/examples/encryption.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 0e349f48da..5c3dc0864b 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -510,12 +510,12 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: db = encrypted_client.test # Create the collection with encrypted fields. - coll = db.create_collection("coll", encrypted_fields=encrypted_fields) + coll = db.create_collection("coll", encryptedFields=encrypted_fields) # Create and encrypt an indexed and unindexed value. val = "encrypted indexed value" unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id) + insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, unindexed_key_id) @@ -529,7 +529,7 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # The value of "data_key_id" must be the same as used to encrypt the values # above. find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY + val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 ) # Find the document we inserted using the encrypted payload. From 06310391185cad2af5310fa75e68d17b1f46522a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jun 2022 16:08:38 -0500 Subject: [PATCH 0701/2111] PYTHON-3319 Require contentionFactor for "Indexed" explicit encryption (#987) --- pymongo/encryption.py | 4 +++- test/test_encryption.py | 13 ++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index a49cf7df5a..096090e4af 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -620,7 +620,9 @@ def encrypt( - `query_type` (str): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): **(BETA)** The contention factor to use - when the algorithm is :attr:`Algorithm.INDEXED`. + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. .. note:: `index_key_id`, `query_type`, and `contention_factor` are part of the Queryable Encryption beta. Backwards-breaking changes may be made before the diff --git a/test/test_encryption.py b/test/test_encryption.py index f2a02780b3..458dd68f32 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2098,13 +2098,15 @@ def setUp(self): def test_01_insert_encrypted_indexed_and_find(self): val = "encrypted indexed value" - insert_payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + insert_payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) self.encrypted_client[self.db.name].explicit_encryption.insert_one( {"encryptedIndexed": insert_payload} ) find_payload = self.client_encryption.encrypt( - val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) docs = list( self.encrypted_client[self.db.name].explicit_encryption.find( @@ -2125,9 +2127,8 @@ def test_02_insert_encrypted_indexed_and_find_contention(self): {"encryptedIndexed": insert_payload} ) - # Find without contention_factor non-deterministically returns 0-9 documents. find_payload = self.client_encryption.encrypt( - val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) docs = list( self.encrypted_client[self.db.name].explicit_encryption.find( @@ -2168,7 +2169,9 @@ def test_03_insert_encrypted_unindexed(self): def test_04_roundtrip_encrypted_indexed(self): val = "encrypted indexed value" - payload = self.client_encryption.encrypt(val, Algorithm.INDEXED, self.key1_id) + payload = self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) decrypted = self.client_encryption.decrypt(payload) self.assertEqual(decrypted, val) From b37b146ac88fc9647c3effd6e031dbf5cbee3cf5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 30 Jun 2022 12:35:29 -0500 Subject: [PATCH 0702/2111] PYTHON-3053 Key Management API (#958) --- pymongo/encryption.py | 189 +++++++++++++++- setup.py | 2 +- test/__init__.py | 22 ++ .../spec/unified/addKeyAltName.json | 6 +- ... createDataKey-kms_providers-invalid.json} | 17 +- .../{createKey.json => createDataKey.json} | 22 +- .../spec/unified/deleteKey.json | 6 +- .../spec/unified/getKey.json | 6 +- .../spec/unified/getKeyByAltName.json | 6 +- .../spec/unified/getKeys.json | 6 +- .../spec/unified/removeKeyAltName.json | 196 ++++++++++++----- .../spec/unified/rewrapManyDataKey.json | 204 +++++++++++++----- test/test_encryption.py | 92 +++++--- ...ntEncryptionOpts-additionalProperties.json | 30 +++ ...ncryptionOpts-keyVaultClient-required.json | 23 ++ ...entEncryptionOpts-keyVaultClient-type.json | 29 +++ ...yptionOpts-keyVaultNamespace-required.json | 28 +++ ...EncryptionOpts-keyVaultNamespace-type.json | 29 +++ ...pts-kmsProviders-additionalProperties.json | 29 +++ ...kmsProviders-aws-additionalProperties.json | 31 +++ ...tEncryptionOpts-kmsProviders-aws-type.json | 29 +++ ...sProviders-azure-additionalProperties.json | 31 +++ ...ncryptionOpts-kmsProviders-azure-type.json | 29 +++ ...kmsProviders-gcp-additionalProperties.json | 31 +++ ...tEncryptionOpts-kmsProviders-gcp-type.json | 29 +++ ...msProviders-kmip-additionalProperties.json | 31 +++ ...EncryptionOpts-kmsProviders-kmip-type.json | 29 +++ ...sProviders-local-additionalProperties.json | 31 +++ ...ncryptionOpts-kmsProviders-local-type.json | 29 +++ ...tEncryptionOpts-kmsProviders-required.json | 26 +++ ...lientEncryptionOpts-kmsProviders-type.json | 27 +++ ...cryptionOpts-tlsOptions_not_supported.json | 30 +++ ...clientEncryption-additionalProperties.json | 30 +++ ...ryption-clientEncryptionOpts-required.json | 17 ++ ...tEncryption-clientEncryptionOpts-type.json | 18 ++ .../entity-clientEncryption-id-required.json | 28 +++ .../entity-clientEncryption-id-type.json | 29 +++ .../invalid/runOnRequirement-csfle-type.json | 15 ++ ...Providers-missing_aws_kms_credentials.json | 36 ++++ ...oviders-missing_azure_kms_credentials.json | 36 ++++ ...Providers-missing_gcp_kms_credentials.json | 36 ++++ .../valid-fail/kmsProviders-no_kms.json | 32 +++ .../valid-fail/operation-unsupported.json | 22 ++ .../collectionData-createOptions.json | 3 +- ...kmsProviders-explicit_kms_credentials.json | 52 +++++ ...Providers-mixed_kms_credential_fields.json | 54 +++++ ...Providers-placeholder_kms_credentials.json | 70 ++++++ .../kmsProviders-unconfigured_kms.json | 39 ++++ test/unified_format.py | 103 ++++++++- 49 files changed, 1780 insertions(+), 165 deletions(-) rename test/client-side-encryption/spec/unified/{createKey-kms_providers-invalid.json => createDataKey-kms_providers-invalid.json} (86%) rename test/client-side-encryption/spec/unified/{createKey.json => createDataKey.json} (97%) create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json create mode 100644 test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-id-required.json create mode 100644 test/unified-test-format/invalid/entity-clientEncryption-id-type.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-csfle-type.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json create mode 100644 test/unified-test-format/valid-fail/kmsProviders-no_kms.json create mode 100644 test/unified-test-format/valid-fail/operation-unsupported.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json create mode 100644 test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 096090e4af..b792a4487e 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -17,7 +17,6 @@ import contextlib import enum import socket -import uuid import weakref from typing import Any, Mapping, Optional, Sequence @@ -40,6 +39,7 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON from pymongo import _csot +from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import ( @@ -50,8 +50,10 @@ ) from pymongo.mongo_client import MongoClient from pymongo.network import BLOCKING_IO_ERRORS +from pymongo.operations import UpdateOne from pymongo.pool import PoolOptions, _configured_socket from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -60,10 +62,11 @@ _KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. _MONGOCRYPTD_TIMEOUT_MS = 10000 + _DATA_KEY_OPTS: CodecOptions = CodecOptions(document_class=SON, uuid_representation=STANDARD) # Use RawBSONDocument codec options to avoid needlessly decoding # documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument, uuid_representation=STANDARD) +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) @contextlib.contextmanager @@ -225,11 +228,11 @@ def insert_data_key(self, data_key): """ raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) data_key_id = raw_doc.get("_id") - if not isinstance(data_key_id, uuid.UUID): - raise TypeError("data_key _id must be a UUID") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError("data_key _id must be Binary with a UUID subtype") self.key_vault_coll.insert_one(raw_doc) - return Binary(data_key_id.bytes, subtype=UUID_SUBTYPE) + return data_key_id def bson_encode(self, doc): """Encode a document to BSON. @@ -256,6 +259,30 @@ def close(self): self.mongocryptd_client = None +class RewrapManyDataKeyResult(object): + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + """Result object returned by a ``rewrap_many_data_key`` operation. + + :Parameters: + - `bulk_write_result`: The result of the bulk write operation used to + update the key vault collection with one or more rewrapped data keys. + If ``rewrap_many_data_key()`` does not find any matching keys to + rewrap, no bulk write operation will be executed and this field will + be ``None``. + """ + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + ``rewrap_many_data_key()`` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + class _Encrypter(object): """Encrypts and decrypts MongoDB commands. @@ -514,12 +541,15 @@ def __init__( self._encryption = ExplicitEncrypter( self._io_callbacks, MongoCryptOptions(kms_providers, None) ) + # Use the same key vault collection as the callback. + self._key_vault_coll = self._io_callbacks.key_vault_coll def create_data_key( self, kms_provider: str, master_key: Optional[Mapping[str, Any]] = None, key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, ) -> Binary: """Create and insert a new data key into the key vault collection. @@ -580,16 +610,24 @@ def create_data_key( # reference the key with the alternate name client_encryption.encrypt("457-55-5462", keyAltName="name1", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + - `key_material` (optional): Sets the custom key material to be used + by the data key for encryption and decryption. :Returns: The ``_id`` of the created data key document as a :class:`~bson.binary.Binary` with subtype :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. """ self._check_closed() with _wrap_encryption_errors(): return self._encryption.create_data_key( - kms_provider, master_key=master_key, key_alt_names=key_alt_names + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, ) def encrypt( @@ -676,6 +714,145 @@ def decrypt(self, value: Binary) -> Any: decrypted_doc = self._encryption.decrypt(doc) return decode(decrypted_doc, codec_options=self._codec_options)["v"] + def get_key(self, id: Binary) -> Optional[RawBSONDocument]: + """Get a data key by id. + + :Parameters: + - `id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :Returns: + The key document. + """ + self._check_closed() + return self._key_vault_coll.find_one({"_id": id}) + + def get_keys(self) -> Cursor[RawBSONDocument]: + """Get all of the data keys. + + :Returns: + An instance of :class:`~pymongo.cursor.Cursor` over the data key + documents. + """ + self._check_closed() + return self._key_vault_coll.find({}) + + def delete_key(self, id: Binary) -> DeleteResult: + """Delete a key document in the key vault collection that has the given ``key_id``. + + :Parameters: + - `id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :Returns: + The delete result. + """ + self._check_closed() + return self._key_vault_coll.delete_one({"_id": id}) + + def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: + """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. + + :Parameters: + - ``id``: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - ``key_alt_name``: The key alternate name to add. + + :Returns: + The previous version of the key document. + """ + self._check_closed() + update = {"$addToSet": {"keyAltNames": key_alt_name}} + return self._key_vault_coll.find_one_and_update({"_id": id}, update) + + def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: + """Get a key document in the key vault collection that has the given ``key_alt_name``. + + :Parameters: + - `key_alt_name`: (str): The key alternate name of the key to get. + + :Returns: + The key document. + """ + self._check_closed() + return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) + + def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: + """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. + + Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. + + :Parameters: + - ``id``: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - ``key_alt_name``: The key alternate name to remove. + + :Returns: + Returns the previous version of the key document. + """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :Parameters: + - `filter`: A document used to filter the data keys. + - `provider`: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + - ``master_key``: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :Returns: + A :class:`RewrapManyDataKeyResult`. + """ + self._check_closed() + with _wrap_encryption_errors(): + raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + result = self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + def __enter__(self) -> "ClientEncryption": return self diff --git a/setup.py b/setup.py index c6b32b9fba..a61f56c3f6 100755 --- a/setup.py +++ b/setup.py @@ -277,7 +277,7 @@ def build_extension(self, ext): extras_require = { "encryption": [ - "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@pymongocrypt-1.3.0b0#subdirectory=bindings/python" + "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@161dbc8ae#subdirectory=bindings/python" ], "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], diff --git a/test/__init__.py b/test/__init__.py index 64c812c112..4ecc3c9e9e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -15,6 +15,7 @@ """Test suite for pymongo, bson, and gridfs. """ +import base64 import gc import os import socket @@ -116,6 +117,27 @@ COMPRESSORS = COMPRESSORS or "zlib" +# Shared KMS data. +LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} + + def is_server_resolvable(): """Returns True if 'server' is resolvable.""" socket_timeout = socket.getdefaulttimeout() diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json index 7dc371143b..8b6c174cbc 100644 --- a/test/client-side-encryption/spec/unified/addKeyAltName.json +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json similarity index 86% rename from test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json rename to test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json index b2c8d83e05..16cf6ca70d 100644 --- a/test/client-side-encryption/spec/unified/createKey-kms_providers-invalid.json +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -1,5 +1,5 @@ { - "description": "createKey-provider-invalid", + "description": "createDataKey-provider-invalid", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -24,7 +24,14 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "aws": {} + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } } } } @@ -35,7 +42,7 @@ "description": "create data key without required master key fields", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "aws", @@ -59,7 +66,7 @@ "description": "create data key with invalid master key field", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -85,7 +92,7 @@ "description": "create data key with invalid master key", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "aws", diff --git a/test/client-side-encryption/spec/unified/createKey.json b/test/client-side-encryption/spec/unified/createDataKey.json similarity index 97% rename from test/client-side-encryption/spec/unified/createKey.json rename to test/client-side-encryption/spec/unified/createDataKey.json index adb3fff20d..110c726f9a 100644 --- a/test/client-side-encryption/spec/unified/createKey.json +++ b/test/client-side-encryption/spec/unified/createDataKey.json @@ -1,5 +1,5 @@ { - "description": "createKey", + "description": "createDataKey", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -90,7 +90,7 @@ "description": "create data key with AWS KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "aws", @@ -153,7 +153,7 @@ "description": "create datakey with Azure KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "azure", @@ -216,7 +216,7 @@ "description": "create datakey with GCP KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "gcp", @@ -283,7 +283,7 @@ "description": "create datakey with KMIP KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "kmip" @@ -341,7 +341,7 @@ "description": "create datakey with local KMS provider", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local" @@ -396,7 +396,7 @@ "description": "create datakey with no keyAltName", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -457,7 +457,7 @@ "description": "create datakey with single keyAltName", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -520,7 +520,7 @@ "description": "create datakey with multiple keyAltNames", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -619,7 +619,7 @@ "description": "create datakey with custom key material", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -682,7 +682,7 @@ "description": "create datakey with invalid custom key material (too short)", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", diff --git a/test/client-side-encryption/spec/unified/deleteKey.json b/test/client-side-encryption/spec/unified/deleteKey.json index a3b2f98a50..3a10fb082f 100644 --- a/test/client-side-encryption/spec/unified/deleteKey.json +++ b/test/client-side-encryption/spec/unified/deleteKey.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json index f2f2c68113..6a7269b2ca 100644 --- a/test/client-side-encryption/spec/unified/getKey.json +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json index 18ed2e1943..f94459bbd8 100644 --- a/test/client-side-encryption/spec/unified/getKeyByAltName.json +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } diff --git a/test/client-side-encryption/spec/unified/getKeys.json b/test/client-side-encryption/spec/unified/getKeys.json index bd07af3804..d944712357 100644 --- a/test/client-side-encryption/spec/unified/getKeys.json +++ b/test/client-side-encryption/spec/unified/getKeys.json @@ -87,7 +87,7 @@ "description": "getKeys with single key documents", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local", @@ -160,7 +160,7 @@ "description": "getKeys with many key documents", "operations": [ { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local" @@ -170,7 +170,7 @@ } }, { - "name": "createKey", + "name": "createDataKey", "object": "clientEncryption0", "arguments": { "kmsProvider": "local" diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json index f94d9b02dc..bef13c87de 100644 --- a/test/client-side-encryption/spec/unified/removeKeyAltName.json +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -22,7 +22,11 @@ "keyVaultClient": "client0", "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "local": {} + "local": { + "key": { + "$$placeholder": 1 + } + } } } } @@ -118,11 +122,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "does_not_exist" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -239,11 +268,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "does_not_exist" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "does_not_exist" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "does_not_exist" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -378,11 +432,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "alternate_name" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -501,11 +580,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "alternate_name" + "update": [ + { + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "alternate_name" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "alternate_name" + ] + } + } + } + ] + } + } } - }, + ], "writeConcern": { "w": "majority" } @@ -525,42 +629,36 @@ } } }, - "update": { - "$pull": { - "keyAltNames": "local_key" - } - }, - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "commandStartedEvent": { - "databaseName": "keyvault", - "command": { - "update": "datakeys", - "updates": [ + "update": [ { - "q": { - "_id": { - "$binary": { - "base64": "bG9jYWxrZXlsb2NhbGtleQ==", - "subType": "04" - } - } - }, - "u": { - "$unset": { - "keyAltNames": true + "$set": { + "keyAltNames": { + "$cond": [ + { + "$eq": [ + "$keyAltNames", + [ + "local_key" + ] + ] + }, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": { + "$ne": [ + "$$this", + "local_key" + ] + } + } + } + ] } } } - ], - "writeConcern": { - "w": "majority" - } + ] } } } diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index ed7568ca4d..7e3abb1274 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -266,7 +266,9 @@ } }, "expectResult": { - "bulkWriteResult": {} + "bulkWriteResult": { + "$$exists": false + } } } ], @@ -372,8 +374,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -396,8 +402,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -420,8 +430,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -444,8 +458,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -538,8 +556,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -562,8 +584,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -586,8 +612,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -610,8 +640,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -708,8 +742,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -734,8 +772,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -760,8 +802,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -786,8 +832,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -877,8 +927,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -902,8 +956,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -927,8 +985,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -952,8 +1014,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -1040,8 +1106,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1062,8 +1132,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1084,8 +1158,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1106,8 +1184,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -1262,8 +1344,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1284,8 +1370,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1306,8 +1396,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1328,8 +1422,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } }, { "q": { @@ -1350,8 +1448,12 @@ "updateDate": true } }, - "upsert": false, - "multi": false + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { diff --git a/test/test_encryption.py b/test/test_encryption.py index 458dd68f32..c3ba61d6e4 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -31,14 +31,20 @@ sys.path[0:0] = [""] from test import ( + AWS_CREDS, + AZURE_CREDS, CA_PEM, CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, IntegrationTest, PyMongoTestCase, client_context, unittest, ) from test.test_bulk import BulkTestBase +from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, OvertCommandListener, @@ -64,6 +70,7 @@ AutoReconnect, BulkWriteError, ConfigurationError, + DuplicateKeyError, EncryptionError, InvalidOperation, OperationFailure, @@ -74,14 +81,13 @@ from pymongo.operations import InsertOne, ReplaceOne, UpdateOne from pymongo.write_concern import WriteConcern +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} + def get_client_opts(client): return client._MongoClient__options -KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} - - class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") @@ -211,7 +217,7 @@ def assertBinaryUUID(self, val): # Location of JSON test files. BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") -SPEC_PATH = os.path.join(BASE, "spec", "legacy") +SPEC_PATH = os.path.join(BASE, "spec") OPTS = CodecOptions() @@ -547,11 +553,6 @@ def test_with_statement(self): # Spec tests -AWS_CREDS = { - "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), -} - AWS_TEMP_CREDS = { "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), @@ -562,19 +563,6 @@ def test_with_statement(self): "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } - -AZURE_CREDS = { - "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), - "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), - "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), -} - -GCP_CREDS = { - "email": os.environ.get("FLE_GCP_EMAIL", ""), - "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), -} - -KMIP = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} @@ -611,7 +599,7 @@ def parse_auto_encrypt_opts(self, opts): if not any(AZURE_CREDS.values()): self.skipTest("GCP environment credentials are not set") if "kmip" in kms_providers: - kms_providers["kmip"] = KMIP + kms_providers["kmip"] = KMIP_CREDS opts["kms_tls_options"] = KMS_TLS_OPTS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" @@ -685,21 +673,24 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, SPEC_PATH) +test_creator = TestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) test_creator.create_tests() -# Prose Tests -LOCAL_MASTER_KEY = base64.b64decode( - b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" - b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" -) +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, + ) + ) +# Prose Tests ALL_KMS_PROVIDERS = { "aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, - "kmip": KMIP, + "kmip": KMIP_CREDS, "local": {"key": LOCAL_MASTER_KEY}, } @@ -1232,7 +1223,12 @@ def setUpClass(cls): super(TestCustomEndpoint, cls).setUpClass() def setUp(self): - kms_providers = {"aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, "kmip": KMIP} + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } self.client_encryption = ClientEncryption( kms_providers=kms_providers, key_vault_namespace="keyvault.datakeys", @@ -1409,7 +1405,7 @@ def test_10_kmip_invalid_endpoint(self): self.client_encryption_invalid.create_data_key("kmip", key) def test_11_kmip_master_key_endpoint(self): - key = {"keyId": "1", "endpoint": KMIP["endpoint"]} + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} self.run_test_expected_success("kmip", key) # Override invalid endpoint: data_key_id = self.client_encryption_invalid.create_data_key("kmip", master_key=key) @@ -2066,6 +2062,38 @@ def test_04_kmip(self): self.client_encryption_invalid_hostname.create_data_key("kmip") +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): + def setUp(self): + self.client = client_context.client + self.client.keyvault.drop_collection("datakeys") + self.client.keyvault.datakeys.create_index( + "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + ) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = ClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_01_create_key(self): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + def test_02_add_key_alt_name(self): + key_id = self.client_encryption.create_data_key("local") + self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + # https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption class TestExplicitQueryableEncryption(EncryptionIntegrationTest): @client_context.require_no_standalone diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json new file mode 100644 index 0000000000..26d14051a7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "invalid": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json new file mode 100644 index 0000000000..c43a2a9125 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-required.json @@ -0,0 +1,23 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json new file mode 100644 index 0000000000..1be9167a40 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultClient-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultClient-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": 0, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json new file mode 100644 index 0000000000..3f54d89aa7 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-required.json @@ -0,0 +1,28 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json new file mode 100644 index 0000000000..53f2f5f086 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-keyVaultNamespace-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-keyVaultNamespace-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": 0, + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json new file mode 100644 index 0000000000..cfd979e2b2 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-additionalProperties.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "invalid": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json new file mode 100644 index 0000000000..59b273487d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json new file mode 100644 index 0000000000..ffcc85bfcf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-aws-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-aws-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json new file mode 100644 index 0000000000..1664b79097 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json new file mode 100644 index 0000000000..5bd50c8078 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-azure-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-azure-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json new file mode 100644 index 0000000000..120c088b00 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json new file mode 100644 index 0000000000..1dd1c8a2a3 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-gcp-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-gcp-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json new file mode 100644 index 0000000000..22ded20440 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json new file mode 100644 index 0000000000..9b9e74be37 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-kmip-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-kmip-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json new file mode 100644 index 0000000000..b93cfe00d1 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-additionalProperties.json @@ -0,0 +1,31 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "invalid": {} + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json new file mode 100644 index 0000000000..526ea24831 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-local-type.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-local-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": 0 + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json new file mode 100644 index 0000000000..b823a67baf --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-required.json @@ -0,0 +1,26 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys" + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json new file mode 100644 index 0000000000..e7a6190b68 --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-type.json @@ -0,0 +1,27 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": 0 + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json new file mode 100644 index 0000000000..3b4972f23d --- /dev/null +++ b/test/unified-test-format/invalid/clientEncryptionOpts-tlsOptions_not_supported.json @@ -0,0 +1,30 @@ +{ + "description": "clientEncryptionOpts-tlsOptions_not_supported", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + }, + "tlsOptions": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json new file mode 100644 index 0000000000..77c0a91434 --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-additionalProperties.json @@ -0,0 +1,30 @@ +{ + "description": "entity-clientEncryption-additionalProperties", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + }, + "invalid": {} + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json new file mode 100644 index 0000000000..88e852342a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-required.json @@ -0,0 +1,17 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0" + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json new file mode 100644 index 0000000000..77fb6a362a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-clientEncryptionOpts-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-clientEncryption-clientEncryptionOpts-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": 0 + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-required.json b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json new file mode 100644 index 0000000000..464ba7159a --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-required.json @@ -0,0 +1,28 @@ +{ + "description": "entity-clientEncryption-id-required", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-clientEncryption-id-type.json b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json new file mode 100644 index 0000000000..a7746657fc --- /dev/null +++ b/test/unified-test-format/invalid/entity-clientEncryption-id-type.json @@ -0,0 +1,29 @@ +{ + "description": "entity-clientEncryption-id-type", + "schemaVersion": "1.8", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": 0, + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json new file mode 100644 index 0000000000..b48c850d14 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-csfle-type", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": "foo" + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json new file mode 100644 index 0000000000..e62de80033 --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_aws_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_aws_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json new file mode 100644 index 0000000000..8ef805d0fa --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_azure_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_azure_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": "tenantId" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json new file mode 100644 index 0000000000..c6da1ce58c --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-missing_gcp_kms_credentials.json @@ -0,0 +1,36 @@ +{ + "description": "kmsProviders-missing_gcp_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": "email" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/kmsProviders-no_kms.json b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json new file mode 100644 index 0000000000..57499b4eaf --- /dev/null +++ b/test/unified-test-format/valid-fail/kmsProviders-no_kms.json @@ -0,0 +1,32 @@ +{ + "description": "clientEncryptionOpts-no_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": {} + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operation-unsupported.json b/test/unified-test-format/valid-fail/operation-unsupported.json new file mode 100644 index 0000000000..d8ef5ab1c8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operation-unsupported.json @@ -0,0 +1,22 @@ +{ + "description": "operation-unsupported", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "Unsupported operation", + "operations": [ + { + "name": "unsupportedOperation", + "object": "client0" + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index 07ab66baa0..df3321a55b 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "3.6" + "minServerVersion": "3.6", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json new file mode 100644 index 0000000000..7cc74939eb --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-explicit_kms_credentials.json @@ -0,0 +1,52 @@ +{ + "description": "kmsProviders-explicit_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": "secretAccessKey" + }, + "azure": { + "tenantId": "tenantId", + "clientId": "clientId", + "clientSecret": "clientSecret" + }, + "gcp": { + "email": "email", + "privateKey": "cHJpdmF0ZUtleQo=" + }, + "kmip": { + "endpoint": "endpoint" + }, + "local": { + "key": "a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5a2V5" + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json new file mode 100644 index 0000000000..363f2a4576 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-mixed_kms_credential_fields.json @@ -0,0 +1,54 @@ +{ + "description": "kmsProviders-mixed_kms_credential_fields", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": "accessKeyId", + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": "tenantId", + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": "email", + "privateKey": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json new file mode 100644 index 0000000000..3f7721f01d --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-placeholder_kms_credentials.json @@ -0,0 +1,70 @@ +{ + "description": "kmsProviders-placeholder_kms_credentials", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json new file mode 100644 index 0000000000..12ca580941 --- /dev/null +++ b/test/unified-test-format/valid-pass/kmsProviders-unconfigured_kms.json @@ -0,0 +1,39 @@ +{ + "description": "kmsProviders-unconfigured_kms", + "schemaVersion": "1.8", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": {}, + "azure": {}, + "gcp": {}, + "kmip": {}, + "local": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "skipReason": "DRIVERS-2280: waiting on driver support for on-demand credentials", + "operations": [] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 001af4434c..a7d8b533dd 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -26,7 +26,18 @@ import time import types from collections import abc -from test import IntegrationTest, client_context, unittest +from test import ( + AWS_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, + IntegrationTest, + client_context, + unittest, +) from test.utils import ( CMAPListener, camel_to_snake, @@ -45,6 +56,7 @@ import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util from bson.binary import Binary +from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket @@ -53,10 +65,13 @@ from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection from pymongo.database import Database +from pymongo.encryption import ClientEncryption +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( BulkWriteError, ConfigurationError, ConnectionFailure, + EncryptionError, ExecutionTimeout, InvalidOperation, NetworkTimeout, @@ -93,6 +108,27 @@ IS_INTERRUPTED = False +KMS_TLS_OPTS = { + "kmip": { + "tlsCAFile": CA_PEM, + "tlsCertificateKeyFile": CLIENT_PEM, + } +} + + +# Build up a placeholder map. +PLACEHOLDER_MAP = dict() +for (provider_name, provider_data) in [ + ("local", {"key": LOCAL_MASTER_KEY}), + ("aws", AWS_CREDS), + ("azure", AZURE_CREDS), + ("gcp", GCP_CREDS), + ("kmip", KMIP_CREDS), +]: + for (key, value) in provider_data.items(): + placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + def interrupt_loop(): global IS_INTERRUPTED @@ -169,6 +205,12 @@ def is_run_on_requirement_satisfied(requirement): else: auth_satisfied = not client_context.auth_enabled + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + return ( topology_satisfied and min_version_satisfied @@ -176,6 +218,7 @@ def is_run_on_requirement_satisfied(requirement): and serverless_satisfied and params_satisfied and auth_satisfied + and csfle_satisfied ) @@ -328,6 +371,19 @@ def __setitem__(self, key, value): self._entities[key] = value + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: self.test.fail( @@ -335,6 +391,7 @@ def _create_entity(self, entity_spec, uri=None): ) entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") if entity_type == "client": kwargs: dict = {} observe_events = spec.get("observeEvents", []) @@ -410,6 +467,19 @@ def _create_entity(self, entity_spec, uri=None): elif entity_type == "bucket": # TODO: implement the 'bucket' entity type self.test.skipTest("GridFS is not currently supported (PYTHON-2459)") + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + self[spec["id"]] = ClientEncryption( + opts["kms_providers"], + opts["key_vault_namespace"], + opts["key_vault_client"], + DEFAULT_CODEC_OPTIONS, + opts.get("kms_tls_options", KMS_TLS_OPTS), + ) + return + self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) def create_entities_from_spec(self, entity_spec, uri=None): @@ -872,7 +942,7 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. if isinstance(exception, ConnectionFailure): self.assertNotIsInstance(exception, NotPrimaryError) - elif isinstance(exception, (InvalidOperation, ConfigurationError)): + elif isinstance(exception, (InvalidOperation, ConfigurationError, EncryptionError)): pass else: self.assertNotIsInstance(exception, PyMongoError) @@ -1033,6 +1103,33 @@ def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor) return target.close() + def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + opts = kwargs.pop("opts") + kwargs["master_key"] = opts.get("masterKey") + kwargs["key_alt_names"] = opts.get("keyAltNames") + kwargs["key_material"] = opts.get("keyMaterial") + return target.create_data_key(*args, **kwargs) + + def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return list(target.get_keys(*args, **kwargs)) + + def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + opts = kwargs.pop("opts") + kwargs["provider"] = opts.get("provider") + kwargs["master_key"] = opts.get("masterKey") + data = target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) + return dict() + def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] client = target @@ -1075,6 +1172,8 @@ def run_entity_operation(self, spec): client = target._client elif isinstance(target, GridFSBucket): raise NotImplementedError + elif isinstance(target, ClientEncryption): + method_name = "_clientEncryptionOperation_%s" % (opname,) else: method_name = "doesNotExist" From 6d916d68c2db341847b46fabf961f3ad4ba045e4 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 1 Jul 2022 12:36:12 -0700 Subject: [PATCH 0703/2111] PYTHON-3315 Remove index_key_id option from ClientEncryption encrypt method (#989) --- pymongo/encryption.py | 13 +++---------- test/test_encryption.py | 7 ------- 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index b792a4487e..adbdeb9d9f 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -636,7 +636,6 @@ def encrypt( algorithm: str, key_id: Optional[Binary] = None, key_alt_name: Optional[str] = None, - index_key_id: Optional[Binary] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, ) -> Binary: @@ -653,8 +652,6 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `index_key_id`: **(BETA)** The index key id to use for Queryable Encryption. Must be - a :class:`~bson.binary.Binary` with subtype 4 (:attr:`~bson.binary.UUID_SUBTYPE`). - `query_type` (str): **(BETA)** The query type to execute. See :class:`QueryType` for valid options. - `contention_factor` (int): **(BETA)** The contention factor to use @@ -662,7 +659,7 @@ def encrypt( *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - .. note:: `index_key_id`, `query_type`, and `contention_factor` are part of the + .. note:: `query_type` and `contention_factor` are part of the Queryable Encryption beta. Backwards-breaking changes may be made before the final release. @@ -670,17 +667,14 @@ def encrypt( The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. .. versionchanged:: 4.2 - Added the `index_key_id`, `query_type`, and `contention_factor` parameters. + Added the `query_type` and `contention_factor` parameters. + """ self._check_closed() if key_id is not None and not ( isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE ): raise TypeError("key_id must be a bson.binary.Binary with subtype 4") - if index_key_id is not None and not ( - isinstance(index_key_id, Binary) and index_key_id.subtype == UUID_SUBTYPE - ): - raise TypeError("index_key_id must be a bson.binary.Binary with subtype 4") doc = encode({"v": value}, codec_options=self._codec_options) with _wrap_encryption_errors(): @@ -689,7 +683,6 @@ def encrypt( algorithm, key_id=key_id, key_alt_name=key_alt_name, - index_key_id=index_key_id, query_type=query_type, contention_factor=contention_factor, ) diff --git a/test/test_encryption.py b/test/test_encryption.py index c3ba61d6e4..45e78d427a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -462,13 +462,6 @@ def test_validation(self): with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) - msg = "index_key_id must be a bson.binary.Binary with subtype 4" - algo = Algorithm.INDEXED - with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, index_key_id=uid) # type: ignore[arg-type] - with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, index_key_id=Binary(b"123")) - def test_bson_errors(self): client_encryption = ClientEncryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS From 02de2c93e0b15d635c55321680fe637818017170 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Jul 2022 11:39:07 -0700 Subject: [PATCH 0704/2111] PYTHON-3337 Fix capped collection test on MMAPv1 (#990) --- .../valid-pass/collectionData-createOptions.json | 4 ++-- test/unified_format.py | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index df3321a55b..64f8fb02ff 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -34,7 +34,7 @@ "databaseName": "database0", "createOptions": { "capped": true, - "size": 512 + "size": 4096 }, "documents": [ { @@ -60,7 +60,7 @@ }, "expectResult": { "capped": true, - "maxSize": 512 + "maxSize": 4096 } } ] diff --git a/test/unified_format.py b/test/unified_format.py index a7d8b533dd..2d223d26d2 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -880,6 +880,10 @@ def maybe_skip_test(self, spec): class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: + if client_context.storage_engine == "mmapv1": + self.skipTest( + "MMAPv1 does not support retryable writes which is required for CSOT tests" + ) if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: From 6acc9f64cff1db85e41796ff7eefce5bec594848 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 6 Jul 2022 11:39:41 -0700 Subject: [PATCH 0705/2111] PYTHON-3333 Fix bug where non-cursor read operations fail in a transaction with directConnection=True on primary (#991) --- pymongo/message.py | 2 ++ pymongo/mongo_client.py | 2 +- test/test_transactions.py | 38 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pymongo/message.py b/pymongo/message.py index bcdedd7b48..8f37fdc062 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -367,6 +367,8 @@ def as_command(self, sock_info, apply_timeout=False): def get_message(self, read_preference, sock_info, use_cmd=False): """Get a query message, possibly setting the secondaryOk bit.""" + # Use the read_preference decided by _socket_from_server. + self.read_preference = read_preference if read_preference.mode: # Set the secondaryOk bit. flags = self.flags | 4 diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 4b20c2e5b7..bfa22f5458 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1251,7 +1251,7 @@ def _socket_from_server(self, read_preference, server, session): with self._get_socket(server, session) as sock_info: if single: - if sock_info.is_repl: + if sock_info.is_repl and not (session and session.in_transaction): # Use primary preferred to ensure any repl set member # can handle the request. read_preference = ReadPreference.PRIMARY_PREFERRED diff --git a/test/test_transactions.py b/test/test_transactions.py index 136a19baaa..4cee3fa236 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -35,6 +35,8 @@ from gridfs import GridFS, GridFSBucket from pymongo import WriteConcern, client_session from pymongo.client_session import TransactionOptions +from pymongo.command_cursor import CommandCursor +from pymongo.cursor import Cursor from pymongo.errors import ( CollectionInvalid, ConfigurationError, @@ -351,6 +353,42 @@ def test_transaction_starts_with_batched_write(self): self.assertEqual(txn_number, event.command["txnNumber"]) self.assertEqual(48, coll.count_documents({})) + @client_context.require_transactions + def test_transaction_direct_connection(self): + client = single_client() + self.addCleanup(client.close) + coll = client.pymongo_test.test + + # Make sure the collection exists. + coll.insert_one({}) + self.assertEqual(client.topology_description.topology_type_name, "Single") + ops = [ + (coll.bulk_write, [[InsertOne({})]]), + (coll.insert_one, [{}]), + (coll.insert_many, [[{}, {}]]), + (coll.replace_one, [{}, {}]), + (coll.update_one, [{}, {"$set": {"a": 1}}]), + (coll.update_many, [{}, {"$set": {"a": 1}}]), + (coll.delete_one, [{}]), + (coll.delete_many, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.find_one_and_delete, [{}, {}]), + (coll.find_one, [{}]), + (coll.count_documents, [{}]), + (coll.distinct, ["foo"]), + (coll.aggregate, [[]]), + (coll.find, [{}]), + (coll.aggregate_raw_batches, [[]]), + (coll.find_raw_batches, [{}]), + (coll.database.command, ["find", coll.name]), + ] + for f, args in ops: + with client.start_session() as s, s.start_transaction(): + res = f(*args, session=s) + if isinstance(res, (CommandCursor, Cursor)): + list(res) + class PatchSessionTimeout(object): """Patches the client_session's with_transaction timeout for testing.""" From 256cd002d671d998ccc3e9e594146f706cd8fb56 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 14:23:14 -0500 Subject: [PATCH 0706/2111] PYTHON-3339 Ignore Sourceforge link that is giving 403 Error (#993) --- doc/conf.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index ff330b59a4..1e18eb29bf 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,11 +82,14 @@ # Options for link checking # The anchors on the rendered markdown page are created after the fact, -# so this link results in a 404. +# so those link results in a 404. +# wiki.centos.org has been flakey. +# sourceforge.net is giving a 403 error, but is still accessible from the browser. linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", + r"http://sourceforge.net/", ] # -- Options for extensions ---------------------------------------------------- From 751949a22a5174fb3d08806d4722f07082952adb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 14:24:06 -0500 Subject: [PATCH 0707/2111] PYTHON-3316 Add Type Check Test for Transactions (#995) --- test/test_mypy.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/test_mypy.py b/test/test_mypy.py index dfdcefbdb3..b320d5d139 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -47,6 +47,7 @@ class Movie(TypedDict): # type: ignore[misc] from pymongo.collection import Collection from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne +from pymongo.read_preferences import ReadPreference TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -163,6 +164,15 @@ class mydict(Dict[str, Any]): ) self.assertTrue(len(list(result))) + def test_with_transaction(self) -> None: + def execute_transaction(session): + pass + + with self.client.start_session() as session: + return session.with_transaction( + execute_transaction, read_preference=ReadPreference.PRIMARY + ) + class TestDecode(unittest.TestCase): def test_bson_decode(self) -> None: From b8f857d19e69f4c78959909e149c9943068aac01 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 16:59:46 -0500 Subject: [PATCH 0708/2111] PYTHON-3292 Remove ElectionInProgress (216) from ResumableChangeStreamError (#996) --- pymongo/change_stream.py | 1 - test/change_streams/unified/change-streams-errors.json | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index d2d60e25a4..ef3573022d 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -57,7 +57,6 @@ 13388, # StaleConfig 234, # RetryChangeStream 133, # FailedToSatisfyReadPreference - 216, # ElectionInProgress ] ) diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json index 4a413fce84..04fe8f04f3 100644 --- a/test/change_streams/unified/change-streams-errors.json +++ b/test/change_streams/unified/change-streams-errors.json @@ -187,7 +187,7 @@ "description": "change stream errors on ElectionInProgress", "runOnRequirements": [ { - "minServerVersion": "4.4", + "minServerVersion": "4.2", "topologies": [ "replicaset", "sharded-replicaset", From ff1efd1ab28c56e70a101768bd285055b5e4fd9a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 17:03:58 -0500 Subject: [PATCH 0709/2111] PYTHON-2986 Update serverless testing for load balancer fronting single proxy (#997) --- .evergreen/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 721de7cc61..653515279a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -467,11 +467,11 @@ functions: fi if [ -n "${test_serverless}" ]; then export TEST_SERVERLESS=1 - export MONGODB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}" export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" - export SINGLE_MONGOS_LB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}" - export MULTI_MONGOS_LB_URI="${MULTI_ATLASPROXY_SERVERLESS_URI}" + export MONGODB_URI="${SERVERLESS_URI}" + export SINGLE_MONGOS_LB_URI="${MONGODB_URI}" + export MULTI_MONGOS_LB_URI="${MONGODB_URI}" fi PYTHON_BINARY=${PYTHON_BINARY} \ From c09af5876dcc6325e54116ca4af9377f2676e0ba Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 18:29:28 -0500 Subject: [PATCH 0710/2111] PYTHON-3338 Add versionadded to docs for key management APIs (#992) --- pymongo/encryption.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index adbdeb9d9f..9fef5963a6 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -260,23 +260,19 @@ def close(self): class RewrapManyDataKeyResult(object): - def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: - """Result object returned by a ``rewrap_many_data_key`` operation. + """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. - :Parameters: - - `bulk_write_result`: The result of the bulk write operation used to - update the key vault collection with one or more rewrapped data keys. - If ``rewrap_many_data_key()`` does not find any matching keys to - rewrap, no bulk write operation will be executed and this field will - be ``None``. - """ + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: self._bulk_write_result = bulk_write_result @property def bulk_write_result(self) -> Optional[BulkWriteResult]: """The result of the bulk write operation used to update the key vault collection with one or more rewrapped data keys. If - ``rewrap_many_data_key()`` does not find any matching keys to rewrap, + :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, no bulk write operation will be executed and this field will be ``None``. """ @@ -717,6 +713,8 @@ def get_key(self, id: Binary) -> Optional[RawBSONDocument]: :Returns: The key document. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.find_one({"_id": id}) @@ -727,6 +725,8 @@ def get_keys(self) -> Cursor[RawBSONDocument]: :Returns: An instance of :class:`~pymongo.cursor.Cursor` over the data key documents. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.find({}) @@ -741,6 +741,8 @@ def delete_key(self, id: Binary) -> DeleteResult: :Returns: The delete result. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.delete_one({"_id": id}) @@ -756,6 +758,8 @@ def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: :Returns: The previous version of the key document. + + .. versionadded:: 4.2 """ self._check_closed() update = {"$addToSet": {"keyAltNames": key_alt_name}} @@ -769,6 +773,8 @@ def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: :Returns: The key document. + + .. versionadded:: 4.2 """ self._check_closed() return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) @@ -786,6 +792,8 @@ def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSON :Returns: Returns the previous version of the key document. + + .. versionadded:: 4.2 """ self._check_closed() pipeline = [ @@ -825,6 +833,8 @@ def rewrap_many_data_key( :Returns: A :class:`RewrapManyDataKeyResult`. + + .. versionadded:: 4.2 """ self._check_closed() with _wrap_encryption_errors(): From b40f13bf7d09d08ab7398c5da7b9f36420206f02 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 6 Jul 2022 18:30:00 -0500 Subject: [PATCH 0711/2111] PYTHON-3311 Module "pymongo" does not explicitly export attribute "MongoClient"; implicit reexport disabled (#994) --- bson/__init__.py | 62 +++++++++++++++++++++++++++++++- bson/objectid.py | 5 ++- gridfs/__init__.py | 10 ++++++ pymongo/__init__.py | 46 ++++++++++++++++++------ pymongo/database.py | 7 ++-- test/test_default_exports.py | 70 ++++++++++++++++++++++++++++++++++++ test/test_mypy.py | 3 +- 7 files changed, 184 insertions(+), 19 deletions(-) create mode 100644 test/test_default_exports.py diff --git a/bson/__init__.py b/bson/__init__.py index 70aa6ae86c..cc0850709e 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -86,7 +86,7 @@ cast, ) -from bson.binary import ( # noqa: F401 +from bson.binary import ( ALL_UUID_SUBTYPES, CSHARP_LEGACY, JAVA_LEGACY, @@ -128,6 +128,66 @@ except ImportError: _USE_C = False +__all__ = [ + "ALL_UUID_SUBTYPES", + "CSHARP_LEGACY", + "JAVA_LEGACY", + "OLD_UUID_SUBTYPE", + "STANDARD", + "UUID_SUBTYPE", + "Binary", + "UuidRepresentation", + "Code", + "DEFAULT_CODEC_OPTIONS", + "CodecOptions", + "DBRef", + "Decimal128", + "InvalidBSON", + "InvalidDocument", + "InvalidStringData", + "Int64", + "MaxKey", + "MinKey", + "ObjectId", + "Regex", + "RE_TYPE", + "SON", + "Timestamp", + "utc", + "EPOCH_AWARE", + "EPOCH_NAIVE", + "BSONNUM", + "BSONSTR", + "BSONOBJ", + "BSONARR", + "BSONBIN", + "BSONUND", + "BSONOID", + "BSONBOO", + "BSONDAT", + "BSONNUL", + "BSONRGX", + "BSONREF", + "BSONCOD", + "BSONSYM", + "BSONCWS", + "BSONINT", + "BSONTIM", + "BSONLON", + "BSONDEC", + "BSONMIN", + "BSONMAX", + "get_data_and_view", + "gen_list_name", + "encode", + "decode", + "decode_all", + "decode_iter", + "decode_file_iter", + "is_valid", + "BSON", + "has_c", +] EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) diff --git a/bson/objectid.py b/bson/objectid.py index c174b47327..4bc0243532 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB `ObjectIds -`_. +"""Tools for working with MongoDB ObjectIds. """ import binascii @@ -88,7 +87,7 @@ def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: :Parameters: - `oid` (optional): a valid ObjectId. - .. seealso:: The MongoDB documentation on `ObjectIds`_. + .. seealso:: The MongoDB documentation on `ObjectIds `_. .. versionchanged:: 3.8 :class:`~bson.objectid.ObjectId` now implements the `ObjectID diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 29d582cd21..08c7e1d2cd 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -42,6 +42,16 @@ from pymongo.read_preferences import _ServerMode from pymongo.write_concern import WriteConcern +__all__ = [ + "GridFS", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "GridIn", + "GridOut", + "GridOutCursor", +] + class GridFS(object): """An instance of GridFS on top of a single Database.""" diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 30bfc2bdf7..32e8f0f82e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -16,6 +16,35 @@ from typing import ContextManager, Optional, Tuple, Union +__all__ = [ + "ASCENDING", + "DESCENDING", + "GEO2D", + "GEOSPHERE", + "HASHED", + "TEXT", + "version_tuple", + "get_version_string", + "__version__", + "version", + "ReturnDocument", + "MAX_SUPPORTED_WIRE_VERSION", + "MIN_SUPPORTED_WIRE_VERSION", + "CursorType", + "MongoClient", + "DeleteMany", + "DeleteOne", + "IndexModel", + "InsertOne", + "ReplaceOne", + "UpdateMany", + "UpdateOne", + "ReadPreference", + "WriteConcern", + "has_c", + "timeout", +] + ASCENDING = 1 """Ascending sort order.""" DESCENDING = -1 @@ -70,14 +99,11 @@ def get_version_string() -> str: """Current version of PyMongo.""" from pymongo import _csot -from pymongo.collection import ReturnDocument # noqa: F401 -from pymongo.common import ( # noqa: F401 - MAX_SUPPORTED_WIRE_VERSION, - MIN_SUPPORTED_WIRE_VERSION, -) -from pymongo.cursor import CursorType # noqa: F401 -from pymongo.mongo_client import MongoClient # noqa: F401 -from pymongo.operations import ( # noqa: F401 +from pymongo.collection import ReturnDocument +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION +from pymongo.cursor import CursorType +from pymongo.mongo_client import MongoClient +from pymongo.operations import ( DeleteMany, DeleteOne, IndexModel, @@ -86,8 +112,8 @@ def get_version_string() -> str: UpdateMany, UpdateOne, ) -from pymongo.read_preferences import ReadPreference # noqa: F401 -from pymongo.write_concern import WriteConcern # noqa: F401 +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern def has_c() -> bool: diff --git a/pymongo/database.py b/pymongo/database.py index fcf1f3e36c..d182012cd4 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -55,6 +55,7 @@ def _check_name(name): if TYPE_CHECKING: + import bson.codec_options from pymongo.client_session import ClientSession from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern @@ -699,7 +700,7 @@ def command( check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_preference: Optional[_ServerMode] = None, - codec_options: "Optional[CodecOptions[_CodecDocumentType]]" = None, + codec_options: "Optional[bson.codec_options.CodecOptions[_CodecDocumentType]]" = None, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -764,7 +765,7 @@ def command( .. note:: :meth:`command` does **not** obey this Database's :attr:`read_preference` or :attr:`codec_options`. You must use the - `read_preference` and `codec_options` parameters instead. + ``read_preference`` and ``codec_options`` parameters instead. .. note:: :meth:`command` does **not** apply any custom TypeDecoders when decoding the command response. @@ -785,7 +786,7 @@ def command( regular expressions as :class:`~bson.regex.Regex` objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a BSON regular expression to a Python regular expression object. - Added the `codec_options` parameter. + Added the ``codec_options`` parameter. .. seealso:: The MongoDB documentation on `commands `_. """ diff --git a/test/test_default_exports.py b/test/test_default_exports.py new file mode 100644 index 0000000000..42e5831646 --- /dev/null +++ b/test/test_default_exports.py @@ -0,0 +1,70 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the default exports of the top level packages.""" +import inspect +import unittest + +import bson +import gridfs +import pymongo + +BSON_IGNORE = [] +GRIDFS_IGNORE = [ + "ASCENDING", + "DESCENDING", + "ClientSession", + "Collection", + "ObjectId", + "validate_string", + "Database", + "ConfigurationError", + "WriteConcern", +] +PYMONGO_IGNORE = [] +GLOBAL_INGORE = ["TYPE_CHECKING"] + + +class TestDefaultExports(unittest.TestCase): + def check_module(self, mod, ignores): + names = dir(mod) + names.remove("__all__") + for name in mod.__all__: + if name not in names and name not in ignores: + self.fail(f"{name} was included in {mod}.__all__ but is not a valid symbol") + + for name in names: + if name not in mod.__all__ and name not in ignores: + if name in GLOBAL_INGORE: + continue + value = getattr(mod, name) + if inspect.ismodule(value): + continue + if getattr(value, "__module__", None) == "typing": + continue + if not name.startswith("_"): + self.fail(f"{name} was not included in {mod}.__all__") + + def test_pymongo(self): + self.check_module(pymongo, PYMONGO_IGNORE) + + def test_gridfs(self): + self.check_module(gridfs, GRIDFS_IGNORE) + + def test_bson(self): + self.check_module(bson, BSON_IGNORE) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_mypy.py b/test/test_mypy.py index b320d5d139..c692c70789 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -43,9 +43,8 @@ class Movie(TypedDict): # type: ignore[misc] from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument from bson.son import SON -from pymongo import ASCENDING +from pymongo import ASCENDING, MongoClient from pymongo.collection import Collection -from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne from pymongo.read_preferences import ReadPreference From b16533951ca40d0b30a28ad4c781b256901ab151 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 7 Jul 2022 16:55:16 -0700 Subject: [PATCH 0712/2111] PYTHON-3345 CSOT use connection handshake RTT for load balanced mode (#998) --- pymongo/mongo_client.py | 10 ++++++++++ pymongo/pool.py | 7 +++++++ test/test_csot.py | 3 +++ 3 files changed, 20 insertions(+) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index bfa22f5458..6d139a238a 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1336,6 +1336,11 @@ def is_retrying(): bulk.started_retryable_write = True while True: + if is_retrying(): + remaining = _csot.remaining() + if remaining is not None and remaining <= 0: + assert last_error is not None + raise last_error try: server = self._select_server(writable_server_selector, session) supports_session = ( @@ -1394,6 +1399,11 @@ def _retryable_read(self, func, read_pref, session, address=None, retryable=True multiple_retries = _csot.get_timeout() is not None while True: + if retrying: + remaining = _csot.remaining() + if remaining is not None and remaining <= 0: + assert last_error is not None + raise last_error try: server = self._select_server(read_pref, session, address=address) with self._socket_from_server(read_pref, server, session) as (sock_info, read_pref): diff --git a/pymongo/pool.py b/pymongo/pool.py index 8a1e72fc0d..f8cc60329b 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -559,6 +559,7 @@ def __init__(self, sock, pool, address, id): self.pinned_cursor = False self.active = False self.last_timeout = self.opts.socket_timeout + self.connect_rtt = 0.0 def set_socket_timeout(self, timeout): """Cache last timeout to avoid duplicate calls to sock.settimeout.""" @@ -580,6 +581,8 @@ def apply_timeout(self, client, cmd, write_concern=None): return None # RTT validation. rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt max_time_ms = timeout - rtt if max_time_ms < 0: # CSOT: raise an error without running the command since we know it will time out. @@ -655,7 +658,11 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): else: auth_ctx = None + if performing_handshake: + start = time.monotonic() doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start hello = Hello(doc, awaitable=awaitable) self.is_writable = hello.is_writable self.max_wire_version = hello.max_wire_version diff --git a/test/test_csot.py b/test/test_csot.py index 290851159d..4d71973320 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -33,6 +33,9 @@ class TestCSOT(IntegrationTest): + RUN_ON_SERVERLESS = True + RUN_ON_LOAD_BALANCER = True + def test_timeout_nested(self): coll = self.db.coll self.assertEqual(_csot.get_timeout(), None) From b9884f34a963d85d0da7d6dbeaa84f76e32f353a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 8 Jul 2022 12:30:39 -0700 Subject: [PATCH 0713/2111] Test Failure - crypt_shared FLE tests fail on Windows/macos (#999) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 45e78d427a..c75b5f3ebd 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1786,7 +1786,7 @@ def setUp(self): key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map ) self.listener = AllowListEventListener("aggregate") - self.encrypted_client = MongoClient( + self.encrypted_client = rs_or_single_client( auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] ) self.addCleanup(self.encrypted_client.close) From d2b95d1bf027c17ee1f049c3077354a0ecdcf947 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 8 Jul 2022 19:40:25 -0500 Subject: [PATCH 0714/2111] PYTHON-3336 Test Failure - test_load_balancer failing (#1000) --- .pre-commit-config.yaml | 2 +- CONTRIBUTING.rst | 17 +++++++++++++++++ pymongo/errors.py | 9 +++++++++ pymongo/mongo_client.py | 8 ++++++-- pymongo/monitoring.py | 2 +- pymongo/pool.py | 5 +++-- test/test_cmap.py | 9 +++++++-- test/utils.py | 26 +++++++++++++++++++++++++- 8 files changed, 69 insertions(+), 9 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1fd86e0926..d72d51971c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -56,7 +56,7 @@ repos: rev: 0.11.1 hooks: - id: doc8 - args: [--max-line-length=200] + args: ["--ignore=D001"] # ignore line length stages: [manual] - repo: https://github.com/sirosen/check-jsonschema diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index b8bbad93f6..f44e746888 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -70,6 +70,23 @@ branch and submit a `pull request `_ button. +Running Tests Locally +--------------------- +- Ensure you have started the appropriate Mongo Server(s). +- Run ``python setup.py test`` to run all of the tests. +- Run ``python setup.py test -s test...`` to + run specific tests. You can omit the ```` to test a full class + and the ```` to test a full module. For example: + ``python setup.py test -s test.test_change_stream.TestUnifiedChangeStreamsErrors.test_change_stream_errors_on_ElectionInProgress``. + +Running Load Balancer Tests Locally +----------------------------------- +- Install ``haproxy`` (available as ``brew install haproxy`` on macOS). +- Clone ``drivers-evergreen-tools``: ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``. +- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=./drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=./drivers-evergreen-tools/.evergreen/orchestration ./drivers-evergreen-tools/.evergreen/run-orchestration.sh``. +- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' .evergreen/run-load-balancer.sh start``. +- Run the tests using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. + Re-sync Spec Tests ------------------ diff --git a/pymongo/errors.py b/pymongo/errors.py index 4a167383ca..a01911c7eb 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -61,6 +61,15 @@ class ConnectionFailure(PyMongoError): """Raised when a connection to the database cannot be made or is lost.""" +class WaitQueueTimeoutError(ConnectionFailure): + """Raised when an operation times out waiting to checkout a connection from the pool. + + Subclass of :exc:`~pymongo.errors.ConnectionFailure`. + + .. versionadded:: 4.2 + """ + + class AutoReconnect(ConnectionFailure): """Raised when a connection to the database is lost and an attempt to auto-reconnect will be made. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 6d139a238a..1defe32536 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -80,6 +80,7 @@ OperationFailure, PyMongoError, ServerSelectionTimeoutError, + WaitQueueTimeoutError, ) from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode @@ -1182,6 +1183,7 @@ def _get_socket(self, server, session): with _MongoClientErrorHandler(self, server, session) as err_handler: # Reuse the pinned connection, if it exists. if in_txn and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) yield session._pinned_connection return with server.get_socket(handler=err_handler) as sock_info: @@ -2064,9 +2066,11 @@ def _add_retryable_write_error(exc, max_wire_version): if code in helpers._RETRYABLE_ERROR_CODES: exc._add_error_label("RetryableWriteError") - # Connection errors are always retryable except NotPrimaryError which is + # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is # handled above. - if isinstance(exc, ConnectionFailure) and not isinstance(exc, NotPrimaryError): + if isinstance(exc, ConnectionFailure) and not isinstance( + exc, (NotPrimaryError, WaitQueueTimeoutError) + ): exc._add_error_label("RetryableWriteError") diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index ad604f3f16..f3f773fbbd 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1774,7 +1774,7 @@ def publish_connection_check_out_failed(self, address, reason): event = ConnectionCheckOutFailedEvent(address, reason) for subscriber in self.__cmap_listeners: try: - subscriber.connection_check_out_started(event) + subscriber.connection_check_out_failed(event) except Exception: _handle_exception() diff --git a/pymongo/pool.py b/pymongo/pool.py index f8cc60329b..493a544d01 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -52,6 +52,7 @@ NotPrimaryError, OperationFailure, PyMongoError, + WaitQueueTimeoutError, _CertificateError, ) from pymongo.hello import Hello, HelloCompat @@ -1637,7 +1638,7 @@ def _raise_wait_queue_timeout(self) -> NoReturn: timeout = _csot.get_timeout() or self.opts.wait_queue_timeout if self.opts.load_balanced: other_ops = self.active_sockets - self.ncursors - self.ntxns - raise ConnectionFailure( + raise WaitQueueTimeoutError( "Timeout waiting for connection from the connection pool. " "maxPoolSize: %s, connections in use by cursors: %s, " "connections in use by transactions: %s, connections in use " @@ -1650,7 +1651,7 @@ def _raise_wait_queue_timeout(self) -> NoReturn: timeout, ) ) - raise ConnectionFailure( + raise WaitQueueTimeoutError( "Timed out while checking out a connection from connection pool. " "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout) ) diff --git a/test/test_cmap.py b/test/test_cmap.py index a2a1d8d214..360edef0e8 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -38,7 +38,12 @@ from bson.objectid import ObjectId from bson.son import SON -from pymongo.errors import ConnectionFailure, OperationFailure, PyMongoError +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) from pymongo.monitoring import ( ConnectionCheckedInEvent, ConnectionCheckedOutEvent, @@ -73,7 +78,7 @@ "ConnectionPoolClosed": PoolClosedEvent, # Error types. "PoolClosedError": _PoolClosedError, - "WaitQueueTimeoutError": ConnectionFailure, + "WaitQueueTimeoutError": WaitQueueTimeoutError, } diff --git a/test/utils.py b/test/utils.py index 7071764b15..d80bf551df 100644 --- a/test/utils.py +++ b/test/utils.py @@ -38,7 +38,20 @@ from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat -from pymongo.monitoring import _SENSITIVE_COMMANDS +from pymongo.monitoring import ( + _SENSITIVE_COMMANDS, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) from pymongo.pool import _CancellationContext, _PoolGeneration from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference @@ -81,36 +94,47 @@ def wait_for_event(self, event, count): class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): + assert isinstance(event, ConnectionCreatedEvent) self.add_event(event) def connection_ready(self, event): + assert isinstance(event, ConnectionReadyEvent) self.add_event(event) def connection_closed(self, event): + assert isinstance(event, ConnectionClosedEvent) self.add_event(event) def connection_check_out_started(self, event): + assert isinstance(event, ConnectionCheckOutStartedEvent) self.add_event(event) def connection_check_out_failed(self, event): + assert isinstance(event, ConnectionCheckOutFailedEvent) self.add_event(event) def connection_checked_out(self, event): + assert isinstance(event, ConnectionCheckedOutEvent) self.add_event(event) def connection_checked_in(self, event): + assert isinstance(event, ConnectionCheckedInEvent) self.add_event(event) def pool_created(self, event): + assert isinstance(event, PoolCreatedEvent) self.add_event(event) def pool_ready(self, event): + assert isinstance(event, PoolReadyEvent) self.add_event(event) def pool_cleared(self, event): + assert isinstance(event, PoolClearedEvent) self.add_event(event) def pool_closed(self, event): + assert isinstance(event, PoolClosedEvent) self.add_event(event) From 34f3a1585c31b51c31395958ee6eab84b7bc3967 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Jul 2022 16:11:32 -0500 Subject: [PATCH 0715/2111] PYTHON-3349 Don't clear entire load balanced pool when serviceId is unknown (#1001) --- CONTRIBUTING.rst | 6 +++--- pymongo/topology.py | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f44e746888..a457b3e4c3 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -83,9 +83,9 @@ Running Load Balancer Tests Locally ----------------------------------- - Install ``haproxy`` (available as ``brew install haproxy`` on macOS). - Clone ``drivers-evergreen-tools``: ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``. -- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=./drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=./drivers-evergreen-tools/.evergreen/orchestration ./drivers-evergreen-tools/.evergreen/run-orchestration.sh``. -- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' .evergreen/run-load-balancer.sh start``. -- Run the tests using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. +- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh``. +- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start``. +- Run the tests from the ``pymongo`` checkout directory using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. Re-sync Spec Tests ------------------ diff --git a/pymongo/topology.py b/pymongo/topology.py index 4e82a41228..6781a9e549 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -644,6 +644,14 @@ def _handle_error(self, address, err_ctx): error = err_ctx.error exc_type = type(error) service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + if issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake: # The socket has been closed. Don't reset the server. # Server Discovery And Monitoring Spec: "When an application From 418130d9239c12fb10f2f1f6c6957bcc9cd0df48 Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Wed, 13 Jul 2022 03:00:39 +1000 Subject: [PATCH 0716/2111] docs: Fix a few typos (#1003) --- pymongo/database.py | 2 +- pymongo/ocsp_support.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index d182012cd4..0047568199 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -772,7 +772,7 @@ def command( .. note:: If this client has been configured to use MongoDB Stable API (see :ref:`versioned-api-ref`), then :meth:`command` will - automactically add API versioning options to the given command. + automatically add API versioning options to the given command. Explicitly adding API versioning options in the command and declaring an API version on the client is not supported. diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 94905d9f47..3a201f1f5e 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -217,7 +217,7 @@ def _verify_response(issuer, response): if not res: return 0 - # Note that we are not using a "tolerence period" as discussed in + # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? now = _datetime.utcnow() # RFC6960, Section 3.2, Number 5 From 135efdd23fd75dbce827d9af6d9cbbf2f84d236e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 14 Jul 2022 14:57:52 -0500 Subject: [PATCH 0717/2111] PYTHON-3153 Update initial DNS seedlist discovery tests to support dedicated load balancer port (#1002) --- pymongo/mongo_client.py | 4 ++-- pymongo/pool.py | 11 +++++++---- .../load-balanced/loadBalanced-directConnection.json | 6 +++--- .../load-balanced/loadBalanced-replicaSet-errors.json | 2 +- .../load-balanced/loadBalanced-true-txt.json | 6 +++--- ...MaxHosts-conflicts_with_loadBalanced-true-txt.json | 2 +- .../load-balanced/srvMaxHosts-zero-txt.json | 6 +++--- test/srv_seedlist/load-balanced/srvMaxHosts-zero.json | 6 +++--- 8 files changed, 23 insertions(+), 20 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 1defe32536..82fab2891c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -2102,12 +2102,12 @@ def __init__(self, client, server, session): self.service_id = None self.handled = False - def contribute_socket(self, sock_info): + def contribute_socket(self, sock_info, completed_handshake=True): """Provide socket information to the error handler.""" self.max_wire_version = sock_info.max_wire_version self.sock_generation = sock_info.generation self.service_id = sock_info.service_id - self.completed_handshake = True + self.completed_handshake = completed_handshake def handle(self, exc_type, exc_val): if self.handled or exc_type is None: diff --git a/pymongo/pool.py b/pymongo/pool.py index 493a544d01..ed9feac918 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1344,7 +1344,7 @@ def remove_stale_sockets(self, reference_generation): self.requests -= 1 self.size_cond.notify() - def connect(self): + def connect(self, handler=None): """Connect to Mongo and return a new SocketInfo. Can raise ConnectionFailure. @@ -1378,6 +1378,8 @@ def connect(self): if self.handshake: sock_info.hello() self.is_writable = sock_info.is_writable + if handler: + handler.contribute_socket(sock_info, completed_handshake=False) sock_info.authenticate() except BaseException: @@ -1408,7 +1410,8 @@ def get_socket(self, handler=None): if self.enabled_for_cmap: listeners.publish_connection_check_out_started(self.address) - sock_info = self._get_socket() + sock_info = self._get_socket(handler=handler) + if self.enabled_for_cmap: listeners.publish_connection_checked_out(self.address, sock_info.id) try: @@ -1446,7 +1449,7 @@ def _raise_if_not_ready(self, emit_event): ) _raise_connection_failure(self.address, AutoReconnect("connection pool paused")) - def _get_socket(self): + def _get_socket(self, handler=None): """Get or create a SocketInfo. Can raise ConnectionFailure.""" # We use the pid here to avoid issues with fork / multiprocessing. # See test.test_client:TestClient.test_fork for an example of @@ -1520,7 +1523,7 @@ def _get_socket(self): continue else: # We need to create a new connection try: - sock_info = self.connect() + sock_info = self.connect(handler=handler) finally: with self._max_connecting_cond: self._pending -= 1 diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json index 7f41932bb2..3f500acdc6 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?directConnection=false", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?directConnection=false", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, diff --git a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json index 9ed5ff22c2..2133dee532 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-replicaSet-errors.json @@ -1,5 +1,5 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?replicaSet=replset", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?replicaSet=replset", "seeds": [], "hosts": [], "error": true, diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json index 0117b3e9cb..f9719e760d 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/", + "uri": "mongodb+srv://test24.test.build.10gen.cc/", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json index a7600a8a7b..593a521c26 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-conflicts_with_loadBalanced-true-txt.json @@ -1,5 +1,5 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?srvMaxHosts=1", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=1", "seeds": [], "hosts": [], "error": true, diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json index 8d48b5bbb9..a18360ea64 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test20.test.build.10gen.cc/?srvMaxHosts=0", + "uri": "mongodb+srv://test24.test.build.10gen.cc/?srvMaxHosts=0", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json index 2382fccf85..bd85418117 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -1,10 +1,10 @@ { - "uri": "mongodb+srv://test3.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", + "uri": "mongodb+srv://test23.test.build.10gen.cc/?loadBalanced=true&srvMaxHosts=0", "seeds": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "hosts": [ - "localhost.test.build.10gen.cc:27017" + "localhost.test.build.10gen.cc:8000" ], "options": { "loadBalanced": true, From 309a7e0b3d8c21fc32ff8af76644d9c01ba916d0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 14 Jul 2022 17:09:42 -0500 Subject: [PATCH 0718/2111] PYTHON-3353 Improve reliability of SDAM heartbeat error spec tests (#1005) --- .../hello-command-error.json | 18 +----------------- .../hello-network-error.json | 2 +- .../hello-timeout.json | 18 +----------------- 3 files changed, 3 insertions(+), 35 deletions(-) diff --git a/test/discovery_and_monitoring_integration/hello-command-error.json b/test/discovery_and_monitoring_integration/hello-command-error.json index 05a93e751c..d3bccd3900 100644 --- a/test/discovery_and_monitoring_integration/hello-command-error.json +++ b/test/discovery_and_monitoring_integration/hello-command-error.json @@ -117,7 +117,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 4 }, "data": { "failCommands": [ @@ -162,22 +162,6 @@ } ] } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } } ], "expectations": [ diff --git a/test/discovery_and_monitoring_integration/hello-network-error.json b/test/discovery_and_monitoring_integration/hello-network-error.json index b699363923..f9761d7556 100644 --- a/test/discovery_and_monitoring_integration/hello-network-error.json +++ b/test/discovery_and_monitoring_integration/hello-network-error.json @@ -116,7 +116,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 4 }, "data": { "failCommands": [ diff --git a/test/discovery_and_monitoring_integration/hello-timeout.json b/test/discovery_and_monitoring_integration/hello-timeout.json index 7bdc61a912..004f8f449d 100644 --- a/test/discovery_and_monitoring_integration/hello-timeout.json +++ b/test/discovery_and_monitoring_integration/hello-timeout.json @@ -117,7 +117,7 @@ "failPoint": { "configureFailPoint": "failCommand", "mode": { - "times": 2 + "times": 4 }, "data": { "failCommands": [ @@ -160,22 +160,6 @@ } ] } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } } ], "expectations": [ From 61add4a1cfe0cb56d3790c63e332456366ad0b3a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 14 Jul 2022 16:30:52 -0700 Subject: [PATCH 0719/2111] PYTHON-3303 Upgrade encryption testing to macos 10.15+ (#1004) --- .evergreen/config.yml | 10 +++++++++- test/test_encryption.py | 7 +++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 653515279a..ac7f97f6fa 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1843,6 +1843,14 @@ axes: skip_ECS_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: macos-1100 + display_name: "macOS 11.00" + run_on: macos-1100 + variables: + skip_EC2_auth_test: true + skip_ECS_auth_test: true + python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 display_name: "RHEL 6.2 (x86_64)" run_on: rhel62-small @@ -2287,7 +2295,7 @@ buildvariants: - matrix_name: "test-macos-encryption" matrix_spec: platform: - - macos-1014 + - macos-1100 auth: "auth" ssl: "nossl" encryption: "*" diff --git a/test/test_encryption.py b/test/test_encryption.py index c75b5f3ebd..94a588bd6a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1777,10 +1777,9 @@ def setUp(self): self.cipher_text = self.client_encryption.encrypt( "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic ) - if self.cipher_text[-1] == 0: - self.malformed_cipher_text = self.cipher_text[:-1] + b"1" - else: - self.malformed_cipher_text = self.cipher_text[:-1] + b"0" + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) opts = AutoEncryptionOpts( key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map From bbe364fea84ce181d98755fd2445123243493c61 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Jul 2022 13:06:08 -0500 Subject: [PATCH 0720/2111] PYTHON-3294 Bump minimum pymongocrypt version to 1.3.0 (#1007) --- doc/changelog.rst | 6 ++++-- setup.py | 4 +--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index f074a9d464..4ab1348078 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -25,6 +25,8 @@ Bug fixes Unavoidable breaking changes ............................ +- pymongocrypt 1.3.0 or later is now required for client side field level + encryption support. - :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the :ref:`versioned-api-ref`. @@ -317,7 +319,7 @@ Breaking Changes in 4.0 :attr:`~pymongo.mongo_client.MongoClient.address` which can change. - Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. -- PyMongoCrypt 1.2.0 or later is now required for client side field level +- pymongocrypt 1.2.0 or later is now required for client side field level encryption support. Notable improvements @@ -356,7 +358,7 @@ Changes in Version 3.12.0 .. warning:: PyMongo now allows insertion of documents with keys that include dots ('.') or start with dollar signs ('$'). -- PyMongoCrypt 1.1.0 or later is now required for client side field level +- pymongocrypt 1.1.0 or later is now required for client side field level encryption support. - Iterating over :class:`gridfs.grid_file.GridOut` now moves through the file line by line instead of chunk by chunk, and does not diff --git a/setup.py b/setup.py index a61f56c3f6..ce6cce712e 100755 --- a/setup.py +++ b/setup.py @@ -276,9 +276,7 @@ def build_extension(self, ext): pyopenssl_reqs.append("certifi") extras_require = { - "encryption": [ - "pymongocrypt@git+ssh://git@github.com/mongodb/libmongocrypt.git@161dbc8ae#subdirectory=bindings/python" - ], + "encryption": ["pymongocrypt>=1.3.0,<2.0.0"], "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 484374eb3f9f5a44c22a529a28c6bd9b99d93869 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 18 Jul 2022 13:40:16 -0700 Subject: [PATCH 0721/2111] PYTHON-3298 Add flag to create_collection to skip listCollections pre-check (#1006) --- doc/changelog.rst | 3 +++ pymongo/database.py | 11 ++++++++--- test/test_database.py | 15 +++++++++++++++ test/unified_format.py | 13 ++----------- test/utils.py | 10 +--------- test/utils_spec_runner.py | 10 +--------- 6 files changed, 30 insertions(+), 32 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4ab1348078..5594dd4f74 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,6 +13,9 @@ PyMongo 4.2 brings a number of improvements including: changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. +- Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection` + that when True (the default) runs an additional ``listCollections`` command to verify that the + collection does not exist already. Bug fixes ......... diff --git a/pymongo/database.py b/pymongo/database.py index 0047568199..665b94cad1 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -305,6 +305,7 @@ def create_collection( read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, timeout: Optional[float] = None, + check_exists: Optional[bool] = True, **kwargs: Any, ) -> Collection[_DocumentType]: """Create a new :class:`~pymongo.collection.Collection` in this @@ -336,6 +337,8 @@ def create_collection( :class:`~pymongo.collation.Collation`. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. + - ``check_exists`` (optional): if True (the default), send a listCollections command to + check if the collection already exists before creation. - `**kwargs` (optional): additional keyword arguments will be passed as options for the `create collection command`_ @@ -402,7 +405,7 @@ def create_collection( enabling pre- and post-images. .. versionchanged:: 4.2 - Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. .. versionchanged:: 3.11 This method is now supported inside multi-document transactions @@ -441,8 +444,10 @@ def create_collection( with self.__client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. - if (not s or not s.in_transaction) and name in self.list_collection_names( - filter={"name": name}, session=s + if ( + check_exists + and (not s or not s.in_transaction) + and name in self.list_collection_names(filter={"name": name}, session=s) ): raise CollectionInvalid("collection %s already exists" % name) return Collection( diff --git a/test/test_database.py b/test/test_database.py index 58cbe54335..d49ac8324f 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -220,6 +220,21 @@ def test_list_collection_names_filter(self): self.assertIn("nameOnly", command) self.assertTrue(command["nameOnly"]) + def test_check_exists(self): + listener = OvertCommandListener() + results = listener.results + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + db = client[self.db.name] + db.drop_collection("unique") + db.create_collection("unique", check_exists=True) + self.assertIn("listCollections", listener.started_command_names()) + listener.reset() + db.drop_collection("unique") + db.create_collection("unique", check_exists=False) + self.assertTrue(len(results["started"]) > 0) + self.assertNotIn("listCollections", listener.started_command_names()) + def test_list_collections(self): self.client.drop_database("pymongo_test") db = Database(self.client, "pymongo_test") diff --git a/test/unified_format.py b/test/unified_format.py index 2d223d26d2..200040353d 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -283,7 +283,6 @@ def __init__( self._observe_sensitive_commands = False self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) self._ignore_commands.add("configurefailpoint") - self.ignore_list_collections = False self._event_mapping = collections.defaultdict(list) self.entity_map = entity_map if store_events: @@ -314,10 +313,7 @@ def add_event(self, event): ) def _command_event(self, event): - if not ( - event.command_name.lower() in self._ignore_commands - or (self.ignore_list_collections and event.command_name == "listCollections") - ): + if not event.command_name.lower() in self._ignore_commands: self.add_event(event) def started(self, event): @@ -1032,13 +1028,8 @@ def _databaseOperation_listCollections(self, target, *args, **kwargs): def _databaseOperation_createCollection(self, target, *args, **kwargs): # PYTHON-1936 Ignore the listCollections event from create_collection. - for listener in target.client.options.event_listeners: - if isinstance(listener, EventListenerUtil): - listener.ignore_list_collections = True + kwargs["check_exists"] = False ret = target.create_collection(*args, **kwargs) - for listener in target.client.options.event_listeners: - if isinstance(listener, EventListenerUtil): - listener.ignore_list_collections = False return ret def __entityOperation_aggregate(self, target, *args, **kwargs): diff --git a/test/utils.py b/test/utils.py index d80bf551df..5421d584d1 100644 --- a/test/utils.py +++ b/test/utils.py @@ -202,23 +202,14 @@ class OvertCommandListener(EventListener): ignore_list_collections = False def started(self, event): - if self.ignore_list_collections and event.command_name.lower() == "listcollections": - self.ignore_list_collections = False - return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).started(event) def succeeded(self, event): - if self.ignore_list_collections and event.command_name.lower() == "listcollections": - self.ignore_list_collections = False - return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).succeeded(event) def failed(self, event): - if self.ignore_list_collections and event.command_name.lower() == "listcollections": - self.ignore_list_collections = False - return if event.command_name.lower() not in _SENSITIVE_COMMANDS: super(OvertCommandListener, self).failed(event) @@ -1114,6 +1105,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac elif opname == "create_collection": if arg_name == "collection": arguments["name"] = arguments.pop(arg_name) + arguments["check_exists"] = False # Any other arguments to create_collection are passed through # **kwargs. elif opname == "create_index" and arg_name == "keys": diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 498a60220b..f8ad26efe7 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -307,15 +307,7 @@ def run_operation(self, sessions, collection, operation): args.update(arguments) arguments = args - try: - if name == "create_collection" and ( - "encrypted" in operation["arguments"]["name"] - or "plaintext" in operation["arguments"]["name"] - ): - self.listener.ignore_list_collections = True - result = cmd(**dict(arguments)) - finally: - self.listener.ignore_list_collections = False + result = cmd(**dict(arguments)) # Cleanup open change stream cursors. if name == "watch": self.addCleanup(result.close) From c43486101fb37ccfd590499aad04f57943bf75d3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Jul 2022 15:58:20 -0500 Subject: [PATCH 0722/2111] PYTHON-3291 Add PyMongoError.timeout to identify timeout related errors (#1008) --- doc/changelog.rst | 2 ++ pymongo/__init__.py | 8 +++--- pymongo/errors.py | 56 ++++++++++++++++++++++++++++++++++++++++++ pymongo/helpers.py | 3 ++- test/unified_format.py | 13 ++-------- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5594dd4f74..b6b099fd31 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,6 +13,8 @@ PyMongo 4.2 brings a number of improvements including: changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. +- Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when + the error was caused by a timeout. - Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection` that when True (the default) runs an additional ``listCollections`` command to verify that the collection does not exist already. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 32e8f0f82e..7eaa793648 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -149,9 +149,11 @@ def timeout(seconds: Optional[float]) -> ContextManager: # The deadline has now expired, the next operation will raise # a timeout exception. client.db.coll2.insert_one({}) - except (ServerSelectionTimeoutError, ExecutionTimeout, WTimeoutError, - NetworkTimeout) as exc: - print(f"block timed out: {exc!r}") + except PyMongoError as exc: + if exc.timeout: + print(f"block timed out: {exc!r}") + else: + print(f"failed with non-timeout error: {exc!r}") When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at most the existing deadline. The deadline can only be shortened, not extended. diff --git a/pymongo/errors.py b/pymongo/errors.py index a01911c7eb..efc7e2eca0 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -52,6 +52,14 @@ def _remove_error_label(self, label): """Remove the given label from this error.""" self._error_labels.discard(label) + @property + def timeout(self) -> bool: + """True if this error was caused by a timeout. + + .. versionadded:: 4.2 + """ + return False + class ProtocolError(PyMongoError): """Raised for failures related to the wire protocol.""" @@ -69,6 +77,10 @@ class WaitQueueTimeoutError(ConnectionFailure): .. versionadded:: 4.2 """ + @property + def timeout(self) -> bool: + return True + class AutoReconnect(ConnectionFailure): """Raised when a connection to the database is lost and an attempt to @@ -106,6 +118,10 @@ class NetworkTimeout(AutoReconnect): Subclass of :exc:`~pymongo.errors.AutoReconnect`. """ + @property + def timeout(self) -> bool: + return True + def _format_detailed_error(message, details): if details is not None: @@ -149,6 +165,10 @@ class ServerSelectionTimeoutError(AutoReconnect): Preference that the replica set cannot satisfy. """ + @property + def timeout(self) -> bool: + return True + class ConfigurationError(PyMongoError): """Raised when something is incorrectly configured.""" @@ -199,6 +219,10 @@ def details(self) -> Optional[Mapping[str, Any]]: """ return self.__details + @property + def timeout(self) -> bool: + return self.__code in (50,) + class CursorNotFound(OperationFailure): """Raised while iterating query results if the cursor is @@ -217,6 +241,10 @@ class ExecutionTimeout(OperationFailure): .. versionadded:: 2.7 """ + @property + def timeout(self) -> bool: + return True + class WriteConcernError(OperationFailure): """Base exception type for errors raised due to write concern. @@ -242,11 +270,20 @@ class WTimeoutError(WriteConcernError): .. versionadded:: 2.7 """ + @property + def timeout(self) -> bool: + return True + class DuplicateKeyError(WriteError): """Raised when an insert or update fails due to a duplicate key error.""" +def _wtimeout_error(error: Any) -> bool: + """Return True if this writeConcernError doc is a caused by a timeout.""" + return error.get("code") == 50 or ("errInfo" in error and error["errInfo"].get("wtimeout")) + + class BulkWriteError(OperationFailure): """Exception class for bulk write errors. @@ -261,6 +298,19 @@ def __init__(self, results: Mapping[str, Any]) -> None: def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) + @property + def timeout(self) -> bool: + # Check the last writeConcernError and last writeError to determine if this + # BulkWriteError was caused by a timeout. + wces = self.details.get("writeConcernErrors", []) + if wces and _wtimeout_error(wces[-1]): + return True + + werrs = self.details.get("writeErrors", []) + if werrs and werrs[-1].get("code") == 50: + return True + return False + class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -302,6 +352,12 @@ def cause(self) -> Exception: """The exception that caused this encryption or decryption error.""" return self.__cause + @property + def timeout(self) -> bool: + if isinstance(self.__cause, PyMongoError): + return self.__cause.timeout + return False + class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 60b69424a2..4df8ab8e7a 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -30,6 +30,7 @@ WriteConcernError, WriteError, WTimeoutError, + _wtimeout_error, ) from pymongo.hello import HelloCompat @@ -190,7 +191,7 @@ def _raise_last_write_error(write_errors: List[Any]) -> NoReturn: def _raise_write_concern_error(error: Any) -> NoReturn: - if "errInfo" in error and error["errInfo"].get("wtimeout"): + if _wtimeout_error(error): # Make sure we raise WTimeoutError raise WTimeoutError(error.get("errmsg"), error.get("code"), error) raise WriteConcernError(error.get("errmsg"), error.get("code"), error) diff --git a/test/unified_format.py b/test/unified_format.py index 200040353d..ebeb62ceaa 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -72,13 +72,9 @@ ConfigurationError, ConnectionFailure, EncryptionError, - ExecutionTimeout, InvalidOperation, - NetworkTimeout, NotPrimaryError, PyMongoError, - ServerSelectionTimeoutError, - WriteConcernError, ) from pymongo.monitoring import ( _SENSITIVE_COMMANDS, @@ -948,13 +944,8 @@ def process_error(self, exception, spec): self.assertNotIsInstance(exception, PyMongoError) if is_timeout_error: - # TODO: PYTHON-3291 Implement error transformation. - if isinstance(exception, WriteConcernError): - self.assertEqual(exception.code, 50) - else: - self.assertIsInstance( - exception, (NetworkTimeout, ExecutionTimeout, ServerSelectionTimeoutError) - ) + self.assertIsInstance(exception, PyMongoError) + self.assertTrue(exception.timeout, msg=exception) if error_contains: if isinstance(exception, BulkWriteError): From 5c38676d531432004be9f4ec0fe38df508b67b1f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Jul 2022 19:54:45 -0500 Subject: [PATCH 0723/2111] PYTHON-3359 Remove Database and Collection timeout override (#1009) Remove MongoClient.timeout in favor of client.options.timeout. --- pymongo/_csot.py | 2 +- pymongo/client_options.py | 2 +- pymongo/collection.py | 4 - pymongo/common.py | 15 - pymongo/database.py | 8 - pymongo/mongo_client.py | 4 +- test/csot/command-execution.json | 99 +- test/csot/override-collection-timeoutMS.json | 3498 ------------- test/csot/override-database-timeoutMS.json | 4622 ------------------ test/unified_format.py | 3 +- 10 files changed, 42 insertions(+), 8215 deletions(-) delete mode 100644 test/csot/override-collection-timeoutMS.json delete mode 100644 test/csot/override-database-timeoutMS.json diff --git a/pymongo/_csot.py b/pymongo/_csot.py index ddd4e9233f..6d3cd3c0f9 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -58,7 +58,7 @@ class _TimeoutContext(object): Use :func:`pymongo.timeout` instead:: - with client.timeout(0.5): + with pymongo.timeout(0.5): client.test.test.insert_one({}) """ diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 6784e32848..882474e258 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -265,7 +265,7 @@ def read_concern(self): @property def timeout(self) -> Optional[float]: - """The timeout. + """The configured timeoutMS converted to seconds, or None. ..versionadded: 4.2 """ diff --git a/pymongo/collection.py b/pymongo/collection.py index 0088388624..4aff5c1784 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -116,7 +116,6 @@ def __init__( write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, - timeout: Optional[float] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -201,7 +200,6 @@ def __init__( read_preference or database.read_preference, write_concern or database.write_concern, read_concern or database.read_concern, - timeout if timeout is not None else database.timeout, ) if not isinstance(name, str): raise TypeError("name must be an instance of str") @@ -395,7 +393,6 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> "Collection[_DocumentType]": """Get a clone of this collection changing the specified settings. @@ -434,7 +431,6 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, - timeout=timeout if timeout is not None else self.timeout, ) def bulk_write( diff --git a/pymongo/common.py b/pymongo/common.py index 858684bf05..6ffc97f2a8 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -831,7 +831,6 @@ def __init__( read_preference: _ServerMode, write_concern: WriteConcern, read_concern: ReadConcern, - timeout: Optional[float], ) -> None: if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") @@ -855,12 +854,6 @@ def __init__( raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") self.__read_concern = read_concern - if not isinstance(timeout, (int, float, type(None))): - raise TypeError("timeout must be None, an int, or a float") - if timeout and timeout < 0: - raise TypeError("timeout cannot be negative") - self.__timeout = float(timeout) if timeout else None - @property def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` @@ -910,14 +903,6 @@ def read_concern(self) -> ReadConcern: """ return self.__read_concern - @property - def timeout(self) -> Optional[float]: - """Read only access to the timeout of this instance. - - .. versionadded:: 4.2 - """ - return self.__timeout - class _CaseInsensitiveDictionary(abc.MutableMapping): def __init__(self, *args, **kwargs): diff --git a/pymongo/database.py b/pymongo/database.py index 665b94cad1..9b9d512014 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -76,7 +76,6 @@ def __init__( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> None: """Get a database by client and name. @@ -129,7 +128,6 @@ def __init__( read_preference or client.read_preference, write_concern or client.write_concern, read_concern or client.read_concern, - timeout if timeout is not None else client.timeout, ) if not isinstance(name, str): @@ -157,7 +155,6 @@ def with_options( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. @@ -197,7 +194,6 @@ def with_options( read_preference or self.read_preference, write_concern or self.write_concern, read_concern or self.read_concern, - timeout if timeout is not None else self.timeout, ) def __eq__(self, other: Any) -> bool: @@ -246,7 +242,6 @@ def get_collection( read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -293,7 +288,6 @@ def get_collection( read_preference, write_concern, read_concern, - timeout=timeout, ) def create_collection( @@ -304,7 +298,6 @@ def create_collection( write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, session: Optional["ClientSession"] = None, - timeout: Optional[float] = None, check_exists: Optional[bool] = True, **kwargs: Any, ) -> Collection[_DocumentType]: @@ -459,7 +452,6 @@ def create_collection( write_concern, read_concern, session=s, - timeout=timeout, **kwargs, ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 82fab2891c..e949ba5cd5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -789,7 +789,6 @@ def __init__( options.read_preference, options.write_concern, options.read_concern, - options.timeout, ) self._topology_settings = TopologySettings( @@ -1955,7 +1954,6 @@ def get_database( read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, - timeout: Optional[float] = None, ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. @@ -2006,7 +2004,7 @@ def get_database( name = self.__default_database_name return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern, timeout + self, name, codec_options, read_preference, write_concern, read_concern ) def _database_default_options(self, name): diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index f51b09d2d7..92358f2184 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -61,7 +61,8 @@ "useMultipleMongoses": false, "uriOptions": { "appName": "reduceMaxTimeMSTest", - "w": 1 + "w": 1, + "timeoutMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -75,35 +76,16 @@ "databaseName": "test" } }, - { - "collection": { - "id": "regularCollection", - "database": "database", - "collectionName": "coll" - } - }, { "collection": { "id": "timeoutCollection", "database": "database", - "collectionName": "timeoutColl", - "collectionOptions": { - "timeoutMS": 60 - } + "collectionName": "timeoutColl" } } ] } }, - { - "name": "insertOne", - "object": "regularCollection", - "arguments": { - "document": { - "_id": 1 - } - } - }, { "name": "insertOne", "object": "timeoutCollection", @@ -118,18 +100,6 @@ { "client": "client", "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - }, { "commandStartedEvent": { "commandName": "insert", @@ -137,7 +107,7 @@ "command": { "insert": "timeoutColl", "maxTimeMS": { - "$$lte": 60 + "$$lte": 500 } } } @@ -180,7 +150,8 @@ "useMultipleMongoses": false, "uriOptions": { "appName": "rttTooHighTest", - "w": 1 + "w": 1, + "timeoutMS": 10 }, "observeEvents": [ "commandStartedEvent" @@ -194,21 +165,11 @@ "databaseName": "test" } }, - { - "collection": { - "id": "regularCollection", - "database": "database", - "collectionName": "coll" - } - }, { "collection": { "id": "timeoutCollection", "database": "database", - "collectionName": "timeoutColl", - "collectionOptions": { - "timeoutMS": 2 - } + "collectionName": "timeoutColl" } } ] @@ -216,11 +177,38 @@ }, { "name": "insertOne", - "object": "regularCollection", + "object": "timeoutCollection", "arguments": { "document": { - "_id": 1 + "_id": 2 } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true } }, { @@ -239,20 +227,7 @@ "expectEvents": [ { "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] + "events": [] } ] } diff --git a/test/csot/override-collection-timeoutMS.json b/test/csot/override-collection-timeoutMS.json deleted file mode 100644 index 7d2c663fc1..0000000000 --- a/test/csot/override-collection-timeoutMS.json +++ /dev/null @@ -1,3498 +0,0 @@ -{ - "description": "timeoutMS can be overridden for a MongoCollection", - "schemaVersion": "1.9", - "runOnRequirements": [ - { - "minServerVersion": "4.4", - "topologies": [ - "replicaset", - "sharded-replicaset" - ] - } - ], - "createEntities": [ - { - "client": { - "id": "failPointClient", - "useMultipleMongoses": false - } - }, - { - "client": { - "id": "client", - "uriOptions": { - "timeoutMS": 10 - }, - "useMultipleMongoses": false, - "observeEvents": [ - "commandStartedEvent" - ], - "ignoreCommandMonitoringEvents": [ - "killCursors" - ] - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test" - } - } - ], - "initialData": [ - { - "collectionName": "coll", - "databaseName": "test", - "documents": [] - } - ], - "tests": [ - { - "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 1000 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll", - "collectionOptions": { - "timeoutMS": 0 - } - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/csot/override-database-timeoutMS.json b/test/csot/override-database-timeoutMS.json deleted file mode 100644 index 9c1b77f903..0000000000 --- a/test/csot/override-database-timeoutMS.json +++ /dev/null @@ -1,4622 +0,0 @@ -{ - "description": "timeoutMS can be overridden for a MongoDatabase", - "schemaVersion": "1.9", - "runOnRequirements": [ - { - "minServerVersion": "4.4", - "topologies": [ - "replicaset", - "sharded-replicaset" - ] - } - ], - "createEntities": [ - { - "client": { - "id": "failPointClient", - "useMultipleMongoses": false - } - }, - { - "client": { - "id": "client", - "uriOptions": { - "timeoutMS": 10 - }, - "useMultipleMongoses": false, - "observeEvents": [ - "commandStartedEvent" - ], - "ignoreCommandMonitoringEvents": [ - "killCursors" - ] - } - } - ], - "initialData": [ - { - "collectionName": "coll", - "databaseName": "test", - "documents": [] - } - ], - "tests": [ - { - "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "database", - "arguments": { - "pipeline": [ - { - "$listLocalSessions": {} - }, - { - "$limit": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "database", - "arguments": { - "pipeline": [ - { - "$listLocalSessions": {} - }, - { - "$limit": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollections", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollections", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollectionNames", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listCollectionNames", - "object": "database", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listCollections", - "databaseName": "test", - "command": { - "listCollections": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "ping" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "ping": 1 - }, - "commandName": "ping" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "ping", - "databaseName": "test", - "command": { - "ping": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "ping" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "ping": 1 - }, - "commandName": "ping" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "ping", - "databaseName": "test", - "command": { - "ping": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "database", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "database", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": 1, - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - count on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "estimatedDocumentCount", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "count", - "databaseName": "test", - "command": { - "count": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "distinct", - "databaseName": "test", - "command": { - "distinct": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "find", - "databaseName": "test", - "command": { - "find": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "listIndexes", - "databaseName": "test", - "command": { - "listIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection", - "arguments": { - "pipeline": [] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "aggregate", - "databaseName": "test", - "command": { - "aggregate": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "x": 1 - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "delete", - "databaseName": "test", - "command": { - "delete": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "update", - "databaseName": "test", - "command": { - "update": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": {}, - "replacement": { - "x": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "filter": {}, - "update": { - "$set": { - "x": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "findAndModify", - "databaseName": "test", - "command": { - "findAndModify": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "insertOne": { - "document": { - "_id": 1 - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "databaseName": "test", - "command": { - "insert": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "createIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createIndexes", - "databaseName": "test", - "command": { - "createIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 1000 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$type": [ - "int", - "long" - ] - } - } - } - } - ] - } - ] - }, - { - "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "database": { - "id": "database", - "client": "client", - "databaseName": "test", - "databaseOptions": { - "timeoutMS": 0 - } - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ] - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "failPointClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "dropIndexes" - ], - "blockConnection": true, - "blockTimeMS": 15 - } - } - } - }, - { - "name": "dropIndexes", - "object": "collection" - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "dropIndexes", - "databaseName": "test", - "command": { - "dropIndexes": "coll", - "maxTimeMS": { - "$$exists": false - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/unified_format.py b/test/unified_format.py index ebeb62ceaa..7e6c09023b 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1160,6 +1160,7 @@ def run_entity_operation(self, spec): raise NotImplementedError elif isinstance(target, ClientEncryption): method_name = "_clientEncryptionOperation_%s" % (opname,) + client = target._key_vault_client else: method_name = "doesNotExist" @@ -1175,7 +1176,7 @@ def run_entity_operation(self, spec): try: # TODO: PYTHON-3289 apply inherited timeout by default. - inherit_timeout = getattr(target, "timeout", None) + inherit_timeout = client.options.timeout # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. if "timeout" in arguments or inherit_timeout is not None: timeout = arguments.pop("timeout", None) From 667046129a54e5f9a4e29334470bdef235e891dd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 01:22:43 -0500 Subject: [PATCH 0724/2111] PYTHON-3289 Apply client timeoutMS to every operation (#1011) --- pymongo/_csot.py | 22 +++++++++++++++++++++- pymongo/collection.py | 16 +++++++++++----- pymongo/database.py | 6 +++++- pymongo/mongo_client.py | 5 +++++ test/unified_format.py | 15 ++------------- 5 files changed, 44 insertions(+), 20 deletions(-) diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 6d3cd3c0f9..e25bba108f 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -14,9 +14,10 @@ """Internal helpers for CSOT.""" +import functools import time from contextvars import ContextVar, Token -from typing import Optional, Tuple +from typing import Any, Callable, Optional, Tuple, TypeVar, cast TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) @@ -83,3 +84,22 @@ def __exit__(self, exc_type, exc_val, exc_tb): TIMEOUT.reset(timeout_token) DEADLINE.reset(deadline_token) RTT.reset(rtt_token) + + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def apply(func: F) -> F: + """Apply the client's timeoutMS to this operation.""" + + @functools.wraps(func) + def csot_wrapper(self, *args, **kwargs): + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return func(self, *args, **kwargs) + return func(self, *args, **kwargs) + + return cast(F, csot_wrapper) diff --git a/pymongo/collection.py b/pymongo/collection.py index 4aff5c1784..22af5a6426 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -35,7 +35,7 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from bson.timestamp import Timestamp -from pymongo import ASCENDING, common, helpers, message +from pymongo import ASCENDING, _csot, common, helpers, message from pymongo.aggregation import ( _CollectionAggregationCommand, _CollectionRawAggregationCommand, @@ -217,6 +217,10 @@ def __init__( self.__database: Database[_DocumentType] = database self.__name = name self.__full_name = "%s.%s" % (self.__database.name, self.__name) + self.__write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout encrypted_fields = kwargs.pop("encryptedFields", None) if create or kwargs or collation: if encrypted_fields: @@ -230,10 +234,6 @@ def __init__( else: self.__create(name, kwargs, collation, session) - self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler="replace", document_class=dict - ) - def _socket_for_reads(self, session): return self.__database.client._socket_for_reads(self._read_preference_for(session), session) @@ -433,6 +433,7 @@ def with_options( read_concern or self.read_concern, ) + @_csot.apply def bulk_write( self, requests: Sequence[_WriteOp], @@ -631,6 +632,7 @@ def insert_one( write_concern.acknowledged, ) + @_csot.apply def insert_many( self, documents: Iterable[_DocumentIn], @@ -1892,6 +1894,7 @@ def create_indexes( kwargs["comment"] = comment return self.__create_indexes(indexes, session, **kwargs) + @_csot.apply def __create_indexes(self, indexes, session, **kwargs): """Internal createIndexes helper. @@ -2088,6 +2091,7 @@ def drop_indexes( kwargs["comment"] = comment self.drop_index("*", session=session, **kwargs) + @_csot.apply def drop_index( self, index_or_name: _IndexKeyHint, @@ -2311,6 +2315,7 @@ def options( return options + @_csot.apply def _aggregate( self, aggregation_command, @@ -2618,6 +2623,7 @@ def watch( full_document_before_change, ) + @_csot.apply def rename( self, new_name: str, diff --git a/pymongo/database.py b/pymongo/database.py index 9b9d512014..4f87a58dda 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -33,7 +33,7 @@ from bson.dbref import DBRef from bson.son import SON from bson.timestamp import Timestamp -from pymongo import common +from pymongo import _csot, common from pymongo.aggregation import _DatabaseAggregationCommand from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection @@ -138,6 +138,7 @@ def __init__( self.__name = name self.__client: MongoClient[_DocumentType] = client + self._timeout = client.options.timeout @property def client(self) -> "MongoClient[_DocumentType]": @@ -290,6 +291,7 @@ def get_collection( read_concern, ) + @_csot.apply def create_collection( self, name: str, @@ -690,6 +692,7 @@ def _command( client=self.__client, ) + @_csot.apply def command( self, command: Union[str, MutableMapping[str, Any]], @@ -964,6 +967,7 @@ def _drop_helper(self, name, session=None, comment=None): session=session, ) + @_csot.apply def drop_collection( self, name_or_collection: Union[str, Collection], diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index e949ba5cd5..080ae8757c 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -838,6 +838,7 @@ def target(): from pymongo.encryption import _Encrypter self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) + self._timeout = options.timeout def _duplicate(self, **kwargs): args = self.__init_kwargs.copy() @@ -1270,6 +1271,7 @@ def _socket_for_reads(self, read_preference, session): def _should_pin_cursor(self, session): return self.__options.load_balanced and not (session and session.in_transaction) + @_csot.apply def _run_operation(self, operation, unpack_res, address=None): """Run a _Query/_GetMore operation and return a Response. @@ -1318,6 +1320,7 @@ def _retry_with_session(self, retryable, func, session, bulk): ) return self._retry_internal(retryable, func, session, bulk) + @_csot.apply def _retry_internal(self, retryable, func, session, bulk): """Internal retryable write helper.""" max_wire_version = 0 @@ -1384,6 +1387,7 @@ def is_retrying(): retrying = True last_error = exc + @_csot.apply def _retryable_read(self, func, read_pref, session, address=None, retryable=True): """Execute an operation with at most one consecutive retries @@ -1834,6 +1838,7 @@ def list_database_names( """ return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] + @_csot.apply def drop_database( self, name_or_database: Union[str, database.Database], diff --git a/test/unified_format.py b/test/unified_format.py index 7e6c09023b..e37bc1bb6d 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1140,27 +1140,20 @@ def run_entity_operation(self, spec): if isinstance(target, MongoClient): method_name = "_clientOperation_%s" % (opname,) - client = target elif isinstance(target, Database): method_name = "_databaseOperation_%s" % (opname,) - client = target.client elif isinstance(target, Collection): method_name = "_collectionOperation_%s" % (opname,) - client = target.database.client elif isinstance(target, ChangeStream): method_name = "_changeStreamOperation_%s" % (opname,) - client = target._client elif isinstance(target, NonLazyCursor): method_name = "_cursor_%s" % (opname,) - client = target.client elif isinstance(target, ClientSession): method_name = "_sessionOperation_%s" % (opname,) - client = target._client elif isinstance(target, GridFSBucket): raise NotImplementedError elif isinstance(target, ClientEncryption): method_name = "_clientEncryptionOperation_%s" % (opname,) - client = target._key_vault_client else: method_name = "doesNotExist" @@ -1175,13 +1168,9 @@ def run_entity_operation(self, spec): cmd = functools.partial(method, target) try: - # TODO: PYTHON-3289 apply inherited timeout by default. - inherit_timeout = client.options.timeout # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. - if "timeout" in arguments or inherit_timeout is not None: - timeout = arguments.pop("timeout", None) - if timeout is None: - timeout = inherit_timeout + if "timeout" in arguments: + timeout = arguments.pop("timeout") with pymongo.timeout(timeout): result = cmd(**dict(arguments)) else: From db3f2dca05485118c90ff1904fe0400818fb212d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 15:01:41 -0700 Subject: [PATCH 0725/2111] PYTHON-2459 Implement unified GridFS tests (#1012) --- .evergreen/resync-specs.sh | 3 + test/gridfs/delete.json | 1019 +++++++++++++++++++++------- test/gridfs/download.json | 861 ++++++++++++----------- test/gridfs/downloadByName.json | 330 +++++++++ test/gridfs/download_by_name.json | 240 ------- test/gridfs/upload-disableMD5.json | 172 +++++ test/gridfs/upload.json | 891 +++++++++++++++--------- test/test_gridfs_spec.py | 214 +----- test/unified_format.py | 62 +- test/utils.py | 2 +- 10 files changed, 2362 insertions(+), 1432 deletions(-) create mode 100644 test/gridfs/downloadByName.json delete mode 100644 test/gridfs/download_by_name.json create mode 100644 test/gridfs/upload-disableMD5.json diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 4f5366098b..b64868c5a9 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -108,6 +108,9 @@ do csot|CSOT|client-side-operations-timeout) cpjson client-side-operations-timeout/tests csot ;; + gridfs) + cpjson gridfs/tests gridfs + ;; load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json index fb5de861f1..7a4ec27f88 100644 --- a/test/gridfs/delete.json +++ b/test/gridfs/delete.json @@ -1,304 +1,799 @@ { - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 8, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} + "description": "gridfs-delete", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000002" + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000003" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "1122" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} } - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "files_id": { - "$oid": "000000000000000000000004" + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 1, - "data": { - "$hex": "55667788" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } } - } - ] - }, + ] + } + ], "tests": [ { - "description": "Delete when length is 0", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000001" + "description": "delete when length is 0", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000001" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - } - ] - } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when length is 0 and there is one extra empty chunk", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000002" + "description": "delete when length is 0 and there is one extra empty chunk", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000002" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000002" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } } - ] - } - ] - } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when length is 8", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + "description": "delete when length is 8", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } } } - }, - "assert": { - "result": "void", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Delete when files entry does not exist", - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000000" + "description": "delete when files entry does not exist", + "operations": [ + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "FileNotFound" - } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } + } + ] + } + ] }, { - "description": "Delete when files entry does not exist and there are orphaned chunks", - "arrange": { - "data": [ - { - "delete": "fs.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + "description": "delete when files entry does not exist and there are orphaned chunks", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_files_collection", + "arguments": { + "filter": { + "_id": { + "$oid": "000000000000000000000004" } - ] + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "delete", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + }, + { + "name": "delete", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "FileNotFound", - "data": [ - { - "delete": "expected.files", - "deletes": [ - { - "q": { - "_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 1 + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } } - ] - }, - { - "delete": "expected.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000004" - } - }, - "limit": 0 + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } } - ] - } - ] - } + } + ] + } + ] } ] } diff --git a/test/gridfs/download.json b/test/gridfs/download.json index 5092fba981..48d3246218 100644 --- a/test/gridfs/download.json +++ b/test/gridfs/download.json @@ -1,467 +1,558 @@ { - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 0, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", - "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 8, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "dd254cdc958e53abaa67da9f797125f5", - "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 10, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000006" - }, - "length": 2, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} + "description": "gridfs-download", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000002" - }, - "n": 0, - "data": { - "$hex": "" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000003" + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "1122" - } - }, - { - "_id": { - "$oid": "000000000000000000000003" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "d41d8cd98f00b204e9800998ecf8427e", + "filename": "length-0-with-empty-chunk", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "filename": "length-2", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "dd254cdc958e53abaa67da9f797125f5", + "filename": "length-8", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "files_id": { - "$oid": "000000000000000000000004" + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} }, - "n": 1, - "data": { - "$hex": "55667788" + { + "_id": { + "$oid": "000000000000000000000006" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "c700ed4fdb1d27055aa3faa2c2432283", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} } - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "files_id": { - "$oid": "000000000000000000000005" + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "11223344" - } - }, - { - "_id": { - "$oid": "000000000000000000000006" + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000005" + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 1, - "data": { - "$hex": "55667788" - } - }, - { - "_id": { - "$oid": "000000000000000000000007" + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000005" + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } }, - "n": 2, - "data": { - "$hex": "99aa" - } - }, - { - "_id": { - "$oid": "000000000000000000000008" + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" + } + } }, - "files_id": { - "$oid": "000000000000000000000006" + { + "_id": { + "$oid": "000000000000000000000007" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2, + "data": { + "$binary": { + "base64": "mao=", + "subType": "00" + } + } }, - "n": 0, - "data": { - "$hex": "1122" + { + "_id": { + "$oid": "000000000000000000000008" + }, + "files_id": { + "$oid": "000000000000000000000006" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESI=", + "subType": "00" + } + } } - } - ] - }, + ] + } + ], "tests": [ { - "description": "Download when length is zero", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000001" + "description": "download when length is zero", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "" + "expectResult": { + "$$matchesHexBytes": "" + } } - } + ] }, { - "description": "Download when length is zero and there is one empty chunk", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000002" + "description": "download when length is zero and there is one empty chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000002" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "" + "expectResult": { + "$$matchesHexBytes": "" + } } - } + ] }, { - "description": "Download when there is one chunk", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000003" + "description": "download when there is one chunk", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122" + "expectResult": { + "$$matchesHexBytes": "1122" + } } - } + ] }, { - "description": "Download when there are two chunks", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000004" + "description": "download when there are two chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000004" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122334455667788" + "expectResult": { + "$$matchesHexBytes": "1122334455667788" + } } - } + ] }, { - "description": "Download when there are three chunks", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + "description": "download when there are three chunks", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "112233445566778899aa" + "expectResult": { + "$$matchesHexBytes": "112233445566778899aa" + } } - } + ] }, { - "description": "Download when files entry does not exist", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000000" - }, - "options": {} + "description": "download when files entry does not exist", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000000" + } + }, + "expectError": { + "isError": true + } } - }, - "assert": { - "error": "FileNotFound" - } + ] }, { - "description": "Download when an intermediate chunk is missing", - "arrange": { - "data": [ - { - "delete": "fs.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 1 - }, - "limit": 1 - } - ] + "description": "download when an intermediate chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsMissing" - } + ] }, { - "description": "Download when final chunk is missing", - "arrange": { - "data": [ - { - "delete": "fs.chunks", - "deletes": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 1 - }, - "limit": 1 - } - ] + "description": "download when final chunk is missing", + "operations": [ + { + "name": "deleteOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + } + }, + "expectResult": { + "deletedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsMissing" - } + ] }, { - "description": "Download when an intermediate chunk is the wrong size", - "arrange": { - "data": [ - { - "update": "fs.chunks", - "updates": [ + "description": "download when an intermediate chunk is the wrong size", + "operations": [ + { + "name": "bulkWrite", + "object": "bucket0_chunks_collection", + "arguments": { + "requests": [ { - "q": { - "files_id": { - "$oid": "000000000000000000000005" + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1 }, - "n": 1 - }, - "u": { - "$set": { - "data": { - "$hex": "556677" + "update": { + "$set": { + "data": { + "$binary": { + "base64": "VWZ3", + "subType": "00" + } + } } } } }, { - "q": { - "files_id": { - "$oid": "000000000000000000000005" + "updateOne": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 }, - "n": 2 - }, - "u": { - "$set": { - "data": { - "$hex": "8899aa" + "update": { + "$set": { + "data": { + "$binary": { + "base64": "iJmq", + "subType": "00" + } + } } } } } ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsWrongSize" - } + ] }, { - "description": "Download when final chunk is the wrong size", - "arrange": { - "data": [ - { - "update": "fs.chunks", - "updates": [ - { - "q": { - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 2 - }, - "u": { - "$set": { - "data": { - "$hex": "99" - } + "description": "download when final chunk is the wrong size", + "operations": [ + { + "name": "updateOne", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": { + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 2 + }, + "update": { + "$set": { + "data": { + "$binary": { + "base64": "mQ==", + "subType": "00" } } } - ] + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1 } - ] - }, - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000005" + }, + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000005" + } + }, + "expectError": { + "isError": true } } - }, - "assert": { - "error": "ChunkIsWrongSize" - } + ] }, { - "description": "Download legacy file with no name", - "act": { - "operation": "download", - "arguments": { - "id": { - "$oid": "000000000000000000000006" + "description": "download legacy file with no name", + "operations": [ + { + "name": "download", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000006" + } }, - "options": {} - } - }, - "assert": { - "result": { - "$hex": "1122" + "expectResult": { + "$$matchesHexBytes": "1122" + } } - } + ] } ] } diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json new file mode 100644 index 0000000000..cd44663957 --- /dev/null +++ b/test/gridfs/downloadByName.json @@ -0,0 +1,330 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "47ed733b8d10be225eceba344d533586", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-02T00:00:00.000Z" + }, + "md5": "b15835f133ff2e27c7cb28117bfae8f4", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-03T00:00:00.000Z" + }, + "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-04T00:00:00.000Z" + }, + "md5": "f623e75af30e62bbd73d6df5b50bb7b5", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-05T00:00:00.000Z" + }, + "md5": "4c614360da93c0a041b22e537de151eb", + "filename": "abc", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Ig==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "Mw==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "RA==", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000005" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 0, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "downloadByName defaults to latest revision (-1)", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when revision is 0", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 0 + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ] + }, + { + "description": "downloadByName when revision is 1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 1 + }, + "expectResult": { + "$$matchesHexBytes": "22" + } + } + ] + }, + { + "description": "downloadByName when revision is 2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 2 + }, + "expectResult": { + "$$matchesHexBytes": "33" + } + } + ] + }, + { + "description": "downloadByName when revision is -2", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -2 + }, + "expectResult": { + "$$matchesHexBytes": "44" + } + } + ] + }, + { + "description": "downloadByName when revision is -1", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": -1 + }, + "expectResult": { + "$$matchesHexBytes": "55" + } + } + ] + }, + { + "description": "downloadByName when files entry does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "xyz" + }, + "expectError": { + "isError": true + } + } + ] + }, + { + "description": "downloadByName when revision does not exist", + "operations": [ + { + "name": "downloadByName", + "object": "bucket0", + "arguments": { + "filename": "abc", + "revision": 999 + }, + "expectError": { + "isError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/download_by_name.json b/test/gridfs/download_by_name.json deleted file mode 100644 index ecc8c9e2cc..0000000000 --- a/test/gridfs/download_by_name.json +++ /dev/null @@ -1,240 +0,0 @@ -{ - "data": { - "files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "md5": "47ed733b8d10be225eceba344d533586", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-02T00:00:00.000Z" - }, - "md5": "b15835f133ff2e27c7cb28117bfae8f4", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-03T00:00:00.000Z" - }, - "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-04T00:00:00.000Z" - }, - "md5": "f623e75af30e62bbd73d6df5b50bb7b5", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-05T00:00:00.000Z" - }, - "md5": "4c614360da93c0a041b22e537de151eb", - "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], - "metadata": {} - } - ], - "chunks": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$hex": "11" - } - }, - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000002" - }, - "n": 0, - "data": { - "$hex": "22" - } - }, - { - "_id": { - "$oid": "000000000000000000000003" - }, - "files_id": { - "$oid": "000000000000000000000003" - }, - "n": 0, - "data": { - "$hex": "33" - } - }, - { - "_id": { - "$oid": "000000000000000000000004" - }, - "files_id": { - "$oid": "000000000000000000000004" - }, - "n": 0, - "data": { - "$hex": "44" - } - }, - { - "_id": { - "$oid": "000000000000000000000005" - }, - "files_id": { - "$oid": "000000000000000000000005" - }, - "n": 0, - "data": { - "$hex": "55" - } - } - ] - }, - "tests": [ - { - "description": "Download_by_name when revision is 0", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 0 - } - } - }, - "assert": { - "result": { - "$hex": "11" - } - } - }, - { - "description": "Download_by_name when revision is 1", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 1 - } - } - }, - "assert": { - "result": { - "$hex": "22" - } - } - }, - { - "description": "Download_by_name when revision is -2", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": -2 - } - } - }, - "assert": { - "result": { - "$hex": "44" - } - } - }, - { - "description": "Download_by_name when revision is -1", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": -1 - } - } - }, - "assert": { - "result": { - "$hex": "55" - } - } - }, - { - "description": "Download_by_name when files entry does not exist", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "xyz" - } - }, - "assert": { - "error": "FileNotFound" - } - }, - { - "description": "Download_by_name when revision does not exist", - "act": { - "operation": "download_by_name", - "arguments": { - "filename": "abc", - "options": { - "revision": 999 - } - } - }, - "assert": { - "error": "RevisionNotFound" - } - } - ] -} diff --git a/test/gridfs/upload-disableMD5.json b/test/gridfs/upload-disableMD5.json new file mode 100644 index 0000000000..d5a9d6f4ab --- /dev/null +++ b/test/gridfs/upload-disableMD5.json @@ -0,0 +1,172 @@ +{ + "description": "gridfs-upload-disableMD5", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "upload when length is 0 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ] + }, + { + "description": "upload when length is 1 sans MD5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, + "chunkSizeBytes": 4, + "disableMD5": true + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$exists": false + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json index 7d4adec1d8..97e18d2bc2 100644 --- a/test/gridfs/upload.json +++ b/test/gridfs/upload.json @@ -1,379 +1,616 @@ { - "data": { - "files": [], - "chunks": [] - }, + "description": "gridfs-upload", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [] + } + ], "tests": [ { - "description": "Upload when length is 0", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "" - }, - "options": { + "description": "upload when length is 0", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "" + }, "chunkSizeBytes": 4 - } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "d41d8cd98f00b204e9800998ecf8427e" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [] } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 0, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - } - ] - } + ] }, { - "description": "Upload when length is 1", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when length is 1", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 3", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "112233" - }, - "options": { + "description": "upload when length is 3", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "112233" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 3, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "112233" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 3, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "bafae3a174ab91fc70db7a6aa50f4f52" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIz", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 4", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11223344" - }, - "options": { + "description": "upload when length is 4", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11223344" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 4, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 4, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "7e7c77cff5705d1f7574a25ef6662117" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 5", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "1122334455" - }, - "options": { + "description": "upload when length is 5", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 5, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 5, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "283d4fea5dded59cf837d3047328f5af" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } + } + }, + { + "_id": { + "$$type": "objectId" }, - { - "_id": "*actual", - "files_id": "*result", - "n": 1, - "data": { - "$hex": "55" + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 1, + "data": { + "$binary": { + "base64": "VQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when length is 8", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "1122334455667788" - }, - "options": { + "description": "upload when length is 8", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "1122334455667788" + }, "chunkSizeBytes": 4 - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 8, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11223344" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "dd254cdc958e53abaa67da9f797125f5" + }, + "filename": "filename" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {}, + "sort": { + "n": 1 + } + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" } + } + }, + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" }, - { - "_id": "*actual", - "files_id": "*result", - "n": 1, - "data": { - "$hex": "55667788" + "n": 1, + "data": { + "$binary": { + "base64": "VWZ3iA==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when contentType is provided", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when contentType is provided", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4, "contentType": "image/jpeg" - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename", - "contentType": "image/jpeg" - } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename", + "contentType": "image/jpeg" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] }, { - "description": "Upload when metadata is provided", - "act": { - "operation": "upload", - "arguments": { - "filename": "filename", - "source": { - "$hex": "11" - }, - "options": { + "description": "upload when metadata is provided", + "operations": [ + { + "name": "upload", + "object": "bucket0", + "arguments": { + "filename": "filename", + "source": { + "$$hexBytes": "11" + }, "chunkSizeBytes": 4, "metadata": { "x": 1 } - } - } - }, - "assert": { - "result": "&result", - "data": [ - { - "insert": "expected.files", - "documents": [ - { - "_id": "*result", - "length": 1, - "chunkSize": 4, - "uploadDate": "*actual", - "filename": "filename", - "metadata": { - "x": 1 - } + }, + "expectResult": { + "$$type": "objectId" + }, + "saveResultAsEntity": "uploadedObjectId" + }, + { + "name": "find", + "object": "bucket0_files_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$$type": "date" + }, + "md5": { + "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" + }, + "filename": "filename", + "metadata": { + "x": 1 } - ] - }, - { - "insert": "expected.chunks", - "documents": [ - { - "_id": "*actual", - "files_id": "*result", - "n": 0, - "data": { - "$hex": "11" + } + ] + }, + { + "name": "find", + "object": "bucket0_chunks_collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$$type": "objectId" + }, + "files_id": { + "$$matchesEntity": "uploadedObjectId" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" } } - ] - } - ] - } + } + ] + } + ] } ] } diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 3c6f6b76c4..d080c05c4d 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -1,4 +1,4 @@ -# Copyright 2015 MongoDB, Inc. +# Copyright 2015-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,221 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test GridFSBucket class.""" +"""Test the GridFS unified spec tests.""" -import copy -import datetime import os -import re import sys -from json import loads sys.path[0:0] = [""] -from test import IntegrationTest, unittest - -import gridfs -from bson import Binary -from bson.int64 import Int64 -from bson.json_util import object_hook -from gridfs.errors import CorruptGridFile, NoFile - -# Commands. -_COMMANDS = { - "delete": lambda coll, doc: [coll.delete_many(d["q"]) for d in doc["deletes"]], - "insert": lambda coll, doc: coll.insert_many(doc["documents"]), - "update": lambda coll, doc: [coll.update_many(u["q"], u["u"]) for u in doc["updates"]], -} +from test import unittest +from test.unified_format import generate_test_classes # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. Special case for _id. - if camel == "id": - return "file_id" - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -class TestAllScenarios(IntegrationTest): - fs: gridfs.GridFSBucket - str_to_cmd: dict - - @classmethod - def setUpClass(cls): - super(TestAllScenarios, cls).setUpClass() - cls.fs = gridfs.GridFSBucket(cls.db) - cls.str_to_cmd = { - "upload": cls.fs.upload_from_stream, - "download": cls.fs.open_download_stream, - "delete": cls.fs.delete, - "download_by_name": cls.fs.open_download_stream_by_name, - } - - def init_db(self, data, test): - self.cleanup_colls( - self.db.fs.files, self.db.fs.chunks, self.db.expected.files, self.db.expected.chunks - ) - - # Read in data. - if data["files"]: - self.db.fs.files.insert_many(data["files"]) - self.db.expected.files.insert_many(data["files"]) - if data["chunks"]: - self.db.fs.chunks.insert_many(data["chunks"]) - self.db.expected.chunks.insert_many(data["chunks"]) - - # Make initial modifications. - if "arrange" in test: - for cmd in test["arrange"].get("data", []): - for key in cmd.keys(): - if key in _COMMANDS: - coll = self.db.get_collection(cmd[key]) - _COMMANDS[key](coll, cmd) - - def init_expected_db(self, test, result): - # Modify outcome DB. - for cmd in test["assert"].get("data", []): - for key in cmd.keys(): - if key in _COMMANDS: - # Replace wildcards in inserts. - for doc in cmd.get("documents", []): - keylist = doc.keys() - for dockey in copy.deepcopy(list(keylist)): - if "result" in str(doc[dockey]): - doc[dockey] = result - if "actual" in str(doc[dockey]): # Avoid duplicate - doc.pop(dockey) - # Move contentType to metadata. - if dockey == "contentType": - doc["metadata"] = {dockey: doc.pop(dockey)} - coll = self.db.get_collection(cmd[key]) - _COMMANDS[key](coll, cmd) - - if test["assert"].get("result") == "&result": - test["assert"]["result"] = result - - def sorted_list(self, coll, ignore_id): - to_sort = [] - for doc in coll.find(): - docstr = "{" - if ignore_id: # Cannot compare _id in chunks collection. - doc.pop("_id") - for k in sorted(doc.keys()): - if k == "uploadDate": # Can't compare datetime. - self.assertTrue(isinstance(doc[k], datetime.datetime)) - else: - docstr += "%s:%s " % (k, repr(doc[k])) - to_sort.append(docstr + "}") - return to_sort - - -def create_test(scenario_def): - def run_scenario(self): - - # Run tests. - self.assertTrue(scenario_def["tests"], "tests cannot be empty") - for test in scenario_def["tests"]: - self.init_db(scenario_def["data"], test) - - # Run GridFs Operation. - operation = self.str_to_cmd[test["act"]["operation"]] - args = test["act"]["arguments"] - extra_opts = args.pop("options", {}) - if "contentType" in extra_opts: - extra_opts["metadata"] = {"contentType": extra_opts.pop("contentType")} - - args.update(extra_opts) - - converted_args = dict((camel_to_snake(c), v) for c, v in args.items()) - - expect_error = test["assert"].get("error", False) - result = None - error = None - try: - result = operation(**converted_args) - - if "download" in test["act"]["operation"]: - result = Binary(result.read()) - except Exception as exc: - if not expect_error: - raise - error = exc - - self.init_expected_db(test, result) - - # Asserts. - errors = { - "FileNotFound": NoFile, - "ChunkIsMissing": CorruptGridFile, - "ExtraChunk": CorruptGridFile, - "ChunkIsWrongSize": CorruptGridFile, - "RevisionNotFound": NoFile, - } - - if expect_error: - self.assertIsNotNone(error) - self.assertIsInstance(error, errors[test["assert"]["error"]], test["description"]) - else: - self.assertIsNone(error) - - if "result" in test["assert"]: - if test["assert"]["result"] == "void": - test["assert"]["result"] = None - self.assertEqual(result, test["assert"].get("result")) - - if "data" in test["assert"]: - # Create alphabetized list - self.assertEqual( - set(self.sorted_list(self.db.fs.chunks, True)), - set(self.sorted_list(self.db.expected.chunks, True)), - ) - - self.assertEqual( - set(self.sorted_list(self.db.fs.files, False)), - set(self.sorted_list(self.db.expected.files, False)), - ) - - return run_scenario - - -def _object_hook(dct): - if "length" in dct: - dct["length"] = Int64(dct["length"]) - return object_hook(dct) - - -def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = loads(scenario_stream.read(), object_hook=_object_hook) - - # Because object_hook is already defined by bson.json_util, - # and everything is named 'data' - def str2hex(jsn): - for key, val in jsn.items(): - if key in ("data", "source", "result"): - if "$hex" in val: - jsn[key] = Binary(bytes.fromhex(val["$hex"])) - if isinstance(jsn[key], dict): - str2hex(jsn[key]) - if isinstance(jsn[key], list): - for k in jsn[key]: - str2hex(k) - - str2hex(scenario_def) - - # Construct test from scenario. - new_test = create_test(scenario_def) - test_name = "test_%s" % (os.path.splitext(filename)[0]) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") -create_tests() +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index e37bc1bb6d..d36b5d0a48 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -16,6 +16,7 @@ https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst """ +import binascii import collections import copy import datetime @@ -457,8 +458,10 @@ def _create_entity(self, entity_spec, uri=None): self.test.addCleanup(session.end_session) return elif entity_type == "bucket": - # TODO: implement the 'bucket' entity type - self.test.skipTest("GridFS is not currently supported (PYTHON-2459)") + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + self[spec["id"]] = GridFSBucket(db, **kwargs) + return elif entity_type == "clientEncryption": opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) if isinstance(opts["key_vault_client"], str): @@ -575,11 +578,12 @@ def _operation_type(self, spec, actual, key_to_compare): def _operation_matchesEntity(self, spec, actual, key_to_compare): expected_entity = self.test.entity_map[spec] - self.test.assertIsInstance(expected_entity, abc.Mapping) self.test.assertEqual(expected_entity, actual[key_to_compare]) def _operation_matchesHexBytes(self, spec, actual, key_to_compare): - raise NotImplementedError + expected = binascii.unhexlify(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertEqual(value, expected) def _operation_unsetOrMatches(self, spec, actual, key_to_compare): if key_to_compare is None and not actual: @@ -906,12 +910,15 @@ def maybe_skip_test(self, spec): if not client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") - if "timeoutMode" in op.get("arguments", {}): - self.skipTest("PyMongo does not support timeoutMode") - if name == "createEntities": - self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) if name == "modifyCollection": self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + if "csot" in class_name: + if "bucket" in op["object"]: + self.skipTest("CSOT not implemented for GridFS") + if name == "createEntities": + self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) def maybe_skip_entity(self, entities): for entity in entities: @@ -1116,9 +1123,35 @@ def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) return dict() + def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: + with target.open_download_stream(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_downloadByName( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + with target.open_download_stream_by_name(*args, **kwargs) as gout: + return gout.read() + + def _bucketOperation_upload(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream(*args, **kwargs) + + def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return target.upload_from_stream_with_id(*args, **kwargs) + + def _bucketOperation_drop(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> None: + # PyMongo does not support GridFSBucket.drop(), emulate it. + target._files.drop(*args, **kwargs) + target._chunks.drop(*args, **kwargs) + def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] - client = target opname = spec["name"] opargs = spec.get("arguments") expect_error = spec.get("expectError") @@ -1144,6 +1177,11 @@ def run_entity_operation(self, spec): method_name = "_databaseOperation_%s" % (opname,) elif isinstance(target, Collection): method_name = "_collectionOperation_%s" % (opname,) + # contentType is always stored in metadata in pymongo. + if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): method_name = "_changeStreamOperation_%s" % (opname,) elif isinstance(target, NonLazyCursor): @@ -1151,7 +1189,11 @@ def run_entity_operation(self, spec): elif isinstance(target, ClientSession): method_name = "_sessionOperation_%s" % (opname,) elif isinstance(target, GridFSBucket): - raise NotImplementedError + method_name = "_bucketOperation_%s" % (opname,) + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) elif isinstance(target, ClientEncryption): method_name = "_clientEncryptionOperation_%s" % (opname,) else: diff --git a/test/utils.py b/test/utils.py index 5421d584d1..29ee1ca477 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1087,7 +1087,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["session"] = entity_map[arguments["session"]] elif opname == "open_download_stream" and arg_name == "id": arguments["file_id"] = arguments.pop(arg_name) - elif opname != "find" and c2s == "max_time_ms": + elif opname not in ("find", "find_one") and c2s == "max_time_ms": # find is the only method that accepts snake_case max_time_ms. # All other methods take kwargs which must use the server's # camelCase maxTimeMS. See PYTHON-1855. From 935f926bd9bf556cadba5d7bda344371b4da24ef Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 17:46:09 -0700 Subject: [PATCH 0726/2111] PYTHON-3362 Ignore wtimeout when timeoutMS or timeout() is configured (#1013) Apply client timeoutMS to gridfs operations. --- gridfs/__init__.py | 8 ++++++- pymongo/_csot.py | 15 +++++++++++- pymongo/bulk.py | 5 ++-- pymongo/collection.py | 8 ------- pymongo/network.py | 5 ++-- pymongo/pool.py | 9 +------- test/csot/gridfs-advanced.json | 36 ++++++++++++++++++++--------- test/csot/gridfs-delete.json | 32 ++++++++++++++++++-------- test/csot/gridfs-download.json | 32 ++++++++++++++++++-------- test/csot/gridfs-find.json | 8 +++---- test/csot/gridfs-upload.json | 20 ++++++++-------- test/unified_format.py | 42 ++++++++++++++++++---------------- 12 files changed, 133 insertions(+), 87 deletions(-) diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 08c7e1d2cd..6ab843a85e 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -33,7 +33,7 @@ _clear_entity_type_registry, _disallow_transactions, ) -from pymongo import ASCENDING, DESCENDING +from pymongo import ASCENDING, DESCENDING, _csot from pymongo.client_session import ClientSession from pymongo.collection import Collection from pymongo.common import validate_string @@ -514,6 +514,7 @@ def __init__( ) self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout def open_upload_stream( self, @@ -631,6 +632,7 @@ def open_upload_stream_with_id( return GridIn(self._collection, session=session, **opts) + @_csot.apply def upload_from_stream( self, filename: str, @@ -679,6 +681,7 @@ def upload_from_stream( return cast(ObjectId, gin._id) + @_csot.apply def upload_from_stream_with_id( self, file_id: Any, @@ -762,6 +765,7 @@ def open_download_stream( gout._ensure_file() return gout + @_csot.apply def download_to_stream( self, file_id: Any, destination: Any, session: Optional[ClientSession] = None ) -> None: @@ -795,6 +799,7 @@ def download_to_stream( for chunk in gout: destination.write(chunk) + @_csot.apply def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: """Given an file_id, delete this stored file's files collection document and associated chunks from a GridFS bucket. @@ -926,6 +931,7 @@ def open_download_stream_by_name( except StopIteration: raise NoFile("no version %d for filename %r" % (revision, filename)) + @_csot.apply def download_to_stream_by_name( self, filename: str, diff --git a/pymongo/_csot.py b/pymongo/_csot.py index e25bba108f..5170c0d8ca 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -17,7 +17,9 @@ import functools import time from contextvars import ContextVar, Token -from typing import Any, Callable, Optional, Tuple, TypeVar, cast +from typing import Any, Callable, MutableMapping, Optional, Tuple, TypeVar, cast + +from pymongo.write_concern import WriteConcern TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) @@ -103,3 +105,14 @@ def csot_wrapper(self, *args, **kwargs): return func(self, *args, **kwargs) return cast(F, csot_wrapper) + + +def apply_write_concern(cmd: MutableMapping, write_concern: Optional[WriteConcern]) -> None: + """Apply the given write concern to a command.""" + if not write_concern or write_concern.is_server_default: + return + wc = write_concern.document + if get_timeout() is not None: + wc.pop("wtimeout", None) + if wc: + cmd["writeConcern"] = wc diff --git a/pymongo/bulk.py b/pymongo/bulk.py index 7992383f67..b21b576aa5 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -23,7 +23,7 @@ from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON -from pymongo import common +from pymongo import _csot, common from pymongo.client_session import _validate_session_write_concern from pymongo.collation import validate_collation_or_none from pymongo.common import ( @@ -315,8 +315,7 @@ def _execute_command( cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) if self.comment: cmd["comment"] = self.comment - if not write_concern.is_server_default: - cmd["writeConcern"] = write_concern.document + _csot.apply_write_concern(cmd, write_concern) if self.bypass_doc_val: cmd["bypassDocumentValidation"] = True if self.let is not None and run.op_type in (_DELETE, _UPDATE): diff --git a/pymongo/collection.py b/pymongo/collection.py index 22af5a6426..9a9ba56618 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -542,8 +542,6 @@ def _insert_one( command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) if comment is not None: command["comment"] = comment - if not write_concern.is_server_default: - command["writeConcern"] = write_concern.document def _insert_command(session, sock_info, retryable_write): if bypass_doc_val: @@ -756,8 +754,6 @@ def _update( if let is not None: common.validate_is_mapping("let", let) command["let"] = let - if not write_concern.is_server_default: - command["writeConcern"] = write_concern.document if comment is not None: command["comment"] = comment @@ -1232,8 +1228,6 @@ def _delete( hint = helpers._index_document(hint) delete_doc["hint"] = hint command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) - if not write_concern.is_server_default: - command["writeConcern"] = write_concern.document if let is not None: common.validate_is_document_type("let", let) @@ -2820,8 +2814,6 @@ def _find_and_modify(session, sock_info, retryable_write): "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." ) cmd["hint"] = hint - if not write_concern.is_server_default: - cmd["writeConcern"] = write_concern.document out = self._command( sock_info, cmd, diff --git a/pymongo/network.py b/pymongo/network.py index 3eac0d02d3..a5c5459e14 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -118,9 +118,8 @@ def command( # Support CSOT if client: - sock_info.apply_timeout(client, spec, write_concern) - elif write_concern and not write_concern.is_server_default: - spec["writeConcern"] = write_concern.document + sock_info.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) if use_op_msg: flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 diff --git a/pymongo/pool.py b/pymongo/pool.py index ed9feac918..1fab98209f 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -569,16 +569,13 @@ def set_socket_timeout(self, timeout): self.last_timeout = timeout self.sock.settimeout(timeout) - def apply_timeout(self, client, cmd, write_concern=None): + def apply_timeout(self, client, cmd): # CSOT: use remaining timeout when set. timeout = _csot.remaining() if timeout is None: # Reset the socket timeout unless we're performing a streaming monitor check. if not self.more_to_come: self.set_socket_timeout(self.opts.socket_timeout) - - if cmd and write_concern and not write_concern.is_server_default: - cmd["writeConcern"] = write_concern.document return None # RTT validation. rtt = _csot.get_rtt() @@ -593,10 +590,6 @@ def apply_timeout(self, client, cmd, write_concern=None): ) if cmd is not None: cmd["maxTimeMS"] = int(max_time_ms * 1000) - wc = write_concern.document if write_concern else {} - wc.pop("wtimeout", None) - if wc: - cmd["writeConcern"] = wc self.set_socket_timeout(timeout) return timeout diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json index 668b93f37a..0b09684fc7 100644 --- a/test/csot/gridfs-advanced.json +++ b/test/csot/gridfs-advanced.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -62,13 +62,12 @@ "_id": { "$oid": "000000000000000000000005" }, - "length": 10, + "length": 8, "chunkSize": 4, "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", + "filename": "length-8", "contentType": "application/octet-stream", "aliases": [], "metadata": {} @@ -93,6 +92,21 @@ "subType": "00" } } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } } ] } @@ -116,7 +130,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -129,7 +143,7 @@ "$oid": "000000000000000000000005" }, "newFilename": "foo", - "timeoutMS": 100 + "timeoutMS": 2000 } } ], @@ -174,7 +188,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -234,7 +248,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -243,7 +257,7 @@ "name": "drop", "object": "bucket", "arguments": { - "timeoutMS": 100 + "timeoutMS": 2000 } } ] @@ -266,7 +280,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -320,7 +334,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json index f458fa827c..8701929ff3 100644 --- a/test/csot/gridfs-delete.json +++ b/test/csot/gridfs-delete.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -62,13 +62,12 @@ "_id": { "$oid": "000000000000000000000005" }, - "length": 10, + "length": 8, "chunkSize": 4, "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", + "filename": "length-8", "contentType": "application/octet-stream", "aliases": [], "metadata": {} @@ -93,6 +92,21 @@ "subType": "00" } } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } } ] } @@ -116,7 +130,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -128,7 +142,7 @@ "id": { "$oid": "000000000000000000000005" }, - "timeoutMS": 100 + "timeoutMS": 1000 } } ] @@ -151,7 +165,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -210,7 +224,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -247,7 +261,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json index a3044a6d81..2ab64010f8 100644 --- a/test/csot/gridfs-download.json +++ b/test/csot/gridfs-download.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -62,13 +62,12 @@ "_id": { "$oid": "000000000000000000000005" }, - "length": 10, + "length": 8, "chunkSize": 4, "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", - "filename": "length-10", + "filename": "length-8", "contentType": "application/octet-stream", "aliases": [], "metadata": {} @@ -93,6 +92,21 @@ "subType": "00" } } + }, + { + "_id": { + "$oid": "000000000000000000000006" + }, + "files_id": { + "$oid": "000000000000000000000005" + }, + "n": 1, + "data": { + "$binary": { + "base64": "ESIzRA==", + "subType": "00" + } + } } ] } @@ -116,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -128,7 +142,7 @@ "id": { "$oid": "000000000000000000000005" }, - "timeoutMS": 100 + "timeoutMS": 1000 } } ] @@ -151,7 +165,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -210,7 +224,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -284,7 +298,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json index f75a279c01..45bb7066d6 100644 --- a/test/csot/gridfs-find.json +++ b/test/csot/gridfs-find.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -94,7 +94,7 @@ "object": "bucket", "arguments": { "filter": {}, - "timeoutMS": 100 + "timeoutMS": 1000 } } ], @@ -139,7 +139,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json index b0daeb2e42..690fdda77f 100644 --- a/test/csot/gridfs-upload.json +++ b/test/csot/gridfs-upload.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 75 }, "useMultipleMongoses": false } @@ -81,7 +81,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -117,7 +117,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -155,7 +155,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -193,7 +193,7 @@ "createIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -231,7 +231,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -269,7 +269,7 @@ "createIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -307,7 +307,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -345,7 +345,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 100 } } } @@ -384,7 +384,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } diff --git a/test/unified_format.py b/test/unified_format.py index d36b5d0a48..d81238c0ee 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -52,7 +52,7 @@ snake_to_camel, ) from test.version import Version -from typing import Any +from typing import Any, List import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util @@ -60,8 +60,8 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId from bson.regex import RE_TYPE, Regex -from gridfs import GridFSBucket -from pymongo import ASCENDING, MongoClient +from gridfs import GridFSBucket, GridOut +from pymongo import ASCENDING, MongoClient, _csot from pymongo.change_stream import ChangeStream from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection @@ -460,7 +460,17 @@ def _create_entity(self, entity_spec, uri=None): elif entity_type == "bucket": db = self[spec["database"]] kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) - self[spec["id"]] = GridFSBucket(db, **kwargs) + bucket = GridFSBucket(db, **kwargs) + + # PyMongo does not support GridFSBucket.drop(), emulate it. + @_csot.apply + def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: + self._files.drop(*args, **kwargs) + self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket return elif entity_type == "clientEncryption": opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) @@ -871,8 +881,11 @@ def maybe_skip_test(self, spec): or "Dirty implicit session is discarded" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") - elif "Client side error in command starting transaction" in spec["description"]: + if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") + if "timeoutMS applied to entire download" in spec["description"]: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: @@ -914,17 +927,6 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support modifyCollection") if "timeoutMode" in op.get("arguments", {}): self.skipTest("PyMongo does not support timeoutMode") - if "csot" in class_name: - if "bucket" in op["object"]: - self.skipTest("CSOT not implemented for GridFS") - if name == "createEntities": - self.maybe_skip_entity(op.get("arguments", {}).get("entities", [])) - - def maybe_skip_entity(self, entities): - for entity in entities: - entity_type = next(iter(entity)) - if entity_type == "bucket": - self.skipTest("GridFS is not currently supported (PYTHON-2459)") def process_error(self, exception, spec): is_error = spec.get("isError") @@ -1145,10 +1147,10 @@ def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwar kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") return target.upload_from_stream_with_id(*args, **kwargs) - def _bucketOperation_drop(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> None: - # PyMongo does not support GridFSBucket.drop(), emulate it. - target._files.drop(*args, **kwargs) - target._chunks.drop(*args, **kwargs) + def _bucketOperation_find( + self, target: GridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return list(target.find(*args, **kwargs)) def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] From 4d4fddaf699d16af6e082da5b5c3303cbafc2818 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 18:17:12 -0700 Subject: [PATCH 0727/2111] PYTHON-3363 Allow change stream to be resumed after a timeout (#1014) Apply client timeoutMS to ChangeStream iteration. --- pymongo/change_stream.py | 16 ++++++++++++++-- test/test_change_stream.py | 14 ++++++-------- test/test_csot.py | 33 ++++++++++++++++++++++++++++++++- test/unified_format.py | 9 ++++----- 4 files changed, 56 insertions(+), 16 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index ef3573022d..80820dff91 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -20,7 +20,7 @@ from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp -from pymongo import common +from pymongo import _csot, common from pymongo.aggregation import ( _CollectionAggregationCommand, _DatabaseAggregationCommand, @@ -128,6 +128,8 @@ def __init__( self._start_at_operation_time = start_at_operation_time self._session = session self._comment = comment + self._closed = False + self._timeout = self._target._timeout # Initialize cursor. self._cursor = self._create_cursor() @@ -234,6 +236,7 @@ def _resume(self): def close(self) -> None: """Close this ChangeStream.""" + self._closed = True self._cursor.close() def __iter__(self) -> "ChangeStream[_DocumentType]": @@ -248,6 +251,7 @@ def resume_token(self) -> Optional[Mapping[str, Any]]: """ return copy.deepcopy(self._resume_token) + @_csot.apply def next(self) -> _DocumentType: """Advance the cursor. @@ -298,8 +302,9 @@ def alive(self) -> bool: .. versionadded:: 3.8 """ - return self._cursor.alive + return not self._closed + @_csot.apply def try_next(self) -> Optional[_DocumentType]: """Advance the cursor without blocking indefinitely. @@ -332,6 +337,9 @@ def try_next(self) -> Optional[_DocumentType]: .. versionadded:: 3.8 """ + if not self._closed and not self._cursor.alive: + self._resume() + # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: @@ -350,6 +358,10 @@ def try_next(self) -> Optional[_DocumentType]: self._resume() change = self._cursor._try_next(False) + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True + # If no changes are available. if change is None: # We have either iterated over all documents in the cursor, diff --git a/test/test_change_stream.py b/test/test_change_stream.py index f3f206d965..11ed2895ac 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -486,7 +486,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): return response["cursor"]["postBatchResumeToken"] @no_type_check - def _test_raises_error_on_missing_id(self, expected_exception): + def _test_raises_error_on_missing_id(self, expected_exception, expected_exception2): """ChangeStream will raise an exception if the server response is missing the resume token. """ @@ -494,8 +494,7 @@ def _test_raises_error_on_missing_id(self, expected_exception): self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) - # The cursor should now be closed. - with self.assertRaises(StopIteration): + with self.assertRaises(expected_exception2): next(change_stream) @no_type_check @@ -525,17 +524,16 @@ def test_update_resume_token_legacy(self): self._test_update_resume_token(self._get_expected_resume_token_legacy) # Prose test no. 2 - @client_context.require_version_max(4, 3, 3) # PYTHON-2120 @client_context.require_version_min(4, 1, 8) def test_raises_error_on_missing_id_418plus(self): - # Server returns an error on 4.1.8+ - self._test_raises_error_on_missing_id(OperationFailure) + # Server returns an error on 4.1.8+, subsequent next() resumes and gets the same error. + self._test_raises_error_on_missing_id(OperationFailure, OperationFailure) # Prose test no. 2 @client_context.require_version_max(4, 1, 8) def test_raises_error_on_missing_id_418minus(self): - # PyMongo raises an error - self._test_raises_error_on_missing_id(InvalidOperation) + # PyMongo raises an error, closes the cursor, subsequent next() raises StopIteration. + self._test_raises_error_on_missing_id(InvalidOperation, StopIteration) # Prose test no. 3 @no_type_check diff --git a/test/test_csot.py b/test/test_csot.py index 4d71973320..7b82a49caf 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -19,11 +19,12 @@ sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes import pymongo from pymongo import _csot +from pymongo.errors import PyMongoError # Location of JSON test specifications. TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") @@ -72,6 +73,36 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_deadline(), float("inf")) self.assertEqual(_csot.get_rtt(), 0.0) + @client_context.require_version_min(3, 6) + @client_context.require_no_mmap + @client_context.require_no_standalone + def test_change_stream_can_resume_after_timeouts(self): + coll = self.db.test + with coll.watch(max_await_time_ms=150) as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if client_context.version < (4, 0): + stream.try_next() + coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. + with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index d81238c0ee..ee64915202 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1078,10 +1078,6 @@ def _sessionOperation_startTransaction(self, target, *args, **kwargs): self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) - def _cursor_iterateOnce(self, target, *args, **kwargs): - self.__raise_if_unsupported("iterateOnce", target, NonLazyCursor, ChangeStream) - return target.try_next() - def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): self.__raise_if_unsupported("iterateUntilDocumentOrError", target, ChangeStream) return next(target) @@ -1204,8 +1200,11 @@ def run_entity_operation(self, spec): try: method = getattr(self, method_name) except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" try: - cmd = getattr(target, camel_to_snake(opname)) + cmd = getattr(target, target_opname) except AttributeError: self.fail("Unsupported operation %s on entity %s" % (opname, target)) else: From 6172c00dbe1b53152293560a3c2272b44776fa9d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Jul 2022 19:08:54 -0700 Subject: [PATCH 0728/2111] PYTHON-3362 Fix CSOT gridfs test (#1015) --- test/csot/gridfs-advanced.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json index 0b09684fc7..6bf0229a04 100644 --- a/test/csot/gridfs-advanced.json +++ b/test/csot/gridfs-advanced.json @@ -366,7 +366,7 @@ "drop" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 50 } } } From 9bc134cf612fc29675a0388b6d30840c05fc1475 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jul 2022 08:33:41 -0700 Subject: [PATCH 0729/2111] BUMP 4.2 (#1016) --- doc/changelog.rst | 42 ++++++++++++++++++++++++++++++++++++++++-- pymongo/__init__.py | 2 +- setup.py | 2 +- 3 files changed, 42 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index b6b099fd31..7afaca22a1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,11 +13,26 @@ PyMongo 4.2 brings a number of improvements including: changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout to an entire block of pymongo operations. +- Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. - Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when the error was caused by a timeout. -- Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection` +- Added the ``check_exists`` argument to :meth:`~pymongo.database.Database.create_collection` that when True (the default) runs an additional ``listCollections`` command to verify that the collection does not exist already. +- Added the following key management APIs to :class:`~pymongo.encryption.ClientEncryption`: + + - :meth:`~pymongo.encryption.ClientEncryption.get_key` + - :meth:`~pymongo.encryption.ClientEncryption.get_keys` + - :meth:`~pymongo.encryption.ClientEncryption.delete_key` + - :meth:`~pymongo.encryption.ClientEncryption.add_key_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.get_key_by_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.remove_key_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.rewrap_many_data_key` + - :class:`~pymongo.encryption.RewrapManyDataKeyResult` + +- Support for the ``crypt_shared`` library to replace ``mongocryptd`` using the new + ``crypt_shared_lib_path`` and ``crypt_shared_lib_required`` arguments to + :class:`~pymongo.encryption_options.AutoEncryptionOpts`. Bug fixes ......... @@ -25,7 +40,18 @@ Bug fixes - Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). - Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex` - objects (`PYTHON-3048`_). :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData`. + objects. :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData` (`PYTHON-3048`_). +- Fixed a bug that caused ``AutoReconnect("connection pool paused")`` errors in the child + process after fork (`PYTHON-3257`_). +- Fixed a bug where :meth:`~pymongo.collection.Collection.count_documents` and + :meth:`~pymongo.collection.Collection.distinct` would fail in a transaction with + ``directConnection=True`` (`PYTHON-3333`_). +- GridFS no longer uploads an incomplete files collection document after encountering an + error in the middle of an upload fork. This results in fewer + :class:`~gridfs.errors.CorruptGridFile` errors (`PYTHON-1552`_). +- Renamed PyMongo's internal C extension methods to avoid crashing due to name conflicts + with mpi4py and other shared libraries (`PYTHON-2110`_). +- Fixed tight CPU loop for network I/O when using PyOpenSSL (`PYTHON-3187`_). Unavoidable breaking changes ............................ @@ -38,6 +64,11 @@ Unavoidable breaking changes Users of the Stable API with estimated_document_count are recommended to upgrade their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors (`PYTHON-3167`_). +- Removed generic typing from :class:`~pymongo.client_session.ClientSession` to improve + support for Pyright (`PYTHON-3283`_). +- Added ``__all__`` to the bson, pymongo, and gridfs packages. This could be a breaking + change for apps that relied on ``from bson import *`` to import APIs not present in + ``__all__`` (`PYTHON-3311`_). .. _count: https://mongodb.com/docs/manual/reference/command/count/ @@ -50,6 +81,13 @@ in this release. .. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048 .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 +.. _PYTHON-3257: https://jira.mongodb.org/browse/PYTHON-3257 +.. _PYTHON-3333: https://jira.mongodb.org/browse/PYTHON-3333 +.. _PYTHON-1552: https://jira.mongodb.org/browse/PYTHON-1552 +.. _PYTHON-2110: https://jira.mongodb.org/browse/PYTHON-2110 +.. _PYTHON-3283: https://jira.mongodb.org/browse/PYTHON-3283 +.. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311 +.. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 .. _Queryable Encryption: automatic-queryable-client-side-encryption diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 7eaa793648..ee246d25a9 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev2") +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0) def get_version_string() -> str: diff --git a/setup.py b/setup.py index ce6cce712e..94889d7261 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0.dev2" +version = "4.2.0" f = open("README.rst") try: From e192c7f85ec627e09cf934e15bc7c009b64c51a6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jul 2022 08:37:16 -0700 Subject: [PATCH 0730/2111] BUMP 4.2.1.dev0 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index ee246d25a9..257c1dbac1 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 2, 1, ".dev0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 94889d7261..0e983e4642 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.0" +version = "4.2.1.dev0" f = open("README.rst") try: From c131ad8cc13de32aca23d1b1d352d1a9892896b0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 20 Jul 2022 13:04:49 -0700 Subject: [PATCH 0731/2111] Update readme for 6.0 support (#1017) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index c301932643..f60b8da680 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, and 5.0. +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, and 6.0. Support / Feedback ================== From 065b02bcb3ff6d8c088e4934105b9158f48d7074 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 21 Jul 2022 11:47:02 -0700 Subject: [PATCH 0732/2111] PYTHON-3358 Skip obsolete StaleShardVersion test on 6.1.0+ (#1018) --- .../unified/change-streams-resume-errorLabels.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json index c156b550ce..f5f4505a9f 100644 --- a/test/change_streams/unified/change-streams-resume-errorLabels.json +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -1478,6 +1478,11 @@ }, { "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "maxServerVersion": "6.0.99" + } + ], "operations": [ { "name": "failPoint", From 925537575b63029931d54d3abf250e4ecdbcae75 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jul 2022 14:22:51 -0700 Subject: [PATCH 0733/2111] PYTHON-3284 Fix test_snapshot_query by waiting for documents to be committed to the snapshot (#1019) --- test/test_examples.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_examples.py b/test/test_examples.py index b7b70463ac..e23abe104f 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1372,9 +1372,9 @@ def check_for_snapshot(self, collection): """ with self.client.start_session(snapshot=True) as s: try: - with collection.aggregate([], session=s): - pass - return True + if collection.find_one(session=s): + return True + return False except OperationFailure as e: # Retry them as the server demands... if e.code == 246: # SnapshotUnavailable From f5ac946020609569e0eb7ca51a6316ad03a54fb1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jul 2022 15:19:22 -0700 Subject: [PATCH 0734/2111] PYTHON-3368 Add test that reads are not retried in a transaction (#1020) --- .../do-not-retry-read-in-transaction.json | 115 ++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 test/transactions/unified/do-not-retry-read-in-transaction.json diff --git a/test/transactions/unified/do-not-retry-read-in-transaction.json b/test/transactions/unified/do-not-retry-read-in-transaction.json new file mode 100644 index 0000000000..6d9dc704b8 --- /dev/null +++ b/test/transactions/unified/do-not-retry-read-in-transaction.json @@ -0,0 +1,115 @@ +{ + "description": "do not retry read in a transaction", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryReads": true + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-read-in-transaction-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "find does not retry in a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "startTransaction": true + }, + "commandName": "find", + "databaseName": "retryable-read-in-transaction-test" + } + } + ] + } + ] + } + ] +} From 864812d40093fd1502626b9cc45d62f97a29025a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 25 Jul 2022 15:25:41 -0700 Subject: [PATCH 0735/2111] PYTHON-3366 Support mypy 0.971 and test with latest version (#1021) PYTHON-3369 Use https://www.gevent.org --- .github/workflows/test-python.yml | 4 ++-- bson/__init__.py | 12 ++++++------ doc/conf.py | 2 +- pymongo/pyopenssl_context.py | 9 +++------ test/test_auth.py | 4 ++-- test/test_bson.py | 21 ++++++++++++++++----- test/test_change_stream.py | 3 ++- test/test_collection.py | 2 +- test/test_database.py | 5 +++-- 9 files changed, 36 insertions(+), 26 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 89d9830e82..6d5f26c503 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -59,8 +59,8 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy==0.942 - pip install -e ".[zstd, srv]" + python -m pip install -U pip mypy + pip install -e ".[zstd, srv, encryption, ocsp]" - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo diff --git a/bson/__init__.py b/bson/__init__.py index cc0850709e..2db1fb5d0b 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -61,8 +61,8 @@ import struct import sys import uuid -from codecs import utf_8_decode as _utf_8_decode # type: ignore[attr-defined] -from codecs import utf_8_encode as _utf_8_encode # type: ignore[attr-defined] +from codecs import utf_8_decode as _utf_8_decode +from codecs import utf_8_encode as _utf_8_encode from collections import abc as _abc from typing import ( IO, @@ -621,7 +621,7 @@ def _make_c_string_check(string: Union[str, bytes]) -> bytes: else: if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _make_c_string(string: Union[str, bytes]) -> bytes: @@ -633,7 +633,7 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: except UnicodeError: raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) else: - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _make_name(string: str) -> bytes: @@ -641,7 +641,7 @@ def _make_name(string: str) -> bytes: # Keys can only be text in python 3. if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: @@ -1308,7 +1308,7 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override] + def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override,assignment] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/doc/conf.py b/doc/conf.py index 1e18eb29bf..f66de3868a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -192,6 +192,6 @@ intersphinx_mapping = { - "gevent": ("http://www.gevent.org/", None), + "gevent": ("https://www.gevent.org/", None), "py": ("https://docs.python.org/3/", None), } diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 758a741b6f..2d9c904bb3 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -135,7 +135,7 @@ def recv(self, *args, **kwargs): def recv_into(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) # type: ignore + return self._call(super(_sslConn, self).recv_into, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -146,12 +146,9 @@ def sendall(self, buf, flags=0): view = memoryview(buf) total_length = len(buf) total_sent = 0 - sent = 0 while total_sent < total_length: try: - sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags # type: ignore - ) + sent = self._call(super(_sslConn, self).send, view[total_sent:], flags) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. @@ -162,7 +159,7 @@ def sendall(self, buf, flags=0): # https://github.com/pyca/pyopenssl/blob/19.1.0/src/OpenSSL/SSL.py#L1756 # https://www.openssl.org/docs/man1.0.2/man3/SSL_write.html if sent <= 0: - raise Exception("Connection closed") + raise OSError("connection closed") total_sent += sent diff --git a/test/test_auth.py b/test/test_auth.py index 69ed27bda0..20d53ef24b 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -329,8 +329,8 @@ def auth_string(user, password): bad_user = MongoClient(auth_string("not-user", SASL_PASS)) bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, "ping") - self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") # type: ignore[arg-type] + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") # type: ignore[arg-type] class TestSCRAMSHA1(IntegrationTest): diff --git a/test/test_bson.py b/test/test_bson.py index 8ad65f3412..aa77954fa2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -117,7 +117,8 @@ def tzname(self, dt): class TestBSON(unittest.TestCase): def assertInvalid(self, data): - self.assertRaises(InvalidBSON, decode, data) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 + self.assertRaises(InvalidBSON, decode, data) # type: ignore[arg-type] def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): @@ -1025,11 +1026,17 @@ def test_unicode_decode_error_handler(self): # Ensure that strict mode raises an error. for invalid in [invalid_key, invalid_val, invalid_both]: + # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( - InvalidBSON, decode, invalid, CodecOptions(unicode_decode_error_handler="strict") + InvalidBSON, + decode, # type: ignore[arg-type] + invalid, + CodecOptions(unicode_decode_error_handler="strict"), ) - self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) - self.assertRaises(InvalidBSON, decode, invalid) + self.assertRaises( + InvalidBSON, decode, invalid, CodecOptions() # type: ignore[arg-type] + ) + self.assertRaises(InvalidBSON, decode, invalid) # type: ignore[arg-type] # Test all other error handlers. for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: @@ -1046,8 +1053,12 @@ def test_unicode_decode_error_handler(self): dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) self.assertEqual(dec, {"keystr": "foobar"}) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( - InvalidBSON, decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk") + InvalidBSON, + decode, # type: ignore[arg-type] + invalid_both, + CodecOptions(unicode_decode_error_handler="junk"), ) def round_trip_pickle(self, obj, pickled_with_older): diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 11ed2895ac..b5b260086d 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1084,8 +1084,9 @@ def setFailPoint(self, scenario_dict): fail_cmd = SON([("configureFailPoint", "failCommand")]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.addCleanup( - client_context.client.admin.command, + client_context.client.admin.command, # type: ignore[arg-type] "configureFailPoint", fail_cmd["configureFailPoint"], mode="off", diff --git a/test/test_collection.py b/test/test_collection.py index bea2ed6ca6..37f1b1eae2 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -19,7 +19,7 @@ import contextlib import re import sys -from codecs import utf_8_decode # type: ignore +from codecs import utf_8_decode from collections import defaultdict from typing import Iterable, no_type_check diff --git a/test/test_database.py b/test/test_database.py index d49ac8324f..a1c0439089 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -604,13 +604,14 @@ def test_command_max_time_ms(self): try: db = self.client.pymongo_test db.command("count", "test") - self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) + # Remove type ignore after: https://github.com/python/mypy/issues/13220 + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) # type: ignore[arg-type] pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. db.command("aggregate", "test", pipeline=pipeline, cursor={}) self.assertRaises( ExecutionTimeout, - db.command, + db.command, # type: ignore[arg-type] "aggregate", "test", pipeline=pipeline, From e96f112d84c0a62ae601d1c65835c7319ea91255 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 27 Jul 2022 18:28:23 -0500 Subject: [PATCH 0736/2111] PYTHON-3274 Add commandStartedEvent assertions to clustered index spec tests (#1022) --- .../clustered-indexes.json | 122 +++++++++++++++++- 1 file changed, 118 insertions(+), 4 deletions(-) diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json index 739d0fd8b6..9db5ff06d7 100644 --- a/test/collection_management/clustered-indexes.json +++ b/test/collection_management/clustered-indexes.json @@ -10,14 +10,17 @@ "createEntities": [ { "client": { - "id": "client0" + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] } }, { "database": { "id": "database0", "client": "client0", - "databaseName": "ts-tests" + "databaseName": "ci-tests" } }, { @@ -31,7 +34,7 @@ "initialData": [ { "collectionName": "test", - "databaseName": "ts-tests", + "databaseName": "ci-tests", "documents": [] } ], @@ -64,10 +67,40 @@ "name": "assertCollectionExists", "object": "testRunner", "arguments": { - "databaseName": "ts-tests", + "databaseName": "ci-tests", "collectionName": "test" } } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + } + ] + } ] }, { @@ -125,6 +158,49 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": { + "$eq": "test" + } + } + }, + "databaseName": "ci-tests" + } + } + ] + } ] }, { @@ -171,6 +247,44 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "test" + }, + "databaseName": "ci-tests" + } + } + ] + } ] } ] From 14002a5a0d294cae8c2c5349e1a92364381dfd4d Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Wed, 27 Jul 2022 16:53:52 -0700 Subject: [PATCH 0737/2111] PYTHON-1824 Allow encoding/decoding out-of-range datetimes via DatetimeMS and datetime_conversion (#981) https://jira.mongodb.org/browse/PYTHON-1824 Co-authored-by: Ben Warner --- bson/__init__.py | 47 ++++------ bson/_cbsonmodule.c | 138 +++++++++++++++++++++++++++- bson/_cbsonmodule.h | 1 + bson/codec_options.py | 27 +++++- bson/codec_options.pyi | 8 ++ bson/datetime_ms.py | 157 ++++++++++++++++++++++++++++++++ bson/json_util.py | 43 +++++++-- doc/api/bson/datetime_ms.rst | 4 + doc/api/bson/index.rst | 1 + doc/examples/datetimes.rst | 54 +++++++++++ pymongo/common.py | 18 +++- pymongo/mongo_client.py | 8 ++ test/test_bson.py | 170 ++++++++++++++++++++++++++++++++++- test/test_client.py | 23 ++++- test/test_json_util.py | 68 +++++++++++++- 15 files changed, 721 insertions(+), 46 deletions(-) create mode 100644 bson/datetime_ms.py create mode 100644 doc/api/bson/datetime_ms.rst diff --git a/bson/__init__.py b/bson/__init__.py index 2db1fb5d0b..4283faf7dc 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -54,7 +54,6 @@ subtype 0. It will be decoded back to bytes. """ -import calendar import datetime import itertools import re @@ -100,9 +99,18 @@ from bson.codec_options import ( DEFAULT_CODEC_OPTIONS, CodecOptions, + DatetimeConversionOpts, _DocumentType, _raw_document_class, ) +from bson.datetime_ms import ( + EPOCH_AWARE, + EPOCH_NAIVE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, + utc, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -113,7 +121,6 @@ from bson.regex import Regex from bson.son import RE_TYPE, SON from bson.timestamp import Timestamp -from bson.tz_util import utc # Import some modules for type-checking only. if TYPE_CHECKING: @@ -187,12 +194,10 @@ "is_valid", "BSON", "has_c", + "DatetimeConversionOpts", + "DatetimeMS", ] -EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) - - BSONNUM = b"\x01" # Floating point BSONSTR = b"\x02" # UTF-8 string BSONOBJ = b"\x03" # Embedded document @@ -413,7 +418,7 @@ def _get_boolean( def _get_date( data: Any, view: Any, position: int, dummy0: int, opts: CodecOptions, dummy1: Any -) -> Tuple[datetime.datetime, int]: +) -> Tuple[Union[datetime.datetime, DatetimeMS], int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 @@ -724,6 +729,12 @@ def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: return b"\x09" + name + _PACK_LONG(millis) +def _encode_datetime_ms(name: bytes, value: DatetimeMS, dummy0: Any, dummy1: Any) -> bytes: + """Encode datetime.datetime.""" + millis = int(value) + return b"\x09" + name + _PACK_LONG(millis) + + def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode python None.""" return b"\x0A" + name @@ -814,6 +825,7 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: bool: _encode_bool, bytes: _encode_bytes, datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetime_ms, dict: _encode_mapping, float: _encode_float, int: _encode_int, @@ -948,27 +960,6 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo _dict_to_bson = _cbson._dict_to_bson # noqa: F811 -def _millis_to_datetime(millis: int, opts: CodecOptions) -> datetime.datetime: - """Convert milliseconds since epoch UTC to datetime.""" - diff = ((millis % 1000) + 1000) % 1000 - seconds = (millis - diff) // 1000 - micros = diff * 1000 - if opts.tz_aware: - dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) - if opts.tzinfo: - dt = dt.astimezone(opts.tzinfo) - return dt - else: - return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) - - -def _datetime_to_millis(dtm: datetime.datetime) -> int: - """Convert datetime to milliseconds since epoch UTC.""" - if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() # type: ignore - return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) - - _CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index da6a5cbda7..019f049bb5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -52,6 +52,9 @@ struct module_state { PyObject* BSONInt64; PyObject* Decimal128; PyObject* Mapping; + PyObject* DatetimeMS; + PyObject* _min_datetime_ms; + PyObject* _max_datetime_ms; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -72,6 +75,12 @@ struct module_state { /* The smallest possible BSON document, i.e. "{}" */ #define BSON_MIN_SIZE 5 +/* Datetime codec options */ +#define DATETIME 1 +#define DATETIME_CLAMP 2 +#define DATETIME_MS 3 +#define DATETIME_AUTO 4 + /* Get an error class from the bson.errors module. * * Returns a new ref */ @@ -179,6 +188,45 @@ static long long millis_from_datetime(PyObject* datetime) { return millis; } +/* Extended-range datetime, returns a DatetimeMS object with millis */ +static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ + // Allocate a new DatetimeMS object. + struct module_state *state = GETSTATE(self); + + PyObject* dt; + PyObject* ll_millis; + + if (!(ll_millis = PyLong_FromLongLong(millis))){ + return NULL; + } + dt = PyObject_CallFunctionObjArgs(state->DatetimeMS, ll_millis, NULL); + Py_DECREF(ll_millis); + return dt; +} + +/* Extended-range datetime, takes a DatetimeMS object and extracts the long long value. */ +static int millis_from_datetime_ms(PyObject* dt, long long* out){ + PyObject* ll_millis; + long long millis; + + if (!(ll_millis = PyNumber_Long(dt))){ + if (PyErr_Occurred()) { // TypeError + return 0; + } + } + + if ((millis = PyLong_AsLongLong(ll_millis)) == -1){ + if (PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } + } + Py_DECREF(ll_millis); + *out = millis; + return 1; +} + /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { if (pymongo_buffer_write(buffer, data, size)) { @@ -342,7 +390,10 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || - _load_object(&state->Mapping, "collections.abc", "Mapping")) { + _load_object(&state->Mapping, "collections.abc", "Mapping") || + _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || + _load_object(&state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms") || + _load_object(&state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms")) { return 1; } /* Reload our REType hack too. */ @@ -466,13 +517,14 @@ int convert_codec_options(PyObject* options_obj, void* p) { options->unicode_decode_error_handler = NULL; - if (!PyArg_ParseTuple(options_obj, "ObbzOO", + if (!PyArg_ParseTuple(options_obj, "ObbzOOb", &options->document_class, &options->tz_aware, &options->uuid_rep, &options->unicode_decode_error_handler, &options->tzinfo, - &type_registry_obj)) + &type_registry_obj, + &options->datetime_conversion)) return 0; type_marker = _type_marker(options->document_class); @@ -1049,6 +1101,13 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); + } else if (PyObject_TypeCheck(value, (PyTypeObject *) state->DatetimeMS)) { + long long millis; + if (!millis_from_datetime_ms(value, &millis)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { return _write_regex_to_buffer(buffer, type_byte, value); } @@ -1854,8 +1913,79 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } memcpy(&millis, buffer + *position, 8); millis = (int64_t)BSON_UINT64_FROM_LE(millis); - naive = datetime_from_millis(millis); *position += 8; + + if (options->datetime_conversion == DATETIME_MS){ + value = datetime_ms_from_millis(self, millis); + break; + } + + int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; + int dt_auto = options->datetime_conversion == DATETIME_AUTO; + + + if (dt_clamp || dt_auto){ + PyObject *min_millis_fn = _get_object(state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms"); + PyObject *max_millis_fn = _get_object(state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms"); + PyObject *min_millis_fn_res; + PyObject *max_millis_fn_res; + int64_t min_millis; + int64_t max_millis; + + if (min_millis_fn == NULL || max_millis_fn == NULL) { + Py_XDECREF(min_millis_fn); + Py_XDECREF(max_millis_fn); + goto invalid; + } + + if (options->tz_aware){ + PyObject* tzinfo = options->tzinfo; + if (tzinfo == Py_None) { + // Default to UTC. + utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); + tzinfo = utc_type; + } + min_millis_fn_res = PyObject_CallFunctionObjArgs(min_millis_fn, tzinfo, NULL); + max_millis_fn_res = PyObject_CallFunctionObjArgs(max_millis_fn, tzinfo, NULL); + } else { + min_millis_fn_res = PyObject_CallObject(min_millis_fn, NULL); + max_millis_fn_res = PyObject_CallObject(max_millis_fn, NULL); + } + + Py_DECREF(min_millis_fn); + Py_DECREF(max_millis_fn); + + if (!min_millis_fn_res || !max_millis_fn_res){ + Py_XDECREF(min_millis_fn_res); + Py_XDECREF(max_millis_fn_res); + goto invalid; + } + + min_millis = PyLong_AsLongLong(min_millis_fn_res); + max_millis = PyLong_AsLongLong(max_millis_fn_res); + + if ((min_millis == -1 || max_millis == -1) && PyErr_Occurred()) + { + // min/max_millis check + goto invalid; + } + + if (dt_clamp) { + if (millis < min_millis) { + millis = min_millis; + } else if (millis > max_millis) { + millis = max_millis; + } + // Continues from here to return a datetime. + } else if (dt_auto) { + if (millis < min_millis || millis > max_millis){ + value = datetime_ms_from_millis(self, millis); + break; // Out-of-range so done. + } + } + } + + naive = datetime_from_millis(millis); if (!options->tz_aware) { /* In the naive case, we're done here. */ value = naive; break; diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 12a2c8ac67..6ff453b8ff 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -62,6 +62,7 @@ typedef struct codec_options_t { char* unicode_decode_error_handler; PyObject* tzinfo; type_registry_t type_registry; + unsigned char datetime_conversion; PyObject* options_obj; unsigned char is_raw_bson; } codec_options_t; diff --git a/bson/codec_options.py b/bson/codec_options.py index 4eaff59ea7..a29c878929 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -16,6 +16,7 @@ import abc import datetime +import enum from collections.abc import MutableMapping as _MutableMapping from typing import ( Any, @@ -198,6 +199,16 @@ def __eq__(self, other: Any) -> Any: ) +class DatetimeConversionOpts(enum.IntEnum): + DATETIME = 1 + DATETIME_CLAMP = 2 + DATETIME_MS = 3 + DATETIME_AUTO = 4 + + def __repr__(self): + return f"{self.value}" + + class _BaseCodecOptions(NamedTuple): document_class: Type[Mapping[str, Any]] tz_aware: bool @@ -205,6 +216,7 @@ class _BaseCodecOptions(NamedTuple): unicode_decode_error_handler: str tzinfo: Optional[datetime.tzinfo] type_registry: TypeRegistry + datetime_conversion: Optional[DatetimeConversionOpts] class CodecOptions(_BaseCodecOptions): @@ -268,7 +280,13 @@ class CodecOptions(_BaseCodecOptions): encoded/decoded. - `type_registry`: Instance of :class:`TypeRegistry` used to customize encoding and decoding behavior. - + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. .. versionchanged:: 4.0 The default for `uuid_representation` was changed from :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to @@ -292,6 +310,7 @@ def __new__( unicode_decode_error_handler: str = "strict", tzinfo: Optional[datetime.tzinfo] = None, type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversionOpts] = DatetimeConversionOpts.DATETIME, ) -> "CodecOptions": doc_class = document_class or dict # issubclass can raise TypeError for generic aliases like SON[str, Any]. @@ -336,6 +355,7 @@ def __new__( unicode_decode_error_handler, tzinfo, type_registry, + datetime_conversion, ), ) @@ -350,7 +370,7 @@ def _arguments_repr(self) -> str: return ( "document_class=%s, tz_aware=%r, uuid_representation=%s, " "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r" + "type_registry=%r, datetime_conversion=%r" % ( document_class_repr, self.tz_aware, @@ -358,6 +378,7 @@ def _arguments_repr(self) -> str: self.unicode_decode_error_handler, self.tzinfo, self.type_registry, + self.datetime_conversion, ) ) @@ -371,6 +392,7 @@ def _options_dict(self) -> Dict[str, Any]: "unicode_decode_error_handler": self.unicode_decode_error_handler, "tzinfo": self.tzinfo, "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, } def __repr__(self): @@ -406,6 +428,7 @@ def _parse_codec_options(options: Any) -> CodecOptions: "unicode_decode_error_handler", "tzinfo", "type_registry", + "datetime_conversion", }: if k == "uuidrepresentation": kwargs["uuid_representation"] = options[k] diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi index 9d5f5c2656..260407524f 100644 --- a/bson/codec_options.pyi +++ b/bson/codec_options.pyi @@ -21,6 +21,7 @@ you get the error: "TypeError: 'type' object is not subscriptable". import datetime import abc +import enum from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union @@ -54,6 +55,11 @@ class TypeRegistry: _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +class DatetimeConversionOpts(int, enum.Enum): + DATETIME = ... + DATETIME_CLAMP = ... + DATETIME_MS = ... + DATETIME_AUTO = ... class CodecOptions(Tuple, Generic[_DocumentType]): document_class: Type[_DocumentType] @@ -62,6 +68,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): unicode_decode_error_handler: Optional[str] tzinfo: Optional[datetime.tzinfo] type_registry: TypeRegistry + datetime_conversion: Optional[int] def __new__( cls: Type[CodecOptions], @@ -71,6 +78,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): unicode_decode_error_handler: Optional[str] = ..., tzinfo: Optional[datetime.tzinfo] = ..., type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., ) -> CodecOptions[_DocumentType]: ... # CodecOptions API diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py new file mode 100644 index 0000000000..f3e25ed05a --- /dev/null +++ b/bson/datetime_ms.py @@ -0,0 +1,157 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools for representing the BSON datetime type.""" + +import calendar +import datetime +import functools +from typing import Any, Union, cast + +from bson.codec_options import ( + DEFAULT_CODEC_OPTIONS, + CodecOptions, + DatetimeConversionOpts, +) +from bson.tz_util import utc + +EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) +EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) + + +class DatetimeMS: + __slots__ = ("_value",) + + def __init__(self, value: Union[int, datetime.datetime]): + """Represents a BSON UTC datetime. + + BSON UTC datetimes are defined as an int64 of milliseconds since the Unix + epoch. The principal use of DatetimeMS is to represent datetimes outside + the range of the Python builtin :class:`~datetime.datetime` class when + encoding/decoding BSON. + + To decode UTC datetimes as a ``DatetimeMS``,`datetime_conversion` in + :class:`~bson.CodecOptions` must be set to 'datetime_ms' or + 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for details. + + :Parameters: + - `value`: An instance of :class:`datetime.datetime` to be + represented as milliseconds since the Unix epoch, or int of + milliseconds since the Unix epoch. + + .. versionadded:: 4.3 + """ + if isinstance(value, int): + if not (-(2**63) <= value <= 2**63 - 1): + raise OverflowError("Must be a 64-bit integer of milliseconds") + self._value = value + elif isinstance(value, datetime.datetime): + self._value = _datetime_to_millis(value) + else: + raise TypeError(f"{type(value)} is not a valid type for DatetimeMS") + + def __hash__(self) -> int: + return hash(self._value) + + def __repr__(self) -> str: + return type(self).__name__ + "(" + str(self._value) + ")" + + def __lt__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value < other + + def __le__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value <= other + + def __eq__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value == other._value + return False + + def __ne__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value != other._value + return True + + def __gt__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value > other + + def __ge__(self, other: Union["DatetimeMS", int]) -> bool: + return self._value >= other + + _type_marker = 9 + + def as_datetime(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> datetime.datetime: + """Create a Python :class:`~datetime.datetime` from this DatetimeMS object. + + :Parameters: + - `codec_options`: A CodecOptions instance for specifying how the + resulting DatetimeMS object will be formatted using ``tz_aware`` + and ``tz_info``. Defaults to + :const:`~bson.codec_options.DEFAULT_CODEC_OPTIONS`. + """ + return cast(datetime.datetime, _millis_to_datetime(self._value, codec_options)) + + def __int__(self) -> int: + return self._value + + +# Inclusive and exclusive min and max for timezones. +# Timezones are hashed by their offset, which is a timedelta +# and therefore there are more than 24 possible timezones. +@functools.lru_cache(maxsize=None) +def _min_datetime_ms(tz=datetime.timezone.utc): + return _datetime_to_millis(datetime.datetime.min.replace(tzinfo=tz)) + + +@functools.lru_cache(maxsize=None) +def _max_datetime_ms(tz=datetime.timezone.utc): + return _datetime_to_millis(datetime.datetime.max.replace(tzinfo=tz)) + + +def _millis_to_datetime(millis: int, opts: CodecOptions) -> Union[datetime.datetime, DatetimeMS]: + """Convert milliseconds since epoch UTC to datetime.""" + if ( + opts.datetime_conversion == DatetimeConversionOpts.DATETIME + or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO + ): + tz = opts.tzinfo or datetime.timezone.utc + if opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP: + millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) + elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO: + if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): + return DatetimeMS(millis) + + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) // 1000 + micros = diff * 1000 + + if opts.tz_aware: + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) + if opts.tzinfo: + dt = dt.astimezone(tz) + return dt + else: + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) + elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + return DatetimeMS(millis) + else: + raise ValueError("datetime_conversion must be an element of DatetimeConversionOpts") + + +def _datetime_to_millis(dtm: datetime.datetime) -> int: + """Convert datetime to milliseconds since epoch UTC.""" + if dtm.utcoffset() is not None: + dtm = dtm - dtm.utcoffset() # type: ignore + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) diff --git a/bson/json_util.py b/bson/json_util.py index 369c3d5f4a..0b5494e85c 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -94,11 +94,16 @@ import uuid from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, cast -import bson -from bson import EPOCH_AWARE from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.datetime_ms import ( + EPOCH_AWARE, + DatetimeMS, + _datetime_to_millis, + _max_datetime_ms, + _millis_to_datetime, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.int64 import Int64 @@ -228,6 +233,14 @@ class JSONOptions(CodecOptions): - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the timezone from which :class:`~datetime.datetime` objects should be decoded. Defaults to :const:`~bson.tz_util.utc`. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. - `args`: arguments to :class:`~bson.codec_options.CodecOptions` - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` @@ -594,7 +607,9 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary return _binary_or_uuid(data, int(subtype, 16), json_options) -def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.datetime: +def _parse_canonical_datetime( + doc: Any, json_options: JSONOptions +) -> Union[datetime.datetime, DatetimeMS]: """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: @@ -647,10 +662,15 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d if json_options.tz_aware: if json_options.tzinfo: aware = aware.astimezone(json_options.tzinfo) + if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + return DatetimeMS(aware) return aware else: - return aware.replace(tzinfo=None) - return bson._millis_to_datetime(int(dtm), json_options) + aware_tzinfo_none = aware.replace(tzinfo=None) + if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + return DatetimeMS(aware_tzinfo_none) + return aware_tzinfo_none + return _millis_to_datetime(int(dtm), json_options) def _parse_canonical_oid(doc: Any) -> ObjectId: @@ -806,10 +826,19 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) } - millis = bson._datetime_to_millis(obj) + millis = _datetime_to_millis(obj) if json_options.datetime_representation == DatetimeRepresentation.LEGACY: return {"$date": millis} return {"$date": {"$numberLong": str(millis)}} + if isinstance(obj, DatetimeMS): + if ( + json_options.datetime_representation == DatetimeRepresentation.ISO8601 + and 0 <= int(obj) <= _max_datetime_ms() + ): + return default(obj.as_datetime(), json_options) + elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": str(int(obj))} + return {"$date": {"$numberLong": str(int(obj))}} if json_options.strict_number_long and isinstance(obj, Int64): return {"$numberLong": str(obj)} if isinstance(obj, (RE_TYPE, Regex)): diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst new file mode 100644 index 0000000000..254f115eb8 --- /dev/null +++ b/doc/api/bson/datetime_ms.rst @@ -0,0 +1,4 @@ +:mod:`datetime_ms` -- Support for BSON UTC Datetime +=================================================== +.. automodule:: bson.datetime_ms + :members: diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 5f15ed99eb..72baae68a6 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -13,6 +13,7 @@ Sub-modules: binary code codec_options + datetime_ms dbref decimal128 errors diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index d712ce6138..b9c509e075 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -102,3 +102,57 @@ out of MongoDB in US/Pacific time: >>> result = aware_times.find_one() datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE tzinfo=) + +.. _handling-out-of-range-datetimes: + +Handling out of range datetimes +------------------------------- + +Python's :class:`~datetime.datetime` can only represent datetimes within the +range allowed by +:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`, whereas +the range of datetimes allowed in BSON can represent any 64-bit number +of milliseconds from the Unix epoch. To deal with this, we can use the +:class:`bson.datetime_ms.DatetimeMS` object, which is a wrapper for the +:class:`int` built-in. + +To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, +:class:`~bson.codec_options.CodecOptions` should have its +``datetime_conversion`` parameter set to one of the options available in +:class:`bson.datetime_ms.DatetimeConversionOpts`. These include +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME`, +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS`, +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO`, +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP`. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME` is the default +option and has the behavior of raising an exception upon attempting to +decode an out-of-range date. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS` will only return +:class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the +represented datetime is in- or out-of-range. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO` will return +:class:`~datetime.datetime` if the underlying UTC datetime is within range, +or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime +cannot be represented using the builtin Python :class:`~datetime.datetime`. +:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP` will clamp +resulting :class:`~datetime.datetime` objects to be within +:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` +(trimmed to `999000` microseconds). + +An example of encoding and decoding using `DATETIME_MS` is as follows: + +.. doctest:: + >>> from datetime import datetime + >>> from bson import encode, decode + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import CodecOptions,DatetimeConversionOpts + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> x + b'\x10\x00\x00\x00\tx\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + >>> decode(x, codec_options=CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)) + {'x': DatetimeMS(0)} + +:class:`~bson.datetime_ms.DatetimeMS` objects have support for rich comparison +methods against other instances of :class:`~bson.datetime_ms.DatetimeMS`. +They can also be converted to :class:`~datetime.datetime` objects with +:meth:`~bson.datetime_ms.DatetimeMS.to_datetime()`. diff --git a/pymongo/common.py b/pymongo/common.py index 6ffc97f2a8..319b07193c 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -36,7 +36,7 @@ from bson import SON from bson.binary import UuidRepresentation -from bson.codec_options import CodecOptions, TypeRegistry +from bson.codec_options import CodecOptions, DatetimeConversionOpts, TypeRegistry from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS from pymongo.compression_support import ( @@ -620,6 +620,21 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A return value +def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversionOpts]: + """Validate a DatetimeConversionOpts string.""" + if value is None: + return DatetimeConversionOpts.DATETIME + + if isinstance(value, str): + if value.isdigit(): + return DatetimeConversionOpts(int(value)) + return DatetimeConversionOpts[value] + elif isinstance(value, int): + return DatetimeConversionOpts(value) + + raise TypeError("%s must be a str or int representing DatetimeConversionOpts" % (option,)) + + # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { @@ -684,6 +699,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "uuidrepresentation": validate_uuid_representation, "waitqueuemultiple": validate_non_negative_integer_or_none, "waitqueuetimeoutms": validate_timeout_or_none, + "datetime_conversion": validate_datetime_conversion, } # Dictionary where keys are the names of keyword-only options for the diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 080ae8757c..fd4c0e84bc 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -239,6 +239,14 @@ def __init__( - `type_registry` (optional): instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. | **Other optional parameters can be passed as keyword arguments:** diff --git a/test/test_bson.py b/test/test_bson.py index aa77954fa2..0893000c0c 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -38,7 +38,9 @@ from bson import ( BSON, EPOCH_AWARE, + DatetimeMS, Regex, + _datetime_to_millis, decode, decode_all, decode_file_iter, @@ -48,7 +50,7 @@ ) from bson.binary import Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversionOpts from bson.dbref import DBRef from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 @@ -978,7 +980,7 @@ def test_codec_options_repr(self): "uuid_representation=UuidRepresentation.UNSPECIFIED, " "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None))" + "fallback_encoder=None), datetime_conversion=1)" ) self.assertEqual(r, repr(CodecOptions())) @@ -1153,5 +1155,169 @@ def test_bson_encode_decode(self) -> None: self.assertTrue(decoded["_id"].generation_time) +class TestDatetimeConversion(unittest.TestCase): + def test_comps(self): + # Tests other timestamp formats. + # Test each of the rich comparison methods. + pairs = [ + (DatetimeMS(-1), DatetimeMS(1)), + (DatetimeMS(0), DatetimeMS(0)), + (DatetimeMS(1), DatetimeMS(-1)), + ] + + comp_ops = ["__lt__", "__le__", "__eq__", "__ne__", "__gt__", "__ge__"] + for lh, rh in pairs: + for op in comp_ops: + self.assertEqual(getattr(lh, op)(rh), getattr(lh._value, op)(rh._value)) + + def test_class_conversions(self): + # Test class conversions. + dtr1 = DatetimeMS(1234) + dt1 = dtr1.as_datetime() + self.assertEqual(dtr1, DatetimeMS(dt1)) + + dt2 = datetime.datetime(1969, 1, 1) + dtr2 = DatetimeMS(dt2) + self.assertEqual(dtr2.as_datetime(), dt2) + + # Test encode and decode without codec options. Expect: DatetimeMS => datetime + dtr1 = DatetimeMS(0) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1) + self.assertEqual(dec1["x"], datetime.datetime(1970, 1, 1)) + self.assertNotEqual(type(dtr1), type(dec1["x"])) + + # Test encode and decode with codec options. Expect: UTCDateimteRaw => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + enc1 = encode({"x": dtr1}) + dec1 = decode(enc1, opts1) + self.assertEqual(type(dtr1), type(dec1["x"])) + self.assertEqual(dtr1, dec1["x"]) + + # Expect: datetime => DatetimeMS + opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + enc1 = encode({"x": dt1}) + dec1 = decode(enc1, opts1) + self.assertEqual(dec1["x"], DatetimeMS(0)) + self.assertNotEqual(dt1, type(dec1["x"])) + + def test_clamping(self): + # Test clamping from below and above. + opts1 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_tz_clamping(self): + # Naive clamping to local tz. + opts1 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=False + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + + dec_below = decode(below, opts1) + self.assertEqual(dec_below["x"], datetime.datetime.min) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(microsecond=999000), + ) + + # Aware clamping. + opts2 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=True + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_datetime_auto(self): + # Naive auto, in range. + opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Naive auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts1) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Naive auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts1) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + # Aware auto, in range. + opts2 = CodecOptions( + datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts2) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Aware auto, below range. + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts2) + self.assertEqual( + dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60) + ) + + # Aware auto, above range. + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts2) + self.assertEqual( + dec_above["x"], + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60), + ) + + def test_millis_from_datetime_ms(self): + # Test 65+ bit integer conversion, expect OverflowError. + big_ms = 2**65 + with self.assertRaises(OverflowError): + encode({"x": DatetimeMS(big_ms)}) + + # Subclass of DatetimeMS w/ __int__ override, expect an Error. + class DatetimeMSOverride(DatetimeMS): + def __int__(self): + return float(self._value) + + float_ms = DatetimeMSOverride(2) + with self.assertRaises(TypeError): + encode({"x": float_ms}) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 3630cec06c..f520043ecf 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -65,7 +65,12 @@ import pymongo from bson import encode -from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry +from bson.codec_options import ( + CodecOptions, + DatetimeConversionOpts, + TypeEncoder, + TypeRegistry, +) from bson.son import SON from bson.tz_util import utc from pymongo import event_loggers, message, monitoring @@ -386,14 +391,17 @@ def test_uri_codec_options(self): # Ensure codec options are passed in correctly uuid_representation_label = "javaLegacy" unicode_decode_error_handler = "ignore" + datetime_conversion = "DATETIME_CLAMP" uri = ( "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" "%s&unicode_decode_error_handler=%s" + "&datetime_conversion=%s" % ( client_context.host, client_context.port, uuid_representation_label, unicode_decode_error_handler, + datetime_conversion, ) ) c = MongoClient(uri, connect=False) @@ -403,6 +411,19 @@ def test_uri_codec_options(self): c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + ) + + # Change the passed datetime_conversion to a number and re-assert. + uri = uri.replace( + datetime_conversion, f"{int(DatetimeConversionOpts[datetime_conversion])}" + ) + c = MongoClient(uri, connect=False) + + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + ) def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. diff --git a/test/test_json_util.py b/test/test_json_util.py index ee5b7abb49..576746e865 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -21,11 +21,13 @@ import uuid from typing import Any, List, MutableMapping +from bson.codec_options import CodecOptions, DatetimeConversionOpts + sys.path[0:0] = [""] from test import IntegrationTest, unittest -from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, json_util +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, DatetimeMS, json_util from bson.binary import ( ALL_UUID_REPRESENTATIONS, MD5_SUBTYPE, @@ -35,6 +37,7 @@ UuidRepresentation, ) from bson.code import Code +from bson.datetime_ms import _max_datetime_ms from bson.dbref import DBRef from bson.int64 import Int64 from bson.json_util import ( @@ -241,6 +244,69 @@ def test_datetime(self): ), ) + def test_datetime_ms(self): + # Test ISO8601 in-range + dat_min = {"x": DatetimeMS(0)} + dat_max = {"x": DatetimeMS(_max_datetime_ms())} + opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) + + self.assertEqual( + dat_min["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_min))["x"], + ) + self.assertEqual( + dat_max["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_max))["x"], + ) + + # Test ISO8601 out-of-range + dat_min = {"x": DatetimeMS(-1)} + dat_max = {"x": DatetimeMS(_max_datetime_ms() + 1)} + + self.assertEqual('{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min)) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max), + ) + # Test legacy. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.LEGACY, json_mode=JSONMode.LEGACY + ) + self.assertEqual('{"x": {"$date": "-1"}}', json_util.dumps(dat_min, json_options=opts)) + self.assertEqual( + '{"x": {"$date": "' + str(int(dat_max["x"])) + '"}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test regular. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min, json_options=opts) + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test decode from datetime.datetime to DatetimeMS + dat_min = {"x": datetime.datetime.min} + dat_max = {"x": DatetimeMS(_max_datetime_ms()).as_datetime(CodecOptions(tz_aware=False))} + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + datetime_conversion=DatetimeConversionOpts.DATETIME_MS, + ) + + self.assertEqual( + DatetimeMS(dat_min["x"]), + json_util.loads(json_util.dumps(dat_min), json_options=opts)["x"], + ) + self.assertEqual( + DatetimeMS(dat_max["x"]), + json_util.loads(json_util.dumps(dat_max), json_options=opts)["x"], + ) + def test_regex_object_hook(self): # Extended JSON format regular expression. pat = "a*b" From 0c56d5665811df65aa25602b61d289cf605e3647 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 28 Jul 2022 15:55:34 -0700 Subject: [PATCH 0738/2111] PYTHON-3371 Remove DatetimeConversionOpts.__repr__ (#1023) * Removed __repr__ and adjusted repr string * Changed to %s Co-authored-by: Ben Warner --- bson/codec_options.py | 5 +---- test/test_bson.py | 3 ++- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index a29c878929..afffa2f120 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -205,9 +205,6 @@ class DatetimeConversionOpts(enum.IntEnum): DATETIME_MS = 3 DATETIME_AUTO = 4 - def __repr__(self): - return f"{self.value}" - class _BaseCodecOptions(NamedTuple): document_class: Type[Mapping[str, Any]] @@ -370,7 +367,7 @@ def _arguments_repr(self) -> str: return ( "document_class=%s, tz_aware=%r, uuid_representation=%s, " "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r, datetime_conversion=%r" + "type_registry=%r, datetime_conversion=%s" % ( document_class_repr, self.tz_aware, diff --git a/test/test_bson.py b/test/test_bson.py index 0893000c0c..7fe0c168c6 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -980,7 +980,8 @@ def test_codec_options_repr(self): "uuid_representation=UuidRepresentation.UNSPECIFIED, " "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " - "fallback_encoder=None), datetime_conversion=1)" + "fallback_encoder=None), " + "datetime_conversion=DatetimeConversionOpts.DATETIME)" ) self.assertEqual(r, repr(CodecOptions())) From 3c18c2079524d322d24b5dcc515ddc91c59e3fbd Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Fri, 29 Jul 2022 12:07:04 -0700 Subject: [PATCH 0739/2111] PYTHON-3377 datetime_ms documentation page is empty (#1026) Co-authored-by: Ben Warner --- bson/datetime_ms.py | 25 +++++++++++++++---------- doc/api/bson/datetime_ms.rst | 2 ++ 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index f3e25ed05a..925087a5aa 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -12,7 +12,10 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -"""Tools for representing the BSON datetime type.""" +"""Tools for representing the BSON datetime type. + +.. versionadded:: 4.3 +""" import calendar import datetime @@ -31,26 +34,28 @@ class DatetimeMS: + """Represents a BSON UTC datetime.""" + __slots__ = ("_value",) def __init__(self, value: Union[int, datetime.datetime]): """Represents a BSON UTC datetime. - BSON UTC datetimes are defined as an int64 of milliseconds since the Unix - epoch. The principal use of DatetimeMS is to represent datetimes outside - the range of the Python builtin :class:`~datetime.datetime` class when + BSON UTC datetimes are defined as an int64 of milliseconds since the + Unix epoch. The principal use of DatetimeMS is to represent + datetimes outside the range of the Python builtin + :class:`~datetime.datetime` class when encoding/decoding BSON. - To decode UTC datetimes as a ``DatetimeMS``,`datetime_conversion` in + To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in :class:`~bson.CodecOptions` must be set to 'datetime_ms' or - 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for details. + 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for + details. :Parameters: - `value`: An instance of :class:`datetime.datetime` to be - represented as milliseconds since the Unix epoch, or int of - milliseconds since the Unix epoch. - - .. versionadded:: 4.3 + represented as milliseconds since the Unix epoch, or int of + milliseconds since the Unix epoch. """ if isinstance(value, int): if not (-(2**63) <= value <= 2**63 - 1): diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst index 254f115eb8..1afaad69fc 100644 --- a/doc/api/bson/datetime_ms.rst +++ b/doc/api/bson/datetime_ms.rst @@ -1,4 +1,6 @@ :mod:`datetime_ms` -- Support for BSON UTC Datetime =================================================== + .. automodule:: bson.datetime_ms + :synopsis: Support for BSON UTC datetimes. :members: From 1166bb96cd538e90c65dc9a6674791b5accb16dc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 29 Jul 2022 15:39:11 -0700 Subject: [PATCH 0740/2111] PYTHON-3382 Resync csfle tests (#1027) --- test/client-side-encryption/spec/unified/addKeyAltName.json | 4 +++- .../spec/unified/createDataKey-kms_providers-invalid.json | 2 +- test/client-side-encryption/spec/unified/getKey.json | 4 +++- .../spec/unified/getKeyByAltName.json | 4 +++- .../spec/unified/removeKeyAltName.json | 4 +++- .../spec/unified/rewrapManyDataKey.json | 6 +++--- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json index 8b6c174cbc..f70bc572a8 100644 --- a/test/client-side-encryption/spec/unified/addKeyAltName.json +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -98,7 +98,9 @@ }, "keyAltName": "new_key_alt_name" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json index 16cf6ca70d..2344a61a95 100644 --- a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -1,5 +1,5 @@ { - "description": "createDataKey-provider-invalid", + "description": "createDataKey-kms_providers-invalid", "schemaVersion": "1.8", "runOnRequirements": [ { diff --git a/test/client-side-encryption/spec/unified/getKey.json b/test/client-side-encryption/spec/unified/getKey.json index 6a7269b2ca..2ea3fe7358 100644 --- a/test/client-side-encryption/spec/unified/getKey.json +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -133,7 +133,9 @@ } } }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json index f94459bbd8..2505abc16e 100644 --- a/test/client-side-encryption/spec/unified/getKeyByAltName.json +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -128,7 +128,9 @@ "arguments": { "keyAltName": "does_not_exist" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json index bef13c87de..1b7077077a 100644 --- a/test/client-side-encryption/spec/unified/removeKeyAltName.json +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -102,7 +102,9 @@ }, "keyAltName": "does_not_exist" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index 7e3abb1274..89860de0c0 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -1,5 +1,5 @@ { - "description": "rewrapManyDataKey-kms_providers", + "description": "rewrapManyDataKey", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -128,7 +128,7 @@ ], "keyMaterial": { "$binary": { - "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEGkNTybTc7Eyif0f+qqE0lAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDB2j78AeuIQxcRh8cQIBEIB7vj9buHEaT7XHFIsKBJiyzZRmNnjvqMK5LSdzonKdx97jlqauvPvTDXSsdQDcspUs5oLrGmAXpbFResscxmbwZoKgUtWiuIOpeAcYuszCiMKt15s1WIMLDXUhYtfCmhRhekvgHnRAaK4HJMlGE+lKJXYI84E0b86Cd/g+", + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", "subType": "00" } }, @@ -196,7 +196,7 @@ ], "keyMaterial": { "$binary": { - "base64": "VoI9J8HusQ3u2gT9i8Awgg/6W4/igvLwRzn3SRDGx0Dl/1ayDMubphOw0ONPVKfuvS6HL3e4gAoCJ/uEz2KLFTVsEqYCpMhfAhgXxm8Ena8vDcOkCzFX+euvN/N2ES3wpzAD18b3qIH0MbBwKJP82d5GQ4pVfGnPW8Ujp9aO1qC/s0EqNqYyzJ1SyzhV9lAjHHGIENYJx+bBrekg2EeZBA==", + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", "subType": "00" } }, From fbb8dde826f5c32ef3db5c046eb81a73c902241a Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Fri, 29 Jul 2022 15:53:38 -0700 Subject: [PATCH 0741/2111] PYTHON-3375 Added docstrings to DatetimeConversionOpts (#1024) * Added docstrings * Fixed detail * Fixed punctuation and links Co-authored-by: Ben Warner --- bson/codec_options.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/bson/codec_options.py b/bson/codec_options.py index afffa2f120..bceab5e003 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -200,10 +200,38 @@ def __eq__(self, other: Any) -> Any: class DatetimeConversionOpts(enum.IntEnum): + """Options for decoding BSON datetimes.""" + DATETIME = 1 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`. + + BSON UTC datetimes that cannot be represented as a + :class:`~datetime.datetime` will raise an :class:`OverflowError` + or a :class:`ValueError`. + + .. versionadded 4.3 + """ + DATETIME_CLAMP = 2 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`, clamping + to :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`. + + .. versionadded 4.3 + """ + DATETIME_MS = 3 + """Decode a BSON UTC datetime as a :class:`~bson.datetime_ms.DatetimeMS` + object. + + .. versionadded 4.3 + """ + DATETIME_AUTO = 4 + """Decode a BSON UTC datetime as a :class:`datetime.datetime` if possible, + and a :class:`~bson.datetime_ms.DatetimeMS` if not. + + .. versionadded 4.3 + """ class _BaseCodecOptions(NamedTuple): From 7c19ff7f7ac7199e8ec9026110daa7cefc0a3a7a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Aug 2022 12:23:50 -0700 Subject: [PATCH 0742/2111] PYTHON-3389 Close ChangeStream after non-resumable non-timeout errors (#1029) --- pymongo/change_stream.py | 42 +++++++++++++++++++++++++------------- test/test_change_stream.py | 13 ++++++------ 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 80820dff91..0edf513a3c 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -68,6 +68,19 @@ from pymongo.mongo_client import MongoClient +def _resumable(exc: PyMongoError) -> bool: + """Return True if given a resumable change stream error.""" + if isinstance(exc, (ConnectionFailure, CursorNotFound)): + return True + if isinstance(exc, OperationFailure): + if exc._max_wire_version is None: + return False + return ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) + return False + + class ChangeStream(Generic[_DocumentType]): """The internal abstract base class for change stream cursors. @@ -343,20 +356,21 @@ def try_next(self) -> Optional[_DocumentType]: # Attempt to get the next change with at most one getMore and at most # one resume attempt. try: - change = self._cursor._try_next(True) - except (ConnectionFailure, CursorNotFound): - self._resume() - change = self._cursor._try_next(False) - except OperationFailure as exc: - if exc._max_wire_version is None: - raise - is_resumable = ( - exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") - ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) - if not is_resumable: - raise - self._resume() - change = self._cursor._try_next(False) + try: + change = self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + self._resume() + change = self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + self.close() + raise + except Exception: + self.close() + raise # Check if the cursor was invalidated. if not self._cursor.alive: diff --git a/test/test_change_stream.py b/test/test_change_stream.py index b5b260086d..a8b793333e 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -486,7 +486,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): return response["cursor"]["postBatchResumeToken"] @no_type_check - def _test_raises_error_on_missing_id(self, expected_exception, expected_exception2): + def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ @@ -494,7 +494,8 @@ def _test_raises_error_on_missing_id(self, expected_exception, expected_exceptio self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) - with self.assertRaises(expected_exception2): + # The cursor should now be closed. + with self.assertRaises(StopIteration): next(change_stream) @no_type_check @@ -526,14 +527,14 @@ def test_update_resume_token_legacy(self): # Prose test no. 2 @client_context.require_version_min(4, 1, 8) def test_raises_error_on_missing_id_418plus(self): - # Server returns an error on 4.1.8+, subsequent next() resumes and gets the same error. - self._test_raises_error_on_missing_id(OperationFailure, OperationFailure) + # Server returns an error on 4.1.8+ + self._test_raises_error_on_missing_id(OperationFailure) # Prose test no. 2 @client_context.require_version_max(4, 1, 8) def test_raises_error_on_missing_id_418minus(self): - # PyMongo raises an error, closes the cursor, subsequent next() raises StopIteration. - self._test_raises_error_on_missing_id(InvalidOperation, StopIteration) + # PyMongo raises an error + self._test_raises_error_on_missing_id(InvalidOperation) # Prose test no. 3 @no_type_check From 5b85ad2bcf5f1a996f8b288b27a849e8e75b4779 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Aug 2022 13:30:41 -0700 Subject: [PATCH 0743/2111] PYTHON-3391 Skip unsupported CSOT tests on serverless (#1030) --- test/__init__.py | 10 ++++++++++ test/csot/gridfs-advanced.json | 3 ++- test/csot/gridfs-delete.json | 3 ++- test/csot/gridfs-download.json | 3 ++- test/csot/gridfs-find.json | 3 ++- test/csot/gridfs-upload.json | 3 ++- test/test_change_stream.py | 13 ++++--------- test/test_csot.py | 4 +--- test/test_custom_types.py | 9 +++------ test/test_examples.py | 3 +-- 10 files changed, 29 insertions(+), 25 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 4ecc3c9e9e..2a3e59adf9 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -759,6 +759,16 @@ def require_no_load_balancer(self, func): lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func ) + def require_no_serverless(self, func): + """Run a test only if the client is not connected to serverless.""" + return self._require( + lambda: not self.serverless, "Must not be connected to serverless", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func))) + def is_topology_type(self, topologies): unknown = set(topologies) - { "single", diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json index 6bf0229a04..c6c0944d2f 100644 --- a/test/csot/gridfs-advanced.json +++ b/test/csot/gridfs-advanced.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json index 8701929ff3..9f4980114b 100644 --- a/test/csot/gridfs-delete.json +++ b/test/csot/gridfs-delete.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json index 2ab64010f8..8542f69e89 100644 --- a/test/csot/gridfs-download.json +++ b/test/csot/gridfs-download.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json index 45bb7066d6..7409036284 100644 --- a/test/csot/gridfs-find.json +++ b/test/csot/gridfs-find.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json index 690fdda77f..b3f174973d 100644 --- a/test/csot/gridfs-upload.json +++ b/test/csot/gridfs-upload.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/test_change_stream.py b/test/test_change_stream.py index a8b793333e..18a0ec84c4 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -430,8 +430,7 @@ def test_start_after_resume_process_with_changes(self): self.assertEqual(change["fullDocument"], {"_id": 3}) @no_type_check - @client_context.require_no_mongos # Remove after SERVER-41196 - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2) def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -767,8 +766,7 @@ class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestClusterChangeStream, cls).setUpClass() cls.dbs = [cls.db, cls.client.pymongo_test_2] @@ -829,8 +827,7 @@ def test_full_pipeline(self): class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestDatabaseChangeStream, cls).setUpClass() @@ -915,9 +912,7 @@ def test_isolation(self): class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod - @client_context.require_version_min(3, 5, 11) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestCollectionChangeStream, cls).setUpClass() diff --git a/test/test_csot.py b/test/test_csot.py index 7b82a49caf..a9cf7a0124 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -73,9 +73,7 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_deadline(), float("inf")) self.assertEqual(_csot.get_rtt(), 0.0) - @client_context.require_version_min(3, 6) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def test_change_stream_can_resume_after_timeouts(self): coll = self.db.test with coll.watch(max_await_time_ms=150) as stream: diff --git a/test/test_custom_types.py b/test/test_custom_types.py index e11b5ebe00..868756c67d 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -896,8 +896,7 @@ def run_test(doc_cls): class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() cls.db.test.delete_many({}) @@ -916,8 +915,7 @@ def create_targets(self, *args, **kwargs): class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() cls.db.test.delete_many({}) @@ -936,8 +934,7 @@ def create_targets(self, *args, **kwargs): class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): @classmethod @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams def setUpClass(cls): super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() cls.db.test.delete_many({}) diff --git a/test/test_examples.py b/test/test_examples.py index e23abe104f..9c1adda69c 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -740,8 +740,7 @@ def test_delete(self): self.assertEqual(db.inventory.count_documents({}), 0) - @client_context.require_replica_set - @client_context.require_no_mmap + @client_context.require_change_streams def test_change_streams(self): db = self.db done = False From 13e2715af0eb2f8fdcb5fae470db1120112202a3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 3 Aug 2022 15:34:16 -0700 Subject: [PATCH 0744/2111] PYTHON-3312 Convert SDAM integration tests to unified (#1028) --- .evergreen/resync-specs.sh | 4 +- pymongo/monitoring.py | 64 +-- .../unified/auth-error.json | 230 ++++++++ .../unified/auth-misc-command-error.json | 230 ++++++++ .../unified/auth-network-error.json | 230 ++++++++ .../unified/auth-network-timeout-error.json | 233 ++++++++ .../unified/auth-shutdown-error.json | 230 ++++++++ .../unified/cancel-server-check.json | 201 +++++++ .../unified/connectTimeoutMS.json | 221 ++++++++ .../unified/find-network-error.json | 234 ++++++++ .../unified/find-network-timeout-error.json | 199 +++++++ .../unified/find-shutdown-error.json | 251 +++++++++ .../unified/hello-command-error.json | 376 +++++++++++++ .../unified/hello-network-error.json | 346 ++++++++++++ .../unified/hello-timeout.json | 514 ++++++++++++++++++ .../unified/insert-network-error.json | 246 +++++++++ .../unified/insert-shutdown-error.json | 250 +++++++++ .../unified/minPoolSize-error.json | 177 ++++++ .../unified}/pool-cleared-error.json | 204 ++++--- .../rediscover-quickly-after-step-down.json | 242 +++++++++ .../auth-error.json | 140 ----- .../auth-misc-command-error.json | 140 ----- .../auth-network-error.json | 140 ----- .../auth-network-timeout-error.json | 143 ----- .../auth-shutdown-error.json | 140 ----- .../cancel-server-check.json | 130 ----- .../connectTimeoutMS.json | 149 ----- .../find-network-error.json | 144 ----- .../find-network-timeout-error.json | 119 ---- .../find-shutdown-error.json | 168 ------ .../hello-command-error.json | 223 -------- .../hello-network-error.json | 219 -------- .../hello-timeout.json | 337 ------------ .../insert-network-error.json | 156 ------ .../insert-shutdown-error.json | 167 ------ .../minPoolSize-error.json | 102 ---- .../rediscover-quickly-after-step-down.json | 165 ------ test/test_discovery_and_monitoring.py | 114 +--- .../entity-thread-additionalProperties.json | 18 + .../invalid/entity-thread-id-required.json | 15 + .../invalid/entity-thread-id-type.json | 17 + ...tionChangedEvent-additionalProperties.json | 23 + ...erverDescription-additionalProperties.json | 25 + ...ngedEvent-serverDescription-type-enum.json | 25 + ...ngedEvent-serverDescription-type-type.json | 25 + test/unified_format.py | 165 +++++- 46 files changed, 4881 insertions(+), 3010 deletions(-) create mode 100644 test/discovery_and_monitoring/unified/auth-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-misc-command-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-network-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-network-timeout-error.json create mode 100644 test/discovery_and_monitoring/unified/auth-shutdown-error.json create mode 100644 test/discovery_and_monitoring/unified/cancel-server-check.json create mode 100644 test/discovery_and_monitoring/unified/connectTimeoutMS.json create mode 100644 test/discovery_and_monitoring/unified/find-network-error.json create mode 100644 test/discovery_and_monitoring/unified/find-network-timeout-error.json create mode 100644 test/discovery_and_monitoring/unified/find-shutdown-error.json create mode 100644 test/discovery_and_monitoring/unified/hello-command-error.json create mode 100644 test/discovery_and_monitoring/unified/hello-network-error.json create mode 100644 test/discovery_and_monitoring/unified/hello-timeout.json create mode 100644 test/discovery_and_monitoring/unified/insert-network-error.json create mode 100644 test/discovery_and_monitoring/unified/insert-shutdown-error.json create mode 100644 test/discovery_and_monitoring/unified/minPoolSize-error.json rename test/{discovery_and_monitoring_integration => discovery_and_monitoring/unified}/pool-cleared-error.json (60%) create mode 100644 test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json delete mode 100644 test/discovery_and_monitoring_integration/auth-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-misc-command-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-network-timeout-error.json delete mode 100644 test/discovery_and_monitoring_integration/auth-shutdown-error.json delete mode 100644 test/discovery_and_monitoring_integration/cancel-server-check.json delete mode 100644 test/discovery_and_monitoring_integration/connectTimeoutMS.json delete mode 100644 test/discovery_and_monitoring_integration/find-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/find-network-timeout-error.json delete mode 100644 test/discovery_and_monitoring_integration/find-shutdown-error.json delete mode 100644 test/discovery_and_monitoring_integration/hello-command-error.json delete mode 100644 test/discovery_and_monitoring_integration/hello-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/hello-timeout.json delete mode 100644 test/discovery_and_monitoring_integration/insert-network-error.json delete mode 100644 test/discovery_and_monitoring_integration/insert-shutdown-error.json delete mode 100644 test/discovery_and_monitoring_integration/minPoolSize-error.json delete mode 100644 test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json create mode 100644 test/unified-test-format/invalid/entity-thread-additionalProperties.json create mode 100644 test/unified-test-format/invalid/entity-thread-id-required.json create mode 100644 test/unified-test-format/invalid/entity-thread-id-type.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json create mode 100644 test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index b64868c5a9..817fa4b730 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -132,8 +132,8 @@ do discovery_and_monitoring/sharded cpjson server-discovery-and-monitoring/tests/single \ discovery_and_monitoring/single - cpjson server-discovery-and-monitoring/tests/integration \ - discovery_and_monitoring_integration + cpjson server-discovery-and-monitoring/tests/unified \ + discovery_and_monitoring/unified cpjson server-discovery-and-monitoring/tests/load-balanced \ discovery_and_monitoring/load-balanced ;; diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index f3f773fbbd..90b8c1a3eb 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -914,13 +914,12 @@ class ConnectionCheckOutFailedReason(object): class _ConnectionEvent(object): - """Private base class for some connection events.""" + """Private base class for connection events.""" - __slots__ = ("__address", "__connection_id") + __slots__ = ("__address",) - def __init__(self, address: _Address, connection_id: int) -> None: + def __init__(self, address: _Address) -> None: self.__address = address - self.__connection_id = connection_id @property def address(self) -> _Address: @@ -929,16 +928,29 @@ def address(self) -> _Address: """ return self.__address + def __repr__(self): + return "%s(%r)" % (self.__class__.__name__, self.__address) + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + @property def connection_id(self) -> int: """The ID of the Connection.""" return self.__connection_id def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__connection_id) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__connection_id) -class ConnectionCreatedEvent(_ConnectionEvent): +class ConnectionCreatedEvent(_ConnectionIdEvent): """Published when a Connection Pool creates a Connection object. NOTE: This connection is not ready for use until the @@ -955,7 +967,7 @@ class ConnectionCreatedEvent(_ConnectionEvent): __slots__ = () -class ConnectionReadyEvent(_ConnectionEvent): +class ConnectionReadyEvent(_ConnectionIdEvent): """Published when a Connection has finished its setup, and is ready to use. :Parameters: @@ -969,7 +981,7 @@ class ConnectionReadyEvent(_ConnectionEvent): __slots__ = () -class ConnectionClosedEvent(_ConnectionEvent): +class ConnectionClosedEvent(_ConnectionIdEvent): """Published when a Connection is closed. :Parameters: @@ -1005,7 +1017,7 @@ def __repr__(self): ) -class ConnectionCheckOutStartedEvent(object): +class ConnectionCheckOutStartedEvent(_ConnectionEvent): """Published when the driver starts attempting to check out a connection. :Parameters: @@ -1015,23 +1027,10 @@ class ConnectionCheckOutStartedEvent(object): .. versionadded:: 3.9 """ - __slots__ = ("__address",) - - def __init__(self, address): - self.__address = address - - @property - def address(self): - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + __slots__ = () -class ConnectionCheckOutFailedEvent(object): +class ConnectionCheckOutFailedEvent(_ConnectionEvent): """Published when the driver's attempt to check out a connection fails. :Parameters: @@ -1042,19 +1041,12 @@ class ConnectionCheckOutFailedEvent(object): .. versionadded:: 3.9 """ - __slots__ = ("__address", "__reason") + __slots__ = ("__reason",) def __init__(self, address: _Address, reason: str) -> None: - self.__address = address + super().__init__(address) self.__reason = reason - @property - def address(self) -> _Address: - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - @property def reason(self) -> str: """A reason explaining why connection check out failed. @@ -1065,10 +1057,10 @@ def reason(self) -> str: return self.__reason def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__reason) + return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__reason) -class ConnectionCheckedOutEvent(_ConnectionEvent): +class ConnectionCheckedOutEvent(_ConnectionIdEvent): """Published when the driver successfully checks out a Connection. :Parameters: @@ -1082,7 +1074,7 @@ class ConnectionCheckedOutEvent(_ConnectionEvent): __slots__ = () -class ConnectionCheckedInEvent(_ConnectionEvent): +class ConnectionCheckedInEvent(_ConnectionIdEvent): """Published when the driver checks in a Connection into the Pool. :Parameters: diff --git a/test/discovery_and_monitoring/unified/auth-error.json b/test/discovery_and_monitoring/unified/auth-error.json new file mode 100644 index 0000000000..5c78ecfe50 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-misc-command-error.json b/test/discovery_and_monitoring/unified/auth-misc-command-error.json new file mode 100644 index 0000000000..6e1b645461 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-misc-command-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-misc-command-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-misc-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json new file mode 100644 index 0000000000..7606d2db7a --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json new file mode 100644 index 0000000000..22066e8bae --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -0,0 +1,233 @@ +{ + "description": "auth-network-timeout-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-timeout-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-shutdown-error.json b/test/discovery_and_monitoring/unified/auth-shutdown-error.json new file mode 100644 index 0000000000..5dd7b5bb6f --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-shutdown-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-shutdown-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-shutdown-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/cancel-server-check.json b/test/discovery_and_monitoring/unified/cancel-server-check.json new file mode 100644 index 0000000000..896cc8d087 --- /dev/null +++ b/test/discovery_and_monitoring/unified/cancel-server-check.json @@ -0,0 +1,201 @@ +{ + "description": "cancel-server-check", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Cancel server check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "heartbeatFrequencyMS": 10000, + "serverSelectionTimeoutMS": 5000, + "appname": "cancelServerCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "cancel-server-check" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "insertedId": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectResult": { + "insertedId": 3 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/connectTimeoutMS.json b/test/discovery_and_monitoring/unified/connectTimeoutMS.json new file mode 100644 index 0000000000..67a4d9da1d --- /dev/null +++ b/test/discovery_and_monitoring/unified/connectTimeoutMS.json @@ -0,0 +1,221 @@ +{ + "description": "connectTimeoutMS", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "connectTimeoutMS=0", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 0, + "heartbeatFrequencyMS": 500, + "appname": "connectTimeoutMS=0" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "connectTimeoutMS" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "connectTimeoutMS=0", + "blockConnection": true, + "blockTimeMS": 550 + } + }, + "client": "setupClient" + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 750 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-error.json b/test/discovery_and_monitoring/unified/find-network-error.json new file mode 100644 index 0000000000..651466bfa6 --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-error.json @@ -0,0 +1,234 @@ +{ + "description": "find-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-timeout-error.json b/test/discovery_and_monitoring/unified/find-network-timeout-error.json new file mode 100644 index 0000000000..2bde6daa5d --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-timeout-error.json @@ -0,0 +1,199 @@ +{ + "description": "find-network-timeout-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-timeout-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-timeout-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-shutdown-error.json b/test/discovery_and_monitoring/unified/find-shutdown-error.json new file mode 100644 index 0000000000..624ad352fc --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-shutdown-error.json @@ -0,0 +1,251 @@ +{ + "description": "find-shutdown-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on find", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorFindTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "appName": "shutdownErrorFindTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-command-error.json b/test/discovery_and_monitoring/unified/hello-command-error.json new file mode 100644 index 0000000000..7d6046b76f --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-command-error.json @@ -0,0 +1,376 @@ +{ + "description": "hello-command-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Command error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorHandshakeTest", + "closeConnection": false, + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Command error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 1000, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorCheckTest", + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750, + "errorCode": 91 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-network-error.json b/test/discovery_and_monitoring/unified/hello-network-error.json new file mode 100644 index 0000000000..f44b26a9f9 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-network-error.json @@ -0,0 +1,346 @@ +{ + "description": "hello-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorHandshakeTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorCheckTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-timeout.json b/test/discovery_and_monitoring/unified/hello-timeout.json new file mode 100644 index 0000000000..dfa6b48d66 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-timeout.json @@ -0,0 +1,514 @@ +{ + "description": "hello-timeout", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network timeout on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorHandshakeTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network timeout on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 750, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorCheckTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "Driver extends timeout while streaming", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "extendsTimeoutTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 2000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-network-error.json b/test/discovery_and_monitoring/unified/insert-network-error.json new file mode 100644 index 0000000000..e4ba6684ae --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-network-error.json @@ -0,0 +1,246 @@ +{ + "description": "insert-network-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on insert", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true, + "appName": "insertNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "insertNetworkErrorTest" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-shutdown-error.json b/test/discovery_and_monitoring/unified/insert-shutdown-error.json new file mode 100644 index 0000000000..3c724fa5e4 --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-shutdown-error.json @@ -0,0 +1,250 @@ +{ + "description": "insert-shutdown-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on insert", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorInsertTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "appName": "shutdownErrorInsertTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/minPoolSize-error.json b/test/discovery_and_monitoring/unified/minPoolSize-error.json new file mode 100644 index 0000000000..0234ac9929 --- /dev/null +++ b/test/discovery_and_monitoring/unified/minPoolSize-error.json @@ -0,0 +1,177 @@ +{ + "description": "minPoolSize-error", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "sdam-minPoolSize-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "poolReadyEvent" + ], + "uriOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "sdam-minPoolSize-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": {} + }, + "commandName": "ping" + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + }, + "client": "setupClient" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 2 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring_integration/pool-cleared-error.json b/test/discovery_and_monitoring/unified/pool-cleared-error.json similarity index 60% rename from test/discovery_and_monitoring_integration/pool-cleared-error.json rename to test/discovery_and_monitoring/unified/pool-cleared-error.json index 52456f9e13..9a7dfd901c 100644 --- a/test/discovery_and_monitoring_integration/pool-cleared-error.json +++ b/test/discovery_and_monitoring/unified/pool-cleared-error.json @@ -1,25 +1,72 @@ { - "runOn": [ + "description": "pool-cleared-error", + "schemaVersion": "1.10", + "runOnRequirements": [ { "minServerVersion": "4.9", - "topology": [ + "serverless": "forbid", + "topologies": [ "replicaset", "sharded" ] } ], - "database_name": "sdam-tests", - "collection_name": "pool-cleared-error", - "data": [], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], "tests": [ { "description": "PoolClearedError does not mark server unknown", - "clientOptions": { - "retryWrites": true, - "maxPoolSize": 1, - "appname": "poolClearedErrorTest" - }, "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "pool-cleared-error" + } + } + ] + } + }, { "name": "insertOne", "object": "collection", @@ -30,7 +77,7 @@ } }, { - "name": "configureFailPoint", + "name": "failPoint", "object": "testRunner", "arguments": { "failPoint": { @@ -47,56 +94,53 @@ "closeConnection": true, "appName": "poolClearedErrorTest" } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread3" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread4" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread5" + }, + "client": "setupClient" } }, { - "name": "startThread", + "name": "createEntities", "object": "testRunner", "arguments": { - "name": "thread6" + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + }, + { + "thread": { + "id": "thread2" + } + }, + { + "thread": { + "id": "thread3" + } + }, + { + "thread": { + "id": "thread4" + } + }, + { + "thread": { + "id": "thread5" + } + } + ] } }, { "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread1", + "thread": "thread0", "operation": { "name": "insertOne", "object": "collection", @@ -112,7 +156,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread2", + "thread": "thread1", "operation": { "name": "insertOne", "object": "collection", @@ -128,7 +172,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread3", + "thread": "thread2", "operation": { "name": "insertOne", "object": "collection", @@ -144,7 +188,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread4", + "thread": "thread3", "operation": { "name": "insertOne", "object": "collection", @@ -160,7 +204,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread5", + "thread": "thread4", "operation": { "name": "insertOne", "object": "collection", @@ -176,7 +220,7 @@ "name": "runOnThread", "object": "testRunner", "arguments": { - "name": "thread6", + "thread": "thread5", "operation": { "name": "insertOne", "object": "collection", @@ -192,49 +236,56 @@ "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread1" + "thread": "thread0" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread2" + "thread": "thread1" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread3" + "thread": "thread2" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread4" + "thread": "thread3" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread5" + "thread": "thread4" } }, { "name": "waitForThread", "object": "testRunner", "arguments": { - "name": "thread6" + "thread": "thread5" } }, { "name": "waitForEvent", "object": "testRunner", "arguments": { - "event": "ServerMarkedUnknownEvent", + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, "count": 1 } }, @@ -242,7 +293,10 @@ "name": "waitForEvent", "object": "testRunner", "arguments": { - "event": "PoolClearedEvent", + "client": "client", + "event": { + "poolClearedEvent": {} + }, "count": 1 } }, @@ -259,7 +313,14 @@ "name": "assertEventCount", "object": "testRunner", "arguments": { - "event": "ServerMarkedUnknownEvent", + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, "count": 1 } }, @@ -267,14 +328,19 @@ "name": "assertEventCount", "object": "testRunner", "arguments": { - "event": "PoolClearedEvent", + "client": "client", + "event": { + "poolClearedEvent": {} + }, "count": 1 } } ], - "outcome": { - "collection": { - "data": [ + "outcome": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [ { "_id": 1 }, @@ -301,7 +367,7 @@ } ] } - } + ] } ] } diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json new file mode 100644 index 0000000000..0ad575cc9d --- /dev/null +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -0,0 +1,242 @@ +{ + "description": "rediscover-quickly-after-step-down", + "schemaVersion": "1.10", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "setupClient", + "databaseName": "admin" + } + } + ], + "initialData": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Rediscover quickly after replSetStepDown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "appname": "replSetStepDownTest", + "heartbeatFrequencyMS": 60000, + "serverSelectionTimeoutMS": 5000, + "w": "majority" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test-replSetStepDown" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "recordTopologyDescription", + "object": "testRunner", + "arguments": { + "client": "client", + "id": "topologyDescription" + } + }, + { + "name": "assertTopologyType", + "object": "testRunner", + "arguments": { + "topologyDescription": "topologyDescription", + "topologyType": "ReplicaSetWithPrimary" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetFreeze": 0 + }, + "readPreference": { + "mode": "Secondary" + }, + "commandName": "replSetFreeze" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetStepDown": 30, + "secondaryCatchUpPeriodSecs": 30, + "force": false + }, + "commandName": "replSetStepDown" + } + }, + { + "name": "waitForPrimaryChange", + "object": "testRunner", + "arguments": { + "client": "client", + "priorTopologyDescription": "topologyDescription", + "timeoutMS": 15000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-error.json b/test/discovery_and_monitoring_integration/auth-error.json deleted file mode 100644 index 064d660e32..0000000000 --- a/test/discovery_and_monitoring_integration/auth-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after AuthenticationFailure error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authErrorTest", - "errorCode": 18 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-misc-command-error.json b/test/discovery_and_monitoring_integration/auth-misc-command-error.json deleted file mode 100644 index 70dd59251d..0000000000 --- a/test/discovery_and_monitoring_integration/auth-misc-command-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-misc-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after misc command error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authMiscErrorTest", - "errorCode": 1 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authMiscErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-misc-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-network-error.json b/test/discovery_and_monitoring_integration/auth-network-error.json deleted file mode 100644 index a75a398c5e..0000000000 --- a/test/discovery_and_monitoring_integration/auth-network-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "closeConnection": true, - "appName": "authNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authNetworkErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json b/test/discovery_and_monitoring_integration/auth-network-timeout-error.json deleted file mode 100644 index a4ee7d9eff..0000000000 --- a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json +++ /dev/null @@ -1,143 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-network-timeout-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network timeout error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "blockConnection": true, - "blockTimeMS": 500, - "appName": "authNetworkTimeoutErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authNetworkTimeoutErrorTest", - "connectTimeoutMS": 250, - "socketTimeoutMS": 250 - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-network-timeout-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-shutdown-error.json b/test/discovery_and_monitoring_integration/auth-shutdown-error.json deleted file mode 100644 index 2dab90e1c5..0000000000 --- a/test/discovery_and_monitoring_integration/auth-shutdown-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-shutdown-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after shutdown error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authShutdownErrorTest", - "errorCode": 91 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authShutdownErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-shutdown-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/cancel-server-check.json b/test/discovery_and_monitoring_integration/cancel-server-check.json deleted file mode 100644 index 9586350959..0000000000 --- a/test/discovery_and_monitoring_integration/cancel-server-check.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.2", - "topology": [ - "sharded" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "cancel-server-check", - "data": [], - "tests": [ - { - "description": "Cancel server check", - "clientOptions": { - "retryWrites": true, - "heartbeatFrequencyMS": 10000, - "serverSelectionTimeoutMS": 5000, - "appname": "cancelServerCheckTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/connectTimeoutMS.json b/test/discovery_and_monitoring_integration/connectTimeoutMS.json deleted file mode 100644 index 36a6dc4507..0000000000 --- a/test/discovery_and_monitoring_integration/connectTimeoutMS.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "connectTimeoutMS", - "data": [], - "tests": [ - { - "description": "connectTimeoutMS=0", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 0, - "heartbeatFrequencyMS": 500, - "appname": "connectTimeoutMS=0" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "connectTimeoutMS=0", - "blockConnection": true, - "blockTimeMS": 550 - } - } - } - }, - { - "name": "wait", - "object": "testRunner", - "arguments": { - "ms": 750 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "connectTimeoutMS", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "connectTimeoutMS", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-network-error.json b/test/discovery_and_monitoring_integration/find-network-error.json deleted file mode 100644 index 4db2634cd6..0000000000 --- a/test/discovery_and_monitoring_integration/find-network-error.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error on find", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true, - "appName": "findNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "appname": "findNetworkErrorTest" - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "find-network-error" - }, - "command_name": "find", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "find-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-network-timeout-error.json b/test/discovery_and_monitoring_integration/find-network-timeout-error.json deleted file mode 100644 index c4e10b3a76..0000000000 --- a/test/discovery_and_monitoring_integration/find-network-timeout-error.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-network-timeout-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Ignore network timeout error on find", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 500, - "appName": "findNetworkTimeoutErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "appname": "findNetworkTimeoutErrorTest", - "socketTimeoutMS": 250 - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "find-network-timeout-error" - }, - "command_name": "find", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "find-network-timeout-error", - "documents": [ - { - "_id": 3 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-shutdown-error.json b/test/discovery_and_monitoring_integration/find-shutdown-error.json deleted file mode 100644 index 65de8398b1..0000000000 --- a/test/discovery_and_monitoring_integration/find-shutdown-error.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-shutdown-error", - "data": [], - "tests": [ - { - "description": "Concurrent shutdown error on find", - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "heartbeatFrequencyMS": 500, - "appname": "shutdownErrorFindTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "appName": "shutdownErrorFindTest", - "errorCode": 91, - "blockConnection": true, - "blockTimeMS": 500 - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-command-error.json b/test/discovery_and_monitoring_integration/hello-command-error.json deleted file mode 100644 index d3bccd3900..0000000000 --- a/test/discovery_and_monitoring_integration/hello-command-error.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-command-error", - "data": [], - "tests": [ - { - "description": "Command error on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "commandErrorHandshakeTest", - "closeConnection": false, - "errorCode": 91 - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "commandErrorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Command error on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 1000, - "heartbeatFrequencyMS": 500, - "appname": "commandErrorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "commandErrorCheckTest", - "closeConnection": false, - "blockConnection": true, - "blockTimeMS": 750, - "errorCode": 91 - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-network-error.json b/test/discovery_and_monitoring_integration/hello-network-error.json deleted file mode 100644 index f9761d7556..0000000000 --- a/test/discovery_and_monitoring_integration/hello-network-error.json +++ /dev/null @@ -1,219 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-network-error", - "data": [], - "tests": [ - { - "description": "Network error on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "networkErrorHandshakeTest", - "closeConnection": true - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "networkErrorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Network error on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "networkErrorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "networkErrorCheckTest", - "closeConnection": true - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-timeout.json b/test/discovery_and_monitoring_integration/hello-timeout.json deleted file mode 100644 index 004f8f449d..0000000000 --- a/test/discovery_and_monitoring_integration/hello-timeout.json +++ /dev/null @@ -1,337 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-timeout", - "data": [], - "tests": [ - { - "description": "Network timeout on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "timeoutMonitorHandshakeTest", - "blockConnection": true, - "blockTimeMS": 1000 - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "timeoutMonitorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Network timeout on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 750, - "heartbeatFrequencyMS": 500, - "appname": "timeoutMonitorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "timeoutMonitorCheckTest", - "blockConnection": true, - "blockTimeMS": 1000 - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "Driver extends timeout while streaming", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "extendsTimeoutTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "wait", - "object": "testRunner", - "arguments": { - "ms": 2000 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/insert-network-error.json b/test/discovery_and_monitoring_integration/insert-network-error.json deleted file mode 100644 index fa8bb253e1..0000000000 --- a/test/discovery_and_monitoring_integration/insert-network-error.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "insert-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error on insert", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true, - "appName": "insertNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "insertNetworkErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "insert-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "insert-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/insert-shutdown-error.json b/test/discovery_and_monitoring_integration/insert-shutdown-error.json deleted file mode 100644 index edde149a91..0000000000 --- a/test/discovery_and_monitoring_integration/insert-shutdown-error.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "insert-shutdown-error", - "data": [], - "tests": [ - { - "description": "Concurrent shutdown error on insert", - "clientOptions": { - "retryWrites": false, - "heartbeatFrequencyMS": 500, - "appname": "shutdownErrorInsertTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "appName": "shutdownErrorInsertTest", - "errorCode": 91, - "blockConnection": true, - "blockTimeMS": 500 - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "error": true - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - }, - "error": true - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/minPoolSize-error.json b/test/discovery_and_monitoring_integration/minPoolSize-error.json deleted file mode 100644 index 9f8e4f6f8b..0000000000 --- a/test/discovery_and_monitoring_integration/minPoolSize-error.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "sdam-minPoolSize-error", - "data": [], - "tests": [ - { - "description": "Network error on minPoolSize background creation", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "skip": 3 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "SDAMminPoolSizeError", - "closeConnection": true - } - }, - "clientOptions": { - "heartbeatFrequencyMS": 10000, - "appname": "SDAMminPoolSizeError", - "minPoolSize": 10, - "serverSelectionTimeoutMS": 1000, - "directConnection": true - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolReadyEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": {} - } - }, - "error": true - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "off" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": 1 - } - }, - "error": false - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolReadyEvent", - "count": 2 - } - } - ] - } - ] -} diff --git a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json deleted file mode 100644 index 41fbdc695c..0000000000 --- a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json +++ /dev/null @@ -1,165 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "topology": [ - "replicaset" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "test-replSetStepDown", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Rediscover quickly after replSetStepDown", - "clientOptions": { - "appname": "replSetStepDownTest", - "heartbeatFrequencyMS": 60000, - "serverSelectionTimeoutMS": 5000, - "w": "majority" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "recordPrimary", - "object": "testRunner" - }, - { - "name": "runAdminCommand", - "object": "testRunner", - "command_name": "replSetFreeze", - "arguments": { - "command": { - "replSetFreeze": 0 - }, - "readPreference": { - "mode": "Secondary" - } - } - }, - { - "name": "runAdminCommand", - "object": "testRunner", - "command_name": "replSetStepDown", - "arguments": { - "command": { - "replSetStepDown": 30, - "secondaryCatchUpPeriodSecs": 30, - "force": false - } - } - }, - { - "name": "waitForPrimaryChange", - "object": "testRunner", - "arguments": { - "timeoutMS": 15000 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test-replSetStepDown", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test-replSetStepDown", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 39979c2d10..9af8185ab5 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -17,16 +17,15 @@ import os import sys import threading -import time sys.path[0:0] = [""] from test import IntegrationTest, unittest from test.pymongo_mocks import DummyMonitor +from test.unified_format import generate_test_classes from test.utils import ( CMAPListener, HeartbeatEventListener, - TestCreator, assertion_context, client_context, get_pool, @@ -35,7 +34,6 @@ single_client, wait_until, ) -from test.utils_spec_runner import SpecRunner, SpecRunnerThread from bson import Timestamp, json_util from pymongo import common, monitoring @@ -55,7 +53,7 @@ from pymongo.uri_parser import parse_uri # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") +SDAM_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") def create_mock_topology(uri, monitor_class=DummyMonitor): @@ -216,8 +214,11 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(SDAM_PATH): dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue for filename in filenames: if os.path.splitext(filename)[1] != ".json": @@ -340,107 +341,8 @@ def test_pool_unpause(self): listener.wait_for_event(monitoring.PoolReadyEvent, 1) -class TestIntegration(SpecRunner): - # Location of JSON test specifications. - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring_integration" - ) - - def _event_count(self, event): - if event == "ServerMarkedUnknownEvent": - - def marked_unknown(e): - return ( - isinstance(e, monitoring.ServerDescriptionChangedEvent) - and not e.new_description.is_server_type_known - ) - - assert self.server_listener is not None - return len(self.server_listener.matching(marked_unknown)) - # Only support CMAP events for now. - self.assertTrue(event.startswith("Pool") or event.startswith("Conn")) - event_type = getattr(monitoring, event) - assert self.pool_listener is not None - return self.pool_listener.event_count(event_type) - - def assert_event_count(self, event, count): - """Run the assertEventCount test operation. - - Assert the given event was published exactly `count` times. - """ - self.assertEqual(self._event_count(event), count, "expected %s not %r" % (count, event)) - - def wait_for_event(self, event, count): - """Run the waitForEvent test operation. - - Wait for a number of events to be published, or fail. - """ - wait_until( - lambda: self._event_count(event) >= count, "find %s %s event(s)" % (count, event) - ) - - def configure_fail_point(self, fail_point): - """Run the configureFailPoint test operation.""" - self.set_fail_point(fail_point) - self.addCleanup( - self.set_fail_point, - {"configureFailPoint": fail_point["configureFailPoint"], "mode": "off"}, - ) - - def run_admin_command(self, command, **kwargs): - """Run the runAdminCommand test operation.""" - self.client.admin.command(command, **kwargs) - - def record_primary(self): - """Run the recordPrimary test operation.""" - self._previous_primary = self.scenario_client.primary - - def wait_for_primary_change(self, timeout): - """Run the waitForPrimaryChange test operation.""" - - def primary_changed(): - primary = self.scenario_client.primary - if primary is None: - return False - return primary != self._previous_primary - - wait_until(primary_changed, "change primary", timeout=timeout) - - def wait(self, ms): - """Run the "wait" test operation.""" - time.sleep(ms / 1000.0) - - def start_thread(self, name): - """Run the 'startThread' thread operation.""" - thread = SpecRunnerThread(name) - thread.start() - self.targets[name] = thread - - def run_on_thread(self, sessions, collection, name, operation): - """Run the 'runOnThread' operation.""" - thread = self.targets[name] - thread.schedule(lambda: self._run_op(sessions, collection, operation, False)) - - def wait_for_thread(self, name): - """Run the 'waitForThread' operation.""" - thread = self.targets[name] - thread.stop() - thread.join(60) - if thread.exc: - raise thread.exc - self.assertFalse(thread.is_alive(), "Thread %s is still running" % (name,)) - - -def create_spec_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_spec_test, TestIntegration, TestIntegration.TEST_PATH) -test_creator.create_tests() +# Generate unified tests. +globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) if __name__ == "__main__": diff --git a/test/unified-test-format/invalid/entity-thread-additionalProperties.json b/test/unified-test-format/invalid/entity-thread-additionalProperties.json new file mode 100644 index 0000000000..b296719f13 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-thread-additionalProperties", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": "thread0", + "foo": "bar" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-required.json b/test/unified-test-format/invalid/entity-thread-id-required.json new file mode 100644 index 0000000000..3b197e3d6b --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-thread-id-required", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-type.json b/test/unified-test-format/invalid/entity-thread-id-type.json new file mode 100644 index 0000000000..8f281ef6f4 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-thread-id-type", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..1c6ec460b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json new file mode 100644 index 0000000000..58f686739a --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "foo": "bar" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json new file mode 100644 index 0000000000..1b4a7e2e70 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": "not a server type" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json new file mode 100644 index 0000000000..c7ea9cc9be --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": 12 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index ee64915202..dbf4ef988f 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -50,9 +50,11 @@ rs_or_single_client, single_client, snake_to_camel, + wait_until, ) +from test.utils_spec_runner import SpecRunnerThread from test.version import Version -from typing import Any, List +from typing import Any, Dict, List, Mapping, Optional import pymongo from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util @@ -94,11 +96,24 @@ PoolClosedEvent, PoolCreatedEvent, PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerListener, + ServerOpeningEvent, + TopologyEvent, + _CommandEvent, + _ConnectionEvent, + _PoolEvent, + _ServerEvent, ) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult from pymongo.server_api import ServerApi +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern JSON_OPTS = json_util.JSONOptions(tz_aware=False) @@ -268,7 +283,7 @@ def close(self): self.client = None -class EventListenerUtil(CMAPListener, CommandListener): +class EventListenerUtil(CMAPListener, CommandListener, ServerListener): def __init__( self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map ): @@ -292,9 +307,14 @@ def __init__( super(EventListenerUtil, self).__init__() def get_events(self, event_type): + assert event_type in ("command", "cmap", "sdam", "all"), event_type + if event_type == "all": + return list(self.events) if event_type == "command": - return [e for e in self.events if "Command" in type(e).__name__] - return [e for e in self.events if "Command" not in type(e).__name__] + return [e for e in self.events if isinstance(e, _CommandEvent)] + if event_type == "cmap": + return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))] + return [e for e in self.events if isinstance(e, (_ServerEvent, TopologyEvent))] def add_event(self, event): event_name = type(event).__name__.lower() @@ -332,16 +352,25 @@ def succeeded(self, event): def failed(self, event): self._command_event(event) + def opened(self, event: ServerOpeningEvent) -> None: + self.add_event(event) + + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: ServerClosedEvent) -> None: + self.add_event(event) + class EntityMapUtil(object): """Utility class that implements an entity map as per the unified test format specification.""" def __init__(self, test_class): - self._entities = {} - self._listeners = {} - self._session_lsids = {} - self.test = test_class + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class def __contains__(self, item): return item in self._entities @@ -484,6 +513,12 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: opts.get("kms_tls_options", KMS_TLS_OPTS), ) return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerThread(name) + thread.start() + self[name] = thread + return self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) @@ -491,7 +526,7 @@ def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: self._create_entity(spec, uri=uri) - def get_listener_for_client(self, client_name): + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: client = self[client_name] if not isinstance(client, MongoClient): self.test.fail( @@ -710,6 +745,18 @@ def assertHasServiceId(self, spec, actual): else: self.test.assertIsNone(actual.service_id) + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + if "type" in spec: + self.test.assertEqual(actual.server_type_name, spec["type"]) + if "error" in spec: + self.test.process_error(actual.error, spec["error"]) + if "minWireVersion" in spec: + self.test.assertEqual(actual.min_wire_version, spec["minWireVersion"]) + if "maxWireVersion" in spec: + self.test.assertEqual(actual.max_wire_version, spec["maxWireVersion"]) + if "topologyVersion" in spec: + self.test.assertEqual(actual.topology_version, spec["topologyVersion"]) + def match_event(self, event_type, expectation, actual): name, spec = next(iter(expectation.items())) @@ -770,8 +817,16 @@ def match_event(self, event_type, expectation, actual): self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) elif name == "connectionCheckedInEvent": self.test.assertIsInstance(actual, ConnectionCheckedInEvent) + elif name == "serverDescriptionChangedEvent": + self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) + if "previousDescription" in spec: + self.match_server_description( + actual.previous_description, spec["previousDescription"] + ) + if "newDescription" in spec: + self.match_server_description(actual.new_description, spec["newDescription"]) else: - self.test.fail("Unsupported event type %s" % (name,)) + raise Exception("Unsupported event type %s" % (name,)) def coerce_result(opname, result): @@ -805,7 +860,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.9") + SCHEMA_VERSION = Version.from_string("1.10") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -1339,6 +1394,90 @@ def _testOperation_assertNumberConnectionsCheckedOut(self, spec): pool = get_pool(client) self.assertEqual(spec["connections"], pool.active_sockets) + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event("all", event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual( + self._event_count(client, event), count, "expected %s not %r" % (count, event) + ) + + def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + wait_until( + lambda: self._event_count(client, event) >= count, + "find %s %s event(s)" % (count, event), + ) + + def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + time.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + def _testOperation_waitForPrimaryChange(self, spec): + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[ServerDescription]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0] + return None + + old_primary = get_primary(old_description) + + def primary_changed(): + primary = client.primary + if primary is None: + return False + return primary != old_primary + + wait_until(primary_changed, "change primary", timeout=timeout) + + def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.schedule(lambda: self.run_entity_operation(spec["operation"])) + + def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.stop() + thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread %s is still running" % (spec["thread"],)) + def _testOperation_loop(self, spec): failure_key = spec.get("storeFailuresAsEntity") error_key = spec.get("storeErrorsAsEntity") @@ -1398,14 +1537,10 @@ def check_events(self, spec): for event_spec in spec: client_name = event_spec["client"] events = event_spec["events"] - # Valid types: 'command', 'cmap' event_type = event_spec.get("eventType", "command") ignore_extra_events = event_spec.get("ignoreExtraEvents", False) server_connection_id = event_spec.get("serverConnectionId") has_server_connection_id = event_spec.get("hasServerConnectionId", False) - - assert event_type in ("command", "cmap") - listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) if ignore_extra_events: From 92a6fa79b66ae1f91691c7540d3f40331195278f Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Wed, 3 Aug 2022 16:53:50 -0700 Subject: [PATCH 0745/2111] PYTHON-3376/PYTHON-3378 Update FAQ about OverflowError when decoding out of range datetimes (#1025) --- doc/examples/datetimes.rst | 52 ++++++++++++++++++++++++++------------ doc/faq.rst | 41 +++++++++++++++++++++++++++--- 2 files changed, 73 insertions(+), 20 deletions(-) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index b9c509e075..f965b9f58c 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -125,32 +125,52 @@ To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO`, :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP`. :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME` is the default -option and has the behavior of raising an exception upon attempting to -decode an out-of-range date. +option and has the behavior of raising an :class:`~builtin.OverflowError` upon +attempting to decode an out-of-range date. :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS` will only return :class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the -represented datetime is in- or out-of-range. +represented datetime is in- or out-of-range: + +.. doctest:: + + >>> from datetime import datetime + >>> from bson import encode, decode + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + >>> decode(x, codec_options=codec_ms) + {'x': DatetimeMS(0)} + :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO` will return :class:`~datetime.datetime` if the underlying UTC datetime is within range, or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime -cannot be represented using the builtin Python :class:`~datetime.datetime`. +cannot be represented using the builtin Python :class:`~datetime.datetime`: + +.. doctest:: + + >>> x = encode({"x": datetime(1970, 1, 1)}) + >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> decode(x, codec_options=codec_auto) + {'x': datetime.datetime(1970, 1, 1, 0, 0)} + >>> decode(y, codec_options=codec_auto) + {'x': DatetimeMS(-4611686018427387904)} + :attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP` will clamp resulting :class:`~datetime.datetime` objects to be within :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` -(trimmed to `999000` microseconds). - -An example of encoding and decoding using `DATETIME_MS` is as follows: +(trimmed to `999000` microseconds): .. doctest:: - >>> from datetime import datetime - >>> from bson import encode, decode - >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import CodecOptions,DatetimeConversionOpts - >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> x - b'\x10\x00\x00\x00\tx\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - >>> decode(x, codec_options=CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS)) - {'x': DatetimeMS(0)} + + >>> x = encode({"x": DatetimeMS(2**62)}) + >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP) + >>> decode(x, codec_options=codec_clamp) + {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} + >>> decode(y, codec_options=codec_clamp) + {'x': datetime.datetime(1, 1, 1, 0, 0)} :class:`~bson.datetime_ms.DatetimeMS` objects have support for rich comparison methods against other instances of :class:`~bson.datetime_ms.DatetimeMS`. diff --git a/doc/faq.rst b/doc/faq.rst index ca83f5de4c..5eb39c4276 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -264,7 +264,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversionOpts.DATETIME) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with @@ -489,9 +489,42 @@ limited to years between :data:`datetime.MINYEAR` (usually 1) and driver) can store BSON datetimes with year values far outside those supported by :class:`datetime.datetime`. -There are a few ways to work around this issue. One option is to filter -out documents with values outside of the range supported by -:class:`datetime.datetime`:: +There are a few ways to work around this issue. Starting with PyMongo 4.3, +:func:`bson.decode` can decode BSON datetimes in one of four ways, and can +be specified using the ``datetime_conversion`` parameter of +:class:`~bson.codec_options.CodecOptions`. + +The default option is +:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME`, which will +attempt to decode as a :class:`datetime.datetime`, allowing +:class:`~builtin.OverflowError` to occur upon out-of-range dates. +:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME_AUTO` alters +this behavior to instead return :class:`~bson.datetime_ms.DatetimeMS` when +representations are out-of-range, while returning :class:`~datetime.datetime` +objects as before: + +.. doctest:: + + >>> from datetime import datetime + >>> from bson.datetime_ms import DatetimeMS + >>> from bson.codec_options import DatetimeConversionOpts + >>> from pymongo import MongoClient + >>> client = MongoClient(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> client.db.collection.insert_one({"x": datetime(1970, 1, 1)}) + + >>> client.db.collection.insert_one({"x": DatetimeMS(2**62)}) + + >>> for x in client.db.collection.find(): + ... print(x) + {'_id': ObjectId('...'), 'x': datetime.datetime(1970, 1, 1, 0, 0)} + {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)} + +For other options, please refer to +:class:`~bson.codec_options.DatetimeConversionOpts`. + +Another option that does not involve setting `datetime_conversion` is to to +filter out documents values outside of the range supported by +:class:`~datetime.datetime`: >>> from datetime import datetime >>> coll = client.test.dates From 46673c370521330f2705ae83c2b74db2a34fe7e5 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 4 Aug 2022 12:53:57 -0700 Subject: [PATCH 0746/2111] PYTHON-3379 Refactored DatetimeConversionOpts to DatetimeConversion (#1031) --- bson/__init__.py | 4 ++-- bson/codec_options.py | 6 +++--- bson/codec_options.pyi | 2 +- bson/datetime_ms.py | 20 ++++++++------------ bson/json_util.py | 6 +++--- doc/examples/datetimes.rst | 26 +++++++++++++------------- doc/faq.rst | 12 ++++++------ pymongo/common.py | 16 ++++++++-------- test/test_bson.py | 22 +++++++++------------- test/test_client.py | 10 ++++------ test/test_json_util.py | 4 ++-- 11 files changed, 59 insertions(+), 69 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 4283faf7dc..b43c686de8 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -99,7 +99,7 @@ from bson.codec_options import ( DEFAULT_CODEC_OPTIONS, CodecOptions, - DatetimeConversionOpts, + DatetimeConversion, _DocumentType, _raw_document_class, ) @@ -194,7 +194,7 @@ "is_valid", "BSON", "has_c", - "DatetimeConversionOpts", + "DatetimeConversion", "DatetimeMS", ] diff --git a/bson/codec_options.py b/bson/codec_options.py index bceab5e003..efba8af78d 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -199,7 +199,7 @@ def __eq__(self, other: Any) -> Any: ) -class DatetimeConversionOpts(enum.IntEnum): +class DatetimeConversion(enum.IntEnum): """Options for decoding BSON datetimes.""" DATETIME = 1 @@ -241,7 +241,7 @@ class _BaseCodecOptions(NamedTuple): unicode_decode_error_handler: str tzinfo: Optional[datetime.tzinfo] type_registry: TypeRegistry - datetime_conversion: Optional[DatetimeConversionOpts] + datetime_conversion: Optional[DatetimeConversion] class CodecOptions(_BaseCodecOptions): @@ -335,7 +335,7 @@ def __new__( unicode_decode_error_handler: str = "strict", tzinfo: Optional[datetime.tzinfo] = None, type_registry: Optional[TypeRegistry] = None, - datetime_conversion: Optional[DatetimeConversionOpts] = DatetimeConversionOpts.DATETIME, + datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, ) -> "CodecOptions": doc_class = document_class or dict # issubclass can raise TypeError for generic aliases like SON[str, Any]. diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi index 260407524f..2424516f08 100644 --- a/bson/codec_options.pyi +++ b/bson/codec_options.pyi @@ -55,7 +55,7 @@ class TypeRegistry: _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) -class DatetimeConversionOpts(int, enum.Enum): +class DatetimeConversion(int, enum.Enum): DATETIME = ... DATETIME_CLAMP = ... DATETIME_MS = ... diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 925087a5aa..c64a0cce87 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -22,11 +22,7 @@ import functools from typing import Any, Union, cast -from bson.codec_options import ( - DEFAULT_CODEC_OPTIONS, - CodecOptions, - DatetimeConversionOpts, -) +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion from bson.tz_util import utc EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) @@ -127,14 +123,14 @@ def _max_datetime_ms(tz=datetime.timezone.utc): def _millis_to_datetime(millis: int, opts: CodecOptions) -> Union[datetime.datetime, DatetimeMS]: """Convert milliseconds since epoch UTC to datetime.""" if ( - opts.datetime_conversion == DatetimeConversionOpts.DATETIME - or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP - or opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO + opts.datetime_conversion == DatetimeConversion.DATETIME + or opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO ): tz = opts.tzinfo or datetime.timezone.utc - if opts.datetime_conversion == DatetimeConversionOpts.DATETIME_CLAMP: + if opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP: millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) - elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_AUTO: + elif opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO: if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): return DatetimeMS(millis) @@ -149,10 +145,10 @@ def _millis_to_datetime(millis: int, opts: CodecOptions) -> Union[datetime.datet return dt else: return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) - elif opts.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + elif opts.datetime_conversion == DatetimeConversion.DATETIME_MS: return DatetimeMS(millis) else: - raise ValueError("datetime_conversion must be an element of DatetimeConversionOpts") + raise ValueError("datetime_conversion must be an element of DatetimeConversion") def _datetime_to_millis(dtm: datetime.datetime) -> int: diff --git a/bson/json_util.py b/bson/json_util.py index 0b5494e85c..517adff4e0 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -96,7 +96,7 @@ from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.codec_options import CodecOptions, DatetimeConversion from bson.datetime_ms import ( EPOCH_AWARE, DatetimeMS, @@ -662,12 +662,12 @@ def _parse_canonical_datetime( if json_options.tz_aware: if json_options.tzinfo: aware = aware.astimezone(json_options.tzinfo) - if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS: return DatetimeMS(aware) return aware else: aware_tzinfo_none = aware.replace(tzinfo=None) - if json_options.datetime_conversion == DatetimeConversionOpts.DATETIME_MS: + if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS: return DatetimeMS(aware_tzinfo_none) return aware_tzinfo_none return _millis_to_datetime(int(dtm), json_options) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index f965b9f58c..3b30000ffc 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -119,15 +119,15 @@ of milliseconds from the Unix epoch. To deal with this, we can use the To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, :class:`~bson.codec_options.CodecOptions` should have its ``datetime_conversion`` parameter set to one of the options available in -:class:`bson.datetime_ms.DatetimeConversionOpts`. These include -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME`, -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS`, -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO`, -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP`. -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME` is the default +:class:`bson.datetime_ms.DatetimeConversion`. These include +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO`, +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP`. +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME` is the default option and has the behavior of raising an :class:`~builtin.OverflowError` upon attempting to decode an out-of-range date. -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_MS` will only return +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS` will only return :class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the represented datetime is in- or out-of-range: @@ -136,13 +136,13 @@ represented datetime is in- or out-of-range: >>> from datetime import datetime >>> from bson import encode, decode >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import CodecOptions, DatetimeConversionOpts + >>> from bson.codec_options import CodecOptions, DatetimeConversion >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) >>> decode(x, codec_options=codec_ms) {'x': DatetimeMS(0)} -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_AUTO` will return +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO` will return :class:`~datetime.datetime` if the underlying UTC datetime is within range, or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime cannot be represented using the builtin Python :class:`~datetime.datetime`: @@ -151,13 +151,13 @@ cannot be represented using the builtin Python :class:`~datetime.datetime`: >>> x = encode({"x": datetime(1970, 1, 1)}) >>> y = encode({"x": DatetimeMS(-2**62)}) - >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) >>> decode(x, codec_options=codec_auto) {'x': datetime.datetime(1970, 1, 1, 0, 0)} >>> decode(y, codec_options=codec_auto) {'x': DatetimeMS(-4611686018427387904)} -:attr:`~bson.datetime_ms.DatetimeConversionOpts.DATETIME_CLAMP` will clamp +:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP` will clamp resulting :class:`~datetime.datetime` objects to be within :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` (trimmed to `999000` microseconds): @@ -166,7 +166,7 @@ resulting :class:`~datetime.datetime` objects to be within >>> x = encode({"x": DatetimeMS(2**62)}) >>> y = encode({"x": DatetimeMS(-2**62)}) - >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP) + >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP) >>> decode(x, codec_options=codec_clamp) {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} >>> decode(y, codec_options=codec_clamp) diff --git a/doc/faq.rst b/doc/faq.rst index 5eb39c4276..c48dd316e5 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -264,7 +264,7 @@ collection, configured to use :class:`~bson.son.SON` instead of dict: >>> from bson import CodecOptions, SON >>> opts = CodecOptions(document_class=SON) >>> opts - CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversionOpts.DATETIME) + CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversion.DATETIME) >>> collection_son = collection.with_options(codec_options=opts) Now, documents and subdocuments in query results are represented with @@ -495,10 +495,10 @@ be specified using the ``datetime_conversion`` parameter of :class:`~bson.codec_options.CodecOptions`. The default option is -:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME`, which will +:attr:`~bson.codec_options.DatetimeConversion.DATETIME`, which will attempt to decode as a :class:`datetime.datetime`, allowing :class:`~builtin.OverflowError` to occur upon out-of-range dates. -:attr:`~bson.codec_options.DatetimeConversionOpts.DATETIME_AUTO` alters +:attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` alters this behavior to instead return :class:`~bson.datetime_ms.DatetimeMS` when representations are out-of-range, while returning :class:`~datetime.datetime` objects as before: @@ -507,9 +507,9 @@ objects as before: >>> from datetime import datetime >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import DatetimeConversionOpts + >>> from bson.codec_options import DatetimeConversion >>> from pymongo import MongoClient - >>> client = MongoClient(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + >>> client = MongoClient(datetime_conversion=DatetimeConversion.DATETIME_AUTO) >>> client.db.collection.insert_one({"x": datetime(1970, 1, 1)}) >>> client.db.collection.insert_one({"x": DatetimeMS(2**62)}) @@ -520,7 +520,7 @@ objects as before: {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)} For other options, please refer to -:class:`~bson.codec_options.DatetimeConversionOpts`. +:class:`~bson.codec_options.DatetimeConversion`. Another option that does not involve setting `datetime_conversion` is to to filter out documents values outside of the range supported by diff --git a/pymongo/common.py b/pymongo/common.py index 319b07193c..add70cfb5f 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -36,7 +36,7 @@ from bson import SON from bson.binary import UuidRepresentation -from bson.codec_options import CodecOptions, DatetimeConversionOpts, TypeRegistry +from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry from bson.raw_bson import RawBSONDocument from pymongo.auth import MECHANISMS from pymongo.compression_support import ( @@ -620,19 +620,19 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A return value -def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversionOpts]: - """Validate a DatetimeConversionOpts string.""" +def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversion]: + """Validate a DatetimeConversion string.""" if value is None: - return DatetimeConversionOpts.DATETIME + return DatetimeConversion.DATETIME if isinstance(value, str): if value.isdigit(): - return DatetimeConversionOpts(int(value)) - return DatetimeConversionOpts[value] + return DatetimeConversion(int(value)) + return DatetimeConversion[value] elif isinstance(value, int): - return DatetimeConversionOpts(value) + return DatetimeConversion(value) - raise TypeError("%s must be a str or int representing DatetimeConversionOpts" % (option,)) + raise TypeError("%s must be a str or int representing DatetimeConversion" % (option,)) # Dictionary where keys are the names of public URI options, and values diff --git a/test/test_bson.py b/test/test_bson.py index 7fe0c168c6..e3c4a3a028 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -50,7 +50,7 @@ ) from bson.binary import Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.codec_options import CodecOptions, DatetimeConversion from bson.dbref import DBRef from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 @@ -981,7 +981,7 @@ def test_codec_options_repr(self): "unicode_decode_error_handler='strict', " "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], " "fallback_encoder=None), " - "datetime_conversion=DatetimeConversionOpts.DATETIME)" + "datetime_conversion=DatetimeConversion.DATETIME)" ) self.assertEqual(r, repr(CodecOptions())) @@ -1189,14 +1189,14 @@ def test_class_conversions(self): self.assertNotEqual(type(dtr1), type(dec1["x"])) # Test encode and decode with codec options. Expect: UTCDateimteRaw => DatetimeMS - opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) enc1 = encode({"x": dtr1}) dec1 = decode(enc1, opts1) self.assertEqual(type(dtr1), type(dec1["x"])) self.assertEqual(dtr1, dec1["x"]) # Expect: datetime => DatetimeMS - opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_MS) + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) enc1 = encode({"x": dt1}) dec1 = decode(enc1, opts1) @@ -1206,7 +1206,7 @@ def test_class_conversions(self): def test_clamping(self): # Test clamping from below and above. opts1 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=datetime.timezone.utc, ) @@ -1225,9 +1225,7 @@ def test_clamping(self): def test_tz_clamping(self): # Naive clamping to local tz. - opts1 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=False - ) + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) dec_below = decode(below, opts1) @@ -1241,9 +1239,7 @@ def test_tz_clamping(self): ) # Aware clamping. - opts2 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_CLAMP, tz_aware=True - ) + opts2 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) dec_below = decode(below, opts2) self.assertEqual( @@ -1259,7 +1255,7 @@ def test_tz_clamping(self): def test_datetime_auto(self): # Naive auto, in range. - opts1 = CodecOptions(datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO) + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) dec_inr = decode(inr) self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) @@ -1281,7 +1277,7 @@ def test_datetime_auto(self): # Aware auto, in range. opts2 = CodecOptions( - datetime_conversion=DatetimeConversionOpts.DATETIME_AUTO, + datetime_conversion=DatetimeConversion.DATETIME_AUTO, tz_aware=True, tzinfo=datetime.timezone.utc, ) diff --git a/test/test_client.py b/test/test_client.py index f520043ecf..7e7e14c0e5 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -67,7 +67,7 @@ from bson import encode from bson.codec_options import ( CodecOptions, - DatetimeConversionOpts, + DatetimeConversion, TypeEncoder, TypeRegistry, ) @@ -412,17 +412,15 @@ def test_uri_codec_options(self): ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual( - c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] ) # Change the passed datetime_conversion to a number and re-assert. - uri = uri.replace( - datetime_conversion, f"{int(DatetimeConversionOpts[datetime_conversion])}" - ) + uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") c = MongoClient(uri, connect=False) self.assertEqual( - c.codec_options.datetime_conversion, DatetimeConversionOpts[datetime_conversion] + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] ) def test_uri_option_precedence(self): diff --git a/test/test_json_util.py b/test/test_json_util.py index 576746e865..08ee63618f 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -21,7 +21,7 @@ import uuid from typing import Any, List, MutableMapping -from bson.codec_options import CodecOptions, DatetimeConversionOpts +from bson.codec_options import CodecOptions, DatetimeConversion sys.path[0:0] = [""] @@ -295,7 +295,7 @@ def test_datetime_ms(self): dat_max = {"x": DatetimeMS(_max_datetime_ms()).as_datetime(CodecOptions(tz_aware=False))} opts = JSONOptions( datetime_representation=DatetimeRepresentation.ISO8601, - datetime_conversion=DatetimeConversionOpts.DATETIME_MS, + datetime_conversion=DatetimeConversion.DATETIME_MS, ) self.assertEqual( From 3204290e93594f8ffe537ca5e7ed071d4c13056e Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 4 Aug 2022 16:58:56 -0700 Subject: [PATCH 0747/2111] PYTHON-2484 Added lock sanitization for MongoClient and ObjectId (#985) --- bson/__init__.py | 14 +++++ pymongo/cursor.py | 4 +- pymongo/lock.py | 39 ++++++++++++++ pymongo/mongo_client.py | 54 +++++++++++++++---- pymongo/monitor.py | 4 +- pymongo/ocsp_cache.py | 5 +- pymongo/periodic_executor.py | 5 +- pymongo/pool.py | 3 +- pymongo/topology.py | 17 +++--- test/test_fork.py | 100 +++++++++++++++++++++++++++++++++++ test/utils.py | 3 +- 11 files changed, 219 insertions(+), 29 deletions(-) create mode 100644 pymongo/lock.py create mode 100644 test/test_fork.py diff --git a/bson/__init__.py b/bson/__init__.py index b43c686de8..dc2e29238a 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -56,6 +56,7 @@ import datetime import itertools +import os import re import struct import sys @@ -1336,3 +1337,16 @@ def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OP def has_c() -> bool: """Is the C extension installed?""" return _USE_C + + +def _after_fork(): + """Releases the ObjectID lock child.""" + if ObjectId._inc_lock.locked(): + ObjectId._inc_lock.release() + + +if hasattr(os, "register_at_fork"): + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 2a85f1d82a..658c4276ef 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -14,7 +14,6 @@ """Cursor class to iterate over Mongo query results.""" import copy -import threading import warnings from collections import deque from typing import ( @@ -45,6 +44,7 @@ validate_is_mapping, ) from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _create_lock from pymongo.message import ( _CursorAddress, _GetMore, @@ -133,7 +133,7 @@ def __init__(self, sock, more_to_come): self.sock = sock self.more_to_come = more_to_come self.closed = False - self.lock = threading.Lock() + self.lock = _create_lock() def update_exhaust(self, more_to_come): self.more_to_come = more_to_come diff --git a/pymongo/lock.py b/pymongo/lock.py new file mode 100644 index 0000000000..b7c01f56b7 --- /dev/null +++ b/pymongo/lock.py @@ -0,0 +1,39 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import threading +import weakref + +_HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") + +# References to instances of _create_lock +_forkable_locks: weakref.WeakSet = weakref.WeakSet() + + +def _create_lock(): + """Represents a lock that is tracked upon instantiation using a WeakSet and + reset by pymongo upon forking. + """ + lock = threading.Lock() + if _HAS_REGISTER_AT_FORK: + _forkable_locks.add(lock) + return lock + + +def _release_locks() -> None: + # Completed the fork, reset all the locks in the child. + for lock in _forkable_locks: + if lock.locked(): + lock.release() diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index fd4c0e84bc..c8330f32d0 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -32,7 +32,7 @@ """ import contextlib -import threading +import os import weakref from collections import defaultdict from typing import ( @@ -82,6 +82,7 @@ ServerSelectionTimeoutError, WaitQueueTimeoutError, ) +from pymongo.lock import _create_lock, _release_locks from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector @@ -126,6 +127,7 @@ class MongoClient(common.BaseObject, Generic[_DocumentType]): # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. _constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() def __init__( self, @@ -788,7 +790,7 @@ def __init__( self.__options = options = ClientOptions(username, password, dbase, opts) self.__default_database_name = dbase - self.__lock = threading.Lock() + self.__lock = _create_lock() self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners @@ -817,6 +819,23 @@ def __init__( srv_max_hosts=srv_max_hosts, ) + self._init_background() + + if connect: + self._get_topology() + + self._encrypter = None + if self.__options.auto_encryption_opts: + from pymongo.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) + self._timeout = self.__options.timeout + + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self + + def _init_background(self): self._topology = Topology(self._topology_settings) def target(): @@ -838,15 +857,9 @@ def target(): self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor - if connect: - self._get_topology() - - self._encrypter = None - if self.__options.auto_encryption_opts: - from pymongo.encryption import _Encrypter - - self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) - self._timeout = options.timeout + def _after_fork(self): + """Resets topology in a child after successfully forking.""" + self._init_background() def _duplicate(self, **kwargs): args = self.__init_kwargs.copy() @@ -2150,3 +2163,22 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): return self.handle(exc_type, exc_val) + + +def _after_fork_child(): + """Releases the locks in child process and resets the + topologies in all MongoClients. + """ + # Reinitialize locks + _release_locks() + + # Perform cleanup in clients (i.e. get rid of topology) + for _, client in MongoClient._clients.items(): + client._after_fork() + + +if hasattr(os, "register_at_fork"): + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 844ad02262..b7d2b19118 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -15,7 +15,6 @@ """Class to monitor a MongoDB server on a background thread.""" import atexit -import threading import time import weakref from typing import Any, Mapping, cast @@ -23,6 +22,7 @@ from pymongo import common, periodic_executor from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello +from pymongo.lock import _create_lock from pymongo.periodic_executor import _shutdown_executors from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -350,7 +350,7 @@ def __init__(self, topology, topology_settings, pool): self._pool = pool self._moving_average = MovingAverage() - self._lock = threading.Lock() + self._lock = _create_lock() def close(self): self.gc_safe_close() diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 24507260ed..389ee09ce7 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -16,7 +16,8 @@ from collections import namedtuple from datetime import datetime as _datetime -from threading import Lock + +from pymongo.lock import _create_lock class _OCSPCache(object): @@ -30,7 +31,7 @@ class _OCSPCache(object): def __init__(self): self._data = {} # Hold this lock when accessing _data. - self._lock = Lock() + self._lock = _create_lock() def _get_cache_key(self, ocsp_request): return self.CACHE_KEY_TYPE( diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 2c3727a7a3..95e7830674 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -19,6 +19,8 @@ import weakref from typing import Any, Optional +from pymongo.lock import _create_lock + class PeriodicExecutor(object): def __init__(self, interval, min_interval, target, name=None): @@ -45,9 +47,8 @@ def __init__(self, interval, min_interval, target, name=None): self._thread: Optional[threading.Thread] = None self._name = name self._skip_sleep = False - self._thread_will_exit = False - self._lock = threading.Lock() + self._lock = _create_lock() def __repr__(self): return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) diff --git a/pymongo/pool.py b/pymongo/pool.py index 1fab98209f..6355692ac9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -56,6 +56,7 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat +from pymongo.lock import _create_lock from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason from pymongo.network import command, receive_message from pymongo.read_preferences import ReadPreference @@ -1152,7 +1153,7 @@ def __init__(self, address, options, handshake=True): # and returned to pool from the left side. Stale sockets removed # from the right side. self.sockets: collections.deque = collections.deque() - self.lock = threading.Lock() + self.lock = _create_lock() self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 diff --git a/pymongo/topology.py b/pymongo/topology.py index 6781a9e549..84975ca076 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -17,7 +17,6 @@ import os import queue import random -import threading import time import warnings import weakref @@ -37,6 +36,7 @@ WriteError, ) from pymongo.hello import Hello +from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -127,7 +127,7 @@ def __init__(self, topology_settings): self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False self._closed = False - self._lock = threading.Lock() + self._lock = _create_lock() self._condition = self._settings.condition_class(self._lock) self._servers = {} self._pid = None @@ -174,12 +174,13 @@ def open(self): self._pid = pid elif pid != self._pid: self._pid = pid - warnings.warn( - "MongoClient opened before fork. Create MongoClient only " - "after forking. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) + if not _HAS_REGISTER_AT_FORK: + warnings.warn( + "MongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe" + ) with self._lock: # Close servers and clear the pools. for server in self._servers.values(): diff --git a/test/test_fork.py b/test/test_fork.py new file mode 100644 index 0000000000..7180e1a239 --- /dev/null +++ b/test/test_fork.py @@ -0,0 +1,100 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that pymongo is fork safe.""" + +import os +from multiprocessing import Pipe +from test import IntegrationTest, client_context +from unittest import skipIf + +from bson.objectid import ObjectId + + +@client_context.require_connection +def setUpModule(): + pass + + +# Not available for versions of Python without "register_at_fork" +@skipIf( + not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" +) +class TestFork(IntegrationTest): + def test_lock_client(self): + """ + Forks the client with some items locked. + Parent => All locks should be as before the fork. + Child => All locks should be reset. + """ + + def exit_cond(): + self.client.admin.command("ping") + return 0 + + with self.client._MongoClient__lock: + # Call _get_topology, will launch a thread to fork upon __enter__ing + # the with region. + lock_pid = os.fork() + # The POSIX standard states only the forking thread is cloned. + # In the parent, it'll return here. + # In the child, it'll end with the calling thread. + if lock_pid == 0: + os._exit(exit_cond()) + else: + self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + + def test_lock_object_id(self): + """ + Forks the client with ObjectId's _inc_lock locked. + Parent => _inc_lock should remain locked. + Child => _inc_lock should be unlocked. + """ + with ObjectId._inc_lock: + lock_pid: int = os.fork() + + if lock_pid == 0: + os._exit(int(ObjectId._inc_lock.locked())) + else: + self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + + def test_topology_reset(self): + """ + Tests that topologies are different from each other. + Cannot use ID because virtual memory addresses may be the same. + Cannot reinstantiate ObjectId in the topology settings. + Relies on difference in PID when opened again. + """ + parent_conn, child_conn = Pipe() + init_id = self.client._topology._pid + parent_cursor_exc = self.client._kill_cursors_executor + lock_pid: int = os.fork() + + if lock_pid == 0: # Child + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) + ) + os._exit(0) + else: # Parent + self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) diff --git a/test/utils.py b/test/utils.py index 29ee1ca477..73003585c3 100644 --- a/test/utils.py +++ b/test/utils.py @@ -38,6 +38,7 @@ from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock from pymongo.monitoring import ( _SENSITIVE_COMMANDS, ConnectionCheckedInEvent, @@ -279,7 +280,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool(object): def __init__(self, address, options, handshake=True): self.gen = _PoolGeneration() - self._lock = threading.Lock() + self._lock = _create_lock() self.opts = options self.operation_count = 0 From c0dadcb6ca77177db7120c0c5712f251ea637d84 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 Aug 2022 13:53:07 -0500 Subject: [PATCH 0748/2111] PYTHON-3385 Add prose test for RewrapManyDataKey (#1034) --- test/test_encryption.py | 82 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 94a588bd6a..00f76b7c95 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -24,7 +24,7 @@ import textwrap import traceback import uuid -from typing import Any, Dict +from typing import Any, Dict, Mapping from pymongo.collection import Collection @@ -2202,6 +2202,86 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap +class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): + + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + def test_rewrap(self): + for src_provider in self.MASTER_KEYS: + for dst_provider in self.MASTER_KEYS: + with self.subTest(src_provider=src_provider, dst_provider=dst_provider): + self.run_test(src_provider, dst_provider) + + def run_test(self, src_provider, dst_provider): + # Step 1. Drop the collection ``keyvault.datakeys``. + self.client.keyvault.drop_collection("datakeys") + + # Step 2. Create a ``ClientEncryption`` object named ``client_encryption1`` + client_encryption1 = ClientEncryption( + key_vault_client=self.client, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=KMS_TLS_OPTS, + codec_options=OPTS, + ) + self.addCleanup(client_encryption1.close) + + # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. + key_id = client_encryption1.create_data_key( + master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider + ) + + # Step 4. Call ``client_encryption1.encrypt`` with the value "test" + cipher_text = client_encryption1.encrypt( + "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + + # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2`` + client2 = MongoClient() + self.addCleanup(client2.close) + client_encryption2 = ClientEncryption( + key_vault_client=client2, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=KMS_TLS_OPTS, + codec_options=OPTS, + ) + self.addCleanup(client_encryption1.close) + + # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. + rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( + {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider] + ) + + self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1) + + # 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result1 = client_encryption1.decrypt(cipher_text) + self.assertEqual(decrypt_result1, "test") + + # 8. Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result2 = client_encryption2.decrypt(cipher_text) + self.assertEqual(decrypt_result2, "test") + + class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): # Queryable Encryption is not supported on Standalone topology. @client_context.require_no_standalone From a20ff68d51734d272542d03329b657101a093806 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Mon, 15 Aug 2022 12:07:49 -0700 Subject: [PATCH 0749/2111] PYTHON-3390 Test for encrypted client post-fork (#1037) --- test/test_encryption.py | 19 +++++++++++++++++++ test/test_fork.py | 6 +++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 00f76b7c95..e4372d7e5a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -329,6 +329,25 @@ def test_use_after_close(self): with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): client.admin.command("ping") + # Not available for versions of Python without "register_at_fork" + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + def test_fork(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = rs_or_single_client(auto_encryption_opts=opts) + + lock_pid = os.fork() + if lock_pid == 0: + client.admin.command("ping") + client.close() + os._exit(0) + else: + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + client.admin.command("ping") + client.close() + class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): def test_upsert_uuid_standard_encrypt(self): diff --git a/test/test_fork.py b/test/test_fork.py index 7180e1a239..b1c98a26f1 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -53,7 +53,7 @@ def exit_cond(): if lock_pid == 0: os._exit(exit_cond()) else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) def test_lock_object_id(self): """ @@ -67,7 +67,7 @@ def test_lock_object_id(self): if lock_pid == 0: os._exit(int(ObjectId._inc_lock.locked())) else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) def test_topology_reset(self): """ @@ -92,7 +92,7 @@ def test_topology_reset(self): ) os._exit(0) else: # Parent - self.assertEqual(0, os.waitpid(lock_pid, 0)[1] >> 8) + self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) self.assertEqual(self.client._topology._pid, init_id) child_id = parent_conn.recv() self.assertNotEqual(child_id, init_id) From 6d2e27a1b743e4c3747975e26b8b587c2faae428 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 15 Aug 2022 21:18:44 -0700 Subject: [PATCH 0750/2111] PYTHON-3355 Test with consistent versions of crypt_shared and server (#1033) --- .evergreen/config.yml | 6 +++++- .evergreen/run-tests.sh | 8 +++++++- .../spec/legacy/fle2-InsertFind-Unindexed.json | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac7f97f6fa..6acb6e3b74 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -474,7 +474,8 @@ functions: export MULTI_MONGOS_LB_URI="${MONGODB_URI}" fi - PYTHON_BINARY=${PYTHON_BINARY} \ + MONGODB_VERSION=${VERSION} \ + PYTHON_BINARY=${PYTHON_BINARY} \ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ C_EXTENSIONS=${C_EXTENSIONS} \ COVERAGE=${COVERAGE} \ @@ -2316,6 +2317,7 @@ buildvariants: encryption: [ "encryption_crypt_shared" ] then: remove_tasks: + - ".rapid" - ".5.0" - ".4.4" - ".4.2" @@ -2410,6 +2412,7 @@ buildvariants: encryption: [ "encryption_crypt_shared" ] then: remove_tasks: + - ".rapid" - ".5.0" - ".4.4" - ".4.2" @@ -2519,6 +2522,7 @@ buildvariants: encryption: [ "encryption_crypt_shared" ] then: remove_tasks: + - ".rapid" - ".5.0" - ".4.4" - ".4.2" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4367bad246..9a0eb25e00 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -26,6 +26,7 @@ GREEN_FRAMEWORK=${GREEN_FRAMEWORK:-} C_EXTENSIONS=${C_EXTENSIONS:-} COVERAGE=${COVERAGE:-} COMPRESSORS=${COMPRESSORS:-} +MONGODB_VERSION=${MONGODB_VERSION:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} @@ -151,9 +152,14 @@ if [ -n "$TEST_ENCRYPTION" ]; then . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh if [ -n "$TEST_CRYPT_SHARED" ]; then + REAL_VERSION=$(mongod --version | head -n1 | cut -d v -f3 | tr -d "\r") + if [ "$MONGODB_VERSION" = "latest" ]; then + REAL_VERSION="latest" + fi echo "Testing CSFLE with crypt_shared lib" $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ - --version latest --out ../crypt_shared/ + --version "$REAL_VERSION" \ + --out ../crypt_shared/ export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH export PATH=../crypt_shared/bin:$PATH diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json index 1a75095907..c1bdc90760 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json @@ -241,7 +241,7 @@ } }, "result": { - "errorContains": "Cannot query" + "errorContains": "encrypt" } } ] From 4170dc958e2ac1a43d92fe0ea3bb8f22674cff0a Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Tue, 16 Aug 2022 10:40:28 -0700 Subject: [PATCH 0751/2111] PYTHON-3393 Added fork-safety stress test. (#1036) --- test/test_fork.py | 64 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/test/test_fork.py b/test/test_fork.py index b1c98a26f1..41ce162492 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -17,6 +17,7 @@ import os from multiprocessing import Pipe from test import IntegrationTest, client_context +from test.utils import ExceptionCatchingThread, rs_or_single_client from unittest import skipIf from bson.objectid import ObjectId @@ -51,7 +52,11 @@ def exit_cond(): # In the parent, it'll return here. # In the child, it'll end with the calling thread. if lock_pid == 0: - os._exit(exit_cond()) + code = -1 + try: + code = exit_cond() + finally: + os._exit(code) else: self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) @@ -65,7 +70,11 @@ def test_lock_object_id(self): lock_pid: int = os.fork() if lock_pid == 0: - os._exit(int(ObjectId._inc_lock.locked())) + code = -1 + try: + code = int(ObjectId._inc_lock.locked()) + finally: + os._exit(code) else: self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) @@ -98,3 +107,54 @@ def test_topology_reset(self): self.assertNotEqual(child_id, init_id) passed, msg = parent_conn.recv() self.assertTrue(passed, msg) + + def test_many_threaded(self): + # Fork randomly while doing operations. + + clients = [] + for _ in range(10): + c = rs_or_single_client() + clients.append(c) + self.addCleanup(c.close) + + class ForkThread(ExceptionCatchingThread): + def __init__(self, runner, clients): + self.runner = runner + self.clients = clients + self.fork = False + + super().__init__(target=self.fork_behavior) + + def fork_behavior(self) -> None: + def action(client): + client.admin.command("ping") + return 0 + + for i in range(200): + # Pick a random client. + rc = self.clients[i % len(self.clients)] + if i % 50 == 0 and self.fork: + # Fork + pid = os.fork() + if pid == 0: + code = -1 + try: + for c in self.clients: + action(c) + code = 0 + finally: + os._exit(code) + else: + self.runner.assertEqual(0, os.waitpid(pid, 0)[1]) + action(rc) + + threads = [ForkThread(self, clients) for _ in range(10)] + threads[-1].fork = True + for t in threads: + t.start() + + for t in threads: + t.join() + + for c in clients: + c.close() From dd3b4b11d2ce4e08ed27d0ea1d9c95c527c0aec0 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 18 Aug 2022 12:15:44 -0700 Subject: [PATCH 0752/2111] PYTHON-3403 Skips unit test if eventlent or gevent is imported (#1039) --- test/test_encryption.py | 5 +++++ test/test_fork.py | 13 ++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index e4372d7e5a..cf34ca61a0 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -51,6 +51,7 @@ TestCreator, TopologyEventListener, camel_to_snake_args, + is_greenthread_patched, rs_or_single_client, wait_until, ) @@ -334,6 +335,10 @@ def test_use_after_close(self): not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python", ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", + ) def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/test_fork.py b/test/test_fork.py index 41ce162492..df1f009e21 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -17,7 +17,11 @@ import os from multiprocessing import Pipe from test import IntegrationTest, client_context -from test.utils import ExceptionCatchingThread, rs_or_single_client +from test.utils import ( + ExceptionCatchingThread, + is_greenthread_patched, + rs_or_single_client, +) from unittest import skipIf from bson.objectid import ObjectId @@ -32,6 +36,10 @@ def setUpModule(): @skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" ) +@skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", +) class TestFork(IntegrationTest): def test_lock_client(self): """ @@ -156,5 +164,8 @@ def action(client): for t in threads: t.join() + for t in threads: + self.assertIsNone(t.exc) + for c in clients: c.close() From cfc99c82f34ddc402bac6010db1ca54bd093dd8c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Aug 2022 14:01:35 -0700 Subject: [PATCH 0753/2111] PYTHON-3402 Fix TestRewrapWithSeparateClientEncryption (#1040) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index cf34ca61a0..4ed415d4d5 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2279,7 +2279,7 @@ def run_test(self, src_provider, dst_provider): ) # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2`` - client2 = MongoClient() + client2 = rs_or_single_client() self.addCleanup(client2.close) client_encryption2 = ClientEncryption( key_vault_client=client2, From 09aeef0f9d7118f3a1faaed78dcf88ba01180dc2 Mon Sep 17 00:00:00 2001 From: Ben Warner Date: Thu, 18 Aug 2022 15:30:45 -0700 Subject: [PATCH 0754/2111] Changelog 4.3 (#1038) --- doc/changelog.rst | 33 +++++++++++++++++++++++++++++++++ doc/faq.rst | 24 +++++++++++++++--------- 2 files changed, 48 insertions(+), 9 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 7afaca22a1..a83df179c1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,39 @@ Changelog ========= +Changes in Version 4.3 +---------------------- + +PyMongo 4.3 brings a number of improvements including: + +- Added support for decoding BSON datetimes outside of the range supported + by Python's :class:`~datetime.datetime` builtin. See + :ref:`handling-out-of-range-datetimes` for examples, as well as + :class:`bson.datetime_ms.DatetimeMS`, + :class:`bson.codec_options.DatetimeConversion`, and + :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion`` + parameter for more details (`PYTHON-1824`_). +- Added support for using a :class:`~pymongo.mongo_client.MongoClient` after + an :py:func:`os.fork` (`PYTHON-2484`_). + +Bug fixes +......... + +- Fixed a bug where :class:`~pymongo.change_stream.ChangeStream` + would allow an app to retry calling ``next()`` or ``try_next()`` even + after non-resumable errors (`PYTHON-3389`_). + +Issues Resolved +............... + +See the `PyMongo 4.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824 +.. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484 +.. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389 +.. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425 + Changes in Version 4.2 ---------------------- diff --git a/doc/faq.rst b/doc/faq.rst index c48dd316e5..a04f761f84 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -14,15 +14,21 @@ for threaded applications. Is PyMongo fork-safe? --------------------- -PyMongo is not fork-safe. Care must be taken when using instances of -:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically, -instances of MongoClient must not be copied from a parent process to -a child process. Instead, the parent process and each child process must -create their own instances of MongoClient. Instances of MongoClient copied from -the parent process have a high probability of deadlock in the child process due -to the inherent incompatibilities between ``fork()``, threads, and locks -described :ref:`below `. PyMongo will attempt to -issue a warning if there is a chance of this deadlock occurring. +Starting in PyMongo 4.3, forking on a compatible Python interpreter while +using a client will result in all locks held by :class:`~bson.objectid +.ObjectId` and :class:`~pymongo.mongo_client.MongoClient` being released in +the child, as well as state shared between child and parent processes being +reset. + +If greenlet has been imported (usually with a library like gevent or +Eventlet), care must be taken when using instances of :class:`~pymongo +.mongo_client.MongoClient` with ``fork()``. Specifically, instances of +MongoClient must not be copied from a parent process to a child process. +Instead, the parent process and each child process must create their own +instances of MongoClient. Instances of MongoClient copied from the parent +process have a high probability of deadlock in the child process due to the +inherent incompatibilities between ``fork()``, threads, and locks described +:ref:`below`. .. _pymongo-fork-safe-details: From a0a5c7194de2b0f8b7e814a6beb56a2e15517bb9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Aug 2022 15:38:09 -0700 Subject: [PATCH 0755/2111] PYTHON-3405/PYTHON-2531 Fix tests for primary step down (#1041) --- test/unified_format.py | 9 +++++---- test/utils.py | 24 ++++++++++++++++++------ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/test/unified_format.py b/test/unified_format.py index dbf4ef988f..173b4dcb97 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -114,6 +114,7 @@ from pymongo.server_selectors import Selection, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address from pymongo.write_concern import WriteConcern JSON_OPTS = json_util.JSONOptions(tz_aware=False) @@ -1442,21 +1443,21 @@ def _testOperation_assertTopologyType(self, spec): self.assertIsInstance(description, TopologyDescription) self.assertEqual(description.topology_type_name, spec["topologyType"]) - def _testOperation_waitForPrimaryChange(self, spec): + def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: """Run the waitForPrimaryChange test operation.""" client = self.entity_map[spec["client"]] old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] timeout = spec["timeoutMS"] / 1000.0 - def get_primary(td: TopologyDescription) -> Optional[ServerDescription]: + def get_primary(td: TopologyDescription) -> Optional[_Address]: servers = writable_server_selector(Selection.from_topology_description(td)) if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: - return servers[0] + return servers[0].address return None old_primary = get_primary(old_description) - def primary_changed(): + def primary_changed() -> bool: primary = client.primary if primary is None: return False diff --git a/test/utils.py b/test/utils.py index 73003585c3..1ac726d2d4 100644 --- a/test/utils.py +++ b/test/utils.py @@ -593,7 +593,7 @@ def rs_or_single_client(h=None, p=None, **kwargs): return _mongo_client(h, p, **kwargs) -def ensure_all_connected(client): +def ensure_all_connected(client: MongoClient) -> None: """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a non-replica set client. @@ -605,14 +605,26 @@ def ensure_all_connected(client): if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") - target_host_list = set(hello["hosts"]) + target_host_list = set(hello["hosts"] + hello.get("passives", [])) connected_host_list = set([hello["me"]]) - admindb = client.get_database("admin") # Run hello until we have connected to each host at least once. - while connected_host_list != target_host_list: - hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) - connected_host_list.update([hello["me"]]) + def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello = client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + wait_until(lambda: target_host_list == discover(), "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) def one(s): From 7f19186cacbcf3e2bcb42dba46997f5ff68c5378 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 18 Aug 2022 17:06:02 -0700 Subject: [PATCH 0756/2111] PYTHON-3406 Refactor fork tests to print traceback on failure (#1042) --- test/__init__.py | 29 +++++++++- test/test_encryption.py | 10 +--- test/test_fork.py | 116 +++++++++++++--------------------------- 3 files changed, 65 insertions(+), 90 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 2a3e59adf9..a3e1ca7342 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -43,7 +43,7 @@ from contextlib import contextmanager from functools import wraps from test.version import Version -from typing import Dict, no_type_check +from typing import Dict, Generator, no_type_check from unittest import SkipTest from urllib.parse import quote_plus @@ -998,6 +998,33 @@ def fail_point(self, command_args): "configureFailPoint", cmd_on["configureFailPoint"], mode="off" ) + @contextmanager + def fork(self) -> Generator[int, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork() as pid: + if pid == 0: # Child + pass + else: # Parent + pass + """ + pid = os.fork() + in_child = pid == 0 + try: + yield pid + except: + if in_child: + traceback.print_exc() + os._exit(1) + raise + finally: + if in_child: + os._exit(0) + # In parent, assert child succeeded. + self.assertEqual(0, os.waitpid(pid, 0)[1]) + class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" diff --git a/test/test_encryption.py b/test/test_encryption.py index 4ed415d4d5..4146695707 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -330,7 +330,6 @@ def test_use_after_close(self): with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): client.admin.command("ping") - # Not available for versions of Python without "register_at_fork" @unittest.skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python", @@ -342,14 +341,7 @@ def test_use_after_close(self): def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) - - lock_pid = os.fork() - if lock_pid == 0: - client.admin.command("ping") - client.close() - os._exit(0) - else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + with self.fork(): client.admin.command("ping") client.close() diff --git a/test/test_fork.py b/test/test_fork.py index df1f009e21..092ac434a0 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -16,7 +16,7 @@ import os from multiprocessing import Pipe -from test import IntegrationTest, client_context +from test import IntegrationTest from test.utils import ( ExceptionCatchingThread, is_greenthread_patched, @@ -27,12 +27,6 @@ from bson.objectid import ObjectId -@client_context.require_connection -def setUpModule(): - pass - - -# Not available for versions of Python without "register_at_fork" @skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" ) @@ -42,83 +36,52 @@ def setUpModule(): ) class TestFork(IntegrationTest): def test_lock_client(self): - """ - Forks the client with some items locked. - Parent => All locks should be as before the fork. - Child => All locks should be reset. - """ - - def exit_cond(): - self.client.admin.command("ping") - return 0 - + # Forks the client with some items locked. + # Parent => All locks should be as before the fork. + # Child => All locks should be reset. with self.client._MongoClient__lock: - # Call _get_topology, will launch a thread to fork upon __enter__ing - # the with region. - lock_pid = os.fork() - # The POSIX standard states only the forking thread is cloned. - # In the parent, it'll return here. - # In the child, it'll end with the calling thread. - if lock_pid == 0: - code = -1 - try: - code = exit_cond() - finally: - os._exit(code) - else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + with self.fork() as pid: + if pid == 0: # Child + self.client.admin.command("ping") + self.client.admin.command("ping") def test_lock_object_id(self): - """ - Forks the client with ObjectId's _inc_lock locked. - Parent => _inc_lock should remain locked. - Child => _inc_lock should be unlocked. - """ + # Forks the client with ObjectId's _inc_lock locked. + # Parent => _inc_lock should remain locked. + # Child => _inc_lock should be unlocked. with ObjectId._inc_lock: - lock_pid: int = os.fork() - - if lock_pid == 0: - code = -1 - try: - code = int(ObjectId._inc_lock.locked()) - finally: - os._exit(code) - else: - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) + with self.fork() as pid: + if pid == 0: # Child + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) def test_topology_reset(self): - """ - Tests that topologies are different from each other. - Cannot use ID because virtual memory addresses may be the same. - Cannot reinstantiate ObjectId in the topology settings. - Relies on difference in PID when opened again. - """ + # Tests that topologies are different from each other. + # Cannot use ID because virtual memory addresses may be the same. + # Cannot reinstantiate ObjectId in the topology settings. + # Relies on difference in PID when opened again. parent_conn, child_conn = Pipe() init_id = self.client._topology._pid parent_cursor_exc = self.client._kill_cursors_executor - lock_pid: int = os.fork() - - if lock_pid == 0: # Child - self.client.admin.command("ping") - child_conn.send(self.client._topology._pid) - child_conn.send( - ( - parent_cursor_exc != self.client._kill_cursors_executor, - "client._kill_cursors_executor was not reinitialized", + with self.fork() as pid: + if pid == 0: # Child + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) ) - ) - os._exit(0) - else: # Parent - self.assertEqual(0, os.waitpid(lock_pid, 0)[1]) - self.assertEqual(self.client._topology._pid, init_id) - child_id = parent_conn.recv() - self.assertNotEqual(child_id, init_id) - passed, msg = parent_conn.recv() - self.assertTrue(passed, msg) + else: # Parent + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) def test_many_threaded(self): # Fork randomly while doing operations. - clients = [] for _ in range(10): c = rs_or_single_client() @@ -143,17 +106,10 @@ def action(client): rc = self.clients[i % len(self.clients)] if i % 50 == 0 and self.fork: # Fork - pid = os.fork() - if pid == 0: - code = -1 - try: + with self.runner.fork() as pid: + if pid == 0: # Child for c in self.clients: action(c) - code = 0 - finally: - os._exit(code) - else: - self.runner.assertEqual(0, os.waitpid(pid, 0)[1]) action(rc) threads = [ForkThread(self, clients) for _ in range(10)] From 1e6b4a48d45b502986e1dd5033b3b6b06a59d3d6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 22 Aug 2022 14:16:27 -0700 Subject: [PATCH 0757/2111] PYTHON-3406 Log traceback when fork() test encounters a deadlock (#1045) Co-authored-by: Ben Warner --- test/__init__.py | 42 ++++++++++++---------- test/test_encryption.py | 8 +++-- test/test_fork.py | 80 +++++++++++++++++++++++++---------------- 3 files changed, 78 insertions(+), 52 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index a3e1ca7342..ada09db55e 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -17,7 +17,9 @@ import base64 import gc +import multiprocessing import os +import signal import socket import sys import threading @@ -43,7 +45,7 @@ from contextlib import contextmanager from functools import wraps from test.version import Version -from typing import Dict, Generator, no_type_check +from typing import Callable, Dict, Generator, no_type_check from unittest import SkipTest from urllib.parse import quote_plus @@ -999,31 +1001,33 @@ def fail_point(self, command_args): ) @contextmanager - def fork(self) -> Generator[int, None, None]: + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: """Helper for tests that use os.fork() Use in a with statement: - with self.fork() as pid: - if pid == 0: # Child - pass - else: # Parent - pass + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started """ - pid = os.fork() - in_child = pid == 0 + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=target) + proc.start() try: - yield pid - except: - if in_child: - traceback.print_exc() - os._exit(1) - raise + yield proc # type: ignore finally: - if in_child: - os._exit(0) - # In parent, assert child succeeded. - self.assertEqual(0, os.waitpid(pid, 0)[1]) + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # If it failed, SIGINT to get traceback and wait 10s. + os.kill(pid, signal.SIGINT) + proc.join(10) + proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) class IntegrationTest(PyMongoTestCase): diff --git a/test/test_encryption.py b/test/test_encryption.py index 4146695707..8e8814a421 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -341,9 +341,13 @@ def test_use_after_close(self): def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = rs_or_single_client(auto_encryption_opts=opts) - with self.fork(): + self.addCleanup(client.close) + + def target(): client.admin.command("ping") - client.close() + + with self.fork(target): + target() class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): diff --git a/test/test_fork.py b/test/test_fork.py index 092ac434a0..ac103af385 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -15,22 +15,26 @@ """Test that pymongo is fork safe.""" import os +import sys +import unittest from multiprocessing import Pipe + +from bson.objectid import ObjectId + +sys.path[0:0] = [""] + from test import IntegrationTest from test.utils import ( ExceptionCatchingThread, is_greenthread_patched, rs_or_single_client, ) -from unittest import skipIf -from bson.objectid import ObjectId - -@skipIf( +@unittest.skipIf( not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" ) -@skipIf( +@unittest.skipIf( is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) @@ -40,9 +44,12 @@ def test_lock_client(self): # Parent => All locks should be as before the fork. # Child => All locks should be reset. with self.client._MongoClient__lock: - with self.fork() as pid: - if pid == 0: # Child - self.client.admin.command("ping") + + def target(): + self.client.admin.command("ping") + + with self.fork(target): + pass self.client.admin.command("ping") def test_lock_object_id(self): @@ -50,10 +57,13 @@ def test_lock_object_id(self): # Parent => _inc_lock should remain locked. # Child => _inc_lock should be unlocked. with ObjectId._inc_lock: - with self.fork() as pid: - if pid == 0: # Child - self.assertFalse(ObjectId._inc_lock.locked()) - self.assertTrue(ObjectId()) + + def target(): + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) + + with self.fork(target): + pass def test_topology_reset(self): # Tests that topologies are different from each other. @@ -63,22 +73,23 @@ def test_topology_reset(self): parent_conn, child_conn = Pipe() init_id = self.client._topology._pid parent_cursor_exc = self.client._kill_cursors_executor - with self.fork() as pid: - if pid == 0: # Child - self.client.admin.command("ping") - child_conn.send(self.client._topology._pid) - child_conn.send( - ( - parent_cursor_exc != self.client._kill_cursors_executor, - "client._kill_cursors_executor was not reinitialized", - ) + + def target(): + self.client.admin.command("ping") + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", ) - else: # Parent - self.assertEqual(self.client._topology._pid, init_id) - child_id = parent_conn.recv() - self.assertNotEqual(child_id, init_id) - passed, msg = parent_conn.recv() - self.assertTrue(passed, msg) + ) + + with self.fork(target): + self.assertEqual(self.client._topology._pid, init_id) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) def test_many_threaded(self): # Fork randomly while doing operations. @@ -106,10 +117,13 @@ def action(client): rc = self.clients[i % len(self.clients)] if i % 50 == 0 and self.fork: # Fork - with self.runner.fork() as pid: - if pid == 0: # Child - for c in self.clients: - action(c) + def target(): + for c_ in self.clients: + action(c_) + c_.close() + + with self.runner.fork(target=target) as proc: + self.runner.assertTrue(proc.pid) action(rc) threads = [ForkThread(self, clients) for _ in range(10)] @@ -125,3 +139,7 @@ def action(client): for c in clients: c.close() + + +if __name__ == "__main__": + unittest.main() From 9ff0ac8a62f1ef1a334c01af88a9c853cd7dec18 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 22 Aug 2022 15:05:39 -0700 Subject: [PATCH 0758/2111] PYTHON-3407 macos release failing on Python 3.8 AttributeError: 'Distribution' object has no attribute 'convert_2to3_doctests' (#1044) --- .evergreen/build-mac.sh | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 09950a592f..2dd02a0fbe 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -12,16 +12,10 @@ for VERSION in 3.7 3.8 3.9 3.10; do PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build - # Install wheel if not already there. - if ! $PYTHON -m wheel version; then - createvirtualenv $PYTHON releasevenv - WHEELPYTHON=python - python -m pip install --upgrade wheel - else - WHEELPYTHON=$PYTHON - fi - - $WHEELPYTHON setup.py bdist_wheel + createvirtualenv $PYTHON releasevenv + python -m pip install --upgrade wheel + python -m pip install setuptools==63.2.0 + python setup.py bdist_wheel deactivate || true rm -rf releasevenv From 1575e53ef739008951a92717b190b8ec290165c7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 22 Aug 2022 16:19:30 -0700 Subject: [PATCH 0759/2111] PYTHON-3409 Retry flakey CSOT tests twice (#1046) --- test/test_retryable_writes.py | 1 + test/unified_format.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 0eb863f4cf..8d556b90ae 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -471,6 +471,7 @@ def setUpClass(cls): def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + self.addCleanup(client.close) # Ensure collection exists. client.pymongo_test.testcoll.insert_one({}) diff --git a/test/unified_format.py b/test/unified_format.py index 173b4dcb97..aec7763272 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -25,6 +25,7 @@ import re import sys import time +import traceback import types from collections import abc from test import ( @@ -1580,6 +1581,25 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): + if "csot" in self.id().lower(): + # Retry CSOT tests up to 2 times to deal with flakey tests. + attempts = 3 + for i in range(attempts): + try: + return self._run_scenario(spec, uri) + except AssertionError: + if i < attempts - 1: + print( + f"Retrying after attempt {i+1} of {self.id()} failed with:\n" + f"{traceback.format_exc()}" + ) + self.setUp() + continue + raise + else: + self._run_scenario(spec, uri) + + def _run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) From 0f135a157e2fa6ae66d4091186bdf0c40113ef77 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 25 Aug 2022 20:16:39 -0500 Subject: [PATCH 0760/2111] PYTHON-3413 Ensure AWS EC2 Credential Test is Running Properly (#1048) --- .evergreen/config.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6acb6e3b74..621542226d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -643,6 +643,13 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the EC2 auth test, skipping..." + exit 0 + fi + # Write an empty prepare_mongodb_aws so no auth environment variables + # are set. + echo "" > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials as environment variables": From 78256368c7073e5c71007a7dc4bb9f12db7746b7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 29 Aug 2022 17:33:00 -0500 Subject: [PATCH 0761/2111] PYTHON-3411 Stop testing MongoDB 6.0 on Amazon1 2018 (#1049) --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 621542226d..a487c264a0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2242,8 +2242,8 @@ buildvariants: - matrix_name: "tests-all" matrix_spec: platform: - # OSes that support versions of MongoDB>=2.6 with SSL. - - awslinux + # OSes that support versions of MongoDB>=3.6 with SSL. + - ubuntu-18.04 auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: From e3ff041b474835f007faaead470bb12dcc9dc22c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 9 Sep 2022 16:28:15 -0500 Subject: [PATCH 0762/2111] PYTHON-3433 Failure: test.test_encryption.TestSpec.test_legacy_maxWireVersion_operation_fails_with_maxWireVersion___8 (#1052) --- test/client-side-encryption/spec/legacy/maxWireVersion.json | 2 +- test/test_encryption.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/client-side-encryption/spec/legacy/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json index c1088a0ecf..f04f58dffd 100644 --- a/test/client-side-encryption/spec/legacy/maxWireVersion.json +++ b/test/client-side-encryption/spec/legacy/maxWireVersion.json @@ -1,7 +1,7 @@ { "runOn": [ { - "maxServerVersion": "4.0" + "maxServerVersion": "4.0.99" } ], "database_name": "default", diff --git a/test/test_encryption.py b/test/test_encryption.py index 8e8814a421..567d606893 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -616,6 +616,8 @@ def parse_auto_encrypt_opts(self, opts): opts["kms_tls_options"] = KMS_TLS_OPTS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) opts = dict(opts) return AutoEncryptionOpts(**opts) From 1019c91bf67fbed09f6ce26df16614092676ab54 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 12 Sep 2022 19:14:50 -0500 Subject: [PATCH 0763/2111] PYTHON-3424 PyMongo Universal Wheels Are Improperly Compiled (#1051) --- .evergreen/build-mac.sh | 27 +++++++++++++-------------- .evergreen/config.yml | 23 +++++++++++++++++++++-- tools/fail_if_no_c.py | 13 +++++++++++++ 3 files changed, 47 insertions(+), 16 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 2dd02a0fbe..270c92b59a 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -8,22 +8,21 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 3.7 3.8 3.9 3.10; do - PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 - rm -rf build +VERSION=${VERSION:-3.10} +PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 +rm -rf build - createvirtualenv $PYTHON releasevenv - python -m pip install --upgrade wheel - python -m pip install setuptools==63.2.0 - python setup.py bdist_wheel - deactivate || true - rm -rf releasevenv +createvirtualenv $PYTHON releasevenv +python -m pip install --upgrade wheel +python -m pip install setuptools==63.2.0 +python setup.py bdist_wheel +deactivate || true +rm -rf releasevenv - # Test that each wheel is installable. - for release in dist/*; do - testinstall $PYTHON $release - mv $release validdist/ - done +# Test that each wheel is installable. +for release in dist/*; do + testinstall $PYTHON $release + mv $release validdist/ done mv validdist/* dist diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a487c264a0..0808cc11be 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -891,7 +891,7 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - .evergreen/release.sh + VERSION=${VERSION} ENSURE_UNIVERSAL2=${ENSURE_UNIVERSAL2} .evergreen/release.sh "upload release": - command: archive.targz_pack @@ -1046,11 +1046,30 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release-mac" + - name: "release-mac-1100" + tags: ["release_tag"] + run_on: macos-1100 + commands: + - func: "build release" + vars: + VERSION: "3.10" + ENSURE_UNIVERSAL2: "1" + - func: "build release" + vars: + VERSION: "3.9" + ENSURE_UNIVERSAL2: "1" + - func: "upload release" + + - name: "release-mac-1014" tags: ["release_tag"] run_on: macos-1014 commands: - func: "build release" + vars: + VERSION: "3.7" + - func: "build release" + vars: + VERSION: "3.8" - func: "upload release" - name: "release-windows" diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 6cb82eed57..e2e9c52527 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -17,6 +17,9 @@ Only really intended to be used by internal build scripts. """ +import glob +import os +import subprocess import sys sys.path[0:0] = [""] @@ -26,3 +29,13 @@ if not pymongo.has_c() or not bson.has_c(): sys.exit("could not load C extensions") + +if os.environ.get("ENSURE_UNIVERSAL2") == "1": + parent_dir = os.path.dirname(pymongo.__path__[0]) + for so_file in glob.glob(f"{parent_dir}/**/*.so"): + print(f"Checking universal2 compatibility in {so_file}...") + output = subprocess.check_output(["file", so_file]) + if "arm64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with arm64 support") + if "x86_64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with x86_64 support") From b8cb1c1cf06623ff52faf4813afee3bca420c995 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 13 Sep 2022 15:30:56 -0500 Subject: [PATCH 0764/2111] PYTHON-3413 Skip EC2 test on Windows (#1054) --- .evergreen/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0808cc11be..9d016f4d8a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1938,6 +1938,7 @@ axes: run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days variables: + skip_EC2_auth_test: true skip_ECS_auth_test: true python3_binary: "C:/python/Python38/python.exe" venv_bin_dir: "Scripts" From 179efda31200b495beab6c2e94f365f5713aadc4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 13 Sep 2022 14:14:53 -0700 Subject: [PATCH 0765/2111] PYTHON-3406 Reinstate warning and docs that PyMongo is not fork safe (#1050) Log child process C-level stacks when fork tests deadlock. Encode hostname to bytes to avoid getaddrinfo importlib deadlock. --- doc/changelog.rst | 7 +++-- doc/faq.rst | 47 +++++++++++++++++++---------- pymongo/mongo_client.py | 11 +++---- pymongo/pool.py | 6 ++-- pymongo/topology.py | 15 +++++----- test/__init__.py | 66 +++++++++++++++++++++++++++++++++++++---- test/test_fork.py | 61 +++---------------------------------- test/unified_format.py | 3 +- 8 files changed, 120 insertions(+), 96 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index a83df179c1..7107e57333 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,8 +13,11 @@ PyMongo 4.3 brings a number of improvements including: :class:`bson.codec_options.DatetimeConversion`, and :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion`` parameter for more details (`PYTHON-1824`_). -- Added support for using a :class:`~pymongo.mongo_client.MongoClient` after - an :py:func:`os.fork` (`PYTHON-2484`_). +- PyMongo now resets its locks and other shared state in the child process + after a :py:func:`os.fork` to reduce the frequency of deadlocks. Note that + deadlocks are still possible because libraries that PyMongo depends like + OpenSSL cannot be made fork() safe in multithreaded applications. + (`PYTHON-2484`_). For more info see :ref:`pymongo-fork-safe`. Bug fixes ......... diff --git a/doc/faq.rst b/doc/faq.rst index a04f761f84..acf557a81b 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -14,21 +14,15 @@ for threaded applications. Is PyMongo fork-safe? --------------------- -Starting in PyMongo 4.3, forking on a compatible Python interpreter while -using a client will result in all locks held by :class:`~bson.objectid -.ObjectId` and :class:`~pymongo.mongo_client.MongoClient` being released in -the child, as well as state shared between child and parent processes being -reset. - -If greenlet has been imported (usually with a library like gevent or -Eventlet), care must be taken when using instances of :class:`~pymongo -.mongo_client.MongoClient` with ``fork()``. Specifically, instances of -MongoClient must not be copied from a parent process to a child process. -Instead, the parent process and each child process must create their own -instances of MongoClient. Instances of MongoClient copied from the parent -process have a high probability of deadlock in the child process due to the -inherent incompatibilities between ``fork()``, threads, and locks described -:ref:`below`. +PyMongo is not fork-safe. Care must be taken when using instances of +:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically, +instances of MongoClient must not be copied from a parent process to +a child process. Instead, the parent process and each child process must +create their own instances of MongoClient. Instances of MongoClient copied from +the parent process have a high probability of deadlock in the child process due +to the inherent incompatibilities between ``fork()``, threads, and locks +described :ref:`below `. PyMongo will attempt to +issue a warning if there is a chance of this deadlock occurring. .. _pymongo-fork-safe-details: @@ -44,10 +38,33 @@ created by ``fork()`` only has one thread, so any locks that were taken out by other threads in the parent will never be released in the child. The next time the child process attempts to acquire one of these locks, deadlock occurs. +Starting in version 4.3, PyMongo utilizes :py:func:`os.register_at_fork` to +reset its locks and other shared state in the child process after a +:py:func:`os.fork` to reduce the frequency of deadlocks. However deadlocks +are still possible because libraries that PyMongo depends on, like `OpenSSL`_ +and `getaddrinfo(3)`_ (on some platforms), are not fork() safe in a +multithreaded application. Linux also imposes the restriction that: + + After a `fork()`_ in a multithreaded program, the child can + safely call only async-signal-safe functions (see + `signal-safety(7)`_) until such time as it calls `execve(2)`_. + +PyMongo relies on functions that are *not* `async-signal-safe`_ and hence the +child process can experience deadlocks or crashes when attempting to call +a non `async-signal-safe`_ function. For examples of deadlocks or crashes +that could occur see `PYTHON-3406`_. + For a long but interesting read about the problems of Python locks in multithreaded contexts with ``fork()``, see http://bugs.python.org/issue6721. .. _not fork-safe: http://bugs.python.org/issue6721 +.. _OpenSSL: https://github.com/openssl/openssl/issues/19066 +.. _fork(): https://man7.org/linux/man-pages/man2/fork.2.html +.. _signal-safety(7): https://man7.org/linux/man-pages/man7/signal-safety.7.html +.. _async-signal-safe: https://man7.org/linux/man-pages/man7/signal-safety.7.html +.. _execve(2): https://man7.org/linux/man-pages/man2/execve.2.html +.. _getaddrinfo(3): https://man7.org/linux/man-pages/man3/gai_strerror.3.html +.. _PYTHON-3406: https://jira.mongodb.org/browse/PYTHON-3406 .. _connection-pooling: diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index c8330f32d0..7e4e4f10ca 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -82,7 +82,7 @@ ServerSelectionTimeoutError, WaitQueueTimeoutError, ) -from pymongo.lock import _create_lock, _release_locks +from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks from pymongo.pool import ConnectionClosedReason from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_selectors import writable_server_selector @@ -831,9 +831,10 @@ def __init__( self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) self._timeout = self.__options.timeout - # Add this client to the list of weakly referenced items. - # This will be used later if we fork. - MongoClient._clients[self._topology._topology_id] = self + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self def _init_background(self): self._topology = Topology(self._topology_settings) @@ -2177,7 +2178,7 @@ def _after_fork_child(): client._after_fork() -if hasattr(os, "register_at_fork"): +if _HAS_REGISTER_AT_FORK: # This will run in the same thread as the fork was called. # If we fork in a critical region on the same thread, it should break. # This is fine since we would never call fork directly from a critical region. diff --git a/pymongo/pool.py b/pymongo/pool.py index 6355692ac9..88f56b16e5 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -979,9 +979,11 @@ def _create_connection(address, options): This is a modified version of create_connection from CPython >= 2.7. """ host, port = address + # Avoid the getaddrinfo importlib deadlock on fork() described in PYTHON-3406. + host = host.encode("idna") # Check if dealing with a unix domain socket - if host.endswith(".sock"): + if host.endswith(b".sock"): if not hasattr(socket, "AF_UNIX"): raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) @@ -998,7 +1000,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != "localhost": + if socket.has_ipv6 and host != b"localhost": family = socket.AF_UNSPEC err = None diff --git a/pymongo/topology.py b/pymongo/topology.py index 84975ca076..87a566fa6e 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -36,7 +36,7 @@ WriteError, ) from pymongo.hello import Hello -from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock +from pymongo.lock import _create_lock from pymongo.monitor import SrvMonitor from pymongo.pool import PoolOptions from pymongo.server import Server @@ -174,13 +174,12 @@ def open(self): self._pid = pid elif pid != self._pid: self._pid = pid - if not _HAS_REGISTER_AT_FORK: - warnings.warn( - "MongoClient opened before fork. May not be entirely fork-safe, " - "proceed with caution. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) + warnings.warn( + "MongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe" + ) with self._lock: # Close servers and clear the pools. for server in self._servers.values(): diff --git a/test/__init__.py b/test/__init__.py index ada09db55e..b89cd88d26 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -21,8 +21,10 @@ import os import signal import socket +import subprocess import sys import threading +import time import traceback import unittest import warnings @@ -1011,8 +1013,28 @@ def fork( with self.fork(target=lambda: print('in child')) as proc: self.assertTrue(proc.pid) # Child process was started """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: + return + _print_threads.called = True + print_thread_tracebacks() + + _print_threads.called = False + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + ctx = multiprocessing.get_context("fork") - proc = ctx.Process(target=target) + proc = ctx.Process(target=_target) proc.start() try: yield proc # type: ignore @@ -1021,15 +1043,47 @@ def fork( pid = proc.pid assert pid if proc.exitcode is None: - # If it failed, SIGINT to get traceback and wait 10s. - os.kill(pid, signal.SIGINT) - proc.join(10) - proc.kill() - proc.join(1) + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. + proc.kill() + proc.join(1) self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") self.assertEqual(proc.exitcode, 0) +def print_thread_tracebacks() -> None: + """Print all Python thread tracebacks.""" + for thread_id, frame in sys._current_frames().items(): + sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") + traceback.print_stack(frame, file=sys.stderr) + + +def print_thread_stacks(pid: int) -> None: + """Print all C-level thread stacks for a given process id.""" + if sys.platform == "darwin": + cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] + else: + cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] + + try: + res = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" + ) + except Exception as exc: + sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") + else: + sys.stderr.write(res.stdout) + + class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" diff --git a/test/test_fork.py b/test/test_fork.py index ac103af385..422cd89f28 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -12,23 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test that pymongo is fork safe.""" +"""Test that pymongo resets its own locks after a fork.""" import os import sys import unittest from multiprocessing import Pipe -from bson.objectid import ObjectId - sys.path[0:0] = [""] from test import IntegrationTest -from test.utils import ( - ExceptionCatchingThread, - is_greenthread_patched, - rs_or_single_client, -) +from test.utils import is_greenthread_patched + +from bson.objectid import ObjectId @unittest.skipIf( @@ -91,55 +87,6 @@ def target(): passed, msg = parent_conn.recv() self.assertTrue(passed, msg) - def test_many_threaded(self): - # Fork randomly while doing operations. - clients = [] - for _ in range(10): - c = rs_or_single_client() - clients.append(c) - self.addCleanup(c.close) - - class ForkThread(ExceptionCatchingThread): - def __init__(self, runner, clients): - self.runner = runner - self.clients = clients - self.fork = False - - super().__init__(target=self.fork_behavior) - - def fork_behavior(self) -> None: - def action(client): - client.admin.command("ping") - return 0 - - for i in range(200): - # Pick a random client. - rc = self.clients[i % len(self.clients)] - if i % 50 == 0 and self.fork: - # Fork - def target(): - for c_ in self.clients: - action(c_) - c_.close() - - with self.runner.fork(target=target) as proc: - self.runner.assertTrue(proc.pid) - action(rc) - - threads = [ForkThread(self, clients) for _ in range(10)] - threads[-1].fork = True - for t in threads: - t.start() - - for t in threads: - t.join() - - for t in threads: - self.assertIsNone(t.exc) - - for c in clients: - c.close() - if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index aec7763272..3f51c335eb 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1591,7 +1591,8 @@ def run_scenario(self, spec, uri=None): if i < attempts - 1: print( f"Retrying after attempt {i+1} of {self.id()} failed with:\n" - f"{traceback.format_exc()}" + f"{traceback.format_exc()}", + file=sys.stderr, ) self.setUp() continue From dcb1327395c96b6401492737db9e923d8577b35e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 15 Sep 2022 13:31:45 -0700 Subject: [PATCH 0766/2111] PYTHON-3423 Make dnspython a required dependency (#1055) --- README.rst | 9 +++++---- doc/changelog.rst | 4 ++++ doc/installation.rst | 9 ++++++--- pymongo/uri_parser.py | 4 ++-- setup.py | 4 ++-- 5 files changed, 19 insertions(+), 11 deletions(-) diff --git a/README.rst b/README.rst index f60b8da680..576facb5b5 100644 --- a/README.rst +++ b/README.rst @@ -90,6 +90,11 @@ Dependencies PyMongo supports CPython 3.7+ and PyPy3.7+. +Required dependencies: + +Support for mongodb+srv:// URIs requires `dnspython +`_ + Optional dependencies: GSSAPI authentication requires `pykerberos @@ -104,10 +109,6 @@ MONGODB-AWS authentication requires `pymongo-auth-aws $ python -m pip install "pymongo[aws]" -Support for mongodb+srv:// URIs requires `dnspython -`_:: - - $ python -m pip install "pymongo[srv]" OCSP (Online Certificate Status Protocol) requires `PyOpenSSL `_, `requests diff --git a/doc/changelog.rst b/doc/changelog.rst index 7107e57333..24c80efa2e 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,10 @@ Changelog Changes in Version 4.3 ---------------------- +`dnspython `_ is now a required +dependency. This change makes PyMongo easier to install for use with "mongodb+srv://" +connection strings and `MongoDB Atlas `_. + PyMongo 4.3 brings a number of improvements including: - Added support for decoding BSON datetimes outside of the range supported diff --git a/doc/installation.rst b/doc/installation.rst index 788faf46cc..4355f771eb 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -30,6 +30,12 @@ Dependencies PyMongo supports CPython 3.7+ and PyPy3.7+. +Required dependencies: + +Support for mongodb+srv:// URIs requires `dnspython +`_ + + Optional dependencies: GSSAPI authentication requires `pykerberos @@ -44,10 +50,7 @@ dependency can be installed automatically along with PyMongo:: $ python3 -m pip install "pymongo[aws]" -Support for mongodb+srv:// URIs requires `dnspython -`_:: - $ python3 -m pip install "pymongo[srv]" :ref:`OCSP` requires `PyOpenSSL `_, `requests diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index cd18c067e7..f59af2e74c 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -469,8 +469,8 @@ def parse_uri( raise ConfigurationError( 'The "dnspython" module must be ' "installed to use mongodb+srv:// URIs. " - "To fix this error install pymongo with the srv extra:\n " - '%s -m pip install "pymongo[srv]"' % (python_path) + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) ) is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] diff --git a/setup.py b/setup.py index 0e983e4642..524c1303e6 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,7 @@ def build_extension(self, ext): "snappy": ["python-snappy"], "zstd": ["zstandard"], "aws": ["pymongo-auth-aws<2.0.0"], - "srv": ["dnspython>=1.16.0,<3.0.0"], + "srv": [], } # GSSAPI extras @@ -314,7 +314,7 @@ def build_extension(self, ext): author="The MongoDB Python Team", url="http://github.com/mongodb/mongo-python-driver", keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=[], + install_requires=["dnspython>=1.16.0,<3.0.0"], license="Apache License, Version 2.0", python_requires=">=3.7", classifiers=[ From eb028d0195ebfb21fa290688202251f275131709 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 16 Sep 2022 13:41:06 -0500 Subject: [PATCH 0767/2111] PYTHON-3420 Update ChangeStreamEvent type definition to include clusterTime (#1057) --- .../unified/change-streams-clusterTime.json | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 test/change_streams/unified/change-streams-clusterTime.json diff --git a/test/change_streams/unified/change-streams-clusterTime.json b/test/change_streams/unified/change-streams-clusterTime.json new file mode 100644 index 0000000000..55b4ae3fbc --- /dev/null +++ b/test/change_streams/unified/change-streams-clusterTime.json @@ -0,0 +1,82 @@ +{ + "description": "change-streams-clusterTime", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "clusterTime is present", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "ns": { + "db": "database0", + "coll": "collection0" + }, + "clusterTime": { + "$$exists": true + } + } + } + ] + } + ] +} From 0143881f0261a54a9d0ca99ad98ecc825dd89d56 Mon Sep 17 00:00:00 2001 From: Max Zhenzhera <59729293+maxzhenzhera@users.noreply.github.com> Date: Mon, 19 Sep 2022 23:19:14 +0300 Subject: [PATCH 0768/2111] PYTHON-3441 Add missing pool_ready method in monitoring docs example (#1060) --- pymongo/monitoring.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 90b8c1a3eb..c53e7e5727 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -125,6 +125,9 @@ class ConnectionPoolLogger(ConnectionPoolListener): def pool_created(self, event): logging.info("[pool {0.address}] pool created".format(event)) + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + def pool_cleared(self, event): logging.info("[pool {0.address}] pool cleared".format(event)) From 449cb8fb0fc596ce7d453aa3a48bad7f275d480e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 22 Sep 2022 15:14:40 -0500 Subject: [PATCH 0769/2111] PYTHON-2722 Improve performance of find/aggregate_raw_batches (#1047) --- bson/__init__.py | 80 +++++++++++++++++++----- bson/_cbsonmodule.c | 147 +++++++++++++++++++++++++++++++++++++++++--- bson/raw_bson.py | 47 ++++++++++---- pymongo/message.py | 9 ++- 4 files changed, 245 insertions(+), 38 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index dc2e29238a..c6a81d97ec 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -128,7 +128,6 @@ from array import array from mmap import mmap - try: from bson import _cbson # type: ignore[attr-defined] @@ -520,19 +519,32 @@ def _get_decimal128( if _USE_C: def _element_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions, + raw_array: bool = False, ) -> Any: - return _cbson._element_to_dict(data, position, obj_end, opts) + return _cbson._element_to_dict(data, position, obj_end, opts, raw_array) else: def _element_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions, + raw_array: bool = False, ) -> Any: """Decode a single key, value pair.""" element_type = data[position] position += 1 element_name, position = _get_c_string(data, view, position, opts) + if raw_array and element_type == ord(BSONARR): + _, end = _get_object_size(data, position, len(data)) + return element_name, view[position : end + 1], end + 1 try: value, position = _ELEMENT_GETTER[element_type]( data, view, position, obj_end, opts, element_name @@ -551,20 +563,30 @@ def _element_to_dict( _T = TypeVar("_T", bound=MutableMapping[Any, Any]) -def _raw_to_dict(data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T) -> _T: +def _raw_to_dict( + data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T, raw_array: bool = False +) -> _T: data, view = get_data_and_view(data) - return _elements_to_dict(data, view, position, obj_end, opts, result) + return _elements_to_dict(data, view, position, obj_end, opts, result, raw_array=raw_array) def _elements_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, result: Any = None + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions, + result: Any = None, + raw_array: bool = False, ) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() end = obj_end - 1 while position < end: - key, value, position = _element_to_dict(data, view, position, obj_end, opts) + key, value, position = _element_to_dict( + data, view, position, obj_end, opts, raw_array=raw_array + ) result[key] = value if position != obj_end: raise InvalidBSON("bad object or element length") @@ -1119,14 +1141,44 @@ def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[A return doc +def _array_of_documents_to_buffer(view: memoryview) -> bytes: + # Extract the raw bytes of each document. + position = 0 + _, end = _get_object_size(view, position, len(view)) + position += 4 + buffers: List[memoryview] = [] + append = buffers.append + while position < end - 1: + # Just skip the keys. + while view[position] != 0: + position += 1 + position += 1 + obj_size, _ = _get_object_size(view, position, end) + append(view[position : position + obj_size]) + position += obj_size + if position != end: + raise InvalidBSON("bad object or element length") + return b"".join(buffers) + + +if _USE_C: + _array_of_documents_to_buffer = _cbson._array_of_documents_to_buffer # noqa: F811 + + def _convert_raw_document_lists_to_streams(document: Any) -> None: + """Convert raw array of documents to a stream of BSON documents.""" cursor = document.get("cursor") - if cursor: - for key in ("firstBatch", "nextBatch"): - batch = cursor.get(key) - if batch: - stream = b"".join(doc.raw for doc in batch) - cursor[key] = [stream] + if not cursor: + return + for key in ("firstBatch", "nextBatch"): + batch = cursor.get(key) + if not batch: + continue + data = _array_of_documents_to_buffer(batch) + if data: + cursor[key] = [data] + else: + cursor[key] = [] def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) -> List[Any]: diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 019f049bb5..8678e8050b 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1615,7 +1615,7 @@ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned* position, unsigned char type, - unsigned max, const codec_options_t* options) { + unsigned max, const codec_options_t* options, int raw_array) { struct module_state *state = GETSTATE(self); PyObject* value = NULL; switch (type) { @@ -1712,11 +1712,20 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (size < BSON_MIN_SIZE || max < size) { goto invalid; } + end = *position + size - 1; /* Check for bad eoo */ if (buffer[end]) { goto invalid; } + + if (raw_array != 0) { + // Treat it as a binary buffer. + value = PyBytes_FromStringAndSize(buffer + *position, size); + *position += size; + break; + } + *position += 4; value = PyList_New(0); @@ -1740,7 +1749,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } to_append = get_value(self, name, buffer, position, bson_type, - max - (unsigned)key_size, options); + max - (unsigned)key_size, options, raw_array); Py_LeaveRecursiveCall(); if (!to_append) { Py_DECREF(value); @@ -2464,6 +2473,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, static int _element_to_dict(PyObject* self, const char* string, unsigned position, unsigned max, const codec_options_t* options, + int raw_array, PyObject** name, PyObject** value) { unsigned char type = (unsigned char)string[position++]; size_t name_length = strlen(string + position); @@ -2504,7 +2514,7 @@ static int _element_to_dict(PyObject* self, const char* string, } position += (unsigned)name_length + 1; *value = get_value(self, *name, string, &position, type, - max - position, options); + max - position, options, raw_array); if (!*value) { Py_DECREF(*name); return -1; @@ -2520,12 +2530,13 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { unsigned position; unsigned max; int new_position; + int raw_array = 0; PyObject* name; PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OIIO&", &bson, &position, &max, - convert_codec_options, &options)) { + if (!PyArg_ParseTuple(args, "OIIO&p", &bson, &position, &max, + convert_codec_options, &options, &raw_array)) { return NULL; } @@ -2535,8 +2546,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { } string = PyBytes_AS_STRING(bson); - new_position = _element_to_dict(self, string, position, max, &options, - &name, &value); + new_position = _element_to_dict(self, string, position, max, &options, raw_array, &name, &value); if (new_position < 0) { return NULL; } @@ -2560,13 +2570,14 @@ static PyObject* _elements_to_dict(PyObject* self, const char* string, if (!dict) { return NULL; } + int raw_array = 0; while (position < max) { PyObject* name = NULL; PyObject* value = NULL; int new_position; new_position = _element_to_dict( - self, string, position, max, options, &name, &value); + self, string, position, max, options, raw_array, &name, &value); if (new_position < 0) { Py_DECREF(dict); return NULL; @@ -2649,7 +2660,6 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { } string = (char*)view.buf; - memcpy(&size, string, 4); size = (int32_t)BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE) { @@ -2797,6 +2807,124 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { return result; } + +static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* args) { + uint32_t size; + uint32_t value_length; + uint32_t position = 0; + buffer_t buffer; + const char* string; + PyObject* arr; + PyObject* result = NULL; + Py_buffer view = {0}; + + if (!PyArg_ParseTuple(args, "O", &arr)) { + return NULL; + } + + if (!_get_buffer(arr, &view)) { + return NULL; + } + + buffer = pymongo_buffer_new(); + if (!buffer) { + PyBuffer_Release(&view); + return NULL; + } + + string = (char*)view.buf; + + if (view.len < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + } + goto done; + } + + memcpy(&size, string, 4); + size = BSON_UINT32_FROM_LE(size); + /* save space for length */ + if (pymongo_buffer_save_space(buffer, size) == -1) { + goto fail; + } + pymongo_buffer_update_position(buffer, 0); + + position += 4; + while (position < size - 1) { + // Verify the value is an object. + unsigned char type = (unsigned char)string[position]; + if (type != 3) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "array element was not an object"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + // Just skip the keys. + position = position + strlen(string + position) + 1; + + if (position >= size || (size - position) < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid array content"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&value_length, string + position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); + if (value_length < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid message size"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (view.len < size) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "objsize too large"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "bad eoo"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { + goto fail; + } + position += value_length; + } + + /* objectify buffer */ + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); + goto done; +fail: + result = NULL; +done: + PyBuffer_Release(&view); + pymongo_buffer_free(buffer); + return result; +} + + static PyMethodDef _CBSONMethods[] = { {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, "convert a dictionary to a string containing its BSON representation."}, @@ -2806,6 +2934,7 @@ static PyMethodDef _CBSONMethods[] = { "convert binary data to a sequence of documents."}, {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, + {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, {NULL, NULL, 0, NULL} }; diff --git a/bson/raw_bson.py b/bson/raw_bson.py index ca7207f0a2..6a80ea70ca 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -60,6 +60,23 @@ from bson.son import SON +def _inflate_bson( + bson_bytes: bytes, codec_options: CodecOptions, raw_array: bool = False +) -> Mapping[Any, Any]: + """Inflates the top level fields of a BSON document. + + :Parameters: + - `bson_bytes`: the BSON bytes that compose this document + - `codec_options`: An instance of + :class:`~bson.codec_options.CodecOptions` whose ``document_class`` + must be :class:`RawBSONDocument`. + """ + # Use SON to preserve ordering of elements. + return _raw_to_dict( + bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON(), raw_array=raw_array + ) + + class RawBSONDocument(Mapping[str, Any]): """Representation for a MongoDB document that provides access to the raw BSON bytes that compose it. @@ -111,7 +128,7 @@ class from the standard library so it can be used like a read-only # it refers to this class RawBSONDocument. if codec_options is None: codec_options = DEFAULT_RAW_BSON_OPTIONS - elif codec_options.document_class is not RawBSONDocument: + elif not issubclass(codec_options.document_class, RawBSONDocument): raise TypeError( "RawBSONDocument cannot use CodecOptions with document " "class %s" % (codec_options.document_class,) @@ -135,9 +152,13 @@ def __inflated(self) -> Mapping[str, Any]: # We already validated the object's size when this document was # created, so no need to do that again. # Use SON to preserve ordering of elements. - self.__inflated_doc = _inflate_bson(self.__raw, self.__codec_options) + self.__inflated_doc = self._inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc + @staticmethod + def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: + return _inflate_bson(bson_bytes, codec_options) + def __getitem__(self, item: str) -> Any: return self.__inflated[item] @@ -153,23 +174,23 @@ def __eq__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return "RawBSONDocument(%r, codec_options=%r)" % (self.raw, self.__codec_options) + return "%s(%r, codec_options=%r)" % ( + self.__class__.__name__, + self.raw, + self.__codec_options, + ) -def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: - """Inflates the top level fields of a BSON document. +class _RawArrayBSONDocument(RawBSONDocument): + """A RawBSONDocument that only expands sub-documents and arrays when accessed.""" - :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options`: An instance of - :class:`~bson.codec_options.CodecOptions` whose ``document_class`` - must be :class:`RawBSONDocument`. - """ - # Use SON to preserve ordering of elements. - return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON()) + @staticmethod + def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: + return _inflate_bson(bson_bytes, codec_options, raw_array=True) DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) +_RAW_ARRAY_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=_RawArrayBSONDocument) """The default :class:`~bson.codec_options.CodecOptions` for :class:`RawBSONDocument`. """ diff --git a/pymongo/message.py b/pymongo/message.py index 8f37fdc062..960832cb9e 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -29,7 +29,12 @@ import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode from bson.int64 import Int64 -from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from bson.raw_bson import ( + _RAW_ARRAY_BSON_OPTIONS, + DEFAULT_RAW_BSON_OPTIONS, + RawBSONDocument, + _inflate_bson, +) from bson.son import SON try: @@ -1379,7 +1384,7 @@ def raw_response(self, cursor_id=None, user_fields={}): # noqa: B006 user_fields is used to determine which fields must not be decoded """ inflated_response = _decode_selective( - RawBSONDocument(self.payload_document), user_fields, DEFAULT_RAW_BSON_OPTIONS + RawBSONDocument(self.payload_document), user_fields, _RAW_ARRAY_BSON_OPTIONS ) return [inflated_response] From 2af12e64639b55c2d8c5d52892d42fab89355220 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 26 Sep 2022 15:48:48 -0700 Subject: [PATCH 0770/2111] PYTHON-3444 MyPy Errors With Version 0.981 (#1063) --- test/test_auth.py | 4 ++-- test/test_bson.py | 15 +++++---------- test/test_change_stream.py | 3 +-- test/test_database.py | 5 ++--- 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/test/test_auth.py b/test/test_auth.py index 20d53ef24b..69ed27bda0 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -329,8 +329,8 @@ def auth_string(user, password): bad_user = MongoClient(auth_string("not-user", SASL_PASS)) bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, "ping") # type: ignore[arg-type] - self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") # type: ignore[arg-type] + self.assertRaises(OperationFailure, bad_user.admin.command, "ping") + self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") class TestSCRAMSHA1(IntegrationTest): diff --git a/test/test_bson.py b/test/test_bson.py index e3c4a3a028..a8fd1fef45 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -119,8 +119,7 @@ def tzname(self, dt): class TestBSON(unittest.TestCase): def assertInvalid(self, data): - # Remove type ignore after: https://github.com/python/mypy/issues/13220 - self.assertRaises(InvalidBSON, decode, data) # type: ignore[arg-type] + self.assertRaises(InvalidBSON, decode, data) def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): @@ -1029,17 +1028,14 @@ def test_unicode_decode_error_handler(self): # Ensure that strict mode raises an error. for invalid in [invalid_key, invalid_val, invalid_both]: - # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( InvalidBSON, - decode, # type: ignore[arg-type] + decode, invalid, CodecOptions(unicode_decode_error_handler="strict"), ) - self.assertRaises( - InvalidBSON, decode, invalid, CodecOptions() # type: ignore[arg-type] - ) - self.assertRaises(InvalidBSON, decode, invalid) # type: ignore[arg-type] + self.assertRaises(InvalidBSON, decode, invalid, CodecOptions()) + self.assertRaises(InvalidBSON, decode, invalid) # Test all other error handlers. for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]: @@ -1056,10 +1052,9 @@ def test_unicode_decode_error_handler(self): dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk")) self.assertEqual(dec, {"keystr": "foobar"}) - # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.assertRaises( InvalidBSON, - decode, # type: ignore[arg-type] + decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk"), ) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 18a0ec84c4..62d7abee62 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1080,9 +1080,8 @@ def setFailPoint(self, scenario_dict): fail_cmd = SON([("configureFailPoint", "failCommand")]) fail_cmd.update(fail_point) client_context.client.admin.command(fail_cmd) - # Remove type ignore after: https://github.com/python/mypy/issues/13220 self.addCleanup( - client_context.client.admin.command, # type: ignore[arg-type] + client_context.client.admin.command, "configureFailPoint", fail_cmd["configureFailPoint"], mode="off", diff --git a/test/test_database.py b/test/test_database.py index a1c0439089..d49ac8324f 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -604,14 +604,13 @@ def test_command_max_time_ms(self): try: db = self.client.pymongo_test db.command("count", "test") - # Remove type ignore after: https://github.com/python/mypy/issues/13220 - self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) # type: ignore[arg-type] + self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. db.command("aggregate", "test", pipeline=pipeline, cursor={}) self.assertRaises( ExecutionTimeout, - db.command, # type: ignore[arg-type] + db.command, "aggregate", "test", pipeline=pipeline, From c874c96e29e1b2e19b6bc456eb476e8512e22683 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 27 Sep 2022 15:31:20 -0700 Subject: [PATCH 0771/2111] PYTHON-3232 Improved change stream event visibility for C2C Replication (#1062) --- doc/changelog.rst | 4 + pymongo/aggregation.py | 2 + pymongo/change_stream.py | 7 + pymongo/collection.py | 8 +- pymongo/database.py | 6 + pymongo/mongo_client.py | 6 + .../change-streams-disambiguatedPaths.json | 252 +++++++++ .../change-streams-showExpandedEvents.json | 517 ++++++++++++++++++ test/utils.py | 2 + 9 files changed, 803 insertions(+), 1 deletion(-) create mode 100644 test/change_streams/unified/change-streams-disambiguatedPaths.json create mode 100644 test/change_streams/unified/change-streams-showExpandedEvents.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 24c80efa2e..b8f346e571 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -22,6 +22,10 @@ PyMongo 4.3 brings a number of improvements including: deadlocks are still possible because libraries that PyMongo depends like OpenSSL cannot be made fork() safe in multithreaded applications. (`PYTHON-2484`_). For more info see :ref:`pymongo-fork-safe`. +- When used with MongoDB 6.0+, :class:`~pymongo.change_stream.ChangeStream` s + now allow for new types of events (such as DDL and C2C replication events) + to be recorded with the new parameter ``show_expanded_events`` + that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`. Bug fixes ......... diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index 62fe4bd055..a13f164f53 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -40,6 +40,7 @@ def __init__( user_fields=None, result_processor=None, comment=None, + show_expanded_events=None, ): if "explain" in options: raise ConfigurationError( @@ -60,6 +61,7 @@ def __init__( options["let"] = let if comment is not None: options["comment"] = comment + self._options = options # This is the batchSize that will be used for setting the initial diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 0edf513a3c..775f93c79a 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -109,6 +109,7 @@ def __init__( start_after: Optional[Mapping[str, Any]], comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> None: if pipeline is None: pipeline = [] @@ -143,6 +144,7 @@ def __init__( self._comment = comment self._closed = False self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events # Initialize cursor. self._cursor = self._create_cursor() @@ -175,6 +177,10 @@ def _change_stream_options(self): if self._start_at_operation_time is not None: options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + return options def _command_options(self): @@ -230,6 +236,7 @@ def _run_aggregation_cmd(self, session, explicit_session): explicit_session, result_processor=self._process_result, comment=self._comment, + show_expanded_events=self._show_expanded_events, ) return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), session diff --git a/pymongo/collection.py b/pymongo/collection.py index 9a9ba56618..8f1afc575d 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2495,6 +2495,7 @@ def watch( start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> CollectionChangeStream[_DocumentType]: """Watch changes on this collection. @@ -2579,12 +2580,16 @@ def watch( This option and `resume_after` are mutually exclusive. - `comment` (optional): A user-provided comment to attach to this command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. + Added ``full_document_before_change`` parameter. .. versionchanged:: 4.1 Added ``comment`` parameter. @@ -2615,6 +2620,7 @@ def watch( start_after, comment, full_document_before_change, + show_expanded_events, ) @_csot.apply diff --git a/pymongo/database.py b/pymongo/database.py index 4f87a58dda..59328a1b53 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -547,6 +547,7 @@ def watch( start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> DatabaseChangeStream[_DocumentType]: """Watch changes on this database. @@ -624,10 +625,14 @@ def watch( This option and `resume_after` are mutually exclusive. - `comment` (optional): A user-provided comment to attach to this command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + .. versionchanged:: 4.2 Added ``full_document_before_change`` parameter. @@ -657,6 +662,7 @@ def watch( start_after, comment, full_document_before_change, + show_expanded_events=show_expanded_events, ) def _command( diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7e4e4f10ca..7d16e58777 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -895,6 +895,7 @@ def watch( start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, ) -> ChangeStream[_DocumentType]: """Watch changes on this cluster. @@ -972,10 +973,14 @@ def watch( This option and `resume_after` are mutually exclusive. - `comment` (optional): A user-provided comment to attach to this command. + - `show_expanded_events` (optional): Include expanded events such as DDL events like `dropIndexes`. :Returns: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + .. versionchanged:: 4.2 Added ``full_document_before_change`` parameter. @@ -1005,6 +1010,7 @@ def watch( start_after, comment, full_document_before_change, + show_expanded_events=show_expanded_events, ) @property diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json new file mode 100644 index 0000000000..91d8e66da2 --- /dev/null +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -0,0 +1,252 @@ +{ + "description": "disambiguatedPaths", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "6.1.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "disambiguatedPaths is not present when showExpandedEvents is false/unset", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "$$exists": false + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths is present on updateDescription when an ambiguous path is present", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.1": [ + "a", + "1" + ] + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths returns array indices as integers", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": [ + { + "1": 1 + } + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.0.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.0.1": [ + "a", + { + "$$type": "int" + }, + "1" + ] + } + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-showExpandedEvents.json b/test/change_streams/unified/change-streams-showExpandedEvents.json new file mode 100644 index 0000000000..3eed2f534a --- /dev/null +++ b/test/change_streams/unified/change-streams-showExpandedEvents.json @@ -0,0 +1,517 @@ +{ + "description": "change-streams-showExpandedEvents", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded-replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "shardedDb", + "client": "client0", + "databaseName": "shardedDb" + } + }, + { + "database": { + "id": "adminDb", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "shardedCollection", + "database": "shardedDb", + "collectionName": "shardedCollection" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "when provided, showExpandedEvents is sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when omitted, showExpandedEvents is not sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": { + "$$exists": false + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when showExpandedEvents is true, new fields on change stream events are handled appropriately", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "a": 1 + } + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "foo", + "dropTarget": true + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "collectionUUID": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "operationDescription": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "foo" + }, + "operationDescription": { + "dropTarget": { + "$$exists": true + }, + "to": { + "db": "database0", + "coll": "foo" + } + } + } + } + ] + }, + { + "description": "when showExpandedEvents is true, createIndex events are reported", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "operationType": { + "$ne": "create" + } + } + } + ], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, dropIndexes events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropIndex", + "object": "collection0", + "arguments": { + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "dropIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events on views are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, modify events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_2" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "command": { + "collMod": "collection0" + }, + "commandName": "collMod" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "modify" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, shardCollection events are reported", + "runOnRequirements": [ + { + "topologies": [ + "sharded-replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createChangeStream", + "object": "shardedCollection", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "adminDb", + "arguments": { + "command": { + "shardCollection": "shardedDb.shardedCollection", + "key": { + "_id": 1 + } + }, + "commandName": "shardCollection" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "shardCollection" + } + } + ] + } + ] +} diff --git a/test/utils.py b/test/utils.py index 1ac726d2d4..33a594d15a 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1127,6 +1127,8 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac arguments["index_or_name"] = arguments.pop(arg_name) elif opname == "rename" and arg_name == "to": arguments["new_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "dropTarget": + arguments["dropTarget"] = arguments.pop(arg_name) elif arg_name == "cursorType": cursor_type = arguments.pop(arg_name) if cursor_type == "tailable": From c9ac5a5cf8816893376912ca9d5fc024fbf28e03 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Sep 2022 16:00:15 -0700 Subject: [PATCH 0772/2111] PYTHON-3447 Add back empty tls extra to avoid pip warnings (#1065) --- .github/workflows/test-python.yml | 2 +- README.rst | 2 +- doc/installation.rst | 2 +- setup.py | 3 ++- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 6d5f26c503..d451197e4e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -60,7 +60,7 @@ jobs: - name: Install dependencies run: | python -m pip install -U pip mypy - pip install -e ".[zstd, srv, encryption, ocsp]" + pip install -e ".[zstd, encryption, ocsp]" - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo diff --git a/README.rst b/README.rst index 576facb5b5..f15ac48098 100644 --- a/README.rst +++ b/README.rst @@ -137,7 +137,7 @@ Client-Side Field Level Encryption requires `pymongocrypt You can install all dependencies automatically with the following command:: - $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption]" + $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" Additional dependencies are: diff --git a/doc/installation.rst b/doc/installation.rst index 4355f771eb..b02949335b 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -77,7 +77,7 @@ Wire protocol compression with zstandard requires `zstandard You can install all dependencies automatically with the following command:: - $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption]" + $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" Installing from source ---------------------- diff --git a/setup.py b/setup.py index 524c1303e6..2706facf90 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,8 @@ def build_extension(self, ext): "snappy": ["python-snappy"], "zstd": ["zstandard"], "aws": ["pymongo-auth-aws<2.0.0"], - "srv": [], + "srv": [], # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. + "tls": [], # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. } # GSSAPI extras From 64d7d6da8af8bf10a8d1a58482a610fe64507d38 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 9 Mar 2022 11:13:18 -0800 Subject: [PATCH 0773/2111] PYTHON-2970 Prioritize electionId over setVersion for stale primary check (#845) --- doc/changelog.rst | 3 + pymongo/topology_description.py | 29 ++-- .../rs/electionId_precedence_setVersion.json | 92 +++++++++++ .../rs/null_election_id.json | 30 ++-- .../rs/secondary_ignore_ok_0.json | 2 +- .../rs/set_version_can_rollback.json | 149 ++++++++++++++++++ ...tversion_equal_max_without_electionid.json | 84 ++++++++++ ...on_greaterthan_max_without_electionid.json | 84 ++++++++++ .../rs/setversion_without_electionid.json | 12 +- .../rs/use_setversion_without_electionid.json | 32 ++-- 10 files changed, 467 insertions(+), 50 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json create mode 100644 test/discovery_and_monitoring/rs/set_version_can_rollback.json create mode 100644 test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json create mode 100644 test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json diff --git a/doc/changelog.rst b/doc/changelog.rst index b8f346e571..eb9d1233bb 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -33,6 +33,8 @@ Bug fixes - Fixed a bug where :class:`~pymongo.change_stream.ChangeStream` would allow an app to retry calling ``next()`` or ``try_next()`` even after non-resumable errors (`PYTHON-3389`_). +- Fixed a bug where the client could be unable to discover the new primary + after a simultaneous replica set election and reconfig (`PYTHON-2970`_). Issues Resolved ............... @@ -42,6 +44,7 @@ in this release. .. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824 .. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484 +.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 .. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389 .. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425 diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b32a86e2d7..552d8f719a 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -17,6 +17,7 @@ from random import sample from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError @@ -532,24 +533,16 @@ def _update_rs_from_primary( sds.pop(server_description.address) return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - max_election_tuple = max_set_version, max_election_id - if None not in server_description.election_tuple: - if ( - None not in max_election_tuple - and max_election_tuple > server_description.election_tuple - ): - - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - max_election_id = server_description.election_id - - if server_description.set_version is not None and ( - max_set_version is None or server_description.set_version > max_set_version - ): - - max_set_version = server_description.set_version + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe >= max_election_safe: + max_election_id, max_set_version = new_election_tuple + else: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id # We've heard from the primary. Is it the same primary as before? for server in sds.values(): diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..a7b49e2b97 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 62120e8448..8eb519595a 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -123,16 +123,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, @@ -174,16 +177,19 @@ "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index 4c1cb011a5..ee9519930b 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "New primary", + "description": "Secondary ignored when ok is zero", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json new file mode 100644 index 0000000000..28ecbeefca --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,149 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "_comment": "Response from new primary with newer election Id", + "responses": [ + [ + "b:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "_comment": "Response from stale primary", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "hello": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..91e84d4fa0 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..b15fd5c1a7 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 2f68287f1d..f59c162ae1 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion is ignored if there is no electionId", + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -63,14 +63,14 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, "electionId": null }, "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, + "type": "Unknown", + "setName": null, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 421ff57c8d..6dd753d5d8 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -71,20 +71,23 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -115,22 +118,25 @@ "outcome": { "servers": { "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { "type": "Unknown", "setName": null, "electionId": null - }, - "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 2 } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { - "$oid": "000000000000000000000001" + "$oid": "000000000000000000000002" } } } From 85f0987e1d6609ea71d5d6840add033608d22877 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 29 Sep 2022 14:31:47 -0700 Subject: [PATCH 0774/2111] PYTHON-3400 Only use new electionId/setVersion logic on 6.0+ --- pymongo/topology_description.py | 38 +++- .../rs/electionId_precedence_setVersion.json | 6 +- .../rs/null_election_id-pre-6.0.json | 203 ++++++++++++++++++ .../rs/null_election_id.json | 8 +- .../rs/secondary_ignore_ok_0-pre-6.0.json | 83 +++++++ .../rs/set_version_can_rollback.json | 14 +- ...tversion_equal_max_without_electionid.json | 4 +- ...on_greaterthan_max_without_electionid.json | 4 +- ...setversion_without_electionid-pre-6.0.json | 84 ++++++++ .../rs/setversion_without_electionid.json | 4 +- ...setversion_without_electionid-pre-6.0.json | 138 ++++++++++++ .../rs/use_setversion_without_electionid.json | 6 +- .../rediscover-quickly-after-step-down.json | 2 +- test/utils.py | 5 +- 14 files changed, 559 insertions(+), 40 deletions(-) create mode 100644 test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json create mode 100644 test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json create mode 100644 test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json create mode 100644 test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 552d8f719a..df11a6ec75 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -531,19 +531,35 @@ def _update_rs_from_primary( # We found a primary but it doesn't have the replica_set_name # provided by the user. sds.pop(server_description.address) - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - new_election_tuple = server_description.election_id, server_description.set_version - max_election_tuple = max_election_id, max_set_version - new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) - max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) - if new_election_safe >= max_election_safe: - max_election_id, max_set_version = new_election_tuple - else: - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple = server_description.set_version, server_description.election_id + max_election_tuple = max_set_version, max_election_id + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version + # We've heard from the primary. Is it the same primary as before? for server in sds.values(): if ( diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json index a7b49e2b97..2fcea2bf66 100644 --- a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ], [ @@ -58,7 +58,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json new file mode 100644 index 0000000000..f1fa2e252e --- /dev/null +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -0,0 +1,203 @@ +{ + "description": "Pre 6.0 Primaries with and without electionIds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 8eb519595a..8a99a78475 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -18,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -66,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -116,7 +116,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -170,7 +170,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json new file mode 100644 index 0000000000..054425c84c --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json @@ -0,0 +1,83 @@ +{ + "description": "Pre 6.0 New primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json index 28ecbeefca..1cc608a344 100644 --- a/test/discovery_and_monitoring/rs/set_version_can_rollback.json +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "hello": true, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -50,13 +50,12 @@ } }, { - "_comment": "Response from new primary with newer election Id", "responses": [ [ "b:27017", { "ok": 1, - "hello": true, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -68,7 +67,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -98,13 +97,12 @@ } }, { - "_comment": "Response from stale primary", "responses": [ [ "a:27017", { "ok": 1, - "hello": true, + "helloOk": true, "isWritablePrimary": true, "hosts": [ "a:27017", @@ -116,7 +114,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json index 91e84d4fa0..3669511c5a 100644 --- a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json index b15fd5c1a7..97870d71d5 100644 --- a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..c2e2fe5b9b --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -0,0 +1,84 @@ +{ + "description": "Pre 6.0 setVersion is ignored if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index f59c162ae1..256fafe108 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..5c58b65614 --- /dev/null +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -0,0 +1,138 @@ +{ + "description": "Pre 6.0 Record max setVersion, even from primary without electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 6 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 6dd753d5d8..551f3e12c2 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -64,7 +64,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -111,7 +111,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json index 0ad575cc9d..c7c2494857 100644 --- a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -117,7 +117,7 @@ "replSetFreeze": 0 }, "readPreference": { - "mode": "Secondary" + "mode": "secondary" }, "commandName": "replSetFreeze" } diff --git a/test/utils.py b/test/utils.py index 33a594d15a..6f35b48538 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1004,10 +1004,7 @@ def assertion_context(msg): try: yield except AssertionError as exc: - msg = "%s (%s)" % (exc, msg) - exc_type, exc_val, exc_tb = sys.exc_info() - assert exc_type is not None - raise exc_type(exc_val).with_traceback(exc_tb) + raise AssertionError(f"{msg}: {exc}") def parse_spec_options(opts): From 774154e934509dd0c7d854f6491304a295845dd6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Oct 2022 14:34:59 -0700 Subject: [PATCH 0775/2111] PYTHON-3451 Stop passing bytes to getaddrinfo to fix eventlet support (#1066) --- pymongo/pool.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 88f56b16e5..6355692ac9 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -979,11 +979,9 @@ def _create_connection(address, options): This is a modified version of create_connection from CPython >= 2.7. """ host, port = address - # Avoid the getaddrinfo importlib deadlock on fork() described in PYTHON-3406. - host = host.encode("idna") # Check if dealing with a unix domain socket - if host.endswith(b".sock"): + if host.endswith(".sock"): if not hasattr(socket, "AF_UNIX"): raise ConnectionFailure("UNIX-sockets are not supported on this system") sock = socket.socket(socket.AF_UNIX) @@ -1000,7 +998,7 @@ def _create_connection(address, options): # is 'localhost' (::1 is fine). Avoids slow connect issues # like PYTHON-356. family = socket.AF_INET - if socket.has_ipv6 and host != b"localhost": + if socket.has_ipv6 and host != "localhost": family = socket.AF_UNSPEC err = None From eaf0e6d84f41136ce32f1aa61a8cb48b5895cfaa Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 7 Oct 2022 12:05:20 -0700 Subject: [PATCH 0776/2111] PYTHON-3445 Improve documentation for custom readPreference tags (#1068) --- README.rst | 2 +- RELEASE.rst | 2 +- pymongo/read_preferences.py | 13 ++++++++++++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index f15ac48098..115085ac13 100644 --- a/README.rst +++ b/README.rst @@ -25,7 +25,7 @@ For issues with, questions about, or feedback for PyMongo, please look into our `support channels `_. Please do not email any of the PyMongo developers directly with issues or questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. +Forums `_. Bugs / Feature Requests ======================= diff --git a/RELEASE.rst b/RELEASE.rst index ad18446a0f..83c6c0f1d4 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -84,7 +84,7 @@ Doing a Release 13. Publish the release version in Jira. 14. Announce the release on: - https://developer.mongodb.com/community/forums/c/community/release-notes/ + https://www.mongodb.com/community/forums/c/announcements/driver-releases/110 15. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index ccb635bec0..46f029ed31 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -151,6 +151,13 @@ def tag_sets(self) -> _TagSets: set, ``{}``, means "read from any member that matches the mode, ignoring tags." MongoClient tries each set of tags in turn until it finds a set of tags with at least one matching member. + For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) .. seealso:: `Data-Center Awareness `_ @@ -518,7 +525,11 @@ def make_read_preference( class ReadPreference(object): - """An enum that defines the read preference modes supported by PyMongo. + """An enum that defines some commonly used read preference modes. + + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) See :doc:`/examples/high_availability` for code examples. From 8abeb882b4ebd6f72ba2cf032ac545d70407a3b7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Oct 2022 12:48:42 -0700 Subject: [PATCH 0777/2111] PYTHON-3452 Skip SDAM test that relies on retryWrites on MMAPv1 (#1071) --- test/unified_format.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/unified_format.py b/test/unified_format.py index 3f51c335eb..005e91f6b6 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -936,6 +936,7 @@ def maybe_skip_test(self, spec): if ( "Dirty explicit session is discarded" in spec["description"] or "Dirty implicit session is discarded" in spec["description"] + or "Cancel server check" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") if "Client side error in command starting transaction" in spec["description"]: From 24a343b830e7382576691712873893902159ee4f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 11 Oct 2022 14:14:59 -0500 Subject: [PATCH 0778/2111] PYTHON-3468 Test failures in test_srv_polling.TestSrvPolling (#1073) --- test/test_srv_polling.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 0b54171dc9..7a6c61ad21 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -130,7 +130,14 @@ def assert_nodelist_nochange(self, expected_nodelist, client): (WAIT_TIME * 10) seconds. Also check that the resolver is called at least once. """ - sleep(WAIT_TIME * 10) + + def predicate(): + if set(expected_nodelist) == set(self.get_nodelist(client)): + return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return False + + wait_until(predicate, "Node list equals expected nodelist", timeout=100 * WAIT_TIME) + nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): msg = "Client nodelist %s changed unexpectedly (expected %s)" From 775c0203ca0df215b19b6d2e352e128dea14582d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 11 Oct 2022 14:16:08 -0500 Subject: [PATCH 0779/2111] PYTHON-3453 Test failure - Enterprise Auth Windows 64 Python 3.7 (#1072) --- .evergreen/config.yml | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9d016f4d8a..3047ab475c 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -493,8 +493,27 @@ functions: silent: true working_dir: "src" script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} SASL_HOST=${sasl_host} SASL_PORT=${sasl_port} SASL_USER=${sasl_user} SASL_PASS=${sasl_pass} SASL_DB=${sasl_db} PRINCIPAL=${principal} GSSAPI_DB=${gssapi_db} KEYTAB_BASE64=${keytab_base64} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh + cat < prepare_enterprise_auth.sh + export SASL_HOST=${sasl_host} + export SASL_PORT=${sasl_port} + export SASL_USER=${sasl_user} + export SASL_PASS=${sasl_pass} + export SASL_DB=${sasl_db} + export PRINCIPAL=${principal} + export GSSAPI_DB=${gssapi_db} + export KEYTAB_BASE64=${keytab_base64} + EOT + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + # Disable xtrace (just in case it was accidentally set). + set +x + . ./prepare_enterprise_auth.sh + rm -f ./prepare_enterprise_auth.sh + + PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh "run atlas tests": - command: shell.exec From 4a5e0f6655c2ecf0e807a9614dd2b166c0e4e4f2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Oct 2022 10:21:06 -0500 Subject: [PATCH 0780/2111] PYTHON-3313 Cache AWS Credentials Where Possible (#982) --- .evergreen/run-mongodb-aws-ecs-test.sh | 2 +- .evergreen/run-mongodb-aws-test.sh | 2 +- doc/changelog.rst | 3 ++ pymongo/auth_aws.py | 17 ++++++++ test/auth_aws/test_auth_aws.py | 58 ++++++++++++++++++++++++++ 5 files changed, 80 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 83f3975e9e..fcadea208c 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -30,7 +30,7 @@ authtest () { $PYTHON -m pip install --upgrade wheel setuptools pip cd src $PYTHON -m pip install '.[aws]' - $PYTHON test/auth_aws/test_auth_aws.py + $PYTHON test/auth_aws/test_auth_aws.py -v cd - } diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh index 9a33507cc8..b2a4fd146a 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/run-mongodb-aws-test.sh @@ -61,7 +61,7 @@ authtest () { . venvaws/bin/activate fi python -m pip install '.[aws]' - python test/auth_aws/test_auth_aws.py + python test/auth_aws/test_auth_aws.py -v deactivate rm -rf venvaws } diff --git a/doc/changelog.rst b/doc/changelog.rst index eb9d1233bb..c11ac95888 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -26,6 +26,9 @@ PyMongo 4.3 brings a number of improvements including: now allow for new types of events (such as DDL and C2C replication events) to be recorded with the new parameter ``show_expanded_events`` that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`. +- PyMongo now internally caches AWS credentials that it fetches from AWS + endpoints, to avoid rate limitations. The cache is cleared when the + credentials expire or an error is encountered. Bug fixes ......... diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index 4b2af35ea4..e84465ea66 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -27,6 +27,17 @@ def __init__(self, credentials): _HAVE_MONGODB_AWS = False +try: + from pymongo_auth_aws.auth import set_cached_credentials, set_use_cached_credentials + + # Enable credential caching. + set_use_cached_credentials(True) +except ImportError: + + def set_cached_credentials(creds): + pass + + import bson from bson.binary import Binary from bson.son import SON @@ -88,7 +99,13 @@ def _authenticate_aws(credentials, sock_info): # SASL complete. break except PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) # Convert to OperationFailure and include pymongo-auth-aws version. raise OperationFailure( "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) ) + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index a63e60718c..372806bd24 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -20,6 +20,8 @@ sys.path[0:0] = [""] +from pymongo_auth_aws import AwsCredential, auth + from pymongo import MongoClient from pymongo.errors import OperationFailure from pymongo.uri_parser import parse_uri @@ -53,6 +55,62 @@ def test_connect_uri(self): with MongoClient(self.uri) as client: client.get_database().test.find_one() + def setup_cache(self): + if os.environ.get("AWS_ACCESS_KEY_ID", None) or "@" in self.uri: + self.skipTest("Not testing cached credentials") + if not hasattr(auth, "set_cached_credentials"): + self.skipTest("Cached credentials not available") + + # Ensure cleared credentials. + auth.set_cached_credentials(None) + self.assertEqual(auth.get_cached_credentials(), None) + + client = MongoClient(self.uri) + client.get_database().test.find_one() + client.close() + return auth.get_cached_credentials() + + def test_cache_credentials(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + + def test_cache_about_to_expire(self): + creds = self.setup_cache() + client = MongoClient(self.uri) + self.addCleanup(client.close) + + # Make the creds about to expire. + creds = auth.get_cached_credentials() + assert creds is not None + + creds = AwsCredential(creds.username, creds.password, creds.token, lambda x: True) + auth.set_cached_credentials(creds) + + client.get_database().test.find_one() + new_creds = auth.get_cached_credentials() + self.assertNotEqual(creds, new_creds) + + def test_poisoned_cache(self): + creds = self.setup_cache() + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + # Poison the creds with invalid password. + assert creds is not None + creds = AwsCredential("a" * 24, "b" * 24, "c" * 24) + auth.set_cached_credentials(creds) + + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + # Make sure the cache was cleared. + self.assertEqual(auth.get_cached_credentials(), None) + + # The next attempt should generate a new cred and succeed. + client.get_database().test.find_one() + self.assertNotEqual(auth.get_cached_credentials(), None) + class TestAWSLambdaExamples(unittest.TestCase): def test_shared_client(self): From 438539eaa2db753d6f5ebee3cf3cba3a6d530e21 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 12 Oct 2022 13:59:51 -0700 Subject: [PATCH 0781/2111] PYTHON-3445 Improve documentation for with_options (#1074) --- pymongo/database.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 59328a1b53..d28578b4dd 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -162,11 +162,11 @@ def with_options( >>> db1.read_preference Primary() >>> from pymongo import ReadPreference - >>> db2 = db1.with_options(read_preference=ReadPreference.SECONDARY) + >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) >>> db1.read_preference Primary() >>> db2.read_preference - Secondary(tag_sets=None) + Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) :Parameters: - `codec_options` (optional): An instance of From 4e11bdaa3e27c2130db004a58751feb43fe9fa14 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 12 Oct 2022 15:30:37 -0700 Subject: [PATCH 0782/2111] PYTHON-3445 Fix documentation for with_options (#1075) --- pymongo/database.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/database.py b/pymongo/database.py index d28578b4dd..259c22d558 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -161,7 +161,7 @@ def with_options( >>> db1.read_preference Primary() - >>> from pymongo import ReadPreference + >>> from pymongo.read_preferences import Secondary >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) >>> db1.read_preference Primary() From df77653ccc6b1bbfa18f8fca41cd4a1bfecc25c6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 06:09:23 -0500 Subject: [PATCH 0783/2111] PYTHON-3347 Test against Python 3.11 prerelease (#1069) --- .evergreen/build-mac.sh | 1 + .evergreen/build-manylinux-internal.sh | 2 +- .evergreen/build-manylinux.sh | 3 ++- .evergreen/build-windows.sh | 2 +- .evergreen/config.yml | 12 ++++++++++++ bson/codec_options.py | 2 +- pymongo/topology_description.py | 2 +- setup.py | 1 + test/test_client.py | 3 ++- 9 files changed, 22 insertions(+), 6 deletions(-) diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 270c92b59a..60846ae92a 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -9,6 +9,7 @@ mkdir -p validdist mv dist/* validdist || true VERSION=${VERSION:-3.10} + PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 4fd43a67a3..7c3747f4e2 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -11,7 +11,7 @@ mv dist/* validdist || true # Compile wheels for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310) ]]; then + if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310|cp311) ]]; then continue fi # https://github.com/pypa/manylinux/issues/49 diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index cac435fb11..871151a5f3 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -37,7 +37,8 @@ unexpected=$(find dist \! \( -iname dist -or \ -iname '*cp37*' -or \ -iname '*cp38*' -or \ -iname '*cp39*' -or \ - -iname '*cp310*' \)) + -iname '*cp310*' -or \ + -iname '*cp311*' \)) if [ -n "$unexpected" ]; then echo "Unexpected files:" $unexpected exit 1 diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index 09f5e7f0b4..aeb16892b1 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -8,7 +8,7 @@ rm -rf validdist mkdir -p validdist mv dist/* validdist || true -for VERSION in 37 38 39 310; do +for VERSION in 37 38 39 310 311; do _pythons=("C:/Python/Python${VERSION}/python.exe" \ "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3047ab475c..d824b68f5c 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2080,6 +2080,10 @@ axes: display_name: "Python 3.10" variables: PYTHON_BINARY: "/opt/python/3.10/bin/python3" + - id: "3.11" + display_name: "Python 3.11" + variables: + PYTHON_BINARY: "/opt/python/3.11/bin/python3" - id: "pypy3.7" display_name: "PyPy 3.7" variables: @@ -2116,6 +2120,10 @@ axes: display_name: "Python 3.10" variables: PYTHON_BINARY: "C:/python/Python310/python.exe" + - id: "3.11" + display_name: "Python 3.11" + variables: + PYTHON_BINARY: "C:/python/Python311/python.exe" - id: python-version-windows-32 display_name: "Python" @@ -2136,6 +2144,10 @@ axes: display_name: "32-bit Python 3.10" variables: PYTHON_BINARY: "C:/python/32/Python310/python.exe" + - id: "3.11" + display_name: "32-bit Python 3.11" + variables: + PYTHON_BINARY: "C:/python/32/Python311/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version diff --git a/bson/codec_options.py b/bson/codec_options.py index efba8af78d..3c0a976a1b 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -199,7 +199,7 @@ def __eq__(self, other: Any) -> Any: ) -class DatetimeConversion(enum.IntEnum): +class DatetimeConversion(int, enum.Enum): """Options for decoding BSON datetimes.""" DATETIME = 1 diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index df11a6ec75..7503a72704 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -495,7 +495,7 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): new_hosts = set(seedlist) - set(sds.keys()) n_to_add = topology_description.srv_max_hosts - len(sds) if n_to_add > 0: - seedlist = sample(new_hosts, min(n_to_add, len(new_hosts))) + seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts))) else: seedlist = [] # Add SDs corresponding to servers recently added to the SRV record. diff --git a/setup.py b/setup.py index 2706facf90..d895bf7dec 100755 --- a/setup.py +++ b/setup.py @@ -331,6 +331,7 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database", diff --git a/test/test_client.py b/test/test_client.py index 7e7e14c0e5..5bb116dbda 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1641,7 +1641,8 @@ def server_description_count(): # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: # AssertionError: 19 != 46 within 15 delta (27 difference) - self.assertAlmostEqual(initial_count, final_count, delta=15) + # On Python 3.11 we seem to get more of a delta. + self.assertAlmostEqual(initial_count, final_count, delta=20) @client_context.require_failCommand_fail_point def test_network_error_message(self): From f3fc409e2476684d8efec84a15717c4467bf466a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 16:44:43 -0500 Subject: [PATCH 0784/2111] PYTHON-3474 Document changes to AWS Credential Handling (#1077) --- doc/changelog.rst | 4 ++++ doc/examples/authentication.rst | 19 +++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c11ac95888..279c535180 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -29,6 +29,10 @@ PyMongo 4.3 brings a number of improvements including: - PyMongo now internally caches AWS credentials that it fetches from AWS endpoints, to avoid rate limitations. The cache is cleared when the credentials expire or an error is encountered. +- When using the ``MONGODB-AWS`` authentication mechanism with the + ``aws`` extra, the behavior of credential fetching has changed with + ``pymongo_auth_aws>=1.1.0``. Please see :doc:`examples/authentication` for + more information. Bug fixes ......... diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index db2dbd3d1f..9512b23e4b 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -264,16 +264,23 @@ security (or session) token. Credentials can be configured through the MongoDB URI, environment variables, or the local EC2 or ECS endpoint. The order in which the client searches for -credentials is: - -#. Credentials passed through the URI -#. Environment variables -#. ECS endpoint if and only if ``AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`` is set. -#. EC2 endpoint +`credentials`_ is the same as the one used by the AWS ``boto3`` library +when using ``pymongo_auth_aws>=1.1.0``. + +Because we are now using ``boto3`` to handle credentials, the order and +locations of credentials are slightly different from previous versions. +Particularly, if you have a shared AWS credentials or config file, +then those credentials will be used by default if AWS auth environment +variables are not set. To override this behavior, set ``AWS_PROFILE=""`` in +your shell or add ``os.environ["AWS_PROFILE"] = ""`` to your script or +application. Alternatively, you can create an AWS profile specifically for +your MongoDB credentials and set ``AWS_PROFILE`` to that profile name. MONGODB-AWS authenticates against the "$external" virtual database, so none of the URIs in this section need to include the ``authSource`` URI option. +.. _credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + AWS IAM credentials ~~~~~~~~~~~~~~~~~~~ From f79b90992e0f764ddec55ffa7748e8a81b236abb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 17:01:52 -0500 Subject: [PATCH 0785/2111] PYTHON-3453 Fix handling of enterprise auth vars (#1076) --- .evergreen/config.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d824b68f5c..8b37663878 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -494,14 +494,14 @@ functions: working_dir: "src" script: | cat < prepare_enterprise_auth.sh - export SASL_HOST=${sasl_host} - export SASL_PORT=${sasl_port} - export SASL_USER=${sasl_user} - export SASL_PASS=${sasl_pass} - export SASL_DB=${sasl_db} - export PRINCIPAL=${principal} - export GSSAPI_DB=${gssapi_db} - export KEYTAB_BASE64=${keytab_base64} + export SASL_HOST='${sasl_host}' + export SASL_PORT='${sasl_port}' + export SASL_USER='${sasl_user}' + export SASL_PASS='${sasl_pass}' + export SASL_DB='${sasl_db}' + export PRINCIPAL='${principal}' + export GSSAPI_DB='${gssapi_db}' + export KEYTAB_BASE64='${keytab_base64}' EOT - command: shell.exec type: test From 3eb316ed3042d7b9690afbf04acb86b318b82658 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Oct 2022 17:06:55 -0500 Subject: [PATCH 0786/2111] BUMP 4.3.0 --- pymongo/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 257c1dbac1..1b5d8abfce 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 1, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 0) def get_version_string() -> str: From 1c9193f226c02d3396e5658ce0e89c254c030ed1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Oct 2022 09:46:25 -0500 Subject: [PATCH 0787/2111] BUMP 4.3.1 --- pymongo/__init__.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 1b5d8abfce..a1624a49b0 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -84,7 +84,7 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) def get_version_string() -> str: diff --git a/setup.py b/setup.py index d895bf7dec..6b23775664 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ except ImportError: _HAVE_SPHINX = False -version = "4.2.1.dev0" +version = "4.3.1" f = open("README.rst") try: From 942e28170ade3fa86950c06a22384e1ae781f7a8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Oct 2022 14:49:02 -0500 Subject: [PATCH 0788/2111] PYTHON-1889 Single-source the version tuple/string (#1079) --- RELEASE.rst | 27 ++++++++++++--------------- pymongo/__init__.py | 17 ++--------------- pymongo/_version.py | 28 ++++++++++++++++++++++++++++ setup.py | 5 ++++- 4 files changed, 46 insertions(+), 31 deletions(-) create mode 100644 pymongo/_version.py diff --git a/RELEASE.rst b/RELEASE.rst index 83c6c0f1d4..4150126f22 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -43,18 +43,15 @@ Doing a Release 3. Add release notes to doc/changelog.rst. Generally just summarize/clarify the git log, but you might add some more long form notes for big changes. -4. Search and replace the "devN" version number w/ the new version number (see - note above in `Versioning`_). +4. Make sure version number is updated in ``pymongo/_version.py`` -5. Make sure version number is updated in setup.py and pymongo/__init__.py +5. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. -6. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. +6. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m 'BUMP 3.11.0' ``. -7. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m '3.11.0' ``. +7. Push commit / tag, eg ``git push && git push --tags``. -8. Push commit / tag, eg ``git push && git push --tags``. - -9. Pushing a tag will trigger a release process in Evergreen which builds +8. Pushing a tag will trigger a release process in Evergreen which builds wheels for manylinux, macOS, and Windows. Wait for the "release-combine" task to complete and then download the "Release files all" archive. See: https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release @@ -70,27 +67,27 @@ Doing a Release ... pymongo-.tar.gz -10. Upload all the release packages to PyPI with twine:: +9. Upload all the release packages to PyPI with twine:: $ python3 -m twine upload path/to/archive/* -11. Make sure the new version appears on https://pymongo.readthedocs.io/. If the +10. Make sure the new version appears on https://pymongo.readthedocs.io/. If the new version does not show up automatically, trigger a rebuild of "latest": https://readthedocs.org/projects/pymongo/builds/ -12. Bump the version number to .dev0 in setup.py/__init__.py, +11. Bump the version number to .dev0 in ``pymongo/_version.py``, commit, push. -13. Publish the release version in Jira. +12. Publish the release version in Jira. -14. Announce the release on: +13. Announce the release on: https://www.mongodb.com/community/forums/c/announcements/driver-releases/110 -15. File a ticket for DOCSP highlighting changes in server version and Python +14. File a ticket for DOCSP highlighting changes in server version and Python version compatibility or the lack thereof, for example: https://jira.mongodb.org/browse/DOCSP-13536 -16. Create a GitHub Release for the tag using +15. Create a GitHub Release for the tag using https://github.com/mongodb/mongo-python-driver/releases/new. The title should be "PyMongo X.Y.Z", and the description should contain a link to the release notes on the the community forum, e.g. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index a1624a49b0..6394e8250e 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -14,7 +14,7 @@ """Python driver for MongoDB.""" -from typing import ContextManager, Optional, Tuple, Union +from typing import ContextManager, Optional __all__ = [ "ASCENDING", @@ -84,21 +84,8 @@ .. _text index: http://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) - - -def get_version_string() -> str: - if isinstance(version_tuple[-1], str): - return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] - return ".".join(map(str, version_tuple)) - - -__version__: str = get_version_string() -version = __version__ - -"""Current version of PyMongo.""" - from pymongo import _csot +from pymongo._version import __version__, get_version_string, version, version_tuple from pymongo.collection import ReturnDocument from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType diff --git a/pymongo/_version.py b/pymongo/_version.py new file mode 100644 index 0000000000..99b25d7dcd --- /dev/null +++ b/pymongo/_version.py @@ -0,0 +1,28 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Current version of PyMongo.""" +from typing import Tuple, Union + +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) + + +def get_version_string() -> str: + if isinstance(version_tuple[-1], str): + return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] + return ".".join(map(str, version_tuple)) + + +__version__: str = get_version_string() +version = __version__ diff --git a/setup.py b/setup.py index 6b23775664..52892e8507 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,10 @@ except ImportError: _HAVE_SPHINX = False -version = "4.3.1" +version_ns = {} +with open("pymongo/_version.py") as fp: + exec(fp.read(), version_ns) +version = version_ns["__version__"] f = open("README.rst") try: From 5dec36195a73c4dc841388576a8e4b56a39c2cb2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Oct 2022 19:12:41 -0500 Subject: [PATCH 0789/2111] PYTHON-3474 Improve documentation about credential handling (#1080) --- doc/changelog.rst | 7 +++++-- doc/examples/authentication.rst | 9 +++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 279c535180..4688a8fb65 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,11 @@ Changelog ========= -Changes in Version 4.3 ----------------------- +Changes in Version 4.3 (4.3.2) +------------------------------ + +Note: We withheld uploading tags 4.3.0 and 4.3.1 to PyPI due to a +version handling error and a necessary documentation update. `dnspython `_ is now a required dependency. This change makes PyMongo easier to install for use with "mongodb+srv://" diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 9512b23e4b..862ac40db2 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -268,11 +268,12 @@ or the local EC2 or ECS endpoint. The order in which the client searches for when using ``pymongo_auth_aws>=1.1.0``. Because we are now using ``boto3`` to handle credentials, the order and -locations of credentials are slightly different from previous versions. -Particularly, if you have a shared AWS credentials or config file, +locations of credentials are slightly different from before. Particularly, +if you have a shared AWS credentials or config file, then those credentials will be used by default if AWS auth environment -variables are not set. To override this behavior, set ``AWS_PROFILE=""`` in -your shell or add ``os.environ["AWS_PROFILE"] = ""`` to your script or +variables are not set. To override this behavior, set +``AWS_SHARED_CREDENTIALS_FILE=""`` in your shell or add +``os.environ["AWS_SHARED_CREDENTIALS_FILE"] = ""`` to your script or application. Alternatively, you can create an AWS profile specifically for your MongoDB credentials and set ``AWS_PROFILE`` to that profile name. From bed75044e8116abdbf4e5610db2bf7760ff07566 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Oct 2022 09:09:45 -0500 Subject: [PATCH 0790/2111] BUMP 4.3.2 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 99b25d7dcd..2df9b484d9 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 1) +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 2) def get_version_string() -> str: From 520b26fba346efefc7d38adb02e255146db7ea79 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Oct 2022 09:50:46 -0500 Subject: [PATCH 0791/2111] BUMP 4.4.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 2df9b484d9..7331d6ff25 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 2) +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, '.dev0') def get_version_string() -> str: From 1d117c1f39983ec88376816c7e3fd73fb140b863 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 19 Oct 2022 12:09:58 -0700 Subject: [PATCH 0792/2111] Fix pre-commit for _version.py --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 7331d6ff25..78c325a23c 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, '.dev0') +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") def get_version_string() -> str: From 614e22c46c57deca443a6ebfa2123d5ce383ef8f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 19 Oct 2022 15:14:46 -0700 Subject: [PATCH 0793/2111] PYTHON-3478 Improve test_change_stream_can_resume_after_timeouts (#1083) --- test/test_csot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_csot.py b/test/test_csot.py index a9cf7a0124..c2a62aa7f2 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -76,10 +76,10 @@ def test_timeout_nested(self): @client_context.require_change_streams def test_change_stream_can_resume_after_timeouts(self): coll = self.db.test - with coll.watch(max_await_time_ms=150) as stream: + with coll.watch() as stream: with pymongo.timeout(0.1): with self.assertRaises(PyMongoError) as ctx: - stream.try_next() + stream.next() self.assertTrue(ctx.exception.timeout) self.assertTrue(stream.alive) with self.assertRaises(PyMongoError) as ctx: From 3d3ffaf6f3db3cdba3883ca34ba0d97cc376c49c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 20 Oct 2022 13:06:34 -0700 Subject: [PATCH 0794/2111] PYTHON-3410 Resync CSOT spec tests to be less flaky (#1087) --- test/csot/bulkWrite.json | 25 ++--- test/csot/retryability-legacy-timeouts.json | 102 ++++++++++---------- test/csot/retryability-timeoutMS.json | 50 +++++----- test/unified_format.py | 2 - 4 files changed, 89 insertions(+), 90 deletions(-) diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json index 14d5b654f6..9a05809f77 100644 --- a/test/csot/bulkWrite.json +++ b/test/csot/bulkWrite.json @@ -19,7 +19,10 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent" - ] + ], + "uriOptions": { + "w": 1 + } } }, { @@ -48,6 +51,13 @@ { "description": "timeoutMS applied to entire bulkWrite, not individual commands", "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, { "name": "failPoint", "object": "testRunner", @@ -69,15 +79,6 @@ } } }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - } - }, { "name": "bulkWrite", "object": "collection", @@ -114,10 +115,10 @@ "events": [ { "commandStartedEvent": { - "commandName": "find", + "commandName": "insert", "databaseName": "test", "command": { - "find": "coll" + "insert": "coll" } } }, diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json index cd2af7fab6..63e8efccfc 100644 --- a/test/csot/retryability-legacy-timeouts.json +++ b/test/csot/retryability-legacy-timeouts.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "socketTimeoutMS": 50 + "socketTimeoutMS": 100 }, "useMultipleMongoses": false, "observeEvents": [ @@ -73,7 +73,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -132,7 +132,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -194,7 +194,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -255,7 +255,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -319,7 +319,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -376,7 +376,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -436,7 +436,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -496,7 +496,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -559,7 +559,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -621,7 +621,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -686,7 +686,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -743,7 +743,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -803,7 +803,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -863,7 +863,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -926,7 +926,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -988,7 +988,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1053,7 +1053,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1118,7 +1118,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1186,7 +1186,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1243,7 +1243,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1303,7 +1303,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1357,7 +1357,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1414,7 +1414,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1471,7 +1471,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1531,7 +1531,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1595,7 +1595,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1662,7 +1662,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1719,7 +1719,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1779,7 +1779,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1836,7 +1836,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1896,7 +1896,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1953,7 +1953,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2013,7 +2013,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2070,7 +2070,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2130,7 +2130,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2187,7 +2187,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2247,7 +2247,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2304,7 +2304,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2364,7 +2364,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2418,7 +2418,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2475,7 +2475,7 @@ "distinct" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2533,7 +2533,7 @@ "distinct" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2594,7 +2594,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2651,7 +2651,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2711,7 +2711,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2768,7 +2768,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2828,7 +2828,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2882,7 +2882,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2939,7 +2939,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2996,7 +2996,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json index 438ba6b8d2..642eca0ee9 100644 --- a/test/csot/retryability-timeoutMS.json +++ b/test/csot/retryability-timeoutMS.json @@ -137,7 +137,7 @@ "name": "insertOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "document": { "x": 1 } @@ -356,7 +356,7 @@ "name": "insertMany", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "documents": [ { "x": 1 @@ -575,7 +575,7 @@ "name": "deleteOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -789,7 +789,7 @@ "name": "replaceOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -1011,7 +1011,7 @@ "name": "updateOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1232,7 +1232,7 @@ "name": "findOneAndDelete", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -1446,7 +1446,7 @@ "name": "findOneAndReplace", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -1668,7 +1668,7 @@ "name": "findOneAndUpdate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1897,7 +1897,7 @@ "name": "bulkWrite", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "requests": [ { "insertOne": { @@ -2124,7 +2124,7 @@ "name": "listDatabases", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -2332,7 +2332,7 @@ "name": "listDatabaseNames", "object": "client", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -2541,7 +2541,7 @@ "name": "createChangeStream", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -2759,7 +2759,7 @@ "name": "aggregate", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [ { "$listLocalSessions": {} @@ -2984,7 +2984,7 @@ "name": "listCollections", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3195,7 +3195,7 @@ "name": "listCollectionNames", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3406,7 +3406,7 @@ "name": "createChangeStream", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3617,7 +3617,7 @@ "name": "aggregate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3828,7 +3828,7 @@ "name": "count", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4039,7 +4039,7 @@ "name": "countDocuments", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4247,7 +4247,7 @@ "name": "estimatedDocumentCount", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -4457,7 +4457,7 @@ "name": "distinct", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "fieldName": "x", "filter": {} } @@ -4670,7 +4670,7 @@ "name": "find", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4881,7 +4881,7 @@ "name": "findOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -5089,7 +5089,7 @@ "name": "listIndexes", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -5298,7 +5298,7 @@ "name": "createChangeStream", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } diff --git a/test/unified_format.py b/test/unified_format.py index 005e91f6b6..12eaceed35 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -963,8 +963,6 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for with_transaction") if "transaction" in class_name or "transaction" in description: self.skipTest("CSOT not implemented for transactions") - if "socket timeout" in description: - self.skipTest("CSOT not implemented for socket timeouts") # Some tests need to be skipped based on the operations they try to run. for op in spec["operations"]: From 45b809e41c0064676bdb2e327920ce9e497ebd06 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 20 Oct 2022 16:07:39 -0700 Subject: [PATCH 0795/2111] Mention crypt_shared in encryption examples pages and fix formatting (#1088) --- doc/examples/encryption.rst | 46 ++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 5c3dc0864b..d7341b3ef4 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -19,7 +19,7 @@ encrypted data. .. seealso:: The MongoDB documentation on `Client Side Field Level Encryption `_. Dependencies ------------- +~~~~~~~~~~~~ To get started using client-side field level encryption in your project, you will need to install the @@ -34,8 +34,30 @@ support. For more information about installing pymongocrypt see `the installation instructions on the project's PyPI page `_. +Additionally, either `crypt_shared`_ or `mongocryptd`_ are required in order +to use *automatic* client-side encryption. + +crypt_shared +```````````` + +The Automatic Encryption Shared Library (crypt_shared) provides the same +functionality as `mongocryptd`_, but does not require you to spawn another +process to perform automatic encryption. + +By default, pymongo attempts to load crypt_shared from the system and if +found uses it automatically. To load crypt_shared from another location, +use the ``crypt_shared_lib_path`` argument to +:class:`~pymongo.encryption_options.AutoEncryptionOpts`. +If pymongo cannot load crypt_shared it will attempt to fallback to using +`mongocryptd`_ by default. Set ``crypt_shared_lib_required=True`` to make +the app always use crypt_shared and fail if it could not be loaded. + +For detailed installation instructions see +`the MongoDB documentation on Automatic Encryption Shared Library +`_. + mongocryptd ------------ +``````````` The ``mongocryptd`` binary is required for automatic client-side encryption and is included as a component in the `MongoDB Enterprise Server package @@ -341,19 +363,13 @@ data key and create a collection with the Automatic Queryable Encryption (Beta) ````````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 6.0rc8+ Enterprise to preview the capability. - -Until PyMongo 4.2 release is finalized, it can be installed using:: - - pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" - -Additionally, ``libmongocrypt`` must be installed from `source `_. +You must have MongoDB 6.0 Enterprise to preview the capability. Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: @@ -412,20 +428,12 @@ automatically encrypted and decrypted. Explicit Queryable Encryption (Beta) ```````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. +PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 6.0rc8+ to preview the capability. - -Until PyMongo 4.2 release is finalized, it can be installed using:: - - pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" - -Additionally, ``libmongocrypt`` must be installed from `source `_. - Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured using an ``encrypted_fields`` mapping, as demonstrated by the following example:: From 84fbc1f3197012700bc94a1fc7972ce60a1456e5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 21 Oct 2022 05:22:39 -0500 Subject: [PATCH 0796/2111] PYTHON-3367 Add support for GCP attached service accounts when using GCP KMS (#1064) --- .evergreen/config.yml | 91 ++++++++++++++++++++++++++ .evergreen/run-mongodb-fle-gcp-auto.sh | 35 ++++++++++ test/test_on_demand_csfle.py | 67 +++++++++++++++++++ 3 files changed, 193 insertions(+) create mode 100644 .evergreen/run-mongodb-fle-gcp-auto.sh create mode 100644 test/test_on_demand_csfle.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8b37663878..4d3024589f 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1035,6 +1035,43 @@ task_groups: tasks: - ".serverless" + - name: testgcpkms_task_group + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + echo '${testgcpkms_key_file}' > /tmp/testgcpkms_key_file.json + export GCPKMS_KEYFILE=/tmp/testgcpkms_key_file.json + export GCPKMS_DRIVERS_TOOLS=$DRIVERS_TOOLS + export GCPKMS_SERVICEACCOUNT="${testgcpkms_service_account}" + export GCPKMS_MACHINETYPE="e2-standard-4" + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/create-and-setup-instance.sh + # Load the GCPKMS_GCLOUD, GCPKMS_INSTANCE, GCPKMS_REGION, and GCPKMS_ZONE expansions. + - command: expansions.update + params: + file: testgcpkms-expansions.yml + teardown_group: + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/delete-instance.sh + tasks: + - testgcpkms-task + tasks: # Wildcard task. Do you need to find out what tools are available and where? # Throw it here, and execute this task on all buildvariants @@ -1857,6 +1894,51 @@ tasks: commands: - func: "download and merge coverage" + - name: "testgcpkms-task" + commands: + - command: shell.exec + type: setup + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + echo "Copying files ... begin" + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + tar czf /tmp/mongo-python-driver.tgz . + GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + echo "Untarring file ... end" + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + GCPKMS_CMD="SUCCESS=true ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + + - name: "testgcpkms-fail-task" + # testgcpkms-fail-task runs in a non-GCE environment. + # It is expected to fail to obtain GCE credentials. + commands: + - command: shell.exec + type: test + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + SUCCESS=false ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017 axes: # Choice of distro @@ -2821,6 +2903,15 @@ buildvariants: tasks: - name: "load-balancer-test" +- name: testgcpkms-variant + display_name: "GCP KMS" + run_on: + - debian11-small + tasks: + - name: testgcpkms_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + - testgcpkms-fail-task + - name: Release display_name: Release batchtime: 20160 # 14 days diff --git a/.evergreen/run-mongodb-fle-gcp-auto.sh b/.evergreen/run-mongodb-fle-gcp-auto.sh new file mode 100644 index 0000000000..81c4660275 --- /dev/null +++ b/.evergreen/run-mongodb-fle-gcp-auto.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use to connect to the server +# SUCCESS Whether the authentication is expected to succeed or fail. One of "true" or "false" +############################################ +# Main Program # +############################################ + +if [[ -z "$1" ]]; then + echo "usage: $0 " + exit 1 +fi +export MONGODB_URI="$1" + +if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in FLE GCP test!"; + exit 1 +fi +# Now we can safely enable xtrace +set -o xtrace + +authtest () { + echo "Running GCP Credential Acquisition Test with $PYTHON" + $PYTHON --version + $PYTHON -m pip install --upgrade wheel setuptools pip + $PYTHON -m pip install '.[encryption]' + $PYTHON -m pip install https://github.com/mongodb/libmongocrypt#subdirectory=bindings/python + TEST_FLE_GCP_AUTO=1 $PYTHON test/test_on_demand_csfle.py +} + +PYTHON="python3" authtest diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py new file mode 100644 index 0000000000..408c942cc7 --- /dev/null +++ b/test/test_on_demand_csfle.py @@ -0,0 +1,67 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption with on demand credentials.""" +import os +import sys +import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context + +from bson.codec_options import CodecOptions +from pymongo.encryption import _HAVE_PYMONGOCRYPT, ClientEncryption, EncryptionError + + +class TestonDemandGCPCredentials(IntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUpClass(cls): + super(TestonDemandGCPCredentials, cls).setUpClass() + + def setUp(self): + super(TestonDemandGCPCredentials, self).setUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("gcp", self.master_key) From 228edd21f858fe20c3dd0bffdc6aff054a5726a0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Oct 2022 12:10:22 -0500 Subject: [PATCH 0797/2111] PYTHON-3471 Test Support for Gevent in Python 3.11 (#1091) --- .evergreen/config.yml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4d3024589f..f0514681db 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2614,12 +2614,26 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: ubuntu-18.04 - python-version: ["pypy3.7", "pypy3.8"] + python-version: ["pypy3.7", "pypy3.8", "3.11"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions +- matrix_name: "tests-python-version-green-framework-ubuntu20" + matrix_spec: + platform: ubuntu-20.04 + python-version: ["3.11"] + green-framework: "*" + auth-ssl: "*" + display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" + tasks: + - ".rapid" + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - matrix_name: "tests-windows-python-version" matrix_spec: platform: windows-64-vsMulti-small From 3fc301cd22b2b235cc0e56800c507b7c798bfdca Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Oct 2022 14:55:58 -0500 Subject: [PATCH 0798/2111] PYTHON-3256 Obtain AWS credentials for CSFLE in the same way as for MONGODB-AWS (#1035) --- .evergreen/run-tests.sh | 3 +++ README.rst | 3 ++- doc/examples/encryption.rst | 5 +++-- doc/installation.rst | 3 ++- setup.py | 6 ++++-- test/test_encryption.py | 31 +++++++++++++++++++++++++++++++ 6 files changed, 45 insertions(+), 6 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9a0eb25e00..db20c9111e 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -147,6 +147,9 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by PREPARE_SHELL for access to mongocryptd. + # Need aws dependency for On-Demand KMS Credentials. + python -m pip install '.[aws]' + # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh diff --git a/README.rst b/README.rst index 115085ac13..530829f957 100644 --- a/README.rst +++ b/README.rst @@ -130,7 +130,8 @@ Wire protocol compression with zstandard requires `zstandard $ python -m pip install "pymongo[zstd]" Client-Side Field Level Encryption requires `pymongocrypt -`_:: +`_ and +`pymongo-auth-aws `_:: $ python -m pip install "pymongo[encryption]" diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index d7341b3ef4..72205ad119 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -23,9 +23,10 @@ Dependencies To get started using client-side field level encryption in your project, you will need to install the -`pymongocrypt `_ library +`pymongocrypt `_ and +`pymongo-auth-aws `_ libraries as well as the driver itself. Install both the driver and a compatible -version of pymongocrypt like this:: +version of the dependencies like this:: $ python -m pip install 'pymongo[encryption]' diff --git a/doc/installation.rst b/doc/installation.rst index b02949335b..4810353f98 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -70,7 +70,8 @@ Wire protocol compression with zstandard requires `zstandard $ python3 -m pip install "pymongo[zstd]" :ref:`Client-Side Field Level Encryption` requires `pymongocrypt -`_:: +`_ and +`pymongo-auth-aws `_:: $ python3 -m pip install "pymongo[encryption]" diff --git a/setup.py b/setup.py index 52892e8507..6d1a711708 100755 --- a/setup.py +++ b/setup.py @@ -278,12 +278,14 @@ def build_extension(self, ext): # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths pyopenssl_reqs.append("certifi") +aws_reqs = ["pymongo-auth-aws<2.0.0"] + extras_require = { - "encryption": ["pymongocrypt>=1.3.0,<2.0.0"], + "encryption": ["pymongocrypt>=1.3.0,<2.0.0"] + aws_reqs, "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], - "aws": ["pymongo-auth-aws<2.0.0"], + "aws": aws_reqs, "srv": [], # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. "tls": [], # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. } diff --git a/test/test_encryption.py b/test/test_encryption.py index 567d606893..6c54a90f7a 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2304,6 +2304,37 @@ def run_test(self, src_provider, dst_provider): self.assertEqual(decrypt_result2, "test") +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials +class TestOnDemandAWSCredentials(EncryptionIntegrationTest): + def setUp(self): + super(TestOnDemandAWSCredentials, self).setUp() + self.master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + + @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") + def test_01_failure(self): + self.client_encryption = ClientEncryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("aws", self.master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_02_success(self): + self.client_encryption = ClientEncryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + ) + self.client_encryption.create_data_key("aws", self.master_key) + + class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): # Queryable Encryption is not supported on Standalone topology. @client_context.require_no_standalone From f08776c5222fc691219b2fa54a147354cf0be2e9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Oct 2022 14:57:01 -0500 Subject: [PATCH 0799/2111] PYTHON-3367 Use zip url for install (#1093) --- .evergreen/run-mongodb-fle-gcp-auto.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/run-mongodb-fle-gcp-auto.sh b/.evergreen/run-mongodb-fle-gcp-auto.sh index 81c4660275..8b92551c10 100644 --- a/.evergreen/run-mongodb-fle-gcp-auto.sh +++ b/.evergreen/run-mongodb-fle-gcp-auto.sh @@ -28,7 +28,7 @@ authtest () { $PYTHON --version $PYTHON -m pip install --upgrade wheel setuptools pip $PYTHON -m pip install '.[encryption]' - $PYTHON -m pip install https://github.com/mongodb/libmongocrypt#subdirectory=bindings/python + $PYTHON -m pip install https://github.com/mongodb/libmongocrypt/archive/refs/heads/master.zip#subdirectory=bindings/python TEST_FLE_GCP_AUTO=1 $PYTHON test/test_on_demand_csfle.py } From 908382130045f7dabd2dd17914b600f5dcde2f26 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 1 Nov 2022 14:33:21 -0700 Subject: [PATCH 0800/2111] PYTHON-3454 Specifying a generic type for a collection does not correctly enforce type safety when inserting data (#1081) --- .github/workflows/test-python.yml | 2 ++ doc/examples/type_hints.rst | 9 +++++++-- pymongo/client_session.py | 2 +- pymongo/collection.py | 10 +++++----- pymongo/helpers.py | 4 ++-- pymongo/monitoring.py | 2 +- test/__init__.py | 23 +++++++++++++---------- test/test_collection.py | 2 +- test/test_mypy.py | 28 ++++++++++++++++++++++++---- test/utils.py | 4 ++-- 10 files changed, 58 insertions(+), 28 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index d451197e4e..cbebc94e6f 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -67,6 +67,8 @@ jobs: # Test overshadowed codec_options.py file mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + python -m pip install -U typing_extensions + mypy --install-types --non-interactive test/test_mypy.py linkcheck: name: Check Links diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index 6858e95290..e829441976 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -92,7 +92,9 @@ Note that when using :class:`~bson.son.SON`, the key and value types must be giv Typed Collection ---------------- -You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`: +You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a +:class:`~pymongo.collection.Collection`. Note that all `schema validation`_ for inserts and updates is done on the server. +These methods automatically add an "_id" field. .. doctest:: @@ -105,10 +107,12 @@ You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-define ... >>> client: MongoClient = MongoClient() >>> collection: Collection[Movie] = client.test.test - >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 }) + >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> assert result["year"] == 1993 + >>> # This will not be type checked, despite being present, because it is added by PyMongo. + >>> assert type(result["_id"]) == ObjectId Typed Database -------------- @@ -243,3 +247,4 @@ Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocu .. _limitations in mypy: https://github.com/python/mypy/issues/3737 .. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html .. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py +.. _schema validation: https://www.mongodb.com/docs/manual/core/schema-validation/#when-to-use-schema-validation diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 3ff98a579f..d2479942e4 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -435,7 +435,7 @@ def _max_time_expired_error(exc): # From the transactions spec, all the retryable writes errors plus # WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset( +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( [ 64, # WriteConcernFailed 50, # MaxTimeMSExpired diff --git a/pymongo/collection.py b/pymongo/collection.py index 8f1afc575d..23efe8fd35 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -71,7 +71,7 @@ InsertOneResult, UpdateResult, ) -from pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} @@ -566,7 +566,7 @@ def _insert_command(session, sock_info, retryable_write): def insert_one( self, - document: _DocumentIn, + document: Union[_DocumentType, RawBSONDocument], bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, comment: Optional[Any] = None, @@ -614,7 +614,7 @@ def insert_one( """ common.validate_is_document_type("document", document) if not (isinstance(document, RawBSONDocument) or "_id" in document): - document["_id"] = ObjectId() + document["_id"] = ObjectId() # type: ignore[index] write_concern = self._write_concern_for(session) return InsertOneResult( @@ -633,7 +633,7 @@ def insert_one( @_csot.apply def insert_many( self, - documents: Iterable[_DocumentIn], + documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, @@ -697,7 +697,7 @@ def gen(): common.validate_is_document_type("document", document) if not isinstance(document, RawBSONDocument): if "_id" not in document: - document["_id"] = ObjectId() + document["_id"] = ObjectId() # type: ignore[index] inserted_ids.append(document["_id"]) yield (message._INSERT, document) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 4df8ab8e7a..dd210db188 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -44,7 +44,7 @@ # From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). -_NOT_PRIMARY_CODES = ( +_NOT_PRIMARY_CODES: frozenset = ( frozenset( [ 10058, # LegacyNotPrimary <=3.2 "not primary" error code @@ -58,7 +58,7 @@ | _SHUTDOWN_CODES ) # From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset( +_RETRYABLE_ERROR_CODES: frozenset = _NOT_PRIMARY_CODES | frozenset( [ 7, # HostNotFound 6, # HostUnreachable diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index c53e7e5727..5b729652ad 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -528,7 +528,7 @@ def register(listener: _EventListener) -> None: # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS = set( +_SENSITIVE_COMMANDS: set = set( [ "authenticate", "saslstart", diff --git a/test/__init__.py b/test/__init__.py index b89cd88d26..eb66e45667 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -43,11 +43,10 @@ HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False - from contextlib import contextmanager from functools import wraps from test.version import Version -from typing import Callable, Dict, Generator, no_type_check +from typing import Any, Callable, Dict, Generator, no_type_check from unittest import SkipTest from urllib.parse import quote_plus @@ -331,7 +330,9 @@ def hello(self): def _connect(self, host, port, **kwargs): kwargs.update(self.default_client_options) - client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=5000, **kwargs) + client: MongoClient = pymongo.MongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) try: try: client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? @@ -356,7 +357,7 @@ def _init_client(self): if self.client is not None: # Return early when connected to dataLake as mongohoused does not # support the getCmdLineOpts command and is tested without TLS. - build_info = self.client.admin.command("buildInfo") + build_info: Any = self.client.admin.command("buildInfo") if "dataLake" in build_info: self.is_data_lake = True self.auth_enabled = True @@ -521,14 +522,16 @@ def has_secondaries(self): @property def storage_engine(self): try: - return self.server_status.get("storageEngine", {}).get("name") + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) except AttributeError: # Raised if self.server_status is None. return None def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" - client = pymongo.MongoClient( + client: MongoClient = pymongo.MongoClient( host, port, username=db_user, @@ -694,7 +697,7 @@ def supports_secondary_read_pref(self): if self.has_secondaries: return True if self.is_mongos: - shard = self.client.config.shards.find_one()["host"] + shard = self.client.config.shards.find_one()["host"] # type:ignore[index] num_members = shard.count(",") + 1 return num_members > 1 return False @@ -1015,12 +1018,12 @@ def fork( """ def _print_threads(*args: object) -> None: - if _print_threads.called: + if _print_threads.called: # type:ignore[attr-defined] return - _print_threads.called = True + _print_threads.called = True # type:ignore[attr-defined] print_thread_tracebacks() - _print_threads.called = False + _print_threads.called = False # type:ignore[attr-defined] def _target() -> None: signal.signal(signal.SIGUSR1, _print_threads) diff --git a/test/test_collection.py b/test/test_collection.py index 37f1b1eae2..e7ac248124 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -785,7 +785,7 @@ def test_insert_many_invalid(self): db.test.insert_many(1) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) # type: ignore[arg-type] + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) def test_delete_one(self): self.db.test.drop() diff --git a/test/test_mypy.py b/test/test_mypy.py index c692c70789..a1e94937b2 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -14,22 +14,20 @@ """Test that each file in mypy_fails/ actually fails mypy, and test some sample client code that uses PyMongo typings.""" - import os import tempfile import unittest from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List try: - from typing import TypedDict # type: ignore[attr-defined] + from typing_extensions import TypedDict - # Not available in Python 3.7 class Movie(TypedDict): # type: ignore[misc] name: str year: int except ImportError: - TypeDict = None + TypedDict = None try: @@ -304,6 +302,28 @@ def test_typeddict_document_type(self) -> None: assert retreived["year"] == 1 assert retreived["name"] == "a" + @only_type_check + def test_typeddict_document_type_insertion(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + mov = {"name": "THX-1138", "year": 1971} + movie = Movie(name="THX-1138", year=1971) + coll.insert_one(mov) # type: ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": 1971}) # This will work because it is in-line. + coll.insert_one(movie) + coll.insert_many([mov]) # type: ignore[list-item] + coll.insert_many([movie]) + bad_mov = {"name": "THX-1138", "year": "WRONG TYPE"} + bad_movie = Movie(name="THX-1138", year="WRONG TYPE") # type: ignore[typeddict-item] + coll.insert_one(bad_mov) # type:ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[typeddict-item] + coll.insert_one(bad_movie) + coll.insert_many([bad_mov]) # type: ignore[list-item] + coll.insert_many( + [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[typeddict-item] + ) + coll.insert_many([bad_movie]) + @only_type_check def test_raw_bson_document_type(self) -> None: client = MongoClient(document_class=RawBSONDocument) diff --git a/test/utils.py b/test/utils.py index 6f35b48538..59349f4fdc 100644 --- a/test/utils.py +++ b/test/utils.py @@ -601,7 +601,7 @@ def ensure_all_connected(client: MongoClient) -> None: Depending on the use-case, the caller may need to clear any event listeners that are configured on the client. """ - hello = client.admin.command(HelloCompat.LEGACY_CMD) + hello: dict = client.admin.command(HelloCompat.LEGACY_CMD) if "setName" not in hello: raise ConfigurationError("cluster is not a replica set") @@ -612,7 +612,7 @@ def ensure_all_connected(client: MongoClient) -> None: def discover(): i = 0 while i < 100 and connected_host_list != target_host_list: - hello = client.admin.command( + hello: dict = client.admin.command( HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY ) connected_host_list.update([hello["me"]]) From 04356b0ffda966feed19057e21a02526312c08ad Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Nov 2022 17:37:41 -0500 Subject: [PATCH 0801/2111] PYTHON-3498 Error installing virtual environment on zseries hosts (#1101) (#1103) --- .evergreen/utils.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 67fa272683..30013ed06b 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -18,7 +18,8 @@ createvirtualenv () { echo "Cannot test without virtualenv" exit 1 fi - $VIRTUALENV $VENVPATH + # Workaround for bug in older versions of virtualenv. + $VIRTUALENV $VENVPATH || $PYTHON -m venv $VENVPATH if [ "Windows_NT" = "$OS" ]; then # Workaround https://bugs.python.org/issue32451: # mongovenv/Scripts/activate: line 3: $'\r': command not found From a00aabfa0d88ef239484cde8535c13ad77017955 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 4 Nov 2022 13:47:32 -0500 Subject: [PATCH 0802/2111] PYTHON-3502 GridFSBucket.download_to_stream slow (#1108) --- doc/changelog.rst | 8 ++++++++ gridfs/__init__.py | 10 ++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4688a8fb65..4f4e5ace71 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,14 @@ Changelog ========= +Changes in Version 4.3.3 +------------------------ + +- Fixed a performance regression in :meth:`~gridfs.GridOut.download_to_stream` + and :meth:`~gridfs.GridOut.download_to_stream_by_name` by reading in chunks + instead of line by line. + + Changes in Version 4.3 (4.3.2) ------------------------------ diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 6ab843a85e..692567b2de 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -796,7 +796,10 @@ def download_to_stream( Added ``session`` parameter. """ with self.open_download_stream(file_id, session=session) as gout: - for chunk in gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break destination.write(chunk) @_csot.apply @@ -977,7 +980,10 @@ def download_to_stream_by_name( Added ``session`` parameter. """ with self.open_download_stream_by_name(filename, revision, session=session) as gout: - for chunk in gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break destination.write(chunk) def rename( From ff94b0e3094f6bf08645ff0a491ec9b51f504b53 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 4 Nov 2022 14:25:36 -0500 Subject: [PATCH 0803/2111] PYTHON-3501 Ensure Auth Environment Variables are Always Dynamic (#1107) --- test/auth_aws/test_auth_aws.py | 58 ++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 372806bd24..e0329a783e 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -17,6 +17,7 @@ import os import sys import unittest +from unittest.mock import patch sys.path[0:0] = [""] @@ -111,6 +112,63 @@ def test_poisoned_cache(self): client.get_database().test.find_one() self.assertNotEqual(auth.get_cached_credentials(), None) + def test_environment_variables_ignored(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + prev = os.environ.copy() + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + client.get_database().test.find_one() + + self.assertIsNotNone(auth.get_cached_credentials()) + + mock_env = dict( + AWS_ACCESS_KEY_ID="foo", AWS_SECRET_ACCESS_KEY="bar", AWS_SESSION_TOKEN="baz" + ) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client.get_database().test.find_one() + + auth.set_cached_credentials(None) + + client2 = MongoClient(self.uri) + self.addCleanup(client2.close) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + with self.assertRaises(OperationFailure): + client2.get_database().test.find_one() + + def test_no_cache_environment_variables(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + auth.set_cached_credentials(None) + + mock_env = dict(AWS_ACCESS_KEY_ID=creds.username, AWS_SECRET_ACCESS_KEY=creds.password) + if creds.token: + mock_env["AWS_SESSION_TOKEN"] = creds.token + + client = MongoClient(self.uri) + self.addCleanup(client.close) + + with patch.dict(os.environ, mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], creds.username) + client.get_database().test.find_one() + + self.assertIsNone(auth.get_cached_credentials()) + + mock_env["AWS_ACCESS_KEY_ID"] = "foo" + + client2 = MongoClient(self.uri) + self.addCleanup(client2.close) + + with patch.dict("os.environ", mock_env), self.assertRaises(OperationFailure): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client2.get_database().test.find_one() + class TestAWSLambdaExamples(unittest.TestCase): def test_shared_client(self): From da4df7955529ce6edcebc65e10dbaab977a132fc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Nov 2022 10:37:33 -0800 Subject: [PATCH 0804/2111] PYTHON-3508 Improve the performance of GridOut.readline and GridOut.read (#1109) --- doc/changelog.rst | 19 ++++++-- gridfs/grid_file.py | 109 ++++++++++++++++++++++---------------------- 2 files changed, 71 insertions(+), 57 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4f4e5ace71..b3587e04ca 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,10 +4,23 @@ Changelog Changes in Version 4.3.3 ------------------------ -- Fixed a performance regression in :meth:`~gridfs.GridOut.download_to_stream` - and :meth:`~gridfs.GridOut.download_to_stream_by_name` by reading in chunks - instead of line by line. +Version 4.3.3 fixes a number of bugs: +- Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream` + and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks + instead of line by line (`PYTHON-3502`_). +- Improved performance of :meth:`gridfs.grid_file.GridOut.read` and + :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_). + +Issues Resolved +............... + +See the `PyMongo 4.3.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3502: https://jira.mongodb.org/browse/PYTHON-3502 +.. _PYTHON-3508: https://jira.mongodb.org/browse/PYTHON-3508 +.. _PyMongo 4.3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34709 Changes in Version 4.3 (4.3.2) ------------------------------ diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index cec7d57a22..50efc0cd23 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -463,7 +463,10 @@ def __init__( self.__files = root_collection.files self.__file_id = file_id self.__buffer = EMPTY + # Start position within the current buffered chunk. + self.__buffer_pos = 0 self.__chunk_iter = None + # Position within the total file. self.__position = 0 self._file = file_document self._session = session @@ -510,12 +513,12 @@ def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ - received = len(self.__buffer) + received = len(self.__buffer) - self.__buffer_pos chunk_data = EMPTY chunk_size = int(self.chunk_size) if received > 0: - chunk_data = self.__buffer + chunk_data = self.__buffer[self.__buffer_pos :] elif self.__position < int(self.length): chunk_number = int((received + self.__position) / chunk_size) if self.__chunk_iter is None: @@ -531,25 +534,12 @@ def readchunk(self) -> bytes: self.__position += len(chunk_data) self.__buffer = EMPTY + self.__buffer_pos = 0 return chunk_data - def read(self, size: int = -1) -> bytes: - """Read at most `size` bytes from the file (less if there - isn't enough data). - - The bytes are returned as an instance of :class:`str` (:class:`bytes` - in python 3). If `size` is negative or omitted all data is read. - - :Parameters: - - `size` (optional): the number of bytes to read - - .. versionchanged:: 3.8 - This method now only checks for extra chunks after reading the - entire file. Previously, this method would check for extra chunks - on every call. - """ + def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: + """Internal read() and readline() helper.""" self._ensure_file() - remainder = int(self.length) - self.__position if size < 0 or size > remainder: size = remainder @@ -558,11 +548,36 @@ def read(self, size: int = -1) -> bytes: return EMPTY received = 0 - data = io.BytesIO() + data = [] while received < size: - chunk_data = self.readchunk() + needed = size - received + if self.__buffer: + # Optimization: Read the buffer with zero byte copies. + buf = self.__buffer + chunk_start = self.__buffer_pos + chunk_data = memoryview(buf)[self.__buffer_pos :] + self.__buffer = EMPTY + self.__buffer_pos = 0 + self.__position += len(chunk_data) + else: + buf = self.readchunk() + chunk_start = 0 + chunk_data = memoryview(buf) + if line: + pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start + if pos >= 0: + # Decrease size to exit the loop. + size = received + pos + 1 + needed = pos + 1 + if len(chunk_data) > needed: + data.append(chunk_data[:needed]) + # Optimization: Save the buffer with zero byte copies. + self.__buffer = buf + self.__buffer_pos = chunk_start + needed + self.__position -= len(self.__buffer) - self.__buffer_pos + else: + data.append(chunk_data) received += len(chunk_data) - data.write(chunk_data) # Detect extra chunks after reading the entire file. if size == remainder and self.__chunk_iter: @@ -571,13 +586,24 @@ def read(self, size: int = -1) -> bytes: except StopIteration: pass - self.__position -= received - size + return b"".join(data) + + def read(self, size: int = -1) -> bytes: + """Read at most `size` bytes from the file (less if there + isn't enough data). + + The bytes are returned as an instance of :class:`str` (:class:`bytes` + in python 3). If `size` is negative or omitted all data is read. + + :Parameters: + - `size` (optional): the number of bytes to read - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) + .. versionchanged:: 3.8 + This method now only checks for extra chunks after reading the + entire file. Previously, this method would check for extra chunks + on every call. + """ + return self._read_size_or_line(size=size) def readline(self, size: int = -1) -> bytes: # type: ignore[override] """Read one line or up to `size` bytes from the file. @@ -585,33 +611,7 @@ def readline(self, size: int = -1) -> bytes: # type: ignore[override] :Parameters: - `size` (optional): the maximum number of bytes to read """ - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - if size == 0: - return EMPTY - - received = 0 - data = io.BytesIO() - while received < size: - chunk_data = self.readchunk() - pos = chunk_data.find(NEWLN, 0, size) - if pos != -1: - size = received + pos + 1 - - received += len(chunk_data) - data.write(chunk_data) - if pos != -1: - break - - self.__position -= received - size - - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) + return self._read_size_or_line(size=size, line=True) def tell(self) -> int: """Return the current position of this file.""" @@ -651,6 +651,7 @@ def seek(self, pos: int, whence: int = _SEEK_SET) -> int: self.__position = new_pos self.__buffer = EMPTY + self.__buffer_pos = 0 if self.__chunk_iter: self.__chunk_iter.close() self.__chunk_iter = None From 1abcd3fc0c3f20c051cda36cdd01cc553dce5c53 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Nov 2022 13:01:56 -0800 Subject: [PATCH 0805/2111] PYTHON-3513 Correctly pin to mypy==0.990 (#1110) --- .github/workflows/test-python.yml | 3 +-- pymongo/monitor.py | 4 ++-- test/mockupdb/test_mixed_version_sharded.py | 3 ++- test/mockupdb/test_mongos_command_read_mode.py | 3 ++- test/mockupdb/test_op_msg_read_preference.py | 3 ++- test/mockupdb/test_reset_and_request_check.py | 3 ++- test/mockupdb/test_slave_okay_rs.py | 3 ++- test/mockupdb/test_slave_okay_sharded.py | 3 ++- test/mockupdb/test_slave_okay_single.py | 3 ++- 9 files changed, 17 insertions(+), 11 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index cbebc94e6f..414eef7a1b 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -39,7 +39,6 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | - pip install mypy==0.942 python setup.py test mypytest: @@ -59,7 +58,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy + python -m pip install -U pip mypy==0.990 pip install -e ".[zstd, encryption, ocsp]" - name: Run mypy run: | diff --git a/pymongo/monitor.py b/pymongo/monitor.py index b7d2b19118..44390e9180 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -430,10 +430,10 @@ def _shutdown_monitors(): def _shutdown_resources(): # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. shutdown = _shutdown_monitors - if shutdown: + if shutdown: # type:ignore[truthy-function] shutdown() shutdown = _shutdown_executors - if shutdown: + if shutdown: # type:ignore[truthy-function] shutdown() diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index d5fb9913cc..7e12fcab35 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -19,10 +19,11 @@ from queue import Queue from mockupdb import MockupDB, go -from operations import upgrades from pymongo import MongoClient +from .operations import upgrades + class TestMixedVersionSharded(unittest.TestCase): def setup_server(self, upgrade): diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index b7f8532e38..a84907d8cf 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -16,7 +16,6 @@ import unittest from mockupdb import MockupDB, OpMsg, going -from operations import operations from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -25,6 +24,8 @@ read_pref_mode_from_name, ) +from .operations import operations + class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index b8d1348b97..37882912bb 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -18,7 +18,6 @@ from typing import Any from mockupdb import CommandBase, MockupDB, going -from operations import operations from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -27,6 +26,8 @@ read_pref_mode_from_name, ) +from .operations import operations + class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 778be3d5ca..bc00e38a09 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -17,12 +17,13 @@ import unittest from mockupdb import MockupDB, going, wait_until -from operations import operations from pymongo import MongoClient from pymongo.errors import ConnectionFailure from pymongo.server_type import SERVER_TYPE +from .operations import operations + class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 5a162c08e3..7ac489117a 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -20,10 +20,11 @@ import unittest from mockupdb import MockupDB, going -from operations import operations from pymongo import MongoClient +from .operations import operations + class TestSlaveOkayRS(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 52c643b417..51e422595e 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -23,11 +23,12 @@ from queue import Queue from mockupdb import MockupDB, going -from operations import operations from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from .operations import operations + class TestSlaveOkaySharded(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 07cd6c7448..bd36c77a04 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -23,12 +23,13 @@ import unittest from mockupdb import MockupDB, going -from operations import operations from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE +from .operations import operations + def topology_type_name(client): topology_type = client._topology._description.topology_type From bcb0ac0170a712391b353531d865f6cb43d83bc1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 8 Nov 2022 12:10:44 -0600 Subject: [PATCH 0806/2111] PYTHON-3396 Support the Azure VM-assigned Managed Identity for Automatic KMS Credentials (#1105) --- .evergreen/config.yml | 121 ++++++++++++++++++++++++- .evergreen/run-mongodb-fle-gcp-auto.sh | 35 ------- .evergreen/run-tests.sh | 19 +++- test/test_on_demand_csfle.py | 44 +++++++++ 4 files changed, 180 insertions(+), 39 deletions(-) delete mode 100644 .evergreen/run-mongodb-fle-gcp-auto.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f0514681db..28e54e2ded 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1072,6 +1072,50 @@ task_groups: tasks: - testgcpkms-task + - name: testazurekms_task_group + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: shell.exec + params: + silent: true + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + echo '${testazurekms_publickey}' > /tmp/testazurekms_publickey + echo '${testazurekms_privatekey}' > /tmp/testazurekms_privatekey + # Set 600 permissions on private key file. Otherwise ssh / scp may error with permissions "are too open". + chmod 600 /tmp/testazurekms_privatekey + export AZUREKMS_CLIENTID="${testazurekms_clientid}" + export AZUREKMS_TENANTID="${testazurekms_tenantid}" + export AZUREKMS_SECRET="${testazurekms_secret}" + export AZUREKMS_DRIVERS_TOOLS="$DRIVERS_TOOLS" + export AZUREKMS_RESOURCEGROUP="${testazurekms_resourcegroup}" + export AZUREKMS_PUBLICKEYPATH="/tmp/testazurekms_publickey" + export AZUREKMS_PRIVATEKEYPATH="/tmp/testazurekms_privatekey" + export AZUREKMS_SCOPE="${testazurekms_scope}" + export AZUREKMS_VMNAME_PREFIX="PYTHON_DRIVER" + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/create-and-setup-vm.sh + - command: expansions.update + params: + file: testazurekms-expansions.yml + teardown_group: + - command: shell.exec + params: + shell: bash + script: |- + ${PREPARE_SHELL} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/delete-vm.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - testazurekms-task + tasks: # Wildcard task. Do you need to find out what tools are available and where? # Throw it here, and execute this task on all buildvariants @@ -1925,12 +1969,16 @@ tasks: export GCPKMS_PROJECT=${GCPKMS_PROJECT} export GCPKMS_ZONE=${GCPKMS_ZONE} export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} - GCPKMS_CMD="SUCCESS=true ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz ./.evergreen/run-tests.sh" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh - name: "testgcpkms-fail-task" # testgcpkms-fail-task runs in a non-GCE environment. # It is expected to fail to obtain GCE credentials. commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" - command: shell.exec type: test params: @@ -1938,7 +1986,66 @@ tasks: shell: "bash" script: | ${PREPARE_SHELL} - SUCCESS=false ./.evergreen/run-mongodb-fle-gcp-auto.sh mongodb://localhost:27017 + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz + SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/run-tests.sh + + - name: testazurekms-task + commands: + - command: shell.exec + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + cd src + echo "Copying files ... begin" + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + tar czf /tmp/mongo-python-driver.tgz . + AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" \ + AZUREKMS_DST="~/" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + echo "Untarring file ... end" + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + AZUREKMS_CMD="KEY_NAME='${testazurekms_keyname}' KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/run-tests.sh" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + + - name: testazurekms-fail-task + commands: + - func: fetch source + - func: make files executable + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "server" + - command: shell.exec + type: test + params: + shell: bash + script: |- + set -o errexit + ${PREPARE_SHELL} + cd src + PYTHON_BINARY= + KEY_NAME='${testazurekms_keyname}' \ + KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz \ + SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ + ./.evergreen/run-tests.sh axes: # Choice of distro @@ -2920,12 +3027,20 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - debian11-small + - ubuntu1804-test tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - testgcpkms-fail-task +- name: testazurekms-variant + display_name: "Azure KMS" + run_on: ubuntu1804-test + tasks: + - name: testazurekms_task_group + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + - testazurekms-fail-task + - name: Release display_name: Release batchtime: 20160 # 14 days diff --git a/.evergreen/run-mongodb-fle-gcp-auto.sh b/.evergreen/run-mongodb-fle-gcp-auto.sh deleted file mode 100644 index 8b92551c10..0000000000 --- a/.evergreen/run-mongodb-fle-gcp-auto.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit # Exit the script with error if any of the commands fail - -# Supported/used environment variables: -# MONGODB_URI Set the URI, including an optional username/password to use to connect to the server -# SUCCESS Whether the authentication is expected to succeed or fail. One of "true" or "false" -############################################ -# Main Program # -############################################ - -if [[ -z "$1" ]]; then - echo "usage: $0 " - exit 1 -fi -export MONGODB_URI="$1" - -if echo "$MONGODB_URI" | grep -q "@"; then - echo "MONGODB_URI unexpectedly contains user credentials in FLE GCP test!"; - exit 1 -fi -# Now we can safely enable xtrace -set -o xtrace - -authtest () { - echo "Running GCP Credential Acquisition Test with $PYTHON" - $PYTHON --version - $PYTHON -m pip install --upgrade wheel setuptools pip - $PYTHON -m pip install '.[encryption]' - $PYTHON -m pip install https://github.com/mongodb/libmongocrypt/archive/refs/heads/master.zip#subdirectory=bindings/python - TEST_FLE_GCP_AUTO=1 $PYTHON test/test_on_demand_csfle.py -} - -PYTHON="python3" authtest diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index db20c9111e..959ad901ad 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -101,7 +101,8 @@ if [ -n "$TEST_PYOPENSSL" ]; then python -m pip install --prefer-binary pyopenssl requests service_identity fi -if [ -n "$TEST_ENCRYPTION" ]; then +if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then + createvirtualenv $PYTHON venv-encryption trap "deactivate; rm -rf venv-encryption" EXIT HUP PYTHON=python @@ -146,7 +147,9 @@ if [ -n "$TEST_ENCRYPTION" ]; then python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by PREPARE_SHELL for access to mongocryptd. +fi +if [ -n "$TEST_ENCRYPTION" ]; then # Need aws dependency for On-Demand KMS Credentials. python -m pip install '.[aws]' @@ -171,6 +174,20 @@ if [ -n "$TEST_ENCRYPTION" ]; then TEST_ARGS="-s test.test_encryption" fi +if [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then + if [[ -z "$SUCCESS" ]]; then + echo "Must define SUCCESS" + exit 1 + fi + + if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials in FLE test!"; + exit 1 + fi + + TEST_ARGS="-s test.test_on_demand_csfle" +fi + if [ -n "$DATA_LAKE" ]; then TEST_ARGS="-s test.test_data_lake" fi diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index 408c942cc7..d5668199a3 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -65,3 +65,47 @@ def test_02_success(self): codec_options=CodecOptions(), ) self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(IntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUpClass(cls): + super(TestonDemandAzureCredentials, cls).setUpClass() + + def setUp(self): + super(TestonDemandAzureCredentials, self).setUp() + self.master_key = { + "keyVaultEndpoint": "https://keyvault-drivers-2411.vault.azure.net/keys/", + "keyName": "KEY-NAME", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("azure", self.master_key) + + +if __name__ == "__main__": + unittest.main(verbosity=2) From c106c08c1d504866ca1a40043cf7e93c675d4ccf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 8 Nov 2022 11:13:34 -0800 Subject: [PATCH 0807/2111] PYTHON-3295 Add CSOT docs page (#1111) --- doc/changelog.rst | 4 +- doc/examples/index.rst | 1 + doc/examples/timeouts.rst | 162 ++++++++++++++++++++++++++++++++++++ doc/examples/type_hints.rst | 2 +- pymongo/__init__.py | 2 + 5 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 doc/examples/timeouts.rst diff --git a/doc/changelog.rst b/doc/changelog.rst index b3587e04ca..8220b6897a 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -11,6 +11,8 @@ Version 4.3.3 fixes a number of bugs: instead of line by line (`PYTHON-3502`_). - Improved performance of :meth:`gridfs.grid_file.GridOut.read` and :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_). +- Added the :ref:`timeout-example` example page to improve the documentation + for :func:`pymongo.timeout`. Issues Resolved ............... @@ -90,7 +92,7 @@ PyMongo 4.2 brings a number of improvements including: - Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout - to an entire block of pymongo operations. + to an entire block of pymongo operations. See :ref:`timeout-example` for examples. - Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. - Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when the error was caused by a timeout. diff --git a/doc/examples/index.rst b/doc/examples/index.rst index 6cdeafc201..ee4aa27284 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -30,6 +30,7 @@ MongoDB, you can start it like so: mod_wsgi server_selection tailable + timeouts tls type_hints encryption diff --git a/doc/examples/timeouts.rst b/doc/examples/timeouts.rst new file mode 100644 index 0000000000..73095e5af5 --- /dev/null +++ b/doc/examples/timeouts.rst @@ -0,0 +1,162 @@ + +.. _timeout-example: + +Client Side Operation Timeout +============================= + +PyMongo 4.2 introduced :meth:`~pymongo.timeout` and the ``timeoutMS`` +URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. +These features allow applications to more easily limit the amount of time that +one or more operations can execute before control is returned to the app. This +timeout applies to all of the work done to execute the operation, including +but not limited to server selection, connection checkout, serialization, and +server-side execution. + +Basic Usage +----------- + +The following example uses :meth:`~pymongo.timeout` to configure a 10-second +timeout for an :meth:`~pymongo.collection.Collection.insert_one` operation:: + + import pymongo + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + +The :meth:`~pymongo.timeout` applies to all pymongo operations within the block. +The following example ensures that both the `insert` and the `find` complete +within 10 seconds total, or raise a timeout error:: + + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + coll.find_one({"name": "Nunu"}) + +When nesting :func:`~pymongo.timeout`, the nested deadline is capped by the outer +deadline. The deadline can only be shortened, not extended. +When exiting the block, the previous deadline is restored:: + + with pymongo.timeout(5): + coll.find_one() # Uses the 5 second deadline. + with pymongo.timeout(3): + coll.find_one() # Uses the 3 second deadline. + coll.find_one() # Uses the original 5 second deadline. + with pymongo.timeout(10): + coll.find_one() # Still uses the original 5 second deadline. + coll.find_one() # Uses the original 5 second deadline. + +Timeout errors +-------------- + +When the :meth:`~pymongo.timeout` with-statement is entered, a deadline is set +for the entire block. When that deadline is exceeded, any blocking pymongo operation +will raise a timeout exception. For example:: + + try: + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + time.sleep(10) + # The deadline has now expired, the next operation will raise + # a timeout exception. + coll.find_one({"name": "Nunu"}) + except PyMongoError as exc: + if exc.timeout: + print(f"block timed out: {exc!r}") + else: + print(f"failed with non-timeout error: {exc!r}") + +The :attr:`pymongo.errors.PyMongoError.timeout` property (added in PyMongo 4.2) +will be ``True`` when the error was caused by a timeout and ``False`` otherwise. + +The timeoutMS URI option +------------------------ + +PyMongo 4.2 also added support for the ``timeoutMS`` URI and keyword argument to +:class:`~pymongo.mongo_client.MongoClient`. When this option is configured, the +client will automatically apply the timeout to each API call. For example:: + + client = MongoClient("mongodb://localhost/?timeoutMS=10000") + coll = client.test.test + coll.insert_one({"name": "Nunu"}) # Uses a 10-second timeout. + coll.find_one({"name": "Nunu"}) # Also uses a 10-second timeout. + +The above is roughly equivalent to:: + + client = MongoClient() + coll = client.test.test + with pymongo.timeout(10): + coll.insert_one({"name": "Nunu"}) + with pymongo.timeout(10): + coll.find_one({"name": "Nunu"}) + +pymongo.timeout overrides timeoutMS +----------------------------------- + +:meth:`~pymongo.timeout` overrides ``timeoutMS``; within a +:meth:`~pymongo.timeout` block a client's ``timeoutMS`` option is ignored:: + + client = MongoClient("mongodb://localhost/?timeoutMS=10000") + coll = client.test.test + coll.insert_one({"name": "Nunu"}) # Uses the client's 10-second timeout. + # pymongo.timeout overrides the client's timeoutMS. + with pymongo.timeout(20): + coll.insert_one({"name": "Nunu"}) # Uses the 20-second timeout. + with pymongo.timeout(5): + coll.find_one({"name": "Nunu"}) # Uses the 5-second timeout. + +pymongo.timeout is thread safe +------------------------------ + +:meth:`~pymongo.timeout` is thread safe; the timeout only applies to current +thread and multiple threads can configure different timeouts in parallel. + +pymongo.timeout is asyncio safe +------------------------------- + +:meth:`~pymongo.timeout` is asyncio safe; the timeout only applies to current +Task and multiple Tasks can configure different timeouts concurrently. +:meth:`~pymongo.timeout` can be used identically in +`Motor `_, for example:: + + import motor.motor_asyncio + client = motor.motor_asyncio.AsyncIOMotorClient() + coll = client.test.test + with pymongo.timeout(10): + await coll.insert_one({"name": "Nunu"}) + await coll.find_one({"name": "Nunu"}) + +Troubleshooting +--------------- + +There are many timeout errors that can be raised depending on when the timeout +expires. In code, these can be identified with the :attr:`pymongo.errors.PyMongoError.timeout` +property. Some specific timeout errors examples are described below. + +When the client was unable to find an available server to run the operation +within the given timeout:: + + pymongo.errors.ServerSelectionTimeoutError: No servers found yet, Timeout: -0.00202266700216569s, Topology Description: ]> + +When either the client was unable to establish a connection within the given +timeout or the operation was sent but the server was not able to respond in time:: + + pymongo.errors.NetworkTimeout: localhost:27017: timed out + +When the server cancelled the operation because it exceeded the given timeout. +Note that the operation may have partially completed on the server (depending +on the operation):: + + pymongo.errors.ExecutionTimeout: operation exceeded time limit, full error: {'ok': 0.0, 'errmsg': 'operation exceeded time limit', 'code': 50, 'codeName': 'MaxTimeMSExpired'} + +When the client cancelled the operation because it was not possible to complete +within the given timeout:: + + pymongo.errors.ExecutionTimeout: operation would exceed time limit, remaining timeout:0.00196 <= network round trip time:0.00427 + +When the client attempted a write operation but the server could not replicate +that write (according to the configured write concern) within the given timeout:: + + pymongo.errors.WTimeoutError: operation exceeded time limit, full error: {'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}} + +The same error as above but for :meth:`~pymongo.collection.Collection.insert_many` +or :meth:`~pymongo.collection.Collection.bulk_write`:: + + pymongo.errors.BulkWriteError: batch op errors occurred, full error: {'writeErrors': [], 'writeConcernErrors': [{'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}}], 'nInserted': 2, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, 'nRemoved': 0, 'upserted': []} diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index e829441976..cd178038ad 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -2,7 +2,7 @@ .. _type_hints-example: Type Hints -=========== +========== As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python type checkers can easily find bugs before they reveal themselves in your code. diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 6394e8250e..12b62fe9f5 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -161,6 +161,8 @@ def timeout(seconds: Optional[float]) -> ContextManager: :Raises: - :py:class:`ValueError`: When `seconds` is negative. + See :ref:`timeout-example` for more examples. + .. versionadded:: 4.2 """ if not isinstance(seconds, (int, float, type(None))): From 0d301f13c51791c52a57e5c1c07abcabe19d0fd5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 8 Nov 2022 12:46:52 -0800 Subject: [PATCH 0808/2111] PYTHON-3295 Improve description of nested timeout() calls --- pymongo/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 12b62fe9f5..789df62071 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -142,8 +142,8 @@ def timeout(seconds: Optional[float]) -> ContextManager: else: print(f"failed with non-timeout error: {exc!r}") - When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at most - the existing deadline. The deadline can only be shortened, not extended. + When nesting :func:`~pymongo.timeout`, the nested deadline is capped by + the outer deadline. The deadline can only be shortened, not extended. When exiting the block, the previous deadline is restored:: with pymongo.timeout(5): From 87b09847a476fdc24ed7e847dbb94076c4eb9c83 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 10 Nov 2022 09:53:19 -0800 Subject: [PATCH 0809/2111] PYTHON-3494 Improve Documentation Surrounding Type-Checking "_id" (#1104) --- doc/examples/type_hints.rst | 66 +++++++++++++++++++++++++++++++++++-- test/test_mypy.py | 61 +++++++++++++++++++++++++++++++--- 2 files changed, 120 insertions(+), 7 deletions(-) diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index cd178038ad..38349038b1 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -97,6 +97,7 @@ You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-define These methods automatically add an "_id" field. .. doctest:: + :pyversion: >= 3.8 >>> from typing import TypedDict >>> from pymongo import MongoClient @@ -111,14 +112,73 @@ These methods automatically add an "_id" field. >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> assert result["year"] == 1993 - >>> # This will not be type checked, despite being present, because it is added by PyMongo. - >>> assert type(result["_id"]) == ObjectId + >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + +Modeling Document Types with TypedDict +-------------------------------------- + +You can use :py:class:`~typing.TypedDict` (Python 3.8+) to model structured data. +As noted above, PyMongo will automatically add an `_id` field if it is not present. This also applies to TypedDict. +There are three approaches to this: + + 1. Do not specify `_id` at all. It will be inserted automatically, and can be retrieved at run-time, but will yield a type-checking error unless explicitly ignored. + + 2. Specify `_id` explicitly. This will mean that every instance of your custom TypedDict class will have to pass a value for `_id`. + + 3. Make use of :py:class:`~typing.NotRequired`. This has the flexibility of option 1, but with the ability to access the `_id` field without causing a type-checking error. + +Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` in earlier versions of Python (<3.8, <3.11), use the `typing_extensions` package. + +.. doctest:: typed-dict-example + :pyversion: >= 3.11 + + >>> from typing import TypedDict, NotRequired + >>> from pymongo import MongoClient + >>> from pymongo.collection import Collection + >>> from bson import ObjectId + >>> class Movie(TypedDict): + ... name: str + ... year: int + ... + >>> class ExplicitMovie(TypedDict): + ... _id: ObjectId + ... name: str + ... year: int + ... + >>> class NotRequiredMovie(TypedDict): + ... _id: NotRequired[ObjectId] + ... name: str + ... year: int + ... + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will yield a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + >>> collection: Collection[ExplicitMovie] = client.test.test + >>> # Note that the _id keyword argument must be supplied + >>> inserted = collection.insert_one(ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will not raise a type-checking error. + >>> assert result["_id"] + >>> collection: Collection[NotRequiredMovie] = client.test.test + >>> # Note the lack of _id, similar to the first example + >>> inserted = collection.insert_one(NotRequiredMovie(name="Jurassic Park", year=1993)) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> # This will not raise a type-checking error, despite not being provided explicitly. + >>> assert result["_id"] + Typed Database -------------- While less common, you could specify that the documents in an entire database -match a well-defined shema using :py:class:`~typing.TypedDict` (Python 3.8+). +match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+). .. doctest:: diff --git a/test/test_mypy.py b/test/test_mypy.py index a1e94937b2..807f0e8ef3 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -20,14 +20,30 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List try: - from typing_extensions import TypedDict + from typing_extensions import NotRequired, TypedDict - class Movie(TypedDict): # type: ignore[misc] + from bson import ObjectId + + class Movie(TypedDict): name: str year: int -except ImportError: - TypedDict = None + class MovieWithId(TypedDict): + _id: ObjectId + name: str + year: int + + class ImplicitMovie(TypedDict): + _id: NotRequired[ObjectId] + name: str + year: int + +except ImportError as exc: + Movie = dict # type:ignore[misc,assignment] + ImplicitMovie = dict # type: ignore[assignment,misc] + MovieWithId = dict # type: ignore[assignment,misc] + TypedDict = None # type: ignore[assignment] + NotRequired = None # type: ignore[assignment] try: @@ -324,6 +340,43 @@ def test_typeddict_document_type_insertion(self) -> None: ) coll.insert_many([bad_movie]) + @only_type_check + def test_typeddict_explicit_document_type(self) -> None: + out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + # This should work the same as the test above, but this time using NotRequired to allow + # automatic insertion of the _id field by insert_one. + @only_type_check + def test_typeddict_not_required_document_type(self) -> None: + out = ImplicitMovie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + @only_type_check + def test_typeddict_empty_document_type(self) -> None: + out = Movie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # This should fail because _id is not included in our TypedDict definition. + assert out["_id"] # type:ignore[typeddict-item] + + def test_typeddict_find_notrequired(self): + if NotRequired is None or ImplicitMovie is None: + raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") + client: MongoClient[ImplicitMovie] = rs_or_single_client() + coll = client.test.test + coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) + out = coll.find_one({}) + assert out is not None + assert out["_id"] + @only_type_check def test_raw_bson_document_type(self) -> None: client = MongoClient(document_class=RawBSONDocument) From 133c55d8cb8ca87beb44f22932c4391803c34694 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 10 Nov 2022 13:31:14 -0800 Subject: [PATCH 0810/2111] PYTHON-3500 Improve test coverage for retryable handshake errors (#1112) --- .../unified/handshakeError.json | 2888 ++++++++++++++++- .../unified/handshakeError.json | 1648 +++++++++- 2 files changed, 4418 insertions(+), 118 deletions(-) diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json index 2cf1d173f8..58bbce66a8 100644 --- a/test/retryable_reads/unified/handshakeError.json +++ b/test/retryable_reads/unified/handshakeError.json @@ -15,25 +15,27 @@ "createEntities": [ { "client": { - "id": "client0", + "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "connectionCheckOutStartedEvent", "commandStartedEvent", - "connectionCheckOutStartedEvent" + "commandSucceededEvent", + "commandFailedEvent" ] } }, { "database": { - "id": "database0", - "client": "client0", - "databaseName": "retryable-handshake-tests" + "id": "database", + "client": "client", + "databaseName": "retryable-reads-handshake-tests" } }, { "collection": { - "id": "collection0", - "database": "database0", + "id": "collection", + "database": "database", "collectionName": "coll" } } @@ -41,7 +43,7 @@ "initialData": [ { "collectionName": "coll", - "databaseName": "retryable-handshake-tests", + "databaseName": "retryable-reads-handshake-tests", "documents": [ { "_id": 1, @@ -59,6 +61,2060 @@ } ], "tests": [ + { + "description": "listDatabases succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "listDatabaseNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "listCollections succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "listCollectionNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "countDocuments succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "distinct succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, { "description": "find succeeds after retryable handshake network error", "operations": [ @@ -66,7 +2122,469 @@ "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "find succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "findOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "listIndexes succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -74,8 +2592,8 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], "closeConnection": true } @@ -84,7 +2602,7 @@ }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -96,24 +2614,103 @@ } }, { - "name": "find", - "object": "collection0", - "arguments": { - "filter": { - "_id": 2 + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} } - }, - "expectResult": [ + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, { - "_id": 2, - "x": 22 + "commandSucceededEvent": { + "commandName": "listIndexes" + } } ] } + ] + }, + { + "description": "listIndexNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -131,25 +2728,119 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "find": "coll", - "filter": { - "_id": 2 - } + "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" } } ] @@ -157,13 +2848,13 @@ ] }, { - "description": "find succeeds after retryable handshake network error (ShutdownInProgress)", + "description": "createChangeStream succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -171,17 +2862,17 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], - "errorCode": 91 + "closeConnection": true } } } }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -193,24 +2884,111 @@ } }, { - "name": "find", - "object": "collection0", + "name": "createChangeStream", + "object": "collection", "arguments": { - "filter": { - "_id": 2 - } + "pipeline": [] }, - "expectResult": [ + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, { - "_id": 2, - "x": 22 + "commandSucceededEvent": { + "commandName": "aggregate" + } } ] } + ] + }, + { + "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -228,25 +3006,29 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" } }, { "commandStartedEvent": { - "command": { - "find": "coll", - "filter": { - "_id": 2 - } - }, - "databaseName": "retryable-handshake-tests" + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" } } ] diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index 6d6b4ac491..e07e5412b2 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -15,25 +15,27 @@ "createEntities": [ { "client": { - "id": "client0", + "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "connectionCheckOutStartedEvent", "commandStartedEvent", - "connectionCheckOutStartedEvent" + "commandSucceededEvent", + "commandFailedEvent" ] } }, { "database": { - "id": "database0", - "client": "client0", - "databaseName": "retryable-handshake-tests" + "id": "database", + "client": "client", + "databaseName": "retryable-writes-handshake-tests" } }, { "collection": { - "id": "collection0", - "database": "database0", + "id": "collection", + "database": "database", "collectionName": "coll" } } @@ -41,7 +43,7 @@ "initialData": [ { "collectionName": "coll", - "databaseName": "retryable-handshake-tests", + "databaseName": "retryable-writes-handshake-tests", "documents": [ { "_id": 1, @@ -52,13 +54,13 @@ ], "tests": [ { - "description": "InsertOne succeeds after retryable handshake error", + "description": "insertOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -66,8 +68,8 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], "closeConnection": true } @@ -76,7 +78,7 @@ }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -89,7 +91,7 @@ }, { "name": "insertOne", - "object": "collection0", + "object": "collection", "arguments": { "document": { "_id": 2, @@ -100,7 +102,7 @@ ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -118,59 +120,1385 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" } }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "deleteOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "replaceOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "insert": "coll", - "documents": [ - { - "_id": 2, - "x": 22 - } - ] + "ping": 1 }, - "commandName": "insert", - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" } } ] } + ] + }, + { + "description": "findOneAndUpdate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } ], - "outcome": [ + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, { - "collectionName": "coll", - "databaseName": "retryable-handshake-tests", - "documents": [ + "client": "client", + "events": [ { - "_id": 1, - "x": 11 + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } }, { - "_id": 2, - "x": 22 + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } } ] } ] }, { - "description": "InsertOne succeeds after retryable handshake error ShutdownInProgress", + "description": "findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -178,17 +1506,17 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], - "errorCode": 91 + "closeConnection": true } } } }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -200,19 +1528,21 @@ } }, { - "name": "insertOne", - "object": "collection0", + "name": "findOneAndUpdate", + "object": "collection", "arguments": { - "document": { - "_id": 2, - "x": 22 + "filter": {}, + "update": { + "$set": { + "x": 22 + } } } } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -230,46 +1560,234 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "bulkWrite succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "insert": "coll", - "documents": [ - { - "_id": 2, - "x": 22 - } - ] + "ping": 1 }, - "commandName": "insert", - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" } } ] } + ] + }, + { + "description": "bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } ], - "outcome": [ + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, { - "collectionName": "coll", - "databaseName": "retryable-handshake-tests", - "documents": [ + "client": "client", + "events": [ { - "_id": 1, - "x": 11 + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } }, { - "_id": 2, - "x": 22 + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } } ] } From 92e6150d84f128463f6e8f5f6b9d0e2537fef64e Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 10 Nov 2022 14:19:55 -0800 Subject: [PATCH 0811/2111] PYTHON-3493 Bulk Write InsertOne Should Be Parameter Of Collection Type (#1106) --- doc/examples/type_hints.rst | 20 ++++++++ mypy.ini | 2 +- pymongo/collection.py | 11 ++++- pymongo/encryption.py | 5 +- pymongo/operations.py | 13 +++--- pymongo/typings.py | 7 +++ test/__init__.py | 2 +- test/mockupdb/test_cluster_time.py | 2 +- test/mockupdb/test_op_msg.py | 6 +-- test/test_bulk.py | 4 +- test/test_client.py | 1 + test/test_database.py | 5 +- test/test_mypy.py | 75 +++++++++++++++++++++++++++--- test/test_server_selection.py | 6 ++- test/test_session.py | 6 ++- test/test_transactions.py | 4 +- test/utils.py | 13 +++--- 17 files changed, 144 insertions(+), 38 deletions(-) diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index 38349038b1..b413ad7b24 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -113,6 +113,26 @@ These methods automatically add an "_id" field. >>> assert result is not None >>> assert result["year"] == 1993 >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. + >>> assert result["_id"] # type:ignore[typeddict-item] + +This same typing scheme works for all of the insert methods (:meth:`~pymongo.collection.Collection.insert_one`, +:meth:`~pymongo.collection.Collection.insert_many`, and :meth:`~pymongo.collection.Collection.bulk_write`). +For `bulk_write` both :class:`~pymongo.operations.InsertOne` and :class:`~pymongo.operations.ReplaceOne` operators are generic. + +.. doctest:: + :pyversion: >= 3.8 + + >>> from typing import TypedDict + >>> from pymongo import MongoClient + >>> from pymongo.operations import InsertOne + >>> from pymongo.collection import Collection + >>> client: MongoClient = MongoClient() + >>> collection: Collection[Movie] = client.test.test + >>> inserted = collection.bulk_write([InsertOne(Movie(name="Jurassic Park", year=1993))]) + >>> result = collection.find_one({"name": "Jurassic Park"}) + >>> assert result is not None + >>> assert result["year"] == 1993 + >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. >>> assert result["_id"] # type:ignore[typeddict-item] Modeling Document Types with TypedDict diff --git a/mypy.ini b/mypy.ini index 9b1348472c..2562177ab1 100644 --- a/mypy.ini +++ b/mypy.ini @@ -33,7 +33,7 @@ ignore_missing_imports = True ignore_missing_imports = True [mypy-test.test_mypy] -warn_unused_ignores = false +warn_unused_ignores = True [mypy-winkerberos.*] ignore_missing_imports = True diff --git a/pymongo/collection.py b/pymongo/collection.py index 23efe8fd35..600d73c4bc 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -77,7 +77,14 @@ _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} -_WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] # Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] _IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] _IndexKeyHint = Union[str, _IndexList] @@ -436,7 +443,7 @@ def with_options( @_csot.apply def bulk_write( self, - requests: Sequence[_WriteOp], + requests: Sequence[_WriteOp[_DocumentType]], ordered: bool = True, bypass_document_validation: bool = False, session: Optional["ClientSession"] = None, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 9fef5963a6..92a268f452 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -18,7 +18,7 @@ import enum import socket import weakref -from typing import Any, Mapping, Optional, Sequence +from typing import Any, Generic, Mapping, Optional, Sequence try: from pymongocrypt.auto_encrypter import AutoEncrypter @@ -55,6 +55,7 @@ from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context +from pymongo.typings import _DocumentType from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -430,7 +431,7 @@ class QueryType(str, enum.Enum): """Used to encrypt a value for an equality query.""" -class ClientEncryption(object): +class ClientEncryption(Generic[_DocumentType]): """Explicit client-side field level encryption.""" def __init__( diff --git a/pymongo/operations.py b/pymongo/operations.py index 84e8bf4d35..92a4dad0ac 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -13,21 +13,22 @@ # limitations under the License. """Operation class definitions.""" -from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Dict, Generic, List, Mapping, Optional, Sequence, Tuple, Union +from bson.raw_bson import RawBSONDocument from pymongo import helpers from pymongo.collation import validate_collation_or_none from pymongo.common import validate_boolean, validate_is_mapping, validate_list from pymongo.helpers import _gen_index_name, _index_document, _index_list -from pymongo.typings import _CollationIn, _DocumentIn, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline -class InsertOne(object): +class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" __slots__ = ("_doc",) - def __init__(self, document: _DocumentIn) -> None: + def __init__(self, document: Union[_DocumentType, RawBSONDocument]) -> None: """Create an InsertOne instance. For use with :meth:`~pymongo.collection.Collection.bulk_write`. @@ -170,7 +171,7 @@ def __ne__(self, other: Any) -> bool: return not self == other -class ReplaceOne(object): +class ReplaceOne(Generic[_DocumentType]): """Represents a replace_one operation.""" __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") @@ -178,7 +179,7 @@ class ReplaceOne(object): def __init__( self, filter: Mapping[str, Any], - replacement: Mapping[str, Any], + replacement: Union[_DocumentType, RawBSONDocument], upsert: bool = False, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, diff --git a/pymongo/typings.py b/pymongo/typings.py index 14e059a8f0..fe0e8bd523 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -37,3 +37,10 @@ _Pipeline = Sequence[Mapping[str, Any]] _DocumentOut = _DocumentIn _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) + + +def strip_optional(elem): + """This function is to allow us to cast all of the elements of an iterator from Optional[_T] to _T + while inside a list comprehension.""" + assert elem is not None + return elem diff --git a/test/__init__.py b/test/__init__.py index eb66e45667..20b1d00ca8 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1090,7 +1090,7 @@ def print_thread_stacks(pid: int) -> None: class IntegrationTest(PyMongoTestCase): """Base class for TestCases that need a connection to MongoDB to pass.""" - client: MongoClient + client: MongoClient[dict] db: Database credentials: Dict[str, str] diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index cb06a129d2..e4f3e12d07 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -60,7 +60,7 @@ def callback(client): self.cluster_time_conversation(callback, [{"ok": 1}] * 2) def test_bulk(self): - def callback(client): + def callback(client: MongoClient[dict]) -> None: client.db.collection.bulk_write( [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] ) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index da7ff3d33e..22fe38fd02 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -137,14 +137,14 @@ # Legacy methods Operation( "bulk_write_insert", - lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), + lambda coll: coll.bulk_write([InsertOne[dict]({}), InsertOne[dict]({})]), request=OpMsg({"insert": "coll"}, flags=0), reply={"ok": 1, "n": 2}, ), Operation( "bulk_write_insert-w0", lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})] + [InsertOne[dict]({}), InsertOne[dict]({})] ), request=OpMsg({"insert": "coll"}, flags=0), reply={"ok": 1, "n": 2}, @@ -152,7 +152,7 @@ Operation( "bulk_write_insert-w0-unordered", lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})], ordered=False + [InsertOne[dict]({}), InsertOne[dict]({})], ordered=False ), request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), reply=None, diff --git a/test/test_bulk.py b/test/test_bulk.py index fae1c7e201..ac7073c0ef 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -296,7 +296,7 @@ def test_upsert(self): def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. n_docs = client_context.max_write_batch_size + 100 - requests = [InsertOne({}) for _ in range(n_docs)] + requests = [InsertOne[dict]({}) for _ in range(n_docs)] result = self.coll.bulk_write(requests, ordered=False) self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) @@ -347,7 +347,7 @@ def test_bulk_write_no_results(self): def test_bulk_write_invalid_arguments(self): # The requests argument must be a list. - generator = (InsertOne({}) for _ in range(10)) + generator = (InsertOne[dict]({}) for _ in range(10)) with self.assertRaises(TypeError): self.coll.bulk_write(generator) # type: ignore[arg-type] diff --git a/test/test_client.py b/test/test_client.py index 5bb116dbda..a33881fded 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1652,6 +1652,7 @@ def test_network_error_message(self): with self.fail_point( {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} ): + assert client.address is not None expected = "%s:%s: " % client.address with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) diff --git a/test/test_database.py b/test/test_database.py index d49ac8324f..49387b8bb9 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -16,7 +16,7 @@ import re import sys -from typing import Any, Iterable, List, Mapping +from typing import Any, Iterable, List, Mapping, Union sys.path[0:0] = [""] @@ -201,7 +201,7 @@ def test_list_collection_names_filter(self): db.capped.insert_one({}) db.non_capped.insert_one({}) self.addCleanup(client.drop_database, db.name) - + filter: Union[None, dict] # Should not send nameOnly. for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): results.clear() @@ -210,7 +210,6 @@ def test_list_collection_names_filter(self): self.assertNotIn("nameOnly", results["started"][0].command) # Should send nameOnly (except on 2.6). - filter: Any for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): results.clear() names = db.list_collection_names(filter=filter) diff --git a/test/test_mypy.py b/test/test_mypy.py index 807f0e8ef3..58e69853ca 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -17,7 +17,7 @@ import os import tempfile import unittest -from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List +from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Union try: from typing_extensions import NotRequired, TypedDict @@ -42,7 +42,7 @@ class ImplicitMovie(TypedDict): Movie = dict # type:ignore[misc,assignment] ImplicitMovie = dict # type: ignore[assignment,misc] MovieWithId = dict # type: ignore[assignment,misc] - TypedDict = None # type: ignore[assignment] + TypedDict = None NotRequired = None # type: ignore[assignment] @@ -59,7 +59,7 @@ class ImplicitMovie(TypedDict): from bson.son import SON from pymongo import ASCENDING, MongoClient from pymongo.collection import Collection -from pymongo.operations import InsertOne +from pymongo.operations import DeleteOne, InsertOne, ReplaceOne from pymongo.read_preferences import ReadPreference TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") @@ -124,11 +124,40 @@ def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: docs = to_list(cursor) self.assertTrue(docs) + @only_type_check def test_bulk_write(self) -> None: self.coll.insert_one({}) - requests = [InsertOne({})] - result = self.coll.bulk_write(requests) - self.assertTrue(result.acknowledged) + coll: Collection[Movie] = self.coll + requests: List[InsertOne[Movie]] = [InsertOne(Movie(name="American Graffiti", year=1973))] + self.assertTrue(coll.bulk_write(requests).acknowledged) + new_requests: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [] + input_list: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, Movie(name="American Graffiti", year=1973)), + ] + for i in input_list: + new_requests.append(i) + self.assertTrue(coll.bulk_write(new_requests).acknowledged) + + # Because ReplaceOne is not generic, type checking is not enforced for ReplaceOne in the first example. + @only_type_check + def test_bulk_write_heterogeneous(self): + coll: Collection[Movie] = self.coll + requests: List[Union[InsertOne[Movie], ReplaceOne, DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, {"name": "American Graffiti", "year": "WRONG_TYPE"}), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests).acknowledged) + requests_two: List[Union[InsertOne[Movie], ReplaceOne[Movie], DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne( + {}, + {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[typeddict-item] + ), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests_two).acknowledged) def test_command(self) -> None: result: Dict = self.client.admin.command("ping") @@ -340,6 +369,40 @@ def test_typeddict_document_type_insertion(self) -> None: ) coll.insert_many([bad_movie]) + @only_type_check + def test_bulk_write_document_type_insertion(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [InsertOne(Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [InsertOne(mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ] # No error because it is in-line. + ) + + @only_type_check + def test_bulk_write_document_type_replacement(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [ReplaceOne({}, Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [ReplaceOne({}, mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ] # No error because it is in-line. + ) + @only_type_check def test_typeddict_explicit_document_type(self) -> None: out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index a80d5f13d9..c3f3762f9a 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -23,6 +23,7 @@ from pymongo.server_selectors import writable_server_selector from pymongo.settings import TopologySettings from pymongo.topology import Topology +from pymongo.typings import strip_optional sys.path[0:0] = [""] @@ -85,7 +86,10 @@ def all_hosts_started(): ) wait_until(all_hosts_started, "receive heartbeat from all hosts") - expected_port = max([n.address[1] for n in client._topology._description.readable_servers]) + + expected_port = max( + [strip_optional(n.address[1]) for n in client._topology._description.readable_servers] + ) # Insert 1 record and access it 10 times. coll.insert_one({"name": "John Doe"}) diff --git a/test/test_session.py b/test/test_session.py index f22a2d5eab..386bab295c 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -898,7 +898,9 @@ def _test_writes(self, op): @client_context.require_no_standalone def test_writes(self): - self._test_writes(lambda coll, session: coll.bulk_write([InsertOne({})], session=session)) + self._test_writes( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) self._test_writes( @@ -944,7 +946,7 @@ def _test_no_read_concern(self, op): @client_context.require_no_standalone def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: coll.bulk_write([InsertOne({})], session=session) + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) ) self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) self._test_no_read_concern(lambda coll, session: coll.insert_many([{}], session=session)) diff --git a/test/test_transactions.py b/test/test_transactions.py index 4cee3fa236..02e691329e 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -363,7 +363,7 @@ def test_transaction_direct_connection(self): coll.insert_one({}) self.assertEqual(client.topology_description.topology_type_name, "Single") ops = [ - (coll.bulk_write, [[InsertOne({})]]), + (coll.bulk_write, [[InsertOne[dict]({})]]), (coll.insert_one, [{}]), (coll.insert_many, [[{}, {}]]), (coll.replace_one, [{}, {}]), @@ -385,7 +385,7 @@ def test_transaction_direct_connection(self): ] for f, args in ops: with client.start_session() as s, s.start_transaction(): - res = f(*args, session=s) + res = f(*args, session=s) # type:ignore[operator] if isinstance(res, (CommandCursor, Cursor)): list(res) diff --git a/test/utils.py b/test/utils.py index 59349f4fdc..6b0876a158 100644 --- a/test/utils.py +++ b/test/utils.py @@ -29,6 +29,7 @@ from collections import abc, defaultdict from functools import partial from test import client_context, db_pwd, db_user +from typing import Any from bson import json_util from bson.objectid import ObjectId @@ -557,27 +558,27 @@ def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs return MongoClient(uri, port, **client_options) -def single_client_noauth(h=None, p=None, **kwargs): +def single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) -def single_client(h=None, p=None, **kwargs): +def single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection, and authenticate if necessary.""" return _mongo_client(h, p, directConnection=True, **kwargs) -def rs_client_noauth(h=None, p=None, **kwargs): +def rs_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, **kwargs) -def rs_client(h=None, p=None, **kwargs): +def rs_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set and authenticate if necessary.""" return _mongo_client(h, p, **kwargs) -def rs_or_single_client_noauth(h=None, p=None, **kwargs): +def rs_or_single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Connect to the replica set if there is one, otherwise the standalone. Like rs_or_single_client, but does not authenticate. @@ -585,7 +586,7 @@ def rs_or_single_client_noauth(h=None, p=None, **kwargs): return _mongo_client(h, p, authenticate=False, **kwargs) -def rs_or_single_client(h=None, p=None, **kwargs): +def rs_or_single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]: """Connect to the replica set if there is one, otherwise the standalone. Authenticates if necessary. From fcb11514506acddbd50b8ec13e76a7c34d336aac Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Nov 2022 16:23:03 -0600 Subject: [PATCH 0812/2111] PYTHON-3517 Add documentation for on-demand KMS providers (#1113) --- doc/changelog.rst | 2 +- doc/examples/encryption.rst | 73 ++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8220b6897a..ebd796116e 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,7 +4,7 @@ Changelog Changes in Version 4.3.3 ------------------------ -Version 4.3.3 fixes a number of bugs: +Version 4.3.3 documents support for :ref:`CSFLE on-demand credentials` for cloud KMS providers, and fixes the following bugs: - Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream` and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 72205ad119..9978cb6e36 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -713,6 +713,77 @@ To configure automatic *decryption* without automatic *encryption* set client_encryption.close() client.close() - if __name__ == "__main__": main() + + +.. _CSFLE on-demand credentials: + + +CSFLE on-demand credentials +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``pymongocrypt`` 1.4 adds support for fetching on-demand KMS credentials for +AWS, GCP, and Azure cloud environments. + +To enable the driver's behavior to obtain credentials from the environment, add the appropriate key ("aws", "gcp", or "azure") with an empty map to +"kms_providers" in either :class:`~pymongo.encryption_options.AutoEncryptionOpts` or :class:`~pymongo.encryption.ClientEncryption` options. + +An application using AWS credentials would look like:: + + from pymongo import MongoClient + from pymongo.encryption import ClientEncryption + client = MongoClient() + client_encryption = ClientEncryption( + # The empty dictionary enables on-demand credentials. + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, + ) + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + client_encryption.create_data_key("aws", master_key) + +The above will enable the same behavior of obtaining AWS credentials from the environment as is used for :ref:`MONGODB-AWS` authentication, including the +caching to avoid rate limiting. + +An application using GCP credentials would look like:: + + from pymongo import MongoClient + from pymongo.encryption import ClientEncryption + client = MongoClient() + client_encryption = ClientEncryption( + # The empty dictionary enables on-demand credentials. + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, + ) + master_key = { + "projectId": "my-project", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + client_encryption.create_data_key("gcp", master_key) + +The driver will query the `VM instance metadata `_ to obtain credentials. + +An application using Azure credentials would look like, this time using +:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: + + from pymongo import MongoClient + from pymongo.encryption_options import AutoEncryptionOpts + # The empty dictionary enables on-demand credentials. + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys" + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + coll = client.test.coll + coll.insert_one({"encryptedField": "123456789"}) + +The driver will `acquire an access token `_ from the Azure VM. From d0568042fa3e89786a47a182718a0210e910cce6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Nov 2022 07:41:49 -0600 Subject: [PATCH 0813/2111] PYTHON-2818 Add native support for AWS IAM Roles for service accounts, EKS in particular (#1032) --- .evergreen/config.yml | 80 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 77 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 28e54e2ded..96b6a00688 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -572,7 +572,13 @@ functions: "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}", "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}", - "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}" + "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}", + + "iam_auth_assume_web_role_name": "${iam_auth_assume_web_role_name}", + "iam_web_identity_issuer": "${iam_web_identity_issuer}", + "iam_web_identity_rsa_key": "${iam_web_identity_rsa_key}", + "iam_web_identity_jwks_uri": "${iam_web_identity_jwks_uri}", + "iam_web_identity_token_file": "${iam_web_identity_token_file}" } EOF @@ -668,7 +674,67 @@ functions: fi # Write an empty prepare_mongodb_aws so no auth environment variables # are set. - echo "" > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + rm "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" || true + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + + "run aws auth test with aws web identity credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + cd ${DRIVERS_TOOLS}/.evergreen/auth_aws + . ./activate_venv.sh + mongo aws_e2e_web_identity.js + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + export AWS_ROLE_ARN="${iam_auth_assume_web_role_name}" + export AWS_WEB_IDENTITY_TOKEN_FILE="${iam_web_identity_token_file}" + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_web_identity_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" + export AWS_ROLE_ARN="${iam_auth_assume_web_role_name}" + export AWS_WEB_IDENTITY_TOKEN_FILE="${iam_web_identity_token_file}" + export AWS_ROLE_SESSION_NAME="test" + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_web_identity_auth_test}" = "true" ]; then + echo "This platform does not support the web identity auth test, skipping..." + exit 0 + fi PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh "run aws auth test with aws credentials as environment variables": @@ -1832,6 +1898,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-5.0" @@ -1848,6 +1915,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-6.0" @@ -1864,6 +1932,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-latest" @@ -1880,6 +1949,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: "aws-auth-test-rapid" commands: @@ -1895,6 +1965,7 @@ tasks: - func: "run aws auth test with aws credentials as environment variables" - func: "run aws auth test with aws credentials and session token as environment variables" - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - name: load-balancer-test @@ -2076,6 +2147,7 @@ axes: variables: skip_EC2_auth_test: true skip_ECS_auth_test: true + skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: macos-1100 @@ -2084,6 +2156,7 @@ axes: variables: skip_EC2_auth_test: true skip_ECS_auth_test: true + skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 @@ -2146,8 +2219,9 @@ axes: run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days variables: - skip_EC2_auth_test: true skip_ECS_auth_test: true + skip_EC2_auth_test: true + skip_web_identity_auth_test: true python3_binary: "C:/python/Python38/python.exe" venv_bin_dir: "Scripts" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz From 79aa5e6757fe816c7aaadf08b56120e4375b904e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 14 Nov 2022 08:50:08 -0800 Subject: [PATCH 0814/2111] PYTHON-3516 Improve test EventListener api (#1114) --- test/test_auth.py | 8 +- test/test_change_stream.py | 55 +++-- test/test_collation.py | 38 ++-- test/test_collection.py | 33 ++- test/test_command_monitoring_legacy.py | 32 +-- test/test_comment.py | 7 +- test/test_cursor.py | 150 ++++++------- test/test_data_lake.py | 6 +- test/test_database.py | 12 +- test/test_encryption.py | 44 ++-- test/test_monitoring.py | 283 +++++++++++------------- test/test_read_concern.py | 29 ++- test/test_read_preferences.py | 2 +- test/test_read_write_concern_spec.py | 8 +- test/test_retryable_reads.py | 6 +- test/test_retryable_writes.py | 56 +++-- test/test_server_selection.py | 2 +- test/test_server_selection_in_window.py | 2 +- test/test_session.py | 112 +++++----- test/test_transactions.py | 10 +- test/test_versioned_api.py | 4 +- test/utils.py | 39 +++- test/utils_spec_runner.py | 8 +- 23 files changed, 460 insertions(+), 486 deletions(-) diff --git a/test/test_auth.py b/test/test_auth.py index 69ed27bda0..9d80f06c00 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -392,7 +392,7 @@ def test_scram_skip_empty_exchange(self): if client_context.version < (4, 4, -1): # Assert we sent the skipEmptyExchange option. - first_event = listener.results["started"][0] + first_event = listener.started_events[0] self.assertEqual(first_event.command_name, "saslStart") self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) @@ -449,7 +449,7 @@ def test_scram(self): ) client.testscram.command("dbstats") - self.listener.results.clear() + self.listener.reset() client = rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] ) @@ -457,9 +457,9 @@ def test_scram(self): if client_context.version.at_least(4, 4, -1): # Speculative authentication in 4.4+ sends saslStart with the # handshake. - self.assertEqual(self.listener.results["started"], []) + self.assertEqual(self.listener.started_events, []) else: - started = self.listener.results["started"][0] + started = self.listener.started_events[0] self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 62d7abee62..2388a6e1f4 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -167,7 +167,7 @@ def test_try_next_runs_one_getmore(self): client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") - listener.results.clear() + listener.reset() # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() @@ -177,25 +177,25 @@ def test_try_next_runs_one_getmore(self): self.addCleanup(coll.drop) with self.change_stream_with_client(client, max_await_time_ms=250) as stream: self.assertEqual(listener.started_command_names(), ["aggregate"]) - listener.results.clear() + listener.reset() # Confirm that only a single getMore is run even when no documents # are returned. self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() # Get at least one change before resuming. coll.insert_one({"_id": 2}) wait_until(lambda: stream.try_next() is not None, "get change from try_next") - listener.results.clear() + listener.reset() # Cause the next request to initiate the resume process. self.kill_change_stream_cursor(stream) - listener.results.clear() + listener.reset() # The sequence should be: # - getMore, fail @@ -203,7 +203,7 @@ def test_try_next_runs_one_getmore(self): # - no results, return immediately without another getMore self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) - listener.results.clear() + listener.reset() # Stream still works after a resume. coll.insert_one({"_id": 3}) @@ -217,7 +217,7 @@ def test_batch_size_is_honored(self): client = rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") - listener.results.clear() + listener.reset() # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() @@ -229,12 +229,12 @@ def test_batch_size_is_honored(self): expected = {"batchSize": 23} with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: # Confirm that batchSize is honored for initial batch. - cmd = listener.results["started"][0].command + cmd = listener.started_events[0].command self.assertEqual(cmd["cursor"], expected) - listener.results.clear() + listener.reset() # Confirm that batchSize is honored by getMores. self.assertIsNone(stream.try_next()) - cmd = listener.results["started"][0].command + cmd = listener.started_events[0].command key = next(iter(expected)) self.assertEqual(expected[key], cmd[key]) @@ -255,12 +255,11 @@ def test_start_at_operation_time(self): @no_type_check def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") - results = listener.results with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: pass - self.assertEqual(1, len(results["started"])) - command = results["started"][0] + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] self.assertEqual("aggregate", command.command_name) self.assertEqual( [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], @@ -464,7 +463,7 @@ def _get_expected_resume_token_legacy(self, stream, listener, previous_change=No versions that don't support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None.""" if previous_change is None: - agg_cmd = listener.results["started"][0] + agg_cmd = listener.started_events[0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") @@ -481,7 +480,7 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): if token is not None: return token - response = listener.results["succeeded"][-1].reply + response = listener.succeeded_events[-1].reply return response["cursor"]["postBatchResumeToken"] @no_type_check @@ -558,8 +557,8 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): pass # Driver should have attempted aggregate command only once. - self.assertEqual(len(listener.results["started"]), 1) - self.assertEqual(listener.results["started"][0].command_name, "aggregate") + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED @@ -603,20 +602,20 @@ def test_start_at_operation_time_caching(self): with self.change_stream_with_client(client) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results["started"][-1].command + cmd = listener.started_events[-1].command self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) # Case 2: change stream started with startAtOperationTime - listener.results.clear() + listener.reset() optime = self.get_start_at_operation_time() with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: self.kill_change_stream_cursor(cs) cs.try_next() - cmd = listener.results["started"][-1].command + cmd = listener.started_events[-1].command self.assertEqual( cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), optime, - str([k.command for k in listener.results["started"]]), + str([k.command for k in listener.started_events]), ) # Prose test no. 10 - SKIPPED @@ -631,7 +630,7 @@ def test_resumetoken_empty_batch(self): self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token - response = listener.results["succeeded"][0].reply + response = listener.succeeded_events[0].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 @@ -643,7 +642,7 @@ def test_resumetoken_exhausted_batch(self): self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token - response = listener.results["succeeded"][-1].reply + response = listener.succeeded_events[-1].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 12 @@ -737,7 +736,7 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt - response = listener.results["started"][-1] + response = listener.started_events[-1] self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) @@ -756,7 +755,7 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt - response = listener.results["started"][-1] + response = listener.started_events[-1] self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) @@ -1056,7 +1055,7 @@ def tearDownClass(cls): def setUp(self): super(TestAllLegacyScenarios, self).setUp() - self.listener.results.clear() + self.listener.reset() def setUpCluster(self, scenario_dict): assets = [ @@ -1128,7 +1127,7 @@ def check_event(self, event, expectation_dict): self.assertEqual(getattr(event, key), value) def tearDown(self): - self.listener.results.clear() + self.listener.reset() _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") diff --git a/test/test_collation.py b/test/test_collation.py index d8410a9de4..18f8bc78ac 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -113,11 +113,11 @@ def tearDownClass(cls): super(TestCollation, cls).tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() super(TestCollation, self).tearDown() def last_command_started(self): - return self.listener.results["started"][-1].command + return self.listener.started_events[-1].command def assertCollationInLastCommand(self): self.assertEqual(self.collation.document, self.last_command_started()["collation"]) @@ -129,7 +129,7 @@ def test_create_collection(self): # Test passing collation as a dict as well. self.db.test.drop() - self.listener.results.clear() + self.listener.reset() self.db.create_collection("test", collation=self.collation.document) self.assertCollationInLastCommand() @@ -139,7 +139,7 @@ def test_index_model(self): def test_create_index(self): self.db.test.create_index("foo", collation=self.collation) - ci_cmd = self.listener.results["started"][0].command + ci_cmd = self.listener.started_events[0].command self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) def test_aggregate(self): @@ -154,18 +154,18 @@ def test_distinct(self): self.db.test.distinct("foo", collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).distinct("foo") self.assertCollationInLastCommand() def test_find_command(self): self.db.test.insert_one({"is this thing on?": True}) - self.listener.results.clear() + self.listener.reset() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() def test_explain_command(self): - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).explain() # The collation should be part of the explained command. self.assertEqual( @@ -174,40 +174,40 @@ def test_explain_command(self): def test_delete(self): self.db.test.delete_one({"foo": 42}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.delete_many({"foo": 42}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) def test_update(self): self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) def test_find_and(self): self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find_one_and_update( {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation ) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) self.assertCollationInLastCommand() @@ -229,8 +229,8 @@ def test_bulk_write(self): ] ) - delete_cmd = self.listener.results["started"][0].command - update_cmd = self.listener.results["started"][1].command + delete_cmd = self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command def check_ops(ops): for op in ops: diff --git a/test/test_collection.py b/test/test_collection.py index e7ac248124..49a7017ef3 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1986,21 +1986,20 @@ def test_find_one_and_write_concern(self): c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. c_default = db.get_collection("test", write_concern=WriteConcern()) - results = listener.results # Authenticate the client and throw out auth commands from the listener. db.command("ping") - results.clear() + listener.reset() c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() c_w0.find_one_and_delete({"_id": 1}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() # Test write concern errors. if client_context.is_rs: @@ -2017,27 +2016,27 @@ def test_find_one_and_write_concern(self): WriteConcernError, c_wc_error.find_one_and_replace, {"w": 0}, - results["started"][0].command["writeConcern"], + listener.started_events[0].command["writeConcern"], ) self.assertRaises( WriteConcernError, c_wc_error.find_one_and_delete, {"w": 0}, - results["started"][0].command["writeConcern"], + listener.started_events[0].command["writeConcern"], ) - results.clear() + listener.reset() c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() c_default.find_one_and_delete({"_id": 1}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() def test_find_with_nested(self): c = self.db.test diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py index 5d9f2fe3ee..1cc3e15cc9 100644 --- a/test/test_command_monitoring_legacy.py +++ b/test/test_command_monitoring_legacy.py @@ -54,17 +54,7 @@ def tearDownClass(cls): cls.client.close() def tearDown(self): - self.listener.results.clear() - - -def format_actual_results(results): - started = results["started"] - succeeded = results["succeeded"] - failed = results["failed"] - msg = "\nStarted: %r" % (started[0].command if len(started) else None,) - msg += "\nSucceeded: %r" % (succeeded[0].reply if len(succeeded) else None,) - msg += "\nFailed: %r" % (failed[0].failure if len(failed) else None,) - return msg + self.listener.reset() def create_test(scenario_def, test): @@ -75,7 +65,7 @@ def run_scenario(self): coll = self.client[dbname][collname] coll.drop() coll.insert_many(scenario_def["data"]) - self.listener.results.clear() + self.listener.reset() name = camel_to_snake(test["operation"]["name"]) if "read_preference" in test["operation"]: coll = coll.with_options( @@ -127,11 +117,13 @@ def run_scenario(self): except OperationFailure: pass - res = self.listener.results + started_events = self.listener.started_events + succeeded_events = self.listener.succeeded_events + failed_events = self.listener.failed_events for expectation in test["expectations"]: event_type = next(iter(expectation)) if event_type == "command_started_event": - event = res["started"][0] if len(res["started"]) else None + event = started_events[0] if len(started_events) else None if event is not None: # The tests substitute 42 for any number other than 0. if event.command_name == "getMore" and event.command["getMore"]: @@ -147,7 +139,7 @@ def run_scenario(self): update.setdefault("upsert", False) update.setdefault("multi", False) elif event_type == "command_succeeded_event": - event = res["succeeded"].pop(0) if len(res["succeeded"]) else None + event = succeeded_events.pop(0) if len(succeeded_events) else None if event is not None: reply = event.reply # The tests substitute 42 for any number other than 0, @@ -171,12 +163,12 @@ def run_scenario(self): reply.pop("cursorsKilled") reply["cursorsUnknown"] = [42] # Found succeeded event. Pop related started event. - res["started"].pop(0) + started_events.pop(0) elif event_type == "command_failed_event": - event = res["failed"].pop(0) if len(res["failed"]) else None + event = failed_events.pop(0) if len(failed_events) else None if event is not None: # Found failed event. Pop related started event. - res["started"].pop(0) + started_events.pop(0) else: self.fail("Unknown event type") @@ -184,11 +176,11 @@ def run_scenario(self): event_name = event_type.split("_")[1] self.fail( "Expected %s event for %s command. Actual " - "results:%s" + "results:\n%s" % ( event_name, expectation[event_type]["command_name"], - format_actual_results(res), + "\n".join(str(e) for e in self.listener.events), ) ) diff --git a/test/test_comment.py b/test/test_comment.py index c83428fd70..85e5470d74 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -43,12 +43,11 @@ class TestComment(IntegrationTest): def _test_ops( self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 ): - results = listener.results for h, args in helpers: c = "testing comment with " + h.__name__ with self.subTest("collection-" + h.__name__ + "-comment"): for cc in [c, {"key": c}, ["any", 1]]: - results.clear() + listener.reset() kwargs = {"comment": cc} if h == coll.rename: _ = db.get_collection("temp_temp_temp").drop() @@ -77,7 +76,7 @@ def _test_ops( tested = False # For some reason collection.list_indexes creates two commands and the first # one doesn't contain 'comment'. - for i in results["started"]: + for i in listener.started_events: if cc == i.command.get("comment", ""): self.assertEqual(cc, i.command["comment"]) tested = True @@ -98,7 +97,7 @@ def _test_ops( h.__doc__, ) - results.clear() + listener.reset() @client_context.require_version_min(4, 7, -1) @client_context.require_replica_set diff --git a/test/test_cursor.py b/test/test_cursor.py index 5b4efcd391..96d83fecf1 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -218,79 +218,78 @@ def test_max_await_time_ms(self): listener = AllowListEventListener("find", "getMore") coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test - results = listener.results # Tailable_await defaults. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) # find - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Tailable_await with max_await_time_ms set. list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertTrue("maxTimeMS" in results["started"][1].command) - self.assertEqual(99, results["started"][1].command["maxTimeMS"]) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Tailable_await with max_time_ms list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Tailable_await with both max_time_ms and max_await_time_ms list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).max_await_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertTrue("maxTimeMS" in results["started"][1].command) - self.assertEqual(99, results["started"][1].command["maxTimeMS"]) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Non tailable_await with max_await_time_ms list(coll.find(batch_size=1).max_await_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[0].command) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) + listener.reset() # Non tailable_await with max_time_ms list(coll.find(batch_size=1).max_time_ms(99)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) # Non tailable_await with both max_time_ms and max_await_time_ms list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertFalse("maxTimeMS" in listener.started_events[1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -329,7 +328,7 @@ def test_explain_with_read_concern(self): self.addCleanup(client.close) coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) - started = listener.results["started"] + started = listener.started_events self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) @@ -1169,7 +1168,6 @@ def test_close_kills_cursor_synchronously(self): self.client._process_periodic_tasks() listener = AllowListEventListener("killCursors") - results = listener.results client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) coll = client[self.db.name].test_close_kills_cursors @@ -1178,7 +1176,7 @@ def test_close_kills_cursor_synchronously(self): docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) - results.clear() + listener.reset() # Close a cursor while it's still open on the server. cursor = coll.find().batch_size(10) @@ -1187,13 +1185,13 @@ def test_close_kills_cursor_synchronously(self): cursor.close() def assertCursorKilled(): - self.assertEqual(1, len(results["started"])) - self.assertEqual("killCursors", results["started"][0].command_name) - self.assertEqual(1, len(results["succeeded"])) - self.assertEqual("killCursors", results["succeeded"][0].command_name) + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) assertCursorKilled() - results.clear() + listener.reset() # Close a command cursor while it's still open on the server. cursor = coll.aggregate([], batchSize=10) @@ -1204,7 +1202,7 @@ def assertCursorKilled(): if cursor.cursor_id: assertCursorKilled() else: - self.assertEqual(0, len(results["started"])) + self.assertEqual(0, len(listener.started_events)) def test_delete_not_initialized(self): # Creating a cursor with invalid arguments will not run __init__ @@ -1226,7 +1224,7 @@ def test_getMore_does_not_send_readPreference(self): self.addCleanup(coll.drop) list(coll.find(batch_size=3)) - started = listener.results["started"] + started = listener.started_events self.assertEqual(2, len(started)) self.assertEqual("find", started[0].command_name) if client_context.is_rs or client_context.is_mongos: @@ -1261,13 +1259,13 @@ def test_find_raw_transaction(self): batches = list( client[self.db.name].test.find_raw_batches(session=session).sort("_id") ) - cmd = listener.results["started"][0] + cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "find") self.assertIn("$clusterTime", cmd.command) self.assertEqual(cmd.command["startTransaction"], True) self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results["succeeded"][-1] + last_cmd = listener.succeeded_events[-1] self.assertEqual( last_cmd.reply["$clusterTime"]["clusterTime"], session.cluster_time["clusterTime"], @@ -1293,8 +1291,8 @@ def test_find_raw_retryable_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results["started"]), 2) - for cmd in listener.results["started"]: + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: self.assertEqual(cmd.command_name, "find") @client_context.require_version_min(5, 0, 0) @@ -1314,7 +1312,7 @@ def test_find_raw_snapshot_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results["started"][1].command + find_cmd = listener.started_events[1].command self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) @@ -1372,15 +1370,15 @@ def test_monitoring(self): c.drop() c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.find_raw_batches(batch_size=4) # First raw batch of 4 documents. next(cursor) - started = listener.results["started"][0] - succeeded = listener.results["succeeded"][0] - self.assertEqual(0, len(listener.results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("find", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("find", succeeded.command_name) @@ -1391,15 +1389,14 @@ def test_monitoring(self): self.assertEqual(len(csr["firstBatch"]), 1) self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(0, 4)]) - listener.results.clear() + listener.reset() # Next raw batch of 4 documents. next(cursor) try: - results = listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("getMore", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getMore", succeeded.command_name) @@ -1442,13 +1439,13 @@ def test_aggregate_raw_transaction(self): [{"$sort": {"_id": 1}}], session=session ) ) - cmd = listener.results["started"][0] + cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "aggregate") self.assertIn("$clusterTime", cmd.command) self.assertEqual(cmd.command["startTransaction"], True) self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results["succeeded"][-1] + last_cmd = listener.succeeded_events[-1] self.assertEqual( last_cmd.reply["$clusterTime"]["clusterTime"], session.cluster_time["clusterTime"], @@ -1473,8 +1470,8 @@ def test_aggregate_raw_retryable_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results["started"]), 3) - cmds = listener.results["started"] + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events self.assertEqual(cmds[0].command_name, "aggregate") self.assertEqual(cmds[1].command_name, "aggregate") @@ -1495,7 +1492,7 @@ def test_aggregate_raw_snapshot_reads(self): self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results["started"][1].command + find_cmd = listener.started_events[1].command self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) @@ -1536,13 +1533,13 @@ def test_monitoring(self): c.drop() c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) # Start cursor, no initial batch. - started = listener.results["started"][0] - succeeded = listener.results["succeeded"][0] - self.assertEqual(0, len(listener.results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("aggregate", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("aggregate", succeeded.command_name) @@ -1551,15 +1548,14 @@ def test_monitoring(self): # First batch is empty. self.assertEqual(len(csr["firstBatch"]), 0) - listener.results.clear() + listener.reset() # Batches of 4 documents. n = 0 for batch in cursor: - results = listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("getMore", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getMore", succeeded.command_name) @@ -1570,7 +1566,7 @@ def test_monitoring(self): self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) n += 4 - listener.results.clear() + listener.reset() if __name__ == "__main__": diff --git a/test/test_data_lake.py b/test/test_data_lake.py index fbf79994d3..4fa38435a3 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -62,16 +62,16 @@ def test_1(self): next(cursor) # find command assertions - find_cmd = listener.results["succeeded"][-1] + find_cmd = listener.succeeded_events[-1] self.assertEqual(find_cmd.command_name, "find") cursor_id = find_cmd.reply["cursor"]["id"] cursor_ns = find_cmd.reply["cursor"]["ns"] # killCursors command assertions cursor.close() - started = listener.results["started"][-1] + started = listener.started_events[-1] self.assertEqual(started.command_name, "killCursors") - succeeded = listener.results["succeeded"][-1] + succeeded = listener.succeeded_events[-1] self.assertEqual(succeeded.command_name, "killCursors") self.assertIn(cursor_id, started.command["cursors"]) diff --git a/test/test_database.py b/test/test_database.py index 49387b8bb9..b1b2999df4 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -193,7 +193,6 @@ def test_list_collection_names(self): def test_list_collection_names_filter(self): listener = OvertCommandListener() - results = listener.results client = rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] db.capped.drop() @@ -204,24 +203,23 @@ def test_list_collection_names_filter(self): filter: Union[None, dict] # Should not send nameOnly. for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): - results.clear() + listener.reset() names = db.list_collection_names(filter=filter) self.assertEqual(names, ["capped"]) - self.assertNotIn("nameOnly", results["started"][0].command) + self.assertNotIn("nameOnly", listener.started_events[0].command) # Should send nameOnly (except on 2.6). for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): - results.clear() + listener.reset() names = db.list_collection_names(filter=filter) self.assertIn("capped", names) self.assertIn("non_capped", names) - command = results["started"][0].command + command = listener.started_events[0].command self.assertIn("nameOnly", command) self.assertTrue(command["nameOnly"]) def test_check_exists(self): listener = OvertCommandListener() - results = listener.results client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) db = client[self.db.name] @@ -231,7 +229,7 @@ def test_check_exists(self): listener.reset() db.drop_collection("unique") db.create_collection("unique", check_exists=False) - self.assertTrue(len(results["started"]) > 0) + self.assertTrue(len(listener.started_events) > 0) self.assertNotIn("listCollections", listener.started_command_names()) def test_list_collections(self): diff --git a/test/test_encryption.py b/test/test_encryption.py index 6c54a90f7a..eaee22ebac 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -814,7 +814,7 @@ def run_test(self, provider_name): provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)] ) self.assertBinaryUUID(datakey_id) - cmd = self.listener.results["started"][-1] + cmd = self.listener.started_events[-1] self.assertEqual("insert", cmd.command_name) self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) docs = list(self.vault.find({"_id": datakey_id})) @@ -1489,7 +1489,7 @@ def _test_automatic(self, expectation_extjson, payload): expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) coll.insert_one(payload) - event = insert_listener.results["started"][0] + event = insert_listener.started_events[0] inserted_doc = event.command["documents"][0] for key, value in expected_document.items(): @@ -1622,7 +1622,7 @@ def test_case_1(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 4) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1643,7 +1643,7 @@ def test_case_2(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 3) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1652,7 +1652,7 @@ def test_case_2(self): self.assertEqual(cev[2].command_name, "find") self.assertEqual(cev[2].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1667,7 +1667,7 @@ def test_case_3(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 2) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") @@ -1684,12 +1684,12 @@ def test_case_4(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1704,7 +1704,7 @@ def test_case_5(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 5) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1727,7 +1727,7 @@ def test_case_6(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 3) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1736,7 +1736,7 @@ def test_case_6(self): self.assertEqual(cev[2].command_name, "find") self.assertEqual(cev[2].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1751,7 +1751,7 @@ def test_case_7(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 2) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") @@ -1768,12 +1768,12 @@ def test_case_8(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1821,8 +1821,8 @@ def test_01_command_error(self): ): with self.assertRaises(OperationFailure): self.encrypted_client.db.decryption_events.aggregate([]) - self.assertEqual(len(self.listener.results["failed"]), 1) - for event in self.listener.results["failed"]: + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: self.assertEqual(event.failure["code"], 123) def test_02_network_error(self): @@ -1834,8 +1834,8 @@ def test_02_network_error(self): ): with self.assertRaises(AutoReconnect): self.encrypted_client.db.decryption_events.aggregate([]) - self.assertEqual(len(self.listener.results["failed"]), 1) - self.assertEqual(self.listener.results["failed"][0].command_name, "aggregate") + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") def test_03_decrypt_error(self): self.encrypted_client.db.decryption_events.insert_one( @@ -1843,8 +1843,8 @@ def test_03_decrypt_error(self): ) with self.assertRaises(EncryptionError): next(self.encrypted_client.db.decryption_events.aggregate([])) - event = self.listener.results["succeeded"][0] - self.assertEqual(len(self.listener.results["failed"]), 0) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) self.assertEqual( event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text ) @@ -1852,8 +1852,8 @@ def test_03_decrypt_error(self): def test_04_decrypt_success(self): self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) next(self.encrypted_client.db.decryption_events.aggregate([])) - event = self.listener.results["succeeded"][0] - self.assertEqual(len(self.listener.results["failed"]), 0) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 0b8200c019..ffa535eeed 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -49,15 +49,14 @@ def tearDownClass(cls): super(TestCommandMonitoring, cls).tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() super(TestCommandMonitoring, self).tearDown() def test_started_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(SON([("ping", 1)]), started.command) @@ -68,10 +67,9 @@ def test_started_simple(self): def test_succeeded_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertEqual("ping", succeeded.command_name) @@ -85,10 +83,9 @@ def test_failed_simple(self): self.client.pymongo_test.command("oops!") except OperationFailure: pass - results = self.listener.results - started = results["started"][0] - failed = results["failed"][0] - self.assertEqual(0, len(results["succeeded"])) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertEqual("oops!", failed.command_name) @@ -99,10 +96,9 @@ def test_failed_simple(self): def test_find_one(self): self.client.pymongo_test.test.find_one() - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( @@ -117,15 +113,14 @@ def test_find_one(self): def test_find_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) - self.listener.results.clear() + self.listener.reset() cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON( @@ -147,15 +142,14 @@ def test_find_and_get_more(self): self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) - self.listener.results.clear() + self.listener.reset() # Next batch. Exhausting the cursor could cause a getMore # that returns id of 0 and no results. next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), @@ -182,16 +176,15 @@ def test_find_with_explain(self): cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) res = coll.find().explain() - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(cmd, started.command) self.assertEqual("explain", started.command_name) @@ -212,7 +205,7 @@ def _test_find_options(self, query, expected_cmd): coll.insert_many([{"x": i} for i in range(5)]) # Test that we publish the unwrapped command. - self.listener.results.clear() + self.listener.reset() if self.client.is_mongos: coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) @@ -220,10 +213,9 @@ def _test_find_options(self, query, expected_cmd): next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(expected_cmd, started.command) self.assertEqual("find", started.command_name) @@ -293,7 +285,7 @@ def test_find_snapshot(self): def test_command_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: @@ -302,10 +294,9 @@ def test_command_and_get_more(self): for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON( @@ -333,13 +324,12 @@ def test_command_and_get_more(self): } self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) - self.listener.results.clear() + self.listener.reset() next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), @@ -377,10 +367,9 @@ def test_get_more_failure(self): next(cursor) except Exception: pass - results = self.listener.results - started = results["started"][0] - self.assertEqual(0, len(results["succeeded"])) - failed = results["failed"][0] + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test")]), started.command @@ -403,16 +392,15 @@ def test_not_primary_error(self): client = single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. client.admin.command("ping") - self.listener.results.clear() + self.listener.reset() error = None try: client.pymongo_test.test.find_one_and_delete({}) except NotPrimaryError as exc: error = exc.errors - results = self.listener.results - started = results["started"][0] - failed = results["failed"][0] - self.assertEqual(0, len(results["succeeded"])) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) self.assertEqual("findAndModify", failed.command_name) @@ -426,16 +414,15 @@ def test_not_primary_error(self): def test_exhaust(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) - self.listener.results.clear() + self.listener.reset() cursor = self.client.pymongo_test.test.find( projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST ) next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON( @@ -462,11 +449,10 @@ def test_exhaust(self): } self.assertEqualReply(expected_result, succeeded.reply) - self.listener.results.clear() + self.listener.reset() tuple(cursor) - results = self.listener.results - self.assertEqual(0, len(results["failed"])) - for event in results["started"]: + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), @@ -476,14 +462,14 @@ def test_exhaust(self): self.assertEqual(cursor.address, event.connection_id) self.assertEqual("pymongo_test", event.database_name) self.assertTrue(isinstance(event.request_id, int)) - for event in results["succeeded"]: + for event in self.listener.succeeded_events: self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(event.duration_micros, int)) self.assertEqual("getMore", event.command_name) self.assertTrue(isinstance(event.request_id, int)) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. - self.assertEqual(0, results["succeeded"][-1].reply["cursor"]["id"]) + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): @@ -492,13 +478,12 @@ def test_kill_cursors(self): cursor = self.client.pymongo_test.test.find().batch_size(5) next(cursor) cursor_id = cursor.cursor_id - self.listener.results.clear() + self.listener.reset() cursor.close() time.sleep(2) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) # There could be more than one cursor_id here depending on # when the thread last ran. @@ -524,14 +509,13 @@ def test_kill_cursors(self): def test_non_bulk_writes(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() # Implied write concern insert_one res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -555,13 +539,12 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # Unacknowledged insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=0)) res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -584,13 +567,12 @@ def test_non_bulk_writes(self): self.assertEqualReply(succeeded.reply, {"ok": 1}) # Explicit write concern insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=1)) res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -615,12 +597,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # delete_many - self.listener.results.clear() + self.listener.reset() res = coll.delete_many({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -645,13 +626,12 @@ def test_non_bulk_writes(self): self.assertEqual(res.deleted_count, reply.get("n")) # replace_one - self.listener.results.clear() + self.listener.reset() oid = ObjectId() res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -689,12 +669,11 @@ def test_non_bulk_writes(self): self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) # update_one - self.listener.results.clear() + self.listener.reset() res = coll.update_one({"x": 1}, {"$inc": {"x": 1}}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -731,12 +710,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # update_many - self.listener.results.clear() + self.listener.reset() res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -773,12 +751,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # delete_one - self.listener.results.clear() + self.listener.reset() _ = coll.delete_one({"x": 3}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -807,14 +784,13 @@ def test_non_bulk_writes(self): # write errors coll.insert_one({"_id": 1}) try: - self.listener.results.clear() + self.listener.reset() coll.insert_one({"_id": 1}) except OperationFailure: pass - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -848,15 +824,14 @@ def test_insert_many(self): # This always uses the bulk API. coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() big = "x" * (1024 * 1024 * 4) docs = [{"_id": i, "big": big} for i in range(6)] coll.insert_many(docs) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] count = 0 operation_id = started[0].operation_id @@ -889,16 +864,15 @@ def test_insert_many_unacknowledged(self): coll = self.client.pymongo_test.test coll.drop() unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) - self.listener.results.clear() + self.listener.reset() # Force two batches on legacy servers. big = "x" * (1024 * 1024 * 12) docs = [{"_id": i, "big": big} for i in range(6)] unack_coll.insert_many(docs) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] operation_id = started[0].operation_id self.assertIsInstance(operation_id, int) @@ -928,7 +902,7 @@ def test_insert_many_unacknowledged(self): def test_bulk_write(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() coll.bulk_write( [ @@ -937,10 +911,9 @@ def test_bulk_write(self): DeleteOne({"_id": 1}), ] ) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) self.assertEqual(3, len(pairs)) @@ -991,7 +964,7 @@ def test_bulk_write(self): @client_context.require_failCommand_fail_point def test_bulk_write_command_network_error(self): coll = self.client.pymongo_test.test - self.listener.results.clear() + self.listener.reset() insert_network_error = { "configureFailPoint": "failCommand", @@ -1004,7 +977,7 @@ def test_bulk_write_command_network_error(self): with self.fail_point(insert_network_error): with self.assertRaises(AutoReconnect): coll.bulk_write([InsertOne({"_id": 1})]) - failed = self.listener.results["failed"] + failed = self.listener.failed_events self.assertEqual(1, len(failed)) event = failed[0] self.assertEqual(event.command_name, "insert") @@ -1015,7 +988,7 @@ def test_bulk_write_command_network_error(self): @client_context.require_failCommand_fail_point def test_bulk_write_command_error(self): coll = self.client.pymongo_test.test - self.listener.results.clear() + self.listener.reset() insert_command_error = { "configureFailPoint": "failCommand", @@ -1029,7 +1002,7 @@ def test_bulk_write_command_error(self): with self.fail_point(insert_command_error): with self.assertRaises(NotPrimaryError): coll.bulk_write([InsertOne({"_id": 1})]) - failed = self.listener.results["failed"] + failed = self.listener.failed_events self.assertEqual(1, len(failed)) event = failed[0] self.assertEqual(event.command_name, "insert") @@ -1040,7 +1013,7 @@ def test_bulk_write_command_error(self): def test_write_errors(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() try: coll.bulk_write( @@ -1054,10 +1027,9 @@ def test_write_errors(self): ) except OperationFailure: pass - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) errors = [] @@ -1084,12 +1056,11 @@ def test_write_errors(self): def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch # this test should still pass. - self.listener.results.clear() + self.listener.reset() tuple(self.client.pymongo_test.test.list_indexes()) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON([("listIndexes", "test"), ("cursor", {})]) self.assertEqualCommand(expected, started.command) @@ -1105,22 +1076,21 @@ def test_first_batch_helper(self): self.assertTrue("cursor" in succeeded.reply) self.assertTrue("ok" in succeeded.reply) - self.listener.results.clear() + self.listener.reset() def test_sensitive_commands(self): listeners = self.client._event_listeners - self.listener.results.clear() + self.listener.reset() cmd = SON([("getnonce", 1)]) listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( delta, {"nonce": "e474f4561c5eb40b", "ok": 1.0}, "getnonce", 12345, self.client.address ) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) self.assertEqual("pymongo_test", started.database_name) @@ -1159,14 +1129,13 @@ def tearDownClass(cls): def setUp(self): super(TestGlobalListener, self).setUp() - self.listener.results.clear() + self.listener.reset() def test_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) self.assertEqualCommand(SON([("ping", 1)]), started.command) diff --git a/test/test_read_concern.py b/test/test_read_concern.py index d5df682fba..3a1c8f3a54 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -14,6 +14,11 @@ """Test the read_concern module.""" +import sys +import unittest + +sys.path[0:0] = [""] + from test import IntegrationTest, client_context from test.utils import OvertCommandListener, rs_or_single_client, single_client @@ -41,7 +46,7 @@ def tearDownClass(cls): super(TestReadConcern, cls).tearDownClass() def tearDown(self): - self.listener.results.clear() + self.listener.reset() super(TestReadConcern, self).tearDown() def test_read_concern(self): @@ -74,9 +79,9 @@ def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll tuple(coll.find({"field": "value"})) - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) @@ -89,23 +94,21 @@ def test_find_command(self): ("readConcern", {"level": "local"}), ] ), - self.listener.results["started"][0].command, + self.listener.started_events[0].command, ) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll tuple(coll.aggregate([{"$match": {"field": "value"}}])) - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) tuple(coll.aggregate([{"$match": {"field": "value"}}])) - self.assertEqual( - {"level": "local"}, self.listener.results["started"][0].command["readConcern"] - ) + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) def test_aggregate_out(self): coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) @@ -113,6 +116,10 @@ def test_aggregate_out(self): # Aggregate with $out supports readConcern MongoDB 4.2 onwards. if client_context.version >= (4, 1): - self.assertIn("readConcern", self.listener.results["started"][0].command) + self.assertIn("readConcern", self.listener.started_events[0].command) else: - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index ae2fa8bcee..1362623dff 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -541,7 +541,7 @@ def test_send_hedge(self): coll = client.test.get_collection("test", read_preference=pref) listener.reset() coll.find_one() - started = listener.results["started"] + started = listener.started_events self.assertEqual(len(started), 1, started) cmd = started[0].command if client_context.is_rs or client_context.is_mongos: diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 4dfc8f068c..5cc4845e32 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -85,11 +85,11 @@ def insert_command_default_write_concern(): ] for name, f in ops: - listener.results.clear() + listener.reset() f() - self.assertGreaterEqual(len(listener.results["started"]), 1) - for i, event in enumerate(listener.results["started"]): + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): self.assertNotIn( "readConcern", event.command, @@ -221,7 +221,7 @@ def test_write_error_details_exposes_errinfo(self): self.assertIsNotNone(ctx.exception.details) assert ctx.exception.details is not None self.assertIsNotNone(ctx.exception.details.get("errInfo")) - for event in listener.results["succeeded"]: + for event in listener.succeeded_events: if event.command_name == "insert": self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) break diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 2b8bc17c58..517e1122b0 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -208,12 +208,12 @@ def test_pool_paused_error_is_retryable(self): # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results["started"] + started = cmd_listener.started_events msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results["succeeded"] + succeeded = cmd_listener.succeeded_events self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results["failed"] + failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 8d556b90ae..7ca1c9c1ef 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -227,9 +227,9 @@ def test_supported_single_statement_no_retry(self): self.addCleanup(client.close) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + listener.reset() method(*args, **kwargs) - for event in listener.results["started"]: + for event in listener.started_events: self.assertNotIn( "txnNumber", event.command, @@ -240,10 +240,10 @@ def test_supported_single_statement_no_retry(self): def test_supported_single_statement_supported_cluster(self): for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + self.listener.reset() method(*args, **kwargs) - commands_started = self.listener.results["started"] - self.assertEqual(len(self.listener.results["succeeded"]), 1, msg) + commands_started = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), 1, msg) first_attempt = commands_started[0] self.assertIn( "lsid", @@ -283,10 +283,10 @@ def test_supported_single_statement_unsupported_cluster(self): for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + self.listener.reset() method(*args, **kwargs) - for event in self.listener.results["started"]: + for event in self.listener.started_events: self.assertNotIn( "txnNumber", event.command, @@ -301,11 +301,11 @@ def test_unsupported_single_statement(self): coll ) + retryable_single_statement_ops(coll_w0): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + self.listener.reset() method(*args, **kwargs) - started_events = self.listener.results["started"] - self.assertEqual(len(self.listener.results["succeeded"]), len(started_events), msg) - self.assertEqual(len(self.listener.results["failed"]), 0, msg) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) for event in started_events: self.assertNotIn( "txnNumber", @@ -324,10 +324,10 @@ def test_server_selection_timeout_not_retried(self): ) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + listener.reset() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 0, msg) + self.assertEqual(len(listener.started_events), 0, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -353,11 +353,11 @@ def raise_error(*args, **kwargs): for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + listener.reset() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 1, msg) + self.assertEqual(len(listener.started_events), 1, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -366,7 +366,7 @@ def test_batch_splitting(self): large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.listener.results.clear() + self.listener.reset() bulk_result = coll.bulk_write( [ InsertOne({"_id": 1, "l": large}), @@ -381,7 +381,7 @@ def test_batch_splitting(self): # Each command should fail and be retried. # With OP_MSG 3 inserts are one batch. 2 updates another. # 2 deletes a third. - self.assertEqual(len(self.listener.results["started"]), 6) + self.assertEqual(len(self.listener.started_events), 6) self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) # Assert the final result expected_result = { @@ -412,7 +412,7 @@ def test_batch_splitting_retry_fails(self): ] ) ) - self.listener.results.clear() + self.listener.reset() with self.client.start_session() as session: initial_txn = session._server_session._transaction_id try: @@ -430,9 +430,9 @@ def test_batch_splitting_retry_fails(self): else: self.fail("bulk_write should have failed") - started = self.listener.results["started"] + started = self.listener.started_events self.assertEqual(len(started), 3) - self.assertEqual(len(self.listener.results["succeeded"]), 1) + self.assertEqual(len(self.listener.succeeded_events), 1) expected_txn = Int64(initial_txn + 1) self.assertEqual(started[0].command["txnNumber"], expected_txn) self.assertEqual(started[0].command["lsid"], session.session_id) @@ -483,9 +483,7 @@ def test_RetryableWriteError_error_label(self): if client_context.version >= Version(4, 4): # In MongoDB 4.4+ we rely on the server returning the error label. - self.assertIn( - "RetryableWriteError", listener.results["succeeded"][-1].reply["errorLabels"] - ) + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) @client_context.require_version_min(4, 4) def test_RetryableWriteError_error_label_RawBSONDocument(self): @@ -575,12 +573,12 @@ def test_pool_paused_error_is_retryable(self): # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results["started"] + started = cmd_listener.started_events msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results["succeeded"] + succeeded = cmd_listener.succeeded_events self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results["failed"] + failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) @@ -605,7 +603,7 @@ def raise_connection_err_select_server(*args, **kwargs): raise ConnectionFailure("Connection refused") for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - listener.results.clear() + listener.reset() topology.select_server = raise_connection_err_select_server with client.start_session() as session: kwargs = copy.deepcopy(kwargs) @@ -616,8 +614,8 @@ def raise_connection_err_select_server(*args, **kwargs): # Each operation should fail on the first attempt and succeed # on the second. method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 1, msg) - retry_cmd = listener.results["started"][0].command + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command sent_txn_id = retry_cmd["txnNumber"] final_txn_id = session._server_session.transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index c3f3762f9a..8d4ffe5e9b 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -97,7 +97,7 @@ def all_hosts_started(): coll.find_one({"name": "John Doe"}) # Confirm all find commands are run against appropriate host. - for command in listener.results["started"]: + for command in listener.started_events: if command.command_name == "find": self.assertEqual(command.connection_id[1], expected_port) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index cae2d7661b..d076ae77b3 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -115,7 +115,7 @@ def frequencies(self, client, listener, n_finds=10): for thread in threads: self.assertTrue(thread.passed) - events = listener.results["started"] + events = listener.started_events self.assertEqual(len(events), n_finds * N_THREADS) nodes = client.nodes self.assertEqual(len(nodes), 2) diff --git a/test/test_session.py b/test/test_session.py index 386bab295c..25d209ebaf 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -58,9 +58,9 @@ def failed(self, event): super(SessionTestListener, self).failed(event) def first_command_started(self): - assert len(self.results["started"]) >= 1, "No command-started events" + assert len(self.started_events) >= 1, "No command-started events" - return self.results["started"][0] + return self.started_events[0] def session_ids(client): @@ -103,7 +103,7 @@ def tearDown(self): """All sessions used in the test must be returned to the pool.""" self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() - for event in self.session_checker_listener.results["started"]: + for event in self.session_checker_listener.started_events: if "lsid" in event.command: used_lsids.add(event.command["lsid"]["id"]) @@ -118,15 +118,15 @@ def _test_ops(self, client, *ops): last_use = s._server_session.last_use start = time.monotonic() self.assertLessEqual(last_use, start) - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) kw["session"] = s f(*args, **kw) self.assertGreaterEqual(s._server_session.last_use, start) - self.assertGreaterEqual(len(listener.results["started"]), 1) - for event in listener.results["started"]: + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (f.__name__, event.command_name), @@ -157,11 +157,11 @@ def _test_ops(self, client, *ops): # No explicit session. for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results["started"]), 1) + self.assertGreaterEqual(len(listener.started_events), 1) lsids = [] - for event in listener.results["started"]: + for event in listener.started_events: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (f.__name__, event.command_name), @@ -205,7 +205,7 @@ def test_implicit_sessions_checkout(self): (client.db.list_collections, []), ] threads = [] - listener.results.clear() + listener.reset() def thread_target(op, *args): res = op(*args) @@ -225,7 +225,7 @@ def thread_target(op, *args): self.assertIsNone(thread.exc) client.close() lsid_set.clear() - for i in listener.results["started"]: + for i in listener.started_events: if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) if len(lsid_set) == 1: @@ -280,13 +280,13 @@ def test_end_sessions(self): self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) client.close() self.assertEqual(len(client._topology._session_pool), 0) - end_sessions = [e for e in listener.results["started"] if e.command_name == "endSessions"] + end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. - listener.results.clear() + listener.reset() client.close() - self.assertEqual(len(listener.results["started"]), 0) + self.assertEqual(len(listener.started_events), 0) def test_client(self): client = self.client @@ -399,10 +399,10 @@ def test_cursor(self): for name, f in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() f(session=s) - self.assertGreaterEqual(len(listener.results["started"]), 1) - for event in listener.results["started"]: + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name), @@ -419,7 +419,7 @@ def test_cursor(self): # No explicit session. for name, f in ops: - listener.results.clear() + listener.reset() f(session=None) event0 = listener.first_command_started() self.assertTrue( @@ -428,7 +428,7 @@ def test_cursor(self): lsid = event0.command["lsid"] - for event in listener.results["started"][1:]: + for event in listener.started_events[1:]: self.assertTrue( "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) ) @@ -600,7 +600,7 @@ def test_aggregate_error(self): # 3.6.0 mongos only validates the aggregate pipeline when the # database exists. coll.insert_one({}) - listener.results.clear() + listener.reset() with self.assertRaises(OperationFailure): coll.aggregate([{"$badOperation": {"bar": 1}}]) @@ -687,7 +687,7 @@ def _test_unacknowledged_ops(self, client, *ops): for f, args, kw in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) @@ -698,7 +698,7 @@ def _test_unacknowledged_ops(self, client, *ops): f(*args, **kw) if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results["started"].pop(0) + event = listener.started_events.pop(0) self.assertEqual("listCollections", event.command_name) self.assertIn( "lsid", @@ -707,19 +707,19 @@ def _test_unacknowledged_ops(self, client, *ops): ) # Should not run any command before raising an error. - self.assertFalse(listener.results["started"], "%s sent command" % (f.__name__,)) + self.assertFalse(listener.started_events, "%s sent command" % (f.__name__,)) self.assertTrue(s.has_ended) # Unacknowledged write without a session does not send an lsid. for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results["started"]), 1) + self.assertGreaterEqual(len(listener.started_events), 1) if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results["started"].pop(0) + event = listener.started_events.pop(0) self.assertEqual("listCollections", event.command_name) self.assertIn( "lsid", @@ -727,7 +727,7 @@ def _test_unacknowledged_ops(self, client, *ops): "%s sent no lsid with %s" % (f.__name__, event.command_name), ) - for event in listener.results["started"]: + for event in listener.started_events: self.assertNotIn( "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) ) @@ -799,26 +799,26 @@ def test_core(self): with self.client.start_session() as sess: self.assertIsNone(sess.cluster_time) self.assertIsNone(sess.operation_time) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one(session=sess) - started = self.listener.results["started"][0] + started = self.listener.started_events[0] cmd = started.command self.assertIsNone(cmd.get("readConcern")) op_time = sess.operation_time self.assertIsNotNone(op_time) - succeeded = self.listener.results["succeeded"][0] + succeeded = self.listener.succeeded_events[0] reply = succeeded.reply self.assertEqual(op_time, reply.get("operationTime")) # No explicit session self.client.pymongo_test.test.insert_one({}) self.assertEqual(sess.operation_time, op_time) - self.listener.results.clear() + self.listener.reset() try: self.client.pymongo_test.command("doesntexist", session=sess) except: pass - failed = self.listener.results["failed"][0] + failed = self.listener.failed_events[0] failed_op_time = failed.failure.get("operationTime") # Some older builds of MongoDB 3.5 / 3.6 return None for # operationTime when a command fails. Make sure we don't @@ -848,14 +848,14 @@ def _test_reads(self, op, exception=None): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() if exception: with self.assertRaises(exception): op(coll, sess) else: op(coll, sess) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -887,10 +887,10 @@ def _test_writes(self, op): op(coll, sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=sess) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -938,9 +938,9 @@ def _test_no_read_concern(self, op): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() op(coll, sess) - rc = self.listener.results["started"][0].command.get("readConcern") + rc = self.listener.started_events[0].command.get("readConcern") self.assertIsNone(rc) @client_context.require_no_standalone @@ -1001,19 +1001,19 @@ def test_get_more_does_not_include_read_concern(self): coll.insert_many([{}, {}]) cursor = coll.find({}).batch_size(1) next(cursor) - self.listener.results.clear() + self.listener.reset() list(cursor) - started = self.listener.results["started"][0] + started = self.listener.started_events[0] self.assertEqual(started.command_name, "getMore") self.assertIsNone(started.command.get("readConcern")) def test_session_not_causal(self): with self.client.start_session(causal_consistency=False) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -1023,10 +1023,10 @@ def test_session_not_causal(self): def test_server_not_causal(self): with self.client.start_session(causal_consistency=True) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -1038,17 +1038,17 @@ def test_read_concern(self): with self.client.start_session(causal_consistency=True) as s: coll = self.client.pymongo_test.test coll.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results["started"][0].command.get("readConcern") + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) self.assertIsNone(read_concern.get("level")) self.assertIsNotNone(read_concern.get("afterClusterTime")) coll = coll.with_options(read_concern=ReadConcern("majority")) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results["started"][0].command.get("readConcern") + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) self.assertEqual(read_concern.get("level"), "majority") self.assertIsNotNone(read_concern.get("afterClusterTime")) @@ -1056,17 +1056,17 @@ def test_read_concern(self): @client_context.require_no_standalone def test_cluster_time_with_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNotNone(after_cluster_time) @client_context.require_standalone def test_cluster_time_no_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNone(after_cluster_time) @@ -1129,22 +1129,22 @@ def insert_and_aggregate(): ] for name, f in ops: - listener.results.clear() + listener.reset() # Call f() twice, insert to advance clusterTime, call f() again. f() f() collection.insert_one({}) f() - self.assertGreaterEqual(len(listener.results["started"]), 1) - for i, event in enumerate(listener.results["started"]): + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): self.assertTrue( "$clusterTime" in event.command, "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), ) if i > 0: - succeeded = listener.results["succeeded"][i - 1] + succeeded = listener.succeeded_events[i - 1] self.assertTrue( "$clusterTime" in succeeded.reply, "%s received no $clusterTime with %s" diff --git a/test/test_transactions.py b/test/test_transactions.py index 02e691329e..dc58beb930 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -343,11 +343,11 @@ def test_transaction_starts_with_batched_write(self): self.assertEqual( ["insert", "insert", "commitTransaction"], listener.started_command_names() ) - first_cmd = listener.results["started"][0].command + first_cmd = listener.started_events[0].command self.assertTrue(first_cmd["startTransaction"]) lsid = first_cmd["lsid"] txn_number = first_cmd["txnNumber"] - for event in listener.results["started"][1:]: + for event in listener.started_events[1:]: self.assertNotIn("startTransaction", event.command) self.assertEqual(lsid, event.command["lsid"]) self.assertEqual(txn_number, event.command["txnNumber"]) @@ -459,7 +459,7 @@ def callback(session): # Create the collection. coll.insert_one({}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): @@ -491,7 +491,7 @@ def callback(session): } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): @@ -521,7 +521,7 @@ def callback(session): } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index a2fd059d21..7dbf2c867d 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -83,7 +83,7 @@ def test_command_options(self): self.addCleanup(coll.delete_many, {}) list(coll.find(batch_size=25)) client.admin.command("ping") - self.assertServerApiInAllCommands(listener.results["started"]) + self.assertServerApiInAllCommands(listener.started_events) @client_context.require_version_min(4, 7) @client_context.require_transactions @@ -100,7 +100,7 @@ def test_command_options_txn(self): coll.insert_many([{} for _ in range(100)], session=s) list(coll.find(batch_size=25, session=s)) client.test.command("find", "test", session=s) - self.assertServerApiInAllCommands(listener.results["started"]) + self.assertServerApiInAllCommands(listener.started_events) if __name__ == "__main__": diff --git a/test/utils.py b/test/utils.py index 6b0876a158..842e9e3a7b 100644 --- a/test/utils.py +++ b/test/utils.py @@ -29,7 +29,7 @@ from collections import abc, defaultdict from functools import partial from test import client_context, db_pwd, db_user -from typing import Any +from typing import Any, List from bson import json_util from bson.objectid import ObjectId @@ -140,26 +140,43 @@ def pool_closed(self, event): self.add_event(event) -class EventListener(monitoring.CommandListener): +class EventListener(BaseListener, monitoring.CommandListener): def __init__(self): + super(EventListener, self).__init__() self.results = defaultdict(list) - def started(self, event): - self.results["started"].append(event) + @property + def started_events(self) -> List[monitoring.CommandStartedEvent]: + return self.results["started"] - def succeeded(self, event): - self.results["succeeded"].append(event) + @property + def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: + return self.results["succeeded"] - def failed(self, event): - self.results["failed"].append(event) + @property + def failed_events(self) -> List[monitoring.CommandFailedEvent]: + return self.results["failed"] - def started_command_names(self): + def started(self, event: monitoring.CommandStartedEvent) -> None: + self.started_events.append(event) + self.add_event(event) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + self.succeeded_events.append(event) + self.add_event(event) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + self.failed_events.append(event) + self.add_event(event) + + def started_command_names(self) -> List[str]: """Return list of command names started.""" - return [event.command_name for event in self.results["started"]] + return [event.command_name for event in self.started_events] - def reset(self): + def reset(self) -> None: """Reset the state of this listener.""" self.results.clear() + super(EventListener, self).reset() class TopologyEventListener(monitoring.TopologyListener): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index f8ad26efe7..8528ecb8c7 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -371,16 +371,16 @@ def run_operations(self, sessions, collection, ops, in_with_transaction=False): # TODO: factor with test_command_monitoring.py def check_events(self, test, listener, session_ids): - res = listener.results + events = listener.started_events if not len(test["expectations"]): return # Give a nicer message when there are missing or extra events - cmds = decode_raw([event.command for event in res["started"]]) - self.assertEqual(len(res["started"]), len(test["expectations"]), cmds) + cmds = decode_raw([event.command for event in events]) + self.assertEqual(len(events), len(test["expectations"]), cmds) for i, expectation in enumerate(test["expectations"]): event_type = next(iter(expectation)) - event = res["started"][i] + event = events[i] # The tests substitute 42 for any number other than 0. if event.command_name == "getMore" and event.command["getMore"]: From 363e0b2b2c12d0f0c4ef064b390086fea4688dc3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Nov 2022 15:13:51 -0600 Subject: [PATCH 0815/2111] PYTHON-2818 Add documentation and changelog (#1115) --- doc/changelog.rst | 11 ++++++++--- doc/examples/authentication.rst | 18 ++++++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index ebd796116e..89d3f2fdde 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,15 +4,20 @@ Changelog Changes in Version 4.3.3 ------------------------ -Version 4.3.3 documents support for :ref:`CSFLE on-demand credentials` for cloud KMS providers, and fixes the following bugs: +Version 4.3.3 documents support for the following: +- :ref:`CSFLE on-demand credentials` for cloud KMS providers. +- Authentication support for :ref:`EKS Clusters`. +- Added the :ref:`timeout-example` example page to improve the documentation + for :func:`pymongo.timeout`. + +Bug Fixes +......... - Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream` and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks instead of line by line (`PYTHON-3502`_). - Improved performance of :meth:`gridfs.grid_file.GridOut.read` and :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_). -- Added the :ref:`timeout-example` example page to improve the documentation - for :func:`pymongo.timeout`. Issues Resolved ............... diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 862ac40db2..a984d17fc0 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -311,6 +311,7 @@ A sample URI would be:: .. note:: The access_key_id, secret_access_key, and session_token passed into the URI MUST be `percent escaped`_. + AWS Lambda (Environment Variables) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -331,6 +332,23 @@ for the access key ID, secret access key, and session token, respectively:: PyMongo will use credentials set via the environment variables. These environment variables MUST NOT be `percent escaped`_. + +.. _EKS Clusters: + +EKS Clusters +~~~~~~~~~~~~ + +Applications using the `Authenticating users for your cluster from an OpenID Connect identity provider `_ capability on EKS can now +use the provided credentials, by giving the associated IAM User +`sts:AssumeRoleWithWebIdentity `_ +permission. + +When the username and password are not provided, the MONGODB-AWS mechanism +is set, and ``AWS_WEB_IDENTITY_TOKEN_FILE``, ``AWS_ROLE_ARN``, and +optional ``AWS_ROLE_SESSION_NAME`` are available, the driver will use +an ``AssumeRoleWithWebIdentity`` call to retrieve temporary credentials. +The application must be using ``pymongo_auth_aws`` >= 1.1.0 for EKS support. + ECS Container ~~~~~~~~~~~~~ From 3ab73905dc957c919112ad0def10ef27024659c8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 15 Nov 2022 17:43:34 -0800 Subject: [PATCH 0816/2111] PYTHON-3500 Resync retryable tests to fix serverless failures (#1116) --- .../unified/handshakeError.json | 106 ++++++++++++------ .../unified/handshakeError.json | 36 +++--- 2 files changed, 91 insertions(+), 51 deletions(-) diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json index 58bbce66a8..2921d8a954 100644 --- a/test/retryable_reads/unified/handshakeError.json +++ b/test/retryable_reads/unified/handshakeError.json @@ -1,6 +1,6 @@ { "description": "retryable reads handshake failures", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.2", @@ -62,7 +62,7 @@ ], "tests": [ { - "description": "listDatabases succeeds after retryable handshake network error", + "description": "client.listDatabases succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -155,7 +155,7 @@ ] }, { - "description": "listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "client.listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -248,7 +248,7 @@ ] }, { - "description": "listDatabaseNames succeeds after retryable handshake network error", + "description": "client.listDatabaseNames succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -338,7 +338,7 @@ ] }, { - "description": "listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "client.listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -428,7 +428,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake network error", + "description": "client.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -522,7 +527,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "client.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -616,7 +626,12 @@ ] }, { - "description": "aggregate succeeds after retryable handshake network error", + "description": "database.aggregate succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -716,7 +731,12 @@ ] }, { - "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -816,7 +836,7 @@ ] }, { - "description": "listCollections succeeds after retryable handshake network error", + "description": "database.listCollections succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -909,7 +929,7 @@ ] }, { - "description": "listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.listCollections succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1002,7 +1022,7 @@ ] }, { - "description": "listCollectionNames succeeds after retryable handshake network error", + "description": "database.listCollectionNames succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1095,7 +1115,7 @@ ] }, { - "description": "listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.listCollectionNames succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1188,7 +1208,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake network error", + "description": "database.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -1282,7 +1307,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "database.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -1376,7 +1406,7 @@ ] }, { - "description": "aggregate succeeds after retryable handshake network error", + "description": "collection.aggregate succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1469,7 +1499,7 @@ ] }, { - "description": "aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1562,7 +1592,7 @@ ] }, { - "description": "countDocuments succeeds after retryable handshake network error", + "description": "collection.countDocuments succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1655,7 +1685,7 @@ ] }, { - "description": "countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1748,7 +1778,7 @@ ] }, { - "description": "estimatedDocumentCount succeeds after retryable handshake network error", + "description": "collection.estimatedDocumentCount succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1838,7 +1868,7 @@ ] }, { - "description": "estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1928,7 +1958,7 @@ ] }, { - "description": "distinct succeeds after retryable handshake network error", + "description": "collection.distinct succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2022,7 +2052,7 @@ ] }, { - "description": "distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.distinct succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2116,7 +2146,7 @@ ] }, { - "description": "find succeeds after retryable handshake network error", + "description": "collection.find succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2209,7 +2239,7 @@ ] }, { - "description": "find succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.find succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2302,7 +2332,7 @@ ] }, { - "description": "findOne succeeds after retryable handshake network error", + "description": "collection.findOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2395,7 +2425,7 @@ ] }, { - "description": "findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2488,7 +2518,7 @@ ] }, { - "description": "listIndexes succeeds after retryable handshake network error", + "description": "collection.listIndexes succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2578,7 +2608,7 @@ ] }, { - "description": "listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2668,7 +2698,7 @@ ] }, { - "description": "listIndexNames succeeds after retryable handshake network error", + "description": "collection.listIndexNames succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -2758,7 +2788,7 @@ ] }, { - "description": "listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -2848,7 +2878,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake network error", + "description": "collection.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -2942,7 +2977,12 @@ ] }, { - "description": "createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index e07e5412b2..df37bd7232 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -54,7 +54,7 @@ ], "tests": [ { - "description": "insertOne succeeds after retryable handshake network error", + "description": "collection.insertOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -150,7 +150,7 @@ ] }, { - "description": "insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.insertOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -246,7 +246,7 @@ ] }, { - "description": "insertMany succeeds after retryable handshake network error", + "description": "collection.insertMany succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -344,7 +344,7 @@ ] }, { - "description": "insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.insertMany succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -442,7 +442,7 @@ ] }, { - "description": "deleteOne succeeds after retryable handshake network error", + "description": "collection.deleteOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -535,7 +535,7 @@ ] }, { - "description": "deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -628,7 +628,7 @@ ] }, { - "description": "replaceOne succeeds after retryable handshake network error", + "description": "collection.replaceOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -724,7 +724,7 @@ ] }, { - "description": "replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -820,7 +820,7 @@ ] }, { - "description": "updateOne succeeds after retryable handshake network error", + "description": "collection.updateOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -918,7 +918,7 @@ ] }, { - "description": "updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.updateOne succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1016,7 +1016,7 @@ ] }, { - "description": "findOneAndDelete succeeds after retryable handshake network error", + "description": "collection.findOneAndDelete succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1109,7 +1109,7 @@ ] }, { - "description": "findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1202,7 +1202,7 @@ ] }, { - "description": "findOneAndReplace succeeds after retryable handshake network error", + "description": "collection.findOneAndReplace succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1298,7 +1298,7 @@ ] }, { - "description": "findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1394,7 +1394,7 @@ ] }, { - "description": "findOneAndUpdate succeeds after retryable handshake network error", + "description": "collection.findOneAndUpdate succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1492,7 +1492,7 @@ ] }, { - "description": "findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", @@ -1590,7 +1590,7 @@ ] }, { - "description": "bulkWrite succeeds after retryable handshake network error", + "description": "collection.bulkWrite succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", @@ -1692,7 +1692,7 @@ ] }, { - "description": "bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "description": "collection.bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", "operations": [ { "name": "failPoint", From b290f7b1a17f7f2195503034e627b922936b98bc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Nov 2022 12:26:32 -0800 Subject: [PATCH 0817/2111] PYTHON-3526 Fix mockup tests (#1119) --- test/mockupdb/test_mixed_version_sharded.py | 3 +-- test/mockupdb/test_mongos_command_read_mode.py | 3 +-- test/mockupdb/test_op_msg_read_preference.py | 3 +-- test/mockupdb/test_reset_and_request_check.py | 3 +-- test/mockupdb/test_slave_okay_rs.py | 3 +-- test/mockupdb/test_slave_okay_sharded.py | 3 +-- test/mockupdb/test_slave_okay_single.py | 3 +-- 7 files changed, 7 insertions(+), 14 deletions(-) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 7e12fcab35..dc2cd57380 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -19,11 +19,10 @@ from queue import Queue from mockupdb import MockupDB, go +from operations import upgrades # type: ignore[import] from pymongo import MongoClient -from .operations import upgrades - class TestMixedVersionSharded(unittest.TestCase): def setup_server(self, upgrade): diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index a84907d8cf..997f5af118 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -16,6 +16,7 @@ import unittest from mockupdb import MockupDB, OpMsg, going +from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -24,8 +25,6 @@ read_pref_mode_from_name, ) -from .operations import operations - class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 37882912bb..b377f4cf69 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -18,6 +18,7 @@ from typing import Any from mockupdb import CommandBase, MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference from pymongo.read_preferences import ( @@ -26,8 +27,6 @@ read_pref_mode_from_name, ) -from .operations import operations - class OpMsgReadPrefBase(unittest.TestCase): single_mongod = False diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index bc00e38a09..841cd41846 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -17,13 +17,12 @@ import unittest from mockupdb import MockupDB, going, wait_until +from operations import operations # type: ignore[import] from pymongo import MongoClient from pymongo.errors import ConnectionFailure from pymongo.server_type import SERVER_TYPE -from .operations import operations - class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 7ac489117a..225d8e4071 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -20,11 +20,10 @@ import unittest from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient -from .operations import operations - class TestSlaveOkayRS(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 51e422595e..18f2016126 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -23,12 +23,11 @@ from queue import Queue from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name -from .operations import operations - class TestSlaveOkaySharded(unittest.TestCase): def setup_server(self): diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index bd36c77a04..4b2846490f 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -23,13 +23,12 @@ import unittest from mockupdb import MockupDB, going +from operations import operations # type: ignore[import] from pymongo import MongoClient from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE -from .operations import operations - def topology_type_name(client): topology_type = client._topology._description.topology_type From cde9adf6aba388e2ed6bc135750d6a01ec63c660 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Nov 2022 12:27:00 -0800 Subject: [PATCH 0818/2111] PYTHON-3527 + PYTHON-3528 Fix no-server tests (#1118) Fix TestCreateEntities when no server is running. Fix no-server test_typeddict_find_notrequired. --- test/test_create_entities.py | 29 +++++++++++++++++++---------- test/test_mypy.py | 6 +++++- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index ad0ac9347e..1e46614da0 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -11,11 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import sys import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest from test.unified_format import UnifiedSpecTestMixinV1 -class TestCreateEntities(unittest.TestCase): +class TestCreateEntities(IntegrationTest): def test_store_events_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() spec = { @@ -91,7 +96,7 @@ def test_store_all_others_as_entities(self): { "name": "insertOne", "object": "collection0", - "arguments": {"document": {"_id": 1, "x": 44}}, + "arguments": {"document": {"_id": 2, "x": 44}}, }, ], }, @@ -101,15 +106,19 @@ def test_store_all_others_as_entities(self): ], } + self.client.dat.dat.delete_many({}) self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) self.scenario_runner.entity_map["client0"].close() - final_entity_map = self.scenario_runner.entity_map - for entity in ["errors", "failures"]: - self.assertIn(entity, final_entity_map) - self.assertGreaterEqual(len(final_entity_map[entity]), 0) - self.assertEqual(type(final_entity_map[entity]), list) - for entity in ["successes", "iterations"]: - self.assertIn(entity, final_entity_map) - self.assertEqual(type(final_entity_map[entity]), int) + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_mypy.py b/test/test_mypy.py index 58e69853ca..3b29bbf20e 100644 --- a/test/test_mypy.py +++ b/test/test_mypy.py @@ -15,6 +15,7 @@ """Test that each file in mypy_fails/ actually fails mypy, and test some sample client code that uses PyMongo typings.""" import os +import sys import tempfile import unittest from typing import TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Union @@ -51,7 +52,9 @@ class ImplicitMovie(TypedDict): except ImportError: api = None # type: ignore[assignment] -from test import IntegrationTest +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context from test.utils import rs_or_single_client from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode @@ -430,6 +433,7 @@ def test_typeddict_empty_document_type(self) -> None: # This should fail because _id is not included in our TypedDict definition. assert out["_id"] # type:ignore[typeddict-item] + @client_context.require_connection def test_typeddict_find_notrequired(self): if NotRequired is None or ImplicitMovie is None: raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") From 1edbfad0c8afc17e899b7982cd4c7942deab16f1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Nov 2022 12:27:15 -0800 Subject: [PATCH 0819/2111] PYTHON-3529 Improve reliability of test_list_databases (#1120) --- test/test_client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/test_client.py b/test/test_client.py index a33881fded..53a234a33d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -791,6 +791,10 @@ def test_list_databases(self): self.assertIsInstance(cursor, CommandCursor) helper_docs = list(cursor) self.assertTrue(len(helper_docs) > 0) + # sizeOnDisk can change between calls. + for doc_list in (helper_docs, cmd_docs): + for doc in doc_list: + doc.pop("sizeOnDisk", None) self.assertEqual(helper_docs, cmd_docs) for doc in helper_docs: self.assertIs(type(doc), dict) From 3d032768a0c617e4c594cb3f971df1cd06b3e0d4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Nov 2022 14:59:25 -0600 Subject: [PATCH 0820/2111] BUMP 4.3.3 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 78c325a23c..7eff43faa8 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 3, 3) def get_version_string() -> str: From f92dd40c8696c0e26ba7d544e95ddfd19402f7df Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Nov 2022 15:00:04 -0600 Subject: [PATCH 0821/2111] BUMP 4.4.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 7eff43faa8..78c325a23c 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 3, 3) +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") def get_version_string() -> str: From 0c6aacb0fb20687e6f99b4886fdd0951fd8347ae Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 22 Nov 2022 13:34:17 -0600 Subject: [PATCH 0822/2111] PYTHON-3531 Pre-commit failure due to flake8 repository move (#1122) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d72d51971c..cfe0db31cf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,7 +30,7 @@ repos: files: \.py$ args: [--profile=black] -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/PyCQA/flake8 rev: 3.9.2 hooks: - id: flake8 From ee2badff75e9523b838f1a9242cfde2018b74703 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Nov 2022 05:27:45 -0600 Subject: [PATCH 0823/2111] PYTHON-3524 Support passing list of strings to create_index (#1121) --- doc/changelog.rst | 14 ++++++++++++++ pymongo/collection.py | 15 +++++++++------ pymongo/cursor.py | 9 +++++---- pymongo/helpers.py | 14 ++++++++++++-- pymongo/operations.py | 16 +++++++++------- test/test_collection.py | 6 +++++- test/test_cursor.py | 18 ++++++++++++++++++ 7 files changed, 72 insertions(+), 20 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 89d3f2fdde..6913f09fc3 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,20 @@ Changelog ========= +Changes in Version 4.4 +----------------------- + +- Added support for passing a list containing (key, direction) pairs + or keys to :meth:`~pymongo.collection.Collection.create_index`. + +Issues Resolved +............... + +See the `PyMongo 4.4 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354 + Changes in Version 4.3.3 ------------------------ diff --git a/pymongo/collection.py b/pymongo/collection.py index 600d73c4bc..77f154f5e7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -26,7 +26,6 @@ NoReturn, Optional, Sequence, - Tuple, Union, ) @@ -62,6 +61,8 @@ ReplaceOne, UpdateMany, UpdateOne, + _IndexKeyHint, + _IndexList, ) from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.results import ( @@ -85,9 +86,6 @@ UpdateOne, UpdateMany, ] -# Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] -_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_IndexKeyHint = Union[str, _IndexList] class ReturnDocument(object): @@ -1948,7 +1946,9 @@ def create_index( ) -> str: """Creates an index on this collection. - Takes either a single key or a list of (key, direction) pairs. + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, @@ -1964,7 +1964,7 @@ def create_index( ascending we need to use a list of tuples:: >>> my_collection.create_index([("mike", pymongo.DESCENDING), - ... ("eliot", pymongo.ASCENDING)]) + ... "eliot"]) All optional index creation parameters should be passed as keyword arguments to this method. For example:: @@ -2025,6 +2025,9 @@ def create_index( - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword + .. versionchanged:: 4.4 + Allow passing a list containing (key, direction) pairs + or keys for the ``keys`` parameter. .. versionchanged:: 4.1 Added ``comment`` parameter. .. versionchanged:: 3.11 diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 658c4276ef..ccf0bfd71b 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -146,7 +146,7 @@ def close(self): self.sock = None -_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] +_Sort = Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]] _Hint = Union[str, _Sort] @@ -832,15 +832,16 @@ def sort( """Sorts this cursor's results. Pass a field name and a direction, either - :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) - To sort by multiple fields, pass a list of (key, direction) pairs:: + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: for doc in collection.find().sort([ - ('field1', pymongo.ASCENDING), + 'field1', ('field2', pymongo.DESCENDING)]): print(doc) diff --git a/pymongo/helpers.py b/pymongo/helpers.py index dd210db188..31325c8af2 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -80,6 +80,8 @@ def _index_list(key_or_list, direction=None): Takes such a list, or a single key, or a single key and direction. """ if direction is not None: + if not isinstance(key_or_list, str): + raise TypeError("Expected a string and a direction") return [(key_or_list, direction)] else: if isinstance(key_or_list, str): @@ -88,7 +90,12 @@ def _index_list(key_or_list, direction=None): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, key_or_list must be an instance of list") - return key_or_list + values = [] + for item in key_or_list: + if isinstance(item, str): + item = (item, ASCENDING) + values.append(item) + return values def _index_document(index_list): @@ -108,7 +115,10 @@ def _index_document(index_list): raise ValueError("key_or_list must not be the empty list") index: SON[str, Any] = SON() - for (key, value) in index_list: + for item in index_list: + if isinstance(item, str): + item = (item, ASCENDING) + key, value = item if not isinstance(key, str): raise TypeError("first item in each key pair must be an instance of str") if not isinstance(value, (str, int, abc.Mapping)): diff --git a/pymongo/operations.py b/pymongo/operations.py index 92a4dad0ac..f939cd479f 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -22,6 +22,10 @@ from pymongo.helpers import _gen_index_name, _index_document, _index_list from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +# Hint supports index name, "myIndex", or list of either strings or index pairs: [('x', 1), ('y', -1), 'z''] +_IndexList = Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]] +_IndexKeyHint = Union[str, _IndexList] + class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" @@ -55,10 +59,6 @@ def __ne__(self, other: Any) -> bool: return not self == other -_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_IndexKeyHint = Union[str, _IndexList] - - class DeleteOne(object): """Represents a delete_one operation.""" @@ -435,7 +435,9 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: For use with :meth:`~pymongo.collection.Collection.create_indexes`. - Takes either a single key or a list of (key, direction) pairs. + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. The key(s) must be an instance of :class:`basestring` (:class:`str` in python 3), and the direction(s) must be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, @@ -477,8 +479,8 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: server version. :Parameters: - - `keys`: a single key or a list of (key, direction) - pairs specifying the index to create + - `keys`: a single key or a list containing (key, direction) pairs + or keys specifying the index to create - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword arguments diff --git a/test/test_collection.py b/test/test_collection.py index 49a7017ef3..b6883f4ece 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -315,6 +315,10 @@ def test_create_index(self): with self.write_concern_collection() as coll: coll.create_index([("hello", DESCENDING)]) + db.test.create_index(["hello", "world"]) + db.test.create_index(["hello", ("world", DESCENDING)]) + db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + def test_drop_index(self): db = self.db db.test.drop_indexes() @@ -1680,7 +1684,7 @@ def to_list(things): self.assertRaises(TypeError, db.test.find, sort=5) self.assertRaises(TypeError, db.test.find, sort="hello") - self.assertRaises(ValueError, db.test.find, sort=["hello", 1]) + self.assertRaises(TypeError, db.test.find, sort=["hello", 1]) # TODO doesn't actually test functionality, just that it doesn't blow up def test_cursor_timeout(self): diff --git a/test/test_cursor.py b/test/test_cursor.py index 96d83fecf1..e96efb92b0 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -40,6 +40,7 @@ from pymongo.collation import Collation from pymongo.cursor import Cursor, CursorType from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure +from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -366,6 +367,21 @@ def test_hint(self): break self.assertRaises(InvalidOperation, a.hint, spec) + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + def test_hint_by_name(self): db = self.db db.test.drop() @@ -715,6 +731,8 @@ def test_sort(self): (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) ] self.assertEqual(result, expected) + result = [(i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), "a"])] + self.assertEqual(result, expected) a = db.test.find() a.sort("x", ASCENDING) From 26efc0f43ddb937a5f37bffc003a56627c1a8252 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 1 Dec 2022 17:54:15 -0800 Subject: [PATCH 0824/2111] PYTHON-3388 Propagate Original Error for Write Errors Labeled NoWritesPerformed (#1117) --- pymongo/mongo_client.py | 10 ++- .../insertOne-noWritesPerformedError.json | 90 +++++++++++++++++++ test/test_retryable_writes.py | 59 ++++++++++++ 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 test/retryable_writes/unified/insertOne-noWritesPerformedError.json diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 7d16e58777..dccd4bb6b1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1408,12 +1408,18 @@ def is_retrying(): if retryable_error: session._unpin() if not retryable_error or (is_retrying() and not multiple_retries): - raise + if exc.has_error_label("NoWritesPerformed") and last_error: + raise last_error from exc + else: + raise if bulk: bulk.retrying = True else: retrying = True - last_error = exc + if not exc.has_error_label("NoWritesPerformed"): + last_error = exc + if last_error is None: + last_error = exc @_csot.apply def _retryable_read(self, func, read_pref, session, address=None, retryable=True): diff --git a/test/retryable_writes/unified/insertOne-noWritesPerformedError.json b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json new file mode 100644 index 0000000000..3194e91c5c --- /dev/null +++ b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json @@ -0,0 +1,90 @@ +{ + "description": "retryable-writes insertOne noWritesPerformedErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "no-writes-performed-collection" + } + } + ], + "tests": [ + { + "description": "InsertOne fails after NoWritesPerformed error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 64, + "errorLabels": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 64, + "errorLabelsContain": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "no-writes-performed-collection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 7ca1c9c1ef..a22c776534 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -26,6 +26,7 @@ from test.utils import ( CMAPListener, DeprecationFilter, + EventListener, OvertCommandListener, TestCreator, rs_or_single_client, @@ -45,6 +46,7 @@ ) from pymongo.mongo_client import MongoClient from pymongo.monitoring import ( + CommandSucceededEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, ConnectionCheckOutFailedReason, @@ -64,6 +66,26 @@ _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super(InsertEventListener, self).succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) + + class TestAllScenarios(SpecRunner): RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True @@ -581,6 +603,43 @@ def test_pool_paused_error_is_retryable(self): failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) + @client_context.require_failCommand_fail_point + @client_context.require_replica_set + @client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) + client.test.test.drop() + self.addCleanup(client.close) + cmd_listener.reset() + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): From ccade9bc058e0ccdd4363a09af1e8ac1bf76856a Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 2 Dec 2022 12:52:01 -0800 Subject: [PATCH 0825/2111] PYTHON-3351 Provide access to raw result document when the server returns an error for a command (#1125) --- .../aggregate-merge-errorResponse.json | 90 ++++++++++++ .../crud/unified/bulkWrite-errorResponse.json | 88 ++++++++++++ .../crud/unified/deleteOne-errorResponse.json | 82 +++++++++++ test/crud/unified/distinct-comment.json | 12 +- .../findOneAndUpdate-errorResponse.json | 132 ++++++++++++++++++ .../crud/unified/insertOne-errorResponse.json | 82 +++++++++++ .../crud/unified/updateOne-errorResponse.json | 87 ++++++++++++ test/unified_format.py | 6 +- 8 files changed, 576 insertions(+), 3 deletions(-) create mode 100644 test/crud/unified/aggregate-merge-errorResponse.json create mode 100644 test/crud/unified/bulkWrite-errorResponse.json create mode 100644 test/crud/unified/deleteOne-errorResponse.json create mode 100644 test/crud/unified/findOneAndUpdate-errorResponse.json create mode 100644 test/crud/unified/insertOne-errorResponse.json create mode 100644 test/crud/unified/updateOne-errorResponse.json diff --git a/test/crud/unified/aggregate-merge-errorResponse.json b/test/crud/unified/aggregate-merge-errorResponse.json new file mode 100644 index 0000000000..6c7305fd91 --- /dev/null +++ b/test/crud/unified/aggregate-merge-errorResponse.json @@ -0,0 +1,90 @@ +{ + "description": "aggregate-merge-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 1 + } + ] + } + ], + "tests": [ + { + "description": "aggregate $merge DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.1", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database0", + "arguments": { + "pipeline": [ + { + "$documents": [ + { + "_id": 2, + "x": 1 + } + ] + }, + { + "$merge": { + "into": "test", + "whenMatched": "fail" + } + } + ] + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "_id": 1 + }, + "keyValue": { + "_id": 2 + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-errorResponse.json b/test/crud/unified/bulkWrite-errorResponse.json new file mode 100644 index 0000000000..157637c713 --- /dev/null +++ b/test/crud/unified/bulkWrite-errorResponse.json @@ -0,0 +1,88 @@ +{ + "description": "bulkWrite-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "bulkWrite operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-errorResponse.json b/test/crud/unified/deleteOne-errorResponse.json new file mode 100644 index 0000000000..1f3a266f1e --- /dev/null +++ b/test/crud/unified/deleteOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "deleteOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "delete operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json index 0669d4f30a..11bce9ac9d 100644 --- a/test/crud/unified/distinct-comment.json +++ b/test/crud/unified/distinct-comment.json @@ -64,7 +64,11 @@ "key": "value" } }, - "expectResult": [ 11, 22, 33 ] + "expectResult": [ + 11, + 22, + 33 + ] } ], "expectEvents": [ @@ -105,7 +109,11 @@ "filter": {}, "comment": "comment" }, - "expectResult": [ 11, 22, 33 ] + "expectResult": [ + 11, + 22, + 33 + ] } ], "expectEvents": [ diff --git a/test/crud/unified/findOneAndUpdate-errorResponse.json b/test/crud/unified/findOneAndUpdate-errorResponse.json new file mode 100644 index 0000000000..5023a450f3 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-errorResponse.json @@ -0,0 +1,132 @@ +{ + "description": "findOneAndUpdate-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "unique": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "foo" + } + }, + "upsert": true + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "x": 1 + }, + "keyValue": { + "x": "foo" + } + } + } + } + ] + }, + { + "description": "findOneAndUpdate document validation errInfo is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "validator": { + "x": { + "$type": "string" + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 121, + "errorResponse": { + "errInfo": { + "failingDocumentId": 1, + "details": { + "$$type": "object" + } + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-errorResponse.json b/test/crud/unified/insertOne-errorResponse.json new file mode 100644 index 0000000000..04ea6a7451 --- /dev/null +++ b/test/crud/unified/insertOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "insertOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "insert operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-errorResponse.json b/test/crud/unified/updateOne-errorResponse.json new file mode 100644 index 0000000000..0ceddbc4fc --- /dev/null +++ b/test/crud/unified/updateOne-errorResponse.json @@ -0,0 +1,87 @@ +{ + "description": "updateOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "update operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 12eaceed35..5afc746859 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -862,7 +862,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.10") + SCHEMA_VERSION = Version.from_string("1.12") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -994,6 +994,10 @@ def process_error(self, exception, spec): error_labels_contain = spec.get("errorLabelsContain") error_labels_omit = spec.get("errorLabelsOmit") expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + for k in error_response.keys(): + self.assertEqual(error_response[k], exception.details[k]) if is_error: # already satisfied because exception was raised From 64192663954569fab553c60d3259017a1a7b5fcb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 6 Dec 2022 15:29:48 -0800 Subject: [PATCH 0826/2111] PYTHON-3492 Test mongocryptd is not spawned when shared library is loaded (#1124) --- test/test_encryption.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index eaee22ebac..3c422b8c87 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1910,6 +1910,38 @@ def test_bypassAutoEncryption(self): with self.assertRaises(ServerSelectionTimeoutError): mongocryptd_client.admin.command("ping") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_via_loading_shared_library(self): + key_vault = client_context.client.keyvault.datakeys + key_vault.drop() + key_vault.create_index( + "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + ) + key_vault.insert_one(json_data("external", "external-key.json")) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted(client_context.client.db.coll.find_one({})["encrypted"]) + no_mongocryptd_client = MongoClient( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + self.addCleanup(no_mongocryptd_client.close) + with self.assertRaises(ServerSelectionTimeoutError): + no_mongocryptd_client.db.command("ping") + # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): From 024148ca2b4861c2ead627213b55c37e280c35d7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 19 Dec 2022 14:23:40 -0800 Subject: [PATCH 0827/2111] PYTHON-3541 Use bash instead of sh in perf testing (#1127) --- .evergreen/perf.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index 8b3638d535..d975fca79f 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -105,7 +105,7 @@ functions: params: script: | ${PREPARE_SHELL} - MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh # run-orchestration generates expansion file with the MONGODB_URI for the cluster - command: expansions.update params: @@ -116,7 +116,7 @@ functions: params: script: | ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh + bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh "run perf tests": - command: shell.exec @@ -125,7 +125,7 @@ functions: working_dir: "src" script: | ${PREPARE_SHELL} - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh + PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh "attach benchmark test results": - command: attach.results @@ -182,7 +182,7 @@ functions: ${PREPARE_SHELL} file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && sh $file || echo "$file not available, skipping" + [ -f "$file" ] && bash $file || echo "$file not available, skipping" pre: - func: "fetch source" From f5d09e1c97ca12c94c555b40364b2ae10ec5126c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 20 Dec 2022 13:39:04 -0600 Subject: [PATCH 0828/2111] PYTHON-3542 Test Failure - test_iteration on PyPy 3.8+ (#1128) --- test/test_client.py | 2 +- test/test_collection.py | 2 +- test/test_database.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_client.py b/test/test_client.py index 53a234a33d..b2f128f11a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -217,7 +217,7 @@ def test_getattr(self): def test_iteration(self): client = self.client - if "PyPy" in sys.version: + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): msg = "'NoneType' object is not callable" else: msg = "'MongoClient' object is not iterable" diff --git a/test/test_collection.py b/test/test_collection.py index b6883f4ece..881896c847 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -125,7 +125,7 @@ def test_getattr(self): def test_iteration(self): coll = self.db.coll - if "PyPy" in sys.version: + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): msg = "'NoneType' object is not callable" else: msg = "'Collection' object is not iterable" diff --git a/test/test_database.py b/test/test_database.py index b1b2999df4..53af4912e4 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -95,7 +95,7 @@ def test_getattr(self): def test_iteration(self): db = self.client.pymongo_test - if "PyPy" in sys.version: + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): msg = "'NoneType' object is not callable" else: msg = "'Database' object is not iterable" From 47686c8f68363645579ee0ed0f841fdd3b7362f4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 20 Dec 2022 16:29:43 -0600 Subject: [PATCH 0829/2111] PYTHON-3543 Broken Links for ICU Project (#1129) --- doc/examples/collations.rst | 2 +- pymongo/collation.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/examples/collations.rst b/doc/examples/collations.rst index 1a5106039c..45e647d816 100644 --- a/doc/examples/collations.rst +++ b/doc/examples/collations.rst @@ -42,7 +42,7 @@ or with plain Python dictionaries. The structure is the same:: backwards=) The only required parameter is ``locale``, which the server parses as -an `ICU format locale ID `_. +an `ICU format locale ID `_. For example, set ``locale`` to ``en_US`` to represent US English or ``fr_CA`` to represent Canadian French. diff --git a/pymongo/collation.py b/pymongo/collation.py index 5bc73c07c8..3d8503f7d5 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -14,7 +14,7 @@ """Tools for working with `collations`_. -.. _collations: http://userguide.icu-project.org/collation/concepts +.. _collations: https://www.mongodb.com/docs/manual/reference/collation/ """ from typing import Any, Dict, Mapping, Optional, Union From 7299dff84d3c9b6f137f83a9121e5209ee13efbf Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 5 Jan 2023 13:55:47 -0600 Subject: [PATCH 0830/2111] PYTHON-3546 bson.CodecOptions docs missing unicode_decode_error_handler=ignore option in newer documentation (#1131) --- bson/codec_options.py | 167 ++++++++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 81 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 3c0a976a1b..6f4fdaac8d 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -245,87 +245,92 @@ class _BaseCodecOptions(NamedTuple): class CodecOptions(_BaseCodecOptions): - """Encapsulates options used encoding and / or decoding BSON. - - The `document_class` option is used to define a custom type for use - decoding BSON documents. Access to the underlying raw BSON bytes for - a document is available using the :class:`~bson.raw_bson.RawBSONDocument` - type:: - - >>> from bson.raw_bson import RawBSONDocument - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(document_class=RawBSONDocument) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc.raw - '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' - - The document class can be any type that inherits from - :class:`~collections.abc.MutableMapping`:: - - >>> class AttributeDict(dict): - ... # A dict that supports attribute access. - ... def __getattr__(self, key): - ... return self[key] - ... def __setattr__(self, key, value): - ... self[key] = value - ... - >>> codec_options = CodecOptions(document_class=AttributeDict) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc._id - ObjectId('5b3016359110ea14e8c58b93') - - See :doc:`/examples/datetimes` for examples using the `tz_aware` and - `tzinfo` options. - - See :doc:`/examples/uuid` for examples using the `uuid_representation` - option. - - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded - to an instance of this class. Must be a subclass of - :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone - aware instances of :class:`~datetime.datetime`. Otherwise they will be - naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New - applications should consider setting this to - :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the - timezone to/from which :class:`~datetime.datetime` objects should be - encoded/decoded. - - `type_registry`: Instance of :class:`TypeRegistry` used to customize - encoding and decoding behavior. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. - .. versionchanged:: 4.0 - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionadded:: 3.8 - `type_registry` attribute. - - .. warning:: Care must be taken when changing - `unicode_decode_error_handler` from its default value ('strict'). - The 'replace' and 'ignore' modes should not be used when documents - retrieved from the server will be modified in the client application - and stored back to the server. - """ + """Encapsulates options used encoding and / or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used encoding and / or decoding BSON. + + The `document_class` option is used to define a custom type for use + decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See :doc:`/examples/datetimes` for examples using the `tz_aware` and + `tzinfo` options. + + See :doc:`/examples/uuid` for examples using the `uuid_representation` + option. + + :Parameters: + - `document_class`: BSON documents returned in queries will be decoded + to an instance of this class. Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. + - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + - `uuid_representation`: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + - `type_registry`: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + return super().__init__() def __new__( cls: Type["CodecOptions"], From a43f320753a0b8710e23fb36ae7bb488ef790b41 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 5 Jan 2023 13:56:22 -0600 Subject: [PATCH 0831/2111] PYTHON-3470 Build Python 3.11 Wheels for MacOS (#1130) --- .evergreen/config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 96b6a00688..65b29dab14 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1216,6 +1216,10 @@ tasks: tags: ["release_tag"] run_on: macos-1100 commands: + - func: "build release" + vars: + VERSION: "3.11" + ENSURE_UNIVERSAL2: "1" - func: "build release" vars: VERSION: "3.10" From eaea70bf08189d567838b3408bdc9fd5ae7cecf6 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 10 Jan 2023 15:49:46 -0800 Subject: [PATCH 0832/2111] DRIVERS-2369 Disable causal consistency in implicit sessions (#1132) --- ...t-sessions-default-causal-consistency.json | 318 ++++++++++++++++++ 1 file changed, 318 insertions(+) create mode 100644 test/sessions/implicit-sessions-default-causal-consistency.json diff --git a/test/sessions/implicit-sessions-default-causal-consistency.json b/test/sessions/implicit-sessions-default-causal-consistency.json new file mode 100644 index 0000000000..517c8ebc63 --- /dev/null +++ b/test/sessions/implicit-sessions-default-causal-consistency.json @@ -0,0 +1,318 @@ +{ + "description": "implicit sessions default causal consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "implicit-cc-tests" + } + }, + { + "collection": { + "id": "collectionDefault", + "database": "database0", + "collectionName": "coll-default" + } + }, + { + "collection": { + "id": "collectionSnapshot", + "database": "database0", + "collectionName": "coll-snapshot", + "collectionOptions": { + "readConcern": { + "level": "snapshot" + } + } + } + }, + { + "collection": { + "id": "collectionlinearizable", + "database": "database0", + "collectionName": "coll-linearizable", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll-default", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "default" + } + ] + }, + { + "collectionName": "coll-snapshot", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "snapshot" + } + ] + }, + { + "collectionName": "coll-linearizable", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "tests": [ + { + "description": "readConcern is not sent on retried read in implicit session when readConcern level is not specified", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionDefault", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "default" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is snapshot", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionSnapshot", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "snapshot" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is linearizable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionlinearizable", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + } + ] +} From a4c90ae157ffcb1d4a073ceeb9177400126ad871 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 11 Jan 2023 20:03:28 -0800 Subject: [PATCH 0833/2111] PYTHON-3466 Test crypt_shared with older server versions (#1133) --- .evergreen/config.yml | 41 ++--------------------------------------- .evergreen/run-tests.sh | 18 ++++++------------ 2 files changed, 8 insertions(+), 51 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 65b29dab14..ab61725a20 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -452,6 +452,7 @@ functions: fi if [ -n "${test_crypt_shared}" ]; then export TEST_CRYPT_SHARED=1 + export CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} fi if [ -n "${test_pyopenssl}" ]; then export TEST_PYOPENSSL=1 @@ -2497,6 +2498,7 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days + # The path to crypt_shared is stored in the $CRYPT_SHARED_LIB_PATH expansion. - id: "encryption_crypt_shared" display_name: "Encryption shared lib" tags: ["encryption_tag"] @@ -2634,19 +2636,6 @@ buildvariants: - ".4.4" - ".4.2" - ".4.0" - rules: &encryption-exclude-rules - - if: - platform: "*" - auth: "*" - ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".rapid" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2729,19 +2718,6 @@ buildvariants: encryption: "*" display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" tasks: *encryption-server-versions - rules: - - if: - platform: "*" - python-version: "*" - auth-ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".rapid" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - matrix_name: "tests-python-version-ubuntu18-without-c-extensions" @@ -2853,19 +2829,6 @@ buildvariants: encryption: "*" display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" tasks: *encryption-server-versions - rules: - - if: - platform: "*" - python-version-windows: "*" - auth-ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".rapid" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" # Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 959ad901ad..d495e2671a 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -29,7 +29,7 @@ COMPRESSORS=${COMPRESSORS:-} MONGODB_VERSION=${MONGODB_VERSION:-} MONGODB_API_VERSION=${MONGODB_API_VERSION:-} TEST_ENCRYPTION=${TEST_ENCRYPTION:-} -TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} +CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH:-} LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} DATA_LAKE=${DATA_LAKE:-} TEST_ARGS="" @@ -158,17 +158,11 @@ if [ -n "$TEST_ENCRYPTION" ]; then . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh if [ -n "$TEST_CRYPT_SHARED" ]; then - REAL_VERSION=$(mongod --version | head -n1 | cut -d v -f3 | tr -d "\r") - if [ "$MONGODB_VERSION" = "latest" ]; then - REAL_VERSION="latest" - fi - echo "Testing CSFLE with crypt_shared lib" - $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ - --version "$REAL_VERSION" \ - --out ../crypt_shared/ - export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH - export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH - export PATH=../crypt_shared/bin:$PATH + CRYPT_SHARED_DIR=`dirname $CRYPT_SHARED_LIB_PATH` + echo "using crypt_shared_dir $CRYPT_SHARED_DIR" + export DYLD_FALLBACK_LIBRARY_PATH=$CRYPT_SHARED_DIR:$DYLD_FALLBACK_LIBRARY_PATH + export LD_LIBRARY_PATH=$CRYPT_SHARED_DIR:$LD_LIBRARY_PATH + export PATH=$CRYPT_SHARED_DIR:$PATH fi # Only run the encryption tests. TEST_ARGS="-s test.test_encryption" From 24170dd523b04e5f28ada72e125fc9d4c36a3510 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 12 Jan 2023 12:08:53 -0600 Subject: [PATCH 0834/2111] PYTHON-3523 Remove getNonce command usage for 6.2+ (#1135) --- .evergreen/resync-specs.sh | 2 +- test/command_monitoring/bulkWrite.json | 154 +++++ test/command_monitoring/command.json | 83 +++ test/command_monitoring/deleteMany.json | 162 +++++ test/command_monitoring/deleteOne.json | 162 +++++ test/command_monitoring/find.json | 550 +++++++++++++++++ test/command_monitoring/insertMany.json | 148 +++++ test/command_monitoring/insertOne.json | 144 +++++ test/command_monitoring/legacy/bulkWrite.json | 110 ---- test/command_monitoring/legacy/command.json | 113 ---- .../command_monitoring/legacy/deleteMany.json | 115 ---- test/command_monitoring/legacy/deleteOne.json | 115 ---- test/command_monitoring/legacy/find.json | 559 ------------------ .../command_monitoring/legacy/insertMany.json | 145 ----- test/command_monitoring/legacy/insertOne.json | 97 --- .../legacy/unacknowledgedBulkWrite.json | 69 --- .../command_monitoring/legacy/updateMany.json | 135 ----- test/command_monitoring/legacy/updateOne.json | 190 ------ .../{unified => }/redacted-commands.json | 20 + .../unacknowledgedBulkWrite.json | 108 ++++ test/command_monitoring/updateMany.json | 188 ++++++ test/command_monitoring/updateOne.json | 260 ++++++++ ..._unified.py => test_command_monitoring.py} | 2 +- test/test_command_monitoring_legacy.py | 237 -------- test/test_monitoring.py | 1 + 25 files changed, 1982 insertions(+), 1887 deletions(-) create mode 100644 test/command_monitoring/bulkWrite.json create mode 100644 test/command_monitoring/command.json create mode 100644 test/command_monitoring/deleteMany.json create mode 100644 test/command_monitoring/deleteOne.json create mode 100644 test/command_monitoring/find.json create mode 100644 test/command_monitoring/insertMany.json create mode 100644 test/command_monitoring/insertOne.json delete mode 100644 test/command_monitoring/legacy/bulkWrite.json delete mode 100644 test/command_monitoring/legacy/command.json delete mode 100644 test/command_monitoring/legacy/deleteMany.json delete mode 100644 test/command_monitoring/legacy/deleteOne.json delete mode 100644 test/command_monitoring/legacy/find.json delete mode 100644 test/command_monitoring/legacy/insertMany.json delete mode 100644 test/command_monitoring/legacy/insertOne.json delete mode 100644 test/command_monitoring/legacy/unacknowledgedBulkWrite.json delete mode 100644 test/command_monitoring/legacy/updateMany.json delete mode 100644 test/command_monitoring/legacy/updateOne.json rename test/command_monitoring/{unified => }/redacted-commands.json (97%) create mode 100644 test/command_monitoring/unacknowledgedBulkWrite.json create mode 100644 test/command_monitoring/updateMany.json create mode 100644 test/command_monitoring/updateOne.json rename test/{test_command_monitoring_unified.py => test_command_monitoring.py} (95%) delete mode 100644 test/test_command_monitoring_legacy.py diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 817fa4b730..489ff28b3a 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -100,7 +100,7 @@ do rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 ;; apm|APM|command-monitoring|command_monitoring) - cpjson command-monitoring/tests command_monitoring + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring ;; crud|CRUD) cpjson crud/tests/ crud diff --git a/test/command_monitoring/bulkWrite.json b/test/command_monitoring/bulkWrite.json new file mode 100644 index 0000000000..49c728442e --- /dev/null +++ b/test/command_monitoring/bulkWrite.json @@ -0,0 +1,154 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful mixed bulk write", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "$set": { + "x": 333 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/command.json b/test/command_monitoring/command.json new file mode 100644 index 0000000000..c28af95fed --- /dev/null +++ b/test/command_monitoring/command.json @@ -0,0 +1,83 @@ +{ + "description": "command", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteMany.json b/test/command_monitoring/deleteMany.json new file mode 100644 index 0000000000..78ebad1f98 --- /dev/null +++ b/test/command_monitoring/deleteMany.json @@ -0,0 +1,162 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteMany", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteMany with write errors", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteOne.json b/test/command_monitoring/deleteOne.json new file mode 100644 index 0000000000..2420794fe5 --- /dev/null +++ b/test/command_monitoring/deleteOne.json @@ -0,0 +1,162 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteOne", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteOne with write errors", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/find.json b/test/command_monitoring/find.json new file mode 100644 index 0000000000..4b5f45ae99 --- /dev/null +++ b/test/command_monitoring/find.json @@ -0,0 +1,550 @@ +{ + "description": "find", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find with no options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": 1 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + } + ] + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A successful find with options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "x": 33 + }, + { + "x": 22 + } + ] + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A successful find with showRecordId and returnKey", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "showRecordId": true, + "returnKey": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "showRecordId": true, + "returnKey": true + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A successful find with a getMore", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertMany.json b/test/command_monitoring/insertMany.json new file mode 100644 index 0000000000..a80a218c67 --- /dev/null +++ b/test/command_monitoring/insertMany.json @@ -0,0 +1,148 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertMany with write errors", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertOne.json b/test/command_monitoring/insertOne.json new file mode 100644 index 0000000000..6ff732e41b --- /dev/null +++ b/test/command_monitoring/insertOne.json @@ -0,0 +1,144 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertOne with write errors", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/legacy/bulkWrite.json b/test/command_monitoring/legacy/bulkWrite.json deleted file mode 100644 index ca5a9a105c..0000000000 --- a/test/command_monitoring/legacy/bulkWrite.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful mixed bulk write", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "x": 333 - } - } - } - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4, - "x": 44 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 3 - }, - "u": { - "$set": { - "x": 333 - } - } - } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/command.json b/test/command_monitoring/legacy/command.json deleted file mode 100644 index 7e1e347be0..0000000000 --- a/test/command_monitoring/legacy/command.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful command", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } - } - ] - }, - { - "description": "A failed command event", - "operation": { - "name": "count", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "$or": true - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "count" - } - } - ] - }, - { - "description": "A successful command with a non-primary read preference", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - }, - "read_preference": { - "mode": "primaryPreferred" - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/deleteMany.json b/test/command_monitoring/legacy/deleteMany.json deleted file mode 100644 index 7cd396806c..0000000000 --- a/test/command_monitoring/legacy/deleteMany.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful delete many", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "delete" - } - } - ] - }, - { - "description": "A successful delete many command with write errors", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 - } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/deleteOne.json b/test/command_monitoring/legacy/deleteOne.json deleted file mode 100644 index 0971dfcf2c..0000000000 --- a/test/command_monitoring/legacy/deleteOne.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful delete one", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "delete" - } - } - ] - }, - { - "description": "A successful delete one command with write errors", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 - } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/find.json b/test/command_monitoring/legacy/find.json deleted file mode 100644 index e2bb95306f..0000000000 --- a/test/command_monitoring/legacy/find.json +++ /dev/null @@ -1,559 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "namespace": "command-monitoring-tests.test", - "tests": [ - { - "description": "A successful find event with no options", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": 1 - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "command_name": "find" - } - } - ] - }, - { - "description": "A successful find event with options", - "operation": { - "name": "find", - "read_preference": { - "mode": "primaryPreferred" - }, - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ] - } - }, - "command_name": "find" - } - } - ] - }, - { - "description": "A successful find event with a getmore", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "3" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ] - } - }, - "command_name": "getMore" - } - } - ] - }, - { - "description": "A successful find event with a getmore and killcursors", - "ignore_if_server_version_greater_than": "3.0", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] - } - }, - "command_name": "getMore" - } - }, - { - "command_started_event": { - "command": { - "killCursors": "test", - "cursors": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursorsUnknown": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors" - } - } - ] - }, - { - "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", - "ignore_if_server_version_less_than": "3.1", - "ignore_if_server_version_greater_than": "4.4", - "ignore_if_topology_type": [ - "sharded" - ], - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] - } - }, - "command_name": "getMore" - } - } - ] - }, - { - "description": "A failed find event", - "operation": { - "name": "find", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "$or": true - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "find" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/insertMany.json b/test/command_monitoring/legacy/insertMany.json deleted file mode 100644 index 0becf928e4..0000000000 --- a/test/command_monitoring/legacy/insertMany.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful insert many command with write errors", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful unordered insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "options": { - "ordered": false - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": false - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/insertOne.json b/test/command_monitoring/legacy/insertOne.json deleted file mode 100644 index 877bca1a61..0000000000 --- a/test/command_monitoring/legacy/insertOne.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful insert one", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful insert one command with write errors", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "x": 11 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/unacknowledgedBulkWrite.json b/test/command_monitoring/legacy/unacknowledgedBulkWrite.json deleted file mode 100644 index ae116289eb..0000000000 --- a/test/command_monitoring/legacy/unacknowledgedBulkWrite.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test-unacknowledged-bulk-write", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful unordered bulk write with an unacknowledged write concern", - "comment": "On a 2.4 server, no GLE is sent and requires a client-side manufactured reply", - "operation": { - "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test-unacknowledged-bulk-write", - "documents": [ - { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - ], - "ordered": false, - "writeConcern": { - "w": 0 - } - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1 - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/updateMany.json b/test/command_monitoring/legacy/updateMany.json deleted file mode 100644 index d82792fc4e..0000000000 --- a/test/command_monitoring/legacy/updateMany.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful update many", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update many command with write errors", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$nothing": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$nothing": { - "x": 1 - } - }, - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/updateOne.json b/test/command_monitoring/legacy/updateOne.json deleted file mode 100644 index ba41dbb0c0..0000000000 --- a/test/command_monitoring/legacy/updateOne.json +++ /dev/null @@ -1,190 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful update one", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - } - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update one with upsert when the upserted id is not an object id", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1, - "upserted": [ - { - "index": 0, - "_id": 4 - } - ] - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update one command with write errors", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$nothing": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$nothing": { - "x": 1 - } - } - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/unified/redacted-commands.json b/test/command_monitoring/redacted-commands.json similarity index 97% rename from test/command_monitoring/unified/redacted-commands.json rename to test/command_monitoring/redacted-commands.json index 0f85dc3e94..4302ba8900 100644 --- a/test/command_monitoring/unified/redacted-commands.json +++ b/test/command_monitoring/redacted-commands.json @@ -162,6 +162,11 @@ }, { "description": "getnonce", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -293,6 +298,11 @@ }, { "description": "copydbgetnonce", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], "operations": [ { "name": "runCommand", @@ -328,6 +338,11 @@ }, { "description": "copydbsaslstart", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], "operations": [ { "name": "runCommand", @@ -363,6 +378,11 @@ }, { "description": "copydb", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], "operations": [ { "name": "runCommand", diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/unacknowledgedBulkWrite.json new file mode 100644 index 0000000000..4c16d6df11 --- /dev/null +++ b/test/command_monitoring/unacknowledgedBulkWrite.json @@ -0,0 +1,108 @@ +{ + "description": "unacknowledgedBulkWrite", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful unordered bulk write with an unacknowledged write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + } + } + ], + "ordered": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + ], + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": { + "$$exists": false + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateMany.json b/test/command_monitoring/updateMany.json new file mode 100644 index 0000000000..b15434226c --- /dev/null +++ b/test/command_monitoring/updateMany.json @@ -0,0 +1,188 @@ +{ + "description": "updateMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateMany with write errors", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateOne.json b/test/command_monitoring/updateOne.json new file mode 100644 index 0000000000..a0ae99e88d --- /dev/null +++ b/test/command_monitoring/updateOne.json @@ -0,0 +1,260 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with upsert where the upserted id is not an ObjectId", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "upserted": [ + { + "index": 0, + "_id": 4 + } + ] + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with write errors", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_command_monitoring_unified.py b/test/test_command_monitoring.py similarity index 95% rename from test/test_command_monitoring_unified.py rename to test/test_command_monitoring.py index 46e1e4724c..c88b7ef810 100644 --- a/test/test_command_monitoring_unified.py +++ b/test/test_command_monitoring.py @@ -28,7 +28,7 @@ globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "unified"), + _TEST_PATH, module=__name__, ) ) diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py deleted file mode 100644 index 1cc3e15cc9..0000000000 --- a/test/test_command_monitoring_legacy.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run the command monitoring legacy-format spec tests.""" - -import os -import re -import sys - -sys.path[0:0] = [""] - -from test import client_context, unittest -from test.utils import EventListener, parse_read_preference, rs_or_single_client - -import pymongo -from bson import json_util -from pymongo import MongoClient -from pymongo.errors import OperationFailure -from pymongo.write_concern import WriteConcern - -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -class TestAllScenarios(unittest.TestCase): - listener: EventListener - client: MongoClient - - @classmethod - @client_context.require_connection - def setUpClass(cls): - cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def tearDownClass(cls): - cls.client.close() - - def tearDown(self): - self.listener.reset() - - -def create_test(scenario_def, test): - def run_scenario(self): - dbname = scenario_def["database_name"] - collname = scenario_def["collection_name"] - - coll = self.client[dbname][collname] - coll.drop() - coll.insert_many(scenario_def["data"]) - self.listener.reset() - name = camel_to_snake(test["operation"]["name"]) - if "read_preference" in test["operation"]: - coll = coll.with_options( - read_preference=parse_read_preference(test["operation"]["read_preference"]) - ) - if "collectionOptions" in test["operation"]: - colloptions = test["operation"]["collectionOptions"] - if "writeConcern" in colloptions: - concern = colloptions["writeConcern"] - coll = coll.with_options(write_concern=WriteConcern(**concern)) - - test_args = test["operation"]["arguments"] - if "options" in test_args: - options = test_args.pop("options") - test_args.update(options) - args = {} - for arg in test_args: - args[camel_to_snake(arg)] = test_args[arg] - - if name == "count": - self.skipTest("PyMongo does not support count") - elif name == "bulk_write": - bulk_args = [] - for request in args["requests"]: - opname = request["name"] - klass = opname[0:1].upper() + opname[1:] - arg = getattr(pymongo, klass)(**request["arguments"]) - bulk_args.append(arg) - try: - coll.bulk_write(bulk_args, args.get("ordered", True)) - except OperationFailure: - pass - elif name == "find": - if "sort" in args: - args["sort"] = list(args["sort"].items()) - if "hint" in args: - args["hint"] = list(args["hint"].items()) - for arg in "skip", "limit": - if arg in args: - args[arg] = int(args[arg]) - try: - # Iterate the cursor. - tuple(coll.find(**args)) - except OperationFailure: - pass - else: - try: - getattr(coll, name)(**args) - except OperationFailure: - pass - - started_events = self.listener.started_events - succeeded_events = self.listener.succeeded_events - failed_events = self.listener.failed_events - for expectation in test["expectations"]: - event_type = next(iter(expectation)) - if event_type == "command_started_event": - event = started_events[0] if len(started_events) else None - if event is not None: - # The tests substitute 42 for any number other than 0. - if event.command_name == "getMore" and event.command["getMore"]: - event.command["getMore"] = 42 - elif event.command_name == "killCursors": - event.command["cursors"] = [42] - elif event.command_name == "update": - # TODO: remove this once PYTHON-1744 is done. - # Add upsert and multi fields back into - # expectations. - updates = expectation[event_type]["command"]["updates"] - for update in updates: - update.setdefault("upsert", False) - update.setdefault("multi", False) - elif event_type == "command_succeeded_event": - event = succeeded_events.pop(0) if len(succeeded_events) else None - if event is not None: - reply = event.reply - # The tests substitute 42 for any number other than 0, - # and "" for any error message. - if "writeErrors" in reply: - for doc in reply["writeErrors"]: - # Remove any new fields the server adds. The tests - # only have index, code, and errmsg. - diff = set(doc) - set(["index", "code", "errmsg"]) - for field in diff: - doc.pop(field) - doc["code"] = 42 - doc["errmsg"] = "" - elif "cursor" in reply: - if reply["cursor"]["id"]: - reply["cursor"]["id"] = 42 - elif event.command_name == "killCursors": - # Make the tests continue to pass when the killCursors - # command is actually in use. - if "cursorsKilled" in reply: - reply.pop("cursorsKilled") - reply["cursorsUnknown"] = [42] - # Found succeeded event. Pop related started event. - started_events.pop(0) - elif event_type == "command_failed_event": - event = failed_events.pop(0) if len(failed_events) else None - if event is not None: - # Found failed event. Pop related started event. - started_events.pop(0) - else: - self.fail("Unknown event type") - - if event is None: - event_name = event_type.split("_")[1] - self.fail( - "Expected %s event for %s command. Actual " - "results:\n%s" - % ( - event_name, - expectation[event_type]["command_name"], - "\n".join(str(e) for e in self.listener.events), - ) - ) - - for attr, expected in expectation[event_type].items(): - if "options" in expected: - options = expected.pop("options") - expected.update(options) - actual = getattr(event, attr) - if isinstance(expected, dict): - for key, val in expected.items(): - self.assertEqual(val, actual[key]) - else: - self.assertEqual(actual, expected) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): - dirname = os.path.split(dirpath)[-1] - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - assert bool(scenario_def.get("tests")), "tests cannot be empty" - # Construct test from scenario. - for test in scenario_def["tests"]: - new_test = create_test(scenario_def, test) - if "ignore_if_server_version_greater_than" in test: - version = test["ignore_if_server_version_greater_than"] - ver = tuple(int(elt) for elt in version.split(".")) - new_test = client_context.require_version_max(*ver)(new_test) - if "ignore_if_server_version_less_than" in test: - version = test["ignore_if_server_version_less_than"] - ver = tuple(int(elt) for elt in version.split(".")) - new_test = client_context.require_version_min(*ver)(new_test) - if "ignore_if_topology_type" in test: - types = set(test["ignore_if_topology_type"]) - if "sharded" in types: - new_test = client_context.require_no_mongos(None)(new_test) - - test_name = "test_%s_%s_%s" % ( - dirname, - os.path.splitext(filename)[0], - str(test["description"].replace(" ", "_")), - ) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - - -create_tests() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index ffa535eeed..39b3d2f896 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1078,6 +1078,7 @@ def test_first_batch_helper(self): self.listener.reset() + @client_context.require_version_max(6, 1, 99) def test_sensitive_commands(self): listeners = self.client._event_listeners From 57f757b74c7d0bfac3d0ef8e9e8df4f37edd7018 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 17 Jan 2023 23:14:26 -0800 Subject: [PATCH 0835/2111] PYTHON-3446 Do not connect to mongocryptd if shared library is loaded (#1136) --- test/test_encryption.py | 61 ++++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 3c422b8c87..35dea51885 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -19,11 +19,13 @@ import os import re import socket +import socketserver import ssl import sys import textwrap import traceback import uuid +from threading import Thread from typing import Any, Dict, Mapping from pymongo.collection import Collection @@ -730,6 +732,11 @@ def create_key_vault(vault, *data_keys): vault.drop() if data_keys: vault.insert_many(data_keys) + vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) return vault @@ -1786,10 +1793,7 @@ class TestDecryptProse(EncryptionIntegrationTest): def setUp(self): self.client = client_context.client self.client.db.drop_collection("decryption_events") - self.client.keyvault.drop_collection("datakeys") - self.client.keyvault.datakeys.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} - ) + create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} self.client_encryption = ClientEncryption( @@ -1912,12 +1916,9 @@ def test_bypassAutoEncryption(self): @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") def test_via_loading_shared_library(self): - key_vault = client_context.client.keyvault.datakeys - key_vault.drop() - key_vault.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} + create_key_vault( + client_context.client.keyvault.datakeys, json_data("external", "external-key.json") ) - key_vault.insert_one(json_data("external", "external-key.json")) schemas = {"db.coll": json_data("external", "external-schema.json")} opts = AutoEncryptionOpts( kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, @@ -1942,6 +1943,43 @@ def test_via_loading_shared_library(self): with self.assertRaises(ServerSelectionTimeoutError): no_mongocryptd_client.db.command("ping") + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + create_key_vault( + client_context.client.keyvault.datakeys, json_data("external", "external-key.json") + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client_encrypted.close) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): @@ -2112,10 +2150,7 @@ def test_04_kmip(self): class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): def setUp(self): self.client = client_context.client - self.client.keyvault.drop_collection("datakeys") - self.client.keyvault.datakeys.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} - ) + create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} self.client_encryption = ClientEncryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() From 124dee66c3df47883b85df82b34f740ce35f69e1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 18 Jan 2023 13:39:38 -0600 Subject: [PATCH 0836/2111] PYTHON-3565 The docs page appears to be missing a component reference (#1137) --- pymongo/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 789df62071..a3bdb4c163 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -85,7 +85,7 @@ """ from pymongo import _csot -from pymongo._version import __version__, get_version_string, version, version_tuple +from pymongo._version import __version__, get_version_string, version_tuple from pymongo.collection import ReturnDocument from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType @@ -102,6 +102,9 @@ from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern +version = __version__ +"""Current version of PyMongo.""" + def has_c() -> bool: """Is the C extension installed?""" From ec074010d81f72826fdba230ef6cda1bbf034a27 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 24 Jan 2023 14:38:48 -0800 Subject: [PATCH 0837/2111] PYTHON-3523 Resync unified test format tests for getnonce (#1141) --- test/test_auth.py | 4 +++- .../valid-pass/observeSensitiveCommands.json | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/test/test_auth.py b/test/test_auth.py index 9d80f06c00..7db2247746 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -581,7 +581,9 @@ def test_scram_threaded(self): coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate - coll = rs_or_single_client().db.test + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.db.test threads = [] for _ in range(4): threads.append(AutoAuthenticateThread(coll)) diff --git a/test/unified-test-format/valid-pass/observeSensitiveCommands.json b/test/unified-test-format/valid-pass/observeSensitiveCommands.json index 411ca19c5d..d3ae5665be 100644 --- a/test/unified-test-format/valid-pass/observeSensitiveCommands.json +++ b/test/unified-test-format/valid-pass/observeSensitiveCommands.json @@ -61,6 +61,11 @@ "tests": [ { "description": "getnonce is observed with observeSensitiveCommands=true", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -106,6 +111,11 @@ }, { "description": "getnonce is not observed with observeSensitiveCommands=false", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -127,6 +137,11 @@ }, { "description": "getnonce is not observed by default", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", From d3117ce75dfe86fd6a7ab2380759f4efaa9cfb3f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 24 Jan 2023 15:33:56 -0800 Subject: [PATCH 0838/2111] PYTHON-3280 Support for Range Indexes (#1140) --- pymongo/encryption.py | 134 ++++++++++-- pymongo/encryption_options.py | 44 +++- .../etc/data/encryptedFields-Range-Date.json | 36 ++++ .../data/encryptedFields-Range-Decimal.json | 26 +++ ...ncryptedFields-Range-DecimalPrecision.json | 35 ++++ .../data/encryptedFields-Range-Double.json | 26 +++ ...encryptedFields-Range-DoublePrecision.json | 35 ++++ .../etc/data/encryptedFields-Range-Int.json | 32 +++ .../etc/data/encryptedFields-Range-Long.json | 32 +++ .../etc/data/range-encryptedFields-Date.json | 30 +++ ...ge-encryptedFields-DecimalNoPrecision.json | 21 ++ ...ange-encryptedFields-DecimalPrecision.json | 29 +++ ...nge-encryptedFields-DoubleNoPrecision.json | 21 ++ ...range-encryptedFields-DoublePrecision.json | 30 +++ .../etc/data/range-encryptedFields-Int.json | 27 +++ .../etc/data/range-encryptedFields-Long.json | 27 +++ test/test_encryption.py | 197 +++++++++++++++++- 17 files changed, 759 insertions(+), 23 deletions(-) create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Date.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Double.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Int.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Long.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-Date.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-Int.json create mode 100644 test/client-side-encryption/etc/data/range-encryptedFields-Long.json diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 92a268f452..8b51863f96 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -41,7 +41,7 @@ from pymongo import _csot from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts +from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, EncryptionError, @@ -416,6 +416,14 @@ class Algorithm(str, enum.Enum): .. versionadded:: 4.2 """ + RANGEPREVIEW = "RangePreview" + """RangePreview. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. + + .. versionadded:: 4.4 + """ class QueryType(str, enum.Enum): @@ -430,6 +438,9 @@ class QueryType(str, enum.Enum): EQUALITY = "equality" """Used to encrypt a value for an equality query.""" + RANGEPREVIEW = "rangePreview" + """Used to encrypt a value for a range query.""" + class ClientEncryption(Generic[_DocumentType]): """Explicit client-side field level encryption.""" @@ -627,6 +638,45 @@ def create_data_key( key_material=key_material, ) + def _encrypt_helper( + self, + value, + algorithm, + key_id=None, + key_alt_name=None, + query_type=None, + contention_factor=None, + range_opts=None, + is_expression=False, + ): + self._check_closed() + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + if range_opts: + range_opts = encode( + range_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=is_expression, + ) + return decode(encrypted_doc)["v"] # type: ignore[index] + def encrypt( self, value: Any, @@ -635,6 +685,7 @@ def encrypt( key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -655,10 +706,10 @@ def encrypt( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. + - `range_opts`: **(BETA)** An instance of RangeOpts. - .. note:: `query_type` and `contention_factor` are part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the - final release. + .. note:: `query_type`, `contention_factor` and `range_opts` are part of the Queryable Encryption beta. + Backwards-breaking changes may be made before the final release. :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. @@ -667,23 +718,66 @@ def encrypt( Added the `query_type` and `contention_factor` parameters. """ - self._check_closed() - if key_id is not None and not ( - isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE - ): - raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + return self._encrypt_helper( + value=value, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=False, + ) - doc = encode({"v": value}, codec_options=self._codec_options) - with _wrap_encryption_errors(): - encrypted_doc = self._encryption.encrypt( - doc, - algorithm, - key_id=key_id, - key_alt_name=key_alt_name, - query_type=query_type, - contention_factor=contention_factor, - ) - return decode(encrypted_doc)["v"] # type: ignore[index] + def encrypt_expression( + self, + expression: Mapping[str, Any], + algorithm: str, + key_id: Optional[Binary] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> RawBSONDocument: + """Encrypt a BSON expression with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :Parameters: + - `expression`: **(BETA)** The BSON aggregate or match expression to encrypt. + - `algorithm` (string): The encryption algorithm to use. See + :class:`Algorithm` for some valid options. + - `key_id`: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + - `key_alt_name`: Identifies a key vault document by 'keyAltName'. + - `query_type` (str): **(BETA)** The query type to execute. See + :class:`QueryType` for valid options. + - `contention_factor` (int): **(BETA)** The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + - `range_opts`: **(BETA)** An instance of RangeOpts. + + .. note:: Support for range queries is in beta. + Backwards-breaking changes may be made before the final release. + + :Returns: + The encrypted expression, a :class:`~bson.RawBSONDocument`. + + .. versionadded:: 4.4 + """ + return self._encrypt_helper( + value=expression, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=True, + ) def decrypt(self, value: Binary) -> Any: """Decrypt an encrypted value. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c5e6f47837..6c966e30cd 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -22,7 +22,7 @@ _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False - +from bson import int64 from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError from pymongo.uri_parser import _parse_kms_tls_options @@ -219,3 +219,45 @@ def __init__( # Maps KMS provider name to a SSLContext. self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) self._bypass_query_analysis = bypass_query_analysis + + +class RangeOpts: + """Options to configure encrypted queries using the rangePreview algorithm.""" + + def __init__( + self, + sparsity: int, + min: Optional[Any] = None, + max: Optional[Any] = None, + precision: Optional[int] = None, + ) -> None: + """Options to configure encrypted queries using the rangePreview algorithm. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. + + :Parameters: + - `sparsity`: An integer. + - `min`: A BSON scalar value corresponding to the type being queried. + - `max`: A BSON scalar value corresponding to the type being queried. + - `precision`: An integer, may only be set for double or decimal128 types. + + .. versionadded:: 4.4 + """ + self.min = min + self.max = max + self.sparsity = sparsity + self.precision = precision + + @property + def document(self) -> Mapping[str, Any]: + doc = {} + for k, v in [ + ("sparsity", int64.Int64(self.sparsity)), + ("precision", self.precision), + ("min", self.min), + ("max", self.max), + ]: + if v is not None: + doc[k] = v + return doc diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json new file mode 100644 index 0000000000..c9ad1ffdd4 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json @@ -0,0 +1,36 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json new file mode 100644 index 0000000000..f209536c9c --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json @@ -0,0 +1,26 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json new file mode 100644 index 0000000000..e7634152ba --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json @@ -0,0 +1,35 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json new file mode 100644 index 0000000000..4e9e8d6d81 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json @@ -0,0 +1,26 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json new file mode 100644 index 0000000000..17c725ec44 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json @@ -0,0 +1,35 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json new file mode 100644 index 0000000000..661d7395c5 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json @@ -0,0 +1,32 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json new file mode 100644 index 0000000000..b36bfb2c46 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json @@ -0,0 +1,32 @@ +{ + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json new file mode 100644 index 0000000000..e19fc1e182 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json new file mode 100644 index 0000000000..c6d129d4ca --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -0,0 +1,21 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberInt": "1" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json new file mode 100644 index 0000000000..c23c3fa923 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -0,0 +1,29 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberInt": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json new file mode 100644 index 0000000000..4af6422714 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -0,0 +1,21 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json new file mode 100644 index 0000000000..c1f388219d --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json new file mode 100644 index 0000000000..217bf6743c --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -0,0 +1,27 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json new file mode 100644 index 0000000000..0fb87edaef --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -0,0 +1,27 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + \ No newline at end of file diff --git a/test/test_encryption.py b/test/test_encryption.py index 35dea51885..fc6d62c727 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -59,7 +59,7 @@ ) from test.utils_spec_runner import SpecRunner -from bson import encode, json_util +from bson import DatetimeMS, Decimal128, encode, json_util from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError @@ -68,7 +68,7 @@ from pymongo import encryption from pymongo.cursor import CursorType from pymongo.encryption import Algorithm, ClientEncryption, QueryType -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -2494,5 +2494,198 @@ def MongoClient(**kwargs): client_encryption.close() +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#range-explicit-encryption +class TestRangeQueryProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(6, 2, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + self.addCleanup(self.encrypted_client.close) + + def run_expression_find(self, name, expression, expected_elems, range_opts, use_expr=False): + find_payload = self.client_encryption.encrypt_expression( + expression=expression, + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + query_type=QueryType.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + self.encrypted_client.db.explicit_encryption.find(find_payload), key=lambda x: x["_id"] + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + def encrypt_and_cast(i): + return self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. + insert_payload = self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + }, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + ) + + # Case 3. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + self.client_encryption.encrypt( + int(6) if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. + if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGEPREVIEW, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), max=cast_func(200), sparsity=1, precision=2 + ), + ) + + def test_double_no_precision(self): + self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1), float) + + def test_double_precision(self): + self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, precision=2), + float, + ) + + def test_decimal_no_precision(self): + self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1), lambda x: Decimal128(str(x)) + ) + + def test_decimal_precision(self): + self.run_test_cases( + "DecimalPrecision", + RangeOpts(min=Decimal128("0.0"), max=Decimal128("200.0"), sparsity=1, precision=2), + lambda x: Decimal128(str(x)), + ) + + def test_datetime(self): + self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + def test_int(self): + self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1), int) + + if __name__ == "__main__": unittest.main() From 2b21e7359f4dd0ad9a30bf919e8bb4114da6f522 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 24 Jan 2023 21:40:18 -0800 Subject: [PATCH 0839/2111] PYTHON-3582 Add specification tests for range indexes (#1142) --- .../spec/legacy/fle2-BypassQueryAnalysis.json | 3 +- .../spec/legacy/fle2-Compact.json | 3 +- .../spec/legacy/fle2-CreateCollection.json | 3 +- .../spec/legacy/fle2-DecryptExistingData.json | 3 +- .../spec/legacy/fle2-Delete.json | 3 +- ...EncryptedFields-vs-EncryptedFieldsMap.json | 3 +- .../fle2-EncryptedFields-vs-jsonSchema.json | 3 +- .../fle2-EncryptedFieldsMap-defaults.json | 3 +- .../spec/legacy/fle2-FindOneAndUpdate.json | 3 +- .../spec/legacy/fle2-InsertFind-Indexed.json | 3 +- .../legacy/fle2-InsertFind-Unindexed.json | 3 +- .../spec/legacy/fle2-MissingKey.json | 3 +- .../spec/legacy/fle2-NoEncryption.json | 3 +- .../legacy/fle2-Range-Date-Aggregate.json | 514 +++++ .../legacy/fle2-Range-Date-Correctness.json | 1842 ++++++++++++++++ .../spec/legacy/fle2-Range-Date-Delete.json | 459 ++++ .../fle2-Range-Date-FindOneAndUpdate.json | 538 +++++ .../legacy/fle2-Range-Date-InsertFind.json | 505 +++++ .../spec/legacy/fle2-Range-Date-Update.json | 540 +++++ .../legacy/fle2-Range-Decimal-Aggregate.json | 1908 ++++++++++++++++ .../fle2-Range-Decimal-Correctness.json | 1158 ++++++++++ .../legacy/fle2-Range-Decimal-Delete.json | 1133 ++++++++++ .../fle2-Range-Decimal-FindOneAndUpdate.json | 1930 ++++++++++++++++ .../legacy/fle2-Range-Decimal-InsertFind.json | 1899 ++++++++++++++++ .../legacy/fle2-Range-Decimal-Update.json | 1934 +++++++++++++++++ ...fle2-Range-DecimalPrecision-Aggregate.json | 590 +++++ ...e2-Range-DecimalPrecision-Correctness.json | 1650 ++++++++++++++ .../fle2-Range-DecimalPrecision-Delete.json | 493 +++++ ...nge-DecimalPrecision-FindOneAndUpdate.json | 612 ++++++ ...le2-Range-DecimalPrecision-InsertFind.json | 577 +++++ .../fle2-Range-DecimalPrecision-Update.json | 612 ++++++ .../legacy/fle2-Range-Double-Aggregate.json | 1138 ++++++++++ .../legacy/fle2-Range-Double-Correctness.json | 1160 ++++++++++ .../spec/legacy/fle2-Range-Double-Delete.json | 749 +++++++ .../fle2-Range-Double-FindOneAndUpdate.json | 1160 ++++++++++ .../legacy/fle2-Range-Double-InsertFind.json | 1129 ++++++++++ .../spec/legacy/fle2-Range-Double-Update.json | 1164 ++++++++++ .../fle2-Range-DoublePrecision-Aggregate.json | 586 +++++ ...le2-Range-DoublePrecision-Correctness.json | 1650 ++++++++++++++ .../fle2-Range-DoublePrecision-Delete.json | 491 +++++ ...ange-DoublePrecision-FindOneAndUpdate.json | 608 ++++++ ...fle2-Range-DoublePrecision-InsertFind.json | 577 +++++ .../fle2-Range-DoublePrecision-Update.json | 612 ++++++ .../spec/legacy/fle2-Range-Int-Aggregate.json | 490 +++++ .../legacy/fle2-Range-Int-Correctness.json | 1644 ++++++++++++++ .../spec/legacy/fle2-Range-Int-Delete.json | 437 ++++ .../fle2-Range-Int-FindOneAndUpdate.json | 512 +++++ .../legacy/fle2-Range-Int-InsertFind.json | 481 ++++ .../spec/legacy/fle2-Range-Int-Update.json | 516 +++++ .../legacy/fle2-Range-Long-Aggregate.json | 490 +++++ .../legacy/fle2-Range-Long-Correctness.json | 1644 ++++++++++++++ .../spec/legacy/fle2-Range-Long-Delete.json | 437 ++++ .../fle2-Range-Long-FindOneAndUpdate.json | 512 +++++ .../legacy/fle2-Range-Long-InsertFind.json | 481 ++++ .../spec/legacy/fle2-Range-Long-Update.json | 516 +++++ .../spec/legacy/fle2-Range-WrongType.json | 162 ++ .../spec/legacy/fle2-Update.json | 3 +- ...e2-validatorAndPartialFieldExpression.json | 3 +- 58 files changed, 38270 insertions(+), 15 deletions(-) create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json create mode 100644 test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json diff --git a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json index 629faf189d..b8d06e8bcd 100644 --- a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json +++ b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2-Compact.json index 46da99cbfc..6ca0f9ba02 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2-Compact.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json index 6836f40e04..9f8db41f87 100644 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json index c6d0bca0d1..e622d3334d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json +++ b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json index 0e3e06396e..8687127748 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2-Delete.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json index ea3eb4850c..911b428633 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json index 1d3227ee7f..f4386483da 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json index 030952e056..60820aae95 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json index b31438876f..de1b5c5aad 100644 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json index 81a549590e..84b69d7de9 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json index c1bdc90760..9b31438525 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json index 2db1cd7702..4210da09e4 100644 --- a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json index e9dd586c26..9d255bd493 100644 --- a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json +++ b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json new file mode 100644 index 0000000000..a35321cd35 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json @@ -0,0 +1,514 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json new file mode 100644 index 0000000000..5832e85418 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json @@ -0,0 +1,1842 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json new file mode 100644 index 0000000000..b5856e7620 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json @@ -0,0 +1,459 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDate": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..a59258a466 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json @@ -0,0 +1,538 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDate": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json new file mode 100644 index 0000000000..4357fafeea --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json @@ -0,0 +1,505 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json new file mode 100644 index 0000000000..fd170554f6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json @@ -0,0 +1,540 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDate": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json new file mode 100644 index 0000000000..73d2cf4892 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json @@ -0,0 +1,1908 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json new file mode 100644 index 0000000000..89b7bd3118 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json @@ -0,0 +1,1158 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimal": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json new file mode 100644 index 0000000000..0463be1c69 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json @@ -0,0 +1,1133 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimal": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..d0e2967771 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1930 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimal": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimal": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimal": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json new file mode 100644 index 0000000000..cea03e23fe --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json @@ -0,0 +1,1899 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json new file mode 100644 index 0000000000..2f8b991cf7 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json @@ -0,0 +1,1934 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimal": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimal": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimal": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimal": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimal": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimal": { + "$gt": { + "$binary": { + "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimal": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimal", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimal": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimal": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..a3e605d1bb --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json @@ -0,0 +1,590 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..9fafc243d6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..3d7d359af6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json @@ -0,0 +1,493 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimalPrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..b1442c3a3c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,612 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimalPrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..3b8202ff87 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json @@ -0,0 +1,577 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json new file mode 100644 index 0000000000..3dc6631c61 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json @@ -0,0 +1,612 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDecimalPrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json new file mode 100644 index 0000000000..3d54be3d18 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json @@ -0,0 +1,1138 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json new file mode 100644 index 0000000000..b09e966324 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json @@ -0,0 +1,1160 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDouble": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json new file mode 100644 index 0000000000..fa09cb87df --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json @@ -0,0 +1,749 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDouble": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..59a304166b --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json @@ -0,0 +1,1160 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDouble": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDouble": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDouble": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json new file mode 100644 index 0000000000..634230eaca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json @@ -0,0 +1,1129 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json new file mode 100644 index 0000000000..cdc9f28e76 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json @@ -0,0 +1,1164 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDouble": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDouble": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDouble": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDouble": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDouble": { + "$gt": { + "$binary": { + "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDouble": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDouble", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDouble": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDouble": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..f2ea49ad75 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json @@ -0,0 +1,586 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..e69d912694 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json new file mode 100644 index 0000000000..d6a9c4b7e7 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json @@ -0,0 +1,491 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDoublePrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..0511c2e37e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,608 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDoublePrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..616101b4d4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json @@ -0,0 +1,577 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json new file mode 100644 index 0000000000..300202e227 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json @@ -0,0 +1,612 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedDoublePrecision": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json new file mode 100644 index 0000000000..536415f3fe --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json @@ -0,0 +1,490 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json new file mode 100644 index 0000000000..6abd773da8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json new file mode 100644 index 0000000000..9d5bff1d19 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json @@ -0,0 +1,437 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedInt": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..4bf57700c9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json @@ -0,0 +1,512 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedInt": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json new file mode 100644 index 0000000000..6f6022e749 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json @@ -0,0 +1,481 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json new file mode 100644 index 0000000000..17d23b957f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json @@ -0,0 +1,516 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedInt": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json new file mode 100644 index 0000000000..3f1c723bd2 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json @@ -0,0 +1,490 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json new file mode 100644 index 0000000000..972388c6c4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json new file mode 100644 index 0000000000..89e1898406 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json @@ -0,0 +1,437 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedLong": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..59342a343a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json @@ -0,0 +1,512 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedLong": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json new file mode 100644 index 0000000000..882e52170d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json @@ -0,0 +1,481 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "bE1vqWj3KNyM7cCYUv/cnYm8BPaUL3eMp5syTHq6NF4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json new file mode 100644 index 0000000000..92e3e390a5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json @@ -0,0 +1,516 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + }, + "deleteTokens": { + "default.default": { + "encryptedLong": { + "e": { + "$binary": { + "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", + "subType": "00" + } + }, + "o": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + } + } + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "5nRutVIyq7URVOVtbE4vM01APSIajAVnsShMwjBlzkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "DLCAJs+W2PL2DV5YChCL6dYrQNr+j4p3L7xhVaub4ic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json b/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json new file mode 100644 index 0000000000..9eddf1c99c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json @@ -0,0 +1,162 @@ +{ + "runOn": [ + { + "minServerVersion": "6.2.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json index 87830af32d..090f44f9ac 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2-Update.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], diff --git a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json index fab36f75a1..e70ca7c72d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json +++ b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json @@ -4,7 +4,8 @@ "minServerVersion": "6.0.0", "topology": [ "replicaset", - "sharded" + "sharded", + "load-balanced" ] } ], From a3720d9ceaa699e196fc1624f4c62515c2e5410d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 25 Jan 2023 09:41:23 -0600 Subject: [PATCH 0840/2111] PYTHON-3568 Intellisense highlights multiple PyMongo methods because of CodecOptions (#1139) --- .github/workflows/test-python.yml | 13 ++++++++- bson/__init__.py | 34 +++++++++++------------- bson/codec_options.pyi | 10 +++---- bson/typings.py | 30 +++++++++++++++++++++ doc/examples/type_hints.rst | 4 +-- mypy.ini | 2 +- pymongo/collection.py | 7 ++--- pymongo/database.py | 17 ++++++------ pymongo/message.py | 4 +-- pymongo/mongo_client.py | 17 ++++++++---- pymongo/typings.py | 30 ++++++++++----------- test/test_database.py | 4 +-- test/{test_mypy.py => test_typing.py} | 6 +++-- test/test_typing_strict.py | 38 +++++++++++++++++++++++++++ 14 files changed, 150 insertions(+), 66 deletions(-) create mode 100644 bson/typings.py rename test/{test_mypy.py => test_typing.py} (98%) create mode 100644 test/test_typing_strict.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 414eef7a1b..8dad68ab20 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -67,7 +67,18 @@ jobs: mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test python -m pip install -U typing_extensions - mypy --install-types --non-interactive test/test_mypy.py + mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py + - name: Run mypy strict + run: | + mypy --strict test/test_typing_strict.py + - name: Run pyright + run: | + python -m pip install -U pip pyright==1.1.290 + pyright test/test_typing.py test/test_typing_strict.py + - name: Run pyright strict + run: | + echo '{"strict": ["tests/test_typing_strict.py"]}' >> pyrightconfig.json + pyright test/test_typing_strict.py linkcheck: name: Check Links diff --git a/bson/__init__.py b/bson/__init__.py index c6a81d97ec..2fe4aa173e 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -101,7 +101,6 @@ DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion, - _DocumentType, _raw_document_class, ) from bson.datetime_ms import ( @@ -125,8 +124,7 @@ # Import some modules for type-checking only. if TYPE_CHECKING: - from array import array - from mmap import mmap + from bson.typings import _DocumentIn, _DocumentType, _ReadableBuffer try: from bson import _cbson # type: ignore[attr-defined] @@ -986,12 +984,8 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo _CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") -_DocumentIn = Mapping[str, Any] -_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] - - def encode( - document: _DocumentIn, + document: "_DocumentIn", check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, ) -> bytes: @@ -1022,8 +1016,8 @@ def encode( def decode( - data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> _DocumentType: + data: "_ReadableBuffer", codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> "_DocumentType": """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -1056,11 +1050,13 @@ def decode( return _bson_to_dict(data, opts) -def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> List[_DocumentType]: +def _decode_all( + data: "_ReadableBuffer", opts: "CodecOptions[_DocumentType]" +) -> "List[_DocumentType]": """Decode a BSON data to multiple documents.""" data, view = get_data_and_view(data) data_len = len(data) - docs: List[_DocumentType] = [] + docs: "List[_DocumentType]" = [] position = 0 end = data_len - 1 use_raw = _raw_document_class(opts.document_class) @@ -1091,8 +1087,8 @@ def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> L def decode_all( - data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> List[_DocumentType]: + data: "_ReadableBuffer", codec_options: "Optional[CodecOptions[_DocumentType]]" = None +) -> "List[_DocumentType]": """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that @@ -1213,7 +1209,7 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - # Decode documents for internal use. from bson.raw_bson import RawBSONDocument - internal_codec_options = codec_options.with_options( + internal_codec_options: CodecOptions[RawBSONDocument] = codec_options.with_options( document_class=RawBSONDocument, type_registry=None ) _doc = _bson_to_dict(data, internal_codec_options) @@ -1228,7 +1224,7 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - def decode_iter( data: bytes, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> Iterator[_DocumentType]: +) -> "Iterator[_DocumentType]": """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1264,7 +1260,7 @@ def decode_iter( def decode_file_iter( file_obj: Union[BinaryIO, IO], codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> Iterator[_DocumentType]: +) -> "Iterator[_DocumentType]": """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object @@ -1325,7 +1321,7 @@ class BSON(bytes): @classmethod def encode( cls: Type["BSON"], - document: _DocumentIn, + document: "_DocumentIn", check_keys: bool = False, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, ) -> "BSON": @@ -1352,7 +1348,7 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override,assignment] + def decode(self, codec_options: "CodecOptions[_DocumentType]" = DEFAULT_CODEC_OPTIONS) -> "_DocumentType": # type: ignore[override,assignment] """Decode this BSON data. By default, returns a BSON document represented as a Python diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi index 2424516f08..8242bd4cb2 100644 --- a/bson/codec_options.pyi +++ b/bson/codec_options.pyi @@ -22,7 +22,8 @@ you get the error: "TypeError: 'type' object is not subscriptable". import datetime import abc import enum -from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union +from typing import Tuple, Generic, Optional, Mapping, Any, Type, Dict, Iterable, Tuple, Callable, Union +from bson.typings import _DocumentType, _DocumentTypeArg class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): @@ -52,9 +53,6 @@ class TypeRegistry: def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... def __eq__(self, other: Any) -> Any: ... - -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) - class DatetimeConversion(int, enum.Enum): DATETIME = ... DATETIME_CLAMP = ... @@ -82,7 +80,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): ) -> CodecOptions[_DocumentType]: ... # CodecOptions API - def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... + def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentTypeArg]: ... def _arguments_repr(self) -> str: ... @@ -100,7 +98,7 @@ class CodecOptions(Tuple, Generic[_DocumentType]): _fields: Tuple[str] -DEFAULT_CODEC_OPTIONS: CodecOptions[MutableMapping[str, Any]] +DEFAULT_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" _RAW_BSON_DOCUMENT_MARKER: int def _raw_document_class(document_class: Any) -> bool: ... diff --git a/bson/typings.py b/bson/typings.py new file mode 100644 index 0000000000..14a8131f69 --- /dev/null +++ b/bson/typings.py @@ -0,0 +1,30 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by bson""" +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, TypeVar, Union + +if TYPE_CHECKING: + from array import array + from mmap import mmap + + from bson.raw_bson import RawBSONDocument + + +# Common Shared Types. +_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] +_DocumentOut = _DocumentIn +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +_DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) +_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index b413ad7b24..e5ad3338e1 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -20,7 +20,7 @@ type of document object returned when decoding BSON documents. Due to `limitations in mypy`_, the default values for generic document types are not yet provided (they will eventually be ``Dict[str, any]``). -For a larger set of examples that use types, see the PyMongo `test_mypy module`_. +For a larger set of examples that use types, see the PyMongo `test_typing module`_. If you would like to opt out of using the provided types, add the following to your `mypy config`_: :: @@ -326,5 +326,5 @@ Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocu .. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html .. _limitations in mypy: https://github.com/python/mypy/issues/3737 .. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html -.. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py +.. _test_typing module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_typing.py .. _schema validation: https://www.mongodb.com/docs/manual/core/schema-validation/#when-to-use-schema-validation diff --git a/mypy.ini b/mypy.ini index 2562177ab1..d0e6ab5ff9 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,7 +32,7 @@ ignore_missing_imports = True [mypy-snappy.*] ignore_missing_imports = True -[mypy-test.test_mypy] +[mypy-test.test_typing] warn_unused_ignores = True [mypy-winkerberos.*] diff --git a/pymongo/collection.py b/pymongo/collection.py index 77f154f5e7..4cb3fa79c9 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -72,7 +72,7 @@ InsertOneResult, UpdateResult, ) -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline from pymongo.write_concern import WriteConcern _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} @@ -103,6 +103,7 @@ class ReturnDocument(object): if TYPE_CHECKING: + import bson from pymongo.client_session import ClientSession from pymongo.database import Database from pymongo.read_concern import ReadConcern @@ -116,7 +117,7 @@ def __init__( database: "Database[_DocumentType]", name: str, create: Optional[bool] = False, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, @@ -394,7 +395,7 @@ def database(self) -> "Database[_DocumentType]": def with_options( self, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, diff --git a/pymongo/database.py b/pymongo/database.py index 259c22d558..86754b2c05 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -29,7 +29,7 @@ cast, ) -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.dbref import DBRef from bson.son import SON from bson.timestamp import Timestamp @@ -41,7 +41,7 @@ from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline def _check_name(name): @@ -55,6 +55,7 @@ def _check_name(name): if TYPE_CHECKING: + import bson import bson.codec_options from pymongo.client_session import ClientSession from pymongo.mongo_client import MongoClient @@ -72,7 +73,7 @@ def __init__( self, client: "MongoClient[_DocumentType]", name: str, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -152,7 +153,7 @@ def name(self) -> str: def with_options( self, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -239,7 +240,7 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": def get_collection( self, name: str, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -295,7 +296,7 @@ def get_collection( def create_collection( self, name: str, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional["WriteConcern"] = None, read_concern: Optional["ReadConcern"] = None, @@ -976,7 +977,7 @@ def _drop_helper(self, name, session=None, comment=None): @_csot.apply def drop_collection( self, - name_or_collection: Union[str, Collection], + name_or_collection: Union[str, Collection[_DocumentTypeArg]], session: Optional["ClientSession"] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, @@ -1068,7 +1069,7 @@ def drop_collection( def validate_collection( self, - name_or_collection: Union[str, Collection], + name_or_collection: Union[str, Collection[_DocumentTypeArg]], scandata: bool = False, full: bool = False, session: Optional["ClientSession"] = None, diff --git a/pymongo/message.py b/pymongo/message.py index 960832cb9e..9fa64a875a 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,7 +24,7 @@ import random import struct from io import BytesIO as _BytesIO -from typing import Any, Dict, NoReturn +from typing import Any, Mapping, NoReturn import bson from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode @@ -81,7 +81,7 @@ } _FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} -_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Dict[str, Any]]" = CodecOptions( +_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" = CodecOptions( unicode_decode_error_handler="replace" ) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index dccd4bb6b1..ab0c749889 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -53,7 +53,8 @@ cast, ) -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +import bson +from bson.codec_options import DEFAULT_CODEC_OPTIONS, TypeRegistry from bson.son import SON from bson.timestamp import Timestamp from pymongo import ( @@ -90,7 +91,13 @@ from pymongo.settings import TopologySettings from pymongo.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription -from pymongo.typings import _Address, _CollationIn, _DocumentType, _Pipeline +from pymongo.typings import ( + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) from pymongo.uri_parser import ( _check_options, _handle_option_deprecations, @@ -1875,7 +1882,7 @@ def list_database_names( @_csot.apply def drop_database( self, - name_or_database: Union[str, database.Database], + name_or_database: Union[str, database.Database[_DocumentTypeArg]], session: Optional[client_session.ClientSession] = None, comment: Optional[Any] = None, ) -> None: @@ -1928,7 +1935,7 @@ def drop_database( def get_default_database( self, default: Optional[str] = None, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, @@ -1989,7 +1996,7 @@ def get_default_database( def get_database( self, name: Optional[str] = None, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional["ReadConcern"] = None, diff --git a/pymongo/typings.py b/pymongo/typings.py index fe0e8bd523..32cd980c97 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -13,30 +13,18 @@ # limitations under the License. """Type aliases used by PyMongo""" -from typing import ( - TYPE_CHECKING, - Any, - Mapping, - MutableMapping, - Optional, - Sequence, - Tuple, - TypeVar, - Union, -) +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Tuple, Union + +from bson.typings import _DocumentIn, _DocumentOut, _DocumentType, _DocumentTypeArg if TYPE_CHECKING: - from bson.raw_bson import RawBSONDocument from pymongo.collation import Collation # Common Shared Types. _Address = Tuple[str, Optional[int]] _CollationIn = Union[Mapping[str, Any], "Collation"] -_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentOut = _DocumentIn -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) def strip_optional(elem): @@ -44,3 +32,15 @@ def strip_optional(elem): while inside a list comprehension.""" assert elem is not None return elem + + +__all__ = [ + "_DocumentIn", + "_DocumentOut", + "_DocumentType", + "_DocumentTypeArg", + "_Address", + "_CollationIn", + "_Pipeline", + "strip_optional", +] diff --git a/test/test_database.py b/test/test_database.py index 53af4912e4..b6be380aab 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -435,7 +435,7 @@ def test_id_ordering(self): db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) ) cursor = db.test.find() for x in cursor: @@ -469,7 +469,7 @@ def test_deref_kwargs(self): db.test.insert_one({"_id": 4, "foo": "bar"}) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) ) self.assertEqual( SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) diff --git a/test/test_mypy.py b/test/test_typing.py similarity index 98% rename from test/test_mypy.py rename to test/test_typing.py index 3b29bbf20e..8fc0f5a23e 100644 --- a/test/test_mypy.py +++ b/test/test_typing.py @@ -422,7 +422,8 @@ def test_typeddict_not_required_document_type(self) -> None: assert out is not None # This should fail because the output is a Movie. assert out["foo"] # type:ignore[typeddict-item] - assert out["_id"] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore @only_type_check def test_typeddict_empty_document_type(self) -> None: @@ -442,7 +443,8 @@ def test_typeddict_find_notrequired(self): coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) out = coll.find_one({}) assert out is not None - assert out["_id"] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore @only_type_check def test_raw_bson_document_type(self) -> None: diff --git a/test/test_typing_strict.py b/test/test_typing_strict.py new file mode 100644 index 0000000000..55cb1454bc --- /dev/null +++ b/test/test_typing_strict.py @@ -0,0 +1,38 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test typings in strict mode.""" +import unittest +from typing import TYPE_CHECKING, Any, Dict + +import pymongo +from pymongo.collection import Collection +from pymongo.database import Database + + +def test_generic_arguments() -> None: + """Ensure known usages of generic arguments pass strict typing""" + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + mongo_client: pymongo.MongoClient[Dict[str, Any]] = pymongo.MongoClient() + mongo_client.drop_database("foo") + mongo_client.get_default_database() + db = mongo_client.get_database("test_db") + db = Database(mongo_client, "test_db") + db.with_options() + db.validate_collection("py_test") + col = db.get_collection("py_test") + col.insert_one({"abc": 123}) + col = Collection(db, "py_test") + col.with_options() From 0b843b76f6a426b1a6840872cd2991d1aaa3f0e9 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 25 Jan 2023 14:09:15 -0800 Subject: [PATCH 0841/2111] BUMP 4.4.0b0 (#1144) --- doc/changelog.rst | 6 ++++++ doc/installation.rst | 2 +- pymongo/_version.py | 2 +- setup.py | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 6913f09fc3..6a6e6fef2d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,12 @@ Changes in Version 4.4 - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. +- **BETA** Added support for range queries on client side field level encrypted collections. +- pymongocrypt 1.5.0 or later is now required for client side field level + encryption support. +- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. +- Improved support for type-checking with MyPy "strict" mode (`--strict`). +- Added support for Python 3.11. Issues Resolved ............... diff --git a/doc/installation.rst b/doc/installation.rst index 4810353f98..c4cbc78d93 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -197,4 +197,4 @@ but can be found on the `GitHub tags page `_. They can be installed by passing the full URL for the tag to pip:: - $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0rc0.tar.gz + $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/4.4.0b0.tar.gz diff --git a/pymongo/_version.py b/pymongo/_version.py index 78c325a23c..71a59a0dee 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, "b0") def get_version_string() -> str: diff --git a/setup.py b/setup.py index 6d1a711708..4fa51fa314 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,7 @@ def build_extension(self, ext): aws_reqs = ["pymongo-auth-aws<2.0.0"] extras_require = { - "encryption": ["pymongocrypt>=1.3.0,<2.0.0"] + aws_reqs, + "encryption": ["pymongocrypt>=1.5.0,<2.0.0"] + aws_reqs, "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 4af7a076186a93f87a542051d19684ded0d00fe8 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 25 Jan 2023 14:13:17 -0800 Subject: [PATCH 0842/2111] BUMP 4.5.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 71a59a0dee..db32b1ddb2 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, "b0") +version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev0") def get_version_string() -> str: From e353d5791b020a40a875cbf401a788c6699eb044 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 25 Jan 2023 15:29:54 -0800 Subject: [PATCH 0843/2111] BUMP 4.5.0.dev1 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index db32b1ddb2..514dd7c366 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev0") +version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev1") def get_version_string() -> str: From 06dd53666909e3a0cbbe58bc81a64bda33cd16b4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 27 Jan 2023 12:32:18 -0800 Subject: [PATCH 0844/2111] BUMP 4.4.0.dev1 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 514dd7c366..a5885d8cc5 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev1") +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev1") def get_version_string() -> str: From b3099c62de61205b87d9578c5d2ed1bba9451eb8 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 30 Jan 2023 12:13:30 -0800 Subject: [PATCH 0845/2111] PYTHON-3558 Missing docs for JSONOptions (#1143) --- bson/codec_options.py | 2 +- bson/json_util.py | 112 +++++++++++++++++++++--------------------- 2 files changed, 58 insertions(+), 56 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index 6f4fdaac8d..c09de8a931 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -330,7 +330,7 @@ def __init__(self, *args, **kwargs): retrieved from the server will be modified in the client application and stored back to the server. """ - return super().__init__() + super().__init__() def __new__( cls: Type["CodecOptions"], diff --git a/bson/json_util.py b/bson/json_util.py index 517adff4e0..0c2ca58283 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -205,66 +205,68 @@ class JSONMode: class JSONOptions(CodecOptions): - """Encapsulates JSON options for :func:`dumps` and :func:`loads`. - - :Parameters: - - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects - are encoded to MongoDB Extended JSON's *Strict mode* type - `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they - will be encoded as an `int`. Defaults to ``False``. - - `datetime_representation`: The representation to use when encoding - instances of :class:`datetime.datetime`. Defaults to - :const:`~DatetimeRepresentation.LEGACY`. - - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to - MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it - will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. - - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to - Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. - - `document_class`: BSON documents returned by :func:`loads` will be - decoded to an instance of this class. Must be a subclass of - :class:`collections.MutableMapping`. Defaults to :class:`dict`. - - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` - to use when encoding and decoding instances of :class:`uuid.UUID`. - Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type - `Date` will be decoded to timezone aware instances of - :class:`datetime.datetime`. Otherwise they will be naive. Defaults - to ``False``. - - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the - timezone from which :class:`~datetime.datetime` objects should be - decoded. Defaults to :const:`~bson.tz_util.utc`. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. See - :ref:`handling-out-of-range-datetimes` for details. - - `args`: arguments to :class:`~bson.codec_options.CodecOptions` - - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` - - .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. - - .. versionchanged:: 4.0 - The default for `json_mode` was changed from :const:`JSONMode.LEGACY` - to :const:`JSONMode.RELAXED`. - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionchanged:: 3.5 - Accepts the optional parameter `json_mode`. - - .. versionchanged:: 4.0 - Changed default value of `tz_aware` to False. - """ - json_mode: int strict_number_long: bool datetime_representation: int strict_uuid: bool + def __init__(self, *args, **kwargs): + """Encapsulates JSON options for :func:`dumps` and :func:`loads`. + + :Parameters: + - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects + are encoded to MongoDB Extended JSON's *Strict mode* type + `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they + will be encoded as an `int`. Defaults to ``False``. + - `datetime_representation`: The representation to use when encoding + instances of :class:`datetime.datetime`. Defaults to + :const:`~DatetimeRepresentation.LEGACY`. + - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to + MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it + will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. + - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to + Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. + - `document_class`: BSON documents returned by :func:`loads` will be + decoded to an instance of this class. Must be a subclass of + :class:`collections.MutableMapping`. Defaults to :class:`dict`. + - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` + to use when encoding and decoding instances of :class:`uuid.UUID`. + Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type + `Date` will be decoded to timezone aware instances of + :class:`datetime.datetime`. Otherwise they will be naive. Defaults + to ``False``. + - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the + timezone from which :class:`~datetime.datetime` objects should be + decoded. Defaults to :const:`~bson.tz_util.utc`. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. + - `args`: arguments to :class:`~bson.codec_options.CodecOptions` + - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` + + .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. + + .. versionchanged:: 4.0 + The default for `json_mode` was changed from :const:`JSONMode.LEGACY` + to :const:`JSONMode.RELAXED`. + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionchanged:: 3.5 + Accepts the optional parameter `json_mode`. + + .. versionchanged:: 4.0 + Changed default value of `tz_aware` to False. + """ + super().__init__() + def __new__( cls: Type["JSONOptions"], strict_number_long: Optional[bool] = None, From b492263826123a78513d94eec186e184eb97f421 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 31 Jan 2023 14:58:37 -0800 Subject: [PATCH 0846/2111] PYTHON-3357 Automatically create Queryable Encryption keys (#1145) --- pymongo/database.py | 56 ++++++----- pymongo/encryption.py | 106 +++++++++++++++++++- test/test_encryption.py | 214 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 348 insertions(+), 28 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 86754b2c05..b3c6c60851 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,6 +13,7 @@ # limitations under the License. """Database level operations.""" +from copy import deepcopy from typing import ( TYPE_CHECKING, Any, @@ -292,6 +293,28 @@ def get_collection( read_concern, ) + def _get_encrypted_fields(self, kwargs, coll_name, ask_db): + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return deepcopy(encrypted_fields) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ) + if ask_db and self.client.options.auto_encryption_opts: + options = self[coll_name].options() + if options.get("encryptedFields"): + return deepcopy(options["encryptedFields"]) + return None + @_csot.apply def create_collection( self, @@ -419,19 +442,10 @@ def create_collection( .. _create collection command: https://mongodb.com/docs/manual/reference/command/create """ - encrypted_fields = kwargs.get("encryptedFields") - if ( - not encrypted_fields - and self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - ): - encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( - "%s.%s" % (self.name, name) - ) - kwargs["encryptedFields"] = encrypted_fields - + encrypted_fields = self._get_encrypted_fields(kwargs, name, False) if encrypted_fields: common.validate_is_mapping("encryptedFields", encrypted_fields) + kwargs["encryptedFields"] = encrypted_fields clustered_index = kwargs.get("clusteredIndex") if clustered_index: @@ -1038,21 +1052,11 @@ def drop_collection( if not isinstance(name, str): raise TypeError("name_or_collection must be an instance of str") - full_name = "%s.%s" % (self.name, name) - if ( - not encrypted_fields - and self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - ): - encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( - full_name - ) - if not encrypted_fields and self.client.options.auto_encryption_opts: - colls = list( - self.list_collections(filter={"name": name}, session=session, comment=comment) - ) - if colls and colls[0]["options"].get("encryptedFields"): - encrypted_fields = colls[0]["options"]["encryptedFields"] + encrypted_fields = self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) if encrypted_fields: common.validate_is_mapping("encrypted_fields", encrypted_fields) self._drop_helper( diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 8b51863f96..0e281f7b37 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -18,7 +18,8 @@ import enum import socket import weakref -from typing import Any, Generic, Mapping, Optional, Sequence +from copy import deepcopy +from typing import Any, Generic, Mapping, Optional, Sequence, Tuple try: from pymongocrypt.auto_encrypter import AutoEncrypter @@ -39,8 +40,10 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from bson.son import SON from pymongo import _csot +from pymongo.collection import Collection from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon +from pymongo.database import Database from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, @@ -552,6 +555,107 @@ def __init__( # Use the same key vault collection as the callback. self._key_vault_coll = self._io_callbacks.key_vault_coll + def create_encrypted_collection( + self, + database: Database, + name: str, + encrypted_fields: Mapping[str, Any], + kms_provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + **kwargs: Any, + ) -> Tuple[Collection[_DocumentType], Mapping[str, Any]]: + """Create a collection with encryptedFields. + + .. warning:: + This function does not update the encryptedFieldsMap in the client's + AutoEncryptionOpts, thus the user must create a new client after calling this function with + the encryptedFields returned. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.EncryptionError` will be + raised if the collection already exists. + + :Parameters: + - `name`: the name of the collection to create + - `encrypted_fields` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + + The "keyId" may be set to ``None`` to auto-generate the data keys. + - `kms_provider` (optional): the KMS provider to be used + - `master_key` (optional): Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + - `key_alt_names` (optional): An optional list of string alternate + names used to reference a key. If a key is created with alternate + names, then encryption may refer to the key by the unique alternate + name instead of by ``key_id``. + - `key_material` (optional): Sets the custom key material to be used + by the data key for encryption and decryption. + - `**kwargs` (optional): additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ) + except EncryptionError as exc: + raise EncryptionError( + Exception( + "Error occurred while creating data key for field %s with encryptedFields=%s" + % (field["path"], encrypted_fields) + ) + ) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptionError( + Exception( + f"Error: {str(exc)} occurred while creating collection with encryptedFields={str(encrypted_fields)}" + ) + ) from exc + def create_data_key( self, kms_provider: str, diff --git a/test/test_encryption.py b/test/test_encryption.py index fc6d62c727..0df875d956 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -65,7 +65,7 @@ from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON -from pymongo import encryption +from pymongo import ReadPreference, encryption from pymongo.cursor import CursorType from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts @@ -2687,5 +2687,217 @@ def test_int(self): self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1), int) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(6, 0, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + self.addCleanup(self.client_encryption.close) + + def test_01_simple_create(self): + coll, _ = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + def test_03_invalid_keyid(self): + with self.assertRaisesRegex( + EncryptionError, + "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + def test_04_insert_encrypted(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + coll.insert_one({"ssn": encrypted_value}) + + def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + def test_options_forward(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + def test_mixed_null_keyids(self): + key = self.client_encryption.create_data_key(kms_provider="local") + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + def test_create_datakey_fails(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the error message includes the previous keys in the error message even when generating keys fails. + with self.assertRaisesRegex( + EncryptionError, + f"data key for field ssn with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary.*keyId.*None", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + # Because this is the second one to use the altName "1", it will fail when creating the data_key. + {"path": "ssn", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + key_alt_names=["1"], + ) + + def test_create_failure(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the error message includes the previous keys in the error message even when it is the creation + # of the collection that fails. + with self.assertRaisesRegex( + EncryptionError, + f"while creating collection with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + + def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + self.db.create_collection("testing1") + with self.assertRaisesRegex( + EncryptionError, + "while creating collection with encryptedFields=.*keyId.*Binary", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaisesRegex( + EncryptionError, + "while creating collection with encryptedFields=.*keyId.*Binary", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + + if __name__ == "__main__": unittest.main() From 540562a60630a57d3eb0c06358b19d3882a5de18 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 31 Jan 2023 15:22:28 -0800 Subject: [PATCH 0847/2111] PYTHON-3577 Fix test_aggregate_out on 4.0 replica set (#1146) --- test/test_load_balancer.py | 2 ++ test/test_read_concern.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 378ae33e03..728b4e567f 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -122,6 +122,8 @@ def test_session_gc(self): session = client.start_session() session.start_transaction() client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server. + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 3a1c8f3a54..2230f2bef2 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context -from test.utils import OvertCommandListener, rs_or_single_client, single_client +from test.utils import OvertCommandListener, rs_or_single_client from bson.son import SON from pymongo.errors import OperationFailure @@ -35,7 +35,7 @@ class TestReadConcern(IntegrationTest): def setUpClass(cls): super(TestReadConcern, cls).setUpClass() cls.listener = OvertCommandListener() - cls.client = single_client(event_listeners=[cls.listener]) + cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test client_context.client.pymongo_test.create_collection("coll") From 79ccf4e2874c7ed73fb17fa880806bc75dc1b8de Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 3 Feb 2023 21:10:30 -0800 Subject: [PATCH 0848/2111] PYTHON-3589 createEncryptedCollection should not accept keyAltNames (#1147) --- pymongo/encryption.py | 13 +++---------- test/test_encryption.py | 7 ++----- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 0e281f7b37..cf76cbe146 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -562,12 +562,13 @@ def create_encrypted_collection( encrypted_fields: Mapping[str, Any], kms_provider: Optional[str] = None, master_key: Optional[Mapping[str, Any]] = None, - key_alt_names: Optional[Sequence[str]] = None, - key_material: Optional[bytes] = None, **kwargs: Any, ) -> Tuple[Collection[_DocumentType], Mapping[str, Any]]: """Create a collection with encryptedFields. + .. note:: Support for Queryable Encryption is in beta. + Backwards-breaking changes may be made before the final release. + .. warning:: This function does not update the encryptedFieldsMap in the client's AutoEncryptionOpts, thus the user must create a new client after calling this function with @@ -607,12 +608,6 @@ def create_encrypted_collection( - `master_key` (optional): Identifies a KMS-specific key used to encrypt the new data key. If the kmsProvider is "local" the `master_key` is not applicable and may be omitted. - - `key_alt_names` (optional): An optional list of string alternate - names used to reference a key. If a key is created with alternate - names, then encryption may refer to the key by the unique alternate - name instead of by ``key_id``. - - `key_material` (optional): Sets the custom key material to be used - by the data key for encryption and decryption. - `**kwargs` (optional): additional keyword arguments are the same as "create_collection". All optional `create collection command`_ parameters should be passed @@ -632,8 +627,6 @@ def create_encrypted_collection( encrypted_fields["fields"][i]["keyId"] = self.create_data_key( kms_provider=kms_provider, # type:ignore[arg-type] master_key=master_key, - key_alt_names=key_alt_names, - key_material=key_material, ) except EncryptionError as exc: raise EncryptionError( diff --git a/test/test_encryption.py b/test/test_encryption.py index 0df875d956..eb9bf8e984 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2826,7 +2826,7 @@ def test_create_datakey_fails(self): # Make sure the error message includes the previous keys in the error message even when generating keys fails. with self.assertRaisesRegex( EncryptionError, - f"data key for field ssn with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary.*keyId.*None", + f"data key for field dob with encryptedFields=.*{re.escape(repr(key))}.*keyId.*None", ): self.client_encryption.create_encrypted_collection( database=self.db, @@ -2835,12 +2835,9 @@ def test_create_datakey_fails(self): "fields": [ {"path": "address", "bsonType": "string", "keyId": key}, {"path": "dob", "bsonType": "string", "keyId": None}, - # Because this is the second one to use the altName "1", it will fail when creating the data_key. - {"path": "ssn", "bsonType": "string", "keyId": None}, ] }, - kms_provider="local", - key_alt_names=["1"], + kms_provider="does not exist", ) def test_create_failure(self): From dcbba962dd480f67a54eaefdb7018ae164cd46da Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Feb 2023 07:35:19 -0800 Subject: [PATCH 0849/2111] PYTHON-3596 Guarantee a document update in retryable writes tests (#1149) --- test/test_retryable_writes.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index a22c776534..1e978f21be 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -128,23 +128,23 @@ def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), - (coll.bulk_write, [[ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[ReplaceOne({}, {}), ReplaceOne({}, {})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), ( coll.bulk_write, - [[UpdateOne({}, {"$set": {"a": 1}}), UpdateOne({}, {"$set": {"a": 1}})]], + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], {}, ), (coll.bulk_write, [[DeleteOne({})]], {}), (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), - (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), (coll.delete_one, [{}], {}), - (coll.find_one_and_replace, [{}, {"a": 3}], {}), - (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), - (coll.find_one_and_delete, [{}, {}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), ] @@ -490,6 +490,7 @@ def setUpClass(cls): } @client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) From 2e6e9a85070a766c2eb53351dabdc85956367f1f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 7 Feb 2023 10:23:59 -0800 Subject: [PATCH 0850/2111] PYTHON-3592 createEncryptedCollection should raise a specialized exception to report the intermediate encryptedFields (#1148) --- pymongo/encryption.py | 17 +++++-------- pymongo/errors.py | 25 ++++++++++++++++++ test/test_encryption.py | 56 ++++++++++++++++++++++------------------- 3 files changed, 61 insertions(+), 37 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index cf76cbe146..6a6150d0c0 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -47,6 +47,7 @@ from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, + EncryptedCollectionError, EncryptionError, InvalidOperation, ServerSelectionTimeoutError, @@ -614,6 +615,9 @@ def create_encrypted_collection( as keyword arguments to this method. See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + :Raises: + - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + .. versionadded:: 4.4 .. _create collection command: @@ -629,12 +633,7 @@ def create_encrypted_collection( master_key=master_key, ) except EncryptionError as exc: - raise EncryptionError( - Exception( - "Error occurred while creating data key for field %s with encryptedFields=%s" - % (field["path"], encrypted_fields) - ) - ) from exc + raise EncryptedCollectionError(exc, encrypted_fields) from exc kwargs["encryptedFields"] = encrypted_fields kwargs["check_exists"] = False try: @@ -643,11 +642,7 @@ def create_encrypted_collection( encrypted_fields, ) except Exception as exc: - raise EncryptionError( - Exception( - f"Error: {str(exc)} occurred while creating collection with encryptedFields={str(encrypted_fields)}" - ) - ) from exc + raise EncryptedCollectionError(exc, encrypted_fields) from exc def create_data_key( self, diff --git a/pymongo/errors.py b/pymongo/errors.py index efc7e2eca0..192eec99d9 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -359,6 +359,31 @@ def timeout(self) -> bool: return False +class EncryptedCollectionError(EncryptionError): + """Raised when creating a collection with encrypted_fields fails. + + .. note:: EncryptedCollectionError and `create_encrypted_collection` are both part of the + Queryable Encryption beta. Backwards-breaking changes may be made before the final release. + + .. versionadded:: 4.4 + """ + + def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: + super(EncryptedCollectionError, self).__init__(cause) + self.__encrypted_fields = encrypted_fields + + @property + def encrypted_fields(self) -> Mapping[str, Any]: + """The encrypted_fields document that allows inferring which data keys are *known* to be created. + + Note that the returned document is not guaranteed to contain information about *all* of the data keys that + were created, for example in the case of an indefinite error like a timeout. Use the `cause` property to + determine whether a definite or indefinite error caused this error, and only rely on the accuracy of the + encrypted_fields if the error is definite. + """ + return self.__encrypted_fields + + class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" diff --git a/test/test_encryption.py b/test/test_encryption.py index eb9bf8e984..dcfb639160 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -74,6 +74,7 @@ BulkWriteError, ConfigurationError, DuplicateKeyError, + EncryptedCollectionError, EncryptionError, InvalidOperation, OperationFailure, @@ -2729,7 +2730,7 @@ def test_02_no_fields(self): def test_03_invalid_keyid(self): with self.assertRaisesRegex( - EncryptionError, + EncryptedCollectionError, "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", ): self.client_encryption.create_encrypted_collection( @@ -2823,31 +2824,32 @@ def test_mixed_null_keyids(self): def test_create_datakey_fails(self): key = self.client_encryption.create_data_key(kms_provider="local") - # Make sure the error message includes the previous keys in the error message even when generating keys fails. - with self.assertRaisesRegex( - EncryptionError, - f"data key for field dob with encryptedFields=.*{re.escape(repr(key))}.*keyId.*None", - ): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name="testing1", - encrypted_fields={ - "fields": [ - {"path": "address", "bsonType": "string", "keyId": key}, - {"path": "dob", "bsonType": "string", "keyId": None}, - ] - }, + encrypted_fields=encrypted_fields, kms_provider="does not exist", ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) def test_create_failure(self): key = self.client_encryption.create_data_key(kms_provider="local") - # Make sure the error message includes the previous keys in the error message even when it is the creation - # of the collection that fails. - with self.assertRaisesRegex( - EncryptionError, - f"while creating collection with encryptedFields=.*{re.escape(repr(key))}.*keyId.*Binary", - ): + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name=1, # type:ignore[arg-type] @@ -2859,6 +2861,8 @@ def test_create_failure(self): }, kms_provider="local", ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) def test_collection_name_collision(self): encrypted_fields = { @@ -2867,16 +2871,16 @@ def test_collection_name_collision(self): ] } self.db.create_collection("testing1") - with self.assertRaisesRegex( - EncryptionError, - "while creating collection with encryptedFields=.*keyId.*Binary", - ): + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name="testing1", encrypted_fields=encrypted_fields, kms_provider="local", ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) self.client_encryption.create_encrypted_collection( database=self.db, @@ -2884,16 +2888,16 @@ def test_collection_name_collision(self): encrypted_fields=encrypted_fields, kms_provider="local", ) - with self.assertRaisesRegex( - EncryptionError, - "while creating collection with encryptedFields=.*keyId.*Binary", - ): + with self.assertRaises( + EncryptedCollectionError, + ) as exc: self.client_encryption.create_encrypted_collection( database=self.db, name="testing1", encrypted_fields=encrypted_fields, kms_provider="local", ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) if __name__ == "__main__": From 5635ef9ff02867ba0bb96b85f9c28ce6c69a76bc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Feb 2023 11:39:04 -0800 Subject: [PATCH 0851/2111] PYTHON-3599 Fix create_data_key/key_alt_names docs example (#1151) --- pymongo/encryption.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 6a6150d0c0..2bd6880065 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -706,9 +706,9 @@ def create_data_key( name instead of by ``key_id``. The following example shows creating and referring to a data key by alternate name:: - client_encryption.create_data_key("local", keyAltNames=["name1"]) + client_encryption.create_data_key("local", key_alt_names=["name1"]) # reference the key with the alternate name - client_encryption.encrypt("457-55-5462", keyAltName="name1", + client_encryption.encrypt("457-55-5462", key_alt_name="name1", algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) - `key_material` (optional): Sets the custom key material to be used by the data key for encryption and decryption. From 05845b803866559957d4880916e1c88604ab6c6d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Fri, 10 Feb 2023 15:28:04 -0800 Subject: [PATCH 0852/2111] PYTHON-3562 Type annotation of `bson.json_utils.loads` is incorrect (only accepts `str`) (#1152) --- bson/json_util.py | 2 +- test/test_json_util.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/bson/json_util.py b/bson/json_util.py index 0c2ca58283..ae464e4ed8 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -440,7 +440,7 @@ def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s: str, *args: Any, **kwargs: Any) -> Any: +def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. diff --git a/test/test_json_util.py b/test/test_json_util.py index 08ee63618f..b7960a16ea 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -71,6 +71,11 @@ def round_trip(self, doc, **kwargs): def test_basic(self): self.round_trip({"hello": "world"}) + def test_loads_bytes(self): + string = b'{"hello": "world"}' + self.assertEqual(json_util.loads(bytes(string)), {"hello": "world"}) + self.assertEqual(json_util.loads(bytearray(string)), {"hello": "world"}) + def test_json_options_with_options(self): opts = JSONOptions( datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY From 6ed6c374f133540ddc5c9d25fff638398ba83649 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 13 Feb 2023 16:50:29 -0600 Subject: [PATCH 0853/2111] PYTHON-3593 Remove bulk api docs page which is blank (#1154) --- doc/api/pymongo/bulk.rst | 6 ------ doc/api/pymongo/index.rst | 1 - 2 files changed, 7 deletions(-) delete mode 100644 doc/api/pymongo/bulk.rst diff --git a/doc/api/pymongo/bulk.rst b/doc/api/pymongo/bulk.rst deleted file mode 100644 index 0d597c26df..0000000000 --- a/doc/api/pymongo/bulk.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`bulk` -- The bulk write operations interface -================================================== - -.. automodule:: pymongo.bulk - :synopsis: The bulk write operations interface. - :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index a4e15b9878..625c138170 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -29,7 +29,6 @@ Sub-modules: .. toctree:: :maxdepth: 2 - bulk change_stream client_options client_session From 1797785f993ffcc1907613e18c514eeb34ce7fff Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 14 Feb 2023 10:18:55 -0800 Subject: [PATCH 0854/2111] PYTHON-3577 Fix test_session_gc on serverless (#1153) --- test/test_load_balancer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 728b4e567f..d4de8debf5 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -122,8 +122,10 @@ def test_session_gc(self): session = client.start_session() session.start_transaction() client.test_session_gc.test.find_one({}, session=session) - # Cleanup the transaction left open on the server. - self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) + # Cleanup the transaction left open on the server unless we're + # testing serverless which does not support killSessions. + if not client_context.serverless: + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. From b63dfbe1e40be437aa462e1ec96fc6836f25df62 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 15 Feb 2023 11:36:42 -0600 Subject: [PATCH 0855/2111] PYTHON-3533 Permit tlsDisableOCSPEndpointCheck in KMS TLS options (#1155) --- .evergreen/run-tests.sh | 3 ++- pymongo/uri_parser.py | 1 - test/test_encryption.py | 28 ++++++++++++++++++++++------ 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index d495e2671a..3a15163b63 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -151,7 +151,8 @@ fi if [ -n "$TEST_ENCRYPTION" ]; then # Need aws dependency for On-Demand KMS Credentials. - python -m pip install '.[aws]' + # Need OSCP dependency to verify OCSP TSL args. + python -m pip install '.[aws,ocsp]' # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index f59af2e74c..398dfbff00 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -605,7 +605,6 @@ def _parse_kms_tls_options(kms_tls_options): "tlsInsecure", "tlsAllowInvalidCertificates", "tlsAllowInvalidHostnames", - "tlsDisableOCSPEndpointCheck", "tlsDisableCertificateRevocationCheck", ]: if n in opts: diff --git a/test/test_encryption.py b/test/test_encryption.py index dcfb639160..1b9a0d8233 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -155,7 +155,6 @@ def test_init_kms_tls_options(self): {"kmip": {"tls": True, "tlsInsecure": True}}, {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, - {"kmip": {"tls": True, "tlsDisableOCSPEndpointCheck": True}}, ]: with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) @@ -2014,7 +2013,9 @@ def test_invalid_hostname_in_kms_certificate(self): # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encrypted.create_data_key("aws", master_key=key) @@ -2067,7 +2068,7 @@ def setUp(self): # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ( "certificate required|SSL handshake failed|" - "KMS connection closed|Connection reset by peer" + "KMS connection closed|Connection reset by peer|ECONNRESET" ) # On Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) @@ -2099,7 +2100,9 @@ def test_01_aws(self): # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' key["endpoint"] = "127.0.0.1:8001" - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("aws", key) def test_02_azure(self): @@ -2114,7 +2117,9 @@ def test_02_azure(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("azure", key) def test_03_gcp(self): @@ -2129,7 +2134,9 @@ def test_03_gcp(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + ): self.client_encryption_invalid_hostname.create_data_key("gcp", key) def test_04_kmip(self): @@ -2146,6 +2153,15 @@ def test_04_kmip(self): ): self.client_encryption_invalid_hostname.create_data_key("kmip") + def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = ClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + self.assertFalse(encryption._io_callbacks.opts._kms_ssl_contexts["aws"].check_ocsp_endpoint) + encryption.close() + # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): From 5b96757b0eb14fec16214f942112a6f4293f9fbb Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 15 Feb 2023 13:03:12 -0800 Subject: [PATCH 0856/2111] PYTHON-3579 Test Failure - Amazon Linux 2018 fails downloading crypt_shared when it is not even needed (#1157) --- .evergreen/config.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ab61725a20..1bdab16bed 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -283,6 +283,10 @@ functions: fi fi + if [ -n "${skip_crypt_shared}" ]; then + export SKIP_CRYPT_SHARED=1 + fi + ${PREPARE_SHELL} MONGODB_VERSION=${VERSION} \ TOPOLOGY=${TOPOLOGY} \ @@ -2133,6 +2137,7 @@ axes: run_on: amazon1-2018-test batchtime: 10080 # 7 days variables: + skip_crypt_shared: true python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - id: archlinux-test From c0dd24e4a7b45f2985a8fca4a0f44ea82974115d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Feb 2023 15:49:00 -0800 Subject: [PATCH 0857/2111] PYTHON-3609 Stop using deprecated setDaemon api in test suite (#1158) --- test/test_gridfs.py | 4 ++-- test/test_gridfs_bucket.py | 4 ++-- test/test_threads.py | 8 ++++---- test/utils_spec_runner.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 35a574a1d9..cfa6e43e85 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -47,7 +47,7 @@ def __init__(self, fs, n): threading.Thread.__init__(self) self.fs = fs self.n = n - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -62,7 +62,7 @@ def __init__(self, fs, n, results): self.fs = fs self.n = n self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index d9bf0cf058..b6a33b4ecc 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -44,7 +44,7 @@ def __init__(self, gfs, num): threading.Thread.__init__(self) self.gfs = gfs self.num = num - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): @@ -59,7 +59,7 @@ def __init__(self, gfs, num, results): self.gfs = gfs self.num = num self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): diff --git a/test/test_threads.py b/test/test_threads.py index 2c73de52e7..899392e1a0 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -30,7 +30,7 @@ def __init__(self, collection, num): self.coll = collection self.num = num self.success = False - self.setDaemon(True) + self.daemon = True def run(self): for i in range(self.num): @@ -44,7 +44,7 @@ class SaveAndFind(threading.Thread): def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection - self.setDaemon(True) + self.daemon = True self.passed = False def run(self): @@ -62,7 +62,7 @@ def __init__(self, collection, n, expect_exception): self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -85,7 +85,7 @@ def __init__(self, collection, n, expect_exception): self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 8528ecb8c7..4252420909 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -52,7 +52,7 @@ def __init__(self, name): super(SpecRunnerThread, self).__init__() self.name = name self.exc = None - self.setDaemon(True) + self.daemon = True self.cond = threading.Condition() self.ops = [] self.stopped = False From 1f8080525146e1155fd1688a6862a02fd42f7094 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 17 Feb 2023 08:31:09 -0600 Subject: [PATCH 0858/2111] PYTHON-3607 FAIL: test_01_aws (test.test_encryption.TestKmsTLSOptions) (#1159) --- test/test_encryption.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test_encryption.py b/test/test_encryption.py index 1b9a0d8233..b7d588e747 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2068,7 +2068,7 @@ def setUp(self): # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ( "certificate required|SSL handshake failed|" - "KMS connection closed|Connection reset by peer|ECONNRESET" + "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE" ) # On Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) From 6e2e70ab803e14bdf4b07eddbb5385d01be80cbc Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 17 Feb 2023 08:31:57 -0600 Subject: [PATCH 0859/2111] PYTHON-3381 Improve readability of sphinx docs (#1156) --- .pre-commit-config.yaml | 2 +- doc/changelog.rst | 6 ------ doc/common-issues.rst | 2 -- doc/conf.py | 18 +++++++++++------- doc/docs-requirements.txt | 3 ++- doc/faq.rst | 2 -- doc/migrate-to-pymongo4.rst | 2 -- doc/python3.rst | 2 -- 8 files changed, 14 insertions(+), 23 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cfe0db31cf..f0ee74c785 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: args: [--line-length=100] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort files: \.py$ diff --git a/doc/changelog.rst b/doc/changelog.rst index 6a6e6fef2d..2ad33e41ec 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -3519,9 +3519,3 @@ Changes in Version 0.9.7 :class:`~pymongo.collection.Collection` names - add version as :attr:`pymongo.version` - add ``--no_ext`` command line option to *setup.py* - -.. toctree:: - :hidden: - - python3 - examples/gevent diff --git a/doc/common-issues.rst b/doc/common-issues.rst index 1571b985e0..f0c9716689 100644 --- a/doc/common-issues.rst +++ b/doc/common-issues.rst @@ -3,8 +3,6 @@ Frequently Encountered Issues Also see the :ref:`TLSErrors` section. -.. contents:: - Server reports wire version X, PyMongo requires Y ------------------------------------------------- diff --git a/doc/conf.py b/doc/conf.py index f66de3868a..cbb525b419 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -32,7 +32,6 @@ except ImportError: pass - # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -108,13 +107,18 @@ # -- Options for HTML output --------------------------------------------------- -# Theme gratefully vendored from CPython source. -html_theme = "pydoctheme" -html_theme_path = ["."] -html_theme_options = {"collapsiblesidebar": True, "googletag": False} +try: + import furo # noqa + + html_theme = "furo" +except ImportError: + # Theme gratefully vendored from CPython source. + html_theme = "pydoctheme" + html_theme_path = ["."] + html_theme_options = {"collapsiblesidebar": True, "googletag": False} -# Additional static files. -html_static_path = ["static"] + # Additional static files. + html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt index 455a47d217..3c66962855 100644 --- a/doc/docs-requirements.txt +++ b/doc/docs-requirements.txt @@ -1,4 +1,5 @@ -Sphinx~=4.2 +Sphinx~=6.1 sphinx_rtd_theme~=0.5 readthedocs-sphinx-search~=0.1 sphinxcontrib-shellcheck~=1.1 +furo==2022.12.7 diff --git a/doc/faq.rst b/doc/faq.rst index acf557a81b..876dc68ed8 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -1,8 +1,6 @@ Frequently Asked Questions ========================== -.. contents:: - Is PyMongo thread-safe? ----------------------- diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5843a2261b..561261c7ad 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -3,8 +3,6 @@ PyMongo 4 Migration Guide ========================= -.. contents:: - .. testsetup:: from pymongo import MongoClient, ReadPreference diff --git a/doc/python3.rst b/doc/python3.rst index 812bc33b35..40d5fec661 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -1,8 +1,6 @@ Python 3 FAQ ============ -.. contents:: - What Python 3 versions are supported? ------------------------------------- From 715dd348102a8a5f81f620a3b25c7dedebeece8f Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 23 Feb 2023 10:20:17 -0800 Subject: [PATCH 0860/2111] PYTHON-2754 Add Spec Tests For DB Names With Commas (#1162) --- .../dbname-with-commas-escaped.json | 19 +++++++++++++++++++ .../replica-set/dbname-with-commas.json | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 test/srv_seedlist/replica-set/dbname-with-commas-escaped.json create mode 100644 test/srv_seedlist/replica-set/dbname-with-commas.json diff --git a/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json new file mode 100644 index 0000000000..b5fcfd2c07 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some%2Cdb?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas.json b/test/srv_seedlist/replica-set/dbname-with-commas.json new file mode 100644 index 0000000000..c1e85f4b99 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some,db?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} From 32faa261b68a2fd33c16b1ab88f97bb73b58e85d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 23 Feb 2023 11:09:11 -0800 Subject: [PATCH 0861/2111] PYTHON-3616 Use minimum RTT for CSOT maxTimeMS calculation (#1163) Require at least 2 RTT samples, otherwise use 0 as RTT. Only keep last 10 samples. --- pymongo/_csot.py | 33 +++++- pymongo/monitor.py | 19 ++-- pymongo/server_description.py | 9 ++ pymongo/topology.py | 2 +- test/csot/command-execution.json | 183 +++++++++++++++++++++++++++++-- 5 files changed, 225 insertions(+), 21 deletions(-) diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 5170c0d8ca..8a4617ecaf 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -16,8 +16,9 @@ import functools import time +from collections import deque from contextvars import ContextVar, Token -from typing import Any, Callable, MutableMapping, Optional, Tuple, TypeVar, cast +from typing import Any, Callable, Deque, MutableMapping, Optional, Tuple, TypeVar, cast from pymongo.write_concern import WriteConcern @@ -116,3 +117,33 @@ def apply_write_concern(cmd: MutableMapping, write_concern: Optional[WriteConcer wc.pop("wtimeout", None) if wc: cmd["writeConcern"] = wc + + +_MAX_RTT_SAMPLES: int = 10 +_MIN_RTT_SAMPLES: int = 2 + + +class MovingMinimum: + """Tracks a minimum RTT within the last 10 RTT samples.""" + + samples: Deque[float] + + def __init__(self) -> None: + self.samples = deque(maxlen=_MAX_RTT_SAMPLES) + + def add_sample(self, sample: float) -> None: + if sample < 0: + # Likely system time change while waiting for hello response + # and not using time.monotonic. Ignore it, the next one will + # probably be valid. + return + self.samples.append(sample) + + def get(self) -> float: + """Get the min, or 0.0 if there aren't enough samples yet.""" + if len(self.samples) >= _MIN_RTT_SAMPLES: + return min(self.samples) + return 0.0 + + def reset(self) -> None: + self.samples.clear() diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 44390e9180..9031d4b785 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -20,6 +20,7 @@ from typing import Any, Mapping, cast from pymongo import common, periodic_executor +from pymongo._csot import MovingMinimum from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _create_lock @@ -40,7 +41,7 @@ class MonitorBase(object): def __init__(self, topology, name, interval, min_interval): """Base class to do periodic work on a background thread. - The the background thread is signaled to stop when the Topology or + The background thread is signaled to stop when the Topology or this instance is freed. """ # We strongly reference the executor and it weakly references us via @@ -250,7 +251,8 @@ def _check_once(self): if not response.awaitable: self._rtt_monitor.add_sample(round_trip_time) - sd = ServerDescription(address, response, self._rtt_monitor.average()) + avg_rtt, min_rtt = self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) if self._publish: self._listeners.publish_server_heartbeat_succeeded( address, round_trip_time, response, response.awaitable @@ -350,6 +352,7 @@ def __init__(self, topology, topology_settings, pool): self._pool = pool self._moving_average = MovingAverage() + self._moving_min = MovingMinimum() self._lock = _create_lock() def close(self): @@ -362,20 +365,22 @@ def add_sample(self, sample): """Add a RTT sample.""" with self._lock: self._moving_average.add_sample(sample) + self._moving_min.add_sample(sample) - def average(self): - """Get the calculated average, or None if no samples yet.""" + def get(self): + """Get the calculated average, or None if no samples yet and the min.""" with self._lock: - return self._moving_average.get() + return self._moving_average.get(), self._moving_min.get() def reset(self): """Reset the average RTT.""" with self._lock: - return self._moving_average.reset() + self._moving_average.reset() + self._moving_min.reset() def _run(self): try: - # NOTE: This thread is only run when when using the streaming + # NOTE: This thread is only run when using the streaming # heartbeat protocol (MongoDB 4.4+). # XXX: Skip check if the server is unknown? rtt = self._ping() diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 47e27c531b..53f90cea25 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -32,6 +32,7 @@ class ServerDescription(object): - `hello`: Optional Hello instance - `round_trip_time`: Optional float - `error`: Optional, the last error attempting to connect to the server + - `round_trip_time`: Optional float, the min latency from the most recent samples """ __slots__ = ( @@ -47,6 +48,7 @@ class ServerDescription(object): "_min_wire_version", "_max_wire_version", "_round_trip_time", + "_min_round_trip_time", "_me", "_is_writable", "_is_readable", @@ -66,6 +68,7 @@ def __init__( hello: Optional[Hello] = None, round_trip_time: Optional[float] = None, error: Optional[Exception] = None, + min_round_trip_time: float = 0.0, ) -> None: self._address = address if not hello: @@ -88,6 +91,7 @@ def __init__( self._is_readable = hello.is_readable self._ls_timeout_minutes = hello.logical_session_timeout_minutes self._round_trip_time = round_trip_time + self._min_round_trip_time = min_round_trip_time self._me = hello.me self._last_update_time = time.monotonic() self._error = error @@ -203,6 +207,11 @@ def round_trip_time(self) -> Optional[float]: return self._round_trip_time + @property + def min_round_trip_time(self) -> float: + """The min latency from the most recent samples.""" + return self._min_round_trip_time + @property def error(self) -> Optional[Exception]: """The last error attempting to connect to the server, or None.""" diff --git a/pymongo/topology.py b/pymongo/topology.py index 87a566fa6e..904f6b1836 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -271,7 +271,7 @@ def select_server(self, selector, server_selection_timeout=None, address=None): """Like select_servers, but choose a random server if several match.""" server = self._select_server(selector, server_selection_timeout, address) if _csot.get_timeout(): - _csot.set_rtt(server.description.round_trip_time) + _csot.set_rtt(server.description.min_round_trip_time) return server def select_server_by_address(self, address, server_selection_timeout=None): diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index 92358f2184..10f87d43ac 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -3,7 +3,14 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9" + "minServerVersion": "4.9", + "topologies": [ + "single", + "replicaset", + "sharded-replicaset", + "sharded" + ], + "serverless": "forbid" } ], "createEntities": [ @@ -45,7 +52,7 @@ ], "appName": "reduceMaxTimeMSTest", "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 50 } } } @@ -62,7 +69,8 @@ "uriOptions": { "appName": "reduceMaxTimeMSTest", "w": 1, - "timeoutMS": 500 + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -86,6 +94,23 @@ ] } }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, { "name": "insertOne", "object": "timeoutCollection", @@ -100,6 +125,15 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, { "commandStartedEvent": { "commandName": "insert", @@ -107,7 +141,7 @@ "command": { "insert": "timeoutColl", "maxTimeMS": { - "$$lte": 500 + "$$lte": 450 } } } @@ -134,7 +168,7 @@ ], "appName": "rttTooHighTest", "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 50 } } } @@ -151,7 +185,8 @@ "uriOptions": { "appName": "rttTooHighTest", "w": 1, - "timeoutMS": 10 + "timeoutMS": 10, + "heartbeatFrequencyMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -175,6 +210,23 @@ ] } }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, { "name": "insertOne", "object": "timeoutCollection", @@ -192,7 +244,7 @@ "object": "timeoutCollection", "arguments": { "document": { - "_id": 2 + "_id": 3 } }, "expectError": { @@ -204,12 +256,100 @@ "object": "timeoutCollection", "arguments": { "document": { - "_id": 2 + "_id": 4 } }, "expectError": { "isTimeoutError": true } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 100000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } }, { "name": "insertOne", @@ -218,16 +358,35 @@ "document": { "_id": 2 } - }, - "expectError": { - "isTimeoutError": true } } ], "expectEvents": [ { "client": "client", - "events": [] + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] } ] } From 715535159968c32b558345789fd70db88ac116c4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 23 Feb 2023 15:00:57 -0600 Subject: [PATCH 0862/2111] PYTHON-3618 Perf tests are failing on the centos6-perf boxes due to mongosh download (#1164) --- .evergreen/perf.yml | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/.evergreen/perf.yml b/.evergreen/perf.yml index d975fca79f..43b21a65fb 100644 --- a/.evergreen/perf.yml +++ b/.evergreen/perf.yml @@ -199,17 +199,6 @@ post: - func: "cleanup" tasks: - - name: "perf-3.6-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - name: "perf-4.0-standalone" tags: ["perf"] commands: @@ -221,23 +210,23 @@ tasks: - func: "attach benchmark test results" - func: "send dashboard data" - - name: "perf-4.2-standalone" + - name: "perf-4.4-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.2" + VERSION: "4.4" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" - func: "send dashboard data" - - name: "perf-4.4-standalone" + - name: "perf-6.0-standalone" tags: ["perf"] commands: - func: "bootstrap mongo-orchestration" vars: - VERSION: "4.4" + VERSION: "6.0" TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" @@ -248,9 +237,8 @@ buildvariants: - name: "perf-tests" display_name: "Performance Benchmark Tests" batchtime: 10080 # 7 days - run_on: centos6-perf + run_on: ubuntu2004-large tasks: - - name: "perf-3.6-standalone" - name: "perf-4.0-standalone" - - name: "perf-4.2-standalone" - name: "perf-4.4-standalone" + - name: "perf-6.0-standalone" From 67023b3835fb9371fb2c6d7fd8980a25626d1973 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 1 Mar 2023 15:46:19 -0600 Subject: [PATCH 0863/2111] PYTHON-3622 Improve Server Log Download in Evergreen (#1166) --- .evergreen/config.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1bdab16bed..e92cf96a1e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -165,7 +165,10 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz + mkdir out_dir + find $MONGO_ORCHESTRATION_HOME -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; + tar zcvf mongodb-logs.tar.gz -C out_dir/ . + rm -rf out_dir - command: archive.targz_pack params: target: "mongo-coredumps.tgz" From c27ce70d1c7a7cb0230fc5c33fdb1bbc8a66fc5d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 1 Mar 2023 19:02:24 -0600 Subject: [PATCH 0864/2111] PYTHON-3620 Ensure unittest-xml-reporting is Installed in Evergreen Tasks (#1165) --- .evergreen/run-atlas-tests.sh | 5 ----- .evergreen/utils.sh | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index 3f8a1b45f0..2e6272040f 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -17,9 +17,4 @@ fi createvirtualenv $PYTHON_BINARY atlastest trap "deactivate; rm -rf atlastest" EXIT HUP -echo "Running tests without dnspython" -python test/atlas/test_connection.py - -python -m pip install dnspython -echo "Running tests with dnspython" MUST_TEST_SRV="1" python test/atlas/test_connection.py diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 30013ed06b..a474ce545e 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -30,7 +30,7 @@ createvirtualenv () { fi python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel + python -m pip install --upgrade setuptools wheel unittest-xml-reporting } # Usage: From 10a55001c89049afe7436e6521d8a6b6d0ef9267 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Thu, 2 Mar 2023 14:14:47 -0800 Subject: [PATCH 0865/2111] PYTHON-3626 Document srvMaxHosts in MongoClient options (#1167) --- pymongo/mongo_client.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ab0c749889..05f00b48ee 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -401,6 +401,10 @@ def __init__( "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: MongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. | **Write Concern options:** @@ -575,8 +579,8 @@ def __init__( keyword arguments. - The default for `uuidRepresentation` was changed from ``pythonLegacy`` to ``unspecified``. - - Added the ``srvServiceName`` and ``maxConnecting`` URI and - keyword argument. + - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and + keyword arguments. .. versionchanged:: 3.12 Added the ``server_api`` keyword argument. From 5e203bea8dd8cfd6d6dd9f238656976194e6c769 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 7 Mar 2023 10:55:20 -0800 Subject: [PATCH 0866/2111] PYTHON-3629 Actually install pymongo for Atlas connect tests (#1168) --- .evergreen/run-atlas-tests.sh | 3 ++- test/atlas/test_connection.py | 18 ------------------ 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh index 2e6272040f..4a39880d0c 100644 --- a/.evergreen/run-atlas-tests.sh +++ b/.evergreen/run-atlas-tests.sh @@ -17,4 +17,5 @@ fi createvirtualenv $PYTHON_BINARY atlastest trap "deactivate; rm -rf atlastest" EXIT HUP -MUST_TEST_SRV="1" python test/atlas/test_connection.py +python -m pip install . +python test/atlas/test_connection.py diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index a1eb97edee..39d817140e 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -24,14 +24,6 @@ import pymongo from pymongo.ssl_support import HAS_SNI -try: - import dns # noqa - - HAS_DNS = True -except ImportError: - HAS_DNS = False - - URIS = { "ATLAS_REPL": os.environ.get("ATLAS_REPL"), "ATLAS_SHRD": os.environ.get("ATLAS_SHRD"), @@ -47,10 +39,6 @@ "ATLAS_SRV_SERVERLESS": os.environ.get("ATLAS_SRV_SERVERLESS"), } -# Set this variable to true to run the SRV tests even when dnspython is not -# installed. -MUST_TEST_SRV = os.environ.get("MUST_TEST_SRV") - def connect(uri): if not uri: @@ -87,27 +75,21 @@ def connect_srv(self, uri): self.assertIn("mongodb+srv://", uri) @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_free_tier(self): self.connect_srv(URIS["ATLAS_SRV_FREE"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_replica_set(self): self.connect_srv(URIS["ATLAS_SRV_REPL"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_sharded_cluster(self): self.connect_srv(URIS["ATLAS_SRV_SHRD"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_11(self): self.connect_srv(URIS["ATLAS_SRV_TLS11"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_12(self): self.connect_srv(URIS["ATLAS_SRV_TLS12"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_serverless(self): self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) From 25ba21770c7f0dbffdbe87f6b6087dd5b521e258 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 13 Mar 2023 14:57:46 -0700 Subject: [PATCH 0867/2111] PYTHON-3624 Update fle2-* tests to match name requirements in SERVER-74069 (#1169) --- .../spec/legacy/fle2-CreateCollection.json | 252 +++++++++--------- ...EncryptedFields-vs-EncryptedFieldsMap.json | 6 +- 2 files changed, 129 insertions(+), 129 deletions(-) diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json index 9f8db41f87..7f4f38161e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json @@ -21,9 +21,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -60,7 +60,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -68,7 +68,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -76,7 +76,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -101,7 +101,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -110,7 +110,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -119,7 +119,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -137,7 +137,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -152,7 +152,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -167,7 +167,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -184,9 +184,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -745,9 +745,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -762,9 +762,9 @@ ] }, "default.encryptedCollection.esc": { - "escCollection": "encryptedCollection", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -801,7 +801,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -809,7 +809,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -817,7 +817,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -842,7 +842,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -851,7 +851,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -860,7 +860,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -878,7 +878,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -893,7 +893,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -908,7 +908,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -925,9 +925,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -974,9 +974,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1059,9 +1059,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1098,7 +1098,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1106,7 +1106,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1114,7 +1114,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1139,7 +1139,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1148,7 +1148,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1157,7 +1157,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1175,7 +1175,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -1190,7 +1190,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -1205,7 +1205,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -1222,9 +1222,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1278,9 +1278,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1302,9 +1302,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1325,7 +1325,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1333,7 +1333,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1341,7 +1341,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1366,7 +1366,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1375,7 +1375,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1384,7 +1384,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1402,7 +1402,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -1417,7 +1417,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -1432,7 +1432,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -1449,9 +1449,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1510,9 +1510,9 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1542,7 +1542,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1551,7 +1551,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1560,7 +1560,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1594,9 +1594,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1618,9 +1618,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1641,7 +1641,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1649,7 +1649,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1657,7 +1657,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1683,9 +1683,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1706,7 +1706,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1714,7 +1714,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1722,7 +1722,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -1738,7 +1738,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1747,7 +1747,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1756,7 +1756,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1774,7 +1774,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -1789,7 +1789,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -1804,7 +1804,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -1821,9 +1821,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1874,7 +1874,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -1883,7 +1883,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -1892,7 +1892,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -1926,9 +1926,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1950,9 +1950,9 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1973,7 +1973,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -1981,7 +1981,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -1989,7 +1989,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -2021,7 +2021,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.esc" + "collection": "enxcol_.encryptedCollection.esc" } }, { @@ -2029,7 +2029,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecc" + "collection": "enxcol_.encryptedCollection.ecc" } }, { @@ -2037,7 +2037,7 @@ "object": "testRunner", "arguments": { "database": "default", - "collection": "encryptedCollection.ecoc" + "collection": "enxcol_.encryptedCollection.ecoc" } }, { @@ -2053,7 +2053,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -2062,7 +2062,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -2071,7 +2071,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -2089,7 +2089,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.esc", + "create": "enxcol_.encryptedCollection.esc", "clusteredIndex": { "key": { "_id": 1 @@ -2104,7 +2104,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecc", + "create": "enxcol_.encryptedCollection.ecc", "clusteredIndex": { "key": { "_id": 1 @@ -2119,7 +2119,7 @@ { "command_started_event": { "command": { - "create": "encryptedCollection.ecoc", + "create": "enxcol_.encryptedCollection.ecoc", "clusteredIndex": { "key": { "_id": 1 @@ -2136,9 +2136,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", + "escCollection": "enxcol_.encryptedCollection.esc", + "eccCollection": "enxcol_.encryptedCollection.ecc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -2201,7 +2201,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.esc" + "drop": "enxcol_.encryptedCollection.esc" }, "command_name": "drop", "database_name": "default" @@ -2210,7 +2210,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecc" }, "command_name": "drop", "database_name": "default" @@ -2219,7 +2219,7 @@ { "command_started_event": { "command": { - "drop": "encryptedCollection.ecoc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json index 911b428633..42cd4bbc9c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -94,9 +94,9 @@ }, "encryptedFieldsMap": { "default.default": { - "escCollection": "esc", - "eccCollection": "ecc", - "ecocCollection": "ecoc", + "escCollection": "enxcol_.default.esc", + "eccCollection": "enxcol_.default.ecc", + "ecocCollection": "enxcol_.default.ecoc", "fields": [] } } From e9a6482c4d6042445a95973926be8dc9ce451e47 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 14 Mar 2023 15:37:45 -0500 Subject: [PATCH 0868/2111] PYTHON-3610 Add blacken-docs to pre-commit hook (#1170) --- .pre-commit-config.yaml | 7 ++++ README.rst | 2 +- bson/json_util.py | 44 +++++++++++++------- bson/raw_bson.py | 18 ++++----- doc/examples/aggregation.rst | 17 +++++--- doc/examples/bulk.rst | 40 ++++++++++-------- doc/examples/custom_type.rst | 67 +++++++++++++++++++------------ doc/examples/datetimes.rst | 29 ++++++------- doc/examples/geo.rst | 21 +++++----- doc/examples/gevent.rst | 2 + doc/examples/gridfs.rst | 3 +- doc/examples/server_selection.rst | 4 +- doc/examples/type_hints.rst | 40 +++++++++--------- doc/faq.rst | 4 +- doc/migrate-to-pymongo4.rst | 1 + doc/tutorial.rst | 62 +++++++++++++++------------- pymongo/client_session.py | 14 ++++--- pymongo/collection.py | 5 +-- pymongo/database.py | 5 +-- pymongo/mongo_client.py | 7 ++-- 20 files changed, 224 insertions(+), 168 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f0ee74c785..d8455981f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,6 +30,13 @@ repos: files: \.py$ args: [--profile=black] +- repo: https://github.com/adamchainz/blacken-docs + rev: "1.13.0" + hooks: + - id: blacken-docs + additional_dependencies: + - black==22.3.0 + - repo: https://github.com/PyCQA/flake8 rev: 3.9.2 hooks: diff --git a/README.rst b/README.rst index 530829f957..bb409a94ff 100644 --- a/README.rst +++ b/README.rst @@ -148,7 +148,7 @@ Examples ======== Here's a basic example (for more see the *examples* section of the docs): -.. code-block:: python +.. code-block:: pycon >>> import pymongo >>> client = pymongo.MongoClient("localhost", 27017) diff --git a/bson/json_util.py b/bson/json_util.py index ae464e4ed8..8842d5c74d 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -29,7 +29,9 @@ .. doctest:: >>> from bson.json_util import loads - >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]') + >>> loads( + ... '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]' + ... ) [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}] Example usage with :const:`RELAXED_JSON_OPTIONS` (the default): @@ -38,10 +40,14 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}]) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ] + ... ) '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' Example usage (with :const:`CANONICAL_JSON_OPTIONS`): @@ -50,11 +56,15 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }")}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=CANONICAL_JSON_OPTIONS) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }")}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=CANONICAL_JSON_OPTIONS, + ... ) '[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]' Example usage (with :const:`LEGACY_JSON_OPTIONS`): @@ -63,11 +73,15 @@ >>> from bson import Binary, Code >>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS - >>> dumps([{'foo': [1, 2]}, - ... {'bar': {'hello': 'world'}}, - ... {'code': Code("function x() { return 1; }", {})}, - ... {'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=LEGACY_JSON_OPTIONS) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }", {})}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=LEGACY_JSON_OPTIONS, + ... ) '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' Alternatively, you can manually pass the `default` to :func:`json.dumps`. diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 6a80ea70ca..2c2b3c97ca 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -25,18 +25,18 @@ >>> from pymongo import MongoClient >>> from bson.raw_bson import RawBSONDocument >>> client = MongoClient(document_class=RawBSONDocument) - >>> client.drop_database('db') - >>> client.drop_database('replica_db') + >>> client.drop_database("db") + >>> client.drop_database("replica_db") >>> db = client.db - >>> result = db.test.insert_many([{'_id': 1, 'a': 1}, - ... {'_id': 2, 'b': 1}, - ... {'_id': 3, 'c': 1}, - ... {'_id': 4, 'd': 1}]) + >>> result = db.test.insert_many( + ... [{"_id": 1, "a": 1}, {"_id": 2, "b": 1}, {"_id": 3, "c": 1}, {"_id": 4, "d": 1}] + ... ) >>> replica_db = client.replica_db >>> for doc in db.test.find(): - ... print(f"raw document: {doc.raw}") - ... print(f"decoded document: {bson.decode(doc.raw)}") - ... result = replica_db.test.insert_one(doc) + ... print(f"raw document: {doc.raw}") + ... print(f"decoded document: {bson.decode(doc.raw)}") + ... result = replica_db.test.insert_one(doc) + ... raw document: b'...' decoded document: {'_id': 1, 'a': 1} raw document: b'...' diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index cdd82ff6fb..bd20db2304 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -8,8 +8,9 @@ group method. .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('aggregation_example') + client.drop_database("aggregation_example") Setup ----- @@ -20,10 +21,14 @@ aggregations on: >>> from pymongo import MongoClient >>> db = MongoClient().aggregation_example - >>> result = db.things.insert_many([{"x": 1, "tags": ["dog", "cat"]}, - ... {"x": 2, "tags": ["cat"]}, - ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, - ... {"x": 3, "tags": []}]) + >>> result = db.things.insert_many( + ... [ + ... {"x": 1, "tags": ["dog", "cat"]}, + ... {"x": 2, "tags": ["cat"]}, + ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, + ... {"x": 3, "tags": []}, + ... ] + ... ) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] @@ -54,7 +59,7 @@ eg "$sort": >>> pipeline = [ ... {"$unwind": "$tags"}, ... {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - ... {"$sort": SON([("count", -1), ("_id", -1)])} + ... {"$sort": SON([("count", -1), ("_id", -1)])}, ... ] >>> import pprint >>> pprint.pprint(list(db.things.aggregate(pipeline))) diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index 23367dd2c5..c2c5acc687 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -4,8 +4,9 @@ Bulk Write Operations .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('bulk_example') + client.drop_database("bulk_example") This tutorial explains how to take advantage of PyMongo's bulk write operation features. Executing write operations in batches @@ -27,7 +28,7 @@ bulk insert operations. >>> import pymongo >>> db = pymongo.MongoClient().bulk_example - >>> db.test.insert_many([{'i': i} for i in range(10000)]).inserted_ids + >>> db.test.insert_many([{"i": i} for i in range(10000)]).inserted_ids [...] >>> db.test.count_documents({}) 10000 @@ -56,14 +57,17 @@ of operations performed. >>> from pprint import pprint >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne - >>> result = db.test.bulk_write([ - ... DeleteMany({}), # Remove all documents from the previous example. - ... InsertOne({'_id': 1}), - ... InsertOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... UpdateOne({'_id': 1}, {'$set': {'foo': 'bar'}}), - ... UpdateOne({'_id': 4}, {'$inc': {'j': 1}}, upsert=True), - ... ReplaceOne({'j': 1}, {'j': 2})]) + >>> result = db.test.bulk_write( + ... [ + ... DeleteMany({}), # Remove all documents from the previous example. + ... InsertOne({"_id": 1}), + ... InsertOne({"_id": 2}), + ... InsertOne({"_id": 3}), + ... UpdateOne({"_id": 1}, {"$set": {"foo": "bar"}}), + ... UpdateOne({"_id": 4}, {"$inc": {"j": 1}}, upsert=True), + ... ReplaceOne({"j": 1}, {"j": 2}), + ... ] + ... ) >>> pprint(result.bulk_api_result) {'nInserted': 3, 'nMatched': 2, @@ -87,9 +91,10 @@ the failure. >>> from pymongo import InsertOne, DeleteOne, ReplaceOne >>> from pymongo.errors import BulkWriteError >>> requests = [ - ... ReplaceOne({'j': 2}, {'i': 5}), - ... InsertOne({'_id': 4}), # Violates the unique key constraint on _id. - ... DeleteOne({'i': 5})] + ... ReplaceOne({"j": 2}, {"i": 5}), + ... InsertOne({"_id": 4}), # Violates the unique key constraint on _id. + ... DeleteOne({"i": 5}), + ... ] >>> try: ... db.test.bulk_write(requests) ... except BulkWriteError as bwe: @@ -124,10 +129,11 @@ and fourth operations succeed. :options: +NORMALIZE_WHITESPACE >>> requests = [ - ... InsertOne({'_id': 1}), - ... DeleteOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... ReplaceOne({'_id': 4}, {'i': 1})] + ... InsertOne({"_id": 1}), + ... DeleteOne({"_id": 2}), + ... InsertOne({"_id": 3}), + ... ReplaceOne({"_id": 4}, {"i": 1}), + ... ] >>> try: ... db.test.bulk_write(requests, ordered=False) ... except BulkWriteError as bwe: diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst index 404a6c8b55..cbb2f8515b 100644 --- a/doc/examples/custom_type.rst +++ b/doc/examples/custom_type.rst @@ -19,7 +19,7 @@ We'll start by getting a clean database to use for the example: >>> from pymongo import MongoClient >>> client = MongoClient() - >>> client.drop_database('custom_type_example') + >>> client.drop_database("custom_type_example") >>> db = client.custom_type_example @@ -36,7 +36,7 @@ to save an instance of ``Decimal`` with PyMongo, results in an >>> from decimal import Decimal >>> num = Decimal("45.321") - >>> db.test.insert_one({'num': num}) + >>> db.test.insert_one({"num": num}) Traceback (most recent call last): ... bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: @@ -78,8 +78,8 @@ interested in both encoding and decoding our custom type, we use the >>> from bson.decimal128 import Decimal128 >>> from bson.codec_options import TypeCodec >>> class DecimalCodec(TypeCodec): - ... python_type = Decimal # the Python type acted upon by this type codec - ... bson_type = Decimal128 # the BSON type acted upon by this type codec + ... python_type = Decimal # the Python type acted upon by this type codec + ... bson_type = Decimal128 # the BSON type acted upon by this type codec ... def transform_python(self, value): ... """Function that transforms a custom type value into a type ... that BSON can encode.""" @@ -88,6 +88,7 @@ interested in both encoding and decoding our custom type, we use the ... """Function that transforms a vanilla BSON type value into our ... custom type.""" ... return value.to_decimal() + ... >>> decimal_codec = DecimalCodec() @@ -125,7 +126,7 @@ with our ``type_registry`` and use it to get a >>> from bson.codec_options import CodecOptions >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) Now, we can seamlessly encode and decode instances of @@ -133,7 +134,7 @@ Now, we can seamlessly encode and decode instances of .. doctest:: - >>> collection.insert_one({'num': Decimal("45.321")}) + >>> collection.insert_one({"num": Decimal("45.321")}) >>> mydoc = collection.find_one() >>> import pprint @@ -147,7 +148,7 @@ MongoDB: .. doctest:: - >>> vanilla_collection = db.get_collection('test') + >>> vanilla_collection = db.get_collection("test") >>> pprint.pprint(vanilla_collection.find_one()) {'_id': ObjectId('...'), 'num': Decimal128('45.321')} @@ -170,13 +171,14 @@ an integer: ... def my_method(self): ... """Method implementing some custom logic.""" ... return int(self) + ... If we try to save an instance of this type without first registering a type codec for it, we get an error: .. doctest:: - >>> collection.insert_one({'num': DecimalInt("45.321")}) + >>> collection.insert_one({"num": DecimalInt("45.321")}) Traceback (most recent call last): ... bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: @@ -192,6 +194,7 @@ This is trivial to do since the same transformation as the one used for ... def python_type(self): ... """The Python type acted upon by this type codec.""" ... return DecimalInt + ... >>> decimalint_codec = DecimalIntCodec() @@ -211,9 +214,9 @@ object, we can seamlessly encode instances of ``DecimalInt``: >>> type_registry = TypeRegistry([decimal_codec, decimalint_codec]) >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) >>> collection.drop() - >>> collection.insert_one({'num': DecimalInt("45.321")}) + >>> collection.insert_one({"num": DecimalInt("45.321")}) >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) @@ -236,26 +239,26 @@ writing a ``TypeDecoder`` that modifies how this datatype is decoded. On Python 3.x, :class:`~bson.binary.Binary` data (``subtype = 0``) is decoded as a ``bytes`` instance: -.. code-block:: python +.. code-block:: pycon >>> # On Python 3.x. >>> from bson.binary import Binary - >>> newcoll = db.get_collection('new') - >>> newcoll.insert_one({'_id': 1, 'data': Binary(b"123", subtype=0)}) + >>> newcoll = db.get_collection("new") + >>> newcoll.insert_one({"_id": 1, "data": Binary(b"123", subtype=0)}) >>> doc = newcoll.find_one() - >>> type(doc['data']) + >>> type(doc["data"]) bytes On Python 2.7.x, the same data is decoded as a :class:`~bson.binary.Binary` instance: -.. code-block:: python +.. code-block:: pycon >>> # On Python 2.7.x - >>> newcoll = db.get_collection('new') + >>> newcoll = db.get_collection("new") >>> doc = newcoll.find_one() - >>> type(doc['data']) + >>> type(doc["data"]) bson.binary.Binary @@ -291,6 +294,7 @@ BSON-encodable value. The following fallback encoder encodes python's ... if isinstance(value, Decimal): ... return Decimal128(value) ... return value + ... After declaring the callback, we must create a type registry and codec options with this fallback encoder before it can be used for initializing a collection: @@ -299,14 +303,14 @@ with this fallback encoder before it can be used for initializing a collection: >>> type_registry = TypeRegistry(fallback_encoder=fallback_encoder) >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) + >>> collection = db.get_collection("test", codec_options=codec_options) >>> collection.drop() We can now seamlessly encode instances of :py:class:`~decimal.Decimal`: .. doctest:: - >>> collection.insert_one({'num': Decimal("45.321")}) + >>> collection.insert_one({"num": Decimal("45.321")}) >>> mydoc = collection.find_one() >>> pprint.pprint(mydoc) @@ -343,12 +347,15 @@ We start by defining some arbitrary custom types: class MyStringType(object): def __init__(self, value): self.__value = value + def __repr__(self): return "MyStringType('%s')" % (self.__value,) + class MyNumberType(object): def __init__(self, value): self.__value = value + def __repr__(self): return "MyNumberType(%s)" % (self.__value,) @@ -362,11 +369,15 @@ back into Python objects: import pickle from bson.binary import Binary, USER_DEFINED_SUBTYPE + + def fallback_pickle_encoder(value): return Binary(pickle.dumps(value), USER_DEFINED_SUBTYPE) + class PickledBinaryDecoder(TypeDecoder): bson_type = Binary + def transform_bson(self, value): if value.subtype == USER_DEFINED_SUBTYPE: return pickle.loads(value) @@ -384,19 +395,23 @@ Finally, we create a ``CodecOptions`` instance: .. code-block:: python - codec_options = CodecOptions(type_registry=TypeRegistry( - [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder)) + codec_options = CodecOptions( + type_registry=TypeRegistry( + [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder + ) + ) We can now round trip our custom objects to MongoDB: .. code-block:: python - collection = db.get_collection('test_fe', codec_options=codec_options) - collection.insert_one({'_id': 1, 'str': MyStringType("hello world"), - 'num': MyNumberType(2)}) + collection = db.get_collection("test_fe", codec_options=codec_options) + collection.insert_one( + {"_id": 1, "str": MyStringType("hello world"), "num": MyNumberType(2)} + ) mydoc = collection.find_one() - assert isinstance(mydoc['str'], MyStringType) - assert isinstance(mydoc['num'], MyNumberType) + assert isinstance(mydoc["str"], MyStringType) + assert isinstance(mydoc["num"], MyNumberType) Limitations diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 3b30000ffc..562c9480a6 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -6,8 +6,9 @@ Datetimes and Timezones import datetime from pymongo import MongoClient from bson.codec_options import CodecOptions + client = MongoClient() - client.drop_database('dt_example') + client.drop_database("dt_example") db = client.dt_example These examples show how to handle Python :class:`datetime.datetime` objects @@ -24,8 +25,7 @@ time into MongoDB: .. doctest:: - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.utcnow()}) + >>> result = db.objects.insert_one({"last_modified": datetime.datetime.utcnow()}) Always use :meth:`datetime.datetime.utcnow`, which returns the current time in UTC, instead of :meth:`datetime.datetime.now`, which returns the current local @@ -33,8 +33,7 @@ time. Avoid doing this: .. doctest:: - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now()}) + >>> result = db.objects.insert_one({"last_modified": datetime.datetime.now()}) The value for `last_modified` is very different between these two examples, even though both documents were stored at around the same local time. This will be @@ -42,7 +41,7 @@ confusing to the application that reads them: .. doctest:: - >>> [doc['last_modified'] for doc in db.objects.find()] # doctest: +SKIP + >>> [doc["last_modified"] for doc in db.objects.find()] # doctest: +SKIP [datetime.datetime(2015, 7, 8, 18, 17, 28, 324000), datetime.datetime(2015, 7, 8, 11, 17, 42, 911000)] @@ -52,12 +51,11 @@ timezone they're in. By default, PyMongo retrieves naive datetimes: .. doctest:: - >>> result = db.tzdemo.insert_one( - ... {'date': datetime.datetime(2002, 10, 27, 6, 0, 0)}) - >>> db.tzdemo.find_one()['date'] + >>> result = db.tzdemo.insert_one({"date": datetime.datetime(2002, 10, 27, 6, 0, 0)}) + >>> db.tzdemo.find_one()["date"] datetime.datetime(2002, 10, 27, 6, 0) >>> options = CodecOptions(tz_aware=True) - >>> db.get_collection('tzdemo', codec_options=options).find_one()['date'] # doctest: +SKIP + >>> db.get_collection("tzdemo", codec_options=options).find_one()["date"] # doctest: +SKIP datetime.datetime(2002, 10, 27, 6, 0, tzinfo=) @@ -71,11 +69,10 @@ those datetimes to UTC automatically: .. doctest:: >>> import pytz - >>> pacific = pytz.timezone('US/Pacific') - >>> aware_datetime = pacific.localize( - ... datetime.datetime(2002, 10, 27, 6, 0, 0)) + >>> pacific = pytz.timezone("US/Pacific") + >>> aware_datetime = pacific.localize(datetime.datetime(2002, 10, 27, 6, 0, 0)) >>> result = db.times.insert_one({"date": aware_datetime}) - >>> db.times.find_one()['date'] + >>> db.times.find_one()["date"] datetime.datetime(2002, 10, 27, 14, 0) Reading Time @@ -150,7 +147,7 @@ cannot be represented using the builtin Python :class:`~datetime.datetime`: .. doctest:: >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> y = encode({"x": DatetimeMS(-(2**62))}) >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) >>> decode(x, codec_options=codec_auto) {'x': datetime.datetime(1970, 1, 1, 0, 0)} @@ -165,7 +162,7 @@ resulting :class:`~datetime.datetime` objects to be within .. doctest:: >>> x = encode({"x": DatetimeMS(2**62)}) - >>> y = encode({"x": DatetimeMS(-2**62)}) + >>> y = encode({"x": DatetimeMS(-(2**62))}) >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP) >>> decode(x, codec_options=codec_clamp) {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst index 2234a20757..e7da156720 100644 --- a/doc/examples/geo.rst +++ b/doc/examples/geo.rst @@ -4,8 +4,9 @@ Geospatial Indexing Example .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('geo_example') + client.drop_database("geo_example") This example shows how to create and use a :data:`~pymongo.GEO2D` index in PyMongo. To create a spherical (earth-like) geospatial index use :data:`~pymongo.GEOSPHERE` instead. @@ -33,10 +34,9 @@ insert a couple of example locations: .. doctest:: - >>> result = db.places.insert_many([{"loc": [2, 5]}, - ... {"loc": [30, 5]}, - ... {"loc": [1, 2]}, - ... {"loc": [4, 4]}]) + >>> result = db.places.insert_many( + ... [{"loc": [2, 5]}, {"loc": [30, 5]}, {"loc": [1, 2]}, {"loc": [4, 4]}] + ... ) >>> result.inserted_ids [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] @@ -51,7 +51,7 @@ Using the geospatial index we can find documents near another point: >>> import pprint >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): - ... pprint.pprint(doc) + ... pprint.pprint(doc) ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [4, 4]} @@ -66,7 +66,7 @@ The $maxDistance operator requires the use of :class:`~bson.son.SON`: >>> from bson.son import SON >>> query = {"loc": SON([("$near", [3, 6]), ("$maxDistance", 100)])} >>> for doc in db.places.find(query).limit(3): - ... pprint.pprint(doc) + ... pprint.pprint(doc) ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [4, 4]} @@ -78,8 +78,9 @@ It's also possible to query for all items within a given rectangle .. doctest:: >>> query = {"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}} - >>> for doc in db.places.find(query).sort('_id'): + >>> for doc in db.places.find(query).sort("_id"): ... pprint.pprint(doc) + ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [4, 4]} @@ -88,8 +89,8 @@ Or circle (specified by center point and radius): .. doctest:: >>> query = {"loc": {"$within": {"$center": [[0, 0], 6]}}} - >>> for doc in db.places.find(query).sort('_id'): - ... pprint.pprint(doc) + >>> for doc in db.places.find(query).sort("_id"): + ... pprint.pprint(doc) ... {'_id': ObjectId('...'), 'loc': [2, 5]} {'_id': ObjectId('...'), 'loc': [1, 2]} diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst index 6eb283dca9..de31158151 100644 --- a/doc/examples/gevent.rst +++ b/doc/examples/gevent.rst @@ -38,10 +38,12 @@ handler to end background greenlets when your application receives SIGHUP: import signal + def graceful_reload(signum, traceback): """Explicitly close some global MongoClient object.""" client.close() + signal.signal(signal.SIGHUP, graceful_reload) Applications using uWSGI prior to 1.9.16 are affected by this issue, diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst index a015f6a9fd..5f40805d79 100644 --- a/doc/examples/gridfs.rst +++ b/doc/examples/gridfs.rst @@ -4,8 +4,9 @@ GridFS Example .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('gridfs_example') + client.drop_database("gridfs_example") This example shows how to use :mod:`gridfs` to store large binary objects (e.g. files) in MongoDB. diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst index be2172489e..18de677a58 100644 --- a/doc/examples/server_selection.rst +++ b/doc/examples/server_selection.rst @@ -55,12 +55,12 @@ selector function: >>> def server_selector(server_descriptions): ... servers = [ - ... server for server in server_descriptions - ... if server.address[0] == 'localhost' + ... server for server in server_descriptions if server.address[0] == "localhost" ... ] ... if not servers: ... return server_descriptions ... return servers + ... diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index e5ad3338e1..f202ab32e1 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -81,7 +81,7 @@ Subclasses of :py:class:`collections.abc.Mapping` can also be used, such as :cla >>> from pymongo import MongoClient >>> client = MongoClient(document_class=SON[str, int]) >>> collection = client.test.test - >>> inserted = collection.insert_one({"x": 1, "y": 2 }) + >>> inserted = collection.insert_one({"x": 1, "y": 2}) >>> result = collection.find_one({"x": 1}) >>> assert result is not None >>> assert result["x"] == 1 @@ -103,8 +103,8 @@ These methods automatically add an "_id" field. >>> from pymongo import MongoClient >>> from pymongo.collection import Collection >>> class Movie(TypedDict): - ... name: str - ... year: int + ... name: str + ... year: int ... >>> client: MongoClient = MongoClient() >>> collection: Collection[Movie] = client.test.test @@ -113,7 +113,7 @@ These methods automatically add an "_id" field. >>> assert result is not None >>> assert result["year"] == 1993 >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. - >>> assert result["_id"] # type:ignore[typeddict-item] + >>> assert result["_id"] # type:ignore[typeddict-item] This same typing scheme works for all of the insert methods (:meth:`~pymongo.collection.Collection.insert_one`, :meth:`~pymongo.collection.Collection.insert_many`, and :meth:`~pymongo.collection.Collection.bulk_write`). @@ -158,18 +158,18 @@ Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` i >>> from pymongo.collection import Collection >>> from bson import ObjectId >>> class Movie(TypedDict): - ... name: str - ... year: int + ... name: str + ... year: int ... >>> class ExplicitMovie(TypedDict): - ... _id: ObjectId - ... name: str - ... year: int + ... _id: ObjectId + ... name: str + ... year: int ... >>> class NotRequiredMovie(TypedDict): - ... _id: NotRequired[ObjectId] - ... name: str - ... year: int + ... _id: NotRequired[ObjectId] + ... name: str + ... year: int ... >>> client: MongoClient = MongoClient() >>> collection: Collection[Movie] = client.test.test @@ -180,7 +180,9 @@ Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` i >>> assert result["_id"] # type:ignore[typeddict-item] >>> collection: Collection[ExplicitMovie] = client.test.test >>> # Note that the _id keyword argument must be supplied - >>> inserted = collection.insert_one(ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993)) + >>> inserted = collection.insert_one( + ... ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993) + ... ) >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> # This will not raise a type-checking error. @@ -207,13 +209,13 @@ match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+). >>> from pymongo import MongoClient >>> from pymongo.database import Database >>> class Movie(TypedDict): - ... name: str - ... year: int + ... name: str + ... year: int ... >>> client: MongoClient = MongoClient() >>> db: Database[Movie] = client.test >>> collection = db.test - >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993 }) + >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993}) >>> result = collection.find_one({"name": "Jurassic Park"}) >>> assert result is not None >>> assert result["year"] == 1993 @@ -244,11 +246,11 @@ You can specify the document type returned by :mod:`bson` decoding functions by >>> from typing import Any, Dict >>> from bson import CodecOptions, encode, decode >>> class MyDict(Dict[str, Any]): - ... def foo(self): - ... return "bar" + ... def foo(self): + ... return "bar" ... >>> options = CodecOptions(document_class=MyDict) - >>> doc = {"x": 1, "y": 2 } + >>> doc = {"x": 1, "y": 2} >>> bsonbytes = encode(doc, codec_options=options) >>> rt_document = decode(bsonbytes, codec_options=options) >>> assert rt_document.foo() == "bar" diff --git a/doc/faq.rst b/doc/faq.rst index 876dc68ed8..e64e3c79ed 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -244,8 +244,7 @@ Key order in subdocuments -- why does my query work in the shell but not PyMongo collection = MongoClient().test.collection collection.drop() - collection.insert_one({'_id': 1.0, - 'subdocument': SON([('b', 1.0), ('a', 1.0)])}) + collection.insert_one({"_id": 1.0, "subdocument": SON([("b", 1.0), ("a", 1.0)])}) The key-value pairs in a BSON document can have any order (except that ``_id`` is always first). The mongo shell preserves key order when reading and writing @@ -537,6 +536,7 @@ objects as before: >>> for x in client.db.collection.find(): ... print(x) + ... {'_id': ObjectId('...'), 'x': datetime.datetime(1970, 1, 1, 0, 0)} {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)} diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 561261c7ad..687fec11bc 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -6,6 +6,7 @@ PyMongo 4 Migration Guide .. testsetup:: from pymongo import MongoClient, ReadPreference + client = MongoClient() database = client.my_database collection = database.my_collection diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 55961241e8..d7854c885a 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -4,8 +4,9 @@ Tutorial .. testsetup:: from pymongo import MongoClient + client = MongoClient() - client.drop_database('test-database') + client.drop_database("test-database") This tutorial is intended as an introduction to working with **MongoDB** and **PyMongo**. @@ -45,13 +46,13 @@ specify the host and port explicitly, as follows: .. doctest:: - >>> client = MongoClient('localhost', 27017) + >>> client = MongoClient("localhost", 27017) Or use the MongoDB URI format: .. doctest:: - >>> client = MongoClient('mongodb://localhost:27017/') + >>> client = MongoClient("mongodb://localhost:27017/") Getting a Database ------------------ @@ -70,7 +71,7 @@ instead: .. doctest:: - >>> db = client['test-database'] + >>> db = client["test-database"] Getting a Collection -------------------- @@ -87,7 +88,7 @@ or (using dictionary style access): .. doctest:: - >>> collection = db['test-collection'] + >>> collection = db["test-collection"] An important note about collections (and databases) in MongoDB is that they are created lazily - none of the above commands have actually @@ -104,10 +105,12 @@ post: .. doctest:: >>> import datetime - >>> post = {"author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow()} + >>> post = { + ... "author": "Mike", + ... "text": "My first blog post!", + ... "tags": ["mongodb", "python", "pymongo"], + ... "date": datetime.datetime.utcnow(), + ... } Note that documents can contain native Python types (like :class:`datetime.datetime` instances) which will be automatically @@ -212,7 +215,7 @@ Note that an ObjectId is not the same as its string representation: .. doctest:: >>> post_id_as_str = str(post_id) - >>> posts.find_one({"_id": post_id_as_str}) # No result + >>> posts.find_one({"_id": post_id_as_str}) # No result >>> A common task in web applications is to get an ObjectId from the @@ -240,14 +243,20 @@ command to the server: .. doctest:: - >>> new_posts = [{"author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14)}, - ... {"author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45)}] + >>> new_posts = [ + ... { + ... "author": "Mike", + ... "text": "Another post!", + ... "tags": ["bulk", "insert"], + ... "date": datetime.datetime(2009, 11, 12, 11, 14), + ... }, + ... { + ... "author": "Eliot", + ... "title": "MongoDB is fun", + ... "text": "and pretty easy too!", + ... "date": datetime.datetime(2009, 11, 10, 10, 45), + ... }, + ... ] >>> result = posts.insert_many(new_posts) >>> result.inserted_ids [ObjectId('...'), ObjectId('...')] @@ -274,7 +283,7 @@ document in the ``posts`` collection: .. doctest:: >>> for post in posts.find(): - ... pprint.pprint(post) + ... pprint.pprint(post) ... {'_id': ObjectId('...'), 'author': 'Mike', @@ -300,7 +309,7 @@ author is "Mike": .. doctest:: >>> for post in posts.find({"author": "Mike"}): - ... pprint.pprint(post) + ... pprint.pprint(post) ... {'_id': ObjectId('...'), 'author': 'Mike', @@ -343,7 +352,7 @@ than a certain date, but also sort the results by author: >>> d = datetime.datetime(2009, 11, 12, 12) >>> for post in posts.find({"date": {"$lt": d}}).sort("author"): - ... pprint.pprint(post) + ... pprint.pprint(post) ... {'_id': ObjectId('...'), 'author': 'Eliot', @@ -373,8 +382,7 @@ First, we'll need to create the index: .. doctest:: - >>> result = db.profiles.create_index([('user_id', pymongo.ASCENDING)], - ... unique=True) + >>> result = db.profiles.create_index([("user_id", pymongo.ASCENDING)], unique=True) >>> sorted(list(db.profiles.index_information())) ['_id_', 'user_id_1'] @@ -386,9 +394,7 @@ Now let's set up some user profiles: .. doctest:: - >>> user_profiles = [ - ... {'user_id': 211, 'name': 'Luke'}, - ... {'user_id': 212, 'name': 'Ziltoid'}] + >>> user_profiles = [{"user_id": 211, "name": "Luke"}, {"user_id": 212, "name": "Ziltoid"}] >>> result = db.profiles.insert_many(user_profiles) The index prevents us from inserting a document whose ``user_id`` is already in @@ -397,8 +403,8 @@ the collection: .. doctest:: :options: +IGNORE_EXCEPTION_DETAIL - >>> new_profile = {'user_id': 213, 'name': 'Drew'} - >>> duplicate_profile = {'user_id': 212, 'name': 'Tommy'} + >>> new_profile = {"user_id": 213, "name": "Drew"} + >>> duplicate_profile = {"user_id": 212, "name": "Tommy"} >>> result = db.profiles.insert_one(new_profile) # This is fine. >>> result = db.profiles.insert_one(duplicate_profile) Traceback (most recent call last): diff --git a/pymongo/client_session.py b/pymongo/client_session.py index d2479942e4..d73672c5b5 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -23,12 +23,11 @@ with client.start_session(causal_consistency=True) as session: collection = client.db.collection - collection.update_one({'_id': 1}, {'$set': {'x': 10}}, session=session) - secondary_c = collection.with_options( - read_preference=ReadPreference.SECONDARY) + collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) + secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) # A secondary read waits for replication of the write. - secondary_c.find_one({'_id': 1}, session=session) + secondary_c.find_one({"_id": 1}, session=session) If `causal_consistency` is True (the default), read operations that use the session are causally after previous read and write operations. Using a @@ -57,8 +56,11 @@ with client.start_session() as session: with session.start_transaction(): orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, session=session) + inventory.update_one( + {"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, + session=session, + ) Upon normal completion of ``with session.start_transaction()`` block, the transaction automatically calls :meth:`ClientSession.commit_transaction`. diff --git a/pymongo/collection.py b/pymongo/collection.py index 4cb3fa79c9..7ce881613c 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2533,14 +2533,13 @@ def watch( .. code-block:: python try: - with db.collection.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. diff --git a/pymongo/database.py b/pymongo/database.py index b3c6c60851..6a73f884c5 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -591,14 +591,13 @@ def watch( .. code-block:: python try: - with db.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 05f00b48ee..ca60affdf5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -27,7 +27,7 @@ >>> c = MongoClient() >>> c.test_database Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') - >>> c['test-database'] + >>> c["test-database"] Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') """ @@ -935,14 +935,13 @@ def watch( .. code-block:: python try: - with client.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. - logging.error('...') + logging.error("...") For a precise description of the resume process see the `change streams specification`_. From 04c9f87d7027e90b04819292f450c63b75b85a22 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 20 Mar 2023 15:54:42 -0700 Subject: [PATCH 0869/2111] PYTHON-2468 Add pymongoexplain example to pymongo docs (#1172) --- doc/examples/aggregation.rst | 12 ++++++++++-- pymongo/collection.py | 5 +++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index bd20db2304..22e19e9842 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -67,8 +67,16 @@ eg "$sort": {'_id': 'dog', 'count': 2}, {'_id': 'mouse', 'count': 1}] -To run an explain plan for this aggregation use the -:meth:`~pymongo.database.Database.command` method:: +To run an explain plan for this aggregation use +`PyMongoExplain `_, +a companion library for PyMongo. It allows you to explain any CRUD operation +by providing a few convenience classes:: + + >>> from pymongoexplain import ExplainableCollection + >>> ExplainableCollection(collection).aggregate(pipeline) + {'ok': 1.0, 'queryPlanner': [...]} + +Or, use the :meth:`~pymongo.database.Database.command` method:: >>> db.command('aggregate', 'things', pipeline=pipeline, explain=True) {'ok': 1.0, 'stages': [...]} diff --git a/pymongo/collection.py b/pymongo/collection.py index 7ce881613c..0ff56d10cd 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2368,8 +2368,9 @@ def aggregate( :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. .. note:: This method does not support the 'explain' option. Please - use :meth:`~pymongo.database.Database.command` instead. An - example is included in the :ref:`aggregate-examples` documentation. + use `PyMongoExplain `_ + instead. An example is included in the :ref:`aggregate-examples` + documentation. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. From 880f3dd8eaaa91fa017f7bb7993f26ccf4f4670d Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 21 Mar 2023 16:59:35 -0700 Subject: [PATCH 0870/2111] PYTHON-3615 Add docs example for how to rotate CMKs using rewrap_many_data_key (#1171) --- pymongo/encryption.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 2bd6880065..7d017c2c0a 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -1021,6 +1021,23 @@ def rewrap_many_data_key( :Returns: A :class:`RewrapManyDataKeyResult`. + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + .. versionadded:: 4.2 """ self._check_closed() From 1d052cb7061e3a3c1aa13cc6d4cf17477a60aab5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Mar 2023 11:31:51 -0500 Subject: [PATCH 0871/2111] PYTHON-3639 Release Build is Failing to Create Universal Wheels for MacOS (#1174) --- tools/fail_if_no_c.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index e2e9c52527..60fed0ee8a 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -32,10 +32,11 @@ if os.environ.get("ENSURE_UNIVERSAL2") == "1": parent_dir = os.path.dirname(pymongo.__path__[0]) - for so_file in glob.glob(f"{parent_dir}/**/*.so"): - print(f"Checking universal2 compatibility in {so_file}...") - output = subprocess.check_output(["file", so_file]) - if "arm64" not in output.decode("utf-8"): - sys.exit("Universal wheel was not compiled with arm64 support") - if "x86_64" not in output.decode("utf-8"): - sys.exit("Universal wheel was not compiled with x86_64 support") + for pkg in ["pymongo", "bson", "grifs"]: + for so_file in glob.glob(f"{parent_dir}/{pkg}/*.so"): + print(f"Checking universal2 compatibility in {so_file}...") + output = subprocess.check_output(["file", so_file]) + if "arm64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with arm64 support") + if "x86_64" not in output.decode("utf-8"): + sys.exit("Universal wheel was not compiled with x86_64 support") From cbad35ec9d591b09c492f4c869c50fd203b55e44 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Mar 2023 18:32:34 -0500 Subject: [PATCH 0872/2111] PYTHON-3619 MacOS hosts are incredibly slow (#1175) --- .evergreen/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index e92cf96a1e..b697074020 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -15,7 +15,8 @@ command_type: system # Protect ourself against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 1800 # 30 minutes is the longest we'll ever run +exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily + # for macos hosts) # What to do when evergreen hits the timeout (`post:` tasks are run automatically) timeout: From 9d65395d7f240e187c5744e601c0eb31d3a039d2 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Tue, 28 Mar 2023 14:51:08 -0700 Subject: [PATCH 0873/2111] PYTHON-3567 Add guidance for setting uuidRepresentation in 4.0 migration guide (#1176) --- doc/migrate-to-pymongo4.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 687fec11bc..19aa87fcd8 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -974,12 +974,19 @@ subdocument containing a ``$ref`` field would be decoded as a Encoding a UUID raises an error by default .......................................... -The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, +The default ``uuid_representation`` for :class:`~bson.codec_options.CodecOptions`, :class:`~bson.json_util.JSONOptions`, and :class:`~pymongo.mongo_client.MongoClient` has been changed from :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. +If you were using UUIDs previously, you will need to set your ``uuid_representation`` to +:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to avoid data corruption. If you do not have UUIDs, +then you should set :data:`bson.binary.UuidRepresentation.STANDARD`. If you do not explicitly set a value, +you will receive an error like this when attempting to encode a :class:`uuid.UUID`:: + + ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted... + See :ref:`handling-uuid-data-example` for details. Additional BSON classes implement ``__slots__`` From e85a84e3f4fb620bd5836b94a83a1e15d0b87951 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 31 Mar 2023 13:58:47 -0700 Subject: [PATCH 0874/2111] PYTHON-3643 Use mongodb+srv in MONGODB-AWS auth examples (#1177) --- doc/examples/authentication.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index a984d17fc0..a46f95c789 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -290,7 +290,7 @@ access key id and secret access key pair as the username and password, respectively, in the MongoDB URI. A sample URI would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://:@localhost/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: The access_key_id and secret_access_key passed into the URI MUST @@ -305,7 +305,7 @@ ID, a secret access key, and a security token passed into the URI. A sample URI would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://:@example.com/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" + >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" >>> client = MongoClient(uri) .. note:: The access_key_id, secret_access_key, and session_token passed into @@ -325,7 +325,7 @@ for the access key ID, secret access key, and session token, respectively:: $ export AWS_SESSION_TOKEN= $ python >>> from pymongo import MongoClient - >>> uri = "mongodb://example.com/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: No username, password, or session token is passed into the URI. @@ -357,7 +357,7 @@ credentials assigned to the machine. A sample URI on an ECS container would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://example.mongodb.com/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: No username, password, or session token is passed into the URI. @@ -372,7 +372,7 @@ credentials assigned to the machine. A sample URI on an EC2 machine would be:: >>> from pymongo import MongoClient - >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" + >>> uri = "mongodb+srv://example.mongodb.com/?authMechanism=MONGODB-AWS" >>> client = MongoClient(uri) .. note:: No username, password, or session token is passed into the URI. From 9bc70933554d7809a8169dfdea73b088d9271231 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Mon, 3 Apr 2023 13:45:54 -0700 Subject: [PATCH 0875/2111] PYTHON-3634 Windows crypt shared rewrap many data key timing out sometimes (#1173) --- pymongo/encryption.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 7d017c2c0a..d94e1969b0 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -41,6 +41,7 @@ from bson.son import SON from pymongo import _csot from pymongo.collection import Collection +from pymongo.common import CONNECT_TIMEOUT from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon from pymongo.database import Database @@ -64,7 +65,7 @@ from pymongo.write_concern import WriteConcern _HTTPS_PORT = 443 -_KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT _MONGOCRYPTD_TIMEOUT_MS = 10000 From 148f7877cf568e93c8bc7c7c260617de03e2326f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Apr 2023 17:13:23 -0500 Subject: [PATCH 0876/2111] PYTHON-3644 Test encryption KMS connections with stdlib ssl, not just pyopenssl (#1178) --- .evergreen/config.yml | 55 ++++++++++++++++++++++++++++++++--------- .evergreen/run-tests.sh | 7 ++++-- test/test_encryption.py | 7 ++++-- 3 files changed, 54 insertions(+), 15 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index b697074020..6825aac10a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -457,6 +457,9 @@ functions: rm -f ./fle_creds.sh export LIBMONGOCRYPT_URL="${libmongocrypt_url}" export TEST_ENCRYPTION=1 + if [ -n "${test_encryption_pyopenssl}" ]; then + export TEST_ENCRYPTION_PYOPENSSL=1 + fi fi if [ -n "${test_crypt_shared}" ]; then export TEST_CRYPT_SHARED=1 @@ -2507,6 +2510,13 @@ axes: variables: test_encryption: true batchtime: 10080 # 7 days + - id: "encryption_pyopenssl" + display_name: "Encryption PyOpenSSL" + tags: ["encryption_tag"] + variables: + test_encryption: true + test_encryption_pyopenssl: true + batchtime: 10080 # 7 days # The path to crypt_shared is stored in the $CRYPT_SHARED_LIB_PATH expansion. - id: "encryption_crypt_shared" display_name: "Encryption shared lib" @@ -2637,14 +2647,22 @@ buildvariants: ssl: "nossl" encryption: "*" display_name: "${encryption} ${platform} ${auth} ${ssl}" - tasks: &encryption-server-versions - - ".rapid" - - ".latest" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: macos-1100 + auth: "auth" + ssl: "nossl" + then: + add_tasks: &encryption-server-versions + - ".rapid" + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" @@ -2726,8 +2744,15 @@ buildvariants: # coverage: "*" encryption: "*" display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions - + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: ubuntu-18.04 + auth-ssl: noauth-nossl + python-version: "*" + then: + add_tasks: *encryption-server-versions - matrix_name: "tests-python-version-ubuntu18-without-c-extensions" matrix_spec: @@ -2837,7 +2862,15 @@ buildvariants: auth-ssl: "*" encryption: "*" display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" - tasks: *encryption-server-versions + tasks: "test-latest-replica_set" + rules: + - if: + encryption: ["encryption", "encryption_crypt_shared"] + platform: windows-64-vsMulti-small + python-version-windows: "*" + auth-ssl: "*" + then: + add_tasks: *encryption-server-versions # Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 3a15163b63..556d60f07f 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -151,8 +151,11 @@ fi if [ -n "$TEST_ENCRYPTION" ]; then # Need aws dependency for On-Demand KMS Credentials. - # Need OSCP dependency to verify OCSP TSL args. - python -m pip install '.[aws,ocsp]' + if [ -n "$TEST_ENCRYPTION_PYOPENSSL" ]; then + python -m pip install '.[aws,ocsp]' + else + python -m pip install '.[aws]' + fi # Get access to the AWS temporary credentials: # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN diff --git a/test/test_encryption.py b/test/test_encryption.py index b7d588e747..6cdc8da3b6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2159,8 +2159,11 @@ def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): encryption = ClientEncryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options ) - self.assertFalse(encryption._io_callbacks.opts._kms_ssl_contexts["aws"].check_ocsp_endpoint) - encryption.close() + self.addCleanup(encryption.close) + ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") # type:ignore + self.assertFalse(ctx.check_ocsp_endpoint) # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames From 30ebc1d0902cedfd408b2652aedf847bbf11b22a Mon Sep 17 00:00:00 2001 From: lilinjie <102012657+uniontech-lilinjie@users.noreply.github.com> Date: Tue, 4 Apr 2023 11:46:14 +0000 Subject: [PATCH 0877/2111] fix typo (#1179) --- pymongo/client_session.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index d73672c5b5..1ec0b16476 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -600,7 +600,7 @@ def callback(session, custom_arg, custom_kwarg=None): In the event of an exception, ``with_transaction`` may retry the commit or the entire transaction, therefore ``callback`` may be invoked multiple times by a single call to ``with_transaction``. Developers - should be mindful of this possiblity when writing a ``callback`` that + should be mindful of this possibility when writing a ``callback`` that modifies application state or has any other side-effects. Note that even when the ``callback`` is invoked multiple times, ``with_transaction`` ensures that the transaction will be committed From d8897fce3ef685ccda6d9ab671125a9a07558bf3 Mon Sep 17 00:00:00 2001 From: Julius Park Date: Wed, 5 Apr 2023 08:45:27 -0700 Subject: [PATCH 0878/2111] PYTHON-3066 Test against Apple silicon in Evergreen (#1180) --- .evergreen/config.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6825aac10a..37bc5751da 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2176,6 +2176,15 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: macos-1100-arm64 + display_name: "macOS 11.00 Arm64" + run_on: macos-1100-arm64 + variables: + skip_EC2_auth_test: true + skip_ECS_auth_test: true + skip_web_identity_auth_test: true + python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel62 display_name: "RHEL 6.2 (x86_64)" run_on: rhel62-small @@ -2639,6 +2648,18 @@ buildvariants: - ".4.0" - ".3.6" +- matrix_name: "test-macos-arm64" + matrix_spec: + platform: + - macos-1100-arm64 + auth-ssl: "*" + display_name: "${platform} ${auth-ssl}" + tasks: + - ".latest" + - ".6.0" + - ".5.0" + - ".4.4" + - matrix_name: "test-macos-encryption" matrix_spec: platform: From acc6605ea119ca62b4d9dbff07b62bbc7c86c3d8 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Apr 2023 11:24:27 -0700 Subject: [PATCH 0879/2111] PYTHON-3522 Increase test timeout for Windows (#1181) Temporarily skip CSOT GridFS tests on Windows. --- test/csot/command-execution.json | 2 +- test/unified_format.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index 10f87d43ac..f0858791e9 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -52,7 +52,7 @@ ], "appName": "reduceMaxTimeMSTest", "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 75 } } } diff --git a/test/unified_format.py b/test/unified_format.py index 5afc746859..18130290b5 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -947,6 +947,8 @@ def maybe_skip_test(self, spec): class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: + if "gridfs" in class_name and sys.platform == "win32": + self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") if client_context.storage_engine == "mmapv1": self.skipTest( "MMAPv1 does not support retryable writes which is required for CSOT tests" From 3077bbf1f946c2900a8a1026bc51d3ba4e208f5d Mon Sep 17 00:00:00 2001 From: Michael Pacheco Date: Fri, 7 Apr 2023 15:09:50 -0300 Subject: [PATCH 0880/2111] PYTHON-3657 Allow index name explicitly set to None (#1182) Co-authored-by: Michael Pacheco --- .gitignore | 1 + pymongo/operations.py | 2 +- test/test_collection.py | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f7ad6563ff..269a7e7081 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ pymongo.egg-info/ mongocryptd.pid .idea/ .nova/ +venv/ diff --git a/pymongo/operations.py b/pymongo/operations.py index f939cd479f..f73262074d 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -494,7 +494,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ """ keys = _index_list(keys) - if "name" not in kwargs: + if kwargs.get("name") is None: kwargs["name"] = _gen_index_name(keys) kwargs["key"] = _index_document(keys) collation = validate_collation_or_none(kwargs.pop("collation", None)) diff --git a/test/test_collection.py b/test/test_collection.py index 881896c847..e36d6663f0 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -307,6 +307,10 @@ def test_create_index(self): db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) self.assertTrue("hello_-1_world_1" in db.test.index_information()) + db.test.drop_indexes() + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertTrue("hello_-1_world_1" in db.test.index_information()) + db.test.drop() db.test.insert_one({"a": 1}) db.test.insert_one({"a": 1}) From 6088b5315259cc09420831d022b4ddb5f0be818c Mon Sep 17 00:00:00 2001 From: Kevin Albertson Date: Mon, 10 Apr 2023 17:24:44 -0400 Subject: [PATCH 0881/2111] PYTHON-3658 Reload expansions before deleting Azure resources (#1185) --- .evergreen/config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 37bc5751da..0b2aaa8d54 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1181,6 +1181,10 @@ task_groups: params: file: testazurekms-expansions.yml teardown_group: + # Load expansions again. The setup task may have failed before running `expansions.update`. + - command: expansions.update + params: + file: testazurekms-expansions.yml - command: shell.exec params: shell: bash From 1010ea62f5e2196c36e2a6eb61549fc6e884a100 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 10 Apr 2023 16:41:30 -0500 Subject: [PATCH 0882/2111] PYTHON-3649 Switch to Supported Build Hosts (#1184) --- .evergreen/config.yml | 132 +++++++++++++++++------------------------- 1 file changed, 54 insertions(+), 78 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0b2aaa8d54..21c9992dd8 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -321,7 +321,7 @@ functions: ${PREPARE_SHELL} # The mongohouse build script needs to be passed the VARIANT variable, see # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 - VARIANT=ubuntu1804 bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh + VARIANT=rhel84-small bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - command: shell.exec type: setup params: @@ -2077,7 +2077,7 @@ tasks: shell: "bash" script: | ${PREPARE_SHELL} - export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/run-tests.sh - name: testazurekms-task @@ -2134,7 +2134,7 @@ tasks: PYTHON_BINARY= KEY_NAME='${testazurekms_keyname}' \ KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' \ - LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz \ SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ ./.evergreen/run-tests.sh @@ -2189,41 +2189,18 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: rhel62 - display_name: "RHEL 6.2 (x86_64)" - run_on: rhel62-small + - id: rhel84 + display_name: "RHEL 8.4" + run_on: rhel84-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-62-64-bit/master/latest/libmongocrypt.tar.gz - # Note that rhel70 isn't currently used since it doesn't - # have a system Python 3. We'll switch to rhel70 as our main test - # system (using /opt/python) in a future change. - - id: rhel70 - display_name: "RHEL 7.0" - run_on: rhel70-small + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz + - id: rhel80-fips + display_name: "RHEL 8.0 FIPS" + run_on: rhel80-fips batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel70-fips - display_name: "RHEL 7.0 FIPS" - run_on: rhel70-fips - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: ubuntu-16.04 - display_name: "Ubuntu 16.04" - run_on: ubuntu1604-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604/master/latest/libmongocrypt.tar.gz - python3_binary: "/opt/python/3.8/bin/python3" - - id: ubuntu-18.04 - display_name: "Ubuntu 18.04" - run_on: ubuntu1804-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz - python3_binary: "/opt/python/3.8/bin/python3" + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz - id: ubuntu-20.04 display_name: "Ubuntu 20.04" run_on: ubuntu2004-small @@ -2243,7 +2220,7 @@ axes: run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-82-arm64/master/latest/libmongocrypt.tar.gz - id: windows-64-vsMulti-small display_name: "Windows 64" run_on: windows-64-vsMulti-small @@ -2595,7 +2572,7 @@ buildvariants: matrix_spec: platform: # OSes that support versions of MongoDB>=3.6 with SSL. - - ubuntu-18.04 + - rhel84 auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: @@ -2622,7 +2599,7 @@ buildvariants: - matrix_name: "tests-fips" matrix_spec: platform: - - rhel70-fips + - rhel80-fips auth: "auth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" @@ -2701,9 +2678,9 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-python-version-ubuntu18-test-ssl" +- matrix_name: "tests-python-version-rhel8.4-test-ssl" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth-ssl: "*" coverage: "*" @@ -2720,14 +2697,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: ubuntu-18.04 + platform: ubuntu-20.04 python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: ubuntu-18.04 + platform: ubuntu-20.04 python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" @@ -2759,9 +2736,9 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-ubuntu18-test-encryption" +- matrix_name: "tests-python-version-rhel84-test-encryption" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: @@ -2773,22 +2750,22 @@ buildvariants: rules: - if: encryption: ["encryption", "encryption_crypt_shared"] - platform: ubuntu-18.04 + platform: rhel84 auth-ssl: noauth-nossl python-version: "*" then: add_tasks: *encryption-server-versions -- matrix_name: "tests-python-version-ubuntu18-without-c-extensions" +- matrix_name: "tests-python-version-rhel84-without-c-extensions" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: ubuntu-18.04 + - platform: rhel84 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" @@ -2796,15 +2773,15 @@ buildvariants: display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-ubuntu18-compression" +- matrix_name: "tests-python-version-rhel84-compression" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: ubuntu-18.04 + - platform: rhel84 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" @@ -2825,15 +2802,15 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-ubuntu18" +- matrix_name: "tests-python-version-green-framework-rhel84" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: ubuntu-18.04 + - platform: rhel84 python-version: ["pypy3.7", "pypy3.8", "3.11"] green-framework: "*" auth-ssl: "*" @@ -2897,16 +2874,16 @@ buildvariants: then: add_tasks: *encryption-server-versions -# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. +# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 storage-engine: "*" python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: ubuntu-18.04 + platform: rhel84 storage-engine: ["inmemory"] python-version: "*" then: @@ -2919,7 +2896,7 @@ buildvariants: - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: ubuntu-18.04 + platform: rhel84 storage-engine: ["mmapv1"] python-version: "*" then: @@ -2929,10 +2906,10 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.7. +# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "test-disableTestCommands" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 disableTestCommands: "*" python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" @@ -2941,7 +2918,7 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" @@ -2959,13 +2936,13 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: - platform: ubuntu-18.04 - python-version: ["3.7", "3.8", "3.9", "3.10"] + platform: ubuntu-20.04 + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] mod-wsgi-version: "*" exclude_spec: # mod-wsgi 3.5 won't build against CPython 3.8+ - - platform: ubuntu-18.04 - python-version: ["3.8", "3.9", "3.10"] + - platform: ubuntu-20.04 + python-version: ["3.8", "3.9", "3.10", "3.11"] mod-wsgi-version: "3" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: @@ -2974,7 +2951,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: 3.7 display_name: "MockupDB Tests" tasks: @@ -2982,7 +2959,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -2991,7 +2968,7 @@ buildvariants: - name: "no-server" display_name: "No server test" run_on: - - ubuntu1804-test + - rhel84-small tasks: - name: "no-server" expansions: @@ -3000,7 +2977,7 @@ buildvariants: - name: "Coverage Report" display_name: "Coverage Report" run_on: - - ubuntu1804-test + - rhel84-small tasks: - name: "coverage-report" expansions: @@ -3008,7 +2985,7 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: @@ -3016,7 +2993,7 @@ buildvariants: - matrix_name: "serverless" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: "*" auth-ssl: auth-ssl serverless: "*" @@ -3026,7 +3003,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: ["3.7", "3.10"] auth: "auth" c-extensions: "*" @@ -3036,7 +3013,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 python-version: ["3.7", "3.10"] auth: "auth" versionedApi: "*" @@ -3049,8 +3026,6 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: - # OCSP stapling is not supported on Ubuntu 18.04. - # See https://jira.mongodb.org/browse/SERVER-51364. platform: ubuntu-20.04 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] mongodb-version: ["4.4", "5.0", "6.0", "latest"] @@ -3088,7 +3063,7 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: [ubuntu-18.04] + platform: [ubuntu-20.04] python-version: ["3.7"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: @@ -3123,7 +3098,7 @@ buildvariants: - matrix_name: "load-balancer" matrix_spec: - platform: ubuntu-18.04 + platform: rhel84 mongodb-version: ["rapid", "latest", "6.0"] auth-ssl: "*" python-version: "*" @@ -3135,7 +3110,7 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - ubuntu1804-test + - ubuntu2004-small tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3143,7 +3118,7 @@ buildvariants: - name: testazurekms-variant display_name: "Azure KMS" - run_on: ubuntu1804-test + run_on: ubuntu2004-small tasks: - name: testazurekms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3165,6 +3140,7 @@ buildvariants: # Debian 8.1 only supports MongoDB 3.4+ # SUSE12 s390x is only supported by MongoDB 3.4+ # No enterprise build for Archlinux, SSL not available + # RHEL 7.6 and RHEL 8.4 only supports 3.6+. # RHEL 7 only supports 2.6+ # RHEL 7.1 ppc64le is only supported by MongoDB 3.2+ # RHEL 7.2 s390x is only supported by MongoDB 3.4+ From 9256cb20afcbeaa2a1af06e579fc5349232864b7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 11 Apr 2023 12:49:34 -0500 Subject: [PATCH 0883/2111] PYTHON-3649 Use RHEL8 for PyOpenSSL Builds (#1187) --- .evergreen/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 21c9992dd8..f102668206 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2697,14 +2697,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: ubuntu-20.04 + platform: rhel84 python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: ubuntu-20.04 + platform: rhel84 python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" From f7225fda55df81265c3284b2b159a610eb390539 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Apr 2023 10:40:44 -0500 Subject: [PATCH 0884/2111] PYTHON-3652 Bump maxWireVersion for MongoDB 7.0 (#1188) --- pymongo/common.py | 2 +- test/test_topology.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index add70cfb5f..707cf5d23f 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -63,7 +63,7 @@ # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "3.6" MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 17 +MAX_SUPPORTED_WIRE_VERSION = 21 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 diff --git a/test/test_topology.py b/test/test_topology.py index d7bae9229f..e09d7c3691 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -540,8 +540,8 @@ def test_wire_version(self): HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"], - "minWireVersion": 21, - "maxWireVersion": 22, + "minWireVersion": 22, + "maxWireVersion": 24, }, ) @@ -551,7 +551,7 @@ def test_wire_version(self): # Error message should say which server failed and why. self.assertEqual( str(e), - "Server at a:27017 requires wire version 21, but this version " + "Server at a:27017 requires wire version 22, but this version " "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), ) else: From b38a416836ca26957ddc8865db300c6f2178f648 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Apr 2023 11:10:28 -0500 Subject: [PATCH 0885/2111] PYTHON-3162 Deprecate ServerDescription.election_tuple (#1189) --- pymongo/server_description.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 53f90cea25..46517ee95e 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -15,6 +15,7 @@ """Represent one server the driver is connected to.""" import time +import warnings from typing import Any, Dict, Mapping, Optional, Set, Tuple from bson import EPOCH_NAIVE @@ -180,6 +181,11 @@ def cluster_time(self) -> Optional[Mapping[str, Any]]: @property def election_tuple(self) -> Tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) return self._set_version, self._election_id @property From be355e2bea995ad72c8d1fde6dedf9ec7a637352 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Apr 2023 10:30:54 -0500 Subject: [PATCH 0886/2111] PYTHON-3604 Remove Duplicate API Docs (#1190) --- doc/api/bson/index.rst | 2 +- doc/api/gridfs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 72baae68a6..d5b69607de 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -3,7 +3,7 @@ .. automodule:: bson :synopsis: BSON (Binary JSON) Encoding and Decoding - :members: + :members: BSON, decode, decode_all, decode_file_iter, decode_iter, encode, gen_list_name, has_c, is_valid Sub-modules: diff --git a/doc/api/gridfs/index.rst b/doc/api/gridfs/index.rst index 6764ef622b..b81fbde782 100644 --- a/doc/api/gridfs/index.rst +++ b/doc/api/gridfs/index.rst @@ -3,7 +3,7 @@ .. automodule:: gridfs :synopsis: Tools for working with GridFS - :members: + :members: GridFS, GridFSBucket Sub-modules: From c5652336efe04a770fb316e7b48d5b5509c4f3f0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 14 Apr 2023 14:45:57 -0700 Subject: [PATCH 0887/2111] PYTHON-3671 Use default server selection timeout in test setup (#1191) --- test/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/__init__.py b/test/__init__.py index 20b1d00ca8..dc324c6911 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -536,7 +536,6 @@ def _check_user_provided(self): port, username=db_user, password=db_pwd, - serverSelectionTimeoutMS=100, **self.default_client_options, ) @@ -550,6 +549,8 @@ def _check_user_provided(self): return False else: raise + finally: + client.close() def _server_started_with_auth(self): # MongoDB >= 2.0 From 2cc8fb1f2ea871860c1b3731bf374ca34f63712a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 14 Apr 2023 15:35:20 -0700 Subject: [PATCH 0888/2111] PYTHON-3672 Increase server selection timeout in more tests (#1192) --- test/test_ssl.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/test/test_ssl.py b/test/test_ssl.py index 9b58c2251b..bf151578cb 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -174,7 +174,7 @@ def test_tlsCertificateKeyFilePassword(self): tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, tlsCertificateKeyFilePassword="qwerty", tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) else: connected( @@ -374,7 +374,7 @@ def test_tlsCRLFile_support(self): ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) else: connected( @@ -382,7 +382,7 @@ def test_tlsCRLFile_support(self): "localhost", ssl=True, tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, **self.credentials # type: ignore[arg-type] ) ) @@ -394,17 +394,17 @@ def test_tlsCRLFile_support(self): ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, **self.credentials # type: ignore[arg-type] ) ) - uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=100" + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore uri_fmt = ( "mongodb://localhost/?ssl=true&tlsCRLFile=%s" - "&tlsCAFile=%s&serverSelectionTimeoutMS=100" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" ) with self.assertRaises(ConnectionFailure): connected( @@ -425,7 +425,7 @@ def test_validation_with_system_ca_certs(self): with self.assertRaises(ConnectionFailure): # Server cert is verified but hostname matching fails connected( - MongoClient("server", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + MongoClient("server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] ) # Server cert is verified. Disable hostname matching. @@ -434,20 +434,20 @@ def test_validation_with_system_ca_certs(self): "server", ssl=True, tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, **self.credentials # type: ignore[arg-type] ) ) # Server cert and hostname are verified. connected( - MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] ) # Server cert and hostname are verified. connected( MongoClient( - "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100", **self.credentials # type: ignore[arg-type] + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", **self.credentials # type: ignore[arg-type] ) ) @@ -622,7 +622,7 @@ def test_mongodb_x509_auth(self): ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) ) except (ConnectionFailure, ConfigurationError): From 79488d95dbeac0a6d48256ecaba7c0ebdf90884d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 25 Apr 2023 12:30:23 -0600 Subject: [PATCH 0889/2111] PYTHON-3678 Username/password needs to be escaped with quote_plus to account for '/' (#1193) --- doc/examples/authentication.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index a46f95c789..5bd3282146 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -11,7 +11,7 @@ Percent-Escaping Username and Password -------------------------------------- Username and password must be percent-escaped with -:py:func:`urllib.parse.quote`, to be used in a MongoDB URI. For example:: +:py:func:`urllib.parse.quote_plus`, to be used in a MongoDB URI. For example:: >>> from pymongo import MongoClient >>> import urllib.parse From dfd82d2375bd641f9d7c70ae34751c1db3fc673d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Apr 2023 17:28:10 -0500 Subject: [PATCH 0890/2111] PYTHON-3677 Update docs on Range Index (#1195) --- doc/changelog.rst | 1 - pymongo/encryption.py | 9 +++------ pymongo/encryption_options.py | 3 +-- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 2ad33e41ec..3c0419f401 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,7 +6,6 @@ Changes in Version 4.4 - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. -- **BETA** Added support for range queries on client side field level encrypted collections. - pymongocrypt 1.5.0 or later is now required for client side field level encryption support. - Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. diff --git a/pymongo/encryption.py b/pymongo/encryption.py index d94e1969b0..4ad59d436e 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -799,9 +799,9 @@ def encrypt( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - - `range_opts`: **(BETA)** An instance of RangeOpts. + - `range_opts`: Experimental only, not intended for public use. - .. note:: `query_type`, `contention_factor` and `range_opts` are part of the Queryable Encryption beta. + .. note:: `query_type`, and `contention_factor` are part of the Queryable Encryption beta. Backwards-breaking changes may be made before the final release. :Returns: @@ -851,10 +851,7 @@ def encrypt_expression( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - - `range_opts`: **(BETA)** An instance of RangeOpts. - - .. note:: Support for range queries is in beta. - Backwards-breaking changes may be made before the final release. + - `range_opts`: Experimental only, not intended for public use. :Returns: The encrypted expression, a :class:`~bson.RawBSONDocument`. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 6c966e30cd..d8e9daad1f 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -233,8 +233,7 @@ def __init__( ) -> None: """Options to configure encrypted queries using the rangePreview algorithm. - .. note:: Support for Range queries is in beta. - Backwards-breaking changes may be made before the final release. + .. note:: This feature is experimental only, and not intended for public use. :Parameters: - `sparsity`: An integer. From e75cfec34f529c88e837f58267796ff8b13a4dc0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 28 Apr 2023 16:11:27 -0500 Subject: [PATCH 0891/2111] PYTHON-3686 Consolidate CodecOptions Typings (#1199) --- .github/workflows/test-python.yml | 3 - bson/__init__.py | 2 +- bson/codec_options.py | 458 +++++++++++++++++------------- bson/codec_options.pyi | 106 ------- test/test_custom_types.py | 3 +- 5 files changed, 256 insertions(+), 316 deletions(-) delete mode 100644 bson/codec_options.pyi diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 8dad68ab20..2941f9c3ab 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -68,9 +68,6 @@ jobs: mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test python -m pip install -U typing_extensions mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py - - name: Run mypy strict - run: | - mypy --strict test/test_typing_strict.py - name: Run pyright run: | python -m pip install -U pip pyright==1.1.290 diff --git a/bson/__init__.py b/bson/__init__.py index 2fe4aa173e..700a5d4cf8 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1115,7 +1115,7 @@ def decode_all( if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _decode_all(data, opts) # type: ignore[arg-type] + return _decode_all(data, opts) # type:ignore[arg-type] def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: diff --git a/bson/codec_options.py b/bson/codec_options.py index c09de8a931..096be85264 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -19,15 +19,17 @@ import enum from collections.abc import MutableMapping as _MutableMapping from typing import ( + TYPE_CHECKING, Any, Callable, Dict, + Generic, Iterable, Mapping, NamedTuple, Optional, + Tuple, Type, - TypeVar, Union, cast, ) @@ -37,11 +39,7 @@ UUID_REPRESENTATION_NAMES, UuidRepresentation, ) - - -def _abstractproperty(func: Callable[..., Any]) -> property: - return property(abc.abstractmethod(func)) - +from bson.typings import _DocumentType _RAW_BSON_DOCUMENT_MARKER = 101 @@ -62,7 +60,7 @@ class TypeEncoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ - @_abstractproperty + @abc.abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" pass @@ -83,7 +81,7 @@ class TypeDecoder(abc.ABC): See :ref:`custom-type-type-codec` documentation for an example. """ - @_abstractproperty + @abc.abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" pass @@ -112,7 +110,6 @@ class TypeCodec(TypeEncoder, TypeDecoder): _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) class TypeRegistry(object): @@ -244,208 +241,259 @@ class _BaseCodecOptions(NamedTuple): datetime_conversion: Optional[DatetimeConversion] -class CodecOptions(_BaseCodecOptions): - """Encapsulates options used encoding and / or decoding BSON.""" - - def __init__(self, *args, **kwargs): - """Encapsulates options used encoding and / or decoding BSON. - - The `document_class` option is used to define a custom type for use - decoding BSON documents. Access to the underlying raw BSON bytes for - a document is available using the :class:`~bson.raw_bson.RawBSONDocument` - type:: - - >>> from bson.raw_bson import RawBSONDocument - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(document_class=RawBSONDocument) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc.raw - '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' - - The document class can be any type that inherits from - :class:`~collections.abc.MutableMapping`:: - - >>> class AttributeDict(dict): - ... # A dict that supports attribute access. - ... def __getattr__(self, key): - ... return self[key] - ... def __setattr__(self, key, value): - ... self[key] = value - ... - >>> codec_options = CodecOptions(document_class=AttributeDict) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc._id - ObjectId('5b3016359110ea14e8c58b93') - - See :doc:`/examples/datetimes` for examples using the `tz_aware` and - `tzinfo` options. - - See :doc:`/examples/uuid` for examples using the `uuid_representation` - option. - - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded - to an instance of this class. Must be a subclass of - :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone - aware instances of :class:`~datetime.datetime`. Otherwise they will be - naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New - applications should consider setting this to - :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the - timezone to/from which :class:`~datetime.datetime` objects should be - encoded/decoded. - - `type_registry`: Instance of :class:`TypeRegistry` used to customize - encoding and decoding behavior. - - `datetime_conversion`: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. - - .. versionchanged:: 4.0 - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionadded:: 3.8 - `type_registry` attribute. - - .. warning:: Care must be taken when changing - `unicode_decode_error_handler` from its default value ('strict'). - The 'replace' and 'ignore' modes should not be used when documents - retrieved from the server will be modified in the client application - and stored back to the server. - """ - super().__init__() - - def __new__( - cls: Type["CodecOptions"], - document_class: Optional[Type[Mapping[str, Any]]] = None, - tz_aware: bool = False, - uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: str = "strict", - tzinfo: Optional[datetime.tzinfo] = None, - type_registry: Optional[TypeRegistry] = None, - datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, - ) -> "CodecOptions": - doc_class = document_class or dict - # issubclass can raise TypeError for generic aliases like SON[str, Any]. - # In that case we can use the base class for the comparison. - is_mapping = False - try: - is_mapping = issubclass(doc_class, _MutableMapping) - except TypeError: - if hasattr(doc_class, "__origin__"): - is_mapping = issubclass(doc_class.__origin__, _MutableMapping) # type: ignore[union-attr] - if not (is_mapping or _raw_document_class(doc_class)): - raise TypeError( - "document_class must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "subclass of collections.abc.MutableMapping" - ) - if not isinstance(tz_aware, bool): - raise TypeError("tz_aware must be True or False") - if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError( - "uuid_representation must be a value from bson.binary.UuidRepresentation" +if TYPE_CHECKING: + + class CodecOptions(Tuple, Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[int] + + def __new__( + cls: Type["CodecOptions"], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., + ) -> "CodecOptions[_DocumentType]": + ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> "CodecOptions[_DocumentType]": + ... + + def _arguments_repr(self) -> str: + ... + + def _options_dict(self) -> Dict[Any, Any]: + ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable) -> "CodecOptions[_DocumentType]": + ... + + def _asdict(self) -> Dict[str, Any]: + ... + + def _replace(self, **kwargs: Any) -> "CodecOptions[_DocumentType]": + ... + + _source: str + _fields: Tuple[str] + +else: + + class CodecOptions(_BaseCodecOptions): + """Encapsulates options used encoding and / or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used encoding and / or decoding BSON. + + The `document_class` option is used to define a custom type for use + decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See :doc:`/examples/datetimes` for examples using the `tz_aware` and + `tzinfo` options. + + See :doc:`/examples/uuid` for examples using the `uuid_representation` + option. + + :Parameters: + - `document_class`: BSON documents returned in queries will be decoded + to an instance of this class. Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. + - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + - `uuid_representation`: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + - `type_registry`: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + - `datetime_conversion`: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + super().__init__() + + def __new__( + cls: Type["CodecOptions"], + document_class: Optional[Type[Mapping[str, Any]]] = None, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: str = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, + ) -> "CodecOptions": + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. + is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) + if not (is_mapping or _raw_document_class(doc_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.abc.MutableMapping" + ) + if not isinstance(tz_aware, bool): + raise TypeError("tz_aware must be True or False") + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + if not isinstance(unicode_decode_error_handler, str): + raise ValueError("unicode_decode_error_handler must be a string") + if tzinfo is not None: + if not isinstance(tzinfo, datetime.tzinfo): + raise TypeError("tzinfo must be an instance of datetime.tzinfo") + if not tz_aware: + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") + + type_registry = type_registry or TypeRegistry() + + if not isinstance(type_registry, TypeRegistry): + raise TypeError("type_registry must be an instance of TypeRegistry") + + return tuple.__new__( + cls, + ( + doc_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + datetime_conversion, + ), ) - if not isinstance(unicode_decode_error_handler, str): - raise ValueError("unicode_decode_error_handler must be a string") - if tzinfo is not None: - if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError("tzinfo must be an instance of datetime.tzinfo") - if not tz_aware: - raise ValueError("cannot specify tzinfo without also setting tz_aware=True") - - type_registry = type_registry or TypeRegistry() - - if not isinstance(type_registry, TypeRegistry): - raise TypeError("type_registry must be an instance of TypeRegistry") - - return tuple.__new__( - cls, - ( - doc_class, - tz_aware, - uuid_representation, - unicode_decode_error_handler, - tzinfo, - type_registry, - datetime_conversion, - ), - ) - - def _arguments_repr(self) -> str: - """Representation of the arguments used to create this object.""" - document_class_repr = "dict" if self.document_class is dict else repr(self.document_class) - - uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( - self.uuid_representation, self.uuid_representation - ) - return ( - "document_class=%s, tz_aware=%r, uuid_representation=%s, " - "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r, datetime_conversion=%s" - % ( - document_class_repr, - self.tz_aware, - uuid_rep_repr, - self.unicode_decode_error_handler, - self.tzinfo, - self.type_registry, - self.datetime_conversion, + def _arguments_repr(self) -> str: + """Representation of the arguments used to create this object.""" + document_class_repr = ( + "dict" if self.document_class is dict else repr(self.document_class) ) - ) - - def _options_dict(self) -> Dict[str, Any]: - """Dictionary of the arguments used to create this object.""" - # TODO: PYTHON-2442 use _asdict() instead - return { - "document_class": self.document_class, - "tz_aware": self.tz_aware, - "uuid_representation": self.uuid_representation, - "unicode_decode_error_handler": self.unicode_decode_error_handler, - "tzinfo": self.tzinfo, - "type_registry": self.type_registry, - "datetime_conversion": self.datetime_conversion, - } - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) - def with_options(self, **kwargs: Any) -> "CodecOptions": - """Make a copy of this CodecOptions, overriding some options:: - - >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS - >>> DEFAULT_CODEC_OPTIONS.tz_aware - False - >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) - >>> options.tz_aware - True - - .. versionadded:: 3.5 - """ - opts = self._options_dict() - opts.update(kwargs) - return CodecOptions(**opts) + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) + return ( + "document_class=%s, tz_aware=%r, uuid_representation=%s, " + "unicode_decode_error_handler=%r, tzinfo=%r, " + "type_registry=%r, datetime_conversion=%s" + % ( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + self.datetime_conversion, + ) + ) -DEFAULT_CODEC_OPTIONS = CodecOptions() + def _options_dict(self) -> Dict[str, Any]: + """Dictionary of the arguments used to create this object.""" + # TODO: PYTHON-2442 use _asdict() instead + return { + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, + } + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) + + def with_options(self, **kwargs: Any) -> "CodecOptions": + """Make a copy of this CodecOptions, overriding some options:: + + >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS + >>> DEFAULT_CODEC_OPTIONS.tz_aware + False + >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) + >>> options.tz_aware + True + + .. versionadded:: 3.5 + """ + opts = self._options_dict() + opts.update(kwargs) + return CodecOptions(**opts) + + +DEFAULT_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" = CodecOptions() def _parse_codec_options(options: Any) -> CodecOptions: diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi deleted file mode 100644 index 8242bd4cb2..0000000000 --- a/bson/codec_options.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2022-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Workaround for https://bugs.python.org/issue43923. -Ideally we would have done this with a single class, but -generic subclasses *must* take a parameter, and prior to Python 3.9 -or in Python 3.7 and 3.8 with `from __future__ import annotations`, -you get the error: "TypeError: 'type' object is not subscriptable". -""" - -import datetime -import abc -import enum -from typing import Tuple, Generic, Optional, Mapping, Any, Type, Dict, Iterable, Tuple, Callable, Union -from bson.typings import _DocumentType, _DocumentTypeArg - - -class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def python_type(self) -> Any: ... - @abc.abstractmethod - def transform_python(self, value: Any) -> Any: ... - -class TypeDecoder(abc.ABC, metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def bson_type(self) -> Any: ... - @abc.abstractmethod - def transform_bson(self, value: Any) -> Any: ... - -class TypeCodec(TypeEncoder, TypeDecoder, metaclass=abc.ABCMeta): ... - -Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] -Fallback = Callable[[Any], Any] - -class TypeRegistry: - _decoder_map: Dict[Any, Any] - _encoder_map: Dict[Any, Any] - _fallback_encoder: Optional[Fallback] - - def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... - def __eq__(self, other: Any) -> Any: ... - -class DatetimeConversion(int, enum.Enum): - DATETIME = ... - DATETIME_CLAMP = ... - DATETIME_MS = ... - DATETIME_AUTO = ... - -class CodecOptions(Tuple, Generic[_DocumentType]): - document_class: Type[_DocumentType] - tz_aware: bool - uuid_representation: int - unicode_decode_error_handler: Optional[str] - tzinfo: Optional[datetime.tzinfo] - type_registry: TypeRegistry - datetime_conversion: Optional[int] - - def __new__( - cls: Type[CodecOptions], - document_class: Optional[Type[_DocumentType]] = ..., - tz_aware: bool = ..., - uuid_representation: Optional[int] = ..., - unicode_decode_error_handler: Optional[str] = ..., - tzinfo: Optional[datetime.tzinfo] = ..., - type_registry: Optional[TypeRegistry] = ..., - datetime_conversion: Optional[int] = ..., - ) -> CodecOptions[_DocumentType]: ... - - # CodecOptions API - def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentTypeArg]: ... - - def _arguments_repr(self) -> str: ... - - def _options_dict(self) -> Dict[Any, Any]: ... - - # NamedTuple API - @classmethod - def _make(cls, obj: Iterable) -> CodecOptions[_DocumentType]: ... - - def _asdict(self) -> Dict[str, Any]: ... - - def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... - - _source: str - _fields: Tuple[str] - - -DEFAULT_CODEC_OPTIONS: "CodecOptions[Mapping[str, Any]]" -_RAW_BSON_DOCUMENT_MARKER: int - -def _raw_document_class(document_class: Any) -> bool: ... - -def _parse_codec_options(options: Any) -> CodecOptions: ... diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 868756c67d..676b3b6af0 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -541,7 +541,8 @@ def transform_bson(self, value): {MyIntEncoder.python_type: codec_instances[1].transform_python}, ) self.assertEqual( - type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, ) def test_initialize_fail(self): From bc9029a22879c772e6049b962435fd563636ea92 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Apr 2023 14:27:06 -0700 Subject: [PATCH 0892/2111] PYTHON-3679 Support mypy 1.2 (#1194) --- .github/workflows/test-python.yml | 2 +- bson/decimal128.py | 2 +- bson/son.py | 4 ++-- test/test_encryption.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 2941f9c3ab..bb0b836788 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -58,7 +58,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy==0.990 + python -m pip install -U pip mypy==1.2 pip install -e ".[zstd, encryption, ocsp]" - name: Run mypy run: | diff --git a/bson/decimal128.py b/bson/decimal128.py index ab2d1a24ac..bce5b251e9 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -100,7 +100,7 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: if significand & (1 << i): high |= 1 << (i - 64) - biased_exponent = exponent + _EXPONENT_BIAS + biased_exponent = exponent + _EXPONENT_BIAS # type: ignore[operator] if high >> 49 == 1: high = high & 0x7FFFFFFFFFFF diff --git a/bson/son.py b/bson/son.py index e4238b4058..bba108aa80 100644 --- a/bson/son.py +++ b/bson/son.py @@ -66,7 +66,7 @@ def __init__( self.update(kwargs) def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": - instance = super(SON, cls).__new__(cls, *args, **kwargs) + instance = super(SON, cls).__new__(cls, *args, **kwargs) # type: ignore[type-var] instance.__keys = [] return instance @@ -115,7 +115,7 @@ def clear(self) -> None: self.__keys = [] super(SON, self).clear() - def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[override] + def setdefault(self, key: _Key, default: _Value) -> _Value: try: return self[key] except KeyError: diff --git a/test/test_encryption.py b/test/test_encryption.py index 6cdc8da3b6..872e0356ad 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2162,7 +2162,7 @@ def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): self.addCleanup(encryption.close) ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] if not hasattr(ctx, "check_ocsp_endpoint"): - raise self.skipTest("OCSP not enabled") # type:ignore + raise self.skipTest("OCSP not enabled") self.assertFalse(ctx.check_ocsp_endpoint) From deb0566c3e8ede09fa7f88fd173ef25b0e40c3a9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 1 May 2023 09:16:28 -0700 Subject: [PATCH 0893/2111] PYTHON-3614 Support Queryable Encryption protocol v2 on 7.0+ (#1197) Resync FLE spec tests and update docs for new QE protocol on 7.0. Add client side error for createEncryptedCollection on MongoDB < 7.0. KMS timeout errors should always have exc.timeout==True. PYTHON-3583 Drivers should not create the ECC collection in v2 of queryable encryption. --- doc/changelog.rst | 7 +- doc/examples/encryption.rst | 8 +- pymongo/collection.py | 18 +- pymongo/common.py | 4 - pymongo/database.py | 7 +- pymongo/encryption.py | 36 +- pymongo/encryption_options.py | 1 - .../etc/data/encryptedFields-Range-Date.json | 36 -- .../data/encryptedFields-Range-Decimal.json | 26 - ...ncryptedFields-Range-DecimalPrecision.json | 35 - .../data/encryptedFields-Range-Double.json | 26 - ...encryptedFields-Range-DoublePrecision.json | 35 - .../etc/data/encryptedFields-Range-Int.json | 32 - .../etc/data/encryptedFields-Range-Long.json | 32 - .../etc/data/encryptedFields.json | 5 +- .../etc/data/range-encryptedFields-Date.json | 49 +- ...ge-encryptedFields-DecimalNoPrecision.json | 36 +- ...ange-encryptedFields-DecimalPrecision.json | 5 +- ...nge-encryptedFields-DoubleNoPrecision.json | 36 +- ...range-encryptedFields-DoublePrecision.json | 54 +- .../etc/data/range-encryptedFields-Int.json | 48 +- .../etc/data/range-encryptedFields-Long.json | 48 +- .../spec/legacy/bypassedCommand.json | 9 +- ...s.json => fle2v2-BypassQueryAnalysis.json} | 116 ++-- ...{fle2-Compact.json => fle2v2-Compact.json} | 6 +- .../fle2v2-CreateCollection-OldServer.json | 62 ++ ...tion.json => fle2v2-CreateCollection.json} | 609 +++--------------- ...a.json => fle2v2-DecryptExistingData.json} | 3 +- .../{fle2-Delete.json => fle2v2-Delete.json} | 29 +- ...ncryptedFields-vs-EncryptedFieldsMap.json} | 9 +- ...fle2v2-EncryptedFields-vs-jsonSchema.json} | 10 +- ...> fle2v2-EncryptedFieldsMap-defaults.json} | 8 +- ...date.json => fle2v2-FindOneAndUpdate.json} | 50 +- ...ed.json => fle2v2-InsertFind-Indexed.json} | 10 +- ....json => fle2v2-InsertFind-Unindexed.json} | 6 +- ...MissingKey.json => fle2v2-MissingKey.json} | 6 +- ...cryption.json => fle2v2-NoEncryption.json} | 3 +- ....json => fle2v2-Range-Date-Aggregate.json} | 11 +- ...son => fle2v2-Range-Date-Correctness.json} | 6 +- ...ete.json => fle2v2-Range-Date-Delete.json} | 30 +- ...> fle2v2-Range-Date-FindOneAndUpdate.json} | 29 +- ...json => fle2v2-Range-Date-InsertFind.json} | 11 +- ...ate.json => fle2v2-Range-Date-Update.json} | 29 +- ...on => fle2v2-Range-Decimal-Aggregate.json} | 37 +- ... => fle2v2-Range-Decimal-Correctness.json} | 144 ++--- ....json => fle2v2-Range-Decimal-Delete.json} | 52 +- ...le2v2-Range-Decimal-FindOneAndUpdate.json} | 59 +- ...n => fle2v2-Range-Decimal-InsertFind.json} | 37 +- ....json => fle2v2-Range-Decimal-Update.json} | 57 +- ...2v2-Range-DecimalPrecision-Aggregate.json} | 11 +- ...2-Range-DecimalPrecision-Correctness.json} | 6 +- ...fle2v2-Range-DecimalPrecision-Delete.json} | 30 +- ...ge-DecimalPrecision-FindOneAndUpdate.json} | 29 +- ...v2-Range-DecimalPrecision-InsertFind.json} | 11 +- ...fle2v2-Range-DecimalPrecision-Update.json} | 29 +- ...son => fle2v2-Range-Double-Aggregate.json} | 37 +- ...n => fle2v2-Range-Double-Correctness.json} | 144 ++--- ...e.json => fle2v2-Range-Double-Delete.json} | 52 +- ...fle2v2-Range-Double-FindOneAndUpdate.json} | 59 +- ...on => fle2v2-Range-Double-InsertFind.json} | 37 +- ...e.json => fle2v2-Range-Double-Update.json} | 57 +- ...e2v2-Range-DoublePrecision-Aggregate.json} | 11 +- ...v2-Range-DoublePrecision-Correctness.json} | 6 +- ... fle2v2-Range-DoublePrecision-Delete.json} | 30 +- ...nge-DoublePrecision-FindOneAndUpdate.json} | 29 +- ...2v2-Range-DoublePrecision-InsertFind.json} | 11 +- ... fle2v2-Range-DoublePrecision-Update.json} | 29 +- ...e.json => fle2v2-Range-Int-Aggregate.json} | 11 +- ...json => fle2v2-Range-Int-Correctness.json} | 6 +- ...lete.json => fle2v2-Range-Int-Delete.json} | 30 +- ...=> fle2v2-Range-Int-FindOneAndUpdate.json} | 29 +- ....json => fle2v2-Range-Int-InsertFind.json} | 11 +- ...date.json => fle2v2-Range-Int-Update.json} | 29 +- ....json => fle2v2-Range-Long-Aggregate.json} | 11 +- ...son => fle2v2-Range-Long-Correctness.json} | 6 +- ...ete.json => fle2v2-Range-Long-Delete.json} | 30 +- ...> fle2v2-Range-Long-FindOneAndUpdate.json} | 29 +- ...json => fle2v2-Range-Long-InsertFind.json} | 11 +- ...ate.json => fle2v2-Range-Long-Update.json} | 29 +- ...gType.json => fle2v2-Range-WrongType.json} | 6 +- .../{fle2-Update.json => fle2v2-Update.json} | 52 +- ...2-validatorAndPartialFieldExpression.json} | 21 +- .../spec/legacy/timeoutMS.json | 200 ++++++ .../spec/unified/rewrapManyDataKey.json | 30 +- test/test_encryption.py | 15 +- test/unified_format.py | 4 +- test/utils.py | 12 +- test/utils_spec_runner.py | 49 +- 88 files changed, 1086 insertions(+), 2175 deletions(-) delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Date.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Double.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Int.json delete mode 100644 test/client-side-encryption/etc/data/encryptedFields-Range-Long.json rename test/client-side-encryption/spec/legacy/{fle2-BypassQueryAnalysis.json => fle2v2-BypassQueryAnalysis.json} (65%) rename test/client-side-encryption/spec/legacy/{fle2-Compact.json => fle2v2-Compact.json} (97%) create mode 100644 test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json rename test/client-side-encryption/spec/legacy/{fle2-CreateCollection.json => fle2v2-CreateCollection.json} (74%) rename test/client-side-encryption/spec/legacy/{fle2-DecryptExistingData.json => fle2v2-DecryptExistingData.json} (98%) rename test/client-side-encryption/spec/legacy/{fle2-Delete.json => fle2v2-Delete.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-EncryptedFields-vs-EncryptedFieldsMap.json => fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-EncryptedFields-vs-jsonSchema.json => fle2v2-EncryptedFields-vs-jsonSchema.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-EncryptedFieldsMap-defaults.json => fle2v2-EncryptedFieldsMap-defaults.json} (93%) rename test/client-side-encryption/spec/legacy/{fle2-FindOneAndUpdate.json => fle2v2-FindOneAndUpdate.json} (87%) rename test/client-side-encryption/spec/legacy/{fle2-InsertFind-Indexed.json => fle2v2-InsertFind-Indexed.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-InsertFind-Unindexed.json => fle2v2-InsertFind-Unindexed.json} (97%) rename test/client-side-encryption/spec/legacy/{fle2-MissingKey.json => fle2v2-MissingKey.json} (94%) rename test/client-side-encryption/spec/legacy/{fle2-NoEncryption.json => fle2v2-NoEncryption.json} (96%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Aggregate.json => fle2v2-Range-Date-Aggregate.json} (89%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Correctness.json => fle2v2-Range-Date-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Delete.json => fle2v2-Range-Date-Delete.json} (83%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-FindOneAndUpdate.json => fle2v2-Range-Date-FindOneAndUpdate.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-InsertFind.json => fle2v2-Range-Date-InsertFind.json} (89%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Date-Update.json => fle2v2-Range-Date-Update.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Aggregate.json => fle2v2-Range-Decimal-Aggregate.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Correctness.json => fle2v2-Range-Decimal-Correctness.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Delete.json => fle2v2-Range-Decimal-Delete.json} (65%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-FindOneAndUpdate.json => fle2v2-Range-Decimal-FindOneAndUpdate.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-InsertFind.json => fle2v2-Range-Decimal-InsertFind.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Decimal-Update.json => fle2v2-Range-Decimal-Update.json} (75%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Aggregate.json => fle2v2-Range-DecimalPrecision-Aggregate.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Correctness.json => fle2v2-Range-DecimalPrecision-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Delete.json => fle2v2-Range-DecimalPrecision-Delete.json} (81%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-FindOneAndUpdate.json => fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-InsertFind.json => fle2v2-Range-DecimalPrecision-InsertFind.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DecimalPrecision-Update.json => fle2v2-Range-DecimalPrecision-Update.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Aggregate.json => fle2v2-Range-Double-Aggregate.json} (81%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Correctness.json => fle2v2-Range-Double-Correctness.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Delete.json => fle2v2-Range-Double-Delete.json} (72%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-FindOneAndUpdate.json => fle2v2-Range-Double-FindOneAndUpdate.json} (79%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-InsertFind.json => fle2v2-Range-Double-InsertFind.json} (80%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Double-Update.json => fle2v2-Range-Double-Update.json} (80%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Aggregate.json => fle2v2-Range-DoublePrecision-Aggregate.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Correctness.json => fle2v2-Range-DoublePrecision-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Delete.json => fle2v2-Range-DoublePrecision-Delete.json} (80%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-FindOneAndUpdate.json => fle2v2-Range-DoublePrecision-FindOneAndUpdate.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-InsertFind.json => fle2v2-Range-DoublePrecision-InsertFind.json} (86%) rename test/client-side-encryption/spec/legacy/{fle2-Range-DoublePrecision-Update.json => fle2v2-Range-DoublePrecision-Update.json} (84%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Aggregate.json => fle2v2-Range-Int-Aggregate.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Correctness.json => fle2v2-Range-Int-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Delete.json => fle2v2-Range-Int-Delete.json} (83%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-FindOneAndUpdate.json => fle2v2-Range-Int-FindOneAndUpdate.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-InsertFind.json => fle2v2-Range-Int-InsertFind.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Int-Update.json => fle2v2-Range-Int-Update.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Aggregate.json => fle2v2-Range-Long-Aggregate.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Correctness.json => fle2v2-Range-Long-Correctness.json} (99%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Delete.json => fle2v2-Range-Long-Delete.json} (83%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-FindOneAndUpdate.json => fle2v2-Range-Long-FindOneAndUpdate.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-InsertFind.json => fle2v2-Range-Long-InsertFind.json} (88%) rename test/client-side-encryption/spec/legacy/{fle2-Range-Long-Update.json => fle2v2-Range-Long-Update.json} (85%) rename test/client-side-encryption/spec/legacy/{fle2-Range-WrongType.json => fle2v2-Range-WrongType.json} (95%) rename test/client-side-encryption/spec/legacy/{fle2-Update.json => fle2v2-Update.json} (87%) rename test/client-side-encryption/spec/legacy/{fle2-validatorAndPartialFieldExpression.json => fle2v2-validatorAndPartialFieldExpression.json} (93%) create mode 100644 test/client-side-encryption/spec/legacy/timeoutMS.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 3c0419f401..19830b09ac 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,9 +8,14 @@ Changes in Version 4.4 or keys to :meth:`~pymongo.collection.Collection.create_index`. - pymongocrypt 1.5.0 or later is now required for client side field level encryption support. -- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. +- Improved support for Pyright to improve typing support for IDEs like Visual Studio Code + or Visual Studio. - Improved support for type-checking with MyPy "strict" mode (`--strict`). - Added support for Python 3.11. +- pymongocrypt 1.6.0 or later is now required for Client Side Field Level Encryption (CSFLE) + and Queryable Encryption (QE) support. MongoDB Server 7.0 introduced a backwards breaking + change to the QE protocol. Users taking advantage of the QE beta must now upgrade to + MongoDB 7.0+ and PyMongo 4.4+. Issues Resolved ............... diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 9978cb6e36..57c1a84b0f 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -364,13 +364,13 @@ data key and create a collection with the Automatic Queryable Encryption (Beta) ````````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 6.0 Enterprise to preview the capability. +You must have MongoDB 7.0 Enterprise to preview the capability. Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: @@ -396,7 +396,6 @@ Automatic encryption in Queryable Encryption is configured with an ``encrypted_f encrypted_fields_map = { "default.encryptedCollection": { "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", "ecocCollection": "encryptedCollection.ecoc", "fields": [ { @@ -429,7 +428,7 @@ automatically encrypted and decrypted. Explicit Queryable Encryption (Beta) ```````````````````````````````````` -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB >=6.0. +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, @@ -487,7 +486,6 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: encrypted_fields = { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/pymongo/collection.py b/pymongo/collection.py index 0ff56d10cd..a5d3be9e05 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -43,7 +43,7 @@ from pymongo.change_stream import CollectionChangeStream from pymongo.collation import validate_collation_or_none from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name +from pymongo.common import _ecoc_coll_name, _esc_coll_name from pymongo.cursor import Cursor, RawBatchCursor from pymongo.errors import ( ConfigurationError, @@ -232,8 +232,9 @@ def __init__( if encrypted_fields: common.validate_is_mapping("encrypted_fields", encrypted_fields) opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} - self.__create(_esc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(_ecc_coll_name(encrypted_fields, name), opts, None, session) + self.__create( + _esc_coll_name(encrypted_fields, name), opts, None, session, qev2_required=True + ) self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) self.create_index([("__safeContent__", ASCENDING)], session) @@ -305,7 +306,9 @@ def _command( user_fields=user_fields, ) - def __create(self, name, options, collation, session, encrypted_fields=None): + def __create( + self, name, options, collation, session, encrypted_fields=None, qev2_required=False + ): """Sends a create command with the given options.""" cmd = SON([("create", name)]) if encrypted_fields: @@ -316,6 +319,13 @@ def __create(self, name, options, collation, session, encrypted_fields=None): options["size"] = float(options["size"]) cmd.update(options) with self._socket_for_writes(session) as sock_info: + if qev2_required and sock_info.max_wire_version < 21: + raise ConfigurationError( + "Driver support of Queryable Encryption is incompatible with server. " + "Upgrade server to use Queryable Encryption. " + f"Got maxWireVersion {sock_info.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) + self._command( sock_info, cmd, diff --git a/pymongo/common.py b/pymongo/common.py index 707cf5d23f..ba861c1545 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -822,10 +822,6 @@ def _esc_coll_name(encrypted_fields, name): return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc") -def _ecc_coll_name(encrypted_fields, name): - return encrypted_fields.get("eccCollection", f"enxcol_.{name}.ecc") - - def _ecoc_coll_name(encrypted_fields, name): return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc") diff --git a/pymongo/database.py b/pymongo/database.py index 6a73f884c5..358b946201 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -39,7 +39,7 @@ from pymongo.change_stream import DatabaseChangeStream from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor -from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name +from pymongo.common import _ecoc_coll_name, _esc_coll_name from pymongo.errors import CollectionInvalid, InvalidName from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline @@ -394,7 +394,6 @@ def create_collection( { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { @@ -1009,7 +1008,6 @@ def drop_collection( { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { @@ -1061,9 +1059,6 @@ def drop_collection( self._drop_helper( _esc_coll_name(encrypted_fields, name), session=session, comment=comment ) - self._drop_helper( - _ecc_coll_name(encrypted_fields, name), session=session, comment=comment - ) self._drop_helper( _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment ) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4ad59d436e..4c46bf56ae 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -51,12 +51,13 @@ EncryptedCollectionError, EncryptionError, InvalidOperation, + PyMongoError, ServerSelectionTimeoutError, ) from pymongo.mongo_client import MongoClient from pymongo.network import BLOCKING_IO_ERRORS from pymongo.operations import UpdateOne -from pymongo.pool import PoolOptions, _configured_socket +from pymongo.pool import PoolOptions, _configured_socket, _raise_connection_failure from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context @@ -139,20 +140,26 @@ def kms_request(self, kms_context): ssl_context=ctx, ) host, port = parse_host(endpoint, _HTTPS_PORT) - conn = _configured_socket((host, port), opts) try: - conn.sendall(message) - while kms_context.bytes_needed > 0: - # CSOT: update timeout. - conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = conn.recv(kms_context.bytes_needed) - if not data: - raise OSError("KMS connection closed") - kms_context.feed(data) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") - finally: - conn.close() + conn = _configured_socket((host, port), opts) + try: + conn.sendall(message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data = conn.recv(kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") + finally: + conn.close() + except (PyMongoError, MongoCryptError): + raise # Propagate pymongo errors directly. + except Exception as error: + # Wrap I/O errors in PyMongo exceptions. + _raise_connection_failure((host, port), error) def collection_info(self, database, filter): """Get the collection info for a namespace. @@ -588,7 +595,6 @@ def create_encrypted_collection( { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index d8e9daad1f..0cb96d7dad 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -158,7 +158,6 @@ def __init__( { "db.encryptedCollection": { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json deleted file mode 100644 index c9ad1ffdd4..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Date.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDate", - "bsonType": "date", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$date": { - "$numberLong": "0" - } - }, - "max": { - "$date": { - "$numberLong": "200" - } - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json deleted file mode 100644 index f209536c9c..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Decimal.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDecimal", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json deleted file mode 100644 index e7634152ba..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-DecimalPrecision.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDecimalPrecision", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDecimal": "0.0" - }, - "max": { - "$numberDecimal": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json deleted file mode 100644 index 4e9e8d6d81..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Double.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDouble", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json b/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json deleted file mode 100644 index 17c725ec44..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-DoublePrecision.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json deleted file mode 100644 index 661d7395c5..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Int.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedInt", - "bsonType": "int", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberInt": "0" - }, - "max": { - "$numberInt": "200" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json b/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json deleted file mode 100644 index b36bfb2c46..0000000000 --- a/test/client-side-encryption/etc/data/encryptedFields-Range-Long.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedLong", - "bsonType": "long", - "queries": { - "queryType": "rangePreview", - "contention": { - "$numberLong": "0" - }, - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberLong": "0" - }, - "max": { - "$numberLong": "200" - } - } - } - ] -} diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json index 2364590e4c..88abe5a604 100644 --- a/test/client-side-encryption/etc/data/encryptedFields.json +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -1,7 +1,4 @@ { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -30,4 +27,4 @@ "bsonType": "string" } ] -} +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json index e19fc1e182..97a2b2d4e5 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -1,30 +1,33 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDate", - "bsonType": "date", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$date": { - "$numberLong": "0" - } - }, - "max": { - "$date": { - "$numberLong": "200" + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" } + }, + "max": { + "$date": { + "$numberLong": "200" } } } - ] + } + ] } diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json index c6d129d4ca..4d284475f4 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -1,21 +1,23 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDecimalNoPrecision", - "bsonType": "decimal", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberInt": "1" - } + "sparsity": { + "$numberLong": "1" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json index c23c3fa923..53449182b2 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json @@ -11,8 +11,11 @@ "bsonType": "decimal", "queries": { "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, "sparsity": { - "$numberInt": "1" + "$numberLong": "1" }, "min": { "$numberDecimal": "0.0" diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json index 4af6422714..b478a772d7 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json @@ -1,21 +1,23 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" }, - "path": "encryptedDoubleNoPrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - } + "sparsity": { + "$numberLong": "1" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json index c1f388219d..395a369680 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json @@ -1,30 +1,32 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" }, - "path": "encryptedDoublePrecision", - "bsonType": "double", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberDouble": "0.0" - }, - "max": { - "$numberDouble": "200.0" - }, - "precision": { - "$numberInt": "2" - } + "precision": { + "$numberInt": "2" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json index 217bf6743c..61b7082dff 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json @@ -1,27 +1,29 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" }, - "path": "encryptedInt", - "bsonType": "int", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberInt": "0" - }, - "max": { - "$numberInt": "200" - } + "max": { + "$numberInt": "200" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json index 0fb87edaef..b18b84b6e8 100644 --- a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -1,27 +1,29 @@ { - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "rangePreview", + "contention": { + "$numberLong": "0" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" }, - "path": "encryptedLong", - "bsonType": "long", - "queries": { - "queryType": "rangePreview", - "sparsity": { - "$numberLong": "1" - }, - "min": { - "$numberLong": "0" - }, - "max": { - "$numberLong": "200" - } + "max": { + "$numberLong": "200" } } - ] - } - \ No newline at end of file + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json index bd0b1c565d..18054a70cb 100644 --- a/test/client-side-encryption/spec/legacy/bypassedCommand.json +++ b/test/client-side-encryption/spec/legacy/bypassedCommand.json @@ -78,7 +78,7 @@ ] }, { - "description": "current op is not bypassed", + "description": "kill op is not bypassed", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -90,14 +90,15 @@ { "name": "runCommand", "object": "database", - "command_name": "currentOp", + "command_name": "killOp", "arguments": { "command": { - "currentOp": 1 + "killOp": 1, + "op": 1234 } }, "result": { - "errorContains": "command not supported for auto encryption: currentOp" + "errorContains": "command not supported for auto encryption: killOp" } } ] diff --git a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json similarity index 65% rename from test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json rename to test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json index b8d06e8bcd..dcc3983ae0 100644 --- a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -75,36 +73,6 @@ "masterKey": { "provider": "local" } - }, - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } } ], "tests": [ @@ -133,7 +101,7 @@ "_id": 1, "encryptedIndexed": { "$binary": { - "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", "subType": "06" } } @@ -150,7 +118,7 @@ "result": [ { "_id": 1, - "encryptedIndexed": "value123" + "encryptedIndexed": "123" } ] } @@ -176,13 +144,50 @@ "_id": 1, "encryptedIndexed": { "$binary": { - "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", "subType": "06" } } } ], - "ordered": true + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } }, "command_name": "insert" } @@ -230,39 +235,6 @@ }, "command_name": "find" } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } } ], "outcome": { @@ -276,7 +248,7 @@ "__safeContent__": [ { "$binary": { - "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", "subType": "00" } } diff --git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json similarity index 97% rename from test/client-side-encryption/spec/legacy/fle2-Compact.json rename to test/client-side-encryption/spec/legacy/fle2v2-Compact.json index 6ca0f9ba02..e47c689bf0 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..d5b04b3ea5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,62 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json similarity index 74% rename from test/client-side-encryption/spec/legacy/fle2-CreateCollection.json rename to test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json index 7f4f38161e..819d2eec3c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -21,9 +22,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -64,7 +62,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -107,15 +105,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -149,21 +138,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -184,9 +158,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -242,12 +216,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -279,7 +247,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -322,15 +290,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -364,21 +323,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -399,6 +343,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -408,12 +355,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -460,12 +401,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -497,7 +432,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -536,14 +471,6 @@ "collection": "encryptedCollection" } }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, { "name": "assertCollectionNotExists", "object": "testRunner", @@ -580,15 +507,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -622,21 +540,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -666,12 +569,6 @@ "subType": "04", "base64": "AAAAAAAAAAAAAAAAAAAAAA==" } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } } } ] @@ -707,156 +604,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - } - ] - }, - { - "description": "encryptedFieldsMap with cyclic entries does not loop", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - }, - "default.encryptedCollection.esc": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -874,94 +621,6 @@ "command_name": "drop", "database_name": "default" } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } } ] }, @@ -974,9 +633,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1059,9 +715,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1102,7 +755,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -1145,15 +798,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1187,21 +831,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1222,9 +851,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -1278,9 +907,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1302,9 +928,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1329,7 +952,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -1372,15 +995,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1414,21 +1028,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1449,9 +1048,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -1510,9 +1109,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1548,15 +1144,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1594,9 +1181,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1618,9 +1202,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1645,7 +1226,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -1683,9 +1264,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1709,14 +1287,6 @@ "collection": "enxcol_.encryptedCollection.esc" } }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, { "name": "assertCollectionNotExists", "object": "testRunner", @@ -1744,15 +1314,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1786,21 +1347,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1821,9 +1367,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -1880,15 +1426,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -1926,9 +1463,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1950,9 +1484,6 @@ "arguments": { "collection": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -1977,7 +1508,7 @@ } }, { - "name": "assertCollectionExists", + "name": "assertCollectionNotExists", "object": "testRunner", "arguments": { "database": "default", @@ -2024,14 +1555,6 @@ "collection": "enxcol_.encryptedCollection.esc" } }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, { "name": "assertCollectionNotExists", "object": "testRunner", @@ -2059,15 +1582,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -2101,21 +1615,6 @@ "database_name": "default" } }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, { "command_started_event": { "command": { @@ -2136,9 +1635,9 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "escCollection": null, + "ecocCollection": null, + "eccCollection": null, "fields": [ { "path": "firstName", @@ -2210,7 +1709,7 @@ { "command_started_event": { "command": { - "drop": "enxcol_.encryptedCollection.ecc" + "drop": "enxcol_.encryptedCollection.ecoc" }, "command_name": "drop", "database_name": "default" @@ -2219,19 +1718,57 @@ { "command_started_event": { "command": { - "drop": "enxcol_.encryptedCollection.ecoc" + "drop": "encryptedCollection" }, "command_name": "drop", "database_name": "default" } + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } }, { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Encrypted State Collection name should follow" } } ] diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json similarity index 98% rename from test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json rename to test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json index e622d3334d..905d3c9456 100644 --- a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Delete.json index 8687127748..e4150eab8e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -179,7 +177,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -226,7 +223,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -235,12 +232,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -271,24 +268,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json rename to test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json index 42cd4bbc9c..b579979e94 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -94,9 +92,6 @@ }, "encryptedFieldsMap": { "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [] } } diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json rename to test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json index f4386483da..0a84d73650 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -17,9 +18,6 @@ "bsonType": "object" }, "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -186,7 +184,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -231,7 +228,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", "subType": "06" } } @@ -242,7 +239,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json similarity index 93% rename from test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json rename to test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json index 60820aae95..3e0905eadf 100644 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -73,10 +74,9 @@ }, "schema": { "default.default": { - "fields": [], "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc" + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json similarity index 87% rename from test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json index de1b5c5aad..4606fbb930 100644 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -186,7 +184,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -231,7 +228,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -247,7 +244,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -278,24 +274,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -446,7 +424,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -491,7 +468,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -509,7 +486,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -540,24 +516,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json rename to test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json index 84b69d7de9..c7149d1f5c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -182,7 +180,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -227,7 +224,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", "subType": "06" } } @@ -238,7 +235,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json similarity index 97% rename from test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json rename to test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json index 9b31438525..008b0c959f 100644 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json similarity index 94% rename from test/client-side-encryption/spec/legacy/fle2-MissingKey.json rename to test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json index 4210da09e4..0b7e86bca3 100644 --- a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -22,9 +23,6 @@ } ], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json similarity index 96% rename from test/client-side-encryption/spec/legacy/fle2-NoEncryption.json rename to test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json index 9d255bd493..185691d61c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json similarity index 89% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json index a35321cd35..dea821bd1e 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -216,7 +214,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -274,7 +271,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -324,7 +320,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -338,7 +334,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json index 5832e85418..9e4f525877 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json similarity index 83% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json index b5856e7620..7f4094f50c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -205,7 +203,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -263,7 +260,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -322,12 +318,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -361,24 +357,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDate": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json index a59258a466..5ec0601603 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -220,7 +218,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -278,7 +275,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -326,7 +322,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -344,7 +340,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -378,24 +373,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDate": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json similarity index 89% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json index 4357fafeea..efce1511c0 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -212,7 +210,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -270,7 +267,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -318,7 +314,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -329,7 +325,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json index fd170554f6..7f9fadcda4 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Date-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Date-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -216,7 +214,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -274,7 +271,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -326,7 +322,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -346,7 +342,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -380,24 +375,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDate": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json similarity index 75% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json index 73d2cf4892..fb129392b1 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -114,7 +112,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -126,7 +124,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -185,7 +183,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -196,7 +194,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -206,7 +203,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -233,7 +230,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -244,7 +241,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +250,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -281,10 +277,10 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -298,7 +294,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -308,7 +303,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -336,7 +331,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -1120,7 +1115,7 @@ "_id": { "$numberInt": "1" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json index 89b7bd3118..5120aecb7a 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -112,7 +110,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0.0" } @@ -122,7 +120,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -152,7 +150,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -163,7 +161,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -173,7 +171,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gte": { "$numberDecimal": "0.0" } @@ -186,13 +184,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -222,7 +220,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -233,7 +231,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -243,7 +241,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "1.0" } @@ -276,7 +274,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -287,7 +285,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -297,7 +295,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lt": { "$numberDecimal": "1.0" } @@ -307,7 +305,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -337,7 +335,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -348,7 +346,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -358,7 +356,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lte": { "$numberDecimal": "1.0" } @@ -371,13 +369,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -407,7 +405,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -418,7 +416,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -428,7 +426,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0.0" }, @@ -441,7 +439,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -471,7 +469,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -482,7 +480,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -492,7 +490,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -500,7 +498,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -510,7 +508,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -518,7 +516,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -548,7 +546,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -559,7 +557,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -569,7 +567,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$in": [ { "$numberDecimal": "0.0" @@ -581,7 +579,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -611,7 +609,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -622,7 +620,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -634,7 +632,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gte": { "$numberDecimal": "0.0" } @@ -651,13 +649,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -687,7 +685,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -698,7 +696,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -710,7 +708,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "1.0" } @@ -745,7 +743,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -756,7 +754,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -768,7 +766,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lt": { "$numberDecimal": "1.0" } @@ -780,7 +778,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -810,7 +808,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -821,7 +819,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -833,7 +831,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$lte": { "$numberDecimal": "1.0" } @@ -850,13 +848,13 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } }, { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -886,7 +884,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -897,7 +895,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -909,7 +907,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0.0" }, @@ -924,7 +922,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -954,7 +952,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -965,7 +963,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -977,7 +975,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -987,7 +985,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -999,7 +997,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -1009,7 +1007,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -1039,7 +1037,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -1050,7 +1048,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1.0" } } @@ -1062,7 +1060,7 @@ "pipeline": [ { "$match": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$in": [ { "$numberDecimal": "0.0" @@ -1076,7 +1074,7 @@ "result": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0.0" } } @@ -1106,7 +1104,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberInt": "0" } } @@ -1138,7 +1136,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gte": { "$numberInt": "0" } diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json similarity index 65% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json index 0463be1c69..de81159b43 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -112,7 +110,7 @@ "name": "deleteOne", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -176,7 +174,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -187,7 +185,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -197,7 +194,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -224,7 +221,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -235,7 +232,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -245,7 +241,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -272,10 +268,10 @@ "deletes": [ { "q": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -284,12 +280,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -299,7 +295,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -313,24 +309,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimal": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -345,7 +323,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json similarity index 75% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json index d0e2967771..36cf91c88c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -112,7 +110,7 @@ "name": "findOneAndUpdate", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -120,7 +118,7 @@ }, "update": { "$set": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "2" } } @@ -129,7 +127,7 @@ }, "result": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -187,7 +185,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -208,7 +205,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -235,7 +232,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -246,7 +243,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -256,7 +252,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -281,10 +277,10 @@ "command": { "findAndModify": "default", "query": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -292,7 +288,7 @@ }, "update": { "$set": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -302,7 +298,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -312,7 +307,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -326,24 +321,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimal": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -358,7 +335,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -1142,7 +1119,7 @@ "_id": { "$numberInt": "1" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json similarity index 75% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json index cea03e23fe..6b5a642aa8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -112,7 +110,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -122,7 +120,7 @@ "result": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -181,7 +179,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -192,7 +190,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -202,7 +199,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -229,7 +226,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -240,7 +237,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -250,7 +246,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -275,10 +271,10 @@ "command": { "find": "default", "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -289,7 +285,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -299,7 +294,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -327,7 +322,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -1111,7 +1106,7 @@ "_id": { "$numberInt": "1" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json similarity index 75% rename from test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json index 2f8b991cf7..8cfb7b525b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Decimal-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Decimal-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset" ] @@ -11,9 +12,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -22,7 +20,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -91,7 +89,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "0" } } @@ -102,7 +100,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "1" } } @@ -112,7 +110,7 @@ "name": "updateOne", "arguments": { "filter": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$numberDecimal": "0" } @@ -120,7 +118,7 @@ }, "update": { "$set": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$numberDecimal": "2" } } @@ -185,7 +183,7 @@ "documents": [ { "_id": 0, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -196,7 +194,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -206,7 +203,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -233,7 +230,7 @@ "documents": [ { "_id": 1, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -244,7 +241,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +250,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -283,10 +279,10 @@ "updates": [ { "q": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "CgljAAADcGF5bG9hZADZYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVjACAAAAAAeEtIU/sp7zwjA/VArYkCzkUUiRiQOmlTaVZvfUqbEp0AAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWMAIAAAAACMKx1UyNAN4yVafFht8jp4w4LNaJcLhnozfSMzDHD3owADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFYwAgAAAAAHdek2wlAQ5BQORVjudyziRTV0EKx8qbsnoDiw4HG2xaAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVjACAAAAAA3tld+twIlVWnCTXElspau5k02CwxXDjh+3u1CQtV/W4AAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWMAIAAAAABRmXIchytMNz4J439viiY5FZ1OB3AXJBkZ3udUqHpsqAADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFYwAgAAAAAJQdzGMO2s1AkdKz3ZPytivrQphUFVhvhMcXjvUGzHBDAAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVjACAAAAAA161loizzKvXS1Po/3bxeNICmKpsPSK9Q+EpkISK3jVoAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWMAIAAAAAACyakB3CZWEjbxK0u9Sflc3+EafBAQFbvBpCxKvxuEQAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFYwAgAAAAAJ43998lzKoVqWm99ZzKHJLeVscGNCVoKDvpUtt2rDI9AAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVjACAAAAAAj2h1WLr0EFEFZ31djx3BtLIbAtdE05ax54opkJo4/bQAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVjACAAAAAAqFD83E8POmo6pdqg7D+jWtngbgcV99mbQBxkbpX7ds4AAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVjACAAAAAAp0cwpAh7SiuStPwYNVvp83N5GWyWRWA7UGRxHDDj8g8AAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVjACAAAAAAJr/tEuxScrJJEMECy4itHf1y5+gO2l5sBjgM/EOLXw0AAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVjACAAAAAAeNCEn5Z3A5CgCnIgaBl2N8p0t+5rRY06vGg6ePVMaTUAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVjACAAAAAATozHsao4er/Uk5H/pKe/1JEo/3h4Pgah2RgJHXf2PeYAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVjACAAAAAAtD48LbnCkTvacfz8DvR1ixsbYceiJxJRV7tLqDUWvVwAAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVjACAAAAAAphx2eRFGSW/tm3HxilqRv5Zj5TtOy4KSGiCIm6yJANYAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVjACAAAAAAIHYCljTfUu/Ox23Nrjz2Z6cqQpn4s0sJV/SxkC9UtIEAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVjACAAAAAAGs7l0GsFNkQMcJBUuUsEOMuQupbEvi7MDcezspsCY3EAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVjACAAAAAAWnfsbdu5Qvb62rEpBR90qh+3jdYfp266dmuNPLoXys4AAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVjACAAAAAAdNxswYIMjqVta8sLC8wniMm6gOgIgG5eWTPAxItBJJ4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVjACAAAAAArWXr2Myg8T8/0eYBCUXDOsIR//zqZtlE6Lb1YZJkvYcAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVjACAAAAAAWeF6F8pBeCm27PWjQGrlyN7JHp9WqCNnwFzqZ+V51t0AAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVjACAAAAAAjbsmlVrfexmULer3RtGwnOf20pF0xv4Z46OHOaIwgzEAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVjACAAAAAAsNKKan8cMW1oYFmQiyPZbepuJlDD0/CQUS21++CP16kAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVjACAAAAAAUGZ/BwF6Q9hcFBC8F5ed6RwYWJ1/OahRwljBloMgkFAAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVjACAAAAAApU9zcPAQ2xOspmwhjWN90v2m2clQ4rk7WBmm/nUyOHQAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVjACAAAAAAl/hDhauhBkda/I/kLr/XLxyso2guawC2isj+tDQhfMoAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVjACAAAAAAVKbMdxvkfCBoqJCUVjLrtKF90a80LocckdFc7LvYuWIAAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVjACAAAAAARiC2ShVCKMjjiqB6xg+jDIYJNhhWqmceO07EC8rEXkoAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVjACAAAAAAWU2Vw2Rta8oHxPPfOWQa86tjYWndb0E3RNlZ9gthHt0AAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVjACAAAAAAqcsCyJuLvxG8STrUI1Ccaga760+4fbmIPc6bXodea0gAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVjACAAAAAA47u7VuqtYJ4jt5CcGrUacopqApwpqEpIpkalvCRtul0AAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVjACAAAAAAvTQRtEYdIj2+bfJNamvrgWRoBsYYjMQx9MyuFwSfKmYAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVjACAAAAAA4eLwHqiSgM1pMqjK3QFgYauOnI3emnlOK3rgQPuec4AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVjACAAAAAAxA7XR9Cp6671TiZesvqE/EPKVPXfGDaYuDnce+MaZw0AAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVjACAAAAAA9F9Ztm39rTapXoeRmzk6eUoa3MDPatwoQD5PvD+3xGEAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVjACAAAAAArsw2CrSC2y02EiWJxViVxlYRIXtWARpUTwO/mfqS7O4AAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVjACAAAAAAaVoGxV5YH4urmcw3Zocg+QHoq+hA0u6nIkconI0MtZoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVjACAAAAAAAI3iOqWSA+DpgNr/neIRpt85Q8pLS/81ZKDnbC4io/0AAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVjACAAAAAAIZqKrUcbtze/K3CUKdchuVXQYkgj6jQfIeMZ3GrtYeEAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVjACAAAAAAQ4K5cDGLM/2L7KbMt537tsG1eiWrboKMj6pXVfBDIBoAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVjACAAAAAAe0oLBk+ZaipcPgjjHvACAyA42cLY8i+s+V0EsarulZkAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVjACAAAAAAAeXpcv0oCp9iY/U81w+WJjFjGn60KLQEURJd/pSoaJgAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVjACAAAAAAmSJiRfjmuwq5cYfadeYQRkPqbGYYrMNTQeyhBEAYClEAAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVjACAAAAAAUHZ9EwBIvDI4PUeCOzZCuVN9g9h5Zvq2mSpmejPLBE8AAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVjACAAAAAAvz1koQRh+weAUstLWz/iHxFrLReN4KtQjdEU/zrKOFsAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVjACAAAAAARL0k3Uwe7OUwTejHMoNwK+twNHQznmo1vVUZaH7h8BIAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVjACAAAAAA93/qZqyku1BJePH5hmMWsMfuMlF7TOFZp7z2wLJNdeYAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVjACAAAAAARUbN9bbz8Balyw8pmBLg3L5dy3aQ9s8IgqpVLeQzTkUAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVjACAAAAAAnJXYvnjljaE6ajRF+h90FXzJ9EJY26D0oG9t61ZMzqgAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVjACAAAAAAbFS6SmTSDF3AsPceQrRwMMRoAnzL/k5avSHlIji5zYIAAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVjACAAAAAAoIvo/iOb/eXJv7w4KSQo74DOyO4uAIO09Ba7JZw9ousAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVjACAAAAAAlCTphL00SYotztwI+txbHAFTusjh1nNbiya24rM1dUEAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVjACAAAAAA431CBTW4s3IQde024W2cz5iEnj8EJ7p07esepoVDxmQAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVjACAAAAAAywuLdjJJuXewOXMAr25GMBEif8+P74+pGaENUn+XIB8AAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVjACAAAAAAvwntX1RgeNfp5oiD+r3sAKIbMGy1yEblODDYWec1xuIAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVjACAAAAAAKW8QTERGXwh6fE4gJsULcmu80B4BMkMaZR9USn/C6RgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVjACAAAAAAmVcLt9hf9/q+2O3K4cvFAsvnCsSofHZ5iTzIaR5YKFIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVjACAAAAAAYJOnAwVW0gPl1jkCxAL1ZAhKkJ1+ShfwyaIaipflCegAAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVjACAAAAAA1JYhGDDvtf8b2Pup5jdzQmy325tGo27n/5thjZ/GI4sAAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVjACAAAAAAhoVLxo0hXbxAzz2BWbeRxAzk08TbVuHxJkdsbCISswkAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVjACAAAAAAwLIixR/svwADNhtiOZ9mDF2rUqEvuPx2tUSQbhOpersAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVjACAAAAAAKMVXumaH5QOt5WVHFip9xWx9e5I/5Y9uEO0UAX6dy7QAAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVjACAAAAAAezZExYLAbjyMsVuSrWGQ+LsXYEcp6AFMANkQ496mTXcAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVjACAAAAAAQ011358xXtVDOyGPpF2+zGoMdiQ8PGCB1QfDcluYUF4AAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVjACAAAAAAWHAiQNo6HBkIhf72fVQxJLaCtNeTqn2OuMmYZjT7PRkAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVjACAAAAAAbfZdoqMQ71hNIsycNHl6JpSmAFDPSfgzdWkGqFZNoScAAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVjACAAAAAA4byh4Re83zUZxvEH4iJSsnFei+ohsWxjSpB5YoAPawIAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVjACAAAAAAx9wkqPvtzrL9EFDmFvktI129ti8IAdHn0oudubGPWAAAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFYwAgAAAAAITXOaLyGsYLUvOemQXxvnAHBwM/t3sMwfQus2aGoII+AAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWMAIAAAAABzRshE3O7JzVjSGQSabvFXpen8sGUCAyr8hIIevROrjgADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFYwAgAAAAAA92s3DQ3aIVOrRzusqU+xdK7cnQGCDJiyLF+su3F1ZDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWMAIAAAAABR3y/ZE3VZqBhqfUttX4bzW9wkpDoexfYZIG2fMwftcQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FYwAgAAAAANTp7TwMatTSeLH3GvsrB3IOvJo+0B8HRGUFcRzJVesdAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWMAIAAAAACVl3ds8xRdGFPAtZ+WtC4ojC1q5OzB6dSJoOLaJsdhhgADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFYwAgAAAAANSmECEsZgXBpoAzAIwaU3H0K/6HIutCn+ELRGBFzykkAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWMAIAAAAACKzGyyRT/3KSmGlKMHpswxj7pNu3rCmOETZ2DUhQsCBgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFYwAgAAAAAKeagivKdHk7wAFEPHyDhsm9mDNWH5UPB+oIIlmfbzSOAAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWMAIAAAAADH8s64PC+lo8Ul4oujBlb/irtJSwu12V0HbvNzj/9qUQADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFYwAgAAAAANeYm2aAT9f6T9uwQyH4sw2DUQyKTpo/CHt+Bb67ao4TAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWMAIAAAAAASUKCgxGu2U42qfKiyMimLPiZBMurf5v8lQJUSrhAWIAADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFYwAgAAAAAHuM0gIDED47c9rdQ4MVtVzpVfn5zTF7eokO4lew4EM+AAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWMAIAAAAAD6LppOeSiLHFI7EOBAz9pZPKU7LWbRWIINTlqw4zmJ5gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FYwAgAAAAADX1qVlsvYru6YDBMtmlQoIBhLSbVykBtft8wn/A+ohJAAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWMAIAAAAAD+RiUk9IWfaDWXtzLp2NX/vCCeU9amFnQgpJjy56dQXQADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FYwAgAAAAAMwY/KvSIOLoSHOq4TQ1V9ZSCPZc34SdPooPL4gYXa7UAAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWMAIAAAAACpywsi9vc979ApgH3BDzph7Y4VIAkQhji97sswzdLAYwADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVjACAAAAAAEPmYjDZnMZdkaYlLZLlRgist9kCrSQe0mSuKxPwwYbQAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFYwAgAAAAAKMbsr4JCCodHmkBvfhOe7BJigKiJik95wLeu3PjLSpXAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWMAIAAAAABi0dME7AUSoeMv4yIgnGMgrX9G+Uk6HV+I9/9zzABcZwADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVjACAAAAAAagOaxIb7co+HxAXhfI9ahHLUNqmGuN/4JxeR9sm6PdkAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFYwAgAAAAAAMsWoODZkTvKSNWzloogbp/AiFFspENp49RwKjWpMLHAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWMAIAAAAADOmDHuXwsu9uwKYYSN5XIb/TG3K5DB5aci/X1ATKTaSwADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVjACAAAAAAUK1Gzlm0meDhYPL7ZgKaCD0qz7Zb5rtnPn1EEW3UZxoAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FYwAgAAAAAEA5mnfyccI1SjyYftq3qCuORs/NxzzqUA+fxn3FRjVsAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWMAIAAAAABTj7i+CxxNVkbnAkSYga/v2X3/NH3nD/FRiOo2FPkzvAADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVjACAAAAAATkNGECGUFzo34ItVnTEmxbe8dbde8QzMgEEdjMhRLz8AAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFYwAgAAAAAOHfn3eVRhSWn61AfZuAnCGleSLluJ3sF9IFGUWrCsBaAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWMAIAAAAAC3Cs/mKg+sbU/BC9QWFglOau6FWAP1sJqqSZOEOdzh+gADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVjACAAAAAA56lpvjMjFzR6NMGiSF1es1X01dwINWqaxyq3UUL8XJ0AAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFYwAgAAAAAMjJrrd6E5DyUuXkWZsAqo020Qvom6xqAluQLod5SmtdAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWMAIAAAAAD0kQXxOWWOfIzqhJk5Rnvp/h5VXcGxxLZ8HMvVVH++YQADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVjACAAAAAAT3sD7PTrAPtOtiSRrG4IKmSNr/uWgX8TvIyZpZzj4wkAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FYwAgAAAAAP4A3nd470CwvaGzyJLfzbrw7ePJV35V3Cw6tuiPFlsCAAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWMAIAAAAADi6/zM3xSBXEfpRpvEdeycRDl08SmwwLey72qgZaY7FwADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVjACAAAAAAB4Kp5ewWpFMJ2T+QCrfw2eJV70L4M+GM8khV1JwqOqQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFYwAgAAAAALDJDWkmExE4L8hf2CDNLuF+dgivUGXb7ZvBp90EALm+AAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWMAIAAAAABauth7HsI90juToIGY6149TbrxP8Emoa+5Izilj9zeWQADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVjACAAAAAAy7IAYSgbqP7CyvurHMoEq1eO08TIPNO/XcS1L5RKXHkAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFYwAgAAAAABaAwqPDgkTaz4888/3PeF64+vtHbLtAZgFgEurWT5kbAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWMAIAAAAAAVwUZIMifuPtmiFSEctLt+bOFo1T4fgGiWNsZAP0ddeQADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVjACAAAAAAIPTDniUskKj5mEzULdlKtIDquwjcqkwunJyhmBazNNcAAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFYwAgAAAAACHHCAszhxijRN9Es+XRQxXo7JQ6g2qM6f5sIdMy0VKbAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWMAIAAAAAD1mJD7i29l2xloVHwS1n0CbkyodWeFfZM1KF1r4HqNIgADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVjACAAAAAAAIUpF9mApVCMCckE7S+K9RL6TPtlTTYuzUsj9E6oIpUAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFYwAgAAAAAKwOaUTAOw6Y1F5vFrbDf8yVua5Fm6hKhkdIrzejuaatAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWMAIAAAAAASanx14WR2rmomuZ2x/nuD8j75pYzz+ZAt/v7uRcoOEQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVjACAAAAAAWhdUWP186dYAIDx6RtC0o/lzwYNvYBXm4RWFMbcU3YkAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFYwAgAAAAAPS3GDNsHgCuTgEWTHiEStoY0VMzlopZ8L7wPUquK7ayAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWMAIAAAAABmDfQAQeyeutCB+wSn4RmErwYEkYpyBeCYA+3iutFzAQADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVjACAAAAAAucFM6KF1YHEhH9vn16ox+VSqNEykMGaJzhLPsLI7qk8AAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFYwAgAAAAALxpb5qDek038/wz4E//KIrwwo0voaqwlB7cTXSQ44EHAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWMAIAAAAAC1ONheOA57vNof7iZM3b82xk1abqT0bppzjQnHCmnUIwADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVjACAAAAAAQZlvU/gzOwW2rcr5+W43Q6EkfX8oX1TpRJWEAZAULx4AAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FYwAgAAAAAFWkHkxPA28FjeUjeRILC0tX8OJcanhPP68QxM5s1kcMAAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWMAIAAAAADS3qKdAEzPtGMFpsisEhnOfF0zfxmLXjHKKaNCzX4PhQADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVjACAAAAAAxeqPjcgDwae+AgEXg3hU118dHoy6rcHd6jAJrGik+EcAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFYwAgAAAAAIFYquIAyUn3D2peptolq3s8oIgSLEIctqBYx6qEeQTxAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWMAIAAAAABBO8poBKW/RWjdHBARy3Rzghz3frQIZEPE/nSKWYlVPgADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVjACAAAAAA4JWOs1XAUgX3rGBBGEDDIC9/i9khJ+FgIcwfvj/SnS0AAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFYwAgAAAAAMRQnV65wbGlT+7Wj/HvGPLKg8NBnhkQ8Hx/MIyWvynTAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWMAIAAAAAANDuaZsI2eS6/qiSdG9pCLjcPdKS96xWUJr84V/1La7gADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVjACAAAAAAd93miIlZ/uuhzjVBt0BPjzeewxih6TSzkzbsNhpMjA8AAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFYwAgAAAAAF9OccClBu1j8BVMDmMtJyCxnhWCbg9FuY/8nhuFGJknAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWMAIAAAAAC63uwNXVbd1VP5If++NprVRsHCbomU9Iq3GxCttNAweAADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVjACAAAAAA5d2REAVBZI9rUfeqJ9jhePmRXvrGolUbMTFiC78k/OIAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFYwAgAAAAACZHzWGY3PEL4uPSKPezGGJkyScYw/Vt3bKmX5inVMaOAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWMAIAAAAABb9Y2Ox3MeHCfGnMxpR7nk6hwIJg0J8/tT8fZHwxMCBQADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVjACAAAAAA4rDk/UlRHudzR12i5Ivcb5Pxwn63JX2Sjf2xrlALL7kAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFYwAgAAAAAEac2NX3v0n87D/eUM/a3Gj+5Axs896SCidMyOMaOz6zAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWMAIAAAAABK08kOnYjDWhLJ70iVhd3XEj0Q0srEQttJTawBm6GuOwADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVjACAAAAAA/9XTbq8UVKJbIeYtfNg8pdDrpDqbCwVr5Rq3Tb4dFwwAAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FYwAgAAAAAJ3GMHBeIaCyFV5zBgcUMpjfIDcdoaTYQSyQN2shVJMWAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWMAIAAAAABKIdcHRSJ3dnevw5Pj0BfULPqmDOA7KBDREMGv5rXUEAADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVjACAAAAAABf2EvIMumZohp2E/sHWEJW4VMU3ez95hwWU9pjKGRCYAAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFYwAgAAAAAPl1vH6JaJGZSJ8au0NK58X4gqleryub8slUhA0HRtzmAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWMAIAAAAAA+rgph1avVrhyHl/6G+HtASJSiJkv7u2UPYj+25fwtLAADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVjACAAAAAAKw6WqWDrI7ZfyndtrftL3uh7BaF/FQxhkh8b0Zl547gAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFYwAgAAAAADmUpVNJUwrydIllFvI/aau1703nk6tF2sfzkgFlWsKCAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWMAIAAAAADKOab9GTEiaybo9o4J72Pb9LiQU/CuKoQm6Jtlmo/IeQADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVjACAAAAAAvJcnqBNCZgHe+Q7Cqk+yiAaIuSJIRItXEsAlKy9PES4AAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFYwAgAAAAABQqDP10DcaqEY7vxFS7U/9xcadDASONow1+xmVsPZ11AAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWMAIAAAAABxLEFq6nFVI3WaYhggo1d6oeDsBSm4wCW0T6D0yE/puwADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVjACAAAAAAiThzC5jAZ5D6xuCIsUD1aLy7KxmT2Fb8ybTkLxvj17MAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FYwAgAAAAADZpcxtBFv18EdmhxsoxlDs253tNB5zqVY/h+nFD1isIAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWMAIAAAAADSTHgALJugYsjIvSK1o0UwG6VizNokXQ/tnPNjOpGB1AAABWUAIAAAAADrmnP3kS2GpCl+gdL2da90KHTkBX46iQ/sZRoj7uPz7BJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", "subType": "06" } } @@ -294,7 +290,7 @@ }, "u": { "$set": { - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" } } @@ -306,7 +302,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -316,7 +311,7 @@ "subType": "04" } }, - "path": "encryptedDecimal", + "path": "encryptedDecimalNoPrecision", "bsonType": "decimal", "queries": { "queryType": "rangePreview", @@ -330,24 +325,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimal": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" @@ -362,7 +339,7 @@ "_id": { "$numberInt": "0" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -1146,7 +1123,7 @@ "_id": { "$numberInt": "1" }, - "encryptedDecimal": { + "encryptedDecimalNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json index a3e605d1bb..801beefe18 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -327,7 +323,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json index 9fafc243d6..b8a6953611 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json similarity index 81% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json index 3d7d359af6..1abb59bfd1 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -255,7 +252,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -313,12 +309,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -351,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimalPrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json index b1442c3a3c..8d763431fa 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -209,7 +207,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -266,7 +263,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -331,7 +327,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -364,24 +359,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimalPrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json index 3b8202ff87..5407fba18b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -203,7 +201,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -307,7 +303,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json index 3dc6631c61..e5d1a4e059 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DecimalPrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DecimalPrecision-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -315,7 +311,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -335,7 +331,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -368,24 +363,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDecimalPrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json similarity index 81% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json index 3d54be3d18..d8c9cacdcc 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -116,7 +114,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -128,7 +126,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -187,7 +185,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -208,7 +205,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -235,7 +232,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -246,7 +243,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -256,7 +252,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -283,10 +279,10 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -300,7 +296,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -310,7 +305,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -336,7 +331,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -734,7 +729,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json index b09e966324..65594bcb11 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -114,7 +112,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0.0" } @@ -124,7 +122,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -154,7 +152,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -165,7 +163,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -175,7 +173,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gte": { "$numberDouble": "0.0" } @@ -188,13 +186,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -224,7 +222,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -235,7 +233,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -245,7 +243,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "1.0" } @@ -278,7 +276,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -289,7 +287,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -299,7 +297,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lt": { "$numberDouble": "1.0" } @@ -309,7 +307,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -339,7 +337,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -350,7 +348,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -360,7 +358,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lte": { "$numberDouble": "1.0" } @@ -373,13 +371,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -409,7 +407,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -420,7 +418,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -430,7 +428,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0.0" }, @@ -443,7 +441,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -473,7 +471,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -484,7 +482,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -494,7 +492,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -502,7 +500,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -512,7 +510,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -520,7 +518,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -550,7 +548,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -561,7 +559,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -571,7 +569,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$in": [ { "$numberDouble": "0.0" @@ -583,7 +581,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -613,7 +611,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -624,7 +622,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -636,7 +634,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gte": { "$numberDouble": "0.0" } @@ -653,13 +651,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -689,7 +687,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -700,7 +698,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -712,7 +710,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "1.0" } @@ -747,7 +745,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -758,7 +756,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -770,7 +768,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lt": { "$numberDouble": "1.0" } @@ -782,7 +780,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -812,7 +810,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -823,7 +821,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -835,7 +833,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$lte": { "$numberDouble": "1.0" } @@ -852,13 +850,13 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -888,7 +886,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -899,7 +897,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -911,7 +909,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0.0" }, @@ -926,7 +924,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -956,7 +954,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -967,7 +965,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -979,7 +977,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -989,7 +987,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -1001,7 +999,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -1011,7 +1009,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -1041,7 +1039,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -1052,7 +1050,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1.0" } } @@ -1064,7 +1062,7 @@ "pipeline": [ { "$match": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$in": [ { "$numberDouble": "0.0" @@ -1078,7 +1076,7 @@ "result": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0.0" } } @@ -1108,7 +1106,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberInt": "0" } } @@ -1140,7 +1138,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gte": { "$numberInt": "0" } diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json similarity index 72% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json index fa09cb87df..392e722f1f 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "deleteOne", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -178,7 +176,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -189,7 +187,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -199,7 +196,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -226,7 +223,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -237,7 +234,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -247,7 +243,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -274,10 +270,10 @@ "deletes": [ { "q": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -286,12 +282,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -301,7 +297,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -315,24 +311,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDouble": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -345,7 +323,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json similarity index 79% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json index 59a304166b..bbcfb321f5 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "findOneAndUpdate", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -122,7 +120,7 @@ }, "update": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "2" } } @@ -131,7 +129,7 @@ }, "result": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -189,7 +187,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -200,7 +198,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -210,7 +207,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -237,7 +234,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -248,7 +245,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +254,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -283,10 +279,10 @@ "command": { "findAndModify": "default", "query": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -294,7 +290,7 @@ }, "update": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -304,7 +300,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -314,7 +309,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -328,24 +323,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDouble": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -358,7 +335,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -756,7 +733,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json similarity index 80% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json index 634230eaca..9f2c7c9911 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "find", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -124,7 +122,7 @@ "result": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -183,7 +181,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -194,7 +192,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -204,7 +201,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -231,7 +228,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -242,7 +239,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -252,7 +248,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -277,10 +273,10 @@ "command": { "find": "default", "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -291,7 +287,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -301,7 +296,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -327,7 +322,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -725,7 +720,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json similarity index 80% rename from test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json index cdc9f28e76..ce03576f88 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Double-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Double-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -24,7 +22,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -93,7 +91,7 @@ "arguments": { "document": { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "0" } } @@ -104,7 +102,7 @@ "arguments": { "document": { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "1" } } @@ -114,7 +112,7 @@ "name": "updateOne", "arguments": { "filter": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$numberDouble": "0" } @@ -122,7 +120,7 @@ }, "update": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$numberDouble": "2" } } @@ -187,7 +185,7 @@ "documents": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -208,7 +205,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -235,7 +232,7 @@ "documents": [ { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -246,7 +243,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -256,7 +252,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -285,10 +281,10 @@ "updates": [ { "q": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "Cq8kAAADcGF5bG9hZAB/JAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVjACAAAAAAPMgD8Rqnd94atKnMyPjlTwthUQ710MKJVqgtwNXLFWwAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWMAIAAAAAB7NkTFgsBuPIyxW5KtYZD4uxdgRynoAUwA2RDj3qZNdwADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFYwAgAAAAAENNdd+fMV7VQzshj6RdvsxqDHYkPDxggdUHw3JbmFBeAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVjACAAAAAA3eqduo9CWE5BePqBq7KCh5++QqXnyjCwybB15Zoeiu4AAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWMAIAAAAABYcCJA2jocGQiF/vZ9VDEktoK015OqfY64yZhmNPs9GQADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFYwAgAAAAAG32XaKjEO9YTSLMnDR5eiaUpgBQz0n4M3VpBqhWTaEnAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVjACAAAAAAZCaO06/DjOBjNrTxB8jdxM7X4XvNu9QTa2Y00kZJwgEAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWMAIAAAAADhvKHhF7zfNRnG8QfiIlKycV6L6iGxbGNKkHligA9rAgADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFYwAgAAAAAMfcJKj77c6y/RBQ5hb5LSNdvbYvCAHR59KLnbmxj1gAAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVjACAAAAAAiGdTXD22l1zDxHeF4NXUTiBnNTsdpzJRwM6riTPuOogAAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVjACAAAAAAl/wlBjSJW/hKkll8HSBCGw4Ce1pJ5rZuhVE09hKq4jQAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVjACAAAAAAKSsxERy2OBhb5MiH9/H2BET2oeU6yVihiAoWi/16FroAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVjACAAAAAACJ+VAwl9X88mjC76yzkWeL4K6AamNYjbNkpS9B6fQE4AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVjACAAAAAAZFgYh4nV8YN+/AAqe/QhKDkNBPg8KraCG7y3bOzhPV4AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVjACAAAAAARuBn3JdQ8so7Gy0XVH0CtlvtRoJWhSrJP6eRIqbyWh8AAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVjACAAAAAATxLY6SkcLPaoOBJpQsggEqoxgJgiY9seP3fBQM05PckAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVjACAAAAAA6O+wIMt3xMITuCxVrqOCNBPX5F122G+/Is+EYkzUvVYAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVjACAAAAAANv97jMd9JmZ7lMeCzK7jaZHZYsZJNl6N9g9WXzd2om0AAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVjACAAAAAAtOUZ2NeUWwESpq95O92dhqYBt8RtFnjT43E7k9mQF3oAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVjACAAAAAA5nAWj1Ek8p0MLYZEB3yoVFDfBYZ+/ZpIo71u+W9hprcAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVjACAAAAAA37oP30mTzVftI2FL9Uwyls/jqLqbmRDQk7p7nlx44uYAAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVjACAAAAAAhVIkl8p19VZ0gpg32s++Jda3qsVSVB5tV4CKrtjhE3IAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVjACAAAAAAriBex2kK//RPhyVpCYJDBng4l5w8jD3m8BF7dVAP0p8AAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVjACAAAAAAGeUzdG2kQrx5XypXJezZmPVzMYuqYZw7Bhbl4EPT0SMAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVjACAAAAAAbO+DEBY3STVMQN7CbxmHDUBYrDR+80e797u/VmiK4vcAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVjACAAAAAAHlE3RLKcXpXto7Mr8nRgzEsmhjfGh4vcgPxashCSMg4AAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVjACAAAAAA1c/sy3255NofBQrnBNSmVkSzMgsGPaaOUJShddVrnuQAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVjACAAAAAALBvQumG8m7/bzJjGWN2cHSAncdN8jMtOSmEhEhGom24AAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVjACAAAAAAGCDHuUJusGHKQ+r9nrFChmUUsRcqZKPGsRiLSk5gbFcAAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVjACAAAAAApZ6l+OrocOqgFek7WOqOP9JruTWZ+iW+5zdL3DZwzhkAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVjACAAAAAAeICKly5Xtwmfd4JFD+5e1UWpu4V3KoRim7jeBICDs+UAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVjACAAAAAA0/uh1hrAlGGna/njCVaCqBwubxkifzF0zjCDQrIJOtUAAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVjACAAAAAAAoJ4Ufdq45T9Aun5FupHlaBCIUsmUn6dXgV9KorpFikAAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVjACAAAAAAO/QFjczoznH95Iu0YEVMsU1GA1yxSGL8bcwSweNzAtkAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVjACAAAAAAUtdJucZoQvvPJiy65RonQrxBCcvtfHslpbgLbtWirCYAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVjACAAAAAAVc5njbLX1afpsuG662U/OBo4LanEk06lKbQtd95fswgAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVjACAAAAAAhNc5ovIaxgtS856ZBfG+cAcHAz+3ewzB9C6zZoaggj4AAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVjACAAAAAAc0bIRNzuyc1Y0hkEmm7xV6Xp/LBlAgMq/ISCHr0Tq44AAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVjACAAAAAAyGLbmHhe8eTQC8L2eDRcezUWULLZ9E8DPic7Mfp8/+4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVjACAAAAAAD3azcNDdohU6tHO6ypT7F0rtydAYIMmLIsX6y7cXVkMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVjACAAAAAAUd8v2RN1WagYan1LbV+G81vcJKQ6HsX2GSBtnzMH7XEAAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVjACAAAAAAHvQLJ2X4ncjVv1BsOd/jbVouRPwf4222WlGSzPyAfnsAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVjACAAAAAA1OntPAxq1NJ4sfca+ysHcg68mj7QHwdEZQVxHMlV6x0AAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVjACAAAAAAlZd3bPMUXRhTwLWflrQuKIwtauTswenUiaDi2ibHYYYAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVjACAAAAAAEEOCUo+ihRGl1kuHlabWBWUFyJPAXSXzpQB4od57cMEAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVjACAAAAAA1KYQISxmBcGmgDMAjBpTcfQr/oci60Kf4QtEYEXPKSQAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVjACAAAAAAisxsskU/9ykphpSjB6bMMY+6Tbt6wpjhE2dg1IULAgYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVjACAAAAAAnPa8PTfcaSSAdzNAC54IMTicbShbtt/cSnFHz7u7g8wAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVjACAAAAAAp5qCK8p0eTvAAUQ8fIOGyb2YM1YflQ8H6ggiWZ9vNI4AAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVjACAAAAAAx/LOuDwvpaPFJeKLowZW/4q7SUsLtdldB27zc4//alEAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVjACAAAAAA4SEPY+qEZ2dCu/9485IEVybiM3Z7szXYM+izxklNm14AAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVjACAAAAAA15ibZoBP1/pP27BDIfizDYNRDIpOmj8Ie34FvrtqjhMAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVjACAAAAAAElCgoMRrtlONqnyosjIpiz4mQTLq3+b/JUCVEq4QFiAAAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVjACAAAAAAPzOTJ088k9QdGUpZ1ubUdkw3pTGkNF8bui4a9wqeo08AAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVjACAAAAAAe4zSAgMQPjtz2t1DgxW1XOlV+fnNMXt6iQ7iV7DgQz4AAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVjACAAAAAA+i6aTnkoixxSOxDgQM/aWTylOy1m0ViCDU5asOM5ieYAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVjACAAAAAA1I5pLbnlFf/EfJlhE0RFIioDjzKrWNh4TO8H/ksrbMcAAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVjACAAAAAANfWpWWy9iu7pgMEy2aVCggGEtJtXKQG1+3zCf8D6iEkAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVjACAAAAAA/kYlJPSFn2g1l7cy6djV/7wgnlPWphZ0IKSY8uenUF0AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVjACAAAAAAyiZxtuOZQNSit4wFMJS3jnqLbDzyJ0OtDTKs6r0EO0cAAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVjACAAAAAAzBj8q9Ig4uhIc6rhNDVX1lII9lzfhJ0+ig8viBhdrtQAAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVjACAAAAAAqcsLIvb3Pe/QKYB9wQ86Ye2OFSAJEIY4ve7LMM3SwGMAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVjACAAAAAA9Kd65vKIUckNPvxl/HHNPJVnFHXyErp4qibmD7BloukAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVjACAAAAAAaFrBx0xXIryTe7V/kX+jfPHd0057x7k7MxzDAzi0RpcAAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVjACAAAAAAOl/tartywd/fJj5DNRsVH/ml9tJ8KkkCbKObsFe8lHcAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVjACAAAAAAfn/jP8K3QUILngCNkydHARyBvBHIFdaJjzV0EXsFruMAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVjACAAAAAAf0XyQQLd/HIYaf9EeAV0o2h12K1AV5piLCpZihznBXoAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVjACAAAAAAm2Ukk5kkQp+PDpBCCefQOqFKKZie4hINim3yvtypsEAAAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVjACAAAAAAnu33SngYPOtRgdJ3aBBuxWn80ti3OO6nZJcI6eh0VfQAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVjACAAAAAAUc5dxcbNYY3qMO8+Xm2xis+pH9NjPLrHqHenwDEImAEAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVjACAAAAAAnmej008JFQAgdImjk26PYvqdATjRYRufLi+vEVVwtucAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVjACAAAAAAhpNHI4+aRdWHCvZfEjlgpRBz36g05wN9p/hj3NEwc7EAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -296,7 +292,7 @@ }, "u": { "$set": { - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" } } @@ -308,7 +304,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -318,7 +313,7 @@ "subType": "04" } }, - "path": "encryptedDouble", + "path": "encryptedDoubleNoPrecision", "bsonType": "double", "queries": { "queryType": "rangePreview", @@ -332,24 +327,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDouble": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" @@ -362,7 +339,7 @@ "data": [ { "_id": 0, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ @@ -760,7 +737,7 @@ }, { "_id": 1, - "encryptedDouble": { + "encryptedDoubleNoPrecision": { "$$type": "binData" }, "__safeContent__": [ diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json index f2ea49ad75..b121c72f14 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -327,7 +323,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json index e69d912694..6b42ecfe82 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json similarity index 80% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json index d6a9c4b7e7..a5c397d0be 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -198,7 +196,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -255,7 +252,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -313,12 +309,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -351,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDoublePrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json index 0511c2e37e..b6df9463e8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -209,7 +207,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -266,7 +263,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -313,7 +309,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -331,7 +327,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -364,24 +359,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDoublePrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json similarity index 86% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json index 616101b4d4..1cea25545b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -203,7 +201,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -307,7 +303,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json similarity index 84% rename from test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json index 300202e227..7703c9057d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-DoublePrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-DoublePrecision-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -207,7 +205,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -264,7 +261,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -315,7 +311,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "CvoJAAADcGF5bG9hZADKCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVjACAAAAAAqZO+/+gRWlPaMOvuiXizSmBe7lp1VWg1vJ4UmW8o3bQAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWMAIAAAAAD4FTKJ6CTzKBAyAwZCLUoDEfnZTRZmhF1q/2hnDzmG9gADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFYwAgAAAAAHHy019aPatHTST+0wGsmukUcsQNQj6KpoS9b7iGeThAAAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVjACAAAAAAvUc1q7pyhjU0ilgmwiKkHIY3V4/LxO+Y2uT7eSpBOs8AAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWMAIAAAAACtbNc1DCoUUyzlkrYmJi4NlwOqLYmb6au4pDc8clXVXwADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFYwAgAAAAAAaqju6Dv8wqXxcsIbP67V1QGaD5kNTFofZ9Zuf1LGnKAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVjACAAAAAAQd2pWVqlmmLg8m8xbs7yLewmR0Z6UQgXofbCsMHaGSoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWMAIAAAAAAqzpfyBpr4Ano+nFWJyyTuIJelJgiRDnMHQqdeqV8JaAADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFYwAgAAAAAFhNY4qwNntyA+GIoNHZsTkIUbPgy4TBlvNnTPjp4bMFAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVjACAAAAAAFKqAqXG/ktejFQ7fM2aobO2VmEvZLXnRaJH97Jy/sJYAAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVjACAAAAAA7ty+Nif6KjS3v1zWKaHX9n4Zj3XC4ajuCduKNIYr3l8AAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVjACAAAAAABSWO0Ii+NGcsHZQ9MR5EjPXVKeXlI4FQ1pcxeKDiuooAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVjACAAAAAAKUhYSt4nvvUfbNgPJ2E79SciVZ0ZzbzoZ2nKr4ewNLsAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVjACAAAAAAzCICkPZAkfTiD0MUt155dIPgLJ4/e0qFTM2FR0U261YAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVjACAAAAAAn27H0Mpwatgc1R/4nXSRjsG2PzB0ol5YR9f3mCb2y/0AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVjACAAAAAAMinHEu4wkbeOpdZbXQ94q5o5pIEubqXUDrTRYOGmJC0AAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVjACAAAAAAvlZo8Qj3eAdxzZxN5sHKhxi+a9Npj7cZC5+pE6qrOawAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -335,7 +331,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -368,24 +363,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedDoublePrecision": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json index 536415f3fe..9c2536264d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json index 6abd773da8..58ccf3efc8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json similarity index 83% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json index 9d5bff1d19..b20b2750bb 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -195,7 +193,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -249,7 +246,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -295,7 +291,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -304,12 +300,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -339,24 +335,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedInt": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json index 4bf57700c9..f9c189ace9 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -206,7 +204,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -322,7 +318,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -352,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedInt": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json index 6f6022e749..874d4760c8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -200,7 +198,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +251,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -298,7 +294,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -309,7 +305,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json index 17d23b957f..c2b62b4d1c 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Int-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Int-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -306,7 +302,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -326,7 +322,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -356,24 +351,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedInt": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json index 3f1c723bd2..afc0f97be1 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Aggregate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -318,7 +314,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json similarity index 99% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json index 972388c6c4..cda941de8a 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Correctness.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Correctness.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json similarity index 83% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json index 89e1898406..ad344e21b4 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Delete.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -195,7 +193,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -249,7 +246,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -295,7 +291,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -304,12 +300,12 @@ "limit": 1 } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -339,24 +335,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedLong": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json index 59342a343a..d447200468 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-FindOneAndUpdate.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -206,7 +204,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -260,7 +257,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -304,7 +300,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -322,7 +318,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -352,24 +347,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedLong": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json similarity index 88% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json index 882e52170d..4eb837f28b 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-InsertFind.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -200,7 +198,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -254,7 +251,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -298,7 +294,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -309,7 +305,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json similarity index 85% rename from test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json index 92e3e390a5..3ba7f17c14 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-Long-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-Long-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -204,7 +202,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -258,7 +255,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -306,7 +302,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "CnEFAAADcGF5bG9hZABBBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVjACAAAAAAKs6X8gaa+AJ6PpxVicsk7iCXpSYIkQ5zB0KnXqlfCWgAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWMAIAAAAABYTWOKsDZ7cgPhiKDR2bE5CFGz4MuEwZbzZ0z46eGzBQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFYwAgAAAAABSqgKlxv5LXoxUO3zNmqGztlZhL2S150WiR/eycv7CWAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVjACAAAAAAfsofSP7nQHv8ic8ZW0aNlWxplS46Z+mywPR4rQk+wcgAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWMAIAAAAADu3L42J/oqNLe/XNYpodf2fhmPdcLhqO4J24o0hiveXwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFYwAgAAAAAAUljtCIvjRnLB2UPTEeRIz11Snl5SOBUNaXMXig4rqKAAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVjACAAAAAAD51NYesiO4Fo7w7iWBfqAFxEqkaDVctpvzZ28nT4SE8AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWMAIAAAAABJDtFhJ2tPbowp1UUmOCN/rqSqHRL1dtMu0c47vIlK4wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FYwAgAAAAAE6kvmXPqTnYIH4EJmNhy8OLVJZFOmdiBXLMorhELjKWAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVjACAAAAAA85AiE+bNFAYQTXQAFexgeczdVhf8FUnf16WzJlI/kmsAAAVlACAAAAAA65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+wSY20AAAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAQAAAAA=", + "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", "subType": "06" } } @@ -326,7 +322,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -356,24 +351,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedLong": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } }, "$db": "default" diff --git a/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json similarity index 95% rename from test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json rename to test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json index 9eddf1c99c..e5e9ddc821 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Range-WrongType.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Range-WrongType.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.2.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Update.json similarity index 87% rename from test/client-side-encryption/spec/legacy/fle2-Update.json rename to test/client-side-encryption/spec/legacy/fle2v2-Update.json index 090f44f9ac..14104e2cd8 100644 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Update.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -13,9 +14,6 @@ "collection_name": "default", "data": [], "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -186,7 +184,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -233,7 +230,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -246,12 +243,12 @@ } } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -282,24 +279,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, @@ -450,7 +429,6 @@ "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -497,7 +475,7 @@ "encryptedIndexed": { "$eq": { "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", "subType": "06" } } @@ -512,12 +490,12 @@ } } ], + "ordered": true, "encryptionInformation": { "type": 1, "schema": { "default.default": { "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", "ecocCollection": "enxcol_.default.ecoc", "fields": [ { @@ -548,24 +526,6 @@ } ] } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } } } }, diff --git a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json similarity index 93% rename from test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json rename to test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json index e70ca7c72d..4adf6fc07d 100644 --- a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -1,7 +1,8 @@ { "runOn": [ { - "minServerVersion": "6.0.0", + "minServerVersion": "7.0.0", + "serverless": "forbid", "topology": [ "replicaset", "sharded", @@ -29,9 +30,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -108,9 +106,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -182,9 +177,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -262,9 +254,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -345,9 +334,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -442,9 +428,6 @@ }, "encryptedFieldsMap": { "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json new file mode 100644 index 0000000000..443aa0aa23 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -0,0 +1,200 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "cse-timeouts-db", + "collection_name": "cse-timeouts-coll", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 3 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 20 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index 89860de0c0..6b3c9664a9 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -321,7 +321,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -503,7 +506,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -687,7 +693,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -873,7 +882,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -1055,7 +1067,10 @@ "modifiedCount": 4, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -1218,7 +1233,10 @@ "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } }, diff --git a/test/test_encryption.py b/test/test_encryption.py index 872e0356ad..af8f54cd07 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1329,9 +1329,9 @@ def test_04_aws_endpoint_invalid_port(self): "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:12345", } - with self.assertRaises(EncryptionError) as ctx: + with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345") as ctx: self.client_encryption.create_data_key("aws", master_key=master_key) - self.assertIsInstance(ctx.exception.cause, socket.error) + self.assertIsInstance(ctx.exception.cause, AutoReconnect) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): @@ -2198,7 +2198,7 @@ def test_02_add_key_alt_name(self): # https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption class TestExplicitQueryableEncryption(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") @@ -2206,9 +2206,6 @@ def setUp(self): self.key1_id = self.key1_document["_id"] self.db = self.client.test_queryable_encryption self.client.drop_database(self.db) - self.db.command("create", self.encrypted_fields["escCollection"]) - self.db.command("create", self.encrypted_fields["eccCollection"]) - self.db.command("create", self.encrypted_fields["ecocCollection"]) self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) @@ -2425,7 +2422,7 @@ def test_02_success(self): class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): # Queryable Encryption is not supported on Standalone topology. @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() @@ -2517,7 +2514,7 @@ def MongoClient(**kwargs): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#range-explicit-encryption class TestRangeQueryProse(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 2, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") @@ -2710,7 +2707,7 @@ def test_int(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") diff --git a/test/unified_format.py b/test/unified_format.py index 18130290b5..584ee04ddd 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1016,7 +1016,9 @@ def process_error(self, exception, spec): if is_timeout_error: self.assertIsInstance(exception, PyMongoError) - self.assertTrue(exception.timeout, msg=exception) + if not exception.timeout: + # Re-raise the exception for better diagnostics. + raise exception if error_contains: if isinstance(exception, BulkWriteError): diff --git a/test/utils.py b/test/utils.py index 842e9e3a7b..b39375925c 100644 --- a/test/utils.py +++ b/test/utils.py @@ -358,17 +358,13 @@ def __getitem__(self, item): class CompareType(object): - """Class that compares equal to any object of the given type.""" + """Class that compares equal to any object of the given type(s).""" - def __init__(self, type): - self.type = type + def __init__(self, types): + self.types = types def __eq__(self, other): - return isinstance(other, self.type) - - def __ne__(self, other): - """Needed for Python 2.""" - return not self.__eq__(other) + return isinstance(other, self.types) class FunctionCallRecorder(object): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4252420909..6530f39da6 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -32,7 +32,7 @@ ) from typing import List -from bson import decode, encode +from bson import ObjectId, decode, encode from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON @@ -336,22 +336,24 @@ def _run_op(self, sessions, collection, op, in_with_transaction): if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: out = self.run_operation(sessions, collection, op.copy()) + exc = context.exception if expect_error_message(expected_result): - if isinstance(context.exception, BulkWriteError): - errmsg = str(context.exception.details).lower() + if isinstance(exc, BulkWriteError): + errmsg = str(exc.details).lower() else: - errmsg = str(context.exception).lower() + errmsg = str(exc).lower() self.assertIn(expected_result["errorContains"].lower(), errmsg) if expect_error_code(expected_result): - self.assertEqual( - expected_result["errorCodeName"], context.exception.details.get("codeName") - ) + self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName")) if expect_error_labels_contain(expected_result): - self.assertErrorLabelsContain( - context.exception, expected_result["errorLabelsContain"] - ) + self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"]) if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit(context.exception, expected_result["errorLabelsOmit"]) + self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"]) + if expect_timeout_error(expected_result): + self.assertIsInstance(exc, PyMongoError) + if not exc.timeout: + # Re-raise the exception for better diagnostics. + raise exc # Reraise the exception if we're in the with_transaction # callback. @@ -427,6 +429,12 @@ def check_events(self, test, listener, session_ids): elif key not in actual: self.fail("Expected key [%s] in %r" % (key, actual)) else: + # Workaround an incorrect command started event in fle2v2-CreateCollection.yml + # added in DRIVERS-2524. + if key == "encryptedFields": + for n in ("eccCollection", "ecocCollection", "escCollection"): + if val.get(n) is None: + val.pop(n, None) self.assertEqual( val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) ) @@ -617,6 +625,13 @@ def expect_error_labels_omit(expected_result): return False +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] + + return False + + def expect_error(op): expected_result = op.get("result") return ( @@ -625,6 +640,7 @@ def expect_error(op): or expect_error_code(expected_result) or expect_error_labels_contain(expected_result) or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) ) @@ -644,6 +660,11 @@ def decode_raw(val): TYPES = { "binData": Binary, "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, } @@ -654,7 +675,11 @@ def wrap_types(val): if isinstance(val, abc.Mapping): typ = val.get("$$type") if typ: - return CompareType(TYPES[typ]) + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) d = {} for key in val: d[key] = wrap_types(val[key]) From 3f1e960c4bb97b2864beee79ec42eb1483a79a3b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 1 May 2023 12:24:18 -0500 Subject: [PATCH 0894/2111] PYTHON-3690 Do not install unittest-xml-reporting on MacOS EG Hosts (#1200) --- .evergreen/utils.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index a474ce545e..c97cc34362 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -30,7 +30,10 @@ createvirtualenv () { fi python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel unittest-xml-reporting + python -m pip install --upgrade setuptools wheel + # lxml only has wheels for macos 10.15+ + python -m pip install unittest-xml-reporting || true + } # Usage: From eb137fdf5cdf61788a9d490a095361e16b6a3f7a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Apr 2023 15:01:25 -0700 Subject: [PATCH 0895/2111] PYTHON-3686 codec_options is no longer shadowed --- .github/workflows/test-python.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index bb0b836788..b4a8177fda 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -63,8 +63,6 @@ jobs: - name: Run mypy run: | mypy --install-types --non-interactive bson gridfs tools pymongo - # Test overshadowed codec_options.py file - mypy --install-types --non-interactive bson/codec_options.py mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test python -m pip install -U typing_extensions mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py From 14e8b011c20aaf88a579c08b27a4be5e8eac0b89 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 2 May 2023 13:45:55 -0700 Subject: [PATCH 0896/2111] PYTHON-3700 Clean up docs for create_index/drop_indexes (#1201) --- pymongo/collection.py | 6 +----- pymongo/operations.py | 4 ++-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pymongo/collection.py b/pymongo/collection.py index a5d3be9e05..ac78b6878d 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2030,11 +2030,11 @@ def create_index( pairs specifying the index to create - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - arguments - `comment` (optional): A user-provided comment to attach to this command. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword + arguments. .. versionchanged:: 4.4 Allow passing a list containing (key, direction) pairs @@ -2082,14 +2082,11 @@ def drop_indexes( :Parameters: - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - arguments - `comment` (optional): A user-provided comment to attach to this command. - `**kwargs` (optional): optional arguments to the createIndexes command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. @@ -2100,7 +2097,6 @@ def drop_indexes( .. versionchanged:: 3.4 Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. - """ if comment is not None: kwargs["comment"] = comment diff --git a/pymongo/operations.py b/pymongo/operations.py index f73262074d..ad119f2ecc 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -480,10 +480,10 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: :Parameters: - `keys`: a single key or a list containing (key, direction) pairs - or keys specifying the index to create + or keys specifying the index to create. - `**kwargs` (optional): any additional index creation options (see the above list) should be passed as keyword - arguments + arguments. .. versionchanged:: 3.11 Added the ``hidden`` option. From d340710e3d488f3906d0e3fa4d4f25e56779fc3a Mon Sep 17 00:00:00 2001 From: Jean-Christophe Fillion-Robin Date: Wed, 3 May 2023 17:47:24 -0400 Subject: [PATCH 0897/2111] PYTHON-3703 Fix typos and add codespell pre-commit hook (#1203) Update pre-commit config adding "codespell" hook --- .pre-commit-config.yaml | 13 ++++++ bson/__init__.py | 2 +- bson/_cbsonmodule.c | 2 +- bson/objectid.py | 2 +- bson/time64.c | 4 +- doc/changelog.rst | 4 +- doc/contributors.rst | 1 + doc/examples/bulk.rst | 2 +- doc/examples/type_hints.rst | 10 ++-- gridfs/grid_file.py | 2 +- pymongo/collection.py | 6 +-- pymongo/common.py | 2 +- pymongo/message.py | 2 +- pymongo/ocsp_support.py | 2 +- pymongo/results.py | 2 +- pymongo/uri_parser.py | 4 +- .../spec/legacy/fle2v2-MissingKey.json | 4 +- test/csot/deprecated-options.json | 2 +- test/mod_wsgi_test/mod_wsgi_test.conf | 2 +- test/mypy_fails/raw_bson_document.py | 8 ++-- test/mypy_fails/typedict_client.py | 8 ++-- test/test_read_write_concern_spec.py | 2 +- test/test_typing.py | 46 +++++++++---------- 23 files changed, 73 insertions(+), 59 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d8455981f0..f19f15682c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -83,3 +83,16 @@ repos: files: \.py$ exclude: "^(test|tools)/" stages: [manual] + +- repo: https://github.com/codespell-project/codespell + rev: "v2.2.4" + hooks: + - id: codespell + # Examples of errors or updates to justify the exceptions: + # - test/test_on_demand_csfle.py:44: FLE ==> FILE + # - test/test_bson.py:1043: fo ==> of, for, to, do, go + # - test/bson_corpus/decimal128-4.json:98: Infinit ==> Infinite + # - test/test_bson.py:267: isnt ==> isn't + # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine + # - test/test_client.py:188: te ==> the, be, we, to + args: ["-L", "fle,fo,infinit,isnt,nin,te"] diff --git a/bson/__init__.py b/bson/__init__.py index 700a5d4cf8..d95c511fc7 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1282,7 +1282,7 @@ def decode_file_iter( # Read size of next object. size_data = file_obj.read(4) if not size_data: - break # Finished with file normaly. + break # Finished with file normally. elif len(size_data) != 4: raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 8678e8050b..e45a11be32 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -968,7 +968,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } } - /* No _type_marker attibute or not one of our types. */ + /* No _type_marker attribute or not one of our types. */ if (PyBool_Check(value)) { const char c = (value == Py_True) ? 0x01 : 0x00; diff --git a/bson/objectid.py b/bson/objectid.py index 4bc0243532..1fab986b8b 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -231,7 +231,7 @@ def __getstate__(self) -> bytes: def __setstate__(self, value: Any) -> None: """explicit state set from pickling""" - # Provide backwards compatability with OIDs + # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): oid = value["_ObjectId__id"] diff --git a/bson/time64.c b/bson/time64.c index 8d2886592e..a21fbb90bd 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -73,7 +73,7 @@ static const Year years_in_gregorian_cycle = 400; #define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; -/* Year range we can trust the time funcitons with */ +/* Year range we can trust the time functions with */ #define MAX_SAFE_YEAR 2037 #define MIN_SAFE_YEAR 1971 @@ -739,7 +739,7 @@ struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st in a non-leap xx00. There is one point in the cycle we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st comming out as + year. So we need to correct for Dec 31st coming out as the 366th day of the year. */ if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) diff --git a/doc/changelog.rst b/doc/changelog.rst index 19830b09ac..db2259f95f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -2805,7 +2805,7 @@ Important New Features: - The URI parser has been moved into its own module and can be used directly by application code. - AutoReconnect exception now provides information about the error that - actually occured instead of a generic failure message. + actually occurred instead of a generic failure message. - A number of new helper methods have been added with options for setting and unsetting cursor flags, re-indexing a collection, fsync and locking a server, and getting the server's current operations. @@ -2930,7 +2930,7 @@ Issues resolved - `PYTHON-186 `_: When storing integers, type is selected according to value instead of type - `PYTHON-173 `_: - as_class option is not propogated by Cursor.clone + as_class option is not propagated by Cursor.clone - `PYTHON-113 `_: Redunducy in MasterSlaveConnection diff --git a/doc/contributors.rst b/doc/contributors.rst index 7ab87f7790..7efda5b20d 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -93,3 +93,4 @@ The following is a list of people who have contributed to - Ishmum Jawad Khan (ishmum123) - Arie Bovenberg (ariebovenberg) - Ben Warner (bcwarner) +- Jean-Christophe Fillion-Robin (jcfr) diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst index c2c5acc687..3ed8e09645 100644 --- a/doc/examples/bulk.rst +++ b/doc/examples/bulk.rst @@ -80,7 +80,7 @@ of operations performed. The first write failure that occurs (e.g. duplicate key error) aborts the remaining operations, and PyMongo raises -:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attibute of +:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attribute of the exception instance provides the execution results up until the failure occurred and details about the failure - including the operation that caused the failure. diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst index f202ab32e1..8aaaff81eb 100644 --- a/doc/examples/type_hints.rst +++ b/doc/examples/type_hints.rst @@ -264,7 +264,7 @@ Troubleshooting Client Type Annotation ~~~~~~~~~~~~~~~~~~~~~~ -If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the followig ``mypy`` error:: +If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the following ``mypy`` error:: from pymongo import MongoClient client = MongoClient() # error: Need type annotation for "client" @@ -313,10 +313,10 @@ Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocu coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) - retreived = coll.find_one({"_id": doc["_id"]}) - assert retreived is not None - assert len(retreived.raw) > 0 - retreived[ + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + assert len(retrieved.raw) > 0 + retrieved[ "foo" ] = "bar" # error: Unsupported target for indexed assignment # ("RawBSONDocument") [index] diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 50efc0cd23..5ec6352684 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -381,7 +381,7 @@ def write(self, data: Any) -> None: def writelines(self, sequence: Iterable[Any]) -> None: """Write a sequence of strings to the file. - Does not add seperators. + Does not add separators. """ for line in sequence: self.write(line) diff --git a/pymongo/collection.py b/pymongo/collection.py index ac78b6878d..91b4013ee8 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -2076,7 +2076,7 @@ def drop_indexes( ) -> None: """Drops all indexes on this collection. - Can be used on non-existant collections or collections with no indexes. + Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error. :Parameters: @@ -2112,7 +2112,7 @@ def drop_index( ) -> None: """Drops the specified index on this collection. - Can be used on non-existant collections or collections with no + Can be used on non-existent collections or collections with no indexes. Raises OperationFailure on an error (e.g. trying to drop an index that does not exist). `index_or_name` can be either an index name (as returned by `create_index`), @@ -2683,7 +2683,7 @@ def rename( if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") if new_name[0] == "." or new_name[-1] == ".": - raise InvalidName("collecion names must not start or end with '.'") + raise InvalidName("collection names must not start or end with '.'") if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") diff --git a/pymongo/common.py b/pymongo/common.py index ba861c1545..4b8aeb020c 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -469,7 +469,7 @@ def validate_document_class( raise TypeError( "%s must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping" % (option,) + "subclass of collections.MutableMapping" % (option,) ) return value diff --git a/pymongo/message.py b/pymongo/message.py index 9fa64a875a..f7a173ca8a 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1077,7 +1077,7 @@ def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf): new_message_size = buf.tell() + doc_length # Does first document exceed max_message_size? doc_too_large = idx == 0 and (new_message_size > max_message_size) - # When OP_MSG is used unacknowleged we have to check + # When OP_MSG is used unacknowledged we have to check # document size client side or applications won't be notified. # Otherwise we let the server deal with documents that are too large # since ordered=False causes those documents to be skipped instead of diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 3a201f1f5e..e7f4a15d84 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -312,7 +312,7 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): return 0 if not user_data.check_ocsp_endpoint: _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") - # No stapled OCSP response, checking responder URI diabled, soft fail. + # No stapled OCSP response, checking responder URI disabled, soft fail. return 1 # https://tools.ietf.org/html/rfc6960#section-3.1 ext = _get_extension(cert, _AuthorityInformationAccess) diff --git a/pymongo/results.py b/pymongo/results.py index 5803900398..b072979499 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -45,7 +45,7 @@ def acknowledged(self) -> bool: .. note:: If the :attr:`acknowledged` attribute is ``False`` all other - attibutes of this class will raise + attributes of this class will raise :class:`~pymongo.errors.InvalidOperation` when accessed. Values for other attributes cannot be determined if the write operation was unacknowledged. diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 398dfbff00..e3aeee399e 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -42,7 +42,7 @@ def _unquoted_percent(s): """Check for unescaped percent signs. - :Paramaters: + :Parameters: - `s`: A string. `s` can have things like '%25', '%2525', and '%E2%85%A8' but cannot have unquoted percent like '%foo'. """ @@ -64,7 +64,7 @@ def parse_userinfo(userinfo: str) -> Tuple[str, str]: Returns a 2-tuple containing the unescaped username followed by the unescaped password. - :Paramaters: + :Parameters: - `userinfo`: A string of the form : """ if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): diff --git a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json index 0b7e86bca3..a072454112 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -55,7 +55,7 @@ "key_vault_data": [], "tests": [ { - "description": "FLE2 encrypt fails with mising key", + "description": "FLE2 encrypt fails with missing key", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -86,7 +86,7 @@ ] }, { - "description": "FLE2 decrypt fails with mising key", + "description": "FLE2 decrypt fails with missing key", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json index 0e2bdefd73..9c9b9a2288 100644 --- a/test/csot/deprecated-options.json +++ b/test/csot/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/mod_wsgi_test/mod_wsgi_test.conf b/test/mod_wsgi_test/mod_wsgi_test.conf index 9505933e96..6a77c675d5 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.conf +++ b/test/mod_wsgi_test/mod_wsgi_test.conf @@ -27,7 +27,7 @@ WSGISocketPrefix /tmp/ WSGIProcessGroup mod_wsgi_test - # For the convienience of unittests, rather than hard-code the location of + # For the convenience of unittests, rather than hard-code the location of # mod_wsgi_test.wsgi, include it in the URL, so # http://localhost/location-of-pymongo-checkout will work: diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py index 427140dfac..0e17224874 100644 --- a/test/mypy_fails/raw_bson_document.py +++ b/test/mypy_fails/raw_bson_document.py @@ -5,9 +5,9 @@ coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) -retreived = coll.find_one({"_id": doc["_id"]}) -assert retreived is not None -assert len(retreived.raw) > 0 -retreived[ +retrieved = coll.find_one({"_id": doc["_id"]}) +assert retrieved is not None +assert len(retrieved.raw) > 0 +retrieved[ "foo" ] = "bar" # error: Unsupported target for indexed assignment ("RawBSONDocument") [index] diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py index 24dd84ee28..6619df10fd 100644 --- a/test/mypy_fails/typedict_client.py +++ b/test/mypy_fails/typedict_client.py @@ -10,9 +10,9 @@ class Movie(TypedDict): client: MongoClient[Movie] = MongoClient() coll = client.test.test -retreived = coll.find_one({"_id": "foo"}) -assert retreived is not None -assert retreived["year"] == 1 +retrieved = coll.find_one({"_id": "foo"}) +assert retrieved is not None +assert retrieved["year"] == 1 assert ( - retreived["name"] == 2 + retrieved["name"] == 2 ) # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap] diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 5cc4845e32..26bc111f00 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -280,7 +280,7 @@ def run_test(self): self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) if "readConcern" in test_case: - # Any string for 'level' is equaly valid + # Any string for 'level' is equally valid read_concern = ReadConcern(**test_case["readConcern"]) self.assertEqual(read_concern.document, test_case["readConcernDocument"]) self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) diff --git a/test/test_typing.py b/test/test_typing.py index 8fc0f5a23e..0aebc707cd 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -111,11 +111,11 @@ def test_insert_find(self) -> None: coll2 = self.client.test.test2 result = self.coll.insert_one(doc) self.assertEqual(result.inserted_id, doc["_id"]) - retreived = self.coll.find_one({"_id": doc["_id"]}) - if retreived: + retrieved = self.coll.find_one({"_id": doc["_id"]}) + if retrieved: # Documents returned from find are mutable. - retreived["new_field"] = 1 - result2 = coll2.insert_one(retreived) + retrieved["new_field"] = 1 + result2 = coll2.insert_one(retrieved) self.assertEqual(result2.inserted_id, result.inserted_id) def test_cursor_iterable(self) -> None: @@ -182,9 +182,9 @@ def test_default_document_type(self) -> None: coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) - retreived = coll.find_one({"_id": doc["_id"]}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + retrieved["a"] = 1 def test_aggregate_pipeline(self) -> None: coll3 = self.client.test.test3 @@ -329,26 +329,26 @@ class TestDocumentType(unittest.TestCase): def test_default(self) -> None: client: MongoClient = MongoClient() coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 @only_type_check def test_explicit_document_type(self) -> None: client: MongoClient[Dict[str, Any]] = MongoClient() coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 @only_type_check def test_typeddict_document_type(self) -> None: client: MongoClient[Movie] = MongoClient() coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - assert retreived["year"] == 1 - assert retreived["name"] == "a" + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert retrieved["year"] == 1 + assert retrieved["name"] == "a" @only_type_check def test_typeddict_document_type_insertion(self) -> None: @@ -450,17 +450,17 @@ def test_typeddict_find_notrequired(self): def test_raw_bson_document_type(self) -> None: client = MongoClient(document_class=RawBSONDocument) coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - assert len(retreived.raw) > 0 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert len(retrieved.raw) > 0 @only_type_check def test_son_document_type(self) -> None: client = MongoClient(document_class=SON[str, Any]) coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 def test_son_document_type_runtime(self) -> None: client = MongoClient(document_class=SON[str, Any], connect=False) From 3d3e4dc2384606412448fad9103b9e4e296c4faa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 4 May 2023 18:24:14 -0700 Subject: [PATCH 0898/2111] PYTHON-3464 Add FaaS platform to handshake metadata (#1204) Truncate metadata env, os, and platform fields if needed. --- pymongo/pool.py | 111 +++++++++++++++++++- test/test_client.py | 244 +++++++++++++++++++++++++++++--------------- 2 files changed, 272 insertions(+), 83 deletions(-) diff --git a/pymongo/pool.py b/pymongo/pool.py index 6355692ac9..42e6a642a4 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -23,8 +23,9 @@ import threading import time import weakref -from typing import Any, NoReturn, Optional +from typing import Any, Dict, NoReturn, Optional +import bson from bson import DEFAULT_CODEC_OPTIONS from bson.son import SON from pymongo import __version__, _csot, auth, helpers @@ -231,6 +232,108 @@ def _set_keepalive_times(sock): ) +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> Dict[str, Any]: + env: Dict[str, Any] = {} + # Skip if multiple (or no) envs are matched. + if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations +def _truncate_metadata(metadata): + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. + overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) + + # If the first getaddrinfo call of this interpreter's life is on a thread, # while the main thread holds the import lock, getaddrinfo deadlocks trying # to import the IDNA codec. Import it here, where presumably we're on the @@ -364,6 +467,12 @@ def __init__( if driver.platform: self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) + @property def _credentials(self): """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" diff --git a/test/test_client.py b/test/test_client.py index b2f128f11a..624c460c08 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -28,6 +28,7 @@ import threading import time from typing import Iterable, Type, no_type_check +from unittest.mock import patch sys.path[0:0] = [""] @@ -113,7 +114,6 @@ class ClientUnitTest(unittest.TestCase): client: MongoClient @classmethod - @client_context.require_connection def setUpClass(cls): cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) @@ -1751,6 +1751,86 @@ def test_sigstop_sigcont(self): self.assertIn("TEST COMPLETED", log_output) self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if expected_env is not None: + metadata["env"] = expected_env + with rs_or_single_client(serverSelectionTimeoutMS=10000) as client: + client.admin.command("ping") + options = client._MongoClient__options + self.assertEqual(options.pool_options.metadata, metadata) + + def test_handshake_01_aws(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) + + def test_handshake_02_azure(self): + self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + def test_handshake_03_gcp(self): + self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. + self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + + def test_handshake_04_vercel(self): + self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) + + def test_handshake_05_multiple(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. + self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) + + def test_handshake_06_region_too_long(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) + + def test_handshake_07_memory_invalid_int(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big"}, + {"name": "aws.lambda"}, + ) + + def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" @@ -1867,6 +1947,87 @@ def test_exhaust_getmore_network_error(self): self.assertNotIn(sock_info, pool.sockets) self.assertEqual(0, pool.requests) + def test_gevent_task(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import spawn + + def poller(): + while True: + client_context.client.pymongo_test.test.insert_one({}) + + task = spawn(poller) + task.kill() + self.assertTrue(task.dead) + + def test_gevent_timeout(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import Timeout, spawn + + client = rs_or_single_client(maxPoolSize=1) + coll = client.pymongo_test.test + coll.insert_one({}) + + def contentious_task(): + # The 10 second timeout causes this test to fail without blocking + # forever if a bug like PYTHON-2334 is reintroduced. + with Timeout(10): + coll.find_one({"$where": delay(1)}) + + def timeout_task(): + with Timeout(0.5): + try: + coll.find_one({}) + except Timeout: + pass + + ct = spawn(contentious_task) + tt = spawn(timeout_task) + tt.join(15) + ct.join(15) + self.assertTrue(tt.dead) + self.assertTrue(ct.dead) + self.assertIsNone(tt.get()) + self.assertIsNone(ct.get()) + + def test_gevent_timeout_when_creating_connection(self): + if not gevent_monkey_patched(): + raise SkipTest("Must be running monkey patched by gevent") + from gevent import Timeout, spawn + + client = rs_or_single_client() + self.addCleanup(client.close) + coll = client.pymongo_test.test + pool = get_pool(client) + + # Patch the pool to delay the connect method. + def delayed_connect(*args, **kwargs): + time.sleep(3) + return pool.__class__.connect(pool, *args, **kwargs) + + pool.connect = delayed_connect + + def timeout_task(): + with Timeout(1): + try: + coll.find_one({}) + return False + except Timeout: + return True + + tt = spawn(timeout_task) + tt.join(10) + + # Assert that we got our active_sockets count back + self.assertEqual(pool.active_sockets, 0) + # Assert the greenlet is dead + self.assertTrue(tt.dead) + # Assert that the Timeout was raised all the way to the try + self.assertTrue(tt.get()) + # Unpatch the instance. + del pool.connect + class TestClientLazyConnect(IntegrationTest): """Test concurrent operations on a lazily-connecting MongoClient.""" @@ -2046,87 +2207,6 @@ def test_network_error_on_delete(self): callback = lambda client: client.db.collection.delete_many({}) self._test_network_error(callback) - def test_gevent_task(self): - if not gevent_monkey_patched(): - raise SkipTest("Must be running monkey patched by gevent") - from gevent import spawn - - def poller(): - while True: - client_context.client.pymongo_test.test.insert_one({}) - - task = spawn(poller) - task.kill() - self.assertTrue(task.dead) - - def test_gevent_timeout(self): - if not gevent_monkey_patched(): - raise SkipTest("Must be running monkey patched by gevent") - from gevent import Timeout, spawn - - client = rs_or_single_client(maxPoolSize=1) - coll = client.pymongo_test.test - coll.insert_one({}) - - def contentious_task(): - # The 10 second timeout causes this test to fail without blocking - # forever if a bug like PYTHON-2334 is reintroduced. - with Timeout(10): - coll.find_one({"$where": delay(1)}) - - def timeout_task(): - with Timeout(0.5): - try: - coll.find_one({}) - except Timeout: - pass - - ct = spawn(contentious_task) - tt = spawn(timeout_task) - tt.join(15) - ct.join(15) - self.assertTrue(tt.dead) - self.assertTrue(ct.dead) - self.assertIsNone(tt.get()) - self.assertIsNone(ct.get()) - - def test_gevent_timeout_when_creating_connection(self): - if not gevent_monkey_patched(): - raise SkipTest("Must be running monkey patched by gevent") - from gevent import Timeout, spawn - - client = rs_or_single_client() - self.addCleanup(client.close) - coll = client.pymongo_test.test - pool = get_pool(client) - - # Patch the pool to delay the connect method. - def delayed_connect(*args, **kwargs): - time.sleep(3) - return pool.__class__.connect(pool, *args, **kwargs) - - pool.connect = delayed_connect - - def timeout_task(): - with Timeout(1): - try: - coll.find_one({}) - return False - except Timeout: - return True - - tt = spawn(timeout_task) - tt.join(10) - - # Assert that we got our active_sockets count back - self.assertEqual(pool.active_sockets, 0) - # Assert the greenlet is dead - self.assertTrue(tt.dead) - # Assert that the Timeout was raised all the way to the try - self.assertTrue(tt.get()) - # Unpatch the instance. - del pool.connect - class TestClientPool(MockClientTest): @client_context.require_connection From ae83a0b8be9bbcff2d6f251fa8bf1cdf545c99b1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 8 May 2023 11:49:05 -0500 Subject: [PATCH 0899/2111] PYTHON-3570 Deprecate currentOp/collStats commands by 7.0 (#1205) --- pymongo/database.py | 4 +-- test/test_examples.py | 2 +- .../collectionData-createOptions.json | 32 ++++++++++++------- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/pymongo/database.py b/pymongo/database.py index 358b946201..1e19d860e3 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -740,10 +740,10 @@ def command( >>> db.command("buildinfo") - For a command where the value matters, like ``{collstats: + For a command where the value matters, like ``{count: collection_name}`` we can do: - >>> db.command("collstats", collection_name) + >>> db.command("count", collection_name) For commands that take additional arguments we can use kwargs. So ``{filemd5: object_id, root: file_root}`` becomes: diff --git a/test/test_examples.py b/test/test_examples.py index 9c1adda69c..c08cb17e20 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -869,7 +869,7 @@ def test_commands(self): # End runCommand Example 1 # Start runCommand Example 2 - db.command("collStats", "restaurants") + db.command("count", "restaurants") # End runCommand Example 2 def test_index_management(self): diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index 64f8fb02ff..19edc2247b 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -49,19 +49,29 @@ "description": "collection is created with the correct options", "operations": [ { - "name": "runCommand", - "object": "database0", + "object": "collection0", + "name": "aggregate", "arguments": { - "commandName": "collStats", - "command": { - "collStats": "coll0", - "scale": 1 - } + "pipeline": [ + { + "$collStats": { + "storageStats": {} + } + }, + { + "$project": { + "capped": "$storageStats.capped", + "maxSize": "$storageStats.maxSize" + } + } + ] }, - "expectResult": { - "capped": true, - "maxSize": 4096 - } + "expectResult": [ + { + "capped": true, + "maxSize": 4096 + } + ] } ] } From 873032660bf22d09cbf0013f6b077196d0b95f40 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 8 May 2023 12:33:28 -0500 Subject: [PATCH 0900/2111] PYTHON-3708 Fix ReadTheDocs Build Failure (#1206) --- .readthedocs.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index e2956c122b..39c86fff03 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -12,9 +12,13 @@ sphinx: # Set the version of Python and requirements required to build the docs. python: - version: 3.8 install: # Install pymongo itself. - method: pip path: . - requirements: doc/docs-requirements.txt + +build: + os: ubuntu-22.04 + tools: + python: "3.11" From 2752a7dd306d9793d449f035473351dd04be6917 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 9 May 2023 20:19:57 -0500 Subject: [PATCH 0901/2111] PYTHON-3456 CSFLE/QE Naming (#1208) --- doc/changelog.rst | 6 +- doc/examples/encryption.rst | 399 ++++++++++++++++++------------------ doc/index.rst | 2 +- 3 files changed, 208 insertions(+), 199 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index db2259f95f..7b35e4cd61 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -12,9 +12,8 @@ Changes in Version 4.4 or Visual Studio. - Improved support for type-checking with MyPy "strict" mode (`--strict`). - Added support for Python 3.11. -- pymongocrypt 1.6.0 or later is now required for Client Side Field Level Encryption (CSFLE) - and Queryable Encryption (QE) support. MongoDB Server 7.0 introduced a backwards breaking - change to the QE protocol. Users taking advantage of the QE beta must now upgrade to +- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB Server 7.0 introduced a backwards breaking + change to the QE protocol. Users taking advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and PyMongo 4.4+. Issues Resolved @@ -198,7 +197,6 @@ in this release. .. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311 .. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 -.. _Queryable Encryption: automatic-queryable-client-side-encryption Changes in Version 4.1.1 ------------------------- diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 57c1a84b0f..2823d3f9bc 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -1,7 +1,12 @@ +.. _In-Use Encryption: + +In-Use Encryption +================= + .. _Client-Side Field Level Encryption: Client-Side Field Level Encryption -================================== +---------------------------------- New in MongoDB 4.2, client-side field level encryption allows an application to encrypt specific data fields in addition to pre-existing MongoDB @@ -359,199 +364,6 @@ data key and create a collection with the if __name__ == "__main__": main() -.. _automatic-queryable-client-side-encryption: - -Automatic Queryable Encryption (Beta) -````````````````````````````````````` - -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -You must have MongoDB 7.0 Enterprise to preview the capability. - -Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: - - import os - from bson.codec_options import CodecOptions - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption, QueryType - from pymongo.encryption_options import AutoEncryptionOpts - - - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - key_vault_namespace = "keyvault.datakeys" - key_vault_client = MongoClient() - client_encryption = ClientEncryption( - kms_providers, key_vault_namespace, key_vault_client, CodecOptions() - ) - key_vault = key_vault_client["keyvault"]["datakeys"] - key_vault.drop() - key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) - key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) - - encrypted_fields_map = { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": key1_id, - "queries": [{"queryType": "equality"}], - }, - { - "path": "lastName", - "bsonType": "string", - "keyId": key2_id, - } - ] - } - } - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - client.default.drop_collection('encryptedCollection') - coll = client.default.create_collection('encryptedCollection') - coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) - docs = list(coll.find({"firstName": "Jane"})) - print(docs) - -In the above example, the ``firstName`` and ``lastName`` fields are -automatically encrypted and decrypted. - -Explicit Queryable Encryption (Beta) -```````````````````````````````````` - -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` -methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured -using an ``encrypted_fields`` mapping, as demonstrated by the following example:: - - import os - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, AutoEncryptionOpts, - ClientEncryption, QueryType) - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # Set up the key vault (key_vault_namespace) for this example. - client = MongoClient() - key_vault = client[key_vault_db_name][key_vault_coll_name] - - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - client.codec_options) - - # Create a new data key for the encryptedField. - indexed_key_id = client_encryption.create_data_key( - 'local') - unindexed_key_id = client_encryption.create_data_key( - 'local') - - encrypted_fields = { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": indexed_key_id, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality" - } - }, - { - "keyId": unindexed_key_id, - "path": "encryptedUnindexed", - "bsonType": "string", - } - ] - } - - opts = AutoEncryptionOpts( - {"local": {"key": local_master_key}}, - key_vault.full_name, - bypass_query_analysis=True, - key_vault_client=client, - ) - - # The MongoClient used to read/write application data. - encrypted_client = MongoClient(auto_encryption_opts=opts) - encrypted_client.drop_database("test") - db = encrypted_client.test - - # Create the collection with encrypted fields. - coll = db.create_collection("coll", encryptedFields=encrypted_fields) - - # Create and encrypt an indexed and unindexed value. - val = "encrypted indexed value" - unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) - insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, - unindexed_key_id) - - # Insert the payloads. - coll.insert_one({ - "encryptedIndexed": insert_payload_indexed, - "encryptedUnindexed": insert_payload_unindexed - }) - - # Encrypt our find payload using QueryType.EQUALITY. - # The value of "data_key_id" must be the same as used to encrypt the values - # above. - find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 - ) - - # Find the document we inserted using the encrypted payload. - # The returned document is automatically decrypted. - doc = coll.find_one({"encryptedIndexed": find_payload}) - print('Returned document: %s' % (doc,)) - - # Cleanup resources. - client_encryption.close() - encrypted_client.close() - client.close() - - - if __name__ == "__main__": - main() .. _explicit-client-side-encryption: @@ -785,3 +597,202 @@ An application using Azure credentials would look like, this time using coll.insert_one({"encryptedField": "123456789"}) The driver will `acquire an access token `_ from the Azure VM. + +.. _Queryable Encryption: + +Queryable Encryption +-------------------- + +.. _automatic-queryable-client-side-encryption: + +Automatic Queryable Encryption (Beta) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +You must have MongoDB 7.0 Enterprise to preview the capability. + +Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + from bson.codec_options import CodecOptions + from pymongo import MongoClient + from pymongo.encryption import Algorithm, ClientEncryption, QueryType + from pymongo.encryption_options import AutoEncryptionOpts + + + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + key_vault_namespace = "keyvault.datakeys" + key_vault_client = MongoClient() + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, CodecOptions() + ) + key_vault = key_vault_client["keyvault"]["datakeys"] + key_vault.drop() + key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) + key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) + + encrypted_fields_map = { + "default.encryptedCollection": { + "escCollection": "encryptedCollection.esc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": key1_id, + "queries": [{"queryType": "equality"}], + }, + { + "path": "lastName", + "bsonType": "string", + "keyId": key2_id, + } + ] + } + } + + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) + client = MongoClient(auto_encryption_opts=auto_encryption_opts) + client.default.drop_collection('encryptedCollection') + coll = client.default.create_collection('encryptedCollection') + coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) + docs = list(coll.find({"firstName": "Jane"})) + print(docs) + +In the above example, the ``firstName`` and ``lastName`` fields are +automatically encrypted and decrypted. + +Explicit Queryable Encryption (Beta) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. + +Queryable Encryption is the second version of Client-Side Field Level Encryption. +Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, +which are further processed server-side. + +Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` +methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured +using an ``encrypted_fields`` mapping, as demonstrated by the following example:: + + import os + + from pymongo import MongoClient + from pymongo.encryption import (Algorithm, AutoEncryptionOpts, + ClientEncryption, QueryType) + + + def main(): + # This must be the same master key that was used to create + # the encryption key. + local_master_key = os.urandom(96) + kms_providers = {"local": {"key": local_master_key}} + + # The MongoDB namespace (db.collection) used to store + # the encryption data keys. + key_vault_namespace = "encryption.__pymongoTestKeyVault" + key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) + + # Set up the key vault (key_vault_namespace) for this example. + client = MongoClient() + key_vault = client[key_vault_db_name][key_vault_coll_name] + + # Ensure that two data keys cannot share the same keyAltName. + key_vault.drop() + key_vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}) + + client_encryption = ClientEncryption( + kms_providers, + key_vault_namespace, + # The MongoClient to use for reading/writing to the key vault. + # This can be the same MongoClient used by the main application. + client, + # The CodecOptions class used for encrypting and decrypting. + # This should be the same CodecOptions instance you have configured + # on MongoClient, Database, or Collection. + client.codec_options) + + # Create a new data key for the encryptedField. + indexed_key_id = client_encryption.create_data_key( + 'local') + unindexed_key_id = client_encryption.create_data_key( + 'local') + + encrypted_fields = { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality" + } + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + } + ] + } + + opts = AutoEncryptionOpts( + {"local": {"key": local_master_key}}, + key_vault.full_name, + bypass_query_analysis=True, + key_vault_client=client, + ) + + # The MongoClient used to read/write application data. + encrypted_client = MongoClient(auto_encryption_opts=opts) + encrypted_client.drop_database("test") + db = encrypted_client.test + + # Create the collection with encrypted fields. + coll = db.create_collection("coll", encryptedFields=encrypted_fields) + + # Create and encrypt an indexed and unindexed value. + val = "encrypted indexed value" + unindexed_val = "encrypted unindexed value" + insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) + insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, + unindexed_key_id) + + # Insert the payloads. + coll.insert_one({ + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed + }) + + # Encrypt our find payload using QueryType.EQUALITY. + # The value of "data_key_id" must be the same as used to encrypt the values + # above. + find_payload = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 + ) + + # Find the document we inserted using the encrypted payload. + # The returned document is automatically decrypted. + doc = coll.find_one({"encryptedIndexed": find_payload}) + print('Returned document: %s' % (doc,)) + + # Cleanup resources. + client_encryption.close() + encrypted_client.close() + client.close() + + + if __name__ == "__main__": + main() diff --git a/doc/index.rst b/doc/index.rst index b43f5cf580..e474d27d8f 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -26,7 +26,7 @@ everything you need to know to use **PyMongo**. Using PyMongo with TLS / SSL. :doc:`examples/encryption` - Using PyMongo with client side encryption. + Using PyMongo with In-Use Encryption. :doc:`examples/type_hints` Using PyMongo with type hints. From d504322a740701fb465d36bb7c2ff5bcb1f02557 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 9 May 2023 23:19:44 -0700 Subject: [PATCH 0902/2111] PYTHON-3694 Test with MongoDB 7.0 (#1207) --- .evergreen/config.yml | 98 +++++++++++++++++++++++++++++++------------ README.rst | 2 +- doc/changelog.rst | 1 + 3 files changed, 74 insertions(+), 27 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f102668206..8398244071 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1448,6 +1448,7 @@ tasks: VERSION: "5.0" TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-6.0-standalone" tags: ["6.0", "standalone"] commands: @@ -1475,6 +1476,33 @@ tasks: TOPOLOGY: "sharded_cluster" - func: "run tests" + - name: "test-7.0-standalone" + tags: ["7.0", "standalone"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "7.0" + TOPOLOGY: "server" + - func: "run tests" + + - name: "test-7.0-replica_set" + tags: ["7.0", "replica_set"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "7.0" + TOPOLOGY: "replica_set" + - func: "run tests" + + - name: "test-7.0-sharded_cluster" + tags: ["7.0", "sharded_cluster"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "7.0" + TOPOLOGY: "sharded_cluster" + - func: "run tests" + - name: "test-latest-standalone" tags: ["latest", "standalone"] commands: @@ -1955,14 +1983,14 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - - name: "aws-auth-test-latest" + - name: "aws-auth-test-7.0" commands: - func: "bootstrap mongo-orchestration" vars: AUTH: "auth" ORCHESTRATION_FILE: "auth-aws.json" TOPOLOGY: "server" - VERSION: "latest" + VERSION: "7.0" - func: "add aws auth variables to file" - func: "run aws auth test with regular aws credentials" - func: "run aws auth test with assume role credentials" @@ -1971,6 +1999,7 @@ tasks: - func: "run aws auth test with aws EC2 credentials" - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-rapid" commands: - func: "bootstrap mongo-orchestration" @@ -1988,6 +2017,23 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" + - name: "aws-auth-test-latest" + commands: + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-aws.json" + TOPOLOGY: "server" + VERSION: "latest" + - func: "add aws auth variables to file" + - func: "run aws auth test with regular aws credentials" + - func: "run aws auth test with assume role credentials" + - func: "run aws auth test with aws credentials as environment variables" + - func: "run aws auth test with aws credentials and session token as environment variables" + - func: "run aws auth test with aws EC2 credentials" + - func: "run aws auth test with aws web identity credentials" + - func: "run aws ECS auth test" + - name: load-balancer-test commands: - func: "bootstrap mongo-orchestration" @@ -2319,6 +2365,10 @@ axes: display_name: "MongoDB 6.0" variables: VERSION: "6.0" + - id: "7.0" + display_name: "MongoDB 7.0" + variables: + VERSION: "7.0" - id: "latest" display_name: "MongoDB latest" variables: @@ -2568,21 +2618,6 @@ axes: batchtime: 10080 # 7 days buildvariants: -- matrix_name: "tests-all" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=3.6 with SSL. - - rhel84 - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - - matrix_name: "tests-archlinux" matrix_spec: platform: @@ -2622,6 +2657,7 @@ buildvariants: display_name: "${platform} ${auth} ${ssl}" tasks: - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2637,6 +2673,7 @@ buildvariants: display_name: "${platform} ${auth-ssl}" tasks: - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2660,6 +2697,7 @@ buildvariants: add_tasks: &encryption-server-versions - ".rapid" - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2688,6 +2726,7 @@ buildvariants: tasks: &all-server-versions - ".rapid" - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2712,8 +2751,8 @@ buildvariants: display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" tasks: - '.replica_set' - # Test standalone and sharded only on 5.0 and later. - - '.5.0' + # Test standalone and sharded only on 7.0. + - '.7.0' - matrix_name: "tests-pyopenssl-macOS" matrix_spec: @@ -2827,6 +2866,7 @@ buildvariants: tasks: - ".rapid" - ".latest" + - ".7.0" - ".6.0" - ".5.0" - ".4.4" @@ -2889,6 +2929,8 @@ buildvariants: then: add_tasks: - "test-latest-standalone" + - "test-7.0-standalone" + - "test-6.0-standalone" - "test-5.0-standalone" - "test-4.4-standalone" - "test-4.2-standalone" @@ -3028,7 +3070,7 @@ buildvariants: matrix_spec: platform: ubuntu-20.04 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] - mongodb-version: ["4.4", "5.0", "6.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" @@ -3040,7 +3082,7 @@ buildvariants: matrix_spec: platform: windows-64-vsMulti-small python-version-windows: ["3.7", "3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" @@ -3052,7 +3094,7 @@ buildvariants: - matrix_name: "ocsp-test-macos" matrix_spec: platform: macos-1014 - mongodb-version: ["4.4", "5.0", "6.0", "latest"] + mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" ssl: "ssl" display_name: "OCSP test ${platform} ${mongodb-version}" @@ -3069,9 +3111,10 @@ buildvariants: tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" - matrix_name: "aws-auth-test-mac" matrix_spec: @@ -3081,9 +3124,11 @@ buildvariants: tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" + - matrix_name: "aws-auth-test-windows" matrix_spec: platform: [windows-64-vsMulti-small] @@ -3092,14 +3137,15 @@ buildvariants: tasks: - name: "aws-auth-test-4.4" - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - name: "aws-auth-test-6.0" + - name: "aws-auth-test-7.0" - name: "aws-auth-test-rapid" + - name: "aws-auth-test-latest" - matrix_name: "load-balancer" matrix_spec: platform: rhel84 - mongodb-version: ["rapid", "latest", "6.0"] + mongodb-version: ["6.0", "7.0", "rapid", "latest"] auth-ssl: "*" python-version: "*" loadbalancer: "*" diff --git a/README.rst b/README.rst index bb409a94ff..cc2b79d842 100644 --- a/README.rst +++ b/README.rst @@ -16,7 +16,7 @@ is a `gridfs `_ implementation on top of ``pymongo``. -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, and 6.0. +PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, 6.0, and 7.0. Support / Feedback ================== diff --git a/doc/changelog.rst b/doc/changelog.rst index 7b35e4cd61..a0d73eb4de 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,7 @@ Changelog Changes in Version 4.4 ----------------------- +- Added support for MongoDB 7.0. - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. - pymongocrypt 1.5.0 or later is now required for client side field level From afd7e1c2cdeb7bf33a9e21036450ff0a56fcc39a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 11 May 2023 14:35:30 -0500 Subject: [PATCH 0903/2111] PYTHON-3460 Implement OIDC SASL mechanism (#1138) --- .evergreen/config.yml | 83 ++ .evergreen/resync-specs.sh | 3 + .evergreen/run-mongodb-oidc-test.sh | 85 ++ pymongo/auth.py | 62 +- pymongo/auth_oidc.py | 299 +++++++ pymongo/common.py | 40 +- pymongo/helpers.py | 35 + pymongo/message.py | 2 + pymongo/pool.py | 13 +- pymongo/server.py | 3 +- test/auth/{ => legacy}/connection-string.json | 129 ++- .../unified/reauthenticate_with_retry.json | 191 ++++ .../unified/reauthenticate_without_retry.json | 191 ++++ test/auth_aws/test_auth_oidc.py | 821 ++++++++++++++++++ test/test_auth_spec.py | 31 +- 15 files changed, 1970 insertions(+), 18 deletions(-) create mode 100755 .evergreen/run-mongodb-oidc-test.sh create mode 100644 pymongo/auth_oidc.py rename test/auth/{ => legacy}/connection-string.json (76%) create mode 100644 test/auth/unified/reauthenticate_with_retry.json create mode 100644 test/auth/unified/reauthenticate_without_retry.json create mode 100644 test/auth_aws/test_auth_oidc.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8398244071..3f06fc1a03 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -749,6 +749,68 @@ functions: fi PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh + "bootstrap oidc": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + script: | + ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the oidc auth test, skipping..." + exit 0 + fi + + cd ${DRIVERS_TOOLS}/.evergreen/auth_oidc + export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} + export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + export AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN} + export OIDC_TOKEN_DIR=/tmp/tokens + + . ./activate-authoidcvenv.sh + python oidc_write_orchestration.py + python oidc_get_tokens.py + + "run oidc auth test with aws credentials": + - command: shell.exec + type: test + params: + working_dir: "src" + shell: bash + script: | + ${PREPARE_SHELL} + if [ "${skip_EC2_auth_test}" = "true" ]; then + echo "This platform does not support the oidc auth test, skipping..." + exit 0 + fi + cd ${DRIVERS_TOOLS}/.evergreen/auth_oidc + mongosh setup_oidc.js + - command: shell.exec + type: test + params: + working_dir: "src" + silent: true + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_oidc.sh" + export OIDC_TOKEN_DIR=/tmp/tokens + EOF + - command: shell.exec + type: test + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + if [ "${skip_web_identity_auth_test}" = "true" ]; then + echo "This platform does not support the oidc auth test, skipping..." + exit 0 + fi + PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-oidc-test.sh + "run aws auth test with aws credentials as environment variables": - command: shell.exec type: test @@ -2034,6 +2096,19 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" + - name: "oidc-auth-test-latest" + commands: + - func: "bootstrap oidc" + - func: "bootstrap mongo-orchestration" + vars: + AUTH: "auth" + ORCHESTRATION_FILE: "auth-oidc.json" + TOPOLOGY: "replica_set" + VERSION: "latest" + - func: "run oidc auth test with aws credentials" + vars: + AWS_WEB_IDENTITY_TOKEN_FILE: /tmp/tokens/test1 + - name: load-balancer-test commands: - func: "bootstrap mongo-orchestration" @@ -3103,6 +3178,14 @@ buildvariants: # macOS MongoDB servers do not staple OCSP responses and only support RSA. - name: ".ocsp-rsa !.ocsp-staple" +- matrix_name: "oidc-auth-test" + matrix_spec: + platform: [ ubuntu-20.04 ] + python-version: ["3.9"] + display_name: "MONGODB-OIDC Auth ${platform} ${python-version}" + tasks: + - name: "oidc-auth-test-latest" + - matrix_name: "aws-auth-test" matrix_spec: platform: [ubuntu-20.04] diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 489ff28b3a..817a2d96bc 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -70,6 +70,9 @@ for spec in "$@" do # Match the spec dir name, the python test dir name, and/or common abbreviations. case "$spec" in + auth) + cpjson auth/tests/ auth + ;; atlas-data-lake-testing|data_lake) cpjson atlas-data-lake-testing/tests/ data_lake ;; diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh new file mode 100755 index 0000000000..46bb779578 --- /dev/null +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use +# to connect to the server via MONGODB-OIDC authentication +# mechanism. +# PYTHON_BINARY The Python version to use. + +echo "Running MONGODB-OIDC authentication tests" +# ensure no secrets are printed in log files +set +x + +# load the script +shopt -s expand_aliases # needed for `urlencode` alias +[ -s "${PROJECT_DIRECTORY}/prepare_mongodb_oidc.sh" ] && source "${PROJECT_DIRECTORY}/prepare_mongodb_oidc.sh" + +MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} +MONGODB_URI_SINGLE="${MONGODB_URI}/?authMechanism=MONGODB-OIDC" +MONGODB_URI_MULTIPLE="${MONGODB_URI}:27018/?authMechanism=MONGODB-OIDC&directConnection=true" + +if [ -z "${OIDC_TOKEN_DIR}" ]; then + echo "Must specify OIDC_TOKEN_DIR" + exit 1 +fi + +export MONGODB_URI_SINGLE="$MONGODB_URI_SINGLE" +export MONGODB_URI_MULTIPLE="$MONGODB_URI_MULTIPLE" +export MONGODB_URI="$MONGODB_URI" + +echo $MONGODB_URI_SINGLE +echo $MONGODB_URI_MULTIPLE +echo $MONGODB_URI + +if [ "$ASSERT_NO_URI_CREDS" = "true" ]; then + if echo "$MONGODB_URI" | grep -q "@"; then + echo "MONGODB_URI unexpectedly contains user credentials!"; + exit 1 + fi +fi + +# show test output +set -x + +# Workaround macOS python 3.9 incompatibility with system virtualenv. +if [ "$(uname -s)" = "Darwin" ]; then + VIRTUALENV="/Library/Frameworks/Python.framework/Versions/3.9/bin/python3 -m virtualenv" +else + VIRTUALENV=$(command -v virtualenv) +fi + +authtest () { + if [ "Windows_NT" = "$OS" ]; then + PYTHON=$(cygpath -m $PYTHON) + fi + + echo "Running MONGODB-OIDC authentication tests with $PYTHON" + $PYTHON --version + + $VIRTUALENV -p $PYTHON --never-download venvoidc + if [ "Windows_NT" = "$OS" ]; then + . venvoidc/Scripts/activate + else + . venvoidc/bin/activate + fi + python -m pip install -U pip setuptools + python -m pip install '.[aws]' + python test/auth_aws/test_auth_oidc.py -v + deactivate + rm -rf venvoidc +} + +PYTHON=${PYTHON_BINARY:-} +if [ -z "$PYTHON" ]; then + echo "Cannot test without specifying PYTHON_BINARY" + exit 1 +fi + +authtest diff --git a/pymongo/auth.py b/pymongo/auth.py index 3d259335b0..4bc31ee97b 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -27,6 +27,7 @@ from bson.binary import Binary from bson.son import SON from pymongo.auth_aws import _authenticate_aws +from pymongo.auth_oidc import _authenticate_oidc, _get_authenticator, _OIDCProperties from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep @@ -48,6 +49,7 @@ [ "GSSAPI", "MONGODB-CR", + "MONGODB-OIDC", "MONGODB-X509", "MONGODB-AWS", "PLAIN", @@ -101,7 +103,7 @@ def __hash__(self): def _build_credentials_tuple(mech, source, user, passwd, extra, database): """Build and return a mechanism specific credentials tuple.""" - if mech not in ("MONGODB-X509", "MONGODB-AWS") and user is None: + if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: raise ConfigurationError("%s requires a username." % (mech,)) if mech == "GSSAPI": if source is not None and source != "$external": @@ -137,6 +139,32 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): aws_props = _AWSProperties(aws_session_token=aws_session_token) # user can be None for temporary link-local EC2 credentials. return MongoCredential(mech, "$external", user, passwd, aws_props, None) + elif mech == "MONGODB-OIDC": + properties = extra.get("authmechanismproperties", {}) + request_token_callback = properties.get("request_token_callback") + refresh_token_callback = properties.get("refresh_token_callback", None) + provider_name = properties.get("PROVIDER_NAME", "") + default_allowed = [ + "*.mongodb.net", + "*.mongodb-dev.net", + "*.mongodbgov.net", + "localhost", + "127.0.0.1", + "::1", + ] + allowed_hosts = properties.get("allowed_hosts", default_allowed) + if not request_token_callback and provider_name != "aws": + raise ConfigurationError( + "authentication with MONGODB-OIDC requires providing an request_token_callback or a provider_name of 'aws'" + ) + oidc_props = _OIDCProperties( + request_token_callback=request_token_callback, + refresh_token_callback=refresh_token_callback, + provider_name=provider_name, + allowed_hosts=allowed_hosts, + ) + return MongoCredential(mech, "$external", user, passwd, oidc_props, None) + elif mech == "PLAIN": source_database = source or database or "$external" return MongoCredential(mech, source_database, user, passwd, None, None) @@ -439,7 +467,7 @@ def _authenticate_x509(credentials, sock_info): # MONGODB-X509 is done after the speculative auth step. return - cmd = _X509Context(credentials).speculate_command() + cmd = _X509Context(credentials, sock_info.address).speculate_command() sock_info.command("$external", cmd) @@ -482,6 +510,7 @@ def _authenticate_default(credentials, sock_info): "MONGODB-CR": _authenticate_mongo_cr, "MONGODB-X509": _authenticate_x509, "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, "PLAIN": _authenticate_plain, "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), @@ -490,15 +519,16 @@ def _authenticate_default(credentials, sock_info): class _AuthContext(object): - def __init__(self, credentials): + def __init__(self, credentials, address): self.credentials = credentials self.speculative_authenticate = None + self.address = address @staticmethod - def from_credentials(creds): + def from_credentials(creds, address): spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) if spec_cls: - return spec_cls(creds) + return spec_cls(creds, address) return None def speculate_command(self): @@ -512,8 +542,8 @@ def speculate_succeeded(self): class _ScramContext(_AuthContext): - def __init__(self, credentials, mechanism): - super(_ScramContext, self).__init__(credentials) + def __init__(self, credentials, address, mechanism): + super(_ScramContext, self).__init__(credentials, address) self.scram_data = None self.mechanism = mechanism @@ -534,16 +564,30 @@ def speculate_command(self): return cmd +class _OIDCContext(_AuthContext): + def speculate_command(self): + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.auth_start_cmd(False) + if cmd is None: + return + cmd["db"] = self.credentials.source + return cmd + + _SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = { "MONGODB-X509": _X509Context, "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), } -def authenticate(credentials, sock_info): +def authenticate(credentials, sock_info, reauthenticate=False): """Authenticate sock_info.""" mechanism = credentials.mechanism auth_func = _AUTH_MAP[mechanism] - auth_func(credentials, sock_info) + if mechanism == "MONGODB-OIDC": + _authenticate_oidc(credentials, sock_info, reauthenticate) + else: + auth_func(credentials, sock_info) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py new file mode 100644 index 0000000000..530b1bb068 --- /dev/null +++ b/pymongo/auth_oidc.py @@ -0,0 +1,299 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-OIDC Authentication helpers.""" +import os +import threading +from dataclasses import dataclass, field +from datetime import datetime, timedelta, timezone +from typing import Callable, Dict, List, Optional + +import bson +from bson.binary import Binary +from bson.son import SON +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers import _REAUTHENTICATION_REQUIRED_CODE + + +@dataclass +class _OIDCProperties: + request_token_callback: Optional[Callable[..., Dict]] + refresh_token_callback: Optional[Callable[..., Dict]] + provider_name: Optional[str] + allowed_hosts: List[str] + + +"""Mechanism properties for MONGODB-OIDC authentication.""" + +TOKEN_BUFFER_MINUTES = 5 +CALLBACK_TIMEOUT_SECONDS = 5 * 60 +CACHE_TIMEOUT_MINUTES = 60 * 5 +CALLBACK_VERSION = 0 + +_CACHE: Dict[str, "_OIDCAuthenticator"] = {} + + +def _get_authenticator(credentials, address): + # Clear out old items in the cache. + now_utc = datetime.now(timezone.utc) + to_remove = [] + for key, value in _CACHE.items(): + if value.cache_exp_utc is not None and value.cache_exp_utc < now_utc: + to_remove.append(key) + for key in to_remove: + del _CACHE[key] + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + request_cb = properties.request_token_callback + refresh_cb = properties.refresh_token_callback + + # Validate that the address is allowed. + if not properties.provider_name: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache item. + cache_key = f"{principal_name}{address[0]}{address[1]}{id(request_cb)}{id(refresh_cb)}" + _CACHE.setdefault(cache_key, _OIDCAuthenticator(username=principal_name, properties=properties)) + + return _CACHE[cache_key] + + +def _get_cache_exp(): + return datetime.now(timezone.utc) + timedelta(minutes=CACHE_TIMEOUT_MINUTES) + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + idp_info: Optional[Dict] = field(default=None) + idp_resp: Optional[Dict] = field(default=None) + reauth_gen_id: int = field(default=0) + idp_info_gen_id: int = field(default=0) + token_gen_id: int = field(default=0) + token_exp_utc: Optional[datetime] = field(default=None) + cache_exp_utc: datetime = field(default_factory=_get_cache_exp) + lock: threading.Lock = field(default_factory=threading.Lock) + + def get_current_token(self, use_callbacks=True): + properties = self.properties + + request_cb = properties.request_token_callback + refresh_cb = properties.refresh_token_callback + if not use_callbacks: + request_cb = None + refresh_cb = None + + current_valid_token = False + if self.token_exp_utc is not None: + now_utc = datetime.now(timezone.utc) + exp_utc = self.token_exp_utc + buffer_seconds = TOKEN_BUFFER_MINUTES * 60 + if (exp_utc - now_utc).total_seconds() >= buffer_seconds: + current_valid_token = True + + timeout = CALLBACK_TIMEOUT_SECONDS + + if not use_callbacks and not current_valid_token: + return None + + if not current_valid_token and request_cb is not None: + prev_token = self.idp_resp and self.idp_resp["access_token"] + with self.lock: + # See if the token was changed while we were waiting for the + # lock. + new_token = self.idp_resp and self.idp_resp["access_token"] + if new_token != prev_token: + return new_token + + refresh_token = self.idp_resp and self.idp_resp.get("refresh_token") + refresh_token = refresh_token or "" + context = dict( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=refresh_token, + ) + + if self.idp_resp is None or refresh_cb is None: + self.idp_resp = request_cb(self.idp_info, context) + elif request_cb is not None: + self.idp_resp = refresh_cb(self.idp_info, context) + cache_exp_utc = datetime.now(timezone.utc) + timedelta( + minutes=CACHE_TIMEOUT_MINUTES + ) + self.cache_exp_utc = cache_exp_utc + self.token_gen_id += 1 + + token_result = self.idp_resp + + # Validate callback return value. + if not isinstance(token_result, dict): + raise ValueError("OIDC callback returned invalid result") + + if "access_token" not in token_result: + raise ValueError("OIDC callback did not return an access_token") + + expected = ["access_token", "expires_in_seconds", "refesh_token"] + for key in token_result: + if key not in expected: + raise ValueError(f'Unexpected field in callback result "{key}"') + + token = token_result["access_token"] + + if "expires_in_seconds" in token_result: + expires_in = int(token_result["expires_in_seconds"]) + buffer_seconds = TOKEN_BUFFER_MINUTES * 60 + if expires_in >= buffer_seconds: + now_utc = datetime.now(timezone.utc) + exp_utc = now_utc + timedelta(seconds=expires_in) + self.token_exp_utc = exp_utc + + return token + + def auth_start_cmd(self, use_callbacks=True): + properties = self.properties + + # Handle aws provider credentials. + if properties.provider_name == "aws": + aws_identity_file = os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] + with open(aws_identity_file) as fid: + token = fid.read().strip() + payload = dict(jwt=token) + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "MONGODB-OIDC"), + ("payload", Binary(bson.encode(payload))), + ] + ) + return cmd + + principal_name = self.username + + if self.idp_info is not None: + self.cache_exp_utc = datetime.now(timezone.utc) + timedelta( + minutes=CACHE_TIMEOUT_MINUTES + ) + + if self.idp_info is None: + self.cache_exp_utc = _get_cache_exp() + + if self.idp_info is None: + # Send the SASL start with the optional principal name. + payload = dict() + + if principal_name: + payload["n"] = principal_name + + cmd = SON( + [ + ("saslStart", 1), + ("mechanism", "MONGODB-OIDC"), + ("payload", Binary(bson.encode(payload))), + ("autoAuthorize", 1), + ] + ) + return cmd + + token = self.get_current_token(use_callbacks) + if not token: + return None + bin_payload = Binary(bson.encode(dict(jwt=token))) + return SON( + [ + ("saslStart", 1), + ("mechanism", "MONGODB-OIDC"), + ("payload", bin_payload), + ] + ) + + def clear(self): + self.idp_info = None + self.idp_resp = None + self.token_exp_utc = None + + def run_command(self, sock_info, cmd): + try: + return sock_info.command("$external", cmd, no_reauth=True) + except OperationFailure as exc: + self.clear() + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + if "jwt" in bson.decode(cmd["payload"]): # type:ignore[attr-defined] + if self.idp_info_gen_id > self.reauth_gen_id: + raise + return self.authenticate(sock_info, reauthenticate=True) + raise + + def authenticate(self, sock_info, reauthenticate=False): + if reauthenticate: + prev_id = getattr(sock_info, "oidc_token_gen_id", None) + # Check if we've already changed tokens. + if prev_id == self.token_gen_id: + self.reauth_gen_id = self.idp_info_gen_id + self.token_exp_utc = None + if not self.properties.refresh_token_callback: + self.clear() + + ctx = sock_info.auth_ctx + cmd = None + + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + else: + cmd = self.auth_start_cmd() + resp = self.run_command(sock_info, cmd) + + if resp["done"]: + sock_info.oidc_token_gen_id = self.token_gen_id + return + + server_resp: Dict = bson.decode(resp["payload"]) + if "issuer" in server_resp: + self.idp_info = server_resp + self.idp_info_gen_id += 1 + + conversation_id = resp["conversationId"] + token = self.get_current_token() + sock_info.oidc_token_gen_id = self.token_gen_id + bin_payload = Binary(bson.encode(dict(jwt=token))) + cmd = SON( + [ + ("saslContinue", 1), + ("conversationId", conversation_id), + ("payload", bin_payload), + ] + ) + resp = self.run_command(sock_info, cmd) + if not resp["done"]: + self.clear() + raise OperationFailure("SASL conversation failed to complete.") + return resp + + +def _authenticate_oidc(credentials, sock_info, reauthenticate): + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, sock_info.address) + return authenticator.authenticate(sock_info, reauthenticate=reauthenticate) diff --git a/pymongo/common.py b/pymongo/common.py index 4b8aeb020c..4e39c8e514 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -16,6 +16,7 @@ """Functions and classes common to multiple pymongo modules.""" import datetime +import inspect import warnings from collections import OrderedDict, abc from typing import ( @@ -416,14 +417,48 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] _MECHANISM_PROPS = frozenset( - ["SERVICE_NAME", "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN"] + [ + "SERVICE_NAME", + "CANONICALIZE_HOST_NAME", + "SERVICE_REALM", + "AWS_SESSION_TOKEN", + "PROVIDER_NAME", + ] ) def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" - value = validate_string(option, value) props: Dict[str, Any] = {} + if not isinstance(value, str): + if not isinstance(value, dict): + raise ValueError("Auth mechanism properties must be given as a string or a dictionary") + for key, value in value.items(): + if isinstance(value, str): + props[key] = value + elif isinstance(value, bool): + props[key] = str(value).lower() + elif key in ["allowed_hosts"] and isinstance(value, list): + props[key] = value + elif inspect.isfunction(value): + signature = inspect.signature(value) + if key == "request_token_callback": + expected_params = 2 + elif key == "refresh_token_callback": + expected_params = 2 + else: + raise ValueError(f"Unrecognized Auth mechanism function {key}") + if len(signature.parameters) != expected_params: + msg = f"{key} must accept {expected_params} parameters" + raise ValueError(msg) + props[key] = value + else: + raise ValueError( + "Auth mechanism property values must be strings or callback functions" + ) + return props + + value = validate_string(option, value) for opt in value.split(","): try: key, val = opt.split(":") @@ -715,6 +750,7 @@ def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeCo "password": validate_string_or_none, "server_selector": validate_is_callable_or_none, "auto_encryption_opts": validate_auto_encryption_opts_or_none, + "authoidcallowedhosts": validate_list, } # Dictionary where keys are any URI option name, and values are the diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 31325c8af2..1a753c66f4 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -68,6 +68,9 @@ ] ) +# Server code raised when re-authentication is required +_REAUTHENTICATION_REQUIRED_CODE = 391 + def _gen_index_name(keys): """Generate an index name from the set of fields it is over.""" @@ -267,3 +270,35 @@ def _handle_exception(): pass finally: del einfo + + +def _handle_reauth(func): + def inner(*args, **kwargs): + no_reauth = kwargs.pop("no_reauth", False) + from pymongo.pool import SocketInfo + + try: + return func(*args, **kwargs) + except OperationFailure as exc: + if no_reauth: + raise + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + # Look for an argument that either is a SocketInfo + # or has a socket_info attribute, so we can trigger + # a reauth. + sock_info = None + for arg in args: + if isinstance(arg, SocketInfo): + sock_info = arg + break + if hasattr(arg, "sock_info"): + sock_info = arg.sock_info + break + if sock_info: + sock_info.authenticate(reauthenticate=True) + else: + raise + return func(*args, **kwargs) + raise + + return inner diff --git a/pymongo/message.py b/pymongo/message.py index f7a173ca8a..3510d210a5 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -54,6 +54,7 @@ ProtocolError, ) from pymongo.hello import HelloCompat +from pymongo.helpers import _handle_reauth from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -909,6 +910,7 @@ def unack_write(self, cmd, request_id, msg, max_doc_size, docs): self.start_time = datetime.datetime.now() return result + @_handle_reauth def write_command(self, cmd, request_id, msg, docs): """A proxy for SocketInfo.write_command that handles event publishing.""" if self.publish: diff --git a/pymongo/pool.py b/pymongo/pool.py index 42e6a642a4..6ba1554231 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -57,6 +57,7 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat +from pymongo.helpers import _handle_reauth from pymongo.lock import _create_lock from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason from pymongo.network import command, receive_message @@ -756,7 +757,7 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): if creds: if creds.mechanism == "DEFAULT" and creds.username: cmd["saslSupportedMechs"] = creds.source + "." + creds.username - auth_ctx = auth._AuthContext.from_credentials(creds) + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) if auth_ctx: cmd["speculativeAuthenticate"] = auth_ctx.speculate_command() else: @@ -813,6 +814,7 @@ def _next_reply(self): helpers._check_command_response(response_doc, self.max_wire_version) return response_doc + @_handle_reauth def command( self, dbname, @@ -966,17 +968,22 @@ def write_command(self, request_id, msg, codec_options): helpers._check_command_response(result, self.max_wire_version) return result - def authenticate(self): + def authenticate(self, reauthenticate=False): """Authenticate to the server if needed. Can raise ConnectionFailure or OperationFailure. """ # CMAP spec says to publish the ready event only after authenticating # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False if not self.ready: creds = self.opts._credentials if creds: - auth.authenticate(creds, self) + auth.authenticate(creds, self, reauthenticate=reauthenticate) self.ready = True if self.enabled_for_cmap: self.listeners.publish_connection_ready(self.address, self.id) diff --git a/pymongo/server.py b/pymongo/server.py index f26f473c32..16c905abb7 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -18,7 +18,7 @@ from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure -from pymongo.helpers import _check_command_response +from pymongo.helpers import _check_command_response, _handle_reauth from pymongo.message import _convert_exception, _OpMsg from pymongo.response import PinnedResponse, Response @@ -73,6 +73,7 @@ def request_check(self): """Check the server's state soon.""" self._monitor.request_check() + @_handle_reauth def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): """Run a _Query or _GetMore operation and return a Response object. diff --git a/test/auth/connection-string.json b/test/auth/legacy/connection-string.json similarity index 76% rename from test/auth/connection-string.json rename to test/auth/legacy/connection-string.json index 2a37ae8df4..ca979010af 100644 --- a/test/auth/connection-string.json +++ b/test/auth/legacy/connection-string.json @@ -444,6 +444,133 @@ "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" } } + }, + { + "description": "should recognise the mechanism and request callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with request callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism with request and refresh callback (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest", "oidcRefresh"], + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true, + "REFRESH_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism and username with request callback (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": true, + "credential": { + "username": "principalName", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "REQUEST_TOKEN_CALLBACK": true + } + } + }, + { + "description": "should recognise the mechanism with aws device (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:aws", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "PROVIDER_NAME": "aws" + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with aws device (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=PROVIDER_NAME:aws", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "PROVIDER_NAME": "aws" + } + } + }, + { + "description": "should throw an exception if username and password are specified (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRequest"], + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if username and deviceName are specified (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&PROVIDER_NAME:gcp", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if specified deviceName is not supported (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=PROVIDER_NAME:unexisted", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if neither deviceName nor callbacks specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when only refresh callback is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "callback": ["oidcRefresh"], + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "valid": false, + "credential": null } ] -} +} \ No newline at end of file diff --git a/test/auth/unified/reauthenticate_with_retry.json b/test/auth/unified/reauthenticate_with_retry.json new file mode 100644 index 0000000000..ef110562ed --- /dev/null +++ b/test/auth/unified/reauthenticate_with_retry.json @@ -0,0 +1,191 @@ +{ + "description": "reauthenticate_with_retry", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "6.3", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": true, + "retryWrites": true + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=true", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "collection0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=true", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/auth/unified/reauthenticate_without_retry.json b/test/auth/unified/reauthenticate_without_retry.json new file mode 100644 index 0000000000..6fded47634 --- /dev/null +++ b/test/auth/unified/reauthenticate_without_retry.json @@ -0,0 +1,191 @@ +{ + "description": "reauthenticate_without_retry", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "6.3", + "auth": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": { + "retryReads": false, + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "Read command should reauthenticate when receive ReauthenticationRequired error code and retryReads=false", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "collection0", + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write command should reauthenticate when receive ReauthenticationRequired error code and retryWrites=false", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/auth_aws/test_auth_oidc.py b/test/auth_aws/test_auth_oidc.py new file mode 100644 index 0000000000..470e4581c2 --- /dev/null +++ b/test/auth_aws/test_auth_oidc.py @@ -0,0 +1,821 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" + +import os +import sys +import threading +import time +import unittest +from contextlib import contextmanager +from typing import Dict + +sys.path[0:0] = [""] + +from test.utils import EventListener + +from bson import SON +from pymongo import MongoClient +from pymongo.auth_oidc import _CACHE as _oidc_cache +from pymongo.cursor import CursorType +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne + + +class TestAuthOIDC(unittest.TestCase): + uri: str + + @classmethod + def setUpClass(cls): + cls.uri_single = os.environ["MONGODB_URI_SINGLE"] + cls.uri_multiple = os.environ["MONGODB_URI_MULTIPLE"] + cls.uri_admin = os.environ["MONGODB_URI"] + cls.token_dir = os.environ["OIDC_TOKEN_DIR"] + + def setUp(self): + self.request_called = 0 + self.refresh_called = 0 + _oidc_cache.clear() + os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] = os.path.join(self.token_dir, "test_user1") + + def create_request_cb(self, username="test_user1", expires_in_seconds=None, sleep=0): + + token_file = os.path.join(self.token_dir, username) + + def request_token(server_info, context): + # Validate the info. + self.assertIn("issuer", server_info) + self.assertIn("clientId", server_info) + + # Validate the timeout. + timeout_seconds = context["timeout_seconds"] + self.assertEqual(timeout_seconds, 60 * 5) + with open(token_file) as fid: + token = fid.read() + resp = dict(access_token=token) + + time.sleep(sleep) + + if expires_in_seconds is not None: + resp["expires_in_seconds"] = expires_in_seconds + self.request_called += 1 + return resp + + return request_token + + def create_refresh_cb(self, username="test_user1", expires_in_seconds=None): + + token_file = os.path.join(self.token_dir, username) + + def refresh_token(server_info, context): + with open(token_file) as fid: + token = fid.read() + + # Validate the info. + self.assertIn("issuer", server_info) + self.assertIn("clientId", server_info) + + # Validate the creds + self.assertIsNotNone(context["refresh_token"]) + + # Validate the timeout. + self.assertEqual(context["timeout_seconds"], 60 * 5) + + resp = dict(access_token=token) + if expires_in_seconds is not None: + resp["expires_in_seconds"] = expires_in_seconds + self.refresh_called += 1 + return resp + + return refresh_token + + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client = MongoClient(self.uri_admin) + client.admin.command(cmd_on) + try: + yield + finally: + client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") + + def test_connect_callbacks_single_implicit_username(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_single_explicit_username(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_single, username="test_user1", authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_multiple_principal_user1(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient( + self.uri_multiple, username="test_user1", authmechanismproperties=props + ) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_multiple_principal_user2(self): + request_token = self.create_request_cb("test_user2") + props: Dict = dict(request_token_callback=request_token) + client = MongoClient( + self.uri_multiple, username="test_user2", authmechanismproperties=props + ) + client.test.test.find_one() + client.close() + + def test_connect_callbacks_multiple_no_username(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + with self.assertRaises(OperationFailure): + client.test.test.find_one() + client.close() + + def test_allowed_hosts_blocked(self): + request_token = self.create_request_cb() + props: Dict = dict(request_token_callback=request_token, allowed_hosts=[]) + client = MongoClient(self.uri_single, authmechanismproperties=props) + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + client.close() + + props: Dict = dict(request_token_callback=request_token, allowed_hosts=["example.com"]) + client = MongoClient( + self.uri_single + "&ignored=example.com", authmechanismproperties=props, connect=False + ) + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + client.close() + + def test_connect_aws_single_principal(self): + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_aws_multiple_principal_user1(self): + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_aws_multiple_principal_user2(self): + os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] = os.path.join(self.token_dir, "test_user2") + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_connect_aws_allowed_hosts_ignored(self): + props = dict(PROVIDER_NAME="aws", allowed_hosts=[]) + client = MongoClient(self.uri_multiple, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_valid_callbacks(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + refresh_cb = self.create_refresh_cb() + + props: Dict = dict( + request_token_callback=request_cb, + refresh_token_callback=refresh_cb, + ) + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + def test_lock_avoids_extra_callbacks(self): + request_cb = self.create_request_cb(sleep=0.5) + refresh_cb = self.create_refresh_cb() + + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + + def run_test(): + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + t1 = threading.Thread(target=run_test) + t2 = threading.Thread(target=run_test) + t1.start() + t2.start() + t1.join() + t2.join() + + self.assertEqual(self.request_called, 1) + self.assertEqual(self.refresh_called, 2) + + def test_request_callback_returns_null(self): + def request_token_null(a, b): + return None + + props: Dict = dict(request_token_callback=request_token_null) + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_refresh_callback_returns_null(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + + def refresh_token_null(a, b): + return None + + props: Dict = dict( + request_token_callback=request_cb, refresh_token_callback=refresh_token_null + ) + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_request_callback_invalid_result(self): + def request_token_invalid(a, b): + return dict() + + props: Dict = dict(request_token_callback=request_token_invalid) + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def request_cb_extra_value(server_info, context): + result = self.create_request_cb()(server_info, context) + result["foo"] = "bar" + return result + + props: Dict = dict(request_token_callback=request_cb_extra_value) + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_refresh_callback_missing_data(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + + def refresh_cb_no_token(a, b): + return dict() + + props: Dict = dict( + request_token_callback=request_cb, refresh_token_callback=refresh_cb_no_token + ) + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_refresh_callback_extra_data(self): + request_cb = self.create_request_cb(expires_in_seconds=60) + + def refresh_cb_extra_value(server_info, context): + result = self.create_refresh_cb()(server_info, context) + result["foo"] = "bar" + return result + + props: Dict = dict( + request_token_callback=request_cb, refresh_token_callback=refresh_cb_extra_value + ) + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + client = MongoClient(self.uri_single, authMechanismProperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_cache_with_refresh(self): + # Create a new client with a request callback and a refresh callback. Both callbacks will read the contents of the ``AWS_WEB_IDENTITY_TOKEN_FILE`` location to obtain a valid access token. + + # Give a callback response with a valid accessToken and an expiresInSeconds that is within one minute. + request_cb = self.create_request_cb(expires_in_seconds=60) + refresh_cb = self.create_refresh_cb() + + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + + # Ensure that a ``find`` operation adds credentials to the cache. + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + self.assertEqual(len(_oidc_cache), 1) + + # Create a new client with the same request callback and a refresh callback. + # Ensure that a ``find`` operation results in a call to the refresh callback. + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + self.assertEqual(self.refresh_called, 1) + self.assertEqual(len(_oidc_cache), 1) + + def test_cache_with_no_refresh(self): + # Create a new client with a request callback callback. + # Give a callback response with a valid accessToken and an expiresInSeconds that is within one minute. + request_cb = self.create_request_cb() + + props = dict(request_token_callback=request_cb) + client = MongoClient(self.uri_single, authMechanismProperties=props) + + # Ensure that a ``find`` operation adds credentials to the cache. + self.request_called = 0 + client.test.test.find_one() + client.close() + self.assertEqual(self.request_called, 1) + self.assertEqual(len(_oidc_cache), 1) + + # Create a new client with the same request callback. + # Ensure that a ``find`` operation results in a call to the request callback. + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + self.assertEqual(self.request_called, 2) + self.assertEqual(len(_oidc_cache), 1) + + def test_cache_key_includes_callback(self): + request_cb = self.create_request_cb() + + props: Dict = dict(request_token_callback=request_cb) + + # Ensure that a ``find`` operation adds a new entry to the cache. + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + # Create a new client with a different request callback. + def request_token_2(a, b): + return request_cb(a, b) + + props["request_token_callback"] = request_token_2 + client = MongoClient(self.uri_single, authMechanismProperties=props) + + # Ensure that a ``find`` operation adds a new entry to the cache. + client.test.test.find_one() + client.close() + self.assertEqual(len(_oidc_cache), 2) + + def test_cache_clears_on_error(self): + request_cb = self.create_request_cb() + + # Create a new client with a valid request callback that gives credentials that expire within 5 minutes and a refresh callback that gives invalid credentials. + def refresh_cb(a, b): + return dict(access_token="bad") + + # Add a token to the cache that will expire soon. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authMechanismProperties=props) + client.test.test.find_one() + client.close() + + # Create a new client with the same callbacks. + client = MongoClient(self.uri_single, authMechanismProperties=props) + + # Ensure that another ``find`` operation results in an error. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + client.close() + + # Ensure that the cache has been cleared. + authenticator = list(_oidc_cache.values())[0] + self.assertIsNone(authenticator.idp_info) + + def test_cache_is_not_used_in_aws_automatic_workflow(self): + # Create a new client using the AWS device workflow. + # Ensure that a ``find`` operation does not add credentials to the cache. + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_single, authmechanismproperties=props) + client.test.test.find_one() + client.close() + + # Ensure that the cache has been cleared. + authenticator = list(_oidc_cache.values())[0] + self.assertIsNone(authenticator.idp_info) + + def test_speculative_auth_success(self): + # Clear the cache + _oidc_cache.clear() + token_file = os.path.join(self.token_dir, "test_user1") + + def request_token(a, b): + with open(token_file) as fid: + token = fid.read() + return dict(access_token=token, expires_in_seconds=1000) + + # Create a client with a request callback that returns a valid token + # that will not expire soon. + props: Dict = dict(request_token_callback=request_token) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Set a fail point for saslStart commands. + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + client.test.test.find_one() + + # Close the client. + client.close() + + # Create a new client. + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Set a fail point for saslStart commands. + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + client.test.test.find_one() + + # Close the client. + client.close() + + def test_reauthenticate_succeeds(self): + listener = EventListener() + + # Create request and refresh callbacks that return valid credentials + # that will not expire soon. + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient( + self.uri_single, event_listeners=[listener], authmechanismproperties=props + ) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + listener.reset() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + client.test.test.find_one() + + started_events = [ + i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") + ] + succeeded_events = [ + i.command_name + for i in listener.succeeded_events + if not i.command_name.startswith("sasl") + ] + failed_events = [ + i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl") + ] + + self.assertEqual( + started_events, + [ + "find", + "find", + ], + ) + self.assertEqual(succeeded_events, ["find"]) + self.assertEqual(failed_events, ["find"]) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_bulk_write(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_bulk_read(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform a find operation. + client.test.test.find_one() + + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + list(cursor) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_cursor(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_one({"a": 1}) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_get_more(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1) + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_get_more_exhaust(self): + # Ensure no mongos + props = dict(PROVIDER_NAME="aws") + client = MongoClient(self.uri_single, authmechanismproperties=props) + hello = client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") != "isdbgrid": + raise unittest.SkipTest("Must not be a mongos") + + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST) + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_succeeds_command(self): + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + + print("start of test") + client = MongoClient(self.uri_single, authmechanismproperties=props) + + # Perform an insert operation. + client.test.test.insert_one({"a": 1}) + + # Assert that the refresh callback has not been called. + self.assertEqual(self.refresh_called, 0) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["count"], "errorCode": 391}, + } + ): + # Perform a count operation. + cursor = client.test.command(dict(count="test")) + + self.assertGreaterEqual(len(list(cursor)), 1) + + # Assert that the refresh callback has been called. + self.assertEqual(self.refresh_called, 1) + client.close() + + def test_reauthenticate_retries_and_succeeds_with_cache(self): + listener = EventListener() + + # Create request and refresh callbacks that return valid credentials + # that will not expire soon. + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient( + self.uri_single, event_listeners=[listener], authmechanismproperties=props + ) + + # Perform a find operation. + client.test.test.find_one() + + # Set a fail point for ``saslStart`` commands of the form + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["find", "saslStart"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Close the client. + client.close() + + def test_reauthenticate_fails_with_no_cache(self): + listener = EventListener() + + # Create request and refresh callbacks that return valid credentials + # that will not expire soon. + request_cb = self.create_request_cb() + refresh_cb = self.create_refresh_cb() + + # Create a client with the callbacks. + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client = MongoClient( + self.uri_single, event_listeners=[listener], authmechanismproperties=props + ) + + # Perform a find operation. + client.test.test.find_one() + + # Clear the cache. + _oidc_cache.clear() + + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["find", "saslStart"], "errorCode": 391}, + } + ): + # Perform a find operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + client.close() + + def test_late_reauth_avoids_callback(self): + # Step 1: connect with both clients + request_cb = self.create_request_cb(expires_in_seconds=1e6) + refresh_cb = self.create_refresh_cb(expires_in_seconds=1e6) + + props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + client1 = MongoClient(self.uri_single, authMechanismProperties=props) + client1.test.test.find_one() + client2 = MongoClient(self.uri_single, authMechanismProperties=props) + client2.test.test.find_one() + + self.assertEqual(self.refresh_called, 0) + self.assertEqual(self.request_called, 1) + + # Step 2: cause a find 391 on the first client + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client1.test.test.find_one() + + self.assertEqual(self.refresh_called, 1) + self.assertEqual(self.request_called, 1) + + # Step 3: cause a find 391 on the second client + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client2.test.test.find_one() + + self.assertEqual(self.refresh_called, 1) + self.assertEqual(self.request_called, 1) + + client1.close() + client2.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 9f2fa374ac..78f4d21929 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -22,6 +22,7 @@ sys.path[0:0] = [""] from test import unittest +from test.unified_format import generate_test_classes from pymongo import MongoClient @@ -41,7 +42,16 @@ def run_test(self): if not valid: self.assertRaises(Exception, MongoClient, uri, connect=False) else: - client = MongoClient(uri, connect=False) + props = {} + if credential: + props = credential["mechanism_properties"] or {} + if props.get("REQUEST_TOKEN_CALLBACK"): + props["request_token_callback"] = lambda x, y: 1 + del props["REQUEST_TOKEN_CALLBACK"] + if props.get("REFRESH_TOKEN_CALLBACK"): + props["refresh_token_callback"] = lambda a, b: 1 + del props["REFRESH_TOKEN_CALLBACK"] + client = MongoClient(uri, connect=False, authmechanismproperties=props) credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) @@ -70,6 +80,16 @@ def run_test(self): self.assertEqual( actual.aws_session_token, expected["AWS_SESSION_TOKEN"] ) + elif "PROVIDER_NAME" in expected: + self.assertEqual(actual.provider_name, expected["PROVIDER_NAME"]) + elif "request_token_callback" in expected: + self.assertEqual( + actual.request_token_callback, expected["request_token_callback"] + ) + elif "refresh_token_callback" in expected: + self.assertEqual( + actual.refresh_token_callback, expected["refresh_token_callback"] + ) else: self.fail("Unhandled property: %s" % (key,)) else: @@ -82,7 +102,7 @@ def run_test(self): def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as auth_tests: test_cases = json.load(auth_tests)["tests"] @@ -97,5 +117,12 @@ def create_tests(): create_tests() +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + if __name__ == "__main__": unittest.main() From 0092b0af79378abf35b6db73a082ecb91af1d973 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 11 May 2023 15:27:17 -0700 Subject: [PATCH 0904/2111] PYTHON-2504 Run pyupgrade 3.4.0 and ruff 0.0.265 (#1196) pyupgrade --py37-plus bson/*.py pymongo/*.py gridfs/*.py test/*.py tools/*.py test/*/*.py ruff --fix-only --select ALL --fixable ALL --target-version py37 --line-length=100 --unfixable COM812,D400,D415,ERA001,RUF100,SIM108,D211,D212,SIM105,SIM,PT,ANN204,EM bson/*.py pymongo/*.py gridfs/*.py test/*.py test/*/*.py --- bson/__init__.py | 17 +- bson/_helpers.py | 4 +- bson/binary.py | 11 +- bson/code.py | 7 +- bson/codec_options.py | 24 +-- bson/dbref.py | 10 +- bson/decimal128.py | 6 +- bson/json_util.py | 71 ++++---- bson/max_key.py | 5 +- bson/min_key.py | 5 +- bson/objectid.py | 16 +- bson/raw_bson.py | 4 +- bson/regex.py | 5 +- bson/son.py | 16 +- bson/timestamp.py | 7 +- gridfs/__init__.py | 5 +- gridfs/grid_file.py | 18 +- pymongo/_csot.py | 2 +- pymongo/aggregation.py | 6 +- pymongo/auth.py | 16 +- pymongo/auth_aws.py | 6 +- pymongo/auth_oidc.py | 20 +-- pymongo/bulk.py | 5 +- pymongo/change_stream.py | 7 +- pymongo/client_options.py | 2 +- pymongo/client_session.py | 33 ++-- pymongo/collation.py | 14 +- pymongo/collection.py | 29 ++-- pymongo/command_cursor.py | 8 +- pymongo/common.py | 92 +++++----- pymongo/compression_support.py | 18 +- pymongo/cursor.py | 14 +- pymongo/database.py | 21 ++- pymongo/driver_info.py | 6 +- pymongo/encryption.py | 8 +- pymongo/encryption_options.py | 2 +- pymongo/errors.py | 24 +-- pymongo/helpers.py | 10 +- pymongo/message.py | 32 ++-- pymongo/mongo_client.py | 21 +-- pymongo/monitor.py | 8 +- pymongo/monitoring.py | 135 +++++++-------- pymongo/network.py | 14 +- pymongo/ocsp_cache.py | 2 +- pymongo/operations.py | 22 +-- pymongo/periodic_executor.py | 4 +- pymongo/pool.py | 44 +++-- pymongo/pyopenssl_context.py | 21 +-- pymongo/read_concern.py | 5 +- pymongo/read_preferences.py | 35 ++-- pymongo/response.py | 9 +- pymongo/results.py | 23 ++- pymongo/saslprep.py | 6 +- pymongo/server.py | 4 +- pymongo/server_api.py | 8 +- pymongo/server_description.py | 6 +- pymongo/server_selectors.py | 3 +- pymongo/settings.py | 4 +- pymongo/socket_checker.py | 4 +- pymongo/srv_resolver.py | 6 +- pymongo/ssl_support.py | 2 +- pymongo/topology.py | 18 +- pymongo/topology_description.py | 14 +- pymongo/typings.py | 3 +- pymongo/uri_parser.py | 19 +-- pymongo/write_concern.py | 6 +- test/__init__.py | 79 ++++----- test/atlas/test_connection.py | 2 +- test/auth_aws/test_auth_aws.py | 14 +- test/auth_aws/test_auth_oidc.py | 107 ++++++------ test/crud_v2_format.py | 4 +- test/mockupdb/operations.py | 2 +- test/mockupdb/test_handshake.py | 1 + test/mockupdb/test_mixed_version_sharded.py | 2 +- .../mockupdb/test_mongos_command_read_mode.py | 2 +- .../test_network_disconnect_primary.py | 2 +- test/mockupdb/test_op_msg.py | 2 +- test/mockupdb/test_op_msg_read_preference.py | 16 +- test/mockupdb/test_reset_and_request_check.py | 4 +- test/mockupdb/test_slave_okay_sharded.py | 6 +- test/mockupdb/test_slave_okay_single.py | 2 +- test/mod_wsgi_test/test_client.py | 14 +- test/ocsp/test_ocsp.py | 2 +- test/performance/perf_test.py | 39 ++--- test/pymongo_mocks.py | 8 +- test/qcheck.py | 7 +- test/sigstop_sigcont.py | 2 +- test/test_auth.py | 32 ++-- test/test_auth_spec.py | 6 +- test/test_binary.py | 14 +- test/test_bson.py | 11 +- test/test_bulk.py | 14 +- test/test_change_stream.py | 36 ++-- test/test_client.py | 44 +++-- test/test_client_context.py | 10 +- test/test_cmap.py | 16 +- test/test_code.py | 3 +- test/test_collation.py | 6 +- test/test_collection.py | 14 +- test/test_comment.py | 2 +- test/test_common.py | 8 +- ...nnections_survive_primary_stepdown_spec.py | 2 +- test/test_crud_v1.py | 2 +- test/test_cursor.py | 6 +- test/test_custom_types.py | 32 ++-- test/test_data_lake.py | 4 +- test/test_database.py | 13 +- test/test_dbref.py | 2 +- test/test_discovery_and_monitoring.py | 10 +- test/test_encryption.py | 62 +++---- test/test_examples.py | 32 ++-- test/test_grid_file.py | 12 +- test/test_gridfs.py | 14 +- test/test_gridfs_bucket.py | 12 +- test/test_load_balancer.py | 2 +- test/test_mongos_load_balancing.py | 14 +- test/test_monitor.py | 6 +- test/test_monitoring.py | 84 +++++----- test/test_on_demand_csfle.py | 8 +- test/test_pooling.py | 18 +- test/test_read_concern.py | 8 +- test/test_read_preferences.py | 35 ++-- test/test_read_write_concern_spec.py | 8 +- test/test_replica_set_reconfig.py | 12 +- test/test_retryable_reads.py | 10 +- test/test_retryable_writes.py | 46 ++--- test/test_sdam_monitoring_spec.py | 30 ++-- test/test_server_selection.py | 4 +- test/test_server_selection_in_window.py | 4 +- test/test_server_selection_rtt.py | 2 +- test/test_session.py | 55 +++--- test/test_son.py | 28 ++-- test/test_srv_polling.py | 2 +- test/test_ssl.py | 4 +- test/test_threads.py | 2 +- test/test_topology.py | 8 +- test/test_transactions.py | 12 +- test/test_typing.py | 13 +- test/test_uri_parser.py | 2 +- test/test_uri_spec.py | 17 +- test/test_write_concern.py | 2 +- test/unified_format.py | 157 +++++++++--------- test/utils.py | 60 +++---- test/utils_selection_tests.py | 10 +- test/utils_spec_runner.py | 22 +-- test/version.py | 2 +- 146 files changed, 1234 insertions(+), 1241 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index d95c511fc7..d0a8daa273 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -237,8 +237,8 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON( - "Detected unknown BSON type %r for fieldname '%s'. Are " - "you using the latest driver version?" % (chr(element_type).encode(), element_name) + "Detected unknown BSON type {!r} for fieldname '{}'. Are " + "you using the latest driver version?".format(chr(element_type).encode(), element_name) ) @@ -626,8 +626,7 @@ def gen_list_name() -> Generator[bytes, None, None]: The first 1000 keys are returned from a pre-built cache. All subsequent keys are generated on the fly. """ - for name in _LIST_NAMES: - yield name + yield from _LIST_NAMES counter = itertools.count(1000) while True: @@ -942,18 +941,18 @@ def _name_value_to_bson( name, fallback_encoder(value), check_keys, opts, in_fallback_call=True ) - raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) + raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}") def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) + raise InvalidDocument(f"documents must have only string keys, key was {key!r}") if check_keys: if key.startswith("$"): - raise InvalidDocument("key %r must not start with '$'" % (key,)) + raise InvalidDocument(f"key {key!r} must not start with '$'") if "." in key: - raise InvalidDocument("key %r must not contain '.'" % (key,)) + raise InvalidDocument(f"key {key!r} must not contain '.'") name = _make_name(key) return _name_value_to_bson(name, value, check_keys, opts) @@ -971,7 +970,7 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo if not top_level or key != "_id": elements.append(_element_to_bson(key, value, check_keys, opts)) except AttributeError: - raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) + raise TypeError(f"encoder expected a mapping type but got: {doc!r}") encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" diff --git a/bson/_helpers.py b/bson/_helpers.py index ee3b0f1099..5643d77c24 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -13,7 +13,7 @@ # limitations under the License. """Setstate and getstate functions for objects with __slots__, allowing - compatibility with default pickling protocol +compatibility with default pickling protocol """ from typing import Any, Mapping @@ -33,7 +33,7 @@ def _mangle_name(name: str, prefix: str) -> str: def _getstate_slots(self: Any) -> Mapping[Any, Any]: prefix = self.__class__.__name__ - ret = dict() + ret = {} for name in self.__slots__: mangled_name = _mangle_name(name, prefix) if hasattr(self, mangled_name): diff --git a/bson/binary.py b/bson/binary.py index a270eae8d2..77e3a3d478 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -306,7 +306,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI .. versionadded:: 3.11 """ if self.subtype not in ALL_UUID_SUBTYPES: - raise ValueError("cannot decode subtype %s as a uuid" % (self.subtype,)) + raise ValueError(f"cannot decode subtype {self.subtype} as a uuid") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( @@ -330,8 +330,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI return UUID(bytes=self) raise ValueError( - "cannot decode subtype %s to %s" - % (self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation]) + f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" ) @property @@ -341,7 +340,7 @@ def subtype(self) -> int: def __getnewargs__(self) -> Tuple[bytes, int]: # type: ignore[override] # Work around http://bugs.python.org/issue7382 - data = super(Binary, self).__getnewargs__()[0] + data = super().__getnewargs__()[0] if not isinstance(data, bytes): data = data.encode("latin-1") return data, self.__subtype @@ -355,10 +354,10 @@ def __eq__(self, other: Any) -> bool: return False def __hash__(self) -> int: - return super(Binary, self).__hash__() ^ hash(self.__subtype) + return super().__hash__() ^ hash(self.__subtype) def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - return "Binary(%s, %s)" % (bytes.__repr__(self), self.__subtype) + return f"Binary({bytes.__repr__(self)}, {self.__subtype})" diff --git a/bson/code.py b/bson/code.py index b732e82469..27ec588fae 100644 --- a/bson/code.py +++ b/bson/code.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing JavaScript code in BSON. -""" +"""Tools for representing JavaScript code in BSON.""" from collections.abc import Mapping as _Mapping from typing import Any, Mapping, Optional, Type, Union @@ -54,7 +53,7 @@ def __new__( cls: Type["Code"], code: Union[str, "Code"], scope: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> "Code": if not isinstance(code, str): raise TypeError("code must be an instance of str") @@ -88,7 +87,7 @@ def scope(self) -> Optional[Mapping[str, Any]]: return self.__scope def __repr__(self): - return "Code(%s, %r)" % (str.__repr__(self), self.__scope) + return f"Code({str.__repr__(self)}, {self.__scope!r})" def __eq__(self, other: Any) -> bool: if isinstance(other, Code): diff --git a/bson/codec_options.py b/bson/codec_options.py index 096be85264..a0bdd0eeb9 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -63,12 +63,10 @@ class TypeEncoder(abc.ABC): @abc.abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" - pass @abc.abstractmethod def transform_python(self, value: Any) -> Any: """Convert the given Python object into something serializable.""" - pass class TypeDecoder(abc.ABC): @@ -84,12 +82,10 @@ class TypeDecoder(abc.ABC): @abc.abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" - pass @abc.abstractmethod def transform_bson(self, value: Any) -> Any: """Convert the given BSON value into our own type.""" - pass class TypeCodec(TypeEncoder, TypeDecoder): @@ -105,14 +101,12 @@ class TypeCodec(TypeEncoder, TypeDecoder): See :ref:`custom-type-type-codec` documentation for an example. """ - pass - _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -class TypeRegistry(object): +class TypeRegistry: """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after instantiation. @@ -164,8 +158,7 @@ def __init__( self._decoder_map[codec.bson_type] = codec.transform_bson if not is_valid_codec: raise TypeError( - "Expected an instance of %s, %s, or %s, got %r instead" - % (TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec) + f"Expected an instance of {TypeEncoder.__name__}, {TypeDecoder.__name__}, or {TypeCodec.__name__}, got {codec!r} instead" ) def _validate_type_encoder(self, codec: _Codec) -> None: @@ -175,12 +168,12 @@ def _validate_type_encoder(self, codec: _Codec) -> None: if issubclass(cast(TypeCodec, codec).python_type, pytype): err_msg = ( "TypeEncoders cannot change how built-in types are " - "encoded (encoder %s transforms type %s)" % (codec, pytype) + "encoded (encoder {} transforms type {})".format(codec, pytype) ) raise TypeError(err_msg) def __repr__(self): - return "%s(type_codecs=%r, fallback_encoder=%r)" % ( + return "{}(type_codecs={!r}, fallback_encoder={!r})".format( self.__class__.__name__, self.__type_codecs, self._fallback_encoder, @@ -446,10 +439,9 @@ def _arguments_repr(self) -> str: ) return ( - "document_class=%s, tz_aware=%r, uuid_representation=%s, " - "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r, datetime_conversion=%s" - % ( + "document_class={}, tz_aware={!r}, uuid_representation={}, " + "unicode_decode_error_handler={!r}, tzinfo={!r}, " + "type_registry={!r}, datetime_conversion={!s}".format( document_class_repr, self.tz_aware, uuid_rep_repr, @@ -474,7 +466,7 @@ def _options_dict(self) -> Dict[str, Any]: } def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) + return f"{self.__class__.__name__}({self._arguments_repr()})" def with_options(self, **kwargs: Any) -> "CodecOptions": """Make a copy of this CodecOptions, overriding some options:: diff --git a/bson/dbref.py b/bson/dbref.py index 7849435f23..491278e6f4 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -21,7 +21,7 @@ from bson.son import SON -class DBRef(object): +class DBRef: """A reference to a document stored in MongoDB.""" __slots__ = "__collection", "__id", "__database", "__kwargs" @@ -36,7 +36,7 @@ def __init__( id: Any, database: Optional[str] = None, _extra: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> None: """Initialize a new :class:`DBRef`. @@ -102,10 +102,10 @@ def as_doc(self) -> SON[str, Any]: return doc def __repr__(self): - extra = "".join([", %s=%r" % (k, v) for k, v in self.__kwargs.items()]) + extra = "".join([f", {k}={v!r}" for k, v in self.__kwargs.items()]) if self.database is None: - return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) + return f"DBRef({self.collection!r}, {self.id!r}{extra})" + return f"DBRef({self.collection!r}, {self.id!r}, {self.database!r}{extra})" def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): diff --git a/bson/decimal128.py b/bson/decimal128.py index bce5b251e9..0e24b5bbae 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -115,7 +115,7 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: return high, low -class Decimal128(object): +class Decimal128: """BSON Decimal128 type:: >>> Decimal128(Decimal("0.0005")) @@ -226,7 +226,7 @@ def __init__(self, value: _VALUE_OPTIONS) -> None: ) self.__high, self.__low = value # type: ignore else: - raise TypeError("Cannot convert %r to Decimal128" % (value,)) + raise TypeError(f"Cannot convert {value!r} to Decimal128") def to_decimal(self) -> decimal.Decimal: """Returns an instance of :class:`decimal.Decimal` for this @@ -297,7 +297,7 @@ def __str__(self) -> str: return str(dec) def __repr__(self): - return "Decimal128('%s')" % (str(self),) + return f"Decimal128('{str(self)}')" def __setstate__(self, value: Tuple[int, int]) -> None: self.__high, self.__low = value diff --git a/bson/json_util.py b/bson/json_util.py index 8842d5c74d..bc566fa982 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -288,7 +288,7 @@ def __new__( strict_uuid: Optional[bool] = None, json_mode: int = JSONMode.RELAXED, *args: Any, - **kwargs: Any + **kwargs: Any, ) -> "JSONOptions": kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: @@ -303,7 +303,7 @@ def __new__( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation." ) - self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) + self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " @@ -350,21 +350,20 @@ def __new__( def _arguments_repr(self) -> str: return ( - "strict_number_long=%r, " - "datetime_representation=%r, " - "strict_uuid=%r, json_mode=%r, %s" - % ( + "strict_number_long={!r}, " + "datetime_representation={!r}, " + "strict_uuid={!r}, json_mode={!r}, {}".format( self.strict_number_long, self.datetime_representation, self.strict_uuid, self.json_mode, - super(JSONOptions, self)._arguments_repr(), + super()._arguments_repr(), ) ) def _options_dict(self) -> Dict[Any, Any]: # TODO: PYTHON-2442 use _asdict() instead - options_dict = super(JSONOptions, self)._options_dict() + options_dict = super()._options_dict() options_dict.update( { "strict_number_long": self.strict_number_long, @@ -492,7 +491,7 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> if hasattr(obj, "items"): return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): - return list((_json_convert(v, json_options) for v in obj)) + return [_json_convert(v, json_options) for v in obj] try: return default(obj, json_options) except TypeError: @@ -568,9 +567,9 @@ def _parse_legacy_regex(doc: Any) -> Any: def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]: """Decode a JSON legacy $uuid to Python UUID.""" if len(doc) != 1: - raise TypeError("Bad $uuid, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $uuid, extra field(s): {doc}") if not isinstance(doc["$uuid"], str): - raise TypeError("$uuid must be a string: %s" % (doc,)) + raise TypeError(f"$uuid must be a string: {doc}") if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED: return Binary.from_uuid(uuid.UUID(doc["$uuid"])) else: @@ -613,11 +612,11 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary b64 = binary["base64"] subtype = binary["subType"] if not isinstance(b64, str): - raise TypeError("$binary base64 must be a string: %s" % (doc,)) + raise TypeError(f"$binary base64 must be a string: {doc}") if not isinstance(subtype, str) or len(subtype) > 2: - raise TypeError("$binary subType must be a string at most 2 characters: %s" % (doc,)) + raise TypeError(f"$binary subType must be a string at most 2 characters: {doc}") if len(binary) != 2: - raise TypeError('$binary must include only "base64" and "subType" components: %s' % (doc,)) + raise TypeError(f'$binary must include only "base64" and "subType" components: {doc}') data = base64.b64decode(b64.encode()) return _binary_or_uuid(data, int(subtype, 16), json_options) @@ -629,7 +628,7 @@ def _parse_canonical_datetime( """Decode a JSON datetime to python datetime.datetime.""" dtm = doc["$date"] if len(doc) != 1: - raise TypeError("Bad $date, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $date, extra field(s): {doc}") # mongoexport 2.6 and newer if isinstance(dtm, str): # Parse offset @@ -692,7 +691,7 @@ def _parse_canonical_datetime( def _parse_canonical_oid(doc: Any) -> ObjectId: """Decode a JSON ObjectId to bson.objectid.ObjectId.""" if len(doc) != 1: - raise TypeError("Bad $oid, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $oid, extra field(s): {doc}") return ObjectId(doc["$oid"]) @@ -700,7 +699,7 @@ def _parse_canonical_symbol(doc: Any) -> str: """Decode a JSON symbol to Python string.""" symbol = doc["$symbol"] if len(doc) != 1: - raise TypeError("Bad $symbol, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $symbol, extra field(s): {doc}") return str(symbol) @@ -708,7 +707,7 @@ def _parse_canonical_code(doc: Any) -> Code: """Decode a JSON code to bson.code.Code.""" for key in doc: if key not in ("$code", "$scope"): - raise TypeError("Bad $code, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $code, extra field(s): {doc}") return Code(doc["$code"], scope=doc.get("$scope")) @@ -716,11 +715,11 @@ def _parse_canonical_regex(doc: Any) -> Regex: """Decode a JSON regex to bson.regex.Regex.""" regex = doc["$regularExpression"] if len(doc) != 1: - raise TypeError("Bad $regularExpression, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $regularExpression, extra field(s): {doc}") if len(regex) != 2: raise TypeError( 'Bad $regularExpression must include only "pattern"' - 'and "options" components: %s' % (doc,) + 'and "options" components: {}'.format(doc) ) opts = regex["options"] if not isinstance(opts, str): @@ -739,28 +738,28 @@ def _parse_canonical_dbpointer(doc: Any) -> Any: """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef.""" dbref = doc["$dbPointer"] if len(doc) != 1: - raise TypeError("Bad $dbPointer, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s): {doc}") if isinstance(dbref, DBRef): dbref_doc = dbref.as_doc() # DBPointer must not contain $db in its value. if dbref.database is not None: - raise TypeError("Bad $dbPointer, extra field $db: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field $db: {dbref_doc}") if not isinstance(dbref.id, ObjectId): - raise TypeError("Bad $dbPointer, $id must be an ObjectId: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, $id must be an ObjectId: {dbref_doc}") if len(dbref_doc) != 2: - raise TypeError("Bad $dbPointer, extra field(s) in DBRef: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s) in DBRef: {dbref_doc}") return dbref else: - raise TypeError("Bad $dbPointer, expected a DBRef: %s" % (doc,)) + raise TypeError(f"Bad $dbPointer, expected a DBRef: {doc}") def _parse_canonical_int32(doc: Any) -> int: """Decode a JSON int32 to python int.""" i_str = doc["$numberInt"] if len(doc) != 1: - raise TypeError("Bad $numberInt, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberInt, extra field(s): {doc}") if not isinstance(i_str, str): - raise TypeError("$numberInt must be string: %s" % (doc,)) + raise TypeError(f"$numberInt must be string: {doc}") return int(i_str) @@ -768,7 +767,7 @@ def _parse_canonical_int64(doc: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" l_str = doc["$numberLong"] if len(doc) != 1: - raise TypeError("Bad $numberLong, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberLong, extra field(s): {doc}") return Int64(l_str) @@ -776,9 +775,9 @@ def _parse_canonical_double(doc: Any) -> float: """Decode a JSON double to python float.""" d_str = doc["$numberDouble"] if len(doc) != 1: - raise TypeError("Bad $numberDouble, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberDouble, extra field(s): {doc}") if not isinstance(d_str, str): - raise TypeError("$numberDouble must be string: %s" % (doc,)) + raise TypeError(f"$numberDouble must be string: {doc}") return float(d_str) @@ -786,18 +785,18 @@ def _parse_canonical_decimal128(doc: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" d_str = doc["$numberDecimal"] if len(doc) != 1: - raise TypeError("Bad $numberDecimal, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberDecimal, extra field(s): {doc}") if not isinstance(d_str, str): - raise TypeError("$numberDecimal must be string: %s" % (doc,)) + raise TypeError(f"$numberDecimal must be string: {doc}") return Decimal128(d_str) def _parse_canonical_minkey(doc: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: - raise TypeError("$minKey value must be 1: %s" % (doc,)) + raise TypeError(f"$minKey value must be 1: {doc}") if len(doc) != 1: - raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $minKey, extra field(s): {doc}") return MinKey() @@ -806,7 +805,7 @@ def _parse_canonical_maxkey(doc: Any) -> MaxKey: if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: raise TypeError("$maxKey value must be 1: %s", (doc,)) if len(doc) != 1: - raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $minKey, extra field(s): {doc}") return MaxKey() @@ -839,7 +838,7 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: millis = int(obj.microsecond / 1000) fracsecs = ".%03d" % (millis,) if millis else "" return { - "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) + "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) } millis = _datetime_to_millis(obj) diff --git a/bson/max_key.py b/bson/max_key.py index b4f38d072e..eb5705d378 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MaxKey type. -""" +"""Representation for the MongoDB internal MaxKey type.""" from typing import Any -class MaxKey(object): +class MaxKey: """MongoDB internal MaxKey type.""" __slots__ = () diff --git a/bson/min_key.py b/bson/min_key.py index babc655e43..2c8f73d560 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MinKey type. -""" +"""Representation for the MongoDB internal MinKey type.""" from typing import Any -class MinKey(object): +class MinKey: """MongoDB internal MinKey type.""" __slots__ = () diff --git a/bson/objectid.py b/bson/objectid.py index 1fab986b8b..b045e93d04 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB ObjectIds. -""" +"""Tools for working with MongoDB ObjectIds.""" import binascii import calendar @@ -43,7 +42,7 @@ def _random_bytes() -> bytes: return os.urandom(5) -class ObjectId(object): +class ObjectId: """A MongoDB ObjectId.""" _pid = os.getpid() @@ -166,7 +165,6 @@ def _random(cls) -> bytes: def __generate(self) -> None: """Generate a new value for this ObjectId.""" - # 4 bytes current time oid = struct.pack(">I", int(time.time())) @@ -202,9 +200,7 @@ def __validate(self, oid: Any) -> None: else: _raise_invalid_id(oid) else: - raise TypeError( - "id must be an instance of (bytes, str, ObjectId), not %s" % (type(oid),) - ) + raise TypeError(f"id must be an instance of (bytes, str, ObjectId), not {type(oid)}") @property def binary(self) -> bytes: @@ -224,13 +220,13 @@ def generation_time(self) -> datetime.datetime: return datetime.datetime.fromtimestamp(timestamp, utc) def __getstate__(self) -> bytes: - """return value of object for pickling. + """Return value of object for pickling. needed explicitly because __slots__() defined. """ return self.__id def __setstate__(self, value: Any) -> None: - """explicit state set from pickling""" + """Explicit state set from pickling""" # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. if isinstance(value, dict): @@ -249,7 +245,7 @@ def __str__(self) -> str: return binascii.hexlify(self.__id).decode() def __repr__(self): - return "ObjectId('%s')" % (str(self),) + return f"ObjectId('{str(self)}')" def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 2c2b3c97ca..bb1dbd22a5 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -131,7 +131,7 @@ class from the standard library so it can be used like a read-only elif not issubclass(codec_options.document_class, RawBSONDocument): raise TypeError( "RawBSONDocument cannot use CodecOptions with document " - "class %s" % (codec_options.document_class,) + "class {}".format(codec_options.document_class) ) self.__codec_options = codec_options # Validate the bson object size. @@ -174,7 +174,7 @@ def __eq__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return "%s(%r, codec_options=%r)" % ( + return "{}({!r}, codec_options={!r})".format( self.__class__.__name__, self.raw, self.__codec_options, diff --git a/bson/regex.py b/bson/regex.py index 3e98477198..c06e493f38 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB regular expressions. -""" +"""Tools for representing MongoDB regular expressions.""" import re from typing import Any, Generic, Pattern, Type, TypeVar, Union @@ -117,7 +116,7 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - return "Regex(%r, %r)" % (self.pattern, self.flags) + return f"Regex({self.pattern!r}, {self.flags!r})" def try_compile(self) -> "Pattern[_T]": """Compile this :class:`Regex` as a Python regular expression. diff --git a/bson/son.py b/bson/son.py index bba108aa80..482e8d2584 100644 --- a/bson/son.py +++ b/bson/son.py @@ -16,7 +16,8 @@ Regular dictionaries can be used instead of SON objects, but not when the order of keys is important. A SON object can be used just like a normal Python -dictionary.""" +dictionary. +""" import copy import re @@ -58,7 +59,7 @@ class SON(Dict[_Key, _Value]): def __init__( self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, - **kwargs: Any + **kwargs: Any, ) -> None: self.__keys = [] dict.__init__(self) @@ -66,14 +67,14 @@ def __init__( self.update(kwargs) def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": - instance = super(SON, cls).__new__(cls, *args, **kwargs) # type: ignore[type-var] + instance = super().__new__(cls, *args, **kwargs) # type: ignore[type-var] instance.__keys = [] return instance def __repr__(self): result = [] for key in self.__keys: - result.append("(%r, %r)" % (key, self[key])) + result.append(f"({key!r}, {self[key]!r})") return "SON([%s])" % ", ".join(result) def __setitem__(self, key: _Key, value: _Value) -> None: @@ -94,8 +95,7 @@ def copy(self) -> "SON[_Key, _Value]": # efficient. # second level definitions support higher levels def __iter__(self) -> Iterator[_Key]: - for k in self.__keys: - yield k + yield from self.__keys def has_key(self, key: _Key) -> bool: return key in self.__keys @@ -113,7 +113,7 @@ def values(self) -> List[_Value]: # type: ignore[override] def clear(self) -> None: self.__keys = [] - super(SON, self).clear() + super().clear() def setdefault(self, key: _Key, default: _Value) -> _Value: try: @@ -189,7 +189,7 @@ def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): - return dict([(k, transform_value(v)) for k, v in value.items()]) + return {k: transform_value(v) for k, v in value.items()} else: return value diff --git a/bson/timestamp.py b/bson/timestamp.py index a333b9fa3e..5591b60e41 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB internal Timestamps. -""" +"""Tools for representing MongoDB internal Timestamps.""" import calendar import datetime @@ -25,7 +24,7 @@ UPPERBOUND = 4294967296 -class Timestamp(object): +class Timestamp: """MongoDB internal timestamps used in the opLog.""" __slots__ = ("__time", "__inc") @@ -113,7 +112,7 @@ def __ge__(self, other: Any) -> bool: return NotImplemented def __repr__(self): - return "Timestamp(%s, %s)" % (self.__time, self.__inc) + return f"Timestamp({self.__time}, {self.__inc})" def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 692567b2de..9a4cda5527 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -53,7 +53,7 @@ ] -class GridFS(object): +class GridFS: """An instance of GridFS on top of a single Database.""" def __init__(self, database: Database, collection: str = "fs"): @@ -141,7 +141,6 @@ def put(self, data: Any, **kwargs: Any) -> Any: .. versionchanged:: 3.0 w=0 writes to GridFS are now prohibited. """ - with GridIn(self.__collection, **kwargs) as grid_file: grid_file.write(data) return grid_file._id @@ -449,7 +448,7 @@ def exists( return f is not None -class GridFSBucket(object): +class GridFSBucket: """An instance of GridFS on top of a single Database.""" def __init__( diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index 5ec6352684..fd260963d7 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -76,7 +76,7 @@ def setter(self: Any, value: Any) -> Any: if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: - docstring = "%s\n\n%s" % ( + docstring = "{}\n\n{}".format( docstring, "This attribute is read-only and " "can only be read after :meth:`close` " @@ -114,7 +114,7 @@ def _disallow_transactions(session: Optional[ClientSession]) -> None: raise InvalidOperation("GridFS does not support multi-document transactions") -class GridIn(object): +class GridIn: """Class to write data to GridFS.""" def __init__( @@ -497,7 +497,7 @@ def _ensure_file(self) -> None: self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) if not self._file: raise NoFile( - "no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id) + f"no file in gridfs collection {self.__files!r} with _id {self.__file_id!r}" ) def __getattr__(self, name: str) -> Any: @@ -640,10 +640,10 @@ def seek(self, pos: int, whence: int = _SEEK_SET) -> int: elif whence == _SEEK_END: new_pos = int(self.length) + pos else: - raise IOError(22, "Invalid value for `whence`") + raise OSError(22, "Invalid value for `whence`") if new_pos < 0: - raise IOError(22, "Invalid value for `pos` - must be positive") + raise OSError(22, "Invalid value for `pos` - must be positive") # Optimization, continue using the same buffer and chunk iterator. if new_pos == self.__position: @@ -732,7 +732,7 @@ def __del__(self) -> None: pass -class _GridOutChunkIterator(object): +class _GridOutChunkIterator: """Iterates over a file's chunks using a single cursor. Raises CorruptGridFile when encountering any truncated, missing, or extra @@ -832,7 +832,7 @@ def close(self) -> None: self._cursor = None -class GridOutIterator(object): +class GridOutIterator: def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) @@ -878,7 +878,7 @@ def __init__( # Hold on to the base "fs" collection to create GridOut objects later. self.__root_collection = collection - super(GridOutCursor, self).__init__( + super().__init__( collection.files, filter, skip=skip, @@ -892,7 +892,7 @@ def __init__( def next(self) -> GridOut: """Get next GridOut object from cursor.""" _disallow_transactions(self.session) - next_file = super(GridOutCursor, self).next() + next_file = super().next() return GridOut(self.__root_collection, file_document=next_file, session=self.session) __next__ = next diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 8a4617ecaf..7a5a8a7302 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -57,7 +57,7 @@ def clamp_remaining(max_timeout: float) -> float: return min(timeout, max_timeout) -class _TimeoutContext(object): +class _TimeoutContext: """Internal timeout context manager. Use :func:`pymongo.timeout` instead:: diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index a13f164f53..a97455cb29 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -21,7 +21,7 @@ from pymongo.read_preferences import ReadPreference, _AggWritePref -class _AggregationCommand(object): +class _AggregationCommand: """The internal abstract base class for aggregation cursors. Should not be called directly by application developers. Use @@ -202,7 +202,7 @@ def _database(self): class _CollectionRawAggregationCommand(_CollectionAggregationCommand): def __init__(self, *args, **kwargs): - super(_CollectionRawAggregationCommand, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) # For raw-batches, we set the initial batchSize for the cursor to 0. if not self._performs_write: @@ -216,7 +216,7 @@ def _aggregation_target(self): @property def _cursor_namespace(self): - return "%s.$cmd.aggregate" % (self._target.name,) + return f"{self._target.name}.$cmd.aggregate" @property def _database(self): diff --git a/pymongo/auth.py b/pymongo/auth.py index 4bc31ee97b..ac7cb254e9 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -61,7 +61,7 @@ """The authentication mechanisms supported by PyMongo.""" -class _Cache(object): +class _Cache: __slots__ = ("data",) _hash_val = hash("_Cache") @@ -104,7 +104,7 @@ def __hash__(self): def _build_credentials_tuple(mech, source, user, passwd, extra, database): """Build and return a mechanism specific credentials tuple.""" if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: - raise ConfigurationError("%s requires a username." % (mech,)) + raise ConfigurationError(f"{mech} requires a username.") if mech == "GSSAPI": if source is not None and source != "$external": raise ValueError("authentication source must be $external or None for GSSAPI") @@ -297,7 +297,7 @@ def _password_digest(username, password): raise TypeError("username must be an instance of str") md5hash = hashlib.md5() - data = "%s:mongo:%s" % (username, password) + data = f"{username}:mongo:{password}" md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() @@ -306,7 +306,7 @@ def _auth_key(nonce, username, password): """Get an auth key to use for authentication.""" digest = _password_digest(username, password) md5hash = hashlib.md5() - data = "%s%s%s" % (nonce, username, digest) + data = f"{nonce}{username}{digest}" md5hash.update(data.encode("utf-8")) return md5hash.hexdigest() @@ -448,7 +448,7 @@ def _authenticate_plain(credentials, sock_info): source = credentials.source username = credentials.username password = credentials.password - payload = ("\x00%s\x00%s" % (username, password)).encode("utf-8") + payload = (f"\x00{username}\x00{password}").encode() cmd = SON( [ ("saslStart", 1), @@ -518,7 +518,7 @@ def _authenticate_default(credentials, sock_info): } -class _AuthContext(object): +class _AuthContext: def __init__(self, credentials, address): self.credentials = credentials self.speculative_authenticate = None @@ -543,7 +543,7 @@ def speculate_succeeded(self): class _ScramContext(_AuthContext): def __init__(self, credentials, address, mechanism): - super(_ScramContext, self).__init__(credentials, address) + super().__init__(credentials, address) self.scram_data = None self.mechanism = mechanism @@ -569,7 +569,7 @@ def speculate_command(self): authenticator = _get_authenticator(self.credentials, self.address) cmd = authenticator.auth_start_cmd(False) if cmd is None: - return + return None cmd["db"] = self.credentials.source return cmd diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index e84465ea66..bfa4c731d3 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -21,7 +21,7 @@ _HAVE_MONGODB_AWS = True except ImportError: - class AwsSaslContext(object): # type: ignore + class AwsSaslContext: # type: ignore def __init__(self, credentials): pass @@ -102,9 +102,7 @@ def _authenticate_aws(credentials, sock_info): # Clear the cached credentials if we hit a failure in auth. set_cached_credentials(None) # Convert to OperationFailure and include pymongo-auth-aws version. - raise OperationFailure( - "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) - ) + raise OperationFailure(f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})") except Exception: # Clear the cached credentials if we hit a failure in auth. set_cached_credentials(None) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index 530b1bb068..543dc0200d 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -131,11 +131,11 @@ def get_current_token(self, use_callbacks=True): refresh_token = self.idp_resp and self.idp_resp.get("refresh_token") refresh_token = refresh_token or "" - context = dict( - timeout_seconds=timeout, - version=CALLBACK_VERSION, - refresh_token=refresh_token, - ) + context = { + "timeout_seconds": timeout, + "version": CALLBACK_VERSION, + "refresh_token": refresh_token, + } if self.idp_resp is None or refresh_cb is None: self.idp_resp = request_cb(self.idp_info, context) @@ -181,7 +181,7 @@ def auth_start_cmd(self, use_callbacks=True): aws_identity_file = os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] with open(aws_identity_file) as fid: token = fid.read().strip() - payload = dict(jwt=token) + payload = {"jwt": token} cmd = SON( [ ("saslStart", 1), @@ -203,7 +203,7 @@ def auth_start_cmd(self, use_callbacks=True): if self.idp_info is None: # Send the SASL start with the optional principal name. - payload = dict() + payload = {} if principal_name: payload["n"] = principal_name @@ -221,7 +221,7 @@ def auth_start_cmd(self, use_callbacks=True): token = self.get_current_token(use_callbacks) if not token: return None - bin_payload = Binary(bson.encode(dict(jwt=token))) + bin_payload = Binary(bson.encode({"jwt": token})) return SON( [ ("saslStart", 1), @@ -268,7 +268,7 @@ def authenticate(self, sock_info, reauthenticate=False): if resp["done"]: sock_info.oidc_token_gen_id = self.token_gen_id - return + return None server_resp: Dict = bson.decode(resp["payload"]) if "issuer" in server_resp: @@ -278,7 +278,7 @@ def authenticate(self, sock_info, reauthenticate=False): conversation_id = resp["conversationId"] token = self.get_current_token() sock_info.oidc_token_gen_id = self.token_gen_id - bin_payload = Binary(bson.encode(dict(jwt=token))) + bin_payload = Binary(bson.encode({"jwt": token})) cmd = SON( [ ("saslContinue", 1), diff --git a/pymongo/bulk.py b/pymongo/bulk.py index b21b576aa5..b0f61b9f9f 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -60,7 +60,7 @@ _COMMANDS = ("insert", "update", "delete") -class _Run(object): +class _Run: """Represents a batch of write operations.""" def __init__(self, op_type): @@ -136,7 +136,7 @@ def _raise_bulk_write_error(full_result: Any) -> NoReturn: raise BulkWriteError(full_result) -class _Bulk(object): +class _Bulk: """The private guts of the bulk write API.""" def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): @@ -509,5 +509,6 @@ def execute(self, write_concern, session): if not write_concern.acknowledged: with client._socket_for_writes(session) as sock_info: self.execute_no_results(sock_info, generator, write_concern) + return None else: return self.execute_command(generator, write_concern, session) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 775f93c79a..c53f981188 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -156,7 +156,8 @@ def _aggregation_command_class(self): @property def _client(self): """The client against which the aggregation commands for - this ChangeStream will be run.""" + this ChangeStream will be run. + """ raise NotImplementedError def _change_stream_options(self): @@ -221,7 +222,7 @@ def _process_result(self, result, sock_info): if self._start_at_operation_time is None: raise OperationFailure( "Expected field 'operationTime' missing from command " - "response : %r" % (result,) + "response : {!r}".format(result) ) def _run_aggregation_cmd(self, session, explicit_session): @@ -473,6 +474,6 @@ class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): """ def _change_stream_options(self): - options = super(ClusterChangeStream, self)._change_stream_options() + options = super()._change_stream_options() options["allChangesForCluster"] = True return options diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 882474e258..c9f63dc95a 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -167,7 +167,7 @@ def _parse_pool_options(username, password, database, options): ) -class ClientOptions(object): +class ClientOptions: """Read only configuration options for a MongoClient. Should not be instantiated directly by application developers. Access diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 1ec0b16476..dbc5f3aa8d 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -169,7 +169,7 @@ from pymongo.write_concern import WriteConcern -class SessionOptions(object): +class SessionOptions: """Options for a new :class:`ClientSession`. :Parameters: @@ -203,8 +203,9 @@ def __init__( if not isinstance(default_transaction_options, TransactionOptions): raise TypeError( "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: %r" - % (default_transaction_options,) + "pymongo.client_session.TransactionOptions, not: {!r}".format( + default_transaction_options + ) ) self._default_transaction_options = default_transaction_options self._snapshot = snapshot @@ -232,7 +233,7 @@ def snapshot(self) -> Optional[bool]: return self._snapshot -class TransactionOptions(object): +class TransactionOptions: """Options for :meth:`ClientSession.start_transaction`. :Parameters: @@ -275,25 +276,25 @@ def __init__( if not isinstance(read_concern, ReadConcern): raise TypeError( "read_concern must be an instance of " - "pymongo.read_concern.ReadConcern, not: %r" % (read_concern,) + "pymongo.read_concern.ReadConcern, not: {!r}".format(read_concern) ) if write_concern is not None: if not isinstance(write_concern, WriteConcern): raise TypeError( "write_concern must be an instance of " - "pymongo.write_concern.WriteConcern, not: %r" % (write_concern,) + "pymongo.write_concern.WriteConcern, not: {!r}".format(write_concern) ) if not write_concern.acknowledged: raise ConfigurationError( "transactions do not support unacknowledged write concern" - ": %r" % (write_concern,) + ": {!r}".format(write_concern) ) if read_preference is not None: if not isinstance(read_preference, _ServerMode): raise TypeError( - "%r is not valid for read_preference. See " + "{!r} is not valid for read_preference. See " "pymongo.read_preferences for valid " - "options." % (read_preference,) + "options.".format(read_preference) ) if max_commit_time_ms is not None: if not isinstance(max_commit_time_ms, int): @@ -340,12 +341,12 @@ def _validate_session_write_concern(session, write_concern): else: raise ConfigurationError( "Explicit sessions are incompatible with " - "unacknowledged write concern: %r" % (write_concern,) + "unacknowledged write concern: {!r}".format(write_concern) ) return session -class _TransactionContext(object): +class _TransactionContext: """Internal transaction context manager for start_transaction.""" def __init__(self, session): @@ -362,7 +363,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.__session.abort_transaction() -class _TxnState(object): +class _TxnState: NONE = 1 STARTING = 2 IN_PROGRESS = 3 @@ -371,7 +372,7 @@ class _TxnState(object): ABORTED = 6 -class _Transaction(object): +class _Transaction: """Internal class to hold transaction information in a ClientSession.""" def __init__(self, opts, client): @@ -973,7 +974,7 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): if read_preference != ReadPreference.PRIMARY: raise InvalidOperation( "read preference in a transaction must be primary, not: " - "%r" % (read_preference,) + "{!r}".format(read_preference) ) if self._transaction.state == _TxnState.STARTING: @@ -1023,7 +1024,7 @@ def inc_transaction_id(self): self.started_retryable_write = True -class _ServerSession(object): +class _ServerSession: def __init__(self, generation): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} @@ -1062,7 +1063,7 @@ class _ServerSessionPool(collections.deque): """ def __init__(self, *args, **kwargs): - super(_ServerSessionPool, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.generation = 0 def reset(self): diff --git a/pymongo/collation.py b/pymongo/collation.py index 3d8503f7d5..bdc996be1b 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -21,7 +21,7 @@ from pymongo import common -class CollationStrength(object): +class CollationStrength: """ An enum that defines values for `strength` on a :class:`~pymongo.collation.Collation`. @@ -43,7 +43,7 @@ class CollationStrength(object): """Differentiate unicode code point (characters are exactly identical).""" -class CollationAlternate(object): +class CollationAlternate: """ An enum that defines values for `alternate` on a :class:`~pymongo.collation.Collation`. @@ -62,7 +62,7 @@ class CollationAlternate(object): """ -class CollationMaxVariable(object): +class CollationMaxVariable: """ An enum that defines values for `max_variable` on a :class:`~pymongo.collation.Collation`. @@ -75,7 +75,7 @@ class CollationMaxVariable(object): """Spaces alone are ignored.""" -class CollationCaseFirst(object): +class CollationCaseFirst: """ An enum that defines values for `case_first` on a :class:`~pymongo.collation.Collation`. @@ -91,7 +91,7 @@ class CollationCaseFirst(object): """Default for locale or collation strength.""" -class Collation(object): +class Collation: """Collation :Parameters: @@ -163,7 +163,7 @@ def __init__( maxVariable: Optional[str] = None, normalization: Optional[bool] = None, backwards: Optional[bool] = None, - **kwargs: Any + **kwargs: Any, ) -> None: locale = common.validate_string("locale", locale) self.__document: Dict[str, Any] = {"locale": locale} @@ -201,7 +201,7 @@ def document(self) -> Dict[str, Any]: def __repr__(self): document = self.document - return "Collation(%s)" % (", ".join("%s=%r" % (key, document[key]) for key in document),) + return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): diff --git a/pymongo/collection.py b/pymongo/collection.py index 91b4013ee8..3b9001240e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -88,7 +88,7 @@ ] -class ReturnDocument(object): +class ReturnDocument: """An enum used with :meth:`~pymongo.collection.Collection.find_one_and_replace` and :meth:`~pymongo.collection.Collection.find_one_and_update`. @@ -201,7 +201,7 @@ def __init__( .. seealso:: The MongoDB documentation on `collections `_. """ - super(Collection, self).__init__( + super().__init__( codec_options or database.codec_options, read_preference or database.read_preference, write_concern or database.write_concern, @@ -212,7 +212,7 @@ def __init__( if not name or ".." in name: raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): raise InvalidName("collection names must not contain '$': %r" % name) if name[0] == "." or name[-1] == ".": raise InvalidName("collection names must not start or end with '.': %r" % name) @@ -222,7 +222,7 @@ def __init__( self.__database: Database[_DocumentType] = database self.__name = name - self.__full_name = "%s.%s" % (self.__database.name, self.__name) + self.__full_name = f"{self.__database.name}.{self.__name}" self.__write_response_codec_options = self.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) @@ -344,17 +344,17 @@ def __getattr__(self, name: str) -> "Collection[_DocumentType]": - `name`: the name of the collection to get """ if name.startswith("_"): - full_name = "%s.%s" % (self.__name, name) + full_name = f"{self.__name}.{name}" raise AttributeError( - "Collection has no attribute %r. To access the %s" - " collection, use database['%s']." % (name, full_name, full_name) + "Collection has no attribute {!r}. To access the {}" + " collection, use database['{}'].".format(name, full_name, full_name) ) return self.__getitem__(name) def __getitem__(self, name: str) -> "Collection[_DocumentType]": return Collection( self.__database, - "%s.%s" % (self.__name, name), + f"{self.__name}.{name}", False, self.codec_options, self.read_preference, @@ -363,7 +363,7 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": ) def __repr__(self): - return "Collection(%r, %r)" % (self.__database, self.__name) + return f"Collection({self.__database!r}, {self.__name!r})" def __eq__(self, other: Any) -> bool: if isinstance(other, Collection): @@ -541,7 +541,7 @@ def bulk_write( try: request._add_to_bulk(blk) except AttributeError: - raise TypeError("%r is not a valid request" % (request,)) + raise TypeError(f"{request!r} is not a valid request") write_concern = self._write_concern_for(session) bulk_api_result = blk.execute(write_concern, session) @@ -579,6 +579,7 @@ def _insert_command(session, sock_info, retryable_write): if not isinstance(doc, RawBSONDocument): return doc.get("_id") + return None def insert_one( self, @@ -719,7 +720,7 @@ def gen(): write_concern = self._write_concern_for(session) blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) - blk.ops = [doc for doc in gen()] + blk.ops = list(gen()) blk.execute(write_concern, session=session) return InsertManyResult(inserted_ids, write_concern.acknowledged) @@ -1924,7 +1925,7 @@ def gen_indexes(): for index in indexes: if not isinstance(index, IndexModel): raise TypeError( - "%r is not an instance of pymongo.operations.IndexModel" % (index,) + f"{index!r} is not an instance of pymongo.operations.IndexModel" ) document = index.document names.append(document["name"]) @@ -2442,7 +2443,6 @@ def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - with self.__database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionAggregationCommand, @@ -2687,7 +2687,7 @@ def rename( if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") - new_name = "%s.%s" % (self.__database.name, new_name) + new_name = f"{self.__database.name}.{new_name}" cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) cmd.update(kwargs) if comment is not None: @@ -2794,7 +2794,6 @@ def __find_and_modify( **kwargs, ): """Internal findAndModify helper.""" - common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): raise ValueError( diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 6f3f244419..d57b45154d 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -132,13 +132,15 @@ def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": def _has_next(self): """Returns `True` if the cursor has documents remaining from the - previous batch.""" + previous batch. + """ return len(self.__data) > 0 @property def _post_batch_resume_token(self): """Retrieve the postBatchResumeToken from the response to a - changeStream aggregate or getMore.""" + changeStream aggregate or getMore. + """ return self.__postbatchresumetoken def _maybe_pin_connection(self, sock_info): @@ -328,7 +330,7 @@ def __init__( .. seealso:: The MongoDB documentation on `cursors `_. """ assert not cursor_info.get("firstBatch") - super(RawBatchCommandCursor, self).__init__( + super().__init__( collection, cursor_info, address, diff --git a/pymongo/common.py b/pymongo/common.py index 4e39c8e514..82c773695a 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -157,7 +157,7 @@ def clean_node(node: str) -> Tuple[str, int]: def raise_config_error(key: str, dummy: Any) -> NoReturn: """Raise ConfigurationError with the given key name.""" - raise ConfigurationError("Unknown option %s" % (key,)) + raise ConfigurationError(f"Unknown option {key}") # Mapping of URI uuid representation options to valid subtypes. @@ -174,14 +174,14 @@ def validate_boolean(option: str, value: Any) -> bool: """Validates that 'value' is True or False.""" if isinstance(value, bool): return value - raise TypeError("%s must be True or False" % (option,)) + raise TypeError(f"{option} must be True or False") def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): if value not in ("true", "false"): - raise ValueError("The value of %s must be 'true' or 'false'" % (option,)) + raise ValueError(f"The value of {option} must be 'true' or 'false'") return value == "true" return validate_boolean(option, value) @@ -194,15 +194,15 @@ def validate_integer(option: str, value: Any) -> int: try: return int(value) except ValueError: - raise ValueError("The value of %s must be an integer" % (option,)) - raise TypeError("Wrong type for %s, value must be an integer" % (option,)) + raise ValueError(f"The value of {option} must be an integer") + raise TypeError(f"Wrong type for {option}, value must be an integer") def validate_positive_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be a positive integer" % (option,)) + raise ValueError(f"The value of {option} must be a positive integer") return val @@ -210,7 +210,7 @@ def validate_non_negative_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be a non negative integer" % (option,)) + raise ValueError(f"The value of {option} must be a non negative integer") return val @@ -221,7 +221,7 @@ def validate_readable(option: str, value: Any) -> Optional[str]: # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) - open(value, "r").close() + open(value).close() return value @@ -243,7 +243,7 @@ def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an instance of str" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an instance of str") def validate_string_or_none(option: str, value: Any) -> Optional[str]: @@ -262,7 +262,7 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an integer or a string" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an integer or a string") def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: @@ -275,16 +275,14 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in except ValueError: return value return validate_non_negative_integer(option, val) - raise TypeError( - "Wrong type for %s, value must be an non negative integer or a string" % (option,) - ) + raise TypeError(f"Wrong type for {option}, value must be an non negative integer or a string") def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is positive. """ - errmsg = "%s must be an integer or float" % (option,) + errmsg = f"{option} must be an integer or float" try: value = float(value) except ValueError: @@ -295,7 +293,7 @@ def validate_positive_float(option: str, value: Any) -> float: # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and less than one billion" % (option,)) + raise ValueError(f"{option} must be greater than 0 and less than one billion") return value @@ -324,7 +322,7 @@ def validate_timeout_or_zero(option: str, value: Any) -> float: config error. """ if value is None: - raise ConfigurationError("%s cannot be None" % (option,)) + raise ConfigurationError(f"{option} cannot be None") if value == 0 or value == "0": return 0 return validate_positive_float(option, value) / 1000.0 @@ -360,7 +358,7 @@ def validate_max_staleness(option: str, value: Any) -> int: def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: """Validate a read preference.""" if not isinstance(value, _ServerMode): - raise TypeError("%r is not a read preference." % (value,)) + raise TypeError(f"{value!r} is not a read preference.") return value @@ -372,14 +370,14 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: mode. """ if value not in _MONGOS_MODES: - raise ValueError("%s is not a valid read preference" % (value,)) + raise ValueError(f"{value} is not a valid read preference") return value def validate_auth_mechanism(option: str, value: Any) -> str: """Validate the authMechanism URI option.""" if value not in MECHANISMS: - raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) + raise ValueError(f"{option} must be in {tuple(MECHANISMS)}") return value @@ -389,9 +387,9 @@ def validate_uuid_representation(dummy: Any, value: Any) -> int: return _UUID_REPRESENTATIONS[value] except KeyError: raise ValueError( - "%s is an invalid UUID representation. " + "{} is an invalid UUID representation. " "Must be one of " - "%s" % (value, tuple(_UUID_REPRESENTATIONS)) + "{}".format(value, tuple(_UUID_REPRESENTATIONS)) ) @@ -412,7 +410,7 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] tags[unquote_plus(key)] = unquote_plus(val) tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid value for %s" % (tag_set, name)) + raise ValueError(f"{tag_set!r} not a valid value for {name}") return tag_sets @@ -472,13 +470,13 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Uni raise ValueError( "auth mechanism properties must be " "key:value pairs like SERVICE_NAME:" - "mongodb, not %s." % (opt,) + "mongodb, not {}.".format(opt) ) if key not in _MECHANISM_PROPS: raise ValueError( - "%s is not a supported auth " + "{} is not a supported auth " "mechanism property. Must be one of " - "%s." % (key, tuple(_MECHANISM_PROPS)) + "{}.".format(key, tuple(_MECHANISM_PROPS)) ) if key == "CANONICALIZE_HOST_NAME": props[key] = validate_boolean_or_string(key, val) @@ -502,9 +500,9 @@ def validate_document_class( is_mapping = issubclass(value.__origin__, abc.MutableMapping) if not is_mapping and not issubclass(value, RawBSONDocument): raise TypeError( - "%s must be dict, bson.son.SON, " + "{} must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "subclass of collections.MutableMapping" % (option,) + "subclass of collections.MutableMapping".format(option) ) return value @@ -512,14 +510,14 @@ def validate_document_class( def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): - raise TypeError("%s must be an instance of %s" % (option, TypeRegistry)) + raise TypeError(f"{option} must be an instance of {TypeRegistry}") return value def validate_list(option: str, value: Any) -> List: """Validates that 'value' is a list.""" if not isinstance(value, list): - raise TypeError("%s must be a list" % (option,)) + raise TypeError(f"{option} must be a list") return value @@ -534,9 +532,9 @@ def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): raise TypeError( - "%s must either be a list or an instance of dict, " + "{} must either be a list or an instance of dict, " "bson.son.SON, or any other type that inherits from " - "collections.Mapping" % (option,) + "collections.Mapping".format(option) ) @@ -544,9 +542,9 @@ def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): raise TypeError( - "%s must be an instance of dict, bson.son.SON, or " + "{} must be an instance of dict, bson.son.SON, or " "any other type that inherits from " - "collections.Mapping" % (option,) + "collections.Mapping".format(option) ) @@ -554,10 +552,10 @@ def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError( - "%s must be an instance of dict, bson.son.SON, " + "{} must be an instance of dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or " "a type that inherits from " - "collections.MutableMapping" % (option,) + "collections.MutableMapping".format(option) ) @@ -568,7 +566,7 @@ def validate_appname_or_none(option: str, value: Any) -> Optional[str]: validate_string(option, value) # We need length in bytes, so encode utf8 first. if len(value.encode("utf-8")) > 128: - raise ValueError("%s must be <= 128 bytes" % (option,)) + raise ValueError(f"{option} must be <= 128 bytes") return value @@ -577,7 +575,7 @@ def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: if value is None: return value if not isinstance(value, DriverInfo): - raise TypeError("%s must be an instance of DriverInfo" % (option,)) + raise TypeError(f"{option} must be an instance of DriverInfo") return value @@ -586,7 +584,7 @@ def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: if value is None: return value if not isinstance(value, ServerApi): - raise TypeError("%s must be an instance of ServerApi" % (option,)) + raise TypeError(f"{option} must be an instance of ServerApi") return value @@ -595,7 +593,7 @@ def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: if value is None: return value if not callable(value): - raise ValueError("%s must be a callable" % (option,)) + raise ValueError(f"{option} must be a callable") return value @@ -629,9 +627,9 @@ def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: """Validate the Unicode decode error handler option of CodecOptions.""" if value not in _UNICODE_DECODE_ERROR_HANDLERS: raise ValueError( - "%s is an invalid Unicode decode error handler. " + "{} is an invalid Unicode decode error handler. " "Must be one of " - "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) + "{}".format(value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) ) return value @@ -650,7 +648,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A from pymongo.encryption_options import AutoEncryptionOpts if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % (option,)) + raise TypeError(f"{option} must be an instance of AutoEncryptionOpts") return value @@ -667,7 +665,7 @@ def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeCo elif isinstance(value, int): return DatetimeConversion(value) - raise TypeError("%s must be a str or int representing DatetimeConversion" % (option,)) + raise TypeError(f"{option} must be a str or int representing DatetimeConversion") # Dictionary where keys are the names of public URI options, and values @@ -805,7 +803,7 @@ def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError("Unknown authentication option: %s" % (option,)) + raise ConfigurationError(f"Unknown authentication option: {option}") return option, value @@ -866,7 +864,7 @@ def _ecoc_coll_name(encrypted_fields, name): WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) -class BaseObject(object): +class BaseObject: """A base class that provides attributes and methods common to multiple pymongo classes. @@ -886,9 +884,9 @@ def __init__( if not isinstance(read_preference, _ServerMode): raise TypeError( - "%r is not valid for read_preference. See " + "{!r} is not valid for read_preference. See " "pymongo.read_preferences for valid " - "options." % (read_preference,) + "options.".format(read_preference) ) self.__read_preference = read_preference diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index c9632a43d3..40bad403f3 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -40,8 +40,8 @@ from pymongo.hello import HelloCompat from pymongo.monitoring import _SENSITIVE_COMMANDS -_SUPPORTED_COMPRESSORS = set(["snappy", "zlib", "zstd"]) -_NO_COMPRESSION = set([HelloCompat.CMD, HelloCompat.LEGACY_CMD]) +_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} +_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} _NO_COMPRESSION.update(_SENSITIVE_COMMANDS) @@ -56,7 +56,7 @@ def validate_compressors(dummy, value): for compressor in compressors[:]: if compressor not in _SUPPORTED_COMPRESSORS: compressors.remove(compressor) - warnings.warn("Unsupported compressor: %s" % (compressor,)) + warnings.warn(f"Unsupported compressor: {compressor}") elif compressor == "snappy" and not _HAVE_SNAPPY: compressors.remove(compressor) warnings.warn( @@ -82,13 +82,13 @@ def validate_zlib_compression_level(option, value): try: level = int(value) except Exception: - raise TypeError("%s must be an integer, not %r." % (option, value)) + raise TypeError(f"{option} must be an integer, not {value!r}.") if level < -1 or level > 9: raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) return level -class CompressionSettings(object): +class CompressionSettings: def __init__(self, compressors, zlib_compression_level): self.compressors = compressors self.zlib_compression_level = zlib_compression_level @@ -102,9 +102,11 @@ def get_compression_context(self, compressors): return ZlibContext(self.zlib_compression_level) elif chosen == "zstd": return ZstdContext() + return None + return None -class SnappyContext(object): +class SnappyContext: compressor_id = 1 @staticmethod @@ -112,7 +114,7 @@ def compress(data): return snappy.compress(data) -class ZlibContext(object): +class ZlibContext: compressor_id = 2 def __init__(self, level): @@ -122,7 +124,7 @@ def compress(self, data: bytes) -> bytes: return zlib.compress(data, self.level) -class ZstdContext(object): +class ZstdContext: compressor_id = 3 @staticmethod diff --git a/pymongo/cursor.py b/pymongo/cursor.py index ccf0bfd71b..cc4e1a1146 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -97,7 +97,7 @@ } -class CursorType(object): +class CursorType: NON_TAILABLE = 0 """The standard cursor type.""" @@ -126,7 +126,7 @@ class CursorType(object): """ -class _SocketManager(object): +class _SocketManager: """Used with exhaust cursors to ensure the socket is returned.""" def __init__(self, sock, more_to_come): @@ -387,11 +387,11 @@ def _clone(self, deepcopy=True, base=None): "exhaust", "has_filter", ) - data = dict( - (k, v) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_Cursor__") and k[9:] in values_to_clone - ) + } if deepcopy: data = self._deepcopy(data) base.__dict__.update(data) @@ -412,7 +412,7 @@ def __die(self, synchronous=False): self.__killed = True if self.__id and not already_killed: cursor_id = self.__id - address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname)) + address = _CursorAddress(self.__address, f"{self.__dbname}.{self.__collname}") else: # Skip killCursors. cursor_id = 0 @@ -1322,7 +1322,7 @@ def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs .. seealso:: The MongoDB documentation on `cursors `_. """ - super(RawBatchCursor, self).__init__(collection, *args, **kwargs) + super().__init__(collection, *args, **kwargs) def _unpack_response( self, response, cursor_id, codec_options, user_fields=None, legacy_response=False diff --git a/pymongo/database.py b/pymongo/database.py index 1e19d860e3..66cfce2090 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -125,7 +125,7 @@ def __init__( db.__my_collection__ """ - super(Database, self).__init__( + super().__init__( codec_options or client.codec_options, read_preference or client.read_preference, write_concern or client.write_concern, @@ -211,7 +211,7 @@ def __hash__(self) -> int: return hash((self.__client, self.__name)) def __repr__(self): - return "Database(%r, %r)" % (self.__client, self.__name) + return f"Database({self.__client!r}, {self.__name!r})" def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. @@ -223,8 +223,8 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: """ if name.startswith("_"): raise AttributeError( - "Database has no attribute %r. To access the %s" - " collection, use database[%r]." % (name, name, name) + "Database has no attribute {!r}. To access the {}" + " collection, use database[{!r}].".format(name, name, name) ) return self.__getitem__(name) @@ -415,9 +415,9 @@ def create_collection( { // key pattern must be {_id: 1} key: , // required - unique: , // required, must be ‘true’ + unique: , // required, must be `true` name: , // optional, otherwise automatically generated - v: , // optional, must be ‘2’ if provided + v: , // optional, must be `2` if provided } - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for enabling pre- and post-images. @@ -863,7 +863,6 @@ def _cmd(session, server, sock_info, read_preference): def _list_collections(self, sock_info, session, read_preference, **kwargs): """Internal listCollections helper.""" - coll = self.get_collection("$cmd", read_preference=read_preference) cmd = SON([("listCollections", 1), ("cursor", {})]) cmd.update(kwargs) @@ -1128,14 +1127,14 @@ def validate_collection( if "result" in result: info = result["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) + raise CollectionInvalid(f"{name} invalid: {info}") # Sharded results elif "raw" in result: for _, res in result["raw"].items(): if "result" in res: info = res["result"] if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) + raise CollectionInvalid(f"{name} invalid: {info}") elif not res.get("valid", False): valid = False break @@ -1144,7 +1143,7 @@ def validate_collection( valid = False if not valid: - raise CollectionInvalid("%s invalid: %r" % (name, result)) + raise CollectionInvalid(f"{name} invalid: {result!r}") return result @@ -1200,7 +1199,7 @@ def dereference( if dbref.database is not None and dbref.database != self.__name: raise ValueError( "trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, self.__name) + "another database ({!r} not {!r})".format(dbref.database, self.__name) ) return self[dbref.collection].find_one( {"_id": dbref.id}, session=session, comment=comment, **kwargs diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 53fbfd3428..86ddfcfb3e 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -31,12 +31,12 @@ class DriverInfo(namedtuple("DriverInfo", ["name", "version", "platform"])): def __new__( cls, name: str, version: Optional[str] = None, platform: Optional[str] = None ) -> "DriverInfo": - self = super(DriverInfo, cls).__new__(cls, name, version, platform) + self = super().__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): raise TypeError( - "Wrong type for DriverInfo %s option, value " - "must be an instance of str" % (key,) + "Wrong type for DriverInfo {} option, value " + "must be an instance of str".format(key) ) return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 4c46bf56ae..f2eb71ce71 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -177,6 +177,7 @@ def collection_info(self, database, filter): with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: for doc in cursor: return _dict_to_bson(doc, False, _DATA_KEY_OPTS) + return None def spawn(self): """Spawn mongocryptd. @@ -272,7 +273,7 @@ def close(self): self.mongocryptd_client = None -class RewrapManyDataKeyResult(object): +class RewrapManyDataKeyResult: """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. .. versionadded:: 4.2 @@ -292,11 +293,12 @@ def bulk_write_result(self) -> Optional[BulkWriteResult]: return self._bulk_write_result -class _Encrypter(object): +class _Encrypter: """Encrypts and decrypts MongoDB commands. This class is used to support automatic encryption and decryption of - MongoDB commands.""" + MongoDB commands. + """ def __init__(self, client, opts): """Create a _Encrypter for a client. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 0cb96d7dad..e87d96b31a 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -31,7 +31,7 @@ from pymongo.mongo_client import MongoClient -class AutoEncryptionOpts(object): +class AutoEncryptionOpts: """Options to configure automatic client-side field level encryption.""" def __init__( diff --git a/pymongo/errors.py b/pymongo/errors.py index 192eec99d9..36f97f4b5a 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -33,7 +33,7 @@ class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: - super(PyMongoError, self).__init__(message) + super().__init__(message) self._message = message self._error_labels = set(error_labels or []) @@ -105,7 +105,7 @@ def __init__( if errors is not None: if isinstance(errors, dict): error_labels = errors.get("errorLabels") - super(AutoReconnect, self).__init__(message, error_labels) + super().__init__(message, error_labels) self.errors = self.details = errors or [] @@ -125,7 +125,7 @@ def timeout(self) -> bool: def _format_detailed_error(message, details): if details is not None: - message = "%s, full error: %s" % (message, details) + message = f"{message}, full error: {details}" return message @@ -148,9 +148,7 @@ class NotPrimaryError(AutoReconnect): def __init__( self, message: str = "", errors: Optional[Union[Mapping[str, Any], List]] = None ) -> None: - super(NotPrimaryError, self).__init__( - _format_detailed_error(message, errors), errors=errors - ) + super().__init__(_format_detailed_error(message, errors), errors=errors) class ServerSelectionTimeoutError(AutoReconnect): @@ -191,9 +189,7 @@ def __init__( error_labels = None if details is not None: error_labels = details.get("errorLabels") - super(OperationFailure, self).__init__( - _format_detailed_error(error, details), error_labels=error_labels - ) + super().__init__(_format_detailed_error(error, details), error_labels=error_labels) self.__code = code self.__details = details self.__max_wire_version = max_wire_version @@ -293,7 +289,7 @@ class BulkWriteError(OperationFailure): details: Mapping[str, Any] def __init__(self, results: Mapping[str, Any]) -> None: - super(BulkWriteError, self).__init__("batch op errors occurred", 65, results) + super().__init__("batch op errors occurred", 65, results) def __reduce__(self) -> Tuple[Any, Any]: return self.__class__, (self.details,) @@ -331,8 +327,6 @@ class InvalidURI(ConfigurationError): class DocumentTooLarge(InvalidDocument): """Raised when an encoded document is too large for the connected server.""" - pass - class EncryptionError(PyMongoError): """Raised when encryption or decryption fails. @@ -344,7 +338,7 @@ class EncryptionError(PyMongoError): """ def __init__(self, cause: Exception) -> None: - super(EncryptionError, self).__init__(str(cause)) + super().__init__(str(cause)) self.__cause = cause @property @@ -369,7 +363,7 @@ class EncryptedCollectionError(EncryptionError): """ def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: - super(EncryptedCollectionError, self).__init__(cause) + super().__init__(cause) self.__encrypted_fields = encrypted_fields @property @@ -386,5 +380,3 @@ def encrypted_fields(self) -> Mapping[str, Any]: class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" - - pass diff --git a/pymongo/helpers.py b/pymongo/helpers.py index 1a753c66f4..f4582854dc 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -74,7 +74,7 @@ def _gen_index_name(keys): """Generate an index name from the set of fields it is over.""" - return "_".join(["%s_%s" % item for item in keys]) + return "_".join(["{}_{}".format(*item) for item in keys]) def _index_list(key_or_list, direction=None): @@ -248,12 +248,10 @@ def _fields_list_to_dict(fields, option_name): if isinstance(fields, (abc.Sequence, abc.Set)): if not all(isinstance(field, str) for field in fields): - raise TypeError( - "%s must be a list of key names, each an instance of str" % (option_name,) - ) + raise TypeError(f"{option_name} must be a list of key names, each an instance of str") return dict.fromkeys(fields, 1) - raise TypeError("%s must be a mapping or list of key names" % (option_name,)) + raise TypeError(f"{option_name} must be a mapping or list of key names") def _handle_exception(): @@ -266,7 +264,7 @@ def _handle_exception(): einfo = sys.exc_info() try: traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) - except IOError: + except OSError: pass finally: del einfo diff --git a/pymongo/message.py b/pymongo/message.py index 3510d210a5..34f6e6235d 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -115,7 +115,6 @@ def _convert_exception(exception): def _convert_write_result(operation, command, result): """Convert a legacy write result to write command format.""" - # Based on _merge_legacy from bulk.py affected = result.get("n", 0) res = {"ok": 1, "n": affected} @@ -240,7 +239,7 @@ def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms, commen return cmd -class _Query(object): +class _Query: """A query operation.""" __slots__ = ( @@ -310,7 +309,7 @@ def reset(self): self._as_command = None def namespace(self): - return "%s.%s" % (self.db, self.coll) + return f"{self.db}.{self.coll}" def use_command(self, sock_info): use_find_cmd = False @@ -421,7 +420,7 @@ def get_message(self, read_preference, sock_info, use_cmd=False): ) -class _GetMore(object): +class _GetMore: """A getmore operation.""" __slots__ = ( @@ -475,7 +474,7 @@ def reset(self): self._as_command = None def namespace(self): - return "%s.%s" % (self.db, self.coll) + return f"{self.db}.{self.coll}" def use_command(self, sock_info): use_cmd = False @@ -518,7 +517,6 @@ def as_command(self, sock_info, apply_timeout=False): def get_message(self, dummy0, sock_info, use_cmd=False): """Get a getmore message.""" - ns = self.namespace() ctx = sock_info.compression_context @@ -539,7 +537,7 @@ def get_message(self, dummy0, sock_info, use_cmd=False): class _RawBatchQuery(_Query): def use_command(self, sock_info): # Compatibility checks. - super(_RawBatchQuery, self).use_command(sock_info) + super().use_command(sock_info) if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True @@ -551,7 +549,7 @@ def use_command(self, sock_info): class _RawBatchGetMore(_GetMore): def use_command(self, sock_info): # Compatibility checks. - super(_RawBatchGetMore, self).use_command(sock_info) + super().use_command(sock_info) if sock_info.max_wire_version >= 8: # MongoDB 4.2+ supports exhaust over OP_MSG return True @@ -578,7 +576,7 @@ def namespace(self): def __hash__(self): # Two _CursorAddress instances with different namespaces # must not hash the same. - return (self + (self.__namespace,)).__hash__() + return ((*self, self.__namespace)).__hash__() def __eq__(self, other): if isinstance(other, _CursorAddress): @@ -648,7 +646,7 @@ def _op_msg_no_header(flags, command, identifier, docs, opts): encoded_size = _pack_int(size) total_size += size max_doc_size = max(len(doc) for doc in encoded_docs) - data = [flags_type, encoded, type_one, encoded_size, cstring] + encoded_docs + data = [flags_type, encoded, type_one, encoded_size, cstring, *encoded_docs] else: data = [flags_type, encoded] return b"".join(data), total_size, max_doc_size @@ -795,7 +793,7 @@ def _get_more(collection_name, num_to_return, cursor_id, ctx=None): return _get_more_uncompressed(collection_name, num_to_return, cursor_id) -class _BulkWriteContext(object): +class _BulkWriteContext: """A wrapper around SocketInfo for use with write splitting functions.""" __slots__ = ( @@ -1033,7 +1031,7 @@ def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> N else: # There's nothing intelligent we can say # about size for update and delete - raise DocumentTooLarge("%r command document too large" % (operation,)) + raise DocumentTooLarge(f"{operation!r} command document too large") # OP_MSG ------------------------------------------------------------- @@ -1253,7 +1251,7 @@ def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, return to_send, length -class _OpReply(object): +class _OpReply: """A MongoDB OP_REPLY response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "documents") @@ -1363,7 +1361,7 @@ def unpack(cls, msg): return cls(flags, cursor_id, number_returned, documents) -class _OpMsg(object): +class _OpMsg: """A MongoDB OP_MSG response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "payload_document") @@ -1427,12 +1425,12 @@ def unpack(cls, msg): flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError("Unsupported OP_MSG flag checksumPresent: 0x%x" % (flags,)) + raise ProtocolError(f"Unsupported OP_MSG flag checksumPresent: 0x{flags:x}") if flags ^ cls.MORE_TO_COME: - raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) + raise ProtocolError(f"Unsupported OP_MSG flags: 0x{flags:x}") if first_payload_type != 0: - raise ProtocolError("Unsupported OP_MSG payload type: 0x%x" % (first_payload_type,)) + raise ProtocolError(f"Unsupported OP_MSG payload type: 0x{first_payload_type:x}") if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ca60affdf5..ccfaaa31c1 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -805,7 +805,7 @@ def __init__( self.__kill_cursors_queue: List = [] self._event_listeners = options.pool_options._event_listeners - super(MongoClient, self).__init__( + super().__init__( options.codec_options, options.read_preference, options.write_concern, @@ -1509,11 +1509,11 @@ def option_repr(option, value): if value is dict: return "document_class=dict" else: - return "document_class=%s.%s" % (value.__module__, value.__name__) + return f"document_class={value.__module__}.{value.__name__}" if option in common.TIMEOUT_OPTIONS and value is not None: - return "%s=%s" % (option, int(value * 1000)) + return f"{option}={int(value * 1000)}" - return "%s=%r" % (option, value) + return f"{option}={value!r}" # Host first... options = [ @@ -1536,7 +1536,7 @@ def option_repr(option, value): return ", ".join(options) def __repr__(self): - return "MongoClient(%s)" % (self._repr_helper(),) + return f"MongoClient({self._repr_helper()})" def __getattr__(self, name: str) -> database.Database[_DocumentType]: """Get a database by name. @@ -1549,8 +1549,8 @@ def __getattr__(self, name: str) -> database.Database[_DocumentType]: """ if name.startswith("_"): raise AttributeError( - "MongoClient has no attribute %r. To access the %s" - " database, use client[%r]." % (name, name, name) + "MongoClient has no attribute {!r}. To access the {}" + " database, use client[{!r}].".format(name, name, name) ) return self.__getitem__(name) @@ -1685,7 +1685,8 @@ def _process_kill_cursors(self): # This method is run periodically by a background thread. def _process_periodic_tasks(self): """Process any pending kill cursors requests and - maintain connection pool parameters.""" + maintain connection pool parameters. + """ try: self._process_kill_cursors() self._topology.update_pool() @@ -1742,7 +1743,7 @@ def _get_server_session(self): def _return_server_session(self, server_session, lock): """Internal: return a _ServerSession to the pool.""" if isinstance(server_session, _EmptyServerSession): - return + return None return self._topology.return_server_session(server_session, lock) def _ensure_session(self, session=None): @@ -2121,7 +2122,7 @@ def _add_retryable_write_error(exc, max_wire_version): exc._add_error_label("RetryableWriteError") -class _MongoClientErrorHandler(object): +class _MongoClientErrorHandler: """Handle errors raised when executing an operation.""" __slots__ = ( diff --git a/pymongo/monitor.py b/pymongo/monitor.py index 9031d4b785..2fc0bf8bab 100644 --- a/pymongo/monitor.py +++ b/pymongo/monitor.py @@ -37,7 +37,7 @@ def _sanitize(error): error.__cause__ = None -class MonitorBase(object): +class MonitorBase: def __init__(self, topology, name, interval, min_interval): """Base class to do periodic work on a background thread. @@ -108,7 +108,7 @@ def __init__(self, server_description, topology, pool, topology_settings): The Topology is weakly referenced. The Pool must be exclusive to this Monitor. """ - super(Monitor, self).__init__( + super().__init__( topology, "pymongo_server_monitor_thread", topology_settings.heartbeat_frequency, @@ -290,7 +290,7 @@ def __init__(self, topology, topology_settings): The Topology is weakly referenced. """ - super(SrvMonitor, self).__init__( + super().__init__( topology, "pymongo_srv_polling_thread", common.MIN_SRV_RESCAN_INTERVAL, @@ -343,7 +343,7 @@ def __init__(self, topology, topology_settings, pool): The Topology is weakly referenced. """ - super(_RttMonitor, self).__init__( + super().__init__( topology, "pymongo_server_rtt_thread", topology_settings.heartbeat_frequency, diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 5b729652ad..391ca13540 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -211,7 +211,7 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) -class _EventListener(object): +class _EventListener: """Abstract base class for all event listeners.""" @@ -486,14 +486,14 @@ def _to_micros(dur): def _validate_event_listeners(option, listeners): """Validate event listeners""" if not isinstance(listeners, abc.Sequence): - raise TypeError("%s must be a list or tuple" % (option,)) + raise TypeError(f"{option} must be a list or tuple") for listener in listeners: if not isinstance(listener, _EventListener): raise TypeError( - "Listeners for %s must be either a " + "Listeners for {} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (option,) + "ConnectionPoolListener.".format(option) ) return listeners @@ -508,10 +508,10 @@ def register(listener: _EventListener) -> None: """ if not isinstance(listener, _EventListener): raise TypeError( - "Listeners for %s must be either a " + "Listeners for {} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (listener,) + "ConnectionPoolListener.".format(listener) ) if isinstance(listener, CommandListener): _LISTENERS.command_listeners.append(listener) @@ -528,19 +528,17 @@ def register(listener: _EventListener) -> None: # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS: set = set( - [ - "authenticate", - "saslstart", - "saslcontinue", - "getnonce", - "createuser", - "updateuser", - "copydbgetnonce", - "copydbsaslstart", - "copydb", - ] -) +_SENSITIVE_COMMANDS: set = { + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +} # The "hello" command is also deemed sensitive when attempting speculative @@ -554,7 +552,7 @@ def _is_speculative_authenticate(command_name, doc): return False -class _CommandEvent(object): +class _CommandEvent: """Base class for command events.""" __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") @@ -627,10 +625,10 @@ def __init__( service_id: Optional[ObjectId] = None, ) -> None: if not command: - raise ValueError("%r is not a valid command" % (command,)) + raise ValueError(f"{command!r} is not a valid command") # Command name must be first key. command_name = next(iter(command)) - super(CommandStartedEvent, self).__init__( + super().__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) cmd_name = command_name.lower() @@ -651,7 +649,7 @@ def database_name(self) -> str: return self.__db def __repr__(self): - return ("<%s %s db: %r, command: %r, operation_id: %s, service_id: %s>") % ( + return ("<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}>").format( self.__class__.__name__, self.connection_id, self.database_name, @@ -687,7 +685,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, ) -> None: - super(CommandSucceededEvent, self).__init__( + super().__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) self.__duration_micros = _to_micros(duration) @@ -708,7 +706,9 @@ def reply(self) -> _DocumentOut: return self.__reply def __repr__(self): - return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, service_id: %s>") % ( + return ( + "<{} {} command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}>" + ).format( self.__class__.__name__, self.connection_id, self.command_name, @@ -744,7 +744,7 @@ def __init__( operation_id: Optional[int], service_id: Optional[ObjectId] = None, ) -> None: - super(CommandFailedEvent, self).__init__( + super().__init__( command_name, request_id, connection_id, operation_id, service_id=service_id ) self.__duration_micros = _to_micros(duration) @@ -762,9 +762,9 @@ def failure(self) -> _DocumentOut: def __repr__(self): return ( - "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "failure: %r, service_id: %s>" - ) % ( + "<{} {} command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}>" + ).format( self.__class__.__name__, self.connection_id, self.command_name, @@ -775,7 +775,7 @@ def __repr__(self): ) -class _PoolEvent(object): +class _PoolEvent: """Base class for pool events.""" __slots__ = ("__address",) @@ -791,7 +791,7 @@ def address(self) -> _Address: return self.__address def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + return f"{self.__class__.__name__}({self.__address!r})" class PoolCreatedEvent(_PoolEvent): @@ -807,7 +807,7 @@ class PoolCreatedEvent(_PoolEvent): __slots__ = ("__options",) def __init__(self, address: _Address, options: Dict[str, Any]) -> None: - super(PoolCreatedEvent, self).__init__(address) + super().__init__(address) self.__options = options @property @@ -816,7 +816,7 @@ def options(self) -> Dict[str, Any]: return self.__options def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__options) + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" class PoolReadyEvent(_PoolEvent): @@ -846,7 +846,7 @@ class PoolClearedEvent(_PoolEvent): __slots__ = ("__service_id",) def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: - super(PoolClearedEvent, self).__init__(address) + super().__init__(address) self.__service_id = service_id @property @@ -860,7 +860,7 @@ def service_id(self) -> Optional[ObjectId]: return self.__service_id def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__service_id) + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r})" class PoolClosedEvent(_PoolEvent): @@ -876,7 +876,7 @@ class PoolClosedEvent(_PoolEvent): __slots__ = () -class ConnectionClosedReason(object): +class ConnectionClosedReason: """An enum that defines values for `reason` on a :class:`ConnectionClosedEvent`. @@ -897,7 +897,7 @@ class ConnectionClosedReason(object): """The pool was closed, making the connection no longer valid.""" -class ConnectionCheckOutFailedReason(object): +class ConnectionCheckOutFailedReason: """An enum that defines values for `reason` on a :class:`ConnectionCheckOutFailedEvent`. @@ -916,7 +916,7 @@ class ConnectionCheckOutFailedReason(object): """ -class _ConnectionEvent(object): +class _ConnectionEvent: """Private base class for connection events.""" __slots__ = ("__address",) @@ -932,7 +932,7 @@ def address(self) -> _Address: return self.__address def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + return f"{self.__class__.__name__}({self.__address!r})" class _ConnectionIdEvent(_ConnectionEvent): @@ -950,7 +950,7 @@ def connection_id(self) -> int: return self.__connection_id def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__connection_id) + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" class ConnectionCreatedEvent(_ConnectionIdEvent): @@ -999,7 +999,7 @@ class ConnectionClosedEvent(_ConnectionIdEvent): __slots__ = ("__reason",) def __init__(self, address, connection_id, reason): - super(ConnectionClosedEvent, self).__init__(address, connection_id) + super().__init__(address, connection_id) self.__reason = reason @property @@ -1012,7 +1012,7 @@ def reason(self): return self.__reason def __repr__(self): - return "%s(%r, %r, %r)" % ( + return "{}({!r}, {!r}, {!r})".format( self.__class__.__name__, self.address, self.connection_id, @@ -1060,7 +1060,7 @@ def reason(self) -> str: return self.__reason def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__reason) + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r})" class ConnectionCheckedOutEvent(_ConnectionIdEvent): @@ -1091,7 +1091,7 @@ class ConnectionCheckedInEvent(_ConnectionIdEvent): __slots__ = () -class _ServerEvent(object): +class _ServerEvent: """Base class for server events.""" __slots__ = ("__server_address", "__topology_id") @@ -1111,7 +1111,7 @@ def topology_id(self) -> ObjectId: return self.__topology_id def __repr__(self): - return "<%s %s topology_id: %s>" % ( + return "<{} {} topology_id: {}>".format( self.__class__.__name__, self.server_address, self.topology_id, @@ -1130,26 +1130,28 @@ def __init__( self, previous_description: "ServerDescription", new_description: "ServerDescription", - *args: Any + *args: Any, ) -> None: - super(ServerDescriptionChangedEvent, self).__init__(*args) + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property def previous_description(self) -> "ServerDescription": """The previous - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. + """ return self.__previous_description @property def new_description(self) -> "ServerDescription": """The new - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. + """ return self.__new_description def __repr__(self): - return "<%s %s changed from: %s, to: %s>" % ( + return "<{} {} changed from: {}, to: {}>".format( self.__class__.__name__, self.server_address, self.previous_description, @@ -1175,7 +1177,7 @@ class ServerClosedEvent(_ServerEvent): __slots__ = () -class TopologyEvent(object): +class TopologyEvent: """Base class for topology description events.""" __slots__ = "__topology_id" @@ -1189,7 +1191,7 @@ def topology_id(self) -> ObjectId: return self.__topology_id def __repr__(self): - return "<%s topology_id: %s>" % (self.__class__.__name__, self.topology_id) + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" class TopologyDescriptionChangedEvent(TopologyEvent): @@ -1204,26 +1206,28 @@ def __init__( self, previous_description: "TopologyDescription", new_description: "TopologyDescription", - *args: Any + *args: Any, ) -> None: - super(TopologyDescriptionChangedEvent, self).__init__(*args) + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property def previous_description(self) -> "TopologyDescription": """The previous - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__previous_description @property def new_description(self) -> "TopologyDescription": """The new - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__new_description def __repr__(self): - return "<%s topology_id: %s changed from: %s, to: %s>" % ( + return "<{} topology_id: {} changed from: {}, to: {}>".format( self.__class__.__name__, self.topology_id, self.previous_description, @@ -1249,7 +1253,7 @@ class TopologyClosedEvent(TopologyEvent): __slots__ = () -class _ServerHeartbeatEvent(object): +class _ServerHeartbeatEvent: """Base class for server heartbeat events.""" __slots__ = "__connection_id" @@ -1260,11 +1264,12 @@ def __init__(self, connection_id: _Address) -> None: @property def connection_id(self) -> _Address: """The address (host, port) of the server this heartbeat was sent - to.""" + to. + """ return self.__connection_id def __repr__(self): - return "<%s %s>" % (self.__class__.__name__, self.connection_id) + return f"<{self.__class__.__name__} {self.connection_id}>" class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): @@ -1287,7 +1292,7 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): def __init__( self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False ) -> None: - super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) + super().__init__(connection_id) self.__duration = duration self.__reply = reply self.__awaited = awaited @@ -1313,7 +1318,7 @@ def awaited(self) -> bool: return self.__awaited def __repr__(self): - return "<%s %s duration: %s, awaited: %s, reply: %s>" % ( + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( self.__class__.__name__, self.connection_id, self.duration, @@ -1334,7 +1339,7 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): def __init__( self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False ) -> None: - super(ServerHeartbeatFailedEvent, self).__init__(connection_id) + super().__init__(connection_id) self.__duration = duration self.__reply = reply self.__awaited = awaited @@ -1360,7 +1365,7 @@ def awaited(self) -> bool: return self.__awaited def __repr__(self): - return "<%s %s duration: %s, awaited: %s, reply: %r>" % ( + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( self.__class__.__name__, self.connection_id, self.duration, @@ -1369,7 +1374,7 @@ def __repr__(self): ) -class _EventListeners(object): +class _EventListeners: """Configure event listeners for a client instance. Any event listeners registered globally are included by default. diff --git a/pymongo/network.py b/pymongo/network.py index a5c5459e14..d105c8b8b5 100644 --- a/pymongo/network.py +++ b/pymongo/network.py @@ -219,15 +219,15 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): # No request_id for exhaust cursor "getMore". if request_id is not None: if request_id != response_to: - raise ProtocolError("Got response id %r but expected %r" % (response_to, request_id)) + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") if length <= 16: raise ProtocolError( - "Message length (%r) not longer than standard message header size (16)" % (length,) + f"Message length ({length!r}) not longer than standard message header size (16)" ) if length > max_message_size: raise ProtocolError( - "Message length (%r) is larger than server max " - "message size (%r)" % (length, max_message_size) + "Message length ({!r}) is larger than server max " + "message size ({!r})".format(length, max_message_size) ) if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( @@ -240,7 +240,7 @@ def receive_message(sock_info, request_id, max_message_size=MAX_MESSAGE_SIZE): try: unpack_reply = _UNPACK_REPLY[op_code] except KeyError: - raise ProtocolError("Got opcode %r but expected %r" % (op_code, _UNPACK_REPLY.keys())) + raise ProtocolError(f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}") return unpack_reply(data) @@ -281,7 +281,7 @@ def wait_for_read(sock_info, deadline): # Errors raised by sockets (and TLS sockets) when in non-blocking mode. -BLOCKING_IO_ERRORS = (BlockingIOError,) + ssl_support.BLOCKING_IO_ERRORS +BLOCKING_IO_ERRORS = (BlockingIOError, *ssl_support.BLOCKING_IO_ERRORS) def _receive_data_on_socket(sock_info, length, deadline): @@ -299,7 +299,7 @@ def _receive_data_on_socket(sock_info, length, deadline): chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) except BLOCKING_IO_ERRORS: raise socket.timeout("timed out") - except (IOError, OSError) as exc: # noqa: B014 + except OSError as exc: # noqa: B014 if _errno_from_exception(exc) == errno.EINTR: continue raise diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 389ee09ce7..0c50902167 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -20,7 +20,7 @@ from pymongo.lock import _create_lock -class _OCSPCache(object): +class _OCSPCache: """A cache for OCSP responses.""" CACHE_KEY_TYPE = namedtuple( # type: ignore diff --git a/pymongo/operations.py b/pymongo/operations.py index ad119f2ecc..3ff4ed57a3 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -48,7 +48,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_insert(self._doc) def __repr__(self): - return "InsertOne(%r)" % (self._doc,) + return f"InsertOne({self._doc!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): @@ -59,7 +59,7 @@ def __ne__(self, other: Any) -> bool: return not self == other -class DeleteOne(object): +class DeleteOne: """Represents a delete_one operation.""" __slots__ = ("_filter", "_collation", "_hint") @@ -104,7 +104,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) def __repr__(self): - return "DeleteOne(%r, %r)" % (self._filter, self._collation) + return f"DeleteOne({self._filter!r}, {self._collation!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): @@ -115,7 +115,7 @@ def __ne__(self, other: Any) -> bool: return not self == other -class DeleteMany(object): +class DeleteMany: """Represents a delete_many operation.""" __slots__ = ("_filter", "_collation", "_hint") @@ -160,7 +160,7 @@ def _add_to_bulk(self, bulkobj): bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) def __repr__(self): - return "DeleteMany(%r, %r)" % (self._filter, self._collation) + return f"DeleteMany({self._filter!r}, {self._collation!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): @@ -242,7 +242,7 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self): - return "%s(%r, %r, %r, %r, %r)" % ( + return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -252,7 +252,7 @@ def __repr__(self): ) -class _UpdateOp(object): +class _UpdateOp: """Private base class for update operations.""" __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") @@ -298,7 +298,7 @@ def __ne__(self, other): return not self == other def __repr__(self): - return "%s(%r, %r, %r, %r, %r, %r)" % ( + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -352,7 +352,7 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateOne, self).__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" @@ -410,7 +410,7 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateMany, self).__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint) def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" @@ -425,7 +425,7 @@ def _add_to_bulk(self, bulkobj): ) -class IndexModel(object): +class IndexModel: """Represents an index to create.""" __slots__ = ("__document",) diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 95e7830674..24090e0160 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -22,7 +22,7 @@ from pymongo.lock import _create_lock -class PeriodicExecutor(object): +class PeriodicExecutor: def __init__(self, interval, min_interval, target, name=None): """ "Run a target function periodically on a background thread. @@ -51,7 +51,7 @@ def __init__(self, interval, min_interval, target, name=None): self._lock = _create_lock() def __repr__(self): - return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" def open(self) -> None: """Start. Multiple calls have no effect. diff --git a/pymongo/pool.py b/pymongo/pool.py index 6ba1554231..5bae8ce878 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -81,7 +81,6 @@ def _set_non_inheritable_non_atomic(fd): # everything we need from fcntl, etc. def _set_non_inheritable_non_atomic(fd): """Dummy function for platforms that don't provide fcntl.""" - pass _MAX_TCP_KEEPIDLE = 120 @@ -134,7 +133,7 @@ def _set_tcp_option(sock, tcp_option, max_value): default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) if default > max_value: sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except socket.error: + except OSError: pass def _set_keepalive_times(sock): @@ -351,7 +350,7 @@ def _raise_connection_failure( if port is not None: msg = "%s:%d: %s" % (host, port, error) else: - msg = "%s: %s" % (host, error) + msg = f"{host}: {error}" if msg_prefix: msg = msg_prefix + msg if isinstance(error, socket.timeout): @@ -371,7 +370,7 @@ def _cond_wait(condition, deadline): return condition.wait(timeout) -class PoolOptions(object): +class PoolOptions: """Read only connection pool options for a MongoClient. Should not be instantiated directly by application developers. Access @@ -456,17 +455,17 @@ def __init__( # } if driver: if driver.name: - self.__metadata["driver"]["name"] = "%s|%s" % ( + self.__metadata["driver"]["name"] = "{}|{}".format( _METADATA["driver"]["name"], driver.name, ) if driver.version: - self.__metadata["driver"]["version"] = "%s|%s" % ( + self.__metadata["driver"]["version"] = "{}|{}".format( _METADATA["driver"]["version"], driver.version, ) if driver.platform: - self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) + self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) env = _metadata_env() if env: @@ -601,7 +600,7 @@ def load_balanced(self): return self.__load_balanced -class _CancellationContext(object): +class _CancellationContext: def __init__(self): self._cancelled = False @@ -615,7 +614,7 @@ def cancelled(self): return self._cancelled -class SocketInfo(object): +class SocketInfo: """Store a socket with some metadata. :Parameters: @@ -1080,7 +1079,7 @@ def __hash__(self): return hash(self.sock) def __repr__(self): - return "SocketInfo(%s)%s at %s" % ( + return "SocketInfo({}){} at {}".format( repr(self.sock), self.closed and " CLOSED" or "", id(self), @@ -1106,7 +1105,7 @@ def _create_connection(address, options): try: sock.connect(host) return sock - except socket.error: + except OSError: sock.close() raise @@ -1125,7 +1124,7 @@ def _create_connection(address, options): # all file descriptors are created non-inheritable. See PEP 446. try: sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) - except socket.error: + except OSError: # Can SOCK_CLOEXEC be defined even if the kernel doesn't support # it? sock = socket.socket(af, socktype, proto) @@ -1144,7 +1143,7 @@ def _create_connection(address, options): _set_keepalive_times(sock) sock.connect(sa) return sock - except socket.error as e: + except OSError as e: err = e sock.close() @@ -1155,7 +1154,7 @@ def _create_connection(address, options): # host with an OS/kernel or Python interpreter that doesn't # support IPv6. The test case is Jython2.5.1 which doesn't # support IPv6 at all. - raise socket.error("getaddrinfo failed") + raise OSError("getaddrinfo failed") def _configured_socket(address, options): @@ -1182,7 +1181,7 @@ def _configured_socket(address, options): # Raise _CertificateError directly like we do after match_hostname # below. raise - except (IOError, OSError, SSLError) as exc: # noqa: B014 + except (OSError, SSLError) as exc: # noqa: B014 sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -1208,10 +1207,8 @@ class _PoolClosedError(PyMongoError): closed pool. """ - pass - -class _PoolGeneration(object): +class _PoolGeneration: def __init__(self): # Maps service_id to generation. self._generations = collections.defaultdict(int) @@ -1242,7 +1239,7 @@ def stale(self, gen, service_id): return gen != self.get(service_id) -class PoolState(object): +class PoolState: PAUSED = 1 READY = 2 CLOSED = 3 @@ -1753,10 +1750,9 @@ def _raise_wait_queue_timeout(self) -> NoReturn: other_ops = self.active_sockets - self.ncursors - self.ntxns raise WaitQueueTimeoutError( "Timeout waiting for connection from the connection pool. " - "maxPoolSize: %s, connections in use by cursors: %s, " - "connections in use by transactions: %s, connections in use " - "by other operations: %s, timeout: %s" - % ( + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( self.opts.max_pool_size, self.ncursors, self.ntxns, @@ -1766,7 +1762,7 @@ def _raise_wait_queue_timeout(self) -> NoReturn: ) raise WaitQueueTimeoutError( "Timed out while checking out a connection from connection pool. " - "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout) + "maxPoolSize: {}, timeout: {}".format(self.opts.max_pool_size, timeout) ) def __del__(self): diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 2d9c904bb3..bfc52df671 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -67,7 +67,7 @@ _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) +_REVERSE_VERIFY_MAP = {value: key for key, value in _VERIFY_MAP.items()} # For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are @@ -97,7 +97,7 @@ class _sslConn(_SSL.Connection): def __init__(self, ctx, sock, suppress_ragged_eofs): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs - super(_sslConn, self).__init__(ctx, sock) + super().__init__(ctx, sock) def _call(self, call, *args, **kwargs): timeout = self.gettimeout() @@ -122,11 +122,11 @@ def _call(self, call, *args, **kwargs): continue def do_handshake(self, *args, **kwargs): - return self._call(super(_sslConn, self).do_handshake, *args, **kwargs) + return self._call(super().do_handshake, *args, **kwargs) def recv(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv, *args, **kwargs) + return self._call(super().recv, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -135,7 +135,7 @@ def recv(self, *args, **kwargs): def recv_into(self, *args, **kwargs): try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) + return self._call(super().recv_into, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): @@ -148,11 +148,11 @@ def sendall(self, buf, flags=0): total_sent = 0 while total_sent < total_length: try: - sent = self._call(super(_sslConn, self).send, view[total_sent:], flags) + sent = self._call(super().send, view[total_sent:], flags) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. - except (IOError, OSError) as exc: # noqa: B014 + except OSError as exc: # noqa: B014 if _errno_from_exception(exc) == _EINTR: continue raise @@ -163,7 +163,7 @@ def sendall(self, buf, flags=0): total_sent += sent -class _CallbackData(object): +class _CallbackData: """Data class which is passed to the OCSP callback.""" def __init__(self): @@ -172,7 +172,7 @@ def __init__(self): self.ocsp_response_cache = _OCSPCache() -class SSLContext(object): +class SSLContext: """A CPython compatible SSLContext implementation wrapping PyOpenSSL's context. """ @@ -328,7 +328,8 @@ def load_default_certs(self): def set_default_verify_paths(self): """Specify that the platform provided CA certificates are to be used - for verification purposes.""" + for verification purposes. + """ # Note: See PyOpenSSL's docs for limitations, which are similar # but not that same as CPython's. self._ctx.set_default_verify_paths() diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index dfb3930ab0..c673c44780 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -17,7 +17,7 @@ from typing import Any, Dict, Optional -class ReadConcern(object): +class ReadConcern: """ReadConcern :Parameters: @@ -45,7 +45,8 @@ def level(self) -> Optional[str]: @property def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with - old wire protocol versions.""" + old wire protocol versions. + """ return self.level is None or self.level == "local" @property diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 46f029ed31..f3aa003a1c 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -46,18 +46,18 @@ def _validate_tag_sets(tag_sets): return tag_sets if not isinstance(tag_sets, (list, tuple)): - raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") if len(tag_sets) == 0: raise ValueError( - ("Tag sets %r invalid, must be None or contain at least one set of tags") % (tag_sets,) + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" ) for tags in tag_sets: if not isinstance(tags, abc.Mapping): raise TypeError( - "Tag set %r invalid, must be an instance of dict, " + "Tag set {!r} invalid, must be an instance of dict, " "bson.son.SON or other type that inherits from " - "collection.Mapping" % (tags,) + "collection.Mapping".format(tags) ) return list(tag_sets) @@ -88,7 +88,7 @@ def _validate_hedge(hedge): return None if not isinstance(hedge, dict): - raise TypeError("hedge must be a dictionary, not %r" % (hedge,)) + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") return hedge @@ -97,7 +97,7 @@ def _validate_hedge(hedge): _TagSets = Sequence[Mapping[str, Any]] -class _ServerMode(object): +class _ServerMode: """Base class for all read preferences.""" __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") @@ -168,7 +168,8 @@ def tag_sets(self) -> _TagSets: def max_staleness(self) -> int: """The maximum estimated length of time (in seconds) a replica set secondary can fall behind the primary in replication before it will - no longer be selected for operations, or -1 for no maximum.""" + no longer be selected for operations, or -1 for no maximum. + """ return self.__max_staleness @property @@ -209,7 +210,7 @@ def min_wire_version(self) -> int: return 0 if self.__max_staleness == -1 else 5 def __repr__(self): - return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( + return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( self.name, self.__tag_sets, self.__max_staleness, @@ -263,7 +264,7 @@ class Primary(_ServerMode): __slots__ = () def __init__(self) -> None: - super(Primary, self).__init__(_PRIMARY) + super().__init__(_PRIMARY) def __call__(self, selection: Any) -> Any: """Apply this read preference to a Selection.""" @@ -314,7 +315,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -357,7 +358,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness, hedge) + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -401,9 +402,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(SecondaryPreferred, self).__init__( - _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge - ) + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -448,7 +447,7 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness, hedge) + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) def __call__(self, selection: Any) -> Any: """Apply this read preference to Selection.""" @@ -490,7 +489,7 @@ def __call__(self, selection): return self.effective_pref(selection) def __repr__(self): - return "_AggWritePref(pref=%r)" % (self.pref,) + return f"_AggWritePref(pref={self.pref!r})" # Proxy other calls to the effective_pref so that _AggWritePref can be # used in place of an actual read preference. @@ -524,7 +523,7 @@ def make_read_preference( ) -class ReadPreference(object): +class ReadPreference: """An enum that defines some commonly used read preference modes. Apps can also create a custom read preference, for example:: @@ -591,7 +590,7 @@ def read_pref_mode_from_name(name: str) -> int: return _MONGOS_MODES.index(name) -class MovingAverage(object): +class MovingAverage: """Tracks an exponentially-weighted moving average.""" average: Optional[float] diff --git a/pymongo/response.py b/pymongo/response.py index 1369eac4e0..fc01b0f1bf 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -15,7 +15,7 @@ """Represent a response from the server.""" -class Response(object): +class Response: __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") def __init__(self, data, address, request_id, duration, from_command, docs): @@ -86,9 +86,7 @@ def __init__( - `more_to_come`: Bool indicating whether cursor is ready to be exhausted. """ - super(PinnedResponse, self).__init__( - data, address, request_id, duration, from_command, docs - ) + super().__init__(data, address, request_id, duration, from_command, docs) self._socket_info = socket_info self._more_to_come = more_to_come @@ -105,5 +103,6 @@ def socket_info(self): @property def more_to_come(self): """If true, server is ready to send batches on the socket until the - result set is exhausted or there is an error.""" + result set is exhausted or there is an error. + """ return self._more_to_come diff --git a/pymongo/results.py b/pymongo/results.py index b072979499..3bd9e82069 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -18,7 +18,7 @@ from pymongo.errors import InvalidOperation -class _WriteResult(object): +class _WriteResult: """Base class for write result classes.""" __slots__ = ("__acknowledged",) @@ -30,10 +30,10 @@ def _raise_if_unacknowledged(self, property_name): """Raise an exception on property access if unacknowledged.""" if not self.__acknowledged: raise InvalidOperation( - "A value for %s is not available when " + "A value for {} is not available when " "the write is unacknowledged. Check the " "acknowledged attribute to avoid this " - "error." % (property_name,) + "error.".format(property_name) ) @property @@ -63,7 +63,7 @@ class InsertOneResult(_WriteResult): def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id - super(InsertOneResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def inserted_id(self) -> Any: @@ -78,7 +78,7 @@ class InsertManyResult(_WriteResult): def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids - super(InsertManyResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def inserted_ids(self) -> List: @@ -102,7 +102,7 @@ class UpdateResult(_WriteResult): def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result - super(UpdateResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def raw_result(self) -> Dict[str, Any]: @@ -134,13 +134,14 @@ def upserted_id(self) -> Any: class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` - and :meth:`~pymongo.collection.Collection.delete_many`""" + and :meth:`~pymongo.collection.Collection.delete_many` + """ __slots__ = ("__raw_result",) def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result - super(DeleteResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def raw_result(self) -> Dict[str, Any]: @@ -169,7 +170,7 @@ def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: :exc:`~pymongo.errors.InvalidOperation`. """ self.__bulk_api_result = bulk_api_result - super(BulkWriteResult, self).__init__(acknowledged) + super().__init__(acknowledged) @property def bulk_api_result(self) -> Dict[str, Any]: @@ -211,7 +212,5 @@ def upserted_ids(self) -> Optional[Dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") if self.__bulk_api_result: - return dict( - (upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"] - ) + return {upsert["index"]: upsert["_id"] for upsert in self.bulk_api_result["upserted"]} return None diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index b96d6fcb56..34c0182a53 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -71,7 +71,7 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) return data if prohibit_unassigned_code_points: - prohibited = _PROHIBITED + (stringprep.in_table_a1,) + prohibited = (*_PROHIBITED, stringprep.in_table_a1) else: prohibited = _PROHIBITED @@ -98,12 +98,12 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) raise ValueError("SASLprep: failed bidirectional check") # RFC3454, Section 6, #2. If a string contains any RandALCat # character, it MUST NOT contain any LCat character. - prohibited = prohibited + (stringprep.in_table_d2,) + prohibited = (*prohibited, stringprep.in_table_d2) else: # RFC3454, Section 6, #3. Following the logic of #3, if # the first character is not a RandALCat, no other character # can be either. - prohibited = prohibited + (in_table_d1,) + prohibited = (*prohibited, in_table_d1) # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi for char in data: diff --git a/pymongo/server.py b/pymongo/server.py index 16c905abb7..2eb91c5b5d 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -25,7 +25,7 @@ _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} -class Server(object): +class Server: def __init__( self, server_description, pool, monitor, topology_id=None, listeners=None, events=None ): @@ -245,4 +245,4 @@ def _split_message(self, message): return request_id, data, 0 def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, self._description) + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/server_api.py b/pymongo/server_api.py index e92d6e6179..2393615032 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -95,7 +95,7 @@ class ServerApiVersion: """Server API version "1".""" -class ServerApi(object): +class ServerApi: """MongoDB Stable API.""" def __init__(self, version, strict=None, deprecation_errors=None): @@ -113,16 +113,16 @@ def __init__(self, version, strict=None, deprecation_errors=None): .. versionadded:: 3.12 """ if version != ServerApiVersion.V1: - raise ValueError("Unknown ServerApi version: %s" % (version,)) + raise ValueError(f"Unknown ServerApi version: {version}") if strict is not None and not isinstance(strict, bool): raise TypeError( "Wrong type for ServerApi strict, value must be an instance " - "of bool, not %s" % (type(strict),) + "of bool, not {}".format(type(strict)) ) if deprecation_errors is not None and not isinstance(deprecation_errors, bool): raise TypeError( "Wrong type for ServerApi deprecation_errors, value must be " - "an instance of bool, not %s" % (type(deprecation_errors),) + "an instance of bool, not {}".format(type(deprecation_errors)) ) self._version = version self._strict = strict diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 46517ee95e..4bca3390ae 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -25,7 +25,7 @@ from pymongo.typings import _Address -class ServerDescription(object): +class ServerDescription: """Immutable representation of one server. :Parameters: @@ -287,8 +287,8 @@ def __ne__(self, other: Any) -> bool: def __repr__(self): errmsg = "" if self.error: - errmsg = ", error=%r" % (self.error,) - return "<%s %s server_type: %s, rtt: %s%s>" % ( + errmsg = f", error={self.error!r}" + return "<{} {} server_type: {}, rtt: {}{}>".format( self.__class__.__name__, self.address, self.server_type_name, diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index 313566cb83..aa9d26b5fb 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -17,7 +17,7 @@ from pymongo.server_type import SERVER_TYPE -class Selection(object): +class Selection: """Input or output of a server selector function.""" @classmethod @@ -51,6 +51,7 @@ def secondary_with_max_last_write_date(self): secondaries = secondary_server_selector(self) if secondaries.server_descriptions: return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) + return None @property def primary_selection(self): diff --git a/pymongo/settings.py b/pymongo/settings.py index 2bd2527cdf..5d6ddefd36 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -26,7 +26,7 @@ from pymongo.topology_description import TOPOLOGY_TYPE -class TopologySettings(object): +class TopologySettings: def __init__( self, seeds=None, @@ -156,4 +156,4 @@ def get_topology_type(self): def get_server_descriptions(self): """Initial dict of (address, ServerDescription) for all seeds.""" - return dict([(address, ServerDescription(address)) for address in self.seeds]) + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 420953db2e..a278898952 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -33,7 +33,7 @@ def _errno_from_exception(exc): return None -class SocketChecker(object): +class SocketChecker: def __init__(self) -> None: self._poller: Optional[select.poll] if _HAVE_POLL: @@ -78,7 +78,7 @@ def select( # ready: subsets of the first three arguments. Return # True if any of the lists are not empty. return any(res) - except (_SelectError, IOError) as exc: # type: ignore + except (_SelectError, OSError) as exc: # type: ignore if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue raise diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index fe2dd49aa0..583de818b0 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -51,7 +51,7 @@ def _resolve(*args, **kwargs): ) -class _SrvResolver(object): +class _SrvResolver: def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): self.__fqdn = fqdn self.__srv = srv_service_name @@ -110,9 +110,9 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): try: nlist = node[0].split(".")[1:][-self.__slen :] except Exception: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) + raise ConfigurationError(f"Invalid SRV host: {node[0]}") if self.__plist != nlist: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) + raise ConfigurationError(f"Invalid SRV host: {node[0]}") if self.__srv_max_hosts: nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) return results, nodes diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 13c5315eee..3af535ee4b 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -71,7 +71,7 @@ def get_ssl_context( try: ctx.load_cert_chain(certfile, None, passphrase) except _ssl.SSLError as exc: - raise ConfigurationError("Private key doesn't match certificate: %s" % (exc,)) + raise ConfigurationError(f"Private key doesn't match certificate: {exc}") if crlfile is not None: if _ssl.IS_PYOPENSSL: raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") diff --git a/pymongo/topology.py b/pymongo/topology.py index 904f6b1836..9759b39f9f 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -75,7 +75,7 @@ def process_events_queue(queue_ref): return True # Continue PeriodicExecutor. -class Topology(object): +class Topology: """Monitor a topology of one or more servers.""" def __init__(self, topology_settings): @@ -236,8 +236,7 @@ def _select_servers_loop(self, selector, timeout, address): # No suitable servers. if timeout == 0 or now > end_time: raise ServerSelectionTimeoutError( - "%s, Timeout: %ss, Topology Description: %r" - % (self._error_message(selector), timeout, self.description) + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" ) self._ensure_opened() @@ -431,7 +430,7 @@ def _get_replica_set_members(self, selector): ): return set() - return set([sd.address for sd in selector(self._new_selection())]) + return {sd.address for sd in selector(self._new_selection())} def get_secondaries(self): """Return set of secondary addresses.""" @@ -499,7 +498,8 @@ def update_pool(self): def close(self): """Clear pools and terminate monitors. Topology does not reopen on demand. Any further operations will raise - :exc:`~.errors.InvalidOperation`.""" + :exc:`~.errors.InvalidOperation`. + """ with self._lock: for server in self._servers.values(): server.close() @@ -807,14 +807,14 @@ def _error_message(self, selector): else: return "No %s available for writes" % server_plural else: - return 'No %s match selector "%s"' % (server_plural, selector) + return f'No {server_plural} match selector "{selector}"' else: addresses = list(self._description.server_descriptions()) servers = list(self._description.server_descriptions().values()) if not servers: if is_replica_set: # We removed all servers because of the wrong setName? - return 'No %s available for replica set name "%s"' % ( + return 'No {} available for replica set name "{}"'.format( server_plural, self._settings.replica_set_name, ) @@ -844,7 +844,7 @@ def __repr__(self): msg = "" if not self._opened: msg = "CLOSED " - return "<%s %s%r>" % (self.__class__.__name__, msg, self._description) + return f"<{self.__class__.__name__} {msg}{self._description!r}>" def eq_props(self): """The properties to use for MongoClient/Topology equality checks.""" @@ -860,7 +860,7 @@ def __hash__(self): return hash(self.eq_props()) -class _ErrorContext(object): +class _ErrorContext: """An error with context for SDAM error handling.""" def __init__(self, error, max_wire_version, sock_generation, completed_handshake, service_id): diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 7503a72704..7079b324b2 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -47,7 +47,7 @@ class _TopologyType(NamedTuple): _ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] -class TopologyDescription(object): +class TopologyDescription: def __init__( self, topology_type: int, @@ -171,7 +171,7 @@ def reset(self) -> "TopologyDescription": topology_type = self._topology_type # The default ServerDescription's type is Unknown. - sds = dict((address, ServerDescription(address)) for address in self._server_descriptions) + sds = {address: ServerDescription(address) for address in self._server_descriptions} return TopologyDescription( topology_type, @@ -184,7 +184,8 @@ def reset(self) -> "TopologyDescription": def server_descriptions(self) -> Dict[_Address, ServerDescription]: """Dict of (address, - :class:`~pymongo.server_description.ServerDescription`).""" + :class:`~pymongo.server_description.ServerDescription`). + """ return self._server_descriptions.copy() @property @@ -346,7 +347,7 @@ def has_writable_server(self) -> bool: def __repr__(self): # Sort the servers by address. servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) - return "<%s id: %s, topology_type: %s, servers: %r>" % ( + return "<{} id: {}, topology_type: {}, servers: {!r}>".format( self.__class__.__name__, self._topology_settings._topology_id, self.topology_type_name, @@ -400,8 +401,9 @@ def updated_topology_description( if set_name is not None and set_name != server_description.replica_set_name: error = ConfigurationError( "client is configured to connect to a replica set named " - "'%s' but this node belongs to a set named '%s'" - % (set_name, server_description.replica_set_name) + "'{}' but this node belongs to a set named '{}'".format( + set_name, server_description.replica_set_name + ) ) sds[address] = server_description.to_unknown(error=error) # Single type never changes. diff --git a/pymongo/typings.py b/pymongo/typings.py index 32cd980c97..ef82114f15 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -29,7 +29,8 @@ def strip_optional(elem): """This function is to allow us to cast all of the elements of an iterator from Optional[_T] to _T - while inside a list comprehension.""" + while inside a list comprehension. + """ assert elem is not None return elem diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index e3aeee399e..0772b39c80 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -134,7 +134,7 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr host, port = host.split(":", 1) if isinstance(port, str): if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535: %r" % (port,)) + raise ValueError(f"Port must be an integer between 0 and 65535: {port!r}") port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: @@ -155,7 +155,8 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr def _parse_options(opts, delim): """Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ - readpreferencetags portion, and the use of a unicode options string.""" + readpreferencetags portion, and the use of a unicode options string. + """ options = _CaseInsensitiveDictionary() for uriopt in opts.split(delim): key, value = uriopt.split("=") @@ -163,7 +164,7 @@ def _parse_options(opts, delim): options.setdefault(key, []).append(value) else: if key in options: - warnings.warn("Duplicate URI option '%s'." % (key,)) + warnings.warn(f"Duplicate URI option '{key}'.") if key.lower() == "authmechanismproperties": val = value else: @@ -475,9 +476,7 @@ def parse_uri( is_srv = True scheme_free = uri[SRV_SCHEME_LEN:] else: - raise InvalidURI( - "Invalid URI scheme: URI must begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) - ) + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") if not scheme_free: raise InvalidURI("Must provide at least one hostname or IP.") @@ -525,15 +524,13 @@ def parse_uri( srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") if is_srv: if options.get("directConnection"): - raise ConfigurationError( - "Cannot specify directConnection=true with %s URIs" % (SRV_SCHEME,) - ) + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") nodes = split_hosts(hosts, default_port=None) if len(nodes) != 1: - raise InvalidURI("%s URIs must include one, and only one, hostname" % (SRV_SCHEME,)) + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") fqdn, port = nodes[0] if port is not None: - raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") # Use the connection timeout. connectTimeoutMS passed as a keyword # argument overrides the same option passed in the connection string. diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ced71d0488..25f87954b5 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -19,7 +19,7 @@ from pymongo.errors import ConfigurationError -class WriteConcern(object): +class WriteConcern: """WriteConcern :Parameters: @@ -113,7 +113,9 @@ def acknowledged(self) -> bool: return self.__acknowledged def __repr__(self): - return "WriteConcern(%s)" % (", ".join("%s=%s" % kvt for kvt in self.__document.items()),) + return "WriteConcern({})".format( + ", ".join("{}={}".format(*kvt) for kvt in self.__document.items()) + ) def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): diff --git a/test/__init__.py b/test/__init__.py index dc324c6911..c80b4e95c8 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test suite for pymongo, bson, and gridfs. -""" +"""Test suite for pymongo, bson, and gridfs.""" import base64 import gc @@ -92,7 +91,7 @@ CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) -TLS_OPTIONS: Dict = dict(tls=True) +TLS_OPTIONS: Dict = {"tls": True} if CLIENT_PEM: TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM if CA_PEM: @@ -149,7 +148,7 @@ def is_server_resolvable(): try: socket.gethostbyname("server") return True - except socket.error: + except OSError: return False finally: socket.setdefaulttimeout(socket_timeout) @@ -165,7 +164,7 @@ def _create_user(authdb, user, pwd=None, roles=None, **kwargs): return authdb.command(cmd) -class client_knobs(object): +class client_knobs: def __init__( self, heartbeat_frequency=None, @@ -234,10 +233,9 @@ def wrap(*args, **kwargs): def __del__(self): if self._enabled: msg = ( - "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, " - "MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, " - "EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s" - % ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( common.HEARTBEAT_FREQUENCY, common.MIN_HEARTBEAT_INTERVAL, common.KILL_CURSOR_FREQUENCY, @@ -250,10 +248,10 @@ def __del__(self): def _all_users(db): - return set(u["user"] for u in db.command("usersInfo").get("users", [])) + return {u["user"] for u in db.command("usersInfo").get("users", [])} -class ClientContext(object): +class ClientContext: client: MongoClient MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI @@ -339,14 +337,14 @@ def _connect(self, host, port, **kwargs): except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - "connected client %r, but legacy hello failed: %s" % (client, exc) + f"connected client {client!r}, but legacy hello failed: {exc}" ) else: - self.connection_attempts.append("successfully connected client %r" % (client,)) + self.connection_attempts.append(f"successfully connected client {client!r}") # If connected, then return client with default timeout return pymongo.MongoClient(host, port, **kwargs) except pymongo.errors.ConnectionFailure as exc: - self.connection_attempts.append("failed to connect client %r: %s" % (client, exc)) + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") return None finally: client.close() @@ -447,7 +445,7 @@ def _init_client(self): nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) self.nodes = set(nodes) else: - self.nodes = set([(host, port)]) + self.nodes = {(host, port)} self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) @@ -587,7 +585,7 @@ def _server_started_with_ipv6(self): for info in socket.getaddrinfo(self.host, self.port): if info[0] == socket.AF_INET6: return True - except socket.error: + except OSError: pass return False @@ -599,7 +597,7 @@ def wrap(*args, **kwargs): self.init() # Always raise SkipTest if we can't connect to MongoDB if not self.connected: - raise SkipTest("Cannot connect to MongoDB on %s" % (self.pair,)) + raise SkipTest(f"Cannot connect to MongoDB on {self.pair}") if condition(): return f(*args, **kwargs) raise SkipTest(msg) @@ -625,7 +623,7 @@ def require_connection(self, func): """Run a test only if we can connect to MongoDB.""" return self._require( lambda: True, # _require checks if we're connected - "Cannot connect to MongoDB on %s" % (self.pair,), + f"Cannot connect to MongoDB on {self.pair}", func=func, ) @@ -633,14 +631,15 @@ def require_data_lake(self, func): """Run a test only if we are connected to Atlas Data Lake.""" return self._require( lambda: self.is_data_lake, - "Not connected to Atlas Data Lake on %s" % (self.pair,), + f"Not connected to Atlas Data Lake on {self.pair}", func=func, ) def require_no_mmap(self, func): """Run a test only if the server is not using the MMAPv1 storage engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters.""" + run regardless of storage engine on sharded clusters. + """ def is_not_mmap(): if self.is_mongos: @@ -734,7 +733,8 @@ def require_mongos(self, func): def require_multiple_mongoses(self, func): """Run a test only if the client is connected to a sharded cluster - that has 2 mongos nodes.""" + that has 2 mongos nodes. + """ return self._require( lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func ) @@ -786,7 +786,7 @@ def is_topology_type(self, topologies): "load-balanced", } if unknown: - raise AssertionError("Unknown topologies: %r" % (unknown,)) + raise AssertionError(f"Unknown topologies: {unknown!r}") if self.load_balancer: if "load-balanced" in topologies: return True @@ -812,7 +812,8 @@ def is_topology_type(self, topologies): def require_cluster_type(self, topologies=[]): # noqa """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies - are 'single', 'replicaset', and 'sharded'.""" + are 'single', 'replicaset', and 'sharded'. + """ def _is_valid_topology(): return self.is_topology_type(topologies) @@ -827,7 +828,8 @@ def require_test_commands(self, func): def require_failCommand_fail_point(self, func): """Run a test only if the server supports the failCommand fail - point.""" + point. + """ return self._require( lambda: self.supports_failCommand_fail_point, "failCommand fail point must be supported", @@ -930,7 +932,7 @@ def require_no_api_version(self, func): ) def mongos_seeds(self): - return ",".join("%s:%s" % address for address in self.mongoses) + return ",".join("{}:{}".format(*address) for address in self.mongoses) @property def supports_failCommand_fail_point(self): @@ -1139,7 +1141,7 @@ def setUpClass(cls): pass def setUp(self): - super(MockClientTest, self).setUp() + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) @@ -1147,7 +1149,7 @@ def setUp(self): def tearDown(self): self.client_knobs.disable() - super(MockClientTest, self).tearDown() + super().tearDown() # Global knobs to speed up the test suite. @@ -1181,9 +1183,9 @@ def print_running_topology(topology): if running: print( "WARNING: found Topology with running threads:\n" - " Threads: %s\n" - " Topology: %s\n" - " Creation traceback:\n%s" % (running, topology, topology._settings._stack) + " Threads: {}\n" + " Topology: {}\n" + " Creation traceback:\n{}".format(running, topology, topology._settings._stack) ) @@ -1215,11 +1217,11 @@ def teardown(): global_knobs.disable() garbage = [] for g in gc.garbage: - garbage.append("GARBAGE: %r" % (g,)) - garbage.append(" gc.get_referents: %r" % (gc.get_referents(g),)) - garbage.append(" gc.get_referrers: %r" % (gc.get_referrers(g),)) + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") if garbage: - assert False, "\n".join(garbage) + raise AssertionError("\n".join(garbage)) c = client_context.client if c: if not client_context.is_data_lake: @@ -1237,7 +1239,7 @@ def teardown(): class PymongoTestRunner(unittest.TextTestRunner): def run(self, test): setup() - result = super(PymongoTestRunner, self).run(test) + result = super().run(test) teardown() return result @@ -1247,7 +1249,7 @@ def run(self, test): class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] def run(self, test): setup() - result = super(PymongoXMLTestRunner, self).run(test) + result = super().run(test) teardown() return result @@ -1260,8 +1262,7 @@ def test_cases(suite): yield suite_or_case else: # unittest.TestSuite - for case in test_cases(suite_or_case): - yield case + yield from test_cases(suite_or_case) # Helper method to workaround https://bugs.python.org/issue21724 @@ -1272,7 +1273,7 @@ def clear_warning_registry(): setattr(module, "__warningregistry__", {}) # noqa -class SystemCertsPatcher(object): +class SystemCertsPatcher: def __init__(self, ca_certs): if ( ssl.OPENSSL_VERSION.lower().startswith("libressl") diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 39d817140e..036e4772ff 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -102,7 +102,7 @@ def test_uniqueness(self): duplicates = [names for names in uri_to_names.values() if len(names) > 1] self.assertFalse( duplicates, - "Error: the following env variables have duplicate values: %s" % (duplicates,), + f"Error: the following env variables have duplicate values: {duplicates}", ) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index e0329a783e..e180d8b064 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -39,7 +39,7 @@ def test_should_fail_without_credentials(self): if "@" not in self.uri: self.skipTest("MONGODB_URI already has no credentials") - hosts = ["%s:%s" % addr for addr in parse_uri(self.uri)["nodelist"]] + hosts = ["{}:{}".format(*addr) for addr in parse_uri(self.uri)["nodelist"]] self.assertTrue(hosts) with MongoClient(hosts) as client: with self.assertRaises(OperationFailure): @@ -115,7 +115,7 @@ def test_poisoned_cache(self): def test_environment_variables_ignored(self): creds = self.setup_cache() self.assertIsNotNone(creds) - prev = os.environ.copy() + os.environ.copy() client = MongoClient(self.uri) self.addCleanup(client.close) @@ -124,9 +124,11 @@ def test_environment_variables_ignored(self): self.assertIsNotNone(auth.get_cached_credentials()) - mock_env = dict( - AWS_ACCESS_KEY_ID="foo", AWS_SECRET_ACCESS_KEY="bar", AWS_SESSION_TOKEN="baz" - ) + mock_env = { + "AWS_ACCESS_KEY_ID": "foo", + "AWS_SECRET_ACCESS_KEY": "bar", + "AWS_SESSION_TOKEN": "baz", + } with patch.dict("os.environ", mock_env): self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") @@ -147,7 +149,7 @@ def test_no_cache_environment_variables(self): self.assertIsNotNone(creds) auth.set_cached_credentials(None) - mock_env = dict(AWS_ACCESS_KEY_ID=creds.username, AWS_SECRET_ACCESS_KEY=creds.password) + mock_env = {"AWS_ACCESS_KEY_ID": creds.username, "AWS_SECRET_ACCESS_KEY": creds.password} if creds.token: mock_env["AWS_SESSION_TOKEN"] = creds.token diff --git a/test/auth_aws/test_auth_oidc.py b/test/auth_aws/test_auth_oidc.py index 470e4581c2..26e71573d4 100644 --- a/test/auth_aws/test_auth_oidc.py +++ b/test/auth_aws/test_auth_oidc.py @@ -65,7 +65,7 @@ def request_token(server_info, context): self.assertEqual(timeout_seconds, 60 * 5) with open(token_file) as fid: token = fid.read() - resp = dict(access_token=token) + resp = {"access_token": token} time.sleep(sleep) @@ -94,7 +94,7 @@ def refresh_token(server_info, context): # Validate the timeout. self.assertEqual(context["timeout_seconds"], 60 * 5) - resp = dict(access_token=token) + resp = {"access_token": token} if expires_in_seconds is not None: resp["expires_in_seconds"] = expires_in_seconds self.refresh_called += 1 @@ -115,21 +115,21 @@ def fail_point(self, command_args): def test_connect_callbacks_single_implicit_username(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_callbacks_single_explicit_username(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_single, username="test_user1", authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_callbacks_multiple_principal_user1(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient( self.uri_multiple, username="test_user1", authmechanismproperties=props ) @@ -138,7 +138,7 @@ def test_connect_callbacks_multiple_principal_user1(self): def test_connect_callbacks_multiple_principal_user2(self): request_token = self.create_request_cb("test_user2") - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient( self.uri_multiple, username="test_user2", authmechanismproperties=props ) @@ -147,7 +147,7 @@ def test_connect_callbacks_multiple_principal_user2(self): def test_connect_callbacks_multiple_no_username(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_multiple, authmechanismproperties=props) with self.assertRaises(OperationFailure): client.test.test.find_one() @@ -155,13 +155,13 @@ def test_connect_callbacks_multiple_no_username(self): def test_allowed_hosts_blocked(self): request_token = self.create_request_cb() - props: Dict = dict(request_token_callback=request_token, allowed_hosts=[]) + props: Dict = {"request_token_callback": request_token, "allowed_hosts": []} client = MongoClient(self.uri_single, authmechanismproperties=props) with self.assertRaises(ConfigurationError): client.test.test.find_one() client.close() - props: Dict = dict(request_token_callback=request_token, allowed_hosts=["example.com"]) + props: Dict = {"request_token_callback": request_token, "allowed_hosts": ["example.com"]} client = MongoClient( self.uri_single + "&ignored=example.com", authmechanismproperties=props, connect=False ) @@ -170,26 +170,26 @@ def test_allowed_hosts_blocked(self): client.close() def test_connect_aws_single_principal(self): - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_aws_multiple_principal_user1(self): - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_multiple, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_aws_multiple_principal_user2(self): os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] = os.path.join(self.token_dir, "test_user2") - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_multiple, authmechanismproperties=props) client.test.test.find_one() client.close() def test_connect_aws_allowed_hosts_ignored(self): - props = dict(PROVIDER_NAME="aws", allowed_hosts=[]) + props = {"PROVIDER_NAME": "aws", "allowed_hosts": []} client = MongoClient(self.uri_multiple, authmechanismproperties=props) client.test.test.find_one() client.close() @@ -198,10 +198,10 @@ def test_valid_callbacks(self): request_cb = self.create_request_cb(expires_in_seconds=60) refresh_cb = self.create_refresh_cb() - props: Dict = dict( - request_token_callback=request_cb, - refresh_token_callback=refresh_cb, - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_cb, + } client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() @@ -214,7 +214,7 @@ def test_lock_avoids_extra_callbacks(self): request_cb = self.create_request_cb(sleep=0.5) refresh_cb = self.create_refresh_cb() - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} def run_test(): client = MongoClient(self.uri_single, authMechanismProperties=props) @@ -239,7 +239,7 @@ def test_request_callback_returns_null(self): def request_token_null(a, b): return None - props: Dict = dict(request_token_callback=request_token_null) + props: Dict = {"request_token_callback": request_token_null} client = MongoClient(self.uri_single, authMechanismProperties=props) with self.assertRaises(ValueError): client.test.test.find_one() @@ -251,9 +251,10 @@ def test_refresh_callback_returns_null(self): def refresh_token_null(a, b): return None - props: Dict = dict( - request_token_callback=request_cb, refresh_token_callback=refresh_token_null - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_token_null, + } client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -265,9 +266,9 @@ def refresh_token_null(a, b): def test_request_callback_invalid_result(self): def request_token_invalid(a, b): - return dict() + return {} - props: Dict = dict(request_token_callback=request_token_invalid) + props: Dict = {"request_token_callback": request_token_invalid} client = MongoClient(self.uri_single, authMechanismProperties=props) with self.assertRaises(ValueError): client.test.test.find_one() @@ -278,7 +279,7 @@ def request_cb_extra_value(server_info, context): result["foo"] = "bar" return result - props: Dict = dict(request_token_callback=request_cb_extra_value) + props: Dict = {"request_token_callback": request_cb_extra_value} client = MongoClient(self.uri_single, authMechanismProperties=props) with self.assertRaises(ValueError): client.test.test.find_one() @@ -288,11 +289,12 @@ def test_refresh_callback_missing_data(self): request_cb = self.create_request_cb(expires_in_seconds=60) def refresh_cb_no_token(a, b): - return dict() + return {} - props: Dict = dict( - request_token_callback=request_cb, refresh_token_callback=refresh_cb_no_token - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_cb_no_token, + } client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -310,9 +312,10 @@ def refresh_cb_extra_value(server_info, context): result["foo"] = "bar" return result - props: Dict = dict( - request_token_callback=request_cb, refresh_token_callback=refresh_cb_extra_value - ) + props: Dict = { + "request_token_callback": request_cb, + "refresh_token_callback": refresh_cb_extra_value, + } client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -329,7 +332,7 @@ def test_cache_with_refresh(self): request_cb = self.create_request_cb(expires_in_seconds=60) refresh_cb = self.create_refresh_cb() - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} # Ensure that a ``find`` operation adds credentials to the cache. client = MongoClient(self.uri_single, authMechanismProperties=props) @@ -352,7 +355,7 @@ def test_cache_with_no_refresh(self): # Give a callback response with a valid accessToken and an expiresInSeconds that is within one minute. request_cb = self.create_request_cb() - props = dict(request_token_callback=request_cb) + props = {"request_token_callback": request_cb} client = MongoClient(self.uri_single, authMechanismProperties=props) # Ensure that a ``find`` operation adds credentials to the cache. @@ -373,7 +376,7 @@ def test_cache_with_no_refresh(self): def test_cache_key_includes_callback(self): request_cb = self.create_request_cb() - props: Dict = dict(request_token_callback=request_cb) + props: Dict = {"request_token_callback": request_cb} # Ensure that a ``find`` operation adds a new entry to the cache. client = MongoClient(self.uri_single, authMechanismProperties=props) @@ -397,10 +400,10 @@ def test_cache_clears_on_error(self): # Create a new client with a valid request callback that gives credentials that expire within 5 minutes and a refresh callback that gives invalid credentials. def refresh_cb(a, b): - return dict(access_token="bad") + return {"access_token": "bad"} # Add a token to the cache that will expire soon. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authMechanismProperties=props) client.test.test.find_one() client.close() @@ -421,7 +424,7 @@ def refresh_cb(a, b): def test_cache_is_not_used_in_aws_automatic_workflow(self): # Create a new client using the AWS device workflow. # Ensure that a ``find`` operation does not add credentials to the cache. - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_single, authmechanismproperties=props) client.test.test.find_one() client.close() @@ -438,11 +441,11 @@ def test_speculative_auth_success(self): def request_token(a, b): with open(token_file) as fid: token = fid.read() - return dict(access_token=token, expires_in_seconds=1000) + return {"access_token": token, "expires_in_seconds": 1000} # Create a client with a request callback that returns a valid token # that will not expire soon. - props: Dict = dict(request_token_callback=request_token) + props: Dict = {"request_token_callback": request_token} client = MongoClient(self.uri_single, authmechanismproperties=props) # Set a fail point for saslStart commands. @@ -483,7 +486,7 @@ def test_reauthenticate_succeeds(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient( self.uri_single, event_listeners=[listener], authmechanismproperties=props ) @@ -536,7 +539,7 @@ def test_reauthenticate_succeeds_bulk_write(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform a find operation. @@ -563,7 +566,7 @@ def test_reauthenticate_succeeds_bulk_read(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform a find operation. @@ -594,7 +597,7 @@ def test_reauthenticate_succeeds_cursor(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform an insert operation. @@ -622,7 +625,7 @@ def test_reauthenticate_succeeds_get_more(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform an insert operation. @@ -647,7 +650,7 @@ def test_reauthenticate_succeeds_get_more(self): def test_reauthenticate_succeeds_get_more_exhaust(self): # Ensure no mongos - props = dict(PROVIDER_NAME="aws") + props = {"PROVIDER_NAME": "aws"} client = MongoClient(self.uri_single, authmechanismproperties=props) hello = client.admin.command(HelloCompat.LEGACY_CMD) if hello.get("msg") != "isdbgrid": @@ -657,7 +660,7 @@ def test_reauthenticate_succeeds_get_more_exhaust(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient(self.uri_single, authmechanismproperties=props) # Perform an insert operation. @@ -685,7 +688,7 @@ def test_reauthenticate_succeeds_command(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} print("start of test") client = MongoClient(self.uri_single, authmechanismproperties=props) @@ -703,7 +706,7 @@ def test_reauthenticate_succeeds_command(self): } ): # Perform a count operation. - cursor = client.test.command(dict(count="test")) + cursor = client.test.command({"count": "test"}) self.assertGreaterEqual(len(list(cursor)), 1) @@ -720,7 +723,7 @@ def test_reauthenticate_retries_and_succeeds_with_cache(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient( self.uri_single, event_listeners=[listener], authmechanismproperties=props ) @@ -750,7 +753,7 @@ def test_reauthenticate_fails_with_no_cache(self): refresh_cb = self.create_refresh_cb() # Create a client with the callbacks. - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client = MongoClient( self.uri_single, event_listeners=[listener], authmechanismproperties=props ) @@ -778,7 +781,7 @@ def test_late_reauth_avoids_callback(self): request_cb = self.create_request_cb(expires_in_seconds=1e6) refresh_cb = self.create_refresh_cb(expires_in_seconds=1e6) - props: Dict = dict(request_token_callback=request_cb, refresh_token_callback=refresh_cb) + props: Dict = {"request_token_callback": request_cb, "refresh_token_callback": refresh_cb} client1 = MongoClient(self.uri_single, authMechanismProperties=props) client1.test.test.find_one() client2 = MongoClient(self.uri_single, authMechanismProperties=props) diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py index 4118dfef9f..f711a125c2 100644 --- a/test/crud_v2_format.py +++ b/test/crud_v2_format.py @@ -27,7 +27,7 @@ class TestCrudV2(SpecRunner): def allowable_errors(self, op): """Override expected error classes.""" - errors = super(TestCrudV2, self).allowable_errors(op) + errors = super().allowable_errors(op) errors += (ValueError,) return errors @@ -51,4 +51,4 @@ def setup_scenario(self, scenario_def): """Allow specs to override a test's setup.""" # PYTHON-1935 Only create the collection if there is data to insert. if scenario_def["data"]: - super(TestCrudV2, self).setup_scenario(scenario_def) + super().setup_scenario(scenario_def) diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 90d7f27c39..692f9aef04 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -112,7 +112,7 @@ ] -_ops_by_name = dict([(op.name, op) for op in operations]) +_ops_by_name = {op.name: op for op in operations} Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 39188e8ad0..d3f8922c4c 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -247,6 +247,7 @@ def responder(request): } ) ) + return None else: return request.reply(**primary_response) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index dc2cd57380..7813069c99 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -46,7 +46,7 @@ def setup_server(self, upgrade): "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version ) - self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongoses_uri = "mongodb://{},{}".format( self.mongos_old.address_string, self.mongos_new.address_string, ) diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 997f5af118..62bd76cf0f 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -110,7 +110,7 @@ def generate_mongos_read_mode_tests(): # Skip something like command('foo', read_preference=SECONDARY). continue test = create_mongos_read_mode_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestMongosCommandReadMode, test_name, test) diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index ea13a3b042..dd14abf84f 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -26,7 +26,7 @@ def test_network_disconnect_primary(self): # Application operation fails against primary. Test that topology # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. # http://bit.ly/1B5ttuL - primary, secondary = servers = [MockupDB() for _ in range(2)] + primary, secondary = servers = (MockupDB() for _ in range(2)) for server in servers: server.run() self.addCleanup(server.stop) diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index 22fe38fd02..e8542e2fe5 100755 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -304,7 +304,7 @@ def test(self): def create_tests(ops): for op in ops: - test_name = "test_op_msg_%s" % (op.name,) + test_name = f"test_op_msg_{op.name}" setattr(TestOpMsg, test_name, operation_test(op)) diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index b377f4cf69..a3aef1541e 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -35,7 +35,7 @@ class OpMsgReadPrefBase(unittest.TestCase): @classmethod def setUpClass(cls): - super(OpMsgReadPrefBase, cls).setUpClass() + super().setUpClass() @classmethod def add_test(cls, mode, test_name, test): @@ -50,7 +50,7 @@ def setup_client(self, read_preference): class TestOpMsgMongos(OpMsgReadPrefBase): @classmethod def setUpClass(cls): - super(TestOpMsgMongos, cls).setUpClass() + super().setUpClass() auto_ismaster = { "ismaster": True, "msg": "isdbgrid", # Mongos. @@ -64,13 +64,13 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.primary.stop() - super(TestOpMsgMongos, cls).tearDownClass() + super().tearDownClass() class TestOpMsgReplicaSet(OpMsgReadPrefBase): @classmethod def setUpClass(cls): - super(TestOpMsgReplicaSet, cls).setUpClass() + super().setUpClass() cls.primary, cls.secondary = MockupDB(), MockupDB() for server in cls.primary, cls.secondary: server.run() @@ -94,7 +94,7 @@ def setUpClass(cls): def tearDownClass(cls): for server in cls.primary, cls.secondary: server.stop() - super(TestOpMsgReplicaSet, cls).tearDownClass() + super().tearDownClass() @classmethod def add_test(cls, mode, test_name, test): @@ -118,7 +118,7 @@ class TestOpMsgSingle(OpMsgReadPrefBase): @classmethod def setUpClass(cls): - super(TestOpMsgSingle, cls).setUpClass() + super().setUpClass() auto_ismaster = { "ismaster": True, "minWireVersion": 2, @@ -131,7 +131,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.primary.stop() - super(TestOpMsgSingle, cls).tearDownClass() + super().tearDownClass() def create_op_msg_read_mode_test(mode, operation): @@ -181,7 +181,7 @@ def generate_op_msg_read_mode_tests(): for entry in matrix: mode, operation = entry test = create_op_msg_read_mode_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: cls.add_test(mode, test_name, test) diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 841cd41846..c554499379 100755 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -26,7 +26,7 @@ class TestResetAndRequestCheck(unittest.TestCase): def __init__(self, *args, **kwargs): - super(TestResetAndRequestCheck, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.ismaster_time = 0.0 self.client = None self.server = None @@ -143,7 +143,7 @@ def generate_reset_tests(): for entry in matrix: operation, (test_method, name) = entry test = create_reset_test(operation, test_method) - test_name = "%s_%s" % (name, operation.name.replace(" ", "_")) + test_name = "{}_{}".format(name, operation.name.replace(" ", "_")) test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 18f2016126..5a590bcf15 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -43,7 +43,7 @@ def setup_server(self): "ismaster", minWireVersion=2, maxWireVersion=6, ismaster=True, msg="isdbgrid" ) - self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongoses_uri = "mongodb://{},{}".format( self.mongos1.address_string, self.mongos2.address_string, ) @@ -59,7 +59,7 @@ def test(self): elif operation.op_type == "must-use-primary": slave_ok = False else: - assert False, "unrecognized op_type %r" % operation.op_type + raise AssertionError("unrecognized op_type %r" % operation.op_type) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) @@ -84,7 +84,7 @@ def generate_slave_ok_sharded_tests(): for entry in matrix: mode, operation = entry test = create_slave_ok_sharded_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestSlaveOkaySharded, test_name, test) diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 4b2846490f..90b99df496 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -78,7 +78,7 @@ def generate_slave_ok_single_tests(): mode, (server_type, ismaster), operation = entry test = create_slave_ok_single_test(mode, server_type, ismaster, operation) - test_name = "test_%s_%s_with_mode_%s" % ( + test_name = "test_{}_{}_with_mode_{}".format( operation.name.replace(" ", "_"), server_type, mode, diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index bfdae9e824..6d3b299700 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test client for mod_wsgi application, see bug PYTHON-353. -""" +"""Test client for mod_wsgi application, see bug PYTHON-353.""" import _thread as thread import sys @@ -91,14 +90,14 @@ class URLGetterThread(threading.Thread): counter = 0 def __init__(self, options, url, nrequests_per_thread): - super(URLGetterThread, self).__init__() + super().__init__() self.options = options self.url = url self.nrequests_per_thread = nrequests_per_thread self.errors = 0 def run(self): - for i in range(self.nrequests_per_thread): + for _i in range(self.nrequests_per_thread): try: get(url) except Exception as e: @@ -128,9 +127,8 @@ def main(options, mode, url): if options.verbose: print( - "Getting %s %s times total in %s threads, " - "%s times per thread" - % ( + "Getting {} {} times total in {} threads, " + "{} times per thread".format( url, nrequests_per_thread * options.nthreads, options.nthreads, @@ -154,7 +152,7 @@ def main(options, mode, url): else: assert mode == "serial" if options.verbose: - print("Getting %s %s times in one thread" % (url, options.nrequests)) + print(f"Getting {url} {options.nrequests} times in one thread") for i in range(1, options.nrequests + 1): try: diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index a0770afefa..dc2650499f 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -40,7 +40,7 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s&tlsCAFile=%s&%s") % ( + uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS={}&tlsCAFile={}&{}").format( TIMEOUT_MS, CA_FILE, options, diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 3cb4b5d5d1..062058e09d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -58,7 +58,7 @@ def tearDownModule(): print(output) -class Timer(object): +class Timer: def __enter__(self): self.start = time.monotonic() return self @@ -68,7 +68,7 @@ def __exit__(self, *args): self.interval = self.end - self.start -class PerformanceTest(object): +class PerformanceTest: dataset: Any data_size: Any do_task: Any @@ -85,7 +85,7 @@ def tearDown(self): name = self.__class__.__name__ median = self.percentile(50) bytes_per_sec = self.data_size / median - print("Running %s. MEDIAN=%s" % (self.__class__.__name__, self.percentile(50))) + print(f"Running {self.__class__.__name__}. MEDIAN={self.percentile(50)}") result_data.append( { "info": { @@ -113,6 +113,7 @@ def percentile(self, percentile): return sorted_results[percentile_index] else: self.fail("Test execution failed") + return None def runTest(self): results = [] @@ -202,7 +203,7 @@ class TestDocument(PerformanceTest): def setUp(self): # Location of test data. with open( - os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)), "r" + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) ) as data: self.document = json.loads(data.read()) @@ -210,7 +211,7 @@ def setUp(self): self.client.drop_database("perftest") def tearDown(self): - super(TestDocument, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def before(self): @@ -225,7 +226,7 @@ class TestFindOneByID(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "tweet.json" - super(TestFindOneByID, self).setUp() + super().setUp() documents = [self.document.copy() for _ in range(NUM_DOCS)] self.corpus = self.client.perftest.corpus @@ -249,7 +250,7 @@ class TestSmallDocInsertOne(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "small_doc.json" - super(TestSmallDocInsertOne, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] @@ -264,7 +265,7 @@ class TestLargeDocInsertOne(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "large_doc.json" - super(TestLargeDocInsertOne, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(10)] @@ -280,7 +281,7 @@ class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "tweet.json" - super(TestFindManyAndEmptyCursor, self).setUp() + super().setUp() for _ in range(10): self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) @@ -301,7 +302,7 @@ class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "small_doc.json" - super(TestSmallDocBulkInsert, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(NUM_DOCS)] def before(self): @@ -316,7 +317,7 @@ class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): def setUp(self): self.dataset = "large_doc.json" - super(TestLargeDocBulkInsert, self).setUp() + super().setUp() self.documents = [self.document.copy() for _ in range(10)] def before(self): @@ -342,7 +343,7 @@ def setUp(self): self.bucket = GridFSBucket(self.client.perftest) def tearDown(self): - super(TestGridFsUpload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def before(self): @@ -368,7 +369,7 @@ def setUp(self): self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) def tearDown(self): - super(TestGridFsDownload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def do_task(self): @@ -392,14 +393,14 @@ def mp_map(map_func, files): def insert_json_file(filename): assert proc_client is not None - with open(filename, "r") as data: + with open(filename) as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) def insert_json_file_with_file_id(filename): documents = [] - with open(filename, "r") as data: + with open(filename) as data: for line in data: doc = json.loads(line) doc["file"] = filename @@ -461,7 +462,7 @@ def after(self): self.client.perftest.drop_collection("corpus") def tearDown(self): - super(TestJsonMultiImport, self).tearDown() + super().tearDown() self.client.drop_database("perftest") @@ -482,7 +483,7 @@ def do_task(self): mp_map(read_json_file, self.files) def tearDown(self): - super(TestJsonMultiExport, self).tearDown() + super().tearDown() self.client.drop_database("perftest") @@ -505,7 +506,7 @@ def do_task(self): mp_map(insert_gridfs_file, self.files) def tearDown(self): - super(TestGridFsMultiFileUpload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") @@ -529,7 +530,7 @@ def do_task(self): mp_map(read_gridfs_file, self.files) def tearDown(self): - super(TestGridFsMultiFileDownload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index 580c5da993..2e7fda21e0 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -40,7 +40,7 @@ def __init__(self, client, pair, *args, **kwargs): @contextlib.contextmanager def get_socket(self, handler=None): client = self.client - host_and_port = "%s:%s" % (self.mock_host, self.mock_port) + host_and_port = f"{self.mock_host}:{self.mock_port}" if host_and_port in client.mock_down_hosts: raise AutoReconnect("mock error") @@ -54,7 +54,7 @@ def get_socket(self, handler=None): yield sock_info -class DummyMonitor(object): +class DummyMonitor: def __init__(self, server_description, topology, pool, topology_settings): self._server_description = server_description self.opened = False @@ -99,7 +99,7 @@ def __init__( arbiters=None, down_hosts=None, *args, - **kwargs + **kwargs, ): """A MongoClient connected to the default server, with a mock topology. @@ -144,7 +144,7 @@ def __init__( client_options = client_context.default_client_options.copy() client_options.update(kwargs) - super(MockClient, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) def kill_host(self, host): """Host is like 'a:1'.""" diff --git a/test/qcheck.py b/test/qcheck.py index 4cce7b5bc8..52e4c46b8b 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -116,7 +116,8 @@ def gen_regexp(gen_length): # TODO our patterns only consist of one letter. # this is because of a bug in CPython's regex equality testing, # which I haven't quite tracked down, so I'm just ignoring it... - pattern = lambda: "".join(gen_list(choose_lifted("a"), gen_length)()) + def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) def gen_flags(): flags = 0 @@ -230,9 +231,9 @@ def check(predicate, generator): try: if not predicate(case): reduction = reduce(case, predicate) - counter_examples.append("after %s reductions: %r" % reduction) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) except: - counter_examples.append("%r : %s" % (case, traceback.format_exc())) + counter_examples.append(f"{case!r} : {traceback.format_exc()}") return counter_examples diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py index 87b4f62038..6f84b6a6a2 100644 --- a/test/sigstop_sigcont.py +++ b/test/sigstop_sigcont.py @@ -84,7 +84,7 @@ def main(uri: str) -> None: if len(sys.argv) != 2: print("unknown or missing options") print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") - exit(1) + sys.exit(1) # Enable logs in this format: # 2022-03-30 12:40:55,582 INFO diff --git a/test/test_auth.py b/test/test_auth.py index 7db2247746..f9a9af4d5a 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -67,7 +67,7 @@ class AutoAuthenticateThread(threading.Thread): """ def __init__(self, collection): - super(AutoAuthenticateThread, self).__init__() + super().__init__() self.collection = collection self.success = False @@ -89,10 +89,10 @@ def setUpClass(cls): cls.service_realm_required = ( GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL ) - mech_properties = "SERVICE_NAME:%s" % (GSSAPI_SERVICE_NAME,) - mech_properties += ",CANONICALIZE_HOST_NAME:%s" % (GSSAPI_CANONICALIZE,) + mech_properties = f"SERVICE_NAME:{GSSAPI_SERVICE_NAME}" + mech_properties += f",CANONICALIZE_HOST_NAME:{GSSAPI_CANONICALIZE}" if GSSAPI_SERVICE_REALM is not None: - mech_properties += ",SERVICE_REALM:%s" % (GSSAPI_SERVICE_REALM,) + mech_properties += f",SERVICE_REALM:{GSSAPI_SERVICE_REALM}" cls.mech_properties = mech_properties def test_credentials_hashing(self): @@ -111,8 +111,8 @@ def test_credentials_hashing(self): "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None ) - self.assertEqual(1, len(set([creds1, creds2]))) - self.assertEqual(3, len(set([creds0, creds1, creds2, creds3]))) + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) @ignore_deprecations def test_gssapi_simple(self): @@ -160,7 +160,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. - mech_uri = uri + "&authMechanismProperties=%s" % (self.mech_properties,) + mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() @@ -179,7 +179,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() - uri = uri + "&replicaSet=%s" % (str(set_name),) + uri = uri + f"&replicaSet={str(set_name)}" client = MongoClient(uri) client[GSSAPI_DB].list_collection_names() @@ -196,7 +196,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() - mech_uri = mech_uri + "&replicaSet=%s" % (str(set_name),) + mech_uri = mech_uri + f"&replicaSet={str(set_name)}" client = MongoClient(mech_uri) client[GSSAPI_DB].list_collection_names() @@ -336,12 +336,12 @@ def auth_string(user, password): class TestSCRAMSHA1(IntegrationTest): @client_context.require_auth def setUp(self): - super(TestSCRAMSHA1, self).setUp() + super().setUp() client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) def tearDown(self): client_context.drop_user("pymongo_test", "user") - super(TestSCRAMSHA1, self).tearDown() + super().tearDown() def test_scram_sha1(self): host, port = client_context.host, client_context.port @@ -368,16 +368,16 @@ class TestSCRAM(IntegrationTest): @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): - super(TestSCRAM, self).setUp() + super().setUp() self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS - monitoring._SENSITIVE_COMMANDS = set([]) + monitoring._SENSITIVE_COMMANDS = set() self.listener = AllowListEventListener("saslStart") def tearDown(self): monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS client_context.client.testscram.command("dropAllUsersFromDatabase") client_context.client.drop_database("testscram") - super(TestSCRAM, self).tearDown() + super().tearDown() def test_scram_skip_empty_exchange(self): listener = AllowListEventListener("saslStart", "saslContinue") @@ -597,14 +597,14 @@ def test_scram_threaded(self): class TestAuthURIOptions(IntegrationTest): @client_context.require_auth def setUp(self): - super(TestAuthURIOptions, self).setUp() + super().setUp() client_context.create_user("admin", "admin", "pass") client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): client_context.drop_user("pymongo_test", "user") client_context.drop_user("admin", "admin") - super(TestAuthURIOptions, self).tearDown() + super().tearDown() def test_uri_options(self): # Test default to admin diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 78f4d21929..ebcc4eeb7d 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -67,7 +67,7 @@ def run_test(self): expected = credential["mechanism_properties"] if expected is not None: actual = credentials.mechanism_properties - for key, val in expected.items(): + for key, _val in expected.items(): if "SERVICE_NAME" in expected: self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) elif "CANONICALIZE_HOST_NAME" in expected: @@ -91,7 +91,7 @@ def run_test(self): actual.refresh_token_callback, expected["refresh_token_callback"] ) else: - self.fail("Unhandled property: %s" % (key,)) + self.fail(f"Unhandled property: {key}") else: if credential["mechanism"] == "MONGODB-AWS": self.assertIsNone(credentials.mechanism_properties.aws_session_token) @@ -111,7 +111,7 @@ def create_tests(): continue test_method = create_test(test_case) name = str(test_case["description"].lower().replace(" ", "_")) - setattr(TestAuthSpec, "test_%s_%s" % (test_suffix, name), test_method) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) create_tests() diff --git a/test/test_binary.py b/test/test_binary.py index 65abdca796..158a990290 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -122,15 +122,15 @@ def test_equality(self): def test_repr(self): one = Binary(b"hello world") - self.assertEqual(repr(one), "Binary(%s, 0)" % (repr(b"hello world"),)) + self.assertEqual(repr(one), "Binary({}, 0)".format(repr(b"hello world"))) two = Binary(b"hello world", 2) - self.assertEqual(repr(two), "Binary(%s, 2)" % (repr(b"hello world"),)) + self.assertEqual(repr(two), "Binary({}, 2)".format(repr(b"hello world"))) three = Binary(b"\x08\xFF") - self.assertEqual(repr(three), "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(three), "Binary({}, 0)".format(repr(b"\x08\xFF"))) four = Binary(b"\x08\xFF", 2) - self.assertEqual(repr(four), "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(four), "Binary({}, 2)".format(repr(b"\x08\xFF"))) five = Binary(b"test", 100) - self.assertEqual(repr(five), "Binary(%s, 100)" % (repr(b"test"),)) + self.assertEqual(repr(five), "Binary({}, 100)".format(repr(b"test"))) def test_hash(self): one = Binary(b"hello world") @@ -351,7 +351,7 @@ class TestUuidSpecExplicitCoding(unittest.TestCase): @classmethod def setUpClass(cls): - super(TestUuidSpecExplicitCoding, cls).setUpClass() + super().setUpClass() cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") @staticmethod @@ -452,7 +452,7 @@ class TestUuidSpecImplicitCoding(IntegrationTest): @classmethod def setUpClass(cls): - super(TestUuidSpecImplicitCoding, cls).setUpClass() + super().setUpClass() cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") @staticmethod diff --git a/test/test_bson.py b/test/test_bson.py index a8fd1fef45..a6e6352333 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -370,7 +369,7 @@ def test_invalid_decodes(self): ), ] for i, data in enumerate(bad_bsons): - msg = "bad_bson[{}]".format(i) + msg = f"bad_bson[{i}]" with self.assertRaises(InvalidBSON, msg=msg): decode_all(data) with self.assertRaises(InvalidBSON, msg=msg): @@ -491,7 +490,7 @@ def test_basic_encode(self): def test_unknown_type(self): # Repr value differs with major python version - part = "type %r for fieldname 'foo'" % (b"\x14",) + part = "type {!r} for fieldname 'foo'".format(b"\x14") docs = [ b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00", (b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140\x00\x01\x00\x00\x00\x00\x00"), @@ -648,7 +647,7 @@ def test_small_long_encode_decode(self): encoded1 = encode({"x": 256}) decoded1 = decode(encoded1)["x"] self.assertEqual(256, decoded1) - self.assertEqual(type(256), type(decoded1)) + self.assertEqual(int, type(decoded1)) encoded2 = encode({"x": Int64(256)}) decoded2 = decode(encoded2)["x"] @@ -925,7 +924,7 @@ def test_bad_id_keys(self): def test_bson_encode_thread_safe(self): def target(i): for j in range(1000): - my_int = type("MyInt_%s_%s" % (i, j), (int,), {}) + my_int = type(f"MyInt_{i}_{j}", (int,), {}) bson.encode({"my_int": my_int()}) threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)] @@ -939,7 +938,7 @@ def target(i): self.assertIsNone(t.exc) def test_raise_invalid_document(self): - class Wrapper(object): + class Wrapper: def __init__(self, val): self.val = val diff --git a/test/test_bulk.py b/test/test_bulk.py index ac7073c0ef..6a2af3143c 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -50,12 +50,12 @@ class BulkTestBase(IntegrationTest): @classmethod def setUpClass(cls): - super(BulkTestBase, cls).setUpClass() + super().setUpClass() cls.coll = cls.db.test cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) def setUp(self): - super(BulkTestBase, self).setUp() + super().setUp() self.coll.drop() def assertEqualResponse(self, expected, actual): @@ -93,7 +93,7 @@ def assertEqualResponse(self, expected, actual): self.assertEqual( actual.get(key), value, - "%r value of %r does not match expected %r" % (key, actual.get(key), value), + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", ) def assertEqualUpsert(self, expected, actual): @@ -793,10 +793,10 @@ class BulkAuthorizationTestBase(BulkTestBase): @client_context.require_auth @client_context.require_no_api_version def setUpClass(cls): - super(BulkAuthorizationTestBase, cls).setUpClass() + super().setUpClass() def setUp(self): - super(BulkAuthorizationTestBase, self).setUp() + super().setUp() client_context.create_user(self.db.name, "readonly", "pw", ["read"]) self.db.command( "createRole", @@ -902,7 +902,7 @@ def test_no_remove(self): InsertOne({"x": 3}), # Never attempted. ] self.assertRaises(OperationFailure, coll.bulk_write, requests) - self.assertEqual(set([1, 2]), set(self.coll.distinct("x"))) + self.assertEqual({1, 2}, set(self.coll.distinct("x"))) class TestBulkWriteConcern(BulkTestBase): @@ -911,7 +911,7 @@ class TestBulkWriteConcern(BulkTestBase): @classmethod def setUpClass(cls): - super(TestBulkWriteConcern, cls).setUpClass() + super().setUpClass() cls.w = client_context.w cls.secondary = None if cls.w is not None and cls.w > 1: diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 2388a6e1f4..c9ddfcd137 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -104,7 +104,8 @@ def get_resume_token(self, invalidate=False): def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most - recently returned timestamp.""" + recently returned timestamp. + """ optime = self.client.admin.command("ping")["operationTime"] return Timestamp(optime.time, optime.inc + 1) @@ -120,7 +121,7 @@ def kill_change_stream_cursor(self, change_stream): client._close_cursor_now(cursor.cursor_id, address) -class APITestsMixin(object): +class APITestsMixin: @no_type_check def test_watch(self): with self.change_stream( @@ -208,7 +209,7 @@ def test_try_next_runs_one_getmore(self): # Stream still works after a resume. coll.insert_one({"_id": 3}) wait_until(lambda: stream.try_next() is not None, "get change from try_next") - self.assertEqual(set(listener.started_command_names()), set(["getMore"])) + self.assertEqual(set(listener.started_command_names()), {"getMore"}) self.assertIsNone(stream.try_next()) @no_type_check @@ -249,7 +250,7 @@ def test_start_at_operation_time(self): coll.insert_many([{"data": i} for i in range(ndocs)]) with self.change_stream(start_at_operation_time=optime) as cs: - for i in range(ndocs): + for _i in range(ndocs): cs.next() @no_type_check @@ -443,7 +444,7 @@ def test_start_after_resume_process_without_changes(self): self.assertEqual(change["fullDocument"], {"_id": 2}) -class ProseSpecTestsMixin(object): +class ProseSpecTestsMixin: @no_type_check def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) @@ -461,7 +462,8 @@ def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. Assumes the stream - has never returned any changes if previous_change is None.""" + has never returned any changes if previous_change is None. + """ if previous_change is None: agg_cmd = listener.started_events[0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] @@ -474,7 +476,8 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes listener is a AllowListEventListener that listens for aggregate and - getMore commands.""" + getMore commands. + """ if previous_change is None or stream._cursor._has_next(): token = self._get_expected_resume_token_legacy(stream, listener, previous_change) if token is not None: @@ -767,14 +770,14 @@ class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams def setUpClass(cls): - super(TestClusterChangeStream, cls).setUpClass() + super().setUpClass() cls.dbs = [cls.db, cls.client.pymongo_test_2] @classmethod def tearDownClass(cls): for db in cls.dbs: cls.client.drop_database(db) - super(TestClusterChangeStream, cls).tearDownClass() + super().tearDownClass() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) @@ -828,7 +831,7 @@ class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams def setUpClass(cls): - super(TestDatabaseChangeStream, cls).setUpClass() + super().setUpClass() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) @@ -913,7 +916,7 @@ class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecT @classmethod @client_context.require_change_streams def setUpClass(cls): - super(TestCollectionChangeStream, cls).setUpClass() + super().setUpClass() def setUp(self): # Use a new collection for each test. @@ -1044,17 +1047,17 @@ class TestAllLegacyScenarios(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestAllLegacyScenarios, cls).setUpClass() + super().setUpClass() cls.listener = AllowListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod def tearDownClass(cls): cls.client.close() - super(TestAllLegacyScenarios, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(TestAllLegacyScenarios, self).setUp() + super().setUp() self.listener.reset() def setUpCluster(self, scenario_dict): @@ -1088,7 +1091,8 @@ def setFailPoint(self, scenario_dict): def assert_list_contents_are_subset(self, superlist, sublist): """Check that each element in sublist is a subset of the corresponding - element in superlist.""" + element in superlist. + """ self.assertEqual(len(superlist), len(sublist)) for sup, sub in zip(superlist, sublist): if isinstance(sub, dict): @@ -1104,7 +1108,7 @@ def assert_dict_is_subset(self, superdict, subdict): exempt_fields = ["documentKey", "_id", "getMore"] for key, value in subdict.items(): if key not in superdict: - self.fail("Key %s not found in %s" % (key, superdict)) + self.fail(f"Key {key} not found in {superdict}") if isinstance(value, dict): self.assert_dict_is_subset(superdict[key], value) continue diff --git a/test/test_client.py b/test/test_client.py index 624c460c08..ec2b4bac97 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -325,7 +325,7 @@ def test_metadata(self): self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) # Test appending to driver info. metadata["driver"]["name"] = "PyMongo|FooDriver" - metadata["driver"]["version"] = "%s|1.2.3" % (_METADATA["driver"]["version"],) + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) client = MongoClient( "foo", 27017, @@ -335,7 +335,7 @@ def test_metadata(self): ) options = client._MongoClient__options self.assertEqual(options.pool_options.metadata, metadata) - metadata["platform"] = "%s|FooPlatform" % (_METADATA["platform"],) + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) client = MongoClient( "foo", 27017, @@ -347,7 +347,7 @@ def test_metadata(self): self.assertEqual(options.pool_options.metadata, metadata) def test_kwargs_codec_options(self): - class MyFloatType(object): + class MyFloatType: def __init__(self, x): self.__x = x @@ -704,7 +704,7 @@ def test_init_disconnected_with_auth(self): self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): - seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertEqual(client_context.client, c) @@ -723,7 +723,7 @@ def test_equality(self): ) def test_hashable(self): - seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = rs_or_single_client(seed, connect=False) self.addCleanup(c.close) self.assertIn(c, {client_context.client}) @@ -735,7 +735,7 @@ def test_host_w_port(self): with self.assertRaises(ValueError): connected( MongoClient( - "%s:1234567" % (client_context.host,), + f"{client_context.host}:1234567", connectTimeoutMS=1, serverSelectionTimeoutMS=10, ) @@ -1002,7 +1002,7 @@ def test_username_and_password(self): @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), connect=False + f"mongodb://user:wrong@{client_context.host}/pymongo_test", connect=False ) assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) @@ -1160,7 +1160,7 @@ def test_ipv6(self): raise SkipTest("Need the ipaddress module to test with SSL") if client_context.auth_enabled: - auth_str = "%s:%s@" % (db_user, db_pwd) + auth_str = f"{db_user}:{db_pwd}@" else: auth_str = "" @@ -1533,7 +1533,7 @@ def test_reset_during_update_pool(self): # Continuously reset the pool. class ResetPoolThread(threading.Thread): def __init__(self, pool): - super(ResetPoolThread, self).__init__() + super().__init__() self.running = True self.pool = pool @@ -1657,7 +1657,7 @@ def test_network_error_message(self): {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} ): assert client.address is not None - expected = "%s:%s: " % client.address + expected = "{}:{}: ".format(*client.address) with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) @@ -1836,7 +1836,7 @@ class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" def setUp(self): - super(TestExhaustCursor, self).setUp() + super().setUp() if client_context.is_mongos: raise SkipTest("mongos doesn't support exhaust, SERVER-2627") @@ -2188,23 +2188,33 @@ def _test_network_error(self, operation_callback): self.assertEqual(7, sd_b.max_wire_version) def test_network_error_on_query(self): - callback = lambda client: client.db.collection.find_one() + def callback(client): + return client.db.collection.find_one() + self._test_network_error(callback) def test_network_error_on_insert(self): - callback = lambda client: client.db.collection.insert_one({}) + def callback(client): + return client.db.collection.insert_one({}) + self._test_network_error(callback) def test_network_error_on_update(self): - callback = lambda client: client.db.collection.update_one({}, {"$unset": "x"}) + def callback(client): + return client.db.collection.update_one({}, {"$unset": "x"}) + self._test_network_error(callback) def test_network_error_on_replace(self): - callback = lambda client: client.db.collection.replace_one({}, {}) + def callback(client): + return client.db.collection.replace_one({}, {}) + self._test_network_error(callback) def test_network_error_on_delete(self): - callback = lambda client: client.db.collection.delete_many({}) + def callback(client): + return client.db.collection.delete_many({}) + self._test_network_error(callback) @@ -2227,7 +2237,7 @@ def test_rs_client_does_not_maintain_pool_to_arbiters(self): wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(c.address, ("a", 1)) - self.assertEqual(c.arbiters, set([("c", 3)])) + self.assertEqual(c.arbiters, {("c", 3)}) # Assert that we create 2 and only 2 pooled connections. listener.wait_for_event(monitoring.ConnectionReadyEvent, 2) self.assertEqual(listener.event_count(monitoring.ConnectionCreatedEvent), 2) diff --git a/test/test_client_context.py b/test/test_client_context.py index 9ee5b96d61..72da8dbc34 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -28,8 +28,9 @@ def test_must_connect(self): self.assertTrue( client_context.connected, "client context must be connected when " - "PYMONGO_MUST_CONNECT is set. Failed attempts:\n%s" - % (client_context.connection_attempt_info(),), + "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), ) def test_serverless(self): @@ -39,8 +40,9 @@ def test_serverless(self): self.assertTrue( client_context.connected and client_context.serverless, "client context must be connected to serverless when " - "TEST_SERVERLESS is set. Failed attempts:\n%s" - % (client_context.connection_attempt_info(),), + "TEST_SERVERLESS is set. Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), ) def test_enableTestCommands_is_disabled(self): diff --git a/test/test_cmap.py b/test/test_cmap.py index 360edef0e8..3b84524f44 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -116,7 +116,7 @@ def wait_for_event(self, op): timeout = op.get("timeout", 10000) / 1000.0 wait_until( lambda: self.listener.event_count(event) >= count, - "find %s %s event(s)" % (count, event), + f"find {count} {event} event(s)", timeout=timeout, ) @@ -191,11 +191,11 @@ def check_events(self, events, ignore): """Check the events of a test.""" actual_events = self.actual_events(ignore) for actual, expected in zip(actual_events, events): - self.logs.append("Checking event actual: %r vs expected: %r" % (actual, expected)) + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") self.check_event(actual, expected) if len(events) > len(actual_events): - self.fail("missing events: %r" % (events[len(actual_events) :],)) + self.fail(f"missing events: {events[len(actual_events) :]!r}") def check_error(self, actual, expected): message = expected.pop("message") @@ -260,9 +260,9 @@ def run_scenario(self, scenario_def, test): self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. - self.targets: dict = dict() + self.targets: dict = {} # Map of label names to Connection objects - self.labels: dict = dict() + self.labels: dict = {} def cleanup(): for t in self.targets.values(): @@ -285,7 +285,7 @@ def cleanup(): self.check_events(test["events"], test["ignore"]) except Exception: # Print the events after a test failure. - print("\nFailed test: %r" % (test["description"],)) + print("\nFailed test: {!r}".format(test["description"])) print("Operations:") for op in self._ops: print(op) @@ -332,8 +332,8 @@ def test_2_all_client_pools_have_same_options(self): self.assertEqual(pool.opts, pool_opts) def test_3_uri_connection_pool_options(self): - opts = "&".join(["%s=%s" % (k, v) for k, v in self.POOL_OPTIONS.items()]) - uri = "mongodb://%s/?%s" % (client_context.pair, opts) + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{client_context.pair}/?{opts}" client = rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts diff --git a/test/test_code.py b/test/test_code.py index 9ff305e39a..9e44ca4962 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -67,7 +66,7 @@ def test_repr(self): c = Code("hello world", {"blah": 3}) self.assertEqual(repr(c), "Code('hello world', {'blah': 3})") c = Code("\x08\xFF") - self.assertEqual(repr(c), "Code(%s, None)" % (repr("\x08\xFF"),)) + self.assertEqual(repr(c), "Code({}, None)".format(repr("\x08\xFF"))) def test_equality(self): b = Code("hello") diff --git a/test/test_collation.py b/test/test_collation.py index 18f8bc78ac..7f4bbf4750 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -96,7 +96,7 @@ class TestCollation(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestCollation, cls).setUpClass() + super().setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -110,11 +110,11 @@ def tearDownClass(cls): cls.warn_context.__exit__() cls.warn_context = None cls.client.close() - super(TestCollation, cls).tearDownClass() + super().tearDownClass() def tearDown(self): self.listener.reset() - super(TestCollation, self).tearDown() + super().tearDown() def last_command_started(self): return self.listener.started_events[-1].command diff --git a/test/test_collection.py b/test/test_collection.py index e36d6663f0..ca657f0099 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -151,7 +149,7 @@ class TestCollection(IntegrationTest): @classmethod def setUpClass(cls): - super(TestCollection, cls).setUpClass() + super().setUpClass() cls.w = client_context.w # type: ignore @classmethod @@ -373,7 +371,7 @@ def test_list_indexes(self): db.test.insert_one({}) # create collection def map_indexes(indexes): - return dict([(index["name"], index) for index in indexes]) + return {index["name"]: index for index in indexes} indexes = list(db.test.list_indexes()) self.assertEqual(len(indexes), 1) @@ -485,7 +483,7 @@ def test_index_2dsphere(self): db.test.drop_indexes() self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) - for dummy, info in db.test.index_information().items(): + for _dummy, info in db.test.index_information().items(): field, idx_type = info["key"][0] if field == "geo" and idx_type == "2dsphere": break @@ -504,7 +502,7 @@ def test_index_hashed(self): db.test.drop_indexes() self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) - for dummy, info in db.test.index_information().items(): + for _dummy, info in db.test.index_information().items(): field, idx_type = info["key"][0] if field == "a" and idx_type == "hashed": break @@ -1638,8 +1636,8 @@ def test_find_one(self): self.assertTrue("hello" in db.test.find_one(projection=("hello",))) self.assertTrue("hello" not in db.test.find_one(projection=("foo",))) - self.assertTrue("hello" in db.test.find_one(projection=set(["hello"]))) - self.assertTrue("hello" not in db.test.find_one(projection=set(["foo"]))) + self.assertTrue("hello" in db.test.find_one(projection={"hello"})) + self.assertTrue("hello" not in db.test.find_one(projection={"foo"})) self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) diff --git a/test/test_comment.py b/test/test_comment.py index 85e5470d74..ea44c74257 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -28,7 +28,7 @@ from pymongo.operations import IndexModel -class Empty(object): +class Empty: def __getattr__(self, item): try: self.__dict__[item] diff --git a/test/test_common.py b/test/test_common.py index ff50878ea1..76367ffa0c 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -148,14 +148,12 @@ def test_mongo_client(self): self.assertTrue(new_coll.insert_one(doc)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client( - "mongodb://%s/" % (pair,), replicaSet=client_context.replica_set_name - ) + m = rs_or_single_client(f"mongodb://{pair}/", replicaSet=client_context.replica_set_name) coll = m.pymongo_test.write_concern_test self.assertRaises(OperationFailure, coll.insert_one, doc) m = rs_or_single_client( - "mongodb://%s/?w=0" % (pair,), replicaSet=client_context.replica_set_name + f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name ) coll = m.pymongo_test.write_concern_test @@ -163,7 +161,7 @@ def test_mongo_client(self): # Equality tests direct = connected(single_client(w=0)) - direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), **self.credentials)) + direct2 = connected(single_client(f"mongodb://{pair}/?w=0", **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index fd9f126551..e09ba72a5c 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -40,7 +40,7 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): @classmethod @client_context.require_replica_set def setUpClass(cls): - super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() + super().setUpClass() cls.listener = CMAPListener() cls.client = rs_or_single_client( event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index ca4b84c26d..589da0a7d7 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -55,7 +55,7 @@ def check_result(self, expected_result, result): if isinstance(result, _WriteResult): for res in expected_result: prop = camel_to_snake(res) - msg = "%s : %r != %r" % (prop, expected_result, result) + msg = f"{prop} : {expected_result!r} != {result!r}" # SPEC-869: Only BulkWriteResult has upserted_count. if prop == "upserted_count" and not isinstance(result, BulkWriteResult): if result.upserted_id is not None: # type: ignore diff --git a/test/test_cursor.py b/test/test_cursor.py index e96efb92b0..f8820f8aa2 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -945,7 +945,7 @@ def test_getitem_slice_index(self): for a, b in zip(count(99), self.db.test.find()[99:]): self.assertEqual(a, b["i"]) - for i in self.db.test.find()[1000:]: + for _i in self.db.test.find()[1000:]: self.fail() self.assertEqual(5, len(list(self.db.test.find()[20:25]))) @@ -1079,7 +1079,7 @@ def test_concurrent_close(self): def iterate_cursor(): while cursor.alive: - for doc in cursor: + for _doc in cursor: pass t = threading.Thread(target=iterate_cursor) @@ -1430,7 +1430,7 @@ def test_monitoring(self): class TestRawBatchCommandCursor(IntegrationTest): @classmethod def setUpClass(cls): - super(TestRawBatchCommandCursor, cls).setUpClass() + super().setUpClass() def test_aggregate_raw(self): c = self.db.test diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 676b3b6af0..14d7b4b05d 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -81,7 +81,7 @@ class DecimalCodec(DecimalDecoder, DecimalEncoder): DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) -class UndecipherableInt64Type(object): +class UndecipherableInt64Type: def __init__(self, value): self.value = value @@ -146,7 +146,7 @@ def transform_bson(self, value): return ResumeTokenToNanDecoder -class CustomBSONTypeTests(object): +class CustomBSONTypeTests: @no_type_check def roundtrip(self, doc): bsonbytes = encode(doc, codec_options=self.codecopts) @@ -164,9 +164,9 @@ def test_encode_decode_roundtrip(self): def test_decode_all(self): documents = [] for dec in range(3): - documents.append({"average": Decimal("56.4%s" % (dec,))}) + documents.append({"average": Decimal(f"56.4{dec}")}) - bsonstream = bytes() + bsonstream = b"" for doc in documents: bsonstream += encode(doc, codec_options=self.codecopts) @@ -287,7 +287,7 @@ def run_test(base, attrs, fail): else: codec() - class MyType(object): + class MyType: pass run_test( @@ -350,11 +350,11 @@ class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): @classmethod def setUpClass(cls): - class TypeA(object): + class TypeA: def __init__(self, x): self.value = x - class TypeB(object): + class TypeB: def __init__(self, x): self.value = x @@ -442,12 +442,12 @@ class TestTypeRegistry(unittest.TestCase): @classmethod def setUpClass(cls): - class MyIntType(object): + class MyIntType: def __init__(self, x): assert isinstance(x, int) self.x = x - class MyStrType(object): + class MyStrType: def __init__(self, x): assert isinstance(x, str) self.x = x @@ -553,18 +553,18 @@ def test_initialize_fail(self): with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([type("AnyType", (object,), {})()]) - err_msg = "fallback_encoder %r is not a callable" % (True,) + err_msg = f"fallback_encoder {True!r} is not a callable" with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([], True) # type: ignore[arg-type] - err_msg = "fallback_encoder %r is not a callable" % ("hello",) + err_msg = "fallback_encoder {!r} is not a callable".format("hello") with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) - r = "TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % (codec_instances, None) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" self.assertEqual(r, repr(type_registry)) def test_type_registry_eq(self): @@ -777,7 +777,7 @@ def test_grid_out_custom_opts(self): self.assertRaises(AttributeError, setattr, two, attr, 5) -class ChangeStreamsWCustomTypesTestMixin(object): +class ChangeStreamsWCustomTypesTestMixin: @no_type_check def change_stream(self, *args, **kwargs): return self.watched_target.watch(*args, **kwargs) @@ -899,7 +899,7 @@ class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCus @classmethod @client_context.require_change_streams def setUpClass(cls): - super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() cls.db.test.delete_many({}) def tearDown(self): @@ -918,7 +918,7 @@ class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCusto @client_context.require_version_min(4, 0, 0) @client_context.require_change_streams def setUpClass(cls): - super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() cls.db.test.delete_many({}) def tearDown(self): @@ -937,7 +937,7 @@ class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustom @client_context.require_version_min(4, 0, 0) @client_context.require_change_streams def setUpClass(cls): - super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() + super().setUpClass() cls.db.test.delete_many({}) def tearDown(self): diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 4fa38435a3..ce210010bd 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -52,7 +52,7 @@ class TestDataLakeProse(IntegrationTest): @classmethod @client_context.require_data_lake def setUpClass(cls): - super(TestDataLakeProse, cls).setUpClass() + super().setUpClass() # Test killCursors def test_1(self): @@ -100,7 +100,7 @@ class DataLakeTestSpec(TestCrudV2): @classmethod @client_context.require_data_lake def setUpClass(cls): - super(DataLakeTestSpec, cls).setUpClass() + super().setUpClass() def setup_scenario(self, scenario_def): # Spec tests MUST NOT insert data/drop collection for diff --git a/test/test_database.py b/test/test_database.py index b6be380aab..140d169db3 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -137,7 +137,7 @@ def test_get_coll(self): def test_repr(self): self.assertEqual( repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, repr("pymongo_test")), + "Database({!r}, {})".format(self.client, repr("pymongo_test")), ) def test_create_collection(self): @@ -262,8 +262,8 @@ def test_list_collections(self): # Checking if is there any collection which don't exists. if ( - len(set(colls) - set(["test", "test.mike"])) == 0 - or len(set(colls) - set(["test", "test.mike", "system.indexes"])) == 0 + len(set(colls) - {"test", "test.mike"}) == 0 + or len(set(colls) - {"test", "test.mike", "system.indexes"}) == 0 ): self.assertTrue(True) else: @@ -301,10 +301,7 @@ def test_list_collections(self): coll_cnt = {} # Checking if is there any collection which don't exists. - if ( - len(set(colls) - set(["test"])) == 0 - or len(set(colls) - set(["test", "system.indexes"])) == 0 - ): + if len(set(colls) - {"test"}) == 0 or len(set(colls) - {"test", "system.indexes"}) == 0: self.assertTrue(True) else: self.assertTrue(False) @@ -439,7 +436,7 @@ def test_id_ordering(self): ) cursor = db.test.find() for x in cursor: - for (k, v) in x.items(): + for (k, _v) in x.items(): self.assertEqual(k, "_id") break diff --git a/test/test_dbref.py b/test/test_dbref.py index 281aef473f..107d95d230 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -64,7 +64,7 @@ def test_repr(self): ) self.assertEqual( repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" % (repr("coll"),), + "DBRef({}, ObjectId('1234567890abcdef12345678'))".format(repr("coll")), ) self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") self.assertEqual( diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 9af8185ab5..8a14ecfb2a 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -104,15 +104,15 @@ def got_app_error(topology, app_error): elif error_type == "timeout": raise NetworkTimeout("mock network timeout error") else: - raise AssertionError("unknown error type: %s" % (error_type,)) - assert False + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError except (AutoReconnect, NotPrimaryError, OperationFailure) as e: if when == "beforeHandshakeCompletes": completed_handshake = False elif when == "afterHandshakeCompletes": completed_handshake = True else: - assert False, "Unknown when field %s" % (when,) + raise AssertionError(f"Unknown when field {when}") topology.handle_error( server_address, @@ -201,7 +201,7 @@ def run_scenario(self): for i, phase in enumerate(scenario_def["phases"]): # Including the phase description makes failures easier to debug. description = phase.get("description", str(i)) - with assertion_context("phase: %s" % (description,)): + with assertion_context(f"phase: {description}"): for response in phase.get("responses", []): got_hello(c, common.partition_node(response[0]), response[1]) @@ -228,7 +228,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_encryption.py b/test/test_encryption.py index af8f54cd07..95f18eb307 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -207,7 +207,7 @@ class EncryptionIntegrationTest(IntegrationTest): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(EncryptionIntegrationTest, cls).setUpClass() + super().setUpClass() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -295,7 +295,7 @@ def _test_auto_encrypt(self, opts): # Collection.distinct auto decrypts. decrypted_ssns = encrypted_coll.distinct("ssn") - self.assertEqual(set(decrypted_ssns), set(d["ssn"] for d in docs)) + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) # Make sure the field is actually encrypted. for encrypted_doc in self.db.test.find(): @@ -391,7 +391,7 @@ class TestClientMaxWireVersion(IntegrationTest): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): - super(TestClientMaxWireVersion, cls).setUpClass() + super().setUpClass() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): @@ -585,7 +585,7 @@ class TestSpec(SpecRunner): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): - super(TestSpec, cls).setUpClass() + super().setUpClass() def parse_auto_encrypt_opts(self, opts): """Parse clientOptions.autoEncryptOpts.""" @@ -630,14 +630,14 @@ def parse_client_options(self, opts): if encrypt_opts: opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - return super(TestSpec, self).parse_client_options(opts) + return super().parse_client_options(opts) def get_object_name(self, op): """Default object is collection.""" return op.get("object", "collection") def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) desc = test["description"].lower() if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") @@ -674,7 +674,7 @@ def setup_scenario(self, scenario_def): def allowable_errors(self, op): """Override expected error classes.""" - errors = super(TestSpec, self).allowable_errors(op) + errors = super().allowable_errors(op) # An updateOne test expects encryption to error when no $ operator # appears but pymongo raises a client side ValueError in this case. if op["name"] == "updateOne": @@ -773,7 +773,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): "No environment credentials are set", ) def setUpClass(cls): - super(TestDataKeyDoubleEncryption, cls).setUpClass() + super().setUpClass() cls.listener = OvertCommandListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.client.db.coll.drop() @@ -818,7 +818,7 @@ def run_test(self, provider_name): # Create data key. master_key: Any = self.MASTER_KEYS[provider_name] datakey_id = self.client_encryption.create_data_key( - provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)] + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] ) self.assertBinaryUUID(datakey_id) cmd = self.listener.started_events[-1] @@ -830,20 +830,20 @@ def run_test(self, provider_name): # Encrypt by key_id. encrypted = self.client_encryption.encrypt( - "hello %s" % (provider_name,), + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=datakey_id, ) self.assertEncrypted(encrypted) self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name}) - self.assertEqual(doc_decrypted["value"], "hello %s" % (provider_name,)) # type: ignore + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore # Encrypt by key_alt_name. encrypted_altname = self.client_encryption.encrypt( - "hello %s" % (provider_name,), + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name="%s_altname" % (provider_name,), + key_alt_name=f"{provider_name}_altname", ) self.assertEqual(encrypted_altname, encrypted) @@ -965,7 +965,7 @@ class TestCorpus(EncryptionIntegrationTest): @classmethod @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUpClass(cls): - super(TestCorpus, cls).setUpClass() + super().setUpClass() @staticmethod def kms_providers(): @@ -1046,17 +1046,17 @@ def _test_corpus(self, opts): self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) if identifier == "id": if kms == "local": - kwargs = dict(key_id=LOCAL_KEY_ID) + kwargs = {"key_id": LOCAL_KEY_ID} elif kms == "aws": - kwargs = dict(key_id=AWS_KEY_ID) + kwargs = {"key_id": AWS_KEY_ID} elif kms == "azure": - kwargs = dict(key_id=AZURE_KEY_ID) + kwargs = {"key_id": AZURE_KEY_ID} elif kms == "gcp": - kwargs = dict(key_id=GCP_KEY_ID) + kwargs = {"key_id": GCP_KEY_ID} else: - kwargs = dict(key_id=KMIP_KEY_ID) + kwargs = {"key_id": KMIP_KEY_ID} else: - kwargs = dict(key_alt_name=kms) + kwargs = {"key_alt_name": kms} self.assertIn(value["algo"], ("det", "rand")) if value["algo"] == "det": @@ -1069,12 +1069,12 @@ def _test_corpus(self, opts): value["value"], algo, **kwargs # type: ignore[arg-type] ) if not value["allowed"]: - self.fail("encrypt should have failed: %r: %r" % (key, value)) + self.fail(f"encrypt should have failed: {key!r}: {value!r}") corpus_copied[key]["value"] = encrypted_val except Exception: if value["allowed"]: tb = traceback.format_exc() - self.fail("encrypt failed: %r: %r, traceback: %s" % (key, value, tb)) + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") client_encrypted.db.coll.insert_one(corpus_copied) corpus_decrypted = client_encrypted.db.coll.find_one() @@ -1141,7 +1141,7 @@ class TestBsonSizeBatches(EncryptionIntegrationTest): @classmethod def setUpClass(cls): - super(TestBsonSizeBatches, cls).setUpClass() + super().setUpClass() db = client_context.client.db cls.coll = db.coll cls.coll.drop() @@ -1172,7 +1172,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.coll_encrypted.drop() cls.client_encrypted.close() - super(TestBsonSizeBatches, cls).tearDownClass() + super().tearDownClass() def test_01_insert_succeeds_under_2MiB(self): doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} @@ -1242,7 +1242,7 @@ class TestCustomEndpoint(EncryptionIntegrationTest): "No environment credentials are set", ) def setUpClass(cls): - super(TestCustomEndpoint, cls).setUpClass() + super().setUpClass() def setUp(self): kms_providers = { @@ -1442,7 +1442,7 @@ def test_12_kmip_master_key_invalid_endpoint(self): self.client_encryption.create_data_key("kmip", key) -class AzureGCPEncryptionTestMixin(object): +class AzureGCPEncryptionTestMixin: DEK = None KMS_PROVIDER_MAP = None KEYVAULT_DB = "keyvault" @@ -1514,7 +1514,7 @@ def setUpClass(cls): cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} cls.DEK = json_data(BASE, "custom", "azure-dek.json") cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super(TestAzureEncryption, cls).setUpClass() + super().setUpClass() def test_explicit(self): return self._test_explicit( @@ -1540,7 +1540,7 @@ def setUpClass(cls): cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} cls.DEK = json_data(BASE, "custom", "gcp-dek.json") cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super(TestGCPEncryption, cls).setUpClass() + super().setUpClass() def test_explicit(self): return self._test_explicit( @@ -1985,7 +1985,7 @@ def listener(): class TestKmsTLSProse(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): - super(TestKmsTLSProse, self).setUp() + super().setUp() self.patch_system_certs(CA_PEM) self.client_encrypted = ClientEncryption( {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS @@ -2023,7 +2023,7 @@ def test_invalid_hostname_in_kms_certificate(self): class TestKmsTLSOptions(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): - super(TestKmsTLSOptions, self).setUp() + super().setUp() # 1, create client with only tlsCAFile. providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002" @@ -2391,7 +2391,7 @@ def run_test(self, src_provider, dst_provider): # https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials class TestOnDemandAWSCredentials(EncryptionIntegrationTest): def setUp(self): - super(TestOnDemandAWSCredentials, self).setUp() + super().setUp() self.master_key = { "region": "us-east-1", "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), diff --git a/test/test_examples.py b/test/test_examples.py index c08cb17e20..b9508d4f1e 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -34,7 +34,7 @@ class TestSampleShellCommands(IntegrationTest): @classmethod def setUpClass(cls): - super(TestSampleShellCommands, cls).setUpClass() + super().setUpClass() # Run once before any tests run. cls.db.inventory.drop() @@ -757,18 +757,18 @@ def insert_docs(): # 1. The database for reactive, real-time applications # Start Changestream Example 1 cursor = db.inventory.watch() - document = next(cursor) + next(cursor) # End Changestream Example 1 # Start Changestream Example 2 cursor = db.inventory.watch(full_document="updateLookup") - document = next(cursor) + next(cursor) # End Changestream Example 2 # Start Changestream Example 3 resume_token = cursor.resume_token cursor = db.inventory.watch(resume_after=resume_token) - document = next(cursor) + next(cursor) # End Changestream Example 3 # Start Changestream Example 4 @@ -777,7 +777,7 @@ def insert_docs(): {"$addFields": {"newField": "this is an added field!"}}, ] cursor = db.inventory.watch(pipeline=pipeline) - document = next(cursor) + next(cursor) # End Changestream Example 4 finally: done = True @@ -898,7 +898,7 @@ def test_misc(self): with client.start_session() as session: collection.insert_one({"_id": 1}, session=session) collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) - for doc in collection.find({}, session=session): + for _doc in collection.find({}, session=session): pass # 3. Exploiting the power of arrays @@ -1078,7 +1078,7 @@ def update_employee_info(session): with client.start_session() as session: try: run_transaction_with_retry(update_employee_info, session) - except Exception as exc: + except Exception: # Do something with error. raise @@ -1089,7 +1089,9 @@ def update_employee_info(session): self.assertIsNotNone(employee) self.assertEqual(employee["status"], "Inactive") - MongoClient = lambda _: rs_client() + def MongoClient(_): + return rs_client() + uriString = None # Start Transactions withTxn API Example 1 @@ -1179,25 +1181,27 @@ class TestVersionedApiExamples(IntegrationTest): @client_context.require_version_min(4, 7) def test_versioned_api(self): # Versioned API examples - MongoClient = lambda _, server_api: rs_client(server_api=server_api, connect=False) + def MongoClient(_, server_api): + return rs_client(server_api=server_api, connect=False) + uri = None # Start Versioned API Example 1 from pymongo.server_api import ServerApi - client = MongoClient(uri, server_api=ServerApi("1")) + MongoClient(uri, server_api=ServerApi("1")) # End Versioned API Example 1 # Start Versioned API Example 2 - client = MongoClient(uri, server_api=ServerApi("1", strict=True)) + MongoClient(uri, server_api=ServerApi("1", strict=True)) # End Versioned API Example 2 # Start Versioned API Example 3 - client = MongoClient(uri, server_api=ServerApi("1", strict=False)) + MongoClient(uri, server_api=ServerApi("1", strict=False)) # End Versioned API Example 3 # Start Versioned API Example 4 - client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 @unittest.skip("PYTHON-3167 count has been added to API version 1") @@ -1339,7 +1343,7 @@ def test_snapshot_query(self): # Start Snapshot Query Example 2 db = client.retail with client.start_session(snapshot=True) as s: - total = db.sales.aggregate( + db.sales.aggregate( [ { "$match": { diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 8b46133a60..04003289e6 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the grid_file module. -""" +"""Tests for the grid_file module.""" import datetime import io @@ -462,12 +460,10 @@ def test_multiple_reads(self): def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) f.write( - ( - b"""Hello world, + b"""Hello world, How are you? Hope all is well. Bye""" - ) ) f.close() @@ -498,12 +494,10 @@ def test_readline(self): def test_readlines(self): f = GridIn(self.db.fs, chunkSize=5) f.write( - ( - b"""Hello world, + b"""Hello world, How are you? Hope all is well. Bye""" - ) ) f.close() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index cfa6e43e85..4ba8467d22 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. -""" +"""Tests for the gridfs package.""" import datetime import sys @@ -90,7 +88,7 @@ class TestGridfs(IntegrationTest): @classmethod def setUpClass(cls): - super(TestGridfs, cls).setUpClass() + super().setUpClass() cls.fs = gridfs.GridFS(cls.db) cls.alt = gridfs.GridFS(cls.db, "alt") @@ -141,7 +139,7 @@ def test_list(self): self.fs.put(b"foo", filename="test") self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), set(self.fs.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.fs.list())) def test_empty_file(self): oid = self.fs.put(b"") @@ -210,7 +208,7 @@ def test_alt_collection(self): self.alt.put(b"foo", filename="test") self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), set(self.alt.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.alt.list())) def test_threaded_reads(self): self.fs.put(b"hello", _id="test") @@ -394,7 +392,7 @@ def test_missing_length_iter(self): f = self.fs.get_last_version(filename="empty") def iterate_file(grid_file): - for chunk in grid_file: + for _chunk in grid_file: pass return True @@ -496,7 +494,7 @@ class TestGridfsReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestGridfsReplicaSet, cls).setUpClass() + super().setUpClass() @classmethod def tearDownClass(cls): diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index b6a33b4ecc..e5695f2c38 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2015-present MongoDB, Inc. # @@ -14,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. -""" +"""Tests for the gridfs package.""" import datetime import itertools import threading @@ -75,7 +73,7 @@ class TestGridfs(IntegrationTest): @classmethod def setUpClass(cls): - super(TestGridfs, cls).setUpClass() + super().setUpClass() cls.fs = gridfs.GridFSBucket(cls.db) cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") @@ -196,8 +194,8 @@ def test_alt_collection(self): self.alt.upload_from_stream("hello world", b"") self.assertEqual( - set(["mike", "test", "hello world", "foo"]), - set(k["filename"] for k in list(self.db.alt.files.find())), + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in list(self.db.alt.files.find())}, ) def test_threaded_reads(self): @@ -442,7 +440,7 @@ class TestGridfsBucketReplicaSet(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestGridfsBucketReplicaSet, cls).setUpClass() + super().setUpClass() @classmethod def tearDownClass(cls): diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index d4de8debf5..df68b3e626 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -150,7 +150,7 @@ def test_session_gc(self): class PoolLocker(ExceptionCatchingThread): def __init__(self, pool): - super(PoolLocker, self).__init__(target=self.lock_pool) + super().__init__(target=self.lock_pool) self.pool = pool self.daemon = True self.locked = threading.Event() diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index e39940f56b..9e83e879a5 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -36,7 +36,7 @@ def setUpModule(): class SimpleOp(threading.Thread): def __init__(self, client): - super(SimpleOp, self).__init__() + super().__init__() self.client = client self.passed = False @@ -58,9 +58,9 @@ def do_simple_op(client, nthreads): def writable_addresses(topology): - return set( + return { server.description.address for server in topology.select_servers(writable_server_selector) - ) + } class TestMongosLoadBalancing(MockClientTest): @@ -133,7 +133,7 @@ def test_local_threshold(self): topology = client._topology # All are within a 30-ms latency window, see self.mock_client(). - self.assertEqual(set([("a", 1), ("b", 2), ("c", 3)]), writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, writable_addresses(topology)) # No error client.admin.command("ping") @@ -143,7 +143,7 @@ def test_local_threshold(self): # No error client.db.command("ping") # Our chosen mongos goes down. - client.kill_host("%s:%s" % next(iter(client.nodes))) + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) try: client.db.command("ping") except: @@ -174,13 +174,13 @@ def test_load_balancing(self): self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) # a and b are within the 15-ms latency window, see self.mock_client(). - self.assertEqual(set([("a", 1), ("b", 2)]), writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2)}, writable_addresses(topology)) client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. wait_until( - lambda: set([("b", 2)]) == writable_addresses(topology), + lambda: {("b", 2)} == writable_addresses(topology), 'discover server "a" is too far', ) diff --git a/test/test_monitor.py b/test/test_monitor.py index 85cfb0bc40..9ee3c52ff5 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -66,7 +66,7 @@ def test_cleanup_executors_on_client_del(self): del client for ref, name in executor_refs: - wait_until(partial(unregistered, ref), "unregister executor: %s" % (name,), timeout=5) + wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) def test_cleanup_executors_on_client_close(self): client = create_client() @@ -76,9 +76,7 @@ def test_cleanup_executors_on_client_close(self): client.close() for executor in executors: - wait_until( - lambda: executor._stopped, "closed executor: %s" % (executor._name,), timeout=5 - ) + wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) if __name__ == "__main__": diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 39b3d2f896..c7c793b382 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -39,18 +39,18 @@ class TestCommandMonitoring(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestCommandMonitoring, cls).setUpClass() + super().setUpClass() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) @classmethod def tearDownClass(cls): cls.client.close() - super(TestCommandMonitoring, cls).tearDownClass() + super().tearDownClass() def tearDown(self): self.listener.reset() - super(TestCommandMonitoring, self).tearDown() + super().tearDown() def test_started_simple(self): self.client.pymongo_test.command("ping") @@ -232,40 +232,40 @@ def _test_find_options(self, query, expected_cmd): tuple(cursor) def test_find_options(self): - query = dict( - filter={}, - hint=[("x", 1)], - max_time_ms=10000, - max={"x": 10}, - min={"x": -10}, - return_key=True, - show_record_id=True, - projection={"x": False}, - skip=1, - no_cursor_timeout=True, - sort=[("_id", 1)], - allow_partial_results=True, - comment="this is a test", - batch_size=2, - ) + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } - cmd = dict( - find="test", - filter={}, - hint=SON([("x", 1)]), - comment="this is a test", - maxTimeMS=10000, - max={"x": 10}, - min={"x": -10}, - returnKey=True, - showRecordId=True, - sort=SON([("_id", 1)]), - projection={"x": False}, - skip=1, - batchSize=2, - noCursorTimeout=True, - allowPartialResults=True, - ) + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } if client_context.version < (4, 1, 0, -1): query["max_scan"] = 10 @@ -276,9 +276,9 @@ def test_find_options(self): @client_context.require_version_max(3, 7, 2) def test_find_snapshot(self): # Test "snapshot" parameter separately, can't combine with "sort". - query = dict(filter={}, snapshot=True) + query = {"filter": {}, "snapshot": True} - cmd = dict(find="test", filter={}, snapshot=True) + cmd = {"find": "test", "filter": {}, "snapshot": True} self._test_find_options(query, cmd) @@ -1049,7 +1049,7 @@ def test_write_errors(self): errors.extend(succeed.reply["writeErrors"]) self.assertEqual(2, len(errors)) - fields = set(["index", "code", "errmsg"]) + fields = {"index", "code", "errmsg"} for error in errors: self.assertTrue(fields.issubset(set(error))) @@ -1113,7 +1113,7 @@ class TestGlobalListener(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestGlobalListener, cls).setUpClass() + super().setUpClass() cls.listener = EventListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) @@ -1126,10 +1126,10 @@ def setUpClass(cls): def tearDownClass(cls): monitoring._LISTENERS = cls.saved_listeners cls.client.close() - super(TestGlobalListener, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(TestGlobalListener, self).setUp() + super().setUp() self.listener.reset() def test_simple(self): diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index d5668199a3..499dc64b3b 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -30,10 +30,10 @@ class TestonDemandGCPCredentials(IntegrationTest): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(TestonDemandGCPCredentials, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestonDemandGCPCredentials, self).setUp() + super().setUp() self.master_key = { "projectId": "devprod-drivers", "location": "global", @@ -72,10 +72,10 @@ class TestonDemandAzureCredentials(IntegrationTest): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) def setUpClass(cls): - super(TestonDemandAzureCredentials, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestonDemandAzureCredentials, self).setUp() + super().setUp() self.master_key = { "keyVaultEndpoint": "https://keyvault-drivers-2411.vault.azure.net/keys/", "keyName": "KEY-NAME", diff --git a/test/test_pooling.py b/test/test_pooling.py index 923c89d83b..57c9b807a6 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -60,7 +60,7 @@ class MongoThread(threading.Thread): """A thread that uses a MongoClient.""" def __init__(self, client): - super(MongoThread, self).__init__() + super().__init__() self.daemon = True # Don't hang whole test if thread hangs. self.client = client self.db = self.client[DB] @@ -107,7 +107,7 @@ class SocketGetter(MongoThread): """ def __init__(self, client, pool): - super(SocketGetter, self).__init__(client) + super().__init__(client) self.state = "init" self.pool = pool self.sock = None @@ -132,7 +132,7 @@ def run_cases(client, cases): n_runs = 5 for case in cases: - for i in range(n_runs): + for _i in range(n_runs): t = case(client) t.start() threads.append(t) @@ -148,7 +148,7 @@ class _TestPoolingBase(IntegrationTest): """Base class for all connection-pool tests.""" def setUp(self): - super(_TestPoolingBase, self).setUp() + super().setUp() self.c = rs_or_single_client() db = self.c[DB] db.unique.drop() @@ -158,7 +158,7 @@ def setUp(self): def tearDown(self): self.c.close() - super(_TestPoolingBase, self).tearDown() + super().tearDown() def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): # Start the pool with the correct ssl options. @@ -329,7 +329,7 @@ def test_wait_queue_timeout(self): duration = time.time() - start self.assertTrue( abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % (duration, wait_queue_timeout), + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", ) def test_no_wait_queue_timeout(self): @@ -440,7 +440,7 @@ def f(): with lock: self.n_passed += 1 - for i in range(nthreads): + for _i in range(nthreads): t = threading.Thread(target=f) threads.append(t) t.start() @@ -472,7 +472,7 @@ def f(): with lock: self.n_passed += 1 - for i in range(nthreads): + for _i in range(nthreads): t = threading.Thread(target=f) threads.append(t) t.start() @@ -500,7 +500,7 @@ def test_max_pool_size_with_connection_failure(self): # First call to get_socket fails; if pool doesn't release its semaphore # then the second call raises "ConnectionFailure: Timed out waiting for # socket from pool" instead of AutoReconnect. - for i in range(2): + for _i in range(2): with self.assertRaises(AutoReconnect) as context: with test_pool.get_socket(): pass diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 2230f2bef2..682fe03e72 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -33,7 +33,7 @@ class TestReadConcern(IntegrationTest): @classmethod @client_context.require_connection def setUpClass(cls): - super(TestReadConcern, cls).setUpClass() + super().setUpClass() cls.listener = OvertCommandListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -43,11 +43,11 @@ def setUpClass(cls): def tearDownClass(cls): cls.client.close() client_context.client.pymongo_test.drop_collection("coll") - super(TestReadConcern, cls).tearDownClass() + super().tearDownClass() def tearDown(self): self.listener.reset() - super(TestReadConcern, self).tearDown() + super().tearDown() def test_read_concern(self): rc = ReadConcern() @@ -65,7 +65,7 @@ def test_read_concern(self): self.assertRaises(TypeError, ReadConcern, 42) def test_read_concern_uri(self): - uri = "mongodb://%s/?readConcernLevel=majority" % (client_context.pair,) + uri = f"mongodb://{client_context.pair}/?readConcernLevel=majority" client = rs_or_single_client(uri, connect=False) self.assertEqual(ReadConcern("majority"), client.read_concern) diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 1362623dff..6156b6b3fc 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -90,10 +90,10 @@ class TestReadPreferencesBase(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestReadPreferencesBase, cls).setUpClass() + super().setUpClass() def setUp(self): - super(TestReadPreferencesBase, self).setUp() + super().setUp() # Insert some data so we can use cursors in read_from_which_host self.client.pymongo_test.test.drop() self.client.get_database( @@ -119,16 +119,17 @@ def read_from_which_kind(self, client): return "secondary" else: self.fail( - "Cursor used address %s, expected either primary " - "%s or secondaries %s" % (address, client.primary, client.secondaries) + "Cursor used address {}, expected either primary " + "{} or secondaries {}".format(address, client.primary, client.secondaries) ) + return None def assertReadsFrom(self, expected, **kwargs): c = rs_client(**kwargs) wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) - self.assertEqual(expected, used, "Cursor used %s, expected %s" % (used, expected)) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") class TestSingleSecondaryOk(TestReadPreferencesBase): @@ -271,7 +272,7 @@ def test_nearest(self): self.assertFalse( not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies), + " but didn't use {}\nlatencies: {}".format(not_used, latencies), ) @@ -280,18 +281,18 @@ def __init__(self, *args, **kwargs): self.has_read_from = set() client_options = client_context.client_options client_options.update(kwargs) - super(ReadPrefTester, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) @contextlib.contextmanager def _socket_for_reads(self, read_preference, session): - context = super(ReadPrefTester, self)._socket_for_reads(read_preference, session) + context = super()._socket_for_reads(read_preference, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @contextlib.contextmanager def _socket_from_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._socket_from_server(read_preference, server, session) + context = super()._socket_from_server(read_preference, server, session) with context as (sock_info, read_preference): self.record_a_read(sock_info.address) yield sock_info, read_preference @@ -317,7 +318,7 @@ class TestCommandAndReadPreference(IntegrationTest): @classmethod @client_context.require_secondaries_count(1) def setUpClass(cls): - super(TestCommandAndReadPreference, cls).setUpClass() + super().setUpClass() cls.c = ReadPrefTester( client_context.pair, # Ignore round trip times, to test ReadPreference modes only. @@ -360,7 +361,7 @@ def _test_fn(self, server_type, fn): break assert self.c.primary is not None - unused = self.c.secondaries.union(set([self.c.primary])).difference(used) + unused = self.c.secondaries.union({self.c.primary}).difference(used) if unused: self.fail("Some members not used for NEAREST: %s" % (unused)) else: @@ -373,7 +374,10 @@ def _test_primary_helper(self, func): def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): for mode, server_type in _PREF_MAP: new_coll = coll.with_options(read_preference=mode()) - func = lambda: getattr(new_coll, meth)(*args, **kwargs) + + def func(): + return getattr(new_coll, meth)(*args, **kwargs) + if secondary_ok: self._test_fn(server_type, func) else: @@ -383,7 +387,10 @@ def test_command(self): # Test that the generic command helper obeys the read preference # passed to it. for mode, server_type in _PREF_MAP: - func = lambda: self.c.pymongo_test.command("dbStats", read_preference=mode()) + + def func(): + return self.c.pymongo_test.command("dbStats", read_preference=mode()) + self._test_fn(server_type, func) def test_create_collection(self): @@ -536,7 +543,7 @@ def test_send_hedge(self): client = rs_client(event_listeners=[listener]) self.addCleanup(client.close) client.admin.command("ping") - for mode, cls in cases.items(): + for _mode, cls in cases.items(): pref = cls(hedge={"enabled": True}) coll = client.test.get_collection("test", read_preference=pref) listener.reset() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 26bc111f00..2b39f7d04e 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -89,16 +89,16 @@ def insert_command_default_write_concern(): f() self.assertGreaterEqual(len(listener.started_events), 1) - for i, event in enumerate(listener.started_events): + for _i, event in enumerate(listener.started_events): self.assertNotIn( "readConcern", event.command, - "%s sent default readConcern with %s" % (name, event.command_name), + f"{name} sent default readConcern with {event.command_name}", ) self.assertNotIn( "writeConcern", event.command, - "%s sent default writeConcern with %s" % (name, event.command_name), + f"{name} sent default writeConcern with {event.command_name}", ) def assertWriteOpsRaise(self, write_concern, expected_exception): @@ -307,7 +307,7 @@ def create_tests(): fname = os.path.splitext(filename)[0] for test_case in test_cases: new_test = create_test(test_case) - test_name = "test_%s_%s_%s" % ( + test_name = "test_{}_{}_{}".format( dirname.replace("-", "_"), fname.replace("-", "_"), str(test_case["description"].lower().replace(" ", "_")), diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 898be99d4d..bdeaeb06a3 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -83,7 +83,7 @@ def test_replica_set_client(self): c.mock_members.remove("c:3") c.mock_standalones.append("c:3") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "update the list of secondaries") + wait_until(lambda: {("b", 2)} == c.secondaries, "update the list of secondaries") self.assertEqual(("a", 1), c.primary) @@ -106,7 +106,7 @@ def test_replica_set_client(self): # C is removed. c.mock_hello_hosts.remove("c:3") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "update list of secondaries") + wait_until(lambda: {("b", 2)} == c.secondaries, "update list of secondaries") self.assertEqual(("a", 1), c.primary) @@ -148,7 +148,7 @@ def test_client(self): # MongoClient connects to primary by default. self.assertEqual(c.address, ("a", 1)) - self.assertEqual(set([("a", 1), ("b", 2)]), c.nodes) + self.assertEqual({("a", 1), ("b", 2)}, c.nodes) # C is added. c.mock_members.append("c:3") @@ -159,7 +159,7 @@ def test_client(self): self.assertEqual(c.address, ("a", 1)) wait_until( - lambda: set([("a", 1), ("b", 2), ("c", 3)]) == c.nodes, "reconnect to both secondaries" + lambda: {("a", 1), ("b", 2), ("c", 3)} == c.nodes, "reconnect to both secondaries" ) def test_replica_set_client(self): @@ -169,13 +169,13 @@ def test_replica_set_client(self): self.addCleanup(c.close) wait_until(lambda: ("a", 1) == c.primary, "discover the primary") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "discover the secondary") + wait_until(lambda: {("b", 2)} == c.secondaries, "discover the secondary") # C is added. c.mock_members.append("c:3") c.mock_hello_hosts.append("c:3") - wait_until(lambda: set([("b", 2), ("c", 3)]) == c.secondaries, "discover the new secondary") + wait_until(lambda: {("b", 2), ("c", 3)} == c.secondaries, "discover the new secondary") self.assertEqual(("a", 1), c.primary) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 517e1122b0..ee12c524c9 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -76,14 +76,14 @@ class TestSpec(SpecRunner): # TODO: remove this once PYTHON-1948 is done. @client_context.require_no_mmap def setUpClass(cls): - super(TestSpec, cls).setUpClass() + super().setUpClass() def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] for name in skip_names: if name.lower() in test["description"].lower(): - self.skipTest("PyMongo does not support %s" % (name,)) + self.skipTest(f"PyMongo does not support {name}") # Serverless does not support $out and collation. if client_context.serverless: @@ -107,7 +107,7 @@ def get_scenario_coll_name(self, scenario_def): """Override a test's collection name to support GridFS tests.""" if "bucket_name" in scenario_def: return scenario_def["bucket_name"] - return super(TestSpec, self).get_scenario_coll_name(scenario_def) + return super().get_scenario_coll_name(scenario_def) def setup_scenario(self, scenario_def): """Override a test's setup to support GridFS tests.""" @@ -127,7 +127,7 @@ def setup_scenario(self, scenario_def): db.get_collection("fs.chunks").drop() db.get_collection("fs.files", write_concern=wc).drop() else: - super(TestSpec, self).setup_scenario(scenario_def) + super().setup_scenario(scenario_def) def create_test(scenario_def, test, name): diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 1e978f21be..32841a8227 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -68,7 +68,7 @@ class InsertEventListener(EventListener): def succeeded(self, event: CommandSucceededEvent) -> None: - super(InsertEventListener, self).succeeded(event) + super().succeeded(event) if ( event.command_name == "insert" and event.reply.get("writeConcernError", {}).get("code", None) == 91 @@ -108,7 +108,7 @@ def run_test_ops(self, sessions, collection, test): if "result" in outcome: operation["result"] = outcome["result"] test["operations"] = [operation] - super(TestAllScenarios, self).run_test_ops(sessions, collection, test) + super().run_test_ops(sessions, collection, test) def create_test(scenario_def, test, name): @@ -168,13 +168,13 @@ class IgnoreDeprecationsTest(IntegrationTest): @classmethod def setUpClass(cls): - super(IgnoreDeprecationsTest, cls).setUpClass() + super().setUpClass() cls.deprecation_filter = DeprecationFilter() @classmethod def tearDownClass(cls): cls.deprecation_filter.stop() - super(IgnoreDeprecationsTest, cls).tearDownClass() + super().tearDownClass() class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): @@ -182,7 +182,7 @@ class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): @classmethod def setUpClass(cls): - super(TestRetryableWritesMMAPv1, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @@ -193,7 +193,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.knobs.disable() cls.client.close() - super(TestRetryableWritesMMAPv1, cls).tearDownClass() + super().tearDownClass() @client_context.require_no_standalone def test_actionable_error_message(self): @@ -217,7 +217,7 @@ class TestRetryableWrites(IgnoreDeprecationsTest): @classmethod @client_context.require_no_mmap def setUpClass(cls): - super(TestRetryableWrites, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @@ -229,7 +229,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.knobs.disable() cls.client.close() - super(TestRetryableWrites, cls).tearDownClass() + super().tearDownClass() def setUp(self): if client_context.is_rs and client_context.test_commands_enabled: @@ -248,20 +248,20 @@ def test_supported_single_statement_no_retry(self): client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) self.addCleanup(client.close) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() method(*args, **kwargs) for event in listener.started_events: self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) @client_context.require_no_standalone def test_supported_single_statement_supported_cluster(self): for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" self.listener.reset() method(*args, **kwargs) commands_started = self.listener.started_events @@ -270,13 +270,13 @@ def test_supported_single_statement_supported_cluster(self): self.assertIn( "lsid", first_attempt.command, - "%s sent no lsid with %s" % (msg, first_attempt.command_name), + f"{msg} sent no lsid with {first_attempt.command_name}", ) initial_session_id = first_attempt.command["lsid"] self.assertIn( "txnNumber", first_attempt.command, - "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + f"{msg} sent no txnNumber with {first_attempt.command_name}", ) # There should be no retry when the failpoint is not active. @@ -289,13 +289,13 @@ def test_supported_single_statement_supported_cluster(self): self.assertIn( "lsid", retry_attempt.command, - "%s sent no lsid with %s" % (msg, first_attempt.command_name), + f"{msg} sent no lsid with {first_attempt.command_name}", ) self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) self.assertIn( "txnNumber", retry_attempt.command, - "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), + f"{msg} sent no txnNumber with {first_attempt.command_name}", ) self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) @@ -304,7 +304,7 @@ def test_supported_single_statement_unsupported_cluster(self): raise SkipTest("This cluster supports retryable writes") for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" self.listener.reset() method(*args, **kwargs) @@ -312,7 +312,7 @@ def test_supported_single_statement_unsupported_cluster(self): self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) def test_unsupported_single_statement(self): @@ -322,7 +322,7 @@ def test_unsupported_single_statement(self): for method, args, kwargs in non_retryable_single_statement_ops( coll ) + retryable_single_statement_ops(coll_w0): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" self.listener.reset() method(*args, **kwargs) started_events = self.listener.started_events @@ -332,7 +332,7 @@ def test_unsupported_single_statement(self): self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) def test_server_selection_timeout_not_retried(self): @@ -345,7 +345,7 @@ def test_server_selection_timeout_not_retried(self): event_listeners=[listener], ) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) @@ -374,7 +374,7 @@ def raise_error(*args, **kwargs): return server for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): @@ -479,7 +479,7 @@ class TestWriteConcernError(IntegrationTest): @client_context.require_no_mmap @client_context.require_failCommand_fail_point def setUpClass(cls): - super(TestWriteConcernError, cls).setUpClass() + super().setUpClass() cls.fail_insert = { "configureFailPoint": "failCommand", "mode": {"times": 2}, @@ -668,7 +668,7 @@ def raise_connection_err_select_server(*args, **kwargs): with client.start_session() as session: kwargs = copy.deepcopy(kwargs) kwargs["session"] = session - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" initial_txn_id = session._server_session.transaction_id # Each operation should fail on the first attempt and succeed diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index d7b3744399..2587ae7965 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -44,12 +44,12 @@ def compare_server_descriptions(expected, actual): - if (not expected["address"] == "%s:%s" % actual.address) or ( + if (not expected["address"] == "{}:{}".format(*actual.address)) or ( not server_name_to_type(expected["type"]) == actual.server_type ): return False expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) - return expected_hosts == set("%s:%s" % s for s in actual.all_hosts) + return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts} def compare_topology_descriptions(expected, actual): @@ -60,7 +60,7 @@ def compare_topology_descriptions(expected, actual): if len(expected) != len(actual): return False for exp_server in expected: - for address, actual_server in actual.items(): + for _address, actual_server in actual.items(): if compare_server_descriptions(exp_server, actual_server): break else: @@ -79,22 +79,22 @@ def compare_events(expected_dict, actual): if expected_type == "server_opening_event": if not isinstance(actual, monitoring.ServerOpeningEvent): return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) - if not expected["address"] == "%s:%s" % actual.server_address: + if not expected["address"] == "{}:{}".format(*actual.server_address): return ( False, "ServerOpeningEvent published with wrong address (expected" - " %s, got %s" % (expected["address"], actual.server_address), + " {}, got {}".format(expected["address"], actual.server_address), ) elif expected_type == "server_description_changed_event": if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) - if not expected["address"] == "%s:%s" % actual.server_address: + if not expected["address"] == "{}:{}".format(*actual.server_address): return ( False, "ServerDescriptionChangedEvent has wrong address" - " (expected %s, got %s" % (expected["address"], actual.server_address), + " (expected {}, got {}".format(expected["address"], actual.server_address), ) if not compare_server_descriptions(expected["newDescription"], actual.new_description): @@ -110,11 +110,11 @@ def compare_events(expected_dict, actual): elif expected_type == "server_closed_event": if not isinstance(actual, monitoring.ServerClosedEvent): return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) - if not expected["address"] == "%s:%s" % actual.server_address: + if not expected["address"] == "{}:{}".format(*actual.server_address): return ( False, "ServerClosedEvent published with wrong address" - " (expected %s, got %s" % (expected["address"], actual.server_address), + " (expected {}, got {}".format(expected["address"], actual.server_address), ) elif expected_type == "topology_opening_event": @@ -145,7 +145,7 @@ def compare_events(expected_dict, actual): return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) else: - return False, "Incorrect event: expected %s, actual %s" % (expected_type, actual) + return False, f"Incorrect event: expected {expected_type}, actual {actual}" return True, "" @@ -170,7 +170,7 @@ def compare_multiple_events(i, expected_results, actual_results): class TestAllScenarios(IntegrationTest): def setUp(self): - super(TestAllScenarios, self).setUp() + super().setUp() self.all_listener = ServerAndTopologyEventListener() @@ -235,7 +235,7 @@ def _run(self): # Assert no extra events. extra_events = self.all_listener.results[expected_len:] if extra_events: - self.fail("Extra events %r" % (extra_events,)) + self.fail(f"Extra events {extra_events!r}") self.all_listener.reset() finally: @@ -251,7 +251,7 @@ def create_tests(): scenario_def = json.load(scenario_stream, object_hook=object_hook) # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s" % (os.path.splitext(filename)[0],) + test_name = f"test_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -268,7 +268,7 @@ class TestSdamMonitoring(IntegrationTest): @classmethod @client_context.require_failCommand_fail_point def setUpClass(cls): - super(TestSdamMonitoring, cls).setUpClass() + super().setUpClass() # Speed up the tests by decreasing the event publish frequency. cls.knobs = client_knobs(events_queue_frequency=0.1) cls.knobs.enable() @@ -284,7 +284,7 @@ def setUpClass(cls): def tearDownClass(cls): cls.test_client.close() cls.knobs.disable() - super(TestSdamMonitoring, cls).tearDownClass() + super().tearDownClass() def setUp(self): self.listener.reset() diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 8d4ffe5e9b..30b82769bc 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -48,7 +48,7 @@ ) -class SelectionStoreSelector(object): +class SelectionStoreSelector: """No-op selector that keeps track of what was passed to it.""" def __init__(self): @@ -103,7 +103,7 @@ def all_hosts_started(): def test_invalid_server_selector(self): # Client initialization must fail if server_selector is not callable. - for selector_candidate in [list(), 10, "string", {}]: + for selector_candidate in [[], 10, "string", {}]: with self.assertRaisesRegex(ValueError, "must be a callable"): MongoClient(connect=False, server_selector=selector_candidate) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index d076ae77b3..63769a6457 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -46,7 +46,7 @@ def run_scenario(self, scenario_def): server.pool.operation_count = mock["operation_count"] pref = ReadPreference.NEAREST - counts = dict((address, 0) for address in topology._description.server_descriptions()) + counts = {address: 0 for address in topology._description.server_descriptions()} # Number of times to repeat server selection iterations = scenario_def["iterations"] @@ -91,7 +91,7 @@ def tests(self, scenario_def): class FinderThread(threading.Thread): def __init__(self, collection, iterations): - super(FinderThread, self).__init__() + super().__init__() self.daemon = True self.collection = collection self.iterations = iterations diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index d2d8768809..5c2a8a6fba 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -57,7 +57,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_session.py b/test/test_session.py index 25d209ebaf..18d0122dae 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -47,15 +47,15 @@ class SessionTestListener(EventListener): def started(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).started(event) + super().started(event) def succeeded(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).failed(event) + super().failed(event) def first_command_started(self): assert len(self.started_events) >= 1, "No command-started events" @@ -74,7 +74,7 @@ class TestSession(IntegrationTest): @classmethod @client_context.require_sessions def setUpClass(cls): - super(TestSession, cls).setUpClass() + super().setUpClass() # Create a second client so we can make sure clients cannot share # sessions. cls.client2 = rs_or_single_client() @@ -87,7 +87,7 @@ def setUpClass(cls): def tearDownClass(cls): monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) cls.client2.close() - super(TestSession, cls).tearDownClass() + super().tearDownClass() def setUp(self): self.listener = SessionTestListener() @@ -97,7 +97,7 @@ def setUp(self): ) self.addCleanup(self.client.close) self.db = self.client.pymongo_test - self.initial_lsids = set(s["id"] for s in session_ids(self.client)) + self.initial_lsids = {s["id"] for s in session_ids(self.client)} def tearDown(self): """All sessions used in the test must be returned to the pool.""" @@ -107,7 +107,7 @@ def tearDown(self): if "lsid" in event.command: used_lsids.add(event.command["lsid"]["id"]) - current_lsids = set(s["id"] for s in session_ids(self.client)) + current_lsids = {s["id"] for s in session_ids(self.client)} self.assertLessEqual(used_lsids, current_lsids) def _test_ops(self, client, *ops): @@ -129,13 +129,13 @@ def _test_ops(self, client, *ops): for event in listener.started_events: self.assertTrue( "lsid" in event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) self.assertEqual( s.session_id, event.command["lsid"], - "%s sent wrong lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent wrong lsid with {event.command_name}", ) self.assertFalse(s.has_ended) @@ -164,7 +164,7 @@ def _test_ops(self, client, *ops): for event in listener.started_events: self.assertTrue( "lsid" in event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) lsids.append(event.command["lsid"]) @@ -176,7 +176,7 @@ def _test_ops(self, client, *ops): self.assertIn( lsid, session_ids(client), - "%s did not return implicit session to pool" % (f.__name__,), + f"{f.__name__} did not return implicit session to pool", ) def test_implicit_sessions_checkout(self): @@ -405,13 +405,13 @@ def test_cursor(self): for event in listener.started_events: self.assertTrue( "lsid" in event.command, - "%s sent no lsid with %s" % (name, event.command_name), + f"{name} sent no lsid with {event.command_name}", ) self.assertEqual( s.session_id, event.command["lsid"], - "%s sent wrong lsid with %s" % (name, event.command_name), + f"{name} sent wrong lsid with {event.command_name}", ) with self.assertRaisesRegex(InvalidOperation, "ended session"): @@ -423,20 +423,20 @@ def test_cursor(self): f(session=None) event0 = listener.first_command_started() self.assertTrue( - "lsid" in event0.command, "%s sent no lsid with %s" % (name, event0.command_name) + "lsid" in event0.command, f"{name} sent no lsid with {event0.command_name}" ) lsid = event0.command["lsid"] for event in listener.started_events[1:]: self.assertTrue( - "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) + "lsid" in event.command, f"{name} sent no lsid with {event.command_name}" ) self.assertEqual( lsid, event.command["lsid"], - "%s sent wrong lsid with %s" % (name, event.command_name), + f"{name} sent wrong lsid with {event.command_name}", ) def test_gridfs(self): @@ -693,7 +693,7 @@ def _test_unacknowledged_ops(self, client, *ops): kw = copy.copy(kw) kw["session"] = s with self.assertRaises( - ConfigurationError, msg="%s did not raise ConfigurationError" % (f.__name__,) + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" ): f(*args, **kw) if f.__name__ == "create_collection": @@ -703,11 +703,11 @@ def _test_unacknowledged_ops(self, client, *ops): self.assertIn( "lsid", event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) # Should not run any command before raising an error. - self.assertFalse(listener.started_events, "%s sent command" % (f.__name__,)) + self.assertFalse(listener.started_events, f"{f.__name__} sent command") self.assertTrue(s.has_ended) @@ -724,12 +724,12 @@ def _test_unacknowledged_ops(self, client, *ops): self.assertIn( "lsid", event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) for event in listener.started_events: self.assertNotIn( - "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" ) def test_unacknowledged_writes(self): @@ -792,7 +792,7 @@ def tearDownClass(cls): @client_context.require_sessions def setUp(self): - super(TestCausalConsistency, self).setUp() + super().setUp() @client_context.require_no_standalone def test_core(self): @@ -1072,7 +1072,7 @@ def test_cluster_time_no_server_support(self): class TestClusterTime(IntegrationTest): def setUp(self): - super(TestClusterTime, self).setUp() + super().setUp() if "$clusterTime" not in client_context.hello: raise SkipTest("$clusterTime not supported") @@ -1128,7 +1128,7 @@ def insert_and_aggregate(): ("rename_and_drop", rename_and_drop), ] - for name, f in ops: + for _name, f in ops: listener.reset() # Call f() twice, insert to advance clusterTime, call f() again. f() @@ -1140,21 +1140,20 @@ def insert_and_aggregate(): for i, event in enumerate(listener.started_events): self.assertTrue( "$clusterTime" in event.command, - "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no $clusterTime with {event.command_name}", ) if i > 0: succeeded = listener.succeeded_events[i - 1] self.assertTrue( "$clusterTime" in succeeded.reply, - "%s received no $clusterTime with %s" - % (f.__name__, succeeded.command_name), + f"{f.__name__} received no $clusterTime with {succeeded.command_name}", ) self.assertTrue( event.command["$clusterTime"]["clusterTime"] >= succeeded.reply["$clusterTime"]["clusterTime"], - "%s sent wrong $clusterTime with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent wrong $clusterTime with {event.command_name}", ) diff --git a/test/test_son.py b/test/test_son.py index 5c1f43594d..5e62ffb176 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -47,7 +47,7 @@ def test_equality(self): self.assertEqual(a1, SON({"hello": "world"})) self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) - self.assertEqual(b2, dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) + self.assertEqual(b2, {"hello_": "mike", "mike": "awesome", "hello": "world"}) self.assertNotEqual(a1, b2) self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) @@ -55,7 +55,7 @@ def test_equality(self): # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) - self.assertFalse(b2 != dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) + self.assertFalse(b2 != {"hello_": "mike", "mike": "awesome", "hello": "world"}) # Embedded SON. d4 = SON([("blah", {"foo": SON()})]) @@ -97,10 +97,10 @@ def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo # version 2.1.1 pickled_with_2_1_1 = ( - "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" - "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb." - ).encode("utf8") + b"ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" + b"c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" + b"S'_SON__keys'\np7\n(lp8\nsb." + ) son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) @@ -138,18 +138,14 @@ def test_copying(self): self.assertEqual(id(reflexive_son1), id(reflexive_son1["reflexive"])) def test_iteration(self): - """ - Test __iter__ - """ + """Test __iter__""" # test success case test_son = SON([(1, 100), (2, 200), (3, 300)]) for ele in test_son: self.assertEqual(ele * 100, test_son[ele]) def test_contains_has(self): - """ - has_key and __contains__ - """ + """has_key and __contains__""" test_son = SON([(1, 100), (2, 200), (3, 300)]) self.assertIn(1, test_son) self.assertTrue(2 in test_son, "in failed") @@ -158,9 +154,7 @@ def test_contains_has(self): self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") # noqa def test_clears(self): - """ - Test clear() - """ + """Test clear()""" test_son = SON([(1, 100), (2, 200), (3, 300)]) test_son.clear() self.assertNotIn(1, test_son) @@ -169,9 +163,7 @@ def test_clears(self): self.assertEqual({}, test_son.to_dict()) def test_len(self): - """ - Test len - """ + """Test len""" test_son = SON() self.assertEqual(0, len(test_son)) test_son = SON([(1, 100), (2, 200), (3, 300)]) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 7a6c61ad21..8bf81f4de9 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -32,7 +32,7 @@ WAIT_TIME = 0.1 -class SrvPollingKnobs(object): +class SrvPollingKnobs: def __init__( self, ttl_time=None, diff --git a/test/test_ssl.py b/test/test_ssl.py index bf151578cb..e6df2a1c24 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -142,7 +142,7 @@ def assertClientWorks(self, client): @classmethod @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") def setUpClass(cls): - super(TestSSL, cls).setUpClass() + super().setUpClass() # MongoClient should connect to the primary by default. cls.saved_port = MongoClient.PORT MongoClient.PORT = client_context.port @@ -150,7 +150,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): MongoClient.PORT = cls.saved_port - super(TestSSL, cls).tearDownClass() + super().tearDownClass() @client_context.require_tls def test_simple_ssl(self): diff --git a/test/test_threads.py b/test/test_threads.py index 899392e1a0..b948bf9249 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -111,7 +111,7 @@ def test_threading(self): self.db.test.insert_many([{"x": i} for i in range(1000)]) threads = [] - for i in range(10): + for _i in range(10): t = SaveAndFind(self.db.test) t.start() threads.append(t) diff --git a/test/test_topology.py b/test/test_topology.py index e09d7c3691..adbf19f571 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -89,7 +89,7 @@ class TopologyTest(unittest.TestCase): """Disables periodic monitoring, to make tests deterministic.""" def setUp(self): - super(TopologyTest, self).setUp() + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=999999) self.client_knobs.enable() self.addCleanup(self.client_knobs.disable) @@ -647,13 +647,13 @@ def test_topology_repr(self): ) self.assertEqual( repr(t.description), - ", " ", " "]>" % (t._topology_id,), + " rtt: None>]>".format(t._topology_id), ) def test_unexpected_load_balancer(self): @@ -734,7 +734,7 @@ def _check_with_socket(self, *args, **kwargs): if hello_count[0] in (1, 3): return Hello({"ok": 1, "maxWireVersion": 6}), 0 else: - raise AutoReconnect("mock monitor error #%s" % (hello_count[0],)) + raise AutoReconnect(f"mock monitor error #{hello_count[0]}") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) diff --git a/test/test_transactions.py b/test/test_transactions.py index dc58beb930..9b51927d67 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -63,19 +63,19 @@ class TransactionsBase(SpecRunner): @classmethod def setUpClass(cls): - super(TransactionsBase, cls).setUpClass() + super().setUpClass() if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(single_client("%s:%s" % address)) + cls.mongos_clients.append(single_client("{}:{}".format(*address))) @classmethod def tearDownClass(cls): for client in cls.mongos_clients: client.close() - super(TransactionsBase, cls).tearDownClass() + super().tearDownClass() def maybe_skip_scenario(self, test): - super(TransactionsBase, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) if ( "secondary" in self.id() and not client_context.is_mongos @@ -390,7 +390,7 @@ def test_transaction_direct_connection(self): list(res) -class PatchSessionTimeout(object): +class PatchSessionTimeout: """Patches the client_session's with_transaction timeout for testing.""" def __init__(self, mock_timeout): @@ -416,7 +416,7 @@ class _MyException(Exception): pass def raise_error(_): - raise _MyException() + raise _MyException with self.client.start_session() as s: with self.assertRaises(_MyException): diff --git a/test/test_typing.py b/test/test_typing.py index 0aebc707cd..27597bb2c8 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -13,7 +13,8 @@ # limitations under the License. """Test that each file in mypy_fails/ actually fails mypy, and test some -sample client code that uses PyMongo typings.""" +sample client code that uses PyMongo typings. +""" import os import sys import tempfile @@ -39,7 +40,7 @@ class ImplicitMovie(TypedDict): name: str year: int -except ImportError as exc: +except ImportError: Movie = dict # type:ignore[misc,assignment] ImplicitMovie = dict # type: ignore[assignment,misc] MovieWithId = dict # type: ignore[assignment,misc] @@ -164,12 +165,12 @@ def test_bulk_write_heterogeneous(self): def test_command(self) -> None: result: Dict = self.client.admin.command("ping") - items = result.items() + result.items() def test_list_collections(self) -> None: cursor = self.client.test.list_collections() value = cursor.next() - items = value.items() + value.items() def test_list_databases(self) -> None: cursor = self.client.list_databases() @@ -237,7 +238,7 @@ def foo(self): assert rt_document2.foo() == "bar" codec_options2 = CodecOptions(document_class=RawBSONDocument) - bsonbytes3 = encode(doc, codec_options=codec_options2) + encode(doc, codec_options=codec_options2) rt_document3 = decode(bsonbytes2, codec_options=codec_options2) assert rt_document3.raw @@ -463,7 +464,7 @@ def test_son_document_type(self) -> None: retrieved["a"] = 1 def test_son_document_type_runtime(self) -> None: - client = MongoClient(document_class=SON[str, Any], connect=False) + MongoClient(document_class=SON[str, Any], connect=False) @only_type_check def test_create_index(self) -> None: diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 2f81e3b512..e2dd17ec26 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -508,7 +508,7 @@ def test_redact_AWS_SESSION_TOKEN(self): def test_special_chars(self): user = "user@ /9+:?~!$&'()*+,;=" pwd = "pwd@ /9+:?~!$&'()*+,;=" - uri = "mongodb://%s:%s@localhost" % (quote_plus(user), quote_plus(pwd)) + uri = f"mongodb://{quote_plus(user)}:{quote_plus(pwd)}@localhost" res = parse_uri(uri) self.assertEqual(user, res["username"]) self.assertEqual(pwd, res["password"]) diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index d12abf3b91..5b68c80401 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -13,7 +13,8 @@ # limitations under the License. """Test that the pymongo.uri_parser module is compliant with the connection -string and uri options specifications.""" +string and uri options specifications. +""" import json import os @@ -73,7 +74,7 @@ def setUp(self): def get_error_message_template(expected, artefact): - return "%s %s for test '%s'" % ("Expected" if expected else "Unexpected", artefact, "%s") + return "{} {} for test '{}'".format("Expected" if expected else "Unexpected", artefact, "%s") def run_scenario_in_dir(target_workdir): @@ -133,13 +134,15 @@ def run_scenario(self): for exp, actual in zip(test["hosts"], options["nodelist"]): self.assertEqual( - exp["host"], actual[0], "Expected host %s but got %s" % (exp["host"], actual[0]) + exp["host"], + actual[0], + "Expected host {} but got {}".format(exp["host"], actual[0]), ) if exp["port"] is not None: self.assertEqual( exp["port"], actual[1], - "Expected port %s but got %s" % (exp["port"], actual), + "Expected port {} but got {}".format(exp["port"], actual), ) # Compare auth options. @@ -157,7 +160,7 @@ def run_scenario(self): self.assertEqual( auth[elm], options[elm], - "Expected %s but got %s" % (auth[elm], options[elm]), + f"Expected {auth[elm]} but got {options[elm]}", ) # Compare URI options. @@ -183,7 +186,7 @@ def run_scenario(self): ), ) else: - self.fail("Missing expected option %s" % (opt,)) + self.fail(f"Missing expected option {opt}") return run_scenario_in_dir(test_workdir)(run_scenario) @@ -209,7 +212,7 @@ def create_tests(test_path): continue testmethod = create_test(testcase, dirpath) - testname = "test_%s_%s_%s" % ( + testname = "test_{}_{}_{}".format( dirname, os.path.splitext(filename)[0], str(dsc).replace(" ", "_"), diff --git a/test/test_write_concern.py b/test/test_write_concern.py index 02c562a348..822f3a4d1d 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -40,7 +40,7 @@ def test_equality_to_none(self): self.assertTrue(concern != None) # noqa def test_equality_compatible_type(self): - class _FakeWriteConcern(object): + class _FakeWriteConcern: def __init__(self, **document): self.document = document diff --git a/test/unified_format.py b/test/unified_format.py index 584ee04ddd..90cb442b28 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -131,7 +131,7 @@ # Build up a placeholder map. -PLACEHOLDER_MAP = dict() +PLACEHOLDER_MAP = {} for (provider_name, provider_data) in [ ("local", {"key": LOCAL_MASTER_KEY}), ("aws", AWS_CREDS), @@ -257,7 +257,7 @@ def parse_bulk_write_error_result(error): return parse_bulk_write_result(write_result) -class NonLazyCursor(object): +class NonLazyCursor: """A find cursor proxy that creates the remote cursor when initialized.""" def __init__(self, find_cursor, client): @@ -289,7 +289,7 @@ class EventListenerUtil(CMAPListener, CommandListener, ServerListener): def __init__( self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map ): - self._event_types = set(name.lower() for name in observe_events) + self._event_types = {name.lower() for name in observe_events} if observe_sensitive_commands: self._observe_sensitive_commands = True self._ignore_commands = set(ignore_commands) @@ -306,7 +306,7 @@ def __init__( for i in events: self._event_mapping[i].append(id) self.entity_map[id] = [] - super(EventListenerUtil, self).__init__() + super().__init__() def get_events(self, event_type): assert event_type in ("command", "cmap", "sdam", "all"), event_type @@ -321,7 +321,7 @@ def get_events(self, event_type): def add_event(self, event): event_name = type(event).__name__.lower() if event_name in self._event_types: - super(EventListenerUtil, self).add_event(event) + super().add_event(event) for id in self._event_mapping[event_name]: self.entity_map[id].append( { @@ -332,7 +332,7 @@ def add_event(self, event): ) def _command_event(self, event): - if not event.command_name.lower() in self._ignore_commands: + if event.command_name.lower() not in self._ignore_commands: self.add_event(event) def started(self, event): @@ -364,9 +364,10 @@ def closed(self, event: ServerClosedEvent) -> None: self.add_event(event) -class EntityMapUtil(object): +class EntityMapUtil: """Utility class that implements an entity map as per the unified - test format specification.""" + test format specification. + """ def __init__(self, test_class): self._entities: Dict[str, Any] = {} @@ -384,14 +385,14 @@ def __getitem__(self, item): try: return self._entities[item] except KeyError: - self.test.fail("Could not find entity named %s in map" % (item,)) + self.test.fail(f"Could not find entity named {item} in map") def __setitem__(self, key, value): if not isinstance(key, str): self.test.fail("Expected entity name of type str, got %s" % (type(key))) if key in self._entities: - self.test.fail("Entity named %s already in map" % (key,)) + self.test.fail(f"Entity named {key} already in map") self._entities[key] = value @@ -410,9 +411,7 @@ def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: - self.test.fail( - "Entity spec %s did not contain exactly one top-level key" % (entity_spec,) - ) + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") entity_type, spec = next(iter(entity_spec.items())) spec = self._handle_placeholders(spec, spec, "") @@ -454,8 +453,9 @@ def _create_entity(self, entity_spec, uri=None): client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" - % (spec["client"], type(client)) + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) ) options = parse_collection_or_database_options(spec.get("databaseOptions", {})) self[spec["id"]] = client.get_database(spec["databaseName"], **options) @@ -464,8 +464,9 @@ def _create_entity(self, entity_spec, uri=None): database = self[spec["database"]] if not isinstance(database, Database): self.test.fail( - "Expected entity %s to be of type Database, got %s" - % (spec["database"], type(database)) + "Expected entity {} to be of type Database, got {}".format( + spec["database"], type(database) + ) ) options = parse_collection_or_database_options(spec.get("collectionOptions", {})) self[spec["id"]] = database.get_collection(spec["collectionName"], **options) @@ -474,8 +475,9 @@ def _create_entity(self, entity_spec, uri=None): client = self[spec["client"]] if not isinstance(client, MongoClient): self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" - % (spec["client"], type(client)) + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) ) opts = camel_to_snake_args(spec.get("sessionOptions", {})) if "default_transaction_options" in opts: @@ -522,7 +524,7 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: self[name] = thread return - self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) + self.test.fail(f"Unable to create entity of unknown type {entity_type}") def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: @@ -532,12 +534,12 @@ def get_listener_for_client(self, client_name: str) -> EventListenerUtil: client = self[client_name] if not isinstance(client, MongoClient): self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" % (client_name, type(client)) + f"Expected entity {client_name} to be of type MongoClient, got {type(client)}" ) listener = self._listeners.get(client_name) if not listener: - self.test.fail("No listeners configured for client %s" % (client_name,)) + self.test.fail(f"No listeners configured for client {client_name}") return listener @@ -545,8 +547,7 @@ def get_lsid_for_session(self, session_name): session = self[session_name] if not isinstance(session, ClientSession): self.test.fail( - "Expected entity %s to be of type ClientSession, got %s" - % (session_name, type(session)) + f"Expected entity {session_name} to be of type ClientSession, got {type(session)}" ) try: @@ -587,9 +588,10 @@ def get_lsid_for_session(self, session_name): } -class MatchEvaluatorUtil(object): +class MatchEvaluatorUtil: """Utility class that implements methods for evaluating matches as per - the unified test format specification.""" + the unified test format specification. + """ def __init__(self, test_class): self.test = test_class @@ -606,11 +608,11 @@ def _operation_exists(self, spec, actual, key_to_compare): else: self.test.assertNotIn(key_to_compare, actual) else: - self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) + self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") def __type_alias_to_type(self, alias): if alias not in BSON_TYPE_ALIAS_MAP: - self.test.fail("Unrecognized BSON type alias %s" % (alias,)) + self.test.fail(f"Unrecognized BSON type alias {alias}") return BSON_TYPE_ALIAS_MAP[alias] def _operation_type(self, spec, actual, key_to_compare): @@ -653,11 +655,11 @@ def _operation_lte(self, spec, actual, key_to_compare): self.test.assertLessEqual(actual[key_to_compare], spec) def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): - method_name = "_operation_%s" % (opname.strip("$"),) + method_name = "_operation_{}".format(opname.strip("$")) try: method = getattr(self, method_name) except AttributeError: - self.test.fail("Unsupported special matching operator %s" % (opname,)) + self.test.fail(f"Unsupported special matching operator {opname}") else: method(spec, actual, key_to_compare) @@ -668,7 +670,8 @@ def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=Non If given, ``key_to_compare`` is assumed to be the key in ``expectation`` whose corresponding value needs to be evaluated for a possible special operation. ``key_to_compare`` - is ignored when ``expectation`` has only one key.""" + is ignored when ``expectation`` has only one key. + """ if not isinstance(expectation, abc.Mapping): return False @@ -730,14 +733,16 @@ def match_result(self, expectation, actual, in_recursive_call=False): self._match_document(e, a, is_root=not in_recursive_call) else: self.match_result(e, a, in_recursive_call=True) - return + return None # account for flexible numerics in element-wise comparison if isinstance(expectation, int) or isinstance(expectation, float): self.test.assertEqual(expectation, actual) + return None else: self.test.assertIsInstance(actual, type(expectation)) self.test.assertEqual(expectation, actual) + return None def assertHasServiceId(self, spec, actual): if "hasServiceId" in spec: @@ -828,7 +833,7 @@ def match_event(self, event_type, expectation, actual): if "newDescription" in spec: self.match_server_description(actual.new_description, spec["newDescription"]) else: - raise Exception("Unsupported event type %s" % (name,)) + raise Exception(f"Unsupported event type {name}") def coerce_result(opname, result): @@ -840,7 +845,7 @@ def coerce_result(opname, result): if opname == "insertOne": return {"insertedId": result.inserted_id} if opname == "insertMany": - return {idx: _id for idx, _id in enumerate(result.inserted_ids)} + return dict(enumerate(result.inserted_ids)) if opname in ("deleteOne", "deleteMany"): return {"deletedCount": result.deleted_count} if opname in ("updateOne", "updateMany", "replaceOne"): @@ -904,11 +909,11 @@ def insert_initial_data(self, initial_data): @classmethod def setUpClass(cls): # super call creates internal client cls.client - super(UnifiedSpecTestMixinV1, cls).setUpClass() + super().setUpClass() # process file-level runOnRequirements run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) if not cls.should_run_on(run_on_spec): - raise unittest.SkipTest("%s runOnRequirements not satisfied" % (cls.__name__,)) + raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here if client_context.storage_engine == "mmapv1": @@ -916,7 +921,7 @@ def setUpClass(cls): raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") def setUp(self): - super(UnifiedSpecTestMixinV1, self).setUp() + super().setUp() # process schemaVersion # note: we check major schema version during class generation # note: we do this here because we cannot run assertions in setUpClass @@ -924,7 +929,7 @@ def setUp(self): self.assertLessEqual( version, self.SCHEMA_VERSION, - "expected schema version %s or lower, got %s" % (self.SCHEMA_VERSION, version), + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", ) # initialize internals @@ -1044,20 +1049,18 @@ def process_error(self, exception, spec): if error_labels_omit: for err_label in error_labels_omit: if exception.has_error_label(err_label): - self.fail("Exception '%s' unexpectedly had label '%s'" % (exception, err_label)) + self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'") if expect_result: if isinstance(exception, BulkWriteError): result = parse_bulk_write_error_result(exception) self.match_evaluator.match_result(expect_result, result) else: - self.fail( - "expectResult can only be specified with %s exceptions" % (BulkWriteError,) - ) + self.fail(f"expectResult can only be specified with {BulkWriteError} exceptions") def __raise_if_unsupported(self, opname, target, *target_types): if not isinstance(target, target_types): - self.fail("Operation %s not supported for entity of type %s" % (opname, type(target))) + self.fail(f"Operation {opname} not supported for entity of type {type(target)}") def __entityOperation_createChangeStream(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": @@ -1153,6 +1156,7 @@ def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): return next(target) except StopIteration: pass + return None def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor) @@ -1182,8 +1186,8 @@ def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): kwargs["master_key"] = opts.get("masterKey") data = target.rewrap_many_data_key(*args, **kwargs) if data.bulk_write_result: - return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) - return dict() + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: with target.open_download_stream(*args, **kwargs) as gout: @@ -1234,30 +1238,30 @@ def run_entity_operation(self, spec): arguments = {} if isinstance(target, MongoClient): - method_name = "_clientOperation_%s" % (opname,) + method_name = f"_clientOperation_{opname}" elif isinstance(target, Database): - method_name = "_databaseOperation_%s" % (opname,) + method_name = f"_databaseOperation_{opname}" elif isinstance(target, Collection): - method_name = "_collectionOperation_%s" % (opname,) + method_name = f"_collectionOperation_{opname}" # contentType is always stored in metadata in pymongo. if target.name.endswith(".files") and opname == "find": for doc in spec.get("expectResult", []): if "contentType" in doc: doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): - method_name = "_changeStreamOperation_%s" % (opname,) + method_name = f"_changeStreamOperation_{opname}" elif isinstance(target, NonLazyCursor): - method_name = "_cursor_%s" % (opname,) + method_name = f"_cursor_{opname}" elif isinstance(target, ClientSession): - method_name = "_sessionOperation_%s" % (opname,) + method_name = f"_sessionOperation_{opname}" elif isinstance(target, GridFSBucket): - method_name = "_bucketOperation_%s" % (opname,) + method_name = f"_bucketOperation_{opname}" if "id" in arguments: arguments["file_id"] = arguments.pop("id") # MD5 is always disabled in pymongo. arguments.pop("disable_md5", None) elif isinstance(target, ClientEncryption): - method_name = "_clientEncryptionOperation_%s" % (opname,) + method_name = f"_clientEncryptionOperation_{opname}" else: method_name = "doesNotExist" @@ -1270,7 +1274,7 @@ def run_entity_operation(self, spec): try: cmd = getattr(target, target_opname) except AttributeError: - self.fail("Unsupported operation %s on entity %s" % (opname, target)) + self.fail(f"Unsupported operation {opname} on entity {target}") else: cmd = functools.partial(method, target) @@ -1286,15 +1290,13 @@ def run_entity_operation(self, spec): # Ignore all operation errors but to avoid masking bugs don't # ignore things like TypeError and ValueError. if ignore and isinstance(exc, (PyMongoError,)): - return + return None if expect_error: return self.process_error(exc, expect_error) raise else: if expect_error: - self.fail( - 'Excepted error %s but "%s" succeeded: %s' % (expect_error, opname, result) - ) + self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') if expect_result: actual = coerce_result(opname, result) @@ -1302,6 +1304,8 @@ def run_entity_operation(self, spec): if save_as_entity: self.entity_map[save_as_entity] = result + return None + return None def __set_fail_point(self, client, command_args): if not client_context.test_commands_enabled: @@ -1324,10 +1328,10 @@ def _testOperation_targetedFailPoint(self, spec): if not session._pinned_address: self.fail( "Cannot use targetedFailPoint operation with unpinned " - "session %s" % (spec["session"],) + "session {}".format(spec["session"]) ) - client = single_client("%s:%s" % session._pinned_address) + client = single_client("{}:{}".format(*session._pinned_address)) self.addCleanup(client.close) self.__set_fail_point(client=client, command_args=spec["failPoint"]) @@ -1422,9 +1426,7 @@ def _testOperation_assertEventCount(self, spec): Assert the given event was published exactly `count` times. """ client, event, count = spec["client"], spec["event"], spec["count"] - self.assertEqual( - self._event_count(client, event), count, "expected %s not %r" % (count, event) - ) + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") def _testOperation_waitForEvent(self, spec): """Run the waitForEvent test operation. @@ -1434,7 +1436,7 @@ def _testOperation_waitForEvent(self, spec): client, event, count = spec["client"], spec["event"], spec["count"] wait_until( lambda: self._event_count(client, event) >= count, - "find %s %s event(s)" % (count, event), + f"find {count} {event} event(s)", ) def _testOperation_wait(self, spec): @@ -1485,7 +1487,7 @@ def _testOperation_waitForThread(self, spec): thread.join(10) if thread.exc: raise thread.exc - self.assertFalse(thread.is_alive(), "Thread %s is still running" % (spec["thread"],)) + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) def _testOperation_loop(self, spec): failure_key = spec.get("storeFailuresAsEntity") @@ -1527,11 +1529,11 @@ def _testOperation_loop(self, spec): def run_special_operation(self, spec): opname = spec["name"] - method_name = "_testOperation_%s" % (opname,) + method_name = f"_testOperation_{opname}" try: method = getattr(self, method_name) except AttributeError: - self.fail("Unsupported special test operation %s" % (opname,)) + self.fail(f"Unsupported special test operation {opname}") else: method(spec["arguments"]) @@ -1604,8 +1606,10 @@ def run_scenario(self, spec, uri=None): self.setUp() continue raise + return None else: self._run_scenario(spec, uri) + return None def _run_scenario(self, spec, uri=None): # maybe skip test manually @@ -1619,7 +1623,7 @@ def _run_scenario(self, spec, uri=None): # process skipReason skip_reason = spec.get("skipReason", None) if skip_reason is not None: - raise unittest.SkipTest("%s" % (skip_reason,)) + raise unittest.SkipTest(f"{skip_reason}") # process createEntities self._uri = uri @@ -1648,7 +1652,7 @@ class UnifiedSpecTestMeta(type): EXPECTED_FAILURES: Any def __init__(cls, *args, **kwargs): - super(UnifiedSpecTestMeta, cls).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def create_test(spec): def test_case(self): @@ -1658,7 +1662,9 @@ def test_case(self): for test_spec in cls.TEST_SPEC["tests"]: description = test_spec["description"] - test_name = "test_%s" % (description.strip(". ").replace(" ", "_").replace(".", "_"),) + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) test_method = create_test(copy.deepcopy(test_spec)) test_method.__name__ = str(test_name) @@ -1690,13 +1696,15 @@ def generate_test_classes( **kwargs, ): """Method for generating test classes. Returns a dictionary where keys are - the names of test classes and values are the test class objects.""" + the names of test classes and values are the test class objects. + """ test_klasses = {} def test_base_class_factory(test_spec): """Utility that creates the base class to use for test generation. This is needed to ensure that cls.TEST_SPEC is appropriately set when - the metaclass __init__ is invoked.""" + the metaclass __init__ is invoked. + """ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore TEST_SPEC = test_spec @@ -1716,7 +1724,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) test_type = os.path.splitext(filename)[0] - snake_class_name = "Test%s_%s_%s" % ( + snake_class_name = "Test{}_{}_{}".format( class_name_prefix, dirname.replace("-", "_"), test_type.replace("-", "_").replace(".", "_"), @@ -1728,8 +1736,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) if mixin_class is None: raise ValueError( - "test file '%s' has unsupported schemaVersion '%s'" - % (fpath, schema_version) + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" ) module_dict = {"__module__": module} module_dict.update(kwargs) diff --git a/test/utils.py b/test/utils.py index b39375925c..810a02b872 100644 --- a/test/utils.py +++ b/test/utils.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for testing pymongo -""" +"""Utilities for testing pymongo""" import contextlib import copy @@ -65,7 +64,7 @@ IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) -class BaseListener(object): +class BaseListener: def __init__(self): self.events = [] @@ -91,7 +90,7 @@ def matching(self, matcher): def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" - wait_until(lambda: self.event_count(event) >= count, "find %s %s event(s)" % (count, event)) + wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): @@ -142,7 +141,7 @@ def pool_closed(self, event): class EventListener(BaseListener, monitoring.CommandListener): def __init__(self): - super(EventListener, self).__init__() + super().__init__() self.results = defaultdict(list) @property @@ -176,7 +175,7 @@ def started_command_names(self) -> List[str]: def reset(self) -> None: """Reset the state of this listener.""" self.results.clear() - super(EventListener, self).reset() + super().reset() class TopologyEventListener(monitoring.TopologyListener): @@ -200,19 +199,19 @@ def reset(self): class AllowListEventListener(EventListener): def __init__(self, *commands): self.commands = set(commands) - super(AllowListEventListener, self).__init__() + super().__init__() def started(self, event): if event.command_name in self.commands: - super(AllowListEventListener, self).started(event) + super().started(event) def succeeded(self, event): if event.command_name in self.commands: - super(AllowListEventListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if event.command_name in self.commands: - super(AllowListEventListener, self).failed(event) + super().failed(event) class OvertCommandListener(EventListener): @@ -222,18 +221,18 @@ class OvertCommandListener(EventListener): def started(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).started(event) + super().started(event) def succeeded(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).failed(event) + super().failed(event) -class _ServerEventListener(object): +class _ServerEventListener: """Listens to all events.""" def __init__(self): @@ -280,7 +279,7 @@ def failed(self, event): self.add_event(event) -class MockSocketInfo(object): +class MockSocketInfo: def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False @@ -295,7 +294,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): pass -class MockPool(object): +class MockPool: def __init__(self, address, options, handshake=True): self.gen = _PoolGeneration() self._lock = _create_lock() @@ -357,7 +356,7 @@ def __getitem__(self, item): return ScenarioDict({}) -class CompareType(object): +class CompareType: """Class that compares equal to any object of the given type(s).""" def __init__(self, types): @@ -367,7 +366,7 @@ def __eq__(self, other): return isinstance(other, self.types) -class FunctionCallRecorder(object): +class FunctionCallRecorder: """Utility class to wrap a callable and record its invocations.""" def __init__(self, function): @@ -392,7 +391,7 @@ def call_count(self): return len(self._call_list) -class TestCreator(object): +class TestCreator: """Class to create test cases from specifications.""" def __init__(self, create_test, test_class, test_path): @@ -415,7 +414,8 @@ def __init__(self, create_test, test_class, test_path): def _ensure_min_max_server_version(self, scenario_def, method): """Test modifier that enforces a version range for the server on a - test case.""" + test case. + """ if "minServerVersion" in scenario_def: min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) if min_ver is not None: @@ -524,7 +524,7 @@ def create_tests(self): # Construct test from scenario. for test_def in self.tests(scenario_def): - test_name = "test_%s_%s_%s" % ( + test_name = "test_{}_{}_{}".format( dirname, test_type.replace("-", "_").replace(".", "_"), str(test_def["description"].replace(" ", "_").replace(".", "_")), @@ -539,9 +539,9 @@ def create_tests(self): def _connection_string(h): - if h.startswith("mongodb://") or h.startswith("mongodb+srv://"): + if h.startswith(("mongodb://", "mongodb+srv://")): return h - return "mongodb://%s" % (str(h),) + return f"mongodb://{str(h)}" def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): @@ -620,7 +620,7 @@ def ensure_all_connected(client: MongoClient) -> None: raise ConfigurationError("cluster is not a replica set") target_host_list = set(hello["hosts"] + hello.get("passives", [])) - connected_host_list = set([hello["me"]]) + connected_host_list = {hello["me"]} # Run hello until we have connected to each host at least once. def discover(): @@ -821,7 +821,7 @@ def assertRaisesExactly(cls, fn, *args, **kwargs): try: fn(*args, **kwargs) except Exception as e: - assert e.__class__ == cls, "got %s, expected %s" % (e.__class__.__name__, cls.__name__) + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" else: raise AssertionError("%s not raised" % cls) @@ -848,7 +848,7 @@ def wrapper(*args, **kwargs): return _ignore_deprecations() -class DeprecationFilter(object): +class DeprecationFilter: def __init__(self, action="ignore"): """Start filtering deprecations.""" self.warn_context = warnings.catch_warnings() @@ -922,7 +922,7 @@ def lazy_client_trial(reset, target, test, get_client): collection = client_context.client.pymongo_test.test with frequent_thread_switches(): - for i in range(NTRIALS): + for _i in range(NTRIALS): reset(collection) lazy_client = get_client() lazy_collection = lazy_client.pymongo_test.test @@ -972,11 +972,11 @@ class ExceptionCatchingThread(threading.Thread): def __init__(self, *args, **kwargs): self.exc = None - super(ExceptionCatchingThread, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def run(self): try: - super(ExceptionCatchingThread, self).run() + super().run() except BaseException as exc: self.exc = exc raise @@ -1147,6 +1147,6 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac elif cursor_type == "tailableAwait": arguments["cursor_type"] = CursorType.TAILABLE else: - assert False, f"Unsupported cursorType: {cursor_type}" + raise AssertionError(f"Unsupported cursorType: {cursor_type}") else: arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index e693fc25f0..ccb3897966 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -109,9 +109,11 @@ def get_topology_type_name(scenario_def): def get_topology_settings_dict(**kwargs): - settings = dict( - monitor_class=DummyMonitor, heartbeat_frequency=HEARTBEAT_FREQUENCY, pool_class=MockPool - ) + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": MockPool, + } settings.update(kwargs) return settings @@ -255,7 +257,7 @@ class TestAllScenarios(unittest.TestCase): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 6530f39da6..4ca6f1cc58 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -49,7 +49,7 @@ class SpecRunnerThread(threading.Thread): def __init__(self, name): - super(SpecRunnerThread, self).__init__() + super().__init__() self.name = name self.exc = None self.daemon = True @@ -88,7 +88,7 @@ class SpecRunner(IntegrationTest): @classmethod def setUpClass(cls): - super(SpecRunner, cls).setUpClass() + super().setUpClass() cls.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. @@ -98,10 +98,10 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.knobs.disable() - super(SpecRunner, cls).tearDownClass() + super().tearDownClass() def setUp(self): - super(SpecRunner, self).setUp() + super().setUp() self.targets = {} self.listener = None # type: ignore self.pool_listener = None @@ -170,7 +170,7 @@ def assertErrorLabelsContain(self, exc, expected_labels): def assertErrorLabelsOmit(self, exc, omit_labels): for label in omit_labels: self.assertFalse( - exc.has_error_label(label), msg="error labels should not contain %s" % (label,) + exc.has_error_label(label), msg=f"error labels should not contain {label}" ) def kill_all_sessions(self): @@ -242,6 +242,7 @@ def _helper(expected_result, result): self.assertEqual(expected_result, result) _helper(expected_result, result) + return None def get_object_name(self, op): """Allow subclasses to override handling of 'object' @@ -335,7 +336,7 @@ def _run_op(self, sessions, collection, op, in_with_transaction): expected_result = op.get("result") if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: - out = self.run_operation(sessions, collection, op.copy()) + self.run_operation(sessions, collection, op.copy()) exc = context.exception if expect_error_message(expected_result): if isinstance(exc, BulkWriteError): @@ -425,9 +426,9 @@ def check_events(self, test, listener, session_ids): for key, val in expected.items(): if val is None: if key in actual: - self.fail("Unexpected key [%s] in %r" % (key, actual)) + self.fail(f"Unexpected key [{key}] in {actual!r}") elif key not in actual: - self.fail("Expected key [%s] in %r" % (key, actual)) + self.fail(f"Expected key [{key}] in {actual!r}") else: # Workaround an incorrect command started event in fle2v2-CreateCollection.yml # added in DRIVERS-2524. @@ -436,7 +437,7 @@ def check_events(self, test, listener, session_ids): if val.get(n) is None: val.pop(n, None) self.assertEqual( - val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) + val, decode_raw(actual[key]), f"Key [{key}] in {actual}" ) else: self.assertEqual(actual, expected) @@ -459,7 +460,8 @@ def get_outcome_coll_name(self, outcome, collection): def run_test_ops(self, sessions, collection, test): """Added to allow retryable writes spec to override a test's - operation.""" + operation. + """ self.run_operations(sessions, collection, test["operations"]) def parse_client_options(self, opts): diff --git a/test/version.py b/test/version.py index e102db7111..1dd1bec5f9 100644 --- a/test/version.py +++ b/test/version.py @@ -18,7 +18,7 @@ class Version(tuple): def __new__(cls, *version): padded_version = cls._padded(version, 4) - return super(Version, cls).__new__(cls, tuple(padded_version)) + return super().__new__(cls, tuple(padded_version)) @classmethod def _padded(cls, iter, length, padding=0): From bc1a513d1041fceabc8cb1e8372bcb0807343813 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 11 May 2023 15:29:43 -0700 Subject: [PATCH 0905/2111] PYTHON-2504 Add pyupgrade/ruff commit to git-blame ignore --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 8f02673e41..67ad992c75 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,2 +1,4 @@ # Initial pre-commit reformat 5578999a90e439fbca06fc0ffc98f4d04e96f7b4 +# pyupgrade and ruff +0092b0af79378abf35b6db73a082ecb91af1d973 From 0123d32a20ced822c711c1312785e1419ace365f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 12 May 2023 12:17:40 -0700 Subject: [PATCH 0906/2111] PYTHON-3709 Remove "beta" from Queryable Encryption Equality API (#1210) --- doc/examples/encryption.rst | 17 ++++++++-------- pymongo/encryption.py | 37 ++++++++++++----------------------- pymongo/encryption_options.py | 8 ++------ pymongo/errors.py | 3 --- 4 files changed, 22 insertions(+), 43 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 2823d3f9bc..52fc548285 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -605,18 +605,17 @@ Queryable Encryption .. _automatic-queryable-client-side-encryption: -Automatic Queryable Encryption (Beta) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Automatic Queryable Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. +Automatic Queryable Encryption requires MongoDB 7.0+ Enterprise or a MongoDB 7.0+ Atlas cluster. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, which are further processed server-side. -You must have MongoDB 7.0 Enterprise to preview the capability. - -Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: +Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, +as demonstrated by the following example:: import os from bson.codec_options import CodecOptions @@ -669,10 +668,10 @@ Automatic encryption in Queryable Encryption is configured with an ``encrypted_f In the above example, the ``firstName`` and ``lastName`` fields are automatically encrypted and decrypted. -Explicit Queryable Encryption (Beta) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Explicit Queryable Encryption +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -PyMongo 4.4 brings beta support for Queryable Encryption with MongoDB >=7.0. +Explicit Queryable Encryption requires MongoDB 7.0+. Queryable Encryption is the second version of Client-Side Field Level Encryption. Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index f2eb71ce71..3e6163f80f 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -417,17 +417,11 @@ class Algorithm(str, enum.Enum): INDEXED = "Indexed" """Indexed. - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - .. versionadded:: 4.2 """ UNINDEXED = "Unindexed" """Unindexed. - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - .. versionadded:: 4.2 """ RANGEPREVIEW = "RangePreview" @@ -441,10 +435,7 @@ class Algorithm(str, enum.Enum): class QueryType(str, enum.Enum): - """**(BETA)** An enum that defines the supported values for explicit encryption query_type. - - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. + """An enum that defines the supported values for explicit encryption query_type. .. versionadded:: 4.2 """ @@ -453,7 +444,11 @@ class QueryType(str, enum.Enum): """Used to encrypt a value for an equality query.""" RANGEPREVIEW = "rangePreview" - """Used to encrypt a value for a range query.""" + """Used to encrypt a value for a range query. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. +""" class ClientEncryption(Generic[_DocumentType]): @@ -577,9 +572,6 @@ def create_encrypted_collection( ) -> Tuple[Collection[_DocumentType], Mapping[str, Any]]: """Create a collection with encryptedFields. - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - .. warning:: This function does not update the encryptedFieldsMap in the client's AutoEncryptionOpts, thus the user must create a new client after calling this function with @@ -592,7 +584,7 @@ def create_encrypted_collection( :Parameters: - `name`: the name of the collection to create - - `encrypted_fields` (dict): **(BETA)** Document that describes the encrypted fields for + - `encrypted_fields` (dict): Document that describes the encrypted fields for Queryable Encryption. For example:: { @@ -801,23 +793,18 @@ def encrypt( :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): **(BETA)** The query type to execute. See - :class:`QueryType` for valid options. - - `contention_factor` (int): **(BETA)** The contention factor to use + - `query_type` (str): The query type to execute. See :class:`QueryType` for valid options. + - `contention_factor` (int): The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - `range_opts`: Experimental only, not intended for public use. - .. note:: `query_type`, and `contention_factor` are part of the Queryable Encryption beta. - Backwards-breaking changes may be made before the final release. - :Returns: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. .. versionchanged:: 4.2 Added the `query_type` and `contention_factor` parameters. - """ return self._encrypt_helper( value=value, @@ -846,16 +833,16 @@ def encrypt_expression( provided. :Parameters: - - `expression`: **(BETA)** The BSON aggregate or match expression to encrypt. + - `expression`: The BSON aggregate or match expression to encrypt. - `algorithm` (string): The encryption algorithm to use. See :class:`Algorithm` for some valid options. - `key_id`: Identifies a data key by ``_id`` which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): **(BETA)** The query type to execute. See + - `query_type` (str): The query type to execute. See :class:`QueryType` for valid options. - - `contention_factor` (int): **(BETA)** The contention factor to use + - `contention_factor` (int): The contention factor to use when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index e87d96b31a..285b082a7d 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -148,11 +148,11 @@ def __init__( - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is unable to load the crypt_shared library. - - `bypass_query_analysis` (optional): **(BETA)** If ``True``, disable automatic analysis + - `bypass_query_analysis` (optional): If ``True``, disable automatic analysis of outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. - - `encrypted_fields_map`: **(BETA)** Map of collection namespace ("db.coll") to documents + - `encrypted_fields_map`: Map of collection namespace ("db.coll") to documents that described the encrypted fields for Queryable Encryption. For example:: { @@ -175,10 +175,6 @@ def __init__( } } - .. note:: `bypass_query_analysis` and `encrypted_fields_map` are part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the - final release. - .. versionchanged:: 4.2 Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` parameters. diff --git a/pymongo/errors.py b/pymongo/errors.py index 36f97f4b5a..e7aef90552 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -356,9 +356,6 @@ def timeout(self) -> bool: class EncryptedCollectionError(EncryptionError): """Raised when creating a collection with encrypted_fields fails. - .. note:: EncryptedCollectionError and `create_encrypted_collection` are both part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the final release. - .. versionadded:: 4.4 """ From 622df873aecfe87e61b35867bc4a22ba5be52733 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 May 2023 16:05:23 -0500 Subject: [PATCH 0907/2111] PYTHON-3696 Bump minimum pymongocrypt version req to >=1.6 for QEv2 (#1211) --- doc/changelog.rst | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index a0d73eb4de..3d03a6f386 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -7,7 +7,7 @@ Changes in Version 4.4 - Added support for MongoDB 7.0. - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. -- pymongocrypt 1.5.0 or later is now required for client side field level +- pymongocrypt 1.6.0 or later is now required for client side field level encryption support. - Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. diff --git a/setup.py b/setup.py index 4fa51fa314..9e8cf4b291 100755 --- a/setup.py +++ b/setup.py @@ -281,7 +281,7 @@ def build_extension(self, ext): aws_reqs = ["pymongo-auth-aws<2.0.0"] extras_require = { - "encryption": ["pymongocrypt>=1.5.0,<2.0.0"] + aws_reqs, + "encryption": ["pymongocrypt>=1.6.0,<2.0.0"] + aws_reqs, "ocsp": pyopenssl_reqs, "snappy": ["python-snappy"], "zstd": ["zstandard"], From 738048bf4e62db9b9ae8733ecd0459aeecb7895a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 May 2023 16:29:24 -0500 Subject: [PATCH 0908/2111] PYTHON-3646 Update readme for PyMongo driver (#1212) --- README.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index cc2b79d842..71d47bdc0b 100644 --- a/README.rst +++ b/README.rst @@ -24,8 +24,8 @@ Support / Feedback For issues with, questions about, or feedback for PyMongo, please look into our `support channels `_. Please do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. +questions - you're more likely to get an answer on `StackOverflow `_ +(using a "mongodb" tag). Bugs / Feature Requests ======================= @@ -192,6 +192,12 @@ Documentation can be generated by running **python setup.py doc**. Generated documentation can be found in the *doc/build/html/* directory. +Learning Resources +================== + +MongoDB Learn - `Python courses `_. +`Python Articles on Developer Center `_. + Testing ======= From 2a869b56ca42118820f047e5481debb723c13836 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 12 May 2023 16:58:10 -0500 Subject: [PATCH 0909/2111] PYTHON-3613 Improving Time-Series Scalability (#1213) --- .../timeseries-collection.json | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json index b5638fd36e..8525056fd1 100644 --- a/test/collection_management/timeseries-collection.json +++ b/test/collection_management/timeseries-collection.json @@ -250,6 +250,71 @@ ] } ] + }, + { + "description": "createCollection with bucketing options", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] } ] } From bda9e3a0bb533b6d0c6c5141feeecb77c4d31709 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 15 May 2023 06:36:36 -0500 Subject: [PATCH 0910/2111] PYTHON-3469 Error if RewrapManyDataKey is called with masterKey and without provider (#1214) --- pymongo/encryption.py | 2 ++ test/test_encryption.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 3e6163f80f..1d407fae88 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -1033,6 +1033,8 @@ def rewrap_many_data_key( .. versionadded:: 4.2 """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") self._check_closed() with _wrap_encryption_errors(): raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) diff --git a/test/test_encryption.py b/test/test_encryption.py index 95f18eb307..314b8dfbbe 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2387,6 +2387,12 @@ def run_test(self, src_provider, dst_provider): decrypt_result2 = client_encryption2.decrypt(cipher_text) self.assertEqual(decrypt_result2, "test") + # 8. Case 2. Provider is not optional when master_key is given. + with self.assertRaises(ConfigurationError): + rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( + {}, master_key=self.MASTER_KEYS[dst_provider] + ) + # https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials class TestOnDemandAWSCredentials(EncryptionIntegrationTest): From 4c0196d3409286bad13d654126a6f5a226ce1430 Mon Sep 17 00:00:00 2001 From: thalassemia Date: Fri, 26 May 2023 07:40:32 -0700 Subject: [PATCH 0911/2111] PYTHON-3717 Speed up _type_marker check in BSON (#1219) --- bson/_cbsonmodule.c | 52 +++++++++++++++++++-------------------- bson/_cbsonmodule.h | 2 +- pymongo/_cmessagemodule.c | 33 +++++++++++++++---------- 3 files changed, 47 insertions(+), 40 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index e45a11be32..8e5e8b6c0c 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -55,6 +55,7 @@ struct module_state { PyObject* DatetimeMS; PyObject* _min_datetime_ms; PyObject* _max_datetime_ms; + PyObject* _type_marker_str; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -378,6 +379,9 @@ static int _load_python_objects(PyObject* module) { PyObject* compiled = NULL; struct module_state *state = GETSTATE(module); + /* Python str for faster _type_marker check */ + state->_type_marker_str = PyUnicode_FromString("_type_marker"); + if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || _load_object(&state->ObjectId, "bson.objectid", "ObjectId") || @@ -428,12 +432,12 @@ static int _load_python_objects(PyObject* module) { * * Return the type marker, 0 if there is no marker, or -1 on failure. */ -static long _type_marker(PyObject* object) { +static long _type_marker(PyObject* object, PyObject* _type_marker_str) { PyObject* type_marker = NULL; long type = 0; - if (PyObject_HasAttrString(object, "_type_marker")) { - type_marker = PyObject_GetAttrString(object, "_type_marker"); + if (PyObject_HasAttr(object, _type_marker_str)) { + type_marker = PyObject_GetAttr(object, _type_marker_str); if (type_marker == NULL) { return -1; } @@ -450,13 +454,6 @@ static long _type_marker(PyObject* object) { if (type_marker && PyLong_CheckExact(type_marker)) { type = PyLong_AsLong(type_marker); Py_DECREF(type_marker); - /* - * Py(Long|Int)_AsLong returns -1 for error but -1 is a valid value - * so we call PyErr_Occurred to differentiate. - */ - if (type == -1 && PyErr_Occurred()) { - return -1; - } } else { Py_XDECREF(type_marker); } @@ -504,13 +501,12 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr return 0; } -/* Fill out a codec_options_t* from a CodecOptions object. Use with the "O&" - * format spec in PyArg_ParseTuple. +/* Fill out a codec_options_t* from a CodecOptions object. * * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int convert_codec_options(PyObject* options_obj, void* p) { +int convert_codec_options(PyObject* self, PyObject* options_obj, void* p) { codec_options_t* options = (codec_options_t*)p; PyObject* type_registry_obj = NULL; long type_marker; @@ -527,7 +523,8 @@ int convert_codec_options(PyObject* options_obj, void* p) { &options->datetime_conversion)) return 0; - type_marker = _type_marker(options->document_class); + type_marker = _type_marker(options->document_class, + GETSTATE(self)->_type_marker_str); if (type_marker < 0) { return 0; } @@ -730,7 +727,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, * problems with python sub interpreters. Our custom types should * have a _type_marker attribute, which we can switch on instead. */ - long type = _type_marker(value); + long type = _type_marker(value, state->_type_marker_str); if (type < 0) { return 0; } @@ -1382,7 +1379,7 @@ int write_dict(PyObject* self, buffer_t buffer, long type_marker; /* check for RawBSONDocument */ - type_marker = _type_marker(dict); + type_marker = _type_marker(dict, state->_type_marker_str); if (type_marker < 0) { return 0; } @@ -1504,18 +1501,20 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* result; unsigned char check_keys; unsigned char top_level = 1; + PyObject* options_obj; codec_options_t options; buffer_t buffer; PyObject* raw_bson_document_bytes_obj; long type_marker; - if (!PyArg_ParseTuple(args, "ObO&|b", &dict, &check_keys, - convert_codec_options, &options, &top_level)) { + if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, + &options_obj, &top_level) && + convert_codec_options(self, options_obj, &options))) { return NULL; } /* check for RawBSONDocument */ - type_marker = _type_marker(dict); + type_marker = _type_marker(dict, GETSTATE(self)->_type_marker_str); if (type_marker < 0) { destroy_codec_options(&options); return NULL; @@ -2526,6 +2525,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { /* TODO: Support buffer protocol */ char* string; PyObject* bson; + PyObject* options_obj; codec_options_t options; unsigned position; unsigned max; @@ -2535,8 +2535,9 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OIIO&p", &bson, &position, &max, - convert_codec_options, &options, &raw_array)) { + if (!(PyArg_ParseTuple(args, "OIIOp", &bson, &position, &max, + &options_obj, &raw_array) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -2638,7 +2639,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { Py_buffer view = {0}; if (! (PyArg_ParseTuple(args, "OO", &bson, &options_obj) && - convert_codec_options(options_obj, &options))) { + convert_codec_options(self, options_obj, &options))) { return result; } @@ -2715,10 +2716,8 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* options_obj = NULL; Py_buffer view = {0}; - if (!PyArg_ParseTuple(args, "OO", &bson, &options_obj)) { - return NULL; - } - if (!convert_codec_options(options_obj, &options)) { + if (!(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -2966,6 +2965,7 @@ static int _cbson_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->MaxKey); Py_CLEAR(GETSTATE(m)->UTC); Py_CLEAR(GETSTATE(m)->REType); + Py_CLEAR(GETSTATE(m)->_type_marker_str); return 0; } diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 6ff453b8ff..682205bd84 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -86,7 +86,7 @@ typedef struct codec_options_t { #define _cbson_convert_codec_options_INDEX 4 #define _cbson_convert_codec_options_RETURN int -#define _cbson_convert_codec_options_PROTO (PyObject* options_obj, void* p) +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, void* p) #define _cbson_destroy_codec_options_INDEX 5 #define _cbson_destroy_codec_options_RETURN void diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 2f03ce73e0..7d5e2db3cc 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -75,19 +75,21 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { int num_to_return; PyObject* query; PyObject* field_selector; + PyObject* options_obj; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; PyObject* result = NULL; - if (!PyArg_ParseTuple(args, "Iet#iiOOO&", + if (!(PyArg_ParseTuple(args, "Iet#iiOOO", &flags, "utf-8", &collection_name, &collection_name_length, &num_to_skip, &num_to_return, &query, &field_selector, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } buffer = pymongo_buffer_new(); @@ -220,6 +222,7 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { Py_ssize_t identifier_length = 0; PyObject* docs; PyObject* doc; + PyObject* options_obj; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -229,14 +232,15 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { PyObject* iterator = NULL; /*flags, command, identifier, docs, opts*/ - if (!PyArg_ParseTuple(args, "IOet#OO&", + if (!(PyArg_ParseTuple(args, "IOet#OO", &flags, &command, "utf-8", &identifier, &identifier_length, &docs, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } buffer = pymongo_buffer_new(); @@ -528,14 +532,15 @@ _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObO&O", + if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -581,14 +586,15 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "bOObO&O", + if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -850,14 +856,15 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); - if (!PyArg_ParseTuple(args, "et#bOOO&O", "utf-8", + if (!(PyArg_ParseTuple(args, "et#bOOOO", "utf-8", &ns, &ns_len, &op, &command, &docs, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { From 3bc853a6206e833e01cb00985ef4897e044b1417 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 31 May 2023 18:48:05 -0500 Subject: [PATCH 0912/2111] PYTHON-3692 [Build Failure] Container Test failed MONGODB-AWS on MongoDB 5.0 (#1220) --- .evergreen/config.yml | 45 ++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3f06fc1a03..cdc7178ba2 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -598,12 +598,14 @@ functions: - command: shell.exec type: test params: + shell: "bash" working_dir: "src" script: | ${PREPARE_SHELL} + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_regular_aws.js + . ./activate-authawsvenv.sh + python aws_tester.py regular - command: shell.exec type: test params: @@ -628,12 +630,14 @@ functions: - command: shell.exec type: test params: + shell: "bash" working_dir: "src" script: | ${PREPARE_SHELL} + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_assume_role.js + . ./activate-authawsvenv.sh + python aws_tester.py assume-role - command: shell.exec type: test params: @@ -665,15 +669,17 @@ functions: type: test params: working_dir: "src" + shell: "bash" script: | ${PREPARE_SHELL} if [ "${skip_EC2_auth_test}" = "true" ]; then echo "This platform does not support the EC2 auth test, skipping..." exit 0 fi + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_ec2.js + . ./activate-authawsvenv.sh + python aws_tester.py ec2 - command: shell.exec type: test params: @@ -694,15 +700,17 @@ functions: type: test params: working_dir: "src" + shell: "bash" script: | ${PREPARE_SHELL} if [ "${skip_EC2_auth_test}" = "true" ]; then echo "This platform does not support the web identity auth test, skipping..." exit 0 fi + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_web_identity.js + . ./activate-authawsvenv.sh + python aws_tester.py web-identity - command: shell.exec type: test params: @@ -857,6 +865,7 @@ functions: - command: shell.exec type: test params: + shell: "bash" working_dir: "src" script: | ${PREPARE_SHELL} @@ -864,14 +873,12 @@ functions: echo "This platform does not support the ECS auth test, skipping..." exit 0 fi + set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - cat < setup.js - const mongo_binaries = "$MONGODB_BINARIES"; - const project_dir = "$PROJECT_DIRECTORY"; - EOF - - mongo --nodb setup.js aws_e2e_ecs.js + . ./activate-authawsvenv.sh + export MONGODB_BINARIES="${MONGODB_BINARIES}"; + export PROJECT_DIRECTORY="${PROJECT_DIRECTORY}"; + python aws_tester.py ecs cd - "cleanup": @@ -2328,6 +2335,12 @@ axes: batchtime: 10080 # 7 days variables: python3_binary: python3 + - id: ubuntu-18.04 + display_name: "Ubuntu 18.04" + run_on: ubuntu1804-small + batchtime: 10080 # 7 days + variables: + python3_binary: python3 - id: rhel83-zseries display_name: "RHEL 8.3 (zSeries)" run_on: rhel83-zseries-small @@ -3188,7 +3201,7 @@ buildvariants: - matrix_name: "aws-auth-test" matrix_spec: - platform: [ubuntu-20.04] + platform: [ubuntu-18.04] python-version: ["3.7"] display_name: "MONGODB-AWS Auth ${platform} ${python-version}" tasks: From 2fe01929e93929cb904aafac1038fadd3d324395 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 31 May 2023 18:48:34 -0500 Subject: [PATCH 0913/2111] BUILD-17302 AWS EC2 credential retrieval 404s in Drivers CI (#1218) --- .evergreen/config.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index cdc7178ba2..ac2ac11513 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1049,6 +1049,18 @@ functions: # Remove all Docker images docker rmi -f $(docker images -a -q) &> /dev/null || true + "teardown_aws": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" + if [ -f "./aws_e2e_setup.json" ]; then + . ./activate-authawsvenv.sh + python ./lib/aws_assign_instance_profile.py + fi + "build release": - command: shell.exec type: test @@ -1144,6 +1156,7 @@ post: - func: "upload mo artifacts" - func: "upload test results" - func: "stop mongo-orchestration" + - func: "teardown_aws" - func: "cleanup" - func: "teardown_docker" From 5831934b379e2a3c634778e92feed41837d4c9b2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 5 Jun 2023 12:03:51 -0500 Subject: [PATCH 0914/2111] PYTHON-3691 [Build Failure] test_client.TestClient.test_exhaust_network_error (#1216) --- pymongo/pyopenssl_context.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index bfc52df671..83d8f853ef 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -107,6 +107,11 @@ def _call(self, call, *args, **kwargs): try: return call(*args, **kwargs) except BLOCKING_IO_ERRORS as exc: + # Check for closed socket. + if self.fileno() == -1: + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") + raise SSLError("Underlying socket has been closed") if isinstance(exc, _SSL.WantReadError): want_read = True want_write = False From 1ba4c0bcbdde870ca3c857069a038677eef74c29 Mon Sep 17 00:00:00 2001 From: thalassemia Date: Mon, 5 Jun 2023 16:35:39 -0700 Subject: [PATCH 0915/2111] PYTHON-3718 Faster INT2STRING (#1221) --- bson/_cbsonmodule.c | 101 +++++++++++++++++++++++++++++++++++++- bson/_cbsonmodule.h | 19 ++++--- doc/contributors.rst | 1 + pymongo/_cmessagemodule.c | 7 ++- setup.py | 7 ++- test/test_bson.py | 10 ++++ 6 files changed, 134 insertions(+), 11 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 8e5e8b6c0c..a5bc66f0c5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -82,6 +82,99 @@ struct module_state { #define DATETIME_MS 3 #define DATETIME_AUTO 4 +/* Converts integer to its string representation in decimal notation. */ +extern int cbson_long_long_to_str(long long num, char* str, size_t size) { + // Buffer should fit 64-bit signed integer + if (size < 21) { + PyErr_Format( + PyExc_RuntimeError, + "Buffer too small to hold long long: %d < 21", size); + return -1; + } + int index = 0; + int sign = 1; + // Convert to unsigned to handle -LLONG_MIN overflow + unsigned long long absNum; + // Handle the case of 0 + if (num == 0) { + str[index++] = '0'; + str[index] = '\0'; + return 0; + } + // Handle negative numbers + if (num < 0) { + sign = -1; + absNum = 0ULL - (unsigned long long)num; + } else { + absNum = (unsigned long long)num; + } + // Convert the number to string + unsigned long long digit; + while (absNum > 0) { + digit = absNum % 10ULL; + str[index++] = (char)digit + '0'; // Convert digit to character + absNum /= 10; + } + // Add minus sign if negative + if (sign == -1) { + str[index++] = '-'; + } + str[index] = '\0'; // Null terminator + // Reverse the string + int start = 0; + int end = index - 1; + while (start < end) { + char temp = str[start]; + str[start++] = str[end]; + str[end--] = temp; + } + return 0; +} + +static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { + // Test extreme values + Py_ssize_t maxNum = PY_SSIZE_T_MAX; + Py_ssize_t minNum = PY_SSIZE_T_MIN; + Py_ssize_t num; + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + int res = LL2STR(str_1, (long long)minNum); + if (res == -1) { + return NULL; + } + INT2STRING(str_2, (long long)minNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + LL2STR(str_1, (long long)maxNum); + INT2STRING(str_2, (long long)maxNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + + // Test common values + for (num = 0; num < 10000; num++) { + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + LL2STR(str_1, (long long)num); + INT2STRING(str_2, (long long)num); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + } + + return args; +} + /* Get an error class from the bson.errors module. * * Returns a new ref */ @@ -1027,13 +1120,16 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } for(i = 0; i < items; i++) { int list_type_byte = pymongo_buffer_save_space(buffer, 1); - char name[16]; + char name[BUF_SIZE]; PyObject* item_value; if (list_type_byte == -1) { return 0; } - INT2STRING(name, (int)i); + int res = LL2STR(name, (long long)i); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, name, (int)strlen(name) + 1)) { return 0; } @@ -2934,6 +3030,7 @@ static PyMethodDef _CBSONMethods[] = { {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, + {"_test_long_long_to_str", _test_long_long_to_str, METH_VARARGS, "Test conversion of extreme and common Py_ssize_t values to str."}, {NULL, NULL, 0, NULL} }; diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 682205bd84..b7b92538e4 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -23,28 +23,35 @@ /* * This macro is basically an implementation of asprintf for win32 * We print to the provided buffer to get the string value as an int. + * USE LL2STR. This is kept only to test LL2STR. */ #if defined(_MSC_VER) && (_MSC_VER >= 1400) #define INT2STRING(buffer, i) \ _snprintf_s((buffer), \ - _scprintf("%d", (i)) + 1, \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) #else #define INT2STRING(buffer, i) \ _snprintf((buffer), \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif #else -#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%d", (i)) +#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%lld", (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif +/* Just enough space in char array to hold LLONG_MIN and null terminator */ +#define BUF_SIZE 21 +/* Converts integer to its string representation in decimal notation. */ +extern int cbson_long_long_to_str(long long int num, char* str, size_t size); +#define LL2STR(buffer, i) cbson_long_long_to_str((i), (buffer), sizeof(buffer)) + typedef struct type_registry_t { PyObject* encoder_map; PyObject* decoder_map; diff --git a/doc/contributors.rst b/doc/contributors.rst index 7efda5b20d..0bea46466e 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -94,3 +94,4 @@ The following is a list of people who have contributed to - Arie Bovenberg (ariebovenberg) - Ben Warner (bcwarner) - Jean-Christophe Fillion-Robin (jcfr) +- Sean Cheah (thalassemia) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 7d5e2db3cc..ee7623d832 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -767,8 +767,11 @@ _batched_write_command( int cur_doc_begin; int cur_size; int enough_data = 0; - char key[16]; - INT2STRING(key, idx); + char key[BUF_SIZE]; + int res = LL2STR(key, (long long)idx); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, "\x03", 1) || !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { goto fail; diff --git a/setup.py b/setup.py index 9e8cf4b291..e570d04c5a 100755 --- a/setup.py +++ b/setup.py @@ -263,7 +263,12 @@ def build_extension(self, ext): Extension( "pymongo._cmessage", include_dirs=["bson"], - sources=["pymongo/_cmessagemodule.c", "bson/buffer.c"], + sources=[ + "pymongo/_cmessagemodule.c", + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + ], ), ] diff --git a/test/test_bson.py b/test/test_bson.py index a6e6352333..e38fe970f2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1310,5 +1310,15 @@ def __int__(self): encode({"x": float_ms}) +class TestLongLongToString(unittest.TestCase): + def test_long_long_to_string(self): + try: + from bson import _cbson + + _cbson._test_long_long_to_str() + except ImportError: + print("_cbson was not imported. Check compilation logs.") + + if __name__ == "__main__": unittest.main() From c7e06e6fc17d829ade89916129e6264100b53b2f Mon Sep 17 00:00:00 2001 From: Dainis Gorbunovs Date: Tue, 6 Jun 2023 00:38:28 +0100 Subject: [PATCH 0916/2111] PYTHON-3725 Fix Test Failure - MockupDB test_network_disconnect_primary (#1222) --- doc/contributors.rst | 1 + test/mockupdb/test_network_disconnect_primary.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 0bea46466e..17ae4784e2 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -95,3 +95,4 @@ The following is a list of people who have contributed to - Ben Warner (bcwarner) - Jean-Christophe Fillion-Robin (jcfr) - Sean Cheah (thalassemia) +- Dainis Gorbunovs (DainisGorbunovs) diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index dd14abf84f..936130484a 100755 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -26,12 +26,12 @@ def test_network_disconnect_primary(self): # Application operation fails against primary. Test that topology # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. # http://bit.ly/1B5ttuL - primary, secondary = servers = (MockupDB() for _ in range(2)) - for server in servers: + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: server.run() self.addCleanup(server.stop) - hosts = [server.address_string for server in servers] + hosts = [server.address_string for server in (primary, secondary)] primary_response = OpReply( ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 ) From 1ad0df085841cdb052057bd1981e8d9991da55d9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 6 Jun 2023 12:06:08 -0700 Subject: [PATCH 0917/2111] PYTHON-3724 Remove null values from `command_started_event` in fle2v2-CreateCollection.yml (#1223) --- .../spec/legacy/fle2v2-CreateCollection.json | 18 ------------------ test/utils_spec_runner.py | 6 ------ 2 files changed, 24 deletions(-) diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json index 819d2eec3c..cc8bd17145 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -158,9 +158,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -343,9 +340,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -851,9 +845,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1048,9 +1039,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1367,9 +1355,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", @@ -1635,9 +1620,6 @@ "command": { "create": "encryptedCollection", "encryptedFields": { - "escCollection": null, - "ecocCollection": null, - "eccCollection": null, "fields": [ { "path": "firstName", diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4ca6f1cc58..21cc3e6d81 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -430,12 +430,6 @@ def check_events(self, test, listener, session_ids): elif key not in actual: self.fail(f"Expected key [{key}] in {actual!r}") else: - # Workaround an incorrect command started event in fle2v2-CreateCollection.yml - # added in DRIVERS-2524. - if key == "encryptedFields": - for n in ("eccCollection", "ecocCollection", "escCollection"): - if val.get(n) is None: - val.pop(n, None) self.assertEqual( val, decode_raw(actual[key]), f"Key [{key}] in {actual}" ) From 7146be01aef4dd34fdd22c9af942f350ad8e581f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 9 Jun 2023 12:00:14 -0700 Subject: [PATCH 0918/2111] PYTHON-3721 Stop Testing on AWS Linux 2018 (#1226) --- .evergreen/config.yml | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac2ac11513..f62cb0d0c0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2284,14 +2284,6 @@ axes: - id: platform display_name: OS values: - - id: awslinux - display_name: "Amazon Linux 2018 (Enterprise)" - run_on: amazon1-2018-test - batchtime: 10080 # 7 days - variables: - skip_crypt_shared: true - python3_binary: "/opt/python/3.8/bin/python3" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - id: archlinux-test display_name: "Archlinux" run_on: archlinux-test @@ -2330,6 +2322,12 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz + - id: rhel76 + display_name: "RHEL 7.6" + run_on: rhel76-small + batchtime: 10080 # 7 days + variables: + libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel84 display_name: "RHEL 8.4" run_on: rhel84-small @@ -2990,7 +2988,7 @@ buildvariants: - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: awslinux + platform: rhel76 # Python 3.10+ requires OpenSSL 1.1.1+ python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] auth-ssl: "*" From 0bce579b819561021e2abda9681670ff4e925437 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 9 Jun 2023 13:08:56 -0700 Subject: [PATCH 0919/2111] PYTHON-3728 Simplify convert_codec_options signature (#1225) --- bson/_cbsonmodule.c | 6 +++--- bson/_cbsonmodule.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index a5bc66f0c5..2632e2f339 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -599,8 +599,7 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int convert_codec_options(PyObject* self, PyObject* options_obj, void* p) { - codec_options_t* options = (codec_options_t*)p; +int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { PyObject* type_registry_obj = NULL; long type_marker; @@ -613,8 +612,9 @@ int convert_codec_options(PyObject* self, PyObject* options_obj, void* p) { &options->unicode_decode_error_handler, &options->tzinfo, &type_registry_obj, - &options->datetime_conversion)) + &options->datetime_conversion)) { return 0; + } type_marker = _type_marker(options->document_class, GETSTATE(self)->_type_marker_str); diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index b7b92538e4..3be2b74427 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -93,7 +93,7 @@ typedef struct codec_options_t { #define _cbson_convert_codec_options_INDEX 4 #define _cbson_convert_codec_options_RETURN int -#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, void* p) +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, codec_options_t* options) #define _cbson_destroy_codec_options_INDEX 5 #define _cbson_destroy_codec_options_RETURN void From 3f687f71fb1f6c6e7fbf742cc7731213f3521627 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 12 Jun 2023 12:41:59 -0700 Subject: [PATCH 0920/2111] PYTHON-3443 Remove redundant code to avoid Coverity warnings (#1228) --- bson/_cbsonmodule.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 2632e2f339..5918a678c6 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -304,19 +304,15 @@ static int millis_from_datetime_ms(PyObject* dt, long long* out){ long long millis; if (!(ll_millis = PyNumber_Long(dt))){ - if (PyErr_Occurred()) { // TypeError - return 0; - } - } - - if ((millis = PyLong_AsLongLong(ll_millis)) == -1){ - if (PyErr_Occurred()) { /* Overflow */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB datetimes can only handle up to 8-byte ints"); - return 0; - } + return 0; } + millis = PyLong_AsLongLong(ll_millis); Py_DECREF(ll_millis); + if (millis == -1 && PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } *out = millis; return 1; } @@ -2081,7 +2077,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, millis = max_millis; } // Continues from here to return a datetime. - } else if (dt_auto) { + } else { // dt_auto if (millis < min_millis || millis > max_millis){ value = datetime_ms_from_millis(self, millis); break; // Out-of-range so done. From ec3437849e4cf4186f117d85be173494f1bb9b75 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 12 Jun 2023 15:43:30 -0700 Subject: [PATCH 0921/2111] PYTHON-3702 Stop using utcnow and utcfromtimestamp (#1229) --- bson/datetime_ms.py | 2 +- doc/examples/datetimes.rst | 8 +++++--- doc/tutorial.rst | 2 +- gridfs/grid_file.py | 2 +- pymongo/ocsp_cache.py | 5 +++-- pymongo/ocsp_support.py | 3 ++- test/test_bson.py | 4 ++-- test/test_client.py | 2 +- test/test_objectid.py | 4 ++-- test/test_ocsp_cache.py | 4 ++-- test/utils_selection_tests.py | 2 +- 11 files changed, 21 insertions(+), 17 deletions(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index c64a0cce87..5fc8b70328 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -26,7 +26,7 @@ from bson.tz_util import utc EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) +EPOCH_NAIVE = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) class DatetimeMS: diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 562c9480a6..2dc9c003eb 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -25,10 +25,12 @@ time into MongoDB: .. doctest:: - >>> result = db.objects.insert_one({"last_modified": datetime.datetime.utcnow()}) + >>> result = db.objects.insert_one( + ... {"last_modified": datetime.datetime.now(tz=timezone.utc)} + ... ) -Always use :meth:`datetime.datetime.utcnow`, which returns the current time in -UTC, instead of :meth:`datetime.datetime.now`, which returns the current local +Always use :meth:`datetime.datetime.now(tz=timezone.utc)`, which explicitly returns the current time in +UTC, instead of :meth:`datetime.datetime.now`, with no arguments, which returns the current local time. Avoid doing this: .. doctest:: diff --git a/doc/tutorial.rst b/doc/tutorial.rst index d7854c885a..768b535fe3 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -109,7 +109,7 @@ post: ... "author": "Mike", ... "text": "My first blog post!", ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow(), + ... "date": datetime.datetime.now(tz=timezone.utc), ... } Note that documents can contain native Python types (like diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index fd260963d7..fe3b56cdde 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -292,7 +292,7 @@ def __flush(self) -> Any: self.__flush_buffer() # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) - self._file["uploadDate"] = datetime.datetime.utcnow() + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) return self._coll.files.insert_one(self._file, session=self._session) except DuplicateKeyError: diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 0c50902167..b60a24b027 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -16,6 +16,7 @@ from collections import namedtuple from datetime import datetime as _datetime +from datetime import timezone from pymongo.lock import _create_lock @@ -60,7 +61,7 @@ def __setitem__(self, key, value): return # Do nothing if the response is invalid. - if not (value.this_update <= _datetime.utcnow() < value.next_update): + if not (value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update): return # Cache new response OR update cached response if new response @@ -81,7 +82,7 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if value.this_update <= _datetime.utcnow() < value.next_update: + if value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update: return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index e7f4a15d84..dda92d0d3b 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -17,6 +17,7 @@ import logging as _logging import re as _re from datetime import datetime as _datetime +from datetime import timezone from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend @@ -219,7 +220,7 @@ def _verify_response(issuer, response): # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? - now = _datetime.utcnow() + now = _datetime.now(tz=timezone.utc) # RFC6960, Section 3.2, Number 5 if response.this_update > now: _LOGGER.debug("thisUpdate is in the future") diff --git a/test/test_bson.py b/test/test_bson.py index e38fe970f2..12fbea92fa 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -986,7 +986,7 @@ def test_codec_options_repr(self): def test_decode_all_defaults(self): # Test decode_all()'s default document_class is dict and tz_aware is # False. - doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} decoded = bson.decode_all(bson.encode(doc))[0] self.assertIsInstance(decoded["sub_document"], dict) @@ -998,7 +998,7 @@ def test_decode_all_defaults(self): def test_decode_all_no_options(self): # Test decode_all()'s default document_class is dict and tz_aware is # False. - doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()} + doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)} decoded = bson.decode_all(bson.encode(doc), None)[0] self.assertIsInstance(decoded["sub_document"], dict) diff --git a/test/test_client.py b/test/test_client.py index ec2b4bac97..bba6b37287 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1143,7 +1143,7 @@ def test_tz_aware(self): naive = self.client aware.pymongo_test.drop_collection("test") - now = datetime.datetime.utcnow() + now = datetime.datetime.now(tz=datetime.timezone.utc) aware.pymongo_test.test.insert_one({"x": now}) self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) diff --git a/test/test_objectid.py b/test/test_objectid.py index bb1af865c0..cb96feaf34 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -86,7 +86,7 @@ def test_binary_str_equivalence(self): self.assertEqual(a, ObjectId(str(a))) def test_generation_time(self): - d1 = datetime.datetime.utcnow() + d1 = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d2 = ObjectId().generation_time self.assertEqual(utc, d2.tzinfo) @@ -97,7 +97,7 @@ def test_from_datetime(self): if "PyPy 1.8.0" in sys.version: # See https://bugs.pypy.org/issue1092 raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") - d = datetime.datetime.utcnow() + d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 0e6777a9f9..3740b6b28a 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -17,7 +17,7 @@ import random import sys from collections import namedtuple -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from os import urandom from time import sleep from typing import Any @@ -61,7 +61,7 @@ def _create_mock_request(self): ) def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): - now = datetime.utcnow() + now = datetime.now(tz=timezone.utc) this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index ccb3897966..6967544f09 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -44,7 +44,7 @@ def get_addresses(server_list): def make_last_write_date(server): - epoch = datetime.datetime.utcfromtimestamp(0) + epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) millis = server.get("lastWrite", {}).get("lastWriteDate") if millis: diff = ((millis % 1000) + 1000) % 1000 From eed9d02a2e2c3e0b7fcde6ad4d07d09832254eaa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 13 Jun 2023 11:30:50 -0500 Subject: [PATCH 0922/2111] PYTHON-3731 Disable MONGODB-OIDC Auth for 4.4 (#1230) --- .evergreen/config.yml | 2 +- pymongo/auth.py | 1 - test/auth_aws/test_auth_oidc.py | 4 ++++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f62cb0d0c0..c3e8a3d1f3 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -3204,7 +3204,7 @@ buildvariants: - matrix_name: "oidc-auth-test" matrix_spec: - platform: [ ubuntu-20.04 ] + platform: [ rhel84 ] python-version: ["3.9"] display_name: "MONGODB-OIDC Auth ${platform} ${python-version}" tasks: diff --git a/pymongo/auth.py b/pymongo/auth.py index ac7cb254e9..00b6faa6fd 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -510,7 +510,6 @@ def _authenticate_default(credentials, sock_info): "MONGODB-CR": _authenticate_mongo_cr, "MONGODB-X509": _authenticate_x509, "MONGODB-AWS": _authenticate_aws, - "MONGODB-OIDC": _authenticate_oidc, "PLAIN": _authenticate_plain, "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), diff --git a/test/auth_aws/test_auth_oidc.py b/test/auth_aws/test_auth_oidc.py index 26e71573d4..7b42f98a1c 100644 --- a/test/auth_aws/test_auth_oidc.py +++ b/test/auth_aws/test_auth_oidc.py @@ -28,12 +28,16 @@ from bson import SON from pymongo import MongoClient +from pymongo.auth import _AUTH_MAP, _authenticate_oidc from pymongo.auth_oidc import _CACHE as _oidc_cache from pymongo.cursor import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.operations import InsertOne +# Force MONGODB-OIDC to be enabled. +_AUTH_MAP["MONGODB-OIDC"] = _authenticate_oidc # type:ignore + class TestAuthOIDC(unittest.TestCase): uri: str From ece45b1edf451f606c1efebfc944d13f80fbdd1e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 14 Jun 2023 10:00:52 -0700 Subject: [PATCH 0923/2111] PYTHON-3699 Add prose test for change stream splitting (#1232) --- test/test_change_stream.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index c9ddfcd137..dae8b1f5a1 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -762,6 +762,26 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + # Prose test no. 19 + @no_type_check + @client_context.require_version_min(7, 0, -1) + def test_split_large_change(self): + self.db.drop_collection("test_split_large_change") + coll = self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + with coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) + class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): dbs: list From 1269c006da2ad9c35d812ff309ded7beebc50e81 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 14 Jun 2023 11:27:58 -0700 Subject: [PATCH 0924/2111] PYTHON-3735 Add types to PyMongo auth module (#1231) --- pymongo/auth.py | 87 +++++++++++++++++++++++++++++++------------------ pymongo/pool.py | 4 ++- 2 files changed, 59 insertions(+), 32 deletions(-) diff --git a/pymongo/auth.py b/pymongo/auth.py index 00b6faa6fd..b4d04f8d14 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -13,15 +13,17 @@ # limitations under the License. """Authentication helpers.""" +from __future__ import annotations import functools import hashlib import hmac import os import socket +import typing from base64 import standard_b64decode, standard_b64encode from collections import namedtuple -from typing import Callable, Mapping +from typing import TYPE_CHECKING, Any, Callable, Mapping, MutableMapping, Optional from urllib.parse import quote from bson.binary import Binary @@ -31,6 +33,10 @@ from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep +if TYPE_CHECKING: + from pymongo.hello import Hello + from pymongo.pool import SocketInfo + HAVE_KERBEROS = True _USE_PRINCIPAL = False try: @@ -66,21 +72,21 @@ class _Cache: _hash_val = hash("_Cache") - def __init__(self): + def __init__(self) -> None: self.data = None - def __eq__(self, other): + def __eq__(self, other: object) -> bool: # Two instances must always compare equal. if isinstance(other, _Cache): return True return NotImplemented - def __ne__(self, other): + def __ne__(self, other: object) -> bool: if isinstance(other, _Cache): return False return NotImplemented - def __hash__(self): + def __hash__(self) -> int: return self._hash_val @@ -101,7 +107,14 @@ def __hash__(self): """Mechanism properties for MONGODB-AWS authentication.""" -def _build_credentials_tuple(mech, source, user, passwd, extra, database): +def _build_credentials_tuple( + mech: str, + source: Optional[str], + user: str, + passwd: str, + extra: Mapping[str, Any], + database: Optional[str], +) -> MongoCredential: """Build and return a mechanism specific credentials tuple.""" if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: raise ConfigurationError(f"{mech} requires a username.") @@ -175,17 +188,21 @@ def _build_credentials_tuple(mech, source, user, passwd, extra, database): return MongoCredential(mech, source_database, user, passwd, None, _Cache()) -def _xor(fir, sec): +def _xor(fir: bytes, sec: bytes) -> bytes: """XOR two byte strings together (python 3.x).""" return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) -def _parse_scram_response(response): +def _parse_scram_response(response: bytes) -> dict: """Split a scram response into key, value pairs.""" - return dict(item.split(b"=", 1) for item in response.split(b",")) + return dict( + typing.cast(typing.Tuple[str, str], item.split(b"=", 1)) for item in response.split(b",") + ) -def _authenticate_scram_start(credentials, mechanism): +def _authenticate_scram_start( + credentials: MongoCredential, mechanism: str +) -> tuple[bytes, bytes, MutableMapping[str, Any]]: username = credentials.username user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") nonce = standard_b64encode(os.urandom(32)) @@ -203,7 +220,9 @@ def _authenticate_scram_start(credentials, mechanism): return nonce, first_bare, cmd -def _authenticate_scram(credentials, sock_info, mechanism): +def _authenticate_scram( + credentials: MongoCredential, sock_info: SocketInfo, mechanism: str +) -> None: """Authenticate using SCRAM.""" username = credentials.username if mechanism == "SCRAM-SHA-256": @@ -287,7 +306,7 @@ def _authenticate_scram(credentials, sock_info, mechanism): raise OperationFailure("SASL conversation failed to complete.") -def _password_digest(username, password): +def _password_digest(username: str, password: str) -> str: """Get a password digest to use for authentication.""" if not isinstance(password, str): raise TypeError("password must be an instance of str") @@ -302,7 +321,7 @@ def _password_digest(username, password): return md5hash.hexdigest() -def _auth_key(nonce, username, password): +def _auth_key(nonce: str, username: str, password: str) -> str: """Get an auth key to use for authentication.""" digest = _password_digest(username, password) md5hash = hashlib.md5() @@ -311,7 +330,7 @@ def _auth_key(nonce, username, password): return md5hash.hexdigest() -def _canonicalize_hostname(hostname): +def _canonicalize_hostname(hostname: str) -> str: """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( @@ -326,7 +345,7 @@ def _canonicalize_hostname(hostname): return name[0].lower() -def _authenticate_gssapi(credentials, sock_info): +def _authenticate_gssapi(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using GSSAPI.""" if not HAVE_KERBEROS: raise ConfigurationError( @@ -443,7 +462,7 @@ def _authenticate_gssapi(credentials, sock_info): raise OperationFailure(str(exc)) -def _authenticate_plain(credentials, sock_info): +def _authenticate_plain(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using SASL PLAIN (RFC 4616)""" source = credentials.source username = credentials.username @@ -460,7 +479,7 @@ def _authenticate_plain(credentials, sock_info): sock_info.command(source, cmd) -def _authenticate_x509(credentials, sock_info): +def _authenticate_x509(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using MONGODB-X509.""" ctx = sock_info.auth_ctx if ctx and ctx.speculate_succeeded(): @@ -471,7 +490,7 @@ def _authenticate_x509(credentials, sock_info): sock_info.command("$external", cmd) -def _authenticate_mongo_cr(credentials, sock_info): +def _authenticate_mongo_cr(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using MONGODB-CR.""" source = credentials.source username = credentials.username @@ -486,7 +505,7 @@ def _authenticate_mongo_cr(credentials, sock_info): sock_info.command(source, query) -def _authenticate_default(credentials, sock_info): +def _authenticate_default(credentials: MongoCredential, sock_info: SocketInfo) -> None: if sock_info.max_wire_version >= 7: if sock_info.negotiated_mechs: mechs = sock_info.negotiated_mechs @@ -518,35 +537,39 @@ def _authenticate_default(credentials, sock_info): class _AuthContext: - def __init__(self, credentials, address): + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: self.credentials = credentials - self.speculative_authenticate = None + self.speculative_authenticate: Optional[Mapping[str, Any]] = None self.address = address @staticmethod - def from_credentials(creds, address): + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) if spec_cls: return spec_cls(creds, address) return None - def speculate_command(self): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: raise NotImplementedError - def parse_response(self, hello): + def parse_response(self, hello: Hello) -> None: self.speculative_authenticate = hello.speculative_authenticate - def speculate_succeeded(self): + def speculate_succeeded(self) -> bool: return bool(self.speculative_authenticate) class _ScramContext(_AuthContext): - def __init__(self, credentials, address, mechanism): + def __init__( + self, credentials: MongoCredential, address: tuple[str, int], mechanism: str + ) -> None: super().__init__(credentials, address) - self.scram_data = None + self.scram_data: Optional[tuple[bytes, bytes]] = None self.mechanism = mechanism - def speculate_command(self): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) # The 'db' field is included only on the speculative command. cmd["db"] = self.credentials.source @@ -556,7 +579,7 @@ def speculate_command(self): class _X509Context(_AuthContext): - def speculate_command(self): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")]) if self.credentials.username is not None: cmd["user"] = self.credentials.username @@ -564,7 +587,7 @@ def speculate_command(self): class _OIDCContext(_AuthContext): - def speculate_command(self): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: authenticator = _get_authenticator(self.credentials, self.address) cmd = authenticator.auth_start_cmd(False) if cmd is None: @@ -582,7 +605,9 @@ def speculate_command(self): } -def authenticate(credentials, sock_info, reauthenticate=False): +def authenticate( + credentials: MongoCredential, sock_info: SocketInfo, reauthenticate: bool = False +) -> None: """Authenticate sock_info.""" mechanism = credentials.mechanism auth_func = _AUTH_MAP[mechanism] diff --git a/pymongo/pool.py b/pymongo/pool.py index 5bae8ce878..2b498078c2 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -758,7 +758,9 @@ def _hello(self, cluster_time, topology_version, heartbeat_frequency): cmd["saslSupportedMechs"] = creds.source + "." + creds.username auth_ctx = auth._AuthContext.from_credentials(creds, self.address) if auth_ctx: - cmd["speculativeAuthenticate"] = auth_ctx.speculate_command() + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate else: auth_ctx = None From 6a04fe2c91efe4251b924a5f32a3b40e80547adc Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 14 Jun 2023 16:11:26 -0700 Subject: [PATCH 0925/2111] PYTHON-3702 Stop using utcnow and utcfromtimestamp changelog update (#1235) --- doc/changelog.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3d03a6f386..e0e316e5b6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -16,6 +16,9 @@ Changes in Version 4.4 - pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and PyMongo 4.4+. +- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated in Python 3.12, for reasons explained `in this Github issue`_. Instead, users should use :meth:`datetime.datetime.now(tz=timezone.utc)` and :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead. + +.. _in this Github issue: https://github.com/python/cpython/issues/103857 Issues Resolved ............... From e27e710184d91c34eefad80cc3b9e412bf926ae8 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 15 Jun 2023 08:57:18 -0700 Subject: [PATCH 0926/2111] PYTHON-3736 Add Noah to code owners for PyMongo, Motor, and PyMongoArrow (#1237) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 15a41b6ce6..3be0c9b0d1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ # Global owner for repo -* @blink1073 @juliusgeo @ShaneHarvey +* @blink1073 @NoahStapp @ShaneHarvey From bcfdd200c3987ce5606bf7cd4408062e9d741c0d Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 15 Jun 2023 08:57:50 -0700 Subject: [PATCH 0927/2111] PYTHON-3702 bson datetime utc import cleanup (#1233) --- bson/datetime_ms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 5fc8b70328..c422d6e379 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -26,7 +26,7 @@ from bson.tz_util import utc EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) +EPOCH_NAIVE = EPOCH_AWARE.replace(tzinfo=None) class DatetimeMS: From f7874fb110851b16a70ac611a5a016467988becd Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 15 Jun 2023 11:54:20 -0700 Subject: [PATCH 0928/2111] PYTHON-2287 Improve error message for invalid boolean option (#1236) --- bson/codec_options.py | 2 +- doc/contributors.rst | 1 + pymongo/common.py | 9 +-------- pymongo/pyopenssl_context.py | 7 +++---- pymongo/write_concern.py | 14 ++++++++++---- test/test_common.py | 7 +++++++ 6 files changed, 23 insertions(+), 17 deletions(-) diff --git a/bson/codec_options.py b/bson/codec_options.py index a0bdd0eeb9..45860fa705 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -397,7 +397,7 @@ def __new__( "subclass of collections.abc.MutableMapping" ) if not isinstance(tz_aware, bool): - raise TypeError("tz_aware must be True or False") + raise TypeError(f"tz_aware must be True or False, was: tz_aware={tz_aware}") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( "uuid_representation must be a value from bson.binary.UuidRepresentation" diff --git a/doc/contributors.rst b/doc/contributors.rst index 17ae4784e2..e6d5e5310d 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -96,3 +96,4 @@ The following is a list of people who have contributed to - Jean-Christophe Fillion-Robin (jcfr) - Sean Cheah (thalassemia) - Dainis Gorbunovs (DainisGorbunovs) +- Iris Ho (sleepyStick) diff --git a/pymongo/common.py b/pymongo/common.py index 82c773695a..15a4c6f227 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -50,7 +50,7 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode from pymongo.server_api import ServerApi -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) @@ -170,13 +170,6 @@ def raise_config_error(key: str, dummy: Any) -> NoReturn: } -def validate_boolean(option: str, value: Any) -> bool: - """Validates that 'value' is True or False.""" - if isinstance(value, bool): - return value - raise TypeError(f"{option} must be True or False") - - def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 83d8f853ef..d6762bcaa2 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -37,6 +37,7 @@ from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback from pymongo.socket_checker import SocketChecker as _SocketChecker from pymongo.socket_checker import _errno_from_exception +from pymongo.write_concern import validate_boolean try: import certifi @@ -228,8 +229,7 @@ def __get_check_hostname(self): return self._check_hostname def __set_check_hostname(self, value): - if not isinstance(value, bool): - raise TypeError("check_hostname must be True or False") + validate_boolean("check_hostname", value) self._check_hostname = value check_hostname = property(__get_check_hostname, __set_check_hostname) @@ -238,8 +238,7 @@ def __get_check_ocsp_endpoint(self): return self._callback_data.check_ocsp_endpoint def __set_check_ocsp_endpoint(self, value): - if not isinstance(value, bool): - raise TypeError("check_ocsp must be True or False") + validate_boolean("check_ocsp", value) self._callback_data.check_ocsp_endpoint = value check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint) diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 25f87954b5..d62c3c3117 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -19,6 +19,14 @@ from pymongo.errors import ConfigurationError +# Moved here to avoid a circular import. +def validate_boolean(option: str, value: Any) -> bool: + """Validates that 'value' is True or False.""" + if isinstance(value, bool): + return value + raise TypeError(f"{option} must be True or False, was: {option}={value}") + + class WriteConcern: """WriteConcern @@ -65,13 +73,11 @@ def __init__( self.__document["wtimeout"] = wtimeout if j is not None: - if not isinstance(j, bool): - raise TypeError("j must be True or False") + validate_boolean("j", j) self.__document["j"] = j if fsync is not None: - if not isinstance(fsync, bool): - raise TypeError("fsync must be True or False") + validate_boolean("fsync", fsync) if j and fsync: raise ConfigurationError("Can't set both j and fsync at the same time") self.__document["fsync"] = fsync diff --git a/test/test_common.py b/test/test_common.py index 76367ffa0c..f1769cb214 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -165,6 +165,13 @@ def test_mongo_client(self): self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) + def test_validate_boolean(self): + self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + if __name__ == "__main__": unittest.main() From d86fb9496a6418d42c13d9caa0946dfb4e42df5f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 15 Jun 2023 14:07:48 -0500 Subject: [PATCH 0929/2111] PYTHON-3519 Skip test_pool_paused_error_is_retryable on PyPy for now (#1238) --- test/test_retryable_reads.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index ee12c524c9..97c51cd44f 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -162,6 +162,9 @@ class TestPoolPausedError(IntegrationTest): @client_context.require_failCommand_blockConnection @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flakey on PyPy") cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) From ada1280ad30ad5cd771b9a3c537ec3b4f83e3ba8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 15 Jun 2023 14:08:13 -0500 Subject: [PATCH 0930/2111] PYTHON-3011 Skip test_connections_are_only_returned_once on PyPy for now (#1239) --- test/test_load_balancer.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index df68b3e626..9a824bbaf4 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -37,6 +37,9 @@ class TestLB(IntegrationTest): RUN_ON_SERVERLESS = True def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") pool = get_pool(self.client) nconns = len(pool.sockets) self.db.test.find_one({}) From 2cfebf52cdd7d35cc2502aef90d262c28b684817 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 15 Jun 2023 12:34:02 -0700 Subject: [PATCH 0931/2111] PYTHON-3706 Skip flaky test on Windows/macOS (#1241) --- test/test_encryption.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/test_encryption.py b/test/test_encryption.py index 314b8dfbbe..0b9087359e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -639,6 +639,11 @@ def get_object_name(self, op): def maybe_skip_scenario(self, test): super().maybe_skip_scenario(test) desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") From 601d1ec3a13434761bfd9997cc8ee6633c3d9f93 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 15 Jun 2023 12:54:29 -0700 Subject: [PATCH 0932/2111] PYTHON-3737 Use __future__ annotations for forward reference type hints (#1234) --- pymongo/change_stream.py | 5 ++- pymongo/client_session.py | 6 ++- pymongo/collection.py | 71 ++++++++++++++++++----------------- pymongo/command_cursor.py | 11 +++--- pymongo/cursor.py | 14 ++++--- pymongo/database.py | 44 +++++++++++----------- pymongo/encryption_options.py | 3 +- pymongo/mongo_client.py | 11 +++--- pymongo/monitoring.py | 18 +++++---- test/test_comment.py | 5 ++- 10 files changed, 101 insertions(+), 87 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index c53f981188..3a4d968c18 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -13,6 +13,7 @@ # permissions and limitations under the License. """Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations import copy from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union @@ -96,7 +97,7 @@ class ChangeStream(Generic[_DocumentType]): def __init__( self, target: Union[ - "MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]" + MongoClient[_DocumentType], Database[_DocumentType], Collection[_DocumentType] ], pipeline: Optional[_Pipeline], full_document: Optional[str], @@ -105,7 +106,7 @@ def __init__( batch_size: Optional[int], collation: Optional[_CollationIn], start_at_operation_time: Optional[Timestamp], - session: Optional["ClientSession"], + session: Optional[ClientSession], start_after: Optional[Mapping[str, Any]], comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, diff --git a/pymongo/client_session.py b/pymongo/client_session.py index dbc5f3aa8d..08d9f03bb5 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -133,6 +133,8 @@ ======= """ +from __future__ import annotations + import collections import time import uuid @@ -478,7 +480,7 @@ class ClientSession: def __init__( self, - client: "MongoClient", + client: MongoClient, server_session: Any, options: SessionOptions, implicit: bool, @@ -524,7 +526,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self) -> "MongoClient": + def client(self) -> MongoClient: """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ diff --git a/pymongo/collection.py b/pymongo/collection.py index 3b9001240e..428b1b0931 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -13,6 +13,7 @@ # limitations under the License. """Collection level utilities for Mongo.""" +from __future__ import annotations from collections import abc from typing import ( @@ -114,14 +115,14 @@ class Collection(common.BaseObject, Generic[_DocumentType]): def __init__( self, - database: "Database[_DocumentType]", + database: Database[_DocumentType], name: str, create: Optional[bool] = False, - codec_options: Optional["CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - session: Optional["ClientSession"] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, **kwargs: Any, ) -> None: """Get / create a Mongo collection. @@ -335,7 +336,7 @@ def __create( session=session, ) - def __getattr__(self, name: str) -> "Collection[_DocumentType]": + def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a sub-collection of this collection by name. Raises InvalidName if an invalid collection name is used. @@ -351,7 +352,7 @@ def __getattr__(self, name: str) -> "Collection[_DocumentType]": ) return self.__getitem__(name) - def __getitem__(self, name: str) -> "Collection[_DocumentType]": + def __getitem__(self, name: str) -> Collection[_DocumentType]: return Collection( self.__database, f"{self.__name}.{name}", @@ -397,7 +398,7 @@ def name(self) -> str: return self.__name @property - def database(self) -> "Database[_DocumentType]": + def database(self) -> Database[_DocumentType]: """The :class:`~pymongo.database.Database` that this :class:`Collection` is a part of. """ @@ -405,11 +406,11 @@ def database(self) -> "Database[_DocumentType]": def with_options( self, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> "Collection[_DocumentType]": + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: """Get a clone of this collection changing the specified settings. >>> coll1.read_preference @@ -455,7 +456,7 @@ def bulk_write( requests: Sequence[_WriteOp[_DocumentType]], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, let: Optional[Mapping] = None, ) -> BulkWriteResult: @@ -585,7 +586,7 @@ def insert_one( self, document: Union[_DocumentType, RawBSONDocument], bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> InsertOneResult: """Insert a single document. @@ -653,7 +654,7 @@ def insert_many( documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> InsertManyResult: """Insert an iterable of documents. @@ -855,7 +856,7 @@ def replace_one( bypass_document_validation: bool = False, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: @@ -959,7 +960,7 @@ def update_one( collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: @@ -1073,7 +1074,7 @@ def update_many( bypass_document_validation: Optional[bool] = None, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: @@ -1168,7 +1169,7 @@ def update_many( def drop( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> None: @@ -1306,7 +1307,7 @@ def delete_one( filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> DeleteResult: @@ -1373,7 +1374,7 @@ def delete_many( filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> DeleteResult: @@ -1769,7 +1770,7 @@ def _cmd(session, server, sock_info, read_preference): def count_documents( self, filter: Mapping[str, Any], - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> int: @@ -1860,7 +1861,7 @@ def _retryable_non_cursor_read(self, func, session): def create_indexes( self, indexes: Sequence[IndexModel], - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> List[str]: @@ -1952,7 +1953,7 @@ def gen_indexes(): def create_index( self, keys: _IndexKeyHint, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> str: @@ -2071,7 +2072,7 @@ def create_index( def drop_indexes( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> None: @@ -2107,7 +2108,7 @@ def drop_indexes( def drop_index( self, index_or_name: _IndexKeyHint, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> None: @@ -2174,7 +2175,7 @@ def drop_index( def list_indexes( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. @@ -2239,7 +2240,7 @@ def _cmd(session, server, sock_info, read_preference): def index_information( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> MutableMapping[str, Any]: """Get information on this collection's indexes. @@ -2282,7 +2283,7 @@ def index_information( def options( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> MutableMapping[str, Any]: """Get the options set on this collection. @@ -2361,7 +2362,7 @@ def _aggregate( def aggregate( self, pipeline: _Pipeline, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -2458,7 +2459,7 @@ def aggregate( def aggregate_raw_batches( self, pipeline: _Pipeline, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> RawBatchCursor[_DocumentType]: @@ -2509,7 +2510,7 @@ def watch( batch_size: Optional[int] = None, collation: Optional[_CollationIn] = None, start_at_operation_time: Optional[Timestamp] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, @@ -2644,7 +2645,7 @@ def watch( def rename( self, new_name: str, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> MutableMapping[str, Any]: @@ -2709,7 +2710,7 @@ def distinct( self, key: str, filter: Optional[Mapping[str, Any]] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> List: @@ -2860,7 +2861,7 @@ def find_one_and_delete( projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, sort: Optional[_IndexList] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -2953,7 +2954,7 @@ def find_one_and_replace( upsert: bool = False, return_document: bool = ReturnDocument.BEFORE, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -3062,7 +3063,7 @@ def find_one_and_update( return_document: bool = ReturnDocument.BEFORE, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index d57b45154d..c831dfb49b 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -13,6 +13,7 @@ # limitations under the License. """CommandCursor class to iterate over command results.""" +from __future__ import annotations from collections import deque from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, NoReturn, Optional @@ -36,12 +37,12 @@ class CommandCursor(Generic[_DocumentType]): def __init__( self, - collection: "Collection[_DocumentType]", + collection: Collection[_DocumentType], cursor_info: Mapping[str, Any], address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, explicit_session: bool = False, comment: Any = None, ) -> None: @@ -267,7 +268,7 @@ def address(self) -> Optional[_Address]: return self.__address @property - def session(self) -> Optional["ClientSession"]: + def session(self) -> Optional[ClientSession]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 @@ -312,12 +313,12 @@ class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): def __init__( self, - collection: "Collection[_DocumentType]", + collection: Collection[_DocumentType], cursor_info: Mapping[str, Any], address: Optional[_Address], batch_size: int = 0, max_await_time_ms: Optional[int] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, explicit_session: bool = False, comment: Any = None, ) -> None: diff --git a/pymongo/cursor.py b/pymongo/cursor.py index cc4e1a1146..8d131a711e 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -13,6 +13,8 @@ # limitations under the License. """Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + import copy import warnings from collections import deque @@ -163,7 +165,7 @@ class Cursor(Generic[_DocumentType]): def __init__( self, - collection: "Collection[_DocumentType]", + collection: Collection[_DocumentType], filter: Optional[Mapping[str, Any]] = None, projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, skip: int = 0, @@ -184,7 +186,7 @@ def __init__( show_record_id: Optional[bool] = None, snapshot: Optional[bool] = None, comment: Optional[Any] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, allow_disk_use: Optional[bool] = None, let: Optional[bool] = None, ) -> None: @@ -202,7 +204,7 @@ def __init__( self.__exhaust = False self.__sock_mgr: Any = None self.__killed = False - self.__session: Optional["ClientSession"] + self.__session: Optional[ClientSession] if session: self.__session = session @@ -312,7 +314,7 @@ def __init__( self.__collname = collection.name @property - def collection(self) -> "Collection[_DocumentType]": + def collection(self) -> Collection[_DocumentType]: """The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating. """ @@ -1230,7 +1232,7 @@ def address(self) -> Optional[Tuple[str, Any]]: return self.__address @property - def session(self) -> Optional["ClientSession"]: + def session(self) -> Optional[ClientSession]: """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. .. versionadded:: 3.6 @@ -1313,7 +1315,7 @@ class RawBatchCursor(Cursor, Generic[_DocumentType]): _query_class = _RawBatchQuery _getmore_class = _RawBatchGetMore - def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None: + def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: """Create a new cursor / iterator over raw batches of BSON data. Should not be called directly by application developers - diff --git a/pymongo/database.py b/pymongo/database.py index 66cfce2090..1fa9913c60 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -13,6 +13,8 @@ # limitations under the License. """Database level operations.""" +from __future__ import annotations + from copy import deepcopy from typing import ( TYPE_CHECKING, @@ -74,10 +76,10 @@ def __init__( self, client: "MongoClient[_DocumentType]", name: str, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, ) -> None: """Get a database by client and name. @@ -154,10 +156,10 @@ def name(self) -> str: def with_options( self, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, ) -> "Database[_DocumentType]": """Get a clone of this database changing the specified settings. @@ -241,10 +243,10 @@ def __getitem__(self, name: str) -> "Collection[_DocumentType]": def get_collection( self, name: str, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, ) -> Collection[_DocumentType]: """Get a :class:`~pymongo.collection.Collection` with the given name and options. @@ -319,11 +321,11 @@ def _get_encrypted_fields(self, kwargs, coll_name, ask_db): def create_collection( self, name: str, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, - session: Optional["ClientSession"] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, check_exists: Optional[bool] = True, **kwargs: Any, ) -> Collection[_DocumentType]: @@ -472,7 +474,7 @@ def create_collection( ) def aggregate( - self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any + self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any ) -> CommandCursor[_DocumentType]: """Perform a database-level aggregation. @@ -557,7 +559,7 @@ def watch( batch_size: Optional[int] = None, collation: Optional[_CollationIn] = None, start_at_operation_time: Optional[Timestamp] = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, start_after: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, full_document_before_change: Optional[str] = None, @@ -720,7 +722,7 @@ def command( allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_preference: Optional[_ServerMode] = None, codec_options: "Optional[bson.codec_options.CodecOptions[_CodecDocumentType]]" = None, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> _CodecDocumentType: @@ -883,7 +885,7 @@ def _list_collections(self, sock_info, session, read_preference, **kwargs): def list_collections( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, filter: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -924,7 +926,7 @@ def _cmd(session, server, sock_info, read_preference): def list_collection_names( self, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, filter: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -989,7 +991,7 @@ def _drop_helper(self, name, session=None, comment=None): def drop_collection( self, name_or_collection: Union[str, Collection[_DocumentTypeArg]], - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None, ) -> Dict[str, Any]: @@ -1069,7 +1071,7 @@ def validate_collection( name_or_collection: Union[str, Collection[_DocumentTypeArg]], scandata: bool = False, full: bool = False, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, background: Optional[bool] = None, comment: Optional[Any] = None, ) -> Dict[str, Any]: @@ -1165,7 +1167,7 @@ def __bool__(self) -> NoReturn: def dereference( self, dbref: DBRef, - session: Optional["ClientSession"] = None, + session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any, ) -> Optional[_DocumentType]: diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 285b082a7d..d6f3ca6835 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -13,6 +13,7 @@ # limitations under the License. """Support for automatic client-side field level encryption.""" +from __future__ import annotations from typing import TYPE_CHECKING, Any, List, Mapping, Optional @@ -38,7 +39,7 @@ def __init__( self, kms_providers: Mapping[str, Any], key_vault_namespace: str, - key_vault_client: Optional["MongoClient"] = None, + key_vault_client: Optional[MongoClient] = None, schema_map: Optional[Mapping[str, Any]] = None, bypass_auto_encryption: bool = False, mongocryptd_uri: str = "mongodb://localhost:27020", diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index ccfaaa31c1..871c4545e5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -30,6 +30,7 @@ >>> c["test-database"] Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') """ +from __future__ import annotations import contextlib import os @@ -1762,7 +1763,7 @@ def _ensure_session(self, session=None): @contextlib.contextmanager def _tmp_session( self, session: Optional[client_session.ClientSession], close: bool = True - ) -> "Generator[Optional[client_session.ClientSession], None, None]": + ) -> Generator[Optional[client_session.ClientSession], None, None]: """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.ClientSession): @@ -1939,10 +1940,10 @@ def drop_database( def get_default_database( self, default: Optional[str] = None, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, + read_concern: Optional[ReadConcern] = None, ) -> database.Database[_DocumentType]: """Get the database named in the MongoDB connection URI. @@ -2000,10 +2001,10 @@ def get_default_database( def get_database( self, name: Optional[str] = None, - codec_options: Optional["bson.CodecOptions[_DocumentTypeArg]"] = None, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, + read_concern: Optional[ReadConcern] = None, ) -> database.Database[_DocumentType]: """Get a :class:`~pymongo.database.Database` with the given name and options. diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 391ca13540..24ac7f06bc 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -183,6 +183,8 @@ def connection_checked_in(self, event): handler first. """ +from __future__ import annotations + import datetime from collections import abc, namedtuple from typing import TYPE_CHECKING, Any, Dict, Optional @@ -1128,8 +1130,8 @@ class ServerDescriptionChangedEvent(_ServerEvent): def __init__( self, - previous_description: "ServerDescription", - new_description: "ServerDescription", + previous_description: ServerDescription, + new_description: ServerDescription, *args: Any, ) -> None: super().__init__(*args) @@ -1137,14 +1139,14 @@ def __init__( self.__new_description = new_description @property - def previous_description(self) -> "ServerDescription": + def previous_description(self) -> ServerDescription: """The previous :class:`~pymongo.server_description.ServerDescription`. """ return self.__previous_description @property - def new_description(self) -> "ServerDescription": + def new_description(self) -> ServerDescription: """The new :class:`~pymongo.server_description.ServerDescription`. """ @@ -1204,8 +1206,8 @@ class TopologyDescriptionChangedEvent(TopologyEvent): def __init__( self, - previous_description: "TopologyDescription", - new_description: "TopologyDescription", + previous_description: TopologyDescription, + new_description: TopologyDescription, *args: Any, ) -> None: super().__init__(*args) @@ -1213,14 +1215,14 @@ def __init__( self.__new_description = new_description @property - def previous_description(self) -> "TopologyDescription": + def previous_description(self) -> TopologyDescription: """The previous :class:`~pymongo.topology_description.TopologyDescription`. """ return self.__previous_description @property - def new_description(self) -> "TopologyDescription": + def new_description(self) -> TopologyDescription: """The new :class:`~pymongo.topology_description.TopologyDescription`. """ diff --git a/test/test_comment.py b/test/test_comment.py index ea44c74257..baac68be58 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -14,9 +14,10 @@ """Test the keyword argument 'comment' in various helpers.""" +from __future__ import annotations + import inspect import sys -from typing import Any, Union sys.path[0:0] = [""] @@ -69,7 +70,7 @@ def _test_ops( "signature of function %s" % (h.__name__), ) self.assertEqual( - inspect.signature(h).parameters["comment"].annotation, Union[Any, None] + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" ) if isinstance(maybe_cursor, CommandCursor): maybe_cursor.close() From 37202c0db1e8c282b62852f10f63e0c127fd88f7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 16 Jun 2023 14:31:26 -0500 Subject: [PATCH 0933/2111] PYTHON-3726 Migrate off of Ubuntu in EG Builds (#1227) --- .evergreen/config.yml | 106 ++++++++++++++++++------------------------ 1 file changed, 45 insertions(+), 61 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c3e8a3d1f3..df126f57a1 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1372,7 +1372,7 @@ tasks: - name: "release-combine" tags: ["release_tag"] - run_on: ubuntu2004-small + run_on: rhel84-small depends_on: - name: "*" variant: ".release_tag" @@ -2218,7 +2218,8 @@ tasks: shell: "bash" script: | ${PREPARE_SHELL} - export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz + export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/run-tests.sh - name: testazurekms-task @@ -2272,10 +2273,10 @@ tasks: set -o errexit ${PREPARE_SHELL} cd src - PYTHON_BINARY= + PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ KEY_NAME='${testazurekms_keyname}' \ KEY_VAULT_ENDPOINT='${testazurekms_keyvaultendpoint}' \ - LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu2004-64/master/latest/libmongocrypt.tar.gz \ + LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz \ SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ ./.evergreen/run-tests.sh @@ -2322,17 +2323,19 @@ axes: skip_web_identity_auth_test: true python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: rhel76 - display_name: "RHEL 7.6" + - id: rhel7 + display_name: "RHEL 7.x" run_on: rhel76-small batchtime: 10080 # 7 days variables: + python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel84 - display_name: "RHEL 8.4" + - id: rhel8 + display_name: "RHEL 8.x" run_on: rhel84-small batchtime: 10080 # 7 days variables: + python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-80-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel80-fips display_name: "RHEL 8.0 FIPS" @@ -2815,9 +2818,9 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-python-version-rhel8.4-test-ssl" +- matrix_name: "tests-python-version-rhel8-test-ssl" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth-ssl: "*" coverage: "*" @@ -2835,14 +2838,14 @@ buildvariants: - matrix_name: "tests-pyopenssl" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth: "*" ssl: "ssl" pyopenssl: "*" # Only test "noauth" with Python 3.7. exclude_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] auth: "noauth" ssl: "ssl" @@ -2874,9 +2877,9 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-rhel84-test-encryption" +- matrix_name: "tests-python-version-rhel8-test-encryption" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth-ssl: noauth-nossl # TODO: dependency error for 'coverage-report' task: @@ -2888,22 +2891,22 @@ buildvariants: rules: - if: encryption: ["encryption", "encryption_crypt_shared"] - platform: rhel84 + platform: rhel8 auth-ssl: noauth-nossl python-version: "*" then: add_tasks: *encryption-server-versions -- matrix_name: "tests-python-version-rhel84-without-c-extensions" +- matrix_name: "tests-python-version-rhel8-without-c-extensions" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" c-extensions: without-c-extensions auth-ssl: noauth-nossl coverage: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: rhel84 + - platform: rhel8 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "*" auth-ssl: "*" @@ -2911,15 +2914,15 @@ buildvariants: display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" tasks: *all-server-versions -- matrix_name: "tests-python-version-rhel84-compression" +- matrix_name: "tests-python-version-rhel8-compression" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" c-extensions: "*" compression: "*" exclude_spec: # These interpreters are always tested without extensions. - - platform: rhel84 + - platform: rhel8 python-version: ["pypy3.7", "pypy3.8"] c-extensions: "with-c-extensions" compression: "*" @@ -2940,36 +2943,21 @@ buildvariants: - "test-4.0-standalone" - "test-3.6-standalone" -- matrix_name: "tests-python-version-green-framework-rhel84" +- matrix_name: "tests-python-version-green-framework-rhel8" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" green-framework: "*" auth-ssl: "*" exclude_spec: # Don't test green frameworks on these Python versions. - - platform: rhel84 - python-version: ["pypy3.7", "pypy3.8", "3.11"] + - platform: rhel8 + python-version: ["pypy3.7", "pypy3.8"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions -- matrix_name: "tests-python-version-green-framework-ubuntu20" - matrix_spec: - platform: ubuntu-20.04 - python-version: ["3.11"] - green-framework: "*" - auth-ssl: "*" - display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" - tasks: - - ".rapid" - - ".latest" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - matrix_name: "tests-windows-python-version" matrix_spec: platform: windows-64-vsMulti-small @@ -2988,7 +2976,7 @@ buildvariants: - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: - platform: rhel76 + platform: rhel7 # Python 3.10+ requires OpenSSL 1.1.1+ python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] auth-ssl: "*" @@ -3016,13 +3004,13 @@ buildvariants: # Storage engine tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "tests-storage-engines" matrix_spec: - platform: rhel84 + platform: rhel8 storage-engine: "*" python-version: 3.7 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: - platform: rhel84 + platform: rhel8 storage-engine: ["inmemory"] python-version: "*" then: @@ -3037,7 +3025,7 @@ buildvariants: - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 - platform: rhel84 + platform: rhel8 storage-engine: ["mmapv1"] python-version: "*" then: @@ -3050,7 +3038,7 @@ buildvariants: # enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.7. - matrix_name: "test-disableTestCommands" matrix_spec: - platform: rhel84 + platform: rhel8 disableTestCommands: "*" python-version: "3.7" display_name: "Disable test commands ${python-version} ${platform}" @@ -3059,7 +3047,7 @@ buildvariants: - matrix_name: "test-linux-enterprise-auth" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version}" @@ -3092,7 +3080,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: 3.7 display_name: "MockupDB Tests" tasks: @@ -3100,7 +3088,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.8"] display_name: "Doctests ${python-version} ${platform}" tasks: @@ -3126,7 +3114,7 @@ buildvariants: - matrix_name: "atlas-connect" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" display_name: "Atlas connect ${python-version} ${platform}" tasks: @@ -3134,7 +3122,7 @@ buildvariants: - matrix_name: "serverless" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: "*" auth-ssl: auth-ssl serverless: "*" @@ -3144,7 +3132,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.7", "3.10"] auth: "auth" c-extensions: "*" @@ -3154,7 +3142,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: - platform: rhel84 + platform: rhel8 python-version: ["3.7", "3.10"] auth: "auth" versionedApi: "*" @@ -3167,7 +3155,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: - platform: ubuntu-20.04 + platform: rhel8 python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] mongodb-version: ["4.4", "5.0", "6.0", "7.0", "latest"] auth: "noauth" @@ -3204,7 +3192,7 @@ buildvariants: - matrix_name: "oidc-auth-test" matrix_spec: - platform: [ rhel84 ] + platform: [ rhel8 ] python-version: ["3.9"] display_name: "MONGODB-OIDC Auth ${platform} ${python-version}" tasks: @@ -3251,7 +3239,7 @@ buildvariants: - matrix_name: "load-balancer" matrix_spec: - platform: rhel84 + platform: rhel8 mongodb-version: ["6.0", "7.0", "rapid", "latest"] auth-ssl: "*" python-version: "*" @@ -3263,7 +3251,7 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - ubuntu2004-small + - debian10-small tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3271,7 +3259,7 @@ buildvariants: - name: testazurekms-variant display_name: "Azure KMS" - run_on: ubuntu2004-small + run_on: debian10-small tasks: - name: testazurekms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README @@ -3286,10 +3274,6 @@ buildvariants: # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available - # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ - # Ubuntu16.04 aarch64 is only supported by MongoDB 3.4+ - # Ubuntu16.04 s390x is only supported by MongoDB 3.4+ - # Ubuntu16.04 (x86) only supports MongoDB 3.2+ # Debian 8.1 only supports MongoDB 3.4+ # SUSE12 s390x is only supported by MongoDB 3.4+ # No enterprise build for Archlinux, SSL not available From 374250d5494ad8221a28f9b2c62f83b1ef16f451 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 16 Jun 2023 13:05:18 -0700 Subject: [PATCH 0934/2111] PYTHON-2963 Add tox config in preparation for migration from setup.py (#1240) --- .github/workflows/test-python.yml | 19 ++++---- tox.ini | 77 +++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 11 deletions(-) create mode 100644 tox.ini diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index b4a8177fda..83def93f57 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -33,13 +33,16 @@ jobs: python-version: ${{ matrix.python-version }} cache: 'pip' cache-dependency-path: 'setup.py' + - name: Install dependencies + run: | + pip install tox - name: Start MongoDB uses: supercharge/mongodb-github-action@1.7.0 with: mongodb-version: 4.4 - name: Run tests run: | - python setup.py test + tox -e test mypytest: name: Run mypy @@ -58,22 +61,16 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip mypy==1.2 - pip install -e ".[zstd, encryption, ocsp]" + pip install tox - name: Run mypy run: | - mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test - python -m pip install -U typing_extensions - mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py + tox -e typecheck-mypy - name: Run pyright run: | - python -m pip install -U pip pyright==1.1.290 - pyright test/test_typing.py test/test_typing_strict.py + tox -e typecheck-pyright - name: Run pyright strict run: | - echo '{"strict": ["tests/test_typing_strict.py"]}' >> pyrightconfig.json - pyright test/test_typing_strict.py + tox -e typecheck-pyright-strict linkcheck: name: Check Links diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..e199914cb5 --- /dev/null +++ b/tox.ini @@ -0,0 +1,77 @@ +[tox] +requires = + tox>=4 +envlist = + # Test using the system Python. + test, + # Run pre-commit on all files. + lint, + # Run pre-commit on all files, including stages that require manual fixes. + lint-manual, + # Typecheck all files. + typecheck + +[testenv:test] +description = run unit tests +commands = + python --version + python setup.py test {posargs} + +[testenv:lint] +description = run pre-commit +deps = + pre-commit +commands = + pre-commit run --all-files + +[testenv:lint-manual] +description = run all pre-commit stages, including those that require manual fixes +deps = + pre-commit +commands = + pre-commit run --all-files --hook-stage manual + +[testenv:typecheck-mypy] +description = run mypy and pyright to typecheck +deps = + mypy + zstandard + certifi; platform_system == "win32" or platform_system == "Darwin" + typing_extensions + pyopenssl>=17.2.0 + requests<3.0.0 + service_identity>=18.1.0 + pymongocrypt>=1.6.0,<2.0.0 + pymongo-auth-aws<2.0.0 +commands = + mypy --install-types --non-interactive bson gridfs tools pymongo + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py + +[testenv:typecheck-pyright] +description = run pyright to typecheck +deps = + mypy + pyright==1.1.290 +commands = + pyright test/test_typing.py test/test_typing_strict.py + +[testenv:typecheck-pyright-strict] +description = run pyright with strict mode to typecheck +deps = + {[testenv:typecheck-pyright]deps} +allowlist_externals=echo +commands = + echo '{"strict": ["tests/test_typing_strict.py"]}' > pyrightconfig.json + pyright test/test_typing_strict.py + +[testenv:typecheck] +description = run mypy and pyright to typecheck +deps = + {[testenv:typecheck-mypy]deps} + {[testenv:typecheck-pyright]deps} +allowlist_externals=echo +commands = + {[testenv:typecheck-mypy]commands} + {[testenv:typecheck-pyright]commands} + {[testenv:typecheck-pyright-strict]commands} From 82d87dc173d0095faa7aef8622387debaca52b2c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 16 Jun 2023 13:30:54 -0700 Subject: [PATCH 0935/2111] PYTHON-3744 Fix utcnow deprecation build regressions (#1244) --- doc/examples/datetimes.rst | 4 ++-- doc/tutorial.rst | 2 +- pymongo/ocsp_cache.py | 12 ++++++++++-- pymongo/ocsp_support.py | 2 +- test/test_ocsp_cache.py | 2 +- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 2dc9c003eb..f9c9fa7a31 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -26,10 +26,10 @@ time into MongoDB: .. doctest:: >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now(tz=timezone.utc)} + ... {"last_modified": datetime.datetime.now(tz=datetime.timezone.utc)} ... ) -Always use :meth:`datetime.datetime.now(tz=timezone.utc)`, which explicitly returns the current time in +Always use :meth:`datetime.datetime.now(tz=datetime.timezone.utc)`, which explicitly returns the current time in UTC, instead of :meth:`datetime.datetime.now`, with no arguments, which returns the current local time. Avoid doing this: diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 768b535fe3..e33936363d 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -109,7 +109,7 @@ post: ... "author": "Mike", ... "text": "My first blog post!", ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.now(tz=timezone.utc), + ... "date": datetime.datetime.now(tz=datetime.timezone.utc), ... } Note that documents can contain native Python types (like diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index b60a24b027..f6ac4bb08c 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -61,7 +61,11 @@ def __setitem__(self, key, value): return # Do nothing if the response is invalid. - if not (value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update): + if not ( + value.this_update + <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) + < value.next_update + ): return # Cache new response OR update cached response if new response @@ -82,7 +86,11 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if value.this_update <= _datetime.now(tz=timezone.utc) < value.next_update: + if ( + value.this_update + <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) + < value.next_update + ): return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index dda92d0d3b..dd070748a4 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -220,7 +220,7 @@ def _verify_response(issuer, response): # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? - now = _datetime.now(tz=timezone.utc) + now = _datetime.now(tz=timezone.utc).replace(tzinfo=None) # RFC6960, Section 3.2, Number 5 if response.this_update > now: _LOGGER.debug("thisUpdate is in the future") diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 3740b6b28a..7fff4fd902 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -61,7 +61,7 @@ def _create_mock_request(self): ) def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): - now = datetime.now(tz=timezone.utc) + now = datetime.now(tz=timezone.utc).replace(tzinfo=None) this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) From bc66d83efc5e57c5685cdd621aa3bf74ba44f2a7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 20 Jun 2023 09:13:23 -0700 Subject: [PATCH 0936/2111] PYTHON-3738 Use tox for sphinx doc instead of setup.py (#1245) --- .github/workflows/test-python.yml | 6 ++---- tox.ini | 35 ++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 83def93f57..d7c442cc49 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -83,9 +83,7 @@ jobs: cache-dependency-path: 'setup.py' - name: Install dependencies run: | - python -m pip install -U pip - python -m pip install sphinx + pip install tox - name: Check links run: | - cd doc - make linkcheck + tox -e linkcheck diff --git a/tox.ini b/tox.ini index e199914cb5..46c1a697de 100644 --- a/tox.ini +++ b/tox.ini @@ -8,8 +8,20 @@ envlist = lint, # Run pre-commit on all files, including stages that require manual fixes. lint-manual, + # Typecheck using mypy. + typecheck-mypy, + # Typecheck using pyright. + typecheck-pyright, + # Typecheck using pyright strict. + typecheck-pyright-strict, # Typecheck all files. - typecheck + typecheck, + # Build sphinx docs + doc, + # Test sphinx docs + doc-test, + # Linkcheck sphinx docs + linkcheck [testenv:test] description = run unit tests @@ -75,3 +87,24 @@ commands = {[testenv:typecheck-mypy]commands} {[testenv:typecheck-pyright]commands} {[testenv:typecheck-pyright-strict]commands} + +[testenv:doc] +description = build sphinx docs +deps = + sphinx +commands = + sphinx-build -E -b html doc ./doc/_build/html + +[testenv:doc-test] +description = run sphinx doc tests +deps = + {[testenv:doc]deps} +commands = + sphinx-build -E -b doctest doc ./doc/_build/doctest + +[testenv:linkcheck] +description = check links of sphinx docs +deps = + {[testenv:doc]deps} +commands = + sphinx-build -E -b linkcheck doc ./doc/_build/linkcheck From 55a9bee81012057e90c47d08f396377c112e47bb Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 20 Jun 2023 23:20:39 -0700 Subject: [PATCH 0937/2111] BUMP 4.4 (#1251) --- doc/api/pymongo/encryption_options.rst | 4 +--- doc/changelog.rst | 27 +++++++++++++++++++------- pymongo/_version.py | 2 +- pymongo/client_options.py | 2 +- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/doc/api/pymongo/encryption_options.rst b/doc/api/pymongo/encryption_options.rst index 08bfc157a9..b8a886ea68 100644 --- a/doc/api/pymongo/encryption_options.rst +++ b/doc/api/pymongo/encryption_options.rst @@ -3,6 +3,4 @@ .. automodule:: pymongo.encryption_options :synopsis: Support for automatic client-side field level encryption - - .. autoclass:: pymongo.encryption_options.AutoEncryptionOpts - :members: + :members: diff --git a/doc/changelog.rst b/doc/changelog.rst index e0e316e5b6..eae105b617 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -5,18 +5,28 @@ Changes in Version 4.4 ----------------------- - Added support for MongoDB 7.0. +- Added support for Python 3.11. - Added support for passing a list containing (key, direction) pairs or keys to :meth:`~pymongo.collection.Collection.create_index`. -- pymongocrypt 1.6.0 or later is now required for client side field level - encryption support. +- Improved bson encoding performance (`PYTHON-3717`_ and `PYTHON-3718`_). - Improved support for Pyright to improve typing support for IDEs like Visual Studio Code or Visual Studio. - Improved support for type-checking with MyPy "strict" mode (`--strict`). -- Added support for Python 3.11. -- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB Server 7.0 introduced a backwards breaking - change to the QE protocol. Users taking advantage of the Queryable Encryption beta must now upgrade to - MongoDB 7.0+ and PyMongo 4.4+. -- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated in Python 3.12, for reasons explained `in this Github issue`_. Instead, users should use :meth:`datetime.datetime.now(tz=timezone.utc)` and :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead. +- Added :meth:`~pymongo.encryption.ClientEncryption.create_encrypted_collection`, + :class:`~pymongo.errors.EncryptedCollectionError`, + :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression`, + :class:`~pymongo.encryption_options.RangeOpts`, + and :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW` as part of the experimental + Queryable Encryption beta. +- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB + Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking + advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and + PyMongo 4.4+. +- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and + :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated + in Python 3.12, for reasons explained `in this Github issue`_. Instead, users should + use :meth:`datetime.datetime.now(tz=timezone.utc)` and + :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead. .. _in this Github issue: https://github.com/python/cpython/issues/103857 @@ -28,6 +38,9 @@ in this release. .. _PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354 +.. _PYTHON-3717: https://jira.mongodb.org/browse/PYTHON-3717 +.. _PYTHON-3718: https://jira.mongodb.org/browse/PYTHON-3718 + Changes in Version 4.3.3 ------------------------ diff --git a/pymongo/_version.py b/pymongo/_version.py index a5885d8cc5..14ba007944 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0, ".dev1") +version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0) def get_version_string() -> str: diff --git a/pymongo/client_options.py b/pymongo/client_options.py index c9f63dc95a..2e39b843ec 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -267,7 +267,7 @@ def read_concern(self): def timeout(self) -> Optional[float]: """The configured timeoutMS converted to seconds, or None. - ..versionadded: 4.2 + .. versionadded: 4.2 """ return self.__timeout From bafb73cb8c92c07b647c30fdca19032db379f337 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 21 Jun 2023 11:16:35 -0700 Subject: [PATCH 0938/2111] BUMP 4.5.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 14ba007944..db32b1ddb2 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,7 +15,7 @@ """Current version of PyMongo.""" from typing import Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 4, 0) +version_tuple: Tuple[Union[int, str], ...] = (4, 5, 0, ".dev0") def get_version_string() -> str: From b16e06acfda8cf009da74c7706df833627533a74 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 21 Jun 2023 15:07:14 -0700 Subject: [PATCH 0939/2111] PYTHON-3727 Use tox for unit tests and switch to pytest (#1249) --- pytest.ini | 4 +++ test/conftest.py | 10 ++++++++ test/test_cmap.py | 6 ++--- test/test_crud_v1.py | 4 +-- test/test_data_lake.py | 4 +-- test/test_encryption.py | 4 +-- test/test_read_write_concern_spec.py | 4 +-- test/test_retryable_reads.py | 4 +-- test/test_retryable_writes.py | 4 +-- test/test_server_selection_in_window.py | 6 ++--- test/test_transactions.py | 6 ++--- test/utils.py | 2 +- tox.ini | 34 +++++++++++++++++-------- 13 files changed, 59 insertions(+), 33 deletions(-) create mode 100644 pytest.ini create mode 100644 test/conftest.py diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..daf6168964 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +testpaths = + test +norecursedirs = test/* diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000000..400fd9ed75 --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,10 @@ +from test import setup, teardown + +import pytest + + +@pytest.fixture(scope="session", autouse=True) +def test_setup_and_teardown(): + setup() + yield + teardown() diff --git a/test/test_cmap.py b/test/test_cmap.py index 3b84524f44..1676ed66da 100644 --- a/test/test_cmap.py +++ b/test/test_cmap.py @@ -24,7 +24,7 @@ from test.pymongo_mocks import DummyMonitor from test.utils import ( CMAPListener, - TestCreator, + SpecTestCreator, camel_to_snake, client_context, get_pool, @@ -455,7 +455,7 @@ def run_scenario(self): return run_scenario -class CMAPTestCreator(TestCreator): +class CMAPSpecTestCreator(SpecTestCreator): def tests(self, scenario_def): """Extract the tests from a spec file. @@ -465,7 +465,7 @@ def tests(self, scenario_def): return [scenario_def] -test_creator = CMAPTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) +test_creator = CMAPSpecTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) test_creator.create_tests() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index 589da0a7d7..46aab2fba1 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -21,7 +21,7 @@ from test import IntegrationTest, unittest from test.utils import ( - TestCreator, + SpecTestCreator, camel_to_snake, camel_to_snake_args, camel_to_upper_camel, @@ -171,7 +171,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() diff --git a/test/test_data_lake.py b/test/test_data_lake.py index ce210010bd..868cbe836b 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -23,7 +23,7 @@ from test.crud_v2_format import TestCrudV2 from test.utils import ( OvertCommandListener, - TestCreator, + SpecTestCreator, rs_client_noauth, rs_or_single_client, ) @@ -115,7 +115,7 @@ def run_scenario(self): return run_scenario -TestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() +SpecTestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() if __name__ == "__main__": diff --git a/test/test_encryption.py b/test/test_encryption.py index 0b9087359e..2f61b52ffb 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -50,7 +50,7 @@ from test.utils import ( AllowListEventListener, OvertCommandListener, - TestCreator, + SpecTestCreator, TopologyEventListener, camel_to_snake_args, is_greenthread_patched, @@ -695,7 +695,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) test_creator.create_tests() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 2b39f7d04e..b27e9fa033 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -24,7 +24,7 @@ from test import IntegrationTest, client_context, unittest from test.utils import ( EventListener, - TestCreator, + SpecTestCreator, disable_replication, enable_replication, rs_or_single_client, @@ -337,7 +337,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) +test_creator = SpecTestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) test_creator.create_tests() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 97c51cd44f..df173ac27b 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -31,7 +31,7 @@ from test.utils import ( CMAPListener, OvertCommandListener, - TestCreator, + SpecTestCreator, rs_or_single_client, ) from test.utils_spec_runner import SpecRunner @@ -138,7 +138,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestSpec, _TEST_PATH) test_creator.create_tests() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 32841a8227..89507b33c3 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -28,7 +28,7 @@ DeprecationFilter, EventListener, OvertCommandListener, - TestCreator, + SpecTestCreator, rs_or_single_client, ) from test.utils_spec_runner import SpecRunner @@ -120,7 +120,7 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) +test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) test_creator.create_tests() diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 63769a6457..6c015e0ed2 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -19,7 +19,7 @@ from test import IntegrationTest, client_context, unittest from test.utils import ( OvertCommandListener, - TestCreator, + SpecTestCreator, get_pool, rs_client, wait_until, @@ -76,7 +76,7 @@ def run_scenario(self): return run_scenario -class CustomTestCreator(TestCreator): +class CustomSpecTestCreator(SpecTestCreator): def tests(self, scenario_def): """Extract the tests from a spec file. @@ -86,7 +86,7 @@ def tests(self, scenario_def): return [scenario_def] -CustomTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() class FinderThread(threading.Thread): diff --git a/test/test_transactions.py b/test/test_transactions.py index 9b51927d67..57495b0ab7 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -23,7 +23,7 @@ from test import client_context, unittest from test.utils import ( OvertCommandListener, - TestCreator, + SpecTestCreator, rs_client, single_client, wait_until, @@ -581,11 +581,11 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestTransactions, TEST_PATH) +test_creator = SpecTestCreator(create_test, TestTransactions, TEST_PATH) test_creator.create_tests() -TestCreator( +SpecTestCreator( create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH ).create_tests() diff --git a/test/utils.py b/test/utils.py index 810a02b872..86edae8808 100644 --- a/test/utils.py +++ b/test/utils.py @@ -391,7 +391,7 @@ def call_count(self): return len(self._call_list) -class TestCreator: +class SpecTestCreator: """Class to create test cases from specifications.""" def __init__(self, create_test, test_class, test_path): diff --git a/tox.ini b/tox.ini index 46c1a697de..f6e2f1f755 100644 --- a/tox.ini +++ b/tox.ini @@ -4,6 +4,8 @@ requires = envlist = # Test using the system Python. test, + # Test the extra encryption functionality. + test-encryption, # Run pre-commit on all files. lint, # Run pre-commit on all files, including stages that require manual fixes. @@ -24,10 +26,21 @@ envlist = linkcheck [testenv:test] -description = run unit tests +description = run base set of unit tests with no extra functionality +deps = + pytest>=7 commands = python --version - python setup.py test {posargs} + pytest -v -rs {posargs} + +[testenv:test-encryption] +description = run base unit tests with encryption enabled +deps = {[testenv:test]deps} +extras = encryption +commands = + python --version + pytest {posargs} + [testenv:lint] description = run pre-commit @@ -45,25 +58,24 @@ commands = [testenv:typecheck-mypy] description = run mypy and pyright to typecheck +extras = + encryption + ocsp + zstd + aws deps = - mypy - zstandard + mypy==1.2.0 certifi; platform_system == "win32" or platform_system == "Darwin" typing_extensions - pyopenssl>=17.2.0 - requests<3.0.0 - service_identity>=18.1.0 - pymongocrypt>=1.6.0,<2.0.0 - pymongo-auth-aws<2.0.0 commands = mypy --install-types --non-interactive bson gridfs tools pymongo - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" --exclude "test/conftest.py" test mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py [testenv:typecheck-pyright] description = run pyright to typecheck deps = - mypy + mypy==1.2.0 pyright==1.1.290 commands = pyright test/test_typing.py test/test_typing_strict.py From df07641687506054264b065e672f46492b825b41 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 21 Jun 2023 16:25:58 -0700 Subject: [PATCH 0940/2111] PYTHON-2523 Remove unneeded bson-stdint-win32.h (#1253) --- bson/bson-endian.h | 1 - bson/bson-stdint-win32.h | 259 --------------------------------------- 2 files changed, 260 deletions(-) delete mode 100644 bson/bson-stdint-win32.h diff --git a/bson/bson-endian.h b/bson/bson-endian.h index c34a58dde1..e906b0776f 100644 --- a/bson/bson-endian.h +++ b/bson/bson-endian.h @@ -25,7 +25,6 @@ #ifdef _MSC_VER -# include "bson-stdint-win32.h" # define BSON_INLINE __inline #else # include diff --git a/bson/bson-stdint-win32.h b/bson/bson-stdint-win32.h deleted file mode 100644 index cb2acd9384..0000000000 --- a/bson/bson-stdint-win32.h +++ /dev/null @@ -1,259 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2013 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the product nor the names of its contributors may -// be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#if _MSC_VER >= 1600 // [ -#include -#else // ] _MSC_VER >= 1600 [ - -#include - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -// These #ifndef's are needed to prevent collisions with . -// Check out Issue 9 for the details. -#ifndef INTMAX_C // [ -# define INTMAX_C INT64_C -#endif // INTMAX_C ] -#ifndef UINTMAX_C // [ -# define UINTMAX_C UINT64_C -#endif // UINTMAX_C ] - -#endif // __STDC_CONSTANT_MACROS ] - -#endif // _MSC_VER >= 1600 ] - -#endif // _MSC_STDINT_H_ ] From e78a91ef2858b666f1bfd8fb4de420074d940af3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 22 Jun 2023 13:00:55 -0500 Subject: [PATCH 0941/2111] PYTHON-2965 Migrate to a PEP517 compliant build system (#1252) --- .evergreen/build-mac.sh | 5 +- .evergreen/build-manylinux-internal.sh | 3 +- .evergreen/build-windows.sh | 3 +- .evergreen/config.yml | 3 +- .evergreen/run-doctests.sh | 4 +- README.rst | 2 +- RELEASE.rst | 2 +- doc/index.rst | 3 +- doc/installation.rst | 4 +- pyproject.toml | 93 ++++++++++++ setup.py | 187 ++----------------------- tox.ini | 3 +- 12 files changed, 120 insertions(+), 192 deletions(-) create mode 100644 pyproject.toml diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh index 60846ae92a..4e8be8cf58 100755 --- a/.evergreen/build-mac.sh +++ b/.evergreen/build-mac.sh @@ -14,9 +14,8 @@ PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 rm -rf build createvirtualenv $PYTHON releasevenv -python -m pip install --upgrade wheel -python -m pip install setuptools==63.2.0 -python setup.py bdist_wheel +python -m pip install build +python -m build --wheel . deactivate || true rm -rf releasevenv diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh index 7c3747f4e2..6f1c58fd86 100755 --- a/.evergreen/build-manylinux-internal.sh +++ b/.evergreen/build-manylinux-internal.sh @@ -16,7 +16,8 @@ for PYTHON in /opt/python/*/bin/python; do fi # https://github.com/pypa/manylinux/issues/49 rm -rf build - $PYTHON setup.py bdist_wheel + $PYTHON -m pip install build + $PYTHON -m build --wheel . rm -rf build # Audit wheels and write manylinux tag diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh index aeb16892b1..8748e5c18f 100755 --- a/.evergreen/build-windows.sh +++ b/.evergreen/build-windows.sh @@ -13,7 +13,8 @@ for VERSION in 37 38 39 310 311; do "C:/Python/32/Python${VERSION}/python.exe") for PYTHON in "${_pythons[@]}"; do rm -rf build - $PYTHON setup.py bdist_wheel + $PYTHON -m pip install build + $PYTHON -m build --wheel . # Test that each wheel is installable. for release in dist/*; do diff --git a/.evergreen/config.yml b/.evergreen/config.yml index df126f57a1..f3c159a1df 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1121,7 +1121,8 @@ functions: done # Build source distribution. cd src/ - /opt/python/3.7/bin/python3 setup.py sdist + /opt/python/3.7/bin/python3 -m pip install build + /opt/python/3.7/bin/python3 -m build --sdist . cp dist/* ../releases - command: archive.targz_pack params: diff --git a/.evergreen/run-doctests.sh b/.evergreen/run-doctests.sh index eebb0f784c..39e5102b6a 100644 --- a/.evergreen/run-doctests.sh +++ b/.evergreen/run-doctests.sh @@ -3,5 +3,5 @@ set -o xtrace set -o errexit -${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} setup.py doc -t +${PYTHON_BINARY} -m pip install tox +${PYTHON_BINARY} -m tox -e doc-test diff --git a/README.rst b/README.rst index 71d47bdc0b..6274e2c9dd 100644 --- a/README.rst +++ b/README.rst @@ -79,7 +79,7 @@ Or ``easy_install`` from You can also download the project source and do:: - $ python setup.py install + $ pip install . Do **not** install the "bson" package from pypi. PyMongo comes with its own bson package; doing "easy_install bson" installs a third-party package that diff --git a/RELEASE.rst b/RELEASE.rst index 4150126f22..caa67d3819 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -36,7 +36,7 @@ Doing a Release To test locally, ``python3 setup.py test`` will build the C extensions and test. ``python3 tools/clean.py`` will remove the extensions, and then ``python3 setup.py --no_ext test`` will run the tests without - them. You can also run the doctests: ``python3 setup.py doc -t``. + them. You can also run the doctests: ``tox -e doc-test``. 2. Check Jira to ensure all the tickets in this version have been completed. diff --git a/doc/index.rst b/doc/index.rst index e474d27d8f..7e357c2a4b 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -102,7 +102,8 @@ following command from the root directory of the **PyMongo** source: .. code-block:: bash - $ python setup.py doc + $ pip install tox + $ tox -e docs Indices and tables ------------------ diff --git a/doc/installation.rst b/doc/installation.rst index c4cbc78d93..8ba21b0f9d 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -89,7 +89,7 @@ latest source from GitHub and install the driver from the resulting tree:: $ git clone https://github.com/mongodb/mongo-python-driver.git pymongo $ cd pymongo/ - $ python3 setup.py install + $ pip install . Installing from source on Unix .............................. @@ -186,7 +186,7 @@ If you wish to install PyMongo without the C extensions, even if the extensions build properly, it can be done using a command line option to *setup.py*:: - $ python3 setup.py --no_ext install + $ NO_EXT=1 python -m pip install . Installing a beta or release candidate -------------------------------------- diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..3ad35c0b43 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,93 @@ +[build-system] +requires = ["setuptools>=63.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "pymongo" +dynamic = ["version"] +description = "Python driver for MongoDB " +readme = "README.rst" +license = {file="LICENSE"} +requires-python = ">=3.7" +authors = [ + { name = "The MongoDB Python Team" }, +] +keywords = [ + "bson", + "gridfs", + "mongo", + "mongodb", + "pymongo", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Database", + "Typing :: Typed", +] +dependencies = [ + "dnspython>=1.16.0,<3.0.0", +] + +[project.optional-dependencies] +aws = [ + "pymongo-auth-aws<2.0.0", +] +encryption = [ + "pymongo[aws]", + "pymongocrypt>=1.6.0,<2.0.0", +] +gssapi = [ + "pykerberos;os.name!='nt'", + "winkerberos>=0.5.0;os.name=='nt'" +] +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. +# Fallback to certifi on Windows if we can't load CA certs from the system +# store and just use certifi on macOS. +# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths +ocsp = [ + "certifi;os.name=='nt' or sys_platform=='darwin'", + "pyopenssl>=17.2.0", + "requests<3.0.0", + "service_identity>=18.1.0", +] +snappy = [ + "python-snappy", +] +# PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. +srv = [] +tls = [] +# PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. +zstd = [ + "zstandard", +] + +[project.urls] +Homepage = "http://github.com/mongodb/mongo-python-driver" + +[tool.setuptools.dynamic] +version = {attr = "pymongo._version.__version__"} + +[tool.setuptools.packages.find] +include = ["bson","gridfs", "pymongo"] + +[tool.setuptools.package-data] +bson=["py.typed", "*.pyi"] +pymongo=["py.typed", "*.pyi"] +gridfs=["py.typed", "*.pyi"] diff --git a/setup.py b/setup.py index e570d04c5a..de8f1b4c18 100755 --- a/setup.py +++ b/setup.py @@ -1,69 +1,16 @@ import os -import platform -import re import sys import warnings -if sys.version_info[:3] < (3, 7): - raise RuntimeError("Python version >= 3.7 required.") - - # Hack to silence atexit traceback in some Python versions try: import multiprocessing # noqa: F401 except ImportError: pass -from setuptools import setup - -if sys.version_info[:2] < (3, 10): - from distutils.cmd import Command - from distutils.command.build_ext import build_ext - from distutils.core import Extension -else: - from setuptools import Command - from setuptools.command.build_ext import build_ext - from setuptools.extension import Extension - -_HAVE_SPHINX = True -try: - from sphinx.cmd import build as sphinx -except ImportError: - try: - import sphinx - except ImportError: - _HAVE_SPHINX = False - -version_ns = {} -with open("pymongo/_version.py") as fp: - exec(fp.read(), version_ns) -version = version_ns["__version__"] - -f = open("README.rst") -try: - try: - readme_content = f.read() - except BaseException: - readme_content = "" -finally: - f.close() - -# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple -# ships are built with it. This is a problem starting with Xcode 5.1 -# since clang 3.4 errors out when it encounters unrecognized compiler -# flags. This hack removes -mno-fused-madd from the CFLAGS automatically -# generated by distutils for Apple provided pythons, allowing C extension -# builds to complete without error. The inspiration comes from older -# versions of distutils.sysconfig.get_config_vars. -if sys.platform == "darwin" and "clang" in platform.python_compiler().lower(): - from distutils.sysconfig import get_config_vars - - res = get_config_vars() - for key in ("CFLAGS", "PY_CFLAGS"): - if key in res: - flags = res[key] - flags = re.sub("-mno-fused-madd", "", flags) - res[key] = flags +from setuptools import Command, setup +from setuptools.command.build_ext import build_ext +from setuptools.extension import Extension class test(Command): @@ -126,55 +73,6 @@ def run(self): sys.exit(not result.wasSuccessful()) -class doc(Command): - - description = "generate or test documentation" - - user_options = [("test", "t", "run doctests instead of generating documentation")] - - boolean_options = ["test"] - - def initialize_options(self): - self.test = False - - def finalize_options(self): - pass - - def run(self): - - if not _HAVE_SPHINX: - raise RuntimeError("You must install Sphinx to build or test the documentation.") - - if self.test: - path = os.path.join(os.path.abspath("."), "doc", "_build", "doctest") - mode = "doctest" - else: - path = os.path.join(os.path.abspath("."), "doc", "_build", version) - mode = "html" - - try: - os.makedirs(path) - except BaseException: - pass - - sphinx_args = ["-E", "-b", mode, "doc", path] - - # sphinx.main calls sys.exit when sphinx.build_main exists. - # Call build_main directly so we can check status and print - # the full path to the built docs. - if hasattr(sphinx, "build_main"): - status = sphinx.build_main(sphinx_args) - else: - status = sphinx.main(sphinx_args) - - if status: - raise RuntimeError("documentation step '%s' failed" % (mode,)) - - sys.stdout.write( - "\nDocumentation step '%s' performed, results here:\n %s/\n" % (mode, path) - ) - - class custom_build_ext(build_ext): """Allow C extension building to fail. @@ -272,39 +170,10 @@ def build_extension(self, ext): ), ] -# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced -# a related feature we need. 17.2.0 fixes a bug -# in set_default_verify_paths we should really avoid. -# service_identity 18.1.0 introduced support for IP addr matching. -pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] -if sys.platform in ("win32", "darwin"): - # Fallback to certifi on Windows if we can't load CA certs from the system - # store and just use certifi on macOS. - # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - pyopenssl_reqs.append("certifi") - -aws_reqs = ["pymongo-auth-aws<2.0.0"] - -extras_require = { - "encryption": ["pymongocrypt>=1.6.0,<2.0.0"] + aws_reqs, - "ocsp": pyopenssl_reqs, - "snappy": ["python-snappy"], - "zstd": ["zstandard"], - "aws": aws_reqs, - "srv": [], # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. - "tls": [], # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. -} - -# GSSAPI extras -if sys.platform == "win32": - extras_require["gssapi"] = ["winkerberos>=0.5.0"] -else: - extras_require["gssapi"] = ["pykerberos"] - -extra_opts = {} - -if "--no_ext" in sys.argv: + +if "--no_ext" in sys.argv or "NO_EXT" in os.environ: sys.argv.remove("--no_ext") + ext_modules = [] elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: sys.stdout.write( """ @@ -314,46 +183,8 @@ def build_extension(self, ext): *****************************************************\n """ ) -else: - extra_opts["ext_modules"] = ext_modules + ext_modules = [] setup( - name="pymongo", - version=version, - description="Python driver for MongoDB ", - long_description=readme_content, - author="The MongoDB Python Team", - url="http://github.com/mongodb/mongo-python-driver", - keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=["dnspython>=1.16.0,<3.0.0"], - license="Apache License, Version 2.0", - python_requires=">=3.7", - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database", - "Typing :: Typed", - ], - cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, - extras_require=extras_require, - packages=["bson", "pymongo", "gridfs"], - package_data={ - "bson": ["py.typed", "*.pyi"], - "pymongo": ["py.typed", "*.pyi"], - "gridfs": ["py.typed", "*.pyi"], - }, - **extra_opts -) + cmdclass={"build_ext": custom_build_ext, "test": test}, ext_modules=ext_modules +) # type:ignore diff --git a/tox.ini b/tox.ini index f6e2f1f755..ba53a2011e 100644 --- a/tox.ini +++ b/tox.ini @@ -103,7 +103,7 @@ commands = [testenv:doc] description = build sphinx docs deps = - sphinx + -rdoc/docs-requirements.txt commands = sphinx-build -E -b html doc ./doc/_build/html @@ -111,6 +111,7 @@ commands = description = run sphinx doc tests deps = {[testenv:doc]deps} + gevent commands = sphinx-build -E -b doctest doc ./doc/_build/doctest From 424e6c46fa8ea39904aa1083059b48371af47da5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 22 Jun 2023 14:10:27 -0700 Subject: [PATCH 0942/2111] PYTHON-3762 Remove global code owners (#1256) --- .github/CODEOWNERS | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 3be0c9b0d1..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ -# Global owner for repo -* @blink1073 @NoahStapp @ShaneHarvey From 2a4dc9cb0c0e3ed93ff2002f696690dab6e64dfa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 22 Jun 2023 16:18:55 -0500 Subject: [PATCH 0943/2111] PYTHON-3760 Add C extension building as part of tox test environment (#1255) --- setup.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup.py b/setup.py index de8f1b4c18..2c1a52ecaf 100755 --- a/setup.py +++ b/setup.py @@ -126,6 +126,8 @@ def run(self): try: build_ext.run(self) except Exception: + if "TOX_ENV_NAME" in os.environ: + raise e = sys.exc_info()[1] sys.stdout.write("%s\n" % str(e)) warnings.warn( @@ -141,6 +143,8 @@ def build_extension(self, ext): try: build_ext.build_extension(self, ext) except Exception: + if "TOX_ENV_NAME" in os.environ: + raise e = sys.exc_info()[1] sys.stdout.write("%s\n" % str(e)) warnings.warn( From a750098057ffff0d9c9e5c32e14ac6e41946c4d1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:20:33 -0700 Subject: [PATCH 0944/2111] PYTHON-3750 add types to server.py (#1248) --- pymongo/server.py | 67 +++++++++++++++++++++++++++++++++------------ pymongo/topology.py | 2 +- test/test_server.py | 2 +- 3 files changed, 51 insertions(+), 20 deletions(-) diff --git a/pymongo/server.py b/pymongo/server.py index 2eb91c5b5d..840d8b7cb8 100644 --- a/pymongo/server.py +++ b/pymongo/server.py @@ -13,22 +13,42 @@ # permissions and limitations under the License. """Communicate with one MongoDB server in a topology.""" +from __future__ import annotations from datetime import datetime +from typing import TYPE_CHECKING, Any, Callable, List, Mapping, Optional, Tuple, Union from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers import _check_command_response, _handle_reauth -from pymongo.message import _convert_exception, _OpMsg +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query from pymongo.response import PinnedResponse, Response +if TYPE_CHECKING: + from contextlib import _GeneratorContextManager + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.mongo_client import _MongoClientErrorHandler + from pymongo.monitor import Monitor + from pymongo.monitoring import _EventListeners + from pymongo.pool import Pool, SocketInfo + from pymongo.server_description import ServerDescription + _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} class Server: def __init__( - self, server_description, pool, monitor, topology_id=None, listeners=None, events=None - ): + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue]] = None, + ) -> None: """Represent one MongoDB server.""" self._description = server_description self._pool = pool @@ -38,9 +58,9 @@ def __init__( self._listener = listeners self._events = None if self._publish: - self._events = events() + self._events = events() # type: ignore[misc] - def open(self): + def open(self) -> None: """Start monitoring, or restart after a fork. Multiple calls have no effect. @@ -48,11 +68,11 @@ def open(self): if not self._pool.opts.load_balanced: self._monitor.open() - def reset(self, service_id=None): + def reset(self, service_id: Optional[ObjectId] = None) -> None: """Clear the connection pool.""" self.pool.reset(service_id) - def close(self): + def close(self) -> None: """Clear the connection pool and stop the monitor. Reconnect with open(). @@ -69,12 +89,19 @@ def close(self): self._monitor.close() self._pool.reset_without_pause() - def request_check(self): + def request_check(self) -> None: """Check the server's state soon.""" self._monitor.request_check() @_handle_reauth - def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): + def run_operation( + self, + sock_info: SocketInfo, + operation: Union[_Query, _GetMore], + read_preference: bool, + listeners: _EventListeners, + unpack_res: Callable[..., List[Mapping[str, Any]]], + ) -> Response: """Run a _Query or _GetMore operation and return a Response object. This method is used only to run _Query/_GetMore operations from @@ -84,7 +111,7 @@ def run_operation(self, sock_info, operation, read_preference, listeners, unpack :Parameters: - `sock_info`: A SocketInfo instance. - `operation`: A _Query or _GetMore object. - - `set_secondary_okay`: Pass to operation.get_message. + - `read_preference`: The read preference to use. - `listeners`: Instance of _EventListeners or None. - `unpack_res`: A callable that decodes the wire protocol response. """ @@ -215,34 +242,38 @@ def run_operation(self, sock_info, operation, read_preference, listeners, unpack return response - def get_socket(self, handler=None): + def get_socket( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> _GeneratorContextManager[SocketInfo]: return self.pool.get_socket(handler) @property - def description(self): + def description(self) -> ServerDescription: return self._description @description.setter - def description(self, server_description): + def description(self, server_description: ServerDescription) -> None: assert server_description.address == self._description.address self._description = server_description @property - def pool(self): + def pool(self) -> Pool: return self._pool - def _split_message(self, message): + def _split_message( + self, message: Union[Tuple[int, Any], Tuple[int, Any, int]] + ) -> Tuple[int, Any, int]: """Return request_id, data, max_doc_size. :Parameters: - `message`: (request_id, data, max_doc_size) or (request_id, data) """ if len(message) == 3: - return message + return message # type: ignore[return-value] else: # get_more and kill_cursors messages don't include BSON documents. - request_id, data = message + request_id, data = message # type: ignore[misc] return request_id, data, 0 - def __repr__(self): + def __repr__(self) -> str: return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/topology.py b/pymongo/topology.py index 9759b39f9f..0a2eaf9420 100644 --- a/pymongo/topology.py +++ b/pymongo/topology.py @@ -732,7 +732,7 @@ def _update_servers(self): ) weak = None - if self._publish_server: + if self._publish_server and self._events is not None: weak = weakref.ref(self._events) server = Server( server_description=sd, diff --git a/test/test_server.py b/test/test_server.py index 064d77d024..58e39edd7f 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -29,7 +29,7 @@ class TestServer(unittest.TestCase): def test_repr(self): hello = Hello({"ok": 1}) sd = ServerDescription(("localhost", 27017), hello) - server = Server(sd, pool=object(), monitor=object()) + server = Server(sd, pool=object(), monitor=object()) # type: ignore[arg-type] self.assertTrue("Standalone" in str(server)) From eb8013ce026a639339603361e45364c3498a66cb Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:21:04 -0700 Subject: [PATCH 0945/2111] PYTHON-3740 add types to helpers.py (#1246) --- pymongo/cursor.py | 3 +- pymongo/helpers.py | 67 ++++++++++++++++++++++++++++++++----------- pymongo/operations.py | 6 ++-- 3 files changed, 55 insertions(+), 21 deletions(-) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 8d131a711e..a5722c8d08 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -274,6 +274,7 @@ def __init__( self.__show_record_id = show_record_id self.__allow_disk_use = allow_disk_use self.__snapshot = snapshot + self.__hint: Union[str, SON[str, Any], None] self.__set_hint(hint) # Exhaust cursor support @@ -437,7 +438,7 @@ def close(self) -> None: def __query_spec(self): """Get the spec to use for a query.""" - operators = {} + operators: Dict[str, Any] = {} if self.__ordering: operators["$orderby"] = self.__ordering if self.__explain: diff --git a/pymongo/helpers.py b/pymongo/helpers.py index f4582854dc..4b26c36cff 100644 --- a/pymongo/helpers.py +++ b/pymongo/helpers.py @@ -13,11 +13,26 @@ # limitations under the License. """Bits and pieces used by the driver that don't really fit elsewhere.""" +from __future__ import annotations import sys import traceback from collections import abc -from typing import Any, List, NoReturn +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Tuple, + TypeVar, + Union, + cast, +) from bson.son import SON from pymongo import ASCENDING @@ -34,8 +49,12 @@ ) from pymongo.hello import HelloCompat +if TYPE_CHECKING: + from pymongo.cursor import _Hint + from pymongo.operations import _IndexList + # From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES = frozenset( +_SHUTDOWN_CODES: frozenset = frozenset( [ 11600, # InterruptedAtShutdown 91, # ShutdownInProgress @@ -69,15 +88,17 @@ ) # Server code raised when re-authentication is required -_REAUTHENTICATION_REQUIRED_CODE = 391 +_REAUTHENTICATION_REQUIRED_CODE: int = 391 -def _gen_index_name(keys): +def _gen_index_name(keys: _IndexList) -> str: """Generate an index name from the set of fields it is over.""" return "_".join(["{}_{}".format(*item) for item in keys]) -def _index_list(key_or_list, direction=None): +def _index_list( + key_or_list: _Hint, direction: Optional[Union[int, str]] = None +) -> Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]]: """Helper to generate a list of (key, direction) pairs. Takes such a list, or a single key, or a single key and direction. @@ -93,7 +114,7 @@ def _index_list(key_or_list, direction=None): return list(key_or_list) elif not isinstance(key_or_list, (list, tuple)): raise TypeError("if no direction is specified, key_or_list must be an instance of list") - values = [] + values: List[Tuple[str, int]] = [] for item in key_or_list: if isinstance(item, str): item = (item, ASCENDING) @@ -101,7 +122,7 @@ def _index_list(key_or_list, direction=None): return values -def _index_document(index_list): +def _index_document(index_list: _IndexList) -> SON[str, Any]: """Helper to generate an index specifying document. Takes a list of (key, direction) pairs. @@ -134,13 +155,19 @@ def _index_document(index_list): def _check_command_response( - response, max_wire_version, allowable_errors=None, parse_write_concern_error=False -): + response: Mapping[str, Any], + max_wire_version: Optional[int], + allowable_errors: Optional[List[int]] = None, + parse_write_concern_error: bool = False, +) -> None: """Check the response to a command for errors.""" if "ok" not in response: # Server didn't recognize our message as a command. raise OperationFailure( - response.get("$err"), response.get("code"), response, max_wire_version + response.get("$err"), # type: ignore[arg-type] + response.get("code"), + response, + max_wire_version, ) if parse_write_concern_error and "writeConcernError" in response: @@ -210,7 +237,7 @@ def _raise_write_concern_error(error: Any) -> NoReturn: raise WriteConcernError(error.get("errmsg"), error.get("code"), error) -def _get_wce_doc(result): +def _get_wce_doc(result: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: """Return the writeConcernError or None.""" wce = result.get("writeConcernError") if wce: @@ -222,7 +249,7 @@ def _get_wce_doc(result): return wce -def _check_write_command_response(result): +def _check_write_command_response(result: Mapping[str, Any]) -> None: """Backward compatibility helper for write command error handling.""" # Prefer write errors over write concern errors write_errors = result.get("writeErrors") @@ -234,7 +261,9 @@ def _check_write_command_response(result): _raise_write_concern_error(wce) -def _fields_list_to_dict(fields, option_name): +def _fields_list_to_dict( + fields: Union[Mapping[str, Any], Iterable[str]], option_name: str +) -> Mapping[str, Any]: """Takes a sequence of field names and returns a matching dictionary. ["a", "b"] becomes {"a": 1, "b": 1} @@ -254,7 +283,7 @@ def _fields_list_to_dict(fields, option_name): raise TypeError(f"{option_name} must be a mapping or list of key names") -def _handle_exception(): +def _handle_exception() -> None: """Print exceptions raised by subscribers to stderr.""" # Heavily influenced by logging.Handler.handleError. @@ -270,8 +299,12 @@ def _handle_exception(): del einfo -def _handle_reauth(func): - def inner(*args, **kwargs): +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def _handle_reauth(func: F) -> F: + def inner(*args: Any, **kwargs: Any) -> Any: no_reauth = kwargs.pop("no_reauth", False) from pymongo.pool import SocketInfo @@ -299,4 +332,4 @@ def inner(*args, **kwargs): return func(*args, **kwargs) raise - return inner + return cast(F, inner) diff --git a/pymongo/operations.py b/pymongo/operations.py index 3ff4ed57a3..ed270c1ca6 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -94,7 +94,7 @@ def __init__( validate_is_mapping("filter", filter) if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers._index_document(hint) # type: ignore[assignment] self._filter = filter self._collation = collation self._hint = hint @@ -150,7 +150,7 @@ def __init__( validate_is_mapping("filter", filter) if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers._index_document(hint) # type: ignore[assignment] self._filter = filter self._collation = collation self._hint = hint @@ -213,7 +213,7 @@ def __init__( validate_boolean("upsert", upsert) if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers._index_document(hint) # type: ignore[assignment] self._filter = filter self._doc = replacement From 8b2320440863ccd87b50713a2dafdf0033016e4f Mon Sep 17 00:00:00 2001 From: stephan-hof Date: Mon, 26 Jun 2023 21:20:01 +0200 Subject: [PATCH 0946/2111] PYTHON-3758 Support overflow integers in fallback_encoder. (#1243) bson only supports 64-bit integer within range: [-9_223_372_036_854_775_807, +9_223_372_036_854_775_807] This change calls the fallback_encoder before raising OverflowError on integers outside of this range. --- bson/__init__.py | 13 ++++++++++++- bson/_cbsonmodule.c | 24 ++++++++++++++++-------- doc/contributors.rst | 1 + test/test_custom_types.py | 25 +++++++++++++++++++++++++ 4 files changed, 54 insertions(+), 9 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index d0a8daa273..fd11c9952b 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -896,12 +896,21 @@ def _name_value_to_bson( in_fallback_call: bool = False, ) -> bytes: """Encode a single name, value pair.""" + + was_integer_overflow = False + # First see if the type is already cached. KeyError will only ever # happen once per subtype. try: return _ENCODERS[type(value)](name, value, check_keys, opts) # type: ignore except KeyError: pass + except OverflowError: + if not isinstance(value, int): + raise + + # Give the fallback_encoder a chance + was_integer_overflow = True # Second, fall back to trying _type_marker. This has to be done # before the loop below since users could subclass one of our @@ -927,7 +936,7 @@ def _name_value_to_bson( # is done after trying the custom type encoder because checking for each # subtype is expensive. for base in _BUILT_IN_TYPES: - if isinstance(value, base): + if not was_integer_overflow and isinstance(value, base): func = _ENCODERS[base] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func @@ -941,6 +950,8 @@ def _name_value_to_bson( name, fallback_encoder(value), check_keys, opts, in_fallback_call=True ) + if was_integer_overflow: + raise OverflowError("BSON can only handle up to 8-byte ints") raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}") diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 5918a678c6..68ea6b63c4 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -662,6 +662,13 @@ static int write_element_to_buffer(PyObject* self, buffer_t buffer, static void _set_cannot_encode(PyObject* value) { + if (PyLong_Check(value)) { + if ((PyLong_AsLongLong(value) == -1) && PyErr_Occurred()) { + return PyErr_SetString(PyExc_OverflowError, + "MongoDB can only handle up to 8-byte ints"); + } + } + PyObject* type = NULL; PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument == NULL) { @@ -1069,16 +1076,17 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, long long long_long_value; PyErr_Clear(); long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow AGAIN */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; + if (PyErr_Occurred()) { + /* Ignore error and give the fallback_encoder a chance. */ + PyErr_Clear(); + } else { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; + return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; - return buffer_write_int64(buffer, (int64_t)long_long_value); + } else { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; + return buffer_write_int32(buffer, (int32_t)int_value); } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_int32(buffer, (int32_t)int_value); } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; diff --git a/doc/contributors.rst b/doc/contributors.rst index e6d5e5310d..2a4ca1ea47 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -97,3 +97,4 @@ The following is a list of people who have contributed to - Sean Cheah (thalassemia) - Dainis Gorbunovs (DainisGorbunovs) - Iris Ho (sleepyStick) +- Stephan Hof (stephan-hof) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 14d7b4b05d..7e190483a3 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -274,6 +274,22 @@ def fallback_encoder(value): with self.assertRaises(TypeError): encode(document, codec_options=codecopts) + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): @@ -623,6 +639,15 @@ def setUp(self): def tearDown(self): self.db.test.drop() + def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + collection.insert_one({"_id": 1, "data": 2**520}) + ret = collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + def test_command_errors_w_custom_type_decoder(self): db = self.db test_doc = {"_id": 1, "data": "a"} From 3d2a650cbeaf990b7ca1493ba80c5c9a5fe8e56a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 12:32:05 -0700 Subject: [PATCH 0947/2111] PYTHON-3755 add types to aggregation.py (#1254) --- pymongo/aggregation.py | 80 ++++++++++++++++++++++++++-------------- pymongo/change_stream.py | 1 - 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py index a97455cb29..feac81c7c7 100644 --- a/pymongo/aggregation.py +++ b/pymongo/aggregation.py @@ -13,6 +13,10 @@ # permissions and limitations under the License. """Perform aggregation operations on a collection or database.""" +from __future__ import annotations + +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union from bson.son import SON from pymongo import common @@ -20,6 +24,16 @@ from pymongo.errors import ConfigurationError from pymongo.read_preferences import ReadPreference, _AggWritePref +if TYPE_CHECKING: + from pymongo.client_session import ClientSession + from pymongo.collection import Collection + from pymongo.command_cursor import CommandCursor + from pymongo.database import Database + from pymongo.pool import SocketInfo + from pymongo.read_preferences import _ServerMode + from pymongo.server import Server + from pymongo.typings import _Pipeline + class _AggregationCommand: """The internal abstract base class for aggregation cursors. @@ -31,17 +45,16 @@ class _AggregationCommand: def __init__( self, - target, - cursor_class, - pipeline, - options, - explicit_session, - let=None, - user_fields=None, - result_processor=None, - comment=None, - show_expanded_events=None, - ): + target: Union[Database, Collection], + cursor_class: type[CommandCursor], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + explicit_session: bool, + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], SocketInfo], None]] = None, + comment: Any = None, + ) -> None: if "explain" in options: raise ConfigurationError( "The explain option is not supported. Use Database.command instead." @@ -85,28 +98,31 @@ def __init__( self._collation = validate_collation_or_none(options.pop("collation", None)) self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) - self._write_preference = None + self._write_preference: Optional[_AggWritePref] = None @property - def _aggregation_target(self): + def _aggregation_target(self) -> Union[str, int]: """The argument to pass to the aggregate command.""" raise NotImplementedError @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: """The namespace in which the aggregate command is run.""" raise NotImplementedError - def _cursor_collection(self, cursor_doc): + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" raise NotImplementedError @property - def _database(self): + def _database(self) -> Database: """The database against which the aggregation command is run.""" raise NotImplementedError - def get_read_preference(self, session): + def get_read_preference( + self, session: Optional[ClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + if self._write_preference: return self._write_preference pref = self._target._read_preference_for(session) @@ -114,7 +130,13 @@ def get_read_preference(self, session): self._write_preference = pref = _AggWritePref(pref) return pref - def get_cursor(self, session, server, sock_info, read_preference): + def get_cursor( + self, + session: ClientSession, + server: Server, + sock_info: SocketInfo, + read_preference: _ServerMode, + ) -> CommandCursor: # Serialize command. cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) cmd.update(self._options) @@ -183,25 +205,27 @@ def get_cursor(self, session, server, sock_info, read_preference): class _CollectionAggregationCommand(_AggregationCommand): + _target: Collection + @property - def _aggregation_target(self): + def _aggregation_target(self) -> str: return self._target.name @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: return self._target.full_name - def _cursor_collection(self, cursor): + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" return self._target @property - def _database(self): + def _database(self) -> Database: return self._target.database class _CollectionRawAggregationCommand(_CollectionAggregationCommand): - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) # For raw-batches, we set the initial batchSize for the cursor to 0. @@ -210,19 +234,21 @@ def __init__(self, *args, **kwargs): class _DatabaseAggregationCommand(_AggregationCommand): + _target: Database + @property - def _aggregation_target(self): + def _aggregation_target(self) -> int: return 1 @property - def _cursor_namespace(self): + def _cursor_namespace(self) -> str: return f"{self._target.name}.$cmd.aggregate" @property - def _database(self): + def _database(self) -> Database: return self._target - def _cursor_collection(self, cursor): + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: """The Collection used for the aggregate command cursor.""" # Collection level aggregate may not always return the "ns" field # according to our MockupDB tests. Let's handle that case for db level diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 3a4d968c18..1e2be563b7 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -238,7 +238,6 @@ def _run_aggregation_cmd(self, session, explicit_session): explicit_session, result_processor=self._process_result, comment=self._comment, - show_expanded_events=self._show_expanded_events, ) return self._client._retryable_read( cmd.get_cursor, self._target._read_preference_for(session), session From 940404ad3fec11ab8d86345d7542b86acc7df4a2 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:08:42 -0700 Subject: [PATCH 0948/2111] PYTHON-3771 add types to client_options.py (#1266) --- pymongo/client_options.py | 73 ++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 2e39b843ec..7e5be69283 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -13,25 +13,38 @@ # permissions and limitations under the License. """Tools to parse mongo client options.""" +from __future__ import annotations -from typing import Optional +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Tuple from bson.codec_options import _parse_codec_options from pymongo import common -from pymongo.auth import _build_credentials_tuple +from pymongo.auth import MongoCredential, _build_credentials_tuple from pymongo.common import validate_boolean from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError from pymongo.monitoring import _EventListeners from pymongo.pool import PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.read_preferences import ( + _ServerMode, + make_read_preference, + read_pref_mode_from_name, +) from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from bson.codec_options import CodecOptions + from pymongo.encryption import AutoEncryptionOpts + from pymongo.pyopenssl_context import SSLContext + from pymongo.server_selectors import Selection -def _parse_credentials(username, password, database, options): + +def _parse_credentials( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> Optional[MongoCredential]: """Parse authentication credentials.""" mechanism = options.get("authmechanism", "DEFAULT" if username else None) source = options.get("authsource") @@ -40,7 +53,7 @@ def _parse_credentials(username, password, database, options): return None -def _parse_read_preference(options): +def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: """Parse read preference options.""" if "read_preference" in options: return options["read_preference"] @@ -52,7 +65,7 @@ def _parse_read_preference(options): return make_read_preference(mode, tags, max_staleness) -def _parse_write_concern(options): +def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: """Parse write concern options.""" concern = options.get("w") wtimeout = options.get("wtimeoutms") @@ -61,13 +74,13 @@ def _parse_write_concern(options): return WriteConcern(concern, wtimeout, j, fsync) -def _parse_read_concern(options): +def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: """Parse read concern options.""" concern = options.get("readconcernlevel") return ReadConcern(concern) -def _parse_ssl_options(options): +def _parse_ssl_options(options: Mapping[str, Any]) -> Tuple[Optional[SSLContext], bool]: """Parse ssl options.""" use_tls = options.get("tls") if use_tls is not None: @@ -126,7 +139,9 @@ def _parse_ssl_options(options): return None, allow_invalid_hostnames -def _parse_pool_options(username, password, database, options): +def _parse_pool_options( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> PoolOptions: """Parse connection pool options.""" credentials = _parse_credentials(username, password, database, options) max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) @@ -175,7 +190,9 @@ class ClientOptions: instead. """ - def __init__(self, username, password, database, options): + def __init__( + self, username: str, password: str, database: Optional[str], options: Mapping[str, Any] + ): self.__options = options self.__codec_options = _parse_codec_options(options) self.__direct_connection = options.get("directconnection") @@ -200,66 +217,66 @@ def __init__(self, username, password, database, options): self.__timeout = options.get("timeoutms") @property - def _options(self): + def _options(self) -> Mapping[str, Any]: """The original options used to create this ClientOptions.""" return self.__options @property - def connect(self): + def connect(self) -> Optional[bool]: """Whether to begin discovering a MongoDB topology automatically.""" return self.__connect @property - def codec_options(self): + def codec_options(self) -> CodecOptions: """A :class:`~bson.codec_options.CodecOptions` instance.""" return self.__codec_options @property - def direct_connection(self): + def direct_connection(self) -> Optional[bool]: """Whether to connect to the deployment in 'Single' topology.""" return self.__direct_connection @property - def local_threshold_ms(self): + def local_threshold_ms(self) -> int: """The local threshold for this instance.""" return self.__local_threshold_ms @property - def server_selection_timeout(self): + def server_selection_timeout(self) -> int: """The server selection timeout for this instance in seconds.""" return self.__server_selection_timeout @property - def server_selector(self): + def server_selector(self) -> Callable[[Selection], Selection]: return self.__server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: """The monitoring frequency in seconds.""" return self.__heartbeat_frequency @property - def pool_options(self): + def pool_options(self) -> PoolOptions: """A :class:`~pymongo.pool.PoolOptions` instance.""" return self.__pool_options @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """A read preference instance.""" return self.__read_preference @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self.__replica_set_name @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """A :class:`~pymongo.write_concern.WriteConcern` instance.""" return self.__write_concern @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """A :class:`~pymongo.read_concern.ReadConcern` instance.""" return self.__read_concern @@ -272,27 +289,27 @@ def timeout(self) -> Optional[float]: return self.__timeout @property - def retry_writes(self): + def retry_writes(self) -> bool: """If this instance should retry supported write operations.""" return self.__retry_writes @property - def retry_reads(self): + def retry_reads(self) -> bool: """If this instance should retry supported read operations.""" return self.__retry_reads @property - def auto_encryption_opts(self): + def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" return self.__auto_encryption_opts @property - def load_balanced(self): + def load_balanced(self) -> Optional[bool]: """True if the client was configured to connect to a load balancer.""" return self.__load_balanced @property - def event_listeners(self): + def event_listeners(self) -> _EventListeners: """The event listeners registered for this client. See :mod:`~pymongo.monitoring` for details. From 1e14e89d0e7f6f09b841adcacef65cd1e5675c44 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:13:07 -0700 Subject: [PATCH 0949/2111] PYTHON-3769 add types to auth_aws.py (#1264) --- pymongo/auth_aws.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py index bfa4c731d3..62aab6a219 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/auth_aws.py @@ -13,6 +13,7 @@ # limitations under the License. """MONGODB-AWS Authentication helpers.""" +from __future__ import annotations try: import pymongo_auth_aws @@ -38,11 +39,18 @@ def set_cached_credentials(creds): pass +from typing import TYPE_CHECKING, Any, Mapping + import bson from bson.binary import Binary from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure +if TYPE_CHECKING: + from bson.typings import _DocumentIn, _ReadableBuffer + from pymongo.auth import MongoCredential + from pymongo.pool import SocketInfo + class _AwsSaslContext(AwsSaslContext): # type: ignore # Dependency injection: @@ -50,16 +58,16 @@ def binary_type(self): """Return the bson.binary.Binary type.""" return Binary - def bson_encode(self, doc): + def bson_encode(self, doc: _DocumentIn) -> bytes: """Encode a dictionary to BSON.""" return bson.encode(doc) - def bson_decode(self, data): + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: """Decode BSON to a dictionary.""" return bson.decode(data) -def _authenticate_aws(credentials, sock_info): +def _authenticate_aws(credentials: MongoCredential, sock_info: SocketInfo) -> None: """Authenticate using MONGODB-AWS.""" if not _HAVE_MONGODB_AWS: raise ConfigurationError( From 5397d74668d50dc450af3b4c108bdedaee952d85 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:13:25 -0700 Subject: [PATCH 0950/2111] PYTHON-3767 add types to ocsp_support.py (#1262) --- pymongo/ocsp_support.py | 129 ++++++++++++++++++++++++++-------------- 1 file changed, 84 insertions(+), 45 deletions(-) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index dd070748a4..fa9bd1b7e6 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -13,11 +13,13 @@ # permissions and limitations under the License. """Support for requesting and verifying OCSP responses.""" +from __future__ import annotations import logging as _logging import re as _re from datetime import datetime as _datetime from datetime import timezone +from typing import TYPE_CHECKING, Iterable, List, Optional, Type, Union, cast from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend @@ -51,6 +53,26 @@ from pymongo import _csot +if TYPE_CHECKING: + from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed448, ed25519, rsa + from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + from cryptography.hazmat.primitives.hashes import HashAlgorithm + from cryptography.x509 import Certificate, Name + from cryptography.x509.extensions import Extension, ExtensionTypeVar + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + from OpenSSL.SSL import Connection + + from pymongo.ocsp_cache import _OCSPCache + from pymongo.pyopenssl_context import _CallbackData + + CertificateIssuerPublicKeyTypes = Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + ] + # Note: the functions in this module generally return 1 or 0. The reason # is simple. The entry point, ocsp_callback, is registered as a callback # with OpenSSL through PyOpenSSL. The callback must return 1 (success) or @@ -63,7 +85,7 @@ ) -def _load_trusted_ca_certs(cafile): +def _load_trusted_ca_certs(cafile: str) -> List[Certificate]: """Parse the tlsCAFile into a list of certificates.""" with open(cafile, "rb") as f: data = f.read() @@ -76,7 +98,9 @@ def _load_trusted_ca_certs(cafile): return trusted_ca_certs -def _get_issuer_cert(cert, chain, trusted_ca_certs): +def _get_issuer_cert( + cert: Certificate, chain: Iterable[Certificate], trusted_ca_certs: Optional[List[Certificate]] +) -> Optional[Certificate]: issuer_name = cert.issuer for candidate in chain: if candidate.subject == issuer_name: @@ -93,16 +117,21 @@ def _get_issuer_cert(cert, chain, trusted_ca_certs): return None -def _verify_signature(key, signature, algorithm, data): +def _verify_signature( + key: CertificateIssuerPublicKeyTypes, + signature: bytes, + algorithm: Union[Prehashed, HashAlgorithm, None], + data: bytes, +) -> int: # See cryptography.x509.Certificate.public_key # for the public key types. try: if isinstance(key, _RSAPublicKey): - key.verify(signature, data, _PKCS1v15(), algorithm) + key.verify(signature, data, _PKCS1v15(), algorithm) # type: ignore[arg-type] elif isinstance(key, _DSAPublicKey): - key.verify(signature, data, algorithm) + key.verify(signature, data, algorithm) # type: ignore[arg-type] elif isinstance(key, _EllipticCurvePublicKey): - key.verify(signature, data, _ECDSA(algorithm)) + key.verify(signature, data, _ECDSA(algorithm)) # type: ignore[arg-type] else: key.verify(signature, data) except _InvalidSignature: @@ -110,14 +139,16 @@ def _verify_signature(key, signature, algorithm, data): return 1 -def _get_extension(cert, klass): +def _get_extension( + cert: Certificate, klass: Type[ExtensionTypeVar] +) -> Optional[Extension[ExtensionTypeVar]]: try: return cert.extensions.get_extension_for_class(klass) except _ExtensionNotFound: return None -def _public_key_hash(cert): +def _public_key_hash(cert: Certificate) -> bytes: public_key = cert.public_key() # https://tools.ietf.org/html/rfc2560#section-4.2.1 # "KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key @@ -134,7 +165,9 @@ def _public_key_hash(cert): return digest.finalize() -def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): +def _get_certs_by_key_hash( + certificates: Iterable[Certificate], issuer: Certificate, responder_key_hash: Optional[bytes] +) -> List[Certificate]: return [ cert for cert in certificates @@ -142,7 +175,9 @@ def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): ] -def _get_certs_by_name(certificates, issuer, responder_name): +def _get_certs_by_name( + certificates: Iterable[Certificate], issuer: Certificate, responder_name: Optional[Name] +) -> List[Certificate]: return [ cert for cert in certificates @@ -150,7 +185,7 @@ def _get_certs_by_name(certificates, issuer, responder_name): ] -def _verify_response_signature(issuer, response): +def _verify_response_signature(issuer: Certificate, response: OCSPResponse) -> int: # Response object will have a responder_name or responder_key_hash # not both. name = response.responder_name @@ -185,7 +220,7 @@ def _verify_response_signature(issuer, response): _LOGGER.debug("Delegate not authorized for OCSP signing") return 0 if not _verify_signature( - issuer.public_key(), + cast(CertificateIssuerPublicKeyTypes, issuer.public_key()), responder_cert.signature, responder_cert.signature_hash_algorithm, responder_cert.tbs_certificate_bytes, @@ -194,7 +229,7 @@ def _verify_response_signature(issuer, response): return 0 # RFC6960, Section 3.2, Number 2 ret = _verify_signature( - responder_cert.public_key(), + cast(CertificateIssuerPublicKeyTypes, responder_cert.public_key()), response.signature, response.signature_hash_algorithm, response.tbs_response_bytes, @@ -204,14 +239,14 @@ def _verify_response_signature(issuer, response): return ret -def _build_ocsp_request(cert, issuer): +def _build_ocsp_request(cert: Certificate, issuer: Certificate) -> OCSPRequest: # https://cryptography.io/en/latest/x509/ocsp/#creating-requests builder = _OCSPRequestBuilder() builder = builder.add_certificate(cert, issuer, _SHA1()) return builder.build() -def _verify_response(issuer, response): +def _verify_response(issuer: Certificate, response: OCSPResponse) -> int: _LOGGER.debug("Verifying response") # RFC6960, Section 3.2, Number 2, 3 and 4 happen here. res = _verify_response_signature(issuer, response) @@ -232,7 +267,9 @@ def _verify_response(issuer, response): return 1 -def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): +def _get_ocsp_response( + cert: Certificate, issuer: Certificate, uri: Union[str, bytes], ocsp_response_cache: _OCSPCache +) -> Optional[OCSPResponse]: ocsp_request = _build_ocsp_request(cert, issuer) try: ocsp_response = ocsp_response_cache[ocsp_request] @@ -275,30 +312,32 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): return ocsp_response -def _ocsp_callback(conn, ocsp_bytes, user_data): +def _ocsp_callback(conn: Connection, ocsp_bytes: bytes, user_data: Optional[_CallbackData]) -> bool: """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" - cert = conn.get_peer_certificate() - if cert is None: + # always pass in user_data but OpenSSL requires it be optional + assert user_data + pycert = conn.get_peer_certificate() + if pycert is None: _LOGGER.debug("No peer cert?") - return 0 - cert = cert.to_cryptography() + return False + cert = pycert.to_cryptography() # Use the verified chain when available (pyopenssl>=20.0). if hasattr(conn, "get_verified_chain"): - chain = conn.get_verified_chain() + pychain = conn.get_verified_chain() trusted_ca_certs = None else: - chain = conn.get_peer_cert_chain() + pychain = conn.get_peer_cert_chain() trusted_ca_certs = user_data.trusted_ca_certs - if not chain: + if not pychain: _LOGGER.debug("No peer cert chain?") - return 0 - chain = [cer.to_cryptography() for cer in chain] + return False + chain = [cer.to_cryptography() for cer in pychain] issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 - ext = _get_extension(cert, _TLSFeature) - if ext is not None: - for feature in ext.value: + ext_tls = _get_extension(cert, _TLSFeature) + if ext_tls is not None: + for feature in ext_tls.value: if feature == _TLSFeatureType.status_request: _LOGGER.debug("Peer presented a must-staple cert") must_staple = True @@ -310,29 +349,29 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("Peer did not staple an OCSP response") if must_staple: _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") - return 0 + return False if not user_data.check_ocsp_endpoint: _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") # No stapled OCSP response, checking responder URI disabled, soft fail. - return 1 + return True # https://tools.ietf.org/html/rfc6960#section-3.1 - ext = _get_extension(cert, _AuthorityInformationAccess) - if ext is None: + ext_aia = _get_extension(cert, _AuthorityInformationAccess) + if ext_aia is None: _LOGGER.debug("No authority access information, soft fail") # No stapled OCSP response, no responder URI, soft fail. - return 1 + return True uris = [ desc.access_location.value - for desc in ext.value + for desc in ext_aia.value if desc.access_method == _AuthorityInformationAccessOID.OCSP ] if not uris: _LOGGER.debug("No OCSP URI, soft fail") # No responder URI, soft fail. - return 1 + return True if issuer is None: _LOGGER.debug("No issuer cert?") - return 0 + return False _LOGGER.debug("Requesting OCSP data") # When requesting data from an OCSP endpoint we only fail on # successful, valid responses with a certificate status of REVOKED. @@ -346,28 +385,28 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): continue _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.GOOD: - return 1 + return True if response.certificate_status == _OCSPCertStatus.REVOKED: - return 0 + return False # Soft fail if we couldn't get a definitive status. _LOGGER.debug("No definitive OCSP cert status, soft fail") - return 1 + return True _LOGGER.debug("Peer stapled an OCSP response") if issuer is None: _LOGGER.debug("No issuer cert?") - return 0 + return False response = _load_der_ocsp_response(ocsp_bytes) _LOGGER.debug("OCSP response status: %r", response.response_status) # This happens in _request_ocsp when there is no stapled response so # we know if we can compare serial numbers for the request and response. if response.response_status != _OCSPResponseStatus.SUCCESSFUL: - return 0 + return False if not _verify_response(issuer, response): - return 0 + return False # Cache the verified, stapled response. ocsp_response_cache[_build_ocsp_request(cert, issuer)] = response _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.REVOKED: - return 0 - return 1 + return False + return True From 1f7cf0941d061db15661f1e77c92859fc2f7fd28 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:13:58 -0700 Subject: [PATCH 0951/2111] PYTHON-3772 add types to change_stream.py (#1267) --- pymongo/change_stream.py | 46 ++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 1e2be563b7..10bfd36236 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -16,13 +16,24 @@ from __future__ import annotations import copy -from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Mapping, + Optional, + Type, + Union, +) from bson import _bson_to_dict from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp from pymongo import _csot, common from pymongo.aggregation import ( + _AggregationCommand, _CollectionAggregationCommand, _DatabaseAggregationCommand, ) @@ -67,6 +78,7 @@ from pymongo.collection import Collection from pymongo.database import Database from pymongo.mongo_client import MongoClient + from pymongo.pool import SocketInfo def _resumable(exc: PyMongoError) -> bool: @@ -150,18 +162,18 @@ def __init__( self._cursor = self._create_cursor() @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_AggregationCommand]: """The aggregation command class to be used.""" raise NotImplementedError @property - def _client(self): + def _client(self) -> MongoClient: """The client against which the aggregation commands for this ChangeStream will be run. """ raise NotImplementedError - def _change_stream_options(self): + def _change_stream_options(self) -> Dict[str, Any]: """Return the options dict for the $changeStream pipeline stage.""" options: Dict[str, Any] = {} if self._full_document is not None: @@ -185,7 +197,7 @@ def _change_stream_options(self): return options - def _command_options(self): + def _command_options(self) -> Dict[str, Any]: """Return the options dict for the aggregation command.""" options = {} if self._max_await_time_ms is not None: @@ -194,14 +206,14 @@ def _command_options(self): options["batchSize"] = self._batch_size return options - def _aggregation_pipeline(self): + def _aggregation_pipeline(self) -> List[Dict[str, Any]]: """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() full_pipeline: list = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline - def _process_result(self, result, sock_info): + def _process_result(self, result: Mapping[str, Any], sock_info: SocketInfo) -> None: """Callback that caches the postBatchResumeToken or startAtOperationTime from a changeStream aggregate command response containing an empty batch of change documents. @@ -226,7 +238,9 @@ def _process_result(self, result, sock_info): "response : {!r}".format(result) ) - def _run_aggregation_cmd(self, session, explicit_session): + def _run_aggregation_cmd( + self, session: Optional[ClientSession], explicit_session: bool + ) -> CommandCursor: """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ @@ -247,7 +261,7 @@ def _create_cursor(self): with self._client._tmp_session(self._session, close=False) as s: return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) - def _resume(self): + def _resume(self) -> None: """Reestablish this change stream after a resumable error.""" try: self._cursor.close() @@ -437,12 +451,14 @@ class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + _target: Collection[_DocumentType] + @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: return _CollectionAggregationCommand @property - def _client(self): + def _client(self) -> MongoClient: return self._target.database.client @@ -455,12 +471,14 @@ class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ + _target: Database[_DocumentType] + @property - def _aggregation_command_class(self): + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: return _DatabaseAggregationCommand @property - def _client(self): + def _client(self) -> MongoClient: return self._target.client @@ -473,7 +491,7 @@ class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): .. versionadded:: 3.7 """ - def _change_stream_options(self): + def _change_stream_options(self) -> Dict[str, Any]: options = super()._change_stream_options() options["allChangesForCluster"] = True return options From 70666a65cc5ccf4c0f707fd5e6c121a6c0866582 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:15:38 -0700 Subject: [PATCH 0952/2111] PYTHON-3766 add types to ocsp_cache.py (#1261) --- pymongo/ocsp_cache.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index f6ac4bb08c..b0ac4d654f 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -14,12 +14,18 @@ """Utilities for caching OCSP responses.""" +from __future__ import annotations + from collections import namedtuple from datetime import datetime as _datetime from datetime import timezone +from typing import TYPE_CHECKING from pymongo.lock import _create_lock +if TYPE_CHECKING: + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + class _OCSPCache: """A cache for OCSP responses.""" @@ -34,7 +40,7 @@ def __init__(self): # Hold this lock when accessing _data. self._lock = _create_lock() - def _get_cache_key(self, ocsp_request): + def _get_cache_key(self, ocsp_request: OCSPRequest) -> CACHE_KEY_TYPE: return self.CACHE_KEY_TYPE( hash_algorithm=ocsp_request.hash_algorithm.name.lower(), issuer_name_hash=ocsp_request.issuer_name_hash, @@ -42,7 +48,7 @@ def _get_cache_key(self, ocsp_request): serial_number=ocsp_request.serial_number, ) - def __setitem__(self, key, value): + def __setitem__(self, key: OCSPRequest, value: OCSPResponse) -> None: """Add/update a cache entry. 'key' is of type cryptography.x509.ocsp.OCSPRequest @@ -74,7 +80,7 @@ def __setitem__(self, key, value): if cached_value is None or cached_value.next_update < value.next_update: self._data[cache_key] = value - def __getitem__(self, item): + def __getitem__(self, item: OCSPRequest) -> OCSPResponse: """Get a cache entry if it exists. 'item' is of type cryptography.x509.ocsp.OCSPRequest From 386f6d8b7f6979d1360ef4dad7964ec18c8a4fea Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:15:58 -0700 Subject: [PATCH 0953/2111] PYTHON-3765 add types to server_api.py (#1260) --- pymongo/server_api.py | 15 ++++++++++----- test/test_versioned_api.py | 4 ++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 2393615032..47812818de 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -83,6 +83,9 @@ Classes ======= """ +from __future__ import annotations + +from typing import Any, MutableMapping, Optional class ServerApiVersion: @@ -98,7 +101,9 @@ class ServerApiVersion: class ServerApi: """MongoDB Stable API.""" - def __init__(self, version, strict=None, deprecation_errors=None): + def __init__( + self, version: str, strict: Optional[bool] = None, deprecation_errors: Optional[bool] = None + ): """Options to configure MongoDB Stable API. :Parameters: @@ -129,7 +134,7 @@ def __init__(self, version, strict=None, deprecation_errors=None): self._deprecation_errors = deprecation_errors @property - def version(self): + def version(self) -> str: """The API version setting. This value is sent to the server in the "apiVersion" field. @@ -137,7 +142,7 @@ def version(self): return self._version @property - def strict(self): + def strict(self) -> Optional[bool]: """The API strict mode setting. When set, this value is sent to the server in the "apiStrict" field. @@ -145,7 +150,7 @@ def strict(self): return self._strict @property - def deprecation_errors(self): + def deprecation_errors(self) -> Optional[bool]: """The API deprecation errors setting. When set, this value is sent to the server in the @@ -154,7 +159,7 @@ def deprecation_errors(self): return self._deprecation_errors -def _add_to_command(cmd, server_api): +def _add_to_command(cmd: MutableMapping[str, Any], server_api: Optional[ServerApi]) -> None: """Internal helper which adds API versioning options to a command. :Parameters: diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 7dbf2c867d..3372c1a919 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -56,9 +56,9 @@ def test_server_api_validation(self): with self.assertRaises(ValueError): ServerApi("2") with self.assertRaises(TypeError): - ServerApi("1", strict="not-a-bool") + ServerApi("1", strict="not-a-bool") # type: ignore[arg-type] with self.assertRaises(TypeError): - ServerApi("1", deprecation_errors="not-a-bool") + ServerApi("1", deprecation_errors="not-a-bool") # type: ignore[arg-type] with self.assertRaises(TypeError): MongoClient(server_api="not-a-ServerApi") From 5c3cfa784830d5a06e129c5d6e074c1799a44381 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:16:20 -0700 Subject: [PATCH 0954/2111] PYTHON-3764 add types to settings.py (#1259) --- pymongo/settings.py | 79 ++++++++++++++++++++++--------------------- test/test_topology.py | 6 ++-- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/pymongo/settings.py b/pymongo/settings.py index 5d6ddefd36..3436fcad6b 100644 --- a/pymongo/settings.py +++ b/pymongo/settings.py @@ -16,34 +16,35 @@ import threading import traceback +from typing import Any, Collection, Dict, Optional, Tuple, Type, Union from bson.objectid import ObjectId from pymongo import common, monitor, pool from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT from pymongo.errors import ConfigurationError -from pymongo.pool import PoolOptions +from pymongo.pool import Pool, PoolOptions from pymongo.server_description import ServerDescription -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector class TopologySettings: def __init__( self, - seeds=None, - replica_set_name=None, - pool_class=None, - pool_options=None, - monitor_class=None, - condition_class=None, - local_threshold_ms=LOCAL_THRESHOLD_MS, - server_selection_timeout=SERVER_SELECTION_TIMEOUT, - heartbeat_frequency=common.HEARTBEAT_FREQUENCY, - server_selector=None, - fqdn=None, - direct_connection=False, - load_balanced=None, - srv_service_name=common.SRV_SERVICE_NAME, - srv_max_hosts=0, + seeds: Optional[Collection[Tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, ): """Represent MongoClient's configuration. @@ -55,12 +56,12 @@ def __init__( % (common.MIN_HEARTBEAT_INTERVAL * 1000,) ) - self._seeds = seeds or [("localhost", 27017)] + self._seeds: Collection[Tuple[str, int]] = seeds or [("localhost", 27017)] self._replica_set_name = replica_set_name - self._pool_class = pool_class or pool.Pool - self._pool_options = pool_options or PoolOptions() - self._monitor_class = monitor_class or monitor.Monitor - self._condition_class = condition_class or threading.Condition + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition self._local_threshold_ms = local_threshold_ms self._server_selection_timeout = server_selection_timeout self._server_selector = server_selector @@ -77,52 +78,52 @@ def __init__( self._stack = "".join(traceback.format_stack()) @property - def seeds(self): + def seeds(self) -> Collection[Tuple[str, int]]: """List of server addresses.""" return self._seeds @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: return self._replica_set_name @property - def pool_class(self): + def pool_class(self) -> Type[Pool]: return self._pool_class @property - def pool_options(self): + def pool_options(self) -> PoolOptions: return self._pool_options @property - def monitor_class(self): + def monitor_class(self) -> Optional[Type[monitor.Monitor]]: return self._monitor_class @property - def condition_class(self): + def condition_class(self) -> Optional[Type[threading.Condition]]: return self._condition_class @property - def local_threshold_ms(self): + def local_threshold_ms(self) -> int: return self._local_threshold_ms @property - def server_selection_timeout(self): + def server_selection_timeout(self) -> int: return self._server_selection_timeout @property - def server_selector(self): + def server_selector(self) -> Optional[_ServerSelector]: return self._server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self._heartbeat_frequency @property - def fqdn(self): + def fqdn(self) -> Optional[str]: return self._fqdn @property - def direct(self): + def direct(self) -> Optional[bool]: """Connect directly to a single server, or use a set of servers? True if there is one seed and no replica_set_name. @@ -130,21 +131,21 @@ def direct(self): return self._direct @property - def load_balanced(self): + def load_balanced(self) -> Optional[bool]: """True if the client was configured to connect to a load balancer.""" return self._load_balanced @property - def srv_service_name(self): + def srv_service_name(self) -> str: """The srvServiceName.""" return self._srv_service_name @property - def srv_max_hosts(self): + def srv_max_hosts(self) -> int: """The srvMaxHosts.""" return self._srv_max_hosts - def get_topology_type(self): + def get_topology_type(self) -> int: if self.load_balanced: return TOPOLOGY_TYPE.LoadBalanced elif self.direct: @@ -154,6 +155,6 @@ def get_topology_type(self): else: return TOPOLOGY_TYPE.Unknown - def get_server_descriptions(self): + def get_server_descriptions(self) -> Dict[Union[Tuple[str, int], Any], ServerDescription]: """Initial dict of (address, ServerDescription) for all seeds.""" return {address: ServerDescription(address) for address in self.seeds} diff --git a/test/test_topology.py b/test/test_topology.py index adbf19f571..a7bfeb766e 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -52,7 +52,7 @@ def create_mock_topology( topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, - pool_class=MockPool, + pool_class=MockPool, # type: ignore[arg-type] monitor_class=monitor_class, direct_connection=direct_connection, ) @@ -451,7 +451,7 @@ def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] ) t = Topology(topology_settings) @@ -479,7 +479,7 @@ def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor # type: ignore[arg-type] ) t = Topology(topology_settings) From de61d1ac8f6a812d42778b6c5b5745bfda6d0e69 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:17:47 -0700 Subject: [PATCH 0955/2111] PYTHON-3752 add types to bulk.py (#1250) --- pymongo/bulk.py | 180 +++++++++++++++++++++++++++++------------- pymongo/operations.py | 26 ++++-- 2 files changed, 145 insertions(+), 61 deletions(-) diff --git a/pymongo/bulk.py b/pymongo/bulk.py index b0f61b9f9f..49c355e34f 100644 --- a/pymongo/bulk.py +++ b/pymongo/bulk.py @@ -16,16 +16,30 @@ .. versionadded:: 2.7 """ +from __future__ import annotations + import copy +from collections.abc import MutableMapping from itertools import islice -from typing import Any, NoReturn +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + List, + Mapping, + NoReturn, + Optional, + Tuple, + Type, + Union, +) from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo import _csot, common -from pymongo.client_session import _validate_session_write_concern -from pymongo.collation import validate_collation_or_none +from pymongo.client_session import ClientSession, _validate_session_write_concern from pymongo.common import ( validate_is_document_type, validate_ok_for_replace, @@ -49,28 +63,34 @@ from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern -_DELETE_ALL = 0 -_DELETE_ONE = 1 +if TYPE_CHECKING: + from pymongo.collection import Collection + from pymongo.operations import _IndexKeyHint + from pymongo.pool import SocketInfo + from pymongo.typings import _DocumentType + +_DELETE_ALL: int = 0 +_DELETE_ONE: int = 1 # For backwards compatibility. See MongoDB src/mongo/base/error_codes.err -_BAD_VALUE = 2 -_UNKNOWN_ERROR = 8 -_WRITE_CONCERN_ERROR = 64 +_BAD_VALUE: int = 2 +_UNKNOWN_ERROR: int = 8 +_WRITE_CONCERN_ERROR: int = 64 -_COMMANDS = ("insert", "update", "delete") +_COMMANDS: Tuple[str, str, str] = ("insert", "update", "delete") class _Run: """Represents a batch of write operations.""" - def __init__(self, op_type): + def __init__(self, op_type: int) -> None: """Initialize a new Run object.""" - self.op_type = op_type - self.index_map = [] - self.ops = [] - self.idx_offset = 0 + self.op_type: int = op_type + self.index_map: List[int] = [] + self.ops: List[Any] = [] + self.idx_offset: int = 0 - def index(self, idx): + def index(self, idx: int) -> int: """Get the original index of an operation in this run. :Parameters: @@ -78,7 +98,7 @@ def index(self, idx): """ return self.index_map[idx] - def add(self, original_index, operation): + def add(self, original_index: int, operation: Any) -> None: """Add an operation to this Run instance. :Parameters: @@ -90,7 +110,12 @@ def add(self, original_index, operation): self.ops.append(operation) -def _merge_command(run, full_result, offset, result): +def _merge_command( + run: _Run, + full_result: MutableMapping[str, Any], + offset: int, + result: Mapping[str, Any], +) -> None: """Merge a write command result into the full bulk result.""" affected = result.get("n", 0) @@ -129,7 +154,7 @@ def _merge_command(run, full_result, offset, result): full_result["writeConcernErrors"].append(wce) -def _raise_bulk_write_error(full_result: Any) -> NoReturn: +def _raise_bulk_write_error(full_result: Mapping[str, Any]) -> NoReturn: """Raise a BulkWriteError from the full bulk api result.""" if full_result["writeErrors"]: full_result["writeErrors"].sort(key=lambda error: error["index"]) @@ -139,7 +164,14 @@ def _raise_bulk_write_error(full_result: Any) -> NoReturn: class _Bulk: """The private guts of the bulk write API.""" - def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): + def __init__( + self, + collection: Collection[_DocumentType], + ordered: bool, + bypass_document_validation: bool, + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: """Initialize a _Bulk instance.""" self.collection = collection.with_options( codec_options=collection.codec_options._replace( @@ -149,9 +181,9 @@ def __init__(self, collection, ordered, bypass_document_validation, comment=None self.let = let if self.let is not None: common.validate_is_document_type("let", self.let) - self.comment = comment + self.comment: Optional[str] = comment self.ordered = ordered - self.ops = [] + self.ops: List[Tuple[int, Mapping[str, Any]]] = [] self.executed = False self.bypass_doc_val = bypass_document_validation self.uses_collation = False @@ -166,14 +198,14 @@ def __init__(self, collection, ordered, bypass_document_validation, comment=None self.next_run = None @property - def bulk_ctx_class(self): + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: encrypter = self.collection.database.client._encrypter if encrypter and not encrypter._bypass_auto_encryption: return _EncryptedBulkWriteContext else: return _BulkWriteContext - def add_insert(self, document): + def add_insert(self, document: MutableMapping[str, Any]) -> None: """Add an insert document to the list of ops.""" validate_is_document_type("document", document) # Generate ObjectId client side. @@ -183,18 +215,22 @@ def add_insert(self, document): def add_update( self, - selector, - update, - multi=False, - upsert=False, - collation=None, - array_filters=None, - hint=None, - ): + selector: Mapping[str, Any], + update: Union[ + Mapping[str, Any], + List[Mapping[str, Any]], + ], + multi: bool = False, + upsert: bool = False, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[List[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd = SON([("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)]) - collation = validate_collation_or_none(collation) + cmd: Dict[str, Any] = dict( + [("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)] + ) if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -209,11 +245,17 @@ def add_update( self.is_retryable = False self.ops.append((_UPDATE, cmd)) - def add_replace(self, selector, replacement, upsert=False, collation=None, hint=None): + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + collation: Optional[Mapping[str, Any]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) - collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -222,10 +264,15 @@ def add_replace(self, selector, replacement, upsert=False, collation=None, hint= cmd["hint"] = hint self.ops.append((_UPDATE, cmd)) - def add_delete(self, selector, limit, collation=None, hint=None): + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: """Create a delete document and add it to the list of ops.""" cmd = SON([("q", selector), ("limit", limit)]) - collation = validate_collation_or_none(collation) if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -237,7 +284,7 @@ def add_delete(self, selector, limit, collation=None, hint=None): self.is_retryable = False self.ops.append((_DELETE, cmd)) - def gen_ordered(self): + def gen_ordered(self) -> Iterator[Optional[_Run]]: """Generate batches of operations, batched by type of operation, in the order **provided**. """ @@ -251,7 +298,7 @@ def gen_ordered(self): run.add(idx, operation) yield run - def gen_unordered(self): + def gen_unordered(self) -> Iterator[_Run]: """Generate batches of operations, batched by type of operation, in arbitrary order. """ @@ -265,15 +312,15 @@ def gen_unordered(self): def _execute_command( self, - generator, - write_concern, - session, - sock_info, - op_id, - retryable, - full_result, - final_write_concern=None, - ): + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + sock_info: SocketInfo, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: db_name = self.collection.database.name client = self.collection.database.client listeners = client._event_listeners @@ -366,7 +413,12 @@ def _execute_command( # Reset our state self.current_run = run = self.next_run - def execute_command(self, generator, write_concern, session): + def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + ) -> Dict[str, Any]: """Execute using write commands.""" # nModified is only reported for write commands, not legacy ops. full_result = { @@ -381,9 +433,17 @@ def execute_command(self, generator, write_concern, session): } op_id = _randint() - def retryable_bulk(session, sock_info, retryable): + def retryable_bulk( + session: Optional[ClientSession], sock_info: SocketInfo, retryable: bool + ) -> None: self._execute_command( - generator, write_concern, session, sock_info, op_id, retryable, full_result + generator, + write_concern, + session, + sock_info, + op_id, + retryable, + full_result, ) client = self.collection.database.client @@ -394,7 +454,7 @@ def retryable_bulk(session, sock_info, retryable): _raise_bulk_write_error(full_result) return full_result - def execute_op_msg_no_results(self, sock_info, generator): + def execute_op_msg_no_results(self, sock_info: SocketInfo, generator: Iterator[Any]) -> None: """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" db_name = self.collection.database.name client = self.collection.database.client @@ -433,7 +493,12 @@ def execute_op_msg_no_results(self, sock_info, generator): run.idx_offset += len(to_send) self.current_run = run = next(generator, None) - def execute_command_no_results(self, sock_info, generator, write_concern): + def execute_command_no_results( + self, + sock_info: SocketInfo, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" full_result = { "writeErrors": [], @@ -464,7 +529,12 @@ def execute_command_no_results(self, sock_info, generator, write_concern): except OperationFailure: pass - def execute_no_results(self, sock_info, generator, write_concern): + def execute_no_results( + self, + sock_info: SocketInfo, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: """Execute all operations, returning no results (w=0).""" if self.uses_collation: raise ConfigurationError("Collation is unsupported for unacknowledged writes.") @@ -490,7 +560,7 @@ def execute_no_results(self, sock_info, generator, write_concern): return self.execute_command_no_results(sock_info, generator, write_concern) return self.execute_op_msg_no_results(sock_info, generator) - def execute(self, write_concern, session): + def execute(self, write_concern: WriteConcern, session: Optional[ClientSession]) -> Any: """Execute operations.""" if not self.ops: raise InvalidOperation("No operations to execute") diff --git a/pymongo/operations.py b/pymongo/operations.py index ed270c1ca6..fc9dac0fe5 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -101,7 +101,12 @@ def __init__( def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) def __repr__(self): return f"DeleteOne({self._filter!r}, {self._collation!r})" @@ -157,7 +162,12 @@ def __init__( def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) def __repr__(self): return f"DeleteMany({self._filter!r}, {self._collation!r})" @@ -224,12 +234,16 @@ def __init__( def _add_to_bulk(self, bulkobj): """Add this operation to the _Bulk instance `bulkobj`.""" bulkobj.add_replace( - self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, ) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return (other._filter, other._doc, other._upsert, other._collation, other._hint) == ( + return (other._filter, other._doc, other._upsert, other._collation, other._hint,) == ( self._filter, self._doc, self._upsert, @@ -361,7 +375,7 @@ def _add_to_bulk(self, bulkobj): self._doc, False, self._upsert, - collation=self._collation, + collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, ) @@ -419,7 +433,7 @@ def _add_to_bulk(self, bulkobj): self._doc, True, self._upsert, - collation=self._collation, + collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, ) From ba7be3c1bb71cd13c483ac7c5fa6e12faa0a276d Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:24:17 -0700 Subject: [PATCH 0956/2111] PYTHON-3781 fix type for server_selector (#1269) --- pymongo/client_options.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 7e5be69283..91ef51a526 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -15,7 +15,7 @@ """Tools to parse mongo client options.""" from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, Tuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Tuple from bson.codec_options import _parse_codec_options from pymongo import common @@ -39,7 +39,7 @@ from bson.codec_options import CodecOptions from pymongo.encryption import AutoEncryptionOpts from pymongo.pyopenssl_context import SSLContext - from pymongo.server_selectors import Selection + from pymongo.topology_description import _ServerSelector def _parse_credentials( @@ -247,7 +247,7 @@ def server_selection_timeout(self) -> int: return self.__server_selection_timeout @property - def server_selector(self) -> Callable[[Selection], Selection]: + def server_selector(self) -> _ServerSelector: return self.__server_selector @property From 12cbeb86ec6f9398e762b7f5645f2cf977489af1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:27:57 -0700 Subject: [PATCH 0957/2111] PYTHON-3784 add types to daemon.py (#1273) --- pymongo/daemon.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pymongo/daemon.py b/pymongo/daemon.py index 4fdf147a59..643eb58b6e 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -23,13 +23,14 @@ import subprocess import sys import warnings +from typing import Optional, Sequence # The maximum amount of time to wait for the intermediate subprocess. _WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) -def _popen_wait(popen, timeout): +def _popen_wait(popen: subprocess.Popen, timeout: Optional[float]) -> Optional[int]: """Implement wait timeout support for Python 3.""" try: return popen.wait(timeout=timeout) @@ -38,7 +39,7 @@ def _popen_wait(popen, timeout): return None -def _silence_resource_warning(popen): +def _silence_resource_warning(popen: Optional[subprocess.Popen]) -> None: """Silence Popen's ResourceWarning. Note this should only be used if the process was created as a daemon. @@ -56,7 +57,7 @@ def _silence_resource_warning(popen): # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Windows).""" try: with open(os.devnull, "r+b") as devnull: @@ -87,7 +88,7 @@ def _spawn_daemon(args): # to be safe to call from any thread. Using Popen instead of fork also # avoids triggering the application's os.register_at_fork() callbacks when # we spawn the mongocryptd daemon process. - def _spawn(args): + def _spawn(args: Sequence[str]) -> Optional[subprocess.Popen]: """Spawn the process and silence stdout/stderr.""" try: with open(os.devnull, "r+b") as devnull: @@ -100,8 +101,9 @@ def _spawn(args): RuntimeWarning, stacklevel=2, ) + return None - def _spawn_daemon_double_popen(args): + def _spawn_daemon_double_popen(args: Sequence[str]) -> None: """Spawn a daemon process using a double subprocess.Popen.""" spawner_args = [sys.executable, _THIS_FILE] spawner_args.extend(args) @@ -110,7 +112,7 @@ def _spawn_daemon_double_popen(args): # processes. _popen_wait(temp_proc, _WAIT_TIMEOUT) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Unix).""" # "If Python is unable to retrieve the real path to its executable, # sys.executable will be an empty string or None". From 91711ee366b268d59dc517ca94f67bcb7e948bb8 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:28:46 -0700 Subject: [PATCH 0958/2111] PYTHON-3783 add types to compression_support.py (#1272) --- pymongo/compression_support.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 40bad403f3..030376fbd1 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -11,8 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import warnings +from typing import Any, Iterable, List, Union try: import snappy @@ -45,10 +47,10 @@ _NO_COMPRESSION.update(_SENSITIVE_COMMANDS) -def validate_compressors(dummy, value): +def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> List[str]: try: # `value` is string. - compressors = value.split(",") + compressors = value.split(",") # type: ignore[union-attr] except AttributeError: # `value` is an iterable. compressors = list(value) @@ -78,7 +80,7 @@ def validate_compressors(dummy, value): return compressors -def validate_zlib_compression_level(option, value): +def validate_zlib_compression_level(option: str, value: Any) -> int: try: level = int(value) except Exception: @@ -89,11 +91,13 @@ def validate_zlib_compression_level(option, value): class CompressionSettings: - def __init__(self, compressors, zlib_compression_level): + def __init__(self, compressors: List[str], zlib_compression_level: int): self.compressors = compressors self.zlib_compression_level = zlib_compression_level - def get_compression_context(self, compressors): + def get_compression_context( + self, compressors: List[str] + ) -> Union[SnappyContext, ZlibContext, ZstdContext, None]: if compressors: chosen = compressors[0] if chosen == "snappy": @@ -110,7 +114,7 @@ class SnappyContext: compressor_id = 1 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: return snappy.compress(data) @@ -128,13 +132,13 @@ class ZstdContext: compressor_id = 3 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: # ZstdCompressor is not thread safe. # TODO: Use a pool? return ZstdCompressor().compress(data) -def decompress(data, compressor_id): +def decompress(data: bytes, compressor_id: int) -> bytes: if compressor_id == SnappyContext.compressor_id: # python-snappy doesn't support the buffer interface. # https://github.com/andrix/python-snappy/issues/65 From 7d19205540792519fb86dc19674306219c8ebbf0 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:29:32 -0700 Subject: [PATCH 0959/2111] PYTHON-3782 add types to lock.py (#1271) --- pymongo/lock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/lock.py b/pymongo/lock.py index b7c01f56b7..741876afcb 100644 --- a/pymongo/lock.py +++ b/pymongo/lock.py @@ -22,7 +22,7 @@ _forkable_locks: weakref.WeakSet = weakref.WeakSet() -def _create_lock(): +def _create_lock() -> threading.Lock: """Represents a lock that is tracked upon instantiation using a WeakSet and reset by pymongo upon forking. """ From ebba342aaa396f530056c25445f49af1319ad047 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 09:48:57 -0700 Subject: [PATCH 0960/2111] PYTHON-3787 add types to max_staleness_selectors.py (#1276) --- pymongo/max_staleness_selectors.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 28b0bb615e..2b7a7cc3c8 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -26,17 +26,22 @@ where "SMax" is the secondary with the greatest lastWriteDate. """ +from __future__ import annotations + +from typing import TYPE_CHECKING from pymongo.errors import ConfigurationError from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_selectors import Selection # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. IDLE_WRITE_PERIOD = 10 SMALLEST_MAX_STALENESS = 90 -def _validate_max_staleness(max_staleness, heartbeat_frequency): +def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> None: # We checked for max staleness -1 before this, it must be positive here. if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: raise ConfigurationError( @@ -53,7 +58,7 @@ def _validate_max_staleness(max_staleness, heartbeat_frequency): ) -def _with_primary(max_staleness, selection): +def _with_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with a known primary.""" primary = selection.primary sds = [] @@ -75,7 +80,7 @@ def _with_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def _no_primary(max_staleness, selection): +def _no_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with no known primary.""" # Secondary that's replicated the most recent writes. smax = selection.secondary_with_max_last_write_date() @@ -98,7 +103,7 @@ def _no_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def select(max_staleness, selection): +def select(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection.""" if max_staleness == -1: return selection From 2c563f128d71d612136ed50e3d91c15cdf6c25af Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:47:17 -0700 Subject: [PATCH 0961/2111] PYTHON-3785 add types to response.py (#1274) --- pymongo/response.py | 47 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/pymongo/response.py b/pymongo/response.py index fc01b0f1bf..bd4795bfb0 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -13,12 +13,30 @@ # limitations under the License. """Represent a response from the server.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.message import _OpMsg, _OpReply + from pymongo.pool import SocketInfo + from pymongo.typings import _Address class Response: __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") - def __init__(self, data, address, request_id, duration, from_command, docs): + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: List[Mapping[str, Any]], + ): """Represent a response from the server. :Parameters: @@ -36,32 +54,32 @@ def __init__(self, data, address, request_id, duration, from_command, docs): self._docs = docs @property - def data(self): + def data(self) -> Union[_OpMsg, _OpReply]: """Server response's raw BSON bytes.""" return self._data @property - def address(self): + def address(self) -> _Address: """(host, port) of the source server.""" return self._address @property - def request_id(self): + def request_id(self) -> int: """The request id of this operation.""" return self._request_id @property - def duration(self): + def duration(self) -> Optional[timedelta]: """The duration of the operation.""" return self._duration @property - def from_command(self): + def from_command(self) -> bool: """If the response is a result from a db command.""" return self._from_command @property - def docs(self): + def docs(self) -> List[Mapping[str, Any]]: """The decoded document(s).""" return self._docs @@ -70,7 +88,15 @@ class PinnedResponse(Response): __slots__ = ("_socket_info", "_more_to_come") def __init__( - self, data, address, socket_info, request_id, duration, from_command, docs, more_to_come + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + socket_info: SocketInfo, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: List[Mapping[str, Any]], + more_to_come: bool, ): """Represent a response to an exhaust cursor's initial query. @@ -78,7 +104,6 @@ def __init__( - `data`: A network response message. - `address`: (host, port) of the source server. - `socket_info`: The SocketInfo used for the initial query. - - `pool`: The Pool from which the SocketInfo came. - `request_id`: The request id of this operation. - `duration`: The duration of the operation. - `from_command`: If the response is the result of a db command. @@ -91,7 +116,7 @@ def __init__( self._more_to_come = more_to_come @property - def socket_info(self): + def socket_info(self) -> SocketInfo: """The SocketInfo used for the initial query. The server will send batches on this socket, without waiting for @@ -101,7 +126,7 @@ def socket_info(self): return self._socket_info @property - def more_to_come(self): + def more_to_come(self) -> bool: """If true, server is ready to send batches on the socket until the result set is exhausted or there is an error. """ From 2a75a181987e5083e2643b14fc7417c0d4d9bbe5 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:48:04 -0700 Subject: [PATCH 0962/2111] PYTHON-3788 add types to server_selectors.py (#1278) --- pymongo/server_selectors.py | 40 ++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index aa9d26b5fb..9a67015575 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -13,15 +13,25 @@ # permissions and limitations under the License. """Criteria to select some ServerDescriptions from a TopologyDescription.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Optional, TypeVar from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +T = TypeVar("T") + class Selection: """Input or output of a server selector function.""" @classmethod - def from_topology_description(cls, topology_description): + def from_topology_description(cls, topology_description: TopologyDescription) -> Selection: known_servers = topology_description.known_servers primary = None for sd in known_servers: @@ -36,54 +46,60 @@ def from_topology_description(cls, topology_description): primary, ) - def __init__(self, topology_description, server_descriptions, common_wire_version, primary): + def __init__( + self, + topology_description: TopologyDescription, + server_descriptions: List[ServerDescription], + common_wire_version: Optional[int], + primary: Optional[ServerDescription], + ): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version - def with_server_descriptions(self, server_descriptions): + def with_server_descriptions(self, server_descriptions: List[ServerDescription]) -> Selection: return Selection( self.topology_description, server_descriptions, self.common_wire_version, self.primary ) - def secondary_with_max_last_write_date(self): + def secondary_with_max_last_write_date(self) -> Optional[ServerDescription]: secondaries = secondary_server_selector(self) if secondaries.server_descriptions: return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) return None @property - def primary_selection(self): + def primary_selection(self) -> Selection: primaries = [self.primary] if self.primary else [] return self.with_server_descriptions(primaries) @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self.topology_description.heartbeat_frequency @property - def topology_type(self): + def topology_type(self) -> int: return self.topology_description.topology_type - def __bool__(self): + def __bool__(self) -> bool: return bool(self.server_descriptions) - def __getitem__(self, item): + def __getitem__(self, item: int) -> ServerDescription: return self.server_descriptions[item] -def any_server_selector(selection): +def any_server_selector(selection: T) -> T: return selection -def readable_server_selector(selection): +def readable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_readable] ) -def writable_server_selector(selection): +def writable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_writable] ) From 01dd2f8ce091bd33bc917d977bc2f2cf9da788c7 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 10:49:04 -0700 Subject: [PATCH 0963/2111] PYTHON-3786 add types to srv_resolver.py (#1275) --- pymongo/srv_resolver.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 583de818b0..57c48f1e13 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -13,9 +13,11 @@ # permissions and limitations under the License. """Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations import ipaddress import random +from typing import Any, List, Optional, Tuple, Union try: from dns import resolver @@ -30,14 +32,14 @@ # dnspython can return bytes or str from various parts # of its API depending on version. We always want str. -def maybe_decode(text): +def maybe_decode(text: Union[str, bytes]) -> str: if isinstance(text, bytes): return text.decode() return text # PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. -def _resolve(*args, **kwargs): +def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: if hasattr(resolver, "resolve"): # dnspython >= 2 return resolver.resolve(*args, **kwargs) @@ -52,7 +54,13 @@ def _resolve(*args, **kwargs): class _SrvResolver: - def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT @@ -72,7 +80,7 @@ def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): if self.__slen < 2: raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) - def get_options(self): + def get_options(self) -> Optional[str]: try: results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) except (resolver.NoAnswer, resolver.NXDOMAIN): @@ -84,7 +92,7 @@ def get_options(self): raise ConfigurationError("Only one TXT record is supported") return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") - def _resolve_uri(self, encapsulate_errors): + def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: try: results = _resolve( "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout @@ -97,7 +105,9 @@ def _resolve_uri(self, encapsulate_errors): raise ConfigurationError(str(exc)) return results - def _get_srv_response_and_hosts(self, encapsulate_errors): + def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> Tuple[resolver.Answer, List[Tuple[str, Any]]]: results = self._resolve_uri(encapsulate_errors) # Construct address tuples @@ -117,10 +127,12 @@ def _get_srv_response_and_hosts(self, encapsulate_errors): nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) return results, nodes - def get_hosts(self): + def get_hosts(self) -> List[Tuple[str, Any]]: _, nodes = self._get_srv_response_and_hosts(True) return nodes - def get_hosts_and_min_ttl(self): + def get_hosts_and_min_ttl(self) -> Tuple[List[Tuple[str, Any]], int]: results, nodes = self._get_srv_response_and_hosts(False) - return nodes, results.rrset.ttl + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl From d5882075d63fc9209640ebd7e92d60fd3684d6aa Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 Jun 2023 10:57:23 -0700 Subject: [PATCH 0964/2111] PYTHON-3789 Use tox for Evergreen mockupdb tests (#1277) --- .evergreen/config.yml | 4 +- .evergreen/run-mockupdb-tests.sh | 18 ------ test/mockupdb/test_handshake.py | 97 ++++++++++++++++---------------- tox.ini | 9 +++ 4 files changed, 60 insertions(+), 68 deletions(-) delete mode 100755 .evergreen/run-mockupdb-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f3c159a1df..8fa2df2415 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -357,7 +357,9 @@ functions: script: | set -o xtrace ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh + + alias python=${PYTHON_BINARY} + python -m tox -e test-mockupdb "run doctests": - command: shell.exec diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh deleted file mode 100755 index a76ed6316f..0000000000 --- a/.evergreen/run-mockupdb-tests.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Must be run from pymongo repo root -set -o xtrace -set -o errexit - -. .evergreen/utils.sh - -${PYTHON_BINARY} setup.py clean - -createvirtualenv ${PYTHON_BINARY} mockuptests -trap "deactivate; rm -rf mockuptests" EXIT HUP - -# Install PyMongo from git clone so mockup-tests don't -# download it from pypi. -python -m pip install . -python -m pip install --upgrade 'https://github.com/ajdavis/mongo-mockup-db/archive/master.zip' -cd ./test/mockupdb -python -m unittest discover -v diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index d3f8922c4c..883d518f5b 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -23,49 +23,6 @@ from pymongo.server_api import ServerApi, ServerApiVersion -def test_hello_with_option(self, protocol, **kwargs): - hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" - # `db.command("hello"|"ismaster")` commands are the same for primaries and - # secondaries, so we only need one server. - primary = MockupDB() - # Set up a custom handler to save the first request from the driver. - self.handshake_req = None - - def respond(r): - # Only save the very first request from the driver. - if self.handshake_req is None: - self.handshake_req = r - load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} - return r.reply( - OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) - ) - - primary.autoresponds(respond) - primary.run() - self.addCleanup(primary.stop) - - # We need a special dict because MongoClient uses "server_api" and all - # of the commands use "apiVersion". - k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} - client = MongoClient( - "mongodb://" + primary.address_string, - appname="my app", # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] - ) - - self.addCleanup(client.close) - - # We have an autoresponder luckily, so no need for `go()`. - assert client.db.command(hello) - - # We do this checking here rather than in the autoresponder `respond()` - # because it runs in another Python thread so there are some funky things - # with error handling within that thread, and we want to be able to use - # self.assertRaises(). - self.handshake_req.assert_matches(protocol(hello, **kwargs)) - _check_handshake_data(self.handshake_req) - - def _check_handshake_data(request): assert "client" in request data = request["client"] @@ -79,6 +36,48 @@ def _check_handshake_data(request): class TestHandshake(unittest.TestCase): + def hello_with_option_helper(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req is None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". + k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] + ) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). + self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + def test_client_handshake_data(self): primary, secondary = MockupDB(), MockupDB() for server in primary, secondary: @@ -208,21 +207,21 @@ def test_client_handshake_saslSupportedMechs(self): return def test_handshake_load_balanced(self): - test_hello_with_option(self, OpMsg, loadBalanced=True) + self.hello_with_option_helper(OpMsg, loadBalanced=True) with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, Command, loadBalanced=True) + self.hello_with_option_helper(Command, loadBalanced=True) def test_handshake_versioned_api(self): - test_hello_with_option(self, OpMsg, apiVersion="1") + self.hello_with_option_helper(OpMsg, apiVersion="1") with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, Command, apiVersion="1") + self.hello_with_option_helper(Command, apiVersion="1") def test_handshake_not_either(self): # If we don't specify either option then it should be using # OP_QUERY for the initial step of the handshake. - test_hello_with_option(self, Command) + self.hello_with_option_helper(Command) with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, OpMsg) + self.hello_with_option_helper(OpMsg) def test_handshake_max_wire(self): server = MockupDB() diff --git a/tox.ini b/tox.ini index ba53a2011e..bdabf17700 100644 --- a/tox.ini +++ b/tox.ini @@ -121,3 +121,12 @@ deps = {[testenv:doc]deps} commands = sphinx-build -E -b linkcheck doc ./doc/_build/linkcheck + +[testenv:test-mockupdb] +description = run mockupdb tests +deps = + {[testenv:test]deps} + https://github.com/ajdavis/mongo-mockup-db/archive/master.zip +passenv = * +commands = + python -m pytest -v ./test/mockupdb From 0c727bba856cb1cd44e4609f60605254c4ee86bf Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:53:43 -0700 Subject: [PATCH 0965/2111] PYTHON-3791 Fix access to last_write_date (#1279) --- pymongo/max_staleness_selectors.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 2b7a7cc3c8..10c136a43e 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -61,11 +61,13 @@ def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> Non def _with_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with a known primary.""" primary = selection.primary + assert primary sds = [] for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. + assert s.last_write_date and primary.last_write_date staleness = ( (s.last_update_time - s.last_write_date) - (primary.last_update_time - primary.last_write_date) @@ -93,6 +95,7 @@ def _no_primary(max_staleness: int, selection: Selection) -> Selection: for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. + assert smax.last_write_date and s.last_write_date staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency if staleness <= max_staleness: From 820823891da58d46f889fc332b0da6b55f11f39a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 11:54:23 -0700 Subject: [PATCH 0966/2111] PYTHON-3773 add types to client_session.py (#1268) --- pymongo/client_session.py | 137 +++++++++++++++++++++++--------------- 1 file changed, 83 insertions(+), 54 deletions(-) diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 08d9f03bb5..d196318664 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -144,9 +144,13 @@ Any, Callable, ContextManager, + List, Mapping, + MutableMapping, NoReturn, Optional, + Tuple, + Type, TypeVar, ) @@ -170,6 +174,12 @@ from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.pool import SocketInfo + from pymongo.server import Server + class SessionOptions: """Options for a new :class:`ClientSession`. @@ -326,7 +336,9 @@ def max_commit_time_ms(self) -> Optional[int]: return self._max_commit_time_ms -def _validate_session_write_concern(session, write_concern): +def _validate_session_write_concern( + session: Optional[ClientSession], write_concern: Optional[WriteConcern] +) -> Optional[ClientSession]: """Validate that an explicit session is not used with an unack'ed write. Returns the session to use for the next operation. @@ -351,13 +363,18 @@ def _validate_session_write_concern(session, write_concern): class _TransactionContext: """Internal transaction context manager for start_transaction.""" - def __init__(self, session): + def __init__(self, session: ClientSession): self.__session = session - def __enter__(self): + def __enter__(self) -> _TransactionContext: return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: if self.__session.in_transaction: if exc_val is None: self.__session.commit_transaction() @@ -377,49 +394,49 @@ class _TxnState: class _Transaction: """Internal class to hold transaction information in a ClientSession.""" - def __init__(self, opts, client): + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient): self.opts = opts self.state = _TxnState.NONE self.sharded = False - self.pinned_address = None - self.sock_mgr = None + self.pinned_address: Optional[Tuple[str, Optional[int]]] = None + self.sock_mgr: Optional[_SocketManager] = None self.recovery_token = None self.attempt = 0 self.client = client - def active(self): + def active(self) -> bool: return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) - def starting(self): + def starting(self) -> bool: return self.state == _TxnState.STARTING @property - def pinned_conn(self): + def pinned_conn(self) -> Optional[SocketInfo]: if self.active() and self.sock_mgr: return self.sock_mgr.sock return None - def pin(self, server, sock_info): + def pin(self, server: Server, sock_info: SocketInfo) -> None: self.sharded = True self.pinned_address = server.description.address if server.description.server_type == SERVER_TYPE.LoadBalancer: sock_info.pin_txn() self.sock_mgr = _SocketManager(sock_info, False) - def unpin(self): + def unpin(self) -> None: self.pinned_address = None if self.sock_mgr: self.sock_mgr.close() self.sock_mgr = None - def reset(self): + def reset(self) -> None: self.unpin() self.state = _TxnState.NONE self.sharded = False self.recovery_token = None self.attempt = 0 - def __del__(self): + def __del__(self) -> None: if self.sock_mgr: # Reuse the cursor closing machinery to return the socket to the # pool soon. @@ -433,7 +450,7 @@ def _reraise_with_unknown_commit(exc: Any) -> NoReturn: raise -def _max_time_expired_error(exc): +def _max_time_expired_error(exc: PyMongoError) -> bool: """Return true if exc is a MaxTimeMSExpired error.""" return isinstance(exc, OperationFailure) and exc.code == 50 @@ -454,7 +471,7 @@ def _max_time_expired_error(exc): _WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 -def _within_time_limit(start_time): +def _within_time_limit(start_time: float) -> bool: """Are we within the with_transaction retry limit?""" return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT @@ -489,8 +506,8 @@ def __init__( self._client: MongoClient = client self._server_session = server_session self._options = options - self._cluster_time = None - self._operation_time = None + self._cluster_time: Optional[Mapping[str, Any]] = None + self._operation_time: Optional[Timestamp] = None self._snapshot_time = None # Is this an implicitly created session? self._implicit = implicit @@ -503,7 +520,7 @@ def end_session(self) -> None: """ self._end_session(lock=True) - def _end_session(self, lock): + def _end_session(self, lock: bool) -> None: if self._server_session is not None: try: if self.in_transaction: @@ -515,7 +532,7 @@ def _end_session(self, lock): self._client._return_server_session(self._server_session, lock) self._server_session = None - def _check_ended(self): + def _check_ended(self) -> None: if self._server_session is None: raise InvalidOperation("Cannot use ended session") @@ -557,14 +574,14 @@ def operation_time(self) -> Optional[Timestamp]: """ return self._operation_time - def _inherit_option(self, name, val): + def _inherit_option(self, name: str, val: _T) -> _T: """Return the inherited TransactionOption value.""" if val: return val txn_opts = self.options.default_transaction_options - val = txn_opts and getattr(txn_opts, name) - if val: - return val + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val return getattr(self.client, name) def with_transaction( @@ -814,21 +831,22 @@ def abort_transaction(self) -> None: self._transaction.state = _TxnState.ABORTED self._unpin() - def _finish_transaction_with_retry(self, command_name): + def _finish_transaction_with_retry(self, command_name: str) -> List[Any]: """Run commit or abort with one retry after any retryable error. :Parameters: - `command_name`: Either "commitTransaction" or "abortTransaction". """ - def func(session, sock_info, retryable): + def func(session: ClientSession, sock_info: SocketInfo, retryable: bool) -> List[Any]: return self._finish_transaction(sock_info, command_name) return self._client._retry_internal(True, func, self, None) - def _finish_transaction(self, sock_info, command_name): + def _finish_transaction(self, sock_info: SocketInfo, command_name: str) -> List[Any]: self._transaction.attempt += 1 opts = self._transaction.opts + assert opts wc = opts.write_concern cmd = SON([(command_name, 1)]) if command_name == "commitTransaction": @@ -839,6 +857,7 @@ def _finish_transaction(self, sock_info, command_name): # subsequent commitTransaction commands should be upgraded to use # w:"majority" and set a default value of 10 seconds for wtimeout. if self._transaction.attempt > 1: + assert wc wc_doc = wc.document wc_doc["w"] = "majority" wc_doc.setdefault("wtimeout", 10000) @@ -851,7 +870,7 @@ def _finish_transaction(self, sock_info, command_name): sock_info, cmd, session=self, write_concern=wc, parse_write_concern_error=True ) - def _advance_cluster_time(self, cluster_time): + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: """Internal cluster time helper.""" if self._cluster_time is None: self._cluster_time = cluster_time @@ -873,7 +892,7 @@ def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) - def _advance_operation_time(self, operation_time): + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: """Internal operation time helper.""" if self._operation_time is None: self._operation_time = operation_time @@ -893,7 +912,7 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") self._advance_operation_time(operation_time) - def _process_response(self, reply): + def _process_response(self, reply: Mapping[str, Any]) -> None: """Process a response to a command that was run with this session.""" self._advance_cluster_time(reply.get("$clusterTime")) self._advance_operation_time(reply.get("operationTime")) @@ -922,44 +941,51 @@ def in_transaction(self) -> bool: return self._transaction.active() @property - def _starting_transaction(self): + def _starting_transaction(self) -> bool: """True if this session is starting a multi-statement transaction.""" return self._transaction.starting() @property - def _pinned_address(self): + def _pinned_address(self) -> Optional[Tuple[str, Optional[int]]]: """The mongos address this transaction was created on.""" if self._transaction.active(): return self._transaction.pinned_address return None @property - def _pinned_connection(self): + def _pinned_connection(self) -> Optional[SocketInfo]: """The connection this transaction was started on.""" return self._transaction.pinned_conn - def _pin(self, server, sock_info): + def _pin(self, server: Server, sock_info: SocketInfo) -> None: """Pin this session to the given Server or to the given connection.""" self._transaction.pin(server, sock_info) - def _unpin(self): + def _unpin(self) -> None: """Unpin this session from any pinned Server.""" self._transaction.unpin() - def _txn_read_preference(self): + def _txn_read_preference(self) -> Optional[_ServerMode]: """Return read preference of this transaction or None.""" if self.in_transaction: + assert self._transaction.opts return self._transaction.opts.read_preference return None - def _materialize(self): + def _materialize(self) -> None: if isinstance(self._server_session, _EmptyServerSession): old = self._server_session self._server_session = self._client._topology.get_server_session() if old.started_retryable_write: self._server_session.inc_transaction_id() - def _apply_to(self, command, is_retryable, read_preference, sock_info): + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: ReadPreference, + sock_info: SocketInfo, + ) -> None: self._check_ended() self._materialize() if self.options.snapshot: @@ -984,6 +1010,7 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): self._transaction.state = _TxnState.IN_PROGRESS command["startTransaction"] = True + assert self._transaction.opts if self._transaction.opts.read_concern: rc = self._transaction.opts.read_concern.document if rc: @@ -993,11 +1020,11 @@ def _apply_to(self, command, is_retryable, read_preference, sock_info): command["txnNumber"] = self._server_session.transaction_id command["autocommit"] = False - def _start_retryable_write(self): + def _start_retryable_write(self) -> None: self._check_ended() self._server_session.inc_transaction_id() - def _update_read_concern(self, cmd, sock_info): + def _update_read_concern(self, cmd: MutableMapping[str, Any], sock_info: SocketInfo) -> None: if self.options.causal_consistency and self.operation_time is not None: cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time if self.options.snapshot: @@ -1019,15 +1046,15 @@ def __init__(self): self.dirty = False self.started_retryable_write = False - def mark_dirty(self): + def mark_dirty(self) -> None: self.dirty = True - def inc_transaction_id(self): + def inc_transaction_id(self) -> None: self.started_retryable_write = True class _ServerSession: - def __init__(self, generation): + def __init__(self, generation: int): # Ensure id is type 4, regardless of CodecOptions.uuid_representation. self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} self.last_use = time.monotonic() @@ -1035,7 +1062,7 @@ def __init__(self, generation): self.dirty = False self.generation = generation - def mark_dirty(self): + def mark_dirty(self) -> None: """Mark this session as dirty. A server session is marked dirty when a command fails with a network @@ -1043,18 +1070,18 @@ def mark_dirty(self): """ self.dirty = True - def timed_out(self, session_timeout_minutes): + def timed_out(self, session_timeout_minutes: float) -> bool: idle_seconds = time.monotonic() - self.last_use # Timed out if we have less than a minute to live. return idle_seconds > (session_timeout_minutes - 1) * 60 @property - def transaction_id(self): + def transaction_id(self) -> Int64: """Positive 64-bit integer.""" return Int64(self._transaction_id) - def inc_transaction_id(self): + def inc_transaction_id(self) -> None: self._transaction_id += 1 @@ -1064,21 +1091,21 @@ class _ServerSessionPool(collections.deque): This class is not thread-safe, access it while holding the Topology lock. """ - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.generation = 0 - def reset(self): + def reset(self) -> None: self.generation += 1 self.clear() - def pop_all(self): + def pop_all(self) -> List[_ServerSession]: ids = [] while self: ids.append(self.pop().session_id) return ids - def get_server_session(self, session_timeout_minutes): + def get_server_session(self, session_timeout_minutes: float) -> _ServerSession: # Although the Driver Sessions Spec says we only clear stale sessions # in return_server_session, PyMongo can't take a lock when returning # sessions from a __del__ method (like in Cursor.__die), so it can't @@ -1094,20 +1121,22 @@ def get_server_session(self, session_timeout_minutes): return _ServerSession(self.generation) - def return_server_session(self, server_session, session_timeout_minutes): + def return_server_session( + self, server_session: _ServerSession, session_timeout_minutes: Optional[float] + ) -> None: if session_timeout_minutes is not None: self._clear_stale(session_timeout_minutes) if server_session.timed_out(session_timeout_minutes): return self.return_server_session_no_lock(server_session) - def return_server_session_no_lock(self, server_session): + def return_server_session_no_lock(self, server_session: _ServerSession) -> None: # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. if server_session.generation == self.generation and not server_session.dirty: self.appendleft(server_session) - def _clear_stale(self, session_timeout_minutes): + def _clear_stale(self, session_timeout_minutes: float) -> None: # Clear stale sessions. The least recently used are on the right. while self: if self[-1].timed_out(session_timeout_minutes): From cae124c32c6d1d9e1e32b5731fe23b511e9e45eb Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 Jun 2023 14:19:04 -0700 Subject: [PATCH 0967/2111] PYTHON-3588 Expose an API to create a cursor from a command response (#1263) --- .evergreen/resync-specs.sh | 5 +- CONTRIBUTING.rst | 3 +- doc/changelog.rst | 6 + pymongo/command_cursor.py | 21 +- pymongo/database.py | 109 ++- test/run_command/unified/runCommand.json | 635 +++++++++++++ .../run_command/unified/runCursorCommand.json | 877 ++++++++++++++++++ test/test_database.py | 20 + test/test_run_command.py | 17 + test/unified_format.py | 36 +- tox.ini | 2 + 11 files changed, 1723 insertions(+), 8 deletions(-) create mode 100644 test/run_command/unified/runCommand.json create mode 100644 test/run_command/unified/runCursorCommand.json create mode 100644 test/test_run_command.py diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 817a2d96bc..a74a0125e6 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -56,7 +56,7 @@ cpjson () { cd "$SPECS"/source/$1 find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ $PYMONGO/test/$2 - printf "\nIgnored files for ${PWD}\n" + printf "\nIgnored files for ${PWD}:\n" IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )" @@ -126,6 +126,9 @@ do retryable-writes|retryable_writes) cpjson retryable-writes/tests/ retryable_writes ;; + run-command|run_command) + cpjson run-command/tests/ run_command + ;; sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) cpjson server-discovery-and-monitoring/tests/errors \ discovery_and_monitoring/errors diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index a457b3e4c3..a897d0e067 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -98,7 +98,8 @@ use the script provided in ``.evergreen/resync-specs.sh``.:: git clone git@github.com:mongodb/specifications.git export MDB_SPECS=~/specifications cd ~/mongo-python-driver/.evergreen - ./resync-specs.sh -b "connection-string*" crud bson-corpus + ./resync-specs.sh -b "" spec1 spec2 ... + ./resync-specs.sh -b "connection-string*" crud bson-corpus # Updates crud and bson-corpus specs while ignoring all files with the regex "connection-string*" cd .. The ``-b`` flag adds as a regex pattern to block files you do not wish to diff --git a/doc/changelog.rst b/doc/changelog.rst index eae105b617..b112d3bc08 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,12 @@ Changelog ========= +Changes in Version 4.5 +----------------------- + +- Added :meth:`~pymongo.database.Database.cursor_command` + and :meth:`~pymongo.command_cursor.CommandCursor.try_next` to support executing an arbitrary command that returns a cursor. + Changes in Version 4.4 ----------------------- diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index c831dfb49b..7a2e528680 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -292,7 +292,7 @@ def next(self) -> _DocumentType: __next__ = next - def _try_next(self, get_more_allowed): + def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: """Advance the cursor blocking for at most one getMore command.""" if not len(self.__data) and not self.__killed and get_more_allowed: self._refresh() @@ -301,6 +301,25 @@ def _try_next(self, get_more_allowed): else: return None + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next document without waiting + indefinitely for data. + + If no document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there is no additional data) then ``None`` is returned. + + :Returns: + The next document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 4.5 + """ + return self._try_next(get_more_allowed=True) + def __enter__(self) -> "CommandCursor[_DocumentType]": return self diff --git a/pymongo/database.py b/pymongo/database.py index 1fa9913c60..7829c28fe2 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -42,7 +42,7 @@ from pymongo.collection import Collection from pymongo.command_cursor import CommandCursor from pymongo.common import _ecoc_coll_name, _esc_coll_name -from pymongo.errors import CollectionInvalid, InvalidName +from pymongo.errors import CollectionInvalid, InvalidName, InvalidOperation from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline @@ -833,6 +833,113 @@ def command( **kwargs, ) + @_csot.apply + def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + batch_size: Optional[int] = None, + max_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> CommandCursor: + """Issue a MongoDB command and parse the response as a cursor. + + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. + + :Parameters: + - `command`: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + - `value` (optional): value to use for the command verb when + `command` is passed as a string + - `check` (optional): check the response for errors, raising + :class:`~pymongo.errors.OperationFailure` if there are any + - `allowable_errors`: if `check` is ``True``, error messages + in this list will be ignored by error-checking + - `read_preference` (optional): The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + - `codec_options`: A :class:`~bson.codec_options.CodecOptions` + instance. + - `session` (optional): A + :class:`~pymongo.client_session.ClientSession`. + - `comment` (optional): A user-provided comment to attach to this + command. + - `**kwargs` (optional): additional keyword arguments will + be added to the command document before it is sent + + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + with self.__client._tmp_session(session, close=False) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + with self.__client._socket_for_reads(read_preference, tmp_session) as ( + sock_info, + read_preference, + ): + response = self._command( + sock_info, + command, + value, + check, + allowable_errors, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = CommandCursor( + coll, + response["cursor"], + sock_info.address, + batch_size=batch_size or 0, + max_await_time_ms=max_time_ms, + session=tmp_session, + explicit_session=session is not None, + comment=comment, + ) + cmd_cursor._maybe_pin_connection(sock_info) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + def _retryable_read_command( self, command, diff --git a/test/run_command/unified/runCommand.json b/test/run_command/unified/runCommand.json new file mode 100644 index 0000000000..007e514bd7 --- /dev/null +++ b/test/run_command/unified/runCommand.json @@ -0,0 +1,635 @@ +{ + "description": "runCommand", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + }, + { + "database": { + "id": "dbWithRC", + "client": "client", + "databaseName": "dbWithRC", + "databaseOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "database": { + "id": "dbWithWC", + "client": "client", + "databaseName": "dbWithWC", + "databaseOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "client": { + "id": "clientWithStableApi", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "dbWithStableApi", + "client": "clientWithStableApi", + "databaseName": "dbWithStableApi" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "always attaches $db and implicit lsid to given command and omits default readPreference", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "db", + "lsid": { + "$$exists": true + }, + "$readPreference": { + "$$exists": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "always gossips the $clusterTime on the sent command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$clusterTime": { + "$$exists": true + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided session lsid to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided $readPreference to given command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded-replicaset", + "load-balanced", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "mode": "nearest" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach $readPreference to given command on standalone", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach primary $readPreference to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "primary" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not inherit readConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithRC", + "arguments": { + "commandName": "aggregate", + "command": { + "aggregate": "collection", + "pipeline": [], + "cursor": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection", + "readConcern": { + "$$exists": false + }, + "$db": "dbWithRC" + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "does not inherit writeConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithWC", + "arguments": { + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "bar" + } + ], + "ordered": true + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "writeConcern": { + "$$exists": false + }, + "$db": "dbWithWC" + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "does not retry retryable errors on given command", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "attaches transaction fields to given command", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded-replicaset", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "session": "session", + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "db" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "attaches apiVersion fields to given command when stableApi is configured on the client", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "dbWithStableApi", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "clientWithStableApi", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "dbWithStableApi", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + "$$unsetOrMatches": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCursorCommand.json b/test/run_command/unified/runCursorCommand.json new file mode 100644 index 0000000000..4f1ec8a01a --- /dev/null +++ b/test/run_command/unified/runCursorCommand.json @@ -0,0 +1,877 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "successfully executes checkMetadataConsistency cursor creating command", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "checkMetadataConsistency", + "command": { + "checkMetadataConsistency": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "checkMetadataConsistency": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "checkMetadataConsistency" + } + } + ] + } + ] + }, + { + "description": "errors if the command response is not a cursor", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "creates an implicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "accepts an explicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is exhausted", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 5, + "x": 55 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is closed", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "close", + "object": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ] + }, + { + "description": "supports configuring getMore batchSize", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 5, + "command": { + "find": "collection", + "batchSize": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "batchSize": 5, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore maxTimeMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "maxTimeMS": 300, + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1 + } + }, + "ignoreResultAndError": true + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "maxTimeMS": 300, + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "comment": { + "hello": "getMore" + }, + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + } + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "comment": { + "hello": "getMore" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "does not close the cursor when receiving an empty batch", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "cursorType": "tailable", + "commandName": "find", + "batchSize": 2, + "command": { + "find": "cappedCollection", + "tailable": true + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "cappedCollection" + }, + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "cappedCollection" + }, + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "cappedCollection" + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "cappedCollection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_database.py b/test/test_database.py index 140d169db3..041b339e6a 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -18,6 +18,8 @@ import sys from typing import Any, Iterable, List, Mapping, Union +from pymongo.command_cursor import CommandCursor + sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest @@ -42,6 +44,7 @@ CollectionInvalid, ExecutionTimeout, InvalidName, + InvalidOperation, OperationFailure, WriteConcernError, ) @@ -407,6 +410,23 @@ def test_command_with_regex(self): for doc in result["cursor"]["firstBatch"]: self.assertTrue(isinstance(doc["r"], Regex)) + def test_cursor_command(self): + db = self.client.pymongo_test + db.test.drop() + + docs = [{"_id": i, "doc": i} for i in range(3)] + db.test.insert_many(docs) + + cursor = db.cursor_command("find", "test") + + self.assertIsInstance(cursor, CommandCursor) + + result_docs = list(cursor) + self.assertEqual(docs, result_docs) + + def test_cursor_command_invalid(self): + self.assertRaises(InvalidOperation, self.db.cursor_command, "usersInfo", "test") + def test_password_digest(self): self.assertRaises(TypeError, auth._password_digest, 5) self.assertRaises(TypeError, auth._password_digest, True) diff --git a/test/test_run_command.py b/test/test_run_command.py new file mode 100644 index 0000000000..848fd2cb92 --- /dev/null +++ b/test/test_run_command.py @@ -0,0 +1,17 @@ +import os +import unittest +from test.unified_format import generate_test_classes + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "run_command") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 90cb442b28..72db9e7d47 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -64,10 +64,11 @@ from bson.objectid import ObjectId from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket, GridOut -from pymongo import ASCENDING, MongoClient, _csot +from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.change_stream import ChangeStream from pymongo.client_session import ClientSession, TransactionOptions, _TxnState from pymongo.collection import Collection +from pymongo.command_cursor import CommandCursor from pymongo.database import Database from pymongo.encryption import ClientEncryption from pymongo.encryption_options import _HAVE_PYMONGOCRYPT @@ -1087,6 +1088,31 @@ def _databaseOperation_runCommand(self, target, **kwargs): kwargs["command"] = ordered_command return target.command(**kwargs) + def _databaseOperation_runCursorCommand(self, target, **kwargs): + return list(self._databaseOperation_createCommandCursor(target, **kwargs)) + + def _databaseOperation_createCommandCursor(self, target, **kwargs): + self.__raise_if_unsupported("createCommandCursor", target, Database) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_time_ms"] = kwargs["maxTimeMS"] + del kwargs["maxTimeMS"] + + return target.cursor_command(**kwargs) + def _databaseOperation_listCollections(self, target, *args, **kwargs): if "batch_size" in kwargs: kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} @@ -1150,7 +1176,9 @@ def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kw return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): - self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, CommandCursor + ) while target.alive: try: return next(target) @@ -1159,7 +1187,7 @@ def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): return None def _cursor_close(self, target, *args, **kwargs): - self.__raise_if_unsupported("close", target, NonLazyCursor) + self.__raise_if_unsupported("close", target, NonLazyCursor, CommandCursor) return target.close() def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): @@ -1250,7 +1278,7 @@ def run_entity_operation(self, spec): doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): method_name = f"_changeStreamOperation_{opname}" - elif isinstance(target, NonLazyCursor): + elif isinstance(target, (NonLazyCursor, CommandCursor)): method_name = f"_cursor_{opname}" elif isinstance(target, ClientSession): method_name = f"_sessionOperation_{opname}" diff --git a/tox.ini b/tox.ini index bdabf17700..240126f8a5 100644 --- a/tox.ini +++ b/tox.ini @@ -91,6 +91,8 @@ commands = [testenv:typecheck] description = run mypy and pyright to typecheck +extras = + {[testenv:typecheck-mypy]extras} deps = {[testenv:typecheck-mypy]deps} {[testenv:typecheck-pyright]deps} From e56c08afccde4387d3bb24043098ff1626703a78 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 28 Jun 2023 15:25:46 -0700 Subject: [PATCH 0968/2111] PYTHON-3770 add types to auth_oidc.py (#1265) --- pymongo/auth_oidc.py | 42 ++++++++++++++++++++++++++++-------------- pymongo/pool.py | 1 + 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index 543dc0200d..a3afbdb3fe 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -13,11 +13,13 @@ # limitations under the License. """MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + import os import threading from dataclasses import dataclass, field from datetime import datetime, timedelta, timezone -from typing import Callable, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple import bson from bson.binary import Binary @@ -25,6 +27,10 @@ from pymongo.errors import ConfigurationError, OperationFailure from pymongo.helpers import _REAUTHENTICATION_REQUIRED_CODE +if TYPE_CHECKING: + from pymongo.auth import MongoCredential + from pymongo.pool import SocketInfo + @dataclass class _OIDCProperties: @@ -44,7 +50,9 @@ class _OIDCProperties: _CACHE: Dict[str, "_OIDCAuthenticator"] = {} -def _get_authenticator(credentials, address): +def _get_authenticator( + credentials: MongoCredential, address: Tuple[str, int] +) -> _OIDCAuthenticator: # Clear out old items in the cache. now_utc = datetime.now(timezone.utc) to_remove = [] @@ -81,7 +89,7 @@ def _get_authenticator(credentials, address): return _CACHE[cache_key] -def _get_cache_exp(): +def _get_cache_exp() -> datetime: return datetime.now(timezone.utc) + timedelta(minutes=CACHE_TIMEOUT_MINUTES) @@ -98,7 +106,7 @@ class _OIDCAuthenticator: cache_exp_utc: datetime = field(default_factory=_get_cache_exp) lock: threading.Lock = field(default_factory=threading.Lock) - def get_current_token(self, use_callbacks=True): + def get_current_token(self, use_callbacks: bool = True) -> Optional[str]: properties = self.properties request_cb = properties.request_token_callback @@ -116,16 +124,15 @@ def get_current_token(self, use_callbacks=True): current_valid_token = True timeout = CALLBACK_TIMEOUT_SECONDS - if not use_callbacks and not current_valid_token: return None if not current_valid_token and request_cb is not None: - prev_token = self.idp_resp and self.idp_resp["access_token"] + prev_token = self.idp_resp["access_token"] if self.idp_resp else None with self.lock: # See if the token was changed while we were waiting for the # lock. - new_token = self.idp_resp and self.idp_resp["access_token"] + new_token = self.idp_resp["access_token"] if self.idp_resp else None if new_token != prev_token: return new_token @@ -173,14 +180,14 @@ def get_current_token(self, use_callbacks=True): return token - def auth_start_cmd(self, use_callbacks=True): + def auth_start_cmd(self, use_callbacks: bool = True) -> Optional[SON[str, Any]]: properties = self.properties # Handle aws provider credentials. if properties.provider_name == "aws": aws_identity_file = os.environ["AWS_WEB_IDENTITY_TOKEN_FILE"] with open(aws_identity_file) as fid: - token = fid.read().strip() + token: Optional[str] = fid.read().strip() payload = {"jwt": token} cmd = SON( [ @@ -230,14 +237,16 @@ def auth_start_cmd(self, use_callbacks=True): ] ) - def clear(self): + def clear(self) -> None: self.idp_info = None self.idp_resp = None self.token_exp_utc = None - def run_command(self, sock_info, cmd): + def run_command( + self, sock_info: SocketInfo, cmd: Mapping[str, Any] + ) -> Optional[Mapping[str, Any]]: try: - return sock_info.command("$external", cmd, no_reauth=True) + return sock_info.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] except OperationFailure as exc: self.clear() if exc.code == _REAUTHENTICATION_REQUIRED_CODE: @@ -247,7 +256,9 @@ def run_command(self, sock_info, cmd): return self.authenticate(sock_info, reauthenticate=True) raise - def authenticate(self, sock_info, reauthenticate=False): + def authenticate( + self, sock_info: SocketInfo, reauthenticate: bool = False + ) -> Optional[Mapping[str, Any]]: if reauthenticate: prev_id = getattr(sock_info, "oidc_token_gen_id", None) # Check if we've already changed tokens. @@ -264,6 +275,7 @@ def authenticate(self, sock_info, reauthenticate=False): resp = ctx.speculative_authenticate else: cmd = self.auth_start_cmd() + assert cmd is not None resp = self.run_command(sock_info, cmd) if resp["done"]: @@ -293,7 +305,9 @@ def authenticate(self, sock_info, reauthenticate=False): return resp -def _authenticate_oidc(credentials, sock_info, reauthenticate): +def _authenticate_oidc( + credentials: MongoCredential, sock_info: SocketInfo, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: """Authenticate using MONGODB-OIDC.""" authenticator = _get_authenticator(credentials, sock_info.address) return authenticator.authenticate(sock_info, reauthenticate=reauthenticate) diff --git a/pymongo/pool.py b/pymongo/pool.py index 2b498078c2..a827d10f9c 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -647,6 +647,7 @@ def __init__(self, sock, pool, address, id): self.compression_settings = pool.opts._compression_settings self.compression_context = None self.socket_checker = SocketChecker() + self.oidc_token_gen_id = None # Support for mechanism negotiation on the initial handshake. self.negotiated_mechs = None self.auth_ctx = None From 46276439ac702d5654a74a4ac5b039a2fffd4632 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 30 Jun 2023 17:02:36 -0400 Subject: [PATCH 0969/2111] PYTHON-3796 Fix typo in docs and reformat using blacken-docs (#1284) --- doc/examples/encryption.rst | 307 ++++++++++++++++++++---------------- 1 file changed, 174 insertions(+), 133 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 52fc548285..ecee03180c 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -146,21 +146,19 @@ the client into sending unencrypted data that should be encrypted. JSON Schemas supplied in the ``schema_map`` only apply to configuring automatic client-side field level encryption. Other validation rules in the JSON schema will not be enforced by the driver and -will result in an error.:: +will result in an error. - import os +.. code-block:: python + import os from bson.codec_options import CodecOptions from bson import json_util - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts - def create_json_schema_file(kms_providers, key_vault_namespace, - key_vault_client): + def create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client): client_encryption = ClientEncryption( kms_providers, key_vault_namespace, @@ -170,31 +168,33 @@ will result in an error.:: # on MongoClient, Database, or Collection. We will not be calling # encrypt() or decrypt() in this example so we can use any # CodecOptions. - CodecOptions()) + CodecOptions(), + ) # Create a new data key and json schema for the encryptedField. # https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_1']) + "local", key_alt_names=["pymongo_encryption_example_1"] + ) schema = { "properties": { "encryptedField": { "encrypt": { "keyId": [data_key_id], "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, } } }, - "bsonType": "object" + "bsonType": "object", } # Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be # able to parse the MongoDB extended JSON file. json_schema_string = json_util.dumps( - schema, json_options=json_util.CANONICAL_JSON_OPTIONS) + schema, json_options=json_util.CANONICAL_JSON_OPTIONS + ) - with open('jsonSchema.json', 'w') as file: + with open("jsonSchema.json", "w") as file: file.write(json_schema_string) @@ -221,19 +221,20 @@ will result in an error.:: key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) - create_json_schema_file( - kms_providers, key_vault_namespace, key_vault_client) + create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client) # Load the JSON Schema and construct the local schema_map option. - with open('jsonSchema.json', 'r') as file: + with open("jsonSchema.json", "r") as file: json_schema_string = file.read() json_schema = json_util.loads(json_schema_string) schema_map = {encrypted_namespace: json_schema} auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, schema_map=schema_map) + kms_providers, key_vault_namespace, schema_map=schema_map + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) db_name, coll_name = encrypted_namespace.split(".", 1) @@ -242,14 +243,15 @@ will result in an error.:: coll.drop() coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) + print("Decrypted document: %s" % (coll.find_one(),)) unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) if __name__ == "__main__": main() + Server-Side Field Level Encryption Enforcement `````````````````````````````````````````````` @@ -263,7 +265,9 @@ encryption using :class:`~pymongo.encryption.ClientEncryption` to create a new encryption data key and create a collection with the `Automatic Encryption JSON Schema Syntax -`_:: +`_: + +.. code-block:: python import os @@ -271,8 +275,7 @@ data key and create a collection with the from bson.binary import STANDARD from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern @@ -301,7 +304,8 @@ data key and create a collection with the key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -312,27 +316,27 @@ data key and create a collection with the # on MongoClient, Database, or Collection. We will not be calling # encrypt() or decrypt() in this example so we can use any # CodecOptions. - CodecOptions()) + CodecOptions(), + ) # Create a new data key and json schema for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_2']) + "local", key_alt_names=["pymongo_encryption_example_2"] + ) json_schema = { "properties": { "encryptedField": { "encrypt": { "keyId": [data_key_id], "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, } } }, - "bsonType": "object" + "bsonType": "object", } - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace) + auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) client = MongoClient(auto_encryption_opts=auto_encryption_opts) db_name, coll_name = encrypted_namespace.split(".", 1) db = client[db_name] @@ -348,17 +352,18 @@ data key and create a collection with the # JSON Schema. codec_options=CodecOptions(uuid_representation=STANDARD), write_concern=WriteConcern(w="majority"), - validator={"$jsonSchema": json_schema}) + validator={"$jsonSchema": json_schema}, + ) coll = client[db_name][coll_name] coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) + print("Decrypted document: %s" % (coll.find_one(),)) unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) try: unencrypted_coll.insert_one({"encryptedField": "123456789"}) except OperationFailure as exc: - print('Unencrypted insert failed: %s' % (exc.details,)) + print("Unencrypted insert failed: %s" % (exc.details,)) if __name__ == "__main__": @@ -372,13 +377,14 @@ Explicit Encryption Explicit encryption is a MongoDB community feature and does not use the ``mongocryptd`` process. Explicit encryption is provided by the -:class:`~pymongo.encryption.ClientEncryption` class, for example:: +:class:`~pymongo.encryption.ClientEncryption` class, for example: + +.. code-block:: python import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption def main(): @@ -405,7 +411,8 @@ Explicit encryption is a MongoDB community feature and does not use the key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -416,24 +423,27 @@ Explicit encryption is a MongoDB community feature and does not use the # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - coll.codec_options) + coll.codec_options, + ) # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_3']) + "local", key_alt_names=["pymongo_encryption_example_3"] + ) # Explicitly encrypt a field: encrypted_field = client_encryption.encrypt( "123456789", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) + key_id=data_key_id, + ) coll.insert_one({"encryptedField": encrypted_field}) doc = coll.find_one() - print('Encrypted document: %s' % (doc,)) + print("Encrypted document: %s" % (doc,)) # Explicitly decrypt the field: doc["encryptedField"] = client_encryption.decrypt(doc["encryptedField"]) - print('Decrypted document: %s' % (doc,)) + print("Decrypted document: %s" % (doc,)) # Cleanup resources. client_encryption.close() @@ -451,13 +461,14 @@ Although automatic encryption requires MongoDB 4.2 enterprise or a MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all users. To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True`` in -:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: +:class:`~pymongo.encryption_options.AutoEncryptionOpts`: + +.. code-block:: python import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) + from pymongo.encryption import Algorithm, ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts @@ -476,7 +487,8 @@ To configure automatic *decryption* without automatic *encryption* set # the automatic _decryption_ behavior. bypass_auto_encryption will # also disable spawning mongocryptd. auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, bypass_auto_encryption=True) + kms_providers, key_vault_namespace, bypass_auto_encryption=True + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) coll = client.test.coll @@ -490,7 +502,8 @@ To configure automatic *decryption* without automatic *encryption* set key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -501,28 +514,32 @@ To configure automatic *decryption* without automatic *encryption* set # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - coll.codec_options) + coll.codec_options, + ) # Create a new data key for the encryptedField. data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_4']) + "local", key_alt_names=["pymongo_encryption_example_4"] + ) # Explicitly encrypt a field: encrypted_field = client_encryption.encrypt( "123456789", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='pymongo_encryption_example_4') + key_alt_name="pymongo_encryption_example_4", + ) coll.insert_one({"encryptedField": encrypted_field}) # Automatically decrypts any encrypted fields. doc = coll.find_one() - print('Decrypted document: %s' % (doc,)) + print("Decrypted document: %s" % (doc,)) unencrypted_coll = MongoClient().test.coll - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) + print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) # Cleanup resources. client_encryption.close() client.close() + if __name__ == "__main__": main() @@ -539,38 +556,44 @@ AWS, GCP, and Azure cloud environments. To enable the driver's behavior to obtain credentials from the environment, add the appropriate key ("aws", "gcp", or "azure") with an empty map to "kms_providers" in either :class:`~pymongo.encryption_options.AutoEncryptionOpts` or :class:`~pymongo.encryption.ClientEncryption` options. -An application using AWS credentials would look like:: +An application using AWS credentials would look like: + +.. code-block:: python from pymongo import MongoClient from pymongo.encryption import ClientEncryption + client = MongoClient() client_encryption = ClientEncryption( - # The empty dictionary enables on-demand credentials. - kms_providers={"aws": {}}, - key_vault_namespace="keyvault.datakeys", - key_vault_client=client, - codec_options=client.codec_options, + # The empty dictionary enables on-demand credentials. + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, ) master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), } client_encryption.create_data_key("aws", master_key) The above will enable the same behavior of obtaining AWS credentials from the environment as is used for :ref:`MONGODB-AWS` authentication, including the caching to avoid rate limiting. -An application using GCP credentials would look like:: +An application using GCP credentials would look like: + +.. code-block:: python from pymongo import MongoClient from pymongo.encryption import ClientEncryption + client = MongoClient() client_encryption = ClientEncryption( - # The empty dictionary enables on-demand credentials. - kms_providers={"gcp": {}}, - key_vault_namespace="keyvault.datakeys", - key_vault_client=client, - codec_options=client.codec_options, + # The empty dictionary enables on-demand credentials. + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client, + codec_options=client.codec_options, ) master_key = { "projectId": "my-project", @@ -583,15 +606,17 @@ An application using GCP credentials would look like:: The driver will query the `VM instance metadata `_ to obtain credentials. An application using Azure credentials would look like, this time using -:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: +:class:`~pymongo.encryption_options.AutoEncryptionOpts`: + +.. code-block:: python from pymongo import MongoClient from pymongo.encryption_options import AutoEncryptionOpts + # The empty dictionary enables on-demand credentials. - kms_providers={"azure": {}}, - key_vault_namespace="keyvault.datakeys" - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace) + kms_providers = ({"azure": {}},) + key_vault_namespace = "keyvault.datakeys" + auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) client = MongoClient(auto_encryption_opts=auto_encryption_opts) coll = client.test.coll coll.insert_one({"encryptedField": "123456789"}) @@ -615,7 +640,9 @@ Data is encrypted client-side. Queryable Encryption supports indexed encrypted f which are further processed server-side. Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, -as demonstrated by the following example:: +as demonstrated by the following example: + +.. code-block:: python import os from bson.codec_options import CodecOptions @@ -623,7 +650,6 @@ as demonstrated by the following example:: from pymongo.encryption import Algorithm, ClientEncryption, QueryType from pymongo.encryption_options import AutoEncryptionOpts - local_master_key = os.urandom(96) kms_providers = {"local": {"key": local_master_key}} key_vault_namespace = "keyvault.datakeys" @@ -638,30 +664,33 @@ as demonstrated by the following example:: encrypted_fields_map = { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": key1_id, - "queries": [{"queryType": "equality"}], - }, - { - "path": "lastName", - "bsonType": "string", - "keyId": key2_id, - } - ] + "escCollection": "encryptedCollection.esc", + "ecocCollection": "encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": key1_id, + "queries": [{"queryType": "equality"}], + }, + { + "path": "lastName", + "bsonType": "string", + "keyId": key2_id, + }, + ], } } auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) + kms_providers, + key_vault_namespace, + encrypted_fields_map=encrypted_fields_map, + ) client = MongoClient(auto_encryption_opts=auto_encryption_opts) - client.default.drop_collection('encryptedCollection') - coll = client.default.create_collection('encryptedCollection') - coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) + client.default.drop_collection("encryptedCollection") + coll = client.default.create_collection("encryptedCollection") + coll.insert_one({"_id": 1, "firstName": "Jane", "lastName": "Doe"}) docs = list(coll.find({"firstName": "Jane"})) print(docs) @@ -679,13 +708,18 @@ which are further processed server-side. Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured -using an ``encrypted_fields`` mapping, as demonstrated by the following example:: +using an ``encrypted_fields`` mapping, as demonstrated by the following example: - import os +.. code-block:: python + import os from pymongo import MongoClient - from pymongo.encryption import (Algorithm, AutoEncryptionOpts, - ClientEncryption, QueryType) + from pymongo.encryption import ( + Algorithm, + AutoEncryptionOpts, + ClientEncryption, + QueryType, + ) def main(): @@ -708,7 +742,8 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: key_vault.create_index( "keyAltNames", unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) client_encryption = ClientEncryption( kms_providers, @@ -719,32 +754,29 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # The CodecOptions class used for encrypting and decrypting. # This should be the same CodecOptions instance you have configured # on MongoClient, Database, or Collection. - client.codec_options) + client.codec_options, + ) # Create a new data key for the encryptedField. - indexed_key_id = client_encryption.create_data_key( - 'local') - unindexed_key_id = client_encryption.create_data_key( - 'local') + indexed_key_id = client_encryption.create_data_key("local") + unindexed_key_id = client_encryption.create_data_key("local") encrypted_fields = { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": indexed_key_id, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality" - } - }, - { - "keyId": unindexed_key_id, - "path": "encryptedUnindexed", - "bsonType": "string", - } - ] + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": indexed_key_id, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": {"queryType": "equality"}, + }, + { + "keyId": unindexed_key_id, + "path": "encryptedUnindexed", + "bsonType": "string", + }, + ], } opts = AutoEncryptionOpts( @@ -765,27 +797,36 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: # Create and encrypt an indexed and unindexed value. val = "encrypted indexed value" unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) - insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, - unindexed_key_id) + insert_payload_indexed = client_encryption.encrypt( + val, Algorithm.INDEXED, indexed_key_id, contention_factor=1 + ) + insert_payload_unindexed = client_encryption.encrypt( + unindexed_val, Algorithm.UNINDEXED, unindexed_key_id + ) # Insert the payloads. - coll.insert_one({ - "encryptedIndexed": insert_payload_indexed, - "encryptedUnindexed": insert_payload_unindexed - }) + coll.insert_one( + { + "encryptedIndexed": insert_payload_indexed, + "encryptedUnindexed": insert_payload_unindexed, + } + ) # Encrypt our find payload using QueryType.EQUALITY. - # The value of "data_key_id" must be the same as used to encrypt the values - # above. + # The value of "indexed_key_id" must be the same as used to encrypt + # the values above. find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 + val, + Algorithm.INDEXED, + indexed_key_id, + query_type=QueryType.EQUALITY, + contention_factor=1, ) # Find the document we inserted using the encrypted payload. # The returned document is automatically decrypted. doc = coll.find_one({"encryptedIndexed": find_payload}) - print('Returned document: %s' % (doc,)) + print("Returned document: %s" % (doc,)) # Cleanup resources. client_encryption.close() From a3940ac278e9a23fc2deaa52b304fb5d5d607bd1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Jul 2023 05:24:25 -0500 Subject: [PATCH 0970/2111] PYTHON-3759 Update to Newer Build Hosts (#1257) --- .evergreen/build-manylinux.sh | 5 +++++ .evergreen/config.yml | 15 ++++----------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh index 871151a5f3..38490c3142 100755 --- a/.evergreen/build-manylinux.sh +++ b/.evergreen/build-manylinux.sh @@ -1,6 +1,11 @@ #!/bin/bash -ex docker version + +# Set up qemu support using the method used in docker/setup-qemu-action +# https://github.com/docker/setup-qemu-action/blob/2b82ce82d56a2a04d2637cd93a637ae1b359c0a7/README.md?plain=1#L46 +docker run --rm --privileged tonistiigi/binfmt:latest --install all + # manylinux1 2021-05-05-b64d921 and manylinux2014 2021-05-05-1ac6ef3 were # the last releases to generate pip < 20.3 compatible wheels. After that # auditwheel was upgraded to v4 which produces PEP 600 manylinux_x_y wheels diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8fa2df2415..928a56df1a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1352,7 +1352,7 @@ tasks: - name: "release-manylinux" tags: ["release_tag"] - run_on: ubuntu2004-large + run_on: ubuntu2204-large exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). commands: - func: "build release" @@ -1360,7 +1360,7 @@ tasks: - name: "release-old-manylinux" tags: ["release_tag"] - run_on: ubuntu2004-large + run_on: ubuntu2204-large exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). commands: - command: shell.exec @@ -2292,13 +2292,6 @@ axes: display_name: "Archlinux" run_on: archlinux-test batchtime: 10080 # 7 days - - id: debian92 - display_name: "Debian 9.2" - run_on: debian92-test - batchtime: 10080 # 7 days - variables: - python3_binary: "/opt/python/3.8/bin/python3" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/debian92/master/latest/libmongocrypt.tar.gz - id: macos-1014 display_name: "macOS 10.14" run_on: macos-1014 @@ -2328,14 +2321,14 @@ axes: libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - id: rhel7 display_name: "RHEL 7.x" - run_on: rhel76-small + run_on: rhel79-small batchtime: 10080 # 7 days variables: python3_binary: "/opt/python/3.8/bin/python3" libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - id: rhel8 display_name: "RHEL 8.x" - run_on: rhel84-small + run_on: rhel87-small batchtime: 10080 # 7 days variables: python3_binary: "/opt/python/3.8/bin/python3" From 0b5bdccf3ae1233a032c525bc809c84eed3520d1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 3 Jul 2023 09:15:04 -0700 Subject: [PATCH 0971/2111] PYTHON-3729 use PyObject_GetAddr instead of PyObject_GetAddrString (#1281) --- bson/_cbsonmodule.c | 117 +++++++++++++++++++++++++++++--------- pymongo/_cmessagemodule.c | 34 ++++++++--- 2 files changed, 116 insertions(+), 35 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 68ea6b63c4..c26ad252cc 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -56,6 +56,20 @@ struct module_state { PyObject* _min_datetime_ms; PyObject* _max_datetime_ms; PyObject* _type_marker_str; + PyObject* _flags_str; + PyObject* _pattern_str; + PyObject* _encoder_map_str; + PyObject* _decoder_map_str; + PyObject* _fallback_encoder_str; + PyObject* _raw_str; + PyObject* _subtype_str; + PyObject* _binary_str; + PyObject* _scope_str; + PyObject* _inc_str; + PyObject* _time_str; + PyObject* _bid_str; + PyObject* _replace_str; + PyObject* _astimezone_str; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -219,7 +233,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. */ -static int write_raw_doc(buffer_t buffer, PyObject* raw); +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw); /* Date stuff */ static PyObject* datetime_from_millis(long long millis) { @@ -468,8 +482,24 @@ static int _load_python_objects(PyObject* module) { PyObject* compiled = NULL; struct module_state *state = GETSTATE(module); - /* Python str for faster _type_marker check */ - state->_type_marker_str = PyUnicode_FromString("_type_marker"); + /* Cache commonly used attribute names to improve performance. */ + if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && + (state->_flags_str = PyUnicode_FromString("flags")) && + (state->_pattern_str = PyUnicode_FromString("pattern")) && + (state->_encoder_map_str = PyUnicode_FromString("_encoder_map")) && + (state->_decoder_map_str = PyUnicode_FromString("_decoder_map")) && + (state->_fallback_encoder_str = PyUnicode_FromString("_fallback_encoder")) && + (state->_raw_str = PyUnicode_FromString("raw")) && + (state->_subtype_str = PyUnicode_FromString("subtype")) && + (state->_binary_str = PyUnicode_FromString("binary")) && + (state->_scope_str = PyUnicode_FromString("scope")) && + (state->_inc_str = PyUnicode_FromString("inc")) && + (state->_time_str = PyUnicode_FromString("time")) && + (state->_bid_str = PyUnicode_FromString("bid")) && + (state->_replace_str = PyUnicode_FromString("replace")) && + (state->_astimezone_str = PyUnicode_FromString("astimezone")))) { + return 1; + } if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || @@ -555,25 +585,25 @@ static long _type_marker(PyObject* object, PyObject* _type_marker_str) { * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry, PyObject* _encoder_map_str, PyObject* _decoder_map_str, PyObject* _fallback_encoder_str) { registry->encoder_map = NULL; registry->decoder_map = NULL; registry->fallback_encoder = NULL; registry->registry_obj = NULL; - registry->encoder_map = PyObject_GetAttrString(registry_obj, "_encoder_map"); + registry->encoder_map = PyObject_GetAttr(registry_obj, _encoder_map_str); if (registry->encoder_map == NULL) { goto fail; } registry->is_encoder_empty = (PyDict_Size(registry->encoder_map) == 0); - registry->decoder_map = PyObject_GetAttrString(registry_obj, "_decoder_map"); + registry->decoder_map = PyObject_GetAttr(registry_obj, _decoder_map_str); if (registry->decoder_map == NULL) { goto fail; } registry->is_decoder_empty = (PyDict_Size(registry->decoder_map) == 0); - registry->fallback_encoder = PyObject_GetAttrString(registry_obj, "_fallback_encoder"); + registry->fallback_encoder = PyObject_GetAttr(registry_obj, _fallback_encoder_str); if (registry->fallback_encoder == NULL) { goto fail; } @@ -597,6 +627,7 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr */ int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { PyObject* type_registry_obj = NULL; + struct module_state *state = GETSTATE(self); long type_marker; options->unicode_decode_error_handler = NULL; @@ -613,13 +644,13 @@ int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t } type_marker = _type_marker(options->document_class, - GETSTATE(self)->_type_marker_str); + state->_type_marker_str); if (type_marker < 0) { return 0; } if (!cbson_convert_type_registry(type_registry_obj, - &options->type_registry)) { + &options->type_registry, state->_encoder_map_str, state->_decoder_map_str, state->_fallback_encoder_str)) { return 0; } @@ -692,7 +723,7 @@ _set_cannot_encode(PyObject* value) { * Sets exception and returns 0 on failure. */ static int _write_regex_to_buffer( - buffer_t buffer, int type_byte, PyObject* value) { + buffer_t buffer, int type_byte, PyObject* value, PyObject* _flags_str, PyObject* _pattern_str) { PyObject* py_flags; PyObject* py_pattern; @@ -708,7 +739,7 @@ static int _write_regex_to_buffer( * Both the builtin re type and our Regex class have attributes * "flags" and "pattern". */ - py_flags = PyObject_GetAttrString(value, "flags"); + py_flags = PyObject_GetAttr(value, _flags_str); if (!py_flags) { return 0; } @@ -717,7 +748,7 @@ static int _write_regex_to_buffer( if (int_flags == -1 && PyErr_Occurred()) { return 0; } - py_pattern = PyObject_GetAttrString(value, "pattern"); + py_pattern = PyObject_GetAttr(value, _pattern_str); if (!py_pattern) { return 0; } @@ -838,7 +869,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, int size; *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttrString(value, "subtype"); + subtype_object = PyObject_GetAttr(value, state->_subtype_str); if (!subtype_object) { return 0; } @@ -886,7 +917,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* ObjectId */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "binary"); + PyObject* pystring = PyObject_GetAttr(value, state->_binary_str); if (!pystring) { return 0; } @@ -906,7 +937,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, case 11: { /* Regex */ - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } case 13: { @@ -915,7 +946,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, length_location, length; - PyObject* scope = PyObject_GetAttrString(value, "scope"); + PyObject* scope = PyObject_GetAttr(value, state->_scope_str); if (!scope) { return 0; } @@ -958,7 +989,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, PyObject* obj; unsigned long i; - obj = PyObject_GetAttrString(value, "inc"); + obj = PyObject_GetAttr(value, state->_inc_str); if (!obj) { return 0; } @@ -971,7 +1002,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - obj = PyObject_GetAttrString(value, "time"); + obj = PyObject_GetAttr(value, state->_time_str); if (!obj) { return 0; } @@ -1006,7 +1037,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* Decimal128 */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "bid"); + PyObject* pystring = PyObject_GetAttr(value, state->_bid_str); if (!pystring) { return 0; } @@ -1041,7 +1072,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, case 101: { /* RawBSONDocument */ - if (!write_raw_doc(buffer, value)) { + if (!write_raw_doc(buffer, value, state->_raw_str)) { return 0; } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; @@ -1206,7 +1237,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } /* @@ -1437,14 +1468,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. */ -static int write_raw_doc(buffer_t buffer, PyObject* raw) { +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { char* bytes; Py_ssize_t len; int len_int; int bytes_written = 0; PyObject* bytes_obj = NULL; - bytes_obj = PyObject_GetAttrString(raw, "raw"); + bytes_obj = PyObject_GetAttr(raw, _raw_str); if (!bytes_obj) { goto fail; } @@ -1485,7 +1516,7 @@ int write_dict(PyObject* self, buffer_t buffer, } if (101 == type_marker) { - return write_raw_doc(buffer, dict); + return write_raw_doc(buffer, dict, state->_raw_str); } mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); @@ -1606,6 +1637,7 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { buffer_t buffer; PyObject* raw_bson_document_bytes_obj; long type_marker; + struct module_state *state = GETSTATE(self); if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, &options_obj, &top_level) && @@ -1614,13 +1646,13 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { } /* check for RawBSONDocument */ - type_marker = _type_marker(dict, GETSTATE(self)->_type_marker_str); + type_marker = _type_marker(dict, state->_type_marker_str); if (type_marker < 0) { destroy_codec_options(&options); return NULL; } else if (101 == type_marker) { destroy_codec_options(&options); - raw_bson_document_bytes_obj = PyObject_GetAttrString(dict, "raw"); + raw_bson_document_bytes_obj = PyObject_GetAttr(dict, state->_raw_str); if (NULL == raw_bson_document_bytes_obj) { return NULL; } @@ -2102,7 +2134,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (!naive) { goto invalid; } - replace = PyObject_GetAttrString(naive, "replace"); + replace = PyObject_GetAttr(naive, state->_replace_str); Py_DECREF(naive); if (!replace) { goto invalid; @@ -2137,7 +2169,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, /* convert to local time */ if (options->tzinfo != Py_None) { - astimezone = PyObject_GetAttrString(value, "astimezone"); + astimezone = PyObject_GetAttr(value, state->_astimezone_str); Py_DECREF(value); if (!astimezone) { Py_DECREF(replace); @@ -3051,6 +3083,21 @@ static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->MaxKey); Py_VISIT(GETSTATE(m)->UTC); Py_VISIT(GETSTATE(m)->REType); + Py_VISIT(GETSTATE(m)->_type_marker_str); + Py_VISIT(GETSTATE(m)->_flags_str); + Py_VISIT(GETSTATE(m)->_pattern_str); + Py_VISIT(GETSTATE(m)->_encoder_map_str); + Py_VISIT(GETSTATE(m)->_decoder_map_str); + Py_VISIT(GETSTATE(m)->_fallback_encoder_str); + Py_VISIT(GETSTATE(m)->_raw_str); + Py_VISIT(GETSTATE(m)->_subtype_str); + Py_VISIT(GETSTATE(m)->_binary_str); + Py_VISIT(GETSTATE(m)->_scope_str); + Py_VISIT(GETSTATE(m)->_inc_str); + Py_VISIT(GETSTATE(m)->_time_str); + Py_VISIT(GETSTATE(m)->_bid_str); + Py_VISIT(GETSTATE(m)->_replace_str); + Py_VISIT(GETSTATE(m)->_astimezone_str); return 0; } @@ -3067,6 +3114,20 @@ static int _cbson_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->UTC); Py_CLEAR(GETSTATE(m)->REType); Py_CLEAR(GETSTATE(m)->_type_marker_str); + Py_CLEAR(GETSTATE(m)->_flags_str); + Py_CLEAR(GETSTATE(m)->_pattern_str); + Py_CLEAR(GETSTATE(m)->_encoder_map_str); + Py_CLEAR(GETSTATE(m)->_decoder_map_str); + Py_CLEAR(GETSTATE(m)->_fallback_encoder_str); + Py_CLEAR(GETSTATE(m)->_raw_str); + Py_CLEAR(GETSTATE(m)->_subtype_str); + Py_CLEAR(GETSTATE(m)->_binary_str); + Py_CLEAR(GETSTATE(m)->_scope_str); + Py_CLEAR(GETSTATE(m)->_inc_str); + Py_CLEAR(GETSTATE(m)->_time_str); + Py_CLEAR(GETSTATE(m)->_bid_str); + Py_CLEAR(GETSTATE(m)->_replace_str); + Py_CLEAR(GETSTATE(m)->_astimezone_str); return 0; } diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index ee7623d832..7ac66a1e4b 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -28,6 +28,10 @@ struct module_state { PyObject* _cbson; + PyObject* _max_bson_size_str; + PyObject* _max_message_size_str; + PyObject* _max_write_batch_size_str; + PyObject* _max_split_size_str; }; /* See comments about module initialization in _cbsonmodule.c */ @@ -366,21 +370,21 @@ _batched_op_msg( PyObject* iterator = NULL; char* flags = ack ? "\x00\x00\x00\x00" : "\x02\x00\x00\x00"; - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { return 0; } - max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { return 0; } - max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); + max_message_size_obj = PyObject_GetAttr(ctx, state->_max_message_size_str); max_message_size = PyLong_AsLong(max_message_size_obj); Py_XDECREF(max_message_size_obj); if (max_message_size == -1) { @@ -667,7 +671,7 @@ _batched_write_command( PyObject* doc = NULL; PyObject* iterator = NULL; - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { @@ -679,7 +683,7 @@ _batched_write_command( */ max_cmd_size = max_bson_size + 16382; - max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { @@ -689,7 +693,7 @@ _batched_write_command( // max_split_size is the size at which to perform a batch split. // Normally this this value is equal to max_bson_size (16MiB). However, // when auto encryption is enabled max_split_size is reduced to 2MiB. - max_split_size_obj = PyObject_GetAttrString(ctx, "max_split_size"); + max_split_size_obj = PyObject_GetAttr(ctx, state->_max_split_size_str); max_split_size = PyLong_AsLong(max_split_size_obj); Py_XDECREF(max_split_size_obj); if (max_split_size == -1) { @@ -924,11 +928,19 @@ static PyMethodDef _CMessageMethods[] = { #define INITERROR return NULL static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->_cbson); + Py_VISIT(GETSTATE(m)->_max_bson_size_str); + Py_VISIT(GETSTATE(m)->_max_message_size_str); + Py_VISIT(GETSTATE(m)->_max_split_size_str); + Py_VISIT(GETSTATE(m)->_max_write_batch_size_str); return 0; } static int _cmessage_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->_cbson); + Py_CLEAR(GETSTATE(m)->_max_bson_size_str); + Py_CLEAR(GETSTATE(m)->_max_message_size_str); + Py_CLEAR(GETSTATE(m)->_max_split_size_str); + Py_CLEAR(GETSTATE(m)->_max_write_batch_size_str); return 0; } @@ -950,6 +962,7 @@ PyInit__cmessage(void) PyObject *_cbson = NULL; PyObject *c_api_object = NULL; PyObject *m = NULL; + struct module_state* state = NULL; /* Store a reference to the _cbson module since it's needed to call some * of its functions @@ -977,7 +990,14 @@ PyInit__cmessage(void) goto fail; } - GETSTATE(m)->_cbson = _cbson; + state = GETSTATE(m); + state->_cbson = _cbson; + if (!((state->_max_bson_size_str = PyUnicode_FromString("max_bson_size")) && + (state->_max_message_size_str = PyUnicode_FromString("max_message_size")) && + (state->_max_write_batch_size_str = PyUnicode_FromString("max_write_batch_size")) && + (state->_max_split_size_str = PyUnicode_FromString("max_split_size")))) { + goto fail; + } Py_DECREF(c_api_object); From 94fabf5e988b02041693ff7f0921e4eee32ee9a0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 3 Jul 2023 09:17:11 -0700 Subject: [PATCH 0972/2111] PYTHON-3793 Make tox fail with invalid environment (#1286) --- .evergreen/config.yml | 2 +- .evergreen/run-doctests.sh | 2 +- .github/workflows/test-python.yml | 10 +++++----- RELEASE.rst | 2 +- doc/index.rst | 2 +- tox.ini | 14 +++++++++++++- 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 928a56df1a..8062d35652 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -359,7 +359,7 @@ functions: ${PREPARE_SHELL} alias python=${PYTHON_BINARY} - python -m tox -e test-mockupdb + python -m tox -m test-mockupdb "run doctests": - command: shell.exec diff --git a/.evergreen/run-doctests.sh b/.evergreen/run-doctests.sh index 39e5102b6a..be71f1789a 100644 --- a/.evergreen/run-doctests.sh +++ b/.evergreen/run-doctests.sh @@ -4,4 +4,4 @@ set -o xtrace set -o errexit ${PYTHON_BINARY} -m pip install tox -${PYTHON_BINARY} -m tox -e doc-test +${PYTHON_BINARY} -m tox -m doc-test diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index d7c442cc49..93e10ac562 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -42,7 +42,7 @@ jobs: mongodb-version: 4.4 - name: Run tests run: | - tox -e test + tox -m test mypytest: name: Run mypy @@ -64,13 +64,13 @@ jobs: pip install tox - name: Run mypy run: | - tox -e typecheck-mypy + tox -m typecheck-mypy - name: Run pyright run: | - tox -e typecheck-pyright + tox -m typecheck-pyright - name: Run pyright strict run: | - tox -e typecheck-pyright-strict + tox -m typecheck-pyright-strict linkcheck: name: Check Links @@ -86,4 +86,4 @@ jobs: pip install tox - name: Check links run: | - tox -e linkcheck + tox -m linkcheck diff --git a/RELEASE.rst b/RELEASE.rst index caa67d3819..74a45e829a 100644 --- a/RELEASE.rst +++ b/RELEASE.rst @@ -36,7 +36,7 @@ Doing a Release To test locally, ``python3 setup.py test`` will build the C extensions and test. ``python3 tools/clean.py`` will remove the extensions, and then ``python3 setup.py --no_ext test`` will run the tests without - them. You can also run the doctests: ``tox -e doc-test``. + them. You can also run the doctests: ``tox -m doc-test``. 2. Check Jira to ensure all the tickets in this version have been completed. diff --git a/doc/index.rst b/doc/index.rst index 7e357c2a4b..2f0ba1d36a 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -103,7 +103,7 @@ following command from the root directory of the **PyMongo** source: .. code-block:: bash $ pip install tox - $ tox -e docs + $ tox -m doc Indices and tables ------------------ diff --git a/tox.ini b/tox.ini index 240126f8a5..ac65ed1ad9 100644 --- a/tox.ini +++ b/tox.ini @@ -24,6 +24,19 @@ envlist = doc-test, # Linkcheck sphinx docs linkcheck +labels = # Use labels and -m instead of -e so that tox -m ` for alternatives. + + The :meth:`MongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.MongoClient.close`. + + :class:`~pymongo.mongo_client.MongoClient` no longer returns an + instance of :class:`~pymongo.database.Database` for attribute names + with leading underscores. You must use dict-style lookups instead:: + + client['__my_database__'] + + Not:: + + client.__my_database__ + + .. versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + """ + doc_class = document_class or dict + self._init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + + if host is None: + host = self.HOST + if isinstance(host, str): + host = [host] + if port is None: + port = self.PORT + if not isinstance(port, int): + raise TypeError("port must be an instance of int") + + # _pool_class, _monitor_class, and _condition_class are for deep + # customization of PyMongo, e.g. Motor. + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) + + # Parse options passed as kwargs. + keyword_opts = common._CaseInsensitiveDictionary(kwargs) + keyword_opts["document_class"] = doc_class + + seeds = set() + username = None + password = None + dbase = None + opts = common._CaseInsensitiveDictionary() + fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") + for entity in host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = uri_parser.parse_uri( + entity, + port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + username = res["username"] or username + password = res["password"] or password + dbase = res["database"] or dbase + opts = res["options"] + fqdn = res["fqdn"] + else: + seeds.update(uri_parser.split_hosts(entity, port)) + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + if type_registry is not None: + keyword_opts["type_registry"] = type_registry + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + connect = opts.get("connect", True) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + + # Override connection string options with kwarg options. + opts.update(keyword_opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", username) + password = opts.get("password", password) + self._options = options = ClientOptions(username, password, dbase, opts) + + self._default_database_name = dbase + self._lock = _ALock(_create_lock()) + self._kill_cursors_queue: list = [] + + self._event_listeners = options.pool_options._event_listeners + super().__init__( + options.codec_options, + options.read_preference, + options.write_concern, + options.read_concern, + ) + + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=options.replica_set_name, + pool_class=pool_class, + pool_options=options.pool_options, + monitor_class=monitor_class, + condition_class=condition_class, + local_threshold_ms=options.local_threshold_ms, + server_selection_timeout=options.server_selection_timeout, + server_selector=options.server_selector, + heartbeat_frequency=options.heartbeat_frequency, + fqdn=fqdn, + direct_connection=options.direct_connection, + load_balanced=options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=options.server_monitoring_mode, + ) + + self._init_background() + + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] + + self._encrypter = None + if self._options.auto_encryption_opts: + from pymongo.asynchronous.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout + + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + AsyncMongoClient._clients[self._topology._topology_id] = self + + def _init_background(self, old_pid: Optional[int] = None) -> None: + self._topology = Topology(self._topology_settings) + # Seed the topology with the old one's pid so we can detect clients + # that are opened before a fork and used after. + self._topology._pid = old_pid + + async def target() -> bool: + client = self_ref() + if client is None: + return False # Stop the executor. + await AsyncMongoClient._process_periodic_tasks(client) + return True + + executor = periodic_executor.PeriodicExecutor( + interval=common.KILL_CURSOR_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_kill_cursors_thread", + ) + + # We strongly reference the executor and it weakly references us via + # this closure. When the client is freed, stop the executor soon. + self_ref: Any = weakref.ref(self, executor.close) + self._kill_cursors_executor = executor + + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: + return self._options.load_balanced and not (session and session.in_transaction) + + def _after_fork(self) -> None: + """Resets topology in a child after successfully forking.""" + self._init_background() + + def _duplicate(self, **kwargs: Any) -> AsyncMongoClient: + args = self._init_kwargs.copy() + args.update(kwargs) + return AsyncMongoClient(**args) + + async def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> ChangeStream[_DocumentType]: + """Watch changes on this cluster. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.ClusterChangeStream` cursor which + iterates over changes on all databases on this cluster. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with client.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.ClusterChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.ClusterChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = ClusterChangeStream( + self.admin, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + await change_stream._initialize_cursor() + return change_stream + + @property + def topology_description(self) -> TopologyDescription: + """The description of the connected MongoDB deployment. + + >>> client.topology_description + , , ]> + >>> client.topology_description.topology_type_name + 'ReplicaSetWithPrimary' + + Note that the description is periodically updated in the background + but the returned object itself is immutable. Access this property again + to get a more recent + :class:`~pymongo.topology_description.TopologyDescription`. + + :return: An instance of + :class:`~pymongo.topology_description.TopologyDescription`. + + .. versionadded:: 4.0 + """ + return self._topology.description + + @property + def nodes(self) -> FrozenSet[_Address]: + """Set of all currently connected servers. + + .. warning:: When connected to a replica set the value of :attr:`nodes` + can change over time as :class:`MongoClient`'s view of the replica + set changes. :attr:`nodes` can also be an empty set when + :class:`MongoClient` is first instantiated and hasn't yet connected + to any servers, or a network partition causes it to lose connection + to all servers. + """ + description = self._topology.description + return frozenset(s.address for s in description.known_servers) + + @property + def options(self) -> ClientOptions: + """The configuration options for this client. + + :return: An instance of :class:`~pymongo.client_options.ClientOptions`. + + .. versionadded:: 4.0 + """ + return self._options + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self._topology == other._topology + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self._topology) + + def _repr_helper(self) -> str: + def option_repr(option: str, value: Any) -> str: + """Fix options whose __repr__ isn't usable in a constructor.""" + if option == "document_class": + if value is dict: + return "document_class=dict" + else: + return f"document_class={value.__module__}.{value.__name__}" + if option in common.TIMEOUT_OPTIONS and value is not None: + return f"{option}={int(value * 1000)}" + + return f"{option}={value!r}" + + # Host first... + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] + # ... then everything in self._constructor_args... + options.extend( + option_repr(key, self._options._options[key]) for key in self._constructor_args + ) + # ... then everything else. + options.extend( + option_repr(key, self._options._options[key]) + for key in self._options._options + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._repr_helper()})" + + def __getattr__(self, name: str) -> database.AsyncDatabase[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> database.AsyncDatabase[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + return database.AsyncDatabase(self, name) + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.ClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.client_session.SessionOptions`. See the + :mod:`~pymongo.client_session` module for details and examples. + + A :class:`~pymongo.client_session.ClientSession` may only be used with + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.client_session.ClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. + return None + + def _send_cluster_time( + self, command: MutableMapping[str, Any], session: Optional[ClientSession] + ) -> None: + topology_time = self._topology.max_cluster_time() + session_time = session.cluster_time if session else None + if topology_time and session_time: + if topology_time["clusterTime"] > session_time["clusterTime"]: + cluster_time: Optional[ClusterTime] = topology_time + else: + cluster_time = session_time + else: + cluster_time = topology_time or session_time + if cluster_time: + command["$clusterTime"] = cluster_time + + def get_default_database( + self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.AsyncDatabase[_DocumentType]: + """Get the database named in the MongoDB connection URI. + + >>> uri = 'mongodb://host/my_database' + >>> client = MongoClient(uri) + >>> db = client.get_default_database() + >>> assert db.name == 'my_database' + >>> db = client.get_database() + >>> assert db.name == 'my_database' + + Useful in scripts where you want to choose which database to use + based only on the URI in a configuration file. + + :param default: the database name to use if no database name + was provided in the URI. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.8 + Undeprecated. Added the ``default``, ``codec_options``, + ``read_preference``, ``write_concern`` and ``read_concern`` + parameters. + + .. versionchanged:: 3.5 + Deprecated, use :meth:`get_database` instead. + """ + if self._default_database_name is None and default is None: + raise ConfigurationError("No default database name defined or provided.") + + name = cast(str, self._default_database_name or default) + return database.AsyncDatabase( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def get_database( + self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.AsyncDatabase[_DocumentType]: + """Get a :class:`~pymongo.database.Database` with the given name and + options. + + Useful for creating a :class:`~pymongo.database.Database` with + different codec options, read preference, and/or write concern from + this :class:`MongoClient`. + + >>> client.read_preference + Primary() + >>> db1 = client.test + >>> db1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> db2 = client.get_database( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.AsyncDatabase( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.AsyncDatabase: + """Get a Database instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + async def __aenter__(self) -> AsyncMongoClient[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'MongoClient' object is not iterable") + + next = __next__ + + async def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = await self._topology.select_server(writable_server_selector, _Op.TEST) + + return getattr(server.description, attr_name) + + @property + async def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. versionadded:: 3.0 + """ + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' + ) + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded, + ): + return None + return await self._server_property("address") + + @property + async def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + return await self._topology.get_primary() # type: ignore[return-value] + + @property + async def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + return await self._topology.get_secondaries() + + @property + async def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + return await self._topology.get_arbiters() + + @property + async def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return await self._server_property("is_writable") + + @property + async def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return await self._server_property("server_type") == SERVER_TYPE.Mongos + + async def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use Connection.command directly to avoid implicitly creating + # another session. + async with await self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + await conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + async def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. + + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + session_ids = await self._topology.pop_all_sessions() + if session_ids: + await self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. + self._kill_cursors_executor.close() + await self._process_kill_cursors() + await self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + await self._encrypter.close() + + async def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + await self._topology.open() + async with self._lock: + self._kill_cursors_executor.open() + return self._topology + + @contextlib.asynccontextmanager + async def _checkout( + self, server: Server, session: Optional[ClientSession] + ) -> AsyncGenerator[Connection, None]: + in_txn = session and session.in_transaction + async with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + async with await server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + async def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The ClientSession for the next operation, or None. May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. + """ + try: + topology = await self._get_topology() + if session and not session.in_transaction: + await session._transaction.reset() + if not address and session: + address = session._pinned_address + if address: + # We're running a getMore or this session is pinned to a mongos. + server = await topology.select_server_by_address( + address, operation, operation_id=operation_id + ) + if not server: + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 + else: + server = await topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) + return server + except PyMongoError as exc: + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + await session._unpin() + raise + + async def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> AsyncContextManager[Connection]: + server = await self._select_server(writable_server_selector, session, operation) + return self._checkout(server, session) + + @contextlib.asynccontextmanager + async def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] + ) -> AsyncGenerator[tuple[Connection, _ServerMode], None]: + assert read_preference is not None, "read_preference must not be None" + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. + # Thread safe: if the type is single it cannot change. + topology = await self._get_topology() + single = topology.description.topology_type == TOPOLOGY_TYPE.Single + + async with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + async def _conn_for_reads( + self, + read_preference: _ServerMode, + session: Optional[ClientSession], + operation: str, + ) -> AsyncContextManager[tuple[Connection, _ServerMode]]: + assert read_preference is not None, "read_preference must not be None" + _ = await self._get_topology() + server = await self._select_server(read_preference, session, operation) + return self._conn_from_server(read_preference, server, session) + + @_csot.apply + async def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, + address: Optional[_Address] = None, + ) -> Response: + """Run a _Query/_GetMore operation and return a Response. + + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message + to a specific server, used for getMore. + """ + if operation.conn_mgr: + server = await self._select_server( + operation.read_preference, + operation.session, + operation.name, + address=address, + ) + + async with operation.conn_mgr._alock: + async with _MongoClientErrorHandler(self, server, operation.session) as err_handler: + err_handler.contribute_socket(operation.conn_mgr.conn) + return await server.run_operation( + operation.conn_mgr.conn, + operation, + operation.read_preference, + self._event_listeners, + unpack_res, + self, + ) + + async def _cmd( + _session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> Response: + operation.reset() # Reset op in case of retry. + return await server.run_operation( + conn, + operation, + read_preference, + self._event_listeners, + unpack_res, + self, + ) + + return await self._retryable_read( + _cmd, + operation.read_preference, + operation.session, + address=address, + retryable=isinstance(operation, message._Query), + operation=operation.name, + ) + + async def _retry_with_session( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk], + operation: str, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with at most one consecutive retries + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + """ + # Ensure that the options supports retry_writes and there is a valid session not in + # transaction, otherwise, we will not support retry behavior for this txn. + retryable = bool( + retryable and self.options.retry_writes and session and not session.in_transaction + ) + return await self._retry_internal( + func=func, + session=session, + bulk=bulk, + operation=operation, + retryable=retryable, + operation_id=operation_id, + ) + + @_csot.apply + async def _retry_internal( + self, + func: _WriteCall[T] | _ReadCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk], + operation: str, + is_read: bool = False, + address: Optional[_Address] = None, + read_pref: Optional[_ServerMode] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ) -> T: + """Internal retryable helper for all client transactions. + + :param func: Callback function we want to retry + :param session: Client Session on which the transaction should occur + :param bulk: Abstraction to handle bulk write operations + :param operation: The name of the operation that the server is being selected for + :param is_read: If this is an exclusive read transaction, defaults to False + :param address: Server Address, defaults to None + :param read_pref: Topology of read operation, defaults to None + :param retryable: If the operation should be retried once, defaults to None + + :return: Output of the calling func() + """ + return await _ClientConnectionRetryable( + mongo_client=self, + func=func, + bulk=bulk, + operation=operation, + is_read=is_read, + session=session, + read_pref=read_pref, + address=address, + retryable=retryable, + operation_id=operation_id, + ).run() + + async def _retryable_read( + self, + func: _ReadCall[T], + read_pref: _ServerMode, + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + retryable: bool = True, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with consecutive retries if possible + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + + :param func: Read call we want to execute + :param read_pref: Desired topology of read operation + :param session: Client session we should use to execute operation + :param operation: The name of the operation that the server is being selected for + :param address: Optional address when sending a message, defaults to None + :param retryable: if we should attempt retries + (may not always be supported even if supplied), defaults to False + """ + + # Ensure that the client supports retrying on reads and there is no session in + # transaction, otherwise, we will not support retry behavior for this call. + retryable = bool( + retryable and self.options.retry_reads and not (session and session.in_transaction) + ) + return await self._retry_internal( + func, + session, + None, + operation, + is_read=True, + address=address, + read_pref=read_pref, + retryable=retryable, + operation_id=operation_id, + ) + + async def _retryable_write( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + operation: str, + bulk: Optional[_Bulk] = None, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with consecutive retries if possible + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + + :param retryable: if we should attempt retries (may not always be supported) + :param func: write call we want to execute during a session + :param session: Client session we will use to execute write operation + :param operation: The name of the operation that the server is being selected for + :param bulk: bulk abstraction to execute operations in bulk, defaults to None + """ + async with self._tmp_session(session) as s: + return await self._retry_with_session(retryable, func, s, bulk, operation, operation_id) + + async def _cleanup_cursor( + self, + locks_allowed: bool, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: _ConnectionManager, + session: Optional[ClientSession], + explicit_session: bool, + ) -> None: + """Cleanup a cursor from cursor.close() or __del__. + + This method handles cleanup for Cursors/CommandCursors including any + pinned connection or implicit session attached at the time the cursor + was closed or garbage collected. + + :param locks_allowed: True if we are allowed to acquire locks. + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + :param session: The cursor's session. + :param explicit_session: True if the session was passed explicitly. + """ + if locks_allowed: + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + await self._close_cursor_now( + cursor_id, address, session=session, conn_mgr=conn_mgr + ) + if conn_mgr: + await conn_mgr.close() + else: + # The cursor will be closed later in a different session. + if cursor_id or conn_mgr: + self._close_cursor_soon(cursor_id, address, conn_mgr) + if session and not explicit_session: + await session._end_session(lock=locks_allowed) + + async def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[ClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Send a kill cursors message with the given id. + + The cursor is closed synchronously on the current thread. + """ + if not isinstance(cursor_id, int): + raise TypeError("cursor_id must be an instance of int") + + try: + if conn_mgr: + async with conn_mgr._alock: + # Cursor is pinned to LB outside of a transaction. + assert address is not None + assert conn_mgr.conn is not None + await self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + await self._kill_cursors([cursor_id], address, await self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + async def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[ClientSession], + ) -> None: + """Send a kill cursors message with the given ids.""" + if address: + # address could be a tuple or _CursorAddress, but + # select_server_by_address needs (host, port). + server = await topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] + else: + # Application called close_cursor() with no address. + server = await topology.select_server(writable_server_selector, _Op.KILL_CURSORS) + + async with self._checkout(server, session) as conn: + assert address is not None + await self._kill_cursor_impl(cursor_ids, address, session, conn) + + async def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[ClientSession], + conn: Connection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = {"killCursors": coll, "cursors": cursor_ids} + await conn.command(db, spec, session=session, client=self) + + async def _process_kill_cursors(self) -> None: + """Process any pending kill cursors requests.""" + address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] + + # Other threads or the GC may append to the queue concurrently. + while True: + try: + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() + except IndexError: + break + + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + await self._cleanup_cursor(True, cursor_id, address, conn_mgr, None, False) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + helpers._handle_exception() + + # Don't re-open topology if it's closed and there's no pending cursors. + if address_to_cursor_ids: + topology = await self._get_topology() + for address, cursor_ids in address_to_cursor_ids.items(): + try: + await self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + helpers._handle_exception() + + # This method is run periodically by a background thread. + async def _process_periodic_tasks(self) -> None: + """Process any pending kill cursors requests and + maintain connection pool parameters. + """ + try: + await self._process_kill_cursors() + await self._topology.update_pool() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + helpers._handle_exception() + + async def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession], lock: bool + ) -> None: + """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None + return await self._topology.return_server_session(server_session, lock) + + @contextlib.asynccontextmanager + async def _tmp_session( + self, session: Optional[client_session.ClientSession], close: bool = True + ) -> AsyncGenerator[Optional[client_session.ClientSession], None, None]: + """If provided session is None, lend a temporary session.""" + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError("'session' argument must be a ClientSession or None.") + # Don't call end_session. + yield session + return + + s = self._ensure_session(session) + if s: + try: + yield s + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. + await s.end_session() + raise + finally: + # Call end_session when we exit this scope. + if close: + await s.end_session() + else: + yield None + + async def _process_response( + self, reply: Mapping[str, Any], session: Optional[ClientSession] + ) -> None: + await self._topology.receive_cluster_time(reply.get("$clusterTime")) + if session is not None: + session._process_response(reply) + + async def server_info( + self, session: Optional[client_session.ClientSession] = None + ) -> dict[str, Any]: + """Get information about the MongoDB server we're connected to. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return cast( + dict, + await self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), + ) + + async def _list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[dict[str, Any]]: + cmd = {"listDatabases": 1} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + admin = self._database_default_options("admin") + res = await admin._retryable_read_command( + cmd, session=session, operation=_Op.LIST_DATABASES + ) + # listDatabases doesn't return a cursor (yet). Fake one. + cursor = { + "id": 0, + "firstBatch": res["databases"], + "ns": "admin.$cmd", + } + return AsyncCommandCursor(admin["$cmd"], cursor, None, comment=comment) + + async def list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[dict[str, Any]]: + """Get a cursor over the databases of the connected server. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listDatabases command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionadded:: 3.6 + """ + return await self._list_databases(session, comment, **kwargs) + + async def list_database_names( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + ) -> list[str]: + """Get a list of the names of all databases on the connected server. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionadded:: 3.6 + """ + res = await self._list_databases(session, nameOnly=True, comment=comment) + return [doc["name"] async for doc in res] + + @_csot.apply + async def drop_database( + self, + name_or_database: Union[str, database.AsyncDatabase[_DocumentTypeArg]], + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + ) -> None: + """Drop a database. + + Raises :class:`TypeError` if `name_or_database` is not an instance of + :class:`str` or :class:`~pymongo.database.Database`. + + :param name_or_database: the name of a database to drop, or a + :class:`~pymongo.database.Database` instance representing the + database to drop + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of + this client is automatically applied to this operation. + + .. versionchanged:: 3.4 + Apply this client's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_database + if isinstance(name, database.AsyncDatabase): + name = name.name + + if not isinstance(name, str): + raise TypeError("name_or_database must be an instance of str or a Database") + + async with await self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: + await self[name]._command( + conn, + {"dropDatabase": 1, "comment": comment}, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + +def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: + """Return the server response from PyMongo exception or None.""" + if isinstance(exc, BulkWriteError): + # Check the last writeConcernError to determine if this + # BulkWriteError is retryable. + wces = exc.details["writeConcernErrors"] + return wces[-1] if wces else None + if isinstance(exc, (NotPrimaryError, OperationFailure)): + return cast(Mapping[str, Any], exc.details) + return None + + +def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None: + doc = _retryable_error_doc(exc) + if doc: + code = doc.get("code", 0) + # retryWrites on MMAPv1 should raise an actionable error. + if code == 20 and str(exc).startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, exc.details) # type: ignore[attr-defined] + if max_wire_version >= 9: + # In MongoDB 4.4+, the server reports the error labels. + for label in doc.get("errorLabels", []): + exc._add_error_label(label) + else: + # Do not consult writeConcernError for pre-4.4 mongos. + if isinstance(exc, WriteConcernError) and is_mongos: + pass + elif code in helpers_constants._RETRYABLE_ERROR_CODES: + exc._add_error_label("RetryableWriteError") + + # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is + # handled above. + if isinstance(exc, ConnectionFailure) and not isinstance( + exc, (NotPrimaryError, WaitQueueTimeoutError) + ): + exc._add_error_label("RetryableWriteError") + + +class _MongoClientErrorHandler: + """Handle errors raised when executing an operation.""" + + __slots__ = ( + "client", + "server_address", + "session", + "max_wire_version", + "sock_generation", + "completed_handshake", + "service_id", + "handled", + ) + + def __init__(self, client: AsyncMongoClient, server: Server, session: Optional[ClientSession]): + self.client = client + self.server_address = server.description.address + self.session = session + self.max_wire_version = common.MIN_WIRE_VERSION + # XXX: When get_socket fails, this generation could be out of date: + # "Note that when a network error occurs before the handshake + # completes then the error's generation number is the generation + # of the pool at the time the connection attempt was started." + self.sock_generation = server.pool.gen.get_overall() + self.completed_handshake = False + self.service_id: Optional[ObjectId] = None + self.handled = False + + def contribute_socket(self, conn: Connection, completed_handshake: bool = True) -> None: + """Provide socket information to the error handler.""" + self.max_wire_version = conn.max_wire_version + self.sock_generation = conn.generation + self.service_id = conn.service_id + self.completed_handshake = completed_handshake + + async def handle( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] + ) -> None: + if self.handled or exc_val is None: + return + self.handled = True + if self.session: + if isinstance(exc_val, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() + + if isinstance(exc_val, PyMongoError): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): + await self.session._unpin() + err_ctx = _ErrorContext( + exc_val, + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) + await self.client._topology.handle_error(self.server_address, err_ctx) + + async def __aenter__(self) -> _MongoClientErrorHandler: + return self + + async def __aexit__( + self, + exc_type: Optional[Type[Exception]], + exc_val: Optional[Exception], + exc_tb: Optional[TracebackType], + ) -> None: + return await self.handle(exc_type, exc_val) + + +class _ClientConnectionRetryable(Generic[T]): + """Responsible for executing retryable connections on read or write operations""" + + def __init__( + self, + mongo_client: AsyncMongoClient, + func: _WriteCall[T] | _ReadCall[T], + bulk: Optional[_Bulk], + operation: str, + is_read: bool = False, + session: Optional[ClientSession] = None, + read_pref: Optional[_ServerMode] = None, + address: Optional[_Address] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ): + self._last_error: Optional[Exception] = None + self._retrying = False + self._multiple_retries = _csot.get_timeout() is not None + self._client = mongo_client + + self._func = func + self._bulk = bulk + self._session = session + self._is_read = is_read + self._retryable = retryable + self._read_pref = read_pref + self._server_selector: Callable[[Selection], Selection] = ( + read_pref if is_read else writable_server_selector # type: ignore + ) + self._address = address + self._server: Server = None # type: ignore + self._deprioritized_servers: list[Server] = [] + self._operation = operation + self._operation_id = operation_id + + async def run(self) -> T: + """Runs the supplied func() and attempts a retry + + :raises: self._last_error: Last exception raised + + :return: Result of the func() call + """ + # Increment the transaction id up front to ensure any retry attempt + # will use the proper txnNumber, even if server or socket selection + # fails before the command can be sent. + if self._is_session_state_retryable() and self._retryable and not self._is_read: + self._session._start_retryable_write() # type: ignore + if self._bulk: + self._bulk.started_retryable_write = True + + while True: + self._check_last_error(check_csot=True) + try: + return await self._read() if self._is_read else await self._write() + except ServerSelectionTimeoutError: + # The application may think the write was never attempted + # if we raise ServerSelectionTimeoutError on the retry + # attempt. Raise the original exception instead. + self._check_last_error() + # A ServerSelectionTimeoutError error indicates that there may + # be a persistent outage. Attempting to retry in this case will + # most likely be a waste of time. + raise + except PyMongoError as exc: + # Execute specialized catch on read + if self._is_read: + if isinstance(exc, (ConnectionFailure, OperationFailure)): + # ConnectionFailures do not supply a code property + exc_code = getattr(exc, "code", None) + if self._is_not_eligible_for_retry() or ( + isinstance(exc, OperationFailure) + and exc_code not in helpers_constants._RETRYABLE_ERROR_CODES + ): + raise + self._retrying = True + self._last_error = exc + else: + raise + + # Specialized catch on write operation + if not self._is_read: + if not self._retryable: + raise + retryable_write_error_exc = exc.has_error_label("RetryableWriteError") + if retryable_write_error_exc: + assert self._session + await self._session._unpin() + if not retryable_write_error_exc or self._is_not_eligible_for_retry(): + if exc.has_error_label("NoWritesPerformed") and self._last_error: + raise self._last_error from exc + else: + raise + if self._bulk: + self._bulk.retrying = True + else: + self._retrying = True + if not exc.has_error_label("NoWritesPerformed"): + self._last_error = exc + if self._last_error is None: + self._last_error = exc + + if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded: + self._deprioritized_servers.append(self._server) + + def _is_not_eligible_for_retry(self) -> bool: + """Checks if the exchange is not eligible for retry""" + return not self._retryable or (self._is_retrying() and not self._multiple_retries) + + def _is_retrying(self) -> bool: + """Checks if the exchange is currently undergoing a retry""" + return self._bulk.retrying if self._bulk else self._retrying + + def _is_session_state_retryable(self) -> bool: + """Checks if provided session is eligible for retry + + reads: Make sure there is no ongoing transaction (if provided a session) + writes: Make sure there is a session without an active transaction + """ + if self._is_read: + return not (self._session and self._session.in_transaction) + return bool(self._session and not self._session.in_transaction) + + def _check_last_error(self, check_csot: bool = False) -> None: + """Checks if the ongoing client exchange experienced a exception previously. + If so, raise last error + + :param check_csot: Checks CSOT to ensure we are retrying with time remaining defaults to False + """ + if self._is_retrying(): + remaining = _csot.remaining() + if not check_csot or (remaining is not None and remaining <= 0): + assert self._last_error is not None + raise self._last_error + + async def _get_server(self) -> Server: + """Retrieves a server object based on provided object context + + :return: Abstraction to connect to server + """ + return await self._client._select_server( + self._server_selector, + self._session, + self._operation, + address=self._address, + deprioritized_servers=self._deprioritized_servers, + operation_id=self._operation_id, + ) + + async def _write(self) -> T: + """Wrapper method for write-type retryable client executions + + :return: Output for func()'s call + """ + try: + max_wire_version = 0 + is_mongos = False + self._server = await self._get_server() + async with self._client._checkout(self._server, self._session) as conn: + max_wire_version = conn.max_wire_version + sessions_supported = ( + self._session + and self._server.description.retryable_writes_supported + and conn.supports_sessions + ) + is_mongos = conn.is_mongos + if not sessions_supported: + # A retry is not possible because this server does + # not support sessions raise the last error. + self._check_last_error() + self._retryable = False + return await self._func(self._session, conn, self._retryable) # type: ignore + except PyMongoError as exc: + if not self._retryable: + raise + # Add the RetryableWriteError label, if applicable. + _add_retryable_write_error(exc, max_wire_version, is_mongos) + raise + + async def _read(self) -> T: + """Wrapper method for read-type retryable client executions + + :return: Output for func()'s call + """ + self._server = await self._get_server() + assert self._read_pref is not None, "Read Preference required on read calls" + async with self._client._conn_from_server(self._read_pref, self._server, self._session) as ( + conn, + read_pref, + ): + if self._retrying and not self._retryable: + self._check_last_error() + return await self._func(self._session, self._server, conn, read_pref) # type: ignore + + +def _after_fork_child() -> None: + """Releases the locks in child process and resets the + topologies in all MongoClients. + """ + # Reinitialize locks + _release_locks() + + # Perform cleanup in clients (i.e. get rid of topology) + for _, client in AsyncMongoClient._clients.items(): + client._after_fork() + + +def _detect_external_db(entity: str) -> bool: + """Detects external database hosts and logs an informational message at the INFO level.""" + entity = entity.lower() + cosmos_db_hosts = [".cosmos.azure.com"] + document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"] + + for host in cosmos_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a CosmosDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py new file mode 100644 index 0000000000..6bd8061081 --- /dev/null +++ b/pymongo/asynchronous/monitor.py @@ -0,0 +1,487 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Class to monitor a MongoDB server on a background thread.""" + +from __future__ import annotations + +import atexit +import time +import weakref +from typing import TYPE_CHECKING, Any, Mapping, Optional, cast + +from pymongo._csot import MovingMinimum +from pymongo.asynchronous import common, periodic_executor +from pymongo.asynchronous.hello import Hello +from pymongo.asynchronous.periodic_executor import _shutdown_executors +from pymongo.asynchronous.pool import _is_faas +from pymongo.asynchronous.read_preferences import MovingAverage +from pymongo.asynchronous.server_description import ServerDescription +from pymongo.asynchronous.srv_resolver import _SrvResolver +from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled +from pymongo.lock import _create_lock + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import Connection, Pool, _CancellationContext + from pymongo.asynchronous.settings import TopologySettings + from pymongo.asynchronous.topology import Topology + +_IS_SYNC = False + + +def _sanitize(error: Exception) -> None: + """PYTHON-2433 Clear error traceback info.""" + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + +class MonitorBase: + def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): + """Base class to do periodic work on a background thread. + + The background thread is signaled to stop when the Topology or + this instance is freed. + """ + + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. + async def target() -> bool: + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + await monitor._run() # type:ignore[attr-defined] + return True + + executor = periodic_executor.PeriodicExecutor( + interval=interval, min_interval=min_interval, target=target, name=name + ) + + self._executor = executor + + def _on_topology_gc(dummy: Optional[Topology] = None) -> None: + # This prevents GC from waiting 10 seconds for hello to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + self._executor.open() + + def gc_safe_close(self) -> None: + """GC safe close.""" + self._executor.close() + + async def close(self) -> None: + """Close and stop monitoring. + + open() restarts the monitor after closing. + """ + self.gc_safe_close() + + def join(self, timeout: Optional[int] = None) -> None: + """Wait for the monitor to stop.""" + self._executor.join(timeout) + + def request_check(self) -> None: + """If the monitor is sleeping, wake it soon.""" + self._executor.wake() + + +class Monitor(MonitorBase): + def __init__( + self, + server_description: ServerDescription, + topology: Topology, + pool: Pool, + topology_settings: TopologySettings, + ): + """Class to monitor a MongoDB server on a background thread. + + Pass an initial ServerDescription, a Topology, a Pool, and + TopologySettings. + + The Topology is weakly referenced. The Pool must be exclusive to this + Monitor. + """ + super().__init__( + topology, + "pymongo_server_monitor_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + self._server_description = server_description + self._pool = pool + self._settings = topology_settings + self._listeners = self._settings._pool_options._event_listeners + self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat + self._cancel_context: Optional[_CancellationContext] = None + self._rtt_monitor = _RttMonitor( + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) + if topology_settings.server_monitoring_mode == "stream": + self._stream = True + elif topology_settings.server_monitoring_mode == "poll": + self._stream = False + else: + self._stream = not _is_faas() + + def cancel_check(self) -> None: + """Cancel any concurrent hello check. + + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + async def _start_rtt_monitor(self) -> None: + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. + self._rtt_monitor.open() + if self._executor._stopped: + await self._rtt_monitor.close() + + def gc_safe_close(self) -> None: + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() + + async def close(self) -> None: + self.gc_safe_close() + await self._rtt_monitor.close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + await self._reset_connection() + + async def _reset_connection(self) -> None: + # Clear our pooled connection. + await self._pool.reset() + + async def _run(self) -> None: + try: + prev_sd = self._server_description + try: + self._server_description = await self._check_server() + except _OperationCancelled as exc: + _sanitize(exc) + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc + ) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return + + # Update the Topology and clear the server pool on error. + await self._topology.on_change( + self._server_description, + reset_pool=self._server_description.error, + interrupt_connections=isinstance(self._server_description.error, NetworkTimeout), + ) + + if self._stream and ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): + await self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() + except ReferenceError: + # Topology was garbage-collected. + await self.close() + + async def _check_server(self) -> ServerDescription: + """Call hello or read the next streaming response. + + Returns a ServerDescription. + """ + start = time.monotonic() + try: + try: + return await self._check_once() + except (OperationFailure, NotPrimaryError) as exc: + # Update max cluster time even when hello fails. + details = cast(Mapping[str, Any], exc.details) + self._topology.receive_cluster_time(details.get("$clusterTime")) + raise + except ReferenceError: + raise + except Exception as error: + _sanitize(error) + sd = self._server_description + address = sd.address + duration = time.monotonic() - start + if self._publish: + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) + assert self._listeners is not None + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + await self._reset_connection() + if isinstance(error, _OperationCancelled): + raise + self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) + + async def _check_once(self) -> ServerDescription: + """A single attempt to call hello. + + Returns a ServerDescription, or raises an exception. + """ + address = self._server_description.address + if self._publish: + assert self._listeners is not None + sd = self._server_description + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. + awaited = bool( + self._pool.conns + and self._stream + and sd.is_server_type_known + and sd.topology_version + ) + self._listeners.publish_server_heartbeat_started(address, awaited) + + if self._cancel_context and self._cancel_context.cancelled: + await self._reset_connection() + async with self._pool.checkout() as conn: + self._cancel_context = conn.cancel_context + response, round_trip_time = await self._check_with_socket(conn) + if not response.awaitable: + self._rtt_monitor.add_sample(round_trip_time) + + avg_rtt, min_rtt = self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_succeeded( + address, round_trip_time, response, response.awaitable + ) + return sd + + async def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: + """Return (Hello, round_trip_time). + + Can raise ConnectionFailure or OperationFailure. + """ + cluster_time = self._topology.max_cluster_time() + start = time.monotonic() + if conn.more_to_come: + # Read the next streaming hello (MongoDB 4.4+). + response = Hello(await conn._next_reply(), awaitable=True) + elif ( + self._stream and conn.performed_handshake and self._server_description.topology_version + ): + # Initiate streaming hello (MongoDB 4.4+). + response = await conn._hello( + cluster_time, + self._server_description.topology_version, + self._settings.heartbeat_frequency, + ) + else: + # New connection handshake or polling hello (MongoDB <4.4). + response = await conn._hello(cluster_time, None, None) + return response, time.monotonic() - start + + +class SrvMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings): + """Class to poll SRV records on a background thread. + + Pass a Topology and a TopologySettings. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency, + ) + self._settings = topology_settings + self._seedlist = self._settings._seeds + assert isinstance(self._settings.fqdn, str) + self._fqdn: str = self._settings.fqdn + self._startup_time = time.monotonic() + + async def _run(self) -> None: + # Don't poll right after creation, wait 60 seconds first + if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: + return + seedlist = self._get_seedlist() + if seedlist: + self._seedlist = seedlist + try: + await self._topology.on_srv_update(self._seedlist) + except ReferenceError: + # Topology was garbage-collected. + await self.close() + + def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: + """Poll SRV records for a seedlist. + + Returns a list of ServerDescriptions. + """ + try: + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) + seedlist, ttl = resolver.get_hosts_and_min_ttl() + if len(seedlist) == 0: + # As per the spec: this should be treated as a failure. + raise Exception + except Exception: + # As per the spec, upon encountering an error: + # - An error must not be raised + # - SRV records must be rescanned every heartbeatFrequencyMS + # - Topology must be left unchanged + self.request_check() + return None + else: + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + return seedlist + + +class _RttMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings, pool: Pool): + """Maintain round trip times for a server. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_server_rtt_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + + self._pool = pool + self._moving_average = MovingAverage() + self._moving_min = MovingMinimum() + self._lock = _create_lock() + + async def close(self) -> None: + self.gc_safe_close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + await self._pool.reset() + + def add_sample(self, sample: float) -> None: + """Add a RTT sample.""" + with self._lock: + self._moving_average.add_sample(sample) + self._moving_min.add_sample(sample) + + def get(self) -> tuple[Optional[float], float]: + """Get the calculated average, or None if no samples yet and the min.""" + with self._lock: + return self._moving_average.get(), self._moving_min.get() + + def reset(self) -> None: + """Reset the average RTT.""" + with self._lock: + self._moving_average.reset() + self._moving_min.reset() + + async def _run(self) -> None: + try: + # NOTE: This thread is only run when using the streaming + # heartbeat protocol (MongoDB 4.4+). + # XXX: Skip check if the server is unknown? + rtt = await self._ping() + self.add_sample(rtt) + except ReferenceError: + # Topology was garbage-collected. + await self.close() + except Exception: + await self._pool.reset() + + async def _ping(self) -> float: + """Run a "hello" command and return the RTT.""" + async with self._pool.checkout() as conn: + if self._executor._stopped: + raise Exception("_RttMonitor closed") + start = time.monotonic() + await conn.hello() + return time.monotonic() - start + + +# Close monitors to cancel any in progress streaming checks before joining +# executor threads. For an explanation of how this works see the comment +# about _EXECUTORS in periodic_executor.py. +_MONITORS = set() + + +def _register(monitor: MonitorBase) -> None: + ref = weakref.ref(monitor, _unregister) + _MONITORS.add(ref) + + +def _unregister(monitor_ref: weakref.ReferenceType[MonitorBase]) -> None: + _MONITORS.remove(monitor_ref) + + +def _shutdown_monitors() -> None: + if _MONITORS is None: + return + + # Copy the set. Closing monitors removes them. + monitors = list(_MONITORS) + + # Close all monitors. + for ref in monitors: + monitor = ref() + if monitor: + monitor.gc_safe_close() + + monitor = None + + +def _shutdown_resources() -> None: + # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. + shutdown = _shutdown_monitors + if shutdown: # type:ignore[truthy-function] + shutdown() + shutdown = _shutdown_executors + if shutdown: # type:ignore[truthy-function] + shutdown() + + +atexit.register(_shutdown_resources) diff --git a/pymongo/asynchronous/monitoring.py b/pymongo/asynchronous/monitoring.py new file mode 100644 index 0000000000..36d015fe29 --- /dev/null +++ b/pymongo/asynchronous/monitoring.py @@ -0,0 +1,1903 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools to monitor driver events. + +.. versionadded:: 3.1 + +.. attention:: Starting in PyMongo 3.11, the monitoring classes outlined below + are included in the PyMongo distribution under the + :mod:`~pymongo.event_loggers` submodule. + +Use :func:`register` to register global listeners for specific events. +Listeners must inherit from one of the abstract classes below and implement +the correct functions for that class. + +For example, a simple command logger might be implemented like this:: + + import logging + + from pymongo import monitoring + + class CommandLogger(monitoring.CommandListener): + + def started(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event)) + + def failed(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event)) + + monitoring.register(CommandLogger()) + +Server discovery and monitoring events are also available. For example:: + + class ServerLogger(monitoring.ServerListener): + + def opened(self, event): + logging.info("Server {0.server_address} added to topology " + "{0.topology_id}".format(event)) + + def description_changed(self, event): + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + "Server {0.server_address} changed type from " + "{0.previous_description.server_type_name} to " + "{0.new_description.server_type_name}".format(event)) + + def closed(self, event): + logging.warning("Server {0.server_address} removed from topology " + "{0.topology_id}".format(event)) + + + class HeartbeatLogger(monitoring.ServerHeartbeatListener): + + def started(self, event): + logging.info("Heartbeat sent to server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + # The reply.document attribute was added in PyMongo 3.4. + logging.info("Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event)) + + def failed(self, event): + logging.warning("Heartbeat to server {0.connection_id} " + "failed with error {0.reply}".format(event)) + + class TopologyLogger(monitoring.TopologyListener): + + def opened(self, event): + logging.info("Topology with id {0.topology_id} " + "opened".format(event)) + + def description_changed(self, event): + logging.info("Topology description updated for " + "topology id {0.topology_id}".format(event)) + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + "Topology {0.topology_id} changed type from " + "{0.previous_description.topology_type_name} to " + "{0.new_description.topology_type_name}".format(event)) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event): + logging.info("Topology with id {0.topology_id} " + "closed".format(event)) + +Connection monitoring and pooling events are also available. For example:: + + class ConnectionPoolLogger(ConnectionPoolListener): + + def pool_created(self, event): + logging.info("[pool {0.address}] pool created".format(event)) + + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + + def pool_cleared(self, event): + logging.info("[pool {0.address}] pool cleared".format(event)) + + def pool_closed(self, event): + logging.info("[pool {0.address}] pool closed".format(event)) + + def connection_created(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection created".format(event)) + + def connection_ready(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection setup succeeded".format(event)) + + def connection_closed(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection closed, reason: " + "{0.reason}".format(event)) + + def connection_check_out_started(self, event): + logging.info("[pool {0.address}] connection check out " + "started".format(event)) + + def connection_check_out_failed(self, event): + logging.info("[pool {0.address}] connection check out " + "failed, reason: {0.reason}".format(event)) + + def connection_checked_out(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection checked out of pool".format(event)) + + def connection_checked_in(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection checked into pool".format(event)) + + +Event listeners can also be registered per instance of +:class:`~pymongo.mongo_client.MongoClient`:: + + client = MongoClient(event_listeners=[CommandLogger()]) + +Note that previously registered global listeners are automatically included +when configuring per client event listeners. Registering a new global listener +will not add that listener to existing client instances. + +.. note:: Events are delivered **synchronously**. Application threads block + waiting for event handlers (e.g. :meth:`~CommandListener.started`) to + return. Care must be taken to ensure that your event handlers are efficient + enough to not adversely affect overall application performance. + +.. warning:: The command documents published through this API are *not* copies. + If you intend to modify them in any way you must copy them in your event + handler first. +""" + +from __future__ import annotations + +import datetime +from collections import abc, namedtuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +from bson.objectid import ObjectId +from pymongo.asynchronous.hello import Hello +from pymongo.asynchronous.hello_compat import HelloCompat +from pymongo.asynchronous.helpers import _handle_exception +from pymongo.asynchronous.typings import _Address, _DocumentOut +from pymongo.helpers_constants import _SENSITIVE_COMMANDS + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.asynchronous.server_description import ServerDescription + from pymongo.asynchronous.topology_description import TopologyDescription + +_IS_SYNC = False + +_Listeners = namedtuple( + "_Listeners", + ( + "command_listeners", + "server_listeners", + "server_heartbeat_listeners", + "topology_listeners", + "cmap_listeners", + ), +) + +_LISTENERS = _Listeners([], [], [], [], []) + + +class _EventListener: + """Abstract base class for all event listeners.""" + + +class CommandListener(_EventListener): + """Abstract base class for command listeners. + + Handles `CommandStartedEvent`, `CommandSucceededEvent`, + and `CommandFailedEvent`. + """ + + def started(self, event: CommandStartedEvent) -> None: + """Abstract method to handle a `CommandStartedEvent`. + + :param event: An instance of :class:`CommandStartedEvent`. + """ + raise NotImplementedError + + def succeeded(self, event: CommandSucceededEvent) -> None: + """Abstract method to handle a `CommandSucceededEvent`. + + :param event: An instance of :class:`CommandSucceededEvent`. + """ + raise NotImplementedError + + def failed(self, event: CommandFailedEvent) -> None: + """Abstract method to handle a `CommandFailedEvent`. + + :param event: An instance of :class:`CommandFailedEvent`. + """ + raise NotImplementedError + + +class ConnectionPoolListener(_EventListener): + """Abstract base class for connection pool listeners. + + Handles all of the connection pool events defined in the Connection + Monitoring and Pooling Specification: + :class:`PoolCreatedEvent`, :class:`PoolClearedEvent`, + :class:`PoolClosedEvent`, :class:`ConnectionCreatedEvent`, + :class:`ConnectionReadyEvent`, :class:`ConnectionClosedEvent`, + :class:`ConnectionCheckOutStartedEvent`, + :class:`ConnectionCheckOutFailedEvent`, + :class:`ConnectionCheckedOutEvent`, + and :class:`ConnectionCheckedInEvent`. + + .. versionadded:: 3.9 + """ + + def pool_created(self, event: PoolCreatedEvent) -> None: + """Abstract method to handle a :class:`PoolCreatedEvent`. + + Emitted when a connection Pool is created. + + :param event: An instance of :class:`PoolCreatedEvent`. + """ + raise NotImplementedError + + def pool_ready(self, event: PoolReadyEvent) -> None: + """Abstract method to handle a :class:`PoolReadyEvent`. + + Emitted when a connection Pool is marked ready. + + :param event: An instance of :class:`PoolReadyEvent`. + + .. versionadded:: 4.0 + """ + raise NotImplementedError + + def pool_cleared(self, event: PoolClearedEvent) -> None: + """Abstract method to handle a `PoolClearedEvent`. + + Emitted when a connection Pool is cleared. + + :param event: An instance of :class:`PoolClearedEvent`. + """ + raise NotImplementedError + + def pool_closed(self, event: PoolClosedEvent) -> None: + """Abstract method to handle a `PoolClosedEvent`. + + Emitted when a connection Pool is closed. + + :param event: An instance of :class:`PoolClosedEvent`. + """ + raise NotImplementedError + + def connection_created(self, event: ConnectionCreatedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCreatedEvent`. + + Emitted when a connection Pool creates a Connection object. + + :param event: An instance of :class:`ConnectionCreatedEvent`. + """ + raise NotImplementedError + + def connection_ready(self, event: ConnectionReadyEvent) -> None: + """Abstract method to handle a :class:`ConnectionReadyEvent`. + + Emitted when a connection has finished its setup, and is now ready to + use. + + :param event: An instance of :class:`ConnectionReadyEvent`. + """ + raise NotImplementedError + + def connection_closed(self, event: ConnectionClosedEvent) -> None: + """Abstract method to handle a :class:`ConnectionClosedEvent`. + + Emitted when a connection Pool closes a connection. + + :param event: An instance of :class:`ConnectionClosedEvent`. + """ + raise NotImplementedError + + def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. + + Emitted when the driver starts attempting to check out a connection. + + :param event: An instance of :class:`ConnectionCheckOutStartedEvent`. + """ + raise NotImplementedError + + def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. + + Emitted when the driver's attempt to check out a connection fails. + + :param event: An instance of :class:`ConnectionCheckOutFailedEvent`. + """ + raise NotImplementedError + + def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. + + Emitted when the driver successfully checks out a connection. + + :param event: An instance of :class:`ConnectionCheckedOutEvent`. + """ + raise NotImplementedError + + def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckedInEvent`. + + Emitted when the driver checks in a connection back to the connection + Pool. + + :param event: An instance of :class:`ConnectionCheckedInEvent`. + """ + raise NotImplementedError + + +class ServerHeartbeatListener(_EventListener): + """Abstract base class for server heartbeat listeners. + + Handles `ServerHeartbeatStartedEvent`, `ServerHeartbeatSucceededEvent`, + and `ServerHeartbeatFailedEvent`. + + .. versionadded:: 3.3 + """ + + def started(self, event: ServerHeartbeatStartedEvent) -> None: + """Abstract method to handle a `ServerHeartbeatStartedEvent`. + + :param event: An instance of :class:`ServerHeartbeatStartedEvent`. + """ + raise NotImplementedError + + def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: + """Abstract method to handle a `ServerHeartbeatSucceededEvent`. + + :param event: An instance of :class:`ServerHeartbeatSucceededEvent`. + """ + raise NotImplementedError + + def failed(self, event: ServerHeartbeatFailedEvent) -> None: + """Abstract method to handle a `ServerHeartbeatFailedEvent`. + + :param event: An instance of :class:`ServerHeartbeatFailedEvent`. + """ + raise NotImplementedError + + +class TopologyListener(_EventListener): + """Abstract base class for topology monitoring listeners. + Handles `TopologyOpenedEvent`, `TopologyDescriptionChangedEvent`, and + `TopologyClosedEvent`. + + .. versionadded:: 3.3 + """ + + def opened(self, event: TopologyOpenedEvent) -> None: + """Abstract method to handle a `TopologyOpenedEvent`. + + :param event: An instance of :class:`TopologyOpenedEvent`. + """ + raise NotImplementedError + + def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: + """Abstract method to handle a `TopologyDescriptionChangedEvent`. + + :param event: An instance of :class:`TopologyDescriptionChangedEvent`. + """ + raise NotImplementedError + + def closed(self, event: TopologyClosedEvent) -> None: + """Abstract method to handle a `TopologyClosedEvent`. + + :param event: An instance of :class:`TopologyClosedEvent`. + """ + raise NotImplementedError + + +class ServerListener(_EventListener): + """Abstract base class for server listeners. + Handles `ServerOpeningEvent`, `ServerDescriptionChangedEvent`, and + `ServerClosedEvent`. + + .. versionadded:: 3.3 + """ + + def opened(self, event: ServerOpeningEvent) -> None: + """Abstract method to handle a `ServerOpeningEvent`. + + :param event: An instance of :class:`ServerOpeningEvent`. + """ + raise NotImplementedError + + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: + """Abstract method to handle a `ServerDescriptionChangedEvent`. + + :param event: An instance of :class:`ServerDescriptionChangedEvent`. + """ + raise NotImplementedError + + def closed(self, event: ServerClosedEvent) -> None: + """Abstract method to handle a `ServerClosedEvent`. + + :param event: An instance of :class:`ServerClosedEvent`. + """ + raise NotImplementedError + + +def _to_micros(dur: timedelta) -> int: + """Convert duration 'dur' to microseconds.""" + return int(dur.total_seconds() * 10e5) + + +def _validate_event_listeners( + option: str, listeners: Sequence[_EventListeners] +) -> Sequence[_EventListeners]: + """Validate event listeners""" + if not isinstance(listeners, abc.Sequence): + raise TypeError(f"{option} must be a list or tuple") + for listener in listeners: + if not isinstance(listener, _EventListener): + raise TypeError( + f"Listeners for {option} must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." + ) + return listeners + + +def register(listener: _EventListener) -> None: + """Register a global event listener. + + :param listener: A subclasses of :class:`CommandListener`, + :class:`ServerHeartbeatListener`, :class:`ServerListener`, + :class:`TopologyListener`, or :class:`ConnectionPoolListener`. + """ + if not isinstance(listener, _EventListener): + raise TypeError( + f"Listeners for {listener} must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." + ) + if isinstance(listener, CommandListener): + _LISTENERS.command_listeners.append(listener) + if isinstance(listener, ServerHeartbeatListener): + _LISTENERS.server_heartbeat_listeners.append(listener) + if isinstance(listener, ServerListener): + _LISTENERS.server_listeners.append(listener) + if isinstance(listener, TopologyListener): + _LISTENERS.topology_listeners.append(listener) + if isinstance(listener, ConnectionPoolListener): + _LISTENERS.cmap_listeners.append(listener) + + +# The "hello" command is also deemed sensitive when attempting speculative +# authentication. +def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: + if ( + command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) + and "speculativeAuthenticate" in doc + ): + return True + return False + + +class _CommandEvent: + """Base class for command events.""" + + __slots__ = ( + "__cmd_name", + "__rqst_id", + "__conn_id", + "__op_id", + "__service_id", + "__db", + "__server_conn_id", + ) + + def __init__( + self, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + self.__cmd_name = command_name + self.__rqst_id = request_id + self.__conn_id = connection_id + self.__op_id = operation_id + self.__service_id = service_id + self.__db = database_name + self.__server_conn_id = server_connection_id + + @property + def command_name(self) -> str: + """The command name.""" + return self.__cmd_name + + @property + def request_id(self) -> int: + """The request id for this operation.""" + return self.__rqst_id + + @property + def connection_id(self) -> _Address: + """The address (host, port) of the server this command was sent to.""" + return self.__conn_id + + @property + def service_id(self) -> Optional[ObjectId]: + """The service_id this command was sent to, or ``None``. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def operation_id(self) -> Optional[int]: + """An id for this series of events or None.""" + return self.__op_id + + @property + def database_name(self) -> str: + """The database_name this command was sent to, or ``""``. + + .. versionadded:: 4.6 + """ + return self.__db + + @property + def server_connection_id(self) -> Optional[int]: + """The server-side connection id for the connection this command was sent on, or ``None``. + + .. versionadded:: 4.7 + """ + return self.__server_conn_id + + +class CommandStartedEvent(_CommandEvent): + """Event published when a command starts. + + :param command: The command document. + :param database_name: The name of the database this command was run against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + """ + + __slots__ = ("__cmd",) + + def __init__( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + server_connection_id: Optional[int] = None, + ) -> None: + if not command: + raise ValueError(f"{command!r} is not a valid command") + # Command name must be first key. + command_name = next(iter(command)) + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + cmd_name = command_name.lower() + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): + self.__cmd: _DocumentOut = {} + else: + self.__cmd = command + + @property + def command(self) -> _DocumentOut: + """The command document.""" + return self.__cmd + + @property + def database_name(self) -> str: + """The name of the database this command was run against.""" + return super().database_name + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.service_id, + self.server_connection_id, + ) + + +class CommandSucceededEvent(_CommandEvent): + """Event published when a command succeeds. + + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + + __slots__ = ("__duration_micros", "__reply") + + def __init__( + self, + duration: datetime.timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + self.__duration_micros = _to_micros(duration) + cmd_name = command_name.lower() + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): + self.__reply: _DocumentOut = {} + else: + self.__reply = reply + + @property + def duration_micros(self) -> int: + """The duration of this operation in microseconds.""" + return self.__duration_micros + + @property + def reply(self) -> _DocumentOut: + """The server failure document for this operation.""" + return self.__reply + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.service_id, + self.server_connection_id, + ) + + +class CommandFailedEvent(_CommandEvent): + """Event published when a command fails. + + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + + __slots__ = ("__duration_micros", "__failure") + + def __init__( + self, + duration: datetime.timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + self.__duration_micros = _to_micros(duration) + self.__failure = failure + + @property + def duration_micros(self) -> int: + """The duration of this operation in microseconds.""" + return self.__duration_micros + + @property + def failure(self) -> _DocumentOut: + """The server failure document for this operation.""" + return self.__failure + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.failure, + self.service_id, + self.server_connection_id, + ) + + +class _PoolEvent: + """Base class for pool events.""" + + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: + self.__address = address + + @property + def address(self) -> _Address: + """The address (host, port) pair of the server the pool is attempting + to connect to. + """ + return self.__address + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class PoolCreatedEvent(_PoolEvent): + """Published when a Connection Pool is created. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__options",) + + def __init__(self, address: _Address, options: dict[str, Any]) -> None: + super().__init__(address) + self.__options = options + + @property + def options(self) -> dict[str, Any]: + """Any non-default pool options that were set on this Connection Pool.""" + return self.__options + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" + + +class PoolReadyEvent(_PoolEvent): + """Published when a Connection Pool is marked ready. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 4.0 + """ + + __slots__ = () + + +class PoolClearedEvent(_PoolEvent): + """Published when a Connection Pool is cleared. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + :param service_id: The service_id this command was sent to, or ``None``. + :param interrupt_connections: True if all active connections were interrupted by the Pool during clearing. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__service_id", "__interrupt_connections") + + def __init__( + self, + address: _Address, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + super().__init__(address) + self.__service_id = service_id + self.__interrupt_connections = interrupt_connections + + @property + def service_id(self) -> Optional[ObjectId]: + """Connections with this service_id are cleared. + + When service_id is ``None``, all connections in the pool are cleared. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def interrupt_connections(self) -> bool: + """If True, active connections are interrupted during clearing. + + .. versionadded:: 4.7 + """ + return self.__interrupt_connections + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r}, {self.__interrupt_connections!r})" + + +class PoolClosedEvent(_PoolEvent): + """Published when a Connection Pool is closed. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionClosedReason: + """An enum that defines values for `reason` on a + :class:`ConnectionClosedEvent`. + + .. versionadded:: 3.9 + """ + + STALE = "stale" + """The pool was cleared, making the connection no longer valid.""" + + IDLE = "idle" + """The connection became stale by being idle for too long (maxIdleTimeMS). + """ + + ERROR = "error" + """The connection experienced an error, making it no longer valid.""" + + POOL_CLOSED = "poolClosed" + """The pool was closed, making the connection no longer valid.""" + + +class ConnectionCheckOutFailedReason: + """An enum that defines values for `reason` on a + :class:`ConnectionCheckOutFailedEvent`. + + .. versionadded:: 3.9 + """ + + TIMEOUT = "timeout" + """The connection check out attempt exceeded the specified timeout.""" + + POOL_CLOSED = "poolClosed" + """The pool was previously closed, and cannot provide new connections.""" + + CONN_ERROR = "connectionError" + """The connection check out attempt experienced an error while setting up + a new connection. + """ + + +class _ConnectionEvent: + """Private base class for connection events.""" + + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: + self.__address = address + + @property + def address(self) -> _Address: + """The address (host, port) pair of the server this connection is + attempting to connect to. + """ + return self.__address + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + + @property + def connection_id(self) -> int: + """The ID of the connection.""" + return self.__connection_id + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" + + +class _ConnectionDurationEvent(_ConnectionIdEvent): + """Private base class for connection events with a duration.""" + + __slots__ = ("__duration",) + + def __init__(self, address: _Address, connection_id: int, duration: Optional[float]) -> None: + super().__init__(address, connection_id) + self.__duration = duration + + @property + def duration(self) -> Optional[float]: + """The duration of the connection event. + + .. versionadded:: 4.7 + """ + return self.__duration + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.connection_id!r}, {self.__duration!r})" + + +class ConnectionCreatedEvent(_ConnectionIdEvent): + """Published when a Connection Pool creates a Connection object. + + NOTE: This connection is not ready for use until the + :class:`ConnectionReadyEvent` is published. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionReadyEvent(_ConnectionDurationEvent): + """Published when a Connection has finished its setup, and is ready to use. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionClosedEvent(_ConnectionIdEvent): + """Published when a Connection is closed. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + :param reason: A reason explaining why this connection was closed. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__reason",) + + def __init__(self, address: _Address, connection_id: int, reason: str): + super().__init__(address, connection_id) + self.__reason = reason + + @property + def reason(self) -> str: + """A reason explaining why this connection was closed. + + The reason must be one of the strings from the + :class:`ConnectionClosedReason` enum. + """ + return self.__reason + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r})".format( + self.__class__.__name__, + self.address, + self.connection_id, + self.__reason, + ) + + +class ConnectionCheckOutStartedEvent(_ConnectionEvent): + """Published when the driver starts attempting to check out a connection. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionCheckOutFailedEvent(_ConnectionDurationEvent): + """Published when the driver's attempt to check out a connection fails. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param reason: A reason explaining why connection check out failed. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__reason",) + + def __init__(self, address: _Address, reason: str, duration: Optional[float]) -> None: + super().__init__(address=address, connection_id=0, duration=duration) + self.__reason = reason + + @property + def reason(self) -> str: + """A reason explaining why connection check out failed. + + The reason must be one of the strings from the + :class:`ConnectionCheckOutFailedReason` enum. + """ + return self.__reason + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r}, {self.duration!r})" + + +class ConnectionCheckedOutEvent(_ConnectionDurationEvent): + """Published when the driver successfully checks out a connection. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionCheckedInEvent(_ConnectionIdEvent): + """Published when the driver checks in a Connection into the Pool. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class _ServerEvent: + """Base class for server events.""" + + __slots__ = ("__server_address", "__topology_id") + + def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: + self.__server_address = server_address + self.__topology_id = topology_id + + @property + def server_address(self) -> _Address: + """The address (host, port) pair of the server""" + return self.__server_address + + @property + def topology_id(self) -> ObjectId: + """A unique identifier for the topology this server is a part of.""" + return self.__topology_id + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.server_address} topology_id: {self.topology_id}>" + + +class ServerDescriptionChangedEvent(_ServerEvent): + """Published when server description changes. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__previous_description", "__new_description") + + def __init__( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + *args: Any, + ) -> None: + super().__init__(*args) + self.__previous_description = previous_description + self.__new_description = new_description + + @property + def previous_description(self) -> ServerDescription: + """The previous + :class:`~pymongo.server_description.ServerDescription`. + """ + return self.__previous_description + + @property + def new_description(self) -> ServerDescription: + """The new + :class:`~pymongo.server_description.ServerDescription`. + """ + return self.__new_description + + def __repr__(self) -> str: + return "<{} {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.server_address, + self.previous_description, + self.new_description, + ) + + +class ServerOpeningEvent(_ServerEvent): + """Published when server is initialized. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class ServerClosedEvent(_ServerEvent): + """Published when server is closed. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class TopologyEvent: + """Base class for topology description events.""" + + __slots__ = ("__topology_id",) + + def __init__(self, topology_id: ObjectId) -> None: + self.__topology_id = topology_id + + @property + def topology_id(self) -> ObjectId: + """A unique identifier for the topology this server is a part of.""" + return self.__topology_id + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" + + +class TopologyDescriptionChangedEvent(TopologyEvent): + """Published when the topology description changes. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__previous_description", "__new_description") + + def __init__( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + *args: Any, + ) -> None: + super().__init__(*args) + self.__previous_description = previous_description + self.__new_description = new_description + + @property + def previous_description(self) -> TopologyDescription: + """The previous + :class:`~pymongo.topology_description.TopologyDescription`. + """ + return self.__previous_description + + @property + def new_description(self) -> TopologyDescription: + """The new + :class:`~pymongo.topology_description.TopologyDescription`. + """ + return self.__new_description + + def __repr__(self) -> str: + return "<{} topology_id: {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.topology_id, + self.previous_description, + self.new_description, + ) + + +class TopologyOpenedEvent(TopologyEvent): + """Published when the topology is initialized. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class TopologyClosedEvent(TopologyEvent): + """Published when the topology is closed. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class _ServerHeartbeatEvent: + """Base class for server heartbeat events.""" + + __slots__ = ("__connection_id", "__awaited") + + def __init__(self, connection_id: _Address, awaited: bool = False) -> None: + self.__connection_id = connection_id + self.__awaited = awaited + + @property + def connection_id(self) -> _Address: + """The address (host, port) of the server this heartbeat was sent + to. + """ + return self.__connection_id + + @property + def awaited(self) -> bool: + """Whether the heartbeat was issued as an awaitable hello command. + + .. versionadded:: 4.6 + """ + return self.__awaited + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.connection_id} awaited: {self.awaited}>" + + +class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): + """Published when a heartbeat is started. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): + """Fired when the server heartbeat succeeds. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__duration", "__reply") + + def __init__( + self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) + self.__duration = duration + self.__reply = reply + + @property + def duration(self) -> float: + """The duration of this heartbeat in microseconds.""" + return self.__duration + + @property + def reply(self) -> Hello: + """An instance of :class:`~pymongo.hello.Hello`.""" + return self.__reply + + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + + +class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): + """Fired when the server heartbeat fails, either with an "ok: 0" + or a socket exception. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__duration", "__reply") + + def __init__( + self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) + self.__duration = duration + self.__reply = reply + + @property + def duration(self) -> float: + """The duration of this heartbeat in microseconds.""" + return self.__duration + + @property + def reply(self) -> Exception: + """A subclass of :exc:`Exception`.""" + return self.__reply + + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + + +class _EventListeners: + """Configure event listeners for a client instance. + + Any event listeners registered globally are included by default. + + :param listeners: A list of event listeners. + """ + + def __init__(self, listeners: Optional[Sequence[_EventListener]]): + self.__command_listeners = _LISTENERS.command_listeners[:] + self.__server_listeners = _LISTENERS.server_listeners[:] + lst = _LISTENERS.server_heartbeat_listeners + self.__server_heartbeat_listeners = lst[:] + self.__topology_listeners = _LISTENERS.topology_listeners[:] + self.__cmap_listeners = _LISTENERS.cmap_listeners[:] + if listeners is not None: + for lst in listeners: + if isinstance(lst, CommandListener): + self.__command_listeners.append(lst) + if isinstance(lst, ServerListener): + self.__server_listeners.append(lst) + if isinstance(lst, ServerHeartbeatListener): + self.__server_heartbeat_listeners.append(lst) + if isinstance(lst, TopologyListener): + self.__topology_listeners.append(lst) + if isinstance(lst, ConnectionPoolListener): + self.__cmap_listeners.append(lst) + self.__enabled_for_commands = bool(self.__command_listeners) + self.__enabled_for_server = bool(self.__server_listeners) + self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) + self.__enabled_for_topology = bool(self.__topology_listeners) + self.__enabled_for_cmap = bool(self.__cmap_listeners) + + @property + def enabled_for_commands(self) -> bool: + """Are any CommandListener instances registered?""" + return self.__enabled_for_commands + + @property + def enabled_for_server(self) -> bool: + """Are any ServerListener instances registered?""" + return self.__enabled_for_server + + @property + def enabled_for_server_heartbeat(self) -> bool: + """Are any ServerHeartbeatListener instances registered?""" + return self.__enabled_for_server_heartbeat + + @property + def enabled_for_topology(self) -> bool: + """Are any TopologyListener instances registered?""" + return self.__enabled_for_topology + + @property + def enabled_for_cmap(self) -> bool: + """Are any ConnectionPoolListener instances registered?""" + return self.__enabled_for_cmap + + def event_listeners(self) -> list[_EventListeners]: + """List of registered event listeners.""" + return ( + self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + + self.__cmap_listeners + ) + + def publish_command_start( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + ) -> None: + """Publish a CommandStartedEvent to all command listeners. + + :param command: The command document. + :param database_name: The name of the database this command was run + against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + """ + if op_id is None: + op_id = request_id + event = CommandStartedEvent( + command, + database_name, + request_id, + connection_id, + op_id, + service_id=service_id, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.started(event) + except Exception: + _handle_exception() + + def publish_command_success( + self, + duration: timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + speculative_hello: bool = False, + database_name: str = "", + ) -> None: + """Publish a CommandSucceededEvent to all command listeners. + + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param speculative_hello: Was the command sent with speculative auth? + :param database_name: The database this command was sent to, or ``""``. + """ + if op_id is None: + op_id = request_id + if speculative_hello: + # Redact entire response when the command started contained + # speculativeAuthenticate. + reply = {} + event = CommandSucceededEvent( + duration, + reply, + command_name, + request_id, + connection_id, + op_id, + service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.succeeded(event) + except Exception: + _handle_exception() + + def publish_command_failure( + self, + duration: timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: + """Publish a CommandFailedEvent to all command listeners. + + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document or failure description + document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + if op_id is None: + op_id = request_id + event = CommandFailedEvent( + duration, + failure, + command_name, + request_id, + connection_id, + op_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.failed(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_started(self, connection_id: _Address, awaited: bool) -> None: + """Publish a ServerHeartbeatStartedEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param awaited: True if this heartbeat is part of an awaitable hello command. + """ + event = ServerHeartbeatStartedEvent(connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.started(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_succeeded( + self, connection_id: _Address, duration: float, reply: Hello, awaited: bool + ) -> None: + """Publish a ServerHeartbeatSucceededEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible + resolution for the platform. + :param reply: The command reply. + :param awaited: True if the response was awaited. + """ + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.succeeded(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_failed( + self, connection_id: _Address, duration: float, reply: Exception, awaited: bool + ) -> None: + """Publish a ServerHeartbeatFailedEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible + resolution for the platform. + :param reply: The command reply. + :param awaited: True if the response was awaited. + """ + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.failed(event) + except Exception: + _handle_exception() + + def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: + """Publish a ServerOpeningEvent to all server listeners. + + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerOpeningEvent(server_address, topology_id) + for subscriber in self.__server_listeners: + try: + subscriber.opened(event) + except Exception: + _handle_exception() + + def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: + """Publish a ServerClosedEvent to all server listeners. + + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerClosedEvent(server_address, topology_id) + for subscriber in self.__server_listeners: + try: + subscriber.closed(event) + except Exception: + _handle_exception() + + def publish_server_description_changed( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + server_address: _Address, + topology_id: ObjectId, + ) -> None: + """Publish a ServerDescriptionChangedEvent to all server listeners. + + :param previous_description: The previous server description. + :param server_address: The address (host, port) pair of the server. + :param new_description: The new server description. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerDescriptionChangedEvent( + previous_description, new_description, server_address, topology_id + ) + for subscriber in self.__server_listeners: + try: + subscriber.description_changed(event) + except Exception: + _handle_exception() + + def publish_topology_opened(self, topology_id: ObjectId) -> None: + """Publish a TopologyOpenedEvent to all topology listeners. + + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyOpenedEvent(topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.opened(event) + except Exception: + _handle_exception() + + def publish_topology_closed(self, topology_id: ObjectId) -> None: + """Publish a TopologyClosedEvent to all topology listeners. + + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyClosedEvent(topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.closed(event) + except Exception: + _handle_exception() + + def publish_topology_description_changed( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + topology_id: ObjectId, + ) -> None: + """Publish a TopologyDescriptionChangedEvent to all topology listeners. + + :param previous_description: The previous topology description. + :param new_description: The new topology description. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.description_changed(event) + except Exception: + _handle_exception() + + def publish_pool_created(self, address: _Address, options: dict[str, Any]) -> None: + """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" + event = PoolCreatedEvent(address, options) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_created(event) + except Exception: + _handle_exception() + + def publish_pool_ready(self, address: _Address) -> None: + """Publish a :class:`PoolReadyEvent` to all pool listeners.""" + event = PoolReadyEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_ready(event) + except Exception: + _handle_exception() + + def publish_pool_cleared( + self, + address: _Address, + service_id: Optional[ObjectId], + interrupt_connections: bool = False, + ) -> None: + """Publish a :class:`PoolClearedEvent` to all pool listeners.""" + event = PoolClearedEvent(address, service_id, interrupt_connections) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_cleared(event) + except Exception: + _handle_exception() + + def publish_pool_closed(self, address: _Address) -> None: + """Publish a :class:`PoolClosedEvent` to all pool listeners.""" + event = PoolClosedEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_closed(event) + except Exception: + _handle_exception() + + def publish_connection_created(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionCreatedEvent` to all connection + listeners. + """ + event = ConnectionCreatedEvent(address, connection_id) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_created(event) + except Exception: + _handle_exception() + + def publish_connection_ready( + self, address: _Address, connection_id: int, duration: float + ) -> None: + """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" + event = ConnectionReadyEvent(address, connection_id, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_ready(event) + except Exception: + _handle_exception() + + def publish_connection_closed(self, address: _Address, connection_id: int, reason: str) -> None: + """Publish a :class:`ConnectionClosedEvent` to all connection + listeners. + """ + event = ConnectionClosedEvent(address, connection_id, reason) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_closed(event) + except Exception: + _handle_exception() + + def publish_connection_check_out_started(self, address: _Address) -> None: + """Publish a :class:`ConnectionCheckOutStartedEvent` to all connection + listeners. + """ + event = ConnectionCheckOutStartedEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_check_out_started(event) + except Exception: + _handle_exception() + + def publish_connection_check_out_failed( + self, address: _Address, reason: str, duration: float + ) -> None: + """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection + listeners. + """ + event = ConnectionCheckOutFailedEvent(address, reason, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_check_out_failed(event) + except Exception: + _handle_exception() + + def publish_connection_checked_out( + self, address: _Address, connection_id: int, duration: float + ) -> None: + """Publish a :class:`ConnectionCheckedOutEvent` to all connection + listeners. + """ + event = ConnectionCheckedOutEvent(address, connection_id, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_checked_out(event) + except Exception: + _handle_exception() + + def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionCheckedInEvent` to all connection + listeners. + """ + event = ConnectionCheckedInEvent(address, connection_id) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_checked_in(event) + except Exception: + _handle_exception() diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py new file mode 100644 index 0000000000..25fffaca19 --- /dev/null +++ b/pymongo/asynchronous/network.py @@ -0,0 +1,418 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal network layer helper methods.""" +from __future__ import annotations + +import asyncio +import datetime +import errno +import logging +import socket +import time +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +from bson import _decode_all_selective +from pymongo import _csot +from pymongo.asynchronous import helpers as _async_helpers +from pymongo.asynchronous import message as _async_message +from pymongo.asynchronous.common import MAX_MESSAGE_SIZE +from pymongo.asynchronous.compression_support import _NO_COMPRESSION, decompress +from pymongo.asynchronous.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.asynchronous.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.asynchronous.monitoring import _is_speculative_authenticate +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, + ProtocolError, + _OperationCancelled, +) +from pymongo.network_layer import ( + _POLL_TIMEOUT, + _UNPACK_COMPRESSION_HEADER, + _UNPACK_HEADER, + BLOCKING_IO_ERRORS, + async_sendall, +) +from pymongo.socket_checker import _errno_from_exception + +if TYPE_CHECKING: + from bson import CodecOptions + from pymongo.asynchronous.client_session import ClientSession + from pymongo.asynchronous.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.monitoring import _EventListeners + from pymongo.asynchronous.pool import Connection + from pymongo.asynchronous.read_preferences import _ServerMode + from pymongo.asynchronous.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + from pymongo.read_concern import ReadConcern + from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +async def command( + conn: Connection, + dbname: str, + spec: MutableMapping[str, Any], + is_mongos: bool, + read_preference: Optional[_ServerMode], + codec_options: CodecOptions[_DocumentType], + session: Optional[ClientSession], + client: Optional[AsyncMongoClient], + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + address: Optional[_Address] = None, + listeners: Optional[_EventListeners] = None, + max_bson_size: Optional[int] = None, + read_concern: Optional[ReadConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + compression_ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, + use_op_msg: bool = False, + unacknowledged: bool = False, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + write_concern: Optional[WriteConcern] = None, +) -> _DocumentType: + """Execute a command over the socket, or raise socket.error. + + :param conn: a Connection instance + :param dbname: name of the database on which to run the command + :param spec: a command document as an ordered dict type, eg SON. + :param is_mongos: are we connected to a mongos? + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param session: optional ClientSession instance. + :param client: optional AsyncMongoClient instance for updating $clusterTime. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param address: the (host, port) of `conn` + :param listeners: An instance of :class:`~pymongo.monitoring.EventListeners` + :param max_bson_size: The maximum encoded bson size for this server + :param read_concern: The read concern for this command. + :param parse_write_concern_error: Whether to parse the ``writeConcernError`` + field in the command response. + :param collation: The collation for this command. + :param compression_ctx: optional compression Context. + :param use_op_msg: True if we should use OP_MSG. + :param unacknowledged: True if this is an unacknowledged command. + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + :param exhaust_allowed: True if we should enable OP_MSG exhaustAllowed. + """ + name = next(iter(spec)) + ns = dbname + ".$cmd" + speculative_hello = False + + # Publish the original command document, perhaps with lsid and $clusterTime. + orig = spec + if is_mongos and not use_op_msg: + assert read_preference is not None + spec = _async_message._maybe_add_read_preference(spec, read_preference) + if read_concern and not (session and session.in_transaction): + if read_concern.level: + spec["readConcern"] = read_concern.document + if session: + session._update_read_concern(spec, conn) + if collation is not None: + spec["collation"] = collation + + publish = listeners is not None and listeners.enabled_for_commands + start = datetime.datetime.now() + if publish: + speculative_hello = _is_speculative_authenticate(name, spec) + + if compression_ctx and name.lower() in _NO_COMPRESSION: + compression_ctx = None + + if client and client._encrypter and not client._encrypter._bypass_auto_encryption: + spec = orig = await client._encrypter.encrypt(dbname, spec, codec_options) + + # Support CSOT + if client: + conn.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) + + if use_op_msg: + flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 + flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 + request_id, msg, size, max_doc_size = _async_message._op_msg( + flags, spec, dbname, read_preference, codec_options, ctx=compression_ctx + ) + # If this is an unacknowledged write then make sure the encoded doc(s) + # are small enough, otherwise rely on the server to return an error. + if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: + _async_message._raise_document_too_large(name, size, max_bson_size) + else: + request_id, msg, size = _async_message._query( + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) + + if max_bson_size is not None and size > max_bson_size + _async_message._COMMAND_OVERHEAD: + _async_message._raise_document_too_large( + name, size, max_bson_size + _async_message._COMMAND_OVERHEAD + ) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=spec, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_start( + orig, + dbname, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + await async_sendall(conn.conn, msg) + if use_op_msg and unacknowledged: + # Unacknowledged, fake a successful command response. + reply = None + response_doc: _DocumentOut = {"ok": 1} + else: + reply = await receive_message(conn, request_id) + conn.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response( + codec_options=codec_options, user_fields=user_fields + ) + + response_doc = unpacked_docs[0] + if client: + await client._process_response(response_doc, session) + if check: + _async_helpers._check_command_response( + response_doc, + conn.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) + except Exception as exc: + duration = datetime.datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _async_message._convert_exception(exc) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_failure( + duration, + failure, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbname, + ) + raise + duration = datetime.datetime.now() - start + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=response_doc, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + speculative_authenticate="speculativeAuthenticate" in orig, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_success( + duration, + response_doc, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + speculative_hello=speculative_hello, + database_name=dbname, + ) + + if client and client._encrypter and reply: + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + response_doc = cast( + "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] + ) + + return response_doc # type: ignore[return-value] + + +async def receive_message( + conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + timeout = conn.conn.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + # Ignore the response's request id. + length, _, response_to, op_code = _UNPACK_HEADER( + await _receive_data_on_socket(conn, 16, deadline) + ) + # No request_id for exhaust cursor "getMore". + if request_id is not None: + if request_id != response_to: + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({max_message_size!r})" + ) + if op_code == 2012: + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( + await _receive_data_on_socket(conn, 9, deadline) + ) + data = decompress(await _receive_data_on_socket(conn, length - 25, deadline), compressor_id) + else: + data = await _receive_data_on_socket(conn, length - 16, deadline) + + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) + + +async def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: + """Block until at least one byte is read, or a timeout, or a cancel.""" + sock = conn.conn + timed_out = False + # Check if the connection's socket has been manually closed + if sock.fileno() == -1: + return + while True: + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) + else: + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") + await asyncio.sleep(0) + + +async def _receive_data_on_socket( + conn: Connection, length: int, deadline: Optional[float] +) -> memoryview: + buf = bytearray(length) + mv = memoryview(buf) + bytes_read = 0 + while bytes_read < length: + try: + await wait_for_read(conn, deadline) + # CSOT: Update timeout. When the timeout has expired perform one + # final non-blocking recv. This helps avoid spurious timeouts when + # the response is actually already buffered on the client. + if _csot.get_timeout() and deadline is not None: + conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) + chunk_length = conn.conn.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") from None + except OSError as exc: + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise OSError("connection closed") + + bytes_read += chunk_length + + return mv diff --git a/pymongo/asynchronous/operations.py b/pymongo/asynchronous/operations.py new file mode 100644 index 0000000000..d4beff759d --- /dev/null +++ b/pymongo/asynchronous/operations.py @@ -0,0 +1,625 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Operation class definitions.""" +from __future__ import annotations + +import enum +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +from bson.raw_bson import RawBSONDocument +from pymongo.asynchronous import helpers +from pymongo.asynchronous.collation import validate_collation_or_none +from pymongo.asynchronous.common import validate_is_mapping, validate_list +from pymongo.asynchronous.helpers import _gen_index_name, _index_document, _index_list +from pymongo.asynchronous.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from pymongo.asynchronous.bulk import _Bulk + +_IS_SYNC = False + +# Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary +_IndexList = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_IndexKeyHint = Union[str, _IndexList] + + +class _Op(str, enum.Enum): + ABORT = "abortTransaction" + AGGREGATE = "aggregate" + COMMIT = "commitTransaction" + COUNT = "count" + CREATE = "create" + CREATE_INDEXES = "createIndexes" + CREATE_SEARCH_INDEXES = "createSearchIndexes" + DELETE = "delete" + DISTINCT = "distinct" + DROP = "drop" + DROP_DATABASE = "dropDatabase" + DROP_INDEXES = "dropIndexes" + DROP_SEARCH_INDEXES = "dropSearchIndexes" + END_SESSIONS = "endSessions" + FIND_AND_MODIFY = "findAndModify" + FIND = "find" + INSERT = "insert" + LIST_COLLECTIONS = "listCollections" + LIST_INDEXES = "listIndexes" + LIST_SEARCH_INDEX = "listSearchIndexes" + LIST_DATABASES = "listDatabases" + UPDATE = "update" + UPDATE_INDEX = "updateIndex" + UPDATE_SEARCH_INDEX = "updateSearchIndex" + RENAME = "rename" + GETMORE = "getMore" + KILL_CURSORS = "killCursors" + TEST = "testOperation" + + +class InsertOne(Generic[_DocumentType]): + """Represents an insert_one operation.""" + + __slots__ = ("_doc",) + + def __init__(self, document: _DocumentType) -> None: + """Create an InsertOne instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.bulk_write`. + + :param document: The document to insert. If the document is missing an + _id field one will be added. + """ + self._doc = document + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_insert(self._doc) # type: ignore[arg-type] + + def __repr__(self) -> str: + return f"InsertOne({self._doc!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return other._doc == self._doc + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class DeleteOne: + """Represents a delete_one operation.""" + + __slots__ = ("_filter", "_collation", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create a DeleteOne instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.bulk_write`. + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._collation = collation + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __repr__(self) -> str: + return f"DeleteOne({self._filter!r}, {self._collation!r}, {self._hint!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return (other._filter, other._collation, other._hint) == ( + self._filter, + self._collation, + self._hint, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class DeleteMany: + """Represents a delete_many operation.""" + + __slots__ = ("_filter", "_collation", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create a DeleteMany instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.bulk_write`. + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._collation = collation + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __repr__(self) -> str: + return f"DeleteMany({self._filter!r}, {self._collation!r}, {self._hint!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return (other._filter, other._collation, other._hint) == ( + self._filter, + self._collation, + self._hint, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class ReplaceOne(Generic[_DocumentType]): + """Represents a replace_one operation.""" + + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + replacement: Union[_DocumentType, RawBSONDocument], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create a ReplaceOne instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.bulk_write`. + + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the ``collation`` option. + """ + if filter is not None: + validate_is_mapping("filter", filter) + if upsert is not None: + validate_boolean("upsert", upsert) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._doc = replacement + self._upsert = upsert + self._collation = collation + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_replace( + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + other._hint, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + ) + + +class _UpdateOp: + """Private base class for update operations.""" + + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + doc: Union[Mapping[str, Any], _Pipeline], + upsert: bool, + collation: Optional[_CollationIn], + array_filters: Optional[list[Mapping[str, Any]]], + hint: Optional[_IndexKeyHint], + ): + if filter is not None: + validate_is_mapping("filter", filter) + if upsert is not None: + validate_boolean("upsert", upsert) + if array_filters is not None: + validate_list("array_filters", array_filters) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + + self._filter = filter + self._doc = doc + self._upsert = upsert + self._collation = collation + self._array_filters = array_filters + + def __eq__(self, other: object) -> bool: + if isinstance(other, type(self)): + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._array_filters, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) + return NotImplemented + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) + + +class UpdateOne(_UpdateOp): + """Represents an update_one operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Represents an update_one operation. + + For use with :meth:`~pymongo.collection.AsyncCollection.bulk_write`. + + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added the `array_filters` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + super().__init__(filter, update, upsert, collation, array_filters, hint) + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_update( + self._filter, + self._doc, + False, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + +class UpdateMany(_UpdateOp): + """Represents an update_many operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create an UpdateMany instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.bulk_write`. + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added the `array_filters` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + super().__init__(filter, update, upsert, collation, array_filters, hint) + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_update( + self._filter, + self._doc, + True, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + +class IndexModel: + """Represents an index to create.""" + + __slots__ = ("__document",) + + def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: + """Create an Index instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.create_indexes`. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str`, and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after seconds. The indexed field must + be a UTC datetime or the data will not expire. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation`: An instance of :class:`~pymongo.collation.Collation` + that specifies the collation to use. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the { "$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. + + See the MongoDB documentation for a full list of supported options by + server version. + + :param keys: a single key or a list containing (key, direction) pairs + or keys specifying the index to create. + :param kwargs: any additional index creation + options (see the above list) should be passed as keyword + arguments. + + .. versionchanged:: 3.11 + Added the ``hidden`` option. + .. versionchanged:: 3.2 + Added the ``partialFilterExpression`` option to support partial + indexes. + + .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ + """ + keys = _index_list(keys) + if kwargs.get("name") is None: + kwargs["name"] = _gen_index_name(keys) + kwargs["key"] = _index_document(keys) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + self.__document = kwargs + if collation is not None: + self.__document["collation"] = collation + + @property + def document(self) -> dict[str, Any]: + """An index document suitable for passing to the createIndexes + command. + """ + return self.__document + + +class SearchIndexModel: + """Represents a search index to create.""" + + __slots__ = ("__document",) + + def __init__( + self, + definition: Mapping[str, Any], + name: Optional[str] = None, + type: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Create a Search Index instance. + + For use with :meth:`~pymongo.collection.AsyncCollection.create_search_index` and :meth:`~pymongo.collection.AsyncCollection.create_search_indexes`. + + :param definition: The definition for this index. + :param name: The name for this index, if present. + :param type: The type for this index which defaults to "search". Alternative values include "vectorSearch". + :param kwargs: Keyword arguments supplying any additional options. + + .. note:: Search indexes require a MongoDB server version 7.0+ Atlas cluster. + .. versionadded:: 4.5 + .. versionchanged:: 4.7 + Added the type and kwargs arguments. + """ + self.__document: dict[str, Any] = {} + if name is not None: + self.__document["name"] = name + self.__document["definition"] = definition + if type is not None: + self.__document["type"] = type + self.__document.update(kwargs) + + @property + def document(self) -> Mapping[str, Any]: + """The document for this index.""" + return self.__document diff --git a/pymongo/asynchronous/periodic_executor.py b/pymongo/asynchronous/periodic_executor.py new file mode 100644 index 0000000000..337d10f133 --- /dev/null +++ b/pymongo/asynchronous/periodic_executor.py @@ -0,0 +1,209 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Run a target function on a background thread.""" + +from __future__ import annotations + +import asyncio +import sys +import threading +import time +import weakref +from typing import Any, Optional + +from pymongo.lock import _ALock, _create_lock + +_IS_SYNC = False + + +class PeriodicExecutor: + def __init__( + self, + interval: float, + min_interval: float, + target: Any, + name: Optional[str] = None, + ): + """Run a target function periodically on a background thread. + + If the target's return value is false, the executor stops. + + :param interval: Seconds between calls to `target`. + :param min_interval: Minimum seconds between calls if `wake` is + called very often. + :param target: A function. + :param name: A name to give the underlying thread. + """ + # threading.Event and its internal condition variable are expensive + # in Python 2, see PYTHON-983. Use a boolean to know when to wake. + # The executor's design is constrained by several Python issues, see + # "periodic_executor.rst" in this repository. + self._event = False + self._interval = interval + self._min_interval = min_interval + self._target = target + self._stopped = False + self._thread: Optional[threading.Thread] = None + self._name = name + self._skip_sleep = False + self._thread_will_exit = False + self._lock = _ALock(_create_lock()) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" + + def _run_async(self) -> None: + asyncio.run(self._run()) # type: ignore[func-returns-value] + + def open(self) -> None: + """Start. Multiple calls have no effect. + + Not safe to call from multiple threads at once. + """ + with self._lock: + if self._thread_will_exit: + # If the background thread has read self._stopped as True + # there is a chance that it has not yet exited. The call to + # join should not block indefinitely because there is no + # other work done outside the while loop in self._run. + try: + assert self._thread is not None + self._thread.join() + except ReferenceError: + # Thread terminated. + pass + self._thread_will_exit = False + self._stopped = False + started: Any = False + try: + started = self._thread and self._thread.is_alive() + except ReferenceError: + # Thread terminated. + pass + + if not started: + if _IS_SYNC: + thread = threading.Thread(target=self._run, name=self._name) + else: + thread = threading.Thread(target=self._run_async, name=self._name) + thread.daemon = True + self._thread = weakref.proxy(thread) + _register_executor(self) + # Mitigation to RuntimeError firing when thread starts on shutdown + # https://github.com/python/cpython/issues/114570 + try: + thread.start() + except RuntimeError as e: + if "interpreter shutdown" in str(e) or sys.is_finalizing(): + self._thread = None + return + raise + + def close(self, dummy: Any = None) -> None: + """Stop. To restart, call open(). + + The dummy parameter allows an executor's close method to be a weakref + callback; see monitor.py. + """ + self._stopped = True + + def join(self, timeout: Optional[int] = None) -> None: + if self._thread is not None: + try: + self._thread.join(timeout) + except (ReferenceError, RuntimeError): + # Thread already terminated, or not yet started. + pass + + def wake(self) -> None: + """Execute the target function soon.""" + self._event = True + + def update_interval(self, new_interval: int) -> None: + self._interval = new_interval + + def skip_sleep(self) -> None: + self._skip_sleep = True + + async def _should_stop(self) -> bool: + async with self._lock: + if self._stopped: + self._thread_will_exit = True + return True + return False + + async def _run(self) -> None: + while not await self._should_stop(): + try: + if not await self._target(): + self._stopped = True + break + except BaseException: + async with self._lock: + self._stopped = True + self._thread_will_exit = True + + raise + + if self._skip_sleep: + self._skip_sleep = False + else: + deadline = time.monotonic() + self._interval + while not self._stopped and time.monotonic() < deadline: + await asyncio.sleep(self._min_interval) + if self._event: + break # Early wake. + + self._event = False + + +# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started, +# an executor is kept alive by a strong reference from its thread and perhaps +# from other objects. When the thread dies and all other referrers are freed, +# the executor is freed and removed from _EXECUTORS. If any threads are +# running when the interpreter begins to shut down, we try to halt and join +# them to avoid spurious errors. +_EXECUTORS = set() + + +def _register_executor(executor: PeriodicExecutor) -> None: + ref = weakref.ref(executor, _on_executor_deleted) + _EXECUTORS.add(ref) + + +def _on_executor_deleted(ref: weakref.ReferenceType[PeriodicExecutor]) -> None: + _EXECUTORS.remove(ref) + + +def _shutdown_executors() -> None: + if _EXECUTORS is None: + return + + # Copy the set. Stopping threads has the side effect of removing executors. + executors = list(_EXECUTORS) + + # First signal all executors to close... + for ref in executors: + executor = ref() + if executor: + executor.close() + + # ...then try to join them. + for ref in executors: + executor = ref() + if executor: + executor.join(1) + + executor = None diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py new file mode 100644 index 0000000000..a4d3c50645 --- /dev/null +++ b/pymongo/asynchronous/pool.py @@ -0,0 +1,2128 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +from __future__ import annotations + +import collections +import contextlib +import copy +import logging +import os +import platform +import socket +import ssl +import sys +import threading +import time +import weakref +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +import bson +from bson import DEFAULT_CODEC_OPTIONS +from pymongo import __version__, _csot +from pymongo.asynchronous import helpers +from pymongo.asynchronous.client_session import _validate_session_write_concern +from pymongo.asynchronous.common import ( + MAX_BSON_SIZE, + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, + MAX_POOL_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + MIN_POOL_SIZE, + ORDERED_TYPES, + WAIT_QUEUE_TIMEOUT, +) +from pymongo.asynchronous.hello import Hello +from pymongo.asynchronous.hello_compat import HelloCompat +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.asynchronous.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) +from pymongo.asynchronous.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, + _EventListeners, +) +from pymongo.asynchronous.network import command, receive_message +from pymongo.asynchronous.read_preferences import ReadPreference +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConfigurationError, + ConnectionFailure, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, + _CertificateError, +) +from pymongo.lock import _ACondition, _ALock, _create_lock +from pymongo.network_layer import async_sendall +from pymongo.server_api import _add_to_command +from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker +from pymongo.ssl_support import HAS_SNI, SSLError + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.asynchronous.auth import MongoCredential, _AuthContext + from pymongo.asynchronous.client_session import ClientSession + from pymongo.asynchronous.compression_support import ( + CompressionSettings, + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.asynchronous.message import _OpMsg, _OpReply + from pymongo.asynchronous.mongo_client import AsyncMongoClient, _MongoClientErrorHandler + from pymongo.asynchronous.read_preferences import _ServerMode + from pymongo.asynchronous.typings import ClusterTime, _Address, _CollationIn + from pymongo.driver_info import DriverInfo + from pymongo.pyopenssl_context import SSLContext, _sslConn + from pymongo.read_concern import ReadConcern + from pymongo.server_api import ServerApi + from pymongo.write_concern import WriteConcern + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. + def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 + """Dummy function for platforms that don't provide fcntl.""" + + +_IS_SYNC = False + +_MAX_TCP_KEEPIDLE = 120 +_MAX_TCP_KEEPINTVL = 10 +_MAX_TCP_KEEPCNT = 9 + +if sys.platform == "win32": + try: + import _winreg as winreg + except ImportError: + import winreg + + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). + return default + + try: + with winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + +else: + + def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: + if hasattr(socket, tcp_option): + sockopt = getattr(socket, tcp_option) + try: + # PYTHON-1350 - NetBSD doesn't implement getsockopt for + # TCP_KEEPIDLE and friends. Don't attempt to set the + # values there. + default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) + if default > max_value: + sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) + except OSError: + pass + + def _set_keepalive_times(sock: socket.socket) -> None: + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) + + +_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} + +if sys.platform.startswith("linux"): + # platform.linux_distribution was deprecated in Python 3.5 + # and removed in Python 3.8. Starting in Python 3.5 it + # raises DeprecationWarning + # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 + _name = platform.system() + _METADATA["os"] = { + "type": _name, + "name": _name, + "architecture": platform.machine(), + # Kernel version (e.g. 4.4.0-17-generic). + "version": platform.release(), + } +elif sys.platform == "darwin": + _METADATA["os"] = { + "type": platform.system(), + "name": platform.system(), + "architecture": platform.machine(), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + "version": platform.mac_ver()[0], + } +elif sys.platform == "win32": + _METADATA["os"] = { + "type": platform.system(), + # "Windows XP", "Windows 7", "Windows 10", etc. + "name": " ".join((platform.system(), platform.release())), + "architecture": platform.machine(), + # Windows patch level (e.g. 5.1.2600-SP3) + "version": "-".join(platform.win32_ver()[1:3]), + } +elif sys.platform.startswith("java"): + _name, _ver, _arch = platform.java_ver()[-1] + _METADATA["os"] = { + # Linux, Windows 7, Mac OS X, etc. + "type": _name, + "name": _name, + # x86, x86_64, AMD64, etc. + "architecture": _arch, + # Linux kernel version, OSX version, etc. + "version": _ver, + } +else: + # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = { + "type": platform.system(), + "name": " ".join([part for part in _aliased[:2] if part]), + "architecture": platform.machine(), + "version": _aliased[2], + } + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) +else: + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) + +DOCKER_ENV_PATH = "/.dockerenv" +ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" + +RUNTIME_NAME_DOCKER = "docker" +ORCHESTRATOR_NAME_K8S = "kubernetes" + + +def get_container_env_info() -> dict[str, str]: + """Returns the runtime and orchestrator of a container. + If neither value is present, the metadata client.env.container field will be omitted.""" + container = {} + + if Path(DOCKER_ENV_PATH).exists(): + container["runtime"] = RUNTIME_NAME_DOCKER + if os.getenv(ENV_VAR_K8S): + container["orchestrator"] = ORCHESTRATOR_NAME_K8S + + return container + + +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _is_faas() -> bool: + return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> dict[str, Any]: + env: dict[str, Any] = {} + container = get_container_env_info() + if container: + env["container"] = container + # Skip if multiple (or no) envs are matched. + if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations +def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. + overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) + + +# If the first getaddrinfo call of this interpreter's life is on a thread, +# while the main thread holds the import lock, getaddrinfo deadlocks trying +# to import the IDNA codec. Import it here, where presumably we're on the +# main thread, to avoid the deadlock. See PYTHON-607. +"foo".encode("idna") + + +def _raise_connection_failure( + address: Any, + error: Exception, + msg_prefix: Optional[str] = None, + timeout_details: Optional[dict[str, float]] = None, +) -> NoReturn: + """Convert a socket.error to ConnectionFailure and raise it.""" + host, port = address + # If connecting to a Unix socket, port will be None. + if port is not None: + msg = "%s:%d: %s" % (host, port, error) + else: + msg = f"{host}: {error}" + if msg_prefix: + msg = msg_prefix + msg + if "configured timeouts" not in msg: + msg += format_timeout_details(timeout_details) + if isinstance(error, socket.timeout): + raise NetworkTimeout(msg) from error + elif isinstance(error, SSLError) and "timed out" in str(error): + # Eventlet does not distinguish TLS network timeouts from other + # SSLErrors (https://github.com/eventlet/eventlet/issues/692). + # Luckily, we can work around this limitation because the phrase + # 'timed out' appears in all the timeout related SSLErrors raised. + raise NetworkTimeout(msg) from error + else: + raise AutoReconnect(msg) from error + + +async def _cond_wait(condition: _ACondition, deadline: Optional[float]) -> bool: + timeout = deadline - time.monotonic() if deadline else None + return await condition.wait(timeout) + + +def _get_timeout_details(options: PoolOptions) -> dict[str, float]: + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details + + +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result + + +class PoolOptions: + """Read only connection pool options for an AsyncMongoClient. + + Should not be instantiated directly by application developers. Access + a client's pool options via + :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: + + pool_opts = client.options.pool_options + pool_opts.max_pool_size + pool_opts.min_pool_size + + """ + + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size: int = MAX_POOL_SIZE, + min_pool_size: int = MIN_POOL_SIZE, + max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, + connect_timeout: Optional[float] = None, + socket_timeout: Optional[float] = None, + wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, + ssl_context: Optional[SSLContext] = None, + tls_allow_invalid_hostnames: bool = False, + event_listeners: Optional[_EventListeners] = None, + appname: Optional[str] = None, + driver: Optional[DriverInfo] = None, + compression_settings: Optional[CompressionSettings] = None, + max_connecting: int = MAX_CONNECTING, + pause_enabled: bool = True, + server_api: Optional[ServerApi] = None, + load_balanced: Optional[bool] = None, + credentials: Optional[MongoCredential] = None, + ): + self.__max_pool_size = max_pool_size + self.__min_pool_size = min_pool_size + self.__max_idle_time_seconds = max_idle_time_seconds + self.__connect_timeout = connect_timeout + self.__socket_timeout = socket_timeout + self.__wait_queue_timeout = wait_queue_timeout + self.__ssl_context = ssl_context + self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames + self.__event_listeners = event_listeners + self.__appname = appname + self.__driver = driver + self.__compression_settings = compression_settings + self.__max_connecting = max_connecting + self.__pause_enabled = pause_enabled + self.__server_api = server_api + self.__load_balanced = load_balanced + self.__credentials = credentials + self.__metadata = copy.deepcopy(_METADATA) + if appname: + self.__metadata["application"] = {"name": appname} + + # Combine the "driver" AsyncMongoClient option with PyMongo's info, like: + # { + # 'driver': { + # 'name': 'PyMongo|MyDriver', + # 'version': '4.2.0|1.2.3', + # }, + # 'platform': 'CPython 3.8.0|MyPlatform' + # } + if driver: + if driver.name: + self.__metadata["driver"]["name"] = "{}|{}".format( + _METADATA["driver"]["name"], + driver.name, + ) + if driver.version: + self.__metadata["driver"]["version"] = "{}|{}".format( + _METADATA["driver"]["version"], + driver.version, + ) + if driver.platform: + self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) + + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) + + @property + def _credentials(self) -> Optional[MongoCredential]: + """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + + @property + def non_default_options(self) -> dict[str, Any]: + """The non-default options this pool was created with. + + Added for CMAP's :class:`PoolCreatedEvent`. + """ + opts = {} + if self.__max_pool_size != MAX_POOL_SIZE: + opts["maxPoolSize"] = self.__max_pool_size + if self.__min_pool_size != MIN_POOL_SIZE: + opts["minPoolSize"] = self.__min_pool_size + if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: + assert self.__max_idle_time_seconds is not None + opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 + if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: + assert self.__wait_queue_timeout is not None + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 + if self.__max_connecting != MAX_CONNECTING: + opts["maxConnecting"] = self.__max_connecting + return opts + + @property + def max_pool_size(self) -> float: + """The maximum allowable number of concurrent connections to each + connected server. Requests to a server will block if there are + `maxPoolSize` outstanding connections to the requested server. + Defaults to 100. Cannot be 0. + + When a server's pool has reached `max_pool_size`, operations for that + server block waiting for a socket to be returned to the pool. If + ``waitQueueTimeoutMS`` is set, a blocked operation will raise + :exc:`~pymongo.errors.ConnectionFailure` after a timeout. + By default ``waitQueueTimeoutMS`` is not set. + """ + return self.__max_pool_size + + @property + def min_pool_size(self) -> int: + """The minimum required number of concurrent connections that the pool + will maintain to each connected server. Default is 0. + """ + return self.__min_pool_size + + @property + def max_connecting(self) -> int: + """The maximum number of concurrent connection creation attempts per + pool. Defaults to 2. + """ + return self.__max_connecting + + @property + def pause_enabled(self) -> bool: + return self.__pause_enabled + + @property + def max_idle_time_seconds(self) -> Optional[int]: + """The maximum number of seconds that a connection can remain + idle in the pool before being removed and replaced. Defaults to + `None` (no limit). + """ + return self.__max_idle_time_seconds + + @property + def connect_timeout(self) -> Optional[float]: + """How long a connection can take to be opened before timing out.""" + return self.__connect_timeout + + @property + def socket_timeout(self) -> Optional[float]: + """How long a send or receive on a socket can take before timing out.""" + return self.__socket_timeout + + @property + def wait_queue_timeout(self) -> Optional[int]: + """How long a thread will wait for a socket from the pool if the pool + has no free sockets. + """ + return self.__wait_queue_timeout + + @property + def _ssl_context(self) -> Optional[SSLContext]: + """An SSLContext instance or None.""" + return self.__ssl_context + + @property + def tls_allow_invalid_hostnames(self) -> bool: + """If True skip ssl.match_hostname.""" + return self.__tls_allow_invalid_hostnames + + @property + def _event_listeners(self) -> Optional[_EventListeners]: + """An instance of pymongo.monitoring._EventListeners.""" + return self.__event_listeners + + @property + def appname(self) -> Optional[str]: + """The application name, for sending with hello in server handshake.""" + return self.__appname + + @property + def driver(self) -> Optional[DriverInfo]: + """Driver name and version, for sending with hello in handshake.""" + return self.__driver + + @property + def _compression_settings(self) -> Optional[CompressionSettings]: + return self.__compression_settings + + @property + def metadata(self) -> dict[str, Any]: + """A dict of metadata about the application, driver, os, and platform.""" + return self.__metadata.copy() + + @property + def server_api(self) -> Optional[ServerApi]: + """A pymongo.server_api.ServerApi or None.""" + return self.__server_api + + @property + def load_balanced(self) -> Optional[bool]: + """True if this Pool is configured in load balanced mode.""" + return self.__load_balanced + + +class _CancellationContext: + def __init__(self) -> None: + self._cancelled = False + + def cancel(self) -> None: + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self) -> bool: + """Was cancel called?""" + return self._cancelled + + +class Connection: + """Store a connection with some metadata. + + :param conn: a raw connection object + :param pool: a Pool instance + :param address: the server's (host, port) + :param id: the id of this socket in it's pool + """ + + def __init__( + self, conn: Union[socket.socket, _sslConn], pool: Pool, address: tuple[str, int], id: int + ): + self.pool_ref = weakref.ref(pool) + self.conn = conn + self.address = address + self.id = id + self.closed = False + self.last_checkin_time = time.monotonic() + self.performed_handshake = False + self.is_writable: bool = False + self.max_wire_version = MAX_WIRE_VERSION + self.max_bson_size = MAX_BSON_SIZE + self.max_message_size = MAX_MESSAGE_SIZE + self.max_write_batch_size = MAX_WRITE_BATCH_SIZE + self.supports_sessions = False + self.hello_ok: bool = False + self.is_mongos = False + self.op_msg_enabled = False + self.listeners = pool.opts._event_listeners + self.enabled_for_cmap = pool.enabled_for_cmap + self.compression_settings = pool.opts._compression_settings + self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None + self.socket_checker: SocketChecker = SocketChecker() + self.oidc_token_gen_id: Optional[int] = None + # Support for mechanism negotiation on the initial handshake. + self.negotiated_mechs: Optional[list[str]] = None + self.auth_ctx: Optional[_AuthContext] = None + + # The pool's generation changes with each reset() so we can close + # sockets created before the last reset. + self.pool_gen = pool.gen + self.generation = self.pool_gen.get_overall() + self.ready = False + self.cancel_context: _CancellationContext = _CancellationContext() + self.opts = pool.opts + self.more_to_come: bool = False + # For load balancer support. + self.service_id: Optional[ObjectId] = None + self.server_connection_id: Optional[int] = None + # When executing a transaction in load balancing mode, this flag is + # set to true to indicate that the session now owns the connection. + self.pinned_txn = False + self.pinned_cursor = False + self.active = False + self.last_timeout = self.opts.socket_timeout + self.connect_rtt = 0.0 + self._client_id = pool._client_id + self.creation_time = time.monotonic() + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.settimeout(timeout) + + def apply_timeout( + self, client: AsyncMongoClient, cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + async def unpin(self) -> None: + pool = self.pool_ref() + if pool: + await pool.checkin(self) + else: + self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> dict[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return {HelloCompat.CMD: 1} + else: + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} + + async def hello(self) -> Hello: + return await self._hello(None, None, None) + + async def _hello( + self, + cluster_time: Optional[ClusterTime], + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata + if self.compression_settings: + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + if not performing_handshake and cluster_time is not None: + cmd["$clusterTime"] = cluster_time + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." + creds.username + from pymongo.asynchronous import auth + + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = await self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) + self.compression_context = ctx + + self.op_msg_enabled = True + self.server_connection_id = hello.connection_id + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + async def _next_reply(self) -> dict[str, Any]: + reply = await self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + async def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + client: Optional[AsyncMongoClient] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: + """Execute a command or raise an error. + + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. + :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the + ``writeConcernError`` field in the command response. + :param collation: The collation for this command. + :param session: optional ClientSession instance. + :param client: optional AsyncMongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.validate_session(client, session) + session = _validate_session_write_concern(session, write_concern) + + # Ensure command name remains in first place. + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] + spec = dict(spec) + + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + + self.add_server_api(spec) + if session: + await session._apply_to(spec, retryable_write, read_preference, self) + self.send_cluster_time(spec, session, client) + listeners = self.listeners if publish_events else None + unacknowledged = bool(write_concern and not write_concern.acknowledged) + if self.op_msg_enabled: + self._raise_if_not_writable(unacknowledged) + try: + return await command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): + raise + # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. + except BaseException as error: + self._raise_connection_failure(error) + + async def send_message(self, message: bytes, max_doc_size: int) -> None: + """Send a raw BSON message or raise ConnectionFailure. + + If a network exception is raised, the socket is closed. + """ + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: + raise DocumentTooLarge( + "BSON document too large (%d bytes) - the connected server " + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) + + try: + await async_sendall(self.conn, message) + except BaseException as error: + self._raise_connection_failure(error) + + async def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise ConnectionFailure. + + If any exception is raised, the socket is closed. + """ + try: + return await receive_message(self, request_id, self.max_message_size) + except BaseException as error: + self._raise_connection_failure(error) + + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not + writable. + """ + if unacknowledged and not self.is_writable: + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) + + async def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. + + Can raise ConnectionFailure or InvalidDocument. + + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. + """ + self._raise_if_not_writable(True) + await self.send_message(msg, max_doc_size) + + async def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions + ) -> dict[str, Any]: + """Send "insert" etc. command, returning response as a dict. + + Can raise ConnectionFailure or OperationFailure. + + :param request_id: an int. + :param msg: bytes, the command message. + """ + await self.send_message(msg, 0) + reply = await self.receive_message(request_id) + result = reply.command_response(codec_options) + + # Raises NotPrimaryError or OperationFailure. + helpers._check_command_response(result, self.max_wire_version) + return result + + async def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. + + Can raise ConnectionFailure or OperationFailure. + """ + # CMAP spec says to publish the ready event only after authenticating + # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False + if not self.ready: + creds = self.opts._credentials + if creds: + from pymongo.asynchronous import auth + + await auth.authenticate(creds, self, reauthenticate=reauthenticate) + self.ready = True + if self.enabled_for_cmap: + assert self.listeners is not None + duration = time.monotonic() - self.creation_time + self.listeners.publish_connection_ready(self.address, self.id, duration) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_READY, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) + + def validate_session( + self, client: Optional[AsyncMongoClient], session: Optional[ClientSession] + ) -> None: + """Validate this session before use with client. + + Raises error if the client is not the one that created the session. + """ + if session: + if session._client is not client: + raise InvalidOperation( + "Can only use session with the AsyncMongoClient that started it" + ) + + def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + self._close_conn() + if reason and self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) + + def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + return self.socket_checker.socket_closed(self.conn) + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[ClientSession], + client: Optional[AsyncMongoClient], + ) -> None: + """Add $clusterTime.""" + if client: + client._send_cluster_time(command, session) + + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: + self.is_writable = is_writable + + def idle_time_seconds(self) -> float: + """Seconds since this socket was last checked into its pool.""" + return time.monotonic() - self.last_checkin_time + + def _raise_connection_failure(self, error: BaseException) -> NoReturn: + # Catch *all* exceptions from socket methods and close the socket. In + # regular Python, socket operations only raise socket.error, even if + # the underlying cause was a Ctrl-C: a signal raised during socket.recv + # is expressed as an EINTR error from poll. See internal_select_ex() in + # socketmodule.c. All error codes from poll become socket.error at + # first. Eventually in PyEval_EvalFrameEx the interpreter checks for + # signals and throws KeyboardInterrupt into the current frame on the + # main thread. + # + # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, + # ..) is called in Python code, which experiences the signal as a + # KeyboardInterrupt from the start, rather than as an initial + # socket.error, so we catch that, close the socket, and reraise it. + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. + if isinstance(error, (IOError, OSError, SSLError)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + else: + raise + + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.conn) + + def __repr__(self) -> str: + return "Connection({}){} at {}".format( + repr(self.conn), + self.closed and " CLOSED" or "", + id(self), + ) + + +def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.connect(host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + sock.connect(sa) + return sock + except OSError as e: + err = e + sock.close() + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +async def _configured_socket( + address: _Address, options: PoolOptions +) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if HAS_SNI: + if _IS_SYNC: + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + ssl_sock = await ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc] + else: + if _IS_SYNC: + ssl_sock = ssl_context.wrap_socket(sock) + else: + ssl_sock = await ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, SSLError) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +class _PoolClosedError(PyMongoError): + """Internal error raised when a thread tries to get a connection from a + closed pool. + """ + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 + + +# Do *not* explicitly inherit from object or Jython won't call __del__ +# http://bugs.jython.org/issue1057 +class Pool: + def __init__( + self, + address: _Address, + options: PoolOptions, + handshake: bool = True, + client_id: Optional[ObjectId] = None, + ): + """ + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param handshake: whether to call hello for each new Connection + """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY + # Check a socket's health with socket_closed() every once in a while. + # Can override for testing: 0 to always check, None to never check. + self._check_interval_seconds = 1 + # LIFO pool. Sockets are ordered on idle time. Sockets claimed + # and returned to pool from the left side. Stale sockets removed + # from the right side. + self.conns: collections.deque = collections.deque() + self.active_contexts: set[_CancellationContext] = set() + self.lock = _ALock(_create_lock()) + self.active_sockets = 0 + # Monotonically increasing connection ID required for CMAP Events. + self.next_connection_id = 1 + # Track whether the sockets in this pool are writeable or not. + self.is_writable: Optional[bool] = None + + # Keep track of resets, so we notice sockets created before the most + # recent reset and close them. + # self.generation = 0 + self.gen = _PoolGeneration() + self.pid = os.getpid() + self.address = address + self.opts = options + self.handshake = handshake + # Don't publish events in Monitor pools. + self.enabled_for_cmap = ( + self.handshake + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) + + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = _ACondition(threading.Condition(self.lock)) # type: ignore[arg-type] + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = _ACondition(threading.Condition(self.lock)) # type: ignore[arg-type] + self._max_connecting = self.opts.max_connecting + self._pending = 0 + self._client_id = client_id + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count: int = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[Connection] = set() + self.ncursors = 0 + self.ntxns = 0 + + async def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. + async with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_READY, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + async def _reset( + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + old_state = self.state + async with self.size_cond: + if self.closed: + return + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() + keep: collections.deque = collections.deque() + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + + if close: + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() + + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + + listeners = self.opts._event_listeners + # CMAP spec says that close() MUST close sockets before publishing the + # PoolClosedEvent but that reset() SHOULD close sockets *after* + # publishing the PoolClearedEvent. + if close: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_closed(self.address) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + ) + else: + if old_state != PoolState.PAUSED and self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLEARED, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) + for conn in sockets: + conn.close_conn(ConnectionClosedReason.STALE) + + async def update_is_writable(self, is_writable: Optional[bool]) -> None: + """Updates the is_writable attribute on all sockets currently in the + Pool. + """ + self.is_writable = is_writable + async with self.lock: + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) + + async def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + await self._reset( + close=False, service_id=service_id, interrupt_connections=interrupt_connections + ) + + async def reset_without_pause(self) -> None: + await self._reset(close=False, pause=False) + + async def close(self) -> None: + await self._reset(close=True) + + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + async def remove_stale_sockets(self, reference_generation: int) -> None: + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the + pool. + """ + # Take the lock to avoid the race condition described in PYTHON-2699. + async with self.lock: + if self.state != PoolState.READY: + return + + if self.opts.max_idle_time_seconds is not None: + async with self.lock: + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + conn = self.conns.pop() + conn.close_conn(ConnectionClosedReason.IDLE) + + while True: + async with self.size_cond: + # There are enough sockets in the pool. + if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False + try: + async with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = await self.connect() + async with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.gen.get_overall() != reference_generation: + conn.close_conn(ConnectionClosedReason.STALE) + return + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + finally: + if incremented: + # Notify after adding the socket to the pool. + async with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + + async with self.size_cond: + self.requests -= 1 + self.size_cond.notify() + + async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + """Connect to Mongo and return a new Connection. + + Can raise ConnectionFailure. + + Note that the pool does not keep a reference to the socket -- you + must call checkin() when you're done with it. + """ + async with self.lock: + conn_id = self.next_connection_id + self.next_connection_id += 1 + + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_created(self.address, conn_id) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) + + try: + sock = await _configured_socket(self.address, self.opts) + except BaseException as error: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn_id, ConnectionClosedReason.ERROR + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + if isinstance(error, (IOError, OSError, SSLError)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + + raise + + conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] + async with self.lock: + self.active_contexts.add(conn.cancel_context) + try: + if self.handshake: + await conn.hello() + self.is_writable = conn.is_writable + if handler: + handler.contribute_socket(conn, completed_handshake=False) + + await conn.authenticate() + except BaseException: + conn.close_conn(ConnectionClosedReason.ERROR) + raise + + return conn + + @contextlib.asynccontextmanager + async def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> AsyncGenerator[Connection, None]: + """Get a connection from the pool. Use with a "with" statement. + + Returns a :class:`Connection` object wrapping a connected + :class:`socket.socket`. + + This method should always be used in a with-statement:: + + with pool.get_conn() as connection: + connection.send_message(msg) + data = connection.receive_message(op_code, request_id) + + Can raise ConnectionFailure or OperationFailure. + + :param handler: A _MongoClientErrorHandler. + """ + listeners = self.opts._event_listeners + checkout_started_time = time.monotonic() + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_started(self.address) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_STARTED, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + conn = await self._get_conn(checkout_started_time, handler=handler) + + if self.enabled_for_cmap: + assert listeners is not None + duration = time.monotonic() - checkout_started_time + listeners.publish_connection_checked_out(self.address, conn.id, duration) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + durationMS=duration, + ) + try: + async with self.lock: + self.active_contexts.add(conn.cancel_context) + yield conn + except BaseException: + # Exception in caller. Ensure the connection gets returned. + # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = conn.pinned_txn or conn.pinned_cursor + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + await handler.handle(exc_type, exc_val) + if not pinned and conn.active: + await self.checkin(conn) + raise + if conn.pinned_txn: + async with self.lock: + self.__pinned_sockets.add(conn) + self.ntxns += 1 + elif conn.pinned_cursor: + async with self.lock: + self.__pinned_sockets.add(conn) + self.ncursors += 1 + elif conn.active: + await self.checkin(conn) + + def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: + if self.state != PoolState.READY: + if self.enabled_for_cmap and emit_event: + assert self.opts._event_listeners is not None + duration = time.monotonic() - checkout_started_time + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + + details = _get_timeout_details(self.opts) + _raise_connection_failure( + self.address, AutoReconnect("connection pool paused"), timeout_details=details + ) + + async def _get_conn( + self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None + ) -> Connection: + """Get or create a Connection. Can raise ConnectionFailure.""" + # We use the pid here to avoid issues with fork / multiprocessing. + # See test.test_client:TestClient.test_fork for an example of + # what could go wrong otherwise + if self.pid != os.getpid(): + await self.reset_without_pause() + + if self.closed: + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + duration = time.monotonic() - checkout_started_time + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, + ) + raise _PoolClosedError( + "Attempted to check out a connection from closed connection pool" + ) + + async with self.lock: + self.operation_count += 1 + + # Get a free socket or create one. + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: + deadline = time.monotonic() + self.opts.wait_queue_timeout + else: + deadline = None + + async with self.size_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=True) + while not (self.requests < self.max_pool_size): + if not await _cond_wait(self.size_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) + self.requests += 1 + + # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False + try: + async with self.lock: + self.active_sockets += 1 + incremented = True + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + async with self._max_connecting_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + if not await _cond_wait(self._max_connecting_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = await self.connect(handler=handler) + finally: + async with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + except BaseException: + if conn: + # We checked out a socket but authentication failed. + conn.close_conn(ConnectionClosedReason.ERROR) + async with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if self.enabled_for_cmap and not emitted_event: + assert self.opts._event_listeners is not None + duration = time.monotonic() - checkout_started_time + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + raise + + conn.active = True + return conn + + async def checkin(self, conn: Connection) -> None: + """Return the connection to the pool, or if it's closed discard it. + + :param conn: The connection to check into the pool. + """ + txn = conn.pinned_txn + cursor = conn.pinned_cursor + conn.active = False + conn.pinned_txn = False + conn.pinned_cursor = False + self.__pinned_sockets.discard(conn) + listeners = self.opts._event_listeners + async with self.lock: + self.active_contexts.discard(conn.cancel_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_in(self.address, conn.id) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKEDIN, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) + if self.pid != os.getpid(): + await self.reset_without_pause() + else: + if self.closed: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + elif conn.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn.id, ConnectionClosedReason.ERROR + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + else: + async with self.lock: + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + else: + conn.update_last_checkin_time() + conn.update_is_writable(bool(self.is_writable)) + self.conns.appendleft(conn) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() + + async with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 + self.requests -= 1 + self.active_sockets -= 1 + self.operation_count -= 1 + self.size_cond.notify() + + def _perished(self, conn: Connection) -> bool: + """Return True and close the connection if it is "perished". + + This side-effecty function checks if this socket has been idle for + for longer than the max idle time, or if the socket has been closed by + some external network error, or if the socket's generation is outdated. + + Checking sockets lets us avoid seeing *some* + :class:`~pymongo.errors.AutoReconnect` exceptions on server + hiccups, etc. We only check if the socket was closed by an external + error if it has been > 1 second since the socket was checked into the + pool, to keep performance reasonable - we can't avoid AutoReconnects + completely anyway. + """ + idle_time_seconds = conn.idle_time_seconds() + # If socket is idle, open a new one. + if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + conn.close_conn(ConnectionClosedReason.IDLE) + return True + + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + conn.close_conn(ConnectionClosedReason.ERROR) + return True + + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + return True + + return False + + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + duration = time.monotonic() - checkout_started_time + listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) + + def __del__(self) -> None: + # Avoid ResourceWarnings in Python 3 + # Close all sockets without calling reset() or close() because it is + # not safe to acquire a lock in __del__. + for conn in self.conns: + conn.close_conn(None) diff --git a/pymongo/asynchronous/read_preferences.py b/pymongo/asynchronous/read_preferences.py new file mode 100644 index 0000000000..8b6fb60753 --- /dev/null +++ b/pymongo/asynchronous/read_preferences.py @@ -0,0 +1,624 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License", +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for choosing which member of a replica set to read from.""" + +from __future__ import annotations + +from collections import abc +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +from pymongo.asynchronous import max_staleness_selectors +from pymongo.asynchronous.server_selectors import ( + member_with_tags_server_selector, + secondary_with_tags_server_selector, +) +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from pymongo.asynchronous.server_selectors import Selection + from pymongo.asynchronous.topology_description import TopologyDescription + +_IS_SYNC = False + +_PRIMARY = 0 +_PRIMARY_PREFERRED = 1 +_SECONDARY = 2 +_SECONDARY_PREFERRED = 3 +_NEAREST = 4 + + +_MONGOS_MODES = ( + "primary", + "primaryPreferred", + "secondary", + "secondaryPreferred", + "nearest", +) + +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] + + +def _validate_tag_sets(tag_sets: Optional[_TagSets]) -> Optional[_TagSets]: + """Validate tag sets for a MongoClient.""" + if tag_sets is None: + return tag_sets + + if not isinstance(tag_sets, (list, tuple)): + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") + if len(tag_sets) == 0: + raise ValueError( + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" + ) + + for tags in tag_sets: + if not isinstance(tags, abc.Mapping): + raise TypeError( + f"Tag set {tags!r} invalid, must be an instance of dict, " + "bson.son.SON or other type that inherits from " + "collection.Mapping" + ) + + return list(tag_sets) + + +def _invalid_max_staleness_msg(max_staleness: Any) -> str: + return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness + + +# Some duplication with common.py to avoid import cycle. +def _validate_max_staleness(max_staleness: Any) -> int: + """Validate max_staleness.""" + if max_staleness == -1: + return -1 + + if not isinstance(max_staleness, int): + raise TypeError(_invalid_max_staleness_msg(max_staleness)) + + if max_staleness <= 0: + raise ValueError(_invalid_max_staleness_msg(max_staleness)) + + return max_staleness + + +def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: + """Validate hedge.""" + if hedge is None: + return None + + if not isinstance(hedge, dict): + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") + + return hedge + + +class _ServerMode: + """Base class for all read preferences.""" + + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") + + def __init__( + self, + mode: int, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + self.__mongos_mode = _MONGOS_MODES[mode] + self.__mode = mode + self.__tag_sets = _validate_tag_sets(tag_sets) + self.__max_staleness = _validate_max_staleness(max_staleness) + self.__hedge = _validate_hedge(hedge) + + @property + def name(self) -> str: + """The name of this read preference.""" + return self.__class__.__name__ + + @property + def mongos_mode(self) -> str: + """The mongos mode of this read preference.""" + return self.__mongos_mode + + @property + def document(self) -> dict[str, Any]: + """Read preference as a document.""" + doc: dict[str, Any] = {"mode": self.__mongos_mode} + if self.__tag_sets not in (None, [{}]): + doc["tags"] = self.__tag_sets + if self.__max_staleness != -1: + doc["maxStalenessSeconds"] = self.__max_staleness + if self.__hedge not in (None, {}): + doc["hedge"] = self.__hedge + return doc + + @property + def mode(self) -> int: + """The mode of this read preference instance.""" + return self.__mode + + @property + def tag_sets(self) -> _TagSets: + """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to + read only from members whose ``dc`` tag has the value ``"ny"``. + To specify a priority-order for tag sets, provide a list of + tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag + set, ``{}``, means "read from any member that matches the mode, + ignoring tags." MongoClient tries each set of tags in turn + until it finds a set of tags with at least one matching member. + For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) + + .. seealso:: `Data-Center Awareness + `_ + """ + return list(self.__tag_sets) if self.__tag_sets else [{}] + + @property + def max_staleness(self) -> int: + """The maximum estimated length of time (in seconds) a replica set + secondary can fall behind the primary in replication before it will + no longer be selected for operations, or -1 for no maximum. + """ + return self.__max_staleness + + @property + def hedge(self) -> Optional[_Hedge]: + """The read preference ``hedge`` parameter. + + A dictionary that configures how the server will perform hedged reads. + It consists of the following keys: + + - ``enabled``: Enables or disables hedged reads in sharded clusters. + + Hedged reads are automatically enabled in MongoDB 4.4+ when using a + ``nearest`` read preference. To explicitly enable hedged reads, set + the ``enabled`` key to ``true``:: + + >>> Nearest(hedge={'enabled': True}) + + To explicitly disable hedged reads, set the ``enabled`` key to + ``False``:: + + >>> Nearest(hedge={'enabled': False}) + + .. versionadded:: 3.11 + """ + return self.__hedge + + @property + def min_wire_version(self) -> int: + """The wire protocol version the server must support. + + Some read preferences impose version requirements on all servers (e.g. + maxStalenessSeconds requires MongoDB 3.4 / maxWireVersion 5). + + All servers' maxWireVersion must be at least this read preference's + `min_wire_version`, or the driver raises + :exc:`~pymongo.errors.ConfigurationError`. + """ + return 0 if self.__max_staleness == -1 else 5 + + def __repr__(self) -> str: + return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( + self.name, + self.__tag_sets, + self.__max_staleness, + self.__hedge, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, _ServerMode): + return ( + self.mode == other.mode + and self.tag_sets == other.tag_sets + and self.max_staleness == other.max_staleness + and self.hedge == other.hedge + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __getstate__(self) -> dict[str, Any]: + """Return value of object for pickling. + + Needed explicitly because __slots__() defined. + """ + return { + "mode": self.__mode, + "tag_sets": self.__tag_sets, + "max_staleness": self.__max_staleness, + "hedge": self.__hedge, + } + + def __setstate__(self, value: Mapping[str, Any]) -> None: + """Restore from pickling.""" + self.__mode = value["mode"] + self.__mongos_mode = _MONGOS_MODES[self.__mode] + self.__tag_sets = _validate_tag_sets(value["tag_sets"]) + self.__max_staleness = _validate_max_staleness(value["max_staleness"]) + self.__hedge = _validate_hedge(value["hedge"]) + + def __call__(self, selection: Selection) -> Selection: + return selection + + +class Primary(_ServerMode): + """Primary read preference. + + * When directly connected to one mongod queries are allowed if the server + is standalone or a replica set primary. + * When connected to a mongos queries are sent to the primary of a shard. + * When connected to a replica set queries are sent to the primary of + the replica set. + """ + + __slots__ = () + + def __init__(self) -> None: + super().__init__(_PRIMARY) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to a Selection.""" + return selection.primary_selection + + def __repr__(self) -> str: + return "Primary()" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, _ServerMode): + return other.mode == _PRIMARY + return NotImplemented + + +class PrimaryPreferred(_ServerMode): + """PrimaryPreferred read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are sent to the primary of a shard if + available, otherwise a shard secondary. + * When connected to a replica set queries are sent to the primary if + available, otherwise a secondary. + + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to an available secondary until the + primary of the replica set is discovered. + + :param tag_sets: The :attr:`~tag_sets` to use if the primary is not + available. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` to use if the primary is not available. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + if selection.primary: + return selection.primary_selection + else: + return secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class Secondary(_ServerMode): + """Secondary read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among shard + secondaries. An error is raised if no secondaries are available. + * When connected to a replica set queries are distributed among + secondaries. An error is raised if no secondaries are available. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + return secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class SecondaryPreferred(_ServerMode): + """SecondaryPreferred read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among shard + secondaries, or the shard primary if no secondary is available. + * When connected to a replica set queries are distributed among + secondaries, or the primary if no secondary is available. + + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to the primary of the replica set until + an available secondary is discovered. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + secondaries = secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + if secondaries: + return secondaries + else: + return selection.primary_selection + + +class Nearest(_ServerMode): + """Nearest read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among all members of + a shard. + * When connected to a replica set queries are distributed among all + members. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + return member_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class _AggWritePref: + """Agg $out/$merge write preference. + + * If there are readable servers and there is any pre-5.0 server, use + primary read preference. + * Otherwise use `pref` read preference. + + :param pref: The read preference to use on MongoDB 5.0+. + """ + + __slots__ = ("pref", "effective_pref") + + def __init__(self, pref: _ServerMode): + self.pref = pref + self.effective_pref: _ServerMode = ReadPreference.PRIMARY + + def selection_hook(self, topology_description: TopologyDescription) -> None: + common_wv = topology_description.common_wire_version + if ( + topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) + and common_wv + and common_wv < 13 + ): + self.effective_pref = ReadPreference.PRIMARY + else: + self.effective_pref = self.pref + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to a Selection.""" + return self.effective_pref(selection) + + def __repr__(self) -> str: + return f"_AggWritePref(pref={self.pref!r})" + + # Proxy other calls to the effective_pref so that _AggWritePref can be + # used in place of an actual read preference. + def __getattr__(self, name: str) -> Any: + return getattr(self.effective_pref, name) + + +_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) + + +def make_read_preference( + mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 +) -> _ServerMode: + if mode == _PRIMARY: + if tag_sets not in (None, [{}]): + raise ConfigurationError("Read preference primary cannot be combined with tags") + if max_staleness != -1: + raise ConfigurationError( + "Read preference primary cannot be combined with maxStalenessSeconds" + ) + return Primary() + return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore + + +_MODES = ( + "PRIMARY", + "PRIMARY_PREFERRED", + "SECONDARY", + "SECONDARY_PREFERRED", + "NEAREST", +) + + +class ReadPreference: + """An enum that defines some commonly used read preference modes. + + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + See :doc:`/examples/high_availability` for code examples. + + A read preference is used in three cases: + + :class:`~pymongo.mongo_client.MongoClient` connected to a single mongod: + + - ``PRIMARY``: Queries are allowed if the server is standalone or a replica + set primary. + - All other modes allow queries to standalone servers, to a replica set + primary, or to replica set secondaries. + + :class:`~pymongo.mongo_client.MongoClient` initialized with the + ``replicaSet`` option: + + - ``PRIMARY``: Read from the primary. This is the default, and provides the + strongest consistency. If no primary is available, raise + :class:`~pymongo.errors.AutoReconnect`. + + - ``PRIMARY_PREFERRED``: Read from the primary if available, or if there is + none, read from a secondary. + + - ``SECONDARY``: Read from a secondary. If no secondary is available, + raise :class:`~pymongo.errors.AutoReconnect`. + + - ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise + from the primary. + + - ``NEAREST``: Read from any member. + + :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a + sharded cluster of replica sets: + + - ``PRIMARY``: Read from the primary of the shard, or raise + :class:`~pymongo.errors.OperationFailure` if there is none. + This is the default. + + - ``PRIMARY_PREFERRED``: Read from the primary of the shard, or if there is + none, read from a secondary of the shard. + + - ``SECONDARY``: Read from a secondary of the shard, or raise + :class:`~pymongo.errors.OperationFailure` if there is none. + + - ``SECONDARY_PREFERRED``: Read from a secondary of the shard if available, + otherwise from the shard primary. + + - ``NEAREST``: Read from any shard member. + """ + + PRIMARY = Primary() + PRIMARY_PREFERRED = PrimaryPreferred() + SECONDARY = Secondary() + SECONDARY_PREFERRED = SecondaryPreferred() + NEAREST = Nearest() + + +def read_pref_mode_from_name(name: str) -> int: + """Get the read preference mode from mongos/uri name.""" + return _MONGOS_MODES.index(name) + + +class MovingAverage: + """Tracks an exponentially-weighted moving average.""" + + average: Optional[float] + + def __init__(self) -> None: + self.average = None + + def add_sample(self, sample: float) -> None: + if sample < 0: + # Likely system time change while waiting for hello response + # and not using time.monotonic. Ignore it, the next one will + # probably be valid. + return + if self.average is None: + self.average = sample + else: + # The Server Selection Spec requires an exponentially weighted + # average with alpha = 0.2. + self.average = 0.8 * self.average + 0.2 * sample + + def get(self) -> Optional[float]: + """Get the calculated average, or None if no samples yet.""" + return self.average + + def reset(self) -> None: + self.average = None diff --git a/pymongo/asynchronous/response.py b/pymongo/asynchronous/response.py new file mode 100644 index 0000000000..f19328f6ee --- /dev/null +++ b/pymongo/asynchronous/response.py @@ -0,0 +1,133 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Represent a response from the server.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Union + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.asynchronous.message import _OpMsg, _OpReply + from pymongo.asynchronous.pool import Connection + from pymongo.asynchronous.typings import _Address, _DocumentOut + +_IS_SYNC = False + + +class Response: + __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") + + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: Sequence[Mapping[str, Any]], + ): + """Represent a response from the server. + + :param data: A network response message. + :param address: (host, port) of the source server. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: if the response is the result of a db command. + """ + self._data = data + self._address = address + self._request_id = request_id + self._duration = duration + self._from_command = from_command + self._docs = docs + + @property + def data(self) -> Union[_OpMsg, _OpReply]: + """Server response's raw BSON bytes.""" + return self._data + + @property + def address(self) -> _Address: + """(host, port) of the source server.""" + return self._address + + @property + def request_id(self) -> int: + """The request id of this operation.""" + return self._request_id + + @property + def duration(self) -> Optional[timedelta]: + """The duration of the operation.""" + return self._duration + + @property + def from_command(self) -> bool: + """If the response is a result from a db command.""" + return self._from_command + + @property + def docs(self) -> Sequence[Mapping[str, Any]]: + """The decoded document(s).""" + return self._docs + + +class PinnedResponse(Response): + __slots__ = ("_conn", "_more_to_come") + + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + conn: Connection, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: list[_DocumentOut], + more_to_come: bool, + ): + """Represent a response to an exhaust cursor's initial query. + + :param data: A network response message. + :param address: (host, port) of the source server. + :param conn: The Connection used for the initial query. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: If the response is the result of a db command. + :param docs: List of documents. + :param more_to_come: Bool indicating whether cursor is ready to be + exhausted. + """ + super().__init__(data, address, request_id, duration, from_command, docs) + self._conn = conn + self._more_to_come = more_to_come + + @property + def conn(self) -> Connection: + """The Connection used for the initial query. + + The server will send batches on this socket, without waiting for + getMores from the client, until the result set is exhausted or there + is an error. + """ + return self._conn + + @property + def more_to_come(self) -> bool: + """If true, server is ready to send batches on the socket until the + result set is exhausted or there is an error. + """ + return self._more_to_come diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py new file mode 100644 index 0000000000..cf812d05c7 --- /dev/null +++ b/pymongo/asynchronous/server.py @@ -0,0 +1,355 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Communicate with one MongoDB server in a topology.""" +from __future__ import annotations + +import logging +from datetime import datetime +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Optional, + Union, +) + +from bson import _decode_all_selective +from pymongo.asynchronous.helpers import _check_command_response, _handle_reauth +from pymongo.asynchronous.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.asynchronous.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.asynchronous.response import PinnedResponse, Response +from pymongo.errors import NotPrimaryError, OperationFailure + +if TYPE_CHECKING: + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.asynchronous.mongo_client import AsyncMongoClient, _MongoClientErrorHandler + from pymongo.asynchronous.monitor import Monitor + from pymongo.asynchronous.monitoring import _EventListeners + from pymongo.asynchronous.pool import Connection, Pool + from pymongo.asynchronous.read_preferences import _ServerMode + from pymongo.asynchronous.server_description import ServerDescription + from pymongo.asynchronous.typings import _DocumentOut + +_IS_SYNC = False + +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} + + +class Server: + def __init__( + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue]] = None, + ) -> None: + """Represent one MongoDB server.""" + self._description = server_description + self._pool = pool + self._monitor = monitor + self._topology_id = topology_id + self._publish = listeners is not None and listeners.enabled_for_server + self._listener = listeners + self._events = None + if self._publish: + self._events = events() # type: ignore[misc] + + async def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + if not self._pool.opts.load_balanced: + self._monitor.open() + + async def reset(self, service_id: Optional[ObjectId] = None) -> None: + """Clear the connection pool.""" + await self.pool.reset(service_id) + + async def close(self) -> None: + """Clear the connection pool and stop the monitor. + + Reconnect with open(). + """ + if self._publish: + assert self._listener is not None + assert self._events is not None + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) + await self._monitor.close() + await self._pool.close() + + def request_check(self) -> None: + """Check the server's state soon.""" + self._monitor.request_check() + + @_handle_reauth + async def run_operation( + self, + conn: Connection, + operation: Union[_Query, _GetMore], + read_preference: _ServerMode, + listeners: Optional[_EventListeners], + unpack_res: Callable[..., list[_DocumentOut]], + client: AsyncMongoClient, + ) -> Response: + """Run a _Query or _GetMore operation and return a Response object. + + This method is used only to run _Query/_GetMore operations from + cursors. + Can raise ConnectionFailure, OperationFailure, etc. + + :param conn: A Connection instance. + :param operation: A _Query or _GetMore object. + :param read_preference: The read preference to use. + :param listeners: Instance of _EventListeners or None. + :param unpack_res: A callable that decodes the wire protocol response. + """ + duration = None + assert listeners is not None + publish = listeners.enabled_for_commands + start = datetime.now() + + use_cmd = operation.use_command(conn) + more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come + if more_to_come: + request_id = 0 + else: + message = await operation.get_message(read_preference, conn, use_cmd) + request_id, data, max_doc_size = self._split_message(message) + + cmd, dbn = await operation.as_command(conn) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + + if publish: + cmd, dbn = await operation.as_command(conn) + if "$db" not in cmd: + cmd["$db"] = dbn + assert listeners is not None + listeners.publish_command_start( + cmd, + dbn, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + if more_to_come: + reply = await conn.receive_message(None) + else: + await conn.send_message(data, max_doc_size) + reply = await conn.receive_message(request_id) + + # Unpack and check for command errors. + if use_cmd: + user_fields = _CURSOR_DOC_FIELDS + legacy_response = False + else: + user_fields = None + legacy_response = True + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) + if use_cmd: + first = docs[0] + await operation.client._process_response(first, operation.session) + _check_command_response(first, conn.max_wire_version) + except Exception as exc: + duration = datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + listeners.publish_command_failure( + duration, + failure, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + raise + duration = datetime.now() - start + # Must publish in find / getMore / explain command response + # format. + if use_cmd: + res = docs[0] + elif operation.name == "explain": + res = docs[0] if docs else {} + else: + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] + if operation.name == "find": + res["cursor"]["firstBatch"] = docs + else: + res["cursor"]["nextBatch"] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=res, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + listeners.publish_command_success( + duration, + res, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + + # Decrypt response. + client = operation.client + if client and client._encrypter: + if use_cmd: + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) + + response: Response + + if client._should_pin_cursor(operation.session) or operation.exhaust: + conn.pin_cursor() + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. + more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.conn_mgr: + operation.conn_mgr.update_exhaust(more_to_come) + response = PinnedResponse( + data=reply, + address=self._description.address, + conn=conn, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + more_to_come=more_to_come, + ) + else: + response = Response( + data=reply, + address=self._description.address, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + ) + + return response + + async def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> AsyncContextManager[Connection]: + return self.pool.checkout(handler) + + @property + def description(self) -> ServerDescription: + return self._description + + @description.setter + def description(self, server_description: ServerDescription) -> None: + assert server_description.address == self._description.address + self._description = server_description + + @property + def pool(self) -> Pool: + return self._pool + + def _split_message( + self, message: Union[tuple[int, Any], tuple[int, Any, int]] + ) -> tuple[int, Any, int]: + """Return request_id, data, max_doc_size. + + :param message: (request_id, data, max_doc_size) or (request_id, data) + """ + if len(message) == 3: + return message # type: ignore[return-value] + else: + # get_more and kill_cursors messages don't include BSON documents. + request_id, data = message # type: ignore[misc] + return request_id, data, 0 + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/asynchronous/server_description.py b/pymongo/asynchronous/server_description.py new file mode 100644 index 0000000000..8e15c34006 --- /dev/null +++ b/pymongo/asynchronous/server_description.py @@ -0,0 +1,301 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Represent one server the driver is connected to.""" +from __future__ import annotations + +import time +import warnings +from typing import Any, Mapping, Optional + +from bson import EPOCH_NAIVE +from bson.objectid import ObjectId +from pymongo.asynchronous.hello import Hello +from pymongo.asynchronous.typings import ClusterTime, _Address +from pymongo.server_type import SERVER_TYPE + +_IS_SYNC = False + + +class ServerDescription: + """Immutable representation of one server. + + :param address: A (host, port) pair + :param hello: Optional Hello instance + :param round_trip_time: Optional float + :param error: Optional, the last error attempting to connect to the server + :param round_trip_time: Optional float, the min latency from the most recent samples + """ + + __slots__ = ( + "_address", + "_server_type", + "_all_hosts", + "_tags", + "_replica_set_name", + "_primary", + "_max_bson_size", + "_max_message_size", + "_max_write_batch_size", + "_min_wire_version", + "_max_wire_version", + "_round_trip_time", + "_min_round_trip_time", + "_me", + "_is_writable", + "_is_readable", + "_ls_timeout_minutes", + "_error", + "_set_version", + "_election_id", + "_cluster_time", + "_last_write_date", + "_last_update_time", + "_topology_version", + ) + + def __init__( + self, + address: _Address, + hello: Optional[Hello] = None, + round_trip_time: Optional[float] = None, + error: Optional[Exception] = None, + min_round_trip_time: float = 0.0, + ) -> None: + self._address = address + if not hello: + hello = Hello({}) + + self._server_type = hello.server_type + self._all_hosts = hello.all_hosts + self._tags = hello.tags + self._replica_set_name = hello.replica_set_name + self._primary = hello.primary + self._max_bson_size = hello.max_bson_size + self._max_message_size = hello.max_message_size + self._max_write_batch_size = hello.max_write_batch_size + self._min_wire_version = hello.min_wire_version + self._max_wire_version = hello.max_wire_version + self._set_version = hello.set_version + self._election_id = hello.election_id + self._cluster_time = hello.cluster_time + self._is_writable = hello.is_writable + self._is_readable = hello.is_readable + self._ls_timeout_minutes = hello.logical_session_timeout_minutes + self._round_trip_time = round_trip_time + self._min_round_trip_time = min_round_trip_time + self._me = hello.me + self._last_update_time = time.monotonic() + self._error = error + self._topology_version = hello.topology_version + if error: + details = getattr(error, "details", None) + if isinstance(details, dict): + self._topology_version = details.get("topologyVersion") + + self._last_write_date: Optional[float] + if hello.last_write_date: + # Convert from datetime to seconds. + delta = hello.last_write_date - EPOCH_NAIVE + self._last_write_date = delta.total_seconds() + else: + self._last_write_date = None + + @property + def address(self) -> _Address: + """The address (host, port) of this server.""" + return self._address + + @property + def server_type(self) -> int: + """The type of this server.""" + return self._server_type + + @property + def server_type_name(self) -> str: + """The server type as a human readable string. + + .. versionadded:: 3.4 + """ + return SERVER_TYPE._fields[self._server_type] + + @property + def all_hosts(self) -> set[tuple[str, int]]: + """List of hosts, passives, and arbiters known to this server.""" + return self._all_hosts + + @property + def tags(self) -> Mapping[str, Any]: + return self._tags + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self._replica_set_name + + @property + def primary(self) -> Optional[tuple[str, int]]: + """This server's opinion about who the primary is, or None.""" + return self._primary + + @property + def max_bson_size(self) -> int: + return self._max_bson_size + + @property + def max_message_size(self) -> int: + return self._max_message_size + + @property + def max_write_batch_size(self) -> int: + return self._max_write_batch_size + + @property + def min_wire_version(self) -> int: + return self._min_wire_version + + @property + def max_wire_version(self) -> int: + return self._max_wire_version + + @property + def set_version(self) -> Optional[int]: + return self._set_version + + @property + def election_id(self) -> Optional[ObjectId]: + return self._election_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + return self._cluster_time + + @property + def election_tuple(self) -> tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) + return self._set_version, self._election_id + + @property + def me(self) -> Optional[tuple[str, int]]: + return self._me + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + return self._ls_timeout_minutes + + @property + def last_write_date(self) -> Optional[float]: + return self._last_write_date + + @property + def last_update_time(self) -> float: + return self._last_update_time + + @property + def round_trip_time(self) -> Optional[float]: + """The current average latency or None.""" + # This override is for unittesting only! + if self._address in self._host_to_round_trip_time: + return self._host_to_round_trip_time[self._address] + + return self._round_trip_time + + @property + def min_round_trip_time(self) -> float: + """The min latency from the most recent samples.""" + return self._min_round_trip_time + + @property + def error(self) -> Optional[Exception]: + """The last error attempting to connect to the server, or None.""" + return self._error + + @property + def is_writable(self) -> bool: + return self._is_writable + + @property + def is_readable(self) -> bool: + return self._is_readable + + @property + def mongos(self) -> bool: + return self._server_type == SERVER_TYPE.Mongos + + @property + def is_server_type_known(self) -> bool: + return self.server_type != SERVER_TYPE.Unknown + + @property + def retryable_writes_supported(self) -> bool: + """Checks if this server supports retryable writes.""" + return ( + self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + ) or self._server_type == SERVER_TYPE.LoadBalancer + + @property + def retryable_reads_supported(self) -> bool: + """Checks if this server supports retryable writes.""" + return self._max_wire_version >= 6 + + @property + def topology_version(self) -> Optional[Mapping[str, Any]]: + return self._topology_version + + def to_unknown(self, error: Optional[Exception] = None) -> ServerDescription: + unknown = ServerDescription(self.address, error=error) + unknown._topology_version = self.topology_version + return unknown + + def __eq__(self, other: Any) -> bool: + if isinstance(other, ServerDescription): + return ( + (self._address == other.address) + and (self._server_type == other.server_type) + and (self._min_wire_version == other.min_wire_version) + and (self._max_wire_version == other.max_wire_version) + and (self._me == other.me) + and (self._all_hosts == other.all_hosts) + and (self._tags == other.tags) + and (self._replica_set_name == other.replica_set_name) + and (self._set_version == other.set_version) + and (self._election_id == other.election_id) + and (self._primary == other.primary) + and (self._ls_timeout_minutes == other.logical_session_timeout_minutes) + and (self._error == other.error) + ) + + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + errmsg = "" + if self.error: + errmsg = f", error={self.error!r}" + return "<{} {} server_type: {}, rtt: {}{}>".format( + self.__class__.__name__, + self.address, + self.server_type_name, + self.round_trip_time, + errmsg, + ) + + # For unittesting only. Use under no circumstances! + _host_to_round_trip_time: dict = {} diff --git a/pymongo/asynchronous/server_selectors.py b/pymongo/asynchronous/server_selectors.py new file mode 100644 index 0000000000..eeaebadd6e --- /dev/null +++ b/pymongo/asynchronous/server_selectors.py @@ -0,0 +1,175 @@ +# Copyright 2014-2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Criteria to select some ServerDescriptions from a TopologyDescription.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, TypeVar, cast + +from pymongo.server_type import SERVER_TYPE + +if TYPE_CHECKING: + from pymongo.asynchronous.server_description import ServerDescription + from pymongo.asynchronous.topology_description import TopologyDescription + +_IS_SYNC = False + +T = TypeVar("T") +TagSet = Mapping[str, Any] +TagSets = Sequence[TagSet] + + +class Selection: + """Input or output of a server selector function.""" + + @classmethod + def from_topology_description(cls, topology_description: TopologyDescription) -> Selection: + known_servers = topology_description.known_servers + primary = None + for sd in known_servers: + if sd.server_type == SERVER_TYPE.RSPrimary: + primary = sd + break + + return Selection( + topology_description, + topology_description.known_servers, + topology_description.common_wire_version, + primary, + ) + + def __init__( + self, + topology_description: TopologyDescription, + server_descriptions: list[ServerDescription], + common_wire_version: Optional[int], + primary: Optional[ServerDescription], + ): + self.topology_description = topology_description + self.server_descriptions = server_descriptions + self.primary = primary + self.common_wire_version = common_wire_version + + def with_server_descriptions(self, server_descriptions: list[ServerDescription]) -> Selection: + return Selection( + self.topology_description, server_descriptions, self.common_wire_version, self.primary + ) + + def secondary_with_max_last_write_date(self) -> Optional[ServerDescription]: + secondaries = secondary_server_selector(self) + if secondaries.server_descriptions: + return max( + secondaries.server_descriptions, key=lambda sd: cast(float, sd.last_write_date) + ) + return None + + @property + def primary_selection(self) -> Selection: + primaries = [self.primary] if self.primary else [] + return self.with_server_descriptions(primaries) + + @property + def heartbeat_frequency(self) -> int: + return self.topology_description.heartbeat_frequency + + @property + def topology_type(self) -> int: + return self.topology_description.topology_type + + def __bool__(self) -> bool: + return bool(self.server_descriptions) + + def __getitem__(self, item: int) -> ServerDescription: + return self.server_descriptions[item] + + +def any_server_selector(selection: T) -> T: + return selection + + +def readable_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.is_readable] + ) + + +def writable_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.is_writable] + ) + + +def secondary_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary] + ) + + +def arbiter_server_selector(selection: Selection) -> Selection: + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter] + ) + + +def writable_preferred_server_selector(selection: Selection) -> Selection: + """Like PrimaryPreferred but doesn't use tags or latency.""" + return writable_server_selector(selection) or secondary_server_selector(selection) + + +def apply_single_tag_set(tag_set: TagSet, selection: Selection) -> Selection: + """All servers matching one tag set. + + A tag set is a dict. A server matches if its tags are a superset: + A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}. + + The empty tag set {} matches any server. + """ + + def tags_match(server_tags: Mapping[str, Any]) -> bool: + for key, value in tag_set.items(): + if key not in server_tags or server_tags[key] != value: + return False + + return True + + return selection.with_server_descriptions( + [s for s in selection.server_descriptions if tags_match(s.tags)] + ) + + +def apply_tag_sets(tag_sets: TagSets, selection: Selection) -> Selection: + """All servers match a list of tag sets. + + tag_sets is a list of dicts. The empty tag set {} matches any server, + and may be provided at the end of the list as a fallback. So + [{'a': 'value'}, {}] expresses a preference for servers tagged + {'a': 'value'}, but accepts any server if none matches the first + preference. + """ + for tag_set in tag_sets: + with_tag_set = apply_single_tag_set(tag_set, selection) + if with_tag_set: + return with_tag_set + + return selection.with_server_descriptions([]) + + +def secondary_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection: + """All near-enough secondaries matching the tag sets.""" + return apply_tag_sets(tag_sets, secondary_server_selector(selection)) + + +def member_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection: + """All near-enough members matching the tag sets.""" + return apply_tag_sets(tag_sets, readable_server_selector(selection)) diff --git a/pymongo/asynchronous/settings.py b/pymongo/asynchronous/settings.py new file mode 100644 index 0000000000..f88235cf59 --- /dev/null +++ b/pymongo/asynchronous/settings.py @@ -0,0 +1,170 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Represent MongoClient's configuration.""" +from __future__ import annotations + +import threading +import traceback +from typing import Any, Collection, Optional, Type, Union + +from bson.objectid import ObjectId +from pymongo.asynchronous import common, monitor, pool +from pymongo.asynchronous.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT +from pymongo.asynchronous.pool import Pool, PoolOptions +from pymongo.asynchronous.server_description import ServerDescription +from pymongo.asynchronous.topology_description import TOPOLOGY_TYPE, _ServerSelector +from pymongo.errors import ConfigurationError + +_IS_SYNC = False + + +class TopologySettings: + def __init__( + self, + seeds: Optional[Collection[tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, + server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + ): + """Represent MongoClient's configuration. + + Take a list of (host, port) pairs and optional replica set name. + """ + if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: + raise ConfigurationError( + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) + + self._seeds: Collection[tuple[str, int]] = seeds or [("localhost", 27017)] + self._replica_set_name = replica_set_name + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition + self._local_threshold_ms = local_threshold_ms + self._server_selection_timeout = server_selection_timeout + self._server_selector = server_selector + self._fqdn = fqdn + self._heartbeat_frequency = heartbeat_frequency + self._direct = direct_connection + self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 + self._server_monitoring_mode = server_monitoring_mode + + self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. + self._stack = "".join(traceback.format_stack()) + + @property + def seeds(self) -> Collection[tuple[str, int]]: + """List of server addresses.""" + return self._seeds + + @property + def replica_set_name(self) -> Optional[str]: + return self._replica_set_name + + @property + def pool_class(self) -> Type[Pool]: + return self._pool_class + + @property + def pool_options(self) -> PoolOptions: + return self._pool_options + + @property + def monitor_class(self) -> Type[monitor.Monitor]: + return self._monitor_class + + @property + def condition_class(self) -> Type[threading.Condition]: + return self._condition_class + + @property + def local_threshold_ms(self) -> int: + return self._local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + return self._server_selection_timeout + + @property + def server_selector(self) -> Optional[_ServerSelector]: + return self._server_selector + + @property + def heartbeat_frequency(self) -> int: + return self._heartbeat_frequency + + @property + def fqdn(self) -> Optional[str]: + return self._fqdn + + @property + def direct(self) -> Optional[bool]: + """Connect directly to a single server, or use a set of servers? + + True if there is one seed and no replica_set_name. + """ + return self._direct + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + + @property + def srv_service_name(self) -> str: + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self) -> int: + """The srvMaxHosts.""" + return self._srv_max_hosts + + @property + def server_monitoring_mode(self) -> str: + """The serverMonitoringMode.""" + return self._server_monitoring_mode + + def get_topology_type(self) -> int: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: + return TOPOLOGY_TYPE.Single + elif self.replica_set_name is not None: + return TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + return TOPOLOGY_TYPE.Unknown + + def get_server_descriptions(self) -> dict[Union[tuple[str, int], Any], ServerDescription]: + """Initial dict of (address, ServerDescription) for all seeds.""" + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py new file mode 100644 index 0000000000..1a37bad966 --- /dev/null +++ b/pymongo/asynchronous/srv_resolver.py @@ -0,0 +1,149 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo.asynchronous.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from dns import resolver + +_IS_SYNC = False + + +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False + + +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + from dns import resolver + + if hasattr(resolver, "resolve"): + # dnspython >= 2 + return resolver.resolve(*args, **kwargs) + # dnspython 1.X + return resolver.query(*args, **kwargs) + + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) + + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): + self.__fqdn = fqdn + self.__srv = srv_service_name + self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT + self.__srv_max_hosts = srv_max_hosts or 0 + # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + + try: + self.__plist = self.__fqdn.split(".")[1:] + except Exception: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None + self.__slen = len(self.__plist) + if self.__slen < 2: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) + + def get_options(self) -> Optional[str]: + from dns import resolver + + try: + results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) + except (resolver.NoAnswer, resolver.NXDOMAIN): + # No TXT records + return None + except Exception as exc: + raise ConfigurationError(str(exc)) from None + if len(results) > 1: + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") + + def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: + try: + results = _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) + except Exception as exc: + if not encapsulate_errors: + # Raise the original error. + raise + # Else, raise all errors as ConfigurationError. + raise ConfigurationError(str(exc)) from None + return results + + def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: + results = self._resolve_uri(encapsulate_errors) + + # Construct address tuples + nodes = [ + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) for res in results + ] + + # Validate hosts + for node in nodes: + try: + nlist = node[0].lower().split(".")[1:][-self.__slen :] + except Exception: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None + if self.__plist != nlist: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) + return results, nodes + + def get_hosts(self) -> list[tuple[str, Any]]: + _, nodes = self._get_srv_response_and_hosts(True) + return nodes + + def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: + results, nodes = self._get_srv_response_and_hosts(False) + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py new file mode 100644 index 0000000000..df6dd903a7 --- /dev/null +++ b/pymongo/asynchronous/topology.py @@ -0,0 +1,1030 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Internal class to monitor a topology of one or more servers.""" + +from __future__ import annotations + +import logging +import os +import queue +import random +import sys +import time +import warnings +import weakref +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast + +from pymongo import _csot, helpers_constants +from pymongo.asynchronous import common, periodic_executor +from pymongo.asynchronous.client_session import _ServerSession, _ServerSessionPool +from pymongo.asynchronous.hello import Hello +from pymongo.asynchronous.logger import ( + _SERVER_SELECTION_LOGGER, + _debug_log, + _ServerSelectionStatusMessage, +) +from pymongo.asynchronous.monitor import SrvMonitor +from pymongo.asynchronous.pool import Pool, PoolOptions +from pymongo.asynchronous.server import Server +from pymongo.asynchronous.server_description import ServerDescription +from pymongo.asynchronous.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + secondary_server_selector, + writable_server_selector, +) +from pymongo.asynchronous.topology_description import ( + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) +from pymongo.errors import ( + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) +from pymongo.lock import _ACondition, _ALock, _create_lock + +if TYPE_CHECKING: + from bson import ObjectId + from pymongo.asynchronous.settings import TopologySettings + from pymongo.asynchronous.typings import ClusterTime, _Address + +_IS_SYNC = False + +_pymongo_dir = str(Path(__file__).parent) + + +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: + q = queue_ref() + if not q: + return False # Cancel PeriodicExecutor. + + while True: + try: + event = q.get_nowait() + except queue.Empty: + break + else: + fn, args = event + fn(*args) + + return True # Continue PeriodicExecutor. + + +class Topology: + """Monitor a topology of one or more servers.""" + + def __init__(self, topology_settings: TopologySettings): + self._topology_id = topology_settings._topology_id + self._listeners = topology_settings._pool_options._event_listeners + self._publish_server = self._listeners is not None and self._listeners.enabled_for_server + self._publish_tp = self._listeners is not None and self._listeners.enabled_for_topology + + # Create events queue if there are publishers. + self._events = None + self.__events_executor: Any = None + + if self._publish_server or self._publish_tp: + self._events = queue.Queue(maxsize=100) + + if self._publish_tp: + assert self._events is not None + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) + self._settings = topology_settings + topology_description = TopologyDescription( + topology_settings.get_topology_type(), + topology_settings.get_server_descriptions(), + topology_settings.replica_set_name, + None, + None, + topology_settings, + ) + + self._description = topology_description + if self._publish_tp: + assert self._events is not None + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) + + for seed in topology_settings.seeds: + if self._publish_server: + assert self._events is not None + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + + # Store the seed list to help diagnose errors in _error_message(). + self._seed_addresses = list(topology_description.server_descriptions()) + self._opened = False + self._closed = False + self._lock = _ALock(_create_lock()) + self._condition = _ACondition(self._settings.condition_class(self._lock)) # type: ignore[arg-type] + self._servers: dict[_Address, Server] = {} + self._pid: Optional[int] = None + self._max_cluster_time: Optional[ClusterTime] = None + self._session_pool = _ServerSessionPool() + + if self._publish_server or self._publish_tp: + assert self._events is not None + weak: weakref.ReferenceType[queue.Queue] + + async def target() -> bool: + return process_events_queue(weak) + + executor = periodic_executor.PeriodicExecutor( + interval=common.EVENTS_QUEUE_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_events_thread", + ) + + # We strongly reference the executor and it weakly references + # the queue via this closure. When the topology is freed, stop + # the executor soon. + weak = weakref.ref(self._events, executor.close) + self.__events_executor = executor + executor.open() + + self._srv_monitor = None + if self._settings.fqdn is not None and not self._settings.load_balanced: + self._srv_monitor = SrvMonitor(self, self._settings) + + async def open(self) -> None: + """Start monitoring, or restart after a fork. + + No effect if called multiple times. + + .. warning:: Topology is shared among multiple threads and is protected + by mutual exclusion. Using Topology from a process other than the one + that initialized it will emit a warning and may result in deadlock. To + prevent this from happening, AsyncMongoClient must be created after any + forking. + + """ + pid = os.getpid() + if self._pid is None: + self._pid = pid + elif pid != self._pid: + self._pid = pid + if sys.version_info[:2] >= (3, 12): + kwargs = {"skip_file_prefixes": (_pymongo_dir,)} + else: + kwargs = {"stacklevel": 6} + # Ignore B028 warning for missing stacklevel. + warnings.warn( # type: ignore[call-overload] # noqa: B028 + "AsyncMongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://pymongo.readthedocs.io/en/stable/faq.html#" + "is-pymongo-fork-safe", + **kwargs, + ) + async with self._lock: + # Close servers and clear the pools. + for server in self._servers.values(): + await server.close() + # Reset the session pool to avoid duplicate sessions in + # the child process. + self._session_pool.reset() + + async with self._lock: + await self._ensure_opened() + + def get_server_selection_timeout(self) -> float: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + return self._settings.server_selection_timeout + return timeout + + async def select_servers( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + operation_id: Optional[int] = None, + ) -> list[Server]: + """Return a list of Servers matching selector, or time out. + + :param selector: function that takes a list of Servers and returns + a subset of them. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. + If not provided, the default value common.SERVER_SELECTION_TIMEOUT + is used. + :param address: optional server address to select. + + Calls self.open() if needed. + + Raises exc:`ServerSelectionTimeoutError` after + `server_selection_timeout` if no matching servers are found. + """ + if server_selection_timeout is None: + server_timeout = self.get_server_selection_timeout() + else: + server_timeout = server_selection_timeout + + async with self._lock: + server_descriptions = await self._select_servers_loop( + selector, server_timeout, operation, operation_id, address + ) + + return [ + cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions + ] + + async def _select_servers_loop( + self, + selector: Callable[[Selection], Selection], + timeout: float, + operation: str, + operation_id: Optional[int], + address: Optional[_Address], + ) -> list[ServerDescription]: + """select_servers() guts. Hold the lock when calling this.""" + now = time.monotonic() + end_time = now + timeout + logged_waiting = False + + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.STARTED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + ) + + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + while not server_descriptions: + # No suitable servers. + if timeout == 0 or now > end_time: + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.FAILED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + failure=self._error_message(selector), + ) + raise ServerSelectionTimeoutError( + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" + ) + + if not logged_waiting: + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.WAITING, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + remainingTimeMS=int(end_time - time.monotonic()), + ) + logged_waiting = True + + await self._ensure_opened() + self._request_check_all() + + # Release the lock and wait for the topology description to + # change, or for a timeout. We won't miss any changes that + # came after our most recent apply_selector call, since we've + # held the lock until now. + await self._condition.wait(common.MIN_HEARTBEAT_INTERVAL) + self._description.check_compatible() + now = time.monotonic() + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + self._description.check_compatible() + return server_descriptions + + async def _select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + servers = await self.select_servers( + selector, operation, server_selection_timeout, address, operation_id + ) + servers = _filter_servers(servers, deprioritized_servers) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 + + async def select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Like select_servers, but choose a random server if several match.""" + server = await self._select_server( + selector, + operation, + server_selection_timeout, + address, + deprioritized_servers, + operation_id=operation_id, + ) + if _csot.get_timeout(): + _csot.set_rtt(server.description.min_round_trip_time) + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.SUCCEEDED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + serverHost=server.description.address[0], + serverPort=server.description.address[1], + ) + return server + + async def select_server_by_address( + self, + address: _Address, + operation: str, + server_selection_timeout: Optional[int] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Return a Server for "address", reconnecting if necessary. + + If the server's type is not known, request an immediate check of all + servers. Time out after "server_selection_timeout" if the server + cannot be reached. + + :param address: A (host, port) pair. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. + If not provided, the default value + common.SERVER_SELECTION_TIMEOUT is used. + :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided. + + Calls self.open() if needed. + + Raises exc:`ServerSelectionTimeoutError` after + `server_selection_timeout` if no matching servers are found. + """ + return await self.select_server( + any_server_selector, + operation, + server_selection_timeout, + address, + operation_id=operation_id, + ) + + async def _process_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: + """Process a new ServerDescription on an opened topology. + + Hold the lock when calling this. + """ + td_old = self._description + sd_old = td_old._server_descriptions[server_description.address] + if _is_stale_server_description(sd_old, server_description): + # This is a stale hello response. Ignore it. + return + + new_td = updated_topology_description(self._description, server_description) + # CMAP: Ensure the pool is "ready" when the server is selectable. + if server_description.is_readable or ( + server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single + ): + server = self._servers.get(server_description.address) + if server: + await server.pool.ready() + + suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description + if self._publish_server and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_server_description_changed, + (sd_old, server_description, server_description.address, self._topology_id), + ) + ) + + self._description = new_td + await self._update_servers() + self._receive_cluster_time_no_lock(server_description.cluster_time) + + if self._publish_tp and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + + # Shutdown SRV polling for unsupported cluster types. + # This is only applicable if the old topology was Unknown, and the + # new one is something other than Unknown or Sharded. + if self._srv_monitor and ( + td_old.topology_type == TOPOLOGY_TYPE.Unknown + and self._description.topology_type not in SRV_POLLING_TOPOLOGIES + ): + await self._srv_monitor.close() + + # Clear the pool from a failed heartbeat. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + await server.pool.reset(interrupt_connections=interrupt_connections) + + # Wake waiters in select_servers(). + self._condition.notify_all() + + async def on_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: + """Process a new ServerDescription after an hello call completes.""" + # We do no I/O holding the lock. + async with self._lock: + # Monitors may continue working on hello calls for some time + # after a call to Topology.close, so this method may be called at + # any time. Ensure the topology is open before processing the + # change. + # Any monitored server was definitely in the topology description + # once. Check if it's still in the description or if some state- + # change removed it. E.g., we got a host list from the primary + # that didn't include this server. + if self._opened and self._description.has_server(server_description.address): + await self._process_change(server_description, reset_pool, interrupt_connections) + + async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new seedlist on an opened topology. + Hold the lock when calling this. + """ + td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return + self._description = _updated_topology_description_srv_polling(self._description, seedlist) + + await self._update_servers() + + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + + async def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new list of nodes obtained from scanning SRV records.""" + # We do no I/O holding the lock. + async with self._lock: + if self._opened: + await self._process_srv_update(seedlist) + + def get_server_by_address(self, address: _Address) -> Optional[Server]: + """Get a Server or None. + + Returns the current version of the server immediately, even if it's + Unknown or absent from the topology. Only use this in unittests. + In driver code, use select_server_by_address, since then you're + assured a recent view of the server's type and wire protocol version. + """ + return self._servers.get(address) + + def has_server(self, address: _Address) -> bool: + return address in self._servers + + async def get_primary(self) -> Optional[_Address]: + """Return primary's address or None.""" + # Implemented here in Topology instead of AsyncMongoClient, so it can lock. + async with self._lock: + topology_type = self._description.topology_type + if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary: + return None + + return writable_server_selector(self._new_selection())[0].address + + async def _get_replica_set_members( + self, selector: Callable[[Selection], Selection] + ) -> set[_Address]: + """Return set of replica set member addresses.""" + # Implemented here in Topology instead of AsyncMongoClient, so it can lock. + async with self._lock: + topology_type = self._description.topology_type + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): + return set() + + return {sd.address for sd in iter(selector(self._new_selection()))} + + async def get_secondaries(self) -> set[_Address]: + """Return set of secondary addresses.""" + return await self._get_replica_set_members(secondary_server_selector) + + async def get_arbiters(self) -> set[_Address]: + """Return set of arbiter addresses.""" + return await self._get_replica_set_members(arbiter_server_selector) + + def max_cluster_time(self) -> Optional[ClusterTime]: + """Return a document, the highest seen $clusterTime.""" + return self._max_cluster_time + + def _receive_cluster_time_no_lock(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + # Driver Sessions Spec: "Whenever a driver receives a cluster time from + # a server it MUST compare it to the current highest seen cluster time + # for the deployment. If the new cluster time is higher than the + # highest seen cluster time it MUST become the new highest seen cluster + # time. Two cluster times are compared using only the BsonTimestamp + # value of the clusterTime embedded field." + if cluster_time: + # ">" uses bson.timestamp.Timestamp's comparison operator. + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): + self._max_cluster_time = cluster_time + + async def receive_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + async with self._lock: + self._receive_cluster_time_no_lock(cluster_time) + + async def request_check_all(self, wait_time: int = 5) -> None: + """Wake all monitors, wait for at least one to check its server.""" + async with self._lock: + self._request_check_all() + await self._condition.wait(wait_time) + + def data_bearing_servers(self) -> list[ServerDescription]: + """Return a list of all data-bearing servers. + + This includes any server that might be selected for an operation. + """ + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers + + async def update_pool(self) -> None: + # Remove any stale sockets and add new sockets if pool is too small. + servers = [] + async with self._lock: + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.gen.get_overall())) + + for server, generation in servers: + try: + await server.pool.remove_stale_sockets(generation) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False, None) + await self.handle_error(server.description.address, ctx) + raise + + async def close(self) -> None: + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. + """ + async with self._lock: + for server in self._servers.values(): + await server.close() + + # Mark all servers Unknown. + self._description = self._description.reset() + for address, sd in self._description.server_descriptions().items(): + if address in self._servers: + self._servers[address].description = sd + + # Stop SRV polling thread. + if self._srv_monitor: + await self._srv_monitor.close() + + self._opened = False + self._closed = True + + # Publish only after releasing the lock. + if self._publish_tp: + assert self._events is not None + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if self._publish_server or self._publish_tp: + self.__events_executor.close() + + @property + def description(self) -> TopologyDescription: + return self._description + + async def pop_all_sessions(self) -> list[_ServerSession]: + """Pop all session ids from the pool.""" + async with self._lock: + return self._session_pool.pop_all() + + async def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + """Start or resume a server session, or raise ConfigurationError.""" + async with self._lock: + return self._session_pool.get_server_session(session_timeout_minutes) + + async def return_server_session(self, server_session: _ServerSession, lock: bool) -> None: + if lock: + async with self._lock: + self._session_pool.return_server_session( + server_session, self._description.logical_session_timeout_minutes + ) + else: + # Called from a __del__ method, can't use a lock. + self._session_pool.return_server_session_no_lock(server_session) + + def _new_selection(self) -> Selection: + """A Selection object, initially including all known servers. + + Hold the lock when calling this. + """ + return Selection.from_topology_description(self._description) + + async def _ensure_opened(self) -> None: + """Start monitors, or restart after a fork. + + Hold the lock when calling this. + """ + if self._closed: + raise InvalidOperation("Cannot use AsyncMongoClient after close") + + if not self._opened: + self._opened = True + await self._update_servers() + + # Start or restart the events publishing thread. + if self._publish_tp or self._publish_server: + self.__events_executor.open() + + # Start the SRV polling thread. + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): + self._srv_monitor.open() + + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + await self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) + + # Ensure that the monitors are open. + for server in self._servers.values(): + await server.open() + + def _is_stale_error(self, address: _Address, err_ctx: _ErrorContext) -> bool: + server = self._servers.get(address) + if server is None: + # Another thread removed this server from the topology. + return True + + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): + # This is an outdated error from a previous pool version. + return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, "details"): + if isinstance(error.details, dict): + error_tv = error.details.get("topologyVersion") + + return _is_stale_error_topology_version(cur_tv, error_tv) + + async def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + + if isinstance(error, NetworkTimeout) and err_ctx.completed_handshake: + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif isinstance(error, WriteError): + # Ignore writeErrors. + return + elif isinstance(error, (NotPrimaryError, OperationFailure)): + # As per the SDAM spec if: + # - the server sees a "not primary" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + if hasattr(error, "code"): + err_code = error.code + else: + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get("code", default) # type: ignore[union-attr] + if err_code in helpers_constants._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers_constants._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + await server.reset(service_id) + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + await server.reset(service_id) + elif isinstance(error, ConnectionFailure): + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + await server.reset(service_id) + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the hello check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() + + async def handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + """Handle an application error. + + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. + """ + async with self._lock: + await self._handle_error(address, err_ctx) + + def _request_check_all(self) -> None: + """Wake all monitors. Hold the lock when calling this.""" + for server in self._servers.values(): + server.request_check() + + async def _update_servers(self) -> None: + """Sync our Servers from TopologyDescription.server_descriptions. + + Hold the lock while calling this. + """ + for address, sd in self._description.server_descriptions().items(): + if address not in self._servers: + monitor = self._settings.monitor_class( + server_description=sd, + topology=self, + pool=self._create_pool_for_monitor(address), + topology_settings=self._settings, + ) + + weak = None + if self._publish_server and self._events is not None: + weak = weakref.ref(self._events) + server = Server( + server_description=sd, + pool=self._create_pool_for_server(address), + monitor=monitor, + topology_id=self._topology_id, + listeners=self._listeners, + events=weak, + ) + + self._servers[address] = server + await server.open() + else: + # Cache old is_writable value. + was_writable = self._servers[address].description.is_writable + # Update server description. + self._servers[address].description = sd + # Update is_writable value of the pool, if it changed. + if was_writable != sd.is_writable: + await self._servers[address].pool.update_is_writable(sd.is_writable) + + for address, server in list(self._servers.items()): + if not self._description.has_server(address): + await server.close() + self._servers.pop(address) + + def _create_pool_for_server(self, address: _Address) -> Pool: + return self._settings.pool_class( + address, self._settings.pool_options, client_id=self._topology_id + ) + + def _create_pool_for_monitor(self, address: _Address) -> Pool: + options = self._settings.pool_options + + # According to the Server Discovery And Monitoring Spec, monitors use + # connect_timeout for both connect_timeout and socket_timeout. The + # pool only has one socket so maxPoolSize and so on aren't needed. + monitor_pool_options = PoolOptions( + connect_timeout=options.connect_timeout, + socket_timeout=options.connect_timeout, + ssl_context=options._ssl_context, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, + event_listeners=options._event_listeners, + appname=options.appname, + driver=options.driver, + pause_enabled=False, + server_api=options.server_api, + ) + + return self._settings.pool_class( + address, monitor_pool_options, handshake=False, client_id=self._topology_id + ) + + def _error_message(self, selector: Callable[[Selection], Selection]) -> str: + """Format an error message if server selection fails. + + Hold the lock when calling this. + """ + is_replica_set = self._description.topology_type in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) + + if is_replica_set: + server_plural = "replica set members" + elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: + server_plural = "mongoses" + else: + server_plural = "servers" + + if self._description.known_servers: + # We've connected, but no servers match the selector. + if selector is writable_server_selector: + if is_replica_set: + return "No primary available for writes" + else: + return "No %s available for writes" % server_plural + else: + return f'No {server_plural} match selector "{selector}"' + else: + addresses = list(self._description.server_descriptions()) + servers = list(self._description.server_descriptions().values()) + if not servers: + if is_replica_set: + # We removed all servers because of the wrong setName? + return 'No {} available for replica set name "{}"'.format( + server_plural, + self._settings.replica_set_name, + ) + else: + return "No %s available" % server_plural + + # 1 or more servers, all Unknown. Are they unknown for one reason? + error = servers[0].error + same = all(server.error == error for server in servers[1:]) + if same: + if error is None: + # We're still discovering. + return "No %s found yet" % server_plural + + if is_replica_set and not set(addresses).intersection(self._seed_addresses): + # We replaced our seeds with new hosts but can't reach any. + return ( + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) + + return str(error) + else: + return ",".join(str(server.error) for server in servers if server.error) + + def __repr__(self) -> str: + msg = "" + if not self._opened: + msg = "CLOSED " + return f"<{self.__class__.__name__} {msg}{self._description!r}>" + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + """The properties to use for AsyncMongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self) -> int: + return hash(self.eq_props()) + + +class _ErrorContext: + """An error with context for SDAM error handling.""" + + def __init__( + self, + error: BaseException, + max_wire_version: int, + sock_generation: int, + completed_handshake: bool, + service_id: Optional[ObjectId], + ): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + self.completed_handshake = completed_handshake + self.service_id = service_id + + +def _is_stale_error_topology_version( + current_tv: Optional[Mapping[str, Any]], error_tv: Optional[Mapping[str, Any]] +) -> bool: + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv["processId"] != error_tv["processId"]: + return False + return current_tv["counter"] >= error_tv["counter"] + + +def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDescription) -> bool: + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv["processId"] != new_tv["processId"]: + return False + return current_tv["counter"] > new_tv["counter"] + + +def _filter_servers( + candidates: list[Server], deprioritized_servers: Optional[list[Server]] = None +) -> list[Server]: + """Filter out deprioritized servers from a list of server candidates.""" + if not deprioritized_servers: + return candidates + + filtered = [server for server in candidates if server not in deprioritized_servers] + + # If not possible to pick a prioritized server, return the original list + return filtered or candidates diff --git a/pymongo/asynchronous/topology_description.py b/pymongo/asynchronous/topology_description.py new file mode 100644 index 0000000000..ce7aff7f51 --- /dev/null +++ b/pymongo/asynchronous/topology_description.py @@ -0,0 +1,678 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Represent a deployment of MongoDB servers.""" +from __future__ import annotations + +from random import sample +from typing import ( + Any, + Callable, + List, + Mapping, + MutableMapping, + NamedTuple, + Optional, + cast, +) + +from bson.min_key import MinKey +from bson.objectid import ObjectId +from pymongo.asynchronous import common +from pymongo.asynchronous.read_preferences import ReadPreference, _AggWritePref, _ServerMode +from pymongo.asynchronous.server_description import ServerDescription +from pymongo.asynchronous.server_selectors import Selection +from pymongo.asynchronous.typings import _Address +from pymongo.errors import ConfigurationError +from pymongo.server_type import SERVER_TYPE + +_IS_SYNC = False + + +# Enumeration for various kinds of MongoDB cluster topologies. +class _TopologyType(NamedTuple): + Single: int + ReplicaSetNoPrimary: int + ReplicaSetWithPrimary: int + Sharded: int + Unknown: int + LoadBalanced: int + + +TOPOLOGY_TYPE = _TopologyType(*range(6)) + +# Topologies compatible with SRV record polling. +SRV_POLLING_TOPOLOGIES: tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) + + +_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] + + +class TopologyDescription: + def __init__( + self, + topology_type: int, + server_descriptions: dict[_Address, ServerDescription], + replica_set_name: Optional[str], + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], + topology_settings: Any, + ) -> None: + """Representation of a deployment of MongoDB servers. + + :param topology_type: initial type + :param server_descriptions: dict of (address, ServerDescription) for + all seeds + :param replica_set_name: replica set name or None + :param max_set_version: greatest setVersion seen from a primary, or None + :param max_election_id: greatest electionId seen from a primary, or None + :param topology_settings: a TopologySettings + """ + self._topology_type = topology_type + self._replica_set_name = replica_set_name + self._server_descriptions = server_descriptions + self._max_set_version = max_set_version + self._max_election_id = max_election_id + + # The heartbeat_frequency is used in staleness estimates. + self._topology_settings = topology_settings + + # Is PyMongo compatible with all servers' wire protocols? + self._incompatible_err = None + if self._topology_type != TOPOLOGY_TYPE.LoadBalanced: + self._init_incompatible_err() + + # Server Discovery And Monitoring Spec: Whenever a client updates the + # TopologyDescription from an hello response, it MUST set + # TopologyDescription.logicalSessionTimeoutMinutes to the smallest + # logicalSessionTimeoutMinutes value among ServerDescriptions of all + # data-bearing server types. If any have a null + # logicalSessionTimeoutMinutes, then + # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. + readable_servers = self.readable_servers + if not readable_servers: + self._ls_timeout_minutes = None + elif any(s.logical_session_timeout_minutes is None for s in readable_servers): + self._ls_timeout_minutes = None + else: + self._ls_timeout_minutes = min( # type: ignore[type-var] + s.logical_session_timeout_minutes for s in readable_servers + ) + + def _init_incompatible_err(self) -> None: + """Internal compatibility check for non-load balanced topologies.""" + for s in self._server_descriptions.values(): + if not s.is_server_type_known: + continue + + # s.min/max_wire_version is the server's wire protocol. + # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports. + server_too_new = ( + # Server too new. + s.min_wire_version is not None + and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION + ) + + server_too_old = ( + # Server too old. + s.max_wire_version is not None + and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION + ) + + if server_too_new: + self._incompatible_err = ( + "Server at %s:%d requires wire version %d, but this " # type: ignore + "version of PyMongo only supports up to %d." + % ( + s.address[0], + s.address[1] or 0, + s.min_wire_version, + common.MAX_SUPPORTED_WIRE_VERSION, + ) + ) + + elif server_too_old: + self._incompatible_err = ( + "Server at %s:%d reports wire version %d, but this " # type: ignore + "version of PyMongo requires at least %d (MongoDB %s)." + % ( + s.address[0], + s.address[1] or 0, + s.max_wire_version, + common.MIN_SUPPORTED_WIRE_VERSION, + common.MIN_SUPPORTED_SERVER_VERSION, + ) + ) + + break + + def check_compatible(self) -> None: + """Raise ConfigurationError if any server is incompatible. + + A server is incompatible if its wire protocol version range does not + overlap with PyMongo's. + """ + if self._incompatible_err: + raise ConfigurationError(self._incompatible_err) + + def has_server(self, address: _Address) -> bool: + return address in self._server_descriptions + + def reset_server(self, address: _Address) -> TopologyDescription: + """A copy of this description, with one server marked Unknown.""" + unknown_sd = self._server_descriptions[address].to_unknown() + return updated_topology_description(self, unknown_sd) + + def reset(self) -> TopologyDescription: + """A copy of this description, with all servers marked Unknown.""" + if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: + topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + topology_type = self._topology_type + + # The default ServerDescription's type is Unknown. + sds = {address: ServerDescription(address) for address in self._server_descriptions} + + return TopologyDescription( + topology_type, + sds, + self._replica_set_name, + self._max_set_version, + self._max_election_id, + self._topology_settings, + ) + + def server_descriptions(self) -> dict[_Address, ServerDescription]: + """dict of (address, + :class:`~pymongo.server_description.ServerDescription`). + """ + return self._server_descriptions.copy() + + @property + def topology_type(self) -> int: + """The type of this topology.""" + return self._topology_type + + @property + def topology_type_name(self) -> str: + """The topology type as a human readable string. + + .. versionadded:: 3.4 + """ + return TOPOLOGY_TYPE._fields[self._topology_type] + + @property + def replica_set_name(self) -> Optional[str]: + """The replica set name.""" + return self._replica_set_name + + @property + def max_set_version(self) -> Optional[int]: + """Greatest setVersion seen from a primary, or None.""" + return self._max_set_version + + @property + def max_election_id(self) -> Optional[ObjectId]: + """Greatest electionId seen from a primary, or None.""" + return self._max_election_id + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + """Minimum logical session timeout, or None.""" + return self._ls_timeout_minutes + + @property + def known_servers(self) -> list[ServerDescription]: + """List of Servers of types besides Unknown.""" + return [s for s in self._server_descriptions.values() if s.is_server_type_known] + + @property + def has_known_servers(self) -> bool: + """Whether there are any Servers of types besides Unknown.""" + return any(s for s in self._server_descriptions.values() if s.is_server_type_known) + + @property + def readable_servers(self) -> list[ServerDescription]: + """List of readable Servers.""" + return [s for s in self._server_descriptions.values() if s.is_readable] + + @property + def common_wire_version(self) -> Optional[int]: + """Minimum of all servers' max wire versions, or None.""" + servers = self.known_servers + if servers: + return min(s.max_wire_version for s in self.known_servers) + + return None + + @property + def heartbeat_frequency(self) -> int: + return self._topology_settings.heartbeat_frequency + + @property + def srv_max_hosts(self) -> int: + return self._topology_settings._srv_max_hosts + + def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: + if not selection: + return [] + round_trip_times: list[float] = [] + for server in selection.server_descriptions: + if server.round_trip_time is None: + config_err_msg = f"round_trip_time for server {server.address} is unexpectedly None: {self}, servers: {selection.server_descriptions}" + raise ConfigurationError(config_err_msg) + round_trip_times.append(server.round_trip_time) + # Round trip time in seconds. + fastest = min(round_trip_times) + threshold = self._topology_settings.local_threshold_ms / 1000.0 + return [ + s + for s in selection.server_descriptions + if (cast(float, s.round_trip_time) - fastest) <= threshold + ] + + def apply_selector( + self, + selector: Any, + address: Optional[_Address] = None, + custom_selector: Optional[_ServerSelector] = None, + ) -> list[ServerDescription]: + """List of servers matching the provided selector(s). + + :param selector: a callable that takes a Selection as input and returns + a Selection as output. For example, an instance of a read + preference from :mod:`~pymongo.read_preferences`. + :param address: A server address to select. + :param custom_selector: A callable that augments server + selection rules. Accepts a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + + .. versionadded:: 3.4 + """ + if getattr(selector, "min_wire_version", 0): + common_wv = self.common_wire_version + if common_wv and common_wv < selector.min_wire_version: + raise ConfigurationError( + "%s requires min wire version %d, but topology's min" + " wire version is %d" % (selector, selector.min_wire_version, common_wv) + ) + + if isinstance(selector, _AggWritePref): + selector.selection_hook(self) + + if self.topology_type == TOPOLOGY_TYPE.Unknown: + return [] + elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): + # Ignore selectors for standalone and load balancer mode. + return self.known_servers + if address: + # Ignore selectors when explicit address is requested. + description = self.server_descriptions().get(address) + return [description] if description else [] + + selection = Selection.from_topology_description(self) + # Ignore read preference for sharded clusters. + if self.topology_type != TOPOLOGY_TYPE.Sharded: + selection = selector(selection) + + # Apply custom selector followed by localThresholdMS. + if custom_selector is not None and selection: + selection = selection.with_server_descriptions( + custom_selector(selection.server_descriptions) + ) + return self._apply_local_threshold(selection) + + def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: + """Does this topology have any readable servers available matching the + given read preference? + + :param read_preference: an instance of a read preference from + :mod:`~pymongo.read_preferences`. Defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + + .. note:: When connected directly to a single server this method + always returns ``True``. + + .. versionadded:: 3.4 + """ + common.validate_read_preference("read_preference", read_preference) + return any(self.apply_selector(read_preference)) + + def has_writable_server(self) -> bool: + """Does this topology have a writable server available? + + .. note:: When connected directly to a single server this method + always returns ``True``. + + .. versionadded:: 3.4 + """ + return self.has_readable_server(ReadPreference.PRIMARY) + + def __repr__(self) -> str: + # Sort the servers by address. + servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) + return "<{} id: {}, topology_type: {}, servers: {!r}>".format( + self.__class__.__name__, + self._topology_settings._topology_id, + self.topology_type_name, + servers, + ) + + +# If topology type is Unknown and we receive a hello response, what should +# the new topology type be? +_SERVER_TYPE_TO_TOPOLOGY_TYPE = { + SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded, + SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary, + SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + # Note: SERVER_TYPE.LoadBalancer and Unknown are intentionally left out. +} + + +def updated_topology_description( + topology_description: TopologyDescription, server_description: ServerDescription +) -> TopologyDescription: + """Return an updated copy of a TopologyDescription. + + :param topology_description: the current TopologyDescription + :param server_description: a new ServerDescription that resulted from + a hello call + + Called after attempting (successfully or not) to call hello on the + server at server_description.address. Does not modify topology_description. + """ + address = server_description.address + + # These values will be updated, if necessary, to form the new + # TopologyDescription. + topology_type = topology_description.topology_type + set_name = topology_description.replica_set_name + max_set_version = topology_description.max_set_version + max_election_id = topology_description.max_election_id + server_type = server_description.server_type + + # Don't mutate the original dict of server descriptions; copy it. + sds = topology_description.server_descriptions() + + # Replace this server's description with the new one. + sds[address] = server_description + + if topology_type == TOPOLOGY_TYPE.Single: + # Set server type to Unknown if replica set name does not match. + if set_name is not None and set_name != server_description.replica_set_name: + error = ConfigurationError( + "client is configured to connect to a replica set named " + "'{}' but this node belongs to a set named '{}'".format( + set_name, server_description.replica_set_name + ) + ) + sds[address] = server_description.to_unknown(error=error) + # Single type never changes. + return TopologyDescription( + TOPOLOGY_TYPE.Single, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) + + if topology_type == TOPOLOGY_TYPE.Unknown: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): + if len(topology_description._topology_settings.seeds) == 1: + topology_type = TOPOLOGY_TYPE.Single + else: + # Remove standalone from Topology when given multiple seeds. + sds.pop(address) + elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost): + topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type] + + if topology_type == TOPOLOGY_TYPE.Sharded: + if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown): + sds.pop(address) + + elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): + sds.pop(address) + + elif server_type == SERVER_TYPE.RSPrimary: + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type, set_name = _update_rs_no_primary_from_member( + sds, set_name, server_description + ) + + elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): + sds.pop(address) + topology_type = _check_has_primary(sds) + + elif server_type == SERVER_TYPE.RSPrimary: + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description) + + else: + # Server type is Unknown or RSGhost: did we just lose the primary? + topology_type = _check_has_primary(sds) + + # Return updated copy. + return TopologyDescription( + topology_type, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) + + +def _updated_topology_description_srv_polling( + topology_description: TopologyDescription, seedlist: list[tuple[str, Any]] +) -> TopologyDescription: + """Return an updated copy of a TopologyDescription. + + :param topology_description: the current TopologyDescription + :param seedlist: a list of new seeds new ServerDescription that resulted from + a hello call + """ + assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES + # Create a copy of the server descriptions. + sds = topology_description.server_descriptions() + + # If seeds haven't changed, don't do anything. + if set(sds.keys()) == set(seedlist): + return topology_description + + # Remove SDs corresponding to servers no longer part of the SRV record. + for address in list(sds.keys()): + if address not in seedlist: + sds.pop(address) + + if topology_description.srv_max_hosts != 0: + new_hosts = set(seedlist) - set(sds.keys()) + n_to_add = topology_description.srv_max_hosts - len(sds) + if n_to_add > 0: + seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts))) + else: + seedlist = [] + # Add SDs corresponding to servers recently added to the SRV record. + for address in seedlist: + if address not in sds: + sds[address] = ServerDescription(address) + return TopologyDescription( + topology_description.topology_type, + sds, + topology_description.replica_set_name, + topology_description.max_set_version, + topology_description.max_election_id, + topology_description._topology_settings, + ) + + +def _update_rs_from_primary( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], +) -> tuple[int, Optional[str], Optional[int], Optional[ObjectId]]: + """Update topology description from a primary's hello response. + + Pass in a dict of ServerDescriptions, current replica set name, the + ServerDescription we are processing, and the TopologyDescription's + max_set_version and max_election_id if any. + + Returns (new topology type, new replica_set_name, new max_set_version, + new max_election_id). + """ + if replica_set_name is None: + replica_set_name = server_description.replica_set_name + + elif replica_set_name != server_description.replica_set_name: + # We found a primary but it doesn't have the replica_set_name + # provided by the user. + sds.pop(server_description.address) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple: tuple = (server_description.set_version, server_description.election_id) + max_election_tuple: tuple = (max_set_version, max_election_id) + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version + + # We've heard from the primary. Is it the same primary as before? + for server in sds.values(): + if ( + server.server_type is SERVER_TYPE.RSPrimary + and server.address != server_description.address + ): + # Reset old primary's type to Unknown. + sds[server.address] = server.to_unknown() + + # There can be only one prior primary. + break + + # Discover new hosts from this primary's response. + for new_address in server_description.all_hosts: + if new_address not in sds: + sds[new_address] = ServerDescription(new_address) + + # Remove hosts not in the response. + for addr in set(sds) - server_description.all_hosts: + sds.pop(addr) + + # If the host list differs from the seed list, we may not have a primary + # after all. + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) + + +def _update_rs_with_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> int: + """RS with known primary. Process a response from a non-primary. + + Pass in a dict of ServerDescriptions, current replica set name, and the + ServerDescription we are processing. + + Returns new topology type. + """ + assert replica_set_name is not None + + if replica_set_name != server_description.replica_set_name: + sds.pop(server_description.address) + elif server_description.me and server_description.address != server_description.me: + sds.pop(server_description.address) + + # Had this member been the primary? + return _check_has_primary(sds) + + +def _update_rs_no_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> tuple[int, Optional[str]]: + """RS without known primary. Update from a non-primary's response. + + Pass in a dict of ServerDescriptions, current replica set name, and the + ServerDescription we are processing. + + Returns (new topology type, new replica_set_name). + """ + topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary + if replica_set_name is None: + replica_set_name = server_description.replica_set_name + + elif replica_set_name != server_description.replica_set_name: + sds.pop(server_description.address) + return topology_type, replica_set_name + + # This isn't the primary's response, so don't remove any servers + # it doesn't report. Only add new servers. + for address in server_description.all_hosts: + if address not in sds: + sds[address] = ServerDescription(address) + + if server_description.me and server_description.address != server_description.me: + sds.pop(server_description.address) + + return topology_type, replica_set_name + + +def _check_has_primary(sds: Mapping[_Address, ServerDescription]) -> int: + """Current topology type is ReplicaSetWithPrimary. Is primary still known? + + Pass in a dict of ServerDescriptions. + + Returns new topology type. + """ + for s in sds.values(): + if s.server_type == SERVER_TYPE.RSPrimary: + return TOPOLOGY_TYPE.ReplicaSetWithPrimary + else: # noqa: PLW0120 + return TOPOLOGY_TYPE.ReplicaSetNoPrimary diff --git a/pymongo/asynchronous/typings.py b/pymongo/asynchronous/typings.py new file mode 100644 index 0000000000..508c5b6dea --- /dev/null +++ b/pymongo/asynchronous/typings.py @@ -0,0 +1,61 @@ +# Copyright 2022-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by PyMongo""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +from bson.typings import _DocumentOut, _DocumentType, _DocumentTypeArg + +if TYPE_CHECKING: + from pymongo.asynchronous.collation import Collation + +_IS_SYNC = False + +# Common Shared Types. +_Address = Tuple[str, Optional[int]] +_CollationIn = Union[Mapping[str, Any], "Collation"] +_Pipeline = Sequence[Mapping[str, Any]] +ClusterTime = Mapping[str, Any] + +_T = TypeVar("_T") + + +def strip_optional(elem: Optional[_T]) -> _T: + """This function is to allow us to cast all of the elements of an iterator from Optional[_T] to _T + while inside a list comprehension. + """ + assert elem is not None + return elem + + +__all__ = [ + "_DocumentOut", + "_DocumentType", + "_DocumentTypeArg", + "_Address", + "_CollationIn", + "_Pipeline", + "strip_optional", +] diff --git a/pymongo/asynchronous/uri_parser.py b/pymongo/asynchronous/uri_parser.py new file mode 100644 index 0000000000..b5fde6c30c --- /dev/null +++ b/pymongo/asynchronous/uri_parser.py @@ -0,0 +1,624 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI.""" +from __future__ import annotations + +import re +import sys +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sized, + Union, + cast, +) +from urllib.parse import unquote_plus + +from pymongo.asynchronous.client_options import _parse_ssl_options +from pymongo.asynchronous.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) +from pymongo.asynchronous.srv_resolver import _have_dnspython, _SrvResolver +from pymongo.asynchronous.typings import _Address +from pymongo.errors import ConfigurationError, InvalidURI + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext + +_IS_SYNC = False +SCHEME = "mongodb://" +SCHEME_LEN = len(SCHEME) +SRV_SCHEME = "mongodb+srv://" +SRV_SCHEME_LEN = len(SRV_SCHEME) +DEFAULT_PORT = 27017 + + +def _unquoted_percent(s: str) -> bool: + """Check for unescaped percent signs. + + :param s: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. + """ + for i in range(len(s)): + if s[i] == "%": + sub = s[i : i + 3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote_plus(sub) == sub: + return True + return False + + +def parse_userinfo(userinfo: str) -> tuple[str, str]: + """Validates the format of user information in a MongoDB URI. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. + + Returns a 2-tuple containing the unescaped username followed + by the unescaped password. + + :param userinfo: A string of the form : + """ + if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): + raise InvalidURI( + "Username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + + user, _, passwd = userinfo.partition(":") + # No password is expected with GSSAPI authentication. + if not user: + raise InvalidURI("The empty string is not valid username.") + + return unquote_plus(user), unquote_plus(passwd) + + +def parse_ipv6_literal_host( + entity: str, default_port: Optional[int] +) -> tuple[str, Optional[Union[str, int]]]: + """Validates an IPv6 literal host:port string. + + Returns a 2-tuple of IPv6 literal followed by port where + port is default_port if it wasn't specified in entity. + + :param entity: A string that represents an IPv6 literal enclosed + in braces (e.g. '[::1]' or '[::1]:27017'). + :param default_port: The port number to use when one wasn't + specified in entity. + """ + if entity.find("]") == -1: + raise ValueError( + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." + ) + i = entity.find("]:") + if i == -1: + return entity[1:-1], default_port + return entity[1:i], entity[i + 2 :] + + +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: + """Validates a host string + + Returns a 2-tuple of host followed by port where port is default_port + if it wasn't specified in the string. + + :param entity: A host or host:port string where host could be a + hostname or IP address. + :param default_port: The port number to use when one wasn't + specified in entity. + """ + host = entity + port: Optional[Union[str, int]] = default_port + if entity[0] == "[": + host, port = parse_ipv6_literal_host(entity, default_port) + elif entity.endswith(".sock"): + return entity, default_port + elif entity.find(":") != -1: + if entity.count(":") > 1: + raise ValueError( + "Reserved characters such as ':' must be " + "escaped according RFC 2396. An IPv6 " + "address literal must be enclosed in '[' " + "and ']' according to RFC 2732." + ) + host, port = host.split(":", 1) + if isinstance(port, str): + if not port.isdigit() or int(port) > 65535 or int(port) <= 0: + raise ValueError(f"Port must be an integer between 0 and 65535: {port!r}") + port = int(port) + + # Normalize hostname to lowercase, since DNS is case-insensitive: + # http://tools.ietf.org/html/rfc4343 + # This prevents useless rediscovery if "foo.com" is in the seed list but + # "FOO.com" is in the hello response. + return host.lower(), port + + +# Options whose values are implicitly determined by tlsInsecure. +_IMPLICIT_TLSINSECURE_OPTS = { + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", +} + + +def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: + """Helper method for split_options which creates the options dict. + Also handles the creation of a list for the URI tag_sets/ + readpreferencetags portion, and the use of a unicode options string. + """ + options = _CaseInsensitiveDictionary() + for uriopt in opts.split(delim): + key, value = uriopt.split("=") + if key.lower() == "readpreferencetags": + options.setdefault(key, []).append(value) + else: + if key in options: + warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) + if key.lower() == "authmechanismproperties": + val = value + else: + val = unquote_plus(value) + options[key] = val + + return options + + +def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Raise appropriate errors when conflicting TLS options are present in + the options dictionary. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Implicitly defined options must not be explicitly specified. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + if opt in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) + ) + + # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. + tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") + if tlsallowinvalidcerts is not None: + if "tlsdisableocspendpointcheck" in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg + % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) + ) + if tlsallowinvalidcerts is True: + options["tlsdisableocspendpointcheck"] = True + + # Handle co-occurence of CRL and OCSP-related options. + tlscrlfile = options.get("tlscrlfile") + if tlscrlfile is not None: + for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): + if options.get(opt) is True: + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." + raise InvalidURI(err_msg % (opt,)) + + if "ssl" in options and "tls" in options: + + def truth_value(val: Any) -> Any: + if val in ("true", "false"): + return val == "true" + if isinstance(val, bool): + return val + return val + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) + + return options + + +def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Issue appropriate warnings when deprecated options are present in the + options dictionary. Removes deprecated option key, value pairs if the + options dictionary is found to also have the renamed option. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + for optname in list(options): + if optname in URI_OPTIONS_DEPRECATION_MAP: + mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] + if mode == "renamed": + newoptname = message + if newoptname in options: + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." + warnings.warn( + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) + options.pop(optname) + continue + warn_msg = "Option '%s' is deprecated, use '%s' instead." + warnings.warn( + warn_msg % (options.cased_key(optname), newoptname), + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": + warn_msg = "Option '%s' is deprecated. %s." + warnings.warn( + warn_msg % (options.cased_key(optname), message), + DeprecationWarning, + stacklevel=2, + ) + + return options + + +def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Normalizes option names in the options dictionary by converting them to + their internally-used names. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Expand the tlsInsecure option. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + # Implicit options are logically the same as tlsInsecure. + options[opt] = tlsinsecure + + for optname in list(options): + intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) + if intname is not None: + options[intname] = options.pop(optname) + + return options + + +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: + """Validates and normalizes options passed in a MongoDB URI. + + Returns a new dictionary of validated and normalized options. If warn is + False then errors will be thrown for invalid options, otherwise they will + be ignored and a warning will be issued. + + :param opts: A dict of MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and + invalid options will be ignored. Otherwise invalid options will + cause errors. + """ + return get_validated_options(opts, warn) + + +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: + """Takes the options portion of a MongoDB URI, validates each option + and returns the options in a dictionary. + + :param opt: A string representing MongoDB URI options. + :param validate: If ``True`` (the default), validate and normalize all + options. + :param warn: If ``False`` (the default), suppress all warnings raised + during validation of options. + :param normalize: If ``True`` (the default), renames all options to their + internally-used names. + """ + and_idx = opts.find("&") + semi_idx = opts.find(";") + try: + if and_idx >= 0 and semi_idx >= 0: + raise InvalidURI("Can not mix '&' and ';' for option separators.") + elif and_idx >= 0: + options = _parse_options(opts, "&") + elif semi_idx >= 0: + options = _parse_options(opts, ";") + elif opts.find("=") != -1: + options = _parse_options(opts, None) + else: + raise ValueError + except ValueError: + raise InvalidURI("MongoDB URI options are key=value pairs.") from None + + options = _handle_security_options(options) + + options = _handle_option_deprecations(options) + + if normalize: + options = _normalize_options(options) + + if validate: + options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") + + return options + + +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: + """Takes a string of the form host1[:port],host2[:port]... and + splits it into (host, port) tuples. If [:port] isn't present the + default_port is used. + + Returns a set of 2-tuples containing the host name (or IP) followed by + port number. + + :param hosts: A string of the form host1[:port],host2[:port],... + :param default_port: The port number to use when one wasn't specified + for a host. + """ + nodes = [] + for entity in hosts.split(","): + if not entity: + raise ConfigurationError("Empty host (or extra comma in host list).") + port = default_port + # Unix socket entities don't have ports + if entity.endswith(".sock"): + port = None + nodes.append(parse_host(entity, port)) + return nodes + + +# Prohibited characters in database name. DB names also can't have ".", but for +# backward-compat we allow "db.collection" in URI. +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") + +_ALLOWED_TXT_OPTS = frozenset( + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) + + +def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: + # Ensure directConnection was not True if there are multiple seeds. + if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") + + if options.get("loadbalanced"): + if len(nodes) > 1: + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") + + +def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + """Parse and validate a MongoDB URI. + + Returns a dict of the form:: + + { + 'nodelist': , + 'username': or None, + 'password': or None, + 'database': or None, + 'collection': or None, + 'options': , + 'fqdn': or None + } + + If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done + to build nodelist and options. + + :param uri: The MongoDB URI to parse. + :param default_port: The port number to use when one wasn't specified + for a host in the URI. + :param validate: If ``True`` (the default), validate and + normalize all options. Default: ``True``. + :param warn: When validating, if ``True`` then will warn + the user then ignore any invalid options or values. If ``False``, + validation will error when options are unsupported or values are + invalid. Default: ``False``. + :param normalize: If ``True``, convert names of URI options + to their internally-used names. Default: ``True``. + :param connect_timeout: The maximum time in milliseconds to + wait for a response from the DNS server. + :param srv_service_name: A custom SRV service name + + .. versionchanged:: 4.6 + The delimiting slash (``/``) between hosts and connection options is now optional. + For example, "mongodb://example.com?tls=true" is now a valid URI. + + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported. + + .. versionchanged:: 3.9 + Added the ``normalize`` parameter. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + elif uri.startswith(SRV_SCHEME): + if not _have_dnspython(): + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + "installed to use mongodb+srv:// URIs. " + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) + ) + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + else: + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") + + if not scheme_free: + raise InvalidURI("Must provide at least one hostname or IP.") + + user = None + passwd = None + dbase = None + collection = None + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, dbase = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if dbase: + dbase = unquote_plus(dbase) + if "." in dbase: + dbase, collection = dbase.split(".", 1) + if _BAD_DB_CHARS.search(dbase): + raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") + user, passwd = parse_userinfo(userinfo) + else: + hosts = host_part + + if "/" in hosts: + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) + + hosts = unquote_plus(hosts) + fqdn = None + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + if options.get("directConnection"): + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") + nodes = split_hosts(hosts, default_port=None) + if len(nodes) != 1: + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") + fqdn, port = nodes[0] + if port is not None: + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = dns_resolver.get_hosts() + dns_options = dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError( + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" + ) + elif not is_srv and srv_max_hosts: + raise ConfigurationError( + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" + ) + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, + } + + +def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError("kms_tls_options must be a dict") + contexts = {} + for provider, options in kms_tls_options.items(): + if not isinstance(options, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + options.setdefault("tls", True) + opts = _CaseInsensitiveDictionary(options) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + if ssl_context is None: + raise ConfigurationError("TLS is required for KMS providers") + if allow_invalid_hostnames: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableCertificateRevocationCheck", + ]: + if n in opts: + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") + contexts[provider] = ssl_context + return contexts + + +if __name__ == "__main__": + import pprint + + try: + pprint.pprint(parse_uri(sys.argv[1])) # noqa: T203 + except InvalidURI as exc: + print(exc) # noqa: T201 + sys.exit(0) diff --git a/pymongo/auth.py b/pymongo/auth.py index 8bc4145abc..13302ae5db 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -1,4 +1,4 @@ -# Copyright 2013-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,645 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Authentication helpers.""" +"""Re-import of synchronous Auth API for compatibility.""" from __future__ import annotations -import functools -import hashlib -import hmac -import os -import socket -import typing -from base64 import standard_b64decode, standard_b64encode -from collections import namedtuple -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Dict, - Mapping, - MutableMapping, - Optional, - cast, -) -from urllib.parse import quote +from pymongo.synchronous.auth import * # noqa: F403 +from pymongo.synchronous.auth import __doc__ as original_doc -from bson.binary import Binary -from pymongo.auth_aws import _authenticate_aws -from pymongo.auth_oidc import ( - _authenticate_oidc, - _get_authenticator, - _OIDCAzureCallback, - _OIDCGCPCallback, - _OIDCProperties, - _OIDCTestCallback, -) -from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.saslprep import saslprep - -if TYPE_CHECKING: - from pymongo.hello import Hello - from pymongo.pool import Connection - -HAVE_KERBEROS = True -_USE_PRINCIPAL = False -try: - import winkerberos as kerberos # type:ignore[import] - - if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): - _USE_PRINCIPAL = True -except ImportError: - try: - import kerberos # type:ignore[import] - except ImportError: - HAVE_KERBEROS = False - - -MECHANISMS = frozenset( - [ - "GSSAPI", - "MONGODB-CR", - "MONGODB-OIDC", - "MONGODB-X509", - "MONGODB-AWS", - "PLAIN", - "SCRAM-SHA-1", - "SCRAM-SHA-256", - "DEFAULT", - ] -) -"""The authentication mechanisms supported by PyMongo.""" - - -class _Cache: - __slots__ = ("data",) - - _hash_val = hash("_Cache") - - def __init__(self) -> None: - self.data = None - - def __eq__(self, other: object) -> bool: - # Two instances must always compare equal. - if isinstance(other, _Cache): - return True - return NotImplemented - - def __ne__(self, other: object) -> bool: - if isinstance(other, _Cache): - return False - return NotImplemented - - def __hash__(self) -> int: - return self._hash_val - - -MongoCredential = namedtuple( - "MongoCredential", - ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], -) -"""A hashable namedtuple of values used for authentication.""" - - -GSSAPIProperties = namedtuple( - "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] -) -"""Mechanism properties for GSSAPI authentication.""" - - -_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) -"""Mechanism properties for MONGODB-AWS authentication.""" - - -def _build_credentials_tuple( - mech: str, - source: Optional[str], - user: str, - passwd: str, - extra: Mapping[str, Any], - database: Optional[str], -) -> MongoCredential: - """Build and return a mechanism specific credentials tuple.""" - if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: - raise ConfigurationError(f"{mech} requires a username.") - if mech == "GSSAPI": - if source is not None and source != "$external": - raise ValueError("authentication source must be $external or None for GSSAPI") - properties = extra.get("authmechanismproperties", {}) - service_name = properties.get("SERVICE_NAME", "mongodb") - canonicalize = bool(properties.get("CANONICALIZE_HOST_NAME", False)) - service_realm = properties.get("SERVICE_REALM") - props = GSSAPIProperties( - service_name=service_name, - canonicalize_host_name=canonicalize, - service_realm=service_realm, - ) - # Source is always $external. - return MongoCredential(mech, "$external", user, passwd, props, None) - elif mech == "MONGODB-X509": - if passwd is not None: - raise ConfigurationError("Passwords are not supported by MONGODB-X509") - if source is not None and source != "$external": - raise ValueError("authentication source must be $external or None for MONGODB-X509") - # Source is always $external, user can be None. - return MongoCredential(mech, "$external", user, None, None, None) - elif mech == "MONGODB-AWS": - if user is not None and passwd is None: - raise ConfigurationError("username without a password is not supported by MONGODB-AWS") - if source is not None and source != "$external": - raise ConfigurationError( - "authentication source must be $external or None for MONGODB-AWS" - ) - - properties = extra.get("authmechanismproperties", {}) - aws_session_token = properties.get("AWS_SESSION_TOKEN") - aws_props = _AWSProperties(aws_session_token=aws_session_token) - # user can be None for temporary link-local EC2 credentials. - return MongoCredential(mech, "$external", user, passwd, aws_props, None) - elif mech == "MONGODB-OIDC": - properties = extra.get("authmechanismproperties", {}) - callback = properties.get("OIDC_CALLBACK") - human_callback = properties.get("OIDC_HUMAN_CALLBACK") - environ = properties.get("ENVIRONMENT") - token_resource = properties.get("TOKEN_RESOURCE", "") - default_allowed = [ - "*.mongodb.net", - "*.mongodb-dev.net", - "*.mongodb-qa.net", - "*.mongodbgov.net", - "localhost", - "127.0.0.1", - "::1", - ] - allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed) - msg = ( - "authentication with MONGODB-OIDC requires providing either a callback or a environment" - ) - if passwd is not None: - msg = "password is not supported by MONGODB-OIDC" - raise ConfigurationError(msg) - if callback or human_callback: - if environ is not None: - raise ConfigurationError(msg) - if callback and human_callback: - msg = "cannot set both OIDC_CALLBACK and OIDC_HUMAN_CALLBACK" - raise ConfigurationError(msg) - elif environ is not None: - if environ == "test": - if user is not None: - msg = "test environment for MONGODB-OIDC does not support username" - raise ConfigurationError(msg) - callback = _OIDCTestCallback() - elif environ == "azure": - passwd = None - if not token_resource: - raise ConfigurationError( - "Azure environment for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" - ) - callback = _OIDCAzureCallback(token_resource) - elif environ == "gcp": - passwd = None - if not token_resource: - raise ConfigurationError( - "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" - ) - callback = _OIDCGCPCallback(token_resource) - else: - raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}") - else: - raise ConfigurationError(msg) - - oidc_props = _OIDCProperties( - callback=callback, - human_callback=human_callback, - environment=environ, - allowed_hosts=allowed_hosts, - token_resource=token_resource, - username=user, - ) - return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache()) - - elif mech == "PLAIN": - source_database = source or database or "$external" - return MongoCredential(mech, source_database, user, passwd, None, None) - else: - source_database = source or database or "admin" - if passwd is None: - raise ConfigurationError("A password is required.") - return MongoCredential(mech, source_database, user, passwd, None, _Cache()) - - -def _xor(fir: bytes, sec: bytes) -> bytes: - """XOR two byte strings together.""" - return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) - - -def _parse_scram_response(response: bytes) -> Dict[bytes, bytes]: - """Split a scram response into key, value pairs.""" - return dict( - typing.cast(typing.Tuple[bytes, bytes], item.split(b"=", 1)) - for item in response.split(b",") - ) - - -def _authenticate_scram_start( - credentials: MongoCredential, mechanism: str -) -> tuple[bytes, bytes, MutableMapping[str, Any]]: - username = credentials.username - user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") - nonce = standard_b64encode(os.urandom(32)) - first_bare = b"n=" + user + b",r=" + nonce - - cmd = { - "saslStart": 1, - "mechanism": mechanism, - "payload": Binary(b"n,," + first_bare), - "autoAuthorize": 1, - "options": {"skipEmptyExchange": True}, - } - return nonce, first_bare, cmd - - -def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanism: str) -> None: - """Authenticate using SCRAM.""" - username = credentials.username - if mechanism == "SCRAM-SHA-256": - digest = "sha256" - digestmod = hashlib.sha256 - data = saslprep(credentials.password).encode("utf-8") - else: - digest = "sha1" - digestmod = hashlib.sha1 - data = _password_digest(username, credentials.password).encode("utf-8") - source = credentials.source - cache = credentials.cache - - # Make local - _hmac = hmac.HMAC - - ctx = conn.auth_ctx - if ctx and ctx.speculate_succeeded(): - assert isinstance(ctx, _ScramContext) - assert ctx.scram_data is not None - nonce, first_bare = ctx.scram_data - res = ctx.speculative_authenticate - else: - nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) - res = conn.command(source, cmd) - - assert res is not None - server_first = res["payload"] - parsed = _parse_scram_response(server_first) - iterations = int(parsed[b"i"]) - if iterations < 4096: - raise OperationFailure("Server returned an invalid iteration count.") - salt = parsed[b"s"] - rnonce = parsed[b"r"] - if not rnonce.startswith(nonce): - raise OperationFailure("Server returned an invalid nonce.") - - without_proof = b"c=biws,r=" + rnonce - if cache.data: - client_key, server_key, csalt, citerations = cache.data - else: - client_key, server_key, csalt, citerations = None, None, None, None - - # Salt and / or iterations could change for a number of different - # reasons. Either changing invalidates the cache. - if not client_key or salt != csalt or iterations != citerations: - salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) - client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() - server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() - cache.data = (client_key, server_key, salt, iterations) - stored_key = digestmod(client_key).digest() - auth_msg = b",".join((first_bare, server_first, without_proof)) - client_sig = _hmac(stored_key, auth_msg, digestmod).digest() - client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) - client_final = b",".join((without_proof, client_proof)) - - server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) - - cmd = { - "saslContinue": 1, - "conversationId": res["conversationId"], - "payload": Binary(client_final), - } - res = conn.command(source, cmd) - - parsed = _parse_scram_response(res["payload"]) - if not hmac.compare_digest(parsed[b"v"], server_sig): - raise OperationFailure("Server returned an invalid signature.") - - # A third empty challenge may be required if the server does not support - # skipEmptyExchange: SERVER-44857. - if not res["done"]: - cmd = { - "saslContinue": 1, - "conversationId": res["conversationId"], - "payload": Binary(b""), - } - res = conn.command(source, cmd) - if not res["done"]: - raise OperationFailure("SASL conversation failed to complete.") - - -def _password_digest(username: str, password: str) -> str: - """Get a password digest to use for authentication.""" - if not isinstance(password, str): - raise TypeError("password must be an instance of str") - if len(password) == 0: - raise ValueError("password can't be empty") - if not isinstance(username, str): - raise TypeError("username must be an instance of str") - - md5hash = hashlib.md5() # noqa: S324 - data = f"{username}:mongo:{password}" - md5hash.update(data.encode("utf-8")) - return md5hash.hexdigest() - - -def _auth_key(nonce: str, username: str, password: str) -> str: - """Get an auth key to use for authentication.""" - digest = _password_digest(username, password) - md5hash = hashlib.md5() # noqa: S324 - data = f"{nonce}{username}{digest}" - md5hash.update(data.encode("utf-8")) - return md5hash.hexdigest() - - -def _canonicalize_hostname(hostname: str) -> str: - """Canonicalize hostname following MIT-krb5 behavior.""" - # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 - af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( - hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME - )[0] - - try: - name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) - except socket.gaierror: - return canonname.lower() - - return name[0].lower() - - -def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None: - """Authenticate using GSSAPI.""" - if not HAVE_KERBEROS: - raise ConfigurationError( - 'The "kerberos" module must be installed to use GSSAPI authentication.' - ) - - try: - username = credentials.username - password = credentials.password - props = credentials.mechanism_properties - # Starting here and continuing through the while loop below - establish - # the security context. See RFC 4752, Section 3.1, first paragraph. - host = conn.address[0] - if props.canonicalize_host_name: - host = _canonicalize_hostname(host) - service = props.service_name + "@" + host - if props.service_realm is not None: - service = service + "@" + props.service_realm - - if password is not None: - if _USE_PRINCIPAL: - # Note that, though we use unquote_plus for unquoting URI - # options, we use quote here. Microsoft's UrlUnescape (used - # by WinKerberos) doesn't support +. - principal = ":".join((quote(username), quote(password))) - result, ctx = kerberos.authGSSClientInit( - service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG - ) - else: - if "@" in username: - user, domain = username.split("@", 1) - else: - user, domain = username, None - result, ctx = kerberos.authGSSClientInit( - service, - gssflags=kerberos.GSS_C_MUTUAL_FLAG, - user=user, - domain=domain, - password=password, - ) - else: - result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) - - if result != kerberos.AUTH_GSS_COMPLETE: - raise OperationFailure("Kerberos context failed to initialize.") - - try: - # pykerberos uses a weird mix of exceptions and return values - # to indicate errors. - # 0 == continue, 1 == complete, -1 == error - # Only authGSSClientStep can return 0. - if kerberos.authGSSClientStep(ctx, "") != 0: - raise OperationFailure("Unknown kerberos failure in step function.") - - # Start a SASL conversation with mongod/s - # Note: pykerberos deals with base64 encoded byte strings. - # Since mongo accepts base64 strings as the payload we don't - # have to use bson.binary.Binary. - payload = kerberos.authGSSClientResponse(ctx) - cmd = { - "saslStart": 1, - "mechanism": "GSSAPI", - "payload": payload, - "autoAuthorize": 1, - } - response = conn.command("$external", cmd) - - # Limit how many times we loop to catch protocol / library issues - for _ in range(10): - result = kerberos.authGSSClientStep(ctx, str(response["payload"])) - if result == -1: - raise OperationFailure("Unknown kerberos failure in step function.") - - payload = kerberos.authGSSClientResponse(ctx) or "" - - cmd = { - "saslContinue": 1, - "conversationId": response["conversationId"], - "payload": payload, - } - response = conn.command("$external", cmd) - - if result == kerberos.AUTH_GSS_COMPLETE: - break - else: - raise OperationFailure("Kerberos authentication failed to complete.") - - # Once the security context is established actually authenticate. - # See RFC 4752, Section 3.1, last two paragraphs. - if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: - raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") - - if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: - raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") - - payload = kerberos.authGSSClientResponse(ctx) - cmd = { - "saslContinue": 1, - "conversationId": response["conversationId"], - "payload": payload, - } - conn.command("$external", cmd) - - finally: - kerberos.authGSSClientClean(ctx) - - except kerberos.KrbError as exc: - raise OperationFailure(str(exc)) from None - - -def _authenticate_plain(credentials: MongoCredential, conn: Connection) -> None: - """Authenticate using SASL PLAIN (RFC 4616)""" - source = credentials.source - username = credentials.username - password = credentials.password - payload = (f"\x00{username}\x00{password}").encode() - cmd = { - "saslStart": 1, - "mechanism": "PLAIN", - "payload": Binary(payload), - "autoAuthorize": 1, - } - conn.command(source, cmd) - - -def _authenticate_x509(credentials: MongoCredential, conn: Connection) -> None: - """Authenticate using MONGODB-X509.""" - ctx = conn.auth_ctx - if ctx and ctx.speculate_succeeded(): - # MONGODB-X509 is done after the speculative auth step. - return - - cmd = _X509Context(credentials, conn.address).speculate_command() - conn.command("$external", cmd) - - -def _authenticate_mongo_cr(credentials: MongoCredential, conn: Connection) -> None: - """Authenticate using MONGODB-CR.""" - source = credentials.source - username = credentials.username - password = credentials.password - # Get a nonce - response = conn.command(source, {"getnonce": 1}) - nonce = response["nonce"] - key = _auth_key(nonce, username, password) - - # Actually authenticate - query = {"authenticate": 1, "user": username, "nonce": nonce, "key": key} - conn.command(source, query) - - -def _authenticate_default(credentials: MongoCredential, conn: Connection) -> None: - if conn.max_wire_version >= 7: - if conn.negotiated_mechs: - mechs = conn.negotiated_mechs - else: - source = credentials.source - cmd = conn.hello_cmd() - cmd["saslSupportedMechs"] = source + "." + credentials.username - mechs = conn.command(source, cmd, publish_events=False).get("saslSupportedMechs", []) - if "SCRAM-SHA-256" in mechs: - return _authenticate_scram(credentials, conn, "SCRAM-SHA-256") - else: - return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") - else: - return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") - - -_AUTH_MAP: Mapping[str, Callable[..., None]] = { - "GSSAPI": _authenticate_gssapi, - "MONGODB-CR": _authenticate_mongo_cr, - "MONGODB-X509": _authenticate_x509, - "MONGODB-AWS": _authenticate_aws, - "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] - "PLAIN": _authenticate_plain, - "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), - "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), - "DEFAULT": _authenticate_default, -} - - -class _AuthContext: - def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: - self.credentials = credentials - self.speculative_authenticate: Optional[Mapping[str, Any]] = None - self.address = address - - @staticmethod - def from_credentials( - creds: MongoCredential, address: tuple[str, int] - ) -> Optional[_AuthContext]: - spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) - if spec_cls: - return cast(_AuthContext, spec_cls(creds, address)) - return None - - def speculate_command(self) -> Optional[MutableMapping[str, Any]]: - raise NotImplementedError - - def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: - self.speculative_authenticate = hello.speculative_authenticate - - def speculate_succeeded(self) -> bool: - return bool(self.speculative_authenticate) - - -class _ScramContext(_AuthContext): - def __init__( - self, credentials: MongoCredential, address: tuple[str, int], mechanism: str - ) -> None: - super().__init__(credentials, address) - self.scram_data: Optional[tuple[bytes, bytes]] = None - self.mechanism = mechanism - - def speculate_command(self) -> Optional[MutableMapping[str, Any]]: - nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) - # The 'db' field is included only on the speculative command. - cmd["db"] = self.credentials.source - # Save for later use. - self.scram_data = (nonce, first_bare) - return cmd - - -class _X509Context(_AuthContext): - def speculate_command(self) -> MutableMapping[str, Any]: - cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} - if self.credentials.username is not None: - cmd["user"] = self.credentials.username - return cmd - - -class _OIDCContext(_AuthContext): - def speculate_command(self) -> Optional[MutableMapping[str, Any]]: - authenticator = _get_authenticator(self.credentials, self.address) - cmd = authenticator.get_spec_auth_cmd() - if cmd is None: - return None - cmd["db"] = self.credentials.source - return cmd - - -_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { - "MONGODB-X509": _X509Context, - "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), - "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), - "MONGODB-OIDC": _OIDCContext, - "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), -} - - -def authenticate( - credentials: MongoCredential, conn: Connection, reauthenticate: bool = False -) -> None: - """Authenticate connection.""" - mechanism = credentials.mechanism - auth_func = _AUTH_MAP[mechanism] - if mechanism == "MONGODB-OIDC": - _authenticate_oidc(credentials, conn, reauthenticate) - else: - auth_func(credentials, conn) +__doc__ = original_doc diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index bfe2340f0a..fa7f7f297f 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -1,4 +1,4 @@ -# Copyright 2023-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,354 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""MONGODB-OIDC Authentication helpers.""" +"""Re-import of synchronous AuthOIDC API for compatibility.""" from __future__ import annotations -import abc -import os -import threading -import time -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union -from urllib.parse import quote +from pymongo.synchronous.auth_oidc import * # noqa: F403 +from pymongo.synchronous.auth_oidc import __doc__ as original_doc -import bson -from bson.binary import Binary -from pymongo._azure_helpers import _get_azure_response -from pymongo._csot import remaining -from pymongo._gcp_helpers import _get_gcp_response -from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.helpers import _AUTHENTICATION_FAILURE_CODE - -if TYPE_CHECKING: - from pymongo.auth import MongoCredential - from pymongo.pool import Connection - - -@dataclass -class OIDCIdPInfo: - issuer: str - clientId: Optional[str] = field(default=None) - requestScopes: Optional[list[str]] = field(default=None) - - -@dataclass -class OIDCCallbackContext: - timeout_seconds: float - username: str - version: int - refresh_token: Optional[str] = field(default=None) - idp_info: Optional[OIDCIdPInfo] = field(default=None) - - -@dataclass -class OIDCCallbackResult: - access_token: str - expires_in_seconds: Optional[float] = field(default=None) - refresh_token: Optional[str] = field(default=None) - - -class OIDCCallback(abc.ABC): - """A base class for defining OIDC callbacks.""" - - @abc.abstractmethod - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - """Convert the given BSON value into our own type.""" - - -@dataclass -class _OIDCProperties: - callback: Optional[OIDCCallback] = field(default=None) - human_callback: Optional[OIDCCallback] = field(default=None) - environment: Optional[str] = field(default=None) - allowed_hosts: list[str] = field(default_factory=list) - token_resource: Optional[str] = field(default=None) - username: str = "" - - -"""Mechanism properties for MONGODB-OIDC authentication.""" - -TOKEN_BUFFER_MINUTES = 5 -HUMAN_CALLBACK_TIMEOUT_SECONDS = 5 * 60 -CALLBACK_VERSION = 1 -MACHINE_CALLBACK_TIMEOUT_SECONDS = 60 -TIME_BETWEEN_CALLS_SECONDS = 0.1 - - -def _get_authenticator( - credentials: MongoCredential, address: tuple[str, int] -) -> _OIDCAuthenticator: - if credentials.cache.data: - return credentials.cache.data - - # Extract values. - principal_name = credentials.username - properties = credentials.mechanism_properties - - # Validate that the address is allowed. - if not properties.environment: - found = False - allowed_hosts = properties.allowed_hosts - for patt in allowed_hosts: - if patt == address[0]: - found = True - elif patt.startswith("*.") and address[0].endswith(patt[1:]): - found = True - if not found: - raise ConfigurationError( - f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" - ) - - # Get or create the cache data. - credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) - return credentials.cache.data - - -class _OIDCTestCallback(OIDCCallback): - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - token_file = os.environ.get("OIDC_TOKEN_FILE") - if not token_file: - raise RuntimeError( - 'MONGODB-OIDC with an "test" provider requires "OIDC_TOKEN_FILE" to be set' - ) - with open(token_file) as fid: - return OIDCCallbackResult(access_token=fid.read().strip()) - - -class _OIDCAzureCallback(OIDCCallback): - def __init__(self, token_resource: str) -> None: - self.token_resource = quote(token_resource) - - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - resp = _get_azure_response(self.token_resource, context.username, context.timeout_seconds) - return OIDCCallbackResult( - access_token=resp["access_token"], expires_in_seconds=resp["expires_in"] - ) - - -class _OIDCGCPCallback(OIDCCallback): - def __init__(self, token_resource: str) -> None: - self.token_resource = quote(token_resource) - - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - resp = _get_gcp_response(self.token_resource, context.timeout_seconds) - return OIDCCallbackResult(access_token=resp["access_token"]) - - -@dataclass -class _OIDCAuthenticator: - username: str - properties: _OIDCProperties - refresh_token: Optional[str] = field(default=None) - access_token: Optional[str] = field(default=None) - idp_info: Optional[OIDCIdPInfo] = field(default=None) - token_gen_id: int = field(default=0) - lock: threading.Lock = field(default_factory=threading.Lock) - last_call_time: float = field(default=0) - - def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: - """Handle a reauthenticate from the server.""" - # Invalidate the token for the connection. - self._invalidate(conn) - # Call the appropriate auth logic for the callback type. - if self.properties.callback: - return self._authenticate_machine(conn) - return self._authenticate_human(conn) - - def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: - """Handle an initial authenticate request.""" - # First handle speculative auth. - # If it succeeded, we are done. - ctx = conn.auth_ctx - if ctx and ctx.speculate_succeeded(): - resp = ctx.speculative_authenticate - if resp and resp["done"]: - conn.oidc_token_gen_id = self.token_gen_id - return resp - - # If spec auth failed, call the appropriate auth logic for the callback type. - # We cannot assume that the token is invalid, because a proxy may have been - # involved that stripped the speculative auth information. - if self.properties.callback: - return self._authenticate_machine(conn) - return self._authenticate_human(conn) - - def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]: - """Get the appropriate speculative auth command.""" - if not self.access_token: - return None - return self._get_start_command({"jwt": self.access_token}) - - def _authenticate_machine(self, conn: Connection) -> Mapping[str, Any]: - # If there is a cached access token, try to authenticate with it. If - # authentication fails with error code 18, invalidate the access token, - # fetch a new access token, and try to authenticate again. If authentication - # fails for any other reason, raise the error to the user. - if self.access_token: - try: - return self._sasl_start_jwt(conn) - except OperationFailure as e: - if self._is_auth_error(e): - return self._authenticate_machine(conn) - raise - return self._sasl_start_jwt(conn) - - def _authenticate_human(self, conn: Connection) -> Optional[Mapping[str, Any]]: - # If we have a cached access token, try a JwtStepRequest. - # authentication fails with error code 18, invalidate the access token, - # and try to authenticate again. If authentication fails for any other - # reason, raise the error to the user. - if self.access_token: - try: - return self._sasl_start_jwt(conn) - except OperationFailure as e: - if self._is_auth_error(e): - return self._authenticate_human(conn) - raise - - # If we have a cached refresh token, try a JwtStepRequest with that. - # If authentication fails with error code 18, invalidate the access and - # refresh tokens, and try to authenticate again. If authentication fails for - # any other reason, raise the error to the user. - if self.refresh_token: - try: - return self._sasl_start_jwt(conn) - except OperationFailure as e: - if self._is_auth_error(e): - self.refresh_token = None - return self._authenticate_human(conn) - raise - - # Start a new Two-Step SASL conversation. - # Run a PrincipalStepRequest to get the IdpInfo. - cmd = self._get_start_command(None) - start_resp = self._run_command(conn, cmd) - # Attempt to authenticate with a JwtStepRequest. - return self._sasl_continue_jwt(conn, start_resp) - - def _get_access_token(self) -> Optional[str]: - properties = self.properties - cb: Union[None, OIDCCallback] - resp: OIDCCallbackResult - - is_human = properties.human_callback is not None - if is_human and self.idp_info is None: - return None - - if properties.callback: - cb = properties.callback - if properties.human_callback: - cb = properties.human_callback - - prev_token = self.access_token - if prev_token: - return prev_token - - if cb is None and not prev_token: - return None - - if not prev_token and cb is not None: - with self.lock: - # See if the token was changed while we were waiting for the - # lock. - new_token = self.access_token - if new_token != prev_token: - return new_token - - # Ensure that we are waiting a min time between callback invocations. - delta = time.time() - self.last_call_time - if delta < TIME_BETWEEN_CALLS_SECONDS: - time.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) - self.last_call_time = time.time() - - if is_human: - timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS - assert self.idp_info is not None - else: - timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) - context = OIDCCallbackContext( - timeout_seconds=timeout, - version=CALLBACK_VERSION, - refresh_token=self.refresh_token, - idp_info=self.idp_info, - username=self.properties.username, - ) - resp = cb.fetch(context) - if not isinstance(resp, OIDCCallbackResult): - raise ValueError("Callback result must be of type OIDCCallbackResult") - self.refresh_token = resp.refresh_token - self.access_token = resp.access_token - self.token_gen_id += 1 - - return self.access_token - - def _run_command(self, conn: Connection, cmd: MutableMapping[str, Any]) -> Mapping[str, Any]: - try: - return conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] - except OperationFailure as e: - if self._is_auth_error(e): - self._invalidate(conn) - raise - - def _is_auth_error(self, err: Exception) -> bool: - if not isinstance(err, OperationFailure): - return False - return err.code == _AUTHENTICATION_FAILURE_CODE - - def _invalidate(self, conn: Connection) -> None: - # Ignore the invalidation if a token gen id is given and is less than our - # current token gen id. - token_gen_id = conn.oidc_token_gen_id or 0 - if token_gen_id is not None and token_gen_id < self.token_gen_id: - return - self.access_token = None - - def _sasl_continue_jwt( - self, conn: Connection, start_resp: Mapping[str, Any] - ) -> Mapping[str, Any]: - self.access_token = None - self.refresh_token = None - start_payload: dict = bson.decode(start_resp["payload"]) - if "issuer" in start_payload: - self.idp_info = OIDCIdPInfo(**start_payload) - access_token = self._get_access_token() - conn.oidc_token_gen_id = self.token_gen_id - cmd = self._get_continue_command({"jwt": access_token}, start_resp) - return self._run_command(conn, cmd) - - def _sasl_start_jwt(self, conn: Connection) -> Mapping[str, Any]: - access_token = self._get_access_token() - conn.oidc_token_gen_id = self.token_gen_id - cmd = self._get_start_command({"jwt": access_token}) - return self._run_command(conn, cmd) - - def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: - if payload is None: - principal_name = self.username - if principal_name: - payload = {"n": principal_name} - else: - payload = {} - bin_payload = Binary(bson.encode(payload)) - return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} - - def _get_continue_command( - self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] - ) -> MutableMapping[str, Any]: - bin_payload = Binary(bson.encode(payload)) - return { - "saslContinue": 1, - "payload": bin_payload, - "conversationId": start_resp["conversationId"], - } - - -def _authenticate_oidc( - credentials: MongoCredential, conn: Connection, reauthenticate: bool -) -> Optional[Mapping[str, Any]]: - """Authenticate using MONGODB-OIDC.""" - authenticator = _get_authenticator(credentials, conn.address) - if reauthenticate: - return authenticator.reauthenticate(conn) - else: - return authenticator.authenticate(conn) +__doc__ = original_doc diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 300bd88e92..5decc0991f 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -1,489 +1,21 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""Watch changes on a collection, a database, or the entire cluster.""" +"""Re-import of synchronous ChangeStream API for compatibility.""" from __future__ import annotations -import copy -from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union +from pymongo.synchronous.change_stream import * # noqa: F403 +from pymongo.synchronous.change_stream import __doc__ as original_doc -from bson import CodecOptions, _bson_to_dict -from bson.raw_bson import RawBSONDocument -from bson.timestamp import Timestamp -from pymongo import _csot, common -from pymongo.aggregation import ( - _AggregationCommand, - _CollectionAggregationCommand, - _DatabaseAggregationCommand, -) -from pymongo.collation import validate_collation_or_none -from pymongo.command_cursor import CommandCursor -from pymongo.errors import ( - ConnectionFailure, - CursorNotFound, - InvalidOperation, - OperationFailure, - PyMongoError, -) -from pymongo.operations import _Op -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline - -# The change streams spec considers the following server errors from the -# getMore command non-resumable. All other getMore errors are resumable. -_RESUMABLE_GETMORE_ERRORS = frozenset( - [ - 6, # HostUnreachable - 7, # HostNotFound - 89, # NetworkTimeout - 91, # ShutdownInProgress - 189, # PrimarySteppedDown - 262, # ExceededTimeLimit - 9001, # SocketException - 10107, # NotWritablePrimary - 11600, # InterruptedAtShutdown - 11602, # InterruptedDueToReplStateChange - 13435, # NotPrimaryNoSecondaryOk - 13436, # NotPrimaryOrSecondary - 63, # StaleShardVersion - 150, # StaleEpoch - 13388, # StaleConfig - 234, # RetryChangeStream - 133, # FailedToSatisfyReadPreference - ] -) - - -if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.database import Database - from pymongo.mongo_client import MongoClient - from pymongo.pool import Connection - - -def _resumable(exc: PyMongoError) -> bool: - """Return True if given a resumable change stream error.""" - if isinstance(exc, (ConnectionFailure, CursorNotFound)): - return True - if isinstance(exc, OperationFailure): - if exc._max_wire_version is None: - return False - return ( - exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") - ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) - return False - - -class ChangeStream(Generic[_DocumentType]): - """The internal abstract base class for change stream cursors. - - Should not be called directly by application developers. Use - :meth:`pymongo.collection.Collection.watch`, - :meth:`pymongo.database.Database.watch`, or - :meth:`pymongo.mongo_client.MongoClient.watch` instead. - - .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. - """ - - def __init__( - self, - target: Union[ - MongoClient[_DocumentType], Database[_DocumentType], Collection[_DocumentType] - ], - pipeline: Optional[_Pipeline], - full_document: Optional[str], - resume_after: Optional[Mapping[str, Any]], - max_await_time_ms: Optional[int], - batch_size: Optional[int], - collation: Optional[_CollationIn], - start_at_operation_time: Optional[Timestamp], - session: Optional[ClientSession], - start_after: Optional[Mapping[str, Any]], - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - show_expanded_events: Optional[bool] = None, - ) -> None: - if pipeline is None: - pipeline = [] - pipeline = common.validate_list("pipeline", pipeline) - common.validate_string_or_none("full_document", full_document) - validate_collation_or_none(collation) - common.validate_non_negative_integer_or_none("batchSize", batch_size) - - self._decode_custom = False - self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options - if target.codec_options.type_registry._decoder_map: - self._decode_custom = True - # Keep the type registry so that we support encoding custom types - # in the pipeline. - self._target = target.with_options( # type: ignore - codec_options=target.codec_options.with_options(document_class=RawBSONDocument) - ) - else: - self._target = target - - self._pipeline = copy.deepcopy(pipeline) - self._full_document = full_document - self._full_document_before_change = full_document_before_change - self._uses_start_after = start_after is not None - self._uses_resume_after = resume_after is not None - self._resume_token = copy.deepcopy(start_after or resume_after) - self._max_await_time_ms = max_await_time_ms - self._batch_size = batch_size - self._collation = collation - self._start_at_operation_time = start_at_operation_time - self._session = session - self._comment = comment - self._closed = False - self._timeout = self._target._timeout - self._show_expanded_events = show_expanded_events - # Initialize cursor. - self._cursor = self._create_cursor() - - @property - def _aggregation_command_class(self) -> Type[_AggregationCommand]: - """The aggregation command class to be used.""" - raise NotImplementedError - - @property - def _client(self) -> MongoClient: - """The client against which the aggregation commands for - this ChangeStream will be run. - """ - raise NotImplementedError - - def _change_stream_options(self) -> dict[str, Any]: - """Return the options dict for the $changeStream pipeline stage.""" - options: dict[str, Any] = {} - if self._full_document is not None: - options["fullDocument"] = self._full_document - - if self._full_document_before_change is not None: - options["fullDocumentBeforeChange"] = self._full_document_before_change - - resume_token = self.resume_token - if resume_token is not None: - if self._uses_start_after: - options["startAfter"] = resume_token - else: - options["resumeAfter"] = resume_token - elif self._start_at_operation_time is not None: - options["startAtOperationTime"] = self._start_at_operation_time - - if self._show_expanded_events: - options["showExpandedEvents"] = self._show_expanded_events - - return options - - def _command_options(self) -> dict[str, Any]: - """Return the options dict for the aggregation command.""" - options = {} - if self._max_await_time_ms is not None: - options["maxAwaitTimeMS"] = self._max_await_time_ms - if self._batch_size is not None: - options["batchSize"] = self._batch_size - return options - - def _aggregation_pipeline(self) -> list[dict[str, Any]]: - """Return the full aggregation pipeline for this ChangeStream.""" - options = self._change_stream_options() - full_pipeline: list = [{"$changeStream": options}] - full_pipeline.extend(self._pipeline) - return full_pipeline - - def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: - """Callback that caches the postBatchResumeToken or - startAtOperationTime from a changeStream aggregate command response - containing an empty batch of change documents. - - This is implemented as a callback because we need access to the wire - version in order to determine whether to cache this value. - """ - if not result["cursor"]["firstBatch"]: - if "postBatchResumeToken" in result["cursor"]: - self._resume_token = result["cursor"]["postBatchResumeToken"] - elif ( - self._start_at_operation_time is None - and self._uses_resume_after is False - and self._uses_start_after is False - and conn.max_wire_version >= 7 - ): - self._start_at_operation_time = result.get("operationTime") - # PYTHON-2181: informative error on missing operationTime. - if self._start_at_operation_time is None: - raise OperationFailure( - "Expected field 'operationTime' missing from command " - f"response : {result!r}" - ) - - def _run_aggregation_cmd( - self, session: Optional[ClientSession], explicit_session: bool - ) -> CommandCursor: - """Run the full aggregation pipeline for this ChangeStream and return - the corresponding CommandCursor. - """ - cmd = self._aggregation_command_class( - self._target, - CommandCursor, - self._aggregation_pipeline(), - self._command_options(), - explicit_session, - result_processor=self._process_result, - comment=self._comment, - ) - return self._client._retryable_read( - cmd.get_cursor, - self._target._read_preference_for(session), - session, - operation=_Op.AGGREGATE, - ) - - def _create_cursor(self) -> CommandCursor: - with self._client._tmp_session(self._session, close=False) as s: - return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) - - def _resume(self) -> None: - """Reestablish this change stream after a resumable error.""" - try: - self._cursor.close() - except PyMongoError: - pass - self._cursor = self._create_cursor() - - def close(self) -> None: - """Close this ChangeStream.""" - self._closed = True - self._cursor.close() - - def __iter__(self) -> ChangeStream[_DocumentType]: - return self - - @property - def resume_token(self) -> Optional[Mapping[str, Any]]: - """The cached resume token that will be used to resume after the most - recently returned change. - - .. versionadded:: 3.9 - """ - return copy.deepcopy(self._resume_token) - - @_csot.apply - def next(self) -> _DocumentType: - """Advance the cursor. - - This method blocks until the next change document is returned or an - unrecoverable error is raised. This method is used when iterating over - all changes in the cursor. For example:: - - try: - resume_token = None - pipeline = [{'$match': {'operationType': 'insert'}}] - with db.collection.watch(pipeline) as stream: - for insert_change in stream: - print(insert_change) - resume_token = stream.resume_token - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - if resume_token is None: - # There is no usable resume token because there was a - # failure during ChangeStream initialization. - logging.error('...') - else: - # Use the interrupted ChangeStream's resume token to create - # a new ChangeStream. The new stream will continue from the - # last seen insert change without missing any events. - with db.collection.watch( - pipeline, resume_after=resume_token) as stream: - for insert_change in stream: - print(insert_change) - - Raises :exc:`StopIteration` if this ChangeStream is closed. - """ - while self.alive: - doc = self.try_next() - if doc is not None: - return doc - - raise StopIteration - - __next__ = next - - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise - :exc:`StopIteration` and :meth:`try_next` can return ``None``. - - .. versionadded:: 3.8 - """ - return not self._closed - - @_csot.apply - def try_next(self) -> Optional[_DocumentType]: - """Advance the cursor without blocking indefinitely. - - This method returns the next change document without waiting - indefinitely for the next change. For example:: - - with db.collection.watch() as stream: - while stream.alive: - change = stream.try_next() - # Note that the ChangeStream's resume token may be updated - # even when no changes are returned. - print("Current resume token: %r" % (stream.resume_token,)) - if change is not None: - print("Change document: %r" % (change,)) - continue - # We end up here when there are no recent changes. - # Sleep for a while before trying again to avoid flooding - # the server with getMore requests when no changes are - # available. - time.sleep(10) - - If no change document is cached locally then this method runs a single - getMore command. If the getMore yields any documents, the next - document is returned, otherwise, if the getMore returns no documents - (because there have been no changes) then ``None`` is returned. - - :return: The next change document or ``None`` when no document is available - after running a single getMore or when the cursor is closed. - - .. versionadded:: 3.8 - """ - if not self._closed and not self._cursor.alive: - self._resume() - - # Attempt to get the next change with at most one getMore and at most - # one resume attempt. - try: - try: - change = self._cursor._try_next(True) - except PyMongoError as exc: - if not _resumable(exc): - raise - self._resume() - change = self._cursor._try_next(False) - except PyMongoError as exc: - # Close the stream after a fatal error. - if not _resumable(exc) and not exc.timeout: - self.close() - raise - except Exception: - self.close() - raise - - # Check if the cursor was invalidated. - if not self._cursor.alive: - self._closed = True - - # If no changes are available. - if change is None: - # We have either iterated over all documents in the cursor, - # OR the most-recently returned batch is empty. In either case, - # update the cached resume token with the postBatchResumeToken if - # one was returned. We also clear the startAtOperationTime. - if self._cursor._post_batch_resume_token is not None: - self._resume_token = self._cursor._post_batch_resume_token - self._start_at_operation_time = None - return change - - # Else, changes are available. - try: - resume_token = change["_id"] - except KeyError: - self.close() - raise InvalidOperation( - "Cannot provide resume functionality when the resume token is missing." - ) from None - - # If this is the last change document from the current batch, cache the - # postBatchResumeToken. - if not self._cursor._has_next() and self._cursor._post_batch_resume_token: - resume_token = self._cursor._post_batch_resume_token - - # Hereafter, don't use startAfter; instead use resumeAfter. - self._uses_start_after = False - self._uses_resume_after = True - - # Cache the resume token and clear startAtOperationTime. - self._resume_token = resume_token - self._start_at_operation_time = None - - if self._decode_custom: - return _bson_to_dict(change.raw, self._orig_codec_options) - return change - - def __enter__(self) -> ChangeStream[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - -class CollectionChangeStream(ChangeStream[_DocumentType]): - """A change stream that watches changes on a single collection. - - Should not be called directly by application developers. Use - helper method :meth:`pymongo.collection.Collection.watch` instead. - - .. versionadded:: 3.7 - """ - - _target: Collection[_DocumentType] - - @property - def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: - return _CollectionAggregationCommand - - @property - def _client(self) -> MongoClient[_DocumentType]: - return self._target.database.client - - -class DatabaseChangeStream(ChangeStream[_DocumentType]): - """A change stream that watches changes on all collections in a database. - - Should not be called directly by application developers. Use - helper method :meth:`pymongo.database.Database.watch` instead. - - .. versionadded:: 3.7 - """ - - _target: Database[_DocumentType] - - @property - def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: - return _DatabaseAggregationCommand - - @property - def _client(self) -> MongoClient[_DocumentType]: - return self._target.client - - -class ClusterChangeStream(DatabaseChangeStream[_DocumentType]): - """A change stream that watches changes on all collections in the cluster. - - Should not be called directly by application developers. Use - helper method :meth:`pymongo.mongo_client.MongoClient.watch` instead. - - .. versionadded:: 3.7 - """ - - def _change_stream_options(self) -> dict[str, Any]: - options = super()._change_stream_options() - options["allChangesForCluster"] = True - return options +__doc__ = original_doc diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 9c745b11ef..7a4e04453d 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -1,332 +1,21 @@ -# Copyright 2014-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""Tools to parse mongo client options.""" +"""Re-import of synchronous ClientOptions API for compatibility.""" from __future__ import annotations -from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, cast +from pymongo.synchronous.client_options import * # noqa: F403 +from pymongo.synchronous.client_options import __doc__ as original_doc -from bson.codec_options import _parse_codec_options -from pymongo import common -from pymongo.compression_support import CompressionSettings -from pymongo.errors import ConfigurationError -from pymongo.monitoring import _EventListener, _EventListeners -from pymongo.pool import PoolOptions -from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ( - _ServerMode, - make_read_preference, - read_pref_mode_from_name, -) -from pymongo.server_selectors import any_server_selector -from pymongo.ssl_support import get_ssl_context -from pymongo.write_concern import WriteConcern, validate_boolean - -if TYPE_CHECKING: - from bson.codec_options import CodecOptions - from pymongo.auth import MongoCredential - from pymongo.encryption_options import AutoEncryptionOpts - from pymongo.pyopenssl_context import SSLContext - from pymongo.topology_description import _ServerSelector - - -def _parse_credentials( - username: str, password: str, database: Optional[str], options: Mapping[str, Any] -) -> Optional[MongoCredential]: - """Parse authentication credentials.""" - mechanism = options.get("authmechanism", "DEFAULT" if username else None) - source = options.get("authsource") - if username or mechanism: - from pymongo.auth import _build_credentials_tuple - - return _build_credentials_tuple(mechanism, source, username, password, options, database) - return None - - -def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: - """Parse read preference options.""" - if "read_preference" in options: - return options["read_preference"] - - name = options.get("readpreference", "primary") - mode = read_pref_mode_from_name(name) - tags = options.get("readpreferencetags") - max_staleness = options.get("maxstalenessseconds", -1) - return make_read_preference(mode, tags, max_staleness) - - -def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: - """Parse write concern options.""" - concern = options.get("w") - wtimeout = options.get("wtimeoutms") - j = options.get("journal") - fsync = options.get("fsync") - return WriteConcern(concern, wtimeout, j, fsync) - - -def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: - """Parse read concern options.""" - concern = options.get("readconcernlevel") - return ReadConcern(concern) - - -def _parse_ssl_options(options: Mapping[str, Any]) -> tuple[Optional[SSLContext], bool]: - """Parse ssl options.""" - use_tls = options.get("tls") - if use_tls is not None: - validate_boolean("tls", use_tls) - - certfile = options.get("tlscertificatekeyfile") - passphrase = options.get("tlscertificatekeyfilepassword") - ca_certs = options.get("tlscafile") - crlfile = options.get("tlscrlfile") - allow_invalid_certificates = options.get("tlsallowinvalidcertificates", False) - allow_invalid_hostnames = options.get("tlsallowinvalidhostnames", False) - disable_ocsp_endpoint_check = options.get("tlsdisableocspendpointcheck", False) - - enabled_tls_opts = [] - for opt in ( - "tlscertificatekeyfile", - "tlscertificatekeyfilepassword", - "tlscafile", - "tlscrlfile", - ): - # Any non-null value of these options implies tls=True. - if opt in options and options[opt]: - enabled_tls_opts.append(opt) - for opt in ( - "tlsallowinvalidcertificates", - "tlsallowinvalidhostnames", - "tlsdisableocspendpointcheck", - ): - # A value of False for these options implies tls=True. - if opt in options and not options[opt]: - enabled_tls_opts.append(opt) - - if enabled_tls_opts: - if use_tls is None: - # Implicitly enable TLS when one of the tls* options is set. - use_tls = True - elif not use_tls: - # Error since tls is explicitly disabled but a tls option is set. - raise ConfigurationError( - "TLS has not been enabled but the " - "following tls parameters have been set: " - "%s. Please set `tls=True` or remove." % ", ".join(enabled_tls_opts) - ) - - if use_tls: - ctx = get_ssl_context( - certfile, - passphrase, - ca_certs, - crlfile, - allow_invalid_certificates, - allow_invalid_hostnames, - disable_ocsp_endpoint_check, - ) - return ctx, allow_invalid_hostnames - return None, allow_invalid_hostnames - - -def _parse_pool_options( - username: str, password: str, database: Optional[str], options: Mapping[str, Any] -) -> PoolOptions: - """Parse connection pool options.""" - credentials = _parse_credentials(username, password, database, options) - max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) - min_pool_size = options.get("minpoolsize", common.MIN_POOL_SIZE) - max_idle_time_seconds = options.get("maxidletimems", common.MAX_IDLE_TIME_SEC) - if max_pool_size is not None and min_pool_size > max_pool_size: - raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") - connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) - socket_timeout = options.get("sockettimeoutms") - wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) - event_listeners = cast(Optional[Sequence[_EventListener]], options.get("event_listeners")) - appname = options.get("appname") - driver = options.get("driver") - server_api = options.get("server_api") - compression_settings = CompressionSettings( - options.get("compressors", []), options.get("zlibcompressionlevel", -1) - ) - ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) - load_balanced = options.get("loadbalanced") - max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) - return PoolOptions( - max_pool_size, - min_pool_size, - max_idle_time_seconds, - connect_timeout, - socket_timeout, - wait_queue_timeout, - ssl_context, - tls_allow_invalid_hostnames, - _EventListeners(event_listeners), - appname, - driver, - compression_settings, - max_connecting=max_connecting, - server_api=server_api, - load_balanced=load_balanced, - credentials=credentials, - ) - - -class ClientOptions: - """Read only configuration options for a MongoClient. - - Should not be instantiated directly by application developers. Access - a client's options via :attr:`pymongo.mongo_client.MongoClient.options` - instead. - """ - - def __init__( - self, username: str, password: str, database: Optional[str], options: Mapping[str, Any] - ): - self.__options = options - self.__codec_options = _parse_codec_options(options) - self.__direct_connection = options.get("directconnection") - self.__local_threshold_ms = options.get("localthresholdms", common.LOCAL_THRESHOLD_MS) - # self.__server_selection_timeout is in seconds. Must use full name for - # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. - self.__server_selection_timeout = options.get( - "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT - ) - self.__pool_options = _parse_pool_options(username, password, database, options) - self.__read_preference = _parse_read_preference(options) - self.__replica_set_name = options.get("replicaset") - self.__write_concern = _parse_write_concern(options) - self.__read_concern = _parse_read_concern(options) - self.__connect = options.get("connect") - self.__heartbeat_frequency = options.get("heartbeatfrequencyms", common.HEARTBEAT_FREQUENCY) - self.__retry_writes = options.get("retrywrites", common.RETRY_WRITES) - self.__retry_reads = options.get("retryreads", common.RETRY_READS) - self.__server_selector = options.get("server_selector", any_server_selector) - self.__auto_encryption_opts = options.get("auto_encryption_opts") - self.__load_balanced = options.get("loadbalanced") - self.__timeout = options.get("timeoutms") - self.__server_monitoring_mode = options.get( - "servermonitoringmode", common.SERVER_MONITORING_MODE - ) - - @property - def _options(self) -> Mapping[str, Any]: - """The original options used to create this ClientOptions.""" - return self.__options - - @property - def connect(self) -> Optional[bool]: - """Whether to begin discovering a MongoDB topology automatically.""" - return self.__connect - - @property - def codec_options(self) -> CodecOptions: - """A :class:`~bson.codec_options.CodecOptions` instance.""" - return self.__codec_options - - @property - def direct_connection(self) -> Optional[bool]: - """Whether to connect to the deployment in 'Single' topology.""" - return self.__direct_connection - - @property - def local_threshold_ms(self) -> int: - """The local threshold for this instance.""" - return self.__local_threshold_ms - - @property - def server_selection_timeout(self) -> int: - """The server selection timeout for this instance in seconds.""" - return self.__server_selection_timeout - - @property - def server_selector(self) -> _ServerSelector: - return self.__server_selector - - @property - def heartbeat_frequency(self) -> int: - """The monitoring frequency in seconds.""" - return self.__heartbeat_frequency - - @property - def pool_options(self) -> PoolOptions: - """A :class:`~pymongo.pool.PoolOptions` instance.""" - return self.__pool_options - - @property - def read_preference(self) -> _ServerMode: - """A read preference instance.""" - return self.__read_preference - - @property - def replica_set_name(self) -> Optional[str]: - """Replica set name or None.""" - return self.__replica_set_name - - @property - def write_concern(self) -> WriteConcern: - """A :class:`~pymongo.write_concern.WriteConcern` instance.""" - return self.__write_concern - - @property - def read_concern(self) -> ReadConcern: - """A :class:`~pymongo.read_concern.ReadConcern` instance.""" - return self.__read_concern - - @property - def timeout(self) -> Optional[float]: - """The configured timeoutMS converted to seconds, or None. - - .. versionadded:: 4.2 - """ - return self.__timeout - - @property - def retry_writes(self) -> bool: - """If this instance should retry supported write operations.""" - return self.__retry_writes - - @property - def retry_reads(self) -> bool: - """If this instance should retry supported read operations.""" - return self.__retry_reads - - @property - def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: - """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" - return self.__auto_encryption_opts - - @property - def load_balanced(self) -> Optional[bool]: - """True if the client was configured to connect to a load balancer.""" - return self.__load_balanced - - @property - def event_listeners(self) -> list[_EventListeners]: - """The event listeners registered for this client. - - See :mod:`~pymongo.monitoring` for details. - - .. versionadded:: 4.0 - """ - assert self.__pool_options._event_listeners is not None - return self.__pool_options._event_listeners.event_listeners() - - @property - def server_monitoring_mode(self) -> str: - """The configured serverMonitoringMode option. - - .. versionadded:: 4.5 - """ - return self.__server_monitoring_mode +__doc__ = original_doc diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 3efc624c04..0597e8986c 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -1,4 +1,4 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,1144 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Logical sessions for ordering sequential operations. - -.. versionadded:: 3.6 - -Causally Consistent Reads -========================= - -.. code-block:: python - - with client.start_session(causal_consistency=True) as session: - collection = client.db.collection - collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) - secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) - - # A secondary read waits for replication of the write. - secondary_c.find_one({"_id": 1}, session=session) - -If `causal_consistency` is True (the default), read operations that use -the session are causally after previous read and write operations. Using a -causally consistent session, an application can read its own writes and is -guaranteed monotonic reads, even when reading from replica set secondaries. - -.. seealso:: The MongoDB documentation on `causal-consistency `_. - -.. _transactions-ref: - -Transactions -============ - -.. versionadded:: 3.7 - -MongoDB 4.0 adds support for transactions on replica set primaries. A -transaction is associated with a :class:`ClientSession`. To start a transaction -on a session, use :meth:`ClientSession.start_transaction` in a with-statement. -Then, execute an operation within the transaction by passing the session to the -operation: - -.. code-block:: python - - orders = client.db.orders - inventory = client.db.inventory - with client.start_session() as session: - with session.start_transaction(): - orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one( - {"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, - session=session, - ) - -Upon normal completion of ``with session.start_transaction()`` block, the -transaction automatically calls :meth:`ClientSession.commit_transaction`. -If the block exits with an exception, the transaction automatically calls -:meth:`ClientSession.abort_transaction`. - -In general, multi-document transactions only support read/write (CRUD) -operations on existing collections. However, MongoDB 4.4 adds support for -creating collections and indexes with some limitations, including an -insert operation that would result in the creation of a new collection. -For a complete description of all the supported and unsupported operations -see the `MongoDB server's documentation for transactions -`_. - -A session may only have a single active transaction at a time, multiple -transactions on the same session can be executed in sequence. - -Sharded Transactions -^^^^^^^^^^^^^^^^^^^^ - -.. versionadded:: 3.9 - -PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB ->=4.2. Sharded transactions have the same API as replica set transactions. -When running a transaction against a sharded cluster, the session is -pinned to the mongos server selected for the first operation in the -transaction. All subsequent operations that are part of the same transaction -are routed to the same mongos server. When the transaction is completed, by -running either commitTransaction or abortTransaction, the session is unpinned. - -.. seealso:: The MongoDB documentation on `transactions `_. - -.. _snapshot-reads-ref: - -Snapshot Reads -============== - -.. versionadded:: 3.12 - -MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by -passing the ``snapshot`` option to -:meth:`~pymongo.mongo_client.MongoClient.start_session`. -If ``snapshot`` is True, all read operations that use this session read data -from the same snapshot timestamp. The server chooses the latest -majority-committed snapshot timestamp when executing the first read operation -using the session. Subsequent reads on this session read from the same -snapshot timestamp. Snapshot reads are also supported when reading from -replica set secondaries. - -.. code-block:: python - - # Each read using this session reads data from the same point in time. - with client.start_session(snapshot=True) as session: - order = orders.find_one({"sku": "abc123"}, session=session) - inventory = inventory.find_one({"sku": "abc123"}, session=session) - -Snapshot Reads Limitations -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Snapshot reads sessions are incompatible with ``causal_consistency=True``. -Only the following read operations are supported in a snapshot reads session: - -- :meth:`~pymongo.collection.Collection.find` -- :meth:`~pymongo.collection.Collection.find_one` -- :meth:`~pymongo.collection.Collection.aggregate` -- :meth:`~pymongo.collection.Collection.count_documents` -- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections) - -Classes -======= -""" - +"""Re-import of synchronous ClientSession API for compatibility.""" from __future__ import annotations -import collections -import time -import uuid -from collections.abc import Mapping as _Mapping -from typing import ( - TYPE_CHECKING, - Any, - Callable, - ContextManager, - Mapping, - MutableMapping, - NoReturn, - Optional, - Type, - TypeVar, -) - -from bson.binary import Binary -from bson.int64 import Int64 -from bson.timestamp import Timestamp -from pymongo import _csot -from pymongo.cursor import _ConnectionManager -from pymongo.errors import ( - ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure, - PyMongoError, - WTimeoutError, -) -from pymongo.helpers import _RETRYABLE_ERROR_CODES -from pymongo.operations import _Op -from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.server_type import SERVER_TYPE -from pymongo.write_concern import WriteConcern - -if TYPE_CHECKING: - from types import TracebackType - - from pymongo.pool import Connection - from pymongo.server import Server - from pymongo.typings import ClusterTime, _Address - - -class SessionOptions: - """Options for a new :class:`ClientSession`. - - :param causal_consistency: If True, read operations are causally - ordered within the session. Defaults to True when the ``snapshot`` - option is ``False``. - :param default_transaction_options: The default - TransactionOptions to use for transactions started on this session. - :param snapshot: If True, then all reads performed using this - session will read from the same snapshot. This option is incompatible - with ``causal_consistency=True``. Defaults to ``False``. - - .. versionchanged:: 3.12 - Added the ``snapshot`` parameter. - """ - - def __init__( - self, - causal_consistency: Optional[bool] = None, - default_transaction_options: Optional[TransactionOptions] = None, - snapshot: Optional[bool] = False, - ) -> None: - if snapshot: - if causal_consistency: - raise ConfigurationError("snapshot reads do not support causal_consistency=True") - causal_consistency = False - elif causal_consistency is None: - causal_consistency = True - self._causal_consistency = causal_consistency - if default_transaction_options is not None: - if not isinstance(default_transaction_options, TransactionOptions): - raise TypeError( - "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: {!r}".format( - default_transaction_options - ) - ) - self._default_transaction_options = default_transaction_options - self._snapshot = snapshot - - @property - def causal_consistency(self) -> bool: - """Whether causal consistency is configured.""" - return self._causal_consistency - - @property - def default_transaction_options(self) -> Optional[TransactionOptions]: - """The default TransactionOptions to use for transactions started on - this session. - - .. versionadded:: 3.7 - """ - return self._default_transaction_options - - @property - def snapshot(self) -> Optional[bool]: - """Whether snapshot reads are configured. - - .. versionadded:: 3.12 - """ - return self._snapshot - - -class TransactionOptions: - """Options for :meth:`ClientSession.start_transaction`. - - :param read_concern: The - :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. - If ``None`` (the default) the :attr:`read_preference` of - the :class:`MongoClient` is used. - :param write_concern: The - :class:`~pymongo.write_concern.WriteConcern` to use for this - transaction. If ``None`` (the default) the :attr:`read_preference` of - the :class:`MongoClient` is used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. Transactions which read must use - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - :param max_commit_time_ms: The maximum amount of time to allow a - single commitTransaction command to run. This option is an alias for - maxTimeMS option on the commitTransaction command. If ``None`` (the - default) maxTimeMS is not used. - - .. versionchanged:: 3.9 - Added the ``max_commit_time_ms`` option. - - .. versionadded:: 3.7 - """ - - def __init__( - self, - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None, - ) -> None: - self._read_concern = read_concern - self._write_concern = write_concern - self._read_preference = read_preference - self._max_commit_time_ms = max_commit_time_ms - if read_concern is not None: - if not isinstance(read_concern, ReadConcern): - raise TypeError( - "read_concern must be an instance of " - f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" - ) - if write_concern is not None: - if not isinstance(write_concern, WriteConcern): - raise TypeError( - "write_concern must be an instance of " - f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" - ) - if not write_concern.acknowledged: - raise ConfigurationError( - "transactions do not support unacknowledged write concern" - f": {write_concern!r}" - ) - if read_preference is not None: - if not isinstance(read_preference, _ServerMode): - raise TypeError( - f"{read_preference!r} is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." - ) - if max_commit_time_ms is not None: - if not isinstance(max_commit_time_ms, int): - raise TypeError("max_commit_time_ms must be an integer or None") - - @property - def read_concern(self) -> Optional[ReadConcern]: - """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" - return self._read_concern - - @property - def write_concern(self) -> Optional[WriteConcern]: - """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" - return self._write_concern - - @property - def read_preference(self) -> Optional[_ServerMode]: - """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" - return self._read_preference - - @property - def max_commit_time_ms(self) -> Optional[int]: - """The maxTimeMS to use when running a commitTransaction command. - - .. versionadded:: 3.9 - """ - return self._max_commit_time_ms - - -def _validate_session_write_concern( - session: Optional[ClientSession], write_concern: Optional[WriteConcern] -) -> Optional[ClientSession]: - """Validate that an explicit session is not used with an unack'ed write. - - Returns the session to use for the next operation. - """ - if session: - if write_concern is not None and not write_concern.acknowledged: - # For unacknowledged writes without an explicit session, - # drivers SHOULD NOT use an implicit session. If a driver - # creates an implicit session for unacknowledged writes - # without an explicit session, the driver MUST NOT send the - # session ID. - if session._implicit: - return None - else: - raise ConfigurationError( - "Explicit sessions are incompatible with " - f"unacknowledged write concern: {write_concern!r}" - ) - return session - - -class _TransactionContext: - """Internal transaction context manager for start_transaction.""" - - def __init__(self, session: ClientSession): - self.__session = session - - def __enter__(self) -> _TransactionContext: - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - if self.__session.in_transaction: - if exc_val is None: - self.__session.commit_transaction() - else: - self.__session.abort_transaction() - - -class _TxnState: - NONE = 1 - STARTING = 2 - IN_PROGRESS = 3 - COMMITTED = 4 - COMMITTED_EMPTY = 5 - ABORTED = 6 - - -class _Transaction: - """Internal class to hold transaction information in a ClientSession.""" - - def __init__(self, opts: Optional[TransactionOptions], client: MongoClient): - self.opts = opts - self.state = _TxnState.NONE - self.sharded = False - self.pinned_address: Optional[_Address] = None - self.conn_mgr: Optional[_ConnectionManager] = None - self.recovery_token = None - self.attempt = 0 - self.client = client - - def active(self) -> bool: - return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) - - def starting(self) -> bool: - return self.state == _TxnState.STARTING - - @property - def pinned_conn(self) -> Optional[Connection]: - if self.active() and self.conn_mgr: - return self.conn_mgr.conn - return None - - def pin(self, server: Server, conn: Connection) -> None: - self.sharded = True - self.pinned_address = server.description.address - if server.description.server_type == SERVER_TYPE.LoadBalancer: - conn.pin_txn() - self.conn_mgr = _ConnectionManager(conn, False) - - def unpin(self) -> None: - self.pinned_address = None - if self.conn_mgr: - self.conn_mgr.close() - self.conn_mgr = None - - def reset(self) -> None: - self.unpin() - self.state = _TxnState.NONE - self.sharded = False - self.recovery_token = None - self.attempt = 0 - - def __del__(self) -> None: - if self.conn_mgr: - # Reuse the cursor closing machinery to return the socket to the - # pool soon. - self.client._close_cursor_soon(0, None, self.conn_mgr) - self.conn_mgr = None - - -def _reraise_with_unknown_commit(exc: Any) -> NoReturn: - """Re-raise an exception with the UnknownTransactionCommitResult label.""" - exc._add_error_label("UnknownTransactionCommitResult") - raise - - -def _max_time_expired_error(exc: PyMongoError) -> bool: - """Return true if exc is a MaxTimeMSExpired error.""" - return isinstance(exc, OperationFailure) and exc.code == 50 - - -# From the transactions spec, all the retryable writes errors plus -# WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( - [ - 64, # WriteConcernFailed - 50, # MaxTimeMSExpired - ] -) - -# From the Convenient API for Transactions spec, with_transaction must -# halt retries after 120 seconds. -# This limit is non-configurable and was chosen to be twice the 60 second -# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter. -_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 - - -def _within_time_limit(start_time: float) -> bool: - """Are we within the with_transaction retry limit?""" - return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT - - -_T = TypeVar("_T") - -if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient - - -class ClientSession: - """A session for ordering sequential operations. - - :class:`ClientSession` instances are **not thread-safe or fork-safe**. - They can only be used by one thread or process at a time. A single - :class:`ClientSession` cannot be used to run multiple operations - concurrently. - - Should not be initialized directly by application developers - to create a - :class:`ClientSession`, call - :meth:`~pymongo.mongo_client.MongoClient.start_session`. - """ - - def __init__( - self, - client: MongoClient, - server_session: Any, - options: SessionOptions, - implicit: bool, - ) -> None: - # A MongoClient, a _ServerSession, a SessionOptions, and a set. - self._client: MongoClient = client - self._server_session = server_session - self._options = options - self._cluster_time: Optional[Mapping[str, Any]] = None - self._operation_time: Optional[Timestamp] = None - self._snapshot_time = None - # Is this an implicitly created session? - self._implicit = implicit - self._transaction = _Transaction(None, client) - - def end_session(self) -> None: - """Finish this session. If a transaction has started, abort it. - - It is an error to use the session after the session has ended. - """ - self._end_session(lock=True) - - def _end_session(self, lock: bool) -> None: - if self._server_session is not None: - try: - if self.in_transaction: - self.abort_transaction() - # It's possible we're still pinned here when the transaction - # is in the committed state when the session is discarded. - self._unpin() - finally: - self._client._return_server_session(self._server_session, lock) - self._server_session = None - - def _check_ended(self) -> None: - if self._server_session is None: - raise InvalidOperation("Cannot use ended session") - - def __enter__(self) -> ClientSession: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self._end_session(lock=True) - - @property - def client(self) -> MongoClient: - """The :class:`~pymongo.mongo_client.MongoClient` this session was - created from. - """ - return self._client - - @property - def options(self) -> SessionOptions: - """The :class:`SessionOptions` this session was created with.""" - return self._options - - @property - def session_id(self) -> Mapping[str, Any]: - """A BSON document, the opaque server session identifier.""" - self._check_ended() - self._materialize(self._client.topology_description.logical_session_timeout_minutes) - return self._server_session.session_id - - @property - def _transaction_id(self) -> Int64: - """The current transaction id for the underlying server session.""" - self._materialize(self._client.topology_description.logical_session_timeout_minutes) - return self._server_session.transaction_id - - @property - def cluster_time(self) -> Optional[ClusterTime]: - """The cluster time returned by the last operation executed - in this session. - """ - return self._cluster_time - - @property - def operation_time(self) -> Optional[Timestamp]: - """The operation time returned by the last operation executed - in this session. - """ - return self._operation_time - - def _inherit_option(self, name: str, val: _T) -> _T: - """Return the inherited TransactionOption value.""" - if val: - return val - txn_opts = self.options.default_transaction_options - parent_val = txn_opts and getattr(txn_opts, name) - if parent_val: - return parent_val - return getattr(self.client, name) - - def with_transaction( - self, - callback: Callable[[ClientSession], _T], - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None, - ) -> _T: - """Execute a callback in a transaction. - - This method starts a transaction on this session, executes ``callback`` - once, and then commits the transaction. For example:: - - def callback(session): - orders = session.client.db.orders - inventory = session.client.db.inventory - orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, session=session) - - with client.start_session() as session: - session.with_transaction(callback) - - To pass arbitrary arguments to the ``callback``, wrap your callable - with a ``lambda`` like this:: - - def callback(session, custom_arg, custom_kwarg=None): - # Transaction operations... - - with client.start_session() as session: - session.with_transaction( - lambda s: callback(s, "custom_arg", custom_kwarg=1)) - - In the event of an exception, ``with_transaction`` may retry the commit - or the entire transaction, therefore ``callback`` may be invoked - multiple times by a single call to ``with_transaction``. Developers - should be mindful of this possibility when writing a ``callback`` that - modifies application state or has any other side-effects. - Note that even when the ``callback`` is invoked multiple times, - ``with_transaction`` ensures that the transaction will be committed - at-most-once on the server. - - The ``callback`` should not attempt to start new transactions, but - should simply run operations meant to be contained within a - transaction. The ``callback`` should also not commit the transaction; - this is handled automatically by ``with_transaction``. If the - ``callback`` does commit or abort the transaction without error, - however, ``with_transaction`` will return without taking further - action. - - :class:`ClientSession` instances are **not thread-safe or fork-safe**. - Consequently, the ``callback`` must not attempt to execute multiple - operations concurrently. - - When ``callback`` raises an exception, ``with_transaction`` - automatically aborts the current transaction. When ``callback`` or - :meth:`~ClientSession.commit_transaction` raises an exception that - includes the ``"TransientTransactionError"`` error label, - ``with_transaction`` starts a new transaction and re-executes - the ``callback``. - - When :meth:`~ClientSession.commit_transaction` raises an exception with - the ``"UnknownTransactionCommitResult"`` error label, - ``with_transaction`` retries the commit until the result of the - transaction is known. - - This method will cease retrying after 120 seconds has elapsed. This - timeout is not configurable and any exception raised by the - ``callback`` or by :meth:`ClientSession.commit_transaction` after the - timeout is reached will be re-raised. Applications that desire a - different timeout duration should not use this method. - - :param callback: The callable ``callback`` to run inside a transaction. - The callable must accept a single argument, this session. Note, - under certain error conditions the callback may be run multiple - times. - :param read_concern: The - :class:`~pymongo.read_concern.ReadConcern` to use for this - transaction. - :param write_concern: The - :class:`~pymongo.write_concern.WriteConcern` to use for this - transaction. - :param read_preference: The read preference to use for this - transaction. If ``None`` (the default) the :attr:`read_preference` - of this :class:`Database` is used. See - :mod:`~pymongo.read_preferences` for options. - - :return: The return value of the ``callback``. - - .. versionadded:: 3.9 - """ - start_time = time.monotonic() - while True: - self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) - try: - ret = callback(self) - except Exception as exc: - if self.in_transaction: - self.abort_transaction() - if ( - isinstance(exc, PyMongoError) - and exc.has_error_label("TransientTransactionError") - and _within_time_limit(start_time) - ): - # Retry the entire transaction. - continue - raise - - if not self.in_transaction: - # Assume callback intentionally ended the transaction. - return ret - - while True: - try: - self.commit_transaction() - except PyMongoError as exc: - if ( - exc.has_error_label("UnknownTransactionCommitResult") - and _within_time_limit(start_time) - and not _max_time_expired_error(exc) - ): - # Retry the commit. - continue - - if exc.has_error_label("TransientTransactionError") and _within_time_limit( - start_time - ): - # Retry the entire transaction. - break - raise - - # Commit succeeded. - return ret - - def start_transaction( - self, - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None, - ) -> ContextManager: - """Start a multi-statement transaction. - - Takes the same arguments as :class:`TransactionOptions`. - - .. versionchanged:: 3.9 - Added the ``max_commit_time_ms`` option. - - .. versionadded:: 3.7 - """ - self._check_ended() - - if self.options.snapshot: - raise InvalidOperation("Transactions are not supported in snapshot sessions") - - if self.in_transaction: - raise InvalidOperation("Transaction already in progress") - - read_concern = self._inherit_option("read_concern", read_concern) - write_concern = self._inherit_option("write_concern", write_concern) - read_preference = self._inherit_option("read_preference", read_preference) - if max_commit_time_ms is None: - opts = self.options.default_transaction_options - if opts: - max_commit_time_ms = opts.max_commit_time_ms - - self._transaction.opts = TransactionOptions( - read_concern, write_concern, read_preference, max_commit_time_ms - ) - self._transaction.reset() - self._transaction.state = _TxnState.STARTING - self._start_retryable_write() - return _TransactionContext(self) - - def commit_transaction(self) -> None: - """Commit a multi-statement transaction. - - .. versionadded:: 3.7 - """ - self._check_ended() - state = self._transaction.state - if state is _TxnState.NONE: - raise InvalidOperation("No transaction started") - elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY): - # Server transaction was never started, no need to send a command. - self._transaction.state = _TxnState.COMMITTED_EMPTY - return - elif state is _TxnState.ABORTED: - raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction") - elif state is _TxnState.COMMITTED: - # We're explicitly retrying the commit, move the state back to - # "in progress" so that in_transaction returns true. - self._transaction.state = _TxnState.IN_PROGRESS - - try: - self._finish_transaction_with_retry("commitTransaction") - except ConnectionFailure as exc: - # We do not know if the commit was successfully applied on the - # server or if it satisfied the provided write concern, set the - # unknown commit error label. - exc._remove_error_label("TransientTransactionError") - _reraise_with_unknown_commit(exc) - except WTimeoutError as exc: - # We do not know if the commit has satisfied the provided write - # concern, add the unknown commit error label. - _reraise_with_unknown_commit(exc) - except OperationFailure as exc: - if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES: - # The server reports errorLabels in the case. - raise - # We do not know if the commit was successfully applied on the - # server or if it satisfied the provided write concern, set the - # unknown commit error label. - _reraise_with_unknown_commit(exc) - finally: - self._transaction.state = _TxnState.COMMITTED - - def abort_transaction(self) -> None: - """Abort a multi-statement transaction. - - .. versionadded:: 3.7 - """ - self._check_ended() - - state = self._transaction.state - if state is _TxnState.NONE: - raise InvalidOperation("No transaction started") - elif state is _TxnState.STARTING: - # Server transaction was never started, no need to send a command. - self._transaction.state = _TxnState.ABORTED - return - elif state is _TxnState.ABORTED: - raise InvalidOperation("Cannot call abortTransaction twice") - elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): - raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") - - try: - self._finish_transaction_with_retry("abortTransaction") - except (OperationFailure, ConnectionFailure): - # The transactions spec says to ignore abortTransaction errors. - pass - finally: - self._transaction.state = _TxnState.ABORTED - self._unpin() - - def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: - """Run commit or abort with one retry after any retryable error. - - :param command_name: Either "commitTransaction" or "abortTransaction". - """ - - def func( - _session: Optional[ClientSession], conn: Connection, _retryable: bool - ) -> dict[str, Any]: - return self._finish_transaction(conn, command_name) - - return self._client._retry_internal(func, self, None, retryable=True, operation=_Op.ABORT) - - def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]: - self._transaction.attempt += 1 - opts = self._transaction.opts - assert opts - wc = opts.write_concern - cmd = {command_name: 1} - if command_name == "commitTransaction": - if opts.max_commit_time_ms and _csot.get_timeout() is None: - cmd["maxTimeMS"] = opts.max_commit_time_ms - - # Transaction spec says that after the initial commit attempt, - # subsequent commitTransaction commands should be upgraded to use - # w:"majority" and set a default value of 10 seconds for wtimeout. - if self._transaction.attempt > 1: - assert wc - wc_doc = wc.document - wc_doc["w"] = "majority" - wc_doc.setdefault("wtimeout", 10000) - wc = WriteConcern(**wc_doc) - - if self._transaction.recovery_token: - cmd["recoveryToken"] = self._transaction.recovery_token - - return self._client.admin._command( - conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True - ) - - def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: - """Internal cluster time helper.""" - if self._cluster_time is None: - self._cluster_time = cluster_time - elif cluster_time is not None: - if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: - self._cluster_time = cluster_time - - def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: - """Update the cluster time for this session. - - :param cluster_time: The - :data:`~pymongo.client_session.ClientSession.cluster_time` from - another `ClientSession` instance. - """ - if not isinstance(cluster_time, _Mapping): - raise TypeError("cluster_time must be a subclass of collections.Mapping") - if not isinstance(cluster_time.get("clusterTime"), Timestamp): - raise ValueError("Invalid cluster_time") - self._advance_cluster_time(cluster_time) - - def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: - """Internal operation time helper.""" - if self._operation_time is None: - self._operation_time = operation_time - elif operation_time is not None: - if operation_time > self._operation_time: - self._operation_time = operation_time - - def advance_operation_time(self, operation_time: Timestamp) -> None: - """Update the operation time for this session. - - :param operation_time: The - :data:`~pymongo.client_session.ClientSession.operation_time` from - another `ClientSession` instance. - """ - if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") - self._advance_operation_time(operation_time) - - def _process_response(self, reply: Mapping[str, Any]) -> None: - """Process a response to a command that was run with this session.""" - self._advance_cluster_time(reply.get("$clusterTime")) - self._advance_operation_time(reply.get("operationTime")) - if self._options.snapshot and self._snapshot_time is None: - if "cursor" in reply: - ct = reply["cursor"].get("atClusterTime") - else: - ct = reply.get("atClusterTime") - self._snapshot_time = ct - if self.in_transaction and self._transaction.sharded: - recovery_token = reply.get("recoveryToken") - if recovery_token: - self._transaction.recovery_token = recovery_token - - @property - def has_ended(self) -> bool: - """True if this session is finished.""" - return self._server_session is None - - @property - def in_transaction(self) -> bool: - """True if this session has an active multi-statement transaction. - - .. versionadded:: 3.10 - """ - return self._transaction.active() - - @property - def _starting_transaction(self) -> bool: - """True if this session is starting a multi-statement transaction.""" - return self._transaction.starting() - - @property - def _pinned_address(self) -> Optional[_Address]: - """The mongos address this transaction was created on.""" - if self._transaction.active(): - return self._transaction.pinned_address - return None - - @property - def _pinned_connection(self) -> Optional[Connection]: - """The connection this transaction was started on.""" - return self._transaction.pinned_conn - - def _pin(self, server: Server, conn: Connection) -> None: - """Pin this session to the given Server or to the given connection.""" - self._transaction.pin(server, conn) - - def _unpin(self) -> None: - """Unpin this session from any pinned Server.""" - self._transaction.unpin() - - def _txn_read_preference(self) -> Optional[_ServerMode]: - """Return read preference of this transaction or None.""" - if self.in_transaction: - assert self._transaction.opts - return self._transaction.opts.read_preference - return None - - def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: - if isinstance(self._server_session, _EmptyServerSession): - old = self._server_session - self._server_session = self._client._topology.get_server_session( - logical_session_timeout_minutes - ) - if old.started_retryable_write: - self._server_session.inc_transaction_id() - - def _apply_to( - self, - command: MutableMapping[str, Any], - is_retryable: bool, - read_preference: _ServerMode, - conn: Connection, - ) -> None: - if not conn.supports_sessions: - if not self._implicit: - raise ConfigurationError("Sessions are not supported by this MongoDB deployment") - return - self._check_ended() - self._materialize(conn.logical_session_timeout_minutes) - if self.options.snapshot: - self._update_read_concern(command, conn) - - self._server_session.last_use = time.monotonic() - command["lsid"] = self._server_session.session_id - - if is_retryable: - command["txnNumber"] = self._server_session.transaction_id - return - - if self.in_transaction: - if read_preference != ReadPreference.PRIMARY: - raise InvalidOperation( - f"read preference in a transaction must be primary, not: {read_preference!r}" - ) - - if self._transaction.state == _TxnState.STARTING: - # First command begins a new transaction. - self._transaction.state = _TxnState.IN_PROGRESS - command["startTransaction"] = True - - assert self._transaction.opts - if self._transaction.opts.read_concern: - rc = self._transaction.opts.read_concern.document - if rc: - command["readConcern"] = rc - self._update_read_concern(command, conn) - - command["txnNumber"] = self._server_session.transaction_id - command["autocommit"] = False - - def _start_retryable_write(self) -> None: - self._check_ended() - self._server_session.inc_transaction_id() - - def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: Connection) -> None: - if self.options.causal_consistency and self.operation_time is not None: - cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time - if self.options.snapshot: - if conn.max_wire_version < 13: - raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") - rc = cmd.setdefault("readConcern", {}) - rc["level"] = "snapshot" - if self._snapshot_time is not None: - rc["atClusterTime"] = self._snapshot_time - - def __copy__(self) -> NoReturn: - raise TypeError("A ClientSession cannot be copied, create a new session instead") - - -class _EmptyServerSession: - __slots__ = "dirty", "started_retryable_write" - - def __init__(self) -> None: - self.dirty = False - self.started_retryable_write = False - - def mark_dirty(self) -> None: - self.dirty = True - - def inc_transaction_id(self) -> None: - self.started_retryable_write = True - - -class _ServerSession: - def __init__(self, generation: int): - # Ensure id is type 4, regardless of CodecOptions.uuid_representation. - self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} - self.last_use = time.monotonic() - self._transaction_id = 0 - self.dirty = False - self.generation = generation - - def mark_dirty(self) -> None: - """Mark this session as dirty. - - A server session is marked dirty when a command fails with a network - error. Dirty sessions are later discarded from the server session pool. - """ - self.dirty = True - - def timed_out(self, session_timeout_minutes: Optional[int]) -> bool: - if session_timeout_minutes is None: - return False - - idle_seconds = time.monotonic() - self.last_use - - # Timed out if we have less than a minute to live. - return idle_seconds > (session_timeout_minutes - 1) * 60 - - @property - def transaction_id(self) -> Int64: - """Positive 64-bit integer.""" - return Int64(self._transaction_id) - - def inc_transaction_id(self) -> None: - self._transaction_id += 1 - - -class _ServerSessionPool(collections.deque): - """Pool of _ServerSession objects. - - This class is not thread-safe, access it while holding the Topology lock. - """ - - def __init__(self, *args: Any, **kwargs: Any): - super().__init__(*args, **kwargs) - self.generation = 0 - - def reset(self) -> None: - self.generation += 1 - self.clear() - - def pop_all(self) -> list[_ServerSession]: - ids = [] - while self: - ids.append(self.pop().session_id) - return ids - - def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: - # Although the Driver Sessions Spec says we only clear stale sessions - # in return_server_session, PyMongo can't take a lock when returning - # sessions from a __del__ method (like in Cursor.__die), so it can't - # clear stale sessions there. In case many sessions were returned via - # __del__, check for stale sessions here too. - self._clear_stale(session_timeout_minutes) - - # The most recently used sessions are on the left. - while self: - s = self.popleft() - if not s.timed_out(session_timeout_minutes): - return s - - return _ServerSession(self.generation) - - def return_server_session( - self, server_session: _ServerSession, session_timeout_minutes: Optional[int] - ) -> None: - if session_timeout_minutes is not None: - self._clear_stale(session_timeout_minutes) - if server_session.timed_out(session_timeout_minutes): - return - self.return_server_session_no_lock(server_session) - - def return_server_session_no_lock(self, server_session: _ServerSession) -> None: - # Discard sessions from an old pool to avoid duplicate sessions in the - # child process after a fork. - if server_session.generation == self.generation and not server_session.dirty: - self.appendleft(server_session) +from pymongo.synchronous.client_session import * # noqa: F403 +from pymongo.synchronous.client_session import __doc__ as original_doc - def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: - # Clear stale sessions. The least recently used are on the right. - while self: - if self[-1].timed_out(session_timeout_minutes): - self.pop() - else: - # The remaining sessions also haven't timed out. - break +__doc__ = original_doc diff --git a/pymongo/collation.py b/pymongo/collation.py index 971628f4ec..b129a04512 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -1,4 +1,4 @@ -# Copyright 2016 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,213 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with `collations`_. - -.. _collations: https://www.mongodb.com/docs/manual/reference/collation/ -""" +"""Re-import of synchronous Collation API for compatibility.""" from __future__ import annotations -from typing import Any, Mapping, Optional, Union - -from pymongo import common -from pymongo.write_concern import validate_boolean - - -class CollationStrength: - """ - An enum that defines values for `strength` on a - :class:`~pymongo.collation.Collation`. - """ - - PRIMARY = 1 - """Differentiate base (unadorned) characters.""" - - SECONDARY = 2 - """Differentiate character accents.""" - - TERTIARY = 3 - """Differentiate character case.""" - - QUATERNARY = 4 - """Differentiate words with and without punctuation.""" - - IDENTICAL = 5 - """Differentiate unicode code point (characters are exactly identical).""" - - -class CollationAlternate: - """ - An enum that defines values for `alternate` on a - :class:`~pymongo.collation.Collation`. - """ - - NON_IGNORABLE = "non-ignorable" - """Spaces and punctuation are treated as base characters.""" - - SHIFTED = "shifted" - """Spaces and punctuation are *not* considered base characters. - - Spaces and punctuation are distinguished regardless when the - :class:`~pymongo.collation.Collation` strength is at least - :data:`~pymongo.collation.CollationStrength.QUATERNARY`. - - """ - - -class CollationMaxVariable: - """ - An enum that defines values for `max_variable` on a - :class:`~pymongo.collation.Collation`. - """ - - PUNCT = "punct" - """Both punctuation and spaces are ignored.""" - - SPACE = "space" - """Spaces alone are ignored.""" - - -class CollationCaseFirst: - """ - An enum that defines values for `case_first` on a - :class:`~pymongo.collation.Collation`. - """ - - UPPER = "upper" - """Sort uppercase characters first.""" - - LOWER = "lower" - """Sort lowercase characters first.""" - - OFF = "off" - """Default for locale or collation strength.""" - - -class Collation: - """Collation - - :param locale: (string) The locale of the collation. This should be a string - that identifies an `ICU locale ID` exactly. For example, ``en_US`` is - valid, but ``en_us`` and ``en-US`` are not. Consult the MongoDB - documentation for a list of supported locales. - :param caseLevel: (optional) If ``True``, turn on case sensitivity if - `strength` is 1 or 2 (case sensitivity is implied if `strength` is - greater than 2). Defaults to ``False``. - :param caseFirst: (optional) Specify that either uppercase or lowercase - characters take precedence. Must be one of the following values: - - * :data:`~CollationCaseFirst.UPPER` - * :data:`~CollationCaseFirst.LOWER` - * :data:`~CollationCaseFirst.OFF` (the default) - - :param strength: Specify the comparison strength. This is also - known as the ICU comparison level. This must be one of the following - values: - - * :data:`~CollationStrength.PRIMARY` - * :data:`~CollationStrength.SECONDARY` - * :data:`~CollationStrength.TERTIARY` (the default) - * :data:`~CollationStrength.QUATERNARY` - * :data:`~CollationStrength.IDENTICAL` - - Each successive level builds upon the previous. For example, a - `strength` of :data:`~CollationStrength.SECONDARY` differentiates - characters based both on the unadorned base character and its accents. - - :param numericOrdering: If ``True``, order numbers numerically - instead of in collation order (defaults to ``False``). - :param alternate: Specify whether spaces and punctuation are - considered base characters. This must be one of the following values: - - * :data:`~CollationAlternate.NON_IGNORABLE` (the default) - * :data:`~CollationAlternate.SHIFTED` - - :param maxVariable: When `alternate` is - :data:`~CollationAlternate.SHIFTED`, this option specifies what - characters may be ignored. This must be one of the following values: - - * :data:`~CollationMaxVariable.PUNCT` (the default) - * :data:`~CollationMaxVariable.SPACE` - - :param normalization: If ``True``, normalizes text into Unicode - NFD. Defaults to ``False``. - :param backwards: If ``True``, accents on characters are - considered from the back of the word to the front, as it is done in some - French dictionary ordering traditions. Defaults to ``False``. - :param kwargs: Keyword arguments supplying any additional options - to be sent with this Collation object. - - .. versionadded: 3.4 - - """ - - __slots__ = ("__document",) - - def __init__( - self, - locale: str, - caseLevel: Optional[bool] = None, - caseFirst: Optional[str] = None, - strength: Optional[int] = None, - numericOrdering: Optional[bool] = None, - alternate: Optional[str] = None, - maxVariable: Optional[str] = None, - normalization: Optional[bool] = None, - backwards: Optional[bool] = None, - **kwargs: Any, - ) -> None: - locale = common.validate_string("locale", locale) - self.__document: dict[str, Any] = {"locale": locale} - if caseLevel is not None: - self.__document["caseLevel"] = validate_boolean("caseLevel", caseLevel) - if caseFirst is not None: - self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) - if strength is not None: - self.__document["strength"] = common.validate_integer("strength", strength) - if numericOrdering is not None: - self.__document["numericOrdering"] = validate_boolean( - "numericOrdering", numericOrdering - ) - if alternate is not None: - self.__document["alternate"] = common.validate_string("alternate", alternate) - if maxVariable is not None: - self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) - if normalization is not None: - self.__document["normalization"] = validate_boolean("normalization", normalization) - if backwards is not None: - self.__document["backwards"] = validate_boolean("backwards", backwards) - self.__document.update(kwargs) - - @property - def document(self) -> dict[str, Any]: - """The document representation of this collation. - - .. note:: - :class:`Collation` is immutable. Mutating the value of - :attr:`document` does not mutate this :class:`Collation`. - """ - return self.__document.copy() - - def __repr__(self) -> str: - document = self.document - return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Collation): - return self.document == other.document - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - +from pymongo.synchronous.collation import * # noqa: F403 +from pymongo.synchronous.collation import __doc__ as original_doc -def validate_collation_or_none( - value: Optional[Union[Mapping[str, Any], Collation]] -) -> Optional[dict[str, Any]]: - if value is None: - return None - if isinstance(value, Collation): - return value.document - if isinstance(value, dict): - return value - raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") +__doc__ = original_doc diff --git a/pymongo/collection.py b/pymongo/collection.py index ddfe9f1df8..c7427f9b6e 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1,4 +1,4 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,3472 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Collection level utilities for Mongo.""" +"""Re-import of synchronous Collection API for compatibility.""" from __future__ import annotations -from collections import abc -from typing import ( - TYPE_CHECKING, - Any, - Callable, - ContextManager, - Generic, - Iterable, - Iterator, - Mapping, - MutableMapping, - NoReturn, - Optional, - Sequence, - Type, - TypeVar, - Union, - cast, -) +from pymongo.synchronous.collection import * # noqa: F403 +from pymongo.synchronous.collection import __doc__ as original_doc -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions -from bson.objectid import ObjectId -from bson.raw_bson import RawBSONDocument -from bson.son import SON -from bson.timestamp import Timestamp -from pymongo import ASCENDING, _csot, common, helpers, message -from pymongo.aggregation import ( - _CollectionAggregationCommand, - _CollectionRawAggregationCommand, -) -from pymongo.bulk import _Bulk -from pymongo.change_stream import CollectionChangeStream -from pymongo.collation import validate_collation_or_none -from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.common import _ecoc_coll_name, _esc_coll_name -from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import ( - ConfigurationError, - InvalidName, - InvalidOperation, - OperationFailure, -) -from pymongo.helpers import _check_write_command_response -from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import ( - DeleteMany, - DeleteOne, - IndexModel, - InsertOne, - ReplaceOne, - SearchIndexModel, - UpdateMany, - UpdateOne, - _IndexKeyHint, - _IndexList, - _Op, -) -from pymongo.read_concern import DEFAULT_READ_CONCERN, ReadConcern -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.results import ( - BulkWriteResult, - DeleteResult, - InsertManyResult, - InsertOneResult, - UpdateResult, -) -from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean - -T = TypeVar("T") - -_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} - - -_WriteOp = Union[ - InsertOne[_DocumentType], - DeleteOne, - DeleteMany, - ReplaceOne[_DocumentType], - UpdateOne, - UpdateMany, -] - - -class ReturnDocument: - """An enum used with - :meth:`~pymongo.collection.Collection.find_one_and_replace` and - :meth:`~pymongo.collection.Collection.find_one_and_update`. - """ - - BEFORE = False - """Return the original document before it was updated/replaced, or - ``None`` if no document matches the query. - """ - AFTER = True - """Return the updated/replaced or inserted document.""" - - -if TYPE_CHECKING: - from pymongo.aggregation import _AggregationCommand - from pymongo.client_session import ClientSession - from pymongo.collation import Collation - from pymongo.database import Database - from pymongo.pool import Connection - from pymongo.server import Server - - -class Collection(common.BaseObject, Generic[_DocumentType]): - """A Mongo collection.""" - - def __init__( - self, - database: Database[_DocumentType], - name: str, - create: Optional[bool] = False, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - session: Optional[ClientSession] = None, - **kwargs: Any, - ) -> None: - """Get / create a Mongo collection. - - Raises :class:`TypeError` if `name` is not an instance of - :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is - not a valid collection name. Any additional keyword arguments will be used - as options passed to the create command. See - :meth:`~pymongo.database.Database.create_collection` for valid - options. - - If `create` is ``True``, `collation` is specified, or any additional - keyword arguments are present, a ``create`` command will be - sent, using ``session`` if specified. Otherwise, a ``create`` command - will not be sent and the collection will be created implicitly on first - use. The optional ``session`` argument is *only* used for the ``create`` - command, it is not associated with the collection afterward. - - :param database: the database to get a collection from - :param name: the name of the collection to get - :param create: if ``True``, force collection - creation even without options being set - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) database.codec_options is used. - :param read_preference: The read preference to use. If - ``None`` (the default) database.read_preference is used. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) database.write_concern is used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) database.read_concern is used. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. If a collation is provided, - it will be passed to the create collection command. - :param session: a - :class:`~pymongo.client_session.ClientSession` that is used with - the create collection command - :param kwargs: additional keyword arguments will - be passed as options for the create collection command - - .. versionchanged:: 4.2 - Added the ``clusteredIndex`` and ``encryptedFields`` parameters. - - .. versionchanged:: 4.0 - Removed the reindex, map_reduce, inline_map_reduce, - parallel_scan, initialize_unordered_bulk_op, - initialize_ordered_bulk_op, group, count, insert, save, - update, remove, find_and_modify, and ensure_index methods. See the - :ref:`pymongo4-migration-guide`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Support the `collation` option. - - .. versionchanged:: 3.2 - Added the read_concern option. - - .. versionchanged:: 3.0 - Added the codec_options, read_preference, and write_concern options. - Removed the uuid_subtype attribute. - :class:`~pymongo.collection.Collection` no longer returns an - instance of :class:`~pymongo.collection.Collection` for attribute - names with leading underscores. You must use dict-style lookups - instead:: - - collection['__my_collection__'] - - Not: - - collection.__my_collection__ - - .. seealso:: The MongoDB documentation on `collections `_. - """ - super().__init__( - codec_options or database.codec_options, - read_preference or database.read_preference, - write_concern or database.write_concern, - read_concern or database.read_concern, - ) - if not isinstance(name, str): - raise TypeError("name must be an instance of str") - - if not name or ".." in name: - raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): - raise InvalidName("collection names must not contain '$': %r" % name) - if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start or end with '.': %r" % name) - if "\x00" in name: - raise InvalidName("collection names must not contain the null character") - collation = validate_collation_or_none(kwargs.pop("collation", None)) - - self.__database: Database[_DocumentType] = database - self.__name = name - self.__full_name = f"{self.__database.name}.{self.__name}" - self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler="replace", document_class=dict - ) - self._timeout = database.client.options.timeout - encrypted_fields = kwargs.pop("encryptedFields", None) - if create or kwargs or collation: - if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) - opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} - self.__create( - _esc_coll_name(encrypted_fields, name), opts, None, session, qev2_required=True - ) - self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) - self.create_index([("__safeContent__", ASCENDING)], session) - else: - self.__create(name, kwargs, collation, session) - - def _conn_for_writes( - self, session: Optional[ClientSession], operation: str - ) -> ContextManager[Connection]: - return self.__database.client._conn_for_writes(session, operation) - - def _command( - self, - conn: Connection, - command: MutableMapping[str, Any], - read_preference: Optional[_ServerMode] = None, - codec_options: Optional[CodecOptions] = None, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - collation: Optional[_CollationIn] = None, - session: Optional[ClientSession] = None, - retryable_write: bool = False, - user_fields: Optional[Any] = None, - ) -> Mapping[str, Any]: - """Internal command helper. - - :param conn` - A Connection instance. - :param command` - The command itself, as a :class:`~bson.son.SON` instance. - :param read_preference` (optional) - The read preference to use. - :param codec_options` (optional) - An instance of - :class:`~bson.codec_options.CodecOptions`. - :param check: raise OperationFailure if there are errors - :param allowable_errors: errors to ignore if `check` is True - :param read_concern` (optional) - An instance of - :class:`~pymongo.read_concern.ReadConcern`. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. - :param collation` (optional) - An instance of - :class:`~pymongo.collation.Collation`. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param retryable_write: True if this command is a retryable - write. - :param user_fields: Response fields that should be decoded - using the TypeDecoders from codec_options, passed to - bson._decode_all_selective. - - :return: The result document. - """ - with self.__database.client._tmp_session(session) as s: - return conn.command( - self.__database.name, - command, - read_preference or self._read_preference_for(session), - codec_options or self.codec_options, - check, - allowable_errors, - read_concern=read_concern, - write_concern=write_concern, - parse_write_concern_error=True, - collation=collation, - session=s, - client=self.__database.client, - retryable_write=retryable_write, - user_fields=user_fields, - ) - - def __create( - self, - name: str, - options: MutableMapping[str, Any], - collation: Optional[_CollationIn], - session: Optional[ClientSession], - encrypted_fields: Optional[Mapping[str, Any]] = None, - qev2_required: bool = False, - ) -> None: - """Sends a create command with the given options.""" - cmd: dict[str, Any] = {"create": name} - if encrypted_fields: - cmd["encryptedFields"] = encrypted_fields - - if options: - if "size" in options: - options["size"] = float(options["size"]) - cmd.update(options) - with self._conn_for_writes(session, operation=_Op.CREATE) as conn: - if qev2_required and conn.max_wire_version < 21: - raise ConfigurationError( - "Driver support of Queryable Encryption is incompatible with server. " - "Upgrade server to use Queryable Encryption. " - f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" - ) - - self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=self._write_concern_for(session), - collation=collation, - session=session, - ) - - def __getattr__(self, name: str) -> Collection[_DocumentType]: - """Get a sub-collection of this collection by name. - - Raises InvalidName if an invalid collection name is used. - - :param name: the name of the collection to get - """ - if name.startswith("_"): - full_name = f"{self.__name}.{name}" - raise AttributeError( - f"Collection has no attribute {name!r}. To access the {full_name}" - f" collection, use database['{full_name}']." - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> Collection[_DocumentType]: - return Collection( - self.__database, - f"{self.__name}.{name}", - False, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - - def __repr__(self) -> str: - return f"Collection({self.__database!r}, {self.__name!r})" - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Collection): - return self.__database == other.database and self.__name == other.name - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash((self.__database, self.__name)) - - def __bool__(self) -> NoReturn: - raise NotImplementedError( - "Collection objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: collection is not None" - ) - - @property - def full_name(self) -> str: - """The full name of this :class:`Collection`. - - The full name is of the form `database_name.collection_name`. - """ - return self.__full_name - - @property - def name(self) -> str: - """The name of this :class:`Collection`.""" - return self.__name - - @property - def database(self) -> Database[_DocumentType]: - """The :class:`~pymongo.database.Database` that this - :class:`Collection` is a part of. - """ - return self.__database - - def with_options( - self, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> Collection[_DocumentType]: - """Get a clone of this collection changing the specified settings. - - >>> coll1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) - >>> coll1.read_preference - Primary() - >>> coll2.read_preference - Secondary(tag_sets=None) - - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Collection` - is used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Collection` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Collection` - is used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Collection` - is used. - """ - return Collection( - self.__database, - self.__name, - False, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern, - ) - - @_csot.apply - def bulk_write( - self, - requests: Sequence[_WriteOp[_DocumentType]], - ordered: bool = True, - bypass_document_validation: bool = False, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - let: Optional[Mapping] = None, - ) -> BulkWriteResult: - """Send a batch of write operations to the server. - - Requests are passed as a list of write operation instances ( - :class:`~pymongo.operations.InsertOne`, - :class:`~pymongo.operations.UpdateOne`, - :class:`~pymongo.operations.UpdateMany`, - :class:`~pymongo.operations.ReplaceOne`, - :class:`~pymongo.operations.DeleteOne`, or - :class:`~pymongo.operations.DeleteMany`). - - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} - {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} - >>> # DeleteMany, UpdateOne, and UpdateMany are also available. - ... - >>> from pymongo import InsertOne, DeleteOne, ReplaceOne - >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), - ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] - >>> result = db.test.bulk_write(requests) - >>> result.inserted_count - 1 - >>> result.deleted_count - 1 - >>> result.modified_count - 0 - >>> result.upserted_ids - {2: ObjectId('54f62ee28891e756a6e1abd5')} - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} - {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} - {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} - - :param requests: A list of write operations (see examples above). - :param ordered: If ``True`` (the default) requests will be - performed on the server serially, in the order provided. If an error - occurs all remaining operations are aborted. If ``False`` requests - will be performed on the server in arbitrary order, possibly in - parallel, and all operations will be attempted. - :param bypass_document_validation: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - :return: An instance of :class:`~pymongo.results.BulkWriteResult`. - - .. seealso:: :ref:`writes-and-ids` - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - Added ``let`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 3.0 - """ - common.validate_list("requests", requests) - - blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) - for request in requests: - try: - request._add_to_bulk(blk) - except AttributeError: - raise TypeError(f"{request!r} is not a valid request") from None - - write_concern = self._write_concern_for(session) - bulk_api_result = blk.execute(write_concern, session, _Op.INSERT) - if bulk_api_result is not None: - return BulkWriteResult(bulk_api_result, True) - return BulkWriteResult({}, False) - - def _insert_one( - self, - doc: Mapping[str, Any], - ordered: bool, - write_concern: WriteConcern, - op_id: Optional[int], - bypass_doc_val: bool, - session: Optional[ClientSession], - comment: Optional[Any] = None, - ) -> Any: - """Internal helper for inserting a single document.""" - write_concern = write_concern or self.write_concern - acknowledged = write_concern.acknowledged - command = {"insert": self.name, "ordered": ordered, "documents": [doc]} - if comment is not None: - command["comment"] = comment - - def _insert_command( - session: Optional[ClientSession], conn: Connection, retryable_write: bool - ) -> None: - if bypass_doc_val: - command["bypassDocumentValidation"] = True - - result = conn.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, - ) - - _check_write_command_response(result) - - self.__database.client._retryable_write( - acknowledged, _insert_command, session, operation=_Op.INSERT - ) - - if not isinstance(doc, RawBSONDocument): - return doc.get("_id") - return None - - def insert_one( - self, - document: Union[_DocumentType, RawBSONDocument], - bypass_document_validation: bool = False, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - ) -> InsertOneResult: - """Insert a single document. - - >>> db.test.count_documents({'x': 1}) - 0 - >>> result = db.test.insert_one({'x': 1}) - >>> result.inserted_id - ObjectId('54f112defba522406c9cc208') - >>> db.test.find_one({'x': 1}) - {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} - - :param document: The document to insert. Must be a mutable mapping - type. If the document does not have an _id field one will be - added automatically. - :param bypass_document_validation: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - :return: - An instance of :class:`~pymongo.results.InsertOneResult`. - - .. seealso:: :ref:`writes-and-ids` - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 3.0 - """ - common.validate_is_document_type("document", document) - if not (isinstance(document, RawBSONDocument) or "_id" in document): - document["_id"] = ObjectId() # type: ignore[index] - - write_concern = self._write_concern_for(session) - return InsertOneResult( - self._insert_one( - document, - ordered=True, - write_concern=write_concern, - op_id=None, - bypass_doc_val=bypass_document_validation, - session=session, - comment=comment, - ), - write_concern.acknowledged, - ) - - @_csot.apply - def insert_many( - self, - documents: Iterable[Union[_DocumentType, RawBSONDocument]], - ordered: bool = True, - bypass_document_validation: bool = False, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - ) -> InsertManyResult: - """Insert an iterable of documents. - - >>> db.test.count_documents({}) - 0 - >>> result = db.test.insert_many([{'x': i} for i in range(2)]) - >>> result.inserted_ids - [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')] - >>> db.test.count_documents({}) - 2 - - :param documents: A iterable of documents to insert. - :param ordered: If ``True`` (the default) documents will be - inserted on the server serially, in the order provided. If an error - occurs all remaining inserts are aborted. If ``False``, documents - will be inserted on the server in arbitrary order, possibly in - parallel, and all document inserts will be attempted. - :param bypass_document_validation: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - :return: An instance of :class:`~pymongo.results.InsertManyResult`. - - .. seealso:: :ref:`writes-and-ids` - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 3.0 - """ - if ( - not isinstance(documents, abc.Iterable) - or isinstance(documents, abc.Mapping) - or not documents - ): - raise TypeError("documents must be a non-empty list") - inserted_ids: list[ObjectId] = [] - - def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: - """A generator that validates documents and handles _ids.""" - for document in documents: - common.validate_is_document_type("document", document) - if not isinstance(document, RawBSONDocument): - if "_id" not in document: - document["_id"] = ObjectId() # type: ignore[index] - inserted_ids.append(document["_id"]) - yield (message._INSERT, document) - - write_concern = self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) - blk.ops = list(gen()) - blk.execute(write_concern, session, _Op.INSERT) - return InsertManyResult(inserted_ids, write_concern.acknowledged) - - def _update( - self, - conn: Connection, - criteria: Mapping[str, Any], - document: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - multi: bool = False, - write_concern: Optional[WriteConcern] = None, - op_id: Optional[int] = None, - ordered: bool = True, - bypass_doc_val: Optional[bool] = False, - collation: Optional[_CollationIn] = None, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - retryable_write: bool = False, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> Optional[Mapping[str, Any]]: - """Internal update / replace helper.""" - validate_boolean("upsert", upsert) - collation = validate_collation_or_none(collation) - write_concern = write_concern or self.write_concern - acknowledged = write_concern.acknowledged - update_doc: dict[str, Any] = { - "q": criteria, - "u": document, - "multi": multi, - "upsert": upsert, - } - if collation is not None: - if not acknowledged: - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - else: - update_doc["collation"] = collation - if array_filters is not None: - if not acknowledged: - raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") - else: - update_doc["arrayFilters"] = array_filters - if hint is not None: - if not acknowledged and conn.max_wire_version < 8: - raise ConfigurationError( - "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." - ) - if not isinstance(hint, str): - hint = helpers._index_document(hint) - update_doc["hint"] = hint - command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} - if let is not None: - common.validate_is_mapping("let", let) - command["let"] = let - - if comment is not None: - command["comment"] = comment - # Update command. - if bypass_doc_val: - command["bypassDocumentValidation"] = True - - # The command result has to be published for APM unmodified - # so we make a shallow copy here before adding updatedExisting. - result = conn.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, - ).copy() - _check_write_command_response(result) - # Add the updatedExisting field for compatibility. - if result.get("n") and "upserted" not in result: - result["updatedExisting"] = True - else: - result["updatedExisting"] = False - # MongoDB >= 2.6.0 returns the upsert _id in an array - # element. Break it out for backward compatibility. - if "upserted" in result: - result["upserted"] = result["upserted"][0]["_id"] - - if not acknowledged: - return None - return result - - def _update_retryable( - self, - criteria: Mapping[str, Any], - document: Union[Mapping[str, Any], _Pipeline], - operation: str, - upsert: bool = False, - multi: bool = False, - write_concern: Optional[WriteConcern] = None, - op_id: Optional[int] = None, - ordered: bool = True, - bypass_doc_val: Optional[bool] = False, - collation: Optional[_CollationIn] = None, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> Optional[Mapping[str, Any]]: - """Internal update / replace helper.""" - - def _update( - session: Optional[ClientSession], conn: Connection, retryable_write: bool - ) -> Optional[Mapping[str, Any]]: - return self._update( - conn, - criteria, - document, - upsert=upsert, - multi=multi, - write_concern=write_concern, - op_id=op_id, - ordered=ordered, - bypass_doc_val=bypass_doc_val, - collation=collation, - array_filters=array_filters, - hint=hint, - session=session, - retryable_write=retryable_write, - let=let, - comment=comment, - ) - - return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _update, - session, - operation, - ) - - def replace_one( - self, - filter: Mapping[str, Any], - replacement: Mapping[str, Any], - upsert: bool = False, - bypass_document_validation: bool = False, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> UpdateResult: - """Replace a single document matching the filter. - - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} - >>> result = db.test.replace_one({'x': 1}, {'y': 1}) - >>> result.matched_count - 1 - >>> result.modified_count - 1 - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} - - The *upsert* option can be used to insert a new document if a matching - document does not exist. - - >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True) - >>> result.matched_count - 0 - >>> result.modified_count - 0 - >>> result.upserted_id - ObjectId('54f11e5c8891e756a6e1abd4') - >>> db.test.find_one({'x': 1}) - {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} - - :param filter: A query that matches the document to replace. - :param replacement: The new document. - :param upsert: If ``True``, perform an insert if no documents - match the filter. - :param bypass_document_validation: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - :return: - An instance of :class:`~pymongo.results.UpdateResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support. - - .. versionadded:: 3.0 - """ - common.validate_is_mapping("filter", filter) - common.validate_ok_for_replace(replacement) - if let is not None: - common.validate_is_mapping("let", let) - write_concern = self._write_concern_for(session) - return UpdateResult( - self._update_retryable( - filter, - replacement, - _Op.UPDATE, - upsert, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - collation=collation, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def update_one( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - bypass_document_validation: bool = False, - collation: Optional[_CollationIn] = None, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> UpdateResult: - """Update a single document matching the filter. - - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) - >>> result.matched_count - 1 - >>> result.modified_count - 1 - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 4, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - - If ``upsert=True`` and no documents match the filter, create a - new document based on the filter criteria and update modifications. - - >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) - >>> result.matched_count - 0 - >>> result.modified_count - 0 - >>> result.upserted_id - ObjectId('626a678eeaa80587d4bb3fb7') - >>> db.test.find_one(result.upserted_id) - {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} - - :param filter: A query that matches the document to update. - :param update: The modifications to apply. - :param upsert: If ``True``, perform an insert if no documents - match the filter. - :param bypass_document_validation: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param array_filters: A list of filters specifying which - array elements an update should apply. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - - :return: - An instance of :class:`~pymongo.results.UpdateResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the ``update``. - .. versionchanged:: 3.6 - Added the ``array_filters`` and ``session`` parameters. - .. versionchanged:: 3.4 - Added the ``collation`` option. - .. versionchanged:: 3.2 - Added ``bypass_document_validation`` support. - - .. versionadded:: 3.0 - """ - common.validate_is_mapping("filter", filter) - common.validate_ok_for_update(update) - common.validate_list_or_none("array_filters", array_filters) - - write_concern = self._write_concern_for(session) - return UpdateResult( - self._update_retryable( - filter, - update, - _Op.UPDATE, - upsert, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - collation=collation, - array_filters=array_filters, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def update_many( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - bypass_document_validation: Optional[bool] = None, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> UpdateResult: - """Update one or more documents that match the filter. - - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) - >>> result.matched_count - 3 - >>> result.modified_count - 3 - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 4, '_id': 0} - {'x': 4, '_id': 1} - {'x': 4, '_id': 2} - - :param filter: A query that matches the documents to update. - :param update: The modifications to apply. - :param upsert: If ``True``, perform an insert if no documents - match the filter. - :param bypass_document_validation: If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param array_filters: A list of filters specifying which - array elements an update should apply. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - - :return: - An instance of :class:`~pymongo.results.UpdateResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. - .. versionchanged:: 3.6 - Added ``array_filters`` and ``session`` parameters. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support. - - .. versionadded:: 3.0 - """ - common.validate_is_mapping("filter", filter) - common.validate_ok_for_update(update) - common.validate_list_or_none("array_filters", array_filters) - - write_concern = self._write_concern_for(session) - return UpdateResult( - self._update_retryable( - filter, - update, - _Op.UPDATE, - upsert, - multi=True, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - collation=collation, - array_filters=array_filters, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def drop( - self, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, - ) -> None: - """Alias for :meth:`~pymongo.database.Database.drop_collection`. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. - - The following two calls are equivalent: - - >>> db.foo.drop() - >>> db.drop_collection("foo") - - .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.7 - :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - dbo = self.__database.client.get_database( - self.__database.name, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - dbo.drop_collection( - self.__name, session=session, comment=comment, encrypted_fields=encrypted_fields - ) - - def _delete( - self, - conn: Connection, - criteria: Mapping[str, Any], - multi: bool, - write_concern: Optional[WriteConcern] = None, - op_id: Optional[int] = None, - ordered: bool = True, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - retryable_write: bool = False, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> Mapping[str, Any]: - """Internal delete helper.""" - common.validate_is_mapping("filter", criteria) - write_concern = write_concern or self.write_concern - acknowledged = write_concern.acknowledged - delete_doc = {"q": criteria, "limit": int(not multi)} - collation = validate_collation_or_none(collation) - if collation is not None: - if not acknowledged: - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - else: - delete_doc["collation"] = collation - if hint is not None: - if not acknowledged and conn.max_wire_version < 9: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." - ) - if not isinstance(hint, str): - hint = helpers._index_document(hint) - delete_doc["hint"] = hint - command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} - - if let is not None: - common.validate_is_document_type("let", let) - command["let"] = let - - if comment is not None: - command["comment"] = comment - - # Delete command. - result = conn.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, - ) - _check_write_command_response(result) - return result - - def _delete_retryable( - self, - criteria: Mapping[str, Any], - multi: bool, - write_concern: Optional[WriteConcern] = None, - op_id: Optional[int] = None, - ordered: bool = True, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> Mapping[str, Any]: - """Internal delete helper.""" - - def _delete( - session: Optional[ClientSession], conn: Connection, retryable_write: bool - ) -> Mapping[str, Any]: - return self._delete( - conn, - criteria, - multi, - write_concern=write_concern, - op_id=op_id, - ordered=ordered, - collation=collation, - hint=hint, - session=session, - retryable_write=retryable_write, - let=let, - comment=comment, - ) - - return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, - _delete, - session, - operation=_Op.DELETE, - ) - - def delete_one( - self, - filter: Mapping[str, Any], - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> DeleteResult: - """Delete a single document matching the filter. - - >>> db.test.count_documents({'x': 1}) - 3 - >>> result = db.test.delete_one({'x': 1}) - >>> result.deleted_count - 1 - >>> db.test.count_documents({'x': 1}) - 2 - - :param filter: A query that matches the document to delete. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - - :return: - An instance of :class:`~pymongo.results.DeleteResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionadded:: 3.0 - """ - write_concern = self._write_concern_for(session) - return DeleteResult( - self._delete_retryable( - filter, - False, - write_concern=write_concern, - collation=collation, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def delete_many( - self, - filter: Mapping[str, Any], - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> DeleteResult: - """Delete one or more documents matching the filter. - - >>> db.test.count_documents({'x': 1}) - 3 - >>> result = db.test.delete_many({'x': 1}) - >>> result.deleted_count - 3 - >>> db.test.count_documents({'x': 1}) - 0 - - :param filter: A query that matches the documents to delete. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - - :return: - An instance of :class:`~pymongo.results.DeleteResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionadded:: 3.0 - """ - write_concern = self._write_concern_for(session) - return DeleteResult( - self._delete_retryable( - filter, - True, - write_concern=write_concern, - collation=collation, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def find_one( - self, filter: Optional[Any] = None, *args: Any, **kwargs: Any - ) -> Optional[_DocumentType]: - """Get a single document from the database. - - All arguments to :meth:`find` are also valid arguments for - :meth:`find_one`, although any `limit` argument will be - ignored. Returns a single document, or ``None`` if no matching - document is found. - - The :meth:`find_one` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :param filter: a dictionary specifying - the query to be performed OR any other type to be used as - the value for a query for ``"_id"``. - - :param args: any additional positional arguments - are the same as the arguments to :meth:`find`. - - :param kwargs: any additional keyword arguments - are the same as the arguments to :meth:`find`. - - :: code-block: python - - >>> collection.find_one(max_time_ms=100) - - """ - if filter is not None and not isinstance(filter, abc.Mapping): - filter = {"_id": filter} - cursor = self.find(filter, *args, **kwargs) - for result in cursor.limit(-1): - return result - return None - - def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: - """Query the database. - - The `filter` argument is a query document that all results - must match. For example: - - >>> db.test.find({"hello": "world"}) - - only matches documents that have a key "hello" with value - "world". Matches can have other keys *in addition* to - "hello". The `projection` argument is used to specify a subset - of fields that should be included in the result documents. By - limiting results to a certain subset of fields you can cut - down on network traffic and decoding time. - - Raises :class:`TypeError` if any of the arguments are of - improper type. Returns an instance of - :class:`~pymongo.cursor.Cursor` corresponding to this query. - - The :meth:`find` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :param filter: A query document that selects which documents - to include in the result set. Can be an empty document to include - all documents. - :param projection: a list of field names that should be - returned in the result set or a dict specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a dict to exclude fields from - the result (e.g. projection={'_id': False}). - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param skip: the number of documents to omit (from - the start of the result set) when returning the results - :param limit: the maximum number of results to - return. A limit of 0 (the default) is equivalent to setting no - limit. - :param no_cursor_timeout: if False (the default), any - returned cursor is closed by the server after 10 minutes of - inactivity. If set to True, the returned cursor will never - time out on the server. Care should be taken to ensure that - cursors with no_cursor_timeout turned on are properly closed. - :param cursor_type: the type of cursor to return. The valid - options are defined by :class:`~pymongo.cursor.CursorType`: - - - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of - this find call will return a standard cursor over the result set. - - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this - find call will be a tailable cursor - tailable cursors are only - for use with capped collections. They are not closed when the - last data is retrieved but are kept open and the cursor location - marks the final document position. If more data is received - iteration of the cursor will continue from the last document - received. For details, see the `tailable cursor documentation - `_. - - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result - of this find call will be a tailable cursor with the await flag - set. The server will wait for a few seconds after returning the - full result set so that it can capture and return additional data - added during the query. - - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this - find call will be an exhaust cursor. MongoDB will stream batched - results to the client without waiting for the client to request - each batch, reducing latency. See notes on compatibility below. - - :param sort: a list of (key, direction) pairs - specifying the sort order for this query. See - :meth:`~pymongo.cursor.Cursor.sort` for details. - :param allow_partial_results: if True, mongos will return - partial results if some shards are down instead of returning an - error. - :param oplog_replay: **DEPRECATED** - if True, set the - oplogReplay query flag. Default: False. - :param batch_size: Limits the number of documents returned in - a single batch. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param return_key: If True, return only the index keys in - each document. - :param show_record_id: If True, adds a field ``$recordId`` in - each document with the storage engine's internal record identifier. - :param snapshot: **DEPRECATED** - If True, prevents the - cursor from returning a document more than once because of an - intervening write operation. - :param hint: An index, in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the - proper index to use for the query. - :param max_time_ms: Specifies a time limit for a query - operation. If the specified time is exceeded, the operation will be - aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass - this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor. - :param max_scan: **DEPRECATED** - The maximum number of - documents to scan. Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor. - :param min: A list of field, limit pairs specifying the - inclusive lower bound for all keys of a specific index in order. - Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must - also be passed to ensure the query utilizes the correct index. - :param max: A list of field, limit pairs specifying the - exclusive upper bound for all keys of a specific index in order. - Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must - also be passed to ensure the query utilizes the correct index. - :param comment: A string to attach to the query to help - interpret and trace the operation in the server logs and in profile - data. Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.comment` on the cursor. - :param allow_disk_use: if True, MongoDB may use temporary - disk files to store data exceeding the system memory limit while - processing a blocking sort operation. The option has no effect if - MongoDB can satisfy the specified sort using an index, or if the - blocking sort requires less memory than the 100 MiB limit. This - option is only supported on MongoDB 4.4 and above. - - .. note:: There are a number of caveats to using - :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: - - - The `limit` option can not be used with an exhaust cursor. - - - Exhaust cursors are not supported by mongos and can not be - used with a sharded cluster. - - - A :class:`~pymongo.cursor.Cursor` instance created with the - :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an - exclusive :class:`~socket.socket` connection to MongoDB. If the - :class:`~pymongo.cursor.Cursor` is discarded without being - completely iterated the underlying :class:`~socket.socket` - connection will be closed and discarded without being returned to - the connection pool. - - .. versionchanged:: 4.0 - Removed the ``modifiers`` option. - Empty projections (eg {} or []) are passed to the server as-is, - rather than the previous behavior which substituted in a - projection of ``{"_id": 1}``. This means that an empty projection - will now return the entire document, not just the ``"_id"`` field. - - .. versionchanged:: 3.11 - Added the ``allow_disk_use`` option. - Deprecated the ``oplog_replay`` option. Support for this option is - deprecated in MongoDB 4.4. The query engine now automatically - optimizes queries against the oplog without requiring this - option to be set. - - .. versionchanged:: 3.7 - Deprecated the ``snapshot`` option, which is deprecated in MongoDB - 3.6 and removed in MongoDB 4.0. - Deprecated the ``max_scan`` option. Support for this option is - deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit - server-side execution time. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.5 - Added the options ``return_key``, ``show_record_id``, ``snapshot``, - ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and - ``comment``. - Deprecated the ``modifiers`` option. - - .. versionchanged:: 3.4 - Added support for the ``collation`` option. - - .. versionchanged:: 3.0 - Changed the parameter names ``spec``, ``fields``, ``timeout``, and - ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, - and ``allow_partial_results`` respectively. - Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` - options. - Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, - ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, - ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and - slave_okay parameters. - Removed ``compile_re`` option: PyMongo now always - represents BSON regular expressions as :class:`~bson.regex.Regex` - objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to - convert from a BSON regular expression to a Python regular - expression object. - Soft deprecated the ``manipulate`` option. - - .. seealso:: The MongoDB documentation on `find `_. - """ - return Cursor(self, *args, **kwargs) - - def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: - """Query the database and retrieve batches of raw BSON. - - Similar to the :meth:`find` method but returns a - :class:`~pymongo.cursor.RawBatchCursor`. - - This example demonstrates how to work with raw batches, but in practice - raw batches should be passed to an external library that can decode - BSON into another data type, rather than used with PyMongo's - :mod:`bson` module. - - >>> import bson - >>> cursor = db.test.find_raw_batches() - >>> for batch in cursor: - ... print(bson.decode_all(batch)) - - .. note:: find_raw_batches does not support auto encryption. - - .. versionchanged:: 3.12 - Instead of ignoring the user-specified read concern, this method - now sends it to the server when connected to MongoDB 3.6+. - - Added session support. - - .. versionadded:: 3.6 - """ - # OP_MSG is required to support encryption. - if self.__database.client._encrypter: - raise InvalidOperation("find_raw_batches does not support auto encryption") - return RawBatchCursor(self, *args, **kwargs) - - def _count_cmd( - self, - session: Optional[ClientSession], - conn: Connection, - read_preference: Optional[_ServerMode], - cmd: dict[str, Any], - collation: Optional[Collation], - ) -> int: - """Internal count command helper.""" - # XXX: "ns missing" checks can be removed when we drop support for - # MongoDB 3.0, see SERVER-17051. - res = self._command( - conn, - cmd, - read_preference=read_preference, - allowable_errors=["ns missing"], - codec_options=self.__write_response_codec_options, - read_concern=self.read_concern, - collation=collation, - session=session, - ) - if res.get("errmsg", "") == "ns missing": - return 0 - return int(res["n"]) - - def _aggregate_one_result( - self, - conn: Connection, - read_preference: Optional[_ServerMode], - cmd: dict[str, Any], - collation: Optional[_CollationIn], - session: Optional[ClientSession], - ) -> Optional[Mapping[str, Any]]: - """Internal helper to run an aggregate that returns a single result.""" - result = self._command( - conn, - cmd, - read_preference, - allowable_errors=[26], # Ignore NamespaceNotFound. - codec_options=self.__write_response_codec_options, - read_concern=self.read_concern, - collation=collation, - session=session, - ) - # cursor will not be present for NamespaceNotFound errors. - if "cursor" not in result: - return None - batch = result["cursor"]["firstBatch"] - return batch[0] if batch else None - - def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: - """Get an estimate of the number of documents in this collection using - collection metadata. - - The :meth:`estimated_document_count` method is **not** supported in a - transaction. - - All optional parameters should be passed as keyword arguments - to this method. Valid options include: - - - `maxTimeMS` (int): The maximum amount of time to allow this - operation to run, in milliseconds. - - :param comment: A user-provided comment to attach to this - command. - :param kwargs: See list of options above. - - .. versionchanged:: 4.2 - This method now always uses the `count`_ command. Due to an oversight in versions - 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the - :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are - recommended to upgrade their server version to 5.0.9+ or set - :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. - - .. versionadded:: 3.7 - .. _count: https://mongodb.com/docs/manual/reference/command/count/ - """ - if "session" in kwargs: - raise ConfigurationError("estimated_document_count does not support sessions") - if comment is not None: - kwargs["comment"] = comment - - def _cmd( - session: Optional[ClientSession], - _server: Server, - conn: Connection, - read_preference: Optional[_ServerMode], - ) -> int: - cmd: dict[str, Any] = {"count": self.__name} - cmd.update(kwargs) - return self._count_cmd(session, conn, read_preference, cmd, collation=None) - - return self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) - - def count_documents( - self, - filter: Mapping[str, Any], - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> int: - """Count the number of documents in this collection. - - .. note:: For a fast count of the total documents in a collection see - :meth:`estimated_document_count`. - - The :meth:`count_documents` method is supported in a transaction. - - All optional parameters should be passed as keyword arguments - to this method. Valid options include: - - - `skip` (int): The number of matching documents to skip before - returning results. - - `limit` (int): The maximum number of documents to count. Must be - a positive integer. If not provided, no limit is imposed. - - `maxTimeMS` (int): The maximum amount of time to allow this - operation to run, in milliseconds. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `hint` (string or list of tuples): The index to use. Specify either - the index name as a string or the index specification as a list of - tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - - The :meth:`count_documents` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - .. note:: When migrating from :meth:`count` to :meth:`count_documents` - the following query operators must be replaced: - - +-------------+-------------------------------------+ - | Operator | Replacement | - +=============+=====================================+ - | $where | `$expr`_ | - +-------------+-------------------------------------+ - | $near | `$geoWithin`_ with `$center`_ | - +-------------+-------------------------------------+ - | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | - +-------------+-------------------------------------+ - - :param filter: A query document that selects which documents - to count in the collection. Can be an empty document to count all - documents. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: See list of options above. - - - .. versionadded:: 3.7 - - .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ - .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ - .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ - .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ - """ - pipeline = [{"$match": filter}] - if "skip" in kwargs: - pipeline.append({"$skip": kwargs.pop("skip")}) - if "limit" in kwargs: - pipeline.append({"$limit": kwargs.pop("limit")}) - if comment is not None: - kwargs["comment"] = comment - pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) - cmd = {"aggregate": self.__name, "pipeline": pipeline, "cursor": {}} - if "hint" in kwargs and not isinstance(kwargs["hint"], str): - kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) - - def _cmd( - session: Optional[ClientSession], - _server: Server, - conn: Connection, - read_preference: Optional[_ServerMode], - ) -> int: - result = self._aggregate_one_result(conn, read_preference, cmd, collation, session) - if not result: - return 0 - return result["n"] - - return self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) - - def _retryable_non_cursor_read( - self, - func: Callable[[Optional[ClientSession], Server, Connection, Optional[_ServerMode]], T], - session: Optional[ClientSession], - operation: str, - ) -> T: - """Non-cursor read helper to handle implicit session creation.""" - client = self.__database.client - with client._tmp_session(session) as s: - return client._retryable_read(func, self._read_preference_for(s), s, operation) - - def create_indexes( - self, - indexes: Sequence[IndexModel], - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> list[str]: - """Create one or more indexes on this collection. - - >>> from pymongo import IndexModel, ASCENDING, DESCENDING - >>> index1 = IndexModel([("hello", DESCENDING), - ... ("world", ASCENDING)], name="hello_world") - >>> index2 = IndexModel([("goodbye", DESCENDING)]) - >>> db.test.create_indexes([index1, index2]) - ["hello_world", "goodbye_-1"] - - :param indexes: A list of :class:`~pymongo.operations.IndexModel` - instances. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the createIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - - - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - .. versionadded:: 3.0 - - .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ - """ - common.validate_list("indexes", indexes) - if comment is not None: - kwargs["comment"] = comment - return self.__create_indexes(indexes, session, **kwargs) - - @_csot.apply - def __create_indexes( - self, indexes: Sequence[IndexModel], session: Optional[ClientSession], **kwargs: Any - ) -> list[str]: - """Internal createIndexes helper. - - :param indexes: A list of :class:`~pymongo.operations.IndexModel` - instances. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param kwargs: optional arguments to the createIndexes - command (like maxTimeMS) can be passed as keyword arguments. - """ - names = [] - with self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn: - supports_quorum = conn.max_wire_version >= 9 - - def gen_indexes() -> Iterator[Mapping[str, Any]]: - for index in indexes: - if not isinstance(index, IndexModel): - raise TypeError( - f"{index!r} is not an instance of pymongo.operations.IndexModel" - ) - document = index.document - names.append(document["name"]) - yield document - - cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())} - cmd.update(kwargs) - if "commitQuorum" in kwargs and not supports_quorum: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use the " - "commitQuorum option for createIndexes" - ) - - self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - write_concern=self._write_concern_for(session), - session=session, - ) - return names - - def create_index( - self, - keys: _IndexKeyHint, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> str: - """Creates an index on this collection. - - Takes either a single key or a list containing (key, direction) pairs - or keys. If no direction is given, :data:`~pymongo.ASCENDING` will - be assumed. - The key(s) must be an instance of :class:`str` and the direction(s) must - be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, - :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). - - To create a single key ascending index on the key ``'mike'`` we just - use a string argument:: - - >>> my_collection.create_index("mike") - - For a compound index on ``'mike'`` descending and ``'eliot'`` - ascending we need to use a list of tuples:: - - >>> my_collection.create_index([("mike", pymongo.DESCENDING), - ... "eliot"]) - - All optional index creation parameters should be passed as - keyword arguments to this method. For example:: - - >>> my_collection.create_index([("mike", pymongo.DESCENDING)], - ... background=True) - - Valid options include, but are not limited to: - - - `name`: custom name to use for this index - if none is - given, a name will be generated. - - `unique`: if ``True``, creates a uniqueness constraint on the - index. - - `background`: if ``True``, this index should be created in the - background. - - `sparse`: if ``True``, omit from the index any documents that lack - the indexed field. - - `bucketSize`: for use with geoHaystack indexes. - Number of documents to group together within a certain proximity - to a given longitude and latitude. - - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` - index. - - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` - index. - - `expireAfterSeconds`: Used to create an expiring (TTL) - collection. MongoDB will automatically delete documents from - this collection after seconds. The indexed field must - be a UTC datetime or the data will not expire. - - `partialFilterExpression`: A document that specifies a filter for - a partial index. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `wildcardProjection`: Allows users to include or exclude specific - field paths from a `wildcard index`_ using the {"$**" : 1} key - pattern. Requires MongoDB >= 4.2. - - `hidden`: if ``True``, this index will be hidden from the query - planner and will not be evaluated as part of query plan - selection. Requires MongoDB >= 4.4. - - See the MongoDB documentation for a full list of supported options by - server version. - - .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The - option is silently ignored by the server and unique index builds - using the option will fail if a duplicate value is detected. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - :param keys: a single key or a list of (key, direction) - pairs specifying the index to create - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: any additional index creation - options (see the above list) should be passed as keyword - arguments. - - .. versionchanged:: 4.4 - Allow passing a list containing (key, direction) pairs - or keys for the ``keys`` parameter. - .. versionchanged:: 4.1 - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added the ``hidden`` option. - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for passing maxTimeMS - in kwargs. - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. Support the `collation` option. - .. versionchanged:: 3.2 - Added partialFilterExpression to support partial indexes. - .. versionchanged:: 3.0 - Renamed `key_or_list` to `keys`. Removed the `cache_for` option. - :meth:`create_index` no longer caches index names. Removed support - for the drop_dups and bucket_size aliases. - - .. seealso:: The MongoDB documentation on `indexes `_. - - .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ - """ - cmd_options = {} - if "maxTimeMS" in kwargs: - cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") - if comment is not None: - cmd_options["comment"] = comment - index = IndexModel(keys, **kwargs) - return self.__create_indexes([index], session, **cmd_options)[0] - - def drop_indexes( - self, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> None: - """Drops all indexes on this collection. - - Can be used on non-existent collections or collections with no indexes. - Raises OperationFailure on an error. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the createIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - """ - if comment is not None: - kwargs["comment"] = comment - self.drop_index("*", session=session, **kwargs) - - @_csot.apply - def drop_index( - self, - index_or_name: _IndexKeyHint, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> None: - """Drops the specified index on this collection. - - Can be used on non-existent collections or collections with no - indexes. Raises OperationFailure on an error (e.g. trying to - drop an index that does not exist). `index_or_name` - can be either an index name (as returned by `create_index`), - or an index specifier (as passed to `create_index`). An index - specifier should be a list of (key, direction) pairs. Raises - TypeError if index is not an instance of (str, unicode, list). - - .. warning:: - - if a custom name was used on index creation (by - passing the `name` parameter to :meth:`create_index`) the index - **must** be dropped by name. - - :param index_or_name: index (or name of index) to drop - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the createIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - name = index_or_name - if isinstance(index_or_name, list): - name = helpers._gen_index_name(index_or_name) - - if not isinstance(name, str): - raise TypeError("index_or_name must be an instance of str or list") - - cmd = {"dropIndexes": self.__name, "index": name} - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - with self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn: - self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - session=session, - ) - - def list_indexes( - self, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - ) -> CommandCursor[MutableMapping[str, Any]]: - """Get a cursor over the index documents for this collection. - - >>> for index in db.test.list_indexes(): - ... print(index) - ... - SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionadded:: 3.0 - """ - codec_options: CodecOptions = CodecOptions(SON) - coll = cast( - Collection[MutableMapping[str, Any]], - self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), - ) - read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - explicit_session = session is not None - - def _cmd( - session: Optional[ClientSession], - _server: Server, - conn: Connection, - read_preference: _ServerMode, - ) -> CommandCursor[MutableMapping[str, Any]]: - cmd = {"listIndexes": self.__name, "cursor": {}} - if comment is not None: - cmd["comment"] = comment - - try: - cursor = self._command(conn, cmd, read_preference, codec_options, session=session)[ - "cursor" - ] - except OperationFailure as exc: - # Ignore NamespaceNotFound errors to match the behavior - # of reading from *.system.indexes. - if exc.code != 26: - raise - cursor = {"id": 0, "firstBatch": []} - cmd_cursor = CommandCursor( - coll, - cursor, - conn.address, - session=session, - explicit_session=explicit_session, - comment=cmd.get("comment"), - ) - cmd_cursor._maybe_pin_connection(conn) - return cmd_cursor - - with self.__database.client._tmp_session(session, False) as s: - return self.__database.client._retryable_read( - _cmd, read_pref, s, operation=_Op.LIST_INDEXES - ) - - def index_information( - self, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - ) -> MutableMapping[str, Any]: - """Get information on this collection's indexes. - - Returns a dictionary where the keys are index names (as - returned by create_index()) and the values are dictionaries - containing information about each index. The dictionary is - guaranteed to contain at least a single key, ``"key"`` which - is a list of (key, direction) pairs specifying the index (as - passed to create_index()). It will also contain any other - metadata about the indexes, except for the ``"ns"`` and - ``"name"`` keys, which are cleaned. Example output might look - like this: - - >>> db.test.create_index("x", unique=True) - 'x_1' - >>> db.test.index_information() - {'_id_': {'key': [('_id', 1)]}, - 'x_1': {'unique': True, 'key': [('x', 1)]}} - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - cursor = self.list_indexes(session=session, comment=comment) - info = {} - for index in cursor: - index["key"] = list(index["key"].items()) - index = dict(index) # noqa: PLW2901 - info[index.pop("name")] = index - return info - - def list_search_indexes( - self, - name: Optional[str] = None, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[Mapping[str, Any]]: - """Return a cursor over search indexes for the current collection. - - :param name: If given, the name of the index to search - for. Only indexes with matching index names will be returned. - If not given, all search indexes for the current collection - will be returned. - :param session: a :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result - set. - - .. note:: requires a MongoDB server version 7.0+ Atlas cluster. - - .. versionadded:: 4.5 - """ - if name is None: - pipeline: _Pipeline = [{"$listSearchIndexes": {}}] - else: - pipeline = [{"$listSearchIndexes": {"name": name}}] - - coll = self.with_options( - codec_options=DEFAULT_CODEC_OPTIONS, - read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN, - read_concern=DEFAULT_READ_CONCERN, - ) - cmd = _CollectionAggregationCommand( - coll, - CommandCursor, - pipeline, - kwargs, - explicit_session=session is not None, - comment=comment, - user_fields={"cursor": {"firstBatch": 1}}, - ) - - return self.__database.client._retryable_read( - cmd.get_cursor, - cmd.get_read_preference(session), # type: ignore[arg-type] - session, - retryable=not cmd._performs_write, - operation=_Op.LIST_SEARCH_INDEX, - ) - - def create_search_index( - self, - model: Union[Mapping[str, Any], SearchIndexModel], - session: Optional[ClientSession] = None, - comment: Any = None, - **kwargs: Any, - ) -> str: - """Create a single search index for the current collection. - - :param model: The model for the new search index. - It can be given as a :class:`~pymongo.operations.SearchIndexModel` - instance or a dictionary with a model "definition" and optional - "name". - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the createSearchIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - :return: The name of the new search index. - - .. note:: requires a MongoDB server version 7.0+ Atlas cluster. - - .. versionadded:: 4.5 - """ - if not isinstance(model, SearchIndexModel): - model = SearchIndexModel(**model) - return self.create_search_indexes([model], session, comment, **kwargs)[0] - - def create_search_indexes( - self, - models: list[SearchIndexModel], - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> list[str]: - """Create multiple search indexes for the current collection. - - :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. - :param session: a :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the createSearchIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - :return: A list of the newly created search index names. - - .. note:: requires a MongoDB server version 7.0+ Atlas cluster. - - .. versionadded:: 4.5 - """ - if comment is not None: - kwargs["comment"] = comment - - def gen_indexes() -> Iterator[Mapping[str, Any]]: - for index in models: - if not isinstance(index, SearchIndexModel): - raise TypeError( - f"{index!r} is not an instance of pymongo.operations.SearchIndexModel" - ) - yield index.document - - cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())} - cmd.update(kwargs) - - with self._conn_for_writes(session, operation=_Op.CREATE_SEARCH_INDEXES) as conn: - resp = self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - ) - return [index["name"] for index in resp["indexesCreated"]] - - def drop_search_index( - self, - name: str, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> None: - """Delete a search index by index name. - - :param name: The name of the search index to be deleted. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the dropSearchIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: requires a MongoDB server version 7.0+ Atlas cluster. - - .. versionadded:: 4.5 - """ - cmd = {"dropSearchIndex": self.__name, "name": name} - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - with self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn: - self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found", 26], - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - ) - - def update_search_index( - self, - name: str, - definition: Mapping[str, Any], - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> None: - """Update a search index by replacing the existing index definition with the provided definition. - - :param name: The name of the search index to be updated. - :param definition: The new search index definition. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: optional arguments to the updateSearchIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: requires a MongoDB server version 7.0+ Atlas cluster. - - .. versionadded:: 4.5 - """ - cmd = {"updateSearchIndex": self.__name, "name": name, "definition": definition} - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - with self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn: - self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found", 26], - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - ) - - def options( - self, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - ) -> MutableMapping[str, Any]: - """Get the options set on this collection. - - Returns a dictionary of options and their values - see - :meth:`~pymongo.database.Database.create_collection` for more - information on the possible options. Returns an empty - dictionary if the collection has not been created yet. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - dbo = self.__database.client.get_database( - self.__database.name, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - cursor = dbo.list_collections( - session=session, filter={"name": self.__name}, comment=comment - ) - - result = None - for doc in cursor: - result = doc - break - - if not result: - return {} - - options = result.get("options", {}) - assert options is not None - if "create" in options: - del options["create"] - - return options - - @_csot.apply - def _aggregate( - self, - aggregation_command: Type[_AggregationCommand], - pipeline: _Pipeline, - cursor_class: Type[CommandCursor], - session: Optional[ClientSession], - explicit_session: bool, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[_DocumentType]: - if comment is not None: - kwargs["comment"] = comment - cmd = aggregation_command( - self, - cursor_class, - pipeline, - kwargs, - explicit_session, - let, - user_fields={"cursor": {"firstBatch": 1}}, - ) - - return self.__database.client._retryable_read( - cmd.get_cursor, - cmd.get_read_preference(session), # type: ignore[arg-type] - session, - retryable=not cmd._performs_write, - operation=_Op.AGGREGATE, - ) - - def aggregate( - self, - pipeline: _Pipeline, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[_DocumentType]: - """Perform an aggregation using the aggregation framework on this - collection. - - The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Collection`, except when ``$out`` or ``$merge`` are used on - MongoDB <5.0, in which case - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. - - .. note:: This method does not support the 'explain' option. Please - use `PyMongoExplain `_ - instead. An example is included in the :ref:`aggregate-examples` - documentation. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - :param pipeline: a list of aggregation pipeline stages - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: A dict of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. ``"$$var"``). This option is - only supported on MongoDB >= 5.0. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: extra `aggregate command`_ parameters. - - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. - - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - - :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result - set. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - Added ``let`` parameter. - Support $merge and $out executing on secondaries according to the - collection's :attr:`read_preference`. - .. versionchanged:: 4.0 - Removed the ``useCursor`` option. - .. versionchanged:: 3.9 - Apply this collection's read concern to pipelines containing the - `$out` stage when connected to MongoDB >= 4.2. - Added support for the ``$merge`` pipeline stage. - Aggregations that write always use read preference - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - .. versionchanged:: 3.6 - Added the `session` parameter. Added the `maxAwaitTimeMS` option. - Deprecated the `useCursor` option. - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. Support the `collation` option. - .. versionchanged:: 3.0 - The :meth:`aggregate` method always returns a CommandCursor. The - pipeline argument must be a list. - - .. seealso:: :doc:`/examples/aggregation` - - .. _aggregate command: - https://mongodb.com/docs/manual/reference/command/aggregate - """ - with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate( - _CollectionAggregationCommand, - pipeline, - CommandCursor, - session=s, - explicit_session=session is not None, - let=let, - comment=comment, - **kwargs, - ) - - def aggregate_raw_batches( - self, - pipeline: _Pipeline, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> RawBatchCursor[_DocumentType]: - """Perform an aggregation and retrieve batches of raw BSON. - - Similar to the :meth:`aggregate` method but returns a - :class:`~pymongo.cursor.RawBatchCursor`. - - This example demonstrates how to work with raw batches, but in practice - raw batches should be passed to an external library that can decode - BSON into another data type, rather than used with PyMongo's - :mod:`bson` module. - - >>> import bson - >>> cursor = db.test.aggregate_raw_batches([ - ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) - >>> for batch in cursor: - ... print(bson.decode_all(batch)) - - .. note:: aggregate_raw_batches does not support auto encryption. - - .. versionchanged:: 3.12 - Added session support. - - .. versionadded:: 3.6 - """ - # OP_MSG is required to support encryption. - if self.__database.client._encrypter: - raise InvalidOperation("aggregate_raw_batches does not support auto encryption") - if comment is not None: - kwargs["comment"] = comment - with self.__database.client._tmp_session(session, close=False) as s: - return cast( - RawBatchCursor[_DocumentType], - self._aggregate( - _CollectionRawAggregationCommand, - pipeline, - RawBatchCommandCursor, - session=s, - explicit_session=session is not None, - **kwargs, - ), - ) - - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional[ClientSession] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - show_expanded_events: Optional[bool] = None, - ) -> CollectionChangeStream[_DocumentType]: - """Watch changes on this collection. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.CollectionChangeStream` cursor which - iterates over changes on this collection. - - .. code-block:: python - - with db.collection.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.CollectionChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.CollectionChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error("...") - - For a precise description of the resume process see the - `change streams specification`_. - - .. note:: Using this helper method is preferred to directly calling - :meth:`~pymongo.collection.Collection.aggregate` with a - ``$changeStream`` stage, for the purpose of supporting - resumability. - - .. warning:: This Collection's :attr:`read_concern` must be - ``ReadConcern("majority")`` in order to use the ``$changeStream`` - stage. - - :param pipeline: A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - :param full_document: The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - :param full_document_before_change: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - :param resume_after: A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - :param max_await_time_ms: The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - :param batch_size: The maximum number of documents to return - per batch. - :param collation: The :class:`~pymongo.collation.Collation` - to use for the aggregation. - :param start_at_operation_time: If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param start_after: The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - :param comment: A user-provided comment to attach to this - command. - :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - - :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. - - .. versionchanged:: 4.3 - Added `show_expanded_events` parameter. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionchanged:: 3.7 - Added the ``start_at_operation_time`` parameter. - - .. versionadded:: 3.6 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md - """ - return CollectionChangeStream( - self, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - show_expanded_events, - ) - - @_csot.apply - def rename( - self, - new_name: str, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> MutableMapping[str, Any]: - """Rename this collection. - - If operating in auth mode, client must be authorized as an - admin to perform this operation. Raises :class:`TypeError` if - `new_name` is not an instance of :class:`str`. - Raises :class:`~pymongo.errors.InvalidName` - if `new_name` is not a valid collection name. - - :param new_name: new name for this collection - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: additional arguments to the rename command - may be passed as keyword arguments to this helper method - (i.e. ``dropTarget=True``) - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - if not isinstance(new_name, str): - raise TypeError("new_name must be an instance of str") - - if not new_name or ".." in new_name: - raise InvalidName("collection names cannot be empty") - if new_name[0] == "." or new_name[-1] == ".": - raise InvalidName("collection names must not start or end with '.'") - if "$" in new_name and not new_name.startswith("oplog.$main"): - raise InvalidName("collection names must not contain '$'") - - new_name = f"{self.__database.name}.{new_name}" - cmd = {"renameCollection": self.__full_name, "to": new_name} - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - write_concern = self._write_concern_for_cmd(cmd, session) - - with self._conn_for_writes(session, operation=_Op.RENAME) as conn: - with self.__database.client._tmp_session(session) as s: - return conn.command( - "admin", - cmd, - write_concern=write_concern, - parse_write_concern_error=True, - session=s, - client=self.__database.client, - ) - - def distinct( - self, - key: str, - filter: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> list: - """Get a list of distinct values for `key` among all documents - in this collection. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`str`. - - All optional distinct parameters should be passed as keyword arguments - to this method. Valid options include: - - - `maxTimeMS` (int): The maximum amount of time to allow the count - command to run, in milliseconds. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - The :meth:`distinct` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :param key: name of the field for which we want to get the distinct - values - :param filter: A query document that specifies the documents - from which to retrieve the distinct values. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: See list of options above. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Support the `collation` option. - - """ - if not isinstance(key, str): - raise TypeError("key must be an instance of str") - cmd = {"distinct": self.__name, "key": key} - if filter is not None: - if "query" in kwargs: - raise ConfigurationError("can't pass both filter and query") - kwargs["query"] = filter - collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - - def _cmd( - session: Optional[ClientSession], - _server: Server, - conn: Connection, - read_preference: Optional[_ServerMode], - ) -> list: - return self._command( - conn, - cmd, - read_preference=read_preference, - read_concern=self.read_concern, - collation=collation, - session=session, - user_fields={"values": 1}, - )["values"] - - return self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) - - def _write_concern_for_cmd( - self, cmd: Mapping[str, Any], session: Optional[ClientSession] - ) -> WriteConcern: - raw_wc = cmd.get("writeConcern") - if raw_wc is not None: - return WriteConcern(**raw_wc) - else: - return self._write_concern_for(session) - - def __find_and_modify( - self, - filter: Mapping[str, Any], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]], - sort: Optional[_IndexList], - upsert: Optional[bool] = None, - return_document: bool = ReturnDocument.BEFORE, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping] = None, - **kwargs: Any, - ) -> Any: - """Internal findAndModify helper.""" - common.validate_is_mapping("filter", filter) - if not isinstance(return_document, bool): - raise ValueError( - "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" - ) - collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd = {"findAndModify": self.__name, "query": filter, "new": return_document} - if let is not None: - common.validate_is_mapping("let", let) - cmd["let"] = let - cmd.update(kwargs) - if projection is not None: - cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") - if sort is not None: - cmd["sort"] = helpers._index_document(sort) - if upsert is not None: - validate_boolean("upsert", upsert) - cmd["upsert"] = upsert - if hint is not None: - if not isinstance(hint, str): - hint = helpers._index_document(hint) - - write_concern = self._write_concern_for_cmd(cmd, session) - - def _find_and_modify( - session: Optional[ClientSession], conn: Connection, retryable_write: bool - ) -> Any: - acknowledged = write_concern.acknowledged - if array_filters is not None: - if not acknowledged: - raise ConfigurationError( - "arrayFilters is unsupported for unacknowledged writes." - ) - cmd["arrayFilters"] = list(array_filters) - if hint is not None: - if conn.max_wire_version < 8: - raise ConfigurationError( - "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." - ) - elif not acknowledged and conn.max_wire_version < 9: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." - ) - cmd["hint"] = hint - out = self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=write_concern, - collation=collation, - session=session, - retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS, - ) - _check_write_command_response(out) - - return out.get("value") - - return self.__database.client._retryable_write( - write_concern.acknowledged, - _find_and_modify, - session, - operation=_Op.FIND_AND_MODIFY, - ) - - def find_one_and_delete( - self, - filter: Mapping[str, Any], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - sort: Optional[_IndexList] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _DocumentType: - """Finds a single document and deletes it, returning the document. - - >>> db.test.count_documents({'x': 1}) - 2 - >>> db.test.find_one_and_delete({'x': 1}) - {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} - >>> db.test.count_documents({'x': 1}) - 1 - - If multiple documents match *filter*, a *sort* can be applied. - - >>> for doc in db.test.find({'x': 1}): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> db.test.find_one_and_delete( - ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) - {'x': 1, '_id': 2} - - The *projection* option can be used to limit the fields returned. - - >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) - {'x': 1} - - :param filter: A query that matches the document to delete. - :param projection: a list of field names that should be - returned in the result document or a mapping specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a mapping to exclude fields from - the result (e.g. projection={'_id': False}). - :param sort: a list of (key, direction) pairs - specifying the sort order for the query. If multiple documents - match the query, they are sorted and the first is deleted. - :param hint: An index to use to support the query predicate - specified either by its string name, or in the same format as - passed to :meth:`~pymongo.collection.Collection.create_index` - (e.g. ``[('field', ASCENDING)]``). This option is only supported - on MongoDB 4.4 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - :param kwargs: additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - - .. versionchanged:: 4.1 - Added ``let`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.2 - Respects write concern. - - .. warning:: Starting in PyMongo 3.2, this command uses the - :class:`~pymongo.write_concern.WriteConcern` of this - :class:`~pymongo.collection.Collection` when connected to MongoDB >= - 3.2. Note that using an elevated write concern with this command may - be slower compared to using the default write concern. - - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionadded:: 3.0 - """ - kwargs["remove"] = True - if comment is not None: - kwargs["comment"] = comment - return self.__find_and_modify( - filter, projection, sort, let=let, hint=hint, session=session, **kwargs - ) - - def find_one_and_replace( - self, - filter: Mapping[str, Any], - replacement: Mapping[str, Any], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - sort: Optional[_IndexList] = None, - upsert: bool = False, - return_document: bool = ReturnDocument.BEFORE, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _DocumentType: - """Finds a single document and replaces it, returning either the - original or the replaced document. - - The :meth:`find_one_and_replace` method differs from - :meth:`find_one_and_update` by replacing the document matched by - *filter*, rather than modifying the existing document. - - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) - {'x': 1, '_id': 0} - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'y': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - - :param filter: A query that matches the document to replace. - :param replacement: The replacement document. - :param projection: A list of field names that should be - returned in the result document or a mapping specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a mapping to exclude fields from - the result (e.g. projection={'_id': False}). - :param sort: a list of (key, direction) pairs - specifying the sort order for the query. If multiple documents - match the query, they are sorted and the first is replaced. - :param upsert: When ``True``, inserts a new document if no - document matches the query. Defaults to ``False``. - :param return_document: If - :attr:`ReturnDocument.BEFORE` (the default), - returns the original document before it was replaced, or ``None`` - if no document matches. If - :attr:`ReturnDocument.AFTER`, returns the replaced - or inserted document. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - :param kwargs: additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - - .. versionchanged:: 4.1 - Added ``let`` parameter. - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the ``collation`` option. - .. versionchanged:: 3.2 - Respects write concern. - - .. warning:: Starting in PyMongo 3.2, this command uses the - :class:`~pymongo.write_concern.WriteConcern` of this - :class:`~pymongo.collection.Collection` when connected to MongoDB >= - 3.2. Note that using an elevated write concern with this command may - be slower compared to using the default write concern. - - .. versionadded:: 3.0 - """ - common.validate_ok_for_replace(replacement) - kwargs["update"] = replacement - if comment is not None: - kwargs["comment"] = comment - return self.__find_and_modify( - filter, - projection, - sort, - upsert, - return_document, - let=let, - hint=hint, - session=session, - **kwargs, - ) - - def find_one_and_update( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - sort: Optional[_IndexList] = None, - upsert: bool = False, - return_document: bool = ReturnDocument.BEFORE, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional[ClientSession] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _DocumentType: - """Finds a single document and updates it, returning either the - original or the updated document. - - >>> db.test.find_one_and_update( - ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) - {'_id': 665, 'done': False, 'count': 25}} - - Returns ``None`` if no document matches the filter. - - >>> db.test.find_one_and_update( - ... {'_exists': False}, {'$inc': {'count': 1}}) - - When the filter matches, by default :meth:`find_one_and_update` - returns the original version of the document before the update was - applied. To return the updated (or inserted in the case of - *upsert*) version of the document instead, use the *return_document* - option. - - >>> from pymongo import ReturnDocument - >>> db.example.find_one_and_update( - ... {'_id': 'userid'}, - ... {'$inc': {'seq': 1}}, - ... return_document=ReturnDocument.AFTER) - {'_id': 'userid', 'seq': 1} - - You can limit the fields returned with the *projection* option. - - >>> db.example.find_one_and_update( - ... {'_id': 'userid'}, - ... {'$inc': {'seq': 1}}, - ... projection={'seq': True, '_id': False}, - ... return_document=ReturnDocument.AFTER) - {'seq': 2} - - The *upsert* option can be used to create the document if it doesn't - already exist. - - >>> db.example.delete_many({}).deleted_count - 1 - >>> db.example.find_one_and_update( - ... {'_id': 'userid'}, - ... {'$inc': {'seq': 1}}, - ... projection={'seq': True, '_id': False}, - ... upsert=True, - ... return_document=ReturnDocument.AFTER) - {'seq': 1} - - If multiple documents match *filter*, a *sort* can be applied. - - >>> for doc in db.test.find({'done': True}): - ... print(doc) - ... - {'_id': 665, 'done': True, 'result': {'count': 26}} - {'_id': 701, 'done': True, 'result': {'count': 17}} - >>> db.test.find_one_and_update( - ... {'done': True}, - ... {'$set': {'final': True}}, - ... sort=[('_id', pymongo.DESCENDING)]) - {'_id': 701, 'done': True, 'result': {'count': 17}} - - :param filter: A query that matches the document to update. - :param update: The update operations to apply. - :param projection: A list of field names that should be - returned in the result document or a mapping specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a dict to exclude fields from - the result (e.g. projection={'_id': False}). - :param sort: a list of (key, direction) pairs - specifying the sort order for the query. If multiple documents - match the query, they are sorted and the first is updated. - :param upsert: When ``True``, inserts a new document if no - document matches the query. Defaults to ``False``. - :param return_document: If - :attr:`ReturnDocument.BEFORE` (the default), - returns the original document before it was updated. If - :attr:`ReturnDocument.AFTER`, returns the updated - or inserted document. - :param array_filters: A list of filters specifying which - array elements an update should apply. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param let: Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - :param comment: A user-provided comment to attach to this - command. - :param kwargs: additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the ``update``. - .. versionchanged:: 3.6 - Added the ``array_filters`` and ``session`` options. - .. versionchanged:: 3.4 - Added the ``collation`` option. - .. versionchanged:: 3.2 - Respects write concern. - - .. warning:: Starting in PyMongo 3.2, this command uses the - :class:`~pymongo.write_concern.WriteConcern` of this - :class:`~pymongo.collection.Collection` when connected to MongoDB >= - 3.2. Note that using an elevated write concern with this command may - be slower compared to using the default write concern. - - .. versionadded:: 3.0 - """ - common.validate_ok_for_update(update) - common.validate_list_or_none("array_filters", array_filters) - kwargs["update"] = update - if comment is not None: - kwargs["comment"] = comment - return self.__find_and_modify( - filter, - projection, - sort, - upsert, - return_document, - array_filters, - hint=hint, - let=let, - session=session, - **kwargs, - ) - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'Collection' object is not iterable") - - next = __next__ - - def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: - """This is only here so that some API misusages are easier to debug.""" - if "." not in self.__name: - raise TypeError( - "'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % self.__name - ) - raise TypeError( - "'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists." % self.__name.split(".")[-1] - ) +__doc__ = original_doc diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 0411a45abe..d9ca3ee405 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -1,4 +1,4 @@ -# Copyright 2014-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,390 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""CommandCursor class to iterate over command results.""" +"""Re-import of synchronous CommandCursor API for compatibility.""" from __future__ import annotations -from collections import deque -from typing import ( - TYPE_CHECKING, - Any, - Generic, - Iterator, - Mapping, - NoReturn, - Optional, - Sequence, - Union, -) +from pymongo.synchronous.command_cursor import * # noqa: F403 +from pymongo.synchronous.command_cursor import __doc__ as original_doc -from bson import CodecOptions, _convert_raw_document_lists_to_streams -from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _ConnectionManager -from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure -from pymongo.message import _CursorAddress, _GetMore, _OpMsg, _OpReply, _RawBatchGetMore -from pymongo.response import PinnedResponse -from pymongo.typings import _Address, _DocumentOut, _DocumentType - -if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.pool import Connection - - -class CommandCursor(Generic[_DocumentType]): - """A cursor / iterator over command cursors.""" - - _getmore_class = _GetMore - - def __init__( - self, - collection: Collection[_DocumentType], - cursor_info: Mapping[str, Any], - address: Optional[_Address], - batch_size: int = 0, - max_await_time_ms: Optional[int] = None, - session: Optional[ClientSession] = None, - explicit_session: bool = False, - comment: Any = None, - ) -> None: - """Create a new command cursor.""" - self.__sock_mgr: Any = None - self.__collection: Collection[_DocumentType] = collection - self.__id = cursor_info["id"] - self.__data = deque(cursor_info["firstBatch"]) - self.__postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( - "postBatchResumeToken" - ) - self.__address = address - self.__batch_size = batch_size - self.__max_await_time_ms = max_await_time_ms - self.__session = session - self.__explicit_session = explicit_session - self.__killed = self.__id == 0 - self.__comment = comment - if self.__killed: - self.__end_session(True) - - if "ns" in cursor_info: # noqa: SIM401 - self.__ns = cursor_info["ns"] - else: - self.__ns = collection.full_name - - self.batch_size(batch_size) - - if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") - - def __del__(self) -> None: - self.__die() - - def __die(self, synchronous: bool = False) -> None: - """Closes this cursor.""" - already_killed = self.__killed - self.__killed = True - if self.__id and not already_killed: - cursor_id = self.__id - assert self.__address is not None - address = _CursorAddress(self.__address, self.__ns) - else: - # Skip killCursors. - cursor_id = 0 - address = None - self.__collection.database.client._cleanup_cursor( - synchronous, - cursor_id, - address, - self.__sock_mgr, - self.__session, - self.__explicit_session, - ) - if not self.__explicit_session: - self.__session = None - self.__sock_mgr = None - - def __end_session(self, synchronous: bool) -> None: - if self.__session and not self.__explicit_session: - self.__session._end_session(lock=synchronous) - self.__session = None - - def close(self) -> None: - """Explicitly close / kill this cursor.""" - self.__die(True) - - def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: - """Limits the number of documents returned in one batch. Each batch - requires a round trip to the server. It can be adjusted to optimize - performance and limit data transfer. - - .. note:: batch_size can not override MongoDB's internal limits on the - amount of data it will return to the client in a single batch (i.e - if you set batch size to 1,000,000,000, MongoDB will currently only - return 4-16MB of results per batch). - - Raises :exc:`TypeError` if `batch_size` is not an integer. - Raises :exc:`ValueError` if `batch_size` is less than ``0``. - - :param batch_size: The size of each batch of results requested. - """ - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - - self.__batch_size = batch_size == 1 and 2 or batch_size - return self - - def _has_next(self) -> bool: - """Returns `True` if the cursor has documents remaining from the - previous batch. - """ - return len(self.__data) > 0 - - @property - def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: - """Retrieve the postBatchResumeToken from the response to a - changeStream aggregate or getMore. - """ - return self.__postbatchresumetoken - - def _maybe_pin_connection(self, conn: Connection) -> None: - client = self.__collection.database.client - if not client._should_pin_cursor(self.__session): - return - if not self.__sock_mgr: - conn.pin_cursor() - conn_mgr = _ConnectionManager(conn, False) - # Ensure the connection gets returned when the entire result is - # returned in the first batch. - if self.__id == 0: - conn_mgr.close() - else: - self.__sock_mgr = conn_mgr - - def __send_message(self, operation: _GetMore) -> None: - """Send a getmore message and handle the response.""" - client = self.__collection.database.client - try: - response = client._run_operation( - operation, self._unpack_response, address=self.__address - ) - except OperationFailure as exc: - if exc.code in _CURSOR_CLOSED_ERRORS: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - if exc.timeout: - self.__die(False) - else: - # Return the session and pinned connection, if necessary. - self.close() - raise - except ConnectionFailure: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - # Return the session and pinned connection, if necessary. - self.close() - raise - except Exception: - self.close() - raise - - if isinstance(response, PinnedResponse): - if not self.__sock_mgr: - self.__sock_mgr = _ConnectionManager(response.conn, response.more_to_come) - if response.from_command: - cursor = response.docs[0]["cursor"] - documents = cursor["nextBatch"] - self.__postbatchresumetoken = cursor.get("postBatchResumeToken") - self.__id = cursor["id"] - else: - documents = response.docs - assert isinstance(response.data, _OpReply) - self.__id = response.data.cursor_id - - if self.__id == 0: - self.close() - self.__data = deque(documents) - - def _unpack_response( - self, - response: Union[_OpReply, _OpMsg], - cursor_id: Optional[int], - codec_options: CodecOptions[Mapping[str, Any]], - user_fields: Optional[Mapping[str, Any]] = None, - legacy_response: bool = False, - ) -> Sequence[_DocumentOut]: - return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) - - def _refresh(self) -> int: - """Refreshes the cursor with more data from the server. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. - """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if self.__id: # Get More - dbname, collname = self.__ns.split(".", 1) - read_pref = self.__collection._read_preference_for(self.session) - self.__send_message( - self._getmore_class( - dbname, - collname, - self.__batch_size, - self.__id, - self.__collection.codec_options, - read_pref, - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - False, - self.__comment, - ) - ) - else: # Cursor id is zero nothing else to return - self.__die(True) - - return len(self.__data) - - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - Even if :attr:`alive` is ``True``, :meth:`next` can raise - :exc:`StopIteration`. Best to use a for loop:: - - for doc in collection.aggregate(pipeline): - print(doc) - - .. note:: :attr:`alive` can be True while iterating a cursor from - a failed server. In this case :attr:`alive` will return False after - :meth:`next` fails to retrieve the next batch of results from the - server. - """ - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self) -> int: - """Returns the id of the cursor.""" - return self.__id - - @property - def address(self) -> Optional[_Address]: - """The (host, port) of the server used, or None. - - .. versionadded:: 3.0 - """ - return self.__address - - @property - def session(self) -> Optional[ClientSession]: - """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. - - .. versionadded:: 3.6 - """ - if self.__explicit_session: - return self.__session - return None - - def __iter__(self) -> Iterator[_DocumentType]: - return self - - def next(self) -> _DocumentType: - """Advance the cursor.""" - # Block until a document is returnable. - while self.alive: - doc = self._try_next(True) - if doc is not None: - return doc - - raise StopIteration - - __next__ = next - - def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: - """Advance the cursor blocking for at most one getMore command.""" - if not len(self.__data) and not self.__killed and get_more_allowed: - self._refresh() - if len(self.__data): - return self.__data.popleft() - else: - return None - - def try_next(self) -> Optional[_DocumentType]: - """Advance the cursor without blocking indefinitely. - - This method returns the next document without waiting - indefinitely for data. - - If no document is cached locally then this method runs a single - getMore command. If the getMore yields any documents, the next - document is returned, otherwise, if the getMore returns no documents - (because there is no additional data) then ``None`` is returned. - - :return: The next document or ``None`` when no document is available - after running a single getMore or when the cursor is closed. - - .. versionadded:: 4.5 - """ - return self._try_next(get_more_allowed=True) - - def __enter__(self) -> CommandCursor[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - -class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): - _getmore_class = _RawBatchGetMore - - def __init__( - self, - collection: Collection[_DocumentType], - cursor_info: Mapping[str, Any], - address: Optional[_Address], - batch_size: int = 0, - max_await_time_ms: Optional[int] = None, - session: Optional[ClientSession] = None, - explicit_session: bool = False, - comment: Any = None, - ) -> None: - """Create a new cursor / iterator over raw batches of BSON data. - - Should not be called directly by application developers - - see :meth:`~pymongo.collection.Collection.aggregate_raw_batches` - instead. - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - assert not cursor_info.get("firstBatch") - super().__init__( - collection, - cursor_info, - address, - batch_size, - max_await_time_ms, - session, - explicit_session, - comment, - ) - - def _unpack_response( # type: ignore[override] - self, - response: Union[_OpReply, _OpMsg], - cursor_id: Optional[int], - codec_options: CodecOptions, - user_fields: Optional[Mapping[str, Any]] = None, - legacy_response: bool = False, - ) -> list[Mapping[str, Any]]: - raw_response = response.raw_response(cursor_id, user_fields=user_fields) - if not legacy_response: - # OP_MSG returns firstBatch/nextBatch documents as a BSON array - # Re-assemble the array of documents into a document stream - _convert_raw_document_lists_to_streams(raw_response[0]) - return raw_response # type: ignore[return-value] - - def __getitem__(self, index: int) -> NoReturn: - raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") +__doc__ = original_doc diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 3151fcaf3d..b3ac54c971 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1,4 +1,4 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,1346 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Cursor class to iterate over Mongo query results.""" +"""Re-import of synchronous Cursor API for compatibility.""" from __future__ import annotations -import copy -import warnings -from collections import deque -from typing import ( - TYPE_CHECKING, - Any, - Generic, - Iterable, - List, - Mapping, - NoReturn, - Optional, - Sequence, - Tuple, - Union, - cast, - overload, -) +from pymongo.cursor_shared import * # noqa: F403 +from pymongo.synchronous.cursor import * # noqa: F403 +from pymongo.synchronous.cursor import __doc__ as original_doc -from bson import RE_TYPE, _convert_raw_document_lists_to_streams -from bson.code import Code -from bson.son import SON -from pymongo import helpers -from pymongo.collation import validate_collation_or_none -from pymongo.common import ( - validate_is_document_type, - validate_is_mapping, -) -from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure -from pymongo.lock import _create_lock -from pymongo.message import ( - _CursorAddress, - _GetMore, - _OpMsg, - _OpReply, - _Query, - _RawBatchGetMore, - _RawBatchQuery, -) -from pymongo.response import PinnedResponse -from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType -from pymongo.write_concern import validate_boolean - -if TYPE_CHECKING: - from _typeshed import SupportsItems - - from bson.codec_options import CodecOptions - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.pool import Connection - from pymongo.read_preferences import _ServerMode - - -# These errors mean that the server has already killed the cursor so there is -# no need to send killCursors. -_CURSOR_CLOSED_ERRORS = frozenset( - [ - 43, # CursorNotFound - 175, # QueryPlanKilled - 237, # CursorKilled - # On a tailable cursor, the following errors mean the capped collection - # rolled over. - # MongoDB 2.6: - # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} - 28617, - # MongoDB 3.0: - # {'$err': 'getMore executor error: UnknownError no details available', - # 'code': 17406, 'ok': 0} - 17406, - # MongoDB 3.2 + 3.4: - # {'ok': 0.0, 'errmsg': 'GetMore command executor error: - # CappedPositionLost: CollectionScan died due to failure to restore - # tailable cursor position. Last seen record id: RecordId(3)', - # 'code': 96} - 96, - # MongoDB 3.6+: - # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to - # restore tailable cursor position. Last seen record id: RecordId(3)"', - # 'code': 136, 'codeName': 'CappedPositionLost'} - 136, - ] -) - -_QUERY_OPTIONS = { - "tailable_cursor": 2, - "secondary_okay": 4, - "oplog_replay": 8, - "no_timeout": 16, - "await_data": 32, - "exhaust": 64, - "partial": 128, -} - - -class CursorType: - NON_TAILABLE = 0 - """The standard cursor type.""" - - TAILABLE = _QUERY_OPTIONS["tailable_cursor"] - """The tailable cursor type. - - Tailable cursors are only for use with capped collections. They are not - closed when the last data is retrieved but are kept open and the cursor - location marks the final document position. If more data is received - iteration of the cursor will continue from the last document received. - """ - - TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"] - """A tailable cursor with the await option set. - - Creates a tailable cursor that will wait for a few seconds after returning - the full result set so that it can capture and return additional data added - during the query. - """ - - EXHAUST = _QUERY_OPTIONS["exhaust"] - """An exhaust cursor. - - MongoDB will stream batched results to the client without waiting for the - client to request each batch, reducing latency. - """ - - -class _ConnectionManager: - """Used with exhaust cursors to ensure the connection is returned.""" - - def __init__(self, conn: Connection, more_to_come: bool): - self.conn: Optional[Connection] = conn - self.more_to_come = more_to_come - self.lock = _create_lock() - - def update_exhaust(self, more_to_come: bool) -> None: - self.more_to_come = more_to_come - - def close(self) -> None: - """Return this instance's connection to the connection pool.""" - if self.conn: - self.conn.unpin() - self.conn = None - - -_Sort = Union[ - Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] -] -_Hint = Union[str, _Sort] - - -class Cursor(Generic[_DocumentType]): - """A cursor / iterator over Mongo query results.""" - - _query_class = _Query - _getmore_class = _GetMore - - def __init__( - self, - collection: Collection[_DocumentType], - filter: Optional[Mapping[str, Any]] = None, - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - skip: int = 0, - limit: int = 0, - no_cursor_timeout: bool = False, - cursor_type: int = CursorType.NON_TAILABLE, - sort: Optional[_Sort] = None, - allow_partial_results: bool = False, - oplog_replay: bool = False, - batch_size: int = 0, - collation: Optional[_CollationIn] = None, - hint: Optional[_Hint] = None, - max_scan: Optional[int] = None, - max_time_ms: Optional[int] = None, - max: Optional[_Sort] = None, - min: Optional[_Sort] = None, - return_key: Optional[bool] = None, - show_record_id: Optional[bool] = None, - snapshot: Optional[bool] = None, - comment: Optional[Any] = None, - session: Optional[ClientSession] = None, - allow_disk_use: Optional[bool] = None, - let: Optional[bool] = None, - ) -> None: - """Create a new cursor. - - Should not be called directly by application developers - see - :meth:`~pymongo.collection.Collection.find` instead. - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - # Initialize all attributes used in __del__ before possibly raising - # an error to avoid attribute errors during garbage collection. - self.__collection: Collection[_DocumentType] = collection - self.__id: Any = None - self.__exhaust = False - self.__sock_mgr: Any = None - self.__killed = False - self.__session: Optional[ClientSession] - - if session: - self.__session = session - self.__explicit_session = True - else: - self.__session = None - self.__explicit_session = False - - spec: Mapping[str, Any] = filter or {} - validate_is_mapping("filter", spec) - if not isinstance(skip, int): - raise TypeError("skip must be an instance of int") - if not isinstance(limit, int): - raise TypeError("limit must be an instance of int") - validate_boolean("no_cursor_timeout", no_cursor_timeout) - if no_cursor_timeout and not self.__explicit_session: - warnings.warn( - "use an explicit session with no_cursor_timeout=True " - "otherwise the cursor may still timeout after " - "30 minutes, for more info see " - "https://mongodb.com/docs/v4.4/reference/method/" - "cursor.noCursorTimeout/" - "#session-idle-timeout-overrides-nocursortimeout", - UserWarning, - stacklevel=2, - ) - if cursor_type not in ( - CursorType.NON_TAILABLE, - CursorType.TAILABLE, - CursorType.TAILABLE_AWAIT, - CursorType.EXHAUST, - ): - raise ValueError("not a valid value for cursor_type") - validate_boolean("allow_partial_results", allow_partial_results) - validate_boolean("oplog_replay", oplog_replay) - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - # Only set if allow_disk_use is provided by the user, else None. - if allow_disk_use is not None: - allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) - - if projection is not None: - projection = helpers._fields_list_to_dict(projection, "projection") - - if let is not None: - validate_is_document_type("let", let) - - self.__let = let - self.__spec = spec - self.__has_filter = filter is not None - self.__projection = projection - self.__skip = skip - self.__limit = limit - self.__batch_size = batch_size - self.__ordering = sort and helpers._index_document(sort) or None - self.__max_scan = max_scan - self.__explain = False - self.__comment = comment - self.__max_time_ms = max_time_ms - self.__max_await_time_ms: Optional[int] = None - self.__max: Optional[Union[dict[Any, Any], _Sort]] = max - self.__min: Optional[Union[dict[Any, Any], _Sort]] = min - self.__collation = validate_collation_or_none(collation) - self.__return_key = return_key - self.__show_record_id = show_record_id - self.__allow_disk_use = allow_disk_use - self.__snapshot = snapshot - self.__hint: Union[str, dict[str, Any], None] - self.__set_hint(hint) - - # Exhaust cursor support - if cursor_type == CursorType.EXHAUST: - if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are not supported by mongos") - if limit: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__exhaust = True - - # This is ugly. People want to be able to do cursor[5:5] and - # get an empty result set (old behavior was an - # exception). It's hard to do that right, though, because the - # server uses limit(0) to mean 'no limit'. So we set __empty - # in that case and check for it when iterating. We also unset - # it anytime we change __limit. - self.__empty = False - - self.__data: deque = deque() - self.__address: Optional[_Address] = None - self.__retrieved = 0 - - self.__codec_options = collection.codec_options - # Read preference is set when the initial find is sent. - self.__read_preference: Optional[_ServerMode] = None - self.__read_concern = collection.read_concern - - self.__query_flags = cursor_type - if no_cursor_timeout: - self.__query_flags |= _QUERY_OPTIONS["no_timeout"] - if allow_partial_results: - self.__query_flags |= _QUERY_OPTIONS["partial"] - if oplog_replay: - self.__query_flags |= _QUERY_OPTIONS["oplog_replay"] - - # The namespace to use for find/getMore commands. - self.__dbname = collection.database.name - self.__collname = collection.name - - @property - def collection(self) -> Collection[_DocumentType]: - """The :class:`~pymongo.collection.Collection` that this - :class:`Cursor` is iterating. - """ - return self.__collection - - @property - def retrieved(self) -> int: - """The number of documents retrieved so far.""" - return self.__retrieved - - def __del__(self) -> None: - self.__die() - - def rewind(self) -> Cursor[_DocumentType]: - """Rewind this cursor to its unevaluated state. - - Reset this cursor if it has been partially or completely evaluated. - Any options that are present on the cursor will remain in effect. - Future iterating performed on this cursor will cause new queries to - be sent to the server, even if the resultant data has already been - retrieved by this cursor. - """ - self.close() - self.__data = deque() - self.__id = None - self.__address = None - self.__retrieved = 0 - self.__killed = False - - return self - - def clone(self) -> Cursor[_DocumentType]: - """Get a clone of this cursor. - - Returns a new Cursor instance with options matching those that have - been set on the current instance. The clone will be completely - unevaluated, even if the current instance has been partially or - completely evaluated. - """ - return self._clone(True) - - def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: - """Internal clone helper.""" - if not base: - if self.__explicit_session: - base = self._clone_base(self.__session) - else: - base = self._clone_base(None) - - values_to_clone = ( - "spec", - "projection", - "skip", - "limit", - "max_time_ms", - "max_await_time_ms", - "comment", - "max", - "min", - "ordering", - "explain", - "hint", - "batch_size", - "max_scan", - "query_flags", - "collation", - "empty", - "show_record_id", - "return_key", - "allow_disk_use", - "snapshot", - "exhaust", - "has_filter", - ) - data = { - k: v - for k, v in self.__dict__.items() - if k.startswith("_Cursor__") and k[9:] in values_to_clone - } - if deepcopy: - data = self._deepcopy(data) - base.__dict__.update(data) - return base - - def _clone_base(self, session: Optional[ClientSession]) -> Cursor: - """Creates an empty Cursor object for information to be copied into.""" - return self.__class__(self.__collection, session=session) - - def __die(self, synchronous: bool = False) -> None: - """Closes this cursor.""" - try: - already_killed = self.__killed - except AttributeError: - # __init__ did not run to completion (or at all). - return - - self.__killed = True - if self.__id and not already_killed: - cursor_id = self.__id - assert self.__address is not None - address = _CursorAddress(self.__address, f"{self.__dbname}.{self.__collname}") - else: - # Skip killCursors. - cursor_id = 0 - address = None - self.__collection.database.client._cleanup_cursor( - synchronous, - cursor_id, - address, - self.__sock_mgr, - self.__session, - self.__explicit_session, - ) - if not self.__explicit_session: - self.__session = None - self.__sock_mgr = None - - def close(self) -> None: - """Explicitly close / kill this cursor.""" - self.__die(True) - - def __query_spec(self) -> Mapping[str, Any]: - """Get the spec to use for a query.""" - operators: dict[str, Any] = {} - if self.__ordering: - operators["$orderby"] = self.__ordering - if self.__explain: - operators["$explain"] = True - if self.__hint: - operators["$hint"] = self.__hint - if self.__let: - operators["let"] = self.__let - if self.__comment: - operators["$comment"] = self.__comment - if self.__max_scan: - operators["$maxScan"] = self.__max_scan - if self.__max_time_ms is not None: - operators["$maxTimeMS"] = self.__max_time_ms - if self.__max: - operators["$max"] = self.__max - if self.__min: - operators["$min"] = self.__min - if self.__return_key is not None: - operators["$returnKey"] = self.__return_key - if self.__show_record_id is not None: - # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. - operators["$showDiskLoc"] = self.__show_record_id - if self.__snapshot is not None: - operators["$snapshot"] = self.__snapshot - - if operators: - # Make a shallow copy so we can cleanly rewind or clone. - spec = dict(self.__spec) - - # Allow-listed commands must be wrapped in $query. - if "$query" not in spec: - # $query has to come first - spec = {"$query": spec} - - spec.update(operators) - return spec - # Have to wrap with $query if "query" is the first key. - # We can't just use $query anytime "query" is a key as - # that breaks commands like count and find_and_modify. - # Checking spec.keys()[0] covers the case that the spec - # was passed as an instance of SON or OrderedDict. - elif "query" in self.__spec and ( - len(self.__spec) == 1 or next(iter(self.__spec)) == "query" - ): - return {"$query": self.__spec} - - return self.__spec - - def __check_okay_to_chain(self) -> None: - """Check if it is okay to chain more options onto this cursor.""" - if self.__retrieved or self.__id is not None: - raise InvalidOperation("cannot set options after executing query") - - def add_option(self, mask: int) -> Cursor[_DocumentType]: - """Set arbitrary query flags using a bitmask. - - To set the tailable flag: - cursor.add_option(2) - """ - if not isinstance(mask, int): - raise TypeError("mask must be an int") - self.__check_okay_to_chain() - - if mask & _QUERY_OPTIONS["exhaust"]: - if self.__limit: - raise InvalidOperation("Can't use limit and exhaust together.") - if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are not supported by mongos") - self.__exhaust = True - - self.__query_flags |= mask - return self - - def remove_option(self, mask: int) -> Cursor[_DocumentType]: - """Unset arbitrary query flags using a bitmask. - - To unset the tailable flag: - cursor.remove_option(2) - """ - if not isinstance(mask, int): - raise TypeError("mask must be an int") - self.__check_okay_to_chain() - - if mask & _QUERY_OPTIONS["exhaust"]: - self.__exhaust = False - - self.__query_flags &= ~mask - return self - - def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: - """Specifies whether MongoDB can use temporary disk files while - processing a blocking sort operation. - - Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. - - .. note:: `allow_disk_use` requires server version **>= 4.4** - - :param allow_disk_use: if True, MongoDB may use temporary - disk files to store data exceeding the system memory limit while - processing a blocking sort operation. - - .. versionadded:: 3.11 - """ - if not isinstance(allow_disk_use, bool): - raise TypeError("allow_disk_use must be a bool") - self.__check_okay_to_chain() - - self.__allow_disk_use = allow_disk_use - return self - - def limit(self, limit: int) -> Cursor[_DocumentType]: - """Limits the number of results to be returned by this cursor. - - Raises :exc:`TypeError` if `limit` is not an integer. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` - has already been used. The last `limit` applied to this cursor - takes precedence. A limit of ``0`` is equivalent to no limit. - - :param limit: the number of results to return - - .. seealso:: The MongoDB documentation on `limit `_. - """ - if not isinstance(limit, int): - raise TypeError("limit must be an integer") - if self.__exhaust: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__check_okay_to_chain() - - self.__empty = False - self.__limit = limit - return self - - def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: - """Limits the number of documents returned in one batch. Each batch - requires a round trip to the server. It can be adjusted to optimize - performance and limit data transfer. - - .. note:: batch_size can not override MongoDB's internal limits on the - amount of data it will return to the client in a single batch (i.e - if you set batch size to 1,000,000,000, MongoDB will currently only - return 4-16MB of results per batch). - - Raises :exc:`TypeError` if `batch_size` is not an integer. - Raises :exc:`ValueError` if `batch_size` is less than ``0``. - Raises :exc:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. The last `batch_size` - applied to this cursor takes precedence. - - :param batch_size: The size of each batch of results requested. - """ - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - self.__check_okay_to_chain() - - self.__batch_size = batch_size - return self - - def skip(self, skip: int) -> Cursor[_DocumentType]: - """Skips the first `skip` results of this cursor. - - Raises :exc:`TypeError` if `skip` is not an integer. Raises - :exc:`ValueError` if `skip` is less than ``0``. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has - already been used. The last `skip` applied to this cursor takes - precedence. - - :param skip: the number of results to skip - """ - if not isinstance(skip, int): - raise TypeError("skip must be an integer") - if skip < 0: - raise ValueError("skip must be >= 0") - self.__check_okay_to_chain() - - self.__skip = skip - return self - - def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: - """Specifies a time limit for a query operation. If the specified - time is exceeded, the operation will be aborted and - :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` - is ``None`` no limit is applied. - - Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. - Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` - has already been used. - - :param max_time_ms: the time limit after which the operation is aborted - """ - if not isinstance(max_time_ms, int) and max_time_ms is not None: - raise TypeError("max_time_ms must be an integer or None") - self.__check_okay_to_chain() - - self.__max_time_ms = max_time_ms - return self - - def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: - """Specifies a time limit for a getMore operation on a - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other - types of cursor max_await_time_ms is ignored. - - Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or - ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. - - .. note:: `max_await_time_ms` requires server version **>= 3.2** - - :param max_await_time_ms: the time limit after which the operation is - aborted - - .. versionadded:: 3.2 - """ - if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") - self.__check_okay_to_chain() - - # Ignore max_await_time_ms if not tailable or await_data is False. - if self.__query_flags & CursorType.TAILABLE_AWAIT: - self.__max_await_time_ms = max_await_time_ms - - return self - - @overload - def __getitem__(self, index: int) -> _DocumentType: - ... - - @overload - def __getitem__(self, index: slice) -> Cursor[_DocumentType]: - ... - - def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_DocumentType]]: - """Get a single document or a slice of documents from this cursor. - - .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each - index access or slice requires that a new query be run using skip - and limit. Do not iterate the cursor using index accesses. - The following example is **extremely inefficient** and may return - surprising results:: - - cursor = db.collection.find() - # Warning: This runs a new query for each document. - # Don't do this! - for idx in range(10): - print(cursor[idx]) - - Raises :class:`~pymongo.errors.InvalidOperation` if this - cursor has already been used. - - To get a single document use an integral index, e.g.:: - - >>> db.test.find()[50] - - An :class:`IndexError` will be raised if the index is negative - or greater than the amount of documents in this cursor. Any - limit previously applied to this cursor will be ignored. - - To get a slice of documents use a slice index, e.g.:: - - >>> db.test.find()[20:25] - - This will return this cursor with a limit of ``5`` and skip of - ``20`` applied. Using a slice index will override any prior - limits or skips applied to this cursor (including those - applied through previous calls to this method). Raises - :class:`IndexError` when the slice has a step, a negative - start value, or a stop value less than or equal to the start - value. - - :param index: An integer or slice index to be applied to this cursor - """ - self.__check_okay_to_chain() - self.__empty = False - if isinstance(index, slice): - if index.step is not None: - raise IndexError("Cursor instances do not support slice steps") - - skip = 0 - if index.start is not None: - if index.start < 0: - raise IndexError("Cursor instances do not support negative indices") - skip = index.start - - if index.stop is not None: - limit = index.stop - skip - if limit < 0: - raise IndexError( - "stop index must be greater than start index for slice %r" % index - ) - if limit == 0: - self.__empty = True - else: - limit = 0 - - self.__skip = skip - self.__limit = limit - return self - - if isinstance(index, int): - if index < 0: - raise IndexError("Cursor instances do not support negative indices") - clone = self.clone() - clone.skip(index + self.__skip) - clone.limit(-1) # use a hard limit - clone.__query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 - for doc in clone: - return doc - raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor instances" % index) - - def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: - """**DEPRECATED** - Limit the number of documents to scan when - performing the query. - - Raises :class:`~pymongo.errors.InvalidOperation` if this - cursor has already been used. Only the last :meth:`max_scan` - applied to this cursor has any effect. - - :param max_scan: the maximum number of documents to scan - - .. versionchanged:: 3.7 - Deprecated :meth:`max_scan`. Support for this option is deprecated in - MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side - execution time. - """ - self.__check_okay_to_chain() - self.__max_scan = max_scan - return self - - def max(self, spec: _Sort) -> Cursor[_DocumentType]: - """Adds ``max`` operator that specifies upper bound for specific index. - - When using ``max``, :meth:`~hint` should also be configured to ensure - the query uses the expected index and starting in MongoDB 4.2 - :meth:`~hint` will be required. - - :param spec: a list of field, limit pairs specifying the exclusive - upper bound for all keys of a specific index in order. - - .. versionchanged:: 3.8 - Deprecated cursors that use ``max`` without a :meth:`~hint`. - - .. versionadded:: 2.7 - """ - if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") - - self.__check_okay_to_chain() - self.__max = dict(spec) - return self - - def min(self, spec: _Sort) -> Cursor[_DocumentType]: - """Adds ``min`` operator that specifies lower bound for specific index. - - When using ``min``, :meth:`~hint` should also be configured to ensure - the query uses the expected index and starting in MongoDB 4.2 - :meth:`~hint` will be required. - - :param spec: a list of field, limit pairs specifying the inclusive - lower bound for all keys of a specific index in order. - - .. versionchanged:: 3.8 - Deprecated cursors that use ``min`` without a :meth:`~hint`. - - .. versionadded:: 2.7 - """ - if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") - - self.__check_okay_to_chain() - self.__min = dict(spec) - return self - - def sort( - self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None - ) -> Cursor[_DocumentType]: - """Sorts this cursor's results. - - Pass a field name and a direction, either - :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: - - for doc in collection.find().sort('field', pymongo.ASCENDING): - print(doc) - - To sort by multiple fields, pass a list of (key, direction) pairs. - If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: - - for doc in collection.find().sort([ - 'field1', - ('field2', pymongo.DESCENDING)]): - print(doc) - - Text search results can be sorted by relevance:: - - cursor = db.test.find( - {'$text': {'$search': 'some words'}}, - {'score': {'$meta': 'textScore'}}) - - # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) - - for doc in cursor: - print(doc) - - For more advanced text search functionality, see MongoDB's - `Atlas Search `_. - - Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has - already been used. Only the last :meth:`sort` applied to this - cursor has any effect. - - :param key_or_list: a single key or a list of (key, direction) - pairs specifying the keys to sort on - :param direction: only used if `key_or_list` is a single - key, if not given :data:`~pymongo.ASCENDING` is assumed - """ - self.__check_okay_to_chain() - keys = helpers._index_list(key_or_list, direction) - self.__ordering = helpers._index_document(keys) - return self - - def distinct(self, key: str) -> list: - """Get a list of distinct values for `key` among all documents - in the result set of this query. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`str`. - - The :meth:`distinct` method obeys the - :attr:`~pymongo.collection.Collection.read_preference` of the - :class:`~pymongo.collection.Collection` instance on which - :meth:`~pymongo.collection.Collection.find` was called. - - :param key: name of key for which we want to get the distinct values - - .. seealso:: :meth:`pymongo.collection.Collection.distinct` - """ - options: dict[str, Any] = {} - if self.__spec: - options["query"] = self.__spec - if self.__max_time_ms is not None: - options["maxTimeMS"] = self.__max_time_ms - if self.__comment: - options["comment"] = self.__comment - if self.__collation is not None: - options["collation"] = self.__collation - - return self.__collection.distinct(key, session=self.__session, **options) - - def explain(self) -> _DocumentType: - """Returns an explain plan record for this cursor. - - .. note:: This method uses the default verbosity mode of the - `explain command - `_, - ``allPlansExecution``. To use a different verbosity use - :meth:`~pymongo.database.Database.command` to run the explain - command directly. - - .. seealso:: The MongoDB documentation on `explain `_. - """ - c = self.clone() - c.__explain = True - - # always use a hard limit for explains - if c.__limit: - c.__limit = -abs(c.__limit) - return next(c) - - def __set_hint(self, index: Optional[_Hint]) -> None: - if index is None: - self.__hint = None - return - - if isinstance(index, str): - self.__hint = index - else: - self.__hint = helpers._index_document(index) - - def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: - """Adds a 'hint', telling Mongo the proper index to use for the query. - - Judicious use of hints can greatly improve query - performance. When doing a query on multiple fields (at least - one of which is indexed) pass the indexed field as a hint to - the query. Raises :class:`~pymongo.errors.OperationFailure` if the - provided hint requires an index that does not exist on this collection, - and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has - already been used. - - `index` should be an index as passed to - :meth:`~pymongo.collection.Collection.create_index` - (e.g. ``[('field', ASCENDING)]``) or the name of the index. - If `index` is ``None`` any existing hint for this query is - cleared. The last hint applied to this cursor takes precedence - over all others. - - :param index: index to hint on (as an index specifier) - """ - self.__check_okay_to_chain() - self.__set_hint(index) - return self - - def comment(self, comment: Any) -> Cursor[_DocumentType]: - """Adds a 'comment' to the cursor. - - http://mongodb.com/docs/manual/reference/operator/comment/ - - :param comment: A string to attach to the query to help interpret and - trace the operation in the server logs and in profile data. - - .. versionadded:: 2.7 - """ - self.__check_okay_to_chain() - self.__comment = comment - return self - - def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: - """Adds a `$where`_ clause to this query. - - The `code` argument must be an instance of :class:`str` or - :class:`~bson.code.Code` containing a JavaScript expression. - This expression will be evaluated for each document scanned. - Only those documents for which the expression evaluates to - *true* will be returned as results. The keyword *this* refers - to the object currently being scanned. For example:: - - # Find all documents where field "a" is less than "b" plus "c". - for doc in db.test.find().where('this.a < (this.b + this.c)'): - print(doc) - - Raises :class:`TypeError` if `code` is not an instance of - :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. Only the last call to - :meth:`where` applied to a :class:`Cursor` has any effect. - - .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` - with scope variables. Consider using `$expr`_ instead. - - :param code: JavaScript expression to use as a filter - - .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ - .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ - """ - self.__check_okay_to_chain() - if not isinstance(code, Code): - code = Code(code) - - # Avoid overwriting a filter argument that was given by the user - # when updating the spec. - spec: dict[str, Any] - if self.__has_filter: - spec = dict(self.__spec) - else: - spec = cast(dict, self.__spec) - spec["$where"] = code - self.__spec = spec - return self - - def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: - """Adds a :class:`~pymongo.collation.Collation` to this query. - - Raises :exc:`TypeError` if `collation` is not an instance of - :class:`~pymongo.collation.Collation` or a ``dict``. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has - already been used. Only the last collation applied to this cursor has - any effect. - - :param collation: An instance of :class:`~pymongo.collation.Collation`. - """ - self.__check_okay_to_chain() - self.__collation = validate_collation_or_none(collation) - return self - - def __send_message(self, operation: Union[_Query, _GetMore]) -> None: - """Send a query or getmore operation and handles the response. - - If operation is ``None`` this is an exhaust cursor, which reads - the next result batch off the exhaust socket instead of - sending getMore messages to the server. - - Can raise ConnectionFailure. - """ - client = self.__collection.database.client - # OP_MSG is required to support exhaust cursors with encryption. - if client._encrypter and self.__exhaust: - raise InvalidOperation("exhaust cursors do not support auto encryption") - - try: - response = client._run_operation( - operation, self._unpack_response, address=self.__address - ) - except OperationFailure as exc: - if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - if exc.timeout: - self.__die(False) - else: - self.close() - # If this is a tailable cursor the error is likely - # due to capped collection roll over. Setting - # self.__killed to True ensures Cursor.alive will be - # False. No need to re-raise. - if ( - exc.code in _CURSOR_CLOSED_ERRORS - and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"] - ): - return - raise - except ConnectionFailure: - self.__killed = True - self.close() - raise - except Exception: - self.close() - raise - - self.__address = response.address - if isinstance(response, PinnedResponse): - if not self.__sock_mgr: - self.__sock_mgr = _ConnectionManager(response.conn, response.more_to_come) - - cmd_name = operation.name - docs = response.docs - if response.from_command: - if cmd_name != "explain": - cursor = docs[0]["cursor"] - self.__id = cursor["id"] - if cmd_name == "find": - documents = cursor["firstBatch"] - # Update the namespace used for future getMore commands. - ns = cursor.get("ns") - if ns: - self.__dbname, self.__collname = ns.split(".", 1) - else: - documents = cursor["nextBatch"] - self.__data = deque(documents) - self.__retrieved += len(documents) - else: - self.__id = 0 - self.__data = deque(docs) - self.__retrieved += len(docs) - else: - assert isinstance(response.data, _OpReply) - self.__id = response.data.cursor_id - self.__data = deque(docs) - self.__retrieved += response.data.number_returned - - if self.__id == 0: - # Don't wait for garbage collection to call __del__, return the - # socket and the session to the pool now. - self.close() - - if self.__limit and self.__id and self.__limit <= self.__retrieved: - self.close() - - def _unpack_response( - self, - response: Union[_OpReply, _OpMsg], - cursor_id: Optional[int], - codec_options: CodecOptions, - user_fields: Optional[Mapping[str, Any]] = None, - legacy_response: bool = False, - ) -> Sequence[_DocumentOut]: - return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) - - def _read_preference(self) -> _ServerMode: - if self.__read_preference is None: - # Save the read preference for getMore commands. - self.__read_preference = self.__collection._read_preference_for(self.session) - return self.__read_preference - - def _refresh(self) -> int: - """Refreshes the cursor with more data from Mongo. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. - """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if not self.__session: - self.__session = self.__collection.database.client._ensure_session() - - if self.__id is None: # Query - if (self.__min or self.__max) and not self.__hint: - raise InvalidOperation( - "Passing a 'hint' is required when using the min/max query" - " option to ensure the query utilizes the correct index" - ) - q = self._query_class( - self.__query_flags, - self.__collection.database.name, - self.__collection.name, - self.__skip, - self.__query_spec(), - self.__projection, - self.__codec_options, - self._read_preference(), - self.__limit, - self.__batch_size, - self.__read_concern, - self.__collation, - self.__session, - self.__collection.database.client, - self.__allow_disk_use, - self.__exhaust, - ) - self.__send_message(q) - elif self.__id: # Get More - if self.__limit: - limit = self.__limit - self.__retrieved - if self.__batch_size: - limit = min(limit, self.__batch_size) - else: - limit = self.__batch_size - # Exhaust cursors don't send getMore messages. - g = self._getmore_class( - self.__dbname, - self.__collname, - limit, - self.__id, - self.__codec_options, - self._read_preference(), - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - self.__exhaust, - self.__comment, - ) - self.__send_message(g) - - return len(self.__data) - - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - This is mostly useful with `tailable cursors - `_ - since they will stop iterating even though they *may* return more - results in the future. - - With regular cursors, simply use a for loop instead of :attr:`alive`:: - - for doc in collection.find(): - print(doc) - - .. note:: Even if :attr:`alive` is True, :meth:`next` can raise - :exc:`StopIteration`. :attr:`alive` can also be True while iterating - a cursor from a failed server. In this case :attr:`alive` will - return False after :meth:`next` fails to retrieve the next batch - of results from the server. - """ - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self) -> Optional[int]: - """Returns the id of the cursor - - .. versionadded:: 2.2 - """ - return self.__id - - @property - def address(self) -> Optional[tuple[str, Any]]: - """The (host, port) of the server used, or None. - - .. versionchanged:: 3.0 - Renamed from "conn_id". - """ - return self.__address - - @property - def session(self) -> Optional[ClientSession]: - """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. - - .. versionadded:: 3.6 - """ - if self.__explicit_session: - return self.__session - return None - - def __iter__(self) -> Cursor[_DocumentType]: - return self - - def next(self) -> _DocumentType: - """Advance the cursor.""" - if self.__empty: - raise StopIteration - if len(self.__data) or self._refresh(): - return self.__data.popleft() - else: - raise StopIteration - - __next__ = next - - def __enter__(self) -> Cursor[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - def __copy__(self) -> Cursor[_DocumentType]: - """Support function for `copy.copy()`. - - .. versionadded:: 2.4 - """ - return self._clone(deepcopy=False) - - def __deepcopy__(self, memo: Any) -> Any: - """Support function for `copy.deepcopy()`. - - .. versionadded:: 2.4 - """ - return self._clone(deepcopy=True) - - @overload - def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: - ... - - @overload - def _deepcopy( - self, x: SupportsItems, memo: Optional[dict[int, Union[list, dict]]] = None - ) -> dict: - ... - - def _deepcopy( - self, x: Union[Iterable, SupportsItems], memo: Optional[dict[int, Union[list, dict]]] = None - ) -> Union[list, dict]: - """Deepcopy helper for the data dictionary or list. - - Regular expressions cannot be deep copied but as they are immutable we - don't have to copy them when cloning. - """ - y: Union[list, dict] - iterator: Iterable[tuple[Any, Any]] - if not hasattr(x, "items"): - y, is_list, iterator = [], True, enumerate(x) - else: - y, is_list, iterator = {}, False, cast("SupportsItems", x).items() - if memo is None: - memo = {} - val_id = id(x) - if val_id in memo: - return memo[val_id] - memo[val_id] = y - - for key, value in iterator: - if isinstance(value, (dict, list)) and not isinstance(value, SON): - value = self._deepcopy(value, memo) # noqa: PLW2901 - elif not isinstance(value, RE_TYPE): - value = copy.deepcopy(value, memo) # noqa: PLW2901 - - if is_list: - y.append(value) # type: ignore[union-attr] - else: - if not isinstance(key, RE_TYPE): - key = copy.deepcopy(key, memo) # noqa: PLW2901 - y[key] = value - return y - - -class RawBatchCursor(Cursor, Generic[_DocumentType]): - """A cursor / iterator over raw batches of BSON data from a query result.""" - - _query_class = _RawBatchQuery - _getmore_class = _RawBatchGetMore - - def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: - """Create a new cursor / iterator over raw batches of BSON data. - - Should not be called directly by application developers - - see :meth:`~pymongo.collection.Collection.find_raw_batches` - instead. - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - super().__init__(collection, *args, **kwargs) - - def _unpack_response( - self, - response: Union[_OpReply, _OpMsg], - cursor_id: Optional[int], - codec_options: CodecOptions[Mapping[str, Any]], - user_fields: Optional[Mapping[str, Any]] = None, - legacy_response: bool = False, - ) -> list[_DocumentOut]: - raw_response = response.raw_response(cursor_id, user_fields=user_fields) - if not legacy_response: - # OP_MSG returns firstBatch/nextBatch documents as a BSON array - # Re-assemble the array of documents into a document stream - _convert_raw_document_lists_to_streams(raw_response[0]) - return cast(List["_DocumentOut"], raw_response) - - def explain(self) -> _DocumentType: - """Returns an explain plan record for this cursor. - - .. seealso:: The MongoDB documentation on `explain `_. - """ - clone = self._clone(deepcopy=True, base=Cursor(self.collection)) - return clone.explain() - - def __getitem__(self, index: Any) -> NoReturn: - raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") +__doc__ = original_doc diff --git a/pymongo/cursor_shared.py b/pymongo/cursor_shared.py new file mode 100644 index 0000000000..de6126c4fb --- /dev/null +++ b/pymongo/cursor_shared.py @@ -0,0 +1,94 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants and types shared across all cursor classes.""" +from __future__ import annotations + +from typing import Any, Mapping, Sequence, Tuple, Union + +# These errors mean that the server has already killed the cursor so there is +# no need to send killCursors. +_CURSOR_CLOSED_ERRORS = frozenset( + [ + 43, # CursorNotFound + 175, # QueryPlanKilled + 237, # CursorKilled + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, + ] +) + +_QUERY_OPTIONS = { + "tailable_cursor": 2, + "secondary_okay": 4, + "oplog_replay": 8, + "no_timeout": 16, + "await_data": 32, + "exhaust": 64, + "partial": 128, +} + + +class CursorType: + NON_TAILABLE = 0 + """The standard cursor type.""" + + TAILABLE = _QUERY_OPTIONS["tailable_cursor"] + """The tailable cursor type. + + Tailable cursors are only for use with capped collections. They are not + closed when the last data is retrieved but are kept open and the cursor + location marks the final document position. If more data is received + iteration of the cursor will continue from the last document received. + """ + + TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"] + """A tailable cursor with the await option set. + + Creates a tailable cursor that will wait for a few seconds after returning + the full result set so that it can capture and return additional data added + during the query. + """ + + EXHAUST = _QUERY_OPTIONS["exhaust"] + """An exhaust cursor. + + MongoDB will stream batched results to the client without waiting for the + client to request each batch, reducing latency. + """ + + +_Sort = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_Hint = Union[str, _Sort] diff --git a/pymongo/database.py b/pymongo/database.py index 70580694e5..6c81ac227d 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1,4 +1,4 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,1377 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Database level operations.""" +"""Re-import of synchronous Database API for compatibility.""" from __future__ import annotations -from copy import deepcopy -from typing import ( - TYPE_CHECKING, - Any, - Generic, - Mapping, - MutableMapping, - NoReturn, - Optional, - Sequence, - TypeVar, - Union, - cast, - overload, -) +from pymongo.synchronous.database import * # noqa: F403 +from pymongo.synchronous.database import __doc__ as original_doc -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions -from bson.dbref import DBRef -from bson.timestamp import Timestamp -from pymongo import _csot, common -from pymongo.aggregation import _DatabaseAggregationCommand -from pymongo.change_stream import DatabaseChangeStream -from pymongo.collection import Collection -from pymongo.command_cursor import CommandCursor -from pymongo.common import _ecoc_coll_name, _esc_coll_name -from pymongo.errors import CollectionInvalid, InvalidName, InvalidOperation -from pymongo.operations import _Op -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline - -if TYPE_CHECKING: - import bson - import bson.codec_options - from pymongo.client_session import ClientSession - from pymongo.mongo_client import MongoClient - from pymongo.pool import Connection - from pymongo.read_concern import ReadConcern - from pymongo.server import Server - from pymongo.write_concern import WriteConcern - - -def _check_name(name: str) -> None: - """Check if a database name is valid.""" - if not name: - raise InvalidName("database name cannot be the empty string") - - for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: - if invalid_char in name: - raise InvalidName("database names cannot contain the character %r" % invalid_char) - - -_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) - - -class Database(common.BaseObject, Generic[_DocumentType]): - """A Mongo database.""" - - def __init__( - self, - client: MongoClient[_DocumentType], - name: str, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> None: - """Get a database by client and name. - - Raises :class:`TypeError` if `name` is not an instance of - :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if - `name` is not a valid database name. - - :param client: A :class:`~pymongo.mongo_client.MongoClient` instance. - :param name: The database name. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) client.codec_options is used. - :param read_preference: The read preference to use. If - ``None`` (the default) client.read_preference is used. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) client.write_concern is used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) client.read_concern is used. - - .. seealso:: The MongoDB documentation on `databases `_. - - .. versionchanged:: 4.0 - Removed the eval, system_js, error, last_status, previous_error, - reset_error_history, authenticate, logout, collection_names, - current_op, add_user, remove_user, profiling_level, - set_profiling_level, and profiling_info methods. - See the :ref:`pymongo4-migration-guide`. - - .. versionchanged:: 3.2 - Added the read_concern option. - - .. versionchanged:: 3.0 - Added the codec_options, read_preference, and write_concern options. - :class:`~pymongo.database.Database` no longer returns an instance - of :class:`~pymongo.collection.Collection` for attribute names - with leading underscores. You must use dict-style lookups instead:: - - db['__my_collection__'] - - Not: - - db.__my_collection__ - """ - super().__init__( - codec_options or client.codec_options, - read_preference or client.read_preference, - write_concern or client.write_concern, - read_concern or client.read_concern, - ) - - if not isinstance(name, str): - raise TypeError("name must be an instance of str") - - if name != "$external": - _check_name(name) - - self.__name = name - self.__client: MongoClient[_DocumentType] = client - self._timeout = client.options.timeout - - @property - def client(self) -> MongoClient[_DocumentType]: - """The client instance for this :class:`Database`.""" - return self.__client - - @property - def name(self) -> str: - """The name of this :class:`Database`.""" - return self.__name - - def with_options( - self, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> Database[_DocumentType]: - """Get a clone of this database changing the specified settings. - - >>> db1.read_preference - Primary() - >>> from pymongo.read_preferences import Secondary - >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) - >>> db1.read_preference - Primary() - >>> db2.read_preference - Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) - - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Collection` - is used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Collection` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Collection` - is used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Collection` - is used. - - .. versionadded:: 3.8 - """ - return Database( - self.client, - self.__name, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern, - ) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Database): - return self.__client == other.client and self.__name == other.name - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash((self.__client, self.__name)) - - def __repr__(self) -> str: - return f"Database({self.__client!r}, {self.__name!r})" - - def __getattr__(self, name: str) -> Collection[_DocumentType]: - """Get a collection of this database by name. - - Raises InvalidName if an invalid collection name is used. - - :param name: the name of the collection to get - """ - if name.startswith("_"): - raise AttributeError( - f"Database has no attribute {name!r}. To access the {name}" - f" collection, use database[{name!r}]." - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> Collection[_DocumentType]: - """Get a collection of this database by name. - - Raises InvalidName if an invalid collection name is used. - - :param name: the name of the collection to get - """ - return Collection(self, name) - - def get_collection( - self, - name: str, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> Collection[_DocumentType]: - """Get a :class:`~pymongo.collection.Collection` with the given name - and options. - - Useful for creating a :class:`~pymongo.collection.Collection` with - different codec options, read preference, and/or write concern from - this :class:`Database`. - - >>> db.read_preference - Primary() - >>> coll1 = db.test - >>> coll1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> coll2 = db.get_collection( - ... 'test', read_preference=ReadPreference.SECONDARY) - >>> coll2.read_preference - Secondary(tag_sets=None) - - :param name: The name of the collection - a string. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Database` is - used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Database` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Database` is - used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Database` is - used. - """ - return Collection( - self, - name, - False, - codec_options, - read_preference, - write_concern, - read_concern, - ) - - def _get_encrypted_fields( - self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool - ) -> Optional[Mapping[str, Any]]: - encrypted_fields = kwargs.get("encryptedFields") - if encrypted_fields: - return cast(Mapping[str, Any], deepcopy(encrypted_fields)) - if ( - self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - and self.client.options.auto_encryption_opts._encrypted_fields_map.get( - f"{self.name}.{coll_name}" - ) - ): - return cast( - Mapping[str, Any], - deepcopy( - self.client.options.auto_encryption_opts._encrypted_fields_map[ - f"{self.name}.{coll_name}" - ] - ), - ) - if ask_db and self.client.options.auto_encryption_opts: - options = self[coll_name].options() - if options.get("encryptedFields"): - return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) - return None - - @_csot.apply - def create_collection( - self, - name: str, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - session: Optional[ClientSession] = None, - check_exists: Optional[bool] = True, - **kwargs: Any, - ) -> Collection[_DocumentType]: - """Create a new :class:`~pymongo.collection.Collection` in this - database. - - Normally collection creation is automatic. This method should - only be used to specify options on - creation. :class:`~pymongo.errors.CollectionInvalid` will be - raised if the collection already exists. - - :param name: the name of the collection to create - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Database` is - used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Database` is used. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Database` is - used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Database` is - used. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param `check_exists`: if True (the default), send a listCollections command to - check if the collection already exists before creation. - :param kwargs: additional keyword arguments will - be passed as options for the `create collection command`_ - - All optional `create collection command`_ parameters should be passed - as keyword arguments to this method. Valid options include, but are not - limited to: - - - ``size`` (int): desired initial size for the collection (in - bytes). For capped collections this size is the max - size of the collection. - - ``capped`` (bool): if True, this is a capped collection - - ``max`` (int): maximum number of objects if capped (optional) - - ``timeseries`` (dict): a document specifying configuration options for - timeseries collections - - ``expireAfterSeconds`` (int): the number of seconds after which a - document in a timeseries collection expires - - ``validator`` (dict): a document specifying validation rules or expressions - for the collection - - ``validationLevel`` (str): how strictly to apply the - validation rules to existing documents during an update. The default level - is "strict" - - ``validationAction`` (str): whether to "error" on invalid documents - (the default) or just "warn" about the violations but allow invalid - documents to be inserted - - ``indexOptionDefaults`` (dict): a document specifying a default configuration - for indexes when creating a collection - - ``viewOn`` (str): the name of the source collection or view from which - to create the view - - ``pipeline`` (list): a list of aggregation pipeline stages - - ``comment`` (str): a user-provided comment to attach to this command. - This option is only supported on MongoDB >= 4.4. - - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. For example:: - - { - "escCollection": "enxcol_.encryptedCollection.esc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - } - - ``clusteredIndex`` (dict): Document that specifies the clustered index - configuration. It must have the following form:: - - { - // key pattern must be {_id: 1} - key: , // required - unique: , // required, must be `true` - name: , // optional, otherwise automatically generated - v: , // optional, must be `2` if provided - } - - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for - enabling pre- and post-images. - - .. versionchanged:: 4.2 - Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. - - .. versionchanged:: 3.11 - This method is now supported inside multi-document transactions - with MongoDB 4.4+. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Added the collation option. - - .. versionchanged:: 3.0 - Added the codec_options, read_preference, and write_concern options. - - .. _create collection command: - https://mongodb.com/docs/manual/reference/command/create - """ - encrypted_fields = self._get_encrypted_fields(kwargs, name, False) - if encrypted_fields: - common.validate_is_mapping("encryptedFields", encrypted_fields) - kwargs["encryptedFields"] = encrypted_fields - - clustered_index = kwargs.get("clusteredIndex") - if clustered_index: - common.validate_is_mapping("clusteredIndex", clustered_index) - - with self.__client._tmp_session(session) as s: - # Skip this check in a transaction where listCollections is not - # supported. - if ( - check_exists - and (not s or not s.in_transaction) - and name in self.list_collection_names(filter={"name": name}, session=s) - ): - raise CollectionInvalid("collection %s already exists" % name) - return Collection( - self, - name, - True, - codec_options, - read_preference, - write_concern, - read_concern, - session=s, - **kwargs, - ) - - def aggregate( - self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any - ) -> CommandCursor[_DocumentType]: - """Perform a database-level aggregation. - - See the `aggregation pipeline`_ documentation for a list of stages - that are supported. - - .. code-block:: python - - # Lists all operations currently running on the server. - with client.admin.aggregate([{"$currentOp": {}}]) as cursor: - for operation in cursor: - print(operation) - - The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Database`, except when ``$out`` or ``$merge`` are used, in - which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` - is used. - - .. note:: This method does not support the 'explain' option. Please - use :meth:`~pymongo.database.Database.command` instead. - - .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this collection is automatically applied to this operation. - - :param pipeline: a list of aggregation pipeline stages - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param kwargs: extra `aggregate command`_ parameters. - - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. - - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `let` (dict): A dict of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. ``"$$var"``). This option is - only supported on MongoDB >= 5.0. - - :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result - set. - - .. versionadded:: 3.9 - - .. _aggregation pipeline: - https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline - - .. _aggregate command: - https://mongodb.com/docs/manual/reference/command/aggregate - """ - with self.client._tmp_session(session, close=False) as s: - cmd = _DatabaseAggregationCommand( - self, - CommandCursor, - pipeline, - kwargs, - session is not None, - user_fields={"cursor": {"firstBatch": 1}}, - ) - return self.client._retryable_read( - cmd.get_cursor, - cmd.get_read_preference(s), # type: ignore[arg-type] - s, - retryable=not cmd._performs_write, - operation=_Op.AGGREGATE, - ) - - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional[ClientSession] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - show_expanded_events: Optional[bool] = None, - ) -> DatabaseChangeStream[_DocumentType]: - """Watch changes on this database. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which - iterates over changes on all collections in this database. - - Introduced in MongoDB 4.0. - - .. code-block:: python - - with db.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with db.watch([{"$match": {"operationType": "insert"}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error("...") - - For a precise description of the resume process see the - `change streams specification`_. - - :param pipeline: A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - :param full_document: The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - :param full_document_before_change: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - :param resume_after: A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - :param max_await_time_ms: The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - :param batch_size: The maximum number of documents to return - per batch. - :param collation: The :class:`~pymongo.collation.Collation` - to use for the aggregation. - :param start_at_operation_time: If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param start_after: The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - :param comment: A user-provided comment to attach to this - command. - :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - - :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. - - .. versionchanged:: 4.3 - Added `show_expanded_events` parameter. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionadded:: 3.7 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md - """ - return DatabaseChangeStream( - self, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - show_expanded_events=show_expanded_events, - ) - - @overload - def _command( - self, - conn: Connection, - command: Union[str, MutableMapping[str, Any]], - value: int = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: _ServerMode = ReadPreference.PRIMARY, - codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, - write_concern: Optional[WriteConcern] = None, - parse_write_concern_error: bool = False, - session: Optional[ClientSession] = None, - **kwargs: Any, - ) -> dict[str, Any]: - ... - - @overload - def _command( - self, - conn: Connection, - command: Union[str, MutableMapping[str, Any]], - value: int = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: _ServerMode = ReadPreference.PRIMARY, - codec_options: CodecOptions[_CodecDocumentType] = ..., - write_concern: Optional[WriteConcern] = None, - parse_write_concern_error: bool = False, - session: Optional[ClientSession] = None, - **kwargs: Any, - ) -> _CodecDocumentType: - ... - - def _command( - self, - conn: Connection, - command: Union[str, MutableMapping[str, Any]], - value: int = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: _ServerMode = ReadPreference.PRIMARY, - codec_options: Union[ - CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] - ] = DEFAULT_CODEC_OPTIONS, - write_concern: Optional[WriteConcern] = None, - parse_write_concern_error: bool = False, - session: Optional[ClientSession] = None, - **kwargs: Any, - ) -> Union[dict[str, Any], _CodecDocumentType]: - """Internal command helper.""" - if isinstance(command, str): - command = {command: value} - - command.update(kwargs) - with self.__client._tmp_session(session) as s: - return conn.command( - self.__name, - command, - read_preference, - codec_options, - check, - allowable_errors, - write_concern=write_concern, - parse_write_concern_error=parse_write_concern_error, - session=s, - client=self.__client, - ) - - @overload - def command( - self, - command: Union[str, MutableMapping[str, Any]], - value: Any = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: Optional[_ServerMode] = None, - codec_options: None = None, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> dict[str, Any]: - ... - - @overload - def command( - self, - command: Union[str, MutableMapping[str, Any]], - value: Any = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: Optional[_ServerMode] = None, - codec_options: CodecOptions[_CodecDocumentType] = ..., - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _CodecDocumentType: - ... - - @_csot.apply - def command( - self, - command: Union[str, MutableMapping[str, Any]], - value: Any = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: Optional[_ServerMode] = None, - codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> Union[dict[str, Any], _CodecDocumentType]: - """Issue a MongoDB command. - - Send command `command` to the database and return the - response. If `command` is an instance of :class:`str` - then the command {`command`: `value`} will be sent. - Otherwise, `command` must be an instance of - :class:`dict` and will be sent as is. - - Any additional keyword arguments will be added to the final - command document before it is sent. - - For example, a command like ``{buildinfo: 1}`` can be sent - using: - - >>> db.command("buildinfo") - OR - >>> db.command({"buildinfo": 1}) - - For a command where the value matters, like ``{count: - collection_name}`` we can do: - - >>> db.command("count", collection_name) - OR - >>> db.command({"count": collection_name}) - - For commands that take additional arguments we can use - kwargs. So ``{count: collection_name, query: query}`` becomes: - - >>> db.command("count", collection_name, query=query) - OR - >>> db.command({"count": collection_name, "query": query}) - - :param command: document representing the command to be issued, - or the name of the command (for simple commands only). - - .. note:: the order of keys in the `command` document is - significant (the "verb" must come first), so commands - which require multiple keys (e.g. `findandmodify`) - should be done with this in mind. - - :param value: value to use for the command verb when - `command` is passed as a string - :param check: check the response for errors, raising - :class:`~pymongo.errors.OperationFailure` if there are any - :param allowable_errors: if `check` is ``True``, error messages - in this list will be ignored by error-checking - :param read_preference: The read preference for this - operation. See :mod:`~pymongo.read_preferences` for options. - If the provided `session` is in a transaction, defaults to the - read preference configured for the transaction. - Otherwise, defaults to - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - :param codec_options: A :class:`~bson.codec_options.CodecOptions` - instance. - :param session: A - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: additional keyword arguments will - be added to the command document before it is sent - - - .. note:: :meth:`command` does **not** obey this Database's - :attr:`read_preference` or :attr:`codec_options`. You must use the - ``read_preference`` and ``codec_options`` parameters instead. - - .. note:: :meth:`command` does **not** apply any custom TypeDecoders - when decoding the command response. - - .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will - automatically add API versioning options to the given command. - Explicitly adding API versioning options in the command and - declaring an API version on the client is not supported. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.0 - Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`, - and `secondary_acceptable_latency_ms` option. - Removed `compile_re` option: PyMongo now always represents BSON - regular expressions as :class:`~bson.regex.Regex` objects. Use - :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a - BSON regular expression to a Python regular expression object. - Added the ``codec_options`` parameter. - - .. seealso:: The MongoDB documentation on `commands `_. - """ - opts = codec_options or DEFAULT_CODEC_OPTIONS - if comment is not None: - kwargs["comment"] = comment - - if isinstance(command, str): - command_name = command - else: - command_name = next(iter(command)) - - if read_preference is None: - read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - with self.__client._conn_for_reads(read_preference, session, operation=command_name) as ( - connection, - read_preference, - ): - return self._command( - connection, - command, - value, - check, - allowable_errors, - read_preference, - opts, - session=session, - **kwargs, - ) - - @_csot.apply - def cursor_command( - self, - command: Union[str, MutableMapping[str, Any]], - value: Any = 1, - read_preference: Optional[_ServerMode] = None, - codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - max_await_time_ms: Optional[int] = None, - **kwargs: Any, - ) -> CommandCursor[_DocumentType]: - """Issue a MongoDB command and parse the response as a cursor. - - If the response from the server does not include a cursor field, an error will be thrown. - - Otherwise, behaves identically to issuing a normal MongoDB command. - - :param command: document representing the command to be issued, - or the name of the command (for simple commands only). - - .. note:: the order of keys in the `command` document is - significant (the "verb" must come first), so commands - which require multiple keys (e.g. `findandmodify`) - should use an instance of :class:`~bson.son.SON` or - a string and kwargs instead of a Python `dict`. - - :param value: value to use for the command verb when - `command` is passed as a string - :param read_preference: The read preference for this - operation. See :mod:`~pymongo.read_preferences` for options. - If the provided `session` is in a transaction, defaults to the - read preference configured for the transaction. - Otherwise, defaults to - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - :param codec_options`: A :class:`~bson.codec_options.CodecOptions` - instance. - :param session: A - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to future getMores for this - command. - :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. - :param kwargs: additional keyword arguments will - be added to the command document before it is sent - - .. note:: :meth:`command` does **not** obey this Database's - :attr:`read_preference` or :attr:`codec_options`. You must use the - ``read_preference`` and ``codec_options`` parameters instead. - - .. note:: :meth:`command` does **not** apply any custom TypeDecoders - when decoding the command response. - - .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will - automatically add API versioning options to the given command. - Explicitly adding API versioning options in the command and - declaring an API version on the client is not supported. - - .. seealso:: The MongoDB documentation on `commands `_. - """ - if isinstance(command, str): - command_name = command - else: - command_name = next(iter(command)) - - with self.__client._tmp_session(session, close=False) as tmp_session: - opts = codec_options or DEFAULT_CODEC_OPTIONS - - if read_preference is None: - read_preference = ( - tmp_session and tmp_session._txn_read_preference() - ) or ReadPreference.PRIMARY - with self.__client._conn_for_reads(read_preference, tmp_session, command_name) as ( - conn, - read_preference, - ): - response = self._command( - conn, - command, - value, - True, - None, - read_preference, - opts, - session=tmp_session, - **kwargs, - ) - coll = self.get_collection("$cmd", read_preference=read_preference) - if response.get("cursor"): - cmd_cursor = CommandCursor( - coll, - response["cursor"], - conn.address, - max_await_time_ms=max_await_time_ms, - session=tmp_session, - explicit_session=session is not None, - comment=comment, - ) - cmd_cursor._maybe_pin_connection(conn) - return cmd_cursor - else: - raise InvalidOperation("Command does not return a cursor.") - - def _retryable_read_command( - self, - command: Union[str, MutableMapping[str, Any]], - operation: str, - session: Optional[ClientSession] = None, - ) -> dict[str, Any]: - """Same as command but used for retryable read commands.""" - read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - - def _cmd( - session: Optional[ClientSession], - _server: Server, - conn: Connection, - read_preference: _ServerMode, - ) -> dict[str, Any]: - return self._command( - conn, - command, - read_preference=read_preference, - session=session, - ) - - return self.__client._retryable_read(_cmd, read_preference, session, operation) - - def _list_collections( - self, - conn: Connection, - session: Optional[ClientSession], - read_preference: _ServerMode, - **kwargs: Any, - ) -> CommandCursor[MutableMapping[str, Any]]: - """Internal listCollections helper.""" - coll = cast( - Collection[MutableMapping[str, Any]], - self.get_collection("$cmd", read_preference=read_preference), - ) - cmd = {"listCollections": 1, "cursor": {}} - cmd.update(kwargs) - with self.__client._tmp_session(session, close=False) as tmp_session: - cursor = self._command(conn, cmd, read_preference=read_preference, session=tmp_session)[ - "cursor" - ] - cmd_cursor = CommandCursor( - coll, - cursor, - conn.address, - session=tmp_session, - explicit_session=session is not None, - comment=cmd.get("comment"), - ) - cmd_cursor._maybe_pin_connection(conn) - return cmd_cursor - - def list_collections( - self, - session: Optional[ClientSession] = None, - filter: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[MutableMapping[str, Any]]: - """Get a cursor over the collections of this database. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param filter: A query document to filter the list of - collections returned from the listCollections command. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: Optional parameters of the - `listCollections command - `_ - can be passed as keyword arguments to this method. The supported - options differ by server version. - - - :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. - - .. versionadded:: 3.6 - """ - if filter is not None: - kwargs["filter"] = filter - read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - if comment is not None: - kwargs["comment"] = comment - - def _cmd( - session: Optional[ClientSession], - _server: Server, - conn: Connection, - read_preference: _ServerMode, - ) -> CommandCursor[MutableMapping[str, Any]]: - return self._list_collections(conn, session, read_preference=read_preference, **kwargs) - - return self.__client._retryable_read( - _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS - ) - - def list_collection_names( - self, - session: Optional[ClientSession] = None, - filter: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> list[str]: - """Get a list of all the collection names in this database. - - For example, to list all non-system collections:: - - filter = {"name": {"$regex": r"^(?!system\\.)"}} - db.list_collection_names(filter=filter) - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param filter: A query document to filter the list of - collections returned from the listCollections command. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: Optional parameters of the - `listCollections command - `_ - can be passed as keyword arguments to this method. The supported - options differ by server version. - - - .. versionchanged:: 3.8 - Added the ``filter`` and ``**kwargs`` parameters. - - .. versionadded:: 3.6 - """ - if comment is not None: - kwargs["comment"] = comment - if filter is None: - kwargs["nameOnly"] = True - - else: - # The enumerate collections spec states that "drivers MUST NOT set - # nameOnly if a filter specifies any keys other than name." - common.validate_is_mapping("filter", filter) - kwargs["filter"] = filter - if not filter or (len(filter) == 1 and "name" in filter): - kwargs["nameOnly"] = True - - return [result["name"] for result in self.list_collections(session=session, **kwargs)] - - def _drop_helper( - self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None - ) -> dict[str, Any]: - command = {"drop": name} - if comment is not None: - command["comment"] = comment - - with self.__client._conn_for_writes(session, operation=_Op.DROP) as connection: - return self._command( - connection, - command, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session, - ) - - @_csot.apply - def drop_collection( - self, - name_or_collection: Union[str, Collection[_DocumentTypeArg]], - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, - ) -> dict[str, Any]: - """Drop a collection. - - :param name_or_collection: the name of a collection to drop or the - collection object itself - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. For example:: - - { - "escCollection": "enxcol_.encryptedCollection.esc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - - } - - - .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this database is automatically applied to this operation. - - .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Apply this database's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - name = name_or_collection - if isinstance(name, Collection): - name = name.name - - if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str") - encrypted_fields = self._get_encrypted_fields( - {"encryptedFields": encrypted_fields}, - name, - True, - ) - if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) - self._drop_helper( - _esc_coll_name(encrypted_fields, name), session=session, comment=comment - ) - self._drop_helper( - _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment - ) - - return self._drop_helper(name, session, comment) - - def validate_collection( - self, - name_or_collection: Union[str, Collection[_DocumentTypeArg]], - scandata: bool = False, - full: bool = False, - session: Optional[ClientSession] = None, - background: Optional[bool] = None, - comment: Optional[Any] = None, - ) -> dict[str, Any]: - """Validate a collection. - - Returns a dict of validation info. Raises CollectionInvalid if - validation fails. - - See also the MongoDB documentation on the `validate command`_. - - :param name_or_collection: A Collection object or the name of a - collection to validate. - :param scandata: Do extra checks beyond checking the overall - structure of the collection. - :param full: Have the server do a more thorough scan of the - collection. Use with `scandata` for a thorough scan - of the structure of the collection and the individual - documents. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param background: A boolean flag that determines whether - the command runs in the background. Requires MongoDB 4.4+. - :param comment: A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.11 - Added ``background`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ - """ - name = name_or_collection - if isinstance(name, Collection): - name = name.name - - if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or Collection") - cmd = {"validate": name, "scandata": scandata, "full": full} - if comment is not None: - cmd["comment"] = comment - - if background is not None: - cmd["background"] = background - - result = self.command(cmd, session=session) - - valid = True - # Pre 1.9 results - if "result" in result: - info = result["result"] - if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid(f"{name} invalid: {info}") - # Sharded results - elif "raw" in result: - for _, res in result["raw"].items(): - if "result" in res: - info = res["result"] - if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid(f"{name} invalid: {info}") - elif not res.get("valid", False): - valid = False - break - # Post 1.9 non-sharded results. - elif not result.get("valid", False): - valid = False - - if not valid: - raise CollectionInvalid(f"{name} invalid: {result!r}") - - return result - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'Database' object is not iterable") - - next = __next__ - - def __bool__(self) -> NoReturn: - raise NotImplementedError( - "Database objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: database is not None" - ) - - def dereference( - self, - dbref: DBRef, - session: Optional[ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> Optional[_DocumentType]: - """Dereference a :class:`~bson.dbref.DBRef`, getting the - document it points to. - - Raises :class:`TypeError` if `dbref` is not an instance of - :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if - the reference does not point to a valid document. Raises - :class:`ValueError` if `dbref` has a database specified that - is different from the current database. - - :param dbref: the reference - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: any additional keyword arguments - are the same as the arguments to - :meth:`~pymongo.collection.Collection.find`. - - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - if not isinstance(dbref, DBRef): - raise TypeError("cannot dereference a %s" % type(dbref)) - if dbref.database is not None and dbref.database != self.__name: - raise ValueError( - "trying to dereference a DBRef that points to " - f"another database ({dbref.database!r} not {self.__name!r})" - ) - return self[dbref.collection].find_one( - {"_id": dbref.id}, session=session, comment=comment, **kwargs - ) +__doc__ = original_doc diff --git a/pymongo/database_shared.py b/pymongo/database_shared.py new file mode 100644 index 0000000000..2d4e37feef --- /dev/null +++ b/pymongo/database_shared.py @@ -0,0 +1,34 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, helpers, and types shared across all database classes.""" +from __future__ import annotations + +from typing import Any, Mapping, TypeVar + +from pymongo.errors import InvalidName + + +def _check_name(name: str) -> None: + """Check if a database name is valid.""" + if not name: + raise InvalidName("database name cannot be the empty string") + + for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: + if invalid_char in name: + raise InvalidName("database names cannot contain the character %r" % invalid_char) + + +_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) diff --git a/pymongo/encryption.py b/pymongo/encryption.py index c7f02766c9..4887a3f90e 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -1,4 +1,4 @@ -# Copyright 2019-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,1101 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Support for explicit client-side field level encryption.""" +"""Re-import of synchronous Encryption API for compatibility.""" from __future__ import annotations -import contextlib -import enum -import socket -import uuid -import weakref -from copy import deepcopy -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generic, - Iterator, - Mapping, - MutableMapping, - Optional, - Sequence, - Union, - cast, -) +from pymongo.synchronous.encryption import * # noqa: F403 +from pymongo.synchronous.encryption import __doc__ as original_doc -try: - from pymongocrypt.auto_encrypter import AutoEncrypter # type:ignore[import] - from pymongocrypt.errors import MongoCryptError # type:ignore[import] - from pymongocrypt.explicit_encrypter import ExplicitEncrypter # type:ignore[import] - from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] - from pymongocrypt.state_machine import MongoCryptCallback # type:ignore[import] - - _HAVE_PYMONGOCRYPT = True -except ImportError: - _HAVE_PYMONGOCRYPT = False - MongoCryptCallback = object - -from bson import _dict_to_bson, decode, encode -from bson.binary import STANDARD, UUID_SUBTYPE, Binary -from bson.codec_options import CodecOptions -from bson.errors import BSONError -from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson -from pymongo import _csot -from pymongo.collection import Collection -from pymongo.common import CONNECT_TIMEOUT -from pymongo.cursor import Cursor -from pymongo.daemon import _spawn_daemon -from pymongo.database import Database -from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts -from pymongo.errors import ( - ConfigurationError, - EncryptedCollectionError, - EncryptionError, - InvalidOperation, - PyMongoError, - ServerSelectionTimeoutError, -) -from pymongo.mongo_client import MongoClient -from pymongo.network import BLOCKING_IO_ERRORS -from pymongo.operations import UpdateOne -from pymongo.pool import PoolOptions, _configured_socket, _raise_connection_failure -from pymongo.read_concern import ReadConcern -from pymongo.results import BulkWriteResult, DeleteResult -from pymongo.ssl_support import get_ssl_context -from pymongo.typings import _DocumentType, _DocumentTypeArg -from pymongo.uri_parser import parse_host -from pymongo.write_concern import WriteConcern - -if TYPE_CHECKING: - from pymongocrypt.mongocrypt import MongoCryptKmsContext - -_HTTPS_PORT = 443 -_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT -_MONGOCRYPTD_TIMEOUT_MS = 10000 - -_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( - document_class=Dict[str, Any], uuid_representation=STANDARD -) -# Use RawBSONDocument codec options to avoid needlessly decoding -# documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) - - -@contextlib.contextmanager -def _wrap_encryption_errors() -> Iterator[None]: - """Context manager to wrap encryption related errors.""" - try: - yield - except BSONError: - # BSON encoding/decoding errors are unrelated to encryption so - # we should propagate them unchanged. - raise - except Exception as exc: - raise EncryptionError(exc) from exc - - -class _EncryptionIO(MongoCryptCallback): # type: ignore[misc] - def __init__( - self, - client: Optional[MongoClient[_DocumentTypeArg]], - key_vault_coll: Collection[_DocumentTypeArg], - mongocryptd_client: Optional[MongoClient[_DocumentTypeArg]], - opts: AutoEncryptionOpts, - ): - """Internal class to perform I/O on behalf of pymongocrypt.""" - self.client_ref: Any - # Use a weak ref to break reference cycle. - if client is not None: - self.client_ref = weakref.ref(client) - else: - self.client_ref = None - self.key_vault_coll: Optional[Collection[RawBSONDocument]] = cast( - Collection[RawBSONDocument], - key_vault_coll.with_options( - codec_options=_KEY_VAULT_OPTS, - read_concern=ReadConcern(level="majority"), - write_concern=WriteConcern(w="majority"), - ), - ) - self.mongocryptd_client = mongocryptd_client - self.opts = opts - self._spawned = False - - def kms_request(self, kms_context: MongoCryptKmsContext) -> None: - """Complete a KMS request. - - :param kms_context: A :class:`MongoCryptKmsContext`. - - :return: None - """ - endpoint = kms_context.endpoint - message = kms_context.message - provider = kms_context.kms_provider - ctx = self.opts._kms_ssl_contexts.get(provider) - if ctx is None: - # Enable strict certificate verification, OCSP, match hostname, and - # SNI using the system default CA certificates. - ctx = get_ssl_context( - None, # certfile - None, # passphrase - None, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False, - ) # disable_ocsp_endpoint_check - # CSOT: set timeout for socket creation. - connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) - opts = PoolOptions( - connect_timeout=connect_timeout, - socket_timeout=connect_timeout, - ssl_context=ctx, - ) - host, port = parse_host(endpoint, _HTTPS_PORT) - try: - conn = _configured_socket((host, port), opts) - try: - conn.sendall(message) - while kms_context.bytes_needed > 0: - # CSOT: update timeout. - conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = conn.recv(kms_context.bytes_needed) - if not data: - raise OSError("KMS connection closed") - kms_context.feed(data) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") from None - finally: - conn.close() - except (PyMongoError, MongoCryptError): - raise # Propagate pymongo errors directly. - except Exception as error: - # Wrap I/O errors in PyMongo exceptions. - _raise_connection_failure((host, port), error) - - def collection_info( - self, database: Database[Mapping[str, Any]], filter: bytes - ) -> Optional[bytes]: - """Get the collection info for a namespace. - - The returned collection info is passed to libmongocrypt which reads - the JSON schema. - - :param database: The database on which to run listCollections. - :param filter: The filter to pass to listCollections. - - :return: The first document from the listCollections command response as BSON. - """ - with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: - for doc in cursor: - return _dict_to_bson(doc, False, _DATA_KEY_OPTS) - return None - - def spawn(self) -> None: - """Spawn mongocryptd. - - Note this method is thread safe; at most one mongocryptd will start - successfully. - """ - self._spawned = True - args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] - args.extend(self.opts._mongocryptd_spawn_args) - _spawn_daemon(args) - - def mark_command(self, database: str, cmd: bytes) -> bytes: - """Mark a command for encryption. - - :param database: The database on which to run this command. - :param cmd: The BSON command to run. - - :return: The marked command response from mongocryptd. - """ - if not self._spawned and not self.opts._mongocryptd_bypass_spawn: - self.spawn() - # Database.command only supports mutable mappings so we need to decode - # the raw BSON command first. - inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) - assert self.mongocryptd_client is not None - try: - res = self.mongocryptd_client[database].command( - inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS - ) - except ServerSelectionTimeoutError: - if self.opts._mongocryptd_bypass_spawn: - raise - self.spawn() - res = self.mongocryptd_client[database].command( - inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS - ) - return res.raw - - def fetch_keys(self, filter: bytes) -> Iterator[bytes]: - """Yields one or more keys from the key vault. - - :param filter: The filter to pass to find. - - :return: A generator which yields the requested keys from the key vault. - """ - assert self.key_vault_coll is not None - with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: - for key in cursor: - yield key.raw - - def insert_data_key(self, data_key: bytes) -> Binary: - """Insert a data key into the key vault. - - :param data_key: The data key document to insert. - - :return: The _id of the inserted data key document. - """ - raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) - data_key_id = raw_doc.get("_id") - if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: - raise TypeError("data_key _id must be Binary with a UUID subtype") - - assert self.key_vault_coll is not None - self.key_vault_coll.insert_one(raw_doc) - return data_key_id - - def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: - """Encode a document to BSON. - - A document can be any mapping type (like :class:`dict`). - - :param doc: mapping type representing a document - - :return: The encoded BSON bytes. - """ - return encode(doc) - - def close(self) -> None: - """Release resources. - - Note it is not safe to call this method from __del__ or any GC hooks. - """ - self.client_ref = None - self.key_vault_coll = None - if self.mongocryptd_client: - self.mongocryptd_client.close() - self.mongocryptd_client = None - - -class RewrapManyDataKeyResult: - """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. - - .. versionadded:: 4.2 - """ - - def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: - self._bulk_write_result = bulk_write_result - - @property - def bulk_write_result(self) -> Optional[BulkWriteResult]: - """The result of the bulk write operation used to update the key vault - collection with one or more rewrapped data keys. If - :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, - no bulk write operation will be executed and this field will be - ``None``. - """ - return self._bulk_write_result - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self._bulk_write_result!r})" - - -class _Encrypter: - """Encrypts and decrypts MongoDB commands. - - This class is used to support automatic encryption and decryption of - MongoDB commands. - """ - - def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): - """Create a _Encrypter for a client. - - :param client: The encrypted MongoClient. - :param opts: The encrypted client's :class:`AutoEncryptionOpts`. - """ - if opts._schema_map is None: - schema_map = None - else: - schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) - - if opts._encrypted_fields_map is None: - encrypted_fields_map = None - else: - encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) - self._bypass_auto_encryption = opts._bypass_auto_encryption - self._internal_client = None - - def _get_internal_client( - encrypter: _Encrypter, mongo_client: MongoClient[_DocumentTypeArg] - ) -> MongoClient[_DocumentTypeArg]: - if mongo_client.options.pool_options.max_pool_size is None: - # Unlimited pool size, use the same client. - return mongo_client - # Else - limited pool size, use an internal client. - if encrypter._internal_client is not None: - return encrypter._internal_client - internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) - encrypter._internal_client = internal_client - return internal_client - - if opts._key_vault_client is not None: - key_vault_client = opts._key_vault_client - else: - key_vault_client = _get_internal_client(self, client) - - if opts._bypass_auto_encryption: - metadata_client = None - else: - metadata_client = _get_internal_client(self, client) - - db, coll = opts._key_vault_namespace.split(".", 1) - key_vault_coll = key_vault_client[db][coll] - - mongocryptd_client: MongoClient[Mapping[str, Any]] = MongoClient( - opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS - ) - - io_callbacks = _EncryptionIO( # type:ignore[misc] - metadata_client, key_vault_coll, mongocryptd_client, opts - ) - self._auto_encrypter = AutoEncrypter( - io_callbacks, - MongoCryptOptions( - opts._kms_providers, - schema_map, - crypt_shared_lib_path=opts._crypt_shared_lib_path, - crypt_shared_lib_required=opts._crypt_shared_lib_required, - bypass_encryption=opts._bypass_auto_encryption, - encrypted_fields_map=encrypted_fields_map, - bypass_query_analysis=opts._bypass_query_analysis, - ), - ) - self._closed = False - - def encrypt( - self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] - ) -> dict[str, Any]: - """Encrypt a MongoDB command. - - :param database: The database for this command. - :param cmd: A command document. - :param codec_options: The CodecOptions to use while encoding `cmd`. - - :return: The encrypted command to execute. - """ - self._check_closed() - encoded_cmd = _dict_to_bson(cmd, False, codec_options) - with _wrap_encryption_errors(): - encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) - # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. - return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - - def decrypt(self, response: bytes) -> Optional[bytes]: - """Decrypt a MongoDB command response. - - :param response: A MongoDB command response as BSON. - - :return: The decrypted command response. - """ - self._check_closed() - with _wrap_encryption_errors(): - return cast(bytes, self._auto_encrypter.decrypt(response)) - - def _check_closed(self) -> None: - if self._closed: - raise InvalidOperation("Cannot use MongoClient after close") - - def close(self) -> None: - """Cleanup resources.""" - self._closed = True - self._auto_encrypter.close() - if self._internal_client: - self._internal_client.close() - self._internal_client = None - - -class Algorithm(str, enum.Enum): - """An enum that defines the supported encryption algorithms.""" - - AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" - """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" - AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" - """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" - INDEXED = "Indexed" - """Indexed. - - .. versionadded:: 4.2 - """ - UNINDEXED = "Unindexed" - """Unindexed. - - .. versionadded:: 4.2 - """ - RANGEPREVIEW = "RangePreview" - """RangePreview. - - .. note:: Support for Range queries is in beta. - Backwards-breaking changes may be made before the final release. - - .. versionadded:: 4.4 - """ - - -class QueryType(str, enum.Enum): - """An enum that defines the supported values for explicit encryption query_type. - - .. versionadded:: 4.2 - """ - - EQUALITY = "equality" - """Used to encrypt a value for an equality query.""" - - RANGEPREVIEW = "rangePreview" - """Used to encrypt a value for a range query. - - .. note:: Support for Range queries is in beta. - Backwards-breaking changes may be made before the final release. -""" - - -class ClientEncryption(Generic[_DocumentType]): - """Explicit client-side field level encryption.""" - - def __init__( - self, - kms_providers: Mapping[str, Any], - key_vault_namespace: str, - key_vault_client: MongoClient[_DocumentTypeArg], - codec_options: CodecOptions[_DocumentTypeArg], - kms_tls_options: Optional[Mapping[str, Any]] = None, - ) -> None: - """Explicit client-side field level encryption. - - The ClientEncryption class encapsulates explicit operations on a key - vault collection that cannot be done directly on a MongoClient. Similar - to configuring auto encryption on a MongoClient, it is constructed with - a MongoClient (to a MongoDB cluster containing the key vault - collection), KMS provider configuration, and keyVaultNamespace. It - provides an API for explicitly encrypting and decrypting values, and - creating data keys. It does not provide an API to query keys from the - key vault collection, as this can be done directly on the MongoClient. - - See :ref:`explicit-client-side-encryption` for an example. - - :param kms_providers: Map of KMS provider options. The `kms_providers` - map values differ by provider: - - - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. - These are the AWS access key ID and AWS secret access key used - to generate KMS messages. An optional "sessionToken" may be - included to support temporary AWS credentials. - - `azure`: Map with "tenantId", "clientId", and "clientSecret" as - strings. Additionally, "identityPlatformEndpoint" may also be - specified as a string (defaults to 'login.microsoftonline.com'). - These are the Azure Active Directory credentials used to - generate Azure Key Vault messages. - - `gcp`: Map with "email" as a string and "privateKey" - as `bytes` or a base64 encoded string. - Additionally, "endpoint" may also be specified as a string - (defaults to 'oauth2.googleapis.com'). These are the - credentials used to generate Google Cloud KMS messages. - - `kmip`: Map with "endpoint" as a host with required port. - For example: ``{"endpoint": "example.com:443"}``. - - `local`: Map with "key" as `bytes` (96 bytes in length) or - a base64 encoded string which decodes - to 96 bytes. "key" is the master key used to encrypt/decrypt - data keys. This key should be generated and stored as securely - as possible. - - KMS providers may be specified with an optional name suffix - separated by a colon, for example "kmip:name" or "aws:name". - Named KMS providers do not support :ref:`CSFLE on-demand credentials`. - :param key_vault_namespace: The namespace for the key vault collection. - The key vault collection contains all data keys used for encryption - and decryption. Data keys are stored as documents in this MongoDB - collection. Data keys are protected with encryption by a KMS - provider. - :param key_vault_client: A MongoClient connected to a MongoDB cluster - containing the `key_vault_namespace` collection. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions` to use when encoding a - value for encryption and decoding the decrypted BSON value. This - should be the same CodecOptions instance configured on the - MongoClient, Database, or Collection used to access application - data. - :param kms_tls_options: A map of KMS provider names to TLS - options to use when creating secure connections to KMS providers. - Accepts the same TLS options as - :class:`pymongo.mongo_client.MongoClient`. For example, to - override the system default CA file:: - - kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} - - Or to supply a client certificate:: - - kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - - .. versionchanged:: 4.0 - Added the `kms_tls_options` parameter and the "kmip" KMS provider. - - .. versionadded:: 3.9 - """ - if not _HAVE_PYMONGOCRYPT: - raise ConfigurationError( - "client-side field level encryption requires the pymongocrypt " - "library: install a compatible version with: " - "python -m pip install 'pymongo[encryption]'" - ) - - if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") - - self._kms_providers = kms_providers - self._key_vault_namespace = key_vault_namespace - self._key_vault_client = key_vault_client - self._codec_options = codec_options - - db, coll = key_vault_namespace.split(".", 1) - key_vault_coll = key_vault_client[db][coll] - - opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options - ) - self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( - None, key_vault_coll, None, opts - ) - self._encryption = ExplicitEncrypter( - self._io_callbacks, MongoCryptOptions(kms_providers, None) - ) - # Use the same key vault collection as the callback. - assert self._io_callbacks.key_vault_coll is not None - self._key_vault_coll = self._io_callbacks.key_vault_coll - - def create_encrypted_collection( - self, - database: Database[_DocumentTypeArg], - name: str, - encrypted_fields: Mapping[str, Any], - kms_provider: Optional[str] = None, - master_key: Optional[Mapping[str, Any]] = None, - **kwargs: Any, - ) -> tuple[Collection[_DocumentTypeArg], Mapping[str, Any]]: - """Create a collection with encryptedFields. - - .. warning:: - This function does not update the encryptedFieldsMap in the client's - AutoEncryptionOpts, thus the user must create a new client after calling this function with - the encryptedFields returned. - - Normally collection creation is automatic. This method should - only be used to specify options on - creation. :class:`~pymongo.errors.EncryptionError` will be - raised if the collection already exists. - - :param name: the name of the collection to create - :param encrypted_fields: Document that describes the encrypted fields for - Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: - - .. code-block: python - - { - "escCollection": "enxcol_.encryptedCollection.esc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - } - - :param kms_provider: the KMS provider to be used - :param master_key: Identifies a KMS-specific key used to encrypt the - new data key. If the kmsProvider is "local" the `master_key` is - not applicable and may be omitted. - :param kwargs: additional keyword arguments are the same as "create_collection". - - All optional `create collection command`_ parameters should be passed - as keyword arguments to this method. - See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. - - :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. - - .. versionadded:: 4.4 - - .. _create collection command: - https://mongodb.com/docs/manual/reference/command/create - - """ - encrypted_fields = deepcopy(encrypted_fields) - for i, field in enumerate(encrypted_fields["fields"]): - if isinstance(field, dict) and field.get("keyId") is None: - try: - encrypted_fields["fields"][i]["keyId"] = self.create_data_key( - kms_provider=kms_provider, # type:ignore[arg-type] - master_key=master_key, - ) - except EncryptionError as exc: - raise EncryptedCollectionError(exc, encrypted_fields) from exc - kwargs["encryptedFields"] = encrypted_fields - kwargs["check_exists"] = False - try: - return ( - database.create_collection(name=name, **kwargs), - encrypted_fields, - ) - except Exception as exc: - raise EncryptedCollectionError(exc, encrypted_fields) from exc - - def create_data_key( - self, - kms_provider: str, - master_key: Optional[Mapping[str, Any]] = None, - key_alt_names: Optional[Sequence[str]] = None, - key_material: Optional[bytes] = None, - ) -> Binary: - """Create and insert a new data key into the key vault collection. - - :param kms_provider: The KMS provider to use. Supported values are - "aws", "azure", "gcp", "kmip", "local", or a named provider like - "kmip:name". - :param master_key: Identifies a KMS-specific key used to encrypt the - new data key. If the kmsProvider is "local" the `master_key` is - not applicable and may be omitted. - - If the `kms_provider` type is "aws" it is required and has the - following fields:: - - - `region` (string): Required. The AWS region, e.g. "us-east-1". - - `key` (string): Required. The Amazon Resource Name (ARN) to - the AWS customer. - - `endpoint` (string): Optional. An alternate host to send KMS - requests to. May include port number, e.g. - "kms.us-east-1.amazonaws.com:443". - - If the `kms_provider` type is "azure" it is required and has the - following fields:: - - - `keyVaultEndpoint` (string): Required. Host with optional - port, e.g. "example.vault.azure.net". - - `keyName` (string): Required. Key name in the key vault. - - `keyVersion` (string): Optional. Version of the key to use. - - If the `kms_provider` type is "gcp" it is required and has the - following fields:: - - - `projectId` (string): Required. The Google cloud project ID. - - `location` (string): Required. The GCP location, e.g. "us-east1". - - `keyRing` (string): Required. Name of the key ring that contains - the key to use. - - `keyName` (string): Required. Name of the key to use. - - `keyVersion` (string): Optional. Version of the key to use. - - `endpoint` (string): Optional. Host with optional port. - Defaults to "cloudkms.googleapis.com". - - If the `kms_provider` type is "kmip" it is optional and has the - following fields:: - - - `keyId` (string): Optional. `keyId` is the KMIP Unique - Identifier to a 96 byte KMIP Secret Data managed object. If - keyId is omitted, the driver creates a random 96 byte KMIP - Secret Data managed object. - - `endpoint` (string): Optional. Host with optional - port, e.g. "example.vault.azure.net:". - - :param key_alt_names: An optional list of string alternate - names used to reference a key. If a key is created with alternate - names, then encryption may refer to the key by the unique alternate - name instead of by ``key_id``. The following example shows creating - and referring to a data key by alternate name:: - - client_encryption.create_data_key("local", key_alt_names=["name1"]) - # reference the key with the alternate name - client_encryption.encrypt("457-55-5462", key_alt_name="name1", - algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) - :param key_material: Sets the custom key material to be used - by the data key for encryption and decryption. - - :return: The ``_id`` of the created data key document as a - :class:`~bson.binary.Binary` with subtype - :data:`~bson.binary.UUID_SUBTYPE`. - - .. versionchanged:: 4.2 - Added the `key_material` parameter. - """ - self._check_closed() - with _wrap_encryption_errors(): - return cast( - Binary, - self._encryption.create_data_key( - kms_provider, - master_key=master_key, - key_alt_names=key_alt_names, - key_material=key_material, - ), - ) - - def _encrypt_helper( - self, - value: Any, - algorithm: str, - key_id: Optional[Union[Binary, uuid.UUID]] = None, - key_alt_name: Optional[str] = None, - query_type: Optional[str] = None, - contention_factor: Optional[int] = None, - range_opts: Optional[RangeOpts] = None, - is_expression: bool = False, - ) -> Any: - self._check_closed() - if isinstance(key_id, uuid.UUID): - key_id = Binary.from_uuid(key_id) - if key_id is not None and not ( - isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE - ): - raise TypeError("key_id must be a bson.binary.Binary with subtype 4") - - doc = encode( - {"v": value}, - codec_options=self._codec_options, - ) - range_opts_bytes = None - if range_opts: - range_opts_bytes = encode( - range_opts.document, - codec_options=self._codec_options, - ) - with _wrap_encryption_errors(): - encrypted_doc = self._encryption.encrypt( - value=doc, - algorithm=algorithm, - key_id=key_id, - key_alt_name=key_alt_name, - query_type=query_type, - contention_factor=contention_factor, - range_opts=range_opts_bytes, - is_expression=is_expression, - ) - return decode(encrypted_doc)["v"] - - def encrypt( - self, - value: Any, - algorithm: str, - key_id: Optional[Union[Binary, uuid.UUID]] = None, - key_alt_name: Optional[str] = None, - query_type: Optional[str] = None, - contention_factor: Optional[int] = None, - range_opts: Optional[RangeOpts] = None, - ) -> Binary: - """Encrypt a BSON value with a given key and algorithm. - - Note that exactly one of ``key_id`` or ``key_alt_name`` must be - provided. - - :param value: The BSON value to encrypt. - :param algorithm` (string): The encryption algorithm to use. See - :class:`Algorithm` for some valid options. - :param key_id: Identifies a data key by ``_id`` which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - :param key_alt_name: Identifies a key vault document by 'keyAltName'. - :param query_type` (str): The query type to execute. See :class:`QueryType` for valid options. - :param contention_factor` (int): The contention factor to use - when the algorithm is :attr:`Algorithm.INDEXED`. An integer value - *must* be given when the :attr:`Algorithm.INDEXED` algorithm is - used. - :param range_opts: Experimental only, not intended for public use. - - :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. - - .. versionchanged:: 4.7 - ``key_id`` can now be passed in as a :class:`uuid.UUID`. - - .. versionchanged:: 4.2 - Added the `query_type` and `contention_factor` parameters. - """ - return cast( - Binary, - self._encrypt_helper( - value=value, - algorithm=algorithm, - key_id=key_id, - key_alt_name=key_alt_name, - query_type=query_type, - contention_factor=contention_factor, - range_opts=range_opts, - is_expression=False, - ), - ) - - def encrypt_expression( - self, - expression: Mapping[str, Any], - algorithm: str, - key_id: Optional[Union[Binary, uuid.UUID]] = None, - key_alt_name: Optional[str] = None, - query_type: Optional[str] = None, - contention_factor: Optional[int] = None, - range_opts: Optional[RangeOpts] = None, - ) -> RawBSONDocument: - """Encrypt a BSON expression with a given key and algorithm. - - Note that exactly one of ``key_id`` or ``key_alt_name`` must be - provided. - - :param expression: The BSON aggregate or match expression to encrypt. - :param algorithm` (string): The encryption algorithm to use. See - :class:`Algorithm` for some valid options. - :param key_id: Identifies a data key by ``_id`` which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - :param key_alt_name: Identifies a key vault document by 'keyAltName'. - :param query_type` (str): The query type to execute. See - :class:`QueryType` for valid options. - :param contention_factor` (int): The contention factor to use - when the algorithm is :attr:`Algorithm.INDEXED`. An integer value - *must* be given when the :attr:`Algorithm.INDEXED` algorithm is - used. - :param range_opts: Experimental only, not intended for public use. - - :return: The encrypted expression, a :class:`~bson.RawBSONDocument`. - - .. versionchanged:: 4.7 - ``key_id`` can now be passed in as a :class:`uuid.UUID`. - - .. versionadded:: 4.4 - """ - return cast( - RawBSONDocument, - self._encrypt_helper( - value=expression, - algorithm=algorithm, - key_id=key_id, - key_alt_name=key_alt_name, - query_type=query_type, - contention_factor=contention_factor, - range_opts=range_opts, - is_expression=True, - ), - ) - - def decrypt(self, value: Binary) -> Any: - """Decrypt an encrypted value. - - :param value` (Binary): The encrypted value, a - :class:`~bson.binary.Binary` with subtype 6. - - :return: The decrypted BSON value. - """ - self._check_closed() - if not (isinstance(value, Binary) and value.subtype == 6): - raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") - - with _wrap_encryption_errors(): - doc = encode({"v": value}) - decrypted_doc = self._encryption.decrypt(doc) - return decode(decrypted_doc, codec_options=self._codec_options)["v"] - - def get_key(self, id: Binary) -> Optional[RawBSONDocument]: - """Get a data key by id. - - :param id` (Binary): The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - :return: The key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - assert self._key_vault_coll is not None - return self._key_vault_coll.find_one({"_id": id}) - - def get_keys(self) -> Cursor[RawBSONDocument]: - """Get all of the data keys. - - :return: An instance of :class:`~pymongo.cursor.Cursor` over the data key - documents. - - .. versionadded:: 4.2 - """ - self._check_closed() - assert self._key_vault_coll is not None - return self._key_vault_coll.find({}) - - def delete_key(self, id: Binary) -> DeleteResult: - """Delete a key document in the key vault collection that has the given ``key_id``. - - :param id` (Binary): The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - :return: The delete result. - - .. versionadded:: 4.2 - """ - self._check_closed() - assert self._key_vault_coll is not None - return self._key_vault_coll.delete_one({"_id": id}) - - def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: - """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. - - :param `id`: The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - :param `key_alt_name`: The key alternate name to add. - - :return: The previous version of the key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - update = {"$addToSet": {"keyAltNames": key_alt_name}} - assert self._key_vault_coll is not None - return self._key_vault_coll.find_one_and_update({"_id": id}, update) - - def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: - """Get a key document in the key vault collection that has the given ``key_alt_name``. - - :param key_alt_name: (str): The key alternate name of the key to get. - - :return: The key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - assert self._key_vault_coll is not None - return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) - - def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: - """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. - - Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. - - :param `id`: The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - :param `key_alt_name`: The key alternate name to remove. - - :return: Returns the previous version of the key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - pipeline = [ - { - "$set": { - "keyAltNames": { - "$cond": [ - {"$eq": ["$keyAltNames", [key_alt_name]]}, - "$$REMOVE", - { - "$filter": { - "input": "$keyAltNames", - "cond": {"$ne": ["$$this", key_alt_name]}, - } - }, - ] - } - } - } - ] - assert self._key_vault_coll is not None - return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) - - def rewrap_many_data_key( - self, - filter: Mapping[str, Any], - provider: Optional[str] = None, - master_key: Optional[Mapping[str, Any]] = None, - ) -> RewrapManyDataKeyResult: - """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. - - :param filter: A document used to filter the data keys. - :param provider: The new KMS provider to use to encrypt the data keys, - or ``None`` to use the current KMS provider(s). - :param `master_key`: The master key fields corresponding to the new KMS - provider when ``provider`` is not ``None``. - - :return: A :class:`RewrapManyDataKeyResult`. - - This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. - Note that this does *not* require re-encrypting any of the data in your encrypted collections, - but rather refreshes the key that protects the keys that encrypt the data: - - .. code-block:: python - - client_encryption.rewrap_many_data_key( - filter={"keyAltNames": "optional filter for which keys you want to update"}, - master_key={ - "provider": "azure", # replace with your cloud provider - "master_key": { - # put the rest of your master_key options here - "key": "" - }, - }, - ) - - .. versionadded:: 4.2 - """ - if master_key is not None and provider is None: - raise ConfigurationError("A provider must be given if a master_key is given") - self._check_closed() - with _wrap_encryption_errors(): - raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) - if raw_result is None: - return RewrapManyDataKeyResult() - - raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) - replacements = [] - for key in raw_doc["v"]: - update_model = { - "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, - "$currentDate": {"updateDate": True}, - } - op = UpdateOne({"_id": key["_id"]}, update_model) - replacements.append(op) - if not replacements: - return RewrapManyDataKeyResult() - assert self._key_vault_coll is not None - result = self._key_vault_coll.bulk_write(replacements) - return RewrapManyDataKeyResult(result) - - def __enter__(self) -> ClientEncryption[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - def _check_closed(self) -> None: - if self._encryption is None: - raise InvalidOperation("Cannot use closed ClientEncryption") - - def close(self) -> None: - """Release resources. - - Note that using this class in a with-statement will automatically call - :meth:`close`:: - - with ClientEncryption(...) as client_encryption: - encrypted = client_encryption.encrypt(value, ...) - decrypted = client_encryption.decrypt(encrypted) - - """ - if self._io_callbacks: - self._io_callbacks.close() - self._encryption.close() - self._io_callbacks = None - self._encryption = None +__doc__ = original_doc diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 1d5369977c..350344a6da 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -1,4 +1,4 @@ -# Copyright 2019-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,257 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Support for automatic client-side field level encryption.""" +"""Re-import of synchronous EncryptionOptions API for compatibility.""" from __future__ import annotations -from typing import TYPE_CHECKING, Any, Mapping, Optional +from pymongo.synchronous.encryption_options import * # noqa: F403 +from pymongo.synchronous.encryption_options import __doc__ as original_doc -try: - import pymongocrypt # type:ignore[import] # noqa: F401 - - _HAVE_PYMONGOCRYPT = True -except ImportError: - _HAVE_PYMONGOCRYPT = False -from bson import int64 -from pymongo.common import validate_is_mapping -from pymongo.errors import ConfigurationError -from pymongo.uri_parser import _parse_kms_tls_options - -if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient - from pymongo.typings import _DocumentTypeArg - - -class AutoEncryptionOpts: - """Options to configure automatic client-side field level encryption.""" - - def __init__( - self, - kms_providers: Mapping[str, Any], - key_vault_namespace: str, - key_vault_client: Optional[MongoClient[_DocumentTypeArg]] = None, - schema_map: Optional[Mapping[str, Any]] = None, - bypass_auto_encryption: bool = False, - mongocryptd_uri: str = "mongodb://localhost:27020", - mongocryptd_bypass_spawn: bool = False, - mongocryptd_spawn_path: str = "mongocryptd", - mongocryptd_spawn_args: Optional[list[str]] = None, - kms_tls_options: Optional[Mapping[str, Any]] = None, - crypt_shared_lib_path: Optional[str] = None, - crypt_shared_lib_required: bool = False, - bypass_query_analysis: bool = False, - encrypted_fields_map: Optional[Mapping[str, Any]] = None, - ) -> None: - """Options to configure automatic client-side field level encryption. - - Automatic client-side field level encryption requires MongoDB >=4.2 - enterprise or a MongoDB >=4.2 Atlas cluster. Automatic encryption is not - supported for operations on a database or view and will result in - error. - - Although automatic encryption requires MongoDB >=4.2 enterprise or a - MongoDB >=4.2 Atlas cluster, automatic *decryption* is supported for all - users. To configure automatic *decryption* without automatic - *encryption* set ``bypass_auto_encryption=True``. Explicit - encryption and explicit decryption is also supported for all users - with the :class:`~pymongo.encryption.ClientEncryption` class. - - See :ref:`automatic-client-side-encryption` for an example. - - :param kms_providers: Map of KMS provider options. The `kms_providers` - map values differ by provider: - - - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. - These are the AWS access key ID and AWS secret access key used - to generate KMS messages. An optional "sessionToken" may be - included to support temporary AWS credentials. - - `azure`: Map with "tenantId", "clientId", and "clientSecret" as - strings. Additionally, "identityPlatformEndpoint" may also be - specified as a string (defaults to 'login.microsoftonline.com'). - These are the Azure Active Directory credentials used to - generate Azure Key Vault messages. - - `gcp`: Map with "email" as a string and "privateKey" - as `bytes` or a base64 encoded string. - Additionally, "endpoint" may also be specified as a string - (defaults to 'oauth2.googleapis.com'). These are the - credentials used to generate Google Cloud KMS messages. - - `kmip`: Map with "endpoint" as a host with required port. - For example: ``{"endpoint": "example.com:443"}``. - - `local`: Map with "key" as `bytes` (96 bytes in length) or - a base64 encoded string which decodes - to 96 bytes. "key" is the master key used to encrypt/decrypt - data keys. This key should be generated and stored as securely - as possible. - - KMS providers may be specified with an optional name suffix - separated by a colon, for example "kmip:name" or "aws:name". - Named KMS providers do not support :ref:`CSFLE on-demand credentials`. - Named KMS providers enables more than one of each KMS provider type to be configured. - For example, to configure multiple local KMS providers:: - - kms_providers = { - "local": {"key": local_kek1}, # Unnamed KMS provider. - "local:myname": {"key": local_kek2}, # Named KMS provider with name "myname". - } - - :param key_vault_namespace: The namespace for the key vault collection. - The key vault collection contains all data keys used for encryption - and decryption. Data keys are stored as documents in this MongoDB - collection. Data keys are protected with encryption by a KMS - provider. - :param key_vault_client: By default, the key vault collection - is assumed to reside in the same MongoDB cluster as the encrypted - MongoClient. Use this option to route data key queries to a - separate MongoDB cluster. - :param schema_map: Map of collection namespace ("db.coll") to - JSON Schema. By default, a collection's JSONSchema is periodically - polled with the listCollections command. But a JSONSchema may be - specified locally with the schemaMap option. - - **Supplying a `schema_map` provides more security than relying on - JSON Schemas obtained from the server. It protects against a - malicious server advertising a false JSON Schema, which could trick - the client into sending unencrypted data that should be - encrypted.** - - Schemas supplied in the schemaMap only apply to configuring - automatic encryption for client side encryption. Other validation - rules in the JSON schema will not be enforced by the driver and - will result in an error. - :param bypass_auto_encryption: If ``True``, automatic - encryption will be disabled but automatic decryption will still be - enabled. Defaults to ``False``. - :param mongocryptd_uri: The MongoDB URI used to connect - to the *local* mongocryptd process. Defaults to - ``'mongodb://localhost:27020'``. - :param mongocryptd_bypass_spawn: If ``True``, the encrypted - MongoClient will not attempt to spawn the mongocryptd process. - Defaults to ``False``. - :param mongocryptd_spawn_path: Used for spawning the - mongocryptd process. Defaults to ``'mongocryptd'`` and spawns - mongocryptd from the system path. - :param mongocryptd_spawn_args: A list of string arguments to - use when spawning the mongocryptd process. Defaults to - ``['--idleShutdownTimeoutSecs=60']``. If the list does not include - the ``idleShutdownTimeoutSecs`` option then - ``'--idleShutdownTimeoutSecs=60'`` will be added. - :param kms_tls_options: A map of KMS provider names to TLS - options to use when creating secure connections to KMS providers. - Accepts the same TLS options as - :class:`pymongo.mongo_client.MongoClient`. For example, to - override the system default CA file:: - - kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} - - Or to supply a client certificate:: - - kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - :param crypt_shared_lib_path: Override the path to load the crypt_shared library. - :param crypt_shared_lib_required: If True, raise an error if libmongocrypt is - unable to load the crypt_shared library. - :param bypass_query_analysis: If ``True``, disable automatic analysis - of outgoing commands. Set `bypass_query_analysis` to use explicit - encryption on indexed fields without the MongoDB Enterprise Advanced - licensed crypt_shared library. - :param encrypted_fields_map: Map of collection namespace ("db.coll") to documents - that described the encrypted fields for Queryable Encryption. For example:: - - { - "db.encryptedCollection": { - "escCollection": "enxcol_.encryptedCollection.esc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - } - } - - .. versionchanged:: 4.2 - Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, - and `bypass_query_analysis` parameters. - - .. versionchanged:: 4.0 - Added the `kms_tls_options` parameter and the "kmip" KMS provider. - - .. versionadded:: 3.9 - """ - if not _HAVE_PYMONGOCRYPT: - raise ConfigurationError( - "client side encryption requires the pymongocrypt library: " - "install a compatible version with: " - "python -m pip install 'pymongo[encryption]'" - ) - if encrypted_fields_map: - validate_is_mapping("encrypted_fields_map", encrypted_fields_map) - self._encrypted_fields_map = encrypted_fields_map - self._bypass_query_analysis = bypass_query_analysis - self._crypt_shared_lib_path = crypt_shared_lib_path - self._crypt_shared_lib_required = crypt_shared_lib_required - self._kms_providers = kms_providers - self._key_vault_namespace = key_vault_namespace - self._key_vault_client = key_vault_client - self._schema_map = schema_map - self._bypass_auto_encryption = bypass_auto_encryption - self._mongocryptd_uri = mongocryptd_uri - self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn - self._mongocryptd_spawn_path = mongocryptd_spawn_path - if mongocryptd_spawn_args is None: - mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] - self._mongocryptd_spawn_args = mongocryptd_spawn_args - if not isinstance(self._mongocryptd_spawn_args, list): - raise TypeError("mongocryptd_spawn_args must be a list") - if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): - self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") - # Maps KMS provider name to a SSLContext. - self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) - self._bypass_query_analysis = bypass_query_analysis - - -class RangeOpts: - """Options to configure encrypted queries using the rangePreview algorithm.""" - - def __init__( - self, - sparsity: int, - min: Optional[Any] = None, - max: Optional[Any] = None, - precision: Optional[int] = None, - ) -> None: - """Options to configure encrypted queries using the rangePreview algorithm. - - .. note:: This feature is experimental only, and not intended for public use. - - :param sparsity: An integer. - :param min: A BSON scalar value corresponding to the type being queried. - :param max: A BSON scalar value corresponding to the type being queried. - :param precision: An integer, may only be set for double or decimal128 types. - - .. versionadded:: 4.4 - """ - self.min = min - self.max = max - self.sparsity = sparsity - self.precision = precision - - @property - def document(self) -> dict[str, Any]: - doc = {} - for k, v in [ - ("sparsity", int64.Int64(self.sparsity)), - ("precision", self.precision), - ("min", self.min), - ("max", self.max), - ]: - if v is not None: - doc[k] = v - return doc +__doc__ = original_doc diff --git a/pymongo/errors.py b/pymongo/errors.py index a781e4a016..7efbc1ff31 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -21,7 +21,7 @@ from bson.errors import InvalidDocument if TYPE_CHECKING: - from pymongo.typings import _DocumentOut + from pymongo.asynchronous.typings import _DocumentOut class PyMongoError(Exception): diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 287db3fc4d..756e90ba23 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -1,4 +1,4 @@ -# Copyright 2020-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,212 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. - -"""Example event logger classes. - -.. versionadded:: 3.11 - -These loggers can be registered using :func:`register` or -:class:`~pymongo.mongo_client.MongoClient`. - -``monitoring.register(CommandLogger())`` - -or - -``MongoClient(event_listeners=[CommandLogger()])`` -""" +"""Re-import of synchronous EventLoggers API for compatibility.""" from __future__ import annotations -import logging - -from pymongo import monitoring - - -class CommandLogger(monitoring.CommandListener): - """A simple listener that logs command events. - - Listens for :class:`~pymongo.monitoring.CommandStartedEvent`, - :class:`~pymongo.monitoring.CommandSucceededEvent` and - :class:`~pymongo.monitoring.CommandFailedEvent` events and - logs them at the `INFO` severity level using :mod:`logging`. - .. versionadded:: 3.11 - """ - - def started(self, event: monitoring.CommandStartedEvent) -> None: - logging.info( - f"Command {event.command_name} with request id " - f"{event.request_id} started on server " - f"{event.connection_id}" - ) - - def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: - logging.info( - f"Command {event.command_name} with request id " - f"{event.request_id} on server {event.connection_id} " - f"succeeded in {event.duration_micros} " - "microseconds" - ) - - def failed(self, event: monitoring.CommandFailedEvent) -> None: - logging.info( - f"Command {event.command_name} with request id " - f"{event.request_id} on server {event.connection_id} " - f"failed in {event.duration_micros} " - "microseconds" - ) - - -class ServerLogger(monitoring.ServerListener): - """A simple listener that logs server discovery events. - - Listens for :class:`~pymongo.monitoring.ServerOpeningEvent`, - :class:`~pymongo.monitoring.ServerDescriptionChangedEvent`, - and :class:`~pymongo.monitoring.ServerClosedEvent` - events and logs them at the `INFO` severity level using :mod:`logging`. - - .. versionadded:: 3.11 - """ - - def opened(self, event: monitoring.ServerOpeningEvent) -> None: - logging.info(f"Server {event.server_address} added to topology {event.topology_id}") - - def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: - previous_server_type = event.previous_description.server_type - new_server_type = event.new_description.server_type - if new_server_type != previous_server_type: - # server_type_name was added in PyMongo 3.4 - logging.info( - f"Server {event.server_address} changed type from " - f"{event.previous_description.server_type_name} to " - f"{event.new_description.server_type_name}" - ) - - def closed(self, event: monitoring.ServerClosedEvent) -> None: - logging.warning(f"Server {event.server_address} removed from topology {event.topology_id}") - - -class HeartbeatLogger(monitoring.ServerHeartbeatListener): - """A simple listener that logs server heartbeat events. - - Listens for :class:`~pymongo.monitoring.ServerHeartbeatStartedEvent`, - :class:`~pymongo.monitoring.ServerHeartbeatSucceededEvent`, - and :class:`~pymongo.monitoring.ServerHeartbeatFailedEvent` - events and logs them at the `INFO` severity level using :mod:`logging`. - - .. versionadded:: 3.11 - """ - - def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: - logging.info(f"Heartbeat sent to server {event.connection_id}") - - def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: - # The reply.document attribute was added in PyMongo 3.4. - logging.info( - f"Heartbeat to server {event.connection_id} " - "succeeded with reply " - f"{event.reply.document}" - ) - - def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: - logging.warning( - f"Heartbeat to server {event.connection_id} failed with error {event.reply}" - ) - - -class TopologyLogger(monitoring.TopologyListener): - """A simple listener that logs server topology events. - - Listens for :class:`~pymongo.monitoring.TopologyOpenedEvent`, - :class:`~pymongo.monitoring.TopologyDescriptionChangedEvent`, - and :class:`~pymongo.monitoring.TopologyClosedEvent` - events and logs them at the `INFO` severity level using :mod:`logging`. - - .. versionadded:: 3.11 - """ - - def opened(self, event: monitoring.TopologyOpenedEvent) -> None: - logging.info(f"Topology with id {event.topology_id} opened") - - def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: - logging.info(f"Topology description updated for topology id {event.topology_id}") - previous_topology_type = event.previous_description.topology_type - new_topology_type = event.new_description.topology_type - if new_topology_type != previous_topology_type: - # topology_type_name was added in PyMongo 3.4 - logging.info( - f"Topology {event.topology_id} changed type from " - f"{event.previous_description.topology_type_name} to " - f"{event.new_description.topology_type_name}" - ) - # The has_writable_server and has_readable_server methods - # were added in PyMongo 3.4. - if not event.new_description.has_writable_server(): - logging.warning("No writable servers available.") - if not event.new_description.has_readable_server(): - logging.warning("No readable servers available.") - - def closed(self, event: monitoring.TopologyClosedEvent) -> None: - logging.info(f"Topology with id {event.topology_id} closed") - - -class ConnectionPoolLogger(monitoring.ConnectionPoolListener): - """A simple listener that logs server connection pool events. - - Listens for :class:`~pymongo.monitoring.PoolCreatedEvent`, - :class:`~pymongo.monitoring.PoolClearedEvent`, - :class:`~pymongo.monitoring.PoolClosedEvent`, - :~pymongo.monitoring.class:`ConnectionCreatedEvent`, - :class:`~pymongo.monitoring.ConnectionReadyEvent`, - :class:`~pymongo.monitoring.ConnectionClosedEvent`, - :class:`~pymongo.monitoring.ConnectionCheckOutStartedEvent`, - :class:`~pymongo.monitoring.ConnectionCheckOutFailedEvent`, - :class:`~pymongo.monitoring.ConnectionCheckedOutEvent`, - and :class:`~pymongo.monitoring.ConnectionCheckedInEvent` - events and logs them at the `INFO` severity level using :mod:`logging`. - - .. versionadded:: 3.11 - """ - - def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: - logging.info(f"[pool {event.address}] pool created") - - def pool_ready(self, event: monitoring.PoolReadyEvent) -> None: - logging.info(f"[pool {event.address}] pool ready") - - def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: - logging.info(f"[pool {event.address}] pool cleared") - - def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: - logging.info(f"[pool {event.address}] pool closed") - - def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: - logging.info(f"[pool {event.address}][conn #{event.connection_id}] connection created") - - def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: - logging.info( - f"[pool {event.address}][conn #{event.connection_id}] connection setup succeeded" - ) - - def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: - logging.info( - f"[pool {event.address}][conn #{event.connection_id}] " - f'connection closed, reason: "{event.reason}"' - ) - - def connection_check_out_started( - self, event: monitoring.ConnectionCheckOutStartedEvent - ) -> None: - logging.info(f"[pool {event.address}] connection check out started") - - def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: - logging.info(f"[pool {event.address}] connection check out failed, reason: {event.reason}") - - def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: - logging.info( - f"[pool {event.address}][conn #{event.connection_id}] connection checked out of pool" - ) +from pymongo.synchronous.event_loggers import * # noqa: F403 +from pymongo.synchronous.event_loggers import __doc__ as original_doc - def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: - logging.info( - f"[pool {event.address}][conn #{event.connection_id}] connection checked into pool" - ) +__doc__ = original_doc diff --git a/pymongo/helpers_constants.py b/pymongo/helpers_constants.py new file mode 100644 index 0000000000..00b2502701 --- /dev/null +++ b/pymongo/helpers_constants.py @@ -0,0 +1,72 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Constants used by the driver that don't really fit elsewhere.""" + +# From the SDAM spec, the "node is shutting down" codes. +from __future__ import annotations + +_SHUTDOWN_CODES: frozenset = frozenset( + [ + 11600, # InterruptedAtShutdown + 91, # ShutdownInProgress + ] +) +# From the SDAM spec, the "not primary" error codes are combined with the +# "node is recovering" error codes (of which the "node is shutting down" +# errors are a subset). +_NOT_PRIMARY_CODES: frozenset = ( + frozenset( + [ + 10058, # LegacyNotPrimary <=3.2 "not primary" error code + 10107, # NotWritablePrimary + 13435, # NotPrimaryNoSecondaryOk + 11602, # InterruptedDueToReplStateChange + 13436, # NotPrimaryOrSecondary + 189, # PrimarySteppedDown + ] + ) + | _SHUTDOWN_CODES +) +# From the retryable writes spec. +_RETRYABLE_ERROR_CODES: frozenset = _NOT_PRIMARY_CODES | frozenset( + [ + 7, # HostNotFound + 6, # HostUnreachable + 89, # NetworkTimeout + 9001, # SocketException + 262, # ExceededTimeLimit + 134, # ReadConcernMajorityNotAvailableYet + ] +) + +# Server code raised when re-authentication is required +_REAUTHENTICATION_REQUIRED_CODE: int = 391 + +# Server code raised when authentication fails. +_AUTHENTICATION_FAILURE_CODE: int = 18 + +# Note - to avoid bugs from forgetting which if these is all lowercase and +# which are camelCase, and at the same time avoid having to add a test for +# every command, use all lowercase here and test against command_name.lower(). +_SENSITIVE_COMMANDS: set = { + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +} diff --git a/pymongo/lock.py b/pymongo/lock.py index e374785006..b05f6acffb 100644 --- a/pymongo/lock.py +++ b/pymongo/lock.py @@ -13,9 +13,12 @@ # limitations under the License. from __future__ import annotations +import asyncio import os import threading +import time import weakref +from typing import Any, Callable, Optional _HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") @@ -38,3 +41,102 @@ def _release_locks() -> None: for lock in _forkable_locks: if lock.locked(): lock.release() + + +class _ALock: + def __init__(self, lock: threading.Lock) -> None: + self._lock = lock + + def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: + return self._lock.acquire(blocking=blocking, timeout=timeout) + + async def a_acquire(self, blocking: bool = True, timeout: float = -1) -> bool: + if timeout > 0: + tstart = time.monotonic() + while True: + acquired = self._lock.acquire(blocking=False) + if acquired: + return True + if timeout > 0 and (time.monotonic() - tstart) > timeout: + return False + if not blocking: + return False + await asyncio.sleep(0) + + def release(self) -> None: + self._lock.release() + + async def __aenter__(self) -> _ALock: + await self.a_acquire() + return self + + def __enter__(self) -> _ALock: + self._lock.acquire() + return self + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() + + async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() + + +class _ACondition: + def __init__(self, condition: threading.Condition) -> None: + self._condition = condition + + async def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: + if timeout > 0: + tstart = time.monotonic() + while True: + acquired = self._condition.acquire(blocking=False) + if acquired: + return True + if timeout > 0 and (time.monotonic() - tstart) > timeout: + return False + if not blocking: + return False + await asyncio.sleep(0) + + async def wait(self, timeout: Optional[float] = None) -> bool: + if timeout is not None: + tstart = time.monotonic() + while True: + notified = self._condition.wait(0.001) + if notified: + return True + if timeout is not None and (time.monotonic() - tstart) > timeout: + return False + + async def wait_for(self, predicate: Callable, timeout: Optional[float] = None) -> bool: + if timeout is not None: + tstart = time.monotonic() + while True: + notified = self._condition.wait_for(predicate, 0.001) + if notified: + return True + if timeout is not None and (time.monotonic() - tstart) > timeout: + return False + + def notify(self, n: int = 1) -> None: + self._condition.notify(n) + + def notify_all(self) -> None: + self._condition.notify_all() + + def release(self) -> None: + self._condition.release() + + async def __aenter__(self) -> _ACondition: + await self.acquire() + return self + + def __enter__(self) -> _ACondition: + self._condition.acquire() + return self + + async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index b0824acd44..68c2bbc4b5 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1,2529 +1,21 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""Tools for connecting to MongoDB. - -.. seealso:: :doc:`/examples/high_availability` for examples of connecting - to replica sets or sets of mongos servers. - -To get a :class:`~pymongo.database.Database` instance from a -:class:`MongoClient` use either dictionary-style or attribute-style -access: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> c = MongoClient() - >>> c.test_database - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') - >>> c["test-database"] - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') -""" +"""Re-import of synchronous MongoClient API for compatibility.""" from __future__ import annotations -import contextlib -import os -import weakref -from collections import defaultdict -from typing import ( - TYPE_CHECKING, - Any, - Callable, - ContextManager, - FrozenSet, - Generic, - Iterator, - Mapping, - MutableMapping, - NoReturn, - Optional, - Sequence, - Type, - TypeVar, - Union, - cast, -) - -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry -from bson.timestamp import Timestamp -from pymongo import ( - _csot, - client_session, - common, - database, - helpers, - message, - periodic_executor, - uri_parser, -) -from pymongo.change_stream import ChangeStream, ClusterChangeStream -from pymongo.client_options import ClientOptions -from pymongo.client_session import _EmptyServerSession -from pymongo.command_cursor import CommandCursor -from pymongo.errors import ( - AutoReconnect, - BulkWriteError, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - NotPrimaryError, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError, - WaitQueueTimeoutError, - WriteConcernError, -) -from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks -from pymongo.logger import _CLIENT_LOGGER, _log_or_warn -from pymongo.monitoring import ConnectionClosedReason -from pymongo.operations import _Op -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.server_selectors import writable_server_selector -from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext -from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription -from pymongo.typings import ( - ClusterTime, - _Address, - _CollationIn, - _DocumentType, - _DocumentTypeArg, - _Pipeline, -) -from pymongo.uri_parser import ( - _check_options, - _handle_option_deprecations, - _handle_security_options, - _normalize_options, -) -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern - -if TYPE_CHECKING: - import sys - from types import TracebackType - - from bson.objectid import ObjectId - from pymongo.bulk import _Bulk - from pymongo.client_session import ClientSession, _ServerSession - from pymongo.cursor import _ConnectionManager - from pymongo.database import Database - from pymongo.message import _CursorAddress, _GetMore, _Query - from pymongo.pool import Connection - from pymongo.read_concern import ReadConcern - from pymongo.response import Response - from pymongo.server import Server - from pymongo.server_selectors import Selection - - if sys.version_info[:2] >= (3, 9): - from collections.abc import Generator - else: - # Deprecated since version 3.9: collections.abc.Generator now supports []. - from typing import Generator - -T = TypeVar("T") - -_WriteCall = Callable[[Optional["ClientSession"], "Connection", bool], T] -_ReadCall = Callable[[Optional["ClientSession"], "Server", "Connection", _ServerMode], T] - - -class MongoClient(common.BaseObject, Generic[_DocumentType]): - """ - A client-side representation of a MongoDB cluster. - - Instances can represent either a standalone MongoDB server, a replica - set, or a sharded cluster. Instances of this class are responsible for - maintaining up-to-date state of the cluster, and possibly cache - resources related to this, including background threads for monitoring, - and connection pools. - """ - - HOST = "localhost" - PORT = 27017 - # Define order to retrieve options from ClientOptions for __repr__. - # No host/port; these are retrieved from TopologySettings. - _constructor_args = ("document_class", "tz_aware", "connect") - _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() - - def __init__( - self, - host: Optional[Union[str, Sequence[str]]] = None, - port: Optional[int] = None, - document_class: Optional[Type[_DocumentType]] = None, - tz_aware: Optional[bool] = None, - connect: Optional[bool] = None, - type_registry: Optional[TypeRegistry] = None, - **kwargs: Any, - ) -> None: - """Client for a MongoDB instance, a replica set, or a set of mongoses. - - .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of - False instead of None. - For more details, see the relevant section of the PyMongo 4.x migration guide: - :ref:`pymongo4-migration-direct-connection`. - - The client object is thread-safe and has connection-pooling built in. - If an operation fails because of a network error, - :class:`~pymongo.errors.ConnectionFailure` is raised and the client - reconnects in the background. Application code should handle this - exception (recognizing that the operation failed) and then continue to - execute. - - The `host` parameter can be a full `mongodb URI - `_, in addition to - a simple hostname. It can also be a list of hostnames but no more - than one URI. Any port specified in the host string(s) will override - the `port` parameter. For username and - passwords reserved characters like ':', '/', '+' and '@' must be - percent encoded following RFC 2396:: - - from urllib.parse import quote_plus - - uri = "mongodb://%s:%s@%s" % ( - quote_plus(user), quote_plus(password), host) - client = MongoClient(uri) - - Unix domain sockets are also supported. The socket path must be percent - encoded in the URI:: - - uri = "mongodb://%s:%s@%s" % ( - quote_plus(user), quote_plus(password), quote_plus(socket_path)) - client = MongoClient(uri) - - But not when passed as a simple hostname:: - - client = MongoClient('/tmp/mongodb-27017.sock') - - Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The - URI must include one, and only one, hostname. The hostname will be - resolved to one or more DNS `SRV records - `_ which will be used - as the seed list for connecting to the MongoDB deployment. When using - SRV URIs, the `authSource` and `replicaSet` configuration options can - be specified using `TXT records - `_. See the - `Initial DNS Seedlist Discovery spec - `_ - for more details. Note that the use of SRV URIs implicitly enables - TLS support. Pass tls=false in the URI to override. - - .. note:: MongoClient creation will block waiting for answers from - DNS when mongodb+srv:// URIs are used. - - .. note:: Starting with version 3.0 the :class:`MongoClient` - constructor no longer blocks while connecting to the server or - servers, and it no longer raises - :class:`~pymongo.errors.ConnectionFailure` if they are - unavailable, nor :class:`~pymongo.errors.ConfigurationError` - if the user's credentials are wrong. Instead, the constructor - returns immediately and launches the connection process on - background threads. You can check if the server is available - like this:: - - from pymongo.errors import ConnectionFailure - client = MongoClient() - try: - # The ping command is cheap and does not require auth. - client.admin.command('ping') - except ConnectionFailure: - print("Server not available") - - .. warning:: When using PyMongo in a multiprocessing context, please - read :ref:`multiprocessing` first. - - .. note:: Many of the following options can be passed using a MongoDB - URI or keyword parameters. If the same option is passed in a URI and - as a keyword parameter the keyword parameter takes precedence. - - :param host: hostname or IP address or Unix domain socket - path of a single mongod or mongos instance to connect to, or a - mongodb URI, or a list of hostnames (but no more than one mongodb - URI). If `host` is an IPv6 literal it must be enclosed in '[' - and ']' characters - following the RFC2732 URL syntax (e.g. '[::1]' for localhost). - Multihomed and round robin DNS addresses are **not** supported. - :param port: port number on which to connect - :param document_class: default class to use for - documents returned from queries on this client - :param tz_aware: if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`MongoClient` will be timezone - aware (otherwise they will be naive) - :param connect: if ``True`` (the default), immediately - begin connecting to MongoDB in the background. Otherwise connect - on the first operation. - :param type_registry: instance of - :class:`~bson.codec_options.TypeRegistry` to enable encoding - and decoding of custom types. - :param datetime_conversion: Specifies how UTC datetimes should be decoded - within BSON. Valid options include 'datetime_ms' to return as a - DatetimeMS, 'datetime' to return as a datetime.datetime and - raising a ValueError for out-of-range values, 'datetime_auto' to - return DatetimeMS objects when the underlying datetime is - out-of-range and 'datetime_clamp' to clamp to the minimum and - maximum possible datetimes. Defaults to 'datetime'. See - :ref:`handling-out-of-range-datetimes` for details. - - | **Other optional parameters can be passed as keyword arguments:** - - - `directConnection` (optional): if ``True``, forces this client to - connect directly to the specified MongoDB host as a standalone. - If ``false``, the client connects to the entire replica set of - which the given MongoDB host(s) is a part. If this is ``True`` - and a mongodb+srv:// URI or a URI containing multiple seeds is - provided, an exception will be raised. - - `maxPoolSize` (optional): The maximum allowable number of - concurrent connections to each connected server. Requests to a - server will block if there are `maxPoolSize` outstanding - connections to the requested server. Defaults to 100. Can be - either 0 or None, in which case there is no limit on the number - of concurrent connections. - - `minPoolSize` (optional): The minimum required number of concurrent - connections that the pool will maintain to each connected server. - Default is 0. - - `maxIdleTimeMS` (optional): The maximum number of milliseconds that - a connection can remain idle in the pool before being removed and - replaced. Defaults to `None` (no limit). - - `maxConnecting` (optional): The maximum number of connections that - each pool can establish concurrently. Defaults to `2`. - - `timeoutMS`: (integer or None) Controls how long (in - milliseconds) the driver will wait when executing an operation - (including retry attempts) before raising a timeout error. - ``0`` or ``None`` means no timeout. - - `socketTimeoutMS`: (integer or None) Controls how long (in - milliseconds) the driver will wait for a response after sending an - ordinary (non-monitoring) database operation before concluding that - a network error has occurred. ``0`` or ``None`` means no timeout. - Defaults to ``None`` (no timeout). - - `connectTimeoutMS`: (integer or None) Controls how long (in - milliseconds) the driver will wait during server monitoring when - connecting a new socket to a server before concluding the server - is unavailable. ``0`` or ``None`` means no timeout. - Defaults to ``20000`` (20 seconds). - - `server_selector`: (callable or None) Optional, user-provided - function that augments server selection rules. The function should - accept as an argument a list of - :class:`~pymongo.server_description.ServerDescription` objects and - return a list of server descriptions that should be considered - suitable for the desired operation. - - `serverSelectionTimeoutMS`: (integer) Controls how long (in - milliseconds) the driver will wait to find an available, - appropriate server to carry out a database operation; while it is - waiting, multiple server monitoring operations may be carried out, - each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 - seconds). - - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) - a thread will wait for a socket from the pool if the pool has no - free sockets. Defaults to ``None`` (no timeout). - - `heartbeatFrequencyMS`: (optional) The number of milliseconds - between periodic server checks, or None to accept the default - frequency of 10 seconds. - - `serverMonitoringMode`: (optional) The server monitoring mode to use. - Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". - - `appname`: (string or None) The name of the application that - created this MongoClient instance. The server will log this value - upon establishing each connection. It is also recorded in the slow - query log and profile collections. - - `driver`: (pair or None) A driver implemented on top of PyMongo can - pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, - version, and platform to the message printed in the server log when - establishing a connection. - - `event_listeners`: a list or tuple of event listeners. See - :mod:`~pymongo.monitoring` for details. - - `retryWrites`: (boolean) Whether supported write operations - executed within this MongoClient will be retried once after a - network error. Defaults to ``True``. - The supported write operations are: - - - :meth:`~pymongo.collection.Collection.bulk_write`, as long as - :class:`~pymongo.operations.UpdateMany` or - :class:`~pymongo.operations.DeleteMany` are not included. - - :meth:`~pymongo.collection.Collection.delete_one` - - :meth:`~pymongo.collection.Collection.insert_one` - - :meth:`~pymongo.collection.Collection.insert_many` - - :meth:`~pymongo.collection.Collection.replace_one` - - :meth:`~pymongo.collection.Collection.update_one` - - :meth:`~pymongo.collection.Collection.find_one_and_delete` - - :meth:`~pymongo.collection.Collection.find_one_and_replace` - - :meth:`~pymongo.collection.Collection.find_one_and_update` - - Unsupported write operations include, but are not limited to, - :meth:`~pymongo.collection.Collection.aggregate` using the ``$out`` - pipeline operator and any operation with an unacknowledged write - concern (e.g. {w: 0})). See - https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst - - `retryReads`: (boolean) Whether supported read operations - executed within this MongoClient will be retried once after a - network error. Defaults to ``True``. - The supported read operations are: - :meth:`~pymongo.collection.Collection.find`, - :meth:`~pymongo.collection.Collection.find_one`, - :meth:`~pymongo.collection.Collection.aggregate` without ``$out``, - :meth:`~pymongo.collection.Collection.distinct`, - :meth:`~pymongo.collection.Collection.count`, - :meth:`~pymongo.collection.Collection.estimated_document_count`, - :meth:`~pymongo.collection.Collection.count_documents`, - :meth:`pymongo.collection.Collection.watch`, - :meth:`~pymongo.collection.Collection.list_indexes`, - :meth:`pymongo.database.Database.watch`, - :meth:`~pymongo.database.Database.list_collections`, - :meth:`pymongo.mongo_client.MongoClient.watch`, - and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. - - Unsupported read operations include, but are not limited to - :meth:`~pymongo.database.Database.command` and any getMore - operation on a cursor. - - Enabling retryable reads makes applications more resilient to - transient errors such as network failures, database upgrades, and - replica set failovers. For an exact definition of which errors - trigger a retry, see the `retryable reads specification - `_. - - - `compressors`: Comma separated list of compressors for wire - protocol compression. The list is used to negotiate a compressor - with the server. Currently supported options are "snappy", "zlib" - and "zstd". Support for snappy requires the - `python-snappy `_ package. - zlib support requires the Python standard library zlib module. zstd - requires the `zstandard `_ - package. By default no compression is used. Compression support - must also be enabled on the server. MongoDB 3.6+ supports snappy - and zlib compression. MongoDB 4.2+ adds support for zstd. - See :ref:`network-compression-example` for details. - - `zlibCompressionLevel`: (int) The zlib compression level to use - when zlib is used as the wire protocol compressor. Supported values - are -1 through 9. -1 tells the zlib library to use its default - compression level (usually 6). 0 means no compression. 1 is best - speed. 9 is best compression. Defaults to -1. - - `uuidRepresentation`: The BSON representation to use when encoding - from and decoding to instances of :class:`~uuid.UUID`. Valid - values are the strings: "standard", "pythonLegacy", "javaLegacy", - "csharpLegacy", and "unspecified" (the default). New applications - should consider setting this to "standard" for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `srvServiceName`: (string) The SRV service name to use for - "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: - - MongoClient("mongodb+srv://example.com/?srvServiceName=customname") - - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will - connect to. More specifically, when a "mongodb+srv://" connection string - resolves to more than srvMaxHosts number of hosts, the client will randomly - choose an srvMaxHosts sized subset of hosts. - - - | **Write Concern options:** - | (Only set if passed. No default values.) - - - `w`: (integer or string) If this is a replica set, write operations - will block until they have been replicated to the specified number - or tagged set of servers. `w=` always includes the replica set - primary (e.g. w=3 means write to the primary and wait until - replicated to **two** secondaries). Passing w=0 **disables write - acknowledgement** and all other write concern options. - - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`. - Specify a value in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. Passing wTimeoutMS=0 - will cause **write operations to wait indefinitely**. - - `journal`: If ``True`` block until write operations have been - committed to the journal. Cannot be used in combination with - `fsync`. Write operations will fail with an exception if this - option is used when the server is running without journaling. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. - - | **Replica set keyword arguments for connecting with a replica set - - either directly or via a mongos:** - - - `replicaSet`: (string or None) The name of the replica set to - connect to. The driver will verify that all servers it connects to - match this name. Implies that the hosts specified are a seed list - and the driver should attempt to find all members of the set. - Defaults to ``None``. - - | **Read Preference:** - - - `readPreference`: The replica set read preference for this client. - One of ``primary``, ``primaryPreferred``, ``secondary``, - ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``. - - `readPreferenceTags`: Specifies a tag set as a comma-separated list - of colon-separated key-value pairs. For example ``dc:ny,rack:1``. - Defaults to ``None``. - - `maxStalenessSeconds`: (integer) The maximum estimated - length of time a replica set secondary can fall behind the primary - in replication before it will no longer be selected for operations. - Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds - is set, it must be a positive integer greater than or equal to - 90 seconds. - - .. seealso:: :doc:`/examples/server_selection` - - | **Authentication:** - - - `username`: A string. - - `password`: A string. - - Although username and password must be percent-escaped in a MongoDB - URI, they must not be percent-escaped when passed as parameters. In - this example, both the space and slash special characters are passed - as-is:: - - MongoClient(username="user name", password="pass/word") - - - `authSource`: The database to authenticate on. Defaults to the - database specified in the URI, if provided, or to "admin". - - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. - If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 - when connected to MongoDB 3.6 and negotiates the mechanism to use - (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. - - `authMechanismProperties`: Used to specify authentication mechanism - specific options. To specify the service name for GSSAPI - authentication pass authMechanismProperties='SERVICE_NAME:'. - To specify the session token for MONGODB-AWS authentication pass - ``authMechanismProperties='AWS_SESSION_TOKEN:'``. - - .. seealso:: :doc:`/examples/authentication` - - | **TLS/SSL configuration:** - - - `tls`: (boolean) If ``True``, create the connection to the server - using transport layer security. Defaults to ``False``. - - `tlsInsecure`: (boolean) Specify whether TLS constraints should be - relaxed as much as possible. Setting ``tlsInsecure=True`` implies - ``tlsAllowInvalidCertificates=True`` and - ``tlsAllowInvalidHostnames=True``. Defaults to ``False``. Think - very carefully before setting this to ``True`` as it dramatically - reduces the security of TLS. - - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues - the TLS handshake regardless of the outcome of the certificate - verification process. If this is ``False``, and a value is not - provided for ``tlsCAFile``, PyMongo will attempt to load system - provided CA certificates. If the python version in use does not - support loading system CA certificates then the ``tlsCAFile`` - parameter must point to a file of CA certificates. - ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. - Defaults to ``False``. Think very carefully before setting this - to ``True`` as that could make your application vulnerable to - on-path attackers. - - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS - hostname verification. ``tlsAllowInvalidHostnames=False`` implies - ``tls=True``. Defaults to ``False``. Think very carefully before - setting this to ``True`` as that could make your application - vulnerable to on-path attackers. - - `tlsCAFile`: A file containing a single or a bundle of - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``tls=True``. Defaults to ``None``. - - `tlsCertificateKeyFile`: A file containing the client certificate - and private key. Implies ``tls=True``. Defaults to ``None``. - - `tlsCRLFile`: A file containing a PEM or DER formatted - certificate revocation list. Implies ``tls=True``. Defaults to - ``None``. - - `tlsCertificateKeyFilePassword`: The password or passphrase for - decrypting the private key in ``tlsCertificateKeyFile``. Only - necessary if the private key is encrypted. Defaults to ``None``. - - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables - certificate revocation status checking via the OCSP responder - specified on the server certificate. - ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``. - Defaults to ``False``. - - `ssl`: (boolean) Alias for ``tls``. - - | **Read Concern options:** - | (If not set explicitly, this will use the server default) - - - `readConcernLevel`: (string) The read concern level specifies the - level of isolation for read operations. For example, a read - operation using a read concern level of ``majority`` will only - return data that has been written to a majority of nodes. If the - level is left unspecified, the server default will be used. - - | **Client side encryption options:** - | (If not set explicitly, client side encryption will not be enabled.) - - - `auto_encryption_opts`: A - :class:`~pymongo.encryption_options.AutoEncryptionOpts` which - configures this client to automatically encrypt collection commands - and automatically decrypt results. See - :ref:`automatic-client-side-encryption` for an example. - If a :class:`MongoClient` is configured with - ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a - separate internal ``MongoClient`` is created if any of the - following are true: - - - A ``key_vault_client`` is not passed to - :class:`~pymongo.encryption_options.AutoEncryptionOpts` - - ``bypass_auto_encrpytion=False`` is passed to - :class:`~pymongo.encryption_options.AutoEncryptionOpts` - - | **Stable API options:** - | (If not set explicitly, Stable API will not be enabled.) - - - `server_api`: A - :class:`~pymongo.server_api.ServerApi` which configures this - client to use Stable API. See :ref:`versioned-api-ref` for - details. - - .. seealso:: The MongoDB documentation on `connections `_. - - .. versionchanged:: 4.5 - Added the ``serverMonitoringMode`` keyword argument. - - .. versionchanged:: 4.2 - Added the ``timeoutMS`` keyword argument. - - .. versionchanged:: 4.0 - - - Removed the fsync, unlock, is_locked, database_names, and - close_cursor methods. - See the :ref:`pymongo4-migration-guide`. - - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` - keyword arguments. - - The default for `uuidRepresentation` was changed from - ``pythonLegacy`` to ``unspecified``. - - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and - keyword arguments. - - .. versionchanged:: 3.12 - Added the ``server_api`` keyword argument. - The following keyword arguments were deprecated: - - - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor - of ``tlsCertificateKeyFile``. - - .. versionchanged:: 3.11 - Added the following keyword arguments and URI options: - - - ``tlsDisableOCSPEndpointCheck`` - - ``directConnection`` - - .. versionchanged:: 3.9 - Added the ``retryReads`` keyword argument and URI option. - Added the ``tlsInsecure`` keyword argument and URI option. - The following keyword arguments and URI options were deprecated: - - - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``. - - ``j`` was deprecated in favor of ``journal``. - - ``ssl_cert_reqs`` was deprecated in favor of - ``tlsAllowInvalidCertificates``. - - ``ssl_match_hostname`` was deprecated in favor of - ``tlsAllowInvalidHostnames``. - - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``. - - ``ssl_certfile`` was deprecated in favor of - ``tlsCertificateKeyFile``. - - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``. - - ``ssl_pem_passphrase`` was deprecated in favor of - ``tlsCertificateKeyFilePassword``. - - .. versionchanged:: 3.9 - ``retryWrites`` now defaults to ``True``. - - .. versionchanged:: 3.8 - Added the ``server_selector`` keyword argument. - Added the ``type_registry`` keyword argument. - - .. versionchanged:: 3.7 - Added the ``driver`` keyword argument. - - .. versionchanged:: 3.6 - Added support for mongodb+srv:// URIs. - Added the ``retryWrites`` keyword argument and URI option. - - .. versionchanged:: 3.5 - Add ``username`` and ``password`` options. Document the - ``authSource``, ``authMechanism``, and ``authMechanismProperties`` - options. - Deprecated the ``socketKeepAlive`` keyword argument and URI option. - ``socketKeepAlive`` now defaults to ``True``. - - .. versionchanged:: 3.0 - :class:`~pymongo.mongo_client.MongoClient` is now the one and only - client class for a standalone server, mongos, or replica set. - It includes the functionality that had been split into - :class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect - to a replica set, discover all its members, and monitor the set for - stepdowns, elections, and reconfigs. - - The :class:`~pymongo.mongo_client.MongoClient` constructor no - longer blocks while connecting to the server or servers, and it no - longer raises :class:`~pymongo.errors.ConnectionFailure` if they - are unavailable, nor :class:`~pymongo.errors.ConfigurationError` - if the user's credentials are wrong. Instead, the constructor - returns immediately and launches the connection process on - background threads. - - Therefore the ``alive`` method is removed since it no longer - provides meaningful information; even if the client is disconnected, - it may discover a server in time to fulfill the next operation. - - In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of - standalone MongoDB servers and used the first it could connect to:: - - MongoClient(['host1.com:27017', 'host2.com:27017']) - - A list of multiple standalones is no longer supported; if multiple - servers are listed they must be members of the same replica set, or - mongoses in the same sharded cluster. - - The behavior for a list of mongoses is changed from "high - availability" to "load balancing". Before, the client connected to - the lowest-latency mongos in the list, and used it until a network - error prompted it to re-evaluate all mongoses' latencies and - reconnect to one of them. In PyMongo 3, the client monitors its - network latency to all the mongoses continuously, and distributes - operations evenly among those with the lowest latency. See - :ref:`mongos-load-balancing` for more information. - - The ``connect`` option is added. - - The ``start_request``, ``in_request``, and ``end_request`` methods - are removed, as well as the ``auto_start_request`` option. - - The ``copy_database`` method is removed, see the - :doc:`copy_database examples ` for alternatives. - - The :meth:`MongoClient.disconnect` method is removed; it was a - synonym for :meth:`~pymongo.MongoClient.close`. - - :class:`~pymongo.mongo_client.MongoClient` no longer returns an - instance of :class:`~pymongo.database.Database` for attribute names - with leading underscores. You must use dict-style lookups instead:: - - client['__my_database__'] - - Not:: - - client.__my_database__ - - .. versionchanged:: 4.7 - Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. - """ - doc_class = document_class or dict - self.__init_kwargs: dict[str, Any] = { - "host": host, - "port": port, - "document_class": doc_class, - "tz_aware": tz_aware, - "connect": connect, - "type_registry": type_registry, - **kwargs, - } - - if host is None: - host = self.HOST - if isinstance(host, str): - host = [host] - if port is None: - port = self.PORT - if not isinstance(port, int): - raise TypeError("port must be an instance of int") - - # _pool_class, _monitor_class, and _condition_class are for deep - # customization of PyMongo, e.g. Motor. - pool_class = kwargs.pop("_pool_class", None) - monitor_class = kwargs.pop("_monitor_class", None) - condition_class = kwargs.pop("_condition_class", None) - - # Parse options passed as kwargs. - keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts["document_class"] = doc_class - - seeds = set() - username = None - password = None - dbase = None - opts = common._CaseInsensitiveDictionary() - fqdn = None - srv_service_name = keyword_opts.get("srvservicename") - srv_max_hosts = keyword_opts.get("srvmaxhosts") - if len([h for h in host if "/" in h]) > 1: - raise ConfigurationError("host must not contain multiple MongoDB URIs") - for entity in host: - # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' - # it must be a URI, - # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names - if "/" in entity: - # Determine connection timeout from kwargs. - timeout = keyword_opts.get("connecttimeoutms") - if timeout is not None: - timeout = common.validate_timeout_or_none_or_zero( - keyword_opts.cased_key("connecttimeoutms"), timeout - ) - res = uri_parser.parse_uri( - entity, - port, - validate=True, - warn=True, - normalize=False, - connect_timeout=timeout, - srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts, - ) - seeds.update(res["nodelist"]) - username = res["username"] or username - password = res["password"] or password - dbase = res["database"] or dbase - opts = res["options"] - fqdn = res["fqdn"] - else: - seeds.update(uri_parser.split_hosts(entity, port)) - if not seeds: - raise ConfigurationError("need to specify at least one host") - - for hostname in [node[0] for node in seeds]: - if _detect_external_db(hostname): - break - - # Add options with named keyword arguments to the parsed kwarg options. - if type_registry is not None: - keyword_opts["type_registry"] = type_registry - if tz_aware is None: - tz_aware = opts.get("tz_aware", False) - if connect is None: - connect = opts.get("connect", True) - keyword_opts["tz_aware"] = tz_aware - keyword_opts["connect"] = connect - - # Handle deprecated options in kwarg options. - keyword_opts = _handle_option_deprecations(keyword_opts) - # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary( - dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) - ) - - # Override connection string options with kwarg options. - opts.update(keyword_opts) - - if srv_service_name is None: - srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) - - srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") - # Handle security-option conflicts in combined options. - opts = _handle_security_options(opts) - # Normalize combined options. - opts = _normalize_options(opts) - _check_options(seeds, opts) - - # Username and password passed as kwargs override user info in URI. - username = opts.get("username", username) - password = opts.get("password", password) - self.__options = options = ClientOptions(username, password, dbase, opts) - - self.__default_database_name = dbase - self.__lock = _create_lock() - self.__kill_cursors_queue: list = [] - - self._event_listeners = options.pool_options._event_listeners - super().__init__( - options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern, - ) - - self._topology_settings = TopologySettings( - seeds=seeds, - replica_set_name=options.replica_set_name, - pool_class=pool_class, - pool_options=options.pool_options, - monitor_class=monitor_class, - condition_class=condition_class, - local_threshold_ms=options.local_threshold_ms, - server_selection_timeout=options.server_selection_timeout, - server_selector=options.server_selector, - heartbeat_frequency=options.heartbeat_frequency, - fqdn=fqdn, - direct_connection=options.direct_connection, - load_balanced=options.load_balanced, - srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts, - server_monitoring_mode=options.server_monitoring_mode, - ) - - self._init_background() - - if connect: - self._get_topology() - - self._encrypter = None - if self.__options.auto_encryption_opts: - from pymongo.encryption import _Encrypter - - self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) - self._timeout = self.__options.timeout - - if _HAS_REGISTER_AT_FORK: - # Add this client to the list of weakly referenced items. - # This will be used later if we fork. - MongoClient._clients[self._topology._topology_id] = self - - def _init_background(self, old_pid: Optional[int] = None) -> None: - self._topology = Topology(self._topology_settings) - # Seed the topology with the old one's pid so we can detect clients - # that are opened before a fork and used after. - self._topology._pid = old_pid - - def target() -> bool: - client = self_ref() - if client is None: - return False # Stop the executor. - MongoClient._process_periodic_tasks(client) - return True - - executor = periodic_executor.PeriodicExecutor( - interval=common.KILL_CURSOR_FREQUENCY, - min_interval=common.MIN_HEARTBEAT_INTERVAL, - target=target, - name="pymongo_kill_cursors_thread", - ) - - # We strongly reference the executor and it weakly references us via - # this closure. When the client is freed, stop the executor soon. - self_ref: Any = weakref.ref(self, executor.close) - self._kill_cursors_executor = executor - - def _after_fork(self) -> None: - """Resets topology in a child after successfully forking.""" - self._init_background(self._topology._pid) - - def _duplicate(self, **kwargs: Any) -> MongoClient: - args = self.__init_kwargs.copy() - args.update(kwargs) - return MongoClient(**args) - - def _server_property(self, attr_name: str) -> Any: - """An attribute of the current server's description. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - Not threadsafe if used multiple times in a single method, since - the server may change. In such cases, store a local reference to a - ServerDescription first, then use its properties. - """ - server = self._get_topology().select_server(writable_server_selector, _Op.TEST) - - return getattr(server.description, attr_name) - - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional[client_session.ClientSession] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - show_expanded_events: Optional[bool] = None, - ) -> ChangeStream[_DocumentType]: - """Watch changes on this cluster. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.ClusterChangeStream` cursor which - iterates over changes on all databases on this cluster. - - Introduced in MongoDB 4.0. - - .. code-block:: python - - with client.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.ClusterChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.ClusterChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with client.watch([{"$match": {"operationType": "insert"}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error("...") - - For a precise description of the resume process see the - `change streams specification`_. - - :param pipeline: A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - :param full_document: The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - :param full_document_before_change: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - :param resume_after: A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - :param max_await_time_ms: The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - :param batch_size: The maximum number of documents to return - per batch. - :param collation: The :class:`~pymongo.collation.Collation` - to use for the aggregation. - :param start_at_operation_time: If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param start_after: The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - :param comment: A user-provided comment to attach to this - command. - :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - - :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. - - .. versionchanged:: 4.3 - Added `show_expanded_events` parameter. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionadded:: 3.7 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md - """ - return ClusterChangeStream( - self.admin, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - show_expanded_events=show_expanded_events, - ) - - @property - def topology_description(self) -> TopologyDescription: - """The description of the connected MongoDB deployment. - - >>> client.topology_description - , , ]> - >>> client.topology_description.topology_type_name - 'ReplicaSetWithPrimary' - - Note that the description is periodically updated in the background - but the returned object itself is immutable. Access this property again - to get a more recent - :class:`~pymongo.topology_description.TopologyDescription`. - - :return: An instance of - :class:`~pymongo.topology_description.TopologyDescription`. - - .. versionadded:: 4.0 - """ - return self._topology.description - - @property - def address(self) -> Optional[tuple[str, int]]: - """(host, port) of the current standalone, primary, or mongos, or None. - - Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if - the client is load-balancing among mongoses, since there is no single - address. Use :attr:`nodes` instead. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - .. versionadded:: 3.0 - """ - topology_type = self._topology._description.topology_type - if ( - topology_type == TOPOLOGY_TYPE.Sharded - and len(self.topology_description.server_descriptions()) > 1 - ): - raise InvalidOperation( - 'Cannot use "address" property when load balancing among' - ' mongoses, use "nodes" instead.' - ) - if topology_type not in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded, - ): - return None - return self._server_property("address") - - @property - def primary(self) -> Optional[tuple[str, int]]: - """The (host, port) of the current primary of the replica set. - - Returns ``None`` if this client is not connected to a replica set, - there is no primary, or this client was created without the - `replicaSet` option. - - .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. - """ - return self._topology.get_primary() # type: ignore[return-value] - - @property - def secondaries(self) -> set[_Address]: - """The secondary members known to this client. - - A sequence of (host, port) pairs. Empty if this client is not - connected to a replica set, there are no visible secondaries, or this - client was created without the `replicaSet` option. - - .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. - """ - return self._topology.get_secondaries() - - @property - def arbiters(self) -> set[_Address]: - """Arbiters in the replica set. - - A sequence of (host, port) pairs. Empty if this client is not - connected to a replica set, there are no arbiters, or this client was - created without the `replicaSet` option. - """ - return self._topology.get_arbiters() - - @property - def is_primary(self) -> bool: - """If this client is connected to a server that can accept writes. - - True if the current server is a standalone, mongos, or the primary of - a replica set. If the client is not connected, this will block until a - connection is established or raise ServerSelectionTimeoutError if no - server is available. - """ - return self._server_property("is_writable") - - @property - def is_mongos(self) -> bool: - """If this client is connected to mongos. If the client is not - connected, this will block until a connection is established or raise - ServerSelectionTimeoutError if no server is available. - """ - return self._server_property("server_type") == SERVER_TYPE.Mongos - - @property - def nodes(self) -> FrozenSet[_Address]: - """Set of all currently connected servers. - - .. warning:: When connected to a replica set the value of :attr:`nodes` - can change over time as :class:`MongoClient`'s view of the replica - set changes. :attr:`nodes` can also be an empty set when - :class:`MongoClient` is first instantiated and hasn't yet connected - to any servers, or a network partition causes it to lose connection - to all servers. - """ - description = self._topology.description - return frozenset(s.address for s in description.known_servers) - - @property - def options(self) -> ClientOptions: - """The configuration options for this client. - - :return: An instance of :class:`~pymongo.client_options.ClientOptions`. - - .. versionadded:: 4.0 - """ - return self.__options - - def _end_sessions(self, session_ids: list[_ServerSession]) -> None: - """Send endSessions command(s) with the given session ids.""" - try: - # Use Connection.command directly to avoid implicitly creating - # another session. - with self._conn_for_reads( - ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS - ) as ( - conn, - read_pref, - ): - if not conn.supports_sessions: - return - - for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} - conn.command("admin", spec, read_preference=read_pref, client=self) - except PyMongoError: - # Drivers MUST ignore any errors returned by the endSessions - # command. - pass - - def close(self) -> None: - """Cleanup client resources and disconnect from MongoDB. - - End all server sessions created by this client by sending one or more - endSessions commands. - - Close all sockets in the connection pools and stop the monitor threads. - - .. versionchanged:: 4.0 - Once closed, the client cannot be used again and any attempt will - raise :exc:`~pymongo.errors.InvalidOperation`. - - .. versionchanged:: 3.6 - End all server sessions created by this client. - """ - session_ids = self._topology.pop_all_sessions() - if session_ids: - self._end_sessions(session_ids) - # Stop the periodic task thread and then send pending killCursor - # requests before closing the topology. - self._kill_cursors_executor.close() - self._process_kill_cursors() - self._topology.close() - if self._encrypter: - # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. - self._encrypter.close() - - def _get_topology(self) -> Topology: - """Get the internal :class:`~pymongo.topology.Topology` object. - - If this client was created with "connect=False", calling _get_topology - launches the connection process in the background. - """ - self._topology.open() - with self.__lock: - self._kill_cursors_executor.open() - return self._topology - - @contextlib.contextmanager - def _checkout(self, server: Server, session: Optional[ClientSession]) -> Iterator[Connection]: - in_txn = session and session.in_transaction - with _MongoClientErrorHandler(self, server, session) as err_handler: - # Reuse the pinned connection, if it exists. - if in_txn and session and session._pinned_connection: - err_handler.contribute_socket(session._pinned_connection) - yield session._pinned_connection - return - with server.checkout(handler=err_handler) as conn: - # Pin this session to the selected server or connection. - if ( - in_txn - and session - and server.description.server_type - in ( - SERVER_TYPE.Mongos, - SERVER_TYPE.LoadBalancer, - ) - ): - session._pin(server, conn) - err_handler.contribute_socket(conn) - if ( - self._encrypter - and not self._encrypter._bypass_auto_encryption - and conn.max_wire_version < 8 - ): - raise ConfigurationError( - "Auto-encryption requires a minimum MongoDB version of 4.2" - ) - yield conn - - def _select_server( - self, - server_selector: Callable[[Selection], Selection], - session: Optional[ClientSession], - operation: str, - address: Optional[_Address] = None, - deprioritized_servers: Optional[list[Server]] = None, - operation_id: Optional[int] = None, - ) -> Server: - """Select a server to run an operation on this client. - - :param server_selector: The server selector to use if the session is - not pinned and no address is given. - :param session: The ClientSession for the next operation, or None. May - be pinned to a mongos server address. - :param operation: The name of the operation that the server is being selected for. - :param address: Address when sending a message - to a specific server, used for getMore. - """ - try: - topology = self._get_topology() - if session and not session.in_transaction: - session._transaction.reset() - if not address and session: - address = session._pinned_address - if address: - # We're running a getMore or this session is pinned to a mongos. - server = topology.select_server_by_address( - address, operation, operation_id=operation_id - ) - if not server: - raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 - else: - server = topology.select_server( - server_selector, - operation, - deprioritized_servers=deprioritized_servers, - operation_id=operation_id, - ) - return server - except PyMongoError as exc: - # Server selection errors in a transaction are transient. - if session and session.in_transaction: - exc._add_error_label("TransientTransactionError") - session._unpin() - raise - - def _conn_for_writes( - self, session: Optional[ClientSession], operation: str - ) -> ContextManager[Connection]: - server = self._select_server(writable_server_selector, session, operation) - return self._checkout(server, session) - - @contextlib.contextmanager - def _conn_from_server( - self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] - ) -> Iterator[tuple[Connection, _ServerMode]]: - assert read_preference is not None, "read_preference must not be None" - # Get a connection for a server matching the read preference, and yield - # conn with the effective read preference. The Server Selection - # Spec says not to send any $readPreference to standalones and to - # always send primaryPreferred when directly connected to a repl set - # member. - # Thread safe: if the type is single it cannot change. - # NOTE: We already opened the Topology when selecting a server so there's no need - # to call _get_topology() again. - single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single - - with self._checkout(server, session) as conn: - if single: - if conn.is_repl and not (session and session.in_transaction): - # Use primary preferred to ensure any repl set member - # can handle the request. - read_preference = ReadPreference.PRIMARY_PREFERRED - elif conn.is_standalone: - # Don't send read preference to standalones. - read_preference = ReadPreference.PRIMARY - yield conn, read_preference - - def _conn_for_reads( - self, - read_preference: _ServerMode, - session: Optional[ClientSession], - operation: str, - ) -> ContextManager[tuple[Connection, _ServerMode]]: - assert read_preference is not None, "read_preference must not be None" - server = self._select_server(read_preference, session, operation) - return self._conn_from_server(read_preference, server, session) - - def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: - return self.__options.load_balanced and not (session and session.in_transaction) - - @_csot.apply - def _run_operation( - self, - operation: Union[_Query, _GetMore], - unpack_res: Callable, - address: Optional[_Address] = None, - ) -> Response: - """Run a _Query/_GetMore operation and return a Response. - - :param operation: a _Query or _GetMore object. - :param unpack_res: A callable that decodes the wire protocol response. - :param address: Optional address when sending a message - to a specific server, used for getMore. - """ - if operation.conn_mgr: - server = self._select_server( - operation.read_preference, - operation.session, - operation.name, - address=address, - ) - - with operation.conn_mgr.lock: - with _MongoClientErrorHandler(self, server, operation.session) as err_handler: - err_handler.contribute_socket(operation.conn_mgr.conn) - return server.run_operation( - operation.conn_mgr.conn, - operation, - operation.read_preference, - self._event_listeners, - unpack_res, - self, - ) - - def _cmd( - _session: Optional[ClientSession], - server: Server, - conn: Connection, - read_preference: _ServerMode, - ) -> Response: - operation.reset() # Reset op in case of retry. - return server.run_operation( - conn, - operation, - read_preference, - self._event_listeners, - unpack_res, - self, - ) - - return self._retryable_read( - _cmd, - operation.read_preference, - operation.session, - address=address, - retryable=isinstance(operation, message._Query), - operation=operation.name, - ) - - def _retry_with_session( - self, - retryable: bool, - func: _WriteCall[T], - session: Optional[ClientSession], - bulk: Optional[_Bulk], - operation: str, - operation_id: Optional[int] = None, - ) -> T: - """Execute an operation with at most one consecutive retries - - Returns func()'s return value on success. On error retries the same - command. - - Re-raises any exception thrown by func(). - """ - # Ensure that the options supports retry_writes and there is a valid session not in - # transaction, otherwise, we will not support retry behavior for this txn. - retryable = bool( - retryable and self.options.retry_writes and session and not session.in_transaction - ) - return self._retry_internal( - func=func, - session=session, - bulk=bulk, - operation=operation, - retryable=retryable, - operation_id=operation_id, - ) - - @_csot.apply - def _retry_internal( - self, - func: _WriteCall[T] | _ReadCall[T], - session: Optional[ClientSession], - bulk: Optional[_Bulk], - operation: str, - is_read: bool = False, - address: Optional[_Address] = None, - read_pref: Optional[_ServerMode] = None, - retryable: bool = False, - operation_id: Optional[int] = None, - ) -> T: - """Internal retryable helper for all client transactions. - - :param func: Callback function we want to retry - :param session: Client Session on which the transaction should occur - :param bulk: Abstraction to handle bulk write operations - :param operation: The name of the operation that the server is being selected for - :param is_read: If this is an exclusive read transaction, defaults to False - :param address: Server Address, defaults to None - :param read_pref: Topology of read operation, defaults to None - :param retryable: If the operation should be retried once, defaults to None - - :return: Output of the calling func() - """ - return _ClientConnectionRetryable( - mongo_client=self, - func=func, - bulk=bulk, - operation=operation, - is_read=is_read, - session=session, - read_pref=read_pref, - address=address, - retryable=retryable, - operation_id=operation_id, - ).run() - - def _retryable_read( - self, - func: _ReadCall[T], - read_pref: _ServerMode, - session: Optional[ClientSession], - operation: str, - address: Optional[_Address] = None, - retryable: bool = True, - operation_id: Optional[int] = None, - ) -> T: - """Execute an operation with consecutive retries if possible - - Returns func()'s return value on success. On error retries the same - command. - - Re-raises any exception thrown by func(). - - :param func: Read call we want to execute - :param read_pref: Desired topology of read operation - :param session: Client session we should use to execute operation - :param operation: The name of the operation that the server is being selected for - :param address: Optional address when sending a message, defaults to None - :param retryable: if we should attempt retries - (may not always be supported even if supplied), defaults to False - """ - - # Ensure that the client supports retrying on reads and there is no session in - # transaction, otherwise, we will not support retry behavior for this call. - retryable = bool( - retryable and self.options.retry_reads and not (session and session.in_transaction) - ) - return self._retry_internal( - func, - session, - None, - operation, - is_read=True, - address=address, - read_pref=read_pref, - retryable=retryable, - operation_id=operation_id, - ) - - def _retryable_write( - self, - retryable: bool, - func: _WriteCall[T], - session: Optional[ClientSession], - operation: str, - bulk: Optional[_Bulk] = None, - operation_id: Optional[int] = None, - ) -> T: - """Execute an operation with consecutive retries if possible - - Returns func()'s return value on success. On error retries the same - command. - - Re-raises any exception thrown by func(). - - :param retryable: if we should attempt retries (may not always be supported) - :param func: write call we want to execute during a session - :param session: Client session we will use to execute write operation - :param operation: The name of the operation that the server is being selected for - :param bulk: bulk abstraction to execute operations in bulk, defaults to None - """ - with self._tmp_session(session) as s: - return self._retry_with_session(retryable, func, s, bulk, operation, operation_id) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, self.__class__): - return self._topology == other._topology - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash(self._topology) - - def _repr_helper(self) -> str: - def option_repr(option: str, value: Any) -> str: - """Fix options whose __repr__ isn't usable in a constructor.""" - if option == "document_class": - if value is dict: - return "document_class=dict" - else: - return f"document_class={value.__module__}.{value.__name__}" - if option in common.TIMEOUT_OPTIONS and value is not None: - return f"{option}={int(value * 1000)}" - - return f"{option}={value!r}" - - # Host first... - options = [ - "host=%r" - % [ - "%s:%d" % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds - ] - ] - # ... then everything in self._constructor_args... - options.extend( - option_repr(key, self.__options._options[key]) for key in self._constructor_args - ) - # ... then everything else. - options.extend( - option_repr(key, self.__options._options[key]) - for key in self.__options._options - if key not in set(self._constructor_args) and key != "username" and key != "password" - ) - return ", ".join(options) - - def __repr__(self) -> str: - return f"MongoClient({self._repr_helper()})" - - def __getattr__(self, name: str) -> database.Database[_DocumentType]: - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :param name: the name of the database to get - """ - if name.startswith("_"): - raise AttributeError( - f"MongoClient has no attribute {name!r}. To access the {name}" - f" database, use client[{name!r}]." - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> database.Database[_DocumentType]: - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :param name: the name of the database to get - """ - return database.Database(self, name) - - def _cleanup_cursor( - self, - locks_allowed: bool, - cursor_id: int, - address: Optional[_CursorAddress], - conn_mgr: _ConnectionManager, - session: Optional[ClientSession], - explicit_session: bool, - ) -> None: - """Cleanup a cursor from cursor.close() or __del__. - - This method handles cleanup for Cursors/CommandCursors including any - pinned connection or implicit session attached at the time the cursor - was closed or garbage collected. - - :param locks_allowed: True if we are allowed to acquire locks. - :param cursor_id: The cursor id which may be 0. - :param address: The _CursorAddress. - :param conn_mgr: The _ConnectionManager for the pinned connection or None. - :param session: The cursor's session. - :param explicit_session: True if the session was passed explicitly. - """ - if locks_allowed: - if cursor_id: - if conn_mgr and conn_mgr.more_to_come: - # If this is an exhaust cursor and we haven't completely - # exhausted the result set we *must* close the socket - # to stop the server from sending more data. - assert conn_mgr.conn is not None - conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) - else: - self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) - if conn_mgr: - conn_mgr.close() - else: - # The cursor will be closed later in a different session. - if cursor_id or conn_mgr: - self._close_cursor_soon(cursor_id, address, conn_mgr) - if session and not explicit_session: - session._end_session(lock=locks_allowed) - - def _close_cursor_soon( - self, - cursor_id: int, - address: Optional[_CursorAddress], - conn_mgr: Optional[_ConnectionManager] = None, - ) -> None: - """Request that a cursor and/or connection be cleaned up soon.""" - self.__kill_cursors_queue.append((address, cursor_id, conn_mgr)) - - def _close_cursor_now( - self, - cursor_id: int, - address: Optional[_CursorAddress], - session: Optional[ClientSession] = None, - conn_mgr: Optional[_ConnectionManager] = None, - ) -> None: - """Send a kill cursors message with the given id. - - The cursor is closed synchronously on the current thread. - """ - if not isinstance(cursor_id, int): - raise TypeError("cursor_id must be an instance of int") - - try: - if conn_mgr: - with conn_mgr.lock: - # Cursor is pinned to LB outside of a transaction. - assert address is not None - assert conn_mgr.conn is not None - self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) - else: - self._kill_cursors([cursor_id], address, self._get_topology(), session) - except PyMongoError: - # Make another attempt to kill the cursor later. - self._close_cursor_soon(cursor_id, address) - - def _kill_cursors( - self, - cursor_ids: Sequence[int], - address: Optional[_CursorAddress], - topology: Topology, - session: Optional[ClientSession], - ) -> None: - """Send a kill cursors message with the given ids.""" - if address: - # address could be a tuple or _CursorAddress, but - # select_server_by_address needs (host, port). - server = topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] - else: - # Application called close_cursor() with no address. - server = topology.select_server(writable_server_selector, _Op.KILL_CURSORS) - - with self._checkout(server, session) as conn: - assert address is not None - self._kill_cursor_impl(cursor_ids, address, session, conn) - - def _kill_cursor_impl( - self, - cursor_ids: Sequence[int], - address: _CursorAddress, - session: Optional[ClientSession], - conn: Connection, - ) -> None: - namespace = address.namespace - db, coll = namespace.split(".", 1) - spec = {"killCursors": coll, "cursors": cursor_ids} - conn.command(db, spec, session=session, client=self) - - def _process_kill_cursors(self) -> None: - """Process any pending kill cursors requests.""" - address_to_cursor_ids = defaultdict(list) - pinned_cursors = [] - - # Other threads or the GC may append to the queue concurrently. - while True: - try: - address, cursor_id, conn_mgr = self.__kill_cursors_queue.pop() - except IndexError: - break - - if conn_mgr: - pinned_cursors.append((address, cursor_id, conn_mgr)) - else: - address_to_cursor_ids[address].append(cursor_id) - - for address, cursor_id, conn_mgr in pinned_cursors: - try: - self._cleanup_cursor(True, cursor_id, address, conn_mgr, None, False) - except Exception as exc: - if isinstance(exc, InvalidOperation) and self._topology._closed: - # Raise the exception when client is closed so that it - # can be caught in _process_periodic_tasks - raise - else: - helpers._handle_exception() - - # Don't re-open topology if it's closed and there's no pending cursors. - if address_to_cursor_ids: - topology = self._get_topology() - for address, cursor_ids in address_to_cursor_ids.items(): - try: - self._kill_cursors(cursor_ids, address, topology, session=None) - except Exception as exc: - if isinstance(exc, InvalidOperation) and self._topology._closed: - raise - else: - helpers._handle_exception() - - # This method is run periodically by a background thread. - def _process_periodic_tasks(self) -> None: - """Process any pending kill cursors requests and - maintain connection pool parameters. - """ - try: - self._process_kill_cursors() - self._topology.update_pool() - except Exception as exc: - if isinstance(exc, InvalidOperation) and self._topology._closed: - return - else: - helpers._handle_exception() - - def __start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: - server_session = _EmptyServerSession() - opts = client_session.SessionOptions(**kwargs) - return client_session.ClientSession(self, server_session, opts, implicit) - - def start_session( - self, - causal_consistency: Optional[bool] = None, - default_transaction_options: Optional[client_session.TransactionOptions] = None, - snapshot: Optional[bool] = False, - ) -> client_session.ClientSession: - """Start a logical session. - - This method takes the same parameters as - :class:`~pymongo.client_session.SessionOptions`. See the - :mod:`~pymongo.client_session` module for details and examples. - - A :class:`~pymongo.client_session.ClientSession` may only be used with - the MongoClient that started it. :class:`ClientSession` instances are - **not thread-safe or fork-safe**. They can only be used by one thread - or process at a time. A single :class:`ClientSession` cannot be used - to run multiple operations concurrently. - - :return: An instance of :class:`~pymongo.client_session.ClientSession`. - - .. versionadded:: 3.6 - """ - return self.__start_session( - False, - causal_consistency=causal_consistency, - default_transaction_options=default_transaction_options, - snapshot=snapshot, - ) - - def _return_server_session( - self, server_session: Union[_ServerSession, _EmptyServerSession], lock: bool - ) -> None: - """Internal: return a _ServerSession to the pool.""" - if isinstance(server_session, _EmptyServerSession): - return None - return self._topology.return_server_session(server_session, lock) - - def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: - """If provided session is None, lend a temporary session.""" - if session: - return session - - try: - # Don't make implicit sessions causally consistent. Applications - # should always opt-in. - return self.__start_session(True, causal_consistency=False) - except (ConfigurationError, InvalidOperation): - # Sessions not supported. - return None - - @contextlib.contextmanager - def _tmp_session( - self, session: Optional[client_session.ClientSession], close: bool = True - ) -> Generator[Optional[client_session.ClientSession], None, None]: - """If provided session is None, lend a temporary session.""" - if session is not None: - if not isinstance(session, client_session.ClientSession): - raise ValueError("'session' argument must be a ClientSession or None.") - # Don't call end_session. - yield session - return - - s = self._ensure_session(session) - if s: - try: - yield s - except Exception as exc: - if isinstance(exc, ConnectionFailure): - s._server_session.mark_dirty() - - # Always call end_session on error. - s.end_session() - raise - finally: - # Call end_session when we exit this scope. - if close: - s.end_session() - else: - yield None - - def _send_cluster_time( - self, command: MutableMapping[str, Any], session: Optional[ClientSession] - ) -> None: - topology_time = self._topology.max_cluster_time() - session_time = session.cluster_time if session else None - if topology_time and session_time: - if topology_time["clusterTime"] > session_time["clusterTime"]: - cluster_time: Optional[ClusterTime] = topology_time - else: - cluster_time = session_time - else: - cluster_time = topology_time or session_time - if cluster_time: - command["$clusterTime"] = cluster_time - - def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSession]) -> None: - self._topology.receive_cluster_time(reply.get("$clusterTime")) - if session is not None: - session._process_response(reply) - - def server_info(self, session: Optional[client_session.ClientSession] = None) -> dict[str, Any]: - """Get information about the MongoDB server we're connected to. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - return cast( - dict, - self.admin.command( - "buildinfo", read_preference=ReadPreference.PRIMARY, session=session - ), - ) - - def list_databases( - self, - session: Optional[client_session.ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[dict[str, Any]]: - """Get a cursor over the databases of the connected server. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - :param kwargs: Optional parameters of the - `listDatabases command - `_ - can be passed as keyword arguments to this method. The supported - options differ by server version. - - - :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. - - .. versionadded:: 3.6 - """ - cmd = {"listDatabases": 1} - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - admin = self._database_default_options("admin") - res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) - # listDatabases doesn't return a cursor (yet). Fake one. - cursor = { - "id": 0, - "firstBatch": res["databases"], - "ns": "admin.$cmd", - } - return CommandCursor(admin["$cmd"], cursor, None, comment=comment) - - def list_database_names( - self, - session: Optional[client_session.ClientSession] = None, - comment: Optional[Any] = None, - ) -> list[str]: - """Get a list of the names of all databases on the connected server. - - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionadded:: 3.6 - """ - return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] - - @_csot.apply - def drop_database( - self, - name_or_database: Union[str, database.Database[_DocumentTypeArg]], - session: Optional[client_session.ClientSession] = None, - comment: Optional[Any] = None, - ) -> None: - """Drop a database. - - Raises :class:`TypeError` if `name_or_database` is not an instance of - :class:`str` or :class:`~pymongo.database.Database`. - - :param name_or_database: the name of a database to drop, or a - :class:`~pymongo.database.Database` instance representing the - database to drop - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param comment: A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of - this client is automatically applied to this operation. - - .. versionchanged:: 3.4 - Apply this client's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - name = name_or_database - if isinstance(name, database.Database): - name = name.name - - if not isinstance(name, str): - raise TypeError("name_or_database must be an instance of str or a Database") - - with self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: - self[name]._command( - conn, - {"dropDatabase": 1, "comment": comment}, - read_preference=ReadPreference.PRIMARY, - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session, - ) - - def get_default_database( - self, - default: Optional[str] = None, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> database.Database[_DocumentType]: - """Get the database named in the MongoDB connection URI. - - >>> uri = 'mongodb://host/my_database' - >>> client = MongoClient(uri) - >>> db = client.get_default_database() - >>> assert db.name == 'my_database' - >>> db = client.get_database() - >>> assert db.name == 'my_database' - - Useful in scripts where you want to choose which database to use - based only on the URI in a configuration file. - - :param default: the database name to use if no database name - was provided in the URI. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is - used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is - used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is - used. - :param comment: A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.8 - Undeprecated. Added the ``default``, ``codec_options``, - ``read_preference``, ``write_concern`` and ``read_concern`` - parameters. - - .. versionchanged:: 3.5 - Deprecated, use :meth:`get_database` instead. - """ - if self.__default_database_name is None and default is None: - raise ConfigurationError("No default database name defined or provided.") - - name = cast(str, self.__default_database_name or default) - return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern - ) - - def get_database( - self, - name: Optional[str] = None, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> database.Database[_DocumentType]: - """Get a :class:`~pymongo.database.Database` with the given name and - options. - - Useful for creating a :class:`~pymongo.database.Database` with - different codec options, read preference, and/or write concern from - this :class:`MongoClient`. - - >>> client.read_preference - Primary() - >>> db1 = client.test - >>> db1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> db2 = client.get_database( - ... 'test', read_preference=ReadPreference.SECONDARY) - >>> db2.read_preference - Secondary(tag_sets=None) - - :param name: The name of the database - a string. If ``None`` - (the default) the database named in the MongoDB connection URI is - returned. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is - used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is - used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is - used. - - .. versionchanged:: 3.5 - The `name` parameter is now optional, defaulting to the database - named in the MongoDB connection URI. - """ - if name is None: - if self.__default_database_name is None: - raise ConfigurationError("No default database defined") - name = self.__default_database_name - - return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern - ) - - def _database_default_options(self, name: str) -> Database: - """Get a Database instance with the default settings.""" - return self.get_database( - name, - codec_options=DEFAULT_CODEC_OPTIONS, - read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN, - ) - - def __enter__(self) -> MongoClient[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'MongoClient' object is not iterable") - - next = __next__ - - -def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: - """Return the server response from PyMongo exception or None.""" - if isinstance(exc, BulkWriteError): - # Check the last writeConcernError to determine if this - # BulkWriteError is retryable. - wces = exc.details["writeConcernErrors"] - return wces[-1] if wces else None - if isinstance(exc, (NotPrimaryError, OperationFailure)): - return cast(Mapping[str, Any], exc.details) - return None - - -def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None: - doc = _retryable_error_doc(exc) - if doc: - code = doc.get("code", 0) - # retryWrites on MMAPv1 should raise an actionable error. - if code == 20 and str(exc).startswith("Transaction numbers"): - errmsg = ( - "This MongoDB deployment does not support " - "retryable writes. Please add retryWrites=false " - "to your connection string." - ) - raise OperationFailure(errmsg, code, exc.details) # type: ignore[attr-defined] - if max_wire_version >= 9: - # In MongoDB 4.4+, the server reports the error labels. - for label in doc.get("errorLabels", []): - exc._add_error_label(label) - else: - # Do not consult writeConcernError for pre-4.4 mongos. - if isinstance(exc, WriteConcernError) and is_mongos: - pass - elif code in helpers._RETRYABLE_ERROR_CODES: - exc._add_error_label("RetryableWriteError") - - # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is - # handled above. - if isinstance(exc, ConnectionFailure) and not isinstance( - exc, (NotPrimaryError, WaitQueueTimeoutError) - ): - exc._add_error_label("RetryableWriteError") - - -class _MongoClientErrorHandler: - """Handle errors raised when executing an operation.""" - - __slots__ = ( - "client", - "server_address", - "session", - "max_wire_version", - "sock_generation", - "completed_handshake", - "service_id", - "handled", - ) - - def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): - self.client = client - self.server_address = server.description.address - self.session = session - self.max_wire_version = common.MIN_WIRE_VERSION - # XXX: When get_socket fails, this generation could be out of date: - # "Note that when a network error occurs before the handshake - # completes then the error's generation number is the generation - # of the pool at the time the connection attempt was started." - self.sock_generation = server.pool.gen.get_overall() - self.completed_handshake = False - self.service_id: Optional[ObjectId] = None - self.handled = False - - def contribute_socket(self, conn: Connection, completed_handshake: bool = True) -> None: - """Provide socket information to the error handler.""" - self.max_wire_version = conn.max_wire_version - self.sock_generation = conn.generation - self.service_id = conn.service_id - self.completed_handshake = completed_handshake - - def handle( - self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] - ) -> None: - if self.handled or exc_val is None: - return - self.handled = True - if self.session: - if isinstance(exc_val, ConnectionFailure): - if self.session.in_transaction: - exc_val._add_error_label("TransientTransactionError") - self.session._server_session.mark_dirty() - - if isinstance(exc_val, PyMongoError): - if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( - "RetryableWriteError" - ): - self.session._unpin() - err_ctx = _ErrorContext( - exc_val, - self.max_wire_version, - self.sock_generation, - self.completed_handshake, - self.service_id, - ) - self.client._topology.handle_error(self.server_address, err_ctx) - - def __enter__(self) -> _MongoClientErrorHandler: - return self - - def __exit__( - self, - exc_type: Optional[Type[Exception]], - exc_val: Optional[Exception], - exc_tb: Optional[TracebackType], - ) -> None: - return self.handle(exc_type, exc_val) - - -class _ClientConnectionRetryable(Generic[T]): - """Responsible for executing retryable connections on read or write operations""" - - def __init__( - self, - mongo_client: MongoClient, - func: _WriteCall[T] | _ReadCall[T], - bulk: Optional[_Bulk], - operation: str, - is_read: bool = False, - session: Optional[ClientSession] = None, - read_pref: Optional[_ServerMode] = None, - address: Optional[_Address] = None, - retryable: bool = False, - operation_id: Optional[int] = None, - ): - self._last_error: Optional[Exception] = None - self._retrying = False - self._multiple_retries = _csot.get_timeout() is not None - self._client = mongo_client - - self._func = func - self._bulk = bulk - self._session = session - self._is_read = is_read - self._retryable = retryable - self._read_pref = read_pref - self._server_selector: Callable[[Selection], Selection] = ( - read_pref if is_read else writable_server_selector # type: ignore - ) - self._address = address - self._server: Server = None # type: ignore - self._deprioritized_servers: list[Server] = [] - self._operation = operation - self._operation_id = operation_id - - def run(self) -> T: - """Runs the supplied func() and attempts a retry - - :raises: self._last_error: Last exception raised - - :return: Result of the func() call - """ - # Increment the transaction id up front to ensure any retry attempt - # will use the proper txnNumber, even if server or socket selection - # fails before the command can be sent. - if self._is_session_state_retryable() and self._retryable and not self._is_read: - self._session._start_retryable_write() # type: ignore - if self._bulk: - self._bulk.started_retryable_write = True - - while True: - self._check_last_error(check_csot=True) - try: - return self._read() if self._is_read else self._write() - except ServerSelectionTimeoutError: - # The application may think the write was never attempted - # if we raise ServerSelectionTimeoutError on the retry - # attempt. Raise the original exception instead. - self._check_last_error() - # A ServerSelectionTimeoutError error indicates that there may - # be a persistent outage. Attempting to retry in this case will - # most likely be a waste of time. - raise - except PyMongoError as exc: - # Execute specialized catch on read - if self._is_read: - if isinstance(exc, (ConnectionFailure, OperationFailure)): - # ConnectionFailures do not supply a code property - exc_code = getattr(exc, "code", None) - if self._is_not_eligible_for_retry() or ( - isinstance(exc, OperationFailure) - and exc_code not in helpers._RETRYABLE_ERROR_CODES - ): - raise - self._retrying = True - self._last_error = exc - else: - raise - - # Specialized catch on write operation - if not self._is_read: - if not self._retryable: - raise - retryable_write_error_exc = exc.has_error_label("RetryableWriteError") - if retryable_write_error_exc: - assert self._session - self._session._unpin() - if not retryable_write_error_exc or self._is_not_eligible_for_retry(): - if exc.has_error_label("NoWritesPerformed") and self._last_error: - raise self._last_error from exc - else: - raise - if self._bulk: - self._bulk.retrying = True - else: - self._retrying = True - if not exc.has_error_label("NoWritesPerformed"): - self._last_error = exc - if self._last_error is None: - self._last_error = exc - - if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded: - self._deprioritized_servers.append(self._server) - - def _is_not_eligible_for_retry(self) -> bool: - """Checks if the exchange is not eligible for retry""" - return not self._retryable or (self._is_retrying() and not self._multiple_retries) - - def _is_retrying(self) -> bool: - """Checks if the exchange is currently undergoing a retry""" - return self._bulk.retrying if self._bulk else self._retrying - - def _is_session_state_retryable(self) -> bool: - """Checks if provided session is eligible for retry - - reads: Make sure there is no ongoing transaction (if provided a session) - writes: Make sure there is a session without an active transaction - """ - if self._is_read: - return not (self._session and self._session.in_transaction) - return bool(self._session and not self._session.in_transaction) - - def _check_last_error(self, check_csot: bool = False) -> None: - """Checks if the ongoing client exchange experienced a exception previously. - If so, raise last error - - :param check_csot: Checks CSOT to ensure we are retrying with time remaining defaults to False - """ - if self._is_retrying(): - remaining = _csot.remaining() - if not check_csot or (remaining is not None and remaining <= 0): - assert self._last_error is not None - raise self._last_error - - def _get_server(self) -> Server: - """Retrieves a server object based on provided object context - - :return: Abstraction to connect to server - """ - return self._client._select_server( - self._server_selector, - self._session, - self._operation, - address=self._address, - deprioritized_servers=self._deprioritized_servers, - operation_id=self._operation_id, - ) - - def _write(self) -> T: - """Wrapper method for write-type retryable client executions - - :return: Output for func()'s call - """ - try: - max_wire_version = 0 - is_mongos = False - self._server = self._get_server() - with self._client._checkout(self._server, self._session) as conn: - max_wire_version = conn.max_wire_version - sessions_supported = ( - self._session - and self._server.description.retryable_writes_supported - and conn.supports_sessions - ) - is_mongos = conn.is_mongos - if not sessions_supported: - # A retry is not possible because this server does - # not support sessions raise the last error. - self._check_last_error() - self._retryable = False - return self._func(self._session, conn, self._retryable) # type: ignore - except PyMongoError as exc: - if not self._retryable: - raise - # Add the RetryableWriteError label, if applicable. - _add_retryable_write_error(exc, max_wire_version, is_mongos) - raise - - def _read(self) -> T: - """Wrapper method for read-type retryable client executions - - :return: Output for func()'s call - """ - self._server = self._get_server() - assert self._read_pref is not None, "Read Preference required on read calls" - with self._client._conn_from_server(self._read_pref, self._server, self._session) as ( - conn, - read_pref, - ): - if self._retrying and not self._retryable: - self._check_last_error() - return self._func(self._session, self._server, conn, read_pref) # type: ignore - - -def _after_fork_child() -> None: - """Releases the locks in child process and resets the - topologies in all MongoClients. - """ - # Reinitialize locks - _release_locks() - - # Perform cleanup in clients (i.e. get rid of topology) - for _, client in MongoClient._clients.items(): - client._after_fork() - - -def _detect_external_db(entity: str) -> bool: - """Detects external database hosts and logs an informational message at the INFO level.""" - entity = entity.lower() - cosmos_db_hosts = [".cosmos.azure.com"] - document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"] - - for host in cosmos_db_hosts: - if entity.endswith(host): - _log_or_warn( - _CLIENT_LOGGER, - "You appear to be connected to a CosmosDB cluster. For more information regarding feature " - "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", - ) - return True - for host in document_db_hosts: - if entity.endswith(host): - _log_or_warn( - _CLIENT_LOGGER, - "You appear to be connected to a DocumentDB cluster. For more information regarding feature " - "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", - ) - return True - return False - +from pymongo.synchronous.mongo_client import * # noqa: F403 +from pymongo.synchronous.mongo_client import __doc__ as original_doc -if _HAS_REGISTER_AT_FORK: - # This will run in the same thread as the fork was called. - # If we fork in a critical region on the same thread, it should break. - # This is fine since we would never call fork directly from a critical region. - os.register_at_fork(after_in_child=_after_fork_child) +__doc__ = original_doc diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 896a747e72..b9825b4ca3 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1,1900 +1,21 @@ -# Copyright 2015-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Tools to monitor driver events. - -.. versionadded:: 3.1 - -.. attention:: Starting in PyMongo 3.11, the monitoring classes outlined below - are included in the PyMongo distribution under the - :mod:`~pymongo.event_loggers` submodule. - -Use :func:`register` to register global listeners for specific events. -Listeners must inherit from one of the abstract classes below and implement -the correct functions for that class. - -For example, a simple command logger might be implemented like this:: - - import logging - - from pymongo import monitoring - - class CommandLogger(monitoring.CommandListener): - - def started(self, event): - logging.info("Command {0.command_name} with request id " - "{0.request_id} started on server " - "{0.connection_id}".format(event)) - - def succeeded(self, event): - logging.info("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "succeeded in {0.duration_micros} " - "microseconds".format(event)) - - def failed(self, event): - logging.info("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "failed in {0.duration_micros} " - "microseconds".format(event)) - - monitoring.register(CommandLogger()) - -Server discovery and monitoring events are also available. For example:: - - class ServerLogger(monitoring.ServerListener): - - def opened(self, event): - logging.info("Server {0.server_address} added to topology " - "{0.topology_id}".format(event)) - - def description_changed(self, event): - previous_server_type = event.previous_description.server_type - new_server_type = event.new_description.server_type - if new_server_type != previous_server_type: - # server_type_name was added in PyMongo 3.4 - logging.info( - "Server {0.server_address} changed type from " - "{0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event)) - - def closed(self, event): - logging.warning("Server {0.server_address} removed from topology " - "{0.topology_id}".format(event)) - - - class HeartbeatLogger(monitoring.ServerHeartbeatListener): - - def started(self, event): - logging.info("Heartbeat sent to server " - "{0.connection_id}".format(event)) - - def succeeded(self, event): - # The reply.document attribute was added in PyMongo 3.4. - logging.info("Heartbeat to server {0.connection_id} " - "succeeded with reply " - "{0.reply.document}".format(event)) - - def failed(self, event): - logging.warning("Heartbeat to server {0.connection_id} " - "failed with error {0.reply}".format(event)) - - class TopologyLogger(monitoring.TopologyListener): - - def opened(self, event): - logging.info("Topology with id {0.topology_id} " - "opened".format(event)) - - def description_changed(self, event): - logging.info("Topology description updated for " - "topology id {0.topology_id}".format(event)) - previous_topology_type = event.previous_description.topology_type - new_topology_type = event.new_description.topology_type - if new_topology_type != previous_topology_type: - # topology_type_name was added in PyMongo 3.4 - logging.info( - "Topology {0.topology_id} changed type from " - "{0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event)) - # The has_writable_server and has_readable_server methods - # were added in PyMongo 3.4. - if not event.new_description.has_writable_server(): - logging.warning("No writable servers available.") - if not event.new_description.has_readable_server(): - logging.warning("No readable servers available.") - - def closed(self, event): - logging.info("Topology with id {0.topology_id} " - "closed".format(event)) - -Connection monitoring and pooling events are also available. For example:: - - class ConnectionPoolLogger(ConnectionPoolListener): - - def pool_created(self, event): - logging.info("[pool {0.address}] pool created".format(event)) - - def pool_ready(self, event): - logging.info("[pool {0.address}] pool is ready".format(event)) - - def pool_cleared(self, event): - logging.info("[pool {0.address}] pool cleared".format(event)) - - def pool_closed(self, event): - logging.info("[pool {0.address}] pool closed".format(event)) - - def connection_created(self, event): - logging.info("[pool {0.address}][connection #{0.connection_id}] " - "connection created".format(event)) - - def connection_ready(self, event): - logging.info("[pool {0.address}][connection #{0.connection_id}] " - "connection setup succeeded".format(event)) - - def connection_closed(self, event): - logging.info("[pool {0.address}][connection #{0.connection_id}] " - "connection closed, reason: " - "{0.reason}".format(event)) - - def connection_check_out_started(self, event): - logging.info("[pool {0.address}] connection check out " - "started".format(event)) - - def connection_check_out_failed(self, event): - logging.info("[pool {0.address}] connection check out " - "failed, reason: {0.reason}".format(event)) - - def connection_checked_out(self, event): - logging.info("[pool {0.address}][connection #{0.connection_id}] " - "connection checked out of pool".format(event)) - - def connection_checked_in(self, event): - logging.info("[pool {0.address}][connection #{0.connection_id}] " - "connection checked into pool".format(event)) - - -Event listeners can also be registered per instance of -:class:`~pymongo.mongo_client.MongoClient`:: - - client = MongoClient(event_listeners=[CommandLogger()]) - -Note that previously registered global listeners are automatically included -when configuring per client event listeners. Registering a new global listener -will not add that listener to existing client instances. - -.. note:: Events are delivered **synchronously**. Application threads block - waiting for event handlers (e.g. :meth:`~CommandListener.started`) to - return. Care must be taken to ensure that your event handlers are efficient - enough to not adversely affect overall application performance. - -.. warning:: The command documents published through this API are *not* copies. - If you intend to modify them in any way you must copy them in your event - handler first. -""" +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Re-import of synchronous Monitoring API for compatibility.""" from __future__ import annotations -import datetime -from collections import abc, namedtuple -from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence - -from bson.objectid import ObjectId -from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _SENSITIVE_COMMANDS, _handle_exception -from pymongo.typings import _Address, _DocumentOut - -if TYPE_CHECKING: - from datetime import timedelta - - from pymongo.server_description import ServerDescription - from pymongo.topology_description import TopologyDescription - - -_Listeners = namedtuple( - "_Listeners", - ( - "command_listeners", - "server_listeners", - "server_heartbeat_listeners", - "topology_listeners", - "cmap_listeners", - ), -) - -_LISTENERS = _Listeners([], [], [], [], []) - - -class _EventListener: - """Abstract base class for all event listeners.""" - - -class CommandListener(_EventListener): - """Abstract base class for command listeners. - - Handles `CommandStartedEvent`, `CommandSucceededEvent`, - and `CommandFailedEvent`. - """ - - def started(self, event: CommandStartedEvent) -> None: - """Abstract method to handle a `CommandStartedEvent`. - - :param event: An instance of :class:`CommandStartedEvent`. - """ - raise NotImplementedError - - def succeeded(self, event: CommandSucceededEvent) -> None: - """Abstract method to handle a `CommandSucceededEvent`. - - :param event: An instance of :class:`CommandSucceededEvent`. - """ - raise NotImplementedError - - def failed(self, event: CommandFailedEvent) -> None: - """Abstract method to handle a `CommandFailedEvent`. - - :param event: An instance of :class:`CommandFailedEvent`. - """ - raise NotImplementedError - - -class ConnectionPoolListener(_EventListener): - """Abstract base class for connection pool listeners. - - Handles all of the connection pool events defined in the Connection - Monitoring and Pooling Specification: - :class:`PoolCreatedEvent`, :class:`PoolClearedEvent`, - :class:`PoolClosedEvent`, :class:`ConnectionCreatedEvent`, - :class:`ConnectionReadyEvent`, :class:`ConnectionClosedEvent`, - :class:`ConnectionCheckOutStartedEvent`, - :class:`ConnectionCheckOutFailedEvent`, - :class:`ConnectionCheckedOutEvent`, - and :class:`ConnectionCheckedInEvent`. - - .. versionadded:: 3.9 - """ - - def pool_created(self, event: PoolCreatedEvent) -> None: - """Abstract method to handle a :class:`PoolCreatedEvent`. - - Emitted when a connection Pool is created. - - :param event: An instance of :class:`PoolCreatedEvent`. - """ - raise NotImplementedError - - def pool_ready(self, event: PoolReadyEvent) -> None: - """Abstract method to handle a :class:`PoolReadyEvent`. - - Emitted when a connection Pool is marked ready. - - :param event: An instance of :class:`PoolReadyEvent`. - - .. versionadded:: 4.0 - """ - raise NotImplementedError - - def pool_cleared(self, event: PoolClearedEvent) -> None: - """Abstract method to handle a `PoolClearedEvent`. - - Emitted when a connection Pool is cleared. - - :param event: An instance of :class:`PoolClearedEvent`. - """ - raise NotImplementedError - - def pool_closed(self, event: PoolClosedEvent) -> None: - """Abstract method to handle a `PoolClosedEvent`. - - Emitted when a connection Pool is closed. - - :param event: An instance of :class:`PoolClosedEvent`. - """ - raise NotImplementedError - - def connection_created(self, event: ConnectionCreatedEvent) -> None: - """Abstract method to handle a :class:`ConnectionCreatedEvent`. - - Emitted when a connection Pool creates a Connection object. - - :param event: An instance of :class:`ConnectionCreatedEvent`. - """ - raise NotImplementedError - - def connection_ready(self, event: ConnectionReadyEvent) -> None: - """Abstract method to handle a :class:`ConnectionReadyEvent`. - - Emitted when a connection has finished its setup, and is now ready to - use. - - :param event: An instance of :class:`ConnectionReadyEvent`. - """ - raise NotImplementedError - - def connection_closed(self, event: ConnectionClosedEvent) -> None: - """Abstract method to handle a :class:`ConnectionClosedEvent`. - - Emitted when a connection Pool closes a connection. - - :param event: An instance of :class:`ConnectionClosedEvent`. - """ - raise NotImplementedError - - def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> None: - """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. - - Emitted when the driver starts attempting to check out a connection. - - :param event: An instance of :class:`ConnectionCheckOutStartedEvent`. - """ - raise NotImplementedError - - def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> None: - """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. - - Emitted when the driver's attempt to check out a connection fails. - - :param event: An instance of :class:`ConnectionCheckOutFailedEvent`. - """ - raise NotImplementedError - - def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: - """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. - - Emitted when the driver successfully checks out a connection. - - :param event: An instance of :class:`ConnectionCheckedOutEvent`. - """ - raise NotImplementedError - - def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: - """Abstract method to handle a :class:`ConnectionCheckedInEvent`. - - Emitted when the driver checks in a connection back to the connection - Pool. - - :param event: An instance of :class:`ConnectionCheckedInEvent`. - """ - raise NotImplementedError - - -class ServerHeartbeatListener(_EventListener): - """Abstract base class for server heartbeat listeners. - - Handles `ServerHeartbeatStartedEvent`, `ServerHeartbeatSucceededEvent`, - and `ServerHeartbeatFailedEvent`. - - .. versionadded:: 3.3 - """ - - def started(self, event: ServerHeartbeatStartedEvent) -> None: - """Abstract method to handle a `ServerHeartbeatStartedEvent`. - - :param event: An instance of :class:`ServerHeartbeatStartedEvent`. - """ - raise NotImplementedError - - def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: - """Abstract method to handle a `ServerHeartbeatSucceededEvent`. - - :param event: An instance of :class:`ServerHeartbeatSucceededEvent`. - """ - raise NotImplementedError - - def failed(self, event: ServerHeartbeatFailedEvent) -> None: - """Abstract method to handle a `ServerHeartbeatFailedEvent`. - - :param event: An instance of :class:`ServerHeartbeatFailedEvent`. - """ - raise NotImplementedError - - -class TopologyListener(_EventListener): - """Abstract base class for topology monitoring listeners. - Handles `TopologyOpenedEvent`, `TopologyDescriptionChangedEvent`, and - `TopologyClosedEvent`. - - .. versionadded:: 3.3 - """ - - def opened(self, event: TopologyOpenedEvent) -> None: - """Abstract method to handle a `TopologyOpenedEvent`. - - :param event: An instance of :class:`TopologyOpenedEvent`. - """ - raise NotImplementedError - - def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: - """Abstract method to handle a `TopologyDescriptionChangedEvent`. - - :param event: An instance of :class:`TopologyDescriptionChangedEvent`. - """ - raise NotImplementedError - - def closed(self, event: TopologyClosedEvent) -> None: - """Abstract method to handle a `TopologyClosedEvent`. - - :param event: An instance of :class:`TopologyClosedEvent`. - """ - raise NotImplementedError - - -class ServerListener(_EventListener): - """Abstract base class for server listeners. - Handles `ServerOpeningEvent`, `ServerDescriptionChangedEvent`, and - `ServerClosedEvent`. - - .. versionadded:: 3.3 - """ - - def opened(self, event: ServerOpeningEvent) -> None: - """Abstract method to handle a `ServerOpeningEvent`. - - :param event: An instance of :class:`ServerOpeningEvent`. - """ - raise NotImplementedError - - def description_changed(self, event: ServerDescriptionChangedEvent) -> None: - """Abstract method to handle a `ServerDescriptionChangedEvent`. - - :param event: An instance of :class:`ServerDescriptionChangedEvent`. - """ - raise NotImplementedError - - def closed(self, event: ServerClosedEvent) -> None: - """Abstract method to handle a `ServerClosedEvent`. - - :param event: An instance of :class:`ServerClosedEvent`. - """ - raise NotImplementedError - - -def _to_micros(dur: timedelta) -> int: - """Convert duration 'dur' to microseconds.""" - return int(dur.total_seconds() * 10e5) - - -def _validate_event_listeners( - option: str, listeners: Sequence[_EventListeners] -) -> Sequence[_EventListeners]: - """Validate event listeners""" - if not isinstance(listeners, abc.Sequence): - raise TypeError(f"{option} must be a list or tuple") - for listener in listeners: - if not isinstance(listener, _EventListener): - raise TypeError( - f"Listeners for {option} must be either a " - "CommandListener, ServerHeartbeatListener, " - "ServerListener, TopologyListener, or " - "ConnectionPoolListener." - ) - return listeners - - -def register(listener: _EventListener) -> None: - """Register a global event listener. - - :param listener: A subclasses of :class:`CommandListener`, - :class:`ServerHeartbeatListener`, :class:`ServerListener`, - :class:`TopologyListener`, or :class:`ConnectionPoolListener`. - """ - if not isinstance(listener, _EventListener): - raise TypeError( - f"Listeners for {listener} must be either a " - "CommandListener, ServerHeartbeatListener, " - "ServerListener, TopologyListener, or " - "ConnectionPoolListener." - ) - if isinstance(listener, CommandListener): - _LISTENERS.command_listeners.append(listener) - if isinstance(listener, ServerHeartbeatListener): - _LISTENERS.server_heartbeat_listeners.append(listener) - if isinstance(listener, ServerListener): - _LISTENERS.server_listeners.append(listener) - if isinstance(listener, TopologyListener): - _LISTENERS.topology_listeners.append(listener) - if isinstance(listener, ConnectionPoolListener): - _LISTENERS.cmap_listeners.append(listener) - - -# The "hello" command is also deemed sensitive when attempting speculative -# authentication. -def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: - if ( - command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) - and "speculativeAuthenticate" in doc - ): - return True - return False - - -class _CommandEvent: - """Base class for command events.""" - - __slots__ = ( - "__cmd_name", - "__rqst_id", - "__conn_id", - "__op_id", - "__service_id", - "__db", - "__server_conn_id", - ) - - def __init__( - self, - command_name: str, - request_id: int, - connection_id: _Address, - operation_id: Optional[int], - service_id: Optional[ObjectId] = None, - database_name: str = "", - server_connection_id: Optional[int] = None, - ) -> None: - self.__cmd_name = command_name - self.__rqst_id = request_id - self.__conn_id = connection_id - self.__op_id = operation_id - self.__service_id = service_id - self.__db = database_name - self.__server_conn_id = server_connection_id - - @property - def command_name(self) -> str: - """The command name.""" - return self.__cmd_name - - @property - def request_id(self) -> int: - """The request id for this operation.""" - return self.__rqst_id - - @property - def connection_id(self) -> _Address: - """The address (host, port) of the server this command was sent to.""" - return self.__conn_id - - @property - def service_id(self) -> Optional[ObjectId]: - """The service_id this command was sent to, or ``None``. - - .. versionadded:: 3.12 - """ - return self.__service_id - - @property - def operation_id(self) -> Optional[int]: - """An id for this series of events or None.""" - return self.__op_id - - @property - def database_name(self) -> str: - """The database_name this command was sent to, or ``""``. - - .. versionadded:: 4.6 - """ - return self.__db - - @property - def server_connection_id(self) -> Optional[int]: - """The server-side connection id for the connection this command was sent on, or ``None``. - - .. versionadded:: 4.7 - """ - return self.__server_conn_id - - -class CommandStartedEvent(_CommandEvent): - """Event published when a command starts. - - :param command: The command document. - :param database_name: The name of the database this command was run against. - :param request_id: The request id for this operation. - :param connection_id: The address (host, port) of the server this command - was sent to. - :param operation_id: An optional identifier for a series of related events. - :param service_id: The service_id this command was sent to, or ``None``. - """ - - __slots__ = ("__cmd",) - - def __init__( - self, - command: _DocumentOut, - database_name: str, - request_id: int, - connection_id: _Address, - operation_id: Optional[int], - service_id: Optional[ObjectId] = None, - server_connection_id: Optional[int] = None, - ) -> None: - if not command: - raise ValueError(f"{command!r} is not a valid command") - # Command name must be first key. - command_name = next(iter(command)) - super().__init__( - command_name, - request_id, - connection_id, - operation_id, - service_id=service_id, - database_name=database_name, - server_connection_id=server_connection_id, - ) - cmd_name = command_name.lower() - if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): - self.__cmd: _DocumentOut = {} - else: - self.__cmd = command - - @property - def command(self) -> _DocumentOut: - """The command document.""" - return self.__cmd - - @property - def database_name(self) -> str: - """The name of the database this command was run against.""" - return super().database_name - - def __repr__(self) -> str: - return ( - "<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}, server_connection_id: {}>" - ).format( - self.__class__.__name__, - self.connection_id, - self.database_name, - self.command_name, - self.operation_id, - self.service_id, - self.server_connection_id, - ) - - -class CommandSucceededEvent(_CommandEvent): - """Event published when a command succeeds. - - :param duration: The command duration as a datetime.timedelta. - :param reply: The server reply document. - :param command_name: The command name. - :param request_id: The request id for this operation. - :param connection_id: The address (host, port) of the server this command - was sent to. - :param operation_id: An optional identifier for a series of related events. - :param service_id: The service_id this command was sent to, or ``None``. - :param database_name: The database this command was sent to, or ``""``. - """ - - __slots__ = ("__duration_micros", "__reply") - - def __init__( - self, - duration: datetime.timedelta, - reply: _DocumentOut, - command_name: str, - request_id: int, - connection_id: _Address, - operation_id: Optional[int], - service_id: Optional[ObjectId] = None, - database_name: str = "", - server_connection_id: Optional[int] = None, - ) -> None: - super().__init__( - command_name, - request_id, - connection_id, - operation_id, - service_id=service_id, - database_name=database_name, - server_connection_id=server_connection_id, - ) - self.__duration_micros = _to_micros(duration) - cmd_name = command_name.lower() - if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): - self.__reply: _DocumentOut = {} - else: - self.__reply = reply - - @property - def duration_micros(self) -> int: - """The duration of this operation in microseconds.""" - return self.__duration_micros - - @property - def reply(self) -> _DocumentOut: - """The server failure document for this operation.""" - return self.__reply - - def __repr__(self) -> str: - return ( - "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}, server_connection_id: {}>" - ).format( - self.__class__.__name__, - self.connection_id, - self.database_name, - self.command_name, - self.operation_id, - self.duration_micros, - self.service_id, - self.server_connection_id, - ) - - -class CommandFailedEvent(_CommandEvent): - """Event published when a command fails. - - :param duration: The command duration as a datetime.timedelta. - :param failure: The server reply document. - :param command_name: The command name. - :param request_id: The request id for this operation. - :param connection_id: The address (host, port) of the server this command - was sent to. - :param operation_id: An optional identifier for a series of related events. - :param service_id: The service_id this command was sent to, or ``None``. - :param database_name: The database this command was sent to, or ``""``. - """ - - __slots__ = ("__duration_micros", "__failure") - - def __init__( - self, - duration: datetime.timedelta, - failure: _DocumentOut, - command_name: str, - request_id: int, - connection_id: _Address, - operation_id: Optional[int], - service_id: Optional[ObjectId] = None, - database_name: str = "", - server_connection_id: Optional[int] = None, - ) -> None: - super().__init__( - command_name, - request_id, - connection_id, - operation_id, - service_id=service_id, - database_name=database_name, - server_connection_id=server_connection_id, - ) - self.__duration_micros = _to_micros(duration) - self.__failure = failure - - @property - def duration_micros(self) -> int: - """The duration of this operation in microseconds.""" - return self.__duration_micros - - @property - def failure(self) -> _DocumentOut: - """The server failure document for this operation.""" - return self.__failure - - def __repr__(self) -> str: - return ( - "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " - "failure: {!r}, service_id: {}, server_connection_id: {}>" - ).format( - self.__class__.__name__, - self.connection_id, - self.database_name, - self.command_name, - self.operation_id, - self.duration_micros, - self.failure, - self.service_id, - self.server_connection_id, - ) - - -class _PoolEvent: - """Base class for pool events.""" - - __slots__ = ("__address",) - - def __init__(self, address: _Address) -> None: - self.__address = address - - @property - def address(self) -> _Address: - """The address (host, port) pair of the server the pool is attempting - to connect to. - """ - return self.__address - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.__address!r})" - - -class PoolCreatedEvent(_PoolEvent): - """Published when a Connection Pool is created. - - :param address: The address (host, port) pair of the server this Pool is - attempting to connect to. - - .. versionadded:: 3.9 - """ - - __slots__ = ("__options",) - - def __init__(self, address: _Address, options: dict[str, Any]) -> None: - super().__init__(address) - self.__options = options - - @property - def options(self) -> dict[str, Any]: - """Any non-default pool options that were set on this Connection Pool.""" - return self.__options - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" - - -class PoolReadyEvent(_PoolEvent): - """Published when a Connection Pool is marked ready. - - :param address: The address (host, port) pair of the server this Pool is - attempting to connect to. - - .. versionadded:: 4.0 - """ - - __slots__ = () - - -class PoolClearedEvent(_PoolEvent): - """Published when a Connection Pool is cleared. - - :param address: The address (host, port) pair of the server this Pool is - attempting to connect to. - :param service_id: The service_id this command was sent to, or ``None``. - :param interrupt_connections: True if all active connections were interrupted by the Pool during clearing. - - .. versionadded:: 3.9 - """ - - __slots__ = ("__service_id", "__interrupt_connections") - - def __init__( - self, - address: _Address, - service_id: Optional[ObjectId] = None, - interrupt_connections: bool = False, - ) -> None: - super().__init__(address) - self.__service_id = service_id - self.__interrupt_connections = interrupt_connections - - @property - def service_id(self) -> Optional[ObjectId]: - """Connections with this service_id are cleared. - - When service_id is ``None``, all connections in the pool are cleared. - - .. versionadded:: 3.12 - """ - return self.__service_id - - @property - def interrupt_connections(self) -> bool: - """If True, active connections are interrupted during clearing. - - .. versionadded:: 4.7 - """ - return self.__interrupt_connections - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r}, {self.__interrupt_connections!r})" - - -class PoolClosedEvent(_PoolEvent): - """Published when a Connection Pool is closed. - - :param address: The address (host, port) pair of the server this Pool is - attempting to connect to. - - .. versionadded:: 3.9 - """ - - __slots__ = () - - -class ConnectionClosedReason: - """An enum that defines values for `reason` on a - :class:`ConnectionClosedEvent`. - - .. versionadded:: 3.9 - """ - - STALE = "stale" - """The pool was cleared, making the connection no longer valid.""" - - IDLE = "idle" - """The connection became stale by being idle for too long (maxIdleTimeMS). - """ - - ERROR = "error" - """The connection experienced an error, making it no longer valid.""" - - POOL_CLOSED = "poolClosed" - """The pool was closed, making the connection no longer valid.""" - - -class ConnectionCheckOutFailedReason: - """An enum that defines values for `reason` on a - :class:`ConnectionCheckOutFailedEvent`. - - .. versionadded:: 3.9 - """ - - TIMEOUT = "timeout" - """The connection check out attempt exceeded the specified timeout.""" - - POOL_CLOSED = "poolClosed" - """The pool was previously closed, and cannot provide new connections.""" - - CONN_ERROR = "connectionError" - """The connection check out attempt experienced an error while setting up - a new connection. - """ - - -class _ConnectionEvent: - """Private base class for connection events.""" - - __slots__ = ("__address",) - - def __init__(self, address: _Address) -> None: - self.__address = address - - @property - def address(self) -> _Address: - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.__address!r})" - - -class _ConnectionIdEvent(_ConnectionEvent): - """Private base class for connection events with an id.""" - - __slots__ = ("__connection_id",) - - def __init__(self, address: _Address, connection_id: int) -> None: - super().__init__(address) - self.__connection_id = connection_id - - @property - def connection_id(self) -> int: - """The ID of the connection.""" - return self.__connection_id - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" - - -class _ConnectionDurationEvent(_ConnectionIdEvent): - """Private base class for connection events with a duration.""" - - __slots__ = ("__duration",) - - def __init__(self, address: _Address, connection_id: int, duration: Optional[float]) -> None: - super().__init__(address, connection_id) - self.__duration = duration - - @property - def duration(self) -> Optional[float]: - """The duration of the connection event. - - .. versionadded:: 4.7 - """ - return self.__duration - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.connection_id!r}, {self.__duration!r})" - - -class ConnectionCreatedEvent(_ConnectionIdEvent): - """Published when a Connection Pool creates a Connection object. - - NOTE: This connection is not ready for use until the - :class:`ConnectionReadyEvent` is published. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - :param connection_id: The integer ID of the Connection in this Pool. - - .. versionadded:: 3.9 - """ - - __slots__ = () - - -class ConnectionReadyEvent(_ConnectionDurationEvent): - """Published when a Connection has finished its setup, and is ready to use. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - :param connection_id: The integer ID of the Connection in this Pool. - - .. versionadded:: 3.9 - """ - - __slots__ = () - - -class ConnectionClosedEvent(_ConnectionIdEvent): - """Published when a Connection is closed. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - :param connection_id: The integer ID of the Connection in this Pool. - :param reason: A reason explaining why this connection was closed. - - .. versionadded:: 3.9 - """ - - __slots__ = ("__reason",) - - def __init__(self, address: _Address, connection_id: int, reason: str): - super().__init__(address, connection_id) - self.__reason = reason - - @property - def reason(self) -> str: - """A reason explaining why this connection was closed. - - The reason must be one of the strings from the - :class:`ConnectionClosedReason` enum. - """ - return self.__reason - - def __repr__(self) -> str: - return "{}({!r}, {!r}, {!r})".format( - self.__class__.__name__, - self.address, - self.connection_id, - self.__reason, - ) - - -class ConnectionCheckOutStartedEvent(_ConnectionEvent): - """Published when the driver starts attempting to check out a connection. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - - .. versionadded:: 3.9 - """ - - __slots__ = () - - -class ConnectionCheckOutFailedEvent(_ConnectionDurationEvent): - """Published when the driver's attempt to check out a connection fails. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - :param reason: A reason explaining why connection check out failed. - - .. versionadded:: 3.9 - """ - - __slots__ = ("__reason",) - - def __init__(self, address: _Address, reason: str, duration: Optional[float]) -> None: - super().__init__(address=address, connection_id=0, duration=duration) - self.__reason = reason - - @property - def reason(self) -> str: - """A reason explaining why connection check out failed. - - The reason must be one of the strings from the - :class:`ConnectionCheckOutFailedReason` enum. - """ - return self.__reason - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r}, {self.duration!r})" - - -class ConnectionCheckedOutEvent(_ConnectionDurationEvent): - """Published when the driver successfully checks out a connection. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - :param connection_id: The integer ID of the Connection in this Pool. - - .. versionadded:: 3.9 - """ - - __slots__ = () - - -class ConnectionCheckedInEvent(_ConnectionIdEvent): - """Published when the driver checks in a Connection into the Pool. - - :param address: The address (host, port) pair of the server this - Connection is attempting to connect to. - :param connection_id: The integer ID of the Connection in this Pool. - - .. versionadded:: 3.9 - """ - - __slots__ = () - - -class _ServerEvent: - """Base class for server events.""" - - __slots__ = ("__server_address", "__topology_id") - - def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: - self.__server_address = server_address - self.__topology_id = topology_id - - @property - def server_address(self) -> _Address: - """The address (host, port) pair of the server""" - return self.__server_address - - @property - def topology_id(self) -> ObjectId: - """A unique identifier for the topology this server is a part of.""" - return self.__topology_id - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.server_address} topology_id: {self.topology_id}>" - - -class ServerDescriptionChangedEvent(_ServerEvent): - """Published when server description changes. - - .. versionadded:: 3.3 - """ - - __slots__ = ("__previous_description", "__new_description") - - def __init__( - self, - previous_description: ServerDescription, - new_description: ServerDescription, - *args: Any, - ) -> None: - super().__init__(*args) - self.__previous_description = previous_description - self.__new_description = new_description - - @property - def previous_description(self) -> ServerDescription: - """The previous - :class:`~pymongo.server_description.ServerDescription`. - """ - return self.__previous_description - - @property - def new_description(self) -> ServerDescription: - """The new - :class:`~pymongo.server_description.ServerDescription`. - """ - return self.__new_description - - def __repr__(self) -> str: - return "<{} {} changed from: {}, to: {}>".format( - self.__class__.__name__, - self.server_address, - self.previous_description, - self.new_description, - ) - - -class ServerOpeningEvent(_ServerEvent): - """Published when server is initialized. - - .. versionadded:: 3.3 - """ - - __slots__ = () - - -class ServerClosedEvent(_ServerEvent): - """Published when server is closed. - - .. versionadded:: 3.3 - """ - - __slots__ = () - - -class TopologyEvent: - """Base class for topology description events.""" - - __slots__ = ("__topology_id",) - - def __init__(self, topology_id: ObjectId) -> None: - self.__topology_id = topology_id - - @property - def topology_id(self) -> ObjectId: - """A unique identifier for the topology this server is a part of.""" - return self.__topology_id - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" - - -class TopologyDescriptionChangedEvent(TopologyEvent): - """Published when the topology description changes. - - .. versionadded:: 3.3 - """ - - __slots__ = ("__previous_description", "__new_description") - - def __init__( - self, - previous_description: TopologyDescription, - new_description: TopologyDescription, - *args: Any, - ) -> None: - super().__init__(*args) - self.__previous_description = previous_description - self.__new_description = new_description - - @property - def previous_description(self) -> TopologyDescription: - """The previous - :class:`~pymongo.topology_description.TopologyDescription`. - """ - return self.__previous_description - - @property - def new_description(self) -> TopologyDescription: - """The new - :class:`~pymongo.topology_description.TopologyDescription`. - """ - return self.__new_description - - def __repr__(self) -> str: - return "<{} topology_id: {} changed from: {}, to: {}>".format( - self.__class__.__name__, - self.topology_id, - self.previous_description, - self.new_description, - ) - - -class TopologyOpenedEvent(TopologyEvent): - """Published when the topology is initialized. - - .. versionadded:: 3.3 - """ - - __slots__ = () - - -class TopologyClosedEvent(TopologyEvent): - """Published when the topology is closed. - - .. versionadded:: 3.3 - """ - - __slots__ = () - - -class _ServerHeartbeatEvent: - """Base class for server heartbeat events.""" - - __slots__ = ("__connection_id", "__awaited") - - def __init__(self, connection_id: _Address, awaited: bool = False) -> None: - self.__connection_id = connection_id - self.__awaited = awaited - - @property - def connection_id(self) -> _Address: - """The address (host, port) of the server this heartbeat was sent - to. - """ - return self.__connection_id - - @property - def awaited(self) -> bool: - """Whether the heartbeat was issued as an awaitable hello command. - - .. versionadded:: 4.6 - """ - return self.__awaited - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} {self.connection_id} awaited: {self.awaited}>" - - -class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): - """Published when a heartbeat is started. - - .. versionadded:: 3.3 - """ - - __slots__ = () - - -class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): - """Fired when the server heartbeat succeeds. - - .. versionadded:: 3.3 - """ - - __slots__ = ("__duration", "__reply") - - def __init__( - self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False - ) -> None: - super().__init__(connection_id, awaited) - self.__duration = duration - self.__reply = reply - - @property - def duration(self) -> float: - """The duration of this heartbeat in microseconds.""" - return self.__duration - - @property - def reply(self) -> Hello: - """An instance of :class:`~pymongo.hello.Hello`.""" - return self.__reply - - @property - def awaited(self) -> bool: - """Whether the heartbeat was awaited. - - If true, then :meth:`duration` reflects the sum of the round trip time - to the server and the time that the server waited before sending a - response. - - .. versionadded:: 3.11 - """ - return super().awaited - - def __repr__(self) -> str: - return "<{} {} duration: {}, awaited: {}, reply: {}>".format( - self.__class__.__name__, - self.connection_id, - self.duration, - self.awaited, - self.reply, - ) - - -class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): - """Fired when the server heartbeat fails, either with an "ok: 0" - or a socket exception. - - .. versionadded:: 3.3 - """ - - __slots__ = ("__duration", "__reply") - - def __init__( - self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False - ) -> None: - super().__init__(connection_id, awaited) - self.__duration = duration - self.__reply = reply - - @property - def duration(self) -> float: - """The duration of this heartbeat in microseconds.""" - return self.__duration - - @property - def reply(self) -> Exception: - """A subclass of :exc:`Exception`.""" - return self.__reply - - @property - def awaited(self) -> bool: - """Whether the heartbeat was awaited. - - If true, then :meth:`duration` reflects the sum of the round trip time - to the server and the time that the server waited before sending a - response. - - .. versionadded:: 3.11 - """ - return super().awaited - - def __repr__(self) -> str: - return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( - self.__class__.__name__, - self.connection_id, - self.duration, - self.awaited, - self.reply, - ) - - -class _EventListeners: - """Configure event listeners for a client instance. - - Any event listeners registered globally are included by default. - - :param listeners: A list of event listeners. - """ - - def __init__(self, listeners: Optional[Sequence[_EventListener]]): - self.__command_listeners = _LISTENERS.command_listeners[:] - self.__server_listeners = _LISTENERS.server_listeners[:] - lst = _LISTENERS.server_heartbeat_listeners - self.__server_heartbeat_listeners = lst[:] - self.__topology_listeners = _LISTENERS.topology_listeners[:] - self.__cmap_listeners = _LISTENERS.cmap_listeners[:] - if listeners is not None: - for lst in listeners: - if isinstance(lst, CommandListener): - self.__command_listeners.append(lst) - if isinstance(lst, ServerListener): - self.__server_listeners.append(lst) - if isinstance(lst, ServerHeartbeatListener): - self.__server_heartbeat_listeners.append(lst) - if isinstance(lst, TopologyListener): - self.__topology_listeners.append(lst) - if isinstance(lst, ConnectionPoolListener): - self.__cmap_listeners.append(lst) - self.__enabled_for_commands = bool(self.__command_listeners) - self.__enabled_for_server = bool(self.__server_listeners) - self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) - self.__enabled_for_topology = bool(self.__topology_listeners) - self.__enabled_for_cmap = bool(self.__cmap_listeners) - - @property - def enabled_for_commands(self) -> bool: - """Are any CommandListener instances registered?""" - return self.__enabled_for_commands - - @property - def enabled_for_server(self) -> bool: - """Are any ServerListener instances registered?""" - return self.__enabled_for_server - - @property - def enabled_for_server_heartbeat(self) -> bool: - """Are any ServerHeartbeatListener instances registered?""" - return self.__enabled_for_server_heartbeat - - @property - def enabled_for_topology(self) -> bool: - """Are any TopologyListener instances registered?""" - return self.__enabled_for_topology - - @property - def enabled_for_cmap(self) -> bool: - """Are any ConnectionPoolListener instances registered?""" - return self.__enabled_for_cmap - - def event_listeners(self) -> list[_EventListeners]: - """List of registered event listeners.""" - return ( - self.__command_listeners - + self.__server_heartbeat_listeners - + self.__server_listeners - + self.__topology_listeners - + self.__cmap_listeners - ) - - def publish_command_start( - self, - command: _DocumentOut, - database_name: str, - request_id: int, - connection_id: _Address, - server_connection_id: Optional[int], - op_id: Optional[int] = None, - service_id: Optional[ObjectId] = None, - ) -> None: - """Publish a CommandStartedEvent to all command listeners. - - :param command: The command document. - :param database_name: The name of the database this command was run - against. - :param request_id: The request id for this operation. - :param connection_id: The address (host, port) of the server this - command was sent to. - :param op_id: The (optional) operation id for this operation. - :param service_id: The service_id this command was sent to, or ``None``. - """ - if op_id is None: - op_id = request_id - event = CommandStartedEvent( - command, - database_name, - request_id, - connection_id, - op_id, - service_id=service_id, - server_connection_id=server_connection_id, - ) - for subscriber in self.__command_listeners: - try: - subscriber.started(event) - except Exception: - _handle_exception() - - def publish_command_success( - self, - duration: timedelta, - reply: _DocumentOut, - command_name: str, - request_id: int, - connection_id: _Address, - server_connection_id: Optional[int], - op_id: Optional[int] = None, - service_id: Optional[ObjectId] = None, - speculative_hello: bool = False, - database_name: str = "", - ) -> None: - """Publish a CommandSucceededEvent to all command listeners. - - :param duration: The command duration as a datetime.timedelta. - :param reply: The server reply document. - :param command_name: The command name. - :param request_id: The request id for this operation. - :param connection_id: The address (host, port) of the server this - command was sent to. - :param op_id: The (optional) operation id for this operation. - :param service_id: The service_id this command was sent to, or ``None``. - :param speculative_hello: Was the command sent with speculative auth? - :param database_name: The database this command was sent to, or ``""``. - """ - if op_id is None: - op_id = request_id - if speculative_hello: - # Redact entire response when the command started contained - # speculativeAuthenticate. - reply = {} - event = CommandSucceededEvent( - duration, - reply, - command_name, - request_id, - connection_id, - op_id, - service_id, - database_name=database_name, - server_connection_id=server_connection_id, - ) - for subscriber in self.__command_listeners: - try: - subscriber.succeeded(event) - except Exception: - _handle_exception() - - def publish_command_failure( - self, - duration: timedelta, - failure: _DocumentOut, - command_name: str, - request_id: int, - connection_id: _Address, - server_connection_id: Optional[int], - op_id: Optional[int] = None, - service_id: Optional[ObjectId] = None, - database_name: str = "", - ) -> None: - """Publish a CommandFailedEvent to all command listeners. - - :param duration: The command duration as a datetime.timedelta. - :param failure: The server reply document or failure description - document. - :param command_name: The command name. - :param request_id: The request id for this operation. - :param connection_id: The address (host, port) of the server this - command was sent to. - :param op_id: The (optional) operation id for this operation. - :param service_id: The service_id this command was sent to, or ``None``. - :param database_name: The database this command was sent to, or ``""``. - """ - if op_id is None: - op_id = request_id - event = CommandFailedEvent( - duration, - failure, - command_name, - request_id, - connection_id, - op_id, - service_id=service_id, - database_name=database_name, - server_connection_id=server_connection_id, - ) - for subscriber in self.__command_listeners: - try: - subscriber.failed(event) - except Exception: - _handle_exception() - - def publish_server_heartbeat_started(self, connection_id: _Address, awaited: bool) -> None: - """Publish a ServerHeartbeatStartedEvent to all server heartbeat - listeners. - - :param connection_id: The address (host, port) pair of the connection. - :param awaited: True if this heartbeat is part of an awaitable hello command. - """ - event = ServerHeartbeatStartedEvent(connection_id, awaited) - for subscriber in self.__server_heartbeat_listeners: - try: - subscriber.started(event) - except Exception: - _handle_exception() - - def publish_server_heartbeat_succeeded( - self, connection_id: _Address, duration: float, reply: Hello, awaited: bool - ) -> None: - """Publish a ServerHeartbeatSucceededEvent to all server heartbeat - listeners. - - :param connection_id: The address (host, port) pair of the connection. - :param duration: The execution time of the event in the highest possible - resolution for the platform. - :param reply: The command reply. - :param awaited: True if the response was awaited. - """ - event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) - for subscriber in self.__server_heartbeat_listeners: - try: - subscriber.succeeded(event) - except Exception: - _handle_exception() - - def publish_server_heartbeat_failed( - self, connection_id: _Address, duration: float, reply: Exception, awaited: bool - ) -> None: - """Publish a ServerHeartbeatFailedEvent to all server heartbeat - listeners. - - :param connection_id: The address (host, port) pair of the connection. - :param duration: The execution time of the event in the highest possible - resolution for the platform. - :param reply: The command reply. - :param awaited: True if the response was awaited. - """ - event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) - for subscriber in self.__server_heartbeat_listeners: - try: - subscriber.failed(event) - except Exception: - _handle_exception() - - def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: - """Publish a ServerOpeningEvent to all server listeners. - - :param server_address: The address (host, port) pair of the server. - :param topology_id: A unique identifier for the topology this server - is a part of. - """ - event = ServerOpeningEvent(server_address, topology_id) - for subscriber in self.__server_listeners: - try: - subscriber.opened(event) - except Exception: - _handle_exception() - - def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: - """Publish a ServerClosedEvent to all server listeners. - - :param server_address: The address (host, port) pair of the server. - :param topology_id: A unique identifier for the topology this server - is a part of. - """ - event = ServerClosedEvent(server_address, topology_id) - for subscriber in self.__server_listeners: - try: - subscriber.closed(event) - except Exception: - _handle_exception() - - def publish_server_description_changed( - self, - previous_description: ServerDescription, - new_description: ServerDescription, - server_address: _Address, - topology_id: ObjectId, - ) -> None: - """Publish a ServerDescriptionChangedEvent to all server listeners. - - :param previous_description: The previous server description. - :param server_address: The address (host, port) pair of the server. - :param new_description: The new server description. - :param topology_id: A unique identifier for the topology this server - is a part of. - """ - event = ServerDescriptionChangedEvent( - previous_description, new_description, server_address, topology_id - ) - for subscriber in self.__server_listeners: - try: - subscriber.description_changed(event) - except Exception: - _handle_exception() - - def publish_topology_opened(self, topology_id: ObjectId) -> None: - """Publish a TopologyOpenedEvent to all topology listeners. - - :param topology_id: A unique identifier for the topology this server - is a part of. - """ - event = TopologyOpenedEvent(topology_id) - for subscriber in self.__topology_listeners: - try: - subscriber.opened(event) - except Exception: - _handle_exception() - - def publish_topology_closed(self, topology_id: ObjectId) -> None: - """Publish a TopologyClosedEvent to all topology listeners. - - :param topology_id: A unique identifier for the topology this server - is a part of. - """ - event = TopologyClosedEvent(topology_id) - for subscriber in self.__topology_listeners: - try: - subscriber.closed(event) - except Exception: - _handle_exception() - - def publish_topology_description_changed( - self, - previous_description: TopologyDescription, - new_description: TopologyDescription, - topology_id: ObjectId, - ) -> None: - """Publish a TopologyDescriptionChangedEvent to all topology listeners. - - :param previous_description: The previous topology description. - :param new_description: The new topology description. - :param topology_id: A unique identifier for the topology this server - is a part of. - """ - event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) - for subscriber in self.__topology_listeners: - try: - subscriber.description_changed(event) - except Exception: - _handle_exception() - - def publish_pool_created(self, address: _Address, options: dict[str, Any]) -> None: - """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" - event = PoolCreatedEvent(address, options) - for subscriber in self.__cmap_listeners: - try: - subscriber.pool_created(event) - except Exception: - _handle_exception() - - def publish_pool_ready(self, address: _Address) -> None: - """Publish a :class:`PoolReadyEvent` to all pool listeners.""" - event = PoolReadyEvent(address) - for subscriber in self.__cmap_listeners: - try: - subscriber.pool_ready(event) - except Exception: - _handle_exception() - - def publish_pool_cleared( - self, - address: _Address, - service_id: Optional[ObjectId], - interrupt_connections: bool = False, - ) -> None: - """Publish a :class:`PoolClearedEvent` to all pool listeners.""" - event = PoolClearedEvent(address, service_id, interrupt_connections) - for subscriber in self.__cmap_listeners: - try: - subscriber.pool_cleared(event) - except Exception: - _handle_exception() - - def publish_pool_closed(self, address: _Address) -> None: - """Publish a :class:`PoolClosedEvent` to all pool listeners.""" - event = PoolClosedEvent(address) - for subscriber in self.__cmap_listeners: - try: - subscriber.pool_closed(event) - except Exception: - _handle_exception() - - def publish_connection_created(self, address: _Address, connection_id: int) -> None: - """Publish a :class:`ConnectionCreatedEvent` to all connection - listeners. - """ - event = ConnectionCreatedEvent(address, connection_id) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_created(event) - except Exception: - _handle_exception() - - def publish_connection_ready( - self, address: _Address, connection_id: int, duration: float - ) -> None: - """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" - event = ConnectionReadyEvent(address, connection_id, duration) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_ready(event) - except Exception: - _handle_exception() - - def publish_connection_closed(self, address: _Address, connection_id: int, reason: str) -> None: - """Publish a :class:`ConnectionClosedEvent` to all connection - listeners. - """ - event = ConnectionClosedEvent(address, connection_id, reason) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_closed(event) - except Exception: - _handle_exception() - - def publish_connection_check_out_started(self, address: _Address) -> None: - """Publish a :class:`ConnectionCheckOutStartedEvent` to all connection - listeners. - """ - event = ConnectionCheckOutStartedEvent(address) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_check_out_started(event) - except Exception: - _handle_exception() - - def publish_connection_check_out_failed( - self, address: _Address, reason: str, duration: float - ) -> None: - """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection - listeners. - """ - event = ConnectionCheckOutFailedEvent(address, reason, duration) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_check_out_failed(event) - except Exception: - _handle_exception() - - def publish_connection_checked_out( - self, address: _Address, connection_id: int, duration: float - ) -> None: - """Publish a :class:`ConnectionCheckedOutEvent` to all connection - listeners. - """ - event = ConnectionCheckedOutEvent(address, connection_id, duration) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_checked_out(event) - except Exception: - _handle_exception() +from pymongo.synchronous.monitoring import * # noqa: F403 +from pymongo.synchronous.monitoring import __doc__ as original_doc - def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: - """Publish a :class:`ConnectionCheckedInEvent` to all connection - listeners. - """ - event = ConnectionCheckedInEvent(address, connection_id) - for subscriber in self.__cmap_listeners: - try: - subscriber.connection_checked_in(event) - except Exception: - _handle_exception() +__doc__ = original_doc diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py new file mode 100644 index 0000000000..6087b1aa8d --- /dev/null +++ b/pymongo/network_layer.py @@ -0,0 +1,49 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal network layer helper methods.""" +from __future__ import annotations + +import asyncio +import socket +import struct +from typing import ( + TYPE_CHECKING, + Union, +) + +from pymongo import ssl_support + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import _sslConn + +_UNPACK_HEADER = struct.Struct(" None: + timeout = socket.gettimeout() + socket.settimeout(0.0) + loop = asyncio.get_event_loop() + try: + await asyncio.wait_for(loop.sock_sendall(socket, buf), timeout=timeout) # type: ignore[arg-type] + finally: + socket.settimeout(timeout) + + +def sendall(socket: Union[socket.socket, _sslConn], buf: bytes) -> None: + socket.sendall(buf) diff --git a/pymongo/operations.py b/pymongo/operations.py index 4872afa911..dbfc048a60 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -1,4 +1,4 @@ -# Copyright 2015-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,612 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Operation class definitions.""" +"""Re-import of synchronous Operations API for compatibility.""" from __future__ import annotations -import enum -from typing import ( - TYPE_CHECKING, - Any, - Generic, - Mapping, - Optional, - Sequence, - Tuple, - Union, -) +from pymongo.synchronous.operations import * # noqa: F403 +from pymongo.synchronous.operations import __doc__ as original_doc -from bson.raw_bson import RawBSONDocument -from pymongo import helpers -from pymongo.collation import validate_collation_or_none -from pymongo.common import validate_is_mapping, validate_list -from pymongo.helpers import _gen_index_name, _index_document, _index_list -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline -from pymongo.write_concern import validate_boolean - -if TYPE_CHECKING: - from pymongo.bulk import _Bulk - -# Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary -_IndexList = Union[ - Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] -] -_IndexKeyHint = Union[str, _IndexList] - - -class _Op(str, enum.Enum): - ABORT = "abortTransaction" - AGGREGATE = "aggregate" - COMMIT = "commitTransaction" - COUNT = "count" - CREATE = "create" - CREATE_INDEXES = "createIndexes" - CREATE_SEARCH_INDEXES = "createSearchIndexes" - DELETE = "delete" - DISTINCT = "distinct" - DROP = "drop" - DROP_DATABASE = "dropDatabase" - DROP_INDEXES = "dropIndexes" - DROP_SEARCH_INDEXES = "dropSearchIndexes" - END_SESSIONS = "endSessions" - FIND_AND_MODIFY = "findAndModify" - FIND = "find" - INSERT = "insert" - LIST_COLLECTIONS = "listCollections" - LIST_INDEXES = "listIndexes" - LIST_SEARCH_INDEX = "listSearchIndexes" - LIST_DATABASES = "listDatabases" - UPDATE = "update" - UPDATE_INDEX = "updateIndex" - UPDATE_SEARCH_INDEX = "updateSearchIndex" - RENAME = "rename" - GETMORE = "getMore" - KILL_CURSORS = "killCursors" - TEST = "testOperation" - - -class InsertOne(Generic[_DocumentType]): - """Represents an insert_one operation.""" - - __slots__ = ("_doc",) - - def __init__(self, document: _DocumentType) -> None: - """Create an InsertOne instance. - - For use with :meth:`~pymongo.collection.Collection.bulk_write`. - - :param document: The document to insert. If the document is missing an - _id field one will be added. - """ - self._doc = document - - def _add_to_bulk(self, bulkobj: _Bulk) -> None: - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_insert(self._doc) # type: ignore[arg-type] - - def __repr__(self) -> str: - return f"InsertOne({self._doc!r})" - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return other._doc == self._doc - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - -class DeleteOne: - """Represents a delete_one operation.""" - - __slots__ = ("_filter", "_collation", "_hint") - - def __init__( - self, - filter: Mapping[str, Any], - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - ) -> None: - """Create a DeleteOne instance. - - For use with :meth:`~pymongo.collection.Collection.bulk_write`. - - :param filter: A query that matches the document to delete. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.5 - Added the `collation` option. - """ - if filter is not None: - validate_is_mapping("filter", filter) - if hint is not None and not isinstance(hint, str): - self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) - else: - self._hint = hint - self._filter = filter - self._collation = collation - - def _add_to_bulk(self, bulkobj: _Bulk) -> None: - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete( - self._filter, - 1, - collation=validate_collation_or_none(self._collation), - hint=self._hint, - ) - - def __repr__(self) -> str: - return f"DeleteOne({self._filter!r}, {self._collation!r}, {self._hint!r})" - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return (other._filter, other._collation, other._hint) == ( - self._filter, - self._collation, - self._hint, - ) - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - -class DeleteMany: - """Represents a delete_many operation.""" - - __slots__ = ("_filter", "_collation", "_hint") - - def __init__( - self, - filter: Mapping[str, Any], - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - ) -> None: - """Create a DeleteMany instance. - - For use with :meth:`~pymongo.collection.Collection.bulk_write`. - - :param filter: A query that matches the documents to delete. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.5 - Added the `collation` option. - """ - if filter is not None: - validate_is_mapping("filter", filter) - if hint is not None and not isinstance(hint, str): - self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) - else: - self._hint = hint - self._filter = filter - self._collation = collation - - def _add_to_bulk(self, bulkobj: _Bulk) -> None: - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete( - self._filter, - 0, - collation=validate_collation_or_none(self._collation), - hint=self._hint, - ) - - def __repr__(self) -> str: - return f"DeleteMany({self._filter!r}, {self._collation!r}, {self._hint!r})" - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return (other._filter, other._collation, other._hint) == ( - self._filter, - self._collation, - self._hint, - ) - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - -class ReplaceOne(Generic[_DocumentType]): - """Represents a replace_one operation.""" - - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") - - def __init__( - self, - filter: Mapping[str, Any], - replacement: Union[_DocumentType, RawBSONDocument], - upsert: bool = False, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - ) -> None: - """Create a ReplaceOne instance. - - For use with :meth:`~pymongo.collection.Collection.bulk_write`. - - :param filter: A query that matches the document to replace. - :param replacement: The new document. - :param upsert: If ``True``, perform an insert if no documents - match the filter. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.5 - Added the ``collation`` option. - """ - if filter is not None: - validate_is_mapping("filter", filter) - if upsert is not None: - validate_boolean("upsert", upsert) - if hint is not None and not isinstance(hint, str): - self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) - else: - self._hint = hint - self._filter = filter - self._doc = replacement - self._upsert = upsert - self._collation = collation - - def _add_to_bulk(self, bulkobj: _Bulk) -> None: - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_replace( - self._filter, - self._doc, - self._upsert, - collation=validate_collation_or_none(self._collation), - hint=self._hint, - ) - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return ( - other._filter, - other._doc, - other._upsert, - other._collation, - other._hint, - ) == ( - self._filter, - self._doc, - self._upsert, - self._collation, - other._hint, - ) - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __repr__(self) -> str: - return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( - self.__class__.__name__, - self._filter, - self._doc, - self._upsert, - self._collation, - self._hint, - ) - - -class _UpdateOp: - """Private base class for update operations.""" - - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") - - def __init__( - self, - filter: Mapping[str, Any], - doc: Union[Mapping[str, Any], _Pipeline], - upsert: bool, - collation: Optional[_CollationIn], - array_filters: Optional[list[Mapping[str, Any]]], - hint: Optional[_IndexKeyHint], - ): - if filter is not None: - validate_is_mapping("filter", filter) - if upsert is not None: - validate_boolean("upsert", upsert) - if array_filters is not None: - validate_list("array_filters", array_filters) - if hint is not None and not isinstance(hint, str): - self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) - else: - self._hint = hint - - self._filter = filter - self._doc = doc - self._upsert = upsert - self._collation = collation - self._array_filters = array_filters - - def __eq__(self, other: object) -> bool: - if isinstance(other, type(self)): - return ( - other._filter, - other._doc, - other._upsert, - other._collation, - other._array_filters, - other._hint, - ) == ( - self._filter, - self._doc, - self._upsert, - self._collation, - self._array_filters, - self._hint, - ) - return NotImplemented - - def __repr__(self) -> str: - return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( - self.__class__.__name__, - self._filter, - self._doc, - self._upsert, - self._collation, - self._array_filters, - self._hint, - ) - - -class UpdateOne(_UpdateOp): - """Represents an update_one operation.""" - - __slots__ = () - - def __init__( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - collation: Optional[_CollationIn] = None, - array_filters: Optional[list[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - ) -> None: - """Represents an update_one operation. - - For use with :meth:`~pymongo.collection.Collection.bulk_write`. - - :param filter: A query that matches the document to update. - :param update: The modifications to apply. - :param upsert: If ``True``, perform an insert if no documents - match the filter. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param array_filters: A list of filters specifying which - array elements an update should apply. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - - .. versionchanged:: 3.11 - Added the `hint` option. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. - .. versionchanged:: 3.6 - Added the `array_filters` option. - .. versionchanged:: 3.5 - Added the `collation` option. - """ - super().__init__(filter, update, upsert, collation, array_filters, hint) - - def _add_to_bulk(self, bulkobj: _Bulk) -> None: - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update( - self._filter, - self._doc, - False, - self._upsert, - collation=validate_collation_or_none(self._collation), - array_filters=self._array_filters, - hint=self._hint, - ) - - -class UpdateMany(_UpdateOp): - """Represents an update_many operation.""" - - __slots__ = () - - def __init__( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - collation: Optional[_CollationIn] = None, - array_filters: Optional[list[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - ) -> None: - """Create an UpdateMany instance. - - For use with :meth:`~pymongo.collection.Collection.bulk_write`. - - :param filter: A query that matches the documents to update. - :param update: The modifications to apply. - :param upsert: If ``True``, perform an insert if no documents - match the filter. - :param collation: An instance of - :class:`~pymongo.collation.Collation`. - :param array_filters: A list of filters specifying which - array elements an update should apply. - :param hint: An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - - .. versionchanged:: 3.11 - Added the `hint` option. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. - .. versionchanged:: 3.6 - Added the `array_filters` option. - .. versionchanged:: 3.5 - Added the `collation` option. - """ - super().__init__(filter, update, upsert, collation, array_filters, hint) - - def _add_to_bulk(self, bulkobj: _Bulk) -> None: - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_update( - self._filter, - self._doc, - True, - self._upsert, - collation=validate_collation_or_none(self._collation), - array_filters=self._array_filters, - hint=self._hint, - ) - - -class IndexModel: - """Represents an index to create.""" - - __slots__ = ("__document",) - - def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: - """Create an Index instance. - - For use with :meth:`~pymongo.collection.Collection.create_indexes`. - - Takes either a single key or a list containing (key, direction) pairs - or keys. If no direction is given, :data:`~pymongo.ASCENDING` will - be assumed. - The key(s) must be an instance of :class:`str`, and the direction(s) must - be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, - :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). - - Valid options include, but are not limited to: - - - `name`: custom name to use for this index - if none is - given, a name will be generated. - - `unique`: if ``True``, creates a uniqueness constraint on the index. - - `background`: if ``True``, this index should be created in the - background. - - `sparse`: if ``True``, omit from the index any documents that lack - the indexed field. - - `bucketSize`: for use with geoHaystack indexes. - Number of documents to group together within a certain proximity - to a given longitude and latitude. - - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` - index. - - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` - index. - - `expireAfterSeconds`: Used to create an expiring (TTL) - collection. MongoDB will automatically delete documents from - this collection after seconds. The indexed field must - be a UTC datetime or the data will not expire. - - `partialFilterExpression`: A document that specifies a filter for - a partial index. - - `collation`: An instance of :class:`~pymongo.collation.Collation` - that specifies the collation to use. - - `wildcardProjection`: Allows users to include or exclude specific - field paths from a `wildcard index`_ using the { "$**" : 1} key - pattern. Requires MongoDB >= 4.2. - - `hidden`: if ``True``, this index will be hidden from the query - planner and will not be evaluated as part of query plan - selection. Requires MongoDB >= 4.4. - - See the MongoDB documentation for a full list of supported options by - server version. - - :param keys: a single key or a list containing (key, direction) pairs - or keys specifying the index to create. - :param kwargs: any additional index creation - options (see the above list) should be passed as keyword - arguments. - - .. versionchanged:: 3.11 - Added the ``hidden`` option. - .. versionchanged:: 3.2 - Added the ``partialFilterExpression`` option to support partial - indexes. - - .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ - """ - keys = _index_list(keys) - if kwargs.get("name") is None: - kwargs["name"] = _gen_index_name(keys) - kwargs["key"] = _index_document(keys) - collation = validate_collation_or_none(kwargs.pop("collation", None)) - self.__document = kwargs - if collation is not None: - self.__document["collation"] = collation - - @property - def document(self) -> dict[str, Any]: - """An index document suitable for passing to the createIndexes - command. - """ - return self.__document - - -class SearchIndexModel: - """Represents a search index to create.""" - - __slots__ = ("__document",) - - def __init__( - self, - definition: Mapping[str, Any], - name: Optional[str] = None, - type: Optional[str] = None, - **kwargs: Any, - ) -> None: - """Create a Search Index instance. - - For use with :meth:`~pymongo.collection.Collection.create_search_index` and :meth:`~pymongo.collection.Collection.create_search_indexes`. - - :param definition: The definition for this index. - :param name: The name for this index, if present. - :param type: The type for this index which defaults to "search". Alternative values include "vectorSearch". - :param kwargs: Keyword arguments supplying any additional options. - - .. note:: Search indexes require a MongoDB server version 7.0+ Atlas cluster. - .. versionadded:: 4.5 - .. versionchanged:: 4.7 - Added the type and kwargs arguments. - """ - self.__document: dict[str, Any] = {} - if name is not None: - self.__document["name"] = name - self.__document["definition"] = definition - if type is not None: - self.__document["type"] = type - self.__document.update(kwargs) - - @property - def document(self) -> Mapping[str, Any]: - """The document for this index.""" - return self.__document +__doc__ = original_doc diff --git a/pymongo/pool.py b/pymongo/pool.py index 2e8aefa60c..0045f227b4 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1,2110 +1,21 @@ -# Copyright 2011-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Re-import of synchronous Pool API for compatibility.""" from __future__ import annotations -import collections -import contextlib -import copy -import logging -import os -import platform -import socket -import ssl -import sys -import threading -import time -import weakref -from pathlib import Path -from typing import ( - TYPE_CHECKING, - Any, - Iterator, - Mapping, - MutableMapping, - NoReturn, - Optional, - Sequence, - Union, -) +from pymongo.synchronous.pool import * # noqa: F403 +from pymongo.synchronous.pool import __doc__ as original_doc -import bson -from bson import DEFAULT_CODEC_OPTIONS -from pymongo import __version__, _csot, helpers -from pymongo.client_session import _validate_session_write_concern -from pymongo.common import ( - MAX_BSON_SIZE, - MAX_CONNECTING, - MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, - MAX_POOL_SIZE, - MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, - MIN_POOL_SIZE, - ORDERED_TYPES, - WAIT_QUEUE_TIMEOUT, -) -from pymongo.errors import ( # type:ignore[attr-defined] - AutoReconnect, - ConfigurationError, - ConnectionFailure, - DocumentTooLarge, - ExecutionTimeout, - InvalidOperation, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError, - WaitQueueTimeoutError, - _CertificateError, -) -from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _handle_reauth -from pymongo.lock import _create_lock -from pymongo.logger import ( - _CONNECTION_LOGGER, - _ConnectionStatusMessage, - _debug_log, - _verbose_connection_error_reason, -) -from pymongo.monitoring import ( - ConnectionCheckOutFailedReason, - ConnectionClosedReason, - _EventListeners, -) -from pymongo.network import command, receive_message -from pymongo.read_preferences import ReadPreference -from pymongo.server_api import _add_to_command -from pymongo.server_type import SERVER_TYPE -from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import HAS_SNI, SSLError - -if TYPE_CHECKING: - from bson import CodecOptions - from bson.objectid import ObjectId - from pymongo.auth import MongoCredential, _AuthContext - from pymongo.client_session import ClientSession - from pymongo.compression_support import ( - CompressionSettings, - SnappyContext, - ZlibContext, - ZstdContext, - ) - from pymongo.driver_info import DriverInfo - from pymongo.message import _OpMsg, _OpReply - from pymongo.mongo_client import MongoClient, _MongoClientErrorHandler - from pymongo.pyopenssl_context import SSLContext, _sslConn - from pymongo.read_concern import ReadConcern - from pymongo.read_preferences import _ServerMode - from pymongo.server_api import ServerApi - from pymongo.typings import ClusterTime, _Address, _CollationIn - from pymongo.write_concern import WriteConcern - -try: - from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl - - def _set_non_inheritable_non_atomic(fd: int) -> None: - """Set the close-on-exec flag on the given file descriptor.""" - flags = fcntl(fd, F_GETFD) - fcntl(fd, F_SETFD, flags | FD_CLOEXEC) - -except ImportError: - # Windows, various platforms we don't claim to support - # (Jython, IronPython, ..), systems that don't provide - # everything we need from fcntl, etc. - def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 - """Dummy function for platforms that don't provide fcntl.""" - - -_MAX_TCP_KEEPIDLE = 120 -_MAX_TCP_KEEPINTVL = 10 -_MAX_TCP_KEEPCNT = 9 - -if sys.platform == "win32": - try: - import _winreg as winreg - except ImportError: - import winreg - - def _query(key, name, default): - try: - value, _ = winreg.QueryValueEx(key, name) - # Ensure the value is a number or raise ValueError. - return int(value) - except (OSError, ValueError): - # QueryValueEx raises OSError when the key does not exist (i.e. - # the system is using the Windows default value). - return default - - try: - with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" - ) as key: - _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) - _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) - except OSError: - # We could not check the default values because winreg.OpenKey failed. - # Assume the system is using the default values. - _WINDOWS_TCP_IDLE_MS = 7200000 - _WINDOWS_TCP_INTERVAL_MS = 1000 - - def _set_keepalive_times(sock): - idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) - if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: - sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) - -else: - - def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: - if hasattr(socket, tcp_option): - sockopt = getattr(socket, tcp_option) - try: - # PYTHON-1350 - NetBSD doesn't implement getsockopt for - # TCP_KEEPIDLE and friends. Don't attempt to set the - # values there. - default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) - if default > max_value: - sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except OSError: - pass - - def _set_keepalive_times(sock: socket.socket) -> None: - _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) - - -_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} - -if sys.platform.startswith("linux"): - # platform.linux_distribution was deprecated in Python 3.5 - # and removed in Python 3.8. Starting in Python 3.5 it - # raises DeprecationWarning - # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 - _name = platform.system() - _METADATA["os"] = { - "type": _name, - "name": _name, - "architecture": platform.machine(), - # Kernel version (e.g. 4.4.0-17-generic). - "version": platform.release(), - } -elif sys.platform == "darwin": - _METADATA["os"] = { - "type": platform.system(), - "name": platform.system(), - "architecture": platform.machine(), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. - "version": platform.mac_ver()[0], - } -elif sys.platform == "win32": - _ver = sys.getwindowsversion() - _METADATA["os"] = { - "type": "Windows", - "name": "Windows", - # Avoid using platform calls, see PYTHON-4455. - "architecture": os.environ.get("PROCESSOR_ARCHITECTURE") or platform.machine(), - # Windows patch level (e.g. 10.0.17763-SP0). - "version": ".".join(map(str, _ver[:3])) + f"-SP{_ver[-1] or '0'}", - } -elif sys.platform.startswith("java"): - _name, _ver, _arch = platform.java_ver()[-1] - _METADATA["os"] = { - # Linux, Windows 7, Mac OS X, etc. - "type": _name, - "name": _name, - # x86, x86_64, AMD64, etc. - "architecture": _arch, - # Linux kernel version, OSX version, etc. - "version": _ver, - } -else: - # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) - _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) - _METADATA["os"] = { - "type": platform.system(), - "name": " ".join([part for part in _aliased[:2] if part]), - "architecture": platform.machine(), - "version": _aliased[2], - } - -if platform.python_implementation().startswith("PyPy"): - _METADATA["platform"] = " ".join( - ( - platform.python_implementation(), - ".".join(map(str, sys.pypy_version_info)), # type: ignore - "(Python %s)" % ".".join(map(str, sys.version_info)), - ) - ) -elif sys.platform.startswith("java"): - _METADATA["platform"] = " ".join( - ( - platform.python_implementation(), - ".".join(map(str, sys.version_info)), - "(%s)" % " ".join((platform.system(), platform.release())), - ) - ) -else: - _METADATA["platform"] = " ".join( - (platform.python_implementation(), ".".join(map(str, sys.version_info))) - ) - -DOCKER_ENV_PATH = "/.dockerenv" -ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" - -RUNTIME_NAME_DOCKER = "docker" -ORCHESTRATOR_NAME_K8S = "kubernetes" - - -def get_container_env_info() -> dict[str, str]: - """Returns the runtime and orchestrator of a container. - If neither value is present, the metadata client.env.container field will be omitted.""" - container = {} - - if Path(DOCKER_ENV_PATH).exists(): - container["runtime"] = RUNTIME_NAME_DOCKER - if os.getenv(ENV_VAR_K8S): - container["orchestrator"] = ORCHESTRATOR_NAME_K8S - - return container - - -def _is_lambda() -> bool: - if os.getenv("AWS_LAMBDA_RUNTIME_API"): - return True - env = os.getenv("AWS_EXECUTION_ENV") - if env: - return env.startswith("AWS_Lambda_") - return False - - -def _is_azure_func() -> bool: - return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) - - -def _is_gcp_func() -> bool: - return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) - - -def _is_vercel() -> bool: - return bool(os.getenv("VERCEL")) - - -def _is_faas() -> bool: - return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() - - -def _getenv_int(key: str) -> Optional[int]: - """Like os.getenv but returns an int, or None if the value is missing/malformed.""" - val = os.getenv(key) - if not val: - return None - try: - return int(val) - except ValueError: - return None - - -def _metadata_env() -> dict[str, Any]: - env: dict[str, Any] = {} - container = get_container_env_info() - if container: - env["container"] = container - # Skip if multiple (or no) envs are matched. - if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: - return env - if _is_lambda(): - env["name"] = "aws.lambda" - region = os.getenv("AWS_REGION") - if region: - env["region"] = region - memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") - if memory_mb is not None: - env["memory_mb"] = memory_mb - elif _is_azure_func(): - env["name"] = "azure.func" - elif _is_gcp_func(): - env["name"] = "gcp.func" - region = os.getenv("FUNCTION_REGION") - if region: - env["region"] = region - memory_mb = _getenv_int("FUNCTION_MEMORY_MB") - if memory_mb is not None: - env["memory_mb"] = memory_mb - timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") - if timeout_sec is not None: - env["timeout_sec"] = timeout_sec - elif _is_vercel(): - env["name"] = "vercel" - region = os.getenv("VERCEL_REGION") - if region: - env["region"] = region - return env - - -_MAX_METADATA_SIZE = 512 - - -# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations -def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: - """Perform metadata truncation.""" - if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: - return - # 1. Omit fields from env except env.name. - env_name = metadata.get("env", {}).get("name") - if env_name: - metadata["env"] = {"name": env_name} - if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: - return - # 2. Omit fields from os except os.type. - os_type = metadata.get("os", {}).get("type") - if os_type: - metadata["os"] = {"type": os_type} - if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: - return - # 3. Omit the env document entirely. - metadata.pop("env", None) - encoded_size = len(bson.encode(metadata)) - if encoded_size <= _MAX_METADATA_SIZE: - return - # 4. Truncate platform. - overflow = encoded_size - _MAX_METADATA_SIZE - plat = metadata.get("platform", "") - if plat: - plat = plat[:-overflow] - if plat: - metadata["platform"] = plat - else: - metadata.pop("platform", None) - - -# If the first getaddrinfo call of this interpreter's life is on a thread, -# while the main thread holds the import lock, getaddrinfo deadlocks trying -# to import the IDNA codec. Import it here, where presumably we're on the -# main thread, to avoid the deadlock. See PYTHON-607. -"foo".encode("idna") - - -def _raise_connection_failure( - address: Any, - error: Exception, - msg_prefix: Optional[str] = None, - timeout_details: Optional[dict[str, float]] = None, -) -> NoReturn: - """Convert a socket.error to ConnectionFailure and raise it.""" - host, port = address - # If connecting to a Unix socket, port will be None. - if port is not None: - msg = "%s:%d: %s" % (host, port, error) - else: - msg = f"{host}: {error}" - if msg_prefix: - msg = msg_prefix + msg - if "configured timeouts" not in msg: - msg += format_timeout_details(timeout_details) - if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) from error - elif isinstance(error, SSLError) and "timed out" in str(error): - # Eventlet does not distinguish TLS network timeouts from other - # SSLErrors (https://github.com/eventlet/eventlet/issues/692). - # Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised. - raise NetworkTimeout(msg) from error - else: - raise AutoReconnect(msg) from error - - -def _cond_wait(condition: threading.Condition, deadline: Optional[float]) -> bool: - timeout = deadline - time.monotonic() if deadline else None - return condition.wait(timeout) - - -def _get_timeout_details(options: PoolOptions) -> dict[str, float]: - details = {} - timeout = _csot.get_timeout() - socket_timeout = options.socket_timeout - connect_timeout = options.connect_timeout - if timeout: - details["timeoutMS"] = timeout * 1000 - if socket_timeout and not timeout: - details["socketTimeoutMS"] = socket_timeout * 1000 - if connect_timeout: - details["connectTimeoutMS"] = connect_timeout * 1000 - return details - - -def format_timeout_details(details: Optional[dict[str, float]]) -> str: - result = "" - if details: - result += " (configured timeouts:" - for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: - if timeout in details: - result += f" {timeout}: {details[timeout]}ms," - result = result[:-1] - result += ")" - return result - - -class PoolOptions: - """Read only connection pool options for a MongoClient. - - Should not be instantiated directly by application developers. Access - a client's pool options via - :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: - - pool_opts = client.options.pool_options - pool_opts.max_pool_size - pool_opts.min_pool_size - - """ - - __slots__ = ( - "__max_pool_size", - "__min_pool_size", - "__max_idle_time_seconds", - "__connect_timeout", - "__socket_timeout", - "__wait_queue_timeout", - "__ssl_context", - "__tls_allow_invalid_hostnames", - "__event_listeners", - "__appname", - "__driver", - "__metadata", - "__compression_settings", - "__max_connecting", - "__pause_enabled", - "__server_api", - "__load_balanced", - "__credentials", - ) - - def __init__( - self, - max_pool_size: int = MAX_POOL_SIZE, - min_pool_size: int = MIN_POOL_SIZE, - max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, - connect_timeout: Optional[float] = None, - socket_timeout: Optional[float] = None, - wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, - ssl_context: Optional[SSLContext] = None, - tls_allow_invalid_hostnames: bool = False, - event_listeners: Optional[_EventListeners] = None, - appname: Optional[str] = None, - driver: Optional[DriverInfo] = None, - compression_settings: Optional[CompressionSettings] = None, - max_connecting: int = MAX_CONNECTING, - pause_enabled: bool = True, - server_api: Optional[ServerApi] = None, - load_balanced: Optional[bool] = None, - credentials: Optional[MongoCredential] = None, - ): - self.__max_pool_size = max_pool_size - self.__min_pool_size = min_pool_size - self.__max_idle_time_seconds = max_idle_time_seconds - self.__connect_timeout = connect_timeout - self.__socket_timeout = socket_timeout - self.__wait_queue_timeout = wait_queue_timeout - self.__ssl_context = ssl_context - self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames - self.__event_listeners = event_listeners - self.__appname = appname - self.__driver = driver - self.__compression_settings = compression_settings - self.__max_connecting = max_connecting - self.__pause_enabled = pause_enabled - self.__server_api = server_api - self.__load_balanced = load_balanced - self.__credentials = credentials - self.__metadata = copy.deepcopy(_METADATA) - if appname: - self.__metadata["application"] = {"name": appname} - - # Combine the "driver" MongoClient option with PyMongo's info, like: - # { - # 'driver': { - # 'name': 'PyMongo|MyDriver', - # 'version': '4.2.0|1.2.3', - # }, - # 'platform': 'CPython 3.8.0|MyPlatform' - # } - if driver: - if driver.name: - self.__metadata["driver"]["name"] = "{}|{}".format( - _METADATA["driver"]["name"], - driver.name, - ) - if driver.version: - self.__metadata["driver"]["version"] = "{}|{}".format( - _METADATA["driver"]["version"], - driver.version, - ) - if driver.platform: - self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) - - env = _metadata_env() - if env: - self.__metadata["env"] = env - - _truncate_metadata(self.__metadata) - - @property - def _credentials(self) -> Optional[MongoCredential]: - """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" - return self.__credentials - - @property - def non_default_options(self) -> dict[str, Any]: - """The non-default options this pool was created with. - - Added for CMAP's :class:`PoolCreatedEvent`. - """ - opts = {} - if self.__max_pool_size != MAX_POOL_SIZE: - opts["maxPoolSize"] = self.__max_pool_size - if self.__min_pool_size != MIN_POOL_SIZE: - opts["minPoolSize"] = self.__min_pool_size - if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: - assert self.__max_idle_time_seconds is not None - opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 - if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: - assert self.__wait_queue_timeout is not None - opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 - if self.__max_connecting != MAX_CONNECTING: - opts["maxConnecting"] = self.__max_connecting - return opts - - @property - def max_pool_size(self) -> float: - """The maximum allowable number of concurrent connections to each - connected server. Requests to a server will block if there are - `maxPoolSize` outstanding connections to the requested server. - Defaults to 100. Cannot be 0. - - When a server's pool has reached `max_pool_size`, operations for that - server block waiting for a socket to be returned to the pool. If - ``waitQueueTimeoutMS`` is set, a blocked operation will raise - :exc:`~pymongo.errors.ConnectionFailure` after a timeout. - By default ``waitQueueTimeoutMS`` is not set. - """ - return self.__max_pool_size - - @property - def min_pool_size(self) -> int: - """The minimum required number of concurrent connections that the pool - will maintain to each connected server. Default is 0. - """ - return self.__min_pool_size - - @property - def max_connecting(self) -> int: - """The maximum number of concurrent connection creation attempts per - pool. Defaults to 2. - """ - return self.__max_connecting - - @property - def pause_enabled(self) -> bool: - return self.__pause_enabled - - @property - def max_idle_time_seconds(self) -> Optional[int]: - """The maximum number of seconds that a connection can remain - idle in the pool before being removed and replaced. Defaults to - `None` (no limit). - """ - return self.__max_idle_time_seconds - - @property - def connect_timeout(self) -> Optional[float]: - """How long a connection can take to be opened before timing out.""" - return self.__connect_timeout - - @property - def socket_timeout(self) -> Optional[float]: - """How long a send or receive on a socket can take before timing out.""" - return self.__socket_timeout - - @property - def wait_queue_timeout(self) -> Optional[int]: - """How long a thread will wait for a socket from the pool if the pool - has no free sockets. - """ - return self.__wait_queue_timeout - - @property - def _ssl_context(self) -> Optional[SSLContext]: - """An SSLContext instance or None.""" - return self.__ssl_context - - @property - def tls_allow_invalid_hostnames(self) -> bool: - """If True skip ssl.match_hostname.""" - return self.__tls_allow_invalid_hostnames - - @property - def _event_listeners(self) -> Optional[_EventListeners]: - """An instance of pymongo.monitoring._EventListeners.""" - return self.__event_listeners - - @property - def appname(self) -> Optional[str]: - """The application name, for sending with hello in server handshake.""" - return self.__appname - - @property - def driver(self) -> Optional[DriverInfo]: - """Driver name and version, for sending with hello in handshake.""" - return self.__driver - - @property - def _compression_settings(self) -> Optional[CompressionSettings]: - return self.__compression_settings - - @property - def metadata(self) -> dict[str, Any]: - """A dict of metadata about the application, driver, os, and platform.""" - return self.__metadata.copy() - - @property - def server_api(self) -> Optional[ServerApi]: - """A pymongo.server_api.ServerApi or None.""" - return self.__server_api - - @property - def load_balanced(self) -> Optional[bool]: - """True if this Pool is configured in load balanced mode.""" - return self.__load_balanced - - -class _CancellationContext: - def __init__(self) -> None: - self._cancelled = False - - def cancel(self) -> None: - """Cancel this context.""" - self._cancelled = True - - @property - def cancelled(self) -> bool: - """Was cancel called?""" - return self._cancelled - - -class Connection: - """Store a connection with some metadata. - - :param conn: a raw connection object - :param pool: a Pool instance - :param address: the server's (host, port) - :param id: the id of this socket in it's pool - """ - - def __init__( - self, conn: Union[socket.socket, _sslConn], pool: Pool, address: tuple[str, int], id: int - ): - self.pool_ref = weakref.ref(pool) - self.conn = conn - self.address = address - self.id = id - self.closed = False - self.last_checkin_time = time.monotonic() - self.performed_handshake = False - self.is_writable: bool = False - self.max_wire_version = MAX_WIRE_VERSION - self.max_bson_size = MAX_BSON_SIZE - self.max_message_size = MAX_MESSAGE_SIZE - self.max_write_batch_size = MAX_WRITE_BATCH_SIZE - self.supports_sessions = False - self.hello_ok: bool = False - self.is_mongos = False - self.op_msg_enabled = False - self.listeners = pool.opts._event_listeners - self.enabled_for_cmap = pool.enabled_for_cmap - self.compression_settings = pool.opts._compression_settings - self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None - self.socket_checker: SocketChecker = SocketChecker() - self.oidc_token_gen_id: Optional[int] = None - # Support for mechanism negotiation on the initial handshake. - self.negotiated_mechs: Optional[list[str]] = None - self.auth_ctx: Optional[_AuthContext] = None - - # The pool's generation changes with each reset() so we can close - # sockets created before the last reset. - self.pool_gen = pool.gen - self.generation = self.pool_gen.get_overall() - self.ready = False - self.cancel_context: _CancellationContext = _CancellationContext() - self.opts = pool.opts - self.more_to_come: bool = False - # For load balancer support. - self.service_id: Optional[ObjectId] = None - self.server_connection_id: Optional[int] = None - # When executing a transaction in load balancing mode, this flag is - # set to true to indicate that the session now owns the connection. - self.pinned_txn = False - self.pinned_cursor = False - self.active = False - self.last_timeout = self.opts.socket_timeout - self.connect_rtt = 0.0 - self._client_id = pool._client_id - self.creation_time = time.monotonic() - - def set_conn_timeout(self, timeout: Optional[float]) -> None: - """Cache last timeout to avoid duplicate calls to conn.settimeout.""" - if timeout == self.last_timeout: - return - self.last_timeout = timeout - self.conn.settimeout(timeout) - - def apply_timeout( - self, client: MongoClient, cmd: Optional[MutableMapping[str, Any]] - ) -> Optional[float]: - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - # Reset the socket timeout unless we're performing a streaming monitor check. - if not self.more_to_come: - self.set_conn_timeout(self.opts.socket_timeout) - return None - # RTT validation. - rtt = _csot.get_rtt() - if rtt is None: - rtt = self.connect_rtt - max_time_ms = timeout - rtt - if max_time_ms < 0: - timeout_details = _get_timeout_details(self.opts) - formatted = format_timeout_details(timeout_details) - # CSOT: raise an error without running the command since we know it will time out. - errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" - raise ExecutionTimeout( - errmsg, - 50, - {"ok": 0, "errmsg": errmsg, "code": 50}, - self.max_wire_version, - ) - if cmd is not None: - cmd["maxTimeMS"] = int(max_time_ms * 1000) - self.set_conn_timeout(timeout) - return timeout - - def pin_txn(self) -> None: - self.pinned_txn = True - assert not self.pinned_cursor - - def pin_cursor(self) -> None: - self.pinned_cursor = True - assert not self.pinned_txn - - def unpin(self) -> None: - pool = self.pool_ref() - if pool: - pool.checkin(self) - else: - self.close_conn(ConnectionClosedReason.STALE) - - def hello_cmd(self) -> dict[str, Any]: - # Handshake spec requires us to use OP_MSG+hello command for the - # initial handshake in load balanced or stable API mode. - if self.opts.server_api or self.hello_ok or self.opts.load_balanced: - self.op_msg_enabled = True - return {HelloCompat.CMD: 1} - else: - return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} - - def hello(self) -> Hello[dict[str, Any]]: - return self._hello(None, None, None) - - def _hello( - self, - cluster_time: Optional[ClusterTime], - topology_version: Optional[Any], - heartbeat_frequency: Optional[int], - ) -> Hello[dict[str, Any]]: - cmd = self.hello_cmd() - performing_handshake = not self.performed_handshake - awaitable = False - if performing_handshake: - self.performed_handshake = True - cmd["client"] = self.opts.metadata - if self.compression_settings: - cmd["compression"] = self.compression_settings.compressors - if self.opts.load_balanced: - cmd["loadBalanced"] = True - elif topology_version is not None: - cmd["topologyVersion"] = topology_version - assert heartbeat_frequency is not None - cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) - awaitable = True - # If connect_timeout is None there is no timeout. - if self.opts.connect_timeout: - self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) - - if not performing_handshake and cluster_time is not None: - cmd["$clusterTime"] = cluster_time - - creds = self.opts._credentials - if creds: - if creds.mechanism == "DEFAULT" and creds.username: - cmd["saslSupportedMechs"] = creds.source + "." + creds.username - from pymongo import auth - - auth_ctx = auth._AuthContext.from_credentials(creds, self.address) - if auth_ctx: - speculative_authenticate = auth_ctx.speculate_command() - if speculative_authenticate is not None: - cmd["speculativeAuthenticate"] = speculative_authenticate - else: - auth_ctx = None - - if performing_handshake: - start = time.monotonic() - doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) - if performing_handshake: - self.connect_rtt = time.monotonic() - start - hello = Hello(doc, awaitable=awaitable) - self.is_writable = hello.is_writable - self.max_wire_version = hello.max_wire_version - self.max_bson_size = hello.max_bson_size - self.max_message_size = hello.max_message_size - self.max_write_batch_size = hello.max_write_batch_size - self.supports_sessions = ( - hello.logical_session_timeout_minutes is not None and hello.is_readable - ) - self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes - self.hello_ok = hello.hello_ok - self.is_repl = hello.server_type in ( - SERVER_TYPE.RSPrimary, - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther, - SERVER_TYPE.RSGhost, - ) - self.is_standalone = hello.server_type == SERVER_TYPE.Standalone - self.is_mongos = hello.server_type == SERVER_TYPE.Mongos - if performing_handshake and self.compression_settings: - ctx = self.compression_settings.get_compression_context(hello.compressors) - self.compression_context = ctx - - self.op_msg_enabled = True - self.server_connection_id = hello.connection_id - if creds: - self.negotiated_mechs = hello.sasl_supported_mechs - if auth_ctx: - auth_ctx.parse_response(hello) # type:ignore[arg-type] - if auth_ctx.speculate_succeeded(): - self.auth_ctx = auth_ctx - if self.opts.load_balanced: - if not hello.service_id: - raise ConfigurationError( - "Driver attempted to initialize in load balancing mode," - " but the server does not support this mode" - ) - self.service_id = hello.service_id - self.generation = self.pool_gen.get(self.service_id) - return hello - - def _next_reply(self) -> dict[str, Any]: - reply = self.receive_message(None) - self.more_to_come = reply.more_to_come - unpacked_docs = reply.unpack_response() - response_doc = unpacked_docs[0] - helpers._check_command_response(response_doc, self.max_wire_version) - return response_doc - - @_handle_reauth - def command( - self, - dbname: str, - spec: MutableMapping[str, Any], - read_preference: _ServerMode = ReadPreference.PRIMARY, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - parse_write_concern_error: bool = False, - collation: Optional[_CollationIn] = None, - session: Optional[ClientSession] = None, - client: Optional[MongoClient] = None, - retryable_write: bool = False, - publish_events: bool = True, - user_fields: Optional[Mapping[str, Any]] = None, - exhaust_allowed: bool = False, - ) -> dict[str, Any]: - """Execute a command or raise an error. - - :param dbname: name of the database on which to run the command - :param spec: a command document as a dict, SON, or mapping object - :param read_preference: a read preference - :param codec_options: a CodecOptions instance - :param check: raise OperationFailure if there are errors - :param allowable_errors: errors to ignore if `check` is True - :param read_concern: The read concern for this command. - :param write_concern: The write concern for this command. - :param parse_write_concern_error: Whether to parse the - ``writeConcernError`` field in the command response. - :param collation: The collation for this command. - :param session: optional ClientSession instance. - :param client: optional MongoClient for gossipping $clusterTime. - :param retryable_write: True if this command is a retryable write. - :param publish_events: Should we publish events for this command? - :param user_fields: Response fields that should be decoded - using the TypeDecoders from codec_options, passed to - bson._decode_all_selective. - """ - self.validate_session(client, session) - session = _validate_session_write_concern(session, write_concern) - - # Ensure command name remains in first place. - if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] - spec = dict(spec) - - if not (write_concern is None or write_concern.acknowledged or collation is None): - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - - self.add_server_api(spec) - if session: - session._apply_to(spec, retryable_write, read_preference, self) - self.send_cluster_time(spec, session, client) - listeners = self.listeners if publish_events else None - unacknowledged = bool(write_concern and not write_concern.acknowledged) - if self.op_msg_enabled: - self._raise_if_not_writable(unacknowledged) - try: - return command( - self, - dbname, - spec, - self.is_mongos, - read_preference, - codec_options, - session, - client, - check, - allowable_errors, - self.address, - listeners, - self.max_bson_size, - read_concern, - parse_write_concern_error=parse_write_concern_error, - collation=collation, - compression_ctx=self.compression_context, - use_op_msg=self.op_msg_enabled, - unacknowledged=unacknowledged, - user_fields=user_fields, - exhaust_allowed=exhaust_allowed, - write_concern=write_concern, - ) - except (OperationFailure, NotPrimaryError): - raise - # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. - except BaseException as error: - self._raise_connection_failure(error) - - def send_message(self, message: bytes, max_doc_size: int) -> None: - """Send a raw BSON message or raise ConnectionFailure. - - If a network exception is raised, the socket is closed. - """ - if self.max_bson_size is not None and max_doc_size > self.max_bson_size: - raise DocumentTooLarge( - "BSON document too large (%d bytes) - the connected server " - "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) - ) - - try: - self.conn.sendall(message) - except BaseException as error: - self._raise_connection_failure(error) - - def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: - """Receive a raw BSON message or raise ConnectionFailure. - - If any exception is raised, the socket is closed. - """ - try: - return receive_message(self, request_id, self.max_message_size) - except BaseException as error: - self._raise_connection_failure(error) - - def _raise_if_not_writable(self, unacknowledged: bool) -> None: - """Raise NotPrimaryError on unacknowledged write if this socket is not - writable. - """ - if unacknowledged and not self.is_writable: - # Write won't succeed, bail as if we'd received a not primary error. - raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) - - def unack_write(self, msg: bytes, max_doc_size: int) -> None: - """Send unack OP_MSG. - - Can raise ConnectionFailure or InvalidDocument. - - :param msg: bytes, an OP_MSG message. - :param max_doc_size: size in bytes of the largest document in `msg`. - """ - self._raise_if_not_writable(True) - self.send_message(msg, max_doc_size) - - def write_command( - self, request_id: int, msg: bytes, codec_options: CodecOptions - ) -> dict[str, Any]: - """Send "insert" etc. command, returning response as a dict. - - Can raise ConnectionFailure or OperationFailure. - - :param request_id: an int. - :param msg: bytes, the command message. - """ - self.send_message(msg, 0) - reply = self.receive_message(request_id) - result = reply.command_response(codec_options) - - # Raises NotPrimaryError or OperationFailure. - helpers._check_command_response(result, self.max_wire_version) - return result - - def authenticate(self, reauthenticate: bool = False) -> None: - """Authenticate to the server if needed. - - Can raise ConnectionFailure or OperationFailure. - """ - # CMAP spec says to publish the ready event only after authenticating - # the connection. - if reauthenticate: - if self.performed_handshake: - # Existing auth_ctx is stale, remove it. - self.auth_ctx = None - self.ready = False - if not self.ready: - creds = self.opts._credentials - if creds: - from pymongo import auth - - auth.authenticate(creds, self, reauthenticate=reauthenticate) - self.ready = True - if self.enabled_for_cmap: - assert self.listeners is not None - duration = time.monotonic() - self.creation_time - self.listeners.publish_connection_ready(self.address, self.id, duration) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_READY, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=self.id, - durationMS=duration, - ) - - def validate_session( - self, client: Optional[MongoClient], session: Optional[ClientSession] - ) -> None: - """Validate this session before use with client. - - Raises error if the client is not the one that created the session. - """ - if session: - if session._client is not client: - raise InvalidOperation("Can only use session with the MongoClient that started it") - - def close_conn(self, reason: Optional[str]) -> None: - """Close this connection with a reason.""" - if self.closed: - return - self._close_conn() - if reason and self.enabled_for_cmap: - assert self.listeners is not None - self.listeners.publish_connection_closed(self.address, self.id, reason) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=self.id, - reason=_verbose_connection_error_reason(reason), - error=reason, - ) - - def _close_conn(self) -> None: - """Close this connection.""" - if self.closed: - return - self.closed = True - self.cancel_context.cancel() - # Note: We catch exceptions to avoid spurious errors on interpreter - # shutdown. - try: - self.conn.close() - except Exception: # noqa: S110 - pass - - def conn_closed(self) -> bool: - """Return True if we know socket has been closed, False otherwise.""" - return self.socket_checker.socket_closed(self.conn) - - def send_cluster_time( - self, - command: MutableMapping[str, Any], - session: Optional[ClientSession], - client: Optional[MongoClient], - ) -> None: - """Add $clusterTime.""" - if client: - client._send_cluster_time(command, session) - - def add_server_api(self, command: MutableMapping[str, Any]) -> None: - """Add server_api parameters.""" - if self.opts.server_api: - _add_to_command(command, self.opts.server_api) - - def update_last_checkin_time(self) -> None: - self.last_checkin_time = time.monotonic() - - def update_is_writable(self, is_writable: bool) -> None: - self.is_writable = is_writable - - def idle_time_seconds(self) -> float: - """Seconds since this socket was last checked into its pool.""" - return time.monotonic() - self.last_checkin_time - - def _raise_connection_failure(self, error: BaseException) -> NoReturn: - # Catch *all* exceptions from socket methods and close the socket. In - # regular Python, socket operations only raise socket.error, even if - # the underlying cause was a Ctrl-C: a signal raised during socket.recv - # is expressed as an EINTR error from poll. See internal_select_ex() in - # socketmodule.c. All error codes from poll become socket.error at - # first. Eventually in PyEval_EvalFrameEx the interpreter checks for - # signals and throws KeyboardInterrupt into the current frame on the - # main thread. - # - # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, - # ..) is called in Python code, which experiences the signal as a - # KeyboardInterrupt from the start, rather than as an initial - # socket.error, so we catch that, close the socket, and reraise it. - # - # The connection closed event will be emitted later in checkin. - if self.ready: - reason = None - else: - reason = ConnectionClosedReason.ERROR - self.close_conn(reason) - # SSLError from PyOpenSSL inherits directly from Exception. - if isinstance(error, (IOError, OSError, SSLError)): - details = _get_timeout_details(self.opts) - _raise_connection_failure(self.address, error, timeout_details=details) - else: - raise - - def __eq__(self, other: Any) -> bool: - return self.conn == other.conn - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash(self.conn) - - def __repr__(self) -> str: - return "Connection({}){} at {}".format( - repr(self.conn), - self.closed and " CLOSED" or "", - id(self), - ) - - -def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: - """Given (host, port) and PoolOptions, connect and return a socket object. - - Can raise socket.error. - - This is a modified version of create_connection from CPython >= 2.7. - """ - host, port = address - - # Check if dealing with a unix domain socket - if host.endswith(".sock"): - if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported on this system") - sock = socket.socket(socket.AF_UNIX) - # SOCK_CLOEXEC not supported for Unix sockets. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.connect(host) - return sock - except OSError: - sock.close() - raise - - # Don't try IPv6 if we don't support it. Also skip it if host - # is 'localhost' (::1 is fine). Avoids slow connect issues - # like PYTHON-356. - family = socket.AF_INET - if socket.has_ipv6 and host != "localhost": - family = socket.AF_UNSPEC - - err = None - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, dummy, sa = res - # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited - # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 - # all file descriptors are created non-inheritable. See PEP 446. - try: - sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) - except OSError: - # Can SOCK_CLOEXEC be defined even if the kernel doesn't support - # it? - sock = socket.socket(af, socktype, proto) - # Fallback when SOCK_CLOEXEC isn't available. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - # CSOT: apply timeout to socket connect. - timeout = _csot.remaining() - if timeout is None: - timeout = options.connect_timeout - elif timeout <= 0: - raise socket.timeout("timed out") - sock.settimeout(timeout) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) - _set_keepalive_times(sock) - sock.connect(sa) - return sock - except OSError as e: - err = e - sock.close() - - if err is not None: - raise err - else: - # This likely means we tried to connect to an IPv6 only - # host with an OS/kernel or Python interpreter that doesn't - # support IPv6. The test case is Jython2.5.1 which doesn't - # support IPv6 at all. - raise OSError("getaddrinfo failed") - - -def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: - """Given (host, port) and PoolOptions, return a configured socket. - - Can raise socket.error, ConnectionFailure, or _CertificateError. - - Sets socket's SSL and timeout options. - """ - sock = _create_connection(address, options) - ssl_context = options._ssl_context - - if ssl_context is None: - sock.settimeout(options.socket_timeout) - return sock - - host = address[0] - try: - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if HAS_SNI: - ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) - else: - ssl_sock = ssl_context.wrap_socket(sock) - except _CertificateError: - sock.close() - # Raise _CertificateError directly like we do after match_hostname - # below. - raise - except (OSError, SSLError) as exc: - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - details = _get_timeout_details(options) - _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) - if ( - ssl_context.verify_mode - and not ssl_context.check_hostname - and not options.tls_allow_invalid_hostnames - ): - try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) - except _CertificateError: - ssl_sock.close() - raise - - ssl_sock.settimeout(options.socket_timeout) - return ssl_sock - - -class _PoolClosedError(PyMongoError): - """Internal error raised when a thread tries to get a connection from a - closed pool. - """ - - -class _PoolGeneration: - def __init__(self) -> None: - # Maps service_id to generation. - self._generations: dict[ObjectId, int] = collections.defaultdict(int) - # Overall pool generation. - self._generation = 0 - - def get(self, service_id: Optional[ObjectId]) -> int: - """Get the generation for the given service_id.""" - if service_id is None: - return self._generation - return self._generations[service_id] - - def get_overall(self) -> int: - """Get the Pool's overall generation.""" - return self._generation - - def inc(self, service_id: Optional[ObjectId]) -> None: - """Increment the generation for the given service_id.""" - self._generation += 1 - if service_id is None: - for service_id in self._generations: - self._generations[service_id] += 1 - else: - self._generations[service_id] += 1 - - def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: - """Return if the given generation for a given service_id is stale.""" - return gen != self.get(service_id) - - -class PoolState: - PAUSED = 1 - READY = 2 - CLOSED = 3 - - -# Do *not* explicitly inherit from object or Jython won't call __del__ -# http://bugs.jython.org/issue1057 -class Pool: - def __init__( - self, - address: _Address, - options: PoolOptions, - handshake: bool = True, - client_id: Optional[ObjectId] = None, - ): - """ - :param address: a (hostname, port) tuple - :param options: a PoolOptions instance - :param handshake: whether to call hello for each new Connection - """ - if options.pause_enabled: - self.state = PoolState.PAUSED - else: - self.state = PoolState.READY - # Check a socket's health with socket_closed() every once in a while. - # Can override for testing: 0 to always check, None to never check. - self._check_interval_seconds = 1 - # LIFO pool. Sockets are ordered on idle time. Sockets claimed - # and returned to pool from the left side. Stale sockets removed - # from the right side. - self.conns: collections.deque = collections.deque() - self.active_contexts: set[_CancellationContext] = set() - self.lock = _create_lock() - self.active_sockets = 0 - # Monotonically increasing connection ID required for CMAP Events. - self.next_connection_id = 1 - # Track whether the sockets in this pool are writeable or not. - self.is_writable: Optional[bool] = None - - # Keep track of resets, so we notice sockets created before the most - # recent reset and close them. - # self.generation = 0 - self.gen = _PoolGeneration() - self.pid = os.getpid() - self.address = address - self.opts = options - self.handshake = handshake - # Don't publish events in Monitor pools. - self.enabled_for_cmap = ( - self.handshake - and self.opts._event_listeners is not None - and self.opts._event_listeners.enabled_for_cmap - ) - - # The first portion of the wait queue. - # Enforces: maxPoolSize - # Also used for: clearing the wait queue - self.size_cond = threading.Condition(self.lock) - self.requests = 0 - self.max_pool_size = self.opts.max_pool_size - if not self.max_pool_size: - self.max_pool_size = float("inf") - # The second portion of the wait queue. - # Enforces: maxConnecting - # Also used for: clearing the wait queue - self._max_connecting_cond = threading.Condition(self.lock) - self._max_connecting = self.opts.max_connecting - self._pending = 0 - self._client_id = client_id - if self.enabled_for_cmap: - assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_pool_created( - self.address, self.opts.non_default_options - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_CREATED, - serverHost=self.address[0], - serverPort=self.address[1], - **self.opts.non_default_options, - ) - # Similar to active_sockets but includes threads in the wait queue. - self.operation_count: int = 0 - # Retain references to pinned connections to prevent the CPython GC - # from thinking that a cursor's pinned connection can be GC'd when the - # cursor is GC'd (see PYTHON-2751). - self.__pinned_sockets: set[Connection] = set() - self.ncursors = 0 - self.ntxns = 0 - - def ready(self) -> None: - # Take the lock to avoid the race condition described in PYTHON-2699. - with self.lock: - if self.state != PoolState.READY: - self.state = PoolState.READY - if self.enabled_for_cmap: - assert self.opts._event_listeners is not None - self.opts._event_listeners.publish_pool_ready(self.address) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_READY, - serverHost=self.address[0], - serverPort=self.address[1], - ) - - @property - def closed(self) -> bool: - return self.state == PoolState.CLOSED - - def _reset( - self, - close: bool, - pause: bool = True, - service_id: Optional[ObjectId] = None, - interrupt_connections: bool = False, - ) -> None: - old_state = self.state - with self.size_cond: - if self.closed: - return - if self.opts.pause_enabled and pause and not self.opts.load_balanced: - old_state, self.state = self.state, PoolState.PAUSED - self.gen.inc(service_id) - newpid = os.getpid() - if self.pid != newpid: - self.pid = newpid - self.active_sockets = 0 - self.operation_count = 0 - if service_id is None: - sockets, self.conns = self.conns, collections.deque() - else: - discard: collections.deque = collections.deque() - keep: collections.deque = collections.deque() - for conn in self.conns: - if conn.service_id == service_id: - discard.append(conn) - else: - keep.append(conn) - sockets = discard - self.conns = keep - - if close: - self.state = PoolState.CLOSED - # Clear the wait queue - self._max_connecting_cond.notify_all() - self.size_cond.notify_all() - - if interrupt_connections: - for context in self.active_contexts: - context.cancel() - - listeners = self.opts._event_listeners - # CMAP spec says that close() MUST close sockets before publishing the - # PoolClosedEvent but that reset() SHOULD close sockets *after* - # publishing the PoolClearedEvent. - if close: - for conn in sockets: - conn.close_conn(ConnectionClosedReason.POOL_CLOSED) - if self.enabled_for_cmap: - assert listeners is not None - listeners.publish_pool_closed(self.address) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - ) - else: - if old_state != PoolState.PAUSED and self.enabled_for_cmap: - assert listeners is not None - listeners.publish_pool_cleared( - self.address, - service_id=service_id, - interrupt_connections=interrupt_connections, - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_CLEARED, - serverHost=self.address[0], - serverPort=self.address[1], - serviceId=service_id, - ) - for conn in sockets: - conn.close_conn(ConnectionClosedReason.STALE) - - def update_is_writable(self, is_writable: Optional[bool]) -> None: - """Updates the is_writable attribute on all sockets currently in the - Pool. - """ - self.is_writable = is_writable - with self.lock: - for _socket in self.conns: - _socket.update_is_writable(self.is_writable) - - def reset( - self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False - ) -> None: - self._reset(close=False, service_id=service_id, interrupt_connections=interrupt_connections) - - def reset_without_pause(self) -> None: - self._reset(close=False, pause=False) - - def close(self) -> None: - self._reset(close=True) - - def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: - return self.gen.stale(gen, service_id) - - def remove_stale_sockets(self, reference_generation: int) -> None: - """Removes stale sockets then adds new ones if pool is too small and - has not been reset. The `reference_generation` argument specifies the - `generation` at the point in time this operation was requested on the - pool. - """ - # Take the lock to avoid the race condition described in PYTHON-2699. - with self.lock: - if self.state != PoolState.READY: - return - - if self.opts.max_idle_time_seconds is not None: - with self.lock: - while ( - self.conns - and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds - ): - conn = self.conns.pop() - conn.close_conn(ConnectionClosedReason.IDLE) - - while True: - with self.size_cond: - # There are enough sockets in the pool. - if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: - return - if self.requests >= self.opts.min_pool_size: - return - self.requests += 1 - incremented = False - try: - with self._max_connecting_cond: - # If maxConnecting connections are already being created - # by this pool then try again later instead of waiting. - if self._pending >= self._max_connecting: - return - self._pending += 1 - incremented = True - conn = self.connect() - with self.lock: - # Close connection and return if the pool was reset during - # socket creation or while acquiring the pool lock. - if self.gen.get_overall() != reference_generation: - conn.close_conn(ConnectionClosedReason.STALE) - return - self.conns.appendleft(conn) - self.active_contexts.discard(conn.cancel_context) - finally: - if incremented: - # Notify after adding the socket to the pool. - with self._max_connecting_cond: - self._pending -= 1 - self._max_connecting_cond.notify() - - with self.size_cond: - self.requests -= 1 - self.size_cond.notify() - - def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: - """Connect to Mongo and return a new Connection. - - Can raise ConnectionFailure. - - Note that the pool does not keep a reference to the socket -- you - must call checkin() when you're done with it. - """ - with self.lock: - conn_id = self.next_connection_id - self.next_connection_id += 1 - - listeners = self.opts._event_listeners - if self.enabled_for_cmap: - assert listeners is not None - listeners.publish_connection_created(self.address, conn_id) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CREATED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn_id, - ) - - try: - sock = _configured_socket(self.address, self.opts) - except BaseException as error: - if self.enabled_for_cmap: - assert listeners is not None - listeners.publish_connection_closed( - self.address, conn_id, ConnectionClosedReason.ERROR - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn_id, - reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), - error=ConnectionClosedReason.ERROR, - ) - if isinstance(error, (IOError, OSError, SSLError)): - details = _get_timeout_details(self.opts) - _raise_connection_failure(self.address, error, timeout_details=details) - - raise - - conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] - with self.lock: - self.active_contexts.add(conn.cancel_context) - try: - if self.handshake: - conn.hello() - self.is_writable = conn.is_writable - if handler: - handler.contribute_socket(conn, completed_handshake=False) - - conn.authenticate() - except BaseException: - conn.close_conn(ConnectionClosedReason.ERROR) - raise - - return conn - - @contextlib.contextmanager - def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterator[Connection]: - """Get a connection from the pool. Use with a "with" statement. - - Returns a :class:`Connection` object wrapping a connected - :class:`socket.socket`. - - This method should always be used in a with-statement:: - - with pool.get_conn() as connection: - connection.send_message(msg) - data = connection.receive_message(op_code, request_id) - - Can raise ConnectionFailure or OperationFailure. - - :param handler: A _MongoClientErrorHandler. - """ - listeners = self.opts._event_listeners - checkout_started_time = time.monotonic() - if self.enabled_for_cmap: - assert listeners is not None - listeners.publish_connection_check_out_started(self.address) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_STARTED, - serverHost=self.address[0], - serverPort=self.address[1], - ) - - conn = self._get_conn(checkout_started_time, handler=handler) - - if self.enabled_for_cmap: - assert listeners is not None - duration = time.monotonic() - checkout_started_time - listeners.publish_connection_checked_out(self.address, conn.id, duration) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn.id, - durationMS=duration, - ) - try: - with self.lock: - self.active_contexts.add(conn.cancel_context) - yield conn - except BaseException: - # Exception in caller. Ensure the connection gets returned. - # Note that when pinned is True, the session owns the - # connection and it is responsible for checking the connection - # back into the pool. - pinned = conn.pinned_txn or conn.pinned_cursor - if handler: - # Perform SDAM error handling rules while the connection is - # still checked out. - exc_type, exc_val, _ = sys.exc_info() - handler.handle(exc_type, exc_val) - if not pinned and conn.active: - self.checkin(conn) - raise - if conn.pinned_txn: - with self.lock: - self.__pinned_sockets.add(conn) - self.ntxns += 1 - elif conn.pinned_cursor: - with self.lock: - self.__pinned_sockets.add(conn) - self.ncursors += 1 - elif conn.active: - self.checkin(conn) - - def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: - if self.state != PoolState.READY: - if self.enabled_for_cmap and emit_event: - assert self.opts._event_listeners is not None - duration = time.monotonic() - checkout_started_time - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_FAILED, - serverHost=self.address[0], - serverPort=self.address[1], - reason="An error occurred while trying to establish a new connection", - error=ConnectionCheckOutFailedReason.CONN_ERROR, - durationMS=duration, - ) - - details = _get_timeout_details(self.opts) - _raise_connection_failure( - self.address, AutoReconnect("connection pool paused"), timeout_details=details - ) - - def _get_conn( - self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None - ) -> Connection: - """Get or create a Connection. Can raise ConnectionFailure.""" - # We use the pid here to avoid issues with fork / multiprocessing. - # See test.test_client:TestClient.test_fork for an example of - # what could go wrong otherwise - if self.pid != os.getpid(): - self.reset_without_pause() - - if self.closed: - if self.enabled_for_cmap: - assert self.opts._event_listeners is not None - duration = time.monotonic() - checkout_started_time - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_FAILED, - serverHost=self.address[0], - serverPort=self.address[1], - reason="Connection pool was closed", - error=ConnectionCheckOutFailedReason.POOL_CLOSED, - durationMS=duration, - ) - raise _PoolClosedError( - "Attempted to check out a connection from closed connection pool" - ) - - with self.lock: - self.operation_count += 1 - - # Get a free socket or create one. - if _csot.get_timeout(): - deadline = _csot.get_deadline() - elif self.opts.wait_queue_timeout: - deadline = time.monotonic() + self.opts.wait_queue_timeout - else: - deadline = None - - with self.size_cond: - self._raise_if_not_ready(checkout_started_time, emit_event=True) - while not (self.requests < self.max_pool_size): - if not _cond_wait(self.size_cond, deadline): - # Timed out, notify the next thread to ensure a - # timeout doesn't consume the condition. - if self.requests < self.max_pool_size: - self.size_cond.notify() - self._raise_wait_queue_timeout(checkout_started_time) - self._raise_if_not_ready(checkout_started_time, emit_event=True) - self.requests += 1 - - # We've now acquired the semaphore and must release it on error. - conn = None - incremented = False - emitted_event = False - try: - with self.lock: - self.active_sockets += 1 - incremented = True - while conn is None: - # CMAP: we MUST wait for either maxConnecting OR for a socket - # to be checked back into the pool. - with self._max_connecting_cond: - self._raise_if_not_ready(checkout_started_time, emit_event=False) - while not (self.conns or self._pending < self._max_connecting): - if not _cond_wait(self._max_connecting_cond, deadline): - # Timed out, notify the next thread to ensure a - # timeout doesn't consume the condition. - if self.conns or self._pending < self._max_connecting: - self._max_connecting_cond.notify() - emitted_event = True - self._raise_wait_queue_timeout(checkout_started_time) - self._raise_if_not_ready(checkout_started_time, emit_event=False) - - try: - conn = self.conns.popleft() - except IndexError: - self._pending += 1 - if conn: # We got a socket from the pool - if self._perished(conn): - conn = None - continue - else: # We need to create a new connection - try: - conn = self.connect(handler=handler) - finally: - with self._max_connecting_cond: - self._pending -= 1 - self._max_connecting_cond.notify() - except BaseException: - if conn: - # We checked out a socket but authentication failed. - conn.close_conn(ConnectionClosedReason.ERROR) - with self.size_cond: - self.requests -= 1 - if incremented: - self.active_sockets -= 1 - self.size_cond.notify() - - if self.enabled_for_cmap and not emitted_event: - assert self.opts._event_listeners is not None - duration = time.monotonic() - checkout_started_time - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_FAILED, - serverHost=self.address[0], - serverPort=self.address[1], - reason="An error occurred while trying to establish a new connection", - error=ConnectionCheckOutFailedReason.CONN_ERROR, - durationMS=duration, - ) - raise - - conn.active = True - return conn - - def checkin(self, conn: Connection) -> None: - """Return the connection to the pool, or if it's closed discard it. - - :param conn: The connection to check into the pool. - """ - txn = conn.pinned_txn - cursor = conn.pinned_cursor - conn.active = False - conn.pinned_txn = False - conn.pinned_cursor = False - self.__pinned_sockets.discard(conn) - listeners = self.opts._event_listeners - with self.lock: - self.active_contexts.discard(conn.cancel_context) - if self.enabled_for_cmap: - assert listeners is not None - listeners.publish_connection_checked_in(self.address, conn.id) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKEDIN, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn.id, - ) - if self.pid != os.getpid(): - self.reset_without_pause() - else: - if self.closed: - conn.close_conn(ConnectionClosedReason.POOL_CLOSED) - elif conn.closed: - # CMAP requires the closed event be emitted after the check in. - if self.enabled_for_cmap: - assert listeners is not None - listeners.publish_connection_closed( - self.address, conn.id, ConnectionClosedReason.ERROR - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn.id, - reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), - error=ConnectionClosedReason.ERROR, - ) - else: - with self.lock: - # Hold the lock to ensure this section does not race with - # Pool.reset(). - if self.stale_generation(conn.generation, conn.service_id): - conn.close_conn(ConnectionClosedReason.STALE) - else: - conn.update_last_checkin_time() - conn.update_is_writable(bool(self.is_writable)) - self.conns.appendleft(conn) - # Notify any threads waiting to create a connection. - self._max_connecting_cond.notify() - - with self.size_cond: - if txn: - self.ntxns -= 1 - elif cursor: - self.ncursors -= 1 - self.requests -= 1 - self.active_sockets -= 1 - self.operation_count -= 1 - self.size_cond.notify() - - def _perished(self, conn: Connection) -> bool: - """Return True and close the connection if it is "perished". - - This side-effecty function checks if this socket has been idle for - for longer than the max idle time, or if the socket has been closed by - some external network error, or if the socket's generation is outdated. - - Checking sockets lets us avoid seeing *some* - :class:`~pymongo.errors.AutoReconnect` exceptions on server - hiccups, etc. We only check if the socket was closed by an external - error if it has been > 1 second since the socket was checked into the - pool, to keep performance reasonable - we can't avoid AutoReconnects - completely anyway. - """ - idle_time_seconds = conn.idle_time_seconds() - # If socket is idle, open a new one. - if ( - self.opts.max_idle_time_seconds is not None - and idle_time_seconds > self.opts.max_idle_time_seconds - ): - conn.close_conn(ConnectionClosedReason.IDLE) - return True - - if self._check_interval_seconds is not None and ( - self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds - ): - if conn.conn_closed(): - conn.close_conn(ConnectionClosedReason.ERROR) - return True - - if self.stale_generation(conn.generation, conn.service_id): - conn.close_conn(ConnectionClosedReason.STALE) - return True - - return False - - def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: - listeners = self.opts._event_listeners - if self.enabled_for_cmap: - assert listeners is not None - duration = time.monotonic() - checkout_started_time - listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_FAILED, - serverHost=self.address[0], - serverPort=self.address[1], - reason="Wait queue timeout elapsed without a connection becoming available", - error=ConnectionCheckOutFailedReason.TIMEOUT, - durationMS=duration, - ) - timeout = _csot.get_timeout() or self.opts.wait_queue_timeout - if self.opts.load_balanced: - other_ops = self.active_sockets - self.ncursors - self.ntxns - raise WaitQueueTimeoutError( - "Timeout waiting for connection from the connection pool. " - "maxPoolSize: {}, connections in use by cursors: {}, " - "connections in use by transactions: {}, connections in use " - "by other operations: {}, timeout: {}".format( - self.opts.max_pool_size, - self.ncursors, - self.ntxns, - other_ops, - timeout, - ) - ) - raise WaitQueueTimeoutError( - "Timed out while checking out a connection from connection pool. " - f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" - ) - - def __del__(self) -> None: - # Avoid ResourceWarnings in Python 3 - # Close all sockets without calling reset() or close() because it is - # not safe to acquire a lock in __del__. - for conn in self.conns: - conn.close_conn(None) +__doc__ = original_doc diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index b08588daff..4afb3e17b5 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -17,6 +17,7 @@ """ from __future__ import annotations +import asyncio import socket as _socket import ssl as _stdlibssl import sys as _sys @@ -364,6 +365,58 @@ def set_default_verify_paths(self) -> None: # but not that same as CPython's. self._ctx.set_default_verify_paths() + async def a_wrap_socket( + self, + sock: _socket.socket, + server_side: bool = False, + do_handshake_on_connect: bool = True, + suppress_ragged_eofs: bool = True, + server_hostname: Optional[str] = None, + session: Optional[_SSL.Session] = None, + ) -> _sslConn: + """Wrap an existing Python socket connection and return a TLS socket + object. + """ + ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs) + loop = asyncio.get_running_loop() + if session: + ssl_conn.set_session(session) + if server_side is True: + ssl_conn.set_accept_state() + else: + # SNI + if server_hostname and not _is_ip_address(server_hostname): + # XXX: Do this in a callback registered with + # SSLContext.set_info_callback? See Twisted for an example. + ssl_conn.set_tlsext_host_name(server_hostname.encode("idna")) + if self.verify_mode != _stdlibssl.CERT_NONE: + # Request a stapled OCSP response. + await loop.run_in_executor(None, ssl_conn.request_ocsp) + ssl_conn.set_connect_state() + # If this wasn't true the caller of wrap_socket would call + # do_handshake() + if do_handshake_on_connect: + # XXX: If we do hostname checking in a callback we can get rid + # of this call to do_handshake() since the handshake + # will happen automatically later. + await loop.run_in_executor(None, ssl_conn.do_handshake) + # XXX: Do this in a callback registered with + # SSLContext.set_info_callback? See Twisted for an example. + if self.check_hostname and server_hostname is not None: + from service_identity import pyopenssl + + try: + if _is_ip_address(server_hostname): + pyopenssl.verify_ip_address(ssl_conn, server_hostname) + else: + pyopenssl.verify_hostname(ssl_conn, server_hostname) + except ( # type:ignore[misc] + service_identity.SICertificateError, + service_identity.SIVerificationError, + ) as exc: + raise _CertificateError(str(exc)) from None + return ssl_conn + def wrap_socket( self, sock: _socket.socket, diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 7752750c46..de15cbfcaf 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -1,6 +1,6 @@ -# Copyright 2012-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License", +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # @@ -12,611 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for choosing which member of a replica set to read from.""" - +"""Re-import of synchronous ReadPreferences API for compatibility.""" from __future__ import annotations -from collections import abc -from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence - -from pymongo import max_staleness_selectors -from pymongo.errors import ConfigurationError -from pymongo.server_selectors import ( - member_with_tags_server_selector, - secondary_with_tags_server_selector, -) - -if TYPE_CHECKING: - from pymongo.server_selectors import Selection - from pymongo.topology_description import TopologyDescription - -_PRIMARY = 0 -_PRIMARY_PREFERRED = 1 -_SECONDARY = 2 -_SECONDARY_PREFERRED = 3 -_NEAREST = 4 - - -_MONGOS_MODES = ( - "primary", - "primaryPreferred", - "secondary", - "secondaryPreferred", - "nearest", -) - -_Hedge = Mapping[str, Any] -_TagSets = Sequence[Mapping[str, Any]] - - -def _validate_tag_sets(tag_sets: Optional[_TagSets]) -> Optional[_TagSets]: - """Validate tag sets for a MongoClient.""" - if tag_sets is None: - return tag_sets - - if not isinstance(tag_sets, (list, tuple)): - raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") - if len(tag_sets) == 0: - raise ValueError( - f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" - ) - - for tags in tag_sets: - if not isinstance(tags, abc.Mapping): - raise TypeError( - f"Tag set {tags!r} invalid, must be an instance of dict, " - "bson.son.SON or other type that inherits from " - "collection.Mapping" - ) - - return list(tag_sets) - - -def _invalid_max_staleness_msg(max_staleness: Any) -> str: - return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness - - -# Some duplication with common.py to avoid import cycle. -def _validate_max_staleness(max_staleness: Any) -> int: - """Validate max_staleness.""" - if max_staleness == -1: - return -1 - - if not isinstance(max_staleness, int): - raise TypeError(_invalid_max_staleness_msg(max_staleness)) - - if max_staleness <= 0: - raise ValueError(_invalid_max_staleness_msg(max_staleness)) - - return max_staleness - - -def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: - """Validate hedge.""" - if hedge is None: - return None - - if not isinstance(hedge, dict): - raise TypeError(f"hedge must be a dictionary, not {hedge!r}") - - return hedge - - -class _ServerMode: - """Base class for all read preferences.""" - - __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") - - def __init__( - self, - mode: int, - tag_sets: Optional[_TagSets] = None, - max_staleness: int = -1, - hedge: Optional[_Hedge] = None, - ) -> None: - self.__mongos_mode = _MONGOS_MODES[mode] - self.__mode = mode - self.__tag_sets = _validate_tag_sets(tag_sets) - self.__max_staleness = _validate_max_staleness(max_staleness) - self.__hedge = _validate_hedge(hedge) - - @property - def name(self) -> str: - """The name of this read preference.""" - return self.__class__.__name__ - - @property - def mongos_mode(self) -> str: - """The mongos mode of this read preference.""" - return self.__mongos_mode - - @property - def document(self) -> dict[str, Any]: - """Read preference as a document.""" - doc: dict[str, Any] = {"mode": self.__mongos_mode} - if self.__tag_sets not in (None, [{}]): - doc["tags"] = self.__tag_sets - if self.__max_staleness != -1: - doc["maxStalenessSeconds"] = self.__max_staleness - if self.__hedge not in (None, {}): - doc["hedge"] = self.__hedge - return doc - - @property - def mode(self) -> int: - """The mode of this read preference instance.""" - return self.__mode - - @property - def tag_sets(self) -> _TagSets: - """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to - read only from members whose ``dc`` tag has the value ``"ny"``. - To specify a priority-order for tag sets, provide a list of - tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag - set, ``{}``, means "read from any member that matches the mode, - ignoring tags." MongoClient tries each set of tags in turn - until it finds a set of tags with at least one matching member. - For example, to only send a query to an analytic node:: - - Nearest(tag_sets=[{"node":"analytics"}]) - - Or using :class:`SecondaryPreferred`:: - - SecondaryPreferred(tag_sets=[{"node":"analytics"}]) - - .. seealso:: `Data-Center Awareness - `_ - """ - return list(self.__tag_sets) if self.__tag_sets else [{}] - - @property - def max_staleness(self) -> int: - """The maximum estimated length of time (in seconds) a replica set - secondary can fall behind the primary in replication before it will - no longer be selected for operations, or -1 for no maximum. - """ - return self.__max_staleness - - @property - def hedge(self) -> Optional[_Hedge]: - """The read preference ``hedge`` parameter. - - A dictionary that configures how the server will perform hedged reads. - It consists of the following keys: - - - ``enabled``: Enables or disables hedged reads in sharded clusters. - - Hedged reads are automatically enabled in MongoDB 4.4+ when using a - ``nearest`` read preference. To explicitly enable hedged reads, set - the ``enabled`` key to ``true``:: - - >>> Nearest(hedge={'enabled': True}) - - To explicitly disable hedged reads, set the ``enabled`` key to - ``False``:: - - >>> Nearest(hedge={'enabled': False}) - - .. versionadded:: 3.11 - """ - return self.__hedge - - @property - def min_wire_version(self) -> int: - """The wire protocol version the server must support. - - Some read preferences impose version requirements on all servers (e.g. - maxStalenessSeconds requires MongoDB 3.4 / maxWireVersion 5). - - All servers' maxWireVersion must be at least this read preference's - `min_wire_version`, or the driver raises - :exc:`~pymongo.errors.ConfigurationError`. - """ - return 0 if self.__max_staleness == -1 else 5 - - def __repr__(self) -> str: - return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( - self.name, - self.__tag_sets, - self.__max_staleness, - self.__hedge, - ) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, _ServerMode): - return ( - self.mode == other.mode - and self.tag_sets == other.tag_sets - and self.max_staleness == other.max_staleness - and self.hedge == other.hedge - ) - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __getstate__(self) -> dict[str, Any]: - """Return value of object for pickling. - - Needed explicitly because __slots__() defined. - """ - return { - "mode": self.__mode, - "tag_sets": self.__tag_sets, - "max_staleness": self.__max_staleness, - "hedge": self.__hedge, - } - - def __setstate__(self, value: Mapping[str, Any]) -> None: - """Restore from pickling.""" - self.__mode = value["mode"] - self.__mongos_mode = _MONGOS_MODES[self.__mode] - self.__tag_sets = _validate_tag_sets(value["tag_sets"]) - self.__max_staleness = _validate_max_staleness(value["max_staleness"]) - self.__hedge = _validate_hedge(value["hedge"]) - - def __call__(self, selection: Selection) -> Selection: - return selection - - -class Primary(_ServerMode): - """Primary read preference. - - * When directly connected to one mongod queries are allowed if the server - is standalone or a replica set primary. - * When connected to a mongos queries are sent to the primary of a shard. - * When connected to a replica set queries are sent to the primary of - the replica set. - """ - - __slots__ = () - - def __init__(self) -> None: - super().__init__(_PRIMARY) - - def __call__(self, selection: Selection) -> Selection: - """Apply this read preference to a Selection.""" - return selection.primary_selection - - def __repr__(self) -> str: - return "Primary()" - - def __eq__(self, other: Any) -> bool: - if isinstance(other, _ServerMode): - return other.mode == _PRIMARY - return NotImplemented - - -class PrimaryPreferred(_ServerMode): - """PrimaryPreferred read preference. - - * When directly connected to one mongod queries are allowed to standalone - servers, to a replica set primary, or to replica set secondaries. - * When connected to a mongos queries are sent to the primary of a shard if - available, otherwise a shard secondary. - * When connected to a replica set queries are sent to the primary if - available, otherwise a secondary. - - .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first - created reads will be routed to an available secondary until the - primary of the replica set is discovered. - - :param tag_sets: The :attr:`~tag_sets` to use if the primary is not - available. - :param max_staleness: (integer, in seconds) The maximum estimated - length of time a replica set secondary can fall behind the primary in - replication before it will no longer be selected for operations. - Default -1, meaning no maximum. If it is set, it must be at least - 90 seconds. - :param hedge: The :attr:`~hedge` to use if the primary is not available. - - .. versionchanged:: 3.11 - Added ``hedge`` parameter. - """ - - __slots__ = () - - def __init__( - self, - tag_sets: Optional[_TagSets] = None, - max_staleness: int = -1, - hedge: Optional[_Hedge] = None, - ) -> None: - super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) - - def __call__(self, selection: Selection) -> Selection: - """Apply this read preference to Selection.""" - if selection.primary: - return selection.primary_selection - else: - return secondary_with_tags_server_selector( - self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) - ) - - -class Secondary(_ServerMode): - """Secondary read preference. - - * When directly connected to one mongod queries are allowed to standalone - servers, to a replica set primary, or to replica set secondaries. - * When connected to a mongos queries are distributed among shard - secondaries. An error is raised if no secondaries are available. - * When connected to a replica set queries are distributed among - secondaries. An error is raised if no secondaries are available. - - :param tag_sets: The :attr:`~tag_sets` for this read preference. - :param max_staleness: (integer, in seconds) The maximum estimated - length of time a replica set secondary can fall behind the primary in - replication before it will no longer be selected for operations. - Default -1, meaning no maximum. If it is set, it must be at least - 90 seconds. - :param hedge: The :attr:`~hedge` for this read preference. - - .. versionchanged:: 3.11 - Added ``hedge`` parameter. - """ - - __slots__ = () - - def __init__( - self, - tag_sets: Optional[_TagSets] = None, - max_staleness: int = -1, - hedge: Optional[_Hedge] = None, - ) -> None: - super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) - - def __call__(self, selection: Selection) -> Selection: - """Apply this read preference to Selection.""" - return secondary_with_tags_server_selector( - self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) - ) - - -class SecondaryPreferred(_ServerMode): - """SecondaryPreferred read preference. - - * When directly connected to one mongod queries are allowed to standalone - servers, to a replica set primary, or to replica set secondaries. - * When connected to a mongos queries are distributed among shard - secondaries, or the shard primary if no secondary is available. - * When connected to a replica set queries are distributed among - secondaries, or the primary if no secondary is available. - - .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first - created reads will be routed to the primary of the replica set until - an available secondary is discovered. - - :param tag_sets: The :attr:`~tag_sets` for this read preference. - :param max_staleness: (integer, in seconds) The maximum estimated - length of time a replica set secondary can fall behind the primary in - replication before it will no longer be selected for operations. - Default -1, meaning no maximum. If it is set, it must be at least - 90 seconds. - :param hedge: The :attr:`~hedge` for this read preference. - - .. versionchanged:: 3.11 - Added ``hedge`` parameter. - """ - - __slots__ = () - - def __init__( - self, - tag_sets: Optional[_TagSets] = None, - max_staleness: int = -1, - hedge: Optional[_Hedge] = None, - ) -> None: - super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) - - def __call__(self, selection: Selection) -> Selection: - """Apply this read preference to Selection.""" - secondaries = secondary_with_tags_server_selector( - self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) - ) - - if secondaries: - return secondaries - else: - return selection.primary_selection - - -class Nearest(_ServerMode): - """Nearest read preference. - - * When directly connected to one mongod queries are allowed to standalone - servers, to a replica set primary, or to replica set secondaries. - * When connected to a mongos queries are distributed among all members of - a shard. - * When connected to a replica set queries are distributed among all - members. - - :param tag_sets: The :attr:`~tag_sets` for this read preference. - :param max_staleness: (integer, in seconds) The maximum estimated - length of time a replica set secondary can fall behind the primary in - replication before it will no longer be selected for operations. - Default -1, meaning no maximum. If it is set, it must be at least - 90 seconds. - :param hedge: The :attr:`~hedge` for this read preference. - - .. versionchanged:: 3.11 - Added ``hedge`` parameter. - """ - - __slots__ = () - - def __init__( - self, - tag_sets: Optional[_TagSets] = None, - max_staleness: int = -1, - hedge: Optional[_Hedge] = None, - ) -> None: - super().__init__(_NEAREST, tag_sets, max_staleness, hedge) - - def __call__(self, selection: Selection) -> Selection: - """Apply this read preference to Selection.""" - return member_with_tags_server_selector( - self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) - ) - - -class _AggWritePref: - """Agg $out/$merge write preference. - - * If there are readable servers and there is any pre-5.0 server, use - primary read preference. - * Otherwise use `pref` read preference. - - :param pref: The read preference to use on MongoDB 5.0+. - """ - - __slots__ = ("pref", "effective_pref") - - def __init__(self, pref: _ServerMode): - self.pref = pref - self.effective_pref: _ServerMode = ReadPreference.PRIMARY - - def selection_hook(self, topology_description: TopologyDescription) -> None: - common_wv = topology_description.common_wire_version - if ( - topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) - and common_wv - and common_wv < 13 - ): - self.effective_pref = ReadPreference.PRIMARY - else: - self.effective_pref = self.pref - - def __call__(self, selection: Selection) -> Selection: - """Apply this read preference to a Selection.""" - return self.effective_pref(selection) - - def __repr__(self) -> str: - return f"_AggWritePref(pref={self.pref!r})" - - # Proxy other calls to the effective_pref so that _AggWritePref can be - # used in place of an actual read preference. - def __getattr__(self, name: str) -> Any: - return getattr(self.effective_pref, name) - - -_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) - - -def make_read_preference( - mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 -) -> _ServerMode: - if mode == _PRIMARY: - if tag_sets not in (None, [{}]): - raise ConfigurationError("Read preference primary cannot be combined with tags") - if max_staleness != -1: - raise ConfigurationError( - "Read preference primary cannot be combined with maxStalenessSeconds" - ) - return Primary() - return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore - - -_MODES = ( - "PRIMARY", - "PRIMARY_PREFERRED", - "SECONDARY", - "SECONDARY_PREFERRED", - "NEAREST", -) - - -class ReadPreference: - """An enum that defines some commonly used read preference modes. - - Apps can also create a custom read preference, for example:: - - Nearest(tag_sets=[{"node":"analytics"}]) - - See :doc:`/examples/high_availability` for code examples. - - A read preference is used in three cases: - - :class:`~pymongo.mongo_client.MongoClient` connected to a single mongod: - - - ``PRIMARY``: Queries are allowed if the server is standalone or a replica - set primary. - - All other modes allow queries to standalone servers, to a replica set - primary, or to replica set secondaries. - - :class:`~pymongo.mongo_client.MongoClient` initialized with the - ``replicaSet`` option: - - - ``PRIMARY``: Read from the primary. This is the default, and provides the - strongest consistency. If no primary is available, raise - :class:`~pymongo.errors.AutoReconnect`. - - - ``PRIMARY_PREFERRED``: Read from the primary if available, or if there is - none, read from a secondary. - - - ``SECONDARY``: Read from a secondary. If no secondary is available, - raise :class:`~pymongo.errors.AutoReconnect`. - - - ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise - from the primary. - - - ``NEAREST``: Read from any member. - - :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a - sharded cluster of replica sets: - - - ``PRIMARY``: Read from the primary of the shard, or raise - :class:`~pymongo.errors.OperationFailure` if there is none. - This is the default. - - - ``PRIMARY_PREFERRED``: Read from the primary of the shard, or if there is - none, read from a secondary of the shard. - - - ``SECONDARY``: Read from a secondary of the shard, or raise - :class:`~pymongo.errors.OperationFailure` if there is none. - - - ``SECONDARY_PREFERRED``: Read from a secondary of the shard if available, - otherwise from the shard primary. - - - ``NEAREST``: Read from any shard member. - """ - - PRIMARY = Primary() - PRIMARY_PREFERRED = PrimaryPreferred() - SECONDARY = Secondary() - SECONDARY_PREFERRED = SecondaryPreferred() - NEAREST = Nearest() - - -def read_pref_mode_from_name(name: str) -> int: - """Get the read preference mode from mongos/uri name.""" - return _MONGOS_MODES.index(name) - - -class MovingAverage: - """Tracks an exponentially-weighted moving average.""" - - average: Optional[float] - - def __init__(self) -> None: - self.average = None - - def add_sample(self, sample: float) -> None: - if sample < 0: - # Likely system time change while waiting for hello response - # and not using time.monotonic. Ignore it, the next one will - # probably be valid. - return - if self.average is None: - self.average = sample - else: - # The Server Selection Spec requires an exponentially weighted - # average with alpha = 0.2. - self.average = 0.8 * self.average + 0.2 * sample - - def get(self) -> Optional[float]: - """Get the calculated average, or None if no samples yet.""" - return self.average +from pymongo.synchronous.read_preferences import * # noqa: F403 +from pymongo.synchronous.read_preferences import __doc__ as original_doc - def reset(self) -> None: - self.average = None +__doc__ = original_doc diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 6393fce0a1..4ee6b340d9 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -1,4 +1,4 @@ -# Copyright 2014-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,288 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Represent one server the driver is connected to.""" +"""Re-import of synchronous ServerDescription API for compatibility.""" from __future__ import annotations -import time -import warnings -from typing import Any, Mapping, Optional +from pymongo.synchronous.server_description import * # noqa: F403 +from pymongo.synchronous.server_description import __doc__ as original_doc -from bson import EPOCH_NAIVE -from bson.objectid import ObjectId -from pymongo.hello import Hello -from pymongo.server_type import SERVER_TYPE -from pymongo.typings import ClusterTime, _Address - - -class ServerDescription: - """Immutable representation of one server. - - :param address: A (host, port) pair - :param hello: Optional Hello instance - :param round_trip_time: Optional float - :param error: Optional, the last error attempting to connect to the server - :param round_trip_time: Optional float, the min latency from the most recent samples - """ - - __slots__ = ( - "_address", - "_server_type", - "_all_hosts", - "_tags", - "_replica_set_name", - "_primary", - "_max_bson_size", - "_max_message_size", - "_max_write_batch_size", - "_min_wire_version", - "_max_wire_version", - "_round_trip_time", - "_min_round_trip_time", - "_me", - "_is_writable", - "_is_readable", - "_ls_timeout_minutes", - "_error", - "_set_version", - "_election_id", - "_cluster_time", - "_last_write_date", - "_last_update_time", - "_topology_version", - ) - - def __init__( - self, - address: _Address, - hello: Optional[Hello] = None, - round_trip_time: Optional[float] = None, - error: Optional[Exception] = None, - min_round_trip_time: float = 0.0, - ) -> None: - self._address = address - if not hello: - hello = Hello({}) - - self._server_type = hello.server_type - self._all_hosts = hello.all_hosts - self._tags = hello.tags - self._replica_set_name = hello.replica_set_name - self._primary = hello.primary - self._max_bson_size = hello.max_bson_size - self._max_message_size = hello.max_message_size - self._max_write_batch_size = hello.max_write_batch_size - self._min_wire_version = hello.min_wire_version - self._max_wire_version = hello.max_wire_version - self._set_version = hello.set_version - self._election_id = hello.election_id - self._cluster_time = hello.cluster_time - self._is_writable = hello.is_writable - self._is_readable = hello.is_readable - self._ls_timeout_minutes = hello.logical_session_timeout_minutes - self._round_trip_time = round_trip_time - self._min_round_trip_time = min_round_trip_time - self._me = hello.me - self._last_update_time = time.monotonic() - self._error = error - self._topology_version = hello.topology_version - if error: - details = getattr(error, "details", None) - if isinstance(details, dict): - self._topology_version = details.get("topologyVersion") - - self._last_write_date: Optional[float] - if hello.last_write_date: - # Convert from datetime to seconds. - delta = hello.last_write_date - EPOCH_NAIVE - self._last_write_date = delta.total_seconds() - else: - self._last_write_date = None - - @property - def address(self) -> _Address: - """The address (host, port) of this server.""" - return self._address - - @property - def server_type(self) -> int: - """The type of this server.""" - return self._server_type - - @property - def server_type_name(self) -> str: - """The server type as a human readable string. - - .. versionadded:: 3.4 - """ - return SERVER_TYPE._fields[self._server_type] - - @property - def all_hosts(self) -> set[tuple[str, int]]: - """List of hosts, passives, and arbiters known to this server.""" - return self._all_hosts - - @property - def tags(self) -> Mapping[str, Any]: - return self._tags - - @property - def replica_set_name(self) -> Optional[str]: - """Replica set name or None.""" - return self._replica_set_name - - @property - def primary(self) -> Optional[tuple[str, int]]: - """This server's opinion about who the primary is, or None.""" - return self._primary - - @property - def max_bson_size(self) -> int: - return self._max_bson_size - - @property - def max_message_size(self) -> int: - return self._max_message_size - - @property - def max_write_batch_size(self) -> int: - return self._max_write_batch_size - - @property - def min_wire_version(self) -> int: - return self._min_wire_version - - @property - def max_wire_version(self) -> int: - return self._max_wire_version - - @property - def set_version(self) -> Optional[int]: - return self._set_version - - @property - def election_id(self) -> Optional[ObjectId]: - return self._election_id - - @property - def cluster_time(self) -> Optional[ClusterTime]: - return self._cluster_time - - @property - def election_tuple(self) -> tuple[Optional[int], Optional[ObjectId]]: - warnings.warn( - "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", - DeprecationWarning, - stacklevel=2, - ) - return self._set_version, self._election_id - - @property - def me(self) -> Optional[tuple[str, int]]: - return self._me - - @property - def logical_session_timeout_minutes(self) -> Optional[int]: - return self._ls_timeout_minutes - - @property - def last_write_date(self) -> Optional[float]: - return self._last_write_date - - @property - def last_update_time(self) -> float: - return self._last_update_time - - @property - def round_trip_time(self) -> Optional[float]: - """The current average latency or None.""" - # This override is for unittesting only! - if self._address in self._host_to_round_trip_time: - return self._host_to_round_trip_time[self._address] - - return self._round_trip_time - - @property - def min_round_trip_time(self) -> float: - """The min latency from the most recent samples.""" - return self._min_round_trip_time - - @property - def error(self) -> Optional[Exception]: - """The last error attempting to connect to the server, or None.""" - return self._error - - @property - def is_writable(self) -> bool: - return self._is_writable - - @property - def is_readable(self) -> bool: - return self._is_readable - - @property - def mongos(self) -> bool: - return self._server_type == SERVER_TYPE.Mongos - - @property - def is_server_type_known(self) -> bool: - return self.server_type != SERVER_TYPE.Unknown - - @property - def retryable_writes_supported(self) -> bool: - """Checks if this server supports retryable writes.""" - return ( - self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) - ) or self._server_type == SERVER_TYPE.LoadBalancer - - @property - def retryable_reads_supported(self) -> bool: - """Checks if this server supports retryable writes.""" - return self._max_wire_version >= 6 - - @property - def topology_version(self) -> Optional[Mapping[str, Any]]: - return self._topology_version - - def to_unknown(self, error: Optional[Exception] = None) -> ServerDescription: - unknown = ServerDescription(self.address, error=error) - unknown._topology_version = self.topology_version - return unknown - - def __eq__(self, other: Any) -> bool: - if isinstance(other, ServerDescription): - return ( - (self._address == other.address) - and (self._server_type == other.server_type) - and (self._min_wire_version == other.min_wire_version) - and (self._max_wire_version == other.max_wire_version) - and (self._me == other.me) - and (self._all_hosts == other.all_hosts) - and (self._tags == other.tags) - and (self._replica_set_name == other.replica_set_name) - and (self._set_version == other.set_version) - and (self._election_id == other.election_id) - and (self._primary == other.primary) - and (self._ls_timeout_minutes == other.logical_session_timeout_minutes) - and (self._error == other.error) - ) - - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __repr__(self) -> str: - errmsg = "" - if self.error: - errmsg = f", error={self.error!r}" - return "<{} {} server_type: {}, rtt: {}{}>".format( - self.__class__.__name__, - self.address, - self.server_type_name, - self.round_trip_time, - errmsg, - ) - - # For unittesting only. Use under no circumstances! - _host_to_round_trip_time: dict = {} +__doc__ = original_doc diff --git a/pymongo/synchronous/__init__.py b/pymongo/synchronous/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pymongo/aggregation.py b/pymongo/synchronous/aggregation.py similarity index 92% rename from pymongo/aggregation.py rename to pymongo/synchronous/aggregation.py index 574db10aca..a4b5a957cb 100644 --- a/pymongo/aggregation.py +++ b/pymongo/synchronous/aggregation.py @@ -18,20 +18,22 @@ from collections.abc import Callable, Mapping, MutableMapping from typing import TYPE_CHECKING, Any, Optional, Union -from pymongo import common -from pymongo.collation import validate_collation_or_none from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference, _AggWritePref +from pymongo.synchronous import common +from pymongo.synchronous.collation import validate_collation_or_none +from pymongo.synchronous.read_preferences import ReadPreference, _AggWritePref if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.command_cursor import CommandCursor - from pymongo.database import Database - from pymongo.pool import Connection - from pymongo.read_preferences import _ServerMode - from pymongo.server import Server - from pymongo.typings import _DocumentType, _Pipeline + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.command_cursor import CommandCursor + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.read_preferences import _ServerMode + from pymongo.synchronous.server import Server + from pymongo.synchronous.typings import _DocumentType, _Pipeline + +_IS_SYNC = True class _AggregationCommand: diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py new file mode 100644 index 0000000000..cb1b23d15b --- /dev/null +++ b/pymongo/synchronous/auth.py @@ -0,0 +1,658 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Authentication helpers.""" +from __future__ import annotations + +import functools +import hashlib +import hmac +import os +import socket +import typing +from base64 import standard_b64decode, standard_b64encode +from collections import namedtuple +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Mapping, + MutableMapping, + Optional, + cast, +) +from urllib.parse import quote + +from bson.binary import Binary +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep +from pymongo.synchronous.auth_aws import _authenticate_aws +from pymongo.synchronous.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, + _OIDCAzureCallback, + _OIDCGCPCallback, + _OIDCProperties, + _OIDCTestCallback, +) + +if TYPE_CHECKING: + from pymongo.synchronous.hello import Hello + from pymongo.synchronous.pool import Connection + +HAVE_KERBEROS = True +_USE_PRINCIPAL = False +try: + import winkerberos as kerberos # type:ignore[import] + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): + _USE_PRINCIPAL = True +except ImportError: + try: + import kerberos # type:ignore[import] + except ImportError: + HAVE_KERBEROS = False + + +_IS_SYNC = True + +MECHANISMS = frozenset( + [ + "GSSAPI", + "MONGODB-CR", + "MONGODB-OIDC", + "MONGODB-X509", + "MONGODB-AWS", + "PLAIN", + "SCRAM-SHA-1", + "SCRAM-SHA-256", + "DEFAULT", + ] +) +"""The authentication mechanisms supported by PyMongo.""" + + +class _Cache: + __slots__ = ("data",) + + _hash_val = hash("_Cache") + + def __init__(self) -> None: + self.data = None + + def __eq__(self, other: object) -> bool: + # Two instances must always compare equal. + if isinstance(other, _Cache): + return True + return NotImplemented + + def __ne__(self, other: object) -> bool: + if isinstance(other, _Cache): + return False + return NotImplemented + + def __hash__(self) -> int: + return self._hash_val + + +MongoCredential = namedtuple( + "MongoCredential", + ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], +) +"""A hashable namedtuple of values used for authentication.""" + + +GSSAPIProperties = namedtuple( + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] +) +"""Mechanism properties for GSSAPI authentication.""" + + +_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) +"""Mechanism properties for MONGODB-AWS authentication.""" + + +def _build_credentials_tuple( + mech: str, + source: Optional[str], + user: str, + passwd: str, + extra: Mapping[str, Any], + database: Optional[str], +) -> MongoCredential: + """Build and return a mechanism specific credentials tuple.""" + if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: + raise ConfigurationError(f"{mech} requires a username.") + if mech == "GSSAPI": + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for GSSAPI") + properties = extra.get("authmechanismproperties", {}) + service_name = properties.get("SERVICE_NAME", "mongodb") + canonicalize = bool(properties.get("CANONICALIZE_HOST_NAME", False)) + service_realm = properties.get("SERVICE_REALM") + props = GSSAPIProperties( + service_name=service_name, + canonicalize_host_name=canonicalize, + service_realm=service_realm, + ) + # Source is always $external. + return MongoCredential(mech, "$external", user, passwd, props, None) + elif mech == "MONGODB-X509": + if passwd is not None: + raise ConfigurationError("Passwords are not supported by MONGODB-X509") + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for MONGODB-X509") + # Source is always $external, user can be None. + return MongoCredential(mech, "$external", user, None, None, None) + elif mech == "MONGODB-AWS": + if user is not None and passwd is None: + raise ConfigurationError("username without a password is not supported by MONGODB-AWS") + if source is not None and source != "$external": + raise ConfigurationError( + "authentication source must be $external or None for MONGODB-AWS" + ) + + properties = extra.get("authmechanismproperties", {}) + aws_session_token = properties.get("AWS_SESSION_TOKEN") + aws_props = _AWSProperties(aws_session_token=aws_session_token) + # user can be None for temporary link-local EC2 credentials. + return MongoCredential(mech, "$external", user, passwd, aws_props, None) + elif mech == "MONGODB-OIDC": + properties = extra.get("authmechanismproperties", {}) + callback = properties.get("OIDC_CALLBACK") + human_callback = properties.get("OIDC_HUMAN_CALLBACK") + environ = properties.get("ENVIRONMENT") + token_resource = properties.get("TOKEN_RESOURCE", "") + default_allowed = [ + "*.mongodb.net", + "*.mongodb-dev.net", + "*.mongodb-qa.net", + "*.mongodbgov.net", + "localhost", + "127.0.0.1", + "::1", + ] + allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed) + msg = ( + "authentication with MONGODB-OIDC requires providing either a callback or a environment" + ) + if passwd is not None: + msg = "password is not supported by MONGODB-OIDC" + raise ConfigurationError(msg) + if callback or human_callback: + if environ is not None: + raise ConfigurationError(msg) + if callback and human_callback: + msg = "cannot set both OIDC_CALLBACK and OIDC_HUMAN_CALLBACK" + raise ConfigurationError(msg) + elif environ is not None: + if environ == "test": + if user is not None: + msg = "test environment for MONGODB-OIDC does not support username" + raise ConfigurationError(msg) + callback = _OIDCTestCallback() + elif environ == "azure": + passwd = None + if not token_resource: + raise ConfigurationError( + "Azure environment for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" + ) + callback = _OIDCAzureCallback(token_resource) + elif environ == "gcp": + passwd = None + if not token_resource: + raise ConfigurationError( + "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" + ) + callback = _OIDCGCPCallback(token_resource) + else: + raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}") + else: + raise ConfigurationError(msg) + + oidc_props = _OIDCProperties( + callback=callback, + human_callback=human_callback, + environment=environ, + allowed_hosts=allowed_hosts, + token_resource=token_resource, + username=user, + ) + return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache()) + + elif mech == "PLAIN": + source_database = source or database or "$external" + return MongoCredential(mech, source_database, user, passwd, None, None) + else: + source_database = source or database or "admin" + if passwd is None: + raise ConfigurationError("A password is required.") + return MongoCredential(mech, source_database, user, passwd, None, _Cache()) + + +def _xor(fir: bytes, sec: bytes) -> bytes: + """XOR two byte strings together.""" + return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) + + +def _parse_scram_response(response: bytes) -> Dict[bytes, bytes]: + """Split a scram response into key, value pairs.""" + return dict( + typing.cast(typing.Tuple[bytes, bytes], item.split(b"=", 1)) + for item in response.split(b",") + ) + + +def _authenticate_scram_start( + credentials: MongoCredential, mechanism: str +) -> tuple[bytes, bytes, MutableMapping[str, Any]]: + username = credentials.username + user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") + nonce = standard_b64encode(os.urandom(32)) + first_bare = b"n=" + user + b",r=" + nonce + + cmd = { + "saslStart": 1, + "mechanism": mechanism, + "payload": Binary(b"n,," + first_bare), + "autoAuthorize": 1, + "options": {"skipEmptyExchange": True}, + } + return nonce, first_bare, cmd + + +def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanism: str) -> None: + """Authenticate using SCRAM.""" + username = credentials.username + if mechanism == "SCRAM-SHA-256": + digest = "sha256" + digestmod = hashlib.sha256 + data = saslprep(credentials.password).encode("utf-8") + else: + digest = "sha1" + digestmod = hashlib.sha1 + data = _password_digest(username, credentials.password).encode("utf-8") + source = credentials.source + cache = credentials.cache + + # Make local + _hmac = hmac.HMAC + + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + assert isinstance(ctx, _ScramContext) + assert ctx.scram_data is not None + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) + res = conn.command(source, cmd) + + assert res is not None + server_first = res["payload"] + parsed = _parse_scram_response(server_first) + iterations = int(parsed[b"i"]) + if iterations < 4096: + raise OperationFailure("Server returned an invalid iteration count.") + salt = parsed[b"s"] + rnonce = parsed[b"r"] + if not rnonce.startswith(nonce): + raise OperationFailure("Server returned an invalid nonce.") + + without_proof = b"c=biws,r=" + rnonce + if cache.data: + client_key, server_key, csalt, citerations = cache.data + else: + client_key, server_key, csalt, citerations = None, None, None, None + + # Salt and / or iterations could change for a number of different + # reasons. Either changing invalidates the cache. + if not client_key or salt != csalt or iterations != citerations: + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) + client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() + server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() + cache.data = (client_key, server_key, salt, iterations) + stored_key = digestmod(client_key).digest() + auth_msg = b",".join((first_bare, server_first, without_proof)) + client_sig = _hmac(stored_key, auth_msg, digestmod).digest() + client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) + client_final = b",".join((without_proof, client_proof)) + + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) + + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(client_final), + } + res = conn.command(source, cmd) + + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): + raise OperationFailure("Server returned an invalid signature.") + + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. + if not res["done"]: + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(b""), + } + res = conn.command(source, cmd) + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") + + +def _password_digest(username: str, password: str) -> str: + """Get a password digest to use for authentication.""" + if not isinstance(password, str): + raise TypeError("password must be an instance of str") + if len(password) == 0: + raise ValueError("password can't be empty") + if not isinstance(username, str): + raise TypeError("username must be an instance of str") + + md5hash = hashlib.md5() # noqa: S324 + data = f"{username}:mongo:{password}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _auth_key(nonce: str, username: str, password: str) -> str: + """Get an auth key to use for authentication.""" + digest = _password_digest(username, password) + md5hash = hashlib.md5() # noqa: S324 + data = f"{nonce}{username}{digest}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _canonicalize_hostname(hostname: str) -> str: + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( + hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME + )[0] + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + + return name[0].lower() + + +def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using GSSAPI.""" + if not HAVE_KERBEROS: + raise ConfigurationError( + 'The "kerberos" module must be installed to use GSSAPI authentication.' + ) + + try: + username = credentials.username + password = credentials.password + props = credentials.mechanism_properties + # Starting here and continuing through the while loop below - establish + # the security context. See RFC 4752, Section 3.1, first paragraph. + host = conn.address[0] + if props.canonicalize_host_name: + host = _canonicalize_hostname(host) + service = props.service_name + "@" + host + if props.service_realm is not None: + service = service + "@" + props.service_realm + + if password is not None: + if _USE_PRINCIPAL: + # Note that, though we use unquote_plus for unquoting URI + # options, we use quote here. Microsoft's UrlUnescape (used + # by WinKerberos) doesn't support +. + principal = ":".join((quote(username), quote(password))) + result, ctx = kerberos.authGSSClientInit( + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) + else: + if "@" in username: + user, domain = username.split("@", 1) + else: + user, domain = username, None + result, ctx = kerberos.authGSSClientInit( + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) + else: + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + + if result != kerberos.AUTH_GSS_COMPLETE: + raise OperationFailure("Kerberos context failed to initialize.") + + try: + # pykerberos uses a weird mix of exceptions and return values + # to indicate errors. + # 0 == continue, 1 == complete, -1 == error + # Only authGSSClientStep can return 0. + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos failure in step function.") + + # Start a SASL conversation with mongod/s + # Note: pykerberos deals with base64 encoded byte strings. + # Since mongo accepts base64 strings as the payload we don't + # have to use bson.binary.Binary. + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslStart": 1, + "mechanism": "GSSAPI", + "payload": payload, + "autoAuthorize": 1, + } + response = conn.command("$external", cmd) + + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) + if result == -1: + raise OperationFailure("Unknown kerberos failure in step function.") + + payload = kerberos.authGSSClientResponse(ctx) or "" + + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + response = conn.command("$external", cmd) + + if result == kerberos.AUTH_GSS_COMPLETE: + break + else: + raise OperationFailure("Kerberos authentication failed to complete.") + + # Once the security context is established actually authenticate. + # See RFC 4752, Section 3.1, last two paragraphs. + if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") + + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") + + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + conn.command("$external", cmd) + + finally: + kerberos.authGSSClientClean(ctx) + + except kerberos.KrbError as exc: + raise OperationFailure(str(exc)) from None + + +def _authenticate_plain(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using SASL PLAIN (RFC 4616)""" + source = credentials.source + username = credentials.username + password = credentials.password + payload = (f"\x00{username}\x00{password}").encode() + cmd = { + "saslStart": 1, + "mechanism": "PLAIN", + "payload": Binary(payload), + "autoAuthorize": 1, + } + conn.command(source, cmd) + + +def _authenticate_x509(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-X509.""" + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials, conn.address).speculate_command() + conn.command("$external", cmd) + + +def _authenticate_mongo_cr(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-CR.""" + source = credentials.source + username = credentials.username + password = credentials.password + # Get a nonce + response = conn.command(source, {"getnonce": 1}) + nonce = response["nonce"] + key = _auth_key(nonce, username, password) + + # Actually authenticate + query = {"authenticate": 1, "user": username, "nonce": nonce, "key": key} + conn.command(source, query) + + +def _authenticate_default(credentials: MongoCredential, conn: Connection) -> None: + if conn.max_wire_version >= 7: + if conn.negotiated_mechs: + mechs = conn.negotiated_mechs + else: + source = credentials.source + cmd = conn.hello_cmd() + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = (conn.command(source, cmd, publish_events=False)).get("saslSupportedMechs", []) + if "SCRAM-SHA-256" in mechs: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-256") + else: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + else: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + + +_AUTH_MAP: Mapping[str, Callable[..., None]] = { + "GSSAPI": _authenticate_gssapi, + "MONGODB-CR": _authenticate_mongo_cr, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, +} + + +class _AuthContext: + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: + self.credentials = credentials + self.speculative_authenticate: Optional[Mapping[str, Any]] = None + self.address = address + + @staticmethod + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return cast(_AuthContext, spec_cls(creds, address)) + return None + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + raise NotImplementedError + + def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: + self.speculative_authenticate = hello.speculative_authenticate + + def speculate_succeeded(self) -> bool: + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__( + self, credentials: MongoCredential, address: tuple[str, int], mechanism: str + ) -> None: + super().__init__(credentials, address) + self.scram_data: Optional[tuple[bytes, bytes]] = None + self.mechanism = mechanism + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd["db"] = self.credentials.source + # Save for later use. + self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self) -> MutableMapping[str, Any]: + cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} + if self.credentials.username is not None: + cmd["user"] = self.credentials.username + return cmd + + +class _OIDCContext(_AuthContext): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.get_spec_auth_cmd() + if cmd is None: + return None + cmd["db"] = self.credentials.source + return cmd + + +_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), +} + + +def authenticate( + credentials: MongoCredential, conn: Connection, reauthenticate: bool = False +) -> None: + """Authenticate connection.""" + mechanism = credentials.mechanism + auth_func = _AUTH_MAP[mechanism] + if mechanism == "MONGODB-OIDC": + _authenticate_oidc(credentials, conn, reauthenticate) + else: + auth_func(credentials, conn) diff --git a/pymongo/auth_aws.py b/pymongo/synchronous/auth_aws.py similarity index 96% rename from pymongo/auth_aws.py rename to pymongo/synchronous/auth_aws.py index 042eee5a73..04ceb95b34 100644 --- a/pymongo/auth_aws.py +++ b/pymongo/synchronous/auth_aws.py @@ -23,8 +23,10 @@ if TYPE_CHECKING: from bson.typings import _ReadableBuffer - from pymongo.auth import MongoCredential - from pymongo.pool import Connection + from pymongo.synchronous.auth import MongoCredential + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True def _authenticate_aws(credentials: MongoCredential, conn: Connection) -> None: @@ -36,7 +38,6 @@ def _authenticate_aws(credentials: MongoCredential, conn: Connection) -> None: "MONGODB-AWS authentication requires pymongo-auth-aws: " "install with: python -m pip install 'pymongo[aws]'" ) from e - # Delayed import. from pymongo_auth_aws.auth import ( # type:ignore[import] set_cached_credentials, diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py new file mode 100644 index 0000000000..f59b4d54a1 --- /dev/null +++ b/pymongo/synchronous/auth_oidc.py @@ -0,0 +1,378 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + +import abc +import os +import threading +import time +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union +from urllib.parse import quote + +import bson +from bson.binary import Binary +from pymongo._azure_helpers import _get_azure_response +from pymongo._csot import remaining +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers_constants import _AUTHENTICATION_FAILURE_CODE + +if TYPE_CHECKING: + from pymongo.synchronous.auth import MongoCredential + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +@dataclass +class OIDCIdPInfo: + issuer: str + clientId: Optional[str] = field(default=None) + requestScopes: Optional[list[str]] = field(default=None) + + +@dataclass +class OIDCCallbackContext: + timeout_seconds: float + username: str + version: int + refresh_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + + +@dataclass +class OIDCCallbackResult: + access_token: str + expires_in_seconds: Optional[float] = field(default=None) + refresh_token: Optional[str] = field(default=None) + + +class OIDCCallback(abc.ABC): + """A base class for defining OIDC callbacks.""" + + @abc.abstractmethod + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + """Convert the given BSON value into our own type.""" + + +@dataclass +class _OIDCProperties: + callback: Optional[OIDCCallback] = field(default=None) + human_callback: Optional[OIDCCallback] = field(default=None) + environment: Optional[str] = field(default=None) + allowed_hosts: list[str] = field(default_factory=list) + token_resource: Optional[str] = field(default=None) + username: str = "" + + +"""Mechanism properties for MONGODB-OIDC authentication.""" + +TOKEN_BUFFER_MINUTES = 5 +HUMAN_CALLBACK_TIMEOUT_SECONDS = 5 * 60 +CALLBACK_VERSION = 1 +MACHINE_CALLBACK_TIMEOUT_SECONDS = 60 +TIME_BETWEEN_CALLS_SECONDS = 0.1 + + +def _get_authenticator( + credentials: MongoCredential, address: tuple[str, int] +) -> _OIDCAuthenticator: + if credentials.cache.data: + return credentials.cache.data + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + + # Validate that the address is allowed. + if not properties.environment: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache data. + credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) + return credentials.cache.data + + +class _OIDCTestCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("OIDC_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "test" provider requires "OIDC_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAWSCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("AWS_WEB_IDENTITY_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "aws" provider requires "AWS_WEB_IDENTITY_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAzureCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_azure_response(self.token_resource, context.username, context.timeout_seconds) + return OIDCCallbackResult( + access_token=resp["access_token"], expires_in_seconds=resp["expires_in"] + ) + + +class _OIDCGCPCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_gcp_response(self.token_resource, context.timeout_seconds) + return OIDCCallbackResult(access_token=resp["access_token"]) + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + refresh_token: Optional[str] = field(default=None) + access_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + token_gen_id: int = field(default=0) + lock: threading.Lock = field(default_factory=threading.Lock) + last_call_time: float = field(default=0) + + def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle a reauthenticate from the server.""" + # Invalidate the token for the connection. + self._invalidate(conn) + # Call the appropriate auth logic for the callback type. + if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle an initial authenticate request.""" + # First handle speculative auth. + # If it succeeded, we are done. + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + if resp and resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return resp + + # If spec auth failed, call the appropriate auth logic for the callback type. + # We cannot assume that the token is invalid, because a proxy may have been + # involved that stripped the speculative auth information. + if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]: + """Get the appropriate speculative auth command.""" + if not self.access_token: + return None + return self._get_start_command({"jwt": self.access_token}) + + def _authenticate_machine(self, conn: Connection) -> Mapping[str, Any]: + # If there is a cached access token, try to authenticate with it. If + # authentication fails with error code 18, invalidate the access token, + # fetch a new access token, and try to authenticate again. If authentication + # fails for any other reason, raise the error to the user. + if self.access_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + return self._authenticate_machine(conn) + raise + return self._sasl_start_jwt(conn) + + def _authenticate_human(self, conn: Connection) -> Optional[Mapping[str, Any]]: + # If we have a cached access token, try a JwtStepRequest. + # authentication fails with error code 18, invalidate the access token, + # and try to authenticate again. If authentication fails for any other + # reason, raise the error to the user. + if self.access_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + return self._authenticate_human(conn) + raise + + # If we have a cached refresh token, try a JwtStepRequest with that. + # If authentication fails with error code 18, invalidate the access and + # refresh tokens, and try to authenticate again. If authentication fails for + # any other reason, raise the error to the user. + if self.refresh_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + self.refresh_token = None + return self._authenticate_human(conn) + raise + + # Start a new Two-Step SASL conversation. + # Run a PrincipalStepRequest to get the IdpInfo. + cmd = self._get_start_command(None) + start_resp = self._run_command(conn, cmd) + # Attempt to authenticate with a JwtStepRequest. + return self._sasl_continue_jwt(conn, start_resp) + + def _get_access_token(self) -> Optional[str]: + properties = self.properties + cb: Union[None, OIDCCallback] + resp: OIDCCallbackResult + + is_human = properties.human_callback is not None + if is_human and self.idp_info is None: + return None + + if properties.callback: + cb = properties.callback + if properties.human_callback: + cb = properties.human_callback + + prev_token = self.access_token + if prev_token: + return prev_token + + if cb is None and not prev_token: + return None + + if not prev_token and cb is not None: + with self.lock: + # See if the token was changed while we were waiting for the + # lock. + new_token = self.access_token + if new_token != prev_token: + return new_token + + # Ensure that we are waiting a min time between callback invocations. + delta = time.time() - self.last_call_time + if delta < TIME_BETWEEN_CALLS_SECONDS: + time.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + self.last_call_time = time.time() + + if is_human: + timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS + assert self.idp_info is not None + else: + timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) + context = OIDCCallbackContext( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=self.refresh_token, + idp_info=self.idp_info, + username=self.properties.username, + ) + resp = cb.fetch(context) + if not isinstance(resp, OIDCCallbackResult): + raise ValueError("Callback result must be of type OIDCCallbackResult") + self.refresh_token = resp.refresh_token + self.access_token = resp.access_token + self.token_gen_id += 1 + + return self.access_token + + def _run_command(self, conn: Connection, cmd: MutableMapping[str, Any]) -> Mapping[str, Any]: + try: + return conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] + except OperationFailure as e: + if self._is_auth_error(e): + self._invalidate(conn) + raise + + def _is_auth_error(self, err: Exception) -> bool: + if not isinstance(err, OperationFailure): + return False + return err.code == _AUTHENTICATION_FAILURE_CODE + + def _invalidate(self, conn: Connection) -> None: + # Ignore the invalidation if a token gen id is given and is less than our + # current token gen id. + token_gen_id = conn.oidc_token_gen_id or 0 + if token_gen_id is not None and token_gen_id < self.token_gen_id: + return + self.access_token = None + + def _sasl_continue_jwt( + self, conn: Connection, start_resp: Mapping[str, Any] + ) -> Mapping[str, Any]: + self.access_token = None + self.refresh_token = None + start_payload: dict = bson.decode(start_resp["payload"]) + if "issuer" in start_payload: + self.idp_info = OIDCIdPInfo(**start_payload) + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_continue_command({"jwt": access_token}, start_resp) + return self._run_command(conn, cmd) + + def _sasl_start_jwt(self, conn: Connection) -> Mapping[str, Any]: + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_start_command({"jwt": access_token}) + return self._run_command(conn, cmd) + + def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: + if payload is None: + principal_name = self.username + if principal_name: + payload = {"n": principal_name} + else: + payload = {} + bin_payload = Binary(bson.encode(payload)) + return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} + + def _get_continue_command( + self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] + ) -> MutableMapping[str, Any]: + bin_payload = Binary(bson.encode(payload)) + return { + "saslContinue": 1, + "payload": bin_payload, + "conversationId": start_resp["conversationId"], + } + + +def _authenticate_oidc( + credentials: MongoCredential, conn: Connection, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, conn.address) + if reauthenticate: + return authenticator.reauthenticate(conn) + else: + return authenticator.authenticate(conn) diff --git a/pymongo/bulk.py b/pymongo/synchronous/bulk.py similarity index 96% rename from pymongo/bulk.py rename to pymongo/synchronous/bulk.py index e1c46105f7..781acdb4d8 100644 --- a/pymongo/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -34,21 +34,23 @@ from bson.objectid import ObjectId from bson.raw_bson import RawBSONDocument -from pymongo import _csot, common -from pymongo.client_session import ClientSession, _validate_session_write_concern -from pymongo.common import ( - validate_is_document_type, - validate_ok_for_replace, - validate_ok_for_update, -) +from pymongo import _csot from pymongo.errors import ( BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure, ) -from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc -from pymongo.message import ( +from pymongo.helpers_constants import _RETRYABLE_ERROR_CODES +from pymongo.synchronous import common +from pymongo.synchronous.client_session import ClientSession, _validate_session_write_concern +from pymongo.synchronous.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.synchronous.helpers import _get_wce_doc +from pymongo.synchronous.message import ( _DELETE, _INSERT, _UPDATE, @@ -56,13 +58,15 @@ _EncryptedBulkWriteContext, _randint, ) -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern if TYPE_CHECKING: - from pymongo.collection import Collection - from pymongo.pool import Connection - from pymongo.typings import _DocumentOut, _DocumentType, _Pipeline + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.typings import _DocumentOut, _DocumentType, _Pipeline + +_IS_SYNC = True _DELETE_ALL: int = 0 _DELETE_ONE: int = 1 @@ -449,7 +453,7 @@ def retryable_bulk( ) client = self.collection.database.client - client._retryable_write( + _ = client._retryable_write( self.is_retryable, retryable_bulk, session, diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py new file mode 100644 index 0000000000..1b22ed9be1 --- /dev/null +++ b/pymongo/synchronous/change_stream.py @@ -0,0 +1,497 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union + +from bson import CodecOptions, _bson_to_dict +from bson.raw_bson import RawBSONDocument +from bson.timestamp import Timestamp +from pymongo import _csot +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) +from pymongo.synchronous import common +from pymongo.synchronous.aggregation import ( + _AggregationCommand, + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) +from pymongo.synchronous.collation import validate_collation_or_none +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.typings import _CollationIn, _DocumentType, _Pipeline + +_IS_SYNC = True + +# The change streams spec considers the following server errors from the +# getMore command non-resumable. All other getMore errors are resumable. +_RESUMABLE_GETMORE_ERRORS = frozenset( + [ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotWritablePrimary + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + ] +) + + +if TYPE_CHECKING: + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.database import Database + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + + +def _resumable(exc: PyMongoError) -> bool: + """Return True if given a resumable change stream error.""" + if isinstance(exc, (ConnectionFailure, CursorNotFound)): + return True + if isinstance(exc, OperationFailure): + if exc._max_wire_version is None: + return False + return ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) + return False + + +class ChangeStream(Generic[_DocumentType]): + """The internal abstract base class for change stream cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.collection.Collection.watch`, + :meth:`pymongo.database.Database.watch`, or + :meth:`pymongo.mongo_client.MongoClient.watch` instead. + + .. versionadded:: 3.6 + .. seealso:: The MongoDB documentation on `changeStreams `_. + """ + + def __init__( + self, + target: Union[ + MongoClient[_DocumentType], + Database[_DocumentType], + Collection[_DocumentType], + ], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional[ClientSession], + start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> None: + if pipeline is None: + pipeline = [] + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) + validate_collation_or_none(collation) + common.validate_non_negative_integer_or_none("batchSize", batch_size) + + self._decode_custom = False + self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options + if target.codec_options.type_registry._decoder_map: + self._decode_custom = True + # Keep the type registry so that we support encoding custom types + # in the pipeline. + self._target = target.with_options( # type: ignore + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) + else: + self._target = target + + self._pipeline = copy.deepcopy(pipeline) + self._full_document = full_document + self._full_document_before_change = full_document_before_change + self._uses_start_after = start_after is not None + self._uses_resume_after = resume_after is not None + self._resume_token = copy.deepcopy(start_after or resume_after) + self._max_await_time_ms = max_await_time_ms + self._batch_size = batch_size + self._collation = collation + self._start_at_operation_time = start_at_operation_time + self._session = session + self._comment = comment + self._closed = False + self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events + + def _initialize_cursor(self) -> None: + # Initialize cursor. + self._cursor = self._create_cursor() + + @property + def _aggregation_command_class(self) -> Type[_AggregationCommand]: + """The aggregation command class to be used.""" + raise NotImplementedError + + @property + def _client(self) -> MongoClient: + """The client against which the aggregation commands for + this ChangeStream will be run. + """ + raise NotImplementedError + + def _change_stream_options(self) -> dict[str, Any]: + """Return the options dict for the $changeStream pipeline stage.""" + options: dict[str, Any] = {} + if self._full_document is not None: + options["fullDocument"] = self._full_document + + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + + resume_token = self.resume_token + if resume_token is not None: + if self._uses_start_after: + options["startAfter"] = resume_token + else: + options["resumeAfter"] = resume_token + + elif self._start_at_operation_time is not None: + options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + + return options + + def _command_options(self) -> dict[str, Any]: + """Return the options dict for the aggregation command.""" + options = {} + if self._max_await_time_ms is not None: + options["maxAwaitTimeMS"] = self._max_await_time_ms + if self._batch_size is not None: + options["batchSize"] = self._batch_size + return options + + def _aggregation_pipeline(self) -> list[dict[str, Any]]: + """Return the full aggregation pipeline for this ChangeStream.""" + options = self._change_stream_options() + full_pipeline: list = [{"$changeStream": options}] + full_pipeline.extend(self._pipeline) + return full_pipeline + + def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. + + This is implemented as a callback because we need access to the wire + version in order to determine whether to cache this value. + """ + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and conn.max_wire_version >= 7 + ): + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + f"response : {result!r}" + ) + + def _run_aggregation_cmd( + self, session: Optional[ClientSession], explicit_session: bool + ) -> CommandCursor: + """Run the full aggregation pipeline for this ChangeStream and return + the corresponding CommandCursor. + """ + cmd = self._aggregation_command_class( + self._target, + CommandCursor, + self._aggregation_pipeline(), + self._command_options(), + explicit_session, + result_processor=self._process_result, + comment=self._comment, + ) + return self._client._retryable_read( + cmd.get_cursor, + self._target._read_preference_for(session), + session, + operation=_Op.AGGREGATE, + ) + + def _create_cursor(self) -> CommandCursor: + with self._client._tmp_session(self._session, close=False) as s: + return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) + + def _resume(self) -> None: + """Reestablish this change stream after a resumable error.""" + try: + self._cursor.close() + except PyMongoError: + pass + self._cursor = self._create_cursor() + + def close(self) -> None: + """Close this ChangeStream.""" + self._closed = True + self._cursor.close() + + def __iter__(self) -> ChangeStream[_DocumentType]: + return self + + @property + def resume_token(self) -> Optional[Mapping[str, Any]]: + """The cached resume token that will be used to resume after the most + recently returned change. + + .. versionadded:: 3.9 + """ + return copy.deepcopy(self._resume_token) + + @_csot.apply + def next(self) -> _DocumentType: + """Advance the cursor. + + This method blocks until the next change document is returned or an + unrecoverable error is raised. This method is used when iterating over + all changes in the cursor. For example:: + + try: + resume_token = None + pipeline = [{'$match': {'operationType': 'insert'}}] + async with db.collection.watch(pipeline) as stream: + async for insert_change in stream: + print(insert_change) + resume_token = stream.resume_token + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + if resume_token is None: + # There is no usable resume token because there was a + # failure during ChangeStream initialization. + logging.error('...') + else: + # Use the interrupted ChangeStream's resume token to create + # a new ChangeStream. The new stream will continue from the + # last seen insert change without missing any events. + async with db.collection.watch( + pipeline, resume_after=resume_token) as stream: + async for insert_change in stream: + print(insert_change) + + Raises :exc:`StopIteration` if this ChangeStream is closed. + """ + while self.alive: + doc = self.try_next() + if doc is not None: + return doc + + raise StopIteration + + __next__ = next + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration` and :meth:`try_next` can return ``None``. + + .. versionadded:: 3.8 + """ + return not self._closed + + @_csot.apply + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next change document without waiting + indefinitely for the next change. For example:: + + async with db.collection.watch() as stream: + while stream.alive: + change = await stream.try_next() + # Note that the ChangeStream's resume token may be updated + # even when no changes are returned. + print("Current resume token: %r" % (stream.resume_token,)) + if change is not None: + print("Change document: %r" % (change,)) + continue + # We end up here when there are no recent changes. + # Sleep for a while before trying again to avoid flooding + # the server with getMore requests when no changes are + # available. + time.sleep(10) + + If no change document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there have been no changes) then ``None`` is returned. + + :return: The next change document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 3.8 + """ + if not self._closed and not self._cursor.alive: + self._resume() + + # Attempt to get the next change with at most one getMore and at most + # one resume attempt. + try: + try: + change = self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + self._resume() + change = self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + self.close() + raise + except Exception: + self.close() + raise + + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True + + # If no changes are available. + if change is None: + # We have either iterated over all documents in the cursor, + # OR the most-recently returned batch is empty. In either case, + # update the cached resume token with the postBatchResumeToken if + # one was returned. We also clear the startAtOperationTime. + if self._cursor._post_batch_resume_token is not None: + self._resume_token = self._cursor._post_batch_resume_token + self._start_at_operation_time = None + return change + + # Else, changes are available. + try: + resume_token = change["_id"] + except KeyError: + self.close() + raise InvalidOperation( + "Cannot provide resume functionality when the resume token is missing." + ) from None + + # If this is the last change document from the current batch, cache the + # postBatchResumeToken. + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: + resume_token = self._cursor._post_batch_resume_token + + # Hereafter, don't use startAfter; instead use resumeAfter. + self._uses_start_after = False + self._uses_resume_after = True + + # Cache the resume token and clear startAtOperationTime. + self._resume_token = resume_token + self._start_at_operation_time = None + + if self._decode_custom: + return _bson_to_dict(change.raw, self._orig_codec_options) + return change + + def __enter__(self) -> ChangeStream[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + +class CollectionChangeStream(ChangeStream[_DocumentType]): + """A change stream that watches changes on a single collection. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.collection.Collection.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: Collection[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: + return _CollectionAggregationCommand + + @property + def _client(self) -> MongoClient[_DocumentType]: + return self._target.database.client + + +class DatabaseChangeStream(ChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in a database. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.database.Database.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: Database[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: + return _DatabaseAggregationCommand + + @property + def _client(self) -> MongoClient[_DocumentType]: + return self._target.client + + +class ClusterChangeStream(DatabaseChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in the cluster. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.mongo_client.MongoClient.watch` instead. + + .. versionadded:: 3.7 + """ + + def _change_stream_options(self) -> dict[str, Any]: + options = super()._change_stream_options() + options["allChangesForCluster"] = True + return options diff --git a/pymongo/synchronous/client_options.py b/pymongo/synchronous/client_options.py new file mode 100644 index 0000000000..58042220fb --- /dev/null +++ b/pymongo/synchronous/client_options.py @@ -0,0 +1,334 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools to parse mongo client options.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, cast + +from bson.codec_options import _parse_codec_options +from pymongo.errors import ConfigurationError +from pymongo.read_concern import ReadConcern +from pymongo.ssl_support import get_ssl_context +from pymongo.synchronous import common +from pymongo.synchronous.compression_support import CompressionSettings +from pymongo.synchronous.monitoring import _EventListener, _EventListeners +from pymongo.synchronous.pool import PoolOptions +from pymongo.synchronous.read_preferences import ( + _ServerMode, + make_read_preference, + read_pref_mode_from_name, +) +from pymongo.synchronous.server_selectors import any_server_selector +from pymongo.write_concern import WriteConcern, validate_boolean + +if TYPE_CHECKING: + from bson.codec_options import CodecOptions + from pymongo.pyopenssl_context import SSLContext + from pymongo.synchronous.auth import MongoCredential + from pymongo.synchronous.encryption_options import AutoEncryptionOpts + from pymongo.synchronous.topology_description import _ServerSelector + +_IS_SYNC = True + + +def _parse_credentials( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> Optional[MongoCredential]: + """Parse authentication credentials.""" + mechanism = options.get("authmechanism", "DEFAULT" if username else None) + source = options.get("authsource") + if username or mechanism: + from pymongo.synchronous.auth import _build_credentials_tuple + + return _build_credentials_tuple(mechanism, source, username, password, options, database) + return None + + +def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: + """Parse read preference options.""" + if "read_preference" in options: + return options["read_preference"] + + name = options.get("readpreference", "primary") + mode = read_pref_mode_from_name(name) + tags = options.get("readpreferencetags") + max_staleness = options.get("maxstalenessseconds", -1) + return make_read_preference(mode, tags, max_staleness) + + +def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: + """Parse write concern options.""" + concern = options.get("w") + wtimeout = options.get("wtimeoutms") + j = options.get("journal") + fsync = options.get("fsync") + return WriteConcern(concern, wtimeout, j, fsync) + + +def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: + """Parse read concern options.""" + concern = options.get("readconcernlevel") + return ReadConcern(concern) + + +def _parse_ssl_options(options: Mapping[str, Any]) -> tuple[Optional[SSLContext], bool]: + """Parse ssl options.""" + use_tls = options.get("tls") + if use_tls is not None: + validate_boolean("tls", use_tls) + + certfile = options.get("tlscertificatekeyfile") + passphrase = options.get("tlscertificatekeyfilepassword") + ca_certs = options.get("tlscafile") + crlfile = options.get("tlscrlfile") + allow_invalid_certificates = options.get("tlsallowinvalidcertificates", False) + allow_invalid_hostnames = options.get("tlsallowinvalidhostnames", False) + disable_ocsp_endpoint_check = options.get("tlsdisableocspendpointcheck", False) + + enabled_tls_opts = [] + for opt in ( + "tlscertificatekeyfile", + "tlscertificatekeyfilepassword", + "tlscafile", + "tlscrlfile", + ): + # Any non-null value of these options implies tls=True. + if opt in options and options[opt]: + enabled_tls_opts.append(opt) + for opt in ( + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", + ): + # A value of False for these options implies tls=True. + if opt in options and not options[opt]: + enabled_tls_opts.append(opt) + + if enabled_tls_opts: + if use_tls is None: + # Implicitly enable TLS when one of the tls* options is set. + use_tls = True + elif not use_tls: + # Error since tls is explicitly disabled but a tls option is set. + raise ConfigurationError( + "TLS has not been enabled but the " + "following tls parameters have been set: " + "%s. Please set `tls=True` or remove." % ", ".join(enabled_tls_opts) + ) + + if use_tls: + ctx = get_ssl_context( + certfile, + passphrase, + ca_certs, + crlfile, + allow_invalid_certificates, + allow_invalid_hostnames, + disable_ocsp_endpoint_check, + ) + return ctx, allow_invalid_hostnames + return None, allow_invalid_hostnames + + +def _parse_pool_options( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> PoolOptions: + """Parse connection pool options.""" + credentials = _parse_credentials(username, password, database, options) + max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) + min_pool_size = options.get("minpoolsize", common.MIN_POOL_SIZE) + max_idle_time_seconds = options.get("maxidletimems", common.MAX_IDLE_TIME_SEC) + if max_pool_size is not None and min_pool_size > max_pool_size: + raise ValueError("minPoolSize must be smaller or equal to maxPoolSize") + connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) + socket_timeout = options.get("sockettimeoutms") + wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) + event_listeners = cast(Optional[Sequence[_EventListener]], options.get("event_listeners")) + appname = options.get("appname") + driver = options.get("driver") + server_api = options.get("server_api") + compression_settings = CompressionSettings( + options.get("compressors", []), options.get("zlibcompressionlevel", -1) + ) + ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) + load_balanced = options.get("loadbalanced") + max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) + return PoolOptions( + max_pool_size, + min_pool_size, + max_idle_time_seconds, + connect_timeout, + socket_timeout, + wait_queue_timeout, + ssl_context, + tls_allow_invalid_hostnames, + _EventListeners(event_listeners), + appname, + driver, + compression_settings, + max_connecting=max_connecting, + server_api=server_api, + load_balanced=load_balanced, + credentials=credentials, + ) + + +class ClientOptions: + """Read only configuration options for a MongoClient. + + Should not be instantiated directly by application developers. Access + a client's options via :attr:`pymongo.mongo_client.MongoClient.options` + instead. + """ + + def __init__( + self, username: str, password: str, database: Optional[str], options: Mapping[str, Any] + ): + self.__options = options + self.__codec_options = _parse_codec_options(options) + self.__direct_connection = options.get("directconnection") + self.__local_threshold_ms = options.get("localthresholdms", common.LOCAL_THRESHOLD_MS) + # self.__server_selection_timeout is in seconds. Must use full name for + # common.SERVER_SELECTION_TIMEOUT because it is set directly by tests. + self.__server_selection_timeout = options.get( + "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT + ) + self.__pool_options = _parse_pool_options(username, password, database, options) + self.__read_preference = _parse_read_preference(options) + self.__replica_set_name = options.get("replicaset") + self.__write_concern = _parse_write_concern(options) + self.__read_concern = _parse_read_concern(options) + self.__connect = options.get("connect") + self.__heartbeat_frequency = options.get("heartbeatfrequencyms", common.HEARTBEAT_FREQUENCY) + self.__retry_writes = options.get("retrywrites", common.RETRY_WRITES) + self.__retry_reads = options.get("retryreads", common.RETRY_READS) + self.__server_selector = options.get("server_selector", any_server_selector) + self.__auto_encryption_opts = options.get("auto_encryption_opts") + self.__load_balanced = options.get("loadbalanced") + self.__timeout = options.get("timeoutms") + self.__server_monitoring_mode = options.get( + "servermonitoringmode", common.SERVER_MONITORING_MODE + ) + + @property + def _options(self) -> Mapping[str, Any]: + """The original options used to create this ClientOptions.""" + return self.__options + + @property + def connect(self) -> Optional[bool]: + """Whether to begin discovering a MongoDB topology automatically.""" + return self.__connect + + @property + def codec_options(self) -> CodecOptions: + """A :class:`~bson.codec_options.CodecOptions` instance.""" + return self.__codec_options + + @property + def direct_connection(self) -> Optional[bool]: + """Whether to connect to the deployment in 'Single' topology.""" + return self.__direct_connection + + @property + def local_threshold_ms(self) -> int: + """The local threshold for this instance.""" + return self.__local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + """The server selection timeout for this instance in seconds.""" + return self.__server_selection_timeout + + @property + def server_selector(self) -> _ServerSelector: + return self.__server_selector + + @property + def heartbeat_frequency(self) -> int: + """The monitoring frequency in seconds.""" + return self.__heartbeat_frequency + + @property + def pool_options(self) -> PoolOptions: + """A :class:`~pymongo.pool.PoolOptions` instance.""" + return self.__pool_options + + @property + def read_preference(self) -> _ServerMode: + """A read preference instance.""" + return self.__read_preference + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self.__replica_set_name + + @property + def write_concern(self) -> WriteConcern: + """A :class:`~pymongo.write_concern.WriteConcern` instance.""" + return self.__write_concern + + @property + def read_concern(self) -> ReadConcern: + """A :class:`~pymongo.read_concern.ReadConcern` instance.""" + return self.__read_concern + + @property + def timeout(self) -> Optional[float]: + """The configured timeoutMS converted to seconds, or None. + + .. versionadded:: 4.2 + """ + return self.__timeout + + @property + def retry_writes(self) -> bool: + """If this instance should retry supported write operations.""" + return self.__retry_writes + + @property + def retry_reads(self) -> bool: + """If this instance should retry supported read operations.""" + return self.__retry_reads + + @property + def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: + """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" + return self.__auto_encryption_opts + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self.__load_balanced + + @property + def event_listeners(self) -> list[_EventListeners]: + """The event listeners registered for this client. + + See :mod:`~pymongo.monitoring` for details. + + .. versionadded:: 4.0 + """ + assert self.__pool_options._event_listeners is not None + return self.__pool_options._event_listeners.event_listeners() + + @property + def server_monitoring_mode(self) -> str: + """The configured serverMonitoringMode option. + + .. versionadded:: 4.5 + """ + return self.__server_monitoring_mode diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py new file mode 100644 index 0000000000..b4339bd122 --- /dev/null +++ b/pymongo/synchronous/client_session.py @@ -0,0 +1,1157 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Logical sessions for ordering sequential operations. + +.. versionadded:: 3.6 + +Causally Consistent Reads +========================= + +.. code-block:: python + + with client.start_session(causal_consistency=True) as session: + collection = client.db.collection + await collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) + secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) + + # A secondary read waits for replication of the write. + await secondary_c.find_one({"_id": 1}, session=session) + +If `causal_consistency` is True (the default), read operations that use +the session are causally after previous read and write operations. Using a +causally consistent session, an application can read its own writes and is +guaranteed monotonic reads, even when reading from replica set secondaries. + +.. seealso:: The MongoDB documentation on `causal-consistency `_. + +.. _transactions-ref: + +Transactions +============ + +.. versionadded:: 3.7 + +MongoDB 4.0 adds support for transactions on replica set primaries. A +transaction is associated with a :class:`ClientSession`. To start a transaction +on a session, use :meth:`ClientSession.start_transaction` in a with-statement. +Then, execute an operation within the transaction by passing the session to the +operation: + +.. code-block:: python + + orders = client.db.orders + inventory = client.db.inventory + with client.start_session() as session: + async with session.start_transaction(): + await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + await inventory.update_one( + {"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, + session=session, + ) + +Upon normal completion of ``async with session.start_transaction()`` block, the +transaction automatically calls :meth:`ClientSession.commit_transaction`. +If the block exits with an exception, the transaction automatically calls +:meth:`ClientSession.abort_transaction`. + +In general, multi-document transactions only support read/write (CRUD) +operations on existing collections. However, MongoDB 4.4 adds support for +creating collections and indexes with some limitations, including an +insert operation that would result in the creation of a new collection. +For a complete description of all the supported and unsupported operations +see the `MongoDB server's documentation for transactions +`_. + +A session may only have a single active transaction at a time, multiple +transactions on the same session can be executed in sequence. + +Sharded Transactions +^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 3.9 + +PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB +>=4.2. Sharded transactions have the same API as replica set transactions. +When running a transaction against a sharded cluster, the session is +pinned to the mongos server selected for the first operation in the +transaction. All subsequent operations that are part of the same transaction +are routed to the same mongos server. When the transaction is completed, by +running either commitTransaction or abortTransaction, the session is unpinned. + +.. seealso:: The MongoDB documentation on `transactions `_. + +.. _snapshot-reads-ref: + +Snapshot Reads +============== + +.. versionadded:: 3.12 + +MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by +passing the ``snapshot`` option to +:meth:`~pymongo.mongo_client.MongoClient.start_session`. +If ``snapshot`` is True, all read operations that use this session read data +from the same snapshot timestamp. The server chooses the latest +majority-committed snapshot timestamp when executing the first read operation +using the session. Subsequent reads on this session read from the same +snapshot timestamp. Snapshot reads are also supported when reading from +replica set secondaries. + +.. code-block:: python + + # Each read using this session reads data from the same point in time. + with client.start_session(snapshot=True) as session: + order = await orders.find_one({"sku": "abc123"}, session=session) + inventory = await inventory.find_one({"sku": "abc123"}, session=session) + +Snapshot Reads Limitations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Snapshot reads sessions are incompatible with ``causal_consistency=True``. +Only the following read operations are supported in a snapshot reads session: + +- :meth:`~pymongo.collection.Collection.find` +- :meth:`~pymongo.collection.Collection.find_one` +- :meth:`~pymongo.collection.Collection.aggregate` +- :meth:`~pymongo.collection.Collection.count_documents` +- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections) + +Classes +======= +""" + +from __future__ import annotations + +import collections +import time +import uuid +from collections.abc import Mapping as _Mapping +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Mapping, + MutableMapping, + NoReturn, + Optional, + Type, + TypeVar, +) + +from bson.binary import Binary +from bson.int64 import Int64 +from bson.timestamp import Timestamp +from pymongo import _csot +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) +from pymongo.helpers_constants import _RETRYABLE_ERROR_CODES +from pymongo.read_concern import ReadConcern +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.cursor import _ConnectionManager +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.read_preferences import ReadPreference, _ServerMode +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.synchronous.typings import ClusterTime, _Address + +_IS_SYNC = True + + +class SessionOptions: + """Options for a new :class:`ClientSession`. + + :param causal_consistency: If True, read operations are causally + ordered within the session. Defaults to True when the ``snapshot`` + option is ``False``. + :param default_transaction_options: The default + TransactionOptions to use for transactions started on this session. + :param snapshot: If True, then all reads performed using this + session will read from the same snapshot. This option is incompatible + with ``causal_consistency=True``. Defaults to ``False``. + + .. versionchanged:: 3.12 + Added the ``snapshot`` parameter. + """ + + def __init__( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> None: + if snapshot: + if causal_consistency: + raise ConfigurationError("snapshot reads do not support causal_consistency=True") + causal_consistency = False + elif causal_consistency is None: + causal_consistency = True + self._causal_consistency = causal_consistency + if default_transaction_options is not None: + if not isinstance(default_transaction_options, TransactionOptions): + raise TypeError( + "default_transaction_options must be an instance of " + "pymongo.client_session.TransactionOptions, not: {!r}".format( + default_transaction_options + ) + ) + self._default_transaction_options = default_transaction_options + self._snapshot = snapshot + + @property + def causal_consistency(self) -> bool: + """Whether causal consistency is configured.""" + return self._causal_consistency + + @property + def default_transaction_options(self) -> Optional[TransactionOptions]: + """The default TransactionOptions to use for transactions started on + this session. + + .. versionadded:: 3.7 + """ + return self._default_transaction_options + + @property + def snapshot(self) -> Optional[bool]: + """Whether snapshot reads are configured. + + .. versionadded:: 3.12 + """ + return self._snapshot + + +class TransactionOptions: + """Options for :meth:`ClientSession.start_transaction`. + + :param read_concern: The + :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. + If ``None`` (the default) the :attr:`read_preference` of + the :class:`MongoClient` is used. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use for this + transaction. If ``None`` (the default) the :attr:`read_preference` of + the :class:`MongoClient` is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. Transactions which read must use + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param max_commit_time_ms: The maximum amount of time to allow a + single commitTransaction command to run. This option is an alias for + maxTimeMS option on the commitTransaction command. If ``None`` (the + default) maxTimeMS is not used. + + .. versionchanged:: 3.9 + Added the ``max_commit_time_ms`` option. + + .. versionadded:: 3.7 + """ + + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> None: + self._read_concern = read_concern + self._write_concern = write_concern + self._read_preference = read_preference + self._max_commit_time_ms = max_commit_time_ms + if read_concern is not None: + if not isinstance(read_concern, ReadConcern): + raise TypeError( + "read_concern must be an instance of " + f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" + ) + if write_concern is not None: + if not isinstance(write_concern, WriteConcern): + raise TypeError( + "write_concern must be an instance of " + f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" + ) + if not write_concern.acknowledged: + raise ConfigurationError( + "transactions do not support unacknowledged write concern" + f": {write_concern!r}" + ) + if read_preference is not None: + if not isinstance(read_preference, _ServerMode): + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + if max_commit_time_ms is not None: + if not isinstance(max_commit_time_ms, int): + raise TypeError("max_commit_time_ms must be an integer or None") + + @property + def read_concern(self) -> Optional[ReadConcern]: + """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" + return self._read_concern + + @property + def write_concern(self) -> Optional[WriteConcern]: + """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" + return self._write_concern + + @property + def read_preference(self) -> Optional[_ServerMode]: + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" + return self._read_preference + + @property + def max_commit_time_ms(self) -> Optional[int]: + """The maxTimeMS to use when running a commitTransaction command. + + .. versionadded:: 3.9 + """ + return self._max_commit_time_ms + + +def _validate_session_write_concern( + session: Optional[ClientSession], write_concern: Optional[WriteConcern] +) -> Optional[ClientSession]: + """Validate that an explicit session is not used with an unack'ed write. + + Returns the session to use for the next operation. + """ + if session: + if write_concern is not None and not write_concern.acknowledged: + # For unacknowledged writes without an explicit session, + # drivers SHOULD NOT use an implicit session. If a driver + # creates an implicit session for unacknowledged writes + # without an explicit session, the driver MUST NOT send the + # session ID. + if session._implicit: + return None + else: + raise ConfigurationError( + "Explicit sessions are incompatible with " + f"unacknowledged write concern: {write_concern!r}" + ) + return session + + +class _TransactionContext: + """Internal transaction context manager for start_transaction.""" + + def __init__(self, session: ClientSession): + self.__session = session + + def __enter__(self) -> _TransactionContext: + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if self.__session.in_transaction: + if exc_val is None: + self.__session.commit_transaction() + else: + self.__session.abort_transaction() + + +class _TxnState: + NONE = 1 + STARTING = 2 + IN_PROGRESS = 3 + COMMITTED = 4 + COMMITTED_EMPTY = 5 + ABORTED = 6 + + +class _Transaction: + """Internal class to hold transaction information in a ClientSession.""" + + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient): + self.opts = opts + self.state = _TxnState.NONE + self.sharded = False + self.pinned_address: Optional[_Address] = None + self.conn_mgr: Optional[_ConnectionManager] = None + self.recovery_token = None + self.attempt = 0 + self.client = client + + def active(self) -> bool: + return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) + + def starting(self) -> bool: + return self.state == _TxnState.STARTING + + @property + def pinned_conn(self) -> Optional[Connection]: + if self.active() and self.conn_mgr: + return self.conn_mgr.conn + return None + + def pin(self, server: Server, conn: Connection) -> None: + self.sharded = True + self.pinned_address = server.description.address + if server.description.server_type == SERVER_TYPE.LoadBalancer: + conn.pin_txn() + self.conn_mgr = _ConnectionManager(conn, False) + + def unpin(self) -> None: + self.pinned_address = None + if self.conn_mgr: + self.conn_mgr.close() + self.conn_mgr = None + + def reset(self) -> None: + self.unpin() + self.state = _TxnState.NONE + self.sharded = False + self.recovery_token = None + self.attempt = 0 + + def __del__(self) -> None: + if self.conn_mgr: + # Reuse the cursor closing machinery to return the socket to the + # pool soon. + self.client._close_cursor_soon(0, None, self.conn_mgr) + self.conn_mgr = None + + +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: + """Re-raise an exception with the UnknownTransactionCommitResult label.""" + exc._add_error_label("UnknownTransactionCommitResult") + raise + + +def _max_time_expired_error(exc: PyMongoError) -> bool: + """Return true if exc is a MaxTimeMSExpired error.""" + return isinstance(exc, OperationFailure) and exc.code == 50 + + +# From the transactions spec, all the retryable writes errors plus +# WriteConcernFailed. +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( + [ + 64, # WriteConcernFailed + 50, # MaxTimeMSExpired + ] +) + +# From the Convenient API for Transactions spec, with_transaction must +# halt retries after 120 seconds. +# This limit is non-configurable and was chosen to be twice the 60 second +# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter. +_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 + + +def _within_time_limit(start_time: float) -> bool: + """Are we within the with_transaction retry limit?""" + return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT + + +_T = TypeVar("_T") + +if TYPE_CHECKING: + from pymongo.synchronous.mongo_client import MongoClient + + +class ClientSession: + """A session for ordering sequential operations. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + They can only be used by one thread or process at a time. A single + :class:`ClientSession` cannot be used to run multiple operations + concurrently. + + Should not be initialized directly by application developers - to create a + :class:`ClientSession`, call + :meth:`~pymongo.mongo_client.MongoClient.start_session`. + """ + + def __init__( + self, + client: MongoClient, + server_session: Any, + options: SessionOptions, + implicit: bool, + ) -> None: + # A MongoClient, a _ServerSession, a SessionOptions, and a set. + self._client: MongoClient = client + self._server_session = server_session + self._options = options + self._cluster_time: Optional[Mapping[str, Any]] = None + self._operation_time: Optional[Timestamp] = None + self._snapshot_time = None + # Is this an implicitly created session? + self._implicit = implicit + self._transaction = _Transaction(None, client) + + def end_session(self) -> None: + """Finish this session. If a transaction has started, abort it. + + It is an error to use the session after the session has ended. + """ + self._end_session(lock=True) + + def _end_session(self, lock: bool) -> None: + if self._server_session is not None: + try: + if self.in_transaction: + self.abort_transaction() + # It's possible we're still pinned here when the transaction + # is in the committed state when the session is discarded. + self._unpin() + finally: + self._client._return_server_session(self._server_session, lock) + self._server_session = None + + def _check_ended(self) -> None: + if self._server_session is None: + raise InvalidOperation("Cannot use ended session") + + def __enter__(self) -> ClientSession: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self._end_session(lock=True) + + @property + def client(self) -> MongoClient: + """The :class:`~pymongo.mongo_client.MongoClient` this session was + created from. + """ + return self._client + + @property + def options(self) -> SessionOptions: + """The :class:`SessionOptions` this session was created with.""" + return self._options + + @property + def session_id(self) -> Mapping[str, Any]: + """A BSON document, the opaque server session identifier.""" + self._check_ended() + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.session_id + + @property + def _transaction_id(self) -> Int64: + """The current transaction id for the underlying server session.""" + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.transaction_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + """The cluster time returned by the last operation executed + in this session. + """ + return self._cluster_time + + @property + def operation_time(self) -> Optional[Timestamp]: + """The operation time returned by the last operation executed + in this session. + """ + return self._operation_time + + def _inherit_option(self, name: str, val: _T) -> _T: + """Return the inherited TransactionOption value.""" + if val: + return val + txn_opts = self.options.default_transaction_options + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val + return getattr(self.client, name) + + def with_transaction( + self, + callback: Callable[[ClientSession], _T], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: + """Execute a callback in a transaction. + + This method starts a transaction on this session, executes ``callback`` + once, and then commits the transaction. For example:: + + async def callback(session): + orders = session.client.db.orders + inventory = session.client.db.inventory + await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + await inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, session=session) + + with client.start_session() as session: + await session.with_transaction(callback) + + To pass arbitrary arguments to the ``callback``, wrap your callable + with a ``lambda`` like this:: + + async def callback(session, custom_arg, custom_kwarg=None): + # Transaction operations... + + with client.start_session() as session: + await session.with_transaction( + lambda s: callback(s, "custom_arg", custom_kwarg=1)) + + In the event of an exception, ``with_transaction`` may retry the commit + or the entire transaction, therefore ``callback`` may be invoked + multiple times by a single call to ``with_transaction``. Developers + should be mindful of this possibility when writing a ``callback`` that + modifies application state or has any other side-effects. + Note that even when the ``callback`` is invoked multiple times, + ``with_transaction`` ensures that the transaction will be committed + at-most-once on the server. + + The ``callback`` should not attempt to start new transactions, but + should simply run operations meant to be contained within a + transaction. The ``callback`` should also not commit the transaction; + this is handled automatically by ``with_transaction``. If the + ``callback`` does commit or abort the transaction without error, + however, ``with_transaction`` will return without taking further + action. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. + Consequently, the ``callback`` must not attempt to execute multiple + operations concurrently. + + When ``callback`` raises an exception, ``with_transaction`` + automatically aborts the current transaction. When ``callback`` or + :meth:`~ClientSession.commit_transaction` raises an exception that + includes the ``"TransientTransactionError"`` error label, + ``with_transaction`` starts a new transaction and re-executes + the ``callback``. + + When :meth:`~ClientSession.commit_transaction` raises an exception with + the ``"UnknownTransactionCommitResult"`` error label, + ``with_transaction`` retries the commit until the result of the + transaction is known. + + This method will cease retrying after 120 seconds has elapsed. This + timeout is not configurable and any exception raised by the + ``callback`` or by :meth:`ClientSession.commit_transaction` after the + timeout is reached will be re-raised. Applications that desire a + different timeout duration should not use this method. + + :param callback: The callable ``callback`` to run inside a transaction. + The callable must accept a single argument, this session. Note, + under certain error conditions the callback may be run multiple + times. + :param read_concern: The + :class:`~pymongo.read_concern.ReadConcern` to use for this + transaction. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use for this + transaction. + :param read_preference: The read preference to use for this + transaction. If ``None`` (the default) the :attr:`read_preference` + of this :class:`Database` is used. See + :mod:`~pymongo.read_preferences` for options. + + :return: The return value of the ``callback``. + + .. versionadded:: 3.9 + """ + start_time = time.monotonic() + while True: + self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) + try: + ret = callback(self) + except Exception as exc: + if self.in_transaction: + self.abort_transaction() + if ( + isinstance(exc, PyMongoError) + and exc.has_error_label("TransientTransactionError") + and _within_time_limit(start_time) + ): + # Retry the entire transaction. + continue + raise + + if not self.in_transaction: + # Assume callback intentionally ended the transaction. + return ret + + while True: + try: + self.commit_transaction() + except PyMongoError as exc: + if ( + exc.has_error_label("UnknownTransactionCommitResult") + and _within_time_limit(start_time) + and not _max_time_expired_error(exc) + ): + # Retry the commit. + continue + + if exc.has_error_label("TransientTransactionError") and _within_time_limit( + start_time + ): + # Retry the entire transaction. + break + raise + + # Commit succeeded. + return ret + + def start_transaction( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> ContextManager: + """Start a multi-statement transaction. + + Takes the same arguments as :class:`TransactionOptions`. + + .. versionchanged:: 3.9 + Added the ``max_commit_time_ms`` option. + + .. versionadded:: 3.7 + """ + self._check_ended() + + if self.options.snapshot: + raise InvalidOperation("Transactions are not supported in snapshot sessions") + + if self.in_transaction: + raise InvalidOperation("Transaction already in progress") + + read_concern = self._inherit_option("read_concern", read_concern) + write_concern = self._inherit_option("write_concern", write_concern) + read_preference = self._inherit_option("read_preference", read_preference) + if max_commit_time_ms is None: + opts = self.options.default_transaction_options + if opts: + max_commit_time_ms = opts.max_commit_time_ms + + self._transaction.opts = TransactionOptions( + read_concern, write_concern, read_preference, max_commit_time_ms + ) + self._transaction.reset() + self._transaction.state = _TxnState.STARTING + self._start_retryable_write() + return _TransactionContext(self) + + def commit_transaction(self) -> None: + """Commit a multi-statement transaction. + + .. versionadded:: 3.7 + """ + self._check_ended() + state = self._transaction.state + if state is _TxnState.NONE: + raise InvalidOperation("No transaction started") + elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY): + # Server transaction was never started, no need to send a command. + self._transaction.state = _TxnState.COMMITTED_EMPTY + return + elif state is _TxnState.ABORTED: + raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction") + elif state is _TxnState.COMMITTED: + # We're explicitly retrying the commit, move the state back to + # "in progress" so that in_transaction returns true. + self._transaction.state = _TxnState.IN_PROGRESS + + try: + self._finish_transaction_with_retry("commitTransaction") + except ConnectionFailure as exc: + # We do not know if the commit was successfully applied on the + # server or if it satisfied the provided write concern, set the + # unknown commit error label. + exc._remove_error_label("TransientTransactionError") + _reraise_with_unknown_commit(exc) + except WTimeoutError as exc: + # We do not know if the commit has satisfied the provided write + # concern, add the unknown commit error label. + _reraise_with_unknown_commit(exc) + except OperationFailure as exc: + if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES: + # The server reports errorLabels in the case. + raise + # We do not know if the commit was successfully applied on the + # server or if it satisfied the provided write concern, set the + # unknown commit error label. + _reraise_with_unknown_commit(exc) + finally: + self._transaction.state = _TxnState.COMMITTED + + def abort_transaction(self) -> None: + """Abort a multi-statement transaction. + + .. versionadded:: 3.7 + """ + self._check_ended() + + state = self._transaction.state + if state is _TxnState.NONE: + raise InvalidOperation("No transaction started") + elif state is _TxnState.STARTING: + # Server transaction was never started, no need to send a command. + self._transaction.state = _TxnState.ABORTED + return + elif state is _TxnState.ABORTED: + raise InvalidOperation("Cannot call abortTransaction twice") + elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") + + try: + self._finish_transaction_with_retry("abortTransaction") + except (OperationFailure, ConnectionFailure): + # The transactions spec says to ignore abortTransaction errors. + pass + finally: + self._transaction.state = _TxnState.ABORTED + self._unpin() + + def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: + """Run commit or abort with one retry after any retryable error. + + :param command_name: Either "commitTransaction" or "abortTransaction". + """ + + def func( + _session: Optional[ClientSession], conn: Connection, _retryable: bool + ) -> dict[str, Any]: + return self._finish_transaction(conn, command_name) + + return self._client._retry_internal(func, self, None, retryable=True, operation=_Op.ABORT) + + def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]: + self._transaction.attempt += 1 + opts = self._transaction.opts + assert opts + wc = opts.write_concern + cmd = {command_name: 1} + if command_name == "commitTransaction": + if opts.max_commit_time_ms and _csot.get_timeout() is None: + cmd["maxTimeMS"] = opts.max_commit_time_ms + + # Transaction spec says that after the initial commit attempt, + # subsequent commitTransaction commands should be upgraded to use + # w:"majority" and set a default value of 10 seconds for wtimeout. + if self._transaction.attempt > 1: + assert wc + wc_doc = wc.document + wc_doc["w"] = "majority" + wc_doc.setdefault("wtimeout", 10000) + wc = WriteConcern(**wc_doc) + + if self._transaction.recovery_token: + cmd["recoveryToken"] = self._transaction.recovery_token + + return self._client.admin._command( + conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) + + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + """Internal cluster time helper.""" + if self._cluster_time is None: + self._cluster_time = cluster_time + elif cluster_time is not None: + if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: + self._cluster_time = cluster_time + + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: + """Update the cluster time for this session. + + :param cluster_time: The + :data:`~pymongo.client_session.ClientSession.cluster_time` from + another `ClientSession` instance. + """ + if not isinstance(cluster_time, _Mapping): + raise TypeError("cluster_time must be a subclass of collections.Mapping") + if not isinstance(cluster_time.get("clusterTime"), Timestamp): + raise ValueError("Invalid cluster_time") + self._advance_cluster_time(cluster_time) + + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: + """Internal operation time helper.""" + if self._operation_time is None: + self._operation_time = operation_time + elif operation_time is not None: + if operation_time > self._operation_time: + self._operation_time = operation_time + + def advance_operation_time(self, operation_time: Timestamp) -> None: + """Update the operation time for this session. + + :param operation_time: The + :data:`~pymongo.client_session.ClientSession.operation_time` from + another `ClientSession` instance. + """ + if not isinstance(operation_time, Timestamp): + raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") + self._advance_operation_time(operation_time) + + def _process_response(self, reply: Mapping[str, Any]) -> None: + """Process a response to a command that was run with this session.""" + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) + if self._options.snapshot and self._snapshot_time is None: + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") + else: + ct = reply.get("atClusterTime") + self._snapshot_time = ct + if self.in_transaction and self._transaction.sharded: + recovery_token = reply.get("recoveryToken") + if recovery_token: + self._transaction.recovery_token = recovery_token + + @property + def has_ended(self) -> bool: + """True if this session is finished.""" + return self._server_session is None + + @property + def in_transaction(self) -> bool: + """True if this session has an active multi-statement transaction. + + .. versionadded:: 3.10 + """ + return self._transaction.active() + + @property + def _starting_transaction(self) -> bool: + """True if this session is starting a multi-statement transaction.""" + return self._transaction.starting() + + @property + def _pinned_address(self) -> Optional[_Address]: + """The mongos address this transaction was created on.""" + if self._transaction.active(): + return self._transaction.pinned_address + return None + + @property + def _pinned_connection(self) -> Optional[Connection]: + """The connection this transaction was started on.""" + return self._transaction.pinned_conn + + def _pin(self, server: Server, conn: Connection) -> None: + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, conn) + + def _unpin(self) -> None: + """Unpin this session from any pinned Server.""" + self._transaction.unpin() + + def _txn_read_preference(self) -> Optional[_ServerMode]: + """Return read preference of this transaction or None.""" + if self.in_transaction: + assert self._transaction.opts + return self._transaction.opts.read_preference + return None + + def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session( + logical_session_timeout_minutes + ) + if old.started_retryable_write: + self._server_session.inc_transaction_id() + + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: _ServerMode, + conn: Connection, + ) -> None: + if not conn.supports_sessions: + if not self._implicit: + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return + self._check_ended() + self._materialize(conn.logical_session_timeout_minutes) + if self.options.snapshot: + self._update_read_concern(command, conn) + + self._server_session.last_use = time.monotonic() + command["lsid"] = self._server_session.session_id + + if is_retryable: + command["txnNumber"] = self._server_session.transaction_id + return + + if self.in_transaction: + if read_preference != ReadPreference.PRIMARY: + raise InvalidOperation( + f"read preference in a transaction must be primary, not: {read_preference!r}" + ) + + if self._transaction.state == _TxnState.STARTING: + # First command begins a new transaction. + self._transaction.state = _TxnState.IN_PROGRESS + command["startTransaction"] = True + + assert self._transaction.opts + if self._transaction.opts.read_concern: + rc = self._transaction.opts.read_concern.document + if rc: + command["readConcern"] = rc + self._update_read_concern(command, conn) + + command["txnNumber"] = self._server_session.transaction_id + command["autocommit"] = False + + def _start_retryable_write(self) -> None: + self._check_ended() + self._server_session.inc_transaction_id() + + def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: Connection) -> None: + if self.options.causal_consistency and self.operation_time is not None: + cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time + if self.options.snapshot: + if conn.max_wire_version < 13: + raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") + rc = cmd.setdefault("readConcern", {}) + rc["level"] = "snapshot" + if self._snapshot_time is not None: + rc["atClusterTime"] = self._snapshot_time + + def __copy__(self) -> NoReturn: + raise TypeError("A ClientSession cannot be copied, create a new session instead") + + +class _EmptyServerSession: + __slots__ = "dirty", "started_retryable_write" + + def __init__(self) -> None: + self.dirty = False + self.started_retryable_write = False + + def mark_dirty(self) -> None: + self.dirty = True + + def inc_transaction_id(self) -> None: + self.started_retryable_write = True + + +class _ServerSession: + def __init__(self, generation: int): + # Ensure id is type 4, regardless of CodecOptions.uuid_representation. + self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} + self.last_use = time.monotonic() + self._transaction_id = 0 + self.dirty = False + self.generation = generation + + def mark_dirty(self) -> None: + """Mark this session as dirty. + + A server session is marked dirty when a command fails with a network + error. Dirty sessions are later discarded from the server session pool. + """ + self.dirty = True + + def timed_out(self, session_timeout_minutes: Optional[int]) -> bool: + if session_timeout_minutes is None: + return False + + idle_seconds = time.monotonic() - self.last_use + + # Timed out if we have less than a minute to live. + return idle_seconds > (session_timeout_minutes - 1) * 60 + + @property + def transaction_id(self) -> Int64: + """Positive 64-bit integer.""" + return Int64(self._transaction_id) + + def inc_transaction_id(self) -> None: + self._transaction_id += 1 + + +class _ServerSessionPool(collections.deque): + """Pool of _ServerSession objects. + + This class is not thread-safe, access it while holding the Topology lock. + """ + + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self.generation = 0 + + def reset(self) -> None: + self.generation += 1 + self.clear() + + def pop_all(self) -> list[_ServerSession]: + ids = [] + while self: + ids.append(self.pop().session_id) + return ids + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + # Although the Driver Sessions Spec says we only clear stale sessions + # in return_server_session, PyMongo can't take a lock when returning + # sessions from a __del__ method (like in Cursor.__die), so it can't + # clear stale sessions there. In case many sessions were returned via + # __del__, check for stale sessions here too. + self._clear_stale(session_timeout_minutes) + + # The most recently used sessions are on the left. + while self: + s = self.popleft() + if not s.timed_out(session_timeout_minutes): + return s + + return _ServerSession(self.generation) + + def return_server_session( + self, server_session: _ServerSession, session_timeout_minutes: Optional[int] + ) -> None: + if session_timeout_minutes is not None: + self._clear_stale(session_timeout_minutes) + if server_session.timed_out(session_timeout_minutes): + return + self.return_server_session_no_lock(server_session) + + def return_server_session_no_lock(self, server_session: _ServerSession) -> None: + # Discard sessions from an old pool to avoid duplicate sessions in the + # child process after a fork. + if server_session.generation == self.generation and not server_session.dirty: + self.appendleft(server_session) + + def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: + # Clear stale sessions. The least recently used are on the right. + while self: + if self[-1].timed_out(session_timeout_minutes): + self.pop() + else: + # The remaining sessions also haven't timed out. + break diff --git a/pymongo/synchronous/collation.py b/pymongo/synchronous/collation.py new file mode 100644 index 0000000000..1ce1ee00b1 --- /dev/null +++ b/pymongo/synchronous/collation.py @@ -0,0 +1,226 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for working with `collations`_. + +.. _collations: https://www.mongodb.com/docs/manual/reference/collation/ +""" +from __future__ import annotations + +from typing import Any, Mapping, Optional, Union + +from pymongo.synchronous import common +from pymongo.write_concern import validate_boolean + +_IS_SYNC = True + + +class CollationStrength: + """ + An enum that defines values for `strength` on a + :class:`~pymongo.collation.Collation`. + """ + + PRIMARY = 1 + """Differentiate base (unadorned) characters.""" + + SECONDARY = 2 + """Differentiate character accents.""" + + TERTIARY = 3 + """Differentiate character case.""" + + QUATERNARY = 4 + """Differentiate words with and without punctuation.""" + + IDENTICAL = 5 + """Differentiate unicode code point (characters are exactly identical).""" + + +class CollationAlternate: + """ + An enum that defines values for `alternate` on a + :class:`~pymongo.collation.Collation`. + """ + + NON_IGNORABLE = "non-ignorable" + """Spaces and punctuation are treated as base characters.""" + + SHIFTED = "shifted" + """Spaces and punctuation are *not* considered base characters. + + Spaces and punctuation are distinguished regardless when the + :class:`~pymongo.collation.Collation` strength is at least + :data:`~pymongo.collation.CollationStrength.QUATERNARY`. + + """ + + +class CollationMaxVariable: + """ + An enum that defines values for `max_variable` on a + :class:`~pymongo.collation.Collation`. + """ + + PUNCT = "punct" + """Both punctuation and spaces are ignored.""" + + SPACE = "space" + """Spaces alone are ignored.""" + + +class CollationCaseFirst: + """ + An enum that defines values for `case_first` on a + :class:`~pymongo.collation.Collation`. + """ + + UPPER = "upper" + """Sort uppercase characters first.""" + + LOWER = "lower" + """Sort lowercase characters first.""" + + OFF = "off" + """Default for locale or collation strength.""" + + +class Collation: + """Collation + + :param locale: (string) The locale of the collation. This should be a string + that identifies an `ICU locale ID` exactly. For example, ``en_US`` is + valid, but ``en_us`` and ``en-US`` are not. Consult the MongoDB + documentation for a list of supported locales. + :param caseLevel: (optional) If ``True``, turn on case sensitivity if + `strength` is 1 or 2 (case sensitivity is implied if `strength` is + greater than 2). Defaults to ``False``. + :param caseFirst: (optional) Specify that either uppercase or lowercase + characters take precedence. Must be one of the following values: + + * :data:`~CollationCaseFirst.UPPER` + * :data:`~CollationCaseFirst.LOWER` + * :data:`~CollationCaseFirst.OFF` (the default) + + :param strength: Specify the comparison strength. This is also + known as the ICU comparison level. This must be one of the following + values: + + * :data:`~CollationStrength.PRIMARY` + * :data:`~CollationStrength.SECONDARY` + * :data:`~CollationStrength.TERTIARY` (the default) + * :data:`~CollationStrength.QUATERNARY` + * :data:`~CollationStrength.IDENTICAL` + + Each successive level builds upon the previous. For example, a + `strength` of :data:`~CollationStrength.SECONDARY` differentiates + characters based both on the unadorned base character and its accents. + + :param numericOrdering: If ``True``, order numbers numerically + instead of in collation order (defaults to ``False``). + :param alternate: Specify whether spaces and punctuation are + considered base characters. This must be one of the following values: + + * :data:`~CollationAlternate.NON_IGNORABLE` (the default) + * :data:`~CollationAlternate.SHIFTED` + + :param maxVariable: When `alternate` is + :data:`~CollationAlternate.SHIFTED`, this option specifies what + characters may be ignored. This must be one of the following values: + + * :data:`~CollationMaxVariable.PUNCT` (the default) + * :data:`~CollationMaxVariable.SPACE` + + :param normalization: If ``True``, normalizes text into Unicode + NFD. Defaults to ``False``. + :param backwards: If ``True``, accents on characters are + considered from the back of the word to the front, as it is done in some + French dictionary ordering traditions. Defaults to ``False``. + :param kwargs: Keyword arguments supplying any additional options + to be sent with this Collation object. + + .. versionadded: 3.4 + + """ + + __slots__ = ("__document",) + + def __init__( + self, + locale: str, + caseLevel: Optional[bool] = None, + caseFirst: Optional[str] = None, + strength: Optional[int] = None, + numericOrdering: Optional[bool] = None, + alternate: Optional[str] = None, + maxVariable: Optional[str] = None, + normalization: Optional[bool] = None, + backwards: Optional[bool] = None, + **kwargs: Any, + ) -> None: + locale = common.validate_string("locale", locale) + self.__document: dict[str, Any] = {"locale": locale} + if caseLevel is not None: + self.__document["caseLevel"] = validate_boolean("caseLevel", caseLevel) + if caseFirst is not None: + self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) + if strength is not None: + self.__document["strength"] = common.validate_integer("strength", strength) + if numericOrdering is not None: + self.__document["numericOrdering"] = validate_boolean( + "numericOrdering", numericOrdering + ) + if alternate is not None: + self.__document["alternate"] = common.validate_string("alternate", alternate) + if maxVariable is not None: + self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) + if normalization is not None: + self.__document["normalization"] = validate_boolean("normalization", normalization) + if backwards is not None: + self.__document["backwards"] = validate_boolean("backwards", backwards) + self.__document.update(kwargs) + + @property + def document(self) -> dict[str, Any]: + """The document representation of this collation. + + .. note:: + :class:`Collation` is immutable. Mutating the value of + :attr:`document` does not mutate this :class:`Collation`. + """ + return self.__document.copy() + + def __repr__(self) -> str: + document = self.document + return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Collation): + return self.document == other.document + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +def validate_collation_or_none( + value: Optional[Union[Mapping[str, Any], Collation]] +) -> Optional[dict[str, Any]]: + if value is None: + return None + if isinstance(value, Collation): + return value.document + if isinstance(value, dict): + return value + raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py new file mode 100644 index 0000000000..61bd81fd9b --- /dev/null +++ b/pymongo/synchronous/collection.py @@ -0,0 +1,3547 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection level utilities for Mongo.""" +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Generic, + Iterable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from bson.timestamp import Timestamp +from pymongo import ASCENDING, _csot +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.synchronous import common, helpers, message +from pymongo.synchronous.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.synchronous.bulk import _Bulk +from pymongo.synchronous.change_stream import CollectionChangeStream +from pymongo.synchronous.collation import validate_collation_or_none +from pymongo.synchronous.command_cursor import ( + CommandCursor, + RawBatchCommandCursor, +) +from pymongo.synchronous.common import _ecoc_coll_name, _esc_coll_name +from pymongo.synchronous.cursor import ( + Cursor, + RawBatchCursor, +) +from pymongo.synchronous.helpers import _check_write_command_response +from pymongo.synchronous.message import _UNICODE_REPLACE_CODEC_OPTIONS +from pymongo.synchronous.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, + _IndexKeyHint, + _IndexList, + _Op, +) +from pymongo.synchronous.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean + +_IS_SYNC = True + +T = TypeVar("T") + +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} + + +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] + + +class ReturnDocument: + """An enum used with + :meth:`~pymongo.collection.Collection.find_one_and_replace` and + :meth:`~pymongo.collection.Collection.find_one_and_update`. + """ + + BEFORE = False + """Return the original document before it was updated/replaced, or + ``None`` if no document matches the query. + """ + AFTER = True + """Return the updated/replaced or inserted document.""" + + +if TYPE_CHECKING: + import bson + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.aggregation import _AggregationCommand + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collation import Collation + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + + +class Collection(common.BaseObject, Generic[_DocumentType]): + """A Mongo collection.""" + + def __init__( + self, + database: Database[_DocumentType], + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> None: + """Get / create a Mongo collection. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is + not a valid collection name. Any additional keyword arguments will be used + as options passed to the create command. See + :meth:`~pymongo.database.Database.create_collection` for valid + options. + + If `create` is ``True``, `collation` is specified, or any additional + keyword arguments are present, a ``create`` command will be + sent, using ``session`` if specified. Otherwise, a ``create`` command + will not be sent and the collection will be created implicitly on first + use. The optional ``session`` argument is *only* used for the ``create`` + command, it is not associated with the collection afterward. + + :param database: the database to get a collection from + :param name: the name of the collection to get + :param create: If ``True``, force collection + creation even without options being set. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) database.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) database.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) database.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) database.read_concern is used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. If a collation is provided, + it will be passed to the create collection command. + :param session: A + :class:`~pymongo.client_session.ClientSession` that is used with + the create collection command. + :param kwargs: Additional keyword arguments will + be passed as options for the create collection command. + + .. versionchanged:: 4.2 + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + Removed the uuid_subtype attribute. + :class:`~pymongo.collection.Collection` no longer returns an + instance of :class:`~pymongo.collection.Collection` for attribute + names with leading underscores. You must use dict-style lookups + instead:: + + collection['__my_collection__'] + + Not: + + collection.__my_collection__ + + .. seealso:: The MongoDB documentation on `collections `_. + """ + super().__init__( + codec_options or database.codec_options, + read_preference or database.read_preference, + write_concern or database.write_concern, + read_concern or database.read_concern, + ) + if not isinstance(name, str): + raise TypeError("name must be an instance of str") + + if not name or ".." in name: + raise InvalidName("collection names cannot be empty") + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): + raise InvalidName("collection names must not contain '$': %r" % name) + if name[0] == "." or name[-1] == ".": + raise InvalidName("collection names must not start or end with '.': %r" % name) + if "\x00" in name: + raise InvalidName("collection names must not contain the null character") + + self._database: Database[_DocumentType] = database + self._name = name + self._full_name = f"{self._database.name}.{self._name}" + self._write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout + + if create or kwargs: + if _IS_SYNC: + self._create(kwargs, session) # type: ignore[unused-coroutine] + else: + raise ValueError("Collection does not support the `create` or `kwargs` arguments.") + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a sub-collection of this collection by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + full_name = f"{self._name}.{name}" + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + return Collection( + self._database, + f"{self._name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._database!r}, {self._name!r})" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Collection): + return self._database == other.database and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._database, self._name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) + + @property + def full_name(self) -> str: + """The full name of this :class:`Collection`. + + The full name is of the form `database_name.collection_name`. + """ + return self._full_name + + @property + def name(self) -> str: + """The name of this :class:`Collection`.""" + return self._name + + @property + def database(self) -> Database[_DocumentType]: + """The :class:`~pymongo.database.Database` that this + :class:`Collection` is a part of. + """ + return self._database + + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: + """Get a clone of this collection changing the specified settings. + + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) + >>> coll1.read_preference + Primary() + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Collection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Collection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Collection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Collection` + is used. + """ + return Collection( + self._database, + self._name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[ClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") + if raw_wc is not None: + return WriteConcern(**raw_wc) + else: + return self._write_concern_for(session) + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError(f"'{type(self).__name__}' object is not iterable") + + next = __next__ + + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" + if "." not in self._name: + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self._name + ) + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you meant to " + f"call the '%s' method on a '{type(self).__name__}' object it is " + "failing because no such method exists." % self._name.split(".")[-1] + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> CollectionChangeStream[_DocumentType]: + """Watch changes on this collection. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.CollectionChangeStream` cursor which + iterates over changes on this collection. + + .. code-block:: python + + async with db.collection.watch() as stream: + async for change in stream: + print(change) + + The :class:`~pymongo.change_stream.CollectionChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.CollectionChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + async with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: + async for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + .. note:: Using this helper method is preferred to directly calling + :meth:`~pymongo.collection.Collection.aggregate` with a + ``$changeStream`` stage, for the purpose of supporting + resumability. + + .. warning:: This Collection's :attr:`read_concern` must be + ``ReadConcern("majority")`` in order to use the ``$changeStream`` + stage. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionchanged:: 3.7 + Added the ``start_at_operation_time`` parameter. + + .. versionadded:: 3.6 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = CollectionChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + return self._database.client._conn_for_writes(session, operation) + + def _command( + self, + conn: Connection, + command: MutableMapping[str, Any], + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions] = None, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + user_fields: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal command helper. + + :param conn` - A Connection instance. + :param command` - The command itself, as a :class:`~bson.son.SON` instance. + :param read_preference` (optional) - The read preference to use. + :param codec_options` (optional) - An instance of + :class:`~bson.codec_options.CodecOptions`. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern` (optional) - An instance of + :class:`~pymongo.read_concern.ReadConcern`. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. + :param collation` (optional) - An instance of + :class:`~pymongo.collation.Collation`. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param retryable_write: True if this command is a retryable + write. + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + + :return: The result document. + """ + with self._database.client._tmp_session(session) as s: + return conn.command( + self._database.name, + command, + read_preference or self._read_preference_for(session), + codec_options or self.codec_options, + check, + allowable_errors, + read_concern=read_concern, + write_concern=write_concern, + parse_write_concern_error=True, + collation=collation, + session=s, + client=self._database.client, + retryable_write=retryable_write, + user_fields=user_fields, + ) + + def _create_helper( + self, + name: str, + options: MutableMapping[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + encrypted_fields: Optional[Mapping[str, Any]] = None, + qev2_required: bool = False, + ) -> None: + """Sends a create command with the given options.""" + cmd: dict[str, Any] = {"create": name} + if encrypted_fields: + cmd["encryptedFields"] = encrypted_fields + + if options: + if "size" in options: + options["size"] = float(options["size"]) + cmd.update(options) + with self._conn_for_writes(session, operation=_Op.CREATE) as conn: + if qev2_required and conn.max_wire_version < 21: + raise ConfigurationError( + "Driver support of Queryable Encryption is incompatible with server. " + "Upgrade server to use Queryable Encryption. " + f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) + + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + collation=collation, + session=session, + ) + + def _create( + self, + options: MutableMapping[str, Any], + session: Optional[ClientSession], + ) -> None: + collation = validate_collation_or_none(options.pop("collation", None)) + encrypted_fields = options.pop("encryptedFields", None) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self._create_helper( + _esc_coll_name(encrypted_fields, self._name), + opts, + None, + session, + qev2_required=True, + ) + self._create_helper(_ecoc_coll_name(encrypted_fields, self._name), opts, None, session) + self._create_helper( + self._name, options, collation, session, encrypted_fields=encrypted_fields + ) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self._create_helper(self._name, options, collation, session) + + @_csot.apply + def bulk_write( + self, + requests: Sequence[_WriteOp[_DocumentType]], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + let: Optional[Mapping] = None, + ) -> BulkWriteResult: + """Send a batch of write operations to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + ... + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), + ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] + >>> result = db.test.bulk_write(requests) + >>> result.inserted_count + 1 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {2: ObjectId('54f62ee28891e756a6e1abd5')} + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + + :param requests: A list of write operations (see examples above). + :param ordered: If ``True`` (the default) requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False`` requests + will be performed on the server in arbitrary order, possibly in + parallel, and all operations will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + + :return: An instance of :class:`~pymongo.results.BulkWriteResult`. + + .. seealso:: :ref:`writes-and-ids` + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. versionadded:: 3.0 + """ + common.validate_list("requests", requests) + + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) + for request in requests: + try: + request._add_to_bulk(blk) + except AttributeError: + raise TypeError(f"{request!r} is not a valid request") from None + + write_concern = self._write_concern_for(session) + bulk_api_result = blk.execute(write_concern, session, _Op.INSERT) + if bulk_api_result is not None: + return BulkWriteResult(bulk_api_result, True) + return BulkWriteResult({}, False) + + def _insert_one( + self, + doc: Mapping[str, Any], + ordered: bool, + write_concern: WriteConcern, + op_id: Optional[int], + bypass_doc_val: bool, + session: Optional[ClientSession], + comment: Optional[Any] = None, + ) -> Any: + """Internal helper for inserting a single document.""" + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + command = {"insert": self.name, "ordered": ordered, "documents": [doc]} + if comment is not None: + command["comment"] = comment + + def _insert_command( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> None: + if bypass_doc_val: + command["bypassDocumentValidation"] = True + + result = conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + + _check_write_command_response(result) + + self._database.client._retryable_write( + acknowledged, _insert_command, session, operation=_Op.INSERT + ) + + if not isinstance(doc, RawBSONDocument): + return doc.get("_id") + return None + + def insert_one( + self, + document: Union[_DocumentType, RawBSONDocument], + bypass_document_validation: bool = False, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertOneResult: + """Insert a single document. + + >>> await db.test.count_documents({'x': 1}) + 0 + >>> result = await db.test.insert_one({'x': 1}) + >>> result.inserted_id + ObjectId('54f112defba522406c9cc208') + >>> await db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} + + :param document: The document to insert. Must be a mutable mapping + type. If the document does not have an _id field one will be + added automatically. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.InsertOneResult`. + + .. seealso:: :ref:`writes-and-ids` + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. versionadded:: 3.0 + """ + common.validate_is_document_type("document", document) + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() # type: ignore[index] + + write_concern = self._write_concern_for(session) + return InsertOneResult( + self._insert_one( + document, + ordered=True, + write_concern=write_concern, + op_id=None, + bypass_doc_val=bypass_document_validation, + session=session, + comment=comment, + ), + write_concern.acknowledged, + ) + + @_csot.apply + def insert_many( + self, + documents: Iterable[Union[_DocumentType, RawBSONDocument]], + ordered: bool = True, + bypass_document_validation: bool = False, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertManyResult: + """Insert an iterable of documents. + + >>> await db.test.count_documents({}) + 0 + >>> result = await db.test.insert_many([{'x': i} for i in range(2)]) + >>> await result.inserted_ids + [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')] + >>> await db.test.count_documents({}) + 2 + + :param documents: A iterable of documents to insert. + :param ordered: If ``True`` (the default) documents will be + inserted on the server serially, in the order provided. If an error + occurs all remaining inserts are aborted. If ``False``, documents + will be inserted on the server in arbitrary order, possibly in + parallel, and all document inserts will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: An instance of :class:`~pymongo.results.InsertManyResult`. + + .. seealso:: :ref:`writes-and-ids` + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. versionadded:: 3.0 + """ + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): + raise TypeError("documents must be a non-empty list") + inserted_ids: list[ObjectId] = [] + + def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: + """A generator that validates documents and handles _ids.""" + for document in documents: + common.validate_is_document_type("document", document) + if not isinstance(document, RawBSONDocument): + if "_id" not in document: + document["_id"] = ObjectId() # type: ignore[index] + inserted_ids.append(document["_id"]) + yield (message._INSERT, document) + + write_concern = self._write_concern_for(session) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) + blk.ops = list(gen()) + blk.execute(write_concern, session, _Op.INSERT) + return InsertManyResult(inserted_ids, write_concern.acknowledged) + + def _update( + self, + conn: Connection, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + validate_boolean("upsert", upsert) + collation = validate_collation_or_none(collation) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + update_doc: dict[str, Any] = { + "q": criteria, + "u": document, + "multi": multi, + "upsert": upsert, + } + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + update_doc["collation"] = collation + if array_filters is not None: + if not acknowledged: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + else: + update_doc["arrayFilters"] = array_filters + if hint is not None: + if not acknowledged and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if not isinstance(hint, str): + hint = helpers._index_document(hint) + update_doc["hint"] = hint + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} + if let is not None: + common.validate_is_mapping("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + # Update command. + if bypass_doc_val: + command["bypassDocumentValidation"] = True + + # The command result has to be published for APM unmodified + # so we make a shallow copy here before adding updatedExisting. + result = ( + conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + ).copy() + _check_write_command_response(result) + # Add the updatedExisting field for compatibility. + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True + else: + result["updatedExisting"] = False + # MongoDB >= 2.6.0 returns the upsert _id in an array + # element. Break it out for backward compatibility. + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] + + if not acknowledged: + return None + return result + + def _update_retryable( + self, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + operation: str, + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + + def _update( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Optional[Mapping[str, Any]]: + return self._update( + conn, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) + + return self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _update, + session, + operation, + ) + + def replace_one( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Replace a single document matching the filter. + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + >>> result = await db.test.replace_one({'x': 1}, {'y': 1}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + + The *upsert* option can be used to insert a new document if a matching + document does not exist. + + >>> result = await db.test.replace_one({'x': 1}, {'x': 1}, True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('54f11e5c8891e756a6e1abd4') + >>> await db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} + + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_replace(replacement) + if let is not None: + common.validate_is_mapping("let", let) + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + replacement, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_one( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update a single document matching the filter. + + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = await db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. + + >>> result = await db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> await db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Added ``bypass_document_validation`` support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_many( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update one or more documents that match the filter. + + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = await db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 3 + >>> result.modified_count + 3 + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + multi=True, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def drop( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> None: + """Alias for :meth:`~pymongo.database.Database.drop_collection`. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. + + The following two calls are equivalent: + + >>> await db.foo.drop() + >>> await db.drop_collection("foo") + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.7 + :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + dbo.drop_collection( + self._name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) + + def _delete( + self, + conn: Connection, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + common.validate_is_mapping("filter", criteria) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + delete_doc = {"q": criteria, "limit": int(not multi)} + collation = validate_collation_or_none(collation) + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + delete_doc["collation"] = collation + if hint is not None: + if not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if not isinstance(hint, str): + hint = helpers._index_document(hint) + delete_doc["hint"] = hint + command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} + + if let is not None: + common.validate_is_document_type("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + + # Delete command. + result = conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + _check_write_command_response(result) + return result + + def _delete_retryable( + self, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + + def _delete( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Mapping[str, Any]: + return self._delete( + conn, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) + + return self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _delete, + session, + operation=_Op.DELETE, + ) + + def delete_one( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete a single document matching the filter. + + >>> await db.test.count_documents({'x': 1}) + 3 + >>> result = await db.test.delete_one({'x': 1}) + >>> result.deleted_count + 1 + >>> await db.test.count_documents({'x': 1}) + 2 + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + self._delete_retryable( + filter, + False, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def delete_many( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete one or more documents matching the filter. + + >>> await db.test.count_documents({'x': 1}) + 3 + >>> result = await db.test.delete_many({'x': 1}) + >>> result.deleted_count + 3 + >>> await db.test.count_documents({'x': 1}) + 0 + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + self._delete_retryable( + filter, + True, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def find_one( + self, filter: Optional[Any] = None, *args: Any, **kwargs: Any + ) -> Optional[_DocumentType]: + """Get a single document from the database. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single document, or ``None`` if no matching + document is found. + + The :meth:`find_one` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param filter: a dictionary specifying + the query to be performed OR any other type to be used as + the value for a query for ``"_id"``. + + :param args: any additional positional arguments + are the same as the arguments to :meth:`find`. + + :param kwargs: any additional keyword arguments + are the same as the arguments to :meth:`find`. + + :: code-block: python + + >>> await collection.find_one(max_time_ms=100) + + """ + if filter is not None and not isinstance(filter, abc.Mapping): + filter = {"_id": filter} + cursor = self.find(filter, *args, **kwargs) + for result in cursor.limit(-1): + return result + return None + + def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: + """Query the database. + + The `filter` argument is a query document that all results + must match. For example: + + >>> await db.test.find({"hello": "world"}) + + only matches documents that have a key "hello" with value + "world". Matches can have other keys *in addition* to + "hello". The `projection` argument is used to specify a subset + of fields that should be included in the result documents. By + limiting results to a certain subset of fields you can cut + down on network traffic and decoding time. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~pymongo.cursor.Cursor` corresponding to this query. + + The :meth:`find` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param filter: A query document that selects which documents + to include in the result set. Can be an empty document to include + all documents. + :param projection: a list of field names that should be + returned in the result set or a dict specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param skip: the number of documents to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return. A limit of 0 (the default) is equivalent to setting no + limit. + :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param cursor_type: the type of cursor to return. The valid + options are defined by :class:`~pymongo.cursor.CursorType`: + + - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of + this find call will return a standard cursor over the result set. + - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this + find call will be a tailable cursor - tailable cursors are only + for use with capped collections. They are not closed when the + last data is retrieved but are kept open and the cursor location + marks the final document position. If more data is received + iteration of the cursor will continue from the last document + received. For details, see the `tailable cursor documentation + `_. + - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result + of this find call will be a tailable cursor with the await flag + set. The server will wait for a few seconds after returning the + full result set so that it can capture and return additional data + added during the query. + - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this + find call will be an exhaust cursor. MongoDB will stream batched + results to the client without waiting for the client to request + each batch, reducing latency. See notes on compatibility below. + + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + :param allow_partial_results: if True, mongos will return + partial results if some shards are down instead of returning an + error. + :param oplog_replay: **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. + :param batch_size: Limits the number of documents returned in + a single batch. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param return_key: If True, return only the index keys in + each document. + :param show_record_id: If True, adds a field ``$recordId`` in + each document with the storage engine's internal record identifier. + :param snapshot: **DEPRECATED** - If True, prevents the + cursor from returning a document more than once because of an + intervening write operation. + :param hint: An index, in the same format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the + proper index to use for the query. + :param max_time_ms: Specifies a time limit for a query + operation. If the specified time is exceeded, the operation will be + aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass + this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor. + :param max_scan: **DEPRECATED** - The maximum number of + documents to scan. Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor. + :param min: A list of field, limit pairs specifying the + inclusive lower bound for all keys of a specific index in order. + Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. + :param max: A list of field, limit pairs specifying the + exclusive upper bound for all keys of a specific index in order. + Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. + :param comment: A string to attach to the query to help + interpret and trace the operation in the server logs and in profile + data. Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.comment` on the cursor. + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. The option has no effect if + MongoDB can satisfy the specified sort using an index, or if the + blocking sort requires less memory than the 100 MiB limit. This + option is only supported on MongoDB 4.4 and above. + + .. note:: There are a number of caveats to using + :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: + + - The `limit` option can not be used with an exhaust cursor. + + - Exhaust cursors are not supported by mongos and can not be + used with a sharded cluster. + + - A :class:`~pymongo.cursor.Cursor` instance created with the + :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an + exclusive :class:`~socket.socket` connection to MongoDB. If the + :class:`~pymongo.cursor.Cursor` is discarded without being + completely iterated the underlying :class:`~socket.socket` + connection will be closed and discarded without being returned to + the connection pool. + + .. versionchanged:: 4.0 + Removed the ``modifiers`` option. + Empty projections (eg {} or []) are passed to the server as-is, + rather than the previous behavior which substituted in a + projection of ``{"_id": 1}``. This means that an empty projection + will now return the entire document, not just the ``"_id"`` field. + + .. versionchanged:: 3.11 + Added the ``allow_disk_use`` option. + Deprecated the ``oplog_replay`` option. Support for this option is + deprecated in MongoDB 4.4. The query engine now automatically + optimizes queries against the oplog without requiring this + option to be set. + + .. versionchanged:: 3.7 + Deprecated the ``snapshot`` option, which is deprecated in MongoDB + 3.6 and removed in MongoDB 4.0. + Deprecated the ``max_scan`` option. Support for this option is + deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit + server-side execution time. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.5 + Added the options ``return_key``, ``show_record_id``, ``snapshot``, + ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and + ``comment``. + Deprecated the ``modifiers`` option. + + .. versionchanged:: 3.4 + Added support for the ``collation`` option. + + .. versionchanged:: 3.0 + Changed the parameter names ``spec``, ``fields``, ``timeout``, and + ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, + and ``allow_partial_results`` respectively. + Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` + options. + Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, + ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, + ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and + slave_okay parameters. + Removed ``compile_re`` option: PyMongo now always + represents BSON regular expressions as :class:`~bson.regex.Regex` + objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to + convert from a BSON regular expression to a Python regular + expression object. + Soft deprecated the ``manipulate`` option. + + .. seealso:: The MongoDB documentation on `find `_. + """ + cursor = Cursor(self, *args, **kwargs) + cursor._supports_exhaust() + return cursor + + def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: + """Query the database and retrieve batches of raw BSON. + + Similar to the :meth:`find` method but returns a + :class:`~pymongo.cursor.RawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = await db.test.find_raw_batches() + >>> async for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: find_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Instead of ignoring the user-specified read concern, this method + now sends it to the server when connected to MongoDB 3.6+. + + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("find_raw_batches does not support auto encryption") + return RawBatchCursor(self, *args, **kwargs) + + def _count_cmd( + self, + session: Optional[ClientSession], + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[Collation], + ) -> int: + """Internal count command helper.""" + # XXX: "ns missing" checks can be removed when we drop support for + # MongoDB 3.0, see SERVER-17051. + res = self._command( + conn, + cmd, + read_preference=read_preference, + allowable_errors=["ns missing"], + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + if res.get("errmsg", "") == "ns missing": + return 0 + return int(res["n"]) + + def _aggregate_one_result( + self, + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + ) -> Optional[Mapping[str, Any]]: + """Internal helper to run an aggregate that returns a single result.""" + result = self._command( + conn, + cmd, + read_preference, + allowable_errors=[26], # Ignore NamespaceNotFound. + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + # cursor will not be present for NamespaceNotFound errors. + if "cursor" not in result: + return None + batch = result["cursor"]["firstBatch"] + return batch[0] if batch else None + + def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: + """Get an estimate of the number of documents in this collection using + collection metadata. + + The :meth:`estimated_document_count` method is **not** supported in a + transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. + + .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ + """ + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"count": self._name} + cmd.update(kwargs) + return self._count_cmd(session, conn, read_preference, cmd, collation=None) + + return self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) + + def count_documents( + self, + filter: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> int: + """Count the number of documents in this collection. + + .. note:: For a fast count of the total documents in a collection see + :meth:`estimated_document_count`. + + The :meth:`count_documents` method is supported in a transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `skip` (int): The number of matching documents to skip before + returning results. + - `limit` (int): The maximum number of documents to count. Must be + a positive integer. If not provided, no limit is imposed. + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `hint` (string or list of tuples): The index to use. Specify either + the index name as a string or the index specification as a list of + tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). + + The :meth:`count_documents` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + .. note:: When migrating from :meth:`count` to :meth:`count_documents` + the following query operators must be replaced: + + +-------------+-------------------------------------+ + | Operator | Replacement | + +=============+=====================================+ + | $where | `$expr`_ | + +-------------+-------------------------------------+ + | $near | `$geoWithin`_ with `$center`_ | + +-------------+-------------------------------------+ + | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | + +-------------+-------------------------------------+ + + :param filter: A query document that selects which documents + to count in the collection. Can be an empty document to count all + documents. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + + .. versionadded:: 3.7 + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ + """ + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + cmd = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + if "hint" in kwargs and not isinstance(kwargs["hint"], str): + kwargs["hint"] = helpers._index_document(kwargs["hint"]) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + cmd.update(kwargs) + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + result = self._aggregate_one_result(conn, read_preference, cmd, collation, session) + if not result: + return 0 + return result["n"] + + return self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) + + def _retryable_non_cursor_read( + self, + func: Callable[ + [Optional[ClientSession], Server, Connection, Optional[_ServerMode]], + T, + ], + session: Optional[ClientSession], + operation: str, + ) -> T: + """Non-cursor read helper to handle implicit session creation.""" + client = self._database.client + with client._tmp_session(session) as s: + return client._retryable_read(func, self._read_preference_for(s), s, operation) + + def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create one or more indexes on this collection. + + >>> from pymongo import IndexModel, ASCENDING, DESCENDING + >>> index1 = IndexModel([("hello", DESCENDING), + ... ("world", ASCENDING)], name="hello_world") + >>> index2 = IndexModel([("goodbye", DESCENDING)]) + >>> await db.test.create_indexes([index1, index2]) + ["hello_world", "goodbye_-1"] + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + .. versionadded:: 3.0 + + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ + """ + common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment + return self._create_indexes(indexes, session, **kwargs) + + @_csot.apply + def _create_indexes( + self, indexes: Sequence[IndexModel], session: Optional[ClientSession], **kwargs: Any + ) -> list[str]: + """Internal createIndexes helper. + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + """ + names = [] + with self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn: + supports_quorum = conn.max_wire_version >= 9 + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in indexes: + if not isinstance(index, IndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.IndexModel" + ) + document = index.document + names.append(document["name"]) + yield document + + cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + if "commitQuorum" in kwargs and not supports_quorum: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes" + ) + + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + write_concern=self._write_concern_for(session), + session=session, + ) + return names + + def create_index( + self, + keys: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> str: + """Creates an index on this collection. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str` and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + To create a single key ascending index on the key ``'mike'`` we just + use a string argument:: + + >>> await my_collection.create_index("mike") + + For a compound index on ``'mike'`` descending and ``'eliot'`` + ascending we need to use a list of tuples:: + + >>> await my_collection.create_index([("mike", pymongo.DESCENDING), + ... "eliot"]) + + All optional index creation parameters should be passed as + keyword arguments to this method. For example:: + + >>> await my_collection.create_index([("mike", pymongo.DESCENDING)], + ... background=True) + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the + index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after seconds. The indexed field must + be a UTC datetime or the data will not expire. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the {"$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. + + See the MongoDB documentation for a full list of supported options by + server version. + + .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The + option is silently ignored by the server and unique index builds + using the option will fail if a duplicate value is detected. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + :param keys: a single key or a list of (key, direction) + pairs specifying the index to create + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional index creation + options (see the above list) should be passed as keyword + arguments. + + .. versionchanged:: 4.4 + Allow passing a list containing (key, direction) pairs + or keys for the ``keys`` parameter. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added the ``hidden`` option. + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for passing maxTimeMS + in kwargs. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.2 + Added partialFilterExpression to support partial indexes. + .. versionchanged:: 3.0 + Renamed `key_or_list` to `keys`. Removed the `cache_for` option. + :meth:`create_index` no longer caches index names. Removed support + for the drop_dups and bucket_size aliases. + + .. seealso:: The MongoDB documentation on `indexes `_. + + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ + """ + cmd_options = {} + if "maxTimeMS" in kwargs: + cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") + if comment is not None: + cmd_options["comment"] = comment + index = IndexModel(keys, **kwargs) + return (self._create_indexes([index], session, **cmd_options))[0] + + def drop_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops all indexes on this collection. + + Can be used on non-existent collections or collections with no indexes. + Raises OperationFailure on an error. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + """ + if comment is not None: + kwargs["comment"] = comment + self._drop_index("*", session=session, **kwargs) + + @_csot.apply + def drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops the specified index on this collection. + + Can be used on non-existent collections or collections with no + indexes. Raises OperationFailure on an error (e.g. trying to + drop an index that does not exist). `index_or_name` + can be either an index name (as returned by `create_index`), + or an index specifier (as passed to `create_index`). An index + specifier should be a list of (key, direction) pairs. Raises + TypeError if index is not an instance of (str, unicode, list). + + .. warning:: + + if a custom name was used on index creation (by + passing the `name` parameter to :meth:`create_index`) the index + **must** be dropped by name. + + :param index_or_name: index (or name of index) to drop + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + self._drop_index(index_or_name, session, comment, **kwargs) + + @_csot.apply + def _drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + name = index_or_name + if isinstance(index_or_name, list): + name = helpers._gen_index_name(index_or_name) + + if not isinstance(name, str): + raise TypeError("index_or_name must be an instance of str or list") + + cmd = {"dropIndexes": self._name, "index": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) + + def list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the index documents for this collection. + + >>> async for index in db.test.list_indexes(): + ... print(index) + ... + SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionadded:: 3.0 + """ + return self._list_indexes(session, comment) + + def _list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: + codec_options: CodecOptions = CodecOptions(SON) + coll = cast( + Collection[MutableMapping[str, Any]], + self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + explicit_session = session is not None + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: + cmd = {"listIndexes": self._name, "cursor": {}} + if comment is not None: + cmd["comment"] = comment + + try: + cursor = ( + self._command(conn, cmd, read_preference, codec_options, session=session) + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=session, + explicit_session=explicit_session, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + with self._database.client._tmp_session(session, False) as s: + return self._database.client._retryable_read( + _cmd, read_pref, s, operation=_Op.LIST_INDEXES + ) + + def index_information( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get information on this collection's indexes. + + Returns a dictionary where the keys are index names (as + returned by create_index()) and the values are dictionaries + containing information about each index. The dictionary is + guaranteed to contain at least a single key, ``"key"`` which + is a list of (key, direction) pairs specifying the index (as + passed to create_index()). It will also contain any other + metadata about the indexes, except for the ``"ns"`` and + ``"name"`` keys, which are cleaned. Example output might look + like this: + + >>> db.test.create_index("x", unique=True) + 'x_1' + >>> db.test.index_information() + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + cursor = self._list_indexes(session=session, comment=comment) + info = {} + for index in cursor: + index["key"] = list(index["key"].items()) + index = dict(index) # noqa: PLW2901 + info[index.pop("name")] = index + return info + + def list_search_indexes( + self, + name: Optional[str] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[Mapping[str, Any]]: + """Return a cursor over search indexes for the current collection. + + :param name: If given, the name of the index to search + for. Only indexes with matching index names will be returned. + If not given, all search indexes for the current collection + will be returned. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if name is None: + pipeline: _Pipeline = [{"$listSearchIndexes": {}}] + else: + pipeline = [{"$listSearchIndexes": {"name": name}}] + + coll = self.with_options( + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + read_concern=DEFAULT_READ_CONCERN, + ) + cmd = _CollectionAggregationCommand( + coll, + CommandCursor, + pipeline, + kwargs, + explicit_session=session is not None, + comment=comment, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.LIST_SEARCH_INDEX, + ) + + def create_search_index( + self, + model: Union[Mapping[str, Any], SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Any = None, + **kwargs: Any, + ) -> str: + """Create a single search index for the current collection. + + :param model: The model for the new search index. + It can be given as a :class:`~pymongo.operations.SearchIndexModel` + instance or a dictionary with a model "definition" and optional + "name". + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: The name of the new search index. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if not isinstance(model, SearchIndexModel): + model = SearchIndexModel(**model) + return (self._create_search_indexes([model], session, comment, **kwargs))[0] + + def create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create multiple search indexes for the current collection. + + :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: A list of the newly created search index names. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + return self._create_search_indexes(models, session, comment, **kwargs) + + def _create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in models: + if not isinstance(index, SearchIndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.SearchIndexModel" + ) + yield index.document + + cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + + with self._conn_for_writes(session, operation=_Op.CREATE_SEARCH_INDEXES) as conn: + resp = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + return [index["name"] for index in resp["indexesCreated"]] + + def drop_search_index( + self, + name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Delete a search index by index name. + + :param name: The name of the search index to be deleted. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the dropSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"dropSearchIndex": self._name, "name": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + def update_search_index( + self, + name: str, + definition: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Update a search index by replacing the existing index definition with the provided definition. + + :param name: The name of the search index to be updated. + :param definition: The new search index definition. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the updateSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"updateSearchIndex": self._name, "name": name, "definition": definition} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + def options( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get the options set on this collection. + + Returns a dictionary of options and their values - see + :meth:`~pymongo.database.Database.create_collection` for more + information on the possible options. Returns an empty + dictionary if the collection has not been created yet. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + cursor = dbo.list_collections(session=session, filter={"name": self._name}, comment=comment) + + result = None + for doc in cursor: + result = doc + break + + if not result: + return {} + + options = result.get("options", {}) + assert options is not None + if "create" in options: + del options["create"] + + return options + + @_csot.apply + def _aggregate( + self, + aggregation_command: Type[_AggregationCommand], + pipeline: _Pipeline, + cursor_class: Type[CommandCursor], + session: Optional[ClientSession], + explicit_session: bool, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + if comment is not None: + kwargs["comment"] = comment + cmd = aggregation_command( + self, + cursor_class, + pipeline, + kwargs, + explicit_session, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + def aggregate( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + """Perform an aggregation using the aggregation framework on this + collection. + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`Collection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + + .. note:: This method does not support the 'explain' option. Please + use `PyMongoExplain `_ + instead. An example is included in the :ref:`aggregate-examples` + documentation. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + :param pipeline: a list of aggregation pipeline stages + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. + .. versionchanged:: 4.0 + Removed the ``useCursor`` option. + .. versionchanged:: 3.9 + Apply this collection's read concern to pipelines containing the + `$out` stage when connected to MongoDB >= 4.2. + Added support for the ``$merge`` pipeline stage. + Aggregations that write always use read preference + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + .. versionchanged:: 3.6 + Added the `session` parameter. Added the `maxAwaitTimeMS` option. + Deprecated the `useCursor` option. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.0 + The :meth:`aggregate` method always returns a CommandCursor. The + pipeline argument must be a list. + + .. seealso:: :doc:`/examples/aggregation` + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + with self._database.client._tmp_session(session, close=False) as s: + return self._aggregate( + _CollectionAggregationCommand, + pipeline, + CommandCursor, + session=s, + explicit_session=session is not None, + let=let, + comment=comment, + **kwargs, + ) + + def aggregate_raw_batches( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> RawBatchCursor[_DocumentType]: + """Perform an aggregation and retrieve batches of raw BSON. + + Similar to the :meth:`aggregate` method but returns a + :class:`~pymongo.cursor.RawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = await db.test.aggregate_raw_batches([ + ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) + >>> async for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") + if comment is not None: + kwargs["comment"] = comment + with self._database.client._tmp_session(session, close=False) as s: + return cast( + RawBatchCursor[_DocumentType], + self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + explicit_session=session is not None, + **kwargs, + ), + ) + + @_csot.apply + def rename( + self, + new_name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> MutableMapping[str, Any]: + """Rename this collection. + + If operating in auth mode, client must be authorized as an + admin to perform this operation. Raises :class:`TypeError` if + `new_name` is not an instance of :class:`str`. + Raises :class:`~pymongo.errors.InvalidName` + if `new_name` is not a valid collection name. + + :param new_name: new name for this collection + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional arguments to the rename command + may be passed as keyword arguments to this helper method + (i.e. ``dropTarget=True``) + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + if not isinstance(new_name, str): + raise TypeError("new_name must be an instance of str") + + if not new_name or ".." in new_name: + raise InvalidName("collection names cannot be empty") + if new_name[0] == "." or new_name[-1] == ".": + raise InvalidName("collection names must not start or end with '.'") + if "$" in new_name and not new_name.startswith("oplog.$main"): + raise InvalidName("collection names must not contain '$'") + + new_name = f"{self._database.name}.{new_name}" + cmd = {"renameCollection": self._full_name, "to": new_name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + write_concern = self._write_concern_for_cmd(cmd, session) + + with self._conn_for_writes(session, operation=_Op.RENAME) as conn: + with self._database.client._tmp_session(session) as s: + return conn.command( + "admin", + cmd, + write_concern=write_concern, + parse_write_concern_error=True, + session=s, + client=self._database.client, + ) + + def distinct( + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list: + """Get a list of distinct values for `key` among all documents + in this collection. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + All optional distinct parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow the count + command to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + + The :meth:`distinct` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param key: name of the field for which we want to get the distinct + values + :param filter: A query document that specifies the documents + from which to retrieve the distinct values. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + """ + if not isinstance(key, str): + raise TypeError("key must be an instance of str") + cmd = {"distinct": self._name, "key": key} + if filter is not None: + if "query" in kwargs: + raise ConfigurationError("can't pass both filter and query") + kwargs["query"] = filter + collation = validate_collation_or_none(kwargs.pop("collation", None)) + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> list: + return ( + self._command( + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + ) + )["values"] + + return self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) + + def _find_and_modify( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + sort: Optional[_IndexList], + upsert: Optional[bool] = None, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping] = None, + **kwargs: Any, + ) -> Any: + """Internal findAndModify helper.""" + common.validate_is_mapping("filter", filter) + if not isinstance(return_document, bool): + raise ValueError( + "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert + if hint is not None: + if not isinstance(hint, str): + hint = helpers._index_document(hint) + + write_concern = self._write_concern_for_cmd(cmd, session) + + def _find_and_modify_helper( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Any: + acknowledged = write_concern.acknowledged + if array_filters is not None: + if not acknowledged: + raise ConfigurationError( + "arrayFilters is unsupported for unacknowledged writes." + ) + cmd["arrayFilters"] = list(array_filters) + if hint is not None: + if conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." + ) + elif not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." + ) + cmd["hint"] = hint + out = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) + _check_write_command_response(out) + + return out.get("value") + + return self._database.client._retryable_write( + write_concern.acknowledged, + _find_and_modify_helper, + session, + operation=_Op.FIND_AND_MODIFY, + ) + + def find_one_and_delete( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and deletes it, returning the document. + + >>> await db.test.count_documents({'x': 1}) + 2 + >>> await db.test.find_one_and_delete({'x': 1}) + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + >>> await db.test.count_documents({'x': 1}) + 1 + + If multiple documents match *filter*, a *sort* can be applied. + + >>> async for doc in db.test.find({'x': 1}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> await db.test.find_one_and_delete( + ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) + {'x': 1, '_id': 2} + + The *projection* option can be used to limit the fields returned. + + >>> await db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) + {'x': 1} + + :param filter: A query that matches the document to delete. + :param projection: a list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is deleted. + :param hint: An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) + + def find_one_and_replace( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and replaces it, returning either the + original or the replaced document. + + The :meth:`find_one_and_replace` method differs from + :meth:`find_one_and_update` by replacing the document matched by + *filter*, rather than modifying the existing document. + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> await db.test.find_one_and_replace({'x': 1}, {'y': 1}) + {'x': 1, '_id': 0} + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + :param filter: A query that matches the document to replace. + :param replacement: The replacement document. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is replaced. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was replaced, or ``None`` + if no document matches. If + :attr:`ReturnDocument.AFTER`, returns the replaced + or inserted document. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_replace(replacement) + kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) + + def find_one_and_update( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and updates it, returning either the + original or the updated document. + + >>> await db.test.find_one_and_update( + ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) + {'_id': 665, 'done': False, 'count': 25}} + + Returns ``None`` if no document matches the filter. + + >>> await db.test.find_one_and_update( + ... {'_exists': False}, {'$inc': {'count': 1}}) + + When the filter matches, by default :meth:`find_one_and_update` + returns the original version of the document before the update was + applied. To return the updated (or inserted in the case of + *upsert*) version of the document instead, use the *return_document* + option. + + >>> from pymongo import ReturnDocument + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... return_document=ReturnDocument.AFTER) + {'_id': 'userid', 'seq': 1} + + You can limit the fields returned with the *projection* option. + + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... return_document=ReturnDocument.AFTER) + {'seq': 2} + + The *upsert* option can be used to create the document if it doesn't + already exist. + + >>> await db.example.delete_many({}).deleted_count + 1 + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... upsert=True, + ... return_document=ReturnDocument.AFTER) + {'seq': 1} + + If multiple documents match *filter*, a *sort* can be applied. + + >>> async for doc in db.test.find({'done': True}): + ... print(doc) + ... + {'_id': 665, 'done': True, 'result': {'count': 26}} + {'_id': 701, 'done': True, 'result': {'count': 17}} + >>> await db.test.find_one_and_update( + ... {'done': True}, + ... {'$set': {'final': True}}, + ... sort=[('_id', pymongo.DESCENDING)]) + {'_id': 701, 'done': True, 'result': {'count': 17}} + + :param filter: A query that matches the document to update. + :param update: The update operations to apply. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is updated. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was updated. If + :attr:`ReturnDocument.AFTER`, returns the updated + or inserted document. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` options. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py new file mode 100644 index 0000000000..a2a5d8b192 --- /dev/null +++ b/pymongo/synchronous/command_cursor.py @@ -0,0 +1,415 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CommandCursor class to iterate over command results.""" +from __future__ import annotations + +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterator, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.synchronous.cursor import _ConnectionManager +from pymongo.synchronous.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _RawBatchGetMore, +) +from pymongo.synchronous.response import PinnedResponse +from pymongo.synchronous.typings import _Address, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +class CommandCursor(Generic[_DocumentType]): + """A cursor / iterator over command cursors.""" + + _getmore_class = _GetMore + + def __init__( + self, + collection: Collection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[ClientSession] = None, + explicit_session: bool = False, + comment: Any = None, + ) -> None: + """Create a new command cursor.""" + self._sock_mgr: Any = None + self._collection: Collection[_DocumentType] = collection + self._id = cursor_info["id"] + self._data = deque(cursor_info["firstBatch"]) + self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + "postBatchResumeToken" + ) + self._address = address + self._batch_size = batch_size + self._max_await_time_ms = max_await_time_ms + self._session = session + self._explicit_session = explicit_session + self._killed = self._id == 0 + self._comment = comment + if _IS_SYNC and self._killed: + self._end_session(True) # type: ignore[unused-coroutine] + + if "ns" in cursor_info: # noqa: SIM401 + self._ns = cursor_info["ns"] + else: + self._ns = collection.full_name + + self.batch_size(batch_size) + + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError("max_await_time_ms must be an integer or None") + + def __del__(self) -> None: + if _IS_SYNC: + self._die(False) # type: ignore[unused-coroutine] + + def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + + :param batch_size: The size of each batch of results requested. + """ + if not isinstance(batch_size, int): + raise TypeError("batch_size must be an integer") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + + self._batch_size = batch_size == 1 and 2 or batch_size + return self + + def _has_next(self) -> bool: + """Returns `True` if the cursor has documents remaining from the + previous batch. + """ + return len(self._data) > 0 + + @property + def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: + """Retrieve the postBatchResumeToken from the response to a + changeStream aggregate or getMore. + """ + return self._postbatchresumetoken + + def _maybe_pin_connection(self, conn: Connection) -> None: + client = self._collection.database.client + if not client._should_pin_cursor(self._session): + return + if not self._sock_mgr: + conn.pin_cursor() + conn_mgr = _ConnectionManager(conn, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. + if self._id == 0: + conn_mgr.close() + else: + self._sock_mgr = conn_mgr + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration`. Best to use a for loop:: + + async for doc in collection.aggregate(pipeline): + print(doc) + + .. note:: :attr:`alive` can be True while iterating a cursor from + a failed server. In this case :attr:`alive` will return False after + :meth:`next` fails to retrieve the next batch of results from the + server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> int: + """Returns the id of the cursor.""" + return self._id + + @property + def address(self) -> Optional[_Address]: + """The (host, port) of the server used, or None. + + .. versionadded:: 3.0 + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._explicit_session: + return self._session + return None + + def _die(self, synchronous: bool = False) -> None: + """Closes this cursor.""" + already_killed = self._killed + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, self._ns) + else: + # Skip killCursors. + cursor_id = 0 + address = None + self._collection.database.client._cleanup_cursor( + synchronous, + cursor_id, + address, + self._sock_mgr, + self._session, + self._explicit_session, + ) + if not self._explicit_session: + self._session = None + self._sock_mgr = None + + def _end_session(self, synchronous: bool) -> None: + if self._session and not self._explicit_session: + self._session._end_session(lock=synchronous) + self._session = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die(True) + + def _send_message(self, operation: _GetMore) -> None: + """Send a getmore message and handle the response.""" + client = self._collection.database.client + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die(False) + else: + # Return the session and pinned connection, if necessary. + self.close() + raise + except ConnectionFailure: + # Don't send killCursors because the cursor is already closed. + self._killed = True + # Return the session and pinned connection, if necessary. + self.close() + raise + except Exception: + self.close() + raise + + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) + if response.from_command: + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self._postbatchresumetoken = cursor.get("postBatchResumeToken") + self._id = cursor["id"] + else: + documents = response.docs + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + + if self._id == 0: + self.close() + self._data = deque(documents) + + def _refresh(self) -> int: + """Refreshes the cursor with more data from the server. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. + """ + if len(self._data) or self._killed: + return len(self._data) + + if self._id: # Get More + dbname, collname = self._ns.split(".", 1) + read_pref = self._collection._read_preference_for(self.session) + self._send_message( + self._getmore_class( + dbname, + collname, + self._batch_size, + self._id, + self._collection.codec_options, + read_pref, + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + False, + self._comment, + ) + ) + else: # Cursor id is zero nothing else to return + self._die(True) + + return len(self._data) + + def __iter__(self) -> Iterator[_DocumentType]: + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + # Block until a document is returnable. + while self.alive: + doc = self._try_next(True) + if doc is not None: + return doc + + raise StopIteration + + def __next__(self) -> _DocumentType: + return self.next() + + def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: + """Advance the cursor blocking for at most one getMore command.""" + if not len(self._data) and not self._killed and get_more_allowed: + self._refresh() + if len(self._data): + return self._data.popleft() + else: + return None + + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next document without waiting + indefinitely for data. + + If no document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there is no additional data) then ``None`` is returned. + + :return: The next document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 4.5 + """ + return self._try_next(get_more_allowed=True) + + def __enter__(self) -> CommandCursor[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def to_list(self) -> list[_DocumentType]: + return [x for x in self] # noqa: C416,RUF100 + + +class RawBatchCommandCursor(CommandCursor[_DocumentType]): + _getmore_class = _RawBatchGetMore + + def __init__( + self, + collection: Collection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[ClientSession] = None, + explicit_session: bool = False, + comment: Any = None, + ) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.collection.Collection.aggregate_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + assert not cursor_info.get("firstBatch") + super().__init__( + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + explicit_session, + comment, + ) + + def _unpack_response( # type: ignore[override] + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[Mapping[str, Any]]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response # type: ignore[return-value] + + def __getitem__(self, index: int) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/common.py b/pymongo/synchronous/common.py similarity index 97% rename from pymongo/common.py rename to pymongo/synchronous/common.py index 57560a7b0d..13e58adedd 100644 --- a/pymongo/common.py +++ b/pymongo/synchronous/common.py @@ -40,20 +40,22 @@ from bson.binary import UuidRepresentation from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry from bson.raw_bson import RawBSONDocument -from pymongo.compression_support import ( - validate_compressors, - validate_zlib_compression_level, -) from pymongo.driver_info import DriverInfo from pymongo.errors import ConfigurationError -from pymongo.monitoring import _validate_event_listeners from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import _MONGOS_MODES, _ServerMode from pymongo.server_api import ServerApi +from pymongo.synchronous.compression_support import ( + validate_compressors, + validate_zlib_compression_level, +) +from pymongo.synchronous.monitoring import _validate_event_listeners +from pymongo.synchronous.read_preferences import _MONGOS_MODES, _ServerMode from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean if TYPE_CHECKING: - from pymongo.client_session import ClientSession + from pymongo.synchronous.client_session import ClientSession + +_IS_SYNC = True ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) @@ -378,7 +380,7 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: def validate_auth_mechanism(option: str, value: Any) -> str: """Validate the authMechanism URI option.""" - from pymongo.auth import MECHANISMS + from pymongo.synchronous.auth import MECHANISMS if value not in MECHANISMS: raise ValueError(f"{option} must be in {tuple(MECHANISMS)}") @@ -444,7 +446,7 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni elif key in ["ALLOWED_HOSTS"] and isinstance(value, list): props[key] = value elif key in ["OIDC_CALLBACK", "OIDC_HUMAN_CALLBACK"]: - from pymongo.auth_oidc import OIDCCallback + from pymongo.synchronous.auth_oidc import OIDCCallback if not isinstance(value, OIDCCallback): raise ValueError("callback must be an OIDCCallback object") @@ -640,7 +642,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A """Validate the driver keyword arg.""" if value is None: return value - from pymongo.encryption_options import AutoEncryptionOpts + from pymongo.synchronous.encryption_options import AutoEncryptionOpts if not isinstance(value, AutoEncryptionOpts): raise TypeError(f"{option} must be an instance of AutoEncryptionOpts") @@ -902,7 +904,7 @@ def __init__( ) -> None: if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") - self.__codec_options = codec_options + self._codec_options = codec_options if not isinstance(read_preference, _ServerMode): raise TypeError( @@ -910,24 +912,24 @@ def __init__( "pymongo.read_preferences for valid " "options." ) - self.__read_preference = read_preference + self._read_preference = read_preference if not isinstance(write_concern, WriteConcern): raise TypeError( "write_concern must be an instance of pymongo.write_concern.WriteConcern" ) - self.__write_concern = write_concern + self._write_concern = write_concern if not isinstance(read_concern, ReadConcern): raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") - self.__read_concern = read_concern + self._read_concern = read_concern @property def codec_options(self) -> CodecOptions: """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. """ - return self.__codec_options + return self._codec_options @property def write_concern(self) -> WriteConcern: @@ -937,7 +939,7 @@ def write_concern(self) -> WriteConcern: .. versionchanged:: 3.0 The :attr:`write_concern` attribute is now read only. """ - return self.__write_concern + return self._write_concern def _write_concern_for(self, session: Optional[ClientSession]) -> WriteConcern: """Read only access to the write concern of this instance or session.""" @@ -953,14 +955,14 @@ def read_preference(self) -> _ServerMode: .. versionchanged:: 3.0 The :attr:`read_preference` attribute is now read only. """ - return self.__read_preference + return self._read_preference def _read_preference_for(self, session: Optional[ClientSession]) -> _ServerMode: """Read only access to the read preference of this instance or session.""" # Override this operation's read preference with the transaction's. if session: - return session._txn_read_preference() or self.__read_preference - return self.__read_preference + return session._txn_read_preference() or self._read_preference + return self._read_preference @property def read_concern(self) -> ReadConcern: @@ -969,7 +971,7 @@ def read_concern(self) -> ReadConcern: .. versionadded:: 3.2 """ - return self.__read_concern + return self._read_concern class _CaseInsensitiveDictionary(MutableMapping[str, Any]): diff --git a/pymongo/compression_support.py b/pymongo/synchronous/compression_support.py similarity index 97% rename from pymongo/compression_support.py rename to pymongo/synchronous/compression_support.py index 2f155352d2..e5153f8c87 100644 --- a/pymongo/compression_support.py +++ b/pymongo/synchronous/compression_support.py @@ -16,8 +16,11 @@ import warnings from typing import Any, Iterable, Optional, Union -from pymongo.hello import HelloCompat -from pymongo.helpers import _SENSITIVE_COMMANDS +from pymongo.helpers_constants import _SENSITIVE_COMMANDS +from pymongo.synchronous.hello_compat import HelloCompat + +_IS_SYNC = True + _SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} _NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} @@ -146,6 +149,7 @@ class ZstdContext: def compress(data: bytes) -> bytes: # ZstdCompressor is not thread safe. # TODO: Use a pool? + import zstandard return zstandard.ZstdCompressor().compress(data) diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py new file mode 100644 index 0000000000..b74266a74e --- /dev/null +++ b/pymongo/synchronous/cursor.py @@ -0,0 +1,1289 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + +import copy +import warnings +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson import RE_TYPE, _convert_raw_document_lists_to_streams +from bson.code import Code +from bson.son import SON +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _create_lock +from pymongo.synchronous import helpers +from pymongo.synchronous.collation import validate_collation_or_none +from pymongo.synchronous.common import ( + validate_is_document_type, + validate_is_mapping, +) +from pymongo.synchronous.helpers import next +from pymongo.synchronous.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) +from pymongo.synchronous.response import PinnedResponse +from pymongo.synchronous.typings import _Address, _CollationIn, _DocumentOut, _DocumentType +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from _typeshed import SupportsItems + + from bson.codec_options import CodecOptions + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.read_preferences import _ServerMode + +_IS_SYNC = True + + +class _ConnectionManager: + """Used with exhaust cursors to ensure the connection is returned.""" + + def __init__(self, conn: Connection, more_to_come: bool): + self.conn: Optional[Connection] = conn + self.more_to_come = more_to_come + self._alock = _create_lock() + + def update_exhaust(self, more_to_come: bool) -> None: + self.more_to_come = more_to_come + + def close(self) -> None: + """Return this instance's connection to the connection pool.""" + if self.conn: + self.conn.unpin() + self.conn = None + + +class Cursor(Generic[_DocumentType]): + _query_class = _Query + _getmore_class = _GetMore + + def __init__( + self, + collection: Collection[_DocumentType], + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Optional[Any] = None, + session: Optional[ClientSession] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None, + ) -> None: + """Create a new cursor. + + Should not be called directly by application developers - see + :meth:`~pymongo.collection.Collection.find` instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + # Initialize all attributes used in __del__ before possibly raising + # an error to avoid attribute errors during garbage collection. + self._collection: Collection[_DocumentType] = collection + self._id: Any = None + self._exhaust = False + self._sock_mgr: Any = None + self._killed = False + self._session: Optional[ClientSession] + + if session: + self._session = session + self._explicit_session = True + else: + self._session = None + self._explicit_session = False + + spec: Mapping[str, Any] = filter or {} + validate_is_mapping("filter", spec) + if not isinstance(skip, int): + raise TypeError("skip must be an instance of int") + if not isinstance(limit, int): + raise TypeError("limit must be an instance of int") + validate_boolean("no_cursor_timeout", no_cursor_timeout) + if no_cursor_timeout and not self._explicit_session: + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://mongodb.com/docs/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): + raise ValueError("not a valid value for cursor_type") + validate_boolean("allow_partial_results", allow_partial_results) + validate_boolean("oplog_replay", oplog_replay) + if not isinstance(batch_size, int): + raise TypeError("batch_size must be an integer") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. + if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) + + if projection is not None: + projection = helpers._fields_list_to_dict(projection, "projection") + + if let is not None: + validate_is_document_type("let", let) + + self._let = let + self._spec = spec + self._has_filter = filter is not None + self._projection = projection + self._skip = skip + self._limit = limit + self._batch_size = batch_size + self._ordering = sort and helpers._index_document(sort) or None + self._max_scan = max_scan + self._explain = False + self._comment = comment + self._max_time_ms = max_time_ms + self._max_await_time_ms: Optional[int] = None + self._max: Optional[Union[dict[Any, Any], _Sort]] = max + self._min: Optional[Union[dict[Any, Any], _Sort]] = min + self._collation = validate_collation_or_none(collation) + self._return_key = return_key + self._show_record_id = show_record_id + self._allow_disk_use = allow_disk_use + self._snapshot = snapshot + self._hint: Union[str, dict[str, Any], None] + self._set_hint(hint) + + # This is ugly. People want to be able to do cursor[5:5] and + # get an empty result set (old behavior was an + # exception). It's hard to do that right, though, because the + # server uses limit(0) to mean 'no limit'. So we set __empty + # in that case and check for it when iterating. We also unset + # it anytime we change __limit. + self._empty = False + + self._data: deque = deque() + self._address: Optional[_Address] = None + self._retrieved = 0 + + self._codec_options = collection.codec_options + # Read preference is set when the initial find is sent. + self._read_preference: Optional[_ServerMode] = None + self._read_concern = collection.read_concern + + self._query_flags = cursor_type + self._cursor_type = cursor_type + if no_cursor_timeout: + self._query_flags |= _QUERY_OPTIONS["no_timeout"] + if allow_partial_results: + self._query_flags |= _QUERY_OPTIONS["partial"] + if oplog_replay: + self._query_flags |= _QUERY_OPTIONS["oplog_replay"] + + # The namespace to use for find/getMore commands. + self._dbname = collection.database.name + self._collname = collection.name + + def _supports_exhaust(self) -> None: + # Exhaust cursor support + if self._cursor_type == CursorType.EXHAUST: + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + self._exhaust = True + + @property + def collection(self) -> Collection[_DocumentType]: + """The :class:`~pymongo.collection.Collection` that this + :class:`Cursor` is iterating. + """ + return self._collection + + @property + def retrieved(self) -> int: + """The number of documents retrieved so far.""" + return self._retrieved + + def __del__(self) -> None: + if _IS_SYNC: + self._die() # type: ignore[unused-coroutine] + + def clone(self) -> Cursor[_DocumentType]: + """Get a clone of this cursor. + + Returns a new Cursor instance with options matching those that have + been set on the current instance. The clone will be completely + unevaluated, even if the current instance has been partially or + completely evaluated. + """ + return self._clone(True) + + def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: + """Internal clone helper.""" + if not base: + if self._explicit_session: + base = self._clone_base(self._session) + else: + base = self._clone_base(None) + + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + "cursor_type", + ) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_") and k[1:] in values_to_clone + } + if deepcopy: + data = self._deepcopy(data) + base.__dict__.update(data) + return base + + def _clone_base(self, session: Optional[ClientSession]) -> Cursor: + """Creates an empty Cursor object for information to be copied into.""" + return self.__class__(self._collection, session=session) + + def _query_spec(self) -> Mapping[str, Any]: + """Get the spec to use for a query.""" + operators: dict[str, Any] = {} + if self._ordering: + operators["$orderby"] = self._ordering + if self._explain: + operators["$explain"] = True + if self._hint: + operators["$hint"] = self._hint + if self._let: + operators["let"] = self._let + if self._comment: + operators["$comment"] = self._comment + if self._max_scan: + operators["$maxScan"] = self._max_scan + if self._max_time_ms is not None: + operators["$maxTimeMS"] = self._max_time_ms + if self._max: + operators["$max"] = self._max + if self._min: + operators["$min"] = self._min + if self._return_key is not None: + operators["$returnKey"] = self._return_key + if self._show_record_id is not None: + # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. + operators["$showDiskLoc"] = self._show_record_id + if self._snapshot is not None: + operators["$snapshot"] = self._snapshot + + if operators: + # Make a shallow copy so we can cleanly rewind or clone. + spec = dict(self._spec) + + # Allow-listed commands must be wrapped in $query. + if "$query" not in spec: + # $query has to come first + spec = {"$query": spec} + + spec.update(operators) + return spec + # Have to wrap with $query if "query" is the first key. + # We can't just use $query anytime "query" is a key as + # that breaks commands like count and find_and_modify. + # Checking spec.keys()[0] covers the case that the spec + # was passed as an instance of SON or OrderedDict. + elif "query" in self._spec and (len(self._spec) == 1 or next(iter(self._spec)) == "query"): + return {"$query": self._spec} + + return self._spec + + def _check_okay_to_chain(self) -> None: + """Check if it is okay to chain more options onto this cursor.""" + if self._retrieved or self._id is not None: + raise InvalidOperation("cannot set options after executing query") + + def add_option(self, mask: int) -> Cursor[_DocumentType]: + """Set arbitrary query flags using a bitmask. + + To set the tailable flag: + cursor.add_option(2) + """ + if not isinstance(mask, int): + raise TypeError("mask must be an int") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + self._exhaust = True + + self._query_flags |= mask + return self + + def remove_option(self, mask: int) -> Cursor[_DocumentType]: + """Unset arbitrary query flags using a bitmask. + + To unset the tailable flag: + cursor.remove_option(2) + """ + if not isinstance(mask, int): + raise TypeError("mask must be an int") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + self._exhaust = False + + self._query_flags &= ~mask + return self + + def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** + + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError("allow_disk_use must be a bool") + self._check_okay_to_chain() + + self._allow_disk_use = allow_disk_use + return self + + def limit(self, limit: int) -> Cursor[_DocumentType]: + """Limits the number of results to be returned by this cursor. + + Raises :exc:`TypeError` if `limit` is not an integer. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` + has already been used. The last `limit` applied to this cursor + takes precedence. A limit of ``0`` is equivalent to no limit. + + :param limit: the number of results to return + + .. seealso:: The MongoDB documentation on `limit `_. + """ + if not isinstance(limit, int): + raise TypeError("limit must be an integer") + if self._exhaust: + raise InvalidOperation("Can't use limit and exhaust together.") + self._check_okay_to_chain() + + self._empty = False + self._limit = limit + return self + + def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. The last `batch_size` + applied to this cursor takes precedence. + + :param batch_size: The size of each batch of results requested. + """ + if not isinstance(batch_size, int): + raise TypeError("batch_size must be an integer") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + self._check_okay_to_chain() + + self._batch_size = batch_size + return self + + def skip(self, skip: int) -> Cursor[_DocumentType]: + """Skips the first `skip` results of this cursor. + + Raises :exc:`TypeError` if `skip` is not an integer. Raises + :exc:`ValueError` if `skip` is less than ``0``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has + already been used. The last `skip` applied to this cursor takes + precedence. + + :param skip: the number of results to skip + """ + if not isinstance(skip, int): + raise TypeError("skip must be an integer") + if skip < 0: + raise ValueError("skip must be >= 0") + self._check_okay_to_chain() + + self._skip = skip + return self + + def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: + """Specifies a time limit for a query operation. If the specified + time is exceeded, the operation will be aborted and + :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` + is ``None`` no limit is applied. + + Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` + has already been used. + + :param max_time_ms: the time limit after which the operation is aborted + """ + if not isinstance(max_time_ms, int) and max_time_ms is not None: + raise TypeError("max_time_ms must be an integer or None") + self._check_okay_to_chain() + + self._max_time_ms = max_time_ms + return self + + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: + """Specifies a time limit for a getMore operation on a + :attr:`~pymongo.cursor_shared.CursorType.TAILABLE_AWAIT` cursor. For all other + types of cursor max_await_time_ms is ignored. + + Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or + ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. + + .. note:: `max_await_time_ms` requires server version **>= 3.2** + + :param max_await_time_ms: the time limit after which the operation is + aborted + + .. versionadded:: 3.2 + """ + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError("max_await_time_ms must be an integer or None") + self._check_okay_to_chain() + + # Ignore max_await_time_ms if not tailable or await_data is False. + if self._query_flags & CursorType.TAILABLE_AWAIT: + self._max_await_time_ms = max_await_time_ms + + return self + + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> Cursor[_DocumentType]: + ... + + def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_DocumentType]]: + """Get a single document or a slice of documents from this cursor. + + .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! + for idx in range(10): + print(cursor[idx]) + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. + + To get a single document use an integral index, e.g.:: + + >>> db.test.find()[50] + + An :class:`IndexError` will be raised if the index is negative + or greater than the amount of documents in this cursor. Any + limit previously applied to this cursor will be ignored. + + To get a slice of documents use a slice index, e.g.:: + + >>> db.test.find()[20:25] + + This will return this cursor with a limit of ``5`` and skip of + ``20`` applied. Using a slice index will override any prior + limits or skips applied to this cursor (including those + applied through previous calls to this method). Raises + :class:`IndexError` when the slice has a step, a negative + start value, or a stop value less than or equal to the start + value. + + :param index: An integer or slice index to be applied to this cursor + """ + if _IS_SYNC: + self._check_okay_to_chain() + self._empty = False + if isinstance(index, slice): + if index.step is not None: + raise IndexError("Cursor instances do not support slice steps") + + skip = 0 + if index.start is not None: + if index.start < 0: + raise IndexError("Cursor instances do not support negative indices") + skip = index.start + + if index.stop is not None: + limit = index.stop - skip + if limit < 0: + raise IndexError( + "stop index must be greater than start index for slice %r" % index + ) + if limit == 0: + self._empty = True + else: + limit = 0 + + self._skip = skip + self._limit = limit + return self + + if isinstance(index, int): + if index < 0: + raise IndexError("Cursor instances do not support negative indices") + clone = self.clone() + clone.skip(index + self._skip) + clone.limit(-1) # use a hard limit + clone._query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 + for doc in clone: # type: ignore[attr-defined] + return doc + raise IndexError("no such item for Cursor instance") + raise TypeError("index %r cannot be applied to Cursor instances" % index) + else: + raise IndexError("Cursor does not support indexing") + + def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: + """**DEPRECATED** - Limit the number of documents to scan when + performing the query. + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. Only the last :meth:`max_scan` + applied to this cursor has any effect. + + :param max_scan: the maximum number of documents to scan + + .. versionchanged:: 3.7 + Deprecated :meth:`max_scan`. Support for this option is deprecated in + MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side + execution time. + """ + self._check_okay_to_chain() + self._max_scan = max_scan + return self + + def max(self, spec: _Sort) -> Cursor[_DocumentType]: + """Adds ``max`` operator that specifies upper bound for specific index. + + When using ``max``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the exclusive + upper bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``max`` without a :meth:`~hint`. + + .. versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError("spec must be an instance of list or tuple") + + self._check_okay_to_chain() + self._max = dict(spec) + return self + + def min(self, spec: _Sort) -> Cursor[_DocumentType]: + """Adds ``min`` operator that specifies lower bound for specific index. + + When using ``min``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the inclusive + lower bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``min`` without a :meth:`~hint`. + + .. versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError("spec must be an instance of list or tuple") + + self._check_okay_to_chain() + self._min = dict(spec) + return self + + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> Cursor[_DocumentType]: + """Sorts this cursor's results. + + Pass a field name and a direction, either + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: + + async for doc in collection.find().sort('field', pymongo.ASCENDING): + print(doc) + + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: + + async for doc in collection.find().sort([ + 'field1', + ('field2', pymongo.DESCENDING)]): + print(doc) + + Text search results can be sorted by relevance:: + + cursor = await db.test.find( + {'$text': {'$search': 'some words'}}, + {'score': {'$meta': 'textScore'}}) + + # Sort by 'score' field. + cursor.sort([('score', {'$meta': 'textScore'})]) + + async for doc in cursor: + print(doc) + + For more advanced text search functionality, see MongoDB's + `Atlas Search `_. + + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. Only the last :meth:`sort` applied to this + cursor has any effect. + + :param key_or_list: a single key or a list of (key, direction) + pairs specifying the keys to sort on + :param direction: only used if `key_or_list` is a single + key, if not given :data:`~pymongo.ASCENDING` is assumed + """ + self._check_okay_to_chain() + keys = helpers._index_list(key_or_list, direction) + self._ordering = helpers._index_document(keys) + return self + + def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. note:: This method uses the default verbosity mode of the + `explain command + `_, + ``allPlansExecution``. To use a different verbosity use + :meth:`~pymongo.database.Database.command` to run the explain + command directly. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + c = self.clone() + c._explain = True + + # always use a hard limit for explains + if c._limit: + c._limit = -abs(c._limit) + return next(c) + + def _set_hint(self, index: Optional[_Hint]) -> None: + if index is None: + self._hint = None + return + + if isinstance(index, str): + self._hint = index + else: + self._hint = helpers._index_document(index) + + def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: + """Adds a 'hint', telling Mongo the proper index to use for the query. + + Judicious use of hints can greatly improve query + performance. When doing a query on multiple fields (at least + one of which is indexed) pass the indexed field as a hint to + the query. Raises :class:`~pymongo.errors.OperationFailure` if the + provided hint requires an index that does not exist on this collection, + and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. + + `index` should be an index as passed to + :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``) or the name of the index. + If `index` is ``None`` any existing hint for this query is + cleared. The last hint applied to this cursor takes precedence + over all others. + + :param index: index to hint on (as an index specifier) + """ + self._check_okay_to_chain() + self._set_hint(index) + return self + + def comment(self, comment: Any) -> Cursor[_DocumentType]: + """Adds a 'comment' to the cursor. + + http://mongodb.com/docs/manual/reference/operator/comment/ + + :param comment: A string to attach to the query to help interpret and + trace the operation in the server logs and in profile data. + + .. versionadded:: 2.7 + """ + self._check_okay_to_chain() + self._comment = comment + return self + + def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: + """Adds a `$where`_ clause to this query. + + The `code` argument must be an instance of :class:`str` or + :class:`~bson.code.Code` containing a JavaScript expression. + This expression will be evaluated for each document scanned. + Only those documents for which the expression evaluates to + *true* will be returned as results. The keyword *this* refers + to the object currently being scanned. For example:: + + # Find all documents where field "a" is less than "b" plus "c". + async for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) + + Raises :class:`TypeError` if `code` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. Only the last call to + :meth:`where` applied to a :class:`Cursor` has any effect. + + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + + :param code: JavaScript expression to use as a filter + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ + """ + self._check_okay_to_chain() + if not isinstance(code, Code): + code = Code(code) + + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: dict[str, Any] + if self._has_filter: + spec = dict(self._spec) + else: + spec = cast(dict, self._spec) + spec["$where"] = code + self._spec = spec + return self + + def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: + """Adds a :class:`~pymongo.collation.Collation` to this query. + + Raises :exc:`TypeError` if `collation` is not an instance of + :class:`~pymongo.collation.Collation` or a ``dict``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has + already been used. Only the last collation applied to this cursor has + any effect. + + :param collation: An instance of :class:`~pymongo.collation.Collation`. + """ + self._check_okay_to_chain() + self._collation = validate_collation_or_none(collation) + return self + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _get_read_preference(self) -> _ServerMode: + if self._read_preference is None: + # Save the read preference for getMore commands. + self._read_preference = self._collection._read_preference_for(self.session) + return self._read_preference + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + This is mostly useful with `tailable cursors + `_ + since they will stop iterating even though they *may* return more + results in the future. + + With regular cursors, simply use a for loop instead of :attr:`alive`:: + + async for doc in collection.find(): + print(doc) + + .. note:: Even if :attr:`alive` is True, :meth:`next` can raise + :exc:`StopIteration`. :attr:`alive` can also be True while iterating + a cursor from a failed server. In this case :attr:`alive` will + return False after :meth:`next` fails to retrieve the next batch + of results from the server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> Optional[int]: + """Returns the id of the cursor + + .. versionadded:: 2.2 + """ + return self._id + + @property + def address(self) -> Optional[tuple[str, Any]]: + """The (host, port) of the server used, or None. + + .. versionchanged:: 3.0 + Renamed from "conn_id". + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._explicit_session: + return self._session + return None + + def __copy__(self) -> Cursor[_DocumentType]: + """Support function for `copy.copy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=False) + + def __deepcopy__(self, memo: Any) -> Any: + """Support function for `copy.deepcopy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=True) + + @overload + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: + ... + + @overload + def _deepcopy( + self, x: SupportsItems, memo: Optional[dict[int, Union[list, dict]]] = None + ) -> dict: + ... + + def _deepcopy( + self, x: Union[Iterable, SupportsItems], memo: Optional[dict[int, Union[list, dict]]] = None + ) -> Union[list, dict]: + """Deepcopy helper for the data dictionary or list. + + Regular expressions cannot be deep copied but as they are immutable we + don't have to copy them when cloning. + """ + y: Union[list, dict] + iterator: Iterable[tuple[Any, Any]] + if not hasattr(x, "items"): + y, is_list, iterator = [], True, enumerate(x) + else: + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() + if memo is None: + memo = {} + val_id = id(x) + if val_id in memo: + return memo[val_id] + memo[val_id] = y + + for key, value in iterator: + if isinstance(value, (dict, list)) and not isinstance(value, SON): + value = self._deepcopy(value, memo) # noqa: PLW2901 + elif not isinstance(value, RE_TYPE): + value = copy.deepcopy(value, memo) # noqa: PLW2901 + + if is_list: + y.append(value) # type: ignore[union-attr] + else: + if not isinstance(key, RE_TYPE): + key = copy.deepcopy(key, memo) # noqa: PLW2901 + y[key] = value + return y + + def _die(self, synchronous: bool = False) -> None: + """Closes this cursor.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, f"{self._dbname}.{self._collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + self._collection.database.client._cleanup_cursor( + synchronous, + cursor_id, + address, + self._sock_mgr, + self._session, + self._explicit_session, + ) + if not self._explicit_session: + self._session = None + self._sock_mgr = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die(True) + + def distinct(self, key: str) -> list: + """Get a list of distinct values for `key` among all documents + in the result set of this query. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + The :meth:`distinct` method obeys the + :attr:`~pymongo.collection.Collection.read_preference` of the + :class:`~pymongo.collection.Collection` instance on which + :meth:`~pymongo.collection.Collection.find` was called. + + :param key: name of key for which we want to get the distinct values + + .. seealso:: :meth:`pymongo.collection.Collection.distinct` + """ + options: dict[str, Any] = {} + if self._spec: + options["query"] = self._spec + if self._max_time_ms is not None: + options["maxTimeMS"] = self._max_time_ms + if self._comment: + options["comment"] = self._comment + if self._collation is not None: + options["collation"] = self._collation + + return self._collection.distinct(key, session=self._session, **options) + + def _send_message(self, operation: Union[_Query, _GetMore]) -> None: + """Send a query or getmore operation and handles the response. + + If operation is ``None`` this is an exhaust cursor, which reads + the next result batch off the exhaust socket instead of + sending getMore messages to the server. + + Can raise ConnectionFailure. + """ + client = self._collection.database.client + # OP_MSG is required to support exhaust cursors with encryption. + if client._encrypter and self._exhaust: + raise InvalidOperation("exhaust cursors do not support auto encryption") + + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self._exhaust: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die(False) + else: + self.close() + # If this is a tailable cursor the error is likely + # due to capped collection roll over. Setting + # self._killed to True ensures Cursor.alive will be + # False. No need to re-raise. + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self._query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): + return + raise + except ConnectionFailure: + self._killed = True + self.close() + raise + except Exception: + self.close() + raise + + self._address = response.address + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) + + cmd_name = operation.name + docs = response.docs + if response.from_command: + if cmd_name != "explain": + cursor = docs[0]["cursor"] + self._id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self._dbname, self._collname = ns.split(".", 1) + else: + documents = cursor["nextBatch"] + self._data = deque(documents) + self._retrieved += len(documents) + else: + self._id = 0 + self._data = deque(docs) + self._retrieved += len(docs) + else: + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + self._data = deque(docs) + self._retrieved += response.data.number_returned + + if self._id == 0: + # Don't wait for garbage collection to call __del__, return the + # socket and the session to the pool now. + self.close() + + if self._limit and self._id and self._limit <= self._retrieved: + self.close() + + def _refresh(self) -> int: + """Refreshes the cursor with more data from Mongo. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. + """ + if len(self._data) or self._killed: + return len(self._data) + + if not self._session: + self._session = self._collection.database.client._ensure_session() + + if self._id is None: # Query + if (self._min or self._max) and not self._hint: + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self._query_flags, + self._collection.database.name, + self._collection.name, + self._skip, + self._query_spec(), + self._projection, + self._codec_options, + self._get_read_preference(), + self._limit, + self._batch_size, + self._read_concern, + self._collation, + self._session, + self._collection.database.client, + self._allow_disk_use, + self._exhaust, + ) + self._send_message(q) + elif self._id: # Get More + if self._limit: + limit = self._limit - self._retrieved + if self._batch_size: + limit = min(limit, self._batch_size) + else: + limit = self._batch_size + # Exhaust cursors don't send getMore messages. + g = self._getmore_class( + self._dbname, + self._collname, + limit, + self._id, + self._codec_options, + self._get_read_preference(), + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + self._exhaust, + self._comment, + ) + self._send_message(g) + + return len(self._data) + + def rewind(self) -> Cursor[_DocumentType]: + """Rewind this cursor to its unevaluated state. + + Reset this cursor if it has been partially or completely evaluated. + Any options that are present on the cursor will remain in effect. + Future iterating performed on this cursor will cause new queries to + be sent to the server, even if the resultant data has already been + retrieved by this cursor. + """ + self.close() + self._data = deque() + self._id = None + self._address = None + self._retrieved = 0 + self._killed = False + + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + if self._empty: + raise StopIteration + if len(self._data) or self._refresh(): + return self._data.popleft() + else: + raise StopIteration + + def __next__(self) -> _DocumentType: + return self.next() + + def __iter__(self) -> Cursor[_DocumentType]: + return self + + def __enter__(self) -> Cursor[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def to_list(self) -> list[_DocumentType]: + return [x for x in self] # noqa: C416,RUF100 + + +class RawBatchCursor(Cursor, Generic[_DocumentType]): + """A cursor / iterator over raw batches of BSON data from a query result.""" + + _query_class = _RawBatchQuery + _getmore_class = _RawBatchGetMore + + def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.collection.Collection.find_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + super().__init__(collection, *args, **kwargs) + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[_DocumentOut]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return cast(List["_DocumentOut"], raw_response) + + def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + clone = self._clone(deepcopy=True, base=Cursor(self.collection)) + return clone.explain() + + def __getitem__(self, index: Any) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py new file mode 100644 index 0000000000..92521d7c14 --- /dev/null +++ b/pymongo/synchronous/database.py @@ -0,0 +1,1419 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Database level operations.""" +from __future__ import annotations + +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.dbref import DBRef +from bson.timestamp import Timestamp +from pymongo import _csot +from pymongo.database_shared import _check_name, _CodecDocumentType +from pymongo.errors import CollectionInvalid, InvalidOperation +from pymongo.synchronous import common +from pymongo.synchronous.aggregation import _DatabaseAggregationCommand +from pymongo.synchronous.change_stream import DatabaseChangeStream +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.common import _ecoc_coll_name, _esc_coll_name +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline + +if TYPE_CHECKING: + import bson + import bson.codec_options + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class Database(common.BaseObject, Generic[_DocumentType]): + def __init__( + self, + client: MongoClient[_DocumentType], + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> None: + """Get a database by client and name. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if + `name` is not a valid database name. + + :param client: A :class:`~pymongo.mongo_client.MongoClient` instance. + :param name: The database name. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) client.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) client.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) client.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) client.read_concern is used. + + .. seealso:: The MongoDB documentation on `databases `_. + + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + :class:`~pymongo.database.Database` no longer returns an instance + of :class:`~pymongo.collection.Collection` for attribute names + with leading underscores. You must use dict-style lookups instead:: + + db['__my_collection__'] + + Not: + + db.__my_collection__ + """ + super().__init__( + codec_options or client.codec_options, + read_preference or client.read_preference, + write_concern or client.write_concern, + read_concern or client.read_concern, + ) + + if not isinstance(name, str): + raise TypeError("name must be an instance of str") + + if name != "$external": + _check_name(name) + + self._name = name + self._client: MongoClient[_DocumentType] = client + self._timeout = client.options.timeout + + @property + def client(self) -> MongoClient[_DocumentType]: + """The client instance for this :class:`Database`.""" + return self._client + + @property + def name(self) -> str: + """The name of this :class:`Database`.""" + return self._name + + def with_options( + self, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Database[_DocumentType]: + """Get a clone of this database changing the specified settings. + + >>> db1.read_preference + Primary() + >>> from pymongo.synchronous.read_preferences import Secondary + >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) + >>> db1.read_preference + Primary() + >>> db2.read_preference + Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Collection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Collection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Collection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Collection` + is used. + + .. versionadded:: 3.8 + """ + return Database( + self._client, + self._name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Database): + return self._client == other.client and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._client, self._name)) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._client!r}, {self._name!r})" + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" collection, use database[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + return Collection(self, name) + + def get_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: + """Get a :class:`~pymongo.collection.Collection` with the given name + and options. + + Useful for creating a :class:`~pymongo.collection.Collection` with + different codec options, read preference, and/or write concern from + this :class:`Database`. + + >>> db.read_preference + Primary() + >>> coll1 = db.test + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = db.get_collection( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the collection - a string. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Database` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Database` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Database` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Database` is + used. + """ + return Collection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + + def _get_encrypted_fields( + self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool + ) -> Optional[Mapping[str, Any]]: + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return cast(Mapping[str, Any], deepcopy(encrypted_fields)) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return cast( + Mapping[str, Any], + deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ), + ) + if ask_db and self.client.options.auto_encryption_opts: + options = self[coll_name].options() + if options.get("encryptedFields"): + return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) + return None + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Database' object is not iterable") + + next = __next__ + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> DatabaseChangeStream[_DocumentType]: + """Watch changes on this database. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which + iterates over changes on all collections in this database. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + async with db.watch() as stream: + async for change in stream: + print(change) + + The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + async with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + async for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = DatabaseChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + + @_csot.apply + def create_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + check_exists: Optional[bool] = True, + **kwargs: Any, + ) -> Collection[_DocumentType]: + """Create a new :class:`~pymongo.collection.Collection` in this + database. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.CollectionInvalid` will be + raised if the collection already exists. + + :param name: the name of the collection to create + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Database` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Database` is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Database` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Database` is + used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param `check_exists`: if True (the default), send a listCollections command to + check if the collection already exists before creation. + :param kwargs: additional keyword arguments will + be passed as options for the `create collection command`_ + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. Valid options include, but are not + limited to: + + - ``size`` (int): desired initial size for the collection (in + bytes). For capped collections this size is the max + size of the collection. + - ``capped`` (bool): if True, this is a capped collection + - ``max`` (int): maximum number of objects if capped (optional) + - ``timeseries`` (dict): a document specifying configuration options for + timeseries collections + - ``expireAfterSeconds`` (int): the number of seconds after which a + document in a timeseries collection expires + - ``validator`` (dict): a document specifying validation rules or expressions + for the collection + - ``validationLevel`` (str): how strictly to apply the + validation rules to existing documents during an update. The default level + is "strict" + - ``validationAction`` (str): whether to "error" on invalid documents + (the default) or just "warn" about the violations but allow invalid + documents to be inserted + - ``indexOptionDefaults`` (dict): a document specifying a default configuration + for indexes when creating a collection + - ``viewOn`` (str): the name of the source collection or view from which + to create the view + - ``pipeline`` (list): a list of aggregation pipeline stages + - ``comment`` (str): a user-provided comment to attach to this command. + This option is only supported on MongoDB >= 4.4. + - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + - ``clusteredIndex`` (dict): Document that specifies the clustered index + configuration. It must have the following form:: + + { + // key pattern must be {_id: 1} + key: , // required + unique: , // required, must be `true` + name: , // optional, otherwise automatically generated + v: , // optional, must be `2` if provided + } + - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for + enabling pre- and post-images. + + .. versionchanged:: 4.2 + Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. + + .. versionchanged:: 3.11 + This method is now supported inside multi-document transactions + with MongoDB 4.4+. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Added the collation option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + """ + encrypted_fields = self._get_encrypted_fields(kwargs, name, False) + if encrypted_fields: + common.validate_is_mapping("encryptedFields", encrypted_fields) + kwargs["encryptedFields"] = encrypted_fields + + clustered_index = kwargs.get("clusteredIndex") + if clustered_index: + common.validate_is_mapping("clusteredIndex", clustered_index) + + with self._client._tmp_session(session) as s: + # Skip this check in a transaction where listCollections is not + # supported. + if ( + check_exists + and (not s or not s.in_transaction) + and name in self._list_collection_names(filter={"name": name}, session=s) + ): + raise CollectionInvalid("collection %s already exists" % name) + coll = Collection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + coll._create(kwargs, s) + + return coll + + def aggregate( + self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any + ) -> CommandCursor[_DocumentType]: + """Perform a database-level aggregation. + + See the `aggregation pipeline`_ documentation for a list of stages + that are supported. + + .. code-block:: python + + # Lists all operations currently running on the server. + with client.admin.aggregate([{"$currentOp": {}}]) as cursor: + for operation in cursor: + print(operation) + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`Database`, except when ``$out`` or ``$merge`` are used, in + which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` + is used. + + .. note:: This method does not support the 'explain' option. Please + use :meth:`~pymongo.database.Database.command` instead. + + .. note:: The :attr:`~pymongo.database.Database.write_concern` of + this collection is automatically applied to this operation. + + :param pipeline: a list of aggregation pipeline stages + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param kwargs: extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. versionadded:: 3.9 + + .. _aggregation pipeline: + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + with self.client._tmp_session(session, close=False) as s: + cmd = _DatabaseAggregationCommand( + self, + CommandCursor, + pipeline, + kwargs, + session is not None, + user_fields={"cursor": {"firstBatch": 1}}, + ) + return self.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(s), # type: ignore[arg-type] + s, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[_CodecDocumentType] = ..., + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: Union[ + CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] + ] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Internal command helper.""" + if isinstance(command, str): + command = {command: value} + + command.update(kwargs) + with self._client._tmp_session(session) as s: + return conn.command( + self._name, + command, + read_preference, + codec_options, + check, + allowable_errors, + write_concern=write_concern, + parse_write_concern_error=parse_write_concern_error, + session=s, + client=self._client, + ) + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: None = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: CodecOptions[_CodecDocumentType] = ..., + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + @_csot.apply + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Issue a MongoDB command. + + Send command `command` to the database and return the + response. If `command` is an instance of :class:`str` + then the command {`command`: `value`} will be sent. + Otherwise, `command` must be an instance of + :class:`dict` and will be sent as is. + + Any additional keyword arguments will be added to the final + command document before it is sent. + + For example, a command like ``{buildinfo: 1}`` can be sent + using: + + >>> await db.command("buildinfo") + OR + >>> await db.command({"buildinfo": 1}) + + For a command where the value matters, like ``{count: + collection_name}`` we can do: + + >>> await db.command("count", collection_name) + OR + >>> await db.command({"count": collection_name}) + + For commands that take additional arguments we can use + kwargs. So ``{count: collection_name, query: query}`` becomes: + + >>> await db.command("count", collection_name, query=query) + OR + >>> await db.command({"count": collection_name, "query": query}) + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should be done with this in mind. + + :param value: value to use for the command verb when + `command` is passed as a string + :param check: check the response for errors, raising + :class:`~pymongo.errors.OperationFailure` if there are any + :param allowable_errors: if `check` is ``True``, error messages + in this list will be ignored by error-checking + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`, + and `secondary_acceptable_latency_ms` option. + Removed `compile_re` option: PyMongo now always represents BSON + regular expressions as :class:`~bson.regex.Regex` objects. Use + :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a + BSON regular expression to a Python regular expression object. + Added the ``codec_options`` parameter. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + if comment is not None: + kwargs["comment"] = comment + + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + if read_preference is None: + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + with self._client._conn_for_reads(read_preference, session, operation=command_name) as ( + connection, + read_preference, + ): + return self._command( + connection, + command, + value, + check, + allowable_errors, + read_preference, + opts, # type: ignore[arg-type] + session=session, + **kwargs, + ) + + @_csot.apply + def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + max_await_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + """Issue a MongoDB command and parse the response as a cursor. + + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + :param value: value to use for the command verb when + `command` is passed as a string + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options`: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to future getMores for this + command. + :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see :ref:`versioned-api-ref`), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + with self._client._tmp_session(session, close=False) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + with self._client._conn_for_reads(read_preference, tmp_session, command_name) as ( + conn, + read_preference, + ): + response = self._command( + conn, + command, + value, + True, + None, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = CommandCursor( + coll, + response["cursor"], + conn.address, + max_await_time_ms=max_await_time_ms, + session=tmp_session, + explicit_session=session is not None, + comment=comment, + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + + def _retryable_read_command( + self, + command: Union[str, MutableMapping[str, Any]], + operation: str, + session: Optional[ClientSession] = None, + ) -> dict[str, Any]: + """Same as command but used for retryable read commands.""" + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> dict[str, Any]: + return self._command( + conn, + command, + read_preference=read_preference, + session=session, + ) + + return self._client._retryable_read(_cmd, read_preference, session, operation) + + def _list_collections( + self, + conn: Connection, + session: Optional[ClientSession], + read_preference: _ServerMode, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Internal listCollections helper.""" + coll = cast( + Collection[MutableMapping[str, Any]], + self.get_collection("$cmd", read_preference=read_preference), + ) + cmd = {"listCollections": 1, "cursor": {}} + cmd.update(kwargs) + with self._client._tmp_session(session, close=False) as tmp_session: + cursor = ( + self._command(conn, cmd, read_preference=read_preference, session=tmp_session) + )["cursor"] + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=tmp_session, + explicit_session=session is not None, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + def _list_collections_helper( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionadded:: 3.6 + """ + if filter is not None: + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: + return self._list_collections(conn, session, read_preference=read_preference, **kwargs) + + return self._client._retryable_read( + _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS + ) + + def list_collections( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionadded:: 3.6 + """ + return self._list_collections_helper(session, filter, comment, **kwargs) + + def _list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + if filter is None: + kwargs["nameOnly"] = True + + else: + # The enumerate collections spec states that "drivers MUST NOT set + # nameOnly if a filter specifies any keys other than name." + common.validate_is_mapping("filter", filter) + kwargs["filter"] = filter + if not filter or (len(filter) == 1 and "name" in filter): + kwargs["nameOnly"] = True + + return [ + result["name"] for result in self._list_collections_helper(session=session, **kwargs) + ] + + def list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Get a list of all the collection names in this database. + + For example, to list all non-system collections:: + + filter = {"name": {"$regex": r"^(?!system\\.)"}} + db.list_collection_names(filter=filter) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + .. versionchanged:: 3.8 + Added the ``filter`` and ``**kwargs`` parameters. + + .. versionadded:: 3.6 + """ + return self._list_collection_names(session, filter, comment, **kwargs) + + def _drop_helper( + self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None + ) -> dict[str, Any]: + command = {"drop": name} + if comment is not None: + command["comment"] = comment + + with self._client._conn_for_writes(session, operation=_Op.DROP) as connection: + return self._command( + connection, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + def drop_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> dict[str, Any]: + """Drop a collection. + + :param name_or_collection: the name of a collection to drop or the + collection object itself + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } + + + .. note:: The :attr:`~pymongo.database.Database.write_concern` of + this database is automatically applied to this operation. + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this database's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_collection + if isinstance(name, Collection): + name = name.name + + if not isinstance(name, str): + raise TypeError("name_or_collection must be an instance of str") + encrypted_fields = self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return self._drop_helper(name, session, comment) + + def validate_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + scandata: bool = False, + full: bool = False, + session: Optional[ClientSession] = None, + background: Optional[bool] = None, + comment: Optional[Any] = None, + ) -> dict[str, Any]: + """Validate a collection. + + Returns a dict of validation info. Raises CollectionInvalid if + validation fails. + + See also the MongoDB documentation on the `validate command`_. + + :param name_or_collection: A Collection object or the name of a + collection to validate. + :param scandata: Do extra checks beyond checking the overall + structure of the collection. + :param full: Have the server do a more thorough scan of the + collection. Use with `scandata` for a thorough scan + of the structure of the collection and the individual + documents. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param background: A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.11 + Added ``background`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ + """ + name = name_or_collection + if isinstance(name, Collection): + name = name.name + + if not isinstance(name, str): + raise TypeError("name_or_collection must be an instance of str or Collection") + cmd = {"validate": name, "scandata": scandata, "full": full} + if comment is not None: + cmd["comment"] = comment + + if background is not None: + cmd["background"] = background + + result = self.command(cmd, session=session) + + valid = True + # Pre 1.9 results + if "result" in result: + info = result["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + # Sharded results + elif "raw" in result: + for _, res in result["raw"].items(): + if "result" in res: + info = res["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + elif not res.get("valid", False): + valid = False + break + # Post 1.9 non-sharded results. + elif not result.get("valid", False): + valid = False + + if not valid: + raise CollectionInvalid(f"{name} invalid: {result!r}") + + return result + + def dereference( + self, + dbref: DBRef, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Optional[_DocumentType]: + """Dereference a :class:`~bson.dbref.DBRef`, getting the + document it points to. + + Raises :class:`TypeError` if `dbref` is not an instance of + :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if + the reference does not point to a valid document. Raises + :class:`ValueError` if `dbref` has a database specified that + is different from the current database. + + :param dbref: the reference + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional keyword arguments + are the same as the arguments to + :meth:`~pymongo.collection.Collection.find`. + + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + if not isinstance(dbref, DBRef): + raise TypeError("cannot dereference a %s" % type(dbref)) + if dbref.database is not None and dbref.database != self._name: + raise ValueError( + "trying to dereference a DBRef that points to " + f"another database ({dbref.database!r} not {self._name!r})" + ) + return self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py new file mode 100644 index 0000000000..cb248c5643 --- /dev/null +++ b/pymongo/synchronous/encryption.py @@ -0,0 +1,1120 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for explicit client-side field level encryption.""" +from __future__ import annotations + +import contextlib +import enum +import socket +import uuid +import weakref +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generator, + Generic, + Iterator, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +try: + from pymongocrypt.errors import MongoCryptError # type:ignore[import] + from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] + from pymongocrypt.synchronous.auto_encrypter import AutoEncrypter # type:ignore[import] + from pymongocrypt.synchronous.explicit_encrypter import ( # type:ignore[import] + ExplicitEncrypter, + ) + from pymongocrypt.synchronous.state_machine import ( # type:ignore[import] + MongoCryptCallback, + ) + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False + MongoCryptCallback = object + +from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from pymongo import _csot +from pymongo.daemon import _spawn_daemon +from pymongo.errors import ( + ConfigurationError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + PyMongoError, + ServerSelectionTimeoutError, +) +from pymongo.network_layer import BLOCKING_IO_ERRORS, sendall +from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, DeleteResult +from pymongo.ssl_support import get_ssl_context +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.common import CONNECT_TIMEOUT +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.encryption_options import AutoEncryptionOpts, RangeOpts +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.operations import UpdateOne +from pymongo.synchronous.pool import PoolOptions, _configured_socket, _raise_connection_failure +from pymongo.synchronous.typings import _DocumentType, _DocumentTypeArg +from pymongo.synchronous.uri_parser import parse_host +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongocrypt.mongocrypt import MongoCryptKmsContext + + +_IS_SYNC = True + +_HTTPS_PORT = 443 +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT +_MONGOCRYPTD_TIMEOUT_MS = 10000 + +_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( + document_class=Dict[str, Any], uuid_representation=STANDARD +) +# Use RawBSONDocument codec options to avoid needlessly decoding +# documents from the key vault. +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) + + +@contextlib.contextmanager +def _wrap_encryption_errors() -> Iterator[None]: + """Context manager to wrap encryption related errors.""" + try: + yield + except BSONError: + # BSON encoding/decoding errors are unrelated to encryption so + # we should propagate them unchanged. + raise + except Exception as exc: + raise EncryptionError(exc) from exc + + +class _EncryptionIO(MongoCryptCallback): # type: ignore[misc] + def __init__( + self, + client: Optional[MongoClient[_DocumentTypeArg]], + key_vault_coll: Collection[_DocumentTypeArg], + mongocryptd_client: Optional[MongoClient[_DocumentTypeArg]], + opts: AutoEncryptionOpts, + ): + """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any + # Use a weak ref to break reference cycle. + if client is not None: + self.client_ref = weakref.ref(client) + else: + self.client_ref = None + self.key_vault_coll: Optional[Collection[RawBSONDocument]] = cast( + Collection[RawBSONDocument], + key_vault_coll.with_options( + codec_options=_KEY_VAULT_OPTS, + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ), + ) + self.mongocryptd_client = mongocryptd_client + self.opts = opts + self._spawned = False + + def kms_request(self, kms_context: MongoCryptKmsContext) -> None: + """Complete a KMS request. + + :param kms_context: A :class:`MongoCryptKmsContext`. + + :return: None + """ + endpoint = kms_context.endpoint + message = kms_context.message + provider = kms_context.kms_provider + ctx = self.opts._kms_ssl_contexts.get(provider) + if ctx is None: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, + ) # disable_ocsp_endpoint_check + # CSOT: set timeout for socket creation. + connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) + opts = PoolOptions( + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, + ssl_context=ctx, + ) + host, port = parse_host(endpoint, _HTTPS_PORT) + try: + conn = _configured_socket((host, port), opts) + try: + sendall(conn, message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data = conn.recv(kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") from None + finally: + conn.close() + except (PyMongoError, MongoCryptError): + raise # Propagate pymongo errors directly. + except Exception as error: + # Wrap I/O errors in PyMongo exceptions. + _raise_connection_failure((host, port), error) + + def collection_info( + self, database: Database[Mapping[str, Any]], filter: bytes + ) -> Optional[bytes]: + """Get the collection info for a namespace. + + The returned collection info is passed to libmongocrypt which reads + the JSON schema. + + :param database: The database on which to run listCollections. + :param filter: The filter to pass to listCollections. + + :return: The first document from the listCollections command response as BSON. + """ + with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: + for doc in cursor: + return _dict_to_bson(doc, False, _DATA_KEY_OPTS) + return None + + def spawn(self) -> None: + """Spawn mongocryptd. + + Note this method is thread safe; at most one mongocryptd will start + successfully. + """ + self._spawned = True + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] + args.extend(self.opts._mongocryptd_spawn_args) + _spawn_daemon(args) + + def mark_command(self, database: str, cmd: bytes) -> bytes: + """Mark a command for encryption. + + :param database: The database on which to run this command. + :param cmd: The BSON command to run. + + :return: The marked command response from mongocryptd. + """ + if not self._spawned and not self.opts._mongocryptd_bypass_spawn: + self.spawn() + # Database.command only supports mutable mappings so we need to decode + # the raw BSON command first. + inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) + assert self.mongocryptd_client is not None + try: + res = self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + except ServerSelectionTimeoutError: + if self.opts._mongocryptd_bypass_spawn: + raise + self.spawn() + res = self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + return res.raw + + def fetch_keys(self, filter: bytes) -> Generator[bytes, None]: + """Yields one or more keys from the key vault. + + :param filter: The filter to pass to find. + + :return: A generator which yields the requested keys from the key vault. + """ + assert self.key_vault_coll is not None + with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: + for key in cursor: + yield key.raw + + def insert_data_key(self, data_key: bytes) -> Binary: + """Insert a data key into the key vault. + + :param data_key: The data key document to insert. + + :return: The _id of the inserted data key document. + """ + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) + data_key_id = raw_doc.get("_id") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError("data_key _id must be Binary with a UUID subtype") + + assert self.key_vault_coll is not None + self.key_vault_coll.insert_one(raw_doc) + return data_key_id + + def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: + """Encode a document to BSON. + + A document can be any mapping type (like :class:`dict`). + + :param doc: mapping type representing a document + + :return: The encoded BSON bytes. + """ + return encode(doc) + + def close(self) -> None: + """Release resources. + + Note it is not safe to call this method from __del__ or any GC hooks. + """ + self.client_ref = None + self.key_vault_coll = None + if self.mongocryptd_client: + self.mongocryptd_client.close() + self.mongocryptd_client = None + + +class RewrapManyDataKeyResult: + """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. + + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._bulk_write_result!r})" + + +class _Encrypter: + """Encrypts and decrypts MongoDB commands. + + This class is used to support automatic encryption and decryption of + MongoDB commands. + """ + + def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): + """Create a _Encrypter for a client. + + :param client: The encrypted MongoClient. + :param opts: The encrypted client's :class:`AutoEncryptionOpts`. + """ + if opts._schema_map is None: + schema_map = None + else: + schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) + self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + + def _get_internal_client( + encrypter: _Encrypter, mongo_client: MongoClient[_DocumentTypeArg] + ) -> MongoClient[_DocumentTypeArg]: + if mongo_client.options.pool_options.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client: MongoClient[Mapping[str, Any]] = MongoClient( + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) + + io_callbacks = _EncryptionIO( # type:ignore[misc] + metadata_client, key_vault_coll, mongocryptd_client, opts + ) + self._auto_encrypter = AutoEncrypter( + io_callbacks, + MongoCryptOptions( + opts._kms_providers, + schema_map, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, + bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, + bypass_query_analysis=opts._bypass_query_analysis, + ), + ) + self._closed = False + + def encrypt( + self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] + ) -> dict[str, Any]: + """Encrypt a MongoDB command. + + :param database: The database for this command. + :param cmd: A command document. + :param codec_options: The CodecOptions to use while encoding `cmd`. + + :return: The encrypted command to execute. + """ + self._check_closed() + encoded_cmd = _dict_to_bson(cmd, False, codec_options) + with _wrap_encryption_errors(): + encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) + # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. + return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + + def decrypt(self, response: bytes) -> Optional[bytes]: + """Decrypt a MongoDB command response. + + :param response: A MongoDB command response as BSON. + + :return: The decrypted command response. + """ + self._check_closed() + with _wrap_encryption_errors(): + return cast(bytes, self._auto_encrypter.decrypt(response)) + + def _check_closed(self) -> None: + if self._closed: + raise InvalidOperation("Cannot use MongoClient after close") + + def close(self) -> None: + """Cleanup resources.""" + self._closed = True + self._auto_encrypter.close() + if self._internal_client: + self._internal_client.close() + self._internal_client = None + + +class Algorithm(str, enum.Enum): + """An enum that defines the supported encryption algorithms.""" + + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + RANGEPREVIEW = "RangePreview" + """RangePreview. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. + + .. versionadded:: 4.4 + """ + + +class QueryType(str, enum.Enum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = "equality" + """Used to encrypt a value for an equality query.""" + + RANGEPREVIEW = "rangePreview" + """Used to encrypt a value for a range query. + + .. note:: Support for Range queries is in beta. + Backwards-breaking changes may be made before the final release. +""" + + +class ClientEncryption(Generic[_DocumentType]): + """Explicit client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient[_DocumentTypeArg], + codec_options: CodecOptions[_DocumentTypeArg], + kms_tls_options: Optional[Mapping[str, Any]] = None, + ) -> None: + """Explicit client-side field level encryption. + + The ClientEncryption class encapsulates explicit operations on a key + vault collection that cannot be done directly on a MongoClient. Similar + to configuring auto encryption on a MongoClient, it is constructed with + a MongoClient (to a MongoDB cluster containing the key vault + collection), KMS provider configuration, and keyVaultNamespace. It + provides an API for explicitly encrypting and decrypting values, and + creating data keys. It does not provide an API to query keys from the + key vault collection, as this can be done directly on the MongoClient. + + See :ref:`explicit-client-side-encryption` for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: A MongoClient connected to a MongoDB cluster + containing the `key_vault_namespace` collection. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` to use when encoding a + value for encryption and decoding the decrypted BSON value. This + should be the same CodecOptions instance configured on the + MongoClient, Database, or Collection used to access application + data. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. versionadded:: 3.9 + """ + if not _HAVE_PYMONGOCRYPT: + raise ConfigurationError( + "client-side field level encryption requires the pymongocrypt " + "library: install a compatible version with: " + "python -m pip install 'pymongo[encryption]'" + ) + + if not isinstance(codec_options, CodecOptions): + raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + + self._kms_providers = kms_providers + self._key_vault_namespace = key_vault_namespace + self._key_vault_client = key_vault_client + self._codec_options = codec_options + + db, coll = key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options + ) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( + None, key_vault_coll, None, opts + ) + self._encryption = ExplicitEncrypter( + self._io_callbacks, MongoCryptOptions(kms_providers, None) + ) + # Use the same key vault collection as the callback. + assert self._io_callbacks.key_vault_coll is not None + self._key_vault_coll = self._io_callbacks.key_vault_coll + + def create_encrypted_collection( + self, + database: Database[_DocumentTypeArg], + name: str, + encrypted_fields: Mapping[str, Any], + kms_provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> tuple[Collection[_DocumentTypeArg], Mapping[str, Any]]: + """Create a collection with encryptedFields. + + .. warning:: + This function does not update the encryptedFieldsMap in the client's + AutoEncryptionOpts, thus the user must create a new client after calling this function with + the encryptedFields returned. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.EncryptionError` will be + raised if the collection already exists. + + :param name: the name of the collection to create + :param encrypted_fields: Document that describes the encrypted fields for + Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: + + .. code-block: python + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + + :param kms_provider: the KMS provider to be used + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + :param kwargs: additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + + :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + ) + except EncryptionError as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + + def create_data_key( + self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + ) -> Binary: + """Create and insert a new data key into the key vault collection. + + :param kms_provider: The KMS provider to use. Supported values are + "aws", "azure", "gcp", "kmip", "local", or a named provider like + "kmip:name". + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + + If the `kms_provider` type is "aws" it is required and has the + following fields:: + + - `region` (string): Required. The AWS region, e.g. "us-east-1". + - `key` (string): Required. The Amazon Resource Name (ARN) to + the AWS customer. + - `endpoint` (string): Optional. An alternate host to send KMS + requests to. May include port number, e.g. + "kms.us-east-1.amazonaws.com:443". + + If the `kms_provider` type is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` type is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + + If the `kms_provider` type is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. `keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + + :param key_alt_names: An optional list of string alternate + names used to reference a key. If a key is created with alternate + names, then encryption may refer to the key by the unique alternate + name instead of by ``key_id``. The following example shows creating + and referring to a data key by alternate name:: + + client_encryption.create_data_key("local", key_alt_names=["name1"]) + # reference the key with the alternate name + client_encryption.encrypt("457-55-5462", key_alt_name="name1", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + :param key_material: Sets the custom key material to be used + by the data key for encryption and decryption. + + :return: The ``_id`` of the created data key document as a + :class:`~bson.binary.Binary` with subtype + :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. + """ + self._check_closed() + with _wrap_encryption_errors(): + return cast( + Binary, + self._encryption.create_data_key( + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ), + ) + + def _encrypt_helper( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + is_expression: bool = False, + ) -> Any: + self._check_closed() + if isinstance(key_id, uuid.UUID): + key_id = Binary.from_uuid(key_id) + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + range_opts_bytes = None + if range_opts: + range_opts_bytes = encode( + range_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts_bytes, + is_expression=is_expression, + ) + return decode(encrypted_doc)["v"] + + def encrypt( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> Binary: + """Encrypt a BSON value with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :param value: The BSON value to encrypt. + :param algorithm` (string): The encryption algorithm to use. See + :class:`Algorithm` for some valid options. + :param key_id: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param key_alt_name: Identifies a key vault document by 'keyAltName'. + :param query_type` (str): The query type to execute. See :class:`QueryType` for valid options. + :param contention_factor` (int): The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + :param range_opts: Experimental only, not intended for public use. + + :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + + .. versionchanged:: 4.7 + ``key_id`` can now be passed in as a :class:`uuid.UUID`. + + .. versionchanged:: 4.2 + Added the `query_type` and `contention_factor` parameters. + """ + return cast( + Binary, + self._encrypt_helper( + value=value, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=False, + ), + ) + + def encrypt_expression( + self, + expression: Mapping[str, Any], + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + ) -> RawBSONDocument: + """Encrypt a BSON expression with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :param expression: The BSON aggregate or match expression to encrypt. + :param algorithm` (string): The encryption algorithm to use. See + :class:`Algorithm` for some valid options. + :param key_id: Identifies a data key by ``_id`` which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param key_alt_name: Identifies a key vault document by 'keyAltName'. + :param query_type` (str): The query type to execute. See + :class:`QueryType` for valid options. + :param contention_factor` (int): The contention factor to use + when the algorithm is :attr:`Algorithm.INDEXED`. An integer value + *must* be given when the :attr:`Algorithm.INDEXED` algorithm is + used. + :param range_opts: Experimental only, not intended for public use. + + :return: The encrypted expression, a :class:`~bson.RawBSONDocument`. + + .. versionchanged:: 4.7 + ``key_id`` can now be passed in as a :class:`uuid.UUID`. + + .. versionadded:: 4.4 + """ + return cast( + RawBSONDocument, + self._encrypt_helper( + value=expression, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts, + is_expression=True, + ), + ) + + def decrypt(self, value: Binary) -> Any: + """Decrypt an encrypted value. + + :param value` (Binary): The encrypted value, a + :class:`~bson.binary.Binary` with subtype 6. + + :return: The decrypted BSON value. + """ + self._check_closed() + if not (isinstance(value, Binary) and value.subtype == 6): + raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") + + with _wrap_encryption_errors(): + doc = encode({"v": value}) + decrypted_doc = self._encryption.decrypt(doc) + return decode(decrypted_doc, codec_options=self._codec_options)["v"] + + def get_key(self, id: Binary) -> Optional[RawBSONDocument]: + """Get a data key by id. + + :param id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :return: The key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one({"_id": id}) + + def get_keys(self) -> Cursor[RawBSONDocument]: + """Get all of the data keys. + + :return: An instance of :class:`~pymongo.cursor.Cursor` over the data key + documents. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find({}) + + def delete_key(self, id: Binary) -> DeleteResult: + """Delete a key document in the key vault collection that has the given ``key_id``. + + :param id` (Binary): The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + + :return: The delete result. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.delete_one({"_id": id}) + + def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: + """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. + + :param `id`: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param `key_alt_name`: The key alternate name to add. + + :return: The previous version of the key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + update = {"$addToSet": {"keyAltNames": key_alt_name}} + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, update) + + def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: + """Get a key document in the key vault collection that has the given ``key_alt_name``. + + :param key_alt_name: (str): The key alternate name of the key to get. + + :return: The key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) + + def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: + """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. + + Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. + + :param `id`: The UUID of a key a which must be a + :class:`~bson.binary.Binary` with subtype 4 ( + :attr:`~bson.binary.UUID_SUBTYPE`). + :param `key_alt_name`: The key alternate name to remove. + + :return: Returns the previous version of the key document. + + .. versionadded:: 4.2 + """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :param filter: A document used to filter the data keys. + :param provider: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + :param `master_key`: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :return: A :class:`RewrapManyDataKeyResult`. + + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + + .. versionadded:: 4.2 + """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") + self._check_closed() + with _wrap_encryption_errors(): + raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + assert self._key_vault_coll is not None + result = self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + + def __enter__(self) -> ClientEncryption[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def _check_closed(self) -> None: + if self._encryption is None: + raise InvalidOperation("Cannot use closed ClientEncryption") + + def close(self) -> None: + """Release resources. + + Note that using this class in a with-statement will automatically call + :meth:`close`:: + + with ClientEncryption(...) as client_encryption: + encrypted = client_encryption.encrypt(value, ...) + decrypted = client_encryption.decrypt(encrypted) + + """ + if self._io_callbacks: + self._io_callbacks.close() + self._encryption.close() + self._io_callbacks = None + self._encryption = None diff --git a/pymongo/synchronous/encryption_options.py b/pymongo/synchronous/encryption_options.py new file mode 100644 index 0000000000..03bc01d181 --- /dev/null +++ b/pymongo/synchronous/encryption_options.py @@ -0,0 +1,270 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for automatic client-side field level encryption.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional + +try: + import pymongocrypt # type:ignore[import] # noqa: F401 + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False +from bson import int64 +from pymongo.errors import ConfigurationError +from pymongo.synchronous.common import validate_is_mapping +from pymongo.synchronous.uri_parser import _parse_kms_tls_options + +if TYPE_CHECKING: + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.typings import _DocumentTypeArg + +_IS_SYNC = True + + +class AutoEncryptionOpts: + """Options to configure automatic client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: Optional[MongoClient[_DocumentTypeArg]] = None, + schema_map: Optional[Mapping[str, Any]] = None, + bypass_auto_encryption: bool = False, + mongocryptd_uri: str = "mongodb://localhost:27020", + mongocryptd_bypass_spawn: bool = False, + mongocryptd_spawn_path: str = "mongocryptd", + mongocryptd_spawn_args: Optional[list[str]] = None, + kms_tls_options: Optional[Mapping[str, Any]] = None, + crypt_shared_lib_path: Optional[str] = None, + crypt_shared_lib_required: bool = False, + bypass_query_analysis: bool = False, + encrypted_fields_map: Optional[Mapping[str, Any]] = None, + ) -> None: + """Options to configure automatic client-side field level encryption. + + Automatic client-side field level encryption requires MongoDB >=4.2 + enterprise or a MongoDB >=4.2 Atlas cluster. Automatic encryption is not + supported for operations on a database or view and will result in + error. + + Although automatic encryption requires MongoDB >=4.2 enterprise or a + MongoDB >=4.2 Atlas cluster, automatic *decryption* is supported for all + users. To configure automatic *decryption* without automatic + *encryption* set ``bypass_auto_encryption=True``. Explicit + encryption and explicit decryption is also supported for all users + with the :class:`~pymongo.encryption.ClientEncryption` class. + + See :ref:`automatic-client-side-encryption` for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + Named KMS providers enables more than one of each KMS provider type to be configured. + For example, to configure multiple local KMS providers:: + + kms_providers = { + "local": {"key": local_kek1}, # Unnamed KMS provider. + "local:myname": {"key": local_kek2}, # Named KMS provider with name "myname". + } + + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: By default, the key vault collection + is assumed to reside in the same MongoDB cluster as the encrypted + MongoClient. Use this option to route data key queries to a + separate MongoDB cluster. + :param schema_map: Map of collection namespace ("db.coll") to + JSON Schema. By default, a collection's JSONSchema is periodically + polled with the listCollections command. But a JSONSchema may be + specified locally with the schemaMap option. + + **Supplying a `schema_map` provides more security than relying on + JSON Schemas obtained from the server. It protects against a + malicious server advertising a false JSON Schema, which could trick + the client into sending unencrypted data that should be + encrypted.** + + Schemas supplied in the schemaMap only apply to configuring + automatic encryption for client side encryption. Other validation + rules in the JSON schema will not be enforced by the driver and + will result in an error. + :param bypass_auto_encryption: If ``True``, automatic + encryption will be disabled but automatic decryption will still be + enabled. Defaults to ``False``. + :param mongocryptd_uri: The MongoDB URI used to connect + to the *local* mongocryptd process. Defaults to + ``'mongodb://localhost:27020'``. + :param mongocryptd_bypass_spawn: If ``True``, the encrypted + MongoClient will not attempt to spawn the mongocryptd process. + Defaults to ``False``. + :param mongocryptd_spawn_path: Used for spawning the + mongocryptd process. Defaults to ``'mongocryptd'`` and spawns + mongocryptd from the system path. + :param mongocryptd_spawn_args: A list of string arguments to + use when spawning the mongocryptd process. Defaults to + ``['--idleShutdownTimeoutSecs=60']``. If the list does not include + the ``idleShutdownTimeoutSecs`` option then + ``'--idleShutdownTimeoutSecs=60'`` will be added. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param crypt_shared_lib_path: Override the path to load the crypt_shared library. + :param crypt_shared_lib_required: If True, raise an error if libmongocrypt is + unable to load the crypt_shared library. + :param bypass_query_analysis: If ``True``, disable automatic analysis + of outgoing commands. Set `bypass_query_analysis` to use explicit + encryption on indexed fields without the MongoDB Enterprise Advanced + licensed crypt_shared library. + :param encrypted_fields_map: Map of collection namespace ("db.coll") to documents + that described the encrypted fields for Queryable Encryption. For example:: + + { + "db.encryptedCollection": { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + } + + .. versionchanged:: 4.2 + Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, + and `bypass_query_analysis` parameters. + + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. versionadded:: 3.9 + """ + if not _HAVE_PYMONGOCRYPT: + raise ConfigurationError( + "client side encryption requires the pymongocrypt library: " + "install a compatible version with: " + "python -m pip install 'pymongo[encryption]'" + ) + if encrypted_fields_map: + validate_is_mapping("encrypted_fields_map", encrypted_fields_map) + self._encrypted_fields_map = encrypted_fields_map + self._bypass_query_analysis = bypass_query_analysis + self._crypt_shared_lib_path = crypt_shared_lib_path + self._crypt_shared_lib_required = crypt_shared_lib_required + self._kms_providers = kms_providers + self._key_vault_namespace = key_vault_namespace + self._key_vault_client = key_vault_client + self._schema_map = schema_map + self._bypass_auto_encryption = bypass_auto_encryption + self._mongocryptd_uri = mongocryptd_uri + self._mongocryptd_bypass_spawn = mongocryptd_bypass_spawn + self._mongocryptd_spawn_path = mongocryptd_spawn_path + if mongocryptd_spawn_args is None: + mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] + self._mongocryptd_spawn_args = mongocryptd_spawn_args + if not isinstance(self._mongocryptd_spawn_args, list): + raise TypeError("mongocryptd_spawn_args must be a list") + if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): + self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") + # Maps KMS provider name to a SSLContext. + self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) + self._bypass_query_analysis = bypass_query_analysis + + +class RangeOpts: + """Options to configure encrypted queries using the rangePreview algorithm.""" + + def __init__( + self, + sparsity: int, + min: Optional[Any] = None, + max: Optional[Any] = None, + precision: Optional[int] = None, + ) -> None: + """Options to configure encrypted queries using the rangePreview algorithm. + + .. note:: This feature is experimental only, and not intended for public use. + + :param sparsity: An integer. + :param min: A BSON scalar value corresponding to the type being queried. + :param max: A BSON scalar value corresponding to the type being queried. + :param precision: An integer, may only be set for double or decimal128 types. + + .. versionadded:: 4.4 + """ + self.min = min + self.max = max + self.sparsity = sparsity + self.precision = precision + + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("sparsity", int64.Int64(self.sparsity)), + ("precision", self.precision), + ("min", self.min), + ("max", self.max), + ]: + if v is not None: + doc[k] = v + return doc diff --git a/pymongo/synchronous/event_loggers.py b/pymongo/synchronous/event_loggers.py new file mode 100644 index 0000000000..fe9dd899d3 --- /dev/null +++ b/pymongo/synchronous/event_loggers.py @@ -0,0 +1,225 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Example event logger classes. + +.. versionadded:: 3.11 + +These loggers can be registered using :func:`register` or +:class:`~pymongo.mongo_client.MongoClient`. + +``monitoring.register(CommandLogger())`` + +or + +``MongoClient(event_listeners=[CommandLogger()])`` +""" +from __future__ import annotations + +import logging + +from pymongo.synchronous import monitoring + +_IS_SYNC = True + + +class CommandLogger(monitoring.CommandListener): + """A simple listener that logs command events. + + Listens for :class:`~pymongo.monitoring.CommandStartedEvent`, + :class:`~pymongo.monitoring.CommandSucceededEvent` and + :class:`~pymongo.monitoring.CommandFailedEvent` events and + logs them at the `INFO` severity level using :mod:`logging`. + .. versionadded:: 3.11 + """ + + def started(self, event: monitoring.CommandStartedEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} started on server " + f"{event.connection_id}" + ) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"succeeded in {event.duration_micros} " + "microseconds" + ) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + logging.info( + f"Command {event.command_name} with request id " + f"{event.request_id} on server {event.connection_id} " + f"failed in {event.duration_micros} " + "microseconds" + ) + + +class ServerLogger(monitoring.ServerListener): + """A simple listener that logs server discovery events. + + Listens for :class:`~pymongo.monitoring.ServerOpeningEvent`, + :class:`~pymongo.monitoring.ServerDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.ServerClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def opened(self, event: monitoring.ServerOpeningEvent) -> None: + logging.info(f"Server {event.server_address} added to topology {event.topology_id}") + + def description_changed(self, event: monitoring.ServerDescriptionChangedEvent) -> None: + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + f"Server {event.server_address} changed type from " + f"{event.previous_description.server_type_name} to " + f"{event.new_description.server_type_name}" + ) + + def closed(self, event: monitoring.ServerClosedEvent) -> None: + logging.warning(f"Server {event.server_address} removed from topology {event.topology_id}") + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + """A simple listener that logs server heartbeat events. + + Listens for :class:`~pymongo.monitoring.ServerHeartbeatStartedEvent`, + :class:`~pymongo.monitoring.ServerHeartbeatSucceededEvent`, + and :class:`~pymongo.monitoring.ServerHeartbeatFailedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: + logging.info(f"Heartbeat sent to server {event.connection_id}") + + def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: + # The reply.document attribute was added in PyMongo 3.4. + logging.info( + f"Heartbeat to server {event.connection_id} " + "succeeded with reply " + f"{event.reply.document}" + ) + + def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: + logging.warning( + f"Heartbeat to server {event.connection_id} failed with error {event.reply}" + ) + + +class TopologyLogger(monitoring.TopologyListener): + """A simple listener that logs server topology events. + + Listens for :class:`~pymongo.monitoring.TopologyOpenedEvent`, + :class:`~pymongo.monitoring.TopologyDescriptionChangedEvent`, + and :class:`~pymongo.monitoring.TopologyClosedEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def opened(self, event: monitoring.TopologyOpenedEvent) -> None: + logging.info(f"Topology with id {event.topology_id} opened") + + def description_changed(self, event: monitoring.TopologyDescriptionChangedEvent) -> None: + logging.info(f"Topology description updated for topology id {event.topology_id}") + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + f"Topology {event.topology_id} changed type from " + f"{event.previous_description.topology_type_name} to " + f"{event.new_description.topology_type_name}" + ) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event: monitoring.TopologyClosedEvent) -> None: + logging.info(f"Topology with id {event.topology_id} closed") + + +class ConnectionPoolLogger(monitoring.ConnectionPoolListener): + """A simple listener that logs server connection pool events. + + Listens for :class:`~pymongo.monitoring.PoolCreatedEvent`, + :class:`~pymongo.monitoring.PoolClearedEvent`, + :class:`~pymongo.monitoring.PoolClosedEvent`, + :~pymongo.monitoring.class:`ConnectionCreatedEvent`, + :class:`~pymongo.monitoring.ConnectionReadyEvent`, + :class:`~pymongo.monitoring.ConnectionClosedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutStartedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckOutFailedEvent`, + :class:`~pymongo.monitoring.ConnectionCheckedOutEvent`, + and :class:`~pymongo.monitoring.ConnectionCheckedInEvent` + events and logs them at the `INFO` severity level using :mod:`logging`. + + .. versionadded:: 3.11 + """ + + def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: + logging.info(f"[pool {event.address}] pool created") + + def pool_ready(self, event: monitoring.PoolReadyEvent) -> None: + logging.info(f"[pool {event.address}] pool ready") + + def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: + logging.info(f"[pool {event.address}] pool cleared") + + def pool_closed(self, event: monitoring.PoolClosedEvent) -> None: + logging.info(f"[pool {event.address}] pool closed") + + def connection_created(self, event: monitoring.ConnectionCreatedEvent) -> None: + logging.info(f"[pool {event.address}][conn #{event.connection_id}] connection created") + + def connection_ready(self, event: monitoring.ConnectionReadyEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection setup succeeded" + ) + + def connection_closed(self, event: monitoring.ConnectionClosedEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] " + f'connection closed, reason: "{event.reason}"' + ) + + def connection_check_out_started( + self, event: monitoring.ConnectionCheckOutStartedEvent + ) -> None: + logging.info(f"[pool {event.address}] connection check out started") + + def connection_check_out_failed(self, event: monitoring.ConnectionCheckOutFailedEvent) -> None: + logging.info(f"[pool {event.address}] connection check out failed, reason: {event.reason}") + + def connection_checked_out(self, event: monitoring.ConnectionCheckedOutEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection checked out of pool" + ) + + def connection_checked_in(self, event: monitoring.ConnectionCheckedInEvent) -> None: + logging.info( + f"[pool {event.address}][conn #{event.connection_id}] connection checked into pool" + ) diff --git a/pymongo/synchronous/hello.py b/pymongo/synchronous/hello.py new file mode 100644 index 0000000000..5c1d8438fc --- /dev/null +++ b/pymongo/synchronous/hello.py @@ -0,0 +1,219 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers for the 'hello' and legacy hello commands.""" +from __future__ import annotations + +import copy +import datetime +import itertools +from typing import Any, Generic, Mapping, Optional + +from bson.objectid import ObjectId +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous import common +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.typings import ClusterTime, _DocumentType + +_IS_SYNC = True + + +def _get_server_type(doc: Mapping[str, Any]) -> int: + """Determine the server type from a hello response.""" + if not doc.get("ok"): + return SERVER_TYPE.Unknown + + if doc.get("serviceId"): + return SERVER_TYPE.LoadBalancer + elif doc.get("isreplicaset"): + return SERVER_TYPE.RSGhost + elif doc.get("setName"): + if doc.get("hidden"): + return SERVER_TYPE.RSOther + elif doc.get(HelloCompat.PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get(HelloCompat.LEGACY_PRIMARY): + return SERVER_TYPE.RSPrimary + elif doc.get("secondary"): + return SERVER_TYPE.RSSecondary + elif doc.get("arbiterOnly"): + return SERVER_TYPE.RSArbiter + else: + return SERVER_TYPE.RSOther + elif doc.get("msg") == "isdbgrid": + return SERVER_TYPE.Mongos + else: + return SERVER_TYPE.Standalone + + +class Hello(Generic[_DocumentType]): + """Parse a hello response from the server. + + .. versionadded:: 3.12 + """ + + __slots__ = ("_doc", "_server_type", "_is_writable", "_is_readable", "_awaitable") + + def __init__(self, doc: _DocumentType, awaitable: bool = False) -> None: + self._server_type = _get_server_type(doc) + self._doc: _DocumentType = doc + self._is_writable = self._server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.Standalone, + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + + self._is_readable = self.server_type == SERVER_TYPE.RSSecondary or self._is_writable + self._awaitable = awaitable + + @property + def document(self) -> _DocumentType: + """The complete hello command response document. + + .. versionadded:: 3.4 + """ + return copy.copy(self._doc) + + @property + def server_type(self) -> int: + return self._server_type + + @property + def all_hosts(self) -> set[tuple[str, int]]: + """List of hosts, passives, and arbiters known to this server.""" + return set( + map( + common.clean_node, + itertools.chain( + self._doc.get("hosts", []), + self._doc.get("passives", []), + self._doc.get("arbiters", []), + ), + ) + ) + + @property + def tags(self) -> Mapping[str, Any]: + """Replica set member tags or empty dict.""" + return self._doc.get("tags", {}) + + @property + def primary(self) -> Optional[tuple[str, int]]: + """This server's opinion about who the primary is, or None.""" + if self._doc.get("primary"): + return common.partition_node(self._doc["primary"]) + else: + return None + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self._doc.get("setName") + + @property + def max_bson_size(self) -> int: + return self._doc.get("maxBsonObjectSize", common.MAX_BSON_SIZE) + + @property + def max_message_size(self) -> int: + return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) + + @property + def max_write_batch_size(self) -> int: + return self._doc.get("maxWriteBatchSize", common.MAX_WRITE_BATCH_SIZE) + + @property + def min_wire_version(self) -> int: + return self._doc.get("minWireVersion", common.MIN_WIRE_VERSION) + + @property + def max_wire_version(self) -> int: + return self._doc.get("maxWireVersion", common.MAX_WIRE_VERSION) + + @property + def set_version(self) -> Optional[int]: + return self._doc.get("setVersion") + + @property + def election_id(self) -> Optional[ObjectId]: + return self._doc.get("electionId") + + @property + def cluster_time(self) -> Optional[ClusterTime]: + return self._doc.get("$clusterTime") + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + return self._doc.get("logicalSessionTimeoutMinutes") + + @property + def is_writable(self) -> bool: + return self._is_writable + + @property + def is_readable(self) -> bool: + return self._is_readable + + @property + def me(self) -> Optional[tuple[str, int]]: + me = self._doc.get("me") + if me: + return common.clean_node(me) + return None + + @property + def last_write_date(self) -> Optional[datetime.datetime]: + return self._doc.get("lastWrite", {}).get("lastWriteDate") + + @property + def compressors(self) -> Optional[list[str]]: + return self._doc.get("compression") + + @property + def sasl_supported_mechs(self) -> list[str]: + """Supported authentication mechanisms for the current user. + + For example:: + + >>> hello.sasl_supported_mechs + ["SCRAM-SHA-1", "SCRAM-SHA-256"] + + """ + return self._doc.get("saslSupportedMechs", []) + + @property + def speculative_authenticate(self) -> Optional[Mapping[str, Any]]: + """The speculativeAuthenticate field.""" + return self._doc.get("speculativeAuthenticate") + + @property + def topology_version(self) -> Optional[Mapping[str, Any]]: + return self._doc.get("topologyVersion") + + @property + def awaitable(self) -> bool: + return self._awaitable + + @property + def service_id(self) -> Optional[ObjectId]: + return self._doc.get("serviceId") + + @property + def hello_ok(self) -> bool: + return self._doc.get("helloOk", False) + + @property + def connection_id(self) -> Optional[int]: + return self._doc.get("connectionId") diff --git a/pymongo/synchronous/hello_compat.py b/pymongo/synchronous/hello_compat.py new file mode 100644 index 0000000000..126ed4bf54 --- /dev/null +++ b/pymongo/synchronous/hello_compat.py @@ -0,0 +1,26 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The HelloCompat class, placed here to break circular import issues.""" +from __future__ import annotations + +_IS_SYNC = True + + +class HelloCompat: + CMD = "hello" + LEGACY_CMD = "ismaster" + PRIMARY = "isWritablePrimary" + LEGACY_PRIMARY = "ismaster" + LEGACY_ERROR = "not master" diff --git a/pymongo/helpers.py b/pymongo/synchronous/helpers.py similarity index 83% rename from pymongo/helpers.py rename to pymongo/synchronous/helpers.py index 080c3204a4..892d6a93e3 100644 --- a/pymongo/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -15,6 +15,7 @@ """Bits and pieces used by the driver that don't really fit elsewhere.""" from __future__ import annotations +import builtins import sys import traceback from collections import abc @@ -45,68 +46,15 @@ WTimeoutError, _wtimeout_error, ) -from pymongo.hello import HelloCompat +from pymongo.helpers_constants import _NOT_PRIMARY_CODES, _REAUTHENTICATION_REQUIRED_CODE +from pymongo.synchronous.hello_compat import HelloCompat if TYPE_CHECKING: - from pymongo.cursor import _Hint - from pymongo.operations import _IndexList - from pymongo.typings import _DocumentOut - -# From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES: frozenset = frozenset( - [ - 11600, # InterruptedAtShutdown - 91, # ShutdownInProgress - ] -) -# From the SDAM spec, the "not primary" error codes are combined with the -# "node is recovering" error codes (of which the "node is shutting down" -# errors are a subset). -_NOT_PRIMARY_CODES: frozenset = ( - frozenset( - [ - 10058, # LegacyNotPrimary <=3.2 "not primary" error code - 10107, # NotWritablePrimary - 13435, # NotPrimaryNoSecondaryOk - 11602, # InterruptedDueToReplStateChange - 13436, # NotPrimaryOrSecondary - 189, # PrimarySteppedDown - ] - ) - | _SHUTDOWN_CODES -) -# From the retryable writes spec. -_RETRYABLE_ERROR_CODES: frozenset = _NOT_PRIMARY_CODES | frozenset( - [ - 7, # HostNotFound - 6, # HostUnreachable - 89, # NetworkTimeout - 9001, # SocketException - 262, # ExceededTimeLimit - 134, # ReadConcernMajorityNotAvailableYet - ] -) - -# Server code raised when re-authentication is required -_REAUTHENTICATION_REQUIRED_CODE: int = 391 + from pymongo.cursor_shared import _Hint + from pymongo.synchronous.operations import _IndexList + from pymongo.synchronous.typings import _DocumentOut -# Server code raised when authentication fails. -_AUTHENTICATION_FAILURE_CODE: int = 18 - -# Note - to avoid bugs from forgetting which if these is all lowercase and -# which are camelCase, and at the same time avoid having to add a test for -# every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS: set = { - "authenticate", - "saslstart", - "saslcontinue", - "getnonce", - "createuser", - "updateuser", - "copydbgetnonce", - "copydbsaslstart", - "copydb", -} +_IS_SYNC = True def _gen_index_name(keys: _IndexList) -> str: @@ -335,8 +283,8 @@ def _handle_exception() -> None: def _handle_reauth(func: F) -> F: def inner(*args: Any, **kwargs: Any) -> Any: no_reauth = kwargs.pop("no_reauth", False) - from pymongo.message import _BulkWriteContext - from pymongo.pool import Connection + from pymongo.synchronous.message import _BulkWriteContext + from pymongo.synchronous.pool import Connection try: return func(*args, **kwargs) @@ -363,3 +311,11 @@ def inner(*args: Any, **kwargs: Any) -> Any: raise return cast(F, inner) + + +def next(cls: Any) -> Any: + """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#anext.""" + if sys.version_info >= (3, 10): + return builtins.next(cls) + else: + return cls.__next__() diff --git a/pymongo/logger.py b/pymongo/synchronous/logger.py similarity index 98% rename from pymongo/logger.py rename to pymongo/synchronous/logger.py index 2caafa778d..d0f539ee6f 100644 --- a/pymongo/logger.py +++ b/pymongo/synchronous/logger.py @@ -21,7 +21,9 @@ from bson import UuidRepresentation, json_util from bson.json_util import JSONOptions, _truncate_documents -from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason +from pymongo.synchronous.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason + +_IS_SYNC = True class _CommandStatusMessage(str, enum.Enum): diff --git a/pymongo/max_staleness_selectors.py b/pymongo/synchronous/max_staleness_selectors.py similarity index 98% rename from pymongo/max_staleness_selectors.py rename to pymongo/synchronous/max_staleness_selectors.py index 72edf555b3..cde43890df 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/synchronous/max_staleness_selectors.py @@ -34,7 +34,10 @@ from pymongo.server_type import SERVER_TYPE if TYPE_CHECKING: - from pymongo.server_selectors import Selection + from pymongo.synchronous.server_selectors import Selection + +_IS_SYNC = True + # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. IDLE_WRITE_PERIOD = 10 diff --git a/pymongo/message.py b/pymongo/synchronous/message.py similarity index 98% rename from pymongo/message.py rename to pymongo/synchronous/message.py index 9412dc9149..0eca1e8f15 100644 --- a/pymongo/message.py +++ b/pymongo/synchronous/message.py @@ -64,23 +64,30 @@ OperationFailure, ProtocolError, ) -from pymongo.hello import HelloCompat -from pymongo.helpers import _handle_reauth -from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.logger import ( + _COMMAND_LOGGER, + _CommandStatusMessage, + _debug_log, +) +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern if TYPE_CHECKING: from datetime import timedelta - from pymongo.client_session import ClientSession - from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext - from pymongo.mongo_client import MongoClient - from pymongo.monitoring import _EventListeners - from pymongo.pool import Connection from pymongo.read_concern import ReadConcern - from pymongo.read_preferences import _ServerMode - from pymongo.typings import _Address, _DocumentOut + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.monitoring import _EventListeners + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.read_preferences import _ServerMode + from pymongo.synchronous.typings import _Address, _DocumentOut + + +_IS_SYNC = True MAX_INT32 = 2147483647 MIN_INT32 = -2147483648 @@ -418,7 +425,7 @@ def get_message( spec = self.spec if use_cmd: - spec = self.as_command(conn, apply_timeout=True)[0] + spec = (self.as_command(conn, apply_timeout=True))[0] request_id, msg, size, _ = _op_msg( 0, spec, @@ -560,7 +567,7 @@ def get_message( ctx = conn.compression_context if use_cmd: - spec = self.as_command(conn, apply_timeout=True)[0] + spec = (self.as_command(conn, apply_timeout=True))[0] if self.conn_mgr and self.exhaust: flags = _OpMsg.EXHAUST_ALLOWED else: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py new file mode 100644 index 0000000000..a44a4e039e --- /dev/null +++ b/pymongo/synchronous/mongo_client.py @@ -0,0 +1,2534 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools for connecting to MongoDB. + +.. seealso:: :doc:`/examples/high_availability` for examples of connecting + to replica sets or sets of mongos servers. + +To get a :class:`~pymongo.database.Database` instance from a +:class:`MongoClient` use either dictionary-style or attribute-style +access: + +.. doctest:: + + >>> from pymongo import MongoClient + >>> c = MongoClient() + >>> c.test_database + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') + >>> c["test-database"] + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') +""" +from __future__ import annotations + +import contextlib +import os +import weakref +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + FrozenSet, + Generator, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +from bson.timestamp import Timestamp +from pymongo import _csot, helpers_constants +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous import ( + client_session, + common, + database, + helpers, + message, + periodic_executor, + uri_parser, +) +from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream +from pymongo.synchronous.client_options import ClientOptions +from pymongo.synchronous.client_session import _EmptyServerSession +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.logger import _CLIENT_LOGGER, _log_or_warn +from pymongo.synchronous.monitoring import ConnectionClosedReason +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.server_selectors import writable_server_selector +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.synchronous.typings import ( + ClusterTime, + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) +from pymongo.synchronous.uri_parser import ( + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, +) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + import sys + from types import TracebackType + + from bson.objectid import ObjectId + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_session import ClientSession, _ServerSession + from pymongo.synchronous.cursor import _ConnectionManager + from pymongo.synchronous.message import _CursorAddress, _GetMore, _Query + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.response import Response + from pymongo.synchronous.server import Server + from pymongo.synchronous.server_selectors import Selection + + if sys.version_info[:2] >= (3, 9): + pass + else: + # Deprecated since version 3.9: collections.abc.Generator now supports []. + pass + +T = TypeVar("T") + +_WriteCall = Callable[[Optional["ClientSession"], "Connection", bool], T] +_ReadCall = Callable[[Optional["ClientSession"], "Server", "Connection", _ServerMode], T] + +_IS_SYNC = True + + +class MongoClient(common.BaseObject, Generic[_DocumentType]): + HOST = "localhost" + PORT = 27017 + # Define order to retrieve options from ClientOptions for __repr__. + # No host/port; these are retrieved from TopologySettings. + _constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() + + def __init__( + self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Optional[Type[_DocumentType]] = None, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: + """Client for a MongoDB instance, a replica set, or a set of mongoses. + + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + + The client object is thread-safe and has connection-pooling built in. + If an operation fails because of a network error, + :class:`~pymongo.errors.ConnectionFailure` is raised and the client + reconnects in the background. Application code should handle this + exception (recognizing that the operation failed) and then continue to + execute. + + The `host` parameter can be a full `mongodb URI + `_, in addition to + a simple hostname. It can also be a list of hostnames but no more + than one URI. Any port specified in the host string(s) will override + the `port` parameter. For username and + passwords reserved characters like ':', '/', '+' and '@' must be + percent encoded following RFC 2396:: + + from urllib.parse import quote_plus + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), host) + client = MongoClient(uri) + + Unix domain sockets are also supported. The socket path must be percent + encoded in the URI:: + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), quote_plus(socket_path)) + client = MongoClient(uri) + + But not when passed as a simple hostname:: + + client = MongoClient('/tmp/mongodb-27017.sock') + + Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The + URI must include one, and only one, hostname. The hostname will be + resolved to one or more DNS `SRV records + `_ which will be used + as the seed list for connecting to the MongoDB deployment. When using + SRV URIs, the `authSource` and `replicaSet` configuration options can + be specified using `TXT records + `_. See the + `Initial DNS Seedlist Discovery spec + `_ + for more details. Note that the use of SRV URIs implicitly enables + TLS support. Pass tls=false in the URI to override. + + .. note:: MongoClient creation will block waiting for answers from + DNS when mongodb+srv:// URIs are used. + + .. note:: Starting with version 3.0 the :class:`MongoClient` + constructor no longer blocks while connecting to the server or + servers, and it no longer raises + :class:`~pymongo.errors.ConnectionFailure` if they are + unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. You can check if the server is available + like this:: + + from pymongo.errors import ConnectionFailure + client = MongoClient() + try: + # The ping command is cheap and does not require auth. + client.admin.command('ping') + except ConnectionFailure: + print("Server not available") + + .. warning:: When using PyMongo in a multiprocessing context, please + read :ref:`multiprocessing` first. + + .. note:: Many of the following options can be passed using a MongoDB + URI or keyword parameters. If the same option is passed in a URI and + as a keyword parameter the keyword parameter takes precedence. + + :param host: hostname or IP address or Unix domain socket + path of a single mongod or mongos instance to connect to, or a + mongodb URI, or a list of hostnames (but no more than one mongodb + URI). If `host` is an IPv6 literal it must be enclosed in '[' + and ']' characters + following the RFC2732 URL syntax (e.g. '[::1]' for localhost). + Multihomed and round robin DNS addresses are **not** supported. + :param port: port number on which to connect + :param document_class: default class to use for + documents returned from queries on this client + :param tz_aware: if ``True``, + :class:`~datetime.datetime` instances returned as values + in a document by this :class:`MongoClient` will be timezone + aware (otherwise they will be naive) + :param connect: If ``True`` (the default), immediately + begin connecting to MongoDB in the background. Otherwise connect + on the first operation. + :param type_registry: instance of + :class:`~bson.codec_options.TypeRegistry` to enable encoding + and decoding of custom types. + :param datetime_conversion: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + :ref:`handling-out-of-range-datetimes` for details. + + | **Other optional parameters can be passed as keyword arguments:** + + - `directConnection` (optional): if ``True``, forces this client to + connect directly to the specified MongoDB host as a standalone. + If ``false``, the client connects to the entire replica set of + which the given MongoDB host(s) is a part. If this is ``True`` + and a mongodb+srv:// URI or a URI containing multiple seeds is + provided, an exception will be raised. + - `maxPoolSize` (optional): The maximum allowable number of + concurrent connections to each connected server. Requests to a + server will block if there are `maxPoolSize` outstanding + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. + - `minPoolSize` (optional): The minimum required number of concurrent + connections that the pool will maintain to each connected server. + Default is 0. + - `maxIdleTimeMS` (optional): The maximum number of milliseconds that + a connection can remain idle in the pool before being removed and + replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. + - `socketTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait for a response after sending an + ordinary (non-monitoring) database operation before concluding that + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). + - `connectTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait during server monitoring when + connecting a new socket to a server before concluding the server + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). + - `server_selector`: (callable or None) Optional, user-provided + function that augments server selection rules. The function should + accept as an argument a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + - `serverSelectionTimeoutMS`: (integer) Controls how long (in + milliseconds) the driver will wait to find an available, + appropriate server to carry out a database operation; while it is + waiting, multiple server monitoring operations may be carried out, + each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 + seconds). + - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) + a thread will wait for a socket from the pool if the pool has no + free sockets. Defaults to ``None`` (no timeout). + - `heartbeatFrequencyMS`: (optional) The number of milliseconds + between periodic server checks, or None to accept the default + frequency of 10 seconds. + - `serverMonitoringMode`: (optional) The server monitoring mode to use. + Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". + - `appname`: (string or None) The name of the application that + created this MongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. + - `driver`: (pair or None) A driver implemented on top of PyMongo can + pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, + version, and platform to the message printed in the server log when + establishing a connection. + - `event_listeners`: a list or tuple of event listeners. See + :mod:`~pymongo.monitoring` for details. + - `retryWrites`: (boolean) Whether supported write operations + executed within this MongoClient will be retried once after a + network error. Defaults to ``True``. + The supported write operations are: + + - :meth:`~pymongo.collection.Collection.bulk_write`, as long as + :class:`~pymongo.operations.UpdateMany` or + :class:`~pymongo.operations.DeleteMany` are not included. + - :meth:`~pymongo.collection.Collection.delete_one` + - :meth:`~pymongo.collection.Collection.insert_one` + - :meth:`~pymongo.collection.Collection.insert_many` + - :meth:`~pymongo.collection.Collection.replace_one` + - :meth:`~pymongo.collection.Collection.update_one` + - :meth:`~pymongo.collection.Collection.find_one_and_delete` + - :meth:`~pymongo.collection.Collection.find_one_and_replace` + - :meth:`~pymongo.collection.Collection.find_one_and_update` + + Unsupported write operations include, but are not limited to, + :meth:`~pymongo.collection.Collection.aggregate` using the ``$out`` + pipeline operator and any operation with an unacknowledged write + concern (e.g. {w: 0})). See + https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst + - `retryReads`: (boolean) Whether supported read operations + executed within this MongoClient will be retried once after a + network error. Defaults to ``True``. + The supported read operations are: + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, + :meth:`~pymongo.collection.Collection.aggregate` without ``$out``, + :meth:`~pymongo.collection.Collection.distinct`, + :meth:`~pymongo.collection.Collection.count`, + :meth:`~pymongo.collection.Collection.estimated_document_count`, + :meth:`~pymongo.collection.Collection.count_documents`, + :meth:`pymongo.collection.Collection.watch`, + :meth:`~pymongo.collection.Collection.list_indexes`, + :meth:`pymongo.database.Database.watch`, + :meth:`~pymongo.database.Database.list_collections`, + :meth:`pymongo.mongo_client.MongoClient.watch`, + and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. + + Unsupported read operations include, but are not limited to + :meth:`~pymongo.database.Database.command` and any getMore + operation on a cursor. + + Enabling retryable reads makes applications more resilient to + transient errors such as network failures, database upgrades, and + replica set failovers. For an exact definition of which errors + trigger a retry, see the `retryable reads specification + `_. + + - `compressors`: Comma separated list of compressors for wire + protocol compression. The list is used to negotiate a compressor + with the server. Currently supported options are "snappy", "zlib" + and "zstd". Support for snappy requires the + `python-snappy `_ package. + zlib support requires the Python standard library zlib module. zstd + requires the `zstandard `_ + package. By default no compression is used. Compression support + must also be enabled on the server. MongoDB 3.6+ supports snappy + and zlib compression. MongoDB 4.2+ adds support for zstd. + See :ref:`network-compression-example` for details. + - `zlibCompressionLevel`: (int) The zlib compression level to use + when zlib is used as the wire protocol compressor. Supported values + are -1 through 9. -1 tells the zlib library to use its default + compression level (usually 6). 0 means no compression. 1 is best + speed. 9 is best compression. Defaults to -1. + - `uuidRepresentation`: The BSON representation to use when encoding + from and decoding to instances of :class:`~uuid.UUID`. Valid + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language + compatibility. See :ref:`handling-uuid-data-example` for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `srvServiceName`: (string) The SRV service name to use for + "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: + + MongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. + + + | **Write Concern options:** + | (Only set if passed. No default values.) + + - `w`: (integer or string) If this is a replica set, write operations + will block until they have been replicated to the specified number + or tagged set of servers. `w=` always includes the replica set + primary (e.g. w=3 means write to the primary and wait until + replicated to **two** secondaries). Passing w=0 **disables write + acknowledgement** and all other write concern options. + - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write propagation + to complete. If replication does not complete in the given + timeframe, a timeout exception is raised. Passing wTimeoutMS=0 + will cause **write operations to wait indefinitely**. + - `journal`: If ``True`` block until write operations have been + committed to the journal. Cannot be used in combination with + `fsync`. Write operations will fail with an exception if this + option is used when the server is running without journaling. + - `fsync`: If ``True`` and the server is running without journaling, + blocks until the server has synced all data files to disk. If the + server is running with journaling, this acts the same as the `j` + option, blocking until write operations have been committed to the + journal. Cannot be used in combination with `j`. + + | **Replica set keyword arguments for connecting with a replica set + - either directly or via a mongos:** + + - `replicaSet`: (string or None) The name of the replica set to + connect to. The driver will verify that all servers it connects to + match this name. Implies that the hosts specified are a seed list + and the driver should attempt to find all members of the set. + Defaults to ``None``. + + | **Read Preference:** + + - `readPreference`: The replica set read preference for this client. + One of ``primary``, ``primaryPreferred``, ``secondary``, + ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``. + - `readPreferenceTags`: Specifies a tag set as a comma-separated list + of colon-separated key-value pairs. For example ``dc:ny,rack:1``. + Defaults to ``None``. + - `maxStalenessSeconds`: (integer) The maximum estimated + length of time a replica set secondary can fall behind the primary + in replication before it will no longer be selected for operations. + Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds + is set, it must be a positive integer greater than or equal to + 90 seconds. + + .. seealso:: :doc:`/examples/server_selection` + + | **Authentication:** + + - `username`: A string. + - `password`: A string. + + Although username and password must be percent-escaped in a MongoDB + URI, they must not be percent-escaped when passed as parameters. In + this example, both the space and slash special characters are passed + as-is:: + + MongoClient(username="user name", password="pass/word") + + - `authSource`: The database to authenticate on. Defaults to the + database specified in the URI, if provided, or to "admin". + - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. + If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 + when connected to MongoDB 3.6 and negotiates the mechanism to use + (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. + - `authMechanismProperties`: Used to specify authentication mechanism + specific options. To specify the service name for GSSAPI + authentication pass authMechanismProperties='SERVICE_NAME:'. + To specify the session token for MONGODB-AWS authentication pass + ``authMechanismProperties='AWS_SESSION_TOKEN:'``. + + .. seealso:: :doc:`/examples/authentication` + + | **TLS/SSL configuration:** + + - `tls`: (boolean) If ``True``, create the connection to the server + using transport layer security. Defaults to ``False``. + - `tlsInsecure`: (boolean) Specify whether TLS constraints should be + relaxed as much as possible. Setting ``tlsInsecure=True`` implies + ``tlsAllowInvalidCertificates=True`` and + ``tlsAllowInvalidHostnames=True``. Defaults to ``False``. Think + very carefully before setting this to ``True`` as it dramatically + reduces the security of TLS. + - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues + the TLS handshake regardless of the outcome of the certificate + verification process. If this is ``False``, and a value is not + provided for ``tlsCAFile``, PyMongo will attempt to load system + provided CA certificates. If the python version in use does not + support loading system CA certificates then the ``tlsCAFile`` + parameter must point to a file of CA certificates. + ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. + Defaults to ``False``. Think very carefully before setting this + to ``True`` as that could make your application vulnerable to + on-path attackers. + - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS + hostname verification. ``tlsAllowInvalidHostnames=False`` implies + ``tls=True``. Defaults to ``False``. Think very carefully before + setting this to ``True`` as that could make your application + vulnerable to on-path attackers. + - `tlsCAFile`: A file containing a single or a bundle of + "certification authority" certificates, which are used to validate + certificates passed from the other end of the connection. + Implies ``tls=True``. Defaults to ``None``. + - `tlsCertificateKeyFile`: A file containing the client certificate + and private key. Implies ``tls=True``. Defaults to ``None``. + - `tlsCRLFile`: A file containing a PEM or DER formatted + certificate revocation list. Implies ``tls=True``. Defaults to + ``None``. + - `tlsCertificateKeyFilePassword`: The password or passphrase for + decrypting the private key in ``tlsCertificateKeyFile``. Only + necessary if the private key is encrypted. Defaults to ``None``. + - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables + certificate revocation status checking via the OCSP responder + specified on the server certificate. + ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``. + Defaults to ``False``. + - `ssl`: (boolean) Alias for ``tls``. + + | **Read Concern options:** + | (If not set explicitly, this will use the server default) + + - `readConcernLevel`: (string) The read concern level specifies the + level of isolation for read operations. For example, a read + operation using a read concern level of ``majority`` will only + return data that has been written to a majority of nodes. If the + level is left unspecified, the server default will be used. + + | **Client side encryption options:** + | (If not set explicitly, client side encryption will not be enabled.) + + - `auto_encryption_opts`: A + :class:`~pymongo.encryption_options.AutoEncryptionOpts` which + configures this client to automatically encrypt collection commands + and automatically decrypt results. See + :ref:`automatic-client-side-encryption` for an example. + If a :class:`MongoClient` is configured with + ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a + separate internal ``MongoClient`` is created if any of the + following are true: + + - A ``key_vault_client`` is not passed to + :class:`~pymongo.encryption_options.AutoEncryptionOpts` + - ``bypass_auto_encrpytion=False`` is passed to + :class:`~pymongo.encryption_options.AutoEncryptionOpts` + + | **Stable API options:** + | (If not set explicitly, Stable API will not be enabled.) + + - `server_api`: A + :class:`~pymongo.server_api.ServerApi` which configures this + client to use Stable API. See :ref:`versioned-api-ref` for + details. + + .. seealso:: The MongoDB documentation on `connections `_. + + .. versionchanged:: 4.5 + Added the ``serverMonitoringMode`` keyword argument. + + .. versionchanged:: 4.2 + Added the ``timeoutMS`` keyword argument. + + .. versionchanged:: 4.0 + + - Removed the fsync, unlock, is_locked, database_names, and + close_cursor methods. + See the :ref:`pymongo4-migration-guide`. + - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` + keyword arguments. + - The default for `uuidRepresentation` was changed from + ``pythonLegacy`` to ``unspecified``. + - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and + keyword arguments. + + .. versionchanged:: 3.12 + Added the ``server_api`` keyword argument. + The following keyword arguments were deprecated: + + - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor + of ``tlsCertificateKeyFile``. + + .. versionchanged:: 3.11 + Added the following keyword arguments and URI options: + + - ``tlsDisableOCSPEndpointCheck`` + - ``directConnection`` + + .. versionchanged:: 3.9 + Added the ``retryReads`` keyword argument and URI option. + Added the ``tlsInsecure`` keyword argument and URI option. + The following keyword arguments and URI options were deprecated: + + - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``. + - ``j`` was deprecated in favor of ``journal``. + - ``ssl_cert_reqs`` was deprecated in favor of + ``tlsAllowInvalidCertificates``. + - ``ssl_match_hostname`` was deprecated in favor of + ``tlsAllowInvalidHostnames``. + - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``. + - ``ssl_certfile`` was deprecated in favor of + ``tlsCertificateKeyFile``. + - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``. + - ``ssl_pem_passphrase`` was deprecated in favor of + ``tlsCertificateKeyFilePassword``. + + .. versionchanged:: 3.9 + ``retryWrites`` now defaults to ``True``. + + .. versionchanged:: 3.8 + Added the ``server_selector`` keyword argument. + Added the ``type_registry`` keyword argument. + + .. versionchanged:: 3.7 + Added the ``driver`` keyword argument. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + Added the ``retryWrites`` keyword argument and URI option. + + .. versionchanged:: 3.5 + Add ``username`` and ``password`` options. Document the + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` + options. + Deprecated the ``socketKeepAlive`` keyword argument and URI option. + ``socketKeepAlive`` now defaults to ``True``. + + .. versionchanged:: 3.0 + :class:`~pymongo.mongo_client.MongoClient` is now the one and only + client class for a standalone server, mongos, or replica set. + It includes the functionality that had been split into + :class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect + to a replica set, discover all its members, and monitor the set for + stepdowns, elections, and reconfigs. + + The :class:`~pymongo.mongo_client.MongoClient` constructor no + longer blocks while connecting to the server or servers, and it no + longer raises :class:`~pymongo.errors.ConnectionFailure` if they + are unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. + + Therefore the ``alive`` method is removed since it no longer + provides meaningful information; even if the client is disconnected, + it may discover a server in time to fulfill the next operation. + + In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of + standalone MongoDB servers and used the first it could connect to:: + + MongoClient(['host1.com:27017', 'host2.com:27017']) + + A list of multiple standalones is no longer supported; if multiple + servers are listed they must be members of the same replica set, or + mongoses in the same sharded cluster. + + The behavior for a list of mongoses is changed from "high + availability" to "load balancing". Before, the client connected to + the lowest-latency mongos in the list, and used it until a network + error prompted it to re-evaluate all mongoses' latencies and + reconnect to one of them. In PyMongo 3, the client monitors its + network latency to all the mongoses continuously, and distributes + operations evenly among those with the lowest latency. See + :ref:`mongos-load-balancing` for more information. + + The ``connect`` option is added. + + The ``start_request``, ``in_request``, and ``end_request`` methods + are removed, as well as the ``auto_start_request`` option. + + The ``copy_database`` method is removed, see the + :doc:`copy_database examples ` for alternatives. + + The :meth:`MongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.MongoClient.close`. + + :class:`~pymongo.mongo_client.MongoClient` no longer returns an + instance of :class:`~pymongo.database.Database` for attribute names + with leading underscores. You must use dict-style lookups instead:: + + client['__my_database__'] + + Not:: + + client.__my_database__ + + .. versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + """ + doc_class = document_class or dict + self._init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + + if host is None: + host = self.HOST + if isinstance(host, str): + host = [host] + if port is None: + port = self.PORT + if not isinstance(port, int): + raise TypeError("port must be an instance of int") + + # _pool_class, _monitor_class, and _condition_class are for deep + # customization of PyMongo, e.g. Motor. + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) + + # Parse options passed as kwargs. + keyword_opts = common._CaseInsensitiveDictionary(kwargs) + keyword_opts["document_class"] = doc_class + + seeds = set() + username = None + password = None + dbase = None + opts = common._CaseInsensitiveDictionary() + fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") + for entity in host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = uri_parser.parse_uri( + entity, + port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + username = res["username"] or username + password = res["password"] or password + dbase = res["database"] or dbase + opts = res["options"] + fqdn = res["fqdn"] + else: + seeds.update(uri_parser.split_hosts(entity, port)) + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + if type_registry is not None: + keyword_opts["type_registry"] = type_registry + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + connect = opts.get("connect", True) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + + # Override connection string options with kwarg options. + opts.update(keyword_opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", username) + password = opts.get("password", password) + self._options = options = ClientOptions(username, password, dbase, opts) + + self._default_database_name = dbase + self._lock = _create_lock() + self._kill_cursors_queue: list = [] + + self._event_listeners = options.pool_options._event_listeners + super().__init__( + options.codec_options, + options.read_preference, + options.write_concern, + options.read_concern, + ) + + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=options.replica_set_name, + pool_class=pool_class, + pool_options=options.pool_options, + monitor_class=monitor_class, + condition_class=condition_class, + local_threshold_ms=options.local_threshold_ms, + server_selection_timeout=options.server_selection_timeout, + server_selector=options.server_selector, + heartbeat_frequency=options.heartbeat_frequency, + fqdn=fqdn, + direct_connection=options.direct_connection, + load_balanced=options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=options.server_monitoring_mode, + ) + + self._init_background() + + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] + + self._encrypter = None + if self._options.auto_encryption_opts: + from pymongo.synchronous.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout + + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self + + def _init_background(self, old_pid: Optional[int] = None) -> None: + self._topology = Topology(self._topology_settings) + # Seed the topology with the old one's pid so we can detect clients + # that are opened before a fork and used after. + self._topology._pid = old_pid + + def target() -> bool: + client = self_ref() + if client is None: + return False # Stop the executor. + MongoClient._process_periodic_tasks(client) + return True + + executor = periodic_executor.PeriodicExecutor( + interval=common.KILL_CURSOR_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_kill_cursors_thread", + ) + + # We strongly reference the executor and it weakly references us via + # this closure. When the client is freed, stop the executor soon. + self_ref: Any = weakref.ref(self, executor.close) + self._kill_cursors_executor = executor + + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: + return self._options.load_balanced and not (session and session.in_transaction) + + def _after_fork(self) -> None: + """Resets topology in a child after successfully forking.""" + self._init_background() + + def _duplicate(self, **kwargs: Any) -> MongoClient: + args = self._init_kwargs.copy() + args.update(kwargs) + return MongoClient(**args) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> ChangeStream[_DocumentType]: + """Watch changes on this cluster. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.ClusterChangeStream` cursor which + iterates over changes on all databases on this cluster. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with client.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.ClusterChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.ClusterChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = ClusterChangeStream( + self.admin, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + + @property + def topology_description(self) -> TopologyDescription: + """The description of the connected MongoDB deployment. + + >>> client.topology_description + , , ]> + >>> client.topology_description.topology_type_name + 'ReplicaSetWithPrimary' + + Note that the description is periodically updated in the background + but the returned object itself is immutable. Access this property again + to get a more recent + :class:`~pymongo.topology_description.TopologyDescription`. + + :return: An instance of + :class:`~pymongo.topology_description.TopologyDescription`. + + .. versionadded:: 4.0 + """ + return self._topology.description + + @property + def nodes(self) -> FrozenSet[_Address]: + """Set of all currently connected servers. + + .. warning:: When connected to a replica set the value of :attr:`nodes` + can change over time as :class:`MongoClient`'s view of the replica + set changes. :attr:`nodes` can also be an empty set when + :class:`MongoClient` is first instantiated and hasn't yet connected + to any servers, or a network partition causes it to lose connection + to all servers. + """ + description = self._topology.description + return frozenset(s.address for s in description.known_servers) + + @property + def options(self) -> ClientOptions: + """The configuration options for this client. + + :return: An instance of :class:`~pymongo.client_options.ClientOptions`. + + .. versionadded:: 4.0 + """ + return self._options + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self._topology == other._topology + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self._topology) + + def _repr_helper(self) -> str: + def option_repr(option: str, value: Any) -> str: + """Fix options whose __repr__ isn't usable in a constructor.""" + if option == "document_class": + if value is dict: + return "document_class=dict" + else: + return f"document_class={value.__module__}.{value.__name__}" + if option in common.TIMEOUT_OPTIONS and value is not None: + return f"{option}={int(value * 1000)}" + + return f"{option}={value!r}" + + # Host first... + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] + # ... then everything in self._constructor_args... + options.extend( + option_repr(key, self._options._options[key]) for key in self._constructor_args + ) + # ... then everything else. + options.extend( + option_repr(key, self._options._options[key]) + for key in self._options._options + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._repr_helper()})" + + def __getattr__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + return database.Database(self, name) + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.ClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.client_session.SessionOptions`. See the + :mod:`~pymongo.client_session` module for details and examples. + + A :class:`~pymongo.client_session.ClientSession` may only be used with + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.client_session.ClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. + return None + + def _send_cluster_time( + self, command: MutableMapping[str, Any], session: Optional[ClientSession] + ) -> None: + topology_time = self._topology.max_cluster_time() + session_time = session.cluster_time if session else None + if topology_time and session_time: + if topology_time["clusterTime"] > session_time["clusterTime"]: + cluster_time: Optional[ClusterTime] = topology_time + else: + cluster_time = session_time + else: + cluster_time = topology_time or session_time + if cluster_time: + command["$clusterTime"] = cluster_time + + def get_default_database( + self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: + """Get the database named in the MongoDB connection URI. + + >>> uri = 'mongodb://host/my_database' + >>> client = MongoClient(uri) + >>> db = client.get_default_database() + >>> assert db.name == 'my_database' + >>> db = client.get_database() + >>> assert db.name == 'my_database' + + Useful in scripts where you want to choose which database to use + based only on the URI in a configuration file. + + :param default: the database name to use if no database name + was provided in the URI. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.8 + Undeprecated. Added the ``default``, ``codec_options``, + ``read_preference``, ``write_concern`` and ``read_concern`` + parameters. + + .. versionchanged:: 3.5 + Deprecated, use :meth:`get_database` instead. + """ + if self._default_database_name is None and default is None: + raise ConfigurationError("No default database name defined or provided.") + + name = cast(str, self._default_database_name or default) + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def get_database( + self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: + """Get a :class:`~pymongo.database.Database` with the given name and + options. + + Useful for creating a :class:`~pymongo.database.Database` with + different codec options, read preference, and/or write concern from + this :class:`MongoClient`. + + >>> client.read_preference + Primary() + >>> db1 = client.test + >>> db1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> db2 = client.get_database( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.Database: + """Get a Database instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + def __enter__(self) -> MongoClient[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'MongoClient' object is not iterable") + + next = __next__ + + def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = self._topology.select_server(writable_server_selector, _Op.TEST) + + return getattr(server.description, attr_name) + + @property + def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. versionadded:: 3.0 + """ + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' + ) + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded, + ): + return None + return self._server_property("address") + + @property + def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + return self._topology.get_primary() # type: ignore[return-value] + + @property + def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + return self._topology.get_secondaries() + + @property + def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + return self._topology.get_arbiters() + + @property + def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return self._server_property("is_writable") + + @property + def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return self._server_property("server_type") == SERVER_TYPE.Mongos + + def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use Connection.command directly to avoid implicitly creating + # another session. + with self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. + + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + session_ids = self._topology.pop_all_sessions() + if session_ids: + self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. + self._kill_cursors_executor.close() + self._process_kill_cursors() + self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + self._encrypter.close() + + def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + self._topology.open() + with self._lock: + self._kill_cursors_executor.open() + return self._topology + + @contextlib.contextmanager + def _checkout( + self, server: Server, session: Optional[ClientSession] + ) -> Generator[Connection, None]: + in_txn = session and session.in_transaction + with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + with server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The ClientSession for the next operation, or None. May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. + """ + try: + topology = self._get_topology() + if session and not session.in_transaction: + session._transaction.reset() + if not address and session: + address = session._pinned_address + if address: + # We're running a getMore or this session is pinned to a mongos. + server = topology.select_server_by_address( + address, operation, operation_id=operation_id + ) + if not server: + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 + else: + server = topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) + return server + except PyMongoError as exc: + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + session._unpin() + raise + + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + server = self._select_server(writable_server_selector, session, operation) + return self._checkout(server, session) + + @contextlib.contextmanager + def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] + ) -> Generator[tuple[Connection, _ServerMode], None]: + assert read_preference is not None, "read_preference must not be None" + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. + # Thread safe: if the type is single it cannot change. + topology = self._get_topology() + single = topology.description.topology_type == TOPOLOGY_TYPE.Single + + with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + def _conn_for_reads( + self, + read_preference: _ServerMode, + session: Optional[ClientSession], + operation: str, + ) -> ContextManager[tuple[Connection, _ServerMode]]: + assert read_preference is not None, "read_preference must not be None" + _ = self._get_topology() + server = self._select_server(read_preference, session, operation) + return self._conn_from_server(read_preference, server, session) + + @_csot.apply + def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, + address: Optional[_Address] = None, + ) -> Response: + """Run a _Query/_GetMore operation and return a Response. + + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message + to a specific server, used for getMore. + """ + if operation.conn_mgr: + server = self._select_server( + operation.read_preference, + operation.session, + operation.name, + address=address, + ) + + with operation.conn_mgr._alock: + with _MongoClientErrorHandler(self, server, operation.session) as err_handler: + err_handler.contribute_socket(operation.conn_mgr.conn) + return server.run_operation( + operation.conn_mgr.conn, + operation, + operation.read_preference, + self._event_listeners, + unpack_res, + self, + ) + + def _cmd( + _session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> Response: + operation.reset() # Reset op in case of retry. + return server.run_operation( + conn, + operation, + read_preference, + self._event_listeners, + unpack_res, + self, + ) + + return self._retryable_read( + _cmd, + operation.read_preference, + operation.session, + address=address, + retryable=isinstance(operation, message._Query), + operation=operation.name, + ) + + def _retry_with_session( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk], + operation: str, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with at most one consecutive retries + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + """ + # Ensure that the options supports retry_writes and there is a valid session not in + # transaction, otherwise, we will not support retry behavior for this txn. + retryable = bool( + retryable and self.options.retry_writes and session and not session.in_transaction + ) + return self._retry_internal( + func=func, + session=session, + bulk=bulk, + operation=operation, + retryable=retryable, + operation_id=operation_id, + ) + + @_csot.apply + def _retry_internal( + self, + func: _WriteCall[T] | _ReadCall[T], + session: Optional[ClientSession], + bulk: Optional[_Bulk], + operation: str, + is_read: bool = False, + address: Optional[_Address] = None, + read_pref: Optional[_ServerMode] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ) -> T: + """Internal retryable helper for all client transactions. + + :param func: Callback function we want to retry + :param session: Client Session on which the transaction should occur + :param bulk: Abstraction to handle bulk write operations + :param operation: The name of the operation that the server is being selected for + :param is_read: If this is an exclusive read transaction, defaults to False + :param address: Server Address, defaults to None + :param read_pref: Topology of read operation, defaults to None + :param retryable: If the operation should be retried once, defaults to None + + :return: Output of the calling func() + """ + return _ClientConnectionRetryable( + mongo_client=self, + func=func, + bulk=bulk, + operation=operation, + is_read=is_read, + session=session, + read_pref=read_pref, + address=address, + retryable=retryable, + operation_id=operation_id, + ).run() + + def _retryable_read( + self, + func: _ReadCall[T], + read_pref: _ServerMode, + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + retryable: bool = True, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with consecutive retries if possible + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + + :param func: Read call we want to execute + :param read_pref: Desired topology of read operation + :param session: Client session we should use to execute operation + :param operation: The name of the operation that the server is being selected for + :param address: Optional address when sending a message, defaults to None + :param retryable: if we should attempt retries + (may not always be supported even if supplied), defaults to False + """ + + # Ensure that the client supports retrying on reads and there is no session in + # transaction, otherwise, we will not support retry behavior for this call. + retryable = bool( + retryable and self.options.retry_reads and not (session and session.in_transaction) + ) + return self._retry_internal( + func, + session, + None, + operation, + is_read=True, + address=address, + read_pref=read_pref, + retryable=retryable, + operation_id=operation_id, + ) + + def _retryable_write( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + operation: str, + bulk: Optional[_Bulk] = None, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with consecutive retries if possible + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + + :param retryable: if we should attempt retries (may not always be supported) + :param func: write call we want to execute during a session + :param session: Client session we will use to execute write operation + :param operation: The name of the operation that the server is being selected for + :param bulk: bulk abstraction to execute operations in bulk, defaults to None + """ + with self._tmp_session(session) as s: + return self._retry_with_session(retryable, func, s, bulk, operation, operation_id) + + def _cleanup_cursor( + self, + locks_allowed: bool, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: _ConnectionManager, + session: Optional[ClientSession], + explicit_session: bool, + ) -> None: + """Cleanup a cursor from cursor.close() or __del__. + + This method handles cleanup for Cursors/CommandCursors including any + pinned connection or implicit session attached at the time the cursor + was closed or garbage collected. + + :param locks_allowed: True if we are allowed to acquire locks. + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + :param session: The cursor's session. + :param explicit_session: True if the session was passed explicitly. + """ + if locks_allowed: + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + conn_mgr.close() + else: + # The cursor will be closed later in a different session. + if cursor_id or conn_mgr: + self._close_cursor_soon(cursor_id, address, conn_mgr) + if session and not explicit_session: + session._end_session(lock=locks_allowed) + + def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[ClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Send a kill cursors message with the given id. + + The cursor is closed synchronously on the current thread. + """ + if not isinstance(cursor_id, int): + raise TypeError("cursor_id must be an instance of int") + + try: + if conn_mgr: + with conn_mgr._alock: + # Cursor is pinned to LB outside of a transaction. + assert address is not None + assert conn_mgr.conn is not None + self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + self._kill_cursors([cursor_id], address, self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[ClientSession], + ) -> None: + """Send a kill cursors message with the given ids.""" + if address: + # address could be a tuple or _CursorAddress, but + # select_server_by_address needs (host, port). + server = topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] + else: + # Application called close_cursor() with no address. + server = topology.select_server(writable_server_selector, _Op.KILL_CURSORS) + + with self._checkout(server, session) as conn: + assert address is not None + self._kill_cursor_impl(cursor_ids, address, session, conn) + + def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[ClientSession], + conn: Connection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = {"killCursors": coll, "cursors": cursor_ids} + conn.command(db, spec, session=session, client=self) + + def _process_kill_cursors(self) -> None: + """Process any pending kill cursors requests.""" + address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] + + # Other threads or the GC may append to the queue concurrently. + while True: + try: + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() + except IndexError: + break + + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + self._cleanup_cursor(True, cursor_id, address, conn_mgr, None, False) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + helpers._handle_exception() + + # Don't re-open topology if it's closed and there's no pending cursors. + if address_to_cursor_ids: + topology = self._get_topology() + for address, cursor_ids in address_to_cursor_ids.items(): + try: + self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + helpers._handle_exception() + + # This method is run periodically by a background thread. + def _process_periodic_tasks(self) -> None: + """Process any pending kill cursors requests and + maintain connection pool parameters. + """ + try: + self._process_kill_cursors() + self._topology.update_pool() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + helpers._handle_exception() + + def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession], lock: bool + ) -> None: + """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None + return self._topology.return_server_session(server_session, lock) + + @contextlib.contextmanager + def _tmp_session( + self, session: Optional[client_session.ClientSession], close: bool = True + ) -> Generator[Optional[client_session.ClientSession], None, None]: + """If provided session is None, lend a temporary session.""" + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError("'session' argument must be a ClientSession or None.") + # Don't call end_session. + yield session + return + + s = self._ensure_session(session) + if s: + try: + yield s + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. + s.end_session() + raise + finally: + # Call end_session when we exit this scope. + if close: + s.end_session() + else: + yield None + + def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSession]) -> None: + self._topology.receive_cluster_time(reply.get("$clusterTime")) + if session is not None: + session._process_response(reply) + + def server_info(self, session: Optional[client_session.ClientSession] = None) -> dict[str, Any]: + """Get information about the MongoDB server we're connected to. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return cast( + dict, + self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), + ) + + def _list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + cmd = {"listDatabases": 1} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + admin = self._database_default_options("admin") + res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) + # listDatabases doesn't return a cursor (yet). Fake one. + cursor = { + "id": 0, + "firstBatch": res["databases"], + "ns": "admin.$cmd", + } + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) + + def list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + """Get a cursor over the databases of the connected server. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listDatabases command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionadded:: 3.6 + """ + return self._list_databases(session, comment, **kwargs) + + def list_database_names( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + ) -> list[str]: + """Get a list of the names of all databases on the connected server. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionadded:: 3.6 + """ + res = self._list_databases(session, nameOnly=True, comment=comment) + return [doc["name"] for doc in res] + + @_csot.apply + def drop_database( + self, + name_or_database: Union[str, database.Database[_DocumentTypeArg]], + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + ) -> None: + """Drop a database. + + Raises :class:`TypeError` if `name_or_database` is not an instance of + :class:`str` or :class:`~pymongo.database.Database`. + + :param name_or_database: the name of a database to drop, or a + :class:`~pymongo.database.Database` instance representing the + database to drop + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of + this client is automatically applied to this operation. + + .. versionchanged:: 3.4 + Apply this client's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_database + if isinstance(name, database.Database): + name = name.name + + if not isinstance(name, str): + raise TypeError("name_or_database must be an instance of str or a Database") + + with self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: + self[name]._command( + conn, + {"dropDatabase": 1, "comment": comment}, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + +def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: + """Return the server response from PyMongo exception or None.""" + if isinstance(exc, BulkWriteError): + # Check the last writeConcernError to determine if this + # BulkWriteError is retryable. + wces = exc.details["writeConcernErrors"] + return wces[-1] if wces else None + if isinstance(exc, (NotPrimaryError, OperationFailure)): + return cast(Mapping[str, Any], exc.details) + return None + + +def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None: + doc = _retryable_error_doc(exc) + if doc: + code = doc.get("code", 0) + # retryWrites on MMAPv1 should raise an actionable error. + if code == 20 and str(exc).startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, exc.details) # type: ignore[attr-defined] + if max_wire_version >= 9: + # In MongoDB 4.4+, the server reports the error labels. + for label in doc.get("errorLabels", []): + exc._add_error_label(label) + else: + # Do not consult writeConcernError for pre-4.4 mongos. + if isinstance(exc, WriteConcernError) and is_mongos: + pass + elif code in helpers_constants._RETRYABLE_ERROR_CODES: + exc._add_error_label("RetryableWriteError") + + # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is + # handled above. + if isinstance(exc, ConnectionFailure) and not isinstance( + exc, (NotPrimaryError, WaitQueueTimeoutError) + ): + exc._add_error_label("RetryableWriteError") + + +class _MongoClientErrorHandler: + """Handle errors raised when executing an operation.""" + + __slots__ = ( + "client", + "server_address", + "session", + "max_wire_version", + "sock_generation", + "completed_handshake", + "service_id", + "handled", + ) + + def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): + self.client = client + self.server_address = server.description.address + self.session = session + self.max_wire_version = common.MIN_WIRE_VERSION + # XXX: When get_socket fails, this generation could be out of date: + # "Note that when a network error occurs before the handshake + # completes then the error's generation number is the generation + # of the pool at the time the connection attempt was started." + self.sock_generation = server.pool.gen.get_overall() + self.completed_handshake = False + self.service_id: Optional[ObjectId] = None + self.handled = False + + def contribute_socket(self, conn: Connection, completed_handshake: bool = True) -> None: + """Provide socket information to the error handler.""" + self.max_wire_version = conn.max_wire_version + self.sock_generation = conn.generation + self.service_id = conn.service_id + self.completed_handshake = completed_handshake + + def handle( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] + ) -> None: + if self.handled or exc_val is None: + return + self.handled = True + if self.session: + if isinstance(exc_val, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() + + if isinstance(exc_val, PyMongoError): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): + self.session._unpin() + err_ctx = _ErrorContext( + exc_val, + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) + self.client._topology.handle_error(self.server_address, err_ctx) + + def __enter__(self) -> _MongoClientErrorHandler: + return self + + def __exit__( + self, + exc_type: Optional[Type[Exception]], + exc_val: Optional[Exception], + exc_tb: Optional[TracebackType], + ) -> None: + return self.handle(exc_type, exc_val) + + +class _ClientConnectionRetryable(Generic[T]): + """Responsible for executing retryable connections on read or write operations""" + + def __init__( + self, + mongo_client: MongoClient, + func: _WriteCall[T] | _ReadCall[T], + bulk: Optional[_Bulk], + operation: str, + is_read: bool = False, + session: Optional[ClientSession] = None, + read_pref: Optional[_ServerMode] = None, + address: Optional[_Address] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ): + self._last_error: Optional[Exception] = None + self._retrying = False + self._multiple_retries = _csot.get_timeout() is not None + self._client = mongo_client + + self._func = func + self._bulk = bulk + self._session = session + self._is_read = is_read + self._retryable = retryable + self._read_pref = read_pref + self._server_selector: Callable[[Selection], Selection] = ( + read_pref if is_read else writable_server_selector # type: ignore + ) + self._address = address + self._server: Server = None # type: ignore + self._deprioritized_servers: list[Server] = [] + self._operation = operation + self._operation_id = operation_id + + def run(self) -> T: + """Runs the supplied func() and attempts a retry + + :raises: self._last_error: Last exception raised + + :return: Result of the func() call + """ + # Increment the transaction id up front to ensure any retry attempt + # will use the proper txnNumber, even if server or socket selection + # fails before the command can be sent. + if self._is_session_state_retryable() and self._retryable and not self._is_read: + self._session._start_retryable_write() # type: ignore + if self._bulk: + self._bulk.started_retryable_write = True + + while True: + self._check_last_error(check_csot=True) + try: + return self._read() if self._is_read else self._write() + except ServerSelectionTimeoutError: + # The application may think the write was never attempted + # if we raise ServerSelectionTimeoutError on the retry + # attempt. Raise the original exception instead. + self._check_last_error() + # A ServerSelectionTimeoutError error indicates that there may + # be a persistent outage. Attempting to retry in this case will + # most likely be a waste of time. + raise + except PyMongoError as exc: + # Execute specialized catch on read + if self._is_read: + if isinstance(exc, (ConnectionFailure, OperationFailure)): + # ConnectionFailures do not supply a code property + exc_code = getattr(exc, "code", None) + if self._is_not_eligible_for_retry() or ( + isinstance(exc, OperationFailure) + and exc_code not in helpers_constants._RETRYABLE_ERROR_CODES + ): + raise + self._retrying = True + self._last_error = exc + else: + raise + + # Specialized catch on write operation + if not self._is_read: + if not self._retryable: + raise + retryable_write_error_exc = exc.has_error_label("RetryableWriteError") + if retryable_write_error_exc: + assert self._session + self._session._unpin() + if not retryable_write_error_exc or self._is_not_eligible_for_retry(): + if exc.has_error_label("NoWritesPerformed") and self._last_error: + raise self._last_error from exc + else: + raise + if self._bulk: + self._bulk.retrying = True + else: + self._retrying = True + if not exc.has_error_label("NoWritesPerformed"): + self._last_error = exc + if self._last_error is None: + self._last_error = exc + + if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded: + self._deprioritized_servers.append(self._server) + + def _is_not_eligible_for_retry(self) -> bool: + """Checks if the exchange is not eligible for retry""" + return not self._retryable or (self._is_retrying() and not self._multiple_retries) + + def _is_retrying(self) -> bool: + """Checks if the exchange is currently undergoing a retry""" + return self._bulk.retrying if self._bulk else self._retrying + + def _is_session_state_retryable(self) -> bool: + """Checks if provided session is eligible for retry + + reads: Make sure there is no ongoing transaction (if provided a session) + writes: Make sure there is a session without an active transaction + """ + if self._is_read: + return not (self._session and self._session.in_transaction) + return bool(self._session and not self._session.in_transaction) + + def _check_last_error(self, check_csot: bool = False) -> None: + """Checks if the ongoing client exchange experienced a exception previously. + If so, raise last error + + :param check_csot: Checks CSOT to ensure we are retrying with time remaining defaults to False + """ + if self._is_retrying(): + remaining = _csot.remaining() + if not check_csot or (remaining is not None and remaining <= 0): + assert self._last_error is not None + raise self._last_error + + def _get_server(self) -> Server: + """Retrieves a server object based on provided object context + + :return: Abstraction to connect to server + """ + return self._client._select_server( + self._server_selector, + self._session, + self._operation, + address=self._address, + deprioritized_servers=self._deprioritized_servers, + operation_id=self._operation_id, + ) + + def _write(self) -> T: + """Wrapper method for write-type retryable client executions + + :return: Output for func()'s call + """ + try: + max_wire_version = 0 + is_mongos = False + self._server = self._get_server() + with self._client._checkout(self._server, self._session) as conn: + max_wire_version = conn.max_wire_version + sessions_supported = ( + self._session + and self._server.description.retryable_writes_supported + and conn.supports_sessions + ) + is_mongos = conn.is_mongos + if not sessions_supported: + # A retry is not possible because this server does + # not support sessions raise the last error. + self._check_last_error() + self._retryable = False + return self._func(self._session, conn, self._retryable) # type: ignore + except PyMongoError as exc: + if not self._retryable: + raise + # Add the RetryableWriteError label, if applicable. + _add_retryable_write_error(exc, max_wire_version, is_mongos) + raise + + def _read(self) -> T: + """Wrapper method for read-type retryable client executions + + :return: Output for func()'s call + """ + self._server = self._get_server() + assert self._read_pref is not None, "Read Preference required on read calls" + with self._client._conn_from_server(self._read_pref, self._server, self._session) as ( + conn, + read_pref, + ): + if self._retrying and not self._retryable: + self._check_last_error() + return self._func(self._session, self._server, conn, read_pref) # type: ignore + + +def _after_fork_child() -> None: + """Releases the locks in child process and resets the + topologies in all MongoClients. + """ + # Reinitialize locks + _release_locks() + + # Perform cleanup in clients (i.e. get rid of topology) + for _, client in MongoClient._clients.items(): + client._after_fork() + + +def _detect_external_db(entity: str) -> bool: + """Detects external database hosts and logs an informational message at the INFO level.""" + entity = entity.lower() + cosmos_db_hosts = [".cosmos.azure.com"] + document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"] + + for host in cosmos_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a CosmosDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/monitor.py b/pymongo/synchronous/monitor.py similarity index 96% rename from pymongo/monitor.py rename to pymongo/synchronous/monitor.py index 64945dd106..96849e7349 100644 --- a/pymongo/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -21,21 +21,23 @@ import weakref from typing import TYPE_CHECKING, Any, Mapping, Optional, cast -from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled -from pymongo.hello import Hello from pymongo.lock import _create_lock -from pymongo.periodic_executor import _shutdown_executors -from pymongo.pool import _is_faas -from pymongo.read_preferences import MovingAverage -from pymongo.server_description import ServerDescription -from pymongo.srv_resolver import _SrvResolver +from pymongo.synchronous import common, periodic_executor +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.periodic_executor import _shutdown_executors +from pymongo.synchronous.pool import _is_faas +from pymongo.synchronous.read_preferences import MovingAverage +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.srv_resolver import _SrvResolver if TYPE_CHECKING: - from pymongo.pool import Connection, Pool, _CancellationContext - from pymongo.settings import TopologySettings - from pymongo.topology import Topology + from pymongo.synchronous.pool import Connection, Pool, _CancellationContext + from pymongo.synchronous.settings import TopologySettings + from pymongo.synchronous.topology import Topology + +_IS_SYNC = True def _sanitize(error: Exception) -> None: diff --git a/pymongo/synchronous/monitoring.py b/pymongo/synchronous/monitoring.py new file mode 100644 index 0000000000..a4b7296881 --- /dev/null +++ b/pymongo/synchronous/monitoring.py @@ -0,0 +1,1903 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools to monitor driver events. + +.. versionadded:: 3.1 + +.. attention:: Starting in PyMongo 3.11, the monitoring classes outlined below + are included in the PyMongo distribution under the + :mod:`~pymongo.event_loggers` submodule. + +Use :func:`register` to register global listeners for specific events. +Listeners must inherit from one of the abstract classes below and implement +the correct functions for that class. + +For example, a simple command logger might be implemented like this:: + + import logging + + from pymongo import monitoring + + class CommandLogger(monitoring.CommandListener): + + def started(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event)) + + def failed(self, event): + logging.info("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event)) + + monitoring.register(CommandLogger()) + +Server discovery and monitoring events are also available. For example:: + + class ServerLogger(monitoring.ServerListener): + + def opened(self, event): + logging.info("Server {0.server_address} added to topology " + "{0.topology_id}".format(event)) + + def description_changed(self, event): + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.info( + "Server {0.server_address} changed type from " + "{0.previous_description.server_type_name} to " + "{0.new_description.server_type_name}".format(event)) + + def closed(self, event): + logging.warning("Server {0.server_address} removed from topology " + "{0.topology_id}".format(event)) + + + class HeartbeatLogger(monitoring.ServerHeartbeatListener): + + def started(self, event): + logging.info("Heartbeat sent to server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + # The reply.document attribute was added in PyMongo 3.4. + logging.info("Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event)) + + def failed(self, event): + logging.warning("Heartbeat to server {0.connection_id} " + "failed with error {0.reply}".format(event)) + + class TopologyLogger(monitoring.TopologyListener): + + def opened(self, event): + logging.info("Topology with id {0.topology_id} " + "opened".format(event)) + + def description_changed(self, event): + logging.info("Topology description updated for " + "topology id {0.topology_id}".format(event)) + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.info( + "Topology {0.topology_id} changed type from " + "{0.previous_description.topology_type_name} to " + "{0.new_description.topology_type_name}".format(event)) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.warning("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.warning("No readable servers available.") + + def closed(self, event): + logging.info("Topology with id {0.topology_id} " + "closed".format(event)) + +Connection monitoring and pooling events are also available. For example:: + + class ConnectionPoolLogger(ConnectionPoolListener): + + def pool_created(self, event): + logging.info("[pool {0.address}] pool created".format(event)) + + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + + def pool_cleared(self, event): + logging.info("[pool {0.address}] pool cleared".format(event)) + + def pool_closed(self, event): + logging.info("[pool {0.address}] pool closed".format(event)) + + def connection_created(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection created".format(event)) + + def connection_ready(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection setup succeeded".format(event)) + + def connection_closed(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection closed, reason: " + "{0.reason}".format(event)) + + def connection_check_out_started(self, event): + logging.info("[pool {0.address}] connection check out " + "started".format(event)) + + def connection_check_out_failed(self, event): + logging.info("[pool {0.address}] connection check out " + "failed, reason: {0.reason}".format(event)) + + def connection_checked_out(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection checked out of pool".format(event)) + + def connection_checked_in(self, event): + logging.info("[pool {0.address}][connection #{0.connection_id}] " + "connection checked into pool".format(event)) + + +Event listeners can also be registered per instance of +:class:`~pymongo.mongo_client.MongoClient`:: + + client = MongoClient(event_listeners=[CommandLogger()]) + +Note that previously registered global listeners are automatically included +when configuring per client event listeners. Registering a new global listener +will not add that listener to existing client instances. + +.. note:: Events are delivered **synchronously**. Application threads block + waiting for event handlers (e.g. :meth:`~CommandListener.started`) to + return. Care must be taken to ensure that your event handlers are efficient + enough to not adversely affect overall application performance. + +.. warning:: The command documents published through this API are *not* copies. + If you intend to modify them in any way you must copy them in your event + handler first. +""" + +from __future__ import annotations + +import datetime +from collections import abc, namedtuple +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +from bson.objectid import ObjectId +from pymongo.helpers_constants import _SENSITIVE_COMMANDS +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.helpers import _handle_exception +from pymongo.synchronous.typings import _Address, _DocumentOut + +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.synchronous.server_description import ServerDescription + from pymongo.synchronous.topology_description import TopologyDescription + +_IS_SYNC = True + +_Listeners = namedtuple( + "_Listeners", + ( + "command_listeners", + "server_listeners", + "server_heartbeat_listeners", + "topology_listeners", + "cmap_listeners", + ), +) + +_LISTENERS = _Listeners([], [], [], [], []) + + +class _EventListener: + """Abstract base class for all event listeners.""" + + +class CommandListener(_EventListener): + """Abstract base class for command listeners. + + Handles `CommandStartedEvent`, `CommandSucceededEvent`, + and `CommandFailedEvent`. + """ + + def started(self, event: CommandStartedEvent) -> None: + """Abstract method to handle a `CommandStartedEvent`. + + :param event: An instance of :class:`CommandStartedEvent`. + """ + raise NotImplementedError + + def succeeded(self, event: CommandSucceededEvent) -> None: + """Abstract method to handle a `CommandSucceededEvent`. + + :param event: An instance of :class:`CommandSucceededEvent`. + """ + raise NotImplementedError + + def failed(self, event: CommandFailedEvent) -> None: + """Abstract method to handle a `CommandFailedEvent`. + + :param event: An instance of :class:`CommandFailedEvent`. + """ + raise NotImplementedError + + +class ConnectionPoolListener(_EventListener): + """Abstract base class for connection pool listeners. + + Handles all of the connection pool events defined in the Connection + Monitoring and Pooling Specification: + :class:`PoolCreatedEvent`, :class:`PoolClearedEvent`, + :class:`PoolClosedEvent`, :class:`ConnectionCreatedEvent`, + :class:`ConnectionReadyEvent`, :class:`ConnectionClosedEvent`, + :class:`ConnectionCheckOutStartedEvent`, + :class:`ConnectionCheckOutFailedEvent`, + :class:`ConnectionCheckedOutEvent`, + and :class:`ConnectionCheckedInEvent`. + + .. versionadded:: 3.9 + """ + + def pool_created(self, event: PoolCreatedEvent) -> None: + """Abstract method to handle a :class:`PoolCreatedEvent`. + + Emitted when a connection Pool is created. + + :param event: An instance of :class:`PoolCreatedEvent`. + """ + raise NotImplementedError + + def pool_ready(self, event: PoolReadyEvent) -> None: + """Abstract method to handle a :class:`PoolReadyEvent`. + + Emitted when a connection Pool is marked ready. + + :param event: An instance of :class:`PoolReadyEvent`. + + .. versionadded:: 4.0 + """ + raise NotImplementedError + + def pool_cleared(self, event: PoolClearedEvent) -> None: + """Abstract method to handle a `PoolClearedEvent`. + + Emitted when a connection Pool is cleared. + + :param event: An instance of :class:`PoolClearedEvent`. + """ + raise NotImplementedError + + def pool_closed(self, event: PoolClosedEvent) -> None: + """Abstract method to handle a `PoolClosedEvent`. + + Emitted when a connection Pool is closed. + + :param event: An instance of :class:`PoolClosedEvent`. + """ + raise NotImplementedError + + def connection_created(self, event: ConnectionCreatedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCreatedEvent`. + + Emitted when a connection Pool creates a Connection object. + + :param event: An instance of :class:`ConnectionCreatedEvent`. + """ + raise NotImplementedError + + def connection_ready(self, event: ConnectionReadyEvent) -> None: + """Abstract method to handle a :class:`ConnectionReadyEvent`. + + Emitted when a connection has finished its setup, and is now ready to + use. + + :param event: An instance of :class:`ConnectionReadyEvent`. + """ + raise NotImplementedError + + def connection_closed(self, event: ConnectionClosedEvent) -> None: + """Abstract method to handle a :class:`ConnectionClosedEvent`. + + Emitted when a connection Pool closes a connection. + + :param event: An instance of :class:`ConnectionClosedEvent`. + """ + raise NotImplementedError + + def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. + + Emitted when the driver starts attempting to check out a connection. + + :param event: An instance of :class:`ConnectionCheckOutStartedEvent`. + """ + raise NotImplementedError + + def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. + + Emitted when the driver's attempt to check out a connection fails. + + :param event: An instance of :class:`ConnectionCheckOutFailedEvent`. + """ + raise NotImplementedError + + def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. + + Emitted when the driver successfully checks out a connection. + + :param event: An instance of :class:`ConnectionCheckedOutEvent`. + """ + raise NotImplementedError + + def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: + """Abstract method to handle a :class:`ConnectionCheckedInEvent`. + + Emitted when the driver checks in a connection back to the connection + Pool. + + :param event: An instance of :class:`ConnectionCheckedInEvent`. + """ + raise NotImplementedError + + +class ServerHeartbeatListener(_EventListener): + """Abstract base class for server heartbeat listeners. + + Handles `ServerHeartbeatStartedEvent`, `ServerHeartbeatSucceededEvent`, + and `ServerHeartbeatFailedEvent`. + + .. versionadded:: 3.3 + """ + + def started(self, event: ServerHeartbeatStartedEvent) -> None: + """Abstract method to handle a `ServerHeartbeatStartedEvent`. + + :param event: An instance of :class:`ServerHeartbeatStartedEvent`. + """ + raise NotImplementedError + + def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: + """Abstract method to handle a `ServerHeartbeatSucceededEvent`. + + :param event: An instance of :class:`ServerHeartbeatSucceededEvent`. + """ + raise NotImplementedError + + def failed(self, event: ServerHeartbeatFailedEvent) -> None: + """Abstract method to handle a `ServerHeartbeatFailedEvent`. + + :param event: An instance of :class:`ServerHeartbeatFailedEvent`. + """ + raise NotImplementedError + + +class TopologyListener(_EventListener): + """Abstract base class for topology monitoring listeners. + Handles `TopologyOpenedEvent`, `TopologyDescriptionChangedEvent`, and + `TopologyClosedEvent`. + + .. versionadded:: 3.3 + """ + + def opened(self, event: TopologyOpenedEvent) -> None: + """Abstract method to handle a `TopologyOpenedEvent`. + + :param event: An instance of :class:`TopologyOpenedEvent`. + """ + raise NotImplementedError + + def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: + """Abstract method to handle a `TopologyDescriptionChangedEvent`. + + :param event: An instance of :class:`TopologyDescriptionChangedEvent`. + """ + raise NotImplementedError + + def closed(self, event: TopologyClosedEvent) -> None: + """Abstract method to handle a `TopologyClosedEvent`. + + :param event: An instance of :class:`TopologyClosedEvent`. + """ + raise NotImplementedError + + +class ServerListener(_EventListener): + """Abstract base class for server listeners. + Handles `ServerOpeningEvent`, `ServerDescriptionChangedEvent`, and + `ServerClosedEvent`. + + .. versionadded:: 3.3 + """ + + def opened(self, event: ServerOpeningEvent) -> None: + """Abstract method to handle a `ServerOpeningEvent`. + + :param event: An instance of :class:`ServerOpeningEvent`. + """ + raise NotImplementedError + + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: + """Abstract method to handle a `ServerDescriptionChangedEvent`. + + :param event: An instance of :class:`ServerDescriptionChangedEvent`. + """ + raise NotImplementedError + + def closed(self, event: ServerClosedEvent) -> None: + """Abstract method to handle a `ServerClosedEvent`. + + :param event: An instance of :class:`ServerClosedEvent`. + """ + raise NotImplementedError + + +def _to_micros(dur: timedelta) -> int: + """Convert duration 'dur' to microseconds.""" + return int(dur.total_seconds() * 10e5) + + +def _validate_event_listeners( + option: str, listeners: Sequence[_EventListeners] +) -> Sequence[_EventListeners]: + """Validate event listeners""" + if not isinstance(listeners, abc.Sequence): + raise TypeError(f"{option} must be a list or tuple") + for listener in listeners: + if not isinstance(listener, _EventListener): + raise TypeError( + f"Listeners for {option} must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." + ) + return listeners + + +def register(listener: _EventListener) -> None: + """Register a global event listener. + + :param listener: A subclasses of :class:`CommandListener`, + :class:`ServerHeartbeatListener`, :class:`ServerListener`, + :class:`TopologyListener`, or :class:`ConnectionPoolListener`. + """ + if not isinstance(listener, _EventListener): + raise TypeError( + f"Listeners for {listener} must be either a " + "CommandListener, ServerHeartbeatListener, " + "ServerListener, TopologyListener, or " + "ConnectionPoolListener." + ) + if isinstance(listener, CommandListener): + _LISTENERS.command_listeners.append(listener) + if isinstance(listener, ServerHeartbeatListener): + _LISTENERS.server_heartbeat_listeners.append(listener) + if isinstance(listener, ServerListener): + _LISTENERS.server_listeners.append(listener) + if isinstance(listener, TopologyListener): + _LISTENERS.topology_listeners.append(listener) + if isinstance(listener, ConnectionPoolListener): + _LISTENERS.cmap_listeners.append(listener) + + +# The "hello" command is also deemed sensitive when attempting speculative +# authentication. +def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: + if ( + command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) + and "speculativeAuthenticate" in doc + ): + return True + return False + + +class _CommandEvent: + """Base class for command events.""" + + __slots__ = ( + "__cmd_name", + "__rqst_id", + "__conn_id", + "__op_id", + "__service_id", + "__db", + "__server_conn_id", + ) + + def __init__( + self, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + self.__cmd_name = command_name + self.__rqst_id = request_id + self.__conn_id = connection_id + self.__op_id = operation_id + self.__service_id = service_id + self.__db = database_name + self.__server_conn_id = server_connection_id + + @property + def command_name(self) -> str: + """The command name.""" + return self.__cmd_name + + @property + def request_id(self) -> int: + """The request id for this operation.""" + return self.__rqst_id + + @property + def connection_id(self) -> _Address: + """The address (host, port) of the server this command was sent to.""" + return self.__conn_id + + @property + def service_id(self) -> Optional[ObjectId]: + """The service_id this command was sent to, or ``None``. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def operation_id(self) -> Optional[int]: + """An id for this series of events or None.""" + return self.__op_id + + @property + def database_name(self) -> str: + """The database_name this command was sent to, or ``""``. + + .. versionadded:: 4.6 + """ + return self.__db + + @property + def server_connection_id(self) -> Optional[int]: + """The server-side connection id for the connection this command was sent on, or ``None``. + + .. versionadded:: 4.7 + """ + return self.__server_conn_id + + +class CommandStartedEvent(_CommandEvent): + """Event published when a command starts. + + :param command: The command document. + :param database_name: The name of the database this command was run against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + """ + + __slots__ = ("__cmd",) + + def __init__( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + server_connection_id: Optional[int] = None, + ) -> None: + if not command: + raise ValueError(f"{command!r} is not a valid command") + # Command name must be first key. + command_name = next(iter(command)) + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + cmd_name = command_name.lower() + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): + self.__cmd: _DocumentOut = {} + else: + self.__cmd = command + + @property + def command(self) -> _DocumentOut: + """The command document.""" + return self.__cmd + + @property + def database_name(self) -> str: + """The name of the database this command was run against.""" + return super().database_name + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.service_id, + self.server_connection_id, + ) + + +class CommandSucceededEvent(_CommandEvent): + """Event published when a command succeeds. + + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + + __slots__ = ("__duration_micros", "__reply") + + def __init__( + self, + duration: datetime.timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + self.__duration_micros = _to_micros(duration) + cmd_name = command_name.lower() + if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, reply): + self.__reply: _DocumentOut = {} + else: + self.__reply = reply + + @property + def duration_micros(self) -> int: + """The duration of this operation in microseconds.""" + return self.__duration_micros + + @property + def reply(self) -> _DocumentOut: + """The server failure document for this operation.""" + return self.__reply + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.service_id, + self.server_connection_id, + ) + + +class CommandFailedEvent(_CommandEvent): + """Event published when a command fails. + + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command + was sent to. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + + __slots__ = ("__duration_micros", "__failure") + + def __init__( + self, + duration: datetime.timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + operation_id: Optional[int], + service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, + ) -> None: + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + self.__duration_micros = _to_micros(duration) + self.__failure = failure + + @property + def duration_micros(self) -> int: + """The duration of this operation in microseconds.""" + return self.__duration_micros + + @property + def failure(self) -> _DocumentOut: + """The server failure document for this operation.""" + return self.__failure + + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}, server_connection_id: {}>" + ).format( + self.__class__.__name__, + self.connection_id, + self.database_name, + self.command_name, + self.operation_id, + self.duration_micros, + self.failure, + self.service_id, + self.server_connection_id, + ) + + +class _PoolEvent: + """Base class for pool events.""" + + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: + self.__address = address + + @property + def address(self) -> _Address: + """The address (host, port) pair of the server the pool is attempting + to connect to. + """ + return self.__address + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class PoolCreatedEvent(_PoolEvent): + """Published when a Connection Pool is created. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__options",) + + def __init__(self, address: _Address, options: dict[str, Any]) -> None: + super().__init__(address) + self.__options = options + + @property + def options(self) -> dict[str, Any]: + """Any non-default pool options that were set on this Connection Pool.""" + return self.__options + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" + + +class PoolReadyEvent(_PoolEvent): + """Published when a Connection Pool is marked ready. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 4.0 + """ + + __slots__ = () + + +class PoolClearedEvent(_PoolEvent): + """Published when a Connection Pool is cleared. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + :param service_id: The service_id this command was sent to, or ``None``. + :param interrupt_connections: True if all active connections were interrupted by the Pool during clearing. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__service_id", "__interrupt_connections") + + def __init__( + self, + address: _Address, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + super().__init__(address) + self.__service_id = service_id + self.__interrupt_connections = interrupt_connections + + @property + def service_id(self) -> Optional[ObjectId]: + """Connections with this service_id are cleared. + + When service_id is ``None``, all connections in the pool are cleared. + + .. versionadded:: 3.12 + """ + return self.__service_id + + @property + def interrupt_connections(self) -> bool: + """If True, active connections are interrupted during clearing. + + .. versionadded:: 4.7 + """ + return self.__interrupt_connections + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r}, {self.__interrupt_connections!r})" + + +class PoolClosedEvent(_PoolEvent): + """Published when a Connection Pool is closed. + + :param address: The address (host, port) pair of the server this Pool is + attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionClosedReason: + """An enum that defines values for `reason` on a + :class:`ConnectionClosedEvent`. + + .. versionadded:: 3.9 + """ + + STALE = "stale" + """The pool was cleared, making the connection no longer valid.""" + + IDLE = "idle" + """The connection became stale by being idle for too long (maxIdleTimeMS). + """ + + ERROR = "error" + """The connection experienced an error, making it no longer valid.""" + + POOL_CLOSED = "poolClosed" + """The pool was closed, making the connection no longer valid.""" + + +class ConnectionCheckOutFailedReason: + """An enum that defines values for `reason` on a + :class:`ConnectionCheckOutFailedEvent`. + + .. versionadded:: 3.9 + """ + + TIMEOUT = "timeout" + """The connection check out attempt exceeded the specified timeout.""" + + POOL_CLOSED = "poolClosed" + """The pool was previously closed, and cannot provide new connections.""" + + CONN_ERROR = "connectionError" + """The connection check out attempt experienced an error while setting up + a new connection. + """ + + +class _ConnectionEvent: + """Private base class for connection events.""" + + __slots__ = ("__address",) + + def __init__(self, address: _Address) -> None: + self.__address = address + + @property + def address(self) -> _Address: + """The address (host, port) pair of the server this connection is + attempting to connect to. + """ + return self.__address + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + + @property + def connection_id(self) -> int: + """The ID of the connection.""" + return self.__connection_id + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" + + +class _ConnectionDurationEvent(_ConnectionIdEvent): + """Private base class for connection events with a duration.""" + + __slots__ = ("__duration",) + + def __init__(self, address: _Address, connection_id: int, duration: Optional[float]) -> None: + super().__init__(address, connection_id) + self.__duration = duration + + @property + def duration(self) -> Optional[float]: + """The duration of the connection event. + + .. versionadded:: 4.7 + """ + return self.__duration + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.connection_id!r}, {self.__duration!r})" + + +class ConnectionCreatedEvent(_ConnectionIdEvent): + """Published when a Connection Pool creates a Connection object. + + NOTE: This connection is not ready for use until the + :class:`ConnectionReadyEvent` is published. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionReadyEvent(_ConnectionDurationEvent): + """Published when a Connection has finished its setup, and is ready to use. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionClosedEvent(_ConnectionIdEvent): + """Published when a Connection is closed. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + :param reason: A reason explaining why this connection was closed. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__reason",) + + def __init__(self, address: _Address, connection_id: int, reason: str): + super().__init__(address, connection_id) + self.__reason = reason + + @property + def reason(self) -> str: + """A reason explaining why this connection was closed. + + The reason must be one of the strings from the + :class:`ConnectionClosedReason` enum. + """ + return self.__reason + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r})".format( + self.__class__.__name__, + self.address, + self.connection_id, + self.__reason, + ) + + +class ConnectionCheckOutStartedEvent(_ConnectionEvent): + """Published when the driver starts attempting to check out a connection. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionCheckOutFailedEvent(_ConnectionDurationEvent): + """Published when the driver's attempt to check out a connection fails. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param reason: A reason explaining why connection check out failed. + + .. versionadded:: 3.9 + """ + + __slots__ = ("__reason",) + + def __init__(self, address: _Address, reason: str, duration: Optional[float]) -> None: + super().__init__(address=address, connection_id=0, duration=duration) + self.__reason = reason + + @property + def reason(self) -> str: + """A reason explaining why connection check out failed. + + The reason must be one of the strings from the + :class:`ConnectionCheckOutFailedReason` enum. + """ + return self.__reason + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r}, {self.duration!r})" + + +class ConnectionCheckedOutEvent(_ConnectionDurationEvent): + """Published when the driver successfully checks out a connection. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class ConnectionCheckedInEvent(_ConnectionIdEvent): + """Published when the driver checks in a Connection into the Pool. + + :param address: The address (host, port) pair of the server this + Connection is attempting to connect to. + :param connection_id: The integer ID of the Connection in this Pool. + + .. versionadded:: 3.9 + """ + + __slots__ = () + + +class _ServerEvent: + """Base class for server events.""" + + __slots__ = ("__server_address", "__topology_id") + + def __init__(self, server_address: _Address, topology_id: ObjectId) -> None: + self.__server_address = server_address + self.__topology_id = topology_id + + @property + def server_address(self) -> _Address: + """The address (host, port) pair of the server""" + return self.__server_address + + @property + def topology_id(self) -> ObjectId: + """A unique identifier for the topology this server is a part of.""" + return self.__topology_id + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.server_address} topology_id: {self.topology_id}>" + + +class ServerDescriptionChangedEvent(_ServerEvent): + """Published when server description changes. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__previous_description", "__new_description") + + def __init__( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + *args: Any, + ) -> None: + super().__init__(*args) + self.__previous_description = previous_description + self.__new_description = new_description + + @property + def previous_description(self) -> ServerDescription: + """The previous + :class:`~pymongo.server_description.ServerDescription`. + """ + return self.__previous_description + + @property + def new_description(self) -> ServerDescription: + """The new + :class:`~pymongo.server_description.ServerDescription`. + """ + return self.__new_description + + def __repr__(self) -> str: + return "<{} {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.server_address, + self.previous_description, + self.new_description, + ) + + +class ServerOpeningEvent(_ServerEvent): + """Published when server is initialized. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class ServerClosedEvent(_ServerEvent): + """Published when server is closed. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class TopologyEvent: + """Base class for topology description events.""" + + __slots__ = ("__topology_id",) + + def __init__(self, topology_id: ObjectId) -> None: + self.__topology_id = topology_id + + @property + def topology_id(self) -> ObjectId: + """A unique identifier for the topology this server is a part of.""" + return self.__topology_id + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" + + +class TopologyDescriptionChangedEvent(TopologyEvent): + """Published when the topology description changes. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__previous_description", "__new_description") + + def __init__( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + *args: Any, + ) -> None: + super().__init__(*args) + self.__previous_description = previous_description + self.__new_description = new_description + + @property + def previous_description(self) -> TopologyDescription: + """The previous + :class:`~pymongo.topology_description.TopologyDescription`. + """ + return self.__previous_description + + @property + def new_description(self) -> TopologyDescription: + """The new + :class:`~pymongo.topology_description.TopologyDescription`. + """ + return self.__new_description + + def __repr__(self) -> str: + return "<{} topology_id: {} changed from: {}, to: {}>".format( + self.__class__.__name__, + self.topology_id, + self.previous_description, + self.new_description, + ) + + +class TopologyOpenedEvent(TopologyEvent): + """Published when the topology is initialized. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class TopologyClosedEvent(TopologyEvent): + """Published when the topology is closed. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class _ServerHeartbeatEvent: + """Base class for server heartbeat events.""" + + __slots__ = ("__connection_id", "__awaited") + + def __init__(self, connection_id: _Address, awaited: bool = False) -> None: + self.__connection_id = connection_id + self.__awaited = awaited + + @property + def connection_id(self) -> _Address: + """The address (host, port) of the server this heartbeat was sent + to. + """ + return self.__connection_id + + @property + def awaited(self) -> bool: + """Whether the heartbeat was issued as an awaitable hello command. + + .. versionadded:: 4.6 + """ + return self.__awaited + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.connection_id} awaited: {self.awaited}>" + + +class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): + """Published when a heartbeat is started. + + .. versionadded:: 3.3 + """ + + __slots__ = () + + +class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): + """Fired when the server heartbeat succeeds. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__duration", "__reply") + + def __init__( + self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) + self.__duration = duration + self.__reply = reply + + @property + def duration(self) -> float: + """The duration of this heartbeat in microseconds.""" + return self.__duration + + @property + def reply(self) -> Hello: + """An instance of :class:`~pymongo.hello.Hello`.""" + return self.__reply + + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + + +class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): + """Fired when the server heartbeat fails, either with an "ok: 0" + or a socket exception. + + .. versionadded:: 3.3 + """ + + __slots__ = ("__duration", "__reply") + + def __init__( + self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False + ) -> None: + super().__init__(connection_id, awaited) + self.__duration = duration + self.__reply = reply + + @property + def duration(self) -> float: + """The duration of this heartbeat in microseconds.""" + return self.__duration + + @property + def reply(self) -> Exception: + """A subclass of :exc:`Exception`.""" + return self.__reply + + @property + def awaited(self) -> bool: + """Whether the heartbeat was awaited. + + If true, then :meth:`duration` reflects the sum of the round trip time + to the server and the time that the server waited before sending a + response. + + .. versionadded:: 3.11 + """ + return super().awaited + + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( + self.__class__.__name__, + self.connection_id, + self.duration, + self.awaited, + self.reply, + ) + + +class _EventListeners: + """Configure event listeners for a client instance. + + Any event listeners registered globally are included by default. + + :param listeners: A list of event listeners. + """ + + def __init__(self, listeners: Optional[Sequence[_EventListener]]): + self.__command_listeners = _LISTENERS.command_listeners[:] + self.__server_listeners = _LISTENERS.server_listeners[:] + lst = _LISTENERS.server_heartbeat_listeners + self.__server_heartbeat_listeners = lst[:] + self.__topology_listeners = _LISTENERS.topology_listeners[:] + self.__cmap_listeners = _LISTENERS.cmap_listeners[:] + if listeners is not None: + for lst in listeners: + if isinstance(lst, CommandListener): + self.__command_listeners.append(lst) + if isinstance(lst, ServerListener): + self.__server_listeners.append(lst) + if isinstance(lst, ServerHeartbeatListener): + self.__server_heartbeat_listeners.append(lst) + if isinstance(lst, TopologyListener): + self.__topology_listeners.append(lst) + if isinstance(lst, ConnectionPoolListener): + self.__cmap_listeners.append(lst) + self.__enabled_for_commands = bool(self.__command_listeners) + self.__enabled_for_server = bool(self.__server_listeners) + self.__enabled_for_server_heartbeat = bool(self.__server_heartbeat_listeners) + self.__enabled_for_topology = bool(self.__topology_listeners) + self.__enabled_for_cmap = bool(self.__cmap_listeners) + + @property + def enabled_for_commands(self) -> bool: + """Are any CommandListener instances registered?""" + return self.__enabled_for_commands + + @property + def enabled_for_server(self) -> bool: + """Are any ServerListener instances registered?""" + return self.__enabled_for_server + + @property + def enabled_for_server_heartbeat(self) -> bool: + """Are any ServerHeartbeatListener instances registered?""" + return self.__enabled_for_server_heartbeat + + @property + def enabled_for_topology(self) -> bool: + """Are any TopologyListener instances registered?""" + return self.__enabled_for_topology + + @property + def enabled_for_cmap(self) -> bool: + """Are any ConnectionPoolListener instances registered?""" + return self.__enabled_for_cmap + + def event_listeners(self) -> list[_EventListeners]: + """List of registered event listeners.""" + return ( + self.__command_listeners + + self.__server_heartbeat_listeners + + self.__server_listeners + + self.__topology_listeners + + self.__cmap_listeners + ) + + def publish_command_start( + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + ) -> None: + """Publish a CommandStartedEvent to all command listeners. + + :param command: The command document. + :param database_name: The name of the database this command was run + against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + """ + if op_id is None: + op_id = request_id + event = CommandStartedEvent( + command, + database_name, + request_id, + connection_id, + op_id, + service_id=service_id, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.started(event) + except Exception: + _handle_exception() + + def publish_command_success( + self, + duration: timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + speculative_hello: bool = False, + database_name: str = "", + ) -> None: + """Publish a CommandSucceededEvent to all command listeners. + + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param speculative_hello: Was the command sent with speculative auth? + :param database_name: The database this command was sent to, or ``""``. + """ + if op_id is None: + op_id = request_id + if speculative_hello: + # Redact entire response when the command started contained + # speculativeAuthenticate. + reply = {} + event = CommandSucceededEvent( + duration, + reply, + command_name, + request_id, + connection_id, + op_id, + service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.succeeded(event) + except Exception: + _handle_exception() + + def publish_command_failure( + self, + duration: timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: + """Publish a CommandFailedEvent to all command listeners. + + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document or failure description + document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this + command was sent to. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. + """ + if op_id is None: + op_id = request_id + event = CommandFailedEvent( + duration, + failure, + command_name, + request_id, + connection_id, + op_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, + ) + for subscriber in self.__command_listeners: + try: + subscriber.failed(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_started(self, connection_id: _Address, awaited: bool) -> None: + """Publish a ServerHeartbeatStartedEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param awaited: True if this heartbeat is part of an awaitable hello command. + """ + event = ServerHeartbeatStartedEvent(connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.started(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_succeeded( + self, connection_id: _Address, duration: float, reply: Hello, awaited: bool + ) -> None: + """Publish a ServerHeartbeatSucceededEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible + resolution for the platform. + :param reply: The command reply. + :param awaited: True if the response was awaited. + """ + event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.succeeded(event) + except Exception: + _handle_exception() + + def publish_server_heartbeat_failed( + self, connection_id: _Address, duration: float, reply: Exception, awaited: bool + ) -> None: + """Publish a ServerHeartbeatFailedEvent to all server heartbeat + listeners. + + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible + resolution for the platform. + :param reply: The command reply. + :param awaited: True if the response was awaited. + """ + event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) + for subscriber in self.__server_heartbeat_listeners: + try: + subscriber.failed(event) + except Exception: + _handle_exception() + + def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: + """Publish a ServerOpeningEvent to all server listeners. + + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerOpeningEvent(server_address, topology_id) + for subscriber in self.__server_listeners: + try: + subscriber.opened(event) + except Exception: + _handle_exception() + + def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: + """Publish a ServerClosedEvent to all server listeners. + + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerClosedEvent(server_address, topology_id) + for subscriber in self.__server_listeners: + try: + subscriber.closed(event) + except Exception: + _handle_exception() + + def publish_server_description_changed( + self, + previous_description: ServerDescription, + new_description: ServerDescription, + server_address: _Address, + topology_id: ObjectId, + ) -> None: + """Publish a ServerDescriptionChangedEvent to all server listeners. + + :param previous_description: The previous server description. + :param server_address: The address (host, port) pair of the server. + :param new_description: The new server description. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = ServerDescriptionChangedEvent( + previous_description, new_description, server_address, topology_id + ) + for subscriber in self.__server_listeners: + try: + subscriber.description_changed(event) + except Exception: + _handle_exception() + + def publish_topology_opened(self, topology_id: ObjectId) -> None: + """Publish a TopologyOpenedEvent to all topology listeners. + + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyOpenedEvent(topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.opened(event) + except Exception: + _handle_exception() + + def publish_topology_closed(self, topology_id: ObjectId) -> None: + """Publish a TopologyClosedEvent to all topology listeners. + + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyClosedEvent(topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.closed(event) + except Exception: + _handle_exception() + + def publish_topology_description_changed( + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + topology_id: ObjectId, + ) -> None: + """Publish a TopologyDescriptionChangedEvent to all topology listeners. + + :param previous_description: The previous topology description. + :param new_description: The new topology description. + :param topology_id: A unique identifier for the topology this server + is a part of. + """ + event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) + for subscriber in self.__topology_listeners: + try: + subscriber.description_changed(event) + except Exception: + _handle_exception() + + def publish_pool_created(self, address: _Address, options: dict[str, Any]) -> None: + """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" + event = PoolCreatedEvent(address, options) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_created(event) + except Exception: + _handle_exception() + + def publish_pool_ready(self, address: _Address) -> None: + """Publish a :class:`PoolReadyEvent` to all pool listeners.""" + event = PoolReadyEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_ready(event) + except Exception: + _handle_exception() + + def publish_pool_cleared( + self, + address: _Address, + service_id: Optional[ObjectId], + interrupt_connections: bool = False, + ) -> None: + """Publish a :class:`PoolClearedEvent` to all pool listeners.""" + event = PoolClearedEvent(address, service_id, interrupt_connections) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_cleared(event) + except Exception: + _handle_exception() + + def publish_pool_closed(self, address: _Address) -> None: + """Publish a :class:`PoolClosedEvent` to all pool listeners.""" + event = PoolClosedEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.pool_closed(event) + except Exception: + _handle_exception() + + def publish_connection_created(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionCreatedEvent` to all connection + listeners. + """ + event = ConnectionCreatedEvent(address, connection_id) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_created(event) + except Exception: + _handle_exception() + + def publish_connection_ready( + self, address: _Address, connection_id: int, duration: float + ) -> None: + """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" + event = ConnectionReadyEvent(address, connection_id, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_ready(event) + except Exception: + _handle_exception() + + def publish_connection_closed(self, address: _Address, connection_id: int, reason: str) -> None: + """Publish a :class:`ConnectionClosedEvent` to all connection + listeners. + """ + event = ConnectionClosedEvent(address, connection_id, reason) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_closed(event) + except Exception: + _handle_exception() + + def publish_connection_check_out_started(self, address: _Address) -> None: + """Publish a :class:`ConnectionCheckOutStartedEvent` to all connection + listeners. + """ + event = ConnectionCheckOutStartedEvent(address) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_check_out_started(event) + except Exception: + _handle_exception() + + def publish_connection_check_out_failed( + self, address: _Address, reason: str, duration: float + ) -> None: + """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection + listeners. + """ + event = ConnectionCheckOutFailedEvent(address, reason, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_check_out_failed(event) + except Exception: + _handle_exception() + + def publish_connection_checked_out( + self, address: _Address, connection_id: int, duration: float + ) -> None: + """Publish a :class:`ConnectionCheckedOutEvent` to all connection + listeners. + """ + event = ConnectionCheckedOutEvent(address, connection_id, duration) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_checked_out(event) + except Exception: + _handle_exception() + + def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: + """Publish a :class:`ConnectionCheckedInEvent` to all connection + listeners. + """ + event = ConnectionCheckedInEvent(address, connection_id) + for subscriber in self.__cmap_listeners: + try: + subscriber.connection_checked_in(event) + except Exception: + _handle_exception() diff --git a/pymongo/network.py b/pymongo/synchronous/network.py similarity index 88% rename from pymongo/network.py rename to pymongo/synchronous/network.py index 76afbe135d..3f5319fd32 100644 --- a/pymongo/network.py +++ b/pymongo/synchronous/network.py @@ -19,7 +19,6 @@ import errno import logging import socket -import struct import time from typing import ( TYPE_CHECKING, @@ -33,33 +32,42 @@ ) from bson import _decode_all_selective -from pymongo import _csot, helpers, message, ssl_support -from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.compression_support import _NO_COMPRESSION, decompress +from pymongo import _csot from pymongo.errors import ( NotPrimaryError, OperationFailure, ProtocolError, _OperationCancelled, ) -from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log -from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply -from pymongo.monitoring import _is_speculative_authenticate +from pymongo.network_layer import ( + _POLL_TIMEOUT, + _UNPACK_COMPRESSION_HEADER, + _UNPACK_HEADER, + BLOCKING_IO_ERRORS, + sendall, +) from pymongo.socket_checker import _errno_from_exception +from pymongo.synchronous import helpers as _async_helpers +from pymongo.synchronous import message as _async_message +from pymongo.synchronous.common import MAX_MESSAGE_SIZE +from pymongo.synchronous.compression_support import _NO_COMPRESSION, decompress +from pymongo.synchronous.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.synchronous.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.synchronous.monitoring import _is_speculative_authenticate if TYPE_CHECKING: from bson import CodecOptions - from pymongo.client_session import ClientSession - from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext - from pymongo.mongo_client import MongoClient - from pymongo.monitoring import _EventListeners - from pymongo.pool import Connection from pymongo.read_concern import ReadConcern - from pymongo.read_preferences import _ServerMode - from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.monitoring import _EventListeners + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.read_preferences import _ServerMode + from pymongo.synchronous.typings import _Address, _CollationIn, _DocumentOut, _DocumentType from pymongo.write_concern import WriteConcern -_UNPACK_HEADER = struct.Struct(" max_bson_size: - message._raise_document_too_large(name, size, max_bson_size) + _async_message._raise_document_too_large(name, size, max_bson_size) else: - request_id, msg, size = message._query( + request_id, msg, size = _async_message._query( 0, ns, 0, -1, spec, None, codec_options, compression_ctx ) - if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: - message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) + if max_bson_size is not None and size > max_bson_size + _async_message._COMMAND_OVERHEAD: + _async_message._raise_document_too_large( + name, size, max_bson_size + _async_message._COMMAND_OVERHEAD + ) if client is not None: if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( @@ -193,7 +203,7 @@ def command( ) try: - conn.conn.sendall(msg) + sendall(conn.conn, msg) if use_op_msg and unacknowledged: # Unacknowledged, fake a successful command response. reply = None @@ -209,7 +219,7 @@ def command( if client: client._process_response(response_doc, session) if check: - helpers._check_command_response( + _async_helpers._check_command_response( response_doc, conn.max_wire_version, allowable_errors, @@ -220,7 +230,7 @@ def command( if isinstance(exc, (NotPrimaryError, OperationFailure)): failure: _DocumentOut = exc.details # type: ignore[assignment] else: - failure = message._convert_exception(exc) + failure = _async_message._convert_exception(exc) if client is not None: if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( @@ -298,9 +308,6 @@ def command( return response_doc # type: ignore[return-value] -_UNPACK_COMPRESSION_HEADER = struct.Struct(" Union[_OpReply, _OpMsg]: @@ -345,9 +352,6 @@ def receive_message( return unpack_reply(data) -_POLL_TIMEOUT = 0.5 - - def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: """Block until at least one byte is read, or a timeout, or a cancel.""" sock = conn.conn @@ -381,10 +385,6 @@ def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: raise socket.timeout("timed out") -# Errors raised by sockets (and TLS sockets) when in non-blocking mode. -BLOCKING_IO_ERRORS = (BlockingIOError, *ssl_support.BLOCKING_IO_ERRORS) - - def _receive_data_on_socket(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: buf = bytearray(length) mv = memoryview(buf) diff --git a/pymongo/synchronous/operations.py b/pymongo/synchronous/operations.py new file mode 100644 index 0000000000..148f84a42c --- /dev/null +++ b/pymongo/synchronous/operations.py @@ -0,0 +1,625 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Operation class definitions.""" +from __future__ import annotations + +import enum +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +from bson.raw_bson import RawBSONDocument +from pymongo.synchronous import helpers +from pymongo.synchronous.collation import validate_collation_or_none +from pymongo.synchronous.common import validate_is_mapping, validate_list +from pymongo.synchronous.helpers import _gen_index_name, _index_document, _index_list +from pymongo.synchronous.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from pymongo.synchronous.bulk import _Bulk + +_IS_SYNC = True + +# Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary +_IndexList = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_IndexKeyHint = Union[str, _IndexList] + + +class _Op(str, enum.Enum): + ABORT = "abortTransaction" + AGGREGATE = "aggregate" + COMMIT = "commitTransaction" + COUNT = "count" + CREATE = "create" + CREATE_INDEXES = "createIndexes" + CREATE_SEARCH_INDEXES = "createSearchIndexes" + DELETE = "delete" + DISTINCT = "distinct" + DROP = "drop" + DROP_DATABASE = "dropDatabase" + DROP_INDEXES = "dropIndexes" + DROP_SEARCH_INDEXES = "dropSearchIndexes" + END_SESSIONS = "endSessions" + FIND_AND_MODIFY = "findAndModify" + FIND = "find" + INSERT = "insert" + LIST_COLLECTIONS = "listCollections" + LIST_INDEXES = "listIndexes" + LIST_SEARCH_INDEX = "listSearchIndexes" + LIST_DATABASES = "listDatabases" + UPDATE = "update" + UPDATE_INDEX = "updateIndex" + UPDATE_SEARCH_INDEX = "updateSearchIndex" + RENAME = "rename" + GETMORE = "getMore" + KILL_CURSORS = "killCursors" + TEST = "testOperation" + + +class InsertOne(Generic[_DocumentType]): + """Represents an insert_one operation.""" + + __slots__ = ("_doc",) + + def __init__(self, document: _DocumentType) -> None: + """Create an InsertOne instance. + + For use with :meth:`~pymongo.collection.Collection.bulk_write`. + + :param document: The document to insert. If the document is missing an + _id field one will be added. + """ + self._doc = document + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_insert(self._doc) # type: ignore[arg-type] + + def __repr__(self) -> str: + return f"InsertOne({self._doc!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return other._doc == self._doc + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class DeleteOne: + """Represents a delete_one operation.""" + + __slots__ = ("_filter", "_collation", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create a DeleteOne instance. + + For use with :meth:`~pymongo.collection.Collection.bulk_write`. + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._collation = collation + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __repr__(self) -> str: + return f"DeleteOne({self._filter!r}, {self._collation!r}, {self._hint!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return (other._filter, other._collation, other._hint) == ( + self._filter, + self._collation, + self._hint, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class DeleteMany: + """Represents a delete_many operation.""" + + __slots__ = ("_filter", "_collation", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create a DeleteMany instance. + + For use with :meth:`~pymongo.collection.Collection.bulk_write`. + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._collation = collation + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __repr__(self) -> str: + return f"DeleteMany({self._filter!r}, {self._collation!r}, {self._hint!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return (other._filter, other._collation, other._hint) == ( + self._filter, + self._collation, + self._hint, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class ReplaceOne(Generic[_DocumentType]): + """Represents a replace_one operation.""" + + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + replacement: Union[_DocumentType, RawBSONDocument], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create a ReplaceOne instance. + + For use with :meth:`~pymongo.collection.Collection.bulk_write`. + + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.5 + Added the ``collation`` option. + """ + if filter is not None: + validate_is_mapping("filter", filter) + if upsert is not None: + validate_boolean("upsert", upsert) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + self._filter = filter + self._doc = replacement + self._upsert = upsert + self._collation = collation + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_replace( + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + other._hint, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + ) + + +class _UpdateOp: + """Private base class for update operations.""" + + __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") + + def __init__( + self, + filter: Mapping[str, Any], + doc: Union[Mapping[str, Any], _Pipeline], + upsert: bool, + collation: Optional[_CollationIn], + array_filters: Optional[list[Mapping[str, Any]]], + hint: Optional[_IndexKeyHint], + ): + if filter is not None: + validate_is_mapping("filter", filter) + if upsert is not None: + validate_boolean("upsert", upsert) + if array_filters is not None: + validate_list("array_filters", array_filters) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers._index_document(hint) + else: + self._hint = hint + + self._filter = filter + self._doc = doc + self._upsert = upsert + self._collation = collation + self._array_filters = array_filters + + def __eq__(self, other: object) -> bool: + if isinstance(other, type(self)): + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._array_filters, + other._hint, + ) == ( + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) + return NotImplemented + + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + ) + + +class UpdateOne(_UpdateOp): + """Represents an update_one operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Represents an update_one operation. + + For use with :meth:`~pymongo.collection.Collection.bulk_write`. + + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added the `array_filters` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + super().__init__(filter, update, upsert, collation, array_filters, hint) + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_update( + self._filter, + self._doc, + False, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + +class UpdateMany(_UpdateOp): + """Represents an update_many operation.""" + + __slots__ = () + + def __init__( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + collation: Optional[_CollationIn] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + ) -> None: + """Create an UpdateMany instance. + + For use with :meth:`~pymongo.collection.Collection.bulk_write`. + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + + .. versionchanged:: 3.11 + Added the `hint` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added the `array_filters` option. + .. versionchanged:: 3.5 + Added the `collation` option. + """ + super().__init__(filter, update, upsert, collation, array_filters, hint) + + def _add_to_bulk(self, bulkobj: _Bulk) -> None: + """Add this operation to the _Bulk instance `bulkobj`.""" + bulkobj.add_update( + self._filter, + self._doc, + True, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + +class IndexModel: + """Represents an index to create.""" + + __slots__ = ("__document",) + + def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: + """Create an Index instance. + + For use with :meth:`~pymongo.collection.Collection.create_indexes`. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str`, and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after seconds. The indexed field must + be a UTC datetime or the data will not expire. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation`: An instance of :class:`~pymongo.collation.Collation` + that specifies the collation to use. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the { "$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. + + See the MongoDB documentation for a full list of supported options by + server version. + + :param keys: a single key or a list containing (key, direction) pairs + or keys specifying the index to create. + :param kwargs: any additional index creation + options (see the above list) should be passed as keyword + arguments. + + .. versionchanged:: 3.11 + Added the ``hidden`` option. + .. versionchanged:: 3.2 + Added the ``partialFilterExpression`` option to support partial + indexes. + + .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ + """ + keys = _index_list(keys) + if kwargs.get("name") is None: + kwargs["name"] = _gen_index_name(keys) + kwargs["key"] = _index_document(keys) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + self.__document = kwargs + if collation is not None: + self.__document["collation"] = collation + + @property + def document(self) -> dict[str, Any]: + """An index document suitable for passing to the createIndexes + command. + """ + return self.__document + + +class SearchIndexModel: + """Represents a search index to create.""" + + __slots__ = ("__document",) + + def __init__( + self, + definition: Mapping[str, Any], + name: Optional[str] = None, + type: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Create a Search Index instance. + + For use with :meth:`~pymongo.collection.Collection.create_search_index` and :meth:`~pymongo.collection.Collection.create_search_indexes`. + + :param definition: The definition for this index. + :param name: The name for this index, if present. + :param type: The type for this index which defaults to "search". Alternative values include "vectorSearch". + :param kwargs: Keyword arguments supplying any additional options. + + .. note:: Search indexes require a MongoDB server version 7.0+ Atlas cluster. + .. versionadded:: 4.5 + .. versionchanged:: 4.7 + Added the type and kwargs arguments. + """ + self.__document: dict[str, Any] = {} + if name is not None: + self.__document["name"] = name + self.__document["definition"] = definition + if type is not None: + self.__document["type"] = type + self.__document.update(kwargs) + + @property + def document(self) -> Mapping[str, Any]: + """The document for this index.""" + return self.__document diff --git a/pymongo/periodic_executor.py b/pymongo/synchronous/periodic_executor.py similarity index 92% rename from pymongo/periodic_executor.py rename to pymongo/synchronous/periodic_executor.py index 9e9ead61fc..43125016bc 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/synchronous/periodic_executor.py @@ -16,24 +16,27 @@ from __future__ import annotations +import asyncio import sys import threading import time import weakref -from typing import Any, Callable, Optional +from typing import Any, Optional from pymongo.lock import _create_lock +_IS_SYNC = True + class PeriodicExecutor: def __init__( self, interval: float, min_interval: float, - target: Callable[[], bool], + target: Any, name: Optional[str] = None, ): - """ "Run a target function periodically on a background thread. + """Run a target function periodically on a background thread. If the target's return value is false, the executor stops. @@ -61,6 +64,9 @@ def __init__( def __repr__(self) -> str: return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" + def _run_async(self) -> None: + asyncio.run(self._run()) # type: ignore[func-returns-value] + def open(self) -> None: """Start. Multiple calls have no effect. @@ -88,7 +94,10 @@ def open(self) -> None: pass if not started: - thread = threading.Thread(target=self._run, name=self._name) + if _IS_SYNC: + thread = threading.Thread(target=self._run, name=self._name) + else: + thread = threading.Thread(target=self._run_async, name=self._name) thread.daemon = True self._thread = weakref.proxy(thread) _register_executor(self) @@ -128,7 +137,7 @@ def update_interval(self, new_interval: int) -> None: def skip_sleep(self) -> None: self._skip_sleep = True - def __should_stop(self) -> bool: + def _should_stop(self) -> bool: with self._lock: if self._stopped: self._thread_will_exit = True @@ -136,7 +145,7 @@ def __should_stop(self) -> bool: return False def _run(self) -> None: - while not self.__should_stop(): + while not self._should_stop(): try: if not self._target(): self._stopped = True diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py new file mode 100644 index 0000000000..391db4e7a7 --- /dev/null +++ b/pymongo/synchronous/pool.py @@ -0,0 +1,2122 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +from __future__ import annotations + +import collections +import contextlib +import copy +import logging +import os +import platform +import socket +import ssl +import sys +import threading +import time +import weakref +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Generator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +import bson +from bson import DEFAULT_CODEC_OPTIONS +from pymongo import __version__, _csot +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConfigurationError, + ConnectionFailure, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, + _CertificateError, +) +from pymongo.lock import _create_lock +from pymongo.network_layer import sendall +from pymongo.server_api import _add_to_command +from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker +from pymongo.ssl_support import HAS_SNI, SSLError +from pymongo.synchronous import helpers +from pymongo.synchronous.client_session import _validate_session_write_concern +from pymongo.synchronous.common import ( + MAX_BSON_SIZE, + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_MESSAGE_SIZE, + MAX_POOL_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + MIN_POOL_SIZE, + ORDERED_TYPES, + WAIT_QUEUE_TIMEOUT, +) +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) +from pymongo.synchronous.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, + _EventListeners, +) +from pymongo.synchronous.network import command, receive_message +from pymongo.synchronous.read_preferences import ReadPreference + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.driver_info import DriverInfo + from pymongo.pyopenssl_context import SSLContext, _sslConn + from pymongo.read_concern import ReadConcern + from pymongo.server_api import ServerApi + from pymongo.synchronous.auth import MongoCredential, _AuthContext + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.compression_support import ( + CompressionSettings, + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.synchronous.message import _OpMsg, _OpReply + from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler + from pymongo.synchronous.read_preferences import _ServerMode + from pymongo.synchronous.typings import ClusterTime, _Address, _CollationIn + from pymongo.write_concern import WriteConcern + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. + def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 + """Dummy function for platforms that don't provide fcntl.""" + + +_IS_SYNC = True + +_MAX_TCP_KEEPIDLE = 120 +_MAX_TCP_KEEPINTVL = 10 +_MAX_TCP_KEEPCNT = 9 + +if sys.platform == "win32": + try: + import _winreg as winreg + except ImportError: + import winreg + + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). + return default + + try: + with winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + +else: + + def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: + if hasattr(socket, tcp_option): + sockopt = getattr(socket, tcp_option) + try: + # PYTHON-1350 - NetBSD doesn't implement getsockopt for + # TCP_KEEPIDLE and friends. Don't attempt to set the + # values there. + default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) + if default > max_value: + sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) + except OSError: + pass + + def _set_keepalive_times(sock: socket.socket) -> None: + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) + + +_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} + +if sys.platform.startswith("linux"): + # platform.linux_distribution was deprecated in Python 3.5 + # and removed in Python 3.8. Starting in Python 3.5 it + # raises DeprecationWarning + # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 + _name = platform.system() + _METADATA["os"] = { + "type": _name, + "name": _name, + "architecture": platform.machine(), + # Kernel version (e.g. 4.4.0-17-generic). + "version": platform.release(), + } +elif sys.platform == "darwin": + _METADATA["os"] = { + "type": platform.system(), + "name": platform.system(), + "architecture": platform.machine(), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + "version": platform.mac_ver()[0], + } +elif sys.platform == "win32": + _METADATA["os"] = { + "type": platform.system(), + # "Windows XP", "Windows 7", "Windows 10", etc. + "name": " ".join((platform.system(), platform.release())), + "architecture": platform.machine(), + # Windows patch level (e.g. 5.1.2600-SP3) + "version": "-".join(platform.win32_ver()[1:3]), + } +elif sys.platform.startswith("java"): + _name, _ver, _arch = platform.java_ver()[-1] + _METADATA["os"] = { + # Linux, Windows 7, Mac OS X, etc. + "type": _name, + "name": _name, + # x86, x86_64, AMD64, etc. + "architecture": _arch, + # Linux kernel version, OSX version, etc. + "version": _ver, + } +else: + # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = { + "type": platform.system(), + "name": " ".join([part for part in _aliased[:2] if part]), + "architecture": platform.machine(), + "version": _aliased[2], + } + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) +else: + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) + +DOCKER_ENV_PATH = "/.dockerenv" +ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" + +RUNTIME_NAME_DOCKER = "docker" +ORCHESTRATOR_NAME_K8S = "kubernetes" + + +def get_container_env_info() -> dict[str, str]: + """Returns the runtime and orchestrator of a container. + If neither value is present, the metadata client.env.container field will be omitted.""" + container = {} + + if Path(DOCKER_ENV_PATH).exists(): + container["runtime"] = RUNTIME_NAME_DOCKER + if os.getenv(ENV_VAR_K8S): + container["orchestrator"] = ORCHESTRATOR_NAME_K8S + + return container + + +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _is_faas() -> bool: + return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> dict[str, Any]: + env: dict[str, Any] = {} + container = get_container_env_info() + if container: + env["container"] = container + # Skip if multiple (or no) envs are matched. + if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations +def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. + overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) + + +# If the first getaddrinfo call of this interpreter's life is on a thread, +# while the main thread holds the import lock, getaddrinfo deadlocks trying +# to import the IDNA codec. Import it here, where presumably we're on the +# main thread, to avoid the deadlock. See PYTHON-607. +"foo".encode("idna") + + +def _raise_connection_failure( + address: Any, + error: Exception, + msg_prefix: Optional[str] = None, + timeout_details: Optional[dict[str, float]] = None, +) -> NoReturn: + """Convert a socket.error to ConnectionFailure and raise it.""" + host, port = address + # If connecting to a Unix socket, port will be None. + if port is not None: + msg = "%s:%d: %s" % (host, port, error) + else: + msg = f"{host}: {error}" + if msg_prefix: + msg = msg_prefix + msg + if "configured timeouts" not in msg: + msg += format_timeout_details(timeout_details) + if isinstance(error, socket.timeout): + raise NetworkTimeout(msg) from error + elif isinstance(error, SSLError) and "timed out" in str(error): + # Eventlet does not distinguish TLS network timeouts from other + # SSLErrors (https://github.com/eventlet/eventlet/issues/692). + # Luckily, we can work around this limitation because the phrase + # 'timed out' appears in all the timeout related SSLErrors raised. + raise NetworkTimeout(msg) from error + else: + raise AutoReconnect(msg) from error + + +def _cond_wait(condition: threading.Condition, deadline: Optional[float]) -> bool: + timeout = deadline - time.monotonic() if deadline else None + return condition.wait(timeout) + + +def _get_timeout_details(options: PoolOptions) -> dict[str, float]: + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details + + +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result + + +class PoolOptions: + """Read only connection pool options for a MongoClient. + + Should not be instantiated directly by application developers. Access + a client's pool options via + :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: + + pool_opts = client.options.pool_options + pool_opts.max_pool_size + pool_opts.min_pool_size + + """ + + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size: int = MAX_POOL_SIZE, + min_pool_size: int = MIN_POOL_SIZE, + max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, + connect_timeout: Optional[float] = None, + socket_timeout: Optional[float] = None, + wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, + ssl_context: Optional[SSLContext] = None, + tls_allow_invalid_hostnames: bool = False, + event_listeners: Optional[_EventListeners] = None, + appname: Optional[str] = None, + driver: Optional[DriverInfo] = None, + compression_settings: Optional[CompressionSettings] = None, + max_connecting: int = MAX_CONNECTING, + pause_enabled: bool = True, + server_api: Optional[ServerApi] = None, + load_balanced: Optional[bool] = None, + credentials: Optional[MongoCredential] = None, + ): + self.__max_pool_size = max_pool_size + self.__min_pool_size = min_pool_size + self.__max_idle_time_seconds = max_idle_time_seconds + self.__connect_timeout = connect_timeout + self.__socket_timeout = socket_timeout + self.__wait_queue_timeout = wait_queue_timeout + self.__ssl_context = ssl_context + self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames + self.__event_listeners = event_listeners + self.__appname = appname + self.__driver = driver + self.__compression_settings = compression_settings + self.__max_connecting = max_connecting + self.__pause_enabled = pause_enabled + self.__server_api = server_api + self.__load_balanced = load_balanced + self.__credentials = credentials + self.__metadata = copy.deepcopy(_METADATA) + if appname: + self.__metadata["application"] = {"name": appname} + + # Combine the "driver" MongoClient option with PyMongo's info, like: + # { + # 'driver': { + # 'name': 'PyMongo|MyDriver', + # 'version': '4.2.0|1.2.3', + # }, + # 'platform': 'CPython 3.8.0|MyPlatform' + # } + if driver: + if driver.name: + self.__metadata["driver"]["name"] = "{}|{}".format( + _METADATA["driver"]["name"], + driver.name, + ) + if driver.version: + self.__metadata["driver"]["version"] = "{}|{}".format( + _METADATA["driver"]["version"], + driver.version, + ) + if driver.platform: + self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) + + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) + + @property + def _credentials(self) -> Optional[MongoCredential]: + """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + + @property + def non_default_options(self) -> dict[str, Any]: + """The non-default options this pool was created with. + + Added for CMAP's :class:`PoolCreatedEvent`. + """ + opts = {} + if self.__max_pool_size != MAX_POOL_SIZE: + opts["maxPoolSize"] = self.__max_pool_size + if self.__min_pool_size != MIN_POOL_SIZE: + opts["minPoolSize"] = self.__min_pool_size + if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: + assert self.__max_idle_time_seconds is not None + opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 + if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: + assert self.__wait_queue_timeout is not None + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 + if self.__max_connecting != MAX_CONNECTING: + opts["maxConnecting"] = self.__max_connecting + return opts + + @property + def max_pool_size(self) -> float: + """The maximum allowable number of concurrent connections to each + connected server. Requests to a server will block if there are + `maxPoolSize` outstanding connections to the requested server. + Defaults to 100. Cannot be 0. + + When a server's pool has reached `max_pool_size`, operations for that + server block waiting for a socket to be returned to the pool. If + ``waitQueueTimeoutMS`` is set, a blocked operation will raise + :exc:`~pymongo.errors.ConnectionFailure` after a timeout. + By default ``waitQueueTimeoutMS`` is not set. + """ + return self.__max_pool_size + + @property + def min_pool_size(self) -> int: + """The minimum required number of concurrent connections that the pool + will maintain to each connected server. Default is 0. + """ + return self.__min_pool_size + + @property + def max_connecting(self) -> int: + """The maximum number of concurrent connection creation attempts per + pool. Defaults to 2. + """ + return self.__max_connecting + + @property + def pause_enabled(self) -> bool: + return self.__pause_enabled + + @property + def max_idle_time_seconds(self) -> Optional[int]: + """The maximum number of seconds that a connection can remain + idle in the pool before being removed and replaced. Defaults to + `None` (no limit). + """ + return self.__max_idle_time_seconds + + @property + def connect_timeout(self) -> Optional[float]: + """How long a connection can take to be opened before timing out.""" + return self.__connect_timeout + + @property + def socket_timeout(self) -> Optional[float]: + """How long a send or receive on a socket can take before timing out.""" + return self.__socket_timeout + + @property + def wait_queue_timeout(self) -> Optional[int]: + """How long a thread will wait for a socket from the pool if the pool + has no free sockets. + """ + return self.__wait_queue_timeout + + @property + def _ssl_context(self) -> Optional[SSLContext]: + """An SSLContext instance or None.""" + return self.__ssl_context + + @property + def tls_allow_invalid_hostnames(self) -> bool: + """If True skip ssl.match_hostname.""" + return self.__tls_allow_invalid_hostnames + + @property + def _event_listeners(self) -> Optional[_EventListeners]: + """An instance of pymongo.monitoring._EventListeners.""" + return self.__event_listeners + + @property + def appname(self) -> Optional[str]: + """The application name, for sending with hello in server handshake.""" + return self.__appname + + @property + def driver(self) -> Optional[DriverInfo]: + """Driver name and version, for sending with hello in handshake.""" + return self.__driver + + @property + def _compression_settings(self) -> Optional[CompressionSettings]: + return self.__compression_settings + + @property + def metadata(self) -> dict[str, Any]: + """A dict of metadata about the application, driver, os, and platform.""" + return self.__metadata.copy() + + @property + def server_api(self) -> Optional[ServerApi]: + """A pymongo.server_api.ServerApi or None.""" + return self.__server_api + + @property + def load_balanced(self) -> Optional[bool]: + """True if this Pool is configured in load balanced mode.""" + return self.__load_balanced + + +class _CancellationContext: + def __init__(self) -> None: + self._cancelled = False + + def cancel(self) -> None: + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self) -> bool: + """Was cancel called?""" + return self._cancelled + + +class Connection: + """Store a connection with some metadata. + + :param conn: a raw connection object + :param pool: a Pool instance + :param address: the server's (host, port) + :param id: the id of this socket in it's pool + """ + + def __init__( + self, conn: Union[socket.socket, _sslConn], pool: Pool, address: tuple[str, int], id: int + ): + self.pool_ref = weakref.ref(pool) + self.conn = conn + self.address = address + self.id = id + self.closed = False + self.last_checkin_time = time.monotonic() + self.performed_handshake = False + self.is_writable: bool = False + self.max_wire_version = MAX_WIRE_VERSION + self.max_bson_size = MAX_BSON_SIZE + self.max_message_size = MAX_MESSAGE_SIZE + self.max_write_batch_size = MAX_WRITE_BATCH_SIZE + self.supports_sessions = False + self.hello_ok: bool = False + self.is_mongos = False + self.op_msg_enabled = False + self.listeners = pool.opts._event_listeners + self.enabled_for_cmap = pool.enabled_for_cmap + self.compression_settings = pool.opts._compression_settings + self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None + self.socket_checker: SocketChecker = SocketChecker() + self.oidc_token_gen_id: Optional[int] = None + # Support for mechanism negotiation on the initial handshake. + self.negotiated_mechs: Optional[list[str]] = None + self.auth_ctx: Optional[_AuthContext] = None + + # The pool's generation changes with each reset() so we can close + # sockets created before the last reset. + self.pool_gen = pool.gen + self.generation = self.pool_gen.get_overall() + self.ready = False + self.cancel_context: _CancellationContext = _CancellationContext() + self.opts = pool.opts + self.more_to_come: bool = False + # For load balancer support. + self.service_id: Optional[ObjectId] = None + self.server_connection_id: Optional[int] = None + # When executing a transaction in load balancing mode, this flag is + # set to true to indicate that the session now owns the connection. + self.pinned_txn = False + self.pinned_cursor = False + self.active = False + self.last_timeout = self.opts.socket_timeout + self.connect_rtt = 0.0 + self._client_id = pool._client_id + self.creation_time = time.monotonic() + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.settimeout(timeout) + + def apply_timeout( + self, client: MongoClient, cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + def unpin(self) -> None: + pool = self.pool_ref() + if pool: + pool.checkin(self) + else: + self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> dict[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return {HelloCompat.CMD: 1} + else: + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} + + def hello(self) -> Hello: + return self._hello(None, None, None) + + def _hello( + self, + cluster_time: Optional[ClusterTime], + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata + if self.compression_settings: + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + if not performing_handshake and cluster_time is not None: + cmd["$clusterTime"] = cluster_time + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." + creds.username + from pymongo.synchronous import auth + + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) + self.compression_context = ctx + + self.op_msg_enabled = True + self.server_connection_id = hello.connection_id + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + def _next_reply(self) -> dict[str, Any]: + reply = self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + client: Optional[MongoClient] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: + """Execute a command or raise an error. + + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. + :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the + ``writeConcernError`` field in the command response. + :param collation: The collation for this command. + :param session: optional ClientSession instance. + :param client: optional MongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.validate_session(client, session) + session = _validate_session_write_concern(session, write_concern) + + # Ensure command name remains in first place. + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] + spec = dict(spec) + + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + + self.add_server_api(spec) + if session: + session._apply_to(spec, retryable_write, read_preference, self) + self.send_cluster_time(spec, session, client) + listeners = self.listeners if publish_events else None + unacknowledged = bool(write_concern and not write_concern.acknowledged) + if self.op_msg_enabled: + self._raise_if_not_writable(unacknowledged) + try: + return command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): + raise + # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. + except BaseException as error: + self._raise_connection_failure(error) + + def send_message(self, message: bytes, max_doc_size: int) -> None: + """Send a raw BSON message or raise ConnectionFailure. + + If a network exception is raised, the socket is closed. + """ + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: + raise DocumentTooLarge( + "BSON document too large (%d bytes) - the connected server " + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) + + try: + sendall(self.conn, message) + except BaseException as error: + self._raise_connection_failure(error) + + def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise ConnectionFailure. + + If any exception is raised, the socket is closed. + """ + try: + return receive_message(self, request_id, self.max_message_size) + except BaseException as error: + self._raise_connection_failure(error) + + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not + writable. + """ + if unacknowledged and not self.is_writable: + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) + + def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. + + Can raise ConnectionFailure or InvalidDocument. + + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. + """ + self._raise_if_not_writable(True) + self.send_message(msg, max_doc_size) + + def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions + ) -> dict[str, Any]: + """Send "insert" etc. command, returning response as a dict. + + Can raise ConnectionFailure or OperationFailure. + + :param request_id: an int. + :param msg: bytes, the command message. + """ + self.send_message(msg, 0) + reply = self.receive_message(request_id) + result = reply.command_response(codec_options) + + # Raises NotPrimaryError or OperationFailure. + helpers._check_command_response(result, self.max_wire_version) + return result + + def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. + + Can raise ConnectionFailure or OperationFailure. + """ + # CMAP spec says to publish the ready event only after authenticating + # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False + if not self.ready: + creds = self.opts._credentials + if creds: + from pymongo.synchronous import auth + + auth.authenticate(creds, self, reauthenticate=reauthenticate) + self.ready = True + if self.enabled_for_cmap: + assert self.listeners is not None + duration = time.monotonic() - self.creation_time + self.listeners.publish_connection_ready(self.address, self.id, duration) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_READY, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) + + def validate_session( + self, client: Optional[MongoClient], session: Optional[ClientSession] + ) -> None: + """Validate this session before use with client. + + Raises error if the client is not the one that created the session. + """ + if session: + if session._client is not client: + raise InvalidOperation("Can only use session with the MongoClient that started it") + + def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + self._close_conn() + if reason and self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) + + def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + return self.socket_checker.socket_closed(self.conn) + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[ClientSession], + client: Optional[MongoClient], + ) -> None: + """Add $clusterTime.""" + if client: + client._send_cluster_time(command, session) + + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: + self.is_writable = is_writable + + def idle_time_seconds(self) -> float: + """Seconds since this socket was last checked into its pool.""" + return time.monotonic() - self.last_checkin_time + + def _raise_connection_failure(self, error: BaseException) -> NoReturn: + # Catch *all* exceptions from socket methods and close the socket. In + # regular Python, socket operations only raise socket.error, even if + # the underlying cause was a Ctrl-C: a signal raised during socket.recv + # is expressed as an EINTR error from poll. See internal_select_ex() in + # socketmodule.c. All error codes from poll become socket.error at + # first. Eventually in PyEval_EvalFrameEx the interpreter checks for + # signals and throws KeyboardInterrupt into the current frame on the + # main thread. + # + # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, + # ..) is called in Python code, which experiences the signal as a + # KeyboardInterrupt from the start, rather than as an initial + # socket.error, so we catch that, close the socket, and reraise it. + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. + if isinstance(error, (IOError, OSError, SSLError)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + else: + raise + + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.conn) + + def __repr__(self) -> str: + return "Connection({}){} at {}".format( + repr(self.conn), + self.closed and " CLOSED" or "", + id(self), + ) + + +def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.connect(host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + sock.connect(sa) + return sock + except OSError as e: + err = e + sock.close() + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if HAS_SNI: + if _IS_SYNC: + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + ssl_sock = ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc] + else: + if _IS_SYNC: + ssl_sock = ssl_context.wrap_socket(sock) + else: + ssl_sock = ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, SSLError) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +class _PoolClosedError(PyMongoError): + """Internal error raised when a thread tries to get a connection from a + closed pool. + """ + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 + + +# Do *not* explicitly inherit from object or Jython won't call __del__ +# http://bugs.jython.org/issue1057 +class Pool: + def __init__( + self, + address: _Address, + options: PoolOptions, + handshake: bool = True, + client_id: Optional[ObjectId] = None, + ): + """ + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param handshake: whether to call hello for each new Connection + """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY + # Check a socket's health with socket_closed() every once in a while. + # Can override for testing: 0 to always check, None to never check. + self._check_interval_seconds = 1 + # LIFO pool. Sockets are ordered on idle time. Sockets claimed + # and returned to pool from the left side. Stale sockets removed + # from the right side. + self.conns: collections.deque = collections.deque() + self.active_contexts: set[_CancellationContext] = set() + self.lock = _create_lock() + self.active_sockets = 0 + # Monotonically increasing connection ID required for CMAP Events. + self.next_connection_id = 1 + # Track whether the sockets in this pool are writeable or not. + self.is_writable: Optional[bool] = None + + # Keep track of resets, so we notice sockets created before the most + # recent reset and close them. + # self.generation = 0 + self.gen = _PoolGeneration() + self.pid = os.getpid() + self.address = address + self.opts = options + self.handshake = handshake + # Don't publish events in Monitor pools. + self.enabled_for_cmap = ( + self.handshake + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) + + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = threading.Condition(self.lock) # type: ignore[arg-type] + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = threading.Condition(self.lock) # type: ignore[arg-type] + self._max_connecting = self.opts.max_connecting + self._pending = 0 + self._client_id = client_id + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count: int = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[Connection] = set() + self.ncursors = 0 + self.ntxns = 0 + + def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_READY, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + def _reset( + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + old_state = self.state + with self.size_cond: + if self.closed: + return + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() + keep: collections.deque = collections.deque() + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + + if close: + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() + + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + + listeners = self.opts._event_listeners + # CMAP spec says that close() MUST close sockets before publishing the + # PoolClosedEvent but that reset() SHOULD close sockets *after* + # publishing the PoolClearedEvent. + if close: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_closed(self.address) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + ) + else: + if old_state != PoolState.PAUSED and self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLEARED, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) + for conn in sockets: + conn.close_conn(ConnectionClosedReason.STALE) + + def update_is_writable(self, is_writable: Optional[bool]) -> None: + """Updates the is_writable attribute on all sockets currently in the + Pool. + """ + self.is_writable = is_writable + with self.lock: + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) + + def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + self._reset(close=False, service_id=service_id, interrupt_connections=interrupt_connections) + + def reset_without_pause(self) -> None: + self._reset(close=False, pause=False) + + def close(self) -> None: + self._reset(close=True) + + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + def remove_stale_sockets(self, reference_generation: int) -> None: + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the + pool. + """ + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + return + + if self.opts.max_idle_time_seconds is not None: + with self.lock: + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + conn = self.conns.pop() + conn.close_conn(ConnectionClosedReason.IDLE) + + while True: + with self.size_cond: + # There are enough sockets in the pool. + if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False + try: + with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = self.connect() + with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.gen.get_overall() != reference_generation: + conn.close_conn(ConnectionClosedReason.STALE) + return + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + finally: + if incremented: + # Notify after adding the socket to the pool. + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + + with self.size_cond: + self.requests -= 1 + self.size_cond.notify() + + def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + """Connect to Mongo and return a new Connection. + + Can raise ConnectionFailure. + + Note that the pool does not keep a reference to the socket -- you + must call checkin() when you're done with it. + """ + with self.lock: + conn_id = self.next_connection_id + self.next_connection_id += 1 + + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_created(self.address, conn_id) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) + + try: + sock = _configured_socket(self.address, self.opts) + except BaseException as error: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn_id, ConnectionClosedReason.ERROR + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + if isinstance(error, (IOError, OSError, SSLError)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + + raise + + conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] + with self.lock: + self.active_contexts.add(conn.cancel_context) + try: + if self.handshake: + conn.hello() + self.is_writable = conn.is_writable + if handler: + handler.contribute_socket(conn, completed_handshake=False) + + conn.authenticate() + except BaseException: + conn.close_conn(ConnectionClosedReason.ERROR) + raise + + return conn + + @contextlib.contextmanager + def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> Generator[Connection, None]: + """Get a connection from the pool. Use with a "with" statement. + + Returns a :class:`Connection` object wrapping a connected + :class:`socket.socket`. + + This method should always be used in a with-statement:: + + with pool.get_conn() as connection: + connection.send_message(msg) + data = connection.receive_message(op_code, request_id) + + Can raise ConnectionFailure or OperationFailure. + + :param handler: A _MongoClientErrorHandler. + """ + listeners = self.opts._event_listeners + checkout_started_time = time.monotonic() + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_started(self.address) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_STARTED, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + conn = self._get_conn(checkout_started_time, handler=handler) + + if self.enabled_for_cmap: + assert listeners is not None + duration = time.monotonic() - checkout_started_time + listeners.publish_connection_checked_out(self.address, conn.id, duration) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + durationMS=duration, + ) + try: + with self.lock: + self.active_contexts.add(conn.cancel_context) + yield conn + except BaseException: + # Exception in caller. Ensure the connection gets returned. + # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = conn.pinned_txn or conn.pinned_cursor + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + handler.handle(exc_type, exc_val) + if not pinned and conn.active: + self.checkin(conn) + raise + if conn.pinned_txn: + with self.lock: + self.__pinned_sockets.add(conn) + self.ntxns += 1 + elif conn.pinned_cursor: + with self.lock: + self.__pinned_sockets.add(conn) + self.ncursors += 1 + elif conn.active: + self.checkin(conn) + + def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: + if self.state != PoolState.READY: + if self.enabled_for_cmap and emit_event: + assert self.opts._event_listeners is not None + duration = time.monotonic() - checkout_started_time + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + + details = _get_timeout_details(self.opts) + _raise_connection_failure( + self.address, AutoReconnect("connection pool paused"), timeout_details=details + ) + + def _get_conn( + self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None + ) -> Connection: + """Get or create a Connection. Can raise ConnectionFailure.""" + # We use the pid here to avoid issues with fork / multiprocessing. + # See test.test_client:TestClient.test_fork for an example of + # what could go wrong otherwise + if self.pid != os.getpid(): + self.reset_without_pause() + + if self.closed: + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + duration = time.monotonic() - checkout_started_time + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, + ) + raise _PoolClosedError( + "Attempted to check out a connection from closed connection pool" + ) + + with self.lock: + self.operation_count += 1 + + # Get a free socket or create one. + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: + deadline = time.monotonic() + self.opts.wait_queue_timeout + else: + deadline = None + + with self.size_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=True) + while not (self.requests < self.max_pool_size): + if not _cond_wait(self.size_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) + self.requests += 1 + + # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False + try: + with self.lock: + self.active_sockets += 1 + incremented = True + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + with self._max_connecting_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + if not _cond_wait(self._max_connecting_cond, deadline): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = self.connect(handler=handler) + finally: + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + except BaseException: + if conn: + # We checked out a socket but authentication failed. + conn.close_conn(ConnectionClosedReason.ERROR) + with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if self.enabled_for_cmap and not emitted_event: + assert self.opts._event_listeners is not None + duration = time.monotonic() - checkout_started_time + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + raise + + conn.active = True + return conn + + def checkin(self, conn: Connection) -> None: + """Return the connection to the pool, or if it's closed discard it. + + :param conn: The connection to check into the pool. + """ + txn = conn.pinned_txn + cursor = conn.pinned_cursor + conn.active = False + conn.pinned_txn = False + conn.pinned_cursor = False + self.__pinned_sockets.discard(conn) + listeners = self.opts._event_listeners + with self.lock: + self.active_contexts.discard(conn.cancel_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_in(self.address, conn.id) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKEDIN, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) + if self.pid != os.getpid(): + self.reset_without_pause() + else: + if self.closed: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + elif conn.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn.id, ConnectionClosedReason.ERROR + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + else: + with self.lock: + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + else: + conn.update_last_checkin_time() + conn.update_is_writable(bool(self.is_writable)) + self.conns.appendleft(conn) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() + + with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 + self.requests -= 1 + self.active_sockets -= 1 + self.operation_count -= 1 + self.size_cond.notify() + + def _perished(self, conn: Connection) -> bool: + """Return True and close the connection if it is "perished". + + This side-effecty function checks if this socket has been idle for + for longer than the max idle time, or if the socket has been closed by + some external network error, or if the socket's generation is outdated. + + Checking sockets lets us avoid seeing *some* + :class:`~pymongo.errors.AutoReconnect` exceptions on server + hiccups, etc. We only check if the socket was closed by an external + error if it has been > 1 second since the socket was checked into the + pool, to keep performance reasonable - we can't avoid AutoReconnects + completely anyway. + """ + idle_time_seconds = conn.idle_time_seconds() + # If socket is idle, open a new one. + if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + conn.close_conn(ConnectionClosedReason.IDLE) + return True + + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + conn.close_conn(ConnectionClosedReason.ERROR) + return True + + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + return True + + return False + + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + duration = time.monotonic() - checkout_started_time + listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) + + def __del__(self) -> None: + # Avoid ResourceWarnings in Python 3 + # Close all sockets without calling reset() or close() because it is + # not safe to acquire a lock in __del__. + for conn in self.conns: + conn.close_conn(None) diff --git a/pymongo/synchronous/read_preferences.py b/pymongo/synchronous/read_preferences.py new file mode 100644 index 0000000000..464256c343 --- /dev/null +++ b/pymongo/synchronous/read_preferences.py @@ -0,0 +1,624 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License", +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for choosing which member of a replica set to read from.""" + +from __future__ import annotations + +from collections import abc +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +from pymongo.errors import ConfigurationError +from pymongo.synchronous import max_staleness_selectors +from pymongo.synchronous.server_selectors import ( + member_with_tags_server_selector, + secondary_with_tags_server_selector, +) + +if TYPE_CHECKING: + from pymongo.synchronous.server_selectors import Selection + from pymongo.synchronous.topology_description import TopologyDescription + +_IS_SYNC = True + +_PRIMARY = 0 +_PRIMARY_PREFERRED = 1 +_SECONDARY = 2 +_SECONDARY_PREFERRED = 3 +_NEAREST = 4 + + +_MONGOS_MODES = ( + "primary", + "primaryPreferred", + "secondary", + "secondaryPreferred", + "nearest", +) + +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] + + +def _validate_tag_sets(tag_sets: Optional[_TagSets]) -> Optional[_TagSets]: + """Validate tag sets for a MongoClient.""" + if tag_sets is None: + return tag_sets + + if not isinstance(tag_sets, (list, tuple)): + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") + if len(tag_sets) == 0: + raise ValueError( + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" + ) + + for tags in tag_sets: + if not isinstance(tags, abc.Mapping): + raise TypeError( + f"Tag set {tags!r} invalid, must be an instance of dict, " + "bson.son.SON or other type that inherits from " + "collection.Mapping" + ) + + return list(tag_sets) + + +def _invalid_max_staleness_msg(max_staleness: Any) -> str: + return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness + + +# Some duplication with common.py to avoid import cycle. +def _validate_max_staleness(max_staleness: Any) -> int: + """Validate max_staleness.""" + if max_staleness == -1: + return -1 + + if not isinstance(max_staleness, int): + raise TypeError(_invalid_max_staleness_msg(max_staleness)) + + if max_staleness <= 0: + raise ValueError(_invalid_max_staleness_msg(max_staleness)) + + return max_staleness + + +def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: + """Validate hedge.""" + if hedge is None: + return None + + if not isinstance(hedge, dict): + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") + + return hedge + + +class _ServerMode: + """Base class for all read preferences.""" + + __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") + + def __init__( + self, + mode: int, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + self.__mongos_mode = _MONGOS_MODES[mode] + self.__mode = mode + self.__tag_sets = _validate_tag_sets(tag_sets) + self.__max_staleness = _validate_max_staleness(max_staleness) + self.__hedge = _validate_hedge(hedge) + + @property + def name(self) -> str: + """The name of this read preference.""" + return self.__class__.__name__ + + @property + def mongos_mode(self) -> str: + """The mongos mode of this read preference.""" + return self.__mongos_mode + + @property + def document(self) -> dict[str, Any]: + """Read preference as a document.""" + doc: dict[str, Any] = {"mode": self.__mongos_mode} + if self.__tag_sets not in (None, [{}]): + doc["tags"] = self.__tag_sets + if self.__max_staleness != -1: + doc["maxStalenessSeconds"] = self.__max_staleness + if self.__hedge not in (None, {}): + doc["hedge"] = self.__hedge + return doc + + @property + def mode(self) -> int: + """The mode of this read preference instance.""" + return self.__mode + + @property + def tag_sets(self) -> _TagSets: + """Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to + read only from members whose ``dc`` tag has the value ``"ny"``. + To specify a priority-order for tag sets, provide a list of + tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag + set, ``{}``, means "read from any member that matches the mode, + ignoring tags." MongoClient tries each set of tags in turn + until it finds a set of tags with at least one matching member. + For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) + + .. seealso:: `Data-Center Awareness + `_ + """ + return list(self.__tag_sets) if self.__tag_sets else [{}] + + @property + def max_staleness(self) -> int: + """The maximum estimated length of time (in seconds) a replica set + secondary can fall behind the primary in replication before it will + no longer be selected for operations, or -1 for no maximum. + """ + return self.__max_staleness + + @property + def hedge(self) -> Optional[_Hedge]: + """The read preference ``hedge`` parameter. + + A dictionary that configures how the server will perform hedged reads. + It consists of the following keys: + + - ``enabled``: Enables or disables hedged reads in sharded clusters. + + Hedged reads are automatically enabled in MongoDB 4.4+ when using a + ``nearest`` read preference. To explicitly enable hedged reads, set + the ``enabled`` key to ``true``:: + + >>> Nearest(hedge={'enabled': True}) + + To explicitly disable hedged reads, set the ``enabled`` key to + ``False``:: + + >>> Nearest(hedge={'enabled': False}) + + .. versionadded:: 3.11 + """ + return self.__hedge + + @property + def min_wire_version(self) -> int: + """The wire protocol version the server must support. + + Some read preferences impose version requirements on all servers (e.g. + maxStalenessSeconds requires MongoDB 3.4 / maxWireVersion 5). + + All servers' maxWireVersion must be at least this read preference's + `min_wire_version`, or the driver raises + :exc:`~pymongo.errors.ConfigurationError`. + """ + return 0 if self.__max_staleness == -1 else 5 + + def __repr__(self) -> str: + return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( + self.name, + self.__tag_sets, + self.__max_staleness, + self.__hedge, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, _ServerMode): + return ( + self.mode == other.mode + and self.tag_sets == other.tag_sets + and self.max_staleness == other.max_staleness + and self.hedge == other.hedge + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __getstate__(self) -> dict[str, Any]: + """Return value of object for pickling. + + Needed explicitly because __slots__() defined. + """ + return { + "mode": self.__mode, + "tag_sets": self.__tag_sets, + "max_staleness": self.__max_staleness, + "hedge": self.__hedge, + } + + def __setstate__(self, value: Mapping[str, Any]) -> None: + """Restore from pickling.""" + self.__mode = value["mode"] + self.__mongos_mode = _MONGOS_MODES[self.__mode] + self.__tag_sets = _validate_tag_sets(value["tag_sets"]) + self.__max_staleness = _validate_max_staleness(value["max_staleness"]) + self.__hedge = _validate_hedge(value["hedge"]) + + def __call__(self, selection: Selection) -> Selection: + return selection + + +class Primary(_ServerMode): + """Primary read preference. + + * When directly connected to one mongod queries are allowed if the server + is standalone or a replica set primary. + * When connected to a mongos queries are sent to the primary of a shard. + * When connected to a replica set queries are sent to the primary of + the replica set. + """ + + __slots__ = () + + def __init__(self) -> None: + super().__init__(_PRIMARY) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to a Selection.""" + return selection.primary_selection + + def __repr__(self) -> str: + return "Primary()" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, _ServerMode): + return other.mode == _PRIMARY + return NotImplemented + + +class PrimaryPreferred(_ServerMode): + """PrimaryPreferred read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are sent to the primary of a shard if + available, otherwise a shard secondary. + * When connected to a replica set queries are sent to the primary if + available, otherwise a secondary. + + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to an available secondary until the + primary of the replica set is discovered. + + :param tag_sets: The :attr:`~tag_sets` to use if the primary is not + available. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` to use if the primary is not available. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + if selection.primary: + return selection.primary_selection + else: + return secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class Secondary(_ServerMode): + """Secondary read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among shard + secondaries. An error is raised if no secondaries are available. + * When connected to a replica set queries are distributed among + secondaries. An error is raised if no secondaries are available. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + return secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class SecondaryPreferred(_ServerMode): + """SecondaryPreferred read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among shard + secondaries, or the shard primary if no secondary is available. + * When connected to a replica set queries are distributed among + secondaries, or the primary if no secondary is available. + + .. note:: When a :class:`~pymongo.mongo_client.MongoClient` is first + created reads will be routed to the primary of the replica set until + an available secondary is discovered. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + secondaries = secondary_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + if secondaries: + return secondaries + else: + return selection.primary_selection + + +class Nearest(_ServerMode): + """Nearest read preference. + + * When directly connected to one mongod queries are allowed to standalone + servers, to a replica set primary, or to replica set secondaries. + * When connected to a mongos queries are distributed among all members of + a shard. + * When connected to a replica set queries are distributed among all + members. + + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated + length of time a replica set secondary can fall behind the primary in + replication before it will no longer be selected for operations. + Default -1, meaning no maximum. If it is set, it must be at least + 90 seconds. + :param hedge: The :attr:`~hedge` for this read preference. + + .. versionchanged:: 3.11 + Added ``hedge`` parameter. + """ + + __slots__ = () + + def __init__( + self, + tag_sets: Optional[_TagSets] = None, + max_staleness: int = -1, + hedge: Optional[_Hedge] = None, + ) -> None: + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to Selection.""" + return member_with_tags_server_selector( + self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) + ) + + +class _AggWritePref: + """Agg $out/$merge write preference. + + * If there are readable servers and there is any pre-5.0 server, use + primary read preference. + * Otherwise use `pref` read preference. + + :param pref: The read preference to use on MongoDB 5.0+. + """ + + __slots__ = ("pref", "effective_pref") + + def __init__(self, pref: _ServerMode): + self.pref = pref + self.effective_pref: _ServerMode = ReadPreference.PRIMARY + + def selection_hook(self, topology_description: TopologyDescription) -> None: + common_wv = topology_description.common_wire_version + if ( + topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) + and common_wv + and common_wv < 13 + ): + self.effective_pref = ReadPreference.PRIMARY + else: + self.effective_pref = self.pref + + def __call__(self, selection: Selection) -> Selection: + """Apply this read preference to a Selection.""" + return self.effective_pref(selection) + + def __repr__(self) -> str: + return f"_AggWritePref(pref={self.pref!r})" + + # Proxy other calls to the effective_pref so that _AggWritePref can be + # used in place of an actual read preference. + def __getattr__(self, name: str) -> Any: + return getattr(self.effective_pref, name) + + +_ALL_READ_PREFERENCES = (Primary, PrimaryPreferred, Secondary, SecondaryPreferred, Nearest) + + +def make_read_preference( + mode: int, tag_sets: Optional[_TagSets], max_staleness: int = -1 +) -> _ServerMode: + if mode == _PRIMARY: + if tag_sets not in (None, [{}]): + raise ConfigurationError("Read preference primary cannot be combined with tags") + if max_staleness != -1: + raise ConfigurationError( + "Read preference primary cannot be combined with maxStalenessSeconds" + ) + return Primary() + return _ALL_READ_PREFERENCES[mode](tag_sets, max_staleness) # type: ignore + + +_MODES = ( + "PRIMARY", + "PRIMARY_PREFERRED", + "SECONDARY", + "SECONDARY_PREFERRED", + "NEAREST", +) + + +class ReadPreference: + """An enum that defines some commonly used read preference modes. + + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + See :doc:`/examples/high_availability` for code examples. + + A read preference is used in three cases: + + :class:`~pymongo.mongo_client.MongoClient` connected to a single mongod: + + - ``PRIMARY``: Queries are allowed if the server is standalone or a replica + set primary. + - All other modes allow queries to standalone servers, to a replica set + primary, or to replica set secondaries. + + :class:`~pymongo.mongo_client.MongoClient` initialized with the + ``replicaSet`` option: + + - ``PRIMARY``: Read from the primary. This is the default, and provides the + strongest consistency. If no primary is available, raise + :class:`~pymongo.errors.AutoReconnect`. + + - ``PRIMARY_PREFERRED``: Read from the primary if available, or if there is + none, read from a secondary. + + - ``SECONDARY``: Read from a secondary. If no secondary is available, + raise :class:`~pymongo.errors.AutoReconnect`. + + - ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise + from the primary. + + - ``NEAREST``: Read from any member. + + :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a + sharded cluster of replica sets: + + - ``PRIMARY``: Read from the primary of the shard, or raise + :class:`~pymongo.errors.OperationFailure` if there is none. + This is the default. + + - ``PRIMARY_PREFERRED``: Read from the primary of the shard, or if there is + none, read from a secondary of the shard. + + - ``SECONDARY``: Read from a secondary of the shard, or raise + :class:`~pymongo.errors.OperationFailure` if there is none. + + - ``SECONDARY_PREFERRED``: Read from a secondary of the shard if available, + otherwise from the shard primary. + + - ``NEAREST``: Read from any shard member. + """ + + PRIMARY = Primary() + PRIMARY_PREFERRED = PrimaryPreferred() + SECONDARY = Secondary() + SECONDARY_PREFERRED = SecondaryPreferred() + NEAREST = Nearest() + + +def read_pref_mode_from_name(name: str) -> int: + """Get the read preference mode from mongos/uri name.""" + return _MONGOS_MODES.index(name) + + +class MovingAverage: + """Tracks an exponentially-weighted moving average.""" + + average: Optional[float] + + def __init__(self) -> None: + self.average = None + + def add_sample(self, sample: float) -> None: + if sample < 0: + # Likely system time change while waiting for hello response + # and not using time.monotonic. Ignore it, the next one will + # probably be valid. + return + if self.average is None: + self.average = sample + else: + # The Server Selection Spec requires an exponentially weighted + # average with alpha = 0.2. + self.average = 0.8 * self.average + 0.2 * sample + + def get(self) -> Optional[float]: + """Get the calculated average, or None if no samples yet.""" + return self.average + + def reset(self) -> None: + self.average = None diff --git a/pymongo/response.py b/pymongo/synchronous/response.py similarity index 95% rename from pymongo/response.py rename to pymongo/synchronous/response.py index 5cdd3e7e8d..94fd4df508 100644 --- a/pymongo/response.py +++ b/pymongo/synchronous/response.py @@ -20,9 +20,11 @@ if TYPE_CHECKING: from datetime import timedelta - from pymongo.message import _OpMsg, _OpReply - from pymongo.pool import Connection - from pymongo.typings import _Address, _DocumentOut + from pymongo.synchronous.message import _OpMsg, _OpReply + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.typings import _Address, _DocumentOut + +_IS_SYNC = True class Response: diff --git a/pymongo/server.py b/pymongo/synchronous/server.py similarity index 93% rename from pymongo/server.py rename to pymongo/synchronous/server.py index 1c437a7eef..4c79569992 100644 --- a/pymongo/server.py +++ b/pymongo/synchronous/server.py @@ -17,27 +17,36 @@ import logging from datetime import datetime -from typing import TYPE_CHECKING, Any, Callable, ContextManager, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Optional, + Union, +) from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure -from pymongo.helpers import _check_command_response, _handle_reauth -from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log -from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query -from pymongo.response import PinnedResponse, Response +from pymongo.synchronous.helpers import _check_command_response, _handle_reauth +from pymongo.synchronous.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.synchronous.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.synchronous.response import PinnedResponse, Response if TYPE_CHECKING: from queue import Queue from weakref import ReferenceType from bson.objectid import ObjectId - from pymongo.mongo_client import MongoClient, _MongoClientErrorHandler - from pymongo.monitor import Monitor - from pymongo.monitoring import _EventListeners - from pymongo.pool import Connection, Pool - from pymongo.read_preferences import _ServerMode - from pymongo.server_description import ServerDescription - from pymongo.typings import _DocumentOut + from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler + from pymongo.synchronous.monitor import Monitor + from pymongo.synchronous.monitoring import _EventListeners + from pymongo.synchronous.pool import Connection, Pool + from pymongo.synchronous.read_preferences import _ServerMode + from pymongo.synchronous.server_description import ServerDescription + from pymongo.synchronous.typings import _DocumentOut + +_IS_SYNC = True _CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} diff --git a/pymongo/synchronous/server_description.py b/pymongo/synchronous/server_description.py new file mode 100644 index 0000000000..4a23fc1293 --- /dev/null +++ b/pymongo/synchronous/server_description.py @@ -0,0 +1,301 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Represent one server the driver is connected to.""" +from __future__ import annotations + +import time +import warnings +from typing import Any, Mapping, Optional + +from bson import EPOCH_NAIVE +from bson.objectid import ObjectId +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.typings import ClusterTime, _Address + +_IS_SYNC = True + + +class ServerDescription: + """Immutable representation of one server. + + :param address: A (host, port) pair + :param hello: Optional Hello instance + :param round_trip_time: Optional float + :param error: Optional, the last error attempting to connect to the server + :param round_trip_time: Optional float, the min latency from the most recent samples + """ + + __slots__ = ( + "_address", + "_server_type", + "_all_hosts", + "_tags", + "_replica_set_name", + "_primary", + "_max_bson_size", + "_max_message_size", + "_max_write_batch_size", + "_min_wire_version", + "_max_wire_version", + "_round_trip_time", + "_min_round_trip_time", + "_me", + "_is_writable", + "_is_readable", + "_ls_timeout_minutes", + "_error", + "_set_version", + "_election_id", + "_cluster_time", + "_last_write_date", + "_last_update_time", + "_topology_version", + ) + + def __init__( + self, + address: _Address, + hello: Optional[Hello] = None, + round_trip_time: Optional[float] = None, + error: Optional[Exception] = None, + min_round_trip_time: float = 0.0, + ) -> None: + self._address = address + if not hello: + hello = Hello({}) + + self._server_type = hello.server_type + self._all_hosts = hello.all_hosts + self._tags = hello.tags + self._replica_set_name = hello.replica_set_name + self._primary = hello.primary + self._max_bson_size = hello.max_bson_size + self._max_message_size = hello.max_message_size + self._max_write_batch_size = hello.max_write_batch_size + self._min_wire_version = hello.min_wire_version + self._max_wire_version = hello.max_wire_version + self._set_version = hello.set_version + self._election_id = hello.election_id + self._cluster_time = hello.cluster_time + self._is_writable = hello.is_writable + self._is_readable = hello.is_readable + self._ls_timeout_minutes = hello.logical_session_timeout_minutes + self._round_trip_time = round_trip_time + self._min_round_trip_time = min_round_trip_time + self._me = hello.me + self._last_update_time = time.monotonic() + self._error = error + self._topology_version = hello.topology_version + if error: + details = getattr(error, "details", None) + if isinstance(details, dict): + self._topology_version = details.get("topologyVersion") + + self._last_write_date: Optional[float] + if hello.last_write_date: + # Convert from datetime to seconds. + delta = hello.last_write_date - EPOCH_NAIVE + self._last_write_date = delta.total_seconds() + else: + self._last_write_date = None + + @property + def address(self) -> _Address: + """The address (host, port) of this server.""" + return self._address + + @property + def server_type(self) -> int: + """The type of this server.""" + return self._server_type + + @property + def server_type_name(self) -> str: + """The server type as a human readable string. + + .. versionadded:: 3.4 + """ + return SERVER_TYPE._fields[self._server_type] + + @property + def all_hosts(self) -> set[tuple[str, int]]: + """List of hosts, passives, and arbiters known to this server.""" + return self._all_hosts + + @property + def tags(self) -> Mapping[str, Any]: + return self._tags + + @property + def replica_set_name(self) -> Optional[str]: + """Replica set name or None.""" + return self._replica_set_name + + @property + def primary(self) -> Optional[tuple[str, int]]: + """This server's opinion about who the primary is, or None.""" + return self._primary + + @property + def max_bson_size(self) -> int: + return self._max_bson_size + + @property + def max_message_size(self) -> int: + return self._max_message_size + + @property + def max_write_batch_size(self) -> int: + return self._max_write_batch_size + + @property + def min_wire_version(self) -> int: + return self._min_wire_version + + @property + def max_wire_version(self) -> int: + return self._max_wire_version + + @property + def set_version(self) -> Optional[int]: + return self._set_version + + @property + def election_id(self) -> Optional[ObjectId]: + return self._election_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + return self._cluster_time + + @property + def election_tuple(self) -> tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) + return self._set_version, self._election_id + + @property + def me(self) -> Optional[tuple[str, int]]: + return self._me + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + return self._ls_timeout_minutes + + @property + def last_write_date(self) -> Optional[float]: + return self._last_write_date + + @property + def last_update_time(self) -> float: + return self._last_update_time + + @property + def round_trip_time(self) -> Optional[float]: + """The current average latency or None.""" + # This override is for unittesting only! + if self._address in self._host_to_round_trip_time: + return self._host_to_round_trip_time[self._address] + + return self._round_trip_time + + @property + def min_round_trip_time(self) -> float: + """The min latency from the most recent samples.""" + return self._min_round_trip_time + + @property + def error(self) -> Optional[Exception]: + """The last error attempting to connect to the server, or None.""" + return self._error + + @property + def is_writable(self) -> bool: + return self._is_writable + + @property + def is_readable(self) -> bool: + return self._is_readable + + @property + def mongos(self) -> bool: + return self._server_type == SERVER_TYPE.Mongos + + @property + def is_server_type_known(self) -> bool: + return self.server_type != SERVER_TYPE.Unknown + + @property + def retryable_writes_supported(self) -> bool: + """Checks if this server supports retryable writes.""" + return ( + self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + ) or self._server_type == SERVER_TYPE.LoadBalancer + + @property + def retryable_reads_supported(self) -> bool: + """Checks if this server supports retryable writes.""" + return self._max_wire_version >= 6 + + @property + def topology_version(self) -> Optional[Mapping[str, Any]]: + return self._topology_version + + def to_unknown(self, error: Optional[Exception] = None) -> ServerDescription: + unknown = ServerDescription(self.address, error=error) + unknown._topology_version = self.topology_version + return unknown + + def __eq__(self, other: Any) -> bool: + if isinstance(other, ServerDescription): + return ( + (self._address == other.address) + and (self._server_type == other.server_type) + and (self._min_wire_version == other.min_wire_version) + and (self._max_wire_version == other.max_wire_version) + and (self._me == other.me) + and (self._all_hosts == other.all_hosts) + and (self._tags == other.tags) + and (self._replica_set_name == other.replica_set_name) + and (self._set_version == other.set_version) + and (self._election_id == other.election_id) + and (self._primary == other.primary) + and (self._ls_timeout_minutes == other.logical_session_timeout_minutes) + and (self._error == other.error) + ) + + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + errmsg = "" + if self.error: + errmsg = f", error={self.error!r}" + return "<{} {} server_type: {}, rtt: {}{}>".format( + self.__class__.__name__, + self.address, + self.server_type_name, + self.round_trip_time, + errmsg, + ) + + # For unittesting only. Use under no circumstances! + _host_to_round_trip_time: dict = {} diff --git a/pymongo/server_selectors.py b/pymongo/synchronous/server_selectors.py similarity index 97% rename from pymongo/server_selectors.py rename to pymongo/synchronous/server_selectors.py index c22ad599ee..a3b2066ab0 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/synchronous/server_selectors.py @@ -20,9 +20,10 @@ from pymongo.server_type import SERVER_TYPE if TYPE_CHECKING: - from pymongo.server_description import ServerDescription - from pymongo.topology_description import TopologyDescription + from pymongo.synchronous.server_description import ServerDescription + from pymongo.synchronous.topology_description import TopologyDescription +_IS_SYNC = True T = TypeVar("T") TagSet = Mapping[str, Any] diff --git a/pymongo/settings.py b/pymongo/synchronous/settings.py similarity index 94% rename from pymongo/settings.py rename to pymongo/synchronous/settings.py index 4a3e7be4cd..f51b5307aa 100644 --- a/pymongo/settings.py +++ b/pymongo/synchronous/settings.py @@ -20,12 +20,14 @@ from typing import Any, Collection, Optional, Type, Union from bson.objectid import ObjectId -from pymongo import common, monitor, pool -from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT from pymongo.errors import ConfigurationError -from pymongo.pool import Pool, PoolOptions -from pymongo.server_description import ServerDescription -from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector +from pymongo.synchronous import common, monitor, pool +from pymongo.synchronous.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT +from pymongo.synchronous.pool import Pool, PoolOptions +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE, _ServerSelector + +_IS_SYNC = True class TopologySettings: diff --git a/pymongo/srv_resolver.py b/pymongo/synchronous/srv_resolver.py similarity index 98% rename from pymongo/srv_resolver.py rename to pymongo/synchronous/srv_resolver.py index 6f6cc285fa..e5481305e0 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -19,12 +19,14 @@ import random from typing import TYPE_CHECKING, Any, Optional, Union -from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError +from pymongo.synchronous.common import CONNECT_TIMEOUT if TYPE_CHECKING: from dns import resolver +_IS_SYNC = True + def _have_dnspython() -> bool: try: diff --git a/pymongo/topology.py b/pymongo/synchronous/topology.py similarity index 97% rename from pymongo/topology.py rename to pymongo/synchronous/topology.py index e10f490adc..d76cef7bfc 100644 --- a/pymongo/topology.py +++ b/pymongo/synchronous/topology.py @@ -27,8 +27,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast -from pymongo import _csot, common, helpers, periodic_executor -from pymongo.client_session import _ServerSession, _ServerSessionPool +from pymongo import _csot, helpers_constants from pymongo.errors import ( ConnectionFailure, InvalidOperation, @@ -39,25 +38,27 @@ ServerSelectionTimeoutError, WriteError, ) -from pymongo.hello import Hello from pymongo.lock import _create_lock -from pymongo.logger import ( +from pymongo.synchronous import common, periodic_executor +from pymongo.synchronous.client_session import _ServerSession, _ServerSessionPool +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.logger import ( _SERVER_SELECTION_LOGGER, _debug_log, _ServerSelectionStatusMessage, ) -from pymongo.monitor import SrvMonitor -from pymongo.pool import Pool, PoolOptions -from pymongo.server import Server -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import ( +from pymongo.synchronous.monitor import SrvMonitor +from pymongo.synchronous.pool import Pool, PoolOptions +from pymongo.synchronous.server import Server +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import ( Selection, any_server_selector, arbiter_server_selector, secondary_server_selector, writable_server_selector, ) -from pymongo.topology_description import ( +from pymongo.synchronous.topology_description import ( SRV_POLLING_TOPOLOGIES, TOPOLOGY_TYPE, TopologyDescription, @@ -67,9 +68,10 @@ if TYPE_CHECKING: from bson import ObjectId - from pymongo.settings import TopologySettings - from pymongo.typings import ClusterTime, _Address + from pymongo.synchronous.settings import TopologySettings + from pymongo.synchronous.typings import ClusterTime, _Address +_IS_SYNC = True _pymongo_dir = str(Path(__file__).parent) @@ -143,7 +145,7 @@ def __init__(self, topology_settings: TopologySettings): self._opened = False self._closed = False self._lock = _create_lock() - self._condition = self._settings.condition_class(self._lock) + self._condition = self._settings.condition_class(self._lock) # type: ignore[arg-type] self._servers: dict[_Address, Server] = {} self._pid: Optional[int] = None self._max_cluster_time: Optional[ClusterTime] = None @@ -786,8 +788,8 @@ def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: # Default error code if one does not exist. default = 10107 if isinstance(error, NotPrimaryError) else None err_code = error.details.get("code", default) # type: ignore[union-attr] - if err_code in helpers._NOT_PRIMARY_CODES: - is_shutting_down = err_code in helpers._SHUTDOWN_CODES + if err_code in helpers_constants._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers_constants._SHUTDOWN_CODES # Mark server Unknown, clear the pool, and request check. if not self._settings.load_balanced: self._process_change(ServerDescription(address, error=error)) diff --git a/pymongo/synchronous/topology_description.py b/pymongo/synchronous/topology_description.py new file mode 100644 index 0000000000..961b9da8d5 --- /dev/null +++ b/pymongo/synchronous/topology_description.py @@ -0,0 +1,678 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Represent a deployment of MongoDB servers.""" +from __future__ import annotations + +from random import sample +from typing import ( + Any, + Callable, + List, + Mapping, + MutableMapping, + NamedTuple, + Optional, + cast, +) + +from bson.min_key import MinKey +from bson.objectid import ObjectId +from pymongo.errors import ConfigurationError +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous import common +from pymongo.synchronous.read_preferences import ReadPreference, _AggWritePref, _ServerMode +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import Selection +from pymongo.synchronous.typings import _Address + +_IS_SYNC = True + + +# Enumeration for various kinds of MongoDB cluster topologies. +class _TopologyType(NamedTuple): + Single: int + ReplicaSetNoPrimary: int + ReplicaSetWithPrimary: int + Sharded: int + Unknown: int + LoadBalanced: int + + +TOPOLOGY_TYPE = _TopologyType(*range(6)) + +# Topologies compatible with SRV record polling. +SRV_POLLING_TOPOLOGIES: tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) + + +_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] + + +class TopologyDescription: + def __init__( + self, + topology_type: int, + server_descriptions: dict[_Address, ServerDescription], + replica_set_name: Optional[str], + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], + topology_settings: Any, + ) -> None: + """Representation of a deployment of MongoDB servers. + + :param topology_type: initial type + :param server_descriptions: dict of (address, ServerDescription) for + all seeds + :param replica_set_name: replica set name or None + :param max_set_version: greatest setVersion seen from a primary, or None + :param max_election_id: greatest electionId seen from a primary, or None + :param topology_settings: a TopologySettings + """ + self._topology_type = topology_type + self._replica_set_name = replica_set_name + self._server_descriptions = server_descriptions + self._max_set_version = max_set_version + self._max_election_id = max_election_id + + # The heartbeat_frequency is used in staleness estimates. + self._topology_settings = topology_settings + + # Is PyMongo compatible with all servers' wire protocols? + self._incompatible_err = None + if self._topology_type != TOPOLOGY_TYPE.LoadBalanced: + self._init_incompatible_err() + + # Server Discovery And Monitoring Spec: Whenever a client updates the + # TopologyDescription from an hello response, it MUST set + # TopologyDescription.logicalSessionTimeoutMinutes to the smallest + # logicalSessionTimeoutMinutes value among ServerDescriptions of all + # data-bearing server types. If any have a null + # logicalSessionTimeoutMinutes, then + # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. + readable_servers = self.readable_servers + if not readable_servers: + self._ls_timeout_minutes = None + elif any(s.logical_session_timeout_minutes is None for s in readable_servers): + self._ls_timeout_minutes = None + else: + self._ls_timeout_minutes = min( # type: ignore[type-var] + s.logical_session_timeout_minutes for s in readable_servers + ) + + def _init_incompatible_err(self) -> None: + """Internal compatibility check for non-load balanced topologies.""" + for s in self._server_descriptions.values(): + if not s.is_server_type_known: + continue + + # s.min/max_wire_version is the server's wire protocol. + # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports. + server_too_new = ( + # Server too new. + s.min_wire_version is not None + and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION + ) + + server_too_old = ( + # Server too old. + s.max_wire_version is not None + and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION + ) + + if server_too_new: + self._incompatible_err = ( + "Server at %s:%d requires wire version %d, but this " # type: ignore + "version of PyMongo only supports up to %d." + % ( + s.address[0], + s.address[1] or 0, + s.min_wire_version, + common.MAX_SUPPORTED_WIRE_VERSION, + ) + ) + + elif server_too_old: + self._incompatible_err = ( + "Server at %s:%d reports wire version %d, but this " # type: ignore + "version of PyMongo requires at least %d (MongoDB %s)." + % ( + s.address[0], + s.address[1] or 0, + s.max_wire_version, + common.MIN_SUPPORTED_WIRE_VERSION, + common.MIN_SUPPORTED_SERVER_VERSION, + ) + ) + + break + + def check_compatible(self) -> None: + """Raise ConfigurationError if any server is incompatible. + + A server is incompatible if its wire protocol version range does not + overlap with PyMongo's. + """ + if self._incompatible_err: + raise ConfigurationError(self._incompatible_err) + + def has_server(self, address: _Address) -> bool: + return address in self._server_descriptions + + def reset_server(self, address: _Address) -> TopologyDescription: + """A copy of this description, with one server marked Unknown.""" + unknown_sd = self._server_descriptions[address].to_unknown() + return updated_topology_description(self, unknown_sd) + + def reset(self) -> TopologyDescription: + """A copy of this description, with all servers marked Unknown.""" + if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: + topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + topology_type = self._topology_type + + # The default ServerDescription's type is Unknown. + sds = {address: ServerDescription(address) for address in self._server_descriptions} + + return TopologyDescription( + topology_type, + sds, + self._replica_set_name, + self._max_set_version, + self._max_election_id, + self._topology_settings, + ) + + def server_descriptions(self) -> dict[_Address, ServerDescription]: + """dict of (address, + :class:`~pymongo.server_description.ServerDescription`). + """ + return self._server_descriptions.copy() + + @property + def topology_type(self) -> int: + """The type of this topology.""" + return self._topology_type + + @property + def topology_type_name(self) -> str: + """The topology type as a human readable string. + + .. versionadded:: 3.4 + """ + return TOPOLOGY_TYPE._fields[self._topology_type] + + @property + def replica_set_name(self) -> Optional[str]: + """The replica set name.""" + return self._replica_set_name + + @property + def max_set_version(self) -> Optional[int]: + """Greatest setVersion seen from a primary, or None.""" + return self._max_set_version + + @property + def max_election_id(self) -> Optional[ObjectId]: + """Greatest electionId seen from a primary, or None.""" + return self._max_election_id + + @property + def logical_session_timeout_minutes(self) -> Optional[int]: + """Minimum logical session timeout, or None.""" + return self._ls_timeout_minutes + + @property + def known_servers(self) -> list[ServerDescription]: + """List of Servers of types besides Unknown.""" + return [s for s in self._server_descriptions.values() if s.is_server_type_known] + + @property + def has_known_servers(self) -> bool: + """Whether there are any Servers of types besides Unknown.""" + return any(s for s in self._server_descriptions.values() if s.is_server_type_known) + + @property + def readable_servers(self) -> list[ServerDescription]: + """List of readable Servers.""" + return [s for s in self._server_descriptions.values() if s.is_readable] + + @property + def common_wire_version(self) -> Optional[int]: + """Minimum of all servers' max wire versions, or None.""" + servers = self.known_servers + if servers: + return min(s.max_wire_version for s in self.known_servers) + + return None + + @property + def heartbeat_frequency(self) -> int: + return self._topology_settings.heartbeat_frequency + + @property + def srv_max_hosts(self) -> int: + return self._topology_settings._srv_max_hosts + + def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: + if not selection: + return [] + round_trip_times: list[float] = [] + for server in selection.server_descriptions: + if server.round_trip_time is None: + config_err_msg = f"round_trip_time for server {server.address} is unexpectedly None: {self}, servers: {selection.server_descriptions}" + raise ConfigurationError(config_err_msg) + round_trip_times.append(server.round_trip_time) + # Round trip time in seconds. + fastest = min(round_trip_times) + threshold = self._topology_settings.local_threshold_ms / 1000.0 + return [ + s + for s in selection.server_descriptions + if (cast(float, s.round_trip_time) - fastest) <= threshold + ] + + def apply_selector( + self, + selector: Any, + address: Optional[_Address] = None, + custom_selector: Optional[_ServerSelector] = None, + ) -> list[ServerDescription]: + """List of servers matching the provided selector(s). + + :param selector: a callable that takes a Selection as input and returns + a Selection as output. For example, an instance of a read + preference from :mod:`~pymongo.read_preferences`. + :param address: A server address to select. + :param custom_selector: A callable that augments server + selection rules. Accepts a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + + .. versionadded:: 3.4 + """ + if getattr(selector, "min_wire_version", 0): + common_wv = self.common_wire_version + if common_wv and common_wv < selector.min_wire_version: + raise ConfigurationError( + "%s requires min wire version %d, but topology's min" + " wire version is %d" % (selector, selector.min_wire_version, common_wv) + ) + + if isinstance(selector, _AggWritePref): + selector.selection_hook(self) + + if self.topology_type == TOPOLOGY_TYPE.Unknown: + return [] + elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): + # Ignore selectors for standalone and load balancer mode. + return self.known_servers + if address: + # Ignore selectors when explicit address is requested. + description = self.server_descriptions().get(address) + return [description] if description else [] + + selection = Selection.from_topology_description(self) + # Ignore read preference for sharded clusters. + if self.topology_type != TOPOLOGY_TYPE.Sharded: + selection = selector(selection) + + # Apply custom selector followed by localThresholdMS. + if custom_selector is not None and selection: + selection = selection.with_server_descriptions( + custom_selector(selection.server_descriptions) + ) + return self._apply_local_threshold(selection) + + def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: + """Does this topology have any readable servers available matching the + given read preference? + + :param read_preference: an instance of a read preference from + :mod:`~pymongo.read_preferences`. Defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + + .. note:: When connected directly to a single server this method + always returns ``True``. + + .. versionadded:: 3.4 + """ + common.validate_read_preference("read_preference", read_preference) + return any(self.apply_selector(read_preference)) + + def has_writable_server(self) -> bool: + """Does this topology have a writable server available? + + .. note:: When connected directly to a single server this method + always returns ``True``. + + .. versionadded:: 3.4 + """ + return self.has_readable_server(ReadPreference.PRIMARY) + + def __repr__(self) -> str: + # Sort the servers by address. + servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) + return "<{} id: {}, topology_type: {}, servers: {!r}>".format( + self.__class__.__name__, + self._topology_settings._topology_id, + self.topology_type_name, + servers, + ) + + +# If topology type is Unknown and we receive a hello response, what should +# the new topology type be? +_SERVER_TYPE_TO_TOPOLOGY_TYPE = { + SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded, + SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary, + SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary, + # Note: SERVER_TYPE.LoadBalancer and Unknown are intentionally left out. +} + + +def updated_topology_description( + topology_description: TopologyDescription, server_description: ServerDescription +) -> TopologyDescription: + """Return an updated copy of a TopologyDescription. + + :param topology_description: the current TopologyDescription + :param server_description: a new ServerDescription that resulted from + a hello call + + Called after attempting (successfully or not) to call hello on the + server at server_description.address. Does not modify topology_description. + """ + address = server_description.address + + # These values will be updated, if necessary, to form the new + # TopologyDescription. + topology_type = topology_description.topology_type + set_name = topology_description.replica_set_name + max_set_version = topology_description.max_set_version + max_election_id = topology_description.max_election_id + server_type = server_description.server_type + + # Don't mutate the original dict of server descriptions; copy it. + sds = topology_description.server_descriptions() + + # Replace this server's description with the new one. + sds[address] = server_description + + if topology_type == TOPOLOGY_TYPE.Single: + # Set server type to Unknown if replica set name does not match. + if set_name is not None and set_name != server_description.replica_set_name: + error = ConfigurationError( + "client is configured to connect to a replica set named " + "'{}' but this node belongs to a set named '{}'".format( + set_name, server_description.replica_set_name + ) + ) + sds[address] = server_description.to_unknown(error=error) + # Single type never changes. + return TopologyDescription( + TOPOLOGY_TYPE.Single, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) + + if topology_type == TOPOLOGY_TYPE.Unknown: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): + if len(topology_description._topology_settings.seeds) == 1: + topology_type = TOPOLOGY_TYPE.Single + else: + # Remove standalone from Topology when given multiple seeds. + sds.pop(address) + elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost): + topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type] + + if topology_type == TOPOLOGY_TYPE.Sharded: + if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown): + sds.pop(address) + + elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): + sds.pop(address) + + elif server_type == SERVER_TYPE.RSPrimary: + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type, set_name = _update_rs_no_primary_from_member( + sds, set_name, server_description + ) + + elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: + if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): + sds.pop(address) + topology_type = _check_has_primary(sds) + + elif server_type == SERVER_TYPE.RSPrimary: + (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( + sds, set_name, server_description, max_set_version, max_election_id + ) + + elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): + topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description) + + else: + # Server type is Unknown or RSGhost: did we just lose the primary? + topology_type = _check_has_primary(sds) + + # Return updated copy. + return TopologyDescription( + topology_type, + sds, + set_name, + max_set_version, + max_election_id, + topology_description._topology_settings, + ) + + +def _updated_topology_description_srv_polling( + topology_description: TopologyDescription, seedlist: list[tuple[str, Any]] +) -> TopologyDescription: + """Return an updated copy of a TopologyDescription. + + :param topology_description: the current TopologyDescription + :param seedlist: a list of new seeds new ServerDescription that resulted from + a hello call + """ + assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES + # Create a copy of the server descriptions. + sds = topology_description.server_descriptions() + + # If seeds haven't changed, don't do anything. + if set(sds.keys()) == set(seedlist): + return topology_description + + # Remove SDs corresponding to servers no longer part of the SRV record. + for address in list(sds.keys()): + if address not in seedlist: + sds.pop(address) + + if topology_description.srv_max_hosts != 0: + new_hosts = set(seedlist) - set(sds.keys()) + n_to_add = topology_description.srv_max_hosts - len(sds) + if n_to_add > 0: + seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts))) + else: + seedlist = [] + # Add SDs corresponding to servers recently added to the SRV record. + for address in seedlist: + if address not in sds: + sds[address] = ServerDescription(address) + return TopologyDescription( + topology_description.topology_type, + sds, + topology_description.replica_set_name, + topology_description.max_set_version, + topology_description.max_election_id, + topology_description._topology_settings, + ) + + +def _update_rs_from_primary( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], +) -> tuple[int, Optional[str], Optional[int], Optional[ObjectId]]: + """Update topology description from a primary's hello response. + + Pass in a dict of ServerDescriptions, current replica set name, the + ServerDescription we are processing, and the TopologyDescription's + max_set_version and max_election_id if any. + + Returns (new topology type, new replica_set_name, new max_set_version, + new max_election_id). + """ + if replica_set_name is None: + replica_set_name = server_description.replica_set_name + + elif replica_set_name != server_description.replica_set_name: + # We found a primary but it doesn't have the replica_set_name + # provided by the user. + sds.pop(server_description.address) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple: tuple = (server_description.set_version, server_description.election_id) + max_election_tuple: tuple = (max_set_version, max_election_id) + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id + + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version + ): + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown() + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version + + # We've heard from the primary. Is it the same primary as before? + for server in sds.values(): + if ( + server.server_type is SERVER_TYPE.RSPrimary + and server.address != server_description.address + ): + # Reset old primary's type to Unknown. + sds[server.address] = server.to_unknown() + + # There can be only one prior primary. + break + + # Discover new hosts from this primary's response. + for new_address in server_description.all_hosts: + if new_address not in sds: + sds[new_address] = ServerDescription(new_address) + + # Remove hosts not in the response. + for addr in set(sds) - server_description.all_hosts: + sds.pop(addr) + + # If the host list differs from the seed list, we may not have a primary + # after all. + return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) + + +def _update_rs_with_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> int: + """RS with known primary. Process a response from a non-primary. + + Pass in a dict of ServerDescriptions, current replica set name, and the + ServerDescription we are processing. + + Returns new topology type. + """ + assert replica_set_name is not None + + if replica_set_name != server_description.replica_set_name: + sds.pop(server_description.address) + elif server_description.me and server_description.address != server_description.me: + sds.pop(server_description.address) + + # Had this member been the primary? + return _check_has_primary(sds) + + +def _update_rs_no_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> tuple[int, Optional[str]]: + """RS without known primary. Update from a non-primary's response. + + Pass in a dict of ServerDescriptions, current replica set name, and the + ServerDescription we are processing. + + Returns (new topology type, new replica_set_name). + """ + topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary + if replica_set_name is None: + replica_set_name = server_description.replica_set_name + + elif replica_set_name != server_description.replica_set_name: + sds.pop(server_description.address) + return topology_type, replica_set_name + + # This isn't the primary's response, so don't remove any servers + # it doesn't report. Only add new servers. + for address in server_description.all_hosts: + if address not in sds: + sds[address] = ServerDescription(address) + + if server_description.me and server_description.address != server_description.me: + sds.pop(server_description.address) + + return topology_type, replica_set_name + + +def _check_has_primary(sds: Mapping[_Address, ServerDescription]) -> int: + """Current topology type is ReplicaSetWithPrimary. Is primary still known? + + Pass in a dict of ServerDescriptions. + + Returns new topology type. + """ + for s in sds.values(): + if s.server_type == SERVER_TYPE.RSPrimary: + return TOPOLOGY_TYPE.ReplicaSetWithPrimary + else: # noqa: PLW0120 + return TOPOLOGY_TYPE.ReplicaSetNoPrimary diff --git a/pymongo/typings.py b/pymongo/synchronous/typings.py similarity index 95% rename from pymongo/typings.py rename to pymongo/synchronous/typings.py index 174a0e3614..bc3fb0938f 100644 --- a/pymongo/typings.py +++ b/pymongo/synchronous/typings.py @@ -29,8 +29,9 @@ from bson.typings import _DocumentOut, _DocumentType, _DocumentTypeArg if TYPE_CHECKING: - from pymongo.collation import Collation + from pymongo.synchronous.collation import Collation +_IS_SYNC = True # Common Shared Types. _Address = Tuple[str, Optional[int]] diff --git a/pymongo/synchronous/uri_parser.py b/pymongo/synchronous/uri_parser.py new file mode 100644 index 0000000000..8e37bdc696 --- /dev/null +++ b/pymongo/synchronous/uri_parser.py @@ -0,0 +1,624 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI.""" +from __future__ import annotations + +import re +import sys +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sized, + Union, + cast, +) +from urllib.parse import unquote_plus + +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.synchronous.client_options import _parse_ssl_options +from pymongo.synchronous.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + SRV_SERVICE_NAME, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) +from pymongo.synchronous.srv_resolver import _have_dnspython, _SrvResolver +from pymongo.synchronous.typings import _Address + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext + +_IS_SYNC = True +SCHEME = "mongodb://" +SCHEME_LEN = len(SCHEME) +SRV_SCHEME = "mongodb+srv://" +SRV_SCHEME_LEN = len(SRV_SCHEME) +DEFAULT_PORT = 27017 + + +def _unquoted_percent(s: str) -> bool: + """Check for unescaped percent signs. + + :param s: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. + """ + for i in range(len(s)): + if s[i] == "%": + sub = s[i : i + 3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote_plus(sub) == sub: + return True + return False + + +def parse_userinfo(userinfo: str) -> tuple[str, str]: + """Validates the format of user information in a MongoDB URI. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. + + Returns a 2-tuple containing the unescaped username followed + by the unescaped password. + + :param userinfo: A string of the form : + """ + if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): + raise InvalidURI( + "Username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + + user, _, passwd = userinfo.partition(":") + # No password is expected with GSSAPI authentication. + if not user: + raise InvalidURI("The empty string is not valid username.") + + return unquote_plus(user), unquote_plus(passwd) + + +def parse_ipv6_literal_host( + entity: str, default_port: Optional[int] +) -> tuple[str, Optional[Union[str, int]]]: + """Validates an IPv6 literal host:port string. + + Returns a 2-tuple of IPv6 literal followed by port where + port is default_port if it wasn't specified in entity. + + :param entity: A string that represents an IPv6 literal enclosed + in braces (e.g. '[::1]' or '[::1]:27017'). + :param default_port: The port number to use when one wasn't + specified in entity. + """ + if entity.find("]") == -1: + raise ValueError( + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." + ) + i = entity.find("]:") + if i == -1: + return entity[1:-1], default_port + return entity[1:i], entity[i + 2 :] + + +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: + """Validates a host string + + Returns a 2-tuple of host followed by port where port is default_port + if it wasn't specified in the string. + + :param entity: A host or host:port string where host could be a + hostname or IP address. + :param default_port: The port number to use when one wasn't + specified in entity. + """ + host = entity + port: Optional[Union[str, int]] = default_port + if entity[0] == "[": + host, port = parse_ipv6_literal_host(entity, default_port) + elif entity.endswith(".sock"): + return entity, default_port + elif entity.find(":") != -1: + if entity.count(":") > 1: + raise ValueError( + "Reserved characters such as ':' must be " + "escaped according RFC 2396. An IPv6 " + "address literal must be enclosed in '[' " + "and ']' according to RFC 2732." + ) + host, port = host.split(":", 1) + if isinstance(port, str): + if not port.isdigit() or int(port) > 65535 or int(port) <= 0: + raise ValueError(f"Port must be an integer between 0 and 65535: {port!r}") + port = int(port) + + # Normalize hostname to lowercase, since DNS is case-insensitive: + # http://tools.ietf.org/html/rfc4343 + # This prevents useless rediscovery if "foo.com" is in the seed list but + # "FOO.com" is in the hello response. + return host.lower(), port + + +# Options whose values are implicitly determined by tlsInsecure. +_IMPLICIT_TLSINSECURE_OPTS = { + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", +} + + +def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: + """Helper method for split_options which creates the options dict. + Also handles the creation of a list for the URI tag_sets/ + readpreferencetags portion, and the use of a unicode options string. + """ + options = _CaseInsensitiveDictionary() + for uriopt in opts.split(delim): + key, value = uriopt.split("=") + if key.lower() == "readpreferencetags": + options.setdefault(key, []).append(value) + else: + if key in options: + warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) + if key.lower() == "authmechanismproperties": + val = value + else: + val = unquote_plus(value) + options[key] = val + + return options + + +def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Raise appropriate errors when conflicting TLS options are present in + the options dictionary. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Implicitly defined options must not be explicitly specified. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + if opt in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) + ) + + # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. + tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") + if tlsallowinvalidcerts is not None: + if "tlsdisableocspendpointcheck" in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg + % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) + ) + if tlsallowinvalidcerts is True: + options["tlsdisableocspendpointcheck"] = True + + # Handle co-occurence of CRL and OCSP-related options. + tlscrlfile = options.get("tlscrlfile") + if tlscrlfile is not None: + for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): + if options.get(opt) is True: + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." + raise InvalidURI(err_msg % (opt,)) + + if "ssl" in options and "tls" in options: + + def truth_value(val: Any) -> Any: + if val in ("true", "false"): + return val == "true" + if isinstance(val, bool): + return val + return val + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) + + return options + + +def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Issue appropriate warnings when deprecated options are present in the + options dictionary. Removes deprecated option key, value pairs if the + options dictionary is found to also have the renamed option. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + for optname in list(options): + if optname in URI_OPTIONS_DEPRECATION_MAP: + mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] + if mode == "renamed": + newoptname = message + if newoptname in options: + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." + warnings.warn( + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) + options.pop(optname) + continue + warn_msg = "Option '%s' is deprecated, use '%s' instead." + warnings.warn( + warn_msg % (options.cased_key(optname), newoptname), + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": + warn_msg = "Option '%s' is deprecated. %s." + warnings.warn( + warn_msg % (options.cased_key(optname), message), + DeprecationWarning, + stacklevel=2, + ) + + return options + + +def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Normalizes option names in the options dictionary by converting them to + their internally-used names. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Expand the tlsInsecure option. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + # Implicit options are logically the same as tlsInsecure. + options[opt] = tlsinsecure + + for optname in list(options): + intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) + if intname is not None: + options[intname] = options.pop(optname) + + return options + + +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: + """Validates and normalizes options passed in a MongoDB URI. + + Returns a new dictionary of validated and normalized options. If warn is + False then errors will be thrown for invalid options, otherwise they will + be ignored and a warning will be issued. + + :param opts: A dict of MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and + invalid options will be ignored. Otherwise invalid options will + cause errors. + """ + return get_validated_options(opts, warn) + + +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: + """Takes the options portion of a MongoDB URI, validates each option + and returns the options in a dictionary. + + :param opt: A string representing MongoDB URI options. + :param validate: If ``True`` (the default), validate and normalize all + options. + :param warn: If ``False`` (the default), suppress all warnings raised + during validation of options. + :param normalize: If ``True`` (the default), renames all options to their + internally-used names. + """ + and_idx = opts.find("&") + semi_idx = opts.find(";") + try: + if and_idx >= 0 and semi_idx >= 0: + raise InvalidURI("Can not mix '&' and ';' for option separators.") + elif and_idx >= 0: + options = _parse_options(opts, "&") + elif semi_idx >= 0: + options = _parse_options(opts, ";") + elif opts.find("=") != -1: + options = _parse_options(opts, None) + else: + raise ValueError + except ValueError: + raise InvalidURI("MongoDB URI options are key=value pairs.") from None + + options = _handle_security_options(options) + + options = _handle_option_deprecations(options) + + if normalize: + options = _normalize_options(options) + + if validate: + options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") + + return options + + +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: + """Takes a string of the form host1[:port],host2[:port]... and + splits it into (host, port) tuples. If [:port] isn't present the + default_port is used. + + Returns a set of 2-tuples containing the host name (or IP) followed by + port number. + + :param hosts: A string of the form host1[:port],host2[:port],... + :param default_port: The port number to use when one wasn't specified + for a host. + """ + nodes = [] + for entity in hosts.split(","): + if not entity: + raise ConfigurationError("Empty host (or extra comma in host list).") + port = default_port + # Unix socket entities don't have ports + if entity.endswith(".sock"): + port = None + nodes.append(parse_host(entity, port)) + return nodes + + +# Prohibited characters in database name. DB names also can't have ".", but for +# backward-compat we allow "db.collection" in URI. +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") + +_ALLOWED_TXT_OPTS = frozenset( + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) + + +def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: + # Ensure directConnection was not True if there are multiple seeds. + if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") + + if options.get("loadbalanced"): + if len(nodes) > 1: + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") + + +def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + """Parse and validate a MongoDB URI. + + Returns a dict of the form:: + + { + 'nodelist': , + 'username': or None, + 'password': or None, + 'database': or None, + 'collection': or None, + 'options': , + 'fqdn': or None + } + + If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done + to build nodelist and options. + + :param uri: The MongoDB URI to parse. + :param default_port: The port number to use when one wasn't specified + for a host in the URI. + :param validate: If ``True`` (the default), validate and + normalize all options. Default: ``True``. + :param warn: When validating, if ``True`` then will warn + the user then ignore any invalid options or values. If ``False``, + validation will error when options are unsupported or values are + invalid. Default: ``False``. + :param normalize: If ``True``, convert names of URI options + to their internally-used names. Default: ``True``. + :param connect_timeout: The maximum time in milliseconds to + wait for a response from the DNS server. + :param srv_service_name: A custom SRV service name + + .. versionchanged:: 4.6 + The delimiting slash (``/``) between hosts and connection options is now optional. + For example, "mongodb://example.com?tls=true" is now a valid URI. + + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported. + + .. versionchanged:: 3.9 + Added the ``normalize`` parameter. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + elif uri.startswith(SRV_SCHEME): + if not _have_dnspython(): + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + "installed to use mongodb+srv:// URIs. " + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) + ) + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + else: + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") + + if not scheme_free: + raise InvalidURI("Must provide at least one hostname or IP.") + + user = None + passwd = None + dbase = None + collection = None + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, dbase = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if dbase: + dbase = unquote_plus(dbase) + if "." in dbase: + dbase, collection = dbase.split(".", 1) + if _BAD_DB_CHARS.search(dbase): + raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") + user, passwd = parse_userinfo(userinfo) + else: + hosts = host_part + + if "/" in hosts: + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) + + hosts = unquote_plus(hosts) + fqdn = None + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + if options.get("directConnection"): + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") + nodes = split_hosts(hosts, default_port=None) + if len(nodes) != 1: + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") + fqdn, port = nodes[0] + if port is not None: + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = dns_resolver.get_hosts() + dns_options = dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError( + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" + ) + elif not is_srv and srv_max_hosts: + raise ConfigurationError( + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" + ) + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, + } + + +def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError("kms_tls_options must be a dict") + contexts = {} + for provider, options in kms_tls_options.items(): + if not isinstance(options, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + options.setdefault("tls", True) + opts = _CaseInsensitiveDictionary(options) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + if ssl_context is None: + raise ConfigurationError("TLS is required for KMS providers") + if allow_invalid_hostnames: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableCertificateRevocationCheck", + ]: + if n in opts: + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") + contexts[provider] = ssl_context + return contexts + + +if __name__ == "__main__": + import pprint + + try: + pprint.pprint(parse_uri(sys.argv[1])) # noqa: T203 + except InvalidURI as exc: + print(exc) # noqa: T201 + sys.exit(0) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index cc2330cbab..201d9b390d 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -1,676 +1,21 @@ -# Copyright 2014-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""Represent a deployment of MongoDB servers.""" +"""Re-import of synchronous TopologyDescription API for compatibility.""" from __future__ import annotations -from random import sample -from typing import ( - Any, - Callable, - List, - Mapping, - MutableMapping, - NamedTuple, - Optional, - cast, -) +from pymongo.synchronous.topology_description import * # noqa: F403 +from pymongo.synchronous.topology_description import __doc__ as original_doc -from bson.min_key import MinKey -from bson.objectid import ObjectId -from pymongo import common -from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import Selection -from pymongo.server_type import SERVER_TYPE -from pymongo.typings import _Address - - -# Enumeration for various kinds of MongoDB cluster topologies. -class _TopologyType(NamedTuple): - Single: int - ReplicaSetNoPrimary: int - ReplicaSetWithPrimary: int - Sharded: int - Unknown: int - LoadBalanced: int - - -TOPOLOGY_TYPE = _TopologyType(*range(6)) - -# Topologies compatible with SRV record polling. -SRV_POLLING_TOPOLOGIES: tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) - - -_ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] - - -class TopologyDescription: - def __init__( - self, - topology_type: int, - server_descriptions: dict[_Address, ServerDescription], - replica_set_name: Optional[str], - max_set_version: Optional[int], - max_election_id: Optional[ObjectId], - topology_settings: Any, - ) -> None: - """Representation of a deployment of MongoDB servers. - - :param topology_type: initial type - :param server_descriptions: dict of (address, ServerDescription) for - all seeds - :param replica_set_name: replica set name or None - :param max_set_version: greatest setVersion seen from a primary, or None - :param max_election_id: greatest electionId seen from a primary, or None - :param topology_settings: a TopologySettings - """ - self._topology_type = topology_type - self._replica_set_name = replica_set_name - self._server_descriptions = server_descriptions - self._max_set_version = max_set_version - self._max_election_id = max_election_id - - # The heartbeat_frequency is used in staleness estimates. - self._topology_settings = topology_settings - - # Is PyMongo compatible with all servers' wire protocols? - self._incompatible_err = None - if self._topology_type != TOPOLOGY_TYPE.LoadBalanced: - self._init_incompatible_err() - - # Server Discovery And Monitoring Spec: Whenever a client updates the - # TopologyDescription from an hello response, it MUST set - # TopologyDescription.logicalSessionTimeoutMinutes to the smallest - # logicalSessionTimeoutMinutes value among ServerDescriptions of all - # data-bearing server types. If any have a null - # logicalSessionTimeoutMinutes, then - # TopologyDescription.logicalSessionTimeoutMinutes MUST be set to null. - readable_servers = self.readable_servers - if not readable_servers: - self._ls_timeout_minutes = None - elif any(s.logical_session_timeout_minutes is None for s in readable_servers): - self._ls_timeout_minutes = None - else: - self._ls_timeout_minutes = min( # type: ignore[type-var] - s.logical_session_timeout_minutes for s in readable_servers - ) - - def _init_incompatible_err(self) -> None: - """Internal compatibility check for non-load balanced topologies.""" - for s in self._server_descriptions.values(): - if not s.is_server_type_known: - continue - - # s.min/max_wire_version is the server's wire protocol. - # MIN/MAX_SUPPORTED_WIRE_VERSION is what PyMongo supports. - server_too_new = ( - # Server too new. - s.min_wire_version is not None - and s.min_wire_version > common.MAX_SUPPORTED_WIRE_VERSION - ) - - server_too_old = ( - # Server too old. - s.max_wire_version is not None - and s.max_wire_version < common.MIN_SUPPORTED_WIRE_VERSION - ) - - if server_too_new: - self._incompatible_err = ( - "Server at %s:%d requires wire version %d, but this " # type: ignore - "version of PyMongo only supports up to %d." - % ( - s.address[0], - s.address[1] or 0, - s.min_wire_version, - common.MAX_SUPPORTED_WIRE_VERSION, - ) - ) - - elif server_too_old: - self._incompatible_err = ( - "Server at %s:%d reports wire version %d, but this " # type: ignore - "version of PyMongo requires at least %d (MongoDB %s)." - % ( - s.address[0], - s.address[1] or 0, - s.max_wire_version, - common.MIN_SUPPORTED_WIRE_VERSION, - common.MIN_SUPPORTED_SERVER_VERSION, - ) - ) - - break - - def check_compatible(self) -> None: - """Raise ConfigurationError if any server is incompatible. - - A server is incompatible if its wire protocol version range does not - overlap with PyMongo's. - """ - if self._incompatible_err: - raise ConfigurationError(self._incompatible_err) - - def has_server(self, address: _Address) -> bool: - return address in self._server_descriptions - - def reset_server(self, address: _Address) -> TopologyDescription: - """A copy of this description, with one server marked Unknown.""" - unknown_sd = self._server_descriptions[address].to_unknown() - return updated_topology_description(self, unknown_sd) - - def reset(self) -> TopologyDescription: - """A copy of this description, with all servers marked Unknown.""" - if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: - topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary - else: - topology_type = self._topology_type - - # The default ServerDescription's type is Unknown. - sds = {address: ServerDescription(address) for address in self._server_descriptions} - - return TopologyDescription( - topology_type, - sds, - self._replica_set_name, - self._max_set_version, - self._max_election_id, - self._topology_settings, - ) - - def server_descriptions(self) -> dict[_Address, ServerDescription]: - """dict of (address, - :class:`~pymongo.server_description.ServerDescription`). - """ - return self._server_descriptions.copy() - - @property - def topology_type(self) -> int: - """The type of this topology.""" - return self._topology_type - - @property - def topology_type_name(self) -> str: - """The topology type as a human readable string. - - .. versionadded:: 3.4 - """ - return TOPOLOGY_TYPE._fields[self._topology_type] - - @property - def replica_set_name(self) -> Optional[str]: - """The replica set name.""" - return self._replica_set_name - - @property - def max_set_version(self) -> Optional[int]: - """Greatest setVersion seen from a primary, or None.""" - return self._max_set_version - - @property - def max_election_id(self) -> Optional[ObjectId]: - """Greatest electionId seen from a primary, or None.""" - return self._max_election_id - - @property - def logical_session_timeout_minutes(self) -> Optional[int]: - """Minimum logical session timeout, or None.""" - return self._ls_timeout_minutes - - @property - def known_servers(self) -> list[ServerDescription]: - """List of Servers of types besides Unknown.""" - return [s for s in self._server_descriptions.values() if s.is_server_type_known] - - @property - def has_known_servers(self) -> bool: - """Whether there are any Servers of types besides Unknown.""" - return any(s for s in self._server_descriptions.values() if s.is_server_type_known) - - @property - def readable_servers(self) -> list[ServerDescription]: - """List of readable Servers.""" - return [s for s in self._server_descriptions.values() if s.is_readable] - - @property - def common_wire_version(self) -> Optional[int]: - """Minimum of all servers' max wire versions, or None.""" - servers = self.known_servers - if servers: - return min(s.max_wire_version for s in self.known_servers) - - return None - - @property - def heartbeat_frequency(self) -> int: - return self._topology_settings.heartbeat_frequency - - @property - def srv_max_hosts(self) -> int: - return self._topology_settings._srv_max_hosts - - def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: - if not selection: - return [] - round_trip_times: list[float] = [] - for server in selection.server_descriptions: - if server.round_trip_time is None: - config_err_msg = f"round_trip_time for server {server.address} is unexpectedly None: {self}, servers: {selection.server_descriptions}" - raise ConfigurationError(config_err_msg) - round_trip_times.append(server.round_trip_time) - # Round trip time in seconds. - fastest = min(round_trip_times) - threshold = self._topology_settings.local_threshold_ms / 1000.0 - return [ - s - for s in selection.server_descriptions - if (cast(float, s.round_trip_time) - fastest) <= threshold - ] - - def apply_selector( - self, - selector: Any, - address: Optional[_Address] = None, - custom_selector: Optional[_ServerSelector] = None, - ) -> list[ServerDescription]: - """List of servers matching the provided selector(s). - - :param selector: a callable that takes a Selection as input and returns - a Selection as output. For example, an instance of a read - preference from :mod:`~pymongo.read_preferences`. - :param address: A server address to select. - :param custom_selector: A callable that augments server - selection rules. Accepts a list of - :class:`~pymongo.server_description.ServerDescription` objects and - return a list of server descriptions that should be considered - suitable for the desired operation. - - .. versionadded:: 3.4 - """ - if getattr(selector, "min_wire_version", 0): - common_wv = self.common_wire_version - if common_wv and common_wv < selector.min_wire_version: - raise ConfigurationError( - "%s requires min wire version %d, but topology's min" - " wire version is %d" % (selector, selector.min_wire_version, common_wv) - ) - - if isinstance(selector, _AggWritePref): - selector.selection_hook(self) - - if self.topology_type == TOPOLOGY_TYPE.Unknown: - return [] - elif self.topology_type in (TOPOLOGY_TYPE.Single, TOPOLOGY_TYPE.LoadBalanced): - # Ignore selectors for standalone and load balancer mode. - return self.known_servers - if address: - # Ignore selectors when explicit address is requested. - description = self.server_descriptions().get(address) - return [description] if description else [] - - selection = Selection.from_topology_description(self) - # Ignore read preference for sharded clusters. - if self.topology_type != TOPOLOGY_TYPE.Sharded: - selection = selector(selection) - - # Apply custom selector followed by localThresholdMS. - if custom_selector is not None and selection: - selection = selection.with_server_descriptions( - custom_selector(selection.server_descriptions) - ) - return self._apply_local_threshold(selection) - - def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIMARY) -> bool: - """Does this topology have any readable servers available matching the - given read preference? - - :param read_preference: an instance of a read preference from - :mod:`~pymongo.read_preferences`. Defaults to - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - - .. note:: When connected directly to a single server this method - always returns ``True``. - - .. versionadded:: 3.4 - """ - common.validate_read_preference("read_preference", read_preference) - return any(self.apply_selector(read_preference)) - - def has_writable_server(self) -> bool: - """Does this topology have a writable server available? - - .. note:: When connected directly to a single server this method - always returns ``True``. - - .. versionadded:: 3.4 - """ - return self.has_readable_server(ReadPreference.PRIMARY) - - def __repr__(self) -> str: - # Sort the servers by address. - servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address) - return "<{} id: {}, topology_type: {}, servers: {!r}>".format( - self.__class__.__name__, - self._topology_settings._topology_id, - self.topology_type_name, - servers, - ) - - -# If topology type is Unknown and we receive a hello response, what should -# the new topology type be? -_SERVER_TYPE_TO_TOPOLOGY_TYPE = { - SERVER_TYPE.Mongos: TOPOLOGY_TYPE.Sharded, - SERVER_TYPE.RSPrimary: TOPOLOGY_TYPE.ReplicaSetWithPrimary, - SERVER_TYPE.RSSecondary: TOPOLOGY_TYPE.ReplicaSetNoPrimary, - SERVER_TYPE.RSArbiter: TOPOLOGY_TYPE.ReplicaSetNoPrimary, - SERVER_TYPE.RSOther: TOPOLOGY_TYPE.ReplicaSetNoPrimary, - # Note: SERVER_TYPE.LoadBalancer and Unknown are intentionally left out. -} - - -def updated_topology_description( - topology_description: TopologyDescription, server_description: ServerDescription -) -> TopologyDescription: - """Return an updated copy of a TopologyDescription. - - :param topology_description: the current TopologyDescription - :param server_description: a new ServerDescription that resulted from - a hello call - - Called after attempting (successfully or not) to call hello on the - server at server_description.address. Does not modify topology_description. - """ - address = server_description.address - - # These values will be updated, if necessary, to form the new - # TopologyDescription. - topology_type = topology_description.topology_type - set_name = topology_description.replica_set_name - max_set_version = topology_description.max_set_version - max_election_id = topology_description.max_election_id - server_type = server_description.server_type - - # Don't mutate the original dict of server descriptions; copy it. - sds = topology_description.server_descriptions() - - # Replace this server's description with the new one. - sds[address] = server_description - - if topology_type == TOPOLOGY_TYPE.Single: - # Set server type to Unknown if replica set name does not match. - if set_name is not None and set_name != server_description.replica_set_name: - error = ConfigurationError( - "client is configured to connect to a replica set named " - "'{}' but this node belongs to a set named '{}'".format( - set_name, server_description.replica_set_name - ) - ) - sds[address] = server_description.to_unknown(error=error) - # Single type never changes. - return TopologyDescription( - TOPOLOGY_TYPE.Single, - sds, - set_name, - max_set_version, - max_election_id, - topology_description._topology_settings, - ) - - if topology_type == TOPOLOGY_TYPE.Unknown: - if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.LoadBalancer): - if len(topology_description._topology_settings.seeds) == 1: - topology_type = TOPOLOGY_TYPE.Single - else: - # Remove standalone from Topology when given multiple seeds. - sds.pop(address) - elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost): - topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type] - - if topology_type == TOPOLOGY_TYPE.Sharded: - if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown): - sds.pop(address) - - elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary: - if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): - sds.pop(address) - - elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( - sds, set_name, server_description, max_set_version, max_election_id - ) - - elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): - topology_type, set_name = _update_rs_no_primary_from_member( - sds, set_name, server_description - ) - - elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: - if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos): - sds.pop(address) - topology_type = _check_has_primary(sds) - - elif server_type == SERVER_TYPE.RSPrimary: - (topology_type, set_name, max_set_version, max_election_id) = _update_rs_from_primary( - sds, set_name, server_description, max_set_version, max_election_id - ) - - elif server_type in (SERVER_TYPE.RSSecondary, SERVER_TYPE.RSArbiter, SERVER_TYPE.RSOther): - topology_type = _update_rs_with_primary_from_member(sds, set_name, server_description) - - else: - # Server type is Unknown or RSGhost: did we just lose the primary? - topology_type = _check_has_primary(sds) - - # Return updated copy. - return TopologyDescription( - topology_type, - sds, - set_name, - max_set_version, - max_election_id, - topology_description._topology_settings, - ) - - -def _updated_topology_description_srv_polling( - topology_description: TopologyDescription, seedlist: list[tuple[str, Any]] -) -> TopologyDescription: - """Return an updated copy of a TopologyDescription. - - :param topology_description: the current TopologyDescription - :param seedlist: a list of new seeds new ServerDescription that resulted from - a hello call - """ - assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES - # Create a copy of the server descriptions. - sds = topology_description.server_descriptions() - - # If seeds haven't changed, don't do anything. - if set(sds.keys()) == set(seedlist): - return topology_description - - # Remove SDs corresponding to servers no longer part of the SRV record. - for address in list(sds.keys()): - if address not in seedlist: - sds.pop(address) - - if topology_description.srv_max_hosts != 0: - new_hosts = set(seedlist) - set(sds.keys()) - n_to_add = topology_description.srv_max_hosts - len(sds) - if n_to_add > 0: - seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts))) - else: - seedlist = [] - # Add SDs corresponding to servers recently added to the SRV record. - for address in seedlist: - if address not in sds: - sds[address] = ServerDescription(address) - return TopologyDescription( - topology_description.topology_type, - sds, - topology_description.replica_set_name, - topology_description.max_set_version, - topology_description.max_election_id, - topology_description._topology_settings, - ) - - -def _update_rs_from_primary( - sds: MutableMapping[_Address, ServerDescription], - replica_set_name: Optional[str], - server_description: ServerDescription, - max_set_version: Optional[int], - max_election_id: Optional[ObjectId], -) -> tuple[int, Optional[str], Optional[int], Optional[ObjectId]]: - """Update topology description from a primary's hello response. - - Pass in a dict of ServerDescriptions, current replica set name, the - ServerDescription we are processing, and the TopologyDescription's - max_set_version and max_election_id if any. - - Returns (new topology type, new replica_set_name, new max_set_version, - new max_election_id). - """ - if replica_set_name is None: - replica_set_name = server_description.replica_set_name - - elif replica_set_name != server_description.replica_set_name: - # We found a primary but it doesn't have the replica_set_name - # provided by the user. - sds.pop(server_description.address) - return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id - - if server_description.max_wire_version is None or server_description.max_wire_version < 17: - new_election_tuple: tuple = (server_description.set_version, server_description.election_id) - max_election_tuple: tuple = (max_set_version, max_election_id) - if None not in new_election_tuple: - if None not in max_election_tuple and new_election_tuple < max_election_tuple: - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id - max_election_id = server_description.election_id - - if server_description.set_version is not None and ( - max_set_version is None or server_description.set_version > max_set_version - ): - max_set_version = server_description.set_version - else: - new_election_tuple = server_description.election_id, server_description.set_version - max_election_tuple = max_election_id, max_set_version - new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) - max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) - if new_election_safe < max_election_safe: - # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id - else: - max_election_id = server_description.election_id - max_set_version = server_description.set_version - - # We've heard from the primary. Is it the same primary as before? - for server in sds.values(): - if ( - server.server_type is SERVER_TYPE.RSPrimary - and server.address != server_description.address - ): - # Reset old primary's type to Unknown. - sds[server.address] = server.to_unknown() - - # There can be only one prior primary. - break - - # Discover new hosts from this primary's response. - for new_address in server_description.all_hosts: - if new_address not in sds: - sds[new_address] = ServerDescription(new_address) - - # Remove hosts not in the response. - for addr in set(sds) - server_description.all_hosts: - sds.pop(addr) - - # If the host list differs from the seed list, we may not have a primary - # after all. - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - -def _update_rs_with_primary_from_member( - sds: MutableMapping[_Address, ServerDescription], - replica_set_name: Optional[str], - server_description: ServerDescription, -) -> int: - """RS with known primary. Process a response from a non-primary. - - Pass in a dict of ServerDescriptions, current replica set name, and the - ServerDescription we are processing. - - Returns new topology type. - """ - assert replica_set_name is not None - - if replica_set_name != server_description.replica_set_name: - sds.pop(server_description.address) - elif server_description.me and server_description.address != server_description.me: - sds.pop(server_description.address) - - # Had this member been the primary? - return _check_has_primary(sds) - - -def _update_rs_no_primary_from_member( - sds: MutableMapping[_Address, ServerDescription], - replica_set_name: Optional[str], - server_description: ServerDescription, -) -> tuple[int, Optional[str]]: - """RS without known primary. Update from a non-primary's response. - - Pass in a dict of ServerDescriptions, current replica set name, and the - ServerDescription we are processing. - - Returns (new topology type, new replica_set_name). - """ - topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary - if replica_set_name is None: - replica_set_name = server_description.replica_set_name - - elif replica_set_name != server_description.replica_set_name: - sds.pop(server_description.address) - return topology_type, replica_set_name - - # This isn't the primary's response, so don't remove any servers - # it doesn't report. Only add new servers. - for address in server_description.all_hosts: - if address not in sds: - sds[address] = ServerDescription(address) - - if server_description.me and server_description.address != server_description.me: - sds.pop(server_description.address) - - return topology_type, replica_set_name - - -def _check_has_primary(sds: Mapping[_Address, ServerDescription]) -> int: - """Current topology type is ReplicaSetWithPrimary. Is primary still known? - - Pass in a dict of ServerDescriptions. - - Returns new topology type. - """ - for s in sds.values(): - if s.server_type == SERVER_TYPE.RSPrimary: - return TOPOLOGY_TYPE.ReplicaSetWithPrimary - else: # noqa: PLW0120 - return TOPOLOGY_TYPE.ReplicaSetNoPrimary +__doc__ = original_doc diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 4ebd3008c3..e74ef18831 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -1,623 +1,21 @@ -# Copyright 2011-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. - -"""Tools to parse and validate a MongoDB URI.""" +"""Re-import of synchronous URIParser API for compatibility.""" from __future__ import annotations -import re -import sys -import warnings -from typing import ( - TYPE_CHECKING, - Any, - Mapping, - MutableMapping, - Optional, - Sized, - Union, - cast, -) -from urllib.parse import unquote_plus - -from pymongo.client_options import _parse_ssl_options -from pymongo.common import ( - INTERNAL_URI_OPTION_NAME_MAP, - SRV_SERVICE_NAME, - URI_OPTIONS_DEPRECATION_MAP, - _CaseInsensitiveDictionary, - get_validated_options, -) -from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.srv_resolver import _have_dnspython, _SrvResolver -from pymongo.typings import _Address - -if TYPE_CHECKING: - from pymongo.pyopenssl_context import SSLContext - -SCHEME = "mongodb://" -SCHEME_LEN = len(SCHEME) -SRV_SCHEME = "mongodb+srv://" -SRV_SCHEME_LEN = len(SRV_SCHEME) -DEFAULT_PORT = 27017 - - -def _unquoted_percent(s: str) -> bool: - """Check for unescaped percent signs. - - :param s: A string. `s` can have things like '%25', '%2525', - and '%E2%85%A8' but cannot have unquoted percent like '%foo'. - """ - for i in range(len(s)): - if s[i] == "%": - sub = s[i : i + 3] - # If unquoting yields the same string this means there was an - # unquoted %. - if unquote_plus(sub) == sub: - return True - return False - - -def parse_userinfo(userinfo: str) -> tuple[str, str]: - """Validates the format of user information in a MongoDB URI. - Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", - "]", "@") as per RFC 3986 must be escaped. - - Returns a 2-tuple containing the unescaped username followed - by the unescaped password. - - :param userinfo: A string of the form : - """ - if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): - raise InvalidURI( - "Username and password must be escaped according to " - "RFC 3986, use urllib.parse.quote_plus" - ) - - user, _, passwd = userinfo.partition(":") - # No password is expected with GSSAPI authentication. - if not user: - raise InvalidURI("The empty string is not valid username.") - - return unquote_plus(user), unquote_plus(passwd) - - -def parse_ipv6_literal_host( - entity: str, default_port: Optional[int] -) -> tuple[str, Optional[Union[str, int]]]: - """Validates an IPv6 literal host:port string. - - Returns a 2-tuple of IPv6 literal followed by port where - port is default_port if it wasn't specified in entity. - - :param entity: A string that represents an IPv6 literal enclosed - in braces (e.g. '[::1]' or '[::1]:27017'). - :param default_port: The port number to use when one wasn't - specified in entity. - """ - if entity.find("]") == -1: - raise ValueError( - "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." - ) - i = entity.find("]:") - if i == -1: - return entity[1:-1], default_port - return entity[1:i], entity[i + 2 :] - - -def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: - """Validates a host string - - Returns a 2-tuple of host followed by port where port is default_port - if it wasn't specified in the string. - - :param entity: A host or host:port string where host could be a - hostname or IP address. - :param default_port: The port number to use when one wasn't - specified in entity. - """ - host = entity - port: Optional[Union[str, int]] = default_port - if entity[0] == "[": - host, port = parse_ipv6_literal_host(entity, default_port) - elif entity.endswith(".sock"): - return entity, default_port - elif entity.find(":") != -1: - if entity.count(":") > 1: - raise ValueError( - "Reserved characters such as ':' must be " - "escaped according RFC 2396. An IPv6 " - "address literal must be enclosed in '[' " - "and ']' according to RFC 2732." - ) - host, port = host.split(":", 1) - if isinstance(port, str): - if not port.isdigit() or int(port) > 65535 or int(port) <= 0: - raise ValueError(f"Port must be an integer between 0 and 65535: {port!r}") - port = int(port) - - # Normalize hostname to lowercase, since DNS is case-insensitive: - # http://tools.ietf.org/html/rfc4343 - # This prevents useless rediscovery if "foo.com" is in the seed list but - # "FOO.com" is in the hello response. - return host.lower(), port - - -# Options whose values are implicitly determined by tlsInsecure. -_IMPLICIT_TLSINSECURE_OPTS = { - "tlsallowinvalidcertificates", - "tlsallowinvalidhostnames", - "tlsdisableocspendpointcheck", -} - - -def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: - """Helper method for split_options which creates the options dict. - Also handles the creation of a list for the URI tag_sets/ - readpreferencetags portion, and the use of a unicode options string. - """ - options = _CaseInsensitiveDictionary() - for uriopt in opts.split(delim): - key, value = uriopt.split("=") - if key.lower() == "readpreferencetags": - options.setdefault(key, []).append(value) - else: - if key in options: - warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) - if key.lower() == "authmechanismproperties": - val = value - else: - val = unquote_plus(value) - options[key] = val - - return options - - -def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: - """Raise appropriate errors when conflicting TLS options are present in - the options dictionary. - - :param options: Instance of _CaseInsensitiveDictionary containing - MongoDB URI options. - """ - # Implicitly defined options must not be explicitly specified. - tlsinsecure = options.get("tlsinsecure") - if tlsinsecure is not None: - for opt in _IMPLICIT_TLSINSECURE_OPTS: - if opt in options: - err_msg = "URI options %s and %s cannot be specified simultaneously." - raise InvalidURI( - err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) - ) - - # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. - tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") - if tlsallowinvalidcerts is not None: - if "tlsdisableocspendpointcheck" in options: - err_msg = "URI options %s and %s cannot be specified simultaneously." - raise InvalidURI( - err_msg - % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) - ) - if tlsallowinvalidcerts is True: - options["tlsdisableocspendpointcheck"] = True - - # Handle co-occurence of CRL and OCSP-related options. - tlscrlfile = options.get("tlscrlfile") - if tlscrlfile is not None: - for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): - if options.get(opt) is True: - err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." - raise InvalidURI(err_msg % (opt,)) - - if "ssl" in options and "tls" in options: - - def truth_value(val: Any) -> Any: - if val in ("true", "false"): - return val == "true" - if isinstance(val, bool): - return val - return val - - if truth_value(options.get("ssl")) != truth_value(options.get("tls")): - err_msg = "Can not specify conflicting values for URI options %s and %s." - raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) - - return options - - -def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: - """Issue appropriate warnings when deprecated options are present in the - options dictionary. Removes deprecated option key, value pairs if the - options dictionary is found to also have the renamed option. - - :param options: Instance of _CaseInsensitiveDictionary containing - MongoDB URI options. - """ - for optname in list(options): - if optname in URI_OPTIONS_DEPRECATION_MAP: - mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] - if mode == "renamed": - newoptname = message - if newoptname in options: - warn_msg = "Deprecated option '%s' ignored in favor of '%s'." - warnings.warn( - warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), - DeprecationWarning, - stacklevel=2, - ) - options.pop(optname) - continue - warn_msg = "Option '%s' is deprecated, use '%s' instead." - warnings.warn( - warn_msg % (options.cased_key(optname), newoptname), - DeprecationWarning, - stacklevel=2, - ) - elif mode == "removed": - warn_msg = "Option '%s' is deprecated. %s." - warnings.warn( - warn_msg % (options.cased_key(optname), message), - DeprecationWarning, - stacklevel=2, - ) - - return options - - -def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: - """Normalizes option names in the options dictionary by converting them to - their internally-used names. - - :param options: Instance of _CaseInsensitiveDictionary containing - MongoDB URI options. - """ - # Expand the tlsInsecure option. - tlsinsecure = options.get("tlsinsecure") - if tlsinsecure is not None: - for opt in _IMPLICIT_TLSINSECURE_OPTS: - # Implicit options are logically the same as tlsInsecure. - options[opt] = tlsinsecure - - for optname in list(options): - intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) - if intname is not None: - options[intname] = options.pop(optname) - - return options - - -def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: - """Validates and normalizes options passed in a MongoDB URI. - - Returns a new dictionary of validated and normalized options. If warn is - False then errors will be thrown for invalid options, otherwise they will - be ignored and a warning will be issued. - - :param opts: A dict of MongoDB URI options. - :param warn: If ``True`` then warnings will be logged and - invalid options will be ignored. Otherwise invalid options will - cause errors. - """ - return get_validated_options(opts, warn) - - -def split_options( - opts: str, validate: bool = True, warn: bool = False, normalize: bool = True -) -> MutableMapping[str, Any]: - """Takes the options portion of a MongoDB URI, validates each option - and returns the options in a dictionary. - - :param opt: A string representing MongoDB URI options. - :param validate: If ``True`` (the default), validate and normalize all - options. - :param warn: If ``False`` (the default), suppress all warnings raised - during validation of options. - :param normalize: If ``True`` (the default), renames all options to their - internally-used names. - """ - and_idx = opts.find("&") - semi_idx = opts.find(";") - try: - if and_idx >= 0 and semi_idx >= 0: - raise InvalidURI("Can not mix '&' and ';' for option separators.") - elif and_idx >= 0: - options = _parse_options(opts, "&") - elif semi_idx >= 0: - options = _parse_options(opts, ";") - elif opts.find("=") != -1: - options = _parse_options(opts, None) - else: - raise ValueError - except ValueError: - raise InvalidURI("MongoDB URI options are key=value pairs.") from None - - options = _handle_security_options(options) - - options = _handle_option_deprecations(options) - - if normalize: - options = _normalize_options(options) - - if validate: - options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) - if options.get("authsource") == "": - raise InvalidURI("the authSource database cannot be an empty string") - - return options - - -def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: - """Takes a string of the form host1[:port],host2[:port]... and - splits it into (host, port) tuples. If [:port] isn't present the - default_port is used. - - Returns a set of 2-tuples containing the host name (or IP) followed by - port number. - - :param hosts: A string of the form host1[:port],host2[:port],... - :param default_port: The port number to use when one wasn't specified - for a host. - """ - nodes = [] - for entity in hosts.split(","): - if not entity: - raise ConfigurationError("Empty host (or extra comma in host list).") - port = default_port - # Unix socket entities don't have ports - if entity.endswith(".sock"): - port = None - nodes.append(parse_host(entity, port)) - return nodes - - -# Prohibited characters in database name. DB names also can't have ".", but for -# backward-compat we allow "db.collection" in URI. -_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") - -_ALLOWED_TXT_OPTS = frozenset( - ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] -) - - -def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: - # Ensure directConnection was not True if there are multiple seeds. - if len(nodes) > 1 and options.get("directconnection"): - raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") - - if options.get("loadbalanced"): - if len(nodes) > 1: - raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") - if options.get("directconnection"): - raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") - if options.get("replicaset"): - raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") - - -def parse_uri( - uri: str, - default_port: Optional[int] = DEFAULT_PORT, - validate: bool = True, - warn: bool = False, - normalize: bool = True, - connect_timeout: Optional[float] = None, - srv_service_name: Optional[str] = None, - srv_max_hosts: Optional[int] = None, -) -> dict[str, Any]: - """Parse and validate a MongoDB URI. - - Returns a dict of the form:: - - { - 'nodelist': , - 'username': or None, - 'password': or None, - 'database': or None, - 'collection': or None, - 'options': , - 'fqdn': or None - } - - If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done - to build nodelist and options. - - :param uri: The MongoDB URI to parse. - :param default_port: The port number to use when one wasn't specified - for a host in the URI. - :param validate: If ``True`` (the default), validate and - normalize all options. Default: ``True``. - :param warn: When validating, if ``True`` then will warn - the user then ignore any invalid options or values. If ``False``, - validation will error when options are unsupported or values are - invalid. Default: ``False``. - :param normalize: If ``True``, convert names of URI options - to their internally-used names. Default: ``True``. - :param connect_timeout: The maximum time in milliseconds to - wait for a response from the DNS server. - :param srv_service_name: A custom SRV service name - - .. versionchanged:: 4.6 - The delimiting slash (``/``) between hosts and connection options is now optional. - For example, "mongodb://example.com?tls=true" is now a valid URI. - - .. versionchanged:: 4.0 - To better follow RFC 3986, unquoted percent signs ("%") are no longer - supported. - - .. versionchanged:: 3.9 - Added the ``normalize`` parameter. - - .. versionchanged:: 3.6 - Added support for mongodb+srv:// URIs. - - .. versionchanged:: 3.5 - Return the original value of the ``readPreference`` MongoDB URI option - instead of the validated read preference mode. - - .. versionchanged:: 3.1 - ``warn`` added so invalid options can be ignored. - """ - if uri.startswith(SCHEME): - is_srv = False - scheme_free = uri[SCHEME_LEN:] - elif uri.startswith(SRV_SCHEME): - if not _have_dnspython(): - python_path = sys.executable or "python" - raise ConfigurationError( - 'The "dnspython" module must be ' - "installed to use mongodb+srv:// URIs. " - "To fix this error install pymongo again:\n " - "%s -m pip install pymongo>=4.3" % (python_path) - ) - is_srv = True - scheme_free = uri[SRV_SCHEME_LEN:] - else: - raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") - - if not scheme_free: - raise InvalidURI("Must provide at least one hostname or IP.") - - user = None - passwd = None - dbase = None - collection = None - options = _CaseInsensitiveDictionary() - - host_plus_db_part, _, opts = scheme_free.partition("?") - if "/" in host_plus_db_part: - host_part, _, dbase = host_plus_db_part.partition("/") - else: - host_part = host_plus_db_part - - if dbase: - dbase = unquote_plus(dbase) - if "." in dbase: - dbase, collection = dbase.split(".", 1) - if _BAD_DB_CHARS.search(dbase): - raise InvalidURI('Bad database name "%s"' % dbase) - else: - dbase = None - - if opts: - options.update(split_options(opts, validate, warn, normalize)) - if srv_service_name is None: - srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) - if "@" in host_part: - userinfo, _, hosts = host_part.rpartition("@") - user, passwd = parse_userinfo(userinfo) - else: - hosts = host_part - - if "/" in hosts: - raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) - - hosts = unquote_plus(hosts) - fqdn = None - srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") - if is_srv: - if options.get("directConnection"): - raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") - nodes = split_hosts(hosts, default_port=None) - if len(nodes) != 1: - raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") - fqdn, port = nodes[0] - if port is not None: - raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") - - # Use the connection timeout. connectTimeoutMS passed as a keyword - # argument overrides the same option passed in the connection string. - connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) - nodes = dns_resolver.get_hosts() - dns_options = dns_resolver.get_options() - if dns_options: - parsed_dns_options = split_options(dns_options, validate, warn, normalize) - if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: - raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are supported from DNS" - ) - for opt, val in parsed_dns_options.items(): - if opt not in options: - options[opt] = val - if options.get("loadBalanced") and srv_max_hosts: - raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") - if options.get("replicaSet") and srv_max_hosts: - raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") - if "tls" not in options and "ssl" not in options: - options["tls"] = True if validate else "true" - elif not is_srv and options.get("srvServiceName") is not None: - raise ConfigurationError( - "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" - ) - elif not is_srv and srv_max_hosts: - raise ConfigurationError( - "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" - ) - else: - nodes = split_hosts(hosts, default_port=default_port) - - _check_options(nodes, options) - - return { - "nodelist": nodes, - "username": user, - "password": passwd, - "database": dbase, - "collection": collection, - "options": options, - "fqdn": fqdn, - } - - -def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: - """Parse KMS TLS connection options.""" - if not kms_tls_options: - return {} - if not isinstance(kms_tls_options, dict): - raise TypeError("kms_tls_options must be a dict") - contexts = {} - for provider, options in kms_tls_options.items(): - if not isinstance(options, dict): - raise TypeError(f'kms_tls_options["{provider}"] must be a dict') - options.setdefault("tls", True) - opts = _CaseInsensitiveDictionary(options) - opts = _handle_security_options(opts) - opts = _normalize_options(opts) - opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) - ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) - if ssl_context is None: - raise ConfigurationError("TLS is required for KMS providers") - if allow_invalid_hostnames: - raise ConfigurationError("Insecure TLS options prohibited") - - for n in [ - "tlsInsecure", - "tlsAllowInvalidCertificates", - "tlsAllowInvalidHostnames", - "tlsDisableCertificateRevocationCheck", - ]: - if n in opts: - raise ConfigurationError(f"Insecure TLS options prohibited: {n}") - contexts[provider] = ssl_context - return contexts - - -if __name__ == "__main__": - import pprint +from pymongo.synchronous.uri_parser import * # noqa: F403 +from pymongo.synchronous.uri_parser import __doc__ as original_doc - try: - pprint.pprint(parse_uri(sys.argv[1])) # noqa: T203 - except InvalidURI as exc: - print(exc) # noqa: T201 - sys.exit(0) +__doc__ = original_doc diff --git a/pyproject.toml b/pyproject.toml index aebabbf344..1540432e50 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" version = {attr = "pymongo._version.__version__"} [tool.setuptools.packages.find] -include = ["bson","gridfs", "pymongo"] +include = ["bson","gridfs", "gridfs.asynchronous", "gridfs.synchronous", "pymongo", "pymongo.asynchronous", "pymongo.synchronous"] [tool.setuptools.package-data] bson=["py.typed", "*.pyi"] @@ -99,6 +99,16 @@ disable_error_code = ["no-untyped-def", "no-untyped-call"] module = ["service_identity.*"] ignore_missing_imports = true +[[tool.mypy.overrides]] +module = ["pymongo.synchronous.*", "gridfs.synchronous.*"] +warn_unused_ignores = false +disable_error_code = ["unused-coroutine"] + +[[tool.mypy.overrides]] +module = ["pymongo.asynchronous.*"] +warn_unused_ignores = false + + [tool.ruff] target-version = "py37" line-length = 100 @@ -126,6 +136,7 @@ select = [ "UP", # pyupgrade "YTT", # flake8-2020 "EXE", # flake8-executable + "ASYNC", # flake8-async ] ignore = [ "PLR", # Design related pylint codes diff --git a/requirements/test.txt b/requirements/test.txt index 91e898f3cb..1facbf03b9 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1 +1,2 @@ pytest>=7 +pytest-asyncio diff --git a/test/__init__.py b/test/__init__.py index e1eba725b0..a78fab3ca1 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -45,14 +45,14 @@ import pymongo import pymongo.errors from bson.son import SON -from pymongo import common, message -from pymongo.common import partition_node -from pymongo.database import Database -from pymongo.hello import HelloCompat -from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] -from pymongo.uri_parser import parse_uri +from pymongo.synchronous import common, message +from pymongo.synchronous.common import partition_node +from pymongo.synchronous.database import Database +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.uri_parser import parse_uri if HAVE_SSL: import ssl @@ -1191,7 +1191,7 @@ def print_running_topology(topology): def print_running_clients(): - from pymongo.topology import Topology + from pymongo.synchronous.topology import Topology processed = set() # Avoid false positives on the main test client. diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py new file mode 100644 index 0000000000..d38065eb3f --- /dev/null +++ b/test/asynchronous/__init__.py @@ -0,0 +1,983 @@ +# Copyright 2010-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Asynchronous test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations + +import asyncio +import base64 +import gc +import multiprocessing +import os +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +import unittest +import warnings +from asyncio import iscoroutinefunction +from test import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TEST_SERVERLESS, + TLS_OPTIONS, + SystemCertsPatcher, + _all_users, + _create_user, + db_pwd, + db_user, + global_knobs, + host, + is_server_resolvable, + port, + print_running_clients, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from contextlib import asynccontextmanager, contextmanager +from functools import wraps +from test.version import Version +from typing import Any, Callable, Dict, Generator, no_type_check +from unittest import SkipTest +from urllib.parse import quote_plus + +import pymongo +import pymongo.errors +from bson.son import SON +from pymongo.asynchronous import common, message +from pymongo.asynchronous.common import partition_node +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.hello_compat import HelloCompat +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.server_api import ServerApi +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] + +if HAVE_SSL: + import ssl + +_IS_SYNC = False + + +class AsyncClientContext: + client: AsyncMongoClient + + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI + + def __init__(self): + """Create a client and grab essential information from the server.""" + self.connection_attempts = [] + self.connected = False + self.w = None + self.nodes = set() + self.replica_set_name = None + self.cmd_line = None + self.server_status = None + self.version = Version(-1) # Needs to be comparable with Version + self.auth_enabled = False + self.test_commands_enabled = False + self.server_parameters = {} + self._hello = None + self.is_mongos = False + self.mongoses = [] + self.is_rs = False + self.has_ipv6 = False + self.tls = False + self.tlsCertificateKeyFile = False + self.server_is_resolvable = is_server_resolvable() + self.default_client_options: Dict = {} + self.sessions_enabled = False + self.client = None # type: ignore + self.conn_lock = threading.Lock() + self.is_data_lake = False + self.load_balancer = TEST_LOADBALANCER + self.serverless = TEST_SERVERLESS + if self.load_balancer or self.serverless: + self.default_client_options["loadBalanced"] = True + if COMPRESSORS: + self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api + + @property + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = async_client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port + if async_client_context.auth_enabled: + opts["username"] = db_user + opts["password"] = db_pwd + if self.replica_set_name: + opts["replicaSet"] = self.replica_set_name + return opts + + @property + async def uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = async_client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if async_client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + pair = await self.pair + return f"mongodb://{auth_part}{pair}/?{opts_part}" + + @property + async def hello(self): + if not self._hello: + if self.serverless or self.load_balancer: + self._hello = await self.client.admin.command(HelloCompat.CMD) + else: + self._hello = await self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello + + async def _connect(self, host, port, **kwargs): + kwargs.update(self.default_client_options) + client: AsyncMongoClient = pymongo.AsyncMongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) + try: + try: + await client.admin.command("ping") # Can we connect? + except pymongo.errors.OperationFailure as exc: + # SERVER-32063 + self.connection_attempts.append( + f"connected client {client!r}, but legacy hello failed: {exc}" + ) + else: + self.connection_attempts.append(f"successfully connected client {client!r}") + # If connected, then return client with default timeout + return pymongo.AsyncMongoClient(host, port, **kwargs) + except pymongo.errors.ConnectionFailure as exc: + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") + return None + finally: + await client.close() + + async def _init_client(self): + self.client = await self._connect(host, port) + if self.client is not None: + # Return early when connected to dataLake as mongohoused does not + # support the getCmdLineOpts command and is tested without TLS. + build_info: Any = await self.client.admin.command("buildInfo") + if "dataLake" in build_info: + self.is_data_lake = True + self.auth_enabled = True + self.client = await self._connect(host, port, username=db_user, password=db_pwd) + self.connected = True + return + + if HAVE_SSL and not self.client: + # Is MongoDB configured for SSL? + self.client = await self._connect(host, port, **TLS_OPTIONS) + if self.client: + self.tls = True + self.default_client_options.update(TLS_OPTIONS) + self.tlsCertificateKeyFile = True + + if self.client: + self.connected = True + + if self.serverless: + self.auth_enabled = True + else: + try: + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() + + if self.auth_enabled: + if not self.serverless and not IS_SRV: + # See if db_user already exists. + if not self._check_user_provided(): + _create_user(self.client.admin, db_user, db_pwd) + + self.client = await self._connect( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + + # May not have this if OperationFailure was raised earlier. + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + + if self.serverless: + self.server_status = {} + else: + self.server_status = await self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False + + hello = await self.hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello + + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) + self.is_rs = True + if self.auth_enabled: + # It doesn't matter which member we use as the seed here. + self.client = pymongo.AsyncMongoClient( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + else: + self.client = pymongo.AsyncMongoClient( + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) + + # Get the authoritative hello result from the primary. + self._hello = None + hello = await self.hello + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) + self.nodes = set(nodes) + else: + self.nodes = {(host, port)} + self.w = len(hello.get("hosts", [])) or 1 + self.version = await Version.async_from_client(self.client) + + if self.serverless: + self.server_parameters = { + "requireApiVersion": False, + "enableTestCommands": True, + } + self.test_commands_enabled = True + self.has_ipv6 = False + else: + self.server_parameters = await self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: + self.test_commands_enabled = True + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: + self.test_commands_enabled = True + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": + self.test_commands_enabled = True + self.has_ipv6 = self._server_started_with_ipv6() + + self.is_mongos = (await self.hello).get("msg") == "isdbgrid" + if self.is_mongos: + address = await self.client.address + self.mongoses.append(address) + if not self.serverless: + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = await self._connect( + *next_address, **self.default_client_options + ) + if mongos_client: + hello = await mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + + async def init(self): + with self.conn_lock: + if not self.client and not self.connection_attempts: + await self._init_client() + + def connection_attempt_info(self): + return "\n".join(self.connection_attempts) + + @property + async def host(self): + if self.is_rs and not IS_SRV: + primary = await self.client.primary + return str(primary[0]) if primary is not None else host + return host + + @property + async def port(self): + if self.is_rs and not IS_SRV: + primary = await self.client.primary + return primary[1] if primary is not None else port + return port + + @property + async def pair(self): + return "%s:%d" % (await self.host, await self.port) + + @property + async def has_secondaries(self): + if not self.client: + return False + return bool(len(await self.client.secondaries)) + + @property + def storage_engine(self): + try: + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) + except AttributeError: + # Raised if self.server_status is None. + return None + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + + async def _check_user_provided(self): + """Return True if db_user/db_password is already an admin user.""" + client: AsyncMongoClient = pymongo.AsyncMongoClient( + host, + port, + username=db_user, + password=db_pwd, + **self.default_client_options, + ) + + try: + return db_user in _all_users(client.admin) + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: + # Auth failed. + return False + else: + raise + finally: + await client.close() + + def _server_started_with_auth(self): + # MongoDB >= 2.0 + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] + # MongoDB >= 2.6 + if "security" in parsed: + security = parsed["security"] + # >= rc3 + if "authorization" in security: + return security["authorization"] == "enabled" + # < rc3 + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) + # Legacy + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv + + async def _server_started_with_ipv6(self): + if not socket.has_ipv6: + return False + + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): + return False + else: + if "--ipv6" not in self.cmd_line["argv"]: + return False + + # The server was started with --ipv6. Is there an IPv6 route to it? + try: + for info in socket.getaddrinfo(await self.host, await self.port): + if info[0] == socket.AF_INET6: + return True + except OSError: + pass + + return False + + def _require(self, condition, msg, func=None): + def make_wrapper(f): + if iscoroutinefunction(f): + wraps_async = True + else: + wraps_async = False + + @wraps(f) + async def wrap(*args, **kwargs): + await self.init() + # Always raise SkipTest if we can't connect to MongoDB + if not self.connected: + pair = await self.pair + raise SkipTest(f"Cannot connect to MongoDB on {pair}") + if iscoroutinefunction(condition) and await condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) + elif condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) + if "self.pair" in msg: + new_msg = msg.replace("self.pair", await self.pair) + else: + new_msg = msg + raise SkipTest(new_msg) + + return wrap + + if func is None: + + def decorate(f): + return make_wrapper(f) + + return decorate + return make_wrapper(func) + + def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): + kwargs["writeConcern"] = {"w": self.w} + return _create_user(self.client[dbname], user, pwd, roles, **kwargs) + + async def drop_user(self, dbname, user): + await self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) + + def require_connection(self, func): + """Run a test only if we can connect to MongoDB.""" + return self._require( + lambda: True, # _require checks if we're connected + "Cannot connect to MongoDB on self.pair", + func=func, + ) + + def require_data_lake(self, func): + """Run a test only if we are connected to Atlas Data Lake.""" + return self._require( + lambda: self.is_data_lake, + "Not connected to Atlas Data Lake on self.pair", + func=func, + ) + + def require_no_mmap(self, func): + """Run a test only if the server is not using the MMAPv1 storage + engine. Only works for standalone and replica sets; tests are + run regardless of storage engine on sharded clusters. + """ + + def is_not_mmap(): + if self.is_mongos: + return True + return self.storage_engine != "mmapv1" + + return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) + + def require_version_min(self, *ver): + """Run a test only if the server version is at least ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) + + def require_version_max(self, *ver): + """Run a test only if the server version is at most ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) + + def require_auth(self, func): + """Run a test only if the server is running with auth enabled.""" + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) + + def require_no_auth(self, func): + """Run a test only if the server is running without auth enabled.""" + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) + + def require_replica_set(self, func): + """Run a test only if the client is connected to a replica set.""" + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) + + def require_secondaries_count(self, count): + """Run a test only if the client is connected to a replica set that has + `count` secondaries. + """ + + async def sec_count(): + return 0 if not self.client else len(await self.client.secondaries) + + return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + + @property + async def supports_secondary_read_pref(self): + if self.has_secondaries: + return True + if self.is_mongos: + shard = await self.client.config.shards.find_one()["host"] # type:ignore[index] + num_members = shard.count(",") + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read preference", + ) + + def require_no_replica_set(self, func): + """Run a test if the client is *not* connected to a replica set.""" + return self._require( + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) + + def require_ipv6(self, func): + """Run a test only if the client can connect to a server via IPv6.""" + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) + + def require_no_mongos(self, func): + """Run a test only if the client is not connected to a mongos.""" + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) + + def require_mongos(self, func): + """Run a test only if the client is connected to a mongos.""" + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) + + def require_multiple_mongoses(self, func): + """Run a test only if the client is connected to a sharded cluster + that has 2 mongos nodes. + """ + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) + + def require_standalone(self, func): + """Run a test only if the client is connected to a standalone.""" + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) + + def require_no_standalone(self, func): + """Run a test only if the client is not connected to a standalone.""" + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) + + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) + + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) + + def require_no_serverless(self, func): + """Run a test only if the client is not connected to serverless.""" + return self._require( + lambda: not self.serverless, "Must not be connected to serverless", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func))) + + async def is_topology_type(self, topologies): + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "sharded-replicaset", + "load-balanced", + } + if unknown: + raise AssertionError(f"Unknown topologies: {unknown!r}") + if self.load_balancer: + if "load-balanced" in topologies: + return True + return False + if "single" in topologies and not (self.is_mongos or self.is_rs): + return True + if "replicaset" in topologies and self.is_rs: + return True + if "sharded" in topologies and self.is_mongos: + return True + if "sharded-replicaset" in topologies and self.is_mongos: + shards = await (await async_client_context.client.config.shards.find()).to_list() + for shard in shards: + # For a 3-member RS-backed sharded cluster, shard['host'] + # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' + # Otherwise it will be 'ip1:port1' + host_spec = shard["host"] + if not len(host_spec.split("/")) > 1: + return False + return True + return False + + def require_cluster_type(self, topologies=None): + """Run a test only if the client is connected to a cluster that + conforms to one of the specified topologies. Acceptable topologies + are 'single', 'replicaset', and 'sharded'. + """ + topologies = topologies or [] + + async def _is_valid_topology(): + return await self.is_topology_type(topologies) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) + + def require_test_commands(self, func): + """Run a test only if the server has test commands enabled.""" + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) + + def require_failCommand_fail_point(self, func): + """Run a test only if the server supports the failCommand fail + point. + """ + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) + + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 + return self._require( + lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + "failCommand appName must be supported", + func=func, + ) + + def require_failCommand_blockConnection(self, func): + """Run a test only if the server supports failCommand blockConnection.""" + return self._require( + lambda: ( + self.test_commands_enabled + and ( + (not self.is_mongos and self.version >= (4, 2, 9)) + or (self.is_mongos and self.version >= (4, 4)) + ) + ), + "failCommand blockConnection is not supported", + func=func, + ) + + def require_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func) + + def require_no_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func) + + def require_tlsCertificateKeyFile(self, func): + """Run a test only if the client can connect with tlsCertificateKeyFile.""" + return self._require( + lambda: self.tlsCertificateKeyFile, + "Must be able to connect with tlsCertificateKeyFile", + func=func, + ) + + def require_server_resolvable(self, func): + """Run a test only if the hostname 'server' is resolvable.""" + return self._require( + lambda: self.server_is_resolvable, + "No hosts entry for 'server'. Cannot validate hostname in the certificate", + func=func, + ) + + def require_sessions(self, func): + """Run a test only if the deployment supports sessions.""" + return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) + + def supports_retryable_writes(self): + if self.storage_engine == "mmapv1": + return False + if not self.sessions_enabled: + return False + return self.is_mongos or self.is_rs + + def require_retryable_writes(self, func): + """Run a test only if the deployment supports retryable writes.""" + return self._require( + self.supports_retryable_writes, + "This server does not support retryable writes", + func=func, + ) + + def supports_transactions(self): + if self.storage_engine == "mmapv1": + return False + + if self.version.at_least(4, 1, 8): + return self.is_mongos or self.is_rs + + if self.version.at_least(4, 0): + return self.is_rs + + return False + + def require_transactions(self, func): + """Run a test only if the deployment might support transactions. + + *Might* because this does not test the storage engine or FCV. + """ + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) + + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) + + def mongos_seeds(self): + return ",".join("{}:{}".format(*address) for address in self.mongoses) + + @property + def supports_failCommand_fail_point(self): + """Does the server support the failCommand fail point?""" + if self.is_mongos: + return self.version.at_least(4, 1, 5) and self.test_commands_enabled + else: + return self.version.at_least(4, 0) and self.test_commands_enabled + + @property + def requires_hint_with_min_max_queries(self): + """Does the server require a hint with min/max queries.""" + # Changed in SERVER-39567. + return self.version.at_least(4, 1, 10) + + @property + async def max_bson_size(self): + return (await self.hello)["maxBsonObjectSize"] + + @property + async def max_write_batch_size(self): + return (await self.hello)["maxWriteBatchSize"] + + +# Reusable client context +async_client_context = AsyncClientContext() + + +class AsyncPyMongoTestCase(unittest.IsolatedAsyncioTestCase): + def assertEqualCommand(self, expected, actual, msg=None): + self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) + + def assertEqualReply(self, expected, actual, msg=None): + self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + + @asynccontextmanager + async def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + await async_client_context.client.admin.command(cmd_on) + try: + yield + finally: + await async_client_context.client.admin.command( + "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. + proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) + + +class AsyncIntegrationTest(AsyncPyMongoTestCase): + """Async base class for TestCases that need a connection to MongoDB to pass.""" + + client: AsyncMongoClient[dict] + db: AsyncDatabase + credentials: Dict[str, str] + + @classmethod + def setUpClass(cls): + if _IS_SYNC: + cls._setup_class() + else: + asyncio.run(cls._setup_class()) + + @classmethod + @async_client_context.require_connection + async def _setup_class(cls): + if async_client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + if async_client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): + raise SkipTest("this test does not support serverless") + cls.client = async_client_context.client + cls.db = cls.client.pymongo_test + if async_client_context.auth_enabled: + cls.credentials = {"username": db_user, "password": db_pwd} + else: + cls.credentials = {} + + async def cleanup_colls(self, *collections): + """Cleanup collections faster than drop_collection.""" + for c in collections: + c = self.client[c.database.name][c.name] + await c.delete_many({}) + await c.drop_indexes() + + def patch_system_certs(self, ca_certs): + patcher = SystemCertsPatcher(ca_certs) + self.addCleanup(patcher.disable) + + +async def async_setup(): + await async_client_context.init() + warnings.resetwarnings() + warnings.simplefilter("always") + global_knobs.enable() + + +async def async_teardown(): + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + c = async_client_context.client + if c: + if not async_client_context.is_data_lake: + await c.drop_database("pymongo-pooling-tests") + await c.drop_database("pymongo_test") + await c.drop_database("pymongo_test1") + await c.drop_database("pymongo_test2") + await c.drop_database("pymongo_test_mike") + await c.drop_database("pymongo_test_bernie") + await c.close() + + print_running_clients() + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py new file mode 100644 index 0000000000..28e3890d9c --- /dev/null +++ b/test/asynchronous/conftest.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from test.asynchronous import async_setup, async_teardown + +import pytest_asyncio + +_IS_SYNC = False + + +@pytest_asyncio.fixture(scope="session", autouse=True) +async def test_setup_and_teardown(): + await async_setup() + yield + await async_teardown() diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py new file mode 100644 index 0000000000..078bad9e20 --- /dev/null +++ b/test/asynchronous/test_collection.py @@ -0,0 +1,2264 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection module.""" +from __future__ import annotations + +import asyncio +import contextlib +import re +import sys +from codecs import utf_8_decode +from collections import defaultdict +from typing import Any, Iterable, no_type_check + +from pymongo.asynchronous.database import AsyncDatabase + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + async_get_pool, + async_is_mongos, + async_rs_or_single_client, + async_single_client, + async_wait_until, + wait_until, +) + +from bson import encode +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex +from bson.son import SON +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT +from pymongo.asynchronous.bulk import BulkWriteError +from pymongo.asynchronous.collection import AsyncCollection, ReturnDocument +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.helpers import anext +from pymongo.asynchronous.message import _COMMAND_OVERHEAD, _gen_find_command +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.asynchronous.operations import * +from pymongo.asynchronous.read_preferences import ReadPreference +from pymongo.cursor_shared import CursorType +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.results import ( + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCollectionNoConnect(unittest.TestCase): + """Test Collection features on a client that does not connect.""" + + db: AsyncDatabase + + @classmethod + def setUpClass(cls): + cls.db = AsyncMongoClient(connect=False).pymongo_test + + def test_collection(self): + self.assertRaises(TypeError, AsyncCollection, self.db, 5) + + def make_col(base, name): + return base[name] + + self.assertRaises(InvalidName, make_col, self.db, "") + self.assertRaises(InvalidName, make_col, self.db, "te$t") + self.assertRaises(InvalidName, make_col, self.db, ".test") + self.assertRaises(InvalidName, make_col, self.db, "test.") + self.assertRaises(InvalidName, make_col, self.db, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "") + self.assertRaises(InvalidName, make_col, self.db.test, "te$t") + self.assertRaises(InvalidName, make_col, self.db.test, ".test") + self.assertRaises(InvalidName, make_col, self.db.test, "test.") + self.assertRaises(InvalidName, make_col, self.db.test, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t") + + def test_getattr(self): + coll = self.db.test + self.assertTrue(isinstance(coll["_does_not_exist"], AsyncCollection)) + + with self.assertRaises(AttributeError) as context: + coll._does_not_exist + + # Message should be: + # "AttributeError: Collection has no attribute '_does_not_exist'. To + # access the test._does_not_exist collection, use + # database['test._does_not_exist']." + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + coll2 = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertEqual(coll2.write_concern, WriteConcern(w=0)) + self.assertNotEqual(coll.write_concern, coll2.write_concern) + coll3 = coll2.subcoll + self.assertEqual(coll2.write_concern, coll3.write_concern) + coll4 = coll2["subcoll"] + self.assertEqual(coll2.write_concern, coll4.write_concern) + + def test_iteration(self): + coll = self.db.coll + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): + msg = "'NoneType' object is not callable" + else: + if _IS_SYNC: + msg = "'Collection' object is not iterable" + else: + msg = "'AsyncCollection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, msg): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, msg): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) + + +class AsyncTestCollection(AsyncIntegrationTest): + w: int + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.w = async_client_context.w # type: ignore + + @classmethod + def tearDownClass(cls): + if _IS_SYNC: + cls.db.drop_collection("test_large_limit") # type: ignore[unused-coroutine] + else: + asyncio.run(cls.async_tearDownClass()) + + @classmethod + async def async_tearDownClass(cls): + await cls.db.drop_collection("test_large_limit") + + async def asyncSetUp(self): + await self.db.test.drop() + + async def asyncTearDown(self): + await self.db.test.drop() + + @contextlib.contextmanager + def write_concern_collection(self): + if async_client_context.is_rs: + with self.assertRaises(WriteConcernError): + # Unsatisfiable write concern. + yield AsyncCollection( + self.db, + "test", + write_concern=WriteConcern(w=len(async_client_context.nodes) + 1), + ) + else: + yield self.db.test + + async def test_equality(self): + self.assertTrue(isinstance(self.db.test, AsyncCollection)) + self.assertEqual(self.db.test, self.db["test"]) + self.assertEqual(self.db.test, AsyncCollection(self.db, "test")) + self.assertEqual(self.db.test.mike, self.db["test.mike"]) + self.assertEqual(self.db.test["mike"], self.db["test.mike"]) + + async def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + + async def test_create(self): + # No Exception. + db = async_client_context.client.pymongo_test + await db.create_test_no_wc.drop() + + async def lambda_test(): + return "create_test_no_wc" not in await db.list_collection_names() + + async def lambda_test_2(): + return "create_test_no_wc" in await db.list_collection_names() + + await async_wait_until( + lambda_test, + "drop create_test_no_wc collection", + ) + await db.create_collection("create_test_no_wc") + await async_wait_until( + lambda_test_2, + "create create_test_no_wc collection", + ) + # SERVER-33317 + if not async_client_context.is_mongos or not async_client_context.version.at_least(3, 7, 0): + with self.assertRaises(OperationFailure): + await db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) + + async def test_drop_nonexistent_collection(self): + await self.db.drop_collection("test") + self.assertFalse("test" in await self.db.list_collection_names()) + + # No exception + await self.db.drop_collection("test") + + async def test_create_indexes(self): + db = self.db + + with self.assertRaises(TypeError): + await db.test.create_indexes("foo") # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.test.create_indexes(["foo"]) # type: ignore[list-item] + self.assertRaises(TypeError, IndexModel, 5) + self.assertRaises(ValueError, IndexModel, []) + + await db.test.drop_indexes() + await db.test.insert_one({}) + self.assertEqual(len(await db.test.index_information()), 1) + + await db.test.create_indexes([IndexModel("hello")]) + await db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) + + # Tuple instead of list. + await db.test.create_indexes([IndexModel((("world", ASCENDING),))]) + + self.assertEqual(len(await db.test.index_information()), 4) + + await db.test.drop_indexes() + names = await db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) + self.assertEqual(names, ["hello_world"]) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_indexes([IndexModel("hello")]) + self.assertTrue("hello_1" in await db.test.index_information()) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + names = await db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) + info = await db.test.index_information() + for name in names: + self.assertTrue(name in info) + + await db.test.drop() + await db.test.insert_one({"a": 1}) + await db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + await db.test.create_indexes([IndexModel("a", unique=True)]) + + with self.write_concern_collection() as coll: + await coll.create_indexes([IndexModel("hello")]) + + @async_client_context.require_version_max(4, 3, -1) + async def test_create_indexes_commitQuorum_requires_44(self): + db = self.db + with self.assertRaisesRegex( + ConfigurationError, + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", + ): + await db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + @async_client_context.require_no_standalone + @async_client_context.require_version_min(4, 4, -1) + async def test_create_indexes_commitQuorum(self): + await self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + async def test_create_index(self): + db = self.db + + with self.assertRaises(TypeError): + await db.test.create_index(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + await db.test.create_index([]) + + await db.test.drop_indexes() + await db.test.insert_one({}) + self.assertEqual(len(await db.test.index_information()), 1) + + await db.test.create_index("hello") + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + + # Tuple instead of list. + await db.test.create_index((("world", ASCENDING),)) + + self.assertEqual(len(await db.test.index_information()), 4) + + await db.test.drop_indexes() + ix = await db.test.create_index( + [("hello", DESCENDING), ("world", ASCENDING)], name="hello_world" + ) + self.assertEqual(ix, "hello_world") + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_index("hello") + self.assertTrue("hello_1" in await db.test.index_information()) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + self.assertTrue("hello_-1_world_1" in await db.test.index_information()) + + await db.test.drop_indexes() + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertTrue("hello_-1_world_1" in await db.test.index_information()) + + await db.test.drop() + await db.test.insert_one({"a": 1}) + await db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + await db.test.create_index("a", unique=True) + + with self.write_concern_collection() as coll: + await coll.create_index([("hello", DESCENDING)]) + + await db.test.create_index(["hello", "world"]) + await db.test.create_index(["hello", ("world", DESCENDING)]) + await db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + + async def test_drop_index(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index("hello") + name = await db.test.create_index("goodbye") + + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + await db.test.drop_index(name) + + # Drop it again. + with self.assertRaises(OperationFailure): + await db.test.drop_index(name) + self.assertEqual(len(await db.test.index_information()), 2) + self.assertTrue("hello_1" in await db.test.index_information()) + + await db.test.drop_indexes() + await db.test.create_index("hello") + name = await db.test.create_index("goodbye") + + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + await db.test.drop_index([("goodbye", ASCENDING)]) + self.assertEqual(len(await db.test.index_information()), 2) + self.assertTrue("hello_1" in await db.test.index_information()) + + with self.write_concern_collection() as coll: + await coll.drop_index("hello_1") + + @async_client_context.require_no_mongos + @async_client_context.require_test_commands + async def test_index_management_max_time_ms(self): + coll = self.db.test + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + with self.assertRaises(ExecutionTimeout): + await coll.create_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.create_indexes([IndexModel("foo")], maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.drop_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.drop_indexes(maxTimeMS=1) + finally: + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off" + ) + + async def test_list_indexes(self): + db = self.db + await db.test.drop() + await db.test.insert_one({}) # create collection + + def map_indexes(indexes): + return {index["name"]: index for index in indexes} + + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 1) + self.assertTrue("_id_" in map_indexes(indexes)) + + await db.test.create_index("hello") + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 2) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) + + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 3) + index_map = map_indexes(indexes) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) + self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) + + # List indexes on a collection that does not exist. + indexes = await (await db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + # List indexes on a database that does not exist. + indexes = await (await db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + async def test_index_info(self): + db = self.db + await db.test.drop() + await db.test.insert_one({}) # create collection + self.assertEqual(len(await db.test.index_information()), 1) + self.assertTrue("_id_" in await db.test.index_information()) + + await db.test.create_index("hello") + self.assertEqual(len(await db.test.index_information()), 2) + self.assertEqual( + (await db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)] + ) + + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual( + (await db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)] + ) + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual( + [("hello", DESCENDING), ("world", ASCENDING)], + (await db.test.index_information())["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, (await db.test.index_information())["hello_-1_world_1"]["unique"]) + + async def test_index_geo2d(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("loc_2d", await db.test.create_index([("loc", GEO2D)])) + index_info = (await db.test.index_information())["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) + + # geoSearch was deprecated in 4.4 and removed in 5.0 + @async_client_context.require_version_max(4, 5) + @async_client_context.require_no_mongos + async def test_index_haystack(self): + db = self.db + await db.test.drop() + _id = await db.test.insert_one( + {"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"} + ).inserted_id + await db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + await db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + await db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = ( + await db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + ) + )["results"] + + self.assertEqual(2, len(results)) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) + + @async_client_context.require_no_mongos + async def test_index_text(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("t_text", await db.test.create_index([("t", TEXT)])) + index_info = (await db.test.index_information())["t_text"] + self.assertTrue("weights" in index_info) + + await db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] + ) + + # MongoDB 2.6 text search. Create 'score' field in projection. + cursor = await db.test.find( + {"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}} + ) + + # Sort by 'score' field. + cursor.sort([("score", {"$meta": "textScore"})]) + results = await cursor.to_list() + self.assertTrue(results[0]["score"] >= results[1]["score"]) + + await db.test.drop_indexes() + + async def test_index_2dsphere(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("geo_2dsphere", await db.test.create_index([("geo", GEOSPHERE)])) + + for dummy, info in (await db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": + break + else: + self.fail("2dsphere index not found.") + + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + query = {"geo": {"$within": {"$geometry": poly}}} + + # This query will error without a 2dsphere index. + await db.test.find(query) + await db.test.drop_indexes() + + async def test_index_hashed(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("a_hashed", await db.test.create_index([("a", HASHED)])) + + for dummy, info in (await db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": + break + else: + self.fail("hashed index not found.") + + await db.test.drop_indexes() + + async def test_index_sparse(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue((await db.test.index_information())["key_1"]["sparse"]) + + async def test_index_background(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index([("keya", ASCENDING)]) + await db.test.create_index([("keyb", ASCENDING)], background=False) + await db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertFalse("background" in (await db.test.index_information())["keya_1"]) + self.assertFalse((await db.test.index_information())["keyb_1"]["background"]) + self.assertTrue((await db.test.index_information())["keyc_1"]["background"]) + + async def _drop_dups_setup(self, db): + await db.drop_collection("test") + await db.test.insert_one({"i": 1}) + await db.test.insert_one({"i": 2}) + await db.test.insert_one({"i": 2}) # duplicate + await db.test.insert_one({"i": 3}) + + async def test_index_dont_drop_dups(self): + # Try *not* dropping duplicates + db = self.db + await self._drop_dups_setup(db) + + # There's a duplicate + async def _test_create(): + await db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + + with self.assertRaises(DuplicateKeyError): + await _test_create() + + # Duplicate wasn't dropped + self.assertEqual(4, await db.test.count_documents({})) + + # Index wasn't created, only the default index on _id + self.assertEqual(1, len(await db.test.index_information())) + + # Get the plan dynamically because the explain format will change. + def get_plan_stage(self, root, stage): + if root.get("stage") == stage: + return root + elif "inputStage" in root: + return self.get_plan_stage(root["inputStage"], stage) + elif "inputStages" in root: + for i in root["inputStages"]: + stage = self.get_plan_stage(i, stage) + if stage: + return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. + return self.get_plan_stage(root["queryPlan"], stage) + elif "shards" in root: + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) + if stage: + return stage + return {} + + async def test_index_filter(self): + db = self.db + await db.drop_collection("test") + + # Test bad filter spec on create. + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression=5) + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression={"x": {"$asdasd": 3}}) + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression={"$and": 5}) + + self.assertEqual( + "x_1", + await db.test.create_index( + [("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}} + ), + ) + await db.test.insert_one({"x": 5, "a": 2}) + await db.test.insert_one({"x": 6, "a": 1}) + + # Operations that use the partial index. + explain = await (await db.test.find({"x": 6, "a": 1})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = await (await db.test.find({"x": {"$gt": 1}, "a": 1})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = await (await db.test.find({"x": 6, "a": {"$lte": 1}})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + # Operations that do not use the partial index. + explain = await (await db.test.find({"x": 6, "a": {"$lte": 1.6}})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + explain = await (await db.test.find({"x": 6})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + # Test drop_indexes. + await db.test.drop_index("x_1") + explain = await (await db.test.find({"x": 6, "a": 1})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + async def test_field_selection(self): + db = self.db + await db.drop_collection("test") + + doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}} + await db.test.insert_one(doc) + + # Test field inclusion + doc = await anext(await db.test.find({}, ["_id"])) + self.assertEqual(list(doc), ["_id"]) + doc = await anext(await db.test.find({}, ["a"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "a"]) + doc = await anext(await db.test.find({}, ["b"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b"]) + doc = await anext(await db.test.find({}, ["c"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + doc = await anext(await db.test.find({}, ["a"])) + self.assertEqual(doc["a"], 1) + doc = await anext(await db.test.find({}, ["b"])) + self.assertEqual(doc["b"], 5) + doc = await anext(await db.test.find({}, ["c"])) + self.assertEqual(doc["c"], {"d": 5, "e": 10}) + + # Test inclusion of fields with dots + doc = await anext(await db.test.find({}, ["c.d"])) + self.assertEqual(doc["c"], {"d": 5}) + doc = await anext(await db.test.find({}, ["c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + doc = await anext(await db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + + doc = await anext(await db.test.find({}, ["b", "c.e"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b", "c"]) + doc = await anext(await db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["b"], 5) + + # Test field exclusion + doc = await anext(await db.test.find({}, {"a": False, "b": 0})) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + + doc = await anext(await db.test.find({}, {"_id": False})) + l = list(doc) + self.assertFalse("_id" in l) + + async def test_options(self): + db = self.db + await db.drop_collection("test") + await db.create_collection("test", capped=True, size=4096) + result = await db.test.options() + self.assertEqual(result, {"capped": True, "size": 4096}) + await db.drop_collection("test") + + async def test_insert_one(self): + db = self.db + await db.test.drop() + + document: dict[str, Any] = {"_id": 1000} + result = await db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertTrue(isinstance(result.inserted_id, int)) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) + self.assertEqual(1, await db.test.count_documents({})) + + document = {"foo": "bar"} + result = await db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) + self.assertEqual(2, await db.test.count_documents({})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertEqual(document["_id"], result.inserted_id) + self.assertFalse(result.acknowledged) + # The insert failed duplicate key... + + async def async_lambda(): + return await db.test.count_documents({}) == 2 + + await async_wait_until(async_lambda, "forcing duplicate key error") + + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) + result = await db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertEqual(result.inserted_id, None) + + async def test_insert_many(self): + db = self.db + await db.test.drop() + + docs: list = [{} for _ in range(5)] + result = await db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, ObjectId)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [{"_id": i} for i in range(5)] + result = await db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, int)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] + result = await db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertEqual([], result.inserted_ids) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + docs: list = [{} for _ in range(5)] + result = await db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertFalse(result.acknowledged) + self.assertEqual(20, await db.test.count_documents({})) + + async def test_insert_many_generator(self): + coll = self.db.test + await coll.delete_many({}) + + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = await coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + async def test_insert_many_invalid(self): + db = self.db + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many({}) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many([]) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many(1) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) + + async def test_delete_one(self): + await self.db.test.drop() + + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"y": 1}) + await self.db.test.insert_one({"z": 1}) + + result = await self.db.test.delete_one({"x": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(2, await self.db.test.count_documents({})) + + result = await self.db.test.delete_one({"y": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await self.db.test.count_documents({})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = await db.test.delete_one({"z": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + async def lambda_async(): + return await db.test.count_documents({}) == 0 + + await async_wait_until(lambda_async, "delete 1 documents") + + async def test_delete_many(self): + await self.db.test.drop() + + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"y": 1}) + await self.db.test.insert_one({"y": 1}) + + result = await self.db.test.delete_many({"x": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertEqual(2, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(0, await self.db.test.count_documents({"x": 1})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = await db.test.delete_many({"y": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + async def lambda_async(): + return await db.test.count_documents({}) == 0 + + await async_wait_until(lambda_async, "delete 2 documents") + + async def test_command_document_too_large(self): + large = "*" * (await async_client_context.max_bson_size + _COMMAND_OVERHEAD) + coll = self.db.test + with self.assertRaises(DocumentTooLarge): + await coll.insert_one({"data": large}) + # update_one and update_many are the same + with self.assertRaises(DocumentTooLarge): + await coll.replace_one({}, {"data": large}) + with self.assertRaises(DocumentTooLarge): + await coll.delete_one({"data": large}) + + async def test_write_large_document(self): + max_size = await async_client_context.max_bson_size + half_size = int(max_size / 2) + max_str = "x" * max_size + half_str = "x" * half_size + self.assertEqual(max_size, 16777216) + + with self.assertRaises(OperationFailure): + await self.db.test.insert_one({"foo": max_str}) + with self.assertRaises(OperationFailure): + await self.db.test.replace_one({}, {"foo": max_str}, upsert=True) + with self.assertRaises(OperationFailure): + await self.db.test.insert_many([{"x": 1}, {"foo": max_str}]) + await self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) + + await self.db.test.insert_one({"bar": "x"}) + # Use w=0 here to test legacy doc size checking in all server versions + unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(DocumentTooLarge): + await unack_coll.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 14)}) + await self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) + + async def test_insert_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test insert_one + with self.assertRaises(OperationFailure): + await db.test.insert_one({"_id": 1, "x": 100}) + result = await db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertEqual(1, result.inserted_id) + result = await db.test.insert_one({"_id": 2, "a": 0}) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertEqual(2, result.inserted_id) + + await db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.find_one({"y": 1}) + + await async_wait_until(async_lambda, "find w:0 inserted document") + + # Test insert_many + docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] + with self.assertRaises(OperationFailure): + await db.test.insert_many(docs) + result = await db.test.insert_many(docs, bypass_document_validation=True) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(97, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, int)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"x": doc["x"]})) + self.assertTrue(result.acknowledged) + docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] + result = await db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(97, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, int)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"a": doc["a"]})) + self.assertTrue(result.acknowledged) + + with self.assertRaises(OperationFailure): + await db_w0.test.insert_many( + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) + + async def test_replace_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test replace_one + await db.test.insert_one({"a": 101}) + with self.assertRaises(OperationFailure): + await db.test.replace_one({"a": 101}, {"y": 1}) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual(1, await db.test.count_documents({"a": 101})) + await db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"a": 101})) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + await db.test.replace_one({"y": 1}, {"a": 102}) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual(0, await db.test.count_documents({"a": 101})) + self.assertEqual(1, await db.test.count_documents({"a": 102})) + + await db.test.insert_one({"y": 1}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.replace_one({"y": 1}, {"x": 101}) + self.assertEqual(0, await db.test.count_documents({"x": 101})) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + await db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual(1, await db.test.count_documents({"x": 101})) + await db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) + self.assertEqual(0, await db.test.count_documents({"x": 101})) + self.assertEqual(1, await db.test.count_documents({"a": 103})) + + await db.test.insert_one({"y": 1}, bypass_document_validation=True) + await db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + + await async_wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") + + async def test_update_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.test.insert_one({"z": 5}) + await db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test update_one + with self.assertRaises(OperationFailure): + await db.test.update_one({"z": 5}, {"$inc": {"z": -10}}) + self.assertEqual(0, await db.test.count_documents({"z": -5})) + self.assertEqual(1, await db.test.count_documents({"z": 5})) + await db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"z": 5})) + self.assertEqual(1, await db.test.count_documents({"z": -5})) + await db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) + self.assertEqual(1, await db.test.count_documents({"z": 1})) + self.assertEqual(0, await db.test.count_documents({"z": -5})) + + await db.test.insert_one({"z": -10}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_one({"z": -10}, {"$inc": {"z": 1}}) + self.assertEqual(0, await db.test.count_documents({"z": -9})) + self.assertEqual(1, await db.test.count_documents({"z": -10})) + await db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) + self.assertEqual(1, await db.test.count_documents({"z": -9})) + self.assertEqual(0, await db.test.count_documents({"z": -10})) + await db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) + self.assertEqual(0, await db.test.count_documents({"z": -9})) + self.assertEqual(1, await db.test.count_documents({"z": 0})) + + await db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) + await db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.find_one({"y": 1, "x": 1}) + + await async_wait_until(async_lambda, "find w:0 updated document") + + # Test update_many + await db.test.insert_many([{"z": i} for i in range(3, 101)]) + await db.test.insert_one({"y": 0}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_many({}, {"$inc": {"z": -100}}) + self.assertEqual(100, await db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, await db.test.count_documents({"z": {"$lt": 0}})) + self.assertEqual(0, await db.test.count_documents({"y": 0, "z": -100})) + await db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(100, await db.test.count_documents({"z": {"$lte": 0}})) + await db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(50, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(50, await db.test.count_documents({"z": {"$lt": 0}})) + + await db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_many({}, {"$inc": {"z": 1}}) + self.assertEqual(100, await db.test.count_documents({"z": {"$lte": 0}})) + self.assertEqual(50, await db.test.count_documents({"z": {"$gt": 1}})) + await db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(150, await db.test.count_documents({"z": {"$lte": 0}})) + await db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(150, await db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, await db.test.count_documents({"z": {"$lt": 0}})) + + await db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + await db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + await db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.count_documents({"m": 1, "x": 1}) == 2 + + await async_wait_until(async_lambda, "find w:0 updated documents") + + async def test_bypass_document_validation_bulk_write(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$gte": 0}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] + await db.test.bulk_write(ops, bypass_document_validation=True) + + self.assertEqual(3, await db.test.count_documents({})) + self.assertEqual(1, await db.test.count_documents({"a": -11})) + self.assertEqual(1, await db.test.count_documents({"a": -1})) + self.assertEqual(1, await db.test.count_documents({"a": -9})) + + # Assert that the operations would fail without bypass_doc_val + for op in ops: + with self.assertRaises(BulkWriteError): + await db.test.bulk_write([op]) + + with self.assertRaises(OperationFailure): + await db_w0.test.bulk_write(ops, bypass_document_validation=True) + + async def test_find_by_default_dct(self): + db = self.db + await db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] + self.assertIsNotNone(await db.test.find_one(dct)) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) + + async def test_find_w_fields(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one( + {"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"} + ) + self.assertEqual(1, await db.test.count_documents({})) + doc = await anext(await db.test.find({})) + self.assertTrue("x" in doc) + doc = await anext(await db.test.find({})) + self.assertTrue("mike" in doc) + doc = await anext(await db.test.find({})) + self.assertTrue("extra thing" in doc) + doc = await anext(await db.test.find({}, ["x", "mike"])) + self.assertTrue("x" in doc) + doc = await anext(await db.test.find({}, ["x", "mike"])) + self.assertTrue("mike" in doc) + doc = await anext(await db.test.find({}, ["x", "mike"])) + self.assertFalse("extra thing" in doc) + doc = await anext(await db.test.find({}, ["mike"])) + self.assertFalse("x" in doc) + doc = await anext(await db.test.find({}, ["mike"])) + self.assertTrue("mike" in doc) + doc = await anext(await db.test.find({}, ["mike"])) + self.assertFalse("extra thing" in doc) + + @no_type_check + async def test_fields_specifier_as_dict(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) + + self.assertEqual([1, 2, 3], (await db.test.find_one())["x"]) + self.assertEqual([2, 3], (await db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) + self.assertTrue("x" not in await db.test.find_one(projection={"x": 0})) + self.assertTrue("mike" in await db.test.find_one(projection={"x": 0})) + + async def test_find_w_regex(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one({"x": "hello_world"}) + await db.test.insert_one({"x": "hello_mike"}) + await db.test.insert_one({"x": "hello_mikey"}) + await db.test.insert_one({"x": "hello_test"}) + + self.assertEqual(len(await (await db.test.find()).to_list()), 4) + self.assertEqual( + len(await (await db.test.find({"x": re.compile("^hello.*")})).to_list()), 4 + ) + self.assertEqual(len(await (await db.test.find({"x": re.compile("ello")})).to_list()), 4) + self.assertEqual(len(await (await db.test.find({"x": re.compile("^hello$")})).to_list()), 0) + self.assertEqual( + len(await (await db.test.find({"x": re.compile("^hello_mi.*$")})).to_list()), 2 + ) + + async def test_id_can_be_anything(self): + db = self.db + + await db.test.delete_many({}) + auto_id = {"hello": "world"} + await db.test.insert_one(auto_id) + self.assertTrue(isinstance(auto_id["_id"], ObjectId)) + + numeric = {"_id": 240, "hello": "world"} + await db.test.insert_one(numeric) + self.assertEqual(numeric["_id"], 240) + + obj = {"_id": numeric, "hello": "world"} + await db.test.insert_one(obj) + self.assertEqual(obj["_id"], numeric) + + async for x in await db.test.find(): + self.assertEqual(x["hello"], "world") + self.assertTrue("_id" in x) + + async def test_unique_index(self): + db = self.db + await db.drop_collection("test") + await db.test.create_index("hello") + + # No error. + await db.test.insert_one({"hello": "world"}) + await db.test.insert_one({"hello": "world"}) + + await db.drop_collection("test") + await db.test.create_index("hello", unique=True) + + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"hello": "world"}) + await db.test.insert_one({"hello": "world"}) + + async def test_duplicate_key_error(self): + db = self.db + await db.drop_collection("test") + + await db.test.create_index("x", unique=True) + + await db.test.insert_one({"_id": 1, "x": 1}) + + with self.assertRaises(DuplicateKeyError) as context: + await db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + + with self.assertRaises(DuplicateKeyError) as context: + await db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + self.assertEqual(1, await db.test.count_documents({})) + + async def test_write_error_text_handling(self): + db = self.db + await db.drop_collection("test") + + await db.test.create_index("text", unique=True) + + # Test workaround for SERVER-24007 + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) + + text = utf_8_decode(data, None, True) + await db.test.insert_one({"text": text}) + + # Should raise DuplicateKeyError, not InvalidBSON + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"text": text}) + + with self.assertRaises(DuplicateKeyError): + await db.test.replace_one({"_id": ObjectId()}, {"text": text}, upsert=True) + + # Should raise BulkWriteError, not InvalidBSON + with self.assertRaises(BulkWriteError): + await db.test.insert_many([{"text": text}]) + + async def test_write_error_unicode(self): + coll = self.db.test + self.addAsyncCleanup(coll.drop) + + await coll.create_index("a", unique=True) + await coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + await coll.insert_one({"a": "unicode \U0001f40d"}) + + # Once more for good measure. + self.assertIn("E11000 duplicate key error", str(ctx.exception)) + + async def test_wtimeout(self): + # Ensure setting wtimeout doesn't disable write concern altogether. + # See SERVER-12596. + collection = self.db.test + await collection.drop() + await collection.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + await coll.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + await coll.insert_one({"_id": 1}) + + async def test_error_code(self): + try: + await self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) + except OperationFailure as exc: + self.assertTrue(exc.code in (9, 10147, 16840, 17009)) + # Just check that we set the error document. Fields + # vary by MongoDB version. + self.assertTrue(exc.details is not None) + else: + self.fail("OperationFailure was not raised") + + async def test_index_on_subfield(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_one({"hello": {"a": 4, "b": 5}}) + await db.test.insert_one({"hello": {"a": 7, "b": 2}}) + await db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + await db.drop_collection("test") + await db.test.create_index("hello.a", unique=True) + + await db.test.insert_one({"hello": {"a": 4, "b": 5}}) + await db.test.insert_one({"hello": {"a": 7, "b": 2}}) + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + async def test_replace_one(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.replace_one({}, {"$set": {"x": 1}}) + + id1 = (await db.test.insert_one({"x": 1})).inserted_id + result = await db.test.replace_one({"x": 1}, {"y": 1}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + self.assertEqual(0, await db.test.count_documents({"x": 1})) + self.assertEqual((await db.test.find_one(id1))["y"], 1) # type: ignore + + replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) + result = await db.test.replace_one({"y": 1}, replacement, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"z": 1})) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual((await db.test.find_one(id1))["z"], 1) # type: ignore + + result = await db.test.replace_one({"x": 2}, {"y": 2}, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(0, result.matched_count) + self.assertTrue(result.modified_count in (None, 0)) + self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 2})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.replace_one({"x": 0}, {"y": 0}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_one(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.update_one({}, {"x": 1}) + + id1 = (await db.test.insert_one({"x": 5})).inserted_id + result = await db.test.update_one({}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((await db.test.find_one(id1))["x"], 6) # type: ignore + + id2 = (await db.test.insert_one({"x": 1})).inserted_id + result = await db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((await db.test.find_one(id1))["x"], 7) # type: ignore + self.assertEqual((await db.test.find_one(id2))["x"], 1) # type: ignore + + result = await db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(0, result.matched_count) + self.assertTrue(result.modified_count in (None, 0)) + self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_many(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.update_many({}, {"x": 1}) + + await db.test.insert_one({"x": 4, "y": 3}) + await db.test.insert_one({"x": 5, "y": 5}) + await db.test.insert_one({"x": 4, "y": 4}) + + result = await db.test.update_many({"x": 4}, {"$set": {"y": 5}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(2, result.matched_count) + self.assertTrue(result.modified_count in (None, 2)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(3, await db.test.count_documents({"y": 5})) + + result = await db.test.update_many({"x": 5}, {"$set": {"y": 6}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 6})) + + result = await db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(0, result.matched_count) + self.assertTrue(result.modified_count in (None, 0)) + self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_check_keys(self): + await self.db.drop_collection("test") + self.assertTrue(await self.db.test.insert_one({"hello": "world"})) + + # Modify shouldn't check keys... + self.assertTrue( + await self.db.test.update_one( + {"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True + ) + ) + + # I know this seems like testing the server but I'd like to be notified + # by CI if the server's behavior changes here. + doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) + with self.assertRaises(OperationFailure): + await self.db.test.update_one({"hello": "world"}, doc, upsert=True) + + # This is going to cause keys to be checked and raise InvalidDocument. + # That's OK assuming the server's behavior in the previous assert + # doesn't change. If the behavior changes checking the first key for + # '$' in update won't be good enough anymore. + doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) + with self.assertRaises(OperationFailure): + await self.db.test.replace_one({"hello": "world"}, doc, upsert=True) + + # Replace with empty document + self.assertNotEqual( + 0, (await self.db.test.replace_one({"hello": "world"}, {})).matched_count + ) + + async def test_acknowledged_delete(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"x": 1}, {"x": 1}]) + self.assertEqual(2, (await db.test.delete_many({})).deleted_count) + self.assertEqual(0, (await db.test.delete_many({})).deleted_count) + + @async_client_context.require_version_max(4, 9) + async def test_manual_last_error(self): + coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) + await coll.insert_one({"x": 1}) + await self.db.command("getlasterror", w=1, wtimeout=1) + + async def test_count_documents(self): + db = self.db + await db.drop_collection("test") + self.addAsyncCleanup(db.drop_collection, "test") + + self.assertEqual(await db.test.count_documents({}), 0) + await db.wrong.insert_many([{}, {}]) + self.assertEqual(await db.test.count_documents({}), 0) + await db.test.insert_many([{}, {}]) + self.assertEqual(await db.test.count_documents({}), 2) + await db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(await db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(await db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) + + async def test_estimated_document_count(self): + db = self.db + await db.drop_collection("test") + self.addAsyncCleanup(db.drop_collection, "test") + + self.assertEqual(await db.test.estimated_document_count(), 0) + await db.wrong.insert_many([{}, {}]) + self.assertEqual(await db.test.estimated_document_count(), 0) + await db.test.insert_many([{}, {}]) + self.assertEqual(await db.test.estimated_document_count(), 2) + + async def test_aggregate(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + await db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + result = await db.test.aggregate([pipeline]) + self.assertTrue(isinstance(result, AsyncCommandCursor)) + self.assertEqual([{"foo": [1, 2]}], await result.to_list()) + + # Test write concern. + with self.write_concern_collection() as coll: + await coll.aggregate([{"$out": "output-collection"}]) + + async def test_aggregate_raw_bson(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + await db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) + result = await coll.aggregate([pipeline]) + self.assertTrue(isinstance(result, AsyncCommandCursor)) + first_result = await anext(result) + self.assertIsInstance(first_result, RawBSONDocument) + self.assertEqual([1, 2], list(first_result["foo"])) + + async def test_aggregation_cursor_validation(self): + db = self.db + projection = {"$project": {"_id": "$_id"}} + cursor = await db.test.aggregate([projection], cursor={}) + self.assertTrue(isinstance(cursor, AsyncCommandCursor)) + + async def test_aggregation_cursor(self): + db = self.db + if await async_client_context.has_secondaries: + # Test that getMore messages are sent to the right server. + db = self.client.get_database( + db.name, + read_preference=ReadPreference.SECONDARY, + write_concern=WriteConcern(w=self.w), + ) + + for collection_size in (10, 1000): + await db.drop_collection("test") + await db.test.insert_many([{"_id": i} for i in range(collection_size)]) + expected_sum = sum(range(collection_size)) + # Use batchSize to ensure multiple getMore messages + cursor = await db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) + + self.assertEqual(expected_sum, sum(doc["_id"] for doc in await cursor.to_list())) + + # Test that batchSize is handled properly. + cursor = await db.test.aggregate([], batchSize=5) + self.assertEqual(5, len(cursor._data)) + # Force a getMore + cursor._data.clear() + await anext(cursor) + # batchSize - 1 + self.assertEqual(4, len(cursor._data)) + # Exhaust the cursor. There shouldn't be any errors. + async for _doc in cursor: + pass + + async def test_aggregation_cursor_alive(self): + await self.db.test.delete_many({}) + await self.db.test.insert_many([{} for _ in range(3)]) + self.addAsyncCleanup(self.db.test.delete_many, {}) + cursor = await self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) + n = 0 + while True: + await cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + async def test_invalid_session_parameter(self): + async def try_invalid_session(): + with await self.db.test.aggregate([], {}): # type:ignore + pass + + with self.assertRaisesRegex(ValueError, "must be a ClientSession"): + await try_invalid_session() + + async def test_large_limit(self): + db = self.db + await db.drop_collection("test_large_limit") + await db.test_large_limit.create_index([("x", 1)]) + my_str = "mongomongo" * 1000 + + await db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) + + i = 0 + y = 0 + async for doc in (await db.test_large_limit.find(limit=1900)).sort([("x", 1)]): + i += 1 + y += doc["x"] + + self.assertEqual(1900, i) + self.assertEqual((1900 * 1899) / 2, y) + + async def test_find_kwargs(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, await db.test.count_documents({})) + + total = 0 + async for x in await db.test.find({}, skip=4, limit=2): + total += x["x"] + + self.assertEqual(9, total) + + async def test_rename(self): + db = self.db + await db.drop_collection("test") + await db.drop_collection("foo") + + with self.assertRaises(TypeError): + await db.test.rename(5) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + await db.test.rename("") + with self.assertRaises(InvalidName): + await db.test.rename("te$t") + with self.assertRaises(InvalidName): + await db.test.rename(".test") + with self.assertRaises(InvalidName): + await db.test.rename("test.") + with self.assertRaises(InvalidName): + await db.test.rename("tes..t") + + self.assertEqual(0, await db.test.count_documents({})) + self.assertEqual(0, await db.foo.count_documents({})) + + await db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, await db.test.count_documents({})) + + await db.test.rename("foo") + + self.assertEqual(0, await db.test.count_documents({})) + self.assertEqual(10, await db.foo.count_documents({})) + + x = 0 + async for doc in await db.foo.find(): + self.assertEqual(x, doc["x"]) + x += 1 + + await db.test.insert_one({}) + with self.assertRaises(OperationFailure): + await db.foo.rename("test") + await db.foo.rename("test", dropTarget=True) + + with self.write_concern_collection() as coll: + await coll.rename("foo") + + @no_type_check + async def test_find_one(self): + db = self.db + await db.drop_collection("test") + + _id = (await db.test.insert_one({"hello": "world", "foo": "bar"})).inserted_id + + self.assertEqual("world", (await db.test.find_one())["hello"]) + self.assertEqual(await db.test.find_one(_id), await db.test.find_one()) + self.assertEqual(await db.test.find_one(None), await db.test.find_one()) + self.assertEqual(await db.test.find_one({}), await db.test.find_one()) + self.assertEqual(await db.test.find_one({"hello": "world"}), await db.test.find_one()) + + self.assertTrue("hello" in await db.test.find_one(projection=["hello"])) + self.assertTrue("hello" not in await db.test.find_one(projection=["foo"])) + + self.assertTrue("hello" in await db.test.find_one(projection=("hello",))) + self.assertTrue("hello" not in await db.test.find_one(projection=("foo",))) + + self.assertTrue("hello" in await db.test.find_one(projection={"hello"})) + self.assertTrue("hello" not in await db.test.find_one(projection={"foo"})) + + self.assertTrue("hello" in await db.test.find_one(projection=frozenset(["hello"]))) + self.assertTrue("hello" not in await db.test.find_one(projection=frozenset(["foo"]))) + + self.assertEqual(["_id"], list(await db.test.find_one(projection={"_id": True}))) + self.assertTrue("hello" in list(await db.test.find_one(projection={}))) + self.assertTrue("hello" in list(await db.test.find_one(projection=[]))) + + self.assertEqual(None, await db.test.find_one({"hello": "foo"})) + self.assertEqual(None, await db.test.find_one(ObjectId())) + + async def test_find_one_non_objectid(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_one({"_id": 5}) + + self.assertTrue(await db.test.find_one(5)) + self.assertFalse(await db.test.find_one(6)) + + async def test_find_one_with_find_args(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_many([{"x": i} for i in range(1, 4)]) + + self.assertEqual(1, (await db.test.find_one())["x"]) + self.assertEqual(2, (await db.test.find_one(skip=1, limit=2))["x"]) + + async def test_find_with_sort(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}]) + + self.assertEqual(2, (await db.test.find_one())["x"]) + self.assertEqual(1, (await db.test.find_one(sort=[("x", 1)]))["x"]) + self.assertEqual(3, (await db.test.find_one(sort=[("x", -1)]))["x"]) + + async def to_list(things): + return [thing["x"] async for thing in things] + + self.assertEqual([2, 1, 3], await to_list(await db.test.find())) + self.assertEqual([1, 2, 3], await to_list(await db.test.find(sort=[("x", 1)]))) + self.assertEqual([3, 2, 1], await to_list(await db.test.find(sort=[("x", -1)]))) + + with self.assertRaises(TypeError): + await db.test.find(sort=5) + with self.assertRaises(TypeError): + await db.test.find(sort="hello") + with self.assertRaises(TypeError): + await db.test.find(sort=["hello", 1]) + + # TODO doesn't actually test functionality, just that it doesn't blow up + async def test_cursor_timeout(self): + await (await self.db.test.find(no_cursor_timeout=True)).to_list() + await (await self.db.test.find(no_cursor_timeout=False)).to_list() + + async def test_exhaust(self): + if await async_is_mongos(self.db.client): + with self.assertRaises(InvalidOperation): + await self.db.test.find(cursor_type=CursorType.EXHAUST) + return + + # Limit is incompatible with exhaust. + with self.assertRaises(InvalidOperation): + await self.db.test.find(cursor_type=CursorType.EXHAUST, limit=5) + cur = await self.db.test.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(InvalidOperation): + cur.limit(5) + cur = await self.db.test.find(limit=5) + with self.assertRaises(InvalidOperation): + await cur.add_option(64) + cur = await self.db.test.find() + await cur.add_option(64) + with self.assertRaises(InvalidOperation): + cur.limit(5) + + await self.db.drop_collection("test") + # Insert enough documents to require more than one batch + await self.db.test.insert_many([{"i": i} for i in range(150)]) + + client = await async_rs_or_single_client(maxPoolSize=1) + self.addAsyncCleanup(client.close) + pool = await async_get_pool(client) + + # Make sure the socket is returned after exhaustion. + cur = await client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) + await anext(cur) + self.assertEqual(0, len(pool.conns)) + async for _ in cur: + pass + self.assertEqual(1, len(pool.conns)) + + # Same as previous but don't call next() + async for _ in await client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): + pass + self.assertEqual(1, len(pool.conns)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = await client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) + if async_client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. + for _ in range(3): + await anext(cur) + else: + await anext(cur) + self.assertEqual(0, len(pool.conns)) + # if sys.platform.startswith("java") or "PyPy" in sys.version: + # # Don't wait for GC or use gc.collect(), it's unreliable. + await cur.close() + cur = None + # Wait until the background thread returns the socket. + wait_until(lambda: pool.active_sockets == 0, "return socket") + # The socket should be discarded. + self.assertEqual(0, len(pool.conns)) + + async def test_distinct(self): + await self.db.drop_collection("test") + + test = self.db.test + await test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + + distinct = await test.distinct("a") + distinct.sort() + + self.assertEqual([1, 2, 3], distinct) + + distinct = await (await test.find({"a": {"$gt": 1}})).distinct("a") + distinct.sort() + self.assertEqual([2, 3], distinct) + + distinct = await test.distinct("a", {"a": {"$gt": 1}}) + distinct.sort() + self.assertEqual([2, 3], distinct) + + await self.db.drop_collection("test") + + await test.insert_one({"a": {"b": "a"}, "c": 12}) + await test.insert_one({"a": {"b": "b"}, "c": 12}) + await test.insert_one({"a": {"b": "c"}, "c": 12}) + await test.insert_one({"a": {"b": "c"}, "c": 12}) + + distinct = await test.distinct("a.b") + distinct.sort() + + self.assertEqual(["a", "b", "c"], distinct) + + async def test_query_on_query_field(self): + await self.db.drop_collection("test") + await self.db.test.insert_one({"query": "foo"}) + await self.db.test.insert_one({"bar": "foo"}) + + self.assertEqual(1, await self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual( + 1, len(await (await self.db.test.find({"query": {"$ne": None}})).to_list()) + ) + + async def test_min_query(self): + await self.db.drop_collection("test") + await self.db.test.insert_many([{"x": 1}, {"x": 2}]) + await self.db.test.create_index("x") + + cursor = await self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") + + docs = await cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(2, docs[0]["x"]) + + async def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + await self.db.test.drop() + n_docs = await async_client_context.max_write_batch_size + 100 + await self.db.test.insert_many([{} for _ in range(n_docs)]) + self.assertEqual(n_docs, await self.db.test.count_documents({})) + await self.db.test.drop() + + async def test_insert_many_large_batch(self): + # Tests legacy insert. + db = self.client.test_insert_large_batch + self.addAsyncCleanup(self.client.drop_database, "test_insert_large_batch") + max_bson_size = await async_client_context.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = "x" * int(max_bson_size / 2) + + # Batch insert that requires 2 batches. + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] + await db.collection_0.insert_many(successful_insert) + self.assertEqual(4, await db.collection_0.count_documents({})) + + await db.collection_0.drop() + + # Test that inserts fail after first error. + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] + + with self.assertRaises(BulkWriteError): + await db.collection_1.insert_many(insert_second_fails) + + self.assertEqual(1, await db.collection_1.count_documents({})) + + await db.collection_1.drop() + + # 2 batches, 2nd insert fails, unacknowledged, ordered. + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) + await unack_coll.insert_many(insert_second_fails) + + async def async_lambda(): + return await db.collection_2.count_documents({}) == 1 + + await async_wait_until(async_lambda, "insert 1 document", timeout=60) + + await db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] + + with self.assertRaises(OperationFailure) as context: + await db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn("id1", str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, await db.collection_3.count_documents({})) + + await db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) + await unack_coll.insert_many(insert_two_failures, ordered=False) + + async def async_lambda(): + return await db.collection_4.count_documents({}) == 2 + + # Only the first and third documents are inserted. + await async_wait_until(async_lambda, "insert 2 documents", timeout=60) + + await db.collection_4.drop() + + async def test_messages_with_unicode_collection_names(self): + db = self.db + + await db["Employés"].insert_one({"x": 1}) + await db["Employés"].replace_one({"x": 1}, {"x": 2}) + await db["Employés"].delete_many({}) + await db["Employés"].find_one() + await (await db["Employés"].find()).to_list() + + async def test_drop_indexes_non_existent(self): + await self.db.drop_collection("test") + await self.db.test.drop_indexes() + + # This is really a bson test but easier to just reproduce it here... + # (Shame on me) + async def test_bad_encode(self): + c = self.db.test + await c.drop() + with self.assertRaises(InvalidDocument): + await c.insert_one({"x": c}) + + class BadGetAttr(dict): + def __getattr__(self, name): + pass + + bad = BadGetAttr([("foo", "bar")]) + await c.insert_one({"bad": bad}) + self.assertEqual("bar", (await c.find_one())["bad"]["foo"]) # type: ignore + + async def test_array_filters_validation(self): + # array_filters must be a list. + c = self.db.test + with self.assertRaises(TypeError): + await c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + update = {"$set": {"a": 1}} + await c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] + + async def test_array_filters_unacknowledged(self): + c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(ConfigurationError): + await c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await c_w0.find_one_and_update( + {}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}] + ) + + async def test_find_one_and(self): + c = self.db.test + await c.drop() + await c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual( + {"_id": 1, "i": 1}, await c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}}) + ) + self.assertEqual( + {"_id": 1, "i": 3}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, await c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, await c.find_one({"_id": 1})) + + self.assertEqual(None, await c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + await c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + await c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) + + await c.drop() + for j in range(5): + await c.insert_one({"j": j, "i": 0}) + + sort = [("j", DESCENDING)] + self.assertEqual(4, (await c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) + + async def test_find_one_and_write_concern(self): + listener = EventListener() + db = (await async_single_client(event_listeners=[listener]))[self.db.name] + # non-default WriteConcern. + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) + # default WriteConcern. + c_default = db.get_collection("test", write_concern=WriteConcern()) + # Authenticate the client and throw out auth commands from the listener. + await db.command("ping") + listener.reset() + await c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + await c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + await c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + # Test write concern errors. + if async_client_context.is_rs: + c_wc_error = db.get_collection( + "test", write_concern=WriteConcern(w=len(async_client_context.nodes) + 1) + ) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_replace( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_delete( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + listener.reset() + + await c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + await c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + await c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + async def test_find_with_nested(self): + c = self.db.test + await c.drop() + await c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] + self.assertEqual( + [2], + [ + i["i"] + async for i in await c.find( + { + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, + ] + } + ) + ], + ) + + self.assertEqual( + [0, 1, 2], + [ + i["i"] + async for i in await c.find( + { + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, + ] + } + ) + ], + ) + + async def test_find_regex(self): + c = self.db.test + await c.drop() + await c.insert_one({"r": re.compile(".*")}) + + self.assertTrue(isinstance((await c.find_one())["r"], Regex)) # type: ignore + async for doc in await c.find(): + self.assertTrue(isinstance(doc["r"], Regex)) + + def test_find_command_generation(self): + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(AsyncCollection(self.db, "test")) + + @async_client_context.require_version_min(5, 0, 0) + async def test_helpers_with_let(self): + c = self.db.test + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (c.find, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([],)), + ] + for let in [10, "str", [], False]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): + await helper(*args, let=let) # type: ignore + for helper, args in helpers: + await helper(*args, let={}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 3e5dcec563..f6a6c96949 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -26,7 +26,7 @@ from pymongo import MongoClient from pymongo.errors import OperationFailure -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri class TestAuthAWS(unittest.TestCase): diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index c7614fa0c3..3fb2894783 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -27,7 +27,6 @@ sys.path[0:0] = [""] -import pprint from test.unified_format import generate_test_classes from test.utils import EventListener @@ -35,12 +34,16 @@ from pymongo import MongoClient from pymongo._azure_helpers import _get_azure_response from pymongo._gcp_helpers import _get_gcp_response -from pymongo.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult -from pymongo.cursor import CursorType +from pymongo.cursor_shared import CursorType from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure -from pymongo.hello import HelloCompat -from pymongo.operations import InsertOne -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, +) +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.operations import InsertOne +from pymongo.synchronous.uri_parser import parse_uri ROOT = Path(__file__).parent.parent.resolve() TEST_PATH = ROOT / "auth" / "unified" diff --git a/test/lambda/mongodb/app.py b/test/lambda/mongodb/app.py index 5840347d9a..deb26bdf1e 100644 --- a/test/lambda/mongodb/app.py +++ b/test/lambda/mongodb/app.py @@ -12,7 +12,7 @@ from bson import has_c as has_bson_c from pymongo import MongoClient from pymongo import has_c as has_pymongo_c -from pymongo.monitoring import ( +from pymongo.synchronous.monitoring import ( CommandListener, ConnectionPoolListener, ServerHeartbeatListener, diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index 8ee33431a8..1e91384dc4 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -20,7 +20,7 @@ from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference -from pymongo.read_preferences import ( +from pymongo.synchronous.read_preferences import ( _MONGOS_MODES, make_read_preference, read_pref_mode_from_name, diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py index d05cfb531a..36e004c05a 100644 --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -19,7 +19,7 @@ from pymongo import MongoClient from pymongo.errors import ConnectionFailure -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE class TestNetworkDisconnectPrimary(unittest.TestCase): diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index dd95254967..aa2437f230 100644 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -19,8 +19,8 @@ from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going from pymongo import MongoClient, WriteConcern -from pymongo.cursor import CursorType -from pymongo.operations import DeleteOne, InsertOne, UpdateOne +from pymongo.cursor_shared import CursorType +from pymongo.synchronous.operations import DeleteOne, InsertOne, UpdateOne Operation = namedtuple("Operation", ["name", "function", "request", "reply"]) diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index 0fa7b84861..36b8f4fbee 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -22,7 +22,7 @@ from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference -from pymongo.read_preferences import ( +from pymongo.synchronous.read_preferences import ( _MONGOS_MODES, make_read_preference, read_pref_mode_from_name, diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 5297709886..9eb4de28c8 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -21,7 +21,7 @@ from bson import SON from pymongo import MongoClient -from pymongo.read_preferences import ( +from pymongo.synchronous.read_preferences import ( Nearest, Primary, PrimaryPreferred, diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 19dfb9e395..080110020a 100644 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -22,8 +22,8 @@ from pymongo import MongoClient from pymongo.errors import ConnectionFailure -from pymongo.operations import _Op from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.operations import _Op class TestResetAndRequestCheck(unittest.TestCase): diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 45b7d51ba0..9692465d56 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -28,7 +28,7 @@ from operations import operations # type: ignore[import] from pymongo import MongoClient -from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.synchronous.read_preferences import make_read_preference, read_pref_mode_from_name class TestSlaveOkaySharded(unittest.TestCase): diff --git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index b03232807e..bf1cdee74b 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -27,8 +27,8 @@ from operations import operations # type: ignore[import] from pymongo import MongoClient -from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.synchronous.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE def topology_type_name(client): diff --git a/test/mod_wsgi_test/mod_wsgi_test.py b/test/mod_wsgi_test/mod_wsgi_test.py index c5f5c3086a..d9e6c163dd 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.py +++ b/test/mod_wsgi_test/mod_wsgi_test.py @@ -37,7 +37,7 @@ from bson.dbref import DBRef from bson.objectid import ObjectId from bson.regex import Regex -from pymongo.mongo_client import MongoClient +from pymongo.synchronous.mongo_client import MongoClient # Ensure the C extensions are installed. assert bson.has_c() diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index de2714cc00..b4f15dc14d 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -41,11 +41,7 @@ def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS={}&tlsCAFile={}&{}").format( - TIMEOUT_MS, - CA_FILE, - options, - ) + uri = f"mongodb://localhost:27017/?serverSelectionTimeoutMS={TIMEOUT_MS}&tlsCAFile={CA_FILE}&{options}" print(uri) client = pymongo.MongoClient(uri) client.admin.command("ping") diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py index c750d0cf71..d3c1a271cd 100644 --- a/test/pymongo_mocks.py +++ b/test/pymongo_mocks.py @@ -20,12 +20,13 @@ from functools import partial from test import client_context -from pymongo import MongoClient, common from pymongo.errors import AutoReconnect, NetworkTimeout -from pymongo.hello import Hello, HelloCompat -from pymongo.monitor import Monitor -from pymongo.pool import Pool -from pymongo.server_description import ServerDescription +from pymongo.synchronous import common +from pymongo.synchronous.hello import Hello, HelloCompat +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.pool import Pool +from pymongo.synchronous.server_description import ServerDescription class MockPool(Pool): diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py index 95a36ad7a2..c5084f5943 100644 --- a/test/sigstop_sigcont.py +++ b/test/sigstop_sigcont.py @@ -21,9 +21,9 @@ sys.path[0:0] = [""] -from pymongo import monitoring -from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi +from pymongo.synchronous import monitoring +from pymongo.synchronous.mongo_client import MongoClient SERVER_API = None MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") diff --git a/test/synchronous/__init__.py b/test/synchronous/__init__.py new file mode 100644 index 0000000000..6eb11eee85 --- /dev/null +++ b/test/synchronous/__init__.py @@ -0,0 +1,981 @@ +# Copyright 2010-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Asynchronous test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations + +import asyncio +import base64 +import gc +import multiprocessing +import os +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +import unittest +import warnings +from asyncio import iscoroutinefunction +from test import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TEST_SERVERLESS, + TLS_OPTIONS, + SystemCertsPatcher, + _all_users, + _create_user, + db_pwd, + db_user, + global_knobs, + host, + is_server_resolvable, + port, + print_running_clients, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from contextlib import contextmanager +from functools import wraps +from test.version import Version +from typing import Any, Callable, Dict, Generator, no_type_check +from unittest import SkipTest +from urllib.parse import quote_plus + +import pymongo +import pymongo.errors +from bson.son import SON +from pymongo.server_api import ServerApi +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.synchronous import common, message +from pymongo.synchronous.common import partition_node +from pymongo.synchronous.database import Database +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.uri_parser import parse_uri + +if HAVE_SSL: + import ssl + +_IS_SYNC = True + + +class ClientContext: + client: MongoClient + + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI + + def __init__(self): + """Create a client and grab essential information from the server.""" + self.connection_attempts = [] + self.connected = False + self.w = None + self.nodes = set() + self.replica_set_name = None + self.cmd_line = None + self.server_status = None + self.version = Version(-1) # Needs to be comparable with Version + self.auth_enabled = False + self.test_commands_enabled = False + self.server_parameters = {} + self._hello = None + self.is_mongos = False + self.mongoses = [] + self.is_rs = False + self.has_ipv6 = False + self.tls = False + self.tlsCertificateKeyFile = False + self.server_is_resolvable = is_server_resolvable() + self.default_client_options: Dict = {} + self.sessions_enabled = False + self.client = None # type: ignore + self.conn_lock = threading.Lock() + self.is_data_lake = False + self.load_balancer = TEST_LOADBALANCER + self.serverless = TEST_SERVERLESS + if self.load_balancer or self.serverless: + self.default_client_options["loadBalanced"] = True + if COMPRESSORS: + self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api + + @property + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port + if client_context.auth_enabled: + opts["username"] = db_user + opts["password"] = db_pwd + if self.replica_set_name: + opts["replicaSet"] = self.replica_set_name + return opts + + @property + def uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + pair = self.pair + return f"mongodb://{auth_part}{pair}/?{opts_part}" + + @property + def hello(self): + if not self._hello: + if self.serverless or self.load_balancer: + self._hello = self.client.admin.command(HelloCompat.CMD) + else: + self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello + + def _connect(self, host, port, **kwargs): + kwargs.update(self.default_client_options) + client: MongoClient = pymongo.MongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) + try: + try: + client.admin.command("ping") # Can we connect? + except pymongo.errors.OperationFailure as exc: + # SERVER-32063 + self.connection_attempts.append( + f"connected client {client!r}, but legacy hello failed: {exc}" + ) + else: + self.connection_attempts.append(f"successfully connected client {client!r}") + # If connected, then return client with default timeout + return pymongo.MongoClient(host, port, **kwargs) + except pymongo.errors.ConnectionFailure as exc: + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") + return None + finally: + client.close() + + def _init_client(self): + self.client = self._connect(host, port) + if self.client is not None: + # Return early when connected to dataLake as mongohoused does not + # support the getCmdLineOpts command and is tested without TLS. + build_info: Any = self.client.admin.command("buildInfo") + if "dataLake" in build_info: + self.is_data_lake = True + self.auth_enabled = True + self.client = self._connect(host, port, username=db_user, password=db_pwd) + self.connected = True + return + + if HAVE_SSL and not self.client: + # Is MongoDB configured for SSL? + self.client = self._connect(host, port, **TLS_OPTIONS) + if self.client: + self.tls = True + self.default_client_options.update(TLS_OPTIONS) + self.tlsCertificateKeyFile = True + + if self.client: + self.connected = True + + if self.serverless: + self.auth_enabled = True + else: + try: + self.cmd_line = self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() + + if self.auth_enabled: + if not self.serverless and not IS_SRV: + # See if db_user already exists. + if not self._check_user_provided(): + _create_user(self.client.admin, db_user, db_pwd) + + self.client = self._connect( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + + # May not have this if OperationFailure was raised earlier. + self.cmd_line = self.client.admin.command("getCmdLineOpts") + + if self.serverless: + self.server_status = {} + else: + self.server_status = self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False + + hello = self.hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello + + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) + self.is_rs = True + if self.auth_enabled: + # It doesn't matter which member we use as the seed here. + self.client = pymongo.MongoClient( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + else: + self.client = pymongo.MongoClient( + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) + + # Get the authoritative hello result from the primary. + self._hello = None + hello = self.hello + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) + self.nodes = set(nodes) + else: + self.nodes = {(host, port)} + self.w = len(hello.get("hosts", [])) or 1 + self.version = Version.from_client(self.client) + + if self.serverless: + self.server_parameters = { + "requireApiVersion": False, + "enableTestCommands": True, + } + self.test_commands_enabled = True + self.has_ipv6 = False + else: + self.server_parameters = self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: + self.test_commands_enabled = True + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: + self.test_commands_enabled = True + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": + self.test_commands_enabled = True + self.has_ipv6 = self._server_started_with_ipv6() + + self.is_mongos = (self.hello).get("msg") == "isdbgrid" + if self.is_mongos: + address = self.client.address + self.mongoses.append(address) + if not self.serverless: + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + + def init(self): + with self.conn_lock: + if not self.client and not self.connection_attempts: + self._init_client() + + def connection_attempt_info(self): + return "\n".join(self.connection_attempts) + + @property + def host(self): + if self.is_rs and not IS_SRV: + primary = self.client.primary + return str(primary[0]) if primary is not None else host + return host + + @property + def port(self): + if self.is_rs and not IS_SRV: + primary = self.client.primary + return primary[1] if primary is not None else port + return port + + @property + def pair(self): + return "%s:%d" % (self.host, self.port) + + @property + def has_secondaries(self): + if not self.client: + return False + return bool(len(self.client.secondaries)) + + @property + def storage_engine(self): + try: + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) + except AttributeError: + # Raised if self.server_status is None. + return None + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + + def _check_user_provided(self): + """Return True if db_user/db_password is already an admin user.""" + client: MongoClient = pymongo.MongoClient( + host, + port, + username=db_user, + password=db_pwd, + **self.default_client_options, + ) + + try: + return db_user in _all_users(client.admin) + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: + # Auth failed. + return False + else: + raise + finally: + client.close() + + def _server_started_with_auth(self): + # MongoDB >= 2.0 + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] + # MongoDB >= 2.6 + if "security" in parsed: + security = parsed["security"] + # >= rc3 + if "authorization" in security: + return security["authorization"] == "enabled" + # < rc3 + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) + # Legacy + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv + + def _server_started_with_ipv6(self): + if not socket.has_ipv6: + return False + + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): + return False + else: + if "--ipv6" not in self.cmd_line["argv"]: + return False + + # The server was started with --ipv6. Is there an IPv6 route to it? + try: + for info in socket.getaddrinfo(self.host, self.port): + if info[0] == socket.AF_INET6: + return True + except OSError: + pass + + return False + + def _require(self, condition, msg, func=None): + def make_wrapper(f): + if iscoroutinefunction(f): + wraps_async = True + else: + wraps_async = False + + @wraps(f) + def wrap(*args, **kwargs): + self.init() + # Always raise SkipTest if we can't connect to MongoDB + if not self.connected: + pair = self.pair + raise SkipTest(f"Cannot connect to MongoDB on {pair}") + if iscoroutinefunction(condition) and condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) + elif condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) + if "self.pair" in msg: + new_msg = msg.replace("self.pair", self.pair) + else: + new_msg = msg + raise SkipTest(new_msg) + + return wrap + + if func is None: + + def decorate(f): + return make_wrapper(f) + + return decorate + return make_wrapper(func) + + def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): + kwargs["writeConcern"] = {"w": self.w} + return _create_user(self.client[dbname], user, pwd, roles, **kwargs) + + def drop_user(self, dbname, user): + self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) + + def require_connection(self, func): + """Run a test only if we can connect to MongoDB.""" + return self._require( + lambda: True, # _require checks if we're connected + "Cannot connect to MongoDB on self.pair", + func=func, + ) + + def require_data_lake(self, func): + """Run a test only if we are connected to Atlas Data Lake.""" + return self._require( + lambda: self.is_data_lake, + "Not connected to Atlas Data Lake on self.pair", + func=func, + ) + + def require_no_mmap(self, func): + """Run a test only if the server is not using the MMAPv1 storage + engine. Only works for standalone and replica sets; tests are + run regardless of storage engine on sharded clusters. + """ + + def is_not_mmap(): + if self.is_mongos: + return True + return self.storage_engine != "mmapv1" + + return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) + + def require_version_min(self, *ver): + """Run a test only if the server version is at least ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) + + def require_version_max(self, *ver): + """Run a test only if the server version is at most ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) + + def require_auth(self, func): + """Run a test only if the server is running with auth enabled.""" + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) + + def require_no_auth(self, func): + """Run a test only if the server is running without auth enabled.""" + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) + + def require_replica_set(self, func): + """Run a test only if the client is connected to a replica set.""" + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) + + def require_secondaries_count(self, count): + """Run a test only if the client is connected to a replica set that has + `count` secondaries. + """ + + def sec_count(): + return 0 if not self.client else len(self.client.secondaries) + + return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + + @property + def supports_secondary_read_pref(self): + if self.has_secondaries: + return True + if self.is_mongos: + shard = self.client.config.shards.find_one()["host"] # type:ignore[index] + num_members = shard.count(",") + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read preference", + ) + + def require_no_replica_set(self, func): + """Run a test if the client is *not* connected to a replica set.""" + return self._require( + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) + + def require_ipv6(self, func): + """Run a test only if the client can connect to a server via IPv6.""" + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) + + def require_no_mongos(self, func): + """Run a test only if the client is not connected to a mongos.""" + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) + + def require_mongos(self, func): + """Run a test only if the client is connected to a mongos.""" + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) + + def require_multiple_mongoses(self, func): + """Run a test only if the client is connected to a sharded cluster + that has 2 mongos nodes. + """ + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) + + def require_standalone(self, func): + """Run a test only if the client is connected to a standalone.""" + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) + + def require_no_standalone(self, func): + """Run a test only if the client is not connected to a standalone.""" + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) + + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) + + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) + + def require_no_serverless(self, func): + """Run a test only if the client is not connected to serverless.""" + return self._require( + lambda: not self.serverless, "Must not be connected to serverless", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func))) + + def is_topology_type(self, topologies): + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "sharded-replicaset", + "load-balanced", + } + if unknown: + raise AssertionError(f"Unknown topologies: {unknown!r}") + if self.load_balancer: + if "load-balanced" in topologies: + return True + return False + if "single" in topologies and not (self.is_mongos or self.is_rs): + return True + if "replicaset" in topologies and self.is_rs: + return True + if "sharded" in topologies and self.is_mongos: + return True + if "sharded-replicaset" in topologies and self.is_mongos: + shards = (client_context.client.config.shards.find()).to_list() + for shard in shards: + # For a 3-member RS-backed sharded cluster, shard['host'] + # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' + # Otherwise it will be 'ip1:port1' + host_spec = shard["host"] + if not len(host_spec.split("/")) > 1: + return False + return True + return False + + def require_cluster_type(self, topologies=None): + """Run a test only if the client is connected to a cluster that + conforms to one of the specified topologies. Acceptable topologies + are 'single', 'replicaset', and 'sharded'. + """ + topologies = topologies or [] + + def _is_valid_topology(): + return self.is_topology_type(topologies) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) + + def require_test_commands(self, func): + """Run a test only if the server has test commands enabled.""" + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) + + def require_failCommand_fail_point(self, func): + """Run a test only if the server supports the failCommand fail + point. + """ + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) + + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 + return self._require( + lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + "failCommand appName must be supported", + func=func, + ) + + def require_failCommand_blockConnection(self, func): + """Run a test only if the server supports failCommand blockConnection.""" + return self._require( + lambda: ( + self.test_commands_enabled + and ( + (not self.is_mongos and self.version >= (4, 2, 9)) + or (self.is_mongos and self.version >= (4, 4)) + ) + ), + "failCommand blockConnection is not supported", + func=func, + ) + + def require_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func) + + def require_no_tls(self, func): + """Run a test only if the client can connect over TLS.""" + return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func) + + def require_tlsCertificateKeyFile(self, func): + """Run a test only if the client can connect with tlsCertificateKeyFile.""" + return self._require( + lambda: self.tlsCertificateKeyFile, + "Must be able to connect with tlsCertificateKeyFile", + func=func, + ) + + def require_server_resolvable(self, func): + """Run a test only if the hostname 'server' is resolvable.""" + return self._require( + lambda: self.server_is_resolvable, + "No hosts entry for 'server'. Cannot validate hostname in the certificate", + func=func, + ) + + def require_sessions(self, func): + """Run a test only if the deployment supports sessions.""" + return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) + + def supports_retryable_writes(self): + if self.storage_engine == "mmapv1": + return False + if not self.sessions_enabled: + return False + return self.is_mongos or self.is_rs + + def require_retryable_writes(self, func): + """Run a test only if the deployment supports retryable writes.""" + return self._require( + self.supports_retryable_writes, + "This server does not support retryable writes", + func=func, + ) + + def supports_transactions(self): + if self.storage_engine == "mmapv1": + return False + + if self.version.at_least(4, 1, 8): + return self.is_mongos or self.is_rs + + if self.version.at_least(4, 0): + return self.is_rs + + return False + + def require_transactions(self, func): + """Run a test only if the deployment might support transactions. + + *Might* because this does not test the storage engine or FCV. + """ + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) + + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) + + def mongos_seeds(self): + return ",".join("{}:{}".format(*address) for address in self.mongoses) + + @property + def supports_failCommand_fail_point(self): + """Does the server support the failCommand fail point?""" + if self.is_mongos: + return self.version.at_least(4, 1, 5) and self.test_commands_enabled + else: + return self.version.at_least(4, 0) and self.test_commands_enabled + + @property + def requires_hint_with_min_max_queries(self): + """Does the server require a hint with min/max queries.""" + # Changed in SERVER-39567. + return self.version.at_least(4, 1, 10) + + @property + def max_bson_size(self): + return (self.hello)["maxBsonObjectSize"] + + @property + def max_write_batch_size(self): + return (self.hello)["maxWriteBatchSize"] + + +# Reusable client context +client_context = ClientContext() + + +class PyMongoTestCase(unittest.TestCase): + def assertEqualCommand(self, expected, actual, msg=None): + self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) + + def assertEqualReply(self, expected, actual, msg=None): + self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client_context.client.admin.command(cmd_on) + try: + yield + finally: + client_context.client.admin.command( + "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. + proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) + + +class IntegrationTest(PyMongoTestCase): + """Async base class for TestCases that need a connection to MongoDB to pass.""" + + client: MongoClient[dict] + db: Database + credentials: Dict[str, str] + + @classmethod + def setUpClass(cls): + if _IS_SYNC: + cls._setup_class() + else: + asyncio.run(cls._setup_class()) + + @classmethod + @client_context.require_connection + def _setup_class(cls): + if client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + if client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): + raise SkipTest("this test does not support serverless") + cls.client = client_context.client + cls.db = cls.client.pymongo_test + if client_context.auth_enabled: + cls.credentials = {"username": db_user, "password": db_pwd} + else: + cls.credentials = {} + + def cleanup_colls(self, *collections): + """Cleanup collections faster than drop_collection.""" + for c in collections: + c = self.client[c.database.name][c.name] + c.delete_many({}) + c.drop_indexes() + + def patch_system_certs(self, ca_certs): + patcher = SystemCertsPatcher(ca_certs) + self.addCleanup(patcher.disable) + + +def setup(): + client_context.init() + warnings.resetwarnings() + warnings.simplefilter("always") + global_knobs.enable() + + +def teardown(): + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + c = client_context.client + if c: + if not client_context.is_data_lake: + c.drop_database("pymongo-pooling-tests") + c.drop_database("pymongo_test") + c.drop_database("pymongo_test1") + c.drop_database("pymongo_test2") + c.drop_database("pymongo_test_mike") + c.drop_database("pymongo_test_bernie") + c.close() + + print_running_clients() + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) diff --git a/test/synchronous/conftest.py b/test/synchronous/conftest.py new file mode 100644 index 0000000000..5befb96e1b --- /dev/null +++ b/test/synchronous/conftest.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from test.synchronous import setup, teardown + +import pytest + +_IS_SYNC = True + + +@pytest.fixture(scope="session", autouse=True) +def test_setup_and_teardown(): + setup() + yield + teardown() diff --git a/test/synchronous/test_collection.py b/test/synchronous/test_collection.py new file mode 100644 index 0000000000..39d7e13a31 --- /dev/null +++ b/test/synchronous/test_collection.py @@ -0,0 +1,2233 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection module.""" +from __future__ import annotations + +import asyncio +import contextlib +import re +import sys +from codecs import utf_8_decode +from collections import defaultdict +from typing import Any, Iterable, no_type_check + +from pymongo.synchronous.database import Database + +sys.path[0:0] = [""] + +from test import unittest +from test.synchronous import IntegrationTest, client_context +from test.utils import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + get_pool, + is_mongos, + rs_or_single_client, + single_client, + wait_until, +) + +from bson import encode +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex +from bson.son import SON +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT +from pymongo.cursor_shared import CursorType +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.results import ( + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.synchronous.bulk import BulkWriteError +from pymongo.synchronous.collection import Collection, ReturnDocument +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.helpers import next +from pymongo.synchronous.message import _COMMAND_OVERHEAD, _gen_find_command +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.operations import * +from pymongo.synchronous.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestCollectionNoConnect(unittest.TestCase): + """Test Collection features on a client that does not connect.""" + + db: Database + + @classmethod + def setUpClass(cls): + cls.db = MongoClient(connect=False).pymongo_test + + def test_collection(self): + self.assertRaises(TypeError, Collection, self.db, 5) + + def make_col(base, name): + return base[name] + + self.assertRaises(InvalidName, make_col, self.db, "") + self.assertRaises(InvalidName, make_col, self.db, "te$t") + self.assertRaises(InvalidName, make_col, self.db, ".test") + self.assertRaises(InvalidName, make_col, self.db, "test.") + self.assertRaises(InvalidName, make_col, self.db, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "") + self.assertRaises(InvalidName, make_col, self.db.test, "te$t") + self.assertRaises(InvalidName, make_col, self.db.test, ".test") + self.assertRaises(InvalidName, make_col, self.db.test, "test.") + self.assertRaises(InvalidName, make_col, self.db.test, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t") + + def test_getattr(self): + coll = self.db.test + self.assertTrue(isinstance(coll["_does_not_exist"], Collection)) + + with self.assertRaises(AttributeError) as context: + coll._does_not_exist + + # Message should be: + # "AttributeError: Collection has no attribute '_does_not_exist'. To + # access the test._does_not_exist collection, use + # database['test._does_not_exist']." + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + coll2 = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertEqual(coll2.write_concern, WriteConcern(w=0)) + self.assertNotEqual(coll.write_concern, coll2.write_concern) + coll3 = coll2.subcoll + self.assertEqual(coll2.write_concern, coll3.write_concern) + coll4 = coll2["subcoll"] + self.assertEqual(coll2.write_concern, coll4.write_concern) + + def test_iteration(self): + coll = self.db.coll + if "PyPy" in sys.version and sys.version_info < (3, 8, 15): + msg = "'NoneType' object is not callable" + else: + if _IS_SYNC: + msg = "'Collection' object is not iterable" + else: + msg = "'AsyncCollection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, msg): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, msg): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) + + +class TestCollection(IntegrationTest): + w: int + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.w = client_context.w # type: ignore + + @classmethod + def tearDownClass(cls): + if _IS_SYNC: + cls.db.drop_collection("test_large_limit") # type: ignore[unused-coroutine] + else: + asyncio.run(cls.async_tearDownClass()) + + @classmethod + def async_tearDownClass(cls): + cls.db.drop_collection("test_large_limit") + + def setUp(self): + self.db.test.drop() + + def tearDown(self): + self.db.test.drop() + + @contextlib.contextmanager + def write_concern_collection(self): + if client_context.is_rs: + with self.assertRaises(WriteConcernError): + # Unsatisfiable write concern. + yield Collection( + self.db, + "test", + write_concern=WriteConcern(w=len(client_context.nodes) + 1), + ) + else: + yield self.db.test + + def test_equality(self): + self.assertTrue(isinstance(self.db.test, Collection)) + self.assertEqual(self.db.test, self.db["test"]) + self.assertEqual(self.db.test, Collection(self.db, "test")) + self.assertEqual(self.db.test.mike, self.db["test.mike"]) + self.assertEqual(self.db.test["mike"], self.db["test.mike"]) + + def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + + def test_create(self): + # No Exception. + db = client_context.client.pymongo_test + db.create_test_no_wc.drop() + + def lambda_test(): + return "create_test_no_wc" not in db.list_collection_names() + + def lambda_test_2(): + return "create_test_no_wc" in db.list_collection_names() + + wait_until( + lambda_test, + "drop create_test_no_wc collection", + ) + db.create_collection("create_test_no_wc") + wait_until( + lambda_test_2, + "create create_test_no_wc collection", + ) + # SERVER-33317 + if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): + with self.assertRaises(OperationFailure): + db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) + + def test_drop_nonexistent_collection(self): + self.db.drop_collection("test") + self.assertFalse("test" in self.db.list_collection_names()) + + # No exception + self.db.drop_collection("test") + + def test_create_indexes(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.create_indexes("foo") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.create_indexes(["foo"]) # type: ignore[list-item] + self.assertRaises(TypeError, IndexModel, 5) + self.assertRaises(ValueError, IndexModel, []) + + db.test.drop_indexes() + db.test.insert_one({}) + self.assertEqual(len(db.test.index_information()), 1) + + db.test.create_indexes([IndexModel("hello")]) + db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) + + # Tuple instead of list. + db.test.create_indexes([IndexModel((("world", ASCENDING),))]) + + self.assertEqual(len(db.test.index_information()), 4) + + db.test.drop_indexes() + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) + self.assertEqual(names, ["hello_world"]) + + db.test.drop_indexes() + self.assertEqual(len(db.test.index_information()), 1) + db.test.create_indexes([IndexModel("hello")]) + self.assertTrue("hello_1" in db.test.index_information()) + + db.test.drop_indexes() + self.assertEqual(len(db.test.index_information()), 1) + names = db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) + info = db.test.index_information() + for name in names: + self.assertTrue(name in info) + + db.test.drop() + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + db.test.create_indexes([IndexModel("a", unique=True)]) + + with self.write_concern_collection() as coll: + coll.create_indexes([IndexModel("hello")]) + + @client_context.require_version_max(4, 3, -1) + def test_create_indexes_commitQuorum_requires_44(self): + db = self.db + with self.assertRaisesRegex( + ConfigurationError, + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", + ): + db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + @client_context.require_no_standalone + @client_context.require_version_min(4, 4, -1) + def test_create_indexes_commitQuorum(self): + self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + def test_create_index(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.create_index(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.create_index([]) + + db.test.drop_indexes() + db.test.insert_one({}) + self.assertEqual(len(db.test.index_information()), 1) + + db.test.create_index("hello") + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + + # Tuple instead of list. + db.test.create_index((("world", ASCENDING),)) + + self.assertEqual(len(db.test.index_information()), 4) + + db.test.drop_indexes() + ix = db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") + self.assertEqual(ix, "hello_world") + + db.test.drop_indexes() + self.assertEqual(len(db.test.index_information()), 1) + db.test.create_index("hello") + self.assertTrue("hello_1" in db.test.index_information()) + + db.test.drop_indexes() + self.assertEqual(len(db.test.index_information()), 1) + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + self.assertTrue("hello_-1_world_1" in db.test.index_information()) + + db.test.drop_indexes() + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertTrue("hello_-1_world_1" in db.test.index_information()) + + db.test.drop() + db.test.insert_one({"a": 1}) + db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + db.test.create_index("a", unique=True) + + with self.write_concern_collection() as coll: + coll.create_index([("hello", DESCENDING)]) + + db.test.create_index(["hello", "world"]) + db.test.create_index(["hello", ("world", DESCENDING)]) + db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + + def test_drop_index(self): + db = self.db + db.test.drop_indexes() + db.test.create_index("hello") + name = db.test.create_index("goodbye") + + self.assertEqual(len(db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + db.test.drop_index(name) + + # Drop it again. + with self.assertRaises(OperationFailure): + db.test.drop_index(name) + self.assertEqual(len(db.test.index_information()), 2) + self.assertTrue("hello_1" in db.test.index_information()) + + db.test.drop_indexes() + db.test.create_index("hello") + name = db.test.create_index("goodbye") + + self.assertEqual(len(db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + db.test.drop_index([("goodbye", ASCENDING)]) + self.assertEqual(len(db.test.index_information()), 2) + self.assertTrue("hello_1" in db.test.index_information()) + + with self.write_concern_collection() as coll: + coll.drop_index("hello_1") + + @client_context.require_no_mongos + @client_context.require_test_commands + def test_index_management_max_time_ms(self): + coll = self.db.test + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") + try: + with self.assertRaises(ExecutionTimeout): + coll.create_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.create_indexes([IndexModel("foo")], maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.drop_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.drop_indexes(maxTimeMS=1) + finally: + self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + + def test_list_indexes(self): + db = self.db + db.test.drop() + db.test.insert_one({}) # create collection + + def map_indexes(indexes): + return {index["name"]: index for index in indexes} + + indexes = (db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 1) + self.assertTrue("_id_" in map_indexes(indexes)) + + db.test.create_index("hello") + indexes = (db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 2) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) + + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + indexes = (db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 3) + index_map = map_indexes(indexes) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) + self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) + + # List indexes on a collection that does not exist. + indexes = (db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + # List indexes on a database that does not exist. + indexes = (db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + def test_index_info(self): + db = self.db + db.test.drop() + db.test.insert_one({}) # create collection + self.assertEqual(len(db.test.index_information()), 1) + self.assertTrue("_id_" in db.test.index_information()) + + db.test.create_index("hello") + self.assertEqual(len(db.test.index_information()), 2) + self.assertEqual((db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)]) + + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual((db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)]) + self.assertEqual(len(db.test.index_information()), 3) + self.assertEqual( + [("hello", DESCENDING), ("world", ASCENDING)], + (db.test.index_information())["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, (db.test.index_information())["hello_-1_world_1"]["unique"]) + + def test_index_geo2d(self): + db = self.db + db.test.drop_indexes() + self.assertEqual("loc_2d", db.test.create_index([("loc", GEO2D)])) + index_info = (db.test.index_information())["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) + + # geoSearch was deprecated in 4.4 and removed in 5.0 + @client_context.require_version_max(4, 5) + @client_context.require_no_mongos + def test_index_haystack(self): + db = self.db + db.test.drop() + _id = db.test.insert_one( + {"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"} + ).inserted_id + db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = ( + db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + ) + )["results"] + + self.assertEqual(2, len(results)) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) + + @client_context.require_no_mongos + def test_index_text(self): + db = self.db + db.test.drop_indexes() + self.assertEqual("t_text", db.test.create_index([("t", TEXT)])) + index_info = (db.test.index_information())["t_text"] + self.assertTrue("weights" in index_info) + + db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] + ) + + # MongoDB 2.6 text search. Create 'score' field in projection. + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) + + # Sort by 'score' field. + cursor.sort([("score", {"$meta": "textScore"})]) + results = cursor.to_list() + self.assertTrue(results[0]["score"] >= results[1]["score"]) + + db.test.drop_indexes() + + def test_index_2dsphere(self): + db = self.db + db.test.drop_indexes() + self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) + + for dummy, info in (db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": + break + else: + self.fail("2dsphere index not found.") + + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + query = {"geo": {"$within": {"$geometry": poly}}} + + # This query will error without a 2dsphere index. + db.test.find(query) + db.test.drop_indexes() + + def test_index_hashed(self): + db = self.db + db.test.drop_indexes() + self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) + + for dummy, info in (db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": + break + else: + self.fail("hashed index not found.") + + db.test.drop_indexes() + + def test_index_sparse(self): + db = self.db + db.test.drop_indexes() + db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue((db.test.index_information())["key_1"]["sparse"]) + + def test_index_background(self): + db = self.db + db.test.drop_indexes() + db.test.create_index([("keya", ASCENDING)]) + db.test.create_index([("keyb", ASCENDING)], background=False) + db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertFalse("background" in (db.test.index_information())["keya_1"]) + self.assertFalse((db.test.index_information())["keyb_1"]["background"]) + self.assertTrue((db.test.index_information())["keyc_1"]["background"]) + + def _drop_dups_setup(self, db): + db.drop_collection("test") + db.test.insert_one({"i": 1}) + db.test.insert_one({"i": 2}) + db.test.insert_one({"i": 2}) # duplicate + db.test.insert_one({"i": 3}) + + def test_index_dont_drop_dups(self): + # Try *not* dropping duplicates + db = self.db + self._drop_dups_setup(db) + + # There's a duplicate + def _test_create(): + db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + + with self.assertRaises(DuplicateKeyError): + _test_create() + + # Duplicate wasn't dropped + self.assertEqual(4, db.test.count_documents({})) + + # Index wasn't created, only the default index on _id + self.assertEqual(1, len(db.test.index_information())) + + # Get the plan dynamically because the explain format will change. + def get_plan_stage(self, root, stage): + if root.get("stage") == stage: + return root + elif "inputStage" in root: + return self.get_plan_stage(root["inputStage"], stage) + elif "inputStages" in root: + for i in root["inputStages"]: + stage = self.get_plan_stage(i, stage) + if stage: + return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. + return self.get_plan_stage(root["queryPlan"], stage) + elif "shards" in root: + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) + if stage: + return stage + return {} + + def test_index_filter(self): + db = self.db + db.drop_collection("test") + + # Test bad filter spec on create. + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression=5) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression={"x": {"$asdasd": 3}}) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression={"$and": 5}) + + self.assertEqual( + "x_1", + db.test.create_index([("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}}), + ) + db.test.insert_one({"x": 5, "a": 2}) + db.test.insert_one({"x": 6, "a": 1}) + + # Operations that use the partial index. + explain = (db.test.find({"x": 6, "a": 1})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = (db.test.find({"x": {"$gt": 1}, "a": 1})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = (db.test.find({"x": 6, "a": {"$lte": 1}})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + # Operations that do not use the partial index. + explain = (db.test.find({"x": 6, "a": {"$lte": 1.6}})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + explain = (db.test.find({"x": 6})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + # Test drop_indexes. + db.test.drop_index("x_1") + explain = (db.test.find({"x": 6, "a": 1})).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + def test_field_selection(self): + db = self.db + db.drop_collection("test") + + doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}} + db.test.insert_one(doc) + + # Test field inclusion + doc = next(db.test.find({}, ["_id"])) + self.assertEqual(list(doc), ["_id"]) + doc = next(db.test.find({}, ["a"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "a"]) + doc = next(db.test.find({}, ["b"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b"]) + doc = next(db.test.find({}, ["c"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + doc = next(db.test.find({}, ["a"])) + self.assertEqual(doc["a"], 1) + doc = next(db.test.find({}, ["b"])) + self.assertEqual(doc["b"], 5) + doc = next(db.test.find({}, ["c"])) + self.assertEqual(doc["c"], {"d": 5, "e": 10}) + + # Test inclusion of fields with dots + doc = next(db.test.find({}, ["c.d"])) + self.assertEqual(doc["c"], {"d": 5}) + doc = next(db.test.find({}, ["c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + doc = next(db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + + doc = next(db.test.find({}, ["b", "c.e"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b", "c"]) + doc = next(db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["b"], 5) + + # Test field exclusion + doc = next(db.test.find({}, {"a": False, "b": 0})) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + + doc = next(db.test.find({}, {"_id": False})) + l = list(doc) + self.assertFalse("_id" in l) + + def test_options(self): + db = self.db + db.drop_collection("test") + db.create_collection("test", capped=True, size=4096) + result = db.test.options() + self.assertEqual(result, {"capped": True, "size": 4096}) + db.drop_collection("test") + + def test_insert_one(self): + db = self.db + db.test.drop() + + document: dict[str, Any] = {"_id": 1000} + result = db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertTrue(isinstance(result.inserted_id, int)) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) + self.assertEqual(1, db.test.count_documents({})) + + document = {"foo": "bar"} + result = db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) + self.assertEqual(2, db.test.count_documents({})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertEqual(document["_id"], result.inserted_id) + self.assertFalse(result.acknowledged) + # The insert failed duplicate key... + + def async_lambda(): + return db.test.count_documents({}) == 2 + + wait_until(async_lambda, "forcing duplicate key error") + + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) + result = db.test.insert_one(document) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertEqual(result.inserted_id, None) + + def test_insert_many(self): + db = self.db + db.test.drop() + + docs: list = [{} for _ in range(5)] + result = db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, ObjectId)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [{"_id": i} for i in range(5)] + result = db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, int)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] + result = db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertEqual([], result.inserted_ids) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + docs: list = [{} for _ in range(5)] + result = db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertFalse(result.acknowledged) + self.assertEqual(20, db.test.count_documents({})) + + def test_insert_many_generator(self): + coll = self.db.test + coll.delete_many({}) + + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + def test_insert_many_invalid(self): + db = self.db + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many({}) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many([]) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(1) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) + + def test_delete_one(self): + self.db.test.drop() + + self.db.test.insert_one({"x": 1}) + self.db.test.insert_one({"y": 1}) + self.db.test.insert_one({"z": 1}) + + result = self.db.test.delete_one({"x": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(2, self.db.test.count_documents({})) + + result = self.db.test.delete_one({"y": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(1, self.db.test.count_documents({})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = db.test.delete_one({"z": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + def lambda_async(): + return db.test.count_documents({}) == 0 + + wait_until(lambda_async, "delete 1 documents") + + def test_delete_many(self): + self.db.test.drop() + + self.db.test.insert_one({"x": 1}) + self.db.test.insert_one({"x": 1}) + self.db.test.insert_one({"y": 1}) + self.db.test.insert_one({"y": 1}) + + result = self.db.test.delete_many({"x": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertEqual(2, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(0, self.db.test.count_documents({"x": 1})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = db.test.delete_many({"y": 1}) + self.assertTrue(isinstance(result, DeleteResult)) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + def lambda_async(): + return db.test.count_documents({}) == 0 + + wait_until(lambda_async, "delete 2 documents") + + def test_command_document_too_large(self): + large = "*" * (client_context.max_bson_size + _COMMAND_OVERHEAD) + coll = self.db.test + with self.assertRaises(DocumentTooLarge): + coll.insert_one({"data": large}) + # update_one and update_many are the same + with self.assertRaises(DocumentTooLarge): + coll.replace_one({}, {"data": large}) + with self.assertRaises(DocumentTooLarge): + coll.delete_one({"data": large}) + + def test_write_large_document(self): + max_size = client_context.max_bson_size + half_size = int(max_size / 2) + max_str = "x" * max_size + half_str = "x" * half_size + self.assertEqual(max_size, 16777216) + + with self.assertRaises(OperationFailure): + self.db.test.insert_one({"foo": max_str}) + with self.assertRaises(OperationFailure): + self.db.test.replace_one({}, {"foo": max_str}, upsert=True) + with self.assertRaises(OperationFailure): + self.db.test.insert_many([{"x": 1}, {"foo": max_str}]) + self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) + + self.db.test.insert_one({"bar": "x"}) + # Use w=0 here to test legacy doc size checking in all server versions + unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(DocumentTooLarge): + unack_coll.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 14)}) + self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) + + def test_insert_bypass_document_validation(self): + db = self.db + db.test.drop() + db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test insert_one + with self.assertRaises(OperationFailure): + db.test.insert_one({"_id": 1, "x": 100}) + result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertEqual(1, result.inserted_id) + result = db.test.insert_one({"_id": 2, "a": 0}) + self.assertTrue(isinstance(result, InsertOneResult)) + self.assertEqual(2, result.inserted_id) + + db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) + + def async_lambda(): + return db_w0.test.find_one({"y": 1}) + + wait_until(async_lambda, "find w:0 inserted document") + + # Test insert_many + docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] + with self.assertRaises(OperationFailure): + db.test.insert_many(docs) + result = db.test.insert_many(docs, bypass_document_validation=True) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(97, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, int)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, db.test.count_documents({"x": doc["x"]})) + self.assertTrue(result.acknowledged) + docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] + result = db.test.insert_many(docs) + self.assertTrue(isinstance(result, InsertManyResult)) + self.assertTrue(97, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertTrue(isinstance(_id, int)) + self.assertTrue(_id in result.inserted_ids) + self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) + self.assertTrue(result.acknowledged) + + with self.assertRaises(OperationFailure): + db_w0.test.insert_many( + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) + + def test_replace_bypass_document_validation(self): + db = self.db + db.test.drop() + db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test replace_one + db.test.insert_one({"a": 101}) + with self.assertRaises(OperationFailure): + db.test.replace_one({"a": 101}, {"y": 1}) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual(1, db.test.count_documents({"a": 101})) + db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) + self.assertEqual(0, db.test.count_documents({"a": 101})) + self.assertEqual(1, db.test.count_documents({"y": 1})) + db.test.replace_one({"y": 1}, {"a": 102}) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual(0, db.test.count_documents({"a": 101})) + self.assertEqual(1, db.test.count_documents({"a": 102})) + + db.test.insert_one({"y": 1}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.replace_one({"y": 1}, {"x": 101}) + self.assertEqual(0, db.test.count_documents({"x": 101})) + self.assertEqual(1, db.test.count_documents({"y": 1})) + db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual(1, db.test.count_documents({"x": 101})) + db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False) + self.assertEqual(0, db.test.count_documents({"x": 101})) + self.assertEqual(1, db.test.count_documents({"a": 103})) + + db.test.insert_one({"y": 1}, bypass_document_validation=True) + db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) + + wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") + + def test_update_bypass_document_validation(self): + db = self.db + db.test.drop() + db.test.insert_one({"z": 5}) + db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})])) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test update_one + with self.assertRaises(OperationFailure): + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}) + self.assertEqual(0, db.test.count_documents({"z": -5})) + self.assertEqual(1, db.test.count_documents({"z": 5})) + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) + self.assertEqual(0, db.test.count_documents({"z": 5})) + self.assertEqual(1, db.test.count_documents({"z": -5})) + db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) + self.assertEqual(1, db.test.count_documents({"z": 1})) + self.assertEqual(0, db.test.count_documents({"z": -5})) + + db.test.insert_one({"z": -10}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}) + self.assertEqual(0, db.test.count_documents({"z": -9})) + self.assertEqual(1, db.test.count_documents({"z": -10})) + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) + self.assertEqual(1, db.test.count_documents({"z": -9})) + self.assertEqual(0, db.test.count_documents({"z": -10})) + db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) + self.assertEqual(0, db.test.count_documents({"z": -9})) + self.assertEqual(1, db.test.count_documents({"z": 0})) + + db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) + db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + def async_lambda(): + return db_w0.test.find_one({"y": 1, "x": 1}) + + wait_until(async_lambda, "find w:0 updated document") + + # Test update_many + db.test.insert_many([{"z": i} for i in range(3, 101)]) + db.test.insert_one({"y": 0}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.update_many({}, {"$inc": {"z": -100}}) + self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) + self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100})) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) + db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(50, db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}})) + + db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + with self.assertRaises(OperationFailure): + db.test.update_many({}, {"$inc": {"z": 1}}) + self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) + self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}})) + db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(150, db.test.count_documents({"z": {"$lte": 0}})) + db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(150, db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) + + db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + def async_lambda(): + return db_w0.test.count_documents({"m": 1, "x": 1}) == 2 + + wait_until(async_lambda, "find w:0 updated documents") + + def test_bypass_document_validation_bulk_write(self): + db = self.db + db.test.drop() + db.create_collection("test", validator={"a": {"$gte": 0}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] + db.test.bulk_write(ops, bypass_document_validation=True) + + self.assertEqual(3, db.test.count_documents({})) + self.assertEqual(1, db.test.count_documents({"a": -11})) + self.assertEqual(1, db.test.count_documents({"a": -1})) + self.assertEqual(1, db.test.count_documents({"a": -9})) + + # Assert that the operations would fail without bypass_doc_val + for op in ops: + with self.assertRaises(BulkWriteError): + db.test.bulk_write([op]) + + with self.assertRaises(OperationFailure): + db_w0.test.bulk_write(ops, bypass_document_validation=True) + + def test_find_by_default_dct(self): + db = self.db + db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] + self.assertIsNotNone(db.test.find_one(dct)) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) + + def test_find_w_fields(self): + db = self.db + db.test.delete_many({}) + + db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) + self.assertEqual(1, db.test.count_documents({})) + doc = next(db.test.find({})) + self.assertTrue("x" in doc) + doc = next(db.test.find({})) + self.assertTrue("mike" in doc) + doc = next(db.test.find({})) + self.assertTrue("extra thing" in doc) + doc = next(db.test.find({}, ["x", "mike"])) + self.assertTrue("x" in doc) + doc = next(db.test.find({}, ["x", "mike"])) + self.assertTrue("mike" in doc) + doc = next(db.test.find({}, ["x", "mike"])) + self.assertFalse("extra thing" in doc) + doc = next(db.test.find({}, ["mike"])) + self.assertFalse("x" in doc) + doc = next(db.test.find({}, ["mike"])) + self.assertTrue("mike" in doc) + doc = next(db.test.find({}, ["mike"])) + self.assertFalse("extra thing" in doc) + + @no_type_check + def test_fields_specifier_as_dict(self): + db = self.db + db.test.delete_many({}) + + db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) + + self.assertEqual([1, 2, 3], (db.test.find_one())["x"]) + self.assertEqual([2, 3], (db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) + self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) + self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) + + def test_find_w_regex(self): + db = self.db + db.test.delete_many({}) + + db.test.insert_one({"x": "hello_world"}) + db.test.insert_one({"x": "hello_mike"}) + db.test.insert_one({"x": "hello_mikey"}) + db.test.insert_one({"x": "hello_test"}) + + self.assertEqual(len((db.test.find()).to_list()), 4) + self.assertEqual(len((db.test.find({"x": re.compile("^hello.*")})).to_list()), 4) + self.assertEqual(len((db.test.find({"x": re.compile("ello")})).to_list()), 4) + self.assertEqual(len((db.test.find({"x": re.compile("^hello$")})).to_list()), 0) + self.assertEqual(len((db.test.find({"x": re.compile("^hello_mi.*$")})).to_list()), 2) + + def test_id_can_be_anything(self): + db = self.db + + db.test.delete_many({}) + auto_id = {"hello": "world"} + db.test.insert_one(auto_id) + self.assertTrue(isinstance(auto_id["_id"], ObjectId)) + + numeric = {"_id": 240, "hello": "world"} + db.test.insert_one(numeric) + self.assertEqual(numeric["_id"], 240) + + obj = {"_id": numeric, "hello": "world"} + db.test.insert_one(obj) + self.assertEqual(obj["_id"], numeric) + + for x in db.test.find(): + self.assertEqual(x["hello"], "world") + self.assertTrue("_id" in x) + + def test_unique_index(self): + db = self.db + db.drop_collection("test") + db.test.create_index("hello") + + # No error. + db.test.insert_one({"hello": "world"}) + db.test.insert_one({"hello": "world"}) + + db.drop_collection("test") + db.test.create_index("hello", unique=True) + + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"hello": "world"}) + db.test.insert_one({"hello": "world"}) + + def test_duplicate_key_error(self): + db = self.db + db.drop_collection("test") + + db.test.create_index("x", unique=True) + + db.test.insert_one({"_id": 1, "x": 1}) + + with self.assertRaises(DuplicateKeyError) as context: + db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + + with self.assertRaises(DuplicateKeyError) as context: + db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + self.assertEqual(1, db.test.count_documents({})) + + def test_write_error_text_handling(self): + db = self.db + db.drop_collection("test") + + db.test.create_index("text", unique=True) + + # Test workaround for SERVER-24007 + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) + + text = utf_8_decode(data, None, True) + db.test.insert_one({"text": text}) + + # Should raise DuplicateKeyError, not InvalidBSON + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"text": text}) + + with self.assertRaises(DuplicateKeyError): + db.test.replace_one({"_id": ObjectId()}, {"text": text}, upsert=True) + + # Should raise BulkWriteError, not InvalidBSON + with self.assertRaises(BulkWriteError): + db.test.insert_many([{"text": text}]) + + def test_write_error_unicode(self): + coll = self.db.test + self.addCleanup(coll.drop) + + coll.create_index("a", unique=True) + coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + coll.insert_one({"a": "unicode \U0001f40d"}) + + # Once more for good measure. + self.assertIn("E11000 duplicate key error", str(ctx.exception)) + + def test_wtimeout(self): + # Ensure setting wtimeout doesn't disable write concern altogether. + # See SERVER-12596. + collection = self.db.test + collection.drop() + collection.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + coll.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + coll.insert_one({"_id": 1}) + + def test_error_code(self): + try: + self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) + except OperationFailure as exc: + self.assertTrue(exc.code in (9, 10147, 16840, 17009)) + # Just check that we set the error document. Fields + # vary by MongoDB version. + self.assertTrue(exc.details is not None) + else: + self.fail("OperationFailure was not raised") + + def test_index_on_subfield(self): + db = self.db + db.drop_collection("test") + + db.test.insert_one({"hello": {"a": 4, "b": 5}}) + db.test.insert_one({"hello": {"a": 7, "b": 2}}) + db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + db.drop_collection("test") + db.test.create_index("hello.a", unique=True) + + db.test.insert_one({"hello": {"a": 4, "b": 5}}) + db.test.insert_one({"hello": {"a": 7, "b": 2}}) + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + def test_replace_one(self): + db = self.db + db.drop_collection("test") + + with self.assertRaises(ValueError): + db.test.replace_one({}, {"$set": {"x": 1}}) + + id1 = (db.test.insert_one({"x": 1})).inserted_id + result = db.test.replace_one({"x": 1}, {"y": 1}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"y": 1})) + self.assertEqual(0, db.test.count_documents({"x": 1})) + self.assertEqual((db.test.find_one(id1))["y"], 1) # type: ignore + + replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) + result = db.test.replace_one({"y": 1}, replacement, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"z": 1})) + self.assertEqual(0, db.test.count_documents({"y": 1})) + self.assertEqual((db.test.find_one(id1))["z"], 1) # type: ignore + + result = db.test.replace_one({"x": 2}, {"y": 2}, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(0, result.matched_count) + self.assertTrue(result.modified_count in (None, 0)) + self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"y": 2})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.replace_one({"x": 0}, {"y": 0}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + def test_update_one(self): + db = self.db + db.drop_collection("test") + + with self.assertRaises(ValueError): + db.test.update_one({}, {"x": 1}) + + id1 = (db.test.insert_one({"x": 5})).inserted_id + result = db.test.update_one({}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((db.test.find_one(id1))["x"], 6) # type: ignore + + id2 = (db.test.insert_one({"x": 1})).inserted_id + result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((db.test.find_one(id1))["x"], 7) # type: ignore + self.assertEqual((db.test.find_one(id2))["x"], 1) # type: ignore + + result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(0, result.matched_count) + self.assertTrue(result.modified_count in (None, 0)) + self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + def test_update_many(self): + db = self.db + db.drop_collection("test") + + with self.assertRaises(ValueError): + db.test.update_many({}, {"x": 1}) + + db.test.insert_one({"x": 4, "y": 3}) + db.test.insert_one({"x": 5, "y": 5}) + db.test.insert_one({"x": 4, "y": 4}) + + result = db.test.update_many({"x": 4}, {"$set": {"y": 5}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(2, result.matched_count) + self.assertTrue(result.modified_count in (None, 2)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(3, db.test.count_documents({"y": 5})) + + result = db.test.update_many({"x": 5}, {"$set": {"y": 6}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, db.test.count_documents({"y": 6})) + + result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertEqual(0, result.matched_count) + self.assertTrue(result.modified_count in (None, 0)) + self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) + self.assertTrue(isinstance(result, UpdateResult)) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + def test_update_check_keys(self): + self.db.drop_collection("test") + self.assertTrue(self.db.test.insert_one({"hello": "world"})) + + # Modify shouldn't check keys... + self.assertTrue( + self.db.test.update_one({"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True) + ) + + # I know this seems like testing the server but I'd like to be notified + # by CI if the server's behavior changes here. + doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) + with self.assertRaises(OperationFailure): + self.db.test.update_one({"hello": "world"}, doc, upsert=True) + + # This is going to cause keys to be checked and raise InvalidDocument. + # That's OK assuming the server's behavior in the previous assert + # doesn't change. If the behavior changes checking the first key for + # '$' in update won't be good enough anymore. + doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) + with self.assertRaises(OperationFailure): + self.db.test.replace_one({"hello": "world"}, doc, upsert=True) + + # Replace with empty document + self.assertNotEqual(0, (self.db.test.replace_one({"hello": "world"}, {})).matched_count) + + def test_acknowledged_delete(self): + db = self.db + db.drop_collection("test") + db.test.insert_many([{"x": 1}, {"x": 1}]) + self.assertEqual(2, (db.test.delete_many({})).deleted_count) + self.assertEqual(0, (db.test.delete_many({})).deleted_count) + + @client_context.require_version_max(4, 9) + def test_manual_last_error(self): + coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) + coll.insert_one({"x": 1}) + self.db.command("getlasterror", w=1, wtimeout=1) + + def test_count_documents(self): + db = self.db + db.drop_collection("test") + self.addCleanup(db.drop_collection, "test") + + self.assertEqual(db.test.count_documents({}), 0) + db.wrong.insert_many([{}, {}]) + self.assertEqual(db.test.count_documents({}), 0) + db.test.insert_many([{}, {}]) + self.assertEqual(db.test.count_documents({}), 2) + db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) + + def test_estimated_document_count(self): + db = self.db + db.drop_collection("test") + self.addCleanup(db.drop_collection, "test") + + self.assertEqual(db.test.estimated_document_count(), 0) + db.wrong.insert_many([{}, {}]) + self.assertEqual(db.test.estimated_document_count(), 0) + db.test.insert_many([{}, {}]) + self.assertEqual(db.test.estimated_document_count(), 2) + + def test_aggregate(self): + db = self.db + db.drop_collection("test") + db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + result = db.test.aggregate([pipeline]) + self.assertTrue(isinstance(result, CommandCursor)) + self.assertEqual([{"foo": [1, 2]}], result.to_list()) + + # Test write concern. + with self.write_concern_collection() as coll: + coll.aggregate([{"$out": "output-collection"}]) + + def test_aggregate_raw_bson(self): + db = self.db + db.drop_collection("test") + db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) + result = coll.aggregate([pipeline]) + self.assertTrue(isinstance(result, CommandCursor)) + first_result = next(result) + self.assertIsInstance(first_result, RawBSONDocument) + self.assertEqual([1, 2], list(first_result["foo"])) + + def test_aggregation_cursor_validation(self): + db = self.db + projection = {"$project": {"_id": "$_id"}} + cursor = db.test.aggregate([projection], cursor={}) + self.assertTrue(isinstance(cursor, CommandCursor)) + + def test_aggregation_cursor(self): + db = self.db + if client_context.has_secondaries: + # Test that getMore messages are sent to the right server. + db = self.client.get_database( + db.name, + read_preference=ReadPreference.SECONDARY, + write_concern=WriteConcern(w=self.w), + ) + + for collection_size in (10, 1000): + db.drop_collection("test") + db.test.insert_many([{"_id": i} for i in range(collection_size)]) + expected_sum = sum(range(collection_size)) + # Use batchSize to ensure multiple getMore messages + cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) + + self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor.to_list())) + + # Test that batchSize is handled properly. + cursor = db.test.aggregate([], batchSize=5) + self.assertEqual(5, len(cursor._data)) + # Force a getMore + cursor._data.clear() + next(cursor) + # batchSize - 1 + self.assertEqual(4, len(cursor._data)) + # Exhaust the cursor. There shouldn't be any errors. + for _doc in cursor: + pass + + def test_aggregation_cursor_alive(self): + self.db.test.delete_many({}) + self.db.test.insert_many([{} for _ in range(3)]) + self.addCleanup(self.db.test.delete_many, {}) + cursor = self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) + n = 0 + while True: + cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + def test_invalid_session_parameter(self): + def try_invalid_session(): + with self.db.test.aggregate([], {}): # type:ignore + pass + + with self.assertRaisesRegex(ValueError, "must be a ClientSession"): + try_invalid_session() + + def test_large_limit(self): + db = self.db + db.drop_collection("test_large_limit") + db.test_large_limit.create_index([("x", 1)]) + my_str = "mongomongo" * 1000 + + db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) + + i = 0 + y = 0 + for doc in (db.test_large_limit.find(limit=1900)).sort([("x", 1)]): + i += 1 + y += doc["x"] + + self.assertEqual(1900, i) + self.assertEqual((1900 * 1899) / 2, y) + + def test_find_kwargs(self): + db = self.db + db.drop_collection("test") + db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, db.test.count_documents({})) + + total = 0 + for x in db.test.find({}, skip=4, limit=2): + total += x["x"] + + self.assertEqual(9, total) + + def test_rename(self): + db = self.db + db.drop_collection("test") + db.drop_collection("foo") + + with self.assertRaises(TypeError): + db.test.rename(5) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + db.test.rename("") + with self.assertRaises(InvalidName): + db.test.rename("te$t") + with self.assertRaises(InvalidName): + db.test.rename(".test") + with self.assertRaises(InvalidName): + db.test.rename("test.") + with self.assertRaises(InvalidName): + db.test.rename("tes..t") + + self.assertEqual(0, db.test.count_documents({})) + self.assertEqual(0, db.foo.count_documents({})) + + db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, db.test.count_documents({})) + + db.test.rename("foo") + + self.assertEqual(0, db.test.count_documents({})) + self.assertEqual(10, db.foo.count_documents({})) + + x = 0 + for doc in db.foo.find(): + self.assertEqual(x, doc["x"]) + x += 1 + + db.test.insert_one({}) + with self.assertRaises(OperationFailure): + db.foo.rename("test") + db.foo.rename("test", dropTarget=True) + + with self.write_concern_collection() as coll: + coll.rename("foo") + + @no_type_check + def test_find_one(self): + db = self.db + db.drop_collection("test") + + _id = (db.test.insert_one({"hello": "world", "foo": "bar"})).inserted_id + + self.assertEqual("world", (db.test.find_one())["hello"]) + self.assertEqual(db.test.find_one(_id), db.test.find_one()) + self.assertEqual(db.test.find_one(None), db.test.find_one()) + self.assertEqual(db.test.find_one({}), db.test.find_one()) + self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) + + self.assertTrue("hello" in db.test.find_one(projection=["hello"])) + self.assertTrue("hello" not in db.test.find_one(projection=["foo"])) + + self.assertTrue("hello" in db.test.find_one(projection=("hello",))) + self.assertTrue("hello" not in db.test.find_one(projection=("foo",))) + + self.assertTrue("hello" in db.test.find_one(projection={"hello"})) + self.assertTrue("hello" not in db.test.find_one(projection={"foo"})) + + self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) + self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) + + self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": True}))) + self.assertTrue("hello" in list(db.test.find_one(projection={}))) + self.assertTrue("hello" in list(db.test.find_one(projection=[]))) + + self.assertEqual(None, db.test.find_one({"hello": "foo"})) + self.assertEqual(None, db.test.find_one(ObjectId())) + + def test_find_one_non_objectid(self): + db = self.db + db.drop_collection("test") + + db.test.insert_one({"_id": 5}) + + self.assertTrue(db.test.find_one(5)) + self.assertFalse(db.test.find_one(6)) + + def test_find_one_with_find_args(self): + db = self.db + db.drop_collection("test") + + db.test.insert_many([{"x": i} for i in range(1, 4)]) + + self.assertEqual(1, (db.test.find_one())["x"]) + self.assertEqual(2, (db.test.find_one(skip=1, limit=2))["x"]) + + def test_find_with_sort(self): + db = self.db + db.drop_collection("test") + + db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}]) + + self.assertEqual(2, (db.test.find_one())["x"]) + self.assertEqual(1, (db.test.find_one(sort=[("x", 1)]))["x"]) + self.assertEqual(3, (db.test.find_one(sort=[("x", -1)]))["x"]) + + def to_list(things): + return [thing["x"] for thing in things] + + self.assertEqual([2, 1, 3], to_list(db.test.find())) + self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)]))) + self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)]))) + + with self.assertRaises(TypeError): + db.test.find(sort=5) + with self.assertRaises(TypeError): + db.test.find(sort="hello") + with self.assertRaises(TypeError): + db.test.find(sort=["hello", 1]) + + # TODO doesn't actually test functionality, just that it doesn't blow up + def test_cursor_timeout(self): + (self.db.test.find(no_cursor_timeout=True)).to_list() + (self.db.test.find(no_cursor_timeout=False)).to_list() + + def test_exhaust(self): + if is_mongos(self.db.client): + with self.assertRaises(InvalidOperation): + self.db.test.find(cursor_type=CursorType.EXHAUST) + return + + # Limit is incompatible with exhaust. + with self.assertRaises(InvalidOperation): + self.db.test.find(cursor_type=CursorType.EXHAUST, limit=5) + cur = self.db.test.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(InvalidOperation): + cur.limit(5) + cur = self.db.test.find(limit=5) + with self.assertRaises(InvalidOperation): + cur.add_option(64) + cur = self.db.test.find() + cur.add_option(64) + with self.assertRaises(InvalidOperation): + cur.limit(5) + + self.db.drop_collection("test") + # Insert enough documents to require more than one batch + self.db.test.insert_many([{"i": i} for i in range(150)]) + + client = rs_or_single_client(maxPoolSize=1) + self.addCleanup(client.close) + pool = get_pool(client) + + # Make sure the socket is returned after exhaustion. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) + next(cur) + self.assertEqual(0, len(pool.conns)) + for _ in cur: + pass + self.assertEqual(1, len(pool.conns)) + + # Same as previous but don't call next() + for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): + pass + self.assertEqual(1, len(pool.conns)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) + if client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. + for _ in range(3): + next(cur) + else: + next(cur) + self.assertEqual(0, len(pool.conns)) + # if sys.platform.startswith("java") or "PyPy" in sys.version: + # # Don't wait for GC or use gc.collect(), it's unreliable. + cur.close() + cur = None + # Wait until the background thread returns the socket. + wait_until(lambda: pool.active_sockets == 0, "return socket") + # The socket should be discarded. + self.assertEqual(0, len(pool.conns)) + + def test_distinct(self): + self.db.drop_collection("test") + + test = self.db.test + test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + + distinct = test.distinct("a") + distinct.sort() + + self.assertEqual([1, 2, 3], distinct) + + distinct = (test.find({"a": {"$gt": 1}})).distinct("a") + distinct.sort() + self.assertEqual([2, 3], distinct) + + distinct = test.distinct("a", {"a": {"$gt": 1}}) + distinct.sort() + self.assertEqual([2, 3], distinct) + + self.db.drop_collection("test") + + test.insert_one({"a": {"b": "a"}, "c": 12}) + test.insert_one({"a": {"b": "b"}, "c": 12}) + test.insert_one({"a": {"b": "c"}, "c": 12}) + test.insert_one({"a": {"b": "c"}, "c": 12}) + + distinct = test.distinct("a.b") + distinct.sort() + + self.assertEqual(["a", "b", "c"], distinct) + + def test_query_on_query_field(self): + self.db.drop_collection("test") + self.db.test.insert_one({"query": "foo"}) + self.db.test.insert_one({"bar": "foo"}) + + self.assertEqual(1, self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len((self.db.test.find({"query": {"$ne": None}})).to_list())) + + def test_min_query(self): + self.db.drop_collection("test") + self.db.test.insert_many([{"x": 1}, {"x": 2}]) + self.db.test.create_index("x") + + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") + + docs = cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(2, docs[0]["x"]) + + def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + self.db.test.drop() + n_docs = client_context.max_write_batch_size + 100 + self.db.test.insert_many([{} for _ in range(n_docs)]) + self.assertEqual(n_docs, self.db.test.count_documents({})) + self.db.test.drop() + + def test_insert_many_large_batch(self): + # Tests legacy insert. + db = self.client.test_insert_large_batch + self.addCleanup(self.client.drop_database, "test_insert_large_batch") + max_bson_size = client_context.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = "x" * int(max_bson_size / 2) + + # Batch insert that requires 2 batches. + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] + db.collection_0.insert_many(successful_insert) + self.assertEqual(4, db.collection_0.count_documents({})) + + db.collection_0.drop() + + # Test that inserts fail after first error. + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] + + with self.assertRaises(BulkWriteError): + db.collection_1.insert_many(insert_second_fails) + + self.assertEqual(1, db.collection_1.count_documents({})) + + db.collection_1.drop() + + # 2 batches, 2nd insert fails, unacknowledged, ordered. + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_second_fails) + + def async_lambda(): + return db.collection_2.count_documents({}) == 1 + + wait_until(async_lambda, "insert 1 document", timeout=60) + + db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] + + with self.assertRaises(OperationFailure) as context: + db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn("id1", str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, db.collection_3.count_documents({})) + + db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) + unack_coll.insert_many(insert_two_failures, ordered=False) + + def async_lambda(): + return db.collection_4.count_documents({}) == 2 + + # Only the first and third documents are inserted. + wait_until(async_lambda, "insert 2 documents", timeout=60) + + db.collection_4.drop() + + def test_messages_with_unicode_collection_names(self): + db = self.db + + db["Employés"].insert_one({"x": 1}) + db["Employés"].replace_one({"x": 1}, {"x": 2}) + db["Employés"].delete_many({}) + db["Employés"].find_one() + (db["Employés"].find()).to_list() + + def test_drop_indexes_non_existent(self): + self.db.drop_collection("test") + self.db.test.drop_indexes() + + # This is really a bson test but easier to just reproduce it here... + # (Shame on me) + def test_bad_encode(self): + c = self.db.test + c.drop() + with self.assertRaises(InvalidDocument): + c.insert_one({"x": c}) + + class BadGetAttr(dict): + def __getattr__(self, name): + pass + + bad = BadGetAttr([("foo", "bar")]) + c.insert_one({"bad": bad}) + self.assertEqual("bar", (c.find_one())["bad"]["foo"]) # type: ignore + + def test_array_filters_validation(self): + # array_filters must be a list. + c = self.db.test + with self.assertRaises(TypeError): + c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + update = {"$set": {"a": 1}} + c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] + + def test_array_filters_unacknowledged(self): + c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(ConfigurationError): + c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + c_w0.find_one_and_update({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + + def test_find_one_and(self): + c = self.db.test + c.drop() + c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual({"_id": 1, "i": 1}, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, c.find_one({"_id": 1})) + + self.assertEqual(None, c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) + + c.drop() + for j in range(5): + c.insert_one({"j": j, "i": 0}) + + sort = [("j", DESCENDING)] + self.assertEqual(4, (c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) + + def test_find_one_and_write_concern(self): + listener = EventListener() + db = (single_client(event_listeners=[listener]))[self.db.name] + # non-default WriteConcern. + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) + # default WriteConcern. + c_default = db.get_collection("test", write_concern=WriteConcern()) + # Authenticate the client and throw out auth commands from the listener. + db.command("ping") + listener.reset() + c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + # Test write concern errors. + if client_context.is_rs: + c_wc_error = db.get_collection( + "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + ) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_replace( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_delete( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + listener.reset() + + c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + def test_find_with_nested(self): + c = self.db.test + c.drop() + c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] + self.assertEqual( + [2], + [ + i["i"] + for i in c.find( + { + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, + ] + } + ) + ], + ) + + self.assertEqual( + [0, 1, 2], + [ + i["i"] + for i in c.find( + { + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, + ] + } + ) + ], + ) + + def test_find_regex(self): + c = self.db.test + c.drop() + c.insert_one({"r": re.compile(".*")}) + + self.assertTrue(isinstance((c.find_one())["r"], Regex)) # type: ignore + for doc in c.find(): + self.assertTrue(isinstance(doc["r"], Regex)) + + def test_find_command_generation(self): + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(Collection(self.db, "test")) + + @client_context.require_version_min(5, 0, 0) + def test_helpers_with_let(self): + c = self.db.test + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (c.find, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([],)), + ] + for let in [10, "str", [], False]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): + helper(*args, let=let) # type: ignore + for helper, args in helpers: + helper(*args, let={}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth.py b/test/test_auth.py index 596c94d562..6bc58e08c7 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -33,12 +33,13 @@ single_client_noauth, ) -from pymongo import MongoClient, monitoring -from pymongo.auth import HAVE_KERBEROS, _build_credentials_tuple +from pymongo import MongoClient +from pymongo.asynchronous.auth import HAVE_KERBEROS, _build_credentials_tuple from pymongo.errors import OperationFailure -from pymongo.hello import HelloCompat -from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP +from pymongo.synchronous import monitoring +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.read_preferences import ReadPreference # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. GSSAPI_HOST = os.environ.get("GSSAPI_HOST") diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 6cd037e204..9ec7e07f3b 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -27,7 +27,7 @@ from test.unified_format import generate_test_classes from pymongo import MongoClient -from pymongo.auth_oidc import OIDCCallback +from pymongo.asynchronous.auth_oidc import OIDCCallback _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") diff --git a/test/test_binary.py b/test/test_binary.py index 517d633aa4..66a57dcb54 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -33,8 +33,8 @@ from bson.binary import * from bson.codec_options import CodecOptions from bson.son import SON -from pymongo.common import validate_uuid_representation -from pymongo.mongo_client import MongoClient +from pymongo.synchronous.common import validate_uuid_representation +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern diff --git a/test/test_bulk.py b/test/test_bulk.py index af0875ec7f..42dbf5b152 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -19,7 +19,7 @@ import uuid from typing import Any, Optional -from pymongo.mongo_client import MongoClient +from pymongo.synchronous.mongo_client import MongoClient sys.path[0:0] = [""] @@ -34,15 +34,15 @@ from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from pymongo.collection import Collection -from pymongo.common import partition_node from pymongo.errors import ( BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure, ) -from pymongo.operations import * +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.common import partition_node +from pymongo.synchronous.operations import * from pymongo.write_concern import WriteConcern diff --git a/test/test_change_stream.py b/test/test_change_stream.py index aa2a7063bb..4d8422667f 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -40,14 +40,14 @@ from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient -from pymongo.command_cursor import CommandCursor from pymongo.errors import ( InvalidOperation, OperationFailure, ServerSelectionTimeoutError, ) -from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.message import _CursorAddress from pymongo.write_concern import WriteConcern @@ -117,7 +117,7 @@ def insert_one_and_check(self, change_stream, doc): def kill_change_stream_cursor(self, change_stream): """Cause a cursor not found error on the next getMore.""" cursor = change_stream._cursor - address = _CursorAddress(cursor.address, cursor._CommandCursor__ns) + address = _CursorAddress(cursor.address, cursor._ns) client = self.watched_collection().database.client client._close_cursor_now(cursor.cursor_id, address) @@ -136,7 +136,7 @@ def test_watch(self): self.assertEqual(1000, change_stream._max_await_time_ms) self.assertEqual(100, change_stream._batch_size) self.assertIsInstance(change_stream._cursor, CommandCursor) - self.assertEqual(1000, change_stream._cursor._CommandCursor__max_await_time_ms) + self.assertEqual(1000, change_stream._cursor._max_await_time_ms) self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) _ = change_stream.next() resume_token = change_stream.resume_token diff --git a/test/test_client.py b/test/test_client.py index 4377d410a9..af71c4890e 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -30,13 +30,14 @@ import sys import threading import time +import warnings from typing import Iterable, Type, no_type_check from unittest import mock from unittest.mock import patch import pytest -from pymongo.operations import _Op +from pymongo.synchronous.operations import _Op sys.path[0:0] = [""] @@ -82,13 +83,6 @@ ) from bson.son import SON from bson.tz_util import utc -from pymongo import event_loggers, message, monitoring -from pymongo.client_options import ClientOptions -from pymongo.command_cursor import CommandCursor -from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT -from pymongo.compression_support import _have_snappy, _have_zstd -from pymongo.cursor import Cursor, CursorType -from pymongo.database import Database from pymongo.driver_info import DriverInfo from pymongo.errors import ( AutoReconnect, @@ -102,16 +96,28 @@ ServerSelectionTimeoutError, WriteConcernError, ) -from pymongo.mongo_client import MongoClient -from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent -from pymongo.pool import _METADATA, ENV_VAR_K8S, Connection, PoolOptions -from pymongo.read_preferences import ReadPreference -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import readable_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TOPOLOGY_TYPE -from pymongo.topology import _ErrorContext -from pymongo.topology_description import TopologyDescription +from pymongo.synchronous import event_loggers, message, monitoring +from pymongo.synchronous.client_options import ClientOptions +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT +from pymongo.synchronous.compression_support import _have_snappy, _have_zstd +from pymongo.synchronous.cursor import Cursor, CursorType +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.synchronous.pool import ( + _METADATA, + ENV_VAR_K8S, + Connection, + PoolOptions, +) +from pymongo.synchronous.read_preferences import ReadPreference +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import readable_server_selector, writable_server_selector +from pymongo.synchronous.settings import TOPOLOGY_TYPE +from pymongo.synchronous.topology import _ErrorContext +from pymongo.synchronous.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern @@ -147,7 +153,7 @@ def test_keyword_arg_defaults(self): serverSelectionTimeoutMS=12000, ) - options = client._MongoClient__options + options = client.options pool_opts = options.pool_options self.assertEqual(None, pool_opts.socket_timeout) # socket.Socket.settimeout takes a float in seconds @@ -160,17 +166,17 @@ def test_keyword_arg_defaults(self): def test_connect_timeout(self): client = MongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) - pool_opts = client._MongoClient__options.pool_options + pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) client = MongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) - pool_opts = client._MongoClient__options.pool_options + pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) client = MongoClient( "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False ) - pool_opts = client._MongoClient__options.pool_options + pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) @@ -319,10 +325,10 @@ def test_metadata(self): metadata = copy.deepcopy(_METADATA) metadata["application"] = {"name": "foobar"} client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) client = MongoClient("foo", 27017, appname="foobar", connect=False) - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) # No error MongoClient(appname="x" * 128) @@ -344,7 +350,7 @@ def test_metadata(self): driver=DriverInfo("FooDriver", "1.2.3", None), connect=False, ) - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) client = MongoClient( @@ -354,7 +360,7 @@ def test_metadata(self): driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), connect=False, ) - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"}) @@ -363,7 +369,7 @@ def test_container_metadata(self): metadata["env"] = {} metadata["env"]["container"] = {"orchestrator": "kubernetes"} client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata["env"], metadata["env"]) def test_kwargs_codec_options(self): @@ -447,7 +453,7 @@ def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") - clopts = c._MongoClient__options + clopts = c.options opts = clopts._options self.assertEqual(opts["tls"], False) @@ -456,13 +462,13 @@ def test_uri_option_precedence(self): def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. - from pymongo.srv_resolver import _resolve + from pymongo.synchronous.srv_resolver import _resolve patched_resolver = FunctionCallRecorder(_resolve) - pymongo.srv_resolver._resolve = patched_resolver + pymongo.synchronous.srv_resolver._resolve = patched_resolver def reset_resolver(): - pymongo.srv_resolver._resolve = _resolve + pymongo.synchronous.srv_resolver._resolve = _resolve self.addCleanup(reset_resolver) @@ -499,7 +505,7 @@ def test_uri_security_options(self): # Matching SSL and TLS options should not cause errors. c = MongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) - self.assertEqual(c._MongoClient__options._options["tls"], False) + self.assertEqual(c.options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): @@ -551,7 +557,7 @@ def test_validate_suggestion(self): with self.assertRaisesRegex(ConfigurationError, expected): MongoClient(**{typo: "standard"}) # type: ignore[arg-type] - @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") def test_detected_environment_logging(self, mock_get_hosts): normal_hosts = [ "normal.host.com", @@ -573,7 +579,7 @@ def test_detected_environment_logging(self, mock_get_hosts): logs = [record.message for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) - @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") def test_detected_environment_warning(self, mock_get_hosts): with self._caplog.at_level(logging.WARN): normal_hosts = [ @@ -611,7 +617,7 @@ def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove connections when maxIdleTimeMS not set client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) @@ -622,7 +628,7 @@ def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, two @@ -636,7 +642,7 @@ def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new connections. client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, @@ -650,7 +656,7 @@ def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn_one: pass # Assert that the pool does not close connections prematurely. @@ -667,12 +673,12 @@ def test_max_idle_time_reaper_removes_stale(self): def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=0.1): client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) self.assertEqual(0, len(server._pool.conns)) # Assert that pool started up at minPoolSize client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) wait_until( lambda: len(server._pool.conns) == 10, "pool initialized with 10 connections", @@ -691,7 +697,7 @@ def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) @@ -705,7 +711,7 @@ def test_max_idle_time_checkout(self): # Test that connections are reused if maxIdleTimeMS is not set. client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector, _Op.TEST) + server = client._get_topology()._select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) @@ -1174,7 +1180,10 @@ def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=100, connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) - client = MongoClient(serverSelectionTimeoutMS=0, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient(serverSelectionTimeoutMS=0, connect=False) + self.assertAlmostEqual(0, client.options.server_selection_timeout) self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) @@ -1186,14 +1195,20 @@ def test_server_selection_timeout(self): client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) - client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) # Test invalid timeout in URI ignored and set to default. - client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) - client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): @@ -1512,7 +1527,7 @@ def test_small_heartbeat_frequency_ms(self): def test_compression(self): def compression_settings(client): - pool_options = client._MongoClient__options.pool_options + pool_options = client.options.pool_options return pool_options._compression_settings uri = "mongodb://localhost:27017/?compressors=zlib" @@ -1535,12 +1550,16 @@ def compression_settings(client): self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar" - client = MongoClient(uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar,zlib" - client = MongoClient(uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) @@ -1548,12 +1567,16 @@ def compression_settings(client): # According to the connection string spec, unsupported values # just raise a warning and are ignored. uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" - client = MongoClient(uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" - client = MongoClient(uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) @@ -1575,7 +1598,9 @@ def compression_settings(client): if not _have_zstd(): uri = "mongodb://localhost:27017/?compressors=zstd" - client = MongoClient(uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + client = MongoClient(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: @@ -1746,7 +1771,7 @@ def test_process_periodic_tasks(self): # Add cursor to kill cursors queue del cursor wait_until( - lambda: client._MongoClient__kill_cursors_queue, + lambda: client._kill_cursors_queue, "waited for cursor to be added to queue", ) client._process_periodic_tasks() # This must not raise or print any exceptions @@ -1826,7 +1851,7 @@ def _test_handshake(self, env_vars, expected_env): os.environ["AWS_REGION"] = "" with rs_or_single_client(serverSelectionTimeoutMS=10000) as client: client.admin.command("ping") - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) def test_handshake_01_aws(self): @@ -2016,7 +2041,7 @@ def test_exhaust_getmore_network_error(self): cursor.next() # Cause a network error. - conn = cursor._Cursor__sock_mgr.conn + conn = cursor._sock_mgr.conn conn.conn.close() # A getmore fails. @@ -2024,7 +2049,7 @@ def test_exhaust_getmore_network_error(self): self.assertTrue(conn.closed) wait_until( - lambda: len(client._MongoClient__kill_cursors_queue) == 0, + lambda: len(client._kill_cursors_queue) == 0, "waited for all killCursor requests to complete", ) # The socket was closed and the semaphore was decremented. diff --git a/test/test_collation.py b/test/test_collation.py index bedf0a2eaa..f4830da5d2 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -21,15 +21,15 @@ from test.utils import EventListener, rs_or_single_client from typing import Any -from pymongo.collation import ( +from pymongo.errors import ConfigurationError +from pymongo.synchronous.collation import ( Collation, CollationAlternate, CollationCaseFirst, CollationMaxVariable, CollationStrength, ) -from pymongo.errors import ConfigurationError -from pymongo.operations import ( +from pymongo.synchronous.operations import ( DeleteMany, DeleteOne, IndexModel, diff --git a/test/test_collection.py b/test/test_collection.py index 1667a3dd03..54f76336d5 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -22,7 +22,7 @@ from collections import defaultdict from typing import Any, Iterable, no_type_check -from pymongo.database import Database +from pymongo.synchronous.database import Database sys.path[0:0] = [""] @@ -45,10 +45,7 @@ from bson.regex import Regex from bson.son import SON from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT -from pymongo.bulk import BulkWriteError -from pymongo.collection import Collection, ReturnDocument -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import CursorType +from pymongo.cursor_shared import CursorType from pymongo.errors import ( ConfigurationError, DocumentTooLarge, @@ -60,17 +57,20 @@ OperationFailure, WriteConcernError, ) -from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command -from pymongo.mongo_client import MongoClient -from pymongo.operations import * from pymongo.read_concern import DEFAULT_READ_CONCERN -from pymongo.read_preferences import ReadPreference from pymongo.results import ( DeleteResult, InsertManyResult, InsertOneResult, UpdateResult, ) +from pymongo.synchronous.bulk import BulkWriteError +from pymongo.synchronous.collection import Collection, ReturnDocument +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.message import _COMMAND_OVERHEAD, _gen_find_command +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.operations import * +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -192,17 +192,22 @@ def test_create(self): lambda: "create_test_no_wc" not in db.list_collection_names(), "drop create_test_no_wc collection", ) + db.create_collection("create_test_no_wc") + wait_until( + lambda: "create_test_no_wc" in db.list_collection_names(), + "create create_test_no_wc collection", + ) + db.create_test_no_wc.drop() Collection(db, name="create_test_no_wc", create=True) wait_until( lambda: "create_test_no_wc" in db.list_collection_names(), "create create_test_no_wc collection", ) + # SERVER-33317 if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): with self.assertRaises(OperationFailure): - Collection( - db, name="create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN, create=True - ) + db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) def test_drop_nonexistent_collection(self): self.db.drop_collection("test") @@ -1519,12 +1524,12 @@ def test_aggregation_cursor(self): # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) - self.assertEqual(5, len(cursor._CommandCursor__data)) # type: ignore + self.assertEqual(5, len(cursor._data)) # Force a getMore - cursor._CommandCursor__data.clear() # type: ignore + cursor._data.clear() next(cursor) # batchSize - 1 - self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore + self.assertEqual(4, len(cursor._data)) # Exhaust the cursor. There shouldn't be any errors. for _doc in cursor: pass diff --git a/test/test_comment.py b/test/test_comment.py index ffbf8d51ca..f9630655c9 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -25,8 +25,8 @@ from test.utils import EventListener, rs_or_single_client from bson.dbref import DBRef -from pymongo.command_cursor import CommandCursor -from pymongo.operations import IndexModel +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.operations import IndexModel class Empty: diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index f021c61f67..8a0f104a79 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -45,7 +45,7 @@ PyMongoError, WaitQueueTimeoutError, ) -from pymongo.monitoring import ( +from pymongo.synchronous.monitoring import ( ConnectionCheckedInEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, @@ -60,9 +60,9 @@ PoolCreatedEvent, PoolReadyEvent, ) -from pymongo.pool import PoolState, _PoolClosedError -from pymongo.read_preferences import ReadPreference -from pymongo.topology_description import updated_topology_description +from pymongo.synchronous.pool import PoolState, _PoolClosedError +from pymongo.synchronous.read_preferences import ReadPreference +from pymongo.synchronous.topology_description import updated_topology_description OBJECT_TYPES = { # Event types. diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index ef8500ae6a..bb80bda932 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -28,9 +28,9 @@ ) from bson import SON -from pymongo import monitoring -from pymongo.collection import Collection from pymongo.errors import NotPrimaryError +from pymongo.synchronous import monitoring +from pymongo.synchronous.collection import Collection from pymongo.write_concern import WriteConcern diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index c9f8dbe4b4..b13e4c8444 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -29,11 +29,14 @@ drop_collections, ) -from pymongo import WriteConcern, operations -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor +from pymongo import WriteConcern from pymongo.errors import PyMongoError -from pymongo.operations import ( +from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, _WriteResult +from pymongo.synchronous import operations +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.operations import ( DeleteMany, DeleteOne, InsertOne, @@ -41,8 +44,6 @@ UpdateMany, UpdateOne, ) -from pymongo.read_concern import ReadConcern -from pymongo.results import BulkWriteResult, _WriteResult # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "v1") diff --git a/test/test_cursor.py b/test/test_cursor.py index a54e025f55..c354c42b33 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -43,12 +43,12 @@ from bson.code import Code from bson.son import SON from pymongo import ASCENDING, DESCENDING -from pymongo.collation import Collation -from pymongo.cursor import Cursor, CursorType from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure -from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.collation import Collation +from pymongo.synchronous.cursor import Cursor, CursorType +from pymongo.synchronous.operations import _IndexList +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern @@ -64,64 +64,64 @@ def test_deepcopy_cursor_littered_with_regexes(self): ) cursor2 = copy.deepcopy(cursor) - self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore + self.assertEqual(cursor._spec, cursor2._spec) def test_add_remove_option(self): cursor = self.db.test.find() - self.assertEqual(0, cursor._Cursor__query_flags) + self.assertEqual(0, cursor._query_flags) cursor.add_option(2) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) - self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.add_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.add_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) - self.assertEqual(162, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(162, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) - self.assertEqual(162, cursor._Cursor__query_flags) + self.assertEqual(162, cursor._query_flags) cursor.add_option(128) - self.assertEqual(162, cursor._Cursor__query_flags) + self.assertEqual(162, cursor._query_flags) cursor.remove_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) - self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) - self.assertEqual(2, cursor._Cursor__query_flags) + self.assertEqual(2, cursor._query_flags) cursor.remove_option(32) - self.assertEqual(2, cursor._Cursor__query_flags) + self.assertEqual(2, cursor._query_flags) # Timeout cursor = self.db.test.find(no_cursor_timeout=True) - self.assertEqual(16, cursor._Cursor__query_flags) + self.assertEqual(16, cursor._query_flags) cursor2 = self.db.test.find().add_option(16) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(16) - self.assertEqual(0, cursor._Cursor__query_flags) + self.assertEqual(0, cursor._query_flags) # Tailable / Await data cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(34, cursor._Cursor__query_flags) + self.assertEqual(34, cursor._query_flags) cursor2 = self.db.test.find().add_option(34) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(32) - self.assertEqual(2, cursor._Cursor__query_flags) + self.assertEqual(2, cursor._query_flags) # Partial cursor = self.db.test.find(allow_partial_results=True) - self.assertEqual(128, cursor._Cursor__query_flags) + self.assertEqual(128, cursor._query_flags) cursor2 = self.db.test.find().add_option(128) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(128) - self.assertEqual(0, cursor._Cursor__query_flags) + self.assertEqual(0, cursor._query_flags) def test_add_remove_option_exhaust(self): # Exhaust - which mongos doesn't support @@ -130,13 +130,13 @@ def test_add_remove_option_exhaust(self): self.db.test.find(cursor_type=CursorType.EXHAUST) else: cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) - self.assertEqual(64, cursor._Cursor__query_flags) + self.assertEqual(64, cursor._query_flags) cursor2 = self.db.test.find().add_option(64) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) - self.assertTrue(cursor._Cursor__exhaust) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + self.assertTrue(cursor._exhaust) cursor.remove_option(64) - self.assertEqual(0, cursor._Cursor__query_flags) - self.assertFalse(cursor._Cursor__exhaust) + self.assertEqual(0, cursor._query_flags) + self.assertFalse(cursor._exhaust) def test_allow_disk_use(self): db = self.db @@ -146,9 +146,9 @@ def test_allow_disk_use(self): self.assertRaises(TypeError, coll.find().allow_disk_use, "baz") cursor = coll.find().allow_disk_use(True) - self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore + self.assertEqual(True, cursor._allow_disk_use) cursor = coll.find().allow_disk_use(False) - self.assertEqual(False, cursor._Cursor__allow_disk_use) # type: ignore + self.assertEqual(False, cursor._allow_disk_use) def test_max_time_ms(self): db = self.db @@ -162,15 +162,15 @@ def test_max_time_ms(self): coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) - self.assertEqual(999, cursor._Cursor__max_time_ms) # type: ignore + self.assertEqual(999, cursor._max_time_ms) cursor = coll.find().max_time_ms(10).max_time_ms(1000) - self.assertEqual(1000, cursor._Cursor__max_time_ms) # type: ignore + self.assertEqual(1000, cursor._max_time_ms) cursor = coll.find().max_time_ms(999) c2 = cursor.clone() - self.assertEqual(999, c2._Cursor__max_time_ms) # type: ignore - self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) # type: ignore - self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) # type: ignore + self.assertEqual(999, c2._max_time_ms) + self.assertTrue("$maxTimeMS" in cursor._query_spec()) + self.assertTrue("$maxTimeMS" in c2._query_spec()) self.assertTrue(coll.find_one(max_time_ms=1000)) @@ -204,24 +204,24 @@ def test_max_await_time_ms(self): # When cursor is not tailable_await cursor = coll.find() - self.assertEqual(None, cursor._Cursor__max_await_time_ms) + self.assertEqual(None, cursor._max_await_time_ms) cursor = coll.find().max_await_time_ms(99) - self.assertEqual(None, cursor._Cursor__max_await_time_ms) + self.assertEqual(None, cursor._max_await_time_ms) # If cursor is tailable_await and timeout is unset cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(None, cursor._Cursor__max_await_time_ms) + self.assertEqual(None, cursor._max_await_time_ms) # If cursor is tailable_await and timeout is set cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) - self.assertEqual(99, cursor._Cursor__max_await_time_ms) + self.assertEqual(99, cursor._max_await_time_ms) cursor = ( coll.find(cursor_type=CursorType.TAILABLE_AWAIT) .max_await_time_ms(10) .max_await_time_ms(90) ) - self.assertEqual(90, cursor._Cursor__max_await_time_ms) + self.assertEqual(90, cursor._max_await_time_ms) listener = AllowListEventListener("find", "getMore") coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test @@ -572,13 +572,13 @@ def cursor_count(cursor, expected_count): cur = db.test.find().batch_size(1) next(cur) # find command batchSize should be 1 - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) next(cur) - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) next(cur) - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) next(cur) - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) def test_limit_and_batch_size(self): db = self.db @@ -587,51 +587,51 @@ def test_limit_and_batch_size(self): curs = db.test.find().limit(0).batch_size(10) next(curs) - self.assertEqual(10, curs._Cursor__retrieved) + self.assertEqual(10, curs._retrieved) curs = db.test.find(limit=0, batch_size=10) next(curs) - self.assertEqual(10, curs._Cursor__retrieved) + self.assertEqual(10, curs._retrieved) curs = db.test.find().limit(-2).batch_size(0) next(curs) - self.assertEqual(2, curs._Cursor__retrieved) + self.assertEqual(2, curs._retrieved) curs = db.test.find(limit=-2, batch_size=0) next(curs) - self.assertEqual(2, curs._Cursor__retrieved) + self.assertEqual(2, curs._retrieved) curs = db.test.find().limit(-4).batch_size(5) next(curs) - self.assertEqual(4, curs._Cursor__retrieved) + self.assertEqual(4, curs._retrieved) curs = db.test.find(limit=-4, batch_size=5) next(curs) - self.assertEqual(4, curs._Cursor__retrieved) + self.assertEqual(4, curs._retrieved) curs = db.test.find().limit(50).batch_size(500) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) curs = db.test.find(limit=50, batch_size=500) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) curs = db.test.find().batch_size(500) next(curs) - self.assertEqual(500, curs._Cursor__retrieved) + self.assertEqual(500, curs._retrieved) curs = db.test.find(batch_size=500) next(curs) - self.assertEqual(500, curs._Cursor__retrieved) + self.assertEqual(500, curs._retrieved) curs = db.test.find().limit(50) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) curs = db.test.find(limit=50) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) # these two might be shaky, as the default # is set by the server. as of 2.0.0-rc0, 101 @@ -639,15 +639,15 @@ def test_limit_and_batch_size(self): # for queries without ntoreturn curs = db.test.find() next(curs) - self.assertEqual(101, curs._Cursor__retrieved) + self.assertEqual(101, curs._retrieved) curs = db.test.find().limit(0).batch_size(0) next(curs) - self.assertEqual(101, curs._Cursor__retrieved) + self.assertEqual(101, curs._retrieved) curs = db.test.find(limit=0, batch_size=0) next(curs) - self.assertEqual(101, curs._Cursor__retrieved) + self.assertEqual(101, curs._retrieved) def test_skip(self): db = self.db @@ -886,17 +886,17 @@ def test_clone(self): # Shallow copies can so can mutate cursor2 = copy.copy(cursor) - cursor2._Cursor__projection["cursor2"] = False - self.assertTrue("cursor2" in cursor._Cursor__projection) + cursor2._projection["cursor2"] = False + self.assertTrue(cursor._projection and "cursor2" in cursor._projection) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) - cursor3._Cursor__projection["cursor3"] = False - self.assertFalse("cursor3" in cursor._Cursor__projection) + cursor3._projection["cursor3"] = False + self.assertFalse(cursor._projection and "cursor3" in cursor._projection) cursor4 = cursor.clone() - cursor4._Cursor__projection["cursor4"] = False - self.assertFalse("cursor4" in cursor._Cursor__projection) + cursor4._projection["cursor4"] = False + self.assertFalse(cursor._projection and "cursor4" in cursor._projection) # Test memo when deepcopying queries query = {"hello": "world"} @@ -905,16 +905,16 @@ def test_clone(self): cursor2 = copy.deepcopy(cursor) - self.assertNotEqual(id(cursor._Cursor__spec), id(cursor2._Cursor__spec)) - self.assertEqual(id(cursor2._Cursor__spec["reflexive"]), id(cursor2._Cursor__spec)) - self.assertEqual(len(cursor2._Cursor__spec), 2) + self.assertNotEqual(id(cursor._spec), id(cursor2._spec)) + self.assertEqual(id(cursor2._spec["reflexive"]), id(cursor2._spec)) + self.assertEqual(len(cursor2._spec), 2) # Ensure hints are cloned as the correct type cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) # Internal types are now dict rather than SON by default - self.assertTrue(isinstance(cursor2._Cursor__hint, dict)) - self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) + self.assertTrue(isinstance(cursor2._hint, dict)) + self.assertEqual(cursor._hint, cursor2._hint) def test_clone_empty(self): self.db.test.delete_many({}) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index aa4b8b0a7d..d946eee173 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -52,9 +52,9 @@ from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument from gridfs import GridIn, GridOut -from pymongo.collection import ReturnDocument from pymongo.errors import DuplicateKeyError -from pymongo.message import _CursorAddress +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.message import _CursorAddress class DecimalEncoder(TypeEncoder): @@ -817,7 +817,7 @@ def insert_and_check(self, change_stream, insert_doc, expected_doc): def kill_change_stream_cursor(self, change_stream): # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor - address = _CursorAddress(cursor.address, cursor._CommandCursor__ns) + address = _CursorAddress(cursor.address, cursor._ns) client = self.input_target.database.client client._close_cursor_now(cursor.cursor_id, address) diff --git a/test/test_database.py b/test/test_database.py index 87391312f9..1520a4cc55 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -19,7 +19,7 @@ import sys from typing import Any, Iterable, List, Mapping, Union -from pymongo.command_cursor import CommandCursor +from pymongo.synchronous.command_cursor import CommandCursor sys.path[0:0] = [""] @@ -38,9 +38,7 @@ from bson.objectid import ObjectId from bson.regex import Regex from bson.son import SON -from pymongo import auth, helpers -from pymongo.collection import Collection -from pymongo.database import Database +from pymongo.asynchronous import auth from pymongo.errors import ( CollectionInvalid, ExecutionTimeout, @@ -49,9 +47,12 @@ OperationFailure, WriteConcernError, ) -from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import helpers +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern diff --git a/test/test_default_exports.py b/test/test_default_exports.py index 4b02e0e318..91f94c9db4 100644 --- a/test/test_default_exports.py +++ b/test/test_default_exports.py @@ -67,6 +67,161 @@ def test_gridfs(self): def test_bson(self): self.check_module(bson, BSON_IGNORE) + def test_pymongo_imports(self): + import pymongo + from pymongo.auth import MECHANISMS + from pymongo.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + ) + from pymongo.change_stream import ( + ChangeStream, + ClusterChangeStream, + CollectionChangeStream, + DatabaseChangeStream, + ) + from pymongo.client_options import ClientOptions + from pymongo.client_session import ClientSession, SessionOptions, TransactionOptions + from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, + validate_collation_or_none, + ) + from pymongo.collection import Collection, ReturnDocument + from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor + from pymongo.cursor import Cursor, RawBatchCursor + from pymongo.database import Database + from pymongo.driver_info import DriverInfo + from pymongo.encryption import ( + Algorithm, + ClientEncryption, + QueryType, + RewrapManyDataKeyResult, + ) + from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts + from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + CursorNotFound, + DocumentTooLarge, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + ExecutionTimeout, + InvalidName, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + ProtocolError, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, + WriteError, + WTimeoutError, + ) + from pymongo.event_loggers import ( + CommandLogger, + ConnectionPoolLogger, + HeartbeatLogger, + ServerLogger, + TopologyLogger, + ) + from pymongo.mongo_client import MongoClient + from pymongo.monitoring import ( + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionPoolListener, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerHeartbeatFailedEvent, + ServerHeartbeatListener, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerListener, + ServerOpeningEvent, + TopologyClosedEvent, + TopologyDescriptionChangedEvent, + TopologyEvent, + TopologyListener, + TopologyOpenedEvent, + register, + ) + from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + SearchIndexModel, + UpdateMany, + UpdateOne, + ) + from pymongo.pool import PoolOptions + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + SecondaryPreferred, + ) + from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, + ) + from pymongo.server_api import ServerApi, ServerApiVersion + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + from pymongo.uri_parser import ( + parse_host, + parse_ipv6_literal_host, + parse_uri, + parse_userinfo, + split_hosts, + split_options, + validate_options, + ) + from pymongo.write_concern import WriteConcern, validate_boolean + + def test_gridfs_imports(self): + import gridfs + from gridfs.errors import CorruptGridFile, FileExists, GridFSError, NoFile + from gridfs.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutChunkIterator, + GridOutCursor, + GridOutIterator, + ) + if __name__ == "__main__": unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 72b0f8a024..53602eaeca 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -40,7 +40,7 @@ from unittest.mock import patch from bson import Timestamp, json_util -from pymongo import MongoClient, common, monitoring +from pymongo import MongoClient from pymongo.errors import ( AutoReconnect, ConfigurationError, @@ -48,14 +48,15 @@ NotPrimaryError, OperationFailure, ) -from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _check_command_response, _check_write_command_response -from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent -from pymongo.server_description import SERVER_TYPE, ServerDescription -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext -from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo.uri_parser import parse_uri +from pymongo.synchronous import common, monitoring +from pymongo.synchronous.hello import Hello, HelloCompat +from pymongo.synchronous.helpers import _check_command_response, _check_write_command_response +from pymongo.synchronous.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent +from pymongo.synchronous.server_description import SERVER_TYPE, ServerDescription +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE +from pymongo.synchronous.uri_parser import parse_uri # Location of JSON test specifications. SDAM_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") @@ -286,8 +287,8 @@ def mock_command(*args, **kwargs): barrier.wait() raise AutoReconnect("mock Connection.command error") - for sock in pool.conns: - sock.command = mock_command + for conn in pool.conns: + conn.command = mock_command def insert_command(i): try: diff --git a/test/test_dns.py b/test/test_dns.py index 9a78e451d7..a2d0fd8b4d 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -25,10 +25,10 @@ from test import IntegrationTest, client_context, unittest from test.utils import wait_until -from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError -from pymongo.mongo_client import MongoClient -from pymongo.uri_parser import parse_uri, split_hosts +from pymongo.synchronous.common import validate_read_preference_tags +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.uri_parser import parse_uri, split_hosts class TestDNSRepl(unittest.TestCase): diff --git a/test/test_encryption.py b/test/test_encryption.py index 2a60b72957..0e232f4401 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -30,8 +30,8 @@ from threading import Thread from typing import Any, Dict, Mapping -from pymongo.collection import Collection from pymongo.daemon import _spawn_daemon +from pymongo.synchronous.collection import Collection sys.path[0:0] = [""] @@ -68,10 +68,8 @@ from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON -from pymongo import ReadPreference, encryption -from pymongo.cursor import CursorType -from pymongo.encryption import Algorithm, ClientEncryption, QueryType -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts +from pymongo import ReadPreference +from pymongo.cursor_shared import CursorType from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -84,15 +82,18 @@ ServerSelectionTimeoutError, WriteError, ) -from pymongo.mongo_client import MongoClient -from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.synchronous import encryption +from pymongo.synchronous.encryption import Algorithm, ClientEncryption, QueryType +from pymongo.synchronous.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.operations import InsertOne, ReplaceOne, UpdateOne from pymongo.write_concern import WriteConcern KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} def get_client_opts(client): - return client._MongoClient__options + return client.options class TestAutoEncryptionOpts(PyMongoTestCase): diff --git a/test/test_examples.py b/test/test_examples.py index e003d8459a..f0d8bd5543 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -27,8 +27,8 @@ import pymongo from pymongo.errors import ConnectionFailure, OperationFailure from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference from pymongo.server_api import ServerApi +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern diff --git a/test/test_fork.py b/test/test_fork.py index d9ac3d261d..8fc1cdbb55 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -41,7 +41,7 @@ def test_lock_client(self): # Forks the client with some items locked. # Parent => All locks should be as before the fork. # Child => All locks should be reset. - with self.client._MongoClient__lock: + with self.client._lock: def target(): with warnings.catch_warnings(): @@ -65,6 +65,7 @@ def target(): with self.fork(target): pass + @unittest.skip("testing") def test_topology_reset(self): # Tests that topologies are different from each other. # Cannot use ID because virtual memory addresses may be the same. diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 344a248b45..c45c5b5771 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -22,7 +22,7 @@ import zipfile from io import BytesIO -from pymongo.database import Database +from pymongo.synchronous.database import Database sys.path[0:0] = [""] @@ -32,7 +32,7 @@ from bson.objectid import ObjectId from gridfs import GridFS from gridfs.errors import NoFile -from gridfs.grid_file import ( +from gridfs.synchronous.grid_file import ( _SEEK_CUR, _SEEK_END, DEFAULT_CHUNK_SIZE, @@ -42,7 +42,7 @@ ) from pymongo import MongoClient from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError -from pymongo.message import _CursorAddress +from pymongo.synchronous.message import _CursorAddress class TestGridFileNoConnect(unittest.TestCase): @@ -253,9 +253,9 @@ def test_grid_out_cursor_options(self): cursor_clone = cursor.clone() cursor_dict = cursor.__dict__.copy() - cursor_dict.pop("_Cursor__session") + cursor_dict.pop("_session") cursor_clone_dict = cursor_clone.__dict__.copy() - cursor_clone_dict.pop("_Cursor__session") + cursor_clone_dict.pop("_session") self.assertDictEqual(cursor_dict, cursor_clone_dict) self.assertRaises(NotImplementedError, cursor.add_option, 0) @@ -757,7 +757,7 @@ def test_survive_cursor_not_found(self): # readchunk(). assert client.address is not None client._close_cursor_now( - outfile._GridOut__chunk_iter._cursor.cursor_id, + outfile._chunk_iter._cursor.cursor_id, _CursorAddress(client.address, db.fs.chunks.full_name), ) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 88fccd6544..1ef17afc2b 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -31,15 +31,15 @@ import gridfs from bson.binary import Binary from gridfs.errors import CorruptGridFile, FileExists, NoFile -from gridfs.grid_file import DEFAULT_CHUNK_SIZE, GridOutCursor -from pymongo.database import Database +from gridfs.synchronous.grid_file import DEFAULT_CHUNK_SIZE, GridOutCursor from pymongo.errors import ( ConfigurationError, NotPrimaryError, ServerSelectionTimeoutError, ) -from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.read_preferences import ReadPreference class JustWrite(threading.Thread): @@ -346,7 +346,7 @@ def test_file_exists(self): one.close() # Attempt to upload a file with more chunks to the same _id. - with patch("gridfs.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + with patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): two = self.fs.new_file(_id=123) self.assertRaises(FileExists, two.write, b"x" * DEFAULT_CHUNK_SIZE * 3) # Original file is still readable (no extra chunks were uploaded). @@ -443,13 +443,13 @@ def test_gridfs_find(self): cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) - def test_delete_not_initialized(self): - # Creating a cursor with invalid arguments will not run __init__ - # but will still call __del__. - cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ - with self.assertRaises(TypeError): - cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore - cursor.__del__() # no error + # def test_delete_not_initialized(self): + # # Creating a cursor with invalid arguments will not run __init__ + # # but will still call __del__. + # cursor = GridOutCursor.__new__(GridOutCursor) # Skip calling __init__ + # with self.assertRaises(TypeError): + # cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore + # cursor.__del__() # no error def test_gridfs_find_one(self): self.assertEqual(None, self.fs.find_one()) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index f1e7800ce3..6ce7b79228 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -41,8 +41,8 @@ ServerSelectionTimeoutError, WriteConcernError, ) -from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.read_preferences import ReadPreference class JustWrite(threading.Thread): @@ -282,7 +282,7 @@ def test_upload_from_stream_with_id(self): ) self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) - @patch("gridfs.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) @client_context.require_failCommand_fail_point def test_upload_bulk_write_error(self): # Test BulkWriteError from insert_many is converted to an insert_one style error. @@ -305,7 +305,7 @@ def test_upload_bulk_write_error(self): self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) gin.abort() - @patch("gridfs.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 10) def test_upload_batching(self): with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: gin.write(b"s" * (10 - 1)) @@ -401,7 +401,7 @@ def test_rename(self): self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) - @patch("gridfs.grid_file._UPLOAD_BUFFER_SIZE", 5) + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) def test_abort(self): gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) gin.write(b"test1") diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 5c75ab01df..0566fffe5b 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -23,8 +23,8 @@ from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until from pymongo.errors import ConnectionFailure -from pymongo.hello import Hello, HelloCompat -from pymongo.monitor import Monitor +from pymongo.synchronous.hello import Hello, HelloCompat +from pymongo.synchronous.monitor import Monitor class TestHeartbeatMonitoring(IntegrationTest): diff --git a/test/test_index_management.py b/test/test_index_management.py index 5b6653dcba..b8409178d1 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -29,8 +29,8 @@ from pymongo import MongoClient from pymongo.errors import OperationFailure -from pymongo.operations import SearchIndexModel from pymongo.read_concern import ReadConcern +from pymongo.synchronous.operations import SearchIndexModel from pymongo.write_concern import WriteConcern _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "index_management") diff --git a/test/test_logger.py b/test/test_logger.py index e8d1929b8b..d1f84a8441 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -20,7 +20,7 @@ from bson import json_util from pymongo.errors import OperationFailure -from pymongo.logger import _DEFAULT_DOCUMENT_LENGTH +from pymongo.synchronous.logger import _DEFAULT_DOCUMENT_LENGTH # https://github.com/mongodb/specifications/tree/master/source/command-logging-and-monitoring/tests#prose-tests diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 1b0130f7d8..d41f216eb8 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -20,7 +20,7 @@ import time import warnings -from pymongo.operations import _Op +from pymongo.synchronous.operations import _Op sys.path[0:0] = [""] @@ -30,7 +30,7 @@ from pymongo import MongoClient from pymongo.errors import ConfigurationError -from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.server_selectors import writable_server_selector # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "max_staleness") diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index f39a1cb03f..4ab4d30657 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -18,7 +18,7 @@ import sys import threading -from pymongo.operations import _Op +from pymongo.synchronous.operations import _Op sys.path[0:0] = [""] @@ -27,8 +27,8 @@ from test.utils import connected, wait_until from pymongo.errors import AutoReconnect, InvalidOperation -from pymongo.server_selectors import writable_server_selector -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.synchronous.server_selectors import writable_server_selector +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE @client_context.require_connection @@ -89,6 +89,7 @@ def test_lazy_connect(self): # While connected() ensures we can trigger connection from the main # thread and wait for the monitors, this test triggers connection from # several threads at once to check for data races. + raise unittest.SkipTest("skip for now") nthreads = 10 client = self.mock_client() self.assertEqual(0, len(client.nodes)) diff --git a/test/test_monitor.py b/test/test_monitor.py index 92bcdc49ad..3bf610294d 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -30,7 +30,7 @@ wait_until, ) -from pymongo.periodic_executor import _EXECUTORS +from pymongo.synchronous.periodic_executor import _EXECUTORS def unregistered(ref): diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 868078d5c8..7f88888157 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -27,10 +27,11 @@ from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON -from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring -from pymongo.command_cursor import CommandCursor +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import monitoring +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index bfd07a83ec..73484772e5 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -24,7 +24,7 @@ from test import IntegrationTest, client_context from bson.codec_options import CodecOptions -from pymongo.encryption import _HAVE_PYMONGOCRYPT, ClientEncryption, EncryptionError +from pymongo.synchronous.encryption import _HAVE_PYMONGOCRYPT, ClientEncryption, EncryptionError class TestonDemandGCPCredentials(IntegrationTest): diff --git a/test/test_pooling.py b/test/test_pooling.py index e91c57bc6b..5ed701517a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -24,17 +24,18 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.son import SON -from pymongo import MongoClient, message, timeout +from pymongo import MongoClient, timeout from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError -from pymongo.hello import HelloCompat +from pymongo.synchronous import message +from pymongo.synchronous.hello_compat import HelloCompat sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest from test.utils import delay, get_pool, joinall, rs_or_single_client -from pymongo.pool import Pool, PoolOptions from pymongo.socket_checker import SocketChecker +from pymongo.synchronous.pool import Pool, PoolOptions @client_context.require_connection diff --git a/test/test_pymongo.py b/test/test_pymongo.py index d4203ed5cf..8d78afba7c 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -27,7 +27,7 @@ class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient - self.assertEqual(pymongo.MongoClient, pymongo.mongo_client.MongoClient) + self.assertEqual(pymongo.MongoClient, pymongo.synchronous.mongo_client.MongoClient) if __name__ == "__main__": diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 2d6a3e9f1b..4f774aa87d 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -22,7 +22,7 @@ import sys from typing import Any -from pymongo.operations import _Op +from pymongo.synchronous.operations import _Op sys.path[0:0] = [""] @@ -39,9 +39,10 @@ from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.message import _maybe_add_read_preference -from pymongo.mongo_client import MongoClient -from pymongo.read_preferences import ( +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.message import _maybe_add_read_preference +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.read_preferences import ( MovingAverage, Nearest, Primary, @@ -50,9 +51,8 @@ Secondary, SecondaryPreferred, ) -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import Selection, readable_server_selector -from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import Selection, readable_server_selector from pymongo.write_concern import WriteConcern @@ -300,6 +300,12 @@ def _conn_from_server(self, read_preference, server, session): self.record_a_read(conn.address) yield conn, read_preference + async def _socket_for_reads_async(self, read_preference, session): + context = await super()._socket_for_reads_async(read_preference, session) + async with context as (sock_info, read_preference): + self.record_a_read(sock_info.address) + return await super()._socket_for_reads_async(read_preference, session) + def record_a_read(self, address): server = self._get_topology().select_server_by_address(address, _Op.TEST, 0) self.has_read_from.add(server) diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 939f05faf2..93986d824d 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -40,9 +40,9 @@ WriteError, WTimeoutError, ) -from pymongo.mongo_client import MongoClient -from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.operations import IndexModel, InsertOne from pymongo.write_concern import WriteConcern _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "read_write_concern") diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index e3028688d7..569f7c2751 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -43,8 +43,8 @@ ) from test.utils_spec_runner import SpecRunner -from pymongo.mongo_client import MongoClient -from pymongo.monitoring import ( +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.monitoring import ( ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, ConnectionCheckOutFailedReason, diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index ccc6b12e01..347e6c1383 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -47,15 +47,15 @@ ServerSelectionTimeoutError, WriteConcernError, ) -from pymongo.mongo_client import MongoClient -from pymongo.monitoring import ( +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.monitoring import ( CommandSucceededEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, ConnectionCheckOutFailedReason, PoolClearedEvent, ) -from pymongo.operations import ( +from pymongo.synchronous.operations import ( DeleteMany, DeleteOne, InsertOne, diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 105ffaf034..c955dc4084 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -31,14 +31,15 @@ ) from bson.json_util import object_hook -from pymongo import MongoClient, monitoring -from pymongo.collection import Collection -from pymongo.common import clean_node +from pymongo import MongoClient from pymongo.errors import ConnectionFailure, NotPrimaryError -from pymongo.hello import Hello -from pymongo.monitor import Monitor -from pymongo.server_description import ServerDescription -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.synchronous import monitoring +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.common import clean_node +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sdam_monitoring") diff --git a/test/test_server.py b/test/test_server.py index 1d71a614d3..b5c6c1365f 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -21,9 +21,9 @@ from test import unittest -from pymongo.hello import Hello -from pymongo.server import Server -from pymongo.server_description import ServerDescription +from pymongo.synchronous.hello import Hello +from pymongo.synchronous.server import Server +from pymongo.synchronous.server_description import ServerDescription class TestServer(unittest.TestCase): diff --git a/test/test_server_description.py b/test/test_server_description.py index ee05e95cf8..273c001c9e 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -23,9 +23,9 @@ from bson.int64 import Int64 from bson.objectid import ObjectId -from pymongo.hello import Hello, HelloCompat -from pymongo.server_description import ServerDescription from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.hello import Hello, HelloCompat +from pymongo.synchronous.server_description import ServerDescription address = ("localhost", 27017) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 30a8aaa7a2..94289a00a3 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -20,12 +20,12 @@ from pymongo import MongoClient, ReadPreference from pymongo.errors import ServerSelectionTimeoutError -from pymongo.hello import HelloCompat -from pymongo.operations import _Op -from pymongo.server_selectors import writable_server_selector -from pymongo.settings import TopologySettings -from pymongo.topology import Topology -from pymongo.typings import strip_optional +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.server_selectors import writable_server_selector +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology +from pymongo.synchronous.typings import strip_optional sys.path[0:0] = [""] diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 9dced595c9..c7384590d9 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -27,9 +27,9 @@ ) from test.utils_selection_tests import create_topology -from pymongo.common import clean_node -from pymongo.operations import _Op -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.common import clean_node +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.read_preferences import ReadPreference # Location of JSON test specifications. TEST_PATH = os.path.join( diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index a129af4585..26e871c400 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -23,7 +23,7 @@ from test import unittest -from pymongo.read_preferences import MovingAverage +from pymongo.synchronous.read_preferences import MovingAverage # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection/rtt") diff --git a/test/test_session.py b/test/test_session.py index c5cf77b754..f746c6d7cb 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -21,7 +21,7 @@ from io import BytesIO from typing import Any, Callable, List, Set, Tuple -from pymongo.mongo_client import MongoClient +from pymongo.synchronous.mongo_client import MongoClient sys.path[0:0] = [""] @@ -35,13 +35,14 @@ from bson import DBRef from gridfs import GridFS, GridFSBucket -from pymongo import ASCENDING, IndexModel, InsertOne, monitoring -from pymongo.command_cursor import CommandCursor -from pymongo.common import _MAX_END_SESSIONS -from pymongo.cursor import Cursor +from pymongo import ASCENDING from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure -from pymongo.operations import UpdateOne from pymongo.read_concern import ReadConcern +from pymongo.synchronous import monitoring +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.common import _MAX_END_SESSIONS +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.operations import IndexModel, InsertOne, UpdateOne # Ignore auth commands like saslStart, so we can assert lsid is in all commands. @@ -184,6 +185,7 @@ def test_implicit_sessions_checkout(self): # "To confirm that implicit sessions only allocate their server session after a # successful connection checkout" test from Driver Sessions Spec. succeeded = False + raise unittest.SkipTest("temporary skip") lsid_set = set() failures = 0 for _ in range(5): @@ -295,8 +297,8 @@ def test_client(self): client = self.client ops: list = [ (client.server_info, [], {}), - (client.list_database_names, [], {}), - (client.drop_database, ["pymongo_test"], {}), + # (client.list_database_names, [], {}), + # (client.drop_database, ["pymongo_test"], {}), ] self._test_ops(client, *ops) @@ -377,12 +379,12 @@ def test_cursor_clone(self): next(cursor) # Session is "owned" by cursor. self.assertIsNone(cursor.session) - self.assertIsNotNone(cursor._Cursor__session) + self.assertIsNotNone(cursor._session) clone = cursor.clone() next(clone) self.assertIsNone(clone.session) - self.assertIsNotNone(clone._Cursor__session) - self.assertFalse(cursor._Cursor__session is clone._Cursor__session) + self.assertIsNotNone(clone._session) + self.assertFalse(cursor._session is clone._session) cursor.close() clone.close() @@ -540,12 +542,12 @@ def test_gridfsbucket_cursor(self): cursor = bucket.find(batch_size=1) files = [cursor.next()] - s = cursor._Cursor__session + s = cursor._session self.assertFalse(s.has_ended) cursor.__del__() self.assertTrue(s.has_ended) - self.assertIsNone(cursor._Cursor__session) + self.assertIsNone(cursor._session) # Files are still valid, they use their own sessions. for f in files: @@ -621,7 +623,7 @@ def _test_cursor_helper(self, create_cursor, close_cursor): cursor = create_cursor(coll, None) next(cursor) # Session is "owned" by cursor. - session = getattr(cursor, "_%s__session" % cursor.__class__.__name__) + session = cursor._session self.assertIsNotNone(session) lsid = session.session_id next(cursor) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 29283f0ff2..0c293874b1 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -25,10 +25,10 @@ from test.utils import FunctionCallRecorder, wait_until import pymongo -from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.mongo_client import MongoClient -from pymongo.srv_resolver import _have_dnspython +from pymongo.synchronous import common +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.srv_resolver import _have_dnspython WAIT_TIME = 0.1 @@ -51,7 +51,9 @@ def __init__( def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval @@ -71,15 +73,15 @@ def mock_get_hosts_and_min_ttl(resolver, *args): else: patch_func = mock_get_hosts_and_min_ttl - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore def __enter__(self): self.enable() def disable(self): common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore - self.old_dns_resolver_response # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response ) def __exit__(self, exc_type, exc_val, exc_tb): @@ -131,7 +133,10 @@ def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAI def predicate(): if set(expected_nodelist) == set(self.get_nodelist(client)): - return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count + >= 1 + ) return False wait_until(predicate, "Node list equals expected nodelist", timeout=timeout) @@ -141,7 +146,7 @@ def predicate(): msg = "Client nodelist %s changed unexpectedly (expected %s)" raise self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore 1, "resolver was never called", ) diff --git a/test/test_ssl.py b/test/test_ssl.py index 3b307df39e..56dd23a8e0 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -33,8 +33,8 @@ from pymongo import MongoClient, ssl_support from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure -from pymongo.hello import HelloCompat from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context +from pymongo.synchronous.hello_compat import HelloCompat from pymongo.write_concern import WriteConcern _HAVE_PYOPENSSL = False diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 44e673822a..054910ca1f 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -29,8 +29,8 @@ wait_until, ) -from pymongo import monitoring -from pymongo.hello import HelloCompat +from pymongo.synchronous import monitoring +from pymongo.synchronous.hello_compat import HelloCompat class TestStreamingProtocol(IntegrationTest): diff --git a/test/test_topology.py b/test/test_topology.py index 7662a0c028..e6fd5a3c0b 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -17,7 +17,7 @@ import sys -from pymongo.operations import _Op +from pymongo.synchronous.operations import _Op sys.path[0:0] = [""] @@ -26,19 +26,19 @@ from test.utils import MockPool, wait_until from bson.objectid import ObjectId -from pymongo import common from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure -from pymongo.hello import Hello, HelloCompat -from pymongo.monitor import Monitor -from pymongo.pool import PoolOptions -from pymongo.read_preferences import ReadPreference, Secondary -from pymongo.server import Server -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext, _filter_servers -from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.synchronous import common +from pymongo.synchronous.hello import Hello, HelloCompat +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.pool import PoolOptions +from pymongo.synchronous.read_preferences import ReadPreference, Secondary +from pymongo.synchronous.server import Server +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import any_server_selector, writable_server_selector +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext, _filter_servers +from pymongo.synchronous.topology_description import TOPOLOGY_TYPE class SetNameDiscoverySettings(TopologySettings): diff --git a/test/test_transactions.py b/test/test_transactions.py index 797b2e3740..4279c942ec 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -35,10 +35,7 @@ from bson import encode from bson.raw_bson import RawBSONDocument from gridfs import GridFS, GridFSBucket -from pymongo import WriteConcern, client_session -from pymongo.client_session import TransactionOptions -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor +from pymongo import WriteConcern from pymongo.errors import ( CollectionInvalid, ConfigurationError, @@ -46,9 +43,13 @@ InvalidOperation, OperationFailure, ) -from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import client_session +from pymongo.synchronous.client_session import TransactionOptions +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.operations import IndexModel, InsertOne +from pymongo.synchronous.read_preferences import ReadPreference _TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") diff --git a/test/test_typing.py b/test/test_typing.py index ae395c02e6..552590c644 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -75,9 +75,9 @@ class ImplicitMovie(TypedDict): from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo import ASCENDING, MongoClient -from pymongo.collection import Collection -from pymongo.operations import DeleteOne, InsertOne, ReplaceOne -from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.operations import DeleteOne, InsertOne, ReplaceOne +from pymongo.synchronous.read_preferences import ReadPreference TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") diff --git a/test/test_typing_strict.py b/test/test_typing_strict.py index 4b03b2bfdf..32e9fcfcca 100644 --- a/test/test_typing_strict.py +++ b/test/test_typing_strict.py @@ -19,8 +19,8 @@ from typing import TYPE_CHECKING, Any, Dict import pymongo -from pymongo.collection import Collection -from pymongo.database import Database +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.database import Database def test_generic_arguments() -> None: diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 27f5fd2fbc..09178e2802 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -28,7 +28,7 @@ from bson.binary import JAVA_LEGACY from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.uri_parser import ( +from pymongo.synchronous.uri_parser import ( parse_uri, parse_userinfo, split_hosts, diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index f483a03842..a5ec436498 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -26,9 +26,9 @@ from test import clear_warning_registry, unittest -from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate -from pymongo.compression_support import _have_snappy -from pymongo.uri_parser import SRV_SCHEME, parse_uri +from pymongo.synchronous.common import INTERNAL_URI_OPTION_NAME_MAP, validate +from pymongo.synchronous.compression_support import _have_snappy +from pymongo.synchronous.uri_parser import parse_uri CONN_STRING_TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index cb25c3f66b..7fe8ebd76f 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -22,8 +22,8 @@ from test.unified_format import generate_test_classes from test.utils import OvertCommandListener, rs_or_single_client -from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi, ServerApiVersion +from pymongo.synchronous.mongo_client import MongoClient TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "versioned-api") diff --git a/test/unified_format.py b/test/unified_format.py index 3f98b571bb..fe1419c0d0 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -68,13 +68,6 @@ from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket, GridOut from pymongo import ASCENDING, CursorType, MongoClient, _csot -from pymongo.change_stream import ChangeStream -from pymongo.client_session import ClientSession, TransactionOptions, _TxnState -from pymongo.collection import Collection -from pymongo.command_cursor import CommandCursor -from pymongo.database import Database -from pymongo.encryption import ClientEncryption -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( BulkWriteError, ConfigurationError, @@ -85,7 +78,18 @@ OperationFailure, PyMongoError, ) -from pymongo.monitoring import ( +from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult +from pymongo.server_api import ServerApi +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.change_stream import ChangeStream +from pymongo.synchronous.client_session import ClientSession, TransactionOptions, _TxnState +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.encryption import ClientEncryption +from pymongo.synchronous.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.synchronous.monitoring import ( _SENSITIVE_COMMANDS, CommandFailedEvent, CommandListener, @@ -121,16 +125,12 @@ _ServerEvent, _ServerHeartbeatEvent, ) -from pymongo.operations import SearchIndexModel -from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference -from pymongo.results import BulkWriteResult -from pymongo.server_api import ServerApi -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import Selection, writable_server_selector -from pymongo.server_type import SERVER_TYPE -from pymongo.topology_description import TopologyDescription -from pymongo.typings import _Address +from pymongo.synchronous.operations import SearchIndexModel +from pymongo.synchronous.read_preferences import ReadPreference +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import Selection, writable_server_selector +from pymongo.synchronous.topology_description import TopologyDescription +from pymongo.synchronous.typings import _Address from pymongo.write_concern import WriteConcern JSON_OPTS = json_util.JSONOptions(tz_aware=False) diff --git a/test/utils.py b/test/utils.py index 15480dc440..bd33270c11 100644 --- a/test/utils.py +++ b/test/utils.py @@ -15,6 +15,7 @@ """Utilities for testing pymongo""" from __future__ import annotations +import asyncio import contextlib import copy import functools @@ -29,19 +30,24 @@ from collections import abc, defaultdict from functools import partial from test import client_context, db_pwd, db_user +from test.asynchronous import async_client_context from typing import Any, List from bson import json_util from bson.objectid import ObjectId from bson.son import SON -from pymongo import MongoClient, monitoring, operations, read_preferences -from pymongo.collection import ReturnDocument -from pymongo.cursor import CursorType +from pymongo import AsyncMongoClient +from pymongo.cursor_shared import CursorType from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.hello import HelloCompat -from pymongo.helpers import _SENSITIVE_COMMANDS +from pymongo.helpers_constants import _SENSITIVE_COMMANDS from pymongo.lock import _create_lock -from pymongo.monitoring import ( +from pymongo.read_concern import ReadConcern +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous import monitoring, operations, read_preferences +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.hello_compat import HelloCompat +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.monitoring import ( ConnectionCheckedInEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, @@ -54,13 +60,11 @@ PoolCreatedEvent, PoolReadyEvent, ) -from pymongo.operations import _Op -from pymongo.pool import _CancellationContext, _PoolGeneration -from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference -from pymongo.server_selectors import any_server_selector, writable_server_selector -from pymongo.server_type import SERVER_TYPE -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.operations import _Op +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration +from pymongo.synchronous.read_preferences import ReadPreference +from pymongo.synchronous.server_selectors import any_server_selector, writable_server_selector +from pymongo.synchronous.uri_parser import parse_uri from pymongo.write_concern import WriteConcern IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) @@ -594,6 +598,33 @@ def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs return MongoClient(uri, port, **client_options) +async def _async_mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + return AsyncMongoClient(uri, port, **client_options) + + def single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: """Make a direct connection. Don't authenticate.""" return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) @@ -630,6 +661,52 @@ def rs_or_single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoCli return _mongo_client(h, p, **kwargs) +async def async_single_client_noauth( + h: Any = None, p: Any = None, **kwargs: Any +) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await _async_mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) + + +async def async_single_client( + h: Any = None, p: Any = None, **kwargs: Any +) -> AsyncMongoClient[dict]: + """Make a direct connection, and authenticate if necessary.""" + return await _async_mongo_client(h, p, directConnection=True, **kwargs) + + +async def async_rs_client_noauth( + h: Any = None, p: Any = None, **kwargs: Any +) -> AsyncMongoClient[dict]: + """Connect to the replica set. Don't authenticate.""" + return await _async_mongo_client(h, p, authenticate=False, **kwargs) + + +async def async_rs_client(h: Any = None, p: Any = None, **kwargs: Any) -> AsyncMongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return await _async_mongo_client(h, p, **kwargs) + + +async def async_rs_or_single_client_noauth( + h: Any = None, p: Any = None, **kwargs: Any +) -> AsyncMongoClient[dict]: + """Connect to the replica set if there is one, otherwise the standalone. + + Like rs_or_single_client, but does not authenticate. + """ + return await _async_mongo_client(h, p, authenticate=False, **kwargs) + + +async def async_rs_or_single_client( + h: Any = None, p: Any = None, **kwargs: Any +) -> AsyncMongoClient[Any]: + """Connect to the replica set if there is one, otherwise the standalone. + + Authenticates if necessary. + """ + return await _async_mongo_client(h, p, **kwargs) + + def ensure_all_connected(client: MongoClient) -> None: """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a @@ -821,6 +898,32 @@ def wait_until(predicate, success_description, timeout=10): time.sleep(interval) +async def async_wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. + + E.g.: + + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') + + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). + + Returns the predicate's first true value. + """ + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + retval = await predicate() + if retval: + return retval + + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) + + await asyncio.sleep(interval) + + def repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" cmd = SON([("replSetStepDown", 1)]) @@ -836,6 +939,11 @@ def is_mongos(client): return res.get("msg", "") == "isdbgrid" +async def async_is_mongos(client): + res = await client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" + + def assertRaisesExactly(cls, fn, *args, **kwargs): """ Unlike the standard assertRaises, this checks that a function raises a @@ -888,7 +996,14 @@ def stop(self): def get_pool(client): """Get the standalone, primary, or mongos pool.""" topology = client._get_topology() - server = topology.select_server(writable_server_selector, _Op.TEST) + server = topology._select_server(writable_server_selector, _Op.TEST) + return server.pool + + +async def async_get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = await client._get_topology() + server = await topology._select_server(writable_server_selector, _Op.TEST) return server.pool @@ -900,6 +1015,16 @@ def get_pools(client): ] +async def async_get_pools(client): + """Get all pools.""" + return [ + server.pool + async for server in await (await client._get_topology()).select_servers( + any_server_selector, _Op.TEST + ) + ] + + # Constants for run_threads and lazy_client_trial. NTRIALS = 5 NTHREADS = 10 diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 2b684bb0f1..7673e9bc27 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -19,7 +19,7 @@ import os import sys -from pymongo.operations import _Op +from pymongo.synchronous.operations import _Op sys.path[0:0] = [""] @@ -28,13 +28,13 @@ from test.utils import MockPool, parse_read_preference from bson import json_util -from pymongo.common import HEARTBEAT_FREQUENCY, clean_node from pymongo.errors import AutoReconnect, ConfigurationError -from pymongo.hello import Hello, HelloCompat -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import writable_server_selector -from pymongo.settings import TopologySettings -from pymongo.topology import Topology +from pymongo.synchronous.common import HEARTBEAT_FREQUENCY, clean_node +from pymongo.synchronous.hello import Hello, HelloCompat +from pymongo.synchronous.server_description import ServerDescription +from pymongo.synchronous.server_selectors import writable_server_selector +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology def get_addresses(server_list): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index eea96aa1d7..e38d53b94a 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -38,13 +38,13 @@ from bson.int64 import Int64 from bson.son import SON from gridfs import GridFSBucket -from pymongo import client_session -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult, _WriteResult +from pymongo.synchronous import client_session +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern diff --git a/test/version.py b/test/version.py index 043c760cf5..42d53cfcf4 100644 --- a/test/version.py +++ b/test/version.py @@ -80,6 +80,13 @@ def from_client(cls, client): return cls.from_version_array(info["versionArray"]) return cls.from_string(info["version"]) + @classmethod + async def async_from_client(cls, client): + info = await client.server_info() + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) + def at_least(self, *other_version): return self >= Version(*other_version) diff --git a/tools/synchro.py b/tools/synchro.py new file mode 100644 index 0000000000..2a0c4f4318 --- /dev/null +++ b/tools/synchro.py @@ -0,0 +1,279 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Synchronization of asynchronous modules. + +Used as part of our build system to generate synchronous code. +""" + +from __future__ import annotations + +import re +from os import listdir +from pathlib import Path + +from unasync import Rule, unasync_files # type: ignore[import] + +replacements = { + "AsyncCollection": "Collection", + "AsyncDatabase": "Database", + "AsyncCursor": "Cursor", + "AsyncMongoClient": "MongoClient", + "AsyncCommandCursor": "CommandCursor", + "AsyncRawBatchCursor": "RawBatchCursor", + "AsyncRawBatchCommandCursor": "RawBatchCommandCursor", + "async_command": "command", + "async_receive_message": "receive_message", + "async_sendall": "sendall", + "asynchronous": "synchronous", + "anext": "next", + "_ALock": "_Lock", + "_ACondition": "_Condition", + "AsyncGridFS": "GridFS", + "AsyncGridFSBucket": "GridFSBucket", + "AsyncGridIn": "GridIn", + "AsyncGridOut": "GridOut", + "AsyncGridOutCursor": "GridOutCursor", + "AsyncGridOutIterator": "GridOutIterator", + "_AsyncGridOutChunkIterator": "GridOutChunkIterator", + "_a_grid_in_property": "_grid_in_property", + "_a_grid_out_property": "_grid_out_property", + "AsyncMongoCryptCallback": "MongoCryptCallback", + "AsyncExplicitEncrypter": "ExplicitEncrypter", + "AsyncAutoEncrypter": "AutoEncrypter", + "AsyncContextManager": "ContextManager", + "AsyncClientContext": "ClientContext", + "AsyncTestCollection": "TestCollection", + "AsyncIntegrationTest": "IntegrationTest", + "AsyncPyMongoTestCase": "PyMongoTestCase", + "async_client_context": "client_context", + "async_setup": "setup", + "asyncSetUp": "setUp", + "asyncTearDown": "tearDown", + "async_teardown": "teardown", + "pytest_asyncio": "pytest", + "async_wait_until": "wait_until", + "addAsyncCleanup": "addCleanup", + "async_setup_class": "setup_class", + "IsolatedAsyncioTestCase": "TestCase", + "async_get_pool": "get_pool", + "async_is_mongos": "is_mongos", + "async_rs_or_single_client": "rs_or_single_client", + "async_single_client": "single_client", + "async_from_client": "from_client", +} + +docstring_replacements: dict[tuple[str, str], str] = { + ("MongoClient", "connect"): """If ``True`` (the default), immediately + begin connecting to MongoDB in the background. Otherwise connect + on the first operation.""", + ("Collection", "create"): """If ``True``, force collection + creation even without options being set.""", + ("Collection", "session"): """A + :class:`~pymongo.client_session.ClientSession` that is used with + the create collection command.""", + ("Collection", "kwargs"): """Additional keyword arguments will + be passed as options for the create collection command.""", +} + +type_replacements = {"_Condition": "threading.Condition"} + +_pymongo_base = "./pymongo/asynchronous/" +_gridfs_base = "./gridfs/asynchronous/" +_test_base = "./test/asynchronous/" + +_pymongo_dest_base = "./pymongo/synchronous/" +_gridfs_dest_base = "./gridfs/synchronous/" +_test_dest_base = "./test/synchronous/" + + +async_files = [ + _pymongo_base + f for f in listdir(_pymongo_base) if (Path(_pymongo_base) / f).is_file() +] + +gridfs_files = [ + _gridfs_base + f for f in listdir(_gridfs_base) if (Path(_gridfs_base) / f).is_file() +] + +test_files = [_test_base + f for f in listdir(_test_base) if (Path(_test_base) / f).is_file()] + +sync_files = [ + _pymongo_dest_base + f + for f in listdir(_pymongo_dest_base) + if (Path(_pymongo_dest_base) / f).is_file() +] + +sync_gridfs_files = [ + _gridfs_dest_base + f + for f in listdir(_gridfs_dest_base) + if (Path(_gridfs_dest_base) / f).is_file() +] + +sync_test_files = [ + _test_dest_base + f for f in listdir(_test_dest_base) if (Path(_test_dest_base) / f).is_file() +] + + +docstring_translate_files = [ + _pymongo_dest_base + f + for f in [ + "aggregation.py", + "change_stream.py", + "collection.py", + "command_cursor.py", + "cursor.py", + "client_options.py", + "client_session.py", + "database.py", + "encryption.py", + "encryption_options.py", + "mongo_client.py", + "network.py", + "operations.py", + "pool.py", + "topology.py", + ] +] + + +def process_files(files: list[str]) -> None: + for file in files: + if "__init__" not in file or "__init__" and "test" in file: + with open(file, "r+") as f: + lines = f.readlines() + lines = apply_is_sync(lines) + lines = translate_coroutine_types(lines) + lines = translate_async_sleeps(lines) + if file in docstring_translate_files: + lines = translate_docstrings(lines) + translate_locks(lines) + translate_types(lines) + f.seek(0) + f.writelines(lines) + f.truncate() + + +def apply_is_sync(lines: list[str]) -> list[str]: + is_sync = next(iter([line for line in lines if line.startswith("_IS_SYNC = ")])) + index = lines.index(is_sync) + is_sync = is_sync.replace("False", "True") + lines[index] = is_sync + return lines + + +def translate_coroutine_types(lines: list[str]) -> list[str]: + coroutine_types = [line for line in lines if "Coroutine[" in line] + for type in coroutine_types: + res = re.search(r"Coroutine\[([A-z]+), ([A-z]+), ([A-z]+)\]", type) + if res: + old = res[0] + index = lines.index(type) + new = type.replace(old, res.group(3)) + lines[index] = new + return lines + + +def translate_locks(lines: list[str]) -> list[str]: + lock_lines = [line for line in lines if "_Lock(" in line] + cond_lines = [line for line in lines if "_Condition(" in line] + for line in lock_lines: + res = re.search(r"_Lock\(([^()]*\(\))\)", line) + if res: + old = res[0] + index = lines.index(line) + lines[index] = line.replace(old, res[1]) + for line in cond_lines: + res = re.search(r"_Condition\(([^()]*\([^()]*\))\)", line) + if res: + old = res[0] + index = lines.index(line) + lines[index] = line.replace(old, res[1]) + + return lines + + +def translate_types(lines: list[str]) -> list[str]: + for k, v in type_replacements.items(): + matches = [line for line in lines if k in line and "import" not in line] + for line in matches: + index = lines.index(line) + lines[index] = line.replace(k, v) + return lines + + +def translate_async_sleeps(lines: list[str]) -> list[str]: + blocking_sleeps = [line for line in lines if "asyncio.sleep(0)" in line] + lines = [line for line in lines if line not in blocking_sleeps] + sleeps = [line for line in lines if "asyncio.sleep" in line] + + for line in sleeps: + res = re.search(r"asyncio.sleep\(([^()]*)\)", line) + if res: + old = res[0] + index = lines.index(line) + new = f"time.sleep({res[1]})" + lines[index] = line.replace(old, new) + + return lines + + +def translate_docstrings(lines: list[str]) -> list[str]: + for i in range(len(lines)): + for k in replacements: + if k in lines[i]: + # This sequence of replacements fixes the grammar issues caused by translating async -> sync + if "an Async" in lines[i]: + lines[i] = lines[i].replace("an Async", "a Async") + if "An Async" in lines[i]: + lines[i] = lines[i].replace("An Async", "A Async") + if "an asynchronous" in lines[i]: + lines[i] = lines[i].replace("an asynchronous", "a") + if "An asynchronous" in lines[i]: + lines[i] = lines[i].replace("An asynchronous", "A") + lines[i] = lines[i].replace(k, replacements[k]) + if "Sync" in lines[i] and replacements[k] in lines[i]: + lines[i] = lines[i].replace("Sync", "") + for i in range(len(lines)): + for k in docstring_replacements: # type: ignore[assignment] + if f":param {k[1]}: **Not supported by {k[0]}**." in lines[i]: + lines[i] = lines[i].replace( + f"**Not supported by {k[0]}**.", + docstring_replacements[k], # type: ignore[index] + ) + + return lines + + +def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[str, str]) -> None: + unasync_files( + files, + [ + Rule( + fromdir=src, + todir=dest, + additional_replacements=replacements, + ) + ], + ) + + +def main() -> None: + unasync_directory(async_files, _pymongo_base, _pymongo_dest_base, replacements) + unasync_directory(gridfs_files, _gridfs_base, _gridfs_dest_base, replacements) + unasync_directory(test_files, _test_base, _test_dest_base, replacements) + process_files(sync_files + sync_gridfs_files + sync_test_files) + + +if __name__ == "__main__": + main() diff --git a/tools/synchro.sh b/tools/synchro.sh new file mode 100644 index 0000000000..fe48b663bc --- /dev/null +++ b/tools/synchro.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +python ./tools/synchro.py +python -m ruff check pymongo/synchronous/ gridfs/synchronous/ test/synchronous --fix --silent +python -m ruff format pymongo/synchronous/ gridfs/synchronous/ test/synchronous --silent From 2b030018e5220151a5aa7ad4ef5baa23df210ab5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 7 Jun 2024 06:24:18 -0500 Subject: [PATCH 1298/2111] PYTHON-4451 Use Hatch as Build Backend (#1644) --- .evergreen/run-tests.sh | 1 + .evergreen/utils.sh | 2 +- .github/workflows/test-python.yml | 6 ++---- MANIFEST.in | 34 ----------------------------- README.md | 6 ------ hatch_build.py | 36 +++++++++++++++++++++++++++++++ pymongo/_version.py | 27 +++++++++++++++++------ pyproject.toml | 32 ++++++++++++++++++--------- setup.py | 26 +--------------------- test/test_pymongo.py | 9 ++++++++ tools/fail_if_no_c.py | 8 +++++++ tox.ini | 13 ----------- 12 files changed, 100 insertions(+), 100 deletions(-) delete mode 100644 MANIFEST.in create mode 100644 hatch_build.py diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 3cad42e4dc..2d9a7d4e23 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -158,6 +158,7 @@ if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE if [ ! -d "libmongocrypt_git" ]; then git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git fi + python -m pip install -U setuptools python -m pip install ./libmongocrypt_git/bindings/python python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 7238feb3c8..f0a5851d91 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -66,7 +66,7 @@ createvirtualenv () { export PIP_QUIET=1 python -m pip install --upgrade pip - python -m pip install --upgrade setuptools tox + python -m pip install --upgrade tox } # Usage: diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 530a2386f2..b93c93c022 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -31,12 +31,10 @@ jobs: - name: Run linters run: | tox -m lint-manual - - name: Check Manifest - run: | - tox -m manifest - name: Run compilation run: | - pip install -e . + export PYMONGO_C_EXT_MUST_BUILD=1 + pip install -v -e . python tools/fail_if_no_c.py - name: Run typecheck run: | diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 686da15403..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,34 +0,0 @@ -include README.md -include LICENSE -include THIRD-PARTY-NOTICES -include *.ini -include sbom.json -include requirements.txt -exclude .coveragerc -exclude .git-blame-ignore-revs -exclude .pre-commit-config.yaml -exclude .readthedocs.yaml -exclude CONTRIBUTING.md -exclude RELEASE.md -recursive-include doc *.rst -recursive-include doc *.py -recursive-include doc *.conf -recursive-include doc *.css -recursive-include doc *.js -recursive-include doc *.png -include doc/Makefile -include doc/_templates/layout.html -include doc/make.bat -include doc/static/periodic-executor-refs.dot -recursive-include requirements *.txt -recursive-include tools *.py -recursive-include tools *.sh -include tools/README.rst -include green_framework_test.py -recursive-include test *.pem -recursive-include test *.py -recursive-include test *.json -recursive-include bson *.h -prune test/mod_wsgi_test -prune test/lambda -prune .evergreen diff --git a/README.md b/README.md index f3fb3d8f1b..3d13f1aa9a 100644 --- a/README.md +++ b/README.md @@ -78,12 +78,6 @@ PyMongo can be installed with [pip](http://pypi.python.org/pypi/pip): python -m pip install pymongo ``` -Or `easy_install` from [setuptools](http://pypi.python.org/pypi/setuptools): - -```bash -python -m easy_install pymongo -``` - You can also download the project source and do: ```bash diff --git a/hatch_build.py b/hatch_build.py new file mode 100644 index 0000000000..792f0647e2 --- /dev/null +++ b/hatch_build.py @@ -0,0 +1,36 @@ +"""A custom hatch build hook for pymongo.""" +from __future__ import annotations + +import os +import subprocess +import sys +from pathlib import Path + +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class CustomHook(BuildHookInterface): + """The pymongo build hook.""" + + def initialize(self, version, build_data): + """Initialize the hook.""" + if self.target_name == "sdist": + return + here = Path(__file__).parent.resolve() + sys.path.insert(0, str(here)) + + subprocess.check_call([sys.executable, "setup.py", "build_ext", "-i"]) + + # Ensure wheel is marked as binary and contains the binary files. + build_data["infer_tag"] = True + build_data["pure_python"] = False + if os.name == "nt": + patt = ".pyd" + else: + patt = ".so" + for pkg in ["bson", "pymongo"]: + dpath = here / pkg + for fpath in dpath.glob(f"*{patt}"): + relpath = os.path.relpath(fpath, here) + build_data["artifacts"].append(relpath) + build_data["force_include"][relpath] = relpath diff --git a/pymongo/_version.py b/pymongo/_version.py index dc5c38c734..bc7653c263 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -15,16 +15,29 @@ """Current version of PyMongo.""" from __future__ import annotations -from typing import Tuple, Union +import re +from typing import List, Tuple, Union -version_tuple: Tuple[Union[int, str], ...] = (4, 8, 0, ".dev0") +__version__ = "4.8.0.dev1" -def get_version_string() -> str: - if isinstance(version_tuple[-1], str): - return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] - return ".".join(map(str, version_tuple)) +def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: + pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" + match = re.match(pattern, version) + if match: + parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]] + if match["rest"]: + parts.append(match["rest"]) + elif re.match(r"\d+.\d+", version): + parts = [int(part) for part in version.split(".")] + else: + raise ValueError("Could not parse version") + return tuple(parts) -__version__: str = get_version_string() +version_tuple = get_version_tuple(__version__) version = __version__ + + +def get_version_string() -> str: + return __version__ diff --git a/pyproject.toml b/pyproject.toml index 1540432e50..e7eb5877ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = ["setuptools>=63.0"] -build-backend = "setuptools.build_meta" +requires = ["hatchling>1.24","setuptools>=65.0","hatch-requirements-txt>=0.4.1"] +build-backend = "hatchling.build" [project] name = "pymongo" @@ -45,16 +45,27 @@ Documentation = "https://pymongo.readthedocs.io" Source = "https://github.com/mongodb/mongo-python-driver" Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" -[tool.setuptools.dynamic] -version = {attr = "pymongo._version.__version__"} +# Used to call hatch_build.py +[tool.hatch.build.hooks.custom] -[tool.setuptools.packages.find] -include = ["bson","gridfs", "gridfs.asynchronous", "gridfs.synchronous", "pymongo", "pymongo.asynchronous", "pymongo.synchronous"] +[tool.hatch.version] +path = "pymongo/_version.py" -[tool.setuptools.package-data] -bson=["py.typed", "*.pyi"] -pymongo=["py.typed", "*.pyi"] -gridfs=["py.typed", "*.pyi"] +[tool.hatch.build.targets.wheel] +packages = ["bson","gridfs", "pymongo"] + +[tool.hatch.metadata.hooks.requirements_txt] +files = ["requirements.txt"] + +[tool.hatch.metadata.hooks.requirements_txt.optional-dependencies] +aws = ["requirements/aws.txt"] +docs = ["requirements/docs.txt"] +encryption = ["requirements/encryption.txt"] +gssapi = ["requirements/gssapi.txt"] +ocsp = ["requirements/ocsp.txt"] +snappy = ["requirements/snappy.txt"] +test = ["requirements/test.txt"] +zstd = ["requirements/zstd.txt"] [tool.pytest.ini_options] minversion = "7" @@ -179,6 +190,7 @@ dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?)|dummy.*)$" "UP031", "F401", "B023", "F811"] "tools/*.py" = ["T201"] "green_framework_test.py" = ["T201"] +"hatch_build.py" = ["S"] [tool.coverage.run] branch = true diff --git a/setup.py b/setup.py index 599ea0e4a9..65ae1908fe 100644 --- a/setup.py +++ b/setup.py @@ -136,32 +136,8 @@ def build_extension(self, ext): ) ext_modules = [] - -def parse_reqs_file(fname): - with open(fname) as fid: - lines = [li.strip() for li in fid.readlines()] - return [li for li in lines if li and not li.startswith("#")] - - -dependencies = parse_reqs_file("requirements.txt") - -extras_require = dict( - aws=parse_reqs_file("requirements/aws.txt"), - encryption=parse_reqs_file("requirements/encryption.txt"), - gssapi=parse_reqs_file("requirements/gssapi.txt"), - ocsp=parse_reqs_file("requirements/ocsp.txt"), - snappy=parse_reqs_file("requirements/snappy.txt"), - # PYTHON-3423 Removed in 4.3 but kept here to avoid pip warnings. - srv=[], - tls=[], - # PYTHON-2133 Removed in 4.0 but kept here to avoid pip warnings. - zstd=parse_reqs_file("requirements/zstd.txt"), - test=parse_reqs_file("requirements/test.txt"), -) - setup( cmdclass={"build_ext": custom_build_ext}, - install_requires=dependencies, - extras_require=extras_require, ext_modules=ext_modules, + packages=["bson", "pymongo", "gridfs"], ) # type:ignore diff --git a/test/test_pymongo.py b/test/test_pymongo.py index 8d78afba7c..fd8ece6c03 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -22,6 +22,7 @@ from test import unittest import pymongo +from pymongo._version import get_version_tuple class TestPyMongo(unittest.TestCase): @@ -29,6 +30,14 @@ def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient self.assertEqual(pymongo.MongoClient, pymongo.synchronous.mongo_client.MongoClient) + def test_get_version_tuple(self): + self.assertEqual(get_version_tuple("4.8.0.dev1"), (4, 8, 0, ".dev1")) + self.assertEqual(get_version_tuple("4.8.1"), (4, 8, 1)) + self.assertEqual(get_version_tuple("5.0.0rc1"), (5, 0, 0, "rc1")) + self.assertEqual(get_version_tuple("5.0"), (5, 0)) + with self.assertRaises(ValueError): + get_version_tuple("5") + if __name__ == "__main__": unittest.main() diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 95810c1a73..6848e155aa 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -29,6 +29,14 @@ import pymongo # noqa: E402 if not pymongo.has_c() or not bson.has_c(): + try: + from pymongo import _cmessage # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + print(e) + try: + from bson import _cbson # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + print(e) sys.exit("could not load C extensions") if os.environ.get("ENSURE_UNIVERSAL2") == "1": diff --git a/tox.ini b/tox.ini index eb9ae204e2..331c73ce18 100644 --- a/tox.ini +++ b/tox.ini @@ -31,8 +31,6 @@ envlist = doc-test, # Linkcheck sphinx docs linkcheck - # Check the sdist integrity. - manifest labels = # Use labels and -m instead of -e so that tox -m ` for alternatives. - The :meth:`MongoClient.disconnect` method is removed; it was a - synonym for :meth:`~pymongo.MongoClient.close`. + The :meth:`AsyncMongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.asynchronous.AsyncMongoClient.close`. - :class:`~pymongo.mongo_client.MongoClient` no longer returns an - instance of :class:`~pymongo.database.Database` for attribute names + :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` no longer returns an + instance of :class:`~pymongo.asynchronous.database.AsyncDatabase` for attribute names with leading underscores. You must use dict-style lookups instead:: client['__my_database__'] @@ -924,7 +924,7 @@ async def watch( Performs an aggregation with an implicit initial ``$changeStream`` stage and returns a - :class:`~pymongo.change_stream.ClusterChangeStream` cursor which + :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` cursor which iterates over changes on all databases on this cluster. Introduced in MongoDB 4.0. @@ -935,10 +935,10 @@ async def watch( for change in stream: print(change) - The :class:`~pymongo.change_stream.ClusterChangeStream` iterable + The :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` iterable blocks until the next change document is returned or an error is raised. If the - :meth:`~pymongo.change_stream.ClusterChangeStream.next` method + :meth:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream.next` method encounters a network error when retrieving a batch from the server, it will automatically attempt to recreate the cursor such that no change events are missed. Any error encountered during the resume @@ -951,7 +951,7 @@ async def watch( for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the + # The AsyncChangeStream encountered an unrecoverable error or the # resume attempt failed to recreate the cursor. logging.error("...") @@ -988,7 +988,7 @@ async def watch( the specified :class:`~bson.timestamp.Timestamp`. Requires MongoDB >= 4.0. :param session: a - :class:`~pymongo.client_session.AsyncClientSession`. + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param start_after: The same as `resume_after` except that `start_after` can resume notifications after an invalidate event. This option and `resume_after` are mutually exclusive. @@ -996,7 +996,7 @@ async def watch( command. :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. + :return: A :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` cursor. .. versionchanged:: 4.3 Added `show_expanded_events` parameter. @@ -1062,9 +1062,9 @@ def nodes(self) -> FrozenSet[_Address]: """Set of all currently connected servers. .. warning:: When connected to a replica set the value of :attr:`nodes` - can change over time as :class:`MongoClient`'s view of the replica + can change over time as :class:`AsyncMongoClient`'s view of the replica set changes. :attr:`nodes` can also be an empty set when - :class:`MongoClient` is first instantiated and hasn't yet connected + :class:`AsyncMongoClient` is first instantiated and hasn't yet connected to any servers, or a network partition causes it to lose connection to all servers. """ @@ -1176,16 +1176,16 @@ def start_session( """Start a logical session. This method takes the same parameters as - :class:`~pymongo.client_session.SessionOptions`. See the - :mod:`~pymongo.client_session` module for details and examples. + :class:`~pymongo.asynchronous.client_session.SessionOptions`. See the + :mod:`~pymongo.asynchronous.client_session` module for details and examples. - A :class:`~pymongo.client_session.AsyncClientSession` may only be used with - the MongoClient that started it. :class:`AsyncClientSession` instances are + A :class:`~pymongo.asynchronous.client_session.AsyncClientSession` may only be used with + the AsyncMongoClient that started it. :class:`AsyncClientSession` instances are **not thread-safe or fork-safe**. They can only be used by one thread or process at a time. A single :class:`AsyncClientSession` cannot be used to run multiple operations concurrently. - :return: An instance of :class:`~pymongo.client_session.AsyncClientSession`. + :return: An instance of :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. .. versionadded:: 3.6 """ @@ -1237,7 +1237,7 @@ def get_default_database( """Get the database named in the MongoDB connection URI. >>> uri = 'mongodb://host/my_database' - >>> client = MongoClient(uri) + >>> client = AsyncMongoClient(uri) >>> db = client.get_default_database() >>> assert db.name == 'my_database' >>> db = client.get_database() @@ -1250,19 +1250,19 @@ def get_default_database( was provided in the URI. :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is + default) the :attr:`codec_options` of this :class:`AsyncMongoClient` is used. :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences` for options. :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is + default) the :attr:`write_concern` of this :class:`AsyncMongoClient` is used. :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is + default) the :attr:`read_concern` of this :class:`AsyncMongoClient` is used. :param comment: A user-provided comment to attach to this command. @@ -1294,12 +1294,12 @@ def get_database( write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, ) -> database.AsyncDatabase[_DocumentType]: - """Get a :class:`~pymongo.database.Database` with the given name and + """Get a :class:`~pymongo.asynchronous.database.AsyncDatabase` with the given name and options. - Useful for creating a :class:`~pymongo.database.Database` with + Useful for creating a :class:`~pymongo.asynchronous.database.AsyncDatabase` with different codec options, read preference, and/or write concern from - this :class:`MongoClient`. + this :class:`AsyncMongoClient`. >>> client.read_preference Primary() @@ -1317,19 +1317,19 @@ def get_database( returned. :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is + default) the :attr:`codec_options` of this :class:`AsyncMongoClient` is used. :param read_preference: The read preference to use. If ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences` for options. :param write_concern: An instance of :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is + default) the :attr:`write_concern` of this :class:`AsyncMongoClient` is used. :param read_concern: An instance of :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is + default) the :attr:`read_concern` of this :class:`AsyncMongoClient` is used. .. versionchanged:: 3.5 @@ -1346,7 +1346,7 @@ def get_database( ) def _database_default_options(self, name: str) -> database.AsyncDatabase: - """Get a Database instance with the default settings.""" + """Get a AsyncDatabase instance with the default settings.""" return self.get_database( name, codec_options=DEFAULT_CODEC_OPTIONS, @@ -1426,7 +1426,7 @@ async def primary(self) -> Optional[tuple[str, int]]: `replicaSet` option. .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. + AsyncMongoClient gained this property in version 3.0. """ return await self._topology.get_primary() # type: ignore[return-value] @@ -1439,7 +1439,7 @@ async def secondaries(self) -> set[_Address]: client was created without the `replicaSet` option. .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. + AsyncMongoClient gained this property in version 3.0. """ return await self._topology.get_secondaries() @@ -1522,7 +1522,7 @@ async def aclose(self) -> None: await self._encrypter.close() async def _get_topology(self) -> Topology: - """Get the internal :class:`~pymongo.topology.Topology` object. + """Get the internal :class:`~pymongo.asynchronous.topology.Topology` object. If this client was created with "connect=False", calling _get_topology launches the connection process in the background. @@ -2074,7 +2074,7 @@ async def server_info( """Get information about the MongoDB server we're connected to. :param session: a - :class:`~pymongo.client_session.AsyncClientSession`. + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. .. versionchanged:: 3.6 Added ``session`` parameter. @@ -2117,7 +2117,7 @@ async def list_databases( """Get a cursor over the databases of the connected server. :param session: a - :class:`~pymongo.client_session.AsyncClientSession`. + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param comment: A user-provided comment to attach to this command. :param kwargs: Optional parameters of the @@ -2127,7 +2127,7 @@ async def list_databases( options differ by server version. - :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. .. versionadded:: 3.6 """ @@ -2141,7 +2141,7 @@ async def list_database_names( """Get a list of the names of all databases on the connected server. :param session: a - :class:`~pymongo.client_session.AsyncClientSession`. + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param comment: A user-provided comment to attach to this command. @@ -2163,13 +2163,13 @@ async def drop_database( """Drop a database. Raises :class:`TypeError` if `name_or_database` is not an instance of - :class:`str` or :class:`~pymongo.database.Database`. + :class:`str` or :class:`~pymongo.asynchronous.database.AsyncDatabase`. :param name_or_database: the name of a database to drop, or a - :class:`~pymongo.database.Database` instance representing the + :class:`~pymongo.asynchronous.database.AsyncDatabase` instance representing the database to drop :param session: a - :class:`~pymongo.client_session.AsyncClientSession`. + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param comment: A user-provided comment to attach to this command. @@ -2179,7 +2179,7 @@ async def drop_database( .. versionchanged:: 3.6 Added ``session`` parameter. - .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of + .. note:: The :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.write_concern` of this client is automatically applied to this operation. .. versionchanged:: 3.4 @@ -2192,7 +2192,7 @@ async def drop_database( name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance of str or a Database") + raise TypeError("name_or_database must be an instance of str or a AsyncDatabase") async with await self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: await self[name]._command( diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py index f7489249d8..a971ad08c0 100644 --- a/pymongo/synchronous/change_stream.py +++ b/pymongo/synchronous/change_stream.py @@ -297,8 +297,8 @@ def next(self) -> _DocumentType: try: resume_token = None pipeline = [{'$match': {'operationType': 'insert'}}] - async with db.collection.watch(pipeline) as stream: - async for insert_change in stream: + with db.collection.watch(pipeline) as stream: + for insert_change in stream: print(insert_change) resume_token = stream.resume_token except pymongo.errors.PyMongoError: @@ -312,9 +312,9 @@ def next(self) -> _DocumentType: # Use the interrupted ChangeStream's resume token to create # a new ChangeStream. The new stream will continue from the # last seen insert change without missing any events. - async with db.collection.watch( + with db.collection.watch( pipeline, resume_after=resume_token) as stream: - async for insert_change in stream: + for insert_change in stream: print(insert_change) Raises :exc:`StopIteration` if this ChangeStream is closed. @@ -346,9 +346,9 @@ def try_next(self) -> Optional[_DocumentType]: This method returns the next change document without waiting indefinitely for the next change. For example:: - async with db.collection.watch() as stream: + with db.collection.watch() as stream: while stream.alive: - change = await stream.try_next() + change = stream.try_next() # Note that the ChangeStream's resume token may be updated # even when no changes are returned. print("Current resume token: %r" % (stream.resume_token,)) diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index e07298b49a..f1d680fc0a 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -23,11 +23,11 @@ with client.start_session(causal_consistency=True) as session: collection = client.db.collection - await collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) + collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) # A secondary read waits for replication of the write. - await secondary_c.find_one({"_id": 1}, session=session) + secondary_c.find_one({"_id": 1}, session=session) If `causal_consistency` is True (the default), read operations that use the session are causally after previous read and write operations. Using a @@ -54,15 +54,15 @@ orders = client.db.orders inventory = client.db.inventory with client.start_session() as session: - async with session.start_transaction(): - await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - await inventory.update_one( + with session.start_transaction(): + orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + inventory.update_one( {"sku": "abc123", "qty": {"$gte": 100}}, {"$inc": {"qty": -100}}, session=session, ) -Upon normal completion of ``async with session.start_transaction()`` block, the +Upon normal completion of ``with session.start_transaction()`` block, the transaction automatically calls :meth:`ClientSession.commit_transaction`. If the block exits with an exception, the transaction automatically calls :meth:`ClientSession.abort_transaction`. @@ -114,8 +114,8 @@ # Each read using this session reads data from the same point in time. with client.start_session(snapshot=True) as session: - order = await orders.find_one({"sku": "abc123"}, session=session) - inventory = await inventory.find_one({"sku": "abc123"}, session=session) + order = orders.find_one({"sku": "abc123"}, session=session) + inventory = inventory.find_one({"sku": "abc123"}, session=session) Snapshot Reads Limitations ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -609,24 +609,24 @@ def with_transaction( This method starts a transaction on this session, executes ``callback`` once, and then commits the transaction. For example:: - async def callback(session): + def callback(session): orders = session.client.db.orders inventory = session.client.db.inventory - await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - await inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, + orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, {"$inc": {"qty": -100}}, session=session) with client.start_session() as session: - await session.with_transaction(callback) + session.with_transaction(callback) To pass arbitrary arguments to the ``callback``, wrap your callable with a ``lambda`` like this:: - async def callback(session, custom_arg, custom_kwarg=None): + def callback(session, custom_arg, custom_kwarg=None): # Transaction operations... with client.start_session() as session: - await session.with_transaction( + session.with_transaction( lambda s: callback(s, "custom_arg", custom_kwarg=1)) In the event of an exception, ``with_transaction`` may retry the commit diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 5803e34b2a..54db3a56b3 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -385,7 +385,7 @@ def _write_concern_for_cmd( __iter__ = None def __next__(self) -> NoReturn: - raise TypeError(f"'{type(self).__name__}' object is not iterable") + raise TypeError("'Collection' object is not iterable") next = __next__ @@ -423,19 +423,19 @@ def watch( Performs an aggregation with an implicit initial ``$changeStream`` stage and returns a - :class:`~pymongo.synchronous.change_stream.CollectionChangeStream` cursor which + :class:`~pymongo.change_stream.CollectionChangeStream` cursor which iterates over changes on this collection. .. code-block:: python - async with db.collection.watch() as stream: - async for change in stream: + with db.collection.watch() as stream: + for change in stream: print(change) - The :class:`~pymongo.synchronous.change_stream.CollectionChangeStream` iterable + The :class:`~pymongo.change_stream.CollectionChangeStream` iterable blocks until the next change document is returned or an error is raised. If the - :meth:`~pymongo.synchronous.change_stream.CollectionChangeStream.next` method + :meth:`~pymongo.change_stream.CollectionChangeStream.next` method encounters a network error when retrieving a batch from the server, it will automatically attempt to recreate the cursor such that no change events are missed. Any error encountered during the resume @@ -444,8 +444,8 @@ def watch( .. code-block:: python try: - async with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: - async for insert_change in stream: + with db.coll.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the @@ -502,7 +502,7 @@ def watch( command. :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - :return: A :class:`~pymongo.synchronous.change_stream.CollectionChangeStream` cursor. + :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. .. versionchanged:: 4.3 Added `show_expanded_events` parameter. @@ -818,12 +818,12 @@ def insert_one( ) -> InsertOneResult: """Insert a single document. - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 0 - >>> result = await db.test.insert_one({'x': 1}) + >>> result = db.test.insert_one({'x': 1}) >>> result.inserted_id ObjectId('54f112defba522406c9cc208') - >>> await db.test.find_one({'x': 1}) + >>> db.test.find_one({'x': 1}) {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} :param document: The document to insert. Must be a mutable mapping @@ -884,12 +884,12 @@ def insert_many( ) -> InsertManyResult: """Insert an iterable of documents. - >>> await db.test.count_documents({}) + >>> db.test.count_documents({}) 0 - >>> result = await db.test.insert_many([{'x': i} for i in range(2)]) - >>> await result.inserted_ids + >>> result = db.test.insert_many([{'x': i} for i in range(2)]) + >>> result.inserted_ids [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')] - >>> await db.test.count_documents({}) + >>> db.test.count_documents({}) 2 :param documents: A iterable of documents to insert. @@ -1098,16 +1098,16 @@ def replace_one( ) -> UpdateResult: """Replace a single document matching the filter. - >>> async for doc in db.test.find({}): + >>> for doc in db.test.find({}): ... print(doc) ... {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} - >>> result = await db.test.replace_one({'x': 1}, {'y': 1}) + >>> result = db.test.replace_one({'x': 1}, {'y': 1}) >>> result.matched_count 1 >>> result.modified_count 1 - >>> async for doc in db.test.find({}): + >>> for doc in db.test.find({}): ... print(doc) ... {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} @@ -1115,14 +1115,14 @@ def replace_one( The *upsert* option can be used to insert a new document if a matching document does not exist. - >>> result = await db.test.replace_one({'x': 1}, {'x': 1}, True) + >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True) >>> result.matched_count 0 >>> result.modified_count 0 >>> result.upserted_id ObjectId('54f11e5c8891e756a6e1abd4') - >>> await db.test.find_one({'x': 1}) + >>> db.test.find_one({'x': 1}) {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} :param filter: A query that matches the document to replace. @@ -1201,18 +1201,18 @@ def update_one( ) -> UpdateResult: """Update a single document matching the filter. - >>> async for doc in db.test.find(): + >>> for doc in db.test.find(): ... print(doc) ... {'x': 1, '_id': 0} {'x': 1, '_id': 1} {'x': 1, '_id': 2} - >>> result = await db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) + >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) >>> result.matched_count 1 >>> result.modified_count 1 - >>> async for doc in db.test.find(): + >>> for doc in db.test.find(): ... print(doc) ... {'x': 4, '_id': 0} @@ -1222,14 +1222,14 @@ def update_one( If ``upsert=True`` and no documents match the filter, create a new document based on the filter criteria and update modifications. - >>> result = await db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) >>> result.matched_count 0 >>> result.modified_count 0 >>> result.upserted_id ObjectId('626a678eeaa80587d4bb3fb7') - >>> await db.test.find_one(result.upserted_id) + >>> db.test.find_one(result.upserted_id) {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} :param filter: A query that matches the document to update. @@ -1314,18 +1314,18 @@ def update_many( ) -> UpdateResult: """Update one or more documents that match the filter. - >>> async for doc in db.test.find(): + >>> for doc in db.test.find(): ... print(doc) ... {'x': 1, '_id': 0} {'x': 1, '_id': 1} {'x': 1, '_id': 2} - >>> result = await db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) + >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) >>> result.matched_count 3 >>> result.modified_count 3 - >>> async for doc in db.test.find(): + >>> for doc in db.test.find(): ... print(doc) ... {'x': 4, '_id': 0} @@ -1417,8 +1417,8 @@ def drop( The following two calls are equivalent: - >>> await db.foo.drop() - >>> await db.drop_collection("foo") + >>> db.foo.drop() + >>> db.drop_collection("foo") .. versionchanged:: 4.2 Added ``encrypted_fields`` parameter. @@ -1550,12 +1550,12 @@ def delete_one( ) -> DeleteResult: """Delete a single document matching the filter. - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 3 - >>> result = await db.test.delete_one({'x': 1}) + >>> result = db.test.delete_one({'x': 1}) >>> result.deleted_count 1 - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 2 :param filter: A query that matches the document to delete. @@ -1615,12 +1615,12 @@ def delete_many( ) -> DeleteResult: """Delete one or more documents matching the filter. - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 3 - >>> result = await db.test.delete_many({'x': 1}) + >>> result = db.test.delete_many({'x': 1}) >>> result.deleted_count 3 - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 0 :param filter: A query that matches the documents to delete. @@ -1694,7 +1694,7 @@ def find_one( :: code-block: python - >>> await collection.find_one(max_time_ms=100) + >>> collection.find_one(max_time_ms=100) """ if filter is not None and not isinstance(filter, abc.Mapping): @@ -1904,8 +1904,8 @@ def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_Documen :mod:`bson` module. >>> import bson - >>> cursor = await db.test.find_raw_batches() - >>> async for batch in cursor: + >>> cursor = db.test.find_raw_batches() + >>> for batch in cursor: ... print(bson.decode_all(batch)) .. note:: find_raw_batches does not support auto encryption. @@ -2133,7 +2133,7 @@ def create_indexes( >>> index1 = IndexModel([("hello", DESCENDING), ... ("world", ASCENDING)], name="hello_world") >>> index2 = IndexModel([("goodbye", DESCENDING)]) - >>> await db.test.create_indexes([index1, index2]) + >>> db.test.create_indexes([index1, index2]) ["hello_world", "goodbye_-1"] :param indexes: A list of :class:`~pymongo.operations.IndexModel` @@ -2232,18 +2232,18 @@ def create_index( To create a single key ascending index on the key ``'mike'`` we just use a string argument:: - >>> await my_collection.create_index("mike") + >>> my_collection.create_index("mike") For a compound index on ``'mike'`` descending and ``'eliot'`` ascending we need to use a list of tuples:: - >>> await my_collection.create_index([("mike", pymongo.DESCENDING), + >>> my_collection.create_index([("mike", pymongo.DESCENDING), ... "eliot"]) All optional index creation parameters should be passed as keyword arguments to this method. For example:: - >>> await my_collection.create_index([("mike", pymongo.DESCENDING)], + >>> my_collection.create_index([("mike", pymongo.DESCENDING)], ... background=True) Valid options include, but are not limited to: @@ -2448,7 +2448,7 @@ def list_indexes( ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the index documents for this collection. - >>> async for index in db.test.list_indexes(): + >>> for index in db.test.list_indexes(): ... print(index) ... SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) @@ -2957,9 +2957,9 @@ def aggregate_raw_batches( :mod:`bson` module. >>> import bson - >>> cursor = await db.test.aggregate_raw_batches([ + >>> cursor = db.test.aggregate_raw_batches([ ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) - >>> async for batch in cursor: + >>> for batch in cursor: ... print(bson.decode_all(batch)) .. note:: aggregate_raw_batches does not support auto encryption. @@ -3217,28 +3217,28 @@ def find_one_and_delete( ) -> _DocumentType: """Finds a single document and deletes it, returning the document. - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 2 - >>> await db.test.find_one_and_delete({'x': 1}) + >>> db.test.find_one_and_delete({'x': 1}) {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} - >>> await db.test.count_documents({'x': 1}) + >>> db.test.count_documents({'x': 1}) 1 If multiple documents match *filter*, a *sort* can be applied. - >>> async for doc in db.test.find({'x': 1}): + >>> for doc in db.test.find({'x': 1}): ... print(doc) ... {'x': 1, '_id': 0} {'x': 1, '_id': 1} {'x': 1, '_id': 2} - >>> await db.test.find_one_and_delete( + >>> db.test.find_one_and_delete( ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) {'x': 1, '_id': 2} The *projection* option can be used to limit the fields returned. - >>> await db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) + >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) {'x': 1} :param filter: A query that matches the document to delete. @@ -3314,15 +3314,15 @@ def find_one_and_replace( :meth:`find_one_and_update` by replacing the document matched by *filter*, rather than modifying the existing document. - >>> async for doc in db.test.find({}): + >>> for doc in db.test.find({}): ... print(doc) ... {'x': 1, '_id': 0} {'x': 1, '_id': 1} {'x': 1, '_id': 2} - >>> await db.test.find_one_and_replace({'x': 1}, {'y': 1}) + >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) {'x': 1, '_id': 0} - >>> async for doc in db.test.find({}): + >>> for doc in db.test.find({}): ... print(doc) ... {'y': 1, '_id': 0} @@ -3418,13 +3418,13 @@ def find_one_and_update( """Finds a single document and updates it, returning either the original or the updated document. - >>> await db.test.find_one_and_update( + >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) {'_id': 665, 'done': False, 'count': 25}} Returns ``None`` if no document matches the filter. - >>> await db.test.find_one_and_update( + >>> db.test.find_one_and_update( ... {'_exists': False}, {'$inc': {'count': 1}}) When the filter matches, by default :meth:`find_one_and_update` @@ -3434,7 +3434,7 @@ def find_one_and_update( option. >>> from pymongo import ReturnDocument - >>> await db.example.find_one_and_update( + >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) @@ -3442,7 +3442,7 @@ def find_one_and_update( You can limit the fields returned with the *projection* option. - >>> await db.example.find_one_and_update( + >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, @@ -3452,9 +3452,9 @@ def find_one_and_update( The *upsert* option can be used to create the document if it doesn't already exist. - >>> await db.example.delete_many({}).deleted_count + >>> (db.example.delete_many({})).deleted_count 1 - >>> await db.example.find_one_and_update( + >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, @@ -3464,12 +3464,12 @@ def find_one_and_update( If multiple documents match *filter*, a *sort* can be applied. - >>> async for doc in db.test.find({'done': True}): + >>> for doc in db.test.find({'done': True}): ... print(doc) ... {'_id': 665, 'done': True, 'result': {'count': 26}} {'_id': 701, 'done': True, 'result': {'count': 17}} - >>> await db.test.find_one_and_update( + >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index 1cd4d8694c..86fa69dcb6 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -164,7 +164,7 @@ def alive(self) -> bool: Even if :attr:`alive` is ``True``, :meth:`next` can raise :exc:`StopIteration`. Best to use a for loop:: - async for doc in collection.aggregate(pipeline): + for doc in collection.aggregate(pipeline): print(doc) .. note:: :attr:`alive` can be True while iterating a cursor from @@ -382,11 +382,11 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() def to_list(self) -> list[_DocumentType]: - """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. + """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. To use:: - >>> await cursor.to_list() + >>> cursor.to_list() If the cursor is empty or has no more results, an empty list will be returned. diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 652af606cb..1595ce40b9 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -527,7 +527,7 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: """Specifies a time limit for a getMore operation on a - :attr:`~pymongo.cursor_shared.CursorType.TAILABLE_AWAIT` cursor. For all other + :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other types of cursor max_await_time_ms is ignored. Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or @@ -712,27 +712,27 @@ def sort( Pass a field name and a direction, either :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: - async for doc in collection.find().sort('field', pymongo.ASCENDING): + for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) To sort by multiple fields, pass a list of (key, direction) pairs. If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: - async for doc in collection.find().sort([ + for doc in collection.find().sort([ 'field1', ('field2', pymongo.DESCENDING)]): print(doc) Text search results can be sorted by relevance:: - cursor = await db.test.find( + cursor = db.test.find( {'$text': {'$search': 'some words'}}, {'score': {'$meta': 'textScore'}}) # Sort by 'score' field. cursor.sort([('score', {'$meta': 'textScore'})]) - async for doc in cursor: + for doc in cursor: print(doc) For more advanced text search functionality, see MongoDB's @@ -831,7 +831,7 @@ def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: to the object currently being scanned. For example:: # Find all documents where field "a" is less than "b" plus "c". - async for doc in db.test.find().where('this.a < (this.b + this.c)'): + for doc in db.test.find().where('this.a < (this.b + this.c)'): print(doc) Raises :class:`TypeError` if `code` is not an instance of @@ -904,7 +904,7 @@ def alive(self) -> bool: With regular cursors, simply use a for loop instead of :attr:`alive`:: - async for doc in collection.find(): + for doc in collection.find(): print(doc) .. note:: Even if :attr:`alive` is True, :meth:`next` can raise @@ -1285,11 +1285,11 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() def to_list(self) -> list[_DocumentType]: - """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. + """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. To use:: - >>> await cursor.to_list() + >>> cursor.to_list() If the cursor is empty or has no more results, an empty list will be returned. diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 3b3a91095a..93a9985281 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -337,21 +337,21 @@ def watch( Performs an aggregation with an implicit initial ``$changeStream`` stage and returns a - :class:`~pymongo.synchronous.change_stream.DatabaseChangeStream` cursor which + :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which iterates over changes on all collections in this database. Introduced in MongoDB 4.0. .. code-block:: python - async with db.watch() as stream: - async for change in stream: + with db.watch() as stream: + for change in stream: print(change) - The :class:`~pymongo.synchronous.change_stream.DatabaseChangeStream` iterable + The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable blocks until the next change document is returned or an error is raised. If the - :meth:`~pymongo.synchronous.change_stream.DatabaseChangeStream.next` method + :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method encounters a network error when retrieving a batch from the server, it will automatically attempt to recreate the cursor such that no change events are missed. Any error encountered during the resume @@ -360,8 +360,8 @@ def watch( .. code-block:: python try: - async with db.watch([{"$match": {"operationType": "insert"}}]) as stream: - async for insert_change in stream: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: print(insert_change) except pymongo.errors.PyMongoError: # The ChangeStream encountered an unrecoverable error or the @@ -409,7 +409,7 @@ def watch( command. :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - :return: A :class:`~pymongo.synchronous.change_stream.DatabaseChangeStream` cursor. + :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. .. versionchanged:: 4.3 Added `show_expanded_events` parameter. @@ -810,23 +810,23 @@ def command( For example, a command like ``{buildinfo: 1}`` can be sent using: - >>> await db.command("buildinfo") + >>> db.command("buildinfo") OR - >>> await db.command({"buildinfo": 1}) + >>> db.command({"buildinfo": 1}) For a command where the value matters, like ``{count: collection_name}`` we can do: - >>> await db.command("count", collection_name) + >>> db.command("count", collection_name) OR - >>> await db.command({"count": collection_name}) + >>> db.command({"count": collection_name}) For commands that take additional arguments we can use kwargs. So ``{count: collection_name, query: query}`` becomes: - >>> await db.command("count", collection_name, query=query) + >>> db.command("count", collection_name, query=query) OR - >>> await db.command({"count": collection_name, "query": query}) + >>> db.command({"count": collection_name, "query": query}) :param command: document representing the command to be issued, or the name of the command (for simple commands only). diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 0d848458dc..c20a74d3db 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -116,10 +116,10 @@ def test_iteration(self): with self.assertRaises(TypeError): _ = db[0] # next fails - with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'AsyncDatabase' object is not iterable"): _ = next(db) # .next() fails - with self.assertRaisesRegex(TypeError, "'Database' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'AsyncDatabase' object is not iterable"): _ = db.next() # Do not implement typing.Iterable. self.assertNotIsInstance(db, Iterable) diff --git a/test/test_cursor.py b/test/test_cursor.py index 12cb0cd57c..26f0575da0 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -232,7 +232,7 @@ def test_max_await_time_ms(self): listener = AllowListEventListener("find", "getMore") coll = (rs_or_single_client(event_listeners=[listener]))[self.db.name].pymongo_test - # Tailable_await defaults. + # Tailable_defaults. coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() # find self.assertFalse("maxTimeMS" in listener.started_events[0].command) @@ -240,7 +240,7 @@ def test_max_await_time_ms(self): self.assertFalse("maxTimeMS" in listener.started_events[1].command) listener.reset() - # Tailable_await with max_await_time_ms set. + # Tailable_with max_await_time_ms set. coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) @@ -251,7 +251,7 @@ def test_max_await_time_ms(self): self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) listener.reset() - # Tailable_await with max_time_ms and make sure list() works on synchronous cursors + # Tailable_with max_time_ms and make sure list() works on synchronous cursors if _IS_SYNC: list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # type: ignore[call-overload] else: @@ -265,7 +265,7 @@ def test_max_await_time_ms(self): self.assertFalse("maxTimeMS" in listener.started_events[1].command) listener.reset() - # Tailable_await with both max_time_ms and max_await_time_ms + # Tailable_with both max_time_ms and max_await_time_ms ( coll.find(cursor_type=CursorType.TAILABLE_AWAIT) .max_time_ms(99) diff --git a/tools/synchro.py b/tools/synchro.py index 94f3d7f8f6..57b089c5a5 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -272,15 +272,26 @@ def translate_docstrings(lines: list[str]) -> list[str]: # This sequence of replacements fixes the grammar issues caused by translating async -> sync if "an Async" in lines[i]: lines[i] = lines[i].replace("an Async", "a Async") + if "an 'Async" in lines[i]: + lines[i] = lines[i].replace("an 'Async", "a 'Async") if "An Async" in lines[i]: lines[i] = lines[i].replace("An Async", "A Async") + if "An 'Async" in lines[i]: + lines[i] = lines[i].replace("An 'Async", "A 'Async") if "an asynchronous" in lines[i]: lines[i] = lines[i].replace("an asynchronous", "a") if "An asynchronous" in lines[i]: lines[i] = lines[i].replace("An asynchronous", "A") + # This ensures docstring links are for `pymongo.X` instead of `pymongo.synchronous.X` + if "pymongo.asynchronous" in lines[i] and "import" not in lines[i]: + lines[i] = lines[i].replace("pymongo.asynchronous", "pymongo") lines[i] = lines[i].replace(k, replacements[k]) if "Sync" in lines[i] and "Synchronous" not in lines[i] and replacements[k] in lines[i]: lines[i] = lines[i].replace("Sync", "") + if "async for" in lines[i] or "async with" in lines[i] or "async def" in lines[i]: + lines[i] = lines[i].replace("async ", "") + if "await " in lines[i] and "tailable" not in lines[i]: + lines[i] = lines[i].replace("await ", "") for i in range(len(lines)): for k in docstring_replacements: # type: ignore[assignment] if f":param {k[1]}: **Not supported by {k[0]}**." in lines[i]: From 3d936d5c7de2d08015d84f0d0bdd2d31566cb0b0 Mon Sep 17 00:00:00 2001 From: morotti Date: Fri, 2 Aug 2024 21:25:32 +0100 Subject: [PATCH 1370/2111] PYTHON-4600 Handle round trip time being negative when time.monotonic() is not monotonic (#1758) Co-authored-by: rmorotti --- doc/contributors.rst | 1 + pymongo/_csot.py | 5 +---- pymongo/asynchronous/monitor.py | 16 +++++++++++++--- pymongo/read_preferences.py | 5 +---- pymongo/synchronous/monitor.py | 16 +++++++++++++--- 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/doc/contributors.rst b/doc/contributors.rst index 49fb2d844d..272b81d6ae 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -101,3 +101,4 @@ The following is a list of people who have contributed to - Casey Clements (caseyclements) - Ivan Lukyanchikov (ilukyanchikov) - Terry Patterson +- Romain Morotti diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 2ac02aa9e2..94328f9819 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -149,10 +149,7 @@ def __init__(self) -> None: def add_sample(self, sample: float) -> None: if sample < 0: - # Likely system time change while waiting for hello response - # and not using time.monotonic. Ignore it, the next one will - # probably be valid. - return + raise ValueError(f"duration cannot be negative {sample}") self.samples.append(sample) def get(self) -> float: diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index a5f7435128..d2ac8868e7 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -48,6 +48,15 @@ def _sanitize(error: Exception) -> None: error.__cause__ = None +def _monotonic_duration(start: float) -> float: + """Return the duration since the given start time. + + Accounts for buggy platforms where time.monotonic() is not monotonic. + See PYTHON-4600. + """ + return max(0.0, time.monotonic() - start) + + class MonitorBase: def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): """Base class to do periodic work on a background thread. @@ -247,7 +256,7 @@ async def _check_server(self) -> ServerDescription: _sanitize(error) sd = self._server_description address = sd.address - duration = time.monotonic() - start + duration = _monotonic_duration(start) if self._publish: awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) assert self._listeners is not None @@ -317,7 +326,8 @@ async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float] else: # New connection handshake or polling hello (MongoDB <4.4). response = await conn._hello(cluster_time, None, None) - return response, time.monotonic() - start + duration = _monotonic_duration(start) + return response, duration class SrvMonitor(MonitorBase): @@ -441,7 +451,7 @@ async def _ping(self) -> float: raise Exception("_RttMonitor closed") start = time.monotonic() await conn.hello() - return time.monotonic() - start + return _monotonic_duration(start) # Close monitors to cancel any in progress streaming checks before joining diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 19b908a8c8..8c6e6de45d 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -607,10 +607,7 @@ def __init__(self) -> None: def add_sample(self, sample: float) -> None: if sample < 0: - # Likely system time change while waiting for hello response - # and not using time.monotonic. Ignore it, the next one will - # probably be valid. - return + raise ValueError(f"duration cannot be negative {sample}") if self.average is None: self.average = sample else: diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index 8106c1922d..e3d1f7bf2a 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -48,6 +48,15 @@ def _sanitize(error: Exception) -> None: error.__cause__ = None +def _monotonic_duration(start: float) -> float: + """Return the duration since the given start time. + + Accounts for buggy platforms where time.monotonic() is not monotonic. + See PYTHON-4600. + """ + return max(0.0, time.monotonic() - start) + + class MonitorBase: def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): """Base class to do periodic work on a background thread. @@ -247,7 +256,7 @@ def _check_server(self) -> ServerDescription: _sanitize(error) sd = self._server_description address = sd.address - duration = time.monotonic() - start + duration = _monotonic_duration(start) if self._publish: awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) assert self._listeners is not None @@ -317,7 +326,8 @@ def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: else: # New connection handshake or polling hello (MongoDB <4.4). response = conn._hello(cluster_time, None, None) - return response, time.monotonic() - start + duration = _monotonic_duration(start) + return response, duration class SrvMonitor(MonitorBase): @@ -441,7 +451,7 @@ def _ping(self) -> float: raise Exception("_RttMonitor closed") start = time.monotonic() conn.hello() - return time.monotonic() - start + return _monotonic_duration(start) # Close monitors to cancel any in progress streaming checks before joining From d28ceb205895cbf2ed5012dfae377eeb92bf4dfd Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 2 Aug 2024 16:31:59 -0700 Subject: [PATCH 1371/2111] PYTHON-4021 Fix previous topologyDescription published when closing a client (#1769) --- pymongo/asynchronous/topology.py | 2 +- pymongo/synchronous/topology.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 183c459f23..4e55db4981 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -645,6 +645,7 @@ async def close(self) -> None: :exc:`~.errors.InvalidOperation`. """ async with self._lock: + old_td = self._description for server in self._servers.values(): await server.close() @@ -664,7 +665,6 @@ async def close(self) -> None: # Publish only after releasing the lock. if self._publish_tp: assert self._events is not None - old_td = self._description self._description = TopologyDescription( TOPOLOGY_TYPE.Unknown, {}, diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index eda5f01d3d..8542f67bb3 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -643,6 +643,7 @@ def close(self) -> None: :exc:`~.errors.InvalidOperation`. """ with self._lock: + old_td = self._description for server in self._servers.values(): server.close() @@ -662,7 +663,6 @@ def close(self) -> None: # Publish only after releasing the lock. if self._publish_tp: assert self._events is not None - old_td = self._description self._description = TopologyDescription( TOPOLOGY_TYPE.Unknown, {}, From 9c7adf89ef0a317d0300ce7528f7f9680090d087 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 2 Aug 2024 16:32:11 -0700 Subject: [PATCH 1372/2111] PYTHON-4609 Speed up unified tests (#1768) --- test/unified_format.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/unified_format.py b/test/unified_format.py index d311d97292..2c576da45b 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -33,6 +33,7 @@ from test import ( IntegrationTest, client_context, + client_knobs, unittest, ) from test.helpers import ( @@ -1037,8 +1038,18 @@ def setUpClass(cls): if "retryable-writes" in cls.TEST_SPEC["description"]: raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + @classmethod def tearDownClass(cls): + cls.knobs.disable() for client in cls.mongos_clients: client.close() super().tearDownClass() From a5d519775de1877174c719197a2cd0a9ad42e59f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 5 Aug 2024 12:26:06 -0700 Subject: [PATCH 1373/2111] PYTHON-4605 Test serverMonitoringMode=poll waits after a successful heartbeat (#1767) --- .../unified/serverMonitoringMode.json | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json index 7d681b4f9e..4b492f7d85 100644 --- a/test/discovery_and_monitoring/unified/serverMonitoringMode.json +++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json @@ -444,6 +444,69 @@ ] } ] + }, + { + "description": "poll waits after successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 1000000 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 500 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 1 + } + } + ] } ] } From da2465f2c76b131b34a07dc848dfbb12b745bc44 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 6 Aug 2024 10:50:52 -0700 Subject: [PATCH 1374/2111] PYTHON-4611 Prefer non deprecated cryptography apis (#1770) --- pymongo/ocsp_cache.py | 59 ++++++++++++++++++++++++++++------------- pymongo/ocsp_support.py | 12 ++++++--- 2 files changed, 50 insertions(+), 21 deletions(-) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 742579312f..3facefe350 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -19,7 +19,7 @@ from collections import namedtuple from datetime import datetime as _datetime from datetime import timezone -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Optional from pymongo.lock import _create_lock @@ -27,6 +27,22 @@ from cryptography.x509.ocsp import OCSPRequest, OCSPResponse +def _next_update(value: OCSPResponse) -> Optional[_datetime]: + """Compat helper to return the response's next_update_utc.""" + # Added in cryptography 43.0.0. + if hasattr(value, "next_update_utc"): + return value.next_update_utc + return value.next_update + + +def _this_update(value: OCSPResponse) -> Optional[_datetime]: + """Compat helper to return the response's this_update_utc.""" + # Added in cryptography 43.0.0. + if hasattr(value, "this_update_utc"): + return value.this_update_utc + return value.this_update + + class _OCSPCache: """A cache for OCSP responses.""" @@ -62,25 +78,30 @@ def __setitem__(self, key: OCSPRequest, value: OCSPResponse) -> None: # As per the OCSP protocol, if the response's nextUpdate field is # not set, the responder is indicating that newer revocation # information is available all the time. - if value.next_update is None: + next_update = _next_update(value) + if next_update is None: self._data.pop(cache_key, None) return + this_update = _this_update(value) + if this_update is None: + return + now = _datetime.now(tz=timezone.utc) + if this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) # Do nothing if the response is invalid. - if not ( - value.this_update - <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) - < value.next_update - ): + if not (this_update <= now < next_update): return # Cache new response OR update cached response if new response # has longer validity. cached_value = self._data.get(cache_key, None) - if cached_value is None or ( - cached_value.next_update is not None - and cached_value.next_update < value.next_update - ): + if cached_value is None: + self._data[cache_key] = value + return + cached_next_update = _next_update(cached_value) + if cached_next_update is not None and cached_next_update < next_update: self._data[cache_key] = value def __getitem__(self, item: OCSPRequest) -> OCSPResponse: @@ -95,13 +116,15 @@ def __getitem__(self, item: OCSPRequest) -> OCSPResponse: value = self._data[cache_key] # Return cached response if it is still valid. - assert value.this_update is not None - assert value.next_update is not None - if ( - value.this_update - <= _datetime.now(tz=timezone.utc).replace(tzinfo=None) - < value.next_update - ): + this_update = _this_update(value) + next_update = _next_update(value) + assert this_update is not None + assert next_update is not None + now = _datetime.now(tz=timezone.utc) + if this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) + if this_update <= now < next_update: return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 1bda3b4d71..ee359b71c2 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -58,6 +58,7 @@ from requests.exceptions import RequestException as _RequestException from pymongo import _csot +from pymongo.ocsp_cache import _next_update, _this_update if TYPE_CHECKING: from cryptography.hazmat.primitives.asymmetric import ( @@ -275,13 +276,18 @@ def _verify_response(issuer: Certificate, response: OCSPResponse) -> int: # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? - now = _datetime.now(tz=timezone.utc).replace(tzinfo=None) + this_update = _this_update(response) + now = _datetime.now(tz=timezone.utc) + if this_update and this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) # RFC6960, Section 3.2, Number 5 - if response.this_update > now: + if this_update and this_update > now: _LOGGER.debug("thisUpdate is in the future") return 0 # RFC6960, Section 3.2, Number 6 - if response.next_update and response.next_update < now: + next_update = _next_update(response) + if next_update and next_update < now: _LOGGER.debug("nextUpdate is in the past") return 0 return 1 From d08fec6342e07477e3a0c44d069905758d8b63b9 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Tue, 6 Aug 2024 11:10:01 -0700 Subject: [PATCH 1375/2111] PYTHON-4550 Add MongoClient.bulk_write API (#1745) --- pymongo/_client_bulk_shared.py | 77 ++ pymongo/asynchronous/client_bulk.py | 788 +++++++++++++++ pymongo/asynchronous/mongo_client.py | 175 +++- pymongo/errors.py | 57 ++ pymongo/message.py | 405 +++++++- pymongo/operations.py | 315 ++++-- pymongo/results.py | 161 ++- pymongo/synchronous/client_bulk.py | 786 +++++++++++++++ pymongo/synchronous/mongo_client.py | 175 +++- pymongo/typings.py | 3 + test/asynchronous/test_client_bulk_write.py | 571 +++++++++++ .../unacknowledged-client-bulkWrite.json | 218 ++++ .../client-bulkWrite-delete-options.json | 267 +++++ .../client-bulkWrite-errorResponse.json | 68 ++ .../crud/unified/client-bulkWrite-errors.json | 454 +++++++++ .../client-bulkWrite-mixed-namespaces.json | 314 ++++++ .../unified/client-bulkWrite-options.json | 715 +++++++++++++ .../unified/client-bulkWrite-ordered.json | 290 ++++++ .../unified/client-bulkWrite-results.json | 832 +++++++++++++++ .../client-bulkWrite-update-options.json | 948 ++++++++++++++++++ .../client-bulkWrite-update-pipeline.json | 257 +++++ .../client-bulkWrite-update-validation.json | 216 ++++ .../client-bulkWrite-clientErrors.json | 350 +++++++ .../client-bulkWrite-serverErrors.json | 872 ++++++++++++++++ .../unified/handshakeError.json | 216 ++++ .../operation-id.json | 187 ++++ test/test_client_bulk_write.py | 571 +++++++++++ .../unified/client-bulkWrite.json | 592 +++++++++++ .../transactions/unified/mongos-pin-auto.json | 294 ++++++ test/unified_format.py | 113 ++- test/utils.py | 6 +- test/versioned-api/crud-api-version-1.json | 82 +- tools/synchro.py | 2 + 33 files changed, 11221 insertions(+), 156 deletions(-) create mode 100644 pymongo/_client_bulk_shared.py create mode 100644 pymongo/asynchronous/client_bulk.py create mode 100644 pymongo/synchronous/client_bulk.py create mode 100644 test/asynchronous/test_client_bulk_write.py create mode 100644 test/command_monitoring/unacknowledged-client-bulkWrite.json create mode 100644 test/crud/unified/client-bulkWrite-delete-options.json create mode 100644 test/crud/unified/client-bulkWrite-errorResponse.json create mode 100644 test/crud/unified/client-bulkWrite-errors.json create mode 100644 test/crud/unified/client-bulkWrite-mixed-namespaces.json create mode 100644 test/crud/unified/client-bulkWrite-options.json create mode 100644 test/crud/unified/client-bulkWrite-ordered.json create mode 100644 test/crud/unified/client-bulkWrite-results.json create mode 100644 test/crud/unified/client-bulkWrite-update-options.json create mode 100644 test/crud/unified/client-bulkWrite-update-pipeline.json create mode 100644 test/crud/unified/client-bulkWrite-update-validation.json create mode 100644 test/retryable_writes/unified/client-bulkWrite-clientErrors.json create mode 100644 test/retryable_writes/unified/client-bulkWrite-serverErrors.json create mode 100644 test/test_client_bulk_write.py create mode 100644 test/transactions/unified/client-bulkWrite.json diff --git a/pymongo/_client_bulk_shared.py b/pymongo/_client_bulk_shared.py new file mode 100644 index 0000000000..4dd1af2108 --- /dev/null +++ b/pymongo/_client_bulk_shared.py @@ -0,0 +1,77 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, types, and classes shared across Client Bulk Write API implementations.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, NoReturn + +from pymongo.errors import ClientBulkWriteException, OperationFailure +from pymongo.helpers_shared import _get_wce_doc + +if TYPE_CHECKING: + from pymongo.typings import _DocumentOut + + +def _merge_command( + ops: list[tuple[str, Mapping[str, Any]]], + offset: int, + full_result: MutableMapping[str, Any], + result: Mapping[str, Any], +) -> None: + """Merge result of a single bulk write batch into the full result.""" + if result.get("error"): + full_result["error"] = result["error"] + + full_result["nInserted"] += result.get("nInserted", 0) + full_result["nDeleted"] += result.get("nDeleted", 0) + full_result["nMatched"] += result.get("nMatched", 0) + full_result["nModified"] += result.get("nModified", 0) + full_result["nUpserted"] += result.get("nUpserted", 0) + + write_errors = result.get("writeErrors") + if write_errors: + for doc in write_errors: + # Leave the server response intact for APM. + replacement = doc.copy() + original_index = doc["idx"] + offset + replacement["idx"] = original_index + # Add the failed operation to the error document. + replacement["op"] = ops[original_index][1] + full_result["writeErrors"].append(replacement) + + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) + + +def _throw_client_bulk_write_exception( + full_result: _DocumentOut, verbose_results: bool +) -> NoReturn: + """Raise a ClientBulkWriteException from the full result.""" + # retryWrites on MMAPv1 should raise an actionable error. + if full_result["writeErrors"]: + full_result["writeErrors"].sort(key=lambda error: error["idx"]) + err = full_result["writeErrors"][0] + code = err["code"] + msg = err["errmsg"] + if code == 20 and msg.startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, full_result) + raise ClientBulkWriteException(full_result, verbose_results) diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py new file mode 100644 index 0000000000..671d989c25 --- /dev/null +++ b/pymongo/asynchronous/client_bulk.py @@ -0,0 +1,788 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The client-level bulk write operations interface. + +.. versionadded:: 4.9 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.asynchronous.client_session import AsyncClientSession, _validate_session_write_concern +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection +from pymongo._client_bulk_shared import ( + _merge_command, + _throw_client_bulk_write_exception, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + WaitQueueTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _ClientBulkWriteContext, + _convert_client_bulk_exception, + _convert_exception, + _convert_write_result, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + ClientBulkWriteResult, + DeleteResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _DocumentOut, _Pipeline +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class _AsyncClientBulk: + """The private guts of the client-level bulk write API.""" + + def __init__( + self, + client: AsyncMongoClient, + write_concern: WriteConcern, + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + comment: Optional[str] = None, + let: Optional[Any] = None, + verbose_results: bool = False, + ) -> None: + """Initialize a _AsyncClientBulk instance.""" + self.client = client + self.write_concern = write_concern + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.ordered = ordered + self.bypass_doc_val = bypass_document_validation + self.comment = comment + self.verbose_results = verbose_results + + self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.idx_offset: int = 0 + self.total_ops: int = 0 + + self.executed = False + self.uses_upsert = False + self.uses_collation = False + self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False + + self.is_retryable = self.client.options.retry_writes + self.retrying = False + self.started_retryable_write = False + + @property + def bulk_ctx_class(self) -> Type[_ClientBulkWriteContext]: + return _ClientBulkWriteContext + + def add_insert(self, namespace: str, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + cmd = {"insert": namespace, "document": document} + self.ops.append(("insert", cmd)) + self.total_ops += 1 + + def add_update( + self, + namespace: str, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool = False, + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd = { + "update": namespace, + "filter": selector, + "updateMods": update, + "multi": multi, + } + if upsert is not None: + self.uses_upsert = True + cmd["upsert"] = upsert + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append(("update", cmd)) + self.total_ops += 1 + + def add_replace( + self, + namespace: str, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd = { + "update": namespace, + "filter": selector, + "updateMods": replacement, + "multi": False, + } + if upsert is not None: + self.uses_upsert = True + cmd["upsert"] = upsert + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + self.ops.append(("replace", cmd)) + self.total_ops += 1 + + def add_delete( + self, + namespace: str, + selector: Mapping[str, Any], + multi: bool, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd = {"delete": namespace, "filter": selector, "multi": multi} + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append(("delete", cmd)) + self.total_ops += 1 + + @_handle_reauth + async def write_command( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: Union[bytes, dict[str, Any]], + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: AsyncMongoClient, + ) -> dict[str, Any]: + """A proxy for AsyncConnection.write_command that handles event publishing.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, op_docs, ns_docs) + try: + reply = await bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc, arg-type] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + finally: + bwc.start_time = datetime.datetime.now() + return reply # type: ignore[return-value] + + async def unack_write( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: AsyncMongoClient, + ) -> Optional[Mapping[str, Any]]: + """A proxy for AsyncConnection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, op_docs, ns_docs) + try: + result = await bwc.conn.unack_write(msg, bwc.max_bson_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + finally: + bwc.start_time = datetime.datetime.now() + return result # type: ignore[return-value] + + async def _execute_batch_unack( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (unack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + await self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return to_send_ops, to_send_ns + + async def _execute_batch( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (ack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + result = await self.write_command( + bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client + ) # type: ignore[arg-type] + await self.client._process_response(result, bwc.session) # type: ignore[arg-type] + return result, to_send_ops, to_send_ns # type: ignore[return-value] + + async def _process_results_cursor( + self, + full_result: MutableMapping[str, Any], + result: MutableMapping[str, Any], + conn: AsyncConnection, + session: Optional[AsyncClientSession], + ) -> None: + """Internal helper for processing the server reply command cursor.""" + if result.get("cursor"): + coll = AsyncCollection( + database=AsyncDatabase(self.client, "admin"), + name="$cmd.bulkWrite", + ) + cmd_cursor = AsyncCommandCursor( + coll, + result["cursor"], + conn.address, + session=session, + explicit_session=session is not None, + comment=self.comment, + ) + await cmd_cursor._maybe_pin_connection(conn) + + # Iterate the cursor to get individual write results. + try: + async for doc in cmd_cursor: + original_index = doc["idx"] + self.idx_offset + op_type, op = self.ops[original_index] + + if not doc["ok"]: + result["writeErrors"].append(doc) + if self.ordered: + return + + # Record individual write result. + if doc["ok"] and self.verbose_results: + if op_type == "insert": + inserted_id = op["document"]["_id"] + res = InsertOneResult(inserted_id, acknowledged=True) # type: ignore[assignment] + if op_type in ["update", "replace"]: + op_type = "update" + res = UpdateResult(doc, acknowledged=True, in_client_bulk=True) # type: ignore[assignment] + if op_type == "delete": + res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] + full_result[f"{op_type}Results"][original_index] = res + + except Exception as exc: + # Attempt to close the cursor, then raise top-level error. + if cmd_cursor.alive: + await cmd_cursor.close() + result["error"] = _convert_client_bulk_exception(exc) + + async def _execute_command( + self, + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + """Internal helper for executing batches of bulkWrite commands.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + + # AsyncConnection.command validates the session, but we use + # AsyncConnection.write_command + conn.validate_session(self.client, session) + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + session, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # If this is the last possible batch, use the + # final write concern. + if self.total_ops - self.idx_offset <= bwc.max_write_batch_size: + write_concern = final_write_concern or write_concern + + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + not_in_transaction = session and not session.in_transaction + if not_in_transaction or not session: + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, self.client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(self.client, cmd) + ops = islice(self.ops, self.idx_offset, None) + + # Run as many ops as possible in one server command. + if write_concern.acknowledged: + raw_result, to_send_ops, _ = await self._execute_batch(bwc, cmd, ops) # type: ignore[arg-type] + result = copy.deepcopy(raw_result) + + # Top-level server/network error. + if result.get("error"): + error = result["error"] + retryable_top_level_error = ( + isinstance(error.details, dict) + and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES + ) + retryable_network_error = isinstance( + error, ConnectionFailure + ) and not isinstance(error, (NotPrimaryError, WaitQueueTimeoutError)) + + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + if retryable and (retryable_top_level_error or retryable_network_error): + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + else: + _merge_command(self.ops, self.idx_offset, full_result, result) + _throw_client_bulk_write_exception(full_result, self.verbose_results) + + result["error"] = None + result["writeErrors"] = [] + if result.get("nErrors", 0) < len(to_send_ops): + full_result["anySuccessful"] = True + + # Top-level command error. + if not result["ok"]: + result["error"] = raw_result + _merge_command(self.ops, self.idx_offset, full_result, result) + break + + if retryable: + # Retryable writeConcernErrors halt the execution of this batch. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + + # Process the server reply as a command cursor. + await self._process_results_cursor(full_result, result, conn, session) + + # Merge this batch's results with the full results. + _merge_command(self.ops, self.idx_offset, full_result, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + else: + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + # We halt execution if we hit a top-level error, + # or an individual error in an ordered bulk write. + if full_result["error"] or (self.ordered and full_result["writeErrors"]): + break + + async def execute_command( + self, + session: Optional[AsyncClientSession], + operation: str, + ) -> MutableMapping[str, Any]: + """Execute commands with w=1 WriteConcern.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + op_id = _randint() + + async def retryable_bulk( + session: Optional[AsyncClientSession], + conn: AsyncConnection, + retryable: bool, + ) -> None: + if conn.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + await self._execute_command( + self.write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + await self.client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) + + if full_result["error"] or full_result["writeErrors"] or full_result["writeConcernErrors"]: + _throw_client_bulk_write_exception(full_result, self.verbose_results) + return full_result + + async def execute_command_unack_unordered( + self, + conn: AsyncConnection, + ) -> None: + """Execute commands with OP_MSG and w=0 writeConcern, unordered.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + op_id = _randint() + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + None, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + conn.add_server_api(cmd) + ops = islice(self.ops, self.idx_offset, None) + + # Run as many ops as possible in one server command. + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + async def execute_command_unack_ordered( + self, + conn: AsyncConnection, + ) -> None: + """Execute commands with OP_MSG and w=0 WriteConcern, ordered.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + # Ordered bulk writes have to be acknowledged so that we stop + # processing at the first error, even when the application + # specified unacknowledged writeConcern. + initial_write_concern = WriteConcern() + op_id = _randint() + try: + await self._execute_command( + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + self.write_concern, + ) + except OperationFailure: + pass + + async def execute_no_results( + self, + conn: AsyncConnection, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val is not None: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + if self.ordered: + return await self.execute_command_unack_ordered(conn) + return await self.execute_command_unack_unordered(conn) + + async def execute( + self, + session: Optional[AsyncClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + session = _validate_session_write_concern(session, self.write_concern) + + if not self.write_concern.acknowledged: + async with await self.client._conn_for_writes(session, operation) as connection: + if connection.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + await self.execute_no_results(connection) + return ClientBulkWriteResult(None, False, False) # type: ignore[arg-type] + + result = await self.execute_command(session, operation) + return ClientBulkWriteResult( + result, + self.write_concern.acknowledged, + self.verbose_results, + ) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 53fa14858d..90e40978a2 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -61,6 +61,7 @@ from pymongo import _csot, common, helpers_shared, uri_parser from pymongo.asynchronous import client_session, database, periodic_executor from pymongo.asynchronous.change_stream import AsyncChangeStream, AsyncClusterChangeStream +from pymongo.asynchronous.client_bulk import _AsyncClientBulk from pymongo.asynchronous.client_session import _EmptyServerSession from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.settings import TopologySettings @@ -69,6 +70,7 @@ from pymongo.errors import ( AutoReconnect, BulkWriteError, + ClientBulkWriteException, ConfigurationError, ConnectionFailure, InvalidOperation, @@ -83,8 +85,17 @@ from pymongo.logger import _CLIENT_LOGGER, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason -from pymongo.operations import _Op +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription @@ -130,6 +141,15 @@ _IS_SYNC = False +_WriteOp = Union[ + InsertOne, + DeleteOne, + DeleteMany, + ReplaceOne, + UpdateOne, + UpdateMany, +] + class AsyncMongoClient(common.BaseObject, Generic[_DocumentType]): HOST = "localhost" @@ -1720,7 +1740,7 @@ async def _retry_with_session( retryable: bool, func: _WriteCall[T], session: Optional[AsyncClientSession], - bulk: Optional[_AsyncBulk], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], operation: str, operation_id: Optional[int] = None, ) -> T: @@ -1750,7 +1770,7 @@ async def _retry_internal( self, func: _WriteCall[T] | _ReadCall[T], session: Optional[AsyncClientSession], - bulk: Optional[_AsyncBulk], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], operation: str, is_read: bool = False, address: Optional[_Address] = None, @@ -1833,7 +1853,7 @@ async def _retryable_write( func: _WriteCall[T], session: Optional[AsyncClientSession], operation: str, - bulk: Optional[_AsyncBulk] = None, + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]] = None, operation_id: Optional[int] = None, ) -> T: """Execute an operation with consecutive retries if possible @@ -2204,10 +2224,134 @@ async def drop_database( session=session, ) + @_csot.apply + async def bulk_write( + self, + models: Sequence[_WriteOp[_DocumentType]], + session: Optional[AsyncClientSession] = None, + ordered: bool = True, + verbose_results: bool = False, + bypass_document_validation: Optional[bool] = None, + comment: Optional[Any] = None, + let: Optional[Mapping] = None, + write_concern: Optional[WriteConcern] = None, + ) -> ClientBulkWriteResult: + """Send a batch of write operations, potentially across multiple namespaces, to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + ... + >>> async for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + ... + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> models = [InsertOne(namespace="db.test", document={'y': 1}), + ... DeleteOne(namespace="db.test", filter={'x': 1}), + ... InsertOne(namespace="db.coll", document={'y': 2}), + ... ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)] + >>> result = await client.bulk_write(models=models) + >>> result.inserted_count + 2 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {3: ObjectId('54f62ee28891e756a6e1abd5')} + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + ... + >>> async for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')} + + :param models: A list of write operation instances. + :param session: (optional) An instance of + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param ordered: If ``True`` (the default), requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False``, requests + will be still performed on the server serially, in the order provided, + but all operations will be attempted even if any errors occur. + :param verbose_results: If ``True``, detailed results for each + successful operation will be included in the returned + :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is ``False``. + :param comment: (optional) A user-provided comment to attach to this + command. + :param let: (optional) Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param write_concern: (optional) The write concern to use for this bulk write. + + :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. + + .. seealso:: :ref:`writes-and-ids` + + .. note:: requires MongoDB server version 8.0+. + + .. versionadded:: 4.9 + """ + if self._options.auto_encryption_opts: + raise InvalidOperation( + "MongoClient.bulk_write does not currently support automatic encryption" + ) + + if session and session.in_transaction: + # Inherit the transaction write concern. + if write_concern: + raise InvalidOperation("Cannot set write concern after starting a transaction") + write_concern = session._transaction.opts.write_concern # type: ignore[union-attr] + else: + # Inherit the client's write concern if none is provided. + if not write_concern: + write_concern = self.write_concern + + common.validate_list("models", models) + + blk = _AsyncClientBulk( + self, + write_concern=write_concern, # type: ignore[arg-type] + ordered=ordered, + bypass_document_validation=bypass_document_validation, + comment=comment, + let=let, + verbose_results=verbose_results, + ) + for model in models: + try: + model._add_to_client_bulk(blk) + except AttributeError: + raise TypeError(f"{model!r} is not a valid request") from None + + return await blk.execute(session, _Op.BULK_WRITE) + def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: """Return the server response from PyMongo exception or None.""" - if isinstance(exc, BulkWriteError): + if isinstance(exc, (BulkWriteError, ClientBulkWriteException)): # Check the last writeConcernError to determine if this # BulkWriteError is retryable. wces = exc.details["writeConcernErrors"] @@ -2242,10 +2386,14 @@ def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mong # AsyncConnection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is # handled above. - if isinstance(exc, ConnectionFailure) and not isinstance( - exc, (NotPrimaryError, WaitQueueTimeoutError) + if isinstance(exc, ClientBulkWriteException): + exc_to_check = exc.error + else: + exc_to_check = exc + if isinstance(exc_to_check, ConnectionFailure) and not isinstance( + exc_to_check, (NotPrimaryError, WaitQueueTimeoutError) ): - exc._add_error_label("RetryableWriteError") + exc_to_check._add_error_label("RetryableWriteError") class _MongoClientErrorHandler: @@ -2292,6 +2440,8 @@ async def handle( return self.handled = True if self.session: + if isinstance(exc_val, ClientBulkWriteException): + exc_val = exc_val.error if isinstance(exc_val, ConnectionFailure): if self.session.in_transaction: exc_val._add_error_label("TransientTransactionError") @@ -2303,7 +2453,7 @@ async def handle( ): await self.session._unpin() err_ctx = _ErrorContext( - exc_val, + exc_val, # type: ignore[arg-type] self.max_wire_version, self.sock_generation, self.completed_handshake, @@ -2330,7 +2480,7 @@ def __init__( self, mongo_client: AsyncMongoClient, func: _WriteCall[T] | _ReadCall[T], - bulk: Optional[_AsyncBulk], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], operation: str, is_read: bool = False, session: Optional[AsyncClientSession] = None, @@ -2407,7 +2557,10 @@ async def run(self) -> T: if not self._is_read: if not self._retryable: raise - retryable_write_error_exc = exc.has_error_label("RetryableWriteError") + if isinstance(exc, ClientBulkWriteException) and exc.error: + retryable_write_error_exc = exc.error.has_error_label("RetryableWriteError") + else: + retryable_write_error_exc = exc.has_error_label("RetryableWriteError") if retryable_write_error_exc: assert self._session await self._session._unpin() diff --git a/pymongo/errors.py b/pymongo/errors.py index a0f1ba2e93..1c51708c72 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -24,6 +24,7 @@ from bson.errors import InvalidDocument if TYPE_CHECKING: + from pymongo.results import ClientBulkWriteResult from pymongo.typings import _DocumentOut @@ -308,6 +309,62 @@ def timeout(self) -> bool: return False +class ClientBulkWriteException(OperationFailure): + """Exception class for client-level bulk write errors.""" + + details: _DocumentOut + verbose: bool + + def __init__(self, results: _DocumentOut, verbose: bool) -> None: + super().__init__("batch op errors occurred", 65, results) + self.verbose = verbose + + def __reduce__(self) -> tuple[Any, Any]: + return self.__class__, (self.details,) + + @property + def error(self) -> Optional[Any]: + """A top-level error that occurred when attempting to + communicate with the server or execute the bulk write. + + This value may not be populated if the exception was + thrown due to errors occurring on individual writes. + """ + return self.details.get("error", None) + + @property + def write_concern_errors(self) -> Optional[list[WriteConcernError]]: + """Write concern errors that occurred during the bulk write. + + This list may have multiple items if more than one + server command was required to execute the bulk write. + """ + return self.details.get("writeConcernErrors", []) + + @property + def write_errors(self) -> Optional[Mapping[int, WriteError]]: + """Errors that occurred during the execution of individual write operations. + + This map will contain at most one entry if the bulk write was ordered. + """ + return self.details.get("writeErrors", {}) + + @property + def partial_result(self) -> Optional[ClientBulkWriteResult]: + """The results of any successful operations that were + performed before the error was encountered. + """ + from pymongo.results import ClientBulkWriteResult + + if self.details.get("anySuccessful"): + return ClientBulkWriteResult( + self.details, # type: ignore[arg-type] + acknowledged=True, + has_verbose_results=self.verbose, + ) + return None + + class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" diff --git a/pymongo/message.py b/pymongo/message.py index bcb4ce10ec..90fac85452 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -21,6 +21,7 @@ """ from __future__ import annotations +import copy import datetime import random import struct @@ -101,7 +102,12 @@ _UPDATE: b"\x04updates\x00\x00\x00\x00\x00", _DELETE: b"\x04deletes\x00\x00\x00\x00\x00", } -_FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} +_FIELD_MAP = { + "insert": "documents", + "update": "updates", + "delete": "deletes", + "bulkWrite": "bulkWrite", +} _UNICODE_REPLACE_CODEC_OPTIONS: CodecOptions[Mapping[str, Any]] = CodecOptions( unicode_decode_error_handler="replace" @@ -136,6 +142,17 @@ def _convert_exception(exception: Exception) -> dict[str, Any]: return {"errmsg": str(exception), "errtype": exception.__class__.__name__} +def _convert_client_bulk_exception(exception: Exception) -> dict[str, Any]: + """Convert an Exception into a failure document for publishing, + for use in client-level bulk write API. + """ + return { + "errmsg": str(exception), + "code": exception.code, # type: ignore[attr-defined] + "errtype": exception.__class__.__name__, + } + + def _convert_write_result( operation: str, command: Mapping[str, Any], result: Mapping[str, Any] ) -> dict[str, Any]: @@ -551,8 +568,8 @@ def _get_more( } -class _BulkWriteContext: - """A wrapper around AsyncConnection for use with write splitting functions.""" +class _BulkWriteContextBase: + """Private base class for wrapping around AsyncConnection to use with write splitting functions.""" __slots__ = ( "db_name", @@ -576,7 +593,7 @@ def __init__( conn: _AgnosticConnection, operation_id: int, listeners: _EventListeners, - session: _AgnosticClientSession, + session: Optional[_AgnosticClientSession], op_type: int, codec: CodecOptions, ): @@ -593,17 +610,6 @@ def __init__( self.op_type = op_type self.codec = codec - def batch_command( - self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] - ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]]]: - namespace = self.db_name + ".$cmd" - request_id, msg, to_send = _do_batched_op_msg( - namespace, self.op_type, cmd, docs, self.codec, self - ) - if not to_send: - raise InvalidOperation("cannot do an empty bulk write") - return request_id, msg, to_send - @property def max_bson_size(self) -> int: """A proxy for SockInfo.max_bson_size.""" @@ -627,22 +633,6 @@ def max_split_size(self) -> int: """The maximum size of a BSON command before batch splitting.""" return self.max_bson_size - def _start( - self, cmd: MutableMapping[str, Any], request_id: int, docs: list[Mapping[str, Any]] - ) -> MutableMapping[str, Any]: - """Publish a CommandStartedEvent.""" - cmd[self.field] = docs - self.listeners.publish_command_start( - cmd, - self.db_name, - request_id, - self.conn.address, - self.conn.server_connection_id, - self.op_id, - self.conn.service_id, - ) - return cmd - def _succeed(self, request_id: int, reply: _DocumentOut, duration: datetime.timedelta) -> None: """Publish a CommandSucceededEvent.""" self.listeners.publish_command_success( @@ -672,6 +662,61 @@ def _fail(self, request_id: int, failure: _DocumentOut, duration: datetime.timed ) +class _BulkWriteContext(_BulkWriteContextBase): + """A wrapper around AsyncConnection/Connection for use with the collection-level bulk write API.""" + + __slots__ = () + + def __init__( + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + op_type: int, + codec: CodecOptions, + ): + super().__init__( + database_name, + cmd_name, + conn, + operation_id, + listeners, + session, + op_type, + codec, + ) + + def batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]]]: + namespace = self.db_name + ".$cmd" + request_id, msg, to_send = _do_batched_op_msg( + namespace, self.op_type, cmd, docs, self.codec, self + ) + if not to_send: + raise InvalidOperation("cannot do an empty bulk write") + return request_id, msg, to_send + + def _start( + self, cmd: MutableMapping[str, Any], request_id: int, docs: list[Mapping[str, Any]] + ) -> MutableMapping[str, Any]: + """Publish a CommandStartedEvent.""" + cmd[self.field] = docs + self.listeners.publish_command_start( + cmd, + self.db_name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + ) + return cmd + + class _EncryptedBulkWriteContext(_BulkWriteContext): __slots__ = () @@ -878,6 +923,304 @@ def _do_batched_op_msg( return _batched_op_msg(operation, command, docs, ack, opts, ctx) +class _ClientBulkWriteContext(_BulkWriteContextBase): + """A wrapper around AsyncConnection/Connection for use with the client-level bulk write API.""" + + __slots__ = () + + def __init__( + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + codec: CodecOptions, + ): + super().__init__( + database_name, + cmd_name, + conn, + operation_id, + listeners, + session, + 0, + codec, + ) + + def batch_command( + self, cmd: MutableMapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]] + ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + request_id, msg, to_send_ops, to_send_ns = _client_do_batched_op_msg( + cmd, operations, self.codec, self + ) + if not to_send_ops: + raise InvalidOperation("cannot do an empty bulk write") + return request_id, msg, to_send_ops, to_send_ns + + def _start( + self, + cmd: MutableMapping[str, Any], + request_id: int, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + ) -> MutableMapping[str, Any]: + """Publish a CommandStartedEvent.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + self.listeners.publish_command_start( + cmd, + self.db_name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + ) + return cmd + + +_OP_MSG_OVERHEAD = 1000 + + +def _client_construct_op_msg( + command: Mapping[str, Any], + to_send_ops: list[Mapping[str, Any]], + to_send_ns: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions, + buf: _BytesIO, +) -> int: + # Write flags + flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00" + buf.write(flags) + + # Type 0 Section + buf.write(b"\x00") + buf.write(_dict_to_bson(command, False, opts)) + + # Type 1 Section for ops + buf.write(b"\x01") + size_location = buf.tell() + # Save space for size + buf.write(b"\x00\x00\x00\x00") + buf.write(b"ops\x00") + # Write all the ops documents + for op in to_send_ops: + buf.write(_dict_to_bson(op, False, opts)) + resume_location = buf.tell() + # Write type 1 section size + length = buf.tell() + buf.seek(size_location) + buf.write(_pack_int(length - size_location)) + buf.seek(resume_location) + + # Type 1 Section for nsInfo + buf.write(b"\x01") + size_location = buf.tell() + # Save space for size + buf.write(b"\x00\x00\x00\x00") + buf.write(b"nsInfo\x00") + # Write all the nsInfo documents + for ns in to_send_ns: + buf.write(_dict_to_bson(ns, False, opts)) + # Write type 1 section size + length = buf.tell() + buf.seek(size_location) + buf.write(_pack_int(length - size_location)) + + return length + + +def _client_batched_op_msg_impl( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + ack: bool, + opts: CodecOptions, + ctx: _ClientBulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]], int]: + """Create a batched OP_MSG write for client-level bulk write.""" + + def _check_doc_size_limits( + op_type: str, + document: Mapping[str, Any], + limit: int, + ) -> int: + doc_size = len(_dict_to_bson(document, False, opts)) + if doc_size > limit: + _raise_document_too_large(op_type, doc_size, limit) + return doc_size + + max_bson_size = ctx.max_bson_size + max_write_batch_size = ctx.max_write_batch_size + max_message_size = ctx.max_message_size + + # Don't include bulkWrite-command-agnostic fields in document size calculations. + abridged_keys = ["bulkWrite", "errorsOnly", "ordered"] + if command.get("bypassDocumentValidation"): + abridged_keys.append("bypassDocumentValidation") + if command.get("comment"): + abridged_keys.append("comment") + if command.get("let"): + abridged_keys.append("let") + command_abridged = {key: command[key] for key in abridged_keys} + command_len_abridged = len(_dict_to_bson(command_abridged, False, opts)) + + # When OP_MSG is used unacknowledged we have to check command + # document size client-side or applications won't be notified. + if not ack: + _check_doc_size_limits("bulkWrite", command_abridged, max_bson_size + _COMMAND_OVERHEAD) + + # Maximum combined size of the ops and nsInfo document sequences. + max_doc_sequences_bytes = max_message_size - (_OP_MSG_OVERHEAD + command_len_abridged) + + ns_info = {} + to_send_ops: list[Mapping[str, Any]] = [] + to_send_ns: list[Mapping[str, int]] = [] + total_ops_length = 0 + total_ns_length = 0 + idx = 0 + + for real_op_type, op_doc in operations: + op_type = real_op_type + # Check insert/replace document size if unacknowledged. + if real_op_type == "insert": + if not ack: + _check_doc_size_limits(real_op_type, op_doc["document"], max_bson_size) + if real_op_type == "replace": + op_type = "update" + if not ack: + _check_doc_size_limits(real_op_type, op_doc["updateMods"], max_bson_size) + + ns_doc_to_send = None + ns_length = 0 + namespace = op_doc[op_type] + if namespace not in ns_info: + ns_doc_to_send = {"ns": namespace} + new_ns_index = len(to_send_ns) + ns_info[namespace] = new_ns_index + + # First entry in the operation doc has the operation type as its + # key and the index of its namespace within ns_info as its value. + op_doc_to_send = copy.deepcopy(op_doc) + op_doc_to_send[op_type] = ns_info[namespace] # type: ignore[index] + + # Encode current operation doc and, if newly added, namespace doc. + op_length = len(_dict_to_bson(op_doc_to_send, False, opts)) + if ns_doc_to_send: + ns_length = len(_dict_to_bson(ns_doc_to_send, False, opts)) + + # Check operation document size if unacknowledged. + if not ack: + _check_doc_size_limits(op_type, op_doc_to_send, max_bson_size + _COMMAND_OVERHEAD) + + new_message_size = total_ops_length + total_ns_length + op_length + ns_length + # We have enough data, return this batch. + if new_message_size > max_doc_sequences_bytes: + break + to_send_ops.append(op_doc_to_send) + total_ops_length += op_length + if ns_doc_to_send: + to_send_ns.append(ns_doc_to_send) + total_ns_length += ns_length + idx += 1 + # We have enough documents, return this batch. + if idx == max_write_batch_size: + break + + # Construct the entire OP_MSG. + length = _client_construct_op_msg(command, to_send_ops, to_send_ns, ack, opts, buf) + + return to_send_ops, to_send_ns, length + + +def _client_encode_batched_op_msg( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + ack: bool, + opts: CodecOptions, + ctx: _ClientBulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Encode the next batched client-level bulkWrite + operation as OP_MSG. + """ + buf = _BytesIO() + + to_send_ops, to_send_ns, _ = _client_batched_op_msg_impl( + command, operations, ack, opts, ctx, buf + ) + return buf.getvalue(), to_send_ops, to_send_ns + + +def _client_batched_op_msg_compressed( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + ack: bool, + opts: CodecOptions, + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Create the next batched client-level bulkWrite operation + with OP_MSG, compressed. + """ + data, to_send_ops, to_send_ns = _client_encode_batched_op_msg( + command, operations, ack, opts, ctx + ) + + assert ctx.conn.compression_context is not None + request_id, msg = _compress(2013, data, ctx.conn.compression_context) + return request_id, msg, to_send_ops, to_send_ns + + +def _client_batched_op_msg( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + ack: bool, + opts: CodecOptions, + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """OP_MSG implementation entry point for client-level bulkWrite.""" + buf = _BytesIO() + + # Save space for message length and request id + buf.write(_ZERO_64) + # responseTo, opCode + buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") + + to_send_ops, to_send_ns, length = _client_batched_op_msg_impl( + command, operations, ack, opts, ctx, buf + ) + + # Header - request id and message length + buf.seek(4) + request_id = _randint() + buf.write(_pack_int(request_id)) + buf.seek(0) + buf.write(_pack_int(length)) + + return request_id, buf.getvalue(), to_send_ops, to_send_ns + + +def _client_do_batched_op_msg( + command: MutableMapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + opts: CodecOptions, + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Create the next batched client-level bulkWrite + operation using OP_MSG. + """ + command["$db"] = "admin" + if "writeConcern" in command: + ack = bool(command["writeConcern"].get("w", 1)) + else: + ack = True + if ctx.conn.compression_context: + return _client_batched_op_msg_compressed(command, operations, ack, opts, ctx) + return _client_batched_op_msg(command, operations, ack, opts, ctx) + + # End OP_MSG ----------------------------------------------------- diff --git a/pymongo/operations.py b/pymongo/operations.py index 7bb861ae4f..d2e1feba69 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -34,12 +34,13 @@ from pymongo import helpers_shared from pymongo.collation import validate_collation_or_none from pymongo.common import validate_is_mapping, validate_list +from pymongo.errors import InvalidOperation from pymongo.helpers_shared import _gen_index_name, _index_document, _index_list from pymongo.typings import _CollationIn, _DocumentType, _Pipeline from pymongo.write_concern import validate_boolean if TYPE_CHECKING: - from pymongo.typings import _AgnosticBulk + from pymongo.typings import _AgnosticBulk, _AgnosticClientBulk # Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary @@ -52,6 +53,7 @@ class _Op(str, enum.Enum): ABORT = "abortTransaction" AGGREGATE = "aggregate" + BULK_WRITE = "bulkWrite" COMMIT = "commitTransaction" COUNT = "count" CREATE = "create" @@ -83,48 +85,130 @@ class _Op(str, enum.Enum): class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" - __slots__ = ("_doc",) + __slots__ = ( + "_doc", + "_namespace", + ) - def __init__(self, document: _DocumentType) -> None: + def __init__(self, document: _DocumentType, namespace: Optional[str] = None) -> None: """Create an InsertOne instance. - For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write` and :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. :param document: The document to insert. If the document is missing an _id field one will be added. + :param namespace: (optional) The namespace in which to insert a document. + + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. """ self._doc = document + self._namespace = namespace def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" bulkobj.add_insert(self._doc) # type: ignore[arg-type] + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_insert( + self._namespace, + self._doc, # type: ignore[arg-type] + ) + def __repr__(self) -> str: - return f"InsertOne({self._doc!r})" + if self._namespace: + return f"{self.__class__.__name__}({self._doc!r}, {self._namespace!r})" + return f"{self.__class__.__name__}({self._doc!r})" + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return other._doc == self._doc and other._namespace == self._namespace + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + +class _DeleteOp: + """Private base class for delete operations.""" + + __slots__ = ( + "_filter", + "_collation", + "_hint", + "_namespace", + ) + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + ) -> None: + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint + + self._filter = filter + self._collation = collation + self._namespace = namespace def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return other._doc == self._doc + return ( + other._filter, + other._collation, + other._hint, + other._namespace, + ) == ( + self._filter, + self._collation, + self._hint, + self._namespace, + ) return NotImplemented def __ne__(self, other: Any) -> bool: return not self == other + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._collation, + self._hint, + self._namespace, + ) + return f"{self.__class__.__name__}({self._filter!r}, {self._collation!r}, {self._hint!r})" + -class DeleteOne: +class DeleteOne(_DeleteOp): """Represents a delete_one operation.""" - __slots__ = ("_filter", "_collation", "_hint") + __slots__ = () def __init__( self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create a DeleteOne instance. - For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write` and :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. :param filter: A query that matches the document to delete. :param collation: An instance of @@ -135,20 +219,16 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. + :param namespace: (optional) The namespace in which to delete a document. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ - if filter is not None: - validate_is_mapping("filter", filter) - if hint is not None and not isinstance(hint, str): - self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) - else: - self._hint = hint - self._filter = filter - self._collation = collation + super().__init__(filter, collation, hint, namespace) def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" @@ -159,36 +239,37 @@ def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: hint=self._hint, ) - def __repr__(self) -> str: - return f"DeleteOne({self._filter!r}, {self._collation!r}, {self._hint!r})" - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return (other._filter, other._collation, other._hint) == ( - self._filter, - self._collation, - self._hint, + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" ) - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other + bulkobj.add_delete( + self._namespace, + self._filter, + multi=False, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) -class DeleteMany: +class DeleteMany(_DeleteOp): """Represents a delete_many operation.""" - __slots__ = ("_filter", "_collation", "_hint") + __slots__ = () def __init__( self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create a DeleteMany instance. - For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write` and :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. :param filter: A query that matches the documents to delete. :param collation: An instance of @@ -199,20 +280,16 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. + :param namespace: (optional) The namespace in which to delete documents. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ - if filter is not None: - validate_is_mapping("filter", filter) - if hint is not None and not isinstance(hint, str): - self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) - else: - self._hint = hint - self._filter = filter - self._collation = collation + super().__init__(filter, collation, hint, namespace) def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" @@ -223,26 +300,32 @@ def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: hint=self._hint, ) - def __repr__(self) -> str: - return f"DeleteMany({self._filter!r}, {self._collation!r}, {self._hint!r})" - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return (other._filter, other._collation, other._hint) == ( - self._filter, - self._collation, - self._hint, + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" ) - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other + bulkobj.add_delete( + self._namespace, + self._filter, + multi=True, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) class ReplaceOne(Generic[_DocumentType]): """Represents a replace_one operation.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") + __slots__ = ( + "_filter", + "_doc", + "_upsert", + "_collation", + "_hint", + "_namespace", + ) def __init__( self, @@ -251,10 +334,12 @@ def __init__( upsert: bool = False, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create a ReplaceOne instance. - For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write` and :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. :param filter: A query that matches the document to replace. :param replacement: The new document. @@ -268,7 +353,10 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. + :param namespace: (optional) The namespace in which to replace a document. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.5 @@ -282,10 +370,12 @@ def __init__( self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) else: self._hint = hint + self._filter = filter self._doc = replacement self._upsert = upsert self._collation = collation + self._namespace = namespace def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" @@ -297,6 +387,21 @@ def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: hint=self._hint, ) + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_replace( + self._namespace, + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) + def __eq__(self, other: Any) -> bool: if type(other) == type(self): return ( @@ -305,12 +410,14 @@ def __eq__(self, other: Any) -> bool: other._upsert, other._collation, other._hint, + other._namespace, ) == ( self._filter, self._doc, self._upsert, self._collation, other._hint, + self._namespace, ) return NotImplemented @@ -318,6 +425,16 @@ def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + self._namespace, + ) return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, @@ -331,16 +448,25 @@ def __repr__(self) -> str: class _UpdateOp: """Private base class for update operations.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") + __slots__ = ( + "_filter", + "_doc", + "_upsert", + "_collation", + "_array_filters", + "_hint", + "_namespace", + ) def __init__( self, filter: Mapping[str, Any], doc: Union[Mapping[str, Any], _Pipeline], - upsert: bool, + upsert: Optional[bool], collation: Optional[_CollationIn], array_filters: Optional[list[Mapping[str, Any]]], hint: Optional[_IndexKeyHint], + namespace: Optional[str], ): if filter is not None: validate_is_mapping("filter", filter) @@ -358,6 +484,7 @@ def __init__( self._upsert = upsert self._collation = collation self._array_filters = array_filters + self._namespace = namespace def __eq__(self, other: object) -> bool: if isinstance(other, type(self)): @@ -368,6 +495,7 @@ def __eq__(self, other: object) -> bool: other._collation, other._array_filters, other._hint, + other._namespace, ) == ( self._filter, self._doc, @@ -375,10 +503,25 @@ def __eq__(self, other: object) -> bool: self._collation, self._array_filters, self._hint, + self._namespace, ) return NotImplemented + def __ne__(self, other: Any) -> bool: + return not self == other + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + self._namespace, + ) return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, @@ -399,14 +542,16 @@ def __init__( self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, + upsert: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Represents an update_one operation. - For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write` and :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. :param filter: A query that matches the document to update. :param update: The modifications to apply. @@ -422,7 +567,10 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. + :param namespace: (optional) The namespace in which to update a document. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the `hint` option. .. versionchanged:: 3.9 @@ -432,11 +580,28 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super().__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace) def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" bulkobj.add_update( + self._filter, + self._doc, + False, + bool(self._upsert), + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_update( + self._namespace, self._filter, self._doc, False, @@ -456,14 +621,16 @@ def __init__( self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, + upsert: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create an UpdateMany instance. - For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write` and :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. :param filter: A query that matches the documents to update. :param update: The modifications to apply. @@ -479,7 +646,10 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. + :param namespace: (optional) The namespace in which to update documents. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the `hint` option. .. versionchanged:: 3.9 @@ -489,11 +659,28 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super().__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace) def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" bulkobj.add_update( + self._filter, + self._doc, + True, + bool(self._upsert), + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_update( + self._namespace, self._filter, self._doc, True, diff --git a/pymongo/results.py b/pymongo/results.py index 1744f2c9e1..b34f6c4926 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -18,7 +18,7 @@ """ from __future__ import annotations -from typing import Any, Mapping, Optional, cast +from typing import Any, Mapping, MutableMapping, Optional, cast from pymongo.errors import InvalidOperation @@ -65,7 +65,9 @@ def acknowledged(self) -> bool: class InsertOneResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" + """The return type for :meth:`~pymongo.collection.Collection.insert_one` + and as part of :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + """ __slots__ = ("__inserted_id",) @@ -113,13 +115,23 @@ def inserted_ids(self) -> list[Any]: class UpdateResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.update_many`, and - :meth:`~pymongo.collection.Collection.replace_one`. + :meth:`~pymongo.collection.Collection.replace_one`, and as part of + :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. """ - __slots__ = ("__raw_result",) - - def __init__(self, raw_result: Optional[Mapping[str, Any]], acknowledged: bool): + __slots__ = ( + "__raw_result", + "__in_client_bulk", + ) + + def __init__( + self, + raw_result: Optional[Mapping[str, Any]], + acknowledged: bool, + in_client_bulk: bool = False, + ): self.__raw_result = raw_result + self.__in_client_bulk = in_client_bulk super().__init__(acknowledged) def __repr__(self) -> str: @@ -134,9 +146,9 @@ def raw_result(self) -> Optional[Mapping[str, Any]]: def matched_count(self) -> int: """The number of documents matched for this update.""" self._raise_if_unacknowledged("matched_count") - if self.upserted_id is not None: - return 0 assert self.__raw_result is not None + if not self.__in_client_bulk and self.upserted_id is not None: + return 0 return self.__raw_result.get("n", 0) @property @@ -153,12 +165,21 @@ def upserted_id(self) -> Any: """ self._raise_if_unacknowledged("upserted_id") assert self.__raw_result is not None - return self.__raw_result.get("upserted") + if self.__in_client_bulk and self.__raw_result.get("upserted"): + return self.__raw_result["upserted"]["_id"] + return self.__raw_result.get("upserted", None) + + @property + def did_upsert(self) -> bool: + """Whether or not an upsert took place.""" + assert self.__raw_result is not None + return len(self.__raw_result.get("upserted", {})) > 0 class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` and :meth:`~pymongo.collection.Collection.delete_many` + and as part of :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. """ __slots__ = ("__raw_result",) @@ -182,19 +203,12 @@ def deleted_count(self) -> int: return self.__raw_result.get("n", 0) -class BulkWriteResult(_WriteResult): - """An object wrapper for bulk API write results.""" +class _BulkWriteResultBase(_WriteResult): + """Private base class for bulk write API results.""" __slots__ = ("__bulk_api_result",) def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: - """Create a BulkWriteResult instance. - - :param bulk_api_result: A result dict from the bulk API - :param acknowledged: Was this write result acknowledged? If ``False`` - then all properties of this object will raise - :exc:`~pymongo.errors.InvalidOperation`. - """ self.__bulk_api_result = bulk_api_result super().__init__(acknowledged) @@ -203,7 +217,7 @@ def __repr__(self) -> str: @property def bulk_api_result(self) -> dict[str, Any]: - """The raw bulk API result.""" + """The raw bulk write API result.""" return self.__bulk_api_result @property @@ -228,7 +242,10 @@ def modified_count(self) -> int: def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") - return cast(int, self.__bulk_api_result.get("nRemoved")) + if "nRemoved" in self.__bulk_api_result: + return cast(int, self.__bulk_api_result.get("nRemoved")) + else: + return cast(int, self.__bulk_api_result.get("nDeleted")) @property def upserted_count(self) -> int: @@ -236,10 +253,112 @@ def upserted_count(self) -> int: self._raise_if_unacknowledged("upserted_count") return cast(int, self.__bulk_api_result.get("nUpserted")) + +class BulkWriteResult(_BulkWriteResultBase): + """An object wrapper for collection-level bulk write API results.""" + + __slots__ = () + + def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: + """Create a BulkWriteResult instance. + + :param bulk_api_result: A result dict from the collection-level bulk write API + :param acknowledged: Was this write result acknowledged? If ``False`` + then all properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + """ + super().__init__(bulk_api_result, acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.bulk_api_result!r}, acknowledged={self.acknowledged})" + ) + @property def upserted_ids(self) -> Optional[dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") - if self.__bulk_api_result: + if self.bulk_api_result: return {upsert["index"]: upsert["_id"] for upsert in self.bulk_api_result["upserted"]} return None + + +class ClientBulkWriteResult(_BulkWriteResultBase): + """An object wrapper for client-level bulk write API results.""" + + __slots__ = ("__has_verbose_results",) + + def __init__( + self, + bulk_api_result: MutableMapping[str, Any], + acknowledged: bool, + has_verbose_results: bool, + ) -> None: + """Create a ClientBulkWriteResult instance. + + :param bulk_api_result: A result dict from the client-level bulk write API + :param acknowledged: Was this write result acknowledged? If ``False`` + then all properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + :param has_verbose_results: Should the returned result be verbose? + If ``False``, then the ``insert_results``, ``update_results``, and + ``delete_results`` properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + """ + self.__has_verbose_results = has_verbose_results + super().__init__( + bulk_api_result, # type: ignore[arg-type] + acknowledged, + ) + + def __repr__(self) -> str: + return "{}({!r}, acknowledged={}, verbose={})".format( + self.__class__.__name__, + self.bulk_api_result, + self.acknowledged, + self.has_verbose_results, + ) + + def _raise_if_not_verbose(self, property_name: str) -> None: + """Raise an exception on property access if verbose results are off.""" + if not self.__has_verbose_results: + raise InvalidOperation( + f"A value for {property_name} is not available when " + "the results are not set to be verbose. Check the " + "verbose_results attribute to avoid this error." + ) + + @property + def has_verbose_results(self) -> bool: + """Whether the returned results should be verbose.""" + return self.__has_verbose_results + + @property + def insert_results(self) -> Mapping[int, InsertOneResult]: + """A map of successful insertion operations to their results.""" + self._raise_if_unacknowledged("insert_results") + self._raise_if_not_verbose("insert_results") + return cast( + Mapping[int, InsertOneResult], + self.bulk_api_result.get("insertResults"), + ) + + @property + def update_results(self) -> Mapping[int, UpdateResult]: + """A map of successful update operations to their results.""" + self._raise_if_unacknowledged("update_results") + self._raise_if_not_verbose("update_results") + return cast( + Mapping[int, UpdateResult], + self.bulk_api_result.get("updateResults"), + ) + + @property + def delete_results(self) -> Mapping[int, DeleteResult]: + """A map of successful delete operations to their results.""" + self._raise_if_unacknowledged("delete_results") + self._raise_if_not_verbose("delete_results") + return cast( + Mapping[int, DeleteResult], + self.bulk_api_result.get("deleteResults"), + ) diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py new file mode 100644 index 0000000000..229abd4330 --- /dev/null +++ b/pymongo/synchronous/client_bulk.py @@ -0,0 +1,786 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The client-level bulk write operations interface. + +.. versionadded:: 4.9 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.synchronous.client_session import ClientSession, _validate_session_write_concern +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection +from pymongo._client_bulk_shared import ( + _merge_command, + _throw_client_bulk_write_exception, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + WaitQueueTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _ClientBulkWriteContext, + _convert_client_bulk_exception, + _convert_exception, + _convert_write_result, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + ClientBulkWriteResult, + DeleteResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _DocumentOut, _Pipeline +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class _ClientBulk: + """The private guts of the client-level bulk write API.""" + + def __init__( + self, + client: MongoClient, + write_concern: WriteConcern, + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + comment: Optional[str] = None, + let: Optional[Any] = None, + verbose_results: bool = False, + ) -> None: + """Initialize a _ClientBulk instance.""" + self.client = client + self.write_concern = write_concern + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.ordered = ordered + self.bypass_doc_val = bypass_document_validation + self.comment = comment + self.verbose_results = verbose_results + + self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.idx_offset: int = 0 + self.total_ops: int = 0 + + self.executed = False + self.uses_upsert = False + self.uses_collation = False + self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False + + self.is_retryable = self.client.options.retry_writes + self.retrying = False + self.started_retryable_write = False + + @property + def bulk_ctx_class(self) -> Type[_ClientBulkWriteContext]: + return _ClientBulkWriteContext + + def add_insert(self, namespace: str, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + cmd = {"insert": namespace, "document": document} + self.ops.append(("insert", cmd)) + self.total_ops += 1 + + def add_update( + self, + namespace: str, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool = False, + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd = { + "update": namespace, + "filter": selector, + "updateMods": update, + "multi": multi, + } + if upsert is not None: + self.uses_upsert = True + cmd["upsert"] = upsert + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append(("update", cmd)) + self.total_ops += 1 + + def add_replace( + self, + namespace: str, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd = { + "update": namespace, + "filter": selector, + "updateMods": replacement, + "multi": False, + } + if upsert is not None: + self.uses_upsert = True + cmd["upsert"] = upsert + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + self.ops.append(("replace", cmd)) + self.total_ops += 1 + + def add_delete( + self, + namespace: str, + selector: Mapping[str, Any], + multi: bool, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd = {"delete": namespace, "filter": selector, "multi": multi} + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append(("delete", cmd)) + self.total_ops += 1 + + @_handle_reauth + def write_command( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: Union[bytes, dict[str, Any]], + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: MongoClient, + ) -> dict[str, Any]: + """A proxy for Connection.write_command that handles event publishing.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, op_docs, ns_docs) + try: + reply = bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc, arg-type] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + finally: + bwc.start_time = datetime.datetime.now() + return reply # type: ignore[return-value] + + def unack_write( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: MongoClient, + ) -> Optional[Mapping[str, Any]]: + """A proxy for Connection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.STARTED, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, op_docs, ns_docs) + try: + result = bwc.conn.unack_write(msg, bwc.max_bson_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.SUCCEEDED, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + clientId=client._topology_settings._topology_id, + message=_CommandStatusMessage.FAILED, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + finally: + bwc.start_time = datetime.datetime.now() + return result # type: ignore[return-value] + + def _execute_batch_unack( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (unack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return to_send_ops, to_send_ns + + def _execute_batch( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (ack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + result = self.write_command(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + self.client._process_response(result, bwc.session) # type: ignore[arg-type] + return result, to_send_ops, to_send_ns # type: ignore[return-value] + + def _process_results_cursor( + self, + full_result: MutableMapping[str, Any], + result: MutableMapping[str, Any], + conn: Connection, + session: Optional[ClientSession], + ) -> None: + """Internal helper for processing the server reply command cursor.""" + if result.get("cursor"): + coll = Collection( + database=Database(self.client, "admin"), + name="$cmd.bulkWrite", + ) + cmd_cursor = CommandCursor( + coll, + result["cursor"], + conn.address, + session=session, + explicit_session=session is not None, + comment=self.comment, + ) + cmd_cursor._maybe_pin_connection(conn) + + # Iterate the cursor to get individual write results. + try: + for doc in cmd_cursor: + original_index = doc["idx"] + self.idx_offset + op_type, op = self.ops[original_index] + + if not doc["ok"]: + result["writeErrors"].append(doc) + if self.ordered: + return + + # Record individual write result. + if doc["ok"] and self.verbose_results: + if op_type == "insert": + inserted_id = op["document"]["_id"] + res = InsertOneResult(inserted_id, acknowledged=True) # type: ignore[assignment] + if op_type in ["update", "replace"]: + op_type = "update" + res = UpdateResult(doc, acknowledged=True, in_client_bulk=True) # type: ignore[assignment] + if op_type == "delete": + res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] + full_result[f"{op_type}Results"][original_index] = res + + except Exception as exc: + # Attempt to close the cursor, then raise top-level error. + if cmd_cursor.alive: + cmd_cursor.close() + result["error"] = _convert_client_bulk_exception(exc) + + def _execute_command( + self, + write_concern: WriteConcern, + session: Optional[ClientSession], + conn: Connection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + """Internal helper for executing batches of bulkWrite commands.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + + # Connection.command validates the session, but we use + # Connection.write_command + conn.validate_session(self.client, session) + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + session, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # If this is the last possible batch, use the + # final write concern. + if self.total_ops - self.idx_offset <= bwc.max_write_batch_size: + write_concern = final_write_concern or write_concern + + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + not_in_transaction = session and not session.in_transaction + if not_in_transaction or not session: + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, self.client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(self.client, cmd) + ops = islice(self.ops, self.idx_offset, None) + + # Run as many ops as possible in one server command. + if write_concern.acknowledged: + raw_result, to_send_ops, _ = self._execute_batch(bwc, cmd, ops) # type: ignore[arg-type] + result = copy.deepcopy(raw_result) + + # Top-level server/network error. + if result.get("error"): + error = result["error"] + retryable_top_level_error = ( + isinstance(error.details, dict) + and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES + ) + retryable_network_error = isinstance( + error, ConnectionFailure + ) and not isinstance(error, (NotPrimaryError, WaitQueueTimeoutError)) + + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + if retryable and (retryable_top_level_error or retryable_network_error): + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + else: + _merge_command(self.ops, self.idx_offset, full_result, result) + _throw_client_bulk_write_exception(full_result, self.verbose_results) + + result["error"] = None + result["writeErrors"] = [] + if result.get("nErrors", 0) < len(to_send_ops): + full_result["anySuccessful"] = True + + # Top-level command error. + if not result["ok"]: + result["error"] = raw_result + _merge_command(self.ops, self.idx_offset, full_result, result) + break + + if retryable: + # Retryable writeConcernErrors halt the execution of this batch. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + + # Process the server reply as a command cursor. + self._process_results_cursor(full_result, result, conn, session) + + # Merge this batch's results with the full results. + _merge_command(self.ops, self.idx_offset, full_result, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + else: + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + # We halt execution if we hit a top-level error, + # or an individual error in an ordered bulk write. + if full_result["error"] or (self.ordered and full_result["writeErrors"]): + break + + def execute_command( + self, + session: Optional[ClientSession], + operation: str, + ) -> MutableMapping[str, Any]: + """Execute commands with w=1 WriteConcern.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + op_id = _randint() + + def retryable_bulk( + session: Optional[ClientSession], + conn: Connection, + retryable: bool, + ) -> None: + if conn.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + self._execute_command( + self.write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + self.client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) + + if full_result["error"] or full_result["writeErrors"] or full_result["writeConcernErrors"]: + _throw_client_bulk_write_exception(full_result, self.verbose_results) + return full_result + + def execute_command_unack_unordered( + self, + conn: Connection, + ) -> None: + """Execute commands with OP_MSG and w=0 writeConcern, unordered.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + op_id = _randint() + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + None, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + conn.add_server_api(cmd) + ops = islice(self.ops, self.idx_offset, None) + + # Run as many ops as possible in one server command. + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + def execute_command_unack_ordered( + self, + conn: Connection, + ) -> None: + """Execute commands with OP_MSG and w=0 WriteConcern, ordered.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + # Ordered bulk writes have to be acknowledged so that we stop + # processing at the first error, even when the application + # specified unacknowledged writeConcern. + initial_write_concern = WriteConcern() + op_id = _randint() + try: + self._execute_command( + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + self.write_concern, + ) + except OperationFailure: + pass + + def execute_no_results( + self, + conn: Connection, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val is not None: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + if self.ordered: + return self.execute_command_unack_ordered(conn) + return self.execute_command_unack_unordered(conn) + + def execute( + self, + session: Optional[ClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + session = _validate_session_write_concern(session, self.write_concern) + + if not self.write_concern.acknowledged: + with self.client._conn_for_writes(session, operation) as connection: + if connection.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + self.execute_no_results(connection) + return ClientBulkWriteResult(None, False, False) # type: ignore[arg-type] + + result = self.execute_command(session, operation) + return ClientBulkWriteResult( + result, + self.write_concern.acknowledged, + self.verbose_results, + ) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index bd14311b5a..41b4db4f18 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -62,6 +62,7 @@ from pymongo.errors import ( AutoReconnect, BulkWriteError, + ClientBulkWriteException, ConfigurationError, ConnectionFailure, InvalidOperation, @@ -76,12 +77,22 @@ from pymongo.logger import _CLIENT_LOGGER, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason -from pymongo.operations import _Op +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.synchronous import client_session, database, periodic_executor from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream +from pymongo.synchronous.client_bulk import _ClientBulk from pymongo.synchronous.client_session import _EmptyServerSession from pymongo.synchronous.command_cursor import CommandCursor from pymongo.synchronous.settings import TopologySettings @@ -127,6 +138,15 @@ _IS_SYNC = True +_WriteOp = Union[ + InsertOne, + DeleteOne, + DeleteMany, + ReplaceOne, + UpdateOne, + UpdateMany, +] + class MongoClient(common.BaseObject, Generic[_DocumentType]): HOST = "localhost" @@ -1715,7 +1735,7 @@ def _retry_with_session( retryable: bool, func: _WriteCall[T], session: Optional[ClientSession], - bulk: Optional[_Bulk], + bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, operation_id: Optional[int] = None, ) -> T: @@ -1745,7 +1765,7 @@ def _retry_internal( self, func: _WriteCall[T] | _ReadCall[T], session: Optional[ClientSession], - bulk: Optional[_Bulk], + bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, is_read: bool = False, address: Optional[_Address] = None, @@ -1828,7 +1848,7 @@ def _retryable_write( func: _WriteCall[T], session: Optional[ClientSession], operation: str, - bulk: Optional[_Bulk] = None, + bulk: Optional[Union[_Bulk, _ClientBulk]] = None, operation_id: Optional[int] = None, ) -> T: """Execute an operation with consecutive retries if possible @@ -2193,10 +2213,134 @@ def drop_database( session=session, ) + @_csot.apply + def bulk_write( + self, + models: Sequence[_WriteOp[_DocumentType]], + session: Optional[ClientSession] = None, + ordered: bool = True, + verbose_results: bool = False, + bypass_document_validation: Optional[bool] = None, + comment: Optional[Any] = None, + let: Optional[Mapping] = None, + write_concern: Optional[WriteConcern] = None, + ) -> ClientBulkWriteResult: + """Send a batch of write operations, potentially across multiple namespaces, to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + ... + >>> for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + ... + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> models = [InsertOne(namespace="db.test", document={'y': 1}), + ... DeleteOne(namespace="db.test", filter={'x': 1}), + ... InsertOne(namespace="db.coll", document={'y': 2}), + ... ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)] + >>> result = client.bulk_write(models=models) + >>> result.inserted_count + 2 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {3: ObjectId('54f62ee28891e756a6e1abd5')} + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + ... + >>> for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')} + + :param models: A list of write operation instances. + :param session: (optional) An instance of + :class:`~pymongo.client_session.ClientSession`. + :param ordered: If ``True`` (the default), requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False``, requests + will be still performed on the server serially, in the order provided, + but all operations will be attempted even if any errors occur. + :param verbose_results: If ``True``, detailed results for each + successful operation will be included in the returned + :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is ``False``. + :param comment: (optional) A user-provided comment to attach to this + command. + :param let: (optional) Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param write_concern: (optional) The write concern to use for this bulk write. + + :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. + + .. seealso:: :ref:`writes-and-ids` + + .. note:: requires MongoDB server version 8.0+. + + .. versionadded:: 4.9 + """ + if self._options.auto_encryption_opts: + raise InvalidOperation( + "MongoClient.bulk_write does not currently support automatic encryption" + ) + + if session and session.in_transaction: + # Inherit the transaction write concern. + if write_concern: + raise InvalidOperation("Cannot set write concern after starting a transaction") + write_concern = session._transaction.opts.write_concern # type: ignore[union-attr] + else: + # Inherit the client's write concern if none is provided. + if not write_concern: + write_concern = self.write_concern + + common.validate_list("models", models) + + blk = _ClientBulk( + self, + write_concern=write_concern, # type: ignore[arg-type] + ordered=ordered, + bypass_document_validation=bypass_document_validation, + comment=comment, + let=let, + verbose_results=verbose_results, + ) + for model in models: + try: + model._add_to_client_bulk(blk) + except AttributeError: + raise TypeError(f"{model!r} is not a valid request") from None + + return blk.execute(session, _Op.BULK_WRITE) + def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: """Return the server response from PyMongo exception or None.""" - if isinstance(exc, BulkWriteError): + if isinstance(exc, (BulkWriteError, ClientBulkWriteException)): # Check the last writeConcernError to determine if this # BulkWriteError is retryable. wces = exc.details["writeConcernErrors"] @@ -2231,10 +2375,14 @@ def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mong # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is # handled above. - if isinstance(exc, ConnectionFailure) and not isinstance( - exc, (NotPrimaryError, WaitQueueTimeoutError) + if isinstance(exc, ClientBulkWriteException): + exc_to_check = exc.error + else: + exc_to_check = exc + if isinstance(exc_to_check, ConnectionFailure) and not isinstance( + exc_to_check, (NotPrimaryError, WaitQueueTimeoutError) ): - exc._add_error_label("RetryableWriteError") + exc_to_check._add_error_label("RetryableWriteError") class _MongoClientErrorHandler: @@ -2279,6 +2427,8 @@ def handle( return self.handled = True if self.session: + if isinstance(exc_val, ClientBulkWriteException): + exc_val = exc_val.error if isinstance(exc_val, ConnectionFailure): if self.session.in_transaction: exc_val._add_error_label("TransientTransactionError") @@ -2290,7 +2440,7 @@ def handle( ): self.session._unpin() err_ctx = _ErrorContext( - exc_val, + exc_val, # type: ignore[arg-type] self.max_wire_version, self.sock_generation, self.completed_handshake, @@ -2317,7 +2467,7 @@ def __init__( self, mongo_client: MongoClient, func: _WriteCall[T] | _ReadCall[T], - bulk: Optional[_Bulk], + bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, is_read: bool = False, session: Optional[ClientSession] = None, @@ -2394,7 +2544,10 @@ def run(self) -> T: if not self._is_read: if not self._retryable: raise - retryable_write_error_exc = exc.has_error_label("RetryableWriteError") + if isinstance(exc, ClientBulkWriteException) and exc.error: + retryable_write_error_exc = exc.error.has_error_label("RetryableWriteError") + else: + retryable_write_error_exc = exc.has_error_label("RetryableWriteError") if retryable_write_error_exc: assert self._session self._session._unpin() diff --git a/pymongo/typings.py b/pymongo/typings.py index 9f6d7b1669..68962eb540 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -30,11 +30,13 @@ if TYPE_CHECKING: from pymongo.asynchronous.bulk import _AsyncBulk + from pymongo.asynchronous.client_bulk import _AsyncClientBulk from pymongo.asynchronous.client_session import AsyncClientSession from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.asynchronous.pool import AsyncConnection from pymongo.collation import Collation from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_bulk import _ClientBulk from pymongo.synchronous.client_session import ClientSession from pymongo.synchronous.mongo_client import MongoClient from pymongo.synchronous.pool import Connection @@ -53,6 +55,7 @@ _AgnosticConnection = Union["AsyncConnection", "Connection"] _AgnosticClientSession = Union["AsyncClientSession", "ClientSession"] _AgnosticBulk = Union["_AsyncBulk", "_Bulk"] +_AgnosticClientBulk = Union["_AsyncClientBulk", "_ClientBulk"] def strip_optional(elem: Optional[_T]) -> _T: diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py new file mode 100644 index 0000000000..f55b3082bb --- /dev/null +++ b/test/asynchronous/test_client_bulk_write.py @@ -0,0 +1,571 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the client bulk write API.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import ( + OvertCommandListener, + async_rs_or_single_client, +) + +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.errors import ( + ClientBulkWriteException, + DocumentTooLarge, + InvalidOperation, + NetworkTimeout, +) +from pymongo.monitoring import * +from pymongo.operations import * +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestClientBulkWrite(AsyncIntegrationTest): + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_returns_error_if_no_namespace_provided(self): + client = await async_rs_or_single_client() + self.addAsyncCleanup(client.aclose) + + models = [InsertOne(document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + await client.bulk_write(models=models) + self.assertIn( + "MongoClient.bulk_write requires a namespace to be provided for each write operation", + context.exception._message, + ) + + +# https://github.com/mongodb/specifications/tree/master/source/crud/tests +class TestClientBulkWriteCRUD(AsyncIntegrationTest): + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_batch_splits_if_num_operations_too_large(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] + models = [] + for _ in range(max_write_batch_size + 1): + models.append(InsertOne(namespace="db.coll", document={"a": "b"})) + self.addAsyncCleanup(client.db["coll"].drop) + + result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, max_write_batch_size + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + first_event, second_event = bulk_write_events + self.assertEqual(len(first_event.command["ops"]), max_write_batch_size) + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(first_event.operation_id, second_event.operation_id) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_batch_splits_if_ops_payload_too_large(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + + models = [] + num_models = int(max_message_size_bytes / max_bson_object_size + 1) + b_repeated = "b" * (max_bson_object_size - 500) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + + result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + first_event, second_event = bulk_write_events + self.assertEqual(len(first_event.command["ops"]), num_models - 1) + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(first_event.operation_id, second_event.operation_id) + + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_failCommand_fail_point + async def test_collects_write_concern_errors_across_batches(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client( + event_listeners=[listener], + retryWrites=False, + ) + self.addAsyncCleanup(client.aclose) + max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] + + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["bulkWrite"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } + async with self.fail_point(fail_command): + models = [] + for _ in range(max_write_batch_size + 1): + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models) + self.assertEqual(len(context.exception.write_concern_errors), 2) # type: ignore[arg-type] + self.assertIsNotNone(context.exception.partial_result) + self.assertEqual( + context.exception.partial_result.inserted_count, max_write_batch_size + 1 + ) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_collects_write_errors_across_batches_unordered(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + await collection.insert_one(document={"_id": 1}) + + max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] + models = [] + for _ in range(max_write_batch_size + 1): + models.append( + InsertOne( + namespace="db.coll", + document={"_id": 1}, + ) + ) + + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models, ordered=False) + self.assertEqual(len(context.exception.write_errors), max_write_batch_size + 1) # type: ignore[arg-type] + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_collects_write_errors_across_batches_ordered(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + await collection.insert_one(document={"_id": 1}) + + max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] + models = [] + for _ in range(max_write_batch_size + 1): + models.append( + InsertOne( + namespace="db.coll", + document={"_id": 1}, + ) + ) + + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models, ordered=True) + self.assertEqual(len(context.exception.write_errors), 1) # type: ignore[arg-type] + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 1) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_handles_cursor_requiring_getMore(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + models = [] + a_repeated = "a" * (max_bson_object_size // 2) + b_repeated = "b" * (max_bson_object_size // 2) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": a_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": b_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + + result = await client.bulk_write(models=models, verbose_results=True) + self.assertEqual(result.upserted_count, 2) + self.assertEqual(len(result.update_results), 2) + + get_more_event = False + for event in listener.started_events: + if event.command_name == "getMore": + get_more_event = True + self.assertTrue(get_more_event) + + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_standalone + async def test_handles_cursor_requiring_getMore_within_transaction(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + async with client.start_session() as session: + await session.start_transaction() + models = [] + a_repeated = "a" * (max_bson_object_size // 2) + b_repeated = "b" * (max_bson_object_size // 2) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": a_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": b_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + result = await client.bulk_write(models=models, session=session, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(len(result.update_results), 2) + + get_more_event = False + for event in listener.started_events: + if event.command_name == "getMore": + get_more_event = True + self.assertTrue(get_more_event) + + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_failCommand_fail_point + async def test_handles_getMore_error(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 8}, + } + async with self.fail_point(fail_command): + models = [] + a_repeated = "a" * (max_bson_object_size // 2) + b_repeated = "b" * (max_bson_object_size // 2) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": a_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": b_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models, verbose_results=True) + self.assertIsNotNone(context.exception.error) + self.assertEqual(context.exception.error["code"], 8) + self.assertIsNotNone(context.exception.partial_result) + self.assertEqual(context.exception.partial_result.upserted_count, 2) + self.assertEqual(len(context.exception.partial_result.update_results), 1) + + get_more_event = False + kill_cursors_event = False + for event in listener.started_events: + if event.command_name == "getMore": + get_more_event = True + if event.command_name == "killCursors": + kill_cursors_event = True + self.assertTrue(get_more_event) + self.assertTrue(kill_cursors_event) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_returns_error_if_unacknowledged_too_large_insert(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + b_repeated = "b" * max_bson_object_size + + # Insert document. + models_insert = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge): + await client.bulk_write(models=models_insert, write_concern=WriteConcern(w=0)) + + # Replace document. + models_replace = [ReplaceOne(namespace="db.coll", filter={}, replacement={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge): + await client.bulk_write(models=models_replace, write_concern=WriteConcern(w=0)) + + async def _setup_namespace_test_models(self): + max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + + ops_bytes = max_message_size_bytes - 1122 + num_models = ops_bytes // max_bson_object_size + remainder_bytes = ops_bytes % max_bson_object_size + + models = [] + b_repeated = "b" * (max_bson_object_size - 57) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + if remainder_bytes >= 217: + num_models += 1 + b_repeated = "b" * (remainder_bytes - 57) + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + return num_models, models + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + num_models, models = await self._setup_namespace_test_models() + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + + # No batch splitting required. + result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 1) + event = bulk_write_events[0] + + self.assertEqual(len(event.command["ops"]), num_models + 1) + self.assertEqual(len(event.command["nsInfo"]), 1) + self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_batch_splits_if_new_namespace_is_too_large(self): + listener = OvertCommandListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.aclose) + + num_models, models = await self._setup_namespace_test_models() + c_repeated = "c" * 200 + namespace = f"db.{c_repeated}" + models.append( + InsertOne( + namespace=namespace, + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + self.addAsyncCleanup(client.db[c_repeated].drop) + + # Batch splitting required. + result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 2) + first_event, second_event = bulk_write_events + + self.assertEqual(len(first_event.command["ops"]), num_models) + self.assertEqual(len(first_event.command["nsInfo"]), 1) + self.assertEqual(first_event.command["nsInfo"][0]["ns"], "db.coll") + + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(len(second_event.command["nsInfo"]), 1) + self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_returns_error_if_no_writes_can_be_added_to_ops(self): + client = await async_rs_or_single_client() + self.addAsyncCleanup(client.aclose) + + max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] + + # Document too large. + b_repeated = "b" * max_message_size_bytes + models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(InvalidOperation) as context: + await client.bulk_write(models=models) + self.assertIn("cannot do an empty bulk write", context.exception._message) + + # Namespace too large. + c_repeated = "c" * max_message_size_bytes + namespace = f"db.{c_repeated}" + models = [InsertOne(namespace=namespace, document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + await client.bulk_write(models=models) + self.assertIn("cannot do an empty bulk write", context.exception._message) + + @async_client_context.require_version_min(8, 0, 0, -24) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_returns_error_if_auto_encryption_configured(self): + opts = AutoEncryptionOpts( + key_vault_namespace="db.coll", + kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, + ) + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + await client.bulk_write(models=models) + self.assertIn( + "bulk_write does not currently support automatic encryption", context.exception._message + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites +class TestClientBulkWriteTimeout(AsyncIntegrationTest): + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_failCommand_fail_point + async def test_timeout_in_multi_batch_bulk_write(self): + internal_client = await async_rs_or_single_client(timeoutMS=None) + self.addAsyncCleanup(internal_client.aclose) + + collection = internal_client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] + max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010}, + } + async with self.fail_point(fail_command): + models = [] + num_models = int(max_message_size_bytes / max_bson_object_size + 1) + b_repeated = "b" * (max_bson_object_size - 500) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + + listener = OvertCommandListener() + client = await async_rs_or_single_client( + event_listeners=[listener], + readConcernLevel="majority", + readPreference="primary", + timeoutMS=2000, + w="majority", + ) + self.addAsyncCleanup(client.aclose) + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, NetworkTimeout) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) diff --git a/test/command_monitoring/unacknowledged-client-bulkWrite.json b/test/command_monitoring/unacknowledged-client-bulkWrite.json new file mode 100644 index 0000000000..1099b6a1e9 --- /dev/null +++ b/test/command_monitoring/unacknowledged-client-bulkWrite.json @@ -0,0 +1,218 @@ +{ + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ], + "uriOptions": { + "w": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "tests": [ + { + "description": "A successful mixed client bulkWrite", + "operations": [ + { + "object": "client", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "command-monitoring-tests.test", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ] + }, + "expectResult": { + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite", + "reply": { + "ok": 1, + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-delete-options.json b/test/crud/unified/client-bulkWrite-delete-options.json new file mode 100644 index 0000000000..5bdf2b124a --- /dev/null +++ b/test/crud/unified/client-bulkWrite-delete-options.json @@ -0,0 +1,267 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-errorResponse.json b/test/crud/unified/client-bulkWrite-errorResponse.json new file mode 100644 index 0000000000..edf2339d8a --- /dev/null +++ b/test/crud/unified/client-bulkWrite-errorResponse.json @@ -0,0 +1,68 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-errors.json b/test/crud/unified/client-bulkWrite-errors.json new file mode 100644 index 0000000000..9f17f85331 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-errors.json @@ -0,0 +1,454 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-mixed-namespaces.json b/test/crud/unified/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 0000000000..f90755dc85 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,314 @@ +{ + "description": "client bulkWrite with mixed namespaces", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + "tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-options.json b/test/crud/unified/client-bulkWrite-options.json new file mode 100644 index 0000000000..a1e6af3bf3 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-options.json @@ -0,0 +1,715 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-ordered.json b/test/crud/unified/client-bulkWrite-ordered.json new file mode 100644 index 0000000000..a55d6619b5 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-ordered.json @@ -0,0 +1,290 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-results.json b/test/crud/unified/client-bulkWrite-results.json new file mode 100644 index 0000000000..97a9e50b21 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-results.json @@ -0,0 +1,832 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-options.json b/test/crud/unified/client-bulkWrite-update-options.json new file mode 100644 index 0000000000..93a2774e5f --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-options.json @@ -0,0 +1,948 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-pipeline.json b/test/crud/unified/client-bulkWrite-update-pipeline.json new file mode 100644 index 0000000000..57b6c9c1ba --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-pipeline.json @@ -0,0 +1,257 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-validation.json b/test/crud/unified/client-bulkWrite-update-validation.json new file mode 100644 index 0000000000..617e711338 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/client-bulkWrite-clientErrors.json b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 0000000000..e2c0fb9c0a --- /dev/null +++ b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,350 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 0000000000..4a0b210eb5 --- /dev/null +++ b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,872 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index ef06fb1e35..aa677494c8 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -53,6 +53,222 @@ } ], "tests": [ + { + "description": "client.clientBulkWrite succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, { "description": "collection.insertOne succeeds after retryable handshake network error", "operations": [ diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json index 23af7a8a22..5383b66332 100644 --- a/test/server_selection_logging/operation-id.json +++ b/test/server_selection_logging/operation-id.json @@ -47,6 +47,9 @@ } } ], + "_yamlAnchors": { + "namespace": "logging-tests.server-selection" + }, "tests": [ { "description": "Successful bulkWrite operation: log messages have operationIds", @@ -224,6 +227,190 @@ ] } ] + }, + { + "description": "Successful client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "Failed client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] } ] } diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py new file mode 100644 index 0000000000..facf2971a1 --- /dev/null +++ b/test/test_client_bulk_write.py @@ -0,0 +1,571 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the client bulk write API.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils import ( + OvertCommandListener, + rs_or_single_client, +) + +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.errors import ( + ClientBulkWriteException, + DocumentTooLarge, + InvalidOperation, + NetworkTimeout, +) +from pymongo.monitoring import * +from pymongo.operations import * +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class TestClientBulkWrite(IntegrationTest): + @client_context.require_version_min(8, 0, 0, -24) + def test_returns_error_if_no_namespace_provided(self): + client = rs_or_single_client() + self.addCleanup(client.close) + + models = [InsertOne(document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + client.bulk_write(models=models) + self.assertIn( + "MongoClient.bulk_write requires a namespace to be provided for each write operation", + context.exception._message, + ) + + +# https://github.com/mongodb/specifications/tree/master/source/crud/tests +class TestClientBulkWriteCRUD(IntegrationTest): + @client_context.require_version_min(8, 0, 0, -24) + def test_batch_splits_if_num_operations_too_large(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] + models = [] + for _ in range(max_write_batch_size + 1): + models.append(InsertOne(namespace="db.coll", document={"a": "b"})) + self.addCleanup(client.db["coll"].drop) + + result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, max_write_batch_size + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + first_event, second_event = bulk_write_events + self.assertEqual(len(first_event.command["ops"]), max_write_batch_size) + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(first_event.operation_id, second_event.operation_id) + + @client_context.require_version_min(8, 0, 0, -24) + def test_batch_splits_if_ops_payload_too_large(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + + models = [] + num_models = int(max_message_size_bytes / max_bson_object_size + 1) + b_repeated = "b" * (max_bson_object_size - 500) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + self.addCleanup(client.db["coll"].drop) + + result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + first_event, second_event = bulk_write_events + self.assertEqual(len(first_event.command["ops"]), num_models - 1) + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(first_event.operation_id, second_event.operation_id) + + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_failCommand_fail_point + def test_collects_write_concern_errors_across_batches(self): + listener = OvertCommandListener() + client = rs_or_single_client( + event_listeners=[listener], + retryWrites=False, + ) + self.addCleanup(client.close) + max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] + + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["bulkWrite"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } + with self.fail_point(fail_command): + models = [] + for _ in range(max_write_batch_size + 1): + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models) + self.assertEqual(len(context.exception.write_concern_errors), 2) # type: ignore[arg-type] + self.assertIsNotNone(context.exception.partial_result) + self.assertEqual( + context.exception.partial_result.inserted_count, max_write_batch_size + 1 + ) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + @client_context.require_version_min(8, 0, 0, -24) + def test_collects_write_errors_across_batches_unordered(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + collection.insert_one(document={"_id": 1}) + + max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] + models = [] + for _ in range(max_write_batch_size + 1): + models.append( + InsertOne( + namespace="db.coll", + document={"_id": 1}, + ) + ) + + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models, ordered=False) + self.assertEqual(len(context.exception.write_errors), max_write_batch_size + 1) # type: ignore[arg-type] + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) + + @client_context.require_version_min(8, 0, 0, -24) + def test_collects_write_errors_across_batches_ordered(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + collection.insert_one(document={"_id": 1}) + + max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] + models = [] + for _ in range(max_write_batch_size + 1): + models.append( + InsertOne( + namespace="db.coll", + document={"_id": 1}, + ) + ) + + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models, ordered=True) + self.assertEqual(len(context.exception.write_errors), 1) # type: ignore[arg-type] + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 1) + + @client_context.require_version_min(8, 0, 0, -24) + def test_handles_cursor_requiring_getMore(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + models = [] + a_repeated = "a" * (max_bson_object_size // 2) + b_repeated = "b" * (max_bson_object_size // 2) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": a_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": b_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + + result = client.bulk_write(models=models, verbose_results=True) + self.assertEqual(result.upserted_count, 2) + self.assertEqual(len(result.update_results), 2) + + get_more_event = False + for event in listener.started_events: + if event.command_name == "getMore": + get_more_event = True + self.assertTrue(get_more_event) + + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_standalone + def test_handles_cursor_requiring_getMore_within_transaction(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + with client.start_session() as session: + session.start_transaction() + models = [] + a_repeated = "a" * (max_bson_object_size // 2) + b_repeated = "b" * (max_bson_object_size // 2) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": a_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": b_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + result = client.bulk_write(models=models, session=session, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(len(result.update_results), 2) + + get_more_event = False + for event in listener.started_events: + if event.command_name == "getMore": + get_more_event = True + self.assertTrue(get_more_event) + + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_failCommand_fail_point + def test_handles_getMore_error(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 8}, + } + with self.fail_point(fail_command): + models = [] + a_repeated = "a" * (max_bson_object_size // 2) + b_repeated = "b" * (max_bson_object_size // 2) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": a_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": b_repeated}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models, verbose_results=True) + self.assertIsNotNone(context.exception.error) + self.assertEqual(context.exception.error["code"], 8) + self.assertIsNotNone(context.exception.partial_result) + self.assertEqual(context.exception.partial_result.upserted_count, 2) + self.assertEqual(len(context.exception.partial_result.update_results), 1) + + get_more_event = False + kill_cursors_event = False + for event in listener.started_events: + if event.command_name == "getMore": + get_more_event = True + if event.command_name == "killCursors": + kill_cursors_event = True + self.assertTrue(get_more_event) + self.assertTrue(kill_cursors_event) + + @client_context.require_version_min(8, 0, 0, -24) + def test_returns_error_if_unacknowledged_too_large_insert(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + b_repeated = "b" * max_bson_object_size + + # Insert document. + models_insert = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge): + client.bulk_write(models=models_insert, write_concern=WriteConcern(w=0)) + + # Replace document. + models_replace = [ReplaceOne(namespace="db.coll", filter={}, replacement={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge): + client.bulk_write(models=models_replace, write_concern=WriteConcern(w=0)) + + def _setup_namespace_test_models(self): + max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + + ops_bytes = max_message_size_bytes - 1122 + num_models = ops_bytes // max_bson_object_size + remainder_bytes = ops_bytes % max_bson_object_size + + models = [] + b_repeated = "b" * (max_bson_object_size - 57) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + if remainder_bytes >= 217: + num_models += 1 + b_repeated = "b" * (remainder_bytes - 57) + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + return num_models, models + + @client_context.require_version_min(8, 0, 0, -24) + def test_no_batch_splits_if_new_namespace_is_not_too_large(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + num_models, models = self._setup_namespace_test_models() + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + + # No batch splitting required. + result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 1) + event = bulk_write_events[0] + + self.assertEqual(len(event.command["ops"]), num_models + 1) + self.assertEqual(len(event.command["nsInfo"]), 1) + self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") + + @client_context.require_version_min(8, 0, 0, -24) + def test_batch_splits_if_new_namespace_is_too_large(self): + listener = OvertCommandListener() + client = rs_or_single_client(event_listeners=[listener]) + self.addCleanup(client.close) + + num_models, models = self._setup_namespace_test_models() + c_repeated = "c" * 200 + namespace = f"db.{c_repeated}" + models.append( + InsertOne( + namespace=namespace, + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + self.addCleanup(client.db[c_repeated].drop) + + # Batch splitting required. + result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 2) + first_event, second_event = bulk_write_events + + self.assertEqual(len(first_event.command["ops"]), num_models) + self.assertEqual(len(first_event.command["nsInfo"]), 1) + self.assertEqual(first_event.command["nsInfo"][0]["ns"], "db.coll") + + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(len(second_event.command["nsInfo"]), 1) + self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) + + @client_context.require_version_min(8, 0, 0, -24) + def test_returns_error_if_no_writes_can_be_added_to_ops(self): + client = rs_or_single_client() + self.addCleanup(client.close) + + max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] + + # Document too large. + b_repeated = "b" * max_message_size_bytes + models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(InvalidOperation) as context: + client.bulk_write(models=models) + self.assertIn("cannot do an empty bulk write", context.exception._message) + + # Namespace too large. + c_repeated = "c" * max_message_size_bytes + namespace = f"db.{c_repeated}" + models = [InsertOne(namespace=namespace, document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + client.bulk_write(models=models) + self.assertIn("cannot do an empty bulk write", context.exception._message) + + @client_context.require_version_min(8, 0, 0, -24) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_returns_error_if_auto_encryption_configured(self): + opts = AutoEncryptionOpts( + key_vault_namespace="db.coll", + kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, + ) + client = rs_or_single_client(auto_encryption_opts=opts) + self.addCleanup(client.close) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + client.bulk_write(models=models) + self.assertIn( + "bulk_write does not currently support automatic encryption", context.exception._message + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites +class TestClientBulkWriteTimeout(IntegrationTest): + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_failCommand_fail_point + def test_timeout_in_multi_batch_bulk_write(self): + internal_client = rs_or_single_client(timeoutMS=None) + self.addCleanup(internal_client.close) + + collection = internal_client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] + max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010}, + } + with self.fail_point(fail_command): + models = [] + num_models = int(max_message_size_bytes / max_bson_object_size + 1) + b_repeated = "b" * (max_bson_object_size - 500) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + + listener = OvertCommandListener() + client = rs_or_single_client( + event_listeners=[listener], + readConcernLevel="majority", + readPreference="primary", + timeoutMS=2000, + w="majority", + ) + self.addCleanup(client.close) + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, NetworkTimeout) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) diff --git a/test/transactions/unified/client-bulkWrite.json b/test/transactions/unified/client-bulkWrite.json new file mode 100644 index 0000000000..f8f1d97169 --- /dev/null +++ b/test/transactions/unified/client-bulkWrite.json @@ -0,0 +1,592 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-pin-auto.json b/test/transactions/unified/mongos-pin-auto.json index 93eac8bb77..27db520401 100644 --- a/test/transactions/unified/mongos-pin-auto.json +++ b/test/transactions/unified/mongos-pin-auto.json @@ -2004,6 +2004,104 @@ } ] }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, { "description": "unpin after transient connection error on insertOne insert", "operations": [ @@ -5175,6 +5273,202 @@ ] } ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] } ] } diff --git a/test/unified_format.py b/test/unified_format.py index 2c576da45b..0322d83cce 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -74,6 +74,7 @@ from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( BulkWriteError, + ClientBulkWriteException, ConfigurationError, ConnectionFailure, EncryptionError, @@ -118,10 +119,18 @@ _ServerEvent, _ServerHeartbeatEvent, ) -from pymongo.operations import SearchIndexModel +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, +) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.results import BulkWriteResult +from pymongo.results import BulkWriteResult, ClientBulkWriteResult from pymongo.server_api import ServerApi from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection, writable_server_selector @@ -289,11 +298,61 @@ def parse_bulk_write_result(result): } +def parse_client_bulk_write_individual(op_type, result): + if op_type == "insert": + return {"insertedId": result.inserted_id} + if op_type == "update": + if result.upserted_id: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedId": result.upserted_id, + } + else: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + } + if op_type == "delete": + return { + "deletedCount": result.deleted_count, + } + + +def parse_client_bulk_write_result(result): + insert_results, update_results, delete_results = {}, {}, {} + if result.has_verbose_results: + for idx, res in result.insert_results.items(): + insert_results[str(idx)] = parse_client_bulk_write_individual("insert", res) + for idx, res in result.update_results.items(): + update_results[str(idx)] = parse_client_bulk_write_individual("update", res) + for idx, res in result.delete_results.items(): + delete_results[str(idx)] = parse_client_bulk_write_individual("delete", res) + + return { + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "insertResults": insert_results, + "updateResults": update_results, + "deleteResults": delete_results, + } + + def parse_bulk_write_error_result(error): write_result = BulkWriteResult(error.details, True) return parse_bulk_write_result(write_result) +def parse_client_bulk_write_error_result(error): + write_result = error.partial_result + if not write_result: + return None + return parse_client_bulk_write_result(write_result) + + class NonLazyCursor: """A find cursor proxy that creates the remote cursor when initialized.""" @@ -946,6 +1005,8 @@ def coerce_result(opname, result): return {"acknowledged": False} if opname == "bulkWrite": return parse_bulk_write_result(result) + if opname == "clientBulkWrite": + return parse_client_bulk_write_result(result) if opname == "insertOne": return {"insertedId": result.inserted_id} if opname == "insertMany": @@ -974,7 +1035,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.20") + SCHEMA_VERSION = Version.from_string("1.21") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any @@ -1151,20 +1212,27 @@ def process_error(self, exception, spec): expect_result = spec.get("expectResult") error_response = spec.get("errorResponse") if error_response: - self.match_evaluator.match_result(error_response, exception.details) + if isinstance(exception, ClientBulkWriteException): + self.match_evaluator.match_result(error_response, exception.error.details) + else: + self.match_evaluator.match_result(error_response, exception.details) if is_error: # already satisfied because exception was raised pass if is_client_error: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception # Connection errors are considered client errors. - if isinstance(exception, ConnectionFailure): - self.assertNotIsInstance(exception, NotPrimaryError) - elif isinstance(exception, (InvalidOperation, ConfigurationError, EncryptionError)): + if isinstance(error, ConnectionFailure): + self.assertNotIsInstance(error, NotPrimaryError) + elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError)): pass else: - self.assertNotIsInstance(exception, PyMongoError) + self.assertNotIsInstance(error, PyMongoError) if is_timeout_error: self.assertIsInstance(exception, PyMongoError) @@ -1175,21 +1243,31 @@ def process_error(self, exception, spec): if error_contains: if isinstance(exception, BulkWriteError): errmsg = str(exception.details).lower() + elif isinstance(exception, ClientBulkWriteException): + errmsg = str(exception.details).lower() else: errmsg = str(exception).lower() self.assertIn(error_contains.lower(), errmsg) if error_code: - self.assertEqual(error_code, exception.details.get("code")) + if isinstance(exception, ClientBulkWriteException): + self.assertEqual(error_code, exception.error.details.get("code")) + else: + self.assertEqual(error_code, exception.details.get("code")) if error_code_name: - self.assertEqual(error_code_name, exception.details.get("codeName")) + if isinstance(exception, ClientBulkWriteException): + self.assertEqual(error_code, exception.error.details.get("codeName")) + else: + self.assertEqual(error_code_name, exception.details.get("codeName")) if error_labels_contain: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception labels = [ - err_label - for err_label in error_labels_contain - if exception.has_error_label(err_label) + err_label for err_label in error_labels_contain if error.has_error_label(err_label) ] self.assertEqual(labels, error_labels_contain) @@ -1202,8 +1280,13 @@ def process_error(self, exception, spec): if isinstance(exception, BulkWriteError): result = parse_bulk_write_error_result(exception) self.match_evaluator.match_result(expect_result, result) + elif isinstance(exception, ClientBulkWriteException): + result = parse_client_bulk_write_error_result(exception) + self.match_evaluator.match_result(expect_result, result) else: - self.fail(f"expectResult can only be specified with {BulkWriteError} exceptions") + self.fail( + f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions" + ) return exception @@ -1481,6 +1564,8 @@ def run_entity_operation(self, spec): target_opname = camel_to_snake(opname) if target_opname == "iterate_once": target_opname = "try_next" + if target_opname == "client_bulk_write": + target_opname = "bulk_write" try: cmd = getattr(target, target_opname) except AttributeError: diff --git a/test/utils.py b/test/utils.py index 0c08dca95d..fa198b1c64 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1251,10 +1251,10 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac # Requires boolean returnDocument. elif arg_name == "returnDocument": arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) - elif c2s == "requests": + elif "bulk_write" in opname and (c2s == "requests" or c2s == "models"): # Parse each request into a bulk write model. requests = [] - for request in arguments["requests"]: + for request in arguments[c2s]: if "name" in request: # CRUD v2 format bulk_model = camel_to_upper_camel(request["name"]) @@ -1266,7 +1266,7 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) bulk_arguments = camel_to_snake_args(spec) requests.append(bulk_class(**dict(bulk_arguments))) - arguments["requests"] = requests + arguments[c2s] = requests elif arg_name == "session": arguments["session"] = entity_map[arguments["session"]] elif opname == "open_download_stream" and arg_name == "id": diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index a387d0587e..fe668620f8 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -50,7 +50,8 @@ }, "apiDeprecationErrors": true } - ] + ], + "namespace": "versioned-api-tests.test" }, "initialData": [ { @@ -426,6 +427,85 @@ } ] }, + { + "description": "client bulkWrite appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "versioned-api-tests.test", + "document": { + "_id": 6, + "x": 6 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 6 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 6, + "x": 6 + } + } + ], + "nsInfo": [ + { + "ns": "versioned-api-tests.test" + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, { "description": "countDocuments appends declared API version", "operations": [ diff --git a/tools/synchro.py b/tools/synchro.py index 57b089c5a5..e0af50229f 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -39,6 +39,7 @@ "AsyncDatabaseChangeStream": "DatabaseChangeStream", "AsyncClusterChangeStream": "ClusterChangeStream", "_AsyncBulk": "_Bulk", + "_AsyncClientBulk": "_ClientBulk", "AsyncConnection": "Connection", "async_command": "command", "async_receive_message": "receive_message", @@ -151,6 +152,7 @@ "pymongo_mocks.py", "utils_spec_runner.py", "test_client.py", + "test_client_bulk_write.py", "test_collection.py", "test_cursor.py", "test_database.py", From da593183275474aa16942c53d935099463506d4e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 6 Aug 2024 16:29:24 -0700 Subject: [PATCH 1376/2111] PYTHON-4610 More robust to_list tests (#1773) --- test/asynchronous/test_cursor.py | 26 ++++++++++++-------------- test/test_cursor.py | 26 ++++++++++++-------------- 2 files changed, 24 insertions(+), 28 deletions(-) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 925584b894..833493ce32 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1380,41 +1380,39 @@ async def test_getMore_does_not_send_readPreference(self): self.assertEqual("getMore", started[1].command_name) self.assertNotIn("$readPreference", started[1].command) + @async_client_context.require_version_min(4, 0) @async_client_context.require_replica_set async def test_to_list_tailable(self): oplog = self.client.local.oplog.rs last = await oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1).next() ts = last["ts"] - + # Set maxAwaitTimeMS=1 to speed up the test and avoid blocking on the noop writer. c = oplog.find( {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True - ) - + ).max_await_time_ms(1) + self.addAsyncCleanup(c.close) docs = await c.to_list() - self.assertGreaterEqual(len(docs), 1) async def test_to_list_empty(self): c = self.db.does_not_exist.find() - docs = await c.to_list() - self.assertEqual([], docs) - @async_client_context.require_replica_set + @async_client_context.require_change_streams async def test_command_cursor_to_list(self): - c = await self.db.test.aggregate([{"$changeStream": {}}]) - + # Set maxAwaitTimeMS=1 to speed up the test. + c = await self.db.test.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addAsyncCleanup(c.close) docs = await c.to_list() - self.assertGreaterEqual(len(docs), 0) - @async_client_context.require_replica_set + @async_client_context.require_change_streams async def test_command_cursor_to_list_empty(self): - c = await self.db.does_not_exist.aggregate([{"$changeStream": {}}]) - + # Set maxAwaitTimeMS=1 to speed up the test. + c = await self.db.does_not_exist.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addAsyncCleanup(c.close) docs = await c.to_list() - self.assertEqual([], docs) diff --git a/test/test_cursor.py b/test/test_cursor.py index 26f0575da0..e995bd5298 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1371,41 +1371,39 @@ def test_getMore_does_not_send_readPreference(self): self.assertEqual("getMore", started[1].command_name) self.assertNotIn("$readPreference", started[1].command) + @client_context.require_version_min(4, 0) @client_context.require_replica_set def test_to_list_tailable(self): oplog = self.client.local.oplog.rs last = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1).next() ts = last["ts"] - + # Set maxAwaitTimeMS=1 to speed up the test and avoid blocking on the noop writer. c = oplog.find( {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True - ) - + ).max_await_time_ms(1) + self.addCleanup(c.close) docs = c.to_list() - self.assertGreaterEqual(len(docs), 1) def test_to_list_empty(self): c = self.db.does_not_exist.find() - docs = c.to_list() - self.assertEqual([], docs) - @client_context.require_replica_set + @client_context.require_change_streams def test_command_cursor_to_list(self): - c = self.db.test.aggregate([{"$changeStream": {}}]) - + # Set maxAwaitTimeMS=1 to speed up the test. + c = self.db.test.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addCleanup(c.close) docs = c.to_list() - self.assertGreaterEqual(len(docs), 0) - @client_context.require_replica_set + @client_context.require_change_streams def test_command_cursor_to_list_empty(self): - c = self.db.does_not_exist.aggregate([{"$changeStream": {}}]) - + # Set maxAwaitTimeMS=1 to speed up the test. + c = self.db.does_not_exist.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addCleanup(c.close) docs = c.to_list() - self.assertEqual([], docs) From d4e5ee10fc65ff79be51b4ccf9d16efd8f185dd6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 7 Aug 2024 08:04:01 -0500 Subject: [PATCH 1377/2111] PYTHON-4601 Address Azure KMS and GCP KMS setup failures (#1766) --- .evergreen/config.yml | 5 +++-- .evergreen/run-azurekms-fail-test.sh | 8 +++++--- .evergreen/run-azurekms-test.sh | 7 +++++-- .evergreen/run-gcpkms-test.sh | 6 +++++- .evergreen/run-tests.sh | 10 +++++----- .evergreen/setup-encryption.sh | 7 +++++-- 6 files changed, 28 insertions(+), 15 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 45ac6c4140..af9a496591 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2126,7 +2126,8 @@ tasks: script: | ${PREPARE_SHELL} export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 - export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/${bucket_name}/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz + export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz + SKIP_SERVERS=1 bash ./.evergreen/setup-encryption.sh SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/hatch.sh test:test-eg - name: testazurekms-task @@ -3144,7 +3145,7 @@ buildvariants: - name: testgcpkms-variant display_name: "GCP KMS" run_on: - - debian10-small + - debian11-small tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh index 65c5cd0bba..d99c178fb9 100644 --- a/.evergreen/run-azurekms-fail-test.sh +++ b/.evergreen/run-azurekms-fail-test.sh @@ -1,10 +1,12 @@ #!/bin/bash set -o errexit # Exit the script with error if any of the commands fail - +HERE=$(dirname ${BASH_SOURCE:-$0}) . $DRIVERS_TOOLS/.evergreen/csfle/azurekms/setup-secrets.sh +export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz +SKIP_SERVERS=1 bash $HERE/setup-encryption.sh PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ KEY_NAME="${AZUREKMS_KEYNAME}" \ KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ - LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz \ SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ - ./.evergreen/hatch.sh test:test-eg + $HERE/hatch.sh test:test-eg +bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh index 961bcf507d..bb515a9386 100644 --- a/.evergreen/run-azurekms-test.sh +++ b/.evergreen/run-azurekms-test.sh @@ -1,11 +1,13 @@ #!/bin/bash set -o errexit # Exit the script with error if any of the commands fail - +HERE=$(dirname ${BASH_SOURCE:-$0}) source ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/secrets-export.sh echo "Copying files ... begin" export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey +export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz +SKIP_SERVERS=1 bash $HERE/setup-encryption.sh tar czf /tmp/mongo-python-driver.tgz . # shellcheck disable=SC2088 AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" AZUREKMS_DST="~/" \ @@ -16,6 +18,7 @@ AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/hatch.sh test:test-eg" \ +AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/hatch.sh test:test-eg" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Running test ... end" +bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh index 8a5fef04c2..7ccc74b453 100644 --- a/.evergreen/run-gcpkms-test.sh +++ b/.evergreen/run-gcpkms-test.sh @@ -1,5 +1,6 @@ #!/bin/bash set -o errexit # Exit the script with error if any of the commands fail +HERE=$(dirname ${BASH_SOURCE:-$0}) source ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/secrets-export.sh echo "Copying files ... begin" @@ -7,6 +8,8 @@ export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} export GCPKMS_PROJECT=${GCPKMS_PROJECT} export GCPKMS_ZONE=${GCPKMS_ZONE} export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} +export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz +SKIP_SERVERS=1 bash $HERE/setup-encryption.sh tar czf /tmp/mongo-python-driver.tgz . GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh echo "Copying files ... end" @@ -14,5 +17,6 @@ echo "Untarring file ... begin" GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian10/master/latest/libmongocrypt.tar.gz ./.evergreen/hatch.sh test:test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 ./.evergreen/hatch.sh test:test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Running test ... end" +bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4ada73141d..beee1ed287 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -121,14 +121,14 @@ if [ -n "$TEST_PYOPENSSL" ]; then fi if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then - - python -m pip install '.[encryption]' - - # Setup encryption if necessary. + # Check for libmongocrypt checkout. if [ ! -d "libmongocrypt" ]; then - bash ./.evergreen/setup-encryption.sh + echo "Run encryption setup first!" + exit 1 fi + python -m pip install '.[encryption]' + # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE=$(pwd)/libmongocrypt/nocrypto if [ -f "${BASE}/lib/libmongocrypt.so" ]; then diff --git a/.evergreen/setup-encryption.sh b/.evergreen/setup-encryption.sh index b439c15ddc..71231e1732 100644 --- a/.evergreen/setup-encryption.sh +++ b/.evergreen/setup-encryption.sh @@ -4,6 +4,7 @@ set -o xtrace if [ -z "${DRIVERS_TOOLS}" ]; then echo "Missing environment variable DRIVERS_TOOLS" + exit 1 fi TARGET="" @@ -50,5 +51,7 @@ tar xzf libmongocrypt.tar.gz -C ./libmongocrypt ls -la libmongocrypt ls -la libmongocrypt/nocrypto -bash ${DRIVERS_TOOLS}/.evergreen/csfle/setup-secrets.sh -bash ${DRIVERS_TOOLS}/.evergreen/csfle/start-servers.sh +if [ -z "${SKIP_SERVERS:-}" ]; then + bash ${DRIVERS_TOOLS}/.evergreen/csfle/setup-secrets.sh + bash ${DRIVERS_TOOLS}/.evergreen/csfle/start-servers.sh +fi From 000e50c076575005d04614468c52908c2743b8a7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 7 Aug 2024 12:14:40 -0500 Subject: [PATCH 1378/2111] PYTHON-4266 Migrate Atlas Data Lake tests to unified test format (#1760) --- test/data_lake/aggregate.json | 53 ----------- test/data_lake/estimatedDocumentCount.json | 27 ------ test/data_lake/getMore.json | 57 ----------- test/data_lake/listCollections.json | 25 ----- test/data_lake/listDatabases.json | 24 ----- test/data_lake/runCommand.json | 31 ------ test/data_lake/unified/aggregate.json | 84 ++++++++++++++++ .../unified/estimatedDocumentCount.json | 56 +++++++++++ test/data_lake/{ => unified}/find.json | 49 ++++++++-- test/data_lake/unified/getMore.json | 95 +++++++++++++++++++ test/data_lake/unified/listCollections.json | 48 ++++++++++ test/data_lake/unified/listDatabases.json | 41 ++++++++ test/data_lake/unified/runCommand.json | 54 +++++++++++ test/test_data_lake.py | 31 ++---- 14 files changed, 424 insertions(+), 251 deletions(-) delete mode 100644 test/data_lake/aggregate.json delete mode 100644 test/data_lake/estimatedDocumentCount.json delete mode 100644 test/data_lake/getMore.json delete mode 100644 test/data_lake/listCollections.json delete mode 100644 test/data_lake/listDatabases.json delete mode 100644 test/data_lake/runCommand.json create mode 100644 test/data_lake/unified/aggregate.json create mode 100644 test/data_lake/unified/estimatedDocumentCount.json rename test/data_lake/{ => unified}/find.json (52%) create mode 100644 test/data_lake/unified/getMore.json create mode 100644 test/data_lake/unified/listCollections.json create mode 100644 test/data_lake/unified/listDatabases.json create mode 100644 test/data_lake/unified/runCommand.json diff --git a/test/data_lake/aggregate.json b/test/data_lake/aggregate.json deleted file mode 100644 index 99995bca41..0000000000 --- a/test/data_lake/aggregate.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "Aggregate with pipeline (project, sort, limit)", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 0 - } - }, - { - "$sort": { - "a": 1 - } - }, - { - "$limit": 2 - } - ] - }, - "result": [ - { - "a": 1, - "b": 2, - "c": 3 - }, - { - "a": 2, - "b": 3, - "c": 4 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "driverdata" - } - } - } - ] - } - ] -} diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json deleted file mode 100644 index 997a3ab3fc..0000000000 --- a/test/data_lake/estimatedDocumentCount.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "estimatedDocumentCount succeeds", - "operations": [ - { - "object": "collection", - "name": "estimatedDocumentCount", - "result": 15 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "driverdata" - }, - "command_name": "count", - "database_name": "test" - } - } - ] - } - ] -} diff --git a/test/data_lake/getMore.json b/test/data_lake/getMore.json deleted file mode 100644 index e2e1d4788a..0000000000 --- a/test/data_lake/getMore.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "A successful find event with getMore", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": { - "a": { - "$gte": 2 - } - }, - "sort": { - "a": 1 - }, - "batchSize": 3, - "limit": 4 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "driverdata", - "filter": { - "a": { - "$gte": 2 - } - }, - "sort": { - "a": 1 - }, - "batchSize": 3, - "limit": 4 - }, - "command_name": "find", - "database_name": "test" - } - }, - { - "command_started_event": { - "command": { - "batchSize": 1 - }, - "command_name": "getMore", - "database_name": "cursors" - } - } - ] - } - ] -} diff --git a/test/data_lake/listCollections.json b/test/data_lake/listCollections.json deleted file mode 100644 index e419f7b3e9..0000000000 --- a/test/data_lake/listCollections.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "database_name": "test", - "tests": [ - { - "description": "ListCollections succeeds", - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command_name": "listCollections", - "database_name": "test", - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/data_lake/listDatabases.json b/test/data_lake/listDatabases.json deleted file mode 100644 index 6458148e49..0000000000 --- a/test/data_lake/listDatabases.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "tests": [ - { - "description": "ListDatabases succeeds", - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command_name": "listDatabases", - "database_name": "admin", - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/data_lake/runCommand.json b/test/data_lake/runCommand.json deleted file mode 100644 index d81ff1a64b..0000000000 --- a/test/data_lake/runCommand.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "database_name": "test", - "tests": [ - { - "description": "ping succeeds using runCommand", - "operations": [ - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command_name": "ping", - "database_name": "test", - "command": { - "ping": 1 - } - } - } - ] - } - ] -} diff --git a/test/data_lake/unified/aggregate.json b/test/data_lake/unified/aggregate.json new file mode 100644 index 0000000000..68a3467c71 --- /dev/null +++ b/test/data_lake/unified/aggregate.json @@ -0,0 +1,84 @@ +{ + "description": "aggregate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "Aggregate with pipeline (project, sort, limit)", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + }, + { + "$sort": { + "a": 1 + } + }, + { + "$limit": 2 + } + ] + }, + "expectResult": [ + { + "a": 1, + "b": 2, + "c": 3 + }, + { + "a": 2, + "b": 3, + "c": 4 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "driverdata" + }, + "commandName": "aggregate", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/estimatedDocumentCount.json b/test/data_lake/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..b7515a4418 --- /dev/null +++ b/test/data_lake/unified/estimatedDocumentCount.json @@ -0,0 +1,56 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "estimatedDocumentCount succeeds", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 15 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "driverdata" + }, + "commandName": "count", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/find.json b/test/data_lake/unified/find.json similarity index 52% rename from test/data_lake/find.json rename to test/data_lake/unified/find.json index 8a3468a135..d0652dc720 100644 --- a/test/data_lake/find.json +++ b/test/data_lake/unified/find.json @@ -1,12 +1,36 @@ { - "collection_name": "driverdata", - "database_name": "test", + "description": "find", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], "tests": [ { "description": "Find with projection and sort", "operations": [ { - "object": "collection", + "object": "collection0", "name": "find", "arguments": { "filter": { @@ -22,7 +46,7 @@ }, "limit": 5 }, - "result": [ + "expectResult": [ { "a": 5, "b": 6, @@ -51,13 +75,20 @@ ] } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "find": "driverdata" + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "driverdata" + }, + "commandName": "find", + "databaseName": "test" + } } - } + ] } ] } diff --git a/test/data_lake/unified/getMore.json b/test/data_lake/unified/getMore.json new file mode 100644 index 0000000000..109b6d3d8e --- /dev/null +++ b/test/data_lake/unified/getMore.json @@ -0,0 +1,95 @@ +{ + "description": "getMore", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "A successful find event with getMore", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "driverdata", + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": { + "$$type": "string" + }, + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "cursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/listCollections.json b/test/data_lake/unified/listCollections.json new file mode 100644 index 0000000000..642e7ed328 --- /dev/null +++ b/test/data_lake/unified/listCollections.json @@ -0,0 +1,48 @@ +{ + "description": "listCollections", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + }, + "commandName": "listCollections", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/listDatabases.json b/test/data_lake/unified/listDatabases.json new file mode 100644 index 0000000000..64506ee54e --- /dev/null +++ b/test/data_lake/unified/listDatabases.json @@ -0,0 +1,41 @@ +{ + "description": "listDatabases", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + }, + "commandName": "listDatabases", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/runCommand.json b/test/data_lake/unified/runCommand.json new file mode 100644 index 0000000000..325b6b3f30 --- /dev/null +++ b/test/data_lake/unified/runCommand.json @@ -0,0 +1,54 @@ +{ + "description": "runCommand", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "ping succeeds using runCommand", + "operations": [ + { + "object": "database0", + "name": "runCommand", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_data_lake.py b/test/test_data_lake.py index a11bd9b9cd..8ba83ab190 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -17,16 +17,16 @@ import os import sys +from pathlib import Path import pytest sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.crud_v2_format import TestCrudV2 +from test.unified_format import generate_test_classes from test.utils import ( OvertCommandListener, - SpecTestCreator, rs_client_noauth, rs_or_single_client, ) @@ -100,30 +100,11 @@ def test_3(self): client[self.TEST_DB][self.TEST_COLLECTION].find_one() -class DataLakeTestSpec(TestCrudV2): - # Default test database and collection names. - TEST_DB = "test" - TEST_COLLECTION = "driverdata" - - @classmethod - @client_context.require_data_lake - def setUpClass(cls): - super().setUpClass() - - def setup_scenario(self, scenario_def): - # Spec tests MUST NOT insert data/drop collection for - # data lake testing. - pass - - -def create_test(scenario_def, test, name): - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - +# Location of JSON test specifications. +TEST_PATH = Path(__file__).parent / "data_lake/unified" -SpecTestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) if __name__ == "__main__": From dcaa42bfa456386b3b098d9956e44f00e2c9a5bc Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 7 Aug 2024 11:31:23 -0700 Subject: [PATCH 1379/2111] PYTHON-4632 Fix test_connection_timeout_message (#1774) --- test/test_pooling.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/test/test_pooling.py b/test/test_pooling.py index aa32f9f774..cd8a617358 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -416,7 +416,8 @@ def find_one(): @client_context.require_failCommand_fail_point def test_csot_timeout_message(self): client = rs_or_single_client(appName="connectionTimeoutApp") - # Mock a connection failing due to timeout. + self.addCleanup(client.close) + # Mock an operation failing due to pymongo.timeout(). mock_connection_timeout = { "configureFailPoint": "failCommand", "mode": "alwaysOn", @@ -440,8 +441,8 @@ def test_csot_timeout_message(self): @client_context.require_failCommand_fail_point def test_socket_timeout_message(self): client = rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") - - # Mock a connection failing due to timeout. + self.addCleanup(client.close) + # Mock an operation failing due to socketTimeoutMS. mock_connection_timeout = { "configureFailPoint": "failCommand", "mode": "alwaysOn", @@ -469,7 +470,7 @@ def test_socket_timeout_message(self): 4, 9, 0 ) # configureFailPoint does not allow failure on handshake before 4.9, fixed in SERVER-49336 def test_connection_timeout_message(self): - # Mock a connection failing due to timeout. + # Mock a connection creation failing due to timeout. mock_connection_timeout = { "configureFailPoint": "failCommand", "mode": "alwaysOn", @@ -481,9 +482,18 @@ def test_connection_timeout_message(self): }, } + client = rs_or_single_client( + connectTimeoutMS=500, + socketTimeoutMS=500, + appName="connectionTimeoutApp", + heartbeatFrequencyMS=1000000, + ) + self.addCleanup(client.close) + client.admin.command("ping") + pool = get_pool(client) + pool.reset_without_pause() with self.fail_point(mock_connection_timeout): with self.assertRaises(Exception) as error: - client = rs_or_single_client(connectTimeoutMS=500, appName="connectionTimeoutApp") client.admin.command("ping") self.assertTrue( From 13cf110f016b36926ce836552c7b324b0ea7dba0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 7 Aug 2024 16:17:48 -0700 Subject: [PATCH 1380/2111] PYTHON-4633 Speed up TestCollectionChangeStream.test_uuid_representations (#1775) --- test/test_change_stream.py | 36 ++++++++++++++++++++----------- test/test_custom_types.py | 9 +++----- test/test_examples.py | 5 +++++ test/test_sdam_monitoring_spec.py | 8 ++++--- 4 files changed, 37 insertions(+), 21 deletions(-) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index e00aaa6403..b71f5613d8 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -1020,21 +1020,32 @@ def test_raw(self): self.assertEqual(change["ns"]["coll"], self.watched_collection().name) self.assertEqual(change["fullDocument"], raw_doc) + @client_context.require_version_min(4, 0) # Needed for start_at_operation_time. def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" + optime = self.db.command("ping")["operationTime"] + self.watched_collection().insert_many( + [ + {"_id": Binary(uuid.uuid4().bytes, id_subtype)} + for id_subtype in (STANDARD, PYTHON_LEGACY) + ] + ) for uuid_representation in ALL_UUID_REPRESENTATIONS: - for id_subtype in (STANDARD, PYTHON_LEGACY): - options = self.watched_collection().codec_options.with_options( - uuid_representation=uuid_representation - ) - coll = self.watched_collection(codec_options=options) - with coll.watch() as change_stream: - coll.insert_one({"_id": Binary(uuid.uuid4().bytes, id_subtype)}) - _ = change_stream.next() - resume_token = change_stream.resume_token + options = self.watched_collection().codec_options.with_options( + uuid_representation=uuid_representation + ) + coll = self.watched_collection(codec_options=options) + with coll.watch(start_at_operation_time=optime, max_await_time_ms=1) as change_stream: + _ = change_stream.next() + resume_token_1 = change_stream.resume_token + _ = change_stream.next() + resume_token_2 = change_stream.resume_token - # Should not error. - coll.watch(resume_after=resume_token) + # Should not error. + with coll.watch(resume_after=resume_token_1): + pass + with coll.watch(resume_after=resume_token_2): + pass def test_document_id_order(self): """Test with document _ids that need their order preserved.""" @@ -1053,7 +1064,8 @@ def test_document_id_order(self): # The resume token is always a document. self.assertIsInstance(resume_token, document_class) # Should not error. - coll.watch(resume_after=resume_token) + with coll.watch(resume_after=resume_token): + pass coll.delete_many({}) def test_read_concern(self): diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 7daf83244d..c30c62b1b1 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -764,9 +764,7 @@ def test_grid_out_custom_opts(self): db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, - aliases=["foo"], metadata={"foo": "red", "bar": "blue"}, bar=3, baz="hello", @@ -780,13 +778,10 @@ def test_grid_out_custom_opts(self): self.assertEqual("my_file", two.filename) self.assertEqual(5, two._id) self.assertEqual(11, two.length) - self.assertEqual("text/html", two.content_type) self.assertEqual(1000, two.chunk_size) self.assertTrue(isinstance(two.upload_date, datetime.datetime)) - self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual(None, two.md5) for attr in [ "_id", @@ -805,7 +800,9 @@ def test_grid_out_custom_opts(self): class ChangeStreamsWCustomTypesTestMixin: @no_type_check def change_stream(self, *args, **kwargs): - return self.watched_target.watch(*args, **kwargs) + stream = self.watched_target.watch(*args, max_await_time_ms=1, **kwargs) + self.addCleanup(stream.close) + return stream @no_type_check def insert_and_check(self, change_stream, insert_doc, expected_doc): diff --git a/test/test_examples.py b/test/test_examples.py index e003d8459a..02b1785866 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -747,6 +747,7 @@ def test_change_streams(self): done = False def insert_docs(): + nonlocal done while not done: db.inventory.insert_one({"username": "alice"}) db.inventory.delete_one({"username": "alice"}) @@ -760,17 +761,20 @@ def insert_docs(): cursor = db.inventory.watch() next(cursor) # End Changestream Example 1 + cursor.close() # Start Changestream Example 2 cursor = db.inventory.watch(full_document="updateLookup") next(cursor) # End Changestream Example 2 + cursor.close() # Start Changestream Example 3 resume_token = cursor.resume_token cursor = db.inventory.watch(resume_after=resume_token) next(cursor) # End Changestream Example 3 + cursor.close() # Start Changestream Example 4 pipeline = [ @@ -780,6 +784,7 @@ def insert_docs(): cursor = db.inventory.watch(pipeline=pipeline) next(cursor) # End Changestream Example 4 + cursor.close() finally: done = True t.join() diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 63281c9871..8e0a3cbbb4 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -179,7 +179,7 @@ def setUp(self): def create_test(scenario_def): def run_scenario(self): - with client_knobs(events_queue_frequency=0.1): + with client_knobs(events_queue_frequency=0.05, min_heartbeat_interval=0.05): _run_scenario(self) def _run_scenario(self): @@ -216,7 +216,7 @@ def _run(self): ) # Wait some time to catch possible lagging extra events. - time.sleep(0.5) + wait_until(lambda: topology._events.empty(), "publish lagging events") i = 0 while i < expected_len: @@ -273,7 +273,9 @@ class TestSdamMonitoring(IntegrationTest): def setUpClass(cls): super().setUpClass() # Speed up the tests by decreasing the event publish frequency. - cls.knobs = client_knobs(events_queue_frequency=0.1) + cls.knobs = client_knobs( + events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 + ) cls.knobs.enable() cls.listener = ServerAndTopologyEventListener() retry_writes = client_context.supports_transactions() From 682f15b21e6ccf823d528a06e34c034fcbff208e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 8 Aug 2024 08:21:59 -0700 Subject: [PATCH 1381/2111] PYTHON-4618 - Fix TypeError: Socket cannot be of type SSLSocket (#1772) --- pymongo/network_layer.py | 101 +++++++++++++++++++++++++++++++---- pymongo/pyopenssl_context.py | 3 ++ pymongo/ssl_context.py | 2 + pymongo/ssl_support.py | 3 ++ 4 files changed, 98 insertions(+), 11 deletions(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 6087b1aa8d..f1c378b9b2 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -18,32 +18,111 @@ import asyncio import socket import struct +import sys +from asyncio import AbstractEventLoop, Future from typing import ( - TYPE_CHECKING, Union, ) from pymongo import ssl_support -if TYPE_CHECKING: - from pymongo.pyopenssl_context import _sslConn +try: + from ssl import SSLError, SSLSocket + + _HAVE_SSL = True +except ImportError: + _HAVE_SSL = False + +try: + from pymongo.pyopenssl_context import ( + BLOCKING_IO_LOOKUP_ERROR, + BLOCKING_IO_READ_ERROR, + BLOCKING_IO_WRITE_ERROR, + _sslConn, + ) + + _HAVE_PYOPENSSL = True +except ImportError: + _HAVE_PYOPENSSL = False + _sslConn = SSLSocket # type: ignore + from pymongo.ssl_support import ( # type: ignore[assignment] + BLOCKING_IO_LOOKUP_ERROR, + BLOCKING_IO_READ_ERROR, + BLOCKING_IO_WRITE_ERROR, + ) _UNPACK_HEADER = struct.Struct(" None: - timeout = socket.gettimeout() - socket.settimeout(0.0) +async def async_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: + timeout = sock.gettimeout() + sock.settimeout(0.0) loop = asyncio.get_event_loop() try: - await asyncio.wait_for(loop.sock_sendall(socket, buf), timeout=timeout) # type: ignore[arg-type] + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + if sys.platform == "win32": + await asyncio.wait_for(_async_sendall_ssl_windows(sock, buf), timeout=timeout) + else: + await asyncio.wait_for(_async_sendall_ssl(sock, buf, loop), timeout=timeout) + else: + await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] finally: - socket.settimeout(timeout) + sock.settimeout(timeout) + + +async def _async_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop +) -> None: + fd = sock.fileno() + sent = 0 + + def _is_ready(fut: Future) -> None: + loop.remove_writer(fd) + loop.remove_reader(fd) + if fut.done(): + return + fut.set_result(None) + + while sent < len(buf): + try: + sent += sock.send(buf) + except BLOCKING_IO_ERRORS as exc: + fd = sock.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + await fut + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + await fut + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + loop.add_writer(fd, _is_ready, fut) + await fut + + +# The default Windows asyncio event loop does not support loop.add_reader/add_writer: https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support +async def _async_sendall_ssl_windows(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + while total_sent < total_length: + try: + sent = sock.send(view[total_sent:]) + except BLOCKING_IO_ERRORS: + await asyncio.sleep(0.5) + sent = 0 + total_sent += sent -def sendall(socket: Union[socket.socket, _sslConn], buf: bytes) -> None: - socket.sendall(buf) +def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: + sock.sendall(buf) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index c1b85af125..4f6f6f4a89 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -90,6 +90,9 @@ def _is_ip_address(address: Any) -> bool: # According to the docs for socket.send it can raise # WantX509LookupError and should be retried. BLOCKING_IO_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +BLOCKING_IO_READ_ERROR = _SSL.WantReadError +BLOCKING_IO_WRITE_ERROR = _SSL.WantWriteError +BLOCKING_IO_LOOKUP_ERROR = _SSL.WantX509LookupError def _ragged_eof(exc: BaseException) -> bool: diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 1a0424208f..ee32145c02 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -30,6 +30,8 @@ # Errors raised by SSL sockets when in non-blocking mode. BLOCKING_IO_ERRORS = (_ssl.SSLWantReadError, _ssl.SSLWantWriteError) +BLOCKING_IO_READ_ERROR = _ssl.SSLWantReadError +BLOCKING_IO_WRITE_ERROR = _ssl.SSLWantWriteError # Base Exception class SSLError = _ssl.SSLError diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 6a5dd278d3..580d71f9b0 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -53,6 +53,9 @@ IPADDR_SAFE = True SSLError = _ssl.SSLError BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS + BLOCKING_IO_READ_ERROR = _ssl.BLOCKING_IO_READ_ERROR + BLOCKING_IO_WRITE_ERROR = _ssl.BLOCKING_IO_WRITE_ERROR + BLOCKING_IO_LOOKUP_ERROR = BLOCKING_IO_READ_ERROR def get_ssl_context( certfile: Optional[str], From f17f5e286e65ce5fac5ddf809e3e89ff7d167bc4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 8 Aug 2024 13:18:28 -0500 Subject: [PATCH 1382/2111] PYTHON-4379 Test that durations are included on relevant pool events (#1765) --- .../pool-checkin-make-available.json | 6 ++++-- .../pool-checkout-connection.json | 6 ++++-- .../pool-checkout-error-closed.json | 4 +++- .../pool-checkout-maxConnecting-timeout.json | 3 ++- .../pool-clear-clears-waitqueue.json | 12 ++++++++---- .../pool-clear-interrupting-pending-connections.json | 2 +- test/connection_monitoring/pool-clear-ready.json | 7 +++++-- test/connection_monitoring/pool-ready.json | 6 ++++-- test/connection_monitoring/wait-queue-timeout.json | 6 ++++-- 9 files changed, 35 insertions(+), 17 deletions(-) diff --git a/test/connection_monitoring/pool-checkin-make-available.json b/test/connection_monitoring/pool-checkin-make-available.json index 41c522ae67..3f37f188c0 100644 --- a/test/connection_monitoring/pool-checkin-make-available.json +++ b/test/connection_monitoring/pool-checkin-make-available.json @@ -22,7 +22,8 @@ { "type": "ConnectionCheckedOut", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckedIn", @@ -32,7 +33,8 @@ { "type": "ConnectionCheckedOut", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-checkout-connection.json b/test/connection_monitoring/pool-checkout-connection.json index d89b342605..c7e8914d45 100644 --- a/test/connection_monitoring/pool-checkout-connection.json +++ b/test/connection_monitoring/pool-checkout-connection.json @@ -23,12 +23,14 @@ { "type": "ConnectionReady", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckedOut", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-checkout-error-closed.json b/test/connection_monitoring/pool-checkout-error-closed.json index ee2926e1c0..614403ef50 100644 --- a/test/connection_monitoring/pool-checkout-error-closed.json +++ b/test/connection_monitoring/pool-checkout-error-closed.json @@ -38,7 +38,8 @@ { "type": "ConnectionCheckedOut", "address": 42, - "connectionId": 42 + "connectionId": 42, + "duration": 42 }, { "type": "ConnectionCheckedIn", @@ -56,6 +57,7 @@ { "type": "ConnectionCheckOutFailed", "address": 42, + "duration": 42, "reason": "poolClosed" } ], diff --git a/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json index 84ddf8fdba..4d9fda1a68 100644 --- a/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json +++ b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json @@ -89,7 +89,8 @@ { "type": "ConnectionCheckOutFailed", "reason": "timeout", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-clear-clears-waitqueue.json b/test/connection_monitoring/pool-clear-clears-waitqueue.json index d4aef928c7..e6077f12a5 100644 --- a/test/connection_monitoring/pool-clear-clears-waitqueue.json +++ b/test/connection_monitoring/pool-clear-clears-waitqueue.json @@ -59,7 +59,8 @@ }, { "type": "ConnectionCheckedOut", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutStarted", @@ -76,17 +77,20 @@ { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-clear-interrupting-pending-connections.json b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json index ceae07a1c7..c1fd746329 100644 --- a/test/connection_monitoring/pool-clear-interrupting-pending-connections.json +++ b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json @@ -17,7 +17,7 @@ ], "closeConnection": false, "blockConnection": true, - "blockTimeMS": 1000 + "blockTimeMS": 10000 } }, "poolOptions": { diff --git a/test/connection_monitoring/pool-clear-ready.json b/test/connection_monitoring/pool-clear-ready.json index 800c3545ad..88c2988ac5 100644 --- a/test/connection_monitoring/pool-clear-ready.json +++ b/test/connection_monitoring/pool-clear-ready.json @@ -40,7 +40,8 @@ { "type": "ConnectionCheckedOut", "address": 42, - "connectionId": 42 + "connectionId": 42, + "duration": 42 }, { "type": "ConnectionPoolCleared", @@ -49,6 +50,7 @@ { "type": "ConnectionCheckOutFailed", "address": 42, + "duration": 42, "reason": "connectionError" }, { @@ -57,7 +59,8 @@ }, { "type": "ConnectionCheckedOut", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-ready.json b/test/connection_monitoring/pool-ready.json index 29ce7326cf..a90aed04d0 100644 --- a/test/connection_monitoring/pool-ready.json +++ b/test/connection_monitoring/pool-ready.json @@ -31,7 +31,8 @@ { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionPoolReady", @@ -47,7 +48,8 @@ }, { "type": "ConnectionCheckedOut", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/wait-queue-timeout.json b/test/connection_monitoring/wait-queue-timeout.json index fbcbdfb04d..8bd7c49499 100644 --- a/test/connection_monitoring/wait-queue-timeout.json +++ b/test/connection_monitoring/wait-queue-timeout.json @@ -48,7 +48,8 @@ { "type": "ConnectionCheckedOut", "connectionId": 42, - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutStarted", @@ -57,7 +58,8 @@ { "type": "ConnectionCheckOutFailed", "reason": "timeout", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckedIn", From 0a578b44520ae72724a7cc9ceb89b9b6bdafc663 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 8 Aug 2024 11:26:25 -0700 Subject: [PATCH 1383/2111] PYTHON-4396 Unskip failCommand+appName tests on MongoDB 4.4.7+ (#1735) --- test/__init__.py | 4 +- test/asynchronous/__init__.py | 4 +- test/csot/change-streams.json | 22 +++---- test/csot/close-cursors.json | 12 ++-- test/csot/command-execution.json | 5 +- test/csot/convenient-transactions.json | 24 +++++++- test/csot/deprecated-options.json | 2 +- test/csot/error-transformations.json | 1 - test/csot/global-timeoutMS.json | 2 +- test/csot/legacy-timeouts.json | 4 +- test/csot/non-tailable-cursors.json | 20 +++---- test/csot/override-operation-timeoutMS.json | 2 +- test/csot/retryability-legacy-timeouts.json | 2 +- test/csot/sessions-inherit-timeoutMS.json | 30 ++++++++-- ...sessions-override-operation-timeoutMS.json | 34 ++++++++--- test/csot/sessions-override-timeoutMS.json | 30 ++++++++-- test/csot/tailable-awaitData.json | 14 ++--- test/csot/tailable-non-awaitData.json | 10 ++-- .../unified/hello-command-error.json | 2 +- .../unified/hello-network-error.json | 2 +- .../unified/interruptInUse-pool-clear.json | 2 +- .../unified/minPoolSize-error.json | 2 +- test/load_balancer/cursors.json | 57 +++++++++++++++---- test/load_balancer/sdam-error-handling.json | 5 +- test/load_balancer/transactions.json | 2 +- test/test_pooling.py | 9 +-- test/test_streaming_protocol.py | 1 - 27 files changed, 204 insertions(+), 100 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index b603ae0a5b..b8394bd134 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -728,9 +728,9 @@ def require_failCommand_fail_point(self, func): def require_failCommand_appName(self, func): """Run a test only if the server supports the failCommand appName.""" - # SERVER-47195 + # SERVER-47195 and SERVER-49336. return self._require( - lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + lambda: (self.test_commands_enabled and self.version >= (4, 4, 7)), "failCommand appName must be supported", func=func, ) diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 878ef81bae..4098d9af8f 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -730,9 +730,9 @@ def require_failCommand_fail_point(self, func): def require_failCommand_appName(self, func): """Run a test only if the server supports the failCommand appName.""" - # SERVER-47195 + # SERVER-47195 and SERVER-49336. return self._require( - lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + lambda: (self.test_commands_enabled and self.version >= (4, 4, 7)), "failCommand appName must be supported", func=func, ) diff --git a/test/csot/change-streams.json b/test/csot/change-streams.json index a8b2b7e170..8cffb08e26 100644 --- a/test/csot/change-streams.json +++ b/test/csot/change-streams.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/csot/close-cursors.json b/test/csot/close-cursors.json index 1361971c4c..79b0de7b6a 100644 --- a/test/csot/close-cursors.json +++ b/test/csot/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index f0858791e9..aa9c3eb23f 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -3,11 +3,10 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", - "sharded-replicaset", "sharded" ], "serverless": "forbid" @@ -52,7 +51,7 @@ ], "appName": "reduceMaxTimeMSTest", "blockConnection": true, - "blockTimeMS": 75 + "blockTimeMS": 50 } } } diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json index 0c8cc6edd9..3868b3026c 100644 --- a/test/csot/convenient-transactions.json +++ b/test/csot/convenient-transactions.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json index 9c9b9a2288..d3e4631ff4 100644 --- a/test/csot/deprecated-options.json +++ b/test/csot/deprecated-options.json @@ -6,7 +6,7 @@ "minServerVersion": "4.2", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json index 4d9e061c3b..4889e39583 100644 --- a/test/csot/error-transformations.json +++ b/test/csot/error-transformations.json @@ -11,7 +11,6 @@ { "minServerVersion": "4.2", "topologies": [ - "replicaset", "sharded" ] } diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json index 34854ac155..740bbad2e2 100644 --- a/test/csot/global-timeoutMS.json +++ b/test/csot/global-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/csot/legacy-timeouts.json b/test/csot/legacy-timeouts.json index 3a2d2eaefb..535425c934 100644 --- a/test/csot/legacy-timeouts.json +++ b/test/csot/legacy-timeouts.json @@ -1,6 +1,6 @@ { "description": "legacy timeouts continue to work if timeoutMS is not set", - "schemaVersion": "1.9", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.4" @@ -280,7 +280,7 @@ { "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json index 0a5448a6bb..291c6e72aa 100644 --- a/test/csot/non-tailable-cursors.json +++ b/test/csot/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json index 896b996ee8..6fa0bd802a 100644 --- a/test/csot/override-operation-timeoutMS.json +++ b/test/csot/override-operation-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json index 63e8efccfc..aded781aee 100644 --- a/test/csot/retryability-legacy-timeouts.json +++ b/test/csot/retryability-legacy-timeouts.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json index 8205c086bc..13ea91c794 100644 --- a/test/csot/sessions-inherit-timeoutMS.json +++ b/test/csot/sessions-inherit-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/csot/sessions-override-operation-timeoutMS.json b/test/csot/sessions-override-operation-timeoutMS.json index ff26de29f5..441c698328 100644 --- a/test/csot/sessions-override-operation-timeoutMS.json +++ b/test/csot/sessions-override-operation-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/csot/sessions-override-timeoutMS.json b/test/csot/sessions-override-timeoutMS.json index 1d3b8932af..d90152e909 100644 --- a/test/csot/sessions-override-timeoutMS.json +++ b/test/csot/sessions-override-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json index 6da85c7783..535fb69243 100644 --- a/test/csot/tailable-awaitData.json +++ b/test/csot/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/csot/tailable-non-awaitData.json b/test/csot/tailable-non-awaitData.json index 34ee660963..e88230e4f7 100644 --- a/test/csot/tailable-non-awaitData.json +++ b/test/csot/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/discovery_and_monitoring/unified/hello-command-error.json b/test/discovery_and_monitoring/unified/hello-command-error.json index 9afea87e77..87958cb2c0 100644 --- a/test/discovery_and_monitoring/unified/hello-command-error.json +++ b/test/discovery_and_monitoring/unified/hello-command-error.json @@ -3,7 +3,7 @@ "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "serverless": "forbid", "topologies": [ "single", diff --git a/test/discovery_and_monitoring/unified/hello-network-error.json b/test/discovery_and_monitoring/unified/hello-network-error.json index 55373c90cc..15ed2b605e 100644 --- a/test/discovery_and_monitoring/unified/hello-network-error.json +++ b/test/discovery_and_monitoring/unified/hello-network-error.json @@ -3,7 +3,7 @@ "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "serverless": "forbid", "topologies": [ "single", diff --git a/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json index a20d79030a..d9329646d4 100644 --- a/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json +++ b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json @@ -3,7 +3,7 @@ "schemaVersion": "1.11", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4", "serverless": "forbid", "topologies": [ "replicaset", diff --git a/test/discovery_and_monitoring/unified/minPoolSize-error.json b/test/discovery_and_monitoring/unified/minPoolSize-error.json index 7e294baf66..bd9e9fcdec 100644 --- a/test/discovery_and_monitoring/unified/minPoolSize-error.json +++ b/test/discovery_and_monitoring/unified/minPoolSize-error.json @@ -3,7 +3,7 @@ "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "serverless": "forbid", "topologies": [ "single" diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json index e66c46c0c3..27aaddd5b6 100644 --- a/test/load_balancer/cursors.json +++ b/test/load_balancer/cursors.json @@ -1,6 +1,6 @@ { "description": "cursors are correctly pinned to connections for load-balanced clusters", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ @@ -222,7 +222,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -239,7 +242,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -333,7 +339,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -475,7 +484,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -492,7 +504,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -605,7 +620,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -750,7 +768,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -767,7 +788,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -858,7 +882,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -950,7 +977,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": { "$$type": "string" @@ -1100,7 +1130,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 8760b723fd..5892dcacd6 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -1,6 +1,6 @@ { "description": "state change errors are correctly handled", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ @@ -263,7 +263,7 @@ "description": "errors during the initial connection hello are ignored", "runOnRequirements": [ { - "minServerVersion": "4.9" + "minServerVersion": "4.4.7" } ], "operations": [ @@ -279,7 +279,6 @@ }, "data": { "failCommands": [ - "ismaster", "isMaster", "hello" ], diff --git a/test/load_balancer/transactions.json b/test/load_balancer/transactions.json index 8cf24f4ca4..0dd04ee854 100644 --- a/test/load_balancer/transactions.json +++ b/test/load_balancer/transactions.json @@ -1,6 +1,6 @@ { "description": "transactions are correctly pinned to connections for load-balanced clusters", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ diff --git a/test/test_pooling.py b/test/test_pooling.py index cd8a617358..31259d7b3a 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -413,7 +413,7 @@ def find_one(): # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds print(len(pool.conns)) - @client_context.require_failCommand_fail_point + @client_context.require_failCommand_appName def test_csot_timeout_message(self): client = rs_or_single_client(appName="connectionTimeoutApp") self.addCleanup(client.close) @@ -438,7 +438,7 @@ def test_csot_timeout_message(self): self.assertTrue("(configured timeouts: timeoutMS: 500.0ms" in str(error.exception)) - @client_context.require_failCommand_fail_point + @client_context.require_failCommand_appName def test_socket_timeout_message(self): client = rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") self.addCleanup(client.close) @@ -465,10 +465,7 @@ def test_socket_timeout_message(self): in str(error.exception) ) - @client_context.require_failCommand_fail_point - @client_context.require_version_min( - 4, 9, 0 - ) # configureFailPoint does not allow failure on handshake before 4.9, fixed in SERVER-49336 + @client_context.require_failCommand_appName def test_connection_timeout_message(self): # Mock a connection creation failing due to timeout. mock_connection_timeout = { diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 44e673822a..9bca899a48 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -141,7 +141,6 @@ def changed_event(event): self.assertEqual(1, len(events)) self.assertGreater(events[0].new_description.round_trip_time, 0) - @client_context.require_version_min(4, 9, -1) @client_context.require_failCommand_appName def test_monitor_waits_after_server_check_error(self): # This test implements: From 6934611879ba5e69ddcfe4d88b6aeb686ef2ed38 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 8 Aug 2024 13:43:51 -0500 Subject: [PATCH 1384/2111] PYTHON-4637 Fix azure kms task variant (#1779) --- .evergreen/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index af9a496591..4a3ea88756 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -3153,7 +3153,7 @@ buildvariants: - name: testazurekms-variant display_name: "Azure KMS" - run_on: rhel87-small + run_on: debian11-small tasks: - name: testazurekms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README From b14420a9df321714a06ddae1a907a04846ebca20 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 8 Aug 2024 15:40:18 -0500 Subject: [PATCH 1385/2111] PYTHON-4614 Do not test PyPy with OpenSSL 1.0.2 (#1777) --- .evergreen/combine-coverage.sh | 2 +- .evergreen/config.yml | 2 +- .evergreen/utils.sh | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh index 7db4a6cbc2..92d2f1f1f8 100644 --- a/.evergreen/combine-coverage.sh +++ b/.evergreen/combine-coverage.sh @@ -15,7 +15,7 @@ fi createvirtualenv "$PYTHON_BINARY" covenv # Keep in sync with run-tests.sh # coverage >=5 is needed for relative_files=true. -pip install -q "coverage>=5,<=7.5" +pip install -q "coverage[toml]>=5,<=7.5" pip list ls -la coverage/ diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4a3ea88756..8f32b8a1b1 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2854,7 +2854,7 @@ buildvariants: matrix_spec: platform: rhel7 # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.8", "3.9", "pypy3.9", "pypy3.10"] + python-version: ["3.8", "3.9"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index f881d9190c..ff186c6e1c 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -67,7 +67,6 @@ createvirtualenv () { export PIP_QUIET=1 python -m pip install --upgrade pip - python -m pip install --upgrade hatch } # Usage: From 8939ea359c9f0fd7c38d19afba4da5ac82ae0f27 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 8 Aug 2024 19:34:39 -0700 Subject: [PATCH 1386/2111] PYTHON-4640 Improve performance of creating ObjectIds with multiple threads (#1781) --- bson/objectid.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/bson/objectid.py b/bson/objectid.py index 57efdc7983..a5500872da 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -29,6 +29,9 @@ from bson.tz_util import utc _MAX_COUNTER_VALUE = 0xFFFFFF +_PACK_INT = struct.Struct(">I").pack +_PACK_INT_RANDOM = struct.Struct(">I5s").pack +_UNPACK_INT = struct.Struct(">I").unpack def _raise_invalid_id(oid: str) -> NoReturn: @@ -132,7 +135,7 @@ def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> Ob if offset is not None: generation_time = generation_time - offset timestamp = calendar.timegm(generation_time.timetuple()) - oid = struct.pack(">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" + oid = _PACK_INT(int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" return cls(oid) @classmethod @@ -163,18 +166,12 @@ def _random(cls) -> bytes: def __generate(self) -> None: """Generate a new value for this ObjectId.""" - # 4 bytes current time - oid = struct.pack(">I", int(time.time())) - - # 5 bytes random - oid += ObjectId._random() - - # 3 bytes inc with ObjectId._inc_lock: - oid += struct.pack(">I", ObjectId._inc)[1:4] - ObjectId._inc = (ObjectId._inc + 1) % (_MAX_COUNTER_VALUE + 1) + inc = ObjectId._inc + ObjectId._inc = (inc + 1) % (_MAX_COUNTER_VALUE + 1) - self.__id = oid + # 4 bytes current time, 5 bytes random, 3 bytes inc. + self.__id = _PACK_INT_RANDOM(int(time.time()), ObjectId._random()) + _PACK_INT(inc)[1:4] def __validate(self, oid: Any) -> None: """Validate and use the given id for this ObjectId. @@ -212,7 +209,7 @@ def generation_time(self) -> datetime.datetime: represents the generation time in UTC. It is precise to the second. """ - timestamp = struct.unpack(">I", self.__id[0:4])[0] + timestamp = _UNPACK_INT(self.__id[0:4])[0] return datetime.datetime.fromtimestamp(timestamp, utc) def __getstate__(self) -> bytes: From d91393bc8393f5e3c49fbc6b0c5b437dbf737202 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 9 Aug 2024 16:27:38 -0500 Subject: [PATCH 1387/2111] PYTHON-4644 Use a random name for hatchenv (#1782) --- .evergreen/hatch.sh | 7 +++++-- .evergreen/utils.sh | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index e24320b9df..8438394101 100644 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -13,8 +13,11 @@ if $PYTHON_BINARY -m hatch --version; then $PYTHON_BINARY -m hatch run "$@" } else # No toolchain hatch present, set up virtualenv before installing hatch - createvirtualenv "$PYTHON_BINARY" hatchenv - trap "deactivate; rm -rf hatchenv" EXIT HUP + # Use a random venv name because the encryption tasks run this script multiple times in the same run. + ENV_NAME=hatchenv-$RANDOM + createvirtualenv "$PYTHON_BINARY" $ENV_NAME + # shellcheck disable=SC2064 + trap "deactivate; rm -rf $ENV_NAME" EXIT HUP python -m pip install -q hatch run_hatch() { python -m hatch run "$@" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index ff186c6e1c..1a5e2a153f 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -43,7 +43,7 @@ find_python3() { createvirtualenv () { PYTHON=$1 VENVPATH=$2 - rm -rf $VENVPATH + # Prefer venv VENV="$PYTHON -m venv" if [ "$(uname -s)" = "Darwin" ]; then From 940d2c85fb66f894f9520e69e1b60a05b87a6eff Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 9 Aug 2024 16:28:10 -0500 Subject: [PATCH 1388/2111] PYTHON-4616 Remove EVG release scripts (#1776) --- .evergreen/build-mac.sh | 30 ----- .evergreen/build-manylinux-internal.sh | 42 ------- .evergreen/build-manylinux.sh | 50 -------- .evergreen/build-windows.sh | 29 ----- .evergreen/config.yml | 159 ------------------------- .evergreen/release.sh | 9 -- 6 files changed, 319 deletions(-) delete mode 100755 .evergreen/build-mac.sh delete mode 100755 .evergreen/build-manylinux-internal.sh delete mode 100755 .evergreen/build-manylinux.sh delete mode 100755 .evergreen/build-windows.sh delete mode 100755 .evergreen/release.sh diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh deleted file mode 100755 index 4e8be8cf58..0000000000 --- a/.evergreen/build-mac.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -ex - -# Get access to testinstall. -. .evergreen/utils.sh - -# Create temp directory for validated files. -rm -rf validdist -mkdir -p validdist -mv dist/* validdist || true - -VERSION=${VERSION:-3.10} - -PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 -rm -rf build - -createvirtualenv $PYTHON releasevenv -python -m pip install build -python -m build --wheel . -deactivate || true -rm -rf releasevenv - -# Test that each wheel is installable. -for release in dist/*; do - testinstall $PYTHON $release - mv $release validdist/ -done - -mv validdist/* dist -rm -rf validdist -ls dist diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh deleted file mode 100755 index 267e647ffd..0000000000 --- a/.evergreen/build-manylinux-internal.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -ex -cd /src - -# Get access to testinstall. -. .evergreen/utils.sh - -# Create temp directory for validated files. -rm -rf validdist -mkdir -p validdist -mv dist/* validdist || true - -# Compile wheels -for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp38|cp39|cp310|cp311|cp312) ]]; then - continue - fi - # https://github.com/pypa/manylinux/issues/49 - rm -rf build - $PYTHON -m pip install build - $PYTHON -m build --wheel . - rm -rf build - - # Audit wheels and write manylinux tag - for whl in dist/*.whl; do - # Skip already built manylinux wheels. - if [[ "$whl" != *"manylinux"* ]]; then - auditwheel repair $whl -w dist - rm $whl - fi - done - - # Test that each wheel is installable. - # Test without virtualenv because it's not present on manylinux containers. - for release in dist/*; do - testinstall $PYTHON $release "without-virtualenv" - mv $release validdist/ - done -done - -mv validdist/* dist -rm -rf validdist -ls dist diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh deleted file mode 100755 index 19f2b7f4aa..0000000000 --- a/.evergreen/build-manylinux.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -ex - -docker version - -# Set up qemu support using the method used in docker/setup-qemu-action -# https://github.com/docker/setup-qemu-action/blob/2b82ce82d56a2a04d2637cd93a637ae1b359c0a7/README.md?plain=1#L46 -docker run --rm --privileged tonistiigi/binfmt:latest --install all - -# manylinux1 2021-05-05-b64d921 and manylinux2014 2021-05-05-1ac6ef3 were -# the last releases to generate pip < 20.3 compatible wheels. After that -# auditwheel was upgraded to v4 which produces PEP 600 manylinux_x_y wheels -# which requires pip >= 20.3. We use the older docker image to support older -# pip versions. -BUILD_WITH_TAG="$1" -if [ -n "$BUILD_WITH_TAG" ]; then - images=(quay.io/pypa/manylinux1_x86_64:2021-05-05-b64d921 \ - quay.io/pypa/manylinux1_i686:2021-05-05-b64d921 \ - quay.io/pypa/manylinux2014_x86_64:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_i686:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_aarch64:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_ppc64le:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_s390x:2021-05-05-1ac6ef3) -else - images=(quay.io/pypa/manylinux1_x86_64 \ - quay.io/pypa/manylinux1_i686 \ - quay.io/pypa/manylinux2014_x86_64 \ - quay.io/pypa/manylinux2014_i686 \ - quay.io/pypa/manylinux2014_aarch64 \ - quay.io/pypa/manylinux2014_ppc64le \ - quay.io/pypa/manylinux2014_s390x) -fi - -for image in "${images[@]}"; do - docker pull $image - docker run --rm -v "`pwd`:/src" $image /src/.evergreen/build-manylinux-internal.sh -done - -ls dist - -# Check for any unexpected files. -unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp38*' -or \ - -iname '*cp39*' -or \ - -iname '*cp310*' -or \ - -iname '*cp311*' -or \ - -iname '*cp312*' \)) -if [ -n "$unexpected" ]; then - echo "Unexpected files:" $unexpected - exit 1 -fi diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh deleted file mode 100755 index d30382fcee..0000000000 --- a/.evergreen/build-windows.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -ex - -# Get access to testinstall. -. .evergreen/utils.sh - -# Create temp directory for validated files. -rm -rf validdist -mkdir -p validdist -mv dist/* validdist || true - -for VERSION in 38 39 310 311 312; do - _pythons=("C:/Python/Python${VERSION}/python.exe" \ - "C:/Python/32/Python${VERSION}/python.exe") - for PYTHON in "${_pythons[@]}"; do - rm -rf build - $PYTHON -m pip install build - $PYTHON -m build --wheel . - - # Test that each wheel is installable. - for release in dist/*; do - testinstall $PYTHON $release - mv $release validdist/ - done - done -done - -mv validdist/* dist -rm -rf validdist -ls dist diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8f32b8a1b1..1e218e2c7b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -786,92 +786,6 @@ functions: args: - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh - "build release": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - set -o xtrace - VERSION=${VERSION} ENSURE_UNIVERSAL2=${ENSURE_UNIVERSAL2} .evergreen/release.sh - - "upload release": - - command: ec2.assume_role - params: - role_arn: ${assume_role_arn} - - command: archive.targz_pack - params: - target: "release-files.tgz" - source_dir: "src/dist" - include: - - "*" - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: release-files.tgz - remote_file: release/${revision}/${task_id}-${execution}-release-files.tar.gz - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Release files - - "download and merge releases": - - command: ec2.assume_role - params: - role_arn: ${assume_role_arn} - - command: shell.exec - params: - silent: true - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - script: | - # Download all the task coverage files. - aws s3 cp --recursive s3://${bucket_name}/release/${revision}/ release/ - - command: shell.exec - params: - shell: "bash" - script: | - ${PREPARE_SHELL} - set -o xtrace - # Combine releases into one directory. - ls -la release/ - mkdir releases - # Copy old manylinux release first since we want the newer manylinux - # wheels to override them. - mkdir old_manylinux - if mv release/*old_manylinux* old_manylinux; then - for REL in old_manylinux/*; do - tar zxvf $REL -C releases/ - done - fi - for REL in release/*; do - tar zxvf $REL -C releases/ - done - # Build source distribution. - cd src/ - /opt/python/3.8/bin/python3 -m pip install build - /opt/python/3.8/bin/python3 -m build --sdist . - cp dist/* ../releases - - command: archive.targz_pack - params: - target: "release-files-all.tgz" - source_dir: "releases/" - include: - - "*" - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: release-files-all.tgz - remote_file: release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Release files all - "run perf tests": - command: shell.exec type: test @@ -1116,72 +1030,6 @@ tasks: genhtml --version || true valgrind --version || true - - name: "release-mac" - tags: ["release_tag"] - run_on: macos-11 - commands: - - func: "build release" - vars: - VERSION: "3.12" - ENSURE_UNIVERSAL2: "1" - - func: "build release" - vars: - VERSION: "3.11" - ENSURE_UNIVERSAL2: "1" - - func: "build release" - vars: - VERSION: "3.10" - ENSURE_UNIVERSAL2: "1" - - func: "build release" - vars: - VERSION: "3.9" - ENSURE_UNIVERSAL2: "1" - - func: "upload release" - - func: "build release" - vars: - VERSION: "3.8" - - func: "upload release" - - - name: "release-windows" - tags: ["release_tag"] - run_on: windows-64-vsMulti-small - commands: - - func: "build release" - - func: "upload release" - - - name: "release-manylinux" - tags: ["release_tag"] - run_on: ubuntu2204-large - exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). - commands: - - func: "build release" - - func: "upload release" - - - name: "release-old-manylinux" - tags: ["release_tag"] - run_on: ubuntu2204-large - exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). - commands: - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - set -o xtrace - .evergreen/build-manylinux.sh BUILD_WITH_TAG - - func: "upload release" - - - name: "release-combine" - tags: ["release_tag"] - run_on: rhel84-small - depends_on: - - name: "*" - variant: ".release_tag" - patch_optional: true - commands: - - func: "download and merge releases" - # Standard test tasks {{{ - name: "mockupdb" @@ -3177,13 +3025,6 @@ buildvariants: tasks: - name: "check-import-time" -- name: Release - display_name: Release - batchtime: 20160 # 14 days - tags: ["release_tag"] - tasks: - - ".release_tag" - - name: "perf-tests" display_name: "Performance Benchmark Tests" batchtime: 10080 # 7 days diff --git a/.evergreen/release.sh b/.evergreen/release.sh deleted file mode 100755 index 1fdd459ad9..0000000000 --- a/.evergreen/release.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -ex - -if [ "$(uname -s)" = "Darwin" ]; then - .evergreen/build-mac.sh -elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - .evergreen/build-windows.sh -else - .evergreen/build-manylinux.sh -fi From 47427378769e125b891eb70cddfca6e4836b214b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 9 Aug 2024 23:19:15 -0700 Subject: [PATCH 1389/2111] PYTHON-4643 Fix test_to_list_tailable (#1783) --- test/asynchronous/test_cursor.py | 6 ++++-- test/test_cursor.py | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 833493ce32..d6d56244f7 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1380,7 +1380,6 @@ async def test_getMore_does_not_send_readPreference(self): self.assertEqual("getMore", started[1].command_name) self.assertNotIn("$readPreference", started[1].command) - @async_client_context.require_version_min(4, 0) @async_client_context.require_replica_set async def test_to_list_tailable(self): oplog = self.client.local.oplog.rs @@ -1391,7 +1390,10 @@ async def test_to_list_tailable(self): {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True ).max_await_time_ms(1) self.addAsyncCleanup(c.close) - docs = await c.to_list() + # Wait for the change to be read. + docs = [] + while not docs: + docs = await c.to_list() self.assertGreaterEqual(len(docs), 1) async def test_to_list_empty(self): diff --git a/test/test_cursor.py b/test/test_cursor.py index e995bd5298..0d61865196 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1371,7 +1371,6 @@ def test_getMore_does_not_send_readPreference(self): self.assertEqual("getMore", started[1].command_name) self.assertNotIn("$readPreference", started[1].command) - @client_context.require_version_min(4, 0) @client_context.require_replica_set def test_to_list_tailable(self): oplog = self.client.local.oplog.rs @@ -1382,7 +1381,10 @@ def test_to_list_tailable(self): {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True ).max_await_time_ms(1) self.addCleanup(c.close) - docs = c.to_list() + # Wait for the change to be read. + docs = [] + while not docs: + docs = c.to_list() self.assertGreaterEqual(len(docs), 1) def test_to_list_empty(self): From cd9de28c8f591f0bc774a264f3c1a831fe2f15b3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 9 Aug 2024 23:19:46 -0700 Subject: [PATCH 1390/2111] PYTHON-4652 Remove duplicate async tests in Github Actions (#1785) --- .github/workflows/test-python.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 4d514726cd..ba04e8e418 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -78,9 +78,6 @@ jobs: - name: Run tests run: | hatch run test:test - - name: Run async tests - run: | - hatch run test:test-async doctest: runs-on: ubuntu-latest From 30b32d00c45b0e79fce822d23a283f562c1291f9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 12 Aug 2024 10:10:19 -0700 Subject: [PATCH 1391/2111] PYTHON-4649 Skip CSOT tests on slow Windows and macOS hosts (#1784) --- .evergreen/config.yml | 9 +++++++++ test/test_csot.py | 1 + test/unified_format.py | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1e218e2c7b..0df4bdcef5 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -435,6 +435,9 @@ functions: if [ -n "${TEST_INDEX_MANAGEMENT}" ]; then export TEST_INDEX_MANAGEMENT=1 fi + if [ -n "${SKIP_CSOT_TESTS}" ]; then + export SKIP_CSOT_TESTS=1 + fi GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ PYTHON_BINARY=${PYTHON_BINARY} \ @@ -2072,6 +2075,8 @@ axes: skip_EC2_auth_test: true skip_ECS_auth_test: true skip_web_identity_auth_test: true + # CSOT tests are unreliable on our slow macOS hosts. + SKIP_CSOT_TESTS: true - id: macos-arm64 display_name: "macOS Arm64" run_on: macos-14-arm64 @@ -2079,6 +2084,8 @@ axes: skip_EC2_auth_test: true skip_ECS_auth_test: true skip_web_identity_auth_test: true + # CSOT tests are unreliable on our slow macOS hosts. + SKIP_CSOT_TESTS: true - id: rhel7 display_name: "RHEL 7.x" run_on: rhel79-small @@ -2121,6 +2128,8 @@ axes: skip_EC2_auth_test: true skip_web_identity_auth_test: true venv_bin_dir: "Scripts" + # CSOT tests are unreliable on our slow Windows hosts. + SKIP_CSOT_TESTS: true # Test with authentication? - id: auth diff --git a/test/test_csot.py b/test/test_csot.py index e8ee92d4a6..64210b4d64 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -77,6 +77,7 @@ def test_timeout_nested(self): @client_context.require_change_streams def test_change_stream_can_resume_after_timeouts(self): coll = self.db.test + coll.insert_one({}) with coll.watch() as stream: with pymongo.timeout(0.1): with self.assertRaises(PyMongoError) as ctx: diff --git a/test/unified_format.py b/test/unified_format.py index 0322d83cce..d978ef84d3 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -145,6 +145,8 @@ from pymongo.typings import _Address from pymongo.write_concern import WriteConcern +SKIP_CSOT_TESTS = os.getenv("SKIP_CSOT_TESTS") + JSON_OPTS = json_util.JSONOptions(tz_aware=False) IS_INTERRUPTED = False @@ -1953,6 +1955,9 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): + if "csot" in self.id().lower() and SKIP_CSOT_TESTS: + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") + # Kill all sessions before and after each test to prevent an open # transaction (from a test failure) from blocking collection/database # operations during test set up and tear down. From 2afbd4b2791e619e3983dcc8a04b33e5fc4c4844 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:21:09 -0700 Subject: [PATCH 1392/2111] PYTHON-4650 Fix MongoClient.bulk_write test failure when compression is enabled (#1786) --- test/__init__.py | 4 + test/asynchronous/__init__.py | 4 + test/asynchronous/test_client_bulk_write.py | 97 +++++++++++---------- test/test_client_bulk_write.py | 97 +++++++++++---------- 4 files changed, 112 insertions(+), 90 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index b8394bd134..e60736e3e8 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -852,6 +852,10 @@ def max_bson_size(self): def max_write_batch_size(self): return (self.hello)["maxWriteBatchSize"] + @property + def max_message_size_bytes(self): + return (self.hello)["maxMessageSizeBytes"] + # Reusable client context client_context = ClientContext() diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 4098d9af8f..a95a9e31b0 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -854,6 +854,10 @@ async def max_bson_size(self): async def max_write_batch_size(self): return (await self.hello)["maxWriteBatchSize"] + @property + async def max_message_size_bytes(self): + return (await self.hello)["maxMessageSizeBytes"] + # Reusable client context async_client_context = AsyncClientContext() diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index f55b3082bb..7ce989bbe7 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -56,20 +56,24 @@ async def test_returns_error_if_no_namespace_provided(self): # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(AsyncIntegrationTest): + async def asyncSetUp(self): + self.max_write_batch_size = await async_client_context.max_write_batch_size + self.max_bson_object_size = await async_client_context.max_bson_size + self.max_message_size_bytes = await async_client_context.max_message_size_bytes + @async_client_context.require_version_min(8, 0, 0, -24) async def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) self.addAsyncCleanup(client.aclose) - max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append(InsertOne(namespace="db.coll", document={"a": "b"})) self.addAsyncCleanup(client.db["coll"].drop) result = await client.bulk_write(models=models) - self.assertEqual(result.inserted_count, max_write_batch_size + 1) + self.assertEqual(result.inserted_count, self.max_write_batch_size + 1) bulk_write_events = [] for event in listener.started_events: @@ -78,7 +82,7 @@ async def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(len(bulk_write_events), 2) first_event, second_event = bulk_write_events - self.assertEqual(len(first_event.command["ops"]), max_write_batch_size) + self.assertEqual(len(first_event.command["ops"]), self.max_write_batch_size) self.assertEqual(len(second_event.command["ops"]), 1) self.assertEqual(first_event.operation_id, second_event.operation_id) @@ -88,12 +92,9 @@ async def test_batch_splits_if_ops_payload_too_large(self): client = await async_rs_or_single_client(event_listeners=[listener]) self.addAsyncCleanup(client.aclose) - max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] - models = [] - num_models = int(max_message_size_bytes / max_bson_object_size + 1) - b_repeated = "b" * (max_bson_object_size - 500) + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - 500) for _ in range(num_models): models.append( InsertOne( @@ -126,7 +127,6 @@ async def test_collects_write_concern_errors_across_batches(self): retryWrites=False, ) self.addAsyncCleanup(client.aclose) - max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] fail_command = { "configureFailPoint": "failCommand", @@ -138,7 +138,7 @@ async def test_collects_write_concern_errors_across_batches(self): } async with self.fail_point(fail_command): models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append( InsertOne( namespace="db.coll", @@ -152,7 +152,7 @@ async def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(context.exception.write_concern_errors), 2) # type: ignore[arg-type] self.assertIsNotNone(context.exception.partial_result) self.assertEqual( - context.exception.partial_result.inserted_count, max_write_batch_size + 1 + context.exception.partial_result.inserted_count, self.max_write_batch_size + 1 ) bulk_write_events = [] @@ -172,9 +172,8 @@ async def test_collects_write_errors_across_batches_unordered(self): await collection.drop() await collection.insert_one(document={"_id": 1}) - max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append( InsertOne( namespace="db.coll", @@ -184,7 +183,7 @@ async def test_collects_write_errors_across_batches_unordered(self): with self.assertRaises(ClientBulkWriteException) as context: await client.bulk_write(models=models, ordered=False) - self.assertEqual(len(context.exception.write_errors), max_write_batch_size + 1) # type: ignore[arg-type] + self.assertEqual(len(context.exception.write_errors), self.max_write_batch_size + 1) # type: ignore[arg-type] bulk_write_events = [] for event in listener.started_events: @@ -203,9 +202,8 @@ async def test_collects_write_errors_across_batches_ordered(self): await collection.drop() await collection.insert_one(document={"_id": 1}) - max_write_batch_size = (await async_client_context.hello)["maxWriteBatchSize"] models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append( InsertOne( namespace="db.coll", @@ -233,10 +231,9 @@ async def test_handles_cursor_requiring_getMore(self): self.addAsyncCleanup(collection.drop) await collection.drop() - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] models = [] - a_repeated = "a" * (max_bson_object_size // 2) - b_repeated = "b" * (max_bson_object_size // 2) + a_repeated = "a" * (self.max_bson_object_size // 2) + b_repeated = "b" * (self.max_bson_object_size // 2) models.append( UpdateOne( namespace="db.coll", @@ -275,12 +272,11 @@ async def test_handles_cursor_requiring_getMore_within_transaction(self): self.addAsyncCleanup(collection.drop) await collection.drop() - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] async with client.start_session() as session: await session.start_transaction() models = [] - a_repeated = "a" * (max_bson_object_size // 2) - b_repeated = "b" * (max_bson_object_size // 2) + a_repeated = "a" * (self.max_bson_object_size // 2) + b_repeated = "b" * (self.max_bson_object_size // 2) models.append( UpdateOne( namespace="db.coll", @@ -319,7 +315,6 @@ async def test_handles_getMore_error(self): self.addAsyncCleanup(collection.drop) await collection.drop() - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 1}, @@ -327,8 +322,8 @@ async def test_handles_getMore_error(self): } async with self.fail_point(fail_command): models = [] - a_repeated = "a" * (max_bson_object_size // 2) - b_repeated = "b" * (max_bson_object_size // 2) + a_repeated = "a" * (self.max_bson_object_size // 2) + b_repeated = "b" * (self.max_bson_object_size // 2) models.append( UpdateOne( namespace="db.coll", @@ -370,8 +365,7 @@ async def test_returns_error_if_unacknowledged_too_large_insert(self): client = await async_rs_or_single_client(event_listeners=[listener]) self.addAsyncCleanup(client.aclose) - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] - b_repeated = "b" * max_bson_object_size + b_repeated = "b" * self.max_bson_object_size # Insert document. models_insert = [InsertOne(namespace="db.coll", document={"a": b_repeated})] @@ -384,15 +378,25 @@ async def test_returns_error_if_unacknowledged_too_large_insert(self): await client.bulk_write(models=models_replace, write_concern=WriteConcern(w=0)) async def _setup_namespace_test_models(self): - max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] - - ops_bytes = max_message_size_bytes - 1122 - num_models = ops_bytes // max_bson_object_size - remainder_bytes = ops_bytes % max_bson_object_size + # See prose test specification below for details on these calculations. + # https://github.com/mongodb/specifications/tree/master/source/crud/tests#details-on-size-calculations + _EXISTING_BULK_WRITE_BYTES = 1122 + _OPERATION_DOC_BYTES = 57 + _NAMESPACE_DOC_BYTES = 217 + + # When compression is enabled, max_message_size is + # smaller to account for compression message header. + if async_client_context.client_options.get("compressors"): + max_message_size_bytes = self.max_message_size_bytes - 16 + else: + max_message_size_bytes = self.max_message_size_bytes + + ops_bytes = max_message_size_bytes - _EXISTING_BULK_WRITE_BYTES + num_models = ops_bytes // self.max_bson_object_size + remainder_bytes = ops_bytes % self.max_bson_object_size models = [] - b_repeated = "b" * (max_bson_object_size - 57) + b_repeated = "b" * (self.max_bson_object_size - _OPERATION_DOC_BYTES) for _ in range(num_models): models.append( InsertOne( @@ -400,9 +404,9 @@ async def _setup_namespace_test_models(self): document={"a": b_repeated}, ) ) - if remainder_bytes >= 217: + if remainder_bytes >= _NAMESPACE_DOC_BYTES: num_models += 1 - b_repeated = "b" * (remainder_bytes - 57) + b_repeated = "b" * (remainder_bytes - _OPERATION_DOC_BYTES) models.append( InsertOne( namespace="db.coll", @@ -485,17 +489,15 @@ async def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = await async_rs_or_single_client() self.addAsyncCleanup(client.aclose) - max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] - # Document too large. - b_repeated = "b" * max_message_size_bytes + b_repeated = "b" * self.max_message_size_bytes models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] with self.assertRaises(InvalidOperation) as context: await client.bulk_write(models=models) self.assertIn("cannot do an empty bulk write", context.exception._message) # Namespace too large. - c_repeated = "c" * max_message_size_bytes + c_repeated = "c" * self.max_message_size_bytes namespace = f"db.{c_repeated}" models = [InsertOne(namespace=namespace, document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -522,9 +524,16 @@ async def test_returns_error_if_auto_encryption_configured(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteTimeout(AsyncIntegrationTest): + async def asyncSetUp(self): + self.max_write_batch_size = await async_client_context.max_write_batch_size + self.max_bson_object_size = await async_client_context.max_bson_size + self.max_message_size_bytes = await async_client_context.max_message_size_bytes + @async_client_context.require_version_min(8, 0, 0, -24) @async_client_context.require_failCommand_fail_point async def test_timeout_in_multi_batch_bulk_write(self): + _OVERHEAD = 500 + internal_client = await async_rs_or_single_client(timeoutMS=None) self.addAsyncCleanup(internal_client.aclose) @@ -532,8 +541,6 @@ async def test_timeout_in_multi_batch_bulk_write(self): self.addAsyncCleanup(collection.drop) await collection.drop() - max_bson_object_size = (await async_client_context.hello)["maxBsonObjectSize"] - max_message_size_bytes = (await async_client_context.hello)["maxMessageSizeBytes"] fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 2}, @@ -541,8 +548,8 @@ async def test_timeout_in_multi_batch_bulk_write(self): } async with self.fail_point(fail_command): models = [] - num_models = int(max_message_size_bytes / max_bson_object_size + 1) - b_repeated = "b" * (max_bson_object_size - 500) + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD) for _ in range(num_models): models.append( InsertOne( diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index facf2971a1..6399908804 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -56,20 +56,24 @@ def test_returns_error_if_no_namespace_provided(self): # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(IntegrationTest): + def setUp(self): + self.max_write_batch_size = client_context.max_write_batch_size + self.max_bson_object_size = client_context.max_bson_size + self.max_message_size_bytes = client_context.max_message_size_bytes + @client_context.require_version_min(8, 0, 0, -24) def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append(InsertOne(namespace="db.coll", document={"a": "b"})) self.addCleanup(client.db["coll"].drop) result = client.bulk_write(models=models) - self.assertEqual(result.inserted_count, max_write_batch_size + 1) + self.assertEqual(result.inserted_count, self.max_write_batch_size + 1) bulk_write_events = [] for event in listener.started_events: @@ -78,7 +82,7 @@ def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(len(bulk_write_events), 2) first_event, second_event = bulk_write_events - self.assertEqual(len(first_event.command["ops"]), max_write_batch_size) + self.assertEqual(len(first_event.command["ops"]), self.max_write_batch_size) self.assertEqual(len(second_event.command["ops"]), 1) self.assertEqual(first_event.operation_id, second_event.operation_id) @@ -88,12 +92,9 @@ def test_batch_splits_if_ops_payload_too_large(self): client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] - models = [] - num_models = int(max_message_size_bytes / max_bson_object_size + 1) - b_repeated = "b" * (max_bson_object_size - 500) + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - 500) for _ in range(num_models): models.append( InsertOne( @@ -126,7 +127,6 @@ def test_collects_write_concern_errors_across_batches(self): retryWrites=False, ) self.addCleanup(client.close) - max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] fail_command = { "configureFailPoint": "failCommand", @@ -138,7 +138,7 @@ def test_collects_write_concern_errors_across_batches(self): } with self.fail_point(fail_command): models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append( InsertOne( namespace="db.coll", @@ -152,7 +152,7 @@ def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(context.exception.write_concern_errors), 2) # type: ignore[arg-type] self.assertIsNotNone(context.exception.partial_result) self.assertEqual( - context.exception.partial_result.inserted_count, max_write_batch_size + 1 + context.exception.partial_result.inserted_count, self.max_write_batch_size + 1 ) bulk_write_events = [] @@ -172,9 +172,8 @@ def test_collects_write_errors_across_batches_unordered(self): collection.drop() collection.insert_one(document={"_id": 1}) - max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append( InsertOne( namespace="db.coll", @@ -184,7 +183,7 @@ def test_collects_write_errors_across_batches_unordered(self): with self.assertRaises(ClientBulkWriteException) as context: client.bulk_write(models=models, ordered=False) - self.assertEqual(len(context.exception.write_errors), max_write_batch_size + 1) # type: ignore[arg-type] + self.assertEqual(len(context.exception.write_errors), self.max_write_batch_size + 1) # type: ignore[arg-type] bulk_write_events = [] for event in listener.started_events: @@ -203,9 +202,8 @@ def test_collects_write_errors_across_batches_ordered(self): collection.drop() collection.insert_one(document={"_id": 1}) - max_write_batch_size = (client_context.hello)["maxWriteBatchSize"] models = [] - for _ in range(max_write_batch_size + 1): + for _ in range(self.max_write_batch_size + 1): models.append( InsertOne( namespace="db.coll", @@ -233,10 +231,9 @@ def test_handles_cursor_requiring_getMore(self): self.addCleanup(collection.drop) collection.drop() - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] models = [] - a_repeated = "a" * (max_bson_object_size // 2) - b_repeated = "b" * (max_bson_object_size // 2) + a_repeated = "a" * (self.max_bson_object_size // 2) + b_repeated = "b" * (self.max_bson_object_size // 2) models.append( UpdateOne( namespace="db.coll", @@ -275,12 +272,11 @@ def test_handles_cursor_requiring_getMore_within_transaction(self): self.addCleanup(collection.drop) collection.drop() - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] with client.start_session() as session: session.start_transaction() models = [] - a_repeated = "a" * (max_bson_object_size // 2) - b_repeated = "b" * (max_bson_object_size // 2) + a_repeated = "a" * (self.max_bson_object_size // 2) + b_repeated = "b" * (self.max_bson_object_size // 2) models.append( UpdateOne( namespace="db.coll", @@ -319,7 +315,6 @@ def test_handles_getMore_error(self): self.addCleanup(collection.drop) collection.drop() - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 1}, @@ -327,8 +322,8 @@ def test_handles_getMore_error(self): } with self.fail_point(fail_command): models = [] - a_repeated = "a" * (max_bson_object_size // 2) - b_repeated = "b" * (max_bson_object_size // 2) + a_repeated = "a" * (self.max_bson_object_size // 2) + b_repeated = "b" * (self.max_bson_object_size // 2) models.append( UpdateOne( namespace="db.coll", @@ -370,8 +365,7 @@ def test_returns_error_if_unacknowledged_too_large_insert(self): client = rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] - b_repeated = "b" * max_bson_object_size + b_repeated = "b" * self.max_bson_object_size # Insert document. models_insert = [InsertOne(namespace="db.coll", document={"a": b_repeated})] @@ -384,15 +378,25 @@ def test_returns_error_if_unacknowledged_too_large_insert(self): client.bulk_write(models=models_replace, write_concern=WriteConcern(w=0)) def _setup_namespace_test_models(self): - max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] - - ops_bytes = max_message_size_bytes - 1122 - num_models = ops_bytes // max_bson_object_size - remainder_bytes = ops_bytes % max_bson_object_size + # See prose test specification below for details on these calculations. + # https://github.com/mongodb/specifications/tree/master/source/crud/tests#details-on-size-calculations + _EXISTING_BULK_WRITE_BYTES = 1122 + _OPERATION_DOC_BYTES = 57 + _NAMESPACE_DOC_BYTES = 217 + + # When compression is enabled, max_message_size is + # smaller to account for compression message header. + if client_context.client_options.get("compressors"): + max_message_size_bytes = self.max_message_size_bytes - 16 + else: + max_message_size_bytes = self.max_message_size_bytes + + ops_bytes = max_message_size_bytes - _EXISTING_BULK_WRITE_BYTES + num_models = ops_bytes // self.max_bson_object_size + remainder_bytes = ops_bytes % self.max_bson_object_size models = [] - b_repeated = "b" * (max_bson_object_size - 57) + b_repeated = "b" * (self.max_bson_object_size - _OPERATION_DOC_BYTES) for _ in range(num_models): models.append( InsertOne( @@ -400,9 +404,9 @@ def _setup_namespace_test_models(self): document={"a": b_repeated}, ) ) - if remainder_bytes >= 217: + if remainder_bytes >= _NAMESPACE_DOC_BYTES: num_models += 1 - b_repeated = "b" * (remainder_bytes - 57) + b_repeated = "b" * (remainder_bytes - _OPERATION_DOC_BYTES) models.append( InsertOne( namespace="db.coll", @@ -485,17 +489,15 @@ def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = rs_or_single_client() self.addCleanup(client.close) - max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] - # Document too large. - b_repeated = "b" * max_message_size_bytes + b_repeated = "b" * self.max_message_size_bytes models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] with self.assertRaises(InvalidOperation) as context: client.bulk_write(models=models) self.assertIn("cannot do an empty bulk write", context.exception._message) # Namespace too large. - c_repeated = "c" * max_message_size_bytes + c_repeated = "c" * self.max_message_size_bytes namespace = f"db.{c_repeated}" models = [InsertOne(namespace=namespace, document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -522,9 +524,16 @@ def test_returns_error_if_auto_encryption_configured(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteTimeout(IntegrationTest): + def setUp(self): + self.max_write_batch_size = client_context.max_write_batch_size + self.max_bson_object_size = client_context.max_bson_size + self.max_message_size_bytes = client_context.max_message_size_bytes + @client_context.require_version_min(8, 0, 0, -24) @client_context.require_failCommand_fail_point def test_timeout_in_multi_batch_bulk_write(self): + _OVERHEAD = 500 + internal_client = rs_or_single_client(timeoutMS=None) self.addCleanup(internal_client.close) @@ -532,8 +541,6 @@ def test_timeout_in_multi_batch_bulk_write(self): self.addCleanup(collection.drop) collection.drop() - max_bson_object_size = (client_context.hello)["maxBsonObjectSize"] - max_message_size_bytes = (client_context.hello)["maxMessageSizeBytes"] fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 2}, @@ -541,8 +548,8 @@ def test_timeout_in_multi_batch_bulk_write(self): } with self.fail_point(fail_command): models = [] - num_models = int(max_message_size_bytes / max_bson_object_size + 1) - b_repeated = "b" * (max_bson_object_size - 500) + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD) for _ in range(num_models): models.append( InsertOne( From a232b657d01030d2bc2b40db068ebb49f8b964a4 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 12 Aug 2024 10:23:43 -0700 Subject: [PATCH 1393/2111] PYTHON-4613 Skip async tests when testing eventlet/gevent (#1780) --- green_framework_test.py | 12 +++++++++++- hatch.toml | 2 +- pyproject.toml | 3 ++- test/asynchronous/conftest.py | 6 ++---- test/conftest.py | 7 ++----- test/pytest_conf.py | 16 ++++++++++++++++ tools/synchro.py | 1 + 7 files changed, 35 insertions(+), 12 deletions(-) create mode 100644 test/pytest_conf.py diff --git a/green_framework_test.py b/green_framework_test.py index 65025798cf..037d0279c3 100644 --- a/green_framework_test.py +++ b/green_framework_test.py @@ -60,8 +60,18 @@ def run(framework_name, *args): # Monkey-patch. FRAMEWORKS[framework_name]() + arg_list = list(args) + + # Never run async tests with a framework + if len(arg_list) <= 1: + arg_list.extend(["-m", "not default_async and default"]) + else: + for i in range(len(arg_list) - 1): + if "-m" in arg_list[i]: + arg_list[i + 1] = f"not default_async and {arg_list[i + 1]}" + # Run the tests. - sys.exit(pytest.main(list(args))) + sys.exit(pytest.main(arg_list)) def main(): diff --git a/hatch.toml b/hatch.toml index 25d113e5da..8b1cf93e32 100644 --- a/hatch.toml +++ b/hatch.toml @@ -41,7 +41,7 @@ features = ["test"] [envs.test.scripts] test = "pytest -v --durations=5 --maxfail=10 {args}" test-eg = "bash ./.evergreen/run-tests.sh {args}" -test-async = "test test/asynchronous/ {args}" +test-async = "pytest -v --durations=5 --maxfail=10 -m default_async {args}" test-mockupdb = ["pip install -U git+https://github.com/ajdavis/mongo-mockup-db@master", "test -m mockupdb"] [envs.encryption] diff --git a/pyproject.toml b/pyproject.toml index cfd994f563..4380b57e8d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,7 @@ zstd = ["requirements/zstd.txt"] [tool.pytest.ini_options] minversion = "7" -addopts = ["-ra", "--strict-config", "--strict-markers", "--junitxml=xunit-results/TEST-results.xml", "-m default"] +addopts = ["-ra", "--strict-config", "--strict-markers", "--junitxml=xunit-results/TEST-results.xml", "-m default or default_async"] testpaths = ["test"] log_cli_level = "INFO" faulthandler_timeout = 1500 @@ -108,6 +108,7 @@ markers = [ "load_balancer: load balancer tests", "mockupdb: tests that rely on mockupdb", "default: default test suite", + "default_async: default async test suite", ] [tool.mypy] diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py index 398ba4265c..f5bcd953a4 100644 --- a/test/asynchronous/conftest.py +++ b/test/asynchronous/conftest.py @@ -1,5 +1,6 @@ from __future__ import annotations +from test import pytest_conf from test.asynchronous import async_setup, async_teardown import pytest_asyncio @@ -14,7 +15,4 @@ async def test_setup_and_teardown(): await async_teardown() -def pytest_collection_modifyitems(items, config): - for item in items: - if not any(item.iter_markers()): - item.add_marker("default") +pytest_collection_modifyitems = pytest_conf.pytest_collection_modifyitems diff --git a/test/conftest.py b/test/conftest.py index 39d29355b7..431dd152fe 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,6 +1,6 @@ from __future__ import annotations -from test import setup, teardown +from test import pytest_conf, setup, teardown import pytest @@ -14,7 +14,4 @@ def test_setup_and_teardown(): teardown() -def pytest_collection_modifyitems(items, config): - for item in items: - if not any(item.iter_markers()): - item.add_marker("default") +pytest_collection_modifyitems = pytest_conf.pytest_collection_modifyitems diff --git a/test/pytest_conf.py b/test/pytest_conf.py new file mode 100644 index 0000000000..75f3e74322 --- /dev/null +++ b/test/pytest_conf.py @@ -0,0 +1,16 @@ +from __future__ import annotations + + +def pytest_collection_modifyitems(items, config): + sync_items = [] + async_items = [ + item + for item in items + if "asynchronous" in item.fspath.dirname or sync_items.append(item) # type: ignore[func-returns-value] + ] + for item in async_items: + if not any(item.iter_markers()): + item.add_marker("default_async") + for item in sync_items: + if not any(item.iter_markers()): + item.add_marker("default") diff --git a/tools/synchro.py b/tools/synchro.py index e0af50229f..5711e1f84d 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -95,6 +95,7 @@ "aclose": "close", "async-transactions-ref": "transactions-ref", "async-snapshot-reads-ref": "snapshot-reads-ref", + "default_async": "default", } docstring_replacements: dict[tuple[str, str], str] = { From 47b225702869a34e81b827036d4d9d1e77d1a532 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:59:44 -0700 Subject: [PATCH 1394/2111] PYTHON-4641 Fix failure in async version of client bulk CSOT test (#1790) --- test/asynchronous/test_client_bulk_write.py | 3 ++- test/test_client_bulk_write.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 7ce989bbe7..4fe4fce2db 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -523,7 +523,7 @@ async def test_returns_error_if_auto_encryption_configured(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites -class TestClientBulkWriteTimeout(AsyncIntegrationTest): +class TestClientBulkWriteCSOT(AsyncIntegrationTest): async def asyncSetUp(self): self.max_write_batch_size = await async_client_context.max_write_batch_size self.max_bson_object_size = await async_client_context.max_bson_size @@ -567,6 +567,7 @@ async def test_timeout_in_multi_batch_bulk_write(self): w="majority", ) self.addAsyncCleanup(client.aclose) + await client.admin.command("ping") # Init the client first. with self.assertRaises(ClientBulkWriteException) as context: await client.bulk_write(models=models) self.assertIsInstance(context.exception.error, NetworkTimeout) diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 6399908804..8f6aad0cfa 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -523,7 +523,7 @@ def test_returns_error_if_auto_encryption_configured(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites -class TestClientBulkWriteTimeout(IntegrationTest): +class TestClientBulkWriteCSOT(IntegrationTest): def setUp(self): self.max_write_batch_size = client_context.max_write_batch_size self.max_bson_object_size = client_context.max_bson_size @@ -567,6 +567,7 @@ def test_timeout_in_multi_batch_bulk_write(self): w="majority", ) self.addCleanup(client.close) + client.admin.command("ping") # Init the client first. with self.assertRaises(ClientBulkWriteException) as context: client.bulk_write(models=models) self.assertIsInstance(context.exception.error, NetworkTimeout) From f69d330b25db33272fcccbe762668c1a7e8833ab Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 13 Aug 2024 19:17:45 -0500 Subject: [PATCH 1395/2111] PYTHON-4654 Clean up Async API to match Motor (#1789) --- doc/api/pymongo/asynchronous/mongo_client.rst | 2 +- pymongo/asynchronous/collection.py | 6 +- pymongo/asynchronous/encryption.py | 4 +- pymongo/asynchronous/mongo_client.py | 8 +- pymongo/synchronous/mongo_client.py | 4 + test/asynchronous/__init__.py | 6 +- test/asynchronous/test_client.py | 80 +++++++++---------- test/asynchronous/test_client_bulk_write.py | 32 ++++---- test/asynchronous/test_collection.py | 2 +- test/asynchronous/test_cursor.py | 40 +++++----- test/asynchronous/test_database.py | 2 +- test/asynchronous/test_session.py | 16 ++-- test/asynchronous/test_transactions.py | 23 +++--- test/asynchronous/utils_spec_runner.py | 2 +- test/test_cursor.py | 14 ++-- test/test_session.py | 2 +- test/test_transactions.py | 5 +- tools/synchro.py | 1 - 18 files changed, 128 insertions(+), 121 deletions(-) diff --git a/doc/api/pymongo/asynchronous/mongo_client.rst b/doc/api/pymongo/asynchronous/mongo_client.rst index 57aa33e3c6..afbd802ff2 100644 --- a/doc/api/pymongo/asynchronous/mongo_client.rst +++ b/doc/api/pymongo/asynchronous/mongo_client.rst @@ -6,7 +6,7 @@ .. autoclass:: pymongo.asynchronous.mongo_client.AsyncMongoClient(host='localhost', port=27017, document_class=dict, tz_aware=False, connect=True, **kwargs) - .. automethod:: aclose + .. automethod:: close .. describe:: c[db_name] || c.db_name diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index e634b449f4..e5a54c0904 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -1893,9 +1893,7 @@ def find(self, *args: Any, **kwargs: Any) -> AsyncCursor[_DocumentType]: """ return AsyncCursor(self, *args, **kwargs) - async def find_raw_batches( - self, *args: Any, **kwargs: Any - ) -> AsyncRawBatchCursor[_DocumentType]: + def find_raw_batches(self, *args: Any, **kwargs: Any) -> AsyncRawBatchCursor[_DocumentType]: """Query the database and retrieve batches of raw BSON. Similar to the :meth:`find` method but returns a @@ -1907,7 +1905,7 @@ async def find_raw_batches( :mod:`bson` module. >>> import bson - >>> cursor = await db.test.find_raw_batches() + >>> cursor = db.test.find_raw_batches() >>> async for batch in cursor: ... print(bson.decode_all(batch)) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index f8c03b21cc..8b63525f21 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -299,7 +299,7 @@ async def close(self) -> None: self.client_ref = None self.key_vault_coll = None if self.mongocryptd_client: - await self.mongocryptd_client.aclose() + await self.mongocryptd_client.close() self.mongocryptd_client = None @@ -439,7 +439,7 @@ async def close(self) -> None: self._closed = True await self._auto_encrypter.close() if self._internal_client: - await self._internal_client.aclose() + await self._internal_client.close() self._internal_client = None diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 90e40978a2..fbbd9a4eed 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -1378,7 +1378,7 @@ async def __aenter__(self) -> AsyncMongoClient[_DocumentType]: return self async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - await self.aclose() + await self.close() # See PYTHON-3084. __iter__ = None @@ -1514,7 +1514,7 @@ async def _end_sessions(self, session_ids: list[_ServerSession]) -> None: # command. pass - async def aclose(self) -> None: + async def close(self) -> None: """Cleanup client resources and disconnect from MongoDB. End all server sessions created by this client by sending one or more @@ -1541,6 +1541,10 @@ async def aclose(self) -> None: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. await self._encrypter.close() + if not _IS_SYNC: + # Add support for contextlib.aclosing. + aclose = close + async def _get_topology(self) -> Topology: """Get the internal :class:`~pymongo.asynchronous.topology.Topology` object. diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 41b4db4f18..1863165625 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1536,6 +1536,10 @@ def close(self) -> None: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. self._encrypter.close() + if not _IS_SYNC: + # Add support for contextlib.closing. + aclose = close + def _get_topology(self) -> Topology: """Get the internal :class:`~pymongo.topology.Topology` object. diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index a95a9e31b0..900b260c2e 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -193,7 +193,7 @@ async def _connect(self, host, port, **kwargs): self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") return None finally: - await client.aclose() + await client.close() async def _init_client(self): self.client = await self._connect(host, port) @@ -409,7 +409,7 @@ async def _check_user_provided(self): else: raise finally: - await client.aclose() + await client.close() def _server_started_with_auth(self): # MongoDB >= 2.0 @@ -1089,7 +1089,7 @@ async def async_teardown(): await c.drop_database("pymongo_test2") await c.drop_database("pymongo_test_mike") await c.drop_database("pymongo_test_bernie") - await c.aclose() + await c.close() print_running_clients() diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index fa526b3eae..9489de1563 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -137,7 +137,7 @@ async def _setup_class(cls): @classmethod async def _tearDown_class(cls): - await cls.client.aclose() + await cls.client.close() @pytest.fixture(autouse=True) def inject_fixtures(self, caplog): @@ -663,7 +663,7 @@ async def test_max_idle_time_reaper_default(self): pass self.assertEqual(1, len(server._pool.conns)) self.assertTrue(conn in server._pool.conns) - await client.aclose() + await client.close() async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): @@ -679,7 +679,7 @@ async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): self.assertGreaterEqual(len(server._pool.conns), 1) wait_until(lambda: conn not in server._pool.conns, "remove stale socket") wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") - await client.aclose() + await client.close() async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): @@ -697,7 +697,7 @@ async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): self.assertEqual(1, len(server._pool.conns)) wait_until(lambda: conn not in server._pool.conns, "remove stale socket") wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") - await client.aclose() + await client.close() async def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): @@ -717,7 +717,7 @@ async def test_max_idle_time_reaper_removes_stale(self): lambda: len(server._pool.conns) == 0, "stale socket reaped and new one NOT added to the pool", ) - await client.aclose() + await client.close() async def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=0.1): @@ -845,13 +845,13 @@ async def test_init_disconnected_with_auth(self): async def test_equality(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = await async_rs_or_single_client(seed, connect=False) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) self.assertEqual(async_client_context.client, c) # Explicitly test inequality self.assertFalse(async_client_context.client != c) c = await async_rs_or_single_client("invalid.com", connect=False) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) self.assertNotEqual(async_client_context.client, c) self.assertTrue(async_client_context.client != c) # Seeds differ: @@ -867,10 +867,10 @@ async def test_equality(self): async def test_hashable(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = await async_rs_or_single_client(seed, connect=False) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) self.assertIn(c, {async_client_context.client}) c = await async_rs_or_single_client("invalid.com", connect=False) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) self.assertNotIn(c, {async_client_context.client}) async def test_host_w_port(self): @@ -940,7 +940,7 @@ async def test_list_databases(self): self.assertIs(type(helper_doc), dict) self.assertEqual(helper_doc.keys(), cmd_doc.keys()) client = await async_rs_or_single_client(document_class=SON) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) async for doc in await client.list_databases(): self.assertIs(type(doc), dict) @@ -991,7 +991,7 @@ async def test_drop_database(self): async def test_close(self): test_client = await async_rs_or_single_client() coll = test_client.pymongo_test.bar - await test_client.aclose() + await test_client.close() with self.assertRaises(InvalidOperation): await coll.count_documents({}) @@ -1024,7 +1024,7 @@ async def test_close_kills_cursors(self): # Close the client and ensure the topology is closed. self.assertTrue(test_client._topology._opened) - await test_client.aclose() + await test_client.close() self.assertFalse(test_client._topology._opened) test_client = await async_rs_or_single_client() # The killCursors task should not need to re-open the topology. @@ -1037,7 +1037,7 @@ async def test_close_stops_kill_cursors_thread(self): self.assertFalse(client._kill_cursors_executor._stopped) # Closing the client should stop the thread. - await client.aclose() + await client.close() self.assertTrue(client._kill_cursors_executor._stopped) # Reusing the closed client should raise an InvalidOperation error. @@ -1062,21 +1062,21 @@ async def test_uri_connect_option(self): self.assertTrue(kc_thread and kc_thread.is_alive()) # Tear down. - await client.aclose() + await client.close() async def test_close_does_not_open_servers(self): client = await async_rs_client(connect=False) topology = client._topology self.assertEqual(topology._servers, {}) - await client.aclose() + await client.close() self.assertEqual(topology._servers, {}) async def test_close_closes_sockets(self): client = await async_rs_client() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.test.test.find_one() topology = client._topology - await client.aclose() + await client.close() for server in topology._servers.values(): self.assertFalse(server._pool.conns) self.assertTrue(server._monitor._executor._stopped) @@ -1181,7 +1181,7 @@ async def test_unix_socket(self): uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. client = await async_rs_or_single_client(uri) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = await client.list_database_names() self.assertTrue("pymongo_test" in dbs) @@ -1206,7 +1206,7 @@ async def test_document_class(self): self.assertFalse(isinstance(await db.test.find_one(), SON)) c = await async_rs_or_single_client(document_class=SON) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) @@ -1248,7 +1248,7 @@ async def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 timeout = await async_rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) - self.addAsyncCleanup(timeout.aclose) + self.addAsyncCleanup(timeout.close) await no_timeout.pymongo_test.drop_collection("test") await no_timeout.pymongo_test.test.insert_one({"x": 1}) @@ -1310,7 +1310,7 @@ async def test_tz_aware(self): self.assertRaises(ValueError, AsyncMongoClient, tz_aware="foo") aware = await async_rs_or_single_client(tz_aware=True) - self.addAsyncCleanup(aware.aclose) + self.addAsyncCleanup(aware.close) naive = self.client await aware.pymongo_test.drop_collection("test") @@ -1340,7 +1340,7 @@ async def test_ipv6(self): uri += "/?replicaSet=" + (async_client_context.replica_set_name or "") client = await async_rs_or_single_client_noauth(uri) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.pymongo_test.test.insert_one({"dummy": "object"}) await client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) @@ -1442,7 +1442,7 @@ async def test_operation_failure(self): # to avoid race conditions caused by replica set failover or idle # socket reaping. client = await async_single_client() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.pymongo_test.test.find_one() pool = await async_get_pool(client) socket_count = len(pool.conns) @@ -1467,7 +1467,7 @@ async def test_lazy_connect_w0(self): self.addAsyncCleanup(async_client_context.client.drop_database, "test_lazy_connect_w0") client = await async_rs_or_single_client(connect=False, w=0) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.test_lazy_connect_w0.test.insert_one({}) async def predicate(): @@ -1476,7 +1476,7 @@ async def predicate(): await async_wait_until(predicate, "find one document") client = await async_rs_or_single_client(connect=False, w=0) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) async def predicate(): @@ -1485,7 +1485,7 @@ async def predicate(): await async_wait_until(predicate, "update one document") client = await async_rs_or_single_client(connect=False, w=0) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.test_lazy_connect_w0.test.delete_one({}) async def predicate(): @@ -1498,7 +1498,7 @@ async def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. client = await async_rs_or_single_client(maxPoolSize=1, retryReads=False) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.pymongo_test.test pool = await async_get_pool(client) pool._check_interval_seconds = None # Never check. @@ -1611,7 +1611,7 @@ def init(self, *args): # closer to 0.5 sec with heartbeatFrequencyMS configured. self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) - await client.aclose() + await client.close() finally: ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore @@ -1709,7 +1709,7 @@ def compression_settings(client): async def test_reset_during_update_pool(self): client = await async_rs_or_single_client(minPoolSize=10) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.admin.command("ping") pool = await async_get_pool(client) generation = pool.gen.get_overall() @@ -1758,7 +1758,7 @@ async def test_background_connections_do_not_hold_locks(self): client = await async_rs_or_single_client( serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False ) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) # Create a single connection in the pool. await client.admin.command("ping") @@ -1793,7 +1793,7 @@ async def test_direct_connection(self): await client.admin.command("ping") self.assertEqual(len(client.nodes), 1) self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) - await client.aclose() + await client.close() # direct_connection=False should result in RS topology. client = await async_rs_or_single_client(directConnection=False) @@ -1803,7 +1803,7 @@ async def test_direct_connection(self): client._topology_settings.get_topology_type(), [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], ) - await client.aclose() + await client.close() # directConnection=True, should error with multiple hosts as a list. with self.assertRaises(ConfigurationError): @@ -1827,7 +1827,7 @@ def server_description_count(): "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 ) initial_count = server_description_count() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) with self.assertRaises(ServerSelectionTimeoutError): await client.test.test.find_one() gc.collect() @@ -1841,7 +1841,7 @@ def server_description_count(): @async_client_context.require_failCommand_fail_point async def test_network_error_message(self): client = await async_single_client(retryReads=False) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.admin.command("ping") # connect async with self.fail_point( {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} @@ -1860,7 +1860,7 @@ async def test_process_periodic_tasks(self): await cursor.next() c_id = cursor.cursor_id self.assertIsNotNone(c_id) - await client.aclose() + await client.close() # Add cursor to kill cursors queue del cursor wait_until( @@ -2315,7 +2315,7 @@ async def test_discover_primary(self): replicaSet="rs", heartbeatFrequencyMS=500, ) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) wait_until(lambda: len(c.nodes) == 3, "connect") @@ -2342,7 +2342,7 @@ async def test_reconnect(self): retryReads=False, serverSelectionTimeoutMS=1000, ) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) wait_until(lambda: len(c.nodes) == 3, "connect") @@ -2377,7 +2377,7 @@ async def _test_network_error(self, operation_callback): retryReads=False, serverSelectionTimeoutMS=1000, ) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) # Set host-specific information so we can test whether it is reset. c.set_wire_version_range("a:1", 2, 6) @@ -2453,7 +2453,7 @@ async def test_rs_client_does_not_maintain_pool_to_arbiters(self): minPoolSize=1, # minPoolSize event_listeners=[listener], ) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(await c.address, ("a", 1)) @@ -2483,7 +2483,7 @@ async def test_direct_client_maintains_pool_to_arbiter(self): minPoolSize=1, # minPoolSize event_listeners=[listener], ) - self.addAsyncCleanup(c.aclose) + self.addAsyncCleanup(c.close) wait_until(lambda: len(c.nodes) == 1, "connect") self.assertEqual(await c.address, ("c", 3)) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 4fe4fce2db..eea0b4e8e0 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -43,7 +43,7 @@ class TestClientBulkWrite(AsyncIntegrationTest): @async_client_context.require_version_min(8, 0, 0, -24) async def test_returns_error_if_no_namespace_provided(self): client = await async_rs_or_single_client() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -65,7 +65,7 @@ async def asyncSetUp(self): async def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) models = [] for _ in range(self.max_write_batch_size + 1): @@ -90,7 +90,7 @@ async def test_batch_splits_if_num_operations_too_large(self): async def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) models = [] num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) @@ -126,7 +126,7 @@ async def test_collects_write_concern_errors_across_batches(self): event_listeners=[listener], retryWrites=False, ) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) fail_command = { "configureFailPoint": "failCommand", @@ -165,7 +165,7 @@ async def test_collects_write_concern_errors_across_batches(self): async def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -195,7 +195,7 @@ async def test_collects_write_errors_across_batches_unordered(self): async def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -225,7 +225,7 @@ async def test_collects_write_errors_across_batches_ordered(self): async def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -266,7 +266,7 @@ async def test_handles_cursor_requiring_getMore(self): async def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -309,7 +309,7 @@ async def test_handles_cursor_requiring_getMore_within_transaction(self): async def test_handles_getMore_error(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -363,7 +363,7 @@ async def test_handles_getMore_error(self): async def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) b_repeated = "b" * self.max_bson_object_size @@ -419,7 +419,7 @@ async def _setup_namespace_test_models(self): async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) num_models, models = await self._setup_namespace_test_models() models.append( @@ -450,7 +450,7 @@ async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): async def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) num_models, models = await self._setup_namespace_test_models() c_repeated = "c" * 200 @@ -487,7 +487,7 @@ async def test_batch_splits_if_new_namespace_is_too_large(self): @async_client_context.require_version_min(8, 0, 0, -24) async def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = await async_rs_or_single_client() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) # Document too large. b_repeated = "b" * self.max_message_size_bytes @@ -512,7 +512,7 @@ async def test_returns_error_if_auto_encryption_configured(self): kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, ) client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -535,7 +535,7 @@ async def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 internal_client = await async_rs_or_single_client(timeoutMS=None) - self.addAsyncCleanup(internal_client.aclose) + self.addAsyncCleanup(internal_client.close) collection = internal_client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -566,7 +566,7 @@ async def test_timeout_in_multi_batch_bulk_write(self): timeoutMS=2000, w="majority", ) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) await client.admin.command("ping") # Init the client first. with self.assertRaises(ClientBulkWriteException) as context: await client.bulk_write(models=models) diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 90a53e8b8a..10d64a525c 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -1820,7 +1820,7 @@ async def test_exhaust(self): await self.db.test.insert_many([{"i": i} for i in range(150)]) client = await async_rs_or_single_client(maxPoolSize=1) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) pool = await async_get_pool(client) # Make sure the socket is returned after exhaustion. diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index d6d56244f7..9dd67f2dae 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -354,7 +354,7 @@ async def test_explain_with_read_concern(self): # Do not add readConcern level to explain. listener = AllowListEventListener("explain") client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(await coll.find().explain()) started = listener.started_events @@ -1262,7 +1262,7 @@ async def test_close_kills_cursor_synchronously(self): listener = AllowListEventListener("killCursors") client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client[self.db.name].test_close_kills_cursors # Add some test data. @@ -1301,7 +1301,7 @@ def assertCursorKilled(): async def test_timeout_kills_cursor_asynchronously(self): listener = AllowListEventListener("killCursors") client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client[self.db.name].test_timeout_kills_cursor # Add some test data. @@ -1359,7 +1359,7 @@ def test_delete_not_initialized(self): async def test_getMore_does_not_send_readPreference(self): listener = AllowListEventListener("find", "getMore") client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) # We never send primary read preference so override the default. coll = client[self.db.name].get_collection( "test", read_preference=ReadPreference.PRIMARY_PREFERRED @@ -1424,7 +1424,7 @@ async def test_find_raw(self): await c.drop() docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] await c.insert_many(docs) - batches = await (await c.find_raw_batches()).sort("_id").to_list() + batches = await c.find_raw_batches().sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1440,7 +1440,7 @@ async def test_find_raw_transaction(self): async with client.start_session() as session: async with await session.start_transaction(): batches = await ( - (await client[self.db.name].test.find_raw_batches(session=session)).sort("_id") + client[self.db.name].test.find_raw_batches(session=session).sort("_id") ).to_list() cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "find") @@ -1470,9 +1470,7 @@ async def test_find_raw_retryable_reads(self): async with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} ): - batches = ( - await (await client[self.db.name].test.find_raw_batches()).sort("_id").to_list() - ) + batches = await client[self.db.name].test.find_raw_batches().sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1493,7 +1491,7 @@ async def test_find_raw_snapshot_reads(self): db = client[self.db.name] async with client.start_session(snapshot=True) as session: await db.test.distinct("x", {}, session=session) - batches = await (await db.test.find_raw_batches(session=session)).sort("_id").to_list() + batches = await db.test.find_raw_batches(session=session).sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1504,18 +1502,18 @@ async def test_find_raw_snapshot_reads(self): async def test_explain(self): c = self.db.test await c.insert_one({}) - explanation = await (await c.find_raw_batches()).explain() + explanation = await c.find_raw_batches().explain() self.assertIsInstance(explanation, dict) async def test_empty(self): await self.db.test.drop() - cursor = await self.db.test.find_raw_batches() + cursor = self.db.test.find_raw_batches() with self.assertRaises(StopAsyncIteration): await anext(cursor) async def test_clone(self): await self.db.test.insert_one({}) - cursor = await self.db.test.find_raw_batches() + cursor = self.db.test.find_raw_batches() # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. self.assertIsInstance(await anext(cursor.clone()), bytes) self.assertIsInstance(await anext(copy.copy(cursor)), bytes) @@ -1525,24 +1523,22 @@ async def test_exhaust(self): c = self.db.test await c.drop() await c.insert_many({"_id": i} for i in range(200)) - result = b"".join( - await (await c.find_raw_batches(cursor_type=CursorType.EXHAUST)).to_list() - ) + result = b"".join(await c.find_raw_batches(cursor_type=CursorType.EXHAUST).to_list()) self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) async def test_server_error(self): with self.assertRaises(OperationFailure) as exc: - await anext(await self.db.test.find_raw_batches({"x": {"$bad": 1}})) + await anext(self.db.test.find_raw_batches({"x": {"$bad": 1}})) # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) async def test_get_item(self): with self.assertRaises(InvalidOperation): - (await self.db.test.find_raw_batches())[0] + self.db.test.find_raw_batches()[0] async def test_collation(self): - await anext(await self.db.test.find_raw_batches(collation=Collation("en_US"))) + await anext(self.db.test.find_raw_batches(collation=Collation("en_US"))) @async_client_context.require_no_mmap # MMAPv1 does not support read concern async def test_read_concern(self): @@ -1550,7 +1546,7 @@ async def test_read_concern(self): {} ) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) - await anext(await c.find_raw_batches()) + await anext(c.find_raw_batches()) async def test_monitoring(self): listener = EventListener() @@ -1560,7 +1556,7 @@ async def test_monitoring(self): await c.insert_many([{"_id": i} for i in range(10)]) listener.reset() - cursor = await c.find_raw_batches(batch_size=4) + cursor = c.find_raw_batches(batch_size=4) # First raw batch of 4 documents. await anext(cursor) @@ -1766,7 +1762,7 @@ async def test_monitoring(self): async def test_exhaust_cursor_db_set(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) c = client.pymongo_test.test await c.delete_many({}) await c.insert_many([{"_id": i} for i in range(3)]) diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index c20a74d3db..8f6886a2a7 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -236,7 +236,7 @@ async def test_list_collection_names_filter(self): async def test_check_exists(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) db = client[self.db.name] await db.drop_collection("unique") await db.create_collection("unique", check_exists=True) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index b8387b7f6a..1e1f5659ba 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -99,7 +99,7 @@ async def _setup_class(cls): @classmethod async def _tearDown_class(cls): monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) - await cls.client2.aclose() + await cls.client2.close() await super()._tearDown_class() async def asyncSetUp(self): @@ -108,7 +108,7 @@ async def asyncSetUp(self): self.client = await async_rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener] ) - self.addAsyncCleanup(self.client.aclose) + self.addAsyncCleanup(self.client.close) self.db = self.client.pymongo_test self.initial_lsids = {s["id"] for s in session_ids(self.client)} @@ -295,14 +295,14 @@ async def test_end_sessions(self): # Closing the client should end all sessions and clear the pool. self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) - await client.aclose() + await client.close() self.assertEqual(len(client._topology._session_pool), 0) end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. listener.reset() - await client.aclose() + await client.close() self.assertEqual(len(listener.started_events), 0) async def test_client(self): @@ -790,7 +790,7 @@ async def test_unacknowledged_writes(self): # Ensure the collection exists. await self.client.pymongo_test.test_unacked_writes.insert_one({}) client = await async_rs_or_single_client(w=0, event_listeners=[self.listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) db = client.pymongo_test coll = db.test_unacked_writes ops: list = [ @@ -842,7 +842,7 @@ async def _setup_class(cls): @classmethod async def _tearDown_class(cls): - await cls.client.aclose() + await cls.client.close() @async_client_context.require_sessions async def asyncSetUp(self): @@ -927,7 +927,7 @@ async def aggregate_raw(coll, session): return await (await coll.aggregate_raw_batches([], session=session)).to_list() async def find_raw(coll, session): - return await (await coll.find_raw_batches({}, session=session)).to_list() + return await coll.find_raw_batches({}, session=session).to_list() await self._test_reads(aggregate) await self._test_reads(lambda coll, session: coll.find({}, session=session).to_list()) @@ -1156,7 +1156,7 @@ async def test_cluster_time(self): client = await async_rs_or_single_client( event_listeners=[listener], heartbeatFrequencyMS=999999 ) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). await collection.insert_many([{} for _ in range(10)]) diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 373fda588a..8fa1e70d01 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -74,7 +74,7 @@ async def _setup_class(cls): @classmethod async def _tearDown_class(cls): for client in cls.mongos_clients: - await client.aclose() + await client.close() await super()._tearDown_class() def maybe_skip_scenario(self, test): @@ -121,7 +121,7 @@ def test_transaction_options_validation(self): async def test_transaction_write_concern_override(self): """Test txn overrides Client/Database/Collection write_concern.""" client = await async_rs_client(w=0) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) db = client.test coll = db.test await coll.insert_one({}) @@ -183,7 +183,7 @@ async def test_unpin_for_next_transaction(self): coll = client.test.test # Create the collection. await coll.insert_one({}) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) async with client.start_session() as s: # Session is pinned to Mongos. async with await s.start_transaction(): @@ -211,7 +211,7 @@ async def test_unpin_for_non_transaction_operation(self): coll = client.test.test # Create the collection. await coll.insert_one({}) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) async with client.start_session() as s: # Session is pinned to Mongos. async with await s.start_transaction(): @@ -339,7 +339,7 @@ async def test_transaction_starts_with_batched_write(self): coll = client[self.db.name].test await coll.delete_many({}) listener.reset() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) self.addAsyncCleanup(coll.drop) large_str = "\0" * (1 * 1024 * 1024) ops: List[InsertOne[RawBSONDocument]] = [ @@ -365,7 +365,7 @@ async def test_transaction_starts_with_batched_write(self): @async_client_context.require_transactions async def test_transaction_direct_connection(self): client = await async_single_client() - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client.pymongo_test.test # Make sure the collection exists. @@ -375,6 +375,9 @@ async def test_transaction_direct_connection(self): async def find(*args, **kwargs): return coll.find(*args, **kwargs) + async def find_raw_batches(*args, **kwargs): + return coll.find_raw_batches(*args, **kwargs) + ops = [ (coll.bulk_write, [[InsertOne[dict]({})]]), (coll.insert_one, [{}]), @@ -393,7 +396,7 @@ async def find(*args, **kwargs): (coll.aggregate, [[]]), (find, [{}]), (coll.aggregate_raw_batches, [[]]), - (coll.find_raw_batches, [{}]), + (find_raw_batches, [{}]), (coll.database.command, ["find", coll.name]), ] for f, args in ops: @@ -452,7 +455,7 @@ async def callback2(session): async def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() client = await async_rs_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client[self.db.name].test async def callback(session): @@ -481,7 +484,7 @@ async def callback(session): async def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() client = await async_rs_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client[self.db.name].test async def callback(session): @@ -516,7 +519,7 @@ async def callback(session): async def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() client = await async_rs_client(event_listeners=[listener]) - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) coll = client[self.db.name].test async def callback(session): diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index 2e256ec17f..71044d1530 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -535,7 +535,7 @@ async def run_scenario(self, scenario_def, test): self.pool_listener = pool_listener self.server_listener = server_listener # Close the client explicitly to avoid having too many threads open. - self.addAsyncCleanup(client.aclose) + self.addAsyncCleanup(client.close) # Create session0 and session1. sessions = {} diff --git a/test/test_cursor.py b/test/test_cursor.py index 0d61865196..1cde4718f1 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1415,7 +1415,7 @@ def test_find_raw(self): c.drop() docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = (c.find_raw_batches()).sort("_id").to_list() + batches = c.find_raw_batches().sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1431,7 +1431,7 @@ def test_find_raw_transaction(self): with client.start_session() as session: with session.start_transaction(): batches = ( - (client[self.db.name].test.find_raw_batches(session=session)).sort("_id") + client[self.db.name].test.find_raw_batches(session=session).sort("_id") ).to_list() cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "find") @@ -1461,7 +1461,7 @@ def test_find_raw_retryable_reads(self): with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} ): - batches = (client[self.db.name].test.find_raw_batches()).sort("_id").to_list() + batches = client[self.db.name].test.find_raw_batches().sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1482,7 +1482,7 @@ def test_find_raw_snapshot_reads(self): db = client[self.db.name] with client.start_session(snapshot=True) as session: db.test.distinct("x", {}, session=session) - batches = (db.test.find_raw_batches(session=session)).sort("_id").to_list() + batches = db.test.find_raw_batches(session=session).sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1493,7 +1493,7 @@ def test_find_raw_snapshot_reads(self): def test_explain(self): c = self.db.test c.insert_one({}) - explanation = (c.find_raw_batches()).explain() + explanation = c.find_raw_batches().explain() self.assertIsInstance(explanation, dict) def test_empty(self): @@ -1514,7 +1514,7 @@ def test_exhaust(self): c = self.db.test c.drop() c.insert_many({"_id": i} for i in range(200)) - result = b"".join((c.find_raw_batches(cursor_type=CursorType.EXHAUST)).to_list()) + result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST).to_list()) self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) def test_server_error(self): @@ -1526,7 +1526,7 @@ def test_server_error(self): def test_get_item(self): with self.assertRaises(InvalidOperation): - (self.db.test.find_raw_batches())[0] + self.db.test.find_raw_batches()[0] def test_collation(self): next(self.db.test.find_raw_batches(collation=Collation("en_US"))) diff --git a/test/test_session.py b/test/test_session.py index dfc894804a..563b33c70e 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -925,7 +925,7 @@ def aggregate_raw(coll, session): return (coll.aggregate_raw_batches([], session=session)).to_list() def find_raw(coll, session): - return (coll.find_raw_batches({}, session=session)).to_list() + return coll.find_raw_batches({}, session=session).to_list() self._test_reads(aggregate) self._test_reads(lambda coll, session: coll.find({}, session=session).to_list()) diff --git a/test/test_transactions.py b/test/test_transactions.py index 4ee0186475..b1869bec79 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -371,6 +371,9 @@ def test_transaction_direct_connection(self): def find(*args, **kwargs): return coll.find(*args, **kwargs) + def find_raw_batches(*args, **kwargs): + return coll.find_raw_batches(*args, **kwargs) + ops = [ (coll.bulk_write, [[InsertOne[dict]({})]]), (coll.insert_one, [{}]), @@ -389,7 +392,7 @@ def find(*args, **kwargs): (coll.aggregate, [[]]), (find, [{}]), (coll.aggregate_raw_batches, [[]]), - (coll.find_raw_batches, [{}]), + (find_raw_batches, [{}]), (coll.database.command, ["find", coll.name]), ] for f, args in ops: diff --git a/tools/synchro.py b/tools/synchro.py index 5711e1f84d..0c2aff1301 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -92,7 +92,6 @@ "asyncAssertRaisesExactly": "assertRaisesExactly", "get_async_mock_client": "get_mock_client", "aconnect": "_connect", - "aclose": "close", "async-transactions-ref": "transactions-ref", "async-snapshot-reads-ref": "snapshot-reads-ref", "default_async": "default", From f2f75fc1c8716622b7e04fcbbc327dc000afec02 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 13 Aug 2024 18:32:48 -0700 Subject: [PATCH 1396/2111] PYTHON-4659 Fix async with TLS (#1793) --- pymongo/network_layer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index f1c378b9b2..d99b4fee41 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -77,6 +77,7 @@ async def async_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> Non async def _async_sendall_ssl( sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop ) -> None: + view = memoryview(buf) fd = sock.fileno() sent = 0 @@ -89,7 +90,7 @@ def _is_ready(fut: Future) -> None: while sent < len(buf): try: - sent += sock.send(buf) + sent += sock.send(view[sent:]) except BLOCKING_IO_ERRORS as exc: fd = sock.fileno() # Check for closed socket. From adf8817df8f3fd47d82cf4c5e3bd4e93cfdfb602 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 14 Aug 2024 13:13:56 -0500 Subject: [PATCH 1397/2111] PYTHON-4584 Add length option to Cursor.to_list for motor compat (#1791) --- gridfs/asynchronous/grid_file.py | 12 ++++++++++-- gridfs/synchronous/grid_file.py | 12 ++++++++++-- pymongo/asynchronous/command_cursor.py | 27 ++++++++++++++++++++------ pymongo/asynchronous/cursor.py | 27 ++++++++++++++++++++------ pymongo/synchronous/command_cursor.py | 27 ++++++++++++++++++++------ pymongo/synchronous/cursor.py | 27 ++++++++++++++++++++------ test/asynchronous/test_cursor.py | 27 ++++++++++++++++++++++++++ test/test_cursor.py | 27 ++++++++++++++++++++++++++ test/test_gridfs.py | 6 ++++++ 9 files changed, 164 insertions(+), 28 deletions(-) diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index 303abe7059..4d6140750e 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -1892,8 +1892,16 @@ async def next(self) -> AsyncGridOut: next_file = await super().next() return AsyncGridOut(self._root_collection, file_document=next_file, session=self.session) - async def to_list(self) -> list[AsyncGridOut]: - return [x async for x in self] # noqa: C416,RUF100 + async def to_list(self, length: Optional[int] = None) -> list[AsyncGridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x async for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(await self.next()) + return ret __anext__ = next diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index 1e3d265d4b..bc2e29a61d 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -1878,8 +1878,16 @@ def next(self) -> GridOut: next_file = super().next() return GridOut(self._root_collection, file_document=next_file, session=self.session) - def to_list(self) -> list[GridOut]: - return [x for x in self] # noqa: C416,RUF100 + def to_list(self, length: Optional[int] = None) -> list[GridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(self.next()) + return ret __next__ = next diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py index b28f983b12..b2cd345f63 100644 --- a/pymongo/asynchronous/command_cursor.py +++ b/pymongo/asynchronous/command_cursor.py @@ -346,13 +346,17 @@ async def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: else: return None - async def _next_batch(self, result: list) -> bool: - """Get all available documents from the cursor.""" + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + """Get all or some available documents from the cursor.""" if not len(self._data) and not self._killed: await self._refresh() if len(self._data): - result.extend(self._data) - self._data.clear() + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) return True else: return False @@ -381,21 +385,32 @@ async def __aenter__(self) -> AsyncCommandCursor[_DocumentType]: async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: await self.close() - async def to_list(self) -> list[_DocumentType]: + async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. To use:: >>> await cursor.to_list() + Or, so read at most n items from the cursor:: + + >>> await cursor.to_list(n) + If the cursor is empty or has no more results, an empty list will be returned. .. versionadded:: 4.9 """ res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") while self.alive: - if not await self._next_batch(res): + if not await self._next_batch(res, remaining): break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break return res diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 8421667bec..bae77bb304 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -1260,16 +1260,20 @@ async def next(self) -> _DocumentType: else: raise StopAsyncIteration - async def _next_batch(self, result: list) -> bool: - """Get all available documents from the cursor.""" + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + """Get all or some documents from the cursor.""" if not self._exhaust_checked: self._exhaust_checked = True await self._supports_exhaust() if self._empty: return False if len(self._data) or await self._refresh(): - result.extend(self._data) - self._data.clear() + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) return True else: return False @@ -1286,21 +1290,32 @@ async def __aenter__(self) -> AsyncCursor[_DocumentType]: async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: await self.close() - async def to_list(self) -> list[_DocumentType]: + async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. To use:: >>> await cursor.to_list() + Or, so read at most n items from the cursor:: + + >>> await cursor.to_list(n) + If the cursor is empty or has no more results, an empty list will be returned. .. versionadded:: 4.9 """ res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") while self.alive: - if not await self._next_batch(res): + if not await self._next_batch(res, remaining): break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break return res diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index 86fa69dcb6..da05bf1a3b 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -346,13 +346,17 @@ def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: else: return None - def _next_batch(self, result: list) -> bool: - """Get all available documents from the cursor.""" + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + """Get all or some available documents from the cursor.""" if not len(self._data) and not self._killed: self._refresh() if len(self._data): - result.extend(self._data) - self._data.clear() + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) return True else: return False @@ -381,21 +385,32 @@ def __enter__(self) -> CommandCursor[_DocumentType]: def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def to_list(self) -> list[_DocumentType]: + def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. To use:: >>> cursor.to_list() + Or, so read at most n items from the cursor:: + + >>> cursor.to_list(n) + If the cursor is empty or has no more results, an empty list will be returned. .. versionadded:: 4.9 """ res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") while self.alive: - if not self._next_batch(res): + if not self._next_batch(res, remaining): break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break return res diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 1595ce40b9..c352b64098 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -1258,16 +1258,20 @@ def next(self) -> _DocumentType: else: raise StopIteration - def _next_batch(self, result: list) -> bool: - """Get all available documents from the cursor.""" + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + """Get all or some documents from the cursor.""" if not self._exhaust_checked: self._exhaust_checked = True self._supports_exhaust() if self._empty: return False if len(self._data) or self._refresh(): - result.extend(self._data) - self._data.clear() + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) return True else: return False @@ -1284,21 +1288,32 @@ def __enter__(self) -> Cursor[_DocumentType]: def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() - def to_list(self) -> list[_DocumentType]: + def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. To use:: >>> cursor.to_list() + Or, so read at most n items from the cursor:: + + >>> cursor.to_list(n) + If the cursor is empty or has no more results, an empty list will be returned. .. versionadded:: 4.9 """ res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") while self.alive: - if not self._next_batch(res): + if not self._next_batch(res, remaining): break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break return res diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 9dd67f2dae..6967205fe3 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1401,6 +1401,20 @@ async def test_to_list_empty(self): docs = await c.to_list() self.assertEqual([], docs) + async def test_to_list_length(self): + coll = self.db.test + await coll.insert_many([{} for _ in range(5)]) + self.addCleanup(coll.drop) + c = coll.find() + docs = await c.to_list(3) + self.assertEqual(len(docs), 3) + + c = coll.find(batch_size=2) + docs = await c.to_list(3) + self.assertEqual(len(docs), 3) + docs = await c.to_list(3) + self.assertEqual(len(docs), 2) + @async_client_context.require_change_streams async def test_command_cursor_to_list(self): # Set maxAwaitTimeMS=1 to speed up the test. @@ -1417,6 +1431,19 @@ async def test_command_cursor_to_list_empty(self): docs = await c.to_list() self.assertEqual([], docs) + @async_client_context.require_change_streams + async def test_command_cursor_to_list_length(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"foo": 1}, {"foo": 2}]) + + pipeline = {"$project": {"_id": False, "foo": True}} + result = await db.test.aggregate([pipeline]) + self.assertEqual(len(await result.to_list()), 2) + + result = await db.test.aggregate([pipeline]) + self.assertEqual(len(await result.to_list(1)), 1) + class TestRawBatchCursor(AsyncIntegrationTest): async def test_find_raw(self): diff --git a/test/test_cursor.py b/test/test_cursor.py index 1cde4718f1..8e6fade1ec 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1392,6 +1392,20 @@ def test_to_list_empty(self): docs = c.to_list() self.assertEqual([], docs) + def test_to_list_length(self): + coll = self.db.test + coll.insert_many([{} for _ in range(5)]) + self.addCleanup(coll.drop) + c = coll.find() + docs = c.to_list(3) + self.assertEqual(len(docs), 3) + + c = coll.find(batch_size=2) + docs = c.to_list(3) + self.assertEqual(len(docs), 3) + docs = c.to_list(3) + self.assertEqual(len(docs), 2) + @client_context.require_change_streams def test_command_cursor_to_list(self): # Set maxAwaitTimeMS=1 to speed up the test. @@ -1408,6 +1422,19 @@ def test_command_cursor_to_list_empty(self): docs = c.to_list() self.assertEqual([], docs) + @client_context.require_change_streams + def test_command_cursor_to_list_length(self): + db = self.db + db.drop_collection("test") + db.test.insert_many([{"foo": 1}, {"foo": 2}]) + + pipeline = {"$project": {"_id": False, "foo": True}} + result = db.test.aggregate([pipeline]) + self.assertEqual(len(result.to_list()), 2) + + result = db.test.aggregate([pipeline]) + self.assertEqual(len(result.to_list(1)), 1) + class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 27b38dc0b0..19ec152bd1 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -440,6 +440,12 @@ def test_gridfs_find(self): gout = next(cursor) self.assertEqual(b"test2+", gout.read()) self.assertRaises(StopIteration, cursor.__next__) + cursor.rewind() + items = cursor.to_list() + self.assertEqual(len(items), 2) + cursor.rewind() + items = cursor.to_list(1) + self.assertEqual(len(items), 1) cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) From 297dfe6aa3d31aa23e081a79d56e11ffd45dd952 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Thu, 15 Aug 2024 14:13:00 -0700 Subject: [PATCH 1398/2111] PYTHON-4660 Fix AttributeError when MongoClient.bulk_write batch fails with InvalidBSON (#1792) --- pymongo/asynchronous/client_bulk.py | 3 ++- pymongo/asynchronous/mongo_client.py | 4 +++- pymongo/synchronous/client_bulk.py | 3 ++- pymongo/synchronous/mongo_client.py | 4 +++- test/asynchronous/test_client_bulk_write.py | 22 ++++++++++++++++++++- test/test_client_bulk_write.py | 22 ++++++++++++++++++++- 6 files changed, 52 insertions(+), 6 deletions(-) diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 671d989c25..1f3cca2f6c 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -550,7 +550,8 @@ async def _execute_command( if result.get("error"): error = result["error"] retryable_top_level_error = ( - isinstance(error.details, dict) + hasattr(error, "details") + and isinstance(error.details, dict) and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES ) retryable_network_error = isinstance( diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index fbbd9a4eed..8848fa4fd5 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2562,7 +2562,9 @@ async def run(self) -> T: if not self._retryable: raise if isinstance(exc, ClientBulkWriteException) and exc.error: - retryable_write_error_exc = exc.error.has_error_label("RetryableWriteError") + retryable_write_error_exc = isinstance( + exc.error, PyMongoError + ) and exc.error.has_error_label("RetryableWriteError") else: retryable_write_error_exc = exc.has_error_label("RetryableWriteError") if retryable_write_error_exc: diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 229abd4330..5f969804d5 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -548,7 +548,8 @@ def _execute_command( if result.get("error"): error = result["error"] retryable_top_level_error = ( - isinstance(error.details, dict) + hasattr(error, "details") + and isinstance(error.details, dict) and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES ) retryable_network_error = isinstance( diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 1863165625..4aff3b5eed 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2549,7 +2549,9 @@ def run(self) -> T: if not self._retryable: raise if isinstance(exc, ClientBulkWriteException) and exc.error: - retryable_write_error_exc = exc.error.has_error_label("RetryableWriteError") + retryable_write_error_exc = isinstance( + exc.error, PyMongoError + ) and exc.error.has_error_label("RetryableWriteError") else: retryable_write_error_exc = exc.has_error_label("RetryableWriteError") if retryable_write_error_exc: diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index eea0b4e8e0..20e6ab7c95 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -19,12 +19,18 @@ sys.path[0:0] = [""] -from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + unittest, +) from test.utils import ( OvertCommandListener, async_rs_or_single_client, ) +from unittest.mock import patch +from pymongo.asynchronous.client_bulk import _AsyncClientBulk from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( ClientBulkWriteException, @@ -53,6 +59,20 @@ async def test_returns_error_if_no_namespace_provided(self): context.exception._message, ) + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_handles_non_pymongo_error(self): + with patch.object( + _AsyncClientBulk, "write_command", return_value={"error": TypeError("mock type error")} + ): + client = await async_rs_or_single_client() + self.addAsyncCleanup(client.close) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(ClientBulkWriteException) as context: + await client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, TypeError) + self.assertFalse(hasattr(context.exception.error, "details")) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(AsyncIntegrationTest): diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 8f6aad0cfa..686b60642a 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -19,11 +19,16 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import ( + IntegrationTest, + client_context, + unittest, +) from test.utils import ( OvertCommandListener, rs_or_single_client, ) +from unittest.mock import patch from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( @@ -34,6 +39,7 @@ ) from pymongo.monitoring import * from pymongo.operations import * +from pymongo.synchronous.client_bulk import _ClientBulk from pymongo.write_concern import WriteConcern _IS_SYNC = True @@ -53,6 +59,20 @@ def test_returns_error_if_no_namespace_provided(self): context.exception._message, ) + @client_context.require_version_min(8, 0, 0, -24) + def test_handles_non_pymongo_error(self): + with patch.object( + _ClientBulk, "write_command", return_value={"error": TypeError("mock type error")} + ): + client = rs_or_single_client() + self.addCleanup(client.close) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, TypeError) + self.assertFalse(hasattr(context.exception.error, "details")) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(IntegrationTest): From 8b44bc4bf378d87c109a7fc1bcf6722ecc2160c7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 16 Aug 2024 14:14:07 -0700 Subject: [PATCH 1399/2111] PYTHON-4150 Resync spec tests to bump maxWireVersion (#1787) --- .../rs/discover_arbiters.json | 2 +- .../rs/discover_arbiters_replicaset.json | 2 +- .../rs/discover_ghost.json | 2 +- .../rs/discover_ghost_replicaset.json | 2 +- .../rs/discover_hidden.json | 2 +- .../rs/discover_hidden_replicaset.json | 2 +- .../rs/discover_passives.json | 4 ++-- .../rs/discover_passives_replicaset.json | 4 ++-- .../rs/discover_primary.json | 2 +- .../rs/discover_primary_replicaset.json | 2 +- .../rs/discover_rsother.json | 2 +- .../rs/discover_rsother_replicaset.json | 4 ++-- .../rs/discover_secondary.json | 2 +- .../rs/discover_secondary_replicaset.json | 2 +- test/discovery_and_monitoring/rs/discovery.json | 8 ++++---- .../rs/equal_electionids.json | 4 ++-- .../rs/hosts_differ_from_seeds.json | 2 +- .../rs/incompatible_arbiter.json | 2 +- .../rs/incompatible_ghost.json | 2 +- .../rs/incompatible_other.json | 2 +- test/discovery_and_monitoring/rs/ls_timeout.json | 12 ++++++------ .../rs/member_reconfig.json | 4 ++-- .../rs/member_standalone.json | 4 ++-- .../discovery_and_monitoring/rs/new_primary.json | 4 ++-- .../rs/new_primary_new_electionid.json | 6 +++--- .../rs/new_primary_new_setversion.json | 6 +++--- .../rs/new_primary_wrong_set_name.json | 4 ++-- .../rs/non_rs_member.json | 2 +- .../rs/normalize_case.json | 2 +- .../rs/normalize_case_me.json | 4 ++-- .../rs/null_election_id-pre-6.0.json | 8 ++++---- .../rs/primary_becomes_ghost.json | 4 ++-- .../rs/primary_becomes_mongos.json | 4 ++-- .../rs/primary_becomes_standalone.json | 4 ++-- .../rs/primary_changes_set_name.json | 4 ++-- .../rs/primary_disconnect.json | 2 +- .../rs/primary_disconnect_electionid.json | 10 +++++----- .../rs/primary_disconnect_setversion.json | 10 +++++----- ...y_hint_from_secondary_with_mismatched_me.json | 4 ++-- .../rs/primary_mismatched_me.json | 2 +- .../rs/primary_reports_new_member.json | 8 ++++---- .../rs/primary_to_no_primary_mismatched_me.json | 4 ++-- .../rs/primary_wrong_set_name.json | 2 +- test/discovery_and_monitoring/rs/repeated.json | 8 ++++---- .../rs/replicaset_rsnp.json | 2 +- .../rs/response_from_removed.json | 4 ++-- .../rs/sec_not_auth.json | 4 ++-- .../rs/secondary_ignore_ok_0-pre-6.0.json | 6 +++--- .../rs/secondary_ignore_ok_0.json | 6 +++--- .../rs/secondary_mismatched_me.json | 2 +- .../rs/secondary_wrong_set_name.json | 2 +- .../secondary_wrong_set_name_with_primary.json | 4 ++-- .../setversion_without_electionid-pre-6.0.json | 4 ++-- .../rs/stepdown_change_set_name.json | 4 ++-- test/discovery_and_monitoring/rs/too_new.json | 2 +- test/discovery_and_monitoring/rs/too_old.json | 2 +- .../rs/unexpected_mongos.json | 2 +- ...se_setversion_without_electionid-pre-6.0.json | 6 +++--- .../rs/wrong_set_name.json | 2 +- .../sharded/discover_single_mongos.json | 2 +- .../sharded/ls_timeout_mongos.json | 8 ++++---- .../sharded/mongos_disconnect.json | 6 +++--- .../sharded/multiple_mongoses.json | 4 ++-- .../sharded/non_mongos_removed.json | 4 ++-- .../sharded/too_old.json | 2 +- .../single/direct_connection_external_ip.json | 2 +- .../single/direct_connection_mongos.json | 2 +- .../single/direct_connection_replicaset.json | 2 +- .../single/direct_connection_rsarbiter.json | 2 +- .../single/direct_connection_rsprimary.json | 2 +- .../single/direct_connection_rssecondary.json | 2 +- .../single/direct_connection_standalone.json | 2 +- .../single/direct_connection_wrong_set_name.json | 4 ++-- .../single/discover_standalone.json | 2 +- .../single/ls_timeout_standalone.json | 2 +- .../single/not_ok_response.json | 4 ++-- .../single/standalone_removed.json | 2 +- .../single/standalone_using_legacy_hello.json | 2 +- .../DefaultNoMaxStaleness.json | 10 +++++----- .../ReplicaSetNoPrimary/LastUpdateTime.json | 12 ++++++------ .../ReplicaSetNoPrimary/Nearest.json | 12 ++++++------ .../ReplicaSetNoPrimary/Nearest2.json | 12 ++++++------ .../OneKnownTwoUnavailable.json | 6 +++--- .../ReplicaSetNoPrimary/PrimaryPreferred.json | 8 ++++---- .../PrimaryPreferred_tags.json | 8 ++++---- .../ReplicaSetNoPrimary/Secondary.json | 12 ++++++------ .../ReplicaSetNoPrimary/SecondaryPreferred.json | 8 ++++---- .../SecondaryPreferred_tags.json | 12 ++++++------ .../ReplicaSetNoPrimary/ZeroMaxStaleness.json | 4 ++-- .../DefaultNoMaxStaleness.json | 10 +++++----- .../ReplicaSetWithPrimary/LastUpdateTime.json | 12 ++++++------ .../ReplicaSetWithPrimary/LongHeartbeat.json | 10 +++++----- .../ReplicaSetWithPrimary/LongHeartbeat2.json | 4 ++-- .../MaxStalenessTooSmall.json | 4 ++-- .../MaxStalenessWithModePrimary.json | 4 ++-- .../ReplicaSetWithPrimary/Nearest.json | 12 ++++++------ .../ReplicaSetWithPrimary/Nearest2.json | 12 ++++++------ .../ReplicaSetWithPrimary/Nearest_tags.json | 8 ++++---- .../ReplicaSetWithPrimary/PrimaryPreferred.json | 8 ++++---- .../SecondaryPreferred.json | 8 ++++---- .../SecondaryPreferred_tags.json | 16 ++++++++-------- .../SecondaryPreferred_tags2.json | 10 +++++----- .../ReplicaSetWithPrimary/Secondary_tags.json | 16 ++++++++-------- .../ReplicaSetWithPrimary/Secondary_tags2.json | 10 +++++----- .../ReplicaSetWithPrimary/ZeroMaxStaleness.json | 4 ++-- .../max_staleness/Sharded/SmallMaxStaleness.json | 10 +++++----- test/max_staleness/Single/SmallMaxStaleness.json | 6 +++--- .../max_staleness/Unknown/SmallMaxStaleness.json | 2 +- 108 files changed, 277 insertions(+), 277 deletions(-) diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json index 53709b0cee..803462b156 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json index 64fb49f4fc..e58d7c7fb4 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json index 2e24c83e0b..3b7fc836ec 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost.json +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -12,7 +12,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json index cf5fe83a54..1a8457983b 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -12,7 +12,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json index e4a90f1f9c..10bd51edeb 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden.json +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json index 04420596f0..63cf558675 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json index 30258409f6..0a292c675c 100644 --- a/test/discovery_and_monitoring/rs/discover_passives.json +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json index 266eaa5234..c48fd47625 100644 --- a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json index 2d1292bbd4..04e7a4984c 100644 --- a/test/discovery_and_monitoring/rs/discover_primary.json +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json index 54dfefba5f..3cdcfdcee2 100644 --- a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json index 4ab25667f0..9c3b8d8b7d 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother.json +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json index e3958d70ad..3da9efb066 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -34,7 +34,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json index 22325d4e03..64a1ce31e3 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary.json +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json index d903b6444d..d230f976a2 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discovery.json b/test/discovery_and_monitoring/rs/discovery.json index 50e1269223..e9deaa7587 100644 --- a/test/discovery_and_monitoring/rs/discovery.json +++ b/test/discovery_and_monitoring/rs/discovery.json @@ -18,7 +18,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ "d:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -103,7 +103,7 @@ "e:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -147,7 +147,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json index 17df3207fa..f1deedf9f4 100644 --- a/test/discovery_and_monitoring/rs/equal_electionids.json +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json index 4e02304c61..085e81e266 100644 --- a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json +++ b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json @@ -15,7 +15,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json index f0539cb337..bda18d9f6f 100644 --- a/test/discovery_and_monitoring/rs/incompatible_arbiter.json +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json index 824e953f90..9d82e31682 100644 --- a/test/discovery_and_monitoring/rs/incompatible_ghost.json +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json index 6f301ef5de..149ba01142 100644 --- a/test/discovery_and_monitoring/rs/incompatible_other.json +++ b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/ls_timeout.json b/test/discovery_and_monitoring/rs/ls_timeout.json index 96389d3b76..c68790ddfd 100644 --- a/test/discovery_and_monitoring/rs/ls_timeout.json +++ b/test/discovery_and_monitoring/rs/ls_timeout.json @@ -20,7 +20,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": 3, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -58,7 +58,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -104,7 +104,7 @@ "setName": "rs", "arbiterOnly": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -152,7 +152,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -194,7 +194,7 @@ "hidden": true, "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -244,7 +244,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": null, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/member_reconfig.json b/test/discovery_and_monitoring/rs/member_reconfig.json index 0e2c2c462e..a05fed0efb 100644 --- a/test/discovery_and_monitoring/rs/member_reconfig.json +++ b/test/discovery_and_monitoring/rs/member_reconfig.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/member_standalone.json b/test/discovery_and_monitoring/rs/member_standalone.json index 0756003a89..db100db9f3 100644 --- a/test/discovery_and_monitoring/rs/member_standalone.json +++ b/test/discovery_and_monitoring/rs/member_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -40,7 +40,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json index ed1a6245f9..1a84c69c91 100644 --- a/test/discovery_and_monitoring/rs/new_primary.json +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -50,7 +50,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index ccb3a41f75..509720d445 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -114,7 +114,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index 415a0f66aa..96533c61ee 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -114,7 +114,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json index d7b19cfe8f..774b3a5736 100644 --- a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/non_rs_member.json b/test/discovery_and_monitoring/rs/non_rs_member.json index 538077ef09..6bf10bd628 100644 --- a/test/discovery_and_monitoring/rs/non_rs_member.json +++ b/test/discovery_and_monitoring/rs/non_rs_member.json @@ -10,7 +10,7 @@ "ok": 1, "helloOk": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/normalize_case.json b/test/discovery_and_monitoring/rs/normalize_case.json index 96a944f0c3..62915495e0 100644 --- a/test/discovery_and_monitoring/rs/normalize_case.json +++ b/test/discovery_and_monitoring/rs/normalize_case.json @@ -21,7 +21,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/normalize_case_me.json b/test/discovery_and_monitoring/rs/normalize_case_me.json index ab1720cefc..0d9ba6213e 100644 --- a/test/discovery_and_monitoring/rs/normalize_case_me.json +++ b/test/discovery_and_monitoring/rs/normalize_case_me.json @@ -22,7 +22,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json index f1fa2e252e..9e7ccc6e7f 100644 --- a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -18,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -66,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -116,7 +116,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -167,7 +167,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json index 9c54b39856..e34280e88c 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -41,7 +41,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json index ac416e57d5..79510d9399 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -41,7 +41,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json index a64524d0ca..abcc1e2d01 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -38,7 +38,7 @@ { "ok": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_changes_set_name.json b/test/discovery_and_monitoring/rs/primary_changes_set_name.json index bf70ca3014..3b564d2c93 100644 --- a/test/discovery_and_monitoring/rs/primary_changes_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_changes_set_name.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -44,7 +44,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect.json b/test/discovery_and_monitoring/rs/primary_disconnect.json index 3db854f085..73a01a82a9 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index 3a80b150fe..5a91188ea8 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -115,7 +115,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -159,7 +159,7 @@ "$oid": "000000000000000000000003" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -203,7 +203,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index 32e03fb7d4..f7417ad77b 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -115,7 +115,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -159,7 +159,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -203,7 +203,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json index bc02cc9571..1ca72225a2 100644 --- a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -18,7 +18,7 @@ "setName": "rs", "primary": "b:27017", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -48,7 +48,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_mismatched_me.json index 2d2c0f40d8..6bb6226f8a 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me.json @@ -31,7 +31,7 @@ "ok": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ] diff --git a/test/discovery_and_monitoring/rs/primary_reports_new_member.json b/test/discovery_and_monitoring/rs/primary_reports_new_member.json index ac0d9374f0..ed28c48c87 100644 --- a/test/discovery_and_monitoring/rs/primary_reports_new_member.json +++ b/test/discovery_and_monitoring/rs/primary_reports_new_member.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -51,7 +51,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -86,7 +86,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -127,7 +127,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json index 6dbd73dadc..798a648d19 100644 --- a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json @@ -17,7 +17,7 @@ "me": "a:27017", "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -52,7 +52,7 @@ "me": "c:27017", "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json index cc0691fb8c..1366e38996 100644 --- a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json @@ -15,7 +15,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json index 610aeae0ac..3ce0948ab8 100644 --- a/test/discovery_and_monitoring/rs/repeated.json +++ b/test/discovery_and_monitoring/rs/repeated.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -84,7 +84,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -120,7 +120,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json index 3148e1c141..1cd732b82f 100644 --- a/test/discovery_and_monitoring/rs/replicaset_rsnp.json +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/response_from_removed.json b/test/discovery_and_monitoring/rs/response_from_removed.json index 87a66d9e72..fa46a14ceb 100644 --- a/test/discovery_and_monitoring/rs/response_from_removed.json +++ b/test/discovery_and_monitoring/rs/response_from_removed.json @@ -15,7 +15,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -46,7 +46,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/sec_not_auth.json b/test/discovery_and_monitoring/rs/sec_not_auth.json index a39855e654..ccbe7a08af 100644 --- a/test/discovery_and_monitoring/rs/sec_not_auth.json +++ b/test/discovery_and_monitoring/rs/sec_not_auth.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json index 054425c84c..f27060533c 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ { "ok": 0, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index ee9519930b..9ffff58ef0 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ { "ok": 0, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json index 6f1b9b5986..790e4bfca8 100644 --- a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -32,7 +32,7 @@ "ok": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ] diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json index 8d2f152f59..1f86b50543 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json index b7ef2d6d6a..6b89914151 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -51,7 +51,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json index c2e2fe5b9b..e62c6963ed 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], diff --git a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json index e9075f97f2..6de995518d 100644 --- a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json +++ b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -45,7 +45,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/too_new.json b/test/discovery_and_monitoring/rs/too_new.json index 0433d27a36..696246f8e1 100644 --- a/test/discovery_and_monitoring/rs/too_new.json +++ b/test/discovery_and_monitoring/rs/too_new.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json index 461d00acc4..8100a663f5 100644 --- a/test/discovery_and_monitoring/rs/too_old.json +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/unexpected_mongos.json b/test/discovery_and_monitoring/rs/unexpected_mongos.json index cc19a961f2..c6ffb321ca 100644 --- a/test/discovery_and_monitoring/rs/unexpected_mongos.json +++ b/test/discovery_and_monitoring/rs/unexpected_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json index 5c58b65614..2f9b567b85 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -64,7 +64,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -108,7 +108,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], diff --git a/test/discovery_and_monitoring/rs/wrong_set_name.json b/test/discovery_and_monitoring/rs/wrong_set_name.json index 9654ff7b79..d0764d24dc 100644 --- a/test/discovery_and_monitoring/rs/wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/wrong_set_name.json @@ -17,7 +17,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json index 9e877a0840..bf7e57521c 100644 --- a/test/discovery_and_monitoring/sharded/discover_single_mongos.json +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json index 93fa398d52..3da0f84ca2 100644 --- a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json +++ b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json @@ -13,7 +13,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -25,7 +25,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -67,7 +67,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/mongos_disconnect.json b/test/discovery_and_monitoring/sharded/mongos_disconnect.json index 50a93eda5f..29b3351869 100644 --- a/test/discovery_and_monitoring/sharded/mongos_disconnect.json +++ b/test/discovery_and_monitoring/sharded/mongos_disconnect.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -76,7 +76,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/multiple_mongoses.json b/test/discovery_and_monitoring/sharded/multiple_mongoses.json index 311592d715..ae0c2d9cde 100644 --- a/test/discovery_and_monitoring/sharded/multiple_mongoses.json +++ b/test/discovery_and_monitoring/sharded/multiple_mongoses.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/non_mongos_removed.json b/test/discovery_and_monitoring/sharded/non_mongos_removed.json index d74375ebbf..4698f576d5 100644 --- a/test/discovery_and_monitoring/sharded/non_mongos_removed.json +++ b/test/discovery_and_monitoring/sharded/non_mongos_removed.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -26,7 +26,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/too_old.json b/test/discovery_and_monitoring/sharded/too_old.json index 688e1db0f5..b918715ada 100644 --- a/test/discovery_and_monitoring/sharded/too_old.json +++ b/test/discovery_and_monitoring/sharded/too_old.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json b/test/discovery_and_monitoring/single/direct_connection_external_ip.json index 90676a8f9b..1461b4c469 100644 --- a/test/discovery_and_monitoring/single/direct_connection_external_ip.json +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json index 25fe965185..72be020862 100644 --- a/test/discovery_and_monitoring/single/direct_connection_mongos.json +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json index cd8660888a..82a51d390e 100644 --- a/test/discovery_and_monitoring/single/direct_connection_replicaset.json +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json index e204956056..e06d284364 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json index 409e8502b3..45eb1602fb 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json index 305f283b52..b1bef8a49f 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json index b47278482a..e71ba07e74 100644 --- a/test/discovery_and_monitoring/single/direct_connection_standalone.json +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json index 71080e6810..8014a0a533 100644 --- a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -45,7 +45,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json index 858cbdaf63..d78c81654b 100644 --- a/test/discovery_and_monitoring/single/discover_standalone.json +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/ls_timeout_standalone.json b/test/discovery_and_monitoring/single/ls_timeout_standalone.json index 87b3e4e8a1..236eabe00a 100644 --- a/test/discovery_and_monitoring/single/ls_timeout_standalone.json +++ b/test/discovery_and_monitoring/single/ls_timeout_standalone.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/not_ok_response.json b/test/discovery_and_monitoring/single/not_ok_response.json index 8e7c2a10e3..cfaac3564a 100644 --- a/test/discovery_and_monitoring/single/not_ok_response.json +++ b/test/discovery_and_monitoring/single/not_ok_response.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -21,7 +21,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/standalone_removed.json b/test/discovery_and_monitoring/single/standalone_removed.json index 57f8f861b1..675cdbb008 100644 --- a/test/discovery_and_monitoring/single/standalone_removed.json +++ b/test/discovery_and_monitoring/single/standalone_removed.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json index 46660fa8de..488cac4918 100644 --- a/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json +++ b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json @@ -10,7 +10,7 @@ "ok": 1, "ismaster": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json index 5afebbbdcb..db8b061b30 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json index 492d8a2f62..10b6f28786 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json index 6602561c1d..38b9986500 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json index 16d9a673bd..586b47ccd2 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json index 54f318872f..15a62090e3 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json +++ b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json @@ -17,7 +17,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { @@ -35,7 +35,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { @@ -48,7 +48,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json index 7956b8e516..7c036f725c 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json index 453dce6605..56fcb156bb 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json index b383f275dc..5a4b0c8226 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json index 7bce7d0aa4..19a948e928 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json index 32c9ca770b..b4633d88f3 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json index fd84cd1193..ccb916f107 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json index 35eaa9d69d..00137cf69e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json index 18450beaed..9d1db2de65 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json index b9fb407f9e..b0636236cc 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json index b695e1caeb..76edfcb836 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json index 9b798d37da..aa936e3c67 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json index 1fa7bb4dd0..c24752a7f1 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json index 198be4a681..d3a9535b09 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json index 3ae629c898..f91706e804 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json index 675df82631..4ed0b9ed2e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json index 795b47a111..7945530e6a 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json index 5455708a70..b433d6a430 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json index 6670b54c89..e594af7832 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json index 642fee1fb3..bc0953c657 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json index 502120dce6..2817cf9225 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json index 6978a1807b..7aa487a078 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json index e1e4a7ffb7..fff5609fcc 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Sharded/SmallMaxStaleness.json b/test/max_staleness/Sharded/SmallMaxStaleness.json index 91d89720d1..98e05be363 100644 --- a/test/max_staleness/Sharded/SmallMaxStaleness.json +++ b/test/max_staleness/Sharded/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Single/SmallMaxStaleness.json b/test/max_staleness/Single/SmallMaxStaleness.json index b8d2db24be..d948739855 100644 --- a/test/max_staleness/Single/SmallMaxStaleness.json +++ b/test/max_staleness/Single/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -27,7 +27,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -41,7 +41,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Unknown/SmallMaxStaleness.json b/test/max_staleness/Unknown/SmallMaxStaleness.json index 8d69f46a1e..0e609bcf94 100644 --- a/test/max_staleness/Unknown/SmallMaxStaleness.json +++ b/test/max_staleness/Unknown/SmallMaxStaleness.json @@ -6,7 +6,7 @@ { "address": "a:27017", "type": "Unknown", - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, From ce5c5adb6374a68b7154689eec7d9862dcc9ef28 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Fri, 16 Aug 2024 14:45:32 -0700 Subject: [PATCH 1400/2111] PYTHON-4578 Benchmark collection and client bulk write (#1796) --- .evergreen/config.yml | 12 ++++++ test/performance/perf_test.py | 76 ++++++++++++++++++++++++++++++++++- 2 files changed, 86 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 0df4bdcef5..8d77e58c07 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2031,6 +2031,17 @@ tasks: - func: "attach benchmark test results" - func: "send dashboard data" + - name: "perf-8.0-standalone" + tags: ["perf"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "8.0" + TOPOLOGY: "server" + - func: "run perf tests" + - func: "attach benchmark test results" + - func: "send dashboard data" + - name: "assign-pr-reviewer" tags: ["pr"] allowed_requesters: ["patch", "github_pr"] @@ -3041,6 +3052,7 @@ buildvariants: tasks: - name: "perf-6.0-standalone" - name: "perf-6.0-standalone-ssl" + - name: "perf-8.0-standalone" # Platform notes # i386 builds of OpenSSL or Cyrus SASL are not available diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index a0aad36289..9beadb1227 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -46,7 +46,7 @@ import threading import time import warnings -from typing import Any, List, Optional +from typing import Any, List, Optional, Union import pytest @@ -61,7 +61,12 @@ from bson import decode, encode, json_util from gridfs import GridFSBucket -from pymongo import MongoClient +from pymongo import ( + DeleteOne, + InsertOne, + MongoClient, + ReplaceOne, +) pytestmark = pytest.mark.perf @@ -390,6 +395,15 @@ def setUp(self): self.documents = [self.document.copy() for _ in range(NUM_DOCS)] +class SmallDocMixedTest(TestDocument): + dataset = "small_doc.json" + + def setUp(self): + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS * 2 + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + class TestSmallDocInsertOne(SmallDocInsertTest, unittest.TestCase): def do_task(self): insert_one = self.corpus.insert_one @@ -429,11 +443,69 @@ def do_task(self): self.corpus.insert_many(self.documents, ordered=True) +class TestSmallDocClientBulkInsert(SmallDocInsertTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc.copy())) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + +class TestSmallDocBulkMixedOps(SmallDocMixedTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(document=doc.copy())) + self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) + self.models.append(DeleteOne(filter={})) + + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkMixedOps(SmallDocMixedTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) + def setUp(self): + super().setUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc.copy())) + self.models.append( + ReplaceOne( + namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True + ) + ) + self.models.append(DeleteOne(namespace="perftest.corpus", filter={})) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + class TestLargeDocBulkInsert(LargeDocInsertTest, unittest.TestCase): def do_task(self): self.corpus.insert_many(self.documents, ordered=True) +class TestLargeDocClientBulkInsert(LargeDocInsertTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc.copy())) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + class GridFsTest(PerformanceTest): def setUp(self): super().setUp() From 768858eed61625f715c95b09abec2daa0334de49 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Fri, 16 Aug 2024 15:55:30 -0700 Subject: [PATCH 1401/2111] PYTHON-4630 Add documentation for MongoClient.bulk_write (#1794) --- .github/workflows/test-python.yml | 2 +- doc/api/pymongo/asynchronous/mongo_client.rst | 1 + doc/api/pymongo/mongo_client.rst | 1 + doc/changelog.rst | 14 +- doc/examples/client_bulk.rst | 188 ++++++++++++++++++ doc/examples/index.rst | 1 + pymongo/asynchronous/mongo_client.py | 6 +- pymongo/errors.py | 4 +- pymongo/synchronous/mongo_client.py | 6 +- 9 files changed, 215 insertions(+), 8 deletions(-) create mode 100644 doc/examples/client_bulk.rst diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index ba04e8e418..036b2c4b76 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -96,7 +96,7 @@ jobs: - name: Start MongoDB uses: supercharge/mongodb-github-action@1.10.0 with: - mongodb-version: 4.4 + mongodb-version: '8.0.0-rc4' - name: Run tests run: | hatch run doctest:test diff --git a/doc/api/pymongo/asynchronous/mongo_client.rst b/doc/api/pymongo/asynchronous/mongo_client.rst index afbd802ff2..75952f1b6d 100644 --- a/doc/api/pymongo/asynchronous/mongo_client.rst +++ b/doc/api/pymongo/asynchronous/mongo_client.rst @@ -35,5 +35,6 @@ .. automethod:: get_database .. automethod:: server_info .. automethod:: watch + .. automethod:: bulk_write .. automethod:: __getitem__ .. automethod:: __getattr__ diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index 37ec8ae002..0409e7ef68 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -35,5 +35,6 @@ .. automethod:: get_database .. automethod:: server_info .. automethod:: watch + .. automethod:: bulk_write .. automethod:: __getitem__ .. automethod:: __getattr__ diff --git a/doc/changelog.rst b/doc/changelog.rst index d14a466cdb..6a9744cfd1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -16,7 +16,19 @@ PyMongo 4.9 brings a number of improvements including: :class:`~pymongo.asynchronous.cursor.AsyncCursor`, and :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` as an asynchronous-friendly alternative to ``list(cursor)``. - +- Added :meth:`~pymongo.mongo_client.MongoClient.bulk_write` to :class:`~pymongo.mongo_client.MongoClient` + and :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient`, + enabling users to perform insert, update, and delete operations + against mixed namespaces in a minimized number of round trips. + Please see :doc:`examples/client_bulk` for more information. +- Added support for the ``namespace`` parameter to the + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.DeleteOne`, and + :class:`~pymongo.operations.DeleteMany` operations, so + they can be used in the new :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. Issues Resolved ............... diff --git a/doc/examples/client_bulk.rst b/doc/examples/client_bulk.rst new file mode 100644 index 0000000000..d2b4a70e21 --- /dev/null +++ b/doc/examples/client_bulk.rst @@ -0,0 +1,188 @@ +Client Bulk Write Operations +============================= + +.. testsetup:: + + from pymongo import MongoClient + + client = MongoClient() + client.drop_database("client_bulk_example") + db = client.client_bulk_example + client.db.drop_collection("test_one") + client.db.drop_collection("test_two") + client.db.drop_collection("test_three") + client.db.drop_collection("test_four") + client.db.drop_collection("test_five") + client.db.drop_collection("test_six") + +The :meth:`~pymongo.mongo_client.MongoClient.bulk_write` +method has been added to :class:`~pymongo.mongo_client.MongoClient` in PyMongo 4.9. +This method enables users to perform batches of write operations **across +multiple namespaces** in a minimized number of round trips, and +to receive detailed results for each operation performed. + +.. note:: This method requires MongoDB server version 8.0+. + +Basic Usage +------------ + +A list of insert, update, and delete operations can be passed into the +:meth:`~pymongo.mongo_client.MongoClient.bulk_write` method. Each request +must include the namespace on which to perform the operation. + +PyMongo will automatically split the given requests into smaller sub-batches based on +the maximum message size accepted by MongoDB, supporting very large bulk write operations. + +The return value is an instance of +:class:`~pymongo.results.ClientBulkWriteResult`. + +.. _summary_client_bulk: + +Summary Results +................. + +By default, the returned :class:`~pymongo.results.ClientBulkWriteResult` instance will contain a +summary of the types of operations performed in the bulk write, along with their respective counts. + +.. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from pymongo import InsertOne, DeleteOne, UpdateOne + >>> models = [ + ... InsertOne(namespace="db.test_one", document={"_id": 1}), + ... InsertOne(namespace="db.test_two", document={"_id": 2}), + ... DeleteOne(namespace="db.test_one", filter={"_id": 1}), + ... UpdateOne( + ... namespace="db.test_two", + ... filter={"_id": 4}, + ... update={"$inc": {"j": 1}}, + ... upsert=True, + ... ), + ... ] + >>> result = client.bulk_write(models) + >>> result.inserted_count + 2 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_count + 1 + +.. _verbose_client_bulk: + +Verbose Results +................. + +If the ``verbose_results`` parameter is set to True, the returned :class:`~pymongo.results.ClientBulkWriteResult` +instance will also include detailed results about each successful operation performed as part of the bulk write. + +.. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateMany + >>> models = [ + ... DeleteMany( + ... namespace="db.test_two", filter={} + ... ), # Delete all documents from the previous example + ... InsertOne(namespace="db.test_one", document={"_id": 1}), + ... InsertOne(namespace="db.test_one", document={"_id": 2}), + ... InsertOne(namespace="db.test_two", document={"_id": 3}), + ... UpdateMany(namespace="db.test_one", filter={}, update={"$set": {"foo": "bar"}}), + ... ReplaceOne( + ... namespace="db.test_two", filter={"j": 1}, replacement={"_id": 4}, upsert=True + ... ), + ... ] + >>> result = client.bulk_write(models, verbose_results=True) + >>> result.delete_results + {0: DeleteResult({'ok': 1.0, 'idx': 0, 'n': 2}, ...)} + >>> result.insert_results + {1: InsertOneResult(1, ...), + 2: InsertOneResult(2, ...), + 3: InsertOneResult(3, ...)} + >>> result.update_results + {4: UpdateResult({'ok': 1.0, 'idx': 4, 'n': 2, 'nModified': 2}, ...), + 5: UpdateResult({'ok': 1.0, 'idx': 5, 'n': 1, 'nModified': 0, 'upserted': {'_id': 4}}, ...)} + + +Handling Errors +---------------- + +If any errors occur during the bulk write, a :class:`~pymongo.errors.ClientBulkWriteException` will be raised. +If a server, connection, or network error occurred, the ``error`` field of the exception will contain +that error. + +Individual write errors or write concern errors get recorded in the ``write_errors`` and ``write_concern_errors`` fields of the exception. +The ``partial_result`` field gets populated with the results of any operations that were successfully completed before the exception was raised. + +.. _ordered_client_bulk: + +Ordered Operations +.................... + +In an ordered bulk write (the default), if an individual write fails, no further operations will get executed. +For example, a duplicate key error on the third operation below aborts the remaining two operations. + +.. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from pymongo import InsertOne, DeleteOne + >>> from pymongo.errors import ClientBulkWriteException + >>> models = [ + ... InsertOne(namespace="db.test_three", document={"_id": 3}), + ... InsertOne(namespace="db.test_four", document={"_id": 4}), + ... InsertOne(namespace="db.test_three", document={"_id": 3}), # Duplicate _id + ... InsertOne(namespace="db.test_four", document={"_id": 5}), + ... DeleteOne(namespace="db.test_three", filter={"_id": 3}), + ... ] + >>> try: + ... client.bulk_write(models) + ... except ClientBulkWriteException as cbwe: + ... exception = cbwe + ... + >>> exception.write_errors + [{'ok': 0.0, + 'idx': 2, + 'code': 11000, + 'errmsg': 'E11000 duplicate key error ... dup key: { _id: 3 }', ... + 'op': {'insert': 'db.test_three', 'document': {'_id': 3}}}] + >>> exception.partial_result.inserted_count + 2 + >>> exception.partial_result.deleted_count + 0 + +.. _unordered_client_bulk: + +Unordered Operations +..................... + +If the ``ordered`` parameter is set to False, all operations in the bulk write will be attempted, regardless of any individual write errors that occur. +For example, the fourth and fifth write operations below get executed successfully, despite the duplicate key error on the third operation. + +.. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from pymongo import InsertOne, DeleteOne + >>> from pymongo.errors import ClientBulkWriteException + >>> models = [ + ... InsertOne(namespace="db.test_five", document={"_id": 5}), + ... InsertOne(namespace="db.test_six", document={"_id": 6}), + ... InsertOne(namespace="db.test_five", document={"_id": 5}), # Duplicate _id + ... InsertOne(namespace="db.test_six", document={"_id": 7}), + ... DeleteOne(namespace="db.test_five", filter={"_id": 5}), + ... ] + >>> try: + ... client.bulk_write(models, ordered=False) + ... except ClientBulkWriteException as cbwe: + ... exception = cbwe + ... + >>> exception.write_errors + [{'ok': 0.0, + 'idx': 2, + 'code': 11000, + 'errmsg': 'E11000 duplicate key error ... dup key: { _id: 5 }', ... + 'op': {'insert': 'db.test_five', 'document': {'_id': 5}}}] + >>> exception.partial_result.inserted_count + 3 + >>> exception.partial_result.deleted_count + 1 diff --git a/doc/examples/index.rst b/doc/examples/index.rst index 75d208f20f..ac450470ef 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -22,6 +22,7 @@ MongoDB, you can start it like so: copydb custom_type bulk + client_bulk datetimes geo gevent diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 8848fa4fd5..e1a9d7735d 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2274,8 +2274,8 @@ async def bulk_write( 1 >>> result.modified_count 0 - >>> result.upserted_ids - {3: ObjectId('54f62ee28891e756a6e1abd5')} + >>> result.upserted_count + 1 >>> async for doc in db.test.find({}): ... print(doc) ... @@ -2312,6 +2312,8 @@ async def bulk_write( :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. + .. seealso:: For more info, see :doc:`/examples/client_bulk`. + .. seealso:: :ref:`writes-and-ids` .. note:: requires MongoDB server version 8.0+. diff --git a/pymongo/errors.py b/pymongo/errors.py index 1c51708c72..2cd1081e3b 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -342,10 +342,10 @@ def write_concern_errors(self) -> Optional[list[WriteConcernError]]: return self.details.get("writeConcernErrors", []) @property - def write_errors(self) -> Optional[Mapping[int, WriteError]]: + def write_errors(self) -> Optional[list[WriteError]]: """Errors that occurred during the execution of individual write operations. - This map will contain at most one entry if the bulk write was ordered. + This list will contain at most one entry if the bulk write was ordered. """ return self.details.get("writeErrors", {}) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 4aff3b5eed..287ad6af7f 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2263,8 +2263,8 @@ def bulk_write( 1 >>> result.modified_count 0 - >>> result.upserted_ids - {3: ObjectId('54f62ee28891e756a6e1abd5')} + >>> result.upserted_count + 1 >>> for doc in db.test.find({}): ... print(doc) ... @@ -2301,6 +2301,8 @@ def bulk_write( :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. + .. seealso:: For more info, see :doc:`/examples/client_bulk`. + .. seealso:: :ref:`writes-and-ids` .. note:: requires MongoDB server version 8.0+. From 559d8b1ea1a3e93a1d96562b66d9e6ecd05028ff Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Fri, 16 Aug 2024 15:55:57 -0700 Subject: [PATCH 1402/2111] PYTHON-4596 Only encode each operation document once for MongoClient.bulk_write (#1797) --- pymongo/message.py | 62 +++++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 25 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 90fac85452..6a21409c52 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -985,11 +985,10 @@ def _start( def _client_construct_op_msg( - command: Mapping[str, Any], - to_send_ops: list[Mapping[str, Any]], - to_send_ns: list[Mapping[str, Any]], + command_encoded: bytes, + to_send_ops_encoded: list[bytes], + to_send_ns_encoded: list[bytes], ack: bool, - opts: CodecOptions, buf: _BytesIO, ) -> int: # Write flags @@ -998,7 +997,7 @@ def _client_construct_op_msg( # Type 0 Section buf.write(b"\x00") - buf.write(_dict_to_bson(command, False, opts)) + buf.write(command_encoded) # Type 1 Section for ops buf.write(b"\x01") @@ -1007,8 +1006,8 @@ def _client_construct_op_msg( buf.write(b"\x00\x00\x00\x00") buf.write(b"ops\x00") # Write all the ops documents - for op in to_send_ops: - buf.write(_dict_to_bson(op, False, opts)) + for op_encoded in to_send_ops_encoded: + buf.write(op_encoded) resume_location = buf.tell() # Write type 1 section size length = buf.tell() @@ -1023,8 +1022,8 @@ def _client_construct_op_msg( buf.write(b"\x00\x00\x00\x00") buf.write(b"nsInfo\x00") # Write all the nsInfo documents - for ns in to_send_ns: - buf.write(_dict_to_bson(ns, False, opts)) + for ns_encoded in to_send_ns_encoded: + buf.write(ns_encoded) # Write type 1 section size length = buf.tell() buf.seek(size_location) @@ -1045,19 +1044,23 @@ def _client_batched_op_msg_impl( def _check_doc_size_limits( op_type: str, - document: Mapping[str, Any], + doc_size: int, limit: int, - ) -> int: - doc_size = len(_dict_to_bson(document, False, opts)) + ) -> None: if doc_size > limit: _raise_document_too_large(op_type, doc_size, limit) - return doc_size max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size max_message_size = ctx.max_message_size - # Don't include bulkWrite-command-agnostic fields in document size calculations. + command_encoded = _dict_to_bson(command, False, opts) + # When OP_MSG is used unacknowledged we have to check command + # document size client-side or applications won't be notified. + if not ack: + _check_doc_size_limits("bulkWrite", len(command_encoded), max_bson_size + _COMMAND_OVERHEAD) + + # Don't include bulkWrite-command-agnostic fields in batch-splitting calculations. abridged_keys = ["bulkWrite", "errorsOnly", "ordered"] if command.get("bypassDocumentValidation"): abridged_keys.append("bypassDocumentValidation") @@ -1068,17 +1071,14 @@ def _check_doc_size_limits( command_abridged = {key: command[key] for key in abridged_keys} command_len_abridged = len(_dict_to_bson(command_abridged, False, opts)) - # When OP_MSG is used unacknowledged we have to check command - # document size client-side or applications won't be notified. - if not ack: - _check_doc_size_limits("bulkWrite", command_abridged, max_bson_size + _COMMAND_OVERHEAD) - # Maximum combined size of the ops and nsInfo document sequences. max_doc_sequences_bytes = max_message_size - (_OP_MSG_OVERHEAD + command_len_abridged) ns_info = {} to_send_ops: list[Mapping[str, Any]] = [] to_send_ns: list[Mapping[str, int]] = [] + to_send_ops_encoded: list[bytes] = [] + to_send_ns_encoded: list[bytes] = [] total_ops_length = 0 total_ns_length = 0 idx = 0 @@ -1088,11 +1088,13 @@ def _check_doc_size_limits( # Check insert/replace document size if unacknowledged. if real_op_type == "insert": if not ack: - _check_doc_size_limits(real_op_type, op_doc["document"], max_bson_size) + doc_size = len(_dict_to_bson(op_doc["document"], False, opts)) + _check_doc_size_limits(real_op_type, doc_size, max_bson_size) if real_op_type == "replace": op_type = "update" if not ack: - _check_doc_size_limits(real_op_type, op_doc["updateMods"], max_bson_size) + doc_size = len(_dict_to_bson(op_doc["updateMods"], False, opts)) + _check_doc_size_limits(real_op_type, doc_size, max_bson_size) ns_doc_to_send = None ns_length = 0 @@ -1108,30 +1110,40 @@ def _check_doc_size_limits( op_doc_to_send[op_type] = ns_info[namespace] # type: ignore[index] # Encode current operation doc and, if newly added, namespace doc. - op_length = len(_dict_to_bson(op_doc_to_send, False, opts)) + op_doc_encoded = _dict_to_bson(op_doc_to_send, False, opts) + op_length = len(op_doc_encoded) if ns_doc_to_send: - ns_length = len(_dict_to_bson(ns_doc_to_send, False, opts)) + ns_doc_encoded = _dict_to_bson(ns_doc_to_send, False, opts) + ns_length = len(ns_doc_encoded) # Check operation document size if unacknowledged. if not ack: - _check_doc_size_limits(op_type, op_doc_to_send, max_bson_size + _COMMAND_OVERHEAD) + _check_doc_size_limits(op_type, op_length, max_bson_size + _COMMAND_OVERHEAD) new_message_size = total_ops_length + total_ns_length + op_length + ns_length # We have enough data, return this batch. if new_message_size > max_doc_sequences_bytes: break + + # Add op and ns documents to this batch. to_send_ops.append(op_doc_to_send) + to_send_ops_encoded.append(op_doc_encoded) total_ops_length += op_length if ns_doc_to_send: to_send_ns.append(ns_doc_to_send) + to_send_ns_encoded.append(ns_doc_encoded) total_ns_length += ns_length + idx += 1 + # We have enough documents, return this batch. if idx == max_write_batch_size: break # Construct the entire OP_MSG. - length = _client_construct_op_msg(command, to_send_ops, to_send_ns, ack, opts, buf) + length = _client_construct_op_msg( + command_encoded, to_send_ops_encoded, to_send_ns_encoded, ack, buf + ) return to_send_ops, to_send_ns, length From efcecc9a7fbe9ee3850d274b8a6dff74e0a0b30e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 19 Aug 2024 12:57:15 -0500 Subject: [PATCH 1403/2111] PYTHON-4648 Fix handling of event_loop_policy in tests (#1799) --- test/__init__.py | 10 ---------- test/asynchronous/__init__.py | 10 ---------- test/asynchronous/conftest.py | 14 ++++++++++++++ test/conftest.py | 13 +++++++++++++ 4 files changed, 27 insertions(+), 20 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index e60736e3e8..2a23ae0fd3 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -79,16 +79,6 @@ _IS_SYNC = True -# The default asyncio loop implementation on Windows -# has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) -# We explicitly use a different loop implementation here to prevent that issue -if ( - not _IS_SYNC - and sys.platform == "win32" - and asyncio.get_event_loop_policy() == asyncio.WindowsProactorEventLoopPolicy -): - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore[attr-defined] - class ClientContext: client: MongoClient diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 900b260c2e..3d22b5ff76 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -79,16 +79,6 @@ _IS_SYNC = False -# The default asyncio loop implementation on Windows -# has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) -# We explicitly use a different loop implementation here to prevent that issue -if ( - not _IS_SYNC - and sys.platform == "win32" - and asyncio.get_event_loop_policy() == asyncio.WindowsProactorEventLoopPolicy -): - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore[attr-defined] - class AsyncClientContext: client: AsyncMongoClient diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py index f5bcd953a4..e443dff6c0 100644 --- a/test/asynchronous/conftest.py +++ b/test/asynchronous/conftest.py @@ -1,13 +1,27 @@ from __future__ import annotations +import asyncio +import sys from test import pytest_conf from test.asynchronous import async_setup, async_teardown +import pytest import pytest_asyncio _IS_SYNC = False +@pytest.fixture(scope="session") +def event_loop_policy(): + # The default asyncio loop implementation on Windows + # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) + # We explicitly use a different loop implementation here to prevent that issue + if sys.platform == "win32": + return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] + + return asyncio.get_event_loop_policy() + + @pytest_asyncio.fixture(scope="session", autouse=True) async def test_setup_and_teardown(): await async_setup() diff --git a/test/conftest.py b/test/conftest.py index 431dd152fe..a3d954c7c3 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -1,5 +1,7 @@ from __future__ import annotations +import asyncio +import sys from test import pytest_conf, setup, teardown import pytest @@ -7,6 +9,17 @@ _IS_SYNC = True +@pytest.fixture(scope="session") +def event_loop_policy(): + # The default asyncio loop implementation on Windows + # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) + # We explicitly use a different loop implementation here to prevent that issue + if sys.platform == "win32": + return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] + + return asyncio.get_event_loop_policy() + + @pytest.fixture(scope="session", autouse=True) def test_setup_and_teardown(): setup() From f16206cb8975773d77617d495f11582d0c3783ff Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 19 Aug 2024 14:29:18 -0500 Subject: [PATCH 1404/2111] PYTHON-4392 Support Range Indexes as GA (#1795) --- doc/changelog.rst | 5 +- pymongo/encryption_options.py | 6 +- .../spec/legacy/fle2v2-Compact.json | 3 + .../spec/legacy/fle2v2-Rangev2-Compact.json | 289 +++++++++++++ .../legacy/fle2v2-Rangev2-Date-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Date-Delete.json | 2 +- .../fle2v2-Rangev2-Date-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Date-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Date-Update.json | 2 +- .../fle2v2-Rangev2-Decimal-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Decimal-Delete.json | 2 +- ...e2v2-Rangev2-Decimal-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Decimal-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Decimal-Update.json | 2 +- ...v2-Rangev2-DecimalPrecision-Aggregate.json | 2 +- ...le2v2-Rangev2-DecimalPrecision-Delete.json | 2 +- ...ev2-DecimalPrecision-FindOneAndUpdate.json | 2 +- ...2-Rangev2-DecimalPrecision-InsertFind.json | 2 +- ...le2v2-Rangev2-DecimalPrecision-Update.json | 2 +- .../spec/legacy/fle2v2-Rangev2-Defaults.json | 381 ++++++++++++++++++ .../fle2v2-Rangev2-Double-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Double-Delete.json | 2 +- ...le2v2-Rangev2-Double-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Double-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Double-Update.json | 2 +- ...2v2-Rangev2-DoublePrecision-Aggregate.json | 2 +- ...fle2v2-Rangev2-DoublePrecision-Delete.json | 2 +- ...gev2-DoublePrecision-FindOneAndUpdate.json | 2 +- ...v2-Rangev2-DoublePrecision-InsertFind.json | 2 +- ...fle2v2-Rangev2-DoublePrecision-Update.json | 2 +- .../legacy/fle2v2-Rangev2-Int-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Int-Delete.json | 2 +- .../fle2v2-Rangev2-Int-FindOneAndUpdate.json | 2 +- .../legacy/fle2v2-Rangev2-Int-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Int-Update.json | 2 +- .../legacy/fle2v2-Rangev2-Long-Aggregate.json | 2 +- .../legacy/fle2v2-Rangev2-Long-Delete.json | 2 +- .../fle2v2-Rangev2-Long-FindOneAndUpdate.json | 2 +- .../fle2v2-Rangev2-Long-InsertFind.json | 2 +- .../legacy/fle2v2-Rangev2-Long-Update.json | 2 +- test/test_encryption.py | 33 ++ 41 files changed, 747 insertions(+), 40 deletions(-) create mode 100644 test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json create mode 100644 test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 6a9744cfd1..8d1c6f1182 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,8 +8,9 @@ PyMongo 4.9 brings a number of improvements including: - Added support for MongoDB 8.0. - A new asynchronous API with full asyncio support. -- Add support for :attr:`~pymongo.encryption.Algorithm.RANGE` and deprecate - :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW`. +- Added support for In-Use Encryption range queries with MongoDB 8.0. + Added :attr:`~pymongo.encryption.Algorithm.RANGE`. + ``sparsity`` and ``trim_factor`` are now optional in :class:`~pymongo.encryption_options.RangeOpts`. - pymongocrypt>=1.10 is now required for :ref:`In-Use Encryption` support. - Added :meth:`~pymongo.cursor.Cursor.to_list` to :class:`~pymongo.cursor.Cursor`, :class:`~pymongo.command_cursor.CommandCursor`, diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 9b31acb59e..3b0d32a4b9 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -238,8 +238,8 @@ class RangeOpts: def __init__( self, - sparsity: int, - trim_factor: int, + sparsity: Optional[int] = None, + trim_factor: Optional[int] = None, min: Optional[Any] = None, max: Optional[Any] = None, precision: Optional[int] = None, @@ -264,7 +264,7 @@ def __init__( def document(self) -> dict[str, Any]: doc = {} for k, v in [ - ("sparsity", int64.Int64(self.sparsity)), + ("sparsity", int64.Int64(self.sparsity) if self.sparsity else None), ("trimFactor", self.trim_factor), ("precision", self.precision), ("min", self.min), diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json index 27310cb59f..868095e1e6 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -130,6 +130,9 @@ "command": { "compactStructuredEncryptionData": "default" } + }, + "result": { + "ok": 1 } } ], diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json new file mode 100644 index 0000000000..59241927ca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json @@ -0,0 +1,289 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "ok": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json index 63a2db3ef1..df2161cc36 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json @@ -328,7 +328,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json index 63a2b29fcc..b4f15d9b1f 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json @@ -317,7 +317,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json index 049186c869..97ab4aaeb9 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -330,7 +330,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json index d0751434b5..a011c388e4 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json @@ -322,7 +322,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json index 1e7750feeb..6bab6499f5 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json @@ -330,7 +330,7 @@ "encryptedDate": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json index 5f573a933d..d1a82c2164 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json @@ -288,7 +288,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json index a94dd40fee..19cae3c64f 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json @@ -279,7 +279,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json index 5226facfb6..4ab3b63ea5 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -288,7 +288,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json index b6615454bd..5a2adf6907 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json @@ -282,7 +282,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json index ceef8ca9ba..b840d38347 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json @@ -290,7 +290,7 @@ "encryptedDecimalNoPrecision": { "$gt": { "$binary": { - "base64": "DeFiAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAAA", + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json index 35cc4aba87..271f57b125 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -317,7 +317,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json index e000c40589..7b3d5d8225 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -308,7 +308,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json index 27f10a30a7..af371f7b3f 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -317,7 +317,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json index 5fb96730d6..bbe81f87ad 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -311,7 +311,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json index f67ae3ca23..987bdf1aa6 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -319,7 +319,7 @@ "encryptedDecimalPrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..c2a119cb7f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,381 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json index e14ca8ff0c..daa7f4e973 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json @@ -290,7 +290,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json index 6821c97939..4a9c1f27b5 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json @@ -281,7 +281,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json index 298a4506cc..d7860de83e 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -290,7 +290,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json index 0c6f9e9872..934af381f1 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json @@ -284,7 +284,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json index dabe8a0930..ec95e0334a 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json @@ -292,7 +292,7 @@ "encryptedDoubleNoPrecision": { "$gt": { "$binary": { - "base64": "DYckAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json index 8d434dc279..e8a50ebeca 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -317,7 +317,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json index a9315dec96..8a0fecf786 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -308,7 +308,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json index 28bebe0dbb..ac77931d61 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -317,7 +317,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json index 3b3176be6f..5dcc09dca9 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -311,7 +311,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json index be2d0e9f4a..483e3d52e6 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json @@ -319,7 +319,7 @@ "encryptedDoublePrecision": { "$gt": { "$binary": { - "base64": "DdIJAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json index c689dede18..6cd837c789 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json @@ -308,7 +308,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json index 4a6b34a1dc..b251db9157 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json @@ -299,7 +299,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json index 2bf905fa65..6e09b5ea2c 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -308,7 +308,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json index a5eb4d60ec..cbab7e7699 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json @@ -302,7 +302,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json index e826ea2acf..cb6b223943 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json @@ -310,7 +310,7 @@ "encryptedInt": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json index d5020f5927..5c4bf10101 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json @@ -308,7 +308,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json index 3720d00341..faf0c401b7 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json @@ -299,7 +299,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json index 5e4b5ae0de..b233b40b54 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -308,7 +308,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json index 0d48580626..1b787d4cb6 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json @@ -302,7 +302,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json index 2d3321fd80..07182bb5e2 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json @@ -310,7 +310,7 @@ "encryptedLong": { "$gt": { "$binary": { - "base64": "DUkFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAAA==", + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", "subType": "06" } } diff --git a/test/test_encryption.py b/test/test_encryption.py index 3c8d066a20..464a91303f 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2817,6 +2817,39 @@ def test_int(self): self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1, trim_factor=1), int) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#23-range-explicit-encryption-applies-defaults +class TestRangeQueryDefaultsProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 0, -1) + def setUp(self): + super().setUp() + self.client.drop_database(self.db) + self.key_vault_client = self.client + self.client_encryption = ClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS + ) + self.addCleanup(self.client_encryption.close) + self.key_id = self.client_encryption.create_data_key("local") + opts = RangeOpts(min=0, max=1000) + self.payload_defaults = self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + + def test_uses_libmongocrypt_defaults(self): + opts = RangeOpts(min=0, max=1000, sparsity=2, trim_factor=6) + payload = self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) == len(self.payload_defaults) + + def test_accepts_trim_factor_0(self): + opts = RangeOpts(min=0, max=1000, trim_factor=0) + payload = self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) > len(self.payload_defaults) + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): @client_context.require_no_standalone From ad888797cf8943ef998e2a6019a59b9d2adeb80b Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Mon, 19 Aug 2024 12:57:57 -0700 Subject: [PATCH 1405/2111] PYTHON-4666 Fix handling of large documents in client.bulk_write (#1798) --- pymongo/message.py | 2 ++ test/asynchronous/test_client_bulk_write.py | 6 ++---- test/test_client_bulk_write.py | 6 ++---- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/pymongo/message.py b/pymongo/message.py index 6a21409c52..b8d88bf10d 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1123,6 +1123,8 @@ def _check_doc_size_limits( new_message_size = total_ops_length + total_ns_length + op_length + ns_length # We have enough data, return this batch. if new_message_size > max_doc_sequences_bytes: + if idx == 0: + _raise_document_too_large(op_type, op_length, max_bson_size + _COMMAND_OVERHEAD) break # Add op and ns documents to this batch. diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 20e6ab7c95..efae6957ce 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -512,17 +512,15 @@ async def test_returns_error_if_no_writes_can_be_added_to_ops(self): # Document too large. b_repeated = "b" * self.max_message_size_bytes models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] - with self.assertRaises(InvalidOperation) as context: + with self.assertRaises(DocumentTooLarge): await client.bulk_write(models=models) - self.assertIn("cannot do an empty bulk write", context.exception._message) # Namespace too large. c_repeated = "c" * self.max_message_size_bytes namespace = f"db.{c_repeated}" models = [InsertOne(namespace=namespace, document={"a": "b"})] - with self.assertRaises(InvalidOperation) as context: + with self.assertRaises(DocumentTooLarge): await client.bulk_write(models=models) - self.assertIn("cannot do an empty bulk write", context.exception._message) @async_client_context.require_version_min(8, 0, 0, -24) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 686b60642a..daf0ad8321 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -512,17 +512,15 @@ def test_returns_error_if_no_writes_can_be_added_to_ops(self): # Document too large. b_repeated = "b" * self.max_message_size_bytes models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] - with self.assertRaises(InvalidOperation) as context: + with self.assertRaises(DocumentTooLarge): client.bulk_write(models=models) - self.assertIn("cannot do an empty bulk write", context.exception._message) # Namespace too large. c_repeated = "c" * self.max_message_size_bytes namespace = f"db.{c_repeated}" models = [InsertOne(namespace=namespace, document={"a": "b"})] - with self.assertRaises(InvalidOperation) as context: + with self.assertRaises(DocumentTooLarge): client.bulk_write(models=models) - self.assertIn("cannot do an empty bulk write", context.exception._message) @client_context.require_version_min(8, 0, 0, -24) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") From c03721c8f55b37dae08b5f6ad15d0c38cc3aed8c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 20 Aug 2024 14:37:00 -0500 Subject: [PATCH 1406/2111] PYTHON-4656 Fix running of enterprise auth tests (#1801) --- .evergreen/run-tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index beee1ed287..66df6b26ca 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -36,6 +36,7 @@ TEST_ARGS="${*:1}" export PIP_QUIET=1 # Quiet by default export PIP_PREFER_BINARY=1 # Prefer binary dists by default +set +x python -c "import sys; sys.exit(sys.prefix == sys.base_prefix)" || (echo "Not inside a virtual env!"; exit 1) # Try to source local Drivers Secrets @@ -47,7 +48,6 @@ else fi if [ "$AUTH" != "noauth" ]; then - set +x if [ ! -z "$TEST_DATA_LAKE" ]; then export DB_USER="mhuser" export DB_PASSWORD="pencil" @@ -68,10 +68,10 @@ if [ "$AUTH" != "noauth" ]; then export DB_PASSWORD="pwd123" fi echo "Added auth, DB_USER: $DB_USER" - set -x fi if [ -n "$TEST_ENTERPRISE_AUTH" ]; then + python -m pip install '.[gssapi]' if [ "Windows_NT" = "$OS" ]; then echo "Setting GSSAPI_PASS" export GSSAPI_PASS=${SASL_PASS} From 4024a1b85d7265dc5474642b542992c916e22877 Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Tue, 20 Aug 2024 17:18:28 -0700 Subject: [PATCH 1407/2111] PYTHON-4668 Improve performance of client.bulk_write (#1800) --- doc/examples/client_bulk.rst | 4 +- pymongo/asynchronous/client_bulk.py | 29 ++++++++----- pymongo/message.py | 48 ++++++++++++--------- pymongo/synchronous/client_bulk.py | 29 ++++++++----- test/asynchronous/test_client_bulk_write.py | 24 +++++++---- test/performance/perf_test.py | 8 ++-- test/test_client_bulk_write.py | 24 +++++++---- 7 files changed, 103 insertions(+), 63 deletions(-) diff --git a/doc/examples/client_bulk.rst b/doc/examples/client_bulk.rst index d2b4a70e21..447f09688f 100644 --- a/doc/examples/client_bulk.rst +++ b/doc/examples/client_bulk.rst @@ -145,7 +145,7 @@ For example, a duplicate key error on the third operation below aborts the remai 'idx': 2, 'code': 11000, 'errmsg': 'E11000 duplicate key error ... dup key: { _id: 3 }', ... - 'op': {'insert': 'db.test_three', 'document': {'_id': 3}}}] + 'op': {'insert': 0, 'document': {'_id': 3}}}] >>> exception.partial_result.inserted_count 2 >>> exception.partial_result.deleted_count @@ -181,7 +181,7 @@ For example, the fourth and fifth write operations below get executed successful 'idx': 2, 'code': 11000, 'errmsg': 'E11000 duplicate key error ... dup key: { _id: 5 }', ... - 'op': {'insert': 'db.test_five', 'document': {'_id': 5}}}] + 'op': {'insert': 0, 'document': {'_id': 5}}}] >>> exception.partial_result.inserted_count 3 >>> exception.partial_result.deleted_count diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 1f3cca2f6c..b9ab6b876b 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -108,6 +108,7 @@ def __init__( self.verbose_results = verbose_results self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.namespaces: list[str] = [] self.idx_offset: int = 0 self.total_ops: int = 0 @@ -132,8 +133,9 @@ def add_insert(self, namespace: str, document: _DocumentOut) -> None: # Generate ObjectId client side. if not (isinstance(document, RawBSONDocument) or "_id" in document): document["_id"] = ObjectId() - cmd = {"insert": namespace, "document": document} + cmd = {"insert": -1, "document": document} self.ops.append(("insert", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 def add_update( @@ -150,7 +152,7 @@ def add_update( """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) cmd = { - "update": namespace, + "update": -1, "filter": selector, "updateMods": update, "multi": multi, @@ -171,6 +173,7 @@ def add_update( # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append(("update", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 def add_replace( @@ -185,7 +188,7 @@ def add_replace( """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) cmd = { - "update": namespace, + "update": -1, "filter": selector, "updateMods": replacement, "multi": False, @@ -200,6 +203,7 @@ def add_replace( self.uses_collation = True cmd["collation"] = collation self.ops.append(("replace", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 def add_delete( @@ -211,7 +215,7 @@ def add_delete( hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create a delete document and add it to the list of ops.""" - cmd = {"delete": namespace, "filter": selector, "multi": multi} + cmd = {"delete": -1, "filter": selector, "multi": multi} if hint is not None: self.uses_hint_delete = True cmd["hint"] = hint @@ -222,6 +226,7 @@ def add_delete( # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append(("delete", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 @_handle_reauth @@ -407,9 +412,10 @@ async def _execute_batch_unack( bwc: _ClientBulkWriteContext, cmd: dict[str, Any], ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Executes a batch of bulkWrite server commands (unack).""" - request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) await self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] return to_send_ops, to_send_ns @@ -418,9 +424,10 @@ async def _execute_batch( bwc: _ClientBulkWriteContext, cmd: dict[str, Any], ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Executes a batch of bulkWrite server commands (ack).""" - request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) result = await self.write_command( bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client ) # type: ignore[arg-type] @@ -540,11 +547,12 @@ async def _execute_command( # CSOT: apply timeout before encoding the command. conn.apply_timeout(self.client, cmd) ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) # Run as many ops as possible in one server command. if write_concern.acknowledged: - raw_result, to_send_ops, _ = await self._execute_batch(bwc, cmd, ops) # type: ignore[arg-type] - result = copy.deepcopy(raw_result) + raw_result, to_send_ops, _ = await self._execute_batch(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + result = raw_result # Top-level server/network error. if result.get("error"): @@ -600,7 +608,7 @@ async def _execute_command( self.started_retryable_write = False else: - to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] self.idx_offset += len(to_send_ops) @@ -697,9 +705,10 @@ async def execute_command_unack_unordered( conn.add_server_api(cmd) ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) # Run as many ops as possible in one server command. - to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] self.idx_offset += len(to_send_ops) diff --git a/pymongo/message.py b/pymongo/message.py index b8d88bf10d..de77ccd382 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -21,7 +21,6 @@ """ from __future__ import annotations -import copy import datetime import random import struct @@ -950,10 +949,13 @@ def __init__( ) def batch_command( - self, cmd: MutableMapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]] + self, + cmd: MutableMapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]], list[Mapping[str, Any]]]: request_id, msg, to_send_ops, to_send_ns = _client_do_batched_op_msg( - cmd, operations, self.codec, self + cmd, operations, namespaces, self.codec, self ) if not to_send_ops: raise InvalidOperation("cannot do an empty bulk write") @@ -1035,6 +1037,7 @@ def _client_construct_op_msg( def _client_batched_op_msg_impl( command: Mapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ack: bool, opts: CodecOptions, ctx: _ClientBulkWriteContext, @@ -1076,14 +1079,14 @@ def _check_doc_size_limits( ns_info = {} to_send_ops: list[Mapping[str, Any]] = [] - to_send_ns: list[Mapping[str, int]] = [] + to_send_ns: list[Mapping[str, str]] = [] to_send_ops_encoded: list[bytes] = [] to_send_ns_encoded: list[bytes] = [] total_ops_length = 0 total_ns_length = 0 idx = 0 - for real_op_type, op_doc in operations: + for (real_op_type, op_doc), namespace in zip(operations, namespaces): op_type = real_op_type # Check insert/replace document size if unacknowledged. if real_op_type == "insert": @@ -1096,24 +1099,23 @@ def _check_doc_size_limits( doc_size = len(_dict_to_bson(op_doc["updateMods"], False, opts)) _check_doc_size_limits(real_op_type, doc_size, max_bson_size) - ns_doc_to_send = None + ns_doc = None ns_length = 0 - namespace = op_doc[op_type] + if namespace not in ns_info: - ns_doc_to_send = {"ns": namespace} + ns_doc = {"ns": namespace} new_ns_index = len(to_send_ns) ns_info[namespace] = new_ns_index # First entry in the operation doc has the operation type as its # key and the index of its namespace within ns_info as its value. - op_doc_to_send = copy.deepcopy(op_doc) - op_doc_to_send[op_type] = ns_info[namespace] # type: ignore[index] + op_doc[op_type] = ns_info[namespace] # type: ignore[index] # Encode current operation doc and, if newly added, namespace doc. - op_doc_encoded = _dict_to_bson(op_doc_to_send, False, opts) + op_doc_encoded = _dict_to_bson(op_doc, False, opts) op_length = len(op_doc_encoded) - if ns_doc_to_send: - ns_doc_encoded = _dict_to_bson(ns_doc_to_send, False, opts) + if ns_doc: + ns_doc_encoded = _dict_to_bson(ns_doc, False, opts) ns_length = len(ns_doc_encoded) # Check operation document size if unacknowledged. @@ -1128,11 +1130,11 @@ def _check_doc_size_limits( break # Add op and ns documents to this batch. - to_send_ops.append(op_doc_to_send) + to_send_ops.append(op_doc) to_send_ops_encoded.append(op_doc_encoded) total_ops_length += op_length - if ns_doc_to_send: - to_send_ns.append(ns_doc_to_send) + if ns_doc: + to_send_ns.append(ns_doc) to_send_ns_encoded.append(ns_doc_encoded) total_ns_length += ns_length @@ -1153,6 +1155,7 @@ def _check_doc_size_limits( def _client_encode_batched_op_msg( command: Mapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ack: bool, opts: CodecOptions, ctx: _ClientBulkWriteContext, @@ -1163,7 +1166,7 @@ def _client_encode_batched_op_msg( buf = _BytesIO() to_send_ops, to_send_ns, _ = _client_batched_op_msg_impl( - command, operations, ack, opts, ctx, buf + command, operations, namespaces, ack, opts, ctx, buf ) return buf.getvalue(), to_send_ops, to_send_ns @@ -1171,6 +1174,7 @@ def _client_encode_batched_op_msg( def _client_batched_op_msg_compressed( command: Mapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ack: bool, opts: CodecOptions, ctx: _ClientBulkWriteContext, @@ -1179,7 +1183,7 @@ def _client_batched_op_msg_compressed( with OP_MSG, compressed. """ data, to_send_ops, to_send_ns = _client_encode_batched_op_msg( - command, operations, ack, opts, ctx + command, operations, namespaces, ack, opts, ctx ) assert ctx.conn.compression_context is not None @@ -1190,6 +1194,7 @@ def _client_batched_op_msg_compressed( def _client_batched_op_msg( command: Mapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ack: bool, opts: CodecOptions, ctx: _ClientBulkWriteContext, @@ -1203,7 +1208,7 @@ def _client_batched_op_msg( buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") to_send_ops, to_send_ns, length = _client_batched_op_msg_impl( - command, operations, ack, opts, ctx, buf + command, operations, namespaces, ack, opts, ctx, buf ) # Header - request id and message length @@ -1219,6 +1224,7 @@ def _client_batched_op_msg( def _client_do_batched_op_msg( command: MutableMapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], opts: CodecOptions, ctx: _ClientBulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: @@ -1231,8 +1237,8 @@ def _client_do_batched_op_msg( else: ack = True if ctx.conn.compression_context: - return _client_batched_op_msg_compressed(command, operations, ack, opts, ctx) - return _client_batched_op_msg(command, operations, ack, opts, ctx) + return _client_batched_op_msg_compressed(command, operations, namespaces, ack, opts, ctx) + return _client_batched_op_msg(command, operations, namespaces, ack, opts, ctx) # End OP_MSG ----------------------------------------------------- diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 5f969804d5..106e5dcbb3 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -108,6 +108,7 @@ def __init__( self.verbose_results = verbose_results self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.namespaces: list[str] = [] self.idx_offset: int = 0 self.total_ops: int = 0 @@ -132,8 +133,9 @@ def add_insert(self, namespace: str, document: _DocumentOut) -> None: # Generate ObjectId client side. if not (isinstance(document, RawBSONDocument) or "_id" in document): document["_id"] = ObjectId() - cmd = {"insert": namespace, "document": document} + cmd = {"insert": -1, "document": document} self.ops.append(("insert", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 def add_update( @@ -150,7 +152,7 @@ def add_update( """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) cmd = { - "update": namespace, + "update": -1, "filter": selector, "updateMods": update, "multi": multi, @@ -171,6 +173,7 @@ def add_update( # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append(("update", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 def add_replace( @@ -185,7 +188,7 @@ def add_replace( """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) cmd = { - "update": namespace, + "update": -1, "filter": selector, "updateMods": replacement, "multi": False, @@ -200,6 +203,7 @@ def add_replace( self.uses_collation = True cmd["collation"] = collation self.ops.append(("replace", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 def add_delete( @@ -211,7 +215,7 @@ def add_delete( hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create a delete document and add it to the list of ops.""" - cmd = {"delete": namespace, "filter": selector, "multi": multi} + cmd = {"delete": -1, "filter": selector, "multi": multi} if hint is not None: self.uses_hint_delete = True cmd["hint"] = hint @@ -222,6 +226,7 @@ def add_delete( # A bulk_write containing an update_many is not retryable. self.is_retryable = False self.ops.append(("delete", cmd)) + self.namespaces.append(namespace) self.total_ops += 1 @_handle_reauth @@ -407,9 +412,10 @@ def _execute_batch_unack( bwc: _ClientBulkWriteContext, cmd: dict[str, Any], ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Executes a batch of bulkWrite server commands (unack).""" - request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] return to_send_ops, to_send_ns @@ -418,9 +424,10 @@ def _execute_batch( bwc: _ClientBulkWriteContext, cmd: dict[str, Any], ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Executes a batch of bulkWrite server commands (ack).""" - request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops) + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) result = self.write_command(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] self.client._process_response(result, bwc.session) # type: ignore[arg-type] return result, to_send_ops, to_send_ns # type: ignore[return-value] @@ -538,11 +545,12 @@ def _execute_command( # CSOT: apply timeout before encoding the command. conn.apply_timeout(self.client, cmd) ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) # Run as many ops as possible in one server command. if write_concern.acknowledged: - raw_result, to_send_ops, _ = self._execute_batch(bwc, cmd, ops) # type: ignore[arg-type] - result = copy.deepcopy(raw_result) + raw_result, to_send_ops, _ = self._execute_batch(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + result = raw_result # Top-level server/network error. if result.get("error"): @@ -598,7 +606,7 @@ def _execute_command( self.started_retryable_write = False else: - to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] self.idx_offset += len(to_send_ops) @@ -695,9 +703,10 @@ def execute_command_unack_unordered( conn.add_server_api(cmd) ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) # Run as many ops as possible in one server command. - to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops) # type: ignore[arg-type] + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] self.idx_offset += len(to_send_ops) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index efae6957ce..48a14699e6 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -48,12 +48,9 @@ class TestClientBulkWrite(AsyncIntegrationTest): @async_client_context.require_version_min(8, 0, 0, -24) async def test_returns_error_if_no_namespace_provided(self): - client = await async_rs_or_single_client() - self.addAsyncCleanup(client.close) - models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: - await client.bulk_write(models=models) + await self.client.bulk_write(models=models) self.assertIn( "MongoClient.bulk_write requires a namespace to be provided for each write operation", context.exception._message, @@ -64,15 +61,26 @@ async def test_handles_non_pymongo_error(self): with patch.object( _AsyncClientBulk, "write_command", return_value={"error": TypeError("mock type error")} ): - client = await async_rs_or_single_client() - self.addAsyncCleanup(client.close) - models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(ClientBulkWriteException) as context: - await client.bulk_write(models=models) + await self.client.bulk_write(models=models) self.assertIsInstance(context.exception.error, TypeError) self.assertFalse(hasattr(context.exception.error, "details")) + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_formats_write_error_correctly(self): + models = [ + InsertOne(namespace="db.coll", document={"_id": 1}), + InsertOne(namespace="db.coll", document={"_id": 1}), + ] + + with self.assertRaises(ClientBulkWriteException) as context: + await self.client.bulk_write(models=models) + + write_error = context.exception.write_errors[0] + self.assertEqual(write_error["idx"], 1) + self.assertEqual(write_error["op"], {"insert": 0, "document": {"_id": 1}}) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(AsyncIntegrationTest): diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 9beadb1227..6e269e25b0 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -449,7 +449,7 @@ def setUp(self): super().setUp() self.models = [] for doc in self.documents: - self.models.append(InsertOne(namespace="perftest.corpus", document=doc.copy())) + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) @client_context.require_version_min(8, 0, 0, -24) def do_task(self): @@ -461,7 +461,7 @@ def setUp(self): super().setUp() self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] for doc in self.documents: - self.models.append(InsertOne(document=doc.copy())) + self.models.append(InsertOne(document=doc)) self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) self.models.append(DeleteOne(filter={})) @@ -475,7 +475,7 @@ def setUp(self): super().setUp() self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] for doc in self.documents: - self.models.append(InsertOne(namespace="perftest.corpus", document=doc.copy())) + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) self.models.append( ReplaceOne( namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True @@ -499,7 +499,7 @@ def setUp(self): super().setUp() self.models = [] for doc in self.documents: - self.models.append(InsertOne(namespace="perftest.corpus", document=doc.copy())) + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) @client_context.require_version_min(8, 0, 0, -24) def do_task(self): diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index daf0ad8321..5a2d8ca5ff 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -48,12 +48,9 @@ class TestClientBulkWrite(IntegrationTest): @client_context.require_version_min(8, 0, 0, -24) def test_returns_error_if_no_namespace_provided(self): - client = rs_or_single_client() - self.addCleanup(client.close) - models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: - client.bulk_write(models=models) + self.client.bulk_write(models=models) self.assertIn( "MongoClient.bulk_write requires a namespace to be provided for each write operation", context.exception._message, @@ -64,15 +61,26 @@ def test_handles_non_pymongo_error(self): with patch.object( _ClientBulk, "write_command", return_value={"error": TypeError("mock type error")} ): - client = rs_or_single_client() - self.addCleanup(client.close) - models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(ClientBulkWriteException) as context: - client.bulk_write(models=models) + self.client.bulk_write(models=models) self.assertIsInstance(context.exception.error, TypeError) self.assertFalse(hasattr(context.exception.error, "details")) + @client_context.require_version_min(8, 0, 0, -24) + def test_formats_write_error_correctly(self): + models = [ + InsertOne(namespace="db.coll", document={"_id": 1}), + InsertOne(namespace="db.coll", document={"_id": 1}), + ] + + with self.assertRaises(ClientBulkWriteException) as context: + self.client.bulk_write(models=models) + + write_error = context.exception.write_errors[0] + self.assertEqual(write_error["idx"], 1) + self.assertEqual(write_error["op"], {"insert": 0, "document": {"_id": 1}}) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(IntegrationTest): From 7295fe17b7e4182ba95367b648a4dc32666f3a4d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 21 Aug 2024 14:28:55 -0500 Subject: [PATCH 1408/2111] PYTHON-4226 Add Projection with aggregation expressions example (#1803) --- test/test_examples.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/test_examples.py b/test/test_examples.py index 02b1785866..296283db28 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -866,6 +866,38 @@ def test_aggregate_examples(self): ) # End Aggregation Example 4 + @client_context.require_version_min(4, 4) + def test_aggregate_projection_example(self): + db = self.db + + # Start Aggregation Projection Example 1 + db.inventory.find( + {}, + { + "_id": 0, + "item": 1, + "status": { + "$switch": { + "branches": [ + {"case": {"$eq": ["$status", "A"]}, "then": "Available"}, + {"case": {"$eq": ["$status", "D"]}, "then": "Discontinued"}, + ], + "default": "No status found", + } + }, + "area": { + "$concat": [ + {"$toString": {"$multiply": ["$size.h", "$size.w"]}}, + " ", + "$size.uom", + ] + }, + "reportNumber": {"$literal": 1}, + }, + ) + + # End Aggregation Projection Example 1 + def test_commands(self): db = self.db db.restaurants.insert_one({}) From d6b896d18b3cb351148b2dfac0b1e5c062a18169 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 21 Aug 2024 16:52:16 -0500 Subject: [PATCH 1409/2111] PYTHON-4229 Get remaining secrets from AWS secrets manager (#1805) --- .evergreen/config.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8d77e58c07..8388c72151 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -394,7 +394,6 @@ functions: params: working_dir: "src" shell: bash - include_expansions_in_env: ["DRIVERS_ATLAS_LAMBDA_USER", "DRIVERS_ATLAS_LAMBDA_PASSWORD"] script: | # Disable xtrace set +x From 4dde30147cc192e51beb2bca6570bfe667c8b8df Mon Sep 17 00:00:00 2001 From: Shruti Sridhar <77828382+shruti-sridhar@users.noreply.github.com> Date: Thu, 22 Aug 2024 14:06:02 -0700 Subject: [PATCH 1410/2111] PYTHON-4671 Skip client.bulk_write tests on Atlas Serverless (#1807) --- test/asynchronous/test_client_bulk_write.py | 17 +++++++++++++++++ .../unacknowledged-client-bulkWrite.json | 3 ++- .../client-bulkWrite-delete-options.json | 5 +++-- .../unified/client-bulkWrite-errorResponse.json | 3 ++- test/crud/unified/client-bulkWrite-errors.json | 3 ++- .../client-bulkWrite-mixed-namespaces.json | 5 +++-- test/crud/unified/client-bulkWrite-options.json | 5 +++-- test/crud/unified/client-bulkWrite-ordered.json | 5 +++-- test/crud/unified/client-bulkWrite-results.json | 5 +++-- .../client-bulkWrite-update-options.json | 5 +++-- .../client-bulkWrite-update-pipeline.json | 5 +++-- .../unified/client-bulkWrite-clientErrors.json | 3 ++- .../unified/client-bulkWrite-serverErrors.json | 3 ++- .../unified/handshakeError.json | 10 ++++++---- test/server_selection_logging/operation-id.json | 6 ++++-- test/test_client_bulk_write.py | 17 +++++++++++++++++ test/transactions/unified/client-bulkWrite.json | 5 +++-- test/versioned-api/crud-api-version-1.json | 3 ++- 18 files changed, 80 insertions(+), 28 deletions(-) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 48a14699e6..458d119856 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -47,6 +47,7 @@ class TestClientBulkWrite(AsyncIntegrationTest): @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_returns_error_if_no_namespace_provided(self): models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -57,6 +58,7 @@ async def test_returns_error_if_no_namespace_provided(self): ) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_handles_non_pymongo_error(self): with patch.object( _AsyncClientBulk, "write_command", return_value={"error": TypeError("mock type error")} @@ -68,6 +70,7 @@ async def test_handles_non_pymongo_error(self): self.assertFalse(hasattr(context.exception.error, "details")) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_formats_write_error_correctly(self): models = [ InsertOne(namespace="db.coll", document={"_id": 1}), @@ -90,6 +93,7 @@ async def asyncSetUp(self): self.max_message_size_bytes = await async_client_context.max_message_size_bytes @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -115,6 +119,7 @@ async def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -147,6 +152,7 @@ async def test_batch_splits_if_ops_payload_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless @async_client_context.require_failCommand_fail_point async def test_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() @@ -190,6 +196,7 @@ async def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(bulk_write_events), 2) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -220,6 +227,7 @@ async def test_collects_write_errors_across_batches_unordered(self): self.assertEqual(len(bulk_write_events), 2) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -250,6 +258,7 @@ async def test_collects_write_errors_across_batches_ordered(self): self.assertEqual(len(bulk_write_events), 1) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -290,6 +299,7 @@ async def test_handles_cursor_requiring_getMore(self): self.assertTrue(get_more_event) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless @async_client_context.require_no_standalone async def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() @@ -333,6 +343,7 @@ async def test_handles_cursor_requiring_getMore_within_transaction(self): self.assertTrue(get_more_event) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless @async_client_context.require_failCommand_fail_point async def test_handles_getMore_error(self): listener = OvertCommandListener() @@ -388,6 +399,7 @@ async def test_handles_getMore_error(self): self.assertTrue(kill_cursors_event) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -444,6 +456,7 @@ async def _setup_namespace_test_models(self): return num_models, models @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -475,6 +488,7 @@ async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = await async_rs_or_single_client(event_listeners=[listener]) @@ -513,6 +527,7 @@ async def test_batch_splits_if_new_namespace_is_too_large(self): self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless async def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = await async_rs_or_single_client() self.addAsyncCleanup(client.close) @@ -531,6 +546,7 @@ async def test_returns_error_if_no_writes_can_be_added_to_ops(self): await client.bulk_write(models=models) @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") async def test_returns_error_if_auto_encryption_configured(self): opts = AutoEncryptionOpts( @@ -556,6 +572,7 @@ async def asyncSetUp(self): self.max_message_size_bytes = await async_client_context.max_message_size_bytes @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless @async_client_context.require_failCommand_fail_point async def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 diff --git a/test/command_monitoring/unacknowledged-client-bulkWrite.json b/test/command_monitoring/unacknowledged-client-bulkWrite.json index 1099b6a1e9..b30e1540f4 100644 --- a/test/command_monitoring/unacknowledged-client-bulkWrite.json +++ b/test/command_monitoring/unacknowledged-client-bulkWrite.json @@ -3,7 +3,8 @@ "schemaVersion": "1.7", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-delete-options.json b/test/crud/unified/client-bulkWrite-delete-options.json index 5bdf2b124a..d9987897dc 100644 --- a/test/crud/unified/client-bulkWrite-delete-options.json +++ b/test/crud/unified/client-bulkWrite-delete-options.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite delete options", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-errorResponse.json b/test/crud/unified/client-bulkWrite-errorResponse.json index edf2339d8a..b828aad3b9 100644 --- a/test/crud/unified/client-bulkWrite-errorResponse.json +++ b/test/crud/unified/client-bulkWrite-errorResponse.json @@ -3,7 +3,8 @@ "schemaVersion": "1.12", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-errors.json b/test/crud/unified/client-bulkWrite-errors.json index 9f17f85331..8cc45bb5f2 100644 --- a/test/crud/unified/client-bulkWrite-errors.json +++ b/test/crud/unified/client-bulkWrite-errors.json @@ -3,7 +3,8 @@ "schemaVersion": "1.21", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-mixed-namespaces.json b/test/crud/unified/client-bulkWrite-mixed-namespaces.json index f90755dc85..55f0618923 100644 --- a/test/crud/unified/client-bulkWrite-mixed-namespaces.json +++ b/test/crud/unified/client-bulkWrite-mixed-namespaces.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite with mixed namespaces", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-options.json b/test/crud/unified/client-bulkWrite-options.json index a1e6af3bf3..708fe4e85b 100644 --- a/test/crud/unified/client-bulkWrite-options.json +++ b/test/crud/unified/client-bulkWrite-options.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite top-level options", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-ordered.json b/test/crud/unified/client-bulkWrite-ordered.json index a55d6619b5..6fb10d992f 100644 --- a/test/crud/unified/client-bulkWrite-ordered.json +++ b/test/crud/unified/client-bulkWrite-ordered.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite with ordered option", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-results.json b/test/crud/unified/client-bulkWrite-results.json index 97a9e50b21..accf5a9cbf 100644 --- a/test/crud/unified/client-bulkWrite-results.json +++ b/test/crud/unified/client-bulkWrite-results.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite results", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-update-options.json b/test/crud/unified/client-bulkWrite-update-options.json index 93a2774e5f..ce6241c681 100644 --- a/test/crud/unified/client-bulkWrite-update-options.json +++ b/test/crud/unified/client-bulkWrite-update-options.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite update options", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-update-pipeline.json b/test/crud/unified/client-bulkWrite-update-pipeline.json index 57b6c9c1ba..9dba5ee6c5 100644 --- a/test/crud/unified/client-bulkWrite-update-pipeline.json +++ b/test/crud/unified/client-bulkWrite-update-pipeline.json @@ -1,9 +1,10 @@ { "description": "client bulkWrite update pipeline", - "schemaVersion": "1.1", + "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/retryable_writes/unified/client-bulkWrite-clientErrors.json b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json index e2c0fb9c0a..d16e0c9c8d 100644 --- a/test/retryable_writes/unified/client-bulkWrite-clientErrors.json +++ b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json @@ -8,7 +8,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json index 4a0b210eb5..f58c82bcc7 100644 --- a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json +++ b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json @@ -8,7 +8,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index aa677494c8..93cb2e849e 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -1,6 +1,6 @@ { "description": "retryable writes handshake failures", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.2", @@ -57,7 +57,8 @@ "description": "client.clientBulkWrite succeeds after retryable handshake network error", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "operations": [ @@ -165,7 +166,8 @@ "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "operations": [ @@ -2010,4 +2012,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json index 5383b66332..ccc2623166 100644 --- a/test/server_selection_logging/operation-id.json +++ b/test/server_selection_logging/operation-id.json @@ -232,7 +232,8 @@ "description": "Successful client bulkWrite operation: log messages have operationIds", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "operations": [ @@ -304,7 +305,8 @@ "description": "Failed client bulkWrite operation: log messages have operationIds", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "operations": [ diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 5a2d8ca5ff..6c6188cf61 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -47,6 +47,7 @@ class TestClientBulkWrite(IntegrationTest): @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_returns_error_if_no_namespace_provided(self): models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -57,6 +58,7 @@ def test_returns_error_if_no_namespace_provided(self): ) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_handles_non_pymongo_error(self): with patch.object( _ClientBulk, "write_command", return_value={"error": TypeError("mock type error")} @@ -68,6 +70,7 @@ def test_handles_non_pymongo_error(self): self.assertFalse(hasattr(context.exception.error, "details")) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_formats_write_error_correctly(self): models = [ InsertOne(namespace="db.coll", document={"_id": 1}), @@ -90,6 +93,7 @@ def setUp(self): self.max_message_size_bytes = client_context.max_message_size_bytes @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -115,6 +119,7 @@ def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -147,6 +152,7 @@ def test_batch_splits_if_ops_payload_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless @client_context.require_failCommand_fail_point def test_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() @@ -190,6 +196,7 @@ def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(bulk_write_events), 2) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -220,6 +227,7 @@ def test_collects_write_errors_across_batches_unordered(self): self.assertEqual(len(bulk_write_events), 2) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -250,6 +258,7 @@ def test_collects_write_errors_across_batches_ordered(self): self.assertEqual(len(bulk_write_events), 1) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -290,6 +299,7 @@ def test_handles_cursor_requiring_getMore(self): self.assertTrue(get_more_event) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless @client_context.require_no_standalone def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() @@ -333,6 +343,7 @@ def test_handles_cursor_requiring_getMore_within_transaction(self): self.assertTrue(get_more_event) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless @client_context.require_failCommand_fail_point def test_handles_getMore_error(self): listener = OvertCommandListener() @@ -388,6 +399,7 @@ def test_handles_getMore_error(self): self.assertTrue(kill_cursors_event) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -444,6 +456,7 @@ def _setup_namespace_test_models(self): return num_models, models @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -475,6 +488,7 @@ def test_no_batch_splits_if_new_namespace_is_not_too_large(self): self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = rs_or_single_client(event_listeners=[listener]) @@ -513,6 +527,7 @@ def test_batch_splits_if_new_namespace_is_too_large(self): self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = rs_or_single_client() self.addCleanup(client.close) @@ -531,6 +546,7 @@ def test_returns_error_if_no_writes_can_be_added_to_ops(self): client.bulk_write(models=models) @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_returns_error_if_auto_encryption_configured(self): opts = AutoEncryptionOpts( @@ -556,6 +572,7 @@ def setUp(self): self.max_message_size_bytes = client_context.max_message_size_bytes @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless @client_context.require_failCommand_fail_point def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 diff --git a/test/transactions/unified/client-bulkWrite.json b/test/transactions/unified/client-bulkWrite.json index f8f1d97169..4a8d013f8d 100644 --- a/test/transactions/unified/client-bulkWrite.json +++ b/test/transactions/unified/client-bulkWrite.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite transactions", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "8.0", @@ -8,7 +8,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index fe668620f8..23ef59a6d9 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -431,7 +431,8 @@ "description": "client bulkWrite appends declared API version", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "operations": [ From 4eae7d2d94b4c15581c61ecc88a77daee738617f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 23 Aug 2024 09:50:05 -0700 Subject: [PATCH 1411/2111] PYTHON-4690 Add repr for FixedOffset eg FixedOffset(datetime.timedelta(seconds=3600), '+60')) (#1806) --- bson/tz_util.py | 3 +++ doc/changelog.rst | 1 + test/test_bson.py | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/bson/tz_util.py b/bson/tz_util.py index a21d3c1736..4d31c04f9c 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -39,6 +39,9 @@ def __init__(self, offset: Union[float, timedelta], name: str) -> None: def __getinitargs__(self) -> Tuple[timedelta, str]: return self.__offset, self.__name + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__offset!r}, {self.__name!r})" + def utcoffset(self, dt: Optional[datetime]) -> timedelta: return self.__offset diff --git a/doc/changelog.rst b/doc/changelog.rst index 8d1c6f1182..d80f78fe4d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -30,6 +30,7 @@ PyMongo 4.9 brings a number of improvements including: :class:`~pymongo.operations.DeleteOne`, and :class:`~pymongo.operations.DeleteMany` operations, so they can be used in the new :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. +- Added :func:`repr` support to :class:`bson.tz_util.FixedOffset`. Issues Resolved ............... diff --git a/test/test_bson.py b/test/test_bson.py index fec84090d2..79a7fa0615 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1019,6 +1019,10 @@ def test_tzinfo(self): tz = FixedOffset(42, "forty-two") self.assertRaises(ValueError, CodecOptions, tzinfo=tz) self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo) + self.assertEqual(repr(tz), "FixedOffset(datetime.timedelta(seconds=2520), 'forty-two')") + self.assertEqual( + repr(eval(repr(tz))), "FixedOffset(datetime.timedelta(seconds=2520), 'forty-two')" + ) def test_codec_options_repr(self): r = ( From 7ee08ddbe6e082da88c39e988e6299e7c1a2a987 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 23 Aug 2024 14:57:07 -0500 Subject: [PATCH 1412/2111] PYTHON-4672 Clarify Reauthentication and Speculative Authentication combination behavior (#1802) Co-authored-by: Jib --- test/auth_oidc/test_auth_oidc.py | 51 ++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 5 deletions(-) diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 406ea5ec1b..fa4b7d6697 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -40,11 +40,7 @@ from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.operations import InsertOne -from pymongo.synchronous.auth_oidc import ( - OIDCCallback, - OIDCCallbackContext, - OIDCCallbackResult, -) +from pymongo.synchronous.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult from pymongo.uri_parser import parse_uri ROOT = Path(__file__).parent.parent.resolve() @@ -1019,6 +1015,51 @@ def fetch(self, _): # Close the client. client.close() + def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self): + # Create an OIDC configured client that can listen for `SaslStart` commands. + listener = EventListener() + client = self.create_client(event_listeners=[listener]) + + # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication. + client2 = self.create_client() + client2.test.test.find_one() + client.options.pool_options._credentials.cache.data = ( + client2.options.pool_options._credentials.cache.data + ) + client2.close() + self.request_called = 0 + + # Perform an `insert` operation that succeeds. + client.test.test.insert_one({}) + + # Assert that the callback was not called. + self.assertEqual(self.request_called, 0) + + # Assert there were no `SaslStart` commands executed. + assert not any( + event.command_name.lower() == "saslstart" for event in listener.started_events + ) + listener.reset() + + # Set a fail point for `insert` commands of the form: + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform an `insert` operation that succeeds. + client.test.test.insert_one({}) + + # Assert that the callback was called once. + self.assertEqual(self.request_called, 1) + + # Assert there were `SaslStart` commands executed. + assert any(event.command_name.lower() == "saslstart" for event in listener.started_events) + + # Close the client. + client.close() + def test_5_1_azure_with_no_username(self): if ENVIRON != "azure": raise unittest.SkipTest("Test is only supported on Azure") From 50586baf8d7d2f8c32ad4da71110e74db3697974 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 23 Aug 2024 14:57:48 -0500 Subject: [PATCH 1413/2111] PYTHON-4025 Move Release Instructions to Wiki (#1808) --- CONTRIBUTING.md | 4 ++ RELEASE.md | 109 ------------------------------------------------ 2 files changed, 4 insertions(+), 109 deletions(-) delete mode 100644 RELEASE.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4dede83437..c844470138 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -244,3 +244,7 @@ The `-b` flag adds as a regex pattern to block files you do not wish to update in PyMongo. This is primarily helpful if you are implementing a new feature in PyMongo that has spec tests already implemented, or if you are attempting to validate new spec tests in PyMongo. + +## Making a Release + +Follow the [Python Driver Release Process Wiki](https://wiki.corp.mongodb.com/display/DRIVERS/Python+Driver+Release+Process). diff --git a/RELEASE.md b/RELEASE.md deleted file mode 100644 index 3c2990df08..0000000000 --- a/RELEASE.md +++ /dev/null @@ -1,109 +0,0 @@ -# Some notes on PyMongo releases - -## Versioning - -We follow [semver](https://semver.org/) and [pep-0440](https://www.python.org/dev/peps/pep-0440) -for versioning. - -We shoot for a release every few months - that will generally just -increment the middle / minor version number (e.g. `3.5.0` -> `3.6.0`). - -Patch releases are reserved for bug fixes (in general no new features or -deprecations) - they only happen in cases where there is a critical bug -in a recently released version, or when a release has no new features or -API changes. - -In between releases we add `.devN` to the version number to denote the -version under development. So if we just released `3.6.0`, then the -current dev version might be `3.6.1.dev0` or `3.7.0.dev0`. When we make the -next release we replace all instances of `3.x.x.devN` in the docs with the -new version number. - -## Deprecation - -Changes should be backwards compatible unless absolutely necessary. When -making API changes the approach is generally to add a deprecation -warning but keeping the existing API functional. Deprecated features can -be removed in a release that changes the major version number. - -## Doing a Release - -1. PyMongo is tested on Evergreen. Ensure the latest commit are passing - [CI](https://spruce.mongodb.com/commits/mongo-python-driver) as expected. - -2. Check Jira to ensure all the tickets in this version have been - completed. - -3. Make a PR that adds the release notes to `doc/changelog.rst`. Generally just - summarize/clarify the git log, but you might add some more long form - notes for big changes. - -4. Merge the PR. - -5. Clone the source repository in a temporary directory and check out the - release branch. - -6. Update the version number in `pymongo/_version.py`. - -7. Commit the change, e.g. `git add . && git commit -m "BUMP "` - -7. Tag w/ version_number, eg, - `git tag -a '4.1.0' -m 'BUMP 4.1.0'`. - -8. Bump the version number to `.dev0` in - `pymongo/_version.py`, commit, push. - -9. Push commit / tag, eg `git push && git push --tags`. - -10. Pushing a tag will trigger the release process on GitHub Actions - that will require a member of the team to authorize the deployment. - Navigate to https://github.com/mongodb/mongo-python-driver/actions/workflows/release-python.yml - and wait for the publish to complete. - -11. Make sure the new version appears on - `https://pymongo.readthedocs.io/en/stable/`. If the new version does not show - up automatically, trigger a rebuild of "stable" on https://readthedocs.org/projects/pymongo/builds/. - -12. Publish the release version in Jira and add a description of the release, such as a the reason - or the main feature. - -13. Announce the release on the [community forum](https://www.mongodb.com/community/forums/tags/c/announcements/driver-releases/110/python) - -14. File a ticket for DOCSP highlighting changes in server version and - Python version compatibility or the lack thereof, for example https://jira.mongodb.org/browse/DOCSP-34040 - -15. Create a GitHub Release for the tag using https://github.com/mongodb/mongo-python-driver/releases/new. - The title should be "PyMongo X.Y.Z", and the description should - contain a link to the release notes on the the community forum, e.g. - "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457" - -16. Wait for automated update PR on conda-forge, e.g.: https://github.com/conda-forge/pymongo-feedstock/pull/81 - Update dependencies if needed. - - -## Doing a Bug Fix Release - -1. If it is a new branch, first create the release branch and Evergreen project. - -- Clone the source repository in a temporary location. - -- Create a branch from the tag, e.g. `git checkout -b v4.1 4.1.0`. - -- Push the branch, e.g.: `git push origin v4.6`. - -- Create a new project in Evergreen for the branch by duplicating the "Mongo Python Driver" project. - Select the option to create a JIRA ticket for S3 bucket permissions. - -- Update the "Display Name", "Branch Name", and "Identifier". - -- Attach the project to the repository. - -- Wait for the JIRA ticket to be resolved and verify S3 upload capability with a patch release on the - new project. - -2. Create a PR against the release branch. - -3. Create a release using the "Doing a Release" checklist above, ensuring that you - check out the appropriate release branch in the source checkout. - -4. Cherry-pick the changelog PR onto the `master` branch. From f4392041b2641928b5ecfd0e662d8e04fd9ee584 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 26 Aug 2024 14:39:48 -0500 Subject: [PATCH 1414/2111] PYTHON-4692 Skip TestClientBulkWriteCSOT on MacOS and Windows (#1810) --- test/asynchronous/test_client_bulk_write.py | 3 +++ test/test_client_bulk_write.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 458d119856..c35e823d03 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -15,6 +15,7 @@ """Test the client bulk write API.""" from __future__ import annotations +import os import sys sys.path[0:0] = [""] @@ -567,6 +568,8 @@ async def test_returns_error_if_auto_encryption_configured(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(AsyncIntegrationTest): async def asyncSetUp(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") self.max_write_batch_size = await async_client_context.max_write_batch_size self.max_bson_object_size = await async_client_context.max_bson_size self.max_message_size_bytes = await async_client_context.max_message_size_bytes diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 6c6188cf61..ee19a04176 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -15,6 +15,7 @@ """Test the client bulk write API.""" from __future__ import annotations +import os import sys sys.path[0:0] = [""] @@ -567,6 +568,8 @@ def test_returns_error_if_auto_encryption_configured(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(IntegrationTest): def setUp(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") self.max_write_batch_size = client_context.max_write_batch_size self.max_bson_object_size = client_context.max_bson_size self.max_message_size_bytes = client_context.max_message_size_bytes From b8213f28176c77352a7e36f0db4ebd1b1e9213ed Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 27 Aug 2024 10:50:44 -0400 Subject: [PATCH 1415/2111] PYTHON-4698 Rename the async ClientEncryption to AsyncClientEncryption (#1816) --- pymongo/asynchronous/encryption.py | 14 +++++++------- pymongo/encryption_options.py | 2 +- tools/synchro.py | 1 + 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 8b63525f21..93484541eb 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -304,7 +304,7 @@ async def close(self) -> None: class RewrapManyDataKeyResult: - """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. + """Result object returned by a :meth:`~AsyncClientEncryption.rewrap_many_data_key` operation. .. versionadded:: 4.2 """ @@ -316,7 +316,7 @@ def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: def bulk_write_result(self) -> Optional[BulkWriteResult]: """The result of the bulk write operation used to update the key vault collection with one or more rewrapped data keys. If - :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + :meth:`~AsyncClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, no bulk write operation will be executed and this field will be ``None``. """ @@ -506,7 +506,7 @@ def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: return opts -class ClientEncryption(Generic[_DocumentType]): +class AsyncClientEncryption(Generic[_DocumentType]): """Explicit client-side field level encryption.""" def __init__( @@ -519,7 +519,7 @@ def __init__( ) -> None: """Explicit client-side field level encryption. - The ClientEncryption class encapsulates explicit operations on a key + The AsyncClientEncryption class encapsulates explicit operations on a key vault collection that cannot be done directly on an AsyncMongoClient. Similar to configuring auto encryption on an AsyncMongoClient, it is constructed with an AsyncMongoClient (to a MongoDB cluster containing the key vault @@ -1126,7 +1126,7 @@ async def rewrap_many_data_key( result = await self._key_vault_coll.bulk_write(replacements) return RewrapManyDataKeyResult(result) - async def __aenter__(self) -> ClientEncryption[_DocumentType]: + async def __aenter__(self) -> AsyncClientEncryption[_DocumentType]: return self async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: @@ -1134,7 +1134,7 @@ async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: def _check_closed(self) -> None: if self._encryption is None: - raise InvalidOperation("Cannot use closed ClientEncryption") + raise InvalidOperation("Cannot use closed AsyncClientEncryption") async def close(self) -> None: """Release resources. @@ -1142,7 +1142,7 @@ async def close(self) -> None: Note that using this class in a with-statement will automatically call :meth:`close`:: - with ClientEncryption(...) as client_encryption: + with AsyncClientEncryption(...) as client_encryption: encrypted = client_encryption.encrypt(value, ...) decrypted = client_encryption.decrypt(encrypted) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 3b0d32a4b9..df13026500 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -70,7 +70,7 @@ def __init__( users. To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True``. Explicit encryption and explicit decryption is also supported for all users - with the :class:`~pymongo.encryption.ClientEncryption` class. + with the :class:`~pymongo.asynchronous.encryption.AsyncClientEncryption` and :class:`~pymongo.encryption.ClientEncryption` classes. See :ref:`automatic-client-side-encryption` for an example. diff --git a/tools/synchro.py b/tools/synchro.py index 0c2aff1301..65ff3bfe0a 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -58,6 +58,7 @@ "_AsyncGridOutChunkIterator": "GridOutChunkIterator", "_a_grid_in_property": "_grid_in_property", "_a_grid_out_property": "_grid_out_property", + "AsyncClientEncryption": "ClientEncryption", "AsyncMongoCryptCallback": "MongoCryptCallback", "AsyncExplicitEncrypter": "ExplicitEncrypter", "AsyncAutoEncrypter": "AutoEncrypter", From 81ea92b8082007e8916c6e36db9349aef49bd5c3 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 27 Aug 2024 13:38:42 -0400 Subject: [PATCH 1416/2111] PYTHON-4669 - Update More APIs for Motor Compatibility (#1815) --- gridfs/asynchronous/grid_file.py | 30 ++++++++++++++++++++++-------- gridfs/synchronous/grid_file.py | 28 ++++++++++++++++++++-------- pyproject.toml | 6 +++++- 3 files changed, 47 insertions(+), 17 deletions(-) diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index 4d6140750e..afc1a0f756 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -1176,20 +1176,22 @@ def __getattr__(self, name: str) -> Any: raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name: str, value: Any) -> None: - if _IS_SYNC: - # For properties of this instance like _buffer, or descriptors set on - # the class like filename, use regular __setattr__ - if name in self.__dict__ or name in self.__class__.__dict__: - object.__setattr__(self, name, value) - else: + # For properties of this instance like _buffer, or descriptors set on + # the class like filename, use regular __setattr__ + if name in self.__dict__ or name in self.__class__.__dict__: + object.__setattr__(self, name, value) + else: + if _IS_SYNC: # All other attributes are part of the document in db.fs.files. # Store them to be sent to server on close() or if closed, send # them now. self._file[name] = value if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - else: - object.__setattr__(self, name, value) + else: + raise AttributeError( + "AsyncGridIn does not support __setattr__. Use AsyncGridIn.set() instead" + ) async def set(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on @@ -1484,6 +1486,17 @@ def __init__( _file: Any _chunk_iter: Any + async def __anext__(self) -> bytes: + return super().__next__() + + def __next__(self) -> bytes: # noqa: F811, RUF100 + if _IS_SYNC: + return super().__next__() + else: + raise TypeError( + "AsyncGridOut does not support synchronous iteration. Use `async for` instead" + ) + async def open(self) -> None: if not self._file: _disallow_transactions(self._session) @@ -1511,6 +1524,7 @@ async def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ + await self.open() received = len(self._buffer) - self._buffer_pos chunk_data = EMPTY chunk_size = int(self.chunk_size) diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index bc2e29a61d..80015f96e7 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -1166,20 +1166,22 @@ def __getattr__(self, name: str) -> Any: raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name: str, value: Any) -> None: - if _IS_SYNC: - # For properties of this instance like _buffer, or descriptors set on - # the class like filename, use regular __setattr__ - if name in self.__dict__ or name in self.__class__.__dict__: - object.__setattr__(self, name, value) - else: + # For properties of this instance like _buffer, or descriptors set on + # the class like filename, use regular __setattr__ + if name in self.__dict__ or name in self.__class__.__dict__: + object.__setattr__(self, name, value) + else: + if _IS_SYNC: # All other attributes are part of the document in db.fs.files. # Store them to be sent to server on close() or if closed, send # them now. self._file[name] = value if self._closed: self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - else: - object.__setattr__(self, name, value) + else: + raise AttributeError( + "GridIn does not support __setattr__. Use GridIn.set() instead" + ) def set(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on @@ -1472,6 +1474,15 @@ def __init__( _file: Any _chunk_iter: Any + def __next__(self) -> bytes: + return super().__next__() + + def __next__(self) -> bytes: # noqa: F811, RUF100 + if _IS_SYNC: + return super().__next__() + else: + raise TypeError("GridOut does not support synchronous iteration. Use `for` instead") + def open(self) -> None: if not self._file: _disallow_transactions(self._session) @@ -1499,6 +1510,7 @@ def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ + self.open() received = len(self._buffer) - self._buffer_pos chunk_data = EMPTY chunk_size = int(self.chunk_size) diff --git a/pyproject.toml b/pyproject.toml index 4380b57e8d..8452bfe956 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,7 +126,7 @@ module = ["service_identity.*"] ignore_missing_imports = true [[tool.mypy.overrides]] -module = ["pymongo.synchronous.*", "gridfs.synchronous.*"] +module = ["pymongo.synchronous.*"] warn_unused_ignores = false disable_error_code = ["unused-coroutine"] @@ -134,6 +134,10 @@ disable_error_code = ["unused-coroutine"] module = ["pymongo.asynchronous.*"] warn_unused_ignores = false +[[tool.mypy.overrides]] +module = ["gridfs.synchronous.*"] +warn_unused_ignores = false +disable_error_code = ["unused-coroutine", "no-redef"] [tool.ruff] target-version = "py37" From fd0787a57b72ed30e367f519bebb0ba505ddc121 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 27 Aug 2024 19:05:15 -0500 Subject: [PATCH 1417/2111] PYTHON-4615 Address sign-compare warning, improve array_of_documents_to_buffer validation (#1804) --- bson/__init__.py | 7 ++++--- bson/_cbsonmodule.c | 50 ++++++++++++++++++++++++++++----------------- test/test_bson.py | 18 ++++++++++++++++ 3 files changed, 53 insertions(+), 22 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index a7c9ddc509..48fffd745f 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -284,10 +284,10 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: except struct.error as exc: raise InvalidBSON(str(exc)) from None end = position + obj_size - 1 - if data[end] != 0: - raise InvalidBSON("bad eoo") if end >= obj_end: raise InvalidBSON("invalid object length") + if data[end] != 0: + raise InvalidBSON("bad eoo") # If this is the top-level document, validate the total size too. if position == 0 and obj_size != obj_end: raise InvalidBSON("invalid object length") @@ -1180,9 +1180,10 @@ def _decode_selective( return doc -def _array_of_documents_to_buffer(view: memoryview) -> bytes: +def _array_of_documents_to_buffer(data: Union[memoryview, bytes]) -> bytes: # Extract the raw bytes of each document. position = 0 + view = memoryview(data) _, end = _get_object_size(view, position, len(view)) position += 4 buffers: list[memoryview] = [] diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 3b3aecc441..68ec9fe45c 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -2901,11 +2901,31 @@ static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* a "not enough data for a BSON document"); Py_DECREF(InvalidBSON); } - goto done; + goto fail; } memcpy(&size, string, 4); size = BSON_UINT32_FROM_LE(size); + + /* validate the size of the array */ + if (view.len != (int32_t)size || (int32_t)size < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "objsize too large"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "bad eoo"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + /* save space for length */ if (pymongo_buffer_save_space(buffer, size) == -1) { goto fail; @@ -2948,30 +2968,22 @@ static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* a goto fail; } - if (view.len < size) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "objsize too large"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - - if (string[size - 1]) { - PyObject* InvalidBSON = _error("InvalidBSON"); - if (InvalidBSON) { - PyErr_SetString(InvalidBSON, "bad eoo"); - Py_DECREF(InvalidBSON); - } - goto fail; - } - if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { goto fail; } position += value_length; } + if (position != size - 1) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "bad object or element length"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + /* objectify buffer */ result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), (Py_ssize_t)pymongo_buffer_get_position(buffer)); diff --git a/test/test_bson.py b/test/test_bson.py index 79a7fa0615..8c8fe60182 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -41,6 +41,7 @@ EPOCH_AWARE, DatetimeMS, Regex, + _array_of_documents_to_buffer, _datetime_to_millis, decode, decode_all, @@ -1366,6 +1367,23 @@ def __int__(self): with self.assertRaisesRegex(InvalidBSON, re.compile(re.escape(_DATETIME_ERROR_SUGGESTION))): decode(encode({"a": DatetimeMS(small_ms)})) + def test_array_of_documents_to_buffer(self): + doc = dict(a=1) + buf = _array_of_documents_to_buffer(encode({"0": doc})) + self.assertEqual(buf, encode(doc)) + buf = _array_of_documents_to_buffer(encode({"0": doc, "1": doc})) + self.assertEqual(buf, encode(doc) + encode(doc)) + with self.assertRaises(InvalidBSON): + _array_of_documents_to_buffer(encode({"0": doc, "1": doc}) + b"1") + buf = encode({"0": doc, "1": doc}) + buf = buf[:-1] + b"1" + with self.assertRaises(InvalidBSON): + _array_of_documents_to_buffer(buf) + # We replace the size of the array with \xff\xff\xff\x00 which is -221 as an int32. + buf = b"\x14\x00\x00\x00\x04a\x00\xff\xff\xff\x00\x100\x00\x01\x00\x00\x00\x00\x00" + with self.assertRaises(InvalidBSON): + _array_of_documents_to_buffer(buf) + class TestLongLongToString(unittest.TestCase): def test_long_long_to_string(self): From e430d2e2fa67e15c666ce666d0fa1ca8101ef8dc Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 Aug 2024 08:31:42 -0400 Subject: [PATCH 1418/2111] PYTHON-4662 - Capture Async PyMongo metadata (#1814) --- pymongo/asynchronous/mongo_client.py | 2 +- pymongo/client_options.py | 16 +++++++++++++--- pymongo/pool_options.py | 9 ++++++++- pymongo/synchronous/mongo_client.py | 2 +- test/asynchronous/test_client.py | 5 ++++- test/test_client.py | 3 +++ tools/synchro.py | 1 + 7 files changed, 31 insertions(+), 7 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index e1a9d7735d..05e4e80f1d 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -830,7 +830,7 @@ def __init__( # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self._options = options = ClientOptions(username, password, dbase, opts) + self._options = options = ClientOptions(username, password, dbase, opts, _IS_SYNC) self._default_database_name = dbase self._lock = _ALock(_create_lock()) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 18f882980c..9b9b88a736 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -144,7 +144,11 @@ def _parse_ssl_options(options: Mapping[str, Any]) -> tuple[Optional[SSLContext] def _parse_pool_options( - username: str, password: str, database: Optional[str], options: Mapping[str, Any] + username: str, + password: str, + database: Optional[str], + options: Mapping[str, Any], + is_sync: bool, ) -> PoolOptions: """Parse connection pool options.""" credentials = _parse_credentials(username, password, database, options) @@ -183,6 +187,7 @@ def _parse_pool_options( server_api=server_api, load_balanced=load_balanced, credentials=credentials, + is_sync=is_sync, ) @@ -195,7 +200,12 @@ class ClientOptions: """ def __init__( - self, username: str, password: str, database: Optional[str], options: Mapping[str, Any] + self, + username: str, + password: str, + database: Optional[str], + options: Mapping[str, Any], + is_sync: bool = True, ): self.__options = options self.__codec_options = _parse_codec_options(options) @@ -206,7 +216,7 @@ def __init__( self.__server_selection_timeout = options.get( "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT ) - self.__pool_options = _parse_pool_options(username, password, database, options) + self.__pool_options = _parse_pool_options(username, password, database, options, is_sync) self.__read_preference = _parse_read_preference(options) self.__replica_set_name = options.get("replicaset") self.__write_concern = _parse_write_concern(options) diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index ad3200a3ff..6ec97d7d1b 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -331,6 +331,7 @@ def __init__( server_api: Optional[ServerApi] = None, load_balanced: Optional[bool] = None, credentials: Optional[MongoCredential] = None, + is_sync: Optional[bool] = True, ): self.__max_pool_size = max_pool_size self.__min_pool_size = min_pool_size @@ -350,6 +351,7 @@ def __init__( self.__load_balanced = load_balanced self.__credentials = credentials self.__metadata = copy.deepcopy(_METADATA) + if appname: self.__metadata["application"] = {"name": appname} @@ -361,10 +363,15 @@ def __init__( # }, # 'platform': 'CPython 3.8.0|MyPlatform' # } + if not is_sync: + self.__metadata["driver"]["name"] = "{}|{}".format( + self.__metadata["driver"]["name"], + "async", + ) if driver: if driver.name: self.__metadata["driver"]["name"] = "{}|{}".format( - _METADATA["driver"]["name"], + self.__metadata["driver"]["name"], driver.name, ) if driver.version: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 287ad6af7f..f855d90600 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -829,7 +829,7 @@ def __init__( # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self._options = options = ClientOptions(username, password, dbase, opts) + self._options = options = ClientOptions(username, password, dbase, opts, _IS_SYNC) self._default_database_name = dbase self._lock = _create_lock() diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 9489de1563..d4f09cde33 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -341,6 +341,7 @@ async def test_read_preference(self): def test_metadata(self): metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo|async" metadata["application"] = {"name": "foobar"} client = AsyncMongoClient("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options @@ -359,7 +360,7 @@ def test_metadata(self): self.assertRaises(TypeError, AsyncMongoClient, driver="abc") self.assertRaises(TypeError, AsyncMongoClient, driver=("Foo", "1", "a")) # Test appending to driver info. - metadata["driver"]["name"] = "PyMongo|FooDriver" + metadata["driver"]["name"] = "PyMongo|async|FooDriver" metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) client = AsyncMongoClient( "foo", @@ -403,6 +404,7 @@ def test_metadata(self): @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"}) def test_container_metadata(self): metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo|async" metadata["env"] = {} metadata["env"]["container"] = {"orchestrator": "kubernetes"} client = AsyncMongoClient("mongodb://foo:27017/?appname=foobar&connect=false") @@ -1938,6 +1940,7 @@ def test_sigstop_sigcont(self): async def _test_handshake(self, env_vars, expected_env): with patch.dict("os.environ", env_vars): metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo|async" if expected_env is not None: metadata["env"] = expected_env diff --git a/test/test_client.py b/test/test_client.py index 0c8232643a..22e94dcddb 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -337,6 +337,7 @@ def test_read_preference(self): def test_metadata(self): metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo" metadata["application"] = {"name": "foobar"} client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options @@ -399,6 +400,7 @@ def test_metadata(self): @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"}) def test_container_metadata(self): metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo" metadata["env"] = {} metadata["env"]["container"] = {"orchestrator": "kubernetes"} client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") @@ -1894,6 +1896,7 @@ def test_sigstop_sigcont(self): def _test_handshake(self, env_vars, expected_env): with patch.dict("os.environ", env_vars): metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo" if expected_env is not None: metadata["env"] = expected_env diff --git a/tools/synchro.py b/tools/synchro.py index 65ff3bfe0a..f45112c4cb 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -96,6 +96,7 @@ "async-transactions-ref": "transactions-ref", "async-snapshot-reads-ref": "snapshot-reads-ref", "default_async": "default", + "PyMongo|async": "PyMongo", } docstring_replacements: dict[tuple[str, str], str] = { From 9d3b5033faf80caeecfb3665260de4fd54348004 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 28 Aug 2024 07:38:43 -0500 Subject: [PATCH 1419/2111] PYTHON-3967 SDAM unit test sharded/too_new needs to defined wireVersions for host b (#1817) --- test/discovery_and_monitoring/rs/too_old.json | 4 +++- test/discovery_and_monitoring/sharded/too_new.json | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json index 8100a663f5..dc8a5b2b9c 100644 --- a/test/discovery_and_monitoring/rs/too_old.json +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -30,7 +30,9 @@ "hosts": [ "a:27017", "b:27017" - ] + ], + "minWireVersion": 999, + "maxWireVersion": 1000 } ] ], diff --git a/test/discovery_and_monitoring/sharded/too_new.json b/test/discovery_and_monitoring/sharded/too_new.json index 4b997d2163..c4e984ddec 100644 --- a/test/discovery_and_monitoring/sharded/too_new.json +++ b/test/discovery_and_monitoring/sharded/too_new.json @@ -21,7 +21,9 @@ "ok": 1, "helloOk": true, "isWritablePrimary": true, - "msg": "isdbgrid" + "msg": "isdbgrid", + "minWireVersion": 7, + "maxWireVersion": 900 } ] ], From 28697df6f850f5c7eebcc7445c4f3adc50c1230e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Aug 2024 11:39:03 -0700 Subject: [PATCH 1420/2111] PYTHON-4691 Fix non-UTC timezones with DATETIME_CLAMP/DATETIME_AUTO (#1811) --- bson/datetime_ms.py | 34 +++++++++++++----- test/test_bson.py | 84 +++++++++++++++++++++++++++++++++++++++------ 2 files changed, 98 insertions(+), 20 deletions(-) diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 112871a16c..48e57e0d11 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -114,17 +114,40 @@ def __int__(self) -> int: return self._value +def _datetime_to_millis(dtm: datetime.datetime) -> int: + """Convert datetime to milliseconds since epoch UTC.""" + if dtm.utcoffset() is not None: + dtm = dtm - dtm.utcoffset() # type: ignore + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) + + +_MIN_UTC = datetime.datetime.min.replace(tzinfo=utc) +_MAX_UTC = datetime.datetime.max.replace(tzinfo=utc) +_MIN_UTC_MS = _datetime_to_millis(_MIN_UTC) +_MAX_UTC_MS = _datetime_to_millis(_MAX_UTC) + + # Inclusive and exclusive min and max for timezones. # Timezones are hashed by their offset, which is a timedelta # and therefore there are more than 24 possible timezones. @functools.lru_cache(maxsize=None) def _min_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: - return _datetime_to_millis(datetime.datetime.min.replace(tzinfo=tz)) + delta = tz.utcoffset(_MIN_UTC) + if delta is not None: + offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 + else: + offset_millis = 0 + return max(_MIN_UTC_MS, _MIN_UTC_MS - offset_millis) @functools.lru_cache(maxsize=None) def _max_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: - return _datetime_to_millis(datetime.datetime.max.replace(tzinfo=tz)) + delta = tz.utcoffset(_MAX_UTC) + if delta is not None: + offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 + else: + offset_millis = 0 + return min(_MAX_UTC_MS, _MAX_UTC_MS - offset_millis) def _millis_to_datetime( @@ -162,10 +185,3 @@ def _millis_to_datetime( return DatetimeMS(millis) else: raise ValueError("datetime_conversion must be an element of DatetimeConversion") - - -def _datetime_to_millis(dtm: datetime.datetime) -> int: - """Convert datetime to milliseconds since epoch UTC.""" - if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() # type: ignore - return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) diff --git a/test/test_bson.py b/test/test_bson.py index 8c8fe60182..4996c46b92 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1252,54 +1252,116 @@ def test_class_conversions(self): def test_clamping(self): # Test clamping from below and above. - opts1 = CodecOptions( + opts = CodecOptions( datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=datetime.timezone.utc, ) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1)}) - dec_below = decode(below, opts1) + dec_below = decode(below, opts) self.assertEqual( dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) ) above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1)}) - dec_above = decode(above, opts1) + dec_above = decode(above, opts) self.assertEqual( dec_above["x"], datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), ) - def test_tz_clamping(self): + def test_tz_clamping_local(self): # Naive clamping to local tz. - opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) + opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) - dec_below = decode(below, opts1) + dec_below = decode(below, opts) self.assertEqual(dec_below["x"], datetime.datetime.min) above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) - dec_above = decode(above, opts1) + dec_above = decode(above, opts) self.assertEqual( dec_above["x"], datetime.datetime.max.replace(microsecond=999000), ) - # Aware clamping. - opts2 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) + def test_tz_clamping_utc(self): + # Aware clamping default utc. + opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) - dec_below = decode(below, opts2) + dec_below = decode(below, opts) self.assertEqual( dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) ) above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) - dec_above = decode(above, opts2) + dec_above = decode(above, opts) self.assertEqual( dec_above["x"], datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), ) + def test_tz_clamping_non_utc(self): + for tz in [FixedOffset(60, "+1H"), FixedOffset(-60, "-1H")]: + opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=tz + ) + # Min/max values in this timezone which can be represented in both BSON and datetime UTC. + try: + min_tz = datetime.datetime.min.replace(tzinfo=utc).astimezone(tz) + except OverflowError: + min_tz = datetime.datetime.min.replace(tzinfo=tz) + try: + max_tz = datetime.datetime.max.replace(tzinfo=utc, microsecond=999000).astimezone( + tz + ) + except OverflowError: + max_tz = datetime.datetime.max.replace(tzinfo=tz, microsecond=999000) + + for in_range in [ + min_tz, + min_tz + datetime.timedelta(milliseconds=1), + max_tz - datetime.timedelta(milliseconds=1), + max_tz, + ]: + doc = decode(encode({"x": in_range}), opts) + self.assertEqual(doc["x"], in_range) + + for too_low in [ + DatetimeMS(_datetime_to_millis(min_tz) - 1), + DatetimeMS(_datetime_to_millis(min_tz) - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(min_tz) - 1 - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1 - 60 * 60 * 1000), + ]: + doc = decode(encode({"x": too_low}), opts) + self.assertEqual(doc["x"], min_tz) + + for too_high in [ + DatetimeMS(_datetime_to_millis(max_tz) + 1), + DatetimeMS(_datetime_to_millis(max_tz) + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(max_tz) + 1 + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1 + 60 * 60 * 1000), + ]: + doc = decode(encode({"x": too_high}), opts) + self.assertEqual(doc["x"], max_tz) + + def test_tz_clamping_non_utc_simple(self): + dtm = datetime.datetime(2024, 8, 23) + encoded = encode({"d": dtm}) + self.assertEqual(decode(encoded)["d"], dtm) + for conversion in [ + DatetimeConversion.DATETIME, + DatetimeConversion.DATETIME_CLAMP, + DatetimeConversion.DATETIME_AUTO, + ]: + for tz in [FixedOffset(60, "+1H"), FixedOffset(-60, "-1H")]: + opts = CodecOptions(datetime_conversion=conversion, tz_aware=True, tzinfo=tz) + self.assertEqual(decode(encoded, opts)["d"], dtm.replace(tzinfo=utc).astimezone(tz)) + def test_datetime_auto(self): # Naive auto, in range. opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) From c6967ab139cbc68099e84e2bb6f3789e65804e09 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 Aug 2024 14:48:49 -0400 Subject: [PATCH 1421/2111] PYTHON-3472 - Add log messages to SDAM spec (#1771) Co-authored-by: Jib --- pymongo/asynchronous/monitor.py | 56 +- pymongo/asynchronous/server.py | 17 +- pymongo/asynchronous/topology.py | 59 +- pymongo/logger.py | 14 +- pymongo/synchronous/monitor.py | 56 +- pymongo/synchronous/server.py | 17 +- pymongo/synchronous/topology.py | 59 +- .../rs/compatible.json | 2 +- .../rs/compatible_unknown.json | 2 +- .../sharded/compatible.json | 2 +- .../single/compatible.json | 2 +- .../single/too_old_then_upgraded.json | 4 +- .../unified/logging-loadbalanced.json | 150 +++++ .../unified/logging-replicaset.json | 606 ++++++++++++++++++ .../unified/logging-sharded.json | 492 ++++++++++++++ .../unified/logging-standalone.json | 517 +++++++++++++++ test/unified_format.py | 55 +- 17 files changed, 2064 insertions(+), 46 deletions(-) create mode 100644 test/discovery_and_monitoring/unified/logging-loadbalanced.json create mode 100644 test/discovery_and_monitoring/unified/logging-replicaset.json create mode 100644 test/discovery_and_monitoring/unified/logging-sharded.json create mode 100644 test/discovery_and_monitoring/unified/logging-standalone.json diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index d2ac8868e7..f9e912b084 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -17,6 +17,7 @@ from __future__ import annotations import atexit +import logging import time import weakref from typing import TYPE_CHECKING, Any, Mapping, Optional, cast @@ -28,6 +29,7 @@ from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _create_lock +from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage from pymongo.pool_options import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -257,10 +259,21 @@ async def _check_server(self) -> ServerDescription: sd = self._server_description address = sd.address duration = _monotonic_duration(start) + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) if self._publish: - awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) assert self._listeners is not None self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology._topology_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=duration * 1000, + failure=error, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, + ) await self._reset_connection() if isinstance(error, _OperationCancelled): raise @@ -274,22 +287,32 @@ async def _check_once(self) -> ServerDescription: Returns a ServerDescription, or raises an exception. """ address = self._server_description.address + sd = self._server_description + + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. + awaited = bool( + self._pool.conns and self._stream and sd.is_server_type_known and sd.topology_version + ) if self._publish: assert self._listeners is not None - sd = self._server_description - # XXX: "awaited" could be incorrectly set to True in the rare case - # the pool checkout closes and recreates a connection. - awaited = bool( - self._pool.conns - and self._stream - and sd.is_server_type_known - and sd.topology_version - ) self._listeners.publish_server_heartbeat_started(address, awaited) if self._cancel_context and self._cancel_context.cancelled: await self._reset_connection() async with self._pool.checkout() as conn: + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + message=_SDAMStatusMessage.HEARTBEAT_START, + ) + self._cancel_context = conn.cancel_context response, round_trip_time = await self._check_with_socket(conn) if not response.awaitable: @@ -302,6 +325,19 @@ async def _check_once(self) -> ServerDescription: self._listeners.publish_server_heartbeat_succeeded( address, round_trip_time, response, response.awaitable ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=round_trip_time * 1000, + reply=response.document, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, + ) return sd async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float]: diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index 892594c97d..8d0024afd1 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -30,7 +30,13 @@ from pymongo.asynchronous.helpers import _handle_reauth from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers_shared import _check_command_response -from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.logger import ( + _COMMAND_LOGGER, + _SDAM_LOGGER, + _CommandStatusMessage, + _debug_log, + _SDAMStatusMessage, +) from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query from pymongo.response import PinnedResponse, Response @@ -99,6 +105,15 @@ async def close(self) -> None: (self._description.address, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + serverHost=self._description.address[0], + serverPort=self._description.address[1], + message=_SDAMStatusMessage.STOP_SERVER, + ) + await self._monitor.close() await self._pool.close() diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 4e55db4981..2df30d244f 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -46,8 +46,10 @@ from pymongo.hello import Hello from pymongo.lock import _ACondition, _ALock, _create_lock from pymongo.logger import ( + _SDAM_LOGGER, _SERVER_SELECTION_LOGGER, _debug_log, + _SDAMStatusMessage, _ServerSelectionStatusMessage, ) from pymongo.pool_options import PoolOptions @@ -110,6 +112,13 @@ def __init__(self, topology_settings: TopologySettings): if self._publish_server or self._publish_tp: self._events = queue.Queue(maxsize=100) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + message=_SDAMStatusMessage.START_TOPOLOGY, + ) + if self._publish_tp: assert self._events is not None self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) @@ -124,22 +133,38 @@ def __init__(self, topology_settings: TopologySettings): ) self._description = topology_description + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) if self._publish_tp: assert self._events is not None - initial_td = TopologyDescription( - TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings - ) self._events.put( ( self._listeners.publish_topology_description_changed, (initial_td, self._description, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=initial_td, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) for seed in topology_settings.seeds: if self._publish_server: assert self._events is not None self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + serverHost=seed[0], + serverPort=seed[1], + message=_SDAMStatusMessage.START_SERVER, + ) # Store the seed list to help diagnose errors in _error_message(). self._seed_addresses = list(topology_description.server_descriptions()) @@ -472,6 +497,14 @@ async def _process_change( (td_old, self._description, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=td_old, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) # Shutdown SRV polling for unsupported cluster types. # This is only applicable if the old topology was Unknown, and the @@ -530,6 +563,14 @@ async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: (td_old, self._description, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=td_old, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) async def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new list of nodes obtained from scanning SRV records.""" @@ -684,6 +725,18 @@ async def close(self) -> None: ) ) self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=old_td, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) + _debug_log( + _SDAM_LOGGER, topologyId=self._topology_id, message=_SDAMStatusMessage.STOP_TOPOLOGY + ) + if self._publish_server or self._publish_tp: # Make sure the events executor thread is fully closed before publishing the remaining events self.__events_executor.close() diff --git a/pymongo/logger.py b/pymongo/logger.py index 2caafa778d..2ff35328b4 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -53,6 +53,17 @@ class _ConnectionStatusMessage(str, enum.Enum): CHECKEDIN = "Connection checked in" +class _SDAMStatusMessage(str, enum.Enum): + START_TOPOLOGY = "Starting topology monitoring" + STOP_TOPOLOGY = "Stopped topology monitoring" + START_SERVER = "Starting server monitoring" + STOP_SERVER = "Stopped server monitoring" + TOPOLOGY_CHANGE = "Topology description changed" + HEARTBEAT_START = "Server heartbeat started" + HEARTBEAT_SUCCESS = "Server heartbeat succeeded" + HEARTBEAT_FAIL = "Server heartbeat failed" + + _DEFAULT_DOCUMENT_LENGTH = 1000 _SENSITIVE_COMMANDS = [ "authenticate", @@ -73,6 +84,7 @@ class _ConnectionStatusMessage(str, enum.Enum): _CONNECTION_LOGGER = logging.getLogger("pymongo.connection") _SERVER_SELECTION_LOGGER = logging.getLogger("pymongo.serverSelection") _CLIENT_LOGGER = logging.getLogger("pymongo.client") +_SDAM_LOGGER = logging.getLogger("pymongo.topology") _VERBOSE_CONNECTION_ERROR_REASONS = { ConnectionClosedReason.POOL_CLOSED: "Connection pool was closed", ConnectionCheckOutFailedReason.POOL_CLOSED: "Connection pool was closed", @@ -129,7 +141,7 @@ def _is_sensitive(self, doc_name: str) -> bool: ) is_sensitive_hello = ( - self._kwargs["commandName"] in _HELLO_COMMANDS and is_speculative_authenticate + self._kwargs.get("commandName", None) in _HELLO_COMMANDS and is_speculative_authenticate ) return is_sensitive_command or is_sensitive_hello diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index e3d1f7bf2a..3f9bb2ea75 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -17,6 +17,7 @@ from __future__ import annotations import atexit +import logging import time import weakref from typing import TYPE_CHECKING, Any, Mapping, Optional, cast @@ -26,6 +27,7 @@ from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _create_lock +from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage from pymongo.pool_options import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -257,10 +259,21 @@ def _check_server(self) -> ServerDescription: sd = self._server_description address = sd.address duration = _monotonic_duration(start) + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) if self._publish: - awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) assert self._listeners is not None self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology._topology_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=duration * 1000, + failure=error, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, + ) self._reset_connection() if isinstance(error, _OperationCancelled): raise @@ -274,22 +287,32 @@ def _check_once(self) -> ServerDescription: Returns a ServerDescription, or raises an exception. """ address = self._server_description.address + sd = self._server_description + + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. + awaited = bool( + self._pool.conns and self._stream and sd.is_server_type_known and sd.topology_version + ) if self._publish: assert self._listeners is not None - sd = self._server_description - # XXX: "awaited" could be incorrectly set to True in the rare case - # the pool checkout closes and recreates a connection. - awaited = bool( - self._pool.conns - and self._stream - and sd.is_server_type_known - and sd.topology_version - ) self._listeners.publish_server_heartbeat_started(address, awaited) if self._cancel_context and self._cancel_context.cancelled: self._reset_connection() with self._pool.checkout() as conn: + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + message=_SDAMStatusMessage.HEARTBEAT_START, + ) + self._cancel_context = conn.cancel_context response, round_trip_time = self._check_with_socket(conn) if not response.awaitable: @@ -302,6 +325,19 @@ def _check_once(self) -> ServerDescription: self._listeners.publish_server_heartbeat_succeeded( address, round_trip_time, response, response.awaitable ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=round_trip_time * 1000, + reply=response.document, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, + ) return sd def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py index 347155784f..ed48cc6cc8 100644 --- a/pymongo/synchronous/server.py +++ b/pymongo/synchronous/server.py @@ -29,7 +29,13 @@ from bson import _decode_all_selective from pymongo.errors import NotPrimaryError, OperationFailure from pymongo.helpers_shared import _check_command_response -from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.logger import ( + _COMMAND_LOGGER, + _SDAM_LOGGER, + _CommandStatusMessage, + _debug_log, + _SDAMStatusMessage, +) from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query from pymongo.response import PinnedResponse, Response from pymongo.synchronous.helpers import _handle_reauth @@ -99,6 +105,15 @@ def close(self) -> None: (self._description.address, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + serverHost=self._description.address[0], + serverPort=self._description.address[1], + message=_SDAMStatusMessage.STOP_SERVER, + ) + self._monitor.close() self._pool.close() diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 8542f67bb3..54a9d8a69e 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -41,8 +41,10 @@ from pymongo.hello import Hello from pymongo.lock import _create_lock from pymongo.logger import ( + _SDAM_LOGGER, _SERVER_SELECTION_LOGGER, _debug_log, + _SDAMStatusMessage, _ServerSelectionStatusMessage, ) from pymongo.pool_options import PoolOptions @@ -110,6 +112,13 @@ def __init__(self, topology_settings: TopologySettings): if self._publish_server or self._publish_tp: self._events = queue.Queue(maxsize=100) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + message=_SDAMStatusMessage.START_TOPOLOGY, + ) + if self._publish_tp: assert self._events is not None self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) @@ -124,22 +133,38 @@ def __init__(self, topology_settings: TopologySettings): ) self._description = topology_description + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) if self._publish_tp: assert self._events is not None - initial_td = TopologyDescription( - TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings - ) self._events.put( ( self._listeners.publish_topology_description_changed, (initial_td, self._description, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=initial_td, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) for seed in topology_settings.seeds: if self._publish_server: assert self._events is not None self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + serverHost=seed[0], + serverPort=seed[1], + message=_SDAMStatusMessage.START_SERVER, + ) # Store the seed list to help diagnose errors in _error_message(). self._seed_addresses = list(topology_description.server_descriptions()) @@ -472,6 +497,14 @@ def _process_change( (td_old, self._description, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=td_old, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) # Shutdown SRV polling for unsupported cluster types. # This is only applicable if the old topology was Unknown, and the @@ -530,6 +563,14 @@ def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: (td_old, self._description, self._topology_id), ) ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=td_old, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new list of nodes obtained from scanning SRV records.""" @@ -682,6 +723,18 @@ def close(self) -> None: ) ) self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + topologyId=self._topology_id, + previousDescription=old_td, + newDescription=self._description, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + ) + _debug_log( + _SDAM_LOGGER, topologyId=self._topology_id, message=_SDAMStatusMessage.STOP_TOPOLOGY + ) + if self._publish_server or self._publish_tp: # Make sure the events executor thread is fully closed before publishing the remaining events self.__events_executor.close() diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json index dfd5d57dfa..444b13e9d5 100644 --- a/test/discovery_and_monitoring/rs/compatible.json +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 21 + "maxWireVersion": 6 } ], [ diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json index 95e03ea958..cf92dd1ed3 100644 --- a/test/discovery_and_monitoring/rs/compatible_unknown.json +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 21 + "maxWireVersion": 6 } ] ], diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json index ceb0ec24c4..e531db97f9 100644 --- a/test/discovery_and_monitoring/sharded/compatible.json +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 21 + "maxWireVersion": 6 } ] ], diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json index 493d9b748e..302927598c 100644 --- a/test/discovery_and_monitoring/single/compatible.json +++ b/test/discovery_and_monitoring/single/compatible.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 21 + "maxWireVersion": 6 } ] ], diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json index c3dd98cf62..58ae7d9de4 100644 --- a/test/discovery_and_monitoring/single/too_old_then_upgraded.json +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -1,5 +1,5 @@ { - "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 21", + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 6", "uri": "mongodb://a", "phases": [ { @@ -35,7 +35,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 21 + "maxWireVersion": 6 } ] ], diff --git a/test/discovery_and_monitoring/unified/logging-loadbalanced.json b/test/discovery_and_monitoring/unified/logging-loadbalanced.json new file mode 100644 index 0000000000..45440d2557 --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-loadbalanced.json @@ -0,0 +1,150 @@ +{ + "description": "loadbalanced-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-replicaset.json b/test/discovery_and_monitoring/unified/logging-replicaset.json new file mode 100644 index 0000000000..e6738225cd --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-replicaset.json @@ -0,0 +1,606 @@ +{ + "description": "replicaset-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 3 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-sharded.json b/test/discovery_and_monitoring/unified/logging-sharded.json new file mode 100644 index 0000000000..61b27f5be0 --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-sharded.json @@ -0,0 +1,492 @@ +{ + "description": "sharded-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": true + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 3 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-standalone.json b/test/discovery_and_monitoring/unified/logging-standalone.json new file mode 100644 index 0000000000..1ee6dbe899 --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-standalone.json @@ -0,0 +1,517 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index d978ef84d3..99fe0b1693 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -45,6 +45,7 @@ GCP_CREDS, KMIP_CREDS, LOCAL_MASTER_KEY, + client_knobs, ) from test.utils import ( CMAPListener, @@ -851,7 +852,7 @@ def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=Non return False - def _match_document(self, expectation, actual, is_root): + def _match_document(self, expectation, actual, is_root, test=False): if self._evaluate_if_special_operation(expectation, actual): return @@ -861,35 +862,48 @@ def _match_document(self, expectation, actual, is_root): continue self.test.assertIn(key, actual) - self.match_result(value, actual[key], in_recursive_call=True) + if not self.match_result(value, actual[key], in_recursive_call=True, test=test): + return False if not is_root: expected_keys = set(expectation.keys()) for key, value in expectation.items(): if value == {"$$exists": False}: expected_keys.remove(key) - self.test.assertEqual(expected_keys, set(actual.keys())) + if test: + self.test.assertEqual(expected_keys, set(actual.keys())) + else: + return set(expected_keys).issubset(set(actual.keys())) + return True - def match_result(self, expectation, actual, in_recursive_call=False): + def match_result(self, expectation, actual, in_recursive_call=False, test=True): if isinstance(expectation, abc.Mapping): - return self._match_document(expectation, actual, is_root=not in_recursive_call) + return self._match_document( + expectation, actual, is_root=not in_recursive_call, test=test + ) if isinstance(expectation, abc.MutableSequence): self.test.assertIsInstance(actual, abc.MutableSequence) for e, a in zip(expectation, actual): if isinstance(e, abc.Mapping): - self._match_document(e, a, is_root=not in_recursive_call) + self._match_document(e, a, is_root=not in_recursive_call, test=test) else: - self.match_result(e, a, in_recursive_call=True) + self.match_result(e, a, in_recursive_call=True, test=test) return None # account for flexible numerics in element-wise comparison if isinstance(expectation, int) or isinstance(expectation, float): - self.test.assertEqual(expectation, actual) + if test: + self.test.assertEqual(expectation, actual) + else: + return expectation == actual return None else: - self.test.assertIsInstance(actual, type(expectation)) - self.test.assertEqual(expectation, actual) + if test: + self.test.assertIsInstance(actual, type(expectation)) + self.test.assertEqual(expectation, actual) + else: + return isinstance(actual, type(expectation)) and expectation == actual return None def match_server_description(self, actual: ServerDescription, spec: dict) -> None: @@ -1891,6 +1905,20 @@ def check_events(self, spec): else: assert server_connection_id is None + def process_ignore_messages(self, ignore_logs, actual_logs): + final_logs = [] + for log in actual_logs: + ignored = False + for ignore_log in ignore_logs: + if log["data"]["message"] == ignore_log["data"][ + "message" + ] and self.match_evaluator.match_result(ignore_log, log, test=False): + ignored = True + break + if not ignored: + final_logs.append(log) + return final_logs + def check_log_messages(self, operations, spec): def format_logs(log_list): client_to_log = defaultdict(list) @@ -1898,7 +1926,7 @@ def format_logs(log_list): if log.module == "ocsp_support": continue data = json_util.loads(log.message) - client = data.pop("clientId") + client = data.pop("clientId") if "clientId" in data else data.pop("topologyId") client_to_log[client].append( { "level": log.levelname.lower(), @@ -1919,6 +1947,11 @@ def format_logs(log_list): clientid = self.entity_map[client["client"]]._topology_settings._topology_id actual_logs = formatted_logs[clientid] actual_logs = [log for log in actual_logs if log["component"] in components] + + ignore_logs = client.get("ignoreMessages", []) + if ignore_logs: + actual_logs = self.process_ignore_messages(ignore_logs, actual_logs) + if client.get("ignoreExtraMessages", False): actual_logs = actual_logs[: len(client["messages"])] self.assertEqual(len(client["messages"]), len(actual_logs)) From a2059dc9cb5485d16fbf31e32d3d816604e1bef9 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 28 Aug 2024 14:20:55 -0700 Subject: [PATCH 1422/2111] PYTHON-4663 Fix compatibility with dateutil timezones (#1812) --- bson/_cbsonmodule.c | 283 +++++++++++++++++++++---------------- bson/datetime_ms.py | 13 +- bson/json_util.py | 4 +- bson/objectid.py | 11 +- doc/changelog.rst | 6 + doc/examples/datetimes.rst | 2 +- test/test_bson.py | 25 ++++ test/test_json_util.py | 8 +- test/test_objectid.py | 3 - 9 files changed, 205 insertions(+), 150 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 68ec9fe45c..cc498f448e 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -53,8 +53,10 @@ struct module_state { PyObject* Decimal128; PyObject* Mapping; PyObject* DatetimeMS; - PyObject* _min_datetime_ms; - PyObject* _max_datetime_ms; + PyObject* min_datetime; + PyObject* max_datetime; + PyObject* replace_args; + PyObject* replace_kwargs; PyObject* _type_marker_str; PyObject* _flags_str; PyObject* _pattern_str; @@ -80,6 +82,8 @@ struct module_state { PyObject* _from_uuid_str; PyObject* _as_uuid_str; PyObject* _from_bid_str; + int64_t min_millis; + int64_t max_millis; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -253,7 +257,7 @@ static PyObject* datetime_from_millis(long long millis) { * 2. Multiply that by 1000: 253402300799000 * 3. Add in microseconds divided by 1000 253402300799999 * - * (Note: BSON doesn't support microsecond accuracy, hence the rounding.) + * (Note: BSON doesn't support microsecond accuracy, hence the truncation.) * * To decode we could do: * 1. Get seconds: timestamp / 1000: 253402300799 @@ -376,6 +380,118 @@ static int millis_from_datetime_ms(PyObject* dt, long long* out){ return 1; } +static PyObject* decode_datetime(PyObject* self, long long millis, const codec_options_t* options){ + PyObject* naive = NULL; + PyObject* replace = NULL; + PyObject* args = NULL; + PyObject* kwargs = NULL; + PyObject* value = NULL; + struct module_state *state = GETSTATE(self); + if (options->datetime_conversion == DATETIME_MS){ + return datetime_ms_from_millis(self, millis); + } + + int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; + int dt_auto = options->datetime_conversion == DATETIME_AUTO; + + if (dt_clamp || dt_auto){ + int64_t min_millis = state->min_millis; + int64_t max_millis = state->max_millis; + int64_t min_millis_offset = 0; + int64_t max_millis_offset = 0; + if (options->tz_aware && options->tzinfo && options->tzinfo != Py_None) { + PyObject* utcoffset = PyObject_CallMethodObjArgs(options->tzinfo, state->_utcoffset_str, state->min_datetime, NULL); + if (utcoffset == NULL) { + return 0; + } + if (utcoffset != Py_None) { + if (!PyDelta_Check(utcoffset)) { + PyObject* BSONError = _error("BSONError"); + if (BSONError) { + PyErr_SetString(BSONError, "tzinfo.utcoffset() did not return a datetime.timedelta"); + Py_DECREF(BSONError); + } + Py_DECREF(utcoffset); + return 0; + } + min_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * 86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * 1000 + + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); + } + Py_DECREF(utcoffset); + utcoffset = PyObject_CallMethodObjArgs(options->tzinfo, state->_utcoffset_str, state->max_datetime, NULL); + if (utcoffset == NULL) { + return 0; + } + if (utcoffset != Py_None) { + if (!PyDelta_Check(utcoffset)) { + PyObject* BSONError = _error("BSONError"); + if (BSONError) { + PyErr_SetString(BSONError, "tzinfo.utcoffset() did not return a datetime.timedelta"); + Py_DECREF(BSONError); + } + Py_DECREF(utcoffset); + return 0; + } + max_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * 86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * 1000 + + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); + } + Py_DECREF(utcoffset); + } + if (min_millis_offset < 0) { + min_millis -= min_millis_offset; + } + + if (max_millis_offset > 0) { + max_millis -= max_millis_offset; + } + + if (dt_clamp) { + if (millis < min_millis) { + millis = min_millis; + } else if (millis > max_millis) { + millis = max_millis; + } + // Continues from here to return a datetime. + } else { // dt_auto + if (millis < min_millis || millis > max_millis){ + return datetime_ms_from_millis(self, millis); + } + } + } + + naive = datetime_from_millis(millis); + if (!naive) { + goto invalid; + } + + if (!options->tz_aware) { /* In the naive case, we're done here. */ + return naive; + } + replace = PyObject_GetAttr(naive, state->_replace_str); + if (!replace) { + goto invalid; + } + value = PyObject_Call(replace, state->replace_args, state->replace_kwargs); + if (!value) { + goto invalid; + } + + /* convert to local time */ + if (options->tzinfo != Py_None) { + PyObject* temp = PyObject_CallMethodObjArgs(value, state->_astimezone_str, options->tzinfo, NULL); + Py_DECREF(value); + value = temp; + } +invalid: + Py_XDECREF(naive); + Py_XDECREF(replace); + Py_XDECREF(args); + Py_XDECREF(kwargs); + return value; +} + /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { if (pymongo_buffer_write(buffer, data, size)) { @@ -482,6 +598,8 @@ static int _load_python_objects(PyObject* module) { PyObject* empty_string = NULL; PyObject* re_compile = NULL; PyObject* compiled = NULL; + PyObject* min_datetime_ms = NULL; + PyObject* max_datetime_ms = NULL; struct module_state *state = GETSTATE(module); if (!state) { return 1; @@ -530,10 +648,34 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->UUID, "uuid", "UUID") || _load_object(&state->Mapping, "collections.abc", "Mapping") || _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || - _load_object(&state->_min_datetime_ms, "bson.datetime_ms", "_min_datetime_ms") || - _load_object(&state->_max_datetime_ms, "bson.datetime_ms", "_max_datetime_ms")) { + _load_object(&min_datetime_ms, "bson.datetime_ms", "_MIN_UTC_MS") || + _load_object(&max_datetime_ms, "bson.datetime_ms", "_MAX_UTC_MS") || + _load_object(&state->min_datetime, "bson.datetime_ms", "_MIN_UTC") || + _load_object(&state->max_datetime, "bson.datetime_ms", "_MAX_UTC")) { + return 1; + } + + state->min_millis = PyLong_AsLongLong(min_datetime_ms); + state->max_millis = PyLong_AsLongLong(max_datetime_ms); + Py_DECREF(min_datetime_ms); + Py_DECREF(max_datetime_ms); + if ((state->min_millis == -1 || state->max_millis == -1) && PyErr_Occurred()) { + return 1; + } + + /* Speed up datetime.replace(tzinfo=utc) call */ + state->replace_args = PyTuple_New(0); + if (!state->replace_args) { + return 1; + } + state->replace_kwargs = PyDict_New(); + if (!state->replace_kwargs) { return 1; } + if (PyDict_SetItem(state->replace_kwargs, state->_tzinfo_str, state->UTC) == -1) { + return 1; + } + /* Reload our REType hack too. */ empty_string = PyBytes_FromString(""); if (empty_string == NULL) { @@ -1247,8 +1389,8 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; if (utcoffset != Py_None) { PyObject* result = PyNumber_Subtract(value, utcoffset); - Py_DECREF(utcoffset); if (!result) { + Py_DECREF(utcoffset); return 0; } millis = millis_from_datetime(result); @@ -1256,6 +1398,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else { millis = millis_from_datetime(value); } + Py_DECREF(utcoffset); *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { @@ -2043,11 +2186,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 9: { - PyObject* naive; - PyObject* replace; - PyObject* args; - PyObject* kwargs; - PyObject* astimezone; int64_t millis; if (max < 8) { goto invalid; @@ -2056,120 +2194,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, millis = (int64_t)BSON_UINT64_FROM_LE(millis); *position += 8; - if (options->datetime_conversion == DATETIME_MS){ - value = datetime_ms_from_millis(self, millis); - break; - } - - int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; - int dt_auto = options->datetime_conversion == DATETIME_AUTO; - - - if (dt_clamp || dt_auto){ - PyObject *min_millis_fn_res; - PyObject *max_millis_fn_res; - int64_t min_millis; - int64_t max_millis; - - if (options->tz_aware){ - PyObject* tzinfo = options->tzinfo; - if (tzinfo == Py_None) { - // Default to UTC. - tzinfo = state->UTC; - } - min_millis_fn_res = PyObject_CallFunctionObjArgs(state->_min_datetime_ms, tzinfo, NULL); - max_millis_fn_res = PyObject_CallFunctionObjArgs(state->_max_datetime_ms, tzinfo, NULL); - } else { - min_millis_fn_res = PyObject_CallObject(state->_min_datetime_ms, NULL); - max_millis_fn_res = PyObject_CallObject(state->_max_datetime_ms, NULL); - } - - if (!min_millis_fn_res || !max_millis_fn_res){ - Py_XDECREF(min_millis_fn_res); - Py_XDECREF(max_millis_fn_res); - goto invalid; - } - - min_millis = PyLong_AsLongLong(min_millis_fn_res); - max_millis = PyLong_AsLongLong(max_millis_fn_res); - - if ((min_millis == -1 || max_millis == -1) && PyErr_Occurred()) - { - // min/max_millis check - goto invalid; - } - - if (dt_clamp) { - if (millis < min_millis) { - millis = min_millis; - } else if (millis > max_millis) { - millis = max_millis; - } - // Continues from here to return a datetime. - } else { // dt_auto - if (millis < min_millis || millis > max_millis){ - value = datetime_ms_from_millis(self, millis); - break; // Out-of-range so done. - } - } - } - - naive = datetime_from_millis(millis); - if (!options->tz_aware) { /* In the naive case, we're done here. */ - value = naive; - break; - } - - if (!naive) { - goto invalid; - } - replace = PyObject_GetAttr(naive, state->_replace_str); - Py_DECREF(naive); - if (!replace) { - goto invalid; - } - args = PyTuple_New(0); - if (!args) { - Py_DECREF(replace); - goto invalid; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(replace); - Py_DECREF(args); - goto invalid; - } - if (PyDict_SetItem(kwargs, state->_tzinfo_str, state->UTC) == -1) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - value = PyObject_Call(replace, args, kwargs); - if (!value) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - - /* convert to local time */ - if (options->tzinfo != Py_None) { - astimezone = PyObject_GetAttr(value, state->_astimezone_str); - Py_DECREF(value); - if (!astimezone) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - value = PyObject_CallFunctionObjArgs(astimezone, options->tzinfo, NULL); - Py_DECREF(astimezone); - } - - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); + value = decode_datetime(self, millis, options); break; } case 11: @@ -3053,6 +3078,10 @@ static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(state->_from_uuid_str); Py_VISIT(state->_as_uuid_str); Py_VISIT(state->_from_bid_str); + Py_VISIT(state->min_datetime); + Py_VISIT(state->max_datetime); + Py_VISIT(state->replace_args); + Py_VISIT(state->replace_kwargs); return 0; } @@ -3097,6 +3126,10 @@ static int _cbson_clear(PyObject *m) { Py_CLEAR(state->_from_uuid_str); Py_CLEAR(state->_as_uuid_str); Py_CLEAR(state->_from_bid_str); + Py_CLEAR(state->min_datetime); + Py_CLEAR(state->max_datetime); + Py_CLEAR(state->replace_args); + Py_CLEAR(state->replace_kwargs); return 0; } diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 48e57e0d11..1b6fa22794 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -20,7 +20,6 @@ import calendar import datetime -import functools from typing import Any, Union, cast from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion @@ -127,11 +126,8 @@ def _datetime_to_millis(dtm: datetime.datetime) -> int: _MAX_UTC_MS = _datetime_to_millis(_MAX_UTC) -# Inclusive and exclusive min and max for timezones. -# Timezones are hashed by their offset, which is a timedelta -# and therefore there are more than 24 possible timezones. -@functools.lru_cache(maxsize=None) -def _min_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: +# Inclusive min and max for timezones. +def _min_datetime_ms(tz: datetime.tzinfo = utc) -> int: delta = tz.utcoffset(_MIN_UTC) if delta is not None: offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 @@ -140,8 +136,7 @@ def _min_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: return max(_MIN_UTC_MS, _MIN_UTC_MS - offset_millis) -@functools.lru_cache(maxsize=None) -def _max_datetime_ms(tz: datetime.timezone = datetime.timezone.utc) -> int: +def _max_datetime_ms(tz: datetime.tzinfo = utc) -> int: delta = tz.utcoffset(_MAX_UTC) if delta is not None: offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 @@ -159,7 +154,7 @@ def _millis_to_datetime( or opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP or opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO ): - tz = opts.tzinfo or datetime.timezone.utc + tz = opts.tzinfo or utc if opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP: millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) elif opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO: diff --git a/bson/json_util.py b/bson/json_util.py index 6c5197c75a..4269ba9858 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -125,10 +125,10 @@ from bson.code import Code from bson.codec_options import CodecOptions, DatetimeConversion from bson.datetime_ms import ( + _MAX_UTC_MS, EPOCH_AWARE, DatetimeMS, _datetime_to_millis, - _max_datetime_ms, _millis_to_datetime, ) from bson.dbref import DBRef @@ -844,7 +844,7 @@ def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: if ( json_options.datetime_representation == DatetimeRepresentation.ISO8601 - and 0 <= int(obj) <= _max_datetime_ms() + and 0 <= int(obj) <= _MAX_UTC_MS ): return _encode_datetime(obj.as_datetime(), json_options) elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: diff --git a/bson/objectid.py b/bson/objectid.py index a5500872da..970c4e52e8 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -16,7 +16,6 @@ from __future__ import annotations import binascii -import calendar import datetime import os import struct @@ -25,6 +24,7 @@ from random import SystemRandom from typing import Any, NoReturn, Optional, Type, Union +from bson.datetime_ms import _datetime_to_millis from bson.errors import InvalidId from bson.tz_util import utc @@ -131,11 +131,10 @@ def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> Ob :param generation_time: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - offset = generation_time.utcoffset() - if offset is not None: - generation_time = generation_time - offset - timestamp = calendar.timegm(generation_time.timetuple()) - oid = _PACK_INT(int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" + oid = ( + _PACK_INT(_datetime_to_millis(generation_time) // 1000) + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + ) return cls(oid) @classmethod diff --git a/doc/changelog.rst b/doc/changelog.rst index d80f78fe4d..42a4fdf50f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -31,6 +31,12 @@ PyMongo 4.9 brings a number of improvements including: :class:`~pymongo.operations.DeleteMany` operations, so they can be used in the new :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - Added :func:`repr` support to :class:`bson.tz_util.FixedOffset`. +- Fixed a bug where PyMongo would raise ``InvalidBSON: unhashable type: 'tzfile'`` + when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or + :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a timezone from dateutil. +- Fixed a bug where PyMongo would raise ``InvalidBSON: date value out of range`` + when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or + :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a non-UTC timezone. Issues Resolved ............... diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 5571880e94..1790506423 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -98,7 +98,7 @@ out of MongoDB in US/Pacific time: >>> aware_times = db.times.with_options(codec_options=CodecOptions( ... tz_aware=True, ... tzinfo=pytz.timezone('US/Pacific'))) - >>> result = aware_times.find_one() + >>> result = aware_times.find_one()['date'] datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE tzinfo=) diff --git a/test/test_bson.py b/test/test_bson.py index 4996c46b92..a0190ef2d8 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1362,6 +1362,31 @@ def test_tz_clamping_non_utc_simple(self): opts = CodecOptions(datetime_conversion=conversion, tz_aware=True, tzinfo=tz) self.assertEqual(decode(encoded, opts)["d"], dtm.replace(tzinfo=utc).astimezone(tz)) + def test_tz_clamping_non_hashable(self): + class NonHashableTZ(FixedOffset): + __hash__ = None + + tz = NonHashableTZ(0, "UTC-non-hashable") + self.assertRaises(TypeError, hash, tz) + # Aware clamping. + opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=tz + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts) + self.assertEqual(dec_below["x"], datetime.datetime.min.replace(tzinfo=tz)) + + within = encode({"x": EPOCH_AWARE.astimezone(tz)}) + dec_within = decode(within, opts) + self.assertEqual(dec_within["x"], EPOCH_AWARE.astimezone(tz)) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=tz, microsecond=999000), + ) + def test_datetime_auto(self): # Naive auto, in range. opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) diff --git a/test/test_json_util.py b/test/test_json_util.py index 0f73a8efd9..3a40c174e8 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -39,7 +39,7 @@ UuidRepresentation, ) from bson.code import Code -from bson.datetime_ms import _max_datetime_ms +from bson.datetime_ms import _MAX_UTC_MS from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.int64 import Int64 @@ -257,7 +257,7 @@ def test_datetime(self): def test_datetime_ms(self): # Test ISO8601 in-range dat_min: dict[str, Any] = {"x": DatetimeMS(0)} - dat_max: dict[str, Any] = {"x": DatetimeMS(_max_datetime_ms())} + dat_max: dict[str, Any] = {"x": DatetimeMS(_MAX_UTC_MS)} opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) self.assertEqual( @@ -271,7 +271,7 @@ def test_datetime_ms(self): # Test ISO8601 out-of-range dat_min = {"x": DatetimeMS(-1)} - dat_max = {"x": DatetimeMS(_max_datetime_ms() + 1)} + dat_max = {"x": DatetimeMS(_MAX_UTC_MS + 1)} self.assertEqual('{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min)) self.assertEqual( @@ -302,7 +302,7 @@ def test_datetime_ms(self): # Test decode from datetime.datetime to DatetimeMS dat_min = {"x": datetime.datetime.min} - dat_max = {"x": DatetimeMS(_max_datetime_ms()).as_datetime(CodecOptions(tz_aware=False))} + dat_max = {"x": DatetimeMS(_MAX_UTC_MS).as_datetime(CodecOptions(tz_aware=False))} opts = JSONOptions( datetime_representation=DatetimeRepresentation.ISO8601, datetime_conversion=DatetimeConversion.DATETIME_MS, diff --git a/test/test_objectid.py b/test/test_objectid.py index 771ba09422..26670832f6 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -95,9 +95,6 @@ def test_generation_time(self): self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) def test_from_datetime(self): - if "PyPy 1.8.0" in sys.version: - # See https://bugs.pypy.org/issue1092 - raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) From e6b95f65953e01e435004af069a6976473eaf841 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 29 Aug 2024 10:02:47 -0400 Subject: [PATCH 1423/2111] PYTHON-4673 - Add Async Encryption Tests (#1818) --- pymongo/asynchronous/encryption.py | 6 +- pymongo/asynchronous/network.py | 2 +- pymongo/asynchronous/server.py | 2 +- pymongo/synchronous/mongo_client.py | 2 +- test/asynchronous/test_encryption.py | 3155 ++++++++++++++++++++++++++ test/test_encryption.py | 367 +-- tools/synchro.py | 2 + 7 files changed, 3353 insertions(+), 183 deletions(-) create mode 100644 test/asynchronous/test_encryption.py diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 93484541eb..3fb00c6ca9 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -50,7 +50,7 @@ _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False - MongoCryptCallback = object + AsyncMongoCryptCallback = object from bson import _dict_to_bson, decode, encode from bson.binary import STANDARD, UUID_SUBTYPE, Binary @@ -207,10 +207,10 @@ async def collection_info( :return: The first document from the listCollections command response as BSON. """ - async with self.client_ref()[database].list_collections( + async with await self.client_ref()[database].list_collections( filter=RawBSONDocument(filter) ) as cursor: - for doc in cursor: + async for doc in cursor: return _dict_to_bson(doc, False, _DATA_KEY_OPTS) return None diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index ff43a5ffcb..44a63a2fc3 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -297,7 +297,7 @@ async def command( ) if client and client._encrypter and reply: - decrypted = client._encrypter.decrypt(reply.raw_command_response()) + decrypted = await client._encrypter.decrypt(reply.raw_command_response()) response_doc = cast( "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] ) diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index 8d0024afd1..72f22584e2 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -309,7 +309,7 @@ async def run_operation( client = operation.client # type: ignore[assignment] if client and client._encrypter: if use_cmd: - decrypted = client._encrypter.decrypt(reply.raw_command_response()) + decrypted = await client._encrypter.decrypt(reply.raw_command_response()) docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) response: Response diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index f855d90600..77e029a7c9 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1538,7 +1538,7 @@ def close(self) -> None: if not _IS_SYNC: # Add support for contextlib.closing. - aclose = close + close = close def _get_topology(self) -> Topology: """Get the internal :class:`~pymongo.topology.Topology` object. diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py new file mode 100644 index 0000000000..eb431e1d50 --- /dev/null +++ b/test/asynchronous/test_encryption.py @@ -0,0 +1,3155 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption spec.""" +from __future__ import annotations + +import base64 +import copy +import os +import pathlib +import re +import socket +import socketserver +import ssl +import sys +import textwrap +import traceback +import uuid +import warnings +from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context +from threading import Thread +from typing import Any, Dict, Mapping + +import pytest + +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.helpers import anext +from pymongo.daemon import _spawn_daemon + +sys.path[0:0] = [""] + +from test import ( + unittest, +) +from test.helpers import ( + AWS_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, +) +from test.test_bulk import BulkTestBase +from test.unified_format import generate_test_classes +from test.utils import ( + AllowListEventListener, + OvertCommandListener, + SpecTestCreator, + TopologyEventListener, + async_rs_or_single_client, + async_wait_until, + camel_to_snake_args, + is_greenthread_patched, +) +from test.utils_spec_runner import SpecRunner + +from bson import DatetimeMS, Decimal128, encode, json_util +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.json_util import JSONOptions +from bson.son import SON +from pymongo import ReadPreference +from pymongo.asynchronous import encryption +from pymongo.asynchronous.encryption import Algorithm, AsyncClientEncryption, QueryType +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.cursor_shared import CursorType +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, + WriteError, +) +from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +pytestmark = pytest.mark.encryption + +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} + + +def get_client_opts(client): + return client.options + + +class TestAutoEncryptionOpts(AsyncPyMongoTestCase): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically + client = AsyncMongoClient( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True + ), + connect=False, + ) + self.addAsyncCleanup(client.aclose) + + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") + def test_init_requires_pymongocrypt(self): + with self.assertRaises(ConfigurationError): + AutoEncryptionOpts({}, "keyvault.datakeys") + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init(self): + opts = AutoEncryptionOpts({}, "keyvault.datakeys") + self.assertEqual(opts._kms_providers, {}) + self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys") + self.assertEqual(opts._key_vault_client, None) + self.assertEqual(opts._schema_map, None) + self.assertEqual(opts._bypass_auto_encryption, False) + self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020") + self.assertEqual(opts._mongocryptd_bypass_spawn, False) + self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + self.assertEqual(opts._kms_ssl_contexts, {}) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_spawn_args(self): + # User can override idleShutdownTimeoutSecs + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"] + ) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"]) + + # idleShutdownTimeoutSecs is added by default + opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[]) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + + # Also added when other options are given + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"] + ) + self.assertEqual( + opts._mongocryptd_spawn_args, + ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"], + ) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_kms_tls_options(self): + # Error cases: + with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + tls_opts: Any + for tls_opts in [ + {"kmip": {"tls": True, "tlsInsecure": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, + ]: + with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + with self.assertRaises(FileNotFoundError): + AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}}) + # Success cases: + tls_opts: Any + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + self.assertEqual(opts._kms_ssl_contexts, {}) + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) + ctx = opts._kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = opts._kms_ssl_contexts["aws"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, + "k.d", + kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, + ) + ctx = opts._kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + + +class TestClientOptions(AsyncPyMongoTestCase): + async def test_default(self): + client = AsyncMongoClient(connect=False) + self.addAsyncCleanup(client.aclose) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + client = AsyncMongoClient(auto_encryption_opts=None, connect=False) + self.addAsyncCleanup(client.aclose) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_kwargs(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = AsyncMongoClient(auto_encryption_opts=opts, connect=False) + self.addAsyncCleanup(client.aclose) + self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) + + +class AsyncEncryptionIntegrationTest(AsyncIntegrationTest): + """Base class for encryption integration tests.""" + + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def _setup_class(cls): + await super()._setup_class() + + def assertEncrypted(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, 6) + + def assertBinaryUUID(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, UUID_SUBTYPE) + + +# Location of JSON test files. +if _IS_SYNC: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent, "client-side-encryption") +else: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "client-side-encryption") + +SPEC_PATH = os.path.join(BASE, "spec") + +OPTS = CodecOptions() + +# Use SON to preserve the order of fields while parsing json. Use tz_aware +# =False to match how CodecOptions decodes dates. +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) + + +def read(*paths): + with open(os.path.join(BASE, *paths)) as fp: + return fp.read() + + +def json_data(*paths): + return json_util.loads(read(*paths), json_options=JSON_OPTS) + + +def bson_data(*paths): + return encode(json_data(*paths), codec_options=OPTS) + + +class TestClientSimple(AsyncEncryptionIntegrationTest): + async def _test_auto_encrypt(self, opts): + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + + # Create the encrypted field's data key. + key_vault = await create_key_vault( + self.client.keyvault.datakeys, json_data("custom", "key-document-local.json") + ) + self.addAsyncCleanup(key_vault.drop) + + # Collection.insert_one/insert_many auto encrypts. + docs = [ + {"_id": 0, "ssn": "000"}, + {"_id": 1, "ssn": "111"}, + {"_id": 2, "ssn": "222"}, + {"_id": 3, "ssn": "333"}, + {"_id": 4, "ssn": "444"}, + {"_id": 5, "ssn": "555"}, + ] + encrypted_coll = client.pymongo_test.test + await encrypted_coll.insert_one(docs[0]) + await encrypted_coll.insert_many(docs[1:3]) + unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) + await unack.insert_one(docs[3]) + await unack.insert_many(docs[4:], ordered=False) + + async def count_documents(): + return await self.db.test.count_documents({}) == len(docs) + + await async_wait_until(count_documents, "insert documents with w=0") + + # Database.command auto decrypts. + res = await client.pymongo_test.command("find", "test", filter={"ssn": "000"}) + decrypted_docs = res["cursor"]["firstBatch"] + self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) + + # Collection.find auto decrypts. + decrypted_docs = await encrypted_coll.find().to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.find auto decrypts getMores. + decrypted_docs = await encrypted_coll.find(batch_size=1).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts. + decrypted_docs = await (await encrypted_coll.aggregate([])).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts getMores. + decrypted_docs = await (await encrypted_coll.aggregate([], batchSize=1)).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.distinct auto decrypts. + decrypted_ssns = await encrypted_coll.distinct("ssn") + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) + + # Make sure the field is actually encrypted. + async for encrypted_doc in self.db.test.find(): + self.assertIsInstance(encrypted_doc["_id"], int) + self.assertEncrypted(encrypted_doc["ssn"]) + + # Attempt to encrypt an unencodable object. + with self.assertRaises(BSONError): + await encrypted_coll.insert_one({"unencodeable": object()}) + + async def test_auto_encrypt(self): + # Configure the encrypted field via jsonSchema. + json_schema = json_data("custom", "schema.json") + await create_with_schema(self.db.test, json_schema) + self.addAsyncCleanup(self.db.test.drop) + + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + await self._test_auto_encrypt(opts) + + async def test_auto_encrypt_local_schema_map(self): + # Configure the encrypted field via the local schema_map option. + schemas = {"pymongo_test.test": json_data("custom", "schema.json")} + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas) + + await self._test_auto_encrypt(opts) + + async def test_use_after_close(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + + await client.admin.command("ping") + await client.aclose() + with self.assertRaisesRegex(InvalidOperation, "Cannot use AsyncMongoClient after close"): + await client.admin.command("ping") + + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent and eventlet do not support POSIX-style forking.", + ) + async def test_fork(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + + async def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + await client.admin.command("ping") + + with self.fork(target): + await target() + + +class TestEncryptedBulkWrite(BulkTestBase, AsyncEncryptionIntegrationTest): + async def test_upsert_uuid_standard_encrypt(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encrypted_coll = client.pymongo_test.test + coll = encrypted_coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + +class TestClientMaxWireVersion(AsyncIntegrationTest): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def _setup_class(cls): + await super()._setup_class() + + @async_client_context.require_version_max(4, 0, 99) + async def test_raise_max_wire_version_error(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + msg = "Auto-encryption requires a minimum MongoDB version of 4.2" + with self.assertRaisesRegex(ConfigurationError, msg): + await client.test.test.insert_one({}) + with self.assertRaisesRegex(ConfigurationError, msg): + await client.admin.command("ping") + with self.assertRaisesRegex(ConfigurationError, msg): + await client.test.test.find_one({}) + with self.assertRaisesRegex(ConfigurationError, msg): + await client.test.test.bulk_write([InsertOne({})]) + + async def test_raise_unsupported_error(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client.aclose) + msg = "find_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client.test.test.find_raw_batches({}) + + msg = "aggregate_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client.test.test.aggregate_raw_batches([]) + + if async_client_context.is_mongos: + msg = "Exhaust cursors are not supported by mongos" + else: + msg = "exhaust cursors do not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await anext(client.test.test.find(cursor_type=CursorType.EXHAUST)) + + +class TestExplicitSimple(AsyncEncryptionIntegrationTest): + async def test_encrypt_decrypt(self): + client_encryption = AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + self.addAsyncCleanup(client_encryption.close) + # Use standard UUID representation. + key_vault = async_client_context.client.keyvault.get_collection( + "datakeys", codec_options=OPTS + ) + self.addAsyncCleanup(key_vault.drop) + + # Create the encrypted field's data key. + key_id = await client_encryption.create_data_key("local", key_alt_names=["name"]) + self.assertBinaryUUID(key_id) + self.assertTrue(await key_vault.find_one({"_id": key_id})) + + # Create an unused data key to make sure filtering works. + unused_key_id = await client_encryption.create_data_key("local", key_alt_names=["unused"]) + self.assertBinaryUUID(unused_key_id) + self.assertTrue(await key_vault.find_one({"_id": unused_key_id})) + + doc = {"_id": 0, "ssn": "000"} + encrypted_ssn = await client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + + # Ensure encryption via key_alt_name for the same key produces the + # same output. + encrypted_ssn2 = await client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name" + ) + self.assertEqual(encrypted_ssn, encrypted_ssn2) + + # Test encryption via UUID + encrypted_ssn3 = await client_encryption.encrypt( + doc["ssn"], + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=key_id.as_uuid(), + ) + self.assertEqual(encrypted_ssn, encrypted_ssn3) + + # Test decryption. + decrypted_ssn = await client_encryption.decrypt(encrypted_ssn) + self.assertEqual(decrypted_ssn, doc["ssn"]) + + async def test_validation(self): + client_encryption = AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + self.addAsyncCleanup(client_encryption.close) + + msg = "value to decrypt must be a bson.binary.Binary with subtype 6" + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.decrypt("str") # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.decrypt(Binary(b"123")) + + msg = "key_id must be a bson.binary.Binary with subtype 4" + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.encrypt("str", algo, key_id=Binary(b"123")) + + async def test_bson_errors(self): + client_encryption = AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + self.addAsyncCleanup(client_encryption.close) + + # Attempt to encrypt an unencodable object. + unencodable_value = object() + with self.assertRaises(BSONError): + await client_encryption.encrypt( + unencodable_value, + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=Binary.from_uuid(uuid.uuid4()), + ) + + async def test_codec_options(self): + with self.assertRaisesRegex(TypeError, "codec_options must be"): + AsyncClientEncryption( + KMS_PROVIDERS, + "keyvault.datakeys", + async_client_context.client, + None, # type: ignore[arg-type] + ) + + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) + client_encryption_legacy = AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts + ) + self.addAsyncCleanup(client_encryption_legacy.close) + + # Create the encrypted field's data key. + key_id = await client_encryption_legacy.create_data_key("local") + + # Encrypt a UUID with JAVA_LEGACY codec options. + value = uuid.uuid4() + encrypted_legacy = await client_encryption_legacy.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_value_legacy = await client_encryption_legacy.decrypt(encrypted_legacy) + self.assertEqual(decrypted_value_legacy, value) + + # Encrypt the same UUID with STANDARD codec options. + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + client_encryption = AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts + ) + self.addAsyncCleanup(client_encryption.close) + encrypted_standard = await client_encryption.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_standard = await client_encryption.decrypt(encrypted_standard) + self.assertEqual(decrypted_standard, value) + + # Test that codec_options is applied during encryption. + self.assertNotEqual(encrypted_standard, encrypted_legacy) + # Test that codec_options is applied during decryption. + self.assertEqual( + await client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value) + ) + self.assertNotEqual(await client_encryption.decrypt(encrypted_legacy), value) + + async def test_close(self): + client_encryption = AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + await client_encryption.close() + # Close can be called multiple times. + await client_encryption.close() + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + msg = "Cannot use closed AsyncClientEncryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client_encryption.create_data_key("local") + with self.assertRaisesRegex(InvalidOperation, msg): + await client_encryption.encrypt("val", algo, key_alt_name="name") + with self.assertRaisesRegex(InvalidOperation, msg): + await client_encryption.decrypt(Binary(b"", 6)) + + async def test_with_statement(self): + async with AsyncClientEncryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) as client_encryption: + pass + with self.assertRaisesRegex(InvalidOperation, "Cannot use closed AsyncClientEncryption"): + await client_encryption.create_data_key("local") + + +# Spec tests +AWS_TEMP_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), + "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), +} + +AWS_TEMP_NO_SESSION_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), +} +KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} + + +if _IS_SYNC: + # TODO: Add asynchronous SpecRunner (https://jira.mongodb.org/browse/PYTHON-4700) + class TestSpec(SpecRunner): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def setUpClass(cls): + super().setUpClass() + + def parse_auto_encrypt_opts(self, opts): + """Parse clientOptions.autoEncryptOpts.""" + opts = camel_to_snake_args(opts) + kms_providers = opts["kms_providers"] + if "aws" in kms_providers: + kms_providers["aws"] = AWS_CREDS + if not any(AWS_CREDS.values()): + self.skipTest("AWS environment credentials are not set") + if "awsTemporary" in kms_providers: + kms_providers["aws"] = AWS_TEMP_CREDS + del kms_providers["awsTemporary"] + if not any(AWS_TEMP_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "awsTemporaryNoSessionToken" in kms_providers: + kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers["awsTemporaryNoSessionToken"] + if not any(AWS_TEMP_NO_SESSION_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "azure" in kms_providers: + kms_providers["azure"] = AZURE_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("Azure environment credentials are not set") + if "gcp" in kms_providers: + kms_providers["gcp"] = GCP_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("GCP environment credentials are not set") + if "kmip" in kms_providers: + kms_providers["kmip"] = KMIP_CREDS + opts["kms_tls_options"] = KMS_TLS_OPTS + if "key_vault_namespace" not in opts: + opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) + + opts = dict(opts) + return AutoEncryptionOpts(**opts) + + def parse_client_options(self, opts): + """Override clientOptions parsing to support autoEncryptOpts.""" + encrypt_opts = opts.pop("autoEncryptOpts", None) + if encrypt_opts: + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) + + return super().parse_client_options(opts) + + def get_object_name(self, op): + """Default object is collection.""" + return op.get("object", "collection") + + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") + + def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = async_client_context.client.get_database("keyvault", codec_options=OPTS)[ + "datakeys" + ] + coll.delete_many({}) + if key_vault_data: + coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = async_client_context.client.get_database(db_name, codec_options=OPTS) + coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. + if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + def create_test(scenario_def, test, name): + @async_client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) + test_creator.create_tests() + + if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, + ) + ) + +# Prose Tests +ALL_KMS_PROVIDERS = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + "local": {"key": LOCAL_MASTER_KEY}, +} + +LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) + + +async def create_with_schema(coll, json_schema): + """Create and return a Collection with a jsonSchema.""" + await coll.with_options(write_concern=WriteConcern(w="majority")).drop() + return await coll.database.create_collection( + coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) + + +async def create_key_vault(vault, *data_keys): + """Create the key vault collection with optional data keys.""" + vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS) + await vault.drop() + if data_keys: + await vault.insert_many(data_keys) + await vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) + return vault + + +class TestDataKeyDoubleEncryption(AsyncEncryptionIntegrationTest): + client_encrypted: AsyncMongoClient + client_encryption: AsyncClientEncryption + listener: OvertCommandListener + vault: Any + + KMS_PROVIDERS = ALL_KMS_PROVIDERS + + MASTER_KEYS = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": None, + } + + @classmethod + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + async def _setup_class(cls): + await super()._setup_class() + cls.listener = OvertCommandListener() + cls.client = await async_rs_or_single_client(event_listeners=[cls.listener]) + await cls.client.db.coll.drop() + cls.vault = await create_key_vault(cls.client.keyvault.datakeys) + + # Configure the encrypted field via the local schema_map option. + schemas = { + "db.coll": { + "bsonType": "object", + "properties": { + "encrypted_placeholder": { + "encrypt": { + "keyId": "/placeholder", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + } + } + }, + } + } + opts = AutoEncryptionOpts( + cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS + ) + cls.client_encrypted = await async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + cls.client_encryption = AsyncClientEncryption( + cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS + ) + + @classmethod + async def _tearDown_class(cls): + await cls.vault.drop() + await cls.client.close() + await cls.client_encrypted.close() + await cls.client_encryption.close() + + def setUp(self): + self.listener.reset() + + async def run_test(self, provider_name): + # Create data key. + master_key: Any = self.MASTER_KEYS[provider_name] + datakey_id = await self.client_encryption.create_data_key( + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] + ) + self.assertBinaryUUID(datakey_id) + cmd = self.listener.started_events[-1] + self.assertEqual("insert", cmd.command_name) + self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) + docs = await self.vault.find({"_id": datakey_id}).to_list() + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) + + # Encrypt by key_id. + encrypted = await self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=datakey_id, + ) + self.assertEncrypted(encrypted) + await self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) + doc_decrypted = await self.client_encrypted.db.coll.find_one({"_id": provider_name}) + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore + + # Encrypt by key_alt_name. + encrypted_altname = await self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_alt_name=f"{provider_name}_altname", + ) + self.assertEqual(encrypted_altname, encrypted) + + # Explicitly encrypting an auto encrypted field. + with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): + await self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) + + async def test_data_key_local(self): + await self.run_test("local") + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_data_key_aws(self): + await self.run_test("aws") + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def test_data_key_azure(self): + await self.run_test("azure") + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_data_key_gcp(self): + await self.run_test("gcp") + + async def test_data_key_kmip(self): + await self.run_test("kmip") + + +class TestExternalKeyVault(AsyncEncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + async def _test_external_key_vault(self, with_external_key_vault): + await self.client.db.coll.drop() + vault = await create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + ) + self.addAsyncCleanup(vault.drop) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + if with_external_key_vault: + key_vault_client = await async_rs_or_single_client( + username="fake-user", password="fake-pwd" + ) + self.addAsyncCleanup(key_vault_client.close) + else: + key_vault_client = async_client_context.client + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + key_vault_client=key_vault_client, + ) + + client_encrypted = await async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + self.addAsyncCleanup(client_encrypted.close) + + client_encryption = AsyncClientEncryption( + self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS + ) + self.addAsyncCleanup(client_encryption.close) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + # AuthenticationFailed error. + self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + await client_encryption.encrypt( + "test", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=LOCAL_KEY_ID, + ) + # AuthenticationFailed error. + self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + await client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID + ) + + async def test_external_key_vault_1(self): + await self._test_external_key_vault(True) + + async def test_external_key_vault_2(self): + await self._test_external_key_vault(False) + + +class TestViews(AsyncEncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + async def test_views_are_prohibited(self): + await self.client.db.view.drop() + await self.client.db.create_collection("view", viewOn="coll") + self.addAsyncCleanup(self.client.db.view.drop) + + opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") + client_encrypted = await async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + self.addAsyncCleanup(client_encrypted.aclose) + + with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): + await client_encrypted.db.view.insert_one({}) + + +class TestCorpus(AsyncEncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def _setup_class(cls): + await super()._setup_class() + + @staticmethod + def kms_providers(): + return ALL_KMS_PROVIDERS + + @staticmethod + def fix_up_schema(json_schema): + """Remove deprecated symbol/dbPointer types from json schema.""" + for key in list(json_schema["properties"]): + if "_symbol_" in key or "_dbPointer_" in key: + del json_schema["properties"][key] + return json_schema + + @staticmethod + def fix_up_curpus(corpus): + """Disallow deprecated symbol/dbPointer types from corpus test.""" + for key in corpus: + if "_symbol_" in key or "_dbPointer_" in key: + corpus[key]["allowed"] = False + return corpus + + @staticmethod + def fix_up_curpus_encrypted(corpus_encrypted, corpus): + """Fix the expected values for deprecated symbol/dbPointer types.""" + for key in corpus_encrypted: + if "_symbol_" in key or "_dbPointer_" in key: + corpus_encrypted[key] = copy.deepcopy(corpus[key]) + return corpus_encrypted + + async def _test_corpus(self, opts): + # Drop and create the collection 'db.coll' with jsonSchema. + coll = await create_with_schema( + self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json")) + ) + self.addAsyncCleanup(coll.drop) + + vault = await create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + json_data("corpus", "corpus-key-azure.json"), + json_data("corpus", "corpus-key-gcp.json"), + json_data("corpus", "corpus-key-kmip.json"), + ) + self.addAsyncCleanup(vault.drop) + + client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client_encrypted.close) + + client_encryption = AsyncClientEncryption( + self.kms_providers(), + "keyvault.datakeys", + async_client_context.client, + OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) + self.addAsyncCleanup(client_encryption.close) + + corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) + corpus_copied: SON = SON() + for key, value in corpus.items(): + corpus_copied[key] = copy.deepcopy(value) + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + if value["method"] == "auto": + continue + if value["method"] == "explicit": + identifier = value["identifier"] + self.assertIn(identifier, ("id", "altname")) + kms = value["kms"] + self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) + if identifier == "id": + if kms == "local": + kwargs = {"key_id": LOCAL_KEY_ID} + elif kms == "aws": + kwargs = {"key_id": AWS_KEY_ID} + elif kms == "azure": + kwargs = {"key_id": AZURE_KEY_ID} + elif kms == "gcp": + kwargs = {"key_id": GCP_KEY_ID} + else: + kwargs = {"key_id": KMIP_KEY_ID} + else: + kwargs = {"key_alt_name": kms} + + self.assertIn(value["algo"], ("det", "rand")) + if value["algo"] == "det": + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + else: + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random + + try: + encrypted_val = await client_encryption.encrypt( + value["value"], + algo, + **kwargs, # type: ignore[arg-type] + ) + if not value["allowed"]: + self.fail(f"encrypt should have failed: {key!r}: {value!r}") + corpus_copied[key]["value"] = encrypted_val + except Exception: + if value["allowed"]: + tb = traceback.format_exc() + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") + + await client_encrypted.db.coll.insert_one(corpus_copied) + corpus_decrypted = await client_encrypted.db.coll.find_one() + self.assertEqual(corpus_decrypted, corpus) + + corpus_encrypted_expected = self.fix_up_curpus_encrypted( + json_data("corpus", "corpus-encrypted.json"), corpus + ) + corpus_encrypted_actual = await coll.find_one() + for key, value in corpus_encrypted_actual.items(): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + + if value["algo"] == "det": + self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + elif value["algo"] == "rand" and value["allowed"]: + self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + + if value["allowed"]: + decrypt_actual = await client_encryption.decrypt(value["value"]) + decrypt_expected = await client_encryption.decrypt( + corpus_encrypted_expected[key]["value"] + ) + self.assertEqual(decrypt_actual, decrypt_expected, key) + else: + self.assertEqual(value["value"], corpus[key]["value"], key) + + async def test_corpus(self): + opts = AutoEncryptionOpts( + self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS + ) + await self._test_corpus(opts) + + async def test_corpus_local_schema(self): + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))} + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS, + ) + await self._test_corpus(opts) + + +_2_MiB = 2097152 +_16_MiB = 16777216 + + +class TestBsonSizeBatches(AsyncEncryptionIntegrationTest): + """Prose tests for BSON size limits and batch splitting.""" + + coll: AsyncCollection + coll_encrypted: AsyncCollection + client_encrypted: AsyncMongoClient + listener: OvertCommandListener + + @classmethod + async def _setup_class(cls): + await super()._setup_class() + db = async_client_context.client.db + cls.coll = db.coll + await cls.coll.drop() + # Configure the encrypted 'db.coll' collection via jsonSchema. + json_schema = json_data("limits", "limits-schema.json") + await db.create_collection( + "coll", + validator={"$jsonSchema": json_schema}, + codec_options=OPTS, + write_concern=WriteConcern(w="majority"), + ) + + # Create the key vault. + coll = async_client_context.client.get_database( + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] + await coll.drop() + await coll.insert_one(json_data("limits", "limits-key.json")) + + opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + cls.listener = OvertCommandListener() + cls.client_encrypted = await async_rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[cls.listener] + ) + cls.coll_encrypted = cls.client_encrypted.db.coll + + @classmethod + async def _tearDown_class(cls): + await cls.coll_encrypted.drop() + await cls.client_encrypted.close() + await super()._tearDown_class() + + async def test_01_insert_succeeds_under_2MiB(self): + doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "over_2mib_under_16mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_02_insert_succeeds_over_2MiB_post_encryption(self): + doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)} + doc.update(json_data("limits", "limits-doc.json")) + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_2mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_03_bulk_batch_split(self): + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} + self.listener.reset() + await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + + async def test_04_bulk_batch_split(self): + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} + doc1.update(limits_doc) + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} + doc2.update(limits_doc) + self.listener.reset() + await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + + async def test_05_insert_succeeds_just_under_16MiB(self): + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "under_16mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_06_insert_fails_over_16MiB(self): + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + doc.update(limits_doc) + + with self.assertRaisesRegex(WriteError, "object to insert too large"): + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_16mib_bulk" + with self.assertRaises(BulkWriteError) as ctx: + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + err = ctx.exception.details["writeErrors"][0] + self.assertEqual(2, err["code"]) + self.assertIn("object to insert too large", err["errmsg"]) + + +class TestCustomEndpoint(AsyncEncryptionIntegrationTest): + """Prose tests for creating data keys with a custom endpoint.""" + + @classmethod + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + async def _setup_class(cls): + await super()._setup_class() + + def setUp(self): + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } + self.client_encryption = AsyncClientEncryption( + kms_providers=kms_providers, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) + + kms_providers_invalid = copy.deepcopy(kms_providers) + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" + self.client_encryption_invalid = AsyncClientEncryption( + kms_providers=kms_providers_invalid, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + kms_tls_options=KMS_TLS_OPTS, + ) + self._kmip_host_error = None + self._invalid_host_error = None + + async def asyncTearDown(self): + await self.client_encryption.close() + await self.client_encryption_invalid.close() + + async def run_test_expected_success(self, provider_name, master_key): + data_key_id = await self.client_encryption.create_data_key( + provider_name, master_key=master_key + ) + encrypted = await self.client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", await self.client_encryption.decrypt(encrypted)) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_01_aws_region_key(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_02_aws_region_key_endpoint(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_03_aws_region_key_endpoint_port(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_04_aws_endpoint_invalid_port(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-1.amazonaws.com:12345", + } + with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345") as ctx: + await self.client_encryption.create_data_key("aws", master_key=master_key) + self.assertIsInstance(ctx.exception.cause, AutoReconnect) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_05_aws_endpoint_wrong_region(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-2.amazonaws.com", + } + # The full error should be something like: + # "Credential should be scoped to a valid region, not 'us-east-1'" + # but we only check for EncryptionError to avoid breaking on slight + # changes to AWS' error message. + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_06_aws_endpoint_invalid_host(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "doesnotexist.invalid", + } + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def test_07_azure(self): + master_key = { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + } + await self.run_test_expected_success("azure", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption_invalid.create_data_key("azure", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_08_gcp_valid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "cloudkms.googleapis.com:443", + } + await self.run_test_expected_success("gcp", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_09_gcp_invalid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "doesnotexist.invalid:443", + } + + # The full error should be something like: + # "Invalid KMS response, no access_token returned. HTTP status=200" + with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): + await self.client_encryption.create_data_key("gcp", master_key=master_key) + + def dns_error(self, host, port): + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) + return self._kmip_host_error + + async def test_10_kmip_invalid_endpoint(self): + key = {"keyId": "1"} + await self.run_test_expected_success("kmip", key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + await self.client_encryption_invalid.create_data_key("kmip", key) + + async def test_11_kmip_master_key_endpoint(self): + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} + await self.run_test_expected_success("kmip", key) + # Override invalid endpoint: + data_key_id = await self.client_encryption_invalid.create_data_key("kmip", master_key=key) + encrypted = await self.client_encryption_invalid.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", await self.client_encryption_invalid.decrypt(encrypted)) + + async def test_12_kmip_master_key_invalid_endpoint(self): + key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + await self.client_encryption.create_data_key("kmip", key) + + +class AzureGCPEncryptionTestMixin: + DEK = None + KMS_PROVIDER_MAP = None + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" + client: AsyncMongoClient + + async def asyncSetUp(self): + keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) + await create_key_vault(keyvault, self.DEK) + + async def _test_explicit(self, expectation): + client_encryption = AsyncClientEncryption( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + async_client_context.client, + OPTS, + ) + self.addAsyncCleanup(client_encryption.close) + + ciphertext = await client_encryption.encrypt( + "string0", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=self.DEK["_id"], + ) + + self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) + self.assertEqual(await client_encryption.decrypt(ciphertext), "string0") + + async def _test_automatic(self, expectation_extjson, payload): + encrypted_db = "db" + encrypted_coll = "coll" + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + + encryption_opts = AutoEncryptionOpts( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + keyvault_namespace, + schema_map=self.SCHEMA_MAP, + ) + + insert_listener = AllowListEventListener("insert") + client = await async_rs_or_single_client( + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) + self.addAsyncCleanup(client.aclose) + + coll = client.get_database(encrypted_db).get_collection( + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) + await coll.drop() + + expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) + + await coll.insert_one(payload) + event = insert_listener.started_events[0] + inserted_doc = event.command["documents"][0] + + for key, value in expected_document.items(): + self.assertEqual(value, inserted_doc[key]) + + output_doc = await coll.find_one({}) + for key, value in payload.items(): + self.assertEqual(output_doc[key], value) + + +class TestAzureEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def _setup_class(cls): + cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + cls.DEK = json_data(BASE, "custom", "azure-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super()._setup_class() + + async def test_explicit(self): + return await self._test_explicit( + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) + + async def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06"} + }}""" + ) + return await self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) + + +class TestGCPEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): + @classmethod + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def _setup_class(cls): + cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + cls.DEK = json_data(BASE, "custom", "gcp-dek.json") + cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super()._setup_class() + + async def test_explicit(self): + return await self._test_explicit( + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) + + async def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06"} + }}""" + ) + return await self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests +class TestDeadlockProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + self.client_test = await async_rs_or_single_client( + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) + self.addAsyncCleanup(self.client_test.aclose) + + self.client_keyvault_listener = OvertCommandListener() + self.client_keyvault = await async_rs_or_single_client( + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) + self.addAsyncCleanup(self.client_keyvault.aclose) + + await self.client_test.keyvault.datakeys.drop() + await self.client_test.db.coll.drop() + await self.client_test.keyvault.datakeys.insert_one( + json_data("external", "external-key.json") + ) + _ = await self.client_test.db.create_collection( + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) + + client_encryption = AsyncClientEncryption( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=self.client_test, + codec_options=OPTS, + ) + self.ciphertext = await client_encryption.encrypt( + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) + await client_encryption.close() + + self.client_listener = OvertCommandListener() + self.topology_listener = TopologyEventListener() + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + + async def _run_test(self, max_pool_size, auto_encryption_opts): + client_encrypted = await async_rs_or_single_client( + readConcernLevel="majority", + w="majority", + maxPoolSize=max_pool_size, + auto_encryption_opts=auto_encryption_opts, + event_listeners=[self.client_listener, self.topology_listener], + ) + + if auto_encryption_opts._bypass_auto_encryption is True: + await self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) + elif auto_encryption_opts._bypass_auto_encryption is False: + await client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) + else: + raise RuntimeError("bypass_auto_encryption must be a bool") + + result = await client_encrypted.db.coll.find_one({"_id": 0}) + self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) + + self.addAsyncCleanup(client_encrypted.close) + + async def test_case_1(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 4) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_2(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_3(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_4(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_5(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 5) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_6(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_7(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_8(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +class TestDecryptProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + self.client = async_client_context.client + await self.client.db.drop_collection("decryption_events") + await create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = AsyncClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = await self.client_encryption.create_data_key("local") + self.cipher_text = await self.client_encryption.encrypt( + "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = await async_rs_or_single_client( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + self.addAsyncCleanup(self.encrypted_client.close) + + async def test_01_command_error(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + await self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: + self.assertEqual(event.failure["code"], 123) + + async def test_02_network_error(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + await self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") + + async def test_03_decrypt_error(self): + await self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + await anext(await self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text + ) + + async def test_04_decrypt_success(self): + await self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + await anext(await self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd +class TestBypassSpawningMongocryptdProse(AsyncEncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", + ) + async def test_mongocryptd_bypass_spawn(self): + # Lower the mongocryptd timeout to reduce the test run time. + self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS + encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + + def reset_timeout(): + encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + + self.addCleanup(reset_timeout) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + schema_map=schemas, + mongocryptd_bypass_spawn=True, + mongocryptd_uri="mongodb://localhost:27027/", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client_encrypted.close) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + async def test_bypassAutoEncryption(self): + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + bypass_auto_encryption=True, + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client_encrypted.aclose) + await client_encrypted.db.coll.insert_one({"unencrypted": "test"}) + # Validate that mongocryptd was not spawned: + mongocryptd_client = AsyncMongoClient( + "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" + ) + with self.assertRaises(ServerSelectionTimeoutError): + await mongocryptd_client.admin.command("ping") + + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_via_loading_shared_library(self): + await create_key_vault( + async_client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client_encrypted.aclose) + await client_encrypted.db.coll.drop() + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted((await async_client_context.client.db.coll.find_one({}))["encrypted"]) + no_mongocryptd_client = AsyncMongoClient( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + self.addAsyncCleanup(no_mongocryptd_client.aclose) + with self.assertRaises(ServerSelectionTimeoutError): + await no_mongocryptd_client.db.command("ping") + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + await create_key_vault( + async_client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(client_encrypted.aclose) + await client_encrypted.db.coll.drop() + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + + +# https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests +class TestKmsTLSProse(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + self.patch_system_certs(CA_PEM) + self.client_encrypted = AsyncClientEncryption( + {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS + ) + self.addAsyncCleanup(self.client_encrypted.close) + + async def test_invalid_kms_certificate_expired(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:9000", + } + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encrypted.create_data_key("aws", master_key=key) + + async def test_invalid_hostname_in_kms_certificate(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:9001", + } + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encrypted.create_data_key("aws", master_key=key) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests +class TestKmsTLSOptions(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + # 1, create client with only tlsCAFile. + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9002" + providers["gcp"]["endpoint"] = "127.0.0.1:9002" + kms_tls_opts_ca_only = { + "aws": {"tlsCAFile": CA_PEM}, + "azure": {"tlsCAFile": CA_PEM}, + "gcp": {"tlsCAFile": CA_PEM}, + "kmip": {"tlsCAFile": CA_PEM}, + } + self.client_encryption_no_client_cert = AsyncClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + self.addAsyncCleanup(self.client_encryption_no_client_cert.close) + # 2, same providers as above but with tlsCertificateKeyFile. + kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) + for p in kms_tls_opts: + kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM + self.client_encryption_with_tls = AsyncClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + self.addAsyncCleanup(self.client_encryption_with_tls.close) + # 3, update endpoints to expired host. + providers: dict = copy.deepcopy(providers) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000" + providers["gcp"]["endpoint"] = "127.0.0.1:9000" + providers["kmip"]["endpoint"] = "127.0.0.1:9000" + self.client_encryption_expired = AsyncClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + self.addAsyncCleanup(self.client_encryption_expired.close) + # 3, update endpoints to invalid host. + providers: dict = copy.deepcopy(providers) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001" + providers["gcp"]["endpoint"] = "127.0.0.1:9001" + providers["kmip"]["endpoint"] = "127.0.0.1:9001" + self.client_encryption_invalid_hostname = AsyncClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + self.addAsyncCleanup(self.client_encryption_invalid_hostname.close) + # Errors when client has no cert, some examples: + # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) + self.cert_error = ( + "certificate required|SSL handshake failed|" + "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE" + ) + # On Python 3.10+ this error might be: + # EOF occurred in violation of protocol (_ssl.c:2384) + if sys.version_info[:2] >= (3, 10): + self.cert_error += "|EOF" + # On Windows this error might be: + # [WinError 10054] An existing connection was forcibly closed by the remote host + if sys.platform == "win32": + self.cert_error += "|forcibly closed" + # 4, Test named KMS providers. + providers = { + "aws:no_client_cert": AWS_CREDS, + "azure:no_client_cert": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:no_client_cert": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:no_client_cert": KMIP_CREDS, + "aws:with_tls": AWS_CREDS, + "azure:with_tls": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:with_tls": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:with_tls": KMIP_CREDS, + } + no_cert = {"tlsCAFile": CA_PEM} + with_cert = {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} + kms_tls_opts_4 = { + "aws:no_client_cert": no_cert, + "azure:no_client_cert": no_cert, + "gcp:no_client_cert": no_cert, + "kmip:no_client_cert": no_cert, + "aws:with_tls": with_cert, + "azure:with_tls": with_cert, + "gcp:with_tls": with_cert, + "kmip:with_tls": with_cert, + } + self.client_encryption_with_names = AsyncClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4 + ) + + async def test_01_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("aws", key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "parse error"): + await self.client_encryption_with_tls.create_data_key("aws", key) + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + key["endpoint"] = "127.0.0.1:9000" + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("aws", key) + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + key["endpoint"] = "127.0.0.1:9001" + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encryption_invalid_hostname.create_data_key("aws", key) + + async def test_02_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("azure", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_tls.create_data_key("azure", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("azure", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encryption_invalid_hostname.create_data_key("azure", key) + + async def test_03_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("gcp", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_tls.create_data_key("gcp", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("gcp", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encryption_invalid_hostname.create_data_key("gcp", key) + + async def test_04_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("kmip") + await self.client_encryption_with_tls.create_data_key("kmip") + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("kmip") + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encryption_invalid_hostname.create_data_key("kmip") + + async def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = AsyncClientEncryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + self.addAsyncCleanup(encryption.close) + ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") + self.assertFalse(ctx.check_ocsp_endpoint) + + async def test_06_named_kms_providers_apply_tls_options_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("aws:no_client_cert", key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "parse error"): + await self.client_encryption_with_names.create_data_key("aws:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_names.create_data_key("azure:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("gcp:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_names.create_data_key("gcp:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("kmip:no_client_cert") + await self.client_encryption_with_names.create_data_key("kmip:with_tls") + + +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + self.client = async_client_context.client + await create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = AsyncClientEncryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = await self.client_encryption.create_data_key( + "local", key_alt_names=["def"] + ) + + async def test_01_create_key(self): + await self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + await self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + await self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + async def test_02_add_key_alt_name(self): + key_id = await self.client_encryption.create_data_key("local") + await self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = await self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + await self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = await self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +class TestExplicitQueryableEncryption(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + await self.client.drop_database(self.db) + await self.db.command( + "create", "explicit_encryption", encryptedFields=self.encrypted_fields + ) + key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = AsyncClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addAsyncCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.addAsyncCleanup(self.encrypted_client.aclose) + + async def test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = ( + await self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedIndexed"], val) + + async def test_02_insert_encrypted_indexed_and_find_contention(self): + val = "encrypted indexed value" + contention = 10 + for _ in range(contention): + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=contention + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + find_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 + ) + docs = ( + await self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertLessEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + # Find with contention_factor will return all 10 documents. + find_payload = await self.client_encryption.encrypt( + val, + Algorithm.INDEXED, + self.key1_id, + query_type=QueryType.EQUALITY, + contention_factor=contention, + ) + docs = ( + await self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() + ) + + self.assertEqual(len(docs), 10) + for doc in docs: + self.assertEqual(doc["encryptedIndexed"], val) + + async def test_03_insert_encrypted_unindexed(self): + val = "encrypted unindexed value" + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.UNINDEXED, self.key1_id + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"_id": 1, "encryptedUnindexed": insert_payload} + ) + + docs = ( + await self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1}).to_list() + ) + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["encryptedUnindexed"], val) + + async def test_04_roundtrip_encrypted_indexed(self): + val = "encrypted indexed value" + payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + decrypted = await self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + async def test_05_roundtrip_encrypted_unindexed(self): + val = "encrypted indexed value" + payload = await self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id) + decrypted = await self.client_encryption.decrypt(payload) + self.assertEqual(decrypted, val) + + +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap +class TestRewrapWithSeparateClientEncryption(AsyncEncryptionIntegrationTest): + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + async def test_rewrap(self): + for src_provider in self.MASTER_KEYS: + for dst_provider in self.MASTER_KEYS: + with self.subTest(src_provider=src_provider, dst_provider=dst_provider): + await self.run_test(src_provider, dst_provider) + + async def run_test(self, src_provider, dst_provider): + # Step 1. Drop the collection ``keyvault.datakeys``. + await self.client.keyvault.drop_collection("datakeys") + + # Step 2. Create a ``AsyncClientEncryption`` object named ``client_encryption1`` + client_encryption1 = AsyncClientEncryption( + key_vault_client=self.client, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=KMS_TLS_OPTS, + codec_options=OPTS, + ) + self.addAsyncCleanup(client_encryption1.close) + + # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. + key_id = await client_encryption1.create_data_key( + master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider + ) + + # Step 4. Call ``client_encryption1.encrypt`` with the value "test" + cipher_text = await client_encryption1.encrypt( + "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + + # Step 5. Create a ``AsyncClientEncryption`` object named ``client_encryption2`` + client2 = await async_rs_or_single_client() + self.addAsyncCleanup(client2.aclose) + client_encryption2 = AsyncClientEncryption( + key_vault_client=client2, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=KMS_TLS_OPTS, + codec_options=OPTS, + ) + self.addAsyncCleanup(client_encryption2.close) + + # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. + rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key( + {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider] + ) + + self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1) + + # 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result1 = await client_encryption1.decrypt(cipher_text) + self.assertEqual(decrypt_result1, "test") + + # 8. Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result2 = await client_encryption2.decrypt(cipher_text) + self.assertEqual(decrypt_result2, "test") + + # 8. Case 2. Provider is not optional when master_key is given. + with self.assertRaises(ConfigurationError): + rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key( + {}, master_key=self.MASTER_KEYS[dst_provider] + ) + + +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials +class TestOnDemandAWSCredentials(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + + @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") + async def test_01_failure(self): + self.client_encryption = AsyncClientEncryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("aws", self.master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_02_success(self): + self.client_encryption = AsyncClientEncryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + ) + await self.client_encryption.create_data_key("aws", self.master_key) + + +class TestQueryableEncryptionDocsExample(AsyncEncryptionIntegrationTest): + # Queryable Encryption is not supported on Standalone topology. + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + + async def test_queryable_encryption(self): + # AsyncMongoClient to use in testing that handles auth/tls/etc, + # and cleanup. + async def AsyncMongoClient(**kwargs): + c = await async_rs_or_single_client(**kwargs) + self.addAsyncCleanup(c.aclose) + return c + + # Drop data from prior test runs. + await self.client.keyvault.datakeys.drop() + await self.client.drop_database("docs_examples") + + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + # Create two data keys. + key_vault_client = await AsyncMongoClient() + client_encryption = AsyncClientEncryption( + kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() + ) + key1_id = await client_encryption.create_data_key("local") + key2_id = await client_encryption.create_data_key("local") + + # Create an encryptedFieldsMap. + encrypted_fields_map = { + "docs_examples.encrypted": { + "fields": [ + { + "path": "encrypted_indexed", + "bsonType": "string", + "keyId": key1_id, + "queries": [ + { + "queryType": "equality", + }, + ], + }, + { + "path": "encrypted_unindexed", + "bsonType": "string", + "keyId": key2_id, + }, + ], + }, + } + + # Create an Queryable Encryption collection. + opts = AutoEncryptionOpts( + kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map + ) + encrypted_client = await AsyncMongoClient(auto_encryption_opts=opts) + + # Create a Queryable Encryption collection "docs_examples.encrypted". + # Because docs_examples.encrypted is in encrypted_fields_map, it is + # created with Queryable Encryption support. + db = encrypted_client.docs_examples + encrypted_coll = await db.create_collection("encrypted") + + # Auto encrypt an insert and find. + + # Encrypt an insert. + await encrypted_coll.insert_one( + { + "_id": 1, + "encrypted_indexed": "indexed_value", + "encrypted_unindexed": "unindexed_value", + } + ) + + # Encrypt a find. + res = await encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = await AsyncMongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = await unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + await client_encryption.close() + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption +class TestRangeQueryProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + await self.client.drop_database(self.db) + key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = AsyncClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + self.addAsyncCleanup(self.client_encryption.close) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + self.addAsyncCleanup(self.encrypted_client.aclose) + + async def run_expression_find( + self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None + ): + find_payload = await self.client_encryption.encrypt_expression( + expression=expression, + key_id=key_id or self.key1_id, + algorithm=Algorithm.RANGE, + query_type=QueryType.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + await self.encrypted_client.db.explicit_encryption.find(find_payload).to_list(), + key=lambda x: x["_id"], + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + async def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + await self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + await self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + async def encrypt_and_cast(i): + return await self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": await encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + await self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. + insert_payload = await self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(await self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + expression = { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + } + await self.run_expression_find( + name, expression, [cast_func(i) for i in [6, 30, 200]], range_opts + ) + # Case 2, with UUID key_id + await self.run_expression_find( + name, + expression, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + key_id=self.key1_id.as_uuid(), + ) + + # Case 3. + await self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + await self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + await self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + await self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + await self.client_encryption.encrypt( + 6 if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. + if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + await self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), + max=cast_func(200), + sparsity=1, + trim_factor=1, + precision=2, + ), + ) + + async def test_double_no_precision(self): + await self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1, trim_factor=1), float) + + async def test_double_precision(self): + await self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, trim_factor=1, precision=2), + float, + ) + + async def test_decimal_no_precision(self): + await self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1, trim_factor=1), lambda x: Decimal128(str(x)) + ) + + async def test_decimal_precision(self): + await self.run_test_cases( + "DecimalPrecision", + RangeOpts( + min=Decimal128("0.0"), + max=Decimal128("200.0"), + sparsity=1, + trim_factor=1, + precision=2, + ), + lambda x: Decimal128(str(x)), + ) + + async def test_datetime(self): + await self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1, trim_factor=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + async def test_int(self): + await self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1, trim_factor=1), int) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#23-range-explicit-encryption-applies-defaults +class TestRangeQueryDefaultsProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + await self.client.drop_database(self.db) + self.key_vault_client = self.client + self.client_encryption = AsyncClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS + ) + self.addAsyncCleanup(self.client_encryption.close) + self.key_id = await self.client_encryption.create_data_key("local") + opts = RangeOpts(min=0, max=1000) + self.payload_defaults = await self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + + async def test_uses_libmongocrypt_defaults(self): + opts = RangeOpts(min=0, max=1000, sparsity=2, trim_factor=6) + payload = await self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) == len(self.payload_defaults) + + async def test_accepts_trim_factor_0(self): + opts = RangeOpts(min=0, max=1000, trim_factor=0) + payload = await self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) > len(self.payload_defaults) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + await self.client.drop_database(self.db) + self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addAsyncCleanup(self.key_vault.drop) + self.client_encryption = AsyncClientEncryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + self.addAsyncCleanup(self.client_encryption.close) + + async def test_01_simple_create(self): + coll, _ = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + await coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + async def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + await self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + async def test_03_invalid_keyid(self): + with self.assertRaisesRegex( + EncryptedCollectionError, + "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + async def test_04_insert_encrypted(self): + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = await self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + await coll.insert_one({"ssn": encrypted_value}) + + async def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + async def test_options_forward(self): + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + async def test_mixed_null_keyids(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + await self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + await coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + async def test_create_datakey_fails(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="does not exist", + ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) + + async def test_create_failure(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) + + async def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + await self.db.create_collection("testing1") + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + await self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + + +def start_mongocryptd(port) -> None: + args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"] + _spawn_daemon(args) + + +class TestNoSessionsSupport(AsyncEncryptionIntegrationTest): + mongocryptd_client: AsyncMongoClient + MONGOCRYPTD_PORT = 27020 + + @classmethod + @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") + async def _setup_class(cls): + await super()._setup_class() + start_mongocryptd(cls.MONGOCRYPTD_PORT) + + @classmethod + async def _tearDown_class(cls): + await super()._tearDown_class() + + async def asyncSetUp(self) -> None: + self.listener = OvertCommandListener() + self.mongocryptd_client = AsyncMongoClient( + f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] + ) + self.addAsyncCleanup(self.mongocryptd_client.aclose) + + hello = await self.mongocryptd_client.db.command("hello") + self.assertNotIn("logicalSessionTimeoutMinutes", hello) + + async def test_implicit_session_ignored_when_unsupported(self): + self.listener.reset() + with self.assertRaises(OperationFailure): + await self.mongocryptd_client.db.test.find_one() + + self.assertNotIn("lsid", self.listener.started_events[0].command) + + with self.assertRaises(OperationFailure): + await self.mongocryptd_client.db.test.insert_one({"x": 1}) + + self.assertNotIn("lsid", self.listener.started_events[1].command) + + async def test_explicit_session_errors_when_unsupported(self): + self.listener.reset() + async with self.mongocryptd_client.start_session() as s: + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + await self.mongocryptd_client.db.test.find_one(session=s) + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + await self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_encryption.py b/test/test_encryption.py index 464a91303f..568ebffc9e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -18,6 +18,7 @@ import base64 import copy import os +import pathlib import re import socket import socketserver @@ -27,6 +28,7 @@ import traceback import uuid import warnings +from test import IntegrationTest, PyMongoTestCase, client_context from threading import Thread from typing import Any, Dict, Mapping @@ -34,13 +36,11 @@ from pymongo.daemon import _spawn_daemon from pymongo.synchronous.collection import Collection +from pymongo.synchronous.helpers import next sys.path[0:0] = [""] from test import ( - IntegrationTest, - PyMongoTestCase, - client_context, unittest, ) from test.helpers import ( @@ -93,6 +93,8 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True + pytestmark = pytest.mark.encryption KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} @@ -216,8 +218,8 @@ class EncryptionIntegrationTest(IntegrationTest): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -229,7 +231,11 @@ def assertBinaryUUID(self, val): # Location of JSON test files. -BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") +if _IS_SYNC: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent, "client-side-encryption") +else: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "client-side-encryption") + SPEC_PATH = os.path.join(BASE, "spec") OPTS = CodecOptions() @@ -278,9 +284,11 @@ def _test_auto_encrypt(self, opts): unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) unack.insert_one(docs[3]) unack.insert_many(docs[4:], ordered=False) - wait_until( - lambda: self.db.test.count_documents({}) == len(docs), "insert documents with w=0" - ) + + def count_documents(): + return self.db.test.count_documents({}) == len(docs) + + wait_until(count_documents, "insert documents with w=0") # Database.command auto decrypts. res = client.pymongo_test.command("find", "test", filter={"ssn": "000"}) @@ -288,19 +296,19 @@ def _test_auto_encrypt(self, opts): self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) # Collection.find auto decrypts. - decrypted_docs = list(encrypted_coll.find()) + decrypted_docs = encrypted_coll.find().to_list() self.assertEqual(decrypted_docs, docs) # Collection.find auto decrypts getMores. - decrypted_docs = list(encrypted_coll.find(batch_size=1)) + decrypted_docs = encrypted_coll.find(batch_size=1).to_list() self.assertEqual(decrypted_docs, docs) # Collection.aggregate auto decrypts. - decrypted_docs = list(encrypted_coll.aggregate([])) + decrypted_docs = (encrypted_coll.aggregate([])).to_list() self.assertEqual(decrypted_docs, docs) # Collection.aggregate auto decrypts getMores. - decrypted_docs = list(encrypted_coll.aggregate([], batchSize=1)) + decrypted_docs = (encrypted_coll.aggregate([], batchSize=1)).to_list() self.assertEqual(decrypted_docs, docs) # Collection.distinct auto decrypts. @@ -402,8 +410,8 @@ def test_upsert_uuid_standard_encrypt(self): class TestClientMaxWireVersion(IntegrationTest): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): @@ -601,131 +609,130 @@ def test_with_statement(self): KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} -class TestSpec(SpecRunner): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def setUpClass(cls): - super().setUpClass() - - def parse_auto_encrypt_opts(self, opts): - """Parse clientOptions.autoEncryptOpts.""" - opts = camel_to_snake_args(opts) - kms_providers = opts["kms_providers"] - if "aws" in kms_providers: - kms_providers["aws"] = AWS_CREDS - if not any(AWS_CREDS.values()): - self.skipTest("AWS environment credentials are not set") - if "awsTemporary" in kms_providers: - kms_providers["aws"] = AWS_TEMP_CREDS - del kms_providers["awsTemporary"] - if not any(AWS_TEMP_CREDS.values()): - self.skipTest("AWS Temp environment credentials are not set") - if "awsTemporaryNoSessionToken" in kms_providers: - kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS - del kms_providers["awsTemporaryNoSessionToken"] - if not any(AWS_TEMP_NO_SESSION_CREDS.values()): - self.skipTest("AWS Temp environment credentials are not set") - if "azure" in kms_providers: - kms_providers["azure"] = AZURE_CREDS - if not any(AZURE_CREDS.values()): - self.skipTest("Azure environment credentials are not set") - if "gcp" in kms_providers: - kms_providers["gcp"] = GCP_CREDS - if not any(AZURE_CREDS.values()): - self.skipTest("GCP environment credentials are not set") - if "kmip" in kms_providers: - kms_providers["kmip"] = KMIP_CREDS - opts["kms_tls_options"] = KMS_TLS_OPTS - if "key_vault_namespace" not in opts: - opts["key_vault_namespace"] = "keyvault.datakeys" - if "extra_options" in opts: - opts.update(camel_to_snake_args(opts.pop("extra_options"))) - - opts = dict(opts) - return AutoEncryptionOpts(**opts) - - def parse_client_options(self, opts): - """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop("autoEncryptOpts", None) - if encrypt_opts: - opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - - return super().parse_client_options(opts) - - def get_object_name(self, op): - """Default object is collection.""" - return op.get("object", "collection") - - def maybe_skip_scenario(self, test): - super().maybe_skip_scenario(test) - desc = test["description"].lower() - if ( - "timeoutms applied to listcollections to get collection schema" in desc - and sys.platform in ("win32", "darwin") - ): - self.skipTest("PYTHON-3706 flaky test on Windows/macOS") - if "type=symbol" in desc: - self.skipTest("PyMongo does not support the symbol type") - - def setup_scenario(self, scenario_def): - """Override a test's setup.""" - key_vault_data = scenario_def["key_vault_data"] - encrypted_fields = scenario_def["encrypted_fields"] - json_schema = scenario_def["json_schema"] - data = scenario_def["data"] - coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] - coll.delete_many({}) - if key_vault_data: - coll.insert_many(key_vault_data) - - db_name = self.get_scenario_db_name(scenario_def) - coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database(db_name, codec_options=OPTS) - coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) - wc = WriteConcern(w="majority") - kwargs: Dict[str, Any] = {} - if json_schema: - kwargs["validator"] = {"$jsonSchema": json_schema} - kwargs["codec_options"] = OPTS - if not data: - kwargs["write_concern"] = wc - if encrypted_fields: - kwargs["encryptedFields"] = encrypted_fields - db.create_collection(coll_name, **kwargs) - coll = db[coll_name] - if data: - # Load data. - coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) - - def allowable_errors(self, op): - """Override expected error classes.""" - errors = super().allowable_errors(op) - # An updateOne test expects encryption to error when no $ operator - # appears but pymongo raises a client side ValueError in this case. - if op["name"] == "updateOne": - errors += (ValueError,) - return errors - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) -test_creator.create_tests() - - -if _HAVE_PYMONGOCRYPT: - globals().update( - generate_test_classes( - os.path.join(SPEC_PATH, "unified"), - module=__name__, +if _IS_SYNC: + # TODO: Add synchronous SpecRunner (https://jira.mongodb.org/browse/PYTHON-4700) + class TestSpec(SpecRunner): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def setUpClass(cls): + super().setUpClass() + + def parse_auto_encrypt_opts(self, opts): + """Parse clientOptions.autoEncryptOpts.""" + opts = camel_to_snake_args(opts) + kms_providers = opts["kms_providers"] + if "aws" in kms_providers: + kms_providers["aws"] = AWS_CREDS + if not any(AWS_CREDS.values()): + self.skipTest("AWS environment credentials are not set") + if "awsTemporary" in kms_providers: + kms_providers["aws"] = AWS_TEMP_CREDS + del kms_providers["awsTemporary"] + if not any(AWS_TEMP_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "awsTemporaryNoSessionToken" in kms_providers: + kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers["awsTemporaryNoSessionToken"] + if not any(AWS_TEMP_NO_SESSION_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "azure" in kms_providers: + kms_providers["azure"] = AZURE_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("Azure environment credentials are not set") + if "gcp" in kms_providers: + kms_providers["gcp"] = GCP_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("GCP environment credentials are not set") + if "kmip" in kms_providers: + kms_providers["kmip"] = KMIP_CREDS + opts["kms_tls_options"] = KMS_TLS_OPTS + if "key_vault_namespace" not in opts: + opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) + + opts = dict(opts) + return AutoEncryptionOpts(**opts) + + def parse_client_options(self, opts): + """Override clientOptions parsing to support autoEncryptOpts.""" + encrypt_opts = opts.pop("autoEncryptOpts", None) + if encrypt_opts: + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) + + return super().parse_client_options(opts) + + def get_object_name(self, op): + """Default object is collection.""" + return op.get("object", "collection") + + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") + + def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) + if key_vault_data: + coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = client_context.client.get_database(db_name, codec_options=OPTS) + coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. + if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + def create_test(scenario_def, test, name): + @client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) + test_creator.create_tests() + + if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, + ) ) - ) # Prose Tests ALL_KMS_PROVIDERS = { @@ -797,8 +804,8 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.listener = OvertCommandListener() cls.client = rs_or_single_client(event_listeners=[cls.listener]) cls.client.db.coll.drop() @@ -830,7 +837,7 @@ def setUpClass(cls): ) @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.vault.drop() cls.client.close() cls.client_encrypted.close() @@ -849,7 +856,7 @@ def run_test(self, provider_name): cmd = self.listener.started_events[-1] self.assertEqual("insert", cmd.command_name) self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) - docs = list(self.vault.find({"_id": datakey_id})) + docs = self.vault.find({"_id": datakey_id}).to_list() self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) @@ -989,8 +996,8 @@ def test_views_are_prohibited(self): class TestCorpus(EncryptionIntegrationTest): @classmethod @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() @staticmethod def kms_providers(): @@ -1167,8 +1174,8 @@ class TestBsonSizeBatches(EncryptionIntegrationTest): listener: OvertCommandListener @classmethod - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() db = client_context.client.db cls.coll = db.coll cls.coll.drop() @@ -1196,10 +1203,10 @@ def setUpClass(cls): cls.coll_encrypted = cls.client_encrypted.db.coll @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.coll_encrypted.drop() cls.client_encrypted.close() - super().tearDownClass() + super()._tearDown_class() def test_01_insert_succeeds_under_2MiB(self): doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} @@ -1268,8 +1275,8 @@ class TestCustomEndpoint(EncryptionIntegrationTest): any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() def setUp(self): kms_providers = { @@ -1537,11 +1544,11 @@ def _test_automatic(self, expectation_extjson, payload): class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") - def setUpClass(cls): + def _setup_class(cls): cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} cls.DEK = json_data(BASE, "custom", "azure-dek.json") cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super().setUpClass() + super()._setup_class() def test_explicit(self): return self._test_explicit( @@ -1563,11 +1570,11 @@ def test_automatic(self): class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): @classmethod @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") - def setUpClass(cls): + def _setup_class(cls): cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} cls.DEK = json_data(BASE, "custom", "gcp-dek.json") cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super().setUpClass() + super()._setup_class() def test_explicit(self): return self._test_explicit( @@ -1944,7 +1951,8 @@ def test_bypassAutoEncryption(self): @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") def test_via_loading_shared_library(self): create_key_vault( - client_context.client.keyvault.datakeys, json_data("external", "external-key.json") + client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), ) schemas = {"db.coll": json_data("external", "external-schema.json")} opts = AutoEncryptionOpts( @@ -1962,7 +1970,7 @@ def test_via_loading_shared_library(self): self.addCleanup(client_encrypted.close) client_encrypted.db.coll.drop() client_encrypted.db.coll.insert_one({"encrypted": "test"}) - self.assertEncrypted(client_context.client.db.coll.find_one({})["encrypted"]) + self.assertEncrypted((client_context.client.db.coll.find_one({}))["encrypted"]) no_mongocryptd_client = MongoClient( host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" ) @@ -1989,7 +1997,8 @@ def listener(): listener_t = Thread(target=listener) listener_t.start() create_key_vault( - client_context.client.keyvault.datakeys, json_data("external", "external-key.json") + client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), ) schemas = {"db.coll": json_data("external", "external-schema.json")} opts = AutoEncryptionOpts( @@ -2326,11 +2335,12 @@ def test_01_insert_encrypted_indexed_and_find(self): find_payload = self.client_encryption.encrypt( val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) - docs = list( - self.encrypted_client[self.db.name].explicit_encryption.find( - {"encryptedIndexed": find_payload} - ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() ) + self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["encryptedIndexed"], val) @@ -2348,11 +2358,12 @@ def test_02_insert_encrypted_indexed_and_find_contention(self): find_payload = self.client_encryption.encrypt( val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) - docs = list( - self.encrypted_client[self.db.name].explicit_encryption.find( - {"encryptedIndexed": find_payload} - ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() ) + self.assertLessEqual(len(docs), 10) for doc in docs: self.assertEqual(doc["encryptedIndexed"], val) @@ -2365,11 +2376,12 @@ def test_02_insert_encrypted_indexed_and_find_contention(self): query_type=QueryType.EQUALITY, contention_factor=contention, ) - docs = list( - self.encrypted_client[self.db.name].explicit_encryption.find( - {"encryptedIndexed": find_payload} - ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() ) + self.assertEqual(len(docs), 10) for doc in docs: self.assertEqual(doc["encryptedIndexed"], val) @@ -2381,7 +2393,7 @@ def test_03_insert_encrypted_unindexed(self): {"_id": 1, "encryptedUnindexed": insert_payload} ) - docs = list(self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1})) + docs = self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1}).to_list() self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["encryptedUnindexed"], val) @@ -2461,7 +2473,7 @@ def run_test(self, src_provider, dst_provider): kms_tls_options=KMS_TLS_OPTS, codec_options=OPTS, ) - self.addCleanup(client_encryption1.close) + self.addCleanup(client_encryption2.close) # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( @@ -2647,7 +2659,8 @@ def run_expression_find( if use_expr: find_payload = {"$expr": find_payload} sorted_find = sorted( - self.encrypted_client.db.explicit_encryption.find(find_payload), key=lambda x: x["_id"] + self.encrypted_client.db.explicit_encryption.find(find_payload).to_list(), + key=lambda x: x["_id"], ) for elem, expected in zip(sorted_find, expected_elems): self.assertEqual(elem[f"encrypted{name}"], expected) @@ -3073,13 +3086,13 @@ class TestNoSessionsSupport(EncryptionIntegrationTest): @classmethod @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() start_mongocryptd(cls.MONGOCRYPTD_PORT) @classmethod - def tearDownClass(cls): - super().tearDownClass() + def _tearDown_class(cls): + super()._tearDown_class() def setUp(self) -> None: self.listener = OvertCommandListener() diff --git a/tools/synchro.py b/tools/synchro.py index f45112c4cb..6fb7116747 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -96,6 +96,7 @@ "async-transactions-ref": "transactions-ref", "async-snapshot-reads-ref": "snapshot-reads-ref", "default_async": "default", + "aclose": "close", "PyMongo|async": "PyMongo", } @@ -158,6 +159,7 @@ "test_collection.py", "test_cursor.py", "test_database.py", + "test_encryption.py", "test_logger.py", "test_session.py", "test_transactions.py", From a4645f0f8bc1f652e8d4feeaa8219ef0af047122 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 30 Aug 2024 13:36:40 -0700 Subject: [PATCH 1424/2111] PYTHON-4712 Improve BSON encoding/decoding docs (#1823) --- bson/__init__.py | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index 48fffd745f..e8ac7c4441 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -22,30 +22,46 @@ None null both bool boolean both int [#int]_ int32 / int64 py -> bson -`bson.int64.Int64` int64 both +:class:`bson.int64.Int64` int64 both float number (real) both str string both list array both -dict / `SON` object both -datetime.datetime [#dt]_ [#dt2]_ date both -`bson.regex.Regex` regex both +dict object both +:class:`~bson.son.SON` object both +:py:class:`~collections.abc.Mapping` object py -> bson +:class:`~bson.raw_bson.RawBSONDocument` object both [#raw]_ +datetime.datetime [#dt]_ [#dt2]_ UTC datetime both +:class:`~bson.datetime_ms.DatetimeMS` UTC datetime both [#dt3]_ +:class:`~bson.regex.Regex` regex both compiled re [#re]_ regex py -> bson -`bson.binary.Binary` binary both -`bson.objectid.ObjectId` oid both -`bson.dbref.DBRef` dbref both +:class:`~bson.binary.Binary` binary both +:py:class:`uuid.UUID` [#uuid]_ binary both +:class:`~bson.objectid.ObjectId` oid both +:class:`~bson.dbref.DBRef` dbref both +:class:`~bson.dbref.DBRef` dbpointer bson -> py None undefined bson -> py -`bson.code.Code` code both +:class:`~bson.code.Code` code both str symbol bson -> py bytes [#bytes]_ binary both +:class:`~bson.timestamp.Timestamp` timestamp both +:class:`~bson.decimal128.Decimal128` decimal128 both +:class:`~bson.min_key.MinKey` min key both +:class:`~bson.max_key.MaxKey` max key both ======================================= ============= =================== .. [#int] A Python int will be saved as a BSON int32 or BSON int64 depending on its size. A BSON int32 will always decode to a Python int. A BSON int64 will always decode to a :class:`~bson.int64.Int64`. -.. [#dt] datetime.datetime instances will be rounded to the nearest - millisecond when saved -.. [#dt2] all datetime.datetime instances are treated as *naive*. clients - should always use UTC. +.. [#raw] Decoding a bson object to :class:`~bson.raw_bson.RawBSONDocument` can be + optionally configured via :attr:`~bson.codec_options.CodecOptions.document_class`. +.. [#dt] datetime.datetime instances are encoded with millisecond precision so + the microsecond field is truncated. +.. [#dt2] all datetime.datetime instances are encoded as UTC. By default, they + are decoded as *naive* but timezone aware datetimes are also supported. + See :doc:`/examples/datetimes` for examples. +.. [#dt3] To enable decoding a bson UTC datetime to a :class:`~bson.datetime_ms.DatetimeMS` + instance see :ref:`handling-out-of-range-datetimes`. +.. [#uuid] For :py:class:`uuid.UUID` encoding and decoding behavior see :doc:`/examples/uuid`. .. [#re] :class:`~bson.regex.Regex` instances and regular expression objects from ``re.compile()`` are both saved as BSON regular expressions. BSON regular expressions are decoded as :class:`~bson.regex.Regex` From 3840d9dd0fe292e08e510310a03f3be4a8b5bbee Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 3 Sep 2024 13:26:11 -0400 Subject: [PATCH 1425/2111] Add script to help convert sync tests to async tests (#1825) --- CONTRIBUTING.md | 7 ++ tools/convert_test_to_async.py | 141 +++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+) create mode 100644 tools/convert_test_to_async.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c844470138..42cc8dc1b7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -248,3 +248,10 @@ you are attempting to validate new spec tests in PyMongo. ## Making a Release Follow the [Python Driver Release Process Wiki](https://wiki.corp.mongodb.com/display/DRIVERS/Python+Driver+Release+Process). + +## Converting a test to async +The `tools/convert_test_to_async.py` script takes in an existing synchronous test file and outputs a +partially-converted asynchronous version of the same name to the `test/asynchronous` directory. +Use this generated file as a starting point for the completed conversion. + +The script is used like so: `python tools/convert_test_to_async.py [test_file.py]` diff --git a/tools/convert_test_to_async.py b/tools/convert_test_to_async.py new file mode 100644 index 0000000000..dbdb217c84 --- /dev/null +++ b/tools/convert_test_to_async.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +import asyncio +import sys + +from pymongo import AsyncMongoClient +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase + +replacements = { + "Collection": "AsyncCollection", + "Database": "AsyncDatabase", + "Cursor": "AsyncCursor", + "MongoClient": "AsyncMongoClient", + "CommandCursor": "AsyncCommandCursor", + "RawBatchCursor": "AsyncRawBatchCursor", + "RawBatchCommandCursor": "AsyncRawBatchCommandCursor", + "ClientSession": "AsyncClientSession", + "ChangeStream": "AsyncChangeStream", + "CollectionChangeStream": "AsyncCollectionChangeStream", + "DatabaseChangeStream": "AsyncDatabaseChangeStream", + "ClusterChangeStream": "AsyncClusterChangeStream", + "_Bulk": "_AsyncBulk", + "_ClientBulk": "_AsyncClientBulk", + "Connection": "AsyncConnection", + "synchronous": "asynchronous", + "Synchronous": "Asynchronous", + "next": "await anext", + "_Lock": "_ALock", + "_Condition": "_ACondition", + "GridFS": "AsyncGridFS", + "GridFSBucket": "AsyncGridFSBucket", + "GridIn": "AsyncGridIn", + "GridOut": "AsyncGridOut", + "GridOutCursor": "AsyncGridOutCursor", + "GridOutIterator": "AsyncGridOutIterator", + "GridOutChunkIterator": "_AsyncGridOutChunkIterator", + "_grid_in_property": "_a_grid_in_property", + "_grid_out_property": "_a_grid_out_property", + "ClientEncryption": "AsyncClientEncryption", + "MongoCryptCallback": "AsyncMongoCryptCallback", + "ExplicitEncrypter": "AsyncExplicitEncrypter", + "AutoEncrypter": "AsyncAutoEncrypter", + "ContextManager": "AsyncContextManager", + "ClientContext": "AsyncClientContext", + "TestCollection": "AsyncTestCollection", + "IntegrationTest": "AsyncIntegrationTest", + "PyMongoTestCase": "AsyncPyMongoTestCase", + "MockClientTest": "AsyncMockClientTest", + "client_context": "async_client_context", + "setUp": "asyncSetUp", + "tearDown": "asyncTearDown", + "wait_until": "await async_wait_until", + "addCleanup": "addAsyncCleanup", + "TestCase": "IsolatedAsyncioTestCase", + "UnitTest": "AsyncUnitTest", + "MockClient": "AsyncMockClient", + "SpecRunner": "AsyncSpecRunner", + "TransactionsBase": "AsyncTransactionsBase", + "get_pool": "await async_get_pool", + "is_mongos": "await async_is_mongos", + "rs_or_single_client": "await async_rs_or_single_client", + "rs_or_single_client_noauth": "await async_rs_or_single_client_noauth", + "rs_client": "await async_rs_client", + "single_client": "await async_single_client", + "from_client": "await async_from_client", + "closing": "aclosing", + "assertRaisesExactly": "asyncAssertRaisesExactly", + "get_mock_client": "await get_async_mock_client", + "close": "await aclose", +} + +async_classes = [AsyncMongoClient, AsyncDatabase, AsyncCollection, AsyncCursor, AsyncCommandCursor] + + +def get_async_methods() -> set[str]: + result: set[str] = set() + for x in async_classes: + methods = { + k + for k, v in vars(x).items() + if callable(v) + and not isinstance(v, classmethod) + and asyncio.iscoroutinefunction(v) + and v.__name__[0] != "_" + } + result = result | methods + return result + + +async_methods = get_async_methods() + + +def apply_replacements(lines: list[str]) -> list[str]: + for i in range(len(lines)): + if "_IS_SYNC = True" in lines[i]: + lines[i] = "_IS_SYNC = False" + if "def test" in lines[i]: + lines[i] = lines[i].replace("def test", "async def test") + for k in replacements: + if k in lines[i]: + lines[i] = lines[i].replace(k, replacements[k]) + for k in async_methods: + if k + "(" in lines[i]: + tokens = lines[i].split(" ") + for j in range(len(tokens)): + if k + "(" in tokens[j]: + if j < 2: + tokens.insert(0, "await") + else: + tokens.insert(j, "await") + break + new_line = " ".join(tokens) + + lines[i] = new_line + + return lines + + +def process_file(input_file: str, output_file: str) -> None: + with open(input_file, "r+") as f: + lines = f.readlines() + lines = apply_replacements(lines) + + with open(output_file, "w+") as f2: + f2.seek(0) + f2.writelines(lines) + f2.truncate() + + +def main() -> None: + args = sys.argv[1:] + sync_file = "./test/" + args[0] + async_file = "./" + args[0] + + process_file(sync_file, async_file) + + +main() From ba8a139e7220a342cddbd4efcb7e937254345f5a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:18:58 -0700 Subject: [PATCH 1426/2111] PYTHON-4651: Migrate test_client_context.py to async (#1819) --- .github/workflows/test-python.yml | 2 +- test/__init__.py | 4 +- test/asynchronous/__init__.py | 4 +- test/asynchronous/test_client_context.py | 66 ++++++++++++++++++++++++ test/test_client_context.py | 6 ++- tools/synchro.py | 1 + 6 files changed, 76 insertions(+), 7 deletions(-) create mode 100644 test/asynchronous/test_client_context.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 036b2c4b76..921168c130 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -209,4 +209,4 @@ jobs: ls which python pip install -e ".[test]" - PYMONGO_MUST_CONNECT=1 pytest -v test/test_client_context.py + PYMONGO_MUST_CONNECT=1 pytest -v -k client_context diff --git a/test/__init__.py b/test/__init__.py index 2a23ae0fd3..d978d7da34 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -947,11 +947,11 @@ def tearDownClass(cls): @classmethod def _setup_class(cls): - cls._setup_class() + pass @classmethod def _tearDown_class(cls): - cls._tearDown_class() + pass class IntegrationTest(PyMongoTestCase): diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 3d22b5ff76..def4bc1b89 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -949,11 +949,11 @@ def tearDownClass(cls): @classmethod async def _setup_class(cls): - await cls._setup_class() + pass @classmethod async def _tearDown_class(cls): - await cls._tearDown_class() + pass class AsyncIntegrationTest(AsyncPyMongoTestCase): diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py new file mode 100644 index 0000000000..a0cb53a14f --- /dev/null +++ b/test/asynchronous/test_client_context.py @@ -0,0 +1,66 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncUnitTest, SkipTest, async_client_context, unittest + +_IS_SYNC = False + + +class TestAsyncClientContext(AsyncUnitTest): + def test_must_connect(self): + if "PYMONGO_MUST_CONNECT" not in os.environ: + raise SkipTest("PYMONGO_MUST_CONNECT is not set") + + self.assertTrue( + async_client_context.connected, + "client context must be connected when " + "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format( + async_client_context.connection_attempt_info() + ), + ) + + def test_serverless(self): + if "TEST_SERVERLESS" not in os.environ: + raise SkipTest("TEST_SERVERLESS is not set") + + self.assertTrue( + async_client_context.connected and async_client_context.serverless, + "client context must be connected to serverless when " + f"TEST_SERVERLESS is set. Failed attempts:\n{async_client_context.connection_attempt_info()}", + ) + + def test_enableTestCommands_is_disabled(self): + if "PYMONGO_DISABLE_TEST_COMMANDS" not in os.environ: + raise SkipTest("PYMONGO_DISABLE_TEST_COMMANDS is not set") + + self.assertFalse( + async_client_context.test_commands_enabled, + "enableTestCommands must be disabled when PYMONGO_DISABLE_TEST_COMMANDS is set.", + ) + + def test_setdefaultencoding_worked(self): + if "SETDEFAULTENCODING" not in os.environ: + raise SkipTest("SETDEFAULTENCODING is not set") + + self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_client_context.py b/test/test_client_context.py index 196647cb08..be8a562142 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -18,10 +18,12 @@ sys.path[0:0] = [""] -from test import SkipTest, client_context, unittest +from test import SkipTest, UnitTest, client_context, unittest +_IS_SYNC = True -class TestClientContext(unittest.TestCase): + +class TestClientContext(UnitTest): def test_must_connect(self): if "PYMONGO_MUST_CONNECT" not in os.environ: raise SkipTest("PYMONGO_MUST_CONNECT is not set") diff --git a/tools/synchro.py b/tools/synchro.py index 6fb7116747..adc0de2971 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -163,6 +163,7 @@ "test_logger.py", "test_session.py", "test_transactions.py", + "test_client_context.py", ] sync_test_files = [ From 5a70039ad20d7f10fb324aec6cd1661cf62f720c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 3 Sep 2024 16:57:41 -0400 Subject: [PATCH 1427/2111] PYTHON-4701 - Topology logging should use suppress_event (#1826) --- pymongo/asynchronous/topology.py | 4 ++-- pymongo/synchronous/topology.py | 4 ++-- test/unified_format.py | 6 +++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 2df30d244f..8a46e7fecd 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -475,7 +475,7 @@ async def _process_change( if server: await server.pool.ready() - suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description + suppress_event = sd_old == server_description if self._publish_server and not suppress_event: assert self._events is not None self._events.put( @@ -497,7 +497,7 @@ async def _process_change( (td_old, self._description, self._topology_id), ) ) - if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event: _debug_log( _SDAM_LOGGER, topologyId=self._topology_id, diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 54a9d8a69e..9932d2cbd9 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -475,7 +475,7 @@ def _process_change( if server: server.pool.ready() - suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description + suppress_event = sd_old == server_description if self._publish_server and not suppress_event: assert self._events is not None self._events.put( @@ -497,7 +497,7 @@ def _process_change( (td_old, self._description, self._topology_id), ) ) - if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event: _debug_log( _SDAM_LOGGER, topologyId=self._topology_id, diff --git a/test/unified_format.py b/test/unified_format.py index 99fe0b1693..e4ebf677e2 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1954,7 +1954,11 @@ def format_logs(log_list): if client.get("ignoreExtraMessages", False): actual_logs = actual_logs[: len(client["messages"])] - self.assertEqual(len(client["messages"]), len(actual_logs)) + self.assertEqual( + len(client["messages"]), + len(actual_logs), + f"expected {client['messages']} but got {actual_logs}", + ) for expected_msg, actual_msg in zip(client["messages"], actual_logs): expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") From 5a49ccc759665825c32f1cba9f780f195daf890f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 4 Sep 2024 08:57:59 -0400 Subject: [PATCH 1428/2111] PYTHON-4590 - Add type guards to async API methods (#1820) --- pymongo/asynchronous/collection.py | 4 ++++ pymongo/asynchronous/database.py | 5 +++++ pymongo/asynchronous/encryption.py | 12 +++++++++--- pymongo/asynchronous/mongo_client.py | 3 +++ pymongo/synchronous/collection.py | 4 ++++ pymongo/synchronous/database.py | 5 +++++ pymongo/synchronous/encryption.py | 12 +++++++++--- pymongo/synchronous/mongo_client.py | 3 +++ test/helpers.py | 10 ---------- 9 files changed, 42 insertions(+), 16 deletions(-) diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index e5a54c0904..6d8dfaf89a 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -228,6 +228,10 @@ def __init__( ) if not isinstance(name, str): raise TypeError("name must be an instance of str") + from pymongo.asynchronous.database import AsyncDatabase + + if not isinstance(database, AsyncDatabase): + raise TypeError(f"AsyncCollection requires an AsyncDatabase but {type(database)} given") if not name or ".." in name: raise InvalidName("collection names cannot be empty") diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index b61d581839..d5eec0134d 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -119,9 +119,14 @@ def __init__( read_concern or client.read_concern, ) + from pymongo.asynchronous.mongo_client import AsyncMongoClient + if not isinstance(name, str): raise TypeError("name must be an instance of str") + if not isinstance(client, AsyncMongoClient): + raise TypeError(f"AsyncMongoClient required but given {type(client)}") + if name != "$external": _check_name(name) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 3fb00c6ca9..c4cb886df7 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -194,9 +194,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: # Wrap I/O errors in PyMongo exceptions. _raise_connection_failure((host, port), error) - async def collection_info( - self, database: AsyncDatabase[Mapping[str, Any]], filter: bytes - ) -> Optional[bytes]: + async def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: """Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads @@ -598,6 +596,9 @@ def __init__( if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + if not isinstance(key_vault_client, AsyncMongoClient): + raise TypeError(f"AsyncMongoClient required but given {type(key_vault_client)}") + self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client @@ -683,6 +684,11 @@ async def create_encrypted_collection( https://mongodb.com/docs/manual/reference/command/create """ + if not isinstance(database, AsyncDatabase): + raise TypeError( + f"create_encrypted_collection() requires an AsyncDatabase but {type(database)} given" + ) + encrypted_fields = deepcopy(encrypted_fields) for i, field in enumerate(encrypted_fields["fields"]): if isinstance(field, dict) and field.get("keyId") is None: diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 05e4e80f1d..2af773c440 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2419,6 +2419,9 @@ class _MongoClientErrorHandler: def __init__( self, client: AsyncMongoClient, server: Server, session: Optional[AsyncClientSession] ): + if not isinstance(client, AsyncMongoClient): + raise TypeError(f"AsyncMongoClient required but given {type(client)}") + self.client = client self.server_address = server.description.address self.session = session diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 54db3a56b3..93e24432e5 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -231,6 +231,10 @@ def __init__( ) if not isinstance(name, str): raise TypeError("name must be an instance of str") + from pymongo.synchronous.database import Database + + if not isinstance(database, Database): + raise TypeError(f"Collection requires a Database but {type(database)} given") if not name or ".." in name: raise InvalidName("collection names cannot be empty") diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 93a9985281..1cd8ee643b 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -119,9 +119,14 @@ def __init__( read_concern or client.read_concern, ) + from pymongo.synchronous.mongo_client import MongoClient + if not isinstance(name, str): raise TypeError("name must be an instance of str") + if not isinstance(client, MongoClient): + raise TypeError(f"MongoClient required but given {type(client)}") + if name != "$external": _check_name(name) diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index e06ddad93d..2efa995978 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -194,9 +194,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: # Wrap I/O errors in PyMongo exceptions. _raise_connection_failure((host, port), error) - def collection_info( - self, database: Database[Mapping[str, Any]], filter: bytes - ) -> Optional[bytes]: + def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: """Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads @@ -596,6 +594,9 @@ def __init__( if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + if not isinstance(key_vault_client, MongoClient): + raise TypeError(f"MongoClient required but given {type(key_vault_client)}") + self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client @@ -681,6 +682,11 @@ def create_encrypted_collection( https://mongodb.com/docs/manual/reference/command/create """ + if not isinstance(database, Database): + raise TypeError( + f"create_encrypted_collection() requires a Database but {type(database)} given" + ) + encrypted_fields = deepcopy(encrypted_fields) for i, field in enumerate(encrypted_fields["fields"]): if isinstance(field, dict) and field.get("keyId") is None: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 77e029a7c9..6c5f68b7eb 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2406,6 +2406,9 @@ class _MongoClientErrorHandler: ) def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): + if not isinstance(client, MongoClient): + raise TypeError(f"MongoClient required but given {type(client)}") + self.client = client self.server_address = server.description.address self.session = session diff --git a/test/helpers.py b/test/helpers.py index d136e5b8d2..b38b2e2980 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -35,23 +35,13 @@ HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False -from contextlib import contextmanager from functools import wraps -from test.version import Version from typing import Any, Callable, Dict, Generator, no_type_check from unittest import SkipTest -from urllib.parse import quote_plus -import pymongo -import pymongo.errors from bson.son import SON from pymongo import common, message -from pymongo.common import partition_node -from pymongo.hello import HelloCompat -from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] -from pymongo.synchronous.database import Database -from pymongo.synchronous.mongo_client import MongoClient from pymongo.uri_parser import parse_uri if HAVE_SSL: From 4e74c8274e7c4cb7658d445d526dcc33ced1750b Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 4 Sep 2024 08:58:14 -0400 Subject: [PATCH 1429/2111] PYTHON-4669 - Update Async GridFS APIs for Motor Compatibility (#1821) --- gridfs/asynchronous/grid_file.py | 101 ++-- gridfs/grid_file_shared.py | 19 + gridfs/synchronous/grid_file.py | 97 ++-- pymongo/asynchronous/helpers.py | 5 + pymongo/asynchronous/topology.py | 2 +- pymongo/synchronous/helpers.py | 5 + pymongo/synchronous/topology.py | 2 +- test/asynchronous/test_grid_file.py | 871 ++++++++++++++++++++++++++++ test/test_grid_file.py | 114 +++- tools/synchro.py | 4 + 10 files changed, 1115 insertions(+), 105 deletions(-) create mode 100644 test/asynchronous/test_grid_file.py diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index afc1a0f756..a49d51d304 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -1176,24 +1176,6 @@ def __getattr__(self, name: str) -> Any: raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name: str, value: Any) -> None: - # For properties of this instance like _buffer, or descriptors set on - # the class like filename, use regular __setattr__ - if name in self.__dict__ or name in self.__class__.__dict__: - object.__setattr__(self, name, value) - else: - if _IS_SYNC: - # All other attributes are part of the document in db.fs.files. - # Store them to be sent to server on close() or if closed, send - # them now. - self._file[name] = value - if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - else: - raise AttributeError( - "AsyncGridIn does not support __setattr__. Use AsyncGridIn.set() instead" - ) - - async def set(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: @@ -1204,9 +1186,17 @@ async def set(self, name: str, value: Any) -> None: # them now. self._file[name] = value if self._closed: - await self._coll.files.update_one( - {"_id": self._file["_id"]}, {"$set": {name: value}} - ) + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "AsyncGridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use AsyncGridIn.set() instead" + ) + + async def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + await self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) async def _flush_data(self, data: Any, force: bool = False) -> None: """Flush `data` to a chunk.""" @@ -1400,7 +1390,11 @@ async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: return False -class AsyncGridOut(io.IOBase): +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class AsyncGridOut(GRIDOUT_BASE_CLASS): # type: ignore + """Class to read data out of GridFS.""" def __init__( @@ -1460,6 +1454,8 @@ def __init__( self._position = 0 self._file = file_document self._session = session + if not _IS_SYNC: + self.closed = False _id: Any = _a_grid_out_property("_id", "The ``'_id'`` value for this file.") filename: str = _a_grid_out_property("filename", "Name of this file.") @@ -1486,16 +1482,43 @@ def __init__( _file: Any _chunk_iter: Any - async def __anext__(self) -> bytes: - return super().__next__() + if not _IS_SYNC: + closed: bool - def __next__(self) -> bytes: # noqa: F811, RUF100 - if _IS_SYNC: - return super().__next__() - else: - raise TypeError( - "AsyncGridOut does not support synchronous iteration. Use `async for` instead" - ) + async def __anext__(self) -> bytes: + line = await self.readline() + if line: + return line + raise StopAsyncIteration() + + async def to_list(self) -> list[bytes]: + return [x async for x in self] # noqa: C416, RUF100 + + async def readline(self, size: int = -1) -> bytes: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + return await self._read_size_or_line(size=size, line=True) + + async def readlines(self, size: int = -1) -> list[bytes]: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + await self.open() + lines = [] + remainder = int(self.length) - self._position + bytes_read = 0 + while remainder > 0: + line = await self._read_size_or_line(line=True) + bytes_read += len(line) + lines.append(line) + remainder = int(self.length) - self._position + if 0 < size < bytes_read: + break + + return lines async def open(self) -> None: if not self._file: @@ -1616,18 +1639,11 @@ async def read(self, size: int = -1) -> bytes: """ return await self._read_size_or_line(size=size) - async def readline(self, size: int = -1) -> bytes: # type: ignore[override] - """Read one line or up to `size` bytes from the file. - - :param size: the maximum number of bytes to read - """ - return await self._read_size_or_line(size=size, line=True) - def tell(self) -> int: """Return the current position of this file.""" return self._position - async def seek(self, pos: int, whence: int = _SEEK_SET) -> int: # type: ignore[override] + async def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. :param pos: the position (or offset if using relative @@ -1690,12 +1706,15 @@ def __aiter__(self) -> AsyncGridOut: """ return self - async def close(self) -> None: # type: ignore[override] + async def close(self) -> None: """Make GridOut more generically file-like.""" if self._chunk_iter: await self._chunk_iter.close() self._chunk_iter = None - super().close() + if _IS_SYNC: + super().close() + else: + self.closed = True def write(self, value: Any) -> NoReturn: raise io.UnsupportedOperation("write") diff --git a/gridfs/grid_file_shared.py b/gridfs/grid_file_shared.py index b6f02a53df..79a0ad7f8c 100644 --- a/gridfs/grid_file_shared.py +++ b/gridfs/grid_file_shared.py @@ -38,7 +38,15 @@ def _a_grid_in_property( ) -> Any: """Create a GridIn property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) if closed_only and not self._closed: raise AttributeError("can only get %r on a closed file" % field_name) # Protect against PHP-237 @@ -46,6 +54,15 @@ def getter(self: Any) -> Any: return self._file.get(field_name, 0) return self._file.get(field_name, None) + def setter(self: Any, value: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if self._closed: + raise InvalidOperation( + "AsyncGridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use AsyncGridIn.set() instead" + ) + self._file[field_name] = value + if read_only: docstring += "\n\nThis attribute is read-only." elif closed_only: @@ -56,6 +73,8 @@ def getter(self: Any) -> Any: "has been called.", ) + if not read_only and not closed_only: + return property(getter, setter, doc=docstring) return property(getter, doc=docstring) diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index 80015f96e7..655f05f57a 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -1166,24 +1166,6 @@ def __getattr__(self, name: str) -> Any: raise AttributeError("GridIn object has no attribute '%s'" % name) def __setattr__(self, name: str, value: Any) -> None: - # For properties of this instance like _buffer, or descriptors set on - # the class like filename, use regular __setattr__ - if name in self.__dict__ or name in self.__class__.__dict__: - object.__setattr__(self, name, value) - else: - if _IS_SYNC: - # All other attributes are part of the document in db.fs.files. - # Store them to be sent to server on close() or if closed, send - # them now. - self._file[name] = value - if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - else: - raise AttributeError( - "GridIn does not support __setattr__. Use GridIn.set() instead" - ) - - def set(self, name: str, value: Any) -> None: # For properties of this instance like _buffer, or descriptors set on # the class like filename, use regular __setattr__ if name in self.__dict__ or name in self.__class__.__dict__: @@ -1194,7 +1176,17 @@ def set(self, name: str, value: Any) -> None: # them now. self._file[name] = value if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "GridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use GridIn.set() instead" + ) + + def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) def _flush_data(self, data: Any, force: bool = False) -> None: """Flush `data` to a chunk.""" @@ -1388,7 +1380,11 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: return False -class GridOut(io.IOBase): +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class GridOut(GRIDOUT_BASE_CLASS): # type: ignore + """Class to read data out of GridFS.""" def __init__( @@ -1448,6 +1444,8 @@ def __init__( self._position = 0 self._file = file_document self._session = session + if not _IS_SYNC: + self.closed = False _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") filename: str = _grid_out_property("filename", "Name of this file.") @@ -1474,14 +1472,43 @@ def __init__( _file: Any _chunk_iter: Any - def __next__(self) -> bytes: - return super().__next__() + if not _IS_SYNC: + closed: bool - def __next__(self) -> bytes: # noqa: F811, RUF100 - if _IS_SYNC: - return super().__next__() - else: - raise TypeError("GridOut does not support synchronous iteration. Use `for` instead") + def __next__(self) -> bytes: + line = self.readline() + if line: + return line + raise StopIteration() + + def to_list(self) -> list[bytes]: + return [x for x in self] # noqa: C416, RUF100 + + def readline(self, size: int = -1) -> bytes: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + return self._read_size_or_line(size=size, line=True) + + def readlines(self, size: int = -1) -> list[bytes]: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + self.open() + lines = [] + remainder = int(self.length) - self._position + bytes_read = 0 + while remainder > 0: + line = self._read_size_or_line(line=True) + bytes_read += len(line) + lines.append(line) + remainder = int(self.length) - self._position + if 0 < size < bytes_read: + break + + return lines def open(self) -> None: if not self._file: @@ -1602,18 +1629,11 @@ def read(self, size: int = -1) -> bytes: """ return self._read_size_or_line(size=size) - def readline(self, size: int = -1) -> bytes: # type: ignore[override] - """Read one line or up to `size` bytes from the file. - - :param size: the maximum number of bytes to read - """ - return self._read_size_or_line(size=size, line=True) - def tell(self) -> int: """Return the current position of this file.""" return self._position - def seek(self, pos: int, whence: int = _SEEK_SET) -> int: # type: ignore[override] + def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. :param pos: the position (or offset if using relative @@ -1676,12 +1696,15 @@ def __iter__(self) -> GridOut: """ return self - def close(self) -> None: # type: ignore[override] + def close(self) -> None: """Make GridOut more generically file-like.""" if self._chunk_iter: self._chunk_iter.close() self._chunk_iter = None - super().close() + if _IS_SYNC: + super().close() + else: + self.closed = True def write(self, value: Any) -> NoReturn: raise io.UnsupportedOperation("write") diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 8a85135c1e..1ac8b6630f 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -70,8 +70,13 @@ async def inner(*args: Any, **kwargs: Any) -> Any: if sys.version_info >= (3, 10): anext = builtins.anext + aiter = builtins.aiter else: async def anext(cls: Any) -> Any: """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#anext.""" return await cls.__anext__() + + def aiter(cls: Any) -> Any: + """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#anext.""" + return cls.__aiter__() diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 8a46e7fecd..9dd1a1c76b 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -521,7 +521,7 @@ async def _process_change( if server: await server.pool.reset(interrupt_connections=interrupt_connections) - # Wake waiters in select_servers(). + # Wake anything waiting in select_servers(). self._condition.notify_all() async def on_change( diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index e6bbf5d515..064583dad3 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -70,8 +70,13 @@ def inner(*args: Any, **kwargs: Any) -> Any: if sys.version_info >= (3, 10): next = builtins.next + iter = builtins.iter else: def next(cls: Any) -> Any: """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#next.""" return cls.__next__() + + def iter(cls: Any) -> Any: + """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#next.""" + return cls.__iter__() diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 9932d2cbd9..414865154e 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -521,7 +521,7 @@ def _process_change( if server: server.pool.reset(interrupt_connections=interrupt_connections) - # Wake waiters in select_servers(). + # Wake anything waiting in select_servers(). self._condition.notify_all() def on_change( diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py new file mode 100644 index 0000000000..7071fc76f4 --- /dev/null +++ b/test/asynchronous/test_grid_file.py @@ -0,0 +1,871 @@ +# +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the grid_file module.""" +from __future__ import annotations + +import datetime +import io +import sys +import zipfile +from io import BytesIO +from test.asynchronous import AsyncIntegrationTest, AsyncUnitTest, async_client_context + +from pymongo.asynchronous.database import AsyncDatabase + +sys.path[0:0] = [""] + +from test import IntegrationTest, qcheck, unittest +from test.utils import EventListener, async_rs_or_single_client, rs_or_single_client + +from bson.objectid import ObjectId +from gridfs import GridFS +from gridfs.asynchronous.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + AsyncGridFS, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from pymongo import AsyncMongoClient +from pymongo.asynchronous.helpers import aiter, anext +from pymongo.errors import ConfigurationError, InvalidOperation, ServerSelectionTimeoutError +from pymongo.message import _CursorAddress + +_IS_SYNC = False + + +class AsyncTestGridFileNoConnect(AsyncUnitTest): + """Test GridFile features on a client that does not connect.""" + + db: AsyncDatabase + + @classmethod + def setUpClass(cls): + cls.db = AsyncMongoClient(connect=False).pymongo_test + + def test_grid_in_custom_opts(self): + self.assertRaises(TypeError, AsyncGridIn, "foo") + + a = AsyncGridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + + self.assertEqual(5, a._id) + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + self.assertEqual("text/html", a.content_type) + self.assertEqual(1000, a.chunk_size) + self.assertEqual(["foo"], a.aliases) + self.assertEqual({"foo": 1, "bar": 2}, a.metadata) + self.assertEqual(3, a.bar) + self.assertEqual("hello", a.baz) + self.assertRaises(AttributeError, getattr, a, "mike") + + b = AsyncGridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) + self.assertEqual("text/html", b.content_type) + self.assertEqual(1000, b.chunk_size) + self.assertEqual(100, b.baz) + + +class AsyncTestGridFile(AsyncIntegrationTest): + async def asyncSetUp(self): + await self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) + + async def test_basic(self): + f = AsyncGridIn(self.db.fs, filename="test") + await f.write(b"hello world") + await f.close() + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + + # make sure it's still there... + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + + f = AsyncGridIn(self.db.fs, filename="test") + await f.close() + self.assertEqual(2, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"", await g.read()) + + # test that reading 0 returns proper type + self.assertEqual(b"", await g.read(0)) + + async def test_md5(self): + f = AsyncGridIn(self.db.fs) + await f.write(b"hello world\n") + await f.close() + self.assertEqual(None, f.md5) + + async def test_alternate_collection(self): + await self.db.alt.files.delete_many({}) + await self.db.alt.chunks.delete_many({}) + + f = AsyncGridIn(self.db.alt) + await f.write(b"hello world") + await f.close() + + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + g = AsyncGridOut(self.db.alt, f._id) + self.assertEqual(b"hello world", await g.read()) + + async def test_grid_in_default_opts(self): + self.assertRaises(TypeError, AsyncGridIn, "foo") + + a = AsyncGridIn(self.db.fs) + + self.assertTrue(isinstance(a._id, ObjectId)) + self.assertRaises(AttributeError, setattr, a, "_id", 5) + + self.assertEqual(None, a.filename) + self.assertEqual(None, a.name) + a.filename = "my_file" + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + + self.assertEqual(None, a.content_type) + a.content_type = "text/html" + + self.assertEqual("text/html", a.content_type) + + self.assertRaises(AttributeError, getattr, a, "length") + self.assertRaises(AttributeError, setattr, a, "length", 5) + + self.assertEqual(255 * 1024, a.chunk_size) + self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) + + self.assertRaises(AttributeError, getattr, a, "upload_date") + self.assertRaises(AttributeError, setattr, a, "upload_date", 5) + + self.assertRaises(AttributeError, getattr, a, "aliases") + a.aliases = ["foo"] + + self.assertEqual(["foo"], a.aliases) + + self.assertRaises(AttributeError, getattr, a, "metadata") + a.metadata = {"foo": 1} + + self.assertEqual({"foo": 1}, a.metadata) + + self.assertRaises(AttributeError, setattr, a, "md5", 5) + + await a.close() + + if _IS_SYNC: + a.forty_two = 42 + else: + self.assertRaises(AttributeError, setattr, a, "forty_two", 42) + await a.set("forty_two", 42) + + self.assertEqual(42, a.forty_two) + + self.assertTrue(isinstance(a._id, ObjectId)) + self.assertRaises(AttributeError, setattr, a, "_id", 5) + + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + + self.assertEqual("text/html", a.content_type) + + self.assertEqual(0, a.length) + self.assertRaises(AttributeError, setattr, a, "length", 5) + + self.assertEqual(255 * 1024, a.chunk_size) + self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) + + self.assertTrue(isinstance(a.upload_date, datetime.datetime)) + self.assertRaises(AttributeError, setattr, a, "upload_date", 5) + + self.assertEqual(["foo"], a.aliases) + + self.assertEqual({"foo": 1}, a.metadata) + + self.assertEqual(None, a.md5) + self.assertRaises(AttributeError, setattr, a, "md5", 5) + + # Make sure custom attributes that were set both before and after + # a.close() are reflected in b. PYTHON-411. + b = await AsyncGridFS(self.db).get_last_version(filename=a.filename) + self.assertEqual(a.metadata, b.metadata) + self.assertEqual(a.aliases, b.aliases) + self.assertEqual(a.forty_two, b.forty_two) + + async def test_grid_out_default_opts(self): + self.assertRaises(TypeError, AsyncGridOut, "foo") + + gout = AsyncGridOut(self.db.fs, 5) + with self.assertRaises(NoFile): + if not _IS_SYNC: + await gout.open() + gout.name + + a = AsyncGridIn(self.db.fs) + await a.close() + + b = AsyncGridOut(self.db.fs, a._id) + if not _IS_SYNC: + await b.open() + + self.assertEqual(a._id, b._id) + self.assertEqual(0, b.length) + self.assertEqual(None, b.content_type) + self.assertEqual(None, b.name) + self.assertEqual(None, b.filename) + self.assertEqual(255 * 1024, b.chunk_size) + self.assertTrue(isinstance(b.upload_date, datetime.datetime)) + self.assertEqual(None, b.aliases) + self.assertEqual(None, b.metadata) + self.assertEqual(None, b.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, b, attr, 5) + + async def test_grid_out_cursor_options(self): + self.assertRaises( + TypeError, AsyncGridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) + + cursor = AsyncGridOutCursor(self.db.fs, {}) + cursor_clone = cursor.clone() + + cursor_dict = cursor.__dict__.copy() + cursor_dict.pop("_session") + cursor_clone_dict = cursor_clone.__dict__.copy() + cursor_clone_dict.pop("_session") + self.assertDictEqual(cursor_dict, cursor_clone_dict) + + self.assertRaises(NotImplementedError, cursor.add_option, 0) + self.assertRaises(NotImplementedError, cursor.remove_option, 0) + + async def test_grid_out_custom_opts(self): + one = AsyncGridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(self.db.fs, 5) + + if not _IS_SYNC: + await two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual("text/html", two.content_type) + self.assertEqual(1000, two.chunk_size) + self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertEqual(["foo"], two.aliases) + self.assertEqual({"foo": 1, "bar": 2}, two.metadata) + self.assertEqual(3, two.bar) + self.assertEqual(None, two.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + async def test_grid_out_file_document(self): + one = AsyncGridIn(self.db.fs) + await one.write(b"foo bar") + await one.close() + + two = AsyncGridOut(self.db.fs, file_document=await self.db.fs.files.find_one()) + self.assertEqual(b"foo bar", await two.read()) + + three = AsyncGridOut(self.db.fs, 5, file_document=await self.db.fs.files.find_one()) + self.assertEqual(b"foo bar", await three.read()) + + four = AsyncGridOut(self.db.fs, file_document={}) + with self.assertRaises(NoFile): + if not _IS_SYNC: + await four.open() + four.name + + async def test_write_file_like(self): + one = AsyncGridIn(self.db.fs) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(self.db.fs, one._id) + + three = AsyncGridIn(self.db.fs) + await three.write(two) + await three.close() + + four = AsyncGridOut(self.db.fs, three._id) + self.assertEqual(b"hello world", await four.read()) + + five = AsyncGridIn(self.db.fs, chunk_size=2) + await five.write(b"hello") + buffer = BytesIO(b" world") + await five.write(buffer) + await five.write(b" and mongodb") + await five.close() + self.assertEqual( + b"hello world and mongodb", await AsyncGridOut(self.db.fs, five._id).read() + ) + + async def test_write_lines(self): + a = AsyncGridIn(self.db.fs) + await a.writelines([b"hello ", b"world"]) + await a.close() + + self.assertEqual(b"hello world", await AsyncGridOut(self.db.fs, a._id).read()) + + async def test_close(self): + f = AsyncGridIn(self.db.fs) + await f.close() + with self.assertRaises(ValueError): + await f.write("test") + await f.close() + + async def test_closed(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write(b"Hello world.\nHow are you?") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + if not _IS_SYNC: + await g.open() + self.assertFalse(g.closed) + await g.read(1) + self.assertFalse(g.closed) + await g.read(100) + self.assertFalse(g.closed) + await g.close() + self.assertTrue(g.closed) + + async def test_multi_chunk_file(self): + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) + + f = AsyncGridIn(self.db.fs) + await f.write(random_string) + await f.close() + + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(2, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(random_string, await g.read()) + + # TODO: https://jira.mongodb.org/browse/PYTHON-4708 + @async_client_context.require_sync + async def test_small_chunks(self): + self.files = 0 + self.chunks = 0 + + async def helper(data): + f = AsyncGridIn(self.db.fs, chunkSize=1) + await f.write(data) + await f.close() + + self.files += 1 + self.chunks += len(data) + + self.assertEqual(self.files, await self.db.fs.files.count_documents({})) + self.assertEqual(self.chunks, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(data, await g.read()) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(data, await g.read(10) + await g.read(10)) + return True + + qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) + + async def test_seek(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + await g.seek(0) + self.assertEqual(b"hello world", await g.read()) + await g.seek(1) + self.assertEqual(b"ello world", await g.read()) + with self.assertRaises(IOError): + await g.seek(-1) + + await g.seek(-3, _SEEK_END) + self.assertEqual(b"rld", await g.read()) + await g.seek(0, _SEEK_END) + self.assertEqual(b"", await g.read()) + with self.assertRaises(IOError): + await g.seek(-100, _SEEK_END) + + await g.seek(3) + await g.seek(3, _SEEK_CUR) + self.assertEqual(b"world", await g.read()) + with self.assertRaises(IOError): + await g.seek(-100, _SEEK_CUR) + + async def test_tell(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(0, g.tell()) + await g.read(0) + self.assertEqual(0, g.tell()) + await g.read(1) + self.assertEqual(1, g.tell()) + await g.read(2) + self.assertEqual(3, g.tell()) + await g.read() + self.assertEqual(g.length, g.tell()) + + async def test_multiple_reads(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"he", await g.read(2)) + self.assertEqual(b"ll", await g.read(2)) + self.assertEqual(b"o ", await g.read(2)) + self.assertEqual(b"wo", await g.read(2)) + self.assertEqual(b"rl", await g.read(2)) + self.assertEqual(b"d", await g.read(2)) + self.assertEqual(b"", await g.read(2)) + + async def test_readline(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + await f.close() + + # Try read(), then readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"H", await g.read(1)) + self.assertEqual(b"ello world,\n", await g.readline()) + self.assertEqual(b"How a", await g.readline(5)) + self.assertEqual(b"", await g.readline(0)) + self.assertEqual(b"re you?\n", await g.readline()) + self.assertEqual(b"Hope all is well.\n", await g.readline(1000)) + self.assertEqual(b"Bye", await g.readline()) + self.assertEqual(b"", await g.readline()) + + # Try readline() first, then read(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"He", await g.readline(2)) + self.assertEqual(b"l", await g.read(1)) + self.assertEqual(b"lo", await g.readline(2)) + self.assertEqual(b" world,\n", await g.readline()) + + # Only readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"H", await g.readline(1)) + self.assertEqual(b"e", await g.readline(1)) + self.assertEqual(b"llo world,\n", await g.readline()) + + async def test_readlines(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + await f.close() + + # Try read(), then readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"He", await g.read(2)) + self.assertEqual([b"llo world,\n", b"How are you?\n"], await g.readlines(11)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], await g.readlines()) + self.assertEqual([], await g.readlines()) + + # Try readline(), then readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"Hello world,\n", await g.readline()) + self.assertEqual([b"How are you?\n", b"Hope all is well.\n"], await g.readlines(13)) + self.assertEqual(b"Bye", await g.readline()) + self.assertEqual([], await g.readlines()) + + # Only readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + await g.readlines(), + ) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + await g.readlines(0), + ) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual([b"How are you?\n"], await g.readlines(12)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], await g.readlines(18)) + + # Try readlines() first, then read(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual(b"H", await g.read(1)) + self.assertEqual([b"ow are you?\n", b"Hope all is well.\n"], await g.readlines(29)) + self.assertEqual([b"Bye"], await g.readlines(1)) + + # Try readlines() first, then readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual(b"How are you?\n", await g.readline()) + self.assertEqual([b"Hope all is well.\n"], await g.readlines(17)) + self.assertEqual(b"Bye", await g.readline()) + + async def test_iterator(self): + f = AsyncGridIn(self.db.fs) + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], await g.to_list()) + + f = AsyncGridIn(self.db.fs) + await f.write(b"hello world\nhere are\nsome lines.") + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + else: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], await g.to_list()) + + self.assertEqual(b"", await g.read(5)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], await g.to_list()) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world\n", await anext(aiter(g))) + self.assertEqual(b"here", await g.read(4)) + self.assertEqual(b" are\n", await anext(aiter(g))) + self.assertEqual(b"some lines", await g.read(10)) + self.assertEqual(b".", await anext(aiter(g))) + with self.assertRaises(StopAsyncIteration): + await aiter(g).__anext__() + + f = AsyncGridIn(self.db.fs, chunk_size=2) + await f.write(b"hello world") + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([b"hello world"], list(g)) + else: + self.assertEqual([b"hello world"], await g.to_list()) + + async def test_read_unaligned_buffer_size(self): + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." + f = AsyncGridIn(self.db.fs, chunkSize=16) + await f.write(in_data) + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + out_data = b"" + while 1: + s = await g.read(13) + if not s: + break + out_data += s + + self.assertEqual(in_data, out_data) + + async def test_readchunk(self): + in_data = b"a" * 10 + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(in_data) + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(3, len(await g.readchunk())) + + self.assertEqual(2, len(await g.read(2))) + self.assertEqual(1, len(await g.readchunk())) + + self.assertEqual(3, len(await g.read(3))) + + self.assertEqual(1, len(await g.readchunk())) + + self.assertEqual(0, len(await g.readchunk())) + + async def test_write_unicode(self): + f = AsyncGridIn(self.db.fs) + with self.assertRaises(TypeError): + await f.write("foo") + + f = AsyncGridIn(self.db.fs, encoding="utf-8") + await f.write("foo") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"foo", await g.read()) + + f = AsyncGridIn(self.db.fs, encoding="iso-8859-1") + await f.write("aé") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual("aé".encode("iso-8859-1"), await g.read()) + + async def test_set_after_close(self): + f = AsyncGridIn(self.db.fs, _id="foo", bar="baz") + + self.assertEqual("foo", f._id) + self.assertEqual("baz", f.bar) + self.assertRaises(AttributeError, getattr, f, "baz") + self.assertRaises(AttributeError, getattr, f, "uploadDate") + + self.assertRaises(AttributeError, setattr, f, "_id", 5) + if _IS_SYNC: + f.bar = "foo" + f.baz = 5 + else: + await f.set("bar", "foo") + await f.set("baz", 5) + + self.assertEqual("foo", f._id) + self.assertEqual("foo", f.bar) + self.assertEqual(5, f.baz) + self.assertRaises(AttributeError, getattr, f, "uploadDate") + + await f.close() + + self.assertEqual("foo", f._id) + self.assertEqual("foo", f.bar) + self.assertEqual(5, f.baz) + self.assertTrue(f.uploadDate) + + self.assertRaises(AttributeError, setattr, f, "_id", 5) + if _IS_SYNC: + f.bar = "a" + f.baz = "b" + else: + await f.set("bar", "a") + await f.set("baz", "b") + self.assertRaises(AttributeError, setattr, f, "upload_date", 5) + + g = AsyncGridOut(self.db.fs, f._id) + if not _IS_SYNC: + await g.open() + self.assertEqual("a", g.bar) + self.assertEqual("b", g.baz) + # Versions 2.0.1 and older saved a _closed field for some reason. + self.assertRaises(AttributeError, getattr, g, "_closed") + + async def test_context_manager(self): + contents = b"Imagine this is some important data..." + + async with AsyncGridIn(self.db.fs, filename="important") as infile: + await infile.write(contents) + + async with AsyncGridOut(self.db.fs, infile._id) as outfile: + self.assertEqual(contents, await outfile.read()) + + async def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + async with AsyncGridIn(self.db.fs, filename="important") as infile: + await infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. + self.assertEqual( + await self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + + self.assertIsNone(await self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + + async def test_prechunked_string(self): + async def write_me(s, chunk_size): + buf = BytesIO(s) + infile = AsyncGridIn(self.db.fs) + while True: + to_write = buf.read(chunk_size) + if to_write == b"": + break + await infile.write(to_write) + await infile.close() + buf.close() + + outfile = AsyncGridOut(self.db.fs, infile._id) + data = await outfile.read() + self.assertEqual(s, data) + + s = b"x" * DEFAULT_CHUNK_SIZE * 4 + # Test with default chunk size + await write_me(s, DEFAULT_CHUNK_SIZE) + # Multiple + await write_me(s, DEFAULT_CHUNK_SIZE * 3) + # Custom + await write_me(s, 262300) + + async def test_grid_out_lazy_connect(self): + fs = self.db.fs + outfile = AsyncGridOut(fs, file_id=-1) + with self.assertRaises(NoFile): + await outfile.read() + with self.assertRaises(NoFile): + if not _IS_SYNC: + await outfile.open() + outfile.filename + + infile = AsyncGridIn(fs, filename=1) + await infile.close() + + outfile = AsyncGridOut(fs, infile._id) + await outfile.read() + outfile.filename + + outfile = AsyncGridOut(fs, infile._id) + await outfile.readchunk() + + async def test_grid_in_lazy_connect(self): + client = AsyncMongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) + fs = client.db.fs + infile = AsyncGridIn(fs, file_id=-1, chunk_size=1) + with self.assertRaises(ServerSelectionTimeoutError): + await infile.write(b"data") + with self.assertRaises(ServerSelectionTimeoutError): + await infile.close() + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + AsyncGridIn((await async_rs_or_single_client(w=0)).pymongo_test.fs) + + async def test_survive_cursor_not_found(self): + # By default the find command returns 101 documents in the first batch. + # Use 102 batches to cause a single getMore. + chunk_size = 1024 + data = b"d" * (102 * chunk_size) + listener = EventListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + db = client.pymongo_test + async with AsyncGridIn(db.fs, chunk_size=chunk_size) as infile: + await infile.write(data) + + async with AsyncGridOut(db.fs, infile._id) as outfile: + self.assertEqual(len(await outfile.readchunk()), chunk_size) + + # Kill the cursor to simulate the cursor timing out on the server + # when an application spends a long time between two calls to + # readchunk(). + assert await client.address is not None + await client._close_cursor_now( + outfile._chunk_iter._cursor.cursor_id, + _CursorAddress(await client.address, db.fs.chunks.full_name), # type: ignore[arg-type] + ) + + # Read the rest of the file without error. + self.assertEqual(len(await outfile.read()), len(data) - chunk_size) + + # Paranoid, ensure that a getMore was actually sent. + self.assertIn("getMore", listener.started_command_names()) + + @async_client_context.require_sync + async def test_zip(self): + zf = BytesIO() + z = zipfile.ZipFile(zf, "w") + z.writestr("test.txt", b"hello world") + z.close() + zf.seek(0) + + f = AsyncGridIn(self.db.fs, filename="test.zip") + await f.write(zf) + await f.close() + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + z = zipfile.ZipFile(g) + self.assertSequenceEqual(z.namelist(), ["test.txt"]) + self.assertEqual(z.read("test.txt"), b"hello world") + + async def test_grid_out_unsupported_operations(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + + self.assertRaises(io.UnsupportedOperation, g.writelines, [b"some", b"lines"]) + self.assertRaises(io.UnsupportedOperation, g.write, b"some text") + self.assertRaises(io.UnsupportedOperation, g.fileno) + self.assertRaises(io.UnsupportedOperation, g.truncate) + + self.assertFalse(g.writable()) + self.assertFalse(g.isatty()) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_grid_file.py b/test/test_grid_file.py index f663f13653..0e806eb5cb 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -21,6 +21,7 @@ import sys import zipfile from io import BytesIO +from test import IntegrationTest, UnitTest, client_context from pymongo.synchronous.database import Database @@ -36,16 +37,20 @@ _SEEK_CUR, _SEEK_END, DEFAULT_CHUNK_SIZE, + GridFS, GridIn, GridOut, GridOutCursor, ) from pymongo import MongoClient -from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError +from pymongo.errors import ConfigurationError, InvalidOperation, ServerSelectionTimeoutError from pymongo.message import _CursorAddress +from pymongo.synchronous.helpers import iter, next +_IS_SYNC = True -class TestGridFileNoConnect(unittest.TestCase): + +class TestGridFileNoConnect(UnitTest): """Test GridFile features on a client that does not connect.""" db: Database @@ -151,6 +156,7 @@ def test_grid_in_default_opts(self): self.assertEqual(None, a.content_type) a.content_type = "text/html" + self.assertEqual("text/html", a.content_type) self.assertRaises(AttributeError, getattr, a, "length") @@ -164,17 +170,24 @@ def test_grid_in_default_opts(self): self.assertRaises(AttributeError, getattr, a, "aliases") a.aliases = ["foo"] + self.assertEqual(["foo"], a.aliases) self.assertRaises(AttributeError, getattr, a, "metadata") a.metadata = {"foo": 1} + self.assertEqual({"foo": 1}, a.metadata) self.assertRaises(AttributeError, setattr, a, "md5", 5) a.close() - a.forty_two = 42 + if _IS_SYNC: + a.forty_two = 42 + else: + self.assertRaises(AttributeError, setattr, a, "forty_two", 42) + a.set("forty_two", 42) + self.assertEqual(42, a.forty_two) self.assertTrue(isinstance(a._id, ObjectId)) @@ -213,12 +226,16 @@ def test_grid_out_default_opts(self): gout = GridOut(self.db.fs, 5) with self.assertRaises(NoFile): + if not _IS_SYNC: + gout.open() gout.name a = GridIn(self.db.fs) a.close() b = GridOut(self.db.fs, a._id) + if not _IS_SYNC: + b.open() self.assertEqual(a._id, b._id) self.assertEqual(0, b.length) @@ -278,6 +295,9 @@ def test_grid_out_custom_opts(self): two = GridOut(self.db.fs, 5) + if not _IS_SYNC: + two.open() + self.assertEqual("my_file", two.name) self.assertEqual("my_file", two.filename) self.assertEqual(5, two._id) @@ -316,6 +336,8 @@ def test_grid_out_file_document(self): four = GridOut(self.db.fs, file_document={}) with self.assertRaises(NoFile): + if not _IS_SYNC: + four.open() four.name def test_write_file_like(self): @@ -350,7 +372,8 @@ def test_write_lines(self): def test_close(self): f = GridIn(self.db.fs) f.close() - self.assertRaises(ValueError, f.write, "test") + with self.assertRaises(ValueError): + f.write("test") f.close() def test_closed(self): @@ -359,6 +382,8 @@ def test_closed(self): f.close() g = GridOut(self.db.fs, f._id) + if not _IS_SYNC: + g.open() self.assertFalse(g.closed) g.read(1) self.assertFalse(g.closed) @@ -380,6 +405,8 @@ def test_multi_chunk_file(self): g = GridOut(self.db.fs, f._id) self.assertEqual(random_string, g.read()) + # TODO: https://jira.mongodb.org/browse/PYTHON-4708 + @client_context.require_sync def test_small_chunks(self): self.files = 0 self.chunks = 0 @@ -415,18 +442,21 @@ def test_seek(self): self.assertEqual(b"hello world", g.read()) g.seek(1) self.assertEqual(b"ello world", g.read()) - self.assertRaises(IOError, g.seek, -1) + with self.assertRaises(IOError): + g.seek(-1) g.seek(-3, _SEEK_END) self.assertEqual(b"rld", g.read()) g.seek(0, _SEEK_END) self.assertEqual(b"", g.read()) - self.assertRaises(IOError, g.seek, -100, _SEEK_END) + with self.assertRaises(IOError): + g.seek(-100, _SEEK_END) g.seek(3) g.seek(3, _SEEK_CUR) self.assertEqual(b"world", g.read()) - self.assertRaises(IOError, g.seek, -100, _SEEK_CUR) + with self.assertRaises(IOError): + g.seek(-100, _SEEK_CUR) def test_tell(self): f = GridIn(self.db.fs, chunkSize=3) @@ -519,12 +549,14 @@ def test_readlines(self): # Only readlines(). g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines() + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(), ) g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines(0) + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(0), ) g = GridOut(self.db.fs, f._id) @@ -550,15 +582,25 @@ def test_iterator(self): f = GridIn(self.db.fs) f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([], list(g)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], g.to_list()) f = GridIn(self.db.fs) f.write(b"hello world\nhere are\nsome lines.") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + if _IS_SYNC: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + else: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], g.to_list()) + self.assertEqual(b"", g.read(5)) - self.assertEqual([], list(g)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], g.to_list()) g = GridOut(self.db.fs, f._id) self.assertEqual(b"hello world\n", next(iter(g))) @@ -566,13 +608,17 @@ def test_iterator(self): self.assertEqual(b" are\n", next(iter(g))) self.assertEqual(b"some lines", g.read(10)) self.assertEqual(b".", next(iter(g))) - self.assertRaises(StopIteration, iter(g).__next__) + with self.assertRaises(StopIteration): + iter(g).__next__() f = GridIn(self.db.fs, chunk_size=2) f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"hello world"], list(g)) + if _IS_SYNC: + self.assertEqual([b"hello world"], list(g)) + else: + self.assertEqual([b"hello world"], g.to_list()) def test_read_unaligned_buffer_size(self): in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." @@ -610,7 +656,8 @@ def test_readchunk(self): def test_write_unicode(self): f = GridIn(self.db.fs) - self.assertRaises(TypeError, f.write, "foo") + with self.assertRaises(TypeError): + f.write("foo") f = GridIn(self.db.fs, encoding="utf-8") f.write("foo") @@ -635,8 +682,12 @@ def test_set_after_close(self): self.assertRaises(AttributeError, getattr, f, "uploadDate") self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "foo" - f.baz = 5 + if _IS_SYNC: + f.bar = "foo" + f.baz = 5 + else: + f.set("bar", "foo") + f.set("baz", 5) self.assertEqual("foo", f._id) self.assertEqual("foo", f.bar) @@ -651,11 +702,17 @@ def test_set_after_close(self): self.assertTrue(f.uploadDate) self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "a" - f.baz = "b" + if _IS_SYNC: + f.bar = "a" + f.baz = "b" + else: + f.set("bar", "a") + f.set("baz", "b") self.assertRaises(AttributeError, setattr, f, "upload_date", 5) g = GridOut(self.db.fs, f._id) + if not _IS_SYNC: + g.open() self.assertEqual("a", g.bar) self.assertEqual("b", g.baz) # Versions 2.0.1 and older saved a _closed field for some reason. @@ -713,8 +770,12 @@ def write_me(s, chunk_size): def test_grid_out_lazy_connect(self): fs = self.db.fs outfile = GridOut(fs, file_id=-1) - self.assertRaises(NoFile, outfile.read) - self.assertRaises(NoFile, getattr, outfile, "filename") + with self.assertRaises(NoFile): + outfile.read() + with self.assertRaises(NoFile): + if not _IS_SYNC: + outfile.open() + outfile.filename infile = GridIn(fs, filename=1) infile.close() @@ -730,13 +791,15 @@ def test_grid_in_lazy_connect(self): client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = GridIn(fs, file_id=-1, chunk_size=1) - self.assertRaises(ServerSelectionTimeoutError, infile.write, b"data") - self.assertRaises(ServerSelectionTimeoutError, infile.close) + with self.assertRaises(ServerSelectionTimeoutError): + infile.write(b"data") + with self.assertRaises(ServerSelectionTimeoutError): + infile.close() def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - GridIn(rs_or_single_client(w=0).pymongo_test.fs) + GridIn((rs_or_single_client(w=0)).pymongo_test.fs) def test_survive_cursor_not_found(self): # By default the find command returns 101 documents in the first batch. @@ -758,7 +821,7 @@ def test_survive_cursor_not_found(self): assert client.address is not None client._close_cursor_now( outfile._chunk_iter._cursor.cursor_id, - _CursorAddress(client.address, db.fs.chunks.full_name), + _CursorAddress(client.address, db.fs.chunks.full_name), # type: ignore[arg-type] ) # Read the rest of the file without error. @@ -767,6 +830,7 @@ def test_survive_cursor_not_found(self): # Paranoid, ensure that a getMore was actually sent. self.assertIn("getMore", listener.started_command_names()) + @client_context.require_sync def test_zip(self): zf = BytesIO() z = zipfile.ZipFile(zf, "w") diff --git a/tools/synchro.py b/tools/synchro.py index adc0de2971..b8fc9f33ce 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -47,6 +47,7 @@ "asynchronous": "synchronous", "Asynchronous": "Synchronous", "anext": "next", + "aiter": "iter", "_ALock": "_Lock", "_ACondition": "_Condition", "AsyncGridFS": "GridFS", @@ -98,6 +99,8 @@ "default_async": "default", "aclose": "close", "PyMongo|async": "PyMongo", + "AsyncTestGridFile": "TestGridFile", + "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", } docstring_replacements: dict[tuple[str, str], str] = { @@ -160,6 +163,7 @@ "test_cursor.py", "test_database.py", "test_encryption.py", + "test_grid_file.py", "test_logger.py", "test_session.py", "test_transactions.py", From b37fb918964222625428ea66ba8154ace65759c4 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:36:35 -0700 Subject: [PATCH 1430/2111] PYTHON-4704 Migrate test_bulk.py to async (#1827) --- test/__init__.py | 5 +- test/asynchronous/__init__.py | 5 +- test/asynchronous/test_bulk.py | 1134 ++++++++++++++++++++++++++++++++ test/test_bulk.py | 83 ++- tools/synchro.py | 5 + 5 files changed, 1207 insertions(+), 25 deletions(-) create mode 100644 test/asynchronous/test_bulk.py diff --git a/test/__init__.py b/test/__init__.py index d978d7da34..41af81f979 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -569,7 +569,10 @@ def require_secondaries_count(self, count): def sec_count(): return 0 if not self.client else len(self.client.secondaries) - return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + def check(): + return sec_count() >= count + + return self._require(check, "Not enough secondaries available") @property def supports_secondary_read_pref(self): diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index def4bc1b89..d1af89c184 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -571,7 +571,10 @@ def require_secondaries_count(self, count): async def sec_count(): return 0 if not self.client else len(await self.client.secondaries) - return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + async def check(): + return await sec_count() >= count + + return self._require(check, "Not enough secondaries available") @property async def supports_secondary_read_pref(self): diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py new file mode 100644 index 0000000000..24111ad7c0 --- /dev/null +++ b/test/asynchronous/test_bulk.py @@ -0,0 +1,1134 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the bulk API.""" +from __future__ import annotations + +import sys +import uuid +from typing import Any, Optional + +from pymongo.asynchronous.mongo_client import AsyncMongoClient + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, remove_all_users, unittest +from test.utils import ( + async_rs_or_single_client_noauth, + async_wait_until, + single_client, +) + +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.common import partition_node +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + InvalidOperation, + OperationFailure, +) +from pymongo.operations import * +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncBulkTestBase(AsyncIntegrationTest): + coll: AsyncCollection + coll_w0: AsyncCollection + + @classmethod + async def _setup_class(cls): + await super()._setup_class() + cls.coll = cls.db.test + cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) + + async def asyncSetUp(self): + super().setUp() + await self.coll.drop() + + def assertEqualResponse(self, expected, actual): + """Compare response from bulk.execute() to expected response.""" + for key, value in expected.items(): + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": + expected_upserts = value + actual_upserts = actual["upserted"] + self.assertEqual( + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) + + for e, a in zip(expected_upserts, actual_upserts): + self.assertEqualUpsert(e, a) + + elif key == "writeErrors": + expected_errors = value + actual_errors = actual["writeErrors"] + self.assertEqual( + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) + + for e, a in zip(expected_errors, actual_errors): + self.assertEqualWriteError(e, a) + + else: + self.assertEqual( + actual.get(key), + value, + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", + ) + + def assertEqualUpsert(self, expected, actual): + """Compare bulk.execute()['upserts'] to expected value. + + Like: {'index': 0, '_id': ObjectId()} + """ + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": + # Unspecified value. + self.assertTrue("_id" in actual) + else: + self.assertEqual(expected["_id"], actual["_id"]) + + def assertEqualWriteError(self, expected, actual): + """Compare bulk.execute()['writeErrors'] to expected value. + + Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} + """ + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": + # Unspecified value. + self.assertTrue("errmsg" in actual) + else: + self.assertEqual(expected["errmsg"], actual["errmsg"]) + + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": + # Unspecified _id. + self.assertTrue("_id" in actual_op) + actual_op.pop("_id") + expected_op.pop("_id") + + self.assertEqual(expected_op, actual_op) + + +class AsyncTestBulk(AsyncBulkTestBase): + async def test_empty(self): + with self.assertRaises(InvalidOperation): + await self.coll.bulk_write([]) + + async def test_insert(self): + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + result = await self.coll.bulk_write([InsertOne({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.inserted_count) + self.assertEqual(1, await self.coll.count_documents({})) + + async def _test_update_many(self, update): + expected = { + "nMatched": 2, + "nModified": 2, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([UpdateMany({}, update)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(2, result.matched_count) + self.assertTrue(result.modified_count in (2, None)) + + async def test_update_many(self): + await self._test_update_many({"$set": {"foo": "bar"}}) + + @async_client_context.require_version_min(4, 1, 11) + async def test_update_many_pipeline(self): + await self._test_update_many([{"$set": {"foo": "bar"}}]) + + async def test_array_filters_validation(self): + with self.assertRaises(TypeError): + await UpdateMany({}, {}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await UpdateOne({}, {}, array_filters={}) # type: ignore[arg-type] + + async def test_array_filters_unacknowledged(self): + coll = self.coll_w0 + update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await coll.bulk_write([update_one]) + with self.assertRaises(ConfigurationError): + await coll.bulk_write([update_many]) + + async def _test_update_one(self, update): + expected = { + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([UpdateOne({}, update)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (1, None)) + + async def test_update_one(self): + await self._test_update_one({"$set": {"foo": "bar"}}) + + @async_client_context.require_version_min(4, 1, 11) + async def test_update_one_pipeline(self): + await self._test_update_one([{"$set": {"foo": "bar"}}]) + + async def test_replace_one(self): + expected = { + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.matched_count) + self.assertTrue(result.modified_count in (1, None)) + + async def test_remove(self): + # Test removing all documents, ordered. + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 2, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([DeleteMany({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(2, result.deleted_count) + + async def test_remove_one(self): + # Test removing one document, empty selector. + await self.coll.insert_many([{}, {}]) + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 1, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + result = await self.coll.bulk_write([DeleteOne({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.deleted_count) + self.assertEqual(await self.coll.count_documents({}), 1) + + async def test_upsert(self): + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + } + + result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.upserted_count) + assert result.upserted_ids is not None + self.assertEqual(1, len(result.upserted_ids)) + self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) + + self.assertEqual(await self.coll.count_documents({"foo": "bar"}), 1) + + async def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + n_docs = await async_client_context.max_write_batch_size + 100 + requests = [InsertOne[dict]({}) for _ in range(n_docs)] + result = await self.coll.bulk_write(requests, ordered=False) + self.assertEqual(n_docs, result.inserted_count) + self.assertEqual(n_docs, await self.coll.count_documents({})) + + # Same with ordered bulk. + await self.coll.drop() + result = await self.coll.bulk_write(requests) + self.assertEqual(n_docs, result.inserted_count) + self.assertEqual(n_docs, await self.coll.count_documents({})) + + async def test_bulk_max_message_size(self): + await self.coll.delete_many({}) + self.addCleanup(self.coll.delete_many, {}) + _16_MB = 16 * 1000 * 1000 + # Generate a list of documents such that the first batched OP_MSG is + # as close as possible to the 48MB limit. + docs = [ + {"_id": 1, "l": "s" * _16_MB}, + {"_id": 2, "l": "s" * _16_MB}, + {"_id": 3, "l": "s" * (_16_MB - 10000)}, + ] + # Fill in the remaining ~10000 bytes with small documents. + for i in range(4, 10000): + docs.append({"_id": i}) + result = await self.coll.insert_many(docs) + self.assertEqual(len(docs), len(result.inserted_ids)) + + async def test_generator_insert(self): + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = await self.coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + async def test_bulk_write_no_results(self): + result = await self.coll_w0.bulk_write([InsertOne({})]) + self.assertFalse(result.acknowledged) + self.assertRaises(InvalidOperation, lambda: result.inserted_count) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_ids) + + async def test_bulk_write_invalid_arguments(self): + # The requests argument must be a list. + generator = (InsertOne[dict]({}) for _ in range(10)) + with self.assertRaises(TypeError): + await self.coll.bulk_write(generator) # type: ignore[arg-type] + + # Document is not wrapped in a bulk write operation. + with self.assertRaises(TypeError): + await self.coll.bulk_write([{}]) # type: ignore[list-item] + + async def test_upsert_large(self): + big = "a" * (await async_client_context.max_bson_size - 37) + result = await self.coll.bulk_write( + [UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, await self.coll.count_documents({"x": 1})) + + async def test_client_generated_upsert_id(self): + result = await self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] + + result = await coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) + + # The `Binary` values are returned as `bytes` objects. + for _id in ids: + _id["f"] = bytes(_id["f"]) + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_single_ordered_batch(self): + result = await self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) + + async def test_single_error_ordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_multiple_error_ordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), + ] + + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_single_unordered_batch(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + result = await self.coll.bulk_write(requests, ordered=False) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) + + async def test_single_error_unordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_multiple_error_unordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + # Assume the update at index 1 runs before the update at index 3, + # although the spec does not require it. Same for inserts. + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) + + async def test_large_inserts_ordered(self): + big = "x" * await async_client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(1, result["nInserted"]) + + await self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + write_result = await self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) + + self.assertEqual(6, write_result.inserted_count) + self.assertEqual(6, await self.coll.count_documents({})) + + async def test_large_inserts_unordered(self): + big = "x" * await async_client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + + await self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + result = await self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ], + ordered=False, + ) + + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, await self.coll.count_documents({})) + + +class AsyncBulkAuthorizationTestBase(AsyncBulkTestBase): + @classmethod + @async_client_context.require_auth + @async_client_context.require_no_api_version + async def _setup_class(cls): + await super()._setup_class() + + async def asyncSetUp(self): + super().setUp() + await async_client_context.create_user(self.db.name, "readonly", "pw", ["read"]) + await self.db.command( + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + async_client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) + + async def asyncTearDown(self): + await self.db.command("dropRole", "noremove") + await remove_all_users(self.db) + + +class AsyncTestBulkUnacknowledged(AsyncBulkTestBase): + async def asyncTearDown(self): + await self.coll.delete_many({}) + + async def test_no_results_ordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = await self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + async def test_no_results_ordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should not be executed since the batch is ordered. + DeleteOne({"_id": 1}), + ] + result = await self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 3 + + await async_wait_until(predicate, "insert 3 documents") + self.assertEqual({"_id": 1}, await self.coll.find_one({"_id": 1})) + + async def test_no_results_unordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = await self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + async def test_no_results_unordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should be executed since the batch is unordered. + DeleteOne({"_id": 1}), + ] + result = await self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + +class AsyncTestBulkAuthorization(AsyncBulkAuthorizationTestBase): + async def test_readonly(self): + # We test that an authorization failure aborts the batch and is raised + # as OperationFailure. + cli = await async_rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + coll.find_one() + with self.assertRaises(OperationFailure): + await coll.bulk_write([InsertOne({"x": 1})]) + + async def test_no_remove(self): + # We test that an authorization failure aborts the batch and is raised + # as OperationFailure. + cli = await async_rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + coll.find_one() + requests = [ + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. + ] + with self.assertRaises(OperationFailure): + await coll.bulk_write(requests) # type: ignore[arg-type] + self.assertEqual({1, 2}, set(await self.coll.distinct("x"))) + + +class AsyncTestBulkWriteConcern(AsyncBulkTestBase): + w: Optional[int] + secondary: AsyncMongoClient + + @classmethod + async def _setup_class(cls): + await super()._setup_class() + cls.w = async_client_context.w + cls.secondary = None + if cls.w is not None and cls.w > 1: + for member in (await async_client_context.hello)["hosts"]: + if member != (await async_client_context.hello)["primary"]: + cls.secondary = single_client(*partition_node(member)) + break + + @classmethod + async def async_tearDownClass(cls): + if cls.secondary: + await cls.secondary.close() + + async def cause_wtimeout(self, requests, ordered): + if not async_client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled.") + + # Use the rsSyncApplyStop failpoint to pause replication on a + # secondary which will cause a wtimeout error. + await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn") + + try: + coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1)) + return await coll.bulk_write(requests, ordered=ordered) + finally: + await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") + + @async_client_context.require_replica_set + @async_client_context.require_secondaries_count(1) + async def test_write_concern_failure_ordered(self): + # Ensure we don't raise on wnote. + coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) + result = await coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})]) + self.assertTrue(result.acknowledged) + + requests: list[Any] = [InsertOne({"a": 1}), InsertOne({"a": 2})] + # Replication wtimeout is a 'soft' error. + # It shouldn't stop batch processing. + try: + await self.cause_wtimeout(requests, ordered=True) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + }, + details, + ) + + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(details["writeConcernErrors"]) > 0) + + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + + await self.coll.delete_many({}) + await self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + + # Fail due to write concern support as well + # as duplicate key error on ordered batch. + requests = [ + InsertOne({"a": 1}), + ReplaceOne({"a": 3}, {"b": 1}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), + ] + try: + await self.cause_wtimeout(requests, ordered=True) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 1, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}], + "writeErrors": [ + {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}} + ], + }, + details, + ) + + self.assertTrue(len(details["writeConcernErrors"]) > 1) + failed = details["writeErrors"][0] + self.assertTrue("duplicate" in failed["errmsg"]) + + @async_client_context.require_replica_set + @async_client_context.require_secondaries_count(1) + async def test_write_concern_failure_unordered(self): + # Ensure we don't raise on wnote. + coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) + result = await coll_ww.bulk_write( + [DeleteOne({"something": "that does no exist"})], ordered=False + ) + self.assertTrue(result.acknowledged) + + requests = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 2}), + ] + # Replication wtimeout is a 'soft' error. + # It shouldn't stop batch processing. + try: + await self.cause_wtimeout(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(0, len(details["writeErrors"])) + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(details["writeConcernErrors"]) > 1) + + await self.coll.delete_many({}) + await self.coll.create_index("a", unique=True) + self.addCleanup(self.coll.drop_index, [("a", 1)]) + + # Fail due to write concern support as well + # as duplicate key error on unordered batch. + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True), + InsertOne({"a": 1}), + InsertOne({"a": 2}), + ] + try: + await self.cause_wtimeout(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + self.assertEqual(1, details["nUpserted"]) + self.assertEqual(1, len(details["writeErrors"])) + # When talking to legacy servers there will be a + # write concern error for each operation. + self.assertTrue(len(details["writeConcernErrors"]) > 1) + + failed = details["writeErrors"][0] + self.assertEqual(2, failed["index"]) + self.assertEqual(11000, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertEqual(1, failed["op"]["a"]) + + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertTrue(isinstance(failed["errmsg"], str)) + + upserts = details["upserted"] + self.assertEqual(1, len(upserts)) + self.assertEqual(1, upserts[0]["index"]) + self.assertTrue(upserts[0].get("_id")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_bulk.py b/test/test_bulk.py index 663dfaf19c..9069109cfa 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -44,14 +44,16 @@ from pymongo.synchronous.collection import Collection from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class BulkTestBase(IntegrationTest): coll: Collection coll_w0: Collection @classmethod - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.coll = cls.db.test cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) @@ -135,7 +137,8 @@ def assertEqualWriteError(self, expected, actual): class TestBulk(BulkTestBase): def test_empty(self): - self.assertRaises(InvalidOperation, self.coll.bulk_write, []) + with self.assertRaises(InvalidOperation): + self.coll.bulk_write([]) def test_insert(self): expected = { @@ -180,15 +183,19 @@ def test_update_many_pipeline(self): self._test_update_many([{"$set": {"foo": "bar"}}]) def test_array_filters_validation(self): - self.assertRaises(TypeError, UpdateMany, {}, {}, array_filters={}) - self.assertRaises(TypeError, UpdateOne, {}, {}, array_filters={}) + with self.assertRaises(TypeError): + UpdateMany({}, {}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + UpdateOne({}, {}, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): coll = self.coll_w0 update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) - self.assertRaises(ConfigurationError, coll.bulk_write, [update_one]) - self.assertRaises(ConfigurationError, coll.bulk_write, [update_many]) + with self.assertRaises(ConfigurationError): + coll.bulk_write([update_one]) + with self.assertRaises(ConfigurationError): + coll.bulk_write([update_many]) def _test_update_one(self, update): expected = { @@ -790,8 +797,8 @@ class BulkAuthorizationTestBase(BulkTestBase): @classmethod @client_context.require_auth @client_context.require_no_api_version - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() def setUp(self): super().setUp() @@ -828,8 +835,16 @@ def test_no_results_ordered_success(self): ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: self.coll.count_documents({}) == 2, "insert 2 documents") - wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def predicate(): + return self.coll.count_documents({}) == 2 + + wait_until(predicate, "insert 2 documents") + + def predicate(): + return self.coll.find_one({"_id": 1}) is None + + wait_until(predicate, 'removed {"_id": 1}') def test_no_results_ordered_failure(self): requests: list = [ @@ -843,7 +858,11 @@ def test_no_results_ordered_failure(self): ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: self.coll.count_documents({}) == 3, "insert 3 documents") + + def predicate(): + return self.coll.count_documents({}) == 3 + + wait_until(predicate, "insert 3 documents") self.assertEqual({"_id": 1}, self.coll.find_one({"_id": 1})) def test_no_results_unordered_success(self): @@ -855,8 +874,16 @@ def test_no_results_unordered_success(self): ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: self.coll.count_documents({}) == 2, "insert 2 documents") - wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def predicate(): + return self.coll.count_documents({}) == 2 + + wait_until(predicate, "insert 2 documents") + + def predicate(): + return self.coll.find_one({"_id": 1}) is None + + wait_until(predicate, 'removed {"_id": 1}') def test_no_results_unordered_failure(self): requests: list = [ @@ -870,8 +897,16 @@ def test_no_results_unordered_failure(self): ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: self.coll.count_documents({}) == 2, "insert 2 documents") - wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def predicate(): + return self.coll.count_documents({}) == 2 + + wait_until(predicate, "insert 2 documents") + + def predicate(): + return self.coll.find_one({"_id": 1}) is None + + wait_until(predicate, 'removed {"_id": 1}') class TestBulkAuthorization(BulkAuthorizationTestBase): @@ -883,7 +918,8 @@ def test_readonly(self): ) coll = cli.pymongo_test.test coll.find_one() - self.assertRaises(OperationFailure, coll.bulk_write, [InsertOne({"x": 1})]) + with self.assertRaises(OperationFailure): + coll.bulk_write([InsertOne({"x": 1})]) def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised @@ -899,7 +935,8 @@ def test_no_remove(self): DeleteMany({}), # Prohibited. InsertOne({"x": 3}), # Never attempted. ] - self.assertRaises(OperationFailure, coll.bulk_write, requests) + with self.assertRaises(OperationFailure): + coll.bulk_write(requests) # type: ignore[arg-type] self.assertEqual({1, 2}, set(self.coll.distinct("x"))) @@ -908,18 +945,18 @@ class TestBulkWriteConcern(BulkTestBase): secondary: MongoClient @classmethod - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.w = client_context.w cls.secondary = None if cls.w is not None and cls.w > 1: - for member in client_context.hello["hosts"]: - if member != client_context.hello["primary"]: + for member in (client_context.hello)["hosts"]: + if member != (client_context.hello)["primary"]: cls.secondary = single_client(*partition_node(member)) break @classmethod - def tearDownClass(cls): + def async_tearDownClass(cls): if cls.secondary: cls.secondary.close() diff --git a/tools/synchro.py b/tools/synchro.py index b8fc9f33ce..f4019f0bbb 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -46,6 +46,8 @@ "async_sendall": "sendall", "asynchronous": "synchronous", "Asynchronous": "Synchronous", + "AsyncBulkTestBase": "BulkTestBase", + "AsyncBulkAuthorizationTestBase": "BulkAuthorizationTestBase", "anext": "next", "aiter": "iter", "_ALock": "_Lock", @@ -157,6 +159,7 @@ "conftest.py", "pymongo_mocks.py", "utils_spec_runner.py", + "test_bulk.py", "test_client.py", "test_client_bulk_write.py", "test_collection.py", @@ -299,6 +302,8 @@ def translate_docstrings(lines: list[str]) -> list[str]: lines[i] = lines[i].replace(k, replacements[k]) if "Sync" in lines[i] and "Synchronous" not in lines[i] and replacements[k] in lines[i]: lines[i] = lines[i].replace("Sync", "") + if "rsApplyStop" in lines[i]: + lines[i] = lines[i].replace("rsApplyStop", "rsSyncApplyStop") if "async for" in lines[i] or "async with" in lines[i] or "async def" in lines[i]: lines[i] = lines[i].replace("async ", "") if "await " in lines[i] and "tailable" not in lines[i]: From 653ea8b8d29de07fa31aacf753a0ab884f90d0d2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 Sep 2024 14:53:21 -0700 Subject: [PATCH 1431/2111] PYTHON-4164 Document support for KMIP delegated master_key (#1830) --- doc/changelog.rst | 2 ++ pymongo/asynchronous/encryption.py | 3 +++ pymongo/synchronous/encryption.py | 3 +++ .../spec/legacy/fle2v2-Rangev2-Compact.json | 3 ++- 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 42a4fdf50f..2d574ee8ce 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -11,6 +11,8 @@ PyMongo 4.9 brings a number of improvements including: - Added support for In-Use Encryption range queries with MongoDB 8.0. Added :attr:`~pymongo.encryption.Algorithm.RANGE`. ``sparsity`` and ``trim_factor`` are now optional in :class:`~pymongo.encryption_options.RangeOpts`. +- Added support for the "delegated" option for the KMIP ``master_key`` in + :meth:`~pymongo.encryption.ClientEncryption.create_data_key`. - pymongocrypt>=1.10 is now required for :ref:`In-Use Encryption` support. - Added :meth:`~pymongo.cursor.Cursor.to_list` to :class:`~pymongo.cursor.Cursor`, :class:`~pymongo.command_cursor.CommandCursor`, diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index c4cb886df7..c9e3cadd6e 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -764,6 +764,9 @@ async def create_data_key( Secret Data managed object. - `endpoint` (string): Optional. Host with optional port, e.g. "example.vault.azure.net:". + - `delegated` (bool): Optional. If True (recommended), the + KMIP server will perform encryption and decryption. If + delegated is not provided, defaults to false. :param key_alt_names: An optional list of string alternate names used to reference a key. If a key is created with alternate diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 2efa995978..3849cf3f2b 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -762,6 +762,9 @@ def create_data_key( Secret Data managed object. - `endpoint` (string): Optional. Host with optional port, e.g. "example.vault.azure.net:". + - `delegated` (bool): Optional. If True (recommended), the + KMIP server will perform encryption and decryption. If + delegated is not provided, defaults to false. :param key_alt_names: An optional list of string alternate names used to reference a key. If a key is created with alternate diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json index 59241927ca..bba9f25535 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "serverless": "forbid" } ], "database_name": "default", From e27b428914279b7846bed7660836aa1d31986a9b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 4 Sep 2024 14:53:32 -0700 Subject: [PATCH 1432/2111] PYTHON-4150 Document compatibility with MongoDB 3.6 will soon be dropped (#1829) --- doc/changelog.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 2d574ee8ce..c5a4f47d79 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,9 @@ Changelog Changes in Version 4.9.0 ------------------------- +.. warning:: Driver support for MongoDB 3.6 reached end of life in April 2024. + PyMongo 4.9 will be the last release to support MongoDB 3.6. + PyMongo 4.9 brings a number of improvements including: - Added support for MongoDB 8.0. From 4d4813070dfb79581c755253d0fe69dec43bc3be Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 4 Sep 2024 19:40:37 -0500 Subject: [PATCH 1433/2111] PYTHON-4667 Handle $clusterTime from error responses in client Bulk Write (#1822) --- pymongo/asynchronous/bulk.py | 5 ++++- pymongo/asynchronous/client_bulk.py | 8 +++++++- pymongo/synchronous/bulk.py | 5 ++++- pymongo/synchronous/client_bulk.py | 8 +++++++- test/mockupdb/test_cluster_time.py | 31 ++++++++++++++++++++++++++--- 5 files changed, 50 insertions(+), 7 deletions(-) diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index c200899dd1..9fd673693f 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -281,6 +281,7 @@ async def write_command( ) if bwc.publish: bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + await client._process_response(reply, bwc.session) # type: ignore[arg-type] except Exception as exc: duration = datetime.datetime.now() - bwc.start_time if isinstance(exc, (NotPrimaryError, OperationFailure)): @@ -308,6 +309,9 @@ async def write_command( if bwc.publish: bwc._fail(request_id, failure, duration) + # Process the response from the server. + if isinstance(exc, (NotPrimaryError, OperationFailure)): + await client._process_response(exc.details, bwc.session) # type: ignore[arg-type] raise finally: bwc.start_time = datetime.datetime.now() @@ -449,7 +453,6 @@ async def _execute_batch( else: request_id, msg, to_send = bwc.batch_command(cmd, ops) result = await self.write_command(bwc, cmd, request_id, msg, to_send, client) # type: ignore[arg-type] - await client._process_response(result, bwc.session) # type: ignore[arg-type] return result, to_send # type: ignore[return-value] diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index b9ab6b876b..15a0369f41 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -283,6 +283,8 @@ async def write_command( ) if bwc.publish: bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + # Process the response from the server. + await self.client._process_response(reply, bwc.session) # type: ignore[arg-type] except Exception as exc: duration = datetime.datetime.now() - bwc.start_time if isinstance(exc, (NotPrimaryError, OperationFailure)): @@ -312,6 +314,11 @@ async def write_command( bwc._fail(request_id, failure, duration) # Top-level error will be embedded in ClientBulkWriteException. reply = {"error": exc} + # Process the response from the server. + if isinstance(exc, OperationFailure): + await self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + else: + await self.client._process_response({}, bwc.session) # type: ignore[arg-type] finally: bwc.start_time = datetime.datetime.now() return reply # type: ignore[return-value] @@ -431,7 +438,6 @@ async def _execute_batch( result = await self.write_command( bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client ) # type: ignore[arg-type] - await self.client._process_response(result, bwc.session) # type: ignore[arg-type] return result, to_send_ops, to_send_ns # type: ignore[return-value] async def _process_results_cursor( diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index 4da64c4a78..27fcff620c 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -281,6 +281,7 @@ def write_command( ) if bwc.publish: bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + client._process_response(reply, bwc.session) # type: ignore[arg-type] except Exception as exc: duration = datetime.datetime.now() - bwc.start_time if isinstance(exc, (NotPrimaryError, OperationFailure)): @@ -308,6 +309,9 @@ def write_command( if bwc.publish: bwc._fail(request_id, failure, duration) + # Process the response from the server. + if isinstance(exc, (NotPrimaryError, OperationFailure)): + client._process_response(exc.details, bwc.session) # type: ignore[arg-type] raise finally: bwc.start_time = datetime.datetime.now() @@ -449,7 +453,6 @@ def _execute_batch( else: request_id, msg, to_send = bwc.batch_command(cmd, ops) result = self.write_command(bwc, cmd, request_id, msg, to_send, client) # type: ignore[arg-type] - client._process_response(result, bwc.session) # type: ignore[arg-type] return result, to_send # type: ignore[return-value] diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 106e5dcbb3..23af231d16 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -283,6 +283,8 @@ def write_command( ) if bwc.publish: bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + # Process the response from the server. + self.client._process_response(reply, bwc.session) # type: ignore[arg-type] except Exception as exc: duration = datetime.datetime.now() - bwc.start_time if isinstance(exc, (NotPrimaryError, OperationFailure)): @@ -312,6 +314,11 @@ def write_command( bwc._fail(request_id, failure, duration) # Top-level error will be embedded in ClientBulkWriteException. reply = {"error": exc} + # Process the response from the server. + if isinstance(exc, OperationFailure): + self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + else: + self.client._process_response({}, bwc.session) # type: ignore[arg-type] finally: bwc.start_time = datetime.datetime.now() return reply # type: ignore[return-value] @@ -429,7 +436,6 @@ def _execute_batch( """Executes a batch of bulkWrite server commands (ack).""" request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) result = self.write_command(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] - self.client._process_response(result, bwc.session) # type: ignore[arg-type] return result, to_send_ops, to_send_ns # type: ignore[return-value] def _process_results_cursor( diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index f3ab0a6c54..9794843175 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -29,21 +29,22 @@ from bson import Timestamp from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne +from pymongo.errors import OperationFailure pytestmark = pytest.mark.mockupdb class TestClusterTime(unittest.TestCase): - def cluster_time_conversation(self, callback, replies): + def cluster_time_conversation(self, callback, replies, max_wire_version=6): cluster_time = Timestamp(0, 0) server = MockupDB() - # First test all commands include $clusterTime with wire version 6. + # First test all commands include $clusterTime with max_wire_version. _ = server.autoresponds( "ismaster", { "minWireVersion": 0, - "maxWireVersion": 6, + "maxWireVersion": max_wire_version, "$clusterTime": {"clusterTime": cluster_time}, }, ) @@ -166,6 +167,30 @@ def test_monitor(self): request.reply(reply) client.close() + def test_collection_bulk_error(self): + def callback(client: MongoClient[dict]) -> None: + with self.assertRaises(OperationFailure): + client.db.collection.bulk_write([InsertOne({}), InsertOne({})]) + + self.cluster_time_conversation( + callback, + [{"ok": 0, "errmsg": "mock error"}], + ) + + def test_client_bulk_error(self): + def callback(client: MongoClient[dict]) -> None: + with self.assertRaises(OperationFailure): + client.bulk_write( + [ + InsertOne({}, namespace="db.collection"), + InsertOne({}, namespace="db.collection"), + ] + ) + + self.cluster_time_conversation( + callback, [{"ok": 0, "errmsg": "mock error"}], max_wire_version=25 + ) + if __name__ == "__main__": unittest.main() From 26c55048d4ba760b10d79ce155ae7ae5fb5e9989 Mon Sep 17 00:00:00 2001 From: Jib Date: Thu, 5 Sep 2024 09:39:55 -0400 Subject: [PATCH 1434/2111] PYTHON-4631: Pushed PREPARE_SHELL creation into an env.sh file (#1788) Co-authored-by: Steven Silvester --- .evergreen/config.yml | 143 +++++++++++----------------- .evergreen/scripts/configure-env.sh | 53 +++++++++++ 2 files changed, 106 insertions(+), 90 deletions(-) create mode 100644 .evergreen/scripts/configure-env.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 8388c72151..c8e314f6c8 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -34,51 +34,14 @@ functions: # Applies the subitted patch, if any # Deprecated. Should be removed. But still needed for certain agents (ZAP) - command: git.apply_patch - # Make an evergreen exapanstion file with dynamic values - - command: shell.exec + # Make an evergreen expansion file with dynamic values + - command: subprocess.exec params: + include_expansions_in_env: ["is_patch", "project", "version_id"] + binary: bash working_dir: "src" - script: | - set +x - # Get the current unique version of this checkout - if [ "${is_patch}" = "true" ]; then - CURRENT_VERSION=$(git describe)-patch-${version_id} - else - CURRENT_VERSION=latest - fi - - export DRIVERS_TOOLS="$(dirname $(pwd))/drivers-tools" - export PROJECT_DIRECTORY="$(pwd)" - - # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - export DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) - export PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) - fi - - export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" - export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" - - cat < expansion.yml - CURRENT_VERSION: "$CURRENT_VERSION" - DRIVERS_TOOLS: "$DRIVERS_TOOLS" - MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME" - MONGODB_BINARIES: "$MONGODB_BINARIES" - PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" - PREPARE_SHELL: | - set -o errexit - export SKIP_LEGACY_SHELL=1 - export DRIVERS_TOOLS="$DRIVERS_TOOLS" - export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" - export MONGODB_BINARIES="$MONGODB_BINARIES" - export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" - - export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" - export PATH="$MONGODB_BINARIES:$PATH" - export PROJECT="${project}" - export PIP_QUIET=1 - EOT - + args: + - .evergreen/scripts/configure-env.sh # Load the expansion file to make an evergreen variable with the current unique version - command: expansions.update params: @@ -88,14 +51,14 @@ functions: - command: shell.exec params: script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh set -o xtrace rm -rf $DRIVERS_TOOLS - if [ "${project}" = "drivers-tools" ]; then + if [ "$PROJECT" = "drivers-tools" ]; then # If this was a patch build, doing a fresh clone would not actually test the patch - cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS + cp -R ${PROJECT_DIRECTORY}/ ${DRIVERS_TOOLS} else - git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git ${DRIVERS_TOOLS} fi echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config @@ -129,12 +92,12 @@ functions: script: | # Download all the task coverage files. aws s3 cp --recursive s3://${bucket_name}/coverage/${revision}/${version_id}/coverage/ coverage/ - - command: shell.exec + - command: subprocess.exec params: working_dir: "src" - script: | - ${PREPARE_SHELL} - bash .evergreen/combine-coverage.sh + binary: bash + args: + - .evergreen/combine-coverage.sh # Upload the resulting html coverage report. - command: shell.exec params: @@ -164,7 +127,7 @@ functions: - command: shell.exec params: script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh set -o xtrace mkdir out_dir find $MONGO_ORCHESTRATION_HOME -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; @@ -266,7 +229,7 @@ functions: - command: shell.exec params: script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh set -o xtrace # Enable core dumps if enabled on the machine @@ -325,13 +288,13 @@ functions: type: setup params: script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh - command: shell.exec type: setup params: script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh sleep 1 docker ps @@ -340,7 +303,7 @@ functions: - command: shell.exec params: script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh set -o xtrace bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh @@ -350,7 +313,7 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -o xtrace PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} \ MOD_WSGI_EMBEDDED=${MOD_WSGI_EMBEDDED} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} \ @@ -362,7 +325,7 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -o xtrace export PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-mockupdb @@ -373,7 +336,7 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -o xtrace PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh doctest:test @@ -385,7 +348,7 @@ functions: background: true include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh if [ -n "${test_encryption}" ]; then ./.evergreen/hatch.sh encryption:setup fi @@ -397,7 +360,7 @@ functions: script: | # Disable xtrace set +x - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh if [ -n "${MONGODB_STARTED}" ]; then export PYMONGO_MUST_CONNECT=true fi @@ -497,7 +460,7 @@ functions: shell: "bash" working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh .evergreen/run-mongodb-aws-test.sh regular "run aws auth test with assume role credentials": @@ -507,7 +470,7 @@ functions: shell: "bash" working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh .evergreen/run-mongodb-aws-test.sh assume-role "run aws auth test with aws EC2 credentials": @@ -521,7 +484,7 @@ functions: echo "This platform does not support the EC2 auth test, skipping..." exit 0 fi - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh .evergreen/run-mongodb-aws-test.sh ec2 "run aws auth test with aws web identity credentials": @@ -535,7 +498,7 @@ functions: echo "This platform does not support the web identity auth test, skipping..." exit 0 fi - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh # Test with and without AWS_ROLE_SESSION_NAME set. .evergreen/run-mongodb-aws-test.sh web-identity AWS_ROLE_SESSION_NAME="test" \ @@ -558,7 +521,7 @@ functions: working_dir: "src" shell: bash script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh .evergreen/run-mongodb-aws-test.sh env-creds "run aws auth test with aws credentials and session token as environment variables": @@ -568,7 +531,7 @@ functions: working_dir: "src" shell: bash script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh .evergreen/run-mongodb-aws-test.sh session-creds "run aws ECS auth test": @@ -582,12 +545,12 @@ functions: echo "This platform does not support the ECS auth test, skipping..." exit 0 fi - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -ex cd ${DRIVERS_TOOLS}/.evergreen/auth_aws . ./activate-authawsvenv.sh . aws_setup.sh ecs - export MONGODB_BINARIES="${MONGODB_BINARIES}"; + export MONGODB_BINARIES="$MONGODB_BINARIES"; export PROJECT_DIRECTORY="${PROJECT_DIRECTORY}"; python aws_tester.py ecs cd - @@ -597,9 +560,9 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh . .evergreen/hatch.sh encryption:teardown - rm -rf $DRIVERS_TOOLS || true + rm -rf ${DRIVERS_TOOLS} || true rm -f ./secrets-export.sh || true "fix absolute paths": @@ -607,7 +570,7 @@ functions: params: script: | set +x - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename done @@ -617,20 +580,20 @@ functions: params: script: | set +x - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do cat $i | tr -d '\r' > $i.new mv $i.new $i done # Copy client certificate because symlinks do not work on Windows. - cp ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem + cp ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem $MONGO_ORCHESTRATION_HOME/lib/client.pem "make files executable": - command: shell.exec params: script: | set +x - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do chmod +x $i done @@ -640,7 +603,7 @@ functions: params: script: | set +x - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh echo '{"results": [{ "status": "FAIL", "test_file": "Build", "log_raw": "No test-results.json found was created" } ]}' > ${PROJECT_DIRECTORY}/test-results.json "install dependencies": @@ -648,7 +611,7 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -o xtrace file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion. @@ -679,10 +642,10 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh TEST_OCSP=1 \ PYTHON_BINARY=${PYTHON_BINARY} \ - CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ + CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg @@ -691,7 +654,7 @@ functions: params: background: true script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd ${DRIVERS_TOOLS}/.evergreen/ocsp . ./activate-ocspvenv.sh python ocsp_mock.py \ @@ -704,7 +667,7 @@ functions: params: background: true script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd ${DRIVERS_TOOLS}/.evergreen/ocsp . ./activate-ocspvenv.sh python ocsp_mock.py \ @@ -719,7 +682,7 @@ functions: params: background: true script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd ${DRIVERS_TOOLS}/.evergreen/ocsp . ./activate-ocspvenv.sh python ocsp_mock.py \ @@ -732,7 +695,7 @@ functions: params: background: true script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd ${DRIVERS_TOOLS}/.evergreen/ocsp . ./activate-ocspvenv.sh python ocsp_mock.py \ @@ -774,7 +737,7 @@ functions: params: shell: "bash" script: | - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" if [ -f "./aws_e2e_setup.json" ]; then . ./activate-authawsvenv.sh @@ -794,7 +757,7 @@ functions: params: working_dir: "src" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh "attach benchmark test results": @@ -1889,7 +1852,7 @@ tasks: shell: bash script: |- set -o errexit - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd src git add . git commit -m "add files" @@ -1906,7 +1869,7 @@ tasks: shell: bash script: |- set -o errexit - ${PREPARE_SHELL} + . src/.evergreen/scripts/env.sh cd src git add . git commit -m "add files" @@ -1974,7 +1937,7 @@ tasks: working_dir: "src" shell: "bash" script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz SKIP_SERVERS=1 bash ./.evergreen/setup-encryption.sh @@ -2051,7 +2014,7 @@ tasks: shell: "bash" working_dir: src script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -x export CONFIG=$PROJECT_DIRECTORY/.github/reviewers.txt export SCRIPT="$DRIVERS_TOOLS/.evergreen/github_app/assign-reviewer.sh" @@ -2067,7 +2030,7 @@ tasks: shell: "bash" working_dir: src script: | - ${PREPARE_SHELL} + . .evergreen/scripts/env.sh set -x export BASE_SHA=${revision} export HEAD_SHA=${github.amrom.workers.devmit} diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh new file mode 100644 index 0000000000..0c9c8bb03a --- /dev/null +++ b/.evergreen/scripts/configure-env.sh @@ -0,0 +1,53 @@ +#!/bin/bash -ex + +# Get the current unique version of this checkout +# shellcheck disable=SC2154 +if [ "$is_patch" = "true" ]; then + # shellcheck disable=SC2154 + CURRENT_VERSION="$(git describe)-patch-$version_id" +else + CURRENT_VERSION=latest +fi + +PROJECT_DIRECTORY="$(pwd)" +DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" + +# Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory +if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) + PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) +fi + +SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" + +if [ -f "$SCRIPT_DIR/env.sh" ]; then + echo "Reading $SCRIPT_DIR/env.sh file" + . "$SCRIPT_DIR/env.sh" + exit 0 +fi + +export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" +export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" + +cat < $SCRIPT_DIR/env.sh +set -o errexit +export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" +export CURRENT_VERSION="$CURRENT_VERSION" +export SKIP_LEGACY_SHELL=1 +export DRIVERS_TOOLS="$DRIVERS_TOOLS" +export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" +export MONGODB_BINARIES="$MONGODB_BINARIES" +export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" + +export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" +export PATH="$MONGODB_BINARIES:$PATH" +# shellcheck disable=SC2154 +export PROJECT="$project" +export PIP_QUIET=1 +EOT + +# Add these expansions to make it easier to call out tests scripts from the EVG yaml +cat < expansion.yml +DRIVERS_TOOLS: "$DRIVERS_TOOLS" +PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" +EOT From 6e9bf1e4a822cb480bd68c576d4ffb82f8b347d6 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 5 Sep 2024 10:20:32 -0400 Subject: [PATCH 1435/2111] PYTHON-4708 - Convert test.qcheck to async (#1832) --- test/asynchronous/qcheck.py | 255 ++++++++++++++++++++++++++++ test/asynchronous/test_grid_file.py | 18 +- test/qcheck.py | 7 +- test/test_grid_file.py | 14 +- tools/synchro.py | 1 + 5 files changed, 280 insertions(+), 15 deletions(-) create mode 100644 test/asynchronous/qcheck.py diff --git a/test/asynchronous/qcheck.py b/test/asynchronous/qcheck.py new file mode 100644 index 0000000000..190a7f1a91 --- /dev/null +++ b/test/asynchronous/qcheck.py @@ -0,0 +1,255 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import datetime +import random +import re +import sys +import traceback + +sys.path[0:0] = [""] + +from bson.dbref import DBRef +from bson.objectid import ObjectId +from bson.son import SON + +_IS_SYNC = False + +gen_target = 100 +reduction_attempts = 10 +examples = 5 + + +def lift(value): + return lambda: value + + +def choose_lifted(generator_list): + return lambda: random.choice(generator_list) + + +def my_map(generator, function): + return lambda: function(generator()) + + +def choose(list): + return lambda: random.choice(list)() + + +def gen_range(start, stop): + return lambda: random.randint(start, stop) + + +def gen_int(): + max_int = 2147483647 + return lambda: random.randint(-max_int - 1, max_int) + + +def gen_float(): + return lambda: (random.random() - 0.5) * sys.maxsize + + +def gen_boolean(): + return lambda: random.choice([True, False]) + + +def gen_printable_char(): + return lambda: chr(random.randint(32, 126)) + + +def gen_printable_string(gen_length): + return lambda: "".join(gen_list(gen_printable_char(), gen_length)()) + + +def gen_char(set=None): + return lambda: bytes([random.randint(0, 255)]) + + +def gen_string(gen_length): + return lambda: b"".join(gen_list(gen_char(), gen_length)()) + + +def gen_unichar(): + return lambda: chr(random.randint(1, 0xFFF)) + + +def gen_unicode(gen_length): + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) + + +def gen_list(generator, gen_length): + return lambda: [generator() for _ in range(gen_length())] + + +def gen_datetime(): + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) + + +def gen_dict(gen_key, gen_value, gen_length): + def a_dict(gen_key, gen_value, length): + result = {} + for _ in range(length): + result[gen_key()] = gen_value() + return result + + return lambda: a_dict(gen_key, gen_value, gen_length()) + + +def gen_regexp(gen_length): + # TODO our patterns only consist of one letter. + # this is because of a bug in CPython's regex equality testing, + # which I haven't quite tracked down, so I'm just ignoring it... + def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) + + def gen_flags(): + flags = 0 + if random.random() > 0.5: + flags = flags | re.IGNORECASE + if random.random() > 0.5: + flags = flags | re.MULTILINE + if random.random() > 0.5: + flags = flags | re.VERBOSE + + return flags + + return lambda: re.compile(pattern(), gen_flags()) + + +def gen_objectid(): + return lambda: ObjectId() + + +def gen_dbref(): + collection = gen_unicode(gen_range(0, 20)) + return lambda: DBRef(collection(), gen_mongo_value(1, True)()) + + +def gen_mongo_value(depth, ref): + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] + if ref: + choices.append(gen_dbref()) + if depth > 0: + choices.append(gen_mongo_list(depth, ref)) + choices.append(gen_mongo_dict(depth, ref)) + return choose(choices) + + +def gen_mongo_list(depth, ref): + return gen_list(gen_mongo_value(depth - 1, ref), gen_range(0, 10)) + + +def gen_mongo_dict(depth, ref=True): + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) + + +def simplify(case): # TODO this is a hack + if isinstance(case, SON) and "$ref" not in case: + simplified = SON(case) # make a copy! + if random.choice([True, False]): + # delete + simplified_keys = list(simplified) + if not len(simplified_keys): + return (False, case) + simplified.pop(random.choice(simplified_keys)) + return (True, simplified) + else: + # simplify a value + simplified_items = list(simplified.items()) + if not len(simplified_items): + return (False, case) + (key, value) = random.choice(simplified_items) + (success, value) = simplify(value) + simplified[key] = value + return (success, success and simplified or case) + if isinstance(case, list): + simplified = list(case) + if random.choice([True, False]): + # delete + if not len(simplified): + return (False, case) + simplified.pop(random.randrange(len(simplified))) + return (True, simplified) + else: + # simplify an item + if not len(simplified): + return (False, case) + index = random.randrange(len(simplified)) + (success, value) = simplify(simplified[index]) + simplified[index] = value + return (success, success and simplified or case) + return (False, case) + + +async def reduce(case, predicate, reductions=0): + for _ in range(reduction_attempts): + (reduced, simplified) = simplify(case) + if reduced and not await predicate(simplified): + return await reduce(simplified, predicate, reductions + 1) + return (reductions, case) + + +async def isnt(predicate): + async def is_not(x): + return not await predicate(x) + + return is_not + + +async def check(predicate, generator): + counter_examples = [] + for _ in range(gen_target): + case = generator() + try: + if not await predicate(case): + reduction = await reduce(case, predicate) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) + except: + counter_examples.append(f"{case!r} : {traceback.format_exc()}") + return counter_examples + + +async def check_unittest(test, predicate, generator): + counter_examples = await check(predicate, generator) + if counter_examples: + failures = len(counter_examples) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) + test.fail(message) diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index 7071fc76f4..6d589dc01c 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -21,17 +21,21 @@ import sys import zipfile from io import BytesIO -from test.asynchronous import AsyncIntegrationTest, AsyncUnitTest, async_client_context +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncUnitTest, + async_client_context, + qcheck, + unittest, +) from pymongo.asynchronous.database import AsyncDatabase sys.path[0:0] = [""] -from test import IntegrationTest, qcheck, unittest -from test.utils import EventListener, async_rs_or_single_client, rs_or_single_client +from test.utils import EventListener, async_rs_or_single_client from bson.objectid import ObjectId -from gridfs import GridFS from gridfs.asynchronous.grid_file import ( _SEEK_CUR, _SEEK_END, @@ -44,7 +48,7 @@ from gridfs.errors import NoFile from pymongo import AsyncMongoClient from pymongo.asynchronous.helpers import aiter, anext -from pymongo.errors import ConfigurationError, InvalidOperation, ServerSelectionTimeoutError +from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress _IS_SYNC = False @@ -407,8 +411,6 @@ async def test_multi_chunk_file(self): g = AsyncGridOut(self.db.fs, f._id) self.assertEqual(random_string, await g.read()) - # TODO: https://jira.mongodb.org/browse/PYTHON-4708 - @async_client_context.require_sync async def test_small_chunks(self): self.files = 0 self.chunks = 0 @@ -431,7 +433,7 @@ async def helper(data): self.assertEqual(data, await g.read(10) + await g.read(10)) return True - qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) + await qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) async def test_seek(self): f = AsyncGridIn(self.db.fs, chunkSize=3) diff --git a/test/qcheck.py b/test/qcheck.py index 8339bc3763..842580cbff 100644 --- a/test/qcheck.py +++ b/test/qcheck.py @@ -25,6 +25,8 @@ from bson.objectid import ObjectId from bson.son import SON +_IS_SYNC = True + gen_target = 100 reduction_attempts = 10 examples = 5 @@ -221,7 +223,10 @@ def reduce(case, predicate, reductions=0): def isnt(predicate): - return lambda x: not predicate(x) + def is_not(x): + return not predicate(x) + + return is_not def check(predicate, generator): diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 0e806eb5cb..bd89235b73 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -21,17 +21,21 @@ import sys import zipfile from io import BytesIO -from test import IntegrationTest, UnitTest, client_context +from test import ( + IntegrationTest, + UnitTest, + client_context, + qcheck, + unittest, +) from pymongo.synchronous.database import Database sys.path[0:0] = [""] -from test import IntegrationTest, qcheck, unittest from test.utils import EventListener, rs_or_single_client from bson.objectid import ObjectId -from gridfs import GridFS from gridfs.errors import NoFile from gridfs.synchronous.grid_file import ( _SEEK_CUR, @@ -43,7 +47,7 @@ GridOutCursor, ) from pymongo import MongoClient -from pymongo.errors import ConfigurationError, InvalidOperation, ServerSelectionTimeoutError +from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress from pymongo.synchronous.helpers import iter, next @@ -405,8 +409,6 @@ def test_multi_chunk_file(self): g = GridOut(self.db.fs, f._id) self.assertEqual(random_string, g.read()) - # TODO: https://jira.mongodb.org/browse/PYTHON-4708 - @client_context.require_sync def test_small_chunks(self): self.files = 0 self.chunks = 0 diff --git a/tools/synchro.py b/tools/synchro.py index f4019f0bbb..dfe3854e22 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -159,6 +159,7 @@ "conftest.py", "pymongo_mocks.py", "utils_spec_runner.py", + "qcheck.py", "test_bulk.py", "test_client.py", "test_client_bulk_write.py", From 2742a000c49bd8bf528c08c351f390c4c5d82cb6 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:05:24 -0700 Subject: [PATCH 1436/2111] PYTHON-4730 Fix Failing Async Bulk Tests (#1831) --- test/asynchronous/test_bulk.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 24111ad7c0..b5f2eefdef 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -26,8 +26,8 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, remove_all_users, unittest from test.utils import ( async_rs_or_single_client_noauth, + async_single_client, async_wait_until, - single_client, ) from bson.binary import Binary, UuidRepresentation @@ -817,7 +817,7 @@ async def asyncSetUp(self): roles=[], ) - async_client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) + await async_client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) async def asyncTearDown(self): await self.db.command("dropRole", "noremove") @@ -919,7 +919,7 @@ async def test_readonly(self): username="readonly", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test - coll.find_one() + await coll.find_one() with self.assertRaises(OperationFailure): await coll.bulk_write([InsertOne({"x": 1})]) @@ -930,7 +930,7 @@ async def test_no_remove(self): username="noremove", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test - coll.find_one() + await coll.find_one() requests = [ InsertOne({"x": 1}), ReplaceOne({"x": 2}, {"x": 2}, upsert=True), @@ -954,7 +954,7 @@ async def _setup_class(cls): if cls.w is not None and cls.w > 1: for member in (await async_client_context.hello)["hosts"]: if member != (await async_client_context.hello)["primary"]: - cls.secondary = single_client(*partition_node(member)) + cls.secondary = await async_single_client(*partition_node(member)) break @classmethod From 350413032257614dfb175999830eaf5846882373 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 Sep 2024 11:28:49 -0700 Subject: [PATCH 1437/2111] PYTHON-4663 Fix coverity warnings in datetime decoding change (#1835) --- bson/_cbsonmodule.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index cc498f448e..3e9d5ecc26 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -383,10 +383,11 @@ static int millis_from_datetime_ms(PyObject* dt, long long* out){ static PyObject* decode_datetime(PyObject* self, long long millis, const codec_options_t* options){ PyObject* naive = NULL; PyObject* replace = NULL; - PyObject* args = NULL; - PyObject* kwargs = NULL; PyObject* value = NULL; struct module_state *state = GETSTATE(self); + if (!state) { + goto invalid; + } if (options->datetime_conversion == DATETIME_MS){ return datetime_ms_from_millis(self, millis); } @@ -414,8 +415,8 @@ static PyObject* decode_datetime(PyObject* self, long long millis, const codec_o Py_DECREF(utcoffset); return 0; } - min_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * 86400 + - PyDateTime_DELTA_GET_SECONDS(utcoffset)) * 1000 + + min_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * (int64_t)86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * (int64_t)1000 + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); } Py_DECREF(utcoffset); @@ -433,8 +434,8 @@ static PyObject* decode_datetime(PyObject* self, long long millis, const codec_o Py_DECREF(utcoffset); return 0; } - max_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * 86400 + - PyDateTime_DELTA_GET_SECONDS(utcoffset)) * 1000 + + max_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * (int64_t)86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * (int64_t)1000 + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); } Py_DECREF(utcoffset); @@ -487,8 +488,6 @@ static PyObject* decode_datetime(PyObject* self, long long millis, const codec_o invalid: Py_XDECREF(naive); Py_XDECREF(replace); - Py_XDECREF(args); - Py_XDECREF(kwargs); return value; } From 25f724badbef2d266ed01e75fb8f21f895c2b3b1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:09:43 -0700 Subject: [PATCH 1438/2111] PYTHON-4727 Migrate test_monitoring.py to async (#1834) --- test/asynchronous/test_monitoring.py | 1280 ++++++++++++++++++++++++++ test/test_monitoring.py | 51 +- tools/synchro.py | 1 + 3 files changed, 1316 insertions(+), 16 deletions(-) create mode 100644 test/asynchronous/test_monitoring.py diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py new file mode 100644 index 0000000000..3f6563ee56 --- /dev/null +++ b/test/asynchronous/test_monitoring.py @@ -0,0 +1,1280 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import asyncio +import copy +import datetime +import sys +import time +from typing import Any + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + client_knobs, + sanitize_cmd, + unittest, +) +from test.utils import ( + EventListener, + async_rs_or_single_client, + async_single_client, + async_wait_until, +) + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.helpers import anext +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncTestCommandMonitoring(AsyncIntegrationTest): + listener: EventListener + + @classmethod + @async_client_context.require_connection + async def _setup_class(cls): + await super()._setup_class() + cls.listener = EventListener() + cls.client = await async_rs_or_single_client( + event_listeners=[cls.listener], retryWrites=False + ) + + @classmethod + async def _tearDown_class(cls): + await cls.client.close() + await super()._tearDown_class() + + async def asyncTearDown(self): + self.listener.reset() + await super().asyncTearDown() + + async def test_started_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + + async def test_succeeded_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertEqual("ping", succeeded.command_name) + self.assertEqual(await self.client.address, succeeded.connection_id) + self.assertEqual(1, succeeded.reply.get("ok")) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + + async def test_failed_simple(self): + try: + await self.client.pymongo_test.command("oops!") + except OperationFailure: + pass + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("oops!", failed.command_name) + self.assertEqual(await self.client.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertTrue(isinstance(failed.request_id, int)) + self.assertTrue(isinstance(failed.duration_micros, int)) + + async def test_find_one(self): + await self.client.pymongo_test.test.find_one() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + + async def test_find_and_get_more(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) + for _ in range(4): + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("find", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) + + self.listener.reset() + # Next batch. Exhausting the cursor could cause a getMore + # that returns id of 0 and no results. + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("getMore", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["nextBatch"], [{} for _ in range(4)]) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_find_with_explain(self): + cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + res = await coll.find().explain() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(cmd, started.command) + self.assertEqual("explain", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("explain", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(await self.client.address, succeeded.connection_id) + self.assertEqual(res, succeeded.reply) + + async def _test_find_options(self, query, expected_cmd): + coll = self.client.pymongo_test.test + await coll.drop() + await coll.create_index("x") + await coll.insert_many([{"x": i} for i in range(5)]) + + # Test that we publish the unwrapped command. + self.listener.reset() + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + + cursor = coll.find(**query) + + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(expected_cmd, started.command) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("find", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(await self.client.address, succeeded.connection_id) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_find_options(self): + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } + + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } + + if async_client_context.version < (4, 1, 0, -1): + query["max_scan"] = 10 + cmd["maxScan"] = 10 + + await self._test_find_options(query, cmd) + + @async_client_context.require_version_max(3, 7, 2) + async def test_find_snapshot(self): + # Test "snapshot" parameter separately, can't combine with "sort". + query = {"filter": {}, "snapshot": True} + + cmd = {"find": "test", "filter": {}, "snapshot": True} + + await self._test_find_options(query, cmd) + + async def test_command_and_get_more(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = await coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) + for _ in range(4): + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("aggregate", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) + + self.listener.reset() + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("getMore", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } + self.assertEqualReply(expected_result, succeeded.reply) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_get_more_failure(self): + address = await self.client.address + coll = self.client.pymongo_test.test + cursor_id = Int64(12345) + cursor_doc = {"id": cursor_id, "firstBatch": [], "ns": coll.full_name} + cursor = AsyncCommandCursor(coll, cursor_doc, address) + try: + await anext(cursor) + except Exception: + pass + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertEqual("getMore", failed.command_name) + self.assertTrue(isinstance(failed.request_id, int)) + self.assertEqual(cursor.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + + @async_client_context.require_replica_set + @async_client_context.require_secondaries_count(1) + async def test_not_primary_error(self): + address = next(iter(await async_client_context.client.secondaries)) + client = await async_single_client(*address, event_listeners=[self.listener]) + # Clear authentication command results from the listener. + await client.admin.command("ping") + self.listener.reset() + error = None + try: + await client.pymongo_test.test.find_one_and_delete({}) + except NotPrimaryError as exc: + error = exc.errors + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertEqual("findAndModify", failed.command_name) + self.assertEqual(address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertTrue(isinstance(failed.request_id, int)) + self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertEqual(error, failed.failure) + + @async_client_context.require_no_mongos + async def test_exhaust(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find( + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("find", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 1, + } + self.assertEqualReply(expected_result, succeeded.reply) + + self.listener.reset() + tuple(await cursor.to_list()) + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: + self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) + self.assertEqual(cursor.address, event.connection_id) + self.assertEqual("pymongo_test", event.database_name) + self.assertTrue(isinstance(event.request_id, int)) + for event in self.listener.succeeded_events: + self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(event.duration_micros, int)) + self.assertEqual("getMore", event.command_name) + self.assertTrue(isinstance(event.request_id, int)) + self.assertEqual(cursor.address, event.connection_id) + # Last getMore receives a response with cursor id 0. + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) + + async def test_kill_cursors(self): + with client_knobs(kill_cursor_frequency=0.01): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + cursor = self.client.pymongo_test.test.find().batch_size(5) + await anext(cursor) + cursor_id = cursor.cursor_id + self.listener.reset() + await cursor.close() + await asyncio.sleep(2) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + # There could be more than one cursor_id here depending on + # when the thread last ran. + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) + self.assertIs(type(started.connection_id), tuple) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertEqual("killCursors", succeeded.command_name) + self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIs(type(succeeded.connection_id), tuple) + self.assertEqual(cursor.address, succeeded.connection_id) + # There could be more than one cursor_id here depending on + # when the thread last ran. + self.assertTrue( + cursor_id in succeeded.reply["cursorsUnknown"] + or cursor_id in succeeded.reply["cursorsKilled"] + ) + + async def test_non_bulk_writes(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + # Implied write concern insert_one + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # Unacknowledged insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=0)) + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqualReply(succeeded.reply, {"ok": 1}) + + # Explicit write concern insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=1)) + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_many + self.listener.reset() + res = await coll.delete_many({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) + + # replace_one + self.listener.reset() + oid = ObjectId() + res = await coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) + + # update_one + self.listener.reset() + res = await coll.update_one({"x": 1}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # update_many + self.listener.reset() + res = await coll.update_many({"x": 2}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_one + self.listener.reset() + _ = await coll.delete_one({"x": 3}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + self.assertEqual(0, await coll.count_documents({})) + + # write errors + await coll.insert_one({"_id": 1}) + try: + self.listener.reset() + await coll.insert_one({"_id": 1}) + except OperationFailure: + pass + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") + self.assertIsInstance(errors, list) + error = errors[0] + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) + + async def test_insert_many(self): + # This always uses the bulk API. + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] + await coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + count = 0 + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + reply = succeed.reply + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) + self.assertEqual(documents, docs) + self.assertEqual(6, count) + + async def test_insert_many_unacknowledged(self): + coll = self.client.pymongo_test.test + await coll.drop() + unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.listener.reset() + + # Force two batches on legacy servers. + big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] + await unack_coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + self.assertEqual(1, succeed.reply.get("ok")) + self.assertEqual(documents, docs) + + async def check(): + return await coll.count_documents({}) == 6 + + await async_wait_until(check, "insert documents with w=0") + + async def test_bulk_write(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + await coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + operation_id = started[0].operation_id + pairs = list(zip(started, succeeded)) + self.assertEqual(3, len(pairs)) + for start, succeed in pairs: + self.assertIsInstance(start, monitoring.CommandStartedEvent) + self.assertEqual("pymongo_test", start.database_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + + expected = SON([("insert", coll.name), ("ordered", True), ("documents", [{"_id": 1}])]) + self.assertEqualCommand(expected, started[0].command) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) + self.assertEqualCommand(expected, started[1].command) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) + self.assertEqualCommand(expected, started[2].command) + + @async_client_context.require_failCommand_fail_point + async def test_bulk_write_command_network_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + + insert_network_error = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + }, + } + async with self.fail_point(insert_network_error): + with self.assertRaises(AutoReconnect): + await coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.failed_events + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, "insert") + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure["errtype"], "AutoReconnect") + self.assertTrue(event.failure["errmsg"]) + + @async_client_context.require_failCommand_fail_point + async def test_bulk_write_command_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + + insert_command_error = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "await acloseAsyncConnection": False, + "errorCode": 10107, # Not primary + }, + } + async with self.fail_point(insert_command_error): + with self.assertRaises(NotPrimaryError): + await coll.bulk_write([InsertOne({"_id": 1})]) + failed = self.listener.failed_events + self.assertEqual(1, len(failed)) + event = failed[0] + self.assertEqual(event.command_name, "insert") + self.assertIsInstance(event.failure, dict) + self.assertEqual(event.failure["code"], 10107) + self.assertTrue(event.failure["errmsg"]) + + async def test_write_errors(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + try: + await coll.bulk_write( + [ + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + InsertOne({"_id": 1}), + DeleteOne({"_id": 1}), + ], + ordered=False, + ) + except OperationFailure: + pass + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + operation_id = started[0].operation_id + pairs = list(zip(started, succeeded)) + errors = [] + for start, succeed in pairs: + self.assertIsInstance(start, monitoring.CommandStartedEvent) + self.assertEqual("pymongo_test", start.database_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + if "writeErrors" in succeed.reply: + errors.extend(succeed.reply["writeErrors"]) + + self.assertEqual(2, len(errors)) + fields = {"index", "code", "errmsg"} + for error in errors: + self.assertTrue(fields.issubset(set(error))) + + async def test_first_batch_helper(self): + # Regardless of server version and use of helpers._first_batch + # this test should still pass. + self.listener.reset() + tuple(await (await self.client.pymongo_test.test.list_indexes()).to_list()) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON([("listIndexes", "test"), ("cursor", {})]) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertTrue("cursor" in succeeded.reply) + self.assertTrue("ok" in succeeded.reply) + + self.listener.reset() + + @async_client_context.require_version_max(6, 1, 99) + async def test_sensitive_commands(self): + listeners = self.client._event_listeners + + self.listener.reset() + cmd = SON([("getnonce", 1)]) + listeners.publish_command_start(cmd, "pymongo_test", 12345, await self.client.address, None) # type: ignore[arg-type] + delta = datetime.timedelta(milliseconds=100) + listeners.publish_command_success( + delta, + {"nonce": "e474f4561c5eb40b", "ok": 1.0}, + "getnonce", + 12345, + await self.client.address, # type: ignore[arg-type] + None, + database_name="pymongo_test", + ) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqual({}, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getnonce", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual(succeeded.duration_micros, 100000) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqual({}, succeeded.reply) + + +class AsyncTestGlobalListener(AsyncIntegrationTest): + listener: EventListener + saved_listeners: Any + + @classmethod + @async_client_context.require_connection + async def _setup_class(cls): + await super()._setup_class() + cls.listener = EventListener() + # We plan to call register(), which internally modifies _LISTENERS. + cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) + monitoring.register(cls.listener) + cls.client = await async_single_client() + # Get one (authenticated) socket in the pool. + await cls.client.pymongo_test.command("ping") + + @classmethod + async def _tearDown_class(cls): + monitoring._LISTENERS = cls.saved_listeners + await cls.client.close() + await super()._tearDown_class() + + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener.reset() + + async def test_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertTrue(isinstance(started.request_id, int)) + + +class AsyncTestEventClasses(unittest.IsolatedAsyncioTestCase): + def test_command_event_repr(self): + request_id, connection_id, operation_id, db_name = 1, ("localhost", 27017), 2, "admin" + event = monitoring.CommandStartedEvent( + {"ping": 1}, db_name, request_id, connection_id, operation_id + ) + self.assertEqual( + repr(event), + "", + ) + delta = datetime.timedelta(milliseconds=100) + event = monitoring.CommandSucceededEvent( + delta, {"ok": 1}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.CommandFailedEvent( + delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "", + ) + + def test_server_heartbeat_event_repr(self): + connection_id = ("localhost", 27017) + event = monitoring.ServerHeartbeatStartedEvent(connection_id) + self.assertEqual( + repr(event), "" + ) + delta = 0.1 + event = monitoring.ServerHeartbeatSucceededEvent( + delta, + {"ok": 1}, # type: ignore[arg-type] + connection_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerHeartbeatFailedEvent( + delta, + "ERROR", # type: ignore[arg-type] + connection_id, + ) + self.assertEqual( + repr(event), + "", + ) + + def test_server_event_repr(self): + server_address = ("localhost", 27017) + topology_id = ObjectId("000000000000000000000001") + event = monitoring.ServerOpeningEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerDescriptionChangedEvent( + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + server_address, + topology_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerClosedEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "", + ) + + def test_topology_event_repr(self): + topology_id = ObjectId("000000000000000000000001") + event = monitoring.TopologyOpenedEvent(topology_id) + self.assertEqual(repr(event), "") + event = monitoring.TopologyDescriptionChangedEvent( + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + topology_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.TopologyClosedEvent(topology_id) + self.assertEqual(repr(event), "") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index ed6a3d0bc2..8322e29918 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -13,6 +13,7 @@ # limitations under the License. from __future__ import annotations +import asyncio import copy import datetime import sys @@ -21,8 +22,19 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest -from test.utils import EventListener, rs_or_single_client, single_client, wait_until +from test import ( + IntegrationTest, + client_context, + client_knobs, + sanitize_cmd, + unittest, +) +from test.utils import ( + EventListener, + rs_or_single_client, + single_client, + wait_until, +) from bson.int64 import Int64 from bson.objectid import ObjectId @@ -31,23 +43,26 @@ from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestCommandMonitoring(IntegrationTest): listener: EventListener @classmethod @client_context.require_connection - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.listener = EventListener() cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.client.close() - super().tearDownClass() + super()._tearDown_class() def tearDown(self): self.listener.reset() @@ -171,7 +186,7 @@ def test_find_and_get_more(self): self.assertEqual(csr["nextBatch"], [{} for _ in range(4)]) finally: # Exhaust the cursor to avoid kill cursors. - tuple(cursor) + tuple(cursor.to_list()) def test_find_with_explain(self): cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) @@ -230,7 +245,7 @@ def _test_find_options(self, query, expected_cmd): self.assertEqual(self.client.address, succeeded.connection_id) finally: # Exhaust the cursor to avoid kill cursors. - tuple(cursor) + tuple(cursor.to_list()) def test_find_options(self): query = { @@ -356,7 +371,7 @@ def test_command_and_get_more(self): self.assertEqualReply(expected_result, succeeded.reply) finally: # Exhaust the cursor to avoid kill cursors. - tuple(cursor) + tuple(cursor.to_list()) def test_get_more_failure(self): address = self.client.address @@ -451,7 +466,7 @@ def test_exhaust(self): self.assertEqualReply(expected_result, succeeded.reply) self.listener.reset() - tuple(cursor) + tuple(cursor.to_list()) self.assertEqual(0, len(self.listener.failed_events)) for event in self.listener.started_events: self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) @@ -898,7 +913,11 @@ def test_insert_many_unacknowledged(self): self.assertEqual(succeed.operation_id, operation_id) self.assertEqual(1, succeed.reply.get("ok")) self.assertEqual(documents, docs) - wait_until(lambda: coll.count_documents({}) == 6, "insert documents with w=0") + + def check(): + return coll.count_documents({}) == 6 + + wait_until(check, "insert documents with w=0") def test_bulk_write(self): coll = self.client.pymongo_test.test @@ -1058,7 +1077,7 @@ def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch # this test should still pass. self.listener.reset() - tuple(self.client.pymongo_test.test.list_indexes()) + tuple((self.client.pymongo_test.test.list_indexes()).to_list()) started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) @@ -1119,8 +1138,8 @@ class TestGlobalListener(IntegrationTest): @classmethod @client_context.require_connection - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.listener = EventListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) @@ -1130,10 +1149,10 @@ def setUpClass(cls): cls.client.pymongo_test.command("ping") @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): monitoring._LISTENERS = cls.saved_listeners cls.client.close() - super().tearDownClass() + super()._tearDown_class() def setUp(self): super().setUp() diff --git a/tools/synchro.py b/tools/synchro.py index dfe3854e22..f38a83f128 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -172,6 +172,7 @@ "test_session.py", "test_transactions.py", "test_client_context.py", + "test_monitoring.py", ] sync_test_files = [ From 29bbf77dad0867ca19bf9769ca3fddc19cadec23 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 5 Sep 2024 14:18:48 -0700 Subject: [PATCH 1439/2111] PYTHON-4607 Use LogRecord.getMessage() not LogRecord.message (#1837) --- test/asynchronous/test_client.py | 2 +- test/asynchronous/test_logger.py | 14 +++++++------- test/test_client.py | 2 +- test/test_logger.py | 14 +++++++------- test/unified_format.py | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index d4f09cde33..97cbdf6dbd 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -617,7 +617,7 @@ def test_detected_environment_logging(self, mock_get_hosts): mock_get_hosts.return_value = [(host, 1)] AsyncMongoClient(host) AsyncMongoClient(multi_host) - logs = [record.message for record in cm.records if record.name == "pymongo.client"] + logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) @patch("pymongo.srv_resolver._SrvResolver.get_hosts") diff --git a/test/asynchronous/test_logger.py b/test/asynchronous/test_logger.py index 7a58846515..b219d530e7 100644 --- a/test/asynchronous/test_logger.py +++ b/test/asynchronous/test_logger.py @@ -37,15 +37,15 @@ async def test_default_truncation_limit(self): with self.assertLogs("pymongo.command", level="DEBUG") as cm: await db.test.insert_many(docs) - cmd_started_log = json_util.loads(cm.records[0].message) + cmd_started_log = json_util.loads(cm.records[0].getMessage()) self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3) - cmd_succeeded_log = json_util.loads(cm.records[1].message) + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) with self.assertLogs("pymongo.command", level="DEBUG") as cm: await db.test.find({}).to_list() - cmd_succeeded_log = json_util.loads(cm.records[1].message) + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) async def test_configured_truncation_limit(self): @@ -55,14 +55,14 @@ async def test_configured_truncation_limit(self): with self.assertLogs("pymongo.command", level="DEBUG") as cm: await db.command(cmd) - cmd_started_log = json_util.loads(cm.records[0].message) + cmd_started_log = json_util.loads(cm.records[0].getMessage()) self.assertEqual(len(cmd_started_log["command"]), 5 + 3) - cmd_succeeded_log = json_util.loads(cm.records[1].message) + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3) with self.assertRaises(OperationFailure): await db.command({"notARealCommand": True}) - cmd_failed_log = json_util.loads(cm.records[-1].message) + cmd_failed_log = json_util.loads(cm.records[-1].getMessage()) self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3) async def test_truncation_multi_byte_codepoints(self): @@ -78,7 +78,7 @@ async def test_truncation_multi_byte_codepoints(self): with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}): with self.assertLogs("pymongo.command", level="DEBUG") as cm: await self.db.test.insert_one({"x": multi_byte_char_str}) - cmd_started_log = json_util.loads(cm.records[0].message)["command"] + cmd_started_log = json_util.loads(cm.records[0].getMessage())["command"] cmd_started_log = cmd_started_log[:-3] last_3_bytes = cmd_started_log.encode()[-3:].decode() diff --git a/test/test_client.py b/test/test_client.py index 22e94dcddb..785139d6a8 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -611,7 +611,7 @@ def test_detected_environment_logging(self, mock_get_hosts): mock_get_hosts.return_value = [(host, 1)] MongoClient(host) MongoClient(multi_host) - logs = [record.message for record in cm.records if record.name == "pymongo.client"] + logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) @patch("pymongo.srv_resolver._SrvResolver.get_hosts") diff --git a/test/test_logger.py b/test/test_logger.py index d6c30b68a8..c0011ec3a5 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -36,15 +36,15 @@ def test_default_truncation_limit(self): with self.assertLogs("pymongo.command", level="DEBUG") as cm: db.test.insert_many(docs) - cmd_started_log = json_util.loads(cm.records[0].message) + cmd_started_log = json_util.loads(cm.records[0].getMessage()) self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3) - cmd_succeeded_log = json_util.loads(cm.records[1].message) + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) with self.assertLogs("pymongo.command", level="DEBUG") as cm: db.test.find({}).to_list() - cmd_succeeded_log = json_util.loads(cm.records[1].message) + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) def test_configured_truncation_limit(self): @@ -54,14 +54,14 @@ def test_configured_truncation_limit(self): with self.assertLogs("pymongo.command", level="DEBUG") as cm: db.command(cmd) - cmd_started_log = json_util.loads(cm.records[0].message) + cmd_started_log = json_util.loads(cm.records[0].getMessage()) self.assertEqual(len(cmd_started_log["command"]), 5 + 3) - cmd_succeeded_log = json_util.loads(cm.records[1].message) + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3) with self.assertRaises(OperationFailure): db.command({"notARealCommand": True}) - cmd_failed_log = json_util.loads(cm.records[-1].message) + cmd_failed_log = json_util.loads(cm.records[-1].getMessage()) self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3) def test_truncation_multi_byte_codepoints(self): @@ -77,7 +77,7 @@ def test_truncation_multi_byte_codepoints(self): with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}): with self.assertLogs("pymongo.command", level="DEBUG") as cm: self.db.test.insert_one({"x": multi_byte_char_str}) - cmd_started_log = json_util.loads(cm.records[0].message)["command"] + cmd_started_log = json_util.loads(cm.records[0].getMessage())["command"] cmd_started_log = cmd_started_log[:-3] last_3_bytes = cmd_started_log.encode()[-3:].decode() diff --git a/test/unified_format.py b/test/unified_format.py index e4ebf677e2..d35aed435a 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1925,7 +1925,7 @@ def format_logs(log_list): for log in log_list: if log.module == "ocsp_support": continue - data = json_util.loads(log.message) + data = json_util.loads(log.getMessage()) client = data.pop("clientId") if "clientId" in data else data.pop("topologyId") client_to_log[client].append( { From 044d92cc146aa009c0ab714184dee0a36628cbe8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 5 Sep 2024 19:34:01 -0500 Subject: [PATCH 1440/2111] PYTHON-4706 Allow running pytest directly without hatch (#1824) --- .evergreen/config.yml | 9 ++++++++- .evergreen/hatch.sh | 12 +++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c8e314f6c8..e718266efd 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -410,6 +410,7 @@ functions: SSL=${SSL} \ TEST_DATA_LAKE=${TEST_DATA_LAKE} \ MONGODB_API_VERSION=${MONGODB_API_VERSION} \ + SKIP_HATCH=${SKIP_HATCH} \ bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg "run enterprise auth tests": @@ -561,7 +562,9 @@ functions: working_dir: "src" script: | . .evergreen/scripts/env.sh - . .evergreen/hatch.sh encryption:teardown + if [ -f $DRIVERS_TOOLS/.evergreen/csfle/secrets-export.sh ]; then + . .evergreen/hatch.sh encryption:teardown + fi rm -rf ${DRIVERS_TOOLS} || true rm -f ./secrets-export.sh || true @@ -2083,10 +2086,14 @@ axes: display_name: "RHEL 8.3 (zSeries)" run_on: rhel83-zseries-small batchtime: 10080 # 7 days + variables: + SKIP_HATCH: true - id: rhel81-power8 display_name: "RHEL 8.1 (POWER8)" run_on: rhel81-power8-small batchtime: 10080 # 7 days + variables: + SKIP_HATCH: true - id: rhel82-arm64 display_name: "RHEL 8.2 (ARM64)" run_on: rhel82-arm64-small diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index 8438394101..db0da2f4d0 100644 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -8,7 +8,17 @@ if [ -z "$PYTHON_BINARY" ]; then PYTHON_BINARY=$(find_python3) fi -if $PYTHON_BINARY -m hatch --version; then +# Check if we should skip hatch and run the tests directly. +if [ -n "$SKIP_HATCH" ]; then + ENV_NAME=testenv-$RANDOM + createvirtualenv "$PYTHON_BINARY" $ENV_NAME + # shellcheck disable=SC2064 + trap "deactivate; rm -rf $ENV_NAME" EXIT HUP + python -m pip install -e ".[test]" + run_hatch() { + bash ./.evergreen/run-tests.sh + } +elif $PYTHON_BINARY -m hatch --version; then run_hatch() { $PYTHON_BINARY -m hatch run "$@" } From bf329add7cf59ba7f2faf616ec358cd630047f61 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Fri, 6 Sep 2024 08:57:32 -0700 Subject: [PATCH 1441/2111] PYTHON-4732 Migrate test_auth_spec.py to async (#1836) --- test/asynchronous/test_auth_spec.py | 108 ++++++++++++++++++++++++++++ test/test_auth_spec.py | 4 +- tools/synchro.py | 1 + 3 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_auth_spec.py diff --git a/test/asynchronous/test_auth_spec.py b/test/asynchronous/test_auth_spec.py new file mode 100644 index 0000000000..329b3eec62 --- /dev/null +++ b/test/asynchronous/test_auth_spec.py @@ -0,0 +1,108 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the auth spec tests.""" +from __future__ import annotations + +import glob +import json +import os +import sys +import warnings + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +from pymongo import AsyncMongoClient +from pymongo.asynchronous.auth_oidc import OIDCCallback + +_IS_SYNC = False + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") + + +class TestAuthSpec(unittest.IsolatedAsyncioTestCase): + pass + + +class SampleHumanCallback(OIDCCallback): + def fetch(self, context): + pass + + +def create_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") + + if not valid: + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.assertRaises(Exception, AsyncMongoClient, uri, connect=False) + else: + client = AsyncMongoClient(uri, connect=False) + credentials = client.options.pool_options._credentials + if credential is None: + self.assertIsNone(credentials) + else: + self.assertIsNotNone(credentials) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) + else: + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] + if expected is not None: + actual = credentials.mechanism_properties + for key, value in expected.items(): + self.assertEqual(getattr(actual, key.lower()), value) + else: + if credential["mechanism"] == "MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) + else: + self.assertIsNone(credentials.mechanism_properties) + + return run_test + + +def create_tests(): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as auth_tests: + test_cases = json.load(auth_tests)["tests"] + for test_case in test_cases: + if test_case.get("optional", False): + continue + test_method = create_test(test_case) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) + + +create_tests() + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 9ec7e07f3b..38e5f19bf8 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -27,7 +27,9 @@ from test.unified_format import generate_test_classes from pymongo import MongoClient -from pymongo.asynchronous.auth_oidc import OIDCCallback +from pymongo.synchronous.auth_oidc import OIDCCallback + +_IS_SYNC = True _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") diff --git a/tools/synchro.py b/tools/synchro.py index f38a83f128..e49405ccb7 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -160,6 +160,7 @@ "pymongo_mocks.py", "utils_spec_runner.py", "qcheck.py", + "test_auth_spec.py", "test_bulk.py", "test_client.py", "test_client_bulk_write.py", From 4e102235added02a4c3cf5e94acc9842eb301df1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Sep 2024 10:16:38 -0700 Subject: [PATCH 1442/2111] PYTHON-4560 Disable rsSyncApplyStop tests on 8.0+ (#1840) --- test/asynchronous/test_bulk.py | 2 ++ test/test_bulk.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index b5f2eefdef..79d8e1a0f1 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -976,6 +976,7 @@ async def cause_wtimeout(self, requests, ordered): finally: await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") + @async_client_context.require_version_max(7, 1) # PYTHON-4560 @async_client_context.require_replica_set @async_client_context.require_secondaries_count(1) async def test_write_concern_failure_ordered(self): @@ -1055,6 +1056,7 @@ async def test_write_concern_failure_ordered(self): failed = details["writeErrors"][0] self.assertTrue("duplicate" in failed["errmsg"]) + @async_client_context.require_version_max(7, 1) # PYTHON-4560 @async_client_context.require_replica_set @async_client_context.require_secondaries_count(1) async def test_write_concern_failure_unordered(self): diff --git a/test/test_bulk.py b/test/test_bulk.py index 9069109cfa..63b8c7790a 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -974,6 +974,7 @@ def cause_wtimeout(self, requests, ordered): finally: self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") + @client_context.require_version_max(7, 1) # PYTHON-4560 @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_ordered(self): @@ -1053,6 +1054,7 @@ def test_write_concern_failure_ordered(self): failed = details["writeErrors"][0] self.assertTrue("duplicate" in failed["errmsg"]) + @client_context.require_version_max(7, 1) # PYTHON-4560 @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_unordered(self): From 22b66b2ed698c3f161dcaf6f00c1d999a4a9fb87 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 6 Sep 2024 12:17:47 -0500 Subject: [PATCH 1443/2111] PYTHON-4695 Fix test event loop policy and improve error traceback for ClientBulkWriteException (#1828) --- pymongo/_client_bulk_shared.py | 2 ++ pyproject.toml | 1 + test/asynchronous/conftest.py | 2 ++ test/conftest.py | 2 ++ 4 files changed, 7 insertions(+) diff --git a/pymongo/_client_bulk_shared.py b/pymongo/_client_bulk_shared.py index 4dd1af2108..649f1c6aa0 100644 --- a/pymongo/_client_bulk_shared.py +++ b/pymongo/_client_bulk_shared.py @@ -74,4 +74,6 @@ def _throw_client_bulk_write_exception( "to your connection string." ) raise OperationFailure(errmsg, code, full_result) + if isinstance(full_result["error"], BaseException): + raise ClientBulkWriteException(full_result, verbose_results) from full_result["error"] raise ClientBulkWriteException(full_result, verbose_results) diff --git a/pyproject.toml b/pyproject.toml index 8452bfe956..225be8e1d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,6 +74,7 @@ addopts = ["-ra", "--strict-config", "--strict-markers", "--junitxml=xunit-resul testpaths = ["test"] log_cli_level = "INFO" faulthandler_timeout = 1500 +asyncio_default_fixture_loop_scope = "session" xfail_strict = true filterwarnings = [ "error", diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py index e443dff6c0..c08f224abd 100644 --- a/test/asynchronous/conftest.py +++ b/test/asynchronous/conftest.py @@ -17,6 +17,8 @@ def event_loop_policy(): # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) # We explicitly use a different loop implementation here to prevent that issue if sys.platform == "win32": + # Needed for Python 3.8. + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] return asyncio.get_event_loop_policy() diff --git a/test/conftest.py b/test/conftest.py index a3d954c7c3..ca817a5a62 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -15,6 +15,8 @@ def event_loop_policy(): # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) # We explicitly use a different loop implementation here to prevent that issue if sys.platform == "win32": + # Needed for Python 3.8. + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] return asyncio.get_event_loop_policy() From 1eb3b8550e5ec41d57012cbb0acb086836187d9e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Sep 2024 10:20:29 -0700 Subject: [PATCH 1444/2111] PYTHON-4735 Resync SDAM tests to fix TestUnifiedLoggingLoadbalanced (#1839) --- test/discovery_and_monitoring/rs/compatible.json | 2 +- .../rs/compatible_unknown.json | 2 +- .../sharded/compatible.json | 2 +- .../single/compatible.json | 2 +- .../single/too_old_then_upgraded.json | 4 ++-- .../unified/logging-loadbalanced.json | 16 ++++++++++++++++ 6 files changed, 22 insertions(+), 6 deletions(-) diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json index 444b13e9d5..dfd5d57dfa 100644 --- a/test/discovery_and_monitoring/rs/compatible.json +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json index cf92dd1ed3..95e03ea958 100644 --- a/test/discovery_and_monitoring/rs/compatible_unknown.json +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json index e531db97f9..ceb0ec24c4 100644 --- a/test/discovery_and_monitoring/sharded/compatible.json +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json index 302927598c..493d9b748e 100644 --- a/test/discovery_and_monitoring/single/compatible.json +++ b/test/discovery_and_monitoring/single/compatible.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json index 58ae7d9de4..c3dd98cf62 100644 --- a/test/discovery_and_monitoring/single/too_old_then_upgraded.json +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -1,5 +1,5 @@ { - "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 6", + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 21", "uri": "mongodb://a", "phases": [ { @@ -35,7 +35,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/unified/logging-loadbalanced.json b/test/discovery_and_monitoring/unified/logging-loadbalanced.json index 45440d2557..0ad3b0ceaa 100644 --- a/test/discovery_and_monitoring/unified/logging-loadbalanced.json +++ b/test/discovery_and_monitoring/unified/logging-loadbalanced.json @@ -132,6 +132,22 @@ } } }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, { "level": "debug", "component": "topology", From 6bdaf19c78b6062dbf2e51513f24b941352f76c3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 6 Sep 2024 10:46:10 -0700 Subject: [PATCH 1445/2111] PYTHON-4617 Skip unified retryable writes tests on MMAPv1 (#1841) --- test/asynchronous/test_transactions.py | 58 +++++++++--------- test/crud_v2_format.py | 55 ----------------- test/test_retryable_reads.py | 81 -------------------------- test/test_retryable_writes.py | 44 -------------- test/test_transactions.py | 58 +++++++++--------- test/unified_format.py | 14 +++-- 6 files changed, 62 insertions(+), 248 deletions(-) delete mode 100644 test/crud_v2_format.py diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 8fa1e70d01..4034c8e2c4 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -15,7 +15,6 @@ """Execute Transactions Spec tests.""" from __future__ import annotations -import os import sys from io import BytesIO @@ -23,8 +22,7 @@ sys.path[0:0] = [""] -from test.asynchronous import async_client_context, unittest -from test.asynchronous.utils_spec_runner import AsyncSpecRunner +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.utils import ( OvertCommandListener, async_rs_client, @@ -54,8 +52,6 @@ _IS_SYNC = False -_TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") - # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. With 2 mongoses, # 50 attempts yields a one in a quadrillion chance of a false positive @@ -63,31 +59,7 @@ UNPIN_TEST_MAX_ATTEMPTS = 50 -class AsyncTransactionsBase(AsyncSpecRunner): - @classmethod - async def _setup_class(cls): - await super()._setup_class() - if async_client_context.supports_transactions(): - for address in async_client_context.mongoses: - cls.mongos_clients.append(await async_single_client("{}:{}".format(*address))) - - @classmethod - async def _tearDown_class(cls): - for client in cls.mongos_clients: - await client.close() - await super()._tearDown_class() - - def maybe_skip_scenario(self, test): - super().maybe_skip_scenario(test) - if ( - "secondary" in self.id() - and not async_client_context.is_mongos - and not async_client_context.has_secondaries - ): - raise unittest.SkipTest("No secondaries") - - -class TestTransactions(AsyncTransactionsBase): +class TestTransactions(AsyncIntegrationTest): RUN_ON_SERVERLESS = True @async_client_context.require_transactions @@ -421,7 +393,31 @@ def __exit__(self, exc_type, exc_val, exc_tb): client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout -class TestTransactionsConvenientAPI(AsyncTransactionsBase): +class TestTransactionsConvenientAPI(AsyncIntegrationTest): + @classmethod + async def _setup_class(cls): + await super()._setup_class() + cls.mongos_clients = [] + if async_client_context.supports_transactions(): + for address in async_client_context.mongoses: + cls.mongos_clients.append(await async_single_client("{}:{}".format(*address))) + + @classmethod + async def _tearDown_class(cls): + for client in cls.mongos_clients: + await client.close() + await super()._tearDown_class() + + async def _set_fail_point(self, client, command_args): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + await client.admin.command(cmd) + + async def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + await self._set_fail_point(client, command_args) + @async_client_context.require_transactions async def test_callback_raises_custom_error(self): class _MyException(Exception): diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py deleted file mode 100644 index 8eadad8430..0000000000 --- a/test/crud_v2_format.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""v2 format CRUD test runner. - -https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.rst -""" -from __future__ import annotations - -from test.utils_spec_runner import SpecRunner - - -class TestCrudV2(SpecRunner): - # Default test database and collection names. - TEST_DB = None - TEST_COLLECTION = None - - def allowable_errors(self, op): - """Override expected error classes.""" - errors = super().allowable_errors(op) - errors += (ValueError,) - return errors - - def get_scenario_db_name(self, scenario_def): - """Crud spec says database_name is optional.""" - return scenario_def.get("database_name", self.TEST_DB) - - def get_scenario_coll_name(self, scenario_def): - """Crud spec says collection_name is optional.""" - return scenario_def.get("collection_name", self.TEST_COLLECTION) - - def get_object_name(self, op): - """Crud spec says object is optional and defaults to 'collection'.""" - return op.get("object", "collection") - - def get_outcome_coll_name(self, outcome, collection): - """Crud spec says outcome has an optional 'collection.name'.""" - return outcome["collection"].get("name", collection.name) - - def setup_scenario(self, scenario_def): - """Allow specs to override a test's setup.""" - # PYTHON-1935 Only create the collection if there is data to insert. - if scenario_def["data"]: - super().setup_scenario(scenario_def) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 9ea546ba9b..b0fa42a0c9 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -20,7 +20,6 @@ import sys import threading -from bson import SON from pymongo.errors import AutoReconnect sys.path[0:0] = [""] @@ -34,14 +33,10 @@ ) from test.utils import ( CMAPListener, - EventListener, OvertCommandListener, - SpecTestCreator, - rs_client, rs_or_single_client, set_fail_point, ) -from test.utils_spec_runner import SpecRunner from pymongo.monitoring import ( ConnectionCheckedOutEvent, @@ -50,7 +45,6 @@ PoolClearedEvent, ) from pymongo.synchronous.mongo_client import MongoClient -from pymongo.write_concern import WriteConcern # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") @@ -74,81 +68,6 @@ def test_uri(self): self.assertEqual(client.options.retry_reads, False) -class TestSpec(SpecRunner): - RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True - - @classmethod - @client_context.require_failCommand_fail_point - # TODO: remove this once PYTHON-1948 is done. - @client_context.require_no_mmap - def setUpClass(cls): - super().setUpClass() - - def maybe_skip_scenario(self, test): - super().maybe_skip_scenario(test) - skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] - for name in skip_names: - if name.lower() in test["description"].lower(): - self.skipTest(f"PyMongo does not support {name}") - - # Serverless does not support $out and collation. - if client_context.serverless: - for operation in test["operations"]: - if operation["name"] == "aggregate": - for stage in operation["arguments"]["pipeline"]: - if "$out" in stage: - self.skipTest("MongoDB Serverless does not support $out") - if "collation" in operation["arguments"]: - self.skipTest("MongoDB Serverless does not support collations") - - # Skip changeStream related tests on MMAPv1 and serverless. - test_name = self.id().rsplit(".")[-1] - if "changestream" in test_name.lower(): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support change streams.") - if client_context.serverless: - self.skipTest("Serverless does not support change streams.") - - def get_scenario_coll_name(self, scenario_def): - """Override a test's collection name to support GridFS tests.""" - if "bucket_name" in scenario_def: - return scenario_def["bucket_name"] - return super().get_scenario_coll_name(scenario_def) - - def setup_scenario(self, scenario_def): - """Override a test's setup to support GridFS tests.""" - if "bucket_name" in scenario_def: - data = scenario_def["data"] - db_name = self.get_scenario_db_name(scenario_def) - db = client_context.client[db_name] - # Create a bucket for the retryable reads GridFS tests with as few - # majority writes as possible. - wc = WriteConcern(w="majority") - if data: - db["fs.chunks"].drop() - db["fs.files"].drop() - db["fs.chunks"].insert_many(data["fs.chunks"]) - db.get_collection("fs.files", write_concern=wc).insert_many(data["fs.files"]) - else: - db.get_collection("fs.chunks").drop() - db.get_collection("fs.files", write_concern=wc).drop() - else: - super().setup_scenario(scenario_def) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = SpecTestCreator(create_test, TestSpec, _TEST_PATH) -test_creator.create_tests() - - class FindThread(threading.Thread): def __init__(self, collection): super().__init__() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 45a740e844..2938b7efaf 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -16,7 +16,6 @@ from __future__ import annotations import copy -import os import pprint import sys import threading @@ -29,11 +28,9 @@ DeprecationFilter, EventListener, OvertCommandListener, - SpecTestCreator, rs_or_single_client, set_fail_point, ) -from test.utils_spec_runner import SpecRunner from test.version import Version from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -65,9 +62,6 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") - class InsertEventListener(EventListener): def succeeded(self, event: CommandSucceededEvent) -> None: @@ -89,44 +83,6 @@ def succeeded(self, event: CommandSucceededEvent) -> None: ) -class TestAllScenarios(SpecRunner): - RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True - - def get_object_name(self, op): - return op.get("object", "collection") - - def get_scenario_db_name(self, scenario_def): - return scenario_def.get("database_name", "pymongo_test") - - def get_scenario_coll_name(self, scenario_def): - return scenario_def.get("collection_name", "test") - - def run_test_ops(self, sessions, collection, test): - # Transform retryable writes spec format into transactions. - operation = test["operation"] - outcome = test["outcome"] - if "error" in outcome: - operation["error"] = outcome["error"] - if "result" in outcome: - operation["result"] = outcome["result"] - test["operations"] = [operation] - super().run_test_ops(sessions, collection, test) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - @client_context.require_no_mmap - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = SpecTestCreator(create_test, TestAllScenarios, _TEST_PATH) -test_creator.create_tests() - - def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), diff --git a/test/test_transactions.py b/test/test_transactions.py index b1869bec79..c8c3c32d5b 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -15,7 +15,6 @@ """Execute Transactions Spec tests.""" from __future__ import annotations -import os import sys from io import BytesIO @@ -23,14 +22,13 @@ sys.path[0:0] = [""] -from test import client_context, unittest +from test import IntegrationTest, client_context, unittest from test.utils import ( OvertCommandListener, rs_client, single_client, wait_until, ) -from test.utils_spec_runner import SpecRunner from typing import List from bson import encode @@ -54,8 +52,6 @@ _IS_SYNC = True -_TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") - # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. With 2 mongoses, # 50 attempts yields a one in a quadrillion chance of a false positive @@ -63,31 +59,7 @@ UNPIN_TEST_MAX_ATTEMPTS = 50 -class TransactionsBase(SpecRunner): - @classmethod - def _setup_class(cls): - super()._setup_class() - if client_context.supports_transactions(): - for address in client_context.mongoses: - cls.mongos_clients.append(single_client("{}:{}".format(*address))) - - @classmethod - def _tearDown_class(cls): - for client in cls.mongos_clients: - client.close() - super()._tearDown_class() - - def maybe_skip_scenario(self, test): - super().maybe_skip_scenario(test) - if ( - "secondary" in self.id() - and not client_context.is_mongos - and not client_context.has_secondaries - ): - raise unittest.SkipTest("No secondaries") - - -class TestTransactions(TransactionsBase): +class TestTransactions(IntegrationTest): RUN_ON_SERVERLESS = True @client_context.require_transactions @@ -417,7 +389,31 @@ def __exit__(self, exc_type, exc_val, exc_tb): client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout -class TestTransactionsConvenientAPI(TransactionsBase): +class TestTransactionsConvenientAPI(IntegrationTest): + @classmethod + def _setup_class(cls): + super()._setup_class() + cls.mongos_clients = [] + if client_context.supports_transactions(): + for address in client_context.mongoses: + cls.mongos_clients.append(single_client("{}:{}".format(*address))) + + @classmethod + def _tearDown_class(cls): + for client in cls.mongos_clients: + client.close() + super()._tearDown_class() + + def _set_fail_point(self, client, command_args): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + client.admin.command(cmd) + + def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + self._set_fail_point(client, command_args) + @client_context.require_transactions def test_callback_raises_custom_error(self): class _MyException(Exception): diff --git a/test/unified_format.py b/test/unified_format.py index d35aed435a..168d35ee1f 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1100,6 +1100,13 @@ def setUpClass(cls): if not cls.should_run_on(run_on_spec): raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") + # add any special-casing for skipping tests here + if client_context.storage_engine == "mmapv1": + if "retryable-writes" in cls.TEST_SPEC["description"] or "retryable_writes" in str( + cls.TEST_PATH + ): + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + # Handle mongos_clients for transactions tests. cls.mongos_clients = [] if ( @@ -1110,11 +1117,6 @@ def setUpClass(cls): for address in client_context.mongoses: cls.mongos_clients.append(single_client("{}:{}".format(*address))) - # add any special-casing for skipping tests here - if client_context.storage_engine == "mmapv1": - if "retryable-writes" in cls.TEST_SPEC["description"]: - raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") - # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs( heartbeat_frequency=0.1, @@ -2157,7 +2159,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore raise ValueError( f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" ) - module_dict = {"__module__": module} + module_dict = {"__module__": module, "TEST_PATH": test_path} module_dict.update(kwargs) test_klasses[class_name] = type( class_name, From f2cd655d0481aa93e89af4c99fd8a03a6979636f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 6 Sep 2024 16:04:39 -0400 Subject: [PATCH 1446/2111] PYTHON-4746 - Bump minimum pytest and pytest-asyncio versions (#1845) --- requirements/test.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/test.txt b/requirements/test.txt index 1facbf03b9..135114feff 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1,2 +1,2 @@ -pytest>=7 -pytest-asyncio +pytest>=8.2 +pytest-asyncio>=0.24.0 From c883012b562128091bb4b8d184032be763d495f0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 6 Sep 2024 15:38:58 -0500 Subject: [PATCH 1447/2111] PYTHON-4703 MongoClient should default to connect=False on FaaS environments (#1844) --- doc/changelog.rst | 7 +++++++ pymongo/asynchronous/mongo_client.py | 9 ++++++++- pymongo/synchronous/mongo_client.py | 12 ++++++++++-- test/lambda/mongodb/app.py | 4 ++++ tools/synchro.py | 3 ++- 5 files changed, 31 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c5a4f47d79..6fffcdf696 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -42,6 +42,12 @@ PyMongo 4.9 brings a number of improvements including: - Fixed a bug where PyMongo would raise ``InvalidBSON: date value out of range`` when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a non-UTC timezone. +- The default value for ``connect`` in ``MongoClient`` is changed to ``False`` when running on + unction-as-a-service (FaaS) like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions. + On some FaaS systems, there is a ``fork()`` operation at function + startup. By delaying the connection to the first operation, we avoid a deadlock. See + `Is PyMongo Fork-Safe`_ for more information. + Issues Resolved ............... @@ -49,6 +55,7 @@ Issues Resolved See the `PyMongo 4.9 release notes in JIRA`_ for the list of resolved issues in this release. +.. _Is PyMongo Fork-Safe : https://www.mongodb.com/docs/languages/python/pymongo-driver/current/faq/#is-pymongo-fork-safe- .. _PyMongo 4.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39940 diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 2af773c440..b5e73e8de8 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -720,6 +720,10 @@ def __init__( .. versionchanged:: 4.7 Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. """ doc_class = document_class or dict self._init_kwargs: dict[str, Any] = { @@ -803,7 +807,10 @@ def __init__( if tz_aware is None: tz_aware = opts.get("tz_aware", False) if connect is None: - connect = opts.get("connect", True) + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) keyword_opts["tz_aware"] = tz_aware keyword_opts["connect"] = connect diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 6c5f68b7eb..26af488acd 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -263,7 +263,8 @@ def __init__( aware (otherwise they will be naive) :param connect: If ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect - on the first operation. + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment. :param type_registry: instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. @@ -719,6 +720,10 @@ def __init__( .. versionchanged:: 4.7 Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. """ doc_class = document_class or dict self._init_kwargs: dict[str, Any] = { @@ -802,7 +807,10 @@ def __init__( if tz_aware is None: tz_aware = opts.get("tz_aware", False) if connect is None: - connect = opts.get("connect", True) + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) keyword_opts["tz_aware"] = tz_aware keyword_opts["connect"] = connect diff --git a/test/lambda/mongodb/app.py b/test/lambda/mongodb/app.py index 5840347d9a..274990d3bc 100644 --- a/test/lambda/mongodb/app.py +++ b/test/lambda/mongodb/app.py @@ -8,6 +8,7 @@ import json import os +import warnings from bson import has_c as has_bson_c from pymongo import MongoClient @@ -18,6 +19,9 @@ ServerHeartbeatListener, ) +# Ensure there are no warnings raised in normal operation. +warnings.simplefilter("error") + open_connections = 0 heartbeat_count = 0 streaming_heartbeat_count = 0 diff --git a/tools/synchro.py b/tools/synchro.py index e49405ccb7..e79cfce402 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -108,7 +108,8 @@ docstring_replacements: dict[tuple[str, str], str] = { ("MongoClient", "connect"): """If ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect - on the first operation.""", + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment.""", ("Collection", "create"): """If ``True``, force collection creation even without options being set.""", ("Collection", "session"): """A From f6a418f590de8e3e0a8d12dbd092bc5cd09e85fa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 9 Sep 2024 09:44:03 -0500 Subject: [PATCH 1448/2111] PYTHON-4182 Unskip test_unpin_after_TransientTransactionError_error_on_abort on latest (#1847) --- test/unified_format.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/unified_format.py b/test/unified_format.py index 168d35ee1f..63cd23af88 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1170,9 +1170,6 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") - if "unpin after non-transient error on abort" in spec["description"]: - if client_context.version[0] == 8: - self.skipTest("Skipping TransientTransactionError pending PYTHON-4182") class_name = self.__class__.__name__.lower() description = spec["description"].lower() From e683b81bf45e20f22b2d4a1d6ca61baad142b488 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 9 Sep 2024 11:05:04 -0400 Subject: [PATCH 1449/2111] PYTHON-4739 - Use AsyncBulkTestBase in Async TestEncryptedBulkWrite (#1846) --- test/asynchronous/test_encryption.py | 4 ++-- test/test_encryption.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index eb431e1d50..030f468db2 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -29,6 +29,7 @@ import uuid import warnings from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context +from test.asynchronous.test_bulk import AsyncBulkTestBase from threading import Thread from typing import Any, Dict, Mapping @@ -52,7 +53,6 @@ KMIP_CREDS, LOCAL_MASTER_KEY, ) -from test.test_bulk import BulkTestBase from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, @@ -372,7 +372,7 @@ async def target(): await target() -class TestEncryptedBulkWrite(BulkTestBase, AsyncEncryptionIntegrationTest): +class TestEncryptedBulkWrite(AsyncBulkTestBase, AsyncEncryptionIntegrationTest): async def test_upsert_uuid_standard_encrypt(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = await async_rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/test_encryption.py b/test/test_encryption.py index 568ebffc9e..5e02e4d628 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -29,6 +29,7 @@ import uuid import warnings from test import IntegrationTest, PyMongoTestCase, client_context +from test.test_bulk import BulkTestBase from threading import Thread from typing import Any, Dict, Mapping @@ -52,7 +53,6 @@ KMIP_CREDS, LOCAL_MASTER_KEY, ) -from test.test_bulk import BulkTestBase from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, From 2cca2d9e3d9322b64ab8475c821996f525bbdc2c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 9 Sep 2024 12:04:23 -0400 Subject: [PATCH 1450/2111] PYTHON-3193 - Add ResourceWarning for unclosed MongoClients in __del__ (#1833) --- doc/changelog.rst | 10 ++++++++++ pymongo/asynchronous/mongo_client.py | 19 +++++++++++++++++++ pymongo/asynchronous/settings.py | 2 +- pymongo/synchronous/mongo_client.py | 19 +++++++++++++++++++ pymongo/synchronous/settings.py | 2 +- pyproject.toml | 3 +++ 6 files changed, 53 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 6fffcdf696..c008066c22 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -42,6 +42,16 @@ PyMongo 4.9 brings a number of improvements including: - Fixed a bug where PyMongo would raise ``InvalidBSON: date value out of range`` when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a non-UTC timezone. +- Added a warning to unclosed MongoClient instances + telling users to explicitly close clients when finished with them to avoid leaking resources. + For example: + + .. code-block:: + + sys:1: ResourceWarning: Unclosed MongoClient opened at: + File "/Users//my_file.py", line 8, in `` + client = MongoClient() + Call MongoClient.close() to safely shut down your client and free up resources. - The default value for ``connect`` in ``MongoClient`` is changed to ``False`` when running on unction-as-a-service (FaaS) like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions. On some FaaS systems, there is a ``fork()`` operation at function diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index b5e73e8de8..a84fbf2e59 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -34,6 +34,7 @@ import contextlib import os +import warnings import weakref from collections import defaultdict from typing import ( @@ -871,6 +872,7 @@ def __init__( ) self._opened = False + self._closed = False self._init_background() if _IS_SYNC and connect: @@ -1180,6 +1182,22 @@ def __getitem__(self, name: str) -> database.AsyncDatabase[_DocumentType]: """ return database.AsyncDatabase(self, name) + def __del__(self) -> None: + """Check that this AsyncMongoClient has been closed and issue a warning if not.""" + try: + if not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + source=self, + ) + except AttributeError: + pass + def _close_cursor_soon( self, cursor_id: int, @@ -1547,6 +1565,7 @@ async def close(self) -> None: if self._encrypter: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. await self._encrypter.close() + self._closed = True if not _IS_SYNC: # Add support for contextlib.aclosing. diff --git a/pymongo/asynchronous/settings.py b/pymongo/asynchronous/settings.py index c41c638e6c..1103e1bd18 100644 --- a/pymongo/asynchronous/settings.py +++ b/pymongo/asynchronous/settings.py @@ -82,7 +82,7 @@ def __init__( self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. - self._stack = "".join(traceback.format_stack()) + self._stack = "".join(traceback.format_stack()[:-2]) @property def seeds(self) -> Collection[tuple[str, int]]: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 26af488acd..cec78463b3 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -34,6 +34,7 @@ import contextlib import os +import warnings import weakref from collections import defaultdict from typing import ( @@ -871,6 +872,7 @@ def __init__( ) self._opened = False + self._closed = False self._init_background() if _IS_SYNC and connect: @@ -1180,6 +1182,22 @@ def __getitem__(self, name: str) -> database.Database[_DocumentType]: """ return database.Database(self, name) + def __del__(self) -> None: + """Check that this MongoClient has been closed and issue a warning if not.""" + try: + if not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + source=self, + ) + except AttributeError: + pass + def _close_cursor_soon( self, cursor_id: int, @@ -1543,6 +1561,7 @@ def close(self) -> None: if self._encrypter: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. self._encrypter.close() + self._closed = True if not _IS_SYNC: # Add support for contextlib.closing. diff --git a/pymongo/synchronous/settings.py b/pymongo/synchronous/settings.py index 8719e86083..040776713f 100644 --- a/pymongo/synchronous/settings.py +++ b/pymongo/synchronous/settings.py @@ -82,7 +82,7 @@ def __init__( self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. - self._stack = "".join(traceback.format_stack()) + self._stack = "".join(traceback.format_stack()[:-2]) @property def seeds(self) -> Collection[tuple[str, int]]: diff --git a/pyproject.toml b/pyproject.toml index 225be8e1d8..19db00f19a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -95,6 +95,9 @@ filterwarnings = [ "module:please use dns.resolver.Resolver.resolve:DeprecationWarning", # https://github.com/dateutil/dateutil/issues/1314 "module:datetime.datetime.utc:DeprecationWarning:dateutil", + # TODO: Remove both of these in https://jira.mongodb.org/browse/PYTHON-4731 + "ignore:Unclosed AsyncMongoClient*", + "ignore:Unclosed MongoClient*", ] markers = [ "auth_aws: tests that rely on pymongo-auth-aws", From ead3201a4ebc7e78b46b6fa0e4af25c37b4c7b20 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 9 Sep 2024 11:35:34 -0700 Subject: [PATCH 1451/2111] PYTHON-4733 Migrate test_auth.py to async (#1838) --- test/asynchronous/test_auth.py | 689 +++++++++++++++++++++++++++++++++ test/test_auth.py | 25 +- tools/synchro.py | 1 + 3 files changed, 708 insertions(+), 7 deletions(-) create mode 100644 test/asynchronous/test_auth.py diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py new file mode 100644 index 0000000000..e516ff6798 --- /dev/null +++ b/test/asynchronous/test_auth.py @@ -0,0 +1,689 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Authentication Tests.""" +from __future__ import annotations + +import asyncio +import os +import sys +import threading +from urllib.parse import quote_plus + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, SkipTest, async_client_context, unittest +from test.utils import ( + AllowListEventListener, + async_rs_or_single_client, + async_rs_or_single_client_noauth, + async_single_client, + async_single_client_noauth, + delay, + ignore_deprecations, +) + +from pymongo import AsyncMongoClient, monitoring +from pymongo.asynchronous.auth import HAVE_KERBEROS +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.errors import OperationFailure +from pymongo.hello import HelloCompat +from pymongo.read_preferences import ReadPreference +from pymongo.saslprep import HAVE_STRINGPREP + +_IS_SYNC = False + +# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. +GSSAPI_HOST = os.environ.get("GSSAPI_HOST") +GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) +GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL") +GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb") +GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false") +GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM") +GSSAPI_PASS = os.environ.get("GSSAPI_PASS") +GSSAPI_DB = os.environ.get("GSSAPI_DB", "test") + +SASL_HOST = os.environ.get("SASL_HOST") +SASL_PORT = int(os.environ.get("SASL_PORT", "27017")) +SASL_USER = os.environ.get("SASL_USER") +SASL_PASS = os.environ.get("SASL_PASS") +SASL_DB = os.environ.get("SASL_DB", "$external") + + +class AutoAuthenticateThread(threading.Thread): + """Used in testing threaded authentication. + + This does await collection.find_one() with a 1-second delay to ensure it must + check out and authenticate multiple connections from the pool concurrently. + + :Parameters: + `collection`: An auth-protected collection containing one document. + """ + + def __init__(self, collection): + super().__init__() + self.collection = collection + self.success = False + + def run(self): + assert self.collection.find_one({"$where": delay(1)}) is not None + self.success = True + + +class TestGSSAPI(unittest.IsolatedAsyncioTestCase): + mech_properties: str + service_realm_required: bool + + @classmethod + def setUpClass(cls): + if not HAVE_KERBEROS: + raise SkipTest("Kerberos module not available.") + if not GSSAPI_HOST or not GSSAPI_PRINCIPAL: + raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI") + cls.service_realm_required = ( + GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL + ) + mech_properties = f"SERVICE_NAME:{GSSAPI_SERVICE_NAME}" + mech_properties += f",CANONICALIZE_HOST_NAME:{GSSAPI_CANONICALIZE}" + if GSSAPI_SERVICE_REALM is not None: + mech_properties += f",SERVICE_REALM:{GSSAPI_SERVICE_REALM}" + cls.mech_properties = mech_properties + + async def test_credentials_hashing(self): + # GSSAPI credentials are properly hashed. + creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) + + creds1 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds2 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds3 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) + + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) + + @ignore_deprecations + async def test_gssapi_simple(self): + assert GSSAPI_PRINCIPAL is not None + if GSSAPI_PASS is not None: + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) + else: + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) + + if not self.service_realm_required: + # Without authMechanismProperties. + client = AsyncMongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) + + await client[GSSAPI_DB].collection.find_one() + + # Log in using URI, without authMechanismProperties. + client = AsyncMongoClient(uri) + await client[GSSAPI_DB].collection.find_one() + + # Authenticate with authMechanismProperties. + client = AsyncMongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + await client[GSSAPI_DB].collection.find_one() + + # Log in using URI, with authMechanismProperties. + mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" + client = AsyncMongoClient(mech_uri) + await client[GSSAPI_DB].collection.find_one() + + set_name = await client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + if set_name: + if not self.service_realm_required: + # Without authMechanismProperties + client = AsyncMongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) + + await client[GSSAPI_DB].list_collection_names() + + uri = uri + f"&replicaSet={set_name!s}" + client = AsyncMongoClient(uri) + await client[GSSAPI_DB].list_collection_names() + + # With authMechanismProperties + client = AsyncMongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + await client[GSSAPI_DB].list_collection_names() + + mech_uri = mech_uri + f"&replicaSet={set_name!s}" + client = AsyncMongoClient(mech_uri) + await client[GSSAPI_DB].list_collection_names() + + @ignore_deprecations + @async_client_context.require_sync + async def test_gssapi_threaded(self): + client = AsyncMongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + # Authentication succeeded? + await client.server_info() + db = client[GSSAPI_DB] + + # Need one document in the collection. AutoAuthenticateThread does + # collection.find_one with a 1-second delay, forcing it to check out + # multiple connections from the pool concurrently, proving that + # auto-authentication works with GSSAPI. + collection = db.test + if not await collection.count_documents({}): + try: + await collection.drop() + await collection.insert_one({"_id": 1}) + except OperationFailure: + raise SkipTest("User must be able to write.") + + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + set_name = await client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + if set_name: + client = AsyncMongoClient( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + # Succeeded? + await client.server_info() + + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + +class TestSASLPlain(unittest.IsolatedAsyncioTestCase): + @classmethod + def setUpClass(cls): + if not SASL_HOST or not SASL_USER or not SASL_PASS: + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") + + async def test_sasl_plain(self): + client = AsyncMongoClient( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) + await client.ldap.test.find_one() + + assert SASL_USER is not None + assert SASL_PASS is not None + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + client = AsyncMongoClient(uri) + await client.ldap.test.find_one() + + set_name = await client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + if set_name: + client = AsyncMongoClient( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) + await client.ldap.test.find_one() + + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) + client = AsyncMongoClient(uri) + await client.ldap.test.find_one() + + async def test_sasl_plain_bad_credentials(self): + def auth_string(user, password): + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + return uri + + bad_user = AsyncMongoClient(auth_string("not-user", SASL_PASS)) + bad_pwd = AsyncMongoClient(auth_string(SASL_USER, "not-pwd")) + # OperationFailure raised upon connecting. + with self.assertRaises(OperationFailure): + await bad_user.admin.command("ping") + with self.assertRaises(OperationFailure): + await bad_pwd.admin.command("ping") + + +class TestSCRAMSHA1(AsyncIntegrationTest): + @async_client_context.require_auth + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user( + "pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"] + ) + + async def asyncTearDown(self): + await async_client_context.drop_user("pymongo_test", "user") + await super().asyncTearDown() + + @async_client_context.require_no_fips + async def test_scram_sha1(self): + host, port = await async_client_context.host, await async_client_context.port + + client = await async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + await client.pymongo_test.command("dbstats") + + if async_client_context.is_rs: + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, async_client_context.replica_set_name) + ) + client = await async_single_client_noauth(uri) + await client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + await db.command("dbstats") + + +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation +class TestSCRAM(AsyncIntegrationTest): + @async_client_context.require_auth + @async_client_context.require_version_min(3, 7, 2) + async def asyncSetUp(self): + await super().asyncSetUp() + self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS + monitoring._SENSITIVE_COMMANDS = set() + self.listener = AllowListEventListener("saslStart") + + async def asyncTearDown(self): + monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS + await async_client_context.client.testscram.command("dropAllUsersFromDatabase") + await async_client_context.client.drop_database("testscram") + await super().asyncTearDown() + + async def test_scram_skip_empty_exchange(self): + listener = AllowListEventListener("saslStart", "saslContinue") + await async_client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = await async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + await client.testscram.command("dbstats") + + if async_client_context.version < (4, 4, -1): + # Assert we sent the skipEmptyExchange option. + first_event = listener.started_events[0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) + + # Assert the third exchange was skipped on servers that support it. + # Note that the first exchange occurs on the connection handshake. + started = listener.started_command_names() + if async_client_context.version.at_least(4, 4, -1): + self.assertEqual(started, ["saslContinue"]) + else: + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + + @async_client_context.require_no_fips + async def test_scram(self): + # Step 1: create users + await async_client_context.create_user( + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) + await async_client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + await async_client_context.create_user( + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) + + # Step 2: verify auth success cases + client = await async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 + client = await async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + await client.testscram.command("dbstats") + client = await async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + self.listener.reset() + client = await async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + await client.testscram.command("dbstats") + if async_client_context.version.at_least(4, 4, -1): + # Speculative authentication in 4.4+ sends saslStart with the + # handshake. + self.assertEqual(self.listener.started_events, []) + else: + started = self.listener.started_events[0] + self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") + + # Step 3: verify auth failure conditions + client = await async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + with self.assertRaises(OperationFailure): + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + with self.assertRaises(OperationFailure): + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="not-a-user", password="pwd", authSource="testscram" + ) + with self.assertRaises(OperationFailure): + await client.testscram.command("dbstats") + + if async_client_context.is_rs: + host, port = await async_client_context.host, await async_client_context.port + uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await async_single_client_noauth(uri) + await client.testscram.command("dbstats") + db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) + await db.command("dbstats") + + @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep") + async def test_scram_saslprep(self): + # Step 4: test SASLprep + host, port = await async_client_context.host, await async_client_context.port + # Test the use of SASLprep on passwords. For example, + # saslprep('\u2136') becomes 'IV' and saslprep('I\u00ADX') + # becomes 'IX'. SASLprep is only supported when the standard + # library provides stringprep. + await async_client_context.create_user( + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + await async_client_context.create_user( + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = await async_rs_or_single_client_noauth( + username="\u2168", password="\u2163", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="\u2168", password="IV", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="IX", password="I\u00ADX", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + client = await async_rs_or_single_client_noauth( + "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + + client = await async_rs_or_single_client_noauth( + "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + client = await async_rs_or_single_client_noauth( + "mongodb://IX:IX@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + + async def test_cache(self): + client = await async_single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) + # Force authentication. + await client.admin.command("ping") + cache = credentials.cache + self.assertIsNotNone(cache) + data = cache.data + self.assertIsNotNone(data) + self.assertEqual(len(data), 4) + ckey, skey, salt, iterations = data + self.assertIsInstance(ckey, bytes) + self.assertIsInstance(skey, bytes) + self.assertIsInstance(salt, bytes) + self.assertIsInstance(iterations, int) + + @async_client_context.require_sync + async def test_scram_threaded(self): + coll = async_client_context.client.db.test + await coll.drop() + await coll.insert_one({"_id": 1}) + + # The first thread to call find() will authenticate + client = await async_rs_or_single_client() + self.addAsyncCleanup(client.close) + coll = client.db.test + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(coll)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + +class TestAuthURIOptions(AsyncIntegrationTest): + @async_client_context.require_auth + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user("admin", "admin", "pass") + await async_client_context.create_user( + "pymongo_test", "user", "pass", ["userAdmin", "readWrite"] + ) + + async def asyncTearDown(self): + await async_client_context.drop_user("pymongo_test", "user") + await async_client_context.drop_user("admin", "admin") + await super().asyncTearDown() + + async def test_uri_options(self): + # Test default to admin + host, port = await async_client_context.host, await async_client_context.port + client = await async_rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + self.assertTrue(await client.admin.command("dbstats")) + + if async_client_context.is_rs: + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await async_single_client_noauth(uri) + self.assertTrue(await client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + # Test explicit database + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) + client = await async_rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.admin.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + + if async_client_context.is_rs: + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await async_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.admin.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + # Test authSource + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) + client = await async_rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.pymongo_test2.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + + if async_client_context.is_rs: + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, async_client_context.replica_set_name) + ) + client = await async_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.pymongo_test2.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth.py b/test/test_auth.py index 2ae0eae129..0bf0cfd80f 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -15,6 +15,7 @@ """Authentication Tests.""" from __future__ import annotations +import asyncio import os import sys import threading @@ -34,12 +35,14 @@ ) from pymongo import MongoClient, monitoring -from pymongo.asynchronous.auth import HAVE_KERBEROS from pymongo.auth_shared import _build_credentials_tuple from pymongo.errors import OperationFailure from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP +from pymongo.synchronous.auth import HAVE_KERBEROS + +_IS_SYNC = True # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. GSSAPI_HOST = os.environ.get("GSSAPI_HOST") @@ -203,6 +206,7 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() @ignore_deprecations + @client_context.require_sync def test_gssapi_threaded(self): client = MongoClient( GSSAPI_HOST, @@ -330,8 +334,10 @@ def auth_string(user, password): bad_user = MongoClient(auth_string("not-user", SASL_PASS)) bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. - self.assertRaises(OperationFailure, bad_user.admin.command, "ping") - self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") + with self.assertRaises(OperationFailure): + bad_user.admin.command("ping") + with self.assertRaises(OperationFailure): + bad_pwd.admin.command("ping") class TestSCRAMSHA1(IntegrationTest): @@ -578,6 +584,7 @@ def test_cache(self): self.assertIsInstance(salt, bytes) self.assertIsInstance(iterations, int) + @client_context.require_sync def test_scram_threaded(self): coll = client_context.client.db.test coll.drop() @@ -629,7 +636,8 @@ def test_uri_options(self): # Test explicit database uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, "dbstats") + with self.assertRaises(OperationFailure): + client.admin.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: @@ -639,7 +647,8 @@ def test_uri_options(self): client_context.replica_set_name, ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, "dbstats") + with self.assertRaises(OperationFailure): + client.admin.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) self.assertTrue(db.command("dbstats")) @@ -647,7 +656,8 @@ def test_uri_options(self): # Test authSource uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + with self.assertRaises(OperationFailure): + client.pymongo_test2.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: @@ -656,7 +666,8 @@ def test_uri_options(self): "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) ) client = single_client_noauth(uri) - self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + with self.assertRaises(OperationFailure): + client.pymongo_test2.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) self.assertTrue(db.command("dbstats")) diff --git a/tools/synchro.py b/tools/synchro.py index e79cfce402..cde75b539c 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -161,6 +161,7 @@ "pymongo_mocks.py", "utils_spec_runner.py", "qcheck.py", + "test_auth.py", "test_auth_spec.py", "test_bulk.py", "test_client.py", From 0119062abe7819e8ab369ffd628c5a05d60f6275 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 07:33:46 -0500 Subject: [PATCH 1452/2111] PYTHON-4749 Add Script to Cherry-Pick PRs (#1848) --- .evergreen/config.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index e718266efd..3dff3278d9 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2039,6 +2039,19 @@ tasks: export HEAD_SHA=${github.amrom.workers.devmit} bash .evergreen/run-import-time-test.sh + - name: "backport-pr" + allowed_requesters: ["commit"] + commands: + - command: subprocess.exec + type: test + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh + - mongodb + - mongo-python-driver + - ${github.amrom.workers.devmit} + axes: # Choice of distro - id: platform @@ -3014,6 +3027,13 @@ buildvariants: tasks: - name: "check-import-time" +- name: backport-pr + display_name: "Backport PR" + run_on: + - rhel8.7-small + tasks: + - name: "backport-pr" + - name: "perf-tests" display_name: "Performance Benchmark Tests" batchtime: 10080 # 7 days From 42f2d310571c8b64846d02eb33344e4fc027544a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 09:53:07 -0500 Subject: [PATCH 1453/2111] PYTHON-4750 Add support for Python 3.13 (#1849) --- .evergreen/config.yml | 18 +++++++++++++++--- doc/changelog.rst | 1 + pyproject.toml | 1 + 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3dff3278d9..a9dc669718 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2253,6 +2253,10 @@ axes: display_name: "Python 3.12" variables: PYTHON_BINARY: "/opt/python/3.12/bin/python3" + - id: "3.13" + display_name: "Python 3.13" + variables: + PYTHON_BINARY: "/opt/python/3.13/bin/python3" - id: "pypy3.9" display_name: "PyPy 3.9" variables: @@ -2285,6 +2289,10 @@ axes: display_name: "Python 3.12" variables: PYTHON_BINARY: "C:/python/Python312/python.exe" + - id: "3.13" + display_name: "Python 3.13" + variables: + PYTHON_BINARY: "C:/python/Python313/python.exe" - id: python-version-windows-32 display_name: "Python" @@ -2309,6 +2317,10 @@ axes: display_name: "32-bit Python 3.12" variables: PYTHON_BINARY: "C:/python/32/Python312/python.exe" + - id: "3.13" + display_name: "32-bit Python 3.13" + variables: + PYTHON_BINARY: "C:/python/32/Python313/python.exe" # Choice of mod_wsgi version - id: mod-wsgi-version @@ -2572,7 +2584,7 @@ buildvariants: # Only test "noauth" with Python 3.8. exclude_spec: platform: rhel8 - python-version: ["3.9", "3.10", "pypy3.9", "pypy3.10"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "pypy3.9", "pypy3.10"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2678,7 +2690,7 @@ buildvariants: exclude_spec: # Don't test green frameworks on these Python versions. - platform: rhel8 - python-version: ["pypy3.9", "pypy3.10"] + python-version: ["pypy3.9", "pypy3.10", "3.13"] green-framework: "*" auth-ssl: "*" display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" @@ -2801,7 +2813,7 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-22.04 - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] mod-wsgi-version: "*" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: diff --git a/doc/changelog.rst b/doc/changelog.rst index c008066c22..ba3cba8322 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -10,6 +10,7 @@ Changes in Version 4.9.0 PyMongo 4.9 brings a number of improvements including: - Added support for MongoDB 8.0. +- Added support for Python 3.13. - A new asynchronous API with full asyncio support. - Added support for In-Use Encryption range queries with MongoDB 8.0. Added :attr:`~pymongo.encryption.Algorithm.RANGE`. diff --git a/pyproject.toml b/pyproject.toml index 19db00f19a..b64c7d6031 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Database", "Typing :: Typed", ] From 4436b1c6763c91da6d46fe7e1664ecdd60560402 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:01 -0500 Subject: [PATCH 1454/2111] PYTHON-4747 Rename pymongo/client_session.py to pymongo/synchronous/client_session.py --- pymongo/{ => synchronous}/client_session.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/client_session.py (100%) diff --git a/pymongo/client_session.py b/pymongo/synchronous/client_session.py similarity index 100% rename from pymongo/client_session.py rename to pymongo/synchronous/client_session.py From 57305e017913277770b7e8f42e37878af32a7d1b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:01 -0500 Subject: [PATCH 1455/2111] PYTHON-4747 Sync client_session.py to master --- pymongo/client_session.py | 22 ++++++++++ pymongo/synchronous/client_session.py | 58 +++++++++++++++------------ 2 files changed, 55 insertions(+), 25 deletions(-) create mode 100644 pymongo/client_session.py diff --git a/pymongo/client_session.py b/pymongo/client_session.py new file mode 100644 index 0000000000..1a3af44e12 --- /dev/null +++ b/pymongo/client_session.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous ClientSession API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.client_session import * # noqa: F403 +from pymongo.synchronous.client_session import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["ClientSession", "SessionOptions", "TransactionOptions"] # noqa: F405 diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index 3efc624c04..f1d680fc0a 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -156,7 +156,6 @@ from bson.int64 import Int64 from bson.timestamp import Timestamp from pymongo import _csot -from pymongo.cursor import _ConnectionManager from pymongo.errors import ( ConfigurationError, ConnectionFailure, @@ -165,20 +164,23 @@ PyMongoError, WTimeoutError, ) -from pymongo.helpers import _RETRYABLE_ERROR_CODES +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES from pymongo.operations import _Op from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.cursor import _ConnectionManager from pymongo.write_concern import WriteConcern if TYPE_CHECKING: from types import TracebackType - from pymongo.pool import Connection - from pymongo.server import Server + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server from pymongo.typings import ClusterTime, _Address +_IS_SYNC = True + class SessionOptions: """Options for a new :class:`ClientSession`. @@ -476,7 +478,7 @@ def _within_time_limit(start_time: float) -> bool: _T = TypeVar("_T") if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient + from pymongo.synchronous.mongo_client import MongoClient class ClientSession: @@ -526,9 +528,15 @@ def _end_session(self, lock: bool) -> None: # is in the committed state when the session is discarded. self._unpin() finally: - self._client._return_server_session(self._server_session, lock) + self._client._return_server_session(self._server_session) self._server_session = None + def _end_implicit_session(self) -> None: + # Implicit sessions can't be part of transactions or pinned connections + if self._server_session is not None: + self._client._return_server_session(self._server_session) + self._server_session = None + def _check_ended(self) -> None: if self._server_session is None: raise InvalidOperation("Cannot use ended session") @@ -1097,7 +1105,7 @@ def inc_transaction_id(self) -> None: class _ServerSessionPool(collections.deque): """Pool of _ServerSession objects. - This class is not thread-safe, access it while holding the Topology lock. + This class is thread-safe. """ def __init__(self, *args: Any, **kwargs: Any): @@ -1110,8 +1118,11 @@ def reset(self) -> None: def pop_all(self) -> list[_ServerSession]: ids = [] - while self: - ids.append(self.pop().session_id) + while True: + try: + ids.append(self.pop().session_id) + except IndexError: + break return ids def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: @@ -1123,23 +1134,17 @@ def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerS self._clear_stale(session_timeout_minutes) # The most recently used sessions are on the left. - while self: - s = self.popleft() + while True: + try: + s = self.popleft() + except IndexError: + break if not s.timed_out(session_timeout_minutes): return s return _ServerSession(self.generation) - def return_server_session( - self, server_session: _ServerSession, session_timeout_minutes: Optional[int] - ) -> None: - if session_timeout_minutes is not None: - self._clear_stale(session_timeout_minutes) - if server_session.timed_out(session_timeout_minutes): - return - self.return_server_session_no_lock(server_session) - - def return_server_session_no_lock(self, server_session: _ServerSession) -> None: + def return_server_session(self, server_session: _ServerSession) -> None: # Discard sessions from an old pool to avoid duplicate sessions in the # child process after a fork. if server_session.generation == self.generation and not server_session.dirty: @@ -1147,9 +1152,12 @@ def return_server_session_no_lock(self, server_session: _ServerSession) -> None: def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: # Clear stale sessions. The least recently used are on the right. - while self: - if self[-1].timed_out(session_timeout_minutes): - self.pop() - else: + while True: + try: + s = self.pop() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + self.append(s) # The remaining sessions also haven't timed out. break From 4ef252bfe3b0ed3cc24ceb41a264c68f03392948 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:02 -0500 Subject: [PATCH 1456/2111] PYTHON-4747 Rename pymongo/encryption.py to pymongo/synchronous/encryption.py --- pymongo/{ => synchronous}/encryption.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/encryption.py (100%) diff --git a/pymongo/encryption.py b/pymongo/synchronous/encryption.py similarity index 100% rename from pymongo/encryption.py rename to pymongo/synchronous/encryption.py From 5e4a5e7454f893b97fb4d19d2b6df338ab830939 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:02 -0500 Subject: [PATCH 1457/2111] PYTHON-4747 Sync encryption.py to master --- pymongo/encryption.py | 22 ++++++ pymongo/synchronous/encryption.py | 117 +++++++++++++++++++++--------- 2 files changed, 105 insertions(+), 34 deletions(-) create mode 100644 pymongo/encryption.py diff --git a/pymongo/encryption.py b/pymongo/encryption.py new file mode 100644 index 0000000000..5bc2a75909 --- /dev/null +++ b/pymongo/encryption.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Encryption API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.encryption import * # noqa: F403 +from pymongo.synchronous.encryption import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["Algorithm", "ClientEncryption", "QueryType", "RewrapManyDataKeyResult"] # noqa: F405 diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index c7f02766c9..3849cf3f2b 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -25,6 +25,7 @@ TYPE_CHECKING, Any, Dict, + Generator, Generic, Iterator, Mapping, @@ -36,11 +37,15 @@ ) try: - from pymongocrypt.auto_encrypter import AutoEncrypter # type:ignore[import] from pymongocrypt.errors import MongoCryptError # type:ignore[import] - from pymongocrypt.explicit_encrypter import ExplicitEncrypter # type:ignore[import] from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] - from pymongocrypt.state_machine import MongoCryptCallback # type:ignore[import] + from pymongocrypt.synchronous.auto_encrypter import AutoEncrypter # type:ignore[import] + from pymongocrypt.synchronous.explicit_encrypter import ( # type:ignore[import] + ExplicitEncrypter, + ) + from pymongocrypt.synchronous.state_machine import ( # type:ignore[import] + MongoCryptCallback, + ) _HAVE_PYMONGOCRYPT = True except ImportError: @@ -53,11 +58,8 @@ from bson.errors import BSONError from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson from pymongo import _csot -from pymongo.collection import Collection from pymongo.common import CONNECT_TIMEOUT -from pymongo.cursor import Cursor from pymongo.daemon import _spawn_daemon -from pymongo.database import Database from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts from pymongo.errors import ( ConfigurationError, @@ -67,13 +69,17 @@ PyMongoError, ServerSelectionTimeoutError, ) -from pymongo.mongo_client import MongoClient -from pymongo.network import BLOCKING_IO_ERRORS +from pymongo.network_layer import BLOCKING_IO_ERRORS, sendall from pymongo.operations import UpdateOne -from pymongo.pool import PoolOptions, _configured_socket, _raise_connection_failure +from pymongo.pool_options import PoolOptions from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.pool import _configured_socket, _raise_connection_failure from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -81,6 +87,9 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext + +_IS_SYNC = True + _HTTPS_PORT = 443 _KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT _MONGOCRYPTD_TIMEOUT_MS = 10000 @@ -167,7 +176,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: try: conn = _configured_socket((host, port), opts) try: - conn.sendall(message) + sendall(conn, message) while kms_context.bytes_needed > 0: # CSOT: update timeout. conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) @@ -185,9 +194,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: # Wrap I/O errors in PyMongo exceptions. _raise_connection_failure((host, port), error) - def collection_info( - self, database: Database[Mapping[str, Any]], filter: bytes - ) -> Optional[bytes]: + def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: """Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads @@ -241,7 +248,7 @@ def mark_command(self, database: str, cmd: bytes) -> bytes: ) return res.raw - def fetch_keys(self, filter: bytes) -> Iterator[bytes]: + def fetch_keys(self, filter: bytes) -> Generator[bytes, None]: """Yields one or more keys from the key vault. :param filter: The filter to pass to find. @@ -371,13 +378,16 @@ def _get_internal_client( ) io_callbacks = _EncryptionIO( # type:ignore[misc] - metadata_client, key_vault_coll, mongocryptd_client, opts + metadata_client, + key_vault_coll, # type:ignore[arg-type] + mongocryptd_client, + opts, ) self._auto_encrypter = AutoEncrypter( io_callbacks, - MongoCryptOptions( - opts._kms_providers, - schema_map, + _create_mongocrypt_options( + kms_providers=opts._kms_providers, + schema_map=schema_map, crypt_shared_lib_path=opts._crypt_shared_lib_path, crypt_shared_lib_required=opts._crypt_shared_lib_required, bypass_encryption=opts._bypass_auto_encryption, @@ -446,11 +456,15 @@ class Algorithm(str, enum.Enum): .. versionadded:: 4.2 """ + RANGE = "Range" + """Range. + + .. versionadded:: 4.9 + """ RANGEPREVIEW = "RangePreview" - """RangePreview. + """**DEPRECATED** - RangePreview. - .. note:: Support for Range queries is in beta. - Backwards-breaking changes may be made before the final release. + .. note:: Support for RangePreview is deprecated. Use :attr:`Algorithm.RANGE` instead. .. versionadded:: 4.4 """ @@ -465,12 +479,27 @@ class QueryType(str, enum.Enum): EQUALITY = "equality" """Used to encrypt a value for an equality query.""" - RANGEPREVIEW = "rangePreview" + RANGE = "range" """Used to encrypt a value for a range query. - .. note:: Support for Range queries is in beta. - Backwards-breaking changes may be made before the final release. -""" + .. versionadded:: 4.9 + """ + + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - Used to encrypt a value for a rangePreview query. + + .. note:: Support for RangePreview is deprecated. Use :attr:`QueryType.RANGE` instead. + + .. versionadded:: 4.4 + """ + + +def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: + opts = MongoCryptOptions(**kwargs) + # Opt into range V2 encryption. + if hasattr(opts, "enable_range_v2"): + opts.enable_range_v2 = True + return opts class ClientEncryption(Generic[_DocumentType]): @@ -559,12 +588,15 @@ def __init__( raise ConfigurationError( "client-side field level encryption requires the pymongocrypt " "library: install a compatible version with: " - "python -m pip install 'pymongo[encryption]'" + "python -m pip install --upgrade 'pymongo[encryption]'" ) if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + if not isinstance(key_vault_client, MongoClient): + raise TypeError(f"MongoClient required but given {type(key_vault_client)}") + self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace self._key_vault_client = key_vault_client @@ -580,7 +612,8 @@ def __init__( None, key_vault_coll, None, opts ) self._encryption = ExplicitEncrypter( - self._io_callbacks, MongoCryptOptions(kms_providers, None) + self._io_callbacks, + _create_mongocrypt_options(kms_providers=kms_providers, schema_map=None), ) # Use the same key vault collection as the callback. assert self._io_callbacks.key_vault_coll is not None @@ -649,6 +682,11 @@ def create_encrypted_collection( https://mongodb.com/docs/manual/reference/command/create """ + if not isinstance(database, Database): + raise TypeError( + f"create_encrypted_collection() requires a Database but {type(database)} given" + ) + encrypted_fields = deepcopy(encrypted_fields) for i, field in enumerate(encrypted_fields["fields"]): if isinstance(field, dict) and field.get("keyId") is None: @@ -724,6 +762,9 @@ def create_data_key( Secret Data managed object. - `endpoint` (string): Optional. Host with optional port, e.g. "example.vault.azure.net:". + - `delegated` (bool): Optional. If True (recommended), the + KMIP server will perform encryption and decryption. If + delegated is not provided, defaults to false. :param key_alt_names: An optional list of string alternate names used to reference a key. If a key is created with alternate @@ -826,10 +867,14 @@ def encrypt( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - :param range_opts: Experimental only, not intended for public use. + :param range_opts: Index options for `range` queries. See + :class:`RangeOpts` for some valid options. :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + .. versionchanged:: 4.9 + Added the `range_opts` parameter. + .. versionchanged:: 4.7 ``key_id`` can now be passed in as a :class:`uuid.UUID`. @@ -878,10 +923,14 @@ def encrypt_expression( when the algorithm is :attr:`Algorithm.INDEXED`. An integer value *must* be given when the :attr:`Algorithm.INDEXED` algorithm is used. - :param range_opts: Experimental only, not intended for public use. + :param range_opts: Index options for `range` queries. See + :class:`RangeOpts` for some valid options. :return: The encrypted expression, a :class:`~bson.RawBSONDocument`. + .. versionchanged:: 4.9 + Added the `range_opts` parameter. + .. versionchanged:: 4.7 ``key_id`` can now be passed in as a :class:`uuid.UUID`. @@ -963,10 +1012,10 @@ def delete_key(self, id: Binary) -> DeleteResult: def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. - :param `id`: The UUID of a key a which must be a + :param id: The UUID of a key a which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - :param `key_alt_name`: The key alternate name to add. + :param key_alt_name: The key alternate name to add. :return: The previous version of the key document. @@ -995,10 +1044,10 @@ def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSON Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. - :param `id`: The UUID of a key a which must be a + :param id: The UUID of a key a which must be a :class:`~bson.binary.Binary` with subtype 4 ( :attr:`~bson.binary.UUID_SUBTYPE`). - :param `key_alt_name`: The key alternate name to remove. + :param key_alt_name: The key alternate name to remove. :return: Returns the previous version of the key document. @@ -1037,7 +1086,7 @@ def rewrap_many_data_key( :param filter: A document used to filter the data keys. :param provider: The new KMS provider to use to encrypt the data keys, or ``None`` to use the current KMS provider(s). - :param `master_key`: The master key fields corresponding to the new KMS + :param master_key: The master key fields corresponding to the new KMS provider when ``provider`` is not ``None``. :return: A :class:`RewrapManyDataKeyResult`. From 6fc461183e42abe580945df9a78f23fbccadfa04 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:02 -0500 Subject: [PATCH 1458/2111] PYTHON-4747 Rename pymongo/auth_oidc.py to pymongo/synchronous/auth_oidc.py --- pymongo/{ => synchronous}/auth_oidc.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/auth_oidc.py (100%) diff --git a/pymongo/auth_oidc.py b/pymongo/synchronous/auth_oidc.py similarity index 100% rename from pymongo/auth_oidc.py rename to pymongo/synchronous/auth_oidc.py From bdf5ac6a5d7681105ae71e1e47a81df6b5647ba3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:02 -0500 Subject: [PATCH 1459/2111] PYTHON-4747 Sync auth_oidc.py to master --- pymongo/auth_oidc.py | 23 +++++++ pymongo/synchronous/auth_oidc.py | 103 +++++-------------------------- 2 files changed, 38 insertions(+), 88 deletions(-) create mode 100644 pymongo/auth_oidc.py diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py new file mode 100644 index 0000000000..4ac266de5f --- /dev/null +++ b/pymongo/auth_oidc.py @@ -0,0 +1,23 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous AuthOIDC API for compatibility.""" +from __future__ import annotations + +from pymongo.auth_oidc_shared import * # noqa: F403 +from pymongo.synchronous.auth_oidc import * # noqa: F403 +from pymongo.synchronous.auth_oidc import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["OIDCCallback", "OIDCCallbackContext", "OIDCCallbackResult", "OIDCIdPInfo"] # noqa: F405 diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py index bfe2340f0a..6381a408ab 100644 --- a/pymongo/synchronous/auth_oidc.py +++ b/pymongo/synchronous/auth_oidc.py @@ -15,75 +15,33 @@ """MONGODB-OIDC Authentication helpers.""" from __future__ import annotations -import abc -import os import threading import time from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union -from urllib.parse import quote import bson from bson.binary import Binary -from pymongo._azure_helpers import _get_azure_response from pymongo._csot import remaining -from pymongo._gcp_helpers import _get_gcp_response +from pymongo.auth_oidc_shared import ( + CALLBACK_VERSION, + HUMAN_CALLBACK_TIMEOUT_SECONDS, + MACHINE_CALLBACK_TIMEOUT_SECONDS, + TIME_BETWEEN_CALLS_SECONDS, + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + _OIDCProperties, +) from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.helpers import _AUTHENTICATION_FAILURE_CODE +from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE if TYPE_CHECKING: - from pymongo.auth import MongoCredential - from pymongo.pool import Connection + from pymongo.auth_shared import MongoCredential + from pymongo.synchronous.pool import Connection - -@dataclass -class OIDCIdPInfo: - issuer: str - clientId: Optional[str] = field(default=None) - requestScopes: Optional[list[str]] = field(default=None) - - -@dataclass -class OIDCCallbackContext: - timeout_seconds: float - username: str - version: int - refresh_token: Optional[str] = field(default=None) - idp_info: Optional[OIDCIdPInfo] = field(default=None) - - -@dataclass -class OIDCCallbackResult: - access_token: str - expires_in_seconds: Optional[float] = field(default=None) - refresh_token: Optional[str] = field(default=None) - - -class OIDCCallback(abc.ABC): - """A base class for defining OIDC callbacks.""" - - @abc.abstractmethod - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - """Convert the given BSON value into our own type.""" - - -@dataclass -class _OIDCProperties: - callback: Optional[OIDCCallback] = field(default=None) - human_callback: Optional[OIDCCallback] = field(default=None) - environment: Optional[str] = field(default=None) - allowed_hosts: list[str] = field(default_factory=list) - token_resource: Optional[str] = field(default=None) - username: str = "" - - -"""Mechanism properties for MONGODB-OIDC authentication.""" - -TOKEN_BUFFER_MINUTES = 5 -HUMAN_CALLBACK_TIMEOUT_SECONDS = 5 * 60 -CALLBACK_VERSION = 1 -MACHINE_CALLBACK_TIMEOUT_SECONDS = 60 -TIME_BETWEEN_CALLS_SECONDS = 0.1 +_IS_SYNC = True def _get_authenticator( @@ -115,37 +73,6 @@ def _get_authenticator( return credentials.cache.data -class _OIDCTestCallback(OIDCCallback): - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - token_file = os.environ.get("OIDC_TOKEN_FILE") - if not token_file: - raise RuntimeError( - 'MONGODB-OIDC with an "test" provider requires "OIDC_TOKEN_FILE" to be set' - ) - with open(token_file) as fid: - return OIDCCallbackResult(access_token=fid.read().strip()) - - -class _OIDCAzureCallback(OIDCCallback): - def __init__(self, token_resource: str) -> None: - self.token_resource = quote(token_resource) - - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - resp = _get_azure_response(self.token_resource, context.username, context.timeout_seconds) - return OIDCCallbackResult( - access_token=resp["access_token"], expires_in_seconds=resp["expires_in"] - ) - - -class _OIDCGCPCallback(OIDCCallback): - def __init__(self, token_resource: str) -> None: - self.token_resource = quote(token_resource) - - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - resp = _get_gcp_response(self.token_resource, context.timeout_seconds) - return OIDCCallbackResult(access_token=resp["access_token"]) - - @dataclass class _OIDCAuthenticator: username: str From 6bdf583f3d1df3a8803fa0e550b6447945aa49bb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:03 -0500 Subject: [PATCH 1460/2111] PYTHON-4747 Rename pymongo/collection.py to pymongo/synchronous/collection.py --- pymongo/{ => synchronous}/collection.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/collection.py (100%) diff --git a/pymongo/collection.py b/pymongo/synchronous/collection.py similarity index 100% rename from pymongo/collection.py rename to pymongo/synchronous/collection.py From 4e1035ece889b1526ecb4d305e11c5f6d96a15c7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:03 -0500 Subject: [PATCH 1461/2111] PYTHON-4747 Sync collection.py to master --- pymongo/collection.py | 25 + pymongo/synchronous/collection.py | 915 ++++++++++++++++-------------- 2 files changed, 518 insertions(+), 422 deletions(-) create mode 100644 pymongo/collection.py diff --git a/pymongo/collection.py b/pymongo/collection.py new file mode 100644 index 0000000000..f726ed0376 --- /dev/null +++ b/pymongo/collection.py @@ -0,0 +1,25 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Collection API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.collection import * # noqa: F403 +from pymongo.synchronous.collection import __doc__ as original_doc + +__doc__ = original_doc +__all__ = [ # noqa: F405 + "Collection", + "ReturnDocument", +] diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index ddfe9f1df8..93e24432e5 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -15,6 +15,7 @@ """Collection level utilities for Mongo.""" from __future__ import annotations +import warnings from collections import abc from typing import ( TYPE_CHECKING, @@ -40,24 +41,16 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from bson.timestamp import Timestamp -from pymongo import ASCENDING, _csot, common, helpers, message -from pymongo.aggregation import ( - _CollectionAggregationCommand, - _CollectionRawAggregationCommand, -) -from pymongo.bulk import _Bulk -from pymongo.change_stream import CollectionChangeStream +from pymongo import ASCENDING, _csot, common, helpers_shared, message from pymongo.collation import validate_collation_or_none -from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor from pymongo.common import _ecoc_coll_name, _esc_coll_name -from pymongo.cursor import Cursor, RawBatchCursor from pymongo.errors import ( ConfigurationError, InvalidName, InvalidOperation, OperationFailure, ) -from pymongo.helpers import _check_write_command_response +from pymongo.helpers_shared import _check_write_command_response from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS from pymongo.operations import ( DeleteMany, @@ -72,7 +65,7 @@ _IndexList, _Op, ) -from pymongo.read_concern import DEFAULT_READ_CONCERN, ReadConcern +from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.results import ( BulkWriteResult, @@ -81,9 +74,25 @@ InsertOneResult, UpdateResult, ) +from pymongo.synchronous.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.synchronous.bulk import _Bulk +from pymongo.synchronous.change_stream import CollectionChangeStream +from pymongo.synchronous.command_cursor import ( + CommandCursor, + RawBatchCommandCursor, +) +from pymongo.synchronous.cursor import ( + Cursor, + RawBatchCursor, +) from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean +_IS_SYNC = True + T = TypeVar("T") _FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} @@ -114,12 +123,14 @@ class ReturnDocument: if TYPE_CHECKING: - from pymongo.aggregation import _AggregationCommand - from pymongo.client_session import ClientSession + import bson from pymongo.collation import Collation - from pymongo.database import Database - from pymongo.pool import Connection - from pymongo.server import Server + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.aggregation import _AggregationCommand + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server class Collection(common.BaseObject, Generic[_DocumentType]): @@ -155,8 +166,8 @@ def __init__( :param database: the database to get a collection from :param name: the name of the collection to get - :param create: if ``True``, force collection - creation even without options being set + :param create: If ``True``, force collection + creation even without options being set. :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. If ``None`` (the default) database.codec_options is used. @@ -171,11 +182,11 @@ def __init__( :param collation: An instance of :class:`~pymongo.collation.Collation`. If a collation is provided, it will be passed to the create collection command. - :param session: a + :param session: A :class:`~pymongo.client_session.ClientSession` that is used with - the create collection command - :param kwargs: additional keyword arguments will - be passed as options for the create collection command + the create collection command. + :param kwargs: Additional keyword arguments will + be passed as options for the create collection command. .. versionchanged:: 4.2 Added the ``clusteredIndex`` and ``encryptedFields`` parameters. @@ -220,6 +231,10 @@ def __init__( ) if not isinstance(name, str): raise TypeError("name must be an instance of str") + from pymongo.synchronous.database import Database + + if not isinstance(database, Database): + raise TypeError(f"Collection requires a Database but {type(database)} given") if not name or ".." in name: raise InvalidName("collection names cannot be empty") @@ -229,33 +244,315 @@ def __init__( raise InvalidName("collection names must not start or end with '.': %r" % name) if "\x00" in name: raise InvalidName("collection names must not contain the null character") - collation = validate_collation_or_none(kwargs.pop("collation", None)) - self.__database: Database[_DocumentType] = database - self.__name = name - self.__full_name = f"{self.__database.name}.{self.__name}" - self.__write_response_codec_options = self.codec_options._replace( + self._database: Database[_DocumentType] = database + self._name = name + self._full_name = f"{self._database.name}.{self._name}" + self._write_response_codec_options = self.codec_options._replace( unicode_decode_error_handler="replace", document_class=dict ) self._timeout = database.client.options.timeout - encrypted_fields = kwargs.pop("encryptedFields", None) - if create or kwargs or collation: - if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) - opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} - self.__create( - _esc_coll_name(encrypted_fields, name), opts, None, session, qev2_required=True + + if create or kwargs: + if _IS_SYNC: + warnings.warn( + "The `create` and `kwargs` arguments to Collection are deprecated and will be removed in PyMongo 5.0", + DeprecationWarning, + stacklevel=2, ) - self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) - self.create_index([("__safeContent__", ASCENDING)], session) + self._create(kwargs, session) # type: ignore[unused-coroutine] else: - self.__create(name, kwargs, collation, session) + raise ValueError("Collection does not support the `create` or `kwargs` arguments.") + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a sub-collection of this collection by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + full_name = f"{self._name}.{name}" + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + return Collection( + self._database, + f"{self._name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._database!r}, {self._name!r})" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Collection): + return self._database == other.database and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._database, self._name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) + + @property + def full_name(self) -> str: + """The full name of this :class:`Collection`. + + The full name is of the form `database_name.collection_name`. + """ + return self._full_name + + @property + def name(self) -> str: + """The name of this :class:`Collection`.""" + return self._name + + @property + def database(self) -> Database[_DocumentType]: + """The :class:`~pymongo.database.Database` that this + :class:`Collection` is a part of. + """ + return self._database + + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: + """Get a clone of this collection changing the specified settings. + + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) + >>> coll1.read_preference + Primary() + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Collection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Collection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Collection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Collection` + is used. + """ + return Collection( + self._database, + self._name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[ClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") + if raw_wc is not None: + return WriteConcern(**raw_wc) + else: + return self._write_concern_for(session) + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Collection' object is not iterable") + + next = __next__ + + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" + if "." not in self._name: + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self._name + ) + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you meant to " + f"call the '%s' method on a '{type(self).__name__}' object it is " + "failing because no such method exists." % self._name.split(".")[-1] + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> CollectionChangeStream[_DocumentType]: + """Watch changes on this collection. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.CollectionChangeStream` cursor which + iterates over changes on this collection. + + .. code-block:: python + + with db.collection.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.CollectionChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.CollectionChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with db.coll.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + .. note:: Using this helper method is preferred to directly calling + :meth:`~pymongo.collection.Collection.aggregate` with a + ``$changeStream`` stage, for the purpose of supporting + resumability. + + .. warning:: This Collection's :attr:`read_concern` must be + ``ReadConcern("majority")`` in order to use the ``$changeStream`` + stage. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionchanged:: 3.7 + Added the ``start_at_operation_time`` parameter. + + .. versionadded:: 3.6 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = CollectionChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream def _conn_for_writes( self, session: Optional[ClientSession], operation: str ) -> ContextManager[Connection]: - return self.__database.client._conn_for_writes(session, operation) + return self._database.client._conn_for_writes(session, operation) def _command( self, @@ -297,9 +594,9 @@ def _command( :return: The result document. """ - with self.__database.client._tmp_session(session) as s: + with self._database.client._tmp_session(session) as s: return conn.command( - self.__database.name, + self._database.name, command, read_preference or self._read_preference_for(session), codec_options or self.codec_options, @@ -310,12 +607,12 @@ def _command( parse_write_concern_error=True, collation=collation, session=s, - client=self.__database.client, + client=self._database.client, retryable_write=retryable_write, user_fields=user_fields, ) - def __create( + def _create_helper( self, name: str, options: MutableMapping[str, Any], @@ -331,136 +628,49 @@ def __create( if options: if "size" in options: - options["size"] = float(options["size"]) - cmd.update(options) - with self._conn_for_writes(session, operation=_Op.CREATE) as conn: - if qev2_required and conn.max_wire_version < 21: - raise ConfigurationError( - "Driver support of Queryable Encryption is incompatible with server. " - "Upgrade server to use Queryable Encryption. " - f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" - ) - - self._command( - conn, - cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=self._write_concern_for(session), - collation=collation, - session=session, - ) - - def __getattr__(self, name: str) -> Collection[_DocumentType]: - """Get a sub-collection of this collection by name. - - Raises InvalidName if an invalid collection name is used. - - :param name: the name of the collection to get - """ - if name.startswith("_"): - full_name = f"{self.__name}.{name}" - raise AttributeError( - f"Collection has no attribute {name!r}. To access the {full_name}" - f" collection, use database['{full_name}']." - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> Collection[_DocumentType]: - return Collection( - self.__database, - f"{self.__name}.{name}", - False, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - - def __repr__(self) -> str: - return f"Collection({self.__database!r}, {self.__name!r})" - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Collection): - return self.__database == other.database and self.__name == other.name - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash((self.__database, self.__name)) - - def __bool__(self) -> NoReturn: - raise NotImplementedError( - "Collection objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: collection is not None" - ) - - @property - def full_name(self) -> str: - """The full name of this :class:`Collection`. - - The full name is of the form `database_name.collection_name`. - """ - return self.__full_name - - @property - def name(self) -> str: - """The name of this :class:`Collection`.""" - return self.__name - - @property - def database(self) -> Database[_DocumentType]: - """The :class:`~pymongo.database.Database` that this - :class:`Collection` is a part of. - """ - return self.__database - - def with_options( - self, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> Collection[_DocumentType]: - """Get a clone of this collection changing the specified settings. + options["size"] = float(options["size"]) + cmd.update(options) + with self._conn_for_writes(session, operation=_Op.CREATE) as conn: + if qev2_required and conn.max_wire_version < 21: + raise ConfigurationError( + "Driver support of Queryable Encryption is incompatible with server. " + "Upgrade server to use Queryable Encryption. " + f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) - >>> coll1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) - >>> coll1.read_preference - Primary() - >>> coll2.read_preference - Secondary(tag_sets=None) + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + collation=collation, + session=session, + ) - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Collection` - is used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Collection` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Collection` - is used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Collection` - is used. - """ - return Collection( - self.__database, - self.__name, - False, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern, - ) + def _create( + self, + options: MutableMapping[str, Any], + session: Optional[ClientSession], + ) -> None: + collation = validate_collation_or_none(options.pop("collation", None)) + encrypted_fields = options.pop("encryptedFields", None) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self._create_helper( + _esc_coll_name(encrypted_fields, self._name), + opts, + None, + session, + qev2_required=True, + ) + self._create_helper(_ecoc_coll_name(encrypted_fields, self._name), opts, None, session) + self._create_helper( + self._name, options, collation, session, encrypted_fields=encrypted_fields + ) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self._create_helper(self._name, options, collation, session) @_csot.apply def bulk_write( @@ -584,18 +794,18 @@ def _insert_command( command["bypassDocumentValidation"] = True result = conn.command( - self.__database.name, + self._database.name, command, write_concern=write_concern, - codec_options=self.__write_response_codec_options, + codec_options=self._write_response_codec_options, session=session, - client=self.__database.client, + client=self._database.client, retryable_write=retryable_write, ) _check_write_command_response(result) - self.__database.client._retryable_write( + self._database.client._retryable_write( acknowledged, _insert_command, session, operation=_Op.INSERT ) @@ -788,7 +998,7 @@ def _update( "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." ) if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers_shared._index_document(hint) update_doc["hint"] = hint command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} if let is not None: @@ -803,14 +1013,16 @@ def _update( # The command result has to be published for APM unmodified # so we make a shallow copy here before adding updatedExisting. - result = conn.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, + result = ( + conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) ).copy() _check_write_command_response(result) # Add the updatedExisting field for compatibility. @@ -869,7 +1081,7 @@ def _update( comment=comment, ) - return self.__database.client._retryable_write( + return self._database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, _update, session, @@ -1224,15 +1436,15 @@ def drop( .. versionchanged:: 3.6 Added ``session`` parameter. """ - dbo = self.__database.client.get_database( - self.__database.name, + dbo = self._database.client.get_database( + self._database.name, self.codec_options, self.read_preference, self.write_concern, self.read_concern, ) dbo.drop_collection( - self.__name, session=session, comment=comment, encrypted_fields=encrypted_fields + self._name, session=session, comment=comment, encrypted_fields=encrypted_fields ) def _delete( @@ -1267,7 +1479,7 @@ def _delete( "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." ) if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers_shared._index_document(hint) delete_doc["hint"] = hint command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} @@ -1280,12 +1492,12 @@ def _delete( # Delete command. result = conn.command( - self.__database.name, + self._database.name, command, write_concern=write_concern, - codec_options=self.__write_response_codec_options, + codec_options=self._write_response_codec_options, session=session, - client=self.__database.client, + client=self._database.client, retryable_write=retryable_write, ) _check_write_command_response(result) @@ -1324,7 +1536,7 @@ def _delete( comment=comment, ) - return self.__database.client._retryable_write( + return self._database.client._retryable_write( (write_concern or self.write_concern).acknowledged and not multi, _delete, session, @@ -1711,7 +1923,7 @@ def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_Documen .. versionadded:: 3.6 """ # OP_MSG is required to support encryption. - if self.__database.client._encrypter: + if self._database.client._encrypter: raise InvalidOperation("find_raw_batches does not support auto encryption") return RawBatchCursor(self, *args, **kwargs) @@ -1731,7 +1943,7 @@ def _count_cmd( cmd, read_preference=read_preference, allowable_errors=["ns missing"], - codec_options=self.__write_response_codec_options, + codec_options=self._write_response_codec_options, read_concern=self.read_concern, collation=collation, session=session, @@ -1754,7 +1966,7 @@ def _aggregate_one_result( cmd, read_preference, allowable_errors=[26], # Ignore NamespaceNotFound. - codec_options=self.__write_response_codec_options, + codec_options=self._write_response_codec_options, read_concern=self.read_concern, collation=collation, session=session, @@ -1803,7 +2015,7 @@ def _cmd( conn: Connection, read_preference: Optional[_ServerMode], ) -> int: - cmd: dict[str, Any] = {"count": self.__name} + cmd: dict[str, Any] = {"count": self._name} cmd.update(kwargs) return self._count_cmd(session, conn, read_preference, cmd, collation=None) @@ -1879,9 +2091,9 @@ def count_documents( if comment is not None: kwargs["comment"] = comment pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) - cmd = {"aggregate": self.__name, "pipeline": pipeline, "cursor": {}} + cmd = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} if "hint" in kwargs and not isinstance(kwargs["hint"], str): - kwargs["hint"] = helpers._index_document(kwargs["hint"]) + kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd.update(kwargs) @@ -1900,12 +2112,15 @@ def _cmd( def _retryable_non_cursor_read( self, - func: Callable[[Optional[ClientSession], Server, Connection, Optional[_ServerMode]], T], + func: Callable[ + [Optional[ClientSession], Server, Connection, Optional[_ServerMode]], + T, + ], session: Optional[ClientSession], operation: str, ) -> T: """Non-cursor read helper to handle implicit session creation.""" - client = self.__database.client + client = self._database.client with client._tmp_session(session) as s: return client._retryable_read(func, self._read_preference_for(s), s, operation) @@ -1954,10 +2169,10 @@ def create_indexes( common.validate_list("indexes", indexes) if comment is not None: kwargs["comment"] = comment - return self.__create_indexes(indexes, session, **kwargs) + return self._create_indexes(indexes, session, **kwargs) @_csot.apply - def __create_indexes( + def _create_indexes( self, indexes: Sequence[IndexModel], session: Optional[ClientSession], **kwargs: Any ) -> list[str]: """Internal createIndexes helper. @@ -2117,7 +2332,7 @@ def create_index( if comment is not None: cmd_options["comment"] = comment index = IndexModel(keys, **kwargs) - return self.__create_indexes([index], session, **cmd_options)[0] + return (self._create_indexes([index], session, **cmd_options))[0] def drop_indexes( self, @@ -2150,7 +2365,7 @@ def drop_indexes( """ if comment is not None: kwargs["comment"] = comment - self.drop_index("*", session=session, **kwargs) + self._drop_index("*", session=session, **kwargs) @_csot.apply def drop_index( @@ -2199,14 +2414,24 @@ def drop_index( when connected to MongoDB >= 3.4. """ + self._drop_index(index_or_name, session, comment, **kwargs) + + @_csot.apply + def _drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: name = index_or_name if isinstance(index_or_name, list): - name = helpers._gen_index_name(index_or_name) + name = helpers_shared._gen_index_name(index_or_name) if not isinstance(name, str): raise TypeError("index_or_name must be an instance of str or list") - cmd = {"dropIndexes": self.__name, "index": name} + cmd = {"dropIndexes": self._name, "index": name} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment @@ -2247,6 +2472,13 @@ def list_indexes( .. versionadded:: 3.0 """ + return self._list_indexes(session, comment) + + def _list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: codec_options: CodecOptions = CodecOptions(SON) coll = cast( Collection[MutableMapping[str, Any]], @@ -2261,14 +2493,14 @@ def _cmd( conn: Connection, read_preference: _ServerMode, ) -> CommandCursor[MutableMapping[str, Any]]: - cmd = {"listIndexes": self.__name, "cursor": {}} + cmd = {"listIndexes": self._name, "cursor": {}} if comment is not None: cmd["comment"] = comment try: - cursor = self._command(conn, cmd, read_preference, codec_options, session=session)[ - "cursor" - ] + cursor = ( + self._command(conn, cmd, read_preference, codec_options, session=session) + )["cursor"] except OperationFailure as exc: # Ignore NamespaceNotFound errors to match the behavior # of reading from *.system.indexes. @@ -2286,8 +2518,8 @@ def _cmd( cmd_cursor._maybe_pin_connection(conn) return cmd_cursor - with self.__database.client._tmp_session(session, False) as s: - return self.__database.client._retryable_read( + with self._database.client._tmp_session(session, False) as s: + return self._database.client._retryable_read( _cmd, read_pref, s, operation=_Op.LIST_INDEXES ) @@ -2325,7 +2557,7 @@ def index_information( .. versionchanged:: 3.6 Added ``session`` parameter. """ - cursor = self.list_indexes(session=session, comment=comment) + cursor = self._list_indexes(session=session, comment=comment) info = {} for index in cursor: index["key"] = list(index["key"].items()) @@ -2378,7 +2610,7 @@ def list_search_indexes( user_fields={"cursor": {"firstBatch": 1}}, ) - return self.__database.client._retryable_read( + return self._database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), # type: ignore[arg-type] session, @@ -2414,7 +2646,7 @@ def create_search_index( """ if not isinstance(model, SearchIndexModel): model = SearchIndexModel(**model) - return self.create_search_indexes([model], session, comment, **kwargs)[0] + return (self._create_search_indexes([model], session, comment, **kwargs))[0] def create_search_indexes( self, @@ -2438,6 +2670,15 @@ def create_search_indexes( .. versionadded:: 4.5 """ + return self._create_search_indexes(models, session, comment, **kwargs) + + def _create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: if comment is not None: kwargs["comment"] = comment @@ -2482,7 +2723,7 @@ def drop_search_index( .. versionadded:: 4.5 """ - cmd = {"dropSearchIndex": self.__name, "name": name} + cmd = {"dropSearchIndex": self._name, "name": name} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment @@ -2518,7 +2759,7 @@ def update_search_index( .. versionadded:: 4.5 """ - cmd = {"updateSearchIndex": self.__name, "name": name, "definition": definition} + cmd = {"updateSearchIndex": self._name, "name": name, "definition": definition} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment @@ -2551,16 +2792,14 @@ def options( .. versionchanged:: 3.6 Added ``session`` parameter. """ - dbo = self.__database.client.get_database( - self.__database.name, + dbo = self._database.client.get_database( + self._database.name, self.codec_options, self.read_preference, self.write_concern, self.read_concern, ) - cursor = dbo.list_collections( - session=session, filter={"name": self.__name}, comment=comment - ) + cursor = dbo.list_collections(session=session, filter={"name": self._name}, comment=comment) result = None for doc in cursor: @@ -2601,7 +2840,7 @@ def _aggregate( user_fields={"cursor": {"firstBatch": 1}}, ) - return self.__database.client._retryable_read( + return self._database.client._retryable_read( cmd.get_cursor, cmd.get_read_preference(session), # type: ignore[arg-type] session, @@ -2692,7 +2931,7 @@ def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - with self.__database.client._tmp_session(session, close=False) as s: + with self._database.client._tmp_session(session, close=False) as s: return self._aggregate( _CollectionAggregationCommand, pipeline, @@ -2735,11 +2974,11 @@ def aggregate_raw_batches( .. versionadded:: 3.6 """ # OP_MSG is required to support encryption. - if self.__database.client._encrypter: + if self._database.client._encrypter: raise InvalidOperation("aggregate_raw_batches does not support auto encryption") if comment is not None: kwargs["comment"] = comment - with self.__database.client._tmp_session(session, close=False) as s: + with self._database.client._tmp_session(session, close=False) as s: return cast( RawBatchCursor[_DocumentType], self._aggregate( @@ -2752,144 +2991,6 @@ def aggregate_raw_batches( ), ) - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional[ClientSession] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - show_expanded_events: Optional[bool] = None, - ) -> CollectionChangeStream[_DocumentType]: - """Watch changes on this collection. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.CollectionChangeStream` cursor which - iterates over changes on this collection. - - .. code-block:: python - - with db.collection.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.CollectionChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.CollectionChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with db.collection.watch([{"$match": {"operationType": "insert"}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error("...") - - For a precise description of the resume process see the - `change streams specification`_. - - .. note:: Using this helper method is preferred to directly calling - :meth:`~pymongo.collection.Collection.aggregate` with a - ``$changeStream`` stage, for the purpose of supporting - resumability. - - .. warning:: This Collection's :attr:`read_concern` must be - ``ReadConcern("majority")`` in order to use the ``$changeStream`` - stage. - - :param pipeline: A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - :param full_document: The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - :param full_document_before_change: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - :param resume_after: A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - :param max_await_time_ms: The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - :param batch_size: The maximum number of documents to return - per batch. - :param collation: The :class:`~pymongo.collation.Collation` - to use for the aggregation. - :param start_at_operation_time: If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param start_after: The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - :param comment: A user-provided comment to attach to this - command. - :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - - :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. - - .. versionchanged:: 4.3 - Added `show_expanded_events` parameter. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionchanged:: 3.7 - Added the ``start_at_operation_time`` parameter. - - .. versionadded:: 3.6 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md - """ - return CollectionChangeStream( - self, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - show_expanded_events, - ) - @_csot.apply def rename( self, @@ -2936,22 +3037,22 @@ def rename( if "$" in new_name and not new_name.startswith("oplog.$main"): raise InvalidName("collection names must not contain '$'") - new_name = f"{self.__database.name}.{new_name}" - cmd = {"renameCollection": self.__full_name, "to": new_name} + new_name = f"{self._database.name}.{new_name}" + cmd = {"renameCollection": self._full_name, "to": new_name} cmd.update(kwargs) if comment is not None: cmd["comment"] = comment write_concern = self._write_concern_for_cmd(cmd, session) with self._conn_for_writes(session, operation=_Op.RENAME) as conn: - with self.__database.client._tmp_session(session) as s: + with self._database.client._tmp_session(session) as s: return conn.command( "admin", cmd, write_concern=write_concern, parse_write_concern_error=True, session=s, - client=self.__database.client, + client=self._database.client, ) def distinct( @@ -2998,7 +3099,7 @@ def distinct( """ if not isinstance(key, str): raise TypeError("key must be an instance of str") - cmd = {"distinct": self.__name, "key": key} + cmd = {"distinct": self._name, "key": key} if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") @@ -3014,28 +3115,21 @@ def _cmd( conn: Connection, read_preference: Optional[_ServerMode], ) -> list: - return self._command( - conn, - cmd, - read_preference=read_preference, - read_concern=self.read_concern, - collation=collation, - session=session, - user_fields={"values": 1}, + return ( + self._command( + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + ) )["values"] return self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) - def _write_concern_for_cmd( - self, cmd: Mapping[str, Any], session: Optional[ClientSession] - ) -> WriteConcern: - raw_wc = cmd.get("writeConcern") - if raw_wc is not None: - return WriteConcern(**raw_wc) - else: - return self._write_concern_for(session) - - def __find_and_modify( + def _find_and_modify( self, filter: Mapping[str, Any], projection: Optional[Union[Mapping[str, Any], Iterable[str]]], @@ -3055,25 +3149,25 @@ def __find_and_modify( "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd = {"findAndModify": self.__name, "query": filter, "new": return_document} + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} if let is not None: common.validate_is_mapping("let", let) cmd["let"] = let cmd.update(kwargs) if projection is not None: - cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") if sort is not None: - cmd["sort"] = helpers._index_document(sort) + cmd["sort"] = helpers_shared._index_document(sort) if upsert is not None: validate_boolean("upsert", upsert) cmd["upsert"] = upsert if hint is not None: if not isinstance(hint, str): - hint = helpers._index_document(hint) + hint = helpers_shared._index_document(hint) write_concern = self._write_concern_for_cmd(cmd, session) - def _find_and_modify( + def _find_and_modify_helper( session: Optional[ClientSession], conn: Connection, retryable_write: bool ) -> Any: acknowledged = write_concern.acknowledged @@ -3107,9 +3201,9 @@ def _find_and_modify( return out.get("value") - return self.__database.client._retryable_write( + return self._database.client._retryable_write( write_concern.acknowledged, - _find_and_modify, + _find_and_modify_helper, session, operation=_Op.FIND_AND_MODIFY, ) @@ -3199,7 +3293,7 @@ def find_one_and_delete( kwargs["remove"] = True if comment is not None: kwargs["comment"] = comment - return self.__find_and_modify( + return self._find_and_modify( filter, projection, sort, let=let, hint=hint, session=session, **kwargs ) @@ -3298,7 +3392,7 @@ def find_one_and_replace( kwargs["update"] = replacement if comment is not None: kwargs["comment"] = comment - return self.__find_and_modify( + return self._find_and_modify( filter, projection, sort, @@ -3362,7 +3456,7 @@ def find_one_and_update( The *upsert* option can be used to create the document if it doesn't already exist. - >>> db.example.delete_many({}).deleted_count + >>> (db.example.delete_many({})).deleted_count 1 >>> db.example.find_one_and_update( ... {'_id': 'userid'}, @@ -3446,7 +3540,7 @@ def find_one_and_update( kwargs["update"] = update if comment is not None: kwargs["comment"] = comment - return self.__find_and_modify( + return self._find_and_modify( filter, projection, sort, @@ -3458,26 +3552,3 @@ def find_one_and_update( session=session, **kwargs, ) - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'Collection' object is not iterable") - - next = __next__ - - def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: - """This is only here so that some API misusages are easier to debug.""" - if "." not in self.__name: - raise TypeError( - "'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % self.__name - ) - raise TypeError( - "'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists." % self.__name.split(".")[-1] - ) From 3280769d6fec1340f21bb9a407037fe0c618ffb3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:03 -0500 Subject: [PATCH 1462/2111] PYTHON-4747 Rename pymongo/pool.py to pymongo/synchronous/pool.py --- pymongo/{ => synchronous}/pool.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/pool.py (100%) diff --git a/pymongo/pool.py b/pymongo/synchronous/pool.py similarity index 100% rename from pymongo/pool.py rename to pymongo/synchronous/pool.py From 5c6bb0a03923de3fd9f40844a94e46dff0c3a358 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:03 -0500 Subject: [PATCH 1463/2111] PYTHON-4747 Sync pool.py to master --- pymongo/pool.py | 22 + pymongo/synchronous/pool.py | 815 +++++++++--------------------------- 2 files changed, 219 insertions(+), 618 deletions(-) create mode 100644 pymongo/pool.py diff --git a/pymongo/pool.py b/pymongo/pool.py new file mode 100644 index 0000000000..fbbb70fc68 --- /dev/null +++ b/pymongo/pool.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Pool API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.pool import * # noqa: F403 +from pymongo.synchronous.pool import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["PoolOptions"] # noqa: F405 diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index bbd49339df..94a1d10436 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -14,23 +14,22 @@ from __future__ import annotations +import asyncio import collections import contextlib -import copy +import functools import logging import os -import platform import socket import ssl import sys import threading import time import weakref -from pathlib import Path from typing import ( TYPE_CHECKING, Any, - Iterator, + Generator, Mapping, MutableMapping, NoReturn, @@ -39,21 +38,14 @@ Union, ) -import bson from bson import DEFAULT_CODEC_OPTIONS -from pymongo import __version__, _csot, auth, helpers -from pymongo.client_session import _validate_session_write_concern +from pymongo import _csot, helpers_shared from pymongo.common import ( MAX_BSON_SIZE, - MAX_CONNECTING, - MAX_IDLE_TIME_SEC, MAX_MESSAGE_SIZE, - MAX_POOL_SIZE, MAX_WIRE_VERSION, MAX_WRITE_BATCH_SIZE, - MIN_POOL_SIZE, ORDERED_TYPES, - WAIT_QUEUE_TIMEOUT, ) from pymongo.errors import ( # type:ignore[attr-defined] AutoReconnect, @@ -70,7 +62,6 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _handle_reauth from pymongo.lock import _create_lock from pymongo.logger import ( _CONNECTION_LOGGER, @@ -81,33 +72,33 @@ from pymongo.monitoring import ( ConnectionCheckOutFailedReason, ConnectionClosedReason, - _EventListeners, ) -from pymongo.network import command, receive_message +from pymongo.network_layer import sendall +from pymongo.pool_options import PoolOptions from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker from pymongo.ssl_support import HAS_SNI, SSLError +from pymongo.synchronous.client_session import _validate_session_write_concern +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.network import command, receive_message if TYPE_CHECKING: from bson import CodecOptions from bson.objectid import ObjectId - from pymongo.auth import MongoCredential, _AuthContext - from pymongo.client_session import ClientSession from pymongo.compression_support import ( - CompressionSettings, SnappyContext, ZlibContext, ZstdContext, ) - from pymongo.driver_info import DriverInfo from pymongo.message import _OpMsg, _OpReply - from pymongo.mongo_client import MongoClient, _MongoClientErrorHandler - from pymongo.pyopenssl_context import SSLContext, _sslConn + from pymongo.pyopenssl_context import _sslConn from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _ServerMode - from pymongo.server_api import ServerApi + from pymongo.synchronous.auth import _AuthContext + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler from pymongo.typings import ClusterTime, _Address, _CollationIn from pymongo.write_concern import WriteConcern @@ -127,6 +118,8 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 """Dummy function for platforms that don't provide fcntl.""" +_IS_SYNC = True + _MAX_TCP_KEEPIDLE = 120 _MAX_TCP_KEEPINTVL = 10 _MAX_TCP_KEEPCNT = 9 @@ -186,217 +179,6 @@ def _set_keepalive_times(sock: socket.socket) -> None: _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) -_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} - -if sys.platform.startswith("linux"): - # platform.linux_distribution was deprecated in Python 3.5 - # and removed in Python 3.8. Starting in Python 3.5 it - # raises DeprecationWarning - # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 - _name = platform.system() - _METADATA["os"] = { - "type": _name, - "name": _name, - "architecture": platform.machine(), - # Kernel version (e.g. 4.4.0-17-generic). - "version": platform.release(), - } -elif sys.platform == "darwin": - _METADATA["os"] = { - "type": platform.system(), - "name": platform.system(), - "architecture": platform.machine(), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. - "version": platform.mac_ver()[0], - } -elif sys.platform == "win32": - _METADATA["os"] = { - "type": platform.system(), - # "Windows XP", "Windows 7", "Windows 10", etc. - "name": " ".join((platform.system(), platform.release())), - "architecture": platform.machine(), - # Windows patch level (e.g. 5.1.2600-SP3) - "version": "-".join(platform.win32_ver()[1:3]), - } -elif sys.platform.startswith("java"): - _name, _ver, _arch = platform.java_ver()[-1] - _METADATA["os"] = { - # Linux, Windows 7, Mac OS X, etc. - "type": _name, - "name": _name, - # x86, x86_64, AMD64, etc. - "architecture": _arch, - # Linux kernel version, OSX version, etc. - "version": _ver, - } -else: - # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) - _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) - _METADATA["os"] = { - "type": platform.system(), - "name": " ".join([part for part in _aliased[:2] if part]), - "architecture": platform.machine(), - "version": _aliased[2], - } - -if platform.python_implementation().startswith("PyPy"): - _METADATA["platform"] = " ".join( - ( - platform.python_implementation(), - ".".join(map(str, sys.pypy_version_info)), # type: ignore - "(Python %s)" % ".".join(map(str, sys.version_info)), - ) - ) -elif sys.platform.startswith("java"): - _METADATA["platform"] = " ".join( - ( - platform.python_implementation(), - ".".join(map(str, sys.version_info)), - "(%s)" % " ".join((platform.system(), platform.release())), - ) - ) -else: - _METADATA["platform"] = " ".join( - (platform.python_implementation(), ".".join(map(str, sys.version_info))) - ) - -DOCKER_ENV_PATH = "/.dockerenv" -ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" - -RUNTIME_NAME_DOCKER = "docker" -ORCHESTRATOR_NAME_K8S = "kubernetes" - - -def get_container_env_info() -> dict[str, str]: - """Returns the runtime and orchestrator of a container. - If neither value is present, the metadata client.env.container field will be omitted.""" - container = {} - - if Path(DOCKER_ENV_PATH).exists(): - container["runtime"] = RUNTIME_NAME_DOCKER - if os.getenv(ENV_VAR_K8S): - container["orchestrator"] = ORCHESTRATOR_NAME_K8S - - return container - - -def _is_lambda() -> bool: - if os.getenv("AWS_LAMBDA_RUNTIME_API"): - return True - env = os.getenv("AWS_EXECUTION_ENV") - if env: - return env.startswith("AWS_Lambda_") - return False - - -def _is_azure_func() -> bool: - return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) - - -def _is_gcp_func() -> bool: - return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) - - -def _is_vercel() -> bool: - return bool(os.getenv("VERCEL")) - - -def _is_faas() -> bool: - return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() - - -def _getenv_int(key: str) -> Optional[int]: - """Like os.getenv but returns an int, or None if the value is missing/malformed.""" - val = os.getenv(key) - if not val: - return None - try: - return int(val) - except ValueError: - return None - - -def _metadata_env() -> dict[str, Any]: - env: dict[str, Any] = {} - container = get_container_env_info() - if container: - env["container"] = container - # Skip if multiple (or no) envs are matched. - if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: - return env - if _is_lambda(): - env["name"] = "aws.lambda" - region = os.getenv("AWS_REGION") - if region: - env["region"] = region - memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") - if memory_mb is not None: - env["memory_mb"] = memory_mb - elif _is_azure_func(): - env["name"] = "azure.func" - elif _is_gcp_func(): - env["name"] = "gcp.func" - region = os.getenv("FUNCTION_REGION") - if region: - env["region"] = region - memory_mb = _getenv_int("FUNCTION_MEMORY_MB") - if memory_mb is not None: - env["memory_mb"] = memory_mb - timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") - if timeout_sec is not None: - env["timeout_sec"] = timeout_sec - elif _is_vercel(): - env["name"] = "vercel" - region = os.getenv("VERCEL_REGION") - if region: - env["region"] = region - return env - - -_MAX_METADATA_SIZE = 512 - - -# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations -def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: - """Perform metadata truncation.""" - if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: - return - # 1. Omit fields from env except env.name. - env_name = metadata.get("env", {}).get("name") - if env_name: - metadata["env"] = {"name": env_name} - if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: - return - # 2. Omit fields from os except os.type. - os_type = metadata.get("os", {}).get("type") - if os_type: - metadata["os"] = {"type": os_type} - if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: - return - # 3. Omit the env document entirely. - metadata.pop("env", None) - encoded_size = len(bson.encode(metadata)) - if encoded_size <= _MAX_METADATA_SIZE: - return - # 4. Truncate platform. - overflow = encoded_size - _MAX_METADATA_SIZE - plat = metadata.get("platform", "") - if plat: - plat = plat[:-overflow] - if plat: - metadata["platform"] = plat - else: - metadata.pop("platform", None) - - -# If the first getaddrinfo call of this interpreter's life is on a thread, -# while the main thread holds the import lock, getaddrinfo deadlocks trying -# to import the IDNA codec. Import it here, where presumably we're on the -# main thread, to avoid the deadlock. See PYTHON-607. -"foo".encode("idna") - - def _raise_connection_failure( address: Any, error: Exception, @@ -457,238 +239,6 @@ def format_timeout_details(details: Optional[dict[str, float]]) -> str: return result -class PoolOptions: - """Read only connection pool options for a MongoClient. - - Should not be instantiated directly by application developers. Access - a client's pool options via - :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: - - pool_opts = client.options.pool_options - pool_opts.max_pool_size - pool_opts.min_pool_size - - """ - - __slots__ = ( - "__max_pool_size", - "__min_pool_size", - "__max_idle_time_seconds", - "__connect_timeout", - "__socket_timeout", - "__wait_queue_timeout", - "__ssl_context", - "__tls_allow_invalid_hostnames", - "__event_listeners", - "__appname", - "__driver", - "__metadata", - "__compression_settings", - "__max_connecting", - "__pause_enabled", - "__server_api", - "__load_balanced", - "__credentials", - ) - - def __init__( - self, - max_pool_size: int = MAX_POOL_SIZE, - min_pool_size: int = MIN_POOL_SIZE, - max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, - connect_timeout: Optional[float] = None, - socket_timeout: Optional[float] = None, - wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, - ssl_context: Optional[SSLContext] = None, - tls_allow_invalid_hostnames: bool = False, - event_listeners: Optional[_EventListeners] = None, - appname: Optional[str] = None, - driver: Optional[DriverInfo] = None, - compression_settings: Optional[CompressionSettings] = None, - max_connecting: int = MAX_CONNECTING, - pause_enabled: bool = True, - server_api: Optional[ServerApi] = None, - load_balanced: Optional[bool] = None, - credentials: Optional[MongoCredential] = None, - ): - self.__max_pool_size = max_pool_size - self.__min_pool_size = min_pool_size - self.__max_idle_time_seconds = max_idle_time_seconds - self.__connect_timeout = connect_timeout - self.__socket_timeout = socket_timeout - self.__wait_queue_timeout = wait_queue_timeout - self.__ssl_context = ssl_context - self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames - self.__event_listeners = event_listeners - self.__appname = appname - self.__driver = driver - self.__compression_settings = compression_settings - self.__max_connecting = max_connecting - self.__pause_enabled = pause_enabled - self.__server_api = server_api - self.__load_balanced = load_balanced - self.__credentials = credentials - self.__metadata = copy.deepcopy(_METADATA) - if appname: - self.__metadata["application"] = {"name": appname} - - # Combine the "driver" MongoClient option with PyMongo's info, like: - # { - # 'driver': { - # 'name': 'PyMongo|MyDriver', - # 'version': '4.2.0|1.2.3', - # }, - # 'platform': 'CPython 3.8.0|MyPlatform' - # } - if driver: - if driver.name: - self.__metadata["driver"]["name"] = "{}|{}".format( - _METADATA["driver"]["name"], - driver.name, - ) - if driver.version: - self.__metadata["driver"]["version"] = "{}|{}".format( - _METADATA["driver"]["version"], - driver.version, - ) - if driver.platform: - self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) - - env = _metadata_env() - if env: - self.__metadata["env"] = env - - _truncate_metadata(self.__metadata) - - @property - def _credentials(self) -> Optional[MongoCredential]: - """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" - return self.__credentials - - @property - def non_default_options(self) -> dict[str, Any]: - """The non-default options this pool was created with. - - Added for CMAP's :class:`PoolCreatedEvent`. - """ - opts = {} - if self.__max_pool_size != MAX_POOL_SIZE: - opts["maxPoolSize"] = self.__max_pool_size - if self.__min_pool_size != MIN_POOL_SIZE: - opts["minPoolSize"] = self.__min_pool_size - if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: - assert self.__max_idle_time_seconds is not None - opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 - if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: - assert self.__wait_queue_timeout is not None - opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 - if self.__max_connecting != MAX_CONNECTING: - opts["maxConnecting"] = self.__max_connecting - return opts - - @property - def max_pool_size(self) -> float: - """The maximum allowable number of concurrent connections to each - connected server. Requests to a server will block if there are - `maxPoolSize` outstanding connections to the requested server. - Defaults to 100. Cannot be 0. - - When a server's pool has reached `max_pool_size`, operations for that - server block waiting for a socket to be returned to the pool. If - ``waitQueueTimeoutMS`` is set, a blocked operation will raise - :exc:`~pymongo.errors.ConnectionFailure` after a timeout. - By default ``waitQueueTimeoutMS`` is not set. - """ - return self.__max_pool_size - - @property - def min_pool_size(self) -> int: - """The minimum required number of concurrent connections that the pool - will maintain to each connected server. Default is 0. - """ - return self.__min_pool_size - - @property - def max_connecting(self) -> int: - """The maximum number of concurrent connection creation attempts per - pool. Defaults to 2. - """ - return self.__max_connecting - - @property - def pause_enabled(self) -> bool: - return self.__pause_enabled - - @property - def max_idle_time_seconds(self) -> Optional[int]: - """The maximum number of seconds that a connection can remain - idle in the pool before being removed and replaced. Defaults to - `None` (no limit). - """ - return self.__max_idle_time_seconds - - @property - def connect_timeout(self) -> Optional[float]: - """How long a connection can take to be opened before timing out.""" - return self.__connect_timeout - - @property - def socket_timeout(self) -> Optional[float]: - """How long a send or receive on a socket can take before timing out.""" - return self.__socket_timeout - - @property - def wait_queue_timeout(self) -> Optional[int]: - """How long a thread will wait for a socket from the pool if the pool - has no free sockets. - """ - return self.__wait_queue_timeout - - @property - def _ssl_context(self) -> Optional[SSLContext]: - """An SSLContext instance or None.""" - return self.__ssl_context - - @property - def tls_allow_invalid_hostnames(self) -> bool: - """If True skip ssl.match_hostname.""" - return self.__tls_allow_invalid_hostnames - - @property - def _event_listeners(self) -> Optional[_EventListeners]: - """An instance of pymongo.monitoring._EventListeners.""" - return self.__event_listeners - - @property - def appname(self) -> Optional[str]: - """The application name, for sending with hello in server handshake.""" - return self.__appname - - @property - def driver(self) -> Optional[DriverInfo]: - """Driver name and version, for sending with hello in handshake.""" - return self.__driver - - @property - def _compression_settings(self) -> Optional[CompressionSettings]: - return self.__compression_settings - - @property - def metadata(self) -> dict[str, Any]: - """A dict of metadata about the application, driver, os, and platform.""" - return self.__metadata.copy() - - @property - def server_api(self) -> Optional[ServerApi]: - """A pymongo.server_api.ServerApi or None.""" - return self.__server_api - - @property - def load_balanced(self) -> Optional[bool]: - """True if this Pool is configured in load balanced mode.""" - return self.__load_balanced - - class _CancellationContext: def __init__(self) -> None: self._cancelled = False @@ -733,6 +283,7 @@ def __init__( self.op_msg_enabled = False self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap + self.enabled_for_logging = pool.enabled_for_logging self.compression_settings = pool.opts._compression_settings self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None self.socket_checker: SocketChecker = SocketChecker() @@ -824,7 +375,7 @@ def hello_cmd(self) -> dict[str, Any]: else: return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} - def hello(self) -> Hello[dict[str, Any]]: + def hello(self) -> Hello: return self._hello(None, None, None) def _hello( @@ -859,6 +410,8 @@ def _hello( if creds: if creds.mechanism == "DEFAULT" and creds.username: cmd["saslSupportedMechs"] = creds.source + "." + creds.username + from pymongo.synchronous import auth + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) if auth_ctx: speculative_authenticate = auth_ctx.speculate_command() @@ -919,7 +472,7 @@ def _next_reply(self) -> dict[str, Any]: self.more_to_come = reply.more_to_come unpacked_docs = reply.unpack_response() response_doc = unpacked_docs[0] - helpers._check_command_response(response_doc, self.max_wire_version) + helpers_shared._check_command_response(response_doc, self.max_wire_version) return response_doc @_handle_reauth @@ -1024,7 +577,7 @@ def send_message(self, message: bytes, max_doc_size: int) -> None: ) try: - self.conn.sendall(message) + sendall(self.conn, message) except BaseException as error: self._raise_connection_failure(error) @@ -1072,7 +625,7 @@ def write_command( result = reply.command_response(codec_options) # Raises NotPrimaryError or OperationFailure. - helpers._check_command_response(result, self.max_wire_version) + helpers_shared._check_command_response(result, self.max_wire_version) return result def authenticate(self, reauthenticate: bool = False) -> None: @@ -1090,22 +643,24 @@ def authenticate(self, reauthenticate: bool = False) -> None: if not self.ready: creds = self.opts._credentials if creds: + from pymongo.synchronous import auth + auth.authenticate(creds, self, reauthenticate=reauthenticate) self.ready = True + duration = time.monotonic() - self.creation_time if self.enabled_for_cmap: assert self.listeners is not None - duration = time.monotonic() - self.creation_time self.listeners.publish_connection_ready(self.address, self.id, duration) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_READY, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=self.id, - durationMS=duration, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_READY, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) def validate_session( self, client: Optional[MongoClient], session: Optional[ClientSession] @@ -1123,10 +678,11 @@ def close_conn(self, reason: Optional[str]) -> None: if self.closed: return self._close_conn() - if reason and self.enabled_for_cmap: - assert self.listeners is not None - self.listeners.publish_connection_closed(self.address, self.id, reason) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + if reason: + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, clientId=self._client_id, @@ -1315,9 +871,26 @@ def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket. # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. if HAS_SNI: - ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + if _IS_SYNC: + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + if hasattr(ssl_context, "a_wrap_socket"): + ssl_sock = ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc] + else: + loop = asyncio.get_running_loop() + ssl_sock = loop.run_in_executor( + None, + functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc] + ) else: - ssl_sock = ssl_context.wrap_socket(sock) + if _IS_SYNC: + ssl_sock = ssl_context.wrap_socket(sock) + else: + if hasattr(ssl_context, "a_wrap_socket"): + ssl_sock = ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc] + else: + loop = asyncio.get_running_loop() + ssl_sock = loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc] except _CertificateError: sock.close() # Raise _CertificateError directly like we do after match_hostname @@ -1430,17 +1003,18 @@ def __init__( self.address = address self.opts = options self.handshake = handshake - # Don't publish events in Monitor pools. + # Don't publish events or logs in Monitor pools. self.enabled_for_cmap = ( self.handshake and self.opts._event_listeners is not None and self.opts._event_listeners.enabled_for_cmap ) + self.enabled_for_logging = self.handshake # The first portion of the wait queue. # Enforces: maxPoolSize # Also used for: clearing the wait queue - self.size_cond = threading.Condition(self.lock) + self.size_cond = threading.Condition(self.lock) # type: ignore[arg-type] self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: @@ -1448,7 +1022,7 @@ def __init__( # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue - self._max_connecting_cond = threading.Condition(self.lock) + self._max_connecting_cond = threading.Condition(self.lock) # type: ignore[arg-type] self._max_connecting = self.opts.max_connecting self._pending = 0 self._client_id = client_id @@ -1457,15 +1031,15 @@ def __init__( self.opts._event_listeners.publish_pool_created( self.address, self.opts.non_default_options ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_CREATED, - serverHost=self.address[0], - serverPort=self.address[1], - **self.opts.non_default_options, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) # Similar to active_sockets but includes threads in the wait queue. self.operation_count: int = 0 # Retain references to pinned connections to prevent the CPython GC @@ -1483,14 +1057,14 @@ def ready(self) -> None: if self.enabled_for_cmap: assert self.opts._event_listeners is not None self.opts._event_listeners.publish_pool_ready(self.address) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_READY, - serverHost=self.address[0], - serverPort=self.address[1], - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_READY, + serverHost=self.address[0], + serverPort=self.address[1], + ) @property def closed(self) -> bool: @@ -1548,23 +1122,24 @@ def _reset( if self.enabled_for_cmap: assert listeners is not None listeners.publish_pool_closed(self.address) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.POOL_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - ) - else: - if old_state != PoolState.PAUSED and self.enabled_for_cmap: - assert listeners is not None - listeners.publish_pool_cleared( - self.address, - service_id=service_id, - interrupt_connections=interrupt_connections, + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.POOL_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + else: + if old_state != PoolState.PAUSED: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, clientId=self._client_id, @@ -1672,15 +1247,15 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_created(self.address, conn_id) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CREATED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn_id, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CREATED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) try: sock = _configured_socket(self.address, self.opts) @@ -1690,17 +1265,17 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect listeners.publish_connection_closed( self.address, conn_id, ConnectionClosedReason.ERROR ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn_id, - reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), - error=ConnectionClosedReason.ERROR, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) if isinstance(error, (IOError, OSError, SSLError)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) @@ -1725,7 +1300,9 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect return conn @contextlib.contextmanager - def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterator[Connection]: + def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> Generator[Connection, None]: """Get a connection from the pool. Use with a "with" statement. Returns a :class:`Connection` object wrapping a connected @@ -1746,31 +1323,31 @@ def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterat if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_check_out_started(self.address) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_STARTED, - serverHost=self.address[0], - serverPort=self.address[1], - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_STARTED, + serverHost=self.address[0], + serverPort=self.address[1], + ) conn = self._get_conn(checkout_started_time, handler=handler) + duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: assert listeners is not None - duration = time.monotonic() - checkout_started_time listeners.publish_connection_checked_out(self.address, conn.id, duration) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn.id, - durationMS=duration, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + durationMS=duration, + ) try: with self.lock: self.active_contexts.add(conn.cancel_context) @@ -1802,13 +1379,14 @@ def checkout(self, handler: Optional[_MongoClientErrorHandler] = None) -> Iterat def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: if self.state != PoolState.READY: - if self.enabled_for_cmap and emit_event: - assert self.opts._event_listeners is not None + if emit_event: duration = time.monotonic() - checkout_started_time - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, clientId=self._client_id, @@ -1836,23 +1414,23 @@ def _get_conn( self.reset_without_pause() if self.closed: + duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: assert self.opts._event_listeners is not None - duration = time.monotonic() - checkout_started_time self.opts._event_listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_FAILED, - serverHost=self.address[0], - serverPort=self.address[1], - reason="Connection pool was closed", - error=ConnectionCheckOutFailedReason.POOL_CLOSED, - durationMS=duration, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, + ) raise _PoolClosedError( "Attempted to check out a connection from closed connection pool" ) @@ -1928,13 +1506,14 @@ def _get_conn( self.active_sockets -= 1 self.size_cond.notify() - if self.enabled_for_cmap and not emitted_event: - assert self.opts._event_listeners is not None + if not emitted_event: duration = time.monotonic() - checkout_started_time - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration - ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, clientId=self._client_id, @@ -1967,15 +1546,15 @@ def checkin(self, conn: Connection) -> None: if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_checked_in(self.address, conn.id) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKEDIN, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn.id, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKEDIN, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) if self.pid != os.getpid(): self.reset_without_pause() else: @@ -1988,17 +1567,17 @@ def checkin(self, conn: Connection) -> None: listeners.publish_connection_closed( self.address, conn.id, ConnectionClosedReason.ERROR ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CONN_CLOSED, - serverHost=self.address[0], - serverPort=self.address[1], - driverConnectionId=conn.id, - reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), - error=ConnectionClosedReason.ERROR, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CONN_CLOSED, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) else: with self.lock: # Hold the lock to ensure this section does not race with @@ -2060,23 +1639,23 @@ def _perished(self, conn: Connection) -> bool: def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: listeners = self.opts._event_listeners + duration = time.monotonic() - checkout_started_time if self.enabled_for_cmap: assert listeners is not None - duration = time.monotonic() - checkout_started_time listeners.publish_connection_check_out_failed( self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration ) - if _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): - _debug_log( - _CONNECTION_LOGGER, - clientId=self._client_id, - message=_ConnectionStatusMessage.CHECKOUT_FAILED, - serverHost=self.address[0], - serverPort=self.address[1], - reason="Wait queue timeout elapsed without a connection becoming available", - error=ConnectionCheckOutFailedReason.TIMEOUT, - durationMS=duration, - ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + clientId=self._client_id, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) timeout = _csot.get_timeout() or self.opts.wait_queue_timeout if self.opts.load_balanced: other_ops = self.active_sockets - self.ncursors - self.ntxns From b3e1f01774d3371a2aa29202f3a8515c5c8313bb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:03 -0500 Subject: [PATCH 1464/2111] PYTHON-4747 Rename pymongo/command_cursor.py to pymongo/synchronous/command_cursor.py --- pymongo/{ => synchronous}/command_cursor.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/command_cursor.py (100%) diff --git a/pymongo/command_cursor.py b/pymongo/synchronous/command_cursor.py similarity index 100% rename from pymongo/command_cursor.py rename to pymongo/synchronous/command_cursor.py From 2861be8f81259a6e78608a904b9231e03e17dba0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:04 -0500 Subject: [PATCH 1465/2111] PYTHON-4747 Sync command_cursor.py to master --- pymongo/command_cursor.py | 22 ++ pymongo/synchronous/command_cursor.py | 366 +++++++++++++++----------- 2 files changed, 238 insertions(+), 150 deletions(-) create mode 100644 pymongo/command_cursor.py diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py new file mode 100644 index 0000000000..941e3a0eda --- /dev/null +++ b/pymongo/command_cursor.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous CommandCursor API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.command_cursor import * # noqa: F403 +from pymongo.synchronous.command_cursor import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["CommandCursor", "RawBatchCommandCursor"] # noqa: F405 diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index 0411a45abe..da05bf1a3b 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -29,16 +29,25 @@ ) from bson import CodecOptions, _convert_raw_document_lists_to_streams -from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _ConnectionManager +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure -from pymongo.message import _CursorAddress, _GetMore, _OpMsg, _OpReply, _RawBatchGetMore +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _RawBatchGetMore, +) from pymongo.response import PinnedResponse +from pymongo.synchronous.cursor import _ConnectionManager from pymongo.typings import _Address, _DocumentOut, _DocumentType if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.pool import Connection + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True class CommandCursor(Generic[_DocumentType]): @@ -58,27 +67,27 @@ def __init__( comment: Any = None, ) -> None: """Create a new command cursor.""" - self.__sock_mgr: Any = None - self.__collection: Collection[_DocumentType] = collection - self.__id = cursor_info["id"] - self.__data = deque(cursor_info["firstBatch"]) - self.__postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + self._sock_mgr: Any = None + self._collection: Collection[_DocumentType] = collection + self._id = cursor_info["id"] + self._data = deque(cursor_info["firstBatch"]) + self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( "postBatchResumeToken" ) - self.__address = address - self.__batch_size = batch_size - self.__max_await_time_ms = max_await_time_ms - self.__session = session - self.__explicit_session = explicit_session - self.__killed = self.__id == 0 - self.__comment = comment - if self.__killed: - self.__end_session(True) + self._address = address + self._batch_size = batch_size + self._max_await_time_ms = max_await_time_ms + self._session = session + self._explicit_session = explicit_session + self._killed = self._id == 0 + self._comment = comment + if self._killed: + self._end_session() if "ns" in cursor_info: # noqa: SIM401 - self.__ns = cursor_info["ns"] + self._ns = cursor_info["ns"] else: - self.__ns = collection.full_name + self._ns = collection.full_name self.batch_size(batch_size) @@ -86,40 +95,7 @@ def __init__( raise TypeError("max_await_time_ms must be an integer or None") def __del__(self) -> None: - self.__die() - - def __die(self, synchronous: bool = False) -> None: - """Closes this cursor.""" - already_killed = self.__killed - self.__killed = True - if self.__id and not already_killed: - cursor_id = self.__id - assert self.__address is not None - address = _CursorAddress(self.__address, self.__ns) - else: - # Skip killCursors. - cursor_id = 0 - address = None - self.__collection.database.client._cleanup_cursor( - synchronous, - cursor_id, - address, - self.__sock_mgr, - self.__session, - self.__explicit_session, - ) - if not self.__explicit_session: - self.__session = None - self.__sock_mgr = None - - def __end_session(self, synchronous: bool) -> None: - if self.__session and not self.__explicit_session: - self.__session._end_session(lock=synchronous) - self.__session = None - - def close(self) -> None: - """Explicitly close / kill this cursor.""" - self.__die(True) + self._die_no_lock() def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: """Limits the number of documents returned in one batch. Each batch @@ -141,56 +117,152 @@ def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: if batch_size < 0: raise ValueError("batch_size must be >= 0") - self.__batch_size = batch_size == 1 and 2 or batch_size + self._batch_size = batch_size == 1 and 2 or batch_size return self def _has_next(self) -> bool: """Returns `True` if the cursor has documents remaining from the previous batch. """ - return len(self.__data) > 0 + return len(self._data) > 0 @property def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: """Retrieve the postBatchResumeToken from the response to a changeStream aggregate or getMore. """ - return self.__postbatchresumetoken + return self._postbatchresumetoken def _maybe_pin_connection(self, conn: Connection) -> None: - client = self.__collection.database.client - if not client._should_pin_cursor(self.__session): + client = self._collection.database.client + if not client._should_pin_cursor(self._session): return - if not self.__sock_mgr: + if not self._sock_mgr: conn.pin_cursor() conn_mgr = _ConnectionManager(conn, False) # Ensure the connection gets returned when the entire result is # returned in the first batch. - if self.__id == 0: + if self._id == 0: conn_mgr.close() else: - self.__sock_mgr = conn_mgr + self._sock_mgr = conn_mgr + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration`. Best to use a for loop:: + + for doc in collection.aggregate(pipeline): + print(doc) + + .. note:: :attr:`alive` can be True while iterating a cursor from + a failed server. In this case :attr:`alive` will return False after + :meth:`next` fails to retrieve the next batch of results from the + server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> int: + """Returns the id of the cursor.""" + return self._id + + @property + def address(self) -> Optional[_Address]: + """The (host, port) of the server used, or None. + + .. versionadded:: 3.0 + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._explicit_session: + return self._session + return None + + def _prepare_to_die(self) -> tuple[int, Optional[_CursorAddress]]: + already_killed = self._killed + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, self._ns) + else: + # Skip killCursors. + cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session, self._explicit_session + ) + if not self._explicit_session: + self._session = None + self._sock_mgr = None - def __send_message(self, operation: _GetMore) -> None: + def _die_lock(self) -> None: + """Closes this cursor.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + self._explicit_session, + ) + if not self._explicit_session: + self._session = None + self._sock_mgr = None + + def _end_session(self) -> None: + if self._session and not self._explicit_session: + self._session._end_implicit_session() + self._session = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die_lock() + + def _send_message(self, operation: _GetMore) -> None: """Send a getmore message and handle the response.""" - client = self.__collection.database.client + client = self._collection.database.client try: response = client._run_operation( - operation, self._unpack_response, address=self.__address + operation, self._unpack_response, address=self._address ) except OperationFailure as exc: if exc.code in _CURSOR_CLOSED_ERRORS: # Don't send killCursors because the cursor is already closed. - self.__killed = True + self._killed = True if exc.timeout: - self.__die(False) + self._die_no_lock() else: # Return the session and pinned connection, if necessary. self.close() raise except ConnectionFailure: # Don't send killCursors because the cursor is already closed. - self.__killed = True + self._killed = True # Return the session and pinned connection, if necessary. self.close() raise @@ -199,105 +271,55 @@ def __send_message(self, operation: _GetMore) -> None: raise if isinstance(response, PinnedResponse): - if not self.__sock_mgr: - self.__sock_mgr = _ConnectionManager(response.conn, response.more_to_come) + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] if response.from_command: cursor = response.docs[0]["cursor"] documents = cursor["nextBatch"] - self.__postbatchresumetoken = cursor.get("postBatchResumeToken") - self.__id = cursor["id"] + self._postbatchresumetoken = cursor.get("postBatchResumeToken") + self._id = cursor["id"] else: documents = response.docs assert isinstance(response.data, _OpReply) - self.__id = response.data.cursor_id + self._id = response.data.cursor_id - if self.__id == 0: + if self._id == 0: self.close() - self.__data = deque(documents) - - def _unpack_response( - self, - response: Union[_OpReply, _OpMsg], - cursor_id: Optional[int], - codec_options: CodecOptions[Mapping[str, Any]], - user_fields: Optional[Mapping[str, Any]] = None, - legacy_response: bool = False, - ) -> Sequence[_DocumentOut]: - return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + self._data = deque(documents) def _refresh(self) -> int: """Refreshes the cursor with more data from the server. - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the cursor cannot be refreshed due to an error on the query. """ - if len(self.__data) or self.__killed: - return len(self.__data) + if len(self._data) or self._killed: + return len(self._data) - if self.__id: # Get More - dbname, collname = self.__ns.split(".", 1) - read_pref = self.__collection._read_preference_for(self.session) - self.__send_message( + if self._id: # Get More + dbname, collname = self._ns.split(".", 1) + read_pref = self._collection._read_preference_for(self.session) + self._send_message( self._getmore_class( dbname, collname, - self.__batch_size, - self.__id, - self.__collection.codec_options, + self._batch_size, + self._id, + self._collection.codec_options, read_pref, - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, False, - self.__comment, + self._comment, ) ) else: # Cursor id is zero nothing else to return - self.__die(True) - - return len(self.__data) + self._die_lock() - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - Even if :attr:`alive` is ``True``, :meth:`next` can raise - :exc:`StopIteration`. Best to use a for loop:: - - for doc in collection.aggregate(pipeline): - print(doc) - - .. note:: :attr:`alive` can be True while iterating a cursor from - a failed server. In this case :attr:`alive` will return False after - :meth:`next` fails to retrieve the next batch of results from the - server. - """ - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self) -> int: - """Returns the id of the cursor.""" - return self.__id - - @property - def address(self) -> Optional[_Address]: - """The (host, port) of the server used, or None. - - .. versionadded:: 3.0 - """ - return self.__address - - @property - def session(self) -> Optional[ClientSession]: - """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. - - .. versionadded:: 3.6 - """ - if self.__explicit_session: - return self.__session - return None + return len(self._data) def __iter__(self) -> Iterator[_DocumentType]: return self @@ -312,17 +334,33 @@ def next(self) -> _DocumentType: raise StopIteration - __next__ = next + def __next__(self) -> _DocumentType: + return self.next() def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: """Advance the cursor blocking for at most one getMore command.""" - if not len(self.__data) and not self.__killed and get_more_allowed: + if not len(self._data) and not self._killed and get_more_allowed: self._refresh() - if len(self.__data): - return self.__data.popleft() + if len(self._data): + return self._data.popleft() else: return None + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + """Get all or some available documents from the cursor.""" + if not len(self._data) and not self._killed: + self._refresh() + if len(self._data): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + def try_next(self) -> Optional[_DocumentType]: """Advance the cursor without blocking indefinitely. @@ -347,8 +385,36 @@ def __enter__(self) -> CommandCursor[_DocumentType]: def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() + def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. + + To use:: + + >>> cursor.to_list() + + Or, so read at most n items from the cursor:: + + >>> cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + -class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): +class RawBatchCommandCursor(CommandCursor[_DocumentType]): _getmore_class = _RawBatchGetMore def __init__( @@ -398,4 +464,4 @@ def _unpack_response( # type: ignore[override] return raw_response # type: ignore[return-value] def __getitem__(self, index: int) -> NoReturn: - raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") + raise InvalidOperation("Cannot call __getitem__ on RawBatchCommandCursor") From 1cbcf14e0d29e120c5a1ff0b427018b96eb5f9c5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:04 -0500 Subject: [PATCH 1466/2111] PYTHON-4747 Rename pymongo/mongo_client.py to pymongo/synchronous/mongo_client.py --- pymongo/{ => synchronous}/mongo_client.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/mongo_client.py (100%) diff --git a/pymongo/mongo_client.py b/pymongo/synchronous/mongo_client.py similarity index 100% rename from pymongo/mongo_client.py rename to pymongo/synchronous/mongo_client.py From 8bafdb04e4ba6089b0b7a5e4bbcf72ad8df93d58 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:04 -0500 Subject: [PATCH 1467/2111] PYTHON-4747 Sync mongo_client.py to master --- pymongo/mongo_client.py | 22 + pymongo/synchronous/mongo_client.py | 1399 +++++++++++++++------------ 2 files changed, 827 insertions(+), 594 deletions(-) create mode 100644 pymongo/mongo_client.py diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py new file mode 100644 index 0000000000..a815cbc8a9 --- /dev/null +++ b/pymongo/mongo_client.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous MongoClient API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.mongo_client import * # noqa: F403 +from pymongo.synchronous.mongo_client import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["MongoClient"] # noqa: F405 diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index f2076b0877..cec78463b3 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -34,6 +34,7 @@ import contextlib import os +import warnings import weakref from collections import defaultdict from typing import ( @@ -42,8 +43,8 @@ Callable, ContextManager, FrozenSet, + Generator, Generic, - Iterator, Mapping, MutableMapping, NoReturn, @@ -57,23 +58,12 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.timestamp import Timestamp -from pymongo import ( - _csot, - client_session, - common, - database, - helpers, - message, - periodic_executor, - uri_parser, -) -from pymongo.change_stream import ChangeStream, ClusterChangeStream +from pymongo import _csot, common, helpers_shared, uri_parser from pymongo.client_options import ClientOptions -from pymongo.client_session import _EmptyServerSession -from pymongo.command_cursor import CommandCursor from pymongo.errors import ( AutoReconnect, BulkWriteError, + ClientBulkWriteException, ConfigurationError, ConnectionFailure, InvalidOperation, @@ -86,13 +76,28 @@ ) from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks from pymongo.logger import _CLIENT_LOGGER, _log_or_warn +from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason -from pymongo.operations import _Op +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext +from pymongo.synchronous import client_session, database, periodic_executor +from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream +from pymongo.synchronous.client_bulk import _ClientBulk +from pymongo.synchronous.client_session import _EmptyServerSession +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription from pymongo.typings import ( ClusterTime, @@ -111,44 +116,40 @@ from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern if TYPE_CHECKING: - import sys from types import TracebackType from bson.objectid import ObjectId - from pymongo.bulk import _Bulk - from pymongo.client_session import ClientSession, _ServerSession - from pymongo.cursor import _ConnectionManager - from pymongo.database import Database - from pymongo.message import _CursorAddress, _GetMore, _Query - from pymongo.pool import Connection from pymongo.read_concern import ReadConcern from pymongo.response import Response - from pymongo.server import Server from pymongo.server_selectors import Selection + from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_session import ClientSession, _ServerSession + from pymongo.synchronous.cursor import _ConnectionManager + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server - if sys.version_info[:2] >= (3, 9): - from collections.abc import Generator - else: - # Deprecated since version 3.9: collections.abc.Generator now supports []. - from typing import Generator T = TypeVar("T") _WriteCall = Callable[[Optional["ClientSession"], "Connection", bool], T] -_ReadCall = Callable[[Optional["ClientSession"], "Server", "Connection", _ServerMode], T] +_ReadCall = Callable[ + [Optional["ClientSession"], "Server", "Connection", _ServerMode], + T, +] +_IS_SYNC = True -class MongoClient(common.BaseObject, Generic[_DocumentType]): - """ - A client-side representation of a MongoDB cluster. +_WriteOp = Union[ + InsertOne, + DeleteOne, + DeleteMany, + ReplaceOne, + UpdateOne, + UpdateMany, +] - Instances can represent either a standalone MongoDB server, a replica - set, or a sharded cluster. Instances of this class are responsible for - maintaining up-to-date state of the cluster, and possibly cache - resources related to this, including background threads for monitoring, - and connection pools. - """ +class MongoClient(common.BaseObject, Generic[_DocumentType]): HOST = "localhost" PORT = 27017 # Define order to retrieve options from ClientOptions for __repr__. @@ -261,9 +262,10 @@ def __init__( :class:`~datetime.datetime` instances returned as values in a document by this :class:`MongoClient` will be timezone aware (otherwise they will be naive) - :param connect: if ``True`` (the default), immediately + :param connect: If ``True`` (the default), immediately begin connecting to MongoDB in the background. Otherwise connect - on the first operation. + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment. :param type_registry: instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. @@ -719,9 +721,13 @@ def __init__( .. versionchanged:: 4.7 Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. """ doc_class = document_class or dict - self.__init_kwargs: dict[str, Any] = { + self._init_kwargs: dict[str, Any] = { "host": host, "port": port, "document_class": doc_class, @@ -802,7 +808,10 @@ def __init__( if tz_aware is None: tz_aware = opts.get("tz_aware", False) if connect is None: - connect = opts.get("connect", True) + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) keyword_opts["tz_aware"] = tz_aware keyword_opts["connect"] = connect @@ -829,11 +838,11 @@ def __init__( # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self.__options = options = ClientOptions(username, password, dbase, opts) + self._options = options = ClientOptions(username, password, dbase, opts, _IS_SYNC) - self.__default_database_name = dbase - self.__lock = _create_lock() - self.__kill_cursors_queue: list = [] + self._default_database_name = dbase + self._lock = _create_lock() + self._kill_cursors_queue: list = [] self._event_listeners = options.pool_options._event_listeners super().__init__( @@ -862,23 +871,29 @@ def __init__( server_monitoring_mode=options.server_monitoring_mode, ) + self._opened = False + self._closed = False self._init_background() - if connect: - self._get_topology() + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] self._encrypter = None - if self.__options.auto_encryption_opts: - from pymongo.encryption import _Encrypter + if self._options.auto_encryption_opts: + from pymongo.synchronous.encryption import _Encrypter - self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) - self._timeout = self.__options.timeout + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout if _HAS_REGISTER_AT_FORK: # Add this client to the list of weakly referenced items. # This will be used later if we fork. MongoClient._clients[self._topology._topology_id] = self + def _connect(self) -> None: + """Explicitly connect to MongoDB synchronously instead of on the first operation.""" + self._get_topology() + def _init_background(self, old_pid: Optional[int] = None) -> None: self._topology = Topology(self._topology_settings) # Seed the topology with the old one's pid so we can detect clients @@ -903,31 +918,22 @@ def target() -> bool: # this closure. When the client is freed, stop the executor soon. self_ref: Any = weakref.ref(self, executor.close) self._kill_cursors_executor = executor + self._opened = False + + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: + return self._options.load_balanced and not (session and session.in_transaction) def _after_fork(self) -> None: """Resets topology in a child after successfully forking.""" self._init_background(self._topology._pid) + # Reset the session pool to avoid duplicate sessions in the child process. + self._topology._session_pool.reset() def _duplicate(self, **kwargs: Any) -> MongoClient: - args = self.__init_kwargs.copy() + args = self._init_kwargs.copy() args.update(kwargs) return MongoClient(**args) - def _server_property(self, attr_name: str) -> Any: - """An attribute of the current server's description. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - Not threadsafe if used multiple times in a single method, since - the server may change. In such cases, store a local reference to a - ServerDescription first, then use its properties. - """ - server = self._get_topology().select_server(writable_server_selector, _Op.TEST) - - return getattr(server.description, attr_name) - def watch( self, pipeline: Optional[_Pipeline] = None, @@ -1040,7 +1046,7 @@ def watch( .. _change streams specification: https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md """ - return ClusterChangeStream( + change_stream = ClusterChangeStream( self.admin, pipeline, full_document, @@ -1056,6 +1062,9 @@ def watch( show_expanded_events=show_expanded_events, ) + change_stream._initialize_cursor() + return change_stream + @property def topology_description(self) -> TopologyDescription: """The description of the connected MongoDB deployment. @@ -1077,93 +1086,6 @@ def topology_description(self) -> TopologyDescription: """ return self._topology.description - @property - def address(self) -> Optional[tuple[str, int]]: - """(host, port) of the current standalone, primary, or mongos, or None. - - Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if - the client is load-balancing among mongoses, since there is no single - address. Use :attr:`nodes` instead. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - .. versionadded:: 3.0 - """ - topology_type = self._topology._description.topology_type - if ( - topology_type == TOPOLOGY_TYPE.Sharded - and len(self.topology_description.server_descriptions()) > 1 - ): - raise InvalidOperation( - 'Cannot use "address" property when load balancing among' - ' mongoses, use "nodes" instead.' - ) - if topology_type not in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded, - ): - return None - return self._server_property("address") - - @property - def primary(self) -> Optional[tuple[str, int]]: - """The (host, port) of the current primary of the replica set. - - Returns ``None`` if this client is not connected to a replica set, - there is no primary, or this client was created without the - `replicaSet` option. - - .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. - """ - return self._topology.get_primary() # type: ignore[return-value] - - @property - def secondaries(self) -> set[_Address]: - """The secondary members known to this client. - - A sequence of (host, port) pairs. Empty if this client is not - connected to a replica set, there are no visible secondaries, or this - client was created without the `replicaSet` option. - - .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. - """ - return self._topology.get_secondaries() - - @property - def arbiters(self) -> set[_Address]: - """Arbiters in the replica set. - - A sequence of (host, port) pairs. Empty if this client is not - connected to a replica set, there are no arbiters, or this client was - created without the `replicaSet` option. - """ - return self._topology.get_arbiters() - - @property - def is_primary(self) -> bool: - """If this client is connected to a server that can accept writes. - - True if the current server is a standalone, mongos, or the primary of - a replica set. If the client is not connected, this will block until a - connection is established or raise ServerSelectionTimeoutError if no - server is available. - """ - return self._server_property("is_writable") - - @property - def is_mongos(self) -> bool: - """If this client is connected to mongos. If the client is not - connected, this will block until a connection is established or raise - ServerSelectionTimeoutError if no server is available. - """ - return self._server_property("server_type") == SERVER_TYPE.Mongos - @property def nodes(self) -> FrozenSet[_Address]: """Set of all currently connected servers. @@ -1186,118 +1108,530 @@ def options(self) -> ClientOptions: .. versionadded:: 4.0 """ - return self.__options + return self._options - def _end_sessions(self, session_ids: list[_ServerSession]) -> None: - """Send endSessions command(s) with the given session ids.""" - try: - # Use Connection.command directly to avoid implicitly creating - # another session. - with self._conn_for_reads( - ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS - ) as ( - conn, - read_pref, - ): - if not conn.supports_sessions: - return + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self._topology == other._topology + return NotImplemented - for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} - conn.command("admin", spec, read_preference=read_pref, client=self) - except PyMongoError: - # Drivers MUST ignore any errors returned by the endSessions - # command. - pass + def __ne__(self, other: Any) -> bool: + return not self == other - def close(self) -> None: - """Cleanup client resources and disconnect from MongoDB. + def __hash__(self) -> int: + return hash(self._topology) - End all server sessions created by this client by sending one or more - endSessions commands. + def _repr_helper(self) -> str: + def option_repr(option: str, value: Any) -> str: + """Fix options whose __repr__ isn't usable in a constructor.""" + if option == "document_class": + if value is dict: + return "document_class=dict" + else: + return f"document_class={value.__module__}.{value.__name__}" + if option in common.TIMEOUT_OPTIONS and value is not None: + return f"{option}={int(value * 1000)}" - Close all sockets in the connection pools and stop the monitor threads. + return f"{option}={value!r}" - .. versionchanged:: 4.0 - Once closed, the client cannot be used again and any attempt will - raise :exc:`~pymongo.errors.InvalidOperation`. + # Host first... + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] + # ... then everything in self._constructor_args... + options.extend( + option_repr(key, self._options._options[key]) for key in self._constructor_args + ) + # ... then everything else. + options.extend( + option_repr(key, self._options._options[key]) + for key in self._options._options + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) - .. versionchanged:: 3.6 - End all server sessions created by this client. - """ - session_ids = self._topology.pop_all_sessions() - if session_ids: - self._end_sessions(session_ids) - # Stop the periodic task thread and then send pending killCursor - # requests before closing the topology. - self._kill_cursors_executor.close() - self._process_kill_cursors() - self._topology.close() - if self._encrypter: - # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. - self._encrypter.close() + def __repr__(self) -> str: + return f"{type(self).__name__}({self._repr_helper()})" - def _get_topology(self) -> Topology: - """Get the internal :class:`~pymongo.topology.Topology` object. + def __getattr__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. - If this client was created with "connect=False", calling _get_topology - launches the connection process in the background. + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get """ - self._topology.open() - with self.__lock: - self._kill_cursors_executor.open() - return self._topology + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) - @contextlib.contextmanager - def _checkout(self, server: Server, session: Optional[ClientSession]) -> Iterator[Connection]: - in_txn = session and session.in_transaction - with _MongoClientErrorHandler(self, server, session) as err_handler: - # Reuse the pinned connection, if it exists. - if in_txn and session and session._pinned_connection: - err_handler.contribute_socket(session._pinned_connection) - yield session._pinned_connection - return - with server.checkout(handler=err_handler) as conn: - # Pin this session to the selected server or connection. - if ( - in_txn - and session - and server.description.server_type - in ( - SERVER_TYPE.Mongos, - SERVER_TYPE.LoadBalancer, - ) - ): - session._pin(server, conn) - err_handler.contribute_socket(conn) - if ( - self._encrypter - and not self._encrypter._bypass_auto_encryption - and conn.max_wire_version < 8 - ): - raise ConfigurationError( - "Auto-encryption requires a minimum MongoDB version of 4.2" - ) - yield conn + def __getitem__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. - def _select_server( - self, - server_selector: Callable[[Selection], Selection], - session: Optional[ClientSession], - operation: str, - address: Optional[_Address] = None, - deprioritized_servers: Optional[list[Server]] = None, - operation_id: Optional[int] = None, - ) -> Server: - """Select a server to run an operation on this client. + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. - :param server_selector: The server selector to use if the session is - not pinned and no address is given. - :param session: The ClientSession for the next operation, or None. May - be pinned to a mongos server address. - :param operation: The name of the operation that the server is being selected for. - :param address: Address when sending a message - to a specific server, used for getMore. + :param name: the name of the database to get + """ + return database.Database(self, name) + + def __del__(self) -> None: + """Check that this MongoClient has been closed and issue a warning if not.""" + try: + if not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + source=self, + ) + except AttributeError: + pass + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.ClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.client_session.SessionOptions`. See the + :mod:`~pymongo.client_session` module for details and examples. + + A :class:`~pymongo.client_session.ClientSession` may only be used with + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.client_session.ClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. + return None + + def _send_cluster_time( + self, command: MutableMapping[str, Any], session: Optional[ClientSession] + ) -> None: + topology_time = self._topology.max_cluster_time() + session_time = session.cluster_time if session else None + if topology_time and session_time: + if topology_time["clusterTime"] > session_time["clusterTime"]: + cluster_time: Optional[ClusterTime] = topology_time + else: + cluster_time = session_time + else: + cluster_time = topology_time or session_time + if cluster_time: + command["$clusterTime"] = cluster_time + + def get_default_database( + self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: + """Get the database named in the MongoDB connection URI. + + >>> uri = 'mongodb://host/my_database' + >>> client = MongoClient(uri) + >>> db = client.get_default_database() + >>> assert db.name == 'my_database' + >>> db = client.get_database() + >>> assert db.name == 'my_database' + + Useful in scripts where you want to choose which database to use + based only on the URI in a configuration file. + + :param default: the database name to use if no database name + was provided in the URI. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.8 + Undeprecated. Added the ``default``, ``codec_options``, + ``read_preference``, ``write_concern`` and ``read_concern`` + parameters. + + .. versionchanged:: 3.5 + Deprecated, use :meth:`get_database` instead. + """ + if self._default_database_name is None and default is None: + raise ConfigurationError("No default database name defined or provided.") + + name = cast(str, self._default_database_name or default) + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def get_database( + self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: + """Get a :class:`~pymongo.database.Database` with the given name and + options. + + Useful for creating a :class:`~pymongo.database.Database` with + different codec options, read preference, and/or write concern from + this :class:`MongoClient`. + + >>> client.read_preference + Primary() + >>> db1 = client.test + >>> db1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> db2 = client.get_database( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.Database: + """Get a Database instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + def __enter__(self) -> MongoClient[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'MongoClient' object is not iterable") + + next = __next__ + + def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = (self._get_topology()).select_server(writable_server_selector, _Op.TEST) + + return getattr(server.description, attr_name) + + @property + def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. versionadded:: 3.0 + """ + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' + ) + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.Single, + TOPOLOGY_TYPE.LoadBalanced, + TOPOLOGY_TYPE.Sharded, + ): + return None + return self._server_property("address") + + @property + def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + return self._topology.get_primary() # type: ignore[return-value] + + @property + def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + return self._topology.get_secondaries() + + @property + def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + return self._topology.get_arbiters() + + @property + def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return self._server_property("is_writable") + + @property + def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return self._server_property("server_type") == SERVER_TYPE.Mongos + + def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use Connection.command directly to avoid implicitly creating + # another session. + with self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. + + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + session_ids = self._topology.pop_all_sessions() + if session_ids: + self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. + self._kill_cursors_executor.close() + self._process_kill_cursors() + self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + self._encrypter.close() + self._closed = True + + if not _IS_SYNC: + # Add support for contextlib.closing. + close = close + + def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + if not self._opened: + self._topology.open() + with self._lock: + self._kill_cursors_executor.open() + self._opened = True + return self._topology + + @contextlib.contextmanager + def _checkout( + self, server: Server, session: Optional[ClientSession] + ) -> Generator[Connection, None]: + in_txn = session and session.in_transaction + with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + with server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The ClientSession for the next operation, or None. May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. """ try: topology = self._get_topology() @@ -1336,7 +1670,7 @@ def _conn_for_writes( @contextlib.contextmanager def _conn_from_server( self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] - ) -> Iterator[tuple[Connection, _ServerMode]]: + ) -> Generator[tuple[Connection, _ServerMode], None]: assert read_preference is not None, "read_preference must not be None" # Get a connection for a server matching the read preference, and yield # conn with the effective read preference. The Server Selection @@ -1344,9 +1678,9 @@ def _conn_from_server( # always send primaryPreferred when directly connected to a repl set # member. # Thread safe: if the type is single it cannot change. - topology = self._get_topology() - single = topology.description.topology_type == TOPOLOGY_TYPE.Single - + # NOTE: We already opened the Topology when selecting a server so there's no need + # to call _get_topology() again. + single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single with self._checkout(server, session) as conn: if single: if conn.is_repl and not (session and session.in_transaction): @@ -1365,13 +1699,9 @@ def _conn_for_reads( operation: str, ) -> ContextManager[tuple[Connection, _ServerMode]]: assert read_preference is not None, "read_preference must not be None" - _ = self._get_topology() server = self._select_server(read_preference, session, operation) return self._conn_from_server(read_preference, server, session) - def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: - return self.__options.load_balanced and not (session and session.in_transaction) - @_csot.apply def _run_operation( self, @@ -1389,13 +1719,13 @@ def _run_operation( if operation.conn_mgr: server = self._select_server( operation.read_preference, - operation.session, + operation.session, # type: ignore[arg-type] operation.name, address=address, ) - with operation.conn_mgr.lock: - with _MongoClientErrorHandler(self, server, operation.session) as err_handler: + with operation.conn_mgr._alock: + with _MongoClientErrorHandler(self, server, operation.session) as err_handler: # type: ignore[arg-type] err_handler.contribute_socket(operation.conn_mgr.conn) return server.run_operation( operation.conn_mgr.conn, @@ -1425,9 +1755,9 @@ def _cmd( return self._retryable_read( _cmd, operation.read_preference, - operation.session, + operation.session, # type: ignore[arg-type] address=address, - retryable=isinstance(operation, message._Query), + retryable=isinstance(operation, _Query), operation=operation.name, ) @@ -1436,7 +1766,7 @@ def _retry_with_session( retryable: bool, func: _WriteCall[T], session: Optional[ClientSession], - bulk: Optional[_Bulk], + bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, operation_id: Optional[int] = None, ) -> T: @@ -1466,7 +1796,7 @@ def _retry_internal( self, func: _WriteCall[T] | _ReadCall[T], session: Optional[ClientSession], - bulk: Optional[_Bulk], + bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, is_read: bool = False, address: Optional[_Address] = None, @@ -1549,7 +1879,7 @@ def _retryable_write( func: _WriteCall[T], session: Optional[ClientSession], operation: str, - bulk: Optional[_Bulk] = None, + bulk: Optional[Union[_Bulk, _ClientBulk]] = None, operation_id: Optional[int] = None, ) -> T: """Execute an operation with consecutive retries if possible @@ -1568,127 +1898,63 @@ def _retryable_write( with self._tmp_session(session) as s: return self._retry_with_session(retryable, func, s, bulk, operation, operation_id) - def __eq__(self, other: Any) -> bool: - if isinstance(other, self.__class__): - return self._topology == other._topology - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash(self._topology) - - def _repr_helper(self) -> str: - def option_repr(option: str, value: Any) -> str: - """Fix options whose __repr__ isn't usable in a constructor.""" - if option == "document_class": - if value is dict: - return "document_class=dict" - else: - return f"document_class={value.__module__}.{value.__name__}" - if option in common.TIMEOUT_OPTIONS and value is not None: - return f"{option}={int(value * 1000)}" - - return f"{option}={value!r}" - - # Host first... - options = [ - "host=%r" - % [ - "%s:%d" % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds - ] - ] - # ... then everything in self._constructor_args... - options.extend( - option_repr(key, self.__options._options[key]) for key in self._constructor_args - ) - # ... then everything else. - options.extend( - option_repr(key, self.__options._options[key]) - for key in self.__options._options - if key not in set(self._constructor_args) and key != "username" and key != "password" - ) - return ", ".join(options) - - def __repr__(self) -> str: - return f"MongoClient({self._repr_helper()})" - - def __getattr__(self, name: str) -> database.Database[_DocumentType]: - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :param name: the name of the database to get - """ - if name.startswith("_"): - raise AttributeError( - f"MongoClient has no attribute {name!r}. To access the {name}" - f" database, use client[{name!r}]." - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> database.Database[_DocumentType]: - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. + def _cleanup_cursor_no_lock( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: _ConnectionManager, + session: Optional[ClientSession], + explicit_session: bool, + ) -> None: + """Cleanup a cursor from __del__ without locking. - :param name: the name of the database to get + This method handles cleanup for Cursors/CommandCursors including any + pinned connection attached at the time the cursor + was garbage collected. + + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. """ - return database.Database(self, name) + # The cursor will be closed later in a different session. + if cursor_id or conn_mgr: + self._close_cursor_soon(cursor_id, address, conn_mgr) + if session and not explicit_session: + session._end_implicit_session() - def _cleanup_cursor( + def _cleanup_cursor_lock( self, - locks_allowed: bool, cursor_id: int, address: Optional[_CursorAddress], conn_mgr: _ConnectionManager, session: Optional[ClientSession], explicit_session: bool, ) -> None: - """Cleanup a cursor from cursor.close() or __del__. + """Cleanup a cursor from cursor.close() using a lock. This method handles cleanup for Cursors/CommandCursors including any pinned connection or implicit session attached at the time the cursor was closed or garbage collected. - :param locks_allowed: True if we are allowed to acquire locks. :param cursor_id: The cursor id which may be 0. :param address: The _CursorAddress. :param conn_mgr: The _ConnectionManager for the pinned connection or None. :param session: The cursor's session. :param explicit_session: True if the session was passed explicitly. """ - if locks_allowed: - if cursor_id: - if conn_mgr and conn_mgr.more_to_come: - # If this is an exhaust cursor and we haven't completely - # exhausted the result set we *must* close the socket - # to stop the server from sending more data. - assert conn_mgr.conn is not None - conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) - else: - self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) - if conn_mgr: - conn_mgr.close() - else: - # The cursor will be closed later in a different session. - if cursor_id or conn_mgr: - self._close_cursor_soon(cursor_id, address, conn_mgr) + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + conn_mgr.close() if session and not explicit_session: - session._end_session(lock=locks_allowed) - - def _close_cursor_soon( - self, - cursor_id: int, - address: Optional[_CursorAddress], - conn_mgr: Optional[_ConnectionManager] = None, - ) -> None: - """Request that a cursor and/or connection be cleaned up soon.""" - self.__kill_cursors_queue.append((address, cursor_id, conn_mgr)) + session._end_implicit_session() def _close_cursor_now( self, @@ -1706,7 +1972,7 @@ def _close_cursor_now( try: if conn_mgr: - with conn_mgr.lock: + with conn_mgr._alock: # Cursor is pinned to LB outside of a transaction. assert address is not None assert conn_mgr.conn is not None @@ -1757,7 +2023,7 @@ def _process_kill_cursors(self) -> None: # Other threads or the GC may append to the queue concurrently. while True: try: - address, cursor_id, conn_mgr = self.__kill_cursors_queue.pop() + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() except IndexError: break @@ -1768,14 +2034,14 @@ def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: - self._cleanup_cursor(True, cursor_id, address, conn_mgr, None, False) + self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it # can be caught in _process_periodic_tasks raise else: - helpers._handle_exception() + helpers_shared._handle_exception() # Don't re-open topology if it's closed and there's no pending cursors. if address_to_cursor_ids: @@ -1787,7 +2053,7 @@ def _process_kill_cursors(self) -> None: if isinstance(exc, InvalidOperation) and self._topology._closed: raise else: - helpers._handle_exception() + helpers_shared._handle_exception() # This method is run periodically by a background thread. def _process_periodic_tasks(self) -> None: @@ -1801,62 +2067,15 @@ def _process_periodic_tasks(self) -> None: if isinstance(exc, InvalidOperation) and self._topology._closed: return else: - helpers._handle_exception() - - def __start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: - server_session = _EmptyServerSession() - opts = client_session.SessionOptions(**kwargs) - return client_session.ClientSession(self, server_session, opts, implicit) - - def start_session( - self, - causal_consistency: Optional[bool] = None, - default_transaction_options: Optional[client_session.TransactionOptions] = None, - snapshot: Optional[bool] = False, - ) -> client_session.ClientSession: - """Start a logical session. - - This method takes the same parameters as - :class:`~pymongo.client_session.SessionOptions`. See the - :mod:`~pymongo.client_session` module for details and examples. - - A :class:`~pymongo.client_session.ClientSession` may only be used with - the MongoClient that started it. :class:`ClientSession` instances are - **not thread-safe or fork-safe**. They can only be used by one thread - or process at a time. A single :class:`ClientSession` cannot be used - to run multiple operations concurrently. - - :return: An instance of :class:`~pymongo.client_session.ClientSession`. - - .. versionadded:: 3.6 - """ - return self.__start_session( - False, - causal_consistency=causal_consistency, - default_transaction_options=default_transaction_options, - snapshot=snapshot, - ) + helpers_shared._handle_exception() def _return_server_session( - self, server_session: Union[_ServerSession, _EmptyServerSession], lock: bool + self, server_session: Union[_ServerSession, _EmptyServerSession] ) -> None: """Internal: return a _ServerSession to the pool.""" if isinstance(server_session, _EmptyServerSession): return None - return self._topology.return_server_session(server_session, lock) - - def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: - """If provided session is None, lend a temporary session.""" - if session: - return session - - try: - # Don't make implicit sessions causally consistent. Applications - # should always opt-in. - return self.__start_session(True, causal_consistency=False) - except (ConfigurationError, InvalidOperation): - # Sessions not supported. - return None + return self._topology.return_server_session(server_session) @contextlib.contextmanager def _tmp_session( @@ -1888,21 +2107,6 @@ def _tmp_session( else: yield None - def _send_cluster_time( - self, command: MutableMapping[str, Any], session: Optional[ClientSession] - ) -> None: - topology_time = self._topology.max_cluster_time() - session_time = session.cluster_time if session else None - if topology_time and session_time: - if topology_time["clusterTime"] > session_time["clusterTime"]: - cluster_time: Optional[ClusterTime] = topology_time - else: - cluster_time = session_time - else: - cluster_time = topology_time or session_time - if cluster_time: - command["$clusterTime"] = cluster_time - def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSession]) -> None: self._topology.receive_cluster_time(reply.get("$clusterTime")) if session is not None: @@ -1924,6 +2128,26 @@ def server_info(self, session: Optional[client_session.ClientSession] = None) -> ), ) + def _list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + cmd = {"listDatabases": 1} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + admin = self._database_default_options("admin") + res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) + # listDatabases doesn't return a cursor (yet). Fake one. + cursor = { + "id": 0, + "firstBatch": res["databases"], + "ns": "admin.$cmd", + } + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) + def list_databases( self, session: Optional[client_session.ClientSession] = None, @@ -1947,19 +2171,7 @@ def list_databases( .. versionadded:: 3.6 """ - cmd = {"listDatabases": 1} - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - admin = self._database_default_options("admin") - res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) - # listDatabases doesn't return a cursor (yet). Fake one. - cursor = { - "id": 0, - "firstBatch": res["databases"], - "ns": "admin.$cmd", - } - return CommandCursor(admin["$cmd"], cursor, None, comment=comment) + return self._list_databases(session, comment, **kwargs) def list_database_names( self, @@ -1978,7 +2190,8 @@ def list_database_names( .. versionadded:: 3.6 """ - return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] + res = self._list_databases(session, nameOnly=True, comment=comment) + return [doc["name"] for doc in res] @_csot.apply def drop_database( @@ -2031,152 +2244,136 @@ def drop_database( session=session, ) - def get_default_database( + @_csot.apply + def bulk_write( self, - default: Optional[str] = None, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, + models: Sequence[_WriteOp[_DocumentType]], + session: Optional[ClientSession] = None, + ordered: bool = True, + verbose_results: bool = False, + bypass_document_validation: Optional[bool] = None, + comment: Optional[Any] = None, + let: Optional[Mapping] = None, write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> database.Database[_DocumentType]: - """Get the database named in the MongoDB connection URI. - - >>> uri = 'mongodb://host/my_database' - >>> client = MongoClient(uri) - >>> db = client.get_default_database() - >>> assert db.name == 'my_database' - >>> db = client.get_database() - >>> assert db.name == 'my_database' - - Useful in scripts where you want to choose which database to use - based only on the URI in a configuration file. - - :param default: the database name to use if no database name - was provided in the URI. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is - used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is - used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is - used. - :param comment: A user-provided comment to attach to this + ) -> ClientBulkWriteResult: + """Send a batch of write operations, potentially across multiple namespaces, to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + ... + >>> for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + ... + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> models = [InsertOne(namespace="db.test", document={'y': 1}), + ... DeleteOne(namespace="db.test", filter={'x': 1}), + ... InsertOne(namespace="db.coll", document={'y': 2}), + ... ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)] + >>> result = client.bulk_write(models=models) + >>> result.inserted_count + 2 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_count + 1 + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + ... + >>> for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')} + + :param models: A list of write operation instances. + :param session: (optional) An instance of + :class:`~pymongo.client_session.ClientSession`. + :param ordered: If ``True`` (the default), requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False``, requests + will be still performed on the server serially, in the order provided, + but all operations will be attempted even if any errors occur. + :param verbose_results: If ``True``, detailed results for each + successful operation will be included in the returned + :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is ``False``. + :param comment: (optional) A user-provided comment to attach to this command. + :param let: (optional) Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param write_concern: (optional) The write concern to use for this bulk write. - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.8 - Undeprecated. Added the ``default``, ``codec_options``, - ``read_preference``, ``write_concern`` and ``read_concern`` - parameters. - - .. versionchanged:: 3.5 - Deprecated, use :meth:`get_database` instead. - """ - if self.__default_database_name is None and default is None: - raise ConfigurationError("No default database name defined or provided.") - - name = cast(str, self.__default_database_name or default) - return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern - ) - - def get_database( - self, - name: Optional[str] = None, - codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional[ReadConcern] = None, - ) -> database.Database[_DocumentType]: - """Get a :class:`~pymongo.database.Database` with the given name and - options. + :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. - Useful for creating a :class:`~pymongo.database.Database` with - different codec options, read preference, and/or write concern from - this :class:`MongoClient`. + .. seealso:: For more info, see :doc:`/examples/client_bulk`. - >>> client.read_preference - Primary() - >>> db1 = client.test - >>> db1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> db2 = client.get_database( - ... 'test', read_preference=ReadPreference.SECONDARY) - >>> db2.read_preference - Secondary(tag_sets=None) + .. seealso:: :ref:`writes-and-ids` - :param name: The name of the database - a string. If ``None`` - (the default) the database named in the MongoDB connection URI is - returned. - :param codec_options: An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is - used. - :param read_preference: The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. - :param write_concern: An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is - used. - :param read_concern: An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is - used. + .. note:: requires MongoDB server version 8.0+. - .. versionchanged:: 3.5 - The `name` parameter is now optional, defaulting to the database - named in the MongoDB connection URI. + .. versionadded:: 4.9 """ - if name is None: - if self.__default_database_name is None: - raise ConfigurationError("No default database defined") - name = self.__default_database_name - - return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern - ) + if self._options.auto_encryption_opts: + raise InvalidOperation( + "MongoClient.bulk_write does not currently support automatic encryption" + ) - def _database_default_options(self, name: str) -> Database: - """Get a Database instance with the default settings.""" - return self.get_database( - name, - codec_options=DEFAULT_CODEC_OPTIONS, - read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN, + if session and session.in_transaction: + # Inherit the transaction write concern. + if write_concern: + raise InvalidOperation("Cannot set write concern after starting a transaction") + write_concern = session._transaction.opts.write_concern # type: ignore[union-attr] + else: + # Inherit the client's write concern if none is provided. + if not write_concern: + write_concern = self.write_concern + + common.validate_list("models", models) + + blk = _ClientBulk( + self, + write_concern=write_concern, # type: ignore[arg-type] + ordered=ordered, + bypass_document_validation=bypass_document_validation, + comment=comment, + let=let, + verbose_results=verbose_results, ) + for model in models: + try: + model._add_to_client_bulk(blk) + except AttributeError: + raise TypeError(f"{model!r} is not a valid request") from None - def __enter__(self) -> MongoClient[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'MongoClient' object is not iterable") - - next = __next__ + return blk.execute(session, _Op.BULK_WRITE) def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: """Return the server response from PyMongo exception or None.""" - if isinstance(exc, BulkWriteError): + if isinstance(exc, (BulkWriteError, ClientBulkWriteException)): # Check the last writeConcernError to determine if this # BulkWriteError is retryable. wces = exc.details["writeConcernErrors"] @@ -2206,15 +2403,19 @@ def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mong # Do not consult writeConcernError for pre-4.4 mongos. if isinstance(exc, WriteConcernError) and is_mongos: pass - elif code in helpers._RETRYABLE_ERROR_CODES: + elif code in helpers_shared._RETRYABLE_ERROR_CODES: exc._add_error_label("RetryableWriteError") # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is # handled above. - if isinstance(exc, ConnectionFailure) and not isinstance( - exc, (NotPrimaryError, WaitQueueTimeoutError) + if isinstance(exc, ClientBulkWriteException): + exc_to_check = exc.error + else: + exc_to_check = exc + if isinstance(exc_to_check, ConnectionFailure) and not isinstance( + exc_to_check, (NotPrimaryError, WaitQueueTimeoutError) ): - exc._add_error_label("RetryableWriteError") + exc_to_check._add_error_label("RetryableWriteError") class _MongoClientErrorHandler: @@ -2232,6 +2433,9 @@ class _MongoClientErrorHandler: ) def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): + if not isinstance(client, MongoClient): + raise TypeError(f"MongoClient required but given {type(client)}") + self.client = client self.server_address = server.description.address self.session = session @@ -2259,6 +2463,8 @@ def handle( return self.handled = True if self.session: + if isinstance(exc_val, ClientBulkWriteException): + exc_val = exc_val.error if isinstance(exc_val, ConnectionFailure): if self.session.in_transaction: exc_val._add_error_label("TransientTransactionError") @@ -2270,7 +2476,7 @@ def handle( ): self.session._unpin() err_ctx = _ErrorContext( - exc_val, + exc_val, # type: ignore[arg-type] self.max_wire_version, self.sock_generation, self.completed_handshake, @@ -2297,7 +2503,7 @@ def __init__( self, mongo_client: MongoClient, func: _WriteCall[T] | _ReadCall[T], - bulk: Optional[_Bulk], + bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, is_read: bool = False, session: Optional[ClientSession] = None, @@ -2362,7 +2568,7 @@ def run(self) -> T: exc_code = getattr(exc, "code", None) if self._is_not_eligible_for_retry() or ( isinstance(exc, OperationFailure) - and exc_code not in helpers._RETRYABLE_ERROR_CODES + and exc_code not in helpers_shared._RETRYABLE_ERROR_CODES ): raise self._retrying = True @@ -2374,7 +2580,12 @@ def run(self) -> T: if not self._is_read: if not self._retryable: raise - retryable_write_error_exc = exc.has_error_label("RetryableWriteError") + if isinstance(exc, ClientBulkWriteException) and exc.error: + retryable_write_error_exc = isinstance( + exc.error, PyMongoError + ) and exc.error.has_error_label("RetryableWriteError") + else: + retryable_write_error_exc = exc.has_error_label("RetryableWriteError") if retryable_write_error_exc: assert self._session self._session._unpin() From 42e48eaa2a72fd4b8e12a642bee2c4af2aca4e5f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:04 -0500 Subject: [PATCH 1468/2111] PYTHON-4747 Rename pymongo/change_stream.py to pymongo/synchronous/change_stream.py --- pymongo/{ => synchronous}/change_stream.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/change_stream.py (100%) diff --git a/pymongo/change_stream.py b/pymongo/synchronous/change_stream.py similarity index 100% rename from pymongo/change_stream.py rename to pymongo/synchronous/change_stream.py From 96faacb578400a40c24c6334fca21397bd0bbd41 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:05 -0500 Subject: [PATCH 1469/2111] PYTHON-4747 Sync change_stream.py to master --- pymongo/change_stream.py | 22 +++++++++++++++++++ pymongo/synchronous/change_stream.py | 32 +++++++++++++++++----------- 2 files changed, 41 insertions(+), 13 deletions(-) create mode 100644 pymongo/change_stream.py diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py new file mode 100644 index 0000000000..b96a1750cf --- /dev/null +++ b/pymongo/change_stream.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous ChangeStream API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.change_stream import * # noqa: F403 +from pymongo.synchronous.change_stream import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["ChangeStream", "ClusterChangeStream", "CollectionChangeStream", "DatabaseChangeStream"] # noqa: F405 diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py index dc2f6bf2c5..a971ad08c0 100644 --- a/pymongo/synchronous/change_stream.py +++ b/pymongo/synchronous/change_stream.py @@ -22,13 +22,7 @@ from bson.raw_bson import RawBSONDocument from bson.timestamp import Timestamp from pymongo import _csot, common -from pymongo.aggregation import ( - _AggregationCommand, - _CollectionAggregationCommand, - _DatabaseAggregationCommand, -) from pymongo.collation import validate_collation_or_none -from pymongo.command_cursor import CommandCursor from pymongo.errors import ( ConnectionFailure, CursorNotFound, @@ -37,8 +31,16 @@ PyMongoError, ) from pymongo.operations import _Op +from pymongo.synchronous.aggregation import ( + _AggregationCommand, + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) +from pymongo.synchronous.command_cursor import CommandCursor from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +_IS_SYNC = True + # The change streams spec considers the following server errors from the # getMore command non-resumable. All other getMore errors are resumable. _RESUMABLE_GETMORE_ERRORS = frozenset( @@ -65,11 +67,11 @@ if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.database import Database - from pymongo.mongo_client import MongoClient - from pymongo.pool import Connection + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.database import Database + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection def _resumable(exc: PyMongoError) -> bool: @@ -100,7 +102,9 @@ class ChangeStream(Generic[_DocumentType]): def __init__( self, target: Union[ - MongoClient[_DocumentType], Database[_DocumentType], Collection[_DocumentType] + MongoClient[_DocumentType], + Database[_DocumentType], + Collection[_DocumentType], ], pipeline: Optional[_Pipeline], full_document: Optional[str], @@ -149,6 +153,8 @@ def __init__( self._closed = False self._timeout = self._target._timeout self._show_expanded_events = show_expanded_events + + def _initialize_cursor(self) -> None: # Initialize cursor. self._cursor = self._create_cursor() @@ -180,7 +186,7 @@ def _change_stream_options(self) -> dict[str, Any]: else: options["resumeAfter"] = resume_token - if self._start_at_operation_time is not None: + elif self._start_at_operation_time is not None: options["startAtOperationTime"] = self._start_at_operation_time if self._show_expanded_events: From 489b2f8a1a3841e32ae3eba41a42b6e4bf505024 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:05 -0500 Subject: [PATCH 1470/2111] PYTHON-4747 Rename pymongo/cursor.py to pymongo/synchronous/cursor.py --- pymongo/{ => synchronous}/cursor.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/cursor.py (100%) diff --git a/pymongo/cursor.py b/pymongo/synchronous/cursor.py similarity index 100% rename from pymongo/cursor.py rename to pymongo/synchronous/cursor.py From 731bf9bbc05c623e7b19464e04e4345e780944d5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:05 -0500 Subject: [PATCH 1471/2111] PYTHON-4747 Sync cursor.py to master --- pymongo/cursor.py | 23 + pymongo/synchronous/cursor.py | 1046 +++++++++++++++++---------------- 2 files changed, 548 insertions(+), 521 deletions(-) create mode 100644 pymongo/cursor.py diff --git a/pymongo/cursor.py b/pymongo/cursor.py new file mode 100644 index 0000000000..869adddc37 --- /dev/null +++ b/pymongo/cursor.py @@ -0,0 +1,23 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Cursor API for compatibility.""" +from __future__ import annotations + +from pymongo.cursor_shared import * # noqa: F403 +from pymongo.synchronous.cursor import * # noqa: F403 +from pymongo.synchronous.cursor import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["Cursor", "CursorType", "RawBatchCursor"] # noqa: F405 diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 3151fcaf3d..c352b64098 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -28,7 +28,6 @@ NoReturn, Optional, Sequence, - Tuple, Union, cast, overload, @@ -37,12 +36,13 @@ from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON -from pymongo import helpers +from pymongo import helpers_shared from pymongo.collation import validate_collation_or_none from pymongo.common import ( validate_is_document_type, validate_is_mapping, ) +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure from pymongo.lock import _create_lock from pymongo.message import ( @@ -55,6 +55,7 @@ _RawBatchQuery, ) from pymongo.response import PinnedResponse +from pymongo.synchronous.helpers import next from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType from pymongo.write_concern import validate_boolean @@ -62,80 +63,12 @@ from _typeshed import SupportsItems from bson.codec_options import CodecOptions - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.pool import Connection from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection - -# These errors mean that the server has already killed the cursor so there is -# no need to send killCursors. -_CURSOR_CLOSED_ERRORS = frozenset( - [ - 43, # CursorNotFound - 175, # QueryPlanKilled - 237, # CursorKilled - # On a tailable cursor, the following errors mean the capped collection - # rolled over. - # MongoDB 2.6: - # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} - 28617, - # MongoDB 3.0: - # {'$err': 'getMore executor error: UnknownError no details available', - # 'code': 17406, 'ok': 0} - 17406, - # MongoDB 3.2 + 3.4: - # {'ok': 0.0, 'errmsg': 'GetMore command executor error: - # CappedPositionLost: CollectionScan died due to failure to restore - # tailable cursor position. Last seen record id: RecordId(3)', - # 'code': 96} - 96, - # MongoDB 3.6+: - # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to - # restore tailable cursor position. Last seen record id: RecordId(3)"', - # 'code': 136, 'codeName': 'CappedPositionLost'} - 136, - ] -) - -_QUERY_OPTIONS = { - "tailable_cursor": 2, - "secondary_okay": 4, - "oplog_replay": 8, - "no_timeout": 16, - "await_data": 32, - "exhaust": 64, - "partial": 128, -} - - -class CursorType: - NON_TAILABLE = 0 - """The standard cursor type.""" - - TAILABLE = _QUERY_OPTIONS["tailable_cursor"] - """The tailable cursor type. - - Tailable cursors are only for use with capped collections. They are not - closed when the last data is retrieved but are kept open and the cursor - location marks the final document position. If more data is received - iteration of the cursor will continue from the last document received. - """ - - TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"] - """A tailable cursor with the await option set. - - Creates a tailable cursor that will wait for a few seconds after returning - the full result set so that it can capture and return additional data added - during the query. - """ - - EXHAUST = _QUERY_OPTIONS["exhaust"] - """An exhaust cursor. - - MongoDB will stream batched results to the client without waiting for the - client to request each batch, reducing latency. - """ +_IS_SYNC = True class _ConnectionManager: @@ -144,7 +77,7 @@ class _ConnectionManager: def __init__(self, conn: Connection, more_to_come: bool): self.conn: Optional[Connection] = conn self.more_to_come = more_to_come - self.lock = _create_lock() + self._alock = _create_lock() def update_exhaust(self, more_to_come: bool) -> None: self.more_to_come = more_to_come @@ -156,15 +89,7 @@ def close(self) -> None: self.conn = None -_Sort = Union[ - Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] -] -_Hint = Union[str, _Sort] - - class Cursor(Generic[_DocumentType]): - """A cursor / iterator over Mongo query results.""" - _query_class = _Query _getmore_class = _GetMore @@ -204,19 +129,19 @@ def __init__( """ # Initialize all attributes used in __del__ before possibly raising # an error to avoid attribute errors during garbage collection. - self.__collection: Collection[_DocumentType] = collection - self.__id: Any = None - self.__exhaust = False - self.__sock_mgr: Any = None - self.__killed = False - self.__session: Optional[ClientSession] + self._collection: Collection[_DocumentType] = collection + self._id: Any = None + self._exhaust = False + self._sock_mgr: Any = None + self._killed = False + self._session: Optional[ClientSession] if session: - self.__session = session - self.__explicit_session = True + self._session = session + self._explicit_session = True else: - self.__session = None - self.__explicit_session = False + self._session = None + self._explicit_session = False spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) @@ -225,7 +150,7 @@ def __init__( if not isinstance(limit, int): raise TypeError("limit must be an instance of int") validate_boolean("no_cursor_timeout", no_cursor_timeout) - if no_cursor_timeout and not self.__explicit_session: + if no_cursor_timeout and not self._explicit_session: warnings.warn( "use an explicit session with no_cursor_timeout=True " "otherwise the cursor may still timeout after " @@ -254,41 +179,33 @@ def __init__( allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) if projection is not None: - projection = helpers._fields_list_to_dict(projection, "projection") + projection = helpers_shared._fields_list_to_dict(projection, "projection") if let is not None: validate_is_document_type("let", let) - self.__let = let - self.__spec = spec - self.__has_filter = filter is not None - self.__projection = projection - self.__skip = skip - self.__limit = limit - self.__batch_size = batch_size - self.__ordering = sort and helpers._index_document(sort) or None - self.__max_scan = max_scan - self.__explain = False - self.__comment = comment - self.__max_time_ms = max_time_ms - self.__max_await_time_ms: Optional[int] = None - self.__max: Optional[Union[dict[Any, Any], _Sort]] = max - self.__min: Optional[Union[dict[Any, Any], _Sort]] = min - self.__collation = validate_collation_or_none(collation) - self.__return_key = return_key - self.__show_record_id = show_record_id - self.__allow_disk_use = allow_disk_use - self.__snapshot = snapshot - self.__hint: Union[str, dict[str, Any], None] - self.__set_hint(hint) - - # Exhaust cursor support - if cursor_type == CursorType.EXHAUST: - if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are not supported by mongos") - if limit: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__exhaust = True + self._let = let + self._spec = spec + self._has_filter = filter is not None + self._projection = projection + self._skip = skip + self._limit = limit + self._batch_size = batch_size + self._ordering = sort and helpers_shared._index_document(sort) or None + self._max_scan = max_scan + self._explain = False + self._comment = comment + self._max_time_ms = max_time_ms + self._max_await_time_ms: Optional[int] = None + self._max: Optional[Union[dict[Any, Any], _Sort]] = max + self._min: Optional[Union[dict[Any, Any], _Sort]] = min + self._collation = validate_collation_or_none(collation) + self._return_key = return_key + self._show_record_id = show_record_id + self._allow_disk_use = allow_disk_use + self._snapshot = snapshot + self._hint: Union[str, dict[str, Any], None] + self._set_hint(hint) # This is ugly. People want to be able to do cursor[5:5] and # get an empty result set (old behavior was an @@ -296,61 +213,61 @@ def __init__( # server uses limit(0) to mean 'no limit'. So we set __empty # in that case and check for it when iterating. We also unset # it anytime we change __limit. - self.__empty = False + self._empty = False - self.__data: deque = deque() - self.__address: Optional[_Address] = None - self.__retrieved = 0 + self._data: deque = deque() + self._address: Optional[_Address] = None + self._retrieved = 0 - self.__codec_options = collection.codec_options + self._codec_options = collection.codec_options # Read preference is set when the initial find is sent. - self.__read_preference: Optional[_ServerMode] = None - self.__read_concern = collection.read_concern + self._read_preference: Optional[_ServerMode] = None + self._read_concern = collection.read_concern - self.__query_flags = cursor_type + self._query_flags = cursor_type + self._cursor_type = cursor_type if no_cursor_timeout: - self.__query_flags |= _QUERY_OPTIONS["no_timeout"] + self._query_flags |= _QUERY_OPTIONS["no_timeout"] if allow_partial_results: - self.__query_flags |= _QUERY_OPTIONS["partial"] + self._query_flags |= _QUERY_OPTIONS["partial"] if oplog_replay: - self.__query_flags |= _QUERY_OPTIONS["oplog_replay"] + self._query_flags |= _QUERY_OPTIONS["oplog_replay"] # The namespace to use for find/getMore commands. - self.__dbname = collection.database.name - self.__collname = collection.name + self._dbname = collection.database.name + self._collname = collection.name + + # Checking exhaust cursor support requires network IO + if _IS_SYNC: + self._exhaust_checked = True + self._supports_exhaust() # type: ignore[unused-coroutine] + else: + self._exhaust = cursor_type == CursorType.EXHAUST + self._exhaust_checked = False + + def _supports_exhaust(self) -> None: + # Exhaust cursor support + if self._cursor_type == CursorType.EXHAUST: + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + self._exhaust = True @property def collection(self) -> Collection[_DocumentType]: """The :class:`~pymongo.collection.Collection` that this :class:`Cursor` is iterating. """ - return self.__collection + return self._collection @property def retrieved(self) -> int: """The number of documents retrieved so far.""" - return self.__retrieved + return self._retrieved def __del__(self) -> None: - self.__die() - - def rewind(self) -> Cursor[_DocumentType]: - """Rewind this cursor to its unevaluated state. - - Reset this cursor if it has been partially or completely evaluated. - Any options that are present on the cursor will remain in effect. - Future iterating performed on this cursor will cause new queries to - be sent to the server, even if the resultant data has already been - retrieved by this cursor. - """ - self.close() - self.__data = deque() - self.__id = None - self.__address = None - self.__retrieved = 0 - self.__killed = False - - return self + self._die_no_lock() def clone(self) -> Cursor[_DocumentType]: """Get a clone of this cursor. @@ -365,8 +282,8 @@ def clone(self) -> Cursor[_DocumentType]: def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: """Internal clone helper.""" if not base: - if self.__explicit_session: - base = self._clone_base(self.__session) + if self._explicit_session: + base = self._clone_base(self._session) else: base = self._clone_base(None) @@ -394,11 +311,10 @@ def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor "snapshot", "exhaust", "has_filter", + "cursor_type", ) data = { - k: v - for k, v in self.__dict__.items() - if k.startswith("_Cursor__") and k[9:] in values_to_clone + k: v for k, v in self.__dict__.items() if k.startswith("_") and k[1:] in values_to_clone } if deepcopy: data = self._deepcopy(data) @@ -407,73 +323,40 @@ def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor def _clone_base(self, session: Optional[ClientSession]) -> Cursor: """Creates an empty Cursor object for information to be copied into.""" - return self.__class__(self.__collection, session=session) - - def __die(self, synchronous: bool = False) -> None: - """Closes this cursor.""" - try: - already_killed = self.__killed - except AttributeError: - # __init__ did not run to completion (or at all). - return + return self.__class__(self._collection, session=session) - self.__killed = True - if self.__id and not already_killed: - cursor_id = self.__id - assert self.__address is not None - address = _CursorAddress(self.__address, f"{self.__dbname}.{self.__collname}") - else: - # Skip killCursors. - cursor_id = 0 - address = None - self.__collection.database.client._cleanup_cursor( - synchronous, - cursor_id, - address, - self.__sock_mgr, - self.__session, - self.__explicit_session, - ) - if not self.__explicit_session: - self.__session = None - self.__sock_mgr = None - - def close(self) -> None: - """Explicitly close / kill this cursor.""" - self.__die(True) - - def __query_spec(self) -> Mapping[str, Any]: + def _query_spec(self) -> Mapping[str, Any]: """Get the spec to use for a query.""" operators: dict[str, Any] = {} - if self.__ordering: - operators["$orderby"] = self.__ordering - if self.__explain: + if self._ordering: + operators["$orderby"] = self._ordering + if self._explain: operators["$explain"] = True - if self.__hint: - operators["$hint"] = self.__hint - if self.__let: - operators["let"] = self.__let - if self.__comment: - operators["$comment"] = self.__comment - if self.__max_scan: - operators["$maxScan"] = self.__max_scan - if self.__max_time_ms is not None: - operators["$maxTimeMS"] = self.__max_time_ms - if self.__max: - operators["$max"] = self.__max - if self.__min: - operators["$min"] = self.__min - if self.__return_key is not None: - operators["$returnKey"] = self.__return_key - if self.__show_record_id is not None: + if self._hint: + operators["$hint"] = self._hint + if self._let: + operators["let"] = self._let + if self._comment: + operators["$comment"] = self._comment + if self._max_scan: + operators["$maxScan"] = self._max_scan + if self._max_time_ms is not None: + operators["$maxTimeMS"] = self._max_time_ms + if self._max: + operators["$max"] = self._max + if self._min: + operators["$min"] = self._min + if self._return_key is not None: + operators["$returnKey"] = self._return_key + if self._show_record_id is not None: # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. - operators["$showDiskLoc"] = self.__show_record_id - if self.__snapshot is not None: - operators["$snapshot"] = self.__snapshot + operators["$showDiskLoc"] = self._show_record_id + if self._snapshot is not None: + operators["$snapshot"] = self._snapshot if operators: # Make a shallow copy so we can cleanly rewind or clone. - spec = dict(self.__spec) + spec = dict(self._spec) # Allow-listed commands must be wrapped in $query. if "$query" not in spec: @@ -487,16 +370,14 @@ def __query_spec(self) -> Mapping[str, Any]: # that breaks commands like count and find_and_modify. # Checking spec.keys()[0] covers the case that the spec # was passed as an instance of SON or OrderedDict. - elif "query" in self.__spec and ( - len(self.__spec) == 1 or next(iter(self.__spec)) == "query" - ): - return {"$query": self.__spec} + elif "query" in self._spec and (len(self._spec) == 1 or next(iter(self._spec)) == "query"): + return {"$query": self._spec} - return self.__spec + return self._spec - def __check_okay_to_chain(self) -> None: + def _check_okay_to_chain(self) -> None: """Check if it is okay to chain more options onto this cursor.""" - if self.__retrieved or self.__id is not None: + if self._retrieved or self._id is not None: raise InvalidOperation("cannot set options after executing query") def add_option(self, mask: int) -> Cursor[_DocumentType]: @@ -507,16 +388,16 @@ def add_option(self, mask: int) -> Cursor[_DocumentType]: """ if not isinstance(mask, int): raise TypeError("mask must be an int") - self.__check_okay_to_chain() + self._check_okay_to_chain() if mask & _QUERY_OPTIONS["exhaust"]: - if self.__limit: + if self._limit: raise InvalidOperation("Can't use limit and exhaust together.") - if self.__collection.database.client.is_mongos: + if self._collection.database.client.is_mongos: raise InvalidOperation("Exhaust cursors are not supported by mongos") - self.__exhaust = True + self._exhaust = True - self.__query_flags |= mask + self._query_flags |= mask return self def remove_option(self, mask: int) -> Cursor[_DocumentType]: @@ -527,12 +408,12 @@ def remove_option(self, mask: int) -> Cursor[_DocumentType]: """ if not isinstance(mask, int): raise TypeError("mask must be an int") - self.__check_okay_to_chain() + self._check_okay_to_chain() if mask & _QUERY_OPTIONS["exhaust"]: - self.__exhaust = False + self._exhaust = False - self.__query_flags &= ~mask + self._query_flags &= ~mask return self def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: @@ -551,9 +432,9 @@ def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: """ if not isinstance(allow_disk_use, bool): raise TypeError("allow_disk_use must be a bool") - self.__check_okay_to_chain() + self._check_okay_to_chain() - self.__allow_disk_use = allow_disk_use + self._allow_disk_use = allow_disk_use return self def limit(self, limit: int) -> Cursor[_DocumentType]: @@ -570,12 +451,12 @@ def limit(self, limit: int) -> Cursor[_DocumentType]: """ if not isinstance(limit, int): raise TypeError("limit must be an integer") - if self.__exhaust: + if self._exhaust: raise InvalidOperation("Can't use limit and exhaust together.") - self.__check_okay_to_chain() + self._check_okay_to_chain() - self.__empty = False - self.__limit = limit + self._empty = False + self._limit = limit return self def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: @@ -600,9 +481,9 @@ def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: raise TypeError("batch_size must be an integer") if batch_size < 0: raise ValueError("batch_size must be >= 0") - self.__check_okay_to_chain() + self._check_okay_to_chain() - self.__batch_size = batch_size + self._batch_size = batch_size return self def skip(self, skip: int) -> Cursor[_DocumentType]: @@ -620,9 +501,9 @@ def skip(self, skip: int) -> Cursor[_DocumentType]: raise TypeError("skip must be an integer") if skip < 0: raise ValueError("skip must be >= 0") - self.__check_okay_to_chain() + self._check_okay_to_chain() - self.__skip = skip + self._skip = skip return self def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: @@ -639,9 +520,9 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: """ if not isinstance(max_time_ms, int) and max_time_ms is not None: raise TypeError("max_time_ms must be an integer or None") - self.__check_okay_to_chain() + self._check_okay_to_chain() - self.__max_time_ms = max_time_ms + self._max_time_ms = max_time_ms return self def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: @@ -662,11 +543,11 @@ def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_Documen """ if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError("max_await_time_ms must be an integer or None") - self.__check_okay_to_chain() + self._check_okay_to_chain() # Ignore max_await_time_ms if not tailable or await_data is False. - if self.__query_flags & CursorType.TAILABLE_AWAIT: - self.__max_await_time_ms = max_await_time_ms + if self._query_flags & CursorType.TAILABLE_AWAIT: + self._max_await_time_ms = max_await_time_ms return self @@ -718,44 +599,47 @@ def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_ :param index: An integer or slice index to be applied to this cursor """ - self.__check_okay_to_chain() - self.__empty = False - if isinstance(index, slice): - if index.step is not None: - raise IndexError("Cursor instances do not support slice steps") - - skip = 0 - if index.start is not None: - if index.start < 0: + if _IS_SYNC: + self._check_okay_to_chain() + self._empty = False + if isinstance(index, slice): + if index.step is not None: + raise IndexError("Cursor instances do not support slice steps") + + skip = 0 + if index.start is not None: + if index.start < 0: + raise IndexError("Cursor instances do not support negative indices") + skip = index.start + + if index.stop is not None: + limit = index.stop - skip + if limit < 0: + raise IndexError( + "stop index must be greater than start index for slice %r" % index + ) + if limit == 0: + self._empty = True + else: + limit = 0 + + self._skip = skip + self._limit = limit + return self + + if isinstance(index, int): + if index < 0: raise IndexError("Cursor instances do not support negative indices") - skip = index.start - - if index.stop is not None: - limit = index.stop - skip - if limit < 0: - raise IndexError( - "stop index must be greater than start index for slice %r" % index - ) - if limit == 0: - self.__empty = True - else: - limit = 0 - - self.__skip = skip - self.__limit = limit - return self - - if isinstance(index, int): - if index < 0: - raise IndexError("Cursor instances do not support negative indices") - clone = self.clone() - clone.skip(index + self.__skip) - clone.limit(-1) # use a hard limit - clone.__query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 - for doc in clone: - return doc - raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor instances" % index) + clone = self.clone() + clone.skip(index + self._skip) + clone.limit(-1) # use a hard limit + clone._query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 + for doc in clone: # type: ignore[attr-defined] + return doc + raise IndexError("no such item for Cursor instance") + raise TypeError("index %r cannot be applied to Cursor instances" % index) + else: + raise IndexError("Cursor does not support indexing") def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: """**DEPRECATED** - Limit the number of documents to scan when @@ -772,8 +656,8 @@ def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side execution time. """ - self.__check_okay_to_chain() - self.__max_scan = max_scan + self._check_okay_to_chain() + self._max_scan = max_scan return self def max(self, spec: _Sort) -> Cursor[_DocumentType]: @@ -794,8 +678,8 @@ def max(self, spec: _Sort) -> Cursor[_DocumentType]: if not isinstance(spec, (list, tuple)): raise TypeError("spec must be an instance of list or tuple") - self.__check_okay_to_chain() - self.__max = dict(spec) + self._check_okay_to_chain() + self._max = dict(spec) return self def min(self, spec: _Sort) -> Cursor[_DocumentType]: @@ -816,8 +700,8 @@ def min(self, spec: _Sort) -> Cursor[_DocumentType]: if not isinstance(spec, (list, tuple)): raise TypeError("spec must be an instance of list or tuple") - self.__check_okay_to_chain() - self.__min = dict(spec) + self._check_okay_to_chain() + self._min = dict(spec) return self def sort( @@ -863,39 +747,11 @@ def sort( :param direction: only used if `key_or_list` is a single key, if not given :data:`~pymongo.ASCENDING` is assumed """ - self.__check_okay_to_chain() - keys = helpers._index_list(key_or_list, direction) - self.__ordering = helpers._index_document(keys) + self._check_okay_to_chain() + keys = helpers_shared._index_list(key_or_list, direction) + self._ordering = helpers_shared._index_document(keys) return self - def distinct(self, key: str) -> list: - """Get a list of distinct values for `key` among all documents - in the result set of this query. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`str`. - - The :meth:`distinct` method obeys the - :attr:`~pymongo.collection.Collection.read_preference` of the - :class:`~pymongo.collection.Collection` instance on which - :meth:`~pymongo.collection.Collection.find` was called. - - :param key: name of key for which we want to get the distinct values - - .. seealso:: :meth:`pymongo.collection.Collection.distinct` - """ - options: dict[str, Any] = {} - if self.__spec: - options["query"] = self.__spec - if self.__max_time_ms is not None: - options["maxTimeMS"] = self.__max_time_ms - if self.__comment: - options["comment"] = self.__comment - if self.__collation is not None: - options["collation"] = self.__collation - - return self.__collection.distinct(key, session=self.__session, **options) - def explain(self) -> _DocumentType: """Returns an explain plan record for this cursor. @@ -909,22 +765,22 @@ def explain(self) -> _DocumentType: .. seealso:: The MongoDB documentation on `explain `_. """ c = self.clone() - c.__explain = True + c._explain = True # always use a hard limit for explains - if c.__limit: - c.__limit = -abs(c.__limit) + if c._limit: + c._limit = -abs(c._limit) return next(c) - def __set_hint(self, index: Optional[_Hint]) -> None: + def _set_hint(self, index: Optional[_Hint]) -> None: if index is None: - self.__hint = None + self._hint = None return if isinstance(index, str): - self.__hint = index + self._hint = index else: - self.__hint = helpers._index_document(index) + self._hint = helpers_shared._index_document(index) def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: """Adds a 'hint', telling Mongo the proper index to use for the query. @@ -946,8 +802,8 @@ def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: :param index: index to hint on (as an index specifier) """ - self.__check_okay_to_chain() - self.__set_hint(index) + self._check_okay_to_chain() + self._set_hint(index) return self def comment(self, comment: Any) -> Cursor[_DocumentType]: @@ -960,8 +816,8 @@ def comment(self, comment: Any) -> Cursor[_DocumentType]: .. versionadded:: 2.7 """ - self.__check_okay_to_chain() - self.__comment = comment + self._check_okay_to_chain() + self._comment = comment return self def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: @@ -991,19 +847,19 @@ def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ """ - self.__check_okay_to_chain() + self._check_okay_to_chain() if not isinstance(code, Code): code = Code(code) # Avoid overwriting a filter argument that was given by the user # when updating the spec. spec: dict[str, Any] - if self.__has_filter: - spec = dict(self.__spec) + if self._has_filter: + spec = dict(self._spec) else: - spec = cast(dict, self.__spec) + spec = cast(dict, self._spec) spec["$where"] = code - self.__spec = spec + self._spec = spec return self def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: @@ -1017,93 +873,10 @@ def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: :param collation: An instance of :class:`~pymongo.collation.Collation`. """ - self.__check_okay_to_chain() - self.__collation = validate_collation_or_none(collation) + self._check_okay_to_chain() + self._collation = validate_collation_or_none(collation) return self - def __send_message(self, operation: Union[_Query, _GetMore]) -> None: - """Send a query or getmore operation and handles the response. - - If operation is ``None`` this is an exhaust cursor, which reads - the next result batch off the exhaust socket instead of - sending getMore messages to the server. - - Can raise ConnectionFailure. - """ - client = self.__collection.database.client - # OP_MSG is required to support exhaust cursors with encryption. - if client._encrypter and self.__exhaust: - raise InvalidOperation("exhaust cursors do not support auto encryption") - - try: - response = client._run_operation( - operation, self._unpack_response, address=self.__address - ) - except OperationFailure as exc: - if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - if exc.timeout: - self.__die(False) - else: - self.close() - # If this is a tailable cursor the error is likely - # due to capped collection roll over. Setting - # self.__killed to True ensures Cursor.alive will be - # False. No need to re-raise. - if ( - exc.code in _CURSOR_CLOSED_ERRORS - and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"] - ): - return - raise - except ConnectionFailure: - self.__killed = True - self.close() - raise - except Exception: - self.close() - raise - - self.__address = response.address - if isinstance(response, PinnedResponse): - if not self.__sock_mgr: - self.__sock_mgr = _ConnectionManager(response.conn, response.more_to_come) - - cmd_name = operation.name - docs = response.docs - if response.from_command: - if cmd_name != "explain": - cursor = docs[0]["cursor"] - self.__id = cursor["id"] - if cmd_name == "find": - documents = cursor["firstBatch"] - # Update the namespace used for future getMore commands. - ns = cursor.get("ns") - if ns: - self.__dbname, self.__collname = ns.split(".", 1) - else: - documents = cursor["nextBatch"] - self.__data = deque(documents) - self.__retrieved += len(documents) - else: - self.__id = 0 - self.__data = deque(docs) - self.__retrieved += len(docs) - else: - assert isinstance(response.data, _OpReply) - self.__id = response.data.cursor_id - self.__data = deque(docs) - self.__retrieved += response.data.number_returned - - if self.__id == 0: - # Don't wait for garbage collection to call __del__, return the - # socket and the session to the pool now. - self.close() - - if self.__limit and self.__id and self.__limit <= self.__retrieved: - self.close() - def _unpack_response( self, response: Union[_OpReply, _OpMsg], @@ -1114,75 +887,11 @@ def _unpack_response( ) -> Sequence[_DocumentOut]: return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) - def _read_preference(self) -> _ServerMode: - if self.__read_preference is None: + def _get_read_preference(self) -> _ServerMode: + if self._read_preference is None: # Save the read preference for getMore commands. - self.__read_preference = self.__collection._read_preference_for(self.session) - return self.__read_preference - - def _refresh(self) -> int: - """Refreshes the cursor with more data from Mongo. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. - """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if not self.__session: - self.__session = self.__collection.database.client._ensure_session() - - if self.__id is None: # Query - if (self.__min or self.__max) and not self.__hint: - raise InvalidOperation( - "Passing a 'hint' is required when using the min/max query" - " option to ensure the query utilizes the correct index" - ) - q = self._query_class( - self.__query_flags, - self.__collection.database.name, - self.__collection.name, - self.__skip, - self.__query_spec(), - self.__projection, - self.__codec_options, - self._read_preference(), - self.__limit, - self.__batch_size, - self.__read_concern, - self.__collation, - self.__session, - self.__collection.database.client, - self.__allow_disk_use, - self.__exhaust, - ) - self.__send_message(q) - elif self.__id: # Get More - if self.__limit: - limit = self.__limit - self.__retrieved - if self.__batch_size: - limit = min(limit, self.__batch_size) - else: - limit = self.__batch_size - # Exhaust cursors don't send getMore messages. - g = self._getmore_class( - self.__dbname, - self.__collname, - limit, - self.__id, - self.__codec_options, - self._read_preference(), - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - self.__exhaust, - self.__comment, - ) - self.__send_message(g) - - return len(self.__data) + self._read_preference = self._collection._read_preference_for(self.session) + return self._read_preference @property def alive(self) -> bool: @@ -1204,7 +913,7 @@ def alive(self) -> bool: return False after :meth:`next` fails to retrieve the next batch of results from the server. """ - return bool(len(self.__data) or (not self.__killed)) + return bool(len(self._data) or (not self._killed)) @property def cursor_id(self) -> Optional[int]: @@ -1212,7 +921,7 @@ def cursor_id(self) -> Optional[int]: .. versionadded:: 2.2 """ - return self.__id + return self._id @property def address(self) -> Optional[tuple[str, Any]]: @@ -1221,7 +930,7 @@ def address(self) -> Optional[tuple[str, Any]]: .. versionchanged:: 3.0 Renamed from "conn_id". """ - return self.__address + return self._address @property def session(self) -> Optional[ClientSession]: @@ -1229,30 +938,10 @@ def session(self) -> Optional[ClientSession]: .. versionadded:: 3.6 """ - if self.__explicit_session: - return self.__session + if self._explicit_session: + return self._session return None - def __iter__(self) -> Cursor[_DocumentType]: - return self - - def next(self) -> _DocumentType: - """Advance the cursor.""" - if self.__empty: - raise StopIteration - if len(self.__data) or self._refresh(): - return self.__data.popleft() - else: - raise StopIteration - - __next__ = next - - def __enter__(self) -> Cursor[_DocumentType]: - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - def __copy__(self) -> Cursor[_DocumentType]: """Support function for `copy.copy()`. @@ -1312,6 +1001,321 @@ def _deepcopy( y[key] = value return y + def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, f"{self._dbname}.{self._collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session, self._explicit_session + ) + if not self._explicit_session: + self._session = None + self._sock_mgr = None + + def _die_lock(self) -> None: + """Closes this cursor.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + self._explicit_session, + ) + if not self._explicit_session: + self._session = None + self._sock_mgr = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die_lock() + + def distinct(self, key: str) -> list: + """Get a list of distinct values for `key` among all documents + in the result set of this query. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + The :meth:`distinct` method obeys the + :attr:`~pymongo.collection.Collection.read_preference` of the + :class:`~pymongo.collection.Collection` instance on which + :meth:`~pymongo.collection.Collection.find` was called. + + :param key: name of key for which we want to get the distinct values + + .. seealso:: :meth:`pymongo.collection.Collection.distinct` + """ + options: dict[str, Any] = {} + if self._spec: + options["query"] = self._spec + if self._max_time_ms is not None: + options["maxTimeMS"] = self._max_time_ms + if self._comment: + options["comment"] = self._comment + if self._collation is not None: + options["collation"] = self._collation + + return self._collection.distinct(key, session=self._session, **options) + + def _send_message(self, operation: Union[_Query, _GetMore]) -> None: + """Send a query or getmore operation and handles the response. + + If operation is ``None`` this is an exhaust cursor, which reads + the next result batch off the exhaust socket instead of + sending getMore messages to the server. + + Can raise ConnectionFailure. + """ + client = self._collection.database.client + # OP_MSG is required to support exhaust cursors with encryption. + if client._encrypter and self._exhaust: + raise InvalidOperation("exhaust cursors do not support auto encryption") + + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self._exhaust: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + self.close() + # If this is a tailable cursor the error is likely + # due to capped collection roll over. Setting + # self._killed to True ensures Cursor.alive will be + # False. No need to re-raise. + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self._query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): + return + raise + except ConnectionFailure: + self._killed = True + self.close() + raise + except Exception: + self.close() + raise + + self._address = response.address + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + + cmd_name = operation.name + docs = response.docs + if response.from_command: + if cmd_name != "explain": + cursor = docs[0]["cursor"] + self._id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self._dbname, self._collname = ns.split(".", 1) + else: + documents = cursor["nextBatch"] + self._data = deque(documents) + self._retrieved += len(documents) + else: + self._id = 0 + self._data = deque(docs) + self._retrieved += len(docs) + else: + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + self._data = deque(docs) + self._retrieved += response.data.number_returned + + if self._id == 0: + # Don't wait for garbage collection to call __del__, return the + # socket and the session to the pool now. + self.close() + + if self._limit and self._id and self._limit <= self._retrieved: + self.close() + + def _refresh(self) -> int: + """Refreshes the cursor with more data from Mongo. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. + """ + if len(self._data) or self._killed: + return len(self._data) + + if not self._session: + self._session = self._collection.database.client._ensure_session() + + if self._id is None: # Query + if (self._min or self._max) and not self._hint: + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self._query_flags, + self._collection.database.name, + self._collection.name, + self._skip, + self._query_spec(), + self._projection, + self._codec_options, + self._get_read_preference(), + self._limit, + self._batch_size, + self._read_concern, + self._collation, + self._session, + self._collection.database.client, + self._allow_disk_use, + self._exhaust, + ) + self._send_message(q) + elif self._id: # Get More + if self._limit: + limit = self._limit - self._retrieved + if self._batch_size: + limit = min(limit, self._batch_size) + else: + limit = self._batch_size + # Exhaust cursors don't send getMore messages. + g = self._getmore_class( + self._dbname, + self._collname, + limit, + self._id, + self._codec_options, + self._get_read_preference(), + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + self._exhaust, + self._comment, + ) + self._send_message(g) + + return len(self._data) + + def rewind(self) -> Cursor[_DocumentType]: + """Rewind this cursor to its unevaluated state. + + Reset this cursor if it has been partially or completely evaluated. + Any options that are present on the cursor will remain in effect. + Future iterating performed on this cursor will cause new queries to + be sent to the server, even if the resultant data has already been + retrieved by this cursor. + """ + self.close() + self._data = deque() + self._id = None + self._address = None + self._retrieved = 0 + self._killed = False + + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + self._supports_exhaust() + if self._empty: + raise StopIteration + if len(self._data) or self._refresh(): + return self._data.popleft() + else: + raise StopIteration + + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + """Get all or some documents from the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + self._supports_exhaust() + if self._empty: + return False + if len(self._data) or self._refresh(): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + def __next__(self) -> _DocumentType: + return self.next() + + def __iter__(self) -> Cursor[_DocumentType]: + return self + + def __enter__(self) -> Cursor[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. + + To use:: + + >>> cursor.to_list() + + Or, so read at most n items from the cursor:: + + >>> cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + class RawBatchCursor(Cursor, Generic[_DocumentType]): """A cursor / iterator over raw batches of BSON data from a query result.""" From 8b5479c7463c7a00cfa4b1aabca79a02bee09f5f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:05 -0500 Subject: [PATCH 1472/2111] PYTHON-4747 Rename pymongo/auth.py to pymongo/synchronous/auth.py --- pymongo/{ => synchronous}/auth.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/auth.py (100%) diff --git a/pymongo/auth.py b/pymongo/synchronous/auth.py similarity index 100% rename from pymongo/auth.py rename to pymongo/synchronous/auth.py From 134d00a1024a423d520aaf58c4fd8ef5cff3e454 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:05 -0500 Subject: [PATCH 1473/2111] PYTHON-4747 Sync auth.py to master --- pymongo/auth.py | 22 ++++ pymongo/synchronous/auth.py | 230 ++---------------------------------- 2 files changed, 35 insertions(+), 217 deletions(-) create mode 100644 pymongo/auth.py diff --git a/pymongo/auth.py b/pymongo/auth.py new file mode 100644 index 0000000000..a65113841d --- /dev/null +++ b/pymongo/auth.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Auth API for compatibility.""" +from __future__ import annotations + +from pymongo.auth_shared import * # noqa: F403 +from pymongo.synchronous.auth import * # noqa: F403 +from pymongo.synchronous.auth import __doc__ as original_doc + +__doc__ = original_doc diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py index 8bc4145abc..9a3477679d 100644 --- a/pymongo/synchronous/auth.py +++ b/pymongo/synchronous/auth.py @@ -18,16 +18,12 @@ import functools import hashlib import hmac -import os import socket -import typing from base64 import standard_b64decode, standard_b64encode -from collections import namedtuple from typing import ( TYPE_CHECKING, Any, Callable, - Dict, Mapping, MutableMapping, Optional, @@ -36,21 +32,23 @@ from urllib.parse import quote from bson.binary import Binary -from pymongo.auth_aws import _authenticate_aws -from pymongo.auth_oidc import ( - _authenticate_oidc, - _get_authenticator, - _OIDCAzureCallback, - _OIDCGCPCallback, - _OIDCProperties, - _OIDCTestCallback, +from pymongo.auth_shared import ( + MongoCredential, + _authenticate_scram_start, + _parse_scram_response, + _xor, ) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.saslprep import saslprep +from pymongo.synchronous.auth_aws import _authenticate_aws +from pymongo.synchronous.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, +) if TYPE_CHECKING: from pymongo.hello import Hello - from pymongo.pool import Connection + from pymongo.synchronous.pool import Connection HAVE_KERBEROS = True _USE_PRINCIPAL = False @@ -66,209 +64,7 @@ HAVE_KERBEROS = False -MECHANISMS = frozenset( - [ - "GSSAPI", - "MONGODB-CR", - "MONGODB-OIDC", - "MONGODB-X509", - "MONGODB-AWS", - "PLAIN", - "SCRAM-SHA-1", - "SCRAM-SHA-256", - "DEFAULT", - ] -) -"""The authentication mechanisms supported by PyMongo.""" - - -class _Cache: - __slots__ = ("data",) - - _hash_val = hash("_Cache") - - def __init__(self) -> None: - self.data = None - - def __eq__(self, other: object) -> bool: - # Two instances must always compare equal. - if isinstance(other, _Cache): - return True - return NotImplemented - - def __ne__(self, other: object) -> bool: - if isinstance(other, _Cache): - return False - return NotImplemented - - def __hash__(self) -> int: - return self._hash_val - - -MongoCredential = namedtuple( - "MongoCredential", - ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], -) -"""A hashable namedtuple of values used for authentication.""" - - -GSSAPIProperties = namedtuple( - "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] -) -"""Mechanism properties for GSSAPI authentication.""" - - -_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) -"""Mechanism properties for MONGODB-AWS authentication.""" - - -def _build_credentials_tuple( - mech: str, - source: Optional[str], - user: str, - passwd: str, - extra: Mapping[str, Any], - database: Optional[str], -) -> MongoCredential: - """Build and return a mechanism specific credentials tuple.""" - if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: - raise ConfigurationError(f"{mech} requires a username.") - if mech == "GSSAPI": - if source is not None and source != "$external": - raise ValueError("authentication source must be $external or None for GSSAPI") - properties = extra.get("authmechanismproperties", {}) - service_name = properties.get("SERVICE_NAME", "mongodb") - canonicalize = bool(properties.get("CANONICALIZE_HOST_NAME", False)) - service_realm = properties.get("SERVICE_REALM") - props = GSSAPIProperties( - service_name=service_name, - canonicalize_host_name=canonicalize, - service_realm=service_realm, - ) - # Source is always $external. - return MongoCredential(mech, "$external", user, passwd, props, None) - elif mech == "MONGODB-X509": - if passwd is not None: - raise ConfigurationError("Passwords are not supported by MONGODB-X509") - if source is not None and source != "$external": - raise ValueError("authentication source must be $external or None for MONGODB-X509") - # Source is always $external, user can be None. - return MongoCredential(mech, "$external", user, None, None, None) - elif mech == "MONGODB-AWS": - if user is not None and passwd is None: - raise ConfigurationError("username without a password is not supported by MONGODB-AWS") - if source is not None and source != "$external": - raise ConfigurationError( - "authentication source must be $external or None for MONGODB-AWS" - ) - - properties = extra.get("authmechanismproperties", {}) - aws_session_token = properties.get("AWS_SESSION_TOKEN") - aws_props = _AWSProperties(aws_session_token=aws_session_token) - # user can be None for temporary link-local EC2 credentials. - return MongoCredential(mech, "$external", user, passwd, aws_props, None) - elif mech == "MONGODB-OIDC": - properties = extra.get("authmechanismproperties", {}) - callback = properties.get("OIDC_CALLBACK") - human_callback = properties.get("OIDC_HUMAN_CALLBACK") - environ = properties.get("ENVIRONMENT") - token_resource = properties.get("TOKEN_RESOURCE", "") - default_allowed = [ - "*.mongodb.net", - "*.mongodb-dev.net", - "*.mongodb-qa.net", - "*.mongodbgov.net", - "localhost", - "127.0.0.1", - "::1", - ] - allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed) - msg = ( - "authentication with MONGODB-OIDC requires providing either a callback or a environment" - ) - if passwd is not None: - msg = "password is not supported by MONGODB-OIDC" - raise ConfigurationError(msg) - if callback or human_callback: - if environ is not None: - raise ConfigurationError(msg) - if callback and human_callback: - msg = "cannot set both OIDC_CALLBACK and OIDC_HUMAN_CALLBACK" - raise ConfigurationError(msg) - elif environ is not None: - if environ == "test": - if user is not None: - msg = "test environment for MONGODB-OIDC does not support username" - raise ConfigurationError(msg) - callback = _OIDCTestCallback() - elif environ == "azure": - passwd = None - if not token_resource: - raise ConfigurationError( - "Azure environment for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" - ) - callback = _OIDCAzureCallback(token_resource) - elif environ == "gcp": - passwd = None - if not token_resource: - raise ConfigurationError( - "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" - ) - callback = _OIDCGCPCallback(token_resource) - else: - raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}") - else: - raise ConfigurationError(msg) - - oidc_props = _OIDCProperties( - callback=callback, - human_callback=human_callback, - environment=environ, - allowed_hosts=allowed_hosts, - token_resource=token_resource, - username=user, - ) - return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache()) - - elif mech == "PLAIN": - source_database = source or database or "$external" - return MongoCredential(mech, source_database, user, passwd, None, None) - else: - source_database = source or database or "admin" - if passwd is None: - raise ConfigurationError("A password is required.") - return MongoCredential(mech, source_database, user, passwd, None, _Cache()) - - -def _xor(fir: bytes, sec: bytes) -> bytes: - """XOR two byte strings together.""" - return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) - - -def _parse_scram_response(response: bytes) -> Dict[bytes, bytes]: - """Split a scram response into key, value pairs.""" - return dict( - typing.cast(typing.Tuple[bytes, bytes], item.split(b"=", 1)) - for item in response.split(b",") - ) - - -def _authenticate_scram_start( - credentials: MongoCredential, mechanism: str -) -> tuple[bytes, bytes, MutableMapping[str, Any]]: - username = credentials.username - user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") - nonce = standard_b64encode(os.urandom(32)) - first_bare = b"n=" + user + b",r=" + nonce - - cmd = { - "saslStart": 1, - "mechanism": mechanism, - "payload": Binary(b"n,," + first_bare), - "autoAuthorize": 1, - "options": {"skipEmptyExchange": True}, - } - return nonce, first_bare, cmd +_IS_SYNC = True def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanism: str) -> None: @@ -553,7 +349,7 @@ def _authenticate_default(credentials: MongoCredential, conn: Connection) -> Non source = credentials.source cmd = conn.hello_cmd() cmd["saslSupportedMechs"] = source + "." + credentials.username - mechs = conn.command(source, cmd, publish_events=False).get("saslSupportedMechs", []) + mechs = (conn.command(source, cmd, publish_events=False)).get("saslSupportedMechs", []) if "SCRAM-SHA-256" in mechs: return _authenticate_scram(credentials, conn, "SCRAM-SHA-256") else: From 4b85f84137532a43bdcf6cc9f31f80b7116bd6d1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:06 -0500 Subject: [PATCH 1474/2111] PYTHON-4747 Rename pymongo/database.py to pymongo/synchronous/database.py --- pymongo/{ => synchronous}/database.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pymongo/{ => synchronous}/database.py (100%) diff --git a/pymongo/database.py b/pymongo/synchronous/database.py similarity index 100% rename from pymongo/database.py rename to pymongo/synchronous/database.py From 600da067d1aa92f23ac69dafe73d42d94d2d3332 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:06 -0500 Subject: [PATCH 1475/2111] PYTHON-4747 Sync database.py to master --- pymongo/database.py | 22 ++ pymongo/synchronous/database.py | 463 +++++++++++++++++--------------- 2 files changed, 271 insertions(+), 214 deletions(-) create mode 100644 pymongo/database.py diff --git a/pymongo/database.py b/pymongo/database.py new file mode 100644 index 0000000000..bbd05702dc --- /dev/null +++ b/pymongo/database.py @@ -0,0 +1,22 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous Database API for compatibility.""" +from __future__ import annotations + +from pymongo.synchronous.database import * # noqa: F403 +from pymongo.synchronous.database import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["Database"] # noqa: F405 diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 70580694e5..1cd8ee643b 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -25,7 +25,6 @@ NoReturn, Optional, Sequence, - TypeVar, Union, cast, overload, @@ -35,43 +34,31 @@ from bson.dbref import DBRef from bson.timestamp import Timestamp from pymongo import _csot, common -from pymongo.aggregation import _DatabaseAggregationCommand -from pymongo.change_stream import DatabaseChangeStream -from pymongo.collection import Collection -from pymongo.command_cursor import CommandCursor from pymongo.common import _ecoc_coll_name, _esc_coll_name -from pymongo.errors import CollectionInvalid, InvalidName, InvalidOperation +from pymongo.database_shared import _check_name, _CodecDocumentType +from pymongo.errors import CollectionInvalid, InvalidOperation from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.aggregation import _DatabaseAggregationCommand +from pymongo.synchronous.change_stream import DatabaseChangeStream +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline if TYPE_CHECKING: import bson import bson.codec_options - from pymongo.client_session import ClientSession - from pymongo.mongo_client import MongoClient - from pymongo.pool import Connection from pymongo.read_concern import ReadConcern - from pymongo.server import Server + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server from pymongo.write_concern import WriteConcern - -def _check_name(name: str) -> None: - """Check if a database name is valid.""" - if not name: - raise InvalidName("database name cannot be the empty string") - - for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: - if invalid_char in name: - raise InvalidName("database names cannot contain the character %r" % invalid_char) - - -_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) +_IS_SYNC = True class Database(common.BaseObject, Generic[_DocumentType]): - """A Mongo database.""" - def __init__( self, client: MongoClient[_DocumentType], @@ -132,25 +119,30 @@ def __init__( read_concern or client.read_concern, ) + from pymongo.synchronous.mongo_client import MongoClient + if not isinstance(name, str): raise TypeError("name must be an instance of str") + if not isinstance(client, MongoClient): + raise TypeError(f"MongoClient required but given {type(client)}") + if name != "$external": _check_name(name) - self.__name = name - self.__client: MongoClient[_DocumentType] = client + self._name = name + self._client: MongoClient[_DocumentType] = client self._timeout = client.options.timeout @property def client(self) -> MongoClient[_DocumentType]: """The client instance for this :class:`Database`.""" - return self.__client + return self._client @property def name(self) -> str: """The name of this :class:`Database`.""" - return self.__name + return self._name def with_options( self, @@ -190,8 +182,8 @@ def with_options( .. versionadded:: 3.8 """ return Database( - self.client, - self.__name, + self._client, + self._name, codec_options or self.codec_options, read_preference or self.read_preference, write_concern or self.write_concern, @@ -200,17 +192,17 @@ def with_options( def __eq__(self, other: Any) -> bool: if isinstance(other, Database): - return self.__client == other.client and self.__name == other.name + return self._client == other.client and self._name == other.name return NotImplemented def __ne__(self, other: Any) -> bool: return not self == other def __hash__(self) -> int: - return hash((self.__client, self.__name)) + return hash((self._client, self._name)) def __repr__(self) -> str: - return f"Database({self.__client!r}, {self.__name!r})" + return f"{type(self).__name__}({self._client!r}, {self._name!r})" def __getattr__(self, name: str) -> Collection[_DocumentType]: """Get a collection of this database by name. @@ -221,7 +213,7 @@ def __getattr__(self, name: str) -> Collection[_DocumentType]: """ if name.startswith("_"): raise AttributeError( - f"Database has no attribute {name!r}. To access the {name}" + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" f" collection, use database[{name!r}]." ) return self.__getitem__(name) @@ -316,6 +308,152 @@ def _get_encrypted_fields( return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) return None + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Database' object is not iterable") + + next = __next__ + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> DatabaseChangeStream[_DocumentType]: + """Watch changes on this database. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which + iterates over changes on all collections in this database. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with db.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = DatabaseChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + @_csot.apply def create_collection( self, @@ -356,7 +494,7 @@ def create_collection( :class:`~pymongo.collation.Collation`. :param session: a :class:`~pymongo.client_session.ClientSession`. - :param `check_exists`: if True (the default), send a listCollections command to + :param check_exists: if True (the default), send a listCollections command to check if the collection already exists before creation. :param kwargs: additional keyword arguments will be passed as options for the `create collection command`_ @@ -450,26 +588,27 @@ def create_collection( if clustered_index: common.validate_is_mapping("clusteredIndex", clustered_index) - with self.__client._tmp_session(session) as s: + with self._client._tmp_session(session) as s: # Skip this check in a transaction where listCollections is not # supported. if ( check_exists and (not s or not s.in_transaction) - and name in self.list_collection_names(filter={"name": name}, session=s) + and name in self._list_collection_names(filter={"name": name}, session=s) ): raise CollectionInvalid("collection %s already exists" % name) - return Collection( + coll = Collection( self, name, - True, + False, codec_options, read_preference, write_concern, read_concern, - session=s, - **kwargs, ) + coll._create(kwargs, s) + + return coll def aggregate( self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any @@ -550,134 +689,6 @@ def aggregate( operation=_Op.AGGREGATE, ) - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional[ClientSession] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - show_expanded_events: Optional[bool] = None, - ) -> DatabaseChangeStream[_DocumentType]: - """Watch changes on this database. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which - iterates over changes on all collections in this database. - - Introduced in MongoDB 4.0. - - .. code-block:: python - - with db.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with db.watch([{"$match": {"operationType": "insert"}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error("...") - - For a precise description of the resume process see the - `change streams specification`_. - - :param pipeline: A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - :param full_document: The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - :param full_document_before_change: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - :param resume_after: A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - :param max_await_time_ms: The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - :param batch_size: The maximum number of documents to return - per batch. - :param collation: The :class:`~pymongo.collation.Collation` - to use for the aggregation. - :param start_at_operation_time: If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - :param session: a - :class:`~pymongo.client_session.ClientSession`. - :param start_after: The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - :param comment: A user-provided comment to attach to this - command. - :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. - - :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. - - .. versionchanged:: 4.3 - Added `show_expanded_events` parameter. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionadded:: 3.7 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md - """ - return DatabaseChangeStream( - self, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - show_expanded_events=show_expanded_events, - ) - @overload def _command( self, @@ -733,9 +744,9 @@ def _command( command = {command: value} command.update(kwargs) - with self.__client._tmp_session(session) as s: + with self._client._tmp_session(session) as s: return conn.command( - self.__name, + self._name, command, read_preference, codec_options, @@ -744,7 +755,7 @@ def _command( write_concern=write_concern, parse_write_concern_error=parse_write_concern_error, session=s, - client=self.__client, + client=self._client, ) @overload @@ -890,7 +901,7 @@ def command( if read_preference is None: read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - with self.__client._conn_for_reads(read_preference, session, operation=command_name) as ( + with self._client._conn_for_reads(read_preference, session, operation=command_name) as ( connection, read_preference, ): @@ -901,7 +912,7 @@ def command( check, allowable_errors, read_preference, - opts, + opts, # type: ignore[arg-type] session=session, **kwargs, ) @@ -912,7 +923,7 @@ def cursor_command( command: Union[str, MutableMapping[str, Any]], value: Any = 1, read_preference: Optional[_ServerMode] = None, - codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + codec_options: Optional[CodecOptions[_CodecDocumentType]] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, max_await_time_ms: Optional[int] = None, @@ -941,7 +952,7 @@ def cursor_command( read preference configured for the transaction. Otherwise, defaults to :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - :param codec_options`: A :class:`~bson.codec_options.CodecOptions` + :param codec_options: A :class:`~bson.codec_options.CodecOptions` instance. :param session: A :class:`~pymongo.client_session.ClientSession`. @@ -971,14 +982,14 @@ def cursor_command( else: command_name = next(iter(command)) - with self.__client._tmp_session(session, close=False) as tmp_session: + with self._client._tmp_session(session, close=False) as tmp_session: opts = codec_options or DEFAULT_CODEC_OPTIONS if read_preference is None: read_preference = ( tmp_session and tmp_session._txn_read_preference() ) or ReadPreference.PRIMARY - with self.__client._conn_for_reads(read_preference, tmp_session, command_name) as ( + with self._client._conn_for_reads(read_preference, tmp_session, command_name) as ( conn, read_preference, ): @@ -1031,7 +1042,7 @@ def _cmd( session=session, ) - return self.__client._retryable_read(_cmd, read_preference, session, operation) + return self._client._retryable_read(_cmd, read_preference, session, operation) def _list_collections( self, @@ -1047,10 +1058,10 @@ def _list_collections( ) cmd = {"listCollections": 1, "cursor": {}} cmd.update(kwargs) - with self.__client._tmp_session(session, close=False) as tmp_session: - cursor = self._command(conn, cmd, read_preference=read_preference, session=tmp_session)[ - "cursor" - ] + with self._client._tmp_session(session, close=False) as tmp_session: + cursor = ( + self._command(conn, cmd, read_preference=read_preference, session=tmp_session) + )["cursor"] cmd_cursor = CommandCursor( coll, cursor, @@ -1062,7 +1073,7 @@ def _list_collections( cmd_cursor._maybe_pin_connection(conn) return cmd_cursor - def list_collections( + def _list_collections_helper( self, session: Optional[ClientSession] = None, filter: Optional[Mapping[str, Any]] = None, @@ -1102,23 +1113,18 @@ def _cmd( ) -> CommandCursor[MutableMapping[str, Any]]: return self._list_collections(conn, session, read_preference=read_preference, **kwargs) - return self.__client._retryable_read( + return self._client._retryable_read( _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS ) - def list_collection_names( + def list_collections( self, session: Optional[ClientSession] = None, filter: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, - ) -> list[str]: - """Get a list of all the collection names in this database. - - For example, to list all non-system collections:: - - filter = {"name": {"$regex": r"^(?!system\\.)"}} - db.list_collection_names(filter=filter) + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. :param session: a :class:`~pymongo.client_session.ClientSession`. @@ -1133,11 +1139,19 @@ def list_collection_names( options differ by server version. - .. versionchanged:: 3.8 - Added the ``filter`` and ``**kwargs`` parameters. + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. .. versionadded:: 3.6 """ + return self._list_collections_helper(session, filter, comment, **kwargs) + + def _list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: if comment is not None: kwargs["comment"] = comment if filter is None: @@ -1151,7 +1165,43 @@ def list_collection_names( if not filter or (len(filter) == 1 and "name" in filter): kwargs["nameOnly"] = True - return [result["name"] for result in self.list_collections(session=session, **kwargs)] + return [ + result["name"] for result in self._list_collections_helper(session=session, **kwargs) + ] + + def list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Get a list of all the collection names in this database. + + For example, to list all non-system collections:: + + filter = {"name": {"$regex": r"^(?!system\\.)"}} + db.list_collection_names(filter=filter) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + .. versionchanged:: 3.8 + Added the ``filter`` and ``**kwargs`` parameters. + + .. versionadded:: 3.6 + """ + return self._list_collection_names(session, filter, comment, **kwargs) def _drop_helper( self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None @@ -1160,7 +1210,7 @@ def _drop_helper( if comment is not None: command["comment"] = comment - with self.__client._conn_for_writes(session, operation=_Op.DROP) as connection: + with self._client._conn_for_writes(session, operation=_Op.DROP) as connection: return self._command( connection, command, @@ -1330,21 +1380,6 @@ def validate_collection( return result - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'Database' object is not iterable") - - next = __next__ - - def __bool__(self) -> NoReturn: - raise NotImplementedError( - "Database objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: database is not None" - ) - def dereference( self, dbref: DBRef, @@ -1378,10 +1413,10 @@ def dereference( """ if not isinstance(dbref, DBRef): raise TypeError("cannot dereference a %s" % type(dbref)) - if dbref.database is not None and dbref.database != self.__name: + if dbref.database is not None and dbref.database != self._name: raise ValueError( "trying to dereference a DBRef that points to " - f"another database ({dbref.database!r} not {self.__name!r})" + f"another database ({dbref.database!r} not {self._name!r})" ) return self[dbref.collection].find_one( {"_id": dbref.id}, session=session, comment=comment, **kwargs From 264c6edb9c821525ee40084e46ae238b7978ffd0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:06 -0500 Subject: [PATCH 1476/2111] PYTHON-4747 Rename gridfs/grid_file.py to gridfs/synchronous/grid_file.py --- gridfs/{ => synchronous}/grid_file.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename gridfs/{ => synchronous}/grid_file.py (100%) diff --git a/gridfs/grid_file.py b/gridfs/synchronous/grid_file.py similarity index 100% rename from gridfs/grid_file.py rename to gridfs/synchronous/grid_file.py From 8e3e652c3967d99b453cfd1da26e2b7ed60f337d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Sep 2024 12:16:06 -0500 Subject: [PATCH 1477/2111] PYTHON-4747 Sync grid_file.py to master --- gridfs/grid_file.py | 18 + gridfs/synchronous/grid_file.py | 1339 ++++++++++++++++++++++++++----- 2 files changed, 1174 insertions(+), 183 deletions(-) create mode 100644 gridfs/grid_file.py diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py new file mode 100644 index 0000000000..b2cab71515 --- /dev/null +++ b/gridfs/grid_file.py @@ -0,0 +1,18 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Re-import of synchronous gridfs API for compatibility.""" +from __future__ import annotations + +from gridfs.synchronous.grid_file import * # noqa: F403 diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index ac72c144b7..655f05f57a 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -16,20 +16,33 @@ from __future__ import annotations import datetime +import inspect import io import math -import os -import warnings -from typing import Any, Iterable, Mapping, NoReturn, Optional +from collections import abc +from typing import Any, Iterable, Mapping, NoReturn, Optional, cast from bson.int64 import Int64 from bson.objectid import ObjectId from gridfs.errors import CorruptGridFile, FileExists, NoFile -from pymongo import ASCENDING -from pymongo.client_session import ClientSession -from pymongo.collection import Collection -from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.cursor import Cursor +from gridfs.grid_file_shared import ( + _C_INDEX, + _CHUNK_OVERHEAD, + _F_INDEX, + _SEEK_CUR, + _SEEK_END, + _SEEK_SET, + _UPLOAD_BUFFER_CHUNKS, + _UPLOAD_BUFFER_SIZE, + DEFAULT_CHUNK_SIZE, + EMPTY, + NEWLN, + _clear_entity_type_registry, + _grid_in_property, + _grid_out_property, +) +from pymongo import ASCENDING, DESCENDING, WriteConcern, _csot +from pymongo.common import validate_string from pymongo.errors import ( BulkWriteError, ConfigurationError, @@ -38,114 +51,979 @@ InvalidOperation, OperationFailure, ) -from pymongo.helpers import _check_write_command_response -from pymongo.read_preferences import ReadPreference - -_SEEK_SET = os.SEEK_SET -_SEEK_CUR = os.SEEK_CUR -_SEEK_END = os.SEEK_END - -EMPTY = b"" -NEWLN = b"\n" - -"""Default chunk size, in bytes.""" -# Slightly under a power of 2, to work well with server's record allocations. -DEFAULT_CHUNK_SIZE = 255 * 1024 -# The number of chunked bytes to buffer before calling insert_many. -_UPLOAD_BUFFER_SIZE = MAX_MESSAGE_SIZE -# The number of chunk documents to buffer before calling insert_many. -_UPLOAD_BUFFER_CHUNKS = 100000 -# Rough BSON overhead of a chunk document not including the chunk data itself. -# Essentially len(encode({"_id": ObjectId(), "files_id": ObjectId(), "n": 1, "data": ""})) -_CHUNK_OVERHEAD = 60 - -_C_INDEX: dict[str, Any] = {"files_id": ASCENDING, "n": ASCENDING} -_F_INDEX: dict[str, Any] = {"filename": ASCENDING, "uploadDate": ASCENDING} - - -def _grid_in_property( - field_name: str, - docstring: str, - read_only: Optional[bool] = False, - closed_only: Optional[bool] = False, -) -> Any: - """Create a GridIn property.""" - warn_str = "" - if docstring.startswith("DEPRECATED,"): - warn_str = ( - f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" +from pymongo.helpers_shared import _check_write_command_response +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.client_session import ClientSession +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.helpers import next + +_IS_SYNC = True + + +def _disallow_transactions(session: Optional[ClientSession]) -> None: + if session and session.in_transaction: + raise InvalidOperation("GridFS does not support multi-document transactions") + + +class GridFS: + """An instance of GridFS on top of a single Database.""" + + def __init__(self, database: Database, collection: str = "fs"): + """Create a new instance of :class:`GridFS`. + + Raises :class:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + :param database: database to use + :param collection: root collection to use + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.1 + Indexes are only ensured on the first write to the DB. + + .. versionchanged:: 3.0 + `database` must use an acknowledged + :attr:`~pymongo.database.Database.write_concern` + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(database, Database): + raise TypeError("database must be an instance of Database") + + database = _clear_entity_type_registry(database) + + if not database.write_concern.acknowledged: + raise ConfigurationError("database must use acknowledged write_concern") + + self._collection = database[collection] + self._files = self._collection.files + self._chunks = self._collection.chunks + + def new_file(self, **kwargs: Any) -> GridIn: + """Create a new file in GridFS. + + Returns a new :class:`~gridfs.grid_file.GridIn` instance to + which data can be written. Any keyword arguments will be + passed through to :meth:`~gridfs.grid_file.GridIn`. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param kwargs: keyword arguments for file creation + """ + return GridIn(self._collection, **kwargs) + + def put(self, data: Any, **kwargs: Any) -> Any: + """Put data in GridFS as a new file. + + Equivalent to doing:: + + with fs.new_file(**kwargs) as f: + f.write(data) + + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see + :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the + ``"_id"`` of the created file. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param data: data to be written as a file. + :param kwargs: keyword arguments for file creation + + .. versionchanged:: 3.0 + w=0 writes to GridFS are now prohibited. + """ + with GridIn(self._collection, **kwargs) as grid_file: + grid_file.write(data) + return grid_file._id + + def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: + """Get a file from GridFS by ``"_id"``. + + Returns an instance of :class:`~gridfs.grid_file.GridOut`, + which provides a file-like interface for reading. + + :param file_id: ``"_id"`` of the file to get + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = GridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. + gout.open() + return gout + + def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: + """Get a file from GridFS by ``"filename"`` or metadata fields. + + Returns a version of the file in GridFS whose filename matches + `filename` and whose metadata fields match the supplied keyword + arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. + + Version numbering is a convenience atop the GridFS API provided + by MongoDB. If more than one file matches the query (either by + `filename` alone, by metadata fields, or by a combination of + both), then version ``-1`` will be the most recently uploaded + matching file, ``-2`` the second most recently + uploaded, etc. Version ``0`` will be the first version + uploaded, ``1`` the second version, etc. So if three versions + have been uploaded, then version ``0`` is the same as version + ``-3``, version ``1`` is the same as version ``-2``, and + version ``2`` is the same as version ``-1``. + + Raises :class:`~gridfs.errors.NoFile` if no such version of + that file exists. + + :param filename: ``"filename"`` of the file to get, or `None` + :param version: version of the file to get (defaults + to -1, the most recent version uploaded) + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``get_version`` no longer ensures indexes. + """ + query = kwargs + if filename is not None: + query["filename"] = filename + + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if version is None: + version = -1 + if version < 0: + skip = abs(version) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) + try: + doc = next(cursor) + return GridOut(self._collection, file_document=doc, session=session) + except StopIteration: + raise NoFile("no version %d for filename %r" % (version, filename)) from None + + def get_last_version( + self, + filename: Optional[str] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: + """Get the most recent version of a file in GridFS by ``"filename"`` + or metadata fields. + + Equivalent to calling :meth:`get_version` with the default + `version` (``-1``). + + :param filename: ``"filename"`` of the file to get, or `None` + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return self.get_version(filename=filename, session=session, **kwargs) + + # TODO add optional safe mode for chunk removal? + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: + """Delete a file from GridFS by ``"_id"``. + + Deletes all data belonging to the file with ``"_id"``: + `file_id`. + + .. warning:: Any processes/threads reading from the file while + this method is executing will likely see an invalid/corrupt + file. Care should be taken to avoid concurrent reads to a file + while it is being deleted. + + .. note:: Deletes of non-existent files are considered successful + since the end result is the same: no file with that _id remains. + + :param file_id: ``"_id"`` of the file to delete + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``delete`` no longer ensures indexes. + """ + _disallow_transactions(session) + self._files.delete_one({"_id": file_id}, session=session) + self._chunks.delete_many({"files_id": file_id}, session=session) + + def list(self, session: Optional[ClientSession] = None) -> list[str]: + """List the names of all files stored in this instance of + :class:`GridFS`. + + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``list`` no longer ensures indexes. + """ + _disallow_transactions(session) + # With an index, distinct includes documents with no filename + # as None. + return [ + name for name in self._files.distinct("filename", session=session) if name is not None + ] + + def find_one( + self, + filter: Optional[Any] = None, + session: Optional[ClientSession] = None, + *args: Any, + **kwargs: Any, + ) -> Optional[GridOut]: + """Get a single file from gridfs. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, + or ``None`` if no matching file is found. For example: + + .. code-block: python + + file = fs.find_one({"filename": "lisa.txt"}) + + :param filter: a dictionary specifying + the query to be performing OR any other type to be used as + the value for a query for ``"_id"`` in the file collection. + :param args: any additional positional arguments are + the same as the arguments to :meth:`find`. + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: any additional keyword arguments + are the same as the arguments to :meth:`find`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + if filter is not None and not isinstance(filter, abc.Mapping): + filter = {"_id": filter} + + _disallow_transactions(session) + for f in self.find(filter, *args, session=session, **kwargs): + return f + + return None + + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: + """Query GridFS for files. + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. For example:: + + for grid_out in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_out.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.ClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: A query document that selects which files + to include in the result set. Can be an empty document to include + all files. + :param skip: the number of files to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return + :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~gridfs.grid_file.GridOutCursor` + corresponding to this query. + + .. versionchanged:: 3.0 + Removed the read_preference, tag_sets, and + secondary_acceptable_latency_ms options. + .. versionadded:: 2.7 + .. seealso:: The MongoDB documentation on `find `_. + """ + return GridOutCursor(self._collection, *args, **kwargs) + + def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> bool: + """Check if a file exists in this instance of :class:`GridFS`. + + The file to check for can be specified by the value of its + ``_id`` key, or by passing in a query document. A query + document can be passed in as dictionary, or by using keyword + arguments. Thus, the following three calls are equivalent: + + >>> fs.exists(file_id) + >>> fs.exists({"_id": file_id}) + >>> fs.exists(_id=file_id) + + As are the following two calls: + + >>> fs.exists({"filename": "mike.txt"}) + >>> fs.exists(filename="mike.txt") + + And the following two: + + >>> fs.exists({"foo": {"$gt": 12}}) + >>> fs.exists(foo={"$gt": 12}) + + Returns ``True`` if a matching file exists, ``False`` + otherwise. Calls to :meth:`exists` will not automatically + create appropriate indexes; application developers should be + sure to create indexes if needed and as appropriate. + + :param document_or_id: query document, or _id of the + document to check for + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: keyword arguments are used as a + query document, if they're present. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + if kwargs: + f = self._files.find_one(kwargs, ["_id"], session=session) + else: + f = self._files.find_one(document_or_id, ["_id"], session=session) + + return f is not None + + +class GridFSBucket: + """An instance of GridFS on top of a single Database.""" + + def __init__( + self, + db: Database, + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: + """Create a new instance of :class:`GridFSBucket`. + + Raises :exc:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` + is not acknowledged. + + :param database: database to use. + :param bucket_name: The name of the bucket. Defaults to 'fs'. + :param chunk_size_bytes: The chunk size in bytes. Defaults + to 255KB. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` + (the default) db.write_concern is used. + :param read_preference: The read preference to use. If + ``None`` (the default) db.read_preference is used. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionadded:: 3.1 + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(db, Database): + raise TypeError("database must be an instance of Database") + + db = _clear_entity_type_registry(db) + + wtc = write_concern if write_concern is not None else db.write_concern + if not wtc.acknowledged: + raise ConfigurationError("write concern must be acknowledged") + + self._bucket_name = bucket_name + self._collection = db[bucket_name] + self._chunks: Collection = self._collection.chunks.with_options( + write_concern=write_concern, read_preference=read_preference ) - def getter(self: Any) -> Any: - if warn_str: - warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) - if closed_only and not self._closed: - raise AttributeError("can only get %r on a closed file" % field_name) - # Protect against PHP-237 - if field_name == "length": - return self._file.get(field_name, 0) - return self._file.get(field_name, None) - - def setter(self: Any, value: Any) -> Any: - if warn_str: - warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) - if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) - self._file[field_name] = value - - if read_only: - docstring += "\n\nThis attribute is read-only." - elif closed_only: - docstring = "{}\n\n{}".format( - docstring, - "This attribute is read-only and " - "can only be read after :meth:`close` " - "has been called.", + self._files: Collection = self._collection.files.with_options( + write_concern=write_concern, read_preference=read_preference ) - if not read_only and not closed_only: - return property(getter, setter, doc=docstring) - return property(getter, doc=docstring) + self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the filename, and can choose to add any + additional information in the metadata field of the file document or + modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream( + "test_file", chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` -def _grid_out_property(field_name: str, docstring: str) -> Any: - """Create a GridOut property.""" - warn_str = "" - if docstring.startswith("DEPRECATED,"): - warn_str = ( - f"GridOut property '{field_name}' is deprecated and will be removed in PyMongo 5.0" - ) + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) - def getter(self: Any) -> Any: - if warn_str: - warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) - self._ensure_file() + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata - # Protect against PHP-237 - if field_name == "length": - return self._file.get(field_name, 0) - return self._file.get(field_name, None) + return GridIn(self._collection, session=session, **opts) - docstring += "\n\nThis attribute is read-only." - return property(getter, doc=docstring) + def open_upload_stream_with_id( + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the file id and filename, and can choose to add + any additional information in the metadata field of the file document + or modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream_with_id( + ObjectId(), + "test_file", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. The id must not have + already been used for another file. + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "_id": file_id, + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return GridIn(self._collection, session=session, **opts) + + @_csot.apply + def upload_from_stream( + self, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> ObjectId: + """Uploads a user file to a GridFS bucket. + + Reads the contents of the user file from `source` and uploads + it to the file `filename`. Source can be a string or file-like object. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + file_id = fs.upload_from_stream( + "test_file", + "data I want to store!", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) + + Returns the _id of the uploaded file. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param source: The source stream of the content to be uploaded. Must be + a file-like object that implements :meth:`read` or a string. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` -def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: - """Clear the given database/collection object's type registry.""" - codecopts = entity.codec_options.with_options(type_registry=None) - return entity.with_options(codec_options=codecopts, **kwargs) + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin: + gin.write(source) + return cast(ObjectId, gin._id) -def _disallow_transactions(session: Optional[ClientSession]) -> None: - if session and session.in_transaction: - raise InvalidOperation("GridFS does not support multi-document transactions") + @_csot.apply + def upload_from_stream_with_id( + self, + file_id: Any, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> None: + """Uploads a user file to a GridFS bucket with a custom file id. + + Reads the contents of the user file from `source` and uploads + it to the file `filename`. Source can be a string or file-like object. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + file_id = fs.upload_from_stream( + ObjectId(), + "test_file", + "data I want to store!", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. The id must not have + already been used for another file. + :param filename: The name of the file to upload. + :param source: The source stream of the content to be uploaded. Must be + a file-like object that implements :meth:`read` or a string. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + with self.open_upload_stream_with_id( + file_id, filename, chunk_size_bytes, metadata, session=session + ) as gin: + gin.write(source) + + def open_download_stream( + self, file_id: Any, session: Optional[ClientSession] = None + ) -> GridOut: + """Opens a Stream from which the application can read the contents of + the stored file specified by file_id. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # get _id of file to read. + file_id = fs.upload_from_stream("test_file", "data I want to store!") + grid_out = fs.open_download_stream(file_id) + contents = grid_out.read() + + Returns an instance of :class:`~gridfs.grid_file.GridOut`. + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be downloaded. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = GridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. + gout.open() + return gout + + @_csot.apply + def download_to_stream( + self, file_id: Any, destination: Any, session: Optional[ClientSession] = None + ) -> None: + """Downloads the contents of the stored file specified by file_id and + writes the contents to `destination`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get _id of file to read + file_id = fs.upload_from_stream("test_file", "data I want to store!") + # Get file to write to + file = open('myfile','wb+') + fs.download_to_stream(file_id, file) + file.seek(0) + contents = file.read() + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be downloaded. + :param destination: a file-like object implementing :meth:`write`. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + with self.open_download_stream(file_id, session=session) as gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break + destination.write(chunk) + + @_csot.apply + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: + """Given an file_id, delete this stored file's files collection document + and associated chunks from a GridFS bucket. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get _id of file to delete + file_id = fs.upload_from_stream("test_file", "data I want to store!") + fs.delete(file_id) + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be deleted. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + res = self._files.delete_one({"_id": file_id}, session=session) + self._chunks.delete_many({"files_id": file_id}, session=session) + if not res.deleted_count: + raise NoFile("no file could be deleted because none matched %s" % file_id) + + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: + """Find and return the files collection documents that match ``filter`` + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. + + For example:: + + for grid_data in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_data.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.ClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: Search query. + :param batch_size: The number of documents to return per + batch. + :param limit: The maximum number of documents to return. + :param no_cursor_timeout: The server normally times out idle + cursors after an inactivity period (10 minutes) to prevent excess + memory use. Set this option to True prevent that. + :param skip: The number of documents to skip before + returning. + :param sort: The order by which to sort results. Defaults to + None. + """ + return GridOutCursor(self._collection, *args, **kwargs) + + def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[ClientSession] = None + ) -> GridOut: + """Opens a Stream from which the application can read the contents of + `filename` and optional `revision`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + grid_out = fs.open_download_stream_by_name("test_file") + contents = grid_out.read() + + Returns an instance of :class:`~gridfs.grid_file.GridOut`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + + Raises :exc:`~ValueError` filename is not a string. + + :param filename: The name of the file to read from. + :param revision: Which revision (documents with the same + filename and different uploadDate) of the file to retrieve. + Defaults to -1 (the most recent revision). + :param session: a + :class:`~pymongo.client_session.ClientSession` + + :Note: Revision numbers are defined as follows: + + - 0 = the original stored file + - 1 = the first revision + - 2 = the second revision + - etc... + - -2 = the second most recent revision + - -1 = the most recent revision + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + query = {"filename": filename} + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if revision < 0: + skip = abs(revision) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) + try: + grid_file = next(cursor) + return GridOut(self._collection, file_document=grid_file, session=session) + except StopIteration: + raise NoFile("no version %d for filename %r" % (revision, filename)) from None + + @_csot.apply + def download_to_stream_by_name( + self, + filename: str, + destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None, + ) -> None: + """Write the contents of `filename` (with optional `revision`) to + `destination`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get file to write to + file = open('myfile','wb') + fs.download_to_stream_by_name("test_file", file) + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to read from. + :param destination: A file-like object that implements :meth:`write`. + :param revision: Which revision (documents with the same + filename and different uploadDate) of the file to retrieve. + Defaults to -1 (the most recent revision). + :param session: a + :class:`~pymongo.client_session.ClientSession` + + :Note: Revision numbers are defined as follows: + + - 0 = the original stored file + - 1 = the first revision + - 2 = the second revision + - etc... + - -2 = the second most recent revision + - -1 = the most recent revision + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + with self.open_download_stream_by_name(filename, revision, session=session) as gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break + destination.write(chunk) + + def rename( + self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None + ) -> None: + """Renames the stored file with the specified file_id. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get _id of file to rename + file_id = fs.upload_from_stream("test_file", "data I want to store!") + fs.rename(file_id, "new_test_name") + + Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. + + :param file_id: The _id of the file to be renamed. + :param new_filename: The new name of the file. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + result = self._files.update_one( + {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + "no files could be renamed %r because none " + "matched file_id %i" % (new_filename, file_id) + ) class GridIn: """Class to write data to GridFS.""" def __init__( - self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any + self, + root_collection: Collection, + session: Optional[ClientSession] = None, + **kwargs: Any, ) -> None: """Write a file to GridFS @@ -227,7 +1105,7 @@ def __init__( object.__setattr__(self, "_buffered_docs", []) object.__setattr__(self, "_buffered_docs_size", 0) - def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: + def _create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: @@ -240,11 +1118,11 @@ def __create_index(self, collection: Collection, index_key: Any, unique: bool) - if index_key not in index_keys: collection.create_index(index_key.items(), unique=unique, session=self._session) - def __ensure_indexes(self) -> None: + def _ensure_indexes(self) -> None: if not object.__getattribute__(self, "_ensured_index"): _disallow_transactions(self._session) - self.__create_index(self._coll.files, _F_INDEX, False) - self.__create_index(self._coll.chunks, _C_INDEX, True) + self._create_index(self._coll.files, _F_INDEX, False) + self._create_index(self._coll.chunks, _C_INDEX, True) object.__setattr__(self, "_ensured_index", True) def abort(self) -> None: @@ -281,7 +1159,9 @@ def closed(self) -> bool: _buffered_docs_size: int def __getattr__(self, name: str) -> Any: - if name in self._file: + if name == "_coll": + return object.__getattribute__(self, name) + elif name in self._file: return self._file[name] raise AttributeError("GridIn object has no attribute '%s'" % name) @@ -296,11 +1176,21 @@ def __setattr__(self, name: str, value: Any) -> None: # them now. self._file[name] = value if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "GridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use GridIn.set() instead" + ) + + def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - def __flush_data(self, data: Any, force: bool = False) -> None: + def _flush_data(self, data: Any, force: bool = False) -> None: """Flush `data` to a chunk.""" - self.__ensure_indexes() + self._ensure_indexes() assert len(data) <= self.chunk_size if data: self._buffered_docs.append( @@ -334,16 +1224,16 @@ def __flush_data(self, data: Any, force: bool = False) -> None: self._chunk_number += 1 self._position += len(data) - def __flush_buffer(self, force: bool = False) -> None: + def _flush_buffer(self, force: bool = False) -> None: """Flush the buffer contents out to a chunk.""" - self.__flush_data(self._buffer.getvalue(), force=force) + self._flush_data(self._buffer.getvalue(), force=force) self._buffer.close() self._buffer = io.BytesIO() - def __flush(self) -> Any: + def _flush(self) -> Any: """Flush the file to the database.""" try: - self.__flush_buffer(force=True) + self._flush_buffer(force=True) # The GridFS spec says length SHOULD be an Int64. self._file["length"] = Int64(self._position) self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) @@ -363,7 +1253,7 @@ def close(self) -> None: :meth:`close` more than once is allowed. """ if not self._closed: - self.__flush() + self._flush() object.__setattr__(self, "_closed", True) def read(self, size: int = -1) -> NoReturn: @@ -399,8 +1289,11 @@ def write(self, data: Any) -> None: raise ValueError("cannot write to a closed file") try: - # file-like - read = data.read + if isinstance(data, GridOut): + read = data.read + else: + # file-like + read = data.read except AttributeError: # string if not isinstance(data, (str, bytes)): @@ -412,8 +1305,31 @@ def write(self, data: Any) -> None: raise TypeError( "must specify an encoding for file in order to write str" ) from None - read = io.BytesIO(data).read + read = io.BytesIO(data).read # type: ignore[assignment] + if inspect.iscoroutinefunction(read): + self._write_async(read) + else: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + self.abort() + raise + self._buffer.write(to_write) # type: ignore + if len(to_write) < space: # type: ignore + return # EOF or incomplete + self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: # type: ignore + self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) # type: ignore + + def _write_async(self, read: Any) -> None: if self._buffer.tell() > 0: # Make sure to flush only when _buffer is complete space = self.chunk_size - self._buffer.tell() @@ -426,10 +1342,10 @@ def write(self, data: Any) -> None: self._buffer.write(to_write) if len(to_write) < space: return # EOF or incomplete - self.__flush_buffer() + self._flush_buffer() to_write = read(self.chunk_size) while to_write and len(to_write) == self.chunk_size: - self.__flush_data(to_write) + self._flush_data(to_write) to_write = read(self.chunk_size) self._buffer.write(to_write) @@ -464,7 +1380,11 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: return False -class GridOut(io.IOBase): +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class GridOut(GRIDOUT_BASE_CLASS): # type: ignore + """Class to read data out of GridFS.""" def __init__( @@ -513,17 +1433,19 @@ def __init__( super().__init__() - self.__chunks = root_collection.chunks - self.__files = root_collection.files - self.__file_id = file_id - self.__buffer = EMPTY + self._chunks = root_collection.chunks + self._files = root_collection.files + self._file_id = file_id + self._buffer = EMPTY # Start position within the current buffered chunk. - self.__buffer_pos = 0 - self.__chunk_iter = None + self._buffer_pos = 0 + self._chunk_iter = None # Position within the total file. - self.__position = 0 + self._position = 0 self._file = file_document self._session = session + if not _IS_SYNC: + self.closed = False _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") filename: str = _grid_out_property("filename", "Name of this file.") @@ -548,19 +1470,62 @@ def __init__( ) _file: Any - __chunk_iter: Any + _chunk_iter: Any + + if not _IS_SYNC: + closed: bool - def _ensure_file(self) -> None: + def __next__(self) -> bytes: + line = self.readline() + if line: + return line + raise StopIteration() + + def to_list(self) -> list[bytes]: + return [x for x in self] # noqa: C416, RUF100 + + def readline(self, size: int = -1) -> bytes: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + return self._read_size_or_line(size=size, line=True) + + def readlines(self, size: int = -1) -> list[bytes]: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + self.open() + lines = [] + remainder = int(self.length) - self._position + bytes_read = 0 + while remainder > 0: + line = self._read_size_or_line(line=True) + bytes_read += len(line) + lines.append(line) + remainder = int(self.length) - self._position + if 0 < size < bytes_read: + break + + return lines + + def open(self) -> None: if not self._file: _disallow_transactions(self._session) - self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) + self._file = self._files.find_one({"_id": self._file_id}, session=self._session) if not self._file: raise NoFile( - f"no file in gridfs collection {self.__files!r} with _id {self.__file_id!r}" + f"no file in gridfs collection {self._files!r} with _id {self._file_id!r}" ) def __getattr__(self, name: str) -> Any: - self._ensure_file() + if _IS_SYNC: + self.open() # type: ignore[unused-coroutine] + elif not self._file: + raise InvalidOperation( + "You must call GridOut.open() before accessing the %s property" % name + ) if name in self._file: return self._file[name] raise AttributeError("GridOut object has no attribute '%s'" % name) @@ -572,34 +1537,35 @@ def readchunk(self) -> bytes: """Reads a chunk at a time. If the current position is within a chunk the remainder of the chunk is returned. """ - received = len(self.__buffer) - self.__buffer_pos + self.open() + received = len(self._buffer) - self._buffer_pos chunk_data = EMPTY chunk_size = int(self.chunk_size) if received > 0: - chunk_data = self.__buffer[self.__buffer_pos :] - elif self.__position < int(self.length): - chunk_number = int((received + self.__position) / chunk_size) - if self.__chunk_iter is None: - self.__chunk_iter = _GridOutChunkIterator( - self, self.__chunks, self._session, chunk_number + chunk_data = self._buffer[self._buffer_pos :] + elif self._position < int(self.length): + chunk_number = int((received + self._position) / chunk_size) + if self._chunk_iter is None: + self._chunk_iter = GridOutChunkIterator( + self, self._chunks, self._session, chunk_number ) - chunk = self.__chunk_iter.next() - chunk_data = chunk["data"][self.__position % chunk_size :] + chunk = self._chunk_iter.next() + chunk_data = chunk["data"][self._position % chunk_size :] if not chunk_data: raise CorruptGridFile("truncated chunk") - self.__position += len(chunk_data) - self.__buffer = EMPTY - self.__buffer_pos = 0 + self._position += len(chunk_data) + self._buffer = EMPTY + self._buffer_pos = 0 return chunk_data def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: """Internal read() and readline() helper.""" - self._ensure_file() - remainder = int(self.length) - self.__position + self.open() + remainder = int(self.length) - self._position if size < 0 or size > remainder: size = remainder @@ -610,14 +1576,14 @@ def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: data = [] while received < size: needed = size - received - if self.__buffer: + if self._buffer: # Optimization: Read the buffer with zero byte copies. - buf = self.__buffer - chunk_start = self.__buffer_pos - chunk_data = memoryview(buf)[self.__buffer_pos :] - self.__buffer = EMPTY - self.__buffer_pos = 0 - self.__position += len(chunk_data) + buf = self._buffer + chunk_start = self._buffer_pos + chunk_data = memoryview(buf)[self._buffer_pos :] + self._buffer = EMPTY + self._buffer_pos = 0 + self._position += len(chunk_data) else: buf = self.readchunk() chunk_start = 0 @@ -631,17 +1597,17 @@ def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: if len(chunk_data) > needed: data.append(chunk_data[:needed]) # Optimization: Save the buffer with zero byte copies. - self.__buffer = buf - self.__buffer_pos = chunk_start + needed - self.__position -= len(self.__buffer) - self.__buffer_pos + self._buffer = buf + self._buffer_pos = chunk_start + needed + self._position -= len(self._buffer) - self._buffer_pos else: data.append(chunk_data) received += len(chunk_data) # Detect extra chunks after reading the entire file. - if size == remainder and self.__chunk_iter: + if size == remainder and self._chunk_iter: try: - self.__chunk_iter.next() + self._chunk_iter.next() except StopIteration: pass @@ -663,16 +1629,9 @@ def read(self, size: int = -1) -> bytes: """ return self._read_size_or_line(size=size) - def readline(self, size: int = -1) -> bytes: # type: ignore[override] - """Read one line or up to `size` bytes from the file. - - :param size: the maximum number of bytes to read - """ - return self._read_size_or_line(size=size, line=True) - def tell(self) -> int: """Return the current position of this file.""" - return self.__position + return self._position def seek(self, pos: int, whence: int = _SEEK_SET) -> int: """Set the current position of this file. @@ -692,7 +1651,7 @@ def seek(self, pos: int, whence: int = _SEEK_SET) -> int: if whence == _SEEK_SET: new_pos = pos elif whence == _SEEK_CUR: - new_pos = self.__position + pos + new_pos = self._position + pos elif whence == _SEEK_END: new_pos = int(self.length) + pos else: @@ -702,15 +1661,15 @@ def seek(self, pos: int, whence: int = _SEEK_SET) -> int: raise OSError(22, "Invalid value for `pos` - must be positive") # Optimization, continue using the same buffer and chunk iterator. - if new_pos == self.__position: + if new_pos == self._position: return new_pos - self.__position = new_pos - self.__buffer = EMPTY - self.__buffer_pos = 0 - if self.__chunk_iter: - self.__chunk_iter.close() - self.__chunk_iter = None + self._position = new_pos + self._buffer = EMPTY + self._buffer_pos = 0 + if self._chunk_iter: + self._chunk_iter.close() + self._chunk_iter = None return new_pos def seekable(self) -> bool: @@ -739,10 +1698,13 @@ def __iter__(self) -> GridOut: def close(self) -> None: """Make GridOut more generically file-like.""" - if self.__chunk_iter: - self.__chunk_iter.close() - self.__chunk_iter = None - super().close() + if self._chunk_iter: + self._chunk_iter.close() + self._chunk_iter = None + if _IS_SYNC: + super().close() + else: + self.closed = True def write(self, value: Any) -> NoReturn: raise io.UnsupportedOperation("write") @@ -755,13 +1717,13 @@ def writable(self) -> bool: def __enter__(self) -> GridOut: """Makes it possible to use :class:`GridOut` files - with the context manager protocol. + with the async context manager protocol. """ return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: """Makes it possible to use :class:`GridOut` files - with the context manager protocol. + with the async context manager protocol. """ self.close() return False @@ -788,7 +1750,7 @@ def __del__(self) -> None: pass -class _GridOutChunkIterator: +class GridOutChunkIterator: """Iterates over a file's chunks using a single cursor. Raises CorruptGridFile when encountering any truncated, missing, or extra @@ -818,7 +1780,7 @@ def expected_chunk_length(self, chunk_n: int) -> int: return self._chunk_size return self._length - (self._chunk_size * (self._num_chunks - 1)) - def __iter__(self) -> _GridOutChunkIterator: + def __iter__(self) -> GridOutChunkIterator: return self def _create_cursor(self) -> None: @@ -890,13 +1852,13 @@ def close(self) -> None: class GridOutIterator: def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): - self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) + self._chunk_iter = GridOutChunkIterator(grid_out, chunks, session, 0) def __iter__(self) -> GridOutIterator: return self def next(self) -> bytes: - chunk = self.__chunk_iter.next() + chunk = self._chunk_iter.next() return bytes(chunk["data"]) __next__ = next @@ -932,7 +1894,7 @@ def __init__( collection = _clear_entity_type_registry(collection) # Hold on to the base "fs" collection to create GridOut objects later. - self.__root_collection = collection + self._root_collection = collection super().__init__( collection.files, @@ -949,7 +1911,18 @@ def next(self) -> GridOut: """Get next GridOut object from cursor.""" _disallow_transactions(self.session) next_file = super().next() - return GridOut(self.__root_collection, file_document=next_file, session=self.session) + return GridOut(self._root_collection, file_document=next_file, session=self.session) + + def to_list(self, length: Optional[int] = None) -> list[GridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(self.next()) + return ret __next__ = next @@ -961,4 +1934,4 @@ def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: def _clone_base(self, session: Optional[ClientSession]) -> GridOutCursor: """Creates an empty GridOutCursor for information to be copied into.""" - return GridOutCursor(self.__root_collection, session=session) + return GridOutCursor(self._root_collection, session=session) From 039db2f20a8f8133fce65c75b819aa03c6854dd8 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 11 Sep 2024 08:46:44 -0400 Subject: [PATCH 1478/2111] PYTHON-4590 - Make type guards more compatible (#1850) --- pymongo/asynchronous/collection.py | 3 ++- pymongo/asynchronous/database.py | 3 ++- pymongo/asynchronous/encryption.py | 10 ++++++---- pymongo/asynchronous/mongo_client.py | 3 ++- pymongo/synchronous/collection.py | 3 ++- pymongo/synchronous/database.py | 3 ++- pymongo/synchronous/encryption.py | 8 ++++---- pymongo/synchronous/mongo_client.py | 3 ++- test/unified_format.py | 6 +++--- 9 files changed, 25 insertions(+), 17 deletions(-) diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 6d8dfaf89a..a0b727dc7a 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -231,7 +231,8 @@ def __init__( from pymongo.asynchronous.database import AsyncDatabase if not isinstance(database, AsyncDatabase): - raise TypeError(f"AsyncCollection requires an AsyncDatabase but {type(database)} given") + if not any(cls.__name__ == "AsyncDatabase" for cls in database.__mro__): + raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") if not name or ".." in name: raise InvalidName("collection names cannot be empty") diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index d5eec0134d..fb042972be 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -125,7 +125,8 @@ def __init__( raise TypeError("name must be an instance of str") if not isinstance(client, AsyncMongoClient): - raise TypeError(f"AsyncMongoClient required but given {type(client)}") + if not any(cls.__name__ == "AsyncMongoClient" for cls in client.__mro__): + raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") if name != "$external": _check_name(name) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index c9e3cadd6e..b03af1b8a1 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -597,7 +597,10 @@ def __init__( raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") if not isinstance(key_vault_client, AsyncMongoClient): - raise TypeError(f"AsyncMongoClient required but given {type(key_vault_client)}") + if not any(cls.__name__ == "AsyncMongoClient" for cls in key_vault_client.__mro__): + raise TypeError( + f"AsyncMongoClient required but given {type(key_vault_client).__name__}" + ) self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace @@ -685,9 +688,8 @@ async def create_encrypted_collection( """ if not isinstance(database, AsyncDatabase): - raise TypeError( - f"create_encrypted_collection() requires an AsyncDatabase but {type(database)} given" - ) + if not any(cls.__name__ == "AsyncDatabase" for cls in database.__mro__): + raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") encrypted_fields = deepcopy(encrypted_fields) for i, field in enumerate(encrypted_fields["fields"]): diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index a84fbf2e59..20cc65d9d7 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2446,7 +2446,8 @@ def __init__( self, client: AsyncMongoClient, server: Server, session: Optional[AsyncClientSession] ): if not isinstance(client, AsyncMongoClient): - raise TypeError(f"AsyncMongoClient required but given {type(client)}") + if not any(cls.__name__ == "AsyncMongoClient" for cls in client.__mro__): + raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") self.client = client self.server_address = server.description.address diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 93e24432e5..ff02c65af5 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -234,7 +234,8 @@ def __init__( from pymongo.synchronous.database import Database if not isinstance(database, Database): - raise TypeError(f"Collection requires a Database but {type(database)} given") + if not any(cls.__name__ == "Database" for cls in database.__mro__): + raise TypeError(f"Database required but given {type(database).__name__}") if not name or ".." in name: raise InvalidName("collection names cannot be empty") diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 1cd8ee643b..5f499fff61 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -125,7 +125,8 @@ def __init__( raise TypeError("name must be an instance of str") if not isinstance(client, MongoClient): - raise TypeError(f"MongoClient required but given {type(client)}") + if not any(cls.__name__ == "MongoClient" for cls in client.__mro__): + raise TypeError(f"MongoClient required but given {type(client).__name__}") if name != "$external": _check_name(name) diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 3849cf3f2b..8c6411feb9 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -595,7 +595,8 @@ def __init__( raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") if not isinstance(key_vault_client, MongoClient): - raise TypeError(f"MongoClient required but given {type(key_vault_client)}") + if not any(cls.__name__ == "MongoClient" for cls in key_vault_client.__mro__): + raise TypeError(f"MongoClient required but given {type(key_vault_client).__name__}") self._kms_providers = kms_providers self._key_vault_namespace = key_vault_namespace @@ -683,9 +684,8 @@ def create_encrypted_collection( """ if not isinstance(database, Database): - raise TypeError( - f"create_encrypted_collection() requires a Database but {type(database)} given" - ) + if not any(cls.__name__ == "Database" for cls in database.__mro__): + raise TypeError(f"Database required but given {type(database).__name__}") encrypted_fields = deepcopy(encrypted_fields) for i, field in enumerate(encrypted_fields["fields"]): diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index cec78463b3..ac697405d1 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2434,7 +2434,8 @@ class _MongoClientErrorHandler: def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): if not isinstance(client, MongoClient): - raise TypeError(f"MongoClient required but given {type(client)}") + if not any(cls.__name__ == "MongoClient" for cls in client.__mro__): + raise TypeError(f"MongoClient required but given {type(client).__name__}") self.client = client self.server_address = server.description.address diff --git a/test/unified_format.py b/test/unified_format.py index 63cd23af88..78fc638787 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -580,7 +580,7 @@ def _create_entity(self, entity_spec, uri=None): return elif entity_type == "database": client = self[spec["client"]] - if not isinstance(client, MongoClient): + if type(client).__name__ != "MongoClient": self.test.fail( "Expected entity {} to be of type MongoClient, got {}".format( spec["client"], type(client) @@ -602,7 +602,7 @@ def _create_entity(self, entity_spec, uri=None): return elif entity_type == "session": client = self[spec["client"]] - if not isinstance(client, MongoClient): + if type(client).__name__ != "MongoClient": self.test.fail( "Expected entity {} to be of type MongoClient, got {}".format( spec["client"], type(client) @@ -667,7 +667,7 @@ def create_entities_from_spec(self, entity_spec, uri=None): def get_listener_for_client(self, client_name: str) -> EventListenerUtil: client = self[client_name] - if not isinstance(client, MongoClient): + if type(client).__name__ != "MongoClient": self.test.fail( f"Expected entity {client_name} to be of type MongoClient, got {type(client)}" ) From 63d957c2137cec66821d4d1669ff7a24f4c4f6f0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 11 Sep 2024 11:22:22 -0400 Subject: [PATCH 1479/2111] PYTHON-4590 - Fix MRO type guards (#1852) --- pymongo/asynchronous/collection.py | 3 ++- pymongo/asynchronous/database.py | 3 ++- pymongo/asynchronous/encryption.py | 8 ++++++-- pymongo/asynchronous/mongo_client.py | 3 ++- pymongo/synchronous/collection.py | 3 ++- pymongo/synchronous/database.py | 3 ++- pymongo/synchronous/encryption.py | 6 ++++-- pymongo/synchronous/mongo_client.py | 3 ++- 8 files changed, 22 insertions(+), 10 deletions(-) diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index a0b727dc7a..1ec74aad02 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -231,7 +231,8 @@ def __init__( from pymongo.asynchronous.database import AsyncDatabase if not isinstance(database, AsyncDatabase): - if not any(cls.__name__ == "AsyncDatabase" for cls in database.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncDatabase" for cls in type(database).__mro__): raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") if not name or ".." in name: diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index fb042972be..06c0eca2c1 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -125,7 +125,8 @@ def __init__( raise TypeError("name must be an instance of str") if not isinstance(client, AsyncMongoClient): - if not any(cls.__name__ == "AsyncMongoClient" for cls in client.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncMongoClient" for cls in type(client).__mro__): raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") if name != "$external": diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index b03af1b8a1..9b00c13e10 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -597,7 +597,10 @@ def __init__( raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") if not isinstance(key_vault_client, AsyncMongoClient): - if not any(cls.__name__ == "AsyncMongoClient" for cls in key_vault_client.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any( + cls.__name__ == "AsyncMongoClient" for cls in type(key_vault_client).__mro__ + ): raise TypeError( f"AsyncMongoClient required but given {type(key_vault_client).__name__}" ) @@ -688,7 +691,8 @@ async def create_encrypted_collection( """ if not isinstance(database, AsyncDatabase): - if not any(cls.__name__ == "AsyncDatabase" for cls in database.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncDatabase" for cls in type(database).__mro__): raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") encrypted_fields = deepcopy(encrypted_fields) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 20cc65d9d7..9dba97d12a 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2446,7 +2446,8 @@ def __init__( self, client: AsyncMongoClient, server: Server, session: Optional[AsyncClientSession] ): if not isinstance(client, AsyncMongoClient): - if not any(cls.__name__ == "AsyncMongoClient" for cls in client.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncMongoClient" for cls in type(client).__mro__): raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") self.client = client diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index ff02c65af5..7a41aef31f 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -234,7 +234,8 @@ def __init__( from pymongo.synchronous.database import Database if not isinstance(database, Database): - if not any(cls.__name__ == "Database" for cls in database.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "Database" for cls in type(database).__mro__): raise TypeError(f"Database required but given {type(database).__name__}") if not name or ".." in name: diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 5f499fff61..c57a59e09a 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -125,7 +125,8 @@ def __init__( raise TypeError("name must be an instance of str") if not isinstance(client, MongoClient): - if not any(cls.__name__ == "MongoClient" for cls in client.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__): raise TypeError(f"MongoClient required but given {type(client).__name__}") if name != "$external": diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 8c6411feb9..efef6df9e8 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -595,7 +595,8 @@ def __init__( raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") if not isinstance(key_vault_client, MongoClient): - if not any(cls.__name__ == "MongoClient" for cls in key_vault_client.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "MongoClient" for cls in type(key_vault_client).__mro__): raise TypeError(f"MongoClient required but given {type(key_vault_client).__name__}") self._kms_providers = kms_providers @@ -684,7 +685,8 @@ def create_encrypted_collection( """ if not isinstance(database, Database): - if not any(cls.__name__ == "Database" for cls in database.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "Database" for cls in type(database).__mro__): raise TypeError(f"Database required but given {type(database).__name__}") encrypted_fields = deepcopy(encrypted_fields) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index ac697405d1..21fa57b5d8 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2434,7 +2434,8 @@ class _MongoClientErrorHandler: def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): if not isinstance(client, MongoClient): - if not any(cls.__name__ == "MongoClient" for cls in client.__mro__): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__): raise TypeError(f"MongoClient required but given {type(client).__name__}") self.client = client From 0ca926ccfa32340819ad5232874cd1738421ca0a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:59:57 -0700 Subject: [PATCH 1480/2111] PYTHON-4753 [Build Failure] Async tests missing awaits (#1851) --- test/asynchronous/test_auth.py | 6 +++--- test/test_auth.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index e516ff6798..06f7fb9ca8 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -169,7 +169,7 @@ async def test_gssapi_simple(self): client = AsyncMongoClient(mech_uri) await client[GSSAPI_DB].collection.find_one() - set_name = await client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = async_client_context.replica_set_name if set_name: if not self.service_realm_required: # Without authMechanismProperties @@ -242,7 +242,7 @@ async def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = await client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = async_client_context.replica_set_name if set_name: client = AsyncMongoClient( GSSAPI_HOST, @@ -296,7 +296,7 @@ async def test_sasl_plain(self): client = AsyncMongoClient(uri) await client.ldap.test.find_one() - set_name = await client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = async_client_context.replica_set_name if set_name: client = AsyncMongoClient( SASL_HOST, diff --git a/test/test_auth.py b/test/test_auth.py index 0bf0cfd80f..fa3d0905bb 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -169,7 +169,7 @@ def test_gssapi_simple(self): client = MongoClient(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = client_context.replica_set_name if set_name: if not self.service_realm_required: # Without authMechanismProperties @@ -242,7 +242,7 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = client_context.replica_set_name if set_name: client = MongoClient( GSSAPI_HOST, @@ -296,7 +296,7 @@ def test_sasl_plain(self): client = MongoClient(uri) client.ldap.test.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = client_context.replica_set_name if set_name: client = MongoClient( SASL_HOST, From 9b9cf733682847136c867f192731dbd78fd8de61 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 12 Sep 2024 16:19:05 -0400 Subject: [PATCH 1481/2111] PYTHON-4758 - Only emit warnings for unclosed clients after opening (#1856) --- pymongo/asynchronous/mongo_client.py | 2 +- pymongo/synchronous/mongo_client.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 9dba97d12a..f7fc8e5e81 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -1185,7 +1185,7 @@ def __getitem__(self, name: str) -> database.AsyncDatabase[_DocumentType]: def __del__(self) -> None: """Check that this AsyncMongoClient has been closed and issue a warning if not.""" try: - if not self._closed: + if self._opened and not self._closed: warnings.warn( ( f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 21fa57b5d8..5786bbf5a9 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1185,7 +1185,7 @@ def __getitem__(self, name: str) -> database.Database[_DocumentType]: def __del__(self) -> None: """Check that this MongoClient has been closed and issue a warning if not.""" try: - if not self._closed: + if self._opened and not self._closed: warnings.warn( ( f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" From 0c0633da23a9222538926ecc05195d495721dbb2 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 16 Sep 2024 10:20:34 -0700 Subject: [PATCH 1482/2111] PYTHON-4763 Migrate test_change_stream.py to async (#1853) --- test/asynchronous/test_change_stream.py | 1231 +++++++++++++++++++++++ test/test_change_stream.py | 63 +- tools/synchro.py | 1 + 3 files changed, 1274 insertions(+), 21 deletions(-) create mode 100644 test/asynchronous/test_change_stream.py diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py new file mode 100644 index 0000000000..1b89c43bb7 --- /dev/null +++ b/test/asynchronous/test_change_stream.py @@ -0,0 +1,1231 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the change_stream module.""" +from __future__ import annotations + +import asyncio +import os +import random +import string +import sys +import threading +import time +import uuid +from itertools import product +from typing import no_type_check + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, Version, async_client_context, unittest +from test.unified_format import generate_test_classes +from test.utils import ( + AllowListEventListener, + EventListener, + async_rs_or_single_client, + async_wait_until, +) + +from bson import SON, ObjectId, Timestamp, encode +from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from pymongo import AsyncMongoClient +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.helpers import anext +from pymongo.errors import ( + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, +) +from pymongo.message import _CursorAddress +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestAsyncChangeStreamBase(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + async def change_stream_with_client(self, client, *args, **kwargs): + """Create a change stream using the given client and return it.""" + raise NotImplementedError + + async def change_stream(self, *args, **kwargs): + """Create a change stream using the default client and return it.""" + return await self.change_stream_with_client(self.client, *args, **kwargs) + + async def client_with_listener(self, *commands): + """Return a client with a AllowListEventListener.""" + listener = AllowListEventListener(*commands) + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.close) + return client, listener + + def watched_collection(self, *args, **kwargs): + """Return a collection that is watched by self.change_stream().""" + # Construct a unique collection for each test. + collname = ".".join(self.id().rsplit(".", 2)[1:]) + return self.db.get_collection(collname, *args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + """Cause a change stream invalidate event.""" + raise NotImplementedError + + def generate_unique_collnames(self, numcolls): + """Generate numcolls collection names unique to a test.""" + collnames = [] + for idx in range(1, numcolls + 1): + collnames.append(self.id() + "_" + str(idx)) + return collnames + + async def get_resume_token(self, invalidate=False): + """Get a resume token to use for starting a change stream.""" + # Ensure targeted collection exists before starting. + coll = self.watched_collection(write_concern=WriteConcern("majority")) + await coll.insert_one({}) + + if invalidate: + async with await self.change_stream( + [{"$match": {"operationType": "invalidate"}}] + ) as cs: + if isinstance(cs._target, AsyncMongoClient): + self.skipTest("cluster-level change streams cannot be invalidated") + await self.generate_invalidate_event(cs) + return (await cs.next())["_id"] + else: + async with await self.change_stream() as cs: + await coll.insert_one({"data": 1}) + return (await cs.next())["_id"] + + async def get_start_at_operation_time(self): + """Get an operationTime. Advances the operation clock beyond the most + recently returned timestamp. + """ + optime = (await self.client.admin.command("ping"))["operationTime"] + return Timestamp(optime.time, optime.inc + 1) + + async def insert_one_and_check(self, change_stream, doc): + """Insert a document and check that it shows up in the change stream.""" + raise NotImplementedError + + async def kill_change_stream_cursor(self, change_stream): + """Cause a cursor not found error on the next getMore.""" + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.watched_collection().database.client + await client._close_cursor_now(cursor.cursor_id, address) + + +class APITestsMixin: + @no_type_check + async def test_watch(self): + async with await self.change_stream( + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) + self.assertEqual(1000, change_stream._max_await_time_ms) + self.assertEqual(100, change_stream._batch_size) + self.assertIsInstance(change_stream._cursor, AsyncCommandCursor) + self.assertEqual(1000, change_stream._cursor._max_await_time_ms) + await self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) + _ = await change_stream.next() + resume_token = change_stream.resume_token + with self.assertRaises(TypeError): + await self.change_stream(pipeline={}) + with self.assertRaises(TypeError): + await self.change_stream(full_document={}) + # No Error. + async with await self.change_stream(resume_after=resume_token): + pass + + @no_type_check + async def test_try_next(self): + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + await coll.insert_one({}) + self.addAsyncCleanup(coll.drop) + async with await self.change_stream(max_await_time_ms=250) as stream: + self.assertIsNone(await stream.try_next()) # No changes initially. + await coll.insert_one({}) # Generate a change. + + # On sharded clusters, even majority-committed changes only show + # up once an event that sorts after it shows up on the other + # shard. So, we wait on try_next to eventually return changes. + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + + @no_type_check + async def test_try_next_runs_one_getmore(self): + listener = EventListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + await client.admin.command("ping") + listener.reset() + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + await coll.insert_one({"_id": 1}) + self.addAsyncCleanup(coll.drop) + async with await self.change_stream_with_client(client, max_await_time_ms=250) as stream: + self.assertEqual(listener.started_command_names(), ["aggregate"]) + listener.reset() + + # Confirm that only a single getMore is run even when no documents + # are returned. + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + + # Get at least one change before resuming. + await coll.insert_one({"_id": 2}) + + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + listener.reset() + + # Cause the next request to initiate the resume process. + await self.kill_change_stream_cursor(stream) + listener.reset() + + # The sequence should be: + # - getMore, fail + # - resume with aggregate command + # - no results, return immediately without another getMore + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) + listener.reset() + + # Stream still works after a resume. + await coll.insert_one({"_id": 3}) + + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), {"getMore"}) + self.assertIsNone(await stream.try_next()) + + @no_type_check + async def test_batch_size_is_honored(self): + listener = EventListener() + client = await async_rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + await client.admin.command("ping") + listener.reset() + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + await coll.insert_one({"_id": 1}) + self.addAsyncCleanup(coll.drop) + # Expected batchSize. + expected = {"batchSize": 23} + async with await self.change_stream_with_client( + client, max_await_time_ms=250, batch_size=23 + ) as stream: + # Confirm that batchSize is honored for initial batch. + cmd = listener.started_events[0].command + self.assertEqual(cmd["cursor"], expected) + listener.reset() + # Confirm that batchSize is honored by getMores. + self.assertIsNone(await stream.try_next()) + cmd = listener.started_events[0].command + key = next(iter(expected)) + self.assertEqual(expected[key], cmd[key]) + + # $changeStream.startAtOperationTime was added in 4.0.0. + @no_type_check + @async_client_context.require_version_min(4, 0, 0) + async def test_start_at_operation_time(self): + optime = await self.get_start_at_operation_time() + + coll = self.watched_collection(write_concern=WriteConcern("majority")) + ndocs = 3 + await coll.insert_many([{"data": i} for i in range(ndocs)]) + + async with await self.change_stream(start_at_operation_time=optime) as cs: + for _i in range(ndocs): + await cs.next() + + @no_type_check + async def _test_full_pipeline(self, expected_cs_stage): + client, listener = await self.client_with_listener("aggregate") + async with await self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: + pass + + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) + + @no_type_check + async def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. + """ + await self._test_full_pipeline({}) + + @no_type_check + async def test_iteration(self): + async with await self.change_stream(batch_size=2) as change_stream: + num_inserted = 10 + await self.watched_collection().insert_many([{} for _ in range(num_inserted)]) + inserts_received = 0 + async for change in change_stream: + self.assertEqual(change["operationType"], "insert") + inserts_received += 1 + if inserts_received == num_inserted: + break + await self._test_invalidate_stops_iteration(change_stream) + + @no_type_check + @async_client_context.require_sync + def _test_next_blocks(self, change_stream): + inserted_doc = {"_id": ObjectId()} + changes = [] + t = threading.Thread(target=lambda: changes.append(change_stream.next())) + t.start() + # Sleep for a bit to prove that the call to next() blocks. + time.sleep(1) + self.assertTrue(t.is_alive()) + self.assertFalse(changes) + self.watched_collection().insert_one(inserted_doc) + # Join with large timeout to give the server time to return the change, + # in particular for shard clusters. + t.join(30) + self.assertFalse(t.is_alive()) + self.assertEqual(1, len(changes)) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) + + @no_type_check + @async_client_context.require_sync + async def test_next_blocks(self): + """Test that next blocks until a change is readable""" + # Use a short wait time to speed up the test. + async with await self.change_stream(max_await_time_ms=250) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @async_client_context.require_sync + async def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + async with await self.watched_collection().aggregate( + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @async_client_context.require_sync + def test_concurrent_close(self): + """Ensure a ChangeStream can be closed from another thread.""" + # Use a short wait time to speed up the test. + with self.change_stream(max_await_time_ms=250) as change_stream: + + def iterate_cursor(): + try: + for _ in change_stream: + pass + except OperationFailure as e: + if e.code != 237: # AsyncCursorKilled error code + raise + + t = threading.Thread(target=iterate_cursor) + t.start() + self.watched_collection().insert_one({}) + asyncio.sleep(1) + change_stream.close() + t.join(3) + self.assertFalse(t.is_alive()) + + @no_type_check + async def test_unknown_full_document(self): + """Must rely on the server to raise an error on unknown fullDocument.""" + try: + async with await self.change_stream(full_document="notValidatedByPyMongo"): + pass + except OperationFailure: + pass + + @no_type_check + async def test_change_operations(self): + """Test each operation type.""" + expected_ns = { + "db": self.watched_collection().database.name, + "coll": self.watched_collection().name, + } + async with await self.change_stream() as change_stream: + # Insert. + inserted_doc = {"_id": ObjectId(), "foo": "bar"} + await self.watched_collection().insert_one(inserted_doc) + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) + # Update. + update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}} + await self.watched_collection().update_one(inserted_doc, update_spec) + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "update") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + + expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} + if async_client_context.version.at_least(4, 5, 0): + expected_update_description["truncatedArrays"] = [] + self.assertEqual(expected_update_description, change["updateDescription"]) + # Replace. + await self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "replace") + self.assertEqual(change["ns"], expected_ns) + self.assertEqual(change["fullDocument"], inserted_doc) + # Delete. + await self.watched_collection().delete_one({"foo": "bar"}) + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "delete") + self.assertEqual(change["ns"], expected_ns) + self.assertNotIn("fullDocument", change) + # Invalidate. + await self._test_get_invalidate_event(change_stream) + + @no_type_check + @async_client_context.require_version_min(4, 1, 1) + async def test_start_after(self): + resume_token = await self.get_resume_token(invalidate=True) + + # resume_after cannot resume after invalidate. + with self.assertRaises(OperationFailure): + await self.change_stream(resume_after=resume_token) + + # start_after can resume after invalidate. + async with await self.change_stream(start_after=resume_token) as change_stream: + await self.watched_collection().insert_one({"_id": 2}) + change = await change_stream.next() + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) + + @no_type_check + @async_client_context.require_version_min(4, 1, 1) + async def test_start_after_resume_process_with_changes(self): + resume_token = await self.get_resume_token(invalidate=True) + + async with await self.change_stream( + start_after=resume_token, max_await_time_ms=250 + ) as change_stream: + await self.watched_collection().insert_one({"_id": 2}) + change = await change_stream.next() + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) + + self.assertIsNone(await change_stream.try_next()) + await self.kill_change_stream_cursor(change_stream) + + await self.watched_collection().insert_one({"_id": 3}) + change = await change_stream.next() + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 3}) + + @no_type_check + @async_client_context.require_version_min(4, 2) + async def test_start_after_resume_process_without_changes(self): + resume_token = await self.get_resume_token(invalidate=True) + + async with await self.change_stream( + start_after=resume_token, max_await_time_ms=250 + ) as change_stream: + self.assertIsNone(await change_stream.try_next()) + await self.kill_change_stream_cursor(change_stream) + + await self.watched_collection().insert_one({"_id": 2}) + change = await change_stream.next() + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["fullDocument"], {"_id": 2}) + + +class ProseSpecTestsMixin: + @no_type_check + async def _client_with_listener(self, *commands): + listener = AllowListEventListener(*commands) + client = await async_rs_or_single_client(event_listeners=[listener]) + self.addAsyncCleanup(client.close) + return client, listener + + @no_type_check + async def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): + await self.watched_collection().insert_many([{"data": k} for k in range(batch_size)]) + for _ in range(batch_size): + change = await anext(change_stream) + return change + + def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): + """Predicts what the resume token should currently be for server + versions that don't support postBatchResumeToken. Assumes the stream + has never returned any changes if previous_change is None. + """ + if previous_change is None: + agg_cmd = listener.started_events[0] + stage = agg_cmd.command["pipeline"][0]["$changeStream"] + return stage.get("resumeAfter") or stage.get("startAfter") + + return previous_change["_id"] + + def _get_expected_resume_token(self, stream, listener, previous_change=None): + """Predicts what the resume token should currently be for server + versions that support postBatchResumeToken. Assumes the stream has + never returned any changes if previous_change is None. Assumes + listener is a AllowListEventListener that listens for aggregate and + getMore commands. + """ + if previous_change is None or stream._cursor._has_next(): + token = self._get_expected_resume_token_legacy(stream, listener, previous_change) + if token is not None: + return token + + response = listener.succeeded_events[-1].reply + return response["cursor"]["postBatchResumeToken"] + + @no_type_check + async def _test_raises_error_on_missing_id(self, expected_exception): + """AsyncChangeStream will raise an exception if the server response is + missing the resume token. + """ + async with await self.change_stream([{"$project": {"_id": 0}}]) as change_stream: + await self.watched_collection().insert_one({}) + with self.assertRaises(expected_exception): + await anext(change_stream) + # The cursor should now be closed. + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + @no_type_check + async def _test_update_resume_token(self, expected_rt_getter): + """AsyncChangeStream must continuously track the last seen resumeToken.""" + client, listener = await self._client_with_listener("aggregate", "getMore") + coll = self.watched_collection(write_concern=WriteConcern("majority")) + async with await self.change_stream_with_client(client) as change_stream: + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) + for _ in range(3): + await coll.insert_one({}) + change = await anext(change_stream) + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) + + # Prose test no. 1 + @async_client_context.require_version_min(4, 0, 7) + async def test_update_resume_token(self): + await self._test_update_resume_token(self._get_expected_resume_token) + + # Prose test no. 1 + @async_client_context.require_version_max(4, 0, 7) + async def test_update_resume_token_legacy(self): + await self._test_update_resume_token(self._get_expected_resume_token_legacy) + + # Prose test no. 2 + @async_client_context.require_version_min(4, 1, 8) + async def test_raises_error_on_missing_id_418plus(self): + # Server returns an error on 4.1.8+ + await self._test_raises_error_on_missing_id(OperationFailure) + + # Prose test no. 2 + @async_client_context.require_version_max(4, 1, 8) + async def test_raises_error_on_missing_id_418minus(self): + # PyMongo raises an error + await self._test_raises_error_on_missing_id(InvalidOperation) + + # Prose test no. 3 + @no_type_check + async def test_resume_on_error(self): + async with await self.change_stream() as change_stream: + await self.insert_one_and_check(change_stream, {"_id": 1}) + # Cause a cursor not found error on the next getMore. + await self.kill_change_stream_cursor(change_stream) + await self.insert_one_and_check(change_stream, {"_id": 2}) + + # Prose test no. 4 + @no_type_check + @async_client_context.require_failCommand_fail_point + async def test_no_resume_attempt_if_aggregate_command_fails(self): + # Set non-retryable error on aggregate command. + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} + client, listener = await self._client_with_listener("aggregate", "getMore") + async with self.fail_point(fail_point): + try: + _ = await self.change_stream_with_client(client) + except OperationFailure: + pass + + # Driver should have attempted aggregate command only once. + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") + + # Prose test no. 5 - REMOVED + # Prose test no. 6 - SKIPPED + # Reason: readPreference is not configurable using the watch() helpers + # so we can skip this test. Also, PyMongo performs server selection for + # each operation which ensure compliance with this prose test. + + # Prose test no. 7 + @no_type_check + async def test_initial_empty_batch(self): + async with await self.change_stream() as change_stream: + # The first batch should be empty. + self.assertFalse(change_stream._cursor._has_next()) + cursor_id = change_stream._cursor.cursor_id + self.assertTrue(cursor_id) + await self.insert_one_and_check(change_stream, {}) + # Make sure we're still using the same cursor. + self.assertEqual(cursor_id, change_stream._cursor.cursor_id) + + # Prose test no. 8 + @no_type_check + async def test_kill_cursors(self): + def raise_error(): + raise ServerSelectionTimeoutError("mock error") + + async with await self.change_stream() as change_stream: + await self.insert_one_and_check(change_stream, {"_id": 1}) + # Cause a cursor not found error on the next getMore. + cursor = change_stream._cursor + await self.kill_change_stream_cursor(change_stream) + cursor.close = raise_error + await self.insert_one_and_check(change_stream, {"_id": 2}) + + # Prose test no. 9 + @no_type_check + @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_max(4, 0, 7) + async def test_start_at_operation_time_caching(self): + # Case 1: change stream not started with startAtOperationTime + client, listener = self.client_with_listener("aggregate") + async with await self.change_stream_with_client(client) as cs: + await self.kill_change_stream_cursor(cs) + await cs.try_next() + cmd = listener.started_events[-1].command + self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) + + # Case 2: change stream started with startAtOperationTime + listener.reset() + optime = await self.get_start_at_operation_time() + async with await self.change_stream_with_client( + client, start_at_operation_time=optime + ) as cs: + await self.kill_change_stream_cursor(cs) + await cs.try_next() + cmd = listener.started_events[-1].command + self.assertEqual( + cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), + optime, + str([k.command for k in listener.started_events]), + ) + + # Prose test no. 10 - SKIPPED + # This test is identical to prose test no. 3. + + # Prose test no. 11 + @no_type_check + @async_client_context.require_version_min(4, 0, 7) + async def test_resumetoken_empty_batch(self): + client, listener = await self._client_with_listener("getMore") + async with await self.change_stream_with_client(client) as change_stream: + self.assertIsNone(await change_stream.try_next()) + resume_token = change_stream.resume_token + + response = listener.succeeded_events[0].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) + + # Prose test no. 11 + @no_type_check + @async_client_context.require_version_min(4, 0, 7) + async def test_resumetoken_exhausted_batch(self): + client, listener = await self._client_with_listener("getMore") + async with await self.change_stream_with_client(client) as change_stream: + await self._populate_and_exhaust_change_stream(change_stream) + resume_token = change_stream.resume_token + + response = listener.succeeded_events[-1].reply + self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) + + # Prose test no. 12 + @no_type_check + @async_client_context.require_version_max(4, 0, 7) + async def test_resumetoken_empty_batch_legacy(self): + resume_point = await self.get_resume_token() + + # Empty resume token when neither resumeAfter or startAfter specified. + async with await self.change_stream() as change_stream: + await change_stream.try_next() + self.assertIsNone(change_stream.resume_token) + + # Resume token value is same as resumeAfter. + async with await self.change_stream(resume_after=resume_point) as change_stream: + await change_stream.try_next() + resume_token = change_stream.resume_token + self.assertEqual(resume_token, resume_point) + + # Prose test no. 12 + @no_type_check + @async_client_context.require_version_max(4, 0, 7) + async def test_resumetoken_exhausted_batch_legacy(self): + # Resume token is _id of last change. + async with await self.change_stream() as change_stream: + change = await self._populate_and_exhaust_change_stream(change_stream) + self.assertEqual(change_stream.resume_token, change["_id"]) + resume_point = change["_id"] + + # Resume token is _id of last change even if resumeAfter is specified. + async with await self.change_stream(resume_after=resume_point) as change_stream: + change = await self._populate_and_exhaust_change_stream(change_stream) + self.assertEqual(change_stream.resume_token, change["_id"]) + + # Prose test no. 13 + @no_type_check + async def test_resumetoken_partially_iterated_batch(self): + # When batch has been iterated up to but not including the last element. + # Resume token should be _id of previous change document. + async with await self.change_stream() as change_stream: + await self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"data": k} for k in range(3)] + ) + for _ in range(2): + change = await anext(change_stream) + resume_token = change_stream.resume_token + + self.assertEqual(resume_token, change["_id"]) + + @no_type_check + async def _test_resumetoken_uniterated_nonempty_batch(self, resume_option): + # When the batch is not empty and hasn't been iterated at all. + # Resume token should be same as the resume option used. + resume_point = await self.get_resume_token() + + # Insert some documents so that firstBatch isn't empty. + await self.watched_collection(write_concern=WriteConcern("majority")).insert_many( + [{"a": 1}, {"b": 2}, {"c": 3}] + ) + + # Resume token should be same as the resume option. + async with await self.change_stream(**{resume_option: resume_point}) as change_stream: + self.assertTrue(change_stream._cursor._has_next()) + resume_token = change_stream.resume_token + self.assertEqual(resume_token, resume_point) + + # Prose test no. 14 + @no_type_check + @async_client_context.require_no_mongos + async def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): + await self._test_resumetoken_uniterated_nonempty_batch("resume_after") + + # Prose test no. 14 + @no_type_check + @async_client_context.require_no_mongos + @async_client_context.require_version_min(4, 1, 1) + async def test_resumetoken_uniterated_nonempty_batch_startafter(self): + await self._test_resumetoken_uniterated_nonempty_batch("start_after") + + # Prose test no. 17 + @no_type_check + @async_client_context.require_version_min(4, 1, 1) + async def test_startafter_resume_uses_startafter_after_empty_getMore(self): + # Resume should use startAfter after no changes have been returned. + resume_point = await self.get_resume_token() + + client, listener = await self._client_with_listener("aggregate") + async with await self.change_stream_with_client( + client, start_after=resume_point + ) as change_stream: + self.assertFalse(change_stream._cursor._has_next()) # No changes + await change_stream.try_next() # No changes + await self.kill_change_stream_cursor(change_stream) + await change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 18 + @no_type_check + @async_client_context.require_version_min(4, 1, 1) + async def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): + # Resume should use resumeAfter after some changes have been returned. + resume_point = await self.get_resume_token() + + client, listener = await self._client_with_listener("aggregate") + async with await self.change_stream_with_client( + client, start_after=resume_point + ) as change_stream: + self.assertFalse(change_stream._cursor._has_next()) # No changes + await self.watched_collection().insert_one({}) + await anext(change_stream) # Changes + await self.kill_change_stream_cursor(change_stream) + await change_stream.try_next() # Resume attempt + + response = listener.started_events[-1] + self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) + self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + + # Prose test no. 19 + @no_type_check + async def test_split_large_change(self): + server_version = async_client_context.version + if not server_version.at_least(6, 0, 9): + self.skipTest("$changeStreamSplitLargeEvent requires MongoDB 6.0.9+") + if server_version.at_least(6, 1, 0) and server_version < Version(7, 0, 0): + self.skipTest("$changeStreamSplitLargeEvent is not available in 6.x rapid releases") + await self.db.drop_collection("test_split_large_change") + coll = await self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + await coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + async with await coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + await coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = await change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = await change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) + + +class TestClusterAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): + dbs: list + + @classmethod + @async_client_context.require_version_min(4, 0, 0, -1) + @async_client_context.require_change_streams + async def _setup_class(cls): + await super()._setup_class() + cls.dbs = [cls.db, cls.client.pymongo_test_2] + + @classmethod + async def _tearDown_class(cls): + for db in cls.dbs: + await cls.client.drop_database(db) + await super()._tearDown_class() + + async def change_stream_with_client(self, client, *args, **kwargs): + return await client.watch(*args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + self.skipTest("cluster-level change streams cannot be invalidated") + + async def _test_get_invalidate_event(self, change_stream): + # Cluster-level change streams don't get invalidated. + pass + + async def _test_invalidate_stops_iteration(self, change_stream): + # Cluster-level change streams don't get invalidated. + pass + + async def _insert_and_check(self, change_stream, db, collname, doc): + coll = db[collname] + await coll.insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) + + async def insert_one_and_check(self, change_stream, doc): + db = random.choice(self.dbs) + collname = self.id() + await self._insert_and_check(change_stream, db, collname, doc) + + async def test_simple(self): + collnames = self.generate_unique_collnames(3) + async with await self.change_stream() as change_stream: + for db, collname in product(self.dbs, collnames): + await self._insert_and_check(change_stream, db, collname, {"_id": collname}) + + @async_client_context.require_sync + async def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + async with await self.client.admin.aggregate( + [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + async def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. + """ + await self._test_full_pipeline({"allChangesForCluster": True}) + + +class TestAsyncDatabaseAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): + @classmethod + @async_client_context.require_version_min(4, 0, 0, -1) + @async_client_context.require_change_streams + async def _setup_class(cls): + await super()._setup_class() + + async def change_stream_with_client(self, client, *args, **kwargs): + return await client[self.db.name].watch(*args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + # Dropping the database invalidates the change stream. + await change_stream._client.drop_database(self.db.name) + + async def _test_get_invalidate_event(self, change_stream): + # Cache collection names. + dropped_colls = await self.db.list_collection_names() + # Drop the watched database to get an invalidate event. + await self.generate_invalidate_event(change_stream) + change = await change_stream.next() + # 4.1+ returns "drop" events for each collection in dropped database + # and a "dropDatabase" event for the database itself. + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + for _ in range(len(dropped_colls)): + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) + change = await change_stream.next() + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) + # Get next change. + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) + # The AsyncChangeStream should be dead. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + + async def _test_invalidate_stops_iteration(self, change_stream): + # Drop the watched database to get an invalidate event. + await change_stream._client.drop_database(self.db.name) + # Check drop and dropDatabase events. + async for change in change_stream: + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) + # Last change must be invalidate. + self.assertEqual(change["operationType"], "invalidate") + # Change stream must not allow further iteration. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + async def _insert_and_check(self, change_stream, collname, doc): + coll = self.db[collname] + await coll.insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) + + async def insert_one_and_check(self, change_stream, doc): + await self._insert_and_check(change_stream, self.id(), doc) + + async def test_simple(self): + collnames = self.generate_unique_collnames(3) + async with await self.change_stream() as change_stream: + for collname in collnames: + await self._insert_and_check( + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) + + async def test_isolation(self): + # Ensure inserts to other dbs don't show up in our AsyncChangeStream. + other_db = self.client.pymongo_test_temp + self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs") + collname = self.id() + async with await self.change_stream() as change_stream: + await other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())}) + await self._insert_and_check( + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) + await self.client.drop_database(other_db) + + +class TestAsyncCollectionAsyncChangeStream( + TestAsyncChangeStreamBase, APITestsMixin, ProseSpecTestsMixin +): + @classmethod + @async_client_context.require_change_streams + async def _setup_class(cls): + await super()._setup_class() + + async def asyncSetUp(self): + # Use a new collection for each test. + await self.watched_collection().drop() + await self.watched_collection().insert_one({}) + + async def change_stream_with_client(self, client, *args, **kwargs): + return ( + await client[self.db.name] + .get_collection(self.watched_collection().name) + .watch(*args, **kwargs) + ) + + async def generate_invalidate_event(self, change_stream): + # Dropping the collection invalidates the change stream. + await change_stream._target.drop() + + async def _test_invalidate_stops_iteration(self, change_stream): + await self.generate_invalidate_event(change_stream) + # Check drop and dropDatabase events. + async for change in change_stream: + self.assertIn(change["operationType"], ("drop", "invalidate")) + # Last change must be invalidate. + self.assertEqual(change["operationType"], "invalidate") + # Change stream must not allow further iteration. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + async def _test_get_invalidate_event(self, change_stream): + # Drop the watched database to get an invalidate event. + await change_stream._target.drop() + change = await change_stream.next() + # 4.1+ returns a "drop" change document. + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + self.assertEqual( + change["ns"], + {"db": change_stream._target.database.name, "coll": change_stream._target.name}, + ) + # Last change should be invalidate. + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) + # The AsyncChangeStream should be dead. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + + async def insert_one_and_check(self, change_stream, doc): + await self.watched_collection().insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual( + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) + + async def test_raw(self): + """Test with RawBSONDocument.""" + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) + async with await raw_coll.watch() as change_stream: + raw_doc = RawBSONDocument(encode({"_id": 1})) + await self.watched_collection().insert_one(raw_doc) + change = await anext(change_stream) + self.assertIsInstance(change, RawBSONDocument) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) + + @async_client_context.require_version_min(4, 0) # Needed for start_at_operation_time. + async def test_uuid_representations(self): + """Test with uuid document _ids and different uuid_representation.""" + optime = (await self.db.command("ping"))["operationTime"] + await self.watched_collection().insert_many( + [ + {"_id": Binary(uuid.uuid4().bytes, id_subtype)} + for id_subtype in (STANDARD, PYTHON_LEGACY) + ] + ) + for uuid_representation in ALL_UUID_REPRESENTATIONS: + options = self.watched_collection().codec_options.with_options( + uuid_representation=uuid_representation + ) + coll = self.watched_collection(codec_options=options) + async with await coll.watch( + start_at_operation_time=optime, max_await_time_ms=1 + ) as change_stream: + _ = await change_stream.next() + resume_token_1 = change_stream.resume_token + _ = await change_stream.next() + resume_token_2 = change_stream.resume_token + + # Should not error. + async with await coll.watch(resume_after=resume_token_1): + pass + async with await coll.watch(resume_after=resume_token_2): + pass + + async def test_document_id_order(self): + """Test with document _ids that need their order preserved.""" + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + random_doc = {"_id": SON([(key, key) for key in random_keys])} + for document_class in (dict, SON, RawBSONDocument): + options = self.watched_collection().codec_options.with_options( + document_class=document_class + ) + coll = self.watched_collection(codec_options=options) + async with await coll.watch() as change_stream: + await coll.insert_one(random_doc) + _ = await change_stream.next() + resume_token = change_stream.resume_token + + # The resume token is always a document. + self.assertIsInstance(resume_token, document_class) + # Should not error. + async with await coll.watch(resume_after=resume_token): + pass + await coll.delete_many({}) + + async def test_read_concern(self): + """Test readConcern is not validated by the driver.""" + # Read concern 'local' is not allowed for $changeStream. + coll = self.watched_collection(read_concern=ReadConcern("local")) + with self.assertRaises(OperationFailure): + await coll.watch() + + # Does not error. + coll = self.watched_collection(read_concern=ReadConcern("majority")) + async with await coll.watch(): + pass + + +class TestAllLegacyScenarios(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + listener: AllowListEventListener + + @classmethod + @async_client_context.require_connection + async def _setup_class(cls): + await super()._setup_class() + cls.listener = AllowListEventListener("aggregate", "getMore") + cls.client = await async_rs_or_single_client(event_listeners=[cls.listener]) + + @classmethod + async def _tearDown_class(cls): + await cls.client.close() + await super()._tearDown_class() + + def asyncSetUp(self): + super().asyncSetUp() + self.listener.reset() + + async def asyncSetUpCluster(self, scenario_dict): + assets = [ + (scenario_dict["database_name"], scenario_dict["collection_name"]), + ( + scenario_dict.get("database2_name", "db2"), + scenario_dict.get("collection2_name", "coll2"), + ), + ] + for db, coll in assets: + await self.client.drop_database(db) + await self.client[db].create_collection(coll) + + async def setFailPoint(self, scenario_dict): + fail_point = scenario_dict.get("failPoint") + if fail_point is None: + return + elif not async_client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + fail_cmd = SON([("configureFailPoint", "failCommand")]) + fail_cmd.update(fail_point) + await async_client_context.client.admin.command(fail_cmd) + self.addAsyncCleanup( + async_client_context.client.admin.command, + "configureFailPoint", + fail_cmd["configureFailPoint"], + mode="off", + ) + + def assert_list_contents_are_subset(self, superlist, sublist): + """Check that each element in sublist is a subset of the corresponding + element in superlist. + """ + self.assertEqual(len(superlist), len(sublist)) + for sup, sub in zip(superlist, sublist): + if isinstance(sub, dict): + self.assert_dict_is_subset(sup, sub) + continue + if isinstance(sub, (list, tuple)): + self.assert_list_contents_are_subset(sup, sub) + continue + self.assertEqual(sup, sub) + + def assert_dict_is_subset(self, superdict, subdict): + """Check that subdict is a subset of superdict.""" + exempt_fields = ["documentKey", "_id", "getMore"] + for key, value in subdict.items(): + if key not in superdict: + self.fail(f"Key {key} not found in {superdict}") + if isinstance(value, dict): + self.assert_dict_is_subset(superdict[key], value) + continue + if isinstance(value, (list, tuple)): + self.assert_list_contents_are_subset(superdict[key], value) + continue + if key in exempt_fields: + # Only check for presence of these exempt fields, but not value. + self.assertIn(key, superdict) + else: + self.assertEqual(superdict[key], value) + + def check_event(self, event, expectation_dict): + if event is None: + self.fail() + for key, value in expectation_dict.items(): + if isinstance(value, dict): + self.assert_dict_is_subset(getattr(event, key), value) + else: + self.assertEqual(getattr(event, key), value) + + def asyncTearDown(self): + self.listener.reset() + + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_change_stream.py b/test/test_change_stream.py index b71f5613d8..cb19452aec 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -15,6 +15,7 @@ """Test the change_stream module.""" from __future__ import annotations +import asyncio import os import random import string @@ -48,8 +49,11 @@ from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestChangeStreamBase(IntegrationTest): RUN_ON_LOAD_BALANCER = True @@ -97,17 +101,17 @@ def get_resume_token(self, invalidate=False): if isinstance(cs._target, MongoClient): self.skipTest("cluster-level change streams cannot be invalidated") self.generate_invalidate_event(cs) - return cs.next()["_id"] + return (cs.next())["_id"] else: with self.change_stream() as cs: coll.insert_one({"data": 1}) - return cs.next()["_id"] + return (cs.next())["_id"] def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most recently returned timestamp. """ - optime = self.client.admin.command("ping")["operationTime"] + optime = (self.client.admin.command("ping"))["operationTime"] return Timestamp(optime.time, optime.inc + 1) def insert_one_and_check(self, change_stream, doc): @@ -158,10 +162,14 @@ def test_try_next(self): with self.change_stream(max_await_time_ms=250) as stream: self.assertIsNone(stream.try_next()) # No changes initially. coll.insert_one({}) # Generate a change. + # On sharded clusters, even majority-committed changes only show # up once an event that sorts after it shows up on the other # shard. So, we wait on try_next to eventually return changes. - wait_until(lambda: stream.try_next() is not None, "get change from try_next") + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") @no_type_check def test_try_next_runs_one_getmore(self): @@ -192,7 +200,11 @@ def test_try_next_runs_one_getmore(self): # Get at least one change before resuming. coll.insert_one({"_id": 2}) - wait_until(lambda: stream.try_next() is not None, "get change from try_next") + + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") listener.reset() # Cause the next request to initiate the resume process. @@ -209,7 +221,11 @@ def test_try_next_runs_one_getmore(self): # Stream still works after a resume. coll.insert_one({"_id": 3}) - wait_until(lambda: stream.try_next() is not None, "get change from try_next") + + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") self.assertEqual(set(listener.started_command_names()), {"getMore"}) self.assertIsNone(stream.try_next()) @@ -289,6 +305,7 @@ def test_iteration(self): self._test_invalidate_stops_iteration(change_stream) @no_type_check + @client_context.require_sync def _test_next_blocks(self, change_stream): inserted_doc = {"_id": ObjectId()} changes = [] @@ -308,13 +325,15 @@ def _test_next_blocks(self, change_stream): self.assertEqual(changes[0]["fullDocument"], inserted_doc) @no_type_check + @client_context.require_sync def test_next_blocks(self): """Test that next blocks until a change is readable""" - # Use a short await time to speed up the test. + # Use a short wait time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: self._test_next_blocks(change_stream) @no_type_check + @client_context.require_sync def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( @@ -323,9 +342,10 @@ def test_aggregate_cursor_blocks(self): self._test_next_blocks(change_stream) @no_type_check + @client_context.require_sync def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" - # Use a short await time to speed up the test. + # Use a short wait time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: def iterate_cursor(): @@ -798,15 +818,15 @@ class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.dbs = [cls.db, cls.client.pymongo_test_2] @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): for db in cls.dbs: cls.client.drop_database(db) - super().tearDownClass() + super()._tearDown_class() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) @@ -841,6 +861,7 @@ def test_simple(self): for db, collname in product(self.dbs, collnames): self._insert_and_check(change_stream, db, collname, {"_id": collname}) + @client_context.require_sync def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.client.admin.aggregate( @@ -859,8 +880,8 @@ class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) @@ -944,8 +965,8 @@ def test_isolation(self): class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): @classmethod @client_context.require_change_streams - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() def setUp(self): # Use a new collection for each test. @@ -1023,7 +1044,7 @@ def test_raw(self): @client_context.require_version_min(4, 0) # Needed for start_at_operation_time. def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" - optime = self.db.command("ping")["operationTime"] + optime = (self.db.command("ping"))["operationTime"] self.watched_collection().insert_many( [ {"_id": Binary(uuid.uuid4().bytes, id_subtype)} @@ -1087,15 +1108,15 @@ class TestAllLegacyScenarios(IntegrationTest): @classmethod @client_context.require_connection - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.listener = AllowListEventListener("aggregate", "getMore") cls.client = rs_or_single_client(event_listeners=[cls.listener]) @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.client.close() - super().tearDownClass() + super()._tearDown_class() def setUp(self): super().setUp() diff --git a/tools/synchro.py b/tools/synchro.py index cde75b539c..fdf3a05c95 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -164,6 +164,7 @@ "test_auth.py", "test_auth_spec.py", "test_bulk.py", + "test_change_stream.py", "test_client.py", "test_client_bulk_write.py", "test_collection.py", From 3b2151760851c1c6cd09a62ab4126cb9580d731e Mon Sep 17 00:00:00 2001 From: Jib Date: Mon, 16 Sep 2024 22:23:09 -0400 Subject: [PATCH 1483/2111] PYTHON-4752 Migrate docs links to Internal Docs Where Possible (#1715) Co-authored-by: Steven Silvester --- CONTRIBUTING.md | 11 ++++------- README.md | 2 +- bson/_cbsonmodule.c | 2 +- bson/datetime_ms.py | 2 +- doc/index.rst | 7 +++++-- pymongo/asynchronous/topology.py | 5 +++-- pymongo/synchronous/topology.py | 5 +++-- pyproject.toml | 2 +- 8 files changed, 19 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 42cc8dc1b7..2c2a5f4316 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -163,13 +163,10 @@ hatch run lint:build-manual ## Documentation -To contribute to the [API -documentation](https://pymongo.readthedocs.io/en/stable/) just make your -changes to the inline documentation of the appropriate [source -code](https://github.com/mongodb/mongo-python-driver) or [rst -file](https://github.com/mongodb/mongo-python-driver/tree/master/doc) in -a branch and submit a [pull -request](https://help.github.com/articles/using-pull-requests). You +To contribute to the [API documentation](https://pymongo.readthedocs.io/en/stable/) just make your +changes to the inline documentation of the appropriate [source code](https://github.com/mongodb/mongo-python-driver) or +[rst file](https://github.com/mongodb/mongo-python-driver/tree/master/doc) in +a branch and submit a [pull request](https://help.github.com/articles/using-pull-requests). You might also use the GitHub [Edit](https://github.com/blog/844-forking-with-the-edit-button) button. diff --git a/README.md b/README.md index bb773b795b..1076b66377 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![PyPI Version](https://img.shields.io/pypi/v/pymongo)](https://pypi.org/project/pymongo) [![Python Versions](https://img.shields.io/pypi/pyversions/pymongo)](https://pypi.org/project/pymongo) [![Monthly Downloads](https://static.pepy.tech/badge/pymongo/month)](https://pepy.tech/project/pymongo) -[![Documentation Status](https://readthedocs.org/projects/pymongo/badge/?version=stable)](http://pymongo.readthedocs.io/en/stable/?badge=stable) +[![API Documentation Status](https://readthedocs.org/projects/pymongo/badge/?version=stable)](http://pymongo.readthedocs.io/en/stable/api?badge=stable) ## About diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 3e9d5ecc26..34b407b940 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -306,7 +306,7 @@ static PyObject* datetime_from_millis(long long millis) { if (evalue) { PyObject* err_msg = PyObject_Str(evalue); if (err_msg) { - PyObject* appendage = PyUnicode_FromString(" (Consider Using CodecOptions(datetime_conversion=DATETIME_AUTO) or MongoClient(datetime_conversion='DATETIME_AUTO')). See: https://pymongo.readthedocs.io/en/stable/examples/datetimes.html#handling-out-of-range-datetimes"); + PyObject* appendage = PyUnicode_FromString(" (Consider Using CodecOptions(datetime_conversion=DATETIME_AUTO) or MongoClient(datetime_conversion='DATETIME_AUTO')). See: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/data-formats/dates-and-times/#handling-out-of-range-datetimes"); if (appendage) { PyObject* msg = PyUnicode_Concat(err_msg, appendage); if (msg) { diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 1b6fa22794..679524cb60 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -31,7 +31,7 @@ _DATETIME_ERROR_SUGGESTION = ( "(Consider Using CodecOptions(datetime_conversion=DATETIME_AUTO)" " or MongoClient(datetime_conversion='DATETIME_AUTO'))." - " See: https://pymongo.readthedocs.io/en/stable/examples/datetimes.html#handling-out-of-range-datetimes" + " See: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/data-formats/dates-and-times/#handling-out-of-range-datetimes" ) diff --git a/doc/index.rst b/doc/index.rst index 71e1423816..0ac8bdec6e 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,6 +1,11 @@ PyMongo |release| Documentation =============================== +.. note:: The PyMongo documentation has been migrated to the + `MongoDB Documentation site `_. + As of PyMongo 4.10, the ReadTheDocs site will contain the detailed changelog and API docs, while the + rest of the documentation will only appear on the MongoDB Documentation site. + Overview -------- **PyMongo** is a Python distribution containing tools for working with @@ -95,8 +100,6 @@ pull request. Changes ------- See the :doc:`changelog` for a full list of changes to PyMongo. -For older versions of the documentation please see the -`archive list `_. About This Documentation ------------------------ diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 9dd1a1c76b..4e778cbc17 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -227,8 +227,9 @@ async def open(self) -> None: warnings.warn( # type: ignore[call-overload] # noqa: B028 "AsyncMongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe", + "https://www.mongodb.com/docs/languages/" + "python/pymongo-driver/current/faq/" + "#is-pymongo-fork-safe-", **kwargs, ) async with self._lock: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 414865154e..e8070e30ab 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -227,8 +227,9 @@ def open(self) -> None: warnings.warn( # type: ignore[call-overload] # noqa: B028 "MongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe", + "https://www.mongodb.com/docs/languages/" + "python/pymongo-driver/current/faq/" + "#is-pymongo-fork-safe-", **kwargs, ) with self._lock: diff --git a/pyproject.toml b/pyproject.toml index b64c7d6031..2df172fde2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ classifiers = [ [project.urls] Homepage = "https://www.mongodb.org" -Documentation = "https://pymongo.readthedocs.io" +Documentation = "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/" Source = "https://github.com/mongodb/mongo-python-driver" Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" From fb51c11cacce56dca3bf48a810947e45d6c01d2f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 16 Sep 2024 21:23:40 -0500 Subject: [PATCH 1484/2111] PYTHON-4756 Add changelog note about dropping srv extra (#1861) --- doc/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index ba3cba8322..69fbb6f8fd 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -93,6 +93,10 @@ Unavoidable breaking changes - Since we are now using ``hatch`` as our build backend, we no longer have a usable ``setup.py`` file and require installation using ``pip``. Attempts to invoke the ``setup.py`` file will raise an exception. Additionally, ``pip`` >= 21.3 is now required for editable installs. +- We no longer support the ``srv`` extra, since ``dnspython`` is included as a dependency in PyMongo 4.7+. + Instead of ``pip install pymongo[srv]``, use ``pip install pymongo``. +- We no longer support the ``tls`` extra, which was only valid for Python 2. + Instead of ``pip install pymongo[tls]``, use ``pip install pymongo``. Issues Resolved ............... From 739510214b799664829b1b085918d8c7eb4d67a1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 17 Sep 2024 09:22:17 -0400 Subject: [PATCH 1485/2111] PYTHON-4731 - Explicitly close all MongoClients opened during tests (#1855) --- pymongo/asynchronous/mongo_client.py | 1 - pymongo/synchronous/mongo_client.py | 1 - pyproject.toml | 3 - test/__init__.py | 189 +++++++- test/asynchronous/__init__.py | 205 +++++++- test/asynchronous/test_auth.py | 119 +++-- test/asynchronous/test_auth_spec.py | 5 +- test/asynchronous/test_bulk.py | 19 +- test/asynchronous/test_change_stream.py | 22 +- test/asynchronous/test_client.py | 438 +++++++++--------- test/asynchronous/test_client_bulk_write.py | 46 +- test/asynchronous/test_collection.py | 20 +- test/asynchronous/test_cursor.py | 34 +- test/asynchronous/test_database.py | 8 +- test/asynchronous/test_encryption.py | 218 ++++----- test/asynchronous/test_grid_file.py | 8 +- test/asynchronous/test_logger.py | 3 +- test/asynchronous/test_monitoring.py | 8 +- test/asynchronous/test_session.py | 18 +- test/asynchronous/test_transactions.py | 50 +- test/asynchronous/utils_spec_runner.py | 5 +- test/auth_aws/test_auth_aws.py | 35 +- test/auth_oidc/test_auth_oidc.py | 8 +- test/mockupdb/test_cursor.py | 7 +- test/ocsp/test_ocsp.py | 7 +- test/test_auth.py | 123 ++--- test/test_auth_spec.py | 5 +- test/test_bulk.py | 19 +- test/test_change_stream.py | 20 +- test/test_client.py | 386 ++++++++------- test/test_client_bulk_write.py | 46 +- test/test_collation.py | 4 +- test/test_collection.py | 20 +- test/test_comment.py | 8 +- test/test_common.py | 19 +- test/test_connection_monitoring.py | 19 +- ...nnections_survive_primary_stepdown_spec.py | 3 +- test/test_cursor.py | 34 +- test/test_custom_types.py | 3 +- test/test_data_lake.py | 8 +- test/test_database.py | 8 +- test/test_discovery_and_monitoring.py | 18 +- test/test_dns.py | 24 +- test/test_encryption.py | 216 ++++----- test/test_examples.py | 8 +- test/test_grid_file.py | 8 +- test/test_gridfs.py | 12 +- test/test_gridfs_bucket.py | 12 +- test/test_heartbeat_monitoring.py | 4 +- test/test_load_balancer.py | 8 +- test/test_logger.py | 3 +- test/test_max_staleness.py | 45 +- test/test_monitor.py | 37 +- test/test_monitoring.py | 10 +- test/test_pooling.py | 18 +- test/test_read_concern.py | 6 +- test/test_read_preferences.py | 46 +- test/test_read_write_concern_spec.py | 19 +- test/test_retryable_reads.py | 9 +- test/test_retryable_writes.py | 27 +- test/test_sdam_monitoring_spec.py | 3 +- test/test_server_selection.py | 7 +- test/test_server_selection_in_window.py | 3 +- test/test_session.py | 17 +- test/test_srv_polling.py | 24 +- test/test_ssl.py | 101 ++-- test/test_streaming_protocol.py | 10 +- test/test_transactions.py | 44 +- test/test_typing.py | 9 +- test/test_versioned_api.py | 6 +- test/unified_format.py | 8 +- test/utils.py | 159 ------- test/utils_spec_runner.py | 5 +- 73 files changed, 1608 insertions(+), 1520 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index f7fc8e5e81..6d0e5d5280 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -1193,7 +1193,6 @@ def __del__(self) -> None: ), ResourceWarning, stacklevel=2, - source=self, ) except AttributeError: pass diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 5786bbf5a9..b2dff5b4ab 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1193,7 +1193,6 @@ def __del__(self) -> None: ), ResourceWarning, stacklevel=2, - source=self, ) except AttributeError: pass diff --git a/pyproject.toml b/pyproject.toml index 2df172fde2..30c7c046b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,9 +96,6 @@ filterwarnings = [ "module:please use dns.resolver.Resolver.resolve:DeprecationWarning", # https://github.com/dateutil/dateutil/issues/1314 "module:datetime.datetime.utc:DeprecationWarning:dateutil", - # TODO: Remove both of these in https://jira.mongodb.org/browse/PYTHON-4731 - "ignore:Unclosed AsyncMongoClient*", - "ignore:Unclosed MongoClient*", ] markers = [ "auth_aws: tests that rely on pymongo-auth-aws", diff --git a/test/__init__.py b/test/__init__.py index 41af81f979..1a17ff14c5 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -16,8 +16,6 @@ from __future__ import annotations import asyncio -import base64 -import contextlib import gc import multiprocessing import os @@ -27,7 +25,6 @@ import sys import threading import time -import traceback import unittest import warnings from asyncio import iscoroutinefunction @@ -54,6 +51,8 @@ sanitize_reply, ) +from pymongo.uri_parser import parse_uri + try: import ipaddress @@ -80,6 +79,12 @@ _IS_SYNC = True +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): + return h + return f"mongodb://{h!s}" + + class ClientContext: client: MongoClient @@ -230,6 +235,9 @@ def _init_client(self): if not self._check_user_provided(): _create_user(self.client.admin, db_user, db_pwd) + if self.client: + self.client.close() + self.client = self._connect( host, port, @@ -256,6 +264,8 @@ def _init_client(self): if "setName" in hello: self.replica_set_name = str(hello["setName"]) self.is_rs = True + if self.client: + self.client.close() if self.auth_enabled: # It doesn't matter which member we use as the seed here. self.client = pymongo.MongoClient( @@ -318,6 +328,7 @@ def _init_client(self): hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) if hello.get("msg") == "isdbgrid": self.mongoses.append(next_address) + mongos_client.close() def init(self): with self.conn_lock: @@ -537,12 +548,6 @@ def require_auth(self, func): lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func ) - def require_no_fips(self, func): - """Run a test only if the host does not have FIPS enabled.""" - return self._require( - lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", func=func - ) - def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" return self._require( @@ -930,6 +935,172 @@ def _target() -> None: self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") self.assertEqual(proc.exitcode, 0) + @classmethod + def _unmanaged_async_mongo_client( + cls, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or client_context.host + port = port or client_context.port + client_options: dict = client_context.default_client_options.copy() + if client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = MongoClient(uri, port, **client_options) + if client._options.connect: + client._connect() + return client + + def _async_mongo_client(self, host, port, authenticate=True, directConnection=None, **kwargs): + """Create a new client over SSL/TLS if necessary.""" + host = host or client_context.host + port = port or client_context.port + client_options: dict = client_context.default_client_options.copy() + if client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = MongoClient(uri, port, **client_options) + if client._options.connect: + client._connect() + self.addCleanup(client.close) + return client + + @classmethod + def unmanaged_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client( + h, p, authenticate=False, directConnection=True, **kwargs + ) + + @classmethod + def unmanaged_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, directConnection=True, **kwargs) + + @classmethod + def unmanaged_rs_client(cls, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return cls._unmanaged_async_mongo_client(h, p, **kwargs) + + @classmethod + def unmanaged_rs_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs) + + @classmethod + def unmanaged_rs_or_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs) + + @classmethod + def unmanaged_rs_or_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, **kwargs) + + def single_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return self._async_mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) + + def single_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Make a direct connection, and authenticate if necessary.""" + return self._async_mongo_client(h, p, directConnection=True, **kwargs) + + def rs_client_noauth(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Connect to the replica set. Don't authenticate.""" + return self._async_mongo_client(h, p, authenticate=False, **kwargs) + + def rs_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return self._async_mongo_client(h, p, **kwargs) + + def rs_or_single_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Connect to the replica set if there is one, otherwise the standalone. + + Like rs_or_single_client, but does not authenticate. + """ + return self._async_mongo_client(h, p, authenticate=False, **kwargs) + + def rs_or_single_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]: + """Connect to the replica set if there is one, otherwise the standalone. + + Authenticates if necessary. + """ + return self._async_mongo_client(h, p, **kwargs) + + def simple_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient: + if not h and not p: + client = MongoClient(**kwargs) + else: + client = MongoClient(h, p, **kwargs) + self.addCleanup(client.close) + return client + + @classmethod + def unmanaged_simple_client(cls, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient: + if not h and not p: + client = MongoClient(**kwargs) + else: + client = MongoClient(h, p, **kwargs) + return client + + def disable_replication(self, client): + """Disable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = self.single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + + def enable_replication(self, client): + """Enable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = self.single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + class UnitTest(PyMongoTestCase): """Async base class for TestCases that don't require a connection to MongoDB.""" diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index d1af89c184..0d94331587 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -16,8 +16,6 @@ from __future__ import annotations import asyncio -import base64 -import contextlib import gc import multiprocessing import os @@ -27,7 +25,6 @@ import sys import threading import time -import traceback import unittest import warnings from asyncio import iscoroutinefunction @@ -54,6 +51,8 @@ sanitize_reply, ) +from pymongo.uri_parser import parse_uri + try: import ipaddress @@ -80,6 +79,12 @@ _IS_SYNC = False +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): + return h + return f"mongodb://{h!s}" + + class AsyncClientContext: client: AsyncMongoClient @@ -230,6 +235,9 @@ async def _init_client(self): if not await self._check_user_provided(): await _create_user(self.client.admin, db_user, db_pwd) + if self.client: + await self.client.close() + self.client = await self._connect( host, port, @@ -256,6 +264,8 @@ async def _init_client(self): if "setName" in hello: self.replica_set_name = str(hello["setName"]) self.is_rs = True + if self.client: + await self.client.close() if self.auth_enabled: # It doesn't matter which member we use as the seed here. self.client = pymongo.AsyncMongoClient( @@ -320,6 +330,7 @@ async def _init_client(self): hello = await mongos_client.admin.command(HelloCompat.LEGACY_CMD) if hello.get("msg") == "isdbgrid": self.mongoses.append(next_address) + await mongos_client.close() async def init(self): with self.conn_lock: @@ -539,12 +550,6 @@ def require_auth(self, func): lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func ) - def require_no_fips(self, func): - """Run a test only if the host does not have FIPS enabled.""" - return self._require( - lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", func=func - ) - def require_no_auth(self, func): """Run a test only if the server is running without auth enabled.""" return self._require( @@ -932,6 +937,188 @@ def _target() -> None: self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") self.assertEqual(proc.exitcode, 0) + @classmethod + async def _unmanaged_async_mongo_client( + cls, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = AsyncMongoClient(uri, port, **client_options) + if client._options.connect: + await client.aconnect() + return client + + async def _async_mongo_client( + self, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = AsyncMongoClient(uri, port, **client_options) + if client._options.connect: + await client.aconnect() + self.addAsyncCleanup(client.close) + return client + + @classmethod + async def unmanaged_async_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client( + h, p, authenticate=False, directConnection=True, **kwargs + ) + + @classmethod + async def unmanaged_async_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client(h, p, directConnection=True, **kwargs) + + @classmethod + async def unmanaged_async_rs_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return await cls._unmanaged_async_mongo_client(h, p, **kwargs) + + @classmethod + async def unmanaged_async_rs_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs) + + @classmethod + async def unmanaged_async_rs_or_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs) + + @classmethod + async def unmanaged_async_rs_or_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client(h, p, **kwargs) + + async def async_single_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await self._async_mongo_client( + h, p, authenticate=False, directConnection=True, **kwargs + ) + + async def async_single_client( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection, and authenticate if necessary.""" + return await self._async_mongo_client(h, p, directConnection=True, **kwargs) + + async def async_rs_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Connect to the replica set. Don't authenticate.""" + return await self._async_mongo_client(h, p, authenticate=False, **kwargs) + + async def async_rs_client( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return await self._async_mongo_client(h, p, **kwargs) + + async def async_rs_or_single_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Connect to the replica set if there is one, otherwise the standalone. + + Like rs_or_single_client, but does not authenticate. + """ + return await self._async_mongo_client(h, p, authenticate=False, **kwargs) + + async def async_rs_or_single_client( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[Any]: + """Connect to the replica set if there is one, otherwise the standalone. + + Authenticates if necessary. + """ + return await self._async_mongo_client(h, p, **kwargs) + + def simple_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> AsyncMongoClient: + if not h and not p: + client = AsyncMongoClient(**kwargs) + else: + client = AsyncMongoClient(h, p, **kwargs) + self.addAsyncCleanup(client.close) + return client + + @classmethod + def unmanaged_simple_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient: + if not h and not p: + client = AsyncMongoClient(**kwargs) + else: + client = AsyncMongoClient(h, p, **kwargs) + return client + + async def disable_replication(self, client): + """Disable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = await self.async_single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + + async def enable_replication(self, client): + """Enable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = await self.async_single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + class AsyncUnitTest(AsyncPyMongoTestCase): """Async base class for TestCases that don't require a connection to MongoDB.""" diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index 06f7fb9ca8..fbaca41f09 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -23,16 +23,14 @@ sys.path[0:0] = [""] -from test.asynchronous import AsyncIntegrationTest, SkipTest, async_client_context, unittest -from test.utils import ( - AllowListEventListener, - async_rs_or_single_client, - async_rs_or_single_client_noauth, - async_single_client, - async_single_client_noauth, - delay, - ignore_deprecations, +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + SkipTest, + async_client_context, + unittest, ) +from test.utils import AllowListEventListener, delay, ignore_deprecations from pymongo import AsyncMongoClient, monitoring from pymongo.asynchronous.auth import HAVE_KERBEROS @@ -81,7 +79,7 @@ def run(self): self.success = True -class TestGSSAPI(unittest.IsolatedAsyncioTestCase): +class TestGSSAPI(AsyncPyMongoTestCase): mech_properties: str service_realm_required: bool @@ -138,7 +136,7 @@ async def test_gssapi_simple(self): if not self.service_realm_required: # Without authMechanismProperties. - client = AsyncMongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -149,11 +147,11 @@ async def test_gssapi_simple(self): await client[GSSAPI_DB].collection.find_one() # Log in using URI, without authMechanismProperties. - client = AsyncMongoClient(uri) + client = self.simple_client(uri) await client[GSSAPI_DB].collection.find_one() # Authenticate with authMechanismProperties. - client = AsyncMongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -166,14 +164,14 @@ async def test_gssapi_simple(self): # Log in using URI, with authMechanismProperties. mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" - client = AsyncMongoClient(mech_uri) + client = self.simple_client(mech_uri) await client[GSSAPI_DB].collection.find_one() set_name = async_client_context.replica_set_name if set_name: if not self.service_realm_required: # Without authMechanismProperties - client = AsyncMongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -185,11 +183,11 @@ async def test_gssapi_simple(self): await client[GSSAPI_DB].list_collection_names() uri = uri + f"&replicaSet={set_name!s}" - client = AsyncMongoClient(uri) + client = self.simple_client(uri) await client[GSSAPI_DB].list_collection_names() # With authMechanismProperties - client = AsyncMongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -202,13 +200,13 @@ async def test_gssapi_simple(self): await client[GSSAPI_DB].list_collection_names() mech_uri = mech_uri + f"&replicaSet={set_name!s}" - client = AsyncMongoClient(mech_uri) + client = self.simple_client(mech_uri) await client[GSSAPI_DB].list_collection_names() @ignore_deprecations @async_client_context.require_sync async def test_gssapi_threaded(self): - client = AsyncMongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -244,7 +242,7 @@ async def test_gssapi_threaded(self): set_name = async_client_context.replica_set_name if set_name: - client = AsyncMongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -267,14 +265,14 @@ async def test_gssapi_threaded(self): self.assertTrue(thread.success) -class TestSASLPlain(unittest.IsolatedAsyncioTestCase): +class TestSASLPlain(AsyncPyMongoTestCase): @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") async def test_sasl_plain(self): - client = AsyncMongoClient( + client = self.simple_client( SASL_HOST, SASL_PORT, username=SASL_USER, @@ -293,12 +291,12 @@ async def test_sasl_plain(self): SASL_PORT, SASL_DB, ) - client = AsyncMongoClient(uri) + client = self.simple_client(uri) await client.ldap.test.find_one() set_name = async_client_context.replica_set_name if set_name: - client = AsyncMongoClient( + client = self.simple_client( SASL_HOST, SASL_PORT, replicaSet=set_name, @@ -317,7 +315,7 @@ async def test_sasl_plain(self): SASL_DB, str(set_name), ) - client = AsyncMongoClient(uri) + client = self.simple_client(uri) await client.ldap.test.find_one() async def test_sasl_plain_bad_credentials(self): @@ -331,8 +329,8 @@ def auth_string(user, password): ) return uri - bad_user = AsyncMongoClient(auth_string("not-user", SASL_PASS)) - bad_pwd = AsyncMongoClient(auth_string(SASL_USER, "not-pwd")) + bad_user = self.simple_client(auth_string("not-user", SASL_PASS)) + bad_pwd = self.simple_client(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. with self.assertRaises(OperationFailure): await bad_user.admin.command("ping") @@ -356,7 +354,7 @@ async def asyncTearDown(self): async def test_scram_sha1(self): host, port = await async_client_context.host, await async_client_context.port - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) ) await client.pymongo_test.command("dbstats") @@ -367,7 +365,7 @@ async def test_scram_sha1(self): "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" "&replicaSet=%s" % (host, port, async_client_context.replica_set_name) ) - client = await async_single_client_noauth(uri) + client = await self.async_single_client_noauth(uri) await client.pymongo_test.command("dbstats") db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) await db.command("dbstats") @@ -395,7 +393,7 @@ async def test_scram_skip_empty_exchange(self): "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] ) - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] ) await client.testscram.command("dbstats") @@ -432,38 +430,38 @@ async def test_scram(self): ) # Step 2: verify auth success cases - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) await client.testscram.command("dbstats") # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) await client.testscram.command("dbstats") self.listener.reset() - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] ) await client.testscram.command("dbstats") @@ -476,19 +474,19 @@ async def test_scram(self): self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) with self.assertRaises(OperationFailure): await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) with self.assertRaises(OperationFailure): await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="not-a-user", password="pwd", authSource="testscram" ) with self.assertRaises(OperationFailure): @@ -501,7 +499,7 @@ async def test_scram(self): port, async_client_context.replica_set_name, ) - client = await async_single_client_noauth(uri) + client = await self.async_single_client_noauth(uri) await client.testscram.command("dbstats") db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) await db.command("dbstats") @@ -521,12 +519,12 @@ async def test_scram_saslprep(self): "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] ) - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="\u2168", password="\u2163", authSource="testscram" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="\u2168", password="\u2163", authSource="testscram", @@ -534,17 +532,17 @@ async def test_scram_saslprep(self): ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="\u2168", password="IV", authSource="testscram" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="IX", password="I\u00ADX", authSource="testscram" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="IX", password="I\u00ADX", authSource="testscram", @@ -552,31 +550,31 @@ async def test_scram_saslprep(self): ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) ) await client.testscram.command("dbstats") - client = await async_rs_or_single_client_noauth( + client = await self.async_rs_or_single_client_noauth( "mongodb://IX:IX@%s:%d/testscram" % (host, port) ) await client.testscram.command("dbstats") async def test_cache(self): - client = await async_single_client() + client = await self.async_single_client() credentials = client.options.pool_options._credentials cache = credentials.cache self.assertIsNotNone(cache) @@ -601,8 +599,7 @@ async def test_scram_threaded(self): await coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate - client = await async_rs_or_single_client() - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client() coll = client.db.test threads = [] for _ in range(4): @@ -631,7 +628,9 @@ async def asyncTearDown(self): async def test_uri_options(self): # Test default to admin host, port = await async_client_context.host, await async_client_context.port - client = await async_rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + client = await self.async_rs_or_single_client_noauth( + "mongodb://admin:pass@%s:%d" % (host, port) + ) self.assertTrue(await client.admin.command("dbstats")) if async_client_context.is_rs: @@ -640,14 +639,14 @@ async def test_uri_options(self): port, async_client_context.replica_set_name, ) - client = await async_single_client_noauth(uri) + client = await self.async_single_client_noauth(uri) self.assertTrue(await client.admin.command("dbstats")) db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) self.assertTrue(await db.command("dbstats")) # Test explicit database uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) - client = await async_rs_or_single_client_noauth(uri) + client = await self.async_rs_or_single_client_noauth(uri) with self.assertRaises(OperationFailure): await client.admin.command("dbstats") self.assertTrue(await client.pymongo_test.command("dbstats")) @@ -658,7 +657,7 @@ async def test_uri_options(self): port, async_client_context.replica_set_name, ) - client = await async_single_client_noauth(uri) + client = await self.async_single_client_noauth(uri) with self.assertRaises(OperationFailure): await client.admin.command("dbstats") self.assertTrue(await client.pymongo_test.command("dbstats")) @@ -667,7 +666,7 @@ async def test_uri_options(self): # Test authSource uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) - client = await async_rs_or_single_client_noauth(uri) + client = await self.async_rs_or_single_client_noauth(uri) with self.assertRaises(OperationFailure): await client.pymongo_test2.command("dbstats") self.assertTrue(await client.pymongo_test.command("dbstats")) @@ -677,7 +676,7 @@ async def test_uri_options(self): "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" "%s;authSource=pymongo_test" % (host, port, async_client_context.replica_set_name) ) - client = await async_single_client_noauth(uri) + client = await self.async_single_client_noauth(uri) with self.assertRaises(OperationFailure): await client.pymongo_test2.command("dbstats") self.assertTrue(await client.pymongo_test.command("dbstats")) diff --git a/test/asynchronous/test_auth_spec.py b/test/asynchronous/test_auth_spec.py index 329b3eec62..a6ab1cb331 100644 --- a/test/asynchronous/test_auth_spec.py +++ b/test/asynchronous/test_auth_spec.py @@ -20,6 +20,7 @@ import os import sys import warnings +from test.asynchronous import AsyncPyMongoTestCase sys.path[0:0] = [""] @@ -34,7 +35,7 @@ _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") -class TestAuthSpec(unittest.IsolatedAsyncioTestCase): +class TestAuthSpec(AsyncPyMongoTestCase): pass @@ -54,7 +55,7 @@ def run_test(self): warnings.simplefilter("default") self.assertRaises(Exception, AsyncMongoClient, uri, connect=False) else: - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 79d8e1a0f1..42a3311072 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -24,23 +24,14 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, remove_all_users, unittest -from test.utils import ( - async_rs_or_single_client_noauth, - async_single_client, - async_wait_until, -) +from test.utils import async_wait_until from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.asynchronous.collection import AsyncCollection from pymongo.common import partition_node -from pymongo.errors import ( - BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure, -) +from pymongo.errors import BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure from pymongo.operations import * from pymongo.write_concern import WriteConcern @@ -915,7 +906,7 @@ class AsyncTestBulkAuthorization(AsyncBulkAuthorizationTestBase): async def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = await async_rs_or_single_client_noauth( + cli = await self.async_rs_or_single_client_noauth( username="readonly", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test @@ -926,7 +917,7 @@ async def test_readonly(self): async def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = await async_rs_or_single_client_noauth( + cli = await self.async_rs_or_single_client_noauth( username="noremove", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test @@ -954,7 +945,7 @@ async def _setup_class(cls): if cls.w is not None and cls.w > 1: for member in (await async_client_context.hello)["hosts"]: if member != (await async_client_context.hello)["primary"]: - cls.secondary = await async_single_client(*partition_node(member)) + cls.secondary = await cls.unmanaged_async_single_client(*partition_node(member)) break @classmethod diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 1b89c43bb7..883ed72c4c 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -28,12 +28,17 @@ sys.path[0:0] = [""] -from test.asynchronous import AsyncIntegrationTest, Version, async_client_context, unittest +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + Version, + async_client_context, + unittest, +) from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, EventListener, - async_rs_or_single_client, async_wait_until, ) @@ -69,8 +74,7 @@ async def change_stream(self, *args, **kwargs): async def client_with_listener(self, *commands): """Return a client with a AllowListEventListener.""" listener = AllowListEventListener(*commands) - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) return client, listener def watched_collection(self, *args, **kwargs): @@ -176,7 +180,7 @@ async def _wait_until(): @no_type_check async def test_try_next_runs_one_getmore(self): listener = EventListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. await client.admin.command("ping") listener.reset() @@ -234,7 +238,7 @@ async def _wait_until(): @no_type_check async def test_batch_size_is_honored(self): listener = EventListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. await client.admin.command("ping") listener.reset() @@ -481,7 +485,9 @@ class ProseSpecTestsMixin: @no_type_check async def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await AsyncPyMongoTestCase.unmanaged_async_rs_or_single_client( + event_listeners=[listener] + ) self.addAsyncCleanup(client.close) return client, listener @@ -1131,7 +1137,7 @@ class TestAllLegacyScenarios(AsyncIntegrationTest): async def _setup_class(cls): await super()._setup_class() cls.listener = AllowListEventListener("aggregate", "getMore") - cls.client = await async_rs_or_single_client(event_listeners=[cls.listener]) + cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) @classmethod async def _tearDown_class(cls): diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 97cbdf6dbd..f610f32779 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -36,6 +36,7 @@ from unittest.mock import patch import pytest +import pytest_asyncio from pymongo.operations import _Op @@ -61,10 +62,6 @@ CMAPListener, FunctionCallRecorder, async_get_pool, - async_rs_client, - async_rs_or_single_client, - async_rs_or_single_client_noauth, - async_single_client, async_wait_until, asyncAssertRaisesExactly, delay, @@ -72,7 +69,6 @@ is_greenthread_patched, lazy_client_trial, one, - rs_or_single_client, wait_until, ) @@ -133,7 +129,9 @@ class AsyncClientUnitTest(AsyncUnitTest): @classmethod async def _setup_class(cls): - cls.client = await async_rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) + cls.client = await cls.unmanaged_async_rs_or_single_client( + connect=False, serverSelectionTimeoutMS=100 + ) @classmethod async def _tearDown_class(cls): @@ -143,8 +141,8 @@ async def _tearDown_class(cls): def inject_fixtures(self, caplog): self._caplog = caplog - def test_keyword_arg_defaults(self): - client = AsyncMongoClient( + async def test_keyword_arg_defaults(self): + client = self.simple_client( socketTimeoutMS=None, connectTimeoutMS=20000, waitQueueTimeoutMS=None, @@ -169,16 +167,18 @@ def test_keyword_arg_defaults(self): self.assertEqual(ReadPreference.PRIMARY, client.read_preference) self.assertAlmostEqual(12, client.options.server_selection_timeout) - def test_connect_timeout(self): - client = AsyncMongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + async def test_connect_timeout(self): + client = self.simple_client(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = AsyncMongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + + client = self.simple_client(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = AsyncMongoClient( + + client = self.simple_client( "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False ) pool_opts = client.options.pool_options @@ -194,8 +194,8 @@ def test_types(self): self.assertRaises(ConfigurationError, AsyncMongoClient, []) - def test_max_pool_size_zero(self): - AsyncMongoClient(maxPoolSize=0) + async def test_max_pool_size_zero(self): + self.simple_client(maxPoolSize=0) def test_uri_detection(self): self.assertRaises(ConfigurationError, AsyncMongoClient, "/foo") @@ -260,7 +260,7 @@ def test_iteration(self): self.assertNotIsInstance(client, Iterable) async def test_get_default_database(self): - c = await async_rs_or_single_client( + c = await self.async_rs_or_single_client( "mongodb://%s:%d/foo" % (await async_client_context.host, await async_client_context.port), connect=False, @@ -277,7 +277,7 @@ async def test_get_default_database(self): self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) - c = await async_rs_or_single_client( + c = await self.async_rs_or_single_client( "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), connect=False, ) @@ -285,7 +285,7 @@ async def test_get_default_database(self): async def test_get_default_database_error(self): # URI with no database. - c = await async_rs_or_single_client( + c = await self.async_rs_or_single_client( "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), connect=False, ) @@ -297,11 +297,11 @@ async def test_get_default_database_with_authsource(self): await async_client_context.host, await async_client_context.port, ) - c = await async_rs_or_single_client(uri, connect=False) + c = await self.async_rs_or_single_client(uri, connect=False) self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database()) async def test_get_database_default(self): - c = await async_rs_or_single_client( + c = await self.async_rs_or_single_client( "mongodb://%s:%d/foo" % (await async_client_context.host, await async_client_context.port), connect=False, @@ -310,7 +310,7 @@ async def test_get_database_default(self): async def test_get_database_default_error(self): # URI with no database. - c = await async_rs_or_single_client( + c = await self.async_rs_or_single_client( "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), connect=False, ) @@ -322,47 +322,53 @@ async def test_get_database_default_with_authsource(self): await async_client_context.host, await async_client_context.port, ) - c = await async_rs_or_single_client(uri, connect=False) + c = await self.async_rs_or_single_client(uri, connect=False) self.assertEqual(AsyncDatabase(c, "foo"), c.get_database()) - def test_primary_read_pref_with_tags(self): + async def test_primary_read_pref_with_tags(self): # No tags allowed with "primary". with self.assertRaises(ConfigurationError): - AsyncMongoClient("mongodb://host/?readpreferencetags=dc:east") + await self.async_single_client("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - AsyncMongoClient("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") + await self.async_single_client( + "mongodb://host/?readpreference=primary&readpreferencetags=dc:east" + ) async def test_read_preference(self): - c = await async_rs_or_single_client( + c = await self.async_rs_or_single_client( "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode ) self.assertEqual(c.read_preference, ReadPreference.NEAREST) - def test_metadata(self): + async def test_metadata(self): metadata = copy.deepcopy(_METADATA) metadata["driver"]["name"] = "PyMongo|async" metadata["application"] = {"name": "foobar"} - client = AsyncMongoClient("mongodb://foo:27017/?appname=foobar&connect=false") + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options self.assertEqual(options.pool_options.metadata, metadata) - client = AsyncMongoClient("foo", 27017, appname="foobar", connect=False) + client = self.simple_client("foo", 27017, appname="foobar", connect=False) options = client.options self.assertEqual(options.pool_options.metadata, metadata) # No error - AsyncMongoClient(appname="x" * 128) - self.assertRaises(ValueError, AsyncMongoClient, appname="x" * 129) + self.simple_client(appname="x" * 128) + with self.assertRaises(ValueError): + self.simple_client(appname="x" * 129) # Bad "driver" options. self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") self.assertRaises(TypeError, DriverInfo, version="1", platform="a") self.assertRaises(TypeError, DriverInfo) - self.assertRaises(TypeError, AsyncMongoClient, driver=1) - self.assertRaises(TypeError, AsyncMongoClient, driver="abc") - self.assertRaises(TypeError, AsyncMongoClient, driver=("Foo", "1", "a")) + with self.assertRaises(TypeError): + self.simple_client(driver=1) + with self.assertRaises(TypeError): + self.simple_client(driver="abc") + with self.assertRaises(TypeError): + self.simple_client(driver=("Foo", "1", "a")) # Test appending to driver info. metadata["driver"]["name"] = "PyMongo|async|FooDriver" metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) - client = AsyncMongoClient( + client = self.simple_client( "foo", 27017, appname="foobar", @@ -372,7 +378,7 @@ def test_metadata(self): options = client.options self.assertEqual(options.pool_options.metadata, metadata) metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) - client = AsyncMongoClient( + client = self.simple_client( "foo", 27017, appname="foobar", @@ -382,7 +388,7 @@ def test_metadata(self): options = client.options self.assertEqual(options.pool_options.metadata, metadata) # Test truncating driver info metadata. - client = AsyncMongoClient( + client = self.simple_client( driver=DriverInfo(name="s" * _MAX_METADATA_SIZE), connect=False, ) @@ -391,7 +397,7 @@ def test_metadata(self): len(bson.encode(options.pool_options.metadata)), _MAX_METADATA_SIZE, ) - client = AsyncMongoClient( + client = self.simple_client( driver=DriverInfo(name="s" * _MAX_METADATA_SIZE, version="s" * _MAX_METADATA_SIZE), connect=False, ) @@ -407,11 +413,11 @@ def test_container_metadata(self): metadata["driver"]["name"] = "PyMongo|async" metadata["env"] = {} metadata["env"]["container"] = {"orchestrator": "kubernetes"} - client = AsyncMongoClient("mongodb://foo:27017/?appname=foobar&connect=false") + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options self.assertEqual(options.pool_options.metadata["env"], metadata["env"]) - def test_kwargs_codec_options(self): + async def test_kwargs_codec_options(self): class MyFloatType: def __init__(self, x): self.__x = x @@ -433,7 +439,7 @@ def transform_python(self, value): uuid_representation_label = "javaLegacy" unicode_decode_error_handler = "ignore" tzinfo = utc - c = AsyncMongoClient( + c = self.simple_client( document_class=document_class, type_registry=type_registry, tz_aware=tz_aware, @@ -442,12 +448,12 @@ def transform_python(self, value): tzinfo=tzinfo, connect=False, ) - self.assertEqual(c.codec_options.document_class, document_class) self.assertEqual(c.codec_options.type_registry, type_registry) self.assertEqual(c.codec_options.tz_aware, tz_aware) self.assertEqual( - c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual(c.codec_options.tzinfo, tzinfo) @@ -469,11 +475,11 @@ async def test_uri_codec_options(self): datetime_conversion, ) ) - c = AsyncMongoClient(uri, connect=False) - + c = self.simple_client(uri, connect=False) self.assertEqual(c.codec_options.tz_aware, True) self.assertEqual( - c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual( @@ -482,16 +488,15 @@ async def test_uri_codec_options(self): # Change the passed datetime_conversion to a number and re-assert. uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") - c = AsyncMongoClient(uri, connect=False) - + c = self.simple_client(uri, connect=False) self.assertEqual( c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] ) - def test_uri_option_precedence(self): + async def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" - c = AsyncMongoClient( + c = self.simple_client( uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred" ) clopts = c.options @@ -501,7 +506,7 @@ def test_uri_option_precedence(self): self.assertEqual(clopts.replica_set_name, "newname") self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) - def test_connection_timeout_ms_propagates_to_DNS_resolver(self): + async def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. from pymongo.srv_resolver import _resolve @@ -520,37 +525,37 @@ def reset_resolver(): uri_with_timeout = base_uri + "/?connectTimeoutMS=6000" expected_uri_value = 6.0 - def test_scenario(args, kwargs, expected_value): + async def test_scenario(args, kwargs, expected_value): patched_resolver.reset() - AsyncMongoClient(*args, **kwargs) + self.simple_client(*args, **kwargs) for _, kw in patched_resolver.call_list(): self.assertAlmostEqual(kw["lifetime"], expected_value) # No timeout specified. - test_scenario((base_uri,), {}, CONNECT_TIMEOUT) + await test_scenario((base_uri,), {}, CONNECT_TIMEOUT) # Timeout only specified in connection string. - test_scenario((uri_with_timeout,), {}, expected_uri_value) + await test_scenario((uri_with_timeout,), {}, expected_uri_value) # Timeout only specified in keyword arguments. kwarg = {"connectTimeoutMS": connectTimeoutMS} - test_scenario((base_uri,), kwarg, expected_kw_value) + await test_scenario((base_uri,), kwarg, expected_kw_value) # Timeout specified in both kwargs and connection string. - test_scenario((uri_with_timeout,), kwarg, expected_kw_value) + await test_scenario((uri_with_timeout,), kwarg, expected_kw_value) - def test_uri_security_options(self): + async def test_uri_security_options(self): # Ensure that we don't silently override security-related options. with self.assertRaises(InvalidURI): - AsyncMongoClient("mongodb://localhost/?ssl=true", tls=False, connect=False) + self.simple_client("mongodb://localhost/?ssl=true", tls=False, connect=False) # Matching SSL and TLS options should not cause errors. - c = AsyncMongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) + c = self.simple_client("mongodb://localhost/?ssl=false", tls=False, connect=False) self.assertEqual(c.options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): - AsyncMongoClient( + self.simple_client( "mongodb://localhost/?tlsInsecure=true", connect=False, tlsAllowInvalidHostnames=True, @@ -558,7 +563,7 @@ def test_uri_security_options(self): # Conflicting legacy tlsInsecure options should also raise an error. with self.assertRaises(InvalidURI): - AsyncMongoClient( + self.simple_client( "mongodb://localhost/?tlsInsecure=true", connect=False, tlsAllowInvalidCertificates=False, @@ -566,10 +571,10 @@ def test_uri_security_options(self): # Conflicting kwargs should raise InvalidURI with self.assertRaises(InvalidURI): - AsyncMongoClient(ssl=True, tls=False) + self.simple_client(ssl=True, tls=False) - def test_event_listeners(self): - c = AsyncMongoClient(event_listeners=[], connect=False) + async def test_event_listeners(self): + c = self.simple_client(event_listeners=[], connect=False) self.assertEqual(c.options.event_listeners, []) listeners = [ event_loggers.CommandLogger(), @@ -578,11 +583,11 @@ def test_event_listeners(self): event_loggers.TopologyLogger(), event_loggers.ConnectionPoolLogger(), ] - c = AsyncMongoClient(event_listeners=listeners, connect=False) + c = self.simple_client(event_listeners=listeners, connect=False) self.assertEqual(c.options.event_listeners, listeners) - def test_client_options(self): - c = AsyncMongoClient(connect=False) + async def test_client_options(self): + c = self.simple_client(connect=False) self.assertIsInstance(c.options, ClientOptions) self.assertIsInstance(c.options.pool_options, PoolOptions) self.assertEqual(c.options.server_selection_timeout, 30) @@ -612,16 +617,16 @@ def test_detected_environment_logging(self, mock_get_hosts): ) with self.assertLogs("pymongo", level="INFO") as cm: for host in normal_hosts: - AsyncMongoClient(host) + AsyncMongoClient(host, connect=False) for host in srv_hosts: mock_get_hosts.return_value = [(host, 1)] - AsyncMongoClient(host) - AsyncMongoClient(multi_host) + AsyncMongoClient(host, connect=False) + AsyncMongoClient(multi_host, connect=False) logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) @patch("pymongo.srv_resolver._SrvResolver.get_hosts") - def test_detected_environment_warning(self, mock_get_hosts): + async def test_detected_environment_warning(self, mock_get_hosts): with self._caplog.at_level(logging.WARN): normal_hosts = [ "host.cosmos.azure.com", @@ -634,13 +639,13 @@ def test_detected_environment_warning(self, mock_get_hosts): ) for host in normal_hosts: with self.assertWarns(UserWarning): - AsyncMongoClient(host) + self.simple_client(host) for host in srv_hosts: mock_get_hosts.return_value = [(host, 1)] with self.assertWarns(UserWarning): - AsyncMongoClient(host) + self.simple_client(host) with self.assertWarns(UserWarning): - AsyncMongoClient(multi_host) + self.simple_client(multi_host) class TestClient(AsyncIntegrationTest): @@ -657,7 +662,7 @@ def test_multiple_uris(self): async def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove connections when maxIdleTimeMS not set - client = await async_rs_or_single_client() + client = await self.async_rs_or_single_client() server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) @@ -665,12 +670,11 @@ async def test_max_idle_time_reaper_default(self): pass self.assertEqual(1, len(server._pool.conns)) self.assertTrue(conn in server._pool.conns) - await client.close() async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one - client = await async_rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + client = await self.async_rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) @@ -681,12 +685,11 @@ async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): self.assertGreaterEqual(len(server._pool.conns), 1) wait_until(lambda: conn not in server._pool.conns, "remove stale socket") wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") - await client.close() async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new connections. - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1 ) server = await (await client._get_topology()).select_server( @@ -699,12 +702,11 @@ async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): self.assertEqual(1, len(server._pool.conns)) wait_until(lambda: conn not in server._pool.conns, "remove stale socket") wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") - await client.close() async def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it - client = await async_rs_or_single_client(maxIdleTimeMS=500) + client = await self.async_rs_or_single_client(maxIdleTimeMS=500) server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) @@ -719,18 +721,17 @@ async def test_max_idle_time_reaper_removes_stale(self): lambda: len(server._pool.conns) == 0, "stale socket reaped and new one NOT added to the pool", ) - await client.close() async def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=0.1): - client = await async_rs_or_single_client() + client = await self.async_rs_or_single_client() server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) self.assertEqual(0, len(server._pool.conns)) # Assert that pool started up at minPoolSize - client = await async_rs_or_single_client(minPoolSize=10) + client = await self.async_rs_or_single_client(minPoolSize=10) server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) @@ -751,7 +752,7 @@ async def test_min_pool_size(self): async def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): - client = await async_rs_or_single_client(maxIdleTimeMS=500) + client = await self.async_rs_or_single_client(maxIdleTimeMS=500) server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) @@ -767,7 +768,7 @@ async def test_max_idle_time_checkout(self): self.assertTrue(new_con in server._pool.conns) # Test that connections are reused if maxIdleTimeMS is not set. - client = await async_rs_or_single_client() + client = await self.async_rs_or_single_client() server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) @@ -793,36 +794,38 @@ async def test_constants(self): AsyncMongoClient.HOST = "somedomainthatdoesntexist.org" AsyncMongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): - await connected(AsyncMongoClient(serverSelectionTimeoutMS=10, **kwargs)) + c = self.simple_client(serverSelectionTimeoutMS=10, **kwargs) + await connected(c) + c = self.simple_client(host, port, **kwargs) # Override the defaults. No error. - await connected(AsyncMongoClient(host, port, **kwargs)) + await connected(c) # Set good defaults. AsyncMongoClient.HOST = host AsyncMongoClient.PORT = port # No error. - await connected(AsyncMongoClient(**kwargs)) + c = self.simple_client(**kwargs) + await connected(c) async def test_init_disconnected(self): host, port = await async_client_context.host, await async_client_context.port - c = await async_rs_or_single_client(connect=False) + c = await self.async_rs_or_single_client(connect=False) # is_primary causes client to block until connected self.assertIsInstance(await c.is_primary, bool) - - c = await async_rs_or_single_client(connect=False) + c = await self.async_rs_or_single_client(connect=False) self.assertIsInstance(await c.is_mongos, bool) - c = await async_rs_or_single_client(connect=False) + c = await self.async_rs_or_single_client(connect=False) self.assertIsInstance(c.options.pool_options.max_pool_size, int) self.assertIsInstance(c.nodes, frozenset) - c = await async_rs_or_single_client(connect=False) + c = await self.async_rs_or_single_client(connect=False) self.assertEqual(c.codec_options, CodecOptions()) - c = await async_rs_or_single_client(connect=False) + c = await self.async_rs_or_single_client(connect=False) self.assertFalse(await c.primary) self.assertFalse(await c.secondaries) - c = await async_rs_or_single_client(connect=False) + c = await self.async_rs_or_single_client(connect=False) self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) self.assertIsNone(await c.address) # PYTHON-2981 @@ -834,45 +837,44 @@ async def test_init_disconnected(self): self.assertEqual(await c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" - c = AsyncMongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + c = self.simple_client(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) with self.assertRaises(ConnectionFailure): await c.pymongo_test.test.find_one() async def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = AsyncMongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + c = self.simple_client(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) with self.assertRaises(ConnectionFailure): await c.pymongo_test.test.find_one() async def test_equality(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) - c = await async_rs_or_single_client(seed, connect=False) - self.addAsyncCleanup(c.close) + c = await self.async_rs_or_single_client(seed, connect=False) self.assertEqual(async_client_context.client, c) # Explicitly test inequality self.assertFalse(async_client_context.client != c) - c = await async_rs_or_single_client("invalid.com", connect=False) - self.addAsyncCleanup(c.close) + c = await self.async_rs_or_single_client("invalid.com", connect=False) self.assertNotEqual(async_client_context.client, c) self.assertTrue(async_client_context.client != c) + + c1 = self.simple_client("a", connect=False) + c2 = self.simple_client("b", connect=False) + # Seeds differ: - self.assertNotEqual( - AsyncMongoClient("a", connect=False), AsyncMongoClient("b", connect=False) - ) + self.assertNotEqual(c1, c2) + + c1 = self.simple_client(["a", "b", "c"], connect=False) + c2 = self.simple_client(["c", "a", "b"], connect=False) + # Same seeds but out of order still compares equal: - self.assertEqual( - AsyncMongoClient(["a", "b", "c"], connect=False), - AsyncMongoClient(["c", "a", "b"], connect=False), - ) + self.assertEqual(c1, c2) async def test_hashable(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) - c = await async_rs_or_single_client(seed, connect=False) - self.addAsyncCleanup(c.close) + c = await self.async_rs_or_single_client(seed, connect=False) self.assertIn(c, {async_client_context.client}) - c = await async_rs_or_single_client("invalid.com", connect=False) - self.addAsyncCleanup(c.close) + c = await self.async_rs_or_single_client("invalid.com", connect=False) self.assertNotIn(c, {async_client_context.client}) async def test_host_w_port(self): @@ -886,7 +888,7 @@ async def test_host_w_port(self): ) ) - def test_repr(self): + async def test_repr(self): # Used to test 'eval' below. import bson @@ -905,9 +907,10 @@ def test_repr(self): self.assertIn("w=1", the_repr) self.assertIn("wtimeoutms=100", the_repr) - self.assertEqual(eval(the_repr), client) + async with eval(the_repr) as client_two: + self.assertEqual(client_two, client) - client = AsyncMongoClient( + client = self.simple_client( "localhost:27017,localhost:27018", replicaSet="replset", connectTimeoutMS=12345, @@ -925,7 +928,8 @@ def test_repr(self): self.assertIn("w=1", the_repr) self.assertIn("wtimeoutms=100", the_repr) - self.assertEqual(eval(the_repr), client) + async with eval(the_repr) as client_two: + self.assertEqual(client_two, client) def test_getters(self): wait_until(lambda: async_client_context.nodes == self.client.nodes, "find all nodes") @@ -941,8 +945,7 @@ async def test_list_databases(self): for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): self.assertIs(type(helper_doc), dict) self.assertEqual(helper_doc.keys(), cmd_doc.keys()) - client = await async_rs_or_single_client(document_class=SON) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(document_class=SON) async for doc in await client.list_databases(): self.assertIs(type(doc), dict) @@ -981,7 +984,7 @@ async def test_drop_database(self): await self.client.drop_database("pymongo_test") if async_client_context.is_rs: - wc_client = await async_rs_or_single_client(w=len(async_client_context.nodes) + 1) + wc_client = await self.async_rs_or_single_client(w=len(async_client_context.nodes) + 1) with self.assertRaises(WriteConcernError): await wc_client.drop_database("pymongo_test2") @@ -991,7 +994,7 @@ async def test_drop_database(self): self.assertNotIn("pymongo_test2", dbs) async def test_close(self): - test_client = await async_rs_or_single_client() + test_client = await self.async_rs_or_single_client() coll = test_client.pymongo_test.bar await test_client.close() with self.assertRaises(InvalidOperation): @@ -1001,7 +1004,7 @@ async def test_close_kills_cursors(self): if sys.platform.startswith("java"): # We can't figure out how to make this test reliable with Jython. raise SkipTest("Can't test with Jython") - test_client = await async_rs_or_single_client() + test_client = await self.async_rs_or_single_client() # Kill any cursors possibly queued up by previous tests. gc.collect() await test_client._process_periodic_tasks() @@ -1028,13 +1031,13 @@ async def test_close_kills_cursors(self): self.assertTrue(test_client._topology._opened) await test_client.close() self.assertFalse(test_client._topology._opened) - test_client = await async_rs_or_single_client() + test_client = await self.async_rs_or_single_client() # The killCursors task should not need to re-open the topology. await test_client._process_periodic_tasks() self.assertTrue(test_client._topology._opened) async def test_close_stops_kill_cursors_thread(self): - client = await async_rs_client() + client = await self.async_rs_client() await client.test.test.find_one() self.assertFalse(client._kill_cursors_executor._stopped) @@ -1050,7 +1053,7 @@ async def test_close_stops_kill_cursors_thread(self): async def test_uri_connect_option(self): # Ensure that topology is not opened if connect=False. - client = await async_rs_client(connect=False) + client = await self.async_rs_client(connect=False) self.assertFalse(client._topology._opened) # Ensure kill cursors thread has not been started. @@ -1063,19 +1066,15 @@ async def test_uri_connect_option(self): kc_thread = client._kill_cursors_executor._thread self.assertTrue(kc_thread and kc_thread.is_alive()) - # Tear down. - await client.close() - async def test_close_does_not_open_servers(self): - client = await async_rs_client(connect=False) + client = await self.async_rs_client(connect=False) topology = client._topology self.assertEqual(topology._servers, {}) await client.close() self.assertEqual(topology._servers, {}) async def test_close_closes_sockets(self): - client = await async_rs_client() - self.addAsyncCleanup(client.close) + client = await self.async_rs_client() await client.test.test.find_one() topology = client._topology await client.close() @@ -1104,35 +1103,35 @@ async def test_auth_from_uri(self): with self.assertRaises(OperationFailure): await connected( - await async_rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port)) + await self.async_rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port)) ) # No error. await connected( - await async_rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + await self.async_rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) ) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) with self.assertRaises(OperationFailure): - await connected(await async_rs_or_single_client_noauth(uri)) + await connected(await self.async_rs_or_single_client_noauth(uri)) # No error. await connected( - await async_rs_or_single_client_noauth( + await self.async_rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) ) ) # Auth with lazy connection. await ( - await async_rs_or_single_client_noauth( + await self.async_rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False ) ).pymongo_test.test.find_one() # Wrong password. - bad_client = await async_rs_or_single_client_noauth( + bad_client = await self.async_rs_or_single_client_noauth( "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False ) @@ -1144,7 +1143,7 @@ async def test_username_and_password(self): await async_client_context.create_user("admin", "ad min", "pa/ss") self.addAsyncCleanup(async_client_context.drop_user, "admin", "ad min") - c = await async_rs_or_single_client_noauth(username="ad min", password="pa/ss") + c = await self.async_rs_or_single_client_noauth(username="ad min", password="pa/ss") # Username and password aren't in strings that will likely be logged. self.assertNotIn("ad min", repr(c)) @@ -1157,14 +1156,14 @@ async def test_username_and_password(self): with self.assertRaises(OperationFailure): await ( - await async_rs_or_single_client_noauth(username="ad min", password="foo") + await self.async_rs_or_single_client_noauth(username="ad min", password="foo") ).server_info() @async_client_context.require_auth @async_client_context.require_no_fips async def test_lazy_auth_raises_operation_failure(self): host = await async_client_context.host - lazy_client = await async_rs_or_single_client_noauth( + lazy_client = await self.async_rs_or_single_client_noauth( f"mongodb://user:wrong@{host}/pymongo_test", connect=False ) @@ -1182,8 +1181,7 @@ async def test_unix_socket(self): uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. - client = await async_rs_or_single_client(uri) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(uri) await client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = await client.list_database_names() self.assertTrue("pymongo_test" in dbs) @@ -1192,11 +1190,10 @@ async def test_unix_socket(self): # Confirm it fails with a missing socket. with self.assertRaises(ConnectionFailure): - await connected( - AsyncMongoClient( - "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 - ), + c = self.simple_client( + "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 ) + await connected(c) async def test_document_class(self): c = self.client @@ -1207,15 +1204,15 @@ async def test_document_class(self): self.assertTrue(isinstance(await db.test.find_one(), dict)) self.assertFalse(isinstance(await db.test.find_one(), SON)) - c = await async_rs_or_single_client(document_class=SON) - self.addAsyncCleanup(c.close) + c = await self.async_rs_or_single_client(document_class=SON) + db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) self.assertTrue(isinstance(await db.test.find_one(), SON)) async def test_timeouts(self): - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( connectTimeoutMS=10500, socketTimeoutMS=10500, maxIdleTimeMS=10500, @@ -1228,28 +1225,31 @@ async def test_timeouts(self): self.assertEqual(10.5, client.options.server_selection_timeout) async def test_socket_timeout_ms_validation(self): - c = await async_rs_or_single_client(socketTimeoutMS=10 * 1000) + c = await self.async_rs_or_single_client(socketTimeoutMS=10 * 1000) self.assertEqual(10, (await async_get_pool(c)).opts.socket_timeout) - c = await connected(await async_rs_or_single_client(socketTimeoutMS=None)) + c = await connected(await self.async_rs_or_single_client(socketTimeoutMS=None)) self.assertEqual(None, (await async_get_pool(c)).opts.socket_timeout) - c = await connected(await async_rs_or_single_client(socketTimeoutMS=0)) + c = await connected(await self.async_rs_or_single_client(socketTimeoutMS=0)) self.assertEqual(None, (await async_get_pool(c)).opts.socket_timeout) with self.assertRaises(ValueError): - await async_rs_or_single_client(socketTimeoutMS=-1) + async with await self.async_rs_or_single_client(socketTimeoutMS=-1): + pass with self.assertRaises(ValueError): - await async_rs_or_single_client(socketTimeoutMS=1e10) + async with await self.async_rs_or_single_client(socketTimeoutMS=1e10): + pass with self.assertRaises(ValueError): - await async_rs_or_single_client(socketTimeoutMS="foo") + async with await self.async_rs_or_single_client(socketTimeoutMS="foo"): + pass async def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 - timeout = await async_rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + timeout = await self.async_rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) self.addAsyncCleanup(timeout.close) await no_timeout.pymongo_test.drop_collection("test") @@ -1266,7 +1266,7 @@ async def get_x(db): with self.assertRaises(NetworkTimeout): await get_x(timeout.pymongo_test) - def test_server_selection_timeout(self): + async def test_server_selection_timeout(self): client = AsyncMongoClient(serverSelectionTimeoutMS=100, connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) @@ -1298,7 +1298,7 @@ def test_server_selection_timeout(self): self.assertAlmostEqual(30, client.options.server_selection_timeout) async def test_waitQueueTimeoutMS(self): - client = await async_rs_or_single_client(waitQueueTimeoutMS=2000) + client = await self.async_rs_or_single_client(waitQueueTimeoutMS=2000) self.assertEqual((await async_get_pool(client)).opts.wait_queue_timeout, 2) async def test_socketKeepAlive(self): @@ -1311,7 +1311,7 @@ async def test_socketKeepAlive(self): async def test_tz_aware(self): self.assertRaises(ValueError, AsyncMongoClient, tz_aware="foo") - aware = await async_rs_or_single_client(tz_aware=True) + aware = await self.async_rs_or_single_client(tz_aware=True) self.addAsyncCleanup(aware.close) naive = self.client await aware.pymongo_test.drop_collection("test") @@ -1341,8 +1341,7 @@ async def test_ipv6(self): if async_client_context.is_rs: uri += "/?replicaSet=" + (async_client_context.replica_set_name or "") - client = await async_rs_or_single_client_noauth(uri) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client_noauth(uri) await client.pymongo_test.test.insert_one({"dummy": "object"}) await client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) @@ -1351,7 +1350,7 @@ async def test_ipv6(self): self.assertTrue("pymongo_test_bernie" in dbs) async def test_contextlib(self): - client = await async_rs_or_single_client() + client = await self.async_rs_or_single_client() await client.pymongo_test.drop_collection("test") await client.pymongo_test.test.insert_one({"foo": "bar"}) @@ -1365,7 +1364,7 @@ async def test_contextlib(self): self.assertEqual("bar", (await client.pymongo_test.test.find_one())["foo"]) with self.assertRaises(InvalidOperation): await client.pymongo_test.test.find_one() - client = await async_rs_or_single_client() + client = await self.async_rs_or_single_client() async with client as client: self.assertEqual("bar", (await client.pymongo_test.test.find_one())["foo"]) with self.assertRaises(InvalidOperation): @@ -1443,8 +1442,7 @@ async def test_operation_failure(self): # response to getLastError. PYTHON-395. We need a new client here # to avoid race conditions caused by replica set failover or idle # socket reaping. - client = await async_single_client() - self.addAsyncCleanup(client.close) + client = await self.async_single_client() await client.pymongo_test.test.find_one() pool = await async_get_pool(client) socket_count = len(pool.conns) @@ -1468,8 +1466,7 @@ async def test_lazy_connect_w0(self): await async_client_context.client.drop_database("test_lazy_connect_w0") self.addAsyncCleanup(async_client_context.client.drop_database, "test_lazy_connect_w0") - client = await async_rs_or_single_client(connect=False, w=0) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(connect=False, w=0) await client.test_lazy_connect_w0.test.insert_one({}) async def predicate(): @@ -1477,8 +1474,7 @@ async def predicate(): await async_wait_until(predicate, "find one document") - client = await async_rs_or_single_client(connect=False, w=0) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(connect=False, w=0) await client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) async def predicate(): @@ -1486,8 +1482,7 @@ async def predicate(): await async_wait_until(predicate, "update one document") - client = await async_rs_or_single_client(connect=False, w=0) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(connect=False, w=0) await client.test_lazy_connect_w0.test.delete_one({}) async def predicate(): @@ -1499,8 +1494,7 @@ async def predicate(): async def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = await async_rs_or_single_client(maxPoolSize=1, retryReads=False) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(maxPoolSize=1, retryReads=False) collection = client.pymongo_test.test pool = await async_get_pool(client) pool._check_interval_seconds = None # Never check. @@ -1527,7 +1521,9 @@ async def test_auth_network_error(self): # Get a client with one socket so we detect if it's leaked. c = await connected( - await async_rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False) + await self.async_rs_or_single_client( + maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False + ) ) # Cause a network error on the actual socket. @@ -1545,8 +1541,7 @@ async def test_auth_network_error(self): @async_client_context.require_no_replica_set async def test_connect_to_standalone_using_replica_set_name(self): - client = await async_single_client(replicaSet="anything", serverSelectionTimeoutMS=100) - + client = await self.async_single_client(replicaSet="anything", serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): await client.test.test.find_one() @@ -1556,7 +1551,7 @@ async def test_stale_getmore(self): # the topology before the getMore message is sent. Test that # AsyncMongoClient._run_operation_with_response handles the error. with self.assertRaises(AutoReconnect): - client = await async_rs_client(connect=False, serverSelectionTimeoutMS=100) + client = await self.async_rs_client(connect=False, serverSelectionTimeoutMS=100) await client._run_operation( operation=message._GetMore( "pymongo_test", @@ -1604,7 +1599,7 @@ def init(self, *args): await async_client_context.host, await async_client_context.port, ) - client = await async_single_client(uri, event_listeners=[listener]) + await self.async_single_client(uri, event_listeners=[listener]) wait_until( lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" ) @@ -1613,7 +1608,6 @@ def init(self, *args): # closer to 0.5 sec with heartbeatFrequencyMS configured. self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) - await client.close() finally: ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore @@ -1630,31 +1624,31 @@ def compression_settings(client): return pool_options._compression_settings uri = "mongodb://localhost:27017/?compressors=zlib" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, 4) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar,zlib" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) @@ -1662,56 +1656,55 @@ def compression_settings(client): # According to the connection string spec, unsupported values # just raise a warning and are ignored. uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) if not _have_snappy(): uri = "mongodb://localhost:27017/?compressors=snappy" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: uri = "mongodb://localhost:27017/?compressors=snappy" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy"]) uri = "mongodb://localhost:27017/?compressors=snappy,zlib" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy", "zlib"]) if not _have_zstd(): uri = "mongodb://localhost:27017/?compressors=zstd" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: uri = "mongodb://localhost:27017/?compressors=zstd" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zstd"]) uri = "mongodb://localhost:27017/?compressors=zstd,zlib" - client = AsyncMongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zstd", "zlib"]) options = async_client_context.default_client_options if "compressors" in options and "zlib" in options["compressors"]: for level in range(-1, 10): - client = await async_single_client(zlibcompressionlevel=level) + client = await self.async_single_client(zlibcompressionlevel=level) # No error await client.pymongo_test.test.find_one() async def test_reset_during_update_pool(self): - client = await async_rs_or_single_client(minPoolSize=10) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(minPoolSize=10) await client.admin.command("ping") pool = await async_get_pool(client) generation = pool.gen.get_overall() @@ -1757,11 +1750,9 @@ def run(self): async def test_background_connections_do_not_hold_locks(self): min_pool_size = 10 - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False ) - self.addAsyncCleanup(client.close) - # Create a single connection in the pool. await client.admin.command("ping") @@ -1791,21 +1782,19 @@ def stall_connect(*args, **kwargs): @async_client_context.require_replica_set async def test_direct_connection(self): # direct_connection=True should result in Single topology. - client = await async_rs_or_single_client(directConnection=True) + client = await self.async_rs_or_single_client(directConnection=True) await client.admin.command("ping") self.assertEqual(len(client.nodes), 1) self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) - await client.close() # direct_connection=False should result in RS topology. - client = await async_rs_or_single_client(directConnection=False) + client = await self.async_rs_or_single_client(directConnection=False) await client.admin.command("ping") self.assertGreaterEqual(len(client.nodes), 1) self.assertIn( client._topology_settings.get_topology_type(), [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], ) - await client.close() # directConnection=True, should error with multiple hosts as a list. with self.assertRaises(ConfigurationError): @@ -1825,11 +1814,10 @@ def server_description_count(): gc.collect() with client_knobs(min_heartbeat_interval=0.003): - client = AsyncMongoClient( + client = self.simple_client( "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 ) initial_count = server_description_count() - self.addAsyncCleanup(client.close) with self.assertRaises(ServerSelectionTimeoutError): await client.test.test.find_one() gc.collect() @@ -1842,8 +1830,7 @@ def server_description_count(): @async_client_context.require_failCommand_fail_point async def test_network_error_message(self): - client = await async_single_client(retryReads=False) - self.addAsyncCleanup(client.close) + client = await self.async_single_client(retryReads=False) await client.admin.command("ping") # connect async with self.fail_point( {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} @@ -1855,7 +1842,7 @@ async def test_network_error_message(self): @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") async def test_process_periodic_tasks(self): - client = await async_rs_or_single_client() + client = await self.async_rs_or_single_client() coll = client.db.collection await coll.insert_many([{} for _ in range(5)]) cursor = coll.find(batch_size=2) @@ -1873,7 +1860,7 @@ async def test_process_periodic_tasks(self): with self.assertRaises(InvalidOperation): await coll.insert_many([{} for _ in range(5)]) - def test_service_name_from_kwargs(self): + async def test_service_name_from_kwargs(self): client = AsyncMongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc", srvServiceName="customname", @@ -1893,12 +1880,12 @@ def test_service_name_from_kwargs(self): ) self.assertEqual(client._topology_settings.srv_service_name, "customname") - def test_srv_max_hosts_kwarg(self): - client = AsyncMongoClient("mongodb+srv://test1.test.build.10gen.cc/") + async def test_srv_max_hosts_kwarg(self): + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") self.assertGreater(len(client.topology_description.server_descriptions()), 1) - client = AsyncMongoClient("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) self.assertEqual(len(client.topology_description.server_descriptions()), 1) - client = AsyncMongoClient( + client = self.simple_client( "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) @@ -1946,10 +1933,10 @@ async def _test_handshake(self, env_vars, expected_env): if "AWS_REGION" not in env_vars: os.environ["AWS_REGION"] = "" - async with await async_rs_or_single_client(serverSelectionTimeoutMS=10000) as client: - await client.admin.command("ping") - options = client.options - self.assertEqual(options.pool_options.metadata, metadata) + client = await self.async_rs_or_single_client(serverSelectionTimeoutMS=10000) + await client.admin.command("ping") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) async def test_handshake_01_aws(self): await self._test_handshake( @@ -2045,7 +2032,7 @@ def setUp(self): async def test_exhaust_query_server_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = await connected(await async_rs_or_single_client(maxPoolSize=1)) + client = await connected(await self.async_rs_or_single_client(maxPoolSize=1)) collection = client.pymongo_test.test pool = await async_get_pool(client) @@ -2068,7 +2055,7 @@ async def test_exhaust_query_server_error(self): async def test_exhaust_getmore_server_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. - client = await async_rs_or_single_client(maxPoolSize=1) + client = await self.async_rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test await collection.drop() @@ -2107,7 +2094,9 @@ async def receive_message(request_id): async def test_exhaust_query_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = await connected(await async_rs_or_single_client(maxPoolSize=1, retryReads=False)) + client = await connected( + await self.async_rs_or_single_client(maxPoolSize=1, retryReads=False) + ) collection = client.pymongo_test.test pool = await async_get_pool(client) pool._check_interval_seconds = None # Never check. @@ -2128,7 +2117,7 @@ async def test_exhaust_query_network_error(self): async def test_exhaust_getmore_network_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. - client = await async_rs_or_single_client(maxPoolSize=1) + client = await self.async_rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test await collection.drop() await collection.insert_many([{} for _ in range(200)]) # More than one batch. @@ -2177,7 +2166,7 @@ def test_gevent_timeout(self): raise SkipTest("Must be running monkey patched by gevent") from gevent import Timeout, spawn - client = rs_or_single_client(maxPoolSize=1) + client = self.async_rs_or_single_client(maxPoolSize=1) coll = client.pymongo_test.test coll.insert_one({}) @@ -2209,7 +2198,7 @@ def test_gevent_timeout_when_creating_connection(self): raise SkipTest("Must be running monkey patched by gevent") from gevent import Timeout, spawn - client = rs_or_single_client() + client = self.async_rs_or_single_client() self.addCleanup(client.close) coll = client.pymongo_test.test pool = async_get_pool(client) @@ -2246,7 +2235,7 @@ class TestClientLazyConnect(AsyncIntegrationTest): """Test concurrent operations on a lazily-connecting MongoClient.""" def _get_client(self): - return rs_or_single_client(connect=False) + return self.async_rs_or_single_client(connect=False) @async_client_context.require_sync def test_insert_one(self): @@ -2380,6 +2369,7 @@ async def _test_network_error(self, operation_callback): retryReads=False, serverSelectionTimeoutMS=1000, ) + self.addAsyncCleanup(c.close) # Set host-specific information so we can test whether it is reset. diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index c35e823d03..3a17299453 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -27,7 +27,6 @@ ) from test.utils import ( OvertCommandListener, - async_rs_or_single_client, ) from unittest.mock import patch @@ -39,7 +38,6 @@ InvalidOperation, NetworkTimeout, ) -from pymongo.monitoring import * from pymongo.operations import * from pymongo.write_concern import WriteConcern @@ -97,8 +95,7 @@ async def asyncSetUp(self): @async_client_context.require_no_serverless async def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) models = [] for _ in range(self.max_write_batch_size + 1): @@ -123,8 +120,7 @@ async def test_batch_splits_if_num_operations_too_large(self): @async_client_context.require_no_serverless async def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) models = [] num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) @@ -157,11 +153,10 @@ async def test_batch_splits_if_ops_payload_too_large(self): @async_client_context.require_failCommand_fail_point async def test_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( event_listeners=[listener], retryWrites=False, ) - self.addAsyncCleanup(client.close) fail_command = { "configureFailPoint": "failCommand", @@ -200,8 +195,7 @@ async def test_collects_write_concern_errors_across_batches(self): @async_client_context.require_no_serverless async def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -231,8 +225,7 @@ async def test_collects_write_errors_across_batches_unordered(self): @async_client_context.require_no_serverless async def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -262,8 +255,7 @@ async def test_collects_write_errors_across_batches_ordered(self): @async_client_context.require_no_serverless async def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -304,8 +296,7 @@ async def test_handles_cursor_requiring_getMore(self): @async_client_context.require_no_standalone async def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -348,8 +339,7 @@ async def test_handles_cursor_requiring_getMore_within_transaction(self): @async_client_context.require_failCommand_fail_point async def test_handles_getMore_error(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addAsyncCleanup(collection.drop) @@ -403,8 +393,7 @@ async def test_handles_getMore_error(self): @async_client_context.require_no_serverless async def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) b_repeated = "b" * self.max_bson_object_size @@ -460,8 +449,7 @@ async def _setup_namespace_test_models(self): @async_client_context.require_no_serverless async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) num_models, models = await self._setup_namespace_test_models() models.append( @@ -492,8 +480,7 @@ async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): @async_client_context.require_no_serverless async def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) num_models, models = await self._setup_namespace_test_models() c_repeated = "c" * 200 @@ -530,8 +517,7 @@ async def test_batch_splits_if_new_namespace_is_too_large(self): @async_client_context.require_version_min(8, 0, 0, -24) @async_client_context.require_no_serverless async def test_returns_error_if_no_writes_can_be_added_to_ops(self): - client = await async_rs_or_single_client() - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client() # Document too large. b_repeated = "b" * self.max_message_size_bytes @@ -554,8 +540,7 @@ async def test_returns_error_if_auto_encryption_configured(self): key_vault_namespace="db.coll", kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, ) - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -580,7 +565,7 @@ async def asyncSetUp(self): async def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 - internal_client = await async_rs_or_single_client(timeoutMS=None) + internal_client = await self.async_rs_or_single_client(timeoutMS=None) self.addAsyncCleanup(internal_client.close) collection = internal_client.db["coll"] @@ -605,14 +590,13 @@ async def test_timeout_in_multi_batch_bulk_write(self): ) listener = OvertCommandListener() - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( event_listeners=[listener], readConcernLevel="majority", readPreference="primary", timeoutMS=2000, w="majority", ) - self.addAsyncCleanup(client.close) await client.admin.command("ping") # Init the client first. with self.assertRaises(ClientBulkWriteException) as context: await client.bulk_write(models=models) diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 10d64a525c..74a4a5151d 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -30,6 +30,7 @@ from test import unittest from test.asynchronous import ( # TODO: fix sync imports in PYTHON-4528 AsyncIntegrationTest, + AsyncUnitTest, async_client_context, ) from test.utils import ( @@ -37,8 +38,6 @@ EventListener, async_get_pool, async_is_mongos, - async_rs_or_single_client, - async_single_client, async_wait_until, wait_until, ) @@ -82,14 +81,20 @@ _IS_SYNC = False -class TestCollectionNoConnect(unittest.TestCase): +class TestCollectionNoConnect(AsyncUnitTest): """Test Collection features on a client that does not connect.""" db: AsyncDatabase + client: AsyncMongoClient @classmethod - def setUpClass(cls): - cls.db = AsyncMongoClient(connect=False).pymongo_test + async def _setup_class(cls): + cls.client = AsyncMongoClient(connect=False) + cls.db = cls.client.pymongo_test + + @classmethod + async def _tearDown_class(cls): + await cls.client.close() def test_collection(self): self.assertRaises(TypeError, AsyncCollection, self.db, 5) @@ -1819,8 +1824,7 @@ async def test_exhaust(self): # Insert enough documents to require more than one batch await self.db.test.insert_many([{"i": i} for i in range(150)]) - client = await async_rs_or_single_client(maxPoolSize=1) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(maxPoolSize=1) pool = await async_get_pool(client) # Make sure the socket is returned after exhaustion. @@ -2100,7 +2104,7 @@ async def test_find_one_and(self): async def test_find_one_and_write_concern(self): listener = EventListener() - db = (await async_single_client(event_listeners=[listener]))[self.db.name] + db = (await self.async_single_client(event_listeners=[listener]))[self.db.name] # non-default WriteConcern. c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 6967205fe3..d6773d832e 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -34,7 +34,6 @@ AllowListEventListener, EventListener, OvertCommandListener, - async_rs_or_single_client, ignore_deprecations, wait_until, ) @@ -232,7 +231,7 @@ async def test_max_await_time_ms(self): self.assertEqual(90, cursor._max_await_time_ms) listener = AllowListEventListener("find", "getMore") - coll = (await async_rs_or_single_client(event_listeners=[listener]))[ + coll = (await self.async_rs_or_single_client(event_listeners=[listener]))[ self.db.name ].pymongo_test @@ -353,8 +352,7 @@ async def test_explain(self): async def test_explain_with_read_concern(self): # Do not add readConcern level to explain. listener = AllowListEventListener("explain") - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(await coll.find().explain()) started = listener.started_events @@ -1261,8 +1259,7 @@ async def test_close_kills_cursor_synchronously(self): await self.client._process_periodic_tasks() listener = AllowListEventListener("killCursors") - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) coll = client[self.db.name].test_close_kills_cursors # Add some test data. @@ -1300,8 +1297,7 @@ def assertCursorKilled(): @async_client_context.require_failCommand_appName async def test_timeout_kills_cursor_asynchronously(self): listener = AllowListEventListener("killCursors") - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) coll = client[self.db.name].test_timeout_kills_cursor # Add some test data. @@ -1358,8 +1354,7 @@ def test_delete_not_initialized(self): async def test_getMore_does_not_send_readPreference(self): listener = AllowListEventListener("find", "getMore") - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) # We never send primary read preference so override the default. coll = client[self.db.name].get_collection( "test", read_preference=ReadPreference.PRIMARY_PREFERRED @@ -1463,7 +1458,7 @@ async def test_find_raw_transaction(self): await c.insert_many(docs) listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) async with client.start_session() as session: async with await session.start_transaction(): batches = await ( @@ -1493,7 +1488,7 @@ async def test_find_raw_retryable_reads(self): await c.insert_many(docs) listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener], retryReads=True) + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) async with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} ): @@ -1514,7 +1509,7 @@ async def test_find_raw_snapshot_reads(self): await c.insert_many(docs) listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener], retryReads=True) + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] async with client.start_session(snapshot=True) as session: await db.test.distinct("x", {}, session=session) @@ -1577,7 +1572,7 @@ async def test_read_concern(self): async def test_monitoring(self): listener = EventListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test await c.drop() await c.insert_many([{"_id": i} for i in range(10)]) @@ -1643,7 +1638,7 @@ async def test_aggregate_raw_transaction(self): await c.insert_many(docs) listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) async with client.start_session() as session: async with await session.start_transaction(): batches = await ( @@ -1674,7 +1669,7 @@ async def test_aggregate_raw_retryable_reads(self): await c.insert_many(docs) listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener], retryReads=True) + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) async with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} ): @@ -1698,7 +1693,7 @@ async def test_aggregate_raw_snapshot_reads(self): await c.insert_many(docs) listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener], retryReads=True) + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] async with client.start_session(snapshot=True) as session: await db.test.distinct("x", {}, session=session) @@ -1744,7 +1739,7 @@ async def test_collation(self): async def test_monitoring(self): listener = EventListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test await c.drop() await c.insert_many([{"_id": i} for i in range(10)]) @@ -1788,8 +1783,7 @@ async def test_monitoring(self): @async_client_context.require_no_mongos async def test_exhaust_cursor_db_set(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test await c.delete_many({}) await c.insert_many([{"_id": i} for i in range(3)]) diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 8f6886a2a7..c5d62323df 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -29,7 +29,6 @@ from test.utils import ( IMPOSSIBLE_WRITE_CONCERN, OvertCommandListener, - async_rs_or_single_client, async_wait_until, ) @@ -208,7 +207,7 @@ async def test_list_collection_names(self): async def test_list_collection_names_filter(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] await db.capped.drop() await db.create_collection("capped", capped=True, size=4096) @@ -235,8 +234,7 @@ async def test_list_collection_names_filter(self): async def test_check_exists(self): listener = OvertCommandListener() - client = await async_rs_or_single_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] await db.drop_collection("unique") await db.create_collection("unique", check_exists=True) @@ -326,7 +324,7 @@ async def test_list_collections(self): await self.client.drop_database("pymongo_test") async def test_list_collection_names_single_socket(self): - client = await async_rs_or_single_client(maxPoolSize=1) + client = await self.async_rs_or_single_client(maxPoolSize=1) await client.drop_database("test_collection_names_single_socket") db = client.test_collection_names_single_socket for i in range(200): diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 030f468db2..3f3714eeb4 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -31,7 +31,7 @@ from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context from test.asynchronous.test_bulk import AsyncBulkTestBase from threading import Thread -from typing import Any, Dict, Mapping +from typing import Any, Dict, Mapping, Optional import pytest @@ -44,6 +44,8 @@ from test import ( unittest, ) +from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.utils_spec_runner import AsyncSpecRunner from test.helpers import ( AWS_CREDS, AZURE_CREDS, @@ -59,12 +61,10 @@ OvertCommandListener, SpecTestCreator, TopologyEventListener, - async_rs_or_single_client, async_wait_until, camel_to_snake_args, is_greenthread_patched, ) -from test.utils_spec_runner import SpecRunner from bson import DatetimeMS, Decimal128, encode, json_util from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation @@ -109,13 +109,12 @@ class TestAutoEncryptionOpts(AsyncPyMongoTestCase): @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") async def test_crypt_shared(self): # Test that we can pick up crypt_shared lib automatically - client = AsyncMongoClient( + self.simple_client( auto_encryption_opts=AutoEncryptionOpts( KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True ), connect=False, ) - self.addAsyncCleanup(client.aclose) @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): @@ -196,19 +195,16 @@ def test_init_kms_tls_options(self): class TestClientOptions(AsyncPyMongoTestCase): async def test_default(self): - client = AsyncMongoClient(connect=False) - self.addAsyncCleanup(client.aclose) + client = self.simple_client(connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) - client = AsyncMongoClient(auto_encryption_opts=None, connect=False) - self.addAsyncCleanup(client.aclose) + client = self.simple_client(auto_encryption_opts=None, connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") async def test_kwargs(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = AsyncMongoClient(auto_encryption_opts=opts, connect=False) - self.addAsyncCleanup(client.aclose) + client = self.simple_client(auto_encryption_opts=opts, connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) @@ -229,6 +225,34 @@ def assertBinaryUUID(self, val): self.assertIsInstance(val, Binary) self.assertEqual(val.subtype, UUID_SUBTYPE) + def create_client_encryption( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = AsyncClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + self.addAsyncCleanup(client_encryption.close) + return client_encryption + + @classmethod + def unmanaged_create_client_encryption( + cls, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = AsyncClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + return client_encryption + # Location of JSON test files. if _IS_SYNC: @@ -260,8 +284,7 @@ def bson_data(*paths): class TestClientSimple(AsyncEncryptionIntegrationTest): async def _test_auto_encrypt(self, opts): - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) # Create the encrypted field's data key. key_vault = await create_key_vault( @@ -342,8 +365,7 @@ async def test_auto_encrypt_local_schema_map(self): async def test_use_after_close(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) await client.admin.command("ping") await client.aclose() @@ -360,8 +382,7 @@ async def test_use_after_close(self): ) async def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) async def target(): with warnings.catch_warnings(): @@ -375,8 +396,7 @@ async def target(): class TestEncryptedBulkWrite(AsyncBulkTestBase, AsyncEncryptionIntegrationTest): async def test_upsert_uuid_standard_encrypt(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) encrypted_coll = client.pymongo_test.test @@ -416,8 +436,7 @@ async def _setup_class(cls): @async_client_context.require_version_max(4, 0, 99) async def test_raise_max_wire_version_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) msg = "Auto-encryption requires a minimum MongoDB version of 4.2" with self.assertRaisesRegex(ConfigurationError, msg): await client.test.test.insert_one({}) @@ -430,8 +449,7 @@ async def test_raise_max_wire_version_error(self): async def test_raise_unsupported_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client.aclose) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) msg = "find_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): await client.test.test.find_raw_batches({}) @@ -450,10 +468,9 @@ async def test_raise_unsupported_error(self): class TestExplicitSimple(AsyncEncryptionIntegrationTest): async def test_encrypt_decrypt(self): - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS ) - self.addAsyncCleanup(client_encryption.close) # Use standard UUID representation. key_vault = async_client_context.client.keyvault.get_collection( "datakeys", codec_options=OPTS @@ -495,10 +512,9 @@ async def test_encrypt_decrypt(self): self.assertEqual(decrypted_ssn, doc["ssn"]) async def test_validation(self): - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS ) - self.addAsyncCleanup(client_encryption.close) msg = "value to decrypt must be a bson.binary.Binary with subtype 6" with self.assertRaisesRegex(TypeError, msg): @@ -512,10 +528,9 @@ async def test_validation(self): await client_encryption.encrypt("str", algo, key_id=Binary(b"123")) async def test_bson_errors(self): - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS ) - self.addAsyncCleanup(client_encryption.close) # Attempt to encrypt an unencodable object. unencodable_value = object() @@ -528,7 +543,7 @@ async def test_bson_errors(self): async def test_codec_options(self): with self.assertRaisesRegex(TypeError, "codec_options must be"): - AsyncClientEncryption( + self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, @@ -536,10 +551,9 @@ async def test_codec_options(self): ) opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) - client_encryption_legacy = AsyncClientEncryption( + client_encryption_legacy = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts ) - self.addAsyncCleanup(client_encryption_legacy.close) # Create the encrypted field's data key. key_id = await client_encryption_legacy.create_data_key("local") @@ -554,10 +568,9 @@ async def test_codec_options(self): # Encrypt the same UUID with STANDARD codec options. opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts ) - self.addAsyncCleanup(client_encryption.close) encrypted_standard = await client_encryption.encrypt( value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id ) @@ -573,7 +586,7 @@ async def test_codec_options(self): self.assertNotEqual(await client_encryption.decrypt(encrypted_legacy), value) async def test_close(self): - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS ) await client_encryption.close() @@ -589,7 +602,7 @@ async def test_close(self): await client_encryption.decrypt(Binary(b"", 6)) async def test_with_statement(self): - async with AsyncClientEncryption( + async with self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS ) as client_encryption: pass @@ -613,7 +626,7 @@ async def test_with_statement(self): if _IS_SYNC: # TODO: Add asynchronous SpecRunner (https://jira.mongodb.org/browse/PYTHON-4700) - class TestSpec(SpecRunner): + class TestSpec(AsyncSpecRunner): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def setUpClass(cls): @@ -811,7 +824,7 @@ class TestDataKeyDoubleEncryption(AsyncEncryptionIntegrationTest): async def _setup_class(cls): await super()._setup_class() cls.listener = OvertCommandListener() - cls.client = await async_rs_or_single_client(event_listeners=[cls.listener]) + cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) await cls.client.db.coll.drop() cls.vault = await create_key_vault(cls.client.keyvault.datakeys) @@ -833,10 +846,10 @@ async def _setup_class(cls): opts = AutoEncryptionOpts( cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS ) - cls.client_encrypted = await async_rs_or_single_client( + cls.client_encrypted = await cls.unmanaged_async_rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - cls.client_encryption = AsyncClientEncryption( + cls.client_encryption = cls.unmanaged_create_client_encryption( cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS ) @@ -923,10 +936,9 @@ async def _test_external_key_vault(self, with_external_key_vault): # Configure the encrypted field via the local schema_map option. schemas = {"db.coll": json_data("external", "external-schema.json")} if with_external_key_vault: - key_vault_client = await async_rs_or_single_client( + key_vault_client = await self.async_rs_or_single_client( username="fake-user", password="fake-pwd" ) - self.addAsyncCleanup(key_vault_client.close) else: key_vault_client = async_client_context.client opts = AutoEncryptionOpts( @@ -936,15 +948,13 @@ async def _test_external_key_vault(self, with_external_key_vault): key_vault_client=key_vault_client, ) - client_encrypted = await async_rs_or_single_client( + client_encrypted = await self.async_rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - self.addAsyncCleanup(client_encrypted.close) - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS ) - self.addAsyncCleanup(client_encryption.close) if with_external_key_vault: # Authentication error. @@ -990,10 +1000,9 @@ async def test_views_are_prohibited(self): self.addAsyncCleanup(self.client.db.view.drop) opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") - client_encrypted = await async_rs_or_single_client( + client_encrypted = await self.async_rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - self.addAsyncCleanup(client_encrypted.aclose) with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): await client_encrypted.db.view.insert_one({}) @@ -1050,17 +1059,15 @@ async def _test_corpus(self, opts): ) self.addAsyncCleanup(vault.drop) - client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client_encrypted.close) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( self.kms_providers(), "keyvault.datakeys", async_client_context.client, OPTS, kms_tls_options=KMS_TLS_OPTS, ) - self.addAsyncCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) corpus_copied: SON = SON() @@ -1203,7 +1210,7 @@ async def _setup_class(cls): opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") cls.listener = OvertCommandListener() - cls.client_encrypted = await async_rs_or_single_client( + cls.client_encrypted = await cls.unmanaged_async_rs_or_single_client( auto_encryption_opts=opts, event_listeners=[cls.listener] ) cls.coll_encrypted = cls.client_encrypted.db.coll @@ -1291,7 +1298,7 @@ def setUp(self): "gcp": GCP_CREDS, "kmip": KMIP_CREDS, } - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers=kms_providers, key_vault_namespace="keyvault.datakeys", key_vault_client=async_client_context.client, @@ -1303,7 +1310,7 @@ def setUp(self): kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" - self.client_encryption_invalid = AsyncClientEncryption( + self.client_encryption_invalid = self.create_client_encryption( kms_providers=kms_providers_invalid, key_vault_namespace="keyvault.datakeys", key_vault_client=async_client_context.client, @@ -1484,7 +1491,7 @@ async def test_12_kmip_master_key_invalid_endpoint(self): await self.client_encryption.create_data_key("kmip", key) -class AzureGCPEncryptionTestMixin: +class AzureGCPEncryptionTestMixin(AsyncEncryptionIntegrationTest): DEK = None KMS_PROVIDER_MAP = None KEYVAULT_DB = "keyvault" @@ -1496,7 +1503,7 @@ async def asyncSetUp(self): await create_key_vault(keyvault, self.DEK) async def _test_explicit(self, expectation): - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), async_client_context.client, @@ -1525,7 +1532,7 @@ async def _test_automatic(self, expectation_extjson, payload): ) insert_listener = AllowListEventListener("insert") - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] ) self.addAsyncCleanup(client.aclose) @@ -1604,19 +1611,17 @@ async def test_automatic(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests class TestDeadlockProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): - self.client_test = await async_rs_or_single_client( + self.client_test = await self.async_rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" ) - self.addAsyncCleanup(self.client_test.aclose) self.client_keyvault_listener = OvertCommandListener() - self.client_keyvault = await async_rs_or_single_client( + self.client_keyvault = await self.async_rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", event_listeners=[self.client_keyvault_listener], ) - self.addAsyncCleanup(self.client_keyvault.aclose) await self.client_test.keyvault.datakeys.drop() await self.client_test.db.coll.drop() @@ -1629,7 +1634,7 @@ async def asyncSetUp(self): codec_options=OPTS, ) - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, key_vault_namespace="keyvault.datakeys", key_vault_client=self.client_test, @@ -1645,7 +1650,7 @@ async def asyncSetUp(self): self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") async def _run_test(self, max_pool_size, auto_encryption_opts): - client_encrypted = await async_rs_or_single_client( + client_encrypted = await self.async_rs_or_single_client( readConcernLevel="majority", w="majority", maxPoolSize=max_pool_size, @@ -1663,8 +1668,6 @@ async def _run_test(self, max_pool_size, auto_encryption_opts): result = await client_encrypted.db.coll.find_one({"_id": 0}) self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) - self.addAsyncCleanup(client_encrypted.close) - async def test_case_1(self): await self._run_test( max_pool_size=1, @@ -1840,7 +1843,7 @@ async def asyncSetUp(self): await create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() ) keyID = await self.client_encryption.create_data_key("local") @@ -1855,10 +1858,9 @@ async def asyncSetUp(self): key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map ) self.listener = AllowListEventListener("aggregate") - self.encrypted_client = await async_rs_or_single_client( + self.encrypted_client = await self.async_rs_or_single_client( auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] ) - self.addAsyncCleanup(self.encrypted_client.close) async def test_01_command_error(self): async with self.fail_point( @@ -1935,8 +1937,7 @@ def reset_timeout(): "--port=27027", ], ) - client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client_encrypted.close) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) with self.assertRaisesRegex(EncryptionError, "Timeout"): await client_encrypted.db.coll.insert_one({"encrypted": "test"}) @@ -1950,11 +1951,10 @@ async def test_bypassAutoEncryption(self): "--port=27027", ], ) - client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client_encrypted.aclose) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) await client_encrypted.db.coll.insert_one({"unencrypted": "test"}) # Validate that mongocryptd was not spawned: - mongocryptd_client = AsyncMongoClient( + mongocryptd_client = self.simple_client( "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" ) with self.assertRaises(ServerSelectionTimeoutError): @@ -1978,15 +1978,13 @@ async def test_via_loading_shared_library(self): ], crypt_shared_lib_required=True, ) - client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client_encrypted.aclose) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) await client_encrypted.db.coll.drop() await client_encrypted.db.coll.insert_one({"encrypted": "test"}) self.assertEncrypted((await async_client_context.client.db.coll.find_one({}))["encrypted"]) - no_mongocryptd_client = AsyncMongoClient( + no_mongocryptd_client = self.simple_client( host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" ) - self.addAsyncCleanup(no_mongocryptd_client.aclose) with self.assertRaises(ServerSelectionTimeoutError): await no_mongocryptd_client.db.command("ping") @@ -2020,8 +2018,7 @@ def listener(): mongocryptd_uri="mongodb://localhost:47021", crypt_shared_lib_required=False, ) - client_encrypted = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(client_encrypted.aclose) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) await client_encrypted.db.coll.drop() await client_encrypted.db.coll.insert_one({"encrypted": "test"}) server.shutdown() @@ -2035,10 +2032,9 @@ class TestKmsTLSProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): await super().asyncSetUp() self.patch_system_certs(CA_PEM) - self.client_encrypted = AsyncClientEncryption( + self.client_encrypted = self.create_client_encryption( {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS ) - self.addAsyncCleanup(self.client_encrypted.close) async def test_invalid_kms_certificate_expired(self): key = { @@ -2083,36 +2079,32 @@ async def asyncSetUp(self): "gcp": {"tlsCAFile": CA_PEM}, "kmip": {"tlsCAFile": CA_PEM}, } - self.client_encryption_no_client_cert = AsyncClientEncryption( + self.client_encryption_no_client_cert = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addAsyncCleanup(self.client_encryption_no_client_cert.close) # 2, same providers as above but with tlsCertificateKeyFile. kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) for p in kms_tls_opts: kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM - self.client_encryption_with_tls = AsyncClientEncryption( + self.client_encryption_with_tls = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts ) - self.addAsyncCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. providers: dict = copy.deepcopy(providers) providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000" providers["gcp"]["endpoint"] = "127.0.0.1:9000" providers["kmip"]["endpoint"] = "127.0.0.1:9000" - self.client_encryption_expired = AsyncClientEncryption( + self.client_encryption_expired = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addAsyncCleanup(self.client_encryption_expired.close) # 3, update endpoints to invalid host. providers: dict = copy.deepcopy(providers) providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001" providers["gcp"]["endpoint"] = "127.0.0.1:9001" providers["kmip"]["endpoint"] = "127.0.0.1:9001" - self.client_encryption_invalid_hostname = AsyncClientEncryption( + self.client_encryption_invalid_hostname = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addAsyncCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ( @@ -2150,7 +2142,7 @@ async def asyncSetUp(self): "gcp:with_tls": with_cert, "kmip:with_tls": with_cert, } - self.client_encryption_with_names = AsyncClientEncryption( + self.client_encryption_with_names = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4 ) @@ -2232,10 +2224,9 @@ async def test_04_kmip(self): async def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} - encryption = AsyncClientEncryption( + encryption = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options ) - self.addAsyncCleanup(encryption.close) ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] if not hasattr(ctx, "check_ocsp_endpoint"): raise self.skipTest("OCSP not enabled") @@ -2285,7 +2276,7 @@ async def asyncSetUp(self): self.client = async_client_context.client await create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() ) self.def_key_id = await self.client_encryption.create_data_key( @@ -2327,17 +2318,15 @@ async def asyncSetUp(self): key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) self.key_vault_client = self.client - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS ) - self.addAsyncCleanup(self.client_encryption.close) opts = AutoEncryptionOpts( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, bypass_query_analysis=True, ) - self.encrypted_client = await async_rs_or_single_client(auto_encryption_opts=opts) - self.addAsyncCleanup(self.encrypted_client.aclose) + self.encrypted_client = await self.async_rs_or_single_client(auto_encryption_opts=opts) async def test_01_insert_encrypted_indexed_and_find(self): val = "encrypted indexed value" @@ -2464,14 +2453,13 @@ async def run_test(self, src_provider, dst_provider): await self.client.keyvault.drop_collection("datakeys") # Step 2. Create a ``AsyncClientEncryption`` object named ``client_encryption1`` - client_encryption1 = AsyncClientEncryption( + client_encryption1 = self.create_client_encryption( key_vault_client=self.client, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, kms_tls_options=KMS_TLS_OPTS, codec_options=OPTS, ) - self.addAsyncCleanup(client_encryption1.close) # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. key_id = await client_encryption1.create_data_key( @@ -2484,16 +2472,14 @@ async def run_test(self, src_provider, dst_provider): ) # Step 5. Create a ``AsyncClientEncryption`` object named ``client_encryption2`` - client2 = await async_rs_or_single_client() - self.addAsyncCleanup(client2.aclose) - client_encryption2 = AsyncClientEncryption( + client2 = await self.async_rs_or_single_client() + client_encryption2 = self.create_client_encryption( key_vault_client=client2, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, kms_tls_options=KMS_TLS_OPTS, codec_options=OPTS, ) - self.addAsyncCleanup(client_encryption2.close) # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key( @@ -2528,7 +2514,7 @@ async def asyncSetUp(self): @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") async def test_01_failure(self): - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers={"aws": {}}, key_vault_namespace="keyvault.datakeys", key_vault_client=async_client_context.client, @@ -2539,7 +2525,7 @@ async def test_01_failure(self): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") async def test_02_success(self): - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers={"aws": {}}, key_vault_namespace="keyvault.datakeys", key_vault_client=async_client_context.client, @@ -2559,8 +2545,7 @@ async def test_queryable_encryption(self): # AsyncMongoClient to use in testing that handles auth/tls/etc, # and cleanup. async def AsyncMongoClient(**kwargs): - c = await async_rs_or_single_client(**kwargs) - self.addAsyncCleanup(c.aclose) + c = await self.async_rs_or_single_client(**kwargs) return c # Drop data from prior test runs. @@ -2571,7 +2556,7 @@ async def AsyncMongoClient(**kwargs): # Create two data keys. key_vault_client = await AsyncMongoClient() - client_encryption = AsyncClientEncryption( + client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() ) key1_id = await client_encryption.create_data_key("local") @@ -2652,18 +2637,16 @@ async def asyncSetUp(self): key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) self.key_vault_client = self.client - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS ) - self.addAsyncCleanup(self.client_encryption.close) opts = AutoEncryptionOpts( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, bypass_query_analysis=True, ) - self.encrypted_client = await async_rs_or_single_client(auto_encryption_opts=opts) + self.encrypted_client = await self.async_rs_or_single_client(auto_encryption_opts=opts) self.db = self.encrypted_client.db - self.addAsyncCleanup(self.encrypted_client.aclose) async def run_expression_find( self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None @@ -2860,10 +2843,9 @@ async def asyncSetUp(self): await super().asyncSetUp() await self.client.drop_database(self.db) self.key_vault_client = self.client - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS ) - self.addAsyncCleanup(self.client_encryption.close) self.key_id = await self.client_encryption.create_data_key("local") opts = RangeOpts(min=0, max=1000) self.payload_defaults = await self.client_encryption.encrypt( @@ -2896,13 +2878,12 @@ async def asyncSetUp(self): await self.client.drop_database(self.db) self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addAsyncCleanup(self.key_vault.drop) - self.client_encryption = AsyncClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, self.key_vault.full_name, self.client, OPTS, ) - self.addAsyncCleanup(self.client_encryption.close) async def test_01_simple_create(self): coll, _ = await self.client_encryption.create_encrypted_collection( @@ -3118,10 +3099,9 @@ async def _tearDown_class(cls): async def asyncSetUp(self) -> None: self.listener = OvertCommandListener() - self.mongocryptd_client = AsyncMongoClient( + self.mongocryptd_client = self.simple_client( f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] ) - self.addAsyncCleanup(self.mongocryptd_client.aclose) hello = await self.mongocryptd_client.db.command("hello") self.assertNotIn("logicalSessionTimeoutMinutes", hello) diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index 6d589dc01c..9c57c15c5a 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] -from test.utils import EventListener, async_rs_or_single_client +from test.utils import EventListener from bson.objectid import ObjectId from gridfs.asynchronous.grid_file import ( @@ -792,7 +792,7 @@ async def test_grid_out_lazy_connect(self): await outfile.readchunk() async def test_grid_in_lazy_connect(self): - client = AsyncMongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) + client = self.simple_client("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = AsyncGridIn(fs, file_id=-1, chunk_size=1) with self.assertRaises(ServerSelectionTimeoutError): @@ -803,7 +803,7 @@ async def test_grid_in_lazy_connect(self): async def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - AsyncGridIn((await async_rs_or_single_client(w=0)).pymongo_test.fs) + AsyncGridIn((await self.async_rs_or_single_client(w=0)).pymongo_test.fs) async def test_survive_cursor_not_found(self): # By default the find command returns 101 documents in the first batch. @@ -811,7 +811,7 @@ async def test_survive_cursor_not_found(self): chunk_size = 1024 data = b"d" * (102 * chunk_size) listener = EventListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test async with AsyncGridIn(db.fs, chunk_size=chunk_size) as infile: await infile.write(data) diff --git a/test/asynchronous/test_logger.py b/test/asynchronous/test_logger.py index b219d530e7..a2e8b35c5f 100644 --- a/test/asynchronous/test_logger.py +++ b/test/asynchronous/test_logger.py @@ -16,7 +16,6 @@ import os from test import unittest from test.asynchronous import AsyncIntegrationTest -from test.utils import async_single_client from unittest.mock import patch from bson import json_util @@ -86,7 +85,7 @@ async def test_truncation_multi_byte_codepoints(self): self.assertEqual(last_3_bytes, str_to_repeat) async def test_logging_without_listeners(self): - c = await async_single_client() + c = await self.async_single_client() self.assertEqual(len(c._event_listeners.event_listeners()), 0) with self.assertLogs("pymongo.connection", level="DEBUG") as cm: await c.db.test.insert_one({"x": "1"}) diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index 3f6563ee56..b5d8708dc3 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -31,8 +31,6 @@ ) from test.utils import ( EventListener, - async_rs_or_single_client, - async_single_client, async_wait_until, ) @@ -57,7 +55,7 @@ class AsyncTestCommandMonitoring(AsyncIntegrationTest): async def _setup_class(cls): await super()._setup_class() cls.listener = EventListener() - cls.client = await async_rs_or_single_client( + cls.client = await cls.unmanaged_async_rs_or_single_client( event_listeners=[cls.listener], retryWrites=False ) @@ -407,7 +405,7 @@ async def test_get_more_failure(self): @async_client_context.require_secondaries_count(1) async def test_not_primary_error(self): address = next(iter(await async_client_context.client.secondaries)) - client = await async_single_client(*address, event_listeners=[self.listener]) + client = await self.async_single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. await client.admin.command("ping") self.listener.reset() @@ -1146,7 +1144,7 @@ async def _setup_class(cls): # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) - cls.client = await async_single_client() + cls.client = await cls.unmanaged_async_single_client() # Get one (authenticated) socket in the pool. await cls.client.pymongo_test.command("ping") diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 1e1f5659ba..d264b5ecb0 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -36,9 +36,7 @@ from test.utils import ( EventListener, ExceptionCatchingThread, - async_rs_or_single_client, async_wait_until, - rs_or_single_client, wait_until, ) @@ -90,7 +88,7 @@ async def _setup_class(cls): await super()._setup_class() # Create a second client so we can make sure clients cannot share # sessions. - cls.client2 = await async_rs_or_single_client() + cls.client2 = await cls.unmanaged_async_rs_or_single_client() # Redact no commands, so we can test user-admin commands have "lsid". cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() @@ -105,7 +103,7 @@ async def _tearDown_class(cls): async def asyncSetUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() - self.client = await async_rs_or_single_client( + self.client = await self.async_rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener] ) self.addAsyncCleanup(self.client.close) @@ -202,7 +200,7 @@ def test_implicit_sessions_checkout(self): failures = 0 for _ in range(5): listener = EventListener() - client = async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + client = self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -285,7 +283,7 @@ async def test_end_session(self): async def test_end_sessions(self): # Use a new client so that the tearDown hook does not error. listener = SessionTestListener() - client = await async_rs_or_single_client(event_listeners=[listener]) + client = await self.async_rs_or_single_client(event_listeners=[listener]) # Start many sessions. sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] for s in sessions: @@ -789,8 +787,7 @@ async def _test_unacknowledged_ops(self, client, *ops): async def test_unacknowledged_writes(self): # Ensure the collection exists. await self.client.pymongo_test.test_unacked_writes.insert_one({}) - client = await async_rs_or_single_client(w=0, event_listeners=[self.listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_or_single_client(w=0, event_listeners=[self.listener]) db = client.pymongo_test coll = db.test_unacked_writes ops: list = [ @@ -838,7 +835,7 @@ class TestCausalConsistency(AsyncUnitTest): @classmethod async def _setup_class(cls): cls.listener = SessionTestListener() - cls.client = await async_rs_or_single_client(event_listeners=[cls.listener]) + cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) @classmethod async def _tearDown_class(cls): @@ -1153,10 +1150,9 @@ async def asyncSetUp(self): async def test_cluster_time(self): listener = SessionTestListener() # Prevent heartbeats from updating $clusterTime between operations. - client = await async_rs_or_single_client( + client = await self.async_rs_or_single_client( event_listeners=[listener], heartbeatFrequencyMS=999999 ) - self.addAsyncCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). await collection.insert_many([{} for _ in range(10)]) diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 4034c8e2c4..b5d0686417 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -17,6 +17,7 @@ import sys from io import BytesIO +from test.asynchronous.utils_spec_runner import AsyncSpecRunner from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket @@ -25,8 +26,6 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.utils import ( OvertCommandListener, - async_rs_client, - async_single_client, wait_until, ) from typing import List @@ -59,7 +58,18 @@ UNPIN_TEST_MAX_ATTEMPTS = 50 -class TestTransactions(AsyncIntegrationTest): +class AsyncTransactionsBase(AsyncSpecRunner): + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + if ( + "secondary" in self.id() + and not async_client_context.is_mongos + and not async_client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") + + +class TestTransactions(AsyncTransactionsBase): RUN_ON_SERVERLESS = True @async_client_context.require_transactions @@ -92,8 +102,7 @@ def test_transaction_options_validation(self): @async_client_context.require_transactions async def test_transaction_write_concern_override(self): """Test txn overrides Client/Database/Collection write_concern.""" - client = await async_rs_client(w=0) - self.addAsyncCleanup(client.close) + client = await self.async_rs_client(w=0) db = client.test coll = db.test await coll.insert_one({}) @@ -150,12 +159,13 @@ async def test_transaction_write_concern_override(self): async def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = await async_rs_client(async_client_context.mongos_seeds(), localThresholdMS=1000) + client = await self.async_rs_client( + async_client_context.mongos_seeds(), localThresholdMS=1000 + ) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. await coll.insert_one({}) - self.addAsyncCleanup(client.close) async with client.start_session() as s: # Session is pinned to Mongos. async with await s.start_transaction(): @@ -178,12 +188,13 @@ async def test_unpin_for_next_transaction(self): async def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = await async_rs_client(async_client_context.mongos_seeds(), localThresholdMS=1000) + client = await self.async_rs_client( + async_client_context.mongos_seeds(), localThresholdMS=1000 + ) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. await coll.insert_one({}) - self.addAsyncCleanup(client.close) async with client.start_session() as s: # Session is pinned to Mongos. async with await s.start_transaction(): @@ -307,11 +318,10 @@ async def test_transaction_starts_with_batched_write(self): # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() - client = await async_rs_client(event_listeners=[listener]) + client = await self.async_rs_client(event_listeners=[listener]) coll = client[self.db.name].test await coll.delete_many({}) listener.reset() - self.addAsyncCleanup(client.close) self.addAsyncCleanup(coll.drop) large_str = "\0" * (1 * 1024 * 1024) ops: List[InsertOne[RawBSONDocument]] = [ @@ -336,8 +346,7 @@ async def test_transaction_starts_with_batched_write(self): @async_client_context.require_transactions async def test_transaction_direct_connection(self): - client = await async_single_client() - self.addAsyncCleanup(client.close) + client = await self.async_single_client() coll = client.pymongo_test.test # Make sure the collection exists. @@ -393,14 +402,16 @@ def __exit__(self, exc_type, exc_val, exc_tb): client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout -class TestTransactionsConvenientAPI(AsyncIntegrationTest): +class TestTransactionsConvenientAPI(AsyncTransactionsBase): @classmethod async def _setup_class(cls): await super()._setup_class() cls.mongos_clients = [] if async_client_context.supports_transactions(): for address in async_client_context.mongoses: - cls.mongos_clients.append(await async_single_client("{}:{}".format(*address))) + cls.mongos_clients.append( + await cls.unmanaged_async_single_client("{}:{}".format(*address)) + ) @classmethod async def _tearDown_class(cls): @@ -450,8 +461,7 @@ async def callback2(session): @async_client_context.require_transactions async def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() - client = await async_rs_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_client(event_listeners=[listener]) coll = client[self.db.name].test async def callback(session): @@ -479,8 +489,7 @@ async def callback(session): @async_client_context.require_transactions async def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() - client = await async_rs_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_client(event_listeners=[listener]) coll = client[self.db.name].test async def callback(session): @@ -514,8 +523,7 @@ async def callback(session): @async_client_context.require_transactions async def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() - client = await async_rs_client(event_listeners=[listener]) - self.addAsyncCleanup(client.close) + client = await self.async_rs_client(event_listeners=[listener]) coll = client[self.db.name].test async def callback(session): diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index 71044d1530..12cb13c2cd 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -25,7 +25,6 @@ EventListener, OvertCommandListener, ServerAndTopologyEventListener, - async_rs_client, camel_to_snake, camel_to_snake_args, parse_spec_options, @@ -101,6 +100,8 @@ async def _setup_class(cls): @classmethod async def _tearDown_class(cls): cls.knobs.disable() + for client in cls.mongos_clients: + await client.close() await super()._tearDown_class() def setUp(self): @@ -527,7 +528,7 @@ async def run_scenario(self, scenario_def, test): host = async_client_context.MULTI_MONGOS_LB_URI elif async_client_context.is_mongos: host = async_client_context.mongos_seeds() - client = await async_rs_client( + client = await self.async_rs_client( h=host, event_listeners=[listener, pool_listener, server_listener], **client_options ) self.scenario_client = client diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index 10416ae5fe..a7660f2f67 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -18,6 +18,7 @@ import os import sys import unittest +from test import PyMongoTestCase from unittest.mock import patch import pytest @@ -36,7 +37,7 @@ pytestmark = pytest.mark.auth_aws -class TestAuthAWS(unittest.TestCase): +class TestAuthAWS(PyMongoTestCase): uri: str @classmethod @@ -69,7 +70,7 @@ def setup_cache(self): self.skipTest("Not testing cached credentials") # Make a connection to ensure that we enable caching. - client = MongoClient(self.uri) + client = self.simple_client(self.uri) client.get_database().test.find_one() client.close() @@ -79,7 +80,7 @@ def setup_cache(self): auth.set_cached_credentials(None) self.assertEqual(auth.get_cached_credentials(), None) - client = MongoClient(self.uri) + client = self.simple_client(self.uri) client.get_database().test.find_one() client.close() return auth.get_cached_credentials() @@ -90,8 +91,7 @@ def test_cache_credentials(self): def test_cache_about_to_expire(self): creds = self.setup_cache() - client = MongoClient(self.uri) - self.addCleanup(client.close) + client = self.simple_client(self.uri) # Make the creds about to expire. creds = auth.get_cached_credentials() @@ -107,8 +107,7 @@ def test_cache_about_to_expire(self): def test_poisoned_cache(self): creds = self.setup_cache() - client = MongoClient(self.uri) - self.addCleanup(client.close) + client = self.simple_client(self.uri) # Poison the creds with invalid password. assert creds is not None @@ -130,8 +129,7 @@ def test_environment_variables_ignored(self): self.assertIsNotNone(creds) os.environ.copy() - client = MongoClient(self.uri) - self.addCleanup(client.close) + client = self.simple_client(self.uri) client.get_database().test.find_one() @@ -149,8 +147,7 @@ def test_environment_variables_ignored(self): auth.set_cached_credentials(None) - client2 = MongoClient(self.uri) - self.addCleanup(client2.close) + client2 = self.simple_client(self.uri) with patch.dict("os.environ", mock_env): self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") @@ -166,8 +163,7 @@ def test_no_cache_environment_variables(self): if creds.token: mock_env["AWS_SESSION_TOKEN"] = creds.token - client = MongoClient(self.uri) - self.addCleanup(client.close) + client = self.simple_client(self.uri) with patch.dict(os.environ, mock_env): self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], creds.username) @@ -177,22 +173,19 @@ def test_no_cache_environment_variables(self): mock_env["AWS_ACCESS_KEY_ID"] = "foo" - client2 = MongoClient(self.uri) - self.addCleanup(client2.close) + client2 = self.simple_client(self.uri) with patch.dict("os.environ", mock_env), self.assertRaises(OperationFailure): self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") client2.get_database().test.find_one() -class TestAWSLambdaExamples(unittest.TestCase): +class TestAWSLambdaExamples(PyMongoTestCase): def test_shared_client(self): # Start AWS Lambda Example 1 import os - from pymongo import MongoClient - - client = MongoClient(host=os.environ["MONGODB_URI"]) + client = self.simple_client(host=os.environ["MONGODB_URI"]) def lambda_handler(event, context): return client.db.command("ping") @@ -203,9 +196,7 @@ def test_IAM_auth(self): # Start AWS Lambda Example 2 import os - from pymongo import MongoClient - - client = MongoClient( + client = self.simple_client( host=os.environ["MONGODB_URI"], authSource="$external", authMechanism="MONGODB-AWS", diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index fa4b7d6697..6d31f3db4e 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -23,6 +23,7 @@ import warnings from contextlib import contextmanager from pathlib import Path +from test import PyMongoTestCase from typing import Dict import pytest @@ -56,7 +57,7 @@ pytestmark = pytest.mark.auth_oidc -class OIDCTestBase(unittest.TestCase): +class OIDCTestBase(PyMongoTestCase): @classmethod def setUpClass(cls): cls.uri_single = os.environ["MONGODB_URI_SINGLE"] @@ -94,6 +95,7 @@ def fail_point(self, command_args): yield finally: client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") + client.close() @pytest.mark.auth_oidc @@ -149,7 +151,9 @@ def create_client(self, *args, **kwargs): if not len(args): args = [self.uri_single] - return MongoClient(*args, authmechanismproperties=props, **kwargs) + client = self.simple_client(*args, authmechanismproperties=props, **kwargs) + + return client def test_1_1_single_principal_implicit_username(self): # Create default OIDC client with authMechanism=MONGODB-OIDC. diff --git a/test/mockupdb/test_cursor.py b/test/mockupdb/test_cursor.py index 46af39c7b9..2300297218 100644 --- a/test/mockupdb/test_cursor.py +++ b/test/mockupdb/test_cursor.py @@ -29,13 +29,12 @@ from bson.objectid import ObjectId -from pymongo import MongoClient from pymongo.errors import OperationFailure pytestmark = pytest.mark.mockupdb -class TestCursor(unittest.TestCase): +class TestCursor(PyMongoTestCase): def test_getmore_load_balanced(self): server = MockupDB() server.autoresponds( @@ -50,7 +49,7 @@ def test_getmore_load_balanced(self): server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri, loadBalanced=True) + client = self.simple_client(server.uri, loadBalanced=True) self.addCleanup(client.close) collection = client.db.coll cursor = collection.find() @@ -77,7 +76,7 @@ def _test_fail_on_operation_failure_with_code(self, code): self.addCleanup(server.stop) server.autoresponds("ismaster", maxWireVersion=6) - client = MongoClient(server.uri) + client = self.simple_client(server.uri) with going(lambda: server.receives(OpMsg({"find": "collection"})).command_err(code=code)): cursor = client.db.collection.find() diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index fe7f21160e..a42b3a34ee 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -48,8 +48,11 @@ def _connect(options): uri = f"mongodb://localhost:27017/?serverSelectionTimeoutMS={TIMEOUT_MS}&tlsCAFile={CA_FILE}&{options}" print(uri) - client = pymongo.MongoClient(uri) - client.admin.command("ping") + try: + client = pymongo.MongoClient(uri) + client.admin.command("ping") + finally: + client.close() class TestOCSP(unittest.TestCase): diff --git a/test/test_auth.py b/test/test_auth.py index fa3d0905bb..b311d330bc 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -23,16 +23,14 @@ sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import ( - AllowListEventListener, - delay, - ignore_deprecations, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, - single_client_noauth, +from test import ( + IntegrationTest, + PyMongoTestCase, + SkipTest, + client_context, + unittest, ) +from test.utils import AllowListEventListener, delay, ignore_deprecations from pymongo import MongoClient, monitoring from pymongo.auth_shared import _build_credentials_tuple @@ -81,7 +79,7 @@ def run(self): self.success = True -class TestGSSAPI(unittest.TestCase): +class TestGSSAPI(PyMongoTestCase): mech_properties: str service_realm_required: bool @@ -138,7 +136,7 @@ def test_gssapi_simple(self): if not self.service_realm_required: # Without authMechanismProperties. - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -149,11 +147,11 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Log in using URI, without authMechanismProperties. - client = MongoClient(uri) + client = self.simple_client(uri) client[GSSAPI_DB].collection.find_one() # Authenticate with authMechanismProperties. - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -166,14 +164,14 @@ def test_gssapi_simple(self): # Log in using URI, with authMechanismProperties. mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" - client = MongoClient(mech_uri) + client = self.simple_client(mech_uri) client[GSSAPI_DB].collection.find_one() set_name = client_context.replica_set_name if set_name: if not self.service_realm_required: # Without authMechanismProperties - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -185,11 +183,11 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() uri = uri + f"&replicaSet={set_name!s}" - client = MongoClient(uri) + client = self.simple_client(uri) client[GSSAPI_DB].list_collection_names() # With authMechanismProperties - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -202,13 +200,13 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() mech_uri = mech_uri + f"&replicaSet={set_name!s}" - client = MongoClient(mech_uri) + client = self.simple_client(mech_uri) client[GSSAPI_DB].list_collection_names() @ignore_deprecations @client_context.require_sync def test_gssapi_threaded(self): - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -244,7 +242,7 @@ def test_gssapi_threaded(self): set_name = client_context.replica_set_name if set_name: - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -267,14 +265,14 @@ def test_gssapi_threaded(self): self.assertTrue(thread.success) -class TestSASLPlain(unittest.TestCase): +class TestSASLPlain(PyMongoTestCase): @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - client = MongoClient( + client = self.simple_client( SASL_HOST, SASL_PORT, username=SASL_USER, @@ -293,12 +291,12 @@ def test_sasl_plain(self): SASL_PORT, SASL_DB, ) - client = MongoClient(uri) + client = self.simple_client(uri) client.ldap.test.find_one() set_name = client_context.replica_set_name if set_name: - client = MongoClient( + client = self.simple_client( SASL_HOST, SASL_PORT, replicaSet=set_name, @@ -317,7 +315,7 @@ def test_sasl_plain(self): SASL_DB, str(set_name), ) - client = MongoClient(uri) + client = self.simple_client(uri) client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): @@ -331,8 +329,8 @@ def auth_string(user, password): ) return uri - bad_user = MongoClient(auth_string("not-user", SASL_PASS)) - bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) + bad_user = self.simple_client(auth_string("not-user", SASL_PASS)) + bad_pwd = self.simple_client(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. with self.assertRaises(OperationFailure): bad_user.admin.command("ping") @@ -354,7 +352,7 @@ def tearDown(self): def test_scram_sha1(self): host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) ) client.pymongo_test.command("dbstats") @@ -365,7 +363,7 @@ def test_scram_sha1(self): "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" "&replicaSet=%s" % (host, port, client_context.replica_set_name) ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) client.pymongo_test.command("dbstats") db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) db.command("dbstats") @@ -393,7 +391,7 @@ def test_scram_skip_empty_exchange(self): "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] ) - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] ) client.testscram.command("dbstats") @@ -430,36 +428,38 @@ def test_scram(self): ) # Step 2: verify auth success cases - client = rs_or_single_client_noauth(username="sha1", password="pwd", authSource="testscram") + client = self.rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram" + ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) client.testscram.command("dbstats") # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) client.testscram.command("dbstats") self.listener.reset() - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] ) client.testscram.command("dbstats") @@ -472,19 +472,19 @@ def test_scram(self): self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) with self.assertRaises(OperationFailure): client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) with self.assertRaises(OperationFailure): client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="not-a-user", password="pwd", authSource="testscram" ) with self.assertRaises(OperationFailure): @@ -497,7 +497,7 @@ def test_scram(self): port, client_context.replica_set_name, ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) client.testscram.command("dbstats") db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) db.command("dbstats") @@ -517,12 +517,12 @@ def test_scram_saslprep(self): "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] ) - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="\u2168", password="\u2163", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="\u2168", password="\u2163", authSource="testscram", @@ -530,17 +530,17 @@ def test_scram_saslprep(self): ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="\u2168", password="IV", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="IX", password="I\u00ADX", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="IX", password="I\u00ADX", authSource="testscram", @@ -548,25 +548,29 @@ def test_scram_saslprep(self): ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth("mongodb://\u2168:IV@%s:%d/testscram" % (host, port)) + client = self.rs_or_single_client_noauth( + "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) + ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth("mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port)) + client = self.rs_or_single_client_noauth( + "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) + ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) + client = self.rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) client.testscram.command("dbstats") def test_cache(self): - client = single_client() + client = self.single_client() credentials = client.options.pool_options._credentials cache = credentials.cache self.assertIsNotNone(cache) @@ -591,8 +595,7 @@ def test_scram_threaded(self): coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate - client = rs_or_single_client() - self.addCleanup(client.close) + client = self.rs_or_single_client() coll = client.db.test threads = [] for _ in range(4): @@ -619,7 +622,7 @@ def tearDown(self): def test_uri_options(self): # Test default to admin host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + client = self.rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) self.assertTrue(client.admin.command("dbstats")) if client_context.is_rs: @@ -628,14 +631,14 @@ def test_uri_options(self): port, client_context.replica_set_name, ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) self.assertTrue(client.admin.command("dbstats")) db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) self.assertTrue(db.command("dbstats")) # Test explicit database uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) - client = rs_or_single_client_noauth(uri) + client = self.rs_or_single_client_noauth(uri) with self.assertRaises(OperationFailure): client.admin.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) @@ -646,7 +649,7 @@ def test_uri_options(self): port, client_context.replica_set_name, ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) with self.assertRaises(OperationFailure): client.admin.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) @@ -655,7 +658,7 @@ def test_uri_options(self): # Test authSource uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) - client = rs_or_single_client_noauth(uri) + client = self.rs_or_single_client_noauth(uri) with self.assertRaises(OperationFailure): client.pymongo_test2.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) @@ -665,7 +668,7 @@ def test_uri_options(self): "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) with self.assertRaises(OperationFailure): client.pymongo_test2.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 38e5f19bf8..3c3a1a67ae 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -20,6 +20,7 @@ import os import sys import warnings +from test import PyMongoTestCase sys.path[0:0] = [""] @@ -34,7 +35,7 @@ _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") -class TestAuthSpec(unittest.TestCase): +class TestAuthSpec(PyMongoTestCase): pass @@ -54,7 +55,7 @@ def run_test(self): warnings.simplefilter("default") self.assertRaises(Exception, MongoClient, uri, connect=False) else: - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) diff --git a/test/test_bulk.py b/test/test_bulk.py index 63b8c7790a..64fd48e8cd 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -24,22 +24,13 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, remove_all_users, unittest -from test.utils import ( - rs_or_single_client_noauth, - single_client, - wait_until, -) +from test.utils import wait_until from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId from pymongo.common import partition_node -from pymongo.errors import ( - BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure, -) +from pymongo.errors import BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure from pymongo.operations import * from pymongo.synchronous.collection import Collection from pymongo.write_concern import WriteConcern @@ -913,7 +904,7 @@ class TestBulkAuthorization(BulkAuthorizationTestBase): def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth( + cli = self.rs_or_single_client_noauth( username="readonly", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test @@ -924,7 +915,7 @@ def test_readonly(self): def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth( + cli = self.rs_or_single_client_noauth( username="noremove", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test @@ -952,7 +943,7 @@ def _setup_class(cls): if cls.w is not None and cls.w > 1: for member in (client_context.hello)["hosts"]: if member != (client_context.hello)["primary"]: - cls.secondary = single_client(*partition_node(member)) + cls.secondary = cls.unmanaged_single_client(*partition_node(member)) break @classmethod diff --git a/test/test_change_stream.py b/test/test_change_stream.py index cb19452aec..dae224c5e0 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -28,12 +28,17 @@ sys.path[0:0] = [""] -from test import IntegrationTest, Version, client_context, unittest +from test import ( + IntegrationTest, + PyMongoTestCase, + Version, + client_context, + unittest, +) from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, EventListener, - rs_or_single_client, wait_until, ) @@ -69,8 +74,7 @@ def change_stream(self, *args, **kwargs): def client_with_listener(self, *commands): """Return a client with a AllowListEventListener.""" listener = AllowListEventListener(*commands) - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) return client, listener def watched_collection(self, *args, **kwargs): @@ -174,7 +178,7 @@ def _wait_until(): @no_type_check def test_try_next_runs_one_getmore(self): listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") listener.reset() @@ -232,7 +236,7 @@ def _wait_until(): @no_type_check def test_batch_size_is_honored(self): listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") listener.reset() @@ -473,7 +477,7 @@ class ProseSpecTestsMixin: @no_type_check def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) - client = rs_or_single_client(event_listeners=[listener]) + client = PyMongoTestCase.unmanaged_rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener @@ -1111,7 +1115,7 @@ class TestAllLegacyScenarios(IntegrationTest): def _setup_class(cls): super()._setup_class() cls.listener = AllowListEventListener("aggregate", "getMore") - cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) @classmethod def _tearDown_class(cls): diff --git a/test/test_client.py b/test/test_client.py index 785139d6a8..bc45325f0b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -67,10 +67,6 @@ is_greenthread_patched, lazy_client_trial, one, - rs_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, wait_until, ) @@ -131,7 +127,7 @@ class ClientUnitTest(UnitTest): @classmethod def _setup_class(cls): - cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) + cls.client = cls.unmanaged_rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) @classmethod def _tearDown_class(cls): @@ -142,7 +138,7 @@ def inject_fixtures(self, caplog): self._caplog = caplog def test_keyword_arg_defaults(self): - client = MongoClient( + client = self.simple_client( socketTimeoutMS=None, connectTimeoutMS=20000, waitQueueTimeoutMS=None, @@ -168,15 +164,17 @@ def test_keyword_arg_defaults(self): self.assertAlmostEqual(12, client.options.server_selection_timeout) def test_connect_timeout(self): - client = MongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + client = self.simple_client(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = MongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + + client = self.simple_client(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = MongoClient( + + client = self.simple_client( "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False ) pool_opts = client.options.pool_options @@ -193,7 +191,7 @@ def test_types(self): self.assertRaises(ConfigurationError, MongoClient, []) def test_max_pool_size_zero(self): - MongoClient(maxPoolSize=0) + self.simple_client(maxPoolSize=0) def test_uri_detection(self): self.assertRaises(ConfigurationError, MongoClient, "/foo") @@ -258,7 +256,7 @@ def test_iteration(self): self.assertNotIsInstance(client, Iterable) def test_get_default_database(self): - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False, ) @@ -274,7 +272,7 @@ def test_get_default_database(self): self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False, ) @@ -282,7 +280,7 @@ def test_get_default_database(self): def test_get_default_database_error(self): # URI with no database. - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False, ) @@ -294,11 +292,11 @@ def test_get_default_database_with_authsource(self): client_context.host, client_context.port, ) - c = rs_or_single_client(uri, connect=False) + c = self.rs_or_single_client(uri, connect=False) self.assertEqual(Database(c, "foo"), c.get_default_database()) def test_get_database_default(self): - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False, ) @@ -306,7 +304,7 @@ def test_get_database_default(self): def test_get_database_default_error(self): # URI with no database. - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False, ) @@ -318,19 +316,19 @@ def test_get_database_default_with_authsource(self): client_context.host, client_context.port, ) - c = rs_or_single_client(uri, connect=False) + c = self.rs_or_single_client(uri, connect=False) self.assertEqual(Database(c, "foo"), c.get_database()) def test_primary_read_pref_with_tags(self): # No tags allowed with "primary". with self.assertRaises(ConfigurationError): - MongoClient("mongodb://host/?readpreferencetags=dc:east") + self.single_client("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") + self.single_client("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode ) self.assertEqual(c.read_preference, ReadPreference.NEAREST) @@ -339,26 +337,30 @@ def test_metadata(self): metadata = copy.deepcopy(_METADATA) metadata["driver"]["name"] = "PyMongo" metadata["application"] = {"name": "foobar"} - client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options self.assertEqual(options.pool_options.metadata, metadata) - client = MongoClient("foo", 27017, appname="foobar", connect=False) + client = self.simple_client("foo", 27017, appname="foobar", connect=False) options = client.options self.assertEqual(options.pool_options.metadata, metadata) # No error - MongoClient(appname="x" * 128) - self.assertRaises(ValueError, MongoClient, appname="x" * 129) + self.simple_client(appname="x" * 128) + with self.assertRaises(ValueError): + self.simple_client(appname="x" * 129) # Bad "driver" options. self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") self.assertRaises(TypeError, DriverInfo, version="1", platform="a") self.assertRaises(TypeError, DriverInfo) - self.assertRaises(TypeError, MongoClient, driver=1) - self.assertRaises(TypeError, MongoClient, driver="abc") - self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) + with self.assertRaises(TypeError): + self.simple_client(driver=1) + with self.assertRaises(TypeError): + self.simple_client(driver="abc") + with self.assertRaises(TypeError): + self.simple_client(driver=("Foo", "1", "a")) # Test appending to driver info. metadata["driver"]["name"] = "PyMongo|FooDriver" metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) - client = MongoClient( + client = self.simple_client( "foo", 27017, appname="foobar", @@ -368,7 +370,7 @@ def test_metadata(self): options = client.options self.assertEqual(options.pool_options.metadata, metadata) metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) - client = MongoClient( + client = self.simple_client( "foo", 27017, appname="foobar", @@ -378,7 +380,7 @@ def test_metadata(self): options = client.options self.assertEqual(options.pool_options.metadata, metadata) # Test truncating driver info metadata. - client = MongoClient( + client = self.simple_client( driver=DriverInfo(name="s" * _MAX_METADATA_SIZE), connect=False, ) @@ -387,7 +389,7 @@ def test_metadata(self): len(bson.encode(options.pool_options.metadata)), _MAX_METADATA_SIZE, ) - client = MongoClient( + client = self.simple_client( driver=DriverInfo(name="s" * _MAX_METADATA_SIZE, version="s" * _MAX_METADATA_SIZE), connect=False, ) @@ -403,7 +405,7 @@ def test_container_metadata(self): metadata["driver"]["name"] = "PyMongo" metadata["env"] = {} metadata["env"]["container"] = {"orchestrator": "kubernetes"} - client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options self.assertEqual(options.pool_options.metadata["env"], metadata["env"]) @@ -429,7 +431,7 @@ def transform_python(self, value): uuid_representation_label = "javaLegacy" unicode_decode_error_handler = "ignore" tzinfo = utc - c = MongoClient( + c = self.simple_client( document_class=document_class, type_registry=type_registry, tz_aware=tz_aware, @@ -438,12 +440,12 @@ def transform_python(self, value): tzinfo=tzinfo, connect=False, ) - self.assertEqual(c.codec_options.document_class, document_class) self.assertEqual(c.codec_options.type_registry, type_registry) self.assertEqual(c.codec_options.tz_aware, tz_aware) self.assertEqual( - c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual(c.codec_options.tzinfo, tzinfo) @@ -465,11 +467,11 @@ def test_uri_codec_options(self): datetime_conversion, ) ) - c = MongoClient(uri, connect=False) - + c = self.simple_client(uri, connect=False) self.assertEqual(c.codec_options.tz_aware, True) self.assertEqual( - c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual( @@ -478,8 +480,7 @@ def test_uri_codec_options(self): # Change the passed datetime_conversion to a number and re-assert. uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") - c = MongoClient(uri, connect=False) - + c = self.simple_client(uri, connect=False) self.assertEqual( c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] ) @@ -487,7 +488,9 @@ def test_uri_codec_options(self): def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" - c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") + c = self.simple_client( + uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred" + ) clopts = c.options opts = clopts._options @@ -516,7 +519,7 @@ def reset_resolver(): def test_scenario(args, kwargs, expected_value): patched_resolver.reset() - MongoClient(*args, **kwargs) + self.simple_client(*args, **kwargs) for _, kw in patched_resolver.call_list(): self.assertAlmostEqual(kw["lifetime"], expected_value) @@ -536,15 +539,15 @@ def test_scenario(args, kwargs, expected_value): def test_uri_security_options(self): # Ensure that we don't silently override security-related options. with self.assertRaises(InvalidURI): - MongoClient("mongodb://localhost/?ssl=true", tls=False, connect=False) + self.simple_client("mongodb://localhost/?ssl=true", tls=False, connect=False) # Matching SSL and TLS options should not cause errors. - c = MongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) + c = self.simple_client("mongodb://localhost/?ssl=false", tls=False, connect=False) self.assertEqual(c.options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): - MongoClient( + self.simple_client( "mongodb://localhost/?tlsInsecure=true", connect=False, tlsAllowInvalidHostnames=True, @@ -552,7 +555,7 @@ def test_uri_security_options(self): # Conflicting legacy tlsInsecure options should also raise an error. with self.assertRaises(InvalidURI): - MongoClient( + self.simple_client( "mongodb://localhost/?tlsInsecure=true", connect=False, tlsAllowInvalidCertificates=False, @@ -560,10 +563,10 @@ def test_uri_security_options(self): # Conflicting kwargs should raise InvalidURI with self.assertRaises(InvalidURI): - MongoClient(ssl=True, tls=False) + self.simple_client(ssl=True, tls=False) def test_event_listeners(self): - c = MongoClient(event_listeners=[], connect=False) + c = self.simple_client(event_listeners=[], connect=False) self.assertEqual(c.options.event_listeners, []) listeners = [ event_loggers.CommandLogger(), @@ -572,11 +575,11 @@ def test_event_listeners(self): event_loggers.TopologyLogger(), event_loggers.ConnectionPoolLogger(), ] - c = MongoClient(event_listeners=listeners, connect=False) + c = self.simple_client(event_listeners=listeners, connect=False) self.assertEqual(c.options.event_listeners, listeners) def test_client_options(self): - c = MongoClient(connect=False) + c = self.simple_client(connect=False) self.assertIsInstance(c.options, ClientOptions) self.assertIsInstance(c.options.pool_options, PoolOptions) self.assertEqual(c.options.server_selection_timeout, 30) @@ -606,11 +609,11 @@ def test_detected_environment_logging(self, mock_get_hosts): ) with self.assertLogs("pymongo", level="INFO") as cm: for host in normal_hosts: - MongoClient(host) + MongoClient(host, connect=False) for host in srv_hosts: mock_get_hosts.return_value = [(host, 1)] - MongoClient(host) - MongoClient(multi_host) + MongoClient(host, connect=False) + MongoClient(multi_host, connect=False) logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) @@ -628,13 +631,13 @@ def test_detected_environment_warning(self, mock_get_hosts): ) for host in normal_hosts: with self.assertWarns(UserWarning): - MongoClient(host) + self.simple_client(host) for host in srv_hosts: mock_get_hosts.return_value = [(host, 1)] with self.assertWarns(UserWarning): - MongoClient(host) + self.simple_client(host) with self.assertWarns(UserWarning): - MongoClient(multi_host) + self.simple_client(multi_host) class TestClient(IntegrationTest): @@ -651,18 +654,17 @@ def test_multiple_uris(self): def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper doesn't remove connections when maxIdleTimeMS not set - client = rs_or_single_client() + client = self.rs_or_single_client() server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) self.assertTrue(conn in server._pool.conns) - client.close() def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one - client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + client = self.rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass @@ -671,12 +673,11 @@ def test_max_idle_time_reaper_removes_stale_minPoolSize(self): self.assertGreaterEqual(len(server._pool.conns), 1) wait_until(lambda: conn not in server._pool.conns, "remove stale socket") wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") - client.close() def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper respects maxPoolSize when adding new connections. - client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) + client = self.rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass @@ -685,12 +686,11 @@ def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): self.assertEqual(1, len(server._pool.conns)) wait_until(lambda: conn not in server._pool.conns, "remove stale socket") wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") - client.close() def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it - client = rs_or_single_client(maxIdleTimeMS=500) + client = self.rs_or_single_client(maxIdleTimeMS=500) server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn_one: pass @@ -703,16 +703,15 @@ def test_max_idle_time_reaper_removes_stale(self): lambda: len(server._pool.conns) == 0, "stale socket reaped and new one NOT added to the pool", ) - client.close() def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=0.1): - client = rs_or_single_client() + client = self.rs_or_single_client() server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) self.assertEqual(0, len(server._pool.conns)) # Assert that pool started up at minPoolSize - client = rs_or_single_client(minPoolSize=10) + client = self.rs_or_single_client(minPoolSize=10) server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) wait_until( lambda: len(server._pool.conns) == 10, @@ -731,7 +730,7 @@ def test_min_pool_size(self): def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): - client = rs_or_single_client(maxIdleTimeMS=500) + client = self.rs_or_single_client(maxIdleTimeMS=500) server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass @@ -745,7 +744,7 @@ def test_max_idle_time_checkout(self): self.assertTrue(new_con in server._pool.conns) # Test that connections are reused if maxIdleTimeMS is not set. - client = rs_or_single_client() + client = self.rs_or_single_client() server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) with server._pool.checkout() as conn: pass @@ -769,36 +768,38 @@ def test_constants(self): MongoClient.HOST = "somedomainthatdoesntexist.org" MongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): - connected(MongoClient(serverSelectionTimeoutMS=10, **kwargs)) + c = self.simple_client(serverSelectionTimeoutMS=10, **kwargs) + connected(c) + c = self.simple_client(host, port, **kwargs) # Override the defaults. No error. - connected(MongoClient(host, port, **kwargs)) + connected(c) # Set good defaults. MongoClient.HOST = host MongoClient.PORT = port # No error. - connected(MongoClient(**kwargs)) + c = self.simple_client(**kwargs) + connected(c) def test_init_disconnected(self): host, port = client_context.host, client_context.port - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) # is_primary causes client to block until connected self.assertIsInstance(c.is_primary, bool) - - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.is_mongos, bool) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.options.pool_options.max_pool_size, int) self.assertIsInstance(c.nodes, frozenset) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertEqual(c.codec_options, CodecOptions()) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertFalse(c.primary) self.assertFalse(c.secondaries) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) self.assertIsNone(c.address) # PYTHON-2981 @@ -810,43 +811,44 @@ def test_init_disconnected(self): self.assertEqual(c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" - c = MongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + c = self.simple_client(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) with self.assertRaises(ConnectionFailure): c.pymongo_test.test.find_one() def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = MongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + c = self.simple_client(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) with self.assertRaises(ConnectionFailure): c.pymongo_test.test.find_one() def test_equality(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) - c = rs_or_single_client(seed, connect=False) - self.addCleanup(c.close) + c = self.rs_or_single_client(seed, connect=False) self.assertEqual(client_context.client, c) # Explicitly test inequality self.assertFalse(client_context.client != c) - c = rs_or_single_client("invalid.com", connect=False) - self.addCleanup(c.close) + c = self.rs_or_single_client("invalid.com", connect=False) self.assertNotEqual(client_context.client, c) self.assertTrue(client_context.client != c) + + c1 = self.simple_client("a", connect=False) + c2 = self.simple_client("b", connect=False) + # Seeds differ: - self.assertNotEqual(MongoClient("a", connect=False), MongoClient("b", connect=False)) + self.assertNotEqual(c1, c2) + + c1 = self.simple_client(["a", "b", "c"], connect=False) + c2 = self.simple_client(["c", "a", "b"], connect=False) + # Same seeds but out of order still compares equal: - self.assertEqual( - MongoClient(["a", "b", "c"], connect=False), - MongoClient(["c", "a", "b"], connect=False), - ) + self.assertEqual(c1, c2) def test_hashable(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) - c = rs_or_single_client(seed, connect=False) - self.addCleanup(c.close) + c = self.rs_or_single_client(seed, connect=False) self.assertIn(c, {client_context.client}) - c = rs_or_single_client("invalid.com", connect=False) - self.addCleanup(c.close) + c = self.rs_or_single_client("invalid.com", connect=False) self.assertNotIn(c, {client_context.client}) def test_host_w_port(self): @@ -879,9 +881,10 @@ def test_repr(self): self.assertIn("w=1", the_repr) self.assertIn("wtimeoutms=100", the_repr) - self.assertEqual(eval(the_repr), client) + with eval(the_repr) as client_two: + self.assertEqual(client_two, client) - client = MongoClient( + client = self.simple_client( "localhost:27017,localhost:27018", replicaSet="replset", connectTimeoutMS=12345, @@ -899,7 +902,8 @@ def test_repr(self): self.assertIn("w=1", the_repr) self.assertIn("wtimeoutms=100", the_repr) - self.assertEqual(eval(the_repr), client) + with eval(the_repr) as client_two: + self.assertEqual(client_two, client) def test_getters(self): wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") @@ -915,8 +919,7 @@ def test_list_databases(self): for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): self.assertIs(type(helper_doc), dict) self.assertEqual(helper_doc.keys(), cmd_doc.keys()) - client = rs_or_single_client(document_class=SON) - self.addCleanup(client.close) + client = self.rs_or_single_client(document_class=SON) for doc in client.list_databases(): self.assertIs(type(doc), dict) @@ -955,7 +958,7 @@ def test_drop_database(self): self.client.drop_database("pymongo_test") if client_context.is_rs: - wc_client = rs_or_single_client(w=len(client_context.nodes) + 1) + wc_client = self.rs_or_single_client(w=len(client_context.nodes) + 1) with self.assertRaises(WriteConcernError): wc_client.drop_database("pymongo_test2") @@ -965,7 +968,7 @@ def test_drop_database(self): self.assertNotIn("pymongo_test2", dbs) def test_close(self): - test_client = rs_or_single_client() + test_client = self.rs_or_single_client() coll = test_client.pymongo_test.bar test_client.close() with self.assertRaises(InvalidOperation): @@ -975,7 +978,7 @@ def test_close_kills_cursors(self): if sys.platform.startswith("java"): # We can't figure out how to make this test reliable with Jython. raise SkipTest("Can't test with Jython") - test_client = rs_or_single_client() + test_client = self.rs_or_single_client() # Kill any cursors possibly queued up by previous tests. gc.collect() test_client._process_periodic_tasks() @@ -1002,13 +1005,13 @@ def test_close_kills_cursors(self): self.assertTrue(test_client._topology._opened) test_client.close() self.assertFalse(test_client._topology._opened) - test_client = rs_or_single_client() + test_client = self.rs_or_single_client() # The killCursors task should not need to re-open the topology. test_client._process_periodic_tasks() self.assertTrue(test_client._topology._opened) def test_close_stops_kill_cursors_thread(self): - client = rs_client() + client = self.rs_client() client.test.test.find_one() self.assertFalse(client._kill_cursors_executor._stopped) @@ -1024,7 +1027,7 @@ def test_close_stops_kill_cursors_thread(self): def test_uri_connect_option(self): # Ensure that topology is not opened if connect=False. - client = rs_client(connect=False) + client = self.rs_client(connect=False) self.assertFalse(client._topology._opened) # Ensure kill cursors thread has not been started. @@ -1037,19 +1040,15 @@ def test_uri_connect_option(self): kc_thread = client._kill_cursors_executor._thread self.assertTrue(kc_thread and kc_thread.is_alive()) - # Tear down. - client.close() - def test_close_does_not_open_servers(self): - client = rs_client(connect=False) + client = self.rs_client(connect=False) topology = client._topology self.assertEqual(topology._servers, {}) client.close() self.assertEqual(topology._servers, {}) def test_close_closes_sockets(self): - client = rs_client() - self.addCleanup(client.close) + client = self.rs_client() client.test.test.find_one() topology = client._topology client.close() @@ -1075,30 +1074,30 @@ def test_auth_from_uri(self): client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) with self.assertRaises(OperationFailure): - connected(rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) + connected(self.rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) # No error. - connected(rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) + connected(self.rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) with self.assertRaises(OperationFailure): - connected(rs_or_single_client_noauth(uri)) + connected(self.rs_or_single_client_noauth(uri)) # No error. connected( - rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) + self.rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) ) # Auth with lazy connection. ( - rs_or_single_client_noauth( + self.rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False ) ).pymongo_test.test.find_one() # Wrong password. - bad_client = rs_or_single_client_noauth( + bad_client = self.rs_or_single_client_noauth( "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False ) @@ -1110,7 +1109,7 @@ def test_username_and_password(self): client_context.create_user("admin", "ad min", "pa/ss") self.addCleanup(client_context.drop_user, "admin", "ad min") - c = rs_or_single_client_noauth(username="ad min", password="pa/ss") + c = self.rs_or_single_client_noauth(username="ad min", password="pa/ss") # Username and password aren't in strings that will likely be logged. self.assertNotIn("ad min", repr(c)) @@ -1122,13 +1121,13 @@ def test_username_and_password(self): c.server_info() with self.assertRaises(OperationFailure): - (rs_or_single_client_noauth(username="ad min", password="foo")).server_info() + (self.rs_or_single_client_noauth(username="ad min", password="foo")).server_info() @client_context.require_auth @client_context.require_no_fips def test_lazy_auth_raises_operation_failure(self): host = client_context.host - lazy_client = rs_or_single_client_noauth( + lazy_client = self.rs_or_single_client_noauth( f"mongodb://user:wrong@{host}/pymongo_test", connect=False ) @@ -1146,8 +1145,7 @@ def test_unix_socket(self): uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. - client = rs_or_single_client(uri) - self.addCleanup(client.close) + client = self.rs_or_single_client(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() self.assertTrue("pymongo_test" in dbs) @@ -1156,9 +1154,10 @@ def test_unix_socket(self): # Confirm it fails with a missing socket. with self.assertRaises(ConnectionFailure): - connected( - MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100), + c = self.simple_client( + "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 ) + connected(c) def test_document_class(self): c = self.client @@ -1169,15 +1168,15 @@ def test_document_class(self): self.assertTrue(isinstance(db.test.find_one(), dict)) self.assertFalse(isinstance(db.test.find_one(), SON)) - c = rs_or_single_client(document_class=SON) - self.addCleanup(c.close) + c = self.rs_or_single_client(document_class=SON) + db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) self.assertTrue(isinstance(db.test.find_one(), SON)) def test_timeouts(self): - client = rs_or_single_client( + client = self.rs_or_single_client( connectTimeoutMS=10500, socketTimeoutMS=10500, maxIdleTimeMS=10500, @@ -1190,28 +1189,31 @@ def test_timeouts(self): self.assertEqual(10.5, client.options.server_selection_timeout) def test_socket_timeout_ms_validation(self): - c = rs_or_single_client(socketTimeoutMS=10 * 1000) + c = self.rs_or_single_client(socketTimeoutMS=10 * 1000) self.assertEqual(10, (get_pool(c)).opts.socket_timeout) - c = connected(rs_or_single_client(socketTimeoutMS=None)) + c = connected(self.rs_or_single_client(socketTimeoutMS=None)) self.assertEqual(None, (get_pool(c)).opts.socket_timeout) - c = connected(rs_or_single_client(socketTimeoutMS=0)) + c = connected(self.rs_or_single_client(socketTimeoutMS=0)) self.assertEqual(None, (get_pool(c)).opts.socket_timeout) with self.assertRaises(ValueError): - rs_or_single_client(socketTimeoutMS=-1) + with self.rs_or_single_client(socketTimeoutMS=-1): + pass with self.assertRaises(ValueError): - rs_or_single_client(socketTimeoutMS=1e10) + with self.rs_or_single_client(socketTimeoutMS=1e10): + pass with self.assertRaises(ValueError): - rs_or_single_client(socketTimeoutMS="foo") + with self.rs_or_single_client(socketTimeoutMS="foo"): + pass def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 - timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + timeout = self.rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) self.addCleanup(timeout.close) no_timeout.pymongo_test.drop_collection("test") @@ -1256,7 +1258,7 @@ def test_server_selection_timeout(self): self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): - client = rs_or_single_client(waitQueueTimeoutMS=2000) + client = self.rs_or_single_client(waitQueueTimeoutMS=2000) self.assertEqual((get_pool(client)).opts.wait_queue_timeout, 2) def test_socketKeepAlive(self): @@ -1269,7 +1271,7 @@ def test_socketKeepAlive(self): def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware="foo") - aware = rs_or_single_client(tz_aware=True) + aware = self.rs_or_single_client(tz_aware=True) self.addCleanup(aware.close) naive = self.client aware.pymongo_test.drop_collection("test") @@ -1299,8 +1301,7 @@ def test_ipv6(self): if client_context.is_rs: uri += "/?replicaSet=" + (client_context.replica_set_name or "") - client = rs_or_single_client_noauth(uri) - self.addCleanup(client.close) + client = self.rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) @@ -1309,7 +1310,7 @@ def test_ipv6(self): self.assertTrue("pymongo_test_bernie" in dbs) def test_contextlib(self): - client = rs_or_single_client() + client = self.rs_or_single_client() client.pymongo_test.drop_collection("test") client.pymongo_test.test.insert_one({"foo": "bar"}) @@ -1323,7 +1324,7 @@ def test_contextlib(self): self.assertEqual("bar", (client.pymongo_test.test.find_one())["foo"]) with self.assertRaises(InvalidOperation): client.pymongo_test.test.find_one() - client = rs_or_single_client() + client = self.rs_or_single_client() with client as client: self.assertEqual("bar", (client.pymongo_test.test.find_one())["foo"]) with self.assertRaises(InvalidOperation): @@ -1401,8 +1402,7 @@ def test_operation_failure(self): # response to getLastError. PYTHON-395. We need a new client here # to avoid race conditions caused by replica set failover or idle # socket reaping. - client = single_client() - self.addCleanup(client.close) + client = self.single_client() client.pymongo_test.test.find_one() pool = get_pool(client) socket_count = len(pool.conns) @@ -1426,8 +1426,7 @@ def test_lazy_connect_w0(self): client_context.client.drop_database("test_lazy_connect_w0") self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") - client = rs_or_single_client(connect=False, w=0) - self.addCleanup(client.close) + client = self.rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.insert_one({}) def predicate(): @@ -1435,8 +1434,7 @@ def predicate(): wait_until(predicate, "find one document") - client = rs_or_single_client(connect=False, w=0) - self.addCleanup(client.close) + client = self.rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) def predicate(): @@ -1444,8 +1442,7 @@ def predicate(): wait_until(predicate, "update one document") - client = rs_or_single_client(connect=False, w=0) - self.addCleanup(client.close) + client = self.rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.delete_one({}) def predicate(): @@ -1457,8 +1454,7 @@ def predicate(): def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = rs_or_single_client(maxPoolSize=1, retryReads=False) - self.addCleanup(client.close) + client = self.rs_or_single_client(maxPoolSize=1, retryReads=False) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. @@ -1484,7 +1480,9 @@ def test_auth_network_error(self): # when authenticating a new socket with cached credentials. # Get a client with one socket so we detect if it's leaked. - c = connected(rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False)) + c = connected( + self.rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False) + ) # Cause a network error on the actual socket. pool = get_pool(c) @@ -1501,8 +1499,7 @@ def test_auth_network_error(self): @client_context.require_no_replica_set def test_connect_to_standalone_using_replica_set_name(self): - client = single_client(replicaSet="anything", serverSelectionTimeoutMS=100) - + client = self.single_client(replicaSet="anything", serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): client.test.test.find_one() @@ -1512,7 +1509,7 @@ def test_stale_getmore(self): # the topology before the getMore message is sent. Test that # MongoClient._run_operation_with_response handles the error. with self.assertRaises(AutoReconnect): - client = rs_client(connect=False, serverSelectionTimeoutMS=100) + client = self.rs_client(connect=False, serverSelectionTimeoutMS=100) client._run_operation( operation=message._GetMore( "pymongo_test", @@ -1560,7 +1557,7 @@ def init(self, *args): client_context.host, client_context.port, ) - client = single_client(uri, event_listeners=[listener]) + self.single_client(uri, event_listeners=[listener]) wait_until( lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" ) @@ -1569,7 +1566,6 @@ def init(self, *args): # closer to 0.5 sec with heartbeatFrequencyMS configured. self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) - client.close() finally: ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore @@ -1586,31 +1582,31 @@ def compression_settings(client): return pool_options._compression_settings uri = "mongodb://localhost:27017/?compressors=zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, 4) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar,zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) @@ -1618,56 +1614,55 @@ def compression_settings(client): # According to the connection string spec, unsupported values # just raise a warning and are ignored. uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) if not _have_snappy(): uri = "mongodb://localhost:27017/?compressors=snappy" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: uri = "mongodb://localhost:27017/?compressors=snappy" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy"]) uri = "mongodb://localhost:27017/?compressors=snappy,zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy", "zlib"]) if not _have_zstd(): uri = "mongodb://localhost:27017/?compressors=zstd" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: uri = "mongodb://localhost:27017/?compressors=zstd" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zstd"]) uri = "mongodb://localhost:27017/?compressors=zstd,zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zstd", "zlib"]) options = client_context.default_client_options if "compressors" in options and "zlib" in options["compressors"]: for level in range(-1, 10): - client = single_client(zlibcompressionlevel=level) + client = self.single_client(zlibcompressionlevel=level) # No error client.pymongo_test.test.find_one() def test_reset_during_update_pool(self): - client = rs_or_single_client(minPoolSize=10) - self.addCleanup(client.close) + client = self.rs_or_single_client(minPoolSize=10) client.admin.command("ping") pool = get_pool(client) generation = pool.gen.get_overall() @@ -1713,11 +1708,9 @@ def run(self): def test_background_connections_do_not_hold_locks(self): min_pool_size = 10 - client = rs_or_single_client( + client = self.rs_or_single_client( serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False ) - self.addCleanup(client.close) - # Create a single connection in the pool. client.admin.command("ping") @@ -1747,21 +1740,19 @@ def stall_connect(*args, **kwargs): @client_context.require_replica_set def test_direct_connection(self): # direct_connection=True should result in Single topology. - client = rs_or_single_client(directConnection=True) + client = self.rs_or_single_client(directConnection=True) client.admin.command("ping") self.assertEqual(len(client.nodes), 1) self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) - client.close() # direct_connection=False should result in RS topology. - client = rs_or_single_client(directConnection=False) + client = self.rs_or_single_client(directConnection=False) client.admin.command("ping") self.assertGreaterEqual(len(client.nodes), 1) self.assertIn( client._topology_settings.get_topology_type(), [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], ) - client.close() # directConnection=True, should error with multiple hosts as a list. with self.assertRaises(ConfigurationError): @@ -1781,11 +1772,10 @@ def server_description_count(): gc.collect() with client_knobs(min_heartbeat_interval=0.003): - client = MongoClient( + client = self.simple_client( "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 ) initial_count = server_description_count() - self.addCleanup(client.close) with self.assertRaises(ServerSelectionTimeoutError): client.test.test.find_one() gc.collect() @@ -1798,8 +1788,7 @@ def server_description_count(): @client_context.require_failCommand_fail_point def test_network_error_message(self): - client = single_client(retryReads=False) - self.addCleanup(client.close) + client = self.single_client(retryReads=False) client.admin.command("ping") # connect with self.fail_point( {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} @@ -1811,7 +1800,7 @@ def test_network_error_message(self): @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") def test_process_periodic_tasks(self): - client = rs_or_single_client() + client = self.rs_or_single_client() coll = client.db.collection coll.insert_many([{} for _ in range(5)]) cursor = coll.find(batch_size=2) @@ -1850,11 +1839,11 @@ def test_service_name_from_kwargs(self): self.assertEqual(client._topology_settings.srv_service_name, "customname") def test_srv_max_hosts_kwarg(self): - client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") self.assertGreater(len(client.topology_description.server_descriptions()), 1) - client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) self.assertEqual(len(client.topology_description.server_descriptions()), 1) - client = MongoClient( + client = self.simple_client( "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 ) self.assertEqual(len(client.topology_description.server_descriptions()), 2) @@ -1902,10 +1891,10 @@ def _test_handshake(self, env_vars, expected_env): if "AWS_REGION" not in env_vars: os.environ["AWS_REGION"] = "" - with rs_or_single_client(serverSelectionTimeoutMS=10000) as client: - client.admin.command("ping") - options = client.options - self.assertEqual(options.pool_options.metadata, metadata) + client = self.rs_or_single_client(serverSelectionTimeoutMS=10000) + client.admin.command("ping") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) def test_handshake_01_aws(self): self._test_handshake( @@ -2001,7 +1990,7 @@ def setUp(self): def test_exhaust_query_server_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = connected(rs_or_single_client(maxPoolSize=1)) + client = connected(self.rs_or_single_client(maxPoolSize=1)) collection = client.pymongo_test.test pool = get_pool(client) @@ -2024,7 +2013,7 @@ def test_exhaust_query_server_error(self): def test_exhaust_getmore_server_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. - client = rs_or_single_client(maxPoolSize=1) + client = self.rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test collection.drop() @@ -2063,7 +2052,7 @@ def receive_message(request_id): def test_exhaust_query_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = connected(rs_or_single_client(maxPoolSize=1, retryReads=False)) + client = connected(self.rs_or_single_client(maxPoolSize=1, retryReads=False)) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. @@ -2084,7 +2073,7 @@ def test_exhaust_query_network_error(self): def test_exhaust_getmore_network_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. - client = rs_or_single_client(maxPoolSize=1) + client = self.rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test collection.drop() collection.insert_many([{} for _ in range(200)]) # More than one batch. @@ -2133,7 +2122,7 @@ def test_gevent_timeout(self): raise SkipTest("Must be running monkey patched by gevent") from gevent import Timeout, spawn - client = rs_or_single_client(maxPoolSize=1) + client = self.rs_or_single_client(maxPoolSize=1) coll = client.pymongo_test.test coll.insert_one({}) @@ -2165,7 +2154,7 @@ def test_gevent_timeout_when_creating_connection(self): raise SkipTest("Must be running monkey patched by gevent") from gevent import Timeout, spawn - client = rs_or_single_client() + client = self.rs_or_single_client() self.addCleanup(client.close) coll = client.pymongo_test.test pool = get_pool(client) @@ -2202,7 +2191,7 @@ class TestClientLazyConnect(IntegrationTest): """Test concurrent operations on a lazily-connecting MongoClient.""" def _get_client(self): - return rs_or_single_client(connect=False) + return self.rs_or_single_client(connect=False) @client_context.require_sync def test_insert_one(self): @@ -2336,6 +2325,7 @@ def _test_network_error(self, operation_callback): retryReads=False, serverSelectionTimeoutMS=1000, ) + self.addCleanup(c.close) # Set host-specific information so we can test whether it is reset. diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index ee19a04176..ebbdc74c1c 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -27,7 +27,6 @@ ) from test.utils import ( OvertCommandListener, - rs_or_single_client, ) from unittest.mock import patch @@ -38,7 +37,6 @@ InvalidOperation, NetworkTimeout, ) -from pymongo.monitoring import * from pymongo.operations import * from pymongo.synchronous.client_bulk import _ClientBulk from pymongo.write_concern import WriteConcern @@ -97,8 +95,7 @@ def setUp(self): @client_context.require_no_serverless def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) models = [] for _ in range(self.max_write_batch_size + 1): @@ -123,8 +120,7 @@ def test_batch_splits_if_num_operations_too_large(self): @client_context.require_no_serverless def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) models = [] num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) @@ -157,11 +153,10 @@ def test_batch_splits_if_ops_payload_too_large(self): @client_context.require_failCommand_fail_point def test_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() - client = rs_or_single_client( + client = self.rs_or_single_client( event_listeners=[listener], retryWrites=False, ) - self.addCleanup(client.close) fail_command = { "configureFailPoint": "failCommand", @@ -200,8 +195,7 @@ def test_collects_write_concern_errors_across_batches(self): @client_context.require_no_serverless def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addCleanup(collection.drop) @@ -231,8 +225,7 @@ def test_collects_write_errors_across_batches_unordered(self): @client_context.require_no_serverless def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addCleanup(collection.drop) @@ -262,8 +255,7 @@ def test_collects_write_errors_across_batches_ordered(self): @client_context.require_no_serverless def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addCleanup(collection.drop) @@ -304,8 +296,7 @@ def test_handles_cursor_requiring_getMore(self): @client_context.require_no_standalone def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addCleanup(collection.drop) @@ -348,8 +339,7 @@ def test_handles_cursor_requiring_getMore_within_transaction(self): @client_context.require_failCommand_fail_point def test_handles_getMore_error(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.db["coll"] self.addCleanup(collection.drop) @@ -403,8 +393,7 @@ def test_handles_getMore_error(self): @client_context.require_no_serverless def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) b_repeated = "b" * self.max_bson_object_size @@ -460,8 +449,7 @@ def _setup_namespace_test_models(self): @client_context.require_no_serverless def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) num_models, models = self._setup_namespace_test_models() models.append( @@ -492,8 +480,7 @@ def test_no_batch_splits_if_new_namespace_is_not_too_large(self): @client_context.require_no_serverless def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) num_models, models = self._setup_namespace_test_models() c_repeated = "c" * 200 @@ -530,8 +517,7 @@ def test_batch_splits_if_new_namespace_is_too_large(self): @client_context.require_version_min(8, 0, 0, -24) @client_context.require_no_serverless def test_returns_error_if_no_writes_can_be_added_to_ops(self): - client = rs_or_single_client() - self.addCleanup(client.close) + client = self.rs_or_single_client() # Document too large. b_repeated = "b" * self.max_message_size_bytes @@ -554,8 +540,7 @@ def test_returns_error_if_auto_encryption_configured(self): key_vault_namespace="db.coll", kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, ) - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -580,7 +565,7 @@ def setUp(self): def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 - internal_client = rs_or_single_client(timeoutMS=None) + internal_client = self.rs_or_single_client(timeoutMS=None) self.addCleanup(internal_client.close) collection = internal_client.db["coll"] @@ -605,14 +590,13 @@ def test_timeout_in_multi_batch_bulk_write(self): ) listener = OvertCommandListener() - client = rs_or_single_client( + client = self.rs_or_single_client( event_listeners=[listener], readConcernLevel="majority", readPreference="primary", timeoutMS=2000, w="majority", ) - self.addCleanup(client.close) client.admin.command("ping") # Init the client first. with self.assertRaises(ClientBulkWriteException) as context: client.bulk_write(models=models) diff --git a/test/test_collation.py b/test/test_collation.py index bedf0a2eaa..19df25c1c0 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -18,7 +18,7 @@ import functools import warnings from test import IntegrationTest, client_context, unittest -from test.utils import EventListener, rs_or_single_client +from test.utils import EventListener from typing import Any from pymongo.collation import ( @@ -99,7 +99,7 @@ class TestCollation(IntegrationTest): def setUpClass(cls): super().setUpClass() cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test cls.collation = Collation("en_US") cls.warn_context = warnings.catch_warnings() diff --git a/test/test_collection.py b/test/test_collection.py index b68aa74f73..dab59cf1b2 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -29,6 +29,7 @@ from test import ( # TODO: fix sync imports in PYTHON-4528 IntegrationTest, + UnitTest, client_context, unittest, ) @@ -37,8 +38,6 @@ EventListener, get_pool, is_mongos, - rs_or_single_client, - single_client, wait_until, ) @@ -81,14 +80,20 @@ _IS_SYNC = True -class TestCollectionNoConnect(unittest.TestCase): +class TestCollectionNoConnect(UnitTest): """Test Collection features on a client that does not connect.""" db: Database + client: MongoClient @classmethod - def setUpClass(cls): - cls.db = MongoClient(connect=False).pymongo_test + def _setup_class(cls): + cls.client = MongoClient(connect=False) + cls.db = cls.client.pymongo_test + + @classmethod + def _tearDown_class(cls): + cls.client.close() def test_collection(self): self.assertRaises(TypeError, Collection, self.db, 5) @@ -1800,8 +1805,7 @@ def test_exhaust(self): # Insert enough documents to require more than one batch self.db.test.insert_many([{"i": i} for i in range(150)]) - client = rs_or_single_client(maxPoolSize=1) - self.addCleanup(client.close) + client = self.rs_or_single_client(maxPoolSize=1) pool = get_pool(client) # Make sure the socket is returned after exhaustion. @@ -2077,7 +2081,7 @@ def test_find_one_and(self): def test_find_one_and_write_concern(self): listener = EventListener() - db = (single_client(event_listeners=[listener]))[self.db.name] + db = (self.single_client(event_listeners=[listener]))[self.db.name] # non-default WriteConcern. c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. diff --git a/test/test_comment.py b/test/test_comment.py index 931446ef3a..c0f037ea44 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import EventListener, rs_or_single_client +from test.utils import EventListener from bson.dbref import DBRef from pymongo.operations import IndexModel @@ -109,7 +109,7 @@ def _test_ops( @client_context.require_replica_set def test_database_helpers(self): listener = EventListener() - db = rs_or_single_client(event_listeners=[listener]).db + db = self.rs_or_single_client(event_listeners=[listener]).db helpers = [ (db.watch, []), (db.command, ["hello"]), @@ -126,7 +126,7 @@ def test_database_helpers(self): @client_context.require_replica_set def test_client_helpers(self): listener = EventListener() - cli = rs_or_single_client(event_listeners=[listener]) + cli = self.rs_or_single_client(event_listeners=[listener]) helpers = [ (cli.watch, []), (cli.list_databases, []), @@ -141,7 +141,7 @@ def test_client_helpers(self): @client_context.require_version_min(4, 7, -1) def test_collection_helpers(self): listener = EventListener() - db = rs_or_single_client(event_listeners=[listener])[self.db.name] + db = self.rs_or_single_client(event_listeners=[listener])[self.db.name] coll = db.get_collection("test") helpers = [ diff --git a/test/test_common.py b/test/test_common.py index 358cd29b81..3228dc97fb 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -21,7 +21,6 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, connected, unittest -from test.utils import rs_or_single_client, single_client from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation from bson.codec_options import CodecOptions @@ -111,10 +110,10 @@ def test_uuid_representation(self): ) def test_write_concern(self): - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertEqual(WriteConcern(), c.write_concern) - c = rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) + c = self.rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) wc = WriteConcern(w=2, wtimeout=1000) self.assertEqual(wc, c.write_concern) @@ -134,7 +133,7 @@ def test_write_concern(self): def test_mongo_client(self): pair = client_context.pair - m = rs_or_single_client(w=0) + m = self.rs_or_single_client(w=0) coll = m.pymongo_test.write_concern_test coll.drop() doc = {"_id": ObjectId()} @@ -143,17 +142,19 @@ def test_mongo_client(self): coll = coll.with_options(write_concern=WriteConcern(w=1)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client() + m = self.rs_or_single_client() coll = m.pymongo_test.write_concern_test new_coll = coll.with_options(write_concern=WriteConcern(w=0)) self.assertTrue(new_coll.insert_one(doc)) self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client(f"mongodb://{pair}/", replicaSet=client_context.replica_set_name) + m = self.rs_or_single_client( + f"mongodb://{pair}/", replicaSet=client_context.replica_set_name + ) coll = m.pymongo_test.write_concern_test self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client( + m = self.rs_or_single_client( f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name ) @@ -161,8 +162,8 @@ def test_mongo_client(self): coll.insert_one(doc) # Equality tests - direct = connected(single_client(w=0)) - direct2 = connected(single_client(f"mongodb://{pair}/?w=0", **self.credentials)) + direct = connected(self.single_client(w=0)) + direct2 = connected(self.single_client(f"mongodb://{pair}/?w=0", **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 9ee3202e13..142af0f9a7 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -30,9 +30,6 @@ client_context, get_pool, get_pools, - rs_or_single_client, - single_client, - single_client_noauth, wait_until, ) from test.utils_spec_runner import SpecRunnerThread @@ -250,7 +247,7 @@ def run_scenario(self, scenario_def, test): else: kill_cursor_frequency = interval / 1000.0 with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): - client = single_client(**opts) + client = self.single_client(**opts) # Update the SD to a known type because the DummyMonitor will not. # Note we cannot simply call topology.on_change because that would # internally call pool.ready() which introduces unexpected @@ -323,13 +320,13 @@ def cleanup(): # Prose tests. Numbers correspond to the prose test number in the spec. # def test_1_client_connection_pool_options(self): - client = rs_or_single_client(**self.POOL_OPTIONS) + client = self.rs_or_single_client(**self.POOL_OPTIONS) self.addCleanup(client.close) pool_opts = get_pool(client).opts self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) def test_2_all_client_pools_have_same_options(self): - client = rs_or_single_client(**self.POOL_OPTIONS) + client = self.rs_or_single_client(**self.POOL_OPTIONS) self.addCleanup(client.close) client.admin.command("ping") # Discover at least one secondary. @@ -345,14 +342,14 @@ def test_2_all_client_pools_have_same_options(self): def test_3_uri_connection_pool_options(self): opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) uri = f"mongodb://{client_context.pair}/?{opts}" - client = rs_or_single_client(uri) + client = self.rs_or_single_client(uri) self.addCleanup(client.close) pool_opts = get_pool(client).opts self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) def test_4_subscribe_to_events(self): listener = CMAPListener() - client = single_client(event_listeners=[listener]) + client = self.single_client(event_listeners=[listener]) self.addCleanup(client.close) self.assertEqual(listener.event_count(PoolCreatedEvent), 1) @@ -376,7 +373,7 @@ def test_4_subscribe_to_events(self): def test_5_check_out_fails_connection_error(self): listener = CMAPListener() - client = single_client(event_listeners=[listener]) + client = self.single_client(event_listeners=[listener]) self.addCleanup(client.close) pool = get_pool(client) @@ -403,7 +400,7 @@ def mock_connect(*args, **kwargs): @client_context.require_no_fips def test_5_check_out_fails_auth_error(self): listener = CMAPListener() - client = single_client_noauth( + client = self.single_client_noauth( username="notauser", password="fail", event_listeners=[listener] ) self.addCleanup(client.close) @@ -449,7 +446,7 @@ def test_events_repr(self): def test_close_leaves_pool_unpaused(self): listener = CMAPListener() - client = single_client(event_listeners=[listener]) + client = self.single_client(event_listeners=[listener]) client.admin.command("ping") pool = get_pool(client) client.close() diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 674612693c..fba7675743 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -24,7 +24,6 @@ CMAPListener, ensure_all_connected, repl_set_step_down, - rs_or_single_client, ) from bson import SON @@ -43,7 +42,7 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): def setUpClass(cls): super().setUpClass() cls.listener = CMAPListener() - cls.client = rs_or_single_client( + cls.client = cls.unmanaged_rs_or_single_client( event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 ) diff --git a/test/test_cursor.py b/test/test_cursor.py index 8e6fade1ec..9bc22aca3c 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -35,7 +35,6 @@ EventListener, OvertCommandListener, ignore_deprecations, - rs_or_single_client, wait_until, ) @@ -230,7 +229,7 @@ def test_max_await_time_ms(self): self.assertEqual(90, cursor._max_await_time_ms) listener = AllowListEventListener("find", "getMore") - coll = (rs_or_single_client(event_listeners=[listener]))[self.db.name].pymongo_test + coll = (self.rs_or_single_client(event_listeners=[listener]))[self.db.name].pymongo_test # Tailable_defaults. coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() @@ -345,8 +344,7 @@ def test_explain(self): def test_explain_with_read_concern(self): # Do not add readConcern level to explain. listener = AllowListEventListener("explain") - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) started = listener.started_events @@ -1252,8 +1250,7 @@ def test_close_kills_cursor_synchronously(self): self.client._process_periodic_tasks() listener = AllowListEventListener("killCursors") - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) coll = client[self.db.name].test_close_kills_cursors # Add some test data. @@ -1291,8 +1288,7 @@ def assertCursorKilled(): @client_context.require_failCommand_appName def test_timeout_kills_cursor_synchronously(self): listener = AllowListEventListener("killCursors") - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) coll = client[self.db.name].test_timeout_kills_cursor # Add some test data. @@ -1349,8 +1345,7 @@ def test_delete_not_initialized(self): def test_getMore_does_not_send_readPreference(self): listener = AllowListEventListener("find", "getMore") - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) # We never send primary read preference so override the default. coll = client[self.db.name].get_collection( "test", read_preference=ReadPreference.PRIMARY_PREFERRED @@ -1454,7 +1449,7 @@ def test_find_raw_transaction(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): batches = ( @@ -1484,7 +1479,7 @@ def test_find_raw_retryable_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} ): @@ -1505,7 +1500,7 @@ def test_find_raw_snapshot_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: db.test.distinct("x", {}, session=session) @@ -1566,7 +1561,7 @@ def test_read_concern(self): def test_monitoring(self): listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() c.insert_many([{"_id": i} for i in range(10)]) @@ -1632,7 +1627,7 @@ def test_aggregate_raw_transaction(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): batches = ( @@ -1663,7 +1658,7 @@ def test_aggregate_raw_retryable_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} ): @@ -1687,7 +1682,7 @@ def test_aggregate_raw_snapshot_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: db.test.distinct("x", {}, session=session) @@ -1733,7 +1728,7 @@ def test_collation(self): def test_monitoring(self): listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() c.insert_many([{"_id": i} for i in range(10)]) @@ -1777,8 +1772,7 @@ def test_monitoring(self): @client_context.require_no_mongos def test_exhaust_cursor_db_set(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.delete_many({}) c.insert_many([{"_id": i} for i in range(3)]) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index c30c62b1b1..abaa820cb7 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -27,7 +27,6 @@ from test import client_context, unittest from test.test_client import IntegrationTest -from test.utils import rs_client from bson import ( _BUILT_IN_TYPES, @@ -971,7 +970,7 @@ def create_targets(self, *args, **kwargs): if codec_options: kwargs["type_registry"] = codec_options.type_registry kwargs["document_class"] = codec_options.document_class - self.watched_target = rs_client(*args, **kwargs) + self.watched_target = self.rs_client(*args, **kwargs) self.addCleanup(self.watched_target.close) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 8ba83ab190..a374db550e 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -27,8 +27,6 @@ from test.unified_format import generate_test_classes from test.utils import ( OvertCommandListener, - rs_client_noauth, - rs_or_single_client, ) pytestmark = pytest.mark.data_lake @@ -65,7 +63,7 @@ def setUpClass(cls): # Test killCursors def test_1(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) next(cursor) @@ -90,13 +88,13 @@ def test_1(self): # Test no auth def test_2(self): - client = rs_client_noauth() + client = self.rs_client_noauth() client.admin.command("ping") # Test with auth def test_3(self): for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: - client = rs_or_single_client(authMechanism=mechanism) + client = self.rs_or_single_client(authMechanism=mechanism) client[self.TEST_DB][self.TEST_COLLECTION].find_one() diff --git a/test/test_database.py b/test/test_database.py index 12d4eb666a..fe07f343c5 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -28,7 +28,6 @@ from test.utils import ( IMPOSSIBLE_WRITE_CONCERN, OvertCommandListener, - rs_or_single_client, wait_until, ) @@ -207,7 +206,7 @@ def test_list_collection_names(self): def test_list_collection_names_filter(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] db.capped.drop() db.create_collection("capped", capped=True, size=4096) @@ -234,8 +233,7 @@ def test_list_collection_names_filter(self): def test_check_exists(self): listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] db.drop_collection("unique") db.create_collection("unique", check_exists=True) @@ -323,7 +321,7 @@ def test_list_collections(self): self.client.drop_database("pymongo_test") def test_list_collection_names_single_socket(self): - client = rs_or_single_client(maxPoolSize=1) + client = self.rs_or_single_client(maxPoolSize=1) client.drop_database("test_collection_names_single_socket") db = client.test_collection_names_single_socket for i in range(200): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index ef32afbcd4..3554619f12 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import IntegrationTest, PyMongoTestCase, unittest from test.pymongo_mocks import DummyMonitor from test.unified_format import generate_test_classes from test.utils import ( @@ -32,9 +32,7 @@ assertion_context, client_context, get_pool, - rs_or_single_client, server_name_to_type, - single_client, wait_until, ) from unittest.mock import patch @@ -272,7 +270,7 @@ class TestIgnoreStaleErrors(IntegrationTest): def test_ignore_stale_connection_errors(self): N_THREADS = 5 barrier = threading.Barrier(N_THREADS, timeout=30) - client = rs_or_single_client(minPoolSize=N_THREADS) + client = self.rs_or_single_client(minPoolSize=N_THREADS) self.addCleanup(client.close) # Wait for initial discovery. @@ -319,7 +317,7 @@ class TestPoolManagement(IntegrationTest): def test_pool_unpause(self): # This test implements the prose test "Connection Pool Management" listener = CMAPHeartbeatListener() - client = single_client( + client = self.single_client( appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] ) self.addCleanup(client.close) @@ -353,7 +351,7 @@ def setUp(self): super().setUp() def test_rtt_connection_is_enabled_stream(self): - client = rs_or_single_client(serverMonitoringMode="stream") + client = self.rs_or_single_client(serverMonitoringMode="stream") self.addCleanup(client.close) client.admin.command("ping") @@ -373,7 +371,7 @@ def predicate(): wait_until(predicate, "find all RTT monitors") def test_rtt_connection_is_disabled_poll(self): - client = rs_or_single_client(serverMonitoringMode="poll") + client = self.rs_or_single_client(serverMonitoringMode="poll") self.addCleanup(client.close) self.assert_rtt_connection_is_disabled(client) @@ -387,7 +385,7 @@ def test_rtt_connection_is_disabled_auto(self): ] for env in envs: with patch.dict("os.environ", env): - client = rs_or_single_client(serverMonitoringMode="auto") + client = self.rs_or_single_client(serverMonitoringMode="auto") self.addCleanup(client.close) self.assert_rtt_connection_is_disabled(client) @@ -415,7 +413,7 @@ def handle_request_and_shutdown(self): self.server_close() -class TestHeartbeatStartOrdering(unittest.TestCase): +class TestHeartbeatStartOrdering(PyMongoTestCase): def test_heartbeat_start_ordering(self): events = [] listener = HeartbeatEventsListListener(events) @@ -423,7 +421,7 @@ def test_heartbeat_start_ordering(self): server.events = events server_thread = threading.Thread(target=server.handle_request_and_shutdown) server_thread.start() - _c = MongoClient( + _c = self.simple_client( "mongodb://localhost:9999", serverSelectionTimeoutMS=500, event_listeners=(listener,) ) server_thread.join() diff --git a/test/test_dns.py b/test/test_dns.py index b4c5e3684c..f2185efb1b 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -22,16 +22,15 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import IntegrationTest, PyMongoTestCase, client_context, unittest from test.utils import wait_until from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError -from pymongo.synchronous.mongo_client import MongoClient from pymongo.uri_parser import parse_uri, split_hosts -class TestDNSRepl(unittest.TestCase): +class TestDNSRepl(PyMongoTestCase): TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "replica-set" ) @@ -42,7 +41,7 @@ def setUp(self): pass -class TestDNSLoadBalanced(unittest.TestCase): +class TestDNSLoadBalanced(PyMongoTestCase): TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "load-balanced" ) @@ -53,7 +52,7 @@ def setUp(self): pass -class TestDNSSharded(unittest.TestCase): +class TestDNSSharded(PyMongoTestCase): TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "sharded") load_balanced = False @@ -120,7 +119,7 @@ def run_test(self): # tests. copts["tlsAllowInvalidHostnames"] = True - client = MongoClient(uri, **copts) + client = PyMongoTestCase.unmanaged_simple_client(uri, **copts) if num_seeds is not None: self.assertEqual(len(client._topology_settings.seeds), num_seeds) if hosts is not None: @@ -133,6 +132,7 @@ def run_test(self): client.admin.command("ping") # XXX: we should block until SRV poller runs at least once # and re-run these assertions. + client.close() else: try: parse_uri(uri) @@ -157,37 +157,37 @@ def create_tests(cls): create_tests(TestDNSSharded) -class TestParsingErrors(unittest.TestCase): +class TestParsingErrors(PyMongoTestCase): def test_invalid_host(self): self.assertRaisesRegex( ConfigurationError, "Invalid URI host: mongodb is not", - MongoClient, + self.simple_client, "mongodb+srv://mongodb", ) self.assertRaisesRegex( ConfigurationError, "Invalid URI host: mongodb.com is not", - MongoClient, + self.simple_client, "mongodb+srv://mongodb.com", ) self.assertRaisesRegex( ConfigurationError, "Invalid URI host: an IP address is not", - MongoClient, + self.simple_client, "mongodb+srv://127.0.0.1", ) self.assertRaisesRegex( ConfigurationError, "Invalid URI host: an IP address is not", - MongoClient, + self.simple_client, "mongodb+srv://[::1]", ) class TestCaseInsensitive(IntegrationTest): def test_connect_case_insensitive(self): - client = MongoClient("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") self.addCleanup(client.close) self.assertGreater(len(client.topology_description.server_descriptions()), 1) diff --git a/test/test_encryption.py b/test/test_encryption.py index 5e02e4d628..96d40c4a34 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -31,7 +31,7 @@ from test import IntegrationTest, PyMongoTestCase, client_context from test.test_bulk import BulkTestBase from threading import Thread -from typing import Any, Dict, Mapping +from typing import Any, Dict, Mapping, Optional import pytest @@ -53,6 +53,7 @@ KMIP_CREDS, LOCAL_MASTER_KEY, ) +from test.test_bulk import BulkTestBase from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, @@ -61,7 +62,6 @@ TopologyEventListener, camel_to_snake_args, is_greenthread_patched, - rs_or_single_client, wait_until, ) from test.utils_spec_runner import SpecRunner @@ -109,13 +109,12 @@ class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") def test_crypt_shared(self): # Test that we can pick up crypt_shared lib automatically - client = MongoClient( + self.simple_client( auto_encryption_opts=AutoEncryptionOpts( KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True ), connect=False, ) - self.addCleanup(client.close) @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): @@ -196,19 +195,16 @@ def test_init_kms_tls_options(self): class TestClientOptions(PyMongoTestCase): def test_default(self): - client = MongoClient(connect=False) - self.addCleanup(client.close) + client = self.simple_client(connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) - client = MongoClient(auto_encryption_opts=None, connect=False) - self.addCleanup(client.close) + client = self.simple_client(auto_encryption_opts=None, connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_kwargs(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = MongoClient(auto_encryption_opts=opts, connect=False) - self.addCleanup(client.close) + client = self.simple_client(auto_encryption_opts=opts, connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) @@ -229,6 +225,34 @@ def assertBinaryUUID(self, val): self.assertIsInstance(val, Binary) self.assertEqual(val.subtype, UUID_SUBTYPE) + def create_client_encryption( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + self.addCleanup(client_encryption.close) + return client_encryption + + @classmethod + def unmanaged_create_client_encryption( + cls, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + return client_encryption + # Location of JSON test files. if _IS_SYNC: @@ -260,8 +284,7 @@ def bson_data(*paths): class TestClientSimple(EncryptionIntegrationTest): def _test_auto_encrypt(self, opts): - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) # Create the encrypted field's data key. key_vault = create_key_vault( @@ -342,8 +365,7 @@ def test_auto_encrypt_local_schema_map(self): def test_use_after_close(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) client.admin.command("ping") client.close() @@ -360,8 +382,7 @@ def test_use_after_close(self): ) def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) def target(): with warnings.catch_warnings(): @@ -375,8 +396,7 @@ def target(): class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): def test_upsert_uuid_standard_encrypt(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) encrypted_coll = client.pymongo_test.test @@ -416,8 +436,7 @@ def _setup_class(cls): @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) msg = "Auto-encryption requires a minimum MongoDB version of 4.2" with self.assertRaisesRegex(ConfigurationError, msg): client.test.test.insert_one({}) @@ -430,8 +449,7 @@ def test_raise_max_wire_version_error(self): def test_raise_unsupported_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) msg = "find_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.find_raw_batches({}) @@ -450,10 +468,9 @@ def test_raise_unsupported_error(self): class TestExplicitSimple(EncryptionIntegrationTest): def test_encrypt_decrypt(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) - self.addCleanup(client_encryption.close) # Use standard UUID representation. key_vault = client_context.client.keyvault.get_collection("datakeys", codec_options=OPTS) self.addCleanup(key_vault.drop) @@ -493,10 +510,9 @@ def test_encrypt_decrypt(self): self.assertEqual(decrypted_ssn, doc["ssn"]) def test_validation(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) - self.addCleanup(client_encryption.close) msg = "value to decrypt must be a bson.binary.Binary with subtype 6" with self.assertRaisesRegex(TypeError, msg): @@ -510,10 +526,9 @@ def test_validation(self): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) def test_bson_errors(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) - self.addCleanup(client_encryption.close) # Attempt to encrypt an unencodable object. unencodable_value = object() @@ -526,7 +541,7 @@ def test_bson_errors(self): def test_codec_options(self): with self.assertRaisesRegex(TypeError, "codec_options must be"): - ClientEncryption( + self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, @@ -534,10 +549,9 @@ def test_codec_options(self): ) opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) - client_encryption_legacy = ClientEncryption( + client_encryption_legacy = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) - self.addCleanup(client_encryption_legacy.close) # Create the encrypted field's data key. key_id = client_encryption_legacy.create_data_key("local") @@ -552,10 +566,9 @@ def test_codec_options(self): # Encrypt the same UUID with STANDARD codec options. opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) - self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id ) @@ -571,7 +584,7 @@ def test_codec_options(self): self.assertNotEqual(client_encryption.decrypt(encrypted_legacy), value) def test_close(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) client_encryption.close() @@ -587,7 +600,7 @@ def test_close(self): client_encryption.decrypt(Binary(b"", 6)) def test_with_statement(self): - with ClientEncryption( + with self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) as client_encryption: pass @@ -807,7 +820,7 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): def _setup_class(cls): super()._setup_class() cls.listener = OvertCommandListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) cls.client.db.coll.drop() cls.vault = create_key_vault(cls.client.keyvault.datakeys) @@ -829,10 +842,10 @@ def _setup_class(cls): opts = AutoEncryptionOpts( cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS ) - cls.client_encrypted = rs_or_single_client( + cls.client_encrypted = cls.unmanaged_rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - cls.client_encryption = ClientEncryption( + cls.client_encryption = cls.unmanaged_create_client_encryption( cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS ) @@ -919,8 +932,7 @@ def _test_external_key_vault(self, with_external_key_vault): # Configure the encrypted field via the local schema_map option. schemas = {"db.coll": json_data("external", "external-schema.json")} if with_external_key_vault: - key_vault_client = rs_or_single_client(username="fake-user", password="fake-pwd") - self.addCleanup(key_vault_client.close) + key_vault_client = self.rs_or_single_client(username="fake-user", password="fake-pwd") else: key_vault_client = client_context.client opts = AutoEncryptionOpts( @@ -930,15 +942,13 @@ def _test_external_key_vault(self, with_external_key_vault): key_vault_client=key_vault_client, ) - client_encrypted = rs_or_single_client( + client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - self.addCleanup(client_encrypted.close) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS ) - self.addCleanup(client_encryption.close) if with_external_key_vault: # Authentication error. @@ -984,10 +994,9 @@ def test_views_are_prohibited(self): self.addCleanup(self.client.db.view.drop) opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") - client_encrypted = rs_or_single_client( + client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - self.addCleanup(client_encrypted.close) with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): client_encrypted.db.view.insert_one({}) @@ -1044,17 +1053,15 @@ def _test_corpus(self, opts): ) self.addCleanup(vault.drop) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( self.kms_providers(), "keyvault.datakeys", client_context.client, OPTS, kms_tls_options=KMS_TLS_OPTS, ) - self.addCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) corpus_copied: SON = SON() @@ -1197,7 +1204,7 @@ def _setup_class(cls): opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") cls.listener = OvertCommandListener() - cls.client_encrypted = rs_or_single_client( + cls.client_encrypted = cls.unmanaged_rs_or_single_client( auto_encryption_opts=opts, event_listeners=[cls.listener] ) cls.coll_encrypted = cls.client_encrypted.db.coll @@ -1285,7 +1292,7 @@ def setUp(self): "gcp": GCP_CREDS, "kmip": KMIP_CREDS, } - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers=kms_providers, key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, @@ -1297,7 +1304,7 @@ def setUp(self): kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" - self.client_encryption_invalid = ClientEncryption( + self.client_encryption_invalid = self.create_client_encryption( kms_providers=kms_providers_invalid, key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, @@ -1476,7 +1483,7 @@ def test_12_kmip_master_key_invalid_endpoint(self): self.client_encryption.create_data_key("kmip", key) -class AzureGCPEncryptionTestMixin: +class AzureGCPEncryptionTestMixin(EncryptionIntegrationTest): DEK = None KMS_PROVIDER_MAP = None KEYVAULT_DB = "keyvault" @@ -1488,7 +1495,7 @@ def setUp(self): create_key_vault(keyvault, self.DEK) def _test_explicit(self, expectation): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, @@ -1517,7 +1524,7 @@ def _test_automatic(self, expectation_extjson, payload): ) insert_listener = AllowListEventListener("insert") - client = rs_or_single_client( + client = self.rs_or_single_client( auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] ) self.addCleanup(client.close) @@ -1596,19 +1603,17 @@ def test_automatic(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests class TestDeadlockProse(EncryptionIntegrationTest): def setUp(self): - self.client_test = rs_or_single_client( + self.client_test = self.rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" ) - self.addCleanup(self.client_test.close) self.client_keyvault_listener = OvertCommandListener() - self.client_keyvault = rs_or_single_client( + self.client_keyvault = self.rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", event_listeners=[self.client_keyvault_listener], ) - self.addCleanup(self.client_keyvault.close) self.client_test.keyvault.datakeys.drop() self.client_test.db.coll.drop() @@ -1619,7 +1624,7 @@ def setUp(self): codec_options=OPTS, ) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, key_vault_namespace="keyvault.datakeys", key_vault_client=self.client_test, @@ -1635,7 +1640,7 @@ def setUp(self): self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") def _run_test(self, max_pool_size, auto_encryption_opts): - client_encrypted = rs_or_single_client( + client_encrypted = self.rs_or_single_client( readConcernLevel="majority", w="majority", maxPoolSize=max_pool_size, @@ -1653,8 +1658,6 @@ def _run_test(self, max_pool_size, auto_encryption_opts): result = client_encrypted.db.coll.find_one({"_id": 0}) self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) - self.addCleanup(client_encrypted.close) - def test_case_1(self): self._run_test( max_pool_size=1, @@ -1830,7 +1833,7 @@ def setUp(self): create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() ) keyID = self.client_encryption.create_data_key("local") @@ -1845,10 +1848,9 @@ def setUp(self): key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map ) self.listener = AllowListEventListener("aggregate") - self.encrypted_client = rs_or_single_client( + self.encrypted_client = self.rs_or_single_client( auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] ) - self.addCleanup(self.encrypted_client.close) def test_01_command_error(self): with self.fail_point( @@ -1925,8 +1927,7 @@ def reset_timeout(): "--port=27027", ], ) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) with self.assertRaisesRegex(EncryptionError, "Timeout"): client_encrypted.db.coll.insert_one({"encrypted": "test"}) @@ -1940,11 +1941,12 @@ def test_bypassAutoEncryption(self): "--port=27027", ], ) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) client_encrypted.db.coll.insert_one({"unencrypted": "test"}) # Validate that mongocryptd was not spawned: - mongocryptd_client = MongoClient("mongodb://localhost:27027/?serverSelectionTimeoutMS=500") + mongocryptd_client = self.simple_client( + "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" + ) with self.assertRaises(ServerSelectionTimeoutError): mongocryptd_client.admin.command("ping") @@ -1966,15 +1968,13 @@ def test_via_loading_shared_library(self): ], crypt_shared_lib_required=True, ) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) client_encrypted.db.coll.drop() client_encrypted.db.coll.insert_one({"encrypted": "test"}) self.assertEncrypted((client_context.client.db.coll.find_one({}))["encrypted"]) - no_mongocryptd_client = MongoClient( + no_mongocryptd_client = self.simple_client( host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" ) - self.addCleanup(no_mongocryptd_client.close) with self.assertRaises(ServerSelectionTimeoutError): no_mongocryptd_client.db.command("ping") @@ -2008,8 +2008,7 @@ def listener(): mongocryptd_uri="mongodb://localhost:47021", crypt_shared_lib_required=False, ) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) client_encrypted.db.coll.drop() client_encrypted.db.coll.insert_one({"encrypted": "test"}) server.shutdown() @@ -2023,10 +2022,9 @@ class TestKmsTLSProse(EncryptionIntegrationTest): def setUp(self): super().setUp() self.patch_system_certs(CA_PEM) - self.client_encrypted = ClientEncryption( + self.client_encrypted = self.create_client_encryption( {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS ) - self.addCleanup(self.client_encrypted.close) def test_invalid_kms_certificate_expired(self): key = { @@ -2071,36 +2069,32 @@ def setUp(self): "gcp": {"tlsCAFile": CA_PEM}, "kmip": {"tlsCAFile": CA_PEM}, } - self.client_encryption_no_client_cert = ClientEncryption( + self.client_encryption_no_client_cert = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addCleanup(self.client_encryption_no_client_cert.close) # 2, same providers as above but with tlsCertificateKeyFile. kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) for p in kms_tls_opts: kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM - self.client_encryption_with_tls = ClientEncryption( + self.client_encryption_with_tls = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts ) - self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. providers: dict = copy.deepcopy(providers) providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000" providers["gcp"]["endpoint"] = "127.0.0.1:9000" providers["kmip"]["endpoint"] = "127.0.0.1:9000" - self.client_encryption_expired = ClientEncryption( + self.client_encryption_expired = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addCleanup(self.client_encryption_expired.close) # 3, update endpoints to invalid host. providers: dict = copy.deepcopy(providers) providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001" providers["gcp"]["endpoint"] = "127.0.0.1:9001" providers["kmip"]["endpoint"] = "127.0.0.1:9001" - self.client_encryption_invalid_hostname = ClientEncryption( + self.client_encryption_invalid_hostname = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ( @@ -2138,7 +2132,7 @@ def setUp(self): "gcp:with_tls": with_cert, "kmip:with_tls": with_cert, } - self.client_encryption_with_names = ClientEncryption( + self.client_encryption_with_names = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4 ) @@ -2220,10 +2214,9 @@ def test_04_kmip(self): def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} - encryption = ClientEncryption( + encryption = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options ) - self.addCleanup(encryption.close) ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] if not hasattr(ctx, "check_ocsp_endpoint"): raise self.skipTest("OCSP not enabled") @@ -2273,7 +2266,7 @@ def setUp(self): self.client = client_context.client create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() ) self.def_key_id = self.client_encryption.create_data_key("local", key_alt_names=["def"]) @@ -2311,17 +2304,15 @@ def setUp(self): key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) self.key_vault_client = self.client - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS ) - self.addCleanup(self.client_encryption.close) opts = AutoEncryptionOpts( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, bypass_query_analysis=True, ) - self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(self.encrypted_client.close) + self.encrypted_client = self.rs_or_single_client(auto_encryption_opts=opts) def test_01_insert_encrypted_indexed_and_find(self): val = "encrypted indexed value" @@ -2444,14 +2435,13 @@ def run_test(self, src_provider, dst_provider): self.client.keyvault.drop_collection("datakeys") # Step 2. Create a ``ClientEncryption`` object named ``client_encryption1`` - client_encryption1 = ClientEncryption( + client_encryption1 = self.create_client_encryption( key_vault_client=self.client, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, kms_tls_options=KMS_TLS_OPTS, codec_options=OPTS, ) - self.addCleanup(client_encryption1.close) # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. key_id = client_encryption1.create_data_key( @@ -2464,16 +2454,14 @@ def run_test(self, src_provider, dst_provider): ) # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2`` - client2 = rs_or_single_client() - self.addCleanup(client2.close) - client_encryption2 = ClientEncryption( + client2 = self.rs_or_single_client() + client_encryption2 = self.create_client_encryption( key_vault_client=client2, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, kms_tls_options=KMS_TLS_OPTS, codec_options=OPTS, ) - self.addCleanup(client_encryption2.close) # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( @@ -2508,7 +2496,7 @@ def setUp(self): @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") def test_01_failure(self): - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers={"aws": {}}, key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, @@ -2519,7 +2507,7 @@ def test_01_failure(self): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_02_success(self): - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers={"aws": {}}, key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, @@ -2539,8 +2527,7 @@ def test_queryable_encryption(self): # MongoClient to use in testing that handles auth/tls/etc, # and cleanup. def MongoClient(**kwargs): - c = rs_or_single_client(**kwargs) - self.addCleanup(c.close) + c = self.rs_or_single_client(**kwargs) return c # Drop data from prior test runs. @@ -2551,7 +2538,7 @@ def MongoClient(**kwargs): # Create two data keys. key_vault_client = MongoClient() - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() ) key1_id = client_encryption.create_data_key("local") @@ -2632,18 +2619,16 @@ def setUp(self): key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) self.key_vault_client = self.client - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS ) - self.addCleanup(self.client_encryption.close) opts = AutoEncryptionOpts( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, bypass_query_analysis=True, ) - self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) + self.encrypted_client = self.rs_or_single_client(auto_encryption_opts=opts) self.db = self.encrypted_client.db - self.addCleanup(self.encrypted_client.close) def run_expression_find( self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None @@ -2838,10 +2823,9 @@ def setUp(self): super().setUp() self.client.drop_database(self.db) self.key_vault_client = self.client - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS ) - self.addCleanup(self.client_encryption.close) self.key_id = self.client_encryption.create_data_key("local") opts = RangeOpts(min=0, max=1000) self.payload_defaults = self.client_encryption.encrypt( @@ -2874,13 +2858,12 @@ def setUp(self): self.client.drop_database(self.db) self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(self.key_vault.drop) - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, self.key_vault.full_name, self.client, OPTS, ) - self.addCleanup(self.client_encryption.close) def test_01_simple_create(self): coll, _ = self.client_encryption.create_encrypted_collection( @@ -3096,10 +3079,9 @@ def _tearDown_class(cls): def setUp(self) -> None: self.listener = OvertCommandListener() - self.mongocryptd_client = MongoClient( + self.mongocryptd_client = self.simple_client( f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] ) - self.addCleanup(self.mongocryptd_client.close) hello = self.mongocryptd_client.db.command("hello") self.assertNotIn("logicalSessionTimeoutMinutes", hello) diff --git a/test/test_examples.py b/test/test_examples.py index 296283db28..ebf1d784a3 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import rs_client, wait_until +from test.utils import wait_until import pymongo from pymongo.errors import ConnectionFailure, OperationFailure @@ -1128,7 +1128,7 @@ def update_employee_info(session): self.assertEqual(employee["status"], "Inactive") def MongoClient(_): - return rs_client() + return self.rs_client() uriString = None @@ -1220,7 +1220,7 @@ class TestVersionedApiExamples(IntegrationTest): def test_versioned_api(self): # Versioned API examples def MongoClient(_, server_api): - return rs_client(server_api=server_api, connect=False) + return self.rs_client(server_api=server_api, connect=False) uri = None @@ -1251,7 +1251,7 @@ def test_versioned_api_migration(self): ): self.skipTest("This test needs MongoDB 5.0.2 or newer") - client = rs_client(server_api=ServerApi("1", strict=True)) + client = self.rs_client(server_api=ServerApi("1", strict=True)) client.db.sales.drop() # Start Versioned API Example 5 diff --git a/test/test_grid_file.py b/test/test_grid_file.py index bd89235b73..fe88aec5ff 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] -from test.utils import EventListener, rs_or_single_client +from test.utils import EventListener from bson.objectid import ObjectId from gridfs.errors import NoFile @@ -790,7 +790,7 @@ def test_grid_out_lazy_connect(self): outfile.readchunk() def test_grid_in_lazy_connect(self): - client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) + client = self.simple_client("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = GridIn(fs, file_id=-1, chunk_size=1) with self.assertRaises(ServerSelectionTimeoutError): @@ -801,7 +801,7 @@ def test_grid_in_lazy_connect(self): def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - GridIn((rs_or_single_client(w=0)).pymongo_test.fs) + GridIn((self.rs_or_single_client(w=0)).pymongo_test.fs) def test_survive_cursor_not_found(self): # By default the find command returns 101 documents in the first batch. @@ -809,7 +809,7 @@ def test_survive_cursor_not_found(self): chunk_size = 1024 data = b"d" * (102 * chunk_size) listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test with GridIn(db.fs, chunk_size=chunk_size) as infile: infile.write(data) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 19ec152bd1..549dc0b204 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -26,7 +26,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import joinall, one, rs_client, rs_or_single_client, single_client +from test.utils import joinall, one import gridfs from bson.binary import Binary @@ -411,7 +411,7 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(f)) def test_gridfs_lazy_connect(self): - client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) + client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db gfs = gridfs.GridFS(db) self.assertRaises(ServerSelectionTimeoutError, gfs.list) @@ -492,7 +492,7 @@ def test_grid_in_non_int_chunksize(self): def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - gridfs.GridFS(rs_or_single_client(w=0).pymongo_test) + gridfs.GridFS(self.rs_or_single_client(w=0).pymongo_test) def test_md5(self): gin = self.fs.new_file() @@ -519,7 +519,7 @@ def tearDownClass(cls): client_context.client.drop_database("gfsreplica") def test_gridfs_replica_set(self): - rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) + rsc = self.rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) fs = gridfs.GridFS(rsc.gfsreplica, "gfsreplicatest") @@ -532,7 +532,7 @@ def test_gridfs_replica_set(self): def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) - secondary_connection = single_client( + secondary_connection = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY ) @@ -547,7 +547,7 @@ def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. secondary_host, secondary_port = one(self.client.secondaries) - client = single_client( + client = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False ) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index c3945d1053..28adb7051a 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -27,7 +27,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import joinall, one, rs_client, rs_or_single_client, single_client +from test.utils import joinall, one import gridfs from bson.binary import Binary @@ -345,7 +345,7 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(fstr)) def test_gridfs_lazy_connect(self): - client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0) + client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=0) cdb = client.db gfs = gridfs.GridFSBucket(cdb) self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0) @@ -391,7 +391,7 @@ def test_grid_in_non_int_chunksize(self): def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test) + gridfs.GridFSBucket(self.rs_or_single_client(w=0).pymongo_test) def test_rename(self): _id = self.fs.upload_from_stream("first_name", b"testing") @@ -489,7 +489,7 @@ def tearDownClass(cls): client_context.client.drop_database("gfsbucketreplica") def test_gridfs_replica_set(self): - rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) + rsc = self.rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") oid = gfs.upload_from_stream("test_filename", b"foo") @@ -498,7 +498,7 @@ def test_gridfs_replica_set(self): def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) - secondary_connection = single_client( + secondary_connection = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY ) @@ -513,7 +513,7 @@ def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. secondary_host, secondary_port = one(self.client.secondaries) - client = single_client( + client = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False ) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 1302df8fde..5e203a33b3 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -20,7 +20,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_knobs, unittest -from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until +from test.utils import HeartbeatEventListener, MockPool, wait_until from pymongo.errors import ConnectionFailure from pymongo.hello import Hello, HelloCompat @@ -40,7 +40,7 @@ def _check_with_socket(self, *args, **kwargs): raise responses[1] return Hello(responses[1]), 99 - m = single_client( + m = self.single_client( h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool ) diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index a4db7395f1..23bea4d984 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -26,7 +26,7 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import ExceptionCatchingThread, get_pool, rs_client, wait_until +from test.utils import ExceptionCatchingThread, get_pool, wait_until pytestmark = pytest.mark.load_balancer @@ -54,7 +54,7 @@ def test_connections_are_only_returned_once(self): @client_context.require_load_balancer def test_unpin_committed_transaction(self): - client = rs_client() + client = self.rs_client() self.addCleanup(client.close) pool = get_pool(client) coll = client[self.db.name].test @@ -85,7 +85,7 @@ def create_resource(coll): self._test_no_gc_deadlock(create_resource) def _test_no_gc_deadlock(self, create_resource): - client = rs_client() + client = self.rs_client() self.addCleanup(client.close) pool = get_pool(client) coll = client[self.db.name].test @@ -124,7 +124,7 @@ def _test_no_gc_deadlock(self, create_resource): @client_context.require_transactions def test_session_gc(self): - client = rs_client() + client = self.rs_client() self.addCleanup(client.close) pool = get_pool(client) session = client.start_session() diff --git a/test/test_logger.py b/test/test_logger.py index c0011ec3a5..b3c8e6d176 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -15,7 +15,6 @@ import os from test import IntegrationTest, unittest -from test.utils import single_client from unittest.mock import patch from bson import json_util @@ -85,7 +84,7 @@ def test_truncation_multi_byte_codepoints(self): self.assertEqual(last_3_bytes, str_to_repeat) def test_logging_without_listeners(self): - c = single_client() + c = self.single_client() self.assertEqual(len(c._event_listeners.event_listeners()), 0) with self.assertLogs("pymongo.connection", level="DEBUG") as cm: c.db.test.insert_one({"x": "1"}) diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 1b0130f7d8..32d09ada9a 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -20,15 +20,14 @@ import time import warnings +from pymongo import MongoClient from pymongo.operations import _Op sys.path[0:0] = [""] -from test import client_context, unittest -from test.utils import rs_or_single_client +from test import PyMongoTestCase, client_context, unittest from test.utils_selection_tests import create_selection_tests -from pymongo import MongoClient from pymongo.errors import ConfigurationError from pymongo.server_selectors import writable_server_selector @@ -40,54 +39,58 @@ class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore pass -class TestMaxStaleness(unittest.TestCase): +class TestMaxStaleness(PyMongoTestCase): def test_max_staleness(self): - client = MongoClient() + client = self.simple_client() self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary") + client = self.simple_client("mongodb://a/?readPreference=secondary") self.assertEqual(-1, client.read_preference.max_staleness) # These tests are specified in max-staleness-tests.rst. with self.assertRaises(ConfigurationError): # Default read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?maxStalenessSeconds=120") + self.simple_client("mongodb://a/?maxStalenessSeconds=120") with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") + self.simple_client("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") - client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") + client = self.simple_client("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") + client = self.simple_client("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&maxStalenessSeconds=120") + client = self.simple_client( + "mongodb://host/?readPreference=secondary&maxStalenessSeconds=120" + ) self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") + client = self.simple_client(maxStalenessSeconds=-1, readPreference="nearest") self.assertEqual(-1, client.read_preference.max_staleness) with self.assertRaises(TypeError): # Prohibit None. - MongoClient(maxStalenessSeconds=None, readPreference="nearest") + self.simple_client(maxStalenessSeconds=None, readPreference="nearest") def test_max_staleness_float(self): with self.assertRaises(TypeError) as ctx: - rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") + self.rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") self.assertIn("must be an integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest" + ) # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -96,13 +99,15 @@ def test_max_staleness_float(self): def test_max_staleness_zero(self): # Zero is too small. with self.assertRaises(ValueError) as ctx: - rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") + self.rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") self.assertIn("must be a positive integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0&readPreference=nearest") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=0&readPreference=nearest" + ) # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -111,7 +116,7 @@ def test_max_staleness_zero(self): @client_context.require_replica_set def test_last_write_date(self): # From max-staleness-tests.rst, "Parse lastWriteDate". - client = rs_or_single_client(heartbeatFrequencyMS=500) + client = self.rs_or_single_client(heartbeatFrequencyMS=500) client.pymongo_test.test.insert_one({}) # Wait for the server description to be updated. time.sleep(1) diff --git a/test/test_monitor.py b/test/test_monitor.py index fd82fc1ca4..f8e9443fae 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -18,6 +18,7 @@ import gc import subprocess import sys +import warnings from functools import partial sys.path[0:0] = [""] @@ -25,7 +26,6 @@ from test import IntegrationTest, connected, unittest from test.utils import ( ServerAndTopologyEventListener, - single_client, wait_until, ) @@ -47,30 +47,31 @@ def get_executors(client): return [e for e in executors if e is not None] -def create_client(): - listener = ServerAndTopologyEventListener() - client = single_client(event_listeners=[listener]) - connected(client) - return client - - class TestMonitor(IntegrationTest): + def create_client(self): + listener = ServerAndTopologyEventListener() + client = self.unmanaged_single_client(event_listeners=[listener]) + connected(client) + return client + def test_cleanup_executors_on_client_del(self): - client = create_client() - executors = get_executors(client) - self.assertEqual(len(executors), 4) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + client = self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) - # Each executor stores a weakref to itself in _EXECUTORS. - executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + # Each executor stores a weakref to itself in _EXECUTORS. + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] - del executors - del client + del executors + del client - for ref, name in executor_refs: - wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) + for ref, name in executor_refs: + wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) def test_cleanup_executors_on_client_close(self): - client = create_client() + client = self.create_client() executors = get_executors(client) self.assertEqual(len(executors), 4) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 8322e29918..a0c520ed27 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -31,8 +31,6 @@ ) from test.utils import ( EventListener, - rs_or_single_client, - single_client, wait_until, ) @@ -57,7 +55,9 @@ class TestCommandMonitoring(IntegrationTest): def _setup_class(cls): super()._setup_class() cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) + cls.client = cls.unmanaged_rs_or_single_client( + event_listeners=[cls.listener], retryWrites=False + ) @classmethod def _tearDown_class(cls): @@ -405,7 +405,7 @@ def test_get_more_failure(self): @client_context.require_secondaries_count(1) def test_not_primary_error(self): address = next(iter(client_context.client.secondaries)) - client = single_client(*address, event_listeners=[self.listener]) + client = self.single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. client.admin.command("ping") self.listener.reset() @@ -1144,7 +1144,7 @@ def _setup_class(cls): # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) - cls.client = single_client() + cls.client = cls.unmanaged_single_client() # Get one (authenticated) socket in the pool. cls.client.pymongo_test.command("ping") diff --git a/test/test_pooling.py b/test/test_pooling.py index 31259d7b3a..3b867965bd 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -31,7 +31,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import delay, get_pool, joinall, rs_or_single_client +from test.utils import delay, get_pool, joinall from pymongo.socket_checker import SocketChecker from pymongo.synchronous.pool import Pool, PoolOptions @@ -151,7 +151,7 @@ class _TestPoolingBase(IntegrationTest): def setUp(self): super().setUp() - self.c = rs_or_single_client() + self.c = self.rs_or_single_client() db = self.c[DB] db.unique.drop() db.test.drop() @@ -378,7 +378,7 @@ def test_checkout_more_than_max_pool_size(self): socket_info.close_conn(None) def test_maxConnecting(self): - client = rs_or_single_client() + client = self.rs_or_single_client() self.addCleanup(client.close) self.client.test.test.insert_one({}) self.addCleanup(self.client.test.test.delete_many, {}) @@ -415,7 +415,7 @@ def find_one(): @client_context.require_failCommand_appName def test_csot_timeout_message(self): - client = rs_or_single_client(appName="connectionTimeoutApp") + client = self.rs_or_single_client(appName="connectionTimeoutApp") self.addCleanup(client.close) # Mock an operation failing due to pymongo.timeout(). mock_connection_timeout = { @@ -440,7 +440,7 @@ def test_csot_timeout_message(self): @client_context.require_failCommand_appName def test_socket_timeout_message(self): - client = rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") + client = self.rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") self.addCleanup(client.close) # Mock an operation failing due to socketTimeoutMS. mock_connection_timeout = { @@ -479,7 +479,7 @@ def test_connection_timeout_message(self): }, } - client = rs_or_single_client( + client = self.rs_or_single_client( connectTimeoutMS=500, socketTimeoutMS=500, appName="connectionTimeoutApp", @@ -502,7 +502,7 @@ def test_connection_timeout_message(self): class TestPoolMaxSize(_TestPoolingBase): def test_max_pool_size(self): max_pool_size = 4 - c = rs_or_single_client(maxPoolSize=max_pool_size) + c = self.rs_or_single_client(maxPoolSize=max_pool_size) self.addCleanup(c.close) collection = c[DB].test @@ -538,7 +538,7 @@ def f(): self.assertEqual(0, cx_pool.requests) def test_max_pool_size_none(self): - c = rs_or_single_client(maxPoolSize=None) + c = self.rs_or_single_client(maxPoolSize=None) self.addCleanup(c.close) collection = c[DB].test @@ -570,7 +570,7 @@ def f(): self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): - c = rs_or_single_client(maxPoolSize=0) + c = self.rs_or_single_client(maxPoolSize=0) self.addCleanup(c.close) pool = get_pool(c) self.assertEqual(pool.max_pool_size, float("inf")) diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 97855872cf..ea9ce49a30 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context -from test.utils import OvertCommandListener, rs_or_single_client +from test.utils import OvertCommandListener from bson.son import SON from pymongo.errors import OperationFailure @@ -36,7 +36,7 @@ class TestReadConcern(IntegrationTest): def setUpClass(cls): super().setUpClass() cls.listener = OvertCommandListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test client_context.client.pymongo_test.create_collection("coll") @@ -67,7 +67,7 @@ def test_read_concern(self): def test_read_concern_uri(self): uri = f"mongodb://{client_context.pair}/?readConcernLevel=majority" - client = rs_or_single_client(uri, connect=False) + client = self.rs_or_single_client(uri, connect=False) self.assertEqual(ReadConcern("majority"), client.read_concern) def test_invalid_read_concern(self): diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 2cd3195f40..32883399e1 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -30,8 +30,6 @@ from test.utils import ( OvertCommandListener, one, - rs_client, - single_client, wait_until, ) from test.version import Version @@ -58,7 +56,7 @@ class TestSelections(IntegrationTest): @client_context.require_connection def test_bool(self): - client = single_client() + client = self.single_client() wait_until(lambda: client.address, "discover primary") selection = Selection.from_topology_description(client._topology.description) @@ -128,7 +126,7 @@ def read_from_which_kind(self, client): return None def assertReadsFrom(self, expected, **kwargs): - c = rs_client(**kwargs) + c = self.rs_client(**kwargs) wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") used = self.read_from_which_kind(c) @@ -139,7 +137,7 @@ class TestSingleSecondaryOk(TestReadPreferencesBase): def test_reads_from_secondary(self): host, port = next(iter(self.client.secondaries)) # Direct connection to a secondary. - client = single_client(host, port) + client = self.single_client(host, port) self.assertFalse(client.is_primary) # Regardless of read preference, we should be able to do @@ -175,19 +173,21 @@ def test_mode_validation(self): ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST, ): - self.assertEqual(mode, rs_client(read_preference=mode).read_preference) + self.assertEqual(mode, self.rs_client(read_preference=mode).read_preference) - self.assertRaises(TypeError, rs_client, read_preference="foo") + self.assertRaises(TypeError, self.rs_client, read_preference="foo") def test_tag_sets_validation(self): S = Secondary(tag_sets=[{}]) - self.assertEqual([{}], rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{}], self.rs_client(read_preference=S).read_preference.tag_sets) S = Secondary(tag_sets=[{"k": "v"}]) - self.assertEqual([{"k": "v"}], rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{"k": "v"}], self.rs_client(read_preference=S).read_preference.tag_sets) S = Secondary(tag_sets=[{"k": "v"}, {}]) - self.assertEqual([{"k": "v"}, {}], rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual( + [{"k": "v"}, {}], self.rs_client(read_preference=S).read_preference.tag_sets + ) self.assertRaises(ValueError, Secondary, tag_sets=[]) @@ -200,20 +200,22 @@ def test_tag_sets_validation(self): def test_threshold_validation(self): self.assertEqual( - 17, rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms + 17, self.rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms ) self.assertEqual( - 42, rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms + 42, self.rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms ) self.assertEqual( - 666, rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms + 666, self.rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms ) - self.assertEqual(0, rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms) + self.assertEqual( + 0, self.rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms + ) - self.assertRaises(ValueError, rs_client, localthresholdms=-1) + self.assertRaises(ValueError, self.rs_client, localthresholdms=-1) def test_zero_latency(self): ping_times: set = set() @@ -223,7 +225,7 @@ def test_zero_latency(self): for ping_time, host in zip(ping_times, self.client.nodes): ServerDescription._host_to_round_trip_time[host] = ping_time try: - client = connected(rs_client(readPreference="nearest", localThresholdMS=0)) + client = connected(self.rs_client(readPreference="nearest", localThresholdMS=0)) wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes") host = self.read_from_which_host(client) for _ in range(5): @@ -236,7 +238,7 @@ def test_primary(self): def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises(ConfigurationError, rs_client, tag_sets=[{"dc": "ny"}]) + self.assertRaises(ConfigurationError, self.rs_client, tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) @@ -250,7 +252,9 @@ def test_secondary_preferred(self): def test_nearest(self): # With high localThresholdMS, expect to read from any # member - c = rs_client(read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds + c = self.rs_client( + read_preference=ReadPreference.NEAREST, localThresholdMS=10000 + ) # 10 seconds data_members = {self.client.primary} | self.client.secondaries @@ -540,7 +544,7 @@ def test_send_hedge(self): if client_context.supports_secondary_read_pref: cases["secondary"] = Secondary listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) + client = self.rs_client(event_listeners=[listener]) self.addCleanup(client.close) client.admin.command("ping") for _mode, cls in cases.items(): @@ -667,13 +671,13 @@ def test_mongos_max_staleness(self): else: self.fail("mongos accepted invalid staleness") - coll = single_client( + coll = self.single_client( readPreference="secondaryPreferred", maxStalenessSeconds=120 ).pymongo_test.test # No error coll.find_one() - coll = single_client( + coll = self.single_client( readPreference="secondaryPreferred", maxStalenessSeconds=10 ).pymongo_test.test try: diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 3e37e8f9a5..67943d495d 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -24,12 +24,7 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import ( - EventListener, - disable_replication, - enable_replication, - rs_or_single_client, -) +from test.utils import EventListener from pymongo import DESCENDING from pymongo.errors import ( @@ -51,7 +46,7 @@ class TestReadWriteConcernSpec(IntegrationTest): def test_omit_default_read_write_concern(self): listener = EventListener() # Client with default readConcern and writeConcern - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). @@ -104,7 +99,9 @@ def insert_command_default_write_concern(): def assertWriteOpsRaise(self, write_concern, expected_exception): wc = write_concern.document # Set socket timeout to avoid indefinite stalls - client = rs_or_single_client(w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000) + client = self.rs_or_single_client( + w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000 + ) db = client.get_database("pymongo_test") coll = db.test @@ -167,9 +164,9 @@ def test_raise_write_concern_error(self): @client_context.require_test_commands def test_raise_wtimeout(self): self.addCleanup(client_context.client.drop_database, "pymongo_test") - self.addCleanup(enable_replication, client_context.client) + self.addCleanup(self.enable_replication, client_context.client) # Disable replication to guarantee a wtimeout error. - disable_replication(client_context.client) + self.disable_replication(client_context.client) self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) @client_context.require_failCommand_fail_point @@ -209,7 +206,7 @@ def test_error_includes_errInfo(self): @client_context.require_version_min(4, 9) def test_write_error_details_exposes_errinfo(self): listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) db = client.errinfotest self.addCleanup(client.drop_database, "errinfotest") diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index b0fa42a0c9..a1c72bb7b6 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -34,7 +34,6 @@ from test.utils import ( CMAPListener, OvertCommandListener, - rs_or_single_client, set_fail_point, ) @@ -93,7 +92,9 @@ def test_pool_paused_error_is_retryable(self): self.skipTest("Test is flakey on PyPy") cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) + client = self.rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() @@ -163,13 +164,13 @@ def test_retryable_reads_in_sharded_cluster_multiple_available(self): mongos_clients = [] for mongos in client_context.mongos_seeds().split(","): - client = rs_or_single_client(mongos) + client = self.rs_or_single_client(mongos) set_fail_point(client, fail_command) self.addCleanup(client.close) mongos_clients.append(client) listener = OvertCommandListener() - client = rs_or_single_client( + client = self.rs_or_single_client( client_context.mongos_seeds(), appName="retryableReadTest", event_listeners=[listener], diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 2938b7efaf..89454ad236 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -28,7 +28,6 @@ DeprecationFilter, EventListener, OvertCommandListener, - rs_or_single_client, set_fail_point, ) from test.version import Version @@ -145,7 +144,7 @@ def setUpClass(cls): # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() - cls.client = rs_or_single_client(retryWrites=True) + cls.client = cls.unmanaged_rs_or_single_client(retryWrites=True) cls.db = cls.client.pymongo_test @classmethod @@ -181,7 +180,9 @@ def setUpClass(cls): cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() cls.listener = OvertCommandListener() - cls.client = rs_or_single_client(retryWrites=True, event_listeners=[cls.listener]) + cls.client = cls.unmanaged_rs_or_single_client( + retryWrites=True, event_listeners=[cls.listener] + ) cls.db = cls.client.pymongo_test @classmethod @@ -204,7 +205,7 @@ def tearDown(self): def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) + client = self.rs_or_single_client(retryWrites=False, event_listeners=[listener]) self.addCleanup(client.close) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" @@ -297,7 +298,7 @@ def test_unsupported_single_statement(self): def test_server_selection_timeout_not_retried(self): """A ServerSelectionTimeoutError is not retried.""" listener = OvertCommandListener() - client = MongoClient( + client = self.simple_client( "somedomainthatdoesntexist.org", serverSelectionTimeoutMS=1, retryWrites=True, @@ -317,7 +318,7 @@ def test_retry_timeout_raises_original_error(self): original error. """ listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -443,13 +444,13 @@ def test_retryable_writes_in_sharded_cluster_multiple_available(self): mongos_clients = [] for mongos in client_context.mongos_seeds().split(","): - client = rs_or_single_client(mongos) + client = self.rs_or_single_client(mongos) set_fail_point(client, fail_command) self.addCleanup(client.close) mongos_clients.append(client) listener = OvertCommandListener() - client = rs_or_single_client( + client = self.rs_or_single_client( client_context.mongos_seeds(), appName="retryableWriteTest", event_listeners=[listener], @@ -492,7 +493,7 @@ def setUpClass(cls): @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) # Ensure collection exists. @@ -551,7 +552,9 @@ class TestPoolPausedError(IntegrationTest): def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) + client = self.rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() @@ -613,7 +616,7 @@ def test_returns_original_error_code( self, ): cmd_listener = InsertEventListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) client.test.test.drop() self.addCleanup(client.close) cmd_listener.reset() @@ -650,7 +653,7 @@ def test_increment_transaction_id_without_sending_command(self): the first attempt fails before sending the command. """ listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) self.addCleanup(client.close) topology = client._topology select_server = topology.select_server diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 8e0a3cbbb4..81b208d511 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -25,7 +25,6 @@ from test import IntegrationTest, client_context, client_knobs, unittest from test.utils import ( ServerAndTopologyEventListener, - rs_or_single_client, server_name_to_type, wait_until, ) @@ -279,7 +278,7 @@ def setUpClass(cls): cls.knobs.enable() cls.listener = ServerAndTopologyEventListener() retry_writes = client_context.supports_transactions() - cls.test_client = rs_or_single_client( + cls.test_client = cls.unmanaged_rs_or_single_client( event_listeners=[cls.listener], retryWrites=retry_writes ) cls.coll = cls.test_client[cls.client.db.name].test diff --git a/test/test_server_selection.py b/test/test_server_selection.py index d3526617f6..67e9716bf4 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -33,7 +33,6 @@ from test.utils import ( EventListener, FunctionCallRecorder, - rs_or_single_client, wait_until, ) from test.utils_selection_tests import ( @@ -76,7 +75,9 @@ def custom_selector(servers): # Initialize client with appropriate listeners. listener = EventListener() - client = rs_or_single_client(server_selector=custom_selector, event_listeners=[listener]) + client = self.rs_or_single_client( + server_selector=custom_selector, event_listeners=[listener] + ) self.addCleanup(client.close) coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll self.addCleanup(client.drop_database, "testdb") @@ -117,7 +118,7 @@ def test_selector_called(self): selector = FunctionCallRecorder(lambda x: x) # Client setup. - mongo_client = rs_or_single_client(server_selector=selector) + mongo_client = self.rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection self.addCleanup(mongo_client.close) self.addCleanup(mongo_client.drop_database, "testdb") diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 9dced595c9..8e030f61e8 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -22,7 +22,6 @@ OvertCommandListener, SpecTestCreator, get_pool, - rs_client, wait_until, ) from test.utils_selection_tests import create_topology @@ -134,7 +133,7 @@ def test_load_balancing(self): listener = OvertCommandListener() # PYTHON-2584: Use a large localThresholdMS to avoid the impact of # varying RTTs. - client = rs_client( + client = self.rs_client( client_context.mongos_seeds(), appName="loadBalancingTest", event_listeners=[listener], diff --git a/test/test_session.py b/test/test_session.py index 563b33c70e..9f94ded927 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -36,7 +36,6 @@ from test.utils import ( EventListener, ExceptionCatchingThread, - rs_or_single_client, wait_until, ) @@ -88,7 +87,7 @@ def _setup_class(cls): super()._setup_class() # Create a second client so we can make sure clients cannot share # sessions. - cls.client2 = rs_or_single_client() + cls.client2 = cls.unmanaged_rs_or_single_client() # Redact no commands, so we can test user-admin commands have "lsid". cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() @@ -103,7 +102,7 @@ def _tearDown_class(cls): def setUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() - self.client = rs_or_single_client( + self.client = self.rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener] ) self.addCleanup(self.client.close) @@ -200,7 +199,7 @@ def test_implicit_sessions_checkout(self): failures = 0 for _ in range(5): listener = EventListener() - client = rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -283,7 +282,7 @@ def test_end_session(self): def test_end_sessions(self): # Use a new client so that the tearDown hook does not error. listener = SessionTestListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) # Start many sessions. sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] for s in sessions: @@ -787,8 +786,7 @@ def _test_unacknowledged_ops(self, client, *ops): def test_unacknowledged_writes(self): # Ensure the collection exists. self.client.pymongo_test.test_unacked_writes.insert_one({}) - client = rs_or_single_client(w=0, event_listeners=[self.listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(w=0, event_listeners=[self.listener]) db = client.pymongo_test coll = db.test_unacked_writes ops: list = [ @@ -836,7 +834,7 @@ class TestCausalConsistency(UnitTest): @classmethod def _setup_class(cls): cls.listener = SessionTestListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) + cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) @classmethod def _tearDown_class(cls): @@ -1137,8 +1135,7 @@ def setUp(self): def test_cluster_time(self): listener = SessionTestListener() # Prevent heartbeats from updating $clusterTime between operations. - client = rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 405db14ac6..e01552bf7d 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] -from test import client_knobs, unittest +from test import PyMongoTestCase, client_knobs, unittest from test.utils import FunctionCallRecorder, wait_until import pymongo @@ -86,7 +86,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.disable() -class TestSrvPolling(unittest.TestCase): +class TestSrvPolling(PyMongoTestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), ("localhost.test.build.10gen.cc", 27018), @@ -167,7 +167,7 @@ def dns_resolver_response(): # Patch timeouts to ensure short test running times. with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING) + client = self.simple_client(self.CONNECTION_STRING) self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. with SrvPollingKnobs( @@ -231,7 +231,7 @@ def final_callback(): count_resolver_calls=True, ): # Client uses unpatched method to get initial nodelist - client = MongoClient(self.CONNECTION_STRING) + client = self.simple_client(self.CONNECTION_STRING) # Invalid DNS resolver response should not change nodelist. self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) @@ -264,8 +264,7 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=0) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -279,8 +278,7 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -295,8 +293,7 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) with SrvPollingKnobs(nodelist_callback=nodelist_callback): sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) final_topology = set(client.topology_description.server_descriptions()) @@ -305,8 +302,7 @@ def nodelist_callback(): def test_does_not_flipflop(self): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=1) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1) old = set(client.topology_description.server_descriptions()) sleep(4 * WAIT_TIME) new = set(client.topology_description.server_descriptions()) @@ -323,7 +319,7 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient( + client = self.simple_client( "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" ) with SrvPollingKnobs(nodelist_callback=nodelist_callback): @@ -340,7 +336,7 @@ def resolver_response(): min_srv_rescan_interval=WAIT_TIME, nodelist_callback=resolver_response, ): - client = MongoClient(self.CONNECTION_STRING) + client = self.simple_client(self.CONNECTION_STRING) self.assertRaises( AssertionError, self.assert_nodelist_change, modified, client, timeout=WAIT_TIME / 2 ) diff --git a/test/test_ssl.py b/test/test_ssl.py index 5b3855a82a..36d7ba12b6 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -24,6 +24,7 @@ from test import ( HAVE_IPADDRESS, IntegrationTest, + PyMongoTestCase, SkipTest, client_context, connected, @@ -82,45 +83,45 @@ # use 'localhost' for the hostname of all hosts. -class TestClientSSL(unittest.TestCase): +class TestClientSSL(PyMongoTestCase): @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") def test_no_ssl_module(self): # Explicit - self.assertRaises(ConfigurationError, MongoClient, ssl=True) + self.assertRaises(ConfigurationError, self.simple_client, ssl=True) # Implied - self.assertRaises(ConfigurationError, MongoClient, tlsCertificateKeyFile=CLIENT_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tlsCertificateKeyFile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") @ignore_deprecations def test_config_ssl(self): # Tests various ssl configurations - self.assertRaises(ValueError, MongoClient, ssl="foo") + self.assertRaises(ValueError, self.simple_client, ssl="foo") self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM ) - self.assertRaises(TypeError, MongoClient, ssl=0) - self.assertRaises(TypeError, MongoClient, ssl=5.5) - self.assertRaises(TypeError, MongoClient, ssl=[]) + self.assertRaises(TypeError, self.simple_client, ssl=0) + self.assertRaises(TypeError, self.simple_client, ssl=5.5) + self.assertRaises(TypeError, self.simple_client, ssl=[]) - self.assertRaises(IOError, MongoClient, tlsCertificateKeyFile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=True) - self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=[]) + self.assertRaises(IOError, self.simple_client, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=[]) # Test invalid combinations self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM ) - self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCAFile=CA_PEM) - self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCRLFile=CRL_PEM) self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsAllowInvalidCertificates=False + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidCertificates=False ) self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsAllowInvalidHostnames=False + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidHostnames=False ) self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsDisableOCSPEndpointCheck=False + ConfigurationError, self.simple_client, tls=False, tlsDisableOCSPEndpointCheck=False ) @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") @@ -174,7 +175,7 @@ def test_tlsCertificateKeyFilePassword(self): if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, - MongoClient, + self.simple_client, "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, @@ -184,7 +185,7 @@ def test_tlsCertificateKeyFilePassword(self): ) else: connected( - MongoClient( + self.simple_client( "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, @@ -201,7 +202,7 @@ def test_tlsCertificateKeyFilePassword(self): "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" ) connected( - MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + self.simple_client(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] ) @client_context.require_tlsCertificateKeyFile @@ -215,7 +216,7 @@ def test_cert_ssl_implicitly_set(self): # # test that setting tlsCertificateKeyFile causes ssl to be set to True - client = MongoClient( + client = self.simple_client( client_context.host, client_context.port, tlsAllowInvalidCertificates=True, @@ -223,7 +224,7 @@ def test_cert_ssl_implicitly_set(self): ) response = client.admin.command(HelloCompat.LEGACY_CMD) if "setName" in response: - client = MongoClient( + client = self.simple_client( client_context.pair, replicaSet=response["setName"], w=len(response["hosts"]), @@ -242,7 +243,7 @@ def test_cert_ssl_validation(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - client = MongoClient( + client = self.simple_client( "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -257,7 +258,7 @@ def test_cert_ssl_validation(self): "Cannot validate hostname in the certificate" ) - client = MongoClient( + client = self.simple_client( "localhost", replicaSet=response["setName"], w=len(response["hosts"]), @@ -270,7 +271,7 @@ def test_cert_ssl_validation(self): self.assertClientWorks(client) if HAVE_IPADDRESS: - client = MongoClient( + client = self.simple_client( "127.0.0.1", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -292,7 +293,7 @@ def test_cert_ssl_uri_support(self): "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" ) - client = MongoClient(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) + client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) self.assertClientWorks(client) @client_context.require_tlsCertificateKeyFile @@ -316,7 +317,7 @@ def test_cert_ssl_validation_hostname_matching(self): with self.assertRaises(ConnectionFailure): connected( - MongoClient( + self.simple_client( "server", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -328,7 +329,7 @@ def test_cert_ssl_validation_hostname_matching(self): ) connected( - MongoClient( + self.simple_client( "server", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -343,7 +344,7 @@ def test_cert_ssl_validation_hostname_matching(self): if "setName" in response: with self.assertRaises(ConnectionFailure): connected( - MongoClient( + self.simple_client( "server", replicaSet=response["setName"], ssl=True, @@ -356,7 +357,7 @@ def test_cert_ssl_validation_hostname_matching(self): ) connected( - MongoClient( + self.simple_client( "server", replicaSet=response["setName"], ssl=True, @@ -375,7 +376,7 @@ def test_tlsCRLFile_support(self): if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: self.assertRaises( ConfigurationError, - MongoClient, + self.simple_client, "localhost", ssl=True, tlsCAFile=CA_PEM, @@ -384,7 +385,7 @@ def test_tlsCRLFile_support(self): ) else: connected( - MongoClient( + self.simple_client( "localhost", ssl=True, tlsCAFile=CA_PEM, @@ -395,7 +396,7 @@ def test_tlsCRLFile_support(self): with self.assertRaises(ConnectionFailure): connected( - MongoClient( + self.simple_client( "localhost", ssl=True, tlsCAFile=CA_PEM, @@ -406,7 +407,7 @@ def test_tlsCRLFile_support(self): ) uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" - connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + connected(self.simple_client(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore uri_fmt = ( "mongodb://localhost/?ssl=true&tlsCRLFile=%s" @@ -414,7 +415,7 @@ def test_tlsCRLFile_support(self): ) with self.assertRaises(ConnectionFailure): connected( - MongoClient(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] ) @client_context.require_tlsCertificateKeyFile @@ -431,12 +432,14 @@ def test_validation_with_system_ca_certs(self): with self.assertRaises(ConnectionFailure): # Server cert is verified but hostname matching fails connected( - MongoClient("server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] + self.simple_client( + "server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] ) # Server cert is verified. Disable hostname matching. connected( - MongoClient( + self.simple_client( "server", ssl=True, tlsAllowInvalidHostnames=True, @@ -447,12 +450,14 @@ def test_validation_with_system_ca_certs(self): # Server cert and hostname are verified. connected( - MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials) # type: ignore[arg-type] + self.simple_client( + "localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] ) # Server cert and hostname are verified. connected( - MongoClient( + self.simple_client( "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", **self.credentials, # type: ignore[arg-type] ) @@ -472,7 +477,7 @@ def test_system_certs_config_error(self): ssl_support.HAVE_WINCERTSTORE = False try: with self.assertRaises(ConfigurationError): - MongoClient("mongodb://localhost/?ssl=true") + self.simple_client("mongodb://localhost/?ssl=true") finally: ssl_support.HAVE_CERTIFI = have_certifi ssl_support.HAVE_WINCERTSTORE = have_wincertstore @@ -536,7 +541,7 @@ def test_mongodb_x509_auth(self): ], ) - noauth = MongoClient( + noauth = self.simple_client( client_context.pair, ssl=True, tlsAllowInvalidCertificates=True, @@ -548,7 +553,7 @@ def test_mongodb_x509_auth(self): noauth.pymongo_test.test.find_one() listener = EventListener() - auth = MongoClient( + auth = self.simple_client( client_context.pair, authMechanism="MONGODB-X509", ssl=True, @@ -572,7 +577,7 @@ def test_mongodb_x509_auth(self): host, port, ) - client = MongoClient( + client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) self.addCleanup(client.close) @@ -580,7 +585,7 @@ def test_mongodb_x509_auth(self): client.pymongo_test.test.find_one() uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) - client = MongoClient( + client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) self.addCleanup(client.close) @@ -593,7 +598,7 @@ def test_mongodb_x509_auth(self): port, ) - bad_client = MongoClient( + bad_client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) self.addCleanup(bad_client.close) @@ -601,7 +606,7 @@ def test_mongodb_x509_auth(self): with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() - bad_client = MongoClient( + bad_client = self.simple_client( client_context.pair, username="not the username", authMechanism="MONGODB-X509", @@ -622,7 +627,7 @@ def test_mongodb_x509_auth(self): ) try: connected( - MongoClient( + self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, @@ -648,7 +653,7 @@ def remove(path): self.addCleanup(remove, temp_ca_bundle) # Add the CA cert file to the bundle. cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) - with MongoClient( + with self.simple_client( "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle ) as client: self.assertTrue(client.admin.command("ping")) diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 9bca899a48..b3b68703a4 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -24,8 +24,6 @@ from test.utils import ( HeartbeatEventListener, ServerEventListener, - rs_or_single_client, - single_client, wait_until, ) @@ -38,7 +36,7 @@ class TestStreamingProtocol(IntegrationTest): def test_failCommand_streaming(self): listener = ServerEventListener() hb_listener = HeartbeatEventListener() - client = rs_or_single_client( + client = self.rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName="failingHeartbeatTest", @@ -107,7 +105,7 @@ def test_streaming_rtt(self): }, } with self.fail_point(delay_hello): - client = rs_or_single_client( + client = self.rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name ) self.addCleanup(client.close) @@ -155,7 +153,7 @@ def test_monitor_waits_after_server_check_error(self): } with self.fail_point(fail_hello): start = time.time() - client = single_client( + client = self.single_client( appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 ) self.addCleanup(client.close) @@ -180,7 +178,7 @@ def test_monitor_waits_after_server_check_error(self): @client_context.require_failCommand_appName def test_heartbeat_awaited_flag(self): hb_listener = HeartbeatEventListener() - client = single_client( + client = self.single_client( event_listeners=[hb_listener], heartbeatFrequencyMS=500, appName="heartbeatEventAwaitedFlag", diff --git a/test/test_transactions.py b/test/test_transactions.py index c8c3c32d5b..3cecbe9d38 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -17,6 +17,7 @@ import sys from io import BytesIO +from test.utils_spec_runner import SpecRunner from gridfs.synchronous.grid_file import GridFS, GridFSBucket @@ -25,8 +26,6 @@ from test import IntegrationTest, client_context, unittest from test.utils import ( OvertCommandListener, - rs_client, - single_client, wait_until, ) from typing import List @@ -59,7 +58,18 @@ UNPIN_TEST_MAX_ATTEMPTS = 50 -class TestTransactions(IntegrationTest): +class TransactionsBase(SpecRunner): + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + if ( + "secondary" in self.id() + and not client_context.is_mongos + and not client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") + + +class TestTransactions(TransactionsBase): RUN_ON_SERVERLESS = True @client_context.require_transactions @@ -92,8 +102,7 @@ def test_transaction_options_validation(self): @client_context.require_transactions def test_transaction_write_concern_override(self): """Test txn overrides Client/Database/Collection write_concern.""" - client = rs_client(w=0) - self.addCleanup(client.close) + client = self.rs_client(w=0) db = client.test coll = db.test coll.insert_one({}) @@ -146,12 +155,11 @@ def test_transaction_write_concern_override(self): def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) + client = self.rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. coll.insert_one({}) - self.addCleanup(client.close) with client.start_session() as s: # Session is pinned to Mongos. with s.start_transaction(): @@ -174,12 +182,11 @@ def test_unpin_for_next_transaction(self): def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) + client = self.rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. coll.insert_one({}) - self.addCleanup(client.close) with client.start_session() as s: # Session is pinned to Mongos. with s.start_transaction(): @@ -303,11 +310,10 @@ def test_transaction_starts_with_batched_write(self): # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test coll.delete_many({}) listener.reset() - self.addCleanup(client.close) self.addCleanup(coll.drop) large_str = "\0" * (1 * 1024 * 1024) ops: List[InsertOne[RawBSONDocument]] = [ @@ -332,8 +338,7 @@ def test_transaction_starts_with_batched_write(self): @client_context.require_transactions def test_transaction_direct_connection(self): - client = single_client() - self.addCleanup(client.close) + client = self.single_client() coll = client.pymongo_test.test # Make sure the collection exists. @@ -389,14 +394,14 @@ def __exit__(self, exc_type, exc_val, exc_tb): client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout -class TestTransactionsConvenientAPI(IntegrationTest): +class TestTransactionsConvenientAPI(TransactionsBase): @classmethod def _setup_class(cls): super()._setup_class() cls.mongos_clients = [] if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(single_client("{}:{}".format(*address))) + cls.mongos_clients.append(cls.unmanaged_single_client("{}:{}".format(*address))) @classmethod def _tearDown_class(cls): @@ -446,8 +451,7 @@ def callback2(session): @client_context.require_transactions def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test def callback(session): @@ -475,8 +479,7 @@ def callback(session): @client_context.require_transactions def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test def callback(session): @@ -508,8 +511,7 @@ def callback(session): @client_context.require_transactions def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test def callback(session): diff --git a/test/test_typing.py b/test/test_typing.py index f423b70a3e..6cfe40537b 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -68,8 +68,7 @@ class ImplicitMovie(TypedDict): sys.path[0:0] = [""] -from test import IntegrationTest, client_context -from test.utils import rs_or_single_client +from test import IntegrationTest, PyMongoTestCase, client_context from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument @@ -194,7 +193,7 @@ def test_list_databases(self) -> None: value.items() def test_default_document_type(self) -> None: - client = rs_or_single_client() + client = self.rs_or_single_client() self.addCleanup(client.close) coll = client.test.test doc = {"my": "doc"} @@ -366,7 +365,7 @@ def test_bson_decode_file_iter_none_codec_option(self) -> None: doc["a"] = 2 -class TestDocumentType(unittest.TestCase): +class TestDocumentType(PyMongoTestCase): @only_type_check def test_default(self) -> None: client: MongoClient = MongoClient() @@ -480,7 +479,7 @@ def test_typeddict_empty_document_type(self) -> None: def test_typeddict_find_notrequired(self): if NotRequired is None or ImplicitMovie is None: raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") - client: MongoClient[ImplicitMovie] = rs_or_single_client() + client: MongoClient[ImplicitMovie] = self.rs_or_single_client() coll = client.test.test coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) out = coll.find_one({}) diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 7fe8ebd76f..7a25a507dc 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -20,7 +20,7 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import OvertCommandListener, rs_or_single_client +from test.utils import OvertCommandListener from pymongo.server_api import ServerApi, ServerApiVersion from pymongo.synchronous.mongo_client import MongoClient @@ -77,7 +77,7 @@ def assertServerApiInAllCommands(self, events): @client_context.require_version_min(4, 7) def test_command_options(self): listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.insert_many([{} for _ in range(100)]) @@ -90,7 +90,7 @@ def test_command_options(self): @client_context.require_transactions def test_command_options_txn(self): listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) self.addCleanup(client.close) coll = client.test.test coll.insert_many([{} for _ in range(100)]) diff --git a/test/unified_format.py b/test/unified_format.py index 78fc638787..62211d3d25 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -55,8 +55,6 @@ parse_collection_options, parse_spec_options, prepare_spec_arguments, - rs_or_single_client, - single_client, snake_to_camel, wait_until, ) @@ -574,7 +572,7 @@ def _create_entity(self, entity_spec, uri=None): ) if uri: kwargs["h"] = uri - client = rs_or_single_client(**kwargs) + client = self.test.rs_or_single_client(**kwargs) self[spec["id"]] = client self.test.addCleanup(client.close) return @@ -1115,7 +1113,7 @@ def setUpClass(cls): and not client_context.serverless ): for address in client_context.mongoses: - cls.mongos_clients.append(single_client("{}:{}".format(*address))) + cls.mongos_clients.append(cls.unmanaged_single_client("{}:{}".format(*address))) # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs( @@ -1646,7 +1644,7 @@ def _testOperation_targetedFailPoint(self, spec): ) ) - client = single_client("{}:{}".format(*session._pinned_address)) + client = self.single_client("{}:{}".format(*session._pinned_address)) self.addCleanup(client.close) self.__set_fail_point(client=client, command_args=spec["failPoint"]) diff --git a/test/utils.py b/test/utils.py index fa198b1c64..6eefd1c7ea 100644 --- a/test/utils.py +++ b/test/utils.py @@ -565,151 +565,6 @@ def create_tests(self): setattr(self._test_class, new_test.__name__, new_test) -def _connection_string(h): - if h.startswith(("mongodb://", "mongodb+srv://")): - return h - return f"mongodb://{h!s}" - - -def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): - """Create a new client over SSL/TLS if necessary.""" - host = host or client_context.host - port = port or client_context.port - client_options: dict = client_context.default_client_options.copy() - if client_context.replica_set_name and not directConnection: - client_options["replicaSet"] = client_context.replica_set_name - if directConnection is not None: - client_options["directConnection"] = directConnection - client_options.update(kwargs) - - uri = _connection_string(host) - auth_mech = kwargs.get("authMechanism", "") - if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": - # Only add the default username or password if one is not provided. - res = parse_uri(uri) - if ( - not res["username"] - and not res["password"] - and "username" not in client_options - and "password" not in client_options - ): - client_options["username"] = db_user - client_options["password"] = db_pwd - return MongoClient(uri, port, **client_options) - - -async def _async_mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): - """Create a new client over SSL/TLS if necessary.""" - host = host or await async_client_context.host - port = port or await async_client_context.port - client_options: dict = async_client_context.default_client_options.copy() - if async_client_context.replica_set_name and not directConnection: - client_options["replicaSet"] = async_client_context.replica_set_name - if directConnection is not None: - client_options["directConnection"] = directConnection - client_options.update(kwargs) - - uri = _connection_string(host) - auth_mech = kwargs.get("authMechanism", "") - if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": - # Only add the default username or password if one is not provided. - res = parse_uri(uri) - if ( - not res["username"] - and not res["password"] - and "username" not in client_options - and "password" not in client_options - ): - client_options["username"] = db_user - client_options["password"] = db_pwd - client = AsyncMongoClient(uri, port, **client_options) - if client._options.connect: - await client.aconnect() - return client - - -def single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: - """Make a direct connection. Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) - - -def single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: - """Make a direct connection, and authenticate if necessary.""" - return _mongo_client(h, p, directConnection=True, **kwargs) - - -def rs_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: - """Connect to the replica set. Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, **kwargs) - - -def rs_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: - """Connect to the replica set and authenticate if necessary.""" - return _mongo_client(h, p, **kwargs) - - -def rs_or_single_client_noauth(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: - """Connect to the replica set if there is one, otherwise the standalone. - - Like rs_or_single_client, but does not authenticate. - """ - return _mongo_client(h, p, authenticate=False, **kwargs) - - -def rs_or_single_client(h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]: - """Connect to the replica set if there is one, otherwise the standalone. - - Authenticates if necessary. - """ - return _mongo_client(h, p, **kwargs) - - -async def async_single_client_noauth( - h: Any = None, p: Any = None, **kwargs: Any -) -> AsyncMongoClient[dict]: - """Make a direct connection. Don't authenticate.""" - return await _async_mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) - - -async def async_single_client( - h: Any = None, p: Any = None, **kwargs: Any -) -> AsyncMongoClient[dict]: - """Make a direct connection, and authenticate if necessary.""" - return await _async_mongo_client(h, p, directConnection=True, **kwargs) - - -async def async_rs_client_noauth( - h: Any = None, p: Any = None, **kwargs: Any -) -> AsyncMongoClient[dict]: - """Connect to the replica set. Don't authenticate.""" - return await _async_mongo_client(h, p, authenticate=False, **kwargs) - - -async def async_rs_client(h: Any = None, p: Any = None, **kwargs: Any) -> AsyncMongoClient[dict]: - """Connect to the replica set and authenticate if necessary.""" - return await _async_mongo_client(h, p, **kwargs) - - -async def async_rs_or_single_client_noauth( - h: Any = None, p: Any = None, **kwargs: Any -) -> AsyncMongoClient[dict]: - """Connect to the replica set if there is one, otherwise the standalone. - - Like rs_or_single_client, but does not authenticate. - """ - return await _async_mongo_client(h, p, authenticate=False, **kwargs) - - -async def async_rs_or_single_client( - h: Any = None, p: Any = None, **kwargs: Any -) -> AsyncMongoClient[Any]: - """Connect to the replica set if there is one, otherwise the standalone. - - Authenticates if necessary. - """ - return await _async_mongo_client(h, p, **kwargs) - - def ensure_all_connected(client: MongoClient) -> None: """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a @@ -1108,20 +963,6 @@ def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() -def disable_replication(client): - """Disable replication on all secondaries.""" - for host, port in client.secondaries: - secondary = single_client(host, port) - secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") - - -def enable_replication(client): - """Enable replication on all secondaries.""" - for host, port in client.secondaries: - secondary = single_client(host, port) - secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") - - class ExceptionCatchingThread(threading.Thread): """A thread that stores any exception encountered from run().""" diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 0b882a8bc3..06a40351cd 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -29,7 +29,6 @@ camel_to_snake_args, parse_spec_options, prepare_spec_arguments, - rs_client, ) from typing import List @@ -101,6 +100,8 @@ def _setup_class(cls): @classmethod def _tearDown_class(cls): cls.knobs.disable() + for client in cls.mongos_clients: + client.close() super()._tearDown_class() def setUp(self): @@ -524,7 +525,7 @@ def run_scenario(self, scenario_def, test): host = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: host = client_context.mongos_seeds() - client = rs_client( + client = self.rs_client( h=host, event_listeners=[listener, pool_listener, server_listener], **client_options ) self.scenario_client = client From 163e3d4a0db5f49548040afde6a04db502431cd1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 17 Sep 2024 12:56:03 -0400 Subject: [PATCH 1486/2111] PYTHON-4738 - Make test_encryption.TestClientSimple.test_fork sync-only (#1862) --- test/asynchronous/test_encryption.py | 1 + test/test_encryption.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 3f3714eeb4..f29b0f824d 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -380,6 +380,7 @@ async def test_use_after_close(self): is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) + @async_client_context.require_sync async def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = await self.async_rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/test_encryption.py b/test/test_encryption.py index 96d40c4a34..512c92f4d1 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -380,6 +380,7 @@ def test_use_after_close(self): is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) + @client_context.require_sync def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = self.rs_or_single_client(auto_encryption_opts=opts) From 40ebc1644c89f352c35aa100ed6548023101b72a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 17 Sep 2024 15:16:55 -0500 Subject: [PATCH 1487/2111] PYTHON-4764 Update to use current supported EVG hosts (#1858) --- .evergreen/config.yml | 50 +++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a9dc669718..14e3426b32 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2081,10 +2081,10 @@ axes: batchtime: 10080 # 7 days - id: rhel8 display_name: "RHEL 8.x" - run_on: rhel87-small + run_on: rhel8.8-small batchtime: 10080 # 7 days - - id: rhel92-fips - display_name: "RHEL 9.2 FIPS" + - id: rhel9-fips + display_name: "RHEL 9 FIPS" run_on: rhel92-fips batchtime: 10080 # 7 days - id: ubuntu-22.04 @@ -2095,24 +2095,24 @@ axes: display_name: "Ubuntu 20.04" run_on: ubuntu2004-small batchtime: 10080 # 7 days - - id: rhel83-zseries - display_name: "RHEL 8.3 (zSeries)" - run_on: rhel83-zseries-small + - id: rhel8-zseries + display_name: "RHEL 8 (zSeries)" + run_on: rhel8-zseries-small batchtime: 10080 # 7 days variables: SKIP_HATCH: true - - id: rhel81-power8 - display_name: "RHEL 8.1 (POWER8)" - run_on: rhel81-power8-small + - id: rhel8-power8 + display_name: "RHEL 8 (POWER8)" + run_on: rhel8-power-small batchtime: 10080 # 7 days variables: SKIP_HATCH: true - - id: rhel82-arm64 - display_name: "RHEL 8.2 (ARM64)" + - id: rhel8-arm64 + display_name: "RHEL 8 (ARM64)" run_on: rhel82-arm64-small batchtime: 10080 # 7 days variables: - - id: windows-64-vsMulti-small + - id: windows display_name: "Windows 64" run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days @@ -2470,7 +2470,7 @@ buildvariants: - matrix_name: "tests-fips" matrix_spec: platform: - - rhel92-fips + - rhel9-fips auth: "auth" ssl: "ssl" display_name: "${platform} ${auth} ${ssl}" @@ -2547,9 +2547,9 @@ buildvariants: - matrix_name: "test-different-cpu-architectures" matrix_spec: platform: - - rhel83-zseries # Added in 5.0.8 (SERVER-44074) - - rhel81-power8 # Added in 4.2.7 (SERVER-44072) - - rhel82-arm64 # Added in 4.4.2 (SERVER-48282) + - rhel8-zseries # Added in 5.0.8 (SERVER-44074) + - rhel8-power8 # Added in 4.2.7 (SERVER-44072) + - rhel8-arm64 # Added in 4.4.2 (SERVER-48282) auth-ssl: "*" display_name: "${platform} ${auth-ssl}" tasks: @@ -2606,7 +2606,7 @@ buildvariants: - matrix_name: "tests-pyopenssl-windows" matrix_spec: - platform: windows-64-vsMulti-small + platform: windows python-version-windows: "*" auth: "auth" ssl: "ssl" @@ -2698,7 +2698,7 @@ buildvariants: - matrix_name: "tests-windows-python-version" matrix_spec: - platform: windows-64-vsMulti-small + platform: windows python-version-windows: "*" auth-ssl: "*" display_name: "${platform} ${python-version-windows} ${auth-ssl}" @@ -2706,7 +2706,7 @@ buildvariants: - matrix_name: "tests-windows-python-version-32-bit" matrix_spec: - platform: windows-64-vsMulti-small + platform: windows python-version-windows-32: "*" auth-ssl: "*" display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" @@ -2724,7 +2724,7 @@ buildvariants: - matrix_name: "tests-windows-encryption" matrix_spec: - platform: windows-64-vsMulti-small + platform: windows python-version-windows: "*" auth-ssl: "*" encryption: "*" @@ -2733,7 +2733,7 @@ buildvariants: rules: - if: encryption: ["encryption", "encryption_crypt_shared"] - platform: windows-64-vsMulti-small + platform: windows python-version-windows: "*" auth-ssl: "*" then: @@ -2795,7 +2795,7 @@ buildvariants: - matrix_name: "tests-windows-enterprise-auth" matrix_spec: - platform: windows-64-vsMulti-small + platform: windows python-version-windows: "*" auth: "auth" display_name: "Enterprise ${auth} ${platform} ${python-version-windows}" @@ -2907,7 +2907,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: - platform: windows-64-vsMulti-small + platform: windows python-version-windows: ["3.8", "3.10"] mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] auth: "noauth" @@ -2932,7 +2932,7 @@ buildvariants: - matrix_name: "oidc-auth-test" matrix_spec: - platform: [ rhel8, macos, windows-64-vsMulti-small ] + platform: [ rhel8, macos, windows ] display_name: "OIDC Auth ${platform}" tasks: - name: testoidc_task_group @@ -2981,7 +2981,7 @@ buildvariants: - matrix_name: "aws-auth-test-windows" matrix_spec: - platform: [windows-64-vsMulti-small] + platform: [windows] python-version-windows: "*" display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" tasks: From c136684047e54f30c8949d3c9be30017e7cd0213 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:38:24 -0700 Subject: [PATCH 1488/2111] PYTHON-4585 Cursor.to_list does not apply client's timeoutMS setting (#1860) --- pymongo/asynchronous/command_cursor.py | 3 +++ pymongo/asynchronous/cursor.py | 4 ++- pymongo/synchronous/command_cursor.py | 3 +++ pymongo/synchronous/cursor.py | 4 ++- test/asynchronous/test_cursor.py | 34 +++++++++++++++++++++++++- test/test_cursor.py | 34 +++++++++++++++++++++++++- 6 files changed, 78 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py index b2cd345f63..5a4559bd77 100644 --- a/pymongo/asynchronous/command_cursor.py +++ b/pymongo/asynchronous/command_cursor.py @@ -29,6 +29,7 @@ ) from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo import _csot from pymongo.asynchronous.cursor import _ConnectionManager from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure @@ -77,6 +78,7 @@ def __init__( self._address = address self._batch_size = batch_size self._max_await_time_ms = max_await_time_ms + self._timeout = self._collection.database.client.options.timeout self._session = session self._explicit_session = explicit_session self._killed = self._id == 0 @@ -385,6 +387,7 @@ async def __aenter__(self) -> AsyncCommandCursor[_DocumentType]: async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: await self.close() + @_csot.apply async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index bae77bb304..4b4bb52a8e 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -36,7 +36,7 @@ from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON -from pymongo import helpers_shared +from pymongo import _csot, helpers_shared from pymongo.asynchronous.helpers import anext from pymongo.collation import validate_collation_or_none from pymongo.common import ( @@ -196,6 +196,7 @@ def __init__( self._explain = False self._comment = comment self._max_time_ms = max_time_ms + self._timeout = self._collection.database.client.options.timeout self._max_await_time_ms: Optional[int] = None self._max: Optional[Union[dict[Any, Any], _Sort]] = max self._min: Optional[Union[dict[Any, Any], _Sort]] = min @@ -1290,6 +1291,7 @@ async def __aenter__(self) -> AsyncCursor[_DocumentType]: async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: await self.close() + @_csot.apply async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index da05bf1a3b..3a4372856a 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -29,6 +29,7 @@ ) from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo import _csot from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure from pymongo.message import ( @@ -77,6 +78,7 @@ def __init__( self._address = address self._batch_size = batch_size self._max_await_time_ms = max_await_time_ms + self._timeout = self._collection.database.client.options.timeout self._session = session self._explicit_session = explicit_session self._killed = self._id == 0 @@ -385,6 +387,7 @@ def __enter__(self) -> CommandCursor[_DocumentType]: def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() + @_csot.apply def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index c352b64098..27a76cf91d 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -36,7 +36,7 @@ from bson import RE_TYPE, _convert_raw_document_lists_to_streams from bson.code import Code from bson.son import SON -from pymongo import helpers_shared +from pymongo import _csot, helpers_shared from pymongo.collation import validate_collation_or_none from pymongo.common import ( validate_is_document_type, @@ -196,6 +196,7 @@ def __init__( self._explain = False self._comment = comment self._max_time_ms = max_time_ms + self._timeout = self._collection.database.client.options.timeout self._max_await_time_ms: Optional[int] = None self._max: Optional[Union[dict[Any, Any], _Sort]] = max self._min: Optional[Union[dict[Any, Any], _Sort]] = min @@ -1288,6 +1289,7 @@ def __enter__(self) -> Cursor[_DocumentType]: def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self.close() + @_csot.apply def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index d6773d832e..33eaacee96 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -34,6 +34,7 @@ AllowListEventListener, EventListener, OvertCommandListener, + delay, ignore_deprecations, wait_until, ) @@ -44,7 +45,7 @@ from pymongo.asynchronous.cursor import AsyncCursor, CursorType from pymongo.asynchronous.helpers import anext from pymongo.collation import Collation -from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference @@ -1410,6 +1411,18 @@ async def test_to_list_length(self): docs = await c.to_list(3) self.assertEqual(len(docs), 2) + async def test_to_list_csot_applied(self): + client = await self.async_single_client(timeoutMS=500) + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(2): + await client.admin.command("ping") + coll = client.pymongo.test + await coll.insert_many([{} for _ in range(5)]) + cursor = coll.find({"$where": delay(1)}) + with self.assertRaises(PyMongoError) as ctx: + await cursor.to_list() + self.assertTrue(ctx.exception.timeout) + @async_client_context.require_change_streams async def test_command_cursor_to_list(self): # Set maxAwaitTimeMS=1 to speed up the test. @@ -1439,6 +1452,25 @@ async def test_command_cursor_to_list_length(self): result = await db.test.aggregate([pipeline]) self.assertEqual(len(await result.to_list(1)), 1) + @async_client_context.require_failCommand_blockConnection + async def test_command_cursor_to_list_csot_applied(self): + client = await self.async_single_client(timeoutMS=500) + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(2): + await client.admin.command("ping") + coll = client.pymongo.test + await coll.insert_many([{} for _ in range(5)]) + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 5}, + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 1000}, + } + cursor = await coll.aggregate([], batchSize=1) + async with self.fail_point(fail_command): + with self.assertRaises(PyMongoError) as ctx: + await cursor.to_list() + self.assertTrue(ctx.exception.timeout) + class TestRawBatchCursor(AsyncIntegrationTest): async def test_find_raw(self): diff --git a/test/test_cursor.py b/test/test_cursor.py index 9bc22aca3c..d99732aec3 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -34,6 +34,7 @@ AllowListEventListener, EventListener, OvertCommandListener, + delay, ignore_deprecations, wait_until, ) @@ -42,7 +43,7 @@ from bson.code import Code from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation -from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference @@ -1401,6 +1402,18 @@ def test_to_list_length(self): docs = c.to_list(3) self.assertEqual(len(docs), 2) + def test_to_list_csot_applied(self): + client = self.single_client(timeoutMS=500) + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(2): + client.admin.command("ping") + coll = client.pymongo.test + coll.insert_many([{} for _ in range(5)]) + cursor = coll.find({"$where": delay(1)}) + with self.assertRaises(PyMongoError) as ctx: + cursor.to_list() + self.assertTrue(ctx.exception.timeout) + @client_context.require_change_streams def test_command_cursor_to_list(self): # Set maxAwaitTimeMS=1 to speed up the test. @@ -1430,6 +1443,25 @@ def test_command_cursor_to_list_length(self): result = db.test.aggregate([pipeline]) self.assertEqual(len(result.to_list(1)), 1) + @client_context.require_failCommand_blockConnection + def test_command_cursor_to_list_csot_applied(self): + client = self.single_client(timeoutMS=500) + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(2): + client.admin.command("ping") + coll = client.pymongo.test + coll.insert_many([{} for _ in range(5)]) + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 5}, + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 1000}, + } + cursor = coll.aggregate([], batchSize=1) + with self.fail_point(fail_command): + with self.assertRaises(PyMongoError) as ctx: + cursor.to_list() + self.assertTrue(ctx.exception.timeout) + class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): From 9a71be1615048a6f3aa2d23fa03693923572eeee Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 17 Sep 2024 17:54:09 -0700 Subject: [PATCH 1489/2111] PYTHON-4740 Convert asyncio.TimeoutError to socket.timeout for compat (#1864) --- pymongo/asynchronous/bulk.py | 4 - pymongo/asynchronous/client_bulk.py | 6 +- pymongo/network_layer.py | 110 ++++++++++++++-------------- pymongo/synchronous/bulk.py | 4 - pymongo/synchronous/client_bulk.py | 6 +- 5 files changed, 59 insertions(+), 71 deletions(-) diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index 9fd673693f..9d33a990ed 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -313,8 +313,6 @@ async def write_command( if isinstance(exc, (NotPrimaryError, OperationFailure)): await client._process_response(exc.details, bwc.session) # type: ignore[arg-type] raise - finally: - bwc.start_time = datetime.datetime.now() return reply # type: ignore[return-value] async def unack_write( @@ -403,8 +401,6 @@ async def unack_write( assert bwc.start_time is not None bwc._fail(request_id, failure, duration) raise - finally: - bwc.start_time = datetime.datetime.now() return result # type: ignore[return-value] async def _execute_batch_unack( diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 15a0369f41..dc800c9549 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -319,8 +319,6 @@ async def write_command( await self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] else: await self.client._process_response({}, bwc.session) # type: ignore[arg-type] - finally: - bwc.start_time = datetime.datetime.now() return reply # type: ignore[return-value] async def unack_write( @@ -410,9 +408,7 @@ async def unack_write( bwc._fail(request_id, failure, duration) # Top-level error will be embedded in ClientBulkWriteException. reply = {"error": exc} - finally: - bwc.start_time = datetime.datetime.now() - return result # type: ignore[return-value] + return reply async def _execute_batch_unack( self, diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index d99b4fee41..82a6228acc 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -64,65 +64,69 @@ async def async_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> Non loop = asyncio.get_event_loop() try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): - if sys.platform == "win32": - await asyncio.wait_for(_async_sendall_ssl_windows(sock, buf), timeout=timeout) - else: - await asyncio.wait_for(_async_sendall_ssl(sock, buf, loop), timeout=timeout) + await asyncio.wait_for(_async_sendall_ssl(sock, buf, loop), timeout=timeout) else: await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc finally: sock.settimeout(timeout) -async def _async_sendall_ssl( - sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop -) -> None: - view = memoryview(buf) - fd = sock.fileno() - sent = 0 - - def _is_ready(fut: Future) -> None: - loop.remove_writer(fd) - loop.remove_reader(fd) - if fut.done(): - return - fut.set_result(None) - - while sent < len(buf): - try: - sent += sock.send(view[sent:]) - except BLOCKING_IO_ERRORS as exc: - fd = sock.fileno() - # Check for closed socket. - if fd == -1: - raise SSLError("Underlying socket has been closed") from None - if isinstance(exc, BLOCKING_IO_READ_ERROR): - fut = loop.create_future() - loop.add_reader(fd, _is_ready, fut) - await fut - if isinstance(exc, BLOCKING_IO_WRITE_ERROR): - fut = loop.create_future() - loop.add_writer(fd, _is_ready, fut) - await fut - if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): - fut = loop.create_future() - loop.add_reader(fd, _is_ready, fut) - loop.add_writer(fd, _is_ready, fut) - await fut - - -# The default Windows asyncio event loop does not support loop.add_reader/add_writer: https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support -async def _async_sendall_ssl_windows(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: - view = memoryview(buf) - total_length = len(buf) - total_sent = 0 - while total_sent < total_length: - try: - sent = sock.send(view[total_sent:]) - except BLOCKING_IO_ERRORS: - await asyncio.sleep(0.5) - sent = 0 - total_sent += sent +if sys.platform != "win32": + + async def _async_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop + ) -> None: + view = memoryview(buf) + fd = sock.fileno() + sent = 0 + + def _is_ready(fut: Future) -> None: + loop.remove_writer(fd) + loop.remove_reader(fd) + if fut.done(): + return + fut.set_result(None) + + while sent < len(buf): + try: + sent += sock.send(view[sent:]) + except BLOCKING_IO_ERRORS as exc: + fd = sock.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + await fut + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + await fut + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + loop.add_writer(fd, _is_ready, fut) + await fut +else: + # The default Windows asyncio event loop does not support loop.add_reader/add_writer: + # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support + async def _async_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop + ) -> None: + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + while total_sent < total_length: + try: + sent = sock.send(view[total_sent:]) + except BLOCKING_IO_ERRORS: + await asyncio.sleep(0.5) + sent = 0 + total_sent += sent def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index 27fcff620c..c658157ea1 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -313,8 +313,6 @@ def write_command( if isinstance(exc, (NotPrimaryError, OperationFailure)): client._process_response(exc.details, bwc.session) # type: ignore[arg-type] raise - finally: - bwc.start_time = datetime.datetime.now() return reply # type: ignore[return-value] def unack_write( @@ -403,8 +401,6 @@ def unack_write( assert bwc.start_time is not None bwc._fail(request_id, failure, duration) raise - finally: - bwc.start_time = datetime.datetime.now() return result # type: ignore[return-value] def _execute_batch_unack( diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 23af231d16..f41f0203f2 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -319,8 +319,6 @@ def write_command( self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] else: self.client._process_response({}, bwc.session) # type: ignore[arg-type] - finally: - bwc.start_time = datetime.datetime.now() return reply # type: ignore[return-value] def unack_write( @@ -410,9 +408,7 @@ def unack_write( bwc._fail(request_id, failure, duration) # Top-level error will be embedded in ClientBulkWriteException. reply = {"error": exc} - finally: - bwc.start_time = datetime.datetime.now() - return result # type: ignore[return-value] + return reply def _execute_batch_unack( self, From 6d472a10a1a0cf511ef42c57cda5545ff78d2c27 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 17 Sep 2024 20:00:06 -0500 Subject: [PATCH 1490/2111] PYTHON-4738 Skip encryption test_fork on PyPy (#1865) --- test/asynchronous/test_encryption.py | 1 + test/test_encryption.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index f29b0f824d..c3f6223384 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -380,6 +380,7 @@ async def test_use_after_close(self): is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) + @unittest.skipIf("PyPy" in sys.version, "PYTHON-4738 fails often on PyPy") @async_client_context.require_sync async def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") diff --git a/test/test_encryption.py b/test/test_encryption.py index 512c92f4d1..43c85e2c5b 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -380,6 +380,7 @@ def test_use_after_close(self): is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) + @unittest.skipIf("PyPy" in sys.version, "PYTHON-4738 fails often on PyPy") @client_context.require_sync def test_fork(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") From 2c432b580baf842318d9c0628fcda001da248494 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 18 Sep 2024 09:23:07 -0400 Subject: [PATCH 1491/2111] PYTHON-4768 - Fix atlas connection tests and cleanup uses of raw MongoClients in tests (#1867) --- test/atlas/test_connection.py | 34 +++++++++---------- test/mockupdb/test_auth_recovering_member.py | 7 ++-- test/mockupdb/test_cluster_time.py | 9 +++-- test/mockupdb/test_cursor_namespace.py | 9 ++--- test/mockupdb/test_getmore_sharded.py | 6 ++-- test/mockupdb/test_initial_ismaster.py | 6 ++-- test/mockupdb/test_list_indexes.py | 7 ++-- test/mockupdb/test_max_staleness.py | 9 +++-- test/mockupdb/test_mixed_version_sharded.py | 5 +-- test/mockupdb/test_op_msg.py | 5 +-- test/mockupdb/test_op_msg_read_preference.py | 11 +++--- test/mockupdb/test_query_read_pref_sharded.py | 6 ++-- test/mockupdb/test_reset_and_request_check.py | 5 +-- test/test_index_management.py | 10 +++--- test/test_retryable_reads.py | 11 +++--- 15 files changed, 69 insertions(+), 71 deletions(-) diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 762ac30115..4dcbba6d11 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -19,6 +19,7 @@ import sys import unittest from collections import defaultdict +from test import PyMongoTestCase import pytest @@ -46,38 +47,37 @@ } -def connect(uri): - if not uri: - raise Exception("Must set env variable to test.") - client = pymongo.MongoClient(uri) - # No TLS error - client.admin.command("ping") - # No auth error - client.test.test.count_documents({}) +class TestAtlasConnect(PyMongoTestCase): + def connect(self, uri): + if not uri: + raise Exception("Must set env variable to test.") + client = self.simple_client(uri) + # No TLS error + client.admin.command("ping") + # No auth error + client.test.test.count_documents({}) - -class TestAtlasConnect(unittest.TestCase): @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") def test_free_tier(self): - connect(URIS["ATLAS_FREE"]) + self.connect(URIS["ATLAS_FREE"]) def test_replica_set(self): - connect(URIS["ATLAS_REPL"]) + self.connect(URIS["ATLAS_REPL"]) def test_sharded_cluster(self): - connect(URIS["ATLAS_SHRD"]) + self.connect(URIS["ATLAS_SHRD"]) def test_tls_11(self): - connect(URIS["ATLAS_TLS11"]) + self.connect(URIS["ATLAS_TLS11"]) def test_tls_12(self): - connect(URIS["ATLAS_TLS12"]) + self.connect(URIS["ATLAS_TLS12"]) def test_serverless(self): - connect(URIS["ATLAS_SERVERLESS"]) + self.connect(URIS["ATLAS_SERVERLESS"]) def connect_srv(self, uri): - connect(uri) + self.connect(uri) self.assertIn("mongodb+srv://", uri) @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py index 95a83a11a7..6eadafaf38 100644 --- a/test/mockupdb/test_auth_recovering_member.py +++ b/test/mockupdb/test_auth_recovering_member.py @@ -14,6 +14,7 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase import pytest @@ -30,7 +31,7 @@ pytestmark = pytest.mark.mockupdb -class TestAuthRecoveringMember(unittest.TestCase): +class TestAuthRecoveringMember(PyMongoTestCase): def test_auth_recovering_member(self): # Test that we don't attempt auth against a recovering RS member. server = MockupDB() @@ -48,12 +49,10 @@ def test_auth_recovering_member(self): server.run() self.addCleanup(server.stop) - client = MongoClient( + client = self.simple_client( server.uri, replicaSet="rs", serverSelectionTimeoutMS=100, socketTimeoutMS=100 ) - self.addCleanup(client.close) - # Should see there's no primary or secondary and raise selection timeout # error. If it raises AutoReconnect we know it actually tried the # server, and that's wrong. diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index 9794843175..761415951b 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -16,6 +16,7 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase import pytest @@ -34,7 +35,7 @@ pytestmark = pytest.mark.mockupdb -class TestClusterTime(unittest.TestCase): +class TestClusterTime(PyMongoTestCase): def cluster_time_conversation(self, callback, replies, max_wire_version=6): cluster_time = Timestamp(0, 0) server = MockupDB() @@ -52,8 +53,7 @@ def cluster_time_conversation(self, callback, replies, max_wire_version=6): server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) with going(callback, client): for reply in replies: @@ -118,8 +118,7 @@ def test_monitor(self): server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri, heartbeatFrequencyMS=500) - self.addCleanup(client.close) + client = self.simple_client(server.uri, heartbeatFrequencyMS=500) request = server.receives("ismaster") # No $clusterTime in first ismaster, only in subsequent ones diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 57a98373fc..455a3a923b 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -16,6 +16,7 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase import pytest @@ -32,7 +33,7 @@ pytestmark = pytest.mark.mockupdb -class TestCursorNamespace(unittest.TestCase): +class TestCursorNamespace(PyMongoTestCase): server: MockupDB client: MongoClient @@ -40,7 +41,7 @@ class TestCursorNamespace(unittest.TestCase): def setUpClass(cls): cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) cls.server.run() - cls.client = MongoClient(cls.server.uri) + cls.client = cls.unmanaged_simple_client(cls.server.uri) @classmethod def tearDownClass(cls): @@ -88,7 +89,7 @@ def op(): self._test_cursor_namespace(op, "listIndexes") -class TestKillCursorsNamespace(unittest.TestCase): +class TestKillCursorsNamespace(PyMongoTestCase): server: MockupDB client: MongoClient @@ -96,7 +97,7 @@ class TestKillCursorsNamespace(unittest.TestCase): def setUpClass(cls): cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) cls.server.run() - cls.client = MongoClient(cls.server.uri) + cls.client = cls.unmanaged_simple_client(cls.server.uri) @classmethod def tearDownClass(cls): diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index cf26b10a15..8ba291e4a7 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -17,6 +17,7 @@ import unittest from queue import Queue +from test import PyMongoTestCase import pytest @@ -33,7 +34,7 @@ pytestmark = pytest.mark.mockupdb -class TestGetmoreSharded(unittest.TestCase): +class TestGetmoreSharded(PyMongoTestCase): def test_getmore_sharded(self): servers = [MockupDB(), MockupDB()] @@ -47,11 +48,10 @@ def test_getmore_sharded(self): server.run() self.addCleanup(server.stop) - client = MongoClient( + client = self.simple_client( "mongodb://%s:%d,%s:%d" % (servers[0].host, servers[0].port, servers[1].host, servers[1].port) ) - self.addCleanup(client.close) collection = client.db.collection cursor = collection.find() with going(next, cursor): diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py index 8046f08db5..3eae98716b 100644 --- a/test/mockupdb/test_initial_ismaster.py +++ b/test/mockupdb/test_initial_ismaster.py @@ -15,6 +15,7 @@ import time import unittest +from test import PyMongoTestCase import pytest @@ -31,15 +32,14 @@ pytestmark = pytest.mark.mockupdb -class TestInitialIsMaster(unittest.TestCase): +class TestInitialIsMaster(PyMongoTestCase): def test_initial_ismaster(self): server = MockupDB() server.run() self.addCleanup(server.stop) start = time.time() - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) # A single ismaster is enough for the client to be connected. self.assertFalse(client.nodes) diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index ce6db9e104..ff3363664b 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -16,6 +16,7 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase import pytest @@ -28,18 +29,16 @@ from bson import SON -from pymongo import MongoClient pytestmark = pytest.mark.mockupdb -class TestListIndexes(unittest.TestCase): +class TestListIndexes(PyMongoTestCase): def test_list_indexes_command(self): server = MockupDB(auto_ismaster={"maxWireVersion": 6}) server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) with going(client.test.collection.list_indexes) as cursor: request = server.receives(listIndexes="collection", namespace="test") request.reply({"cursor": {"firstBatch": [{"name": "index_0"}], "id": 123}}) diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py index 40cf7ef00d..7275aaf44b 100644 --- a/test/mockupdb/test_max_staleness.py +++ b/test/mockupdb/test_max_staleness.py @@ -14,6 +14,7 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase import pytest @@ -30,7 +31,7 @@ pytestmark = pytest.mark.mockupdb -class TestMaxStalenessMongos(unittest.TestCase): +class TestMaxStalenessMongos(PyMongoTestCase): def test_mongos(self): mongos = MockupDB() mongos.autoresponds("ismaster", maxWireVersion=6, ismaster=True, msg="isdbgrid") @@ -40,8 +41,7 @@ def test_mongos(self): # No maxStalenessSeconds. uri = "mongodb://localhost:%d/?readPreference=secondary" % mongos.port - client = MongoClient(uri) - self.addCleanup(client.close) + client = self.simple_client(uri) with going(client.db.coll.find_one) as future: request = mongos.receives() self.assertNotIn("maxStalenessSeconds", request.doc["$readPreference"]) @@ -60,8 +60,7 @@ def test_mongos(self): "&maxStalenessSeconds=1" % mongos.port ) - client = MongoClient(uri) - self.addCleanup(client.close) + client = self.simple_client(uri) with going(client.db.coll.find_one) as future: request = mongos.receives() self.assertEqual(1, request.doc["$readPreference"]["maxStalenessSeconds"]) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index 72b42deacc..99d428b5d9 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -18,6 +18,7 @@ import time import unittest from queue import Queue +from test import PyMongoTestCase import pytest @@ -35,7 +36,7 @@ pytestmark = pytest.mark.mockupdb -class TestMixedVersionSharded(unittest.TestCase): +class TestMixedVersionSharded(PyMongoTestCase): def setup_server(self, upgrade): self.mongos_old, self.mongos_new = MockupDB(), MockupDB() @@ -62,7 +63,7 @@ def setup_server(self, upgrade): self.mongos_new.address_string, ) - self.client = MongoClient(self.mongoses_uri) + self.client = self.simple_client(self.mongoses_uri) def tearDown(self): if hasattr(self, "client") and self.client: diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py index 776d1644dd..4b85c5a48a 100644 --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -15,6 +15,7 @@ import unittest from collections import namedtuple +from test import PyMongoTestCase import pytest @@ -273,7 +274,7 @@ operations_312 = [] -class TestOpMsg(unittest.TestCase): +class TestOpMsg(PyMongoTestCase): server: MockupDB client: MongoClient @@ -281,7 +282,7 @@ class TestOpMsg(unittest.TestCase): def setUpClass(cls): cls.server = MockupDB(auto_ismaster=True, max_wire_version=8) cls.server.run() - cls.client = MongoClient(cls.server.uri) + cls.client = cls.unmanaged_simple_client(cls.server.uri) @classmethod def tearDownClass(cls): diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index c7c7037f23..86293d0c09 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -16,6 +16,7 @@ import copy import itertools import unittest +from test import PyMongoTestCase from typing import Any import pytest @@ -39,7 +40,7 @@ pytestmark = pytest.mark.mockupdb -class OpMsgReadPrefBase(unittest.TestCase): +class OpMsgReadPrefBase(PyMongoTestCase): single_mongod = False primary: MockupDB secondary: MockupDB @@ -53,8 +54,7 @@ def add_test(cls, mode, test_name, test): setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, read_preference=read_preference) - self.addCleanup(client.close) + client = self.simple_client(self.primary.uri, read_preference=read_preference) return client @@ -115,12 +115,13 @@ def add_test(cls, mode, test_name, test): setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, replicaSet="rs", read_preference=read_preference) + client = self.simple_client( + self.primary.uri, replicaSet="rs", read_preference=read_preference + ) # Run a command on a secondary to discover the topology. This ensures # that secondaryPreferred commands will select the secondary. client.admin.command("ismaster", read_preference=ReadPreference.SECONDARY) - self.addCleanup(client.close) return client diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 6276ee7789..676e71b711 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -16,6 +16,7 @@ from __future__ import annotations import unittest +from test import PyMongoTestCase import pytest @@ -40,7 +41,7 @@ pytestmark = pytest.mark.mockupdb -class TestQueryAndReadModeSharded(unittest.TestCase): +class TestQueryAndReadModeSharded(PyMongoTestCase): def test_query_and_read_mode_sharded_op_msg(self): """Test OP_MSG sends non-primary $readPreference and never $query.""" server = MockupDB() @@ -50,8 +51,7 @@ def test_query_and_read_mode_sharded_op_msg(self): server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) read_prefs = ( Primary(), diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py index 2919025f09..dd6ad46b1e 100644 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -16,6 +16,7 @@ import itertools import time import unittest +from test import PyMongoTestCase import pytest @@ -37,7 +38,7 @@ pytestmark = pytest.mark.mockupdb -class TestResetAndRequestCheck(unittest.TestCase): +class TestResetAndRequestCheck(PyMongoTestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.ismaster_time = 0.0 @@ -58,7 +59,7 @@ def responder(request): kwargs = {"socketTimeoutMS": 100} # Disable retryable reads when pymongo supports it. kwargs["retryReads"] = False - self.client = MongoClient(self.server.uri, **kwargs) # type: ignore + self.client = self.simple_client(self.server.uri, **kwargs) # type: ignore wait_until(lambda: self.client.nodes, "connect to standalone") def tearDown(self): diff --git a/test/test_index_management.py b/test/test_index_management.py index 426d0c7349..ec1e363737 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -25,11 +25,10 @@ sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import IntegrationTest, PyMongoTestCase, unittest from test.unified_format import generate_test_classes from test.utils import AllowListEventListener, EventListener -from pymongo import MongoClient from pymongo.errors import OperationFailure from pymongo.operations import SearchIndexModel from pymongo.read_concern import ReadConcern @@ -47,8 +46,7 @@ def test_inputs(self): if not os.environ.get("TEST_INDEX_MANAGEMENT"): raise unittest.SkipTest("Skipping index management tests") listener = AllowListEventListener("createSearchIndexes") - client = MongoClient(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.simple_client(event_listeners=[listener]) coll = client.test.test coll.drop() definition = dict(mappings=dict(dynamic=True)) @@ -79,7 +77,7 @@ def test_inputs(self): ) -class SearchIndexIntegrationBase(unittest.TestCase): +class SearchIndexIntegrationBase(PyMongoTestCase): db_name = "test_search_index_base" @classmethod @@ -91,7 +89,7 @@ def setUpClass(cls) -> None: username = os.environ["DB_USER"] password = os.environ["DB_PASSWORD"] cls.listener = listener = EventListener() - cls.client = MongoClient( + cls.client = cls.unmanaged_simple_client( url, username=username, password=password, event_listeners=[listener] ) cls.client.drop_database(_NAME) diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index a1c72bb7b6..b4fafe4652 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -43,7 +43,6 @@ ConnectionCheckOutFailedReason, PoolClearedEvent, ) -from pymongo.synchronous.mongo_client import MongoClient # Location of JSON test specifications. _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") @@ -51,19 +50,19 @@ class TestClientOptions(PyMongoTestCase): def test_default(self): - client = MongoClient(connect=False) + client = self.simple_client(connect=False) self.assertEqual(client.options.retry_reads, True) def test_kwargs(self): - client = MongoClient(retryReads=True, connect=False) + client = self.simple_client(retryReads=True, connect=False) self.assertEqual(client.options.retry_reads, True) - client = MongoClient(retryReads=False, connect=False) + client = self.simple_client(retryReads=False, connect=False) self.assertEqual(client.options.retry_reads, False) def test_uri(self): - client = MongoClient("mongodb://h/?retryReads=true", connect=False) + client = self.simple_client("mongodb://h/?retryReads=true", connect=False) self.assertEqual(client.options.retry_reads, True) - client = MongoClient("mongodb://h/?retryReads=false", connect=False) + client = self.simple_client("mongodb://h/?retryReads=false", connect=False) self.assertEqual(client.options.retry_reads, False) From 699d9627583081ff1b09b8932acbc95e09e7beb4 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 14:15:44 +0000 Subject: [PATCH 1492/2111] BUMP 4.9 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 537a340cce..75ec22cc5a 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.9.0.dev0" +__version__ = "4.9" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 2ddd16de5a35cb457c7600d9585bb88351fc67d6 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 14:33:00 +0000 Subject: [PATCH 1493/2111] BUMP 4.10.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 75ec22cc5a..5ff72d6cc8 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.9" +__version__ = "4.10.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From d0772f21619a44f7dc511e067c86c48e7f1f8c6f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 18 Sep 2024 18:09:20 -0400 Subject: [PATCH 1494/2111] PYTHON-4773 - Async PyMongo Beta docs update (#1868) --- doc/api/pymongo/asynchronous/change_stream.rst | 5 +++++ doc/api/pymongo/asynchronous/client_session.rst | 5 +++++ doc/api/pymongo/asynchronous/collection.rst | 5 +++++ doc/api/pymongo/asynchronous/command_cursor.rst | 5 +++++ doc/api/pymongo/asynchronous/cursor.rst | 5 +++++ doc/api/pymongo/asynchronous/database.rst | 5 +++++ doc/api/pymongo/asynchronous/index.rst | 5 +++++ doc/api/pymongo/asynchronous/mongo_client.rst | 5 +++++ doc/changelog.rst | 9 ++++++++- pymongo/asynchronous/mongo_client.py | 2 ++ tools/synchro.py | 11 ++++++++++- 11 files changed, 60 insertions(+), 2 deletions(-) diff --git a/doc/api/pymongo/asynchronous/change_stream.rst b/doc/api/pymongo/asynchronous/change_stream.rst index 2ba0feb5c7..df4f5dee41 100644 --- a/doc/api/pymongo/asynchronous/change_stream.rst +++ b/doc/api/pymongo/asynchronous/change_stream.rst @@ -1,5 +1,10 @@ :mod:`change_stream` -- Watch changes on a collection, database, or cluster =========================================================================== +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.change_stream :members: diff --git a/doc/api/pymongo/asynchronous/client_session.rst b/doc/api/pymongo/asynchronous/client_session.rst index 1e74e1be70..c4bbd8edd2 100644 --- a/doc/api/pymongo/asynchronous/client_session.rst +++ b/doc/api/pymongo/asynchronous/client_session.rst @@ -1,5 +1,10 @@ :mod:`client_session` -- Logical sessions for sequential operations =================================================================== +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.client_session :members: diff --git a/doc/api/pymongo/asynchronous/collection.rst b/doc/api/pymongo/asynchronous/collection.rst index 779557ff65..ce1fe3ca04 100644 --- a/doc/api/pymongo/asynchronous/collection.rst +++ b/doc/api/pymongo/asynchronous/collection.rst @@ -1,6 +1,11 @@ :mod:`collection` -- Collection level operations ================================================ +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.collection :synopsis: Collection level operations diff --git a/doc/api/pymongo/asynchronous/command_cursor.rst b/doc/api/pymongo/asynchronous/command_cursor.rst index 41a8f617e1..7058563eee 100644 --- a/doc/api/pymongo/asynchronous/command_cursor.rst +++ b/doc/api/pymongo/asynchronous/command_cursor.rst @@ -1,6 +1,11 @@ :mod:`command_cursor` -- Tools for iterating over MongoDB command results ========================================================================= +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.command_cursor :synopsis: Tools for iterating over MongoDB command results :members: diff --git a/doc/api/pymongo/asynchronous/cursor.rst b/doc/api/pymongo/asynchronous/cursor.rst index ff7103c013..d357b84514 100644 --- a/doc/api/pymongo/asynchronous/cursor.rst +++ b/doc/api/pymongo/asynchronous/cursor.rst @@ -1,6 +1,11 @@ :mod:`cursor` -- Tools for iterating over MongoDB query results =============================================================== +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.cursor :synopsis: Tools for iterating over MongoDB query results diff --git a/doc/api/pymongo/asynchronous/database.rst b/doc/api/pymongo/asynchronous/database.rst index afd6959c9a..b45fe457e7 100644 --- a/doc/api/pymongo/asynchronous/database.rst +++ b/doc/api/pymongo/asynchronous/database.rst @@ -1,6 +1,11 @@ :mod:`database` -- Database level operations ============================================ +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.database :synopsis: Database level operations diff --git a/doc/api/pymongo/asynchronous/index.rst b/doc/api/pymongo/asynchronous/index.rst index 25dfac6323..1b41fb8222 100644 --- a/doc/api/pymongo/asynchronous/index.rst +++ b/doc/api/pymongo/asynchronous/index.rst @@ -1,6 +1,11 @@ :mod:`pymongo async` -- Async Python driver for MongoDB ======================================================= +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous :synopsis: Asynchronous Python driver for MongoDB diff --git a/doc/api/pymongo/asynchronous/mongo_client.rst b/doc/api/pymongo/asynchronous/mongo_client.rst index 75952f1b6d..d0729da78b 100644 --- a/doc/api/pymongo/asynchronous/mongo_client.rst +++ b/doc/api/pymongo/asynchronous/mongo_client.rst @@ -1,6 +1,11 @@ :mod:`mongo_client` -- Tools for connecting to MongoDB ====================================================== +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. automodule:: pymongo.asynchronous.mongo_client :synopsis: Tools for connecting to MongoDB diff --git a/doc/changelog.rst b/doc/changelog.rst index 69fbb6f8fd..dfb3c79827 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -7,11 +7,18 @@ Changes in Version 4.9.0 .. warning:: Driver support for MongoDB 3.6 reached end of life in April 2024. PyMongo 4.9 will be the last release to support MongoDB 3.6. +.. warning:: PyMongo 4.9 refactors a large portion of internal APIs to support the new asynchronous API beta. + As a result, versions of Motor older than 3.6 are not compatible with PyMongo 4.9. + Existing users of these versions must either upgrade to Motor 3.6 and PyMongo 4.9, + or cap their PyMongo version to ``< 4.9``. + Any applications that use private APIs may also break as a result of these internal changes. + PyMongo 4.9 brings a number of improvements including: - Added support for MongoDB 8.0. - Added support for Python 3.13. -- A new asynchronous API with full asyncio support. +- A new beta asynchronous API with full asyncio support. + This new asynchronous API is a work-in-progress that may change during the beta period before the full release. - Added support for In-Use Encryption range queries with MongoDB 8.0. Added :attr:`~pymongo.encryption.Algorithm.RANGE`. ``sparsity`` and ``trim_factor`` are now optional in :class:`~pymongo.encryption_options.RangeOpts`. diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 6d0e5d5280..814c604562 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -177,6 +177,8 @@ def __init__( For more details, see the relevant section of the PyMongo 4.x migration guide: :ref:`pymongo4-migration-direct-connection`. + .. warning:: This API is currently in beta, meaning the classes, methods, and behaviors described within may change before the full release. + The client object is thread-safe and has connection-pooling built in. If an operation fails because of a network error, :class:`~pymongo.errors.ConnectionFailure` is raised and the client diff --git a/tools/synchro.py b/tools/synchro.py index fdf3a05c95..59d6e653e5 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -119,6 +119,10 @@ be passed as options for the create collection command.""", } +docstring_removals: set[str] = { + ".. warning:: This API is currently in beta, meaning the classes, methods, and behaviors described within may change before the full release." +} + type_replacements = {"_Condition": "threading.Condition"} import_replacements = {"test.synchronous": "test"} @@ -322,7 +326,12 @@ def translate_docstrings(lines: list[str]) -> list[str]: docstring_replacements[k], # type: ignore[index] ) - return lines + for line in docstring_removals: + if line in lines[i]: + lines[i] = "DOCSTRING_REMOVED" + lines[i + 1] = "DOCSTRING_REMOVED" + + return [line for line in lines if line != "DOCSTRING_REMOVED"] def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[str, str]) -> None: From 8b26d4bc0952c4fe4b19eaf593b9040a1d62a9e0 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 22:10:50 +0000 Subject: [PATCH 1495/2111] BUMP 4.9.1 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 5ff72d6cc8..adcb933c3a 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.10.0.dev0" +__version__ = "4.9.1" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 9df635f10276d92e26b39c8ba35243c1064a29dd Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 22:27:23 +0000 Subject: [PATCH 1496/2111] BUMP 4.10.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index adcb933c3a..5ff72d6cc8 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.9.1" +__version__ = "4.10.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 0f84ad6ed98900ba43f0942c9a06ce0f0b073559 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 20 Sep 2024 10:06:03 -0700 Subject: [PATCH 1497/2111] PYTHON-4769 Avoid pytest collection overhead when running perf benchmarks (#1869) --- .evergreen/run-tests.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 66df6b26ca..8d7a9f082a 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -224,6 +224,9 @@ if [ -n "$PERF_TEST" ]; then python -m pip install simplejson start_time=$(date +%s) TEST_SUITES="perf" + # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively + # affects the benchmark results. + TEST_ARGS="test/performance/perf_test.py $TEST_ARGS" fi echo "Running $AUTH tests over $SSL with python $(which python)" From e03f8f24f2387882fcaa5d3099d2cef7ae100816 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 20 Sep 2024 16:50:59 -0500 Subject: [PATCH 1498/2111] PYTHON-4781 Handle errors on Async PyMongo import (#1873) --- pymongo/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 7ee177bdae..8116788bc3 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -88,7 +88,6 @@ from pymongo import _csot from pymongo._version import __version__, get_version_string, version_tuple -from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION from pymongo.cursor import CursorType from pymongo.operations import ( @@ -105,6 +104,14 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +try: + from pymongo.asynchronous.mongo_client import AsyncMongoClient +except Exception as e: + # PYTHON-4781: Importing asyncio can fail on Windows. + import warnings as _warnings + + _warnings.warn(f"Failed to import Async PyMongo: {e!r}", ImportWarning, stacklevel=2) + version = __version__ """Current version of PyMongo.""" From 7742b7f24fd4a16f22d620471cfca5f88cf0b628 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 30 Sep 2024 14:14:12 -0400 Subject: [PATCH 1499/2111] PYTHON-4797 - Convert test.test_raw_bson to async (#1882) --- test/asynchronous/test_raw_bson.py | 219 +++++++++++++++++++++++++++++ test/test_raw_bson.py | 5 +- tools/synchro.py | 5 +- 3 files changed, 225 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_raw_bson.py diff --git a/test/asynchronous/test_raw_bson.py b/test/asynchronous/test_raw_bson.py new file mode 100644 index 0000000000..70832ea668 --- /dev/null +++ b/test/asynchronous/test_raw_bson.py @@ -0,0 +1,219 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import datetime +import sys +import uuid + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest + +from bson import Code, DBRef, decode, encode +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import InvalidBSON +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from bson.son import SON + +_IS_SYNC = False + + +class TestRawBSONDocument(AsyncIntegrationTest): + # {'_id': ObjectId('556df68b6e32ab21a95e0785'), + # 'name': 'Sherlock', + # 'addresses': [{'street': 'Baker Street'}]} + bson_string = ( + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" + ) + document = RawBSONDocument(bson_string) + + async def asyncTearDown(self): + if async_client_context.connected: + await self.client.pymongo_test.test_raw.drop() + + def test_decode(self): + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] + self.assertIsInstance(first_address, RawBSONDocument) + self.assertEqual("Baker Street", first_address["street"]) + + def test_raw(self): + self.assertEqual(self.bson_string, self.document.raw) + + def test_empty_doc(self): + doc = RawBSONDocument(encode({})) + with self.assertRaises(KeyError): + doc["does-not-exist"] + + def test_invalid_bson_sequence(self): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): + RawBSONDocument(bson_byte_sequence) + + def test_invalid_bson_eoo(self): + invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): + RawBSONDocument(invalid_bson_eoo) + + @async_client_context.require_connection + async def test_round_trip(self): + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) + await db.test_raw.insert_one(self.document) + result = await db.test_raw.find_one(self.document["_id"]) + assert result is not None + self.assertIsInstance(result, RawBSONDocument) + self.assertEqual(dict(self.document.items()), dict(result.items())) + + @async_client_context.require_connection + async def test_round_trip_raw_uuid(self): + coll = self.client.get_database("pymongo_test").test_raw + uid = uuid.uuid4() + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} + raw = RawBSONDocument(encode(doc)) + await coll.insert_one(raw) + self.assertEqual(await coll.find_one(), doc) + uuid_coll = coll.with_options( + codec_options=coll.codec_options.with_options( + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + await uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) + + # Test that the raw bytes haven't changed. + raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) + self.assertEqual(await raw_coll.find_one(), raw) + + def test_with_codec_options(self): + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # encoded with JAVA_LEGACY uuid representation. + bson_string = ( + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" + ) + document = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) + + @async_client_context.require_connection + async def test_round_trip_codec_options(self): + doc = { + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), + } + db = self.client.pymongo_test + coll = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) + await coll.insert_one(doc) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) + self.assertEqual( + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), await coll.find_one() + ) + + @async_client_context.require_connection + async def test_raw_bson_document_embedded(self): + doc = {"embedded": self.document} + db = self.client.pymongo_test + await db.test_raw.insert_one(doc) + result = await db.test_raw.find_one() + assert result is not None + self.assertEqual(decode(self.document.raw), result["embedded"]) + + # Make sure that CodecOptions are preserved. + # {'embedded': [ + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # ]} + # encoded with JAVA_LEGACY uuid representation. + bson_string = ( + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" + ) + rbd = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + await db.test_raw.drop() + await db.test_raw.insert_one(rbd) + result = await db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() + assert result is not None + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) + + @async_client_context.require_connection + async def test_write_response_raw_bson(self): + coll = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw + + # No Exceptions raised while handling write response. + await coll.insert_one(self.document) + await coll.delete_one(self.document) + await coll.insert_many([self.document]) + await coll.delete_many(self.document) + await coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + await coll.update_many(self.document, {"$set": {"b": "c"}}) + + def test_preserve_key_ordering(self): + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] + rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) + + for rkey, elt in zip(rawdoc, keyvaluepairs): + self.assertEqual(rkey, elt[0]) + + def test_contains_code_with_scope(self): + doc = RawBSONDocument(encode({"value": Code("x=1", scope={})})) + + self.assertEqual(decode(encode(doc)), {"value": Code("x=1", {})}) + self.assertEqual(doc["value"].scope, RawBSONDocument(encode({}))) + + def test_contains_dbref(self): + doc = RawBSONDocument(encode({"value": DBRef("test", "id")})) + raw = {"$ref": "test", "$id": "id"} + raw_encoded = encode(decode(encode(raw))) + + self.assertEqual(decode(encode(doc)), {"value": DBRef("test", "id")}) + self.assertEqual(doc["value"].raw, raw_encoded) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index 11bc80dd9f..4d9a3ceb05 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -19,8 +19,7 @@ sys.path[0:0] = [""] -from test import client_context, unittest -from test.test_client import IntegrationTest +from test import IntegrationTest, client_context, unittest from bson import Code, DBRef, decode, encode from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation @@ -29,6 +28,8 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from bson.son import SON +_IS_SYNC = True + class TestRawBSONDocument(IntegrationTest): # {'_id': ObjectId('556df68b6e32ab21a95e0785'), diff --git a/tools/synchro.py b/tools/synchro.py index 59d6e653e5..0eca24b2cf 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -171,16 +171,17 @@ "test_change_stream.py", "test_client.py", "test_client_bulk_write.py", + "test_client_context.py", "test_collection.py", "test_cursor.py", "test_database.py", "test_encryption.py", "test_grid_file.py", "test_logger.py", + "test_monitoring.py", + "test_raw_bson.py", "test_session.py", "test_transactions.py", - "test_client_context.py", - "test_monitoring.py", ] sync_test_files = [ From 1e395de9c51aab501ed14bc994e88d96b773961a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:17:57 -0700 Subject: [PATCH 1500/2111] PYTHON-4737 Migrate test_binary.py to async (#1863) --- test/asynchronous/test_client.py | 75 ++++++++++++++- test/test_binary.py | 156 ++++++++----------------------- test/test_client.py | 75 ++++++++++++++- 3 files changed, 188 insertions(+), 118 deletions(-) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index f610f32779..1926ad74d2 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -17,6 +17,7 @@ import _thread as thread import asyncio +import base64 import contextlib import copy import datetime @@ -31,13 +32,15 @@ import sys import threading import time -from typing import Iterable, Type, no_type_check +import uuid +from typing import Any, Iterable, Type, no_type_check from unittest import mock from unittest.mock import patch import pytest import pytest_asyncio +from bson.binary import CSHARP_LEGACY, JAVA_LEGACY, PYTHON_LEGACY, Binary, UuidRepresentation from pymongo.operations import _Op sys.path[0:0] = [""] @@ -57,6 +60,7 @@ unittest, ) from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.test_binary import BinaryData from test.utils import ( NTHREADS, CMAPListener, @@ -2020,6 +2024,75 @@ def test_dict_hints_sort(self): async def test_dict_hints_create_index(self): await self.db.t.create_index({"x": pymongo.ASCENDING}) + async def test_legacy_java_uuid_roundtrip(self): + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) + + await async_client_context.client.pymongo_test.drop_collection("java_uuid") + db = async_client_context.client.pymongo_test + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) + + await coll.insert_many(docs) + self.assertEqual(5, await coll.count_documents({})) + async for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + async for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + await async_client_context.client.pymongo_test.drop_collection("java_uuid") + + async def test_legacy_csharp_uuid_roundtrip(self): + data = BinaryData.csharp_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) + + await async_client_context.client.pymongo_test.drop_collection("csharp_uuid") + db = async_client_context.client.pymongo_test + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) + + await coll.insert_many(docs) + self.assertEqual(5, await coll.count_documents({})) + async for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + async for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + await async_client_context.client.pymongo_test.drop_collection("csharp_uuid") + + async def test_uri_to_uuid(self): + uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" + client = await self.async_single_client(uri, connect=False) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) + + async def test_uuid_queries(self): + db = async_client_context.client.pymongo_test + coll = db.test + await coll.drop() + + uu = uuid.uuid4() + await coll.insert_one({"uuid": Binary(uu.bytes, 3)}) + self.assertEqual(1, await coll.count_documents({})) + + # Test regular UUID queries (using subtype 4). + coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, await coll.count_documents({"uuid": uu})) + await coll.insert_one({"uuid": uu}) + self.assertEqual(2, await coll.count_documents({})) + docs = await coll.find({"uuid": uu}).to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) + + # Test both. + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} + self.assertEqual(2, await coll.count_documents(predicate)) + docs = await coll.find(predicate).to_list() + self.assertEqual(2, len(docs)) + await coll.drop() + class TestExhaustCursor(AsyncIntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" diff --git a/test/test_binary.py b/test/test_binary.py index 93f6d08315..567c5ae92f 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -34,53 +34,49 @@ from bson.codec_options import CodecOptions from bson.son import SON from pymongo.common import validate_uuid_representation -from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -class TestBinary(unittest.TestCase): - csharp_data: bytes - java_data: bytes +class BinaryData: + # Generated by the Java driver + from_java = ( + b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" + b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" + b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" + b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" + b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" + b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" + b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" + b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" + b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" + b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" + b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" + b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" + b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" + b"0MQAA" + ) + java_data = base64.b64decode(from_java) + + # Generated by the .net driver + from_csharp = ( + b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" + b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" + b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" + b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" + b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" + b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" + b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" + b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" + b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" + b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" + b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" + b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" + b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" + ) + csharp_data = base64.b64decode(from_csharp) - @classmethod - def setUpClass(cls): - # Generated by the Java driver - from_java = ( - b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" - b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" - b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" - b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" - b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" - b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" - b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" - b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" - b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" - b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" - b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" - b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" - b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" - b"0MQAA" - ) - cls.java_data = base64.b64decode(from_java) - - # Generated by the .net driver - from_csharp = ( - b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" - b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" - b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" - b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" - b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" - b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" - b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" - b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" - b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" - b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" - b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" - b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" - b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" - ) - cls.csharp_data = base64.b64decode(from_csharp) +class TestBinary(unittest.TestCase): def test_binary(self): a_string = "hello world" a_binary = Binary(b"hello world") @@ -159,7 +155,7 @@ def test_uuid_subtype_4(self): def test_legacy_java_uuid(self): # Test decoding - data = self.java_data + data = BinaryData.java_data docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) @@ -197,27 +193,8 @@ def test_legacy_java_uuid(self): ) self.assertEqual(data, encoded) - @client_context.require_connection - def test_legacy_java_uuid_roundtrip(self): - data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) - - client_context.client.pymongo_test.drop_collection("java_uuid") - db = client_context.client.pymongo_test - coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) - - coll.insert_many(docs) - self.assertEqual(5, coll.count_documents({})) - for d in coll.find(): - self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - - coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - for d in coll.find(): - self.assertNotEqual(d["newguid"], d["newguidstring"]) - client_context.client.pymongo_test.drop_collection("java_uuid") - def test_legacy_csharp_uuid(self): - data = self.csharp_data + data = BinaryData.csharp_data # Test decoding docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) @@ -257,59 +234,6 @@ def test_legacy_csharp_uuid(self): ) self.assertEqual(data, encoded) - @client_context.require_connection - def test_legacy_csharp_uuid_roundtrip(self): - data = self.csharp_data - docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) - - client_context.client.pymongo_test.drop_collection("csharp_uuid") - db = client_context.client.pymongo_test - coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) - - coll.insert_many(docs) - self.assertEqual(5, coll.count_documents({})) - for d in coll.find(): - self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - - coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - for d in coll.find(): - self.assertNotEqual(d["newguid"], d["newguidstring"]) - client_context.client.pymongo_test.drop_collection("csharp_uuid") - - def test_uri_to_uuid(self): - uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" - client = MongoClient(uri, connect=False) - self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) - - @client_context.require_connection - def test_uuid_queries(self): - db = client_context.client.pymongo_test - coll = db.test - coll.drop() - - uu = uuid.uuid4() - coll.insert_one({"uuid": Binary(uu.bytes, 3)}) - self.assertEqual(1, coll.count_documents({})) - - # Test regular UUID queries (using subtype 4). - coll = db.get_collection( - "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - ) - self.assertEqual(0, coll.count_documents({"uuid": uu})) - coll.insert_one({"uuid": uu}) - self.assertEqual(2, coll.count_documents({})) - docs = list(coll.find({"uuid": uu})) - self.assertEqual(1, len(docs)) - self.assertEqual(uu, docs[0]["uuid"]) - - # Test both. - uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) - predicate = {"uuid": {"$in": [uu, uu_legacy]}} - self.assertEqual(2, coll.count_documents(predicate)) - docs = list(coll.find(predicate)) - self.assertEqual(2, len(docs)) - coll.drop() - def test_pickle(self): b1 = Binary(b"123", 2) diff --git a/test/test_client.py b/test/test_client.py index bc45325f0b..2642a87fdf 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -17,6 +17,7 @@ import _thread as thread import asyncio +import base64 import contextlib import copy import datetime @@ -31,12 +32,14 @@ import sys import threading import time -from typing import Iterable, Type, no_type_check +import uuid +from typing import Any, Iterable, Type, no_type_check from unittest import mock from unittest.mock import patch import pytest +from bson.binary import CSHARP_LEGACY, JAVA_LEGACY, PYTHON_LEGACY, Binary, UuidRepresentation from pymongo.operations import _Op sys.path[0:0] = [""] @@ -56,6 +59,7 @@ unittest, ) from test.pymongo_mocks import MockClient +from test.test_binary import BinaryData from test.utils import ( NTHREADS, CMAPListener, @@ -1978,6 +1982,75 @@ def test_dict_hints_sort(self): def test_dict_hints_create_index(self): self.db.t.create_index({"x": pymongo.ASCENDING}) + def test_legacy_java_uuid_roundtrip(self): + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) + + client_context.client.pymongo_test.drop_collection("java_uuid") + db = client_context.client.pymongo_test + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) + + coll.insert_many(docs) + self.assertEqual(5, coll.count_documents({})) + for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("java_uuid") + + def test_legacy_csharp_uuid_roundtrip(self): + data = BinaryData.csharp_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) + + client_context.client.pymongo_test.drop_collection("csharp_uuid") + db = client_context.client.pymongo_test + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) + + coll.insert_many(docs) + self.assertEqual(5, coll.count_documents({})) + for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("csharp_uuid") + + def test_uri_to_uuid(self): + uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" + client = self.single_client(uri, connect=False) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) + + def test_uuid_queries(self): + db = client_context.client.pymongo_test + coll = db.test + coll.drop() + + uu = uuid.uuid4() + coll.insert_one({"uuid": Binary(uu.bytes, 3)}) + self.assertEqual(1, coll.count_documents({})) + + # Test regular UUID queries (using subtype 4). + coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, coll.count_documents({"uuid": uu})) + coll.insert_one({"uuid": uu}) + self.assertEqual(2, coll.count_documents({})) + docs = coll.find({"uuid": uu}).to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) + + # Test both. + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} + self.assertEqual(2, coll.count_documents(predicate)) + docs = coll.find(predicate).to_list() + self.assertEqual(2, len(docs)) + coll.drop() + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" From 3ef565fa43734dfef6bdbb7458b41a4d71451cb1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 30 Sep 2024 18:01:53 -0500 Subject: [PATCH 1501/2111] PYTHON-4796 Update type checkers and handle with_options typing (#1880) --- bson/__init__.py | 4 ++-- bson/decimal128.py | 2 +- bson/json_util.py | 2 +- bson/son.py | 2 +- hatch.toml | 5 +++-- pymongo/_csot.py | 3 +-- pymongo/asynchronous/collection.py | 23 +++++++++++++++++++- pymongo/asynchronous/database.py | 22 ++++++++++++++++++- pymongo/asynchronous/pool.py | 2 +- pymongo/common.py | 2 +- pymongo/compression_support.py | 2 +- pymongo/encryption_options.py | 2 +- pymongo/synchronous/collection.py | 23 +++++++++++++++++++- pymongo/synchronous/database.py | 22 ++++++++++++++++++- pymongo/synchronous/pool.py | 2 +- requirements/typing.txt | 7 ++++++ test/asynchronous/test_database.py | 2 +- test/test_database.py | 2 +- test/test_typing.py | 34 +++++++++++++++++++----------- tools/synchro.py | 2 +- 20 files changed, 132 insertions(+), 33 deletions(-) create mode 100644 requirements/typing.txt diff --git a/bson/__init__.py b/bson/__init__.py index e8ac7c4441..e866a99c8d 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1324,7 +1324,7 @@ def decode_iter( elements = data[position : position + obj_size] position += obj_size - yield _bson_to_dict(elements, opts) # type:ignore[misc, type-var] + yield _bson_to_dict(elements, opts) # type:ignore[misc] @overload @@ -1370,7 +1370,7 @@ def decode_file_iter( raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 elements = size_data + file_obj.read(max(0, obj_size)) - yield _bson_to_dict(elements, opts) # type:ignore[type-var, arg-type, misc] + yield _bson_to_dict(elements, opts) # type:ignore[arg-type, misc] def is_valid(bson: bytes) -> bool: diff --git a/bson/decimal128.py b/bson/decimal128.py index 8581d5a3c8..016afb5eb8 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -223,7 +223,7 @@ def __init__(self, value: _VALUE_OPTIONS) -> None: "from list or tuple. Must have exactly 2 " "elements." ) - self.__high, self.__low = value # type: ignore + self.__high, self.__low = value else: raise TypeError(f"Cannot convert {value!r} to Decimal128") diff --git a/bson/json_util.py b/bson/json_util.py index 4269ba9858..6f34e4103d 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -324,7 +324,7 @@ def __new__( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation." ) - self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) # type:ignore[arg-type] + self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " diff --git a/bson/son.py b/bson/son.py index cf62717238..24275fce16 100644 --- a/bson/son.py +++ b/bson/son.py @@ -68,7 +68,7 @@ def __init__( self.update(kwargs) def __new__(cls: Type[SON[_Key, _Value]], *args: Any, **kwargs: Any) -> SON[_Key, _Value]: - instance = super().__new__(cls, *args, **kwargs) # type: ignore[type-var] + instance = super().__new__(cls, *args, **kwargs) instance.__keys = [] return instance diff --git a/hatch.toml b/hatch.toml index 8b1cf93e32..d5293a1d7f 100644 --- a/hatch.toml +++ b/hatch.toml @@ -13,8 +13,9 @@ features = ["docs","test"] test = "sphinx-build -E -b doctest doc ./doc/_build/doctest" [envs.typing] -features = ["encryption", "ocsp", "zstd", "aws"] -dependencies = ["mypy==1.2.0","pyright==1.1.290", "certifi", "typing_extensions"] +pre-install-commands = [ + "pip install -q -r requirements/typing.txt", +] [envs.typing.scripts] check-mypy = [ "mypy --install-types --non-interactive bson gridfs tools pymongo", diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 94328f9819..06c6b68ac9 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -75,14 +75,13 @@ def __init__(self, timeout: Optional[float]): self._timeout = timeout self._tokens: Optional[tuple[Token[Optional[float]], Token[float], Token[float]]] = None - def __enter__(self) -> _TimeoutContext: + def __enter__(self) -> None: timeout_token = TIMEOUT.set(self._timeout) prev_deadline = DEADLINE.get() next_deadline = time.monotonic() + self._timeout if self._timeout else float("inf") deadline_token = DEADLINE.set(min(prev_deadline, next_deadline)) rtt_token = RTT.set(0.0) self._tokens = (timeout_token, deadline_token, rtt_token) - return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: if self._tokens: diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 1ec74aad02..5abc41a7e0 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -35,6 +35,7 @@ TypeVar, Union, cast, + overload, ) from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions @@ -332,13 +333,33 @@ def database(self) -> AsyncDatabase[_DocumentType]: """ return self._database + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncCollection[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncCollection[_DocumentTypeArg]: + ... + def with_options( self, codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, - ) -> AsyncCollection[_DocumentType]: + ) -> AsyncCollection[_DocumentType] | AsyncCollection[_DocumentTypeArg]: """Get a clone of this collection changing the specified settings. >>> coll1.read_preference diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index 06c0eca2c1..98a0a6ff3b 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -146,13 +146,33 @@ def name(self) -> str: """The name of this :class:`AsyncDatabase`.""" return self._name + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncDatabase[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncDatabase[_DocumentTypeArg]: + ... + def with_options( self, codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, - ) -> AsyncDatabase[_DocumentType]: + ) -> AsyncDatabase[_DocumentType] | AsyncDatabase[_DocumentTypeArg]: """Get a clone of this database changing the specified settings. >>> db1.read_preference diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index a657042423..442d6c7ed6 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -913,7 +913,7 @@ async def _configured_socket( and not options.tls_allow_invalid_hostnames ): try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined] except _CertificateError: ssl_sock.close() raise diff --git a/pymongo/common.py b/pymongo/common.py index a073eba577..fe8fdd8949 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -850,7 +850,7 @@ def get_normed_key(x: str) -> str: return x def get_setter_key(x: str) -> str: - return options.cased_key(x) # type: ignore[attr-defined] + return options.cased_key(x) else: validated_options = {} diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 7123b90dfe..c71e4bddcf 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -26,7 +26,7 @@ def _have_snappy() -> bool: try: - import snappy # type:ignore[import] # noqa: F401 + import snappy # type:ignore[import-not-found] # noqa: F401 return True except ImportError: diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index df13026500..ee749e7ac1 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Mapping, Optional try: - import pymongocrypt # type:ignore[import] # noqa: F401 + import pymongocrypt # type:ignore[import-untyped] # noqa: F401 # Check for pymongocrypt>=1.10. from pymongocrypt import synchronous as _ # noqa: F401 diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 7a41aef31f..15a1913eaa 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -34,6 +34,7 @@ TypeVar, Union, cast, + overload, ) from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions @@ -333,13 +334,33 @@ def database(self) -> Database[_DocumentType]: """ return self._database + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Collection[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Collection[_DocumentTypeArg]: + ... + def with_options( self, codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, - ) -> Collection[_DocumentType]: + ) -> Collection[_DocumentType] | Collection[_DocumentTypeArg]: """Get a clone of this collection changing the specified settings. >>> coll1.read_preference diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index c57a59e09a..a0bef55343 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -146,13 +146,33 @@ def name(self) -> str: """The name of this :class:`Database`.""" return self._name + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Database[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Database[_DocumentTypeArg]: + ... + def with_options( self, codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None, - ) -> Database[_DocumentType]: + ) -> Database[_DocumentType] | Database[_DocumentTypeArg]: """Get a clone of this database changing the specified settings. >>> db1.read_preference diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 94a1d10436..1b8b1f1ec9 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -909,7 +909,7 @@ def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket. and not options.tls_allow_invalid_hostnames ): try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined] except _CertificateError: ssl_sock.close() raise diff --git a/requirements/typing.txt b/requirements/typing.txt new file mode 100644 index 0000000000..1669e6bbc2 --- /dev/null +++ b/requirements/typing.txt @@ -0,0 +1,7 @@ +mypy==1.11.2 +pyright==1.1.382.post1 +typing_extensions +-r ./encryption.txt +-r ./ocsp.txt +-r ./zstd.txt +-r ./aws.txt diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index c5d62323df..61369c8542 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -711,7 +711,7 @@ def test_with_options(self): "write_concern": WriteConcern(w=1), "read_concern": ReadConcern(level="local"), } - db2 = db1.with_options(**newopts) # type: ignore[arg-type] + db2 = db1.with_options(**newopts) # type: ignore[arg-type, call-overload] for opt in newopts: self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) diff --git a/test/test_database.py b/test/test_database.py index fe07f343c5..4973ed0134 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -702,7 +702,7 @@ def test_with_options(self): "write_concern": WriteConcern(w=1), "read_concern": ReadConcern(level="local"), } - db2 = db1.with_options(**newopts) # type: ignore[arg-type] + db2 = db1.with_options(**newopts) # type: ignore[arg-type, call-overload] for opt in newopts: self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) diff --git a/test/test_typing.py b/test/test_typing.py index 6cfe40537b..441707616e 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -34,7 +34,7 @@ cast, ) -try: +if TYPE_CHECKING: from typing_extensions import NotRequired, TypedDict from bson import ObjectId @@ -49,16 +49,13 @@ class MovieWithId(TypedDict): year: int class ImplicitMovie(TypedDict): - _id: NotRequired[ObjectId] # pyright: ignore[reportGeneralTypeIssues] + _id: NotRequired[ObjectId] name: str year: int - -except ImportError: - Movie = dict # type:ignore[misc,assignment] - ImplicitMovie = dict # type: ignore[assignment,misc] - MovieWithId = dict # type: ignore[assignment,misc] - TypedDict = None - NotRequired = None # type: ignore[assignment] +else: + Movie = dict + ImplicitMovie = dict + NotRequired = None try: @@ -234,6 +231,19 @@ def execute_transaction(session): execute_transaction, read_preference=ReadPreference.PRIMARY ) + def test_with_options(self) -> None: + coll: Collection[Dict[str, Any]] = self.coll + coll.drop() + doc = {"name": "foo", "year": 1982, "other": 1} + coll.insert_one(doc) + + coll2 = coll.with_options(codec_options=CodecOptions(document_class=Movie)) + retrieved = coll2.find_one() + assert retrieved is not None + assert retrieved["name"] == "foo" + # We expect a type error here. + assert retrieved["other"] == 1 # type:ignore[typeddict-item] + class TestDecode(unittest.TestCase): def test_bson_decode(self) -> None: @@ -426,7 +436,7 @@ def test_bulk_write_document_type_insertion(self): ) coll.bulk_write( [ - InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) # pyright: ignore ] # No error because it is in-line. ) @@ -443,7 +453,7 @@ def test_bulk_write_document_type_replacement(self): ) coll.bulk_write( [ - ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) + ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) # pyright: ignore ] # No error because it is in-line. ) @@ -566,7 +576,7 @@ def test_explicit_document_type(self) -> None: def test_typeddict_document_type(self) -> None: options: CodecOptions[Movie] = CodecOptions() # Suppress: Cannot instantiate type "Type[Movie]". - obj = options.document_class(name="a", year=1) # type: ignore[misc] + obj = options.document_class(name="a", year=1) assert obj["year"] == 1 assert obj["name"] == "a" diff --git a/tools/synchro.py b/tools/synchro.py index 0eca24b2cf..86506b7798 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -23,7 +23,7 @@ from os import listdir from pathlib import Path -from unasync import Rule, unasync_files # type: ignore[import] +from unasync import Rule, unasync_files # type: ignore[import-not-found] replacements = { "AsyncCollection": "Collection", From 083359f95f7ce1c202d54a0a16506ae4c5162b23 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Mon, 30 Sep 2024 19:09:57 -0400 Subject: [PATCH 1502/2111] PYTHON-1714 Add c extension use to client metadata (#1874) --- pymongo/__init__.py | 12 +----------- pymongo/common.py | 10 ++++++++++ pymongo/pool_options.py | 6 ++++++ test/asynchronous/test_client.py | 17 +++++++++++++---- test/test_client.py | 17 +++++++++++++---- tools/synchro.py | 1 + 6 files changed, 44 insertions(+), 19 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 8116788bc3..6416f939e8 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -88,7 +88,7 @@ from pymongo import _csot from pymongo._version import __version__, get_version_string, version_tuple -from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION, has_c from pymongo.cursor import CursorType from pymongo.operations import ( DeleteMany, @@ -116,16 +116,6 @@ """Current version of PyMongo.""" -def has_c() -> bool: - """Is the C extension installed?""" - try: - from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 - - return True - except ImportError: - return False - - def timeout(seconds: Optional[float]) -> ContextManager[None]: """**(Provisional)** Apply the given timeout for a block of operations. diff --git a/pymongo/common.py b/pymongo/common.py index fe8fdd8949..126d0ee46e 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -1060,3 +1060,13 @@ def update(self, other: Mapping[str, Any]) -> None: # type: ignore[override] def cased_key(self, key: str) -> Any: return self.__casedkeys[key.lower()] + + +def has_c() -> bool: + """Is the C extension installed?""" + try: + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 + + return True + except ImportError: + return False diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index 6ec97d7d1b..61486c91c6 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -33,6 +33,7 @@ MAX_POOL_SIZE, MIN_POOL_SIZE, WAIT_QUEUE_TIMEOUT, + has_c, ) if TYPE_CHECKING: @@ -363,6 +364,11 @@ def __init__( # }, # 'platform': 'CPython 3.8.0|MyPlatform' # } + if has_c(): + self.__metadata["driver"]["name"] = "{}|{}".format( + self.__metadata["driver"]["name"], + "c", + ) if not is_sync: self.__metadata["driver"]["name"] = "{}|{}".format( self.__metadata["driver"]["name"], diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 1926ad74d2..b6324d3bac 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -99,7 +99,7 @@ from pymongo.asynchronous.settings import TOPOLOGY_TYPE from pymongo.asynchronous.topology import _ErrorContext from pymongo.client_options import ClientOptions -from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT, has_c from pymongo.compression_support import _have_snappy, _have_zstd from pymongo.driver_info import DriverInfo from pymongo.errors import ( @@ -347,7 +347,10 @@ async def test_read_preference(self): async def test_metadata(self): metadata = copy.deepcopy(_METADATA) - metadata["driver"]["name"] = "PyMongo|async" + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async" + else: + metadata["driver"]["name"] = "PyMongo|async" metadata["application"] = {"name": "foobar"} client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options @@ -370,7 +373,10 @@ async def test_metadata(self): with self.assertRaises(TypeError): self.simple_client(driver=("Foo", "1", "a")) # Test appending to driver info. - metadata["driver"]["name"] = "PyMongo|async|FooDriver" + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async|FooDriver" + else: + metadata["driver"]["name"] = "PyMongo|async|FooDriver" metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) client = self.simple_client( "foo", @@ -1931,7 +1937,10 @@ def test_sigstop_sigcont(self): async def _test_handshake(self, env_vars, expected_env): with patch.dict("os.environ", env_vars): metadata = copy.deepcopy(_METADATA) - metadata["driver"]["name"] = "PyMongo|async" + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async" + else: + metadata["driver"]["name"] = "PyMongo|async" if expected_env is not None: metadata["env"] = expected_env diff --git a/test/test_client.py b/test/test_client.py index 2642a87fdf..86b9f41ec9 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -87,7 +87,7 @@ from bson.tz_util import utc from pymongo import event_loggers, message, monitoring from pymongo.client_options import ClientOptions -from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT, has_c from pymongo.compression_support import _have_snappy, _have_zstd from pymongo.driver_info import DriverInfo from pymongo.errors import ( @@ -339,7 +339,10 @@ def test_read_preference(self): def test_metadata(self): metadata = copy.deepcopy(_METADATA) - metadata["driver"]["name"] = "PyMongo" + if has_c(): + metadata["driver"]["name"] = "PyMongo|c" + else: + metadata["driver"]["name"] = "PyMongo" metadata["application"] = {"name": "foobar"} client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") options = client.options @@ -362,7 +365,10 @@ def test_metadata(self): with self.assertRaises(TypeError): self.simple_client(driver=("Foo", "1", "a")) # Test appending to driver info. - metadata["driver"]["name"] = "PyMongo|FooDriver" + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|FooDriver" + else: + metadata["driver"]["name"] = "PyMongo|FooDriver" metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) client = self.simple_client( "foo", @@ -1889,7 +1895,10 @@ def test_sigstop_sigcont(self): def _test_handshake(self, env_vars, expected_env): with patch.dict("os.environ", env_vars): metadata = copy.deepcopy(_METADATA) - metadata["driver"]["name"] = "PyMongo" + if has_c(): + metadata["driver"]["name"] = "PyMongo|c" + else: + metadata["driver"]["name"] = "PyMongo" if expected_env is not None: metadata["env"] = expected_env diff --git a/tools/synchro.py b/tools/synchro.py index 86506b7798..6ce897a0b8 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -101,6 +101,7 @@ "default_async": "default", "aclose": "close", "PyMongo|async": "PyMongo", + "PyMongo|c|async": "PyMongo|c", "AsyncTestGridFile": "TestGridFile", "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", } From 821811e80d72d2ae822d11bac30e1c7a935208c2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 30 Sep 2024 16:24:07 -0700 Subject: [PATCH 1503/2111] PYTHON-4782 Fix deadlock and blocking behavior in _ACondition.wait (#1875) --- pymongo/asynchronous/pool.py | 7 +- pymongo/asynchronous/topology.py | 5 +- pymongo/lock.py | 147 +++++- pymongo/synchronous/pool.py | 9 +- pymongo/synchronous/topology.py | 7 +- test/asynchronous/test_client.py | 4 +- test/asynchronous/test_client_bulk_write.py | 5 +- test/asynchronous/test_cursor.py | 4 +- test/asynchronous/test_locks.py | 513 ++++++++++++++++++++ test/test_client.py | 4 +- test/test_client_bulk_write.py | 5 +- test/test_cursor.py | 4 +- test/test_server_selection_in_window.py | 13 +- tools/synchro.py | 14 +- 14 files changed, 693 insertions(+), 48 deletions(-) create mode 100644 test/asynchronous/test_locks.py diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 442d6c7ed6..a9f02d650a 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -992,7 +992,8 @@ def __init__( # from the right side. self.conns: collections.deque = collections.deque() self.active_contexts: set[_CancellationContext] = set() - self.lock = _ALock(_create_lock()) + _lock = _create_lock() + self.lock = _ALock(_lock) self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 @@ -1018,7 +1019,7 @@ def __init__( # The first portion of the wait queue. # Enforces: maxPoolSize # Also used for: clearing the wait queue - self.size_cond = _ACondition(threading.Condition(self.lock)) # type: ignore[arg-type] + self.size_cond = _ACondition(threading.Condition(_lock)) self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: @@ -1026,7 +1027,7 @@ def __init__( # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue - self._max_connecting_cond = _ACondition(threading.Condition(self.lock)) # type: ignore[arg-type] + self._max_connecting_cond = _ACondition(threading.Condition(_lock)) self._max_connecting = self.opts.max_connecting self._pending = 0 self._client_id = client_id diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 4e778cbc17..82af4257ba 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -170,8 +170,9 @@ def __init__(self, topology_settings: TopologySettings): self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False self._closed = False - self._lock = _ALock(_create_lock()) - self._condition = _ACondition(self._settings.condition_class(self._lock)) # type: ignore[arg-type] + _lock = _create_lock() + self._lock = _ALock(_lock) + self._condition = _ACondition(self._settings.condition_class(_lock)) self._servers: dict[_Address, Server] = {} self._pid: Optional[int] = None self._max_cluster_time: Optional[ClusterTime] = None diff --git a/pymongo/lock.py b/pymongo/lock.py index b05f6acffb..0cbfb4a57e 100644 --- a/pymongo/lock.py +++ b/pymongo/lock.py @@ -14,17 +14,20 @@ from __future__ import annotations import asyncio +import collections import os import threading import time import weakref -from typing import Any, Callable, Optional +from typing import Any, Callable, Optional, TypeVar _HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") # References to instances of _create_lock _forkable_locks: weakref.WeakSet[threading.Lock] = weakref.WeakSet() +_T = TypeVar("_T") + def _create_lock() -> threading.Lock: """Represents a lock that is tracked upon instantiation using a WeakSet and @@ -43,7 +46,14 @@ def _release_locks() -> None: lock.release() +# Needed only for synchro.py compat. +def _Lock(lock: threading.Lock) -> threading.Lock: + return lock + + class _ALock: + __slots__ = ("_lock",) + def __init__(self, lock: threading.Lock) -> None: self._lock = lock @@ -81,9 +91,18 @@ async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: self.release() +def _safe_set_result(fut: asyncio.Future) -> None: + # Ensure the future hasn't been cancelled before calling set_result. + if not fut.done(): + fut.set_result(False) + + class _ACondition: + __slots__ = ("_condition", "_waiters") + def __init__(self, condition: threading.Condition) -> None: self._condition = condition + self._waiters: collections.deque = collections.deque() async def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: if timeout > 0: @@ -99,30 +118,116 @@ async def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: await asyncio.sleep(0) async def wait(self, timeout: Optional[float] = None) -> bool: - if timeout is not None: - tstart = time.monotonic() - while True: - notified = self._condition.wait(0.001) - if notified: - return True - if timeout is not None and (time.monotonic() - tstart) > timeout: - return False - - async def wait_for(self, predicate: Callable, timeout: Optional[float] = None) -> bool: - if timeout is not None: - tstart = time.monotonic() - while True: - notified = self._condition.wait_for(predicate, 0.001) - if notified: - return True - if timeout is not None and (time.monotonic() - tstart) > timeout: - return False + """Wait until notified. + + If the calling task has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another task. Once + awakened, it re-acquires the lock and returns True. + + This method may return spuriously, + which is why the caller should always + re-check the state and be prepared to wait() again. + """ + loop = asyncio.get_running_loop() + fut = loop.create_future() + self._waiters.append((loop, fut)) + self.release() + try: + try: + try: + await asyncio.wait_for(fut, timeout) + return True + except asyncio.TimeoutError: + return False # Return false on timeout for sync pool compat. + finally: + # Must re-acquire lock even if wait is cancelled. + # We only catch CancelledError here, since we don't want any + # other (fatal) errors with the future to cause us to spin. + err = None + while True: + try: + await self.acquire() + break + except asyncio.exceptions.CancelledError as e: + err = e + + self._waiters.remove((loop, fut)) + if err is not None: + try: + raise err # Re-raise most recent exception instance. + finally: + err = None # Break reference cycles. + except BaseException: + # Any error raised out of here _may_ have occurred after this Task + # believed to have been successfully notified. + # Make sure to notify another Task instead. This may result + # in a "spurious wakeup", which is allowed as part of the + # Condition Variable protocol. + self.notify(1) + raise + + async def wait_for(self, predicate: Callable[[], _T]) -> _T: + """Wait until a predicate becomes true. + + The predicate should be a callable whose result will be + interpreted as a boolean value. The method will repeatedly + wait() until it evaluates to true. The final predicate value is + the return value. + """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result def notify(self, n: int = 1) -> None: - self._condition.notify(n) + """By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + idx = 0 + to_remove = [] + for loop, fut in self._waiters: + if idx >= n: + break + + if fut.done(): + continue + + try: + loop.call_soon_threadsafe(_safe_set_result, fut) + except RuntimeError: + # Loop was closed, ignore. + to_remove.append((loop, fut)) + continue + + idx += 1 + + for waiter in to_remove: + self._waiters.remove(waiter) def notify_all(self) -> None: - self._condition.notify_all() + """Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) + + def locked(self) -> bool: + """Only needed for tests in test_locks.""" + return self._condition._lock.locked() # type: ignore[attr-defined] def release(self) -> None: self._condition.release() diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 1b8b1f1ec9..eb007a3471 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -62,7 +62,7 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat -from pymongo.lock import _create_lock +from pymongo.lock import _create_lock, _Lock from pymongo.logger import ( _CONNECTION_LOGGER, _ConnectionStatusMessage, @@ -988,7 +988,8 @@ def __init__( # from the right side. self.conns: collections.deque = collections.deque() self.active_contexts: set[_CancellationContext] = set() - self.lock = _create_lock() + _lock = _create_lock() + self.lock = _Lock(_lock) self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 @@ -1014,7 +1015,7 @@ def __init__( # The first portion of the wait queue. # Enforces: maxPoolSize # Also used for: clearing the wait queue - self.size_cond = threading.Condition(self.lock) # type: ignore[arg-type] + self.size_cond = threading.Condition(_lock) self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: @@ -1022,7 +1023,7 @@ def __init__( # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue - self._max_connecting_cond = threading.Condition(self.lock) # type: ignore[arg-type] + self._max_connecting_cond = threading.Condition(_lock) self._max_connecting = self.opts.max_connecting self._pending = 0 self._client_id = client_id diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index e8070e30ab..a350c1702e 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -39,7 +39,7 @@ WriteError, ) from pymongo.hello import Hello -from pymongo.lock import _create_lock +from pymongo.lock import _create_lock, _Lock from pymongo.logger import ( _SDAM_LOGGER, _SERVER_SELECTION_LOGGER, @@ -170,8 +170,9 @@ def __init__(self, topology_settings: TopologySettings): self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False self._closed = False - self._lock = _create_lock() - self._condition = self._settings.condition_class(self._lock) # type: ignore[arg-type] + _lock = _create_lock() + self._lock = _Lock(_lock) + self._condition = self._settings.condition_class(_lock) self._servers: dict[_Address, Server] = {} self._pid: Optional[int] = None self._max_cluster_time: Optional[ClusterTime] = None diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index b6324d3bac..5c06331790 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -2433,7 +2433,9 @@ async def test_reconnect(self): # But it can reconnect. c.revive_host("a:1") - await (await c._get_topology()).select_servers(writable_server_selector, _Op.TEST) + await (await c._get_topology()).select_servers( + writable_server_selector, _Op.TEST, server_selection_timeout=10 + ) self.assertEqual(await c.address, ("a", 1)) async def _test_network_error(self, operation_callback): diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 3a17299453..80cfd30bde 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -30,6 +30,7 @@ ) from unittest.mock import patch +import pymongo from pymongo.asynchronous.client_bulk import _AsyncClientBulk from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( @@ -597,7 +598,9 @@ async def test_timeout_in_multi_batch_bulk_write(self): timeoutMS=2000, w="majority", ) - await client.admin.command("ping") # Init the client first. + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(10): + await client.admin.command("ping") with self.assertRaises(ClientBulkWriteException) as context: await client.bulk_write(models=models) self.assertIsInstance(context.exception.error, NetworkTimeout) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 33eaacee96..e79ad00641 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1414,7 +1414,7 @@ async def test_to_list_length(self): async def test_to_list_csot_applied(self): client = await self.async_single_client(timeoutMS=500) # Initialize the client with a larger timeout to help make test less flakey - with pymongo.timeout(2): + with pymongo.timeout(10): await client.admin.command("ping") coll = client.pymongo.test await coll.insert_many([{} for _ in range(5)]) @@ -1456,7 +1456,7 @@ async def test_command_cursor_to_list_length(self): async def test_command_cursor_to_list_csot_applied(self): client = await self.async_single_client(timeoutMS=500) # Initialize the client with a larger timeout to help make test less flakey - with pymongo.timeout(2): + with pymongo.timeout(10): await client.admin.command("ping") coll = client.pymongo.test await coll.insert_many([{} for _ in range(5)]) diff --git a/test/asynchronous/test_locks.py b/test/asynchronous/test_locks.py new file mode 100644 index 0000000000..e0e7f2fc8d --- /dev/null +++ b/test/asynchronous/test_locks.py @@ -0,0 +1,513 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for lock.py""" +from __future__ import annotations + +import asyncio +import sys +import threading +import unittest + +sys.path[0:0] = [""] + +from pymongo.lock import _ACondition + + +# Tests adapted from: https://github.com/python/cpython/blob/v3.13.0rc2/Lib/test/test_asyncio/test_locks.py +# Includes tests for: +# - https://github.com/python/cpython/issues/111693 +# - https://github.com/python/cpython/issues/112202 +class TestConditionStdlib(unittest.IsolatedAsyncioTestCase): + async def test_wait(self): + cond = _ACondition(threading.Condition(threading.Lock())) + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + await cond.acquire() + if await cond.wait(): + result.append(2) + return True + + async def c3(result): + await cond.acquire() + if await cond.wait(): + result.append(3) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertFalse(cond.locked()) + + self.assertTrue(await cond.acquire()) + cond.notify() + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.notify(2) + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2, 3], result) + self.assertTrue(cond.locked()) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + async def test_wait_cancel(self): + cond = _ACondition(threading.Condition(threading.Lock())) + await cond.acquire() + + wait = asyncio.create_task(cond.wait()) + asyncio.get_running_loop().call_soon(wait.cancel) + with self.assertRaises(asyncio.CancelledError): + await wait + self.assertFalse(cond._waiters) + self.assertTrue(cond.locked()) + + async def test_wait_cancel_contested(self): + cond = _ACondition(threading.Condition(threading.Lock())) + + await cond.acquire() + self.assertTrue(cond.locked()) + + wait_task = asyncio.create_task(cond.wait()) + await asyncio.sleep(0) + self.assertFalse(cond.locked()) + + # Notify, but contest the lock before cancelling + await cond.acquire() + self.assertTrue(cond.locked()) + cond.notify() + asyncio.get_running_loop().call_soon(wait_task.cancel) + asyncio.get_running_loop().call_soon(cond.release) + + try: + await wait_task + except asyncio.CancelledError: + # Should not happen, since no cancellation points + pass + + self.assertTrue(cond.locked()) + + async def test_wait_cancel_after_notify(self): + # See bpo-32841 + waited = False + + cond = _ACondition(threading.Condition(threading.Lock())) + + async def wait_on_cond(): + nonlocal waited + async with cond: + waited = True # Make sure this area was reached + await cond.wait() + + waiter = asyncio.create_task(wait_on_cond()) + await asyncio.sleep(0) # Start waiting + + await cond.acquire() + cond.notify() + await asyncio.sleep(0) # Get to acquire() + waiter.cancel() + await asyncio.sleep(0) # Activate cancellation + cond.release() + await asyncio.sleep(0) # Cancellation should occur + + self.assertTrue(waiter.cancelled()) + self.assertTrue(waited) + + async def test_wait_unacquired(self): + cond = _ACondition(threading.Condition(threading.Lock())) + with self.assertRaises(RuntimeError): + await cond.wait() + + async def test_wait_for(self): + cond = _ACondition(threading.Condition(threading.Lock())) + presult = False + + def predicate(): + return presult + + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait_for(predicate): + result.append(1) + cond.release() + return True + + t = asyncio.create_task(c1(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([], result) + + presult = True + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + + self.assertTrue(t.done()) + self.assertTrue(t.result()) + + async def test_wait_for_unacquired(self): + cond = _ACondition(threading.Condition(threading.Lock())) + + # predicate can return true immediately + res = await cond.wait_for(lambda: [1, 2, 3]) + self.assertEqual([1, 2, 3], res) + + with self.assertRaises(RuntimeError): + await cond.wait_for(lambda: False) + + async def test_notify(self): + cond = _ACondition(threading.Condition(threading.Lock())) + result = [] + + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True + + async def c3(result): + async with cond: + if await cond.wait(): + result.append(3) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + async with cond: + cond.notify(1) + await asyncio.sleep(1) + self.assertEqual([1], result) + + async with cond: + cond.notify(1) + cond.notify(2048) + await asyncio.sleep(1) + self.assertEqual([1, 2, 3], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + async def test_notify_all(self): + cond = _ACondition(threading.Condition(threading.Lock())) + + result = [] + + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + async with cond: + cond.notify_all() + await asyncio.sleep(1) + self.assertEqual([1, 2], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + + async def test_context_manager(self): + cond = _ACondition(threading.Condition(threading.Lock())) + self.assertFalse(cond.locked()) + async with cond: + self.assertTrue(cond.locked()) + self.assertFalse(cond.locked()) + + async def test_timeout_in_block(self): + condition = _ACondition(threading.Condition(threading.Lock())) + async with condition: + with self.assertRaises(asyncio.TimeoutError): + await asyncio.wait_for(condition.wait(), timeout=0.5) + + @unittest.skipIf( + sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" + ) + async def test_cancelled_error_wakeup(self): + # Test that a cancelled error, received when awaiting wakeup, + # will be re-raised un-modified. + wake = False + raised = None + cond = _ACondition(threading.Condition(threading.Lock())) + + async def func(): + nonlocal raised + async with cond: + with self.assertRaises(asyncio.CancelledError) as err: + await cond.wait_for(lambda: wake) + raised = err.exception + raise raised + + task = asyncio.create_task(func()) + await asyncio.sleep(0) + # Task is waiting on the condition, cancel it there. + task.cancel(msg="foo") # type: ignore[call-arg] + with self.assertRaises(asyncio.CancelledError) as err: + await task + self.assertEqual(err.exception.args, ("foo",)) + # We should have got the _same_ exception instance as the one + # originally raised. + self.assertIs(err.exception, raised) + + @unittest.skipIf( + sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" + ) + async def test_cancelled_error_re_aquire(self): + # Test that a cancelled error, received when re-aquiring lock, + # will be re-raised un-modified. + wake = False + raised = None + cond = _ACondition(threading.Condition(threading.Lock())) + + async def func(): + nonlocal raised + async with cond: + with self.assertRaises(asyncio.CancelledError) as err: + await cond.wait_for(lambda: wake) + raised = err.exception + raise raised + + task = asyncio.create_task(func()) + await asyncio.sleep(0) + # Task is waiting on the condition + await cond.acquire() + wake = True + cond.notify() + await asyncio.sleep(0) + # Task is now trying to re-acquire the lock, cancel it there. + task.cancel(msg="foo") # type: ignore[call-arg] + cond.release() + with self.assertRaises(asyncio.CancelledError) as err: + await task + self.assertEqual(err.exception.args, ("foo",)) + # We should have got the _same_ exception instance as the one + # originally raised. + self.assertIs(err.exception, raised) + + @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11") + async def test_cancelled_wakeup(self): + # Test that a task cancelled at the "same" time as it is woken + # up as part of a Condition.notify() does not result in a lost wakeup. + # This test simulates a cancel while the target task is awaiting initial + # wakeup on the wakeup queue. + condition = _ACondition(threading.Condition(threading.Lock())) + state = 0 + + async def consumer(): + nonlocal state + async with condition: + while True: + await condition.wait_for(lambda: state != 0) + if state < 0: + return + state -= 1 + + # create two consumers + c = [asyncio.create_task(consumer()) for _ in range(2)] + # wait for them to settle + await asyncio.sleep(0.1) + async with condition: + # produce one item and wake up one + state += 1 + condition.notify(1) + + # Cancel it while it is awaiting to be run. + # This cancellation could come from the outside + c[0].cancel() + + # now wait for the item to be consumed + # if it doesn't means that our "notify" didn"t take hold. + # because it raced with a cancel() + try: + async with asyncio.timeout(1): + await condition.wait_for(lambda: state == 0) + except TimeoutError: + pass + self.assertEqual(state, 0) + + # clean up + state = -1 + condition.notify_all() + await c[1] + + @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11") + async def test_cancelled_wakeup_relock(self): + # Test that a task cancelled at the "same" time as it is woken + # up as part of a Condition.notify() does not result in a lost wakeup. + # This test simulates a cancel while the target task is acquiring the lock + # again. + condition = _ACondition(threading.Condition(threading.Lock())) + state = 0 + + async def consumer(): + nonlocal state + async with condition: + while True: + await condition.wait_for(lambda: state != 0) + if state < 0: + return + state -= 1 + + # create two consumers + c = [asyncio.create_task(consumer()) for _ in range(2)] + # wait for them to settle + await asyncio.sleep(0.1) + async with condition: + # produce one item and wake up one + state += 1 + condition.notify(1) + + # now we sleep for a bit. This allows the target task to wake up and + # settle on re-aquiring the lock + await asyncio.sleep(0) + + # Cancel it while awaiting the lock + # This cancel could come the outside. + c[0].cancel() + + # now wait for the item to be consumed + # if it doesn't means that our "notify" didn"t take hold. + # because it raced with a cancel() + try: + async with asyncio.timeout(1): + await condition.wait_for(lambda: state == 0) + except TimeoutError: + pass + self.assertEqual(state, 0) + + # clean up + state = -1 + condition.notify_all() + await c[1] + + +class TestCondition(unittest.IsolatedAsyncioTestCase): + async def test_multiple_loops_notify(self): + cond = _ACondition(threading.Condition(threading.Lock())) + + def tmain(cond): + async def atmain(cond): + await asyncio.sleep(1) + async with cond: + cond.notify(1) + + asyncio.run(atmain(cond)) + + t = threading.Thread(target=tmain, args=(cond,)) + t.start() + + async with cond: + self.assertTrue(await cond.wait(30)) + t.join() + + async def test_multiple_loops_notify_all(self): + cond = _ACondition(threading.Condition(threading.Lock())) + results = [] + + def tmain(cond, results): + async def atmain(cond, results): + await asyncio.sleep(1) + async with cond: + res = await cond.wait(30) + results.append(res) + + asyncio.run(atmain(cond, results)) + + nthreads = 5 + threads = [] + for _ in range(nthreads): + threads.append(threading.Thread(target=tmain, args=(cond, results))) + for t in threads: + t.start() + + await asyncio.sleep(2) + async with cond: + cond.notify_all() + + for t in threads: + t.join() + + self.assertEqual(results, [True] * nthreads) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 86b9f41ec9..c88a8fd9b4 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -2389,7 +2389,9 @@ def test_reconnect(self): # But it can reconnect. c.revive_host("a:1") - (c._get_topology()).select_servers(writable_server_selector, _Op.TEST) + (c._get_topology()).select_servers( + writable_server_selector, _Op.TEST, server_selection_timeout=10 + ) self.assertEqual(c.address, ("a", 1)) def _test_network_error(self, operation_callback): diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index ebbdc74c1c..d1aff03fc9 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -30,6 +30,7 @@ ) from unittest.mock import patch +import pymongo from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( ClientBulkWriteException, @@ -597,7 +598,9 @@ def test_timeout_in_multi_batch_bulk_write(self): timeoutMS=2000, w="majority", ) - client.admin.command("ping") # Init the client first. + # Initialize the client with a larger timeout to help make test less flakey + with pymongo.timeout(10): + client.admin.command("ping") with self.assertRaises(ClientBulkWriteException) as context: client.bulk_write(models=models) self.assertIsInstance(context.exception.error, NetworkTimeout) diff --git a/test/test_cursor.py b/test/test_cursor.py index d99732aec3..7c073bf351 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1405,7 +1405,7 @@ def test_to_list_length(self): def test_to_list_csot_applied(self): client = self.single_client(timeoutMS=500) # Initialize the client with a larger timeout to help make test less flakey - with pymongo.timeout(2): + with pymongo.timeout(10): client.admin.command("ping") coll = client.pymongo.test coll.insert_many([{} for _ in range(5)]) @@ -1447,7 +1447,7 @@ def test_command_cursor_to_list_length(self): def test_command_cursor_to_list_csot_applied(self): client = self.single_client(timeoutMS=500) # Initialize the client with a larger timeout to help make test less flakey - with pymongo.timeout(2): + with pymongo.timeout(10): client.admin.command("ping") coll = client.pymongo.test coll.insert_many([{} for _ in range(5)]) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 8e030f61e8..7cab42cca2 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -19,6 +19,7 @@ import threading from test import IntegrationTest, client_context, unittest from test.utils import ( + CMAPListener, OvertCommandListener, SpecTestCreator, get_pool, @@ -27,6 +28,7 @@ from test.utils_selection_tests import create_topology from pymongo.common import clean_node +from pymongo.monitoring import ConnectionReadyEvent from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference @@ -131,19 +133,20 @@ def frequencies(self, client, listener, n_finds=10): @client_context.require_multiple_mongoses def test_load_balancing(self): listener = OvertCommandListener() + cmap_listener = CMAPListener() # PYTHON-2584: Use a large localThresholdMS to avoid the impact of # varying RTTs. client = self.rs_client( client_context.mongos_seeds(), appName="loadBalancingTest", - event_listeners=[listener], + event_listeners=[listener, cmap_listener], localThresholdMS=30000, minPoolSize=10, ) - self.addCleanup(client.close) wait_until(lambda: len(client.nodes) == 2, "discover both nodes") - wait_until(lambda: len(get_pool(client).conns) >= 10, "create 10 connections") - # Delay find commands on + # Wait for both pools to be populated. + cmap_listener.wait_for_event(ConnectionReadyEvent, 20) + # Delay find commands on only one mongos. delay_finds = { "configureFailPoint": "failCommand", "mode": {"times": 10000}, @@ -161,7 +164,7 @@ def test_load_balancing(self): freqs = self.frequencies(client, listener) self.assertLessEqual(freqs[delayed_server], 0.25) listener.reset() - freqs = self.frequencies(client, listener, n_finds=100) + freqs = self.frequencies(client, listener, n_finds=150) self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) diff --git a/tools/synchro.py b/tools/synchro.py index 6ce897a0b8..e0c194f962 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -145,7 +145,17 @@ _gridfs_base + f for f in listdir(_gridfs_base) if (Path(_gridfs_base) / f).is_file() ] -test_files = [_test_base + f for f in listdir(_test_base) if (Path(_test_base) / f).is_file()] + +def async_only_test(f: str) -> bool: + """Return True for async tests that should not be converted to sync.""" + return f in ["test_locks.py"] + + +test_files = [ + _test_base + f + for f in listdir(_test_base) + if (Path(_test_base) / f).is_file() and not async_only_test(f) +] sync_files = [ _pymongo_dest_base + f @@ -242,7 +252,7 @@ def translate_locks(lines: list[str]) -> list[str]: lock_lines = [line for line in lines if "_Lock(" in line] cond_lines = [line for line in lines if "_Condition(" in line] for line in lock_lines: - res = re.search(r"_Lock\(([^()]*\(\))\)", line) + res = re.search(r"_Lock\(([^()]*\([^()]*\))\)", line) if res: old = res[0] index = lines.index(line) From e76d411b593521cadd1f0cafc4009433ed65a246 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 30 Sep 2024 16:48:14 -0700 Subject: [PATCH 1504/2111] PYTHON-4794 Start running IPv6 tests again (#1879) --- test/__init__.py | 2 +- test/asynchronous/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 1a17ff14c5..af12bc032a 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -313,7 +313,7 @@ def _init_client(self): params = self.cmd_line["parsed"].get("setParameter", {}) if params.get("enableTestCommands") == "1": self.test_commands_enabled = True - self.has_ipv6 = self._server_started_with_ipv6() + self.has_ipv6 = self._server_started_with_ipv6() self.is_mongos = (self.hello).get("msg") == "isdbgrid" if self.is_mongos: diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 0d94331587..2a44785b2f 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -313,7 +313,7 @@ async def _init_client(self): params = self.cmd_line["parsed"].get("setParameter", {}) if params.get("enableTestCommands") == "1": self.test_commands_enabled = True - self.has_ipv6 = await self._server_started_with_ipv6() + self.has_ipv6 = await self._server_started_with_ipv6() self.is_mongos = (await self.hello).get("msg") == "isdbgrid" if self.is_mongos: From 15b22651ec9b167148cc228b1d2704bd2a3b0a41 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 30 Sep 2024 18:28:59 -0700 Subject: [PATCH 1505/2111] PYTHON-4801 Add beta warning to async tutorial (#1884) --- doc/async-tutorial.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/async-tutorial.rst b/doc/async-tutorial.rst index caa277f9d8..2ccf011d8e 100644 --- a/doc/async-tutorial.rst +++ b/doc/async-tutorial.rst @@ -1,6 +1,11 @@ Async Tutorial ============== +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + .. code-block:: pycon from pymongo import AsyncMongoClient From 545b88cbd376a7900b1cab921716ed9c291efb73 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 30 Sep 2024 20:42:28 -0500 Subject: [PATCH 1506/2111] PYTHON-4800 Add changelog for 4.10.0 (#1883) --- doc/changelog.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index dfb3c79827..3b7ddd1553 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,25 @@ Changelog ========= +Changes in Version 4.10.0 +------------------------- + +- Added provisional **(BETA)** support for a new Binary BSON subtype (9) used for efficient storage and retrieval of vectors: + densely packed arrays of numbers, all of the same type. + This includes new methods :meth:`~bson.binary.Binary.from_vector` and :meth:`~bson.binary.Binary.as_vector`. +- Added C extension use to client metadata, for example: ``{"driver": {"name": "PyMongo|c", "version": "4.10.0"}, ...}`` +- Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock. +- Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured. + +Issues Resolved +............... + +See the `PyMongo 4.10 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40553 + + Changes in Version 4.9.0 ------------------------- From ae6cfd6d102d885ac6b0873d31f0dac139b1ddae Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Mon, 30 Sep 2024 22:13:09 -0400 Subject: [PATCH 1507/2111] [DRIVERS-2926] [PYTHON-4577] BSON Binary Vector Subtype Support (#1813) Co-authored-by: Steven Silvester Co-authored-by: Steven Silvester --- .evergreen/resync-specs.sh | 3 + bson/binary.py | 152 +++++++++++++++++++++++- doc/api/bson/binary.rst | 8 ++ doc/changelog.rst | 1 - test/bson_binary_vector/float32.json | 42 +++++++ test/bson_binary_vector/int8.json | 57 +++++++++ test/bson_binary_vector/packed_bit.json | 50 ++++++++ test/bson_corpus/binary.json | 30 +++++ test/test_bson.py | 81 ++++++++++++- test/test_bson_binary_vector.py | 105 ++++++++++++++++ 10 files changed, 519 insertions(+), 10 deletions(-) create mode 100644 test/bson_binary_vector/float32.json create mode 100644 test/bson_binary_vector/int8.json create mode 100644 test/bson_binary_vector/packed_bit.json create mode 100644 test/test_bson_binary_vector.py diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index ac69449729..dca116c2d3 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -76,6 +76,9 @@ do atlas-data-lake-testing|data_lake) cpjson atlas-data-lake-testing/tests/ data_lake ;; + bson-binary-vector|bson_binary_vector) + cpjson bson-binary-vector/tests/ bson_binary_vector + ;; bson-corpus|bson_corpus) cpjson bson-corpus/tests/ bson_corpus ;; diff --git a/bson/binary.py b/bson/binary.py index 5fe1bacd16..47c52d4892 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -13,7 +13,10 @@ # limitations under the License. from __future__ import annotations -from typing import TYPE_CHECKING, Any, Tuple, Type, Union +import struct +from dataclasses import dataclass +from enum import Enum +from typing import TYPE_CHECKING, Any, Sequence, Tuple, Type, Union from uuid import UUID """Tools for representing BSON binary data. @@ -191,21 +194,75 @@ class UuidRepresentation: """ +VECTOR_SUBTYPE = 9 +"""**(BETA)** BSON binary subtype for densely packed vector data. + +.. versionadded:: 4.10 +""" + + USER_DEFINED_SUBTYPE = 128 """BSON binary subtype for any user defined structure. """ +class BinaryVectorDtype(Enum): + """**(BETA)** Datatypes of vector subtype. + + :param FLOAT32: (0x27) Pack list of :class:`float` as float32 + :param INT8: (0x03) Pack list of :class:`int` in [-128, 127] as signed int8 + :param PACKED_BIT: (0x10) Pack list of :class:`int` in [0, 255] as unsigned uint8 + + The `PACKED_BIT` value represents a special case where vector values themselves + can only be of two values (0 or 1) but these are packed together into groups of 8, + a byte. In Python, these are displayed as ints in range [0, 255] + + Each value is of type bytes with a length of one. + + .. versionadded:: 4.10 + """ + + INT8 = b"\x03" + FLOAT32 = b"\x27" + PACKED_BIT = b"\x10" + + +@dataclass +class BinaryVector: + """**(BETA)** Vector of numbers along with metadata for binary interoperability. + .. versionadded:: 4.10 + """ + + __slots__ = ("data", "dtype", "padding") + + def __init__(self, data: Sequence[float | int], dtype: BinaryVectorDtype, padding: int = 0): + """ + :param data: Sequence of numbers representing the mathematical vector. + :param dtype: The data type stored in binary + :param padding: The number of bits in the final byte that are to be ignored + when a vector element's size is less than a byte + and the length of the vector is not a multiple of 8. + """ + self.data = data + self.dtype = dtype + self.padding = padding + + class Binary(bytes): """Representation of BSON binary data. - This is necessary because we want to represent Python strings as - the BSON string type. We need to wrap binary data so we can tell + We want to represent Python strings as the BSON string type. + We need to wrap binary data so that we can tell the difference between what should be considered binary data and what should be considered a string when we encode to BSON. - Raises TypeError if `data` is not an instance of :class:`bytes` - or `subtype` is not an instance of :class:`int`. + **(BETA)** Subtype 9 provides a space-efficient representation of 1-dimensional vector data. + Its data is prepended with two bytes of metadata. + The first (dtype) describes its data type, such as float32 or int8. + The second (padding) prescribes the number of bits to ignore in the final byte. + This is relevant when the element size of the dtype is not a multiple of 8. + + Raises TypeError if `subtype` is not an instance of :class:`int`. Raises ValueError if `subtype` is not in [0, 256). .. note:: @@ -218,7 +275,10 @@ class Binary(bytes): to use .. versionchanged:: 3.9 - Support any bytes-like type that implements the buffer protocol. + Support any bytes-like type that implements the buffer protocol. + + .. versionchanged:: 4.10 + **(BETA)** Addition of vector subtype. """ _type_marker = 5 @@ -337,6 +397,86 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" ) + @classmethod + def from_vector( + cls: Type[Binary], + vector: list[int, float], + dtype: BinaryVectorDtype, + padding: int = 0, + ) -> Binary: + """**(BETA)** Create a BSON :class:`~bson.binary.Binary` of Vector subtype from a list of Numbers. + + To interpret the representation of the numbers, a data type must be included. + See :class:`~bson.binary.BinaryVectorDtype` for available types and descriptions. + + The dtype and padding are prepended to the binary data's value. + + :param vector: List of values + :param dtype: Data type of the values + :param padding: For fractional bytes, number of bits to ignore at end of vector. + :return: Binary packed data identified by dtype and padding. + + .. versionadded:: 4.10 + """ + if dtype == BinaryVectorDtype.INT8: # pack ints in [-128, 127] as signed int8 + format_str = "b" + if padding: + raise ValueError(f"padding does not apply to {dtype=}") + elif dtype == BinaryVectorDtype.PACKED_BIT: # pack ints in [0, 255] as unsigned uint8 + format_str = "B" + elif dtype == BinaryVectorDtype.FLOAT32: # pack floats as float32 + format_str = "f" + if padding: + raise ValueError(f"padding does not apply to {dtype=}") + else: + raise NotImplementedError("%s not yet supported" % dtype) + + metadata = struct.pack(" BinaryVector: + """**(BETA)** From the Binary, create a list of numbers, along with dtype and padding. + + :return: BinaryVector + + .. versionadded:: 4.10 + """ + + if self.subtype != VECTOR_SUBTYPE: + raise ValueError(f"Cannot decode subtype {self.subtype} as a vector.") + + position = 0 + dtype, padding = struct.unpack_from(" int: """Subtype of this binary data.""" diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index c933a687b9..084fd02d50 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -21,6 +21,14 @@ .. autoclass:: UuidRepresentation :members: + .. autoclass:: BinaryVectorDtype + :members: + :show-inheritance: + + .. autoclass:: BinaryVector + :members: + + .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) :members: :show-inheritance: diff --git a/doc/changelog.rst b/doc/changelog.rst index 3b7ddd1553..6c8b8261ac 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -19,7 +19,6 @@ in this release. .. _PyMongo 4.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40553 - Changes in Version 4.9.0 ------------------------- diff --git a/test/bson_binary_vector/float32.json b/test/bson_binary_vector/float32.json new file mode 100644 index 0000000000..bbbe00b758 --- /dev/null +++ b/test/bson_binary_vector/float32.json @@ -0,0 +1,42 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype FLOAT32", + "test_key": "vector", + "tests": [ + { + "description": "Simple Vector FLOAT32", + "valid": true, + "vector": [127.0, 7.0], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1C00000005766563746F72000A0000000927000000FE420000E04000" + }, + { + "description": "Empty Vector FLOAT32", + "valid": true, + "vector": [], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009270000" + }, + { + "description": "Infinity Vector FLOAT32", + "valid": true, + "vector": ["-inf", 0.0, "inf"], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "2000000005766563746F72000E000000092700000080FF000000000000807F00" + }, + { + "description": "FLOAT32 with padding", + "valid": false, + "vector": [127.0, 7.0], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 3 + } + ] +} + diff --git a/test/bson_binary_vector/int8.json b/test/bson_binary_vector/int8.json new file mode 100644 index 0000000000..7529721e5e --- /dev/null +++ b/test/bson_binary_vector/int8.json @@ -0,0 +1,57 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype INT8", + "test_key": "vector", + "tests": [ + { + "description": "Simple Vector INT8", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0, + "canonical_bson": "1600000005766563746F7200040000000903007F0700" + }, + { + "description": "Empty Vector INT8", + "valid": true, + "vector": [], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009030000" + }, + { + "description": "Overflow Vector INT8", + "valid": false, + "vector": [128], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0 + }, + { + "description": "Underflow Vector INT8", + "valid": false, + "vector": [-129], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0 + }, + { + "description": "INT8 with padding", + "valid": false, + "vector": [127, 7], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 3 + }, + { + "description": "INT8 with float inputs", + "valid": false, + "vector": [127.77, 7.77], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0 + } + ] +} + diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json new file mode 100644 index 0000000000..a41cd593f5 --- /dev/null +++ b/test/bson_binary_vector/packed_bit.json @@ -0,0 +1,50 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype PACKED_BIT", + "test_key": "vector", + "tests": [ + { + "description": "Simple Vector PACKED_BIT", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1600000005766563746F7200040000000910007F0700" + }, + { + "description": "Empty Vector PACKED_BIT", + "valid": true, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009100000" + }, + { + "description": "PACKED_BIT with padding", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000910037F0700" + }, + { + "description": "Overflow Vector PACKED_BIT", + "valid": false, + "vector": [256], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Underflow Vector PACKED_BIT", + "valid": false, + "vector": [-1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + } + ] +} + diff --git a/test/bson_corpus/binary.json b/test/bson_corpus/binary.json index 20aaef743b..0e0056f3a2 100644 --- a/test/bson_corpus/binary.json +++ b/test/bson_corpus/binary.json @@ -74,6 +74,36 @@ "description": "$type query operator (conflicts with legacy $binary form with $type field)", "canonical_bson": "180000000378001000000010247479706500020000000000", "canonical_extjson": "{\"x\" : { \"$type\" : {\"$numberInt\": \"2\"}}}" + }, + { + "description": "subtype 0x09 Vector FLOAT32", + "canonical_bson": "170000000578000A0000000927000000FE420000E04000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwAAAP5CAADgQA==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector INT8", + "canonical_bson": "11000000057800040000000903007F0700", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwB/Bw==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector PACKED_BIT", + "canonical_bson": "11000000057800040000000910007F0700", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAB/Bw==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) FLOAT32", + "canonical_bson": "0F0000000578000200000009270000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwA=\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) INT8", + "canonical_bson": "0F0000000578000200000009030000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwA=\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) PACKED_BIT", + "canonical_bson": "0F0000000578000200000009100000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAA=\", \"subType\": \"09\"}}}" } ], "decodeErrors": [ diff --git a/test/test_bson.py b/test/test_bson.py index a0190ef2d8..96aa897d19 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -49,8 +49,9 @@ decode_iter, encode, is_valid, + json_util, ) -from bson.binary import USER_DEFINED_SUBTYPE, Binary, UuidRepresentation +from bson.binary import USER_DEFINED_SUBTYPE, Binary, BinaryVectorDtype, UuidRepresentation from bson.code import Code from bson.codec_options import CodecOptions, DatetimeConversion from bson.datetime_ms import _DATETIME_ERROR_SUGGESTION @@ -148,6 +149,9 @@ def helper(doc): helper({"a binary": Binary(b"test", 128)}) helper({"a binary": Binary(b"test", 254)}) helper({"another binary": Binary(b"test", 2)}) + helper({"binary packed bit vector": Binary(b"\x10\x00\x7f\x07", 9)}) + helper({"binary int8 vector": Binary(b"\x03\x00\x7f\x07", 9)}) + helper({"binary float32 vector": Binary(b"'\x00\x00\x00\xfeB\x00\x00\xe0@", 9)}) helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))])) helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))])) helper({"big float": float(10000000000)}) @@ -447,6 +451,20 @@ def test_basic_encode(self): encode({"test": Binary(b"test", 128)}), b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x80\x74\x65\x73\x74\x00", ) + self.assertEqual( + encode({"vector_int8": Binary.from_vector([-128, -1, 127], BinaryVectorDtype.INT8)}), + b"\x1c\x00\x00\x00\x05vector_int8\x00\x05\x00\x00\x00\t\x03\x00\x80\xff\x7f\x00", + ) + self.assertEqual( + encode({"vector_bool": Binary.from_vector([1, 127], BinaryVectorDtype.PACKED_BIT)}), + b"\x1b\x00\x00\x00\x05vector_bool\x00\x04\x00\x00\x00\t\x10\x00\x01\x7f\x00", + ) + self.assertEqual( + encode( + {"vector_float32": Binary.from_vector([-1.1, 1.1e10], BinaryVectorDtype.FLOAT32)} + ), + b"$\x00\x00\x00\x05vector_float32\x00\n\x00\x00\x00\t'\x00\xcd\xcc\x8c\xbf\xac\xe9#P\x00", + ) self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") self.assertEqual( encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), @@ -711,9 +729,66 @@ def test_uuid_legacy(self): transformed = bin.as_uuid(UuidRepresentation.PYTHON_LEGACY) self.assertEqual(id, transformed) - # The C extension was segfaulting on unicode RegExs, so we have this test - # that doesn't really test anything but the lack of a segfault. + def test_vector(self): + """Tests of subtype 9""" + # We start with valid cases, across the 3 dtypes implemented. + # Work with a simple vector that can be interpreted as int8, float32, or ubyte + list_vector = [127, 7] + # As INT8, vector has length 2 + binary_vector = Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + vector = binary_vector.as_vector() + assert vector.data == list_vector + # test encoding roundtrip + assert {"vector": binary_vector} == decode(encode({"vector": binary_vector})) + # test json roundtrip + assert binary_vector == json_util.loads(json_util.dumps(binary_vector)) + + # For vectors of bits, aka PACKED_BIT type, vector has length 8 * 2 + packed_bit_binary = Binary.from_vector(list_vector, BinaryVectorDtype.PACKED_BIT) + packed_bit_vec = packed_bit_binary.as_vector() + assert packed_bit_vec.data == list_vector + + # A padding parameter permits vectors of length that aren't divisible by 8 + # The following ignores the last 3 bits in list_vector, + # hence it's length is 8 * len(list_vector) - padding + padding = 3 + padded_vec = Binary.from_vector(list_vector, BinaryVectorDtype.PACKED_BIT, padding=padding) + assert padded_vec.as_vector().data == list_vector + # To visualize how this looks as a binary vector.. + uncompressed = "" + for val in list_vector: + uncompressed += format(val, "08b") + assert uncompressed[:-padding] == "0111111100000" + + # It is worthwhile explicitly showing the values encoded to BSON + padded_doc = {"padded_vec": padded_vec} + assert ( + encode(padded_doc) + == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x07\x00" + ) + # and dumped to json + assert ( + json_util.dumps(padded_doc) + == '{"padded_vec": {"$binary": {"base64": "EAN/Bw==", "subType": "09"}}}' + ) + + # FLOAT32 is also implemented + float_binary = Binary.from_vector(list_vector, BinaryVectorDtype.FLOAT32) + assert all(isinstance(d, float) for d in float_binary.as_vector().data) + + # Now some invalid cases + for x in [-1, 257]: + try: + Binary.from_vector([x], BinaryVectorDtype.PACKED_BIT) + except Exception as exc: + self.assertTrue(isinstance(exc, struct.error)) + else: + self.fail("Failed to raise an exception.") + def test_unicode_regex(self): + """Tests we do not get a segfault for C extension on unicode RegExs. + This had been happening. + """ regex = re.compile("revisi\xf3n") decode(encode({"regex": regex})) diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py new file mode 100644 index 0000000000..00c82bbb65 --- /dev/null +++ b/test/test_bson_binary_vector.py @@ -0,0 +1,105 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import binascii +import codecs +import json +import struct +from pathlib import Path +from test import unittest + +from bson import decode, encode +from bson.binary import Binary, BinaryVectorDtype + +_TEST_PATH = Path(__file__).parent / "bson_binary_vector" + + +class TestBSONBinaryVector(unittest.TestCase): + """Runs Binary Vector subtype tests. + + Follows the style of the BSON corpus specification tests. + Tests are automatically generated on import + from json files in _TEST_PATH via `create_tests`. + The actual tests are defined in the inner function `run_test` + of the test generator `create_test`.""" + + +def create_test(case_spec): + """Create standard test given specification in json. + + We use the naming convention expected (exp) and observed (obj) + to differentiate what is in the json (expected or suffix _exp) + from what is produced by the API (observed or suffix _obs) + """ + test_key = case_spec.get("test_key") + + def run_test(self): + for test_case in case_spec.get("tests", []): + description = test_case["description"] + vector_exp = test_case["vector"] + dtype_hex_exp = test_case["dtype_hex"] + dtype_alias_exp = test_case.get("dtype_alias") + padding_exp = test_case.get("padding", 0) + canonical_bson_exp = test_case.get("canonical_bson") + # Convert dtype hex string into bytes + dtype_exp = BinaryVectorDtype(int(dtype_hex_exp, 16).to_bytes(1, byteorder="little")) + + if test_case["valid"]: + # Convert bson string to bytes + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + # Handle special float cases like '-inf' + if dtype_exp in [BinaryVectorDtype.FLOAT32]: + vector_exp = [float(x) for x in vector_exp] + + # Test round-tripping canonical bson. + self.assertEqual(encode(decoded_doc), cB_exp, description) + + # Test BSON to Binary Vector + vector_obs = binary_obs.as_vector() + self.assertEqual(vector_obs.dtype, dtype_exp, description) + if dtype_alias_exp: + self.assertEqual( + vector_obs.dtype, BinaryVectorDtype[dtype_alias_exp], description + ) + self.assertEqual(vector_obs.data, vector_exp, description) + self.assertEqual(vector_obs.padding, padding_exp, description) + + # Test Binary Vector to BSON + vector_exp = Binary.from_vector(vector_exp, dtype_exp, padding_exp) + cB_obs = binascii.hexlify(encode({test_key: vector_exp})).decode().upper() + self.assertEqual(cB_obs, canonical_bson_exp, description) + + else: + with self.assertRaises((struct.error, ValueError), msg=description): + Binary.from_vector(vector_exp, dtype_exp, padding_exp) + + return run_test + + +def create_tests(): + for filename in _TEST_PATH.glob("*.json"): + with codecs.open(str(filename), encoding="utf-8") as test_file: + test_method = create_test(json.load(test_file)) + setattr(TestBSONBinaryVector, "test_" + filename.stem, test_method) + + +create_tests() + + +if __name__ == "__main__": + unittest.main() From 4713afa910f12d013571778a12fda2287d0bf19d Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 02:14:15 +0000 Subject: [PATCH 1508/2111] BUMP 4.10.0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 5ff72d6cc8..7cc4bb8e1d 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.10.0.dev0" +__version__ = "4.10.0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From c0f7810d56555c8a285beaa9aa5fe6d2b7185eff Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 02:31:13 +0000 Subject: [PATCH 1509/2111] BUMP 4.11.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 7cc4bb8e1d..3de24a8e14 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.10.0" +__version__ = "4.11.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 8791aa00ea8f87ceae5e5ec601b75c677a6754ab Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 1 Oct 2024 08:39:57 -0700 Subject: [PATCH 1510/2111] PYTHON-4790 Migrate test_retryable_writes.py to async (#1876) --- test/asynchronous/helpers.py | 360 +++++++++++ test/asynchronous/test_retryable_writes.py | 694 +++++++++++++++++++++ test/helpers.py | 3 + test/test_retryable_writes.py | 48 +- test/utils.py | 6 + tools/synchro.py | 3 + 6 files changed, 1092 insertions(+), 22 deletions(-) create mode 100644 test/asynchronous/helpers.py create mode 100644 test/asynchronous/test_retryable_writes.py diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py new file mode 100644 index 0000000000..46f66af62d --- /dev/null +++ b/test/asynchronous/helpers.py @@ -0,0 +1,360 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared constants and helper methods for pymongo, bson, and gridfs test suites.""" +from __future__ import annotations + +import base64 +import gc +import multiprocessing +import os +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +import unittest +import warnings +from asyncio import iscoroutinefunction + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from functools import wraps +from typing import Any, Callable, Dict, Generator, no_type_check +from unittest import SkipTest + +from bson.son import SON +from pymongo import common, message +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.uri_parser import parse_uri + +if HAVE_SSL: + import ssl + +_IS_SYNC = False + +# Enable debug output for uncollectable objects. PyPy does not have set_debug. +if hasattr(gc, "set_debug"): + gc.set_debug( + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) + +# The host and port of a single mongod or mongos, or the seed host +# for a replica set. +host = os.environ.get("DB_IP", "localhost") +port = int(os.environ.get("DB_PORT", 27017)) +IS_SRV = "mongodb+srv" in host + +db_user = os.environ.get("DB_USER", "user") +db_pwd = os.environ.get("DB_PASSWORD", "password") + +CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) + +TLS_OPTIONS: Dict = {"tls": True} +if CLIENT_PEM: + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM +if CA_PEM: + TLS_OPTIONS["tlsCAFile"] = CA_PEM + +COMPRESSORS = os.environ.get("COMPRESSORS") +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) +TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) +SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") +MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") + +if TEST_LOADBALANCER: + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd +elif TEST_SERVERLESS: + TEST_LOADBALANCER = True + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + TLS_OPTIONS = {"tls": True} + # Spec says serverless tests must be run with compression. + COMPRESSORS = COMPRESSORS or "zlib" + + +# Shared KMS data. +LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AWS_CREDS_2 = { + "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} + +# Ensure Evergreen metadata doesn't result in truncation +os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") + + +def is_server_resolvable(): + """Returns True if 'server' is resolvable.""" + socket_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(1) + try: + try: + socket.gethostbyname("server") + return True + except OSError: + return False + finally: + socket.setdefaulttimeout(socket_timeout) + + +def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return authdb.command(cmd) + + +class client_knobs: + def __init__( + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): + self.heartbeat_frequency = heartbeat_frequency + self.min_heartbeat_interval = min_heartbeat_interval + self.kill_cursor_frequency = kill_cursor_frequency + self.events_queue_frequency = events_queue_frequency + + self.old_heartbeat_frequency = None + self.old_min_heartbeat_interval = None + self.old_kill_cursor_frequency = None + self.old_events_queue_frequency = None + self._enabled = False + self._stack = None + + def enable(self): + self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY + self.old_min_heartbeat_interval = common.MIN_HEARTBEAT_INTERVAL + self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY + self.old_events_queue_frequency = common.EVENTS_QUEUE_FREQUENCY + + if self.heartbeat_frequency is not None: + common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency + + if self.min_heartbeat_interval is not None: + common.MIN_HEARTBEAT_INTERVAL = self.min_heartbeat_interval + + if self.kill_cursor_frequency is not None: + common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency + + if self.events_queue_frequency is not None: + common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. + self._stack = "".join(traceback.format_stack()) + + def __enter__(self): + self.enable() + + @no_type_check + def disable(self): + common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency + common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval + common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency + common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + async def wrap(*args, **kwargs): + with self: + return await f(*args, **kwargs) + + return wrap + + return make_wrapper(func) + + def __del__(self): + if self._enabled: + msg = ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack, + ) + ) + self.disable() + raise Exception(msg) + + +def _all_users(db): + return {u["user"] for u in db.command("usersInfo").get("users", [])} + + +def sanitize_cmd(cmd): + cp = cmd.copy() + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) + if MONGODB_API_VERSION: + # Stable API parameters + cp.pop("apiVersion", None) + # OP_MSG encoding may move the payload type one field to the + # end of the command. Do the same here. + name = next(iter(cp)) + try: + identifier = message._FIELD_MAP[name] + docs = cp.pop(identifier) + cp[identifier] = docs + except KeyError: + pass + return cp + + +def sanitize_reply(reply): + cp = reply.copy() + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) + return cp + + +def print_thread_tracebacks() -> None: + """Print all Python thread tracebacks.""" + for thread_id, frame in sys._current_frames().items(): + sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") + traceback.print_stack(frame, file=sys.stderr) + + +def print_thread_stacks(pid: int) -> None: + """Print all C-level thread stacks for a given process id.""" + if sys.platform == "darwin": + cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] + else: + cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] + + try: + res = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" + ) + except Exception as exc: + sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") + else: + sys.stderr.write(res.stdout) + + +# Global knobs to speed up the test suite. +global_knobs = client_knobs(events_queue_frequency=0.05) + + +def _get_executors(topology): + executors = [] + for server in topology._servers.values(): + # Some MockMonitor do not have an _executor. + if hasattr(server._monitor, "_executor"): + executors.append(server._monitor._executor) + if hasattr(server._monitor, "_rtt_monitor"): + executors.append(server._monitor._rtt_monitor._executor) + executors.append(topology._Topology__events_executor) + if topology._srv_monitor: + executors.append(topology._srv_monitor._executor) + + return [e for e in executors if e is not None] + + +def print_running_topology(topology): + running = [e for e in _get_executors(topology) if not e._stopped] + if running: + print( + "WARNING: found Topology with running threads:\n" + f" Threads: {running}\n" + f" Topology: {topology}\n" + f" Creation traceback:\n{topology._settings._stack}" + ) + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +# Helper method to workaround https://bugs.python.org/issue21724 +def clear_warning_registry(): + """Clear the __warningregistry__ for all modules.""" + for _, module in list(sys.modules.items()): + if hasattr(module, "__warningregistry__"): + module.__warningregistry__ = {} # type:ignore[attr-defined] + + +class SystemCertsPatcher: + def __init__(self, ca_certs): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") + # Tell OpenSSL where CA certificates live. + os.environ["SSL_CERT_FILE"] = ca_certs + + def disable(self): + if self.original_certs is None: + os.environ.pop("SSL_CERT_FILE") + else: + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py new file mode 100644 index 0000000000..accbbd003f --- /dev/null +++ b/test/asynchronous/test_retryable_writes.py @@ -0,0 +1,694 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable writes.""" +from __future__ import annotations + +import asyncio +import copy +import pprint +import sys +import threading + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + SkipTest, + async_client_context, + unittest, +) +from test.asynchronous.helpers import client_knobs +from test.utils import ( + CMAPListener, + DeprecationFilter, + EventListener, + OvertCommandListener, + async_set_fail_point, +) +from test.version import Version + +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + AutoReconnect, + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ( + CommandSucceededEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super().succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + async_client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) + + +def retryable_single_statement_ops(coll): + return [ + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({})]], {}), + (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), + ] + + +def non_retryable_single_statement_ops(coll): + return [ + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_many, [{}], {}), + ] + + +class IgnoreDeprecationsTest(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + deprecation_filter: DeprecationFilter + + @classmethod + async def _setup_class(cls): + await super()._setup_class() + cls.deprecation_filter = DeprecationFilter() + + @classmethod + async def _tearDown_class(cls): + cls.deprecation_filter.stop() + await super()._tearDown_class() + + +class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): + knobs: client_knobs + + @classmethod + async def _setup_class(cls): + await super()._setup_class() + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + cls.knobs.enable() + cls.client = await cls.unmanaged_async_rs_or_single_client(retryWrites=True) + cls.db = cls.client.pymongo_test + + @classmethod + async def _tearDown_class(cls): + cls.knobs.disable() + await cls.client.close() + await super()._tearDown_class() + + @async_client_context.require_no_standalone + async def test_actionable_error_message(self): + if async_client_context.storage_engine != "mmapv1": + raise SkipTest("This cluster is not running MMAPv1") + + expected_msg = ( + "This MongoDB deployment does not support retryable " + "writes. Please add retryWrites=false to your " + "connection string." + ) + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + with self.assertRaisesRegex(OperationFailure, expected_msg): + await method(*args, **kwargs) + + +class TestRetryableWrites(IgnoreDeprecationsTest): + listener: OvertCommandListener + knobs: client_knobs + + @classmethod + @async_client_context.require_no_mmap + async def _setup_class(cls): + await super()._setup_class() + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + cls.knobs.enable() + cls.listener = OvertCommandListener() + cls.client = await cls.unmanaged_async_rs_or_single_client( + retryWrites=True, event_listeners=[cls.listener] + ) + cls.db = cls.client.pymongo_test + + @classmethod + async def _tearDown_class(cls): + cls.knobs.disable() + await cls.client.close() + await super()._tearDown_class() + + async def asyncSetUp(self): + if async_client_context.is_rs and async_client_context.test_commands_enabled: + await self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) + + async def asyncTearDown(self): + if async_client_context.is_rs and async_client_context.test_commands_enabled: + await self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) + + async def test_supported_single_statement_no_retry(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=False, event_listeners=[listener]) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + await method(*args, **kwargs) + for event in listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + @async_client_context.require_no_standalone + async def test_supported_single_statement_supported_cluster(self): + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + commands_started = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), 1, msg) + first_attempt = commands_started[0] + self.assertIn( + "lsid", + first_attempt.command, + f"{msg} sent no lsid with {first_attempt.command_name}", + ) + initial_session_id = first_attempt.command["lsid"] + self.assertIn( + "txnNumber", + first_attempt.command, + f"{msg} sent no txnNumber with {first_attempt.command_name}", + ) + + # There should be no retry when the failpoint is not active. + if async_client_context.is_mongos or not async_client_context.test_commands_enabled: + self.assertEqual(len(commands_started), 1) + continue + + initial_transaction_id = first_attempt.command["txnNumber"] + retry_attempt = commands_started[1] + self.assertIn( + "lsid", + retry_attempt.command, + f"{msg} sent no lsid with {first_attempt.command_name}", + ) + self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) + self.assertIn( + "txnNumber", + retry_attempt.command, + f"{msg} sent no txnNumber with {first_attempt.command_name}", + ) + self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) + + async def test_supported_single_statement_unsupported_cluster(self): + if async_client_context.is_rs or async_client_context.is_mongos: + raise SkipTest("This cluster supports retryable writes") + + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + + for event in self.listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_unsupported_single_statement(self): + coll = self.db.retryable_write_test + await coll.insert_many([{}, {}]) + coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) + for event in started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_server_selection_timeout_not_retried(self): + """A ServerSelectionTimeoutError is not retried.""" + listener = OvertCommandListener() + client = self.simple_client( + "somedomainthatdoesntexist.org", + serverSelectionTimeoutMS=1, + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + with self.assertRaises(ServerSelectionTimeoutError, msg=msg): + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 0, msg) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_retry_timeout_raises_original_error(self): + """A ServerSelectionTimeoutError on the retry attempt raises the + original error. + """ + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def mock_select_server(*args, **kwargs): + server = select_server(*args, **kwargs) + + def raise_error(*args, **kwargs): + raise ServerSelectionTimeoutError("No primary available for writes") + + # Raise ServerSelectionTimeout on the retry attempt. + topology.select_server = raise_error + return server + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + topology.select_server = mock_select_server + with self.assertRaises(ConnectionFailure, msg=msg): + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_batch_splitting(self): + """Test retry succeeds after failures during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + await coll.delete_many({}) + self.listener.reset() + bulk_result = await coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) + # Each command should fail and be retried. + # With OP_MSG 3 inserts are one batch. 2 updates another. + # 2 deletes a third. + self.assertEqual(len(self.listener.started_events), 6) + self.assertEqual(await coll.find_one(), {"_id": 1, "count": 1}) + # Assert the final result + expected_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 3, + "nUpserted": 0, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [], + } + self.assertEqual(bulk_result.bulk_api_result, expected_result) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_batch_splitting_retry_fails(self): + """Test retry fails during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + await coll.delete_many({}) + await self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. + ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) + self.listener.reset() + async with self.client.start_session() as session: + initial_txn = session._transaction_id + try: + await coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) + except ConnectionFailure: + pass + else: + self.fail("bulk_write should have failed") + + started = self.listener.started_events + self.assertEqual(len(started), 3) + self.assertEqual(len(self.listener.succeeded_events), 1) + expected_txn = Int64(initial_txn + 1) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) + expected_txn = Int64(initial_txn + 2) + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") + self.assertEqual(started[1].command, started[2].command) + final_txn = session._transaction_id + self.assertEqual(final_txn, expected_txn) + self.assertEqual(await coll.find_one(projection={"_id": True}), {"_id": 1}) + + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_writes_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + "appName": "retryableWriteTest", + }, + } + + mongos_clients = [] + + for mongos in async_client_context.mongos_seeds().split(","): + client = await self.async_rs_or_single_client(mongos) + await async_set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + async_client_context.mongos_seeds(), + appName="retryableWriteTest", + event_listeners=[listener], + retryWrites=True, + ) + + with self.assertRaises(AutoReconnect): + await client.t.t.insert_one({"x": 1}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + await async_set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + +class TestWriteConcernError(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + fail_insert: dict + + @classmethod + @async_client_context.require_replica_set + @async_client_context.require_no_mmap + @async_client_context.require_failCommand_fail_point + async def _setup_class(cls): + await super()._setup_class() + cls.fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } + + @async_client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_RetryableWriteError_error_label(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + + # Ensure collection exists. + await client.pymongo_test.testcoll.insert_one({}) + + async with self.fail_point(self.fail_insert): + with self.assertRaises(WriteConcernError) as cm: + await client.pymongo_test.testcoll.insert_one({}) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) + + if async_client_context.version >= Version(4, 4): + # In MongoDB 4.4+ we rely on the server returning the error label. + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) + + @async_client_context.require_version_min(4, 4) + async def test_RetryableWriteError_error_label_RawBSONDocument(self): + # using RawBSONDocument should not cause errorLabel parsing to fail + async with self.fail_point(self.fail_insert): + async with self.client.start_session() as s: + s._start_retryable_write() + result = await self.client.pymongo_test.command( + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._transaction_id, + session=s, + codec_options=DEFAULT_CODEC_OPTIONS.with_options( + document_class=RawBSONDocument + ), + ) + + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) + + +class InsertThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + async def run(self): + await self.collection.insert_one({}) + self.passed = True + + +class TestPoolPausedError(AsyncIntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + RUN_ON_SERVERLESS = False + + @async_client_context.require_sync + @async_client_context.require_failCommand_blockConnection + @async_client_context.require_retryable_writes + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], + }, + } + async with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + @async_client_context.require_sync + @async_client_context.require_failCommand_fail_point + @async_client_context.require_replica_set + @async_client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = await self.async_rs_or_single_client( + retryWrites=True, event_listeners=[cmd_listener] + ) + await client.test.test.drop() + cmd_listener.reset() + await client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + await client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + await client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + + +# TODO: Make this a real integration test where we stepdown the primary. +class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): + @async_client_context.require_replica_set + @async_client_context.require_no_mmap + async def test_increment_transaction_id_without_sending_command(self): + """Test that the txnNumber field is properly incremented, even when + the first attempt fails before sending the command. + """ + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def raise_connection_err_select_server(*args, **kwargs): + # Raise ConnectionFailure on the first attempt and perform + # normal selection on the retry attempt. + topology.select_server = select_server + raise ConnectionFailure("Connection refused") + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + listener.reset() + topology.select_server = raise_connection_err_select_server + async with client.start_session() as session: + kwargs = copy.deepcopy(kwargs) + kwargs["session"] = session + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + initial_txn_id = session._transaction_id + + # Each operation should fail on the first attempt and succeed + # on the second. + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command + sent_txn_id = retry_cmd["txnNumber"] + final_txn_id = session._transaction_id + self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) + self.assertEqual(sent_txn_id, final_txn_id, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/helpers.py b/test/helpers.py index b38b2e2980..bf6186d1a0 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -28,6 +28,7 @@ import traceback import unittest import warnings +from asyncio import iscoroutinefunction try: import ipaddress @@ -47,6 +48,8 @@ if HAVE_SSL: import ssl +_IS_SYNC = True + # Enable debug output for uncollectable objects. PyPy does not have set_debug. if hasattr(gc, "set_debug"): gc.set_debug( diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 89454ad236..5df6c41f7a 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -15,6 +15,7 @@ """Test retryable writes.""" from __future__ import annotations +import asyncio import copy import pprint import sys @@ -22,7 +23,13 @@ sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, client_knobs, unittest +from test import ( + IntegrationTest, + SkipTest, + client_context, + unittest, +) +from test.helpers import client_knobs from test.utils import ( CMAPListener, DeprecationFilter, @@ -61,6 +68,8 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class InsertEventListener(EventListener): def succeeded(self, event: CommandSucceededEvent) -> None: @@ -125,22 +134,22 @@ class IgnoreDeprecationsTest(IntegrationTest): deprecation_filter: DeprecationFilter @classmethod - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.deprecation_filter = DeprecationFilter() @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.deprecation_filter.stop() - super().tearDownClass() + super()._tearDown_class() class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): knobs: client_knobs @classmethod - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @@ -148,10 +157,10 @@ def setUpClass(cls): cls.db = cls.client.pymongo_test @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.knobs.disable() cls.client.close() - super().tearDownClass() + super()._tearDown_class() @client_context.require_no_standalone def test_actionable_error_message(self): @@ -174,8 +183,8 @@ class TestRetryableWrites(IgnoreDeprecationsTest): @classmethod @client_context.require_no_mmap - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() # Speed up the tests by decreasing the heartbeat frequency. cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) cls.knobs.enable() @@ -186,10 +195,10 @@ def setUpClass(cls): cls.db = cls.client.pymongo_test @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.knobs.disable() cls.client.close() - super().tearDownClass() + super()._tearDown_class() def setUp(self): if client_context.is_rs and client_context.test_commands_enabled: @@ -206,7 +215,6 @@ def tearDown(self): def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() client = self.rs_or_single_client(retryWrites=False, event_listeners=[listener]) - self.addCleanup(client.close) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" listener.reset() @@ -319,7 +327,6 @@ def test_retry_timeout_raises_original_error(self): """ listener = OvertCommandListener() client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) - self.addCleanup(client.close) topology = client._topology select_server = topology.select_server @@ -446,7 +453,6 @@ def test_retryable_writes_in_sharded_cluster_multiple_available(self): for mongos in client_context.mongos_seeds().split(","): client = self.rs_or_single_client(mongos) set_fail_point(client, fail_command) - self.addCleanup(client.close) mongos_clients.append(client) listener = OvertCommandListener() @@ -478,8 +484,8 @@ class TestWriteConcernError(IntegrationTest): @client_context.require_replica_set @client_context.require_no_mmap @client_context.require_failCommand_fail_point - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.fail_insert = { "configureFailPoint": "failCommand", "mode": {"times": 2}, @@ -494,7 +500,6 @@ def setUpClass(cls): def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) - self.addCleanup(client.close) # Ensure collection exists. client.pymongo_test.testcoll.insert_one({}) @@ -546,6 +551,7 @@ class TestPoolPausedError(IntegrationTest): RUN_ON_LOAD_BALANCER = False RUN_ON_SERVERLESS = False + @client_context.require_sync @client_context.require_failCommand_blockConnection @client_context.require_retryable_writes @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) @@ -555,7 +561,6 @@ def test_pool_paused_error_is_retryable(self): client = self.rs_or_single_client( maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] ) - self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() cmd_listener.reset() @@ -606,6 +611,7 @@ def test_pool_paused_error_is_retryable(self): failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) + @client_context.require_sync @client_context.require_failCommand_fail_point @client_context.require_replica_set @client_context.require_version_min( @@ -618,7 +624,6 @@ def test_returns_original_error_code( cmd_listener = InsertEventListener() client = self.rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) client.test.test.drop() - self.addCleanup(client.close) cmd_listener.reset() client.admin.command( { @@ -654,7 +659,6 @@ def test_increment_transaction_id_without_sending_command(self): """ listener = OvertCommandListener() client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) - self.addCleanup(client.close) topology = client._topology select_server = topology.select_server diff --git a/test/utils.py b/test/utils.py index 6eefd1c7ea..9615034899 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1157,3 +1157,9 @@ def set_fail_point(client, command_args): cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) client.admin.command(cmd) + + +async def async_set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await client.admin.command(cmd) diff --git a/tools/synchro.py b/tools/synchro.py index e0c194f962..c5b0afb643 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -104,6 +104,7 @@ "PyMongo|c|async": "PyMongo|c", "AsyncTestGridFile": "TestGridFile", "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", + "async_set_fail_point": "set_fail_point", } docstring_replacements: dict[tuple[str, str], str] = { @@ -173,6 +174,7 @@ def async_only_test(f: str) -> bool: converted_tests = [ "__init__.py", "conftest.py", + "helpers.py", "pymongo_mocks.py", "utils_spec_runner.py", "qcheck.py", @@ -191,6 +193,7 @@ def async_only_test(f: str) -> bool: "test_logger.py", "test_monitoring.py", "test_raw_bson.py", + "test_retryable_writes.py", "test_session.py", "test_transactions.py", ] From bfba5481a09b67fa3f5e9dcaf5e90d7913964406 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 1 Oct 2024 09:16:26 -0700 Subject: [PATCH 1511/2111] PYTHON-4789 Migrate test_retryable_reads.py to async (#1877) --- test/asynchronous/test_retryable_reads.py | 191 ++++++++++++++++++++++ test/test_retryable_reads.py | 6 +- tools/synchro.py | 1 + 3 files changed, 194 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_retryable_reads.py diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py new file mode 100644 index 0000000000..b2d86f5d84 --- /dev/null +++ b/test/asynchronous/test_retryable_reads.py @@ -0,0 +1,191 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable reads spec.""" +from __future__ import annotations + +import os +import pprint +import sys +import threading + +from pymongo.errors import AutoReconnect + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + async_client_context, + client_knobs, + unittest, +) +from test.utils import ( + CMAPListener, + OvertCommandListener, + async_set_fail_point, +) + +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) + +_IS_SYNC = False + + +class TestClientOptions(AsyncPyMongoTestCase): + async def test_default(self): + client = self.simple_client(connect=False) + self.assertEqual(client.options.retry_reads, True) + + async def test_kwargs(self): + client = self.simple_client(retryReads=True, connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client(retryReads=False, connect=False) + self.assertEqual(client.options.retry_reads, False) + + async def test_uri(self): + client = self.simple_client("mongodb://h/?retryReads=true", connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client("mongodb://h/?retryReads=false", connect=False) + self.assertEqual(client.options.retry_reads, False) + + +class FindThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + async def run(self): + await self.collection.find_one({}) + self.passed = True + + +class TestPoolPausedError(AsyncIntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + RUN_ON_SERVERLESS = False + + @async_client_context.require_sync + @async_client_context.require_failCommand_blockConnection + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flakey on PyPy") + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + }, + } + async with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + +class TestRetryableReads(AsyncIntegrationTest): + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "closeConnection": True, + "appName": "retryableReadTest", + }, + } + + mongos_clients = [] + + for mongos in async_client_context.mongos_seeds().split(","): + client = await self.async_rs_or_single_client(mongos) + await async_set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + async_client_context.mongos_seeds(), + appName="retryableReadTest", + event_listeners=[listener], + retryReads=True, + ) + + async with self.fail_point(fail_command): + with self.assertRaises(AutoReconnect): + await client.t.t.find_one({}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + await async_set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index b4fafe4652..d4951db5ee 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -44,8 +44,7 @@ PoolClearedEvent, ) -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") +_IS_SYNC = True class TestClientOptions(PyMongoTestCase): @@ -83,6 +82,7 @@ class TestPoolPausedError(IntegrationTest): RUN_ON_LOAD_BALANCER = False RUN_ON_SERVERLESS = False + @client_context.require_sync @client_context.require_failCommand_blockConnection @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): @@ -94,7 +94,6 @@ def test_pool_paused_error_is_retryable(self): client = self.rs_or_single_client( maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] ) - self.addCleanup(client.close) for _ in range(10): cmap_listener.reset() cmd_listener.reset() @@ -165,7 +164,6 @@ def test_retryable_reads_in_sharded_cluster_multiple_available(self): for mongos in client_context.mongos_seeds().split(","): client = self.rs_or_single_client(mongos) set_fail_point(client, fail_command) - self.addCleanup(client.close) mongos_clients.append(client) listener = OvertCommandListener() diff --git a/tools/synchro.py b/tools/synchro.py index c5b0afb643..3333b0de2e 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -193,6 +193,7 @@ def async_only_test(f: str) -> bool: "test_logger.py", "test_monitoring.py", "test_raw_bson.py", + "test_retryable_reads.py", "test_retryable_writes.py", "test_session.py", "test_transactions.py", From 7fbeca9793bb88561a8b9972df926193d54a1225 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Tue, 1 Oct 2024 15:06:12 -0400 Subject: [PATCH 1512/2111] [PYTHON-4803] Big endian fix for binary bson vectors (#1885) --- bson/binary.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index 47c52d4892..96b61b6dab 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -432,7 +432,7 @@ def from_vector( raise NotImplementedError("%s not yet supported" % dtype) metadata = struct.pack(" BinaryVector: @@ -454,7 +454,7 @@ def as_vector(self) -> BinaryVector: if dtype == BinaryVectorDtype.INT8: dtype_format = "b" - format_string = f"{n_values}{dtype_format}" + format_string = f"<{n_values}{dtype_format}" vector = list(struct.unpack_from(format_string, self, position)) return BinaryVector(vector, dtype, padding) @@ -465,13 +465,16 @@ def as_vector(self) -> BinaryVector: raise ValueError( "Corrupt data. N bytes for a float32 vector must be a multiple of 4." ) - vector = list(struct.unpack_from(f"{n_values}f", self, position)) + dtype_format = "f" + format_string = f"<{n_values}{dtype_format}" + vector = list(struct.unpack_from(format_string, self, position)) return BinaryVector(vector, dtype, padding) elif dtype == BinaryVectorDtype.PACKED_BIT: # data packed as uint8 dtype_format = "B" - unpacked_uint8s = list(struct.unpack_from(f"{n_values}{dtype_format}", self, position)) + format_string = f"<{n_values}{dtype_format}" + unpacked_uint8s = list(struct.unpack_from(format_string, self, position)) return BinaryVector(unpacked_uint8s, dtype, padding) else: From 02794079802f264be021707d20fc64e292ef74b7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Oct 2024 14:31:21 -0500 Subject: [PATCH 1513/2111] PYTHON-4806 Fix expected metadata in mockupdb tests (#1888) --- hatch.toml | 2 +- test/mockupdb/test_handshake.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/hatch.toml b/hatch.toml index d5293a1d7f..60bd0af014 100644 --- a/hatch.toml +++ b/hatch.toml @@ -43,7 +43,7 @@ features = ["test"] test = "pytest -v --durations=5 --maxfail=10 {args}" test-eg = "bash ./.evergreen/run-tests.sh {args}" test-async = "pytest -v --durations=5 --maxfail=10 -m default_async {args}" -test-mockupdb = ["pip install -U git+https://github.com/ajdavis/mongo-mockup-db@master", "test -m mockupdb"] +test-mockupdb = ["pip install -U git+https://github.com/mongodb-labs/mongo-mockup-db@master", "test -m mockupdb"] [envs.encryption] skip-install = true diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 19e10f9617..8193714a86 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -26,7 +26,7 @@ from bson.objectid import ObjectId -from pymongo import MongoClient +from pymongo import MongoClient, has_c from pymongo import version as pymongo_version from pymongo.errors import OperationFailure from pymongo.server_api import ServerApi, ServerApiVersion @@ -39,7 +39,11 @@ def _check_handshake_data(request): data = request["client"] assert data["application"] == {"name": "my app"} - assert data["driver"] == {"name": "PyMongo", "version": pymongo_version} + if has_c(): + name = "PyMongo|c" + else: + name = "PyMongo" + assert data["driver"] == {"name": name, "version": pymongo_version} # Keep it simple, just check these fields exist. assert "os" in data From 7848feb09a12bb6a14fb18deb8b873d8c2eff8a9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 1 Oct 2024 18:32:41 -0400 Subject: [PATCH 1514/2111] PYTHON-4786 - Fix UpdateResult.did_upsert TypeError (#1878) --- pymongo/results.py | 7 ++-- test/asynchronous/test_client_bulk_write.py | 40 +++++++++++++++++++++ test/asynchronous/test_collection.py | 13 +++++++ test/test_client_bulk_write.py | 40 +++++++++++++++++++++ test/test_collection.py | 13 +++++++ test/test_results.py | 22 ++++++++++++ 6 files changed, 133 insertions(+), 2 deletions(-) diff --git a/pymongo/results.py b/pymongo/results.py index b34f6c4926..d17ff1c3ea 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -171,9 +171,12 @@ def upserted_id(self) -> Any: @property def did_upsert(self) -> bool: - """Whether or not an upsert took place.""" + """Whether an upsert took place. + + .. versionadded:: 4.9 + """ assert self.__raw_result is not None - return len(self.__raw_result.get("upserted", {})) > 0 + return "upserted" in self.__raw_result class DeleteResult(_WriteResult): diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 80cfd30bde..9464337809 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -550,6 +550,46 @@ async def test_returns_error_if_auto_encryption_configured(self): "bulk_write does not currently support automatic encryption", context.exception._message ) + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless + async def test_upserted_result(self): + client = await self.async_rs_or_single_client() + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + models = [] + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": "a"}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + ) + ) + result = await client.bulk_write(models=models, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(result.update_results[0].did_upsert, True) + self.assertEqual(result.update_results[1].did_upsert, True) + self.assertEqual(result.update_results[2].did_upsert, False) + # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(AsyncIntegrationTest): diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 74a4a5151d..612090b69f 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -1444,6 +1444,19 @@ async def test_update_one(self): self.assertRaises(InvalidOperation, lambda: result.upserted_id) self.assertFalse(result.acknowledged) + async def test_update_result(self): + db = self.db + await db.drop_collection("test") + + result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = await db.test.update_one({"_id": None, "x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = await db.test.update_one({"_id": None}, {"$inc": {"x": 1}}) + self.assertEqual(result.did_upsert, False) + async def test_update_many(self): db = self.db await db.drop_collection("test") diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index d1aff03fc9..58b5015dd2 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -550,6 +550,46 @@ def test_returns_error_if_auto_encryption_configured(self): "bulk_write does not currently support automatic encryption", context.exception._message ) + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless + def test_upserted_result(self): + client = self.rs_or_single_client() + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + models = [] + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": "a"}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + ) + ) + result = client.bulk_write(models=models, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(result.update_results[0].did_upsert, True) + self.assertEqual(result.update_results[1].did_upsert, True) + self.assertEqual(result.update_results[2].did_upsert, False) + # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(IntegrationTest): diff --git a/test/test_collection.py b/test/test_collection.py index dab59cf1b2..a2c3b0b0b6 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1429,6 +1429,19 @@ def test_update_one(self): self.assertRaises(InvalidOperation, lambda: result.upserted_id) self.assertFalse(result.acknowledged) + def test_update_result(self): + db = self.db + db.drop_collection("test") + + result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = db.test.update_one({"_id": None, "x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = db.test.update_one({"_id": None}, {"$inc": {"x": 1}}) + self.assertEqual(result.did_upsert, False) + def test_update_many(self): db = self.db db.drop_collection("test") diff --git a/test/test_results.py b/test/test_results.py index 19e086a9a5..deb09d7ed4 100644 --- a/test/test_results.py +++ b/test/test_results.py @@ -122,6 +122,28 @@ def test_update_result(self): self.assertEqual(raw_result["n"], result.matched_count) self.assertEqual(raw_result["nModified"], result.modified_count) self.assertEqual(raw_result["upserted"], result.upserted_id) + self.assertEqual(result.did_upsert, True) + + raw_result_2 = { + "n": 1, + "nModified": 1, + "upserted": [ + {"index": 5, "_id": 1}, + ], + } + self.repr_test(UpdateResult, raw_result_2) + + result = UpdateResult(raw_result_2, True) + self.assertEqual(result.did_upsert, True) + + raw_result_3 = { + "n": 1, + "nModified": 1, + } + self.repr_test(UpdateResult, raw_result_3) + + result = UpdateResult(raw_result_3, True) + self.assertEqual(result.did_upsert, False) result = UpdateResult(raw_result, False) self.assertEqual(raw_result, result.raw_result) From 1c284307250dec4fb6b7b161d72e1876c067b4cb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Oct 2024 17:52:16 -0500 Subject: [PATCH 1515/2111] PYTHON-4808 Add changelog for 4.10.1 (#1890) --- doc/changelog.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 6c8b8261ac..76e91c2b27 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,23 @@ Changelog ========= +Changes in Version 4.10.1 +------------------------- + +Version 4.10.1 is a bug fix release. + +- Fixed a bug where :meth:`~pymongo.results.UpdateResult.did_upsert` would raise a ``TypeError``. +- Fixed Binary BSON subtype (9) support on big-endian operating systems (such as zSeries). + +Issues Resolved +............... + +See the `PyMongo 4.10.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40788 + + Changes in Version 4.10.0 ------------------------- From 77cd7ab9f6dc48e72a3bae94d2cca2e4200e6978 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 22:53:25 +0000 Subject: [PATCH 1516/2111] BUMP 4.10.1 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 3de24a8e14..c0232ba514 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.11.0.dev0" +__version__ = "4.10.1" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From da059a6b0afdf971abe6ffbdc5ca4aec09c61b0d Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:09:24 +0000 Subject: [PATCH 1517/2111] BUMP 4.11.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index c0232ba514..3de24a8e14 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.10.1" +__version__ = "4.11.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 2a83349f7159c0117848cf3ab1a67b6ad7d6cf0d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Oct 2024 11:34:43 -0500 Subject: [PATCH 1518/2111] PYTHON-4812 Update changelog for 4.9.2 and 4.9.1 [master] (#1892) --- doc/changelog.rst | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 76e91c2b27..574ecad763 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -36,6 +36,36 @@ in this release. .. _PyMongo 4.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40553 +Changes in Version 4.9.2 +------------------------- + +- Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock. +- Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured. +- Fixed a bug where :meth:`~pymongo.results.UpdateResult.did_upsert` would raise a ``TypeError``. + +Issues Resolved +............... + +See the `PyMongo 4.9.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40732 + + +Changes in Version 4.9.1 +------------------------- + +- Add missing documentation about the fact the async API is in beta state. + +Issues Resolved +............... + +See the `PyMongo 4.9.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40720 + + Changes in Version 4.9.0 ------------------------- From af23139b4ab7aeba5da71b571809cac6474391a1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:27:22 -0700 Subject: [PATCH 1519/2111] PYTHON-4805 Migrate test_connections_survive_primary_stepdown_spec.py to async (#1889) --- test/asynchronous/helpers.py | 11 ++ ...nnections_survive_primary_stepdown_spec.py | 148 ++++++++++++++++++ test/helpers.py | 11 ++ ...nnections_survive_primary_stepdown_spec.py | 10 +- test/utils.py | 48 ++++-- tools/synchro.py | 3 + 6 files changed, 217 insertions(+), 14 deletions(-) create mode 100644 test/asynchronous/test_connections_survive_primary_stepdown_spec.py diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index 46f66af62d..b5fc5d8ac4 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -42,6 +42,7 @@ from bson.son import SON from pymongo import common, message +from pymongo.read_preferences import ReadPreference from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] from pymongo.uri_parser import parse_uri @@ -150,6 +151,16 @@ def _create_user(authdb, user, pwd=None, roles=None, **kwargs): return authdb.command(cmd) +async def async_repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. + await client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + await client.admin.command(cmd) + + class client_knobs: def __init__( self, diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py new file mode 100644 index 0000000000..289cf49751 --- /dev/null +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -0,0 +1,148 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test compliance with the connections survive primary step down spec.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.helpers import async_repl_set_step_down +from test.utils import ( + CMAPListener, + async_ensure_all_connected, +) + +from bson import SON +from pymongo import monitoring +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.errors import NotPrimaryError +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestAsyncConnectionsSurvivePrimaryStepDown(AsyncIntegrationTest): + listener: CMAPListener + coll: AsyncCollection + + @classmethod + @async_client_context.require_replica_set + async def _setup_class(cls): + await super()._setup_class() + cls.listener = CMAPListener() + cls.client = await cls.unmanaged_async_rs_or_single_client( + event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + ) + + # Ensure connections to all servers in replica set. This is to test + # that the is_writable flag is properly updated for connections that + # survive a replica set election. + await async_ensure_all_connected(cls.client) + cls.listener.reset() + + cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) + cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) + + @classmethod + async def _tearDown_class(cls): + await cls.client.close() + + async def asyncSetUp(self): + # Note that all ops use same write-concern as self.db (majority). + await self.db.drop_collection("step-down") + await self.db.create_collection("step-down") + self.listener.reset() + + async def set_fail_point(self, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await self.client.admin.command(cmd) + + def verify_pool_cleared(self): + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1) + + def verify_pool_not_cleared(self): + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0) + + @async_client_context.require_version_min(4, 2, -1) + async def test_get_more_iteration(self): + # Insert 5 documents with WC majority. + await self.coll.insert_many([{"data": k} for k in range(5)]) + # Start a find operation and retrieve first batch of results. + batch_size = 2 + cursor = self.coll.find(batch_size=batch_size) + for _ in range(batch_size): + await cursor.next() + # Force step-down the primary. + await async_repl_set_step_down(self.client, replSetStepDown=5, force=True) + # Get await anext batch of results. + for _ in range(batch_size): + await cursor.next() + # Verify pool not cleared. + self.verify_pool_not_cleared() + # Attempt insertion to mark server description as stale and prevent a + # NotPrimaryError on the subsequent operation. + try: + await self.coll.insert_one({}) + except NotPrimaryError: + pass + # Next insert should succeed on the new primary without clearing pool. + await self.coll.insert_one({}) + self.verify_pool_not_cleared() + + async def run_scenario(self, error_code, retry, pool_status_checker): + # Set fail point. + await self.set_fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}} + ) + self.addAsyncCleanup(self.set_fail_point, {"mode": "off"}) + # Insert record and verify failure. + with self.assertRaises(NotPrimaryError) as exc: + await self.coll.insert_one({"test": 1}) + self.assertEqual(exc.exception.details["code"], error_code) # type: ignore[call-overload] + # Retry before CMAPListener assertion if retry_before=True. + if retry: + await self.coll.insert_one({"test": 1}) + # Verify pool cleared/not cleared. + pool_status_checker() + # Always retry here to ensure discovery of new primary. + await self.coll.insert_one({"test": 1}) + + @async_client_context.require_version_min(4, 2, -1) + @async_client_context.require_test_commands + async def test_not_primary_keep_connection_pool(self): + await self.run_scenario(10107, True, self.verify_pool_not_cleared) + + @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_max(4, 1, 0, -1) + @async_client_context.require_test_commands + async def test_not_primary_reset_connection_pool(self): + await self.run_scenario(10107, False, self.verify_pool_cleared) + + @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_test_commands + async def test_shutdown_in_progress(self): + await self.run_scenario(91, False, self.verify_pool_cleared) + + @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_test_commands + async def test_interrupted_at_shutdown(self): + await self.run_scenario(11600, False, self.verify_pool_cleared) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/helpers.py b/test/helpers.py index bf6186d1a0..11d5ab0374 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -42,6 +42,7 @@ from bson.son import SON from pymongo import common, message +from pymongo.read_preferences import ReadPreference from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] from pymongo.uri_parser import parse_uri @@ -150,6 +151,16 @@ def _create_user(authdb, user, pwd=None, roles=None, **kwargs): return authdb.command(cmd) +def repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. + client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + client.admin.command(cmd) + + class client_knobs: def __init__( self, diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index fba7675743..54cc4e0482 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -20,10 +20,10 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest +from test.helpers import repl_set_step_down from test.utils import ( CMAPListener, ensure_all_connected, - repl_set_step_down, ) from bson import SON @@ -32,6 +32,8 @@ from pymongo.synchronous.collection import Collection from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): listener: CMAPListener @@ -39,8 +41,8 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): @classmethod @client_context.require_replica_set - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.listener = CMAPListener() cls.client = cls.unmanaged_rs_or_single_client( event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 @@ -56,7 +58,7 @@ def setUpClass(cls): cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.client.close() def setUp(self): diff --git a/test/utils.py b/test/utils.py index 9615034899..9c78cff3ad 100644 --- a/test/utils.py +++ b/test/utils.py @@ -599,6 +599,44 @@ def discover(): ) +async def async_ensure_all_connected(client: AsyncMongoClient) -> None: + """Ensure that the client's connection pool has socket connections to all + members of a replica set. Raises ConfigurationError when called with a + non-replica set client. + + Depending on the use-case, the caller may need to clear any event listeners + that are configured on the client. + """ + hello: dict = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: + raise ConfigurationError("cluster is not a replica set") + + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} + + # Run hello until we have connected to each host at least once. + async def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = await client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + + async def predicate(): + return target_host_list == await discover() + + await async_wait_until(predicate, "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) + + def one(s): """Get one element of a set""" return next(iter(s)) @@ -761,16 +799,6 @@ async def async_wait_until(predicate, success_description, timeout=10): await asyncio.sleep(interval) -def repl_set_step_down(client, **kwargs): - """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" - cmd = SON([("replSetStepDown", 1)]) - cmd.update(kwargs) - - # Unfreeze a secondary to ensure a speedy election. - client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) - client.admin.command(cmd) - - def is_mongos(client): res = client.admin.command(HelloCompat.LEGACY_CMD) return res.get("msg", "") == "isdbgrid" diff --git a/tools/synchro.py b/tools/synchro.py index 3333b0de2e..d8ec9ae46f 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -105,6 +105,8 @@ "AsyncTestGridFile": "TestGridFile", "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", "async_set_fail_point": "set_fail_point", + "async_ensure_all_connected": "ensure_all_connected", + "async_repl_set_step_down": "repl_set_step_down", } docstring_replacements: dict[tuple[str, str], str] = { @@ -186,6 +188,7 @@ def async_only_test(f: str) -> bool: "test_client_bulk_write.py", "test_client_context.py", "test_collection.py", + "test_connections_survive_primary_stepdown_spec.py", "test_cursor.py", "test_database.py", "test_encryption.py", From 7380097dbca42580f9547bbd632f1efe96afc460 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Thu, 3 Oct 2024 13:39:04 -0400 Subject: [PATCH 1520/2111] PYTHON-3959 - NULL Initialize PyObjects (#1859) --- bson/_cbsonmodule.c | 24 ++++++++++++------------ pymongo/_cmessagemodule.c | 34 +++++++++++++++++----------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 34b407b940..223c392280 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -207,7 +207,7 @@ static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { * * Returns a new ref */ static PyObject* _error(char* name) { - PyObject* error; + PyObject* error = NULL; PyObject* errors = PyImport_ImportModule("bson.errors"); if (!errors) { return NULL; @@ -279,7 +279,7 @@ static PyObject* datetime_from_millis(long long millis) { * micros = diff * 1000 111000 * Resulting in datetime(1, 1, 1, 1, 1, 1, 111000) -- the expected result */ - PyObject* datetime; + PyObject* datetime = NULL; int diff = (int)(((millis % 1000) + 1000) % 1000); int microseconds = diff * 1000; Time64_T seconds = (millis - diff) / 1000; @@ -294,7 +294,7 @@ static PyObject* datetime_from_millis(long long millis) { timeinfo.tm_sec, microseconds); if(!datetime) { - PyObject *etype, *evalue, *etrace; + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; /* * Calling _error clears the error state, so fetch it first. @@ -350,8 +350,8 @@ static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ return NULL; } - PyObject* dt; - PyObject* ll_millis; + PyObject* dt = NULL; + PyObject* ll_millis = NULL; if (!(ll_millis = PyLong_FromLongLong(millis))){ return NULL; @@ -1790,7 +1790,7 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* result; unsigned char check_keys; unsigned char top_level = 1; - PyObject* options_obj; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; PyObject* raw_bson_document_bytes_obj; @@ -2512,8 +2512,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, * Wrap any non-InvalidBSON errors in InvalidBSON. */ if (PyErr_Occurred()) { - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *InvalidBSON = NULL; /* * Calling _error clears the error state, so fetch it first. @@ -2585,8 +2585,8 @@ static int _element_to_dict(PyObject* self, const char* string, if (!*name) { /* If NULL is returned then wrap the UnicodeDecodeError in an InvalidBSON error */ - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *InvalidBSON = NULL; PyErr_Fetch(&etype, &evalue, &etrace); if (PyErr_GivenExceptionMatches(etype, PyExc_Exception)) { @@ -2620,7 +2620,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { /* TODO: Support buffer protocol */ char* string; PyObject* bson; - PyObject* options_obj; + PyObject* options_obj = NULL; codec_options_t options; unsigned position; unsigned max; @@ -2732,7 +2732,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { int32_t size; Py_ssize_t total_size; const char* string; - PyObject* bson; + PyObject* bson = NULL; codec_options_t options; PyObject* result = NULL; PyObject* options_obj; diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index f95b949380..b5adbeec32 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -45,7 +45,7 @@ struct module_state { * * Returns a new ref */ static PyObject* _error(char* name) { - PyObject* error; + PyObject* error = NULL; PyObject* errors = PyImport_ImportModule("pymongo.errors"); if (!errors) { return NULL; @@ -75,9 +75,9 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { int begin, cur_size, max_size = 0; int num_to_skip; int num_to_return; - PyObject* query; - PyObject* field_selector; - PyObject* options_obj; + PyObject* query = NULL; + PyObject* field_selector = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -221,12 +221,12 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ int request_id = rand(); unsigned int flags; - PyObject* command; + PyObject* command = NULL; char* identifier = NULL; Py_ssize_t identifier_length = 0; - PyObject* docs; - PyObject* doc; - PyObject* options_obj; + PyObject* docs = NULL; + PyObject* doc = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -535,12 +535,12 @@ static PyObject* _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; unsigned char ack; - PyObject* command; - PyObject* docs; + PyObject* command = NULL; + PyObject* docs = NULL; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; - PyObject* options_obj; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); @@ -592,12 +592,12 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { unsigned char ack; int request_id; int position; - PyObject* command; - PyObject* docs; + PyObject* command = NULL; + PyObject* docs = NULL; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; - PyObject* options_obj; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); @@ -868,12 +868,12 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; Py_ssize_t ns_len; - PyObject* command; - PyObject* docs; + PyObject* command = NULL; + PyObject* docs = NULL; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; - PyObject* options_obj; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); From b111cbf5d5dab906a94d2c4b2a209cfde2971a94 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 3 Oct 2024 15:18:33 -0400 Subject: [PATCH 1521/2111] PYTHON-4636 - Avoid blocking I/O calls in async code paths (#1870) Co-authored-by: Shane Harvey --- pymongo/asynchronous/network.py | 81 +---------- pymongo/network_layer.py | 230 +++++++++++++++++++++++++++++-- pymongo/pyopenssl_context.py | 13 +- pymongo/synchronous/network.py | 77 +---------- test/asynchronous/test_client.py | 6 +- test/test_client.py | 6 +- tools/synchro.py | 1 + 7 files changed, 248 insertions(+), 166 deletions(-) diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index 44a63a2fc3..d17aead120 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -15,11 +15,8 @@ """Internal network layer helper methods.""" from __future__ import annotations -import asyncio import datetime -import errno import logging -import socket import time from typing import ( TYPE_CHECKING, @@ -40,19 +37,16 @@ NotPrimaryError, OperationFailure, ProtocolError, - _OperationCancelled, ) from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply from pymongo.monitoring import _is_speculative_authenticate from pymongo.network_layer import ( - _POLL_TIMEOUT, _UNPACK_COMPRESSION_HEADER, _UNPACK_HEADER, - BLOCKING_IO_ERRORS, + async_receive_data, async_sendall, ) -from pymongo.socket_checker import _errno_from_exception if TYPE_CHECKING: from bson import CodecOptions @@ -318,9 +312,7 @@ async def receive_message( else: deadline = None # Ignore the response's request id. - length, _, response_to, op_code = _UNPACK_HEADER( - await _receive_data_on_socket(conn, 16, deadline) - ) + length, _, response_to, op_code = _UNPACK_HEADER(await async_receive_data(conn, 16, deadline)) # No request_id for exhaust cursor "getMore". if request_id is not None: if request_id != response_to: @@ -336,11 +328,11 @@ async def receive_message( ) if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - await _receive_data_on_socket(conn, 9, deadline) + await async_receive_data(conn, 9, deadline) ) - data = decompress(await _receive_data_on_socket(conn, length - 25, deadline), compressor_id) + data = decompress(await async_receive_data(conn, length - 25, deadline), compressor_id) else: - data = await _receive_data_on_socket(conn, length - 16, deadline) + data = await async_receive_data(conn, length - 16, deadline) try: unpack_reply = _UNPACK_REPLY[op_code] @@ -349,66 +341,3 @@ async def receive_message( f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" ) from None return unpack_reply(data) - - -async def wait_for_read(conn: AsyncConnection, deadline: Optional[float]) -> None: - """Block until at least one byte is read, or a timeout, or a cancel.""" - sock = conn.conn - timed_out = False - # Check if the connection's socket has been manually closed - if sock.fileno() == -1: - return - while True: - # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, "pending") and sock.pending() > 0: - readable = True - else: - # Wait up to 500ms for the socket to become readable and then - # check for cancellation. - if deadline: - remaining = deadline - time.monotonic() - # When the timeout has expired perform one final check to - # see if the socket is readable. This helps avoid spurious - # timeouts on AWS Lambda and other FaaS environments. - if remaining <= 0: - timed_out = True - timeout = max(min(remaining, _POLL_TIMEOUT), 0) - else: - timeout = _POLL_TIMEOUT - readable = conn.socket_checker.select(sock, read=True, timeout=timeout) - if conn.cancel_context.cancelled: - raise _OperationCancelled("operation cancelled") - if readable: - return - if timed_out: - raise socket.timeout("timed out") - await asyncio.sleep(0) - - -async def _receive_data_on_socket( - conn: AsyncConnection, length: int, deadline: Optional[float] -) -> memoryview: - buf = bytearray(length) - mv = memoryview(buf) - bytes_read = 0 - while bytes_read < length: - try: - await wait_for_read(conn, deadline) - # CSOT: Update timeout. When the timeout has expired perform one - # final non-blocking recv. This helps avoid spurious timeouts when - # the response is actually already buffered on the client. - if _csot.get_timeout() and deadline is not None: - conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) - chunk_length = conn.conn.recv_into(mv[bytes_read:]) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") from None - except OSError as exc: - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk_length == 0: - raise OSError("connection closed") - - bytes_read += chunk_length - - return mv diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 82a6228acc..4b57620d83 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -16,15 +16,21 @@ from __future__ import annotations import asyncio +import errno import socket import struct import sys +import time from asyncio import AbstractEventLoop, Future from typing import ( + TYPE_CHECKING, + Optional, Union, ) -from pymongo import ssl_support +from pymongo import _csot, ssl_support +from pymongo.errors import _OperationCancelled +from pymongo.socket_checker import _errno_from_exception try: from ssl import SSLError, SSLSocket @@ -51,6 +57,10 @@ BLOCKING_IO_WRITE_ERROR, ) +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.synchronous.pool import Connection + _UNPACK_HEADER = struct.Struct(" None: view = memoryview(buf) - fd = sock.fileno() sent = 0 def _is_ready(fut: Future) -> None: - loop.remove_writer(fd) - loop.remove_reader(fd) if fut.done(): return fut.set_result(None) @@ -101,33 +108,240 @@ def _is_ready(fut: Future) -> None: if isinstance(exc, BLOCKING_IO_READ_ERROR): fut = loop.create_future() loop.add_reader(fd, _is_ready, fut) - await fut + try: + await fut + finally: + loop.remove_reader(fd) if isinstance(exc, BLOCKING_IO_WRITE_ERROR): fut = loop.create_future() loop.add_writer(fd, _is_ready, fut) - await fut + try: + await fut + finally: + loop.remove_writer(fd) if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): fut = loop.create_future() loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + + async def _async_receive_ssl( + conn: _sslConn, length: int, loop: AbstractEventLoop + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + + def _is_ready(fut: Future) -> None: + if fut.done(): + return + fut.set_result(None) + + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + total_read += read + except BLOCKING_IO_ERRORS as exc: + fd = conn.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() loop.add_writer(fd, _is_ready, fut) - await fut + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + return mv + else: # The default Windows asyncio event loop does not support loop.add_reader/add_writer: # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support + # Note: In PYTHON-4493 we plan to replace this code with asyncio streams. async def _async_sendall_ssl( sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop ) -> None: view = memoryview(buf) total_length = len(buf) total_sent = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. + backoff = 0.001 while total_sent < total_length: try: sent = sock.send(view[total_sent:]) except BLOCKING_IO_ERRORS: - await asyncio.sleep(0.5) + await asyncio.sleep(backoff) sent = 0 + if sent > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) total_sent += sent + async def _async_receive_ssl( + conn: _sslConn, length: int, dummy: AbstractEventLoop + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. + backoff = 0.001 + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + read = 0 + if read > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_read += read + return mv + def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: sock.sendall(buf) + + +async def _poll_cancellation(conn: AsyncConnection) -> None: + while True: + if conn.cancel_context.cancelled: + return + + await asyncio.sleep(_POLL_TIMEOUT) + + +async def async_receive_data( + conn: AsyncConnection, length: int, deadline: Optional[float] +) -> memoryview: + sock = conn.conn + sock_timeout = sock.gettimeout() + timeout: Optional[Union[float, int]] + if deadline: + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + timeout = max(deadline - time.monotonic(), 0) + else: + timeout = sock_timeout + + sock.settimeout(0.0) + loop = asyncio.get_event_loop() + cancellation_task = asyncio.create_task(_poll_cancellation(conn)) + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + read_task = asyncio.create_task(_async_receive_ssl(sock, length, loop)) # type: ignore[arg-type] + else: + read_task = asyncio.create_task(_async_receive(sock, length, loop)) # type: ignore[arg-type] + tasks = [read_task, cancellation_task] + done, pending = await asyncio.wait( + tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED + ) + for task in pending: + task.cancel() + await asyncio.wait(pending) + if len(done) == 0: + raise socket.timeout("timed out") + if read_task in done: + return read_task.result() + raise _OperationCancelled("operation cancelled") + finally: + sock.settimeout(sock_timeout) + + +async def _async_receive(conn: socket.socket, length: int, loop: AbstractEventLoop) -> memoryview: + mv = memoryview(bytearray(length)) + bytes_read = 0 + while bytes_read < length: + chunk_length = await loop.sock_recv_into(conn, mv[bytes_read:]) + if chunk_length == 0: + raise OSError("connection closed") + bytes_read += chunk_length + return mv + + +# Sync version: +def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: + """Block until at least one byte is read, or a timeout, or a cancel.""" + sock = conn.conn + timed_out = False + # Check if the connection's socket has been manually closed + if sock.fileno() == -1: + return + while True: + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) + else: + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") + + +def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: + buf = bytearray(length) + mv = memoryview(buf) + bytes_read = 0 + while bytes_read < length: + try: + wait_for_read(conn, deadline) + # CSOT: Update timeout. When the timeout has expired perform one + # final non-blocking recv. This helps avoid spurious timeouts when + # the response is actually already buffered on the client. + if _csot.get_timeout() and deadline is not None: + conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) + chunk_length = conn.conn.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + raise socket.timeout("timed out") from None + except OSError as exc: + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise OSError("connection closed") + + bytes_read += chunk_length + + return mv diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 4f6f6f4a89..50d8680a74 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -105,13 +105,19 @@ def _ragged_eof(exc: BaseException) -> bool: # https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets class _sslConn(_SSL.Connection): def __init__( - self, ctx: _SSL.Context, sock: Optional[_socket.socket], suppress_ragged_eofs: bool + self, + ctx: _SSL.Context, + sock: Optional[_socket.socket], + suppress_ragged_eofs: bool, + is_async: bool = False, ): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs super().__init__(ctx, sock) + self._is_async = is_async def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: + is_async = kwargs.pop("allow_async", True) and self._is_async timeout = self.gettimeout() if timeout: start = _time.monotonic() @@ -119,6 +125,8 @@ def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: try: return call(*args, **kwargs) except BLOCKING_IO_ERRORS as exc: + if is_async: + raise exc # Check for closed socket. if self.fileno() == -1: if timeout and _time.monotonic() - start > timeout: @@ -139,6 +147,7 @@ def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: continue def do_handshake(self, *args: Any, **kwargs: Any) -> None: + kwargs["allow_async"] = False return self._call(super().do_handshake, *args, **kwargs) def recv(self, *args: Any, **kwargs: Any) -> bytes: @@ -381,7 +390,7 @@ async def a_wrap_socket( """Wrap an existing Python socket connection and return a TLS socket object. """ - ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs) + ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs, True) loop = asyncio.get_running_loop() if session: ssl_conn.set_session(session) diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py index c1978087a9..7206dca735 100644 --- a/pymongo/synchronous/network.py +++ b/pymongo/synchronous/network.py @@ -16,9 +16,7 @@ from __future__ import annotations import datetime -import errno import logging -import socket import time from typing import ( TYPE_CHECKING, @@ -39,19 +37,16 @@ NotPrimaryError, OperationFailure, ProtocolError, - _OperationCancelled, ) from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply from pymongo.monitoring import _is_speculative_authenticate from pymongo.network_layer import ( - _POLL_TIMEOUT, _UNPACK_COMPRESSION_HEADER, _UNPACK_HEADER, - BLOCKING_IO_ERRORS, + receive_data, sendall, ) -from pymongo.socket_checker import _errno_from_exception if TYPE_CHECKING: from bson import CodecOptions @@ -317,7 +312,7 @@ def receive_message( else: deadline = None # Ignore the response's request id. - length, _, response_to, op_code = _UNPACK_HEADER(_receive_data_on_socket(conn, 16, deadline)) + length, _, response_to, op_code = _UNPACK_HEADER(receive_data(conn, 16, deadline)) # No request_id for exhaust cursor "getMore". if request_id is not None: if request_id != response_to: @@ -332,12 +327,10 @@ def receive_message( f"message size ({max_message_size!r})" ) if op_code == 2012: - op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - _receive_data_on_socket(conn, 9, deadline) - ) - data = decompress(_receive_data_on_socket(conn, length - 25, deadline), compressor_id) + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) + data = decompress(receive_data(conn, length - 25, deadline), compressor_id) else: - data = _receive_data_on_socket(conn, length - 16, deadline) + data = receive_data(conn, length - 16, deadline) try: unpack_reply = _UNPACK_REPLY[op_code] @@ -346,63 +339,3 @@ def receive_message( f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" ) from None return unpack_reply(data) - - -def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: - """Block until at least one byte is read, or a timeout, or a cancel.""" - sock = conn.conn - timed_out = False - # Check if the connection's socket has been manually closed - if sock.fileno() == -1: - return - while True: - # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, "pending") and sock.pending() > 0: - readable = True - else: - # Wait up to 500ms for the socket to become readable and then - # check for cancellation. - if deadline: - remaining = deadline - time.monotonic() - # When the timeout has expired perform one final check to - # see if the socket is readable. This helps avoid spurious - # timeouts on AWS Lambda and other FaaS environments. - if remaining <= 0: - timed_out = True - timeout = max(min(remaining, _POLL_TIMEOUT), 0) - else: - timeout = _POLL_TIMEOUT - readable = conn.socket_checker.select(sock, read=True, timeout=timeout) - if conn.cancel_context.cancelled: - raise _OperationCancelled("operation cancelled") - if readable: - return - if timed_out: - raise socket.timeout("timed out") - - -def _receive_data_on_socket(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: - buf = bytearray(length) - mv = memoryview(buf) - bytes_read = 0 - while bytes_read < length: - try: - wait_for_read(conn, deadline) - # CSOT: Update timeout. When the timeout has expired perform one - # final non-blocking recv. This helps avoid spurious timeouts when - # the response is actually already buffered on the client. - if _csot.get_timeout() and deadline is not None: - conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) - chunk_length = conn.conn.recv_into(mv[bytes_read:]) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") from None - except OSError as exc: - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk_length == 0: - raise OSError("connection closed") - - bytes_read += chunk_length - - return mv diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 5c06331790..2052d1cd7f 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -1713,6 +1713,7 @@ def compression_settings(client): # No error await client.pymongo_test.test.find_one() + @async_client_context.require_sync async def test_reset_during_update_pool(self): client = await self.async_rs_or_single_client(minPoolSize=10) await client.admin.command("ping") @@ -1737,10 +1738,7 @@ async def _run(self): await asyncio.sleep(0.001) def run(self): - if _IS_SYNC: - self._run() - else: - asyncio.run(self._run()) + self._run() t = ResetPoolThread(pool) t.start() diff --git a/test/test_client.py b/test/test_client.py index c88a8fd9b4..936c38b8c6 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1671,6 +1671,7 @@ def compression_settings(client): # No error client.pymongo_test.test.find_one() + @client_context.require_sync def test_reset_during_update_pool(self): client = self.rs_or_single_client(minPoolSize=10) client.admin.command("ping") @@ -1695,10 +1696,7 @@ def _run(self): time.sleep(0.001) def run(self): - if _IS_SYNC: - self._run() - else: - asyncio.run(self._run()) + self._run() t = ResetPoolThread(pool) t.start() diff --git a/tools/synchro.py b/tools/synchro.py index d8ec9ae46f..585fc5fefd 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -43,6 +43,7 @@ "AsyncConnection": "Connection", "async_command": "command", "async_receive_message": "receive_message", + "async_receive_data": "receive_data", "async_sendall": "sendall", "asynchronous": "synchronous", "Asynchronous": "Synchronous", From 68127d5efd3580ad718a437eff91230c0f70e20e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 04:49:40 -0500 Subject: [PATCH 1522/2111] Bump the actions group with 2 updates (#1897) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 2 +- .github/workflows/test-python.yml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 370b8759e6..2dc070d7c6 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -39,7 +39,7 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ inputs.ref }} - - uses: actions/setup-python@v3 + - uses: actions/setup-python@v5 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 921168c130..3ecdfa52f3 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -72,7 +72,7 @@ jobs: pip install hatch fi - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.10.0 + uses: supercharge/mongodb-github-action@1.11.0 with: mongodb-version: 6.0 - name: Run tests @@ -94,7 +94,7 @@ jobs: run: | pip install -U hatch pip - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.10.0 + uses: supercharge/mongodb-github-action@1.11.0 with: mongodb-version: '8.0.0-rc4' - name: Run tests @@ -201,7 +201,7 @@ jobs: # Test sdist on lowest supported Python python-version: '3.8' - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.10.0 + uses: supercharge/mongodb-github-action@1.11.0 - name: Run connect test from sdist shell: bash run: | From def3c11787530290c073080eaceb3682d578b73d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 04:50:49 -0500 Subject: [PATCH 1523/2111] Bump furo from 2023.9.10 to 2024.8.6 (#1898) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/docs.txt b/requirements/docs.txt index 16b2746866..d3f0c73034 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -3,4 +3,4 @@ sphinx_rtd_theme>=2,<3 readthedocs-sphinx-search~=0.3 sphinxcontrib-shellcheck>=1,<2 sphinx-autobuild>=2020.9.1 -furo==2023.9.10 +furo==2024.8.6 From 093d5bebde9c4a12ca05edd0c41c350dba472f67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 04:51:09 -0500 Subject: [PATCH 1524/2111] Bump pyright from 1.1.382.post1 to 1.1.383 (#1899) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 1669e6bbc2..06c33c6db6 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.11.2 -pyright==1.1.382.post1 +pyright==1.1.383 typing_extensions -r ./encryption.txt -r ./ocsp.txt From c48dc692824a4d10a939f3477cb1c0dceb4d5dcc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 04:52:14 -0500 Subject: [PATCH 1525/2111] Update sphinx requirement from <8,>=5.3 to >=5.3,<9 (#1901) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/docs.txt b/requirements/docs.txt index d3f0c73034..34723e6ea0 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,4 +1,4 @@ -sphinx>=5.3,<8 +sphinx>=5.3,<9 sphinx_rtd_theme>=2,<3 readthedocs-sphinx-search~=0.3 sphinxcontrib-shellcheck>=1,<2 From 006a9960f07c06bad9c5803ddbf8dc1750743d6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 04:55:07 -0500 Subject: [PATCH 1526/2111] Update sphinx-rtd-theme requirement from <3,>=2 to >=2,<4 (#1900) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/docs.txt b/requirements/docs.txt index 34723e6ea0..7d52c1cb3e 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,5 +1,5 @@ sphinx>=5.3,<9 -sphinx_rtd_theme>=2,<3 +sphinx_rtd_theme>=2,<4 readthedocs-sphinx-search~=0.3 sphinxcontrib-shellcheck>=1,<2 sphinx-autobuild>=2020.9.1 From 5a66e992542f635175f58a43e37607a6cf6c4717 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 8 Oct 2024 14:52:14 -0400 Subject: [PATCH 1527/2111] PYTHON-4828 - Drop Python 3.8 support (#1902) --- .evergreen/config.yml | 44 ++++++++++++------------------- .evergreen/utils.sh | 32 +++++++++++----------- .github/workflows/dist.yml | 7 +++-- .github/workflows/test-python.yml | 16 +++++------ CONTRIBUTING.md | 2 +- README.md | 2 +- doc/changelog.rst | 5 ++++ doc/faq.rst | 2 +- doc/installation.rst | 4 +-- doc/python3.rst | 2 +- pyproject.toml | 3 +-- test/asynchronous/conftest.py | 2 -- test/conftest.py | 2 -- 13 files changed, 56 insertions(+), 67 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 14e3426b32..7fb48c8054 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2233,10 +2233,6 @@ axes: values: # Note: always display platform with python-version to avoid ambiguous display names. # Linux - - id: "3.8" - display_name: "Python 3.8" - variables: - PYTHON_BINARY: "/opt/python/3.8/bin/python3" - id: "3.9" display_name: "Python 3.9" variables: @@ -2269,10 +2265,6 @@ axes: - id: python-version-windows display_name: "Python" values: - - id: "3.8" - display_name: "Python 3.8" - variables: - PYTHON_BINARY: "C:/python/Python38/python.exe" - id: "3.9" display_name: "Python 3.9" variables: @@ -2297,10 +2289,8 @@ axes: - id: python-version-windows-32 display_name: "Python" values: - - id: "3.8" - display_name: "32-bit Python 3.8" - variables: - PYTHON_BINARY: "C:/python/32/Python38/python.exe" + + - id: "3.9" display_name: "32-bit Python 3.9" variables: @@ -2581,10 +2571,10 @@ buildvariants: auth: "*" ssl: "ssl" pyopenssl: "*" - # Only test "noauth" with Python 3.8. + # Only test "noauth" with Python 3.9. exclude_spec: platform: rhel8 - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "pypy3.9", "pypy3.10"] + python-version: ["3.10", "3.11", "3.12", "3.13", "pypy3.9", "pypy3.10"] auth: "noauth" ssl: "ssl" pyopenssl: "*" @@ -2716,7 +2706,7 @@ buildvariants: matrix_spec: platform: rhel7 # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.8", "3.9"] + python-version: ["3.9"] auth-ssl: "*" display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" tasks: @@ -2739,12 +2729,12 @@ buildvariants: then: add_tasks: *encryption-server-versions -# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.8. +# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.9. - matrix_name: "tests-storage-engines" matrix_spec: platform: rhel8 storage-engine: "*" - python-version: 3.8 + python-version: 3.9 display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: @@ -2774,12 +2764,12 @@ buildvariants: - "test-3.6-standalone" - "test-3.6-replica_set" -# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.8. +# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.9. - matrix_name: "test-disableTestCommands" matrix_spec: platform: rhel8 disableTestCommands: "*" - python-version: "3.8" + python-version: "3.9" display_name: "Disable test commands ${python-version} ${platform}" tasks: - ".latest" @@ -2805,7 +2795,7 @@ buildvariants: - matrix_name: "test-search-index-helpers" matrix_spec: platform: rhel8 - python-version: "3.8" + python-version: "3.9" display_name: "Search Index Helpers ${platform}" tasks: - name: "test_atlas_task_group_search_indexes" @@ -2813,7 +2803,7 @@ buildvariants: - matrix_name: "tests-mod-wsgi" matrix_spec: platform: ubuntu-22.04 - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] + python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] mod-wsgi-version: "*" display_name: "${mod-wsgi-version} ${python-version} ${platform}" tasks: @@ -2825,7 +2815,7 @@ buildvariants: - matrix_name: "mockupdb-tests" matrix_spec: platform: rhel8 - python-version: 3.8 + python-version: 3.9 display_name: "MockupDB Tests" tasks: - name: "mockupdb" @@ -2833,7 +2823,7 @@ buildvariants: - matrix_name: "tests-doctests" matrix_spec: platform: rhel8 - python-version: ["3.8"] + python-version: ["3.9"] display_name: "Doctests ${python-version} ${platform}" tasks: - name: "doctests" @@ -2873,7 +2863,7 @@ buildvariants: - matrix_name: "data-lake-spec-tests" matrix_spec: platform: ubuntu-22.04 - python-version: ["3.8", "3.10"] + python-version: ["3.9", "3.10"] auth: "auth" c-extensions: "*" display_name: "Atlas Data Lake ${python-version} ${c-extensions}" @@ -2883,7 +2873,7 @@ buildvariants: - matrix_name: "stable-api-tests" matrix_spec: platform: rhel8 - python-version: ["3.8", "3.10"] + python-version: ["3.9", "3.10"] auth: "auth" versionedApi: "*" display_name: "Versioned API ${versionedApi} ${python-version}" @@ -2896,7 +2886,7 @@ buildvariants: - matrix_name: "ocsp-test" matrix_spec: platform: rhel8 - python-version: ["3.8", "3.10", "pypy3.9", "pypy3.10"] + python-version: ["3.9", "3.10", "pypy3.9", "pypy3.10"] mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] auth: "noauth" ssl: "ssl" @@ -2908,7 +2898,7 @@ buildvariants: - matrix_name: "ocsp-test-windows" matrix_spec: platform: windows - python-version-windows: ["3.8", "3.10"] + python-version-windows: ["3.9", "3.10"] mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] auth: "noauth" ssl: "ssl" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 1a5e2a153f..d44425a905 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -4,8 +4,8 @@ set -o xtrace find_python3() { PYTHON="" - # Add a fallback system python3 if it is available and Python 3.8+. - if is_python_38 "$(command -v python3)"; then + # Add a fallback system python3 if it is available and Python 3.9+. + if is_python_39 "$(command -v python3)"; then PYTHON="$(command -v python3)" fi # Find a suitable toolchain version, if available. @@ -14,23 +14,23 @@ find_python3() { if [ -d "/Library/Frameworks/Python.Framework/Versions/3.10" ]; then PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" # macos 10.14 - elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.8" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/3.8/bin/python3" + elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.9" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.9/bin/python3" fi elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - PYTHON="C:/python/Python38/python.exe" + PYTHON="C:/python/Python39/python.exe" else - # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.8+. - if [ -f "/opt/python/3.8/bin/python3" ]; then - PYTHON="/opt/python/3.8/bin/python3" - elif is_python_38 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then + # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.9+. + if [ -f "/opt/python/3.9/bin/python3" ]; then + PYTHON="/opt/python/3.9/bin/python3" + elif is_python_39 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v4/bin/python3" - elif is_python_38 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + elif is_python_39 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v3/bin/python3" fi fi if [ -z "$PYTHON" ]; then - echo "Cannot test without python3.8+ installed!" + echo "Cannot test without python3.9+ installed!" exit 1 fi echo "$PYTHON" @@ -96,15 +96,15 @@ testinstall () { fi } -# Function that returns success if the provided Python binary is version 3.8 or later +# Function that returns success if the provided Python binary is version 3.9 or later # Usage: -# is_python_38 /path/to/python +# is_python_39 /path/to/python # * param1: Python binary -is_python_38() { +is_python_39() { if [ -z "$1" ]; then return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 8))"; then - # runs when sys.version_info[:2] >= (3, 8) + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 9))"; then + # runs when sys.version_info[:2] >= (3, 9) return 0 else return 1 diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 7ec55dd3b3..fbc7ff7390 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -53,7 +53,7 @@ jobs: - uses: actions/setup-python@v5 with: cache: 'pip' - python-version: 3.8 + python-version: 3.9 cache-dependency-path: 'pyproject.toml' allow-prereleases: true @@ -79,13 +79,12 @@ jobs: env: CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 CIBW_MANYLINUX_I686_IMAGE: manylinux1 - CIBW_BUILD: "cp38-${{ matrix.buildplat[1] }} cp39-${{ matrix.buildplat[1] }}" + CIBW_BUILD: "cp39-${{ matrix.buildplat[1] }} cp39-${{ matrix.buildplat[1] }}" run: python -m cibuildwheel --output-dir wheelhouse - name: Assert all versions in wheelhouse if: ${{ ! startsWith(matrix.buildplat[1], 'macos') }} run: | - ls wheelhouse/*cp38*.whl ls wheelhouse/*cp39*.whl ls wheelhouse/*cp310*.whl ls wheelhouse/*cp311*.whl @@ -109,7 +108,7 @@ jobs: - uses: actions/setup-python@v5 with: # Build sdist on lowest supported Python - python-version: '3.8' + python-version: '3.9' - name: Build SDist run: | diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 3ecdfa52f3..e55444ceca 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.9" cache: 'pip' cache-dependency-path: 'pyproject.toml' - name: Install Python dependencies @@ -51,7 +51,7 @@ jobs: strategy: matrix: os: [ubuntu-20.04] - python-version: ["3.8", "pypy-3.9", "3.13"] + python-version: ["3.9", "pypy-3.9", "3.13"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v4 @@ -87,7 +87,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.9" cache: 'pip' cache-dependency-path: 'pyproject.toml' - name: Install dependencies @@ -111,7 +111,7 @@ jobs: cache: 'pip' cache-dependency-path: 'pyproject.toml' # Build docs on lowest supported Python for furo - python-version: '3.8' + python-version: '3.9' - name: Install dependencies run: | pip install -U pip hatch @@ -129,7 +129,7 @@ jobs: cache: 'pip' cache-dependency-path: 'pyproject.toml' # Build docs on lowest supported Python for furo - python-version: '3.8' + python-version: '3.9' - name: Install dependencies run: | pip install -U pip hatch @@ -142,7 +142,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ["3.8", "3.11"] + python: ["3.9", "3.11"] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 @@ -167,7 +167,7 @@ jobs: cache: 'pip' cache-dependency-path: 'pyproject.toml' # Build sdist on lowest supported Python - python-version: '3.8' + python-version: '3.9' - name: Build SDist shell: bash run: | @@ -199,7 +199,7 @@ jobs: cache: 'pip' cache-dependency-path: 'sdist/test/pyproject.toml' # Test sdist on lowest supported Python - python-version: '3.8' + python-version: '3.9' - name: Start MongoDB uses: supercharge/mongodb-github-action@1.11.0 - name: Run connect test from sdist diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2c2a5f4316..7516fbc9ed 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,7 @@ be of interest or that has already been addressed. ## Supported Interpreters -PyMongo supports CPython 3.8+ and PyPy3.9+. Language features not +PyMongo supports CPython 3.9+ and PyPy3.9+. Language features not supported by all interpreters can not be used. ## Style Guide diff --git a/README.md b/README.md index 1076b66377..9b5aa33f78 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ package that is incompatible with PyMongo. ## Dependencies -PyMongo supports CPython 3.8+ and PyPy3.9+. +PyMongo supports CPython 3.9+ and PyPy3.9+. Required dependencies: diff --git a/doc/changelog.rst b/doc/changelog.rst index 574ecad763..a73a89a0ef 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,11 @@ Changelog ========= +Changes in Version 4.11.0 +------------------------- + +.. warning:: PyMongo 4.11 drops support for Python 3.8: Python 3.9+ or PyPy 3.9+ is now required. + Changes in Version 4.10.1 ------------------------- diff --git a/doc/faq.rst b/doc/faq.rst index f0463badaa..15950e7716 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -166,7 +166,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.8+ and PyPy3.9+. See the :doc:`python3` for details. +PyMongo supports CPython 3.9+ and PyPy3.9+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index ee83b30c6f..dd8eb6ab42 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.8+ and PyPy3.9+. +PyMongo supports CPython 3.9+ and PyPy3.9+. Required dependencies ..................... @@ -140,7 +140,7 @@ See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.8+ downloaded from python.org. In all cases Xcode must be +versions of Python 3.9+ downloaded from python.org. In all cases Xcode must be installed with 'UNIX Development Support'. **Xcode 5.1**: Starting with version 5.1 the version of clang that ships with diff --git a/doc/python3.rst b/doc/python3.rst index 148c5ee454..1ea43b3ccb 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -4,7 +4,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.8+ and PyPy3.9+. +PyMongo supports CPython 3.9+ and PyPy3.9+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 30c7c046b9..2688aab27e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ dynamic = ["version", "dependencies", "optional-dependencies"] description = "Python driver for MongoDB " readme = "README.md" license = {file="LICENSE"} -requires-python = ">=3.8" +requires-python = ">=3.9" authors = [ { name = "The MongoDB Python Team" }, ] @@ -30,7 +30,6 @@ classifiers = [ "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py index c08f224abd..e443dff6c0 100644 --- a/test/asynchronous/conftest.py +++ b/test/asynchronous/conftest.py @@ -17,8 +17,6 @@ def event_loop_policy(): # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) # We explicitly use a different loop implementation here to prevent that issue if sys.platform == "win32": - # Needed for Python 3.8. - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] return asyncio.get_event_loop_policy() diff --git a/test/conftest.py b/test/conftest.py index ca817a5a62..a3d954c7c3 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -15,8 +15,6 @@ def event_loop_policy(): # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) # We explicitly use a different loop implementation here to prevent that issue if sys.platform == "win32": - # Needed for Python 3.8. - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] return asyncio.get_event_loop_policy() From d21a8ddcff0ce9d54fe5b353bd4477936a02528e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 8 Oct 2024 15:14:54 -0400 Subject: [PATCH 1528/2111] PYTHON-4827 - Fix dnspython typechecking failures (#1903) Co-authored-by: Steven Silvester --- pymongo/srv_resolver.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py index 6f6cc285fa..5be6cb98db 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/srv_resolver.py @@ -99,7 +99,7 @@ def get_options(self) -> Optional[str]: raise ConfigurationError(str(exc)) from None if len(results) > 1: raise ConfigurationError("Only one TXT record is supported") - return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: try: @@ -121,7 +121,8 @@ def _get_srv_response_and_hosts( # Construct address tuples nodes = [ - (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) for res in results + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) # type: ignore[attr-defined] + for res in results ] # Validate hosts From 8f32f3cd245b8bebd2d91469aa7477af2bf5ce38 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 8 Oct 2024 15:52:16 -0400 Subject: [PATCH 1529/2111] PYTHON-4831 - Remove pytz from examples (#1904) Co-authored-by: Steven Silvester --- doc/examples/datetimes.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst index 1790506423..a8c0476903 100644 --- a/doc/examples/datetimes.rst +++ b/doc/examples/datetimes.rst @@ -70,9 +70,9 @@ those datetimes to UTC automatically: .. doctest:: - >>> import pytz - >>> pacific = pytz.timezone("US/Pacific") - >>> aware_datetime = pacific.localize(datetime.datetime(2002, 10, 27, 6, 0, 0)) + >>> from zoneinfo import ZoneInfo + >>> from datetime import datetime + >>> aware_datetime = datetime(2002, 10, 27, 6, 0, 0, tzinfo=ZoneInfo("US/Pacific")) >>> result = db.times.insert_one({"date": aware_datetime}) >>> db.times.find_one()["date"] datetime.datetime(2002, 10, 27, 14, 0) @@ -97,7 +97,7 @@ out of MongoDB in US/Pacific time: datetime.datetime(2002, 10, 27, 14, 0) >>> aware_times = db.times.with_options(codec_options=CodecOptions( ... tz_aware=True, - ... tzinfo=pytz.timezone('US/Pacific'))) + ... tzinfo=ZoneInfo("US/Pacific"))) >>> result = aware_times.find_one()['date'] datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE tzinfo=) From 5fa4380324b7109edce24ad1cd97f3eec6bc7697 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 9 Oct 2024 10:44:41 -0400 Subject: [PATCH 1530/2111] PYTHON-4784 - Add tests to confirm async parallelism (#1886) --- test/asynchronous/test_concurrency.py | 54 +++++++++++++++++++++++++++ tools/synchro.py | 2 +- 2 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_concurrency.py diff --git a/test/asynchronous/test_concurrency.py b/test/asynchronous/test_concurrency.py new file mode 100644 index 0000000000..1683b8413b --- /dev/null +++ b/test/asynchronous/test_concurrency.py @@ -0,0 +1,54 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests to ensure that the async API is properly concurrent with asyncio.""" +from __future__ import annotations + +import asyncio +import time +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils import delay + +_IS_SYNC = False + + +class TestAsyncConcurrency(AsyncIntegrationTest): + async def _task(self, client): + await client.db.test.find_one({"$where": delay(0.20)}) + + async def test_concurrency(self): + tasks = [] + iterations = 5 + + client = await self.async_single_client() + await client.db.test.drop() + await client.db.test.insert_one({"x": 1}) + + start = time.time() + + for _ in range(iterations): + await self._task(client) + + sequential_time = time.time() - start + start = time.time() + + for i in range(iterations): + tasks.append(self._task(client)) + + await asyncio.gather(*tasks) + concurrent_time = time.time() - start + + percent_faster = (sequential_time - concurrent_time) / concurrent_time * 100 + # We expect the concurrent tasks to be at least 75% faster on all platforms as a conservative benchmark + self.assertGreaterEqual(percent_faster, 75) diff --git a/tools/synchro.py b/tools/synchro.py index 585fc5fefd..5ce83cfbeb 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -152,7 +152,7 @@ def async_only_test(f: str) -> bool: """Return True for async tests that should not be converted to sync.""" - return f in ["test_locks.py"] + return f in ["test_locks.py", "test_concurrency.py"] test_files = [ From ac198af557410bce4809138c0089e1e56ff6db87 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 9 Oct 2024 09:58:20 -0700 Subject: [PATCH 1531/2111] PYTHON-4729 Drop support for MongoDB 3.6 (#1905) --- .evergreen/config.yml | 52 ++++--------------- README.md | 2 +- doc/changelog.rst | 15 ++++++ doc/common-issues.rst | 6 +-- doc/examples/authentication.rst | 5 +- pymongo/asynchronous/collection.py | 5 -- pymongo/asynchronous/mongo_client.py | 5 +- pymongo/common.py | 4 +- pymongo/synchronous/collection.py | 5 -- pymongo/synchronous/mongo_client.py | 5 +- test/asynchronous/test_client.py | 8 +-- .../rs/null_election_id-pre-6.0.json | 2 +- test/mockupdb/test_auth_recovering_member.py | 4 +- test/mockupdb/test_cluster_time.py | 7 ++- test/mockupdb/test_cursor.py | 3 +- test/mockupdb/test_cursor_namespace.py | 5 +- test/mockupdb/test_getmore_sharded.py | 8 ++- test/mockupdb/test_handshake.py | 23 ++++++-- test/mockupdb/test_initial_ismaster.py | 10 ++-- test/mockupdb/test_list_indexes.py | 3 +- test/mockupdb/test_max_staleness.py | 6 ++- test/mockupdb/test_mixed_version_sharded.py | 2 - .../mockupdb/test_mongos_command_read_mode.py | 13 ++++- .../test_network_disconnect_primary.py | 9 +++- test/mockupdb/test_op_msg_read_preference.py | 9 ++-- test/mockupdb/test_query_read_pref_sharded.py | 8 ++- test/mockupdb/test_reset_and_request_check.py | 6 ++- test/mockupdb/test_slave_okay_rs.py | 10 +++- test/mockupdb/test_slave_okay_sharded.py | 7 ++- test/mockupdb/test_slave_okay_single.py | 3 +- test/test_client.py | 8 +-- test/test_discovery_and_monitoring.py | 7 ++- test/test_server_description.py | 4 +- test/test_topology.py | 50 +++++++++++++----- test/utils_selection_tests.py | 4 +- 35 files changed, 191 insertions(+), 132 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 7fb48c8054..a345e4f5b7 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1014,33 +1014,6 @@ tasks: TOPOLOGY: "server" - func: "run doctests" - - name: "test-3.6-standalone" - tags: ["3.6", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-3.6-replica_set" - tags: ["3.6", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-3.6-sharded_cluster" - tags: ["3.6", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - name: "test-4.0-standalone" tags: ["4.0", "standalone"] commands: @@ -2186,10 +2159,6 @@ axes: - id: mongodb-version display_name: "MongoDB" values: - - id: "3.6" - display_name: "MongoDB 3.6" - variables: - VERSION: "3.6" - id: "4.0" display_name: "MongoDB 4.0" variables: @@ -2490,7 +2459,6 @@ buildvariants: - ".4.4" - ".4.2" - ".4.0" - - ".3.6" - matrix_name: "test-macos-arm64" matrix_spec: @@ -2562,7 +2530,6 @@ buildvariants: - ".4.4" - ".4.2" - ".4.0" - - ".3.6" - matrix_name: "tests-pyopenssl" matrix_spec: @@ -2657,19 +2624,22 @@ buildvariants: display_name: "${compression} ${c-extensions} ${python-version} ${platform}" tasks: - "test-latest-standalone" + - "test-8.0-standalone" + - "test-7.0-standalone" + - "test-6.0-standalone" - "test-5.0-standalone" - "test-4.4-standalone" - "test-4.2-standalone" + - "test-4.0-standalone" rules: - # Server versions 3.6 and 4.0 support snappy and zlib. + # Server version 4.0 supports snappy and zlib but not zstd. - if: python-version: "*" c-extensions: "*" - compression: ["snappy", "zlib"] + compression: ["zstd"] then: - add_tasks: + remove_tasks: - "test-4.0-standalone" - - "test-3.6-standalone" - matrix_name: "tests-python-version-green-framework-rhel8" matrix_spec: @@ -2734,7 +2704,7 @@ buildvariants: matrix_spec: platform: rhel8 storage-engine: "*" - python-version: 3.9 + python-version: "3.9" display_name: "Storage ${storage-engine} ${python-version} ${platform}" rules: - if: @@ -2751,7 +2721,6 @@ buildvariants: - "test-4.4-standalone" - "test-4.2-standalone" - "test-4.0-standalone" - - "test-3.6-standalone" - if: # MongoDB 4.2 drops support for MMAPv1 platform: rhel8 @@ -2761,8 +2730,6 @@ buildvariants: add_tasks: - "test-4.0-standalone" - "test-4.0-replica_set" - - "test-3.6-standalone" - - "test-3.6-replica_set" # enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.9. - matrix_name: "test-disableTestCommands" @@ -2881,6 +2848,9 @@ buildvariants: tasks: # Versioned API was introduced in MongoDB 4.7 - "test-latest-standalone" + - "test-8.0-standalone" + - "test-7.0-standalone" + - "test-6.0-standalone" - "test-5.0-standalone" - matrix_name: "ocsp-test" diff --git a/README.md b/README.md index 9b5aa33f78..f5e2cdf46d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ a native Python driver for MongoDB. The `gridfs` package is a [gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst/) implementation on top of `pymongo`. -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. +PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. ## Support / Feedback diff --git a/doc/changelog.rst b/doc/changelog.rst index a73a89a0ef..6a118f56ca 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -5,6 +5,21 @@ Changes in Version 4.11.0 ------------------------- .. warning:: PyMongo 4.11 drops support for Python 3.8: Python 3.9+ or PyPy 3.9+ is now required. +.. warning:: PyMongo 4.11 drops support for MongoDB 3.6. PyMongo now supports MongoDB 4.0+. + Driver support for MongoDB 3.6 reached end of life in April 2024. + +PyMongo 4.11 brings a number of changes including: + +- Dropped support for Python 3.8. +- Dropped support for MongoDB 3.6. + +Issues Resolved +............... + +See the `PyMongo 4.11 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.11 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40784 Changes in Version 4.10.1 ------------------------- diff --git a/doc/common-issues.rst b/doc/common-issues.rst index 3d2d06a5a7..b300bac784 100644 --- a/doc/common-issues.rst +++ b/doc/common-issues.rst @@ -6,14 +6,14 @@ Also see the :ref:`TLSErrors` section. Server reports wire version X, PyMongo requires Y ------------------------------------------------- -When one attempts to connect to a <=3.4 version server, PyMongo will throw the following error:: +When one attempts to connect to a <=3.6 version server, PyMongo will throw the following error:: >>> client.admin.command('ping') ... - pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 5, but this version of PyMongo requires at least 6 (MongoDB 3.6). + pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 6, but this version of PyMongo requires at least 7 (MongoDB 4.0). This is caused by the driver being too new for the server it is being run against. -To resolve this issue either upgrade your database to version >= 3.6 or downgrade to PyMongo 3.x which supports MongoDB >= 2.6. +To resolve this issue either upgrade your database to version >= 4.0 or downgrade to an early version of PyMongo which supports MongoDB < 4.0. 'Cursor' object has no attribute '_Cursor__killed' diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 24b3cff8df..6c89910f3c 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -97,9 +97,8 @@ the "MongoDB Challenge-Response" protocol:: Default Authentication Mechanism -------------------------------- -If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 when connected -to MongoDB 3.6 and negotiates the mechanism to use (SCRAM-SHA-1 -or SCRAM-SHA-256) when connected to MongoDB 4.0+. +If no mechanism is specified, PyMongo automatically negotiates the mechanism to use (SCRAM-SHA-1 +or SCRAM-SHA-256) with the MongoDB server. Default Database and "authSource" --------------------------------- diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 5abc41a7e0..4ddcbab4d2 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -1960,20 +1960,15 @@ async def _count_cmd( collation: Optional[Collation], ) -> int: """Internal count command helper.""" - # XXX: "ns missing" checks can be removed when we drop support for - # MongoDB 3.0, see SERVER-17051. res = await self._command( conn, cmd, read_preference=read_preference, - allowable_errors=["ns missing"], codec_options=self._write_response_codec_options, read_concern=self.read_concern, collation=collation, session=session, ) - if res.get("errmsg", "") == "ns missing": - return 0 return int(res["n"]) async def _aggregate_one_result( diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 814c604562..bfae302dac 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -498,9 +498,8 @@ def __init__( - `authSource`: The database to authenticate on. Defaults to the database specified in the URI, if provided, or to "admin". - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. - If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 - when connected to MongoDB 3.6 and negotiates the mechanism to use - (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. + If no mechanism is specified, PyMongo automatically negotiates the + mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) with the MongoDB server. - `authMechanismProperties`: Used to specify authentication mechanism specific options. To specify the service name for GSSAPI authentication pass authMechanismProperties='SERVICE_NAME: int: """Internal count command helper.""" - # XXX: "ns missing" checks can be removed when we drop support for - # MongoDB 3.0, see SERVER-17051. res = self._command( conn, cmd, read_preference=read_preference, - allowable_errors=["ns missing"], codec_options=self._write_response_codec_options, read_concern=self.read_concern, collation=collation, session=session, ) - if res.get("errmsg", "") == "ns missing": - return 0 return int(res["n"]) def _aggregate_one_result( diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index b2dff5b4ab..1351cb200f 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -496,9 +496,8 @@ def __init__( - `authSource`: The database to authenticate on. Defaults to the database specified in the URI, if provided, or to "admin". - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. - If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 - when connected to MongoDB 3.6 and negotiates the mechanism to use - (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. + If no mechanism is specified, PyMongo automatically negotiates the + mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) with the MongoDB server. - `authMechanismProperties`: Used to specify authentication mechanism specific options. To specify the service name for GSSAPI authentication pass authMechanismProperties='SERVICE_NAME: Date: Wed, 9 Oct 2024 14:07:44 -0500 Subject: [PATCH 1532/2111] PYTHON-4818 Use OCSP Scripts from Drivers-Tools (#1895) --- .evergreen/config.yml | 121 ++++++++++++++++-------------------------- 1 file changed, 47 insertions(+), 74 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a345e4f5b7..1ef8751501 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -651,63 +651,16 @@ functions: CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg + bash ${DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh - run-valid-ocsp-server: - - command: shell.exec - params: - background: true - script: | - . src/.evergreen/scripts/env.sh - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - . ./activate-ocspvenv.sh - python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ - -p 8100 -v - run-revoked-ocsp-server: - - command: shell.exec - params: - background: true - script: | - . src/.evergreen/scripts/env.sh - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - . ./activate-ocspvenv.sh - python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ - -p 8100 \ - -v \ - --fault revoked - run-valid-delegate-ocsp-server: - - command: shell.exec - params: - background: true - script: | - . src/.evergreen/scripts/env.sh - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - . ./activate-ocspvenv.sh - python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ - -p 8100 -v - run-revoked-delegate-ocsp-server: - - command: shell.exec + "run-ocsp-server": + - command: subprocess.exec params: background: true - script: | - . src/.evergreen/scripts/env.sh - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - . ./activate-ocspvenv.sh - python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ - -p 8100 \ - -v \ - --fault revoked + binary: bash + include_expansions_in_env: [SERVER_TYPE, OCSP_ALGORITHM] + args: + - ${DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh "run load-balancer": - command: shell.exec @@ -1360,9 +1313,10 @@ tasks: - name: test-ocsp-rsa-valid-cert-server-staples tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: - - func: run-valid-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: "valid" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" @@ -1374,9 +1328,10 @@ tasks: - name: test-ocsp-rsa-invalid-cert-server-staples tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: - - func: run-revoked-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: "revoked" - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" @@ -1388,9 +1343,10 @@ tasks: - name: test-ocsp-rsa-valid-cert-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: run-valid-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: valid - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" @@ -1402,9 +1358,10 @@ tasks: - name: test-ocsp-rsa-invalid-cert-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: run-revoked-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: revoked - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" @@ -1427,9 +1384,10 @@ tasks: - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: run-revoked-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: revoked - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" @@ -1452,9 +1410,10 @@ tasks: - name: test-ocsp-rsa-delegate-valid-cert-server-staples tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: - - func: run-valid-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: valid-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" @@ -1466,9 +1425,10 @@ tasks: - name: test-ocsp-rsa-delegate-invalid-cert-server-staples tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] commands: - - func: run-revoked-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: revoked-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" @@ -1480,9 +1440,10 @@ tasks: - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: run-valid-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: valid-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" @@ -1494,9 +1455,10 @@ tasks: - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: run-revoked-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: revoked-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" @@ -1508,9 +1470,10 @@ tasks: - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: run-revoked-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "rsa" + SERVER_TYPE: revoked-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" @@ -1522,9 +1485,10 @@ tasks: - name: test-ocsp-ecdsa-valid-cert-server-staples tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] commands: - - func: run-valid-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: valid - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" @@ -1536,9 +1500,10 @@ tasks: - name: test-ocsp-ecdsa-invalid-cert-server-staples tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] commands: - - func: run-revoked-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: revoked - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" @@ -1550,9 +1515,10 @@ tasks: - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: run-valid-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: valid - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" @@ -1564,9 +1530,10 @@ tasks: - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: run-revoked-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: revoked - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" @@ -1589,9 +1556,10 @@ tasks: - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: run-revoked-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: revoked - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" @@ -1614,9 +1582,10 @@ tasks: - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] commands: - - func: run-valid-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: valid-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" @@ -1628,9 +1597,10 @@ tasks: - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] commands: - - func: run-revoked-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: revoked-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" @@ -1642,9 +1612,10 @@ tasks: - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: run-valid-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: valid-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" @@ -1656,9 +1627,10 @@ tasks: - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: run-revoked-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: revoked-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" @@ -1670,9 +1642,10 @@ tasks: - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: run-revoked-delegate-ocsp-server + - func: run-ocsp-server vars: OCSP_ALGORITHM: "ecdsa" + SERVER_TYPE: valid-delegate - func: "bootstrap mongo-orchestration" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" From 8f26f43911ecb7cf9973040807d004c69ed88eb2 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 10 Oct 2024 09:01:27 -0700 Subject: [PATCH 1533/2111] PYTHON-4450 Support free-threaded Python 3.13t with no-GIL (#1906) --- .github/workflows/dist.yml | 5 ++++- .github/workflows/test-python.yml | 23 +++++++++++++++++++---- bson/_cbsonmodule.c | 3 +++ doc/changelog.rst | 2 ++ pymongo/_cmessagemodule.c | 3 +++ pyproject.toml | 2 ++ test/asynchronous/test_client_context.py | 7 +++++++ test/test_client_context.py | 7 +++++++ 8 files changed, 47 insertions(+), 5 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index fbc7ff7390..858d269e08 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -67,7 +67,7 @@ jobs: # Note: the default manylinux is manylinux2014 run: | python -m pip install -U pip - python -m pip install "cibuildwheel>=2.17,<3" + python -m pip install "cibuildwheel>=2.20,<3" - name: Build wheels env: @@ -89,6 +89,9 @@ jobs: ls wheelhouse/*cp310*.whl ls wheelhouse/*cp311*.whl ls wheelhouse/*cp312*.whl + ls wheelhouse/*cp313*.whl + # Free-threading builds: + ls wheelhouse/*cp313t*.whl - uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index e55444ceca..40991440d3 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -51,11 +51,18 @@ jobs: strategy: matrix: os: [ubuntu-20.04] - python-version: ["3.9", "pypy-3.9", "3.13"] + python-version: ["3.9", "pypy-3.9", "3.13", "3.13t"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v4 - - name: Setup Python + - if: ${{ matrix.python-version == '3.13t' }} + name: Setup free-threaded Python + uses: deadsnakes/action@v3.2.0 + with: + python-version: 3.13 + nogil: true + - if: ${{ matrix.python-version != '3.13t' }} + name: Setup Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -65,9 +72,13 @@ jobs: - name: Install dependencies run: | pip install -U pip - if [ "${{ matrix.python-version }}" == "3.13" ]; then + if [[ "${{ matrix.python-version }}" == "3.13" ]]; then pip install --pre cffi setuptools pip install --no-build-isolation hatch + elif [[ "${{ matrix.python-version }}" == "3.13t" ]]; then + # Hatch can't be installed on 3.13t, use pytest directly. + pip install . + pip install -r requirements/test.txt else pip install hatch fi @@ -77,7 +88,11 @@ jobs: mongodb-version: 6.0 - name: Run tests run: | - hatch run test:test + if [[ "${{ matrix.python-version }}" == "3.13t" ]]; then + pytest -v --durations=5 --maxfail=10 + else + hatch run test:test + fi doctest: runs-on: ubuntu-latest diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 223c392280..a66071c285 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -3184,6 +3184,9 @@ static PyModuleDef_Slot _cbson_slots[] = { {Py_mod_exec, _cbson_exec}, #if defined(Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED) {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030D0000 + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif {0, NULL}, }; diff --git a/doc/changelog.rst b/doc/changelog.rst index 6a118f56ca..e7b160b176 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -12,6 +12,8 @@ PyMongo 4.11 brings a number of changes including: - Dropped support for Python 3.8. - Dropped support for MongoDB 3.6. +- Added support for free-threaded Python with the GIL disabled. For more information see: + `Free-threaded CPython `_. Issues Resolved ............... diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index b5adbeec32..eb457b341c 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -1022,6 +1022,9 @@ static PyModuleDef_Slot _cmessage_slots[] = { {Py_mod_exec, _cmessage_exec}, #ifdef Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030D0000 + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, #endif {0, NULL}, }; diff --git a/pyproject.toml b/pyproject.toml index 2688aab27e..b4f59f67d5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -236,6 +236,8 @@ partial_branches = ["if (.*and +)*not _use_c( and.*)*:"] directory = "htmlcov" [tool.cibuildwheel] +# Enable free-threaded support +free-threaded-support = true skip = "pp* *-musllinux*" build-frontend = "build" test-command = "python {project}/tools/fail_if_no_c.py" diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py index a0cb53a14f..6d77818436 100644 --- a/test/asynchronous/test_client_context.py +++ b/test/asynchronous/test_client_context.py @@ -61,6 +61,13 @@ def test_setdefaultencoding_worked(self): self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) + def test_free_threading_is_enabled(self): + if "free-threading build" not in sys.version: + raise SkipTest("this test requires the Python free-threading build") + + # If the GIL is enabled then pymongo or one of our deps does not support free-threading. + self.assertFalse(sys._is_gil_enabled()) # type: ignore[attr-defined] + if __name__ == "__main__": unittest.main() diff --git a/test/test_client_context.py b/test/test_client_context.py index be8a562142..5996f9243b 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -61,6 +61,13 @@ def test_setdefaultencoding_worked(self): self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) + def test_free_threading_is_enabled(self): + if "free-threading build" not in sys.version: + raise SkipTest("this test requires the Python free-threading build") + + # If the GIL is enabled then pymongo or one of our deps does not support free-threading. + self.assertFalse(sys._is_gil_enabled()) # type: ignore[attr-defined] + if __name__ == "__main__": unittest.main() From d1e4167dc96ee71bd3b0e0e93239b416d35795f9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 10 Oct 2024 13:47:14 -0400 Subject: [PATCH 1534/2111] PYTHON-4841 - Convert test.test_common to async (#1912) --- test/asynchronous/test_common.py | 185 +++++++++++++++++++++++++++++++ test/test_common.py | 52 +++++---- tools/synchro.py | 1 + 3 files changed, 214 insertions(+), 24 deletions(-) create mode 100644 test/asynchronous/test_common.py diff --git a/test/asynchronous/test_common.py b/test/asynchronous/test_common.py new file mode 100644 index 0000000000..00495e7c30 --- /dev/null +++ b/test/asynchronous/test_common.py @@ -0,0 +1,185 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the pymongo common module.""" +from __future__ import annotations + +import sys +import uuid + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest + +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.errors import OperationFailure +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCommon(AsyncIntegrationTest): + async def test_uuid_representation(self): + coll = self.db.uuid + await coll.drop() + + # Test property + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) + + # Test basic query + uu = uuid.uuid4() + # Insert as binary subtype 3 + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + legacy_opts = coll.codec_options + await coll.insert_one({"uu": uu}) + self.assertEqual(uu, (await coll.find_one({"uu": uu}))["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual(STANDARD, coll.codec_options.uuid_representation) + self.assertEqual(None, await coll.find_one({"uu": uu})) + uul = Binary.from_uuid(uu, PYTHON_LEGACY) + self.assertEqual(uul, (await coll.find_one({"uu": uul}))["uu"]) # type: ignore + + # Test count_documents + self.assertEqual(0, await coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, await coll.count_documents({"uu": uu})) + + # Test delete + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + await coll.delete_one({"uu": uu}) + self.assertEqual(1, await coll.count_documents({})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + await coll.delete_one({"uu": uu}) + self.assertEqual(0, await coll.count_documents({})) + + # Test update_one + await coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + await coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + await coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + + # Test Cursor.distinct + self.assertEqual([2], await coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], await coll.find({"_id": uu}).distinct("i")) + + # Test findAndModify + self.assertEqual(None, await coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, (await coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}}))["i"]) + self.assertEqual(5, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + + # Test command + self.assertEqual( + 5, + ( + await self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + ) + )["value"]["i"], + ) + self.assertEqual( + 6, + ( + await self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ) + )["value"]["i"], + ) + + async def test_write_concern(self): + c = await self.async_rs_or_single_client(connect=False) + self.assertEqual(WriteConcern(), c.write_concern) + + c = await self.async_rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) + wc = WriteConcern(w=2, wtimeout=1000) + self.assertEqual(wc, c.write_concern) + + # Can we override back to the server default? + db = c.get_database("pymongo_test", write_concern=WriteConcern()) + self.assertEqual(db.write_concern, WriteConcern()) + + db = c.pymongo_test + self.assertEqual(wc, db.write_concern) + coll = db.test + self.assertEqual(wc, coll.write_concern) + + cwc = WriteConcern(j=True) + coll = db.get_collection("test", write_concern=cwc) + self.assertEqual(cwc, coll.write_concern) + self.assertEqual(wc, db.write_concern) + + async def test_mongo_client(self): + pair = await async_client_context.pair + m = await self.async_rs_or_single_client(w=0) + coll = m.pymongo_test.write_concern_test + await coll.drop() + doc = {"_id": ObjectId()} + await coll.insert_one(doc) + self.assertTrue(await coll.insert_one(doc)) + coll = coll.with_options(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + + m = await self.async_rs_or_single_client() + coll = m.pymongo_test.write_concern_test + new_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertTrue(await new_coll.insert_one(doc)) + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + + m = await self.async_rs_or_single_client( + f"mongodb://{pair}/", replicaSet=async_client_context.replica_set_name + ) + + coll = m.pymongo_test.write_concern_test + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + m = await self.async_rs_or_single_client( + f"mongodb://{pair}/?w=0", replicaSet=async_client_context.replica_set_name + ) + + coll = m.pymongo_test.write_concern_test + await coll.insert_one(doc) + + # Equality tests + direct = await connected(await self.async_single_client(w=0)) + direct2 = await connected( + await self.async_single_client(f"mongodb://{pair}/?w=0", **self.credentials) + ) + self.assertEqual(direct, direct2) + self.assertFalse(direct != direct2) + + async def test_validate_boolean(self): + await self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + await self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_common.py b/test/test_common.py index 3228dc97fb..e69b421c9f 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -28,10 +28,7 @@ from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern - -@client_context.require_connection -def setUpModule(): - pass +_IS_SYNC = True class TestCommon(IntegrationTest): @@ -48,12 +45,12 @@ def test_uuid_representation(self): coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) legacy_opts = coll.codec_options coll.insert_one({"uu": uu}) - self.assertEqual(uu, coll.find_one({"uu": uu})["uu"]) # type: ignore + self.assertEqual(uu, (coll.find_one({"uu": uu}))["uu"]) # type: ignore coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) self.assertEqual(None, coll.find_one({"uu": uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uul, coll.find_one({"uu": uul})["uu"]) # type: ignore + self.assertEqual(uul, (coll.find_one({"uu": uul}))["uu"]) # type: ignore # Test count_documents self.assertEqual(0, coll.count_documents({"uu": uu})) @@ -73,9 +70,9 @@ def test_uuid_representation(self): coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) coll.update_one({"_id": uu}, {"$set": {"i": 2}}) coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({"_id": uu})["i"]) # type: ignore + self.assertEqual(1, (coll.find_one({"_id": uu}))["i"]) # type: ignore coll.update_one({"_id": uu}, {"$set": {"i": 2}}) - self.assertEqual(2, coll.find_one({"_id": uu})["i"]) # type: ignore + self.assertEqual(2, (coll.find_one({"_id": uu}))["i"]) # type: ignore # Test Cursor.distinct self.assertEqual([2], coll.find({"_id": uu}).distinct("i")) @@ -85,27 +82,31 @@ def test_uuid_representation(self): # Test findAndModify self.assertEqual(None, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(2, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})["i"]) - self.assertEqual(5, coll.find_one({"_id": uu})["i"]) # type: ignore + self.assertEqual(2, (coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}}))["i"]) + self.assertEqual(5, (coll.find_one({"_id": uu}))["i"]) # type: ignore # Test command self.assertEqual( 5, - self.db.command( - "findAndModify", - "uuid", - update={"$set": {"i": 6}}, - query={"_id": uu}, - codec_options=legacy_opts, + ( + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + ) )["value"]["i"], ) self.assertEqual( 6, - self.db.command( - "findAndModify", - "uuid", - update={"$set": {"i": 7}}, - query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ( + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ) )["value"]["i"], ) @@ -140,20 +141,23 @@ def test_mongo_client(self): coll.insert_one(doc) self.assertTrue(coll.insert_one(doc)) coll = coll.with_options(write_concern=WriteConcern(w=1)) - self.assertRaises(OperationFailure, coll.insert_one, doc) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) m = self.rs_or_single_client() coll = m.pymongo_test.write_concern_test new_coll = coll.with_options(write_concern=WriteConcern(w=0)) self.assertTrue(new_coll.insert_one(doc)) - self.assertRaises(OperationFailure, coll.insert_one, doc) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) m = self.rs_or_single_client( f"mongodb://{pair}/", replicaSet=client_context.replica_set_name ) coll = m.pymongo_test.write_concern_test - self.assertRaises(OperationFailure, coll.insert_one, doc) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) m = self.rs_or_single_client( f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name ) diff --git a/tools/synchro.py b/tools/synchro.py index 5ce83cfbeb..48c7fc59fd 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -189,6 +189,7 @@ def async_only_test(f: str) -> bool: "test_client_bulk_write.py", "test_client_context.py", "test_collection.py", + "test_common.py", "test_connections_survive_primary_stepdown_spec.py", "test_cursor.py", "test_database.py", From c2338d879b1ccd37eb5d970c8f8be97674a9a252 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 10 Oct 2024 16:38:25 -0400 Subject: [PATCH 1535/2111] PYTHON-4839 - Convert test.test_collation to async (#1911) --- test/asynchronous/test_collation.py | 290 ++++++++++++++++++++++++++++ test/test_collation.py | 11 +- tools/synchro.py | 1 + 3 files changed, 298 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_collation.py diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py new file mode 100644 index 0000000000..be3ea22e42 --- /dev/null +++ b/test/asynchronous/test_collation.py @@ -0,0 +1,290 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collation module.""" +from __future__ import annotations + +import functools +import warnings +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import EventListener +from typing import Any + +from pymongo.asynchronous.helpers import anext +from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) +from pymongo.errors import ConfigurationError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCollationObject(unittest.TestCase): + def test_constructor(self): + self.assertRaises(TypeError, Collation, locale=42) + # Fill in a locale to test the other options. + _Collation = functools.partial(Collation, "en_US") + # No error. + _Collation(caseFirst=CollationCaseFirst.UPPER) + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") + self.assertRaises(TypeError, _Collation, alternate=5) + self.assertRaises(TypeError, _Collation, maxVariable=2) + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") + + # No errors. + Collation("en_US", future_option="bar", another_option=42) + collation = Collation( + "en_US", + caseLevel=True, + caseFirst=CollationCaseFirst.UPPER, + strength=CollationStrength.QUATERNARY, + numericOrdering=True, + alternate=CollationAlternate.SHIFTED, + maxVariable=CollationMaxVariable.SPACE, + normalization=True, + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) + + +class TestCollation(AsyncIntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation + + @classmethod + @async_client_context.require_connection + async def _setup_class(cls): + await super()._setup_class() + cls.listener = EventListener() + cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) + cls.db = cls.client.pymongo_test + cls.collation = Collation("en_US") + cls.warn_context = warnings.catch_warnings() + cls.warn_context.__enter__() + warnings.simplefilter("ignore", DeprecationWarning) + + @classmethod + async def _tearDown_class(cls): + cls.warn_context.__exit__() + cls.warn_context = None + await cls.client.close() + await super()._tearDown_class() + + def tearDown(self): + self.listener.reset() + super().tearDown() + + def last_command_started(self): + return self.listener.started_events[-1].command + + def assertCollationInLastCommand(self): + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) + + async def test_create_collection(self): + await self.db.test.drop() + await self.db.create_collection("test", collation=self.collation) + self.assertCollationInLastCommand() + + # Test passing collation as a dict as well. + await self.db.test.drop() + self.listener.reset() + await self.db.create_collection("test", collation=self.collation.document) + self.assertCollationInLastCommand() + + def test_index_model(self): + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) + + async def test_create_index(self): + await self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.started_events[0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) + + async def test_aggregate(self): + await self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) + self.assertCollationInLastCommand() + + async def test_count_documents(self): + await self.db.test.count_documents({}, collation=self.collation) + self.assertCollationInLastCommand() + + async def test_distinct(self): + await self.db.test.distinct("foo", collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find(collation=self.collation).distinct("foo") + self.assertCollationInLastCommand() + + async def test_find_command(self): + await self.db.test.insert_one({"is this thing on?": True}) + self.listener.reset() + await anext(self.db.test.find(collation=self.collation)) + self.assertCollationInLastCommand() + + async def test_explain_command(self): + self.listener.reset() + await self.db.test.find(collation=self.collation).explain() + # The collation should be part of the explained command. + self.assertEqual( + self.collation.document, self.last_command_started()["explain"]["collation"] + ) + + async def test_delete(self): + await self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + self.listener.reset() + await self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + async def test_update(self): + await self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + await self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + await self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + async def test_find_and(self): + await self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) + self.assertCollationInLastCommand() + + async def test_bulk_write(self): + await self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command + + def check_ops(ops): + for op in ops: + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) + else: + self.assertEqual(self.collation.document, op["collation"]) + + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) + + async def test_indexes_same_keys_different_collations(self): + await self.db.test.drop() + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + await self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) + indexes = await self.db.test.index_information() + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + await self.db.test.drop_index("fieldname_1") + indexes = await self.db.test.index_information() + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) + + async def test_unacknowledged_write(self): + unacknowledged = WriteConcern(w=0) + collection = self.db.get_collection("test", write_concern=unacknowledged) + with self.assertRaises(ConfigurationError): + await collection.update_one( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + with self.assertRaises(ConfigurationError): + await collection.bulk_write([update_one]) + + async def test_cursor_collation(self): + await self.db.test.insert_one({"hello": "world"}) + await anext(self.db.test.find().collation(self.collation)) + self.assertCollationInLastCommand() diff --git a/test/test_collation.py b/test/test_collation.py index 19df25c1c0..e5c1c7eb11 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -37,8 +37,11 @@ UpdateMany, UpdateOne, ) +from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestCollationObject(unittest.TestCase): def test_constructor(self): @@ -96,8 +99,8 @@ class TestCollation(IntegrationTest): @classmethod @client_context.require_connection - def setUpClass(cls): - super().setUpClass() + def _setup_class(cls): + super()._setup_class() cls.listener = EventListener() cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test @@ -107,11 +110,11 @@ def setUpClass(cls): warnings.simplefilter("ignore", DeprecationWarning) @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.warn_context.__exit__() cls.warn_context = None cls.client.close() - super().tearDownClass() + super()._tearDown_class() def tearDown(self): self.listener.reset() diff --git a/tools/synchro.py b/tools/synchro.py index 48c7fc59fd..0ec8985a05 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -188,6 +188,7 @@ def async_only_test(f: str) -> bool: "test_client.py", "test_client_bulk_write.py", "test_client_context.py", + "test_collation.py", "test_collection.py", "test_common.py", "test_connections_survive_primary_stepdown_spec.py", From 8118aea985f017457259bff78e64656232f08eb5 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 08:29:12 -0400 Subject: [PATCH 1536/2111] =?UTF-8?q?PYTHON-4844=20-=20Skip=20async=20test?= =?UTF-8?q?=5Fencryption.AsyncTestSpec.test=5Flegacy=5Fti=E2=80=A6=20(#191?= =?UTF-8?q?4)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/asynchronous/test_encryption.py | 5 +++++ test/test_encryption.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index c3f6223384..3e52fb9e1b 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -693,6 +693,11 @@ def maybe_skip_scenario(self, test): self.skipTest("PYTHON-3706 flaky test on Windows/macOS") if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and not _IS_SYNC + ): + self.skipTest("PYTHON-4844 flaky test on async") def setup_scenario(self, scenario_def): """Override a test's setup.""" diff --git a/test/test_encryption.py b/test/test_encryption.py index 43c85e2c5b..64aa7ebf50 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -691,6 +691,11 @@ def maybe_skip_scenario(self, test): self.skipTest("PYTHON-3706 flaky test on Windows/macOS") if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and not _IS_SYNC + ): + self.skipTest("PYTHON-4844 flaky test on async") def setup_scenario(self, scenario_def): """Override a test's setup.""" From 3a662291e010cbed832c00aff8ffe7b43d470489 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 10:48:24 -0400 Subject: [PATCH 1537/2111] PYTHON-4700 - Convert CSFLE tests to async (#1907) --- .evergreen/run-tests.sh | 4 +- pymongo/asynchronous/encryption.py | 12 +- pymongo/network_layer.py | 27 +- pymongo/synchronous/encryption.py | 12 +- test/__init__.py | 11 +- test/asynchronous/__init__.py | 11 +- test/asynchronous/test_encryption.py | 257 +++++++++--------- test/asynchronous/utils_spec_runner.py | 172 +++++++++++- .../spec/legacy/timeoutMS.json | 4 +- test/test_connection_monitoring.py | 3 +- test/test_encryption.py | 255 +++++++++-------- test/test_server_selection_in_window.py | 2 +- test/utils.py | 147 ---------- test/utils_spec_runner.py | 170 +++++++++++- tools/synchro.py | 2 + 15 files changed, 655 insertions(+), 434 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 8d7a9f082a..5e8429dd28 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -257,9 +257,9 @@ if [ -z "$GREEN_FRAMEWORK" ]; then # Use --capture=tee-sys so pytest prints test output inline: # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html if [ -z "$TEST_SUITES" ]; then - python -m pytest -v --capture=tee-sys --durations=5 --maxfail=10 $TEST_ARGS + python -m pytest -v --capture=tee-sys --durations=5 $TEST_ARGS else - python -m pytest -v --capture=tee-sys --durations=5 --maxfail=10 -m $TEST_SUITES $TEST_ARGS + python -m pytest -v --capture=tee-sys --durations=5 -m $TEST_SUITES $TEST_ARGS fi else python green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 9b00c13e10..735e543047 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -180,10 +180,20 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: while kms_context.bytes_needed > 0: # CSOT: update timeout. conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = conn.recv(kms_context.bytes_needed) + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + async_receive_data_socket, + ) + + data = await async_receive_data_socket(conn, kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) + # Async raises an OSError instead of returning empty bytes + except OSError as err: + raise OSError("KMS connection closed") from err except BLOCKING_IO_ERRORS: raise socket.timeout("timed out") from None finally: diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 4b57620d83..d14a21f41d 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -130,7 +130,7 @@ def _is_ready(fut: Future) -> None: loop.remove_writer(fd) async def _async_receive_ssl( - conn: _sslConn, length: int, loop: AbstractEventLoop + conn: _sslConn, length: int, loop: AbstractEventLoop, once: Optional[bool] = False ) -> memoryview: mv = memoryview(bytearray(length)) total_read = 0 @@ -145,6 +145,9 @@ def _is_ready(fut: Future) -> None: read = conn.recv_into(mv[total_read:]) if read == 0: raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] total_read += read except BLOCKING_IO_ERRORS as exc: fd = conn.fileno() @@ -275,6 +278,28 @@ async def async_receive_data( sock.settimeout(sock_timeout) +async def async_receive_data_socket( + sock: Union[socket.socket, _sslConn], length: int +) -> memoryview: + sock_timeout = sock.gettimeout() + timeout = sock_timeout + + sock.settimeout(0.0) + loop = asyncio.get_event_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + return await asyncio.wait_for( + _async_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] + timeout=timeout, + ) + else: + return await asyncio.wait_for(_async_receive(sock, length, loop), timeout=timeout) # type: ignore[arg-type] + except asyncio.TimeoutError as err: + raise socket.timeout("timed out") from err + finally: + sock.settimeout(sock_timeout) + + async def _async_receive(conn: socket.socket, length: int, loop: AbstractEventLoop) -> memoryview: mv = memoryview(bytearray(length)) bytes_read = 0 diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index efef6df9e8..506ff8bcba 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -180,10 +180,20 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: while kms_context.bytes_needed > 0: # CSOT: update timeout. conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = conn.recv(kms_context.bytes_needed) + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + receive_data_socket, + ) + + data = receive_data_socket(conn, kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) + # Async raises an OSError instead of returning empty bytes + except OSError as err: + raise OSError("KMS connection closed") from err except BLOCKING_IO_ERRORS: raise socket.timeout("timed out") from None finally: diff --git a/test/__init__.py b/test/__init__.py index af12bc032a..fd33fde293 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -464,11 +464,12 @@ def wrap(*args, **kwargs): if not self.connected: pair = self.pair raise SkipTest(f"Cannot connect to MongoDB on {pair}") - if iscoroutinefunction(condition) and condition(): - if wraps_async: - return f(*args, **kwargs) - else: - return f(*args, **kwargs) + if iscoroutinefunction(condition): + if condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) elif condition(): if wraps_async: return f(*args, **kwargs) diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 2a44785b2f..0579828c49 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -466,11 +466,12 @@ async def wrap(*args, **kwargs): if not self.connected: pair = await self.pair raise SkipTest(f"Cannot connect to MongoDB on {pair}") - if iscoroutinefunction(condition) and await condition(): - if wraps_async: - return await f(*args, **kwargs) - else: - return f(*args, **kwargs) + if iscoroutinefunction(condition): + if await condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) elif condition(): if wraps_async: return await f(*args, **kwargs) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 3e52fb9e1b..88b005c4b3 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -30,6 +30,7 @@ import warnings from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.utils_spec_runner import AsyncSpecRunner, AsyncSpecTestCreator from threading import Thread from typing import Any, Dict, Mapping, Optional @@ -59,7 +60,6 @@ from test.utils import ( AllowListEventListener, OvertCommandListener, - SpecTestCreator, TopologyEventListener, async_wait_until, camel_to_snake_args, @@ -626,137 +626,132 @@ async def test_with_statement(self): KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} -if _IS_SYNC: - # TODO: Add asynchronous SpecRunner (https://jira.mongodb.org/browse/PYTHON-4700) - class TestSpec(AsyncSpecRunner): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def setUpClass(cls): - super().setUpClass() - - def parse_auto_encrypt_opts(self, opts): - """Parse clientOptions.autoEncryptOpts.""" - opts = camel_to_snake_args(opts) - kms_providers = opts["kms_providers"] - if "aws" in kms_providers: - kms_providers["aws"] = AWS_CREDS - if not any(AWS_CREDS.values()): - self.skipTest("AWS environment credentials are not set") - if "awsTemporary" in kms_providers: - kms_providers["aws"] = AWS_TEMP_CREDS - del kms_providers["awsTemporary"] - if not any(AWS_TEMP_CREDS.values()): - self.skipTest("AWS Temp environment credentials are not set") - if "awsTemporaryNoSessionToken" in kms_providers: - kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS - del kms_providers["awsTemporaryNoSessionToken"] - if not any(AWS_TEMP_NO_SESSION_CREDS.values()): - self.skipTest("AWS Temp environment credentials are not set") - if "azure" in kms_providers: - kms_providers["azure"] = AZURE_CREDS - if not any(AZURE_CREDS.values()): - self.skipTest("Azure environment credentials are not set") - if "gcp" in kms_providers: - kms_providers["gcp"] = GCP_CREDS - if not any(AZURE_CREDS.values()): - self.skipTest("GCP environment credentials are not set") - if "kmip" in kms_providers: - kms_providers["kmip"] = KMIP_CREDS - opts["kms_tls_options"] = KMS_TLS_OPTS - if "key_vault_namespace" not in opts: - opts["key_vault_namespace"] = "keyvault.datakeys" - if "extra_options" in opts: - opts.update(camel_to_snake_args(opts.pop("extra_options"))) - - opts = dict(opts) - return AutoEncryptionOpts(**opts) - - def parse_client_options(self, opts): - """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop("autoEncryptOpts", None) - if encrypt_opts: - opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - - return super().parse_client_options(opts) - - def get_object_name(self, op): - """Default object is collection.""" - return op.get("object", "collection") - - def maybe_skip_scenario(self, test): - super().maybe_skip_scenario(test) - desc = test["description"].lower() - if ( - "timeoutms applied to listcollections to get collection schema" in desc - and sys.platform in ("win32", "darwin") - ): - self.skipTest("PYTHON-3706 flaky test on Windows/macOS") - if "type=symbol" in desc: - self.skipTest("PyMongo does not support the symbol type") - if ( - "timeoutms applied to listcollections to get collection schema" in desc - and not _IS_SYNC - ): - self.skipTest("PYTHON-4844 flaky test on async") - - def setup_scenario(self, scenario_def): - """Override a test's setup.""" - key_vault_data = scenario_def["key_vault_data"] - encrypted_fields = scenario_def["encrypted_fields"] - json_schema = scenario_def["json_schema"] - data = scenario_def["data"] - coll = async_client_context.client.get_database("keyvault", codec_options=OPTS)[ - "datakeys" - ] - coll.delete_many({}) - if key_vault_data: - coll.insert_many(key_vault_data) - - db_name = self.get_scenario_db_name(scenario_def) - coll_name = self.get_scenario_coll_name(scenario_def) - db = async_client_context.client.get_database(db_name, codec_options=OPTS) - coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) - wc = WriteConcern(w="majority") - kwargs: Dict[str, Any] = {} - if json_schema: - kwargs["validator"] = {"$jsonSchema": json_schema} - kwargs["codec_options"] = OPTS - if not data: - kwargs["write_concern"] = wc - if encrypted_fields: - kwargs["encryptedFields"] = encrypted_fields - db.create_collection(coll_name, **kwargs) - coll = db[coll_name] - if data: - # Load data. - coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) - - def allowable_errors(self, op): - """Override expected error classes.""" - errors = super().allowable_errors(op) - # An updateOne test expects encryption to error when no $ operator - # appears but pymongo raises a client side ValueError in this case. - if op["name"] == "updateOne": - errors += (ValueError,) - return errors - - def create_test(scenario_def, test, name): - @async_client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) - test_creator.create_tests() - - if _HAVE_PYMONGOCRYPT: - globals().update( - generate_test_classes( - os.path.join(SPEC_PATH, "unified"), - module=__name__, - ) +class AsyncTestSpec(AsyncSpecRunner): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def _setup_class(cls): + await super()._setup_class() + + def parse_auto_encrypt_opts(self, opts): + """Parse clientOptions.autoEncryptOpts.""" + opts = camel_to_snake_args(opts) + kms_providers = opts["kms_providers"] + if "aws" in kms_providers: + kms_providers["aws"] = AWS_CREDS + if not any(AWS_CREDS.values()): + self.skipTest("AWS environment credentials are not set") + if "awsTemporary" in kms_providers: + kms_providers["aws"] = AWS_TEMP_CREDS + del kms_providers["awsTemporary"] + if not any(AWS_TEMP_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "awsTemporaryNoSessionToken" in kms_providers: + kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers["awsTemporaryNoSessionToken"] + if not any(AWS_TEMP_NO_SESSION_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "azure" in kms_providers: + kms_providers["azure"] = AZURE_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("Azure environment credentials are not set") + if "gcp" in kms_providers: + kms_providers["gcp"] = GCP_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("GCP environment credentials are not set") + if "kmip" in kms_providers: + kms_providers["kmip"] = KMIP_CREDS + opts["kms_tls_options"] = KMS_TLS_OPTS + if "key_vault_namespace" not in opts: + opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) + + opts = dict(opts) + return AutoEncryptionOpts(**opts) + + def parse_client_options(self, opts): + """Override clientOptions parsing to support autoEncryptOpts.""" + encrypt_opts = opts.pop("autoEncryptOpts", None) + if encrypt_opts: + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) + + return super().parse_client_options(opts) + + def get_object_name(self, op): + """Default object is collection.""" + return op.get("object", "collection") + + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to listcollections to get collection schema" in desc and not _IS_SYNC: + self.skipTest("PYTHON-4844 flaky test on async") + + async def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = async_client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + await coll.delete_many({}) + if key_vault_data: + await coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = async_client_context.client.get_database(db_name, codec_options=OPTS) + await db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + await db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + await coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. + if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + +async def create_test(scenario_def, test, name): + @async_client_context.require_test_commands + async def run_scenario(self): + await self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = AsyncSpecTestCreator(create_test, AsyncTestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator.create_tests() + +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, ) + ) # Prose Tests ALL_KMS_PROVIDERS = { diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index 12cb13c2cd..4d9c4c8f20 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -15,8 +15,12 @@ """Utilities for testing driver specs.""" from __future__ import annotations +import asyncio import functools +import os import threading +import unittest +from asyncio import iscoroutinefunction from collections import abc from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs from test.utils import ( @@ -24,6 +28,7 @@ CompareType, EventListener, OvertCommandListener, + ScenarioDict, ServerAndTopologyEventListener, camel_to_snake, camel_to_snake_args, @@ -32,11 +37,12 @@ ) from typing import List -from bson import ObjectId, decode, encode +from bson import ObjectId, decode, encode, json_util from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from gridfs import GridFSBucket +from gridfs.asynchronous.grid_file import AsyncGridFSBucket from pymongo.asynchronous import client_session from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor @@ -83,6 +89,161 @@ def run(self): self.stop() +class AsyncSpecTestCreator: + """Class to create test cases from specifications.""" + + def __init__(self, create_test, test_class, test_path): + """Create a TestCreator object. + + :Parameters: + - `create_test`: callback that returns a test case. The callback + must accept the following arguments - a dictionary containing the + entire test specification (the `scenario_def`), a dictionary + containing the specification for which the test case will be + generated (the `test_def`). + - `test_class`: the unittest.TestCase class in which to create the + test case. + - `test_path`: path to the directory containing the JSON files with + the test specifications. + """ + self._create_test = create_test + self._test_class = test_class + self.test_path = test_path + + def _ensure_min_max_server_version(self, scenario_def, method): + """Test modifier that enforces a version range for the server on a + test case. + """ + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) + if min_ver is not None: + method = async_client_context.require_version_min(*min_ver)(method) + + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) + if max_ver is not None: + method = async_client_context.require_version_max(*max_ver)(method) + + if "serverless" in scenario_def: + serverless = scenario_def["serverless"] + if serverless == "require": + serverless_satisfied = async_client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not async_client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + method = unittest.skipUnless( + serverless_satisfied, "Serverless requirement not satisfied" + )(method) + + return method + + @staticmethod + async def valid_topology(run_on_req): + return await async_client_context.is_topology_type( + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) + + @staticmethod + def min_server_version(run_on_req): + version = run_on_req.get("minServerVersion") + if version: + min_ver = tuple(int(elt) for elt in version.split(".")) + return async_client_context.version >= min_ver + return True + + @staticmethod + def max_server_version(run_on_req): + version = run_on_req.get("maxServerVersion") + if version: + max_ver = tuple(int(elt) for elt in version.split(".")) + return async_client_context.version <= max_ver + return True + + @staticmethod + def valid_auth_enabled(run_on_req): + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: + return async_client_context.auth_enabled + return not async_client_context.auth_enabled + return True + + @staticmethod + def serverless_ok(run_on_req): + serverless = run_on_req["serverless"] + if serverless == "require": + return async_client_context.serverless + elif serverless == "forbid": + return not async_client_context.serverless + else: # unset or "allow" + return True + + async def should_run_on(self, scenario_def): + run_on = scenario_def.get("runOn", []) + if not run_on: + # Always run these tests. + return True + + for req in run_on: + if ( + await self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + and self.serverless_ok(req) + ): + return True + return False + + def ensure_run_on(self, scenario_def, method): + """Test modifier that enforces a 'runOn' on a test case.""" + + async def predicate(): + return await self.should_run_on(scenario_def) + + return async_client_context._require(predicate, "runOn not satisfied", method) + + def tests(self, scenario_def): + """Allow CMAP spec test to override the location of test.""" + return scenario_def["tests"] + + async def _create_tests(self): + for dirpath, _, filenames in os.walk(self.test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: # noqa: ASYNC101, RUF100 + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = ScenarioDict( + json_util.loads(scenario_stream.read(), json_options=opts) + ) + + test_type = os.path.splitext(filename)[0] + + # Construct test from scenario. + for test_def in self.tests(scenario_def): + test_name = "test_{}_{}_{}".format( + dirname, + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) + + new_test = await self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) + + new_test.__name__ = test_name + setattr(self._test_class, new_test.__name__, new_test) + + def create_tests(self): + if _IS_SYNC: + self._create_tests() + else: + asyncio.run(self._create_tests()) + + class AsyncSpecRunner(AsyncIntegrationTest): mongos_clients: List knobs: client_knobs @@ -284,7 +445,7 @@ async def run_operation(self, sessions, collection, operation): if object_name == "gridfsbucket": # Only create the GridFSBucket when we need it (for the gridfs # retryable reads tests). - obj = GridFSBucket(database, bucket_name=collection.name) + obj = AsyncGridFSBucket(database, bucket_name=collection.name) else: objects = { "client": database.client, @@ -312,7 +473,10 @@ async def run_operation(self, sessions, collection, operation): args.update(arguments) arguments = args - result = cmd(**dict(arguments)) + if not _IS_SYNC and iscoroutinefunction(cmd): + result = await cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) # Cleanup open change stream cursors. if name == "watch": self.addAsyncCleanup(result.close) @@ -588,7 +752,7 @@ async def run_scenario(self, scenario_def, test): read_preference=ReadPreference.PRIMARY, read_concern=ReadConcern("local"), ) - actual_data = await (await outcome_coll.find(sort=[("_id", 1)])).to_list() + actual_data = await outcome_coll.find(sort=[("_id", 1)]).to_list() # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json index b667767cfc..8411306224 100644 --- a/test/client-side-encryption/spec/legacy/timeoutMS.json +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -110,7 +110,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } }, "clientOptions": { @@ -119,7 +119,7 @@ "aws": {} } }, - "timeoutMS": 50 + "timeoutMS": 500 }, "operations": [ { diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 142af0f9a7..d576a1184a 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -25,14 +25,13 @@ from test.pymongo_mocks import DummyMonitor from test.utils import ( CMAPListener, - SpecTestCreator, camel_to_snake, client_context, get_pool, get_pools, wait_until, ) -from test.utils_spec_runner import SpecRunnerThread +from test.utils_spec_runner import SpecRunnerThread, SpecTestCreator from bson.objectid import ObjectId from bson.son import SON diff --git a/test/test_encryption.py b/test/test_encryption.py index 64aa7ebf50..13a69ca9ad 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -30,6 +30,7 @@ import warnings from test import IntegrationTest, PyMongoTestCase, client_context from test.test_bulk import BulkTestBase +from test.utils_spec_runner import SpecRunner, SpecTestCreator from threading import Thread from typing import Any, Dict, Mapping, Optional @@ -58,7 +59,6 @@ from test.utils import ( AllowListEventListener, OvertCommandListener, - SpecTestCreator, TopologyEventListener, camel_to_snake_args, is_greenthread_patched, @@ -624,135 +624,132 @@ def test_with_statement(self): KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} -if _IS_SYNC: - # TODO: Add synchronous SpecRunner (https://jira.mongodb.org/browse/PYTHON-4700) - class TestSpec(SpecRunner): - @classmethod - @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def setUpClass(cls): - super().setUpClass() - - def parse_auto_encrypt_opts(self, opts): - """Parse clientOptions.autoEncryptOpts.""" - opts = camel_to_snake_args(opts) - kms_providers = opts["kms_providers"] - if "aws" in kms_providers: - kms_providers["aws"] = AWS_CREDS - if not any(AWS_CREDS.values()): - self.skipTest("AWS environment credentials are not set") - if "awsTemporary" in kms_providers: - kms_providers["aws"] = AWS_TEMP_CREDS - del kms_providers["awsTemporary"] - if not any(AWS_TEMP_CREDS.values()): - self.skipTest("AWS Temp environment credentials are not set") - if "awsTemporaryNoSessionToken" in kms_providers: - kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS - del kms_providers["awsTemporaryNoSessionToken"] - if not any(AWS_TEMP_NO_SESSION_CREDS.values()): - self.skipTest("AWS Temp environment credentials are not set") - if "azure" in kms_providers: - kms_providers["azure"] = AZURE_CREDS - if not any(AZURE_CREDS.values()): - self.skipTest("Azure environment credentials are not set") - if "gcp" in kms_providers: - kms_providers["gcp"] = GCP_CREDS - if not any(AZURE_CREDS.values()): - self.skipTest("GCP environment credentials are not set") - if "kmip" in kms_providers: - kms_providers["kmip"] = KMIP_CREDS - opts["kms_tls_options"] = KMS_TLS_OPTS - if "key_vault_namespace" not in opts: - opts["key_vault_namespace"] = "keyvault.datakeys" - if "extra_options" in opts: - opts.update(camel_to_snake_args(opts.pop("extra_options"))) - - opts = dict(opts) - return AutoEncryptionOpts(**opts) - - def parse_client_options(self, opts): - """Override clientOptions parsing to support autoEncryptOpts.""" - encrypt_opts = opts.pop("autoEncryptOpts", None) - if encrypt_opts: - opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - - return super().parse_client_options(opts) - - def get_object_name(self, op): - """Default object is collection.""" - return op.get("object", "collection") - - def maybe_skip_scenario(self, test): - super().maybe_skip_scenario(test) - desc = test["description"].lower() - if ( - "timeoutms applied to listcollections to get collection schema" in desc - and sys.platform in ("win32", "darwin") - ): - self.skipTest("PYTHON-3706 flaky test on Windows/macOS") - if "type=symbol" in desc: - self.skipTest("PyMongo does not support the symbol type") - if ( - "timeoutms applied to listcollections to get collection schema" in desc - and not _IS_SYNC - ): - self.skipTest("PYTHON-4844 flaky test on async") - - def setup_scenario(self, scenario_def): - """Override a test's setup.""" - key_vault_data = scenario_def["key_vault_data"] - encrypted_fields = scenario_def["encrypted_fields"] - json_schema = scenario_def["json_schema"] - data = scenario_def["data"] - coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] - coll.delete_many({}) - if key_vault_data: - coll.insert_many(key_vault_data) - - db_name = self.get_scenario_db_name(scenario_def) - coll_name = self.get_scenario_coll_name(scenario_def) - db = client_context.client.get_database(db_name, codec_options=OPTS) - coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) - wc = WriteConcern(w="majority") - kwargs: Dict[str, Any] = {} - if json_schema: - kwargs["validator"] = {"$jsonSchema": json_schema} - kwargs["codec_options"] = OPTS - if not data: - kwargs["write_concern"] = wc - if encrypted_fields: - kwargs["encryptedFields"] = encrypted_fields - db.create_collection(coll_name, **kwargs) - coll = db[coll_name] - if data: - # Load data. - coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) - - def allowable_errors(self, op): - """Override expected error classes.""" - errors = super().allowable_errors(op) - # An updateOne test expects encryption to error when no $ operator - # appears but pymongo raises a client side ValueError in this case. - if op["name"] == "updateOne": - errors += (ValueError,) - return errors - - def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) - test_creator.create_tests() - - if _HAVE_PYMONGOCRYPT: - globals().update( - generate_test_classes( - os.path.join(SPEC_PATH, "unified"), - module=__name__, - ) +class TestSpec(SpecRunner): + @classmethod + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def _setup_class(cls): + super()._setup_class() + + def parse_auto_encrypt_opts(self, opts): + """Parse clientOptions.autoEncryptOpts.""" + opts = camel_to_snake_args(opts) + kms_providers = opts["kms_providers"] + if "aws" in kms_providers: + kms_providers["aws"] = AWS_CREDS + if not any(AWS_CREDS.values()): + self.skipTest("AWS environment credentials are not set") + if "awsTemporary" in kms_providers: + kms_providers["aws"] = AWS_TEMP_CREDS + del kms_providers["awsTemporary"] + if not any(AWS_TEMP_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "awsTemporaryNoSessionToken" in kms_providers: + kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS + del kms_providers["awsTemporaryNoSessionToken"] + if not any(AWS_TEMP_NO_SESSION_CREDS.values()): + self.skipTest("AWS Temp environment credentials are not set") + if "azure" in kms_providers: + kms_providers["azure"] = AZURE_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("Azure environment credentials are not set") + if "gcp" in kms_providers: + kms_providers["gcp"] = GCP_CREDS + if not any(AZURE_CREDS.values()): + self.skipTest("GCP environment credentials are not set") + if "kmip" in kms_providers: + kms_providers["kmip"] = KMIP_CREDS + opts["kms_tls_options"] = KMS_TLS_OPTS + if "key_vault_namespace" not in opts: + opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) + + opts = dict(opts) + return AutoEncryptionOpts(**opts) + + def parse_client_options(self, opts): + """Override clientOptions parsing to support autoEncryptOpts.""" + encrypt_opts = opts.pop("autoEncryptOpts", None) + if encrypt_opts: + opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) + + return super().parse_client_options(opts) + + def get_object_name(self, op): + """Default object is collection.""" + return op.get("object", "collection") + + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") + if "type=symbol" in desc: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to listcollections to get collection schema" in desc and not _IS_SYNC: + self.skipTest("PYTHON-4844 flaky test on async") + + def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + coll.delete_many({}) + if key_vault_data: + coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = client_context.client.get_database(db_name, codec_options=OPTS) + db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. + if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + +def create_test(scenario_def, test, name): + @client_context.require_test_commands + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator.create_tests() + +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), + module=__name__, ) + ) # Prose Tests ALL_KMS_PROVIDERS = { diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 7cab42cca2..05772fa385 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -21,11 +21,11 @@ from test.utils import ( CMAPListener, OvertCommandListener, - SpecTestCreator, get_pool, wait_until, ) from test.utils_selection_tests import create_topology +from test.utils_spec_runner import SpecTestCreator from pymongo.common import clean_node from pymongo.monitoring import ConnectionReadyEvent diff --git a/test/utils.py b/test/utils.py index 9c78cff3ad..4575a9fe10 100644 --- a/test/utils.py +++ b/test/utils.py @@ -418,153 +418,6 @@ def call_count(self): return len(self._call_list) -class SpecTestCreator: - """Class to create test cases from specifications.""" - - def __init__(self, create_test, test_class, test_path): - """Create a TestCreator object. - - :Parameters: - - `create_test`: callback that returns a test case. The callback - must accept the following arguments - a dictionary containing the - entire test specification (the `scenario_def`), a dictionary - containing the specification for which the test case will be - generated (the `test_def`). - - `test_class`: the unittest.TestCase class in which to create the - test case. - - `test_path`: path to the directory containing the JSON files with - the test specifications. - """ - self._create_test = create_test - self._test_class = test_class - self.test_path = test_path - - def _ensure_min_max_server_version(self, scenario_def, method): - """Test modifier that enforces a version range for the server on a - test case. - """ - if "minServerVersion" in scenario_def: - min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) - if min_ver is not None: - method = client_context.require_version_min(*min_ver)(method) - - if "maxServerVersion" in scenario_def: - max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) - if max_ver is not None: - method = client_context.require_version_max(*max_ver)(method) - - if "serverless" in scenario_def: - serverless = scenario_def["serverless"] - if serverless == "require": - serverless_satisfied = client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - method = unittest.skipUnless( - serverless_satisfied, "Serverless requirement not satisfied" - )(method) - - return method - - @staticmethod - def valid_topology(run_on_req): - return client_context.is_topology_type( - run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) - ) - - @staticmethod - def min_server_version(run_on_req): - version = run_on_req.get("minServerVersion") - if version: - min_ver = tuple(int(elt) for elt in version.split(".")) - return client_context.version >= min_ver - return True - - @staticmethod - def max_server_version(run_on_req): - version = run_on_req.get("maxServerVersion") - if version: - max_ver = tuple(int(elt) for elt in version.split(".")) - return client_context.version <= max_ver - return True - - @staticmethod - def valid_auth_enabled(run_on_req): - if "authEnabled" in run_on_req: - if run_on_req["authEnabled"]: - return client_context.auth_enabled - return not client_context.auth_enabled - return True - - @staticmethod - def serverless_ok(run_on_req): - serverless = run_on_req["serverless"] - if serverless == "require": - return client_context.serverless - elif serverless == "forbid": - return not client_context.serverless - else: # unset or "allow" - return True - - def should_run_on(self, scenario_def): - run_on = scenario_def.get("runOn", []) - if not run_on: - # Always run these tests. - return True - - for req in run_on: - if ( - self.valid_topology(req) - and self.min_server_version(req) - and self.max_server_version(req) - and self.valid_auth_enabled(req) - and self.serverless_ok(req) - ): - return True - return False - - def ensure_run_on(self, scenario_def, method): - """Test modifier that enforces a 'runOn' on a test case.""" - return client_context._require( - lambda: self.should_run_on(scenario_def), "runOn not satisfied", method - ) - - def tests(self, scenario_def): - """Allow CMAP spec test to override the location of test.""" - return scenario_def["tests"] - - def create_tests(self): - for dirpath, _, filenames in os.walk(self.test_path): - dirname = os.path.split(dirpath)[-1] - - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - # Use tz_aware=False to match how CodecOptions decodes - # dates. - opts = json_util.JSONOptions(tz_aware=False) - scenario_def = ScenarioDict( - json_util.loads(scenario_stream.read(), json_options=opts) - ) - - test_type = os.path.splitext(filename)[0] - - # Construct test from scenario. - for test_def in self.tests(scenario_def): - test_name = "test_{}_{}_{}".format( - dirname, - test_type.replace("-", "_").replace(".", "_"), - str(test_def["description"].replace(" ", "_").replace(".", "_")), - ) - - new_test = self._create_test(scenario_def, test_def, test_name) - new_test = self._ensure_min_max_server_version(scenario_def, new_test) - new_test = self.ensure_run_on(scenario_def, new_test) - - new_test.__name__ = test_name - setattr(self._test_class, new_test.__name__, new_test) - - def ensure_all_connected(client: MongoClient) -> None: """Ensure that the client's connection pool has socket connections to all members of a replica set. Raises ConfigurationError when called with a diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 06a40351cd..8a061de0b1 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -15,8 +15,12 @@ """Utilities for testing driver specs.""" from __future__ import annotations +import asyncio import functools +import os import threading +import unittest +from asyncio import iscoroutinefunction from collections import abc from test import IntegrationTest, client_context, client_knobs from test.utils import ( @@ -24,6 +28,7 @@ CompareType, EventListener, OvertCommandListener, + ScenarioDict, ServerAndTopologyEventListener, camel_to_snake, camel_to_snake_args, @@ -32,11 +37,12 @@ ) from typing import List -from bson import ObjectId, decode, encode +from bson import ObjectId, decode, encode, json_util from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from gridfs import GridFSBucket +from gridfs.synchronous.grid_file import GridFSBucket from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference @@ -83,6 +89,161 @@ def run(self): self.stop() +class SpecTestCreator: + """Class to create test cases from specifications.""" + + def __init__(self, create_test, test_class, test_path): + """Create a TestCreator object. + + :Parameters: + - `create_test`: callback that returns a test case. The callback + must accept the following arguments - a dictionary containing the + entire test specification (the `scenario_def`), a dictionary + containing the specification for which the test case will be + generated (the `test_def`). + - `test_class`: the unittest.TestCase class in which to create the + test case. + - `test_path`: path to the directory containing the JSON files with + the test specifications. + """ + self._create_test = create_test + self._test_class = test_class + self.test_path = test_path + + def _ensure_min_max_server_version(self, scenario_def, method): + """Test modifier that enforces a version range for the server on a + test case. + """ + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) + if min_ver is not None: + method = client_context.require_version_min(*min_ver)(method) + + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) + if max_ver is not None: + method = client_context.require_version_max(*max_ver)(method) + + if "serverless" in scenario_def: + serverless = scenario_def["serverless"] + if serverless == "require": + serverless_satisfied = client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + method = unittest.skipUnless( + serverless_satisfied, "Serverless requirement not satisfied" + )(method) + + return method + + @staticmethod + def valid_topology(run_on_req): + return client_context.is_topology_type( + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) + + @staticmethod + def min_server_version(run_on_req): + version = run_on_req.get("minServerVersion") + if version: + min_ver = tuple(int(elt) for elt in version.split(".")) + return client_context.version >= min_ver + return True + + @staticmethod + def max_server_version(run_on_req): + version = run_on_req.get("maxServerVersion") + if version: + max_ver = tuple(int(elt) for elt in version.split(".")) + return client_context.version <= max_ver + return True + + @staticmethod + def valid_auth_enabled(run_on_req): + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: + return client_context.auth_enabled + return not client_context.auth_enabled + return True + + @staticmethod + def serverless_ok(run_on_req): + serverless = run_on_req["serverless"] + if serverless == "require": + return client_context.serverless + elif serverless == "forbid": + return not client_context.serverless + else: # unset or "allow" + return True + + def should_run_on(self, scenario_def): + run_on = scenario_def.get("runOn", []) + if not run_on: + # Always run these tests. + return True + + for req in run_on: + if ( + self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + and self.serverless_ok(req) + ): + return True + return False + + def ensure_run_on(self, scenario_def, method): + """Test modifier that enforces a 'runOn' on a test case.""" + + def predicate(): + return self.should_run_on(scenario_def) + + return client_context._require(predicate, "runOn not satisfied", method) + + def tests(self, scenario_def): + """Allow CMAP spec test to override the location of test.""" + return scenario_def["tests"] + + def _create_tests(self): + for dirpath, _, filenames in os.walk(self.test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: # noqa: ASYNC101, RUF100 + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = ScenarioDict( + json_util.loads(scenario_stream.read(), json_options=opts) + ) + + test_type = os.path.splitext(filename)[0] + + # Construct test from scenario. + for test_def in self.tests(scenario_def): + test_name = "test_{}_{}_{}".format( + dirname, + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) + + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) + + new_test.__name__ = test_name + setattr(self._test_class, new_test.__name__, new_test) + + def create_tests(self): + if _IS_SYNC: + self._create_tests() + else: + asyncio.run(self._create_tests()) + + class SpecRunner(IntegrationTest): mongos_clients: List knobs: client_knobs @@ -312,7 +473,10 @@ def run_operation(self, sessions, collection, operation): args.update(arguments) arguments = args - result = cmd(**dict(arguments)) + if not _IS_SYNC and iscoroutinefunction(cmd): + result = cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) # Cleanup open change stream cursors. if name == "watch": self.addCleanup(result.close) @@ -583,7 +747,7 @@ def run_scenario(self, scenario_def, test): read_preference=ReadPreference.PRIMARY, read_concern=ReadConcern("local"), ) - actual_data = (outcome_coll.find(sort=[("_id", 1)])).to_list() + actual_data = outcome_coll.find(sort=[("_id", 1)]).to_list() # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. diff --git a/tools/synchro.py b/tools/synchro.py index 0ec8985a05..f704919a17 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -105,6 +105,8 @@ "PyMongo|c|async": "PyMongo|c", "AsyncTestGridFile": "TestGridFile", "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", + "AsyncTestSpec": "TestSpec", + "AsyncSpecTestCreator": "SpecTestCreator", "async_set_fail_point": "set_fail_point", "async_ensure_all_connected": "ensure_all_connected", "async_repl_set_step_down": "repl_set_step_down", From 6973d2d2743b7679080b8be70391b767740cf674 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 11:02:06 -0400 Subject: [PATCH 1538/2111] PYTHON-4528 - Convert unified test runner to async (#1913) --- test/asynchronous/unified_format.py | 1573 +++++++++++++++++++++++++++ test/unified_format.py | 711 +----------- test/unified_format_shared.py | 679 ++++++++++++ tools/synchro.py | 1 + 4 files changed, 2301 insertions(+), 663 deletions(-) create mode 100644 test/asynchronous/unified_format.py create mode 100644 test/unified_format_shared.py diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py new file mode 100644 index 0000000000..4c37422951 --- /dev/null +++ b/test/asynchronous/unified_format.py @@ -0,0 +1,1573 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified test format runner. + +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +""" +from __future__ import annotations + +import asyncio +import binascii +import copy +import functools +import os +import re +import sys +import time +import traceback +from asyncio import iscoroutinefunction +from collections import defaultdict +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + client_knobs, + unittest, +) +from test.unified_format_shared import ( + IS_INTERRUPTED, + KMS_TLS_OPTS, + PLACEHOLDER_MAP, + SKIP_CSOT_TESTS, + EventListenerUtil, + MatchEvaluatorUtil, + coerce_result, + parse_bulk_write_error_result, + parse_bulk_write_result, + parse_client_bulk_write_error_result, + parse_collection_or_database_options, + with_metaclass, +) +from test.utils import ( + async_get_pool, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + snake_to_camel, + wait_until, +) +from test.utils_spec_runner import SpecRunnerThread +from test.version import Version +from typing import Any, Dict, List, Mapping, Optional + +import pymongo +from bson import SON, json_util +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.objectid import ObjectId +from gridfs import AsyncGridFSBucket, GridOut +from pymongo import ASCENDING, AsyncMongoClient, CursorType, _csot +from pymongo.asynchronous.change_stream import AsyncChangeStream +from pymongo.asynchronous.client_session import AsyncClientSession, TransactionOptions, _TxnState +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.encryption import AsyncClientEncryption +from pymongo.asynchronous.helpers import anext +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.errors import ( + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + EncryptionError, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, +) +from pymongo.monitoring import ( + CommandStartedEvent, +) +from pymongo.operations import ( + SearchIndexModel, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +async def is_run_on_requirement_satisfied(requirement): + topology_satisfied = True + req_topologies = requirement.get("topologies") + if req_topologies: + topology_satisfied = await async_client_context.is_topology_type(req_topologies) + + server_version = Version(*async_client_context.version[:3]) + + min_version_satisfied = True + req_min_server_version = requirement.get("minServerVersion") + if req_min_server_version: + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version + + max_version_satisfied = True + req_max_server_version = requirement.get("maxServerVersion") + if req_max_server_version: + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version + + serverless = requirement.get("serverless") + if serverless == "require": + serverless_satisfied = async_client_context.serverless + elif serverless == "forbid": + serverless_satisfied = not async_client_context.serverless + else: # unset or "allow" + serverless_satisfied = True + + params_satisfied = True + params = requirement.get("serverParameters") + if params: + for param, val in params.items(): + if param not in async_client_context.server_parameters: + params_satisfied = False + elif async_client_context.server_parameters[param] != val: + params_satisfied = False + + auth_satisfied = True + req_auth = requirement.get("auth") + if req_auth is not None: + if req_auth: + auth_satisfied = async_client_context.auth_enabled + if auth_satisfied and "authMechanism" in requirement: + auth_satisfied = async_client_context.check_auth_type(requirement["authMechanism"]) + else: + auth_satisfied = not async_client_context.auth_enabled + + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and serverless_satisfied + and params_satisfied + and auth_satisfied + and csfle_satisfied + ) + + +class NonLazyCursor: + """A find cursor proxy that creates the remote cursor when initialized.""" + + def __init__(self, find_cursor, client): + self.client = client + self.find_cursor = find_cursor + # Create the server side cursor. + self.first_result = None + + @classmethod + async def create(cls, find_cursor, client): + cursor = cls(find_cursor, client) + try: + cursor.first_result = await anext(cursor.find_cursor) + except StopAsyncIteration: + cursor.first_result = None + return cursor + + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + + async def __anext__(self): + if self.first_result is not None: + first = self.first_result + self.first_result = None + return first + return await anext(self.find_cursor) + + # Added to support the iterateOnce operation. + try_next = __anext__ + + async def close(self): + await self.find_cursor.close() + self.client = None + + +class EntityMapUtil: + """Utility class that implements an entity map as per the unified + test format specification. + """ + + def __init__(self, test_class): + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class + self._cluster_time: Mapping[str, Any] = {} + + def __contains__(self, item): + return item in self._entities + + def __len__(self): + return len(self._entities) + + def __getitem__(self, item): + try: + return self._entities[item] + except KeyError: + self.test.fail(f"Could not find entity named {item} in map") + + def __setitem__(self, key, value): + if not isinstance(key, str): + self.test.fail("Expected entity name of type str, got %s" % (type(key))) + + if key in self._entities: + self.test.fail(f"Entity named {key} already in map") + + self._entities[key] = value + + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + + async def _create_entity(self, entity_spec, uri=None): + if len(entity_spec) != 1: + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") + + entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") + if entity_type == "client": + kwargs: dict = {} + observe_events = spec.get("observeEvents", []) + + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent + for i in range(len(observe_events)): + if "topologyOpeningEvent" == observe_events[i]: + observe_events[i] = "topologyOpenedEvent" + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil( + observe_events, + ignore_commands, + observe_sensitive_commands, + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): + if async_client_context.load_balancer or async_client_context.serverless: + kwargs["h"] = async_client_context.MULTI_MONGOS_LB_URI + elif async_client_context.is_mongos: + kwargs["h"] = async_client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") + if "waitQueueSize" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueSize") + if "waitQueueMultiple" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueMultiple") + if server_api: + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) + if uri: + kwargs["h"] = uri + client = await self.test.async_rs_or_single_client(**kwargs) + self[spec["id"]] = client + self.test.addAsyncCleanup(client.close) + return + elif entity_type == "database": + client = self[spec["client"]] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + "Expected entity {} to be of type AsyncMongoClient, got {}".format( + spec["client"], type(client) + ) + ) + options = parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) + return + elif entity_type == "collection": + database = self[spec["database"]] + if not isinstance(database, AsyncDatabase): + self.test.fail( + "Expected entity {} to be of type AsyncDatabase, got {}".format( + spec["database"], type(database) + ) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) + return + elif entity_type == "session": + client = self[spec["client"]] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + "Expected entity {} to be of type AsyncMongoClient, got {}".format( + spec["client"], type(client) + ) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) + txn_opts = TransactionOptions(**txn_opts) + opts = copy.deepcopy(opts) + opts["default_transaction_options"] = txn_opts + session = client.start_session(**dict(opts)) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) + self.test.addAsyncCleanup(session.end_session) + return + elif entity_type == "bucket": + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + bucket = AsyncGridFSBucket(db, **kwargs) + + # PyMongo does not support AsyncGridFSBucket.drop(), emulate it. + @_csot.apply + async def drop(self: AsyncGridFSBucket, *args: Any, **kwargs: Any) -> None: + await self._files.drop(*args, **kwargs) + await self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket + return + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + # Set TLS options for providers like "kmip:name1". + kms_tls_options = {} + for provider in opts["kms_providers"]: + provider_type = provider.split(":")[0] + if provider_type in KMS_TLS_OPTS: + kms_tls_options[provider] = KMS_TLS_OPTS[provider_type] + self[spec["id"]] = AsyncClientEncryption( + opts["kms_providers"], + opts["key_vault_namespace"], + opts["key_vault_client"], + DEFAULT_CODEC_OPTIONS, + opts.get("kms_tls_options", kms_tls_options), + ) + return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerThread(name) + thread.start() + self[name] = thread + return + + self.test.fail(f"Unable to create entity of unknown type {entity_type}") + + async def create_entities_from_spec(self, entity_spec, uri=None): + for spec in entity_spec: + await self._create_entity(spec, uri=uri) + + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: + client = self[client_name] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + f"Expected entity {client_name} to be of type AsyncMongoClient, got {type(client)}" + ) + + listener = self._listeners.get(client_name) + if not listener: + self.test.fail(f"No listeners configured for client {client_name}") + + return listener + + def get_lsid_for_session(self, session_name): + session = self[session_name] + if not isinstance(session, AsyncClientSession): + self.test.fail( + f"Expected entity {session_name} to be of type AsyncClientSession, got {type(session)}" + ) + + try: + return session.session_id + except InvalidOperation: + # session has been closed. + return self._session_lsids[session_name] + + async def advance_cluster_times(self) -> None: + """Manually synchronize entities when desired""" + if not self._cluster_time: + self._cluster_time = (await self.test.client.admin.command("ping")).get("$clusterTime") + for entity in self._entities.values(): + if isinstance(entity, AsyncClientSession) and self._cluster_time: + entity.advance_cluster_time(self._cluster_time) + + +class UnifiedSpecTestMixinV1(AsyncIntegrationTest): + """Mixin class to run test cases from test specification files. + + Assumes that tests conform to the `unified test format + `_. + + Specification of the test suite being currently run is available as + a class attribute ``TEST_SPEC``. + """ + + SCHEMA_VERSION = Version.from_string("1.21") + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + TEST_SPEC: Any + mongos_clients: list[AsyncMongoClient] = [] + + @staticmethod + async def should_run_on(run_on_spec): + if not run_on_spec: + # Always run these tests. + return True + + for req in run_on_spec: + if await is_run_on_requirement_satisfied(req): + return True + return False + + async def insert_initial_data(self, initial_data): + for i, collection_data in enumerate(initial_data): + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + opts = collection_data.get("createOptions", {}) + documents = collection_data["documents"] + + # Setup the collection with as few majority writes as possible. + db = self.client[db_name] + await db.drop_collection(coll_name) + # Only use majority wc only on the final write. + if i == len(initial_data) - 1: + wc = WriteConcern(w="majority") + else: + wc = WriteConcern(w=1) + if documents: + if opts: + await db.create_collection(coll_name, **opts) + await db.get_collection(coll_name, write_concern=wc).insert_many(documents) + else: + # Ensure collection exists + await db.create_collection(coll_name, write_concern=wc, **opts) + + @classmethod + async def _setup_class(cls): + # super call creates internal client cls.client + await super()._setup_class() + # process file-level runOnRequirements + run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) + if not await cls.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") + + # add any special-casing for skipping tests here + if async_client_context.storage_engine == "mmapv1": + if "retryable-writes" in cls.TEST_SPEC["description"] or "retryable_writes" in str( + cls.TEST_PATH + ): + raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") + + # Handle mongos_clients for transactions tests. + cls.mongos_clients = [] + if ( + async_client_context.supports_transactions() + and not async_client_context.load_balancer + and not async_client_context.serverless + ): + for address in async_client_context.mongoses: + cls.mongos_clients.append( + await cls.unmanaged_async_single_client("{}:{}".format(*address)) + ) + + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + async def _tearDown_class(cls): + cls.knobs.disable() + for client in cls.mongos_clients: + await client.close() + await super()._tearDown_class() + + async def asyncSetUp(self): + await super().asyncSetUp() + # process schemaVersion + # note: we check major schema version during class generation + # note: we do this here because we cannot run assertions in setUpClass + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) + self.assertLessEqual( + version, + self.SCHEMA_VERSION, + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", + ) + + # initialize internals + self.match_evaluator = MatchEvaluatorUtil(self) + + def maybe_skip_test(self, spec): + # add any special-casing for skipping tests here + if async_client_context.storage_engine == "mmapv1": + if ( + "Dirty explicit session is discarded" in spec["description"] + or "Dirty implicit session is discarded" in spec["description"] + or "Cancel server check" in spec["description"] + ): + self.skipTest("MMAPv1 does not support retryWrites=True") + if ( + "AsyncDatabase-level aggregate with $out includes read preference for 5.0+ server" + in spec["description"] + ): + if async_client_context.version[0] == 8: + self.skipTest("waiting on PYTHON-4356") + if "Aggregate with $out includes read preference for 5.0+ server" in spec["description"]: + if async_client_context.version[0] == 8: + self.skipTest("waiting on PYTHON-4356") + if "Client side error in command starting transaction" in spec["description"]: + self.skipTest("Implement PYTHON-1894") + if "timeoutMS applied to entire download" in spec["description"]: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + if "csot" in class_name: + if "gridfs" in class_name and sys.platform == "win32": + self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") + if async_client_context.storage_engine == "mmapv1": + self.skipTest( + "MMAPv1 does not support retryable writes which is required for CSOT tests" + ) + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if "tailable" in class_name: + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + + # Some tests need to be skipped based on the operations they try to run. + for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if async_client_context.storage_engine == "mmapv1": + if name == "createChangeStream": + self.skipTest("MMAPv1 does not support change streams") + if name == "withTransaction" or name == "startTransaction": + self.skipTest("MMAPv1 does not support document-level locking") + if not async_client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + if name == "modifyCollection": + self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + + def process_error(self, exception, spec): + if isinstance(exception, unittest.SkipTest): + raise + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + if isinstance(exception, ClientBulkWriteException): + self.match_evaluator.match_result(error_response, exception.error.details) + else: + self.match_evaluator.match_result(error_response, exception.details) + + if is_error: + # already satisfied because exception was raised + pass + + if is_client_error: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception + # Connection errors are considered client errors. + if isinstance(error, ConnectionFailure): + self.assertNotIsInstance(error, NotPrimaryError) + elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError)): + pass + else: + self.assertNotIsInstance(error, PyMongoError) + + if is_timeout_error: + self.assertIsInstance(exception, PyMongoError) + if not exception.timeout: + # Re-raise the exception for better diagnostics. + raise exception + + if error_contains: + if isinstance(exception, BulkWriteError): + errmsg = str(exception.details).lower() + elif isinstance(exception, ClientBulkWriteException): + errmsg = str(exception.details).lower() + else: + errmsg = str(exception).lower() + self.assertIn(error_contains.lower(), errmsg) + + if error_code: + if isinstance(exception, ClientBulkWriteException): + self.assertEqual(error_code, exception.error.details.get("code")) + else: + self.assertEqual(error_code, exception.details.get("code")) + + if error_code_name: + if isinstance(exception, ClientBulkWriteException): + self.assertEqual(error_code, exception.error.details.get("codeName")) + else: + self.assertEqual(error_code_name, exception.details.get("codeName")) + + if error_labels_contain: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception + labels = [ + err_label for err_label in error_labels_contain if error.has_error_label(err_label) + ] + self.assertEqual(labels, error_labels_contain) + + if error_labels_omit: + for err_label in error_labels_omit: + if exception.has_error_label(err_label): + self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'") + + if expect_result: + if isinstance(exception, BulkWriteError): + result = parse_bulk_write_error_result(exception) + self.match_evaluator.match_result(expect_result, result) + elif isinstance(exception, ClientBulkWriteException): + result = parse_client_bulk_write_error_result(exception) + self.match_evaluator.match_result(expect_result, result) + else: + self.fail( + f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions" + ) + + return exception + + def __raise_if_unsupported(self, opname, target, *target_types): + if not isinstance(target, target_types): + self.fail(f"Operation {opname} not supported for entity of type {type(target)}") + + async def __entityOperation_createChangeStream(self, target, *args, **kwargs): + if async_client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support change streams") + self.__raise_if_unsupported( + "createChangeStream", target, AsyncMongoClient, AsyncDatabase, AsyncCollection + ) + stream = await target.watch(*args, **kwargs) + self.addAsyncCleanup(stream.close) + return stream + + async def _clientOperation_createChangeStream(self, target, *args, **kwargs): + return await self.__entityOperation_createChangeStream(target, *args, **kwargs) + + async def _databaseOperation_createChangeStream(self, target, *args, **kwargs): + return await self.__entityOperation_createChangeStream(target, *args, **kwargs) + + async def _collectionOperation_createChangeStream(self, target, *args, **kwargs): + return await self.__entityOperation_createChangeStream(target, *args, **kwargs) + + async def _databaseOperation_runCommand(self, target, **kwargs): + self.__raise_if_unsupported("runCommand", target, AsyncDatabase) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + return await target.command(**kwargs) + + async def _databaseOperation_runCursorCommand(self, target, **kwargs): + return list(await self._databaseOperation_createCommandCursor(target, **kwargs)) + + async def _databaseOperation_createCommandCursor(self, target, **kwargs): + self.__raise_if_unsupported("createCommandCursor", target, AsyncDatabase) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + batch_size = 0 + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_await_time_ms"] = kwargs.pop("maxTimeMS") + + if "batch_size" in kwargs: + batch_size = kwargs.pop("batch_size") + + cursor = await target.cursor_command(**kwargs) + + if batch_size > 0: + cursor.batch_size(batch_size) + + return cursor + + async def kill_all_sessions(self): + if getattr(self, "client", None) is None: + return + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + await client.admin.command("killAllSessions", []) + except OperationFailure: + # "operation was interrupted" by killing the command's + # own session. + pass + + async def _databaseOperation_listCollections(self, target, *args, **kwargs): + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} + cursor = await target.list_collections(*args, **kwargs) + return list(cursor) + + async def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. + kwargs["check_exists"] = False + ret = await target.create_collection(*args, **kwargs) + return ret + + async def __entityOperation_aggregate(self, target, *args, **kwargs): + self.__raise_if_unsupported("aggregate", target, AsyncDatabase, AsyncCollection) + return await (await target.aggregate(*args, **kwargs)).to_list() + + async def _databaseOperation_aggregate(self, target, *args, **kwargs): + return await self.__entityOperation_aggregate(target, *args, **kwargs) + + async def _collectionOperation_aggregate(self, target, *args, **kwargs): + return await self.__entityOperation_aggregate(target, *args, **kwargs) + + async def _collectionOperation_find(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, AsyncCollection) + find_cursor = target.find(*args, **kwargs) + return await find_cursor.to_list() + + async def _collectionOperation_createFindCursor(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, AsyncCollection) + if "filter" not in kwargs: + self.fail('createFindCursor requires a "filter" argument') + cursor = await NonLazyCursor.create(target.find(*args, **kwargs), target.database.client) + self.addAsyncCleanup(cursor.close) + return cursor + + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + + async def _collectionOperation_listIndexes(self, target, *args, **kwargs): + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for list_indexes") + return await (await target.list_indexes(*args, **kwargs)).to_list() + + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + + async def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): + models = [SearchIndexModel(**i) for i in kwargs["models"]] + return await target.create_search_indexes(models) + + async def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): + name = kwargs.get("name") + agg_kwargs = kwargs.get("aggregation_options", dict()) + return await (await target.list_search_indexes(name, **agg_kwargs)).to_list() + + async def _sessionOperation_withTransaction(self, target, *args, **kwargs): + if async_client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("withTransaction", target, AsyncClientSession) + return await target.with_transaction(*args, **kwargs) + + async def _sessionOperation_startTransaction(self, target, *args, **kwargs): + if async_client_context.storage_engine == "mmapv1": + self.skipTest("MMAPv1 does not support document-level locking") + self.__raise_if_unsupported("startTransaction", target, AsyncClientSession) + return await target.start_transaction(*args, **kwargs) + + async def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, AsyncChangeStream) + return await anext(target) + + async def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, AsyncCommandCursor + ) + while target.alive: + try: + return await anext(target) + except StopAsyncIteration: + pass + return None + + async def _cursor_close(self, target, *args, **kwargs): + self.__raise_if_unsupported("close", target, NonLazyCursor, AsyncCommandCursor) + return await target.close() + + async def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + + return await target.create_data_key(*args, **kwargs) + + async def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return await (await target.get_keys(*args, **kwargs)).to_list() + + async def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = await target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + async def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + data = await target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} + + async def _clientEncryptionOperation_encrypt(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return await target.encrypt(*args, **kwargs) + + async def _bucketOperation_download( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + async with await target.open_download_stream(*args, **kwargs) as gout: + return await gout.read() + + async def _bucketOperation_downloadByName( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + async with await target.open_download_stream_by_name(*args, **kwargs) as gout: + return await gout.read() + + async def _bucketOperation_upload( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return await target.upload_from_stream(*args, **kwargs) + + async def _bucketOperation_uploadWithId( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return await target.upload_from_stream_with_id(*args, **kwargs) + + async def _bucketOperation_find( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return await target.find(*args, **kwargs).to_list() + + async def run_entity_operation(self, spec): + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") + if ignore and (expect_error or save_as_entity or expect_result): + raise ValueError( + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) + if opargs: + arguments = parse_spec_options(copy.deepcopy(opargs)) + prepare_spec_arguments( + spec, + arguments, + camel_to_snake(opname), + self.entity_map, + self.run_operations_and_throw, + ) + else: + arguments = {} + + if isinstance(target, AsyncMongoClient): + method_name = f"_clientOperation_{opname}" + elif isinstance(target, AsyncDatabase): + method_name = f"_databaseOperation_{opname}" + elif isinstance(target, AsyncCollection): + method_name = f"_collectionOperation_{opname}" + # contentType is always stored in metadata in pymongo. + if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") + elif isinstance(target, AsyncChangeStream): + method_name = f"_changeStreamOperation_{opname}" + elif isinstance(target, (NonLazyCursor, AsyncCommandCursor)): + method_name = f"_cursor_{opname}" + elif isinstance(target, AsyncClientSession): + method_name = f"_sessionOperation_{opname}" + elif isinstance(target, AsyncGridFSBucket): + method_name = f"_bucketOperation_{opname}" + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) + elif isinstance(target, AsyncClientEncryption): + method_name = f"_clientEncryptionOperation_{opname}" + else: + method_name = "doesNotExist" + + try: + method = getattr(self, method_name) + except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" + if target_opname == "client_bulk_write": + target_opname = "bulk_write" + try: + cmd = getattr(target, target_opname) + except AttributeError: + self.fail(f"Unsupported operation {opname} on entity {target}") + else: + cmd = functools.partial(method, target) + + try: + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments: + timeout = arguments.pop("timeout") + with pymongo.timeout(timeout): + result = await cmd(**dict(arguments)) + else: + result = await cmd(**dict(arguments)) + except Exception as exc: + # Ignore all operation errors but to avoid masking bugs don't + # ignore things like TypeError and ValueError. + if ignore and isinstance(exc, (PyMongoError,)): + return exc + if expect_error: + if method_name == "_collectionOperation_bulkWrite": + self.skipTest("Skipping test pending PYTHON-4598") + return self.process_error(exc, expect_error) + raise + else: + if method_name == "_collectionOperation_bulkWrite": + self.skipTest("Skipping test pending PYTHON-4598") + if expect_error: + self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') + + if expect_result: + actual = coerce_result(opname, result) + self.match_evaluator.match_result(expect_result, actual) + + if save_as_entity: + self.entity_map[save_as_entity] = result + return None + return None + + async def __set_fail_point(self, client, command_args): + if not async_client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + await client.admin.command(cmd_on) + self.addAsyncCleanup( + client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) + + async def _testOperation_failPoint(self, spec): + await self.__set_fail_point( + client=self.entity_map[spec["client"]], command_args=spec["failPoint"] + ) + + async def _testOperation_targetedFailPoint(self, spec): + session = self.entity_map[spec["session"]] + if not session._pinned_address: + self.fail( + "Cannot use targetedFailPoint operation with unpinned " "session {}".format( + spec["session"] + ) + ) + + client = await self.async_single_client("{}:{}".format(*session._pinned_address)) + self.addAsyncCleanup(client.close) + await self.__set_fail_point(client=client, command_args=spec["failPoint"]) + + async def _testOperation_createEntities(self, spec): + await self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + await self.entity_map.advance_cluster_times() + + def _testOperation_assertSessionTransactionState(self, spec): + session = self.entity_map[spec["session"]] + expected_state = getattr(_TxnState, spec["state"].upper()) + self.assertEqual(expected_state, session._transaction.state) + + def _testOperation_assertSessionPinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNotNone(session._transaction.pinned_address) + + def _testOperation_assertSessionUnpinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + def __get_last_two_command_lsids(self, listener): + cmd_started_events = [] + for event in reversed(listener.events): + if isinstance(event, CommandStartedEvent): + cmd_started_events.append(event) + if len(cmd_started_events) < 2: + self.fail( + "Needed 2 CommandStartedEvents to compare lsids, " + "got %s" % (len(cmd_started_events)) + ) + return tuple([e.command["lsid"] for e in cmd_started_events][:2]) + + def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSessionDirty(self, spec): + session = self.entity_map[spec["session"]] + self.assertTrue(session._server_session.dirty) + + def _testOperation_assertSessionNotDirty(self, spec): + session = self.entity_map[spec["session"]] + return self.assertFalse(session._server_session.dirty) + + async def _testOperation_assertCollectionExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list( + await self.client.get_database(database_name).list_collection_names() + ) + self.assertIn(collection_name, collection_name_list) + + async def _testOperation_assertCollectionNotExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list( + await self.client.get_database(database_name).list_collection_names() + ) + self.assertNotIn(collection_name, collection_name_list) + + async def _testOperation_assertIndexExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) + + async def _testOperation_assertIndexNotExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(spec["indexName"], index["name"]) + + async def _testOperation_assertNumberConnectionsCheckedOut(self, spec): + client = self.entity_map[spec["client"]] + pool = await async_get_pool(client) + self.assertEqual(spec["connections"], pool.active_sockets) + + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event(event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") + + def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + wait_until( + lambda: self._event_count(client, event) >= count, + f"find {count} {event} event(s)", + ) + + async def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + await asyncio.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[_Address]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0].address + return None + + old_primary = get_primary(old_description) + + def primary_changed() -> bool: + primary = client.primary + if primary is None: + return False + return primary != old_primary + + wait_until(primary_changed, "change primary", timeout=timeout) + + def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.schedule(lambda: self.run_entity_operation(spec["operation"])) + + def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.stop() + thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) + + async def _testOperation_loop(self, spec): + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 + i = 0 + global IS_INTERRUPTED + while True: + if iteration_limiter_key and i >= iteration_limiter_key: + break + i += 1 + if IS_INTERRUPTED: + break + try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 + for op in spec["operations"]: + await self.run_entity_operation(op) + if successes_key: + self.entity_map._entities[successes_key] += 1 + except Exception as exc: + if isinstance(exc, AssertionError): + key = failure_key or error_key + else: + key = error_key or failure_key + if not key: + raise + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) + + async def run_special_operation(self, spec): + opname = spec["name"] + method_name = f"_testOperation_{opname}" + try: + method = getattr(self, method_name) + except AttributeError: + self.fail(f"Unsupported special test operation {opname}") + else: + if iscoroutinefunction(method): + await method(spec["arguments"]) + else: + method(spec["arguments"]) + + async def run_operations(self, spec): + for op in spec: + if op["object"] == "testRunner": + await self.run_special_operation(op) + else: + await self.run_entity_operation(op) + + async def run_operations_and_throw(self, spec): + for op in spec: + if op["object"] == "testRunner": + await self.run_special_operation(op) + else: + result = await self.run_entity_operation(op) + if isinstance(result, Exception): + raise result + + def check_events(self, spec): + for event_spec in spec: + client_name = event_spec["client"] + events = event_spec["events"] + event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + + if len(events) == 0: + self.assertEqual(actual_events, []) + continue + + if len(actual_events) != len(events): + expected = "\n".join(str(e) for e in events) + actual = "\n".join(str(a) for a in actual_events) + self.assertEqual( + len(actual_events), + len(events), + f"expected events:\n{expected}\nactual events:\n{actual}", + ) + + for idx, expected_event in enumerate(events): + self.match_evaluator.match_event(expected_event, actual_events[idx]) + + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + + def process_ignore_messages(self, ignore_logs, actual_logs): + final_logs = [] + for log in actual_logs: + ignored = False + for ignore_log in ignore_logs: + if log["data"]["message"] == ignore_log["data"][ + "message" + ] and self.match_evaluator.match_result(ignore_log, log, test=False): + ignored = True + break + if not ignored: + final_logs.append(log) + return final_logs + + async def check_log_messages(self, operations, spec): + def format_logs(log_list): + client_to_log = defaultdict(list) + for log in log_list: + if log.module == "ocsp_support": + continue + data = json_util.loads(log.getMessage()) + client = data.pop("clientId") if "clientId" in data else data.pop("topologyId") + client_to_log[client].append( + { + "level": log.levelname.lower(), + "component": log.name.replace("pymongo.", "", 1), + "data": data, + } + ) + return client_to_log + + with self.assertLogs("pymongo", level="DEBUG") as cm: + await self.run_operations(operations) + formatted_logs = format_logs(cm.records) + for client in spec: + components = set() + for message in client["messages"]: + components.add(message["component"]) + + clientid = self.entity_map[client["client"]]._topology_settings._topology_id + actual_logs = formatted_logs[clientid] + actual_logs = [log for log in actual_logs if log["component"] in components] + + ignore_logs = client.get("ignoreMessages", []) + if ignore_logs: + actual_logs = self.process_ignore_messages(ignore_logs, actual_logs) + + if client.get("ignoreExtraMessages", False): + actual_logs = actual_logs[: len(client["messages"])] + self.assertEqual( + len(client["messages"]), + len(actual_logs), + f"expected {client['messages']} but got {actual_logs}", + ) + for expected_msg, actual_msg in zip(client["messages"], actual_logs): + expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") + + if "failureIsRedacted" in expected_msg: + self.assertIn("failure", actual_data) + should_redact = expected_msg.pop("failureIsRedacted") + if should_redact: + actual_fields = set(json_util.loads(actual_data["failure"]).keys()) + self.assertTrue( + {"code", "codeName", "errorLabels"}.issuperset(actual_fields) + ) + + self.match_evaluator.match_result(expected_data, actual_data) + self.match_evaluator.match_result(expected_msg, actual_msg) + + async def verify_outcome(self, spec): + for collection_data in spec: + coll_name = collection_data["collectionName"] + db_name = collection_data["databaseName"] + expected_documents = collection_data["documents"] + + coll = self.client.get_database(db_name).get_collection( + coll_name, + read_preference=ReadPreference.PRIMARY, + read_concern=ReadConcern(level="local"), + ) + + if expected_documents: + sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) + actual_documents = await coll.find({}, sort=[("_id", ASCENDING)]).to_list() + self.assertListEqual(sorted_expected_documents, actual_documents) + + async def run_scenario(self, spec, uri=None): + if "csot" in self.id().lower() and SKIP_CSOT_TESTS: + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") + + # Kill all sessions before and after each test to prevent an open + # transaction (from a test failure) from blocking collection/database + # operations during test set up and tear down. + await self.kill_all_sessions() + self.addAsyncCleanup(self.kill_all_sessions) + + if "csot" in self.id().lower(): + # Retry CSOT tests up to 2 times to deal with flakey tests. + attempts = 3 + for i in range(attempts): + try: + return await self._run_scenario(spec, uri) + except AssertionError: + if i < attempts - 1: + print( + f"Retrying after attempt {i+1} of {self.id()} failed with:\n" + f"{traceback.format_exc()}", + file=sys.stderr, + ) + await self.asyncSetUp() + continue + raise + return None + else: + await self._run_scenario(spec, uri) + return None + + async def _run_scenario(self, spec, uri=None): + # maybe skip test manually + self.maybe_skip_test(spec) + + # process test-level runOnRequirements + run_on_spec = spec.get("runOnRequirements", []) + if not await self.should_run_on(run_on_spec): + raise unittest.SkipTest("runOnRequirements not satisfied") + + # process skipReason + skip_reason = spec.get("skipReason", None) + if skip_reason is not None: + raise unittest.SkipTest(f"{skip_reason}") + + # process createEntities + self._uri = uri + self.entity_map = EntityMapUtil(self) + await self.entity_map.create_entities_from_spec( + self.TEST_SPEC.get("createEntities", []), uri=uri + ) + # process initialData + if "initialData" in self.TEST_SPEC: + await self.insert_initial_data(self.TEST_SPEC["initialData"]) + self._cluster_time = (await self.client.admin.command("ping")).get("$clusterTime") + await self.entity_map.advance_cluster_times() + + if "expectLogMessages" in spec: + expect_log_messages = spec["expectLogMessages"] + self.assertTrue(expect_log_messages, "expectEvents must be non-empty") + await self.check_log_messages(spec["operations"], expect_log_messages) + else: + # process operations + await self.run_operations(spec["operations"]) + + # process expectEvents + if "expectEvents" in spec: + expect_events = spec["expectEvents"] + self.assertTrue(expect_events, "expectEvents must be non-empty") + self.check_events(expect_events) + + # process outcome + await self.verify_outcome(spec.get("outcome", [])) + + +class UnifiedSpecTestMeta(type): + """Metaclass for generating test classes.""" + + TEST_SPEC: Any + EXPECTED_FAILURES: Any + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + def create_test(spec): + async def test_case(self): + await self.run_scenario(spec) + + return test_case + + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) + test_method = create_test(copy.deepcopy(test_spec)) + test_method.__name__ = str(test_name) + + for fail_pattern in cls.EXPECTED_FAILURES: + if re.search(fail_pattern, description): + test_method = unittest.expectedFailure(test_method) + break + + setattr(cls, test_name, test_method) + + +_ALL_MIXIN_CLASSES = [ + UnifiedSpecTestMixinV1, + # add mixin classes for new schema major versions here +] + + +_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} + + +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], # noqa: B006 + bypass_test_generation_errors=False, + **kwargs, +): + """Method for generating test classes. Returns a dictionary where keys are + the names of test classes and values are the test class objects. + """ + test_klasses = {} + + def test_base_class_factory(test_spec): + """Utility that creates the base class to use for test generation. + This is needed to ensure that cls.TEST_SPEC is appropriately set when + the metaclass __init__ is invoked. + """ + + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore + TEST_SPEC = test_spec + EXPECTED_FAILURES = expected_failures + + return SpecTestBase + + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + fpath = os.path.join(dirpath, filename) + with open(fpath) as scenario_stream: + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) + + test_type = os.path.splitext(filename)[0] + snake_class_name = "Test{}_{}_{}".format( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) + class_name = snake_to_camel(snake_class_name) + + try: + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) + if mixin_class is None: + raise ValueError( + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" + ) + module_dict = {"__module__": module, "TEST_PATH": test_path} + module_dict.update(kwargs) + test_klasses[class_name] = type( + class_name, + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) + except Exception: + if bypass_test_generation_errors: + continue + raise + + return test_klasses diff --git a/test/unified_format.py b/test/unified_format.py index 62211d3d25..6a19082b86 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -18,41 +18,41 @@ """ from __future__ import annotations +import asyncio import binascii -import collections import copy -import datetime import functools import os import re import sys import time import traceback -import types -from collections import abc, defaultdict +from asyncio import iscoroutinefunction +from collections import defaultdict from test import ( IntegrationTest, client_context, client_knobs, unittest, ) -from test.helpers import ( - AWS_CREDS, - AWS_CREDS_2, - AZURE_CREDS, - CA_PEM, - CLIENT_PEM, - GCP_CREDS, - KMIP_CREDS, - LOCAL_MASTER_KEY, - client_knobs, +from test.unified_format_shared import ( + IS_INTERRUPTED, + KMS_TLS_OPTS, + PLACEHOLDER_MAP, + SKIP_CSOT_TESTS, + EventListenerUtil, + MatchEvaluatorUtil, + coerce_result, + parse_bulk_write_error_result, + parse_bulk_write_result, + parse_client_bulk_write_error_result, + parse_collection_or_database_options, + with_metaclass, ) from test.utils import ( - CMAPListener, camel_to_snake, camel_to_snake_args, get_pool, - parse_collection_options, parse_spec_options, prepare_spec_arguments, snake_to_camel, @@ -60,14 +60,12 @@ ) from test.utils_spec_runner import SpecRunnerThread from test.version import Version -from typing import Any, Dict, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional import pymongo -from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util -from bson.binary import Binary +from bson import SON, json_util from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId -from bson.regex import RE_TYPE, Regex from gridfs import GridFSBucket, GridOut from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.encryption_options import _HAVE_PYMONGOCRYPT @@ -83,55 +81,14 @@ PyMongoError, ) from pymongo.monitoring import ( - _SENSITIVE_COMMANDS, - CommandFailedEvent, - CommandListener, CommandStartedEvent, - CommandSucceededEvent, - ConnectionCheckedInEvent, - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutStartedEvent, - ConnectionClosedEvent, - ConnectionCreatedEvent, - ConnectionReadyEvent, - PoolClearedEvent, - PoolClosedEvent, - PoolCreatedEvent, - PoolReadyEvent, - ServerClosedEvent, - ServerDescriptionChangedEvent, - ServerHeartbeatFailedEvent, - ServerHeartbeatListener, - ServerHeartbeatStartedEvent, - ServerHeartbeatSucceededEvent, - ServerListener, - ServerOpeningEvent, - TopologyClosedEvent, - TopologyDescriptionChangedEvent, - TopologyEvent, - TopologyListener, - TopologyOpenedEvent, - _CommandEvent, - _ConnectionEvent, - _PoolEvent, - _ServerEvent, - _ServerHeartbeatEvent, ) from pymongo.operations import ( - DeleteMany, - DeleteOne, - InsertOne, - ReplaceOne, SearchIndexModel, - UpdateMany, - UpdateOne, ) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.results import BulkWriteResult, ClientBulkWriteResult from pymongo.server_api import ServerApi -from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection, writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.synchronous.change_stream import ChangeStream @@ -140,85 +97,12 @@ from pymongo.synchronous.command_cursor import CommandCursor from pymongo.synchronous.database import Database from pymongo.synchronous.encryption import ClientEncryption +from pymongo.synchronous.helpers import next from pymongo.topology_description import TopologyDescription from pymongo.typings import _Address from pymongo.write_concern import WriteConcern -SKIP_CSOT_TESTS = os.getenv("SKIP_CSOT_TESTS") - -JSON_OPTS = json_util.JSONOptions(tz_aware=False) - -IS_INTERRUPTED = False - -KMS_TLS_OPTS = { - "kmip": { - "tlsCAFile": CA_PEM, - "tlsCertificateKeyFile": CLIENT_PEM, - } -} - - -# Build up a placeholder maps. -PLACEHOLDER_MAP = {} -for provider_name, provider_data in [ - ("local", {"key": LOCAL_MASTER_KEY}), - ("local:name1", {"key": LOCAL_MASTER_KEY}), - ("aws", AWS_CREDS), - ("aws:name1", AWS_CREDS), - ("aws:name2", AWS_CREDS_2), - ("azure", AZURE_CREDS), - ("azure:name1", AZURE_CREDS), - ("gcp", GCP_CREDS), - ("gcp:name1", GCP_CREDS), - ("kmip", KMIP_CREDS), - ("kmip:name1", KMIP_CREDS), -]: - for key, value in provider_data.items(): - placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" - PLACEHOLDER_MAP[placeholder] = value - -OIDC_ENV = os.environ.get("OIDC_ENV", "test") -if OIDC_ENV == "test": - PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "test"} -elif OIDC_ENV == "azure": - PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { - "ENVIRONMENT": "azure", - "TOKEN_RESOURCE": os.environ["AZUREOIDC_RESOURCE"], - } -elif OIDC_ENV == "gcp": - PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { - "ENVIRONMENT": "gcp", - "TOKEN_RESOURCE": os.environ["GCPOIDC_AUDIENCE"], - } - - -def interrupt_loop(): - global IS_INTERRUPTED - IS_INTERRUPTED = True - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass. - - Vendored from six: https://github.com/benjaminp/six/blob/master/six.py - """ - - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(type): - def __new__(cls, name, this_bases, d): - # __orig_bases__ is required by PEP 560. - resolved_bases = types.resolve_bases(bases) - if resolved_bases is not bases: - d["__orig_bases__"] = bases - return meta(name, resolved_bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - - return type.__new__(metaclass, "temporary_class", (), {}) +_IS_SYNC = True def is_run_on_requirement_satisfied(requirement): @@ -283,77 +167,6 @@ def is_run_on_requirement_satisfied(requirement): ) -def parse_collection_or_database_options(options): - return parse_collection_options(options) - - -def parse_bulk_write_result(result): - upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} - return { - "deletedCount": result.deleted_count, - "insertedCount": result.inserted_count, - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - "upsertedCount": result.upserted_count, - "upsertedIds": upserted_ids, - } - - -def parse_client_bulk_write_individual(op_type, result): - if op_type == "insert": - return {"insertedId": result.inserted_id} - if op_type == "update": - if result.upserted_id: - return { - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - "upsertedId": result.upserted_id, - } - else: - return { - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - } - if op_type == "delete": - return { - "deletedCount": result.deleted_count, - } - - -def parse_client_bulk_write_result(result): - insert_results, update_results, delete_results = {}, {}, {} - if result.has_verbose_results: - for idx, res in result.insert_results.items(): - insert_results[str(idx)] = parse_client_bulk_write_individual("insert", res) - for idx, res in result.update_results.items(): - update_results[str(idx)] = parse_client_bulk_write_individual("update", res) - for idx, res in result.delete_results.items(): - delete_results[str(idx)] = parse_client_bulk_write_individual("delete", res) - - return { - "deletedCount": result.deleted_count, - "insertedCount": result.inserted_count, - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - "upsertedCount": result.upserted_count, - "insertResults": insert_results, - "updateResults": update_results, - "deleteResults": delete_results, - } - - -def parse_bulk_write_error_result(error): - write_result = BulkWriteResult(error.details, True) - return parse_bulk_write_result(write_result) - - -def parse_client_bulk_write_error_result(error): - write_result = error.partial_result - if not write_result: - return None - return parse_client_bulk_write_result(write_result) - - class NonLazyCursor: """A find cursor proxy that creates the remote cursor when initialized.""" @@ -361,7 +174,16 @@ def __init__(self, find_cursor, client): self.client = client self.find_cursor = find_cursor # Create the server side cursor. - self.first_result = next(find_cursor, None) + self.first_result = None + + @classmethod + def create(cls, find_cursor, client): + cursor = cls(find_cursor, client) + try: + cursor.first_result = next(cursor.find_cursor) + except StopIteration: + cursor.first_result = None + return cursor @property def alive(self): @@ -382,105 +204,6 @@ def close(self): self.client = None -class EventListenerUtil( - CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener, TopologyListener -): - def __init__( - self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map - ): - self._event_types = {name.lower() for name in observe_events} - if observe_sensitive_commands: - self._observe_sensitive_commands = True - self._ignore_commands = set(ignore_commands) - else: - self._observe_sensitive_commands = False - self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) - self._ignore_commands.add("configurefailpoint") - self._event_mapping = collections.defaultdict(list) - self.entity_map = entity_map - if store_events: - for i in store_events: - id = i["id"] - events = (i.lower() for i in i["events"]) - for i in events: - self._event_mapping[i].append(id) - self.entity_map[id] = [] - super().__init__() - - def get_events(self, event_type): - assert event_type in ("command", "cmap", "sdam", "all"), event_type - if event_type == "all": - return list(self.events) - if event_type == "command": - return [e for e in self.events if isinstance(e, _CommandEvent)] - if event_type == "cmap": - return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))] - return [ - e - for e in self.events - if isinstance(e, (_ServerEvent, TopologyEvent, _ServerHeartbeatEvent)) - ] - - def add_event(self, event): - event_name = type(event).__name__.lower() - if event_name in self._event_types: - super().add_event(event) - for id in self._event_mapping[event_name]: - self.entity_map[id].append( - { - "name": type(event).__name__, - "observedAt": time.time(), - "description": repr(event), - } - ) - - def _command_event(self, event): - if event.command_name.lower() not in self._ignore_commands: - self.add_event(event) - - def started(self, event): - if isinstance(event, CommandStartedEvent): - if event.command == {}: - # Command is redacted. Observe only if flag is set. - if self._observe_sensitive_commands: - self._command_event(event) - else: - self._command_event(event) - else: - self.add_event(event) - - def succeeded(self, event): - if isinstance(event, CommandSucceededEvent): - if event.reply == {}: - # Command is redacted. Observe only if flag is set. - if self._observe_sensitive_commands: - self._command_event(event) - else: - self._command_event(event) - else: - self.add_event(event) - - def failed(self, event): - if isinstance(event, CommandFailedEvent): - self._command_event(event) - else: - self.add_event(event) - - def opened(self, event: Union[ServerOpeningEvent, TopologyOpenedEvent]) -> None: - self.add_event(event) - - def description_changed( - self, event: Union[ServerDescriptionChangedEvent, TopologyDescriptionChangedEvent] - ) -> None: - self.add_event(event) - - def topology_changed(self, event: TopologyDescriptionChangedEvent) -> None: - self.add_event(event) - - def closed(self, event: Union[ServerClosedEvent, TopologyClosedEvent]) -> None: - self.add_event(event) - - class EntityMapUtil: """Utility class that implements an entity map as per the unified test format specification. @@ -692,353 +415,12 @@ def get_lsid_for_session(self, session_name): def advance_cluster_times(self) -> None: """Manually synchronize entities when desired""" if not self._cluster_time: - self._cluster_time = self.test.client.admin.command("ping").get("$clusterTime") + self._cluster_time = (self.test.client.admin.command("ping")).get("$clusterTime") for entity in self._entities.values(): if isinstance(entity, ClientSession) and self._cluster_time: entity.advance_cluster_time(self._cluster_time) -binary_types = (Binary, bytes) -long_types = (Int64,) -unicode_type = str - - -BSON_TYPE_ALIAS_MAP = { - # https://mongodb.com/docs/manual/reference/operator/query/type/ - # https://pymongo.readthedocs.io/en/stable/api/bson/index.html - "double": (float,), - "string": (str,), - "object": (abc.Mapping,), - "array": (abc.MutableSequence,), - "binData": binary_types, - "undefined": (type(None),), - "objectId": (ObjectId,), - "bool": (bool,), - "date": (datetime.datetime,), - "null": (type(None),), - "regex": (Regex, RE_TYPE), - "dbPointer": (DBRef,), - "javascript": (unicode_type, Code), - "symbol": (unicode_type,), - "javascriptWithScope": (unicode_type, Code), - "int": (int,), - "long": (Int64,), - "decimal": (Decimal128,), - "maxKey": (MaxKey,), - "minKey": (MinKey,), -} - - -class MatchEvaluatorUtil: - """Utility class that implements methods for evaluating matches as per - the unified test format specification. - """ - - def __init__(self, test_class): - self.test = test_class - - def _operation_exists(self, spec, actual, key_to_compare): - if spec is True: - if key_to_compare is None: - assert actual is not None - else: - self.test.assertIn(key_to_compare, actual) - elif spec is False: - if key_to_compare is None: - assert actual is None - else: - self.test.assertNotIn(key_to_compare, actual) - else: - self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") - - def __type_alias_to_type(self, alias): - if alias not in BSON_TYPE_ALIAS_MAP: - self.test.fail(f"Unrecognized BSON type alias {alias}") - return BSON_TYPE_ALIAS_MAP[alias] - - def _operation_type(self, spec, actual, key_to_compare): - if isinstance(spec, abc.MutableSequence): - permissible_types = tuple( - [t for alias in spec for t in self.__type_alias_to_type(alias)] - ) - else: - permissible_types = self.__type_alias_to_type(spec) - value = actual[key_to_compare] if key_to_compare else actual - self.test.assertIsInstance(value, permissible_types) - - def _operation_matchesEntity(self, spec, actual, key_to_compare): - expected_entity = self.test.entity_map[spec] - self.test.assertEqual(expected_entity, actual[key_to_compare]) - - def _operation_matchesHexBytes(self, spec, actual, key_to_compare): - expected = binascii.unhexlify(spec) - value = actual[key_to_compare] if key_to_compare else actual - self.test.assertEqual(value, expected) - - def _operation_unsetOrMatches(self, spec, actual, key_to_compare): - if key_to_compare is None and not actual: - # top-level document can be None when unset - return - - if key_to_compare not in actual: - # we add a dummy value for the compared key to pass map size check - actual[key_to_compare] = "dummyValue" - return - self.match_result(spec, actual[key_to_compare], in_recursive_call=True) - - def _operation_sessionLsid(self, spec, actual, key_to_compare): - expected_lsid = self.test.entity_map.get_lsid_for_session(spec) - self.test.assertEqual(expected_lsid, actual[key_to_compare]) - - def _operation_lte(self, spec, actual, key_to_compare): - if key_to_compare not in actual: - self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") - self.test.assertLessEqual(actual[key_to_compare], spec) - - def _operation_matchAsDocument(self, spec, actual, key_to_compare): - self._match_document(spec, json_util.loads(actual[key_to_compare]), False) - - def _operation_matchAsRoot(self, spec, actual, key_to_compare): - self._match_document(spec, actual, True) - - def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): - method_name = "_operation_{}".format(opname.strip("$")) - try: - method = getattr(self, method_name) - except AttributeError: - self.test.fail(f"Unsupported special matching operator {opname}") - else: - method(spec, actual, key_to_compare) - - def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): - """Returns True if a special operation is evaluated, False - otherwise. If the ``expectation`` map contains a single key, - value pair we check it for a special operation. - If given, ``key_to_compare`` is assumed to be the key in - ``expectation`` whose corresponding value needs to be - evaluated for a possible special operation. ``key_to_compare`` - is ignored when ``expectation`` has only one key. - """ - if not isinstance(expectation, abc.Mapping): - return False - - is_special_op, opname, spec = False, False, False - - if key_to_compare is not None: - if key_to_compare.startswith("$$"): - is_special_op = True - opname = key_to_compare - spec = expectation[key_to_compare] - key_to_compare = None - else: - nested = expectation[key_to_compare] - if isinstance(nested, abc.Mapping) and len(nested) == 1: - opname, spec = next(iter(nested.items())) - if opname.startswith("$$"): - is_special_op = True - elif len(expectation) == 1: - opname, spec = next(iter(expectation.items())) - if opname.startswith("$$"): - is_special_op = True - key_to_compare = None - - if is_special_op: - self._evaluate_special_operation( - opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare - ) - return True - - return False - - def _match_document(self, expectation, actual, is_root, test=False): - if self._evaluate_if_special_operation(expectation, actual): - return - - self.test.assertIsInstance(actual, abc.Mapping) - for key, value in expectation.items(): - if self._evaluate_if_special_operation(expectation, actual, key): - continue - - self.test.assertIn(key, actual) - if not self.match_result(value, actual[key], in_recursive_call=True, test=test): - return False - - if not is_root: - expected_keys = set(expectation.keys()) - for key, value in expectation.items(): - if value == {"$$exists": False}: - expected_keys.remove(key) - if test: - self.test.assertEqual(expected_keys, set(actual.keys())) - else: - return set(expected_keys).issubset(set(actual.keys())) - return True - - def match_result(self, expectation, actual, in_recursive_call=False, test=True): - if isinstance(expectation, abc.Mapping): - return self._match_document( - expectation, actual, is_root=not in_recursive_call, test=test - ) - - if isinstance(expectation, abc.MutableSequence): - self.test.assertIsInstance(actual, abc.MutableSequence) - for e, a in zip(expectation, actual): - if isinstance(e, abc.Mapping): - self._match_document(e, a, is_root=not in_recursive_call, test=test) - else: - self.match_result(e, a, in_recursive_call=True, test=test) - return None - - # account for flexible numerics in element-wise comparison - if isinstance(expectation, int) or isinstance(expectation, float): - if test: - self.test.assertEqual(expectation, actual) - else: - return expectation == actual - return None - else: - if test: - self.test.assertIsInstance(actual, type(expectation)) - self.test.assertEqual(expectation, actual) - else: - return isinstance(actual, type(expectation)) and expectation == actual - return None - - def match_server_description(self, actual: ServerDescription, spec: dict) -> None: - for field, expected in spec.items(): - field = camel_to_snake(field) - if field == "type": - field = "server_type_name" - self.test.assertEqual(getattr(actual, field), expected) - - def match_topology_description(self, actual: TopologyDescription, spec: dict) -> None: - for field, expected in spec.items(): - field = camel_to_snake(field) - if field == "type": - field = "topology_type_name" - self.test.assertEqual(getattr(actual, field), expected) - - def match_event_fields(self, actual: Any, spec: dict) -> None: - for field, expected in spec.items(): - if field == "command" and isinstance(actual, CommandStartedEvent): - command = spec["command"] - if command: - self.match_result(command, actual.command) - continue - if field == "reply" and isinstance(actual, CommandSucceededEvent): - reply = spec["reply"] - if reply: - self.match_result(reply, actual.reply) - continue - if field == "hasServiceId": - if spec["hasServiceId"]: - self.test.assertIsNotNone(actual.service_id) - self.test.assertIsInstance(actual.service_id, ObjectId) - else: - self.test.assertIsNone(actual.service_id) - continue - if field == "hasServerConnectionId": - if spec["hasServerConnectionId"]: - self.test.assertIsNotNone(actual.server_connection_id) - self.test.assertIsInstance(actual.server_connection_id, int) - else: - self.test.assertIsNone(actual.server_connection_id) - continue - if field in ("previousDescription", "newDescription"): - if isinstance(actual, ServerDescriptionChangedEvent): - self.match_server_description( - getattr(actual, camel_to_snake(field)), spec[field] - ) - continue - if isinstance(actual, TopologyDescriptionChangedEvent): - self.match_topology_description( - getattr(actual, camel_to_snake(field)), spec[field] - ) - continue - - if field == "interruptInUseConnections": - field = "interrupt_connections" - else: - field = camel_to_snake(field) - self.test.assertEqual(getattr(actual, field), expected) - - def match_event(self, expectation, actual): - name, spec = next(iter(expectation.items())) - if name == "commandStartedEvent": - self.test.assertIsInstance(actual, CommandStartedEvent) - elif name == "commandSucceededEvent": - self.test.assertIsInstance(actual, CommandSucceededEvent) - elif name == "commandFailedEvent": - self.test.assertIsInstance(actual, CommandFailedEvent) - elif name == "poolCreatedEvent": - self.test.assertIsInstance(actual, PoolCreatedEvent) - elif name == "poolReadyEvent": - self.test.assertIsInstance(actual, PoolReadyEvent) - elif name == "poolClearedEvent": - self.test.assertIsInstance(actual, PoolClearedEvent) - self.test.assertIsInstance(actual.interrupt_connections, bool) - elif name == "poolClosedEvent": - self.test.assertIsInstance(actual, PoolClosedEvent) - elif name == "connectionCreatedEvent": - self.test.assertIsInstance(actual, ConnectionCreatedEvent) - elif name == "connectionReadyEvent": - self.test.assertIsInstance(actual, ConnectionReadyEvent) - elif name == "connectionClosedEvent": - self.test.assertIsInstance(actual, ConnectionClosedEvent) - elif name == "connectionCheckOutStartedEvent": - self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) - elif name == "connectionCheckOutFailedEvent": - self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) - elif name == "connectionCheckedOutEvent": - self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) - elif name == "connectionCheckedInEvent": - self.test.assertIsInstance(actual, ConnectionCheckedInEvent) - elif name == "serverDescriptionChangedEvent": - self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) - elif name == "serverHeartbeatStartedEvent": - self.test.assertIsInstance(actual, ServerHeartbeatStartedEvent) - elif name == "serverHeartbeatSucceededEvent": - self.test.assertIsInstance(actual, ServerHeartbeatSucceededEvent) - elif name == "serverHeartbeatFailedEvent": - self.test.assertIsInstance(actual, ServerHeartbeatFailedEvent) - elif name == "topologyDescriptionChangedEvent": - self.test.assertIsInstance(actual, TopologyDescriptionChangedEvent) - elif name == "topologyOpeningEvent": - self.test.assertIsInstance(actual, TopologyOpenedEvent) - elif name == "topologyClosedEvent": - self.test.assertIsInstance(actual, TopologyClosedEvent) - else: - raise Exception(f"Unsupported event type {name}") - - self.match_event_fields(actual, spec) - - -def coerce_result(opname, result): - """Convert a pymongo result into the spec's result format.""" - if hasattr(result, "acknowledged") and not result.acknowledged: - return {"acknowledged": False} - if opname == "bulkWrite": - return parse_bulk_write_result(result) - if opname == "clientBulkWrite": - return parse_client_bulk_write_result(result) - if opname == "insertOne": - return {"insertedId": result.inserted_id} - if opname == "insertMany": - return dict(enumerate(result.inserted_ids)) - if opname in ("deleteOne", "deleteMany"): - return {"deletedCount": result.deleted_count} - if opname in ("updateOne", "updateMany", "replaceOne"): - value = { - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - "upsertedCount": 0 if result.upserted_id is None else 1, - } - if result.upserted_id is not None: - value["upsertedId"] = result.upserted_id - return value - return result - - class UnifiedSpecTestMixinV1(IntegrationTest): """Mixin class to run test cases from test specification files. @@ -1090,9 +472,9 @@ def insert_initial_data(self, initial_data): db.create_collection(coll_name, write_concern=wc, **opts) @classmethod - def setUpClass(cls): + def _setup_class(cls): # super call creates internal client cls.client - super().setUpClass() + super()._setup_class() # process file-level runOnRequirements run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) if not cls.should_run_on(run_on_spec): @@ -1125,11 +507,11 @@ def setUpClass(cls): cls.knobs.enable() @classmethod - def tearDownClass(cls): + def _tearDown_class(cls): cls.knobs.disable() for client in cls.mongos_clients: client.close() - super().tearDownClass() + super()._tearDown_class() def setUp(self): super().setUp() @@ -1391,7 +773,7 @@ def _databaseOperation_createCollection(self, target, *args, **kwargs): def __entityOperation_aggregate(self, target, *args, **kwargs): self.__raise_if_unsupported("aggregate", target, Database, Collection) - return list(target.aggregate(*args, **kwargs)) + return (target.aggregate(*args, **kwargs)).to_list() def _databaseOperation_aggregate(self, target, *args, **kwargs): return self.__entityOperation_aggregate(target, *args, **kwargs) @@ -1402,13 +784,13 @@ def _collectionOperation_aggregate(self, target, *args, **kwargs): def _collectionOperation_find(self, target, *args, **kwargs): self.__raise_if_unsupported("find", target, Collection) find_cursor = target.find(*args, **kwargs) - return list(find_cursor) + return find_cursor.to_list() def _collectionOperation_createFindCursor(self, target, *args, **kwargs): self.__raise_if_unsupported("find", target, Collection) if "filter" not in kwargs: self.fail('createFindCursor requires a "filter" argument') - cursor = NonLazyCursor(target.find(*args, **kwargs), target.database.client) + cursor = NonLazyCursor.create(target.find(*args, **kwargs), target.database.client) self.addCleanup(cursor.close) return cursor @@ -1418,7 +800,7 @@ def _collectionOperation_count(self, target, *args, **kwargs): def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: self.skipTest("PyMongo does not support batch_size for list_indexes") - return list(target.list_indexes(*args, **kwargs)) + return (target.list_indexes(*args, **kwargs)).to_list() def _collectionOperation_listIndexNames(self, target, *args, **kwargs): self.skipTest("PyMongo does not support list_index_names") @@ -1430,7 +812,7 @@ def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): name = kwargs.get("name") agg_kwargs = kwargs.get("aggregation_options", dict()) - return list(target.list_search_indexes(name, **agg_kwargs)) + return (target.list_search_indexes(name, **agg_kwargs)).to_list() def _sessionOperation_withTransaction(self, target, *args, **kwargs): if client_context.storage_engine == "mmapv1": @@ -1470,7 +852,7 @@ def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): return target.create_data_key(*args, **kwargs) def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): - return list(target.get_keys(*args, **kwargs)) + return (target.get_keys(*args, **kwargs)).to_list() def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): result = target.delete_key(*args, **kwargs) @@ -1516,7 +898,7 @@ def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwar def _bucketOperation_find( self, target: GridFSBucket, *args: Any, **kwargs: Any ) -> List[GridOut]: - return list(target.find(*args, **kwargs)) + return target.find(*args, **kwargs).to_list() def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] @@ -1849,7 +1231,10 @@ def run_special_operation(self, spec): except AttributeError: self.fail(f"Unsupported special test operation {opname}") else: - method(spec["arguments"]) + if iscoroutinefunction(method): + method(spec["arguments"]) + else: + method(spec["arguments"]) def run_operations(self, spec): for op in spec: @@ -1985,7 +1370,7 @@ def verify_outcome(self, spec): if expected_documents: sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) - actual_documents = list(coll.find({}, sort=[("_id", ASCENDING)])) + actual_documents = coll.find({}, sort=[("_id", ASCENDING)]).to_list() self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): @@ -2040,7 +1425,7 @@ def _run_scenario(self, spec, uri=None): # process initialData if "initialData" in self.TEST_SPEC: self.insert_initial_data(self.TEST_SPEC["initialData"]) - self._cluster_time = self.client.admin.command("ping").get("$clusterTime") + self._cluster_time = (self.client.admin.command("ping")).get("$clusterTime") self.entity_map.advance_cluster_times() if "expectLogMessages" in spec: diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py new file mode 100644 index 0000000000..d11624476d --- /dev/null +++ b/test/unified_format_shared.py @@ -0,0 +1,679 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared utility functions and constants for the unified test format runner. + +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +""" +from __future__ import annotations + +import binascii +import collections +import datetime +import os +import time +import types +from collections import abc +from test.helpers import ( + AWS_CREDS, + AWS_CREDS_2, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, +) +from test.utils import CMAPListener, camel_to_snake, parse_collection_options +from typing import Any, Union + +from bson import ( + RE_TYPE, + Binary, + Code, + DBRef, + Decimal128, + Int64, + MaxKey, + MinKey, + ObjectId, + Regex, + json_util, +) +from pymongo.monitoring import ( + _SENSITIVE_COMMANDS, + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerHeartbeatFailedEvent, + ServerHeartbeatListener, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerListener, + ServerOpeningEvent, + TopologyClosedEvent, + TopologyDescriptionChangedEvent, + TopologyEvent, + TopologyListener, + TopologyOpenedEvent, + _CommandEvent, + _ConnectionEvent, + _PoolEvent, + _ServerEvent, + _ServerHeartbeatEvent, +) +from pymongo.results import BulkWriteResult +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TopologyDescription + +SKIP_CSOT_TESTS = os.getenv("SKIP_CSOT_TESTS") + +JSON_OPTS = json_util.JSONOptions(tz_aware=False) + +IS_INTERRUPTED = False + +KMS_TLS_OPTS = { + "kmip": { + "tlsCAFile": CA_PEM, + "tlsCertificateKeyFile": CLIENT_PEM, + } +} + + +# Build up a placeholder maps. +PLACEHOLDER_MAP = {} +for provider_name, provider_data in [ + ("local", {"key": LOCAL_MASTER_KEY}), + ("local:name1", {"key": LOCAL_MASTER_KEY}), + ("aws", AWS_CREDS), + ("aws:name1", AWS_CREDS), + ("aws:name2", AWS_CREDS_2), + ("azure", AZURE_CREDS), + ("azure:name1", AZURE_CREDS), + ("gcp", GCP_CREDS), + ("gcp:name1", GCP_CREDS), + ("kmip", KMIP_CREDS), + ("kmip:name1", KMIP_CREDS), +]: + for key, value in provider_data.items(): + placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + +OIDC_ENV = os.environ.get("OIDC_ENV", "test") +if OIDC_ENV == "test": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "test"} +elif OIDC_ENV == "azure": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": os.environ["AZUREOIDC_RESOURCE"], + } +elif OIDC_ENV == "gcp": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": os.environ["GCPOIDC_AUDIENCE"], + } + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass. + + Vendored from six: https://github.com/benjaminp/six/blob/master/six.py + """ + + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + def __new__(cls, name, this_bases, d): + # __orig_bases__ is required by PEP 560. + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d["__orig_bases__"] = bases + return meta(name, resolved_bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + + return type.__new__(metaclass, "temporary_class", (), {}) + + +def parse_collection_or_database_options(options): + return parse_collection_options(options) + + +def parse_bulk_write_result(result): + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} + return { + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "upsertedIds": upserted_ids, + } + + +def parse_client_bulk_write_individual(op_type, result): + if op_type == "insert": + return {"insertedId": result.inserted_id} + if op_type == "update": + if result.upserted_id: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedId": result.upserted_id, + } + else: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + } + if op_type == "delete": + return { + "deletedCount": result.deleted_count, + } + + +def parse_client_bulk_write_result(result): + insert_results, update_results, delete_results = {}, {}, {} + if result.has_verbose_results: + for idx, res in result.insert_results.items(): + insert_results[str(idx)] = parse_client_bulk_write_individual("insert", res) + for idx, res in result.update_results.items(): + update_results[str(idx)] = parse_client_bulk_write_individual("update", res) + for idx, res in result.delete_results.items(): + delete_results[str(idx)] = parse_client_bulk_write_individual("delete", res) + + return { + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "insertResults": insert_results, + "updateResults": update_results, + "deleteResults": delete_results, + } + + +def parse_bulk_write_error_result(error): + write_result = BulkWriteResult(error.details, True) + return parse_bulk_write_result(write_result) + + +def parse_client_bulk_write_error_result(error): + write_result = error.partial_result + if not write_result: + return None + return parse_client_bulk_write_result(write_result) + + +class EventListenerUtil( + CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener, TopologyListener +): + def __init__( + self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map + ): + self._event_types = {name.lower() for name in observe_events} + if observe_sensitive_commands: + self._observe_sensitive_commands = True + self._ignore_commands = set(ignore_commands) + else: + self._observe_sensitive_commands = False + self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) + self._ignore_commands.add("configurefailpoint") + self._event_mapping = collections.defaultdict(list) + self.entity_map = entity_map + if store_events: + for i in store_events: + id = i["id"] + events = (i.lower() for i in i["events"]) + for i in events: + self._event_mapping[i].append(id) + self.entity_map[id] = [] + super().__init__() + + def get_events(self, event_type): + assert event_type in ("command", "cmap", "sdam", "all"), event_type + if event_type == "all": + return list(self.events) + if event_type == "command": + return [e for e in self.events if isinstance(e, _CommandEvent)] + if event_type == "cmap": + return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))] + return [ + e + for e in self.events + if isinstance(e, (_ServerEvent, TopologyEvent, _ServerHeartbeatEvent)) + ] + + def add_event(self, event): + event_name = type(event).__name__.lower() + if event_name in self._event_types: + super().add_event(event) + for id in self._event_mapping[event_name]: + self.entity_map[id].append( + { + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event), + } + ) + + def _command_event(self, event): + if event.command_name.lower() not in self._ignore_commands: + self.add_event(event) + + def started(self, event): + if isinstance(event, CommandStartedEvent): + if event.command == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def succeeded(self, event): + if isinstance(event, CommandSucceededEvent): + if event.reply == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def failed(self, event): + if isinstance(event, CommandFailedEvent): + self._command_event(event) + else: + self.add_event(event) + + def opened(self, event: Union[ServerOpeningEvent, TopologyOpenedEvent]) -> None: + self.add_event(event) + + def description_changed( + self, event: Union[ServerDescriptionChangedEvent, TopologyDescriptionChangedEvent] + ) -> None: + self.add_event(event) + + def topology_changed(self, event: TopologyDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: Union[ServerClosedEvent, TopologyClosedEvent]) -> None: + self.add_event(event) + + +binary_types = (Binary, bytes) +long_types = (Int64,) +unicode_type = str + + +BSON_TYPE_ALIAS_MAP = { + # https://mongodb.com/docs/manual/reference/operator/query/type/ + # https://pymongo.readthedocs.io/en/stable/api/bson/index.html + "double": (float,), + "string": (str,), + "object": (abc.Mapping,), + "array": (abc.MutableSequence,), + "binData": binary_types, + "undefined": (type(None),), + "objectId": (ObjectId,), + "bool": (bool,), + "date": (datetime.datetime,), + "null": (type(None),), + "regex": (Regex, RE_TYPE), + "dbPointer": (DBRef,), + "javascript": (unicode_type, Code), + "symbol": (unicode_type,), + "javascriptWithScope": (unicode_type, Code), + "int": (int,), + "long": (Int64,), + "decimal": (Decimal128,), + "maxKey": (MaxKey,), + "minKey": (MinKey,), +} + + +class MatchEvaluatorUtil: + """Utility class that implements methods for evaluating matches as per + the unified test format specification. + """ + + def __init__(self, test_class): + self.test = test_class + + def _operation_exists(self, spec, actual, key_to_compare): + if spec is True: + if key_to_compare is None: + assert actual is not None + else: + self.test.assertIn(key_to_compare, actual) + elif spec is False: + if key_to_compare is None: + assert actual is None + else: + self.test.assertNotIn(key_to_compare, actual) + else: + self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") + + def __type_alias_to_type(self, alias): + if alias not in BSON_TYPE_ALIAS_MAP: + self.test.fail(f"Unrecognized BSON type alias {alias}") + return BSON_TYPE_ALIAS_MAP[alias] + + def _operation_type(self, spec, actual, key_to_compare): + if isinstance(spec, abc.MutableSequence): + permissible_types = tuple( + [t for alias in spec for t in self.__type_alias_to_type(alias)] + ) + else: + permissible_types = self.__type_alias_to_type(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertIsInstance(value, permissible_types) + + def _operation_matchesEntity(self, spec, actual, key_to_compare): + expected_entity = self.test.entity_map[spec] + self.test.assertEqual(expected_entity, actual[key_to_compare]) + + def _operation_matchesHexBytes(self, spec, actual, key_to_compare): + expected = binascii.unhexlify(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertEqual(value, expected) + + def _operation_unsetOrMatches(self, spec, actual, key_to_compare): + if key_to_compare is None and not actual: + # top-level document can be None when unset + return + + if key_to_compare not in actual: + # we add a dummy value for the compared key to pass map size check + actual[key_to_compare] = "dummyValue" + return + self.match_result(spec, actual[key_to_compare], in_recursive_call=True) + + def _operation_sessionLsid(self, spec, actual, key_to_compare): + expected_lsid = self.test.entity_map.get_lsid_for_session(spec) + self.test.assertEqual(expected_lsid, actual[key_to_compare]) + + def _operation_lte(self, spec, actual, key_to_compare): + if key_to_compare not in actual: + self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") + self.test.assertLessEqual(actual[key_to_compare], spec) + + def _operation_matchAsDocument(self, spec, actual, key_to_compare): + self._match_document(spec, json_util.loads(actual[key_to_compare]), False) + + def _operation_matchAsRoot(self, spec, actual, key_to_compare): + self._match_document(spec, actual, True) + + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): + method_name = "_operation_{}".format(opname.strip("$")) + try: + method = getattr(self, method_name) + except AttributeError: + self.test.fail(f"Unsupported special matching operator {opname}") + else: + method(spec, actual, key_to_compare) + + def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): + """Returns True if a special operation is evaluated, False + otherwise. If the ``expectation`` map contains a single key, + value pair we check it for a special operation. + If given, ``key_to_compare`` is assumed to be the key in + ``expectation`` whose corresponding value needs to be + evaluated for a possible special operation. ``key_to_compare`` + is ignored when ``expectation`` has only one key. + """ + if not isinstance(expectation, abc.Mapping): + return False + + is_special_op, opname, spec = False, False, False + + if key_to_compare is not None: + if key_to_compare.startswith("$$"): + is_special_op = True + opname = key_to_compare + spec = expectation[key_to_compare] + key_to_compare = None + else: + nested = expectation[key_to_compare] + if isinstance(nested, abc.Mapping) and len(nested) == 1: + opname, spec = next(iter(nested.items())) + if opname.startswith("$$"): + is_special_op = True + elif len(expectation) == 1: + opname, spec = next(iter(expectation.items())) + if opname.startswith("$$"): + is_special_op = True + key_to_compare = None + + if is_special_op: + self._evaluate_special_operation( + opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare + ) + return True + + return False + + def _match_document(self, expectation, actual, is_root, test=False): + if self._evaluate_if_special_operation(expectation, actual): + return + + self.test.assertIsInstance(actual, abc.Mapping) + for key, value in expectation.items(): + if self._evaluate_if_special_operation(expectation, actual, key): + continue + + self.test.assertIn(key, actual) + if not self.match_result(value, actual[key], in_recursive_call=True, test=test): + return False + + if not is_root: + expected_keys = set(expectation.keys()) + for key, value in expectation.items(): + if value == {"$$exists": False}: + expected_keys.remove(key) + if test: + self.test.assertEqual(expected_keys, set(actual.keys())) + else: + return set(expected_keys).issubset(set(actual.keys())) + return True + + def match_result(self, expectation, actual, in_recursive_call=False, test=True): + if isinstance(expectation, abc.Mapping): + return self._match_document( + expectation, actual, is_root=not in_recursive_call, test=test + ) + + if isinstance(expectation, abc.MutableSequence): + self.test.assertIsInstance(actual, abc.MutableSequence) + for e, a in zip(expectation, actual): + if isinstance(e, abc.Mapping): + self._match_document(e, a, is_root=not in_recursive_call, test=test) + else: + self.match_result(e, a, in_recursive_call=True, test=test) + return None + + # account for flexible numerics in element-wise comparison + if isinstance(expectation, int) or isinstance(expectation, float): + if test: + self.test.assertEqual(expectation, actual) + else: + return expectation == actual + return None + else: + if test: + self.test.assertIsInstance(actual, type(expectation)) + self.test.assertEqual(expectation, actual) + else: + return isinstance(actual, type(expectation)) and expectation == actual + return None + + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "server_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_topology_description(self, actual: TopologyDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "topology_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_event_fields(self, actual: Any, spec: dict) -> None: + for field, expected in spec.items(): + if field == "command" and isinstance(actual, CommandStartedEvent): + command = spec["command"] + if command: + self.match_result(command, actual.command) + continue + if field == "reply" and isinstance(actual, CommandSucceededEvent): + reply = spec["reply"] + if reply: + self.match_result(reply, actual.reply) + continue + if field == "hasServiceId": + if spec["hasServiceId"]: + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + continue + if field == "hasServerConnectionId": + if spec["hasServerConnectionId"]: + self.test.assertIsNotNone(actual.server_connection_id) + self.test.assertIsInstance(actual.server_connection_id, int) + else: + self.test.assertIsNone(actual.server_connection_id) + continue + if field in ("previousDescription", "newDescription"): + if isinstance(actual, ServerDescriptionChangedEvent): + self.match_server_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + if isinstance(actual, TopologyDescriptionChangedEvent): + self.match_topology_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + + if field == "interruptInUseConnections": + field = "interrupt_connections" + else: + field = camel_to_snake(field) + self.test.assertEqual(getattr(actual, field), expected) + + def match_event(self, expectation, actual): + name, spec = next(iter(expectation.items())) + if name == "commandStartedEvent": + self.test.assertIsInstance(actual, CommandStartedEvent) + elif name == "commandSucceededEvent": + self.test.assertIsInstance(actual, CommandSucceededEvent) + elif name == "commandFailedEvent": + self.test.assertIsInstance(actual, CommandFailedEvent) + elif name == "poolCreatedEvent": + self.test.assertIsInstance(actual, PoolCreatedEvent) + elif name == "poolReadyEvent": + self.test.assertIsInstance(actual, PoolReadyEvent) + elif name == "poolClearedEvent": + self.test.assertIsInstance(actual, PoolClearedEvent) + self.test.assertIsInstance(actual.interrupt_connections, bool) + elif name == "poolClosedEvent": + self.test.assertIsInstance(actual, PoolClosedEvent) + elif name == "connectionCreatedEvent": + self.test.assertIsInstance(actual, ConnectionCreatedEvent) + elif name == "connectionReadyEvent": + self.test.assertIsInstance(actual, ConnectionReadyEvent) + elif name == "connectionClosedEvent": + self.test.assertIsInstance(actual, ConnectionClosedEvent) + elif name == "connectionCheckOutStartedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) + elif name == "connectionCheckOutFailedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) + elif name == "connectionCheckedOutEvent": + self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) + elif name == "connectionCheckedInEvent": + self.test.assertIsInstance(actual, ConnectionCheckedInEvent) + elif name == "serverDescriptionChangedEvent": + self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) + elif name == "serverHeartbeatStartedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatStartedEvent) + elif name == "serverHeartbeatSucceededEvent": + self.test.assertIsInstance(actual, ServerHeartbeatSucceededEvent) + elif name == "serverHeartbeatFailedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatFailedEvent) + elif name == "topologyDescriptionChangedEvent": + self.test.assertIsInstance(actual, TopologyDescriptionChangedEvent) + elif name == "topologyOpeningEvent": + self.test.assertIsInstance(actual, TopologyOpenedEvent) + elif name == "topologyClosedEvent": + self.test.assertIsInstance(actual, TopologyClosedEvent) + else: + raise Exception(f"Unsupported event type {name}") + + self.match_event_fields(actual, spec) + + +def coerce_result(opname, result): + """Convert a pymongo result into the spec's result format.""" + if hasattr(result, "acknowledged") and not result.acknowledged: + return {"acknowledged": False} + if opname == "bulkWrite": + return parse_bulk_write_result(result) + if opname == "clientBulkWrite": + return parse_client_bulk_write_result(result) + if opname == "insertOne": + return {"insertedId": result.inserted_id} + if opname == "insertMany": + return dict(enumerate(result.inserted_ids)) + if opname in ("deleteOne", "deleteMany"): + return {"deletedCount": result.deleted_count} + if opname in ("updateOne", "updateMany", "replaceOne"): + value = { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": 0 if result.upserted_id is None else 1, + } + if result.upserted_id is not None: + value["upsertedId"] = result.upserted_id + return value + return result diff --git a/tools/synchro.py b/tools/synchro.py index f704919a17..e0af5efa44 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -205,6 +205,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes.py", "test_session.py", "test_transactions.py", + "unified_format.py", ] sync_test_files = [ From 7e86d24c7bffe4da0a4d32580b5da0e6230b78d2 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 13:59:37 -0400 Subject: [PATCH 1539/2111] PYTHON-4849 - Convert test.test_connection_logging.py to async (#1918) --- test/asynchronous/test_connection_logging.py | 45 ++++++++++++++++++++ test/test_connection_logging.py | 8 +++- tools/synchro.py | 1 + 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_connection_logging.py diff --git a/test/asynchronous/test_connection_logging.py b/test/asynchronous/test_connection_logging.py new file mode 100644 index 0000000000..6bc9835b70 --- /dev/null +++ b/test/asynchronous/test_connection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the connection logging unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "connection_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "connection_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_connection_logging.py b/test/test_connection_logging.py index 262ce821eb..253193cc43 100644 --- a/test/test_connection_logging.py +++ b/test/test_connection_logging.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -23,8 +24,13 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "connection_logging") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "connection_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "connection_logging") globals().update( diff --git a/tools/synchro.py b/tools/synchro.py index e0af5efa44..dbaf0a15e9 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -193,6 +193,7 @@ def async_only_test(f: str) -> bool: "test_collation.py", "test_collection.py", "test_common.py", + "test_connection_logging.py", "test_connections_survive_primary_stepdown_spec.py", "test_cursor.py", "test_database.py", From e0fde2338126ee3e8ca7771b3f88c4a2706638f2 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 13:59:44 -0400 Subject: [PATCH 1540/2111] PYTHON-4850 - Convert test.test_crud_unified to async (#1920) --- test/asynchronous/test_crud_unified.py | 39 ++++++++++++++++++++++++++ test/test_crud_unified.py | 10 +++++-- tools/synchro.py | 1 + 3 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 test/asynchronous/test_crud_unified.py diff --git a/test/asynchronous/test_crud_unified.py b/test/asynchronous/test_crud_unified.py new file mode 100644 index 0000000000..3d8deb36e9 --- /dev/null +++ b/test/asynchronous/test_crud_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CRUD unified spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "crud", "unified") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py index 92a60a47fc..26f34cba88 100644 --- a/test/test_crud_unified.py +++ b/test/test_crud_unified.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -23,11 +24,16 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "unified") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "crud", "unified") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) if __name__ == "__main__": unittest.main() diff --git a/tools/synchro.py b/tools/synchro.py index dbaf0a15e9..39ce7fbdd0 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -195,6 +195,7 @@ def async_only_test(f: str) -> bool: "test_common.py", "test_connection_logging.py", "test_connections_survive_primary_stepdown_spec.py", + "test_crud_unified.py", "test_cursor.py", "test_database.py", "test_encryption.py", From b2332b2aaeb26ecd7efa4992f037ca4dc56583db Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 13:59:49 -0400 Subject: [PATCH 1541/2111] PYTHON-4846 - Convert test.test_command_logging.py to async (#1915) --- test/asynchronous/test_command_logging.py | 44 +++++++++++++++++++++++ test/test_command_logging.py | 9 ++++- tools/synchro.py | 1 + 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_command_logging.py diff --git a/test/asynchronous/test_command_logging.py b/test/asynchronous/test_command_logging.py new file mode 100644 index 0000000000..f9b459c152 --- /dev/null +++ b/test/asynchronous/test_command_logging.py @@ -0,0 +1,44 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_command_logging.py b/test/test_command_logging.py index 9b2d52e66b..cf865920ca 100644 --- a/test/test_command_logging.py +++ b/test/test_command_logging.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -23,8 +24,14 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_logging") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_logging") + globals().update( generate_test_classes( diff --git a/tools/synchro.py b/tools/synchro.py index 39ce7fbdd0..f40a64e4c2 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -192,6 +192,7 @@ def async_only_test(f: str) -> bool: "test_client_context.py", "test_collation.py", "test_collection.py", + "test_command_logging.py", "test_common.py", "test_connection_logging.py", "test_connections_survive_primary_stepdown_spec.py", From 4eeaa4b7be9e814fd207166904f42556c10ce63b Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 11 Oct 2024 14:56:43 -0400 Subject: [PATCH 1542/2111] PYTHON-4848 - Convert test.test_command_monitoring.py to async (#1917) --- test/asynchronous/test_command_monitoring.py | 45 ++++++++++++++++++++ test/test_command_monitoring.py | 8 +++- tools/synchro.py | 1 + 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_command_monitoring.py diff --git a/test/asynchronous/test_command_monitoring.py b/test/asynchronous/test_command_monitoring.py new file mode 100644 index 0000000000..311fd1fdc1 --- /dev/null +++ b/test/asynchronous/test_command_monitoring.py @@ -0,0 +1,45 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_monitoring") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_monitoring") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_command_monitoring.py b/test/test_command_monitoring.py index d2f578824d..4f5ef06f28 100644 --- a/test/test_command_monitoring.py +++ b/test/test_command_monitoring.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -23,8 +24,13 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_monitoring") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_monitoring") globals().update( diff --git a/tools/synchro.py b/tools/synchro.py index f40a64e4c2..b6812e9be6 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -193,6 +193,7 @@ def async_only_test(f: str) -> bool: "test_collation.py", "test_collection.py", "test_command_logging.py", + "test_command_monitoring.py", "test_common.py", "test_connection_logging.py", "test_connections_survive_primary_stepdown_spec.py", From 33163ecc0d4fe7dc8f7bfc12ef93d89513203fe2 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Fri, 11 Oct 2024 16:02:13 -0700 Subject: [PATCH 1543/2111] PYTHON-4804 Migrate test_comment.py to async (#1887) --- test/asynchronous/test_comment.py | 159 ++++++++++++++++++++++++++++++ test/test_comment.py | 60 ++++------- tools/synchro.py | 2 + 3 files changed, 179 insertions(+), 42 deletions(-) create mode 100644 test/asynchronous/test_comment.py diff --git a/test/asynchronous/test_comment.py b/test/asynchronous/test_comment.py new file mode 100644 index 0000000000..be3626a8b8 --- /dev/null +++ b/test/asynchronous/test_comment.py @@ -0,0 +1,159 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the keyword argument 'comment' in various helpers.""" + +from __future__ import annotations + +import inspect +import sys + +sys.path[0:0] = [""] +from asyncio import iscoroutinefunction +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import OvertCommandListener + +from bson.dbref import DBRef +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.operations import IndexModel + +_IS_SYNC = False + + +class AsyncTestComment(AsyncIntegrationTest): + async def _test_ops( + self, + helpers, + already_supported, + listener, + ): + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + listener.reset() + kwargs = {"comment": cc} + try: + maybe_cursor = await h(*args, **kwargs) + except Exception: + maybe_cursor = None + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" + ) + if isinstance(maybe_cursor, AsyncCommandCursor): + await maybe_cursor.close() + + cmd = listener.started_events[0] + self.assertEqual(cc, cmd.command.get("comment"), msg=cmd) + + if h.__name__ != "aggregate_raw_batches": + self.assertIn( + ":param comment:", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + listener.reset() + + @async_client_context.require_version_min(4, 7, -1) + @async_client_context.require_replica_set + async def test_database_helpers(self): + listener = OvertCommandListener() + db = (await self.async_rs_or_single_client(event_listeners=[listener])).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), + (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + await self._test_ops(helpers, already_supported, listener) + + @async_client_context.require_version_min(4, 7, -1) + @async_client_context.require_replica_set + async def test_client_helpers(self): + listener = OvertCommandListener() + cli = await self.async_rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + await self._test_ops(helpers, already_supported, listener) + + @async_client_context.require_version_min(4, 7, -1) + async def test_collection_helpers(self): + listener = OvertCommandListener() + db = (await self.async_rs_or_single_client(event_listeners=[listener]))[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + await self._test_ops(helpers, already_supported, listener) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_comment.py b/test/test_comment.py index c0f037ea44..9f9bf98640 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -20,24 +20,15 @@ import sys sys.path[0:0] = [""] - +from asyncio import iscoroutinefunction from test import IntegrationTest, client_context, unittest -from test.utils import EventListener +from test.utils import OvertCommandListener from bson.dbref import DBRef from pymongo.operations import IndexModel from pymongo.synchronous.command_cursor import CommandCursor - -class Empty: - def __getattr__(self, item): - try: - self.__dict__[item] - except KeyError: - return self.empty - - def empty(self, *args, **kwargs): - return Empty() +_IS_SYNC = True class TestComment(IntegrationTest): @@ -46,8 +37,6 @@ def _test_ops( helpers, already_supported, listener, - db=Empty(), # noqa: B008 - coll=Empty(), # noqa: B008 ): for h, args in helpers: c = "testing comment with " + h.__name__ @@ -55,19 +44,10 @@ def _test_ops( for cc in [c, {"key": c}, ["any", 1]]: listener.reset() kwargs = {"comment": cc} - if h == coll.rename: - _ = db.get_collection("temp_temp_temp").drop() - destruct_coll = db.get_collection("test_temp") - destruct_coll.insert_one({}) - maybe_cursor = destruct_coll.rename(*args, **kwargs) - destruct_coll.drop() - elif h == db.validate_collection: - coll = db.get_collection("test") - coll.insert_one({}) - maybe_cursor = db.validate_collection(*args, **kwargs) - else: - coll.create_index("a") + try: maybe_cursor = h(*args, **kwargs) + except Exception: + maybe_cursor = None self.assertIn( "comment", inspect.signature(h).parameters, @@ -79,15 +59,11 @@ def _test_ops( ) if isinstance(maybe_cursor, CommandCursor): maybe_cursor.close() - tested = False - # For some reason collection.list_indexes creates two commands and the first - # one doesn't contain 'comment'. - for i in listener.started_events: - if cc == i.command.get("comment", ""): - self.assertEqual(cc, i.command["comment"]) - tested = True - self.assertTrue(tested) - if h not in [coll.aggregate_raw_batches]: + + cmd = listener.started_events[0] + self.assertEqual(cc, cmd.command.get("comment"), msg=cmd) + + if h.__name__ != "aggregate_raw_batches": self.assertIn( ":param comment:", h.__doc__, @@ -108,8 +84,8 @@ def _test_ops( @client_context.require_version_min(4, 7, -1) @client_context.require_replica_set def test_database_helpers(self): - listener = EventListener() - db = self.rs_or_single_client(event_listeners=[listener]).db + listener = OvertCommandListener() + db = (self.rs_or_single_client(event_listeners=[listener])).db helpers = [ (db.watch, []), (db.command, ["hello"]), @@ -120,12 +96,12 @@ def test_database_helpers(self): (db.dereference, [DBRef("collection", 1)]), ] already_supported = [db.command, db.list_collections, db.list_collection_names] - self._test_ops(helpers, already_supported, listener, db=db, coll=db.get_collection("test")) + self._test_ops(helpers, already_supported, listener) @client_context.require_version_min(4, 7, -1) @client_context.require_replica_set def test_client_helpers(self): - listener = EventListener() + listener = OvertCommandListener() cli = self.rs_or_single_client(event_listeners=[listener]) helpers = [ (cli.watch, []), @@ -140,8 +116,8 @@ def test_client_helpers(self): @client_context.require_version_min(4, 7, -1) def test_collection_helpers(self): - listener = EventListener() - db = self.rs_or_single_client(event_listeners=[listener])[self.db.name] + listener = OvertCommandListener() + db = (self.rs_or_single_client(event_listeners=[listener]))[self.db.name] coll = db.get_collection("test") helpers = [ @@ -176,7 +152,7 @@ def test_collection_helpers(self): coll.find_one_and_delete, coll.find_one_and_update, ] - self._test_ops(helpers, already_supported, listener, coll=coll, db=db) + self._test_ops(helpers, already_supported, listener) if __name__ == "__main__": diff --git a/tools/synchro.py b/tools/synchro.py index b6812e9be6..25f506ed5a 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -193,7 +193,9 @@ def async_only_test(f: str) -> bool: "test_collation.py", "test_collection.py", "test_command_logging.py", + "test_command_logging.py", "test_command_monitoring.py", + "test_comment.py", "test_common.py", "test_connection_logging.py", "test_connections_survive_primary_stepdown_spec.py", From 3c5e71a1cb28b695bc2eec4c3927ef6af56835a8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Oct 2024 07:32:38 -0500 Subject: [PATCH 1544/2111] PYTHON-4862 Fix handling of interrupt_loop in unified test runner (#1924) --- test/asynchronous/unified_format.py | 8 +++++++- test/unified_format.py | 8 +++++++- test/unified_format_shared.py | 5 ----- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 4c37422951..42bda59cb2 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -36,7 +36,6 @@ unittest, ) from test.unified_format_shared import ( - IS_INTERRUPTED, KMS_TLS_OPTS, PLACEHOLDER_MAP, SKIP_CSOT_TESTS, @@ -104,6 +103,13 @@ _IS_SYNC = False +IS_INTERRUPTED = False + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + async def is_run_on_requirement_satisfied(requirement): topology_satisfied = True diff --git a/test/unified_format.py b/test/unified_format.py index 6a19082b86..13ab0af69b 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -36,7 +36,6 @@ unittest, ) from test.unified_format_shared import ( - IS_INTERRUPTED, KMS_TLS_OPTS, PLACEHOLDER_MAP, SKIP_CSOT_TESTS, @@ -104,6 +103,13 @@ _IS_SYNC = True +IS_INTERRUPTED = False + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + def is_run_on_requirement_satisfied(requirement): topology_satisfied = True diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index d11624476d..f1b908a7a6 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -139,11 +139,6 @@ } -def interrupt_loop(): - global IS_INTERRUPTED - IS_INTERRUPTED = True - - def with_metaclass(meta, *bases): """Create a base class with a metaclass. From 9ba780cac256720be5c3c5051c7f8a19d27693d5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Oct 2024 07:34:01 -0500 Subject: [PATCH 1545/2111] PYTHON-4861 Ensure hatch is isolated in Evergreen (#1923) --- .evergreen/hatch.sh | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index db0da2f4d0..8f862c39d2 100644 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -18,17 +18,22 @@ if [ -n "$SKIP_HATCH" ]; then run_hatch() { bash ./.evergreen/run-tests.sh } -elif $PYTHON_BINARY -m hatch --version; then - run_hatch() { - $PYTHON_BINARY -m hatch run "$@" - } -else # No toolchain hatch present, set up virtualenv before installing hatch +else # Set up virtualenv before installing hatch # Use a random venv name because the encryption tasks run this script multiple times in the same run. ENV_NAME=hatchenv-$RANDOM createvirtualenv "$PYTHON_BINARY" $ENV_NAME # shellcheck disable=SC2064 trap "deactivate; rm -rf $ENV_NAME" EXIT HUP python -m pip install -q hatch + + # Ensure hatch does not write to user or global locations. + touch hatch_config.toml + HATCH_CONFIG=$(pwd)/hatch_config.toml + export HATCH_CONFIG + hatch config restore + hatch config set dirs.data ".hatch/data" + hatch config set dirs.cache ".hatch/cache" + run_hatch() { python -m hatch run "$@" } From 3cc722e9105d5818d57739d623d985d69b0eb626 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Oct 2024 14:05:22 -0500 Subject: [PATCH 1546/2111] PYTHON-4838 Generate OCSP build variants using shrub.py (#1910) --- .evergreen/config.yml | 174 +++++++++++++++++++++----- .evergreen/scripts/generate_config.py | 167 ++++++++++++++++++++++++ 2 files changed, 308 insertions(+), 33 deletions(-) create mode 100644 .evergreen/scripts/generate_config.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1ef8751501..dee4b608ec 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2826,42 +2826,150 @@ buildvariants: - "test-6.0-standalone" - "test-5.0-standalone" -- matrix_name: "ocsp-test" - matrix_spec: - platform: rhel8 - python-version: ["3.9", "3.10", "pypy3.9", "pypy3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" - batchtime: 20160 # 14 days +# OCSP test matrix. +- name: ocsp-test-rhel8-v4.4-py3.9 tasks: - - name: ".ocsp" - -- matrix_name: "ocsp-test-windows" - matrix_spec: - platform: windows - python-version-windows: ["3.9", "3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" - batchtime: 20160 # 14 days + - name: .ocsp + display_name: OCSP test RHEL8 v4.4 py3.9 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "4.4" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: ocsp-test-rhel8-v5.0-py3.10 tasks: - # Windows MongoDB servers do not staple OCSP responses and only support RSA. - - name: ".ocsp-rsa !.ocsp-staple" - -- matrix_name: "ocsp-test-macos" - matrix_spec: - platform: macos - mongodb-version: ["4.4", "5.0", "6.0", "7.0", "8.0", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${mongodb-version}" - batchtime: 20160 # 14 days + - name: .ocsp + display_name: OCSP test RHEL8 v5.0 py3.10 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "5.0" + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: ocsp-test-rhel8-v6.0-py3.11 tasks: - # macOS MongoDB servers do not staple OCSP responses and only support RSA. - - name: ".ocsp-rsa !.ocsp-staple" + - name: .ocsp + display_name: OCSP test RHEL8 v6.0 py3.11 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "6.0" + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: ocsp-test-rhel8-v7.0-py3.12 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 v7.0 py3.12 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "7.0" + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: ocsp-test-rhel8-v8.0-py3.13 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 v8.0 py3.13 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "8.0" + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: ocsp-test-rhel8-rapid-pypy3.9 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 rapid pypy3.9 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: rapid + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: ocsp-test-rhel8-latest-pypy3.10 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 latest pypy3.10 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: latest + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +- name: ocsp-test-win64-v4.4-py3.9 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test Win64 v4.4 py3.9 + run_on: + - windows-64-vsMulti-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "4.4" + PYTHON_BINARY: C:/python/Python39/python.exe +- name: ocsp-test-win64-v8.0-py3.13 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test Win64 v8.0 py3.13 + run_on: + - windows-64-vsMulti-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "8.0" + PYTHON_BINARY: C:/python/Python313/python.exe +- name: ocsp-test-macos-v4.4-py3.9 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test macOS v4.4 py3.9 + run_on: + - macos-14 + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "4.4" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: ocsp-test-macos-v8.0-py3.13 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test macOS v8.0 py3.13 + run_on: + - macos-14 + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: "8.0" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - matrix_name: "oidc-auth-test" matrix_spec: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py new file mode 100644 index 0000000000..e98e527b72 --- /dev/null +++ b/.evergreen/scripts/generate_config.py @@ -0,0 +1,167 @@ +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "shrub.py>=3.2.0", +# "pyyaml>=6.0.2" +# ] +# /// + +# Note: Run this file with `hatch run`, `pipx run`, or `uv run`. +from __future__ import annotations + +from dataclasses import dataclass +from itertools import cycle, product, zip_longest +from typing import Any + +from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_project import EvgProject +from shrub.v3.evg_task import EvgTaskRef +from shrub.v3.shrub_service import ShrubService + +############## +# Globals +############## + +ALL_VERSIONS = ["4.0", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] +PYPYS = ["pypy3.9", "pypy3.10"] +ALL_PYTHONS = CPYTHONS + PYPYS +BATCHTIME_WEEK = 10080 +HOSTS = dict() + + +@dataclass +class Host: + name: str + run_on: str + display_name: str + + +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8") +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64") +HOSTS["macos"] = Host("macos", "macos-14", "macOS") + + +############## +# Helpers +############## + + +def create_variant( + task_names: list[str], + display_name: str, + *, + python: str | None = None, + version: str | None = None, + host: str | None = None, + **kwargs: Any, +) -> BuildVariant: + """Create a build variant for the given inputs.""" + task_refs = [EvgTaskRef(name=n) for n in task_names] + kwargs.setdefault("expansions", dict()) + expansions = kwargs.pop("expansions", dict()).copy() + host = host or "rhel8" + run_on = [HOSTS[host].run_on] + name = display_name.replace(" ", "-").lower() + if python: + expansions["PYTHON_BINARY"] = get_python_binary(python, host) + if version: + expansions["VERSION"] = version + expansions = expansions or None + return BuildVariant( + name=name, + display_name=display_name, + tasks=task_refs, + expansions=expansions, + run_on=run_on, + **kwargs, + ) + + +def get_python_binary(python: str, host: str) -> str: + """Get the appropriate python binary given a python version and host.""" + if host == "win64": + is_32 = python.startswith("32-bit") + if is_32: + _, python = python.split() + base = "C:/python/32" + else: + base = "C:/python" + python = python.replace(".", "") + return f"{base}/Python{python}/python.exe" + + if host == "rhel8": + return f"/opt/python/{python}/bin/python3" + + if host == "macos": + return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" + + raise ValueError(f"no match found for python {python} on {host}") + + +def get_display_name(base: str, host: str, version: str, python: str) -> str: + """Get the display name of a variant.""" + if version not in ["rapid", "latest"]: + version = f"v{version}" + if not python.startswith("pypy"): + python = f"py{python}" + return f"{base} {HOSTS[host].display_name} {version} {python}" + + +def zip_cycle(*iterables, empty_default=None): + """Get all combinations of the inputs, cycling over the shorter list(s).""" + cycles = [cycle(i) for i in iterables] + for _ in zip_longest(*iterables): + yield tuple(next(i, empty_default) for i in cycles) + + +############## +# Variants +############## + + +def create_ocsp_variants() -> list[BuildVariant]: + variants = [] + batchtime = BATCHTIME_WEEK * 2 + expansions = dict(AUTH="noauth", SSL="ssl", TOPOLOGY="server") + base_display = "OCSP test" + + # OCSP tests on rhel8 with all servers v4.4+ and all python versions. + versions = [v for v in ALL_VERSIONS if v != "4.0"] + for version, python in zip_cycle(versions, ALL_PYTHONS): + host = "rhel8" + variant = create_variant( + [".ocsp"], + get_display_name(base_display, host, version, python), + python=python, + version=version, + host=host, + expansions=expansions, + batchtime=batchtime, + ) + variants.append(variant) + + # OCSP tests on Windows and MacOS. + # MongoDB servers on these hosts do not staple OCSP responses and only support RSA. + for host, version in product(["win64", "macos"], ["4.4", "8.0"]): + python = CPYTHONS[0] if version == "4.4" else CPYTHONS[-1] + variant = create_variant( + [".ocsp-rsa !.ocsp-staple"], + get_display_name(base_display, host, version, python), + python=python, + version=version, + host=host, + expansions=expansions, + batchtime=batchtime, + ) + variants.append(variant) + + return variants + + +################## +# Generate Config +################## + +project = EvgProject(tasks=None, buildvariants=create_ocsp_variants()) +print(ShrubService.generate_yaml(project)) # noqa: T201 From a911245bde1377c485f06dfd5373d159b7e8aff7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 14 Oct 2024 15:06:42 -0700 Subject: [PATCH 1547/2111] PYTHON-4866 Fix test_command_cursor_to_list_csot_applied (#1926) --- test/asynchronous/test_cursor.py | 14 ++++++-------- test/test_cursor.py | 14 ++++++-------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index e79ad00641..ee0a757ed3 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1412,12 +1412,11 @@ async def test_to_list_length(self): self.assertEqual(len(docs), 2) async def test_to_list_csot_applied(self): - client = await self.async_single_client(timeoutMS=500) + client = await self.async_single_client(timeoutMS=500, w=1) + coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey with pymongo.timeout(10): - await client.admin.command("ping") - coll = client.pymongo.test - await coll.insert_many([{} for _ in range(5)]) + await coll.insert_many([{} for _ in range(5)]) cursor = coll.find({"$where": delay(1)}) with self.assertRaises(PyMongoError) as ctx: await cursor.to_list() @@ -1454,12 +1453,11 @@ async def test_command_cursor_to_list_length(self): @async_client_context.require_failCommand_blockConnection async def test_command_cursor_to_list_csot_applied(self): - client = await self.async_single_client(timeoutMS=500) + client = await self.async_single_client(timeoutMS=500, w=1) + coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey with pymongo.timeout(10): - await client.admin.command("ping") - coll = client.pymongo.test - await coll.insert_many([{} for _ in range(5)]) + await coll.insert_many([{} for _ in range(5)]) fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 5}, diff --git a/test/test_cursor.py b/test/test_cursor.py index 7c073bf351..7a6dfc9429 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1403,12 +1403,11 @@ def test_to_list_length(self): self.assertEqual(len(docs), 2) def test_to_list_csot_applied(self): - client = self.single_client(timeoutMS=500) + client = self.single_client(timeoutMS=500, w=1) + coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey with pymongo.timeout(10): - client.admin.command("ping") - coll = client.pymongo.test - coll.insert_many([{} for _ in range(5)]) + coll.insert_many([{} for _ in range(5)]) cursor = coll.find({"$where": delay(1)}) with self.assertRaises(PyMongoError) as ctx: cursor.to_list() @@ -1445,12 +1444,11 @@ def test_command_cursor_to_list_length(self): @client_context.require_failCommand_blockConnection def test_command_cursor_to_list_csot_applied(self): - client = self.single_client(timeoutMS=500) + client = self.single_client(timeoutMS=500, w=1) + coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey with pymongo.timeout(10): - client.admin.command("ping") - coll = client.pymongo.test - coll.insert_many([{} for _ in range(5)]) + coll.insert_many([{} for _ in range(5)]) fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 5}, From 9e38c54fa03d0f719a43ff023894c2a1ad9b5480 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 14 Oct 2024 15:25:21 -0700 Subject: [PATCH 1548/2111] PYTHON-4861 Fix HATCH_CONFIG on cygwin (#1927) --- .evergreen/hatch.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index 8f862c39d2..6f3d36b389 100644 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -29,6 +29,9 @@ else # Set up virtualenv before installing hatch # Ensure hatch does not write to user or global locations. touch hatch_config.toml HATCH_CONFIG=$(pwd)/hatch_config.toml + if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") + fi export HATCH_CONFIG hatch config restore hatch config set dirs.data ".hatch/data" From 872fda179e247fb8e1bcc3cf2af3d892788a2e2f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 15 Oct 2024 08:54:42 -0400 Subject: [PATCH 1549/2111] PYTHON-4574 - FaaS detection logic mistakenly identifies EKS as AWS Lambda (#1908) --- test/asynchronous/test_client.py | 16 ++++++++++++++++ test/test_client.py | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index faa23348c9..c6b6416c16 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -2019,6 +2019,22 @@ async def test_handshake_08_invalid_aws_ec2(self): None, ) + async def test_handshake_09_container_with_provider(self): + await self._test_handshake( + { + ENV_VAR_K8S: "1", + "AWS_LAMBDA_RUNTIME_API": "1", + "AWS_REGION": "us-east-1", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "256", + }, + { + "container": {"orchestrator": "kubernetes"}, + "name": "aws.lambda", + "region": "us-east-1", + "memory_mb": 256, + }, + ) + def test_dict_hints(self): self.db.t.find(hint={"x": 1}) diff --git a/test/test_client.py b/test/test_client.py index be1994dd93..8e3d9c8b8b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1977,6 +1977,22 @@ def test_handshake_08_invalid_aws_ec2(self): None, ) + def test_handshake_09_container_with_provider(self): + self._test_handshake( + { + ENV_VAR_K8S: "1", + "AWS_LAMBDA_RUNTIME_API": "1", + "AWS_REGION": "us-east-1", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "256", + }, + { + "container": {"orchestrator": "kubernetes"}, + "name": "aws.lambda", + "region": "us-east-1", + "memory_mb": 256, + }, + ) + def test_dict_hints(self): self.db.t.find(hint={"x": 1}) From 710bc40c730d2fd982e1cb7a41fd91ac7b5d4498 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 15 Oct 2024 12:12:18 -0400 Subject: [PATCH 1550/2111] =?UTF-8?q?PYTHON-4870=20-=20MongoClient.address?= =?UTF-8?q?=20should=20block=20until=20a=20connection=20suc=E2=80=A6=20(#1?= =?UTF-8?q?929)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/asynchronous/mongo_client.py | 7 ------- pymongo/synchronous/mongo_client.py | 7 ------- test/asynchronous/test_client.py | 2 -- test/test_client.py | 2 -- test/test_replica_set_reconfig.py | 3 ++- 5 files changed, 2 insertions(+), 19 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index bfae302dac..4e09efe401 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -1453,13 +1453,6 @@ async def address(self) -> Optional[tuple[str, int]]: 'Cannot use "address" property when load balancing among' ' mongoses, use "nodes" instead.' ) - if topology_type not in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded, - ): - return None return await self._server_property("address") @property diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 1351cb200f..815446bb2c 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1447,13 +1447,6 @@ def address(self) -> Optional[tuple[str, int]]: 'Cannot use "address" property when load balancing among' ' mongoses, use "nodes" instead.' ) - if topology_type not in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded, - ): - return None return self._server_property("address") @property diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index c6b6416c16..590154b857 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -838,8 +838,6 @@ async def test_init_disconnected(self): c = await self.async_rs_or_single_client(connect=False) self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) - self.assertIsNone(await c.address) # PYTHON-2981 - await c.admin.command("ping") # connect if async_client_context.is_rs: # The primary's host and port are from the replica set config. self.assertIsNotNone(await c.address) diff --git a/test/test_client.py b/test/test_client.py index 8e3d9c8b8b..5bbb5bd751 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -812,8 +812,6 @@ def test_init_disconnected(self): c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) - self.assertIsNone(c.address) # PYTHON-2981 - c.admin.command("ping") # connect if client_context.is_rs: # The primary's host and port are from the replica set config. self.assertIsNotNone(c.address) diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 1dae0aea86..4c23d71b69 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -59,7 +59,8 @@ def test_client(self): with self.assertRaises(ServerSelectionTimeoutError): c.db.command("ping") - self.assertEqual(c.address, None) + with self.assertRaises(ServerSelectionTimeoutError): + _ = c.address # Client can still discover the primary node c.revive_host("a:1") From 82e673d6602b968823768d7c99bb5a676c00eb08 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 15 Oct 2024 14:16:19 -0400 Subject: [PATCH 1551/2111] PYTHON-4870 - Update changelog for MongoClient.address fix (#1931) --- doc/changelog.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index e7b160b176..44d6fc9a57 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -14,6 +14,9 @@ PyMongo 4.11 brings a number of changes including: - Dropped support for MongoDB 3.6. - Added support for free-threaded Python with the GIL disabled. For more information see: `Free-threaded CPython `_. +- :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and + :attr:`~pymongo.mongo_client.MongoClient.address` now correctly block when called on unconnected clients + until either connection succeeds or a server selection timeout error is raised. Issues Resolved ............... From 1b6c0d3a2a7b82f9526f71d5583d11e7674d3c54 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Oct 2024 13:33:04 -0500 Subject: [PATCH 1552/2111] PYTHON-4868 Generate server tests using shrub.py (#1930) --- .evergreen/config.yml | 548 ++++++++++++++++++++++---- .evergreen/hatch.sh | 4 +- .evergreen/run-tests.sh | 2 +- .evergreen/scripts/generate_config.py | 121 +++++- 4 files changed, 583 insertions(+), 92 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index dee4b608ec..c3427e66d0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -409,6 +409,7 @@ functions: AUTH=${AUTH} \ SSL=${SSL} \ TEST_DATA_LAKE=${TEST_DATA_LAKE} \ + TEST_SUITES=${TEST_SUITES} \ MONGODB_API_VERSION=${MONGODB_API_VERSION} \ SKIP_HATCH=${SKIP_HATCH} \ bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg @@ -2399,6 +2400,470 @@ axes: batchtime: 10080 # 7 days buildvariants: +# Server Tests for RHEL8. +- name: test-rhel8-py3.9-auth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.9 Auth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-py3.9-noauth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.9 NoAuth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-py3.9-noauth-nossl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.9 NoAuth NoSSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-py3.13-auth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.13 Auth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-py3.13-noauth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.13 NoAuth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-py3.13-noauth-nossl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.13 NoAuth NoSSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-pypy3.10-auth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 pypy3.10 Auth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-pypy3.10-noauth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 pypy3.10 NoAuth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-pypy3.10-noauth-nossl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 pypy3.10 NoAuth NoSSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] +- name: test-rhel8-py3.10-auth-ssl + tasks: + - name: .standalone + display_name: Test RHEL8 py3.10 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: test-rhel8-py3.11-noauth-ssl + tasks: + - name: .replica_set + display_name: Test RHEL8 py3.11 NoAuth SSL + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: test-rhel8-py3.12-noauth-nossl + tasks: + - name: .sharded_cluster + display_name: Test RHEL8 py3.12 NoAuth NoSSL + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: test-rhel8-pypy3.9-auth-ssl + tasks: + - name: .standalone + display_name: Test RHEL8 pypy3.9 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + +# Server tests for MacOS. +- name: test-macos-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test macOS py3.9 Auth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test macOS py3.9 Auth SSL Async + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-py3.13-noauth-ssl-sync + tasks: + - name: .replica_set + display_name: Test macOS py3.13 NoAuth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-py3.13-noauth-ssl-async + tasks: + - name: .replica_set + display_name: Test macOS py3.13 NoAuth SSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-py3.9-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.9 NoAuth NoSSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-py3.9-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.9 NoAuth NoSSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" + +# Server tests for macOS Arm64. +- name: test-macos-arm64-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test macOS Arm64 py3.9 Auth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-arm64-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test macOS Arm64 py3.9 Auth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-arm64-py3.13-noauth-ssl-sync + tasks: + - name: .replica_set + display_name: Test macOS Arm64 py3.13 NoAuth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-arm64-py3.13-noauth-ssl-async + tasks: + - name: .replica_set + display_name: Test macOS Arm64 py3.13 NoAuth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-arm64-py3.9-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" +- name: test-macos-arm64-py3.9-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SKIP_CSOT_TESTS: "true" + +# Server tests for Windows. +- name: test-win64-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win64 py3.9 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: C:/python/Python39/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win64-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test Win64 py3.9 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: C:/python/Python39/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win64-py3.13-noauth-ssl-sync + tasks: + - name: .replica_set + display_name: Test Win64 py3.13 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: C:/python/Python313/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win64-py3.13-noauth-ssl-async + tasks: + - name: .replica_set + display_name: Test Win64 py3.13 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: C:/python/Python313/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win64-py3.9-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.9 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + PYTHON_BINARY: C:/python/Python39/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win64-py3.9-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.9 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + PYTHON_BINARY: C:/python/Python39/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win32-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win32 py3.9 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: C:/python/32/Python39/python.exe + SKIP_CSOT_TESTS: "true" + +# Server tests for Win32. +- name: test-win32-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test Win32 py3.9 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: C:/python/32/Python39/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win32-py3.13-noauth-ssl-sync + tasks: + - name: .replica_set + display_name: Test Win32 py3.13 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + PYTHON_BINARY: C:/python/32/Python313/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win32-py3.13-noauth-ssl-async + tasks: + - name: .replica_set + display_name: Test Win32 py3.13 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + PYTHON_BINARY: C:/python/32/Python313/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win32-py3.9-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.9 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + PYTHON_BINARY: C:/python/32/Python39/python.exe + SKIP_CSOT_TESTS: "true" +- name: test-win32-py3.9-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.9 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + PYTHON_BINARY: C:/python/32/Python39/python.exe + SKIP_CSOT_TESTS: "true" + - matrix_name: "tests-fips" matrix_spec: platform: @@ -2409,44 +2874,6 @@ buildvariants: tasks: - "test-fips-standalone" -- matrix_name: "test-macos" - matrix_spec: - platform: - # MacOS introduced SSL support with MongoDB >= 3.2. - # Older server versions (2.6, 3.0) are supported without SSL. - - macos - auth: "*" - ssl: "*" - exclude_spec: - # No point testing with SSL without auth. - - platform: macos - auth: "noauth" - ssl: "ssl" - display_name: "${platform} ${auth} ${ssl}" - tasks: - - ".latest" - - ".8.0" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - -- matrix_name: "test-macos-arm64" - matrix_spec: - platform: - - macos-arm64 - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".latest" - - ".8.0" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - matrix_name: "test-macos-encryption" matrix_spec: platform: @@ -2486,24 +2913,6 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-python-version-rhel8-test-ssl" - matrix_spec: - platform: rhel8 - python-version: "*" - auth-ssl: "*" - coverage: "*" - display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" - tasks: &all-server-versions - - ".rapid" - - ".latest" - - ".8.0" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - matrix_name: "tests-pyopenssl" matrix_spec: platform: rhel8 @@ -2580,7 +2989,16 @@ buildvariants: auth-ssl: "*" coverage: "*" display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" - tasks: *all-server-versions + tasks: &all-server-versions + - ".rapid" + - ".latest" + - ".8.0" + - ".7.0" + - ".6.0" + - ".5.0" + - ".4.4" + - ".4.2" + - ".4.0" - matrix_name: "tests-python-version-rhel8-compression" matrix_spec: @@ -2629,22 +3047,6 @@ buildvariants: display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" tasks: *all-server-versions -- matrix_name: "tests-windows-python-version" - matrix_spec: - platform: windows - python-version-windows: "*" - auth-ssl: "*" - display_name: "${platform} ${python-version-windows} ${auth-ssl}" - tasks: *all-server-versions - -- matrix_name: "tests-windows-python-version-32-bit" - matrix_spec: - platform: windows - python-version-windows-32: "*" - auth-ssl: "*" - display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" - tasks: *all-server-versions - - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: platform: rhel7 diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index 6f3d36b389..45d5113cd6 100644 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -34,8 +34,8 @@ else # Set up virtualenv before installing hatch fi export HATCH_CONFIG hatch config restore - hatch config set dirs.data ".hatch/data" - hatch config set dirs.cache ".hatch/cache" + hatch config set dirs.data "$(pwd)/.hatch/data" + hatch config set dirs.cache "$(pwd)/.hatch/cache" run_hatch() { python -m hatch run "$@" diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 5e8429dd28..364570999f 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -30,7 +30,7 @@ set -o xtrace AUTH=${AUTH:-noauth} SSL=${SSL:-nossl} -TEST_SUITES="" +TEST_SUITES=${TEST_SUITES:-} TEST_ARGS="${*:1}" export PIP_QUIET=1 # Quiet by default diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e98e527b72..044303ad8f 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -26,7 +26,17 @@ CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] PYPYS = ["pypy3.9", "pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS +MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] BATCHTIME_WEEK = 10080 +AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] +TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] +SYNCS = ["sync", "async"] +DISPLAY_LOOKUP = dict( + ssl=dict(ssl="SSL", nossl="NoSSL"), + auth=dict(auth="Auth", noauth="NoAuth"), + test_suites=dict(default="Sync", default_async="Async"), + coverage=dict(coverage="cov"), +) HOSTS = dict() @@ -35,11 +45,18 @@ class Host: name: str run_on: str display_name: str + expansions: dict[str, str] -HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8") -HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64") -HOSTS["macos"] = Host("macos", "macos-14", "macOS") +_macos_expansions = dict( # CSOT tests are unreliable on slow hosts. + SKIP_CSOT_TESTS="true" +) + +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", _macos_expansions) +HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", _macos_expansions) +HOSTS["macos"] = Host("macos", "macos-14", "macOS", _macos_expansions) +HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", _macos_expansions) ############## @@ -67,6 +84,7 @@ def create_variant( expansions["PYTHON_BINARY"] = get_python_binary(python, host) if version: expansions["VERSION"] = version + expansions.update(HOSTS[host].expansions) expansions = expansions or None return BuildVariant( name=name, @@ -80,10 +98,8 @@ def create_variant( def get_python_binary(python: str, host: str) -> str: """Get the appropriate python binary given a python version and host.""" - if host == "win64": - is_32 = python.startswith("32-bit") - if is_32: - _, python = python.split() + if host in ["win64", "win32"]: + if host == "win32": base = "C:/python/32" else: base = "C:/python" @@ -93,19 +109,29 @@ def get_python_binary(python: str, host: str) -> str: if host == "rhel8": return f"/opt/python/{python}/bin/python3" - if host == "macos": + if host in ["macos", "macos-arm64"]: return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" raise ValueError(f"no match found for python {python} on {host}") -def get_display_name(base: str, host: str, version: str, python: str) -> str: +def get_display_name(base: str, host: str, **kwargs) -> str: """Get the display name of a variant.""" - if version not in ["rapid", "latest"]: - version = f"v{version}" - if not python.startswith("pypy"): - python = f"py{python}" - return f"{base} {HOSTS[host].display_name} {version} {python}" + display_name = f"{base} {HOSTS[host].display_name}" + for key, value in kwargs.items(): + name = value + if key == "version": + if value not in ["rapid", "latest"]: + name = f"v{value}" + elif key == "python": + if not value.startswith("pypy"): + name = f"py{value}" + elif key.lower() in DISPLAY_LOOKUP: + name = DISPLAY_LOOKUP[key.lower()][value] + else: + raise ValueError(f"Missing display handling for {key}") + display_name = f"{display_name} {name}" + return display_name def zip_cycle(*iterables, empty_default=None): @@ -115,6 +141,15 @@ def zip_cycle(*iterables, empty_default=None): yield tuple(next(i, empty_default) for i in cycles) +def generate_yaml(tasks=None, variants=None): + """Generate the yaml for a given set of tasks and variants.""" + project = EvgProject(tasks=tasks, buildvariants=variants) + out = ShrubService.generate_yaml(project) + # Dedent by two spaces to match what we use in config.yml + lines = [line[2:] for line in out.splitlines()] + print("\n".join(lines)) # noqa: T201 + + ############## # Variants ############## @@ -159,9 +194,63 @@ def create_ocsp_variants() -> list[BuildVariant]: return variants +def create_server_variants() -> list[BuildVariant]: + variants = [] + + # Run the full matrix on linux with min and max CPython, and latest pypy. + host = "rhel8" + for python, (auth, ssl) in product([*MIN_MAX_PYTHON, PYPYS[-1]], AUTH_SSLS): + display_name = f"Test {host}" + expansions = dict(AUTH=auth, SSL=ssl, COVERAGE="coverage") + display_name = get_display_name("Test", host, python=python, **expansions) + variant = create_variant( + [f".{t}" for t in TOPOLOGIES], + display_name, + python=python, + host=host, + tags=["coverage_tag"], + expansions=expansions, + ) + variants.append(variant) + + # Test the rest of the pythons on linux. + for python, (auth, ssl), topology in zip_cycle( + CPYTHONS[1:-1] + PYPYS[:-1], AUTH_SSLS, TOPOLOGIES + ): + display_name = f"Test {host}" + expansions = dict(AUTH=auth, SSL=ssl) + display_name = get_display_name("Test", host, python=python, **expansions) + variant = create_variant( + [f".{topology}"], + display_name, + python=python, + host=host, + expansions=expansions, + ) + variants.append(variant) + + # Test a subset on each of the other platforms. + for host in ("macos", "macos-arm64", "win64", "win32"): + for (python, (auth, ssl), topology), sync in product( + zip_cycle(MIN_MAX_PYTHON, AUTH_SSLS, TOPOLOGIES), SYNCS + ): + test_suite = "default" if sync == "sync" else "default_async" + expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite) + display_name = get_display_name("Test", host, python=python, **expansions) + variant = create_variant( + [f".{topology}"], + display_name, + python=python, + host=host, + expansions=expansions, + ) + variants.append(variant) + + return variants + + ################## # Generate Config ################## -project = EvgProject(tasks=None, buildvariants=create_ocsp_variants()) -print(ShrubService.generate_yaml(project)) # noqa: T201 +generate_yaml(variants=create_server_variants()) From 3855effbd844a4c48ca2d13f651ce6dd908b14a3 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 15 Oct 2024 15:16:42 -0400 Subject: [PATCH 1553/2111] PYTHON-4842 - Convert test.test_create_entities to async (#1919) --- test/asynchronous/test_create_entities.py | 128 ++++++++++++++++++++++ test/test_create_entities.py | 2 + tools/synchro.py | 1 + 3 files changed, 131 insertions(+) create mode 100644 test/asynchronous/test_create_entities.py diff --git a/test/asynchronous/test_create_entities.py b/test/asynchronous/test_create_entities.py new file mode 100644 index 0000000000..cb2ec63f4c --- /dev/null +++ b/test/asynchronous/test_create_entities.py @@ -0,0 +1,128 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest +from test.asynchronous.unified_format import UnifiedSpecTestMixinV1 + +_IS_SYNC = False + + +class TestCreateEntities(AsyncIntegrationTest): + async def test_store_events_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "blank", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], + } + }, + ], + "tests": [{"description": "foo", "operations": []}], + } + self.scenario_runner.TEST_SPEC = spec + await self.scenario_runner.asyncSetUp() + await self.scenario_runner.run_scenario(spec["tests"][0]) + await self.scenario_runner.entity_map["client0"].close() + final_entity_map = self.scenario_runner.entity_map + self.assertIn("events1", final_entity_map) + self.assertGreater(len(final_entity_map["events1"]), 0) + for event in final_entity_map["events1"]: + self.assertIn("PoolCreatedEvent", event["name"]) + + async def test_store_all_others_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "Find", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": {"retryReads": True}, + } + }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "dat", + } + }, + ], + "tests": [ + { + "description": "test loops", + "operations": [ + { + "name": "loop", + "object": "testRunner", + "arguments": { + "storeIterationsAsEntity": "iterations", + "storeSuccessesAsEntity": "successes", + "storeFailuresAsEntity": "failures", + "storeErrorsAsEntity": "errors", + "numIterations": 5, + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 2, "x": 44}}, + }, + ], + }, + } + ], + } + ], + } + + await self.client.dat.dat.delete_many({}) + self.scenario_runner.TEST_SPEC = spec + await self.scenario_runner.asyncSetUp() + await self.scenario_runner.run_scenario(spec["tests"][0]) + await self.scenario_runner.entity_map["client0"].close() + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_create_entities.py b/test/test_create_entities.py index b7965d4a1d..ad75fe5702 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -21,6 +21,8 @@ from test import IntegrationTest from test.unified_format import UnifiedSpecTestMixinV1 +_IS_SYNC = True + class TestCreateEntities(IntegrationTest): def test_store_events_as_entities(self): diff --git a/tools/synchro.py b/tools/synchro.py index 25f506ed5a..2123a66616 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -199,6 +199,7 @@ def async_only_test(f: str) -> bool: "test_common.py", "test_connection_logging.py", "test_connections_survive_primary_stepdown_spec.py", + "test_create_entities.py", "test_crud_unified.py", "test_cursor.py", "test_database.py", From fa263dc87dfe13f1c2de14ab86c67871ed3b24fb Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 15 Oct 2024 15:48:05 -0400 Subject: [PATCH 1554/2111] PYTHON-4847 - Convert test.test_collection_management.py to async (#1916) --- .../test_collection_management.py | 41 +++++++++++++++++++ test/asynchronous/unified_format.py | 2 +- test/test_collection_management.py | 12 +++++- test/unified_format.py | 2 +- tools/synchro.py | 1 + 5 files changed, 54 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_collection_management.py diff --git a/test/asynchronous/test_collection_management.py b/test/asynchronous/test_collection_management.py new file mode 100644 index 0000000000..c0edf91581 --- /dev/null +++ b/test/asynchronous/test_collection_management.py @@ -0,0 +1,41 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection management unified spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "collection_management") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "collection_management" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 42bda59cb2..8f32ac4a2e 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -773,7 +773,7 @@ async def _databaseOperation_listCollections(self, target, *args, **kwargs): if "batch_size" in kwargs: kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} cursor = await target.list_collections(*args, **kwargs) - return list(cursor) + return await cursor.to_list() async def _databaseOperation_createCollection(self, target, *args, **kwargs): # PYTHON-1936 Ignore the listCollections event from create_collection. diff --git a/test/test_collection_management.py b/test/test_collection_management.py index 0eacde1302..063c20df8f 100644 --- a/test/test_collection_management.py +++ b/test/test_collection_management.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -23,11 +24,18 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "collection_management") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "collection_management") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "collection_management" + ) # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index 13ab0af69b..be7fc1f8ad 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -769,7 +769,7 @@ def _databaseOperation_listCollections(self, target, *args, **kwargs): if "batch_size" in kwargs: kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} cursor = target.list_collections(*args, **kwargs) - return list(cursor) + return cursor.to_list() def _databaseOperation_createCollection(self, target, *args, **kwargs): # PYTHON-1936 Ignore the listCollections event from create_collection. diff --git a/tools/synchro.py b/tools/synchro.py index 2123a66616..0a7109c6d4 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -192,6 +192,7 @@ def async_only_test(f: str) -> bool: "test_client_context.py", "test_collation.py", "test_collection.py", + "test_collection_management.py", "test_command_logging.py", "test_command_logging.py", "test_command_monitoring.py", From 8034baec90043c1d3cf4dc17a5481a559743c524 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Tue, 15 Oct 2024 18:45:49 -0400 Subject: [PATCH 1555/2111] PYTHON-4834 Add __repr__ to IndexModel, SearchIndexModel (#1909) --- doc/changelog.rst | 2 ++ pymongo/operations.py | 13 +++++++ test/test_operations.py | 80 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) create mode 100644 test/test_operations.py diff --git a/doc/changelog.rst b/doc/changelog.rst index 44d6fc9a57..3935fa3492 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -17,6 +17,8 @@ PyMongo 4.11 brings a number of changes including: - :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and :attr:`~pymongo.mongo_client.MongoClient.address` now correctly block when called on unconnected clients until either connection succeeds or a server selection timeout error is raised. +- Added :func:`repr` support to :class:`pymongo.operations.IndexModel`. +- Added :func:`repr` support to :class:`pymongo.operations.SearchIndexModel`. Issues Resolved ............... diff --git a/pymongo/operations.py b/pymongo/operations.py index d2e1feba69..384ffc94be 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -773,6 +773,13 @@ def document(self) -> dict[str, Any]: """ return self.__document + def __repr__(self) -> str: + return "{}({}{})".format( + self.__class__.__name__, + self.document["key"], + "".join([f", {key}={value!r}" for key, value in self.document.items() if key != "key"]), + ) + class SearchIndexModel: """Represents a search index to create.""" @@ -812,3 +819,9 @@ def __init__( def document(self) -> Mapping[str, Any]: """The document for this index.""" return self.__document + + def __repr__(self) -> str: + return "{}({})".format( + self.__class__.__name__, + ", ".join([f"{key}={value!r}" for key, value in self.document.items()]), + ) diff --git a/test/test_operations.py b/test/test_operations.py new file mode 100644 index 0000000000..3ee6677735 --- /dev/null +++ b/test/test_operations.py @@ -0,0 +1,80 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the operations module.""" +from __future__ import annotations + +from test import UnitTest, unittest + +from pymongo import ASCENDING, DESCENDING +from pymongo.collation import Collation +from pymongo.errors import OperationFailure +from pymongo.operations import IndexModel, SearchIndexModel + + +class TestOperationsBase(UnitTest): + """Base class for testing operations module.""" + + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + +class TestIndexModel(TestOperationsBase): + """Test IndexModel features.""" + + def test_repr(self): + # Based on examples in test_collection.py + self.assertRepr(IndexModel("hello")) + self.assertRepr(IndexModel([("hello", DESCENDING), ("world", ASCENDING)])) + self.assertRepr( + IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") + ) + # Test all the kwargs + self.assertRepr(IndexModel("name", name="name")) + self.assertRepr(IndexModel("unique", unique=False)) + self.assertRepr(IndexModel("background", background=True)) + self.assertRepr(IndexModel("sparse", sparse=True)) + self.assertRepr(IndexModel("bucketSize", bucketSize=1)) + self.assertRepr(IndexModel("min", min=1)) + self.assertRepr(IndexModel("max", max=1)) + self.assertRepr(IndexModel("expireAfterSeconds", expireAfterSeconds=1)) + self.assertRepr( + IndexModel("partialFilterExpression", partialFilterExpression={"hello": "world"}) + ) + self.assertRepr(IndexModel("collation", collation=Collation(locale="en_US"))) + self.assertRepr(IndexModel("wildcardProjection", wildcardProjection={"$**": 1})) + self.assertRepr(IndexModel("hidden", hidden=False)) + # Test string literal + self.assertEqual(repr(IndexModel("hello")), "IndexModel({'hello': 1}, name='hello_1')") + self.assertEqual( + repr(IndexModel({"hello": 1, "world": -1})), + "IndexModel({'hello': 1, 'world': -1}, name='hello_1_world_-1')", + ) + + +class TestSearchIndexModel(TestOperationsBase): + """Test SearchIndexModel features.""" + + def test_repr(self): + self.assertRepr(SearchIndexModel({"hello": "hello"}, key=1)) + self.assertEqual( + repr(SearchIndexModel({"hello": "hello"}, key=1)), + "SearchIndexModel(definition={'hello': 'hello'}, key=1)", + ) + + +if __name__ == "__main__": + unittest.main() From 463518bf8136264fbed34e3c3ddbc0a34d109156 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Wed, 16 Oct 2024 11:02:57 -0400 Subject: [PATCH 1556/2111] PYTHON-4765 Resync server-selection spec (#1935) --- .../operation-id.json | 4 +- .../server_selection_logging/replica-set.json | 2 +- test/server_selection_logging/sharded.json | 2 +- test/server_selection_logging/standalone.json | 930 +----------------- 4 files changed, 6 insertions(+), 932 deletions(-) diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json index ccc2623166..72ebff60d8 100644 --- a/test/server_selection_logging/operation-id.json +++ b/test/server_selection_logging/operation-id.json @@ -197,7 +197,7 @@ } }, { - "level": "debug", + "level": "info", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", @@ -383,7 +383,7 @@ } }, { - "level": "debug", + "level": "info", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json index 830b1ea51a..5eba784bf2 100644 --- a/test/server_selection_logging/replica-set.json +++ b/test/server_selection_logging/replica-set.json @@ -184,7 +184,7 @@ } }, { - "level": "debug", + "level": "info", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json index 346c050f9e..d42fba9100 100644 --- a/test/server_selection_logging/sharded.json +++ b/test/server_selection_logging/sharded.json @@ -193,7 +193,7 @@ } }, { - "level": "debug", + "level": "info", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json index 3152d0bbf3..3b3eddd841 100644 --- a/test/server_selection_logging/standalone.json +++ b/test/server_selection_logging/standalone.json @@ -47,29 +47,9 @@ } } ], - "initialData": [ - { - "collectionName": "server-selection", - "databaseName": "logging-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], "tests": [ { - "description": "A successful insert operation", + "description": "A successful operation", "operations": [ { "name": "waitForEvent", @@ -211,7 +191,7 @@ } }, { - "level": "debug", + "level": "info", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", @@ -250,912 +230,6 @@ ] } ] - }, - { - "description": "A successful find operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - } - } - } - - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful findAndModify operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - }, - "replacement": { - "x": 11 - } - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "findAndModify", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "findAndModify", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful find and getMore operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "batchSize": 3 - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "getMore", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "getMore", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful aggregate operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - } - ] - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "aggregate", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "aggregate", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful count operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "count", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "count", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful distinct operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "distinct", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "distinct", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "Successful collection management operations", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "foo" - } - }, - { - "name": "listCollections", - "object": "database" - }, - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "foo" - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "create", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "create", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "listCollections", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "listCollections", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "drop", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "drop", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "Successful index operations", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - }, - { - "name": "listIndexes", - "object": "collection" - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "createIndexes", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "createIndexes", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "listIndexes", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "listIndexes", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "dropIndexes", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "dropIndexes", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful update operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "update", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "update", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful delete operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - } - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "delete", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "delete", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] } ] } From d1375d4178822c376ce3beb0f5987dd7894a03aa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Oct 2024 13:41:35 -0500 Subject: [PATCH 1557/2111] PYTHON-4865 Skip test_write_concern_failure tests temporarily (#1936) --- test/asynchronous/test_bulk.py | 6 ++++++ test/test_bulk.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 42a3311072..c9ff167b43 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -971,6 +971,9 @@ async def cause_wtimeout(self, requests, ordered): @async_client_context.require_replica_set @async_client_context.require_secondaries_count(1) async def test_write_concern_failure_ordered(self): + self.skipTest("Skipping until PYTHON-4865 is resolved.") + details = None + # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) result = await coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})]) @@ -1051,6 +1054,9 @@ async def test_write_concern_failure_ordered(self): @async_client_context.require_replica_set @async_client_context.require_secondaries_count(1) async def test_write_concern_failure_unordered(self): + self.skipTest("Skipping until PYTHON-4865 is resolved.") + details = None + # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) result = await coll_ww.bulk_write( diff --git a/test/test_bulk.py b/test/test_bulk.py index 64fd48e8cd..ea2b803804 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -969,6 +969,9 @@ def cause_wtimeout(self, requests, ordered): @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_ordered(self): + self.skipTest("Skipping until PYTHON-4865 is resolved.") + details = None + # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})]) @@ -1049,6 +1052,9 @@ def test_write_concern_failure_ordered(self): @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_unordered(self): + self.skipTest("Skipping until PYTHON-4865 is resolved.") + details = None + # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})], ordered=False) From 29064f5b1d85cbea872a6c37023e3d5fa25b9a3d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 16 Oct 2024 12:15:48 -0700 Subject: [PATCH 1558/2111] PYTHON-4873 Remove bson-stdint-win32.h from THIRD-PARTY-NOTICES (#1937) --- THIRD-PARTY-NOTICES | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 0b9fc738ed..55b8ff7078 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -38,36 +38,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -2) License Notice for bson-stdint-win32.h ------------------------------------------ - -ISO C9x compliant stdint.h for Microsoft Visual Studio -Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 - - Copyright (c) 2006-2013 Alexander Chemeris - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the product nor the names of its contributors may - be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. From 6f4258c1cdb95f6fe624a66760a66423048b6884 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Oct 2024 16:41:14 -0500 Subject: [PATCH 1559/2111] PYTHON-4576 Allow update to supply sort option (#1881) --- doc/changelog.rst | 4 + pymongo/asynchronous/bulk.py | 13 + pymongo/asynchronous/client_bulk.py | 9 + pymongo/asynchronous/collection.py | 25 + pymongo/operations.py | 44 +- pymongo/synchronous/bulk.py | 13 + pymongo/synchronous/client_bulk.py | 9 + pymongo/synchronous/collection.py | 25 + .../aggregate-write-readPreference.json | 69 --- .../unified/bulkWrite-replaceOne-sort.json | 239 ++++++++ .../unified/bulkWrite-updateOne-sort.json | 255 +++++++++ .../client-bulkWrite-partialResults.json | 540 ++++++++++++++++++ .../client-bulkWrite-replaceOne-sort.json | 162 ++++++ .../client-bulkWrite-updateOne-sort.json | 166 ++++++ .../db-aggregate-write-readPreference.json | 51 -- test/crud/unified/replaceOne-sort.json | 232 ++++++++ test/crud/unified/updateOne-sort.json | 240 ++++++++ test/utils.py | 4 - 18 files changed, 1967 insertions(+), 133 deletions(-) create mode 100644 test/crud/unified/bulkWrite-replaceOne-sort.json create mode 100644 test/crud/unified/bulkWrite-updateOne-sort.json create mode 100644 test/crud/unified/client-bulkWrite-partialResults.json create mode 100644 test/crud/unified/client-bulkWrite-replaceOne-sort.json create mode 100644 test/crud/unified/client-bulkWrite-updateOne-sort.json create mode 100644 test/crud/unified/replaceOne-sort.json create mode 100644 test/crud/unified/updateOne-sort.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 3935fa3492..4c1955d19d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -19,6 +19,10 @@ PyMongo 4.11 brings a number of changes including: until either connection succeeds or a server selection timeout error is raised. - Added :func:`repr` support to :class:`pymongo.operations.IndexModel`. - Added :func:`repr` support to :class:`pymongo.operations.SearchIndexModel`. +- Added ``sort`` parameter to + :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.replace_one`, + :class:`~pymongo.operations.UpdateOne`, and + :class:`~pymongo.operations.UpdateMany`, Issues Resolved ............... diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index 9d33a990ed..e6cfe5b36e 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -109,6 +109,7 @@ def __init__( self.uses_array_filters = False self.uses_hint_update = False self.uses_hint_delete = False + self.uses_sort = False self.is_retryable = True self.retrying = False self.started_retryable_write = False @@ -144,6 +145,7 @@ def add_update( collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) @@ -159,6 +161,9 @@ def add_update( if hint is not None: self.uses_hint_update = True cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False @@ -171,6 +176,7 @@ def add_replace( upsert: bool = False, collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) @@ -181,6 +187,9 @@ def add_replace( if hint is not None: self.uses_hint_update = True cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort self.ops.append((_UPDATE, cmd)) def add_delete( @@ -699,6 +708,10 @@ async def execute_no_results( raise ConfigurationError( "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." ) + if unack and self.uses_sort and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: raise OperationFailure( diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index dc800c9549..96571c21eb 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -118,6 +118,7 @@ def __init__( self.uses_array_filters = False self.uses_hint_update = False self.uses_hint_delete = False + self.uses_sort = False self.is_retryable = self.client.options.retry_writes self.retrying = False @@ -148,6 +149,7 @@ def add_update( collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) @@ -169,6 +171,9 @@ def add_update( if collation is not None: self.uses_collation = True cmd["collation"] = collation + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False @@ -184,6 +189,7 @@ def add_replace( upsert: Optional[bool] = None, collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) @@ -202,6 +208,9 @@ def add_replace( if collation is not None: self.uses_collation = True cmd["collation"] = collation + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort self.ops.append(("replace", cmd)) self.namespaces.append(namespace) self.total_ops += 1 diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 4ddcbab4d2..9b73423627 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -993,6 +993,7 @@ async def _update( session: Optional[AsyncClientSession] = None, retryable_write: bool = False, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" @@ -1024,6 +1025,14 @@ async def _update( if not isinstance(hint, str): hint = helpers_shared._index_document(hint) update_doc["hint"] = hint + if sort is not None: + if not acknowledged and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + common.validate_is_mapping("sort", sort) + update_doc["sort"] = sort + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} if let is not None: common.validate_is_mapping("let", let) @@ -1079,6 +1088,7 @@ async def _update_retryable( hint: Optional[_IndexKeyHint] = None, session: Optional[AsyncClientSession] = None, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" @@ -1102,6 +1112,7 @@ async def _update( session=session, retryable_write=retryable_write, let=let, + sort=sort, comment=comment, ) @@ -1122,6 +1133,7 @@ async def replace_one( hint: Optional[_IndexKeyHint] = None, session: Optional[AsyncClientSession] = None, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: """Replace a single document matching the filter. @@ -1176,8 +1188,13 @@ async def replace_one( aggregate expression context (e.g. "$$var"). :param comment: A user-provided comment to attach to this command. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. :return: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.11 + Added ``sort`` parameter. .. versionchanged:: 4.1 Added ``let`` parameter. Added ``comment`` parameter. @@ -1209,6 +1226,7 @@ async def replace_one( hint=hint, session=session, let=let, + sort=sort, comment=comment, ), write_concern.acknowledged, @@ -1225,6 +1243,7 @@ async def update_one( hint: Optional[_IndexKeyHint] = None, session: Optional[AsyncClientSession] = None, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: """Update a single document matching the filter. @@ -1283,11 +1302,16 @@ async def update_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. :param comment: A user-provided comment to attach to this command. :return: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.11 + Added ``sort`` parameter. .. versionchanged:: 4.1 Added ``let`` parameter. Added ``comment`` parameter. @@ -1322,6 +1346,7 @@ async def update_one( hint=hint, session=session, let=let, + sort=sort, comment=comment, ), write_concern.acknowledged, diff --git a/pymongo/operations.py b/pymongo/operations.py index 384ffc94be..8905048c4e 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -325,6 +325,7 @@ class ReplaceOne(Generic[_DocumentType]): "_collation", "_hint", "_namespace", + "_sort", ) def __init__( @@ -335,6 +336,7 @@ def __init__( collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, namespace: Optional[str] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a ReplaceOne instance. @@ -353,8 +355,12 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. :param namespace: (optional) The namespace in which to replace a document. + .. versionchanged:: 4.10 + Added ``sort`` option. .. versionchanged:: 4.9 Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 @@ -371,6 +377,7 @@ def __init__( else: self._hint = hint + self._sort = sort self._filter = filter self._doc = replacement self._upsert = upsert @@ -385,6 +392,7 @@ def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: self._upsert, collation=validate_collation_or_none(self._collation), hint=self._hint, + sort=self._sort, ) def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: @@ -400,6 +408,7 @@ def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: self._upsert, collation=validate_collation_or_none(self._collation), hint=self._hint, + sort=self._sort, ) def __eq__(self, other: Any) -> bool: @@ -411,13 +420,15 @@ def __eq__(self, other: Any) -> bool: other._collation, other._hint, other._namespace, + other._sort, ) == ( self._filter, self._doc, self._upsert, self._collation, - other._hint, + self._hint, self._namespace, + self._sort, ) return NotImplemented @@ -426,7 +437,7 @@ def __ne__(self, other: Any) -> bool: def __repr__(self) -> str: if self._namespace: - return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -434,14 +445,16 @@ def __repr__(self) -> str: self._collation, self._hint, self._namespace, + self._sort, ) - return "{}({!r}, {!r}, {!r}, {!r}, {!r})".format( + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, self._upsert, self._collation, self._hint, + self._sort, ) @@ -456,6 +469,7 @@ class _UpdateOp: "_array_filters", "_hint", "_namespace", + "_sort", ) def __init__( @@ -467,6 +481,7 @@ def __init__( array_filters: Optional[list[Mapping[str, Any]]], hint: Optional[_IndexKeyHint], namespace: Optional[str], + sort: Optional[Mapping[str, Any]], ): if filter is not None: validate_is_mapping("filter", filter) @@ -478,13 +493,13 @@ def __init__( self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) else: self._hint = hint - self._filter = filter self._doc = doc self._upsert = upsert self._collation = collation self._array_filters = array_filters self._namespace = namespace + self._sort = sort def __eq__(self, other: object) -> bool: if isinstance(other, type(self)): @@ -496,6 +511,7 @@ def __eq__(self, other: object) -> bool: other._array_filters, other._hint, other._namespace, + other._sort, ) == ( self._filter, self._doc, @@ -504,6 +520,7 @@ def __eq__(self, other: object) -> bool: self._array_filters, self._hint, self._namespace, + self._sort, ) return NotImplemented @@ -512,7 +529,7 @@ def __ne__(self, other: Any) -> bool: def __repr__(self) -> str: if self._namespace: - return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -521,8 +538,9 @@ def __repr__(self) -> str: self._array_filters, self._hint, self._namespace, + self._sort, ) - return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -530,6 +548,7 @@ def __repr__(self) -> str: self._collation, self._array_filters, self._hint, + self._sort, ) @@ -547,6 +566,7 @@ def __init__( array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, namespace: Optional[str] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Represents an update_one operation. @@ -567,8 +587,12 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. - :param namespace: (optional) The namespace in which to update a document. + :param namespace: The namespace in which to update a document. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + .. versionchanged:: 4.10 + Added ``sort`` option. .. versionchanged:: 4.9 Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 @@ -580,7 +604,7 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super().__init__(filter, update, upsert, collation, array_filters, hint, namespace) + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace, sort) def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" @@ -592,6 +616,7 @@ def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, + sort=self._sort, ) def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: @@ -609,6 +634,7 @@ def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, + sort=self._sort, ) @@ -659,7 +685,7 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super().__init__(filter, update, upsert, collation, array_filters, hint, namespace) + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace, None) def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index c658157ea1..7fb29a977f 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -109,6 +109,7 @@ def __init__( self.uses_array_filters = False self.uses_hint_update = False self.uses_hint_delete = False + self.uses_sort = False self.is_retryable = True self.retrying = False self.started_retryable_write = False @@ -144,6 +145,7 @@ def add_update( collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) @@ -159,6 +161,9 @@ def add_update( if hint is not None: self.uses_hint_update = True cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False @@ -171,6 +176,7 @@ def add_replace( upsert: bool = False, collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) @@ -181,6 +187,9 @@ def add_replace( if hint is not None: self.uses_hint_update = True cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort self.ops.append((_UPDATE, cmd)) def add_delete( @@ -697,6 +706,10 @@ def execute_no_results( raise ConfigurationError( "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." ) + if unack and self.uses_sort and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) # Cannot have both unacknowledged writes and bypass document validation. if self.bypass_doc_val: raise OperationFailure( diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index f41f0203f2..2c38b1d76c 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -118,6 +118,7 @@ def __init__( self.uses_array_filters = False self.uses_hint_update = False self.uses_hint_delete = False + self.uses_sort = False self.is_retryable = self.client.options.retry_writes self.retrying = False @@ -148,6 +149,7 @@ def add_update( collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) @@ -169,6 +171,9 @@ def add_update( if collation is not None: self.uses_collation = True cmd["collation"] = collation + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort if multi: # A bulk_write containing an update_many is not retryable. self.is_retryable = False @@ -184,6 +189,7 @@ def add_replace( upsert: Optional[bool] = None, collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) @@ -202,6 +208,9 @@ def add_replace( if collation is not None: self.uses_collation = True cmd["collation"] = collation + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort self.ops.append(("replace", cmd)) self.namespaces.append(namespace) self.total_ops += 1 diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 6fd2ac82dd..6edfddc9a9 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -992,6 +992,7 @@ def _update( session: Optional[ClientSession] = None, retryable_write: bool = False, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" @@ -1023,6 +1024,14 @@ def _update( if not isinstance(hint, str): hint = helpers_shared._index_document(hint) update_doc["hint"] = hint + if sort is not None: + if not acknowledged and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + common.validate_is_mapping("sort", sort) + update_doc["sort"] = sort + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} if let is not None: common.validate_is_mapping("let", let) @@ -1078,6 +1087,7 @@ def _update_retryable( hint: Optional[_IndexKeyHint] = None, session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> Optional[Mapping[str, Any]]: """Internal update / replace helper.""" @@ -1101,6 +1111,7 @@ def _update( session=session, retryable_write=retryable_write, let=let, + sort=sort, comment=comment, ) @@ -1121,6 +1132,7 @@ def replace_one( hint: Optional[_IndexKeyHint] = None, session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: """Replace a single document matching the filter. @@ -1175,8 +1187,13 @@ def replace_one( aggregate expression context (e.g. "$$var"). :param comment: A user-provided comment to attach to this command. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. :return: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.11 + Added ``sort`` parameter. .. versionchanged:: 4.1 Added ``let`` parameter. Added ``comment`` parameter. @@ -1208,6 +1225,7 @@ def replace_one( hint=hint, session=session, let=let, + sort=sort, comment=comment, ), write_concern.acknowledged, @@ -1224,6 +1242,7 @@ def update_one( hint: Optional[_IndexKeyHint] = None, session: Optional[ClientSession] = None, let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, ) -> UpdateResult: """Update a single document matching the filter. @@ -1282,11 +1301,16 @@ def update_one( constant or closed expressions that do not reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. :param comment: A user-provided comment to attach to this command. :return: - An instance of :class:`~pymongo.results.UpdateResult`. + .. versionchanged:: 4.11 + Added ``sort`` parameter. .. versionchanged:: 4.1 Added ``let`` parameter. Added ``comment`` parameter. @@ -1321,6 +1345,7 @@ def update_one( hint=hint, session=session, let=let, + sort=sort, comment=comment, ), write_concern.acknowledged, diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json index bc887e83cb..c1fa3b4574 100644 --- a/test/crud/unified/aggregate-write-readPreference.json +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -78,11 +78,6 @@ "x": 33 } ] - }, - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [] } ], "tests": [ @@ -159,22 +154,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -250,22 +229,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -344,22 +307,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -438,22 +385,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] } ] diff --git a/test/crud/unified/bulkWrite-replaceOne-sort.json b/test/crud/unified/bulkWrite-replaceOne-sort.json new file mode 100644 index 0000000000..c0bd383514 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-sort.json @@ -0,0 +1,239 @@ +{ + "description": "BulkWrite replaceOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-sort.json b/test/crud/unified/bulkWrite-updateOne-sort.json new file mode 100644 index 0000000000..f78bd3bf3e --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-sort.json @@ -0,0 +1,255 @@ +{ + "description": "BulkWrite updateOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": [ + { + "$set": { + "x": 1 + } + } + ] + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": [ + { + "$set": { + "x": 1 + } + } + ], + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": [ + { + "$set": { + "x": 1 + } + } + ] + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": [ + { + "$set": { + "x": 1 + } + } + ], + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-partialResults.json b/test/crud/unified/client-bulkWrite-partialResults.json new file mode 100644 index 0000000000..b35e94a2ea --- /dev/null +++ b/test/crud/unified/client-bulkWrite-partialResults.json @@ -0,0 +1,540 @@ +{ + "description": "client bulkWrite partial results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "newDocument": { + "_id": 2, + "x": 22 + } + }, + "tests": [ + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is unset when all operations fail during an unordered bulk write", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "1": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-replaceOne-sort.json b/test/crud/unified/client-bulkWrite-replaceOne-sort.json new file mode 100644 index 0000000000..53218c1f48 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-replaceOne-sort.json @@ -0,0 +1,162 @@ +{ + "description": "client bulkWrite updateOne-sort", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne with sort option", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "updateMods": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "nErrors": 0, + "nMatched": 1, + "nModified": 1 + }, + "commandName": "bulkWrite" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-updateOne-sort.json b/test/crud/unified/client-bulkWrite-updateOne-sort.json new file mode 100644 index 0000000000..4a07b8b97c --- /dev/null +++ b/test/crud/unified/client-bulkWrite-updateOne-sort.json @@ -0,0 +1,166 @@ +{ + "description": "client bulkWrite updateOne-sort", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with sort option", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "nErrors": 0, + "nMatched": 1, + "nModified": 1 + }, + "commandName": "bulkWrite" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json index 2a81282de8..b6460f001f 100644 --- a/test/crud/unified/db-aggregate-write-readPreference.json +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -52,13 +52,6 @@ } } ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [] - } - ], "tests": [ { "description": "Database-level aggregate with $out includes read preference for 5.0+ server", @@ -141,17 +134,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -235,17 +217,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -332,17 +303,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -429,17 +389,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] } ] diff --git a/test/crud/unified/replaceOne-sort.json b/test/crud/unified/replaceOne-sort.json new file mode 100644 index 0000000000..cf2271dda5 --- /dev/null +++ b/test/crud/unified/replaceOne-sort.json @@ -0,0 +1,232 @@ +{ + "description": "replaceOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "replaceOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-sort.json b/test/crud/unified/updateOne-sort.json new file mode 100644 index 0000000000..8fe4f50b94 --- /dev/null +++ b/test/crud/unified/updateOne-sort.json @@ -0,0 +1,240 @@ +{ + "description": "updateOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "updateOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/utils.py b/test/utils.py index 4575a9fe10..3eac4fa509 100644 --- a/test/utils.py +++ b/test/utils.py @@ -958,10 +958,6 @@ def parse_spec_options(opts): def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): for arg_name in list(arguments): c2s = camel_to_snake(arg_name) - # PyMongo accepts sort as list of tuples. - if arg_name == "sort": - sort_dict = arguments[arg_name] - arguments[arg_name] = list(sort_dict.items()) # Named "key" instead not fieldName. if arg_name == "fieldName": arguments["key"] = arguments.pop(arg_name) From 8ce21bc1217f4c3c722a50d81c08ec33ca9d46dc Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Oct 2024 09:18:01 -0500 Subject: [PATCH 1560/2111] PYTHON-4872 Use shrub.py to generate encryption tasks (#1938) --- .evergreen/config.yml | 387 ++++++++++++++++++++------ .evergreen/scripts/generate_config.py | 88 +++++- 2 files changed, 371 insertions(+), 104 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c3427e66d0..54a1ff3368 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2322,32 +2322,6 @@ axes: variables: COVERAGE: "coverage" - # Run encryption tests? - - id: encryption - display_name: "Encryption" - values: - - id: "encryption" - display_name: "Encryption" - tags: ["encryption_tag"] - variables: - test_encryption: true - batchtime: 10080 # 7 days - - id: "encryption_pyopenssl" - display_name: "Encryption PyOpenSSL" - tags: ["encryption_tag"] - variables: - test_encryption: true - test_encryption_pyopenssl: true - batchtime: 10080 # 7 days - # The path to crypt_shared is stored in the $CRYPT_SHARED_LIB_PATH expansion. - - id: "encryption_crypt_shared" - display_name: "Encryption shared lib" - tags: ["encryption_tag"] - variables: - test_encryption: true - test_crypt_shared: true - batchtime: 10080 # 7 days - # Run pyopenssl tests? - id: pyopenssl display_name: "PyOpenSSL" @@ -2864,6 +2838,303 @@ buildvariants: PYTHON_BINARY: C:/python/32/Python39/python.exe SKIP_CSOT_TESTS: "true" +# Encryption tests. +- name: encryption-rhel8-py3.9-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption RHEL8 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [encryption_tag] +- name: encryption-rhel8-py3.13-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption RHEL8 py3.13 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [encryption_tag] +- name: encryption-rhel8-pypy3.10-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption RHEL8 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [encryption_tag] +- name: encryption-crypt_shared-rhel8-py3.9-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption crypt_shared RHEL8 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [encryption_tag] +- name: encryption-crypt_shared-rhel8-py3.13-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption crypt_shared RHEL8 py3.13 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [encryption_tag] +- name: encryption-crypt_shared-rhel8-pypy3.10-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption crypt_shared RHEL8 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [encryption_tag] +- name: encryption-pyopenssl-rhel8-py3.9-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption PyOpenSSL RHEL8 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [encryption_tag] +- name: encryption-pyopenssl-rhel8-py3.13-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption PyOpenSSL RHEL8 py3.13 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [encryption_tag] +- name: encryption-pyopenssl-rhel8-pypy3.10-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption PyOpenSSL RHEL8 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [encryption_tag] +- name: encryption-rhel8-py3.10-auth-ssl + tasks: + - name: .replica_set + display_name: Encryption RHEL8 py3.10 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: encryption-crypt_shared-rhel8-py3.11-auth-nossl + tasks: + - name: .replica_set + display_name: Encryption crypt_shared RHEL8 py3.11 Auth NoSSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: encryption-pyopenssl-rhel8-py3.12-auth-ssl + tasks: + - name: .replica_set + display_name: Encryption PyOpenSSL RHEL8 py3.12 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + TEST_ENCRYPTION_PYOPENSSL: "true" + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: encryption-rhel8-pypy3.9-auth-nossl + tasks: + - name: .replica_set + display_name: Encryption RHEL8 pypy3.9 Auth NoSSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: encryption-macos-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption macOS py3.9 Auth SSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + tags: [encryption_tag] +- name: encryption-macos-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption macOS py3.13 Auth NoSSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + tags: [encryption_tag] +- name: encryption-crypt_shared-macos-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared macOS py3.9 Auth SSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + tags: [encryption_tag] +- name: encryption-crypt_shared-macos-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared macOS py3.13 Auth NoSSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + tags: [encryption_tag] +- name: encryption-win64-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption Win64 py3.9 Auth SSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + tags: [encryption_tag] +- name: encryption-win64-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption Win64 py3.13 Auth NoSSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + tags: [encryption_tag] +- name: encryption-crypt_shared-win64-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared Win64 py3.9 Auth SSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + tags: [encryption_tag] +- name: encryption-crypt_shared-win64-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared Win64 py3.13 Auth NoSSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + tags: [encryption_tag] + - matrix_name: "tests-fips" matrix_spec: platform: @@ -2874,33 +3145,6 @@ buildvariants: tasks: - "test-fips-standalone" -- matrix_name: "test-macos-encryption" - matrix_spec: - platform: - - macos - auth: "auth" - ssl: "nossl" - encryption: "*" - display_name: "${encryption} ${platform} ${auth} ${ssl}" - tasks: "test-latest-replica_set" - rules: - - if: - encryption: ["encryption", "encryption_crypt_shared"] - platform: macos - auth: "auth" - ssl: "nossl" - then: - add_tasks: &encryption-server-versions - - ".rapid" - - ".latest" - - ".8.0" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - # Test one server version with zSeries, POWER8, and ARM. - matrix_name: "test-different-cpu-architectures" matrix_spec: @@ -2954,26 +3198,6 @@ buildvariants: tasks: - '.replica_set' -- matrix_name: "tests-python-version-rhel8-test-encryption" - matrix_spec: - platform: rhel8 - python-version: "*" - auth-ssl: noauth-nossl -# TODO: dependency error for 'coverage-report' task: -# dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config -# coverage: "*" - encryption: "*" - display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" - tasks: "test-latest-replica_set" - rules: - - if: - encryption: ["encryption", "encryption_crypt_shared"] - platform: rhel8 - auth-ssl: noauth-nossl - python-version: "*" - then: - add_tasks: *encryption-server-versions - - matrix_name: "tests-python-version-rhel8-without-c-extensions" matrix_spec: platform: rhel8 @@ -3057,23 +3281,6 @@ buildvariants: tasks: - ".5.0" -- matrix_name: "tests-windows-encryption" - matrix_spec: - platform: windows - python-version-windows: "*" - auth-ssl: "*" - encryption: "*" - display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" - tasks: "test-latest-replica_set" - rules: - - if: - encryption: ["encryption", "encryption_crypt_shared"] - platform: windows - python-version-windows: "*" - auth-ssl: "*" - then: - add_tasks: *encryption-server-versions - # Storage engine tests on RHEL 8.4 (x86_64) with Python 3.9. - matrix_name: "tests-storage-engines" matrix_spec: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 044303ad8f..dcd97b093e 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -45,18 +45,13 @@ class Host: name: str run_on: str display_name: str - expansions: dict[str, str] -_macos_expansions = dict( # CSOT tests are unreliable on slow hosts. - SKIP_CSOT_TESTS="true" -) - -HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) -HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", _macos_expansions) -HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", _macos_expansions) -HOSTS["macos"] = Host("macos", "macos-14", "macOS", _macos_expansions) -HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", _macos_expansions) +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8") +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64") +HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32") +HOSTS["macos"] = Host("macos", "macos-14", "macOS") +HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64") ############## @@ -84,7 +79,6 @@ def create_variant( expansions["PYTHON_BINARY"] = get_python_binary(python, host) if version: expansions["VERSION"] = version - expansions.update(HOSTS[host].expansions) expansions = expansions or None return BuildVariant( name=name, @@ -129,7 +123,7 @@ def get_display_name(base: str, host: str, **kwargs) -> str: elif key.lower() in DISPLAY_LOOKUP: name = DISPLAY_LOOKUP[key.lower()][value] else: - raise ValueError(f"Missing display handling for {key}") + continue display_name = f"{display_name} {name}" return display_name @@ -235,7 +229,7 @@ def create_server_variants() -> list[BuildVariant]: zip_cycle(MIN_MAX_PYTHON, AUTH_SSLS, TOPOLOGIES), SYNCS ): test_suite = "default" if sync == "sync" else "default_async" - expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite) + expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite, SKIP_CSOT_TESTS="true") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( [f".{topology}"], @@ -249,8 +243,74 @@ def create_server_variants() -> list[BuildVariant]: return variants +def create_encryption_variants() -> list[BuildVariant]: + variants = [] + tags = ["encryption_tag"] + batchtime = BATCHTIME_WEEK + + def get_encryption_expansions(encryption, ssl="ssl"): + expansions = dict(AUTH="auth", SSL=ssl, test_encryption="true") + if "crypt_shared" in encryption: + expansions["test_crypt_shared"] = "true" + if "PyOpenSSL" in encryption: + expansions["test_encryption_pyopenssl"] = "true" + return expansions + + host = "rhel8" + + # Test against all server versions and topolgies for the three main python versions. + encryptions = ["Encryption", "Encryption crypt_shared", "Encryption PyOpenSSL"] + for encryption, python in product(encryptions, [*MIN_MAX_PYTHON, PYPYS[-1]]): + expansions = get_encryption_expansions(encryption) + display_name = get_display_name(encryption, host, python=python, **expansions) + variant = create_variant( + [f".{t}" for t in TOPOLOGIES], + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + + # Test the rest of the pythons on linux for all server versions. + for encryption, python, ssl in zip_cycle( + encryptions, CPYTHONS[1:-1] + PYPYS[:-1], ["ssl", "nossl"] + ): + expansions = get_encryption_expansions(encryption, ssl) + display_name = get_display_name(encryption, host, python=python, **expansions) + variant = create_variant( + [".replica_set"], + display_name, + python=python, + host=host, + expansions=expansions, + ) + variants.append(variant) + + # Test on macos and linux on one server version and topology for min and max python. + encryptions = ["Encryption", "Encryption crypt_shared"] + task_names = [".latest .replica_set"] + for host, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): + ssl = "ssl" if python == CPYTHONS[0] else "nossl" + expansions = get_encryption_expansions(encryption, ssl) + display_name = get_display_name(encryption, host, python=python, **expansions) + variant = create_variant( + task_names, + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + return variants + + ################## # Generate Config ################## -generate_yaml(variants=create_server_variants()) +generate_yaml(variants=create_encryption_variants()) From a62ade864ddc07f1c0ee2782ef07ecfbf07fefd7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 17 Oct 2024 11:32:39 -0400 Subject: [PATCH 1561/2111] PYTHON-4874 - Add KMS support for async Windows (#1939) --- pymongo/network_layer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index d14a21f41d..7a325853c8 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -205,7 +205,7 @@ async def _async_sendall_ssl( total_sent += sent async def _async_receive_ssl( - conn: _sslConn, length: int, dummy: AbstractEventLoop + conn: _sslConn, length: int, dummy: AbstractEventLoop, once: Optional[bool] = False ) -> memoryview: mv = memoryview(bytearray(length)) total_read = 0 @@ -215,6 +215,9 @@ async def _async_receive_ssl( while total_read < length: try: read = conn.recv_into(mv[total_read:]) + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] if read == 0: raise OSError("connection closed") except BLOCKING_IO_ERRORS: From 79033bc0b9a6e404dc7680a03d856f12942ec720 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Oct 2024 10:33:44 -0500 Subject: [PATCH 1562/2111] Revert "PYTHON-4765 Resync server-selection spec" (#1940) --- .../operation-id.json | 4 +- .../server_selection_logging/replica-set.json | 2 +- test/server_selection_logging/sharded.json | 2 +- test/server_selection_logging/standalone.json | 930 +++++++++++++++++- 4 files changed, 932 insertions(+), 6 deletions(-) diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json index 72ebff60d8..ccc2623166 100644 --- a/test/server_selection_logging/operation-id.json +++ b/test/server_selection_logging/operation-id.json @@ -197,7 +197,7 @@ } }, { - "level": "info", + "level": "debug", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", @@ -383,7 +383,7 @@ } }, { - "level": "info", + "level": "debug", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json index 5eba784bf2..830b1ea51a 100644 --- a/test/server_selection_logging/replica-set.json +++ b/test/server_selection_logging/replica-set.json @@ -184,7 +184,7 @@ } }, { - "level": "info", + "level": "debug", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json index d42fba9100..346c050f9e 100644 --- a/test/server_selection_logging/sharded.json +++ b/test/server_selection_logging/sharded.json @@ -193,7 +193,7 @@ } }, { - "level": "info", + "level": "debug", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json index 3b3eddd841..3152d0bbf3 100644 --- a/test/server_selection_logging/standalone.json +++ b/test/server_selection_logging/standalone.json @@ -47,9 +47,29 @@ } } ], + "initialData": [ + { + "collectionName": "server-selection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], "tests": [ { - "description": "A successful operation", + "description": "A successful insert operation", "operations": [ { "name": "waitForEvent", @@ -191,7 +211,7 @@ } }, { - "level": "info", + "level": "debug", "component": "serverSelection", "data": { "message": "Waiting for suitable server to become available", @@ -230,6 +250,912 @@ ] } ] + }, + { + "description": "A successful find operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + } + } + } + + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful findAndModify operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + }, + "replacement": { + "x": 11 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "findAndModify", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "findAndModify", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful find and getMore operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "batchSize": 3 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "getMore", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "getMore", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful aggregate operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "aggregate", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "aggregate", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful count operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "count", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "count", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful distinct operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "distinct", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "distinct", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Successful collection management operations", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "foo" + } + }, + { + "name": "listCollections", + "object": "database" + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "foo" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "create", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "create", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "listCollections", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "listCollections", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "drop", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "drop", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Successful index operations", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "listIndexes", + "object": "collection" + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "createIndexes", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "createIndexes", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "listIndexes", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "listIndexes", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "dropIndexes", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "dropIndexes", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful update operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "update", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "update", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A successful delete operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "delete", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "delete", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] } ] } From 257aa2483be980005d4a54f97025baebcb3102f3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Oct 2024 11:01:47 -0500 Subject: [PATCH 1563/2111] PYTHON-4878 Use shrub.py for load balancer tests (#1941) --- .evergreen/config.yml | 218 +++++++++++++++++++++++--- .evergreen/scripts/generate_config.py | 38 ++++- 2 files changed, 230 insertions(+), 26 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 54a1ff3368..ae7c0a6590 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2354,16 +2354,6 @@ axes: variables: ORCHESTRATION_FILE: "versioned-api-testing.json" - # Run load balancer tests? - - id: loadbalancer - display_name: "Load Balancer" - values: - - id: "enabled" - display_name: "Load Balancer" - variables: - test_loadbalancer: true - batchtime: 10080 # 7 days - - id: serverless display_name: "Serverless" values: @@ -3580,6 +3570,203 @@ buildvariants: VERSION: "8.0" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +# Load balancer tests +- name: load-balancer-rhel8-v6.0-py3.9-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v6.0 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "6.0" + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: load-balancer-rhel8-v6.0-py3.10-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v6.0 py3.10 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "6.0" + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: load-balancer-rhel8-v6.0-py3.11-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v6.0 py3.11 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "6.0" + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: load-balancer-rhel8-v7.0-py3.12-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v7.0 py3.12 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "7.0" + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: load-balancer-rhel8-v7.0-py3.13-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v7.0 py3.13 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "7.0" + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: load-balancer-rhel8-v7.0-pypy3.9-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v7.0 pypy3.9 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "7.0" + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: load-balancer-rhel8-v8.0-pypy3.10-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v8.0 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "8.0" + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +- name: load-balancer-rhel8-v8.0-py3.9-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v8.0 py3.9 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "8.0" + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: load-balancer-rhel8-v8.0-py3.10-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v8.0 py3.10 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "8.0" + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: load-balancer-rhel8-latest-py3.11-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 latest py3.11 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: latest + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: load-balancer-rhel8-latest-py3.12-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 latest py3.12 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: latest + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: load-balancer-rhel8-latest-py3.13-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 latest py3.13 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: latest + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: load-balancer-rhel8-rapid-pypy3.9-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 rapid pypy3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: rapid + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: load-balancer-rhel8-rapid-pypy3.10-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 rapid pypy3.10 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: rapid + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +- name: load-balancer-rhel8-rapid-py3.9-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 rapid py3.9 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: rapid + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - matrix_name: "oidc-auth-test" matrix_spec: platform: [ rhel8, macos, windows ] @@ -3643,17 +3830,6 @@ buildvariants: - name: "aws-auth-test-rapid" - name: "aws-auth-test-latest" -- matrix_name: "load-balancer" - matrix_spec: - platform: rhel8 - mongodb-version: ["6.0", "7.0", "8.0", "rapid", "latest"] - auth-ssl: "*" - python-version: "*" - loadbalancer: "*" - display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" - tasks: - - name: "load-balancer-test" - - name: testgcpkms-variant display_name: "GCP KMS" run_on: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index dcd97b093e..03b900301c 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -112,12 +112,14 @@ def get_python_binary(python: str, host: str) -> str: def get_display_name(base: str, host: str, **kwargs) -> str: """Get the display name of a variant.""" display_name = f"{base} {HOSTS[host].display_name}" + version = kwargs.pop("VERSION", None) + if version: + if version not in ["rapid", "latest"]: + version = f"v{version}" + display_name = f"{display_name} {version}" for key, value in kwargs.items(): name = value - if key == "version": - if value not in ["rapid", "latest"]: - name = f"v{value}" - elif key == "python": + if key.lower() == "python": if not value.startswith("pypy"): name = f"py{value}" elif key.lower() in DISPLAY_LOOKUP: @@ -309,8 +311,34 @@ def get_encryption_expansions(encryption, ssl="ssl"): return variants +def create_load_balancer_variants(): + # Load balancer tests - run all supported versions for all combinations of auth and ssl and system python. + host = "rhel8" + task_names = ["load-balancer-test"] + batchtime = BATCHTIME_WEEK + expansions_base = dict(test_loadbalancer="true") + versions = ["6.0", "7.0", "8.0", "latest", "rapid"] + variants = [] + pythons = CPYTHONS + PYPYS + for ind, (version, (auth, ssl)) in enumerate(product(versions, AUTH_SSLS)): + expansions = dict(VERSION=version, AUTH=auth, SSL=ssl) + expansions.update(expansions_base) + python = pythons[ind % len(pythons)] + display_name = get_display_name("Load Balancer", host, python=python, **expansions) + variant = create_variant( + task_names, + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + ) + variants.append(variant) + return variants + + ################## # Generate Config ################## -generate_yaml(variants=create_encryption_variants()) +generate_yaml(variants=create_load_balancer_variants()) From 317a539415a91a4057c9104a2192d05fb47af2e1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Oct 2024 15:01:24 -0500 Subject: [PATCH 1564/2111] PYTHON-4879 Use shrub.py for compressor tests (#1944) --- .evergreen/config.yml | 135 ++++++++++++++++---------- .evergreen/scripts/generate_config.py | 51 +++++++++- 2 files changed, 136 insertions(+), 50 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ae7c0a6590..a7efc223b9 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2112,23 +2112,6 @@ axes: AUTH: "noauth" SSL: "nossl" - # Choice of wire protocol compression support - - id: compression - display_name: Compression - values: - - id: snappy - display_name: snappy compression - variables: - COMPRESSORS: "snappy" - - id: zlib - display_name: zlib compression - variables: - COMPRESSORS: "zlib" - - id: zstd - display_name: zstd compression - variables: - COMPRESSORS: "zstd" - # Choice of MongoDB server version - id: mongodb-version display_name: "MongoDB" @@ -3125,6 +3108,92 @@ buildvariants: PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] +# Compressor tests. +- name: snappy-compression-rhel8-py3.9-no-c + tasks: + - name: .standalone + display_name: snappy compression RHEL8 py3.9 No C + run_on: + - rhel87-small + expansions: + COMPRESSORS: snappy + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: snappy-compression-rhel8-py3.10 + tasks: + - name: .standalone + display_name: snappy compression RHEL8 py3.10 + run_on: + - rhel87-small + expansions: + COMPRESSORS: snappy + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: zlib-compression-rhel8-py3.11-no-c + tasks: + - name: .standalone + display_name: zlib compression RHEL8 py3.11 No C + run_on: + - rhel87-small + expansions: + COMPRESSORS: zlib + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: zlib-compression-rhel8-py3.12 + tasks: + - name: .standalone + display_name: zlib compression RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zlib + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: zstd-compression-rhel8-py3.13-no-c + tasks: + - name: .standalone !.4.0 + display_name: zstd compression RHEL8 py3.13 No C + run_on: + - rhel87-small + expansions: + COMPRESSORS: zstd + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: zstd-compression-rhel8-py3.9 + tasks: + - name: .standalone !.4.0 + display_name: zstd compression RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zstd + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: snappy-compression-rhel8-pypy3.9 + tasks: + - name: .standalone + display_name: snappy compression RHEL8 pypy3.9 + run_on: + - rhel87-small + expansions: + COMPRESSORS: snappy + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: zlib-compression-rhel8-pypy3.10 + tasks: + - name: .standalone + display_name: zlib compression RHEL8 pypy3.10 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zlib + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +- name: zstd-compression-rhel8-pypy3.9 + tasks: + - name: .standalone !.4.0 + display_name: zstd compression RHEL8 pypy3.9 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zstd + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3214,38 +3283,6 @@ buildvariants: - ".4.2" - ".4.0" -- matrix_name: "tests-python-version-rhel8-compression" - matrix_spec: - platform: rhel8 - python-version: "*" - c-extensions: "*" - compression: "*" - exclude_spec: - # These interpreters are always tested without extensions. - - platform: rhel8 - python-version: ["pypy3.9", "pypy3.10"] - c-extensions: "with-c-extensions" - compression: "*" - display_name: "${compression} ${c-extensions} ${python-version} ${platform}" - tasks: - - "test-latest-standalone" - - "test-8.0-standalone" - - "test-7.0-standalone" - - "test-6.0-standalone" - - "test-5.0-standalone" - - "test-4.4-standalone" - - "test-4.2-standalone" - - "test-4.0-standalone" - rules: - # Server version 4.0 supports snappy and zlib but not zstd. - - if: - python-version: "*" - c-extensions: "*" - compression: ["zstd"] - then: - remove_tasks: - - "test-4.0-standalone" - - matrix_name: "tests-python-version-green-framework-rhel8" matrix_spec: platform: rhel8 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 03b900301c..91dedeb620 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -30,12 +30,14 @@ BATCHTIME_WEEK = 10080 AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] +C_EXTS = ["with_ext", "without_ext"] SYNCS = ["sync", "async"] DISPLAY_LOOKUP = dict( ssl=dict(ssl="SSL", nossl="NoSSL"), auth=dict(auth="Auth", noauth="NoAuth"), test_suites=dict(default="Sync", default_async="Async"), coverage=dict(coverage="cov"), + no_ext={"1": "No C"}, ) HOSTS = dict() @@ -137,6 +139,12 @@ def zip_cycle(*iterables, empty_default=None): yield tuple(next(i, empty_default) for i in cycles) +def handle_c_ext(c_ext, expansions): + """Handle c extension option.""" + if c_ext == C_EXTS[0]: + expansions["NO_EXT"] = "1" + + def generate_yaml(tasks=None, variants=None): """Generate the yaml for a given set of tasks and variants.""" project = EvgProject(tasks=tasks, buildvariants=variants) @@ -337,8 +345,49 @@ def create_load_balancer_variants(): return variants +def create_compression_variants(): + # Compression tests - standalone versions of each server, across python versions, with and without c extensions. + # PyPy interpreters are always tested without extensions. + host = "rhel8" + task_names = dict(snappy=[".standalone"], zlib=[".standalone"], zstd=[".standalone !.4.0"]) + variants = [] + for ind, (compressor, c_ext) in enumerate(product(["snappy", "zlib", "zstd"], C_EXTS)): + expansions = dict(COMPRESSORS=compressor) + handle_c_ext(c_ext, expansions) + base_name = f"{compressor} compression" + python = CPYTHONS[ind % len(CPYTHONS)] + display_name = get_display_name(base_name, host, python=python, **expansions) + variant = create_variant( + task_names[compressor], + display_name, + python=python, + host=host, + expansions=expansions, + ) + variants.append(variant) + + other_pythons = PYPYS + CPYTHONS[ind:] + for compressor, python in zip_cycle(["snappy", "zlib", "zstd"], other_pythons): + expansions = dict(COMPRESSORS=compressor) + handle_c_ext(c_ext, expansions) + base_name = f"{compressor} compression" + display_name = get_display_name(base_name, host, python=python, **expansions) + variant = create_variant( + task_names[compressor], + display_name, + python=python, + host=host, + expansions=expansions, + ) + variants.append(variant) + + return variants + + ################## # Generate Config ################## -generate_yaml(variants=create_load_balancer_variants()) +variants = create_compression_variants() +# print(len(variants)) +generate_yaml(variants=variants) From 7e904b3c31d3242ef9e202438e50cbecf611af75 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 17 Oct 2024 13:11:20 -0700 Subject: [PATCH 1565/2111] PYTHON-4874 Fix async Windows KMS support (#1942) --- pymongo/network_layer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 7a325853c8..aa16e85a07 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -215,11 +215,11 @@ async def _async_receive_ssl( while total_read < length: try: read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") # KMS responses update their expected size after the first batch, stop reading after one loop if once: return mv[:read] - if read == 0: - raise OSError("connection closed") except BLOCKING_IO_ERRORS: await asyncio.sleep(backoff) read = 0 From 335b728f070a350239123a755856a1a3d1d51746 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 20:27:27 -0500 Subject: [PATCH 1566/2111] Bump pyright from 1.1.383 to 1.1.384 (#1922) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jib --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 06c33c6db6..2c23212da7 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.11.2 -pyright==1.1.383 +pyright==1.1.384 typing_extensions -r ./encryption.txt -r ./ocsp.txt From 021a9f75243b466d2a333c991ad4d9eb52d8275e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Oct 2024 08:57:20 -0500 Subject: [PATCH 1567/2111] PYTHON-4882 Use shrub.py for enterprise auth tests (#1945) --- .evergreen/config.yml | 83 +++++++++++++++++++++------ .evergreen/run-tests.sh | 2 + .evergreen/scripts/generate_config.py | 23 +++++++- pyproject.toml | 1 + test/asynchronous/test_auth.py | 4 ++ test/test_auth.py | 4 ++ 6 files changed, 98 insertions(+), 19 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a7efc223b9..9e2ab77088 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -3194,6 +3194,71 @@ buildvariants: COMPRESSORS: zstd PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +# Enterprise auth tests. +- name: enterprise-auth-macos-py3.9-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth macOS py3.9 Auth + run_on: + - macos-14 + expansions: + AUTH: auth + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: enterprise-auth-rhel8-py3.10-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 py3.10 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: enterprise-auth-rhel8-py3.11-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 py3.11 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: enterprise-auth-rhel8-py3.12-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 py3.12 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: enterprise-auth-win64-py3.13-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth Win64 py3.13 Auth + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + PYTHON_BINARY: C:/python/Python313/python.exe +- name: enterprise-auth-rhel8-pypy3.9-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 pypy3.9 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: enterprise-auth-rhel8-pypy3.10-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 pypy3.10 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3350,24 +3415,6 @@ buildvariants: tasks: - ".latest" -- matrix_name: "test-linux-enterprise-auth" - matrix_spec: - platform: rhel8 - python-version: "*" - auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version}" - tasks: - - name: "test-enterprise-auth" - -- matrix_name: "tests-windows-enterprise-auth" - matrix_spec: - platform: windows - python-version-windows: "*" - auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version-windows}" - tasks: - - name: "test-enterprise-auth" - - matrix_name: "test-search-index-helpers" matrix_spec: platform: rhel8 diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 364570999f..36fa76e317 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -90,6 +90,8 @@ if [ -n "$TEST_ENTERPRISE_AUTH" ]; then export GSSAPI_HOST=${SASL_HOST} export GSSAPI_PORT=${SASL_PORT} export GSSAPI_PRINCIPAL=${PRINCIPAL} + + export TEST_SUITES="auth" fi if [ -n "$TEST_LOADBALANCER" ]; then diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 91dedeb620..c3bfeef7af 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -384,10 +384,31 @@ def create_compression_variants(): return variants +def create_enterprise_auth_variants(): + expansions = dict(AUTH="auth") + variants = [] + + # All python versions across platforms. + for python in ALL_PYTHONS: + if python == CPYTHONS[0]: + host = "macos" + elif python == CPYTHONS[-1]: + host = "win64" + else: + host = "rhel8" + display_name = get_display_name("Enterprise Auth", host, python=python, **expansions) + variant = create_variant( + ["test-enterprise-auth"], display_name, host=host, python=python, expansions=expansions + ) + variants.append(variant) + + return variants + + ################## # Generate Config ################## -variants = create_compression_variants() +variants = create_enterprise_auth_variants() # print(len(variants)) generate_yaml(variants=variants) diff --git a/pyproject.toml b/pyproject.toml index b4f59f67d5..9a29a777fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -99,6 +99,7 @@ filterwarnings = [ markers = [ "auth_aws: tests that rely on pymongo-auth-aws", "auth_oidc: tests that rely on oidc auth", + "auth: tests that rely on authentication", "ocsp: tests that rely on ocsp", "atlas: tests that rely on atlas", "data_lake: tests that rely on atlas data lake", diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index fbaca41f09..9262714374 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -32,6 +32,8 @@ ) from test.utils import AllowListEventListener, delay, ignore_deprecations +import pytest + from pymongo import AsyncMongoClient, monitoring from pymongo.asynchronous.auth import HAVE_KERBEROS from pymongo.auth_shared import _build_credentials_tuple @@ -42,6 +44,8 @@ _IS_SYNC = False +pytestmark = pytest.mark.auth + # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. GSSAPI_HOST = os.environ.get("GSSAPI_HOST") GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) diff --git a/test/test_auth.py b/test/test_auth.py index b311d330bc..310006afff 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -32,6 +32,8 @@ ) from test.utils import AllowListEventListener, delay, ignore_deprecations +import pytest + from pymongo import MongoClient, monitoring from pymongo.auth_shared import _build_credentials_tuple from pymongo.errors import OperationFailure @@ -42,6 +44,8 @@ _IS_SYNC = True +pytestmark = pytest.mark.auth + # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. GSSAPI_HOST = os.environ.get("GSSAPI_HOST") GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017")) From 6a7e83dc95319c445b95ab1f54f4aa8435986cc4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Oct 2024 10:36:05 -0500 Subject: [PATCH 1568/2111] PYTHON-4887 Do not test macos arm64 on server versions < 6.0 (#1947) --- .evergreen/config.yml | 48 ++++++++++++++++++++------- .evergreen/scripts/generate_config.py | 9 +++-- 2 files changed, 43 insertions(+), 14 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9e2ab77088..705880be60 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2592,7 +2592,11 @@ buildvariants: # Server tests for macOS Arm64. - name: test-macos-arm64-py3.9-auth-ssl-sync tasks: - - name: .standalone + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest display_name: Test macOS Arm64 py3.9 Auth SSL Sync run_on: - macos-14-arm64 @@ -2600,11 +2604,15 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-arm64-py3.9-auth-ssl-async tasks: - - name: .standalone + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest display_name: Test macOS Arm64 py3.9 Auth SSL Async run_on: - macos-14-arm64 @@ -2612,11 +2620,15 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default_async - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-arm64-py3.13-noauth-ssl-sync tasks: - - name: .replica_set + - name: .replica_set .6.0 + - name: .replica_set .7.0 + - name: .replica_set .8.0 + - name: .replica_set .rapid + - name: .replica_set .latest display_name: Test macOS Arm64 py3.13 NoAuth SSL Sync run_on: - macos-14-arm64 @@ -2624,11 +2636,15 @@ buildvariants: AUTH: noauth SSL: ssl TEST_SUITES: default - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-macos-arm64-py3.13-noauth-ssl-async tasks: - - name: .replica_set + - name: .replica_set .6.0 + - name: .replica_set .7.0 + - name: .replica_set .8.0 + - name: .replica_set .rapid + - name: .replica_set .latest display_name: Test macOS Arm64 py3.13 NoAuth SSL Async run_on: - macos-14-arm64 @@ -2636,11 +2652,15 @@ buildvariants: AUTH: noauth SSL: ssl TEST_SUITES: default_async - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-macos-arm64-py3.9-noauth-nossl-sync tasks: - - name: .sharded_cluster + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync run_on: - macos-14-arm64 @@ -2648,11 +2668,15 @@ buildvariants: AUTH: noauth SSL: nossl TEST_SUITES: default - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-arm64-py3.9-noauth-nossl-async tasks: - - name: .sharded_cluster + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async run_on: - macos-14-arm64 @@ -2660,8 +2684,8 @@ buildvariants: AUTH: noauth SSL: nossl TEST_SUITES: default_async - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 # Server tests for Windows. - name: test-win64-py3.9-auth-ssl-sync diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index c3bfeef7af..a3ec798c3b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -23,6 +23,7 @@ ############## ALL_VERSIONS = ["4.0", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +VERSIONS_6_0_PLUS = ["6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] PYPYS = ["pypy3.9", "pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS @@ -239,10 +240,14 @@ def create_server_variants() -> list[BuildVariant]: zip_cycle(MIN_MAX_PYTHON, AUTH_SSLS, TOPOLOGIES), SYNCS ): test_suite = "default" if sync == "sync" else "default_async" + tasks = [f".{topology}"] + # MacOS arm64 only works on server versions 6.0+ + if host == "macos-arm64": + tasks = [f".{topology} .{version}" for version in VERSIONS_6_0_PLUS] expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite, SKIP_CSOT_TESTS="true") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( - [f".{topology}"], + tasks, display_name, python=python, host=host, @@ -409,6 +414,6 @@ def create_enterprise_auth_variants(): # Generate Config ################## -variants = create_enterprise_auth_variants() +variants = create_server_variants() # print(len(variants)) generate_yaml(variants=variants) From 1ae0c3904c9a71bed1f7d8f81183b59070845a81 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Oct 2024 10:58:28 -0500 Subject: [PATCH 1569/2111] PYTHON-4886 Use shrub.py for PyOpenSSL tests (#1946) --- .evergreen/config.yml | 144 +++++++++++++++++--------- .evergreen/scripts/generate_config.py | 34 +++++- 2 files changed, 126 insertions(+), 52 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 705880be60..9083da145b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2305,16 +2305,6 @@ axes: variables: COVERAGE: "coverage" - # Run pyopenssl tests? - - id: pyopenssl - display_name: "PyOpenSSL" - values: - - id: "enabled" - display_name: "PyOpenSSL" - variables: - test_pyopenssl: true - batchtime: 10080 # 7 days - - id: versionedApi display_name: "versionedApi" values: @@ -3283,6 +3273,99 @@ buildvariants: AUTH: auth PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +# PyOpenSSL tests. +- name: pyopenssl-macos-py3.9 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL macOS py3.9 + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: noauth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: pyopenssl-rhel8-py3.10 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 py3.10 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: pyopenssl-rhel8-py3.11 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 py3.11 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: pyopenssl-rhel8-py3.12 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 py3.12 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: pyopenssl-win64-py3.13 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL Win64 py3.13 + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: C:/python/Python313/python.exe +- name: pyopenssl-rhel8-pypy3.9 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 pypy3.9 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 +- name: pyopenssl-rhel8-pypy3.10 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 pypy3.10 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3305,47 +3388,6 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-pyopenssl" - matrix_spec: - platform: rhel8 - python-version: "*" - auth: "*" - ssl: "ssl" - pyopenssl: "*" - # Only test "noauth" with Python 3.9. - exclude_spec: - platform: rhel8 - python-version: ["3.10", "3.11", "3.12", "3.13", "pypy3.9", "pypy3.10"] - auth: "noauth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" - tasks: - - '.replica_set' - # Test standalone and sharded only on 7.0. - - '.7.0' - -- matrix_name: "tests-pyopenssl-macOS" - matrix_spec: - platform: macos - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${auth}" - tasks: - - '.replica_set' - -- matrix_name: "tests-pyopenssl-windows" - matrix_spec: - platform: windows - python-version-windows: "*" - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${python-version-windows} ${auth}" - tasks: - - '.replica_set' - - matrix_name: "tests-python-version-rhel8-without-c-extensions" matrix_spec: platform: rhel8 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index a3ec798c3b..6d614a9afe 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -410,10 +410,42 @@ def create_enterprise_auth_variants(): return variants +def create_pyopenssl_variants(): + base_name = "PyOpenSSL" + batchtime = BATCHTIME_WEEK + base_expansions = dict(test_pyopenssl="true", SSL="ssl") + variants = [] + + for python in ALL_PYTHONS: + # Only test "noauth" with min python. + auth = "noauth" if python == CPYTHONS[0] else "auth" + if python == CPYTHONS[0]: + host = "macos" + elif python == CPYTHONS[-1]: + host = "win64" + else: + host = "rhel8" + expansions = dict(AUTH=auth) + expansions.update(base_expansions) + + display_name = get_display_name(base_name, host, python=python) + variant = create_variant( + [".replica_set", ".7.0"], + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + ) + variants.append(variant) + + return variants + + ################## # Generate Config ################## -variants = create_server_variants() +variants = create_pyopenssl_variants() # print(len(variants)) generate_yaml(variants=variants) From a1ade45dd3b67a6e4baa50404b9807f68062f043 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 18 Oct 2024 13:32:09 -0400 Subject: [PATCH 1570/2111] PYTHON-4881 - Use OvertCommandListener wherever sensitive events are not needed (#1943) Co-authored-by: Steven Silvester --- test/asynchronous/test_change_stream.py | 5 +++-- test/asynchronous/test_collation.py | 4 ++-- test/asynchronous/test_collection.py | 3 ++- test/asynchronous/test_cursor.py | 4 ++-- test/asynchronous/test_grid_file.py | 4 ++-- test/asynchronous/test_monitoring.py | 21 ++++++++++++--------- test/asynchronous/test_session.py | 3 ++- test/auth_oidc/test_auth_oidc.py | 6 +++--- test/test_change_stream.py | 5 +++-- test/test_collation.py | 4 ++-- test/test_collection.py | 3 ++- test/test_cursor.py | 4 ++-- test/test_grid_file.py | 4 ++-- test/test_index_management.py | 4 ++-- test/test_monitoring.py | 21 ++++++++++++--------- test/test_read_write_concern_spec.py | 6 +++--- test/test_server_selection.py | 3 ++- test/test_session.py | 3 ++- test/test_ssl.py | 1 + 19 files changed, 61 insertions(+), 47 deletions(-) diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 883ed72c4c..98641f46ee 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -39,6 +39,7 @@ from test.utils import ( AllowListEventListener, EventListener, + OvertCommandListener, async_wait_until, ) @@ -179,7 +180,7 @@ async def _wait_until(): @no_type_check async def test_try_next_runs_one_getmore(self): - listener = EventListener() + listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. await client.admin.command("ping") @@ -237,7 +238,7 @@ async def _wait_until(): @no_type_check async def test_batch_size_is_honored(self): - listener = EventListener() + listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. await client.admin.command("ping") diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py index be3ea22e42..d95f4c9917 100644 --- a/test/asynchronous/test_collation.py +++ b/test/asynchronous/test_collation.py @@ -18,7 +18,7 @@ import functools import warnings from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import EventListener +from test.utils import EventListener, OvertCommandListener from typing import Any from pymongo.asynchronous.helpers import anext @@ -101,7 +101,7 @@ class TestCollation(AsyncIntegrationTest): @async_client_context.require_connection async def _setup_class(cls): await super()._setup_class() - cls.listener = EventListener() + cls.listener = OvertCommandListener() cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test cls.collation = Collation("en_US") diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 612090b69f..db52bad4ac 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -36,6 +36,7 @@ from test.utils import ( IMPOSSIBLE_WRITE_CONCERN, EventListener, + OvertCommandListener, async_get_pool, async_is_mongos, async_wait_until, @@ -2116,7 +2117,7 @@ async def test_find_one_and(self): self.assertEqual(4, (await c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) async def test_find_one_and_write_concern(self): - listener = EventListener() + listener = OvertCommandListener() db = (await self.async_single_client(event_listeners=[listener]))[self.db.name] # non-default WriteConcern. c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index ee0a757ed3..787da3d957 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1601,7 +1601,7 @@ async def test_read_concern(self): await anext(c.find_raw_batches()) async def test_monitoring(self): - listener = EventListener() + listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test await c.drop() @@ -1768,7 +1768,7 @@ async def test_collation(self): await anext(await self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) async def test_monitoring(self): - listener = EventListener() + listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test await c.drop() diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index 9c57c15c5a..54fcd3abf6 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] -from test.utils import EventListener +from test.utils import OvertCommandListener from bson.objectid import ObjectId from gridfs.asynchronous.grid_file import ( @@ -810,7 +810,7 @@ async def test_survive_cursor_not_found(self): # Use 102 batches to cause a single getMore. chunk_size = 1024 data = b"d" * (102 * chunk_size) - listener = EventListener() + listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test async with AsyncGridIn(db.fs, chunk_size=chunk_size) as infile: diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index b5d8708dc3..b0c86ab54e 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -31,6 +31,7 @@ ) from test.utils import ( EventListener, + OvertCommandListener, async_wait_until, ) @@ -54,7 +55,7 @@ class AsyncTestCommandMonitoring(AsyncIntegrationTest): @async_client_context.require_connection async def _setup_class(cls): await super()._setup_class() - cls.listener = EventListener() + cls.listener = OvertCommandListener() cls.client = await cls.unmanaged_async_rs_or_single_client( event_listeners=[cls.listener], retryWrites=False ) @@ -1100,11 +1101,13 @@ async def test_first_batch_helper(self): @async_client_context.require_version_max(6, 1, 99) async def test_sensitive_commands(self): - listeners = self.client._event_listeners + listener = EventListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + listeners = client._event_listeners - self.listener.reset() + listener.reset() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start(cmd, "pymongo_test", 12345, await self.client.address, None) # type: ignore[arg-type] + listeners.publish_command_start(cmd, "pymongo_test", 12345, await client.address, None) # type: ignore[arg-type] delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( delta, @@ -1115,15 +1118,15 @@ async def test_sensitive_commands(self): None, database_name="pymongo_test", ) - started = self.listener.started_events[0] - succeeded = self.listener.succeeded_events[0] - self.assertEqual(0, len(self.listener.failed_events)) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getnonce", started.command_name) self.assertIsInstance(started.request_id, int) - self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual(await client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) self.assertEqual(succeeded.duration_micros, 100000) self.assertEqual(started.command_name, succeeded.command_name) @@ -1140,7 +1143,7 @@ class AsyncTestGlobalListener(AsyncIntegrationTest): @async_client_context.require_connection async def _setup_class(cls): await super()._setup_class() - cls.listener = EventListener() + cls.listener = OvertCommandListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index d264b5ecb0..b432621798 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -36,6 +36,7 @@ from test.utils import ( EventListener, ExceptionCatchingThread, + OvertCommandListener, async_wait_until, wait_until, ) @@ -199,7 +200,7 @@ def test_implicit_sessions_checkout(self): lsid_set = set() failures = 0 for _ in range(5): - listener = EventListener() + listener = OvertCommandListener() client = self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 6d31f3db4e..6526391daf 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -31,7 +31,7 @@ sys.path[0:0] = [""] from test.unified_format import generate_test_classes -from test.utils import EventListener +from test.utils import EventListener, OvertCommandListener from bson import SON from pymongo import MongoClient @@ -348,7 +348,7 @@ def test_4_1_reauthenticate_succeeds(self): # Create a default OIDC client and add an event listener. # The following assumes that the driver does not emit saslStart or saslContinue events. # If the driver does emit those events, ignore/filter them for the purposes of this test. - listener = EventListener() + listener = OvertCommandListener() client = self.create_client(event_listeners=[listener]) # Perform a find operation that succeeds. @@ -1021,7 +1021,7 @@ def fetch(self, _): def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self): # Create an OIDC configured client that can listen for `SaslStart` commands. - listener = EventListener() + listener = OvertCommandListener() client = self.create_client(event_listeners=[listener]) # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication. diff --git a/test/test_change_stream.py b/test/test_change_stream.py index dae224c5e0..3a107122b7 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -39,6 +39,7 @@ from test.utils import ( AllowListEventListener, EventListener, + OvertCommandListener, wait_until, ) @@ -177,7 +178,7 @@ def _wait_until(): @no_type_check def test_try_next_runs_one_getmore(self): - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") @@ -235,7 +236,7 @@ def _wait_until(): @no_type_check def test_batch_size_is_honored(self): - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") diff --git a/test/test_collation.py b/test/test_collation.py index e5c1c7eb11..b878df2fb4 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -18,7 +18,7 @@ import functools import warnings from test import IntegrationTest, client_context, unittest -from test.utils import EventListener +from test.utils import EventListener, OvertCommandListener from typing import Any from pymongo.collation import ( @@ -101,7 +101,7 @@ class TestCollation(IntegrationTest): @client_context.require_connection def _setup_class(cls): super()._setup_class() - cls.listener = EventListener() + cls.listener = OvertCommandListener() cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) cls.db = cls.client.pymongo_test cls.collation = Collation("en_US") diff --git a/test/test_collection.py b/test/test_collection.py index a2c3b0b0b6..84a900d45b 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -36,6 +36,7 @@ from test.utils import ( IMPOSSIBLE_WRITE_CONCERN, EventListener, + OvertCommandListener, get_pool, is_mongos, wait_until, @@ -2093,7 +2094,7 @@ def test_find_one_and(self): self.assertEqual(4, (c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) def test_find_one_and_write_concern(self): - listener = EventListener() + listener = OvertCommandListener() db = (self.single_client(event_listeners=[listener]))[self.db.name] # non-default WriteConcern. c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) diff --git a/test/test_cursor.py b/test/test_cursor.py index 7a6dfc9429..9eac0f1c49 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1590,7 +1590,7 @@ def test_read_concern(self): next(c.find_raw_batches()) def test_monitoring(self): - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() @@ -1757,7 +1757,7 @@ def test_collation(self): next(self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) def test_monitoring(self): - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() diff --git a/test/test_grid_file.py b/test/test_grid_file.py index fe88aec5ff..c35efccef5 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] -from test.utils import EventListener +from test.utils import OvertCommandListener from bson.objectid import ObjectId from gridfs.errors import NoFile @@ -808,7 +808,7 @@ def test_survive_cursor_not_found(self): # Use 102 batches to cause a single getMore. chunk_size = 1024 data = b"d" * (102 * chunk_size) - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test with GridIn(db.fs, chunk_size=chunk_size) as infile: diff --git a/test/test_index_management.py b/test/test_index_management.py index ec1e363737..6ca726e2e0 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -27,7 +27,7 @@ from test import IntegrationTest, PyMongoTestCase, unittest from test.unified_format import generate_test_classes -from test.utils import AllowListEventListener, EventListener +from test.utils import AllowListEventListener, EventListener, OvertCommandListener from pymongo.errors import OperationFailure from pymongo.operations import SearchIndexModel @@ -88,7 +88,7 @@ def setUpClass(cls) -> None: url = os.environ.get("MONGODB_URI") username = os.environ["DB_USER"] password = os.environ["DB_PASSWORD"] - cls.listener = listener = EventListener() + cls.listener = listener = OvertCommandListener() cls.client = cls.unmanaged_simple_client( url, username=username, password=password, event_listeners=[listener] ) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index a0c520ed27..75fe5c987a 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -31,6 +31,7 @@ ) from test.utils import ( EventListener, + OvertCommandListener, wait_until, ) @@ -54,7 +55,7 @@ class TestCommandMonitoring(IntegrationTest): @client_context.require_connection def _setup_class(cls): super()._setup_class() - cls.listener = EventListener() + cls.listener = OvertCommandListener() cls.client = cls.unmanaged_rs_or_single_client( event_listeners=[cls.listener], retryWrites=False ) @@ -1100,11 +1101,13 @@ def test_first_batch_helper(self): @client_context.require_version_max(6, 1, 99) def test_sensitive_commands(self): - listeners = self.client._event_listeners + listener = EventListener() + client = self.rs_or_single_client(event_listeners=[listener]) + listeners = client._event_listeners - self.listener.reset() + listener.reset() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address, None) # type: ignore[arg-type] + listeners.publish_command_start(cmd, "pymongo_test", 12345, client.address, None) # type: ignore[arg-type] delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( delta, @@ -1115,15 +1118,15 @@ def test_sensitive_commands(self): None, database_name="pymongo_test", ) - started = self.listener.started_events[0] - succeeded = self.listener.succeeded_events[0] - self.assertEqual(0, len(self.listener.failed_events)) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getnonce", started.command_name) self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) + self.assertEqual(client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) self.assertEqual(succeeded.duration_micros, 100000) self.assertEqual(started.command_name, succeeded.command_name) @@ -1140,7 +1143,7 @@ class TestGlobalListener(IntegrationTest): @client_context.require_connection def _setup_class(cls): super()._setup_class() - cls.listener = EventListener() + cls.listener = OvertCommandListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 67943d495d..db53b67ae4 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -24,7 +24,7 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import EventListener +from test.utils import OvertCommandListener from pymongo import DESCENDING from pymongo.errors import ( @@ -44,7 +44,7 @@ class TestReadWriteConcernSpec(IntegrationTest): def test_omit_default_read_write_concern(self): - listener = EventListener() + listener = OvertCommandListener() # Client with default readConcern and writeConcern client = self.rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) @@ -205,7 +205,7 @@ def test_error_includes_errInfo(self): @client_context.require_version_min(4, 9) def test_write_error_details_exposes_errinfo(self): - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) db = client.errinfotest diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 67e9716bf4..984b967f50 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -33,6 +33,7 @@ from test.utils import ( EventListener, FunctionCallRecorder, + OvertCommandListener, wait_until, ) from test.utils_selection_tests import ( @@ -74,7 +75,7 @@ def custom_selector(servers): return [servers[idx]] # Initialize client with appropriate listeners. - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client( server_selector=custom_selector, event_listeners=[listener] ) diff --git a/test/test_session.py b/test/test_session.py index 9f94ded927..d0bbb075a8 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -36,6 +36,7 @@ from test.utils import ( EventListener, ExceptionCatchingThread, + OvertCommandListener, wait_until, ) @@ -198,7 +199,7 @@ def test_implicit_sessions_checkout(self): lsid_set = set() failures = 0 for _ in range(5): - listener = EventListener() + listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ diff --git a/test/test_ssl.py b/test/test_ssl.py index 36d7ba12b6..04db9b61a4 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -33,6 +33,7 @@ ) from test.utils import ( EventListener, + OvertCommandListener, cat_files, ignore_deprecations, ) From 849ed7970f45a104d35f77dec8c7ec684e596087 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Oct 2024 15:35:44 -0500 Subject: [PATCH 1571/2111] PYTHON-4888 Use shrub.py for versioned api tests (#1949) --- .evergreen/config.yml | 106 +++++++++++++++++--------- .evergreen/scripts/generate_config.py | 47 +++++++++++- 2 files changed, 111 insertions(+), 42 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9083da145b..cf43d7c246 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2305,28 +2305,6 @@ axes: variables: COVERAGE: "coverage" - - id: versionedApi - display_name: "versionedApi" - values: - # Test against a cluster with requireApiVersion=1. - - id: "requireApiVersion1" - display_name: "requireApiVersion1" - tags: [ "versionedApi_tag" ] - variables: - # REQUIRE_API_VERSION is set to make drivers-evergreen-tools - # start a cluster with the requireApiVersion parameter. - REQUIRE_API_VERSION: "1" - # MONGODB_API_VERSION is the apiVersion to use in the test suite. - MONGODB_API_VERSION: "1" - # Test against a cluster with acceptApiVersion2 but without - # requireApiVersion, and don't automatically add apiVersion to - # clients created in the test suite. - - id: "acceptApiVersion2" - display_name: "acceptApiVersion2" - tags: [ "versionedApi_tag" ] - variables: - ORCHESTRATION_FILE: "versioned-api-testing.json" - - id: serverless display_name: "Serverless" values: @@ -3366,6 +3344,74 @@ buildvariants: SSL: ssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +# Versioned API tests. +- name: versioned-api-require-v1-rhel8-py3.9-auth + tasks: + - name: .standalone .5.0 + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Versioned API require v1 RHEL8 py3.9 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + REQUIRE_API_VERSION: "1" + MONGODB_API_VERSION: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [versionedApi_tag] +- name: versioned-api-accept-v2-rhel8-py3.9-auth + tasks: + - name: .standalone .5.0 + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Versioned API accept v2 RHEL8 py3.9 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + ORCHESTRATION_FILE: versioned-api-testing.json + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [versionedApi_tag] +- name: versioned-api-require-v1-rhel8-py3.13-auth + tasks: + - name: .standalone .5.0 + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Versioned API require v1 RHEL8 py3.13 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + REQUIRE_API_VERSION: "1" + MONGODB_API_VERSION: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [versionedApi_tag] +- name: versioned-api-accept-v2-rhel8-py3.13-auth + tasks: + - name: .standalone .5.0 + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Versioned API accept v2 RHEL8 py3.13 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + ORCHESTRATION_FILE: versioned-api-testing.json + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [versionedApi_tag] + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3559,22 +3605,6 @@ buildvariants: tasks: - name: atlas-data-lake-tests -- matrix_name: "stable-api-tests" - matrix_spec: - platform: rhel8 - python-version: ["3.9", "3.10"] - auth: "auth" - versionedApi: "*" - display_name: "Versioned API ${versionedApi} ${python-version}" - batchtime: 10080 # 7 days - tasks: - # Versioned API was introduced in MongoDB 4.7 - - "test-latest-standalone" - - "test-8.0-standalone" - - "test-7.0-standalone" - - "test-6.0-standalone" - - "test-5.0-standalone" - # OCSP test matrix. - name: ocsp-test-rhel8-v4.4-py3.9 tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 6d614a9afe..dafcd4ff4f 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -23,7 +23,6 @@ ############## ALL_VERSIONS = ["4.0", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] -VERSIONS_6_0_PLUS = ["6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] PYPYS = ["pypy3.9", "pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS @@ -112,6 +111,14 @@ def get_python_binary(python: str, host: str) -> str: raise ValueError(f"no match found for python {python} on {host}") +def get_pythons_from(min_version: str) -> list[str]: + """Get all pythons starting from a minimum version.""" + min_version_float = float(min_version) + rapid_latest = ["rapid", "latest"] + versions = [v for v in ALL_VERSIONS if v not in rapid_latest] + return [v for v in versions if float(v) >= min_version_float] + rapid_latest + + def get_display_name(base: str, host: str, **kwargs) -> str: """Get the display name of a variant.""" display_name = f"{base} {HOSTS[host].display_name}" @@ -243,7 +250,7 @@ def create_server_variants() -> list[BuildVariant]: tasks = [f".{topology}"] # MacOS arm64 only works on server versions 6.0+ if host == "macos-arm64": - tasks = [f".{topology} .{version}" for version in VERSIONS_6_0_PLUS] + tasks = [f".{topology} .{version}" for version in get_pythons_from("6.0")] expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite, SKIP_CSOT_TESTS="true") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( @@ -330,7 +337,7 @@ def create_load_balancer_variants(): task_names = ["load-balancer-test"] batchtime = BATCHTIME_WEEK expansions_base = dict(test_loadbalancer="true") - versions = ["6.0", "7.0", "8.0", "latest", "rapid"] + versions = get_pythons_from("6.0") variants = [] pythons = CPYTHONS + PYPYS for ind, (version, (auth, ssl)) in enumerate(product(versions, AUTH_SSLS)): @@ -442,10 +449,42 @@ def create_pyopenssl_variants(): return variants +def create_versioned_api_tests(): + host = "rhel8" + tags = ["versionedApi_tag"] + tasks = [f".standalone .{v}" for v in get_pythons_from("5.0")] + variants = [] + types = ["require v1", "accept v2"] + + # All python versions across platforms. + for python, test_type in product(MIN_MAX_PYTHON, types): + expansions = dict(AUTH="auth") + # Test against a cluster with requireApiVersion=1. + if test_type == types[0]: + # REQUIRE_API_VERSION is set to make drivers-evergreen-tools + # start a cluster with the requireApiVersion parameter. + expansions["REQUIRE_API_VERSION"] = "1" + # MONGODB_API_VERSION is the apiVersion to use in the test suite. + expansions["MONGODB_API_VERSION"] = "1" + else: + # Test against a cluster with acceptApiVersion2 but without + # requireApiVersion, and don't automatically add apiVersion to + # clients created in the test suite. + expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" + base_display_name = f"Versioned API {test_type}" + display_name = get_display_name(base_display_name, host, python=python, **expansions) + variant = create_variant( + tasks, display_name, host=host, python=python, tags=tags, expansions=expansions + ) + variants.append(variant) + + return variants + + ################## # Generate Config ################## -variants = create_pyopenssl_variants() +variants = create_versioned_api_tests() # print(len(variants)) generate_yaml(variants=variants) From 7e83c8c67f556e61ec7d7d75a544044a04c0b63d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 07:24:39 -0500 Subject: [PATCH 1572/2111] PYTHON-4889 Use shrub.py for green framework tests (#1951) --- .evergreen/config.yml | 74 +++++++++++++++++---------- .evergreen/scripts/generate_config.py | 16 +++++- 2 files changed, 61 insertions(+), 29 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index cf43d7c246..dd35708f7d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2247,19 +2247,6 @@ axes: variables: MOD_WSGI_VERSION: "4" - # Choice of Python async framework - - id: green-framework - display_name: "Green Framework" - values: - - id: "eventlet" - display_name: "Eventlet" - variables: - GREEN_FRAMEWORK: "eventlet" - - id: "gevent" - display_name: "Gevent" - variables: - GREEN_FRAMEWORK: "gevent" - # Install and use the driver's C-extensions? - id: c-extensions display_name: "C Extensions" @@ -3412,6 +3399,52 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [versionedApi_tag] +# Green framework tests. +- name: eventlet-rhel8-py3.9 + tasks: + - name: .standalone + display_name: Eventlet RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: eventlet + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: gevent-rhel8-py3.9 + tasks: + - name: .standalone + display_name: Gevent RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: gevent + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: eventlet-rhel8-py3.12 + tasks: + - name: .standalone + display_name: Eventlet RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: eventlet + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: gevent-rhel8-py3.12 + tasks: + - name: .standalone + display_name: Gevent RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: gevent + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3460,21 +3493,6 @@ buildvariants: - ".4.2" - ".4.0" -- matrix_name: "tests-python-version-green-framework-rhel8" - matrix_spec: - platform: rhel8 - python-version: "*" - green-framework: "*" - auth-ssl: "*" - exclude_spec: - # Don't test green frameworks on these Python versions. - - platform: rhel8 - python-version: ["pypy3.9", "pypy3.10", "3.13"] - green-framework: "*" - auth-ssl: "*" - display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" - tasks: *all-server-versions - - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: platform: rhel7 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index dafcd4ff4f..28b79c10e7 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -481,10 +481,24 @@ def create_versioned_api_tests(): return variants +def create_green_framework_variants(): + variants = [] + tasks = [".standalone"] + host = "rhel8" + for python, framework in product([CPYTHONS[0], CPYTHONS[-2]], ["eventlet", "gevent"]): + expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") + display_name = get_display_name(f"{framework.capitalize()}", host, python=python) + variant = create_variant( + tasks, display_name, host=host, python=python, expansions=expansions + ) + variants.append(variant) + return variants + + ################## # Generate Config ################## -variants = create_versioned_api_tests() +variants = create_green_framework_variants() # print(len(variants)) generate_yaml(variants=variants) From 60109e660c18f8e93a7229c3c3202983053eeb41 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2024 07:32:56 -0500 Subject: [PATCH 1573/2111] Bump mypy from 1.11.2 to 1.12.1 (#1953) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 2c23212da7..7ccc122f53 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,4 +1,4 @@ -mypy==1.11.2 +mypy==1.12.1 pyright==1.1.384 typing_extensions -r ./encryption.txt From 5280596141d577dd13832a36a5fc6409d09e9bad Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 08:16:12 -0500 Subject: [PATCH 1574/2111] PYTHON-4890 Use shrub.py for storage engine tests (#1955) --- .evergreen/config.yml | 73 ++++++++++----------------- .evergreen/scripts/generate_config.py | 43 ++++++++++++++-- 2 files changed, 66 insertions(+), 50 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index dd35708f7d..4ffdfca581 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2260,19 +2260,6 @@ axes: variables: NO_EXT: "" - # Choice of MongoDB storage engine - - id: storage-engine - display_name: Storage - values: - - id: mmapv1 - display_name: MMAPv1 - variables: - STORAGE_ENGINE: "mmapv1" - - id: inmemory - display_name: InMemory - variables: - STORAGE_ENGINE: "inmemory" - # Run with test commands disabled on server? - id: disableTestCommands display_name: Disable test commands @@ -3331,6 +3318,34 @@ buildvariants: SSL: ssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 +# Storage Engine tests. +- name: storage-inmemory-rhel8-py3.9 + tasks: + - name: .standalone .4.0 + - name: .standalone .4.4 + - name: .standalone .5.0 + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Storage InMemory RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + STORAGE_ENGINE: inmemory + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: storage-mmapv1-rhel8-py3.9 + tasks: + - name: .standalone .4.0 + - name: .replica_set .4.0 + display_name: Storage MMAPv1 RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + STORAGE_ENGINE: mmapv1 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + # Versioned API tests. - name: versioned-api-require-v1-rhel8-py3.9-auth tasks: @@ -3503,38 +3518,6 @@ buildvariants: tasks: - ".5.0" -# Storage engine tests on RHEL 8.4 (x86_64) with Python 3.9. -- matrix_name: "tests-storage-engines" - matrix_spec: - platform: rhel8 - storage-engine: "*" - python-version: "3.9" - display_name: "Storage ${storage-engine} ${python-version} ${platform}" - rules: - - if: - platform: rhel8 - storage-engine: ["inmemory"] - python-version: "*" - then: - add_tasks: - - "test-latest-standalone" - - "test-8.0-standalone" - - "test-7.0-standalone" - - "test-6.0-standalone" - - "test-5.0-standalone" - - "test-4.4-standalone" - - "test-4.2-standalone" - - "test-4.0-standalone" - - if: - # MongoDB 4.2 drops support for MMAPv1 - platform: rhel8 - storage-engine: ["mmapv1"] - python-version: "*" - then: - add_tasks: - - "test-4.0-standalone" - - "test-4.0-replica_set" - # enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.9. - matrix_name: "test-disableTestCommands" matrix_spec: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 28b79c10e7..2d20d2de68 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -111,14 +111,24 @@ def get_python_binary(python: str, host: str) -> str: raise ValueError(f"no match found for python {python} on {host}") -def get_pythons_from(min_version: str) -> list[str]: - """Get all pythons starting from a minimum version.""" +def get_versions_from(min_version: str) -> list[str]: + """Get all server versions starting from a minimum version.""" min_version_float = float(min_version) rapid_latest = ["rapid", "latest"] versions = [v for v in ALL_VERSIONS if v not in rapid_latest] return [v for v in versions if float(v) >= min_version_float] + rapid_latest +def get_versions_until(max_version: str) -> list[str]: + """Get all server version up to a max version.""" + max_version_float = float(max_version) + versions = [v for v in ALL_VERSIONS if v not in ["rapid", "latest"]] + versions = [v for v in versions if float(v) <= max_version_float] + if not len(versions): + raise ValueError(f"No server versions found less <= {max_version}") + return versions + + def get_display_name(base: str, host: str, **kwargs) -> str: """Get the display name of a variant.""" display_name = f"{base} {HOSTS[host].display_name}" @@ -250,7 +260,7 @@ def create_server_variants() -> list[BuildVariant]: tasks = [f".{topology}"] # MacOS arm64 only works on server versions 6.0+ if host == "macos-arm64": - tasks = [f".{topology} .{version}" for version in get_pythons_from("6.0")] + tasks = [f".{topology} .{version}" for version in get_versions_from("6.0")] expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite, SKIP_CSOT_TESTS="true") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( @@ -337,7 +347,7 @@ def create_load_balancer_variants(): task_names = ["load-balancer-test"] batchtime = BATCHTIME_WEEK expansions_base = dict(test_loadbalancer="true") - versions = get_pythons_from("6.0") + versions = get_versions_from("6.0") variants = [] pythons = CPYTHONS + PYPYS for ind, (version, (auth, ssl)) in enumerate(product(versions, AUTH_SSLS)): @@ -449,10 +459,33 @@ def create_pyopenssl_variants(): return variants +def create_storage_engine_tests(): + host = "rhel8" + engines = ["InMemory", "MMAPv1"] + variants = [] + for engine in engines: + python = CPYTHONS[0] + expansions = dict(STORAGE_ENGINE=engine.lower()) + if engine == engines[0]: + tasks = [f".standalone .{v}" for v in ALL_VERSIONS] + else: + # MongoDB 4.2 drops support for MMAPv1 + versions = get_versions_until("4.0") + tasks = [f".standalone .{v}" for v in versions] + [ + f".replica_set .{v}" for v in versions + ] + display_name = get_display_name(f"Storage {engine}", host, python=python) + variant = create_variant( + tasks, display_name, host=host, python=python, expansions=expansions + ) + variants.append(variant) + return variants + + def create_versioned_api_tests(): host = "rhel8" tags = ["versionedApi_tag"] - tasks = [f".standalone .{v}" for v in get_pythons_from("5.0")] + tasks = [f".standalone .{v}" for v in get_versions_from("5.0")] variants = [] types = ["require v1", "accept v2"] From 4003edf267b831cfd4d017fc8d3d3826940ee19c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 09:45:14 -0500 Subject: [PATCH 1575/2111] PYTHON-4891 Use shrub.py for c extension tests (#1956) --- .evergreen/config.yml | 132 ++++++++++++++++---------- .evergreen/scripts/generate_config.py | 32 ++++++- 2 files changed, 114 insertions(+), 50 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4ffdfca581..3230b827b0 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2247,19 +2247,6 @@ axes: variables: MOD_WSGI_VERSION: "4" - # Install and use the driver's C-extensions? - - id: c-extensions - display_name: "C Extensions" - values: - - id: "without-c-extensions" - display_name: "Without C Extensions" - variables: - NO_EXT: "1" - - id: "with-c-extensions" - display_name: "With C Extensions" - variables: - NO_EXT: "" - # Run with test commands disabled on server? - id: disableTestCommands display_name: Disable test commands @@ -3460,6 +3447,89 @@ buildvariants: SSL: ssl PYTHON_BINARY: /opt/python/3.12/bin/python3 +# No C Ext tests. +- name: no-c-ext-rhel8-py3.9 + tasks: + - name: .standalone + display_name: No C Ext RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: no-c-ext-rhel8-py3.10 + tasks: + - name: .replica_set + display_name: No C Ext RHEL8 py3.10 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.10/bin/python3 +- name: no-c-ext-rhel8-py3.11 + tasks: + - name: .sharded_cluster + display_name: No C Ext RHEL8 py3.11 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.11/bin/python3 +- name: no-c-ext-rhel8-py3.12 + tasks: + - name: .standalone + display_name: No C Ext RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.12/bin/python3 +- name: no-c-ext-rhel8-py3.13 + tasks: + - name: .replica_set + display_name: No C Ext RHEL8 py3.13 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + +# Atlas Data Lake tests. +- name: atlas-data-lake-rhel8-py3.9-no-c + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.9 No C + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: atlas-data-lake-rhel8-py3.9 + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: atlas-data-lake-rhel8-py3.13-no-c + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.13 No C + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: atlas-data-lake-rhel8-py3.13 + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.13 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3482,32 +3552,6 @@ buildvariants: tasks: - ".6.0" -- matrix_name: "tests-python-version-rhel8-without-c-extensions" - matrix_spec: - platform: rhel8 - python-version: "*" - c-extensions: without-c-extensions - auth-ssl: noauth-nossl - coverage: "*" - exclude_spec: - # These interpreters are always tested without extensions. - - platform: rhel8 - python-version: ["pypy3.9", "pypy3.10"] - c-extensions: "*" - auth-ssl: "*" - coverage: "*" - display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" - tasks: &all-server-versions - - ".rapid" - - ".latest" - - ".8.0" - - ".7.0" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - matrix_name: "tests-python-version-supports-openssl-102-test-ssl" matrix_spec: platform: rhel7 @@ -3596,16 +3640,6 @@ buildvariants: tasks: - "serverless_task_group" -- matrix_name: "data-lake-spec-tests" - matrix_spec: - platform: ubuntu-22.04 - python-version: ["3.9", "3.10"] - auth: "auth" - c-extensions: "*" - display_name: "Atlas Data Lake ${python-version} ${c-extensions}" - tasks: - - name: atlas-data-lake-tests - # OCSP test matrix. - name: ocsp-test-rhel8-v4.4-py3.9 tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 2d20d2de68..aa5bf18c9e 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -528,10 +528,40 @@ def create_green_framework_variants(): return variants +def generate_no_c_ext_variants(): + variants = [] + host = "rhel8" + for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): + tasks = [f".{topology}"] + expansions = dict() + handle_c_ext(C_EXTS[0], expansions) + display_name = get_display_name("No C Ext", host, python=python) + variant = create_variant( + tasks, display_name, host=host, python=python, expansions=expansions + ) + variants.append(variant) + return variants + + +def generate_atlas_data_lake_variants(): + variants = [] + host = "rhel8" + for python, c_ext in product(MIN_MAX_PYTHON, C_EXTS): + tasks = ["atlas-data-lake-tests"] + expansions = dict() + handle_c_ext(c_ext, expansions) + display_name = get_display_name("Atlas Data Lake", host, python=python, **expansions) + variant = create_variant( + tasks, display_name, host=host, python=python, expansions=expansions + ) + variants.append(variant) + return variants + + ################## # Generate Config ################## -variants = create_green_framework_variants() +variants = generate_atlas_data_lake_variants() # print(len(variants)) generate_yaml(variants=variants) From 081ad89b844a8080539e2b45cb315156778bf3c1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 12:05:56 -0500 Subject: [PATCH 1576/2111] PYTHON-4894 Fix handling of auth test marker (#1958) --- test/pytest_conf.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/test/pytest_conf.py b/test/pytest_conf.py index 75f3e74322..a6e24cd9b1 100644 --- a/test/pytest_conf.py +++ b/test/pytest_conf.py @@ -2,15 +2,14 @@ def pytest_collection_modifyitems(items, config): - sync_items = [] - async_items = [ - item - for item in items - if "asynchronous" in item.fspath.dirname or sync_items.append(item) # type: ignore[func-returns-value] - ] - for item in async_items: - if not any(item.iter_markers()): - item.add_marker("default_async") - for item in sync_items: - if not any(item.iter_markers()): - item.add_marker("default") + # Markers that should overlap with the default markers. + overlap_markers = ["async"] + + for item in items: + if "asynchronous" in item.fspath.dirname: + default_marker = "default_async" + else: + default_marker = "default" + markers = [m for m in item.iter_markers() if m not in overlap_markers] + if not markers: + item.add_marker(default_marker) From 25de52ae5d6cc14ff27ba16363ee4aeb6d5b3b92 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 13:11:07 -0500 Subject: [PATCH 1577/2111] PYTHON-4892 Use shrub.py for remaining axes (#1957) --- .evergreen/config.yml | 201 +++++++++----------------- .evergreen/scripts/generate_config.py | 52 ++++++- 2 files changed, 115 insertions(+), 138 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3230b827b0..ebc070f345 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2112,47 +2112,6 @@ axes: AUTH: "noauth" SSL: "nossl" - # Choice of MongoDB server version - - id: mongodb-version - display_name: "MongoDB" - values: - - id: "4.0" - display_name: "MongoDB 4.0" - variables: - VERSION: "4.0" - - id: "4.2" - display_name: "MongoDB 4.2" - variables: - VERSION: "4.2" - - id: "4.4" - display_name: "MongoDB 4.4" - variables: - VERSION: "4.4" - - id: "5.0" - display_name: "MongoDB 5.0" - variables: - VERSION: "5.0" - - id: "6.0" - display_name: "MongoDB 6.0" - variables: - VERSION: "6.0" - - id: "7.0" - display_name: "MongoDB 7.0" - variables: - VERSION: "7.0" - - id: "8.0" - display_name: "MongoDB 8.0" - variables: - VERSION: "8.0" - - id: "latest" - display_name: "MongoDB latest" - variables: - VERSION: "latest" - - id: "rapid" - display_name: "MongoDB rapid" - variables: - VERSION: "rapid" - # Choice of Python runtime version - id: python-version display_name: "Python" @@ -2212,69 +2171,6 @@ axes: variables: PYTHON_BINARY: "C:/python/Python313/python.exe" - - id: python-version-windows-32 - display_name: "Python" - values: - - - - id: "3.9" - display_name: "32-bit Python 3.9" - variables: - PYTHON_BINARY: "C:/python/32/Python39/python.exe" - - id: "3.10" - display_name: "32-bit Python 3.10" - variables: - PYTHON_BINARY: "C:/python/32/Python310/python.exe" - - id: "3.11" - display_name: "32-bit Python 3.11" - variables: - PYTHON_BINARY: "C:/python/32/Python311/python.exe" - - id: "3.12" - display_name: "32-bit Python 3.12" - variables: - PYTHON_BINARY: "C:/python/32/Python312/python.exe" - - id: "3.13" - display_name: "32-bit Python 3.13" - variables: - PYTHON_BINARY: "C:/python/32/Python313/python.exe" - - # Choice of mod_wsgi version - - id: mod-wsgi-version - display_name: "mod_wsgi version" - values: - - id: "4" - display_name: "mod_wsgi 4.x" - variables: - MOD_WSGI_VERSION: "4" - - # Run with test commands disabled on server? - - id: disableTestCommands - display_name: Disable test commands - values: - - id: disabled - display_name: disabled - variables: - DISABLE_TEST_COMMANDS: "1" - - # Generate coverage report? - - id: coverage - display_name: "Coverage" - values: - - id: "coverage" - display_name: "Coverage" - tags: ["coverage_tag"] - variables: - COVERAGE: "coverage" - - - id: serverless - display_name: "Serverless" - values: - - id: "enabled" - display_name: "Serverless" - variables: - test_serverless: true - batchtime: 10080 # 7 days - buildvariants: # Server Tests for RHEL8. - name: test-rhel8-py3.9-auth-ssl-cov @@ -3530,6 +3426,71 @@ buildvariants: expansions: PYTHON_BINARY: /opt/python/3.13/bin/python3 +# Mod_wsgi tests. +- name: mod_wsgi-ubuntu-22-py3.9 + tasks: + - name: mod-wsgi-standalone + - name: mod-wsgi-replica-set + - name: mod-wsgi-embedded-mode-standalone + - name: mod-wsgi-embedded-mode-replica-set + display_name: mod_wsgi Ubuntu-22 py3.9 + run_on: + - ubuntu2204-small + expansions: + MOD_WSGI_VERSION: "4" + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: mod_wsgi-ubuntu-22-py3.13 + tasks: + - name: mod-wsgi-standalone + - name: mod-wsgi-replica-set + - name: mod-wsgi-embedded-mode-standalone + - name: mod-wsgi-embedded-mode-replica-set + display_name: mod_wsgi Ubuntu-22 py3.13 + run_on: + - ubuntu2204-small + expansions: + MOD_WSGI_VERSION: "4" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + +# Disable test commands variants. +- name: disable-test-commands-rhel8-py3.9 + tasks: + - name: .latest + display_name: Disable test commands RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + DISABLE_TEST_COMMANDS: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + +# Serverless variants. +- name: serverless-rhel8-py3.9 + tasks: + - name: serverless_task_group + display_name: Serverless RHEL8 py3.9 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + test_serverless: "true" + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: serverless-rhel8-py3.13 + tasks: + - name: serverless_task_group + display_name: Serverless RHEL8 py3.13 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + test_serverless: "true" + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -3562,16 +3523,6 @@ buildvariants: tasks: - ".5.0" -# enableTestCommands=0 tests on RHEL 8.4 (x86_64) with Python 3.9. -- matrix_name: "test-disableTestCommands" - matrix_spec: - platform: rhel8 - disableTestCommands: "*" - python-version: "3.9" - display_name: "Disable test commands ${python-version} ${platform}" - tasks: - - ".latest" - - matrix_name: "test-search-index-helpers" matrix_spec: platform: rhel8 @@ -3580,18 +3531,6 @@ buildvariants: tasks: - name: "test_atlas_task_group_search_indexes" -- matrix_name: "tests-mod-wsgi" - matrix_spec: - platform: ubuntu-22.04 - python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] - mod-wsgi-version: "*" - display_name: "${mod-wsgi-version} ${python-version} ${platform}" - tasks: - - name: "mod-wsgi-standalone" - - name: "mod-wsgi-replica-set" - - name: "mod-wsgi-embedded-mode-standalone" - - name: "mod-wsgi-embedded-mode-replica-set" - - matrix_name: "mockupdb-tests" matrix_spec: platform: rhel8 @@ -3630,16 +3569,6 @@ buildvariants: tasks: - name: "atlas-connect" -- matrix_name: "serverless" - matrix_spec: - platform: rhel8 - python-version: "*" - auth-ssl: auth-ssl - serverless: "enabled" - display_name: "${serverless} ${python-version} ${platform}" - tasks: - - "serverless_task_group" - # OCSP test matrix. - name: ocsp-test-rhel8-v4.4-py3.9 tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index aa5bf18c9e..d94c6c02fc 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -54,6 +54,7 @@ class Host: HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32") HOSTS["macos"] = Host("macos", "macos-14", "macOS") HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64") +HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22") ############## @@ -102,7 +103,7 @@ def get_python_binary(python: str, host: str) -> str: python = python.replace(".", "") return f"{base}/Python{python}/python.exe" - if host == "rhel8": + if host in ["rhel8", "ubuntu22"]: return f"/opt/python/{python}/bin/python3" if host in ["macos", "macos-arm64"]: @@ -558,10 +559,57 @@ def generate_atlas_data_lake_variants(): return variants +def generate_mod_wsgi_variants(): + variants = [] + host = "ubuntu22" + tasks = [ + "mod-wsgi-standalone", + "mod-wsgi-replica-set", + "mod-wsgi-embedded-mode-standalone", + "mod-wsgi-embedded-mode-replica-set", + ] + expansions = dict(MOD_WSGI_VERSION="4") + for python in MIN_MAX_PYTHON: + display_name = get_display_name("mod_wsgi", host, python=python) + variant = create_variant( + tasks, display_name, host=host, python=python, expansions=expansions + ) + variants.append(variant) + return variants + + +def generate_disable_test_commands_variants(): + host = "rhel8" + expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") + python = CPYTHONS[0] + display_name = get_display_name("Disable test commands", host, python=python) + tasks = [".latest"] + return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] + + +def generate_serverless_variants(): + host = "rhel8" + batchtime = BATCHTIME_WEEK + expansions = dict(test_serverless="true", AUTH="auth", SSL="ssl") + tasks = ["serverless_task_group"] + base_name = "Serverless" + return [ + create_variant( + tasks, + get_display_name(base_name, host, python=python), + host=host, + python=python, + expansions=expansions, + batchtime=batchtime, + ) + for python in MIN_MAX_PYTHON + ] + + ################## # Generate Config ################## -variants = generate_atlas_data_lake_variants() +variants = generate_serverless_variants() # print(len(variants)) generate_yaml(variants=variants) From 1ace0455d78adac5cf95e5414cdc4b61d90fe6d0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 15:47:18 -0500 Subject: [PATCH 1578/2111] PYTHON-4898 Ensure consistent versions of tests across hosts (#1961) --- .evergreen/config.yml | 446 ++++++++++++++++++++++---- .evergreen/scripts/generate_config.py | 11 +- 2 files changed, 382 insertions(+), 75 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ebc070f345..cad0863eaa 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -2172,7 +2172,7 @@ axes: PYTHON_BINARY: "C:/python/Python313/python.exe" buildvariants: -# Server Tests for RHEL8. +# Server Tests. - name: test-rhel8-py3.9-auth-ssl-cov tasks: - name: .standalone @@ -2339,8 +2339,6 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - -# Server tests for MacOS. - name: test-macos-py3.9-auth-ssl-sync tasks: - name: .standalone @@ -2351,8 +2349,32 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-py3.9-noauth-ssl-sync + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-py3.9-noauth-nossl-sync + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth NoSSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-py3.9-auth-ssl-async tasks: - name: .standalone @@ -2363,11 +2385,47 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-py3.9-noauth-ssl-async + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth SSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-py3.9-noauth-nossl-async + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth NoSSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 Auth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-macos-py3.13-noauth-ssl-sync tasks: - - name: .replica_set + - name: .sharded_cluster display_name: Test macOS py3.13 NoAuth SSL Sync run_on: - macos-14 @@ -2375,46 +2433,56 @@ buildvariants: AUTH: noauth SSL: ssl TEST_SUITES: default - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 SKIP_CSOT_TESTS: "true" -- name: test-macos-py3.13-noauth-ssl-async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +- name: test-macos-py3.13-noauth-nossl-sync tasks: - - name: .replica_set - display_name: Test macOS py3.13 NoAuth SSL Async + - name: .sharded_cluster + display_name: Test macOS py3.13 NoAuth NoSSL Sync run_on: - macos-14 expansions: AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +- name: test-macos-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 Auth SSL Async + run_on: + - macos-14 + expansions: + AUTH: auth SSL: ssl TEST_SUITES: default_async - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 SKIP_CSOT_TESTS: "true" -- name: test-macos-py3.9-noauth-nossl-sync + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +- name: test-macos-py3.13-noauth-ssl-async tasks: - name: .sharded_cluster - display_name: Test macOS py3.9 NoAuth NoSSL Sync + display_name: Test macOS py3.13 NoAuth SSL Async run_on: - macos-14 expansions: AUTH: noauth - SSL: nossl - TEST_SUITES: default - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + SSL: ssl + TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" -- name: test-macos-py3.9-noauth-nossl-async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +- name: test-macos-py3.13-noauth-nossl-async tasks: - name: .sharded_cluster - display_name: Test macOS py3.9 NoAuth NoSSL Async + display_name: Test macOS py3.13 NoAuth NoSSL Async run_on: - macos-14 expansions: AUTH: noauth SSL: nossl TEST_SUITES: default_async - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 SKIP_CSOT_TESTS: "true" - -# Server tests for macOS Arm64. + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-macos-arm64-py3.9-auth-ssl-sync tasks: - name: .standalone .6.0 @@ -2431,6 +2499,38 @@ buildvariants: TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-arm64-py3.9-noauth-ssl-sync + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-arm64-py3.9-noauth-nossl-sync + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-arm64-py3.9-auth-ssl-async tasks: - name: .standalone .6.0 @@ -2447,13 +2547,61 @@ buildvariants: TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-arm64-py3.9-noauth-ssl-async + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-arm64-py3.9-noauth-nossl-async + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: test-macos-arm64-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 Auth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-macos-arm64-py3.13-noauth-ssl-sync tasks: - - name: .replica_set .6.0 - - name: .replica_set .7.0 - - name: .replica_set .8.0 - - name: .replica_set .rapid - - name: .replica_set .latest + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest display_name: Test macOS Arm64 py3.13 NoAuth SSL Sync run_on: - macos-14-arm64 @@ -2463,46 +2611,62 @@ buildvariants: TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.13-noauth-ssl-async +- name: test-macos-arm64-py3.13-noauth-nossl-sync tasks: - - name: .replica_set .6.0 - - name: .replica_set .7.0 - - name: .replica_set .8.0 - - name: .replica_set .rapid - - name: .replica_set .latest - display_name: Test macOS Arm64 py3.13 NoAuth SSL Async + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Sync run_on: - macos-14-arm64 expansions: AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +- name: test-macos-arm64-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 Auth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: auth SSL: ssl TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.9-noauth-nossl-sync +- name: test-macos-arm64-py3.13-noauth-ssl-async tasks: - name: .sharded_cluster .6.0 - name: .sharded_cluster .7.0 - name: .sharded_cluster .8.0 - name: .sharded_cluster .rapid - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync + display_name: Test macOS Arm64 py3.13 NoAuth SSL Async run_on: - macos-14-arm64 expansions: AUTH: noauth - SSL: nossl - TEST_SUITES: default + SSL: ssl + TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.9-noauth-nossl-async + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 +- name: test-macos-arm64-py3.13-noauth-nossl-async tasks: - name: .sharded_cluster .6.0 - name: .sharded_cluster .7.0 - name: .sharded_cluster .8.0 - name: .sharded_cluster .rapid - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async + display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Async run_on: - macos-14-arm64 expansions: @@ -2510,9 +2674,7 @@ buildvariants: SSL: nossl TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - -# Server tests for Windows. + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-win64-py3.9-auth-ssl-sync tasks: - name: .standalone @@ -2523,8 +2685,32 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe +- name: test-win64-py3.9-noauth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/Python39/python.exe +- name: test-win64-py3.9-noauth-nossl-sync + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe - name: test-win64-py3.9-auth-ssl-async tasks: - name: .standalone @@ -2535,11 +2721,47 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe +- name: test-win64-py3.9-noauth-ssl-async + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe +- name: test-win64-py3.9-noauth-nossl-async + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/Python39/python.exe +- name: test-win64-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe - name: test-win64-py3.13-noauth-ssl-sync tasks: - - name: .replica_set + - name: .sharded_cluster display_name: Test Win64 py3.13 NoAuth SSL Sync run_on: - windows-64-vsMulti-small @@ -2547,44 +2769,56 @@ buildvariants: AUTH: noauth SSL: ssl TEST_SUITES: default - PYTHON_BINARY: C:/python/Python313/python.exe SKIP_CSOT_TESTS: "true" -- name: test-win64-py3.13-noauth-ssl-async + PYTHON_BINARY: C:/python/Python313/python.exe +- name: test-win64-py3.13-noauth-nossl-sync tasks: - - name: .replica_set - display_name: Test Win64 py3.13 NoAuth SSL Async + - name: .sharded_cluster + display_name: Test Win64 py3.13 NoAuth NoSSL Sync run_on: - windows-64-vsMulti-small expansions: AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe +- name: test-win64-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth SSL: ssl TEST_SUITES: default_async - PYTHON_BINARY: C:/python/Python313/python.exe SKIP_CSOT_TESTS: "true" -- name: test-win64-py3.9-noauth-nossl-sync + PYTHON_BINARY: C:/python/Python313/python.exe +- name: test-win64-py3.13-noauth-ssl-async tasks: - name: .sharded_cluster - display_name: Test Win64 py3.9 NoAuth NoSSL Sync + display_name: Test Win64 py3.13 NoAuth SSL Async run_on: - windows-64-vsMulti-small expansions: AUTH: noauth - SSL: nossl - TEST_SUITES: default - PYTHON_BINARY: C:/python/Python39/python.exe + SSL: ssl + TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" -- name: test-win64-py3.9-noauth-nossl-async + PYTHON_BINARY: C:/python/Python313/python.exe +- name: test-win64-py3.13-noauth-nossl-async tasks: - name: .sharded_cluster - display_name: Test Win64 py3.9 NoAuth NoSSL Async + display_name: Test Win64 py3.13 NoAuth NoSSL Async run_on: - windows-64-vsMulti-small expansions: AUTH: noauth SSL: nossl TEST_SUITES: default_async - PYTHON_BINARY: C:/python/Python39/python.exe SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe - name: test-win32-py3.9-auth-ssl-sync tasks: - name: .standalone @@ -2595,10 +2829,32 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default + SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/32/Python39/python.exe +- name: test-win32-py3.9-noauth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default SKIP_CSOT_TESTS: "true" - -# Server tests for Win32. + PYTHON_BINARY: C:/python/32/Python39/python.exe +- name: test-win32-py3.9-noauth-nossl-sync + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe - name: test-win32-py3.9-auth-ssl-async tasks: - name: .standalone @@ -2609,11 +2865,47 @@ buildvariants: AUTH: auth SSL: ssl TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/32/Python39/python.exe +- name: test-win32-py3.9-noauth-ssl-async + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe +- name: test-win32-py3.9-noauth-nossl-async + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe +- name: test-win32-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe - name: test-win32-py3.13-noauth-ssl-sync tasks: - - name: .replica_set + - name: .sharded_cluster display_name: Test Win32 py3.13 NoAuth SSL Sync run_on: - windows-64-vsMulti-small @@ -2621,44 +2913,56 @@ buildvariants: AUTH: noauth SSL: ssl TEST_SUITES: default - PYTHON_BINARY: C:/python/32/Python313/python.exe SKIP_CSOT_TESTS: "true" -- name: test-win32-py3.13-noauth-ssl-async + PYTHON_BINARY: C:/python/32/Python313/python.exe +- name: test-win32-py3.13-noauth-nossl-sync tasks: - - name: .replica_set - display_name: Test Win32 py3.13 NoAuth SSL Async + - name: .sharded_cluster + display_name: Test Win32 py3.13 NoAuth NoSSL Sync run_on: - windows-64-vsMulti-small expansions: AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe +- name: test-win32-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth SSL: ssl TEST_SUITES: default_async - PYTHON_BINARY: C:/python/32/Python313/python.exe SKIP_CSOT_TESTS: "true" -- name: test-win32-py3.9-noauth-nossl-sync + PYTHON_BINARY: C:/python/32/Python313/python.exe +- name: test-win32-py3.13-noauth-ssl-async tasks: - name: .sharded_cluster - display_name: Test Win32 py3.9 NoAuth NoSSL Sync + display_name: Test Win32 py3.13 NoAuth SSL Async run_on: - windows-64-vsMulti-small expansions: AUTH: noauth - SSL: nossl - TEST_SUITES: default - PYTHON_BINARY: C:/python/32/Python39/python.exe + SSL: ssl + TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" -- name: test-win32-py3.9-noauth-nossl-async + PYTHON_BINARY: C:/python/32/Python313/python.exe +- name: test-win32-py3.13-noauth-nossl-async tasks: - name: .sharded_cluster - display_name: Test Win32 py3.9 NoAuth NoSSL Async + display_name: Test Win32 py3.13 NoAuth NoSSL Async run_on: - windows-64-vsMulti-small expansions: AUTH: noauth SSL: nossl TEST_SUITES: default_async - PYTHON_BINARY: C:/python/32/Python39/python.exe SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe # Encryption tests. - name: encryption-rhel8-py3.9-auth-ssl diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index d94c6c02fc..5a682594e9 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -254,10 +254,13 @@ def create_server_variants() -> list[BuildVariant]: # Test a subset on each of the other platforms. for host in ("macos", "macos-arm64", "win64", "win32"): - for (python, (auth, ssl), topology), sync in product( - zip_cycle(MIN_MAX_PYTHON, AUTH_SSLS, TOPOLOGIES), SYNCS - ): + for ( + python, + sync, + (auth, ssl), + ) in product(MIN_MAX_PYTHON, SYNCS, AUTH_SSLS): test_suite = "default" if sync == "sync" else "default_async" + topology = TOPOLOGIES[0] if python == CPYTHONS[0] else TOPOLOGIES[-1] tasks = [f".{topology}"] # MacOS arm64 only works on server versions 6.0+ if host == "macos-arm64": @@ -610,6 +613,6 @@ def generate_serverless_variants(): # Generate Config ################## -variants = generate_serverless_variants() +variants = create_server_variants() # print(len(variants)) generate_yaml(variants=variants) From 6ca766e066577f9f334c52e0c1ea154e938b24eb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Oct 2024 18:41:17 -0500 Subject: [PATCH 1579/2111] PYTHON-4893 Use shrub.py for aws auth tests (#1959) --- .evergreen/config.yml | 135 ++++++++++++++++++-------- .evergreen/scripts/generate_config.py | 33 ++++++- 2 files changed, 126 insertions(+), 42 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index cad0863eaa..e357f02f2b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -3795,6 +3795,100 @@ buildvariants: SSL: ssl PYTHON_BINARY: /opt/python/3.13/bin/python3 +# AWS Auth tests. +- name: aws-auth-ubuntu-20-py3.9 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Ubuntu-20 py3.9 + run_on: + - ubuntu2004-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: aws-auth-ubuntu-20-py3.13 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Ubuntu-20 py3.13 + run_on: + - ubuntu2004-small + expansions: + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: aws-auth-win64-py3.9 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Win64 py3.9 + run_on: + - windows-64-vsMulti-small + expansions: + skip_ECS_auth_test: "true" + PYTHON_BINARY: C:/python/Python39/python.exe +- name: aws-auth-win64-py3.13 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Win64 py3.13 + run_on: + - windows-64-vsMulti-small + expansions: + skip_ECS_auth_test: "true" + PYTHON_BINARY: C:/python/Python313/python.exe +- name: aws-auth-macos-py3.9 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth macOS py3.9 + run_on: + - macos-14 + expansions: + skip_ECS_auth_test: "true" + skip_EC2_auth_test: "true" + skip_web_identity_auth_test: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 +- name: aws-auth-macos-py3.13 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth macOS py3.13 + run_on: + - macos-14 + expansions: + skip_ECS_auth_test: "true" + skip_EC2_auth_test: "true" + skip_web_identity_auth_test: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - matrix_name: "tests-fips" matrix_spec: platform: @@ -4237,47 +4331,6 @@ buildvariants: - name: testgcpoidc_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README -- matrix_name: "aws-auth-test" - matrix_spec: - platform: [ubuntu-20.04] - python-version: ["3.9"] - display_name: "MONGODB-AWS Auth ${platform} ${python-version}" - tasks: - - name: "aws-auth-test-4.4" - - name: "aws-auth-test-5.0" - - name: "aws-auth-test-6.0" - - name: "aws-auth-test-7.0" - - name: "aws-auth-test-8.0" - - name: "aws-auth-test-rapid" - - name: "aws-auth-test-latest" - -- matrix_name: "aws-auth-test-mac" - matrix_spec: - platform: [macos] - display_name: "MONGODB-AWS Auth ${platform} ${python-version-mac}" - tasks: - - name: "aws-auth-test-4.4" - - name: "aws-auth-test-5.0" - - name: "aws-auth-test-6.0" - - name: "aws-auth-test-7.0" - - name: "aws-auth-test-8.0" - - name: "aws-auth-test-rapid" - - name: "aws-auth-test-latest" - -- matrix_name: "aws-auth-test-windows" - matrix_spec: - platform: [windows] - python-version-windows: "*" - display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" - tasks: - - name: "aws-auth-test-4.4" - - name: "aws-auth-test-5.0" - - name: "aws-auth-test-6.0" - - name: "aws-auth-test-7.0" - - name: "aws-auth-test-8.0" - - name: "aws-auth-test-rapid" - - name: "aws-auth-test-latest" - - name: testgcpkms-variant display_name: "GCP KMS" run_on: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 5a682594e9..3f1ea724ed 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -54,6 +54,7 @@ class Host: HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32") HOSTS["macos"] = Host("macos", "macos-14", "macOS") HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64") +HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20") HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22") @@ -103,7 +104,7 @@ def get_python_binary(python: str, host: str) -> str: python = python.replace(".", "") return f"{base}/Python{python}/python.exe" - if host in ["rhel8", "ubuntu22"]: + if host in ["rhel8", "ubuntu22", "ubuntu20"]: return f"/opt/python/{python}/bin/python3" if host in ["macos", "macos-arm64"]: @@ -609,6 +610,36 @@ def generate_serverless_variants(): ] +def generate_aws_auth_variants(): + variants = [] + tasks = [ + "aws-auth-test-4.4", + "aws-auth-test-5.0", + "aws-auth-test-6.0", + "aws-auth-test-7.0", + "aws-auth-test-8.0", + "aws-auth-test-rapid", + "aws-auth-test-latest", + ] + + for host, python in product(["ubuntu20", "win64", "macos"], MIN_MAX_PYTHON): + expansions = dict() + if host != "ubuntu20": + expansions["skip_ECS_auth_test"] = "true" + if host == "macos": + expansions["skip_EC2_auth_test"] = "true" + expansions["skip_web_identity_auth_test"] = "true" + variant = create_variant( + tasks, + get_display_name("AWS Auth", host, python=python), + host=host, + python=python, + expansions=expansions, + ) + variants.append(variant) + return variants + + ################## # Generate Config ################## From 5f7afeaed6257ae740ce319a644c2aae083a0063 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Tue, 22 Oct 2024 13:07:56 -0400 Subject: [PATCH 1580/2111] PYTHON-4883 Add release date to changelog entries (#1952) --- doc/changelog.rst | 494 +++++++++++++++++++++++----------------------- 1 file changed, 252 insertions(+), 242 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4c1955d19d..29fddb7b5c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes in Version 4.11.0 -------------------------- +Changes in Version 4.11.0 (YYYY/MM/DD) +-------------------------------------- .. warning:: PyMongo 4.11 drops support for Python 3.8: Python 3.9+ or PyPy 3.9+ is now required. .. warning:: PyMongo 4.11 drops support for MongoDB 3.6. PyMongo now supports MongoDB 4.0+. @@ -32,8 +32,8 @@ in this release. .. _PyMongo 4.11 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40784 -Changes in Version 4.10.1 -------------------------- +Changes in Version 4.10.1 (2024/10/01) +-------------------------------------- Version 4.10.1 is a bug fix release. @@ -49,8 +49,8 @@ in this release. .. _PyMongo 4.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40788 -Changes in Version 4.10.0 -------------------------- +Changes in Version 4.10.0 (2024/09/30) +-------------------------------------- - Added provisional **(BETA)** support for a new Binary BSON subtype (9) used for efficient storage and retrieval of vectors: densely packed arrays of numbers, all of the same type. @@ -67,8 +67,8 @@ in this release. .. _PyMongo 4.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40553 -Changes in Version 4.9.2 -------------------------- +Changes in Version 4.9.2 (2024/10/02) +------------------------------------- - Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock. - Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured. @@ -83,8 +83,8 @@ in this release. .. _PyMongo 4.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40732 -Changes in Version 4.9.1 -------------------------- +Changes in Version 4.9.1 (2024/09/18) +------------------------------------- - Add missing documentation about the fact the async API is in beta state. @@ -97,8 +97,8 @@ in this release. .. _PyMongo 4.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40720 -Changes in Version 4.9.0 -------------------------- +Changes in Version 4.9 (2024/09/18) +----------------------------------- .. warning:: Driver support for MongoDB 3.6 reached end of life in April 2024. PyMongo 4.9 will be the last release to support MongoDB 3.6. @@ -173,8 +173,8 @@ in this release. .. _PyMongo 4.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39940 -Changes in Version 4.8.0 -------------------------- +Changes in Version 4.8.0 (2024/06/26) +------------------------------------- .. warning:: PyMongo 4.8 drops support for Python 3.7 and PyPy 3.8: Python 3.8+ or PyPy 3.9+ is now required. @@ -209,8 +209,8 @@ in this release. .. _PyMongo 4.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37057 -Changes in Version 4.7.3 -------------------------- +Changes in Version 4.7.3 (2024/06/04) +------------------------------------- Version 4.7.3 has further fixes for lazily loading modules. @@ -226,8 +226,8 @@ in this release. .. _PyMongo 4.7.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39865 -Changes in Version 4.7.2 -------------------------- +Changes in Version 4.7.2 (2024/05/07) +------------------------------------- Version 4.7.2 fixes a bug introduced in 4.7.0: @@ -242,8 +242,8 @@ in this release. .. _PyMongo 4.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39710 -Changes in Version 4.7.1 -------------------------- +Changes in Version 4.7.1 (2024/04/30) +------------------------------------- Version 4.7.1 fixes a bug introduced in 4.7.0: @@ -259,8 +259,8 @@ in this release. .. _PyMongo 4.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39680 -Changes in Version 4.7 ------------------------- +Changes in Version 4.7.0 (2024/04/24) +------------------------------------- PyMongo 4.7 brings a number of improvements including: @@ -355,8 +355,8 @@ Unavoidable breaking changes - The "aws" extra now requires minimum version of ``1.1.0`` for ``pymongo_auth_aws``. -Changes in Version 4.6.3 ------------------------- +Changes in Version 4.6.3 (2024/03/27) +------------------------------------- PyMongo 4.6.3 fixes the following bug: @@ -370,8 +370,8 @@ in this release. .. _PyMongo 4.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=38360 -Changes in Version 4.6.2 ------------------------- +Changes in Version 4.6.2 (2024/02/21) +------------------------------------- PyMongo 4.6.2 fixes the following bug: @@ -386,8 +386,8 @@ in this release. .. _PyMongo 4.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37906 -Changes in Version 4.6.1 ------------------------- +Changes in Version 4.6.1 (2023/11/29) +------------------------------------- PyMongo 4.6.1 fixes the following bug: @@ -401,8 +401,8 @@ in this release. .. _PyMongo 4.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37138 -Changes in Version 4.6 ----------------------- +Changes in Version 4.6.0 (2023/11/01) +------------------------------------- PyMongo 4.6 brings a number of improvements including: @@ -441,8 +441,8 @@ in this release. .. _PyMongo 4.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36542 -Changes in Version 4.5 ----------------------- +Changes in Version 4.5.0 (2023/08/22) +------------------------------------- PyMongo 4.5 brings a number of improvements including: @@ -477,8 +477,8 @@ in this release. .. _PYTHON-3824: https://jira.mongodb.org/browse/PYTHON-3824 .. _PYTHON-3846: https://jira.mongodb.org/browse/PYTHON-3846 -Changes in Version 4.4.1 ------------------------- +Changes in Version 4.4.1 (2023/07/13) +------------------------------------- Version 4.4.1 fixes the following bugs: @@ -497,8 +497,8 @@ in this release. .. _PYTHON-3800: https://jira.mongodb.org/browse/PYTHON-3800 .. _PyMongo 4.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36329 -Changes in Version 4.4 ------------------------ +Changes in Version 4.4.0 (2023/06/21) +------------------------------------- PyMongo 4.4 brings a number of improvements including: @@ -539,8 +539,8 @@ in this release. .. _PYTHON-3717: https://jira.mongodb.org/browse/PYTHON-3717 .. _PYTHON-3718: https://jira.mongodb.org/browse/PYTHON-3718 -Changes in Version 4.3.3 ------------------------- +Changes in Version 4.3.3 (2022/11/17) +------------------------------------- Version 4.3.3 documents support for the following: @@ -567,8 +567,8 @@ in this release. .. _PYTHON-3508: https://jira.mongodb.org/browse/PYTHON-3508 .. _PyMongo 4.3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34709 -Changes in Version 4.3 (4.3.2) ------------------------------- +Changes in Version 4.3.2 (2022/10/18) +------------------------------------- Note: We withheld uploading tags 4.3.0 and 4.3.1 to PyPI due to a version handling error and a necessary documentation update. @@ -624,8 +624,8 @@ in this release. .. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389 .. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425 -Changes in Version 4.2 ----------------------- +Changes in Version 4.2.0 (2022/07/20) +------------------------------------- .. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required. @@ -713,8 +713,8 @@ in this release. .. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 -Changes in Version 4.1.1 -------------------------- +Changes in Version 4.1.1 (2022/04/13) +------------------------------------- Version 4.1.1 fixes a number of bugs: @@ -739,8 +739,8 @@ in this release. .. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222 .. _PyMongo 4.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33290 -Changes in Version 4.1 ----------------------- +Changes in Version 4.1 (2021/12/07) +----------------------------------- .. warning:: PyMongo 4.1 drops support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. @@ -794,8 +794,18 @@ in this release. .. _PYTHON-3186: https://jira.mongodb.org/browse/PYTHON-3186 .. _pymongo-stubs: https://github.com/mongodb-labs/pymongo-stubs -Changes in Version 4.0 ----------------------- +Changes in Version 4.0.2 (2022/03/03) +------------------------------------- + +- No changes + +Changes in Version 4.0.1 (2021/12/07) +------------------------------------- + +- No changes + +Changes in Version 4.0 (2021/11/29) +----------------------------------- .. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. @@ -1014,8 +1024,8 @@ in this release. .. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 .. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst -Changes in Version 3.13.0 -------------------------- +Changes in Version 3.13.0 (2022/11/01) +-------------------------------------- Version 3.13 provides an upgrade path to PyMongo 4.x. Most of the API changes from PyMongo 4.0 have been backported in a backward compatible way, allowing @@ -1087,8 +1097,8 @@ in this release. .. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222 .. _PyMongo 3.13.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31570 -Changes in Version 3.12.3 -------------------------- +Changes in Version 3.12.3 (2021/12/07) +-------------------------------------- Issues Resolved ............... @@ -1102,8 +1112,8 @@ in this release. .. _PYTHON-3028: https://jira.mongodb.org/browse/PYTHON-3028 .. _PyMongo 3.12.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32505 -Changes in Version 3.12.2 -------------------------- +Changes in Version 3.12.2 (2021/11/29) +-------------------------------------- Issues Resolved ............... @@ -1122,8 +1132,8 @@ in this release. .. _PYTHON-3017: https://jira.mongodb.org/browse/PYTHON-3017 .. _PyMongo 3.12.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32310 -Changes in Version 3.12.1 -------------------------- +Changes in Version 3.12.1 (2021/10/19) +-------------------------------------- Issues Resolved ............... @@ -1143,8 +1153,8 @@ in this release. .. _PYTHON-2866: https://jira.mongodb.org/browse/PYTHON-2866 .. _PyMongo 3.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31527 -Changes in Version 3.12.0 -------------------------- +Changes in Version 3.12.0 (2021/07/13) +-------------------------------------- .. warning:: PyMongo 3.12.0 deprecates support for Python 2.7, 3.4 and 3.5. These Python versions will not be supported by PyMongo 4. @@ -1236,8 +1246,8 @@ in this release. .. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594 -Changes in Version 3.11.3 -------------------------- +Changes in Version 3.11.3 (2021/02/02) +-------------------------------------- Issues Resolved ............... @@ -1251,8 +1261,8 @@ in this release. .. _PYTHON-2452: https://jira.mongodb.org/browse/PYTHON-2452 .. _PyMongo 3.11.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30355 -Changes in Version 3.11.2 -------------------------- +Changes in Version 3.11.2 (2020/12/02) +-------------------------------------- Issues Resolved ............... @@ -1279,8 +1289,8 @@ in this release. .. _PYTHON-2443: https://jira.mongodb.org/browse/PYTHON-2443 .. _PyMongo 3.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30315 -Changes in Version 3.11.1 -------------------------- +Changes in Version 3.11.1 (2020/11/17) +-------------------------------------- Version 3.11.1 adds support for Python 3.9 and includes a number of bugfixes. Highlights include: @@ -1313,8 +1323,8 @@ in this release. .. _PyMongo 3.11.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29997 -Changes in Version 3.11.0 -------------------------- +Changes in Version 3.11.0 (2020/07/30) +-------------------------------------- Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. Highlights include: @@ -1408,8 +1418,8 @@ in this release. .. _PyMongo 3.11.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=24799 -Changes in Version 3.10.1 -------------------------- +Changes in Version 3.10.1 (2020/01/07) +-------------------------------------- Version 3.10.1 fixes the following issues discovered since the release of 3.10.0: @@ -1427,8 +1437,8 @@ in this release. .. _PyMongo 3.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=25039 -Changes in Version 3.10.0 -------------------------- +Changes in Version 3.10.0 (2019/12/10) +-------------------------------------- Version 3.10 includes a number of improvements and bug fixes. Highlights include: @@ -1454,8 +1464,8 @@ in this release. .. _PyMongo 3.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=23944 -Changes in Version 3.9.0 ------------------------- +Changes in Version 3.9.0 (2019/08/13) +------------------------------------- Version 3.9 adds support for MongoDB 4.2. Highlights include: @@ -1558,8 +1568,8 @@ in this release. .. _PyMongo 3.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21787 -Changes in Version 3.8.0 ------------------------- +Changes in Version 3.8.0 (2019/04/22) +------------------------------------- .. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install Python 2.7 or newer from `Red Hat Software Collections @@ -1637,8 +1647,8 @@ in this release. .. _PyMongo 3.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19904 -Changes in Version 3.7.2 ------------------------- +Changes in Version 3.7.2 (2018/10/10) +------------------------------------- Version 3.7.2 fixes a few issues discovered since the release of 3.7.1. @@ -1661,8 +1671,8 @@ in this release. .. _PyMongo 3.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21519 -Changes in Version 3.7.1 ------------------------- +Changes in Version 3.7.1 (2018/07/16) +------------------------------------- Version 3.7.1 fixes a few issues discovered since the release of 3.7.0. @@ -1681,8 +1691,8 @@ in this release. .. _PyMongo 3.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21096 -Changes in Version 3.7.0 ------------------------- +Changes in Version 3.7.0 (2018/06/26) +------------------------------------- Version 3.7 adds support for MongoDB 4.0. Highlights include: @@ -1791,8 +1801,8 @@ in this release. .. _PyMongo 3.7 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19287 -Changes in Version 3.6.1 ------------------------- +Changes in Version 3.6.1 (2018/03/01) +------------------------------------- Version 3.6.1 fixes bugs reported since the release of 3.6.0: @@ -1817,8 +1827,8 @@ in this release. .. _PyMongo 3.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19438 -Changes in Version 3.6.0 ------------------------- +Changes in Version 3.6.0 (2017/08/23) +------------------------------------- Version 3.6 adds support for MongoDB 3.6, drops support for CPython 3.3 (PyPy3 is still supported), and drops support for MongoDB versions older than 2.6. If @@ -1889,8 +1899,8 @@ in this release. .. _PyMongo 3.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18043 -Changes in Version 3.5.1 ------------------------- +Changes in Version 3.5.1 (2017/08/23) +------------------------------------- Version 3.5.1 fixes bugs reported since the release of 3.5.0: @@ -1908,8 +1918,8 @@ in this release. .. _PyMongo 3.5.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18721 -Changes in Version 3.5 ----------------------- +Changes in Version 3.5.0 (2017/08/08) +------------------------------------- Version 3.5 implements a number of improvements and bug fixes: @@ -1999,8 +2009,8 @@ in this release. .. _PyMongo 3.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17590 -Changes in Version 3.4 ----------------------- +Changes in Version 3.4.0 (2016/11/29) +------------------------------------- Version 3.4 implements the new server features introduced in MongoDB 3.4 and a whole lot more: @@ -2071,8 +2081,8 @@ in this release. .. _PyMongo 3.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16594 -Changes in Version 3.3.1 ------------------------- +Changes in Version 3.3.1 (2016/10/27) +------------------------------------- Version 3.3.1 fixes a memory leak when decoding elements inside of a :class:`~bson.raw_bson.RawBSONDocument`. @@ -2085,8 +2095,8 @@ in this release. .. _PyMongo 3.3.1 release notes in Jira: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17636 -Changes in Version 3.3 ----------------------- +Changes in Version 3.3.0 (2016/07/12) +------------------------------------- Version 3.3 adds the following major new features: @@ -2112,8 +2122,8 @@ in this release. .. _PyMongo 3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16005 -Changes in Version 3.2.2 ------------------------- +Changes in Version 3.2.2 (2016/03/15) +------------------------------------- Version 3.2.2 fixes a few issues reported since the release of 3.2.1, including a fix for using the ``connect`` option in the MongoDB URI and support for setting @@ -2128,8 +2138,8 @@ in this release. .. _PyMongo 3.2.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16538 -Changes in Version 3.2.1 ------------------------- +Changes in Version 3.2.1 (2016/02/02) +------------------------------------- Version 3.2.1 fixes a few issues reported since the release of 3.2, including running the mapreduce command twice when calling the @@ -2146,8 +2156,8 @@ in this release. .. _PyMongo 3.2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16312 -Changes in Version 3.2 ----------------------- +Changes in Version 3.2 (2015/12/07) +----------------------------------- Version 3.2 implements the new server features introduced in MongoDB 3.2. @@ -2179,8 +2189,8 @@ in this release. .. _PyMongo 3.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15612 -Changes in Version 3.1.1 ------------------------- +Changes in Version 3.1.1 (2015/11/17) +------------------------------------- Version 3.1.1 fixes a few issues reported since the release of 3.1, including a regression in error handling for oversize command documents and interrupt @@ -2194,8 +2204,8 @@ in this release. .. _PyMongo 3.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16211 -Changes in Version 3.1 ----------------------- +Changes in Version 3.1 (2015/11/02) +----------------------------------- Version 3.1 implements a few new features and fixes bugs reported since the release of 3.0.3. @@ -2226,8 +2236,8 @@ in this release. .. _PyMongo 3.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14796 -Changes in Version 3.0.3 ------------------------- +Changes in Version 3.0.3 (2015/06/30) +------------------------------------- Version 3.0.3 fixes issues reported since the release of 3.0.2, including a feature breaking bug in the GSSAPI implementation. @@ -2240,8 +2250,8 @@ in this release. .. _PyMongo 3.0.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15528 -Changes in Version 3.0.2 ------------------------- +Changes in Version 3.0.2 (2015/05/12) +------------------------------------- Version 3.0.2 fixes issues reported since the release of 3.0.1, most importantly a bug that could route operations to replica set members @@ -2258,8 +2268,8 @@ in this release. .. _PyMongo 3.0.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15430 -Changes in Version 3.0.1 ------------------------- +Changes in Version 3.0.1 (2015/04/21) +------------------------------------- Version 3.0.1 fixes issues reported since the release of 3.0, most importantly a bug in GridFS.delete that could prevent file chunks from @@ -2273,8 +2283,8 @@ in this release. .. _PyMongo 3.0.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15322 -Changes in Version 3.0 ----------------------- +Changes in Version 3.0 (2015/04/07) +----------------------------------- PyMongo 3.0 is a partial rewrite of PyMongo bringing a large number of improvements: @@ -2719,8 +2729,8 @@ in this release. .. _PyMongo 3.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12501 -Changes in Version 2.9.5 ------------------------- +Changes in Version 2.9.5 (2017/06/30) +------------------------------------- Version 2.9.5 works around ssl module deprecations in Python 3.6, and expected future ssl module deprecations. It also fixes bugs found since the release of @@ -2742,8 +2752,8 @@ in this release. .. _PyMongo 2.9.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17605 -Changes in Version 2.9.4 ------------------------- +Changes in Version 2.9.4 (2016/09/30) +------------------------------------- Version 2.9.4 fixes issues reported since the release of 2.9.3. @@ -2761,8 +2771,8 @@ in this release. .. _PyMongo 2.9.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16885 -Changes in Version 2.9.3 ------------------------- +Changes in Version 2.9.3 (2016/03/15) +------------------------------------- Version 2.9.3 fixes a few issues reported since the release of 2.9.2 including thread safety issues in :meth:`~pymongo.collection.Collection.ensure_index`, @@ -2777,8 +2787,8 @@ in this release. .. _PyMongo 2.9.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16539 -Changes in Version 2.9.2 ------------------------- +Changes in Version 2.9.2 (2016/02/16) +------------------------------------- Version 2.9.2 restores Python 3.1 support, which was broken in PyMongo 2.8. It improves an error message when decoding BSON as well as fixes a couple other @@ -2795,8 +2805,8 @@ in this release. .. _PyMongo 2.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16303 -Changes in Version 2.9.1 ------------------------- +Changes in Version 2.9.1 (2015/11/17) +------------------------------------- Version 2.9.1 fixes two interrupt handling issues in the C extensions and adapts a test case for a behavior change in MongoDB 3.2. @@ -2809,8 +2819,8 @@ in this release. .. _PyMongo 2.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16208 -Changes in Version 2.9 ----------------------- +Changes in Version 2.9 (2015/09/30) +----------------------------------- Version 2.9 provides an upgrade path to PyMongo 3.x. Most of the API changes from PyMongo 3.0 have been backported in a backward compatible way, allowing @@ -2887,8 +2897,8 @@ in this release. .. _PyMongo 2.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14795 -Changes in Version 2.8.1 ------------------------- +Changes in Version 2.8.1 (2015/05/11) +------------------------------------- Version 2.8.1 fixes a number of issues reported since the release of PyMongo 2.8. It is a recommended upgrade for all users of PyMongo 2.x. @@ -2901,8 +2911,8 @@ in this release. .. _PyMongo 2.8.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15324 -Changes in Version 2.8 ----------------------- +Changes in Version 2.8 (2015/01/28) +----------------------------------- Version 2.8 is a major release that provides full support for MongoDB 3.0 and fixes a number of bugs. @@ -2951,8 +2961,8 @@ in this release. .. _PyMongo 2.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14223 -Changes in Version 2.7.2 ------------------------- +Changes in Version 2.7.2 (2014/07/29) +------------------------------------- Version 2.7.2 includes fixes for upsert reporting in the bulk API for MongoDB versions previous to 2.6, a regression in how son manipulators are applied in @@ -2968,8 +2978,8 @@ in this release. .. _PyMongo 2.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14005 -Changes in Version 2.7.1 ------------------------- +Changes in Version 2.7.1 (2014/05/23) +------------------------------------- Version 2.7.1 fixes a number of issues reported since the release of 2.7, most importantly a fix for creating indexes and manipulating users through @@ -2983,8 +2993,8 @@ in this release. .. _PyMongo 2.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=13823 -Changes in Version 2.7 ----------------------- +Changes in Version 2.7 (2014/04/03) +----------------------------------- PyMongo 2.7 is a major release with a large number of new features and bug fixes. Highlights include: @@ -3020,8 +3030,8 @@ in this release. .. _PyMongo 2.7 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12892 -Changes in Version 2.6.3 ------------------------- +Changes in Version 2.6.3 (2013/10/11) +------------------------------------- Version 2.6.3 fixes issues reported since the release of 2.6.2, most importantly a semaphore leak when a connection to the server fails. @@ -3034,8 +3044,8 @@ in this release. .. _PyMongo 2.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=13098 -Changes in Version 2.6.2 ------------------------- +Changes in Version 2.6.2 (2013/09/06) +------------------------------------- Version 2.6.2 fixes a :exc:`TypeError` problem when max_pool_size=None is used in Python 3. @@ -3048,8 +3058,8 @@ in this release. .. _PyMongo 2.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12910 -Changes in Version 2.6.1 ------------------------- +Changes in Version 2.6.1 (2013/09/03) +------------------------------------- Version 2.6.1 fixes a reference leak in the :meth:`~pymongo.collection.Collection.insert` method. @@ -3062,8 +3072,8 @@ in this release. .. _PyMongo 2.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12905 -Changes in Version 2.6 ----------------------- +Changes in Version 2.6 (2013/08/19) +----------------------------------- Version 2.6 includes some frequently requested improvements and adds support for some early MongoDB 2.6 features. @@ -3111,8 +3121,8 @@ in this release. .. _PyMongo 2.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12380 -Changes in Version 2.5.2 ------------------------- +Changes in Version 2.5.2 (2013/06/01) +------------------------------------- Version 2.5.2 fixes a NULL pointer dereference issue when decoding an invalid :class:`~bson.dbref.DBRef`. @@ -3125,8 +3135,8 @@ in this release. .. _PyMongo 2.5.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12581 -Changes in Version 2.5.1 ------------------------- +Changes in Version 2.5.1 (2013/05/13) +------------------------------------- Version 2.5.1 is a minor release that fixes issues discovered after the release of 2.5. Most importantly, this release addresses some race @@ -3140,8 +3150,8 @@ in this release. .. _PyMongo 2.5.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12484 -Changes in Version 2.5 ----------------------- +Changes in Version 2.5 (2013/03/22) +----------------------------------- Version 2.5 includes changes to support new features in MongoDB 2.4. @@ -3164,8 +3174,8 @@ in this release. .. _PyMongo 2.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11981 -Changes in Version 2.4.2 ------------------------- +Changes in Version 2.4.2 (2013/01/23) +------------------------------------- Version 2.4.2 is a minor release that fixes issues discovered after the release of 2.4.1. Most importantly, PyMongo will no longer select a replica @@ -3179,8 +3189,8 @@ in this release. .. _PyMongo 2.4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12299 -Changes in Version 2.4.1 ------------------------- +Changes in Version 2.4.1 (2012/12/06) +------------------------------------- Version 2.4.1 is a minor release that fixes issues discovered after the release of 2.4. Most importantly, this release fixes a regression using @@ -3195,8 +3205,8 @@ in this release. .. _PyMongo 2.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12286 -Changes in Version 2.4 ----------------------- +Changes in Version 2.4 (2012/11/27) +----------------------------------- Version 2.4 includes a few important new features and a large number of bug fixes. @@ -3245,8 +3255,8 @@ in this release. .. _PyMongo 2.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11485 -Changes in Version 2.3 ----------------------- +Changes in Version 2.3 (2012/08/29) +----------------------------------- Version 2.3 adds support for new features and behavior changes in MongoDB 2.2. @@ -3279,8 +3289,8 @@ in this release. .. _PyMongo 2.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11146 -Changes in Version 2.2.1 ------------------------- +Changes in Version 2.2.1 (2012/07/06) +------------------------------------- Version 2.2.1 is a minor release that fixes issues discovered after the release of 2.2. Most importantly, this release fixes an incompatibility @@ -3295,8 +3305,8 @@ in this release. .. _PyMongo 2.2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11185 -Changes in Version 2.2 ----------------------- +Changes in Version 2.2 (2012/04/30) +----------------------------------- Version 2.2 adds a few more frequently requested features and fixes a number of bugs. @@ -3340,8 +3350,8 @@ in this release. .. _PyMongo 2.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10584 -Changes in Version 2.1.1 ------------------------- +Changes in Version 2.1.1 (2012/01/04) +------------------------------------- Version 2.1.1 is a minor release that fixes a few issues discovered after the release of 2.1. You can now use @@ -3360,8 +3370,8 @@ in this release. .. _PyMongo 2.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?version=11081&styleName=Html&projectId=10004 -Changes in Version 2.1 ----------------------- +Changes in Version 2.1 (2011/12/07) +----------------------------------- Version 2.1 adds a few frequently requested features and includes the usual round of bug fixes and improvements. @@ -3403,8 +3413,8 @@ See the `PyMongo 2.1 release notes in JIRA`_ for the list of resolved issues in .. _PyMongo 2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10583 -Changes in Version 2.0.1 ------------------------- +Changes in Version 2.0.1 (2011/08/15) +------------------------------------- Version 2.0.1 fixes a regression in :class:`~gridfs.grid_file.GridIn` when writing pre-chunked strings. Thanks go to Alexey Borzenkov for reporting the @@ -3416,8 +3426,8 @@ Issues Resolved - `PYTHON-271 `_: Regression in GridFS leads to serious loss of data. -Changes in Version 2.0 ----------------------- +Changes in Version 2.0 (2011/08/05) +----------------------------------- Version 2.0 adds a large number of features and fixes a number of issues. @@ -3474,8 +3484,8 @@ See the `PyMongo 2.0 release notes in JIRA`_ for the list of resolved issues in .. _PyMongo 2.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10274 -Changes in Version 1.11 ------------------------ +Changes in Version 1.11 (2011/05/05) +------------------------------------ Version 1.11 adds a few new features and fixes a few more bugs. @@ -3528,8 +3538,8 @@ Issues resolved - `PYTHON-138 `_: Find method for GridFS -Changes in Version 1.10.1 -------------------------- +Changes in Version 1.10.1 (2011/04/07) +-------------------------------------- Version 1.10.1 is primarily a bugfix release. It fixes a regression in version 1.10 that broke pickling of ObjectIds. A number of other bugs @@ -3576,8 +3586,8 @@ Issues resolved - `PYTHON-113 `_: Redunducy in MasterSlaveConnection -Changes in Version 1.10 ------------------------ +Changes in Version 1.10 (2011/03/30) +------------------------------------ Version 1.10 includes changes to support new features in MongoDB 1.8.x. Highlights include a modified map/reduce API including an inline map/reduce @@ -3617,8 +3627,8 @@ Issues resolved - PYTHON-166: Fixes a concurrency issue. - PYTHON-158: Add code and err string to ``db assertion`` messages. -Changes in Version 1.9 ----------------------- +Changes in Version 1.9 (2010/09/28) +----------------------------------- Version 1.9 adds a new package to the PyMongo distribution, :mod:`bson`. :mod:`bson` contains all of the `BSON @@ -3699,8 +3709,8 @@ rather than :class:`pymongo.errors.PyMongoError`. mode. - added :class:`uuid` support to :mod:`~bson.json_util`. -Changes in Version 1.8.1 ------------------------- +Changes in Version 1.8.1 (2010/08/13) +------------------------------------- - fixed a typo in the C extension that could cause safe-mode operations to report a failure (:class:`SystemError`) even when none @@ -3708,8 +3718,8 @@ Changes in Version 1.8.1 - added a :meth:`__ne__` implementation to any class where we define :meth:`__eq__`. -Changes in Version 1.8 ----------------------- +Changes in Version 1.8 (2010/08/05) +----------------------------------- Version 1.8 adds support for connecting to replica sets, specifying per-operation values for ``w`` and ``wtimeout``, and decoding to @@ -3749,8 +3759,8 @@ timezone-aware datetimes. :class:`~pymongo.errors.OperationFailure` exceptions. - fixed serialization of int and float subclasses in the C extension. -Changes in Version 1.7 ----------------------- +Changes in Version 1.7 (2010/06/17) +----------------------------------- Version 1.7 is a recommended upgrade for all PyMongo users. The full release notes are below, and some more in depth discussion of the @@ -3799,8 +3809,8 @@ highlights is `here - don't transparently map ``"filename"`` key to :attr:`name` attribute for GridFS. -Changes in Version 1.6 ----------------------- +Changes in Version 1.6 (2010/04/14) +----------------------------------- The biggest change in version 1.6 is a complete re-implementation of :mod:`gridfs` with a lot of improvements over the old @@ -3821,13 +3831,13 @@ to be modified before upgrading to 1.6. on non-existent collections. - disallow empty bulk inserts. -Changes in Version 1.5.2 ------------------------- +Changes in Version 1.5.2 (2010/03/31) +------------------------------------- - fixed response handling to ignore unknown response flags in queries. - handle server versions containing '-pre-'. -Changes in Version 1.5.1 ------------------------- +Changes in Version 1.5.1 (2010/03/17) +------------------------------------- - added :data:`~gridfs.grid_file.GridFile._id` property for :class:`~gridfs.grid_file.GridFile` instances. - fix for making a :class:`~pymongo.connection.Connection` (with @@ -3840,8 +3850,8 @@ Changes in Version 1.5.1 - improvements to Python code caching in C extension - should improve behavior on mod_wsgi. -Changes in Version 1.5 ----------------------- +Changes in Version 1.5 (2010/03/10) +----------------------------------- - added subtype constants to :mod:`~bson.binary` module. - DEPRECATED ``options`` argument to :meth:`~pymongo.collection.Collection` and @@ -3875,8 +3885,8 @@ Changes in Version 1.5 - added :class:`~gridfs.errors.GridFSError` as base class for :mod:`gridfs` exceptions. -Changes in Version 1.4 ----------------------- +Changes in Version 1.4 (2010/01/17) +----------------------------------- Perhaps the most important change in version 1.4 is that we have decided to **no longer support Python 2.3**. The most immediate reason @@ -3937,8 +3947,8 @@ Other changes: - allow the NULL byte in strings and disallow it in key names or regex patterns -Changes in Version 1.3 ----------------------- +Changes in Version 1.3 (2009/12/16) +----------------------------------- - DEPRECATED running :meth:`~pymongo.collection.Collection.group` as :meth:`~pymongo.database.Database.eval`, also changed default for :meth:`~pymongo.collection.Collection.group` to running as a command @@ -3963,8 +3973,8 @@ Changes in Version 1.3 usual, as it carries some performance implications. - added :meth:`~pymongo.connection.Connection.disconnect` -Changes in Version 1.2.1 ------------------------- +Changes in Version 1.2.1 (2009/12/10) +------------------------------------- - added :doc:`changelog` to docs - added ``setup.py doc --test`` to run doctests for tutorial, examples - moved most examples to Sphinx docs (and remove from *examples/* @@ -3975,8 +3985,8 @@ Changes in Version 1.2.1 characters - allow :class:`unicode` instances for :class:`~bson.objectid.ObjectId` init -Changes in Version 1.2 ----------------------- +Changes in Version 1.2 (2009/12/09) +----------------------------------- - ``spec`` parameter for :meth:`~pymongo.collection.Collection.remove` is now optional to allow for deleting all documents in a :class:`~pymongo.collection.Collection` @@ -4002,15 +4012,15 @@ Changes in Version 1.2 - some minor fixes for installation process - added support for datetime and regex in :mod:`~bson.json_util` -Changes in Version 1.1.2 ------------------------- +Changes in Version 1.1.2 (2009/11/23) +------------------------------------- - improvements to :meth:`~pymongo.collection.Collection.insert` speed (using C for insert message creation) - use random number for request_id - fix some race conditions with :class:`~pymongo.errors.AutoReconnect` -Changes in Version 1.1.1 ------------------------- +Changes in Version 1.1.1 (2009/11/14) +------------------------------------- - added ``multi`` parameter for :meth:`~pymongo.collection.Collection.update` - fix unicode regex patterns with C extension @@ -4023,8 +4033,8 @@ Changes in Version 1.1.1 to ``True`` due to performance regression - switch documentation to Sphinx -Changes in Version 1.1 ----------------------- +Changes in Version 1.1 (2009/10/21) +----------------------------------- - added :meth:`__hash__` for :class:`~bson.dbref.DBRef` and :class:`~bson.objectid.ObjectId` - bulk :meth:`~pymongo.collection.Collection.insert` works with any @@ -4039,8 +4049,8 @@ Changes in Version 1.1 - added ``safe`` parameter for :meth:`~pymongo.collection.Collection.remove` - added ``tailable`` parameter for :meth:`~pymongo.collection.Collection.find` -Changes in Version 1.0 ----------------------- +Changes in Version 1.0 (2009/09/30) +----------------------------------- - fixes for :class:`~pymongo.master_slave_connection.MasterSlaveConnection` - added ``finalize`` parameter for :meth:`~pymongo.collection.Collection.group` @@ -4050,17 +4060,17 @@ Changes in Version 1.0 :meth:`~pymongo.cursor.Cursor.__len__` for :class:`~pymongo.cursor.Cursor` instances -Changes in Version 0.16 ------------------------ +Changes in Version 0.16 (2009/09/16) +------------------------------------ - support for encoding/decoding :class:`uuid.UUID` instances - fix for :meth:`~pymongo.cursor.Cursor.explain` with limits -Changes in Version 0.15.2 -------------------------- +Changes in Version 0.15.2 (2009/09/09) +-------------------------------------- - documentation changes only -Changes in Version 0.15.1 -------------------------- +Changes in Version 0.15.1 (2009/09/02) +-------------------------------------- - various performance improvements - API CHANGE no longer need to specify direction for :meth:`~pymongo.collection.Collection.create_index` and @@ -4069,8 +4079,8 @@ Changes in Version 0.15.1 - support for encoding :class:`tuple` instances as :class:`list` instances -Changes in Version 0.15 ------------------------ +Changes in Version 0.15 (2009/08/26) +------------------------------------ - fix string representation of :class:`~bson.objectid.ObjectId` instances - added ``timeout`` parameter for @@ -4078,25 +4088,25 @@ Changes in Version 0.15 - allow scope for ``reduce`` function in :meth:`~pymongo.collection.Collection.group` -Changes in Version 0.14.2 -------------------------- +Changes in Version 0.14.2 (2009/08/24) +-------------------------------------- - minor bugfixes -Changes in Version 0.14.1 -------------------------- +Changes in Version 0.14.1 (2009/08/21) +-------------------------------------- - :meth:`~gridfs.grid_file.GridFile.seek` and :meth:`~gridfs.grid_file.GridFile.tell` for (read mode) :class:`~gridfs.grid_file.GridFile` instances -Changes in Version 0.14 ------------------------ +Changes in Version 0.14 (2009/08/19) +------------------------------------ - support for long in :class:`~bson.BSON` - added :meth:`~pymongo.collection.Collection.rename` - added ``snapshot`` parameter for :meth:`~pymongo.collection.Collection.find` -Changes in Version 0.13 ------------------------ +Changes in Version 0.13 (2009/07/29) +------------------------------------ - better :class:`~pymongo.master_slave_connection.MasterSlaveConnection` support @@ -4106,38 +4116,38 @@ Changes in Version 0.13 - DEPRECATED passing an index name to :meth:`~pymongo.cursor.Cursor.hint` -Changes in Version 0.12 ------------------------ +Changes in Version 0.12 (2009/07/08) +------------------------------------ - improved :class:`~bson.objectid.ObjectId` generation - added :class:`~pymongo.errors.AutoReconnect` exception for when reconnection is possible - make :mod:`gridfs` thread-safe - fix for :mod:`gridfs` with non :class:`~bson.objectid.ObjectId` ``_id`` -Changes in Version 0.11.3 -------------------------- +Changes in Version 0.11.3 (2009/06/18) +-------------------------------------- - don't allow NULL bytes in string encoder - fixes for Python 2.3 -Changes in Version 0.11.2 -------------------------- +Changes in Version 0.11.2 (2009/06/08) +-------------------------------------- - PEP 8 - updates for :meth:`~pymongo.collection.Collection.group` - VS build -Changes in Version 0.11.1 -------------------------- +Changes in Version 0.11.1 (2009/06/04) +-------------------------------------- - fix for connection pooling under Python 2.5 -Changes in Version 0.11 ------------------------ +Changes in Version 0.11 (2009/06/03) +------------------------------------ - better build failure detection - driver support for selecting fields in sub-documents - disallow insertion of invalid key names - added ``timeout`` parameter for :meth:`~pymongo.connection.Connection` -Changes in Version 0.10.3 -------------------------- +Changes in Version 0.10.3 (2009/05/27) +-------------------------------------- - fix bug with large :meth:`~pymongo.cursor.Cursor.limit` - better exception when modules get reloaded out from underneath the C extension @@ -4145,22 +4155,22 @@ Changes in Version 0.10.3 :class:`~pymongo.collection.Collection` or :class:`~pymongo.database.Database` instance -Changes in Version 0.10.2 -------------------------- +Changes in Version 0.10.2 (2009/05/22) +-------------------------------------- - support subclasses of :class:`dict` in C encoder -Changes in Version 0.10.1 -------------------------- +Changes in Version 0.10.1 (2009/05/18) +-------------------------------------- - alias :class:`~pymongo.connection.Connection` as :attr:`pymongo.Connection` - raise an exception rather than silently overflowing in encoder -Changes in Version 0.10 ------------------------ +Changes in Version 0.10 (2009/05/14) +------------------------------------ - added :meth:`~pymongo.collection.Collection.ensure_index` -Changes in Version 0.9.7 ------------------------- +Changes in Version 0.9.7 (2009/05/13) +------------------------------------- - allow sub-collections of *$cmd* as valid :class:`~pymongo.collection.Collection` names - add version as :attr:`pymongo.version` From 26a61c8c480cdaf4a0ff10c74d578c4b485081d0 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Wed, 23 Oct 2024 08:24:59 -0400 Subject: [PATCH 1581/2111] PYTHON-2926 Updated signature of Binary.from_vector to take a BinaryVector (#1963) --- bson/binary.py | 25 ++++++++++++++++++------- test/test_bson.py | 20 +++++++++++++++++++- 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index 96b61b6dab..f03173a8ef 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -16,7 +16,7 @@ import struct from dataclasses import dataclass from enum import Enum -from typing import TYPE_CHECKING, Any, Sequence, Tuple, Type, Union +from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union from uuid import UUID """Tools for representing BSON binary data. @@ -400,24 +400,35 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI @classmethod def from_vector( cls: Type[Binary], - vector: list[int, float], - dtype: BinaryVectorDtype, - padding: int = 0, + vector: Union[BinaryVector, list[int, float]], + dtype: Optional[BinaryVectorDtype] = None, + padding: Optional[int] = None, ) -> Binary: - """**(BETA)** Create a BSON :class:`~bson.binary.Binary` of Vector subtype from a list of Numbers. + """**(BETA)** Create a BSON :class:`~bson.binary.Binary` of Vector subtype. To interpret the representation of the numbers, a data type must be included. See :class:`~bson.binary.BinaryVectorDtype` for available types and descriptions. The dtype and padding are prepended to the binary data's value. - :param vector: List of values + :param vector: Either a List of values, or a :class:`~bson.binary.BinaryVector` dataclass. :param dtype: Data type of the values :param padding: For fractional bytes, number of bits to ignore at end of vector. :return: Binary packed data identified by dtype and padding. .. versionadded:: 4.10 """ + if isinstance(vector, BinaryVector): + if dtype or padding: + raise ValueError( + "The first argument, vector, has type BinaryVector. " + "dtype or padding cannot be separately defined, but were." + ) + dtype = vector.dtype + padding = vector.padding + vector = vector.data # type: ignore + + padding = 0 if padding is None else padding if dtype == BinaryVectorDtype.INT8: # pack ints in [-128, 127] as signed int8 format_str = "b" if padding: @@ -432,7 +443,7 @@ def from_vector( raise NotImplementedError("%s not yet supported" % dtype) metadata = struct.pack(" BinaryVector: diff --git a/test/test_bson.py b/test/test_bson.py index 96aa897d19..5dc1377bcd 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -51,7 +51,13 @@ is_valid, json_util, ) -from bson.binary import USER_DEFINED_SUBTYPE, Binary, BinaryVectorDtype, UuidRepresentation +from bson.binary import ( + USER_DEFINED_SUBTYPE, + Binary, + BinaryVector, + BinaryVectorDtype, + UuidRepresentation, +) from bson.code import Code from bson.codec_options import CodecOptions, DatetimeConversion from bson.datetime_ms import _DATETIME_ERROR_SUGGESTION @@ -785,6 +791,18 @@ def test_vector(self): else: self.fail("Failed to raise an exception.") + # Test form of Binary.from_vector(BinaryVector) + + assert padded_vec == Binary.from_vector( + BinaryVector(list_vector, BinaryVectorDtype.PACKED_BIT, padding) + ) + assert binary_vector == Binary.from_vector( + BinaryVector(list_vector, BinaryVectorDtype.INT8) + ) + assert float_binary == Binary.from_vector( + BinaryVector(list_vector, BinaryVectorDtype.FLOAT32) + ) + def test_unicode_regex(self): """Tests we do not get a segfault for C extension on unicode RegExs. This had been happening. From 5141a7c5c0e038dae59fe1bebe0cb8c049f32abd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 23 Oct 2024 09:32:35 -0500 Subject: [PATCH 1582/2111] PYTHON-4896 Use shrub.py for other hosts tests (#1962) --- .evergreen/config.yml | 288 +++++++++++++++----------- .evergreen/scripts/generate_config.py | 60 +++++- 2 files changed, 221 insertions(+), 127 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index e357f02f2b..4868096e83 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1830,15 +1830,6 @@ tasks: # and then run our test suite on the vm. export GCPOIDC_TEST_CMD="OIDC_ENV=gcp ./.evergreen/run-mongodb-oidc-test.sh" bash $DRIVERS_TOOLS/.evergreen/auth_oidc/gcp/run-driver-test.sh - - - name: "test-fips-standalone" - tags: ["fips"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run tests" # }}} - name: "coverage-report" tags: ["coverage"] @@ -2004,72 +1995,17 @@ axes: - id: platform display_name: OS values: - - id: macos - display_name: "macOS" - run_on: macos-14 - variables: - skip_EC2_auth_test: true - skip_ECS_auth_test: true - skip_web_identity_auth_test: true - # CSOT tests are unreliable on our slow macOS hosts. - SKIP_CSOT_TESTS: true - - id: macos-arm64 - display_name: "macOS Arm64" - run_on: macos-14-arm64 - variables: - skip_EC2_auth_test: true - skip_ECS_auth_test: true - skip_web_identity_auth_test: true - # CSOT tests are unreliable on our slow macOS hosts. - SKIP_CSOT_TESTS: true - - id: rhel7 - display_name: "RHEL 7.x" - run_on: rhel79-small - batchtime: 10080 # 7 days - id: rhel8 display_name: "RHEL 8.x" run_on: rhel8.8-small batchtime: 10080 # 7 days - - id: rhel9-fips - display_name: "RHEL 9 FIPS" - run_on: rhel92-fips - batchtime: 10080 # 7 days - - id: ubuntu-22.04 - display_name: "Ubuntu 22.04" - run_on: ubuntu2204-small - batchtime: 10080 # 7 days - - id: ubuntu-20.04 - display_name: "Ubuntu 20.04" - run_on: ubuntu2004-small - batchtime: 10080 # 7 days - - id: rhel8-zseries - display_name: "RHEL 8 (zSeries)" - run_on: rhel8-zseries-small - batchtime: 10080 # 7 days - variables: - SKIP_HATCH: true - - id: rhel8-power8 - display_name: "RHEL 8 (POWER8)" - run_on: rhel8-power-small - batchtime: 10080 # 7 days - variables: - SKIP_HATCH: true - - id: rhel8-arm64 - display_name: "RHEL 8 (ARM64)" - run_on: rhel82-arm64-small - batchtime: 10080 # 7 days - variables: - id: windows display_name: "Windows 64" run_on: windows-64-vsMulti-small batchtime: 10080 # 7 days - variables: - skip_ECS_auth_test: true - skip_EC2_auth_test: true - skip_web_identity_auth_test: true - venv_bin_dir: "Scripts" - # CSOT tests are unreliable on our slow Windows hosts. - SKIP_CSOT_TESTS: true + - id: macos + display_name: "macOS" + run_on: macos-14 # Test with authentication? - id: auth @@ -2147,30 +2083,6 @@ axes: variables: PYTHON_BINARY: "/opt/python/pypy3.10/bin/pypy3" - - id: python-version-windows - display_name: "Python" - values: - - id: "3.9" - display_name: "Python 3.9" - variables: - PYTHON_BINARY: "C:/python/Python39/python.exe" - - id: "3.10" - display_name: "Python 3.10" - variables: - PYTHON_BINARY: "C:/python/Python310/python.exe" - - id: "3.11" - display_name: "Python 3.11" - variables: - PYTHON_BINARY: "C:/python/Python311/python.exe" - - id: "3.12" - display_name: "Python 3.12" - variables: - PYTHON_BINARY: "C:/python/Python312/python.exe" - - id: "3.13" - display_name: "Python 3.13" - variables: - PYTHON_BINARY: "C:/python/Python313/python.exe" - buildvariants: # Server Tests. - name: test-rhel8-py3.9-auth-ssl-cov @@ -3889,37 +3801,175 @@ buildvariants: skip_web_identity_auth_test: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- matrix_name: "tests-fips" - matrix_spec: - platform: - - rhel9-fips - auth: "auth" - ssl: "ssl" - display_name: "${platform} ${auth} ${ssl}" +# Other hosts tests. +- name: openssl-1.0.2-rhel7-py3.9-auth-ssl tasks: - - "test-fips-standalone" - -# Test one server version with zSeries, POWER8, and ARM. -- matrix_name: "test-different-cpu-architectures" - matrix_spec: - platform: - - rhel8-zseries # Added in 5.0.8 (SERVER-44074) - - rhel8-power8 # Added in 4.2.7 (SERVER-44072) - - rhel8-arm64 # Added in 4.4.2 (SERVER-48282) - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" + - name: .5.0 .standalone + display_name: OpenSSL 1.0.2 RHEL7 py3.9 Auth SSL + run_on: + - rhel79-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: openssl-1.0.2-rhel7-py3.9-noauth-ssl tasks: - - ".6.0" - -- matrix_name: "tests-python-version-supports-openssl-102-test-ssl" - matrix_spec: - platform: rhel7 - # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.9"] - auth-ssl: "*" - display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" + - name: .5.0 .standalone + display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth SSL + run_on: + - rhel79-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: openssl-1.0.2-rhel7-py3.9-noauth-nossl + tasks: + - name: .5.0 .standalone + display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth NoSSL + run_on: + - rhel79-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: other-hosts-rhel9-fips-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL9-FIPS Auth SSL + run_on: + - rhel92-fips + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl +- name: other-hosts-rhel9-fips-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL9-FIPS NoAuth SSL + run_on: + - rhel92-fips + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl +- name: other-hosts-rhel9-fips-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL9-FIPS NoAuth NoSSL + run_on: + - rhel92-fips + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl +- name: other-hosts-rhel8-zseries-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-zseries Auth SSL + run_on: + - rhel8-zseries-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl +- name: other-hosts-rhel8-zseries-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-zseries NoAuth SSL + run_on: + - rhel8-zseries-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl +- name: other-hosts-rhel8-zseries-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-zseries NoAuth NoSSL + run_on: + - rhel8-zseries-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl +- name: other-hosts-rhel8-power8-auth-ssl tasks: - - ".5.0" + - name: .6.0 .standalone + display_name: Other hosts RHEL8-POWER8 Auth SSL + run_on: + - rhel8-power-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl +- name: other-hosts-rhel8-power8-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-POWER8 NoAuth SSL + run_on: + - rhel8-power-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl +- name: other-hosts-rhel8-power8-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-POWER8 NoAuth NoSSL + run_on: + - rhel8-power-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl +- name: other-hosts-rhel8-arm64-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-arm64 Auth SSL + run_on: + - rhel82-arm64-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl +- name: other-hosts-rhel8-arm64-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-arm64 NoAuth SSL + run_on: + - rhel82-arm64-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl +- name: other-hosts-rhel8-arm64-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-arm64 NoAuth NoSSL + run_on: + - rhel82-arm64-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl - matrix_name: "test-search-index-helpers" matrix_spec: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 3f1ea724ed..1217c26885 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -49,6 +49,7 @@ class Host: display_name: str +# Hosts with toolchains. HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8") HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64") HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32") @@ -56,7 +57,7 @@ class Host: HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64") HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20") HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22") - +HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7") ############## # Helpers @@ -76,8 +77,11 @@ def create_variant( task_refs = [EvgTaskRef(name=n) for n in task_names] kwargs.setdefault("expansions", dict()) expansions = kwargs.pop("expansions", dict()).copy() - host = host or "rhel8" - run_on = [HOSTS[host].run_on] + if "run_on" in kwargs: + run_on = kwargs.pop("run_on") + else: + host = host or "rhel8" + run_on = [HOSTS[host].run_on] name = display_name.replace(" ", "-").lower() if python: expansions["PYTHON_BINARY"] = get_python_binary(python, host) @@ -104,7 +108,7 @@ def get_python_binary(python: str, host: str) -> str: python = python.replace(".", "") return f"{base}/Python{python}/python.exe" - if host in ["rhel8", "ubuntu22", "ubuntu20"]: + if host in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: return f"/opt/python/{python}/bin/python3" if host in ["macos", "macos-arm64"]: @@ -131,9 +135,11 @@ def get_versions_until(max_version: str) -> list[str]: return versions -def get_display_name(base: str, host: str, **kwargs) -> str: +def get_display_name(base: str, host: str | None = None, **kwargs) -> str: """Get the display name of a variant.""" - display_name = f"{base} {HOSTS[host].display_name}" + display_name = base + if host is not None: + display_name += f" {HOSTS[host].display_name}" version = kwargs.pop("VERSION", None) if version: if version not in ["rapid", "latest"]: @@ -640,10 +646,48 @@ def generate_aws_auth_variants(): return variants +def generate_alternative_hosts_variants(): + base_expansions = dict(SKIP_HATCH="true") + batchtime = BATCHTIME_WEEK + variants = [] + + host = "rhel7" + for auth, ssl in AUTH_SSLS: + expansions = base_expansions.copy() + expansions["AUTH"] = auth + expansions["SSL"] = ssl + variants.append( + create_variant( + [".5.0 .standalone"], + get_display_name("OpenSSL 1.0.2", "rhel7", python=CPYTHONS[0], **expansions), + host=host, + python=CPYTHONS[0], + batchtime=batchtime, + expansions=expansions, + ) + ) + + hosts = ["rhel92-fips", "rhel8-zseries-small", "rhel8-power-small", "rhel82-arm64-small"] + host_names = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64"] + for (host, host_name), (auth, ssl) in product(zip(hosts, host_names), AUTH_SSLS): + expansions = base_expansions.copy() + expansions["AUTH"] = auth + expansions["SSL"] = ssl + variants.append( + create_variant( + [".6.0 .standalone"], + display_name=get_display_name(f"Other hosts {host_name}", **expansions), + expansions=expansions, + batchtime=batchtime, + run_on=[host], + ) + ) + return variants + + ################## # Generate Config ################## -variants = create_server_variants() -# print(len(variants)) +variants = generate_alternative_hosts_variants() generate_yaml(variants=variants) From 79ad2a14811ecb8d22d47ad5c2b7e4eb6e8de943 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 23 Oct 2024 10:10:32 -0500 Subject: [PATCH 1583/2111] PYTHON-4900 Convert remaining matrix definitions to use shrub.py (#1964) --- .evergreen/config.yml | 190 ++++++++------------------ .evergreen/scripts/generate_config.py | 69 +++++++++- 2 files changed, 127 insertions(+), 132 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4868096e83..2d73e19a27 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1990,99 +1990,6 @@ tasks: - mongo-python-driver - ${github.amrom.workers.devmit} -axes: - # Choice of distro - - id: platform - display_name: OS - values: - - id: rhel8 - display_name: "RHEL 8.x" - run_on: rhel8.8-small - batchtime: 10080 # 7 days - - id: windows - display_name: "Windows 64" - run_on: windows-64-vsMulti-small - batchtime: 10080 # 7 days - - id: macos - display_name: "macOS" - run_on: macos-14 - - # Test with authentication? - - id: auth - display_name: Authentication - values: - - id: auth - display_name: Auth - variables: - AUTH: "auth" - - id: noauth - display_name: NoAuth - variables: - AUTH: "noauth" - - # Test with SSL? - - id: ssl - display_name: SSL - values: - - id: ssl - display_name: SSL - variables: - SSL: "ssl" - - id: nossl - display_name: NoSSL - variables: - SSL: "nossl" - - # Test with Auth + SSL (combined for convenience)? - - id: auth-ssl - display_name: Auth SSL - values: - - id: auth-ssl - display_name: Auth SSL - variables: - AUTH: "auth" - SSL: "ssl" - - id: noauth-nossl - display_name: NoAuth NoSSL - variables: - AUTH: "noauth" - SSL: "nossl" - - # Choice of Python runtime version - - id: python-version - display_name: "Python" - values: - # Note: always display platform with python-version to avoid ambiguous display names. - # Linux - - id: "3.9" - display_name: "Python 3.9" - variables: - PYTHON_BINARY: "/opt/python/3.9/bin/python3" - - id: "3.10" - display_name: "Python 3.10" - variables: - PYTHON_BINARY: "/opt/python/3.10/bin/python3" - - id: "3.11" - display_name: "Python 3.11" - variables: - PYTHON_BINARY: "/opt/python/3.11/bin/python3" - - id: "3.12" - display_name: "Python 3.12" - variables: - PYTHON_BINARY: "/opt/python/3.12/bin/python3" - - id: "3.13" - display_name: "Python 3.13" - variables: - PYTHON_BINARY: "/opt/python/3.13/bin/python3" - - id: "pypy3.9" - display_name: "PyPy 3.9" - variables: - PYTHON_BINARY: "/opt/python/pypy3.9/bin/pypy3" - - id: "pypy3.10" - display_name: "PyPy 3.10" - variables: - PYTHON_BINARY: "/opt/python/pypy3.10/bin/pypy3" - buildvariants: # Server Tests. - name: test-rhel8-py3.9-auth-ssl-cov @@ -3970,30 +3877,67 @@ buildvariants: SKIP_HATCH: "true" AUTH: noauth SSL: nossl - -- matrix_name: "test-search-index-helpers" - matrix_spec: - platform: rhel8 - python-version: "3.9" - display_name: "Search Index Helpers ${platform}" +- name: oidc-auth-rhel8 tasks: - - name: "test_atlas_task_group_search_indexes" - -- matrix_name: "mockupdb-tests" - matrix_spec: - platform: rhel8 - python-version: 3.9 - display_name: "MockupDB Tests" + - name: testoidc_task_group + display_name: OIDC Auth RHEL8 + run_on: + - rhel87-small + batchtime: 20160 +- name: oidc-auth-macos tasks: - - name: "mockupdb" - -- matrix_name: "tests-doctests" - matrix_spec: - platform: rhel8 - python-version: ["3.9"] - display_name: "Doctests ${python-version} ${platform}" + - name: testoidc_task_group + display_name: OIDC Auth macOS + run_on: + - macos-14 + batchtime: 20160 +- name: oidc-auth-win64 tasks: - - name: "doctests" + - name: testoidc_task_group + display_name: OIDC Auth Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 20160 +- name: atlas-connect-rhel8-py3.9 + tasks: + - name: atlas-connect + display_name: Atlas connect RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: atlas-connect-rhel8-py3.13 + tasks: + - name: atlas-connect + display_name: Atlas connect RHEL8 py3.13 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.13/bin/python3 +- name: doctests-rhel8-py3.9 + tasks: + - name: doctests + display_name: Doctests RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: mockupdb-tests-rhel8-py3.9 + tasks: + - name: mockupdb + display_name: MockupDB Tests RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 +- name: search-index-helpers-rhel8-py3.9 + tasks: + - name: test_atlas_task_group_search_indexes + display_name: Search Index Helpers RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: "no-server" display_name: "No server test" @@ -4009,15 +3953,7 @@ buildvariants: tasks: - name: "coverage-report" -- matrix_name: "atlas-connect" - matrix_spec: - platform: rhel8 - python-version: "*" - display_name: "Atlas connect ${python-version} ${platform}" - tasks: - - name: "atlas-connect" - -# OCSP test matrix. +# OCSP tests. - name: ocsp-test-rhel8-v4.4-py3.9 tasks: - name: .ocsp @@ -4359,14 +4295,6 @@ buildvariants: test_loadbalancer: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 -- matrix_name: "oidc-auth-test" - matrix_spec: - platform: [ rhel8, macos, windows ] - display_name: "OIDC Auth ${platform}" - tasks: - - name: testoidc_task_group - batchtime: 20160 # 14 days - - name: testazureoidc-variant display_name: "OIDC Auth Azure" run_on: ubuntu2204-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 1217c26885..7adeac82e0 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -59,6 +59,7 @@ class Host: HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22") HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7") + ############## # Helpers ############## @@ -616,6 +617,72 @@ def generate_serverless_variants(): ] +def generate_oidc_auth_variants(): + variants = [] + for host in ["rhel8", "macos", "win64"]: + variants.append( + create_variant( + ["testoidc_task_group"], + get_display_name("OIDC Auth", host), + host=host, + batchtime=BATCHTIME_WEEK * 2, + ) + ) + return variants + + +def generate_search_index_variants(): + host = "rhel8" + python = CPYTHONS[0] + return [ + create_variant( + ["test_atlas_task_group_search_indexes"], + get_display_name("Search Index Helpers", host, python=python), + python=python, + host=host, + ) + ] + + +def generate_mockupdb_variants(): + host = "rhel8" + python = CPYTHONS[0] + return [ + create_variant( + ["mockupdb"], + get_display_name("MockupDB Tests", host, python=python), + python=python, + host=host, + ) + ] + + +def generate_doctests_variants(): + host = "rhel8" + python = CPYTHONS[0] + return [ + create_variant( + ["doctests"], + get_display_name("Doctests", host, python=python), + python=python, + host=host, + ) + ] + + +def generate_atlas_connect_variants(): + host = "rhel8" + return [ + create_variant( + ["atlas-connect"], + get_display_name("Atlas connect", host, python=python), + python=python, + host=host, + ) + for python in MIN_MAX_PYTHON + ] + + def generate_aws_auth_variants(): variants = [] tasks = [ @@ -689,5 +756,5 @@ def generate_alternative_hosts_variants(): # Generate Config ################## -variants = generate_alternative_hosts_variants() +variants = generate_search_index_variants() generate_yaml(variants=variants) From 493c331bb83e6c78bc2fb470ec0f5bd06f939a02 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 23 Oct 2024 11:08:50 -0500 Subject: [PATCH 1584/2111] PYTHON-4897 Remove Assign PR Reviewer from PyMongo (#1960) --- .evergreen/config.yml | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2d73e19a27..17d12742a6 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1945,23 +1945,6 @@ tasks: - func: "attach benchmark test results" - func: "send dashboard data" - - name: "assign-pr-reviewer" - tags: ["pr"] - allowed_requesters: ["patch", "github_pr"] - commands: - - command: shell.exec - type: test - params: - shell: "bash" - working_dir: src - script: | - . .evergreen/scripts/env.sh - set -x - export CONFIG=$PROJECT_DIRECTORY/.github/reviewers.txt - export SCRIPT="$DRIVERS_TOOLS/.evergreen/github_app/assign-reviewer.sh" - bash $SCRIPT -p $CONFIG -h ${github.amrom.workers.devmit} -o "mongodb" -n "mongo-python-driver" - echo '{"results": [{ "status": "PASS", "test_file": "Build", "log_raw": "Test completed" } ]}' > ${PROJECT_DIRECTORY}/test-results.json - - name: "check-import-time" tags: ["pr"] commands: @@ -4332,12 +4315,6 @@ buildvariants: tasks: - name: test_aws_lambda_task_group -- name: rhel8-pr-assign-reviewer - display_name: Assign PR Reviewer - run_on: rhel87-small - tasks: - - name: "assign-pr-reviewer" - - name: rhel8-import-time display_name: Import Time Check run_on: rhel87-small From cb8cf03eb52c5a84616faf3112d36e0ceb832ab5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 23 Oct 2024 13:29:09 -0500 Subject: [PATCH 1585/2111] PYTHON-4901 Move generated Evergreen variants to an included file (#1965) --- .evergreen/config.yml | 2293 +-------------------- .evergreen/generated_configs/variants.yml | 2204 ++++++++++++++++++++ .evergreen/scripts/generate_config.py | 69 +- 3 files changed, 2258 insertions(+), 2308 deletions(-) create mode 100644 .evergreen/generated_configs/variants.yml diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 17d12742a6..6e48a380d3 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -25,6 +25,9 @@ timeout: script: | ls -la +include: + - filename: .evergreen/generated_configs/variants.yml + functions: "fetch source": # Executes clone and applies the submitted patch, if any @@ -1974,1954 +1977,6 @@ tasks: - ${github.amrom.workers.devmit} buildvariants: -# Server Tests. -- name: test-rhel8-py3.9-auth-ssl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.9 Auth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-py3.9-noauth-ssl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.9 NoAuth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-py3.9-noauth-nossl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.9 NoAuth NoSSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: nossl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-py3.13-auth-ssl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.13 Auth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-py3.13-noauth-ssl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.13 NoAuth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-py3.13-noauth-nossl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.13 NoAuth NoSSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: nossl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-pypy3.10-auth-ssl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 pypy3.10 Auth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-pypy3.10-noauth-ssl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 pypy3.10 NoAuth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-pypy3.10-noauth-nossl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 pypy3.10 NoAuth NoSSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: nossl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] -- name: test-rhel8-py3.10-auth-ssl - tasks: - - name: .standalone - display_name: Test RHEL8 py3.10 Auth SSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: test-rhel8-py3.11-noauth-ssl - tasks: - - name: .replica_set - display_name: Test RHEL8 py3.11 NoAuth SSL - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: ssl - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: test-rhel8-py3.12-noauth-nossl - tasks: - - name: .sharded_cluster - display_name: Test RHEL8 py3.12 NoAuth NoSSL - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: nossl - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: test-rhel8-pypy3.9-auth-ssl - tasks: - - name: .standalone - display_name: Test RHEL8 pypy3.9 Auth SSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: test-macos-py3.9-auth-ssl-sync - tasks: - - name: .standalone - display_name: Test macOS py3.9 Auth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-py3.9-noauth-ssl-sync - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-py3.9-noauth-nossl-sync - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth NoSSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-py3.9-auth-ssl-async - tasks: - - name: .standalone - display_name: Test macOS py3.9 Auth SSL Async - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-py3.9-noauth-ssl-async - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth SSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-py3.9-noauth-nossl-async - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth NoSSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 Auth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth NoSSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 Auth SSL Async - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth SSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth NoSSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.9-auth-ssl-sync - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 Auth SSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.9-noauth-ssl-sync - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth SSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.9-noauth-nossl-sync - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.9-auth-ssl-async - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 Auth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.9-noauth-ssl-async - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.9-noauth-nossl-async - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: test-macos-arm64-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 Auth SSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth SSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 Auth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-macos-arm64-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 -- name: test-win64-py3.9-auth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win64 py3.9 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: test-win64-py3.9-noauth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: test-win64-py3.9-noauth-nossl-sync - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: test-win64-py3.9-auth-ssl-async - tasks: - - name: .standalone - display_name: Test Win64 py3.9 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: test-win64-py3.9-noauth-ssl-async - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: test-win64-py3.9-noauth-nossl-async - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: test-win64-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: test-win64-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: test-win64-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: test-win64-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: test-win64-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: test-win64-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: test-win32-py3.9-auth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win32 py3.9 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe -- name: test-win32-py3.9-noauth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe -- name: test-win32-py3.9-noauth-nossl-sync - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe -- name: test-win32-py3.9-auth-ssl-async - tasks: - - name: .standalone - display_name: Test Win32 py3.9 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe -- name: test-win32-py3.9-noauth-ssl-async - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe -- name: test-win32-py3.9-noauth-nossl-async - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe -- name: test-win32-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe -- name: test-win32-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe -- name: test-win32-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe -- name: test-win32-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe -- name: test-win32-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe -- name: test-win32-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe - -# Encryption tests. -- name: encryption-rhel8-py3.9-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption RHEL8 py3.9 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [encryption_tag] -- name: encryption-rhel8-py3.13-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption RHEL8 py3.13 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [encryption_tag] -- name: encryption-rhel8-pypy3.10-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption RHEL8 pypy3.10 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [encryption_tag] -- name: encryption-crypt_shared-rhel8-py3.9-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption crypt_shared RHEL8 py3.9 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [encryption_tag] -- name: encryption-crypt_shared-rhel8-py3.13-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption crypt_shared RHEL8 py3.13 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [encryption_tag] -- name: encryption-crypt_shared-rhel8-pypy3.10-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption crypt_shared RHEL8 pypy3.10 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [encryption_tag] -- name: encryption-pyopenssl-rhel8-py3.9-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption PyOpenSSL RHEL8 py3.9 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_encryption_pyopenssl: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [encryption_tag] -- name: encryption-pyopenssl-rhel8-py3.13-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption PyOpenSSL RHEL8 py3.13 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_encryption_pyopenssl: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [encryption_tag] -- name: encryption-pyopenssl-rhel8-pypy3.10-auth-ssl - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption PyOpenSSL RHEL8 pypy3.10 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_encryption_pyopenssl: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [encryption_tag] -- name: encryption-rhel8-py3.10-auth-ssl - tasks: - - name: .replica_set - display_name: Encryption RHEL8 py3.10 Auth SSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: encryption-crypt_shared-rhel8-py3.11-auth-nossl - tasks: - - name: .replica_set - display_name: Encryption crypt_shared RHEL8 py3.11 Auth NoSSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: nossl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: encryption-pyopenssl-rhel8-py3.12-auth-ssl - tasks: - - name: .replica_set - display_name: Encryption PyOpenSSL RHEL8 py3.12 Auth SSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - TEST_ENCRYPTION_PYOPENSSL: "true" - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: encryption-rhel8-pypy3.9-auth-nossl - tasks: - - name: .replica_set - display_name: Encryption RHEL8 pypy3.9 Auth NoSSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: nossl - test_encryption: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: encryption-macos-py3.9-auth-ssl - tasks: - - name: .latest .replica_set - display_name: Encryption macOS py3.9 Auth SSL - run_on: - - macos-14 - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - tags: [encryption_tag] -- name: encryption-macos-py3.13-auth-nossl - tasks: - - name: .latest .replica_set - display_name: Encryption macOS py3.13 Auth NoSSL - run_on: - - macos-14 - batchtime: 10080 - expansions: - AUTH: auth - SSL: nossl - test_encryption: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - tags: [encryption_tag] -- name: encryption-crypt_shared-macos-py3.9-auth-ssl - tasks: - - name: .latest .replica_set - display_name: Encryption crypt_shared macOS py3.9 Auth SSL - run_on: - - macos-14 - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - tags: [encryption_tag] -- name: encryption-crypt_shared-macos-py3.13-auth-nossl - tasks: - - name: .latest .replica_set - display_name: Encryption crypt_shared macOS py3.13 Auth NoSSL - run_on: - - macos-14 - batchtime: 10080 - expansions: - AUTH: auth - SSL: nossl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - tags: [encryption_tag] -- name: encryption-win64-py3.9-auth-ssl - tasks: - - name: .latest .replica_set - display_name: Encryption Win64 py3.9 Auth SSL - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - tags: [encryption_tag] -- name: encryption-win64-py3.13-auth-nossl - tasks: - - name: .latest .replica_set - display_name: Encryption Win64 py3.13 Auth NoSSL - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: nossl - test_encryption: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - tags: [encryption_tag] -- name: encryption-crypt_shared-win64-py3.9-auth-ssl - tasks: - - name: .latest .replica_set - display_name: Encryption crypt_shared Win64 py3.9 Auth SSL - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: ssl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - tags: [encryption_tag] -- name: encryption-crypt_shared-win64-py3.13-auth-nossl - tasks: - - name: .latest .replica_set - display_name: Encryption crypt_shared Win64 py3.13 Auth NoSSL - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - AUTH: auth - SSL: nossl - test_encryption: "true" - test_crypt_shared: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - tags: [encryption_tag] - -# Compressor tests. -- name: snappy-compression-rhel8-py3.9-no-c - tasks: - - name: .standalone - display_name: snappy compression RHEL8 py3.9 No C - run_on: - - rhel87-small - expansions: - COMPRESSORS: snappy - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: snappy-compression-rhel8-py3.10 - tasks: - - name: .standalone - display_name: snappy compression RHEL8 py3.10 - run_on: - - rhel87-small - expansions: - COMPRESSORS: snappy - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: zlib-compression-rhel8-py3.11-no-c - tasks: - - name: .standalone - display_name: zlib compression RHEL8 py3.11 No C - run_on: - - rhel87-small - expansions: - COMPRESSORS: zlib - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: zlib-compression-rhel8-py3.12 - tasks: - - name: .standalone - display_name: zlib compression RHEL8 py3.12 - run_on: - - rhel87-small - expansions: - COMPRESSORS: zlib - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: zstd-compression-rhel8-py3.13-no-c - tasks: - - name: .standalone !.4.0 - display_name: zstd compression RHEL8 py3.13 No C - run_on: - - rhel87-small - expansions: - COMPRESSORS: zstd - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: zstd-compression-rhel8-py3.9 - tasks: - - name: .standalone !.4.0 - display_name: zstd compression RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - COMPRESSORS: zstd - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: snappy-compression-rhel8-pypy3.9 - tasks: - - name: .standalone - display_name: snappy compression RHEL8 pypy3.9 - run_on: - - rhel87-small - expansions: - COMPRESSORS: snappy - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: zlib-compression-rhel8-pypy3.10 - tasks: - - name: .standalone - display_name: zlib compression RHEL8 pypy3.10 - run_on: - - rhel87-small - expansions: - COMPRESSORS: zlib - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 -- name: zstd-compression-rhel8-pypy3.9 - tasks: - - name: .standalone !.4.0 - display_name: zstd compression RHEL8 pypy3.9 - run_on: - - rhel87-small - expansions: - COMPRESSORS: zstd - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - -# Enterprise auth tests. -- name: enterprise-auth-macos-py3.9-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth macOS py3.9 Auth - run_on: - - macos-14 - expansions: - AUTH: auth - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: enterprise-auth-rhel8-py3.10-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 py3.10 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: enterprise-auth-rhel8-py3.11-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 py3.11 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: enterprise-auth-rhel8-py3.12-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 py3.12 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: enterprise-auth-win64-py3.13-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth Win64 py3.13 Auth - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - PYTHON_BINARY: C:/python/Python313/python.exe -- name: enterprise-auth-rhel8-pypy3.9-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 pypy3.9 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: enterprise-auth-rhel8-pypy3.10-auth - tasks: - - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 pypy3.10 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - -# PyOpenSSL tests. -- name: pyopenssl-macos-py3.9 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL macOS py3.9 - run_on: - - macos-14 - batchtime: 10080 - expansions: - AUTH: noauth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: pyopenssl-rhel8-py3.10 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL RHEL8 py3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: pyopenssl-rhel8-py3.11 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL RHEL8 py3.11 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: pyopenssl-rhel8-py3.12 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL RHEL8 py3.12 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: pyopenssl-win64-py3.13 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL Win64 py3.13 - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - AUTH: auth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: C:/python/Python313/python.exe -- name: pyopenssl-rhel8-pypy3.9 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL RHEL8 pypy3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: pyopenssl-rhel8-pypy3.10 - tasks: - - name: .replica_set - - name: .7.0 - display_name: PyOpenSSL RHEL8 pypy3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: auth - test_pyopenssl: "true" - SSL: ssl - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - -# Storage Engine tests. -- name: storage-inmemory-rhel8-py3.9 - tasks: - - name: .standalone .4.0 - - name: .standalone .4.4 - - name: .standalone .5.0 - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Storage InMemory RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - STORAGE_ENGINE: inmemory - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: storage-mmapv1-rhel8-py3.9 - tasks: - - name: .standalone .4.0 - - name: .replica_set .4.0 - display_name: Storage MMAPv1 RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - STORAGE_ENGINE: mmapv1 - PYTHON_BINARY: /opt/python/3.9/bin/python3 - -# Versioned API tests. -- name: versioned-api-require-v1-rhel8-py3.9-auth - tasks: - - name: .standalone .5.0 - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Versioned API require v1 RHEL8 py3.9 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - REQUIRE_API_VERSION: "1" - MONGODB_API_VERSION: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [versionedApi_tag] -- name: versioned-api-accept-v2-rhel8-py3.9-auth - tasks: - - name: .standalone .5.0 - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Versioned API accept v2 RHEL8 py3.9 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - ORCHESTRATION_FILE: versioned-api-testing.json - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [versionedApi_tag] -- name: versioned-api-require-v1-rhel8-py3.13-auth - tasks: - - name: .standalone .5.0 - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Versioned API require v1 RHEL8 py3.13 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - REQUIRE_API_VERSION: "1" - MONGODB_API_VERSION: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [versionedApi_tag] -- name: versioned-api-accept-v2-rhel8-py3.13-auth - tasks: - - name: .standalone .5.0 - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Versioned API accept v2 RHEL8 py3.13 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - ORCHESTRATION_FILE: versioned-api-testing.json - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [versionedApi_tag] - -# Green framework tests. -- name: eventlet-rhel8-py3.9 - tasks: - - name: .standalone - display_name: Eventlet RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: eventlet - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: gevent-rhel8-py3.9 - tasks: - - name: .standalone - display_name: Gevent RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: gevent - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: eventlet-rhel8-py3.12 - tasks: - - name: .standalone - display_name: Eventlet RHEL8 py3.12 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: eventlet - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: gevent-rhel8-py3.12 - tasks: - - name: .standalone - display_name: Gevent RHEL8 py3.12 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: gevent - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.12/bin/python3 - -# No C Ext tests. -- name: no-c-ext-rhel8-py3.9 - tasks: - - name: .standalone - display_name: No C Ext RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: no-c-ext-rhel8-py3.10 - tasks: - - name: .replica_set - display_name: No C Ext RHEL8 py3.10 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: no-c-ext-rhel8-py3.11 - tasks: - - name: .sharded_cluster - display_name: No C Ext RHEL8 py3.11 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: no-c-ext-rhel8-py3.12 - tasks: - - name: .standalone - display_name: No C Ext RHEL8 py3.12 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: no-c-ext-rhel8-py3.13 - tasks: - - name: .replica_set - display_name: No C Ext RHEL8 py3.13 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - -# Atlas Data Lake tests. -- name: atlas-data-lake-rhel8-py3.9-no-c - tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.9 No C - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: atlas-data-lake-rhel8-py3.9 - tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: atlas-data-lake-rhel8-py3.13-no-c - tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.13 No C - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: atlas-data-lake-rhel8-py3.13 - tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.13 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 - -# Mod_wsgi tests. -- name: mod_wsgi-ubuntu-22-py3.9 - tasks: - - name: mod-wsgi-standalone - - name: mod-wsgi-replica-set - - name: mod-wsgi-embedded-mode-standalone - - name: mod-wsgi-embedded-mode-replica-set - display_name: mod_wsgi Ubuntu-22 py3.9 - run_on: - - ubuntu2204-small - expansions: - MOD_WSGI_VERSION: "4" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: mod_wsgi-ubuntu-22-py3.13 - tasks: - - name: mod-wsgi-standalone - - name: mod-wsgi-replica-set - - name: mod-wsgi-embedded-mode-standalone - - name: mod-wsgi-embedded-mode-replica-set - display_name: mod_wsgi Ubuntu-22 py3.13 - run_on: - - ubuntu2204-small - expansions: - MOD_WSGI_VERSION: "4" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - -# Disable test commands variants. -- name: disable-test-commands-rhel8-py3.9 - tasks: - - name: .latest - display_name: Disable test commands RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - DISABLE_TEST_COMMANDS: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - -# Serverless variants. -- name: serverless-rhel8-py3.9 - tasks: - - name: serverless_task_group - display_name: Serverless RHEL8 py3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - test_serverless: "true" - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: serverless-rhel8-py3.13 - tasks: - - name: serverless_task_group - display_name: Serverless RHEL8 py3.13 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - test_serverless: "true" - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.13/bin/python3 - -# AWS Auth tests. -- name: aws-auth-ubuntu-20-py3.9 - tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest - display_name: AWS Auth Ubuntu-20 py3.9 - run_on: - - ubuntu2004-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: aws-auth-ubuntu-20-py3.13 - tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest - display_name: AWS Auth Ubuntu-20 py3.13 - run_on: - - ubuntu2004-small - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: aws-auth-win64-py3.9 - tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest - display_name: AWS Auth Win64 py3.9 - run_on: - - windows-64-vsMulti-small - expansions: - skip_ECS_auth_test: "true" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: aws-auth-win64-py3.13 - tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest - display_name: AWS Auth Win64 py3.13 - run_on: - - windows-64-vsMulti-small - expansions: - skip_ECS_auth_test: "true" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: aws-auth-macos-py3.9 - tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest - display_name: AWS Auth macOS py3.9 - run_on: - - macos-14 - expansions: - skip_ECS_auth_test: "true" - skip_EC2_auth_test: "true" - skip_web_identity_auth_test: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: aws-auth-macos-py3.13 - tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest - display_name: AWS Auth macOS py3.13 - run_on: - - macos-14 - expansions: - skip_ECS_auth_test: "true" - skip_EC2_auth_test: "true" - skip_web_identity_auth_test: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - -# Other hosts tests. -- name: openssl-1.0.2-rhel7-py3.9-auth-ssl - tasks: - - name: .5.0 .standalone - display_name: OpenSSL 1.0.2 RHEL7 py3.9 Auth SSL - run_on: - - rhel79-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: openssl-1.0.2-rhel7-py3.9-noauth-ssl - tasks: - - name: .5.0 .standalone - display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth SSL - run_on: - - rhel79-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: openssl-1.0.2-rhel7-py3.9-noauth-nossl - tasks: - - name: .5.0 .standalone - display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth NoSSL - run_on: - - rhel79-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: other-hosts-rhel9-fips-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL9-FIPS Auth SSL - run_on: - - rhel92-fips - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl -- name: other-hosts-rhel9-fips-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL9-FIPS NoAuth SSL - run_on: - - rhel92-fips - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl -- name: other-hosts-rhel9-fips-noauth-nossl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL9-FIPS NoAuth NoSSL - run_on: - - rhel92-fips - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl -- name: other-hosts-rhel8-zseries-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-zseries Auth SSL - run_on: - - rhel8-zseries-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl -- name: other-hosts-rhel8-zseries-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-zseries NoAuth SSL - run_on: - - rhel8-zseries-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl -- name: other-hosts-rhel8-zseries-noauth-nossl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-zseries NoAuth NoSSL - run_on: - - rhel8-zseries-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl -- name: other-hosts-rhel8-power8-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-POWER8 Auth SSL - run_on: - - rhel8-power-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl -- name: other-hosts-rhel8-power8-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-POWER8 NoAuth SSL - run_on: - - rhel8-power-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl -- name: other-hosts-rhel8-power8-noauth-nossl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-POWER8 NoAuth NoSSL - run_on: - - rhel8-power-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl -- name: other-hosts-rhel8-arm64-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-arm64 Auth SSL - run_on: - - rhel82-arm64-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl -- name: other-hosts-rhel8-arm64-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-arm64 NoAuth SSL - run_on: - - rhel82-arm64-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl -- name: other-hosts-rhel8-arm64-noauth-nossl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-arm64 NoAuth NoSSL - run_on: - - rhel82-arm64-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl -- name: oidc-auth-rhel8 - tasks: - - name: testoidc_task_group - display_name: OIDC Auth RHEL8 - run_on: - - rhel87-small - batchtime: 20160 -- name: oidc-auth-macos - tasks: - - name: testoidc_task_group - display_name: OIDC Auth macOS - run_on: - - macos-14 - batchtime: 20160 -- name: oidc-auth-win64 - tasks: - - name: testoidc_task_group - display_name: OIDC Auth Win64 - run_on: - - windows-64-vsMulti-small - batchtime: 20160 -- name: atlas-connect-rhel8-py3.9 - tasks: - - name: atlas-connect - display_name: Atlas connect RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: atlas-connect-rhel8-py3.13 - tasks: - - name: atlas-connect - display_name: Atlas connect RHEL8 py3.13 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: doctests-rhel8-py3.9 - tasks: - - name: doctests - display_name: Doctests RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: mockupdb-tests-rhel8-py3.9 - tasks: - - name: mockupdb - display_name: MockupDB Tests RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: search-index-helpers-rhel8-py3.9 - tasks: - - name: test_atlas_task_group_search_indexes - display_name: Search Index Helpers RHEL8 py3.9 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: "no-server" display_name: "No server test" run_on: @@ -3936,348 +1991,6 @@ buildvariants: tasks: - name: "coverage-report" -# OCSP tests. -- name: ocsp-test-rhel8-v4.4-py3.9 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 v4.4 py3.9 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.4" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: ocsp-test-rhel8-v5.0-py3.10 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 v5.0 py3.10 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "5.0" - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: ocsp-test-rhel8-v6.0-py3.11 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 v6.0 py3.11 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "6.0" - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: ocsp-test-rhel8-v7.0-py3.12 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 v7.0 py3.12 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "7.0" - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: ocsp-test-rhel8-v8.0-py3.13 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 v8.0 py3.13 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "8.0" - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: ocsp-test-rhel8-rapid-pypy3.9 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 rapid pypy3.9 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: rapid - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: ocsp-test-rhel8-latest-pypy3.10 - tasks: - - name: .ocsp - display_name: OCSP test RHEL8 latest pypy3.10 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: latest - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 -- name: ocsp-test-win64-v4.4-py3.9 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test Win64 v4.4 py3.9 - run_on: - - windows-64-vsMulti-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.4" - PYTHON_BINARY: C:/python/Python39/python.exe -- name: ocsp-test-win64-v8.0-py3.13 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test Win64 v8.0 py3.13 - run_on: - - windows-64-vsMulti-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "8.0" - PYTHON_BINARY: C:/python/Python313/python.exe -- name: ocsp-test-macos-v4.4-py3.9 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test macOS v4.4 py3.9 - run_on: - - macos-14 - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.4" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 -- name: ocsp-test-macos-v8.0-py3.13 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test macOS v8.0 py3.13 - run_on: - - macos-14 - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "8.0" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - -# Load balancer tests -- name: load-balancer-rhel8-v6.0-py3.9-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v6.0 py3.9 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "6.0" - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: load-balancer-rhel8-v6.0-py3.10-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v6.0 py3.10 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "6.0" - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: load-balancer-rhel8-v6.0-py3.11-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v6.0 py3.11 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "6.0" - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: load-balancer-rhel8-v7.0-py3.12-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v7.0 py3.12 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "7.0" - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: load-balancer-rhel8-v7.0-py3.13-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v7.0 py3.13 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "7.0" - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: load-balancer-rhel8-v7.0-pypy3.9-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v7.0 pypy3.9 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "7.0" - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: load-balancer-rhel8-v8.0-pypy3.10-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v8.0 pypy3.10 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "8.0" - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 -- name: load-balancer-rhel8-v8.0-py3.9-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v8.0 py3.9 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "8.0" - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 -- name: load-balancer-rhel8-v8.0-py3.10-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v8.0 py3.10 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "8.0" - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.10/bin/python3 -- name: load-balancer-rhel8-latest-py3.11-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 latest py3.11 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: latest - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.11/bin/python3 -- name: load-balancer-rhel8-latest-py3.12-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 latest py3.12 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: latest - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.12/bin/python3 -- name: load-balancer-rhel8-latest-py3.13-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 latest py3.13 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: latest - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 -- name: load-balancer-rhel8-rapid-pypy3.9-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 rapid pypy3.9 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: rapid - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 -- name: load-balancer-rhel8-rapid-pypy3.10-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 rapid pypy3.10 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: rapid - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 -- name: load-balancer-rhel8-rapid-py3.9-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 rapid py3.9 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: rapid - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: testazureoidc-variant display_name: "OIDC Auth Azure" run_on: ubuntu2204-small diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml new file mode 100644 index 0000000000..52f8c673b3 --- /dev/null +++ b/.evergreen/generated_configs/variants.yml @@ -0,0 +1,2204 @@ +buildvariants: + # Alternative hosts tests + - name: openssl-1.0.2-rhel7-py3.9-auth-ssl + tasks: + - name: .5.0 .standalone + display_name: OpenSSL 1.0.2 RHEL7 py3.9 Auth SSL + run_on: + - rhel79-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: openssl-1.0.2-rhel7-py3.9-noauth-ssl + tasks: + - name: .5.0 .standalone + display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth SSL + run_on: + - rhel79-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: openssl-1.0.2-rhel7-py3.9-noauth-nossl + tasks: + - name: .5.0 .standalone + display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth NoSSL + run_on: + - rhel79-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: other-hosts-rhel9-fips-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL9-FIPS Auth SSL + run_on: + - rhel92-fips + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl + - name: other-hosts-rhel9-fips-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL9-FIPS NoAuth SSL + run_on: + - rhel92-fips + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl + - name: other-hosts-rhel9-fips-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL9-FIPS NoAuth NoSSL + run_on: + - rhel92-fips + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl + - name: other-hosts-rhel8-zseries-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-zseries Auth SSL + run_on: + - rhel8-zseries-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl + - name: other-hosts-rhel8-zseries-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-zseries NoAuth SSL + run_on: + - rhel8-zseries-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl + - name: other-hosts-rhel8-zseries-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-zseries NoAuth NoSSL + run_on: + - rhel8-zseries-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl + - name: other-hosts-rhel8-power8-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-POWER8 Auth SSL + run_on: + - rhel8-power-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl + - name: other-hosts-rhel8-power8-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-POWER8 NoAuth SSL + run_on: + - rhel8-power-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl + - name: other-hosts-rhel8-power8-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-POWER8 NoAuth NoSSL + run_on: + - rhel8-power-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl + - name: other-hosts-rhel8-arm64-auth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-arm64 Auth SSL + run_on: + - rhel82-arm64-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: auth + SSL: ssl + - name: other-hosts-rhel8-arm64-noauth-ssl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-arm64 NoAuth SSL + run_on: + - rhel82-arm64-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: ssl + - name: other-hosts-rhel8-arm64-noauth-nossl + tasks: + - name: .6.0 .standalone + display_name: Other hosts RHEL8-arm64 NoAuth NoSSL + run_on: + - rhel82-arm64-small + batchtime: 10080 + expansions: + SKIP_HATCH: "true" + AUTH: noauth + SSL: nossl + + # Atlas connect tests + - name: atlas-connect-rhel8-py3.9 + tasks: + - name: atlas-connect + display_name: Atlas connect RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: atlas-connect-rhel8-py3.13 + tasks: + - name: atlas-connect + display_name: Atlas connect RHEL8 py3.13 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.13/bin/python3 + + # Atlas data lake tests + - name: atlas-data-lake-rhel8-py3.9-no-c + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.9 No C + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: atlas-data-lake-rhel8-py3.9 + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: atlas-data-lake-rhel8-py3.13-no-c + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.13 No C + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: atlas-data-lake-rhel8-py3.13 + tasks: + - name: atlas-data-lake-tests + display_name: Atlas Data Lake RHEL8 py3.13 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.13/bin/python3 + + # Aws auth tests + - name: aws-auth-ubuntu-20-py3.9 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Ubuntu-20 py3.9 + run_on: + - ubuntu2004-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: aws-auth-ubuntu-20-py3.13 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Ubuntu-20 py3.13 + run_on: + - ubuntu2004-small + expansions: + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: aws-auth-win64-py3.9 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Win64 py3.9 + run_on: + - windows-64-vsMulti-small + expansions: + skip_ECS_auth_test: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: aws-auth-win64-py3.13 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth Win64 py3.13 + run_on: + - windows-64-vsMulti-small + expansions: + skip_ECS_auth_test: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: aws-auth-macos-py3.9 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth macOS py3.9 + run_on: + - macos-14 + expansions: + skip_ECS_auth_test: "true" + skip_EC2_auth_test: "true" + skip_web_identity_auth_test: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: aws-auth-macos-py3.13 + tasks: + - name: aws-auth-test-4.4 + - name: aws-auth-test-5.0 + - name: aws-auth-test-6.0 + - name: aws-auth-test-7.0 + - name: aws-auth-test-8.0 + - name: aws-auth-test-rapid + - name: aws-auth-test-latest + display_name: AWS Auth macOS py3.13 + run_on: + - macos-14 + expansions: + skip_ECS_auth_test: "true" + skip_EC2_auth_test: "true" + skip_web_identity_auth_test: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + + # Compression tests + - name: snappy-compression-rhel8-py3.9-no-c + tasks: + - name: .standalone + display_name: snappy compression RHEL8 py3.9 No C + run_on: + - rhel87-small + expansions: + COMPRESSORS: snappy + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: snappy-compression-rhel8-py3.10 + tasks: + - name: .standalone + display_name: snappy compression RHEL8 py3.10 + run_on: + - rhel87-small + expansions: + COMPRESSORS: snappy + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: zlib-compression-rhel8-py3.11-no-c + tasks: + - name: .standalone + display_name: zlib compression RHEL8 py3.11 No C + run_on: + - rhel87-small + expansions: + COMPRESSORS: zlib + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: zlib-compression-rhel8-py3.12 + tasks: + - name: .standalone + display_name: zlib compression RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zlib + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: zstd-compression-rhel8-py3.13-no-c + tasks: + - name: .standalone !.4.0 + display_name: zstd compression RHEL8 py3.13 No C + run_on: + - rhel87-small + expansions: + COMPRESSORS: zstd + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: zstd-compression-rhel8-py3.9 + tasks: + - name: .standalone !.4.0 + display_name: zstd compression RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zstd + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: snappy-compression-rhel8-pypy3.9 + tasks: + - name: .standalone + display_name: snappy compression RHEL8 pypy3.9 + run_on: + - rhel87-small + expansions: + COMPRESSORS: snappy + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: zlib-compression-rhel8-pypy3.10 + tasks: + - name: .standalone + display_name: zlib compression RHEL8 pypy3.10 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zlib + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - name: zstd-compression-rhel8-pypy3.9 + tasks: + - name: .standalone !.4.0 + display_name: zstd compression RHEL8 pypy3.9 + run_on: + - rhel87-small + expansions: + COMPRESSORS: zstd + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + + # Disable test commands tests + - name: disable-test-commands-rhel8-py3.9 + tasks: + - name: .latest + display_name: Disable test commands RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + DISABLE_TEST_COMMANDS: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + + # Doctests tests + - name: doctests-rhel8-py3.9 + tasks: + - name: doctests + display_name: Doctests RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 + + # Encryption tests + - name: encryption-rhel8-py3.9-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption RHEL8 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [encryption_tag] + - name: encryption-rhel8-py3.13-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption RHEL8 py3.13 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [encryption_tag] + - name: encryption-rhel8-pypy3.10-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption RHEL8 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [encryption_tag] + - name: encryption-crypt_shared-rhel8-py3.9-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption crypt_shared RHEL8 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [encryption_tag] + - name: encryption-crypt_shared-rhel8-py3.13-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption crypt_shared RHEL8 py3.13 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [encryption_tag] + - name: encryption-crypt_shared-rhel8-pypy3.10-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption crypt_shared RHEL8 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [encryption_tag] + - name: encryption-pyopenssl-rhel8-py3.9-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption PyOpenSSL RHEL8 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [encryption_tag] + - name: encryption-pyopenssl-rhel8-py3.13-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption PyOpenSSL RHEL8 py3.13 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [encryption_tag] + - name: encryption-pyopenssl-rhel8-pypy3.10-auth-ssl + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Encryption PyOpenSSL RHEL8 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [encryption_tag] + - name: encryption-rhel8-py3.10-auth-ssl + tasks: + - name: .replica_set + display_name: Encryption RHEL8 py3.10 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: encryption-crypt_shared-rhel8-py3.11-auth-nossl + tasks: + - name: .replica_set + display_name: Encryption crypt_shared RHEL8 py3.11 Auth NoSSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: encryption-pyopenssl-rhel8-py3.12-auth-ssl + tasks: + - name: .replica_set + display_name: Encryption PyOpenSSL RHEL8 py3.12 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_encryption_pyopenssl: "true" + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: encryption-rhel8-pypy3.9-auth-nossl + tasks: + - name: .replica_set + display_name: Encryption RHEL8 pypy3.9 Auth NoSSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: encryption-macos-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption macOS py3.9 Auth SSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + tags: [encryption_tag] + - name: encryption-macos-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption macOS py3.13 Auth NoSSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + tags: [encryption_tag] + - name: encryption-crypt_shared-macos-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared macOS py3.9 Auth SSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + tags: [encryption_tag] + - name: encryption-crypt_shared-macos-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared macOS py3.13 Auth NoSSL + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + tags: [encryption_tag] + - name: encryption-win64-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption Win64 py3.9 Auth SSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + tags: [encryption_tag] + - name: encryption-win64-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption Win64 py3.13 Auth NoSSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + tags: [encryption_tag] + - name: encryption-crypt_shared-win64-py3.9-auth-ssl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared Win64 py3.9 Auth SSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: ssl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + tags: [encryption_tag] + - name: encryption-crypt_shared-win64-py3.13-auth-nossl + tasks: + - name: .latest .replica_set + display_name: Encryption crypt_shared Win64 py3.13 Auth NoSSL + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + SSL: nossl + test_encryption: "true" + test_crypt_shared: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + tags: [encryption_tag] + + # Enterprise auth tests + - name: enterprise-auth-macos-py3.9-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth macOS py3.9 Auth + run_on: + - macos-14 + expansions: + AUTH: auth + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: enterprise-auth-rhel8-py3.10-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 py3.10 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: enterprise-auth-rhel8-py3.11-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 py3.11 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: enterprise-auth-rhel8-py3.12-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 py3.12 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: enterprise-auth-win64-py3.13-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth Win64 py3.13 Auth + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + PYTHON_BINARY: C:/python/Python313/python.exe + - name: enterprise-auth-rhel8-pypy3.9-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 pypy3.9 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: enterprise-auth-rhel8-pypy3.10-auth + tasks: + - name: test-enterprise-auth + display_name: Enterprise Auth RHEL8 pypy3.10 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + + # Green framework tests + - name: eventlet-rhel8-py3.9 + tasks: + - name: .standalone + display_name: Eventlet RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: eventlet + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: gevent-rhel8-py3.9 + tasks: + - name: .standalone + display_name: Gevent RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: gevent + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: eventlet-rhel8-py3.12 + tasks: + - name: .standalone + display_name: Eventlet RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: eventlet + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: gevent-rhel8-py3.12 + tasks: + - name: .standalone + display_name: Gevent RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: gevent + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.12/bin/python3 + + # Load balancer tests + - name: load-balancer-rhel8-v6.0-py3.9-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v6.0 py3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "6.0" + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: load-balancer-rhel8-v6.0-py3.10-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v6.0 py3.10 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "6.0" + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: load-balancer-rhel8-v6.0-py3.11-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v6.0 py3.11 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "6.0" + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: load-balancer-rhel8-v7.0-py3.12-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v7.0 py3.12 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "7.0" + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: load-balancer-rhel8-v7.0-py3.13-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v7.0 py3.13 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "7.0" + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: load-balancer-rhel8-v7.0-pypy3.9-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v7.0 pypy3.9 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "7.0" + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: load-balancer-rhel8-v8.0-pypy3.10-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v8.0 pypy3.10 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "8.0" + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - name: load-balancer-rhel8-v8.0-py3.9-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v8.0 py3.9 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "8.0" + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: load-balancer-rhel8-v8.0-py3.10-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 v8.0 py3.10 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: "8.0" + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: load-balancer-rhel8-rapid-py3.11-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 rapid py3.11 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: rapid + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: load-balancer-rhel8-rapid-py3.12-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 rapid py3.12 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: rapid + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: load-balancer-rhel8-rapid-py3.13-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 rapid py3.13 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: rapid + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: load-balancer-rhel8-latest-pypy3.9-auth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 latest pypy3.9 Auth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: latest + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: load-balancer-rhel8-latest-pypy3.10-noauth-ssl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 latest pypy3.10 NoAuth SSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: latest + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - name: load-balancer-rhel8-latest-py3.9-noauth-nossl + tasks: + - name: load-balancer-test + display_name: Load Balancer RHEL8 latest py3.9 NoAuth NoSSL + run_on: + - rhel87-small + batchtime: 10080 + expansions: + VERSION: latest + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + + # Mockupdb tests + - name: mockupdb-tests-rhel8-py3.9 + tasks: + - name: mockupdb + display_name: MockupDB Tests RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 + + # Mod wsgi tests + - name: mod_wsgi-ubuntu-22-py3.9 + tasks: + - name: mod-wsgi-standalone + - name: mod-wsgi-replica-set + - name: mod-wsgi-embedded-mode-standalone + - name: mod-wsgi-embedded-mode-replica-set + display_name: mod_wsgi Ubuntu-22 py3.9 + run_on: + - ubuntu2204-small + expansions: + MOD_WSGI_VERSION: "4" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: mod_wsgi-ubuntu-22-py3.13 + tasks: + - name: mod-wsgi-standalone + - name: mod-wsgi-replica-set + - name: mod-wsgi-embedded-mode-standalone + - name: mod-wsgi-embedded-mode-replica-set + display_name: mod_wsgi Ubuntu-22 py3.13 + run_on: + - ubuntu2204-small + expansions: + MOD_WSGI_VERSION: "4" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + + # No c ext tests + - name: no-c-ext-rhel8-py3.9 + tasks: + - name: .standalone + display_name: No C Ext RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: no-c-ext-rhel8-py3.10 + tasks: + - name: .replica_set + display_name: No C Ext RHEL8 py3.10 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: no-c-ext-rhel8-py3.11 + tasks: + - name: .sharded_cluster + display_name: No C Ext RHEL8 py3.11 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: no-c-ext-rhel8-py3.12 + tasks: + - name: .standalone + display_name: No C Ext RHEL8 py3.12 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: no-c-ext-rhel8-py3.13 + tasks: + - name: .replica_set + display_name: No C Ext RHEL8 py3.13 + run_on: + - rhel87-small + expansions: + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + + # Ocsp tests + - name: ocsp-test-rhel8-py3.9 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 py3.9 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/3.9/bin/python3 + VERSION: "4.4" + - name: ocsp-test-rhel8-py3.10 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 py3.10 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/3.10/bin/python3 + VERSION: "5.0" + - name: ocsp-test-rhel8-py3.11 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 py3.11 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/3.11/bin/python3 + VERSION: "6.0" + - name: ocsp-test-rhel8-py3.12 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 py3.12 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/3.12/bin/python3 + VERSION: "7.0" + - name: ocsp-test-rhel8-py3.13 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 py3.13 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/3.13/bin/python3 + VERSION: "8.0" + - name: ocsp-test-rhel8-pypy3.9 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 pypy3.9 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + VERSION: rapid + - name: ocsp-test-rhel8-pypy3.10 + tasks: + - name: .ocsp + display_name: OCSP test RHEL8 pypy3.10 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + VERSION: latest + - name: ocsp-test-win64-py3.9 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test Win64 py3.9 + run_on: + - windows-64-vsMulti-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: C:/python/Python39/python.exe + VERSION: "4.4" + - name: ocsp-test-win64-py3.13 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test Win64 py3.13 + run_on: + - windows-64-vsMulti-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: C:/python/Python313/python.exe + VERSION: "8.0" + - name: ocsp-test-macos-py3.9 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test macOS py3.9 + run_on: + - macos-14 + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + VERSION: "4.4" + - name: ocsp-test-macos-py3.13 + tasks: + - name: .ocsp-rsa !.ocsp-staple + display_name: OCSP test macOS py3.13 + run_on: + - macos-14 + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + VERSION: "8.0" + + # Oidc auth tests + - name: oidc-auth-rhel8 + tasks: + - name: testoidc_task_group + display_name: OIDC Auth RHEL8 + run_on: + - rhel87-small + batchtime: 20160 + - name: oidc-auth-macos + tasks: + - name: testoidc_task_group + display_name: OIDC Auth macOS + run_on: + - macos-14 + batchtime: 20160 + - name: oidc-auth-win64 + tasks: + - name: testoidc_task_group + display_name: OIDC Auth Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 20160 + + # Pyopenssl tests + - name: pyopenssl-macos-py3.9 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL macOS py3.9 + run_on: + - macos-14 + batchtime: 10080 + expansions: + AUTH: noauth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: pyopenssl-rhel8-py3.10 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 py3.10 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: pyopenssl-rhel8-py3.11 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 py3.11 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: pyopenssl-rhel8-py3.12 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 py3.12 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: pyopenssl-win64-py3.13 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL Win64 py3.13 + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: C:/python/Python313/python.exe + - name: pyopenssl-rhel8-pypy3.9 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 pypy3.9 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: pyopenssl-rhel8-pypy3.10 + tasks: + - name: .replica_set + - name: .7.0 + display_name: PyOpenSSL RHEL8 pypy3.10 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + AUTH: auth + test_pyopenssl: "true" + SSL: ssl + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + + # Search index tests + - name: search-index-helpers-rhel8-py3.9 + tasks: + - name: test_atlas_task_group_search_indexes + display_name: Search Index Helpers RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 + + # Server tests + - name: test-rhel8-py3.9-auth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.9 Auth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-py3.9-noauth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.9 NoAuth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-py3.9-noauth-nossl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.9 NoAuth NoSSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-py3.13-auth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.13 Auth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-py3.13-noauth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.13 NoAuth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-py3.13-noauth-nossl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 py3.13 NoAuth NoSSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-pypy3.10-auth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 pypy3.10 Auth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-pypy3.10-noauth-ssl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 pypy3.10 NoAuth SSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-pypy3.10-noauth-nossl-cov + tasks: + - name: .standalone + - name: .replica_set + - name: .sharded_cluster + display_name: Test RHEL8 pypy3.10 NoAuth NoSSL cov + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + COVERAGE: coverage + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] + - name: test-rhel8-py3.10-auth-ssl + tasks: + - name: .standalone + display_name: Test RHEL8 py3.10 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: test-rhel8-py3.11-noauth-ssl + tasks: + - name: .replica_set + display_name: Test RHEL8 py3.11 NoAuth SSL + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: ssl + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: test-rhel8-py3.12-noauth-nossl + tasks: + - name: .sharded_cluster + display_name: Test RHEL8 py3.12 NoAuth NoSSL + run_on: + - rhel87-small + expansions: + AUTH: noauth + SSL: nossl + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: test-rhel8-pypy3.9-auth-ssl + tasks: + - name: .standalone + display_name: Test RHEL8 pypy3.9 Auth SSL + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: test-macos-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test macOS py3.9 Auth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-py3.9-noauth-ssl-sync + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-py3.9-noauth-nossl-sync + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth NoSSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test macOS py3.9 Auth SSL Async + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-py3.9-noauth-ssl-async + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth SSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-py3.9-noauth-nossl-async + tasks: + - name: .standalone + display_name: Test macOS py3.9 NoAuth NoSSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 Auth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-py3.13-noauth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 NoAuth SSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-py3.13-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 NoAuth NoSSL Sync + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 Auth SSL Async + run_on: + - macos-14 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-py3.13-noauth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 NoAuth SSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-py3.13-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test macOS py3.13 NoAuth NoSSL Async + run_on: + - macos-14 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-arm64-py3.9-auth-ssl-sync + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 Auth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-arm64-py3.9-noauth-ssl-sync + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-arm64-py3.9-noauth-nossl-sync + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-arm64-py3.9-auth-ssl-async + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 Auth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-arm64-py3.9-noauth-ssl-async + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-arm64-py3.9-noauth-nossl-async + tasks: + - name: .standalone .6.0 + - name: .standalone .7.0 + - name: .standalone .8.0 + - name: .standalone .rapid + - name: .standalone .latest + display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: test-macos-arm64-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 Auth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-arm64-py3.13-noauth-ssl-sync + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 NoAuth SSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-arm64-py3.13-noauth-nossl-sync + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Sync + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-arm64-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 Auth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-arm64-py3.13-noauth-ssl-async + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 NoAuth SSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-macos-arm64-py3.13-noauth-nossl-async + tasks: + - name: .sharded_cluster .6.0 + - name: .sharded_cluster .7.0 + - name: .sharded_cluster .8.0 + - name: .sharded_cluster .rapid + - name: .sharded_cluster .latest + display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Async + run_on: + - macos-14-arm64 + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + - name: test-win64-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win64 py3.9 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: test-win64-py3.9-noauth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: test-win64-py3.9-noauth-nossl-sync + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: test-win64-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test Win64 py3.9 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: test-win64-py3.9-noauth-ssl-async + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: test-win64-py3.9-noauth-nossl-async + tasks: + - name: .standalone + display_name: Test Win64 py3.9 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python39/python.exe + - name: test-win64-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: test-win64-py3.13-noauth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: test-win64-py3.13-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: test-win64-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: test-win64-py3.13-noauth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: test-win64-py3.13-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test Win64 py3.13 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/Python313/python.exe + - name: test-win32-py3.9-auth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win32 py3.9 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe + - name: test-win32-py3.9-noauth-ssl-sync + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe + - name: test-win32-py3.9-noauth-nossl-sync + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe + - name: test-win32-py3.9-auth-ssl-async + tasks: + - name: .standalone + display_name: Test Win32 py3.9 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe + - name: test-win32-py3.9-noauth-ssl-async + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe + - name: test-win32-py3.9-noauth-nossl-async + tasks: + - name: .standalone + display_name: Test Win32 py3.9 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python39/python.exe + - name: test-win32-py3.13-auth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 Auth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe + - name: test-win32-py3.13-noauth-ssl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 NoAuth SSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe + - name: test-win32-py3.13-noauth-nossl-sync + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 NoAuth NoSSL Sync + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe + - name: test-win32-py3.13-auth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 Auth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: auth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe + - name: test-win32-py3.13-noauth-ssl-async + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 NoAuth SSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: ssl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe + - name: test-win32-py3.13-noauth-nossl-async + tasks: + - name: .sharded_cluster + display_name: Test Win32 py3.13 NoAuth NoSSL Async + run_on: + - windows-64-vsMulti-small + expansions: + AUTH: noauth + SSL: nossl + TEST_SUITES: default_async + SKIP_CSOT_TESTS: "true" + PYTHON_BINARY: C:/python/32/Python313/python.exe + + # Serverless tests + - name: serverless-rhel8-py3.9 + tasks: + - name: serverless_task_group + display_name: Serverless RHEL8 py3.9 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + test_serverless: "true" + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: serverless-rhel8-py3.13 + tasks: + - name: serverless_task_group + display_name: Serverless RHEL8 py3.13 + run_on: + - rhel87-small + batchtime: 10080 + expansions: + test_serverless: "true" + AUTH: auth + SSL: ssl + PYTHON_BINARY: /opt/python/3.13/bin/python3 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 7adeac82e0..b65d9b62da 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -9,8 +9,11 @@ # Note: Run this file with `hatch run`, `pipx run`, or `uv run`. from __future__ import annotations +import sys from dataclasses import dataclass +from inspect import getmembers, isfunction from itertools import cycle, product, zip_longest +from pathlib import Path from typing import Any from shrub.v3.evg_build_variant import BuildVariant @@ -172,10 +175,10 @@ def handle_c_ext(c_ext, expansions): expansions["NO_EXT"] = "1" -def generate_yaml(tasks=None, variants=None): +def create_yaml(tasks=None, variants=None): """Generate the yaml for a given set of tasks and variants.""" project = EvgProject(tasks=tasks, buildvariants=variants) - out = ShrubService.generate_yaml(project) + out = ShrubService.create_yaml(project) # Dedent by two spaces to match what we use in config.yml lines = [line[2:] for line in out.splitlines()] print("\n".join(lines)) # noqa: T201 @@ -198,7 +201,7 @@ def create_ocsp_variants() -> list[BuildVariant]: host = "rhel8" variant = create_variant( [".ocsp"], - get_display_name(base_display, host, version, python), + get_display_name(base_display, host, version=version, python=python), python=python, version=version, host=host, @@ -213,7 +216,7 @@ def create_ocsp_variants() -> list[BuildVariant]: python = CPYTHONS[0] if version == "4.4" else CPYTHONS[-1] variant = create_variant( [".ocsp-rsa !.ocsp-staple"], - get_display_name(base_display, host, version, python), + get_display_name(base_display, host, version=version, python=python), python=python, version=version, host=host, @@ -540,7 +543,7 @@ def create_green_framework_variants(): return variants -def generate_no_c_ext_variants(): +def create_no_c_ext_variants(): variants = [] host = "rhel8" for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): @@ -555,7 +558,7 @@ def generate_no_c_ext_variants(): return variants -def generate_atlas_data_lake_variants(): +def create_atlas_data_lake_variants(): variants = [] host = "rhel8" for python, c_ext in product(MIN_MAX_PYTHON, C_EXTS): @@ -570,7 +573,7 @@ def generate_atlas_data_lake_variants(): return variants -def generate_mod_wsgi_variants(): +def create_mod_wsgi_variants(): variants = [] host = "ubuntu22" tasks = [ @@ -589,7 +592,7 @@ def generate_mod_wsgi_variants(): return variants -def generate_disable_test_commands_variants(): +def create_disable_test_commands_variants(): host = "rhel8" expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") python = CPYTHONS[0] @@ -598,7 +601,7 @@ def generate_disable_test_commands_variants(): return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] -def generate_serverless_variants(): +def create_serverless_variants(): host = "rhel8" batchtime = BATCHTIME_WEEK expansions = dict(test_serverless="true", AUTH="auth", SSL="ssl") @@ -617,7 +620,7 @@ def generate_serverless_variants(): ] -def generate_oidc_auth_variants(): +def create_oidc_auth_variants(): variants = [] for host in ["rhel8", "macos", "win64"]: variants.append( @@ -631,7 +634,7 @@ def generate_oidc_auth_variants(): return variants -def generate_search_index_variants(): +def create_search_index_variants(): host = "rhel8" python = CPYTHONS[0] return [ @@ -644,7 +647,7 @@ def generate_search_index_variants(): ] -def generate_mockupdb_variants(): +def create_mockupdb_variants(): host = "rhel8" python = CPYTHONS[0] return [ @@ -657,7 +660,7 @@ def generate_mockupdb_variants(): ] -def generate_doctests_variants(): +def create_doctests_variants(): host = "rhel8" python = CPYTHONS[0] return [ @@ -670,7 +673,7 @@ def generate_doctests_variants(): ] -def generate_atlas_connect_variants(): +def create_atlas_connect_variants(): host = "rhel8" return [ create_variant( @@ -683,7 +686,7 @@ def generate_atlas_connect_variants(): ] -def generate_aws_auth_variants(): +def create_aws_auth_variants(): variants = [] tasks = [ "aws-auth-test-4.4", @@ -713,7 +716,7 @@ def generate_aws_auth_variants(): return variants -def generate_alternative_hosts_variants(): +def create_alternative_hosts_variants(): base_expansions = dict(SKIP_HATCH="true") batchtime = BATCHTIME_WEEK variants = [] @@ -756,5 +759,35 @@ def generate_alternative_hosts_variants(): # Generate Config ################## -variants = generate_search_index_variants() -generate_yaml(variants=variants) + +def write_variants_to_file(): + mod = sys.modules[__name__] + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "variants.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("buildvariants:\n") + + for name, func in getmembers(mod, isfunction): + if not name.endswith("_variants"): + continue + if not name.startswith("create_"): + raise ValueError("Variant creators must start with create_") + title = name.replace("create_", "").replace("_variants", "").replace("_", " ").capitalize() + project = EvgProject(tasks=None, buildvariants=func()) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +write_variants_to_file() From 215bca21ec2027e492eef07e89bc4d850eb01671 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 24 Oct 2024 10:30:30 -0500 Subject: [PATCH 1586/2111] PYTHON-4902 Use shrub.py to generate tasks (#1966) --- .evergreen/config.yml | 246 +- .evergreen/generated_configs/tasks.yml | 2882 +++++++++++++++++++++ .evergreen/generated_configs/variants.yml | 1202 ++------- .evergreen/scripts/generate_config.py | 173 +- 4 files changed, 3208 insertions(+), 1295 deletions(-) create mode 100644 .evergreen/generated_configs/tasks.yml diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6e48a380d3..a1587a281d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -26,7 +26,8 @@ timeout: ls -la include: - - filename: .evergreen/generated_configs/variants.yml + - filename: .evergreen/generated_configs/tasks.yml + - filename: .evergreen/generated_configs/variants.yml functions: "fetch source": @@ -971,249 +972,6 @@ tasks: TOPOLOGY: "server" - func: "run doctests" - - name: "test-4.0-standalone" - tags: ["4.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-4.0-replica_set" - tags: ["4.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-4.0-sharded_cluster" - tags: ["4.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-4.2-standalone" - tags: ["4.2", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-4.2-replica_set" - tags: ["4.2", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-4.2-sharded_cluster" - tags: ["4.2", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-4.4-standalone" - tags: ["4.4", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-4.4-replica_set" - tags: ["4.4", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-4.4-sharded_cluster" - tags: ["4.4", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-5.0-standalone" - tags: ["5.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "5.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-5.0-replica_set" - tags: ["5.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "5.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-5.0-sharded_cluster" - tags: ["5.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "5.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-6.0-standalone" - tags: ["6.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-6.0-replica_set" - tags: ["6.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-6.0-sharded_cluster" - tags: ["6.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-8.0-standalone" - tags: ["8.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "8.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-8.0-replica_set" - tags: ["8.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "8.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-8.0-sharded_cluster" - tags: ["8.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "8.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-7.0-standalone" - tags: ["7.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "7.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-7.0-replica_set" - tags: ["7.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "7.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-7.0-sharded_cluster" - tags: ["7.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "7.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-latest-standalone" - tags: ["latest", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-latest-replica_set" - tags: ["latest", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-latest-sharded_cluster" - tags: ["latest", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-rapid-standalone" - tags: ["rapid", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "rapid" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-rapid-replica_set" - tags: ["rapid", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "rapid" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-rapid-sharded_cluster" - tags: ["rapid", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "rapid" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - name: "test-serverless" tags: ["serverless"] commands: diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml new file mode 100644 index 0000000000..fb3da4bb24 --- /dev/null +++ b/.evergreen/generated_configs/tasks.yml @@ -0,0 +1,2882 @@ +tasks: + # Server tests + - name: test-4.0-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - standalone + - auth + - ssl + - sync + - name: test-4.0-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - standalone + - auth + - ssl + - async + - name: test-4.0-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - standalone + - noauth + - ssl + - sync + - name: test-4.0-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - standalone + - noauth + - ssl + - async + - name: test-4.0-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - standalone + - noauth + - nossl + - sync + - name: test-4.0-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - standalone + - noauth + - nossl + - async + - name: test-4.4-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - standalone + - auth + - ssl + - sync + - name: test-4.4-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - standalone + - auth + - ssl + - async + - name: test-4.4-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - standalone + - noauth + - ssl + - sync + - name: test-4.4-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - standalone + - noauth + - ssl + - async + - name: test-4.4-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - standalone + - noauth + - nossl + - sync + - name: test-4.4-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - standalone + - noauth + - nossl + - async + - name: test-5.0-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - standalone + - auth + - ssl + - sync + - name: test-5.0-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - standalone + - auth + - ssl + - async + - name: test-5.0-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - standalone + - noauth + - ssl + - sync + - name: test-5.0-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - standalone + - noauth + - ssl + - async + - name: test-5.0-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - standalone + - noauth + - nossl + - sync + - name: test-5.0-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - standalone + - noauth + - nossl + - async + - name: test-6.0-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - standalone + - auth + - ssl + - sync + - name: test-6.0-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - standalone + - auth + - ssl + - async + - name: test-6.0-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - standalone + - noauth + - ssl + - sync + - name: test-6.0-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - standalone + - noauth + - ssl + - async + - name: test-6.0-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - standalone + - noauth + - nossl + - sync + - name: test-6.0-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - standalone + - noauth + - nossl + - async + - name: test-7.0-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - standalone + - auth + - ssl + - sync + - name: test-7.0-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - standalone + - auth + - ssl + - async + - name: test-7.0-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - standalone + - noauth + - ssl + - sync + - name: test-7.0-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - standalone + - noauth + - ssl + - async + - name: test-7.0-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - standalone + - noauth + - nossl + - sync + - name: test-7.0-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - standalone + - noauth + - nossl + - async + - name: test-8.0-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - standalone + - auth + - ssl + - sync + - name: test-8.0-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - standalone + - auth + - ssl + - async + - name: test-8.0-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - standalone + - noauth + - ssl + - sync + - name: test-8.0-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - standalone + - noauth + - ssl + - async + - name: test-8.0-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - standalone + - noauth + - nossl + - sync + - name: test-8.0-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - standalone + - noauth + - nossl + - async + - name: test-rapid-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - standalone + - auth + - ssl + - sync + - name: test-rapid-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - standalone + - auth + - ssl + - async + - name: test-rapid-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - standalone + - noauth + - ssl + - sync + - name: test-rapid-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - standalone + - noauth + - ssl + - async + - name: test-rapid-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - standalone + - noauth + - nossl + - sync + - name: test-rapid-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - standalone + - noauth + - nossl + - async + - name: test-latest-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - standalone + - auth + - ssl + - sync + - name: test-latest-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - standalone + - auth + - ssl + - async + - name: test-latest-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - standalone + - noauth + - ssl + - sync + - name: test-latest-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - standalone + - noauth + - ssl + - async + - name: test-latest-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - standalone + - noauth + - nossl + - sync + - name: test-latest-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - standalone + - noauth + - nossl + - async + - name: test-4.0-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - replica_set + - auth + - ssl + - sync + - name: test-4.0-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - replica_set + - auth + - ssl + - async + - name: test-4.0-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - replica_set + - noauth + - ssl + - sync + - name: test-4.0-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - replica_set + - noauth + - ssl + - async + - name: test-4.0-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - replica_set + - noauth + - nossl + - sync + - name: test-4.0-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - replica_set + - noauth + - nossl + - async + - name: test-4.4-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - replica_set + - auth + - ssl + - sync + - name: test-4.4-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - replica_set + - auth + - ssl + - async + - name: test-4.4-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - replica_set + - noauth + - ssl + - sync + - name: test-4.4-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - replica_set + - noauth + - ssl + - async + - name: test-4.4-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - replica_set + - noauth + - nossl + - sync + - name: test-4.4-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - replica_set + - noauth + - nossl + - async + - name: test-5.0-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - replica_set + - auth + - ssl + - sync + - name: test-5.0-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - replica_set + - auth + - ssl + - async + - name: test-5.0-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - replica_set + - noauth + - ssl + - sync + - name: test-5.0-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - replica_set + - noauth + - ssl + - async + - name: test-5.0-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - replica_set + - noauth + - nossl + - sync + - name: test-5.0-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - replica_set + - noauth + - nossl + - async + - name: test-6.0-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - replica_set + - auth + - ssl + - sync + - name: test-6.0-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - replica_set + - auth + - ssl + - async + - name: test-6.0-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - replica_set + - noauth + - ssl + - sync + - name: test-6.0-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - replica_set + - noauth + - ssl + - async + - name: test-6.0-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - replica_set + - noauth + - nossl + - sync + - name: test-6.0-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - replica_set + - noauth + - nossl + - async + - name: test-7.0-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - replica_set + - auth + - ssl + - sync + - name: test-7.0-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - replica_set + - auth + - ssl + - async + - name: test-7.0-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - replica_set + - noauth + - ssl + - sync + - name: test-7.0-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - replica_set + - noauth + - ssl + - async + - name: test-7.0-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - replica_set + - noauth + - nossl + - sync + - name: test-7.0-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - replica_set + - noauth + - nossl + - async + - name: test-8.0-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - replica_set + - auth + - ssl + - sync + - name: test-8.0-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - replica_set + - auth + - ssl + - async + - name: test-8.0-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - replica_set + - noauth + - ssl + - sync + - name: test-8.0-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - replica_set + - noauth + - ssl + - async + - name: test-8.0-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - replica_set + - noauth + - nossl + - sync + - name: test-8.0-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - replica_set + - noauth + - nossl + - async + - name: test-rapid-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - replica_set + - auth + - ssl + - sync + - name: test-rapid-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - replica_set + - auth + - ssl + - async + - name: test-rapid-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - replica_set + - noauth + - ssl + - sync + - name: test-rapid-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - replica_set + - noauth + - ssl + - async + - name: test-rapid-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - replica_set + - noauth + - nossl + - sync + - name: test-rapid-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - replica_set + - noauth + - nossl + - async + - name: test-latest-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - replica_set + - auth + - ssl + - sync + - name: test-latest-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - replica_set + - auth + - ssl + - async + - name: test-latest-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - replica_set + - noauth + - ssl + - sync + - name: test-latest-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - replica_set + - noauth + - ssl + - async + - name: test-latest-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - replica_set + - noauth + - nossl + - sync + - name: test-latest-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - replica_set + - noauth + - nossl + - async + - name: test-4.0-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - sharded_cluster + - auth + - ssl + - sync + - name: test-4.0-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - sharded_cluster + - auth + - ssl + - async + - name: test-4.0-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-4.0-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - sharded_cluster + - noauth + - ssl + - async + - name: test-4.0-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.0" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-4.0-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.0" + - sharded_cluster + - noauth + - nossl + - async + - name: test-4.4-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - sharded_cluster + - auth + - ssl + - sync + - name: test-4.4-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - sharded_cluster + - auth + - ssl + - async + - name: test-4.4-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-4.4-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - sharded_cluster + - noauth + - ssl + - async + - name: test-4.4-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.4" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-4.4-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.4" + - sharded_cluster + - noauth + - nossl + - async + - name: test-5.0-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - sharded_cluster + - auth + - ssl + - sync + - name: test-5.0-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - sharded_cluster + - auth + - ssl + - async + - name: test-5.0-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-5.0-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - sharded_cluster + - noauth + - ssl + - async + - name: test-5.0-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "5.0" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-5.0-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "5.0" + - sharded_cluster + - noauth + - nossl + - async + - name: test-6.0-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - sharded_cluster + - auth + - ssl + - sync + - name: test-6.0-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - sharded_cluster + - auth + - ssl + - async + - name: test-6.0-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-6.0-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - sharded_cluster + - noauth + - ssl + - async + - name: test-6.0-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "6.0" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-6.0-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "6.0" + - sharded_cluster + - noauth + - nossl + - async + - name: test-7.0-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - sharded_cluster + - auth + - ssl + - sync + - name: test-7.0-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - sharded_cluster + - auth + - ssl + - async + - name: test-7.0-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-7.0-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - sharded_cluster + - noauth + - ssl + - async + - name: test-7.0-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "7.0" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-7.0-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "7.0" + - sharded_cluster + - noauth + - nossl + - async + - name: test-8.0-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - sharded_cluster + - auth + - ssl + - sync + - name: test-8.0-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - sharded_cluster + - auth + - ssl + - async + - name: test-8.0-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-8.0-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - sharded_cluster + - noauth + - ssl + - async + - name: test-8.0-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "8.0" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-8.0-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "8.0" + - sharded_cluster + - noauth + - nossl + - async + - name: test-rapid-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - sharded_cluster + - auth + - ssl + - sync + - name: test-rapid-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - sharded_cluster + - auth + - ssl + - async + - name: test-rapid-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - sharded_cluster + - noauth + - ssl + - sync + - name: test-rapid-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - sharded_cluster + - noauth + - ssl + - async + - name: test-rapid-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - rapid + - sharded_cluster + - noauth + - nossl + - sync + - name: test-rapid-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - rapid + - sharded_cluster + - noauth + - nossl + - async + - name: test-latest-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - sharded_cluster + - auth + - ssl + - sync + - name: test-latest-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - sharded_cluster + - auth + - ssl + - async + - name: test-latest-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - sharded_cluster + - noauth + - ssl + - sync + - name: test-latest-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - sharded_cluster + - noauth + - ssl + - async + - name: test-latest-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - latest + - sharded_cluster + - noauth + - nossl + - sync + - name: test-latest-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - latest + - sharded_cluster + - noauth + - nossl + - async diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 52f8c673b3..327becc249 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1,173 +1,51 @@ buildvariants: # Alternative hosts tests - - name: openssl-1.0.2-rhel7-py3.9-auth-ssl + - name: openssl-1.0.2-rhel7-py3.9 tasks: - name: .5.0 .standalone - display_name: OpenSSL 1.0.2 RHEL7 py3.9 Auth SSL + display_name: OpenSSL 1.0.2 RHEL7 py3.9 run_on: - rhel79-small batchtime: 10080 expansions: SKIP_HATCH: "true" - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: openssl-1.0.2-rhel7-py3.9-noauth-ssl - tasks: - - name: .5.0 .standalone - display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth SSL - run_on: - - rhel79-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: openssl-1.0.2-rhel7-py3.9-noauth-nossl - tasks: - - name: .5.0 .standalone - display_name: OpenSSL 1.0.2 RHEL7 py3.9 NoAuth NoSSL - run_on: - - rhel79-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: other-hosts-rhel9-fips-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL9-FIPS Auth SSL - run_on: - - rhel92-fips - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl - - name: other-hosts-rhel9-fips-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL9-FIPS NoAuth SSL - run_on: - - rhel92-fips - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl - - name: other-hosts-rhel9-fips-noauth-nossl + - name: other-hosts-rhel9-fips tasks: - name: .6.0 .standalone - display_name: Other hosts RHEL9-FIPS NoAuth NoSSL + display_name: Other hosts RHEL9-FIPS run_on: - rhel92-fips batchtime: 10080 expansions: SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl - - name: other-hosts-rhel8-zseries-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-zseries Auth SSL - run_on: - - rhel8-zseries-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl - - name: other-hosts-rhel8-zseries-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-zseries NoAuth SSL - run_on: - - rhel8-zseries-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl - - name: other-hosts-rhel8-zseries-noauth-nossl + - name: other-hosts-rhel8-zseries tasks: - name: .6.0 .standalone - display_name: Other hosts RHEL8-zseries NoAuth NoSSL + display_name: Other hosts RHEL8-zseries run_on: - rhel8-zseries-small batchtime: 10080 expansions: SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl - - name: other-hosts-rhel8-power8-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-POWER8 Auth SSL - run_on: - - rhel8-power-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl - - name: other-hosts-rhel8-power8-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-POWER8 NoAuth SSL - run_on: - - rhel8-power-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl - - name: other-hosts-rhel8-power8-noauth-nossl + - name: other-hosts-rhel8-power8 tasks: - name: .6.0 .standalone - display_name: Other hosts RHEL8-POWER8 NoAuth NoSSL + display_name: Other hosts RHEL8-POWER8 run_on: - rhel8-power-small batchtime: 10080 expansions: SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl - - name: other-hosts-rhel8-arm64-auth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-arm64 Auth SSL - run_on: - - rhel82-arm64-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: auth - SSL: ssl - - name: other-hosts-rhel8-arm64-noauth-ssl - tasks: - - name: .6.0 .standalone - display_name: Other hosts RHEL8-arm64 NoAuth SSL - run_on: - - rhel82-arm64-small - batchtime: 10080 - expansions: - SKIP_HATCH: "true" - AUTH: noauth - SSL: ssl - - name: other-hosts-rhel8-arm64-noauth-nossl + - name: other-hosts-rhel8-arm64 tasks: - name: .6.0 .standalone - display_name: Other hosts RHEL8-arm64 NoAuth NoSSL + display_name: Other hosts RHEL8-arm64 run_on: - rhel82-arm64-small batchtime: 10080 expansions: SKIP_HATCH: "true" - AUTH: noauth - SSL: nossl # Atlas connect tests - name: atlas-connect-rhel8-py3.9 @@ -320,7 +198,7 @@ buildvariants: # Compression tests - name: snappy-compression-rhel8-py3.9-no-c tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: snappy compression RHEL8 py3.9 No C run_on: - rhel87-small @@ -330,7 +208,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: snappy-compression-rhel8-py3.10 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: snappy compression RHEL8 py3.10 run_on: - rhel87-small @@ -339,7 +217,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: zlib-compression-rhel8-py3.11-no-c tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: zlib compression RHEL8 py3.11 No C run_on: - rhel87-small @@ -349,7 +227,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: zlib-compression-rhel8-py3.12 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: zlib compression RHEL8 py3.12 run_on: - rhel87-small @@ -358,7 +236,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: zstd-compression-rhel8-py3.13-no-c tasks: - - name: .standalone !.4.0 + - name: .standalone .noauth .nossl !.4.0 display_name: zstd compression RHEL8 py3.13 No C run_on: - rhel87-small @@ -368,7 +246,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 - name: zstd-compression-rhel8-py3.9 tasks: - - name: .standalone !.4.0 + - name: .standalone .noauth .nossl !.4.0 display_name: zstd compression RHEL8 py3.9 run_on: - rhel87-small @@ -377,7 +255,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: snappy-compression-rhel8-pypy3.9 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: snappy compression RHEL8 pypy3.9 run_on: - rhel87-small @@ -386,7 +264,7 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: zlib-compression-rhel8-pypy3.10 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: zlib compression RHEL8 pypy3.10 run_on: - rhel87-small @@ -395,7 +273,7 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - name: zstd-compression-rhel8-pypy3.9 tasks: - - name: .standalone !.4.0 + - name: .standalone .noauth .nossl !.4.0 display_name: zstd compression RHEL8 pypy3.9 run_on: - rhel87-small @@ -427,297 +305,255 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Encryption tests - - name: encryption-rhel8-py3.9-auth-ssl + - name: encryption-rhel8-py3.9 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption RHEL8 py3.9 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption RHEL8 py3.9 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-rhel8-py3.13-auth-ssl + - name: encryption-rhel8-py3.13 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption RHEL8 py3.13 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption RHEL8 py3.13 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-rhel8-pypy3.10-auth-ssl + - name: encryption-rhel8-pypy3.10 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption RHEL8 pypy3.10 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption RHEL8 pypy3.10 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-py3.9-auth-ssl + - name: encryption-crypt_shared-rhel8-py3.9 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption crypt_shared RHEL8 py3.9 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption crypt_shared RHEL8 py3.9 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-py3.13-auth-ssl + - name: encryption-crypt_shared-rhel8-py3.13 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption crypt_shared RHEL8 py3.13 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption crypt_shared RHEL8 py3.13 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-pypy3.10-auth-ssl + - name: encryption-crypt_shared-rhel8-pypy3.10 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption crypt_shared RHEL8 pypy3.10 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption crypt_shared RHEL8 pypy3.10 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-py3.9-auth-ssl + - name: encryption-pyopenssl-rhel8-py3.9 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption PyOpenSSL RHEL8 py3.9 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption PyOpenSSL RHEL8 py3.9 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-py3.13-auth-ssl + - name: encryption-pyopenssl-rhel8-py3.13 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption PyOpenSSL RHEL8 py3.13 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption PyOpenSSL RHEL8 py3.13 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-pypy3.10-auth-ssl + - name: encryption-pyopenssl-rhel8-pypy3.10 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Encryption PyOpenSSL RHEL8 pypy3.10 Auth SSL + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Encryption PyOpenSSL RHEL8 pypy3.10 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-rhel8-py3.10-auth-ssl + - name: encryption-rhel8-py3.10 tasks: - - name: .replica_set - display_name: Encryption RHEL8 py3.10 Auth SSL + - name: .sharded_cluster .auth .ssl + display_name: Encryption RHEL8 py3.10 run_on: - rhel87-small expansions: - AUTH: auth - SSL: ssl test_encryption: "true" PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: encryption-crypt_shared-rhel8-py3.11-auth-nossl + - name: encryption-crypt_shared-rhel8-py3.11 tasks: - - name: .replica_set - display_name: Encryption crypt_shared RHEL8 py3.11 Auth NoSSL + - name: .replica_set .noauth .ssl + display_name: Encryption crypt_shared RHEL8 py3.11 run_on: - rhel87-small expansions: - AUTH: auth - SSL: nossl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: encryption-pyopenssl-rhel8-py3.12-auth-ssl + - name: encryption-pyopenssl-rhel8-py3.12 tasks: - - name: .replica_set - display_name: Encryption PyOpenSSL RHEL8 py3.12 Auth SSL + - name: .standalone .noauth .nossl + display_name: Encryption PyOpenSSL RHEL8 py3.12 run_on: - rhel87-small expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: encryption-rhel8-pypy3.9-auth-nossl + - name: encryption-rhel8-pypy3.9 tasks: - - name: .replica_set - display_name: Encryption RHEL8 pypy3.9 Auth NoSSL + - name: .sharded_cluster .auth .ssl + display_name: Encryption RHEL8 pypy3.9 run_on: - rhel87-small expansions: - AUTH: auth - SSL: nossl test_encryption: "true" PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: encryption-macos-py3.9-auth-ssl + - name: encryption-macos-py3.9 tasks: - name: .latest .replica_set - display_name: Encryption macOS py3.9 Auth SSL + display_name: Encryption macOS py3.9 run_on: - macos-14 batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-macos-py3.13-auth-nossl + - name: encryption-macos-py3.13 tasks: - name: .latest .replica_set - display_name: Encryption macOS py3.13 Auth NoSSL + display_name: Encryption macOS py3.13 run_on: - macos-14 batchtime: 10080 expansions: - AUTH: auth - SSL: nossl test_encryption: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-macos-py3.9-auth-ssl + - name: encryption-crypt_shared-macos-py3.9 tasks: - name: .latest .replica_set - display_name: Encryption crypt_shared macOS py3.9 Auth SSL + display_name: Encryption crypt_shared macOS py3.9 run_on: - macos-14 batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-macos-py3.13-auth-nossl + - name: encryption-crypt_shared-macos-py3.13 tasks: - name: .latest .replica_set - display_name: Encryption crypt_shared macOS py3.13 Auth NoSSL + display_name: Encryption crypt_shared macOS py3.13 run_on: - macos-14 batchtime: 10080 expansions: - AUTH: auth - SSL: nossl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-win64-py3.9-auth-ssl + - name: encryption-win64-py3.9 tasks: - name: .latest .replica_set - display_name: Encryption Win64 py3.9 Auth SSL + display_name: Encryption Win64 py3.9 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - - name: encryption-win64-py3.13-auth-nossl + - name: encryption-win64-py3.13 tasks: - name: .latest .replica_set - display_name: Encryption Win64 py3.13 Auth NoSSL + display_name: Encryption Win64 py3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: - AUTH: auth - SSL: nossl test_encryption: "true" PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] - - name: encryption-crypt_shared-win64-py3.9-auth-ssl + - name: encryption-crypt_shared-win64-py3.9 tasks: - name: .latest .replica_set - display_name: Encryption crypt_shared Win64 py3.9 Auth SSL + display_name: Encryption crypt_shared Win64 py3.9 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: - AUTH: auth - SSL: ssl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - - name: encryption-crypt_shared-win64-py3.13-auth-nossl + - name: encryption-crypt_shared-win64-py3.13 tasks: - name: .latest .replica_set - display_name: Encryption crypt_shared Win64 py3.13 Auth NoSSL + display_name: Encryption crypt_shared Win64 py3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: - AUTH: auth - SSL: nossl test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: C:/python/Python313/python.exe @@ -791,7 +627,7 @@ buildvariants: # Green framework tests - name: eventlet-rhel8-py3.9 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: Eventlet RHEL8 py3.9 run_on: - rhel87-small @@ -802,7 +638,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: gevent-rhel8-py3.9 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: Gevent RHEL8 py3.9 run_on: - rhel87-small @@ -813,7 +649,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: eventlet-rhel8-py3.12 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: Eventlet RHEL8 py3.12 run_on: - rhel87-small @@ -824,7 +660,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: gevent-rhel8-py3.12 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: Gevent RHEL8 py3.12 run_on: - rhel87-small @@ -1070,7 +906,7 @@ buildvariants: # No c ext tests - name: no-c-ext-rhel8-py3.9 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: No C Ext RHEL8 py3.9 run_on: - rhel87-small @@ -1079,7 +915,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: no-c-ext-rhel8-py3.10 tasks: - - name: .replica_set + - name: .replica_set .noauth .nossl display_name: No C Ext RHEL8 py3.10 run_on: - rhel87-small @@ -1088,7 +924,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: no-c-ext-rhel8-py3.11 tasks: - - name: .sharded_cluster + - name: .sharded_cluster .noauth .nossl display_name: No C Ext RHEL8 py3.11 run_on: - rhel87-small @@ -1097,7 +933,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: no-c-ext-rhel8-py3.12 tasks: - - name: .standalone + - name: .standalone .noauth .nossl display_name: No C Ext RHEL8 py3.12 run_on: - rhel87-small @@ -1106,7 +942,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: no-c-ext-rhel8-py3.13 tasks: - - name: .replica_set + - name: .replica_set .noauth .nossl display_name: No C Ext RHEL8 py3.13 run_on: - rhel87-small @@ -1285,94 +1121,80 @@ buildvariants: # Pyopenssl tests - name: pyopenssl-macos-py3.9 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .noauth .nossl + - name: .7.0 .noauth .nossl display_name: PyOpenSSL macOS py3.9 run_on: - macos-14 batchtime: 10080 expansions: - AUTH: noauth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-py3.10 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .auth .ssl + - name: .7.0 .auth .ssl display_name: PyOpenSSL RHEL8 py3.10 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-py3.11 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .auth .ssl + - name: .7.0 .auth .ssl display_name: PyOpenSSL RHEL8 py3.11 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-py3.12 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .auth .ssl + - name: .7.0 .auth .ssl display_name: PyOpenSSL RHEL8 py3.12 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-py3.13 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .auth .ssl + - name: .7.0 .auth .ssl display_name: PyOpenSSL Win64 py3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: - AUTH: auth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.9 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .auth .ssl + - name: .7.0 .auth .ssl display_name: PyOpenSSL RHEL8 pypy3.9 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: pyopenssl-rhel8-pypy3.10 tasks: - - name: .replica_set - - name: .7.0 + - name: .replica_set .auth .ssl + - name: .7.0 .auth .ssl display_name: PyOpenSSL RHEL8 pypy3.10 run_on: - rhel87-small batchtime: 10080 expansions: - AUTH: auth test_pyopenssl: "true" - SSL: ssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Search index tests @@ -1386,794 +1208,196 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Server tests - - name: test-rhel8-py3.9-auth-ssl-cov + - name: test-rhel8-py3.9-cov tasks: - name: .standalone - name: .replica_set - name: .sharded_cluster - display_name: Test RHEL8 py3.9 Auth SSL cov + display_name: Test RHEL8 py3.9 cov run_on: - rhel87-small expansions: - AUTH: auth - SSL: ssl COVERAGE: coverage PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [coverage_tag] - - name: test-rhel8-py3.9-noauth-ssl-cov + - name: test-rhel8-py3.13-cov tasks: - name: .standalone - name: .replica_set - name: .sharded_cluster - display_name: Test RHEL8 py3.9 NoAuth SSL cov + display_name: Test RHEL8 py3.13 cov run_on: - rhel87-small expansions: - AUTH: noauth - SSL: ssl COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.9/bin/python3 + PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [coverage_tag] - - name: test-rhel8-py3.9-noauth-nossl-cov + - name: test-rhel8-pypy3.10-cov tasks: - name: .standalone - name: .replica_set - name: .sharded_cluster - display_name: Test RHEL8 py3.9 NoAuth NoSSL cov + display_name: Test RHEL8 pypy3.10 cov run_on: - rhel87-small expansions: - AUTH: noauth - SSL: nossl COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.9/bin/python3 + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [coverage_tag] - - name: test-rhel8-py3.13-auth-ssl-cov + - name: test-rhel8-py3.10 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.13 Auth SSL cov + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test RHEL8 py3.10 run_on: - rhel87-small expansions: - AUTH: auth - SSL: ssl COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-py3.13-noauth-ssl-cov + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: test-rhel8-py3.11 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.13 NoAuth SSL cov + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test RHEL8 py3.11 run_on: - rhel87-small expansions: - AUTH: noauth - SSL: ssl COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-py3.13-noauth-nossl-cov + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: test-rhel8-py3.12 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 py3.13 NoAuth NoSSL cov + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test RHEL8 py3.12 run_on: - rhel87-small expansions: - AUTH: noauth - SSL: nossl COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-pypy3.10-auth-ssl-cov + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: test-rhel8-pypy3.9 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 pypy3.10 Auth SSL cov + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test RHEL8 pypy3.9 run_on: - rhel87-small expansions: - AUTH: auth - SSL: ssl COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-pypy3.10-noauth-ssl-cov + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + - name: test-macos-py3.9 tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 pypy3.10 NoAuth SSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: ssl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-pypy3.10-noauth-nossl-cov - tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster - display_name: Test RHEL8 pypy3.10 NoAuth NoSSL cov - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: nossl - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-py3.10-auth-ssl - tasks: - - name: .standalone - display_name: Test RHEL8 py3.10 Auth SSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: test-rhel8-py3.11-noauth-ssl - tasks: - - name: .replica_set - display_name: Test RHEL8 py3.11 NoAuth SSL - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: ssl - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: test-rhel8-py3.12-noauth-nossl - tasks: - - name: .sharded_cluster - display_name: Test RHEL8 py3.12 NoAuth NoSSL - run_on: - - rhel87-small - expansions: - AUTH: noauth - SSL: nossl - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: test-rhel8-pypy3.9-auth-ssl - tasks: - - name: .standalone - display_name: Test RHEL8 pypy3.9 Auth SSL - run_on: - - rhel87-small - expansions: - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: test-macos-py3.9-auth-ssl-sync - tasks: - - name: .standalone - display_name: Test macOS py3.9 Auth SSL Sync + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test macOS py3.9 run_on: - macos-14 expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.9-noauth-ssl-sync - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.9-noauth-nossl-sync - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth NoSSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.9-auth-ssl-async - tasks: - - name: .standalone - display_name: Test macOS py3.9 Auth SSL Async - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.9-noauth-ssl-async - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth SSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.9-noauth-nossl-async - tasks: - - name: .standalone - display_name: Test macOS py3.9 NoAuth NoSSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 Auth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth SSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth NoSSL Sync - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 Auth SSL Async - run_on: - - macos-14 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-py3.13-noauth-ssl-async + - name: test-macos-py3.13 tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth SSL Async + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test macOS py3.13 run_on: - macos-14 expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster - display_name: Test macOS py3.13 NoAuth NoSSL Async - run_on: - - macos-14 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.9-auth-ssl-sync - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 Auth SSL Sync + - name: test-macos-arm64-py3.9 + tasks: + - name: .sharded_cluster .auth .ssl .6.0 + - name: .replica_set .noauth .ssl .6.0 + - name: .standalone .noauth .nossl .6.0 + - name: .sharded_cluster .auth .ssl .7.0 + - name: .replica_set .noauth .ssl .7.0 + - name: .standalone .noauth .nossl .7.0 + - name: .sharded_cluster .auth .ssl .8.0 + - name: .replica_set .noauth .ssl .8.0 + - name: .standalone .noauth .nossl .8.0 + - name: .sharded_cluster .auth .ssl .rapid + - name: .replica_set .noauth .ssl .rapid + - name: .standalone .noauth .nossl .rapid + - name: .sharded_cluster .auth .ssl .latest + - name: .replica_set .noauth .ssl .latest + - name: .standalone .noauth .nossl .latest + display_name: Test macOS Arm64 py3.9 run_on: - macos-14-arm64 expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.9-noauth-ssl-sync - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth SSL Sync + - name: test-macos-arm64-py3.13 + tasks: + - name: .sharded_cluster .auth .ssl .6.0 + - name: .replica_set .noauth .ssl .6.0 + - name: .standalone .noauth .nossl .6.0 + - name: .sharded_cluster .auth .ssl .7.0 + - name: .replica_set .noauth .ssl .7.0 + - name: .standalone .noauth .nossl .7.0 + - name: .sharded_cluster .auth .ssl .8.0 + - name: .replica_set .noauth .ssl .8.0 + - name: .standalone .noauth .nossl .8.0 + - name: .sharded_cluster .auth .ssl .rapid + - name: .replica_set .noauth .ssl .rapid + - name: .standalone .noauth .nossl .rapid + - name: .sharded_cluster .auth .ssl .latest + - name: .replica_set .noauth .ssl .latest + - name: .standalone .noauth .nossl .latest + display_name: Test macOS Arm64 py3.13 run_on: - macos-14-arm64 expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.9-noauth-nossl-sync - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.9-auth-ssl-async - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 Auth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.9-noauth-ssl-async - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.9-noauth-nossl-async - tasks: - - name: .standalone .6.0 - - name: .standalone .7.0 - - name: .standalone .8.0 - - name: .standalone .rapid - - name: .standalone .latest - display_name: Test macOS Arm64 py3.9 NoAuth NoSSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 Auth SSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth SSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.13-noauth-nossl-sync + - name: test-win64-py3.9 tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Sync - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 Auth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth SSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster .6.0 - - name: .sharded_cluster .7.0 - - name: .sharded_cluster .8.0 - - name: .sharded_cluster .rapid - - name: .sharded_cluster .latest - display_name: Test macOS Arm64 py3.13 NoAuth NoSSL Async - run_on: - - macos-14-arm64 - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-win64-py3.9-auth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win64 py3.9 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.9-noauth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth SSL Sync + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test Win64 py3.9 run_on: - windows-64-vsMulti-small expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.9-noauth-nossl-sync + - name: test-win64-py3.13 tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth NoSSL Sync + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test Win64 py3.13 run_on: - windows-64-vsMulti-small expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.9-auth-ssl-async - tasks: - - name: .standalone - display_name: Test Win64 py3.9 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.9-noauth-ssl-async - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.9-noauth-nossl-async - tasks: - - name: .standalone - display_name: Test Win64 py3.9 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.13-auth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win64-py3.13-noauth-ssl-sync + - name: test-win32-py3.9 tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth SSL Sync + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test Win32 py3.9 run_on: - windows-64-vsMulti-small expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win64-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win64-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win64-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win64-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster - display_name: Test Win64 py3.13 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win32-py3.9-auth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win32 py3.9 Auth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.9-noauth-ssl-sync - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.9-noauth-nossl-sync - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.9-auth-ssl-async - tasks: - - name: .standalone - display_name: Test Win32 py3.9 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.9-noauth-ssl-async - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.9-noauth-nossl-async - tasks: - - name: .standalone - display_name: Test Win32 py3.9 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.13-auth-ssl-sync + - name: test-win32-py3.13 tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 Auth SSL Sync + - name: .sharded_cluster .auth .ssl + - name: .replica_set .noauth .ssl + - name: .standalone .noauth .nossl + display_name: Test Win32 py3.13 run_on: - windows-64-vsMulti-small expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe - - name: test-win32-py3.13-noauth-ssl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth SSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe - - name: test-win32-py3.13-noauth-nossl-sync - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth NoSSL Sync - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe - - name: test-win32-py3.13-auth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 Auth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: auth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe - - name: test-win32-py3.13-noauth-ssl-async - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth SSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: ssl - TEST_SUITES: default_async - SKIP_CSOT_TESTS: "true" - PYTHON_BINARY: C:/python/32/Python313/python.exe - - name: test-win32-py3.13-noauth-nossl-async - tasks: - - name: .sharded_cluster - display_name: Test Win32 py3.13 NoAuth NoSSL Async - run_on: - - windows-64-vsMulti-small - expansions: - AUTH: noauth - SSL: nossl - TEST_SUITES: default_async SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/32/Python313/python.exe diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b65d9b62da..b8b8fa367c 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -17,8 +17,9 @@ from typing import Any from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_command import FunctionCall from shrub.v3.evg_project import EvgProject -from shrub.v3.evg_task import EvgTaskRef +from shrub.v3.evg_task import EvgTask, EvgTaskRef from shrub.v3.shrub_service import ShrubService ############## @@ -34,6 +35,12 @@ AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] C_EXTS = ["with_ext", "without_ext"] +# By default test each of the topologies with a subset of auth/ssl. +SUB_TASKS = [ + ".sharded_cluster .auth .ssl", + ".replica_set .noauth .ssl", + ".standalone .noauth .nossl", +] SYNCS = ["sync", "async"] DISPLAY_LOOKUP = dict( ssl=dict(ssl="SSL", nossl="NoSSL"), @@ -175,10 +182,10 @@ def handle_c_ext(c_ext, expansions): expansions["NO_EXT"] = "1" -def create_yaml(tasks=None, variants=None): +def generate_yaml(tasks=None, variants=None): """Generate the yaml for a given set of tasks and variants.""" project = EvgProject(tasks=tasks, buildvariants=variants) - out = ShrubService.create_yaml(project) + out = ShrubService.generate_yaml(project) # Dedent by two spaces to match what we use in config.yml lines = [line[2:] for line in out.splitlines()] print("\n".join(lines)) # noqa: T201 @@ -233,9 +240,9 @@ def create_server_variants() -> list[BuildVariant]: # Run the full matrix on linux with min and max CPython, and latest pypy. host = "rhel8" - for python, (auth, ssl) in product([*MIN_MAX_PYTHON, PYPYS[-1]], AUTH_SSLS): + for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: display_name = f"Test {host}" - expansions = dict(AUTH=auth, SSL=ssl, COVERAGE="coverage") + expansions = dict(COVERAGE="coverage") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( [f".{t}" for t in TOPOLOGIES], @@ -247,15 +254,12 @@ def create_server_variants() -> list[BuildVariant]: ) variants.append(variant) - # Test the rest of the pythons on linux. - for python, (auth, ssl), topology in zip_cycle( - CPYTHONS[1:-1] + PYPYS[:-1], AUTH_SSLS, TOPOLOGIES - ): + # Test the rest of the pythons. + for python in CPYTHONS[1:-1] + PYPYS[:-1]: display_name = f"Test {host}" - expansions = dict(AUTH=auth, SSL=ssl) - display_name = get_display_name("Test", host, python=python, **expansions) + display_name = get_display_name("Test", host, python=python) variant = create_variant( - [f".{topology}"], + SUB_TASKS, display_name, python=python, host=host, @@ -265,18 +269,14 @@ def create_server_variants() -> list[BuildVariant]: # Test a subset on each of the other platforms. for host in ("macos", "macos-arm64", "win64", "win32"): - for ( - python, - sync, - (auth, ssl), - ) in product(MIN_MAX_PYTHON, SYNCS, AUTH_SSLS): - test_suite = "default" if sync == "sync" else "default_async" - topology = TOPOLOGIES[0] if python == CPYTHONS[0] else TOPOLOGIES[-1] - tasks = [f".{topology}"] + for python in MIN_MAX_PYTHON: + tasks = SUB_TASKS # MacOS arm64 only works on server versions 6.0+ if host == "macos-arm64": - tasks = [f".{topology} .{version}" for version in get_versions_from("6.0")] - expansions = dict(AUTH=auth, SSL=ssl, TEST_SUITES=test_suite, SKIP_CSOT_TESTS="true") + tasks = [] + for version in get_versions_from("6.0"): + tasks.extend(f"{t} .{version}" for t in SUB_TASKS) + expansions = dict(SKIP_CSOT_TESTS="true") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( tasks, @@ -295,8 +295,8 @@ def create_encryption_variants() -> list[BuildVariant]: tags = ["encryption_tag"] batchtime = BATCHTIME_WEEK - def get_encryption_expansions(encryption, ssl="ssl"): - expansions = dict(AUTH="auth", SSL=ssl, test_encryption="true") + def get_encryption_expansions(encryption): + expansions = dict(test_encryption="true") if "crypt_shared" in encryption: expansions["test_crypt_shared"] = "true" if "PyOpenSSL" in encryption: @@ -305,13 +305,13 @@ def get_encryption_expansions(encryption, ssl="ssl"): host = "rhel8" - # Test against all server versions and topolgies for the three main python versions. + # Test against all server versions for the three main python versions. encryptions = ["Encryption", "Encryption crypt_shared", "Encryption PyOpenSSL"] for encryption, python in product(encryptions, [*MIN_MAX_PYTHON, PYPYS[-1]]): expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) variant = create_variant( - [f".{t}" for t in TOPOLOGIES], + SUB_TASKS, display_name, python=python, host=host, @@ -322,13 +322,11 @@ def get_encryption_expansions(encryption, ssl="ssl"): variants.append(variant) # Test the rest of the pythons on linux for all server versions. - for encryption, python, ssl in zip_cycle( - encryptions, CPYTHONS[1:-1] + PYPYS[:-1], ["ssl", "nossl"] - ): - expansions = get_encryption_expansions(encryption, ssl) + for encryption, python, task in zip_cycle(encryptions, CPYTHONS[1:-1] + PYPYS[:-1], SUB_TASKS): + expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) variant = create_variant( - [".replica_set"], + [task], display_name, python=python, host=host, @@ -340,8 +338,7 @@ def get_encryption_expansions(encryption, ssl="ssl"): encryptions = ["Encryption", "Encryption crypt_shared"] task_names = [".latest .replica_set"] for host, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): - ssl = "ssl" if python == CPYTHONS[0] else "nossl" - expansions = get_encryption_expansions(encryption, ssl) + expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) variant = create_variant( task_names, @@ -386,7 +383,8 @@ def create_compression_variants(): # Compression tests - standalone versions of each server, across python versions, with and without c extensions. # PyPy interpreters are always tested without extensions. host = "rhel8" - task_names = dict(snappy=[".standalone"], zlib=[".standalone"], zstd=[".standalone !.4.0"]) + base_task = ".standalone .noauth .nossl" + task_names = dict(snappy=[base_task], zlib=[base_task], zstd=[f"{base_task} !.4.0"]) variants = [] for ind, (compressor, c_ext) in enumerate(product(["snappy", "zlib", "zstd"], C_EXTS)): expansions = dict(COMPRESSORS=compressor) @@ -445,24 +443,23 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" batchtime = BATCHTIME_WEEK - base_expansions = dict(test_pyopenssl="true", SSL="ssl") + expansions = dict(test_pyopenssl="true") variants = [] for python in ALL_PYTHONS: # Only test "noauth" with min python. auth = "noauth" if python == CPYTHONS[0] else "auth" + ssl = "nossl" if auth == "noauth" else "ssl" if python == CPYTHONS[0]: host = "macos" elif python == CPYTHONS[-1]: host = "win64" else: host = "rhel8" - expansions = dict(AUTH=auth) - expansions.update(base_expansions) display_name = get_display_name(base_name, host, python=python) variant = create_variant( - [".replica_set", ".7.0"], + [f".replica_set .{auth} .{ssl}", f".7.0 .{auth} .{ssl}"], display_name, python=python, host=host, @@ -482,12 +479,12 @@ def create_storage_engine_tests(): python = CPYTHONS[0] expansions = dict(STORAGE_ENGINE=engine.lower()) if engine == engines[0]: - tasks = [f".standalone .{v}" for v in ALL_VERSIONS] + tasks = [f".standalone .noauth .nossl .{v}" for v in ALL_VERSIONS] else: # MongoDB 4.2 drops support for MMAPv1 versions = get_versions_until("4.0") - tasks = [f".standalone .{v}" for v in versions] + [ - f".replica_set .{v}" for v in versions + tasks = [f".standalone .{v} .noauth .nossl" for v in versions] + [ + f".replica_set .{v} .noauth .nossl" for v in versions ] display_name = get_display_name(f"Storage {engine}", host, python=python) variant = create_variant( @@ -500,7 +497,7 @@ def create_storage_engine_tests(): def create_versioned_api_tests(): host = "rhel8" tags = ["versionedApi_tag"] - tasks = [f".standalone .{v}" for v in get_versions_from("5.0")] + tasks = [f".standalone .{v} .noauth .nossl" for v in get_versions_from("5.0")] variants = [] types = ["require v1", "accept v2"] @@ -531,7 +528,7 @@ def create_versioned_api_tests(): def create_green_framework_variants(): variants = [] - tasks = [".standalone"] + tasks = [".standalone .noauth .nossl"] host = "rhel8" for python, framework in product([CPYTHONS[0], CPYTHONS[-2]], ["eventlet", "gevent"]): expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") @@ -547,7 +544,7 @@ def create_no_c_ext_variants(): variants = [] host = "rhel8" for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): - tasks = [f".{topology}"] + tasks = [f".{topology} .noauth .nossl"] expansions = dict() handle_c_ext(C_EXTS[0], expansions) display_name = get_display_name("No C Ext", host, python=python) @@ -717,32 +714,25 @@ def create_aws_auth_variants(): def create_alternative_hosts_variants(): - base_expansions = dict(SKIP_HATCH="true") + expansions = dict(SKIP_HATCH="true") batchtime = BATCHTIME_WEEK variants = [] host = "rhel7" - for auth, ssl in AUTH_SSLS: - expansions = base_expansions.copy() - expansions["AUTH"] = auth - expansions["SSL"] = ssl - variants.append( - create_variant( - [".5.0 .standalone"], - get_display_name("OpenSSL 1.0.2", "rhel7", python=CPYTHONS[0], **expansions), - host=host, - python=CPYTHONS[0], - batchtime=batchtime, - expansions=expansions, - ) + variants.append( + create_variant( + [".5.0 .standalone"], + get_display_name("OpenSSL 1.0.2", "rhel7", python=CPYTHONS[0], **expansions), + host=host, + python=CPYTHONS[0], + batchtime=batchtime, + expansions=expansions, ) + ) hosts = ["rhel92-fips", "rhel8-zseries-small", "rhel8-power-small", "rhel82-arm64-small"] host_names = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64"] - for (host, host_name), (auth, ssl) in product(zip(hosts, host_names), AUTH_SSLS): - expansions = base_expansions.copy() - expansions["AUTH"] = auth - expansions["SSL"] = ssl + for host, host_name in zip(hosts, host_names): variants.append( create_variant( [".6.0 .standalone"], @@ -755,6 +745,34 @@ def create_alternative_hosts_variants(): return variants +############## +# Tasks +############## + + +def create_server_tasks(): + tasks = [] + for topo, version, (auth, ssl), sync in product(TOPOLOGIES, ALL_VERSIONS, AUTH_SSLS, SYNCS): + name = f"test-{version}-{topo}-{auth}-{ssl}-{sync}".lower() + tags = [version, topo, auth, ssl, sync] + bootstrap_vars = dict( + VERSION=version, + TOPOLOGY=topo if topo != "standalone" else "server", + AUTH=auth, + SSL=ssl, + ) + bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + test_vars = dict( + AUTH=auth, + SSL=ssl, + SYNC=sync, + TEST_SUITES="default" if sync == "sync" else "default_async", + ) + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) + return tasks + + ################## # Generate Config ################## @@ -790,4 +808,35 @@ def write_variants_to_file(): fid.write(f"{line}\n") +def write_tasks_to_file(): + mod = sys.modules[__name__] + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "tasks.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("tasks:\n") + + for name, func in getmembers(mod, isfunction): + if not name.endswith("_tasks"): + continue + if not name.startswith("create_"): + raise ValueError("Task creators must start with create_") + title = name.replace("create_", "").replace("_tasks", "").replace("_", " ").capitalize() + project = EvgProject(tasks=func(), buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + write_variants_to_file() +write_tasks_to_file() From 85ba541ed5a940bd3a7b7d967fe7510aac48a23d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 25 Oct 2024 07:43:28 -0500 Subject: [PATCH 1587/2111] PYTHON-4905 Use shrub.py to generate load balancer tasks (#1968) --- .evergreen/config.yml | 9 - .evergreen/generated_configs/tasks.yml | 47 +++++ .evergreen/generated_configs/variants.yml | 225 ++++------------------ .evergreen/scripts/generate_config.py | 34 ++-- 4 files changed, 110 insertions(+), 205 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a1587a281d..fda6864317 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1542,15 +1542,6 @@ tasks: - func: "run aws auth test with aws web identity credentials" - func: "run aws ECS auth test" - - name: load-balancer-test - commands: - - func: "bootstrap mongo-orchestration" - vars: - TOPOLOGY: "sharded_cluster" - LOAD_BALANCER: true - - func: "run load-balancer" - - func: "run tests" - - name: "oidc-auth-test" commands: - func: "run oidc auth test with test credentials" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index fb3da4bb24..0f416ab595 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1,4 +1,51 @@ tasks: + # Load balancer tests + - name: test-load-balancer-auth-ssl + commands: + - func: bootstrap mongo-orchestration + vars: + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + LOAD_BALANCER: "true" + - func: run load-balancer + - func: run tests + vars: + AUTH: auth + SSL: ssl + test_loadbalancer: "true" + tags: [load-balancer, auth, ssl] + - name: test-load-balancer-noauth-ssl + commands: + - func: bootstrap mongo-orchestration + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + LOAD_BALANCER: "true" + - func: run load-balancer + - func: run tests + vars: + AUTH: noauth + SSL: ssl + test_loadbalancer: "true" + tags: [load-balancer, noauth, ssl] + - name: test-load-balancer-noauth-nossl + commands: + - func: bootstrap mongo-orchestration + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + LOAD_BALANCER: "true" + - func: run load-balancer + - func: run tests + vars: + AUTH: noauth + SSL: nossl + test_loadbalancer: "true" + tags: [load-balancer, noauth, nossl] + # Server tests - name: test-4.0-standalone-auth-ssl-sync commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 327becc249..f9a452b224 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -671,201 +671,56 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 # Load balancer tests - - name: load-balancer-rhel8-v6.0-py3.9-auth-ssl + - name: load-balancer-rhel8-v6.0-py3.9 tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v6.0 py3.9 Auth SSL + - name: .load-balancer + display_name: Load Balancer RHEL8 v6.0 py3.9 run_on: - rhel87-small batchtime: 10080 expansions: - VERSION: "6.0" - AUTH: auth - SSL: ssl - test_loadbalancer: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: load-balancer-rhel8-v6.0-py3.10-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v6.0 py3.10 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "6.0" - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: load-balancer-rhel8-v6.0-py3.11-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v6.0 py3.11 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: VERSION: "6.0" - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: load-balancer-rhel8-v7.0-py3.12-auth-ssl + - name: load-balancer-rhel8-v7.0-py3.9 tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v7.0 py3.12 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "7.0" - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: load-balancer-rhel8-v7.0-py3.13-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v7.0 py3.13 NoAuth SSL + - name: .load-balancer + display_name: Load Balancer RHEL8 v7.0 py3.9 run_on: - rhel87-small batchtime: 10080 expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "7.0" - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: load-balancer-rhel8-v7.0-pypy3.9-noauth-nossl + - name: load-balancer-rhel8-v8.0-py3.9 tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v7.0 pypy3.9 NoAuth NoSSL + - name: .load-balancer + display_name: Load Balancer RHEL8 v8.0 py3.9 run_on: - rhel87-small batchtime: 10080 expansions: - VERSION: "7.0" - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: load-balancer-rhel8-v8.0-pypy3.10-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v8.0 pypy3.10 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "8.0" - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: load-balancer-rhel8-v8.0-py3.9-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v8.0 py3.9 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "8.0" - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: load-balancer-rhel8-v8.0-py3.10-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 v8.0 py3.10 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: VERSION: "8.0" - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: load-balancer-rhel8-rapid-py3.11-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 rapid py3.11 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: rapid - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: load-balancer-rhel8-rapid-py3.12-noauth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 rapid py3.12 NoAuth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: rapid - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: load-balancer-rhel8-rapid-py3.13-noauth-nossl + - name: load-balancer-rhel8-rapid-py3.9 tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 rapid py3.13 NoAuth NoSSL + - name: .load-balancer + display_name: Load Balancer RHEL8 rapid py3.9 run_on: - rhel87-small batchtime: 10080 expansions: + PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: rapid - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: load-balancer-rhel8-latest-pypy3.9-auth-ssl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 latest pypy3.9 Auth SSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: latest - AUTH: auth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: load-balancer-rhel8-latest-pypy3.10-noauth-ssl + - name: load-balancer-rhel8-latest-py3.9 tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 latest pypy3.10 NoAuth SSL + - name: .load-balancer + display_name: Load Balancer RHEL8 latest py3.9 run_on: - rhel87-small batchtime: 10080 expansions: - VERSION: latest - AUTH: noauth - SSL: ssl - test_loadbalancer: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: load-balancer-rhel8-latest-py3.9-noauth-nossl - tasks: - - name: load-balancer-test - display_name: Load Balancer RHEL8 latest py3.9 NoAuth NoSSL - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: latest - AUTH: noauth - SSL: nossl - test_loadbalancer: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 + VERSION: latest # Mockupdb tests - name: mockupdb-tests-rhel8-py3.9 @@ -951,10 +806,10 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Ocsp tests - - name: ocsp-test-rhel8-py3.9 + - name: ocsp-test-rhel8-v4.4-py3.9 tasks: - name: .ocsp - display_name: OCSP test RHEL8 py3.9 + display_name: OCSP test RHEL8 v4.4 py3.9 run_on: - rhel87-small batchtime: 20160 @@ -964,10 +819,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "4.4" - - name: ocsp-test-rhel8-py3.10 + - name: ocsp-test-rhel8-v5.0-py3.10 tasks: - name: .ocsp - display_name: OCSP test RHEL8 py3.10 + display_name: OCSP test RHEL8 v5.0 py3.10 run_on: - rhel87-small batchtime: 20160 @@ -977,10 +832,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.10/bin/python3 VERSION: "5.0" - - name: ocsp-test-rhel8-py3.11 + - name: ocsp-test-rhel8-v6.0-py3.11 tasks: - name: .ocsp - display_name: OCSP test RHEL8 py3.11 + display_name: OCSP test RHEL8 v6.0 py3.11 run_on: - rhel87-small batchtime: 20160 @@ -990,10 +845,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.11/bin/python3 VERSION: "6.0" - - name: ocsp-test-rhel8-py3.12 + - name: ocsp-test-rhel8-v7.0-py3.12 tasks: - name: .ocsp - display_name: OCSP test RHEL8 py3.12 + display_name: OCSP test RHEL8 v7.0 py3.12 run_on: - rhel87-small batchtime: 20160 @@ -1003,10 +858,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.12/bin/python3 VERSION: "7.0" - - name: ocsp-test-rhel8-py3.13 + - name: ocsp-test-rhel8-v8.0-py3.13 tasks: - name: .ocsp - display_name: OCSP test RHEL8 py3.13 + display_name: OCSP test RHEL8 v8.0 py3.13 run_on: - rhel87-small batchtime: 20160 @@ -1016,10 +871,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.13/bin/python3 VERSION: "8.0" - - name: ocsp-test-rhel8-pypy3.9 + - name: ocsp-test-rhel8-rapid-pypy3.9 tasks: - name: .ocsp - display_name: OCSP test RHEL8 pypy3.9 + display_name: OCSP test RHEL8 rapid pypy3.9 run_on: - rhel87-small batchtime: 20160 @@ -1029,10 +884,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 VERSION: rapid - - name: ocsp-test-rhel8-pypy3.10 + - name: ocsp-test-rhel8-latest-pypy3.10 tasks: - name: .ocsp - display_name: OCSP test RHEL8 pypy3.10 + display_name: OCSP test RHEL8 latest pypy3.10 run_on: - rhel87-small batchtime: 20160 @@ -1042,10 +897,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 VERSION: latest - - name: ocsp-test-win64-py3.9 + - name: ocsp-test-win64-v4.4-py3.9 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test Win64 py3.9 + display_name: OCSP test Win64 v4.4 py3.9 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -1055,10 +910,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: C:/python/Python39/python.exe VERSION: "4.4" - - name: ocsp-test-win64-py3.13 + - name: ocsp-test-win64-v8.0-py3.13 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test Win64 py3.13 + display_name: OCSP test Win64 v8.0 py3.13 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -1068,10 +923,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: C:/python/Python313/python.exe VERSION: "8.0" - - name: ocsp-test-macos-py3.9 + - name: ocsp-test-macos-v4.4-py3.9 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test macOS py3.9 + display_name: OCSP test macOS v4.4 py3.9 run_on: - macos-14 batchtime: 20160 @@ -1081,10 +936,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 VERSION: "4.4" - - name: ocsp-test-macos-py3.13 + - name: ocsp-test-macos-v8.0-py3.13 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test macOS py3.13 + display_name: OCSP test macOS v8.0 py3.13 run_on: - macos-14 batchtime: 20160 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b8b8fa367c..eefd04b040 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -152,6 +152,7 @@ def get_display_name(base: str, host: str | None = None, **kwargs) -> str: if host is not None: display_name += f" {HOSTS[host].display_name}" version = kwargs.pop("VERSION", None) + version = version or kwargs.pop("version", None) if version: if version not in ["rapid", "latest"]: version = f"v{version}" @@ -354,25 +355,20 @@ def get_encryption_expansions(encryption): def create_load_balancer_variants(): - # Load balancer tests - run all supported versions for all combinations of auth and ssl and system python. + # Load balancer tests - run all supported server versions using the lowest supported python. host = "rhel8" - task_names = ["load-balancer-test"] batchtime = BATCHTIME_WEEK - expansions_base = dict(test_loadbalancer="true") versions = get_versions_from("6.0") variants = [] - pythons = CPYTHONS + PYPYS - for ind, (version, (auth, ssl)) in enumerate(product(versions, AUTH_SSLS)): - expansions = dict(VERSION=version, AUTH=auth, SSL=ssl) - expansions.update(expansions_base) - python = pythons[ind % len(pythons)] - display_name = get_display_name("Load Balancer", host, python=python, **expansions) + for version in versions: + python = CPYTHONS[0] + display_name = get_display_name("Load Balancer", host, python=python, version=version) variant = create_variant( - task_names, + [".load-balancer"], display_name, python=python, host=host, - expansions=expansions, + version=version, batchtime=batchtime, ) variants.append(variant) @@ -773,6 +769,22 @@ def create_server_tasks(): return tasks +def create_load_balancer_tasks(): + tasks = [] + for auth, ssl in AUTH_SSLS: + name = f"test-load-balancer-{auth}-{ssl}".lower() + tags = ["load-balancer", auth, ssl] + bootstrap_vars = dict(TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, LOAD_BALANCER="true") + bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + balancer_func = FunctionCall(func="run load-balancer") + test_vars = dict(AUTH=auth, SSL=ssl, test_loadbalancer="true") + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append( + EvgTask(name=name, tags=tags, commands=[bootstrap_func, balancer_func, test_func]) + ) + return tasks + + ################## # Generate Config ################## From 97ac3ebee2e1d97dc4da1687b5277c16681dc3f0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 25 Oct 2024 09:49:37 -0500 Subject: [PATCH 1588/2111] PYTHON-4738 Skip encryption fork test (#1972) --- test/asynchronous/test_encryption.py | 2 +- test/test_encryption.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 88b005c4b3..40f1acd32d 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -380,9 +380,9 @@ async def test_use_after_close(self): is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) - @unittest.skipIf("PyPy" in sys.version, "PYTHON-4738 fails often on PyPy") @async_client_context.require_sync async def test_fork(self): + self.skipTest("Test is flaky, PYTHON-4738") opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = await self.async_rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/test_encryption.py b/test/test_encryption.py index 13a69ca9ad..373981b1d2 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -380,9 +380,9 @@ def test_use_after_close(self): is_greenthread_patched(), "gevent and eventlet do not support POSIX-style forking.", ) - @unittest.skipIf("PyPy" in sys.version, "PYTHON-4738 fails often on PyPy") @client_context.require_sync def test_fork(self): + self.skipTest("Test is flaky, PYTHON-4738") opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = self.rs_or_single_client(auto_encryption_opts=opts) From 4aeca321c5f8607ee8b528eebedf4a3badf7f967 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 04:47:07 -0500 Subject: [PATCH 1589/2111] Bump mypy from 1.12.1 to 1.13.0 (#1974) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 7ccc122f53..db0825c2b1 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,4 +1,4 @@ -mypy==1.12.1 +mypy==1.13.0 pyright==1.1.384 typing_extensions -r ./encryption.txt From 72863862c921cbb0697e3c1ca61ed4de17f012c5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Oct 2024 07:49:11 -0500 Subject: [PATCH 1590/2111] PYTHON-4910 Add server tasks that use both sync and async (#1975) --- .evergreen/generated_configs/tasks.yml | 1440 +++++++++++++++++++++ .evergreen/generated_configs/variants.yml | 292 ++--- .evergreen/scripts/generate_config.py | 45 +- 3 files changed, 1611 insertions(+), 166 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 0f416ab595..c666c6901a 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -87,6 +87,26 @@ tasks: - auth - ssl - async + - name: test-4.0-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - standalone + - auth + - ssl + - sync_async - name: test-4.0-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -127,6 +147,26 @@ tasks: - noauth - ssl - async + - name: test-4.0-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - standalone + - noauth + - ssl + - sync_async - name: test-4.0-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -167,6 +207,26 @@ tasks: - noauth - nossl - async + - name: test-4.0-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - standalone + - noauth + - nossl + - sync_async - name: test-4.4-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -207,6 +267,26 @@ tasks: - auth - ssl - async + - name: test-4.4-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - standalone + - auth + - ssl + - sync_async - name: test-4.4-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -247,6 +327,26 @@ tasks: - noauth - ssl - async + - name: test-4.4-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - standalone + - noauth + - ssl + - sync_async - name: test-4.4-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -287,6 +387,26 @@ tasks: - noauth - nossl - async + - name: test-4.4-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - standalone + - noauth + - nossl + - sync_async - name: test-5.0-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -327,6 +447,26 @@ tasks: - auth - ssl - async + - name: test-5.0-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - standalone + - auth + - ssl + - sync_async - name: test-5.0-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -367,6 +507,26 @@ tasks: - noauth - ssl - async + - name: test-5.0-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - standalone + - noauth + - ssl + - sync_async - name: test-5.0-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -407,6 +567,26 @@ tasks: - noauth - nossl - async + - name: test-5.0-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - standalone + - noauth + - nossl + - sync_async - name: test-6.0-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -447,6 +627,26 @@ tasks: - auth - ssl - async + - name: test-6.0-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - standalone + - auth + - ssl + - sync_async - name: test-6.0-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -487,6 +687,26 @@ tasks: - noauth - ssl - async + - name: test-6.0-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - standalone + - noauth + - ssl + - sync_async - name: test-6.0-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -527,6 +747,26 @@ tasks: - noauth - nossl - async + - name: test-6.0-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - standalone + - noauth + - nossl + - sync_async - name: test-7.0-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -567,6 +807,26 @@ tasks: - auth - ssl - async + - name: test-7.0-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - standalone + - auth + - ssl + - sync_async - name: test-7.0-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -607,6 +867,26 @@ tasks: - noauth - ssl - async + - name: test-7.0-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - standalone + - noauth + - ssl + - sync_async - name: test-7.0-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -647,6 +927,26 @@ tasks: - noauth - nossl - async + - name: test-7.0-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - standalone + - noauth + - nossl + - sync_async - name: test-8.0-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -687,6 +987,26 @@ tasks: - auth - ssl - async + - name: test-8.0-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - standalone + - auth + - ssl + - sync_async - name: test-8.0-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -727,6 +1047,26 @@ tasks: - noauth - ssl - async + - name: test-8.0-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - standalone + - noauth + - ssl + - sync_async - name: test-8.0-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -767,6 +1107,26 @@ tasks: - noauth - nossl - async + - name: test-8.0-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - standalone + - noauth + - nossl + - sync_async - name: test-rapid-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -807,6 +1167,26 @@ tasks: - auth - ssl - async + - name: test-rapid-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - standalone + - auth + - ssl + - sync_async - name: test-rapid-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -847,6 +1227,26 @@ tasks: - noauth - ssl - async + - name: test-rapid-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - standalone + - noauth + - ssl + - sync_async - name: test-rapid-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -887,6 +1287,26 @@ tasks: - noauth - nossl - async + - name: test-rapid-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - standalone + - noauth + - nossl + - sync_async - name: test-latest-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -927,6 +1347,26 @@ tasks: - auth - ssl - async + - name: test-latest-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - standalone + - auth + - ssl + - sync_async - name: test-latest-standalone-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -967,6 +1407,26 @@ tasks: - noauth - ssl - async + - name: test-latest-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - standalone + - noauth + - ssl + - sync_async - name: test-latest-standalone-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1007,6 +1467,26 @@ tasks: - noauth - nossl - async + - name: test-latest-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - standalone + - noauth + - nossl + - sync_async - name: test-4.0-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1047,6 +1527,26 @@ tasks: - auth - ssl - async + - name: test-4.0-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - replica_set + - auth + - ssl + - sync_async - name: test-4.0-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1087,6 +1587,26 @@ tasks: - noauth - ssl - async + - name: test-4.0-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - replica_set + - noauth + - ssl + - sync_async - name: test-4.0-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1127,6 +1647,26 @@ tasks: - noauth - nossl - async + - name: test-4.0-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - replica_set + - noauth + - nossl + - sync_async - name: test-4.4-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1167,6 +1707,26 @@ tasks: - auth - ssl - async + - name: test-4.4-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - replica_set + - auth + - ssl + - sync_async - name: test-4.4-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1207,6 +1767,26 @@ tasks: - noauth - ssl - async + - name: test-4.4-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - replica_set + - noauth + - ssl + - sync_async - name: test-4.4-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1247,6 +1827,26 @@ tasks: - noauth - nossl - async + - name: test-4.4-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - replica_set + - noauth + - nossl + - sync_async - name: test-5.0-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1287,6 +1887,26 @@ tasks: - auth - ssl - async + - name: test-5.0-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - replica_set + - auth + - ssl + - sync_async - name: test-5.0-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1327,6 +1947,26 @@ tasks: - noauth - ssl - async + - name: test-5.0-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - replica_set + - noauth + - ssl + - sync_async - name: test-5.0-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1367,6 +2007,26 @@ tasks: - noauth - nossl - async + - name: test-5.0-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - replica_set + - noauth + - nossl + - sync_async - name: test-6.0-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1407,6 +2067,26 @@ tasks: - auth - ssl - async + - name: test-6.0-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - replica_set + - auth + - ssl + - sync_async - name: test-6.0-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1447,6 +2127,26 @@ tasks: - noauth - ssl - async + - name: test-6.0-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - replica_set + - noauth + - ssl + - sync_async - name: test-6.0-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1487,6 +2187,26 @@ tasks: - noauth - nossl - async + - name: test-6.0-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - replica_set + - noauth + - nossl + - sync_async - name: test-7.0-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1527,6 +2247,26 @@ tasks: - auth - ssl - async + - name: test-7.0-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - replica_set + - auth + - ssl + - sync_async - name: test-7.0-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1567,6 +2307,26 @@ tasks: - noauth - ssl - async + - name: test-7.0-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - replica_set + - noauth + - ssl + - sync_async - name: test-7.0-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1607,6 +2367,26 @@ tasks: - noauth - nossl - async + - name: test-7.0-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - replica_set + - noauth + - nossl + - sync_async - name: test-8.0-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1647,6 +2427,26 @@ tasks: - auth - ssl - async + - name: test-8.0-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - replica_set + - auth + - ssl + - sync_async - name: test-8.0-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1687,6 +2487,26 @@ tasks: - noauth - ssl - async + - name: test-8.0-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - replica_set + - noauth + - ssl + - sync_async - name: test-8.0-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1727,6 +2547,26 @@ tasks: - noauth - nossl - async + - name: test-8.0-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - replica_set + - noauth + - nossl + - sync_async - name: test-rapid-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1767,6 +2607,26 @@ tasks: - auth - ssl - async + - name: test-rapid-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - replica_set + - auth + - ssl + - sync_async - name: test-rapid-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1807,6 +2667,26 @@ tasks: - noauth - ssl - async + - name: test-rapid-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - replica_set + - noauth + - ssl + - sync_async - name: test-rapid-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1847,6 +2727,26 @@ tasks: - noauth - nossl - async + - name: test-rapid-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - replica_set + - noauth + - nossl + - sync_async - name: test-latest-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1887,6 +2787,26 @@ tasks: - auth - ssl - async + - name: test-latest-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - replica_set + - auth + - ssl + - sync_async - name: test-latest-replica_set-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1927,6 +2847,26 @@ tasks: - noauth - ssl - async + - name: test-latest-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - replica_set + - noauth + - ssl + - sync_async - name: test-latest-replica_set-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -1967,6 +2907,26 @@ tasks: - noauth - nossl - async + - name: test-latest-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - replica_set + - noauth + - nossl + - sync_async - name: test-4.0-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2007,6 +2967,26 @@ tasks: - auth - ssl - async + - name: test-4.0-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - sharded_cluster + - auth + - ssl + - sync_async - name: test-4.0-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2047,6 +3027,26 @@ tasks: - noauth - ssl - async + - name: test-4.0-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-4.0-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2087,6 +3087,26 @@ tasks: - noauth - nossl - async + - name: test-4.0-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.0" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-4.4-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2127,6 +3147,26 @@ tasks: - auth - ssl - async + - name: test-4.4-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - sharded_cluster + - auth + - ssl + - sync_async - name: test-4.4-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2167,6 +3207,26 @@ tasks: - noauth - ssl - async + - name: test-4.4-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-4.4-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2207,6 +3267,26 @@ tasks: - noauth - nossl - async + - name: test-4.4-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.4" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.4" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-5.0-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2247,6 +3327,26 @@ tasks: - auth - ssl - async + - name: test-5.0-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - sharded_cluster + - auth + - ssl + - sync_async - name: test-5.0-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2287,6 +3387,26 @@ tasks: - noauth - ssl - async + - name: test-5.0-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-5.0-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2327,6 +3447,26 @@ tasks: - noauth - nossl - async + - name: test-5.0-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "5.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "5.0" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-6.0-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2367,6 +3507,26 @@ tasks: - auth - ssl - async + - name: test-6.0-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - sharded_cluster + - auth + - ssl + - sync_async - name: test-6.0-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2407,6 +3567,26 @@ tasks: - noauth - ssl - async + - name: test-6.0-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-6.0-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2447,6 +3627,26 @@ tasks: - noauth - nossl - async + - name: test-6.0-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "6.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "6.0" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-7.0-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2487,6 +3687,26 @@ tasks: - auth - ssl - async + - name: test-7.0-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - sharded_cluster + - auth + - ssl + - sync_async - name: test-7.0-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2527,6 +3747,26 @@ tasks: - noauth - ssl - async + - name: test-7.0-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-7.0-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2567,6 +3807,26 @@ tasks: - noauth - nossl - async + - name: test-7.0-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "7.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "7.0" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-8.0-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2607,6 +3867,26 @@ tasks: - auth - ssl - async + - name: test-8.0-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - sharded_cluster + - auth + - ssl + - sync_async - name: test-8.0-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2647,6 +3927,26 @@ tasks: - noauth - ssl - async + - name: test-8.0-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-8.0-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2687,6 +3987,26 @@ tasks: - noauth - nossl - async + - name: test-8.0-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "8.0" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "8.0" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-rapid-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2727,6 +4047,26 @@ tasks: - auth - ssl - async + - name: test-rapid-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - sharded_cluster + - auth + - ssl + - sync_async - name: test-rapid-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2767,6 +4107,26 @@ tasks: - noauth - ssl - async + - name: test-rapid-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-rapid-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2807,6 +4167,26 @@ tasks: - noauth - nossl - async + - name: test-rapid-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: rapid + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - rapid + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-latest-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2847,6 +4227,26 @@ tasks: - auth - ssl - async + - name: test-latest-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - sharded_cluster + - auth + - ssl + - sync_async - name: test-latest-sharded_cluster-noauth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -2887,6 +4287,26 @@ tasks: - noauth - ssl - async + - name: test-latest-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - sharded_cluster + - noauth + - ssl + - sync_async - name: test-latest-sharded_cluster-noauth-nossl-sync commands: - func: bootstrap mongo-orchestration @@ -2927,3 +4347,23 @@ tasks: - noauth - nossl - async + - name: test-latest-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: latest + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - latest + - sharded_cluster + - noauth + - nossl + - sync_async diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index f9a452b224..240b237fdc 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -2,7 +2,7 @@ buildvariants: # Alternative hosts tests - name: openssl-1.0.2-rhel7-py3.9 tasks: - - name: .5.0 .standalone + - name: .5.0 .standalone !.sync_async display_name: OpenSSL 1.0.2 RHEL7 py3.9 run_on: - rhel79-small @@ -12,7 +12,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: other-hosts-rhel9-fips tasks: - - name: .6.0 .standalone + - name: .6.0 .standalone !.sync_async display_name: Other hosts RHEL9-FIPS run_on: - rhel92-fips @@ -21,7 +21,7 @@ buildvariants: SKIP_HATCH: "true" - name: other-hosts-rhel8-zseries tasks: - - name: .6.0 .standalone + - name: .6.0 .standalone !.sync_async display_name: Other hosts RHEL8-zseries run_on: - rhel8-zseries-small @@ -30,7 +30,7 @@ buildvariants: SKIP_HATCH: "true" - name: other-hosts-rhel8-power8 tasks: - - name: .6.0 .standalone + - name: .6.0 .standalone !.sync_async display_name: Other hosts RHEL8-POWER8 run_on: - rhel8-power-small @@ -39,7 +39,7 @@ buildvariants: SKIP_HATCH: "true" - name: other-hosts-rhel8-arm64 tasks: - - name: .6.0 .standalone + - name: .6.0 .standalone !.sync_async display_name: Other hosts RHEL8-arm64 run_on: - rhel82-arm64-small @@ -198,7 +198,7 @@ buildvariants: # Compression tests - name: snappy-compression-rhel8-py3.9-no-c tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: snappy compression RHEL8 py3.9 No C run_on: - rhel87-small @@ -208,7 +208,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: snappy-compression-rhel8-py3.10 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: snappy compression RHEL8 py3.10 run_on: - rhel87-small @@ -217,7 +217,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: zlib-compression-rhel8-py3.11-no-c tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: zlib compression RHEL8 py3.11 No C run_on: - rhel87-small @@ -227,7 +227,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: zlib-compression-rhel8-py3.12 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: zlib compression RHEL8 py3.12 run_on: - rhel87-small @@ -236,7 +236,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: zstd-compression-rhel8-py3.13-no-c tasks: - - name: .standalone .noauth .nossl !.4.0 + - name: .standalone .noauth .nossl .sync_async !.4.0 display_name: zstd compression RHEL8 py3.13 No C run_on: - rhel87-small @@ -246,7 +246,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 - name: zstd-compression-rhel8-py3.9 tasks: - - name: .standalone .noauth .nossl !.4.0 + - name: .standalone .noauth .nossl .sync_async !.4.0 display_name: zstd compression RHEL8 py3.9 run_on: - rhel87-small @@ -255,7 +255,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: snappy-compression-rhel8-pypy3.9 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: snappy compression RHEL8 pypy3.9 run_on: - rhel87-small @@ -264,7 +264,7 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: zlib-compression-rhel8-pypy3.10 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: zlib compression RHEL8 pypy3.10 run_on: - rhel87-small @@ -273,7 +273,7 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - name: zstd-compression-rhel8-pypy3.9 tasks: - - name: .standalone .noauth .nossl !.4.0 + - name: .standalone .noauth .nossl .sync_async !.4.0 display_name: zstd compression RHEL8 pypy3.9 run_on: - rhel87-small @@ -284,7 +284,7 @@ buildvariants: # Disable test commands tests - name: disable-test-commands-rhel8-py3.9 tasks: - - name: .latest + - name: .latest .sync_async display_name: Disable test commands RHEL8 py3.9 run_on: - rhel87-small @@ -307,9 +307,9 @@ buildvariants: # Encryption tests - name: encryption-rhel8-py3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption RHEL8 py3.9 run_on: - rhel87-small @@ -320,9 +320,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-rhel8-py3.13 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption RHEL8 py3.13 run_on: - rhel87-small @@ -333,9 +333,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-rhel8-pypy3.10 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption RHEL8 pypy3.10 run_on: - rhel87-small @@ -346,9 +346,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-py3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption crypt_shared RHEL8 py3.9 run_on: - rhel87-small @@ -360,9 +360,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-py3.13 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption crypt_shared RHEL8 py3.13 run_on: - rhel87-small @@ -374,9 +374,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-pypy3.10 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption crypt_shared RHEL8 pypy3.10 run_on: - rhel87-small @@ -388,9 +388,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-py3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption PyOpenSSL RHEL8 py3.9 run_on: - rhel87-small @@ -402,9 +402,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-py3.13 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption PyOpenSSL RHEL8 py3.13 run_on: - rhel87-small @@ -416,9 +416,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-pypy3.10 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Encryption PyOpenSSL RHEL8 pypy3.10 run_on: - rhel87-small @@ -430,7 +430,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-rhel8-py3.10 tasks: - - name: .sharded_cluster .auth .ssl + - name: .sharded_cluster .auth .ssl .sync_async display_name: Encryption RHEL8 py3.10 run_on: - rhel87-small @@ -439,7 +439,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: encryption-crypt_shared-rhel8-py3.11 tasks: - - name: .replica_set .noauth .ssl + - name: .replica_set .noauth .ssl .sync_async display_name: Encryption crypt_shared RHEL8 py3.11 run_on: - rhel87-small @@ -449,7 +449,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: encryption-pyopenssl-rhel8-py3.12 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: Encryption PyOpenSSL RHEL8 py3.12 run_on: - rhel87-small @@ -459,7 +459,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: encryption-rhel8-pypy3.9 tasks: - - name: .sharded_cluster .auth .ssl + - name: .sharded_cluster .auth .ssl .sync_async display_name: Encryption RHEL8 pypy3.9 run_on: - rhel87-small @@ -468,7 +468,7 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: encryption-macos-py3.9 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption macOS py3.9 run_on: - macos-14 @@ -479,7 +479,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-macos-py3.13 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption macOS py3.13 run_on: - macos-14 @@ -490,7 +490,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-macos-py3.9 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption crypt_shared macOS py3.9 run_on: - macos-14 @@ -502,7 +502,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-macos-py3.13 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption crypt_shared macOS py3.13 run_on: - macos-14 @@ -514,7 +514,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-win64-py3.9 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption Win64 py3.9 run_on: - windows-64-vsMulti-small @@ -525,7 +525,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-win64-py3.13 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption Win64 py3.13 run_on: - windows-64-vsMulti-small @@ -536,7 +536,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-win64-py3.9 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption crypt_shared Win64 py3.9 run_on: - windows-64-vsMulti-small @@ -548,7 +548,7 @@ buildvariants: tags: [encryption_tag] - name: encryption-crypt_shared-win64-py3.13 tasks: - - name: .latest .replica_set + - name: .latest .replica_set .sync_async display_name: Encryption crypt_shared Win64 py3.13 run_on: - windows-64-vsMulti-small @@ -627,7 +627,7 @@ buildvariants: # Green framework tests - name: eventlet-rhel8-py3.9 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: Eventlet RHEL8 py3.9 run_on: - rhel87-small @@ -638,7 +638,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: gevent-rhel8-py3.9 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: Gevent RHEL8 py3.9 run_on: - rhel87-small @@ -649,7 +649,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: eventlet-rhel8-py3.12 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: Eventlet RHEL8 py3.12 run_on: - rhel87-small @@ -660,7 +660,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: gevent-rhel8-py3.12 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: Gevent RHEL8 py3.12 run_on: - rhel87-small @@ -761,7 +761,7 @@ buildvariants: # No c ext tests - name: no-c-ext-rhel8-py3.9 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: No C Ext RHEL8 py3.9 run_on: - rhel87-small @@ -770,7 +770,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: no-c-ext-rhel8-py3.10 tasks: - - name: .replica_set .noauth .nossl + - name: .replica_set .noauth .nossl .sync_async display_name: No C Ext RHEL8 py3.10 run_on: - rhel87-small @@ -779,7 +779,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: no-c-ext-rhel8-py3.11 tasks: - - name: .sharded_cluster .noauth .nossl + - name: .sharded_cluster .noauth .nossl .sync_async display_name: No C Ext RHEL8 py3.11 run_on: - rhel87-small @@ -788,7 +788,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: no-c-ext-rhel8-py3.12 tasks: - - name: .standalone .noauth .nossl + - name: .standalone .noauth .nossl .sync_async display_name: No C Ext RHEL8 py3.12 run_on: - rhel87-small @@ -797,7 +797,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: no-c-ext-rhel8-py3.13 tasks: - - name: .replica_set .noauth .nossl + - name: .replica_set .noauth .nossl .sync_async display_name: No C Ext RHEL8 py3.13 run_on: - rhel87-small @@ -976,8 +976,8 @@ buildvariants: # Pyopenssl tests - name: pyopenssl-macos-py3.9 tasks: - - name: .replica_set .noauth .nossl - - name: .7.0 .noauth .nossl + - name: .replica_set .noauth .nossl .sync_async + - name: .7.0 .noauth .nossl .sync_async display_name: PyOpenSSL macOS py3.9 run_on: - macos-14 @@ -987,8 +987,8 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-py3.10 tasks: - - name: .replica_set .auth .ssl - - name: .7.0 .auth .ssl + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL RHEL8 py3.10 run_on: - rhel87-small @@ -998,8 +998,8 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-py3.11 tasks: - - name: .replica_set .auth .ssl - - name: .7.0 .auth .ssl + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL RHEL8 py3.11 run_on: - rhel87-small @@ -1009,8 +1009,8 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-py3.12 tasks: - - name: .replica_set .auth .ssl - - name: .7.0 .auth .ssl + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL RHEL8 py3.12 run_on: - rhel87-small @@ -1020,8 +1020,8 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-py3.13 tasks: - - name: .replica_set .auth .ssl - - name: .7.0 .auth .ssl + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL Win64 py3.13 run_on: - windows-64-vsMulti-small @@ -1031,8 +1031,8 @@ buildvariants: PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.9 tasks: - - name: .replica_set .auth .ssl - - name: .7.0 .auth .ssl + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL RHEL8 pypy3.9 run_on: - rhel87-small @@ -1042,8 +1042,8 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: pyopenssl-rhel8-pypy3.10 tasks: - - name: .replica_set .auth .ssl - - name: .7.0 .auth .ssl + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL RHEL8 pypy3.10 run_on: - rhel87-small @@ -1065,9 +1065,9 @@ buildvariants: # Server tests - name: test-rhel8-py3.9-cov tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster + - name: .standalone .sync_async + - name: .replica_set .sync_async + - name: .sharded_cluster .sync_async display_name: Test RHEL8 py3.9 cov run_on: - rhel87-small @@ -1077,9 +1077,9 @@ buildvariants: tags: [coverage_tag] - name: test-rhel8-py3.13-cov tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster + - name: .standalone .sync_async + - name: .replica_set .sync_async + - name: .sharded_cluster .sync_async display_name: Test RHEL8 py3.13 cov run_on: - rhel87-small @@ -1089,9 +1089,9 @@ buildvariants: tags: [coverage_tag] - name: test-rhel8-pypy3.10-cov tasks: - - name: .standalone - - name: .replica_set - - name: .sharded_cluster + - name: .standalone .sync_async + - name: .replica_set .sync_async + - name: .sharded_cluster .sync_async display_name: Test RHEL8 pypy3.10 cov run_on: - rhel87-small @@ -1101,9 +1101,9 @@ buildvariants: tags: [coverage_tag] - name: test-rhel8-py3.10 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Test RHEL8 py3.10 run_on: - rhel87-small @@ -1112,9 +1112,9 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: test-rhel8-py3.11 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Test RHEL8 py3.11 run_on: - rhel87-small @@ -1123,9 +1123,9 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: test-rhel8-py3.12 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Test RHEL8 py3.12 run_on: - rhel87-small @@ -1134,9 +1134,9 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: test-rhel8-pypy3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl .sync_async + - name: .replica_set .noauth .ssl .sync_async + - name: .standalone .noauth .nossl .sync_async display_name: Test RHEL8 pypy3.9 run_on: - rhel87-small @@ -1145,9 +1145,9 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: test-macos-py3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl !.sync_async + - name: .replica_set .noauth .ssl !.sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: Test macOS py3.9 run_on: - macos-14 @@ -1156,9 +1156,9 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-py3.13 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl !.sync_async + - name: .replica_set .noauth .ssl !.sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: Test macOS py3.13 run_on: - macos-14 @@ -1167,21 +1167,21 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-macos-arm64-py3.9 tasks: - - name: .sharded_cluster .auth .ssl .6.0 - - name: .replica_set .noauth .ssl .6.0 - - name: .standalone .noauth .nossl .6.0 - - name: .sharded_cluster .auth .ssl .7.0 - - name: .replica_set .noauth .ssl .7.0 - - name: .standalone .noauth .nossl .7.0 - - name: .sharded_cluster .auth .ssl .8.0 - - name: .replica_set .noauth .ssl .8.0 - - name: .standalone .noauth .nossl .8.0 - - name: .sharded_cluster .auth .ssl .rapid - - name: .replica_set .noauth .ssl .rapid - - name: .standalone .noauth .nossl .rapid - - name: .sharded_cluster .auth .ssl .latest - - name: .replica_set .noauth .ssl .latest - - name: .standalone .noauth .nossl .latest + - name: .sharded_cluster .auth .ssl .6.0 !.sync_async + - name: .replica_set .noauth .ssl .6.0 !.sync_async + - name: .standalone .noauth .nossl .6.0 !.sync_async + - name: .sharded_cluster .auth .ssl .7.0 !.sync_async + - name: .replica_set .noauth .ssl .7.0 !.sync_async + - name: .standalone .noauth .nossl .7.0 !.sync_async + - name: .sharded_cluster .auth .ssl .8.0 !.sync_async + - name: .replica_set .noauth .ssl .8.0 !.sync_async + - name: .standalone .noauth .nossl .8.0 !.sync_async + - name: .sharded_cluster .auth .ssl .rapid !.sync_async + - name: .replica_set .noauth .ssl .rapid !.sync_async + - name: .standalone .noauth .nossl .rapid !.sync_async + - name: .sharded_cluster .auth .ssl .latest !.sync_async + - name: .replica_set .noauth .ssl .latest !.sync_async + - name: .standalone .noauth .nossl .latest !.sync_async display_name: Test macOS Arm64 py3.9 run_on: - macos-14-arm64 @@ -1190,21 +1190,21 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: test-macos-arm64-py3.13 tasks: - - name: .sharded_cluster .auth .ssl .6.0 - - name: .replica_set .noauth .ssl .6.0 - - name: .standalone .noauth .nossl .6.0 - - name: .sharded_cluster .auth .ssl .7.0 - - name: .replica_set .noauth .ssl .7.0 - - name: .standalone .noauth .nossl .7.0 - - name: .sharded_cluster .auth .ssl .8.0 - - name: .replica_set .noauth .ssl .8.0 - - name: .standalone .noauth .nossl .8.0 - - name: .sharded_cluster .auth .ssl .rapid - - name: .replica_set .noauth .ssl .rapid - - name: .standalone .noauth .nossl .rapid - - name: .sharded_cluster .auth .ssl .latest - - name: .replica_set .noauth .ssl .latest - - name: .standalone .noauth .nossl .latest + - name: .sharded_cluster .auth .ssl .6.0 !.sync_async + - name: .replica_set .noauth .ssl .6.0 !.sync_async + - name: .standalone .noauth .nossl .6.0 !.sync_async + - name: .sharded_cluster .auth .ssl .7.0 !.sync_async + - name: .replica_set .noauth .ssl .7.0 !.sync_async + - name: .standalone .noauth .nossl .7.0 !.sync_async + - name: .sharded_cluster .auth .ssl .8.0 !.sync_async + - name: .replica_set .noauth .ssl .8.0 !.sync_async + - name: .standalone .noauth .nossl .8.0 !.sync_async + - name: .sharded_cluster .auth .ssl .rapid !.sync_async + - name: .replica_set .noauth .ssl .rapid !.sync_async + - name: .standalone .noauth .nossl .rapid !.sync_async + - name: .sharded_cluster .auth .ssl .latest !.sync_async + - name: .replica_set .noauth .ssl .latest !.sync_async + - name: .standalone .noauth .nossl .latest !.sync_async display_name: Test macOS Arm64 py3.13 run_on: - macos-14-arm64 @@ -1213,9 +1213,9 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - name: test-win64-py3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl !.sync_async + - name: .replica_set .noauth .ssl !.sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: Test Win64 py3.9 run_on: - windows-64-vsMulti-small @@ -1224,9 +1224,9 @@ buildvariants: PYTHON_BINARY: C:/python/Python39/python.exe - name: test-win64-py3.13 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl !.sync_async + - name: .replica_set .noauth .ssl !.sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: Test Win64 py3.13 run_on: - windows-64-vsMulti-small @@ -1235,9 +1235,9 @@ buildvariants: PYTHON_BINARY: C:/python/Python313/python.exe - name: test-win32-py3.9 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl !.sync_async + - name: .replica_set .noauth .ssl !.sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: Test Win32 py3.9 run_on: - windows-64-vsMulti-small @@ -1246,9 +1246,9 @@ buildvariants: PYTHON_BINARY: C:/python/32/Python39/python.exe - name: test-win32-py3.13 tasks: - - name: .sharded_cluster .auth .ssl - - name: .replica_set .noauth .ssl - - name: .standalone .noauth .nossl + - name: .sharded_cluster .auth .ssl !.sync_async + - name: .replica_set .noauth .ssl !.sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: Test Win32 py3.13 run_on: - windows-64-vsMulti-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index eefd04b040..59760e7664 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -41,7 +41,7 @@ ".replica_set .noauth .ssl", ".standalone .noauth .nossl", ] -SYNCS = ["sync", "async"] +SYNCS = ["sync", "async", "sync_async"] DISPLAY_LOOKUP = dict( ssl=dict(ssl="SSL", nossl="NoSSL"), auth=dict(auth="Auth", noauth="NoAuth"), @@ -246,7 +246,7 @@ def create_server_variants() -> list[BuildVariant]: expansions = dict(COVERAGE="coverage") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( - [f".{t}" for t in TOPOLOGIES], + [f".{t} .sync_async" for t in TOPOLOGIES], display_name, python=python, host=host, @@ -260,7 +260,7 @@ def create_server_variants() -> list[BuildVariant]: display_name = f"Test {host}" display_name = get_display_name("Test", host, python=python) variant = create_variant( - SUB_TASKS, + [f"{t} .sync_async" for t in SUB_TASKS], display_name, python=python, host=host, @@ -271,12 +271,12 @@ def create_server_variants() -> list[BuildVariant]: # Test a subset on each of the other platforms. for host in ("macos", "macos-arm64", "win64", "win32"): for python in MIN_MAX_PYTHON: - tasks = SUB_TASKS + tasks = [f"{t} !.sync_async" for t in SUB_TASKS] # MacOS arm64 only works on server versions 6.0+ if host == "macos-arm64": tasks = [] for version in get_versions_from("6.0"): - tasks.extend(f"{t} .{version}" for t in SUB_TASKS) + tasks.extend(f"{t} .{version} !.sync_async" for t in SUB_TASKS) expansions = dict(SKIP_CSOT_TESTS="true") display_name = get_display_name("Test", host, python=python, **expansions) variant = create_variant( @@ -312,7 +312,7 @@ def get_encryption_expansions(encryption): expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) variant = create_variant( - SUB_TASKS, + [f"{t} .sync_async" for t in SUB_TASKS], display_name, python=python, host=host, @@ -327,7 +327,7 @@ def get_encryption_expansions(encryption): expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) variant = create_variant( - [task], + [f"{task} .sync_async"], display_name, python=python, host=host, @@ -337,7 +337,7 @@ def get_encryption_expansions(encryption): # Test on macos and linux on one server version and topology for min and max python. encryptions = ["Encryption", "Encryption crypt_shared"] - task_names = [".latest .replica_set"] + task_names = [".latest .replica_set .sync_async"] for host, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) @@ -379,7 +379,7 @@ def create_compression_variants(): # Compression tests - standalone versions of each server, across python versions, with and without c extensions. # PyPy interpreters are always tested without extensions. host = "rhel8" - base_task = ".standalone .noauth .nossl" + base_task = ".standalone .noauth .nossl .sync_async" task_names = dict(snappy=[base_task], zlib=[base_task], zstd=[f"{base_task} !.4.0"]) variants = [] for ind, (compressor, c_ext) in enumerate(product(["snappy", "zlib", "zstd"], C_EXTS)): @@ -455,7 +455,7 @@ def create_pyopenssl_variants(): display_name = get_display_name(base_name, host, python=python) variant = create_variant( - [f".replica_set .{auth} .{ssl}", f".7.0 .{auth} .{ssl}"], + [f".replica_set .{auth} .{ssl} .sync_async", f".7.0 .{auth} .{ssl} .sync_async"], display_name, python=python, host=host, @@ -475,12 +475,12 @@ def create_storage_engine_tests(): python = CPYTHONS[0] expansions = dict(STORAGE_ENGINE=engine.lower()) if engine == engines[0]: - tasks = [f".standalone .noauth .nossl .{v}" for v in ALL_VERSIONS] + tasks = [f".standalone .noauth .nossl .{v} .sync_async" for v in ALL_VERSIONS] else: # MongoDB 4.2 drops support for MMAPv1 versions = get_versions_until("4.0") - tasks = [f".standalone .{v} .noauth .nossl" for v in versions] + [ - f".replica_set .{v} .noauth .nossl" for v in versions + tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in versions] + [ + f".replica_set .{v} .noauth .nossl .sync_async" for v in versions ] display_name = get_display_name(f"Storage {engine}", host, python=python) variant = create_variant( @@ -493,7 +493,7 @@ def create_storage_engine_tests(): def create_versioned_api_tests(): host = "rhel8" tags = ["versionedApi_tag"] - tasks = [f".standalone .{v} .noauth .nossl" for v in get_versions_from("5.0")] + tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0")] variants = [] types = ["require v1", "accept v2"] @@ -524,7 +524,7 @@ def create_versioned_api_tests(): def create_green_framework_variants(): variants = [] - tasks = [".standalone .noauth .nossl"] + tasks = [".standalone .noauth .nossl .sync_async"] host = "rhel8" for python, framework in product([CPYTHONS[0], CPYTHONS[-2]], ["eventlet", "gevent"]): expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") @@ -540,7 +540,7 @@ def create_no_c_ext_variants(): variants = [] host = "rhel8" for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): - tasks = [f".{topology} .noauth .nossl"] + tasks = [f".{topology} .noauth .nossl .sync_async"] expansions = dict() handle_c_ext(C_EXTS[0], expansions) display_name = get_display_name("No C Ext", host, python=python) @@ -590,7 +590,7 @@ def create_disable_test_commands_variants(): expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") python = CPYTHONS[0] display_name = get_display_name("Disable test commands", host, python=python) - tasks = [".latest"] + tasks = [".latest .sync_async"] return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] @@ -717,7 +717,7 @@ def create_alternative_hosts_variants(): host = "rhel7" variants.append( create_variant( - [".5.0 .standalone"], + [".5.0 .standalone !.sync_async"], get_display_name("OpenSSL 1.0.2", "rhel7", python=CPYTHONS[0], **expansions), host=host, python=CPYTHONS[0], @@ -731,7 +731,7 @@ def create_alternative_hosts_variants(): for host, host_name in zip(hosts, host_names): variants.append( create_variant( - [".6.0 .standalone"], + [".6.0 .standalone !.sync_async"], display_name=get_display_name(f"Other hosts {host_name}", **expansions), expansions=expansions, batchtime=batchtime, @@ -758,11 +758,16 @@ def create_server_tasks(): SSL=ssl, ) bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + test_suites = "" + if sync == "sync": + test_suites = "default" + elif sync == "async": + test_suites = "default_async" test_vars = dict( AUTH=auth, SSL=ssl, SYNC=sync, - TEST_SUITES="default" if sync == "sync" else "default_async", + TEST_SUITES=test_suites, ) test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) From 5c1c24101802a3827260246a06e018b3c70202d0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Oct 2024 08:25:23 -0500 Subject: [PATCH 1591/2111] PYTHON-4909 Use ubuntu for Atlas Data Lake tests (#1969) --- .evergreen/generated_configs/variants.yml | 28 +++++++++++++---------- .evergreen/scripts/generate_config.py | 4 ++-- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 240b237fdc..0a4e5cfb14 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -66,39 +66,43 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Atlas data lake tests - - name: atlas-data-lake-rhel8-py3.9-no-c + - name: atlas-data-lake-ubuntu-22-py3.9-auth-no-c tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.9 No C + display_name: Atlas Data Lake Ubuntu-22 py3.9 Auth No C run_on: - - rhel87-small + - ubuntu2204-small expansions: + AUTH: auth NO_EXT: "1" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-rhel8-py3.9 + - name: atlas-data-lake-ubuntu-22-py3.9-auth tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.9 + display_name: Atlas Data Lake Ubuntu-22 py3.9 Auth run_on: - - rhel87-small + - ubuntu2204-small expansions: + AUTH: auth PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-rhel8-py3.13-no-c + - name: atlas-data-lake-ubuntu-22-py3.13-auth-no-c tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.13 No C + display_name: Atlas Data Lake Ubuntu-22 py3.13 Auth No C run_on: - - rhel87-small + - ubuntu2204-small expansions: + AUTH: auth NO_EXT: "1" PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: atlas-data-lake-rhel8-py3.13 + - name: atlas-data-lake-ubuntu-22-py3.13-auth tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake RHEL8 py3.13 + display_name: Atlas Data Lake Ubuntu-22 py3.13 Auth run_on: - - rhel87-small + - ubuntu2204-small expansions: + AUTH: auth PYTHON_BINARY: /opt/python/3.13/bin/python3 # Aws auth tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 59760e7664..9abcc6516a 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -553,10 +553,10 @@ def create_no_c_ext_variants(): def create_atlas_data_lake_variants(): variants = [] - host = "rhel8" + host = "ubuntu22" for python, c_ext in product(MIN_MAX_PYTHON, C_EXTS): tasks = ["atlas-data-lake-tests"] - expansions = dict() + expansions = dict(AUTH="auth") handle_c_ext(c_ext, expansions) display_name = get_display_name("Atlas Data Lake", host, python=python, **expansions) variant = create_variant( From 00c29600decda0081959d532fc8f882a16554fc0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 29 Oct 2024 11:34:06 -0400 Subject: [PATCH 1592/2111] PYTHON-4766 - Fix logic for determining whether to populate BulkWriteException.partialResult (#1980) --- test/crud/unified/client-bulkWrite-partialResults.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/crud/unified/client-bulkWrite-partialResults.json b/test/crud/unified/client-bulkWrite-partialResults.json index b35e94a2ea..1b75e37834 100644 --- a/test/crud/unified/client-bulkWrite-partialResults.json +++ b/test/crud/unified/client-bulkWrite-partialResults.json @@ -486,7 +486,7 @@ ] }, { - "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "description": "partialResult is set when second operation fails during an unordered bulk write (summary)", "operations": [ { "object": "client0", From dfb6a9a4f337c780be832a2f6fe84fe292e24015 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 29 Oct 2024 11:08:22 -0500 Subject: [PATCH 1593/2111] PYTHON-4209 Ensure that no error is raised for unknown auth mechanism (#1981) --- test/mockupdb/test_handshake.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 7cc3017c8f..752c4f8421 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -229,6 +229,39 @@ def test_client_handshake_saslSupportedMechs(self): future() return + def test_client_handshake_saslSupportedMechs_unknown(self): + server = MockupDB() + server.run() + self.addCleanup(server.stop) + + primary_response = OpReply( + "ismaster", + True, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + saslSupportedMechs=["SCRAM-SHA-256", "does_not_exist"], + ) + client = MongoClient( + server.uri, authmechanism="PLAIN", username="username", password="password" + ) + + self.addCleanup(client.close) + + # New monitoring connections send data during handshake. + heartbeat = server.receives("ismaster") + heartbeat.ok(primary_response) + + future = go(client.db.command, "whatever") + for request in server: + if request.matches("ismaster"): + request.ok(primary_response) + elif request.matches("saslStart"): + request.ok("saslStart", True, conversationId=1, payload=b"", done=True, ok=1) + else: + request.ok() + future() + return + def test_handshake_load_balanced(self): self.hello_with_option_helper(OpMsg, loadBalanced=True) with self.assertRaisesRegex(AssertionError, "does not match"): From 2f1227c504064d6859cb05d567999262f99b7937 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 29 Oct 2024 12:28:33 -0400 Subject: [PATCH 1594/2111] =?UTF-8?q?PYTHON-4807=20-=20Specify=20how=20to?= =?UTF-8?q?=20handle=20unacknowledged+(ordered|verbose|m=E2=80=A6=20(#1979?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/asynchronous/mongo_client.py | 7 +++ pymongo/synchronous/mongo_client.py | 7 +++ test/asynchronous/test_client_bulk_write.py | 46 ++++++++++++++- .../unacknowledged-client-bulkWrite.json | 5 +- .../crud/unified/client-bulkWrite-errors.json | 58 +++++++++++++++++++ test/test_client_bulk_write.py | 42 +++++++++++++- 6 files changed, 159 insertions(+), 6 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 4e09efe401..a71e4cb5cd 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2354,6 +2354,13 @@ async def bulk_write( if not write_concern: write_concern = self.write_concern + if write_concern and not write_concern.acknowledged and verbose_results: + raise InvalidOperation( + "Cannot request unacknowledged write concern and verbose results" + ) + elif write_concern and not write_concern.acknowledged and ordered: + raise InvalidOperation("Cannot request unacknowledged write concern and ordered writes") + common.validate_list("models", models) blk = _AsyncClientBulk( diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 815446bb2c..24696f0c8e 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2342,6 +2342,13 @@ def bulk_write( if not write_concern: write_concern = self.write_concern + if write_concern and not write_concern.acknowledged and verbose_results: + raise InvalidOperation( + "Cannot request unacknowledged write concern and verbose results" + ) + elif write_concern and not write_concern.acknowledged and ordered: + raise InvalidOperation("Cannot request unacknowledged write concern and ordered writes") + common.validate_list("models", models) blk = _ClientBulk( diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 9464337809..5f6b3353e8 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -401,12 +401,16 @@ async def test_returns_error_if_unacknowledged_too_large_insert(self): # Insert document. models_insert = [InsertOne(namespace="db.coll", document={"a": b_repeated})] with self.assertRaises(DocumentTooLarge): - await client.bulk_write(models=models_insert, write_concern=WriteConcern(w=0)) + await client.bulk_write( + models=models_insert, ordered=False, write_concern=WriteConcern(w=0) + ) # Replace document. models_replace = [ReplaceOne(namespace="db.coll", filter={}, replacement={"a": b_repeated})] with self.assertRaises(DocumentTooLarge): - await client.bulk_write(models=models_replace, write_concern=WriteConcern(w=0)) + await client.bulk_write( + models=models_replace, ordered=False, write_concern=WriteConcern(w=0) + ) async def _setup_namespace_test_models(self): # See prose test specification below for details on these calculations. @@ -590,6 +594,44 @@ async def test_upserted_result(self): self.assertEqual(result.update_results[1].did_upsert, True) self.assertEqual(result.update_results[2].did_upsert, False) + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless + async def test_15_unacknowledged_write_across_batches(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + await client.db.command({"create": "db.coll"}) + + b_repeated = "b" * (self.max_bson_object_size - 500) + models = [ + InsertOne(namespace="db.coll", document={"a": b_repeated}) + for _ in range(int(self.max_message_size_bytes / self.max_bson_object_size) + 1) + ] + + listener.reset() + + res = await client.bulk_write(models, ordered=False, write_concern=WriteConcern(w=0)) + self.assertEqual(False, res.acknowledged) + + events = listener.started_events + self.assertEqual(2, len(events)) + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size), + len(events[0].command["ops"]), + ) + self.assertEqual(1, len(events[1].command["ops"])) + self.assertEqual(events[0].operation_id, events[1].operation_id) + self.assertEqual({"w": 0}, events[0].command["writeConcern"]) + self.assertEqual({"w": 0}, events[1].command["writeConcern"]) + + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size) + 1, + await collection.count_documents({}), + ) + # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(AsyncIntegrationTest): diff --git a/test/command_monitoring/unacknowledged-client-bulkWrite.json b/test/command_monitoring/unacknowledged-client-bulkWrite.json index b30e1540f4..61bb00726c 100644 --- a/test/command_monitoring/unacknowledged-client-bulkWrite.json +++ b/test/command_monitoring/unacknowledged-client-bulkWrite.json @@ -91,7 +91,8 @@ } } } - ] + ], + "ordered": false }, "expectResult": { "insertedCount": { @@ -158,7 +159,7 @@ "command": { "bulkWrite": 1, "errorsOnly": true, - "ordered": true, + "ordered": false, "ops": [ { "insert": 0, diff --git a/test/crud/unified/client-bulkWrite-errors.json b/test/crud/unified/client-bulkWrite-errors.json index 8cc45bb5f2..015bd95c99 100644 --- a/test/crud/unified/client-bulkWrite-errors.json +++ b/test/crud/unified/client-bulkWrite-errors.json @@ -450,6 +450,64 @@ } } ] + }, + { + "description": "Requesting unacknowledged write with verboseResults is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true, + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and verbose results" + } + } + ] + }, + { + "description": "Requesting unacknowledged write with ordered is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and ordered writes" + } + } + ] } ] } diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 58b5015dd2..733970dd57 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -401,12 +401,12 @@ def test_returns_error_if_unacknowledged_too_large_insert(self): # Insert document. models_insert = [InsertOne(namespace="db.coll", document={"a": b_repeated})] with self.assertRaises(DocumentTooLarge): - client.bulk_write(models=models_insert, write_concern=WriteConcern(w=0)) + client.bulk_write(models=models_insert, ordered=False, write_concern=WriteConcern(w=0)) # Replace document. models_replace = [ReplaceOne(namespace="db.coll", filter={}, replacement={"a": b_repeated})] with self.assertRaises(DocumentTooLarge): - client.bulk_write(models=models_replace, write_concern=WriteConcern(w=0)) + client.bulk_write(models=models_replace, ordered=False, write_concern=WriteConcern(w=0)) def _setup_namespace_test_models(self): # See prose test specification below for details on these calculations. @@ -590,6 +590,44 @@ def test_upserted_result(self): self.assertEqual(result.update_results[1].did_upsert, True) self.assertEqual(result.update_results[2].did_upsert, False) + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless + def test_15_unacknowledged_write_across_batches(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + client.db.command({"create": "db.coll"}) + + b_repeated = "b" * (self.max_bson_object_size - 500) + models = [ + InsertOne(namespace="db.coll", document={"a": b_repeated}) + for _ in range(int(self.max_message_size_bytes / self.max_bson_object_size) + 1) + ] + + listener.reset() + + res = client.bulk_write(models, ordered=False, write_concern=WriteConcern(w=0)) + self.assertEqual(False, res.acknowledged) + + events = listener.started_events + self.assertEqual(2, len(events)) + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size), + len(events[0].command["ops"]), + ) + self.assertEqual(1, len(events[1].command["ops"])) + self.assertEqual(events[0].operation_id, events[1].operation_id) + self.assertEqual({"w": 0}, events[0].command["writeConcern"]) + self.assertEqual({"w": 0}, events[1].command["writeConcern"]) + + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size) + 1, + collection.count_documents({}), + ) + # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(IntegrationTest): From 9a11b78fdfe25de9e845e29503cc296b7f82dffa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 30 Oct 2024 12:49:20 -0500 Subject: [PATCH 1595/2111] PYTHON-4209 Fix test for ensure that no error is raised for unknown auth mechanism (#1982) --- test/mockupdb/test_handshake.py | 39 ++++----------------------------- 1 file changed, 4 insertions(+), 35 deletions(-) diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 752c4f8421..c2c978c4ad 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -218,50 +218,19 @@ def test_client_handshake_saslSupportedMechs(self): request.ok( "ismaster", True, - saslSupportedMechs=["SCRAM-SHA-256"], + # Unsupported auth mech should be ignored. + saslSupportedMechs=["SCRAM-SHA-256", "does_not_exist"], speculativeAuthenticate=auth, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) # Authentication should immediately fail with: # OperationFailure: Server returned an invalid nonce. - with self.assertRaises(OperationFailure): + with self.assertRaises(OperationFailure) as cm: future() + self.assertEqual(str(cm.exception), "Server returned an invalid nonce.") return - def test_client_handshake_saslSupportedMechs_unknown(self): - server = MockupDB() - server.run() - self.addCleanup(server.stop) - - primary_response = OpReply( - "ismaster", - True, - minWireVersion=2, - maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, - saslSupportedMechs=["SCRAM-SHA-256", "does_not_exist"], - ) - client = MongoClient( - server.uri, authmechanism="PLAIN", username="username", password="password" - ) - - self.addCleanup(client.close) - - # New monitoring connections send data during handshake. - heartbeat = server.receives("ismaster") - heartbeat.ok(primary_response) - - future = go(client.db.command, "whatever") - for request in server: - if request.matches("ismaster"): - request.ok(primary_response) - elif request.matches("saslStart"): - request.ok("saslStart", True, conversationId=1, payload=b"", done=True, ok=1) - else: - request.ok() - future() - return - def test_handshake_load_balanced(self): self.hello_with_option_helper(OpMsg, loadBalanced=True) with self.assertRaisesRegex(AssertionError, "does not match"): From ad3292e39b0490db354735a803911bfd6943ee65 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 30 Oct 2024 12:57:31 -0500 Subject: [PATCH 1596/2111] PYTHON-4922 Remove Support for MONGODB-CR Authentication (#1978) --- doc/changelog.rst | 1 + doc/examples/authentication.rst | 21 +--------- pymongo/asynchronous/auth.py | 16 -------- pymongo/auth_shared.py | 1 - pymongo/synchronous/auth.py | 16 -------- test/auth/legacy/connection-string.json | 41 ------------------- test/connection_string/test/valid-auth.json | 27 ++---------- .../connection_string/test/valid-options.json | 4 +- test/test_uri_parser.py | 13 +++--- 9 files changed, 13 insertions(+), 127 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 29fddb7b5c..94d991868d 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -12,6 +12,7 @@ PyMongo 4.11 brings a number of changes including: - Dropped support for Python 3.8. - Dropped support for MongoDB 3.6. +- Dropped support for the MONGODB-CR authenticate mechanism, which is no longer supported by MongoDB 4.0+. - Added support for free-threaded Python with the GIL disabled. For more information see: `Free-threaded CPython `_. - :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index 6c89910f3c..b319df814c 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -76,24 +76,6 @@ For best performance on Python versions older than 2.7.8 install `backports.pbkd .. _backports.pbkdf2: https://pypi.python.org/pypi/backports.pbkdf2/ -MONGODB-CR ----------- - -.. warning:: MONGODB-CR was deprecated with the release of MongoDB 3.6 and - is no longer supported by MongoDB 4.0. - -Before MongoDB 3.0 the default authentication mechanism was MONGODB-CR, -the "MongoDB Challenge-Response" protocol:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... username='user', - ... password='password', - ... authMechanism='MONGODB-CR') - >>> - >>> uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=MONGODB-CR" - >>> client = MongoClient(uri) - Default Authentication Mechanism -------------------------------- @@ -221,8 +203,7 @@ SASL PLAIN (RFC 4616) MongoDB Enterprise Edition version 2.6 and newer support the SASL PLAIN authentication mechanism, initially intended for delegating authentication -to an LDAP server. Using the PLAIN mechanism is very similar to MONGODB-CR. -These examples use the $external virtual database for LDAP support:: +to an LDAP server. These examples use the $external virtual database for LDAP support:: >>> from pymongo import MongoClient >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py index 1fb28f6c49..fc563ec48f 100644 --- a/pymongo/asynchronous/auth.py +++ b/pymongo/asynchronous/auth.py @@ -329,21 +329,6 @@ async def _authenticate_x509(credentials: MongoCredential, conn: AsyncConnection await conn.command("$external", cmd) -async def _authenticate_mongo_cr(credentials: MongoCredential, conn: AsyncConnection) -> None: - """Authenticate using MONGODB-CR.""" - source = credentials.source - username = credentials.username - password = credentials.password - # Get a nonce - response = await conn.command(source, {"getnonce": 1}) - nonce = response["nonce"] - key = _auth_key(nonce, username, password) - - # Actually authenticate - query = {"authenticate": 1, "user": username, "nonce": nonce, "key": key} - await conn.command(source, query) - - async def _authenticate_default(credentials: MongoCredential, conn: AsyncConnection) -> None: if conn.max_wire_version >= 7: if conn.negotiated_mechs: @@ -365,7 +350,6 @@ async def _authenticate_default(credentials: MongoCredential, conn: AsyncConnect _AUTH_MAP: Mapping[str, Callable[..., Coroutine[Any, Any, None]]] = { "GSSAPI": _authenticate_gssapi, - "MONGODB-CR": _authenticate_mongo_cr, "MONGODB-X509": _authenticate_x509, "MONGODB-AWS": _authenticate_aws, "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py index 7e3acd9dfb..11d08ffe9c 100644 --- a/pymongo/auth_shared.py +++ b/pymongo/auth_shared.py @@ -34,7 +34,6 @@ MECHANISMS = frozenset( [ "GSSAPI", - "MONGODB-CR", "MONGODB-OIDC", "MONGODB-X509", "MONGODB-AWS", diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py index 9a3477679d..7b370843c5 100644 --- a/pymongo/synchronous/auth.py +++ b/pymongo/synchronous/auth.py @@ -326,21 +326,6 @@ def _authenticate_x509(credentials: MongoCredential, conn: Connection) -> None: conn.command("$external", cmd) -def _authenticate_mongo_cr(credentials: MongoCredential, conn: Connection) -> None: - """Authenticate using MONGODB-CR.""" - source = credentials.source - username = credentials.username - password = credentials.password - # Get a nonce - response = conn.command(source, {"getnonce": 1}) - nonce = response["nonce"] - key = _auth_key(nonce, username, password) - - # Actually authenticate - query = {"authenticate": 1, "user": username, "nonce": nonce, "key": key} - conn.command(source, query) - - def _authenticate_default(credentials: MongoCredential, conn: Connection) -> None: if conn.max_wire_version >= 7: if conn.negotiated_mechs: @@ -360,7 +345,6 @@ def _authenticate_default(credentials: MongoCredential, conn: Connection) -> Non _AUTH_MAP: Mapping[str, Callable[..., None]] = { "GSSAPI": _authenticate_gssapi, - "MONGODB-CR": _authenticate_mongo_cr, "MONGODB-X509": _authenticate_x509, "MONGODB-AWS": _authenticate_aws, "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json index 57fd9d4a11..ab559582ae 100644 --- a/test/auth/legacy/connection-string.json +++ b/test/auth/legacy/connection-string.json @@ -127,47 +127,6 @@ "uri": "mongodb://localhost/?authMechanism=GSSAPI", "valid": false }, - { - "description": "should recognize the mechanism (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-CR", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "admin", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "foo", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should use the authSource when specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if no username is supplied (MONGODB-CR)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-CR", - "valid": false - }, { "description": "should recognize the mechanism (MONGODB-X509)", "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", diff --git a/test/connection_string/test/valid-auth.json b/test/connection_string/test/valid-auth.json index 4f684ff185..12192fab4c 100644 --- a/test/connection_string/test/valid-auth.json +++ b/test/connection_string/test/valid-auth.json @@ -220,29 +220,8 @@ "options": null }, { - "description": "Escaped user info and database (MONGODB-CR)", - "uri": "mongodb://%24am:f%3Azzb%40z%2Fz%3D@127.0.0.1/admin%3F?authMechanism=MONGODB-CR", - "valid": true, - "warning": false, - "hosts": [ - { - "type": "ipv4", - "host": "127.0.0.1", - "port": null - } - ], - "auth": { - "username": "$am", - "password": "f:zzb@z/z=", - "db": "admin?" - }, - "options": { - "authmechanism": "MONGODB-CR" - } - }, - { - "description": "Subdelimiters in user/pass don't need escaping (MONGODB-CR)", - "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", + "description": "Subdelimiters in user/pass don't need escaping (PLAIN)", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=PLAIN", "valid": true, "warning": false, "hosts": [ @@ -258,7 +237,7 @@ "db": "admin" }, "options": { - "authmechanism": "MONGODB-CR" + "authmechanism": "PLAIN" } }, { diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json index 3c79fe7ae5..6c86172d08 100644 --- a/test/connection_string/test/valid-options.json +++ b/test/connection_string/test/valid-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Option names are normalized to lowercase", - "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=MONGODB-CR", + "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=PLAIN", "valid": true, "warning": false, "hosts": [ @@ -18,7 +18,7 @@ "db": "admin" }, "options": { - "authmechanism": "MONGODB-CR" + "authmechanism": "PLAIN" } }, { diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 2a68e9a2cd..f95717e95f 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -142,7 +142,6 @@ def test_split_options(self): self.assertEqual({"fsync": True}, split_options("fsync=true")) self.assertEqual({"fsync": False}, split_options("fsync=false")) self.assertEqual({"authmechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) - self.assertEqual({"authmechanism": "MONGODB-CR"}, split_options("authMechanism=MONGODB-CR")) self.assertEqual( {"authmechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") ) @@ -295,30 +294,30 @@ def test_parse_uri(self): # Various authentication tests res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "MONGODB-CR"} + res["options"] = {"authmechanism": "SCRAM-SHA-256"} res["username"] = "user" res["password"] = "password" self.assertEqual( - res, parse_uri("mongodb://user:password@localhost/?authMechanism=MONGODB-CR") + res, parse_uri("mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256") ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "MONGODB-CR", "authsource": "bar"} + res["options"] = {"authmechanism": "SCRAM-SHA-256", "authsource": "bar"} res["username"] = "user" res["password"] = "password" res["database"] = "foo" self.assertEqual( res, parse_uri( - "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=MONGODB-CR" + "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=SCRAM-SHA-256" ), ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "MONGODB-CR"} + res["options"] = {"authmechanism": "SCRAM-SHA-256"} res["username"] = "user" res["password"] = "" - self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=MONGODB-CR")) + self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=SCRAM-SHA-256")) res = copy.deepcopy(orig) res["username"] = "user@domain.com" From 92d6a732c5492adcb360b624af2b607b47ed31ea Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 30 Oct 2024 14:06:54 -0500 Subject: [PATCH 1597/2111] PYTHON-3906 & PYTHON-2867 Implement GSSAPI ServiceHost support and expand canonicalization options (#1983) --- pymongo/auth_shared.py | 17 ++++++- pymongo/common.py | 12 ++++- test/auth/legacy/connection-string.json | 56 ++++++++++++++++++--- test/connection_string/test/valid-auth.json | 5 +- 4 files changed, 78 insertions(+), 12 deletions(-) diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py index 11d08ffe9c..fa25aa3faa 100644 --- a/pymongo/auth_shared.py +++ b/pymongo/auth_shared.py @@ -77,7 +77,7 @@ def __hash__(self) -> int: GSSAPIProperties = namedtuple( - "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm", "service_host"] ) """Mechanism properties for GSSAPI authentication.""" @@ -86,6 +86,16 @@ def __hash__(self) -> int: """Mechanism properties for MONGODB-AWS authentication.""" +def _validate_canonicalize_host_name(value: str | bool) -> str | bool: + valid_names = [False, True, "none", "forward", "forwardAndReverse"] + if value in ["true", "false", True, False]: + return value in ["true", True] + + if value not in valid_names: + raise ValueError(f"CANONICALIZE_HOST_NAME '{value}' not in valid options: {valid_names}") + return value + + def _build_credentials_tuple( mech: str, source: Optional[str], @@ -102,12 +112,15 @@ def _build_credentials_tuple( raise ValueError("authentication source must be $external or None for GSSAPI") properties = extra.get("authmechanismproperties", {}) service_name = properties.get("SERVICE_NAME", "mongodb") - canonicalize = bool(properties.get("CANONICALIZE_HOST_NAME", False)) + service_host = properties.get("SERVICE_HOST", None) + canonicalize = properties.get("CANONICALIZE_HOST_NAME", "false") + canonicalize = _validate_canonicalize_host_name(canonicalize) service_realm = properties.get("SERVICE_REALM") props = GSSAPIProperties( service_name=service_name, canonicalize_host_name=canonicalize, service_realm=service_realm, + service_host=service_host, ) # Source is always $external. return MongoCredential(mech, "$external", user, passwd, props, None) diff --git a/pymongo/common.py b/pymongo/common.py index 87aa936f5d..d4601a0eb5 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -139,6 +139,9 @@ # Default value for serverMonitoringMode SERVER_MONITORING_MODE = "auto" # poll/stream/auto +# Auth mechanism properties that must raise an error instead of warning if they invalidate. +_MECH_PROP_MUST_RAISE = ["CANONICALIZE_HOST_NAME"] + def partition_node(node: str) -> tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" @@ -423,6 +426,7 @@ def validate_read_preference_tags(name: str, value: Any) -> list[dict[str, str]] _MECHANISM_PROPS = frozenset( [ "SERVICE_NAME", + "SERVICE_HOST", "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN", @@ -476,7 +480,9 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni ) if key == "CANONICALIZE_HOST_NAME": - props[key] = validate_boolean_or_string(key, val) + from pymongo.auth_shared import _validate_canonicalize_host_name + + props[key] = _validate_canonicalize_host_name(val) else: props[key] = val @@ -867,6 +873,10 @@ def get_setter_key(x: str) -> str: validator = _get_validator(opt, URI_OPTIONS_VALIDATOR_MAP, normed_key=normed_key) validated = validator(opt, value) except (ValueError, TypeError, ConfigurationError) as exc: + if normed_key == "authmechanismproperties" and any( + p in str(exc) for p in _MECH_PROP_MUST_RAISE + ): + raise if warn: warnings.warn(str(exc), stacklevel=2) else: diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json index ab559582ae..67aafbff6e 100644 --- a/test/auth/legacy/connection-string.json +++ b/test/auth/legacy/connection-string.json @@ -80,7 +80,7 @@ }, { "description": "should accept generic mechanism property (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com", "valid": true, "credential": { "username": "user@DOMAIN.COM", @@ -89,10 +89,46 @@ "mechanism": "GSSAPI", "mechanism_properties": { "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" } } }, + { + "description": "should accept forwardAndReverse hostname canonicalization (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forwardAndReverse", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": "forwardAndReverse" + } + } + }, + { + "description": "should accept no hostname canonicalization (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:none", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": "none" + } + } + }, + { + "description": "must raise an error when the hostname canonicalization is invalid", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:invalid", + "valid": false + }, { "description": "should accept the password (GSSAPI)", "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", @@ -433,14 +469,14 @@ } }, { - "description": "should throw an exception if username and password is specified for test environment (MONGODB-OIDC)", + "description": "should throw an exception if supplied a password (MONGODB-OIDC)", "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", "valid": false, "credential": null }, { - "description": "should throw an exception if username is specified for test environment (MONGODB-OIDC)", - "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&ENVIRONMENT:test", + "description": "should throw an exception if username is specified for test (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", "valid": false, "credential": null }, @@ -451,11 +487,17 @@ "credential": null }, { - "description": "should throw an exception if neither provider nor callbacks specified (MONGODB-OIDC)", + "description": "should throw an exception if neither environment nor callbacks specified (MONGODB-OIDC)", "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", "valid": false, "credential": null }, + { + "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "valid": false, + "credential": null + }, { "description": "should recognise the mechanism with azure provider (MONGODB-OIDC)", "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", @@ -586,4 +628,4 @@ "credential": null } ] -} \ No newline at end of file +} diff --git a/test/connection_string/test/valid-auth.json b/test/connection_string/test/valid-auth.json index 12192fab4c..60f63f4e3f 100644 --- a/test/connection_string/test/valid-auth.json +++ b/test/connection_string/test/valid-auth.json @@ -263,7 +263,7 @@ }, { "description": "Escaped username (GSSAPI)", - "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authMechanism=GSSAPI", + "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authMechanism=GSSAPI", "valid": true, "warning": false, "hosts": [ @@ -282,7 +282,8 @@ "authmechanism": "GSSAPI", "authmechanismproperties": { "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" } } }, From 2332d69328c28e4a972b633b88703a819ef17ba1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 30 Oct 2024 15:37:00 -0400 Subject: [PATCH 1598/2111] PYTHON-4807 - Update changelog + remove dead code (#1984) --- doc/changelog.rst | 4 +++ pymongo/asynchronous/client_bulk.py | 49 +++-------------------------- pymongo/synchronous/client_bulk.py | 49 +++-------------------------- 3 files changed, 14 insertions(+), 88 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 94d991868d..22b0c744a9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -24,6 +24,10 @@ PyMongo 4.11 brings a number of changes including: :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.replace_one`, :class:`~pymongo.operations.UpdateOne`, and :class:`~pymongo.operations.UpdateMany`, +- :meth:`~pymongo.mongo_client.MongoClient.bulk_write` and + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` now throw an error + when ``ordered=True`` or ``verboseResults=True`` are used with unacknowledged writes. + These are unavoidable breaking changes. Issues Resolved ............... diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 96571c21eb..a6f7178e47 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -681,11 +681,11 @@ async def retryable_bulk( _throw_client_bulk_write_exception(full_result, self.verbose_results) return full_result - async def execute_command_unack_unordered( + async def execute_command_unack( self, conn: AsyncConnection, ) -> None: - """Execute commands with OP_MSG and w=0 writeConcern, unordered.""" + """Execute commands with OP_MSG and w=0 writeConcern. Always unordered.""" db_name = "admin" cmd_name = "bulkWrite" listeners = self.client._event_listeners @@ -704,8 +704,8 @@ async def execute_command_unack_unordered( while self.idx_offset < self.total_ops: # Construct the server command, specifying the relevant options. cmd = {"bulkWrite": 1} - cmd["errorsOnly"] = not self.verbose_results - cmd["ordered"] = self.ordered # type: ignore[assignment] + cmd["errorsOnly"] = True + cmd["ordered"] = False if self.bypass_doc_val is not None: cmd["bypassDocumentValidation"] = self.bypass_doc_val cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] @@ -723,43 +723,6 @@ async def execute_command_unack_unordered( self.idx_offset += len(to_send_ops) - async def execute_command_unack_ordered( - self, - conn: AsyncConnection, - ) -> None: - """Execute commands with OP_MSG and w=0 WriteConcern, ordered.""" - full_result: MutableMapping[str, Any] = { - "anySuccessful": False, - "error": None, - "writeErrors": [], - "writeConcernErrors": [], - "nInserted": 0, - "nUpserted": 0, - "nMatched": 0, - "nModified": 0, - "nDeleted": 0, - "insertResults": {}, - "updateResults": {}, - "deleteResults": {}, - } - # Ordered bulk writes have to be acknowledged so that we stop - # processing at the first error, even when the application - # specified unacknowledged writeConcern. - initial_write_concern = WriteConcern() - op_id = _randint() - try: - await self._execute_command( - initial_write_concern, - None, - conn, - op_id, - False, - full_result, - self.write_concern, - ) - except OperationFailure: - pass - async def execute_no_results( self, conn: AsyncConnection, @@ -775,9 +738,7 @@ async def execute_no_results( "Cannot set bypass_document_validation with unacknowledged write concern" ) - if self.ordered: - return await self.execute_command_unack_ordered(conn) - return await self.execute_command_unack_unordered(conn) + return await self.execute_command_unack(conn) async def execute( self, diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 2c38b1d76c..6cb4275417 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -679,11 +679,11 @@ def retryable_bulk( _throw_client_bulk_write_exception(full_result, self.verbose_results) return full_result - def execute_command_unack_unordered( + def execute_command_unack( self, conn: Connection, ) -> None: - """Execute commands with OP_MSG and w=0 writeConcern, unordered.""" + """Execute commands with OP_MSG and w=0 writeConcern. Always unordered.""" db_name = "admin" cmd_name = "bulkWrite" listeners = self.client._event_listeners @@ -702,8 +702,8 @@ def execute_command_unack_unordered( while self.idx_offset < self.total_ops: # Construct the server command, specifying the relevant options. cmd = {"bulkWrite": 1} - cmd["errorsOnly"] = not self.verbose_results - cmd["ordered"] = self.ordered # type: ignore[assignment] + cmd["errorsOnly"] = True + cmd["ordered"] = False if self.bypass_doc_val is not None: cmd["bypassDocumentValidation"] = self.bypass_doc_val cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] @@ -721,43 +721,6 @@ def execute_command_unack_unordered( self.idx_offset += len(to_send_ops) - def execute_command_unack_ordered( - self, - conn: Connection, - ) -> None: - """Execute commands with OP_MSG and w=0 WriteConcern, ordered.""" - full_result: MutableMapping[str, Any] = { - "anySuccessful": False, - "error": None, - "writeErrors": [], - "writeConcernErrors": [], - "nInserted": 0, - "nUpserted": 0, - "nMatched": 0, - "nModified": 0, - "nDeleted": 0, - "insertResults": {}, - "updateResults": {}, - "deleteResults": {}, - } - # Ordered bulk writes have to be acknowledged so that we stop - # processing at the first error, even when the application - # specified unacknowledged writeConcern. - initial_write_concern = WriteConcern() - op_id = _randint() - try: - self._execute_command( - initial_write_concern, - None, - conn, - op_id, - False, - full_result, - self.write_concern, - ) - except OperationFailure: - pass - def execute_no_results( self, conn: Connection, @@ -773,9 +736,7 @@ def execute_no_results( "Cannot set bypass_document_validation with unacknowledged write concern" ) - if self.ordered: - return self.execute_command_unack_ordered(conn) - return self.execute_command_unack_unordered(conn) + return self.execute_command_unack(conn) def execute( self, From 351196b91b5df0383fd56be5dd2c8f139ccd3a14 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 30 Oct 2024 15:46:52 -0500 Subject: [PATCH 1599/2111] PYTHON-4813 Update specification documentation links (#1977) --- README.md | 2 +- bson/json_util.py | 2 +- doc/api/index.rst | 2 +- doc/changelog.rst | 8 ++++---- doc/conf.py | 2 +- doc/developer/periodic_executor.rst | 2 +- doc/examples/uuid.rst | 2 +- doc/migrate-to-pymongo4.rst | 4 ++-- pymongo/asynchronous/mongo_client.py | 6 +++--- pymongo/pool_options.py | 2 +- pymongo/synchronous/mongo_client.py | 6 +++--- test/asynchronous/test_auth.py | 2 +- test/asynchronous/test_encryption.py | 20 ++++++++++---------- test/asynchronous/unified_format.py | 4 ++-- test/test_auth.py | 2 +- test/test_dbref.py | 2 +- test/test_encryption.py | 20 ++++++++++---------- test/test_streaming_protocol.py | 2 +- test/unified_format.py | 4 ++-- test/unified_format_shared.py | 2 +- 20 files changed, 48 insertions(+), 48 deletions(-) diff --git a/README.md b/README.md index f5e2cdf46d..bd0755620e 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ The PyMongo distribution contains tools for interacting with MongoDB database from Python. The `bson` package is an implementation of the [BSON format](http://bsonspec.org) for Python. The `pymongo` package is a native Python driver for MongoDB. The `gridfs` package is a -[gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst/) +[gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.md/) implementation on top of `pymongo`. PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. diff --git a/bson/json_util.py b/bson/json_util.py index 6f34e4103d..a171327ead 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -22,7 +22,7 @@ when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is provided, respectively. -.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst +.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json/extended-json.md Example usage (deserialization): diff --git a/doc/api/index.rst b/doc/api/index.rst index 30ae3608ca..437f2cc6a6 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -6,7 +6,7 @@ interacting with MongoDB. :mod:`bson` is an implementation of the `BSON format `_, :mod:`pymongo` is a full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools for working with the `GridFS -`_ storage +`_ storage specification. .. toctree:: diff --git a/doc/changelog.rst b/doc/changelog.rst index 22b0c744a9..bd4eafe3ef 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1027,7 +1027,7 @@ See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 -.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst +.. _DBRef specification: https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md Changes in Version 3.13.0 (2022/11/01) -------------------------------------- @@ -1562,7 +1562,7 @@ Unavoidable breaking changes: bumped to 1.16.0. This is a breaking change for applications that use PyMongo's SRV support with a version of ``dnspython`` older than 1.16.0. -.. _URI options specification: https://github.com/mongodb/specifications/blob/master/source/uri-options/uri-options.rst +.. _URI options specification: https://github.com/mongodb/specifications/blob/master/source/uri-options/uri-options.md Issues Resolved @@ -1586,7 +1586,7 @@ Changes in Version 3.8.0 (2019/04/22) must upgrade to PyPy3.5+. - :class:`~bson.objectid.ObjectId` now implements the `ObjectID specification - version 0.2 `_. + version 0.2 `_. - For better performance and to better follow the GridFS spec, :class:`~gridfs.grid_file.GridOut` now uses a single cursor to read all the chunks in the file. Previously, each chunk in the file was queried @@ -1948,7 +1948,7 @@ Highlights include: :class:`~pymongo.operations.UpdateOne`, and :class:`~pymongo.operations.UpdateMany`. - Implemented the `MongoDB Extended JSON - `_ + `_ specification. - :class:`~bson.decimal128.Decimal128` now works when cdecimal is installed. - PyMongo is now tested against a wider array of operating systems and CPU diff --git a/doc/conf.py b/doc/conf.py index f0d9f921bb..f82c719361 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -85,7 +85,7 @@ # wiki.centos.org has been flakey. # sourceforge.net is giving a 403 error, but is still accessible from the browser. linkcheck_ignore = [ - "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", r"http://sourceforge.net/", diff --git a/doc/developer/periodic_executor.rst b/doc/developer/periodic_executor.rst index effe18efca..67eaa89f10 100644 --- a/doc/developer/periodic_executor.rst +++ b/doc/developer/periodic_executor.rst @@ -106,7 +106,7 @@ Thus the current design of periodic executors is surprisingly simple: they do a simple ``time.sleep`` for a half-second, check if it is time to wake or terminate, and sleep again. -.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check +.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check .. _PYTHON-863: https://jira.mongodb.org/browse/PYTHON-863 diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst index 90ec71ebe2..350db14d9a 100644 --- a/doc/examples/uuid.rst +++ b/doc/examples/uuid.rst @@ -84,7 +84,7 @@ Finally, the same UUID would historically be serialized by the Java driver as:: .. note:: For in-depth information about the the byte-order historically used by different drivers, see the `Handling of Native UUID Types Specification - `_. + `_. This difference in the byte-order of UUIDs encoded by different drivers can result in highly unintuitive behavior in some scenarios. We detail two such diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index bc6da85560..3e992a8249 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -118,7 +118,7 @@ Renamed URI options Several deprecated URI options have been renamed to the standardized option names defined in the -`URI options specification `_. +`URI options specification `_. The old option names and their renamed equivalents are summarized in the table below. Some renamed options have different semantics from the option being replaced as noted in the 'Migration Notes' column. @@ -965,7 +965,7 @@ correct type. Otherwise the document is returned as normal. Previously, any subdocument containing a ``$ref`` field would be decoded as a :class:`~bson.dbref.DBRef`. -.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst +.. _DBRef specification: https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md Encoding a UUID raises an error by default .......................................... diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index a71e4cb5cd..e4fdf25c28 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -221,7 +221,7 @@ def __init__( `_. See the `Initial DNS Seedlist Discovery spec `_ + initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md>`_ for more details. Note that the use of SRV URIs implicitly enables TLS support. Pass tls=false in the URI to override. @@ -367,7 +367,7 @@ def __init__( :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` using the ``$out`` pipeline operator and any operation with an unacknowledged write concern (e.g. {w: 0})). See - https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst + https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md - `retryReads`: (boolean) Whether supported read operations executed within this AsyncMongoClient will be retried once after a network error. Defaults to ``True``. @@ -394,7 +394,7 @@ def __init__( transient errors such as network failures, database upgrades, and replica set failovers. For an exact definition of which errors trigger a retry, see the `retryable reads specification - `_. + `_. - `compressors`: Comma separated list of compressors for wire protocol compression. The list is used to negotiate a compressor diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index 61486c91c6..f3ed6cd2c1 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -216,7 +216,7 @@ def _metadata_env() -> dict[str, Any]: _MAX_METADATA_SIZE = 512 -# See: https://github.com/mongodb/specifications/blob/5112bcc/source/mongodb-handshake/handshake.rst#limitations +# See: https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#limitations def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: """Perform metadata truncation.""" if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 24696f0c8e..0380d4468b 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -216,7 +216,7 @@ def __init__( `_. See the `Initial DNS Seedlist Discovery spec `_ + initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.md>`_ for more details. Note that the use of SRV URIs implicitly enables TLS support. Pass tls=false in the URI to override. @@ -365,7 +365,7 @@ def __init__( :meth:`~pymongo.collection.Collection.aggregate` using the ``$out`` pipeline operator and any operation with an unacknowledged write concern (e.g. {w: 0})). See - https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst + https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md - `retryReads`: (boolean) Whether supported read operations executed within this MongoClient will be retried once after a network error. Defaults to ``True``. @@ -392,7 +392,7 @@ def __init__( transient errors such as network failures, database upgrades, and replica set failovers. For an exact definition of which errors trigger a retry, see the `retryable reads specification - `_. + `_. - `compressors`: Comma separated list of compressors for wire protocol compression. The list is used to negotiate a compressor diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index 9262714374..4f26200fb0 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -375,7 +375,7 @@ async def test_scram_sha1(self): await db.command("dbstats") -# https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#scram-sha-256-and-mechanism-negotiation class TestSCRAM(AsyncIntegrationTest): @async_client_context.require_auth @async_client_context.require_version_min(3, 7, 2) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 40f1acd32d..e42c85aa7a 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -1610,7 +1610,7 @@ async def test_automatic(self): return await self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests class TestDeadlockProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): self.client_test = await self.async_rs_or_single_client( @@ -1837,7 +1837,7 @@ async def test_case_8(self): self.assertEqual(len(self.topology_listener.results["opened"]), 1) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events class TestDecryptProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): self.client = async_client_context.client @@ -1909,7 +1909,7 @@ async def test_04_decrypt_success(self): self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(AsyncEncryptionIntegrationTest): @unittest.skipIf( os.environ.get("TEST_CRYPT_SHARED"), @@ -1990,7 +1990,7 @@ async def test_via_loading_shared_library(self): with self.assertRaises(ServerSelectionTimeoutError): await no_mongocryptd_client.db.command("ping") - # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") async def test_client_via_loading_shared_library(self): connection_established = False @@ -2066,7 +2066,7 @@ async def test_invalid_hostname_in_kms_certificate(self): await self.client_encrypted.create_data_key("aws", master_key=key) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#kms-tls-options-tests class TestKmsTLSOptions(AsyncEncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") async def asyncSetUp(self): @@ -2272,7 +2272,7 @@ async def test_06_named_kms_providers_apply_tls_options_kmip(self): await self.client_encryption_with_names.create_data_key("kmip:with_tls") -# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): self.client = async_client_context.client @@ -2303,7 +2303,7 @@ async def test_02_add_key_alt_name(self): assert key_doc["keyAltNames"] == ["def"] -# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.md#explicit-encryption class TestExplicitQueryableEncryption(AsyncEncryptionIntegrationTest): @async_client_context.require_no_standalone @async_client_context.require_version_min(7, 0, -1) @@ -2423,7 +2423,7 @@ async def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) -# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap class TestRewrapWithSeparateClientEncryption(AsyncEncryptionIntegrationTest): MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { "aws": { @@ -2505,7 +2505,7 @@ async def run_test(self, src_provider, dst_provider): ) -# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.md#on-demand-aws-credentials class TestOnDemandAWSCredentials(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): await super().asyncSetUp() @@ -2869,7 +2869,7 @@ async def test_accepts_trim_factor_0(self): assert len(payload) > len(self.payload_defaults) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(AsyncEncryptionIntegrationTest): @async_client_context.require_no_standalone @async_client_context.require_version_min(7, 0, -1) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 8f32ac4a2e..b382db474f 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -14,7 +14,7 @@ """Unified test format runner. -https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md """ from __future__ import annotations @@ -431,7 +431,7 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): """Mixin class to run test cases from test specification files. Assumes that tests conform to the `unified test format - `_. + `_. Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. diff --git a/test/test_auth.py b/test/test_auth.py index 310006afff..70c061b747 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -373,7 +373,7 @@ def test_scram_sha1(self): db.command("dbstats") -# https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#scram-sha-256-and-mechanism-negotiation class TestSCRAM(IntegrationTest): @client_context.require_auth @client_context.require_version_min(3, 7, 2) diff --git a/test/test_dbref.py b/test/test_dbref.py index d170f43f56..ac2767a1ce 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -128,7 +128,7 @@ def test_dbref_hash(self): self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) -# https://github.com/mongodb/specifications/blob/master/source/dbref.rst#test-plan +# https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md#test-plan class TestDBRefSpec(unittest.TestCase): def test_decoding_1_2_3(self): doc: Any diff --git a/test/test_encryption.py b/test/test_encryption.py index 373981b1d2..0806f91a06 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1604,7 +1604,7 @@ def test_automatic(self): return self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests class TestDeadlockProse(EncryptionIntegrationTest): def setUp(self): self.client_test = self.rs_or_single_client( @@ -1829,7 +1829,7 @@ def test_case_8(self): self.assertEqual(len(self.topology_listener.results["opened"]), 1) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events class TestDecryptProse(EncryptionIntegrationTest): def setUp(self): self.client = client_context.client @@ -1901,7 +1901,7 @@ def test_04_decrypt_success(self): self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): @unittest.skipIf( os.environ.get("TEST_CRYPT_SHARED"), @@ -1982,7 +1982,7 @@ def test_via_loading_shared_library(self): with self.assertRaises(ServerSelectionTimeoutError): no_mongocryptd_client.db.command("ping") - # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") def test_client_via_loading_shared_library(self): connection_established = False @@ -2058,7 +2058,7 @@ def test_invalid_hostname_in_kms_certificate(self): self.client_encrypted.create_data_key("aws", master_key=key) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#kms-tls-options-tests class TestKmsTLSOptions(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): @@ -2264,7 +2264,7 @@ def test_06_named_kms_providers_apply_tls_options_kmip(self): self.client_encryption_with_names.create_data_key("kmip:with_tls") -# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): def setUp(self): self.client = client_context.client @@ -2293,7 +2293,7 @@ def test_02_add_key_alt_name(self): assert key_doc["keyAltNames"] == ["def"] -# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.md#explicit-encryption class TestExplicitQueryableEncryption(EncryptionIntegrationTest): @client_context.require_no_standalone @client_context.require_version_min(7, 0, -1) @@ -2407,7 +2407,7 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) -# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.rst#rewrap +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { "aws": { @@ -2489,7 +2489,7 @@ def run_test(self, src_provider, dst_provider): ) -# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.rst#on-demand-aws-credentials +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.md#on-demand-aws-credentials class TestOnDemandAWSCredentials(EncryptionIntegrationTest): def setUp(self): super().setUp() @@ -2851,7 +2851,7 @@ def test_accepts_trim_factor_0(self): assert len(payload) > len(self.payload_defaults) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#automatic-data-encryption-keys +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): @client_context.require_no_standalone @client_context.require_version_min(7, 0, -1) diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index b3b68703a4..d782aa1dd7 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -142,7 +142,7 @@ def changed_event(event): @client_context.require_failCommand_appName def test_monitor_waits_after_server_check_error(self): # This test implements: - # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks fail_hello = { "mode": {"times": 5}, "data": { diff --git a/test/unified_format.py b/test/unified_format.py index be7fc1f8ad..0da6168303 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -14,7 +14,7 @@ """Unified test format runner. -https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md """ from __future__ import annotations @@ -431,7 +431,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): """Mixin class to run test cases from test specification files. Assumes that tests conform to the `unified test format - `_. + `_. Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index f1b908a7a6..f315a77f48 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -14,7 +14,7 @@ """Shared utility functions and constants for the unified test format runner. -https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md """ from __future__ import annotations From 32269aac1e9c34c2660329fc911a32e3cac78906 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 31 Oct 2024 14:01:30 -0700 Subject: [PATCH 1600/2111] PYTHON-4885 Fix legacy extended JSON encoding of DatetimeMS (#1986) --- bson/json_util.py | 43 ++++++++++++++++++++++-------------------- doc/changelog.rst | 5 +++++ test/test_json_util.py | 25 +++++++++++++++++++++--- 3 files changed, 50 insertions(+), 23 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index a171327ead..ecae103b55 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -617,25 +617,28 @@ def _parse_canonical_datetime( raise TypeError(f"Bad $date, extra field(s): {doc}") # mongoexport 2.6 and newer if isinstance(dtm, str): - # Parse offset - if dtm[-1] == "Z": - dt = dtm[:-1] - offset = "Z" - elif dtm[-6] in ("+", "-") and dtm[-3] == ":": - # (+|-)HH:MM - dt = dtm[:-6] - offset = dtm[-6:] - elif dtm[-5] in ("+", "-"): - # (+|-)HHMM - dt = dtm[:-5] - offset = dtm[-5:] - elif dtm[-3] in ("+", "-"): - # (+|-)HH - dt = dtm[:-3] - offset = dtm[-3:] - else: - dt = dtm - offset = "" + try: + # Parse offset + if dtm[-1] == "Z": + dt = dtm[:-1] + offset = "Z" + elif dtm[-6] in ("+", "-") and dtm[-3] == ":": + # (+|-)HH:MM + dt = dtm[:-6] + offset = dtm[-6:] + elif dtm[-5] in ("+", "-"): + # (+|-)HHMM + dt = dtm[:-5] + offset = dtm[-5:] + elif dtm[-3] in ("+", "-"): + # (+|-)HH + dt = dtm[:-3] + offset = dtm[-3:] + else: + dt = dtm + offset = "" + except IndexError as exc: + raise ValueError(f"time data {dtm!r} does not match ISO-8601 datetime format") from exc # Parse the optional factional seconds portion. dot_index = dt.rfind(".") @@ -848,7 +851,7 @@ def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: ): return _encode_datetime(obj.as_datetime(), json_options) elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: - return {"$date": str(int(obj))} + return {"$date": int(obj)} return {"$date": {"$numberLong": str(int(obj))}} diff --git a/doc/changelog.rst b/doc/changelog.rst index bd4eafe3ef..d9e6cc3f5b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -28,6 +28,11 @@ PyMongo 4.11 brings a number of changes including: :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` now throw an error when ``ordered=True`` or ``verboseResults=True`` are used with unacknowledged writes. These are unavoidable breaking changes. +- Fixed a bug in :const:`bson.json_util.dumps` where a :class:`bson.datetime_ms.DatetimeMS` would + be incorrectly encoded as ``'{"$date": "X"}'`` instead of ``'{"$date": X}'`` when using the + legacy MongoDB Extended JSON datetime representation. +- Fixed a bug where :const:`bson.json_util.loads` would raise an IndexError when parsing an invalid + ``"$date"`` instead of a ValueError. Issues Resolved ............... diff --git a/test/test_json_util.py b/test/test_json_util.py index 3a40c174e8..821ca76da0 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -137,7 +137,7 @@ def test_datetime(self): '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', - '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}', '{"dt": { "$date" : "1970-01-01T00:00:00"}}', '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', @@ -282,9 +282,9 @@ def test_datetime_ms(self): opts = JSONOptions( datetime_representation=DatetimeRepresentation.LEGACY, json_mode=JSONMode.LEGACY ) - self.assertEqual('{"x": {"$date": "-1"}}', json_util.dumps(dat_min, json_options=opts)) + self.assertEqual('{"x": {"$date": -1}}', json_util.dumps(dat_min, json_options=opts)) self.assertEqual( - '{"x": {"$date": "' + str(int(dat_max["x"])) + '"}}', + '{"x": {"$date": ' + str(int(dat_max["x"])) + "}}", json_util.dumps(dat_max, json_options=opts), ) @@ -317,6 +317,25 @@ def test_datetime_ms(self): json_util.loads(json_util.dumps(dat_max), json_options=opts)["x"], ) + def test_parse_invalid_date(self): + # These cases should raise ValueError, not IndexError. + for invalid in [ + '{"dt": { "$date" : "1970-01-01T00:00:"}}', + '{"dt": { "$date" : "1970-01-01T01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:"}}', + '{"dt": { "$date" : "1970-01-01T01"}}', + '{"dt": { "$date" : "1970-01-01T"}}', + '{"dt": { "$date" : "1970-01-01"}}', + '{"dt": { "$date" : "1970-01-"}}', + '{"dt": { "$date" : "1970-01"}}', + '{"dt": { "$date" : "1970-"}}', + '{"dt": { "$date" : "1970"}}', + '{"dt": { "$date" : "1"}}', + '{"dt": { "$date" : ""}}', + ]: + with self.assertRaisesRegex(ValueError, "does not match"): + json_util.loads(invalid) + def test_regex_object_hook(self): # Extended JSON format regular expression. pat = "a*b" From 9f53f299679af7aab03149dc9f91b0b4dce290bb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 1 Nov 2024 08:24:52 -0500 Subject: [PATCH 1601/2111] PYTHON-4906 Add branch creation workflow to Python Driver (#1971) --- .github/workflows/create-release-branch.yml | 55 +++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 .github/workflows/create-release-branch.yml diff --git a/.github/workflows/create-release-branch.yml b/.github/workflows/create-release-branch.yml new file mode 100644 index 0000000000..f24f94179a --- /dev/null +++ b/.github/workflows/create-release-branch.yml @@ -0,0 +1,55 @@ +name: Create Release Branch + +on: + workflow_dispatch: + inputs: + branch_name: + description: The name of the new branch + required: true + version: + description: The version to set on the branch + required: true + base_ref: + description: The base reference for the branch + push_changes: + description: Whether to push the changes + default: "true" + +concurrency: + group: create-branch-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + create-branch: + environment: release + runs-on: ubuntu-latest + permissions: + id-token: write + contents: write + outputs: + version: ${{ steps.pre-publish.outputs.version }} + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v2 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} + - uses: mongodb-labs/drivers-github-tools/create-branch@v2 + id: create-branch + with: + branch_name: ${{ inputs.branch_name }} + version: ${{ inputs.version }} + base_ref: ${{ inputs.base_ref }} + push_changes: ${{ inputs.push_changes }} + version_bump_script: hatch version + evergreen_project: mongo-python-driver-release + release_workflow_path: ./.github/workflows/release-python.yml From 260322277d519ade76fe85157e8bc2f18c49dfcf Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 1 Nov 2024 13:20:37 -0400 Subject: [PATCH 1602/2111] PYTHON-4926 - Skip tests with errorCodeName on Serverless (#1989) --- .../unified/commit-retry.json | 5 +++++ .../unified/commit-writeconcernerror.json | 17 ++++++++++++++++- test/transactions/unified/retryable-commit.json | 5 +++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/test/transactions-convenient-api/unified/commit-retry.json b/test/transactions-convenient-api/unified/commit-retry.json index 928f0167e4..cc80201167 100644 --- a/test/transactions-convenient-api/unified/commit-retry.json +++ b/test/transactions-convenient-api/unified/commit-retry.json @@ -422,6 +422,11 @@ }, { "description": "commit is not retried after MaxTimeMSExpired error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/transactions-convenient-api/unified/commit-writeconcernerror.json b/test/transactions-convenient-api/unified/commit-writeconcernerror.json index a6f6e6bd7f..a455a450bf 100644 --- a/test/transactions-convenient-api/unified/commit-writeconcernerror.json +++ b/test/transactions-convenient-api/unified/commit-writeconcernerror.json @@ -1,6 +1,6 @@ { "description": "commit-writeconcernerror", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.0", @@ -414,6 +414,11 @@ }, { "description": "commitTransaction is not retried after UnknownReplWriteConcern error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +551,11 @@ }, { "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", @@ -678,6 +688,11 @@ }, { "description": "commitTransaction is not retried after MaxTimeMSExpired error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/transactions/unified/retryable-commit.json b/test/transactions/unified/retryable-commit.json index b794c1c55c..7d7e52495d 100644 --- a/test/transactions/unified/retryable-commit.json +++ b/test/transactions/unified/retryable-commit.json @@ -89,6 +89,11 @@ "tests": [ { "description": "commitTransaction fails after Interrupted", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "object": "testRunner", From f3343aa952720c1151e73ce38d53d0288095ebd1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 1 Nov 2024 13:20:59 -0400 Subject: [PATCH 1603/2111] =?UTF-8?q?PYTHON-4916=20-=20URI=20options=20spe?= =?UTF-8?q?c=20tests=20specify=20empty=20options=20when=20the=20i=E2=80=A6?= =?UTF-8?q?=20(#1991)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/uri_options/auth-options.json | 5 +- test/uri_options/compression-options.json | 6 +- test/uri_options/concern-options.json | 6 +- test/uri_options/connection-options.json | 38 +++--- test/uri_options/connection-pool-options.json | 8 +- test/uri_options/sdam-options.json | 2 +- test/uri_options/single-threaded-options.json | 2 +- test/uri_options/srv-options.json | 12 +- test/uri_options/tls-options.json | 110 +++++++++--------- 9 files changed, 95 insertions(+), 94 deletions(-) diff --git a/test/uri_options/auth-options.json b/test/uri_options/auth-options.json index fadbac35d2..d7fa14a134 100644 --- a/test/uri_options/auth-options.json +++ b/test/uri_options/auth-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid auth options are parsed correctly (GSSAPI)", - "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authSource=$external", + "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authSource=$external", "valid": true, "warning": false, "hosts": null, @@ -11,7 +11,8 @@ "authMechanism": "GSSAPI", "authMechanismProperties": { "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" }, "authSource": "$external" } diff --git a/test/uri_options/compression-options.json b/test/uri_options/compression-options.json index 16bd27b2cc..3c13dee062 100644 --- a/test/uri_options/compression-options.json +++ b/test/uri_options/compression-options.json @@ -35,7 +35,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low zlibCompressionLevel causes a warning", @@ -44,7 +44,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too high zlibCompressionLevel causes a warning", @@ -53,7 +53,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/concern-options.json b/test/uri_options/concern-options.json index 5a8ef6c272..f55f298087 100644 --- a/test/uri_options/concern-options.json +++ b/test/uri_options/concern-options.json @@ -43,7 +43,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low wTimeoutMS causes a warning", @@ -52,7 +52,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Invalid journal causes a warning", @@ -61,7 +61,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index b2669b6cf1..bbaa295ecb 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -27,7 +27,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low connectTimeoutMS causes a warning", @@ -36,7 +36,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric heartbeatFrequencyMS causes a warning", @@ -45,7 +45,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low heartbeatFrequencyMS causes a warning", @@ -54,7 +54,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric localThresholdMS causes a warning", @@ -63,7 +63,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low localThresholdMS causes a warning", @@ -72,7 +72,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Invalid retryWrites causes a warning", @@ -81,7 +81,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric serverSelectionTimeoutMS causes a warning", @@ -90,7 +90,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low serverSelectionTimeoutMS causes a warning", @@ -99,7 +99,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric socketTimeoutMS causes a warning", @@ -108,7 +108,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low socketTimeoutMS causes a warning", @@ -117,7 +117,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "directConnection=true", @@ -137,7 +137,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "directConnection=false", @@ -168,7 +168,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true", @@ -211,7 +211,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true with multiple hosts causes an error", @@ -220,7 +220,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true with directConnection=true causes an error", @@ -229,7 +229,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true with replicaSet causes an error", @@ -238,7 +238,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "timeoutMS=0", @@ -258,7 +258,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low timeoutMS causes a warning", @@ -267,7 +267,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index 118b2f6783..a582867d07 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -21,7 +21,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low maxIdleTimeMS causes a warning", @@ -30,7 +30,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "maxPoolSize=0 does not error", @@ -61,7 +61,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "maxConnecting<0 causes a warning", @@ -70,7 +70,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/sdam-options.json b/test/uri_options/sdam-options.json index 673f5607ee..ae0aeb2821 100644 --- a/test/uri_options/sdam-options.json +++ b/test/uri_options/sdam-options.json @@ -40,7 +40,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/single-threaded-options.json b/test/uri_options/single-threaded-options.json index fcd24fb880..80ac3fa4ee 100644 --- a/test/uri_options/single-threaded-options.json +++ b/test/uri_options/single-threaded-options.json @@ -18,7 +18,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/srv-options.json b/test/uri_options/srv-options.json index ffc356f12f..0670612c0d 100644 --- a/test/uri_options/srv-options.json +++ b/test/uri_options/srv-options.json @@ -18,7 +18,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with srvMaxHosts", @@ -38,7 +38,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with invalid type for srvMaxHosts", @@ -47,7 +47,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-SRV URI with srvMaxHosts", @@ -56,7 +56,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with positive srvMaxHosts and replicaSet", @@ -65,7 +65,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with positive srvMaxHosts and loadBalanced=true", @@ -74,7 +74,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with positive srvMaxHosts and loadBalanced=false", diff --git a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json index 8beaaddd86..526cde1cbe 100644 --- a/test/uri_options/tls-options.json +++ b/test/uri_options/tls-options.json @@ -31,7 +31,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates is parsed correctly", @@ -62,7 +62,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure is parsed correctly", @@ -82,7 +82,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidCertificates both present (and true) raises an error", @@ -91,7 +91,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidCertificates both present (and false) raises an error", @@ -100,7 +100,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsInsecure both present (and true) raises an error", @@ -109,7 +109,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsInsecure both present (and false) raises an error", @@ -118,7 +118,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidHostnames both present (and true) raises an error", @@ -127,7 +127,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidHostnames both present (and false) raises an error", @@ -136,7 +136,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidHostnames and tlsInsecure both present (and true) raises an error", @@ -145,7 +145,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidHostnames and tlsInsecure both present (and false) raises an error", @@ -154,7 +154,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tls=true and ssl=true doesn't warn", @@ -199,7 +199,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tls=true and ssl=false raises error", @@ -208,7 +208,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "ssl=false and tls=true raises error", @@ -217,7 +217,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "ssl=true and tls=false raises error", @@ -226,7 +226,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck can be set to true", @@ -259,7 +259,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=true and tlsDisableCertificateRevocationCheck=false raises an error", @@ -268,7 +268,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", @@ -277,7 +277,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", @@ -286,7 +286,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", @@ -295,7 +295,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", @@ -304,7 +304,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", @@ -313,7 +313,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", @@ -322,7 +322,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", @@ -331,7 +331,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", @@ -340,7 +340,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", @@ -349,7 +349,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", @@ -358,7 +358,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", @@ -367,7 +367,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", @@ -376,7 +376,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", @@ -385,7 +385,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", @@ -394,7 +394,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", @@ -403,7 +403,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an error", @@ -412,7 +412,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", @@ -421,7 +421,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", @@ -430,7 +430,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", @@ -439,7 +439,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", @@ -448,7 +448,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", @@ -457,7 +457,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", @@ -466,7 +466,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck can be set to true", @@ -499,7 +499,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=true and tlsDisableOCSPEndpointCheck=false raises an error", @@ -508,7 +508,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=false and tlsDisableOCSPEndpointCheck=true raises an error", @@ -517,7 +517,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and false) raises an error", @@ -526,7 +526,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and true) raises an error", @@ -535,7 +535,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=true and tlsInsecure=false raises an error", @@ -544,7 +544,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=false and tlsInsecure=true raises an error", @@ -553,7 +553,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and false) raises an error", @@ -562,7 +562,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and true) raises an error", @@ -571,7 +571,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=true and tlsDisableOCSPEndpointCheck=false raises an error", @@ -580,7 +580,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=false and tlsDisableOCSPEndpointCheck=true raises an error", @@ -589,7 +589,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and false) raises an error", @@ -598,7 +598,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and true) raises an error", @@ -607,7 +607,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=true and tlsAllowInvalidCertificates=false raises an error", @@ -616,7 +616,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=false and tlsAllowInvalidCertificates=true raises an error", @@ -625,7 +625,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and false) raises an error", @@ -634,7 +634,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null } ] } From c680f6342750593def5cb67d2d4c617ca23e3bbd Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 1 Nov 2024 13:21:07 -0400 Subject: [PATCH 1604/2111] PYTHON-4917 - Test that inserts and upserts respect null _id values (#1992) --- test/crud/unified/create-null-ids.json | 253 +++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100644 test/crud/unified/create-null-ids.json diff --git a/test/crud/unified/create-null-ids.json b/test/crud/unified/create-null-ids.json new file mode 100644 index 0000000000..8e0c3ac5d1 --- /dev/null +++ b/test/crud/unified/create-null-ids.json @@ -0,0 +1,253 @@ +{ + "description": "create-null-ids", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "crud_id" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "type_tests" + } + } + ], + "initialData": [ + { + "collectionName": "type_tests", + "databaseName": "crud_id", + "documents": [] + } + ], + "tests": [ + { + "description": "inserting _id with type null via insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": null + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": null + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": null + }, + "update": { + "$unset": { + "a": "" + } + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": null + }, + "update": { + "$unset": { + "a": "" + } + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via replaceOne", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "_id": null + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via bulkWrite", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": null + } + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via clientBulkWrite", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud_id.type_tests", + "document": { + "_id": null + } + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + } + ] +} From 6862e94d176c6ee7959d3d114aff301774ecf7df Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 1 Nov 2024 13:21:17 -0400 Subject: [PATCH 1605/2111] PYTHON-4923 - Add mixed case tests for read preference tags (#1990) --- test/uri_options/read-preference-options.json | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/uri_options/read-preference-options.json b/test/uri_options/read-preference-options.json index cdac6a63c3..abbf0d0cc6 100644 --- a/test/uri_options/read-preference-options.json +++ b/test/uri_options/read-preference-options.json @@ -36,6 +36,21 @@ ] } }, + { + "description": "Read preference tags are case sensitive", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:NY", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreferenceTags": [ + { + "dc": "NY" + } + ] + } + }, { "description": "Invalid readPreferenceTags causes a warning", "uri": "mongodb://example.com/?readPreferenceTags=invalid", @@ -43,7 +58,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric maxStalenessSeconds causes a warning", @@ -52,7 +67,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low maxStalenessSeconds causes a warning", @@ -61,7 +76,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } From a9caaf0d6a7b40234dfb13720684c914dfd3e633 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 4 Nov 2024 09:24:29 -0500 Subject: [PATCH 1606/2111] =?UTF-8?q?PYTHON-4941=20-=20Fix=20Synchronous?= =?UTF-8?q?=20unified=20test=20runner=20being=20used=20in=20async=E2=80=A6?= =?UTF-8?q?=20(#1993)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/asynchronous/test_auth_spec.py | 2 +- test/asynchronous/test_change_stream.py | 2 +- test/asynchronous/test_connection_logging.py | 2 +- test/asynchronous/test_crud_unified.py | 2 +- test/asynchronous/test_encryption.py | 2 +- test/asynchronous/unified_format.py | 2 +- test/unified_format.py | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/asynchronous/test_auth_spec.py b/test/asynchronous/test_auth_spec.py index a6ab1cb331..e9e43d5759 100644 --- a/test/asynchronous/test_auth_spec.py +++ b/test/asynchronous/test_auth_spec.py @@ -25,7 +25,7 @@ sys.path[0:0] = [""] from test import unittest -from test.unified_format import generate_test_classes +from test.asynchronous.unified_format import generate_test_classes from pymongo import AsyncMongoClient from pymongo.asynchronous.auth_oidc import OIDCCallback diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 98641f46ee..8e16fe7528 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -35,7 +35,7 @@ async_client_context, unittest, ) -from test.unified_format import generate_test_classes +from test.asynchronous.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, EventListener, diff --git a/test/asynchronous/test_connection_logging.py b/test/asynchronous/test_connection_logging.py index 6bc9835b70..945c6c59b5 100644 --- a/test/asynchronous/test_connection_logging.py +++ b/test/asynchronous/test_connection_logging.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] from test import unittest -from test.unified_format import generate_test_classes +from test.asynchronous.unified_format import generate_test_classes _IS_SYNC = False diff --git a/test/asynchronous/test_crud_unified.py b/test/asynchronous/test_crud_unified.py index 3d8deb36e9..e6f42d5bdf 100644 --- a/test/asynchronous/test_crud_unified.py +++ b/test/asynchronous/test_crud_unified.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] from test import unittest -from test.unified_format import generate_test_classes +from test.asynchronous.unified_format import generate_test_classes _IS_SYNC = False diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index e42c85aa7a..767b3ecf0a 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -46,6 +46,7 @@ unittest, ) from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.unified_format import generate_test_classes from test.asynchronous.utils_spec_runner import AsyncSpecRunner from test.helpers import ( AWS_CREDS, @@ -56,7 +57,6 @@ KMIP_CREDS, LOCAL_MASTER_KEY, ) -from test.unified_format import generate_test_classes from test.utils import ( AllowListEventListener, OvertCommandListener, diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index b382db474f..f8179dc0c7 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -862,7 +862,7 @@ async def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs return await target.create_data_key(*args, **kwargs) async def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): - return await (await target.get_keys(*args, **kwargs)).to_list() + return await target.get_keys(*args, **kwargs).to_list() async def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): result = await target.delete_key(*args, **kwargs) diff --git a/test/unified_format.py b/test/unified_format.py index 0da6168303..80c37470e3 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -858,7 +858,7 @@ def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): return target.create_data_key(*args, **kwargs) def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): - return (target.get_keys(*args, **kwargs)).to_list() + return target.get_keys(*args, **kwargs).to_list() def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): result = target.delete_key(*args, **kwargs) From 57fd616ace819ac4d8535b7009c2b079b6097d57 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 4 Nov 2024 10:26:07 -0600 Subject: [PATCH 1607/2111] PYTHON-4330 Add Kubernetes Support for OIDC (#1759) --- .evergreen/config.yml | 107 +++++++++++++-------- .evergreen/generated_configs/variants.yml | 9 +- .evergreen/run-mongodb-oidc-remote-test.sh | 60 ++++++++++++ .evergreen/run-mongodb-oidc-test.sh | 3 + .evergreen/scripts/generate_config.py | 8 +- pymongo/auth_oidc_shared.py | 14 +++ pymongo/auth_shared.py | 4 + test/auth/legacy/connection-string.json | 20 ++++ test/auth_oidc/test_auth_oidc.py | 48 ++------- test/unified_format_shared.py | 2 + 10 files changed, 190 insertions(+), 85 deletions(-) create mode 100755 .evergreen/run-mongodb-oidc-remote-test.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index fda6864317..9de7a85e26 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -520,6 +520,18 @@ functions: args: - .evergreen/run-mongodb-oidc-test.sh + "run oidc k8s auth test": + - command: subprocess.exec + type: test + params: + binary: bash + working_dir: src + env: + OIDC_ENV: k8s + include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "K8S_VARIANT"] + args: + - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh + "run aws auth test with aws credentials as environment variables": - command: shell.exec type: test @@ -873,6 +885,32 @@ task_groups: tasks: - oidc-auth-test-gcp + - name: testk8soidc_task_group + setup_group: + - func: fetch source + - func: prepare resources + - func: fix absolute paths + - func: make files executable + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + duration_seconds: 1800 + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/setup.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/teardown.sh + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + tasks: + - oidc-auth-test-k8s + - name: testoidc_task_group setup_group: - func: fetch source @@ -1548,40 +1586,41 @@ tasks: - name: "oidc-auth-test-azure" commands: - - command: shell.exec + - command: subprocess.exec type: test params: - shell: bash - script: |- - set -o errexit - . src/.evergreen/scripts/env.sh - cd src - git add . - git commit -m "add files" - export AZUREOIDC_DRIVERS_TAR_FILE=/tmp/mongo-python-driver.tgz - git archive -o $AZUREOIDC_DRIVERS_TAR_FILE HEAD - export AZUREOIDC_TEST_CMD="OIDC_ENV=azure ./.evergreen/run-mongodb-oidc-test.sh" - bash $DRIVERS_TOOLS/.evergreen/auth_oidc/azure/run-driver-test.sh + binary: bash + working_dir: src + env: + OIDC_ENV: azure + include_expansions_in_env: ["DRIVERS_TOOLS"] + args: + - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh - name: "oidc-auth-test-gcp" commands: - - command: shell.exec + - command: subprocess.exec type: test params: - shell: bash - script: |- - set -o errexit - . src/.evergreen/scripts/env.sh - cd src - git add . - git commit -m "add files" - export GCPOIDC_DRIVERS_TAR_FILE=/tmp/mongo-python-driver.tgz - git archive -o $GCPOIDC_DRIVERS_TAR_FILE HEAD - # Define the command to run on the VM. - # Ensure that we source the environment file created for us, set up any other variables we need, - # and then run our test suite on the vm. - export GCPOIDC_TEST_CMD="OIDC_ENV=gcp ./.evergreen/run-mongodb-oidc-test.sh" - bash $DRIVERS_TOOLS/.evergreen/auth_oidc/gcp/run-driver-test.sh + binary: bash + working_dir: src + env: + OIDC_ENV: gcp + include_expansions_in_env: ["DRIVERS_TOOLS"] + args: + - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh + + - name: "oidc-auth-test-k8s" + commands: + - func: "run oidc k8s auth test" + vars: + K8S_VARIANT: eks + - func: "run oidc k8s auth test" + vars: + K8S_VARIANT: gke + - func: "run oidc k8s auth test" + vars: + K8S_VARIANT: aks # }}} - name: "coverage-report" tags: ["coverage"] @@ -1740,20 +1779,6 @@ buildvariants: tasks: - name: "coverage-report" -- name: testazureoidc-variant - display_name: "OIDC Auth Azure" - run_on: ubuntu2204-small - tasks: - - name: testazureoidc_task_group - batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - -- name: testgcpoidc-variant - display_name: "OIDC Auth GCP" - run_on: ubuntu2204-small - tasks: - - name: testgcpoidc_task_group - batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - - name: testgcpkms-variant display_name: "GCP KMS" run_on: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 0a4e5cfb14..c2a9a70016 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -955,12 +955,15 @@ buildvariants: VERSION: "8.0" # Oidc auth tests - - name: oidc-auth-rhel8 + - name: oidc-auth-ubuntu-22 tasks: - name: testoidc_task_group - display_name: OIDC Auth RHEL8 + - name: testazureoidc_task_group + - name: testgcpoidc_task_group + - name: testk8soidc_task_group + display_name: OIDC Auth Ubuntu-22 run_on: - - rhel87-small + - ubuntu2204-small batchtime: 20160 - name: oidc-auth-macos tasks: diff --git a/.evergreen/run-mongodb-oidc-remote-test.sh b/.evergreen/run-mongodb-oidc-remote-test.sh new file mode 100755 index 0000000000..bb90bddf07 --- /dev/null +++ b/.evergreen/run-mongodb-oidc-remote-test.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +set +x # Disable debug trace +set -eu + +echo "Running MONGODB-OIDC remote tests" + +OIDC_ENV=${OIDC_ENV:-"test"} + +# Make sure DRIVERS_TOOLS is set. +if [ -z "$DRIVERS_TOOLS" ]; then + echo "Must specify DRIVERS_TOOLS" + exit 1 +fi + +# Set up the remote files to test. +git add . +git commit -m "add files" || true +export TEST_TAR_FILE=/tmp/mongo-python-driver.tgz +git archive -o $TEST_TAR_FILE HEAD + +pushd $DRIVERS_TOOLS + +if [ $OIDC_ENV == "test" ]; then + echo "Test OIDC environment does not support remote test!" + exit 1 + +elif [ $OIDC_ENV == "azure" ]; then + export AZUREOIDC_DRIVERS_TAR_FILE=$TEST_TAR_FILE + export AZUREOIDC_TEST_CMD="OIDC_ENV=azure ./.evergreen/run-mongodb-oidc-test.sh" + bash ./.evergreen/auth_oidc/azure/run-driver-test.sh + +elif [ $OIDC_ENV == "gcp" ]; then + export GCPOIDC_DRIVERS_TAR_FILE=$TEST_TAR_FILE + export GCPOIDC_TEST_CMD="OIDC_ENV=gcp ./.evergreen/run-mongodb-oidc-test.sh" + bash ./.evergreen/auth_oidc/gcp/run-driver-test.sh + +elif [ $OIDC_ENV == "k8s" ]; then + # Make sure K8S_VARIANT is set. + if [ -z "$K8S_VARIANT" ]; then + echo "Must specify K8S_VARIANT" + popd + exit 1 + fi + + bash ./.evergreen/auth_oidc/k8s/setup-pod.sh + bash ./.evergreen/auth_oidc/k8s/run-self-test.sh + export K8S_DRIVERS_TAR_FILE=$TEST_TAR_FILE + export K8S_TEST_CMD="OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh" + source ./.evergreen/auth_oidc/k8s/secrets-export.sh # for MONGODB_URI + bash ./.evergreen/auth_oidc/k8s/run-driver-test.sh + bash ./.evergreen/auth_oidc/k8s/teardown-pod.sh + +else + echo "Unrecognized OIDC_ENV $OIDC_ENV" + pod + exit 1 +fi + +popd diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 0c34912c8a..22864528c0 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -21,6 +21,9 @@ elif [ $OIDC_ENV == "azure" ]; then elif [ $OIDC_ENV == "gcp" ]; then source ./secrets-export.sh +elif [ $OIDC_ENV == "k8s" ]; then + echo "Running oidc on k8s" + else echo "Unrecognized OIDC_ENV $OIDC_ENV" exit 1 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 9abcc6516a..dfaad0f835 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -615,10 +615,14 @@ def create_serverless_variants(): def create_oidc_auth_variants(): variants = [] - for host in ["rhel8", "macos", "win64"]: + other_tasks = ["testazureoidc_task_group", "testgcpoidc_task_group", "testk8soidc_task_group"] + for host in ["ubuntu22", "macos", "win64"]: + tasks = ["testoidc_task_group"] + if host == "ubuntu22": + tasks += other_tasks variants.append( create_variant( - ["testoidc_task_group"], + tasks, get_display_name("OIDC Auth", host), host=host, batchtime=BATCHTIME_WEEK * 2, diff --git a/pymongo/auth_oidc_shared.py b/pymongo/auth_oidc_shared.py index 5e3603fa31..9e0acaf6c8 100644 --- a/pymongo/auth_oidc_shared.py +++ b/pymongo/auth_oidc_shared.py @@ -116,3 +116,17 @@ def __init__(self, token_resource: str) -> None: def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: resp = _get_gcp_response(self.token_resource, context.timeout_seconds) return OIDCCallbackResult(access_token=resp["access_token"]) + + +class _OIDCK8SCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + return OIDCCallbackResult(access_token=_get_k8s_token()) + + +def _get_k8s_token() -> str: + fname = "/var/run/secrets/kubernetes.io/serviceaccount/token" + for key in ["AZURE_FEDERATED_TOKEN_FILE", "AWS_WEB_IDENTITY_TOKEN_FILE"]: + if key in os.environ: + fname = os.environ[key] + with open(fname) as fid: + return fid.read() diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py index fa25aa3faa..1e1ce7b4d8 100644 --- a/pymongo/auth_shared.py +++ b/pymongo/auth_shared.py @@ -26,6 +26,7 @@ from pymongo.auth_oidc_shared import ( _OIDCAzureCallback, _OIDCGCPCallback, + _OIDCK8SCallback, _OIDCProperties, _OIDCTestCallback, ) @@ -192,6 +193,9 @@ def _build_credentials_tuple( "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property" ) callback = _OIDCGCPCallback(token_resource) + elif environ == "k8s": + passwd = None + callback = _OIDCK8SCallback() else: raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}") else: diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json index 67aafbff6e..3a099c8137 100644 --- a/test/auth/legacy/connection-string.json +++ b/test/auth/legacy/connection-string.json @@ -626,6 +626,26 @@ "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp", "valid": false, "credential": null + }, + { + "description": "should recognise the mechanism with k8s provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:k8s", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "k8s" + } + } + }, + { + "description": "should throw an error for a username and password with k8s provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:k8s", + "valid": false, + "credential": null } ] } diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 6526391daf..a0127304c1 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -37,6 +37,7 @@ from pymongo import MongoClient from pymongo._azure_helpers import _get_azure_response from pymongo._gcp_helpers import _get_gcp_response +from pymongo.auth_oidc_shared import _get_k8s_token from pymongo.cursor_shared import CursorType from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure from pymongo.hello import HelloCompat @@ -84,6 +85,10 @@ def get_token(self, username=None): opts = parse_uri(self.uri_single)["options"] token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] return _get_gcp_response(token_aud, username)["access_token"] + elif ENVIRON == "k8s": + return _get_k8s_token() + else: + raise ValueError(f"Unknown ENVIRON: {ENVIRON}") @contextmanager def fail_point(self, command_args): @@ -758,7 +763,9 @@ def create_client(self, *args, **kwargs): kwargs["retryReads"] = False if not len(args): args = [self.uri_single] - return MongoClient(*args, authmechanismproperties=props, **kwargs) + client = MongoClient(*args, authmechanismproperties=props, **kwargs) + self.addCleanup(client.close) + return client def test_1_1_callback_is_called_during_reauthentication(self): # Create a ``MongoClient`` configured with a custom OIDC callback that @@ -768,8 +775,6 @@ def test_1_1_callback_is_called_during_reauthentication(self): client.test.test.find_one() # Assert that the callback was called 1 time. self.assertEqual(self.request_called, 1) - # Close the client. - client.close() def test_1_2_callback_is_called_once_for_multiple_connections(self): # Create a ``MongoClient`` configured with a custom OIDC callback that @@ -790,8 +795,6 @@ def target(): thread.join() # Assert that the callback was called 1 time. self.assertEqual(self.request_called, 1) - # Close the client. - client.close() def test_2_1_valid_callback_inputs(self): # Create a MongoClient configured with an OIDC callback that validates its inputs and returns a valid access token. @@ -800,8 +803,6 @@ def test_2_1_valid_callback_inputs(self): client.test.test.find_one() # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields. self.assertEqual(self.request_called, 1) - # Close the client. - client.close() def test_2_2_oidc_callback_returns_null(self): # Create a MongoClient configured with an OIDC callback that returns null. @@ -813,8 +814,6 @@ def fetch(self, a): # Perform a find operation that fails. with self.assertRaises(ValueError): client.test.test.find_one() - # Close the client. - client.close() def test_2_3_oidc_callback_returns_missing_data(self): # Create a MongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields. @@ -829,8 +828,6 @@ def fetch(self, a): # Perform a find operation that fails. with self.assertRaises(ValueError): client.test.test.find_one() - # Close the client. - client.close() def test_2_4_invalid_client_configuration_with_callback(self): # Create a MongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test. @@ -870,8 +867,6 @@ def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_ret client.test.test.find_one() # Verify that the callback was called 1 time. self.assertEqual(self.request_called, 1) - # Close the client. - client.close() def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self): # Create a MongoClient configured with retryReads=false and an OIDC callback that always returns invalid access tokens. @@ -889,8 +884,6 @@ def fetch(self, a): client.test.test.find_one() # Verify that the callback was called 1 time. self.assertEqual(callback.count, 1) - # Close the client. - client.close() def test_3_3_unexpected_error_code_does_not_clear_cache(self): # Create a ``MongoClient`` with a human callback that returns a valid token @@ -916,9 +909,6 @@ def test_3_3_unexpected_error_code_does_not_clear_cache(self): # Assert that the callback has been called once. self.assertEqual(self.request_called, 1) - # Close the client. - client.close() - def test_4_1_reauthentication_succeds(self): # Create a ``MongoClient`` configured with a custom OIDC callback that # implements the provider logic. @@ -938,9 +928,6 @@ def test_4_1_reauthentication_succeds(self): # handshake, and again during reauthentication). self.assertEqual(self.request_called, 2) - # Close the client. - client.close() - def test_4_2_read_commands_fail_if_reauthentication_fails(self): # Create a ``MongoClient`` whose OIDC callback returns one good token and then # bad tokens after the first call. @@ -977,9 +964,6 @@ def fetch(self, _): # Verify that the callback was called 2 times. self.assertEqual(callback.count, 2) - # Close the client. - client.close() - def test_4_3_write_commands_fail_if_reauthentication_fails(self): # Create a ``MongoClient`` whose OIDC callback returns one good token and then # bad token after the first call. @@ -1016,12 +1000,9 @@ def fetch(self, _): # Verify that the callback was called 2 times. self.assertEqual(callback.count, 2) - # Close the client. - client.close() - def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self): # Create an OIDC configured client that can listen for `SaslStart` commands. - listener = OvertCommandListener() + listener = EventListener() client = self.create_client(event_listeners=[listener]) # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication. @@ -1061,9 +1042,6 @@ def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(se # Assert there were `SaslStart` commands executed. assert any(event.command_name.lower() == "saslstart" for event in listener.started_events) - # Close the client. - client.close() - def test_5_1_azure_with_no_username(self): if ENVIRON != "azure": raise unittest.SkipTest("Test is only supported on Azure") @@ -1073,7 +1051,6 @@ def test_5_1_azure_with_no_username(self): props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") client = self.create_client(authMechanismProperties=props) client.test.test.find_one() - client.close() def test_5_2_azure_with_bad_username(self): if ENVIRON != "azure": @@ -1086,7 +1063,6 @@ def test_5_2_azure_with_bad_username(self): client = self.create_client(username="bad", authmechanismproperties=props) with self.assertRaises(ValueError): client.test.test.find_one() - client.close() def test_speculative_auth_success(self): client1 = self.create_client() @@ -1108,10 +1084,6 @@ def test_speculative_auth_success(self): # Perform a find operation. client2.test.test.find_one() - # Close the clients. - client2.close() - client1.close() - def test_reauthentication_succeeds_multiple_connections(self): client1 = self.create_client() client2 = self.create_client() @@ -1151,8 +1123,6 @@ def test_reauthentication_succeeds_multiple_connections(self): client2.test.test.find_one() self.assertEqual(self.request_called, 3) - client1.close() - client2.close() if __name__ == "__main__": diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index f315a77f48..1c87fb3f18 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -137,6 +137,8 @@ "ENVIRONMENT": "gcp", "TOKEN_RESOURCE": os.environ["GCPOIDC_AUDIENCE"], } +elif OIDC_ENV == "k8s": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "k8s"} def with_metaclass(meta, *bases): From 81bef719339888401d6803a1fd0331376495bff7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 11:41:09 -0600 Subject: [PATCH 1608/2111] Bump pyright from 1.1.384 to 1.1.385 (#1954) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jib Co-authored-by: Steven Silvester --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index db0825c2b1..b1f07604dc 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.13.0 -pyright==1.1.384 +pyright==1.1.385 typing_extensions -r ./encryption.txt -r ./ocsp.txt From 91d0d897c0843a7c9939815f3e4628541c49de7b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 4 Nov 2024 11:46:37 -0600 Subject: [PATCH 1609/2111] PYTHON-4943 Clean up EVG Variant Display Names (#1994) --- .evergreen/config.yml | 17 +- .evergreen/generated_configs/variants.yml | 290 ++++++++++++++-------- .evergreen/scripts/generate_config.py | 33 +-- 3 files changed, 216 insertions(+), 124 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 9de7a85e26..fc1713a88e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1766,7 +1766,7 @@ tasks: buildvariants: - name: "no-server" - display_name: "No server test" + display_name: "No server" run_on: - rhel84-small tasks: @@ -1779,31 +1779,26 @@ buildvariants: tasks: - name: "coverage-report" -- name: testgcpkms-variant - display_name: "GCP KMS" +- name: testkms-variant + display_name: "KMS" run_on: - debian11-small tasks: - name: testgcpkms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - testgcpkms-fail-task - -- name: testazurekms-variant - display_name: "Azure KMS" - run_on: debian11-small - tasks: - name: testazurekms_task_group batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - testazurekms-fail-task - name: rhel8-test-lambda - display_name: AWS Lambda handler tests + display_name: FaaS Lambda run_on: rhel87-small tasks: - name: test_aws_lambda_task_group - name: rhel8-import-time - display_name: Import Time Check + display_name: Import Time run_on: rhel87-small tasks: - name: "check-import-time" @@ -1816,7 +1811,7 @@ buildvariants: - name: "backport-pr" - name: "perf-tests" - display_name: "Performance Benchmark Tests" + display_name: "Performance Benchmarks" batchtime: 10080 # 7 days run_on: rhel90-dbx-perf-large tasks: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index c2a9a70016..9ee51899f4 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -106,7 +106,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Aws auth tests - - name: aws-auth-ubuntu-20-py3.9 + - name: auth-aws-ubuntu-20-py3.9 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -115,12 +115,12 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: AWS Auth Ubuntu-20 py3.9 + display_name: Auth AWS Ubuntu-20 py3.9 run_on: - ubuntu2004-small expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: aws-auth-ubuntu-20-py3.13 + - name: auth-aws-ubuntu-20-py3.13 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -129,12 +129,12 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: AWS Auth Ubuntu-20 py3.13 + display_name: Auth AWS Ubuntu-20 py3.13 run_on: - ubuntu2004-small expansions: PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: aws-auth-win64-py3.9 + - name: auth-aws-win64-py3.9 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -143,13 +143,13 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: AWS Auth Win64 py3.9 + display_name: Auth AWS Win64 py3.9 run_on: - windows-64-vsMulti-small expansions: skip_ECS_auth_test: "true" PYTHON_BINARY: C:/python/Python39/python.exe - - name: aws-auth-win64-py3.13 + - name: auth-aws-win64-py3.13 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -158,13 +158,13 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: AWS Auth Win64 py3.13 + display_name: Auth AWS Win64 py3.13 run_on: - windows-64-vsMulti-small expansions: skip_ECS_auth_test: "true" PYTHON_BINARY: C:/python/Python313/python.exe - - name: aws-auth-macos-py3.9 + - name: auth-aws-macos-py3.9 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -173,7 +173,7 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: AWS Auth macOS py3.9 + display_name: Auth AWS macOS py3.9 run_on: - macos-14 expansions: @@ -181,7 +181,7 @@ buildvariants: skip_EC2_auth_test: "true" skip_web_identity_auth_test: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: aws-auth-macos-py3.13 + - name: auth-aws-macos-py3.13 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -190,7 +190,7 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: AWS Auth macOS py3.13 + display_name: Auth AWS macOS py3.13 run_on: - macos-14 expansions: @@ -200,85 +200,85 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 # Compression tests - - name: snappy-compression-rhel8-py3.9-no-c + - name: compression-snappy-rhel8-py3.9-no-c tasks: - name: .standalone .noauth .nossl .sync_async - display_name: snappy compression RHEL8 py3.9 No C + display_name: Compression snappy RHEL8 py3.9 No C run_on: - rhel87-small expansions: COMPRESSORS: snappy NO_EXT: "1" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: snappy-compression-rhel8-py3.10 + - name: compression-snappy-rhel8-py3.10 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: snappy compression RHEL8 py3.10 + display_name: Compression snappy RHEL8 py3.10 run_on: - rhel87-small expansions: COMPRESSORS: snappy PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: zlib-compression-rhel8-py3.11-no-c + - name: compression-zlib-rhel8-py3.11-no-c tasks: - name: .standalone .noauth .nossl .sync_async - display_name: zlib compression RHEL8 py3.11 No C + display_name: Compression zlib RHEL8 py3.11 No C run_on: - rhel87-small expansions: COMPRESSORS: zlib NO_EXT: "1" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: zlib-compression-rhel8-py3.12 + - name: compression-zlib-rhel8-py3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: zlib compression RHEL8 py3.12 + display_name: Compression zlib RHEL8 py3.12 run_on: - rhel87-small expansions: COMPRESSORS: zlib PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: zstd-compression-rhel8-py3.13-no-c + - name: compression-zstd-rhel8-py3.13-no-c tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: zstd compression RHEL8 py3.13 No C + display_name: Compression zstd RHEL8 py3.13 No C run_on: - rhel87-small expansions: COMPRESSORS: zstd NO_EXT: "1" PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: zstd-compression-rhel8-py3.9 + - name: compression-zstd-rhel8-py3.9 tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: zstd compression RHEL8 py3.9 + display_name: Compression zstd RHEL8 py3.9 run_on: - rhel87-small expansions: COMPRESSORS: zstd PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: snappy-compression-rhel8-pypy3.9 + - name: compression-snappy-rhel8-pypy3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: snappy compression RHEL8 pypy3.9 + display_name: Compression snappy RHEL8 pypy3.9 run_on: - rhel87-small expansions: COMPRESSORS: snappy PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: zlib-compression-rhel8-pypy3.10 + - name: compression-zlib-rhel8-pypy3.10 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: zlib compression RHEL8 pypy3.10 + display_name: Compression zlib RHEL8 pypy3.10 run_on: - rhel87-small expansions: COMPRESSORS: zlib PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: zstd-compression-rhel8-pypy3.9 + - name: compression-zstd-rhel8-pypy3.9 tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: zstd compression RHEL8 pypy3.9 + display_name: Compression zstd RHEL8 pypy3.9 run_on: - rhel87-small expansions: @@ -564,64 +564,64 @@ buildvariants: tags: [encryption_tag] # Enterprise auth tests - - name: enterprise-auth-macos-py3.9-auth + - name: auth-enterprise-macos-py3.9-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth macOS py3.9 Auth + display_name: Auth Enterprise macOS py3.9 Auth run_on: - macos-14 expansions: AUTH: auth PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: enterprise-auth-rhel8-py3.10-auth + - name: auth-enterprise-rhel8-py3.10-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 py3.10 Auth + display_name: Auth Enterprise RHEL8 py3.10 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: enterprise-auth-rhel8-py3.11-auth + - name: auth-enterprise-rhel8-py3.11-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 py3.11 Auth + display_name: Auth Enterprise RHEL8 py3.11 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: enterprise-auth-rhel8-py3.12-auth + - name: auth-enterprise-rhel8-py3.12-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 py3.12 Auth + display_name: Auth Enterprise RHEL8 py3.12 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: enterprise-auth-win64-py3.13-auth + - name: auth-enterprise-win64-py3.13-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth Win64 py3.13 Auth + display_name: Auth Enterprise Win64 py3.13 Auth run_on: - windows-64-vsMulti-small expansions: AUTH: auth PYTHON_BINARY: C:/python/Python313/python.exe - - name: enterprise-auth-rhel8-pypy3.9-auth + - name: auth-enterprise-rhel8-pypy3.9-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 pypy3.9 Auth + display_name: Auth Enterprise RHEL8 pypy3.9 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: enterprise-auth-rhel8-pypy3.10-auth + - name: auth-enterprise-rhel8-pypy3.10-auth tasks: - name: test-enterprise-auth - display_name: Enterprise Auth RHEL8 pypy3.10 Auth + display_name: Auth Enterprise RHEL8 pypy3.10 Auth run_on: - rhel87-small expansions: @@ -629,10 +629,10 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Green framework tests - - name: eventlet-rhel8-py3.9 + - name: green-eventlet-rhel8-py3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Eventlet RHEL8 py3.9 + display_name: Green Eventlet RHEL8 py3.9 run_on: - rhel87-small expansions: @@ -640,10 +640,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: gevent-rhel8-py3.9 + - name: green-gevent-rhel8-py3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Gevent RHEL8 py3.9 + display_name: Green Gevent RHEL8 py3.9 run_on: - rhel87-small expansions: @@ -651,10 +651,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: eventlet-rhel8-py3.12 + - name: green-eventlet-rhel8-py3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Eventlet RHEL8 py3.12 + display_name: Green Eventlet RHEL8 py3.12 run_on: - rhel87-small expansions: @@ -662,10 +662,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: gevent-rhel8-py3.12 + - name: green-gevent-rhel8-py3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Gevent RHEL8 py3.12 + display_name: Green Gevent RHEL8 py3.12 run_on: - rhel87-small expansions: @@ -727,10 +727,10 @@ buildvariants: VERSION: latest # Mockupdb tests - - name: mockupdb-tests-rhel8-py3.9 + - name: mockupdb-rhel8-py3.9 tasks: - name: mockupdb - display_name: MockupDB Tests RHEL8 py3.9 + display_name: MockupDB RHEL8 py3.9 run_on: - rhel87-small expansions: @@ -810,10 +810,10 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Ocsp tests - - name: ocsp-test-rhel8-v4.4-py3.9 + - name: ocsp-rhel8-v4.4-py3.9 tasks: - name: .ocsp - display_name: OCSP test RHEL8 v4.4 py3.9 + display_name: OCSP RHEL8 v4.4 py3.9 run_on: - rhel87-small batchtime: 20160 @@ -823,10 +823,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "4.4" - - name: ocsp-test-rhel8-v5.0-py3.10 + - name: ocsp-rhel8-v5.0-py3.10 tasks: - name: .ocsp - display_name: OCSP test RHEL8 v5.0 py3.10 + display_name: OCSP RHEL8 v5.0 py3.10 run_on: - rhel87-small batchtime: 20160 @@ -836,10 +836,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.10/bin/python3 VERSION: "5.0" - - name: ocsp-test-rhel8-v6.0-py3.11 + - name: ocsp-rhel8-v6.0-py3.11 tasks: - name: .ocsp - display_name: OCSP test RHEL8 v6.0 py3.11 + display_name: OCSP RHEL8 v6.0 py3.11 run_on: - rhel87-small batchtime: 20160 @@ -849,10 +849,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.11/bin/python3 VERSION: "6.0" - - name: ocsp-test-rhel8-v7.0-py3.12 + - name: ocsp-rhel8-v7.0-py3.12 tasks: - name: .ocsp - display_name: OCSP test RHEL8 v7.0 py3.12 + display_name: OCSP RHEL8 v7.0 py3.12 run_on: - rhel87-small batchtime: 20160 @@ -862,10 +862,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.12/bin/python3 VERSION: "7.0" - - name: ocsp-test-rhel8-v8.0-py3.13 + - name: ocsp-rhel8-v8.0-py3.13 tasks: - name: .ocsp - display_name: OCSP test RHEL8 v8.0 py3.13 + display_name: OCSP RHEL8 v8.0 py3.13 run_on: - rhel87-small batchtime: 20160 @@ -875,10 +875,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/3.13/bin/python3 VERSION: "8.0" - - name: ocsp-test-rhel8-rapid-pypy3.9 + - name: ocsp-rhel8-rapid-pypy3.9 tasks: - name: .ocsp - display_name: OCSP test RHEL8 rapid pypy3.9 + display_name: OCSP RHEL8 rapid pypy3.9 run_on: - rhel87-small batchtime: 20160 @@ -888,10 +888,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 VERSION: rapid - - name: ocsp-test-rhel8-latest-pypy3.10 + - name: ocsp-rhel8-latest-pypy3.10 tasks: - name: .ocsp - display_name: OCSP test RHEL8 latest pypy3.10 + display_name: OCSP RHEL8 latest pypy3.10 run_on: - rhel87-small batchtime: 20160 @@ -901,10 +901,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 VERSION: latest - - name: ocsp-test-win64-v4.4-py3.9 + - name: ocsp-win64-v4.4-py3.9 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test Win64 v4.4 py3.9 + display_name: OCSP Win64 v4.4 py3.9 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -914,10 +914,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: C:/python/Python39/python.exe VERSION: "4.4" - - name: ocsp-test-win64-v8.0-py3.13 + - name: ocsp-win64-v8.0-py3.13 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test Win64 v8.0 py3.13 + display_name: OCSP Win64 v8.0 py3.13 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -927,10 +927,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: C:/python/Python313/python.exe VERSION: "8.0" - - name: ocsp-test-macos-v4.4-py3.9 + - name: ocsp-macos-v4.4-py3.9 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test macOS v4.4 py3.9 + display_name: OCSP macOS v4.4 py3.9 run_on: - macos-14 batchtime: 20160 @@ -940,10 +940,10 @@ buildvariants: TOPOLOGY: server PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 VERSION: "4.4" - - name: ocsp-test-macos-v8.0-py3.13 + - name: ocsp-macos-v8.0-py3.13 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP test macOS v8.0 py3.13 + display_name: OCSP macOS v8.0 py3.13 run_on: - macos-14 batchtime: 20160 @@ -955,27 +955,27 @@ buildvariants: VERSION: "8.0" # Oidc auth tests - - name: oidc-auth-ubuntu-22 + - name: auth-oidc-ubuntu-22 tasks: - name: testoidc_task_group - name: testazureoidc_task_group - name: testgcpoidc_task_group - name: testk8soidc_task_group - display_name: OIDC Auth Ubuntu-22 + display_name: Auth OIDC Ubuntu-22 run_on: - ubuntu2204-small batchtime: 20160 - - name: oidc-auth-macos + - name: auth-oidc-macos tasks: - name: testoidc_task_group - display_name: OIDC Auth macOS + display_name: Auth OIDC macOS run_on: - macos-14 batchtime: 20160 - - name: oidc-auth-win64 + - name: auth-oidc-win64 tasks: - name: testoidc_task_group - display_name: OIDC Auth Win64 + display_name: Auth OIDC Win64 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -1075,7 +1075,7 @@ buildvariants: - name: .standalone .sync_async - name: .replica_set .sync_async - name: .sharded_cluster .sync_async - display_name: Test RHEL8 py3.9 cov + display_name: "* Test RHEL8 py3.9 cov" run_on: - rhel87-small expansions: @@ -1087,7 +1087,7 @@ buildvariants: - name: .standalone .sync_async - name: .replica_set .sync_async - name: .sharded_cluster .sync_async - display_name: Test RHEL8 py3.13 cov + display_name: "* Test RHEL8 py3.13 cov" run_on: - rhel87-small expansions: @@ -1099,7 +1099,7 @@ buildvariants: - name: .standalone .sync_async - name: .replica_set .sync_async - name: .sharded_cluster .sync_async - display_name: Test RHEL8 pypy3.10 cov + display_name: "* Test RHEL8 pypy3.10 cov" run_on: - rhel87-small expansions: @@ -1111,7 +1111,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Test RHEL8 py3.10 + display_name: "* Test RHEL8 py3.10" run_on: - rhel87-small expansions: @@ -1122,7 +1122,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Test RHEL8 py3.11 + display_name: "* Test RHEL8 py3.11" run_on: - rhel87-small expansions: @@ -1133,7 +1133,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Test RHEL8 py3.12 + display_name: "* Test RHEL8 py3.12" run_on: - rhel87-small expansions: @@ -1144,7 +1144,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Test RHEL8 pypy3.9 + display_name: "* Test RHEL8 pypy3.9" run_on: - rhel87-small expansions: @@ -1155,7 +1155,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: Test macOS py3.9 + display_name: "* Test macOS py3.9" run_on: - macos-14 expansions: @@ -1166,7 +1166,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: Test macOS py3.13 + display_name: "* Test macOS py3.13" run_on: - macos-14 expansions: @@ -1189,7 +1189,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .latest !.sync_async - name: .replica_set .noauth .ssl .latest !.sync_async - name: .standalone .noauth .nossl .latest !.sync_async - display_name: Test macOS Arm64 py3.9 + display_name: "* Test macOS Arm64 py3.9" run_on: - macos-14-arm64 expansions: @@ -1212,7 +1212,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .latest !.sync_async - name: .replica_set .noauth .ssl .latest !.sync_async - name: .standalone .noauth .nossl .latest !.sync_async - display_name: Test macOS Arm64 py3.13 + display_name: "* Test macOS Arm64 py3.13" run_on: - macos-14-arm64 expansions: @@ -1223,7 +1223,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: Test Win64 py3.9 + display_name: "* Test Win64 py3.9" run_on: - windows-64-vsMulti-small expansions: @@ -1234,7 +1234,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: Test Win64 py3.13 + display_name: "* Test Win64 py3.13" run_on: - windows-64-vsMulti-small expansions: @@ -1245,7 +1245,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: Test Win32 py3.9 + display_name: "* Test Win32 py3.9" run_on: - windows-64-vsMulti-small expansions: @@ -1256,7 +1256,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: Test Win32 py3.13 + display_name: "* Test Win32 py3.13" run_on: - windows-64-vsMulti-small expansions: @@ -1288,3 +1288,99 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.13/bin/python3 + + # Stable api tests + - name: stable-api-require-v1-rhel8-py3.9-auth + tasks: + - name: .standalone .5.0 .noauth .nossl .sync_async + - name: .standalone .6.0 .noauth .nossl .sync_async + - name: .standalone .7.0 .noauth .nossl .sync_async + - name: .standalone .8.0 .noauth .nossl .sync_async + - name: .standalone .rapid .noauth .nossl .sync_async + - name: .standalone .latest .noauth .nossl .sync_async + display_name: Stable API require v1 RHEL8 py3.9 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + REQUIRE_API_VERSION: "1" + MONGODB_API_VERSION: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [versionedApi_tag] + - name: stable-api-accept-v2-rhel8-py3.9-auth + tasks: + - name: .standalone .5.0 .noauth .nossl .sync_async + - name: .standalone .6.0 .noauth .nossl .sync_async + - name: .standalone .7.0 .noauth .nossl .sync_async + - name: .standalone .8.0 .noauth .nossl .sync_async + - name: .standalone .rapid .noauth .nossl .sync_async + - name: .standalone .latest .noauth .nossl .sync_async + display_name: Stable API accept v2 RHEL8 py3.9 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + ORCHESTRATION_FILE: versioned-api-testing.json + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [versionedApi_tag] + - name: stable-api-require-v1-rhel8-py3.13-auth + tasks: + - name: .standalone .5.0 .noauth .nossl .sync_async + - name: .standalone .6.0 .noauth .nossl .sync_async + - name: .standalone .7.0 .noauth .nossl .sync_async + - name: .standalone .8.0 .noauth .nossl .sync_async + - name: .standalone .rapid .noauth .nossl .sync_async + - name: .standalone .latest .noauth .nossl .sync_async + display_name: Stable API require v1 RHEL8 py3.13 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + REQUIRE_API_VERSION: "1" + MONGODB_API_VERSION: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [versionedApi_tag] + - name: stable-api-accept-v2-rhel8-py3.13-auth + tasks: + - name: .standalone .5.0 .noauth .nossl .sync_async + - name: .standalone .6.0 .noauth .nossl .sync_async + - name: .standalone .7.0 .noauth .nossl .sync_async + - name: .standalone .8.0 .noauth .nossl .sync_async + - name: .standalone .rapid .noauth .nossl .sync_async + - name: .standalone .latest .noauth .nossl .sync_async + display_name: Stable API accept v2 RHEL8 py3.13 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + ORCHESTRATION_FILE: versioned-api-testing.json + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [versionedApi_tag] + + # Storage engine tests + - name: storage-inmemory-rhel8-py3.9 + tasks: + - name: .standalone .noauth .nossl .4.0 .sync_async + - name: .standalone .noauth .nossl .4.4 .sync_async + - name: .standalone .noauth .nossl .5.0 .sync_async + - name: .standalone .noauth .nossl .6.0 .sync_async + - name: .standalone .noauth .nossl .7.0 .sync_async + - name: .standalone .noauth .nossl .8.0 .sync_async + - name: .standalone .noauth .nossl .rapid .sync_async + - name: .standalone .noauth .nossl .latest .sync_async + display_name: Storage InMemory RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + STORAGE_ENGINE: inmemory + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: storage-mmapv1-rhel8-py3.9 + tasks: + - name: .standalone .4.0 .noauth .nossl .sync_async + - name: .replica_set .4.0 .noauth .nossl .sync_async + display_name: Storage MMAPv1 RHEL8 py3.9 + run_on: + - rhel87-small + expansions: + STORAGE_ENGINE: mmapv1 + PYTHON_BINARY: /opt/python/3.9/bin/python3 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index dfaad0f835..05529ecb25 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -93,7 +93,7 @@ def create_variant( else: host = host or "rhel8" run_on = [HOSTS[host].run_on] - name = display_name.replace(" ", "-").lower() + name = display_name.replace(" ", "-").replace("*-", "").lower() if python: expansions["PYTHON_BINARY"] = get_python_binary(python, host) if version: @@ -201,7 +201,7 @@ def create_ocsp_variants() -> list[BuildVariant]: variants = [] batchtime = BATCHTIME_WEEK * 2 expansions = dict(AUTH="noauth", SSL="ssl", TOPOLOGY="server") - base_display = "OCSP test" + base_display = "OCSP" # OCSP tests on rhel8 with all servers v4.4+ and all python versions. versions = [v for v in ALL_VERSIONS if v != "4.0"] @@ -241,10 +241,11 @@ def create_server_variants() -> list[BuildVariant]: # Run the full matrix on linux with min and max CPython, and latest pypy. host = "rhel8" + # Prefix the display name with an asterisk so it is sorted first. + base_display_name = "* Test" for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: - display_name = f"Test {host}" expansions = dict(COVERAGE="coverage") - display_name = get_display_name("Test", host, python=python, **expansions) + display_name = get_display_name(base_display_name, host, python=python, **expansions) variant = create_variant( [f".{t} .sync_async" for t in TOPOLOGIES], display_name, @@ -258,7 +259,7 @@ def create_server_variants() -> list[BuildVariant]: # Test the rest of the pythons. for python in CPYTHONS[1:-1] + PYPYS[:-1]: display_name = f"Test {host}" - display_name = get_display_name("Test", host, python=python) + display_name = get_display_name(base_display_name, host, python=python) variant = create_variant( [f"{t} .sync_async" for t in SUB_TASKS], display_name, @@ -278,7 +279,7 @@ def create_server_variants() -> list[BuildVariant]: for version in get_versions_from("6.0"): tasks.extend(f"{t} .{version} !.sync_async" for t in SUB_TASKS) expansions = dict(SKIP_CSOT_TESTS="true") - display_name = get_display_name("Test", host, python=python, **expansions) + display_name = get_display_name(base_display_name, host, python=python, **expansions) variant = create_variant( tasks, display_name, @@ -385,7 +386,7 @@ def create_compression_variants(): for ind, (compressor, c_ext) in enumerate(product(["snappy", "zlib", "zstd"], C_EXTS)): expansions = dict(COMPRESSORS=compressor) handle_c_ext(c_ext, expansions) - base_name = f"{compressor} compression" + base_name = f"Compression {compressor}" python = CPYTHONS[ind % len(CPYTHONS)] display_name = get_display_name(base_name, host, python=python, **expansions) variant = create_variant( @@ -401,7 +402,7 @@ def create_compression_variants(): for compressor, python in zip_cycle(["snappy", "zlib", "zstd"], other_pythons): expansions = dict(COMPRESSORS=compressor) handle_c_ext(c_ext, expansions) - base_name = f"{compressor} compression" + base_name = f"Compression {compressor}" display_name = get_display_name(base_name, host, python=python, **expansions) variant = create_variant( task_names[compressor], @@ -427,7 +428,7 @@ def create_enterprise_auth_variants(): host = "win64" else: host = "rhel8" - display_name = get_display_name("Enterprise Auth", host, python=python, **expansions) + display_name = get_display_name("Auth Enterprise", host, python=python, **expansions) variant = create_variant( ["test-enterprise-auth"], display_name, host=host, python=python, expansions=expansions ) @@ -467,7 +468,7 @@ def create_pyopenssl_variants(): return variants -def create_storage_engine_tests(): +def create_storage_engine_variants(): host = "rhel8" engines = ["InMemory", "MMAPv1"] variants = [] @@ -490,7 +491,7 @@ def create_storage_engine_tests(): return variants -def create_versioned_api_tests(): +def create_stable_api_variants(): host = "rhel8" tags = ["versionedApi_tag"] tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0")] @@ -512,7 +513,7 @@ def create_versioned_api_tests(): # requireApiVersion, and don't automatically add apiVersion to # clients created in the test suite. expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" - base_display_name = f"Versioned API {test_type}" + base_display_name = f"Stable API {test_type}" display_name = get_display_name(base_display_name, host, python=python, **expansions) variant = create_variant( tasks, display_name, host=host, python=python, tags=tags, expansions=expansions @@ -528,7 +529,7 @@ def create_green_framework_variants(): host = "rhel8" for python, framework in product([CPYTHONS[0], CPYTHONS[-2]], ["eventlet", "gevent"]): expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") - display_name = get_display_name(f"{framework.capitalize()}", host, python=python) + display_name = get_display_name(f"Green {framework.capitalize()}", host, python=python) variant = create_variant( tasks, display_name, host=host, python=python, expansions=expansions ) @@ -623,7 +624,7 @@ def create_oidc_auth_variants(): variants.append( create_variant( tasks, - get_display_name("OIDC Auth", host), + get_display_name("Auth OIDC", host), host=host, batchtime=BATCHTIME_WEEK * 2, ) @@ -650,7 +651,7 @@ def create_mockupdb_variants(): return [ create_variant( ["mockupdb"], - get_display_name("MockupDB Tests", host, python=python), + get_display_name("MockupDB", host, python=python), python=python, host=host, ) @@ -704,7 +705,7 @@ def create_aws_auth_variants(): expansions["skip_web_identity_auth_test"] = "true" variant = create_variant( tasks, - get_display_name("AWS Auth", host, python=python), + get_display_name("Auth AWS", host, python=python), host=host, python=python, expansions=expansions, From 466d0a188f76d0cc6abe8352795d71d9af09a5d3 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Tue, 5 Nov 2024 10:47:36 -0500 Subject: [PATCH 1610/2111] PYTHON-4903 Adds typing overloading to bson.binary.Binary.from_vector (#1967) --- bson/binary.py | 28 ++++++++++++++++++++-------- test/test_bson.py | 6 ++++++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index f03173a8ef..6dc5058c2c 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -16,7 +16,7 @@ import struct from dataclasses import dataclass from enum import Enum -from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union +from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union, overload from uuid import UUID """Tools for representing BSON binary data. @@ -195,7 +195,7 @@ class UuidRepresentation: VECTOR_SUBTYPE = 9 -"""**(BETA)** BSON binary subtype for densely packed vector data. +"""BSON binary subtype for densely packed vector data. .. versionadded:: 4.10 """ @@ -207,7 +207,7 @@ class UuidRepresentation: class BinaryVectorDtype(Enum): - """**(BETA)** Datatypes of vector subtype. + """Datatypes of vector subtype. :param FLOAT32: (0x27) Pack list of :class:`float` as float32 :param INT8: (0x03) Pack list of :class:`int` in [-128, 127] as signed int8 @@ -229,7 +229,7 @@ class BinaryVectorDtype(Enum): @dataclass class BinaryVector: - """**(BETA)** Vector of numbers along with metadata for binary interoperability. + """Vector of numbers along with metadata for binary interoperability. .. versionadded:: 4.10 """ @@ -256,7 +256,7 @@ class Binary(bytes): the difference between what should be considered binary data and what should be considered a string when we encode to BSON. - **(BETA)** Subtype 9 provides a space-efficient representation of 1-dimensional vector data. + Subtype 9 provides a space-efficient representation of 1-dimensional vector data. Its data is prepended with two bytes of metadata. The first (dtype) describes its data type, such as float32 or int8. The second (padding) prescribes the number of bits to ignore in the final byte. @@ -278,7 +278,7 @@ class Binary(bytes): Support any bytes-like type that implements the buffer protocol. .. versionchanged:: 4.10 - **(BETA)** Addition of vector subtype. + Addition of vector subtype. """ _type_marker = 5 @@ -397,6 +397,18 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" ) + @classmethod + @overload + def from_vector(cls: Type[Binary], vector: BinaryVector) -> Binary: + ... + + @classmethod + @overload + def from_vector( + cls: Type[Binary], vector: list[int, float], dtype: BinaryVectorDtype, padding: int = 0 + ) -> Binary: + ... + @classmethod def from_vector( cls: Type[Binary], @@ -404,7 +416,7 @@ def from_vector( dtype: Optional[BinaryVectorDtype] = None, padding: Optional[int] = None, ) -> Binary: - """**(BETA)** Create a BSON :class:`~bson.binary.Binary` of Vector subtype. + """Create a BSON :class:`~bson.binary.Binary` of Vector subtype. To interpret the representation of the numbers, a data type must be included. See :class:`~bson.binary.BinaryVectorDtype` for available types and descriptions. @@ -447,7 +459,7 @@ def from_vector( return cls(metadata + data, subtype=VECTOR_SUBTYPE) def as_vector(self) -> BinaryVector: - """**(BETA)** From the Binary, create a list of numbers, along with dtype and padding. + """From the Binary, create a list of numbers, along with dtype and padding. :return: BinaryVector diff --git a/test/test_bson.py b/test/test_bson.py index 5dc1377bcd..b431f700dc 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -802,6 +802,12 @@ def test_vector(self): assert float_binary == Binary.from_vector( BinaryVector(list_vector, BinaryVectorDtype.FLOAT32) ) + # Confirm kwargs cannot be passed when BinaryVector is provided + with self.assertRaises(ValueError): + Binary.from_vector( + BinaryVector(list_vector, BinaryVectorDtype.PACKED_BIT, padding), + dtype=BinaryVectorDtype.PACKED_BIT, + ) # type: ignore[call-overload] def test_unicode_regex(self): """Tests we do not get a segfault for C extension on unicode RegExs. From 0733c4da44d4cd5c2a32c092762e639e14c3dc27 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 5 Nov 2024 12:19:51 -0800 Subject: [PATCH 1611/2111] PYTHON-4925 Fix test bugs in $$matchAsDocument and $$matchAsRoot (#1988) Fixes a bug where the driverConnectionId field was missing from "server heartbeat failed" log messages. Avoids sending "upsert": False since various client.bulkWrite spec tests assume this field is only sent when it's True. --- pymongo/asynchronous/bulk.py | 18 +- pymongo/asynchronous/client_bulk.py | 16 +- pymongo/asynchronous/monitor.py | 5 + pymongo/operations.py | 4 +- pymongo/synchronous/bulk.py | 18 +- pymongo/synchronous/client_bulk.py | 16 +- pymongo/synchronous/monitor.py | 5 + test/asynchronous/unified_format.py | 4 +- .../unified/logging-replicaset.json | 4 + .../unified/logging-sharded.json | 2 + .../unified/logging-standalone.json | 2 + .../runOnRequirement-authMechanism-type.json | 4 +- .../valid-fail/operator-matchAsDocument.json | 205 ++++++++++++++++++ .../valid-fail/operator-matchAsRoot.json | 67 ++++++ ...es-lte-operator.json => operator-lte.json} | 2 +- .../valid-pass/operator-matchAsDocument.json | 124 +++++++++++ .../valid-pass/operator-matchAsRoot.json | 151 +++++++++++++ test/unified_format.py | 4 +- test/unified_format_shared.py | 21 +- test/utils.py | 2 + 20 files changed, 609 insertions(+), 65 deletions(-) create mode 100644 test/unified-test-format/valid-fail/operator-matchAsDocument.json create mode 100644 test/unified-test-format/valid-fail/operator-matchAsRoot.json rename test/unified-test-format/valid-pass/{matches-lte-operator.json => operator-lte.json} (97%) create mode 100644 test/unified-test-format/valid-pass/operator-matchAsDocument.json create mode 100644 test/unified-test-format/valid-pass/operator-matchAsRoot.json diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index e6cfe5b36e..6770d7b34e 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -140,8 +140,8 @@ def add_update( self, selector: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - multi: bool = False, - upsert: bool = False, + multi: bool, + upsert: Optional[bool], collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, @@ -149,9 +149,9 @@ def add_update( ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd: dict[str, Any] = dict( # noqa: C406 - [("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)] - ) + cmd: dict[str, Any] = {"q": selector, "u": update, "multi": multi} + if upsert is not None: + cmd["upsert"] = upsert if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -173,14 +173,16 @@ def add_replace( self, selector: Mapping[str, Any], replacement: Mapping[str, Any], - upsert: bool = False, + upsert: Optional[bool], collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) - cmd = {"q": selector, "u": replacement, "multi": False, "upsert": upsert} + cmd: dict[str, Any] = {"q": selector, "u": replacement} + if upsert is not None: + cmd["upsert"] = upsert if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -200,7 +202,7 @@ def add_delete( hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create a delete document and add it to the list of ops.""" - cmd = {"q": selector, "limit": limit} + cmd: dict[str, Any] = {"q": selector, "limit": limit} if collation is not None: self.uses_collation = True cmd["collation"] = collation diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index a6f7178e47..0dcdaa6c07 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -106,20 +106,13 @@ def __init__( self.bypass_doc_val = bypass_document_validation self.comment = comment self.verbose_results = verbose_results - self.ops: list[tuple[str, Mapping[str, Any]]] = [] self.namespaces: list[str] = [] self.idx_offset: int = 0 self.total_ops: int = 0 - self.executed = False - self.uses_upsert = False self.uses_collation = False self.uses_array_filters = False - self.uses_hint_update = False - self.uses_hint_delete = False - self.uses_sort = False - self.is_retryable = self.client.options.retry_writes self.retrying = False self.started_retryable_write = False @@ -144,7 +137,7 @@ def add_update( namespace: str, selector: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - multi: bool = False, + multi: bool, upsert: Optional[bool] = None, collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, @@ -160,19 +153,16 @@ def add_update( "multi": multi, } if upsert is not None: - self.uses_upsert = True cmd["upsert"] = upsert if array_filters is not None: self.uses_array_filters = True cmd["arrayFilters"] = array_filters if hint is not None: - self.uses_hint_update = True cmd["hint"] = hint if collation is not None: self.uses_collation = True cmd["collation"] = collation if sort is not None: - self.uses_sort = True cmd["sort"] = sort if multi: # A bulk_write containing an update_many is not retryable. @@ -200,16 +190,13 @@ def add_replace( "multi": False, } if upsert is not None: - self.uses_upsert = True cmd["upsert"] = upsert if hint is not None: - self.uses_hint_update = True cmd["hint"] = hint if collation is not None: self.uses_collation = True cmd["collation"] = collation if sort is not None: - self.uses_sort = True cmd["sort"] = sort self.ops.append(("replace", cmd)) self.namespaces.append(namespace) @@ -226,7 +213,6 @@ def add_delete( """Create a delete document and add it to the list of ops.""" cmd = {"delete": -1, "filter": selector, "multi": multi} if hint is not None: - self.uses_hint_delete = True cmd["hint"] = hint if collation is not None: self.uses_collation = True diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index f9e912b084..a4dc9b7f45 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -149,6 +149,7 @@ def __init__( self._listeners = self._settings._pool_options._event_listeners self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat self._cancel_context: Optional[_CancellationContext] = None + self._conn_id: Optional[int] = None self._rtt_monitor = _RttMonitor( topology, topology_settings, @@ -243,6 +244,7 @@ async def _check_server(self) -> ServerDescription: Returns a ServerDescription. """ + self._conn_id = None start = time.monotonic() try: try: @@ -272,6 +274,7 @@ async def _check_server(self) -> ServerDescription: awaited=awaited, durationMS=duration * 1000, failure=error, + driverConnectionId=self._conn_id, message=_SDAMStatusMessage.HEARTBEAT_FAIL, ) await self._reset_connection() @@ -314,6 +317,8 @@ async def _check_once(self) -> ServerDescription: ) self._cancel_context = conn.cancel_context + # Record the connection id so we can later attach it to the failed log message. + self._conn_id = conn.id response, round_trip_time = await self._check_with_socket(conn) if not response.awaitable: self._rtt_monitor.add_sample(round_trip_time) diff --git a/pymongo/operations.py b/pymongo/operations.py index 8905048c4e..482ab68003 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -332,7 +332,7 @@ def __init__( self, filter: Mapping[str, Any], replacement: Union[_DocumentType, RawBSONDocument], - upsert: bool = False, + upsert: Optional[bool] = None, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, namespace: Optional[str] = None, @@ -693,7 +693,7 @@ def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: self._filter, self._doc, True, - bool(self._upsert), + self._upsert, collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index 7fb29a977f..0b709f1acf 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -140,8 +140,8 @@ def add_update( self, selector: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - multi: bool = False, - upsert: bool = False, + multi: bool, + upsert: Optional[bool], collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, @@ -149,9 +149,9 @@ def add_update( ) -> None: """Create an update document and add it to the list of ops.""" validate_ok_for_update(update) - cmd: dict[str, Any] = dict( # noqa: C406 - [("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)] - ) + cmd: dict[str, Any] = {"q": selector, "u": update, "multi": multi} + if upsert is not None: + cmd["upsert"] = upsert if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -173,14 +173,16 @@ def add_replace( self, selector: Mapping[str, Any], replacement: Mapping[str, Any], - upsert: bool = False, + upsert: Optional[bool], collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a replace document and add it to the list of ops.""" validate_ok_for_replace(replacement) - cmd = {"q": selector, "u": replacement, "multi": False, "upsert": upsert} + cmd: dict[str, Any] = {"q": selector, "u": replacement} + if upsert is not None: + cmd["upsert"] = upsert if collation is not None: self.uses_collation = True cmd["collation"] = collation @@ -200,7 +202,7 @@ def add_delete( hint: Union[str, dict[str, Any], None] = None, ) -> None: """Create a delete document and add it to the list of ops.""" - cmd = {"q": selector, "limit": limit} + cmd: dict[str, Any] = {"q": selector, "limit": limit} if collation is not None: self.uses_collation = True cmd["collation"] = collation diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 6cb4275417..625e8429eb 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -106,20 +106,13 @@ def __init__( self.bypass_doc_val = bypass_document_validation self.comment = comment self.verbose_results = verbose_results - self.ops: list[tuple[str, Mapping[str, Any]]] = [] self.namespaces: list[str] = [] self.idx_offset: int = 0 self.total_ops: int = 0 - self.executed = False - self.uses_upsert = False self.uses_collation = False self.uses_array_filters = False - self.uses_hint_update = False - self.uses_hint_delete = False - self.uses_sort = False - self.is_retryable = self.client.options.retry_writes self.retrying = False self.started_retryable_write = False @@ -144,7 +137,7 @@ def add_update( namespace: str, selector: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - multi: bool = False, + multi: bool, upsert: Optional[bool] = None, collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, @@ -160,19 +153,16 @@ def add_update( "multi": multi, } if upsert is not None: - self.uses_upsert = True cmd["upsert"] = upsert if array_filters is not None: self.uses_array_filters = True cmd["arrayFilters"] = array_filters if hint is not None: - self.uses_hint_update = True cmd["hint"] = hint if collation is not None: self.uses_collation = True cmd["collation"] = collation if sort is not None: - self.uses_sort = True cmd["sort"] = sort if multi: # A bulk_write containing an update_many is not retryable. @@ -200,16 +190,13 @@ def add_replace( "multi": False, } if upsert is not None: - self.uses_upsert = True cmd["upsert"] = upsert if hint is not None: - self.uses_hint_update = True cmd["hint"] = hint if collation is not None: self.uses_collation = True cmd["collation"] = collation if sort is not None: - self.uses_sort = True cmd["sort"] = sort self.ops.append(("replace", cmd)) self.namespaces.append(namespace) @@ -226,7 +213,6 @@ def add_delete( """Create a delete document and add it to the list of ops.""" cmd = {"delete": -1, "filter": selector, "multi": multi} if hint is not None: - self.uses_hint_delete = True cmd["hint"] = hint if collation is not None: self.uses_collation = True diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index 3f9bb2ea75..d02ad0a6fd 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -149,6 +149,7 @@ def __init__( self._listeners = self._settings._pool_options._event_listeners self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat self._cancel_context: Optional[_CancellationContext] = None + self._conn_id: Optional[int] = None self._rtt_monitor = _RttMonitor( topology, topology_settings, @@ -243,6 +244,7 @@ def _check_server(self) -> ServerDescription: Returns a ServerDescription. """ + self._conn_id = None start = time.monotonic() try: try: @@ -272,6 +274,7 @@ def _check_server(self) -> ServerDescription: awaited=awaited, durationMS=duration * 1000, failure=error, + driverConnectionId=self._conn_id, message=_SDAMStatusMessage.HEARTBEAT_FAIL, ) self._reset_connection() @@ -314,6 +317,8 @@ def _check_once(self) -> ServerDescription: ) self._cancel_context = conn.cancel_context + # Record the connection id so we can later attach it to the failed log message. + self._conn_id = conn.id response, round_trip_time = self._check_with_socket(conn) if not response.awaitable: self._rtt_monitor.add_sample(round_trip_time) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index f8179dc0c7..81feed4d4c 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -1328,8 +1328,8 @@ def format_logs(log_list): if log.module == "ocsp_support": continue data = json_util.loads(log.getMessage()) - client = data.pop("clientId") if "clientId" in data else data.pop("topologyId") - client_to_log[client].append( + client_id = data.get("clientId", data.get("topologyId")) + client_to_log[client_id].append( { "level": log.levelname.lower(), "component": log.name.replace("pymongo.", "", 1), diff --git a/test/discovery_and_monitoring/unified/logging-replicaset.json b/test/discovery_and_monitoring/unified/logging-replicaset.json index e6738225cd..fe6ac60b68 100644 --- a/test/discovery_and_monitoring/unified/logging-replicaset.json +++ b/test/discovery_and_monitoring/unified/logging-replicaset.json @@ -357,6 +357,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] @@ -398,6 +399,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] @@ -439,6 +441,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] @@ -589,6 +592,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] diff --git a/test/discovery_and_monitoring/unified/logging-sharded.json b/test/discovery_and_monitoring/unified/logging-sharded.json index 61b27f5be0..3788708ab0 100644 --- a/test/discovery_and_monitoring/unified/logging-sharded.json +++ b/test/discovery_and_monitoring/unified/logging-sharded.json @@ -324,6 +324,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] @@ -475,6 +476,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] diff --git a/test/discovery_and_monitoring/unified/logging-standalone.json b/test/discovery_and_monitoring/unified/logging-standalone.json index 1ee6dbe899..0682a1a4fb 100644 --- a/test/discovery_and_monitoring/unified/logging-standalone.json +++ b/test/discovery_and_monitoring/unified/logging-standalone.json @@ -339,6 +339,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] @@ -500,6 +501,7 @@ }, "durationMS": { "$$type": [ + "double", "int", "long" ] diff --git a/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json index b97654a743..007f3f304c 100644 --- a/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json +++ b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json @@ -9,9 +9,7 @@ "tests": [ { "description": "foo", - "operations": [ - - ] + "operations": [] } ] } diff --git a/test/unified-test-format/valid-fail/operator-matchAsDocument.json b/test/unified-test-format/valid-fail/operator-matchAsDocument.json new file mode 100644 index 0000000000..24f6be9cb8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operator-matchAsDocument.json @@ -0,0 +1,205 @@ +{ + "description": "operator-matchAsDocument", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "json": "{ \"x\": 1, \"y\": 2 }" + }, + { + "_id": 2, + "json": "1" + }, + { + "_id": 3, + "json": "[ \"foo\" ]" + }, + { + "_id": 4, + "json": "{ \"x\" }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsDocument with non-matching filter", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": "two" + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument does not permit extra fields", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1 + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument expects JSON object but given scalar", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument expects JSON object but given array", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 3 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 3, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument fails to decode Extended JSON", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 4 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 4, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operator-matchAsRoot.json b/test/unified-test-format/valid-fail/operator-matchAsRoot.json new file mode 100644 index 0000000000..ec6309418c --- /dev/null +++ b/test/unified-test-format/valid-fail/operator-matchAsRoot.json @@ -0,0 +1,67 @@ +{ + "description": "operator-matchAsRoot", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": { + "y": 2, + "z": 3 + } + } + ] + } + ], + "tests": [ + { + "description": "matchAsRoot with nested document does not match", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 3 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/matches-lte-operator.json b/test/unified-test-format/valid-pass/operator-lte.json similarity index 97% rename from test/unified-test-format/valid-pass/matches-lte-operator.json rename to test/unified-test-format/valid-pass/operator-lte.json index 4de65c5838..4a13b16d15 100644 --- a/test/unified-test-format/valid-pass/matches-lte-operator.json +++ b/test/unified-test-format/valid-pass/operator-lte.json @@ -1,5 +1,5 @@ { - "description": "matches-lte-operator", + "description": "operator-lte", "schemaVersion": "1.9", "createEntities": [ { diff --git a/test/unified-test-format/valid-pass/operator-matchAsDocument.json b/test/unified-test-format/valid-pass/operator-matchAsDocument.json new file mode 100644 index 0000000000..fd8b514d4a --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-matchAsDocument.json @@ -0,0 +1,124 @@ +{ + "description": "operator-matchAsDocument", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "json": "{ \"x\": 1, \"y\": 2.0 }" + }, + { + "_id": 2, + "json": "{ \"x\": { \"$oid\": \"57e193d7a9cc81b4027498b5\" } }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsDocument performs flexible numeric comparisons", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument decodes Extended JSON", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "x": { + "$$type": "objectId" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-matchAsRoot.json b/test/unified-test-format/valid-pass/operator-matchAsRoot.json new file mode 100644 index 0000000000..1966e3b377 --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-matchAsRoot.json @@ -0,0 +1,151 @@ +{ + "description": "operator-matchAsRoot", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": { + "y": 2, + "z": 3 + } + }, + { + "_id": 2, + "json": "{ \"x\": 1, \"y\": 2 }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsRoot with nested document", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot performs flexible numeric comparisons", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2, + "z": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot with matchAsDocument", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 80c37470e3..395d40b2d1 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1314,8 +1314,8 @@ def format_logs(log_list): if log.module == "ocsp_support": continue data = json_util.loads(log.getMessage()) - client = data.pop("clientId") if "clientId" in data else data.pop("topologyId") - client_to_log[client].append( + client_id = data.get("clientId", data.get("topologyId")) + client_to_log[client_id].append( { "level": log.levelname.lower(), "component": log.name.replace("pymongo.", "", 1), diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 1c87fb3f18..0c685366f4 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -433,10 +433,12 @@ def _operation_lte(self, spec, actual, key_to_compare): self.test.assertLessEqual(actual[key_to_compare], spec) def _operation_matchAsDocument(self, spec, actual, key_to_compare): - self._match_document(spec, json_util.loads(actual[key_to_compare]), False) + self._match_document(spec, json_util.loads(actual[key_to_compare]), False, test=True) def _operation_matchAsRoot(self, spec, actual, key_to_compare): - self._match_document(spec, actual, True) + if key_to_compare: + actual = actual[key_to_compare] + self._match_document(spec, actual, True, test=True) def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): method_name = "_operation_{}".format(opname.strip("$")) @@ -489,7 +491,7 @@ def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=Non def _match_document(self, expectation, actual, is_root, test=False): if self._evaluate_if_special_operation(expectation, actual): - return + return True self.test.assertIsInstance(actual, abc.Mapping) for key, value in expectation.items(): @@ -521,25 +523,26 @@ def match_result(self, expectation, actual, in_recursive_call=False, test=True): self.test.assertIsInstance(actual, abc.MutableSequence) for e, a in zip(expectation, actual): if isinstance(e, abc.Mapping): - self._match_document(e, a, is_root=not in_recursive_call, test=test) + res = self._match_document(e, a, is_root=not in_recursive_call, test=test) else: - self.match_result(e, a, in_recursive_call=True, test=test) - return None + res = self.match_result(e, a, in_recursive_call=True, test=test) + if not res: + return False + return True # account for flexible numerics in element-wise comparison - if isinstance(expectation, int) or isinstance(expectation, float): + if isinstance(expectation, (int, float)): if test: self.test.assertEqual(expectation, actual) else: return expectation == actual - return None else: if test: self.test.assertIsInstance(actual, type(expectation)) self.test.assertEqual(expectation, actual) else: return isinstance(actual, type(expectation)) and expectation == actual - return None + return True def match_server_description(self, actual: ServerDescription, spec: dict) -> None: for field, expected in spec.items(): diff --git a/test/utils.py b/test/utils.py index 3eac4fa509..766f209de2 100644 --- a/test/utils.py +++ b/test/utils.py @@ -20,6 +20,7 @@ import copy import functools import os +import random import re import shutil import sys @@ -309,6 +310,7 @@ class MockConnection: def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False + self.id = random.randint(0, 100) def close_conn(self, reason): pass From 41527f06bb166f81e0e10608a2b5c4a1d6446a46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 06:38:46 -0600 Subject: [PATCH 1612/2111] Bump pyright from 1.1.385 to 1.1.388 (#1999) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index b1f07604dc..ad799ea368 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.13.0 -pyright==1.1.385 +pyright==1.1.388 typing_extensions -r ./encryption.txt -r ./ocsp.txt From 6a8a8052171f2a5cefb22c3dbdc116d564cdf5ba Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Nov 2024 09:33:29 -0600 Subject: [PATCH 1613/2111] PYTHON-4845 Ensure ALLOWED_HOSTS is optional for Workload Usage (#1998) --- pymongo/asynchronous/auth_oidc.py | 2 +- pymongo/auth_shared.py | 8 ++++--- pymongo/common.py | 6 +++-- pymongo/synchronous/auth_oidc.py | 2 +- test/auth_oidc/test_auth_oidc.py | 38 +++++++++++++++++++++++++++---- 5 files changed, 44 insertions(+), 12 deletions(-) diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py index f5801b85d4..f1c15045de 100644 --- a/pymongo/asynchronous/auth_oidc.py +++ b/pymongo/asynchronous/auth_oidc.py @@ -55,7 +55,7 @@ def _get_authenticator( properties = credentials.mechanism_properties # Validate that the address is allowed. - if not properties.environment: + if properties.human_callback is not None: found = False allowed_hosts = properties.allowed_hosts for patt in allowed_hosts: diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py index 1e1ce7b4d8..9534bd74ad 100644 --- a/pymongo/auth_shared.py +++ b/pymongo/auth_shared.py @@ -100,8 +100,8 @@ def _validate_canonicalize_host_name(value: str | bool) -> str | bool: def _build_credentials_tuple( mech: str, source: Optional[str], - user: str, - passwd: str, + user: Optional[str], + passwd: Optional[str], extra: Mapping[str, Any], database: Optional[str], ) -> MongoCredential: @@ -161,6 +161,8 @@ def _build_credentials_tuple( "::1", ] allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed) + if properties.get("ALLOWED_HOSTS", None) is not None and human_callback is None: + raise ConfigurationError("ALLOWED_HOSTS is only valid with OIDC_HUMAN_CALLBACK") msg = ( "authentication with MONGODB-OIDC requires providing either a callback or a environment" ) @@ -207,7 +209,7 @@ def _build_credentials_tuple( environment=environ, allowed_hosts=allowed_hosts, token_resource=token_resource, - username=user, + username=user or "", ) return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache()) diff --git a/pymongo/common.py b/pymongo/common.py index d4601a0eb5..5661de011c 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -873,8 +873,10 @@ def get_setter_key(x: str) -> str: validator = _get_validator(opt, URI_OPTIONS_VALIDATOR_MAP, normed_key=normed_key) validated = validator(opt, value) except (ValueError, TypeError, ConfigurationError) as exc: - if normed_key == "authmechanismproperties" and any( - p in str(exc) for p in _MECH_PROP_MUST_RAISE + if ( + normed_key == "authmechanismproperties" + and any(p in str(exc) for p in _MECH_PROP_MUST_RAISE) + and "is not a supported auth mechanism property" not in str(exc) ): raise if warn: diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py index 6381a408ab..5a8967d96b 100644 --- a/pymongo/synchronous/auth_oidc.py +++ b/pymongo/synchronous/auth_oidc.py @@ -55,7 +55,7 @@ def _get_authenticator( properties = credentials.mechanism_properties # Validate that the address is allowed. - if not properties.environment: + if properties.human_callback is not None: found = False allowed_hosts = properties.allowed_hosts for patt in allowed_hosts: diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index a0127304c1..7a78f3d2f6 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -38,11 +38,17 @@ from pymongo._azure_helpers import _get_azure_response from pymongo._gcp_helpers import _get_gcp_response from pymongo.auth_oidc_shared import _get_k8s_token +from pymongo.auth_shared import _build_credentials_tuple from pymongo.cursor_shared import CursorType from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.operations import InsertOne -from pymongo.synchronous.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult +from pymongo.synchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + _get_authenticator, +) from pymongo.uri_parser import parse_uri ROOT = Path(__file__).parent.parent.resolve() @@ -103,7 +109,6 @@ def fail_point(self, command_args): client.close() -@pytest.mark.auth_oidc class TestAuthOIDCHuman(OIDCTestBase): uri: str @@ -838,12 +843,35 @@ def test_2_4_invalid_client_configuration_with_callback(self): self.create_client(authmechanismproperties=props) def test_2_5_invalid_use_of_ALLOWED_HOSTS(self): - # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "azure", "ALLOWED_HOSTS": []}`. - props: Dict = {"ENVIRONMENT": "azure", "ALLOWED_HOSTS": []} + # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}`. + props: Dict = {"ENVIRONMENT": "test", "ALLOWED_HOSTS": []} # Assert it returns a client configuration error. with self.assertRaises(ConfigurationError): self.create_client(authmechanismproperties=props) + # Create an OIDC configured client with auth mechanism properties `{"OIDC_CALLBACK": "", "ALLOWED_HOSTS": []}`. + props: Dict = {"OIDC_CALLBACK": self.create_request_cb(), "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + def test_2_6_ALLOWED_HOSTS_defaults_ignored(self): + # Create a MongoCredential for OIDC with a machine callback. + props = {"OIDC_CALLBACK": self.create_request_cb()} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, "foo", None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "foo" + + # Create a MongoCredential for OIDC with an ENVIRONMENT. + props = {"ENVIRONMENT": "test"} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, None, None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "" + def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self): # Create a MongoClient and an OIDC callback that implements the provider logic. client = self.create_client() @@ -909,7 +937,7 @@ def test_3_3_unexpected_error_code_does_not_clear_cache(self): # Assert that the callback has been called once. self.assertEqual(self.request_called, 1) - def test_4_1_reauthentication_succeds(self): + def test_4_1_reauthentication_succeeds(self): # Create a ``MongoClient`` configured with a custom OIDC callback that # implements the provider logic. client = self.create_client() From 5b00a3d48a9052b00680b71124ee4ef82358fef7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Nov 2024 09:34:05 -0600 Subject: [PATCH 1614/2111] PYTHON-4956 Generated config cleanup (#2000) --- .evergreen/generated_configs/variants.yml | 454 +++++++++++----------- .evergreen/scripts/configure-env.sh | 5 + .evergreen/scripts/generate_config.py | 188 +++++---- 3 files changed, 336 insertions(+), 311 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 9ee51899f4..928347f567 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1,9 +1,9 @@ buildvariants: # Alternative hosts tests - - name: openssl-1.0.2-rhel7-py3.9 + - name: openssl-1.0.2-rhel7-python3.9 tasks: - name: .5.0 .standalone !.sync_async - display_name: OpenSSL 1.0.2 RHEL7 py3.9 + display_name: OpenSSL 1.0.2 RHEL7 Python3.9 run_on: - rhel79-small batchtime: 10080 @@ -48,57 +48,57 @@ buildvariants: SKIP_HATCH: "true" # Atlas connect tests - - name: atlas-connect-rhel8-py3.9 + - name: atlas-connect-rhel8-python3.9 tasks: - name: atlas-connect - display_name: Atlas connect RHEL8 py3.9 + display_name: Atlas connect RHEL8 Python3.9 run_on: - rhel87-small expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-connect-rhel8-py3.13 + - name: atlas-connect-rhel8-python3.13 tasks: - name: atlas-connect - display_name: Atlas connect RHEL8 py3.13 + display_name: Atlas connect RHEL8 Python3.13 run_on: - rhel87-small expansions: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Atlas data lake tests - - name: atlas-data-lake-ubuntu-22-py3.9-auth-no-c + - name: atlas-data-lake-ubuntu-22-python3.9-auth-no-c tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 py3.9 Auth No C + display_name: Atlas Data Lake Ubuntu-22 Python3.9 Auth No C run_on: - ubuntu2204-small expansions: AUTH: auth NO_EXT: "1" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-ubuntu-22-py3.9-auth + - name: atlas-data-lake-ubuntu-22-python3.9-auth tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 py3.9 Auth + display_name: Atlas Data Lake Ubuntu-22 Python3.9 Auth run_on: - ubuntu2204-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-ubuntu-22-py3.13-auth-no-c + - name: atlas-data-lake-ubuntu-22-python3.13-auth-no-c tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 py3.13 Auth No C + display_name: Atlas Data Lake Ubuntu-22 Python3.13 Auth No C run_on: - ubuntu2204-small expansions: AUTH: auth NO_EXT: "1" PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: atlas-data-lake-ubuntu-22-py3.13-auth + - name: atlas-data-lake-ubuntu-22-python3.13-auth tasks: - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 py3.13 Auth + display_name: Atlas Data Lake Ubuntu-22 Python3.13 Auth run_on: - ubuntu2204-small expansions: @@ -106,7 +106,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Aws auth tests - - name: auth-aws-ubuntu-20-py3.9 + - name: auth-aws-ubuntu-20-python3.9 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -115,12 +115,12 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: Auth AWS Ubuntu-20 py3.9 + display_name: Auth AWS Ubuntu-20 Python3.9 run_on: - ubuntu2004-small expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: auth-aws-ubuntu-20-py3.13 + - name: auth-aws-ubuntu-20-python3.13 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -129,12 +129,12 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: Auth AWS Ubuntu-20 py3.13 + display_name: Auth AWS Ubuntu-20 Python3.13 run_on: - ubuntu2004-small expansions: PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: auth-aws-win64-py3.9 + - name: auth-aws-win64-python3.9 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -143,13 +143,13 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: Auth AWS Win64 py3.9 + display_name: Auth AWS Win64 Python3.9 run_on: - windows-64-vsMulti-small expansions: skip_ECS_auth_test: "true" PYTHON_BINARY: C:/python/Python39/python.exe - - name: auth-aws-win64-py3.13 + - name: auth-aws-win64-python3.13 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -158,13 +158,13 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: Auth AWS Win64 py3.13 + display_name: Auth AWS Win64 Python3.13 run_on: - windows-64-vsMulti-small expansions: skip_ECS_auth_test: "true" PYTHON_BINARY: C:/python/Python313/python.exe - - name: auth-aws-macos-py3.9 + - name: auth-aws-macos-python3.9 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -173,7 +173,7 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: Auth AWS macOS py3.9 + display_name: Auth AWS macOS Python3.9 run_on: - macos-14 expansions: @@ -181,7 +181,7 @@ buildvariants: skip_EC2_auth_test: "true" skip_web_identity_auth_test: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: auth-aws-macos-py3.13 + - name: auth-aws-macos-python3.13 tasks: - name: aws-auth-test-4.4 - name: aws-auth-test-5.0 @@ -190,7 +190,7 @@ buildvariants: - name: aws-auth-test-8.0 - name: aws-auth-test-rapid - name: aws-auth-test-latest - display_name: Auth AWS macOS py3.13 + display_name: Auth AWS macOS Python3.13 run_on: - macos-14 expansions: @@ -200,58 +200,58 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 # Compression tests - - name: compression-snappy-rhel8-py3.9-no-c + - name: compression-snappy-rhel8-python3.9-no-c tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 py3.9 No C + display_name: Compression snappy RHEL8 Python3.9 No C run_on: - rhel87-small expansions: COMPRESSORS: snappy NO_EXT: "1" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: compression-snappy-rhel8-py3.10 + - name: compression-snappy-rhel8-python3.10 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 py3.10 + display_name: Compression snappy RHEL8 Python3.10 run_on: - rhel87-small expansions: COMPRESSORS: snappy PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: compression-zlib-rhel8-py3.11-no-c + - name: compression-zlib-rhel8-python3.11-no-c tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression zlib RHEL8 py3.11 No C + display_name: Compression zlib RHEL8 Python3.11 No C run_on: - rhel87-small expansions: COMPRESSORS: zlib NO_EXT: "1" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: compression-zlib-rhel8-py3.12 + - name: compression-zlib-rhel8-python3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression zlib RHEL8 py3.12 + display_name: Compression zlib RHEL8 Python3.12 run_on: - rhel87-small expansions: COMPRESSORS: zlib PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: compression-zstd-rhel8-py3.13-no-c + - name: compression-zstd-rhel8-python3.13-no-c tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 py3.13 No C + display_name: Compression zstd RHEL8 Python3.13 No C run_on: - rhel87-small expansions: COMPRESSORS: zstd NO_EXT: "1" PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: compression-zstd-rhel8-py3.9 + - name: compression-zstd-rhel8-python3.9 tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 py3.9 + display_name: Compression zstd RHEL8 Python3.9 run_on: - rhel87-small expansions: @@ -260,7 +260,7 @@ buildvariants: - name: compression-snappy-rhel8-pypy3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 pypy3.9 + display_name: Compression snappy RHEL8 PyPy3.9 run_on: - rhel87-small expansions: @@ -269,7 +269,7 @@ buildvariants: - name: compression-zlib-rhel8-pypy3.10 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression zlib RHEL8 pypy3.10 + display_name: Compression zlib RHEL8 PyPy3.10 run_on: - rhel87-small expansions: @@ -278,7 +278,7 @@ buildvariants: - name: compression-zstd-rhel8-pypy3.9 tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 pypy3.9 + display_name: Compression zstd RHEL8 PyPy3.9 run_on: - rhel87-small expansions: @@ -286,10 +286,10 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 # Disable test commands tests - - name: disable-test-commands-rhel8-py3.9 + - name: disable-test-commands-rhel8-python3.9 tasks: - name: .latest .sync_async - display_name: Disable test commands RHEL8 py3.9 + display_name: Disable test commands RHEL8 Python3.9 run_on: - rhel87-small expansions: @@ -299,22 +299,22 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Doctests tests - - name: doctests-rhel8-py3.9 + - name: doctests-rhel8-python3.9 tasks: - name: doctests - display_name: Doctests RHEL8 py3.9 + display_name: Doctests RHEL8 Python3.9 run_on: - rhel87-small expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Encryption tests - - name: encryption-rhel8-py3.9 + - name: encryption-rhel8-python3.9 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 py3.9 + display_name: Encryption RHEL8 Python3.9 run_on: - rhel87-small batchtime: 10080 @@ -322,12 +322,12 @@ buildvariants: test_encryption: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-rhel8-py3.13 + - name: encryption-rhel8-python3.13 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 py3.13 + display_name: Encryption RHEL8 Python3.13 run_on: - rhel87-small batchtime: 10080 @@ -340,7 +340,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 pypy3.10 + display_name: Encryption RHEL8 PyPy3.10 run_on: - rhel87-small batchtime: 10080 @@ -348,12 +348,12 @@ buildvariants: test_encryption: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-py3.9 + - name: encryption-crypt_shared-rhel8-python3.9 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption crypt_shared RHEL8 py3.9 + display_name: Encryption crypt_shared RHEL8 Python3.9 run_on: - rhel87-small batchtime: 10080 @@ -362,12 +362,12 @@ buildvariants: test_crypt_shared: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-py3.13 + - name: encryption-crypt_shared-rhel8-python3.13 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption crypt_shared RHEL8 py3.13 + display_name: Encryption crypt_shared RHEL8 Python3.13 run_on: - rhel87-small batchtime: 10080 @@ -381,7 +381,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption crypt_shared RHEL8 pypy3.10 + display_name: Encryption crypt_shared RHEL8 PyPy3.10 run_on: - rhel87-small batchtime: 10080 @@ -390,12 +390,12 @@ buildvariants: test_crypt_shared: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-py3.9 + - name: encryption-pyopenssl-rhel8-python3.9 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption PyOpenSSL RHEL8 py3.9 + display_name: Encryption PyOpenSSL RHEL8 Python3.9 run_on: - rhel87-small batchtime: 10080 @@ -404,12 +404,12 @@ buildvariants: test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-py3.13 + - name: encryption-pyopenssl-rhel8-python3.13 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption PyOpenSSL RHEL8 py3.13 + display_name: Encryption PyOpenSSL RHEL8 Python3.13 run_on: - rhel87-small batchtime: 10080 @@ -423,7 +423,7 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: Encryption PyOpenSSL RHEL8 pypy3.10 + display_name: Encryption PyOpenSSL RHEL8 PyPy3.10 run_on: - rhel87-small batchtime: 10080 @@ -432,29 +432,29 @@ buildvariants: test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-rhel8-py3.10 + - name: encryption-rhel8-python3.10 tasks: - name: .sharded_cluster .auth .ssl .sync_async - display_name: Encryption RHEL8 py3.10 + display_name: Encryption RHEL8 Python3.10 run_on: - rhel87-small expansions: test_encryption: "true" PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: encryption-crypt_shared-rhel8-py3.11 + - name: encryption-crypt_shared-rhel8-python3.11 tasks: - name: .replica_set .noauth .ssl .sync_async - display_name: Encryption crypt_shared RHEL8 py3.11 + display_name: Encryption crypt_shared RHEL8 Python3.11 run_on: - rhel87-small expansions: test_encryption: "true" test_crypt_shared: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: encryption-pyopenssl-rhel8-py3.12 + - name: encryption-pyopenssl-rhel8-python3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Encryption PyOpenSSL RHEL8 py3.12 + display_name: Encryption PyOpenSSL RHEL8 Python3.12 run_on: - rhel87-small expansions: @@ -464,16 +464,16 @@ buildvariants: - name: encryption-rhel8-pypy3.9 tasks: - name: .sharded_cluster .auth .ssl .sync_async - display_name: Encryption RHEL8 pypy3.9 + display_name: Encryption RHEL8 PyPy3.9 run_on: - rhel87-small expansions: test_encryption: "true" PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: encryption-macos-py3.9 + - name: encryption-macos-python3.9 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption macOS py3.9 + display_name: Encryption macOS Python3.9 run_on: - macos-14 batchtime: 10080 @@ -481,10 +481,10 @@ buildvariants: test_encryption: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-macos-py3.13 + - name: encryption-macos-python3.13 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption macOS py3.13 + display_name: Encryption macOS Python3.13 run_on: - macos-14 batchtime: 10080 @@ -492,10 +492,10 @@ buildvariants: test_encryption: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-macos-py3.9 + - name: encryption-crypt_shared-macos-python3.9 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared macOS py3.9 + display_name: Encryption crypt_shared macOS Python3.9 run_on: - macos-14 batchtime: 10080 @@ -504,10 +504,10 @@ buildvariants: test_crypt_shared: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-macos-py3.13 + - name: encryption-crypt_shared-macos-python3.13 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared macOS py3.13 + display_name: Encryption crypt_shared macOS Python3.13 run_on: - macos-14 batchtime: 10080 @@ -516,10 +516,10 @@ buildvariants: test_crypt_shared: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-win64-py3.9 + - name: encryption-win64-python3.9 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption Win64 py3.9 + display_name: Encryption Win64 Python3.9 run_on: - windows-64-vsMulti-small batchtime: 10080 @@ -527,10 +527,10 @@ buildvariants: test_encryption: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - - name: encryption-win64-py3.13 + - name: encryption-win64-python3.13 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption Win64 py3.13 + display_name: Encryption Win64 Python3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 @@ -538,10 +538,10 @@ buildvariants: test_encryption: "true" PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] - - name: encryption-crypt_shared-win64-py3.9 + - name: encryption-crypt_shared-win64-python3.9 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared Win64 py3.9 + display_name: Encryption crypt_shared Win64 Python3.9 run_on: - windows-64-vsMulti-small batchtime: 10080 @@ -550,10 +550,10 @@ buildvariants: test_crypt_shared: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - - name: encryption-crypt_shared-win64-py3.13 + - name: encryption-crypt_shared-win64-python3.13 tasks: - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared Win64 py3.13 + display_name: Encryption crypt_shared Win64 Python3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 @@ -564,46 +564,46 @@ buildvariants: tags: [encryption_tag] # Enterprise auth tests - - name: auth-enterprise-macos-py3.9-auth + - name: auth-enterprise-macos-python3.9-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise macOS py3.9 Auth + display_name: Auth Enterprise macOS Python3.9 Auth run_on: - macos-14 expansions: AUTH: auth PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: auth-enterprise-rhel8-py3.10-auth + - name: auth-enterprise-rhel8-python3.10-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 py3.10 Auth + display_name: Auth Enterprise RHEL8 Python3.10 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: auth-enterprise-rhel8-py3.11-auth + - name: auth-enterprise-rhel8-python3.11-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 py3.11 Auth + display_name: Auth Enterprise RHEL8 Python3.11 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: auth-enterprise-rhel8-py3.12-auth + - name: auth-enterprise-rhel8-python3.12-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 py3.12 Auth + display_name: Auth Enterprise RHEL8 Python3.12 Auth run_on: - rhel87-small expansions: AUTH: auth PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: auth-enterprise-win64-py3.13-auth + - name: auth-enterprise-win64-python3.13-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise Win64 py3.13 Auth + display_name: Auth Enterprise Win64 Python3.13 Auth run_on: - windows-64-vsMulti-small expansions: @@ -612,7 +612,7 @@ buildvariants: - name: auth-enterprise-rhel8-pypy3.9-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 pypy3.9 Auth + display_name: Auth Enterprise RHEL8 PyPy3.9 Auth run_on: - rhel87-small expansions: @@ -621,7 +621,7 @@ buildvariants: - name: auth-enterprise-rhel8-pypy3.10-auth tasks: - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 pypy3.10 Auth + display_name: Auth Enterprise RHEL8 PyPy3.10 Auth run_on: - rhel87-small expansions: @@ -629,10 +629,10 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Green framework tests - - name: green-eventlet-rhel8-py3.9 + - name: green-eventlet-rhel8-python3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Green Eventlet RHEL8 py3.9 + display_name: Green Eventlet RHEL8 Python3.9 run_on: - rhel87-small expansions: @@ -640,10 +640,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: green-gevent-rhel8-py3.9 + - name: green-gevent-rhel8-python3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Green Gevent RHEL8 py3.9 + display_name: Green Gevent RHEL8 Python3.9 run_on: - rhel87-small expansions: @@ -651,10 +651,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: green-eventlet-rhel8-py3.12 + - name: green-eventlet-rhel8-python3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Green Eventlet RHEL8 py3.12 + display_name: Green Eventlet RHEL8 Python3.12 run_on: - rhel87-small expansions: @@ -662,10 +662,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: green-gevent-rhel8-py3.12 + - name: green-gevent-rhel8-python3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Green Gevent RHEL8 py3.12 + display_name: Green Gevent RHEL8 Python3.12 run_on: - rhel87-small expansions: @@ -675,87 +675,87 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 # Load balancer tests - - name: load-balancer-rhel8-v6.0-py3.9 + - name: load-balancer-rhel8-v6.0-python3.9 tasks: - name: .load-balancer - display_name: Load Balancer RHEL8 v6.0 py3.9 + display_name: Load Balancer RHEL8 v6.0 Python3.9 run_on: - rhel87-small batchtime: 10080 expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "6.0" - - name: load-balancer-rhel8-v7.0-py3.9 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: load-balancer-rhel8-v7.0-python3.9 tasks: - name: .load-balancer - display_name: Load Balancer RHEL8 v7.0 py3.9 + display_name: Load Balancer RHEL8 v7.0 Python3.9 run_on: - rhel87-small batchtime: 10080 expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "7.0" - - name: load-balancer-rhel8-v8.0-py3.9 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: load-balancer-rhel8-v8.0-python3.9 tasks: - name: .load-balancer - display_name: Load Balancer RHEL8 v8.0 py3.9 + display_name: Load Balancer RHEL8 v8.0 Python3.9 run_on: - rhel87-small batchtime: 10080 expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "8.0" - - name: load-balancer-rhel8-rapid-py3.9 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: load-balancer-rhel8-rapid-python3.9 tasks: - name: .load-balancer - display_name: Load Balancer RHEL8 rapid py3.9 + display_name: Load Balancer RHEL8 rapid Python3.9 run_on: - rhel87-small batchtime: 10080 expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: rapid - - name: load-balancer-rhel8-latest-py3.9 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: load-balancer-rhel8-latest-python3.9 tasks: - name: .load-balancer - display_name: Load Balancer RHEL8 latest py3.9 + display_name: Load Balancer RHEL8 latest Python3.9 run_on: - rhel87-small batchtime: 10080 expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: latest + PYTHON_BINARY: /opt/python/3.9/bin/python3 # Mockupdb tests - - name: mockupdb-rhel8-py3.9 + - name: mockupdb-rhel8-python3.9 tasks: - name: mockupdb - display_name: MockupDB RHEL8 py3.9 + display_name: MockupDB RHEL8 Python3.9 run_on: - rhel87-small expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Mod wsgi tests - - name: mod_wsgi-ubuntu-22-py3.9 + - name: mod_wsgi-ubuntu-22-python3.9 tasks: - name: mod-wsgi-standalone - name: mod-wsgi-replica-set - name: mod-wsgi-embedded-mode-standalone - name: mod-wsgi-embedded-mode-replica-set - display_name: mod_wsgi Ubuntu-22 py3.9 + display_name: mod_wsgi Ubuntu-22 Python3.9 run_on: - ubuntu2204-small expansions: MOD_WSGI_VERSION: "4" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: mod_wsgi-ubuntu-22-py3.13 + - name: mod_wsgi-ubuntu-22-python3.13 tasks: - name: mod-wsgi-standalone - name: mod-wsgi-replica-set - name: mod-wsgi-embedded-mode-standalone - name: mod-wsgi-embedded-mode-replica-set - display_name: mod_wsgi Ubuntu-22 py3.13 + display_name: mod_wsgi Ubuntu-22 Python3.13 run_on: - ubuntu2204-small expansions: @@ -763,46 +763,46 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # No c ext tests - - name: no-c-ext-rhel8-py3.9 + - name: no-c-ext-rhel8-python3.9 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: No C Ext RHEL8 py3.9 + display_name: No C Ext RHEL8 Python3.9 run_on: - rhel87-small expansions: NO_EXT: "1" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: no-c-ext-rhel8-py3.10 + - name: no-c-ext-rhel8-python3.10 tasks: - name: .replica_set .noauth .nossl .sync_async - display_name: No C Ext RHEL8 py3.10 + display_name: No C Ext RHEL8 Python3.10 run_on: - rhel87-small expansions: NO_EXT: "1" PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: no-c-ext-rhel8-py3.11 + - name: no-c-ext-rhel8-python3.11 tasks: - name: .sharded_cluster .noauth .nossl .sync_async - display_name: No C Ext RHEL8 py3.11 + display_name: No C Ext RHEL8 Python3.11 run_on: - rhel87-small expansions: NO_EXT: "1" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: no-c-ext-rhel8-py3.12 + - name: no-c-ext-rhel8-python3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: No C Ext RHEL8 py3.12 + display_name: No C Ext RHEL8 Python3.12 run_on: - rhel87-small expansions: NO_EXT: "1" PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: no-c-ext-rhel8-py3.13 + - name: no-c-ext-rhel8-python3.13 tasks: - name: .replica_set .noauth .nossl .sync_async - display_name: No C Ext RHEL8 py3.13 + display_name: No C Ext RHEL8 Python3.13 run_on: - rhel87-small expansions: @@ -810,10 +810,10 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Ocsp tests - - name: ocsp-rhel8-v4.4-py3.9 + - name: ocsp-rhel8-v4.4-python3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 v4.4 py3.9 + display_name: OCSP RHEL8 v4.4 Python3.9 run_on: - rhel87-small batchtime: 20160 @@ -821,12 +821,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/3.9/bin/python3 VERSION: "4.4" - - name: ocsp-rhel8-v5.0-py3.10 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: ocsp-rhel8-v5.0-python3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 v5.0 py3.10 + display_name: OCSP RHEL8 v5.0 Python3.10 run_on: - rhel87-small batchtime: 20160 @@ -834,12 +834,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/3.10/bin/python3 VERSION: "5.0" - - name: ocsp-rhel8-v6.0-py3.11 + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: ocsp-rhel8-v6.0-python3.11 tasks: - name: .ocsp - display_name: OCSP RHEL8 v6.0 py3.11 + display_name: OCSP RHEL8 v6.0 Python3.11 run_on: - rhel87-small batchtime: 20160 @@ -847,12 +847,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/3.11/bin/python3 VERSION: "6.0" - - name: ocsp-rhel8-v7.0-py3.12 + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: ocsp-rhel8-v7.0-python3.12 tasks: - name: .ocsp - display_name: OCSP RHEL8 v7.0 py3.12 + display_name: OCSP RHEL8 v7.0 Python3.12 run_on: - rhel87-small batchtime: 20160 @@ -860,12 +860,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/3.12/bin/python3 VERSION: "7.0" - - name: ocsp-rhel8-v8.0-py3.13 + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: ocsp-rhel8-v8.0-python3.13 tasks: - name: .ocsp - display_name: OCSP RHEL8 v8.0 py3.13 + display_name: OCSP RHEL8 v8.0 Python3.13 run_on: - rhel87-small batchtime: 20160 @@ -873,12 +873,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/3.13/bin/python3 VERSION: "8.0" + PYTHON_BINARY: /opt/python/3.13/bin/python3 - name: ocsp-rhel8-rapid-pypy3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 rapid pypy3.9 + display_name: OCSP RHEL8 rapid PyPy3.9 run_on: - rhel87-small batchtime: 20160 @@ -886,12 +886,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 VERSION: rapid + PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: ocsp-rhel8-latest-pypy3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 latest pypy3.10 + display_name: OCSP RHEL8 latest PyPy3.10 run_on: - rhel87-small batchtime: 20160 @@ -899,12 +899,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 VERSION: latest - - name: ocsp-win64-v4.4-py3.9 + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - name: ocsp-win64-v4.4-python3.9 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP Win64 v4.4 py3.9 + display_name: OCSP Win64 v4.4 Python3.9 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -912,12 +912,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: C:/python/Python39/python.exe VERSION: "4.4" - - name: ocsp-win64-v8.0-py3.13 + PYTHON_BINARY: C:/python/Python39/python.exe + - name: ocsp-win64-v8.0-python3.13 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP Win64 v8.0 py3.13 + display_name: OCSP Win64 v8.0 Python3.13 run_on: - windows-64-vsMulti-small batchtime: 20160 @@ -925,12 +925,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: C:/python/Python313/python.exe VERSION: "8.0" - - name: ocsp-macos-v4.4-py3.9 + PYTHON_BINARY: C:/python/Python313/python.exe + - name: ocsp-macos-v4.4-python3.9 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP macOS v4.4 py3.9 + display_name: OCSP macOS v4.4 Python3.9 run_on: - macos-14 batchtime: 20160 @@ -938,12 +938,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 VERSION: "4.4" - - name: ocsp-macos-v8.0-py3.13 + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 + - name: ocsp-macos-v8.0-python3.13 tasks: - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP macOS v8.0 py3.13 + display_name: OCSP macOS v8.0 Python3.13 run_on: - macos-14 batchtime: 20160 @@ -951,8 +951,8 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 VERSION: "8.0" + PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 # Oidc auth tests - name: auth-oidc-ubuntu-22 @@ -981,55 +981,55 @@ buildvariants: batchtime: 20160 # Pyopenssl tests - - name: pyopenssl-macos-py3.9 + - name: pyopenssl-macos-python3.9 tasks: - name: .replica_set .noauth .nossl .sync_async - name: .7.0 .noauth .nossl .sync_async - display_name: PyOpenSSL macOS py3.9 + display_name: PyOpenSSL macOS Python3.9 run_on: - macos-14 batchtime: 10080 expansions: test_pyopenssl: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: pyopenssl-rhel8-py3.10 + - name: pyopenssl-rhel8-python3.10 tasks: - name: .replica_set .auth .ssl .sync_async - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 py3.10 + display_name: PyOpenSSL RHEL8 Python3.10 run_on: - rhel87-small batchtime: 10080 expansions: test_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: pyopenssl-rhel8-py3.11 + - name: pyopenssl-rhel8-python3.11 tasks: - name: .replica_set .auth .ssl .sync_async - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 py3.11 + display_name: PyOpenSSL RHEL8 Python3.11 run_on: - rhel87-small batchtime: 10080 expansions: test_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: pyopenssl-rhel8-py3.12 + - name: pyopenssl-rhel8-python3.12 tasks: - name: .replica_set .auth .ssl .sync_async - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 py3.12 + display_name: PyOpenSSL RHEL8 Python3.12 run_on: - rhel87-small batchtime: 10080 expansions: test_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: pyopenssl-win64-py3.13 + - name: pyopenssl-win64-python3.13 tasks: - name: .replica_set .auth .ssl .sync_async - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL Win64 py3.13 + display_name: PyOpenSSL Win64 Python3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 @@ -1040,7 +1040,7 @@ buildvariants: tasks: - name: .replica_set .auth .ssl .sync_async - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 pypy3.9 + display_name: PyOpenSSL RHEL8 PyPy3.9 run_on: - rhel87-small batchtime: 10080 @@ -1051,7 +1051,7 @@ buildvariants: tasks: - name: .replica_set .auth .ssl .sync_async - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 pypy3.10 + display_name: PyOpenSSL RHEL8 PyPy3.10 run_on: - rhel87-small batchtime: 10080 @@ -1060,34 +1060,34 @@ buildvariants: PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Search index tests - - name: search-index-helpers-rhel8-py3.9 + - name: search-index-helpers-rhel8-python3.9 tasks: - name: test_atlas_task_group_search_indexes - display_name: Search Index Helpers RHEL8 py3.9 + display_name: Search Index Helpers RHEL8 Python3.9 run_on: - rhel87-small expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Server tests - - name: test-rhel8-py3.9-cov + - name: test-rhel8-python3.9-cov tasks: - name: .standalone .sync_async - name: .replica_set .sync_async - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 py3.9 cov" + display_name: "* Test RHEL8 Python3.9 cov" run_on: - rhel87-small expansions: COVERAGE: coverage PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [coverage_tag] - - name: test-rhel8-py3.13-cov + - name: test-rhel8-python3.13-cov tasks: - name: .standalone .sync_async - name: .replica_set .sync_async - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 py3.13 cov" + display_name: "* Test RHEL8 Python3.13 cov" run_on: - rhel87-small expansions: @@ -1099,41 +1099,41 @@ buildvariants: - name: .standalone .sync_async - name: .replica_set .sync_async - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 pypy3.10 cov" + display_name: "* Test RHEL8 PyPy3.10 cov" run_on: - rhel87-small expansions: COVERAGE: coverage PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [coverage_tag] - - name: test-rhel8-py3.10 + - name: test-rhel8-python3.10 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 py3.10" + display_name: "* Test RHEL8 Python3.10" run_on: - rhel87-small expansions: COVERAGE: coverage PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: test-rhel8-py3.11 + - name: test-rhel8-python3.11 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 py3.11" + display_name: "* Test RHEL8 Python3.11" run_on: - rhel87-small expansions: COVERAGE: coverage PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: test-rhel8-py3.12 + - name: test-rhel8-python3.12 tasks: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 py3.12" + display_name: "* Test RHEL8 Python3.12" run_on: - rhel87-small expansions: @@ -1144,35 +1144,33 @@ buildvariants: - name: .sharded_cluster .auth .ssl .sync_async - name: .replica_set .noauth .ssl .sync_async - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 pypy3.9" + display_name: "* Test RHEL8 PyPy3.9" run_on: - rhel87-small expansions: COVERAGE: coverage PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: test-macos-py3.9 + - name: test-macos-python3.9 tasks: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test macOS py3.9" + display_name: "* Test macOS Python3.9" run_on: - macos-14 expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-py3.13 + - name: test-macos-python3.13 tasks: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test macOS py3.13" + display_name: "* Test macOS Python3.13" run_on: - macos-14 expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-py3.9 + - name: test-macos-arm64-python3.9 tasks: - name: .sharded_cluster .auth .ssl .6.0 !.sync_async - name: .replica_set .noauth .ssl .6.0 !.sync_async @@ -1189,13 +1187,12 @@ buildvariants: - name: .sharded_cluster .auth .ssl .latest !.sync_async - name: .replica_set .noauth .ssl .latest !.sync_async - name: .standalone .noauth .nossl .latest !.sync_async - display_name: "* Test macOS Arm64 py3.9" + display_name: "* Test macOS Arm64 Python3.9" run_on: - macos-14-arm64 expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-py3.13 + - name: test-macos-arm64-python3.13 tasks: - name: .sharded_cluster .auth .ssl .6.0 !.sync_async - name: .replica_set .noauth .ssl .6.0 !.sync_async @@ -1212,62 +1209,57 @@ buildvariants: - name: .sharded_cluster .auth .ssl .latest !.sync_async - name: .replica_set .noauth .ssl .latest !.sync_async - name: .standalone .noauth .nossl .latest !.sync_async - display_name: "* Test macOS Arm64 py3.13" + display_name: "* Test macOS Arm64 Python3.13" run_on: - macos-14-arm64 expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-win64-py3.9 + - name: test-win64-python3.9 tasks: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win64 py3.9" + display_name: "* Test Win64 Python3.9" run_on: - windows-64-vsMulti-small expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-py3.13 + - name: test-win64-python3.13 tasks: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win64 py3.13" + display_name: "* Test Win64 Python3.13" run_on: - windows-64-vsMulti-small expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win32-py3.9 + - name: test-win32-python3.9 tasks: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win32 py3.9" + display_name: "* Test Win32 Python3.9" run_on: - windows-64-vsMulti-small expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-py3.13 + - name: test-win32-python3.13 tasks: - name: .sharded_cluster .auth .ssl !.sync_async - name: .replica_set .noauth .ssl !.sync_async - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win32 py3.13" + display_name: "* Test Win32 Python3.13" run_on: - windows-64-vsMulti-small expansions: - SKIP_CSOT_TESTS: "true" PYTHON_BINARY: C:/python/32/Python313/python.exe # Serverless tests - - name: serverless-rhel8-py3.9 + - name: serverless-rhel8-python3.9 tasks: - name: serverless_task_group - display_name: Serverless RHEL8 py3.9 + display_name: Serverless RHEL8 Python3.9 run_on: - rhel87-small batchtime: 10080 @@ -1276,10 +1268,10 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: serverless-rhel8-py3.13 + - name: serverless-rhel8-python3.13 tasks: - name: serverless_task_group - display_name: Serverless RHEL8 py3.13 + display_name: Serverless RHEL8 Python3.13 run_on: - rhel87-small batchtime: 10080 @@ -1290,7 +1282,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Stable api tests - - name: stable-api-require-v1-rhel8-py3.9-auth + - name: stable-api-require-v1-rhel8-python3.9-auth tasks: - name: .standalone .5.0 .noauth .nossl .sync_async - name: .standalone .6.0 .noauth .nossl .sync_async @@ -1298,7 +1290,7 @@ buildvariants: - name: .standalone .8.0 .noauth .nossl .sync_async - name: .standalone .rapid .noauth .nossl .sync_async - name: .standalone .latest .noauth .nossl .sync_async - display_name: Stable API require v1 RHEL8 py3.9 Auth + display_name: Stable API require v1 RHEL8 Python3.9 Auth run_on: - rhel87-small expansions: @@ -1307,7 +1299,7 @@ buildvariants: MONGODB_API_VERSION: "1" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [versionedApi_tag] - - name: stable-api-accept-v2-rhel8-py3.9-auth + - name: stable-api-accept-v2-rhel8-python3.9-auth tasks: - name: .standalone .5.0 .noauth .nossl .sync_async - name: .standalone .6.0 .noauth .nossl .sync_async @@ -1315,7 +1307,7 @@ buildvariants: - name: .standalone .8.0 .noauth .nossl .sync_async - name: .standalone .rapid .noauth .nossl .sync_async - name: .standalone .latest .noauth .nossl .sync_async - display_name: Stable API accept v2 RHEL8 py3.9 Auth + display_name: Stable API accept v2 RHEL8 Python3.9 Auth run_on: - rhel87-small expansions: @@ -1323,7 +1315,7 @@ buildvariants: ORCHESTRATION_FILE: versioned-api-testing.json PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [versionedApi_tag] - - name: stable-api-require-v1-rhel8-py3.13-auth + - name: stable-api-require-v1-rhel8-python3.13-auth tasks: - name: .standalone .5.0 .noauth .nossl .sync_async - name: .standalone .6.0 .noauth .nossl .sync_async @@ -1331,7 +1323,7 @@ buildvariants: - name: .standalone .8.0 .noauth .nossl .sync_async - name: .standalone .rapid .noauth .nossl .sync_async - name: .standalone .latest .noauth .nossl .sync_async - display_name: Stable API require v1 RHEL8 py3.13 Auth + display_name: Stable API require v1 RHEL8 Python3.13 Auth run_on: - rhel87-small expansions: @@ -1340,7 +1332,7 @@ buildvariants: MONGODB_API_VERSION: "1" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [versionedApi_tag] - - name: stable-api-accept-v2-rhel8-py3.13-auth + - name: stable-api-accept-v2-rhel8-python3.13-auth tasks: - name: .standalone .5.0 .noauth .nossl .sync_async - name: .standalone .6.0 .noauth .nossl .sync_async @@ -1348,7 +1340,7 @@ buildvariants: - name: .standalone .8.0 .noauth .nossl .sync_async - name: .standalone .rapid .noauth .nossl .sync_async - name: .standalone .latest .noauth .nossl .sync_async - display_name: Stable API accept v2 RHEL8 py3.13 Auth + display_name: Stable API accept v2 RHEL8 Python3.13 Auth run_on: - rhel87-small expansions: @@ -1358,7 +1350,7 @@ buildvariants: tags: [versionedApi_tag] # Storage engine tests - - name: storage-inmemory-rhel8-py3.9 + - name: storage-inmemory-rhel8-python3.9 tasks: - name: .standalone .noauth .nossl .4.0 .sync_async - name: .standalone .noauth .nossl .4.4 .sync_async @@ -1368,17 +1360,17 @@ buildvariants: - name: .standalone .noauth .nossl .8.0 .sync_async - name: .standalone .noauth .nossl .rapid .sync_async - name: .standalone .noauth .nossl .latest .sync_async - display_name: Storage InMemory RHEL8 py3.9 + display_name: Storage InMemory RHEL8 Python3.9 run_on: - rhel87-small expansions: STORAGE_ENGINE: inmemory PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: storage-mmapv1-rhel8-py3.9 + - name: storage-mmapv1-rhel8-python3.9 tasks: - name: .standalone .4.0 .noauth .nossl .sync_async - name: .replica_set .4.0 .noauth .nossl .sync_async - display_name: Storage MMAPv1 RHEL8 py3.9 + display_name: Storage MMAPv1 RHEL8 Python3.9 run_on: - rhel87-small expansions: diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 0c9c8bb03a..98d400037c 100644 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -46,6 +46,11 @@ export PROJECT="$project" export PIP_QUIET=1 EOT +# Skip CSOT tests on non-linux platforms. +if [ "$(uname -s)" != "Linux" ]; then + echo "export SKIP_CSOT_TESTS=1" >> $SCRIPT_DIR/env.sh +fi + # Add these expansions to make it easier to call out tests scripts from the EVG yaml cat < expansion.yml DRIVERS_TOOLS: "$DRIVERS_TOOLS" diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 05529ecb25..b7187b50db 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -57,17 +57,26 @@ class Host: name: str run_on: str display_name: str + variables: dict[str, str] | None # Hosts with toolchains. -HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8") -HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64") -HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32") -HOSTS["macos"] = Host("macos", "macos-14", "macOS") -HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64") -HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20") -HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22") -HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7") +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", dict()) +HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", dict()) +HOSTS["macos"] = Host("macos", "macos-14", "macOS", dict()) +HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", dict()) +HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict()) +HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) +HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) +DEFAULT_HOST = HOSTS["rhel8"] + +# Other hosts +OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64"] +for name, run_on in zip( + OTHER_HOSTS, ["rhel92-fips", "rhel8-zseries-small", "rhel8-power-small", "rhel82-arm64-small"] +): + HOSTS[name] = Host(name, run_on, name, dict()) ############## @@ -75,57 +84,77 @@ class Host: ############## -def create_variant( +def create_variant_generic( task_names: list[str], display_name: str, *, - python: str | None = None, - version: str | None = None, - host: str | None = None, + host: Host | None = None, + default_run_on="rhel87-small", + expansions: dict | None = None, **kwargs: Any, ) -> BuildVariant: """Create a build variant for the given inputs.""" task_refs = [EvgTaskRef(name=n) for n in task_names] - kwargs.setdefault("expansions", dict()) - expansions = kwargs.pop("expansions", dict()).copy() + expansions = expansions and expansions.copy() or dict() if "run_on" in kwargs: run_on = kwargs.pop("run_on") + elif host: + run_on = [host.run_on] + if host.variables: + expansions.update(host.variables) else: - host = host or "rhel8" - run_on = [HOSTS[host].run_on] + run_on = [default_run_on] + if isinstance(run_on, str): + run_on = [run_on] name = display_name.replace(" ", "-").replace("*-", "").lower() - if python: - expansions["PYTHON_BINARY"] = get_python_binary(python, host) - if version: - expansions["VERSION"] = version - expansions = expansions or None return BuildVariant( name=name, display_name=display_name, tasks=task_refs, - expansions=expansions, + expansions=expansions or None, run_on=run_on, **kwargs, ) -def get_python_binary(python: str, host: str) -> str: +def create_variant( + task_names: list[str], + display_name: str, + *, + version: str | None = None, + host: Host | None = None, + python: str | None = None, + expansions: dict | None = None, + **kwargs: Any, +) -> BuildVariant: + expansions = expansions and expansions.copy() or dict() + if version: + expansions["VERSION"] = version + if python: + expansions["PYTHON_BINARY"] = get_python_binary(python, host) + return create_variant_generic( + task_names, display_name, version=version, host=host, expansions=expansions, **kwargs + ) + + +def get_python_binary(python: str, host: Host) -> str: """Get the appropriate python binary given a python version and host.""" - if host in ["win64", "win32"]: - if host == "win32": + name = host.name + if name in ["win64", "win32"]: + if name == "win32": base = "C:/python/32" else: base = "C:/python" python = python.replace(".", "") return f"{base}/Python{python}/python.exe" - if host in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: + if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: return f"/opt/python/{python}/bin/python3" - if host in ["macos", "macos-arm64"]: + if name in ["macos", "macos-arm64"]: return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" - raise ValueError(f"no match found for python {python} on {host}") + raise ValueError(f"no match found for python {python} on {name}") def get_versions_from(min_version: str) -> list[str]: @@ -146,11 +175,11 @@ def get_versions_until(max_version: str) -> list[str]: return versions -def get_display_name(base: str, host: str | None = None, **kwargs) -> str: +def get_display_name(base: str, host: Host | None = None, **kwargs) -> str: """Get the display name of a variant.""" display_name = base if host is not None: - display_name += f" {HOSTS[host].display_name}" + display_name += f" {host.display_name}" version = kwargs.pop("VERSION", None) version = version or kwargs.pop("version", None) if version: @@ -161,7 +190,9 @@ def get_display_name(base: str, host: str | None = None, **kwargs) -> str: name = value if key.lower() == "python": if not value.startswith("pypy"): - name = f"py{value}" + name = f"Python{value}" + else: + name = f"PyPy{value.replace('pypy', '')}" elif key.lower() in DISPLAY_LOOKUP: name = DISPLAY_LOOKUP[key.lower()][value] else: @@ -203,10 +234,10 @@ def create_ocsp_variants() -> list[BuildVariant]: expansions = dict(AUTH="noauth", SSL="ssl", TOPOLOGY="server") base_display = "OCSP" - # OCSP tests on rhel8 with all servers v4.4+ and all python versions. + # OCSP tests on default host with all servers v4.4+ and all python versions. versions = [v for v in ALL_VERSIONS if v != "4.0"] for version, python in zip_cycle(versions, ALL_PYTHONS): - host = "rhel8" + host = DEFAULT_HOST variant = create_variant( [".ocsp"], get_display_name(base_display, host, version=version, python=python), @@ -220,7 +251,8 @@ def create_ocsp_variants() -> list[BuildVariant]: # OCSP tests on Windows and MacOS. # MongoDB servers on these hosts do not staple OCSP responses and only support RSA. - for host, version in product(["win64", "macos"], ["4.4", "8.0"]): + for host_name, version in product(["win64", "macos"], ["4.4", "8.0"]): + host = HOSTS[host_name] python = CPYTHONS[0] if version == "4.4" else CPYTHONS[-1] variant = create_variant( [".ocsp-rsa !.ocsp-staple"], @@ -240,7 +272,7 @@ def create_server_variants() -> list[BuildVariant]: variants = [] # Run the full matrix on linux with min and max CPython, and latest pypy. - host = "rhel8" + host = DEFAULT_HOST # Prefix the display name with an asterisk so it is sorted first. base_display_name = "* Test" for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: @@ -270,23 +302,17 @@ def create_server_variants() -> list[BuildVariant]: variants.append(variant) # Test a subset on each of the other platforms. - for host in ("macos", "macos-arm64", "win64", "win32"): + for host_name in ("macos", "macos-arm64", "win64", "win32"): for python in MIN_MAX_PYTHON: tasks = [f"{t} !.sync_async" for t in SUB_TASKS] # MacOS arm64 only works on server versions 6.0+ - if host == "macos-arm64": + if host_name == "macos-arm64": tasks = [] for version in get_versions_from("6.0"): tasks.extend(f"{t} .{version} !.sync_async" for t in SUB_TASKS) - expansions = dict(SKIP_CSOT_TESTS="true") - display_name = get_display_name(base_display_name, host, python=python, **expansions) - variant = create_variant( - tasks, - display_name, - python=python, - host=host, - expansions=expansions, - ) + host = HOSTS[host_name] + display_name = get_display_name(base_display_name, host, python=python) + variant = create_variant(tasks, display_name, python=python, host=host) variants.append(variant) return variants @@ -305,7 +331,7 @@ def get_encryption_expansions(encryption): expansions["test_encryption_pyopenssl"] = "true" return expansions - host = "rhel8" + host = DEFAULT_HOST # Test against all server versions for the three main python versions. encryptions = ["Encryption", "Encryption crypt_shared", "Encryption PyOpenSSL"] @@ -339,7 +365,8 @@ def get_encryption_expansions(encryption): # Test on macos and linux on one server version and topology for min and max python. encryptions = ["Encryption", "Encryption crypt_shared"] task_names = [".latest .replica_set .sync_async"] - for host, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): + for host_name, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): + host = HOSTS[host_name] expansions = get_encryption_expansions(encryption) display_name = get_display_name(encryption, host, python=python, **expansions) variant = create_variant( @@ -357,7 +384,7 @@ def get_encryption_expansions(encryption): def create_load_balancer_variants(): # Load balancer tests - run all supported server versions using the lowest supported python. - host = "rhel8" + host = DEFAULT_HOST batchtime = BATCHTIME_WEEK versions = get_versions_from("6.0") variants = [] @@ -379,7 +406,7 @@ def create_load_balancer_variants(): def create_compression_variants(): # Compression tests - standalone versions of each server, across python versions, with and without c extensions. # PyPy interpreters are always tested without extensions. - host = "rhel8" + host = DEFAULT_HOST base_task = ".standalone .noauth .nossl .sync_async" task_names = dict(snappy=[base_task], zlib=[base_task], zstd=[f"{base_task} !.4.0"]) variants = [] @@ -423,11 +450,11 @@ def create_enterprise_auth_variants(): # All python versions across platforms. for python in ALL_PYTHONS: if python == CPYTHONS[0]: - host = "macos" + host = HOSTS["macos"] elif python == CPYTHONS[-1]: - host = "win64" + host = HOSTS["win64"] else: - host = "rhel8" + host = DEFAULT_HOST display_name = get_display_name("Auth Enterprise", host, python=python, **expansions) variant = create_variant( ["test-enterprise-auth"], display_name, host=host, python=python, expansions=expansions @@ -448,11 +475,11 @@ def create_pyopenssl_variants(): auth = "noauth" if python == CPYTHONS[0] else "auth" ssl = "nossl" if auth == "noauth" else "ssl" if python == CPYTHONS[0]: - host = "macos" + host = HOSTS["macos"] elif python == CPYTHONS[-1]: - host = "win64" + host = HOSTS["win64"] else: - host = "rhel8" + host = DEFAULT_HOST display_name = get_display_name(base_name, host, python=python) variant = create_variant( @@ -469,7 +496,7 @@ def create_pyopenssl_variants(): def create_storage_engine_variants(): - host = "rhel8" + host = DEFAULT_HOST engines = ["InMemory", "MMAPv1"] variants = [] for engine in engines: @@ -492,7 +519,7 @@ def create_storage_engine_variants(): def create_stable_api_variants(): - host = "rhel8" + host = DEFAULT_HOST tags = ["versionedApi_tag"] tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0")] variants = [] @@ -526,7 +553,7 @@ def create_stable_api_variants(): def create_green_framework_variants(): variants = [] tasks = [".standalone .noauth .nossl .sync_async"] - host = "rhel8" + host = DEFAULT_HOST for python, framework in product([CPYTHONS[0], CPYTHONS[-2]], ["eventlet", "gevent"]): expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") display_name = get_display_name(f"Green {framework.capitalize()}", host, python=python) @@ -539,7 +566,7 @@ def create_green_framework_variants(): def create_no_c_ext_variants(): variants = [] - host = "rhel8" + host = DEFAULT_HOST for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): tasks = [f".{topology} .noauth .nossl .sync_async"] expansions = dict() @@ -554,7 +581,7 @@ def create_no_c_ext_variants(): def create_atlas_data_lake_variants(): variants = [] - host = "ubuntu22" + host = HOSTS["ubuntu22"] for python, c_ext in product(MIN_MAX_PYTHON, C_EXTS): tasks = ["atlas-data-lake-tests"] expansions = dict(AUTH="auth") @@ -569,7 +596,7 @@ def create_atlas_data_lake_variants(): def create_mod_wsgi_variants(): variants = [] - host = "ubuntu22" + host = HOSTS["ubuntu22"] tasks = [ "mod-wsgi-standalone", "mod-wsgi-replica-set", @@ -587,7 +614,7 @@ def create_mod_wsgi_variants(): def create_disable_test_commands_variants(): - host = "rhel8" + host = DEFAULT_HOST expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") python = CPYTHONS[0] display_name = get_display_name("Disable test commands", host, python=python) @@ -596,7 +623,7 @@ def create_disable_test_commands_variants(): def create_serverless_variants(): - host = "rhel8" + host = DEFAULT_HOST batchtime = BATCHTIME_WEEK expansions = dict(test_serverless="true", AUTH="auth", SSL="ssl") tasks = ["serverless_task_group"] @@ -617,10 +644,11 @@ def create_serverless_variants(): def create_oidc_auth_variants(): variants = [] other_tasks = ["testazureoidc_task_group", "testgcpoidc_task_group", "testk8soidc_task_group"] - for host in ["ubuntu22", "macos", "win64"]: + for host_name in ["ubuntu22", "macos", "win64"]: tasks = ["testoidc_task_group"] - if host == "ubuntu22": + if host_name == "ubuntu22": tasks += other_tasks + host = HOSTS[host_name] variants.append( create_variant( tasks, @@ -633,7 +661,7 @@ def create_oidc_auth_variants(): def create_search_index_variants(): - host = "rhel8" + host = DEFAULT_HOST python = CPYTHONS[0] return [ create_variant( @@ -646,7 +674,7 @@ def create_search_index_variants(): def create_mockupdb_variants(): - host = "rhel8" + host = DEFAULT_HOST python = CPYTHONS[0] return [ create_variant( @@ -659,7 +687,7 @@ def create_mockupdb_variants(): def create_doctests_variants(): - host = "rhel8" + host = DEFAULT_HOST python = CPYTHONS[0] return [ create_variant( @@ -672,7 +700,7 @@ def create_doctests_variants(): def create_atlas_connect_variants(): - host = "rhel8" + host = DEFAULT_HOST return [ create_variant( ["atlas-connect"], @@ -696,13 +724,14 @@ def create_aws_auth_variants(): "aws-auth-test-latest", ] - for host, python in product(["ubuntu20", "win64", "macos"], MIN_MAX_PYTHON): + for host_name, python in product(["ubuntu20", "win64", "macos"], MIN_MAX_PYTHON): expansions = dict() - if host != "ubuntu20": + if host_name != "ubuntu20": expansions["skip_ECS_auth_test"] = "true" - if host == "macos": + if host_name == "macos": expansions["skip_EC2_auth_test"] = "true" expansions["skip_web_identity_auth_test"] = "true" + host = HOSTS[host_name] variant = create_variant( tasks, get_display_name("Auth AWS", host, python=python), @@ -719,11 +748,11 @@ def create_alternative_hosts_variants(): batchtime = BATCHTIME_WEEK variants = [] - host = "rhel7" + host = HOSTS["rhel7"] variants.append( create_variant( [".5.0 .standalone !.sync_async"], - get_display_name("OpenSSL 1.0.2", "rhel7", python=CPYTHONS[0], **expansions), + get_display_name("OpenSSL 1.0.2", host, python=CPYTHONS[0], **expansions), host=host, python=CPYTHONS[0], batchtime=batchtime, @@ -731,16 +760,15 @@ def create_alternative_hosts_variants(): ) ) - hosts = ["rhel92-fips", "rhel8-zseries-small", "rhel8-power-small", "rhel82-arm64-small"] - host_names = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64"] - for host, host_name in zip(hosts, host_names): + for host_name in OTHER_HOSTS: + host = HOSTS[host_name] variants.append( create_variant( [".6.0 .standalone !.sync_async"], - display_name=get_display_name(f"Other hosts {host_name}", **expansions), + display_name=get_display_name("Other hosts", host, **expansions), expansions=expansions, batchtime=batchtime, - run_on=[host], + host=host, ) ) return variants From 5e5528238ca2118e7dd3737ee144f12bc8187f34 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Nov 2024 13:24:22 -0600 Subject: [PATCH 1615/2111] PYTHON-4817 Revert import guard on asyncio (#1894) --- pymongo/__init__.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 6416f939e8..58f6ff338b 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -88,6 +88,7 @@ from pymongo import _csot from pymongo._version import __version__, get_version_string, version_tuple +from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION, has_c from pymongo.cursor import CursorType from pymongo.operations import ( @@ -104,14 +105,6 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -try: - from pymongo.asynchronous.mongo_client import AsyncMongoClient -except Exception as e: - # PYTHON-4781: Importing asyncio can fail on Windows. - import warnings as _warnings - - _warnings.warn(f"Failed to import Async PyMongo: {e!r}", ImportWarning, stacklevel=2) - version = __version__ """Current version of PyMongo.""" From 63c3f8aedec0ebac01f46929f83d69f1e2b6dfcd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Nov 2024 13:25:42 -0600 Subject: [PATCH 1616/2111] PYTHON-4959 Adopt zizmor GitHub Actions security scanner (#2001) --- .github/workflows/codeql.yml | 1 + .github/workflows/dist.yml | 2 ++ .github/workflows/test-python.yml | 14 ++++++++++++++ .github/workflows/zizmor.yml | 32 +++++++++++++++++++++++++++++++ 4 files changed, 49 insertions(+) create mode 100644 .github/workflows/zizmor.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 2dc070d7c6..e620cb1801 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -39,6 +39,7 @@ jobs: uses: actions/checkout@v4 with: ref: ${{ inputs.ref }} + persist-credentials: false - uses: actions/setup-python@v5 # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 858d269e08..a4c5a8279b 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -48,6 +48,7 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false ref: ${{ inputs.ref }} - uses: actions/setup-python@v5 @@ -106,6 +107,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + persist-credentials: false ref: ${{ inputs.ref }} - uses: actions/setup-python@v5 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 40991440d3..12cfaa4b27 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -20,6 +20,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: actions/setup-python@v5 with: python-version: "3.9" @@ -55,6 +57,8 @@ jobs: name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - if: ${{ matrix.python-version == '3.13t' }} name: Setup free-threaded Python uses: deadsnakes/action@v3.2.0 @@ -99,6 +103,8 @@ jobs: name: DocTest steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - name: Setup Python uses: actions/setup-python@v5 with: @@ -121,6 +127,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: actions/setup-python@v5 with: cache: 'pip' @@ -139,6 +147,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: actions/setup-python@v5 with: cache: 'pip' @@ -160,6 +170,8 @@ jobs: python: ["3.9", "3.11"] steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: actions/setup-python@v5 with: python-version: "${{matrix.python}}" @@ -177,6 +189,8 @@ jobs: name: "Make an sdist" steps: - uses: actions/checkout@v4 + with: + persist-credentials: false - uses: actions/setup-python@v5 with: cache: 'pip' diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 0000000000..31afeb6655 --- /dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,32 @@ +name: GitHub Actions Security Analysis with zizmor 🌈 + +on: + push: + branches: ["master"] + pull_request: + branches: ["**"] + +jobs: + zizmor: + name: zizmor latest via Cargo + runs-on: ubuntu-latest + permissions: + security-events: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Setup Rust + uses: actions-rust-lang/setup-rust-toolchain@v1 + - name: Get zizmor + run: cargo install zizmor + - name: Run zizmor 🌈 + run: zizmor --format sarif . > results.sarif + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif + category: zizmor From 72a51092cd84297c495fb13049a13abafb704bb2 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 12 Nov 2024 09:32:41 -0500 Subject: [PATCH 1617/2111] PYTHON-4915 - Add guidance on adding _id fields to documents to CRUD spec, reorder client.bulk_write generated _id fields (#1976) --- pymongo/message.py | 13 ++- test/asynchronous/test_client_bulk_write.py | 14 +++ test/mockupdb/test_id_ordering.py | 94 +++++++++++++++++++++ test/test_client_bulk_write.py | 14 +++ 4 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 test/mockupdb/test_id_ordering.py diff --git a/pymongo/message.py b/pymongo/message.py index de77ccd382..3e2ae00ae7 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,6 +24,7 @@ import datetime import random import struct +from collections import ChainMap from io import BytesIO as _BytesIO from typing import ( TYPE_CHECKING, @@ -1111,8 +1112,18 @@ def _check_doc_size_limits( # key and the index of its namespace within ns_info as its value. op_doc[op_type] = ns_info[namespace] # type: ignore[index] + # Since the data document itself is nested within the insert document + # it won't be automatically re-ordered by the BSON conversion. + # We use ChainMap here to make the _id field the first field instead. + doc_to_encode = op_doc + if real_op_type == "insert": + doc = op_doc["document"] + if not isinstance(doc, RawBSONDocument): + doc_to_encode = op_doc.copy() # type: ignore[attr-defined] # Shallow copy + doc_to_encode["document"] = ChainMap(doc, {"_id": doc["_id"]}) # type: ignore[index] + # Encode current operation doc and, if newly added, namespace doc. - op_doc_encoded = _dict_to_bson(op_doc, False, opts) + op_doc_encoded = _dict_to_bson(doc_to_encode, False, opts) op_length = len(op_doc_encoded) if ns_doc: ns_doc_encoded = _dict_to_bson(ns_doc, False, opts) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 5f6b3353e8..01294402de 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -18,6 +18,9 @@ import os import sys +from bson import encode +from bson.raw_bson import RawBSONDocument + sys.path[0:0] = [""] from test.asynchronous import ( @@ -84,6 +87,17 @@ async def test_formats_write_error_correctly(self): self.assertEqual(write_error["idx"], 1) self.assertEqual(write_error["op"], {"insert": 0, "document": {"_id": 1}}) + @async_client_context.require_version_min(8, 0, 0, -24) + @async_client_context.require_no_serverless + async def test_raw_bson_not_inflated(self): + doc = RawBSONDocument(encode({"a": "b" * 100})) + models = [ + InsertOne(namespace="db.coll", document=doc), + ] + await self.client.bulk_write(models=models) + + self.assertIsNone(doc._RawBSONDocument__inflated_doc) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(AsyncIntegrationTest): diff --git a/test/mockupdb/test_id_ordering.py b/test/mockupdb/test_id_ordering.py new file mode 100644 index 0000000000..7e2c91d592 --- /dev/null +++ b/test/mockupdb/test_id_ordering.py @@ -0,0 +1,94 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from test import PyMongoTestCase + +import pytest + +from pymongo import InsertOne + +try: + from mockupdb import MockupDB, OpMsg, go, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson.objectid import ObjectId + +pytestmark = pytest.mark.mockupdb + + +# https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#16-generated-document-identifiers-are-the-first-field-in-their-document +class TestIdOrdering(PyMongoTestCase): + def test_16_generated_document_ids_are_first_field(self): + server = MockupDB() + server.autoresponds( + "hello", + isWritablePrimary=True, + msg="isdbgrid", + minWireVersion=0, + maxWireVersion=25, + helloOk=True, + serviceId=ObjectId(), + ) + server.run() + self.addCleanup(server.stop) + + # We also verify that the original document contains an _id field after each insert + document = {"x": 1} + + client = self.simple_client(server.uri, loadBalanced=True) + collection = client.db.coll + with going(collection.insert_one, document): + request = server.receives() + self.assertEqual("_id", next(iter(request["documents"][0]))) + request.reply({"ok": 1}) + self.assertIn("_id", document) + + document = {"x1": 1} + + with going(collection.bulk_write, [InsertOne(document)]): + request = server.receives() + self.assertEqual("_id", next(iter(request["documents"][0]))) + request.reply({"ok": 1}) + self.assertIn("_id", document) + + document = {"x2": 1} + with going(client.bulk_write, [InsertOne(namespace="db.coll", document=document)]): + request = server.receives() + self.assertEqual("_id", next(iter(request["ops"][0]["document"]))) + request.reply({"ok": 1}) + self.assertIn("_id", document) + + # Re-ordering user-supplied _id fields is not required by the spec, but PyMongo does it for performance reasons + with going(collection.insert_one, {"x": 1, "_id": 111}): + request = server.receives() + self.assertEqual("_id", next(iter(request["documents"][0]))) + request.reply({"ok": 1}) + + with going(collection.bulk_write, [InsertOne({"x1": 1, "_id": 1111})]): + request = server.receives() + self.assertEqual("_id", next(iter(request["documents"][0]))) + request.reply({"ok": 1}) + + with going( + client.bulk_write, [InsertOne(namespace="db.coll", document={"x2": 1, "_id": 11111})] + ): + request = server.receives() + self.assertEqual("_id", next(iter(request["ops"][0]["document"]))) + request.reply({"ok": 1}) diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 733970dd57..f06c07d588 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -18,6 +18,9 @@ import os import sys +from bson import encode +from bson.raw_bson import RawBSONDocument + sys.path[0:0] = [""] from test import ( @@ -84,6 +87,17 @@ def test_formats_write_error_correctly(self): self.assertEqual(write_error["idx"], 1) self.assertEqual(write_error["op"], {"insert": 0, "document": {"_id": 1}}) + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_no_serverless + def test_raw_bson_not_inflated(self): + doc = RawBSONDocument(encode({"a": "b" * 100})) + models = [ + InsertOne(namespace="db.coll", document=doc), + ] + self.client.bulk_write(models=models) + + self.assertIsNone(doc._RawBSONDocument__inflated_doc) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(IntegrationTest): From 35b2fbbd020f91f180eaf7ca5335c5bd1fb6d1bf Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 15 Nov 2024 08:57:34 -0800 Subject: [PATCH 1618/2111] PYTHON-4977 Fix import time on Windows again (#2003) --- pymongo/pool_options.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index f3ed6cd2c1..038dbb3b5d 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -70,13 +70,14 @@ "version": platform.mac_ver()[0], } elif sys.platform == "win32": + _ver = sys.getwindowsversion() _METADATA["os"] = { - "type": platform.system(), - # "Windows XP", "Windows 7", "Windows 10", etc. - "name": " ".join((platform.system(), platform.release())), - "architecture": platform.machine(), - # Windows patch level (e.g. 5.1.2600-SP3) - "version": "-".join(platform.win32_ver()[1:3]), + "type": "Windows", + "name": "Windows", + # Avoid using platform calls, see PYTHON-4455. + "architecture": os.environ.get("PROCESSOR_ARCHITECTURE") or platform.machine(), + # Windows patch level (e.g. 10.0.17763-SP0). + "version": ".".join(map(str, _ver[:3])) + f"-SP{_ver[-1] or '0'}", } elif sys.platform.startswith("java"): _name, _ver, _arch = platform.java_ver()[-1] From d2c1e18cc26672004299e75c9a23475cabdb6834 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 05:41:04 -0600 Subject: [PATCH 1619/2111] Bump pyright from 1.1.388 to 1.1.389 (#2007) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index ad799ea368..613eba7645 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.13.0 -pyright==1.1.388 +pyright==1.1.389 typing_extensions -r ./encryption.txt -r ./ocsp.txt From 18940030f17a8a887c653ef3bbf5aa71e52c86aa Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Nov 2024 10:25:20 -0800 Subject: [PATCH 1620/2111] PYTHON-4921 Eliminate unnecessary killCursors command when batchSize == limit (#2004) --- pymongo/message.py | 4 + .../client-bulkWrite-replaceOne-sort.json | 3 +- .../client-bulkWrite-updateOne-sort.json | 3 +- test/crud/unified/distinct-hint.json | 139 +++++++++++++++ test/crud/unified/estimatedDocumentCount.json | 2 +- test/crud/unified/find.json | 62 +++++++ test/crud/unified/findOne.json | 158 ++++++++++++++++++ test/utils.py | 29 ---- 8 files changed, 368 insertions(+), 32 deletions(-) create mode 100644 test/crud/unified/distinct-hint.json create mode 100644 test/crud/unified/findOne.json diff --git a/pymongo/message.py b/pymongo/message.py index 3e2ae00ae7..b6c00f06cb 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -252,6 +252,10 @@ def _gen_find_command( if limit < 0: cmd["singleBatch"] = True if batch_size: + # When limit and batchSize are equal we increase batchSize by 1 to + # avoid an unnecessary killCursors. + if limit == batch_size: + batch_size += 1 cmd["batchSize"] = batch_size if read_concern.level and not (session and session.in_transaction): cmd["readConcern"] = read_concern.document diff --git a/test/crud/unified/client-bulkWrite-replaceOne-sort.json b/test/crud/unified/client-bulkWrite-replaceOne-sort.json index 53218c1f48..b86bc5f942 100644 --- a/test/crud/unified/client-bulkWrite-replaceOne-sort.json +++ b/test/crud/unified/client-bulkWrite-replaceOne-sort.json @@ -3,7 +3,8 @@ "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/client-bulkWrite-updateOne-sort.json b/test/crud/unified/client-bulkWrite-updateOne-sort.json index 4a07b8b97c..ef75dcb374 100644 --- a/test/crud/unified/client-bulkWrite-updateOne-sort.json +++ b/test/crud/unified/client-bulkWrite-updateOne-sort.json @@ -3,7 +3,8 @@ "schemaVersion": "1.4", "runOnRequirements": [ { - "minServerVersion": "8.0" + "minServerVersion": "8.0", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/crud/unified/distinct-hint.json b/test/crud/unified/distinct-hint.json new file mode 100644 index 0000000000..2a6869cbe0 --- /dev/null +++ b/test/crud/unified/distinct-hint.json @@ -0,0 +1,139 @@ +{ + "description": "distinct-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "7.1.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-hint-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-hint-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with hint string", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": "_id_" + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with hint document", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json index 1b650c1cb6..3577d9006b 100644 --- a/test/crud/unified/estimatedDocumentCount.json +++ b/test/crud/unified/estimatedDocumentCount.json @@ -249,7 +249,7 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "isError": true + "isClientError": true } } ], diff --git a/test/crud/unified/find.json b/test/crud/unified/find.json index 6bf1e4e445..325cd96c21 100644 --- a/test/crud/unified/find.json +++ b/test/crud/unified/find.json @@ -237,6 +237,68 @@ ] } ] + }, + { + "description": "Find with batchSize equal to limit", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 4 + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "limit": 4, + "batchSize": 5 + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/findOne.json b/test/crud/unified/findOne.json new file mode 100644 index 0000000000..826c0f5dfd --- /dev/null +++ b/test/crud/unified/findOne.json @@ -0,0 +1,158 @@ +{ + "description": "findOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "find-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "find-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "FindOne with filter", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "batchSize": { + "$$exists": false + }, + "limit": 1, + "singleBatch": true + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne with filter, sort, and skip", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2 + }, + "expectResult": { + "_id": 5, + "x": 55 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2, + "batchSize": { + "$$exists": false + }, + "limit": 1, + "singleBatch": true + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/utils.py b/test/utils.py index 766f209de2..9b326e5d73 100644 --- a/test/utils.py +++ b/test/utils.py @@ -925,35 +925,6 @@ def parse_spec_options(opts): if "maxCommitTimeMS" in opts: opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - if "hint" in opts: - hint = opts.pop("hint") - if not isinstance(hint, str): - hint = list(hint.items()) - opts["hint"] = hint - - # Properly format 'hint' arguments for the Bulk API tests. - if "requests" in opts: - reqs = opts.pop("requests") - for req in reqs: - if "name" in req: - # CRUD v2 format - args = req.pop("arguments", {}) - if "hint" in args: - hint = args.pop("hint") - if not isinstance(hint, str): - hint = list(hint.items()) - args["hint"] = hint - req["arguments"] = args - else: - # Unified test format - bulk_model, spec = next(iter(req.items())) - if "hint" in spec: - hint = spec.pop("hint") - if not isinstance(hint, str): - hint = list(hint.items()) - spec["hint"] = hint - opts["requests"] = reqs - return dict(opts) From c9d9d7c2dc6e8077ea048261e46e6a8264da4ef0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Nov 2024 10:25:46 -0800 Subject: [PATCH 1621/2111] PYTHON-4907 Avoid noisy TypeError at interpreter exit (#2005) --- pymongo/asynchronous/mongo_client.py | 3 ++- pymongo/synchronous/mongo_client.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index e4fdf25c28..3e4dc482d7 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -1195,7 +1195,8 @@ def __del__(self) -> None: ResourceWarning, stacklevel=2, ) - except AttributeError: + except (AttributeError, TypeError): + # Ignore errors at interpreter exit. pass def _close_cursor_soon( diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 0380d4468b..00c6203a94 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1193,7 +1193,8 @@ def __del__(self) -> None: ResourceWarning, stacklevel=2, ) - except AttributeError: + except (AttributeError, TypeError): + # Ignore errors at interpreter exit. pass def _close_cursor_soon( From 1dd42173e1101e1d1f8c41758337d617bb737d41 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Nov 2024 10:26:22 -0800 Subject: [PATCH 1622/2111] PYTHON-4919 Resync tests for retryable writes (#2006) --- test/asynchronous/test_retryable_writes.py | 44 +---- .../unified/aggregate-out-merge.json | 144 ++++++++++++++++ test/retryable_writes/unified/bulkWrite.json | 154 +++++++++++++++++- .../client-bulkWrite-serverErrors.json | 15 +- test/retryable_writes/unified/deleteMany.json | 22 ++- test/retryable_writes/unified/deleteOne.json | 32 +++- .../unified/findOneAndDelete.json | 32 +++- .../unified/findOneAndReplace.json | 32 +++- .../unified/findOneAndUpdate.json | 32 +++- test/retryable_writes/unified/insertMany.json | 59 ++++++- test/retryable_writes/unified/insertOne.json | 32 +++- test/retryable_writes/unified/replaceOne.json | 32 +++- .../unified/unacknowledged-write-concern.json | 77 +++++++++ test/retryable_writes/unified/updateMany.json | 22 ++- test/retryable_writes/unified/updateOne.json | 32 +++- test/test_retryable_writes.py | 44 +---- 16 files changed, 705 insertions(+), 100 deletions(-) create mode 100644 test/retryable_writes/unified/aggregate-out-merge.json create mode 100644 test/retryable_writes/unified/unacknowledged-write-concern.json diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py index accbbd003f..ca2f0a5422 100644 --- a/test/asynchronous/test_retryable_writes.py +++ b/test/asynchronous/test_retryable_writes.py @@ -1,4 +1,4 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2017-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,7 +43,6 @@ from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument from bson.son import SON -from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.errors import ( AutoReconnect, ConnectionFailure, @@ -226,47 +225,6 @@ async def test_supported_single_statement_no_retry(self): f"{msg} sent txnNumber with {event.command_name}", ) - @async_client_context.require_no_standalone - async def test_supported_single_statement_supported_cluster(self): - for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" - self.listener.reset() - await method(*args, **kwargs) - commands_started = self.listener.started_events - self.assertEqual(len(self.listener.succeeded_events), 1, msg) - first_attempt = commands_started[0] - self.assertIn( - "lsid", - first_attempt.command, - f"{msg} sent no lsid with {first_attempt.command_name}", - ) - initial_session_id = first_attempt.command["lsid"] - self.assertIn( - "txnNumber", - first_attempt.command, - f"{msg} sent no txnNumber with {first_attempt.command_name}", - ) - - # There should be no retry when the failpoint is not active. - if async_client_context.is_mongos or not async_client_context.test_commands_enabled: - self.assertEqual(len(commands_started), 1) - continue - - initial_transaction_id = first_attempt.command["txnNumber"] - retry_attempt = commands_started[1] - self.assertIn( - "lsid", - retry_attempt.command, - f"{msg} sent no lsid with {first_attempt.command_name}", - ) - self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) - self.assertIn( - "txnNumber", - retry_attempt.command, - f"{msg} sent no txnNumber with {first_attempt.command_name}", - ) - self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) - async def test_supported_single_statement_unsupported_cluster(self): if async_client_context.is_rs or async_client_context.is_mongos: raise SkipTest("This cluster supports retryable writes") diff --git a/test/retryable_writes/unified/aggregate-out-merge.json b/test/retryable_writes/unified/aggregate-out-merge.json new file mode 100644 index 0000000000..c46bf8c31f --- /dev/null +++ b/test/retryable_writes/unified/aggregate-out-merge.json @@ -0,0 +1,144 @@ +{ + "description": "aggregate with $out/$merge does not set txnNumber", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "mergeCollection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "aggregate with $out does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "outCollection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $merge does not set txnNumber", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "mergeCollection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/bulkWrite.json b/test/retryable_writes/unified/bulkWrite.json index 691321746b..f2bd9e0eb8 100644 --- a/test/retryable_writes/unified/bulkWrite.json +++ b/test/retryable_writes/unified/bulkWrite.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -121,6 +124,53 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { @@ -510,6 +560,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { @@ -926,6 +1003,81 @@ ] } ] + }, + { + "description": "collection bulkWrite with updateMany does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "collection bulkWrite with deleteMany does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": {} + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] } ] } diff --git a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json index f58c82bcc7..a1f7c8152a 100644 --- a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json +++ b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json @@ -428,7 +428,10 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "txnNumber": { + "$$exists": false + } } } } @@ -779,7 +782,10 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "txnNumber": { + "$$exists": false + } } } } @@ -861,7 +867,10 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "txnNumber": { + "$$exists": false + } } } } diff --git a/test/retryable_writes/unified/deleteMany.json b/test/retryable_writes/unified/deleteMany.json index 087576cc0f..381f377954 100644 --- a/test/retryable_writes/unified/deleteMany.json +++ b/test/retryable_writes/unified/deleteMany.json @@ -15,7 +15,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": true + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -70,6 +73,23 @@ "databaseName": "retryable-writes-tests", "documents": [] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } ] } ] diff --git a/test/retryable_writes/unified/deleteOne.json b/test/retryable_writes/unified/deleteOne.json index c3aaf88655..9e37ff8bcf 100644 --- a/test/retryable_writes/unified/deleteOne.json +++ b/test/retryable_writes/unified/deleteOne.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -88,6 +91,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/findOneAndDelete.json b/test/retryable_writes/unified/findOneAndDelete.json index 89dbb9d655..ebfb8ce665 100644 --- a/test/retryable_writes/unified/findOneAndDelete.json +++ b/test/retryable_writes/unified/findOneAndDelete.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -94,6 +97,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/findOneAndReplace.json b/test/retryable_writes/unified/findOneAndReplace.json index 6d1cc17974..638d15a41d 100644 --- a/test/retryable_writes/unified/findOneAndReplace.json +++ b/test/retryable_writes/unified/findOneAndReplace.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -98,6 +101,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/findOneAndUpdate.json b/test/retryable_writes/unified/findOneAndUpdate.json index eb88fbe9b3..eefe98ae11 100644 --- a/test/retryable_writes/unified/findOneAndUpdate.json +++ b/test/retryable_writes/unified/findOneAndUpdate.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -99,6 +102,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/insertMany.json b/test/retryable_writes/unified/insertMany.json index 47181d0a9e..35a18c46c6 100644 --- a/test/retryable_writes/unified/insertMany.json +++ b/test/retryable_writes/unified/insertMany.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -107,6 +110,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { @@ -172,6 +202,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/insertOne.json b/test/retryable_writes/unified/insertOne.json index 61957415ed..a6afdbf224 100644 --- a/test/retryable_writes/unified/insertOne.json +++ b/test/retryable_writes/unified/insertOne.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -101,6 +104,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/replaceOne.json b/test/retryable_writes/unified/replaceOne.json index e58625bb5e..ee6e37d3bb 100644 --- a/test/retryable_writes/unified/replaceOne.json +++ b/test/retryable_writes/unified/replaceOne.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -98,6 +101,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/retryable_writes/unified/unacknowledged-write-concern.json b/test/retryable_writes/unified/unacknowledged-write-concern.json new file mode 100644 index 0000000000..eaa114acfd --- /dev/null +++ b/test/retryable_writes/unified/unacknowledged-write-concern.json @@ -0,0 +1,77 @@ +{ + "description": "unacknowledged write does not set txnNumber", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "tests": [ + { + "description": "unacknowledged write does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateMany.json b/test/retryable_writes/unified/updateMany.json index 260b7ad1c6..12c5204ee9 100644 --- a/test/retryable_writes/unified/updateMany.json +++ b/test/retryable_writes/unified/updateMany.json @@ -15,7 +15,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": true + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -86,6 +89,23 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } ] } ] diff --git a/test/retryable_writes/unified/updateOne.json b/test/retryable_writes/unified/updateOne.json index 7947cef3c0..99ffba8e21 100644 --- a/test/retryable_writes/unified/updateOne.json +++ b/test/retryable_writes/unified/updateOne.json @@ -13,7 +13,10 @@ { "client": { "id": "client0", - "useMultipleMongoses": false + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -99,6 +102,33 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } ] }, { diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 5df6c41f7a..74f3c23e51 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -1,4 +1,4 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2017-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,7 +65,6 @@ UpdateMany, UpdateOne, ) -from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern _IS_SYNC = True @@ -226,47 +225,6 @@ def test_supported_single_statement_no_retry(self): f"{msg} sent txnNumber with {event.command_name}", ) - @client_context.require_no_standalone - def test_supported_single_statement_supported_cluster(self): - for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" - self.listener.reset() - method(*args, **kwargs) - commands_started = self.listener.started_events - self.assertEqual(len(self.listener.succeeded_events), 1, msg) - first_attempt = commands_started[0] - self.assertIn( - "lsid", - first_attempt.command, - f"{msg} sent no lsid with {first_attempt.command_name}", - ) - initial_session_id = first_attempt.command["lsid"] - self.assertIn( - "txnNumber", - first_attempt.command, - f"{msg} sent no txnNumber with {first_attempt.command_name}", - ) - - # There should be no retry when the failpoint is not active. - if client_context.is_mongos or not client_context.test_commands_enabled: - self.assertEqual(len(commands_started), 1) - continue - - initial_transaction_id = first_attempt.command["txnNumber"] - retry_attempt = commands_started[1] - self.assertIn( - "lsid", - retry_attempt.command, - f"{msg} sent no lsid with {first_attempt.command_name}", - ) - self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) - self.assertIn( - "txnNumber", - retry_attempt.command, - f"{msg} sent no txnNumber with {first_attempt.command_name}", - ) - self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) - def test_supported_single_statement_unsupported_cluster(self): if client_context.is_rs or client_context.is_mongos: raise SkipTest("This cluster supports retryable writes") From a3bdc133ca497c2e966e41a283c1b712d045f7fe Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 18 Nov 2024 12:17:33 -0800 Subject: [PATCH 1623/2111] PYTHON-4356 Unskip spec tests for agg $out (#2008) --- test/asynchronous/unified_format.py | 9 --------- test/unified_format.py | 9 --------- 2 files changed, 18 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 81feed4d4c..ea61ecbe99 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -545,15 +545,6 @@ def maybe_skip_test(self, spec): or "Cancel server check" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") - if ( - "AsyncDatabase-level aggregate with $out includes read preference for 5.0+ server" - in spec["description"] - ): - if async_client_context.version[0] == 8: - self.skipTest("waiting on PYTHON-4356") - if "Aggregate with $out includes read preference for 5.0+ server" in spec["description"]: - if async_client_context.version[0] == 8: - self.skipTest("waiting on PYTHON-4356") if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: diff --git a/test/unified_format.py b/test/unified_format.py index 395d40b2d1..1bcd750aef 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -543,15 +543,6 @@ def maybe_skip_test(self, spec): or "Cancel server check" in spec["description"] ): self.skipTest("MMAPv1 does not support retryWrites=True") - if ( - "Database-level aggregate with $out includes read preference for 5.0+ server" - in spec["description"] - ): - if client_context.version[0] == 8: - self.skipTest("waiting on PYTHON-4356") - if "Aggregate with $out includes read preference for 5.0+ server" in spec["description"]: - if client_context.version[0] == 8: - self.skipTest("waiting on PYTHON-4356") if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: From a7c1090056c12ec9c492451917177954274daa59 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Nov 2024 10:46:57 -0800 Subject: [PATCH 1624/2111] PYTHON-4414 interruptInUseConnections should cancel pending connections too (#2010) --- pymongo/asynchronous/pool.py | 10 ++++++++++ pymongo/synchronous/pool.py | 10 ++++++++++ test/test_connection_monitoring.py | 5 ----- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index a9f02d650a..ca0cebd417 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -1249,6 +1249,9 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A async with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 + # Use a temporary context so that interrupt_connections can cancel creating the socket. + tmp_context = _CancellationContext() + self.active_contexts.add(tmp_context) listeners = self.opts._event_listeners if self.enabled_for_cmap: @@ -1267,6 +1270,8 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A try: sock = await _configured_socket(self.address, self.opts) except BaseException as error: + async with self.lock: + self.active_contexts.discard(tmp_context) if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_closed( @@ -1292,6 +1297,9 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A conn = AsyncConnection(sock, self, self.address, conn_id) # type: ignore[arg-type] async with self.lock: self.active_contexts.add(conn.cancel_context) + self.active_contexts.discard(tmp_context) + if tmp_context.cancelled: + conn.cancel_context.cancel() try: if self.handshake: await conn.hello() @@ -1301,6 +1309,8 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A await conn.authenticate() except BaseException: + async with self.lock: + self.active_contexts.discard(conn.cancel_context) conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index eb007a3471..86baf15b9a 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -1243,6 +1243,9 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect with self.lock: conn_id = self.next_connection_id self.next_connection_id += 1 + # Use a temporary context so that interrupt_connections can cancel creating the socket. + tmp_context = _CancellationContext() + self.active_contexts.add(tmp_context) listeners = self.opts._event_listeners if self.enabled_for_cmap: @@ -1261,6 +1264,8 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect try: sock = _configured_socket(self.address, self.opts) except BaseException as error: + with self.lock: + self.active_contexts.discard(tmp_context) if self.enabled_for_cmap: assert listeners is not None listeners.publish_connection_closed( @@ -1286,6 +1291,9 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] with self.lock: self.active_contexts.add(conn.cancel_context) + self.active_contexts.discard(tmp_context) + if tmp_context.cancelled: + conn.cancel_context.cancel() try: if self.handshake: conn.hello() @@ -1295,6 +1303,8 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect conn.authenticate() except BaseException: + with self.lock: + self.active_contexts.discard(conn.cancel_context) conn.close_conn(ConnectionClosedReason.ERROR) raise diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index d576a1184a..05411d17ba 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -216,11 +216,6 @@ def set_fail_point(self, command_args): def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" - if ( - scenario_def["description"] - == "clear with interruptInUseConnections = true closes pending connections" - ): - self.skipTest("Skip pending PYTHON-4414") self.logs: list = [] self.assertEqual(scenario_def["version"], 1) self.assertIn(scenario_def["style"], ["unit", "integration"]) From ddf783b69a400411db2bee155052f648396c3c7f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Nov 2024 15:43:17 -0800 Subject: [PATCH 1625/2111] PYTHON-4982 Remove redundant configureFailPoint (#2012) --- test/asynchronous/test_retryable_reads.py | 5 ++--- test/test_retryable_reads.py | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py index b2d86f5d84..bde7a9f2ee 100644 --- a/test/asynchronous/test_retryable_reads.py +++ b/test/asynchronous/test_retryable_reads.py @@ -174,9 +174,8 @@ async def test_retryable_reads_in_sharded_cluster_multiple_available(self): retryReads=True, ) - async with self.fail_point(fail_command): - with self.assertRaises(AutoReconnect): - await client.t.t.find_one({}) + with self.assertRaises(AutoReconnect): + await client.t.t.find_one({}) # Disable failpoints on each mongos for client in mongos_clients: diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index d4951db5ee..9c3f6b170f 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -174,9 +174,8 @@ def test_retryable_reads_in_sharded_cluster_multiple_available(self): retryReads=True, ) - with self.fail_point(fail_command): - with self.assertRaises(AutoReconnect): - client.t.t.find_one({}) + with self.assertRaises(AutoReconnect): + client.t.t.find_one({}) # Disable failpoints on each mongos for client in mongos_clients: From b5f010404809b4a5770173837f467a307e9b084a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 19 Nov 2024 15:43:28 -0800 Subject: [PATCH 1626/2111] PYTHON-4980 Ignore network error on killAllSessions (#2011) --- test/asynchronous/unified_format.py | 4 +++- test/asynchronous/utils_spec_runner.py | 5 +++-- test/unified_format.py | 4 +++- test/utils_spec_runner.py | 5 +++-- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index ea61ecbe99..db5ed81e24 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -76,6 +76,7 @@ from pymongo.asynchronous.helpers import anext from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( + AutoReconnect, BulkWriteError, ClientBulkWriteException, ConfigurationError, @@ -755,9 +756,10 @@ async def kill_all_sessions(self): for client in clients: try: await client.admin.command("killAllSessions", []) - except OperationFailure: + except (OperationFailure, AutoReconnect): # "operation was interrupted" by killing the command's # own session. + # On 8.0+ killAllSessions sometimes returns a network error. pass async def _databaseOperation_listCollections(self, target, *args, **kwargs): diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index 4d9c4c8f20..f27f52ec2c 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -46,7 +46,7 @@ from pymongo.asynchronous import client_session from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor -from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError +from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult, _WriteResult @@ -343,9 +343,10 @@ async def kill_all_sessions(self): for client in clients: try: await client.admin.command("killAllSessions", []) - except OperationFailure: + except (OperationFailure, AutoReconnect): # "operation was interrupted" by killing the command's # own session. + # On 8.0+ killAllSessions sometimes returns a network error. pass def check_command_result(self, expected_result, result): diff --git a/test/unified_format.py b/test/unified_format.py index 1bcd750aef..3489a8ac84 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -69,6 +69,7 @@ from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( + AutoReconnect, BulkWriteError, ClientBulkWriteException, ConfigurationError, @@ -751,9 +752,10 @@ def kill_all_sessions(self): for client in clients: try: client.admin.command("killAllSessions", []) - except OperationFailure: + except (OperationFailure, AutoReconnect): # "operation was interrupted" by killing the command's # own session. + # On 8.0+ killAllSessions sometimes returns a network error. pass def _databaseOperation_listCollections(self, target, *args, **kwargs): diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 8a061de0b1..8b2679d776 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -43,7 +43,7 @@ from bson.son import SON from gridfs import GridFSBucket from gridfs.synchronous.grid_file import GridFSBucket -from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError +from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult, _WriteResult @@ -343,9 +343,10 @@ def kill_all_sessions(self): for client in clients: try: client.admin.command("killAllSessions", []) - except OperationFailure: + except (OperationFailure, AutoReconnect): # "operation was interrupted" by killing the command's # own session. + # On 8.0+ killAllSessions sometimes returns a network error. pass def check_command_result(self, expected_result, result): From 89f4e5c786d4439c92914614474306910ccc8142 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 20 Nov 2024 09:21:30 -0600 Subject: [PATCH 1627/2111] PYTHON-3730 Ensure C extensions when running the test suite (#2013) --- .evergreen/run-tests.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 36fa76e317..1e03e27147 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -47,6 +47,11 @@ else echo "Not sourcing secrets" fi +# Ensure C extensions have compiled. +if [ -z "${NO_EXT:-}" ]; then + python tools/fail_if_no_c.py +fi + if [ "$AUTH" != "noauth" ]; then if [ ! -z "$TEST_DATA_LAKE" ]; then export DB_USER="mhuser" From 906d021bb1a8b4c381ce79f943c5c57b4314ecb7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 20 Nov 2024 11:56:10 -0600 Subject: [PATCH 1628/2111] PYTHON-4447 Test OIDC on Server Latest (#2014) --- .evergreen/config.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index fc1713a88e..1e4996c288 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -922,9 +922,6 @@ task_groups: params: binary: bash include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - env: - # PYTHON-4447 - MONGODB_VERSION: "8.0" args: - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/setup.sh teardown_task: From 1c7a7fe9ec3119228bc7bf98f1f9de199a4f8f2c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 20 Nov 2024 14:47:28 -0500 Subject: [PATCH 1629/2111] PYTHON-4721 - Create individualized scripts for all shell.exec commands (#1997) Co-authored-by: Jib --- .evergreen/config.yml | 710 ++++++++---------- .evergreen/hatch.sh | 2 +- .evergreen/run-tests.sh | 8 +- .evergreen/scripts/archive-mongodb-logs.sh | 8 + .../scripts/bootstrap-mongo-orchestration.sh | 46 ++ .evergreen/scripts/check-import-time.sh | 7 + .evergreen/scripts/cleanup.sh | 7 + .evergreen/scripts/configure-env.sh | 19 +- .../scripts/download-and-merge-coverage.sh | 4 + .evergreen/scripts/fix-absolute-paths.sh | 8 + .evergreen/scripts/init-test-results.sh | 5 + .evergreen/scripts/install-dependencies.sh | 6 + .evergreen/scripts/make-files-executable.sh | 8 + .evergreen/scripts/prepare-resources.sh | 12 + .evergreen/scripts/run-atlas-tests.sh | 7 + .evergreen/scripts/run-aws-ecs-auth-test.sh | 15 + .evergreen/scripts/run-doctests.sh | 4 + .../scripts/run-enterprise-auth-tests.sh | 6 + .evergreen/scripts/run-gcpkms-fail-test.sh | 7 + .evergreen/scripts/run-getdata.sh | 22 + .evergreen/scripts/run-load-balancer.sh | 3 + .evergreen/scripts/run-mockupdb-tests.sh | 5 + .../{ => scripts}/run-mod-wsgi-tests.sh | 2 +- .../{ => scripts}/run-mongodb-aws-test.sh | 10 +- .evergreen/scripts/run-ocsp-test.sh | 8 + .evergreen/scripts/run-perf-tests.sh | 4 + .evergreen/scripts/run-tests.sh | 55 ++ .evergreen/scripts/run-with-env.sh | 21 + .evergreen/scripts/setup-encryption.sh | 5 + .evergreen/scripts/setup-tests.sh | 27 + .evergreen/scripts/stop-load-balancer.sh | 5 + .evergreen/scripts/teardown-aws.sh | 7 + .evergreen/scripts/teardown-docker.sh | 7 + .evergreen/scripts/upload-coverage-report.sh | 3 + .evergreen/scripts/windows-fix.sh | 11 + .evergreen/setup-encryption.sh | 7 +- .evergreen/utils.sh | 4 +- test/asynchronous/test_client_context.py | 8 +- test/mod_wsgi_test/README.rst | 2 +- test/test_client_context.py | 8 +- 40 files changed, 683 insertions(+), 430 deletions(-) create mode 100644 .evergreen/scripts/archive-mongodb-logs.sh create mode 100644 .evergreen/scripts/bootstrap-mongo-orchestration.sh create mode 100644 .evergreen/scripts/check-import-time.sh create mode 100644 .evergreen/scripts/cleanup.sh create mode 100644 .evergreen/scripts/download-and-merge-coverage.sh create mode 100644 .evergreen/scripts/fix-absolute-paths.sh create mode 100644 .evergreen/scripts/init-test-results.sh create mode 100644 .evergreen/scripts/install-dependencies.sh create mode 100644 .evergreen/scripts/make-files-executable.sh create mode 100644 .evergreen/scripts/prepare-resources.sh create mode 100644 .evergreen/scripts/run-atlas-tests.sh create mode 100644 .evergreen/scripts/run-aws-ecs-auth-test.sh create mode 100644 .evergreen/scripts/run-doctests.sh create mode 100644 .evergreen/scripts/run-enterprise-auth-tests.sh create mode 100644 .evergreen/scripts/run-gcpkms-fail-test.sh create mode 100644 .evergreen/scripts/run-getdata.sh create mode 100644 .evergreen/scripts/run-load-balancer.sh create mode 100644 .evergreen/scripts/run-mockupdb-tests.sh rename .evergreen/{ => scripts}/run-mod-wsgi-tests.sh (97%) rename .evergreen/{ => scripts}/run-mongodb-aws-test.sh (67%) create mode 100644 .evergreen/scripts/run-ocsp-test.sh create mode 100644 .evergreen/scripts/run-perf-tests.sh create mode 100644 .evergreen/scripts/run-tests.sh create mode 100644 .evergreen/scripts/run-with-env.sh create mode 100644 .evergreen/scripts/setup-encryption.sh create mode 100644 .evergreen/scripts/setup-tests.sh create mode 100644 .evergreen/scripts/stop-load-balancer.sh create mode 100644 .evergreen/scripts/teardown-aws.sh create mode 100644 .evergreen/scripts/teardown-docker.sh create mode 100644 .evergreen/scripts/upload-coverage-report.sh create mode 100644 .evergreen/scripts/windows-fix.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1e4996c288..59b8a543fd 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -20,10 +20,9 @@ exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily # What to do when evergreen hits the timeout (`post:` tasks are run automatically) timeout: - - command: shell.exec + - command: subprocess.exec params: - script: | - ls -la + binary: ls -la include: - filename: .evergreen/generated_configs/tasks.yml @@ -41,7 +40,7 @@ functions: # Make an evergreen expansion file with dynamic values - command: subprocess.exec params: - include_expansions_in_env: ["is_patch", "project", "version_id"] + include_expansions_in_env: ["is_patch", "project", "version_id", "AUTH", "SSL", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "SETDEFAULTENCODING", "test_loadbalancer", "test_serverless", "SKIP_CSOT_TESTS", "MONGODB_STARTED", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", "COVERAGE", "COMPRESSORS", "TEST_SUITES", "MONGODB_API_VERSION", "SKIP_HATCH", "skip_crypt_shared", "VERSION", "TOPOLOGY", "STORAGE_ENGINE", "ORCHESTRATION_FILE", "REQUIRE_API_VERSION", "LOAD_BALANCER", "skip_web_identity_auth_test", "skip_ECS_auth_test"] binary: bash working_dir: "src" args: @@ -52,19 +51,11 @@ functions: file: src/expansion.yml "prepare resources": - - command: shell.exec - params: - script: | - . src/.evergreen/scripts/env.sh - set -o xtrace - rm -rf $DRIVERS_TOOLS - if [ "$PROJECT" = "drivers-tools" ]; then - # If this was a patch build, doing a fresh clone would not actually test the patch - cp -R ${PROJECT_DIRECTORY}/ ${DRIVERS_TOOLS} - else - git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git ${DRIVERS_TOOLS} - fi - echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config + - command: subprocess.exec + params: + binary: bash + args: + - src/.evergreen/scripts/prepare-resources.sh "upload coverage" : - command: ec2.assume_role @@ -88,14 +79,17 @@ functions: - command: ec2.assume_role params: role_arn: ${assume_role_arn} - - command: shell.exec + - command: subprocess.exec params: silent: true + binary: bash working_dir: "src" include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - script: | - # Download all the task coverage files. - aws s3 cp --recursive s3://${bucket_name}/coverage/${revision}/${version_id}/coverage/ coverage/ + args: + - .evergreen/scripts/download-and-merge-coverage.sh + - ${bucket_name} + - ${revision} + - ${version_id} - command: subprocess.exec params: working_dir: "src" @@ -103,13 +97,17 @@ functions: args: - .evergreen/combine-coverage.sh # Upload the resulting html coverage report. - - command: shell.exec + - command: subprocess.exec params: silent: true + binary: bash working_dir: "src" include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - script: | - aws s3 cp htmlcov/ s3://${bucket_name}/coverage/${revision}/${version_id}/htmlcov/ --recursive --acl public-read --region us-east-1 + args: + - .evergreen/scripts/upload-coverage-report.sh + - ${bucket_name} + - ${revision} + - ${version_id} # Attach the index.html with s3.put so it shows up in the Evergreen UI. - command: s3.put params: @@ -128,15 +126,6 @@ functions: - command: ec2.assume_role params: role_arn: ${assume_role_arn} - - command: shell.exec - params: - script: | - . src/.evergreen/scripts/env.sh - set -o xtrace - mkdir out_dir - find $MONGO_ORCHESTRATION_HOME -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; - tar zcvf mongodb-logs.tar.gz -C out_dir/ . - rm -rf out_dir - command: archive.targz_pack params: target: "mongo-coredumps.tgz" @@ -161,23 +150,12 @@ functions: aws_key: ${AWS_ACCESS_KEY_ID} aws_secret: ${AWS_SECRET_ACCESS_KEY} aws_session_token: ${AWS_SESSION_TOKEN} - local_file: mongodb-logs.tar.gz - remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz + local_file: ${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz bucket: ${bucket_name} permissions: public-read content_type: ${content_type|application/x-gzip} - display_name: "mongodb-logs.tar.gz" - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: drivers-tools/.evergreen/orchestration/server.log - remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-orchestration.log - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|text/plain} - display_name: "orchestration.log" + display_name: "drivers-tools-logs.tar.gz" "upload working dir": - command: ec2.assume_role @@ -230,54 +208,13 @@ functions: file: "src/xunit-results/TEST-*.xml" "bootstrap mongo-orchestration": - - command: shell.exec - params: - script: | - . src/.evergreen/scripts/env.sh - set -o xtrace - - # Enable core dumps if enabled on the machine - # Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml - if [ -f /proc/self/coredump_filter ]; then - # Set the shell process (and its children processes) to dump ELF headers (bit 4), - # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). - echo 0x13 > /proc/self/coredump_filter - - if [ -f /sbin/sysctl ]; then - # Check that the core pattern is set explicitly on our distro image instead - # of being the OS's default value. This ensures that coredump names are consistent - # across distros and can be picked up by Evergreen. - core_pattern=$(/sbin/sysctl -n "kernel.core_pattern") - if [ "$core_pattern" = "dump_%e.%p.core" ]; then - echo "Enabling coredumps" - ulimit -c unlimited - fi - fi - fi - - if [ $(uname -s) = "Darwin" ]; then - core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") - if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then - echo "Enabling coredumps" - ulimit -c unlimited - fi - fi - - if [ -n "${skip_crypt_shared}" ]; then - export SKIP_CRYPT_SHARED=1 - fi - - MONGODB_VERSION=${VERSION} \ - TOPOLOGY=${TOPOLOGY} \ - AUTH=${AUTH} \ - SSL=${SSL} \ - STORAGE_ENGINE=${STORAGE_ENGINE} \ - DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ - ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ - REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ - LOAD_BALANCER=${LOAD_BALANCER} \ - bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh - # run-orchestration generates expansion file with the MONGODB_URI for the cluster + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: ["VERSION", "TOPOLOGY", "AUTH", "SSL", "ORCHESTRATION_FILE", "LOAD_BALANCER"] + args: + - src/.evergreen/scripts/run-with-env.sh + - src/.evergreen/scripts/bootstrap-mongo-orchestration.sh - command: expansions.update params: file: mo-expansion.yml @@ -288,167 +225,107 @@ functions: value: "1" "bootstrap data lake": - - command: shell.exec + - command: subprocess.exec type: setup params: - script: | - . src/.evergreen/scripts/env.sh - bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh - - command: shell.exec + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh + - command: subprocess.exec type: setup params: - script: | - . src/.evergreen/scripts/env.sh - bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh - sleep 1 - docker ps + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh "stop mongo-orchestration": - - command: shell.exec + - command: subprocess.exec params: - script: | - . src/.evergreen/scripts/env.sh - set -o xtrace - bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh "run mod_wsgi tests": - - command: shell.exec + - command: subprocess.exec type: test params: + include_expansions_in_env: [MOD_WSGI_VERSION, MOD_WSGI_EMBEDDED, "PYTHON_BINARY"] working_dir: "src" - script: | - . .evergreen/scripts/env.sh - set -o xtrace - PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} \ - MOD_WSGI_EMBEDDED=${MOD_WSGI_EMBEDDED} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} \ - bash ${PROJECT_DIRECTORY}/.evergreen/run-mod-wsgi-tests.sh + binary: bash + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mod-wsgi-tests.sh "run mockupdb tests": - - command: shell.exec + - command: subprocess.exec type: test params: + include_expansions_in_env: ["PYTHON_BINARY"] working_dir: "src" - script: | - . .evergreen/scripts/env.sh - set -o xtrace - export PYTHON_BINARY=${PYTHON_BINARY} - bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-mockupdb + binary: bash + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mockupdb-tests.sh "run doctests": - - command: shell.exec + - command: subprocess.exec type: test params: + include_expansions_in_env: [ "PYTHON_BINARY" ] working_dir: "src" - script: | - . .evergreen/scripts/env.sh - set -o xtrace - PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh doctest:test + binary: bash + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-doctests.sh "run tests": - - command: shell.exec + - command: subprocess.exec params: + include_expansions_in_env: ["TEST_DATA_LAKE", "AUTH", "SSL", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE"] + binary: bash working_dir: "src" - shell: bash - background: true - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - script: | - . .evergreen/scripts/env.sh - if [ -n "${test_encryption}" ]; then - ./.evergreen/hatch.sh encryption:setup - fi - - command: shell.exec + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec + params: + working_dir: "src" + binary: bash + background: true + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/setup-encryption.sh + - command: subprocess.exec type: test params: working_dir: "src" - shell: bash - script: | - # Disable xtrace - set +x - . .evergreen/scripts/env.sh - if [ -n "${MONGODB_STARTED}" ]; then - export PYMONGO_MUST_CONNECT=true - fi - if [ -n "${DISABLE_TEST_COMMANDS}" ]; then - export PYMONGO_DISABLE_TEST_COMMANDS=1 - fi - if [ -n "${test_encryption}" ]; then - # Disable xtrace (just in case it was accidentally set). - set +x - bash ${DRIVERS_TOOLS}/.evergreen/csfle/await-servers.sh - export TEST_ENCRYPTION=1 - if [ -n "${test_encryption_pyopenssl}" ]; then - export TEST_ENCRYPTION_PYOPENSSL=1 - fi - fi - if [ -n "${test_crypt_shared}" ]; then - export TEST_CRYPT_SHARED=1 - export CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} - fi - if [ -n "${test_pyopenssl}" ]; then - export TEST_PYOPENSSL=1 - fi - if [ -n "${SETDEFAULTENCODING}" ]; then - export SETDEFAULTENCODING="${SETDEFAULTENCODING}" - fi - if [ -n "${test_loadbalancer}" ]; then - export TEST_LOADBALANCER=1 - export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" - export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" - fi - if [ -n "${test_serverless}" ]; then - export TEST_SERVERLESS=1 - fi - if [ -n "${TEST_INDEX_MANAGEMENT}" ]; then - export TEST_INDEX_MANAGEMENT=1 - fi - if [ -n "${SKIP_CSOT_TESTS}" ]; then - export SKIP_CSOT_TESTS=1 - fi - - GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ - PYTHON_BINARY=${PYTHON_BINARY} \ - NO_EXT=${NO_EXT} \ - COVERAGE=${COVERAGE} \ - COMPRESSORS=${COMPRESSORS} \ - AUTH=${AUTH} \ - SSL=${SSL} \ - TEST_DATA_LAKE=${TEST_DATA_LAKE} \ - TEST_SUITES=${TEST_SUITES} \ - MONGODB_API_VERSION=${MONGODB_API_VERSION} \ - SKIP_HATCH=${SKIP_HATCH} \ - bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg + binary: bash + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "PYTHON_BINARY", "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "SINGLE_MONGOS_LB_URI", "MULTI_MONGOS_LB_URI", "TEST_SUITES"] + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-tests.sh "run enterprise auth tests": - - command: shell.exec + - command: subprocess.exec type: test params: + binary: bash working_dir: "src" - include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - script: | - # Disable xtrace for security reasons (just in case it was accidentally set). - set +x - bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/enterprise_auth - PROJECT_DIRECTORY="${PROJECT_DIRECTORY}" \ - PYTHON_BINARY="${PYTHON_BINARY}" \ - TEST_ENTERPRISE_AUTH=1 \ - AUTH=auth \ - bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "PYTHON_BINARY"] + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-enterprise-auth-tests.sh "run atlas tests": - - command: shell.exec + - command: subprocess.exec type: test params: - include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + binary: bash + include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "PYTHON_BINARY"] working_dir: "src" - script: | - # Disable xtrace for security reasons (just in case it was accidentally set). - set +x - set -o errexit - bash ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect - PROJECT_DIRECTORY="${PROJECT_DIRECTORY}" \ - PYTHON_BINARY="${PYTHON_BINARY}" \ - TEST_ATLAS=1 \ - bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-atlas-tests.sh "get aws auth secrets": - command: subprocess.exec @@ -460,57 +337,140 @@ functions: - ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup-secrets.sh "run aws auth test with regular aws credentials": - - command: shell.exec + - command: subprocess.exec + params: + include_expansions_in_env: ["TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE"] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec type: test params: - shell: "bash" + include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] + binary: bash working_dir: "src" - script: | - . .evergreen/scripts/env.sh - .evergreen/run-mongodb-aws-test.sh regular + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - regular "run aws auth test with assume role credentials": - - command: shell.exec + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec type: test params: - shell: "bash" + include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] + binary: bash working_dir: "src" - script: | - . .evergreen/scripts/env.sh - .evergreen/run-mongodb-aws-test.sh assume-role + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - assume-role "run aws auth test with aws EC2 credentials": - - command: shell.exec + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec type: test params: + include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] + binary: bash working_dir: "src" - shell: "bash" - script: | - if [ "${skip_EC2_auth_test}" = "true" ]; then - echo "This platform does not support the EC2 auth test, skipping..." - exit 0 - fi - . .evergreen/scripts/env.sh - .evergreen/run-mongodb-aws-test.sh ec2 + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - ec2 "run aws auth test with aws web identity credentials": - - command: shell.exec + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - # Test with and without AWS_ROLE_SESSION_NAME set. + - command: subprocess.exec + type: test + params: + include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - web-identity + - command: subprocess.exec + type: test + params: + include_expansions_in_env: [ "DRIVERS_TOOLS", "skip_EC2_auth_test" ] + binary: bash + working_dir: "src" + env: + AWS_ROLE_SESSION_NAME: test + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - web-identity + + "run aws auth test with aws credentials as environment variables": + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec type: test params: + include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] + binary: bash working_dir: "src" - shell: "bash" - script: | - if [ "${skip_EC2_auth_test}" = "true" ]; then - echo "This platform does not support the web identity auth test, skipping..." - exit 0 - fi - . .evergreen/scripts/env.sh - # Test with and without AWS_ROLE_SESSION_NAME set. - .evergreen/run-mongodb-aws-test.sh web-identity - AWS_ROLE_SESSION_NAME="test" \ - .evergreen/run-mongodb-aws-test.sh web-identity + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - env-creds + + "run aws auth test with aws credentials and session token as environment variables": + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec + type: test + params: + include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-mongodb-aws-test.sh + - session-creds "run oidc auth test with test credentials": + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -532,110 +492,69 @@ functions: args: - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh - "run aws auth test with aws credentials as environment variables": - - command: shell.exec - type: test - params: - working_dir: "src" - shell: bash - script: | - . .evergreen/scripts/env.sh - .evergreen/run-mongodb-aws-test.sh env-creds - - "run aws auth test with aws credentials and session token as environment variables": - - command: shell.exec + "run aws ECS auth test": + - command: subprocess.exec type: test params: + binary: bash working_dir: "src" - shell: bash - script: | - . .evergreen/scripts/env.sh - .evergreen/run-mongodb-aws-test.sh session-creds + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-aws-ecs-auth-test.sh - "run aws ECS auth test": - - command: shell.exec - type: test + "cleanup": + - command: subprocess.exec params: - shell: "bash" + binary: bash working_dir: "src" - script: | - if [ "${skip_ECS_auth_test}" = "true" ]; then - echo "This platform does not support the ECS auth test, skipping..." - exit 0 - fi - . .evergreen/scripts/env.sh - set -ex - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate-authawsvenv.sh - . aws_setup.sh ecs - export MONGODB_BINARIES="$MONGODB_BINARIES"; - export PROJECT_DIRECTORY="${PROJECT_DIRECTORY}"; - python aws_tester.py ecs - cd - + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/cleanup.sh - "cleanup": - - command: shell.exec + "teardown": + - command: subprocess.exec params: + binary: bash working_dir: "src" - script: | - . .evergreen/scripts/env.sh - if [ -f $DRIVERS_TOOLS/.evergreen/csfle/secrets-export.sh ]; then - . .evergreen/hatch.sh encryption:teardown - fi - rm -rf ${DRIVERS_TOOLS} || true - rm -f ./secrets-export.sh || true + args: + - ${DRIVERS_TOOLS}/.evergreen/teardown.sh "fix absolute paths": - - command: shell.exec + - command: subprocess.exec params: - script: | - set +x - . src/.evergreen/scripts/env.sh - for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do - perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename - done + binary: bash + args: + - src/.evergreen/scripts/fix-absolute-paths.sh "windows fix": - - command: shell.exec - params: - script: | - set +x - . src/.evergreen/scripts/env.sh - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - cat $i | tr -d '\r' > $i.new - mv $i.new $i - done - # Copy client certificate because symlinks do not work on Windows. - cp ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem $MONGO_ORCHESTRATION_HOME/lib/client.pem + - command: subprocess.exec + params: + binary: bash + args: + - src/.evergreen/scripts/windows-fix.sh "make files executable": - - command: shell.exec + - command: subprocess.exec params: - script: | - set +x - . src/.evergreen/scripts/env.sh - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - chmod +x $i - done + binary: bash + args: + - src/.evergreen/scripts/make-files-executable.sh "init test-results": - - command: shell.exec + - command: subprocess.exec params: - script: | - set +x - . src/.evergreen/scripts/env.sh - echo '{"results": [{ "status": "FAIL", "test_file": "Build", "log_raw": "No test-results.json found was created" } ]}' > ${PROJECT_DIRECTORY}/test-results.json + binary: bash + args: + - src/.evergreen/scripts/init-test-results.sh "install dependencies": - - command: shell.exec + - command: subprocess.exec params: + binary: bash working_dir: "src" - script: | - . .evergreen/scripts/env.sh - set -o xtrace - file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" - # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && bash $file || echo "$file not available, skipping" + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/install-dependencies.sh "assume ec2 role": - command: ec2.assume_role @@ -657,18 +576,22 @@ functions: file: atlas-expansion.yml "run-ocsp-test": - - command: shell.exec + - command: subprocess.exec + params: + include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] + binary: bash + working_dir: "src" + args: + - .evergreen/scripts/setup-tests.sh + - command: subprocess.exec type: test params: + include_expansions_in_env: ["OCSP_ALGORITHM", "OCSP_TLS_SHOULD_SUCCEED", "PYTHON_BINARY"] + binary: bash working_dir: "src" - script: | - . .evergreen/scripts/env.sh - TEST_OCSP=1 \ - PYTHON_BINARY=${PYTHON_BINARY} \ - CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ - OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ - bash ${PROJECT_DIRECTORY}/.evergreen/hatch.sh test:test-eg - bash ${DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-ocsp-test.sh "run-ocsp-server": - command: subprocess.exec @@ -680,42 +603,38 @@ functions: - ${DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh "run load-balancer": - - command: shell.exec + - command: subprocess.exec params: - script: | - DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh start + binary: bash + include_expansions_in_env: ["MONGODB_URI"] + args: + - src/.evergreen/scripts/run-with-env.sh + - src/.evergreen/scripts/run-load-balancer.sh - command: expansions.update params: file: lb-expansion.yml "stop load-balancer": - - command: shell.exec + - command: subprocess.exec params: - script: | - cd ${DRIVERS_TOOLS}/.evergreen - DRIVERS_TOOLS=${DRIVERS_TOOLS} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop + binary: bash + args: + - src/.evergreen/scripts/stop-load-balancer.sh "teardown_docker": - - command: shell.exec + - command: subprocess.exec params: - script: | - # Remove all Docker images - DOCKER=$(command -v docker) || true - if [ -n "$DOCKER" ]; then - docker rmi -f $(docker images -a -q) &> /dev/null || true - fi + binary: bash + args: + - src/.evergreen/scripts/teardown-docker.sh "teardown_aws": - - command: shell.exec + - command: subprocess.exec params: - shell: "bash" - script: | - . src/.evergreen/scripts/env.sh - cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" - if [ -f "./aws_e2e_setup.json" ]; then - . ./activate-authawsvenv.sh - python ./lib/aws_assign_instance_profile.py - fi + binary: bash + args: + - src/.evergreen/scripts/run-with-env.sh + - src/.evergreen/scripts/teardown-aws.sh "teardown atlas": - command: subprocess.exec @@ -725,13 +644,14 @@ functions: - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh "run perf tests": - - command: shell.exec + - command: subprocess.exec type: test params: working_dir: "src" - script: | - . .evergreen/scripts/env.sh - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh + binary: bash + args: + - .evergreen/scripts/run-with-env.sh + - .evergreen/scripts/run-perf-tests.sh "attach benchmark test results": - command: attach.results @@ -756,6 +676,7 @@ pre: post: # Disabled, causing timeouts # - func: "upload working dir" + - func: "teardown" - func: "upload coverage" - func: "upload mo artifacts" - func: "upload test results" @@ -798,13 +719,13 @@ task_groups: - func: make files executable - command: subprocess.exec params: - binary: "bash" + binary: bash args: - ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/create-and-setup-instance.sh teardown_task: - command: subprocess.exec params: - binary: "bash" + binary: bash args: - ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/delete-instance.sh - func: "upload test results" @@ -966,31 +887,12 @@ tasks: # Throw it here, and execute this task on all buildvariants - name: getdata commands: - - command: shell.exec + - command: subprocess.exec + binary: bash type: test params: - script: | - set -o xtrace - . ${DRIVERS_TOOLS}/.evergreen/download-mongodb.sh || true - get_distro || true - echo $DISTRO - echo $MARCH - echo $OS - uname -a || true - ls /etc/*release* || true - cc --version || true - gcc --version || true - clang --version || true - gcov --version || true - lcov --version || true - llvm-cov --version || true - echo $PATH - ls -la /usr/local/Cellar/llvm/*/bin/ || true - ls -la /usr/local/Cellar/ || true - scan-build --version || true - genhtml --version || true - valgrind --version || true - + args: + - src/.evergreen/scripts/run-getdata.sh # Standard test tasks {{{ - name: "mockupdb" @@ -1647,7 +1549,7 @@ tasks: type: setup params: working_dir: "src" - binary: "bash" + binary: bash include_expansions_in_env: ["DRIVERS_TOOLS"] args: - .evergreen/run-gcpkms-test.sh @@ -1660,17 +1562,14 @@ tasks: vars: VERSION: "latest" TOPOLOGY: "server" - - command: shell.exec + - command: subprocess.exec type: test params: + include_expansions_in_env: ["PYTHON_BINARY"] working_dir: "src" - shell: "bash" - script: | - . .evergreen/scripts/env.sh - export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 - export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz - SKIP_SERVERS=1 bash ./.evergreen/setup-encryption.sh - SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/hatch.sh test:test-eg + binary: "bash" + args: + - .evergreen/scripts/run-gcpkms-fail-test.sh - name: testazurekms-task commands: @@ -1736,18 +1635,15 @@ tasks: - name: "check-import-time" tags: ["pr"] commands: - - command: shell.exec + - command: subprocess.exec type: test params: - shell: "bash" + binary: bash working_dir: src - script: | - . .evergreen/scripts/env.sh - set -x - export BASE_SHA=${revision} - export HEAD_SHA=${github.amrom.workers.devmit} - bash .evergreen/run-import-time-test.sh - + args: + - .evergreen/scripts/check-import-time.sh + - ${revision} + - ${github.amrom.workers.devmit} - name: "backport-pr" allowed_requesters: ["commit"] commands: diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index 45d5113cd6..98cd9ed734 100644 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -29,7 +29,7 @@ else # Set up virtualenv before installing hatch # Ensure hatch does not write to user or global locations. touch hatch_config.toml HATCH_CONFIG=$(pwd)/hatch_config.toml - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") fi export HATCH_CONFIG diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 1e03e27147..9716c1fc79 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -53,18 +53,18 @@ if [ -z "${NO_EXT:-}" ]; then fi if [ "$AUTH" != "noauth" ]; then - if [ ! -z "$TEST_DATA_LAKE" ]; then + if [ -n "$TEST_DATA_LAKE" ]; then export DB_USER="mhuser" export DB_PASSWORD="pencil" - elif [ ! -z "$TEST_SERVERLESS" ]; then - source ${DRIVERS_TOOLS}/.evergreen/serverless/secrets-export.sh + elif [ -n "$TEST_SERVERLESS" ]; then + source "${DRIVERS_TOOLS}"/.evergreen/serverless/secrets-export.sh export DB_USER=$SERVERLESS_ATLAS_USER export DB_PASSWORD=$SERVERLESS_ATLAS_PASSWORD export MONGODB_URI="$SERVERLESS_URI" echo "MONGODB_URI=$MONGODB_URI" export SINGLE_MONGOS_LB_URI=$MONGODB_URI export MULTI_MONGOS_LB_URI=$MONGODB_URI - elif [ ! -z "$TEST_AUTH_OIDC" ]; then + elif [ -n "$TEST_AUTH_OIDC" ]; then export DB_USER=$OIDC_ADMIN_USER export DB_PASSWORD=$OIDC_ADMIN_PWD export DB_IP="$MONGODB_URI" diff --git a/.evergreen/scripts/archive-mongodb-logs.sh b/.evergreen/scripts/archive-mongodb-logs.sh new file mode 100644 index 0000000000..70a337cd11 --- /dev/null +++ b/.evergreen/scripts/archive-mongodb-logs.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -o xtrace +mkdir out_dir +# shellcheck disable=SC2156 +find "$MONGO_ORCHESTRATION_HOME" -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; +tar zcvf mongodb-logs.tar.gz -C out_dir/ . +rm -rf out_dir diff --git a/.evergreen/scripts/bootstrap-mongo-orchestration.sh b/.evergreen/scripts/bootstrap-mongo-orchestration.sh new file mode 100644 index 0000000000..1d2b145de8 --- /dev/null +++ b/.evergreen/scripts/bootstrap-mongo-orchestration.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -o xtrace + +# Enable core dumps if enabled on the machine +# Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml +if [ -f /proc/self/coredump_filter ]; then + # Set the shell process (and its children processes) to dump ELF headers (bit 4), + # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). + echo 0x13 >/proc/self/coredump_filter + + if [ -f /sbin/sysctl ]; then + # Check that the core pattern is set explicitly on our distro image instead + # of being the OS's default value. This ensures that coredump names are consistent + # across distros and can be picked up by Evergreen. + core_pattern=$(/sbin/sysctl -n "kernel.core_pattern") + if [ "$core_pattern" = "dump_%e.%p.core" ]; then + echo "Enabling coredumps" + ulimit -c unlimited + fi + fi +fi + +if [ "$(uname -s)" = "Darwin" ]; then + core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") + if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then + echo "Enabling coredumps" + ulimit -c unlimited + fi +fi + +if [ -n "${skip_crypt_shared}" ]; then + export SKIP_CRYPT_SHARED=1 +fi + +MONGODB_VERSION=${VERSION} \ + TOPOLOGY=${TOPOLOGY} \ + AUTH=${AUTH:-noauth} \ + SSL=${SSL:-nossl} \ + STORAGE_ENGINE=${STORAGE_ENGINE:-} \ + DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS:-} \ + ORCHESTRATION_FILE=${ORCHESTRATION_FILE:-} \ + REQUIRE_API_VERSION=${REQUIRE_API_VERSION:-} \ + LOAD_BALANCER=${LOAD_BALANCER:-} \ + bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh +# run-orchestration generates expansion file with the MONGODB_URI for the cluster diff --git a/.evergreen/scripts/check-import-time.sh b/.evergreen/scripts/check-import-time.sh new file mode 100644 index 0000000000..cdd2025d59 --- /dev/null +++ b/.evergreen/scripts/check-import-time.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +. .evergreen/scripts/env.sh +set -x +export BASE_SHA="$1" +export HEAD_SHA="$2" +bash .evergreen/run-import-time-test.sh diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh new file mode 100644 index 0000000000..9e583e4f1e --- /dev/null +++ b/.evergreen/scripts/cleanup.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +if [ -f "$DRIVERS_TOOLS"/.evergreen/csfle/secrets-export.sh ]; then + . .evergreen/hatch.sh encryption:teardown +fi +rm -rf "${DRIVERS_TOOLS}" || true +rm -f ./secrets-export.sh || true diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 98d400037c..3c0a0436de 100644 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -1,4 +1,4 @@ -#!/bin/bash -ex +#!/bin/bash -eux # Get the current unique version of this checkout # shellcheck disable=SC2154 @@ -29,7 +29,7 @@ fi export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" -cat < $SCRIPT_DIR/env.sh +cat < "$SCRIPT_DIR"/env.sh set -o errexit export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" export CURRENT_VERSION="$CURRENT_VERSION" @@ -38,6 +38,21 @@ export DRIVERS_TOOLS="$DRIVERS_TOOLS" export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" +export SETDEFAULTENCODING="${SETDEFAULTENCODING:-}" +export SKIP_CSOT_TESTS="${SKIP_CSOT_TESTS:-}" +export MONGODB_STARTED="${MONGODB_STARTED:-}" +export DISABLE_TEST_COMMANDS="${DISABLE_TEST_COMMANDS:-}" +export GREEN_FRAMEWORK="${GREEN_FRAMEWORK:-}" +export NO_EXT="${NO_EXT:-}" +export COVERAGE="${COVERAGE:-}" +export COMPRESSORS="${COMPRESSORS:-}" +export MONGODB_API_VERSION="${MONGODB_API_VERSION:-}" +export SKIP_HATCH="${SKIP_HATCH:-}" +export skip_crypt_shared="${skip_crypt_shared:-}" +export STORAGE_ENGINE="${STORAGE_ENGINE:-}" +export REQUIRE_API_VERSION="${REQUIRE_API_VERSION:-}" +export skip_web_identity_auth_test="${skip_web_identity_auth_test:-}" +export skip_ECS_auth_test="${skip_ECS_auth_test:-}" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" export PATH="$MONGODB_BINARIES:$PATH" diff --git a/.evergreen/scripts/download-and-merge-coverage.sh b/.evergreen/scripts/download-and-merge-coverage.sh new file mode 100644 index 0000000000..808bb957ef --- /dev/null +++ b/.evergreen/scripts/download-and-merge-coverage.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +# Download all the task coverage files. +aws s3 cp --recursive s3://"$1"/coverage/"$2"/"$3"/coverage/ coverage/ diff --git a/.evergreen/scripts/fix-absolute-paths.sh b/.evergreen/scripts/fix-absolute-paths.sh new file mode 100644 index 0000000000..eb9433c673 --- /dev/null +++ b/.evergreen/scripts/fix-absolute-paths.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set +x +. src/.evergreen/scripts/env.sh +# shellcheck disable=SC2044 +for filename in $(find $DRIVERS_TOOLS -name \*.json); do + perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|$DRIVERS_TOOLS|g" $filename +done diff --git a/.evergreen/scripts/init-test-results.sh b/.evergreen/scripts/init-test-results.sh new file mode 100644 index 0000000000..666ac60620 --- /dev/null +++ b/.evergreen/scripts/init-test-results.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +set +x +. src/.evergreen/scripts/env.sh +echo '{"results": [{ "status": "FAIL", "test_file": "Build", "log_raw": "No test-results.json found was created" } ]}' >$PROJECT_DIRECTORY/test-results.json diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh new file mode 100644 index 0000000000..ebcc8f3069 --- /dev/null +++ b/.evergreen/scripts/install-dependencies.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +set -o xtrace +file="$PROJECT_DIRECTORY/.evergreen/install-dependencies.sh" +# Don't use ${file} syntax here because evergreen treats it as an empty expansion. +[ -f "$file" ] && bash "$file" || echo "$file not available, skipping" diff --git a/.evergreen/scripts/make-files-executable.sh b/.evergreen/scripts/make-files-executable.sh new file mode 100644 index 0000000000..806be7c599 --- /dev/null +++ b/.evergreen/scripts/make-files-executable.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set +x +. src/.evergreen/scripts/env.sh +# shellcheck disable=SC2044 +for i in $(find "$DRIVERS_TOOLS"/.evergreen "$PROJECT_DIRECTORY"/.evergreen -name \*.sh); do + chmod +x "$i" +done diff --git a/.evergreen/scripts/prepare-resources.sh b/.evergreen/scripts/prepare-resources.sh new file mode 100644 index 0000000000..33394b55ff --- /dev/null +++ b/.evergreen/scripts/prepare-resources.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +. src/.evergreen/scripts/env.sh +set -o xtrace +rm -rf $DRIVERS_TOOLS +if [ "$PROJECT" = "drivers-tools" ]; then + # If this was a patch build, doing a fresh clone would not actually test the patch + cp -R $PROJECT_DIRECTORY/ $DRIVERS_TOOLS +else + git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS +fi +echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" >$MONGO_ORCHESTRATION_HOME/orchestration.config diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh new file mode 100644 index 0000000000..98a19f047f --- /dev/null +++ b/.evergreen/scripts/run-atlas-tests.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Disable xtrace for security reasons (just in case it was accidentally set). +set +x +set -o errexit +bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect +TEST_ATLAS=1 bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg diff --git a/.evergreen/scripts/run-aws-ecs-auth-test.sh b/.evergreen/scripts/run-aws-ecs-auth-test.sh new file mode 100644 index 0000000000..787e0a710b --- /dev/null +++ b/.evergreen/scripts/run-aws-ecs-auth-test.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# shellcheck disable=SC2154 +if [ "${skip_ECS_auth_test}" = "true" ]; then + echo "This platform does not support the ECS auth test, skipping..." + exit 0 +fi +set -ex +cd "$DRIVERS_TOOLS"/.evergreen/auth_aws +. ./activate-authawsvenv.sh +. aws_setup.sh ecs +export MONGODB_BINARIES="$MONGODB_BINARIES" +export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" +python aws_tester.py ecs +cd - diff --git a/.evergreen/scripts/run-doctests.sh b/.evergreen/scripts/run-doctests.sh new file mode 100644 index 0000000000..f7215ad347 --- /dev/null +++ b/.evergreen/scripts/run-doctests.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +set -o xtrace +PYTHON_BINARY=${PYTHON_BINARY} bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh doctest:test diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh new file mode 100644 index 0000000000..31371ead45 --- /dev/null +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Disable xtrace for security reasons (just in case it was accidentally set). +set +x +bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/enterprise_auth +TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh new file mode 100644 index 0000000000..dd9d522c8a --- /dev/null +++ b/.evergreen/scripts/run-gcpkms-fail-test.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +. .evergreen/scripts/env.sh +export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 +export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz +SKIP_SERVERS=1 bash ./.evergreen/setup-encryption.sh +SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/hatch.sh test:test-eg diff --git a/.evergreen/scripts/run-getdata.sh b/.evergreen/scripts/run-getdata.sh new file mode 100644 index 0000000000..b2d6ecb476 --- /dev/null +++ b/.evergreen/scripts/run-getdata.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -o xtrace +. ${DRIVERS_TOOLS}/.evergreen/download-mongodb.sh || true +get_distro || true +echo $DISTRO +echo $MARCH +echo $OS +uname -a || true +ls /etc/*release* || true +cc --version || true +gcc --version || true +clang --version || true +gcov --version || true +lcov --version || true +llvm-cov --version || true +echo $PATH +ls -la /usr/local/Cellar/llvm/*/bin/ || true +ls -la /usr/local/Cellar/ || true +scan-build --version || true +genhtml --version || true +valgrind --version || true diff --git a/.evergreen/scripts/run-load-balancer.sh b/.evergreen/scripts/run-load-balancer.sh new file mode 100644 index 0000000000..7d431777e5 --- /dev/null +++ b/.evergreen/scripts/run-load-balancer.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +MONGODB_URI=${MONGODB_URI} bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh start diff --git a/.evergreen/scripts/run-mockupdb-tests.sh b/.evergreen/scripts/run-mockupdb-tests.sh new file mode 100644 index 0000000000..8825a0237d --- /dev/null +++ b/.evergreen/scripts/run-mockupdb-tests.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +set -o xtrace +export PYTHON_BINARY=${PYTHON_BINARY} +bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-mockupdb diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/scripts/run-mod-wsgi-tests.sh similarity index 97% rename from .evergreen/run-mod-wsgi-tests.sh rename to .evergreen/scripts/run-mod-wsgi-tests.sh index e1f5238110..607458b8c6 100644 --- a/.evergreen/run-mod-wsgi-tests.sh +++ b/.evergreen/scripts/run-mod-wsgi-tests.sh @@ -28,7 +28,7 @@ export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_ export PYTHONHOME=/opt/python/$PYTHON_VERSION # If MOD_WSGI_EMBEDDED is set use the default embedded mode behavior instead # of daemon mode (WSGIDaemonProcess). -if [ -n "$MOD_WSGI_EMBEDDED" ]; then +if [ -n "${MOD_WSGI_EMBEDDED:-}" ]; then export MOD_WSGI_CONF=mod_wsgi_test_embedded.conf else export MOD_WSGI_CONF=mod_wsgi_test.conf diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/scripts/run-mongodb-aws-test.sh similarity index 67% rename from .evergreen/run-mongodb-aws-test.sh rename to .evergreen/scripts/run-mongodb-aws-test.sh index c4051bb34a..ec20bfd06b 100755 --- a/.evergreen/run-mongodb-aws-test.sh +++ b/.evergreen/scripts/run-mongodb-aws-test.sh @@ -13,10 +13,16 @@ set -o errexit # Exit the script with error if any of the commands fail # mechanism. # PYTHON_BINARY The Python version to use. -echo "Running MONGODB-AWS authentication tests" +# shellcheck disable=SC2154 +if [ "${skip_EC2_auth_test:-}" = "true" ] && { [ "$1" = "ec2" ] || [ "$1" = "web-identity" ]; }; then + echo "This platform does not support the EC2 auth test, skipping..." + exit 0 +fi + +echo "Running MONGODB-AWS authentication tests for $1" # Handle credentials and environment setup. -. $DRIVERS_TOOLS/.evergreen/auth_aws/aws_setup.sh $1 +. "$DRIVERS_TOOLS"/.evergreen/auth_aws/aws_setup.sh "$1" # show test output set -x diff --git a/.evergreen/scripts/run-ocsp-test.sh b/.evergreen/scripts/run-ocsp-test.sh new file mode 100644 index 0000000000..3c6d3b2b3b --- /dev/null +++ b/.evergreen/scripts/run-ocsp-test.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +TEST_OCSP=1 \ +PYTHON_BINARY="${PYTHON_BINARY}" \ +CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ +OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ +bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg +bash "${DRIVERS_TOOLS}"/.evergreen/ocsp/teardown.sh diff --git a/.evergreen/scripts/run-perf-tests.sh b/.evergreen/scripts/run-perf-tests.sh new file mode 100644 index 0000000000..69a369fee1 --- /dev/null +++ b/.evergreen/scripts/run-perf-tests.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +PROJECT_DIRECTORY=${PROJECT_DIRECTORY} +bash "${PROJECT_DIRECTORY}"/.evergreen/run-perf-tests.sh diff --git a/.evergreen/scripts/run-tests.sh b/.evergreen/scripts/run-tests.sh new file mode 100644 index 0000000000..495db83e70 --- /dev/null +++ b/.evergreen/scripts/run-tests.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Disable xtrace +set +x +if [ -n "${MONGODB_STARTED}" ]; then + export PYMONGO_MUST_CONNECT=true +fi +if [ -n "${DISABLE_TEST_COMMANDS}" ]; then + export PYMONGO_DISABLE_TEST_COMMANDS=1 +fi +if [ -n "${test_encryption}" ]; then + # Disable xtrace (just in case it was accidentally set). + set +x + bash "${DRIVERS_TOOLS}"/.evergreen/csfle/await-servers.sh + export TEST_ENCRYPTION=1 + if [ -n "${test_encryption_pyopenssl}" ]; then + export TEST_ENCRYPTION_PYOPENSSL=1 + fi +fi +if [ -n "${test_crypt_shared}" ]; then + export TEST_CRYPT_SHARED=1 + export CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} +fi +if [ -n "${test_pyopenssl}" ]; then + export TEST_PYOPENSSL=1 +fi +if [ -n "${SETDEFAULTENCODING}" ]; then + export SETDEFAULTENCODING="${SETDEFAULTENCODING}" +fi +if [ -n "${test_loadbalancer}" ]; then + export TEST_LOADBALANCER=1 + export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" + export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" +fi +if [ -n "${test_serverless}" ]; then + export TEST_SERVERLESS=1 +fi +if [ -n "${TEST_INDEX_MANAGEMENT:-}" ]; then + export TEST_INDEX_MANAGEMENT=1 +fi +if [ -n "${SKIP_CSOT_TESTS}" ]; then + export SKIP_CSOT_TESTS=1 +fi +GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ + PYTHON_BINARY=${PYTHON_BINARY} \ + NO_EXT=${NO_EXT} \ + COVERAGE=${COVERAGE} \ + COMPRESSORS=${COMPRESSORS} \ + AUTH=${AUTH} \ + SSL=${SSL} \ + TEST_DATA_LAKE=${TEST_DATA_LAKE:-} \ + TEST_SUITES=${TEST_SUITES:-} \ + MONGODB_API_VERSION=${MONGODB_API_VERSION} \ + SKIP_HATCH=${SKIP_HATCH} \ + bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg diff --git a/.evergreen/scripts/run-with-env.sh b/.evergreen/scripts/run-with-env.sh new file mode 100644 index 0000000000..2fd073605d --- /dev/null +++ b/.evergreen/scripts/run-with-env.sh @@ -0,0 +1,21 @@ +#!/bin/bash -eu + +# Example use: bash run-with-env.sh run-tests.sh {args...} + +# Parameter expansion to get just the current directory's name +if [ "${PWD##*/}" == "src" ]; then + . .evergreen/scripts/env.sh + if [ -f ".evergreen/scripts/test-env.sh" ]; then + . .evergreen/scripts/test-env.sh + fi +else + . src/.evergreen/scripts/env.sh + if [ -f "src/.evergreen/scripts/test-env.sh" ]; then + . src/.evergreen/scripts/test-env.sh + fi +fi + +set -eu + +# shellcheck source=/dev/null +. "$@" diff --git a/.evergreen/scripts/setup-encryption.sh b/.evergreen/scripts/setup-encryption.sh new file mode 100644 index 0000000000..2f167cd20b --- /dev/null +++ b/.evergreen/scripts/setup-encryption.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [ -n "${test_encryption}" ]; then + ./.evergreen/hatch.sh encryption:setup +fi diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh new file mode 100644 index 0000000000..65462b2a68 --- /dev/null +++ b/.evergreen/scripts/setup-tests.sh @@ -0,0 +1,27 @@ +#!/bin/bash -eux + +PROJECT_DIRECTORY="$(pwd)" +SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" + +if [ -f "$SCRIPT_DIR/test-env.sh" ]; then + echo "Reading $SCRIPT_DIR/test-env.sh file" + . "$SCRIPT_DIR/test-env.sh" + exit 0 +fi + +cat < "$SCRIPT_DIR"/test-env.sh +export test_encryption="${test_encryption:-}" +export test_encryption_pyopenssl="${test_encryption_pyopenssl:-}" +export test_crypt_shared="${test_crypt_shared:-}" +export test_pyopenssl="${test_pyopenssl:-}" +export test_loadbalancer="${test_loadbalancer:-}" +export test_serverless="${test_serverless:-}" +export TEST_INDEX_MANAGEMENT="${TEST_INDEX_MANAGEMENT:-}" +export TEST_DATA_LAKE="${TEST_DATA_LAKE:-}" +export ORCHESTRATION_FILE="${ORCHESTRATION_FILE:-}" +export AUTH="${AUTH:-noauth}" +export SSL="${SSL:-nossl}" +export PYTHON_BINARY="${PYTHON_BINARY:-}" +EOT + +chmod +x "$SCRIPT_DIR"/test-env.sh diff --git a/.evergreen/scripts/stop-load-balancer.sh b/.evergreen/scripts/stop-load-balancer.sh new file mode 100644 index 0000000000..2d3c5366ec --- /dev/null +++ b/.evergreen/scripts/stop-load-balancer.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +cd "${DRIVERS_TOOLS}"/.evergreen || exit +DRIVERS_TOOLS=${DRIVERS_TOOLS} +bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh stop diff --git a/.evergreen/scripts/teardown-aws.sh b/.evergreen/scripts/teardown-aws.sh new file mode 100644 index 0000000000..634d1e5724 --- /dev/null +++ b/.evergreen/scripts/teardown-aws.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" || exit +if [ -f "./aws_e2e_setup.json" ]; then + . ./activate-authawsvenv.sh + python ./lib/aws_assign_instance_profile.py +fi diff --git a/.evergreen/scripts/teardown-docker.sh b/.evergreen/scripts/teardown-docker.sh new file mode 100644 index 0000000000..733779d058 --- /dev/null +++ b/.evergreen/scripts/teardown-docker.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Remove all Docker images +DOCKER=$(command -v docker) || true +if [ -n "$DOCKER" ]; then + docker rmi -f "$(docker images -a -q)" &> /dev/null || true +fi diff --git a/.evergreen/scripts/upload-coverage-report.sh b/.evergreen/scripts/upload-coverage-report.sh new file mode 100644 index 0000000000..71a2a80bb8 --- /dev/null +++ b/.evergreen/scripts/upload-coverage-report.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +aws s3 cp htmlcov/ s3://"$1"/coverage/"$2"/"$3"/htmlcov/ --recursive --acl public-read --region us-east-1 diff --git a/.evergreen/scripts/windows-fix.sh b/.evergreen/scripts/windows-fix.sh new file mode 100644 index 0000000000..cb4fa44130 --- /dev/null +++ b/.evergreen/scripts/windows-fix.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set +x +. src/.evergreen/scripts/env.sh +# shellcheck disable=SC2044 +for i in $(find "$DRIVERS_TOOLS"/.evergreen "$PROJECT_DIRECTORY"/.evergreen -name \*.sh); do + < "$i" tr -d '\r' >"$i".new + mv "$i".new "$i" +done +# Copy client certificate because symlinks do not work on Windows. +cp "$DRIVERS_TOOLS"/.evergreen/x509gen/client.pem "$MONGO_ORCHESTRATION_HOME"/lib/client.pem diff --git a/.evergreen/setup-encryption.sh b/.evergreen/setup-encryption.sh index 71231e1732..b403ef9ca8 100644 --- a/.evergreen/setup-encryption.sh +++ b/.evergreen/setup-encryption.sh @@ -52,6 +52,9 @@ ls -la libmongocrypt ls -la libmongocrypt/nocrypto if [ -z "${SKIP_SERVERS:-}" ]; then - bash ${DRIVERS_TOOLS}/.evergreen/csfle/setup-secrets.sh - bash ${DRIVERS_TOOLS}/.evergreen/csfle/start-servers.sh + PYTHON_BINARY_OLD=${PYTHON_BINARY} + export PYTHON_BINARY="" + bash "${DRIVERS_TOOLS}"/.evergreen/csfle/setup-secrets.sh + export PYTHON_BINARY=$PYTHON_BINARY_OLD + bash "${DRIVERS_TOOLS}"/.evergreen/csfle/start-servers.sh fi diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index d44425a905..908cf0564a 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -17,7 +17,7 @@ find_python3() { elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.9" ]; then PYTHON="/Library/Frameworks/Python.Framework/Versions/3.9/bin/python3" fi - elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin + elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin PYTHON="C:/python/Python39/python.exe" else # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.9+. @@ -56,7 +56,7 @@ createvirtualenv () { # Workaround for bug in older versions of virtualenv. $VIRTUALENV $VENVPATH 2>/dev/null || $VIRTUALENV $VENVPATH fi - if [ "Windows_NT" = "$OS" ]; then + if [ "Windows_NT" = "${OS:-}" ]; then # Workaround https://bugs.python.org/issue32451: # mongovenv/Scripts/activate: line 3: $'\r': command not found dos2unix $VENVPATH/Scripts/activate || true diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py index 6d77818436..6a195eb6b8 100644 --- a/test/asynchronous/test_client_context.py +++ b/test/asynchronous/test_client_context.py @@ -25,7 +25,7 @@ class TestAsyncClientContext(AsyncUnitTest): def test_must_connect(self): - if "PYMONGO_MUST_CONNECT" not in os.environ: + if not os.environ.get("PYMONGO_MUST_CONNECT"): raise SkipTest("PYMONGO_MUST_CONNECT is not set") self.assertTrue( @@ -37,7 +37,7 @@ def test_must_connect(self): ) def test_serverless(self): - if "TEST_SERVERLESS" not in os.environ: + if not os.environ.get("TEST_SERVERLESS"): raise SkipTest("TEST_SERVERLESS is not set") self.assertTrue( @@ -47,7 +47,7 @@ def test_serverless(self): ) def test_enableTestCommands_is_disabled(self): - if "PYMONGO_DISABLE_TEST_COMMANDS" not in os.environ: + if not os.environ.get("PYMONGO_DISABLE_TEST_COMMANDS"): raise SkipTest("PYMONGO_DISABLE_TEST_COMMANDS is not set") self.assertFalse( @@ -56,7 +56,7 @@ def test_enableTestCommands_is_disabled(self): ) def test_setdefaultencoding_worked(self): - if "SETDEFAULTENCODING" not in os.environ: + if not os.environ.get("SETDEFAULTENCODING"): raise SkipTest("SETDEFAULTENCODING is not set") self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) diff --git a/test/mod_wsgi_test/README.rst b/test/mod_wsgi_test/README.rst index 2c204f7ac5..e96db9406c 100644 --- a/test/mod_wsgi_test/README.rst +++ b/test/mod_wsgi_test/README.rst @@ -107,4 +107,4 @@ Automation At MongoDB, Inc. we use a continuous integration job that tests each combination in the matrix. The job starts up Apache, starts a single server or replica set, and runs ``test_client.py`` with the proper arguments. -See `run-mod-wsgi-tests.sh `_ +See `run-mod-wsgi-tests.sh `_ diff --git a/test/test_client_context.py b/test/test_client_context.py index 5996f9243b..e807ac5f5f 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -25,7 +25,7 @@ class TestClientContext(UnitTest): def test_must_connect(self): - if "PYMONGO_MUST_CONNECT" not in os.environ: + if not os.environ.get("PYMONGO_MUST_CONNECT"): raise SkipTest("PYMONGO_MUST_CONNECT is not set") self.assertTrue( @@ -37,7 +37,7 @@ def test_must_connect(self): ) def test_serverless(self): - if "TEST_SERVERLESS" not in os.environ: + if not os.environ.get("TEST_SERVERLESS"): raise SkipTest("TEST_SERVERLESS is not set") self.assertTrue( @@ -47,7 +47,7 @@ def test_serverless(self): ) def test_enableTestCommands_is_disabled(self): - if "PYMONGO_DISABLE_TEST_COMMANDS" not in os.environ: + if not os.environ.get("PYMONGO_DISABLE_TEST_COMMANDS"): raise SkipTest("PYMONGO_DISABLE_TEST_COMMANDS is not set") self.assertFalse( @@ -56,7 +56,7 @@ def test_enableTestCommands_is_disabled(self): ) def test_setdefaultencoding_worked(self): - if "SETDEFAULTENCODING" not in os.environ: + if not os.environ.get("SETDEFAULTENCODING"): raise SkipTest("SETDEFAULTENCODING is not set") self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) From 9b5c0981d91aa1baf23d6300bc033fd832457ae4 Mon Sep 17 00:00:00 2001 From: Jib Date: Mon, 25 Nov 2024 13:13:44 -0500 Subject: [PATCH 1630/2111] PYTHON-4988: Check C extensions are loaded ONLY in CPython builds (#2016) --- .evergreen/run-tests.sh | 4 ++-- .evergreen/utils.sh | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 9716c1fc79..95fe10a6c3 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -38,6 +38,7 @@ export PIP_PREFER_BINARY=1 # Prefer binary dists by default set +x python -c "import sys; sys.exit(sys.prefix == sys.base_prefix)" || (echo "Not inside a virtual env!"; exit 1) +PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") # Try to source local Drivers Secrets if [ -f ./secrets-export.sh ]; then @@ -48,7 +49,7 @@ else fi # Ensure C extensions have compiled. -if [ -z "${NO_EXT:-}" ]; then +if [ -z "${NO_EXT:-}" ] && [ "$PYTHON_IMPL" = "CPython" ]; then python tools/fail_if_no_c.py fi @@ -245,7 +246,6 @@ python -c 'import sys; print(sys.version)' # Run the tests with coverage if requested and coverage is installed. # Only cover CPython. PyPy reports suspiciously low coverage. -PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then # Keep in sync with combine-coverage.sh. # coverage >=5 is needed for relative_files=true. diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 908cf0564a..d3af2dcc7a 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -78,6 +78,7 @@ testinstall () { PYTHON=$1 RELEASE=$2 NO_VIRTUALENV=$3 + PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") if [ -z "$NO_VIRTUALENV" ]; then createvirtualenv $PYTHON venvtestinstall @@ -86,7 +87,11 @@ testinstall () { $PYTHON -m pip install --upgrade $RELEASE cd tools - $PYTHON fail_if_no_c.py + + if [ "$PYTHON_IMPL" = "CPython" ]; then + $PYTHON fail_if_no_c.py + fi + $PYTHON -m pip uninstall -y pymongo cd .. From 0e8d70457f2e1ac05baff0c9f5232ddee2b0abcf Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 26 Nov 2024 16:55:27 -0500 Subject: [PATCH 1631/2111] Async client uses tasks instead of threads PYTHON-4725 - Async client should use tasks for SDAM instead of threads PYTHON-4860 - Async client should use asyncio.Lock and asyncio.Condition PYTHON-4941 - Synchronous unified test runner being used in asynchronous tests PYTHON-4843 - Async test suite should use a single event loop PYTHON-4945 - Fix test cleanups for mongoses Co-authored-by: Iris <58442094+sleepyStick@users.noreply.github.com> --- .evergreen/config.yml | 2 +- THIRD-PARTY-NOTICES | 58 ++ pymongo/_asyncio_lock.py | 309 +++++++ pymongo/_asyncio_task.py | 49 ++ pymongo/asynchronous/client_bulk.py | 1 - pymongo/asynchronous/cursor.py | 4 +- pymongo/asynchronous/encryption.py | 7 + pymongo/asynchronous/mongo_client.py | 25 +- pymongo/asynchronous/monitor.py | 54 +- pymongo/asynchronous/periodic_executor.py | 219 ----- pymongo/asynchronous/pool.py | 28 +- pymongo/asynchronous/topology.py | 24 +- pymongo/lock.py | 245 +----- pymongo/network_layer.py | 10 +- .../{synchronous => }/periodic_executor.py | 113 ++- pymongo/synchronous/client_bulk.py | 1 - pymongo/synchronous/cursor.py | 2 +- pymongo/synchronous/encryption.py | 7 + pymongo/synchronous/mongo_client.py | 21 +- pymongo/synchronous/monitor.py | 18 +- pymongo/synchronous/pool.py | 28 +- pymongo/synchronous/topology.py | 20 +- test/__init__.py | 97 +-- test/asynchronous/__init__.py | 101 +-- test/asynchronous/conftest.py | 2 +- test/asynchronous/test_bulk.py | 36 +- test/asynchronous/test_change_stream.py | 43 +- test/asynchronous/test_client.py | 80 +- test/asynchronous/test_collation.py | 33 +- test/asynchronous/test_collection.py | 41 +- ...nnections_survive_primary_stepdown_spec.py | 31 +- test/asynchronous/test_create_entities.py | 6 + test/asynchronous/test_cursor.py | 14 +- test/asynchronous/test_database.py | 3 +- test/asynchronous/test_encryption.py | 140 ++- test/asynchronous/test_grid_file.py | 1 + test/asynchronous/test_locks.py | 817 ++++++++---------- test/asynchronous/test_monitoring.py | 41 +- test/asynchronous/test_retryable_writes.py | 65 +- test/asynchronous/test_session.py | 33 +- test/asynchronous/test_transactions.py | 23 +- test/asynchronous/unified_format.py | 60 +- test/asynchronous/utils_spec_runner.py | 26 +- test/conftest.py | 2 +- test/test_bulk.py | 32 +- test/test_change_stream.py | 39 +- test/test_client.py | 31 +- test/test_collation.py | 31 +- test/test_collection.py | 38 +- ...nnections_survive_primary_stepdown_spec.py | 31 +- test/test_create_entities.py | 6 + test/test_cursor.py | 4 - test/test_custom_types.py | 23 +- test/test_database.py | 1 + test/test_encryption.py | 138 ++- test/test_examples.py | 13 +- test/test_grid_file.py | 1 + test/test_gridfs.py | 20 +- test/test_gridfs_bucket.py | 14 +- test/test_monitor.py | 2 +- test/test_monitoring.py | 39 +- test/test_read_concern.py | 20 +- test/test_retryable_writes.py | 65 +- test/test_sdam_monitoring_spec.py | 2 +- test/test_session.py | 32 +- test/test_threads.py | 1 + test/test_transactions.py | 15 +- test/test_typing.py | 7 +- test/unified_format.py | 53 +- test/utils.py | 11 +- test/utils_spec_runner.py | 26 +- tools/synchro.py | 39 +- 72 files changed, 1715 insertions(+), 1959 deletions(-) create mode 100644 pymongo/_asyncio_lock.py create mode 100644 pymongo/_asyncio_task.py delete mode 100644 pymongo/asynchronous/periodic_executor.py rename pymongo/{synchronous => }/periodic_executor.py (67%) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 59b8a543fd..7ca3a72b1a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -281,7 +281,7 @@ functions: "run tests": - command: subprocess.exec params: - include_expansions_in_env: ["TEST_DATA_LAKE", "AUTH", "SSL", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE"] + include_expansions_in_env: ["TEST_DATA_LAKE", "PYTHON_BINARY", "AUTH", "SSL", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE"] binary: bash working_dir: "src" args: diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 55b8ff7078..ad00831a2a 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -38,3 +38,61 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +2) License Notice for _asyncio_lock.py +----------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/pymongo/_asyncio_lock.py b/pymongo/_asyncio_lock.py new file mode 100644 index 0000000000..669b0f63a7 --- /dev/null +++ b/pymongo/_asyncio_lock.py @@ -0,0 +1,309 @@ +# Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved + +"""Lock and Condition classes vendored from https://github.com/python/cpython/blob/main/Lib/asyncio/locks.py +to port 3.13 fixes to older versions of Python. +Can be removed once we drop Python 3.12 support.""" + +from __future__ import annotations + +import collections +import threading +from asyncio import events, exceptions +from typing import Any, Coroutine, Optional + +_global_lock = threading.Lock() + + +class _LoopBoundMixin: + _loop = None + + def _get_loop(self) -> Any: + loop = events._get_running_loop() + + if self._loop is None: + with _global_lock: + if self._loop is None: + self._loop = loop + if loop is not self._loop: + raise RuntimeError(f"{self!r} is bound to a different event loop") + return loop + + +class _ContextManagerMixin: + async def __aenter__(self) -> None: + await self.acquire() # type: ignore[attr-defined] + # We have no use for the "as ..." clause in the with + # statement for locks. + return + + async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() # type: ignore[attr-defined] + + +class Lock(_ContextManagerMixin, _LoopBoundMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular task when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another task changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one task is blocked in acquire() waiting for + the state to turn to unlocked, only one task proceeds when a + release() call resets the state to unlocked; successive release() + calls will unblock tasks in FIFO order. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + """ + + def __init__(self) -> None: + self._waiters: Optional[collections.deque] = None + self._locked = False + + def __repr__(self) -> str: + res = super().__repr__() + extra = "locked" if self._locked else "unlocked" + if self._waiters: + extra = f"{extra}, waiters:{len(self._waiters)}" + return f"<{res[1:-1]} [{extra}]>" + + def locked(self) -> bool: + """Return True if lock is acquired.""" + return self._locked + + async def acquire(self) -> bool: + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + """ + # Implement fair scheduling, where thread always waits + # its turn. Jumping the queue if all are cancelled is an optimization. + if not self._locked and ( + self._waiters is None or all(w.cancelled() for w in self._waiters) + ): + self._locked = True + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + # Currently the only exception designed be able to occur here. + + # Ensure the lock invariant: If lock is not claimed (or about + # to be claimed by us) and there is a Task in waiters, + # ensure that the Task at the head will run. + if not self._locked: + self._wake_up_first() + raise + + # assert self._locked is False + self._locked = True + return True + + def release(self) -> None: + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other tasks are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + self._wake_up_first() + else: + raise RuntimeError("Lock is not acquired.") + + def _wake_up_first(self) -> None: + """Ensure that the first waiter will wake up.""" + if not self._waiters: + return + try: + fut = next(iter(self._waiters)) + except StopIteration: + return + + # .done() means that the waiter is already set to wake up. + if not fut.done(): + fut.set_result(True) + + +class Condition(_ContextManagerMixin, _LoopBoundMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more tasks to wait until they are notified by another + task. + + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock: Optional[Lock] = None) -> None: + if lock is None: + lock = Lock() + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters: collections.deque = collections.deque() + + def __repr__(self) -> str: + res = super().__repr__() + extra = "locked" if self.locked() else "unlocked" + if self._waiters: + extra = f"{extra}, waiters:{len(self._waiters)}" + return f"<{res[1:-1]} [{extra}]>" + + async def wait(self) -> bool: + """Wait until notified. + + If the calling task has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another task. Once + awakened, it re-acquires the lock and returns True. + + This method may return spuriously, + which is why the caller should always + re-check the state and be prepared to wait() again. + """ + if not self.locked(): + raise RuntimeError("cannot wait on un-acquired lock") + + fut = self._get_loop().create_future() + self.release() + try: + try: + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must re-acquire lock even if wait is cancelled. + # We only catch CancelledError here, since we don't want any + # other (fatal) errors with the future to cause us to spin. + err = None + while True: + try: + await self.acquire() + break + except exceptions.CancelledError as e: + err = e + + if err is not None: + try: + raise err # Re-raise most recent exception instance. + finally: + err = None # Break reference cycles. + except BaseException: + # Any error raised out of here _may_ have occurred after this Task + # believed to have been successfully notified. + # Make sure to notify another Task instead. This may result + # in a "spurious wakeup", which is allowed as part of the + # Condition Variable protocol. + self._notify(1) + raise + + async def wait_for(self, predicate: Any) -> Coroutine: + """Wait until a predicate becomes true. + + The predicate should be a callable whose result will be + interpreted as a boolean value. The method will repeatedly + wait() until it evaluates to true. The final predicate value is + the return value. + """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result + + def notify(self, n: int = 1) -> None: + """By default, wake up one task waiting on this condition, if any. + If the calling task has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up n of the tasks waiting for the condition + variable; if fewer than n are waiting, they are all awoken. + + Note: an awakened task does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError("cannot notify on un-acquired lock") + self._notify(n) + + def _notify(self, n: int) -> None: + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self) -> None: + """Wake up all tasks waiting on this condition. This method acts + like notify(), but wakes up all waiting tasks instead of one. If the + calling task has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) diff --git a/pymongo/_asyncio_task.py b/pymongo/_asyncio_task.py new file mode 100644 index 0000000000..8e457763d9 --- /dev/null +++ b/pymongo/_asyncio_task.py @@ -0,0 +1,49 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A custom asyncio.Task that allows checking if a task has been sent a cancellation request. +Can be removed once we drop Python 3.10 support in favor of asyncio.Task.cancelling.""" + + +from __future__ import annotations + +import asyncio +import sys +from typing import Any, Coroutine, Optional + + +# TODO (https://jira.mongodb.org/browse/PYTHON-4981): Revisit once the underlying cause of the swallowed cancellations is uncovered +class _Task(asyncio.Task): + def __init__(self, coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> None: + super().__init__(coro, name=name) + self._cancel_requests = 0 + asyncio._register_task(self) + + def cancel(self, msg: Optional[str] = None) -> bool: + self._cancel_requests += 1 + return super().cancel(msg=msg) + + def uncancel(self) -> int: + if self._cancel_requests > 0: + self._cancel_requests -= 1 + return self._cancel_requests + + def cancelling(self) -> int: + return self._cancel_requests + + +def create_task(coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> asyncio.Task: + if sys.version_info >= (3, 11): + return asyncio.create_task(coro, name=name) + return _Task(coro, name=name) diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 0dcdaa6c07..45824256da 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -476,7 +476,6 @@ async def _process_results_cursor( if op_type == "delete": res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] full_result[f"{op_type}Results"][original_index] = res - except Exception as exc: # Attempt to close the cursor, then raise top-level error. if cmd_cursor.alive: diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 4b4bb52a8e..7d7ae4a5db 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -45,7 +45,7 @@ ) from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure -from pymongo.lock import _ALock, _create_lock +from pymongo.lock import _async_create_lock from pymongo.message import ( _CursorAddress, _GetMore, @@ -77,7 +77,7 @@ class _ConnectionManager: def __init__(self, conn: AsyncConnection, more_to_come: bool): self.conn: Optional[AsyncConnection] = conn self.more_to_come = more_to_come - self._alock = _ALock(_create_lock()) + self._lock = _async_create_lock() def update_exhaust(self, more_to_come: bool) -> None: self.more_to_come = more_to_come diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 735e543047..4802c3f54e 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -15,6 +15,7 @@ """Support for explicit client-side field level encryption.""" from __future__ import annotations +import asyncio import contextlib import enum import socket @@ -111,6 +112,8 @@ def _wrap_encryption_errors() -> Iterator[None]: # BSON encoding/decoding errors are unrelated to encryption so # we should propagate them unchanged. raise + except asyncio.CancelledError: + raise except Exception as exc: raise EncryptionError(exc) from exc @@ -200,6 +203,8 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: conn.close() except (PyMongoError, MongoCryptError): raise # Propagate pymongo errors directly. + except asyncio.CancelledError: + raise except Exception as error: # Wrap I/O errors in PyMongo exceptions. _raise_connection_failure((host, port), error) @@ -722,6 +727,8 @@ async def create_encrypted_collection( await database.create_collection(name=name, **kwargs), encrypted_fields, ) + except asyncio.CancelledError: + raise except Exception as exc: raise EncryptedCollectionError(exc, encrypted_fields) from exc diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 3e4dc482d7..1600e50628 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -32,6 +32,7 @@ """ from __future__ import annotations +import asyncio import contextlib import os import warnings @@ -59,8 +60,8 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.timestamp import Timestamp -from pymongo import _csot, common, helpers_shared, uri_parser -from pymongo.asynchronous import client_session, database, periodic_executor +from pymongo import _csot, common, helpers_shared, periodic_executor, uri_parser +from pymongo.asynchronous import client_session, database from pymongo.asynchronous.change_stream import AsyncChangeStream, AsyncClusterChangeStream from pymongo.asynchronous.client_bulk import _AsyncClientBulk from pymongo.asynchronous.client_session import _EmptyServerSession @@ -82,7 +83,11 @@ WaitQueueTimeoutError, WriteConcernError, ) -from pymongo.lock import _HAS_REGISTER_AT_FORK, _ALock, _create_lock, _release_locks +from pymongo.lock import ( + _HAS_REGISTER_AT_FORK, + _async_create_lock, + _release_locks, +) from pymongo.logger import _CLIENT_LOGGER, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason @@ -842,7 +847,7 @@ def __init__( self._options = options = ClientOptions(username, password, dbase, opts, _IS_SYNC) self._default_database_name = dbase - self._lock = _ALock(_create_lock()) + self._lock = _async_create_lock() self._kill_cursors_queue: list = [] self._event_listeners = options.pool_options._event_listeners @@ -908,7 +913,7 @@ async def target() -> bool: await AsyncMongoClient._process_periodic_tasks(client) return True - executor = periodic_executor.PeriodicExecutor( + executor = periodic_executor.AsyncPeriodicExecutor( interval=common.KILL_CURSOR_FREQUENCY, min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, @@ -1722,7 +1727,7 @@ async def _run_operation( address=address, ) - async with operation.conn_mgr._alock: + async with operation.conn_mgr._lock: async with _MongoClientErrorHandler(self, server, operation.session) as err_handler: # type: ignore[arg-type] err_handler.contribute_socket(operation.conn_mgr.conn) return await server.run_operation( @@ -1970,7 +1975,7 @@ async def _close_cursor_now( try: if conn_mgr: - async with conn_mgr._alock: + async with conn_mgr._lock: # Cursor is pinned to LB outside of a transaction. assert address is not None assert conn_mgr.conn is not None @@ -2033,6 +2038,8 @@ async def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: await self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) + except asyncio.CancelledError: + raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it @@ -2047,6 +2054,8 @@ async def _process_kill_cursors(self) -> None: for address, cursor_ids in address_to_cursor_ids.items(): try: await self._kill_cursors(cursor_ids, address, topology, session=None) + except asyncio.CancelledError: + raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: raise @@ -2061,6 +2070,8 @@ async def _process_periodic_tasks(self) -> None: try: await self._process_kill_cursors() await self._topology.update_pool() + except asyncio.CancelledError: + raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: return diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index a4dc9b7f45..ad1bc70aba 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -16,20 +16,20 @@ from __future__ import annotations +import asyncio import atexit import logging import time import weakref from typing import TYPE_CHECKING, Any, Mapping, Optional, cast -from pymongo import common +from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum -from pymongo.asynchronous import periodic_executor -from pymongo.asynchronous.periodic_executor import _shutdown_executors from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello -from pymongo.lock import _create_lock +from pymongo.lock import _async_create_lock from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage +from pymongo.periodic_executor import _shutdown_executors from pymongo.pool_options import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription @@ -76,7 +76,7 @@ async def target() -> bool: await monitor._run() # type:ignore[attr-defined] return True - executor = periodic_executor.PeriodicExecutor( + executor = periodic_executor.AsyncPeriodicExecutor( interval=interval, min_interval=min_interval, target=target, name=name ) @@ -112,9 +112,9 @@ async def close(self) -> None: """ self.gc_safe_close() - def join(self, timeout: Optional[int] = None) -> None: + async def join(self, timeout: Optional[int] = None) -> None: """Wait for the monitor to stop.""" - self._executor.join(timeout) + await self._executor.join(timeout) def request_check(self) -> None: """If the monitor is sleeping, wake it soon.""" @@ -139,7 +139,7 @@ def __init__( """ super().__init__( topology, - "pymongo_server_monitor_thread", + "pymongo_server_monitor_task", topology_settings.heartbeat_frequency, common.MIN_HEARTBEAT_INTERVAL, ) @@ -238,6 +238,9 @@ async def _run(self) -> None: except ReferenceError: # Topology was garbage-collected. await self.close() + finally: + if self._executor._stopped: + await self._rtt_monitor.close() async def _check_server(self) -> ServerDescription: """Call hello or read the next streaming response. @@ -252,8 +255,10 @@ async def _check_server(self) -> ServerDescription: except (OperationFailure, NotPrimaryError) as exc: # Update max cluster time even when hello fails. details = cast(Mapping[str, Any], exc.details) - self._topology.receive_cluster_time(details.get("$clusterTime")) + await self._topology.receive_cluster_time(details.get("$clusterTime")) raise + except asyncio.CancelledError: + raise except ReferenceError: raise except Exception as error: @@ -280,7 +285,7 @@ async def _check_server(self) -> ServerDescription: await self._reset_connection() if isinstance(error, _OperationCancelled): raise - self._rtt_monitor.reset() + await self._rtt_monitor.reset() # Server type defaults to Unknown. return ServerDescription(address, error=error) @@ -321,9 +326,9 @@ async def _check_once(self) -> ServerDescription: self._conn_id = conn.id response, round_trip_time = await self._check_with_socket(conn) if not response.awaitable: - self._rtt_monitor.add_sample(round_trip_time) + await self._rtt_monitor.add_sample(round_trip_time) - avg_rtt, min_rtt = self._rtt_monitor.get() + avg_rtt, min_rtt = await self._rtt_monitor.get() sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) if self._publish: assert self._listeners is not None @@ -419,6 +424,8 @@ def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception + except asyncio.CancelledError: + raise except Exception: # As per the spec, upon encountering an error: # - An error must not be raised @@ -439,7 +446,7 @@ def __init__(self, topology: Topology, topology_settings: TopologySettings, pool """ super().__init__( topology, - "pymongo_server_rtt_thread", + "pymongo_server_rtt_task", topology_settings.heartbeat_frequency, common.MIN_HEARTBEAT_INTERVAL, ) @@ -447,7 +454,7 @@ def __init__(self, topology: Topology, topology_settings: TopologySettings, pool self._pool = pool self._moving_average = MovingAverage() self._moving_min = MovingMinimum() - self._lock = _create_lock() + self._lock = _async_create_lock() async def close(self) -> None: self.gc_safe_close() @@ -455,20 +462,20 @@ async def close(self) -> None: # thread has the socket checked out, it will be closed when checked in. await self._pool.reset() - def add_sample(self, sample: float) -> None: + async def add_sample(self, sample: float) -> None: """Add a RTT sample.""" - with self._lock: + async with self._lock: self._moving_average.add_sample(sample) self._moving_min.add_sample(sample) - def get(self) -> tuple[Optional[float], float]: + async def get(self) -> tuple[Optional[float], float]: """Get the calculated average, or None if no samples yet and the min.""" - with self._lock: + async with self._lock: return self._moving_average.get(), self._moving_min.get() - def reset(self) -> None: + async def reset(self) -> None: """Reset the average RTT.""" - with self._lock: + async with self._lock: self._moving_average.reset() self._moving_min.reset() @@ -478,10 +485,12 @@ async def _run(self) -> None: # heartbeat protocol (MongoDB 4.4+). # XXX: Skip check if the server is unknown? rtt = await self._ping() - self.add_sample(rtt) + await self.add_sample(rtt) except ReferenceError: # Topology was garbage-collected. await self.close() + except asyncio.CancelledError: + raise except Exception: await self._pool.reset() @@ -536,4 +545,5 @@ def _shutdown_resources() -> None: shutdown() -atexit.register(_shutdown_resources) +if _IS_SYNC: + atexit.register(_shutdown_resources) diff --git a/pymongo/asynchronous/periodic_executor.py b/pymongo/asynchronous/periodic_executor.py deleted file mode 100644 index f3d2fddba3..0000000000 --- a/pymongo/asynchronous/periodic_executor.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Run a target function on a background thread.""" - -from __future__ import annotations - -import asyncio -import sys -import threading -import time -import weakref -from typing import Any, Optional - -from pymongo.lock import _ALock, _create_lock - -_IS_SYNC = False - - -class PeriodicExecutor: - def __init__( - self, - interval: float, - min_interval: float, - target: Any, - name: Optional[str] = None, - ): - """Run a target function periodically on a background thread. - - If the target's return value is false, the executor stops. - - :param interval: Seconds between calls to `target`. - :param min_interval: Minimum seconds between calls if `wake` is - called very often. - :param target: A function. - :param name: A name to give the underlying thread. - """ - # threading.Event and its internal condition variable are expensive - # in Python 2, see PYTHON-983. Use a boolean to know when to wake. - # The executor's design is constrained by several Python issues, see - # "periodic_executor.rst" in this repository. - self._event = False - self._interval = interval - self._min_interval = min_interval - self._target = target - self._stopped = False - self._thread: Optional[threading.Thread] = None - self._name = name - self._skip_sleep = False - self._thread_will_exit = False - self._lock = _ALock(_create_lock()) - - def __repr__(self) -> str: - return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" - - def _run_async(self) -> None: - # The default asyncio loop implementation on Windows - # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) - # We explicitly use a different loop implementation here to prevent that issue - if sys.platform == "win32": - loop = asyncio.SelectorEventLoop() - try: - loop.run_until_complete(self._run()) # type: ignore[func-returns-value] - finally: - loop.close() - else: - asyncio.run(self._run()) # type: ignore[func-returns-value] - - def open(self) -> None: - """Start. Multiple calls have no effect. - - Not safe to call from multiple threads at once. - """ - with self._lock: - if self._thread_will_exit: - # If the background thread has read self._stopped as True - # there is a chance that it has not yet exited. The call to - # join should not block indefinitely because there is no - # other work done outside the while loop in self._run. - try: - assert self._thread is not None - self._thread.join() - except ReferenceError: - # Thread terminated. - pass - self._thread_will_exit = False - self._stopped = False - started: Any = False - try: - started = self._thread and self._thread.is_alive() - except ReferenceError: - # Thread terminated. - pass - - if not started: - if _IS_SYNC: - thread = threading.Thread(target=self._run, name=self._name) - else: - thread = threading.Thread(target=self._run_async, name=self._name) - thread.daemon = True - self._thread = weakref.proxy(thread) - _register_executor(self) - # Mitigation to RuntimeError firing when thread starts on shutdown - # https://github.com/python/cpython/issues/114570 - try: - thread.start() - except RuntimeError as e: - if "interpreter shutdown" in str(e) or sys.is_finalizing(): - self._thread = None - return - raise - - def close(self, dummy: Any = None) -> None: - """Stop. To restart, call open(). - - The dummy parameter allows an executor's close method to be a weakref - callback; see monitor.py. - """ - self._stopped = True - - def join(self, timeout: Optional[int] = None) -> None: - if self._thread is not None: - try: - self._thread.join(timeout) - except (ReferenceError, RuntimeError): - # Thread already terminated, or not yet started. - pass - - def wake(self) -> None: - """Execute the target function soon.""" - self._event = True - - def update_interval(self, new_interval: int) -> None: - self._interval = new_interval - - def skip_sleep(self) -> None: - self._skip_sleep = True - - async def _should_stop(self) -> bool: - async with self._lock: - if self._stopped: - self._thread_will_exit = True - return True - return False - - async def _run(self) -> None: - while not await self._should_stop(): - try: - if not await self._target(): - self._stopped = True - break - except BaseException: - async with self._lock: - self._stopped = True - self._thread_will_exit = True - - raise - - if self._skip_sleep: - self._skip_sleep = False - else: - deadline = time.monotonic() + self._interval - while not self._stopped and time.monotonic() < deadline: - await asyncio.sleep(self._min_interval) - if self._event: - break # Early wake. - - self._event = False - - -# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started, -# an executor is kept alive by a strong reference from its thread and perhaps -# from other objects. When the thread dies and all other referrers are freed, -# the executor is freed and removed from _EXECUTORS. If any threads are -# running when the interpreter begins to shut down, we try to halt and join -# them to avoid spurious errors. -_EXECUTORS = set() - - -def _register_executor(executor: PeriodicExecutor) -> None: - ref = weakref.ref(executor, _on_executor_deleted) - _EXECUTORS.add(ref) - - -def _on_executor_deleted(ref: weakref.ReferenceType[PeriodicExecutor]) -> None: - _EXECUTORS.remove(ref) - - -def _shutdown_executors() -> None: - if _EXECUTORS is None: - return - - # Copy the set. Stopping threads has the side effect of removing executors. - executors = list(_EXECUTORS) - - # First signal all executors to close... - for ref in executors: - executor = ref() - if executor: - executor.close() - - # ...then try to join them. - for ref in executors: - executor = ref() - if executor: - executor.join(1) - - executor = None diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index ca0cebd417..5dc5675a0a 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -23,7 +23,6 @@ import socket import ssl import sys -import threading import time import weakref from typing import ( @@ -65,7 +64,11 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat -from pymongo.lock import _ACondition, _ALock, _create_lock +from pymongo.lock import ( + _async_cond_wait, + _async_create_condition, + _async_create_lock, +) from pymongo.logger import ( _CONNECTION_LOGGER, _ConnectionStatusMessage, @@ -208,11 +211,6 @@ def _raise_connection_failure( raise AutoReconnect(msg) from error -async def _cond_wait(condition: _ACondition, deadline: Optional[float]) -> bool: - timeout = deadline - time.monotonic() if deadline else None - return await condition.wait(timeout) - - def _get_timeout_details(options: PoolOptions) -> dict[str, float]: details = {} timeout = _csot.get_timeout() @@ -706,6 +704,8 @@ def _close_conn(self) -> None: # shutdown. try: self.conn.close() + except asyncio.CancelledError: + raise except Exception: # noqa: S110 pass @@ -992,8 +992,8 @@ def __init__( # from the right side. self.conns: collections.deque = collections.deque() self.active_contexts: set[_CancellationContext] = set() - _lock = _create_lock() - self.lock = _ALock(_lock) + self.lock = _async_create_lock() + self._max_connecting_cond = _async_create_condition(self.lock) self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 @@ -1019,7 +1019,7 @@ def __init__( # The first portion of the wait queue. # Enforces: maxPoolSize # Also used for: clearing the wait queue - self.size_cond = _ACondition(threading.Condition(_lock)) + self.size_cond = _async_create_condition(self.lock) self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: @@ -1027,7 +1027,7 @@ def __init__( # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue - self._max_connecting_cond = _ACondition(threading.Condition(_lock)) + self._max_connecting_cond = _async_create_condition(self.lock) self._max_connecting = self.opts.max_connecting self._pending = 0 self._client_id = client_id @@ -1466,7 +1466,8 @@ async def _get_conn( async with self.size_cond: self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): - if not await _cond_wait(self.size_cond, deadline): + timeout = deadline - time.monotonic() if deadline else None + if not await _async_cond_wait(self.size_cond, timeout): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: @@ -1489,7 +1490,8 @@ async def _get_conn( async with self._max_connecting_cond: self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self._max_connecting): - if not await _cond_wait(self._max_connecting_cond, deadline): + timeout = deadline - time.monotonic() if deadline else None + if not await _async_cond_wait(self._max_connecting_cond, timeout): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self._max_connecting: diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 82af4257ba..6d67710a7e 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -27,8 +27,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast -from pymongo import _csot, common, helpers_shared -from pymongo.asynchronous import periodic_executor +from pymongo import _csot, common, helpers_shared, periodic_executor from pymongo.asynchronous.client_session import _ServerSession, _ServerSessionPool from pymongo.asynchronous.monitor import SrvMonitor from pymongo.asynchronous.pool import Pool @@ -44,7 +43,11 @@ WriteError, ) from pymongo.hello import Hello -from pymongo.lock import _ACondition, _ALock, _create_lock +from pymongo.lock import ( + _async_cond_wait, + _async_create_condition, + _async_create_lock, +) from pymongo.logger import ( _SDAM_LOGGER, _SERVER_SELECTION_LOGGER, @@ -170,9 +173,10 @@ def __init__(self, topology_settings: TopologySettings): self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False self._closed = False - _lock = _create_lock() - self._lock = _ALock(_lock) - self._condition = _ACondition(self._settings.condition_class(_lock)) + self._lock = _async_create_lock() + self._condition = _async_create_condition( + self._lock, self._settings.condition_class if _IS_SYNC else None + ) self._servers: dict[_Address, Server] = {} self._pid: Optional[int] = None self._max_cluster_time: Optional[ClusterTime] = None @@ -185,7 +189,7 @@ def __init__(self, topology_settings: TopologySettings): async def target() -> bool: return process_events_queue(weak) - executor = periodic_executor.PeriodicExecutor( + executor = periodic_executor.AsyncPeriodicExecutor( interval=common.EVENTS_QUEUE_FREQUENCY, min_interval=common.MIN_HEARTBEAT_INTERVAL, target=target, @@ -354,7 +358,7 @@ async def _select_servers_loop( # change, or for a timeout. We won't miss any changes that # came after our most recent apply_selector call, since we've # held the lock until now. - await self._condition.wait(common.MIN_HEARTBEAT_INTERVAL) + await _async_cond_wait(self._condition, common.MIN_HEARTBEAT_INTERVAL) self._description.check_compatible() now = time.monotonic() server_descriptions = self._description.apply_selector( @@ -654,7 +658,7 @@ async def request_check_all(self, wait_time: int = 5) -> None: """Wake all monitors, wait for at least one to check its server.""" async with self._lock: self._request_check_all() - await self._condition.wait(wait_time) + await _async_cond_wait(self._condition, wait_time) def data_bearing_servers(self) -> list[ServerDescription]: """Return a list of all data-bearing servers. @@ -742,7 +746,7 @@ async def close(self) -> None: if self._publish_server or self._publish_tp: # Make sure the events executor thread is fully closed before publishing the remaining events self.__events_executor.close() - self.__events_executor.join(1) + await self.__events_executor.join(1) process_events_queue(weakref.ref(self._events)) # type: ignore[arg-type] @property diff --git a/pymongo/lock.py b/pymongo/lock.py index 0cbfb4a57e..6bf7138017 100644 --- a/pymongo/lock.py +++ b/pymongo/lock.py @@ -11,15 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +"""Internal helpers for lock and condition coordination primitives.""" + from __future__ import annotations import asyncio -import collections import os +import sys import threading -import time import weakref -from typing import Any, Callable, Optional, TypeVar +from asyncio import wait_for +from typing import Any, Optional, TypeVar + +import pymongo._asyncio_lock _HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") @@ -28,6 +33,15 @@ _T = TypeVar("_T") +# Needed to support 3.13 asyncio fixes (https://github.com/python/cpython/issues/112202) +# in older versions of Python +if sys.version_info >= (3, 13): + Lock = asyncio.Lock + Condition = asyncio.Condition +else: + Lock = pymongo._asyncio_lock.Lock + Condition = pymongo._asyncio_lock.Condition + def _create_lock() -> threading.Lock: """Represents a lock that is tracked upon instantiation using a WeakSet and @@ -39,6 +53,27 @@ def _create_lock() -> threading.Lock: return lock +def _async_create_lock() -> Lock: + """Represents an asyncio.Lock.""" + return Lock() + + +def _create_condition( + lock: threading.Lock, condition_class: Optional[Any] = None +) -> threading.Condition: + """Represents a threading.Condition.""" + if condition_class: + return condition_class(lock) + return threading.Condition(lock) + + +def _async_create_condition(lock: Lock, condition_class: Optional[Any] = None) -> Condition: + """Represents an asyncio.Condition.""" + if condition_class: + return condition_class(lock) + return Condition(lock) + + def _release_locks() -> None: # Completed the fork, reset all the locks in the child. for lock in _forkable_locks: @@ -46,202 +81,12 @@ def _release_locks() -> None: lock.release() -# Needed only for synchro.py compat. -def _Lock(lock: threading.Lock) -> threading.Lock: - return lock +async def _async_cond_wait(condition: Condition, timeout: Optional[float]) -> bool: + try: + return await wait_for(condition.wait(), timeout) + except asyncio.TimeoutError: + return False -class _ALock: - __slots__ = ("_lock",) - - def __init__(self, lock: threading.Lock) -> None: - self._lock = lock - - def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: - return self._lock.acquire(blocking=blocking, timeout=timeout) - - async def a_acquire(self, blocking: bool = True, timeout: float = -1) -> bool: - if timeout > 0: - tstart = time.monotonic() - while True: - acquired = self._lock.acquire(blocking=False) - if acquired: - return True - if timeout > 0 and (time.monotonic() - tstart) > timeout: - return False - if not blocking: - return False - await asyncio.sleep(0) - - def release(self) -> None: - self._lock.release() - - async def __aenter__(self) -> _ALock: - await self.a_acquire() - return self - - def __enter__(self) -> _ALock: - self._lock.acquire() - return self - - def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None: - self.release() - - async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: - self.release() - - -def _safe_set_result(fut: asyncio.Future) -> None: - # Ensure the future hasn't been cancelled before calling set_result. - if not fut.done(): - fut.set_result(False) - - -class _ACondition: - __slots__ = ("_condition", "_waiters") - - def __init__(self, condition: threading.Condition) -> None: - self._condition = condition - self._waiters: collections.deque = collections.deque() - - async def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: - if timeout > 0: - tstart = time.monotonic() - while True: - acquired = self._condition.acquire(blocking=False) - if acquired: - return True - if timeout > 0 and (time.monotonic() - tstart) > timeout: - return False - if not blocking: - return False - await asyncio.sleep(0) - - async def wait(self, timeout: Optional[float] = None) -> bool: - """Wait until notified. - - If the calling task has not acquired the lock when this - method is called, a RuntimeError is raised. - - This method releases the underlying lock, and then blocks - until it is awakened by a notify() or notify_all() call for - the same condition variable in another task. Once - awakened, it re-acquires the lock and returns True. - - This method may return spuriously, - which is why the caller should always - re-check the state and be prepared to wait() again. - """ - loop = asyncio.get_running_loop() - fut = loop.create_future() - self._waiters.append((loop, fut)) - self.release() - try: - try: - try: - await asyncio.wait_for(fut, timeout) - return True - except asyncio.TimeoutError: - return False # Return false on timeout for sync pool compat. - finally: - # Must re-acquire lock even if wait is cancelled. - # We only catch CancelledError here, since we don't want any - # other (fatal) errors with the future to cause us to spin. - err = None - while True: - try: - await self.acquire() - break - except asyncio.exceptions.CancelledError as e: - err = e - - self._waiters.remove((loop, fut)) - if err is not None: - try: - raise err # Re-raise most recent exception instance. - finally: - err = None # Break reference cycles. - except BaseException: - # Any error raised out of here _may_ have occurred after this Task - # believed to have been successfully notified. - # Make sure to notify another Task instead. This may result - # in a "spurious wakeup", which is allowed as part of the - # Condition Variable protocol. - self.notify(1) - raise - - async def wait_for(self, predicate: Callable[[], _T]) -> _T: - """Wait until a predicate becomes true. - - The predicate should be a callable whose result will be - interpreted as a boolean value. The method will repeatedly - wait() until it evaluates to true. The final predicate value is - the return value. - """ - result = predicate() - while not result: - await self.wait() - result = predicate() - return result - - def notify(self, n: int = 1) -> None: - """By default, wake up one coroutine waiting on this condition, if any. - If the calling coroutine has not acquired the lock when this method - is called, a RuntimeError is raised. - - This method wakes up at most n of the coroutines waiting for the - condition variable; it is a no-op if no coroutines are waiting. - - Note: an awakened coroutine does not actually return from its - wait() call until it can reacquire the lock. Since notify() does - not release the lock, its caller should. - """ - idx = 0 - to_remove = [] - for loop, fut in self._waiters: - if idx >= n: - break - - if fut.done(): - continue - - try: - loop.call_soon_threadsafe(_safe_set_result, fut) - except RuntimeError: - # Loop was closed, ignore. - to_remove.append((loop, fut)) - continue - - idx += 1 - - for waiter in to_remove: - self._waiters.remove(waiter) - - def notify_all(self) -> None: - """Wake up all threads waiting on this condition. This method acts - like notify(), but wakes up all waiting threads instead of one. If the - calling thread has not acquired the lock when this method is called, - a RuntimeError is raised. - """ - self.notify(len(self._waiters)) - - def locked(self) -> bool: - """Only needed for tests in test_locks.""" - return self._condition._lock.locked() # type: ignore[attr-defined] - - def release(self) -> None: - self._condition.release() - - async def __aenter__(self) -> _ACondition: - await self.acquire() - return self - - def __enter__(self) -> _ACondition: - self._condition.acquire() - return self - - async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: - self.release() - - def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> None: - self.release() +def _cond_wait(condition: threading.Condition, timeout: Optional[float]) -> bool: + return condition.wait(timeout) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index aa16e85a07..6ab6db2f7d 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -29,6 +29,7 @@ ) from pymongo import _csot, ssl_support +from pymongo._asyncio_task import create_task from pymongo.errors import _OperationCancelled from pymongo.socket_checker import _errno_from_exception @@ -259,19 +260,20 @@ async def async_receive_data( sock.settimeout(0.0) loop = asyncio.get_event_loop() - cancellation_task = asyncio.create_task(_poll_cancellation(conn)) + cancellation_task = create_task(_poll_cancellation(conn)) try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): - read_task = asyncio.create_task(_async_receive_ssl(sock, length, loop)) # type: ignore[arg-type] + read_task = create_task(_async_receive_ssl(sock, length, loop)) # type: ignore[arg-type] else: - read_task = asyncio.create_task(_async_receive(sock, length, loop)) # type: ignore[arg-type] + read_task = create_task(_async_receive(sock, length, loop)) # type: ignore[arg-type] tasks = [read_task, cancellation_task] done, pending = await asyncio.wait( tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED ) for task in pending: task.cancel() - await asyncio.wait(pending) + if pending: + await asyncio.wait(pending) if len(done) == 0: raise socket.timeout("timed out") if read_task in done: diff --git a/pymongo/synchronous/periodic_executor.py b/pymongo/periodic_executor.py similarity index 67% rename from pymongo/synchronous/periodic_executor.py rename to pymongo/periodic_executor.py index 525268b14b..2f89b91deb 100644 --- a/pymongo/synchronous/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -23,9 +23,102 @@ import weakref from typing import Any, Optional +from pymongo._asyncio_task import create_task from pymongo.lock import _create_lock -_IS_SYNC = True +_IS_SYNC = False + + +class AsyncPeriodicExecutor: + def __init__( + self, + interval: float, + min_interval: float, + target: Any, + name: Optional[str] = None, + ): + """Run a target function periodically on a background task. + + If the target's return value is false, the executor stops. + + :param interval: Seconds between calls to `target`. + :param min_interval: Minimum seconds between calls if `wake` is + called very often. + :param target: A function. + :param name: A name to give the underlying task. + """ + self._event = False + self._interval = interval + self._min_interval = min_interval + self._target = target + self._stopped = False + self._task: Optional[asyncio.Task] = None + self._name = name + self._skip_sleep = False + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" + + def open(self) -> None: + """Start. Multiple calls have no effect.""" + self._stopped = False + + if self._task is None or ( + self._task.done() and not self._task.cancelled() and not self._task.cancelling() # type: ignore[unused-ignore, attr-defined] + ): + self._task = create_task(self._run(), name=self._name) + + def close(self, dummy: Any = None) -> None: + """Stop. To restart, call open(). + + The dummy parameter allows an executor's close method to be a weakref + callback; see monitor.py. + """ + self._stopped = True + + async def join(self, timeout: Optional[int] = None) -> None: + if self._task is not None: + try: + await asyncio.wait_for(self._task, timeout=timeout) # type-ignore: [arg-type] + except asyncio.TimeoutError: + # Task timed out + pass + except asyncio.exceptions.CancelledError: + # Task was already finished, or not yet started. + raise + + def wake(self) -> None: + """Execute the target function soon.""" + self._event = True + + def update_interval(self, new_interval: int) -> None: + self._interval = new_interval + + def skip_sleep(self) -> None: + self._skip_sleep = True + + async def _run(self) -> None: + while not self._stopped: + if self._task and self._task.cancelling(): # type: ignore[unused-ignore, attr-defined] + raise asyncio.CancelledError + try: + if not await self._target(): + self._stopped = True + break + except BaseException: + self._stopped = True + raise + + if self._skip_sleep: + self._skip_sleep = False + else: + deadline = time.monotonic() + self._interval + while not self._stopped and time.monotonic() < deadline: + await asyncio.sleep(self._min_interval) + if self._event: + break # Early wake. + + self._event = False class PeriodicExecutor: @@ -64,19 +157,6 @@ def __init__( def __repr__(self) -> str: return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" - def _run_async(self) -> None: - # The default asyncio loop implementation on Windows - # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) - # We explicitly use a different loop implementation here to prevent that issue - if sys.platform == "win32": - loop = asyncio.SelectorEventLoop() - try: - loop.run_until_complete(self._run()) # type: ignore[func-returns-value] - finally: - loop.close() - else: - asyncio.run(self._run()) # type: ignore[func-returns-value] - def open(self) -> None: """Start. Multiple calls have no effect. @@ -104,10 +184,7 @@ def open(self) -> None: pass if not started: - if _IS_SYNC: - thread = threading.Thread(target=self._run, name=self._name) - else: - thread = threading.Thread(target=self._run_async, name=self._name) + thread = threading.Thread(target=self._run, name=self._name) thread.daemon = True self._thread = weakref.proxy(thread) _register_executor(self) diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 625e8429eb..9f6e3f7cf0 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -474,7 +474,6 @@ def _process_results_cursor( if op_type == "delete": res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] full_result[f"{op_type}Results"][original_index] = res - except Exception as exc: # Attempt to close the cursor, then raise top-level error. if cmd_cursor.alive: diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 27a76cf91d..9a7637704f 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -77,7 +77,7 @@ class _ConnectionManager: def __init__(self, conn: Connection, more_to_come: bool): self.conn: Optional[Connection] = conn self.more_to_come = more_to_come - self._alock = _create_lock() + self._lock = _create_lock() def update_exhaust(self, more_to_come: bool) -> None: self.more_to_come = more_to_come diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 506ff8bcba..09d0c0f2fd 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -15,6 +15,7 @@ """Support for explicit client-side field level encryption.""" from __future__ import annotations +import asyncio import contextlib import enum import socket @@ -111,6 +112,8 @@ def _wrap_encryption_errors() -> Iterator[None]: # BSON encoding/decoding errors are unrelated to encryption so # we should propagate them unchanged. raise + except asyncio.CancelledError: + raise except Exception as exc: raise EncryptionError(exc) from exc @@ -200,6 +203,8 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: conn.close() except (PyMongoError, MongoCryptError): raise # Propagate pymongo errors directly. + except asyncio.CancelledError: + raise except Exception as error: # Wrap I/O errors in PyMongo exceptions. _raise_connection_failure((host, port), error) @@ -716,6 +721,8 @@ def create_encrypted_collection( database.create_collection(name=name, **kwargs), encrypted_fields, ) + except asyncio.CancelledError: + raise except Exception as exc: raise EncryptedCollectionError(exc, encrypted_fields) from exc diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 00c6203a94..a694a58c1e 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -32,6 +32,7 @@ """ from __future__ import annotations +import asyncio import contextlib import os import warnings @@ -58,7 +59,7 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.timestamp import Timestamp -from pymongo import _csot, common, helpers_shared, uri_parser +from pymongo import _csot, common, helpers_shared, periodic_executor, uri_parser from pymongo.client_options import ClientOptions from pymongo.errors import ( AutoReconnect, @@ -74,7 +75,11 @@ WaitQueueTimeoutError, WriteConcernError, ) -from pymongo.lock import _HAS_REGISTER_AT_FORK, _create_lock, _release_locks +from pymongo.lock import ( + _HAS_REGISTER_AT_FORK, + _create_lock, + _release_locks, +) from pymongo.logger import _CLIENT_LOGGER, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason @@ -91,7 +96,7 @@ from pymongo.results import ClientBulkWriteResult from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.synchronous import client_session, database, periodic_executor +from pymongo.synchronous import client_session, database from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream from pymongo.synchronous.client_bulk import _ClientBulk from pymongo.synchronous.client_session import _EmptyServerSession @@ -1716,7 +1721,7 @@ def _run_operation( address=address, ) - with operation.conn_mgr._alock: + with operation.conn_mgr._lock: with _MongoClientErrorHandler(self, server, operation.session) as err_handler: # type: ignore[arg-type] err_handler.contribute_socket(operation.conn_mgr.conn) return server.run_operation( @@ -1964,7 +1969,7 @@ def _close_cursor_now( try: if conn_mgr: - with conn_mgr._alock: + with conn_mgr._lock: # Cursor is pinned to LB outside of a transaction. assert address is not None assert conn_mgr.conn is not None @@ -2027,6 +2032,8 @@ def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) + except asyncio.CancelledError: + raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it @@ -2041,6 +2048,8 @@ def _process_kill_cursors(self) -> None: for address, cursor_ids in address_to_cursor_ids.items(): try: self._kill_cursors(cursor_ids, address, topology, session=None) + except asyncio.CancelledError: + raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: raise @@ -2055,6 +2064,8 @@ def _process_periodic_tasks(self) -> None: try: self._process_kill_cursors() self._topology.update_pool() + except asyncio.CancelledError: + raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: return diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index d02ad0a6fd..df4130d4ab 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -16,24 +16,24 @@ from __future__ import annotations +import asyncio import atexit import logging import time import weakref from typing import TYPE_CHECKING, Any, Mapping, Optional, cast -from pymongo import common +from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _create_lock from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage +from pymongo.periodic_executor import _shutdown_executors from pymongo.pool_options import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription from pymongo.srv_resolver import _SrvResolver -from pymongo.synchronous import periodic_executor -from pymongo.synchronous.periodic_executor import _shutdown_executors if TYPE_CHECKING: from pymongo.synchronous.pool import Connection, Pool, _CancellationContext @@ -238,6 +238,9 @@ def _run(self) -> None: except ReferenceError: # Topology was garbage-collected. self.close() + finally: + if self._executor._stopped: + self._rtt_monitor.close() def _check_server(self) -> ServerDescription: """Call hello or read the next streaming response. @@ -254,6 +257,8 @@ def _check_server(self) -> ServerDescription: details = cast(Mapping[str, Any], exc.details) self._topology.receive_cluster_time(details.get("$clusterTime")) raise + except asyncio.CancelledError: + raise except ReferenceError: raise except Exception as error: @@ -419,6 +424,8 @@ def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception + except asyncio.CancelledError: + raise except Exception: # As per the spec, upon encountering an error: # - An error must not be raised @@ -482,6 +489,8 @@ def _run(self) -> None: except ReferenceError: # Topology was garbage-collected. self.close() + except asyncio.CancelledError: + raise except Exception: self._pool.reset() @@ -536,4 +545,5 @@ def _shutdown_resources() -> None: shutdown() -atexit.register(_shutdown_resources) +if _IS_SYNC: + atexit.register(_shutdown_resources) diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 86baf15b9a..1a155c82d7 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -23,7 +23,6 @@ import socket import ssl import sys -import threading import time import weakref from typing import ( @@ -62,7 +61,11 @@ _CertificateError, ) from pymongo.hello import Hello, HelloCompat -from pymongo.lock import _create_lock, _Lock +from pymongo.lock import ( + _cond_wait, + _create_condition, + _create_lock, +) from pymongo.logger import ( _CONNECTION_LOGGER, _ConnectionStatusMessage, @@ -208,11 +211,6 @@ def _raise_connection_failure( raise AutoReconnect(msg) from error -def _cond_wait(condition: threading.Condition, deadline: Optional[float]) -> bool: - timeout = deadline - time.monotonic() if deadline else None - return condition.wait(timeout) - - def _get_timeout_details(options: PoolOptions) -> dict[str, float]: details = {} timeout = _csot.get_timeout() @@ -704,6 +702,8 @@ def _close_conn(self) -> None: # shutdown. try: self.conn.close() + except asyncio.CancelledError: + raise except Exception: # noqa: S110 pass @@ -988,8 +988,8 @@ def __init__( # from the right side. self.conns: collections.deque = collections.deque() self.active_contexts: set[_CancellationContext] = set() - _lock = _create_lock() - self.lock = _Lock(_lock) + self.lock = _create_lock() + self._max_connecting_cond = _create_condition(self.lock) self.active_sockets = 0 # Monotonically increasing connection ID required for CMAP Events. self.next_connection_id = 1 @@ -1015,7 +1015,7 @@ def __init__( # The first portion of the wait queue. # Enforces: maxPoolSize # Also used for: clearing the wait queue - self.size_cond = threading.Condition(_lock) + self.size_cond = _create_condition(self.lock) self.requests = 0 self.max_pool_size = self.opts.max_pool_size if not self.max_pool_size: @@ -1023,7 +1023,7 @@ def __init__( # The second portion of the wait queue. # Enforces: maxConnecting # Also used for: clearing the wait queue - self._max_connecting_cond = threading.Condition(_lock) + self._max_connecting_cond = _create_condition(self.lock) self._max_connecting = self.opts.max_connecting self._pending = 0 self._client_id = client_id @@ -1460,7 +1460,8 @@ def _get_conn( with self.size_cond: self._raise_if_not_ready(checkout_started_time, emit_event=True) while not (self.requests < self.max_pool_size): - if not _cond_wait(self.size_cond, deadline): + timeout = deadline - time.monotonic() if deadline else None + if not _cond_wait(self.size_cond, timeout): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.requests < self.max_pool_size: @@ -1483,7 +1484,8 @@ def _get_conn( with self._max_connecting_cond: self._raise_if_not_ready(checkout_started_time, emit_event=False) while not (self.conns or self._pending < self._max_connecting): - if not _cond_wait(self._max_connecting_cond, deadline): + timeout = deadline - time.monotonic() if deadline else None + if not _cond_wait(self._max_connecting_cond, timeout): # Timed out, notify the next thread to ensure a # timeout doesn't consume the condition. if self.conns or self._pending < self._max_connecting: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index a350c1702e..b03269ae43 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -27,7 +27,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast -from pymongo import _csot, common, helpers_shared +from pymongo import _csot, common, helpers_shared, periodic_executor from pymongo.errors import ( ConnectionFailure, InvalidOperation, @@ -39,7 +39,11 @@ WriteError, ) from pymongo.hello import Hello -from pymongo.lock import _create_lock, _Lock +from pymongo.lock import ( + _cond_wait, + _create_condition, + _create_lock, +) from pymongo.logger import ( _SDAM_LOGGER, _SERVER_SELECTION_LOGGER, @@ -56,7 +60,6 @@ secondary_server_selector, writable_server_selector, ) -from pymongo.synchronous import periodic_executor from pymongo.synchronous.client_session import _ServerSession, _ServerSessionPool from pymongo.synchronous.monitor import SrvMonitor from pymongo.synchronous.pool import Pool @@ -170,9 +173,10 @@ def __init__(self, topology_settings: TopologySettings): self._seed_addresses = list(topology_description.server_descriptions()) self._opened = False self._closed = False - _lock = _create_lock() - self._lock = _Lock(_lock) - self._condition = self._settings.condition_class(_lock) + self._lock = _create_lock() + self._condition = _create_condition( + self._lock, self._settings.condition_class if _IS_SYNC else None + ) self._servers: dict[_Address, Server] = {} self._pid: Optional[int] = None self._max_cluster_time: Optional[ClusterTime] = None @@ -354,7 +358,7 @@ def _select_servers_loop( # change, or for a timeout. We won't miss any changes that # came after our most recent apply_selector call, since we've # held the lock until now. - self._condition.wait(common.MIN_HEARTBEAT_INTERVAL) + _cond_wait(self._condition, common.MIN_HEARTBEAT_INTERVAL) self._description.check_compatible() now = time.monotonic() server_descriptions = self._description.apply_selector( @@ -652,7 +656,7 @@ def request_check_all(self, wait_time: int = 5) -> None: """Wake all monitors, wait for at least one to check its server.""" with self._lock: self._request_check_all() - self._condition.wait(wait_time) + _cond_wait(self._condition, wait_time) def data_bearing_servers(self) -> list[ServerDescription]: """Return a list of all data-bearing servers. diff --git a/test/__init__.py b/test/__init__.py index fd33fde293..d3a63db2d5 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -17,6 +17,7 @@ import asyncio import gc +import logging import multiprocessing import os import signal @@ -25,6 +26,7 @@ import sys import threading import time +import traceback import unittest import warnings from asyncio import iscoroutinefunction @@ -191,6 +193,8 @@ def _connect(self, host, port, **kwargs): client.close() def _init_client(self): + self.mongoses = [] + self.connection_attempts = [] self.client = self._connect(host, port) if self.client is not None: # Return early when connected to dataLake as mongohoused does not @@ -860,6 +864,16 @@ def max_message_size_bytes(self): client_context = ClientContext() +def reset_client_context(): + if _IS_SYNC: + # sync tests don't need to reset a client context + return + elif client_context.client is not None: + client_context.client.close() + client_context.client = None + client_context._init_client() + + class PyMongoTestCase(unittest.TestCase): def assertEqualCommand(self, expected, actual, msg=None): self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) @@ -1106,26 +1120,10 @@ def enable_replication(self, client): class UnitTest(PyMongoTestCase): """Async base class for TestCases that don't require a connection to MongoDB.""" - @classmethod - def setUpClass(cls): - if _IS_SYNC: - cls._setup_class() - else: - asyncio.run(cls._setup_class()) - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls._tearDown_class() - else: - asyncio.run(cls._tearDown_class()) - - @classmethod - def _setup_class(cls): + def setUp(self) -> None: pass - @classmethod - def _tearDown_class(cls): + def tearDown(self) -> None: pass @@ -1136,37 +1134,20 @@ class IntegrationTest(PyMongoTestCase): db: Database credentials: Dict[str, str] - @classmethod - def setUpClass(cls): - if _IS_SYNC: - cls._setup_class() - else: - asyncio.run(cls._setup_class()) - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls._tearDown_class() - else: - asyncio.run(cls._tearDown_class()) - - @classmethod @client_context.require_connection - def _setup_class(cls): - if client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + def setUp(self) -> None: + if not _IS_SYNC: + reset_client_context() + if client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") - if client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): + if client_context.serverless and not getattr(self, "RUN_ON_SERVERLESS", False): raise SkipTest("this test does not support serverless") - cls.client = client_context.client - cls.db = cls.client.pymongo_test + self.client = client_context.client + self.db = self.client.pymongo_test if client_context.auth_enabled: - cls.credentials = {"username": db_user, "password": db_pwd} + self.credentials = {"username": db_user, "password": db_pwd} else: - cls.credentials = {} - - @classmethod - def _tearDown_class(cls): - pass + self.credentials = {} def cleanup_colls(self, *collections): """Cleanup collections faster than drop_collection.""" @@ -1192,37 +1173,14 @@ class MockClientTest(UnitTest): # MockClients tests that use replicaSet, directConnection=True, pass # multiple seed addresses, or wait for heartbeat events are incompatible # with loadBalanced=True. - @classmethod - def setUpClass(cls): - if _IS_SYNC: - cls._setup_class() - else: - asyncio.run(cls._setup_class()) - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls._tearDown_class() - else: - asyncio.run(cls._tearDown_class()) - - @classmethod @client_context.require_no_load_balancer - def _setup_class(cls): - pass - - @classmethod - def _tearDown_class(cls): - pass - - def setUp(self): + def setUp(self) -> None: super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) - self.client_knobs.enable() - def tearDown(self): + def tearDown(self) -> None: self.client_knobs.disable() super().tearDown() @@ -1253,7 +1211,6 @@ def teardown(): c.drop_database("pymongo_test_mike") c.drop_database("pymongo_test_bernie") c.close() - print_running_clients() diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 0579828c49..73e2824742 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -17,6 +17,7 @@ import asyncio import gc +import logging import multiprocessing import os import signal @@ -25,6 +26,7 @@ import sys import threading import time +import traceback import unittest import warnings from asyncio import iscoroutinefunction @@ -191,6 +193,8 @@ async def _connect(self, host, port, **kwargs): await client.close() async def _init_client(self): + self.mongoses = [] + self.connection_attempts = [] self.client = await self._connect(host, port) if self.client is not None: # Return early when connected to dataLake as mongohoused does not @@ -862,6 +866,16 @@ async def max_message_size_bytes(self): async_client_context = AsyncClientContext() +async def reset_client_context(): + if _IS_SYNC: + # sync tests don't need to reset a client context + return + elif async_client_context.client is not None: + await async_client_context.client.close() + async_client_context.client = None + await async_client_context._init_client() + + class AsyncPyMongoTestCase(unittest.IsolatedAsyncioTestCase): def assertEqualCommand(self, expected, actual, msg=None): self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) @@ -1124,26 +1138,10 @@ async def enable_replication(self, client): class AsyncUnitTest(AsyncPyMongoTestCase): """Async base class for TestCases that don't require a connection to MongoDB.""" - @classmethod - def setUpClass(cls): - if _IS_SYNC: - cls._setup_class() - else: - asyncio.run(cls._setup_class()) - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls._tearDown_class() - else: - asyncio.run(cls._tearDown_class()) - - @classmethod - async def _setup_class(cls): + async def asyncSetUp(self) -> None: pass - @classmethod - async def _tearDown_class(cls): + async def asyncTearDown(self) -> None: pass @@ -1154,37 +1152,20 @@ class AsyncIntegrationTest(AsyncPyMongoTestCase): db: AsyncDatabase credentials: Dict[str, str] - @classmethod - def setUpClass(cls): - if _IS_SYNC: - cls._setup_class() - else: - asyncio.run(cls._setup_class()) - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls._tearDown_class() - else: - asyncio.run(cls._tearDown_class()) - - @classmethod @async_client_context.require_connection - async def _setup_class(cls): - if async_client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + async def asyncSetUp(self) -> None: + if not _IS_SYNC: + await reset_client_context() + if async_client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") - if async_client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): + if async_client_context.serverless and not getattr(self, "RUN_ON_SERVERLESS", False): raise SkipTest("this test does not support serverless") - cls.client = async_client_context.client - cls.db = cls.client.pymongo_test + self.client = async_client_context.client + self.db = self.client.pymongo_test if async_client_context.auth_enabled: - cls.credentials = {"username": db_user, "password": db_pwd} + self.credentials = {"username": db_user, "password": db_pwd} else: - cls.credentials = {} - - @classmethod - async def _tearDown_class(cls): - pass + self.credentials = {} async def cleanup_colls(self, *collections): """Cleanup collections faster than drop_collection.""" @@ -1210,39 +1191,16 @@ class AsyncMockClientTest(AsyncUnitTest): # MockClients tests that use replicaSet, directConnection=True, pass # multiple seed addresses, or wait for heartbeat events are incompatible # with loadBalanced=True. - @classmethod - def setUpClass(cls): - if _IS_SYNC: - cls._setup_class() - else: - asyncio.run(cls._setup_class()) - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls._tearDown_class() - else: - asyncio.run(cls._tearDown_class()) - - @classmethod @async_client_context.require_no_load_balancer - async def _setup_class(cls): - pass - - @classmethod - async def _tearDown_class(cls): - pass - - def setUp(self): - super().setUp() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) - self.client_knobs.enable() - def tearDown(self): + async def asyncTearDown(self) -> None: self.client_knobs.disable() - super().tearDown() + await super().asyncTearDown() async def async_setup(): @@ -1271,7 +1229,6 @@ async def async_teardown(): await c.drop_database("pymongo_test_mike") await c.drop_database("pymongo_test_bernie") await c.close() - print_running_clients() diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py index e443dff6c0..a27a9f213d 100644 --- a/test/asynchronous/conftest.py +++ b/test/asynchronous/conftest.py @@ -22,7 +22,7 @@ def event_loop_policy(): return asyncio.get_event_loop_policy() -@pytest_asyncio.fixture(scope="session", autouse=True) +@pytest_asyncio.fixture(scope="package", autouse=True) async def test_setup_and_teardown(): await async_setup() yield diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index c9ff167b43..7191a412c1 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -42,15 +42,11 @@ class AsyncBulkTestBase(AsyncIntegrationTest): coll: AsyncCollection coll_w0: AsyncCollection - @classmethod - async def _setup_class(cls): - await super()._setup_class() - cls.coll = cls.db.test - cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) - async def asyncSetUp(self): - super().setUp() + await super().asyncSetUp() + self.coll = self.db.test await self.coll.drop() + self.coll_w0 = self.coll.with_options(write_concern=WriteConcern(w=0)) def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" @@ -787,14 +783,10 @@ async def test_large_inserts_unordered(self): class AsyncBulkAuthorizationTestBase(AsyncBulkTestBase): - @classmethod @async_client_context.require_auth @async_client_context.require_no_api_version - async def _setup_class(cls): - await super()._setup_class() - async def asyncSetUp(self): - super().setUp() + await super().asyncSetUp() await async_client_context.create_user(self.db.name, "readonly", "pw", ["read"]) await self.db.command( "createRole", @@ -937,21 +929,19 @@ class AsyncTestBulkWriteConcern(AsyncBulkTestBase): w: Optional[int] secondary: AsyncMongoClient - @classmethod - async def _setup_class(cls): - await super()._setup_class() - cls.w = async_client_context.w - cls.secondary = None - if cls.w is not None and cls.w > 1: + async def asyncSetUp(self): + await super().asyncSetUp() + self.w = async_client_context.w + self.secondary = None + if self.w is not None and self.w > 1: for member in (await async_client_context.hello)["hosts"]: if member != (await async_client_context.hello)["primary"]: - cls.secondary = await cls.unmanaged_async_single_client(*partition_node(member)) + self.secondary = await self.async_single_client(*partition_node(member)) break - @classmethod - async def async_tearDownClass(cls): - if cls.secondary: - await cls.secondary.close() + async def asyncTearDown(self): + if self.secondary: + await self.secondary.close() async def cause_wtimeout(self, requests, ordered): if not async_client_context.test_commands_enabled: diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 8e16fe7528..08da00cc1e 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -836,18 +836,16 @@ async def test_split_large_change(self): class TestClusterAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): dbs: list - @classmethod @async_client_context.require_version_min(4, 0, 0, -1) @async_client_context.require_change_streams - async def _setup_class(cls): - await super()._setup_class() - cls.dbs = [cls.db, cls.client.pymongo_test_2] + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.dbs = [self.db, self.client.pymongo_test_2] - @classmethod - async def _tearDown_class(cls): - for db in cls.dbs: - await cls.client.drop_database(db) - await super()._tearDown_class() + async def asyncTearDown(self): + for db in self.dbs: + await self.client.drop_database(db) + await super().asyncTearDown() async def change_stream_with_client(self, client, *args, **kwargs): return await client.watch(*args, **kwargs) @@ -898,11 +896,10 @@ async def test_full_pipeline(self): class TestAsyncDatabaseAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): - @classmethod @async_client_context.require_version_min(4, 0, 0, -1) @async_client_context.require_change_streams - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() async def change_stream_with_client(self, client, *args, **kwargs): return await client[self.db.name].watch(*args, **kwargs) @@ -988,12 +985,9 @@ async def test_isolation(self): class TestAsyncCollectionAsyncChangeStream( TestAsyncChangeStreamBase, APITestsMixin, ProseSpecTestsMixin ): - @classmethod @async_client_context.require_change_streams - async def _setup_class(cls): - await super()._setup_class() - async def asyncSetUp(self): + await super().asyncSetUp() # Use a new collection for each test. await self.watched_collection().drop() await self.watched_collection().insert_one({}) @@ -1133,20 +1127,11 @@ class TestAllLegacyScenarios(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True listener: AllowListEventListener - @classmethod @async_client_context.require_connection - async def _setup_class(cls): - await super()._setup_class() - cls.listener = AllowListEventListener("aggregate", "getMore") - cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - async def _tearDown_class(cls): - await cls.client.close() - await super()._tearDown_class() - - def asyncSetUp(self): - super().asyncSetUp() + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = AllowListEventListener("aggregate", "getMore") + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) self.listener.reset() async def asyncSetUpCluster(self, scenario_dict): diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 590154b857..db232386ee 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -73,7 +73,6 @@ is_greenthread_patched, lazy_client_trial, one, - wait_until, ) import bson @@ -131,16 +130,11 @@ class AsyncClientUnitTest(AsyncUnitTest): client: AsyncMongoClient - @classmethod - async def _setup_class(cls): - cls.client = await cls.unmanaged_async_rs_or_single_client( + async def asyncSetUp(self) -> None: + self.client = await self.async_rs_or_single_client( connect=False, serverSelectionTimeoutMS=100 ) - @classmethod - async def _tearDown_class(cls): - await cls.client.close() - @pytest.fixture(autouse=True) def inject_fixtures(self, caplog): self._caplog = caplog @@ -693,8 +687,8 @@ async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): # When the reaper runs at the same time as the get_socket, two # connections could be created and checked into the pool. self.assertGreaterEqual(len(server._pool.conns), 1) - wait_until(lambda: conn not in server._pool.conns, "remove stale socket") - wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") + await async_wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + await async_wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): @@ -710,8 +704,8 @@ async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): # When the reaper runs at the same time as the get_socket, # maxPoolSize=1 should prevent two connections from being created. self.assertEqual(1, len(server._pool.conns)) - wait_until(lambda: conn not in server._pool.conns, "remove stale socket") - wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") + await async_wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + await async_wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") async def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): @@ -727,7 +721,7 @@ async def test_max_idle_time_reaper_removes_stale(self): async with server._pool.checkout() as conn_two: pass self.assertIs(conn_one, conn_two) - wait_until( + await async_wait_until( lambda: len(server._pool.conns) == 0, "stale socket reaped and new one NOT added to the pool", ) @@ -745,7 +739,7 @@ async def test_min_pool_size(self): server = await (await client._get_topology()).select_server( readable_server_selector, _Op.TEST ) - wait_until( + await async_wait_until( lambda: len(server._pool.conns) == 10, "pool initialized with 10 connections", ) @@ -753,7 +747,7 @@ async def test_min_pool_size(self): # Assert that if a socket is closed, a new one takes its place async with server._pool.checkout() as conn: conn.close_conn(None) - wait_until( + await async_wait_until( lambda: len(server._pool.conns) == 10, "a closed socket gets replaced from the pool", ) @@ -939,8 +933,10 @@ async def test_repr(self): async with eval(the_repr) as client_two: self.assertEqual(client_two, client) - def test_getters(self): - wait_until(lambda: async_client_context.nodes == self.client.nodes, "find all nodes") + async def test_getters(self): + await async_wait_until( + lambda: async_client_context.nodes == self.client.nodes, "find all nodes" + ) async def test_list_databases(self): cmd_docs = (await self.client.admin.command("listDatabases"))["databases"] @@ -1065,14 +1061,21 @@ async def test_uri_connect_option(self): self.assertFalse(client._topology._opened) # Ensure kill cursors thread has not been started. - kc_thread = client._kill_cursors_executor._thread - self.assertFalse(kc_thread and kc_thread.is_alive()) - + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertFalse(kc_task and not kc_task.done()) # Using the client should open topology and start the thread. await client.admin.command("ping") self.assertTrue(client._topology._opened) - kc_thread = client._kill_cursors_executor._thread - self.assertTrue(kc_thread and kc_thread.is_alive()) + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertTrue(kc_task and not kc_task.done()) async def test_close_does_not_open_servers(self): client = await self.async_rs_client(connect=False) @@ -1277,6 +1280,7 @@ async def get_x(db): async def test_server_selection_timeout(self): client = AsyncMongoClient(serverSelectionTimeoutMS=100, connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + await client.close() client = AsyncMongoClient(serverSelectionTimeoutMS=0, connect=False) @@ -1289,18 +1293,22 @@ async def test_server_selection_timeout(self): self.assertRaises( ConfigurationError, AsyncMongoClient, serverSelectionTimeoutMS=None, connect=False ) + await client.close() client = AsyncMongoClient( "mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False ) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + await client.close() client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) + await client.close() # Test invalid timeout in URI ignored and set to default. client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) + await client.close() client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) @@ -1608,7 +1616,7 @@ def init(self, *args): await async_client_context.port, ) await self.async_single_client(uri, event_listeners=[listener]) - wait_until( + await async_wait_until( lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" ) @@ -1766,16 +1774,16 @@ async def test_background_connections_do_not_hold_locks(self): pool = await async_get_pool(client) original_connect = pool.connect - def stall_connect(*args, **kwargs): - time.sleep(2) - return original_connect(*args, **kwargs) + async def stall_connect(*args, **kwargs): + await asyncio.sleep(2) + return await original_connect(*args, **kwargs) pool.connect = stall_connect # Un-patch Pool.connect to break the cyclic reference. self.addCleanup(delattr, pool, "connect") # Wait for the background thread to start creating connections - wait_until(lambda: len(pool.conns) > 1, "start creating connections") + await async_wait_until(lambda: len(pool.conns) > 1, "start creating connections") # Assert that application operations do not block. for _ in range(10): @@ -1858,7 +1866,7 @@ async def test_process_periodic_tasks(self): await client.close() # Add cursor to kill cursors queue del cursor - wait_until( + await async_wait_until( lambda: client._kill_cursors_queue, "waited for cursor to be added to queue", ) @@ -2232,7 +2240,7 @@ async def test_exhaust_getmore_network_error(self): await cursor.to_list() self.assertTrue(conn.closed) - wait_until( + await async_wait_until( lambda: len(client._kill_cursors_queue) == 0, "waited for all killCursor requests to complete", ) @@ -2403,7 +2411,7 @@ async def test_discover_primary(self): ) self.addAsyncCleanup(c.close) - wait_until(lambda: len(c.nodes) == 3, "connect") + await async_wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(await c.address, ("a", 1)) # Fail over. @@ -2430,7 +2438,7 @@ async def test_reconnect(self): ) self.addAsyncCleanup(c.close) - wait_until(lambda: len(c.nodes) == 3, "connect") + await async_wait_until(lambda: len(c.nodes) == 3, "connect") # Total failure. c.kill_host("a:1") @@ -2472,7 +2480,7 @@ async def _test_network_error(self, operation_callback): c.set_wire_version_range("a:1", 2, MIN_SUPPORTED_WIRE_VERSION) c.set_wire_version_range("b:2", 2, MIN_SUPPORTED_WIRE_VERSION + 1) await (await c._get_topology()).select_servers(writable_server_selector, _Op.TEST) - wait_until(lambda: len(c.nodes) == 2, "connect") + await async_wait_until(lambda: len(c.nodes) == 2, "connect") c.kill_host("a:1") @@ -2544,11 +2552,11 @@ async def test_rs_client_does_not_maintain_pool_to_arbiters(self): ) self.addAsyncCleanup(c.close) - wait_until(lambda: len(c.nodes) == 3, "connect") + await async_wait_until(lambda: len(c.nodes) == 3, "connect") self.assertEqual(await c.address, ("a", 1)) self.assertEqual(await c.arbiters, {("c", 3)}) # Assert that we create 2 and only 2 pooled connections. - listener.wait_for_event(monitoring.ConnectionReadyEvent, 2) + await listener.async_wait_for_event(monitoring.ConnectionReadyEvent, 2) self.assertEqual(listener.event_count(monitoring.ConnectionCreatedEvent), 2) # Assert that we do not create connections to arbiters. arbiter = c._topology.get_server_by_address(("c", 3)) @@ -2574,10 +2582,10 @@ async def test_direct_client_maintains_pool_to_arbiter(self): ) self.addAsyncCleanup(c.close) - wait_until(lambda: len(c.nodes) == 1, "connect") + await async_wait_until(lambda: len(c.nodes) == 1, "connect") self.assertEqual(await c.address, ("c", 3)) # Assert that we create 1 pooled connection. - listener.wait_for_event(monitoring.ConnectionReadyEvent, 1) + await listener.async_wait_for_event(monitoring.ConnectionReadyEvent, 1) self.assertEqual(listener.event_count(monitoring.ConnectionCreatedEvent), 1) arbiter = c._topology.get_server_by_address(("c", 3)) self.assertEqual(len(arbiter.pool.conns), 1) diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py index d95f4c9917..d7fd85b168 100644 --- a/test/asynchronous/test_collation.py +++ b/test/asynchronous/test_collation.py @@ -97,28 +97,21 @@ class TestCollation(AsyncIntegrationTest): warn_context: Any collation: Collation - @classmethod @async_client_context.require_connection - async def _setup_class(cls): - await super()._setup_class() - cls.listener = OvertCommandListener() - cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) - cls.db = cls.client.pymongo_test - cls.collation = Collation("en_US") - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - @classmethod - async def _tearDown_class(cls): - cls.warn_context.__exit__() - cls.warn_context = None - await cls.client.close() - await super()._tearDown_class() - - def tearDown(self): + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + self.collation = Collation("en_US") + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + + async def asyncTearDown(self) -> None: + self.warn_context.__exit__() + self.warn_context = None self.listener.reset() - super().tearDown() + await super().asyncTearDown() def last_command_started(self): return self.listener.started_events[-1].command diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index db52bad4ac..528919f63c 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -40,7 +40,6 @@ async_get_pool, async_is_mongos, async_wait_until, - wait_until, ) from bson import encode @@ -88,14 +87,10 @@ class TestCollectionNoConnect(AsyncUnitTest): db: AsyncDatabase client: AsyncMongoClient - @classmethod - async def _setup_class(cls): - cls.client = AsyncMongoClient(connect=False) - cls.db = cls.client.pymongo_test - - @classmethod - async def _tearDown_class(cls): - await cls.client.close() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.client = self.simple_client(connect=False) + self.db = self.client.pymongo_test def test_collection(self): self.assertRaises(TypeError, AsyncCollection, self.db, 5) @@ -165,27 +160,14 @@ def test_iteration(self): class AsyncTestCollection(AsyncIntegrationTest): w: int - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.w = async_client_context.w # type: ignore - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls.db.drop_collection("test_large_limit") # type: ignore[unused-coroutine] - else: - asyncio.run(cls.async_tearDownClass()) - - @classmethod - async def async_tearDownClass(cls): - await cls.db.drop_collection("test_large_limit") - async def asyncSetUp(self): - await self.db.test.drop() + await super().asyncSetUp() + self.w = async_client_context.w # type: ignore async def asyncTearDown(self): await self.db.test.drop() + await self.db.drop_collection("test_large_limit") + await super().asyncTearDown() @contextlib.contextmanager def write_concern_collection(self): @@ -1023,7 +1005,10 @@ async def test_replace_bypass_document_validation(self): await db.test.insert_one({"y": 1}, bypass_document_validation=True) await db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) - await async_wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") + async def predicate(): + return await db_w0.test.find_one({"x": 1}) + + await async_wait_until(predicate, "find w:0 replaced document") async def test_update_bypass_document_validation(self): db = self.db @@ -1871,7 +1856,7 @@ async def test_exhaust(self): await cur.close() cur = None # Wait until the background thread returns the socket. - wait_until(lambda: pool.active_sockets == 0, "return socket") + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") # The socket should be discarded. self.assertEqual(0, len(pool.conns)) diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py index 289cf49751..bc9638b443 100644 --- a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -19,7 +19,12 @@ sys.path[0:0] = [""] -from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + reset_client_context, + unittest, +) from test.asynchronous.helpers import async_repl_set_step_down from test.utils import ( CMAPListener, @@ -39,29 +44,19 @@ class TestAsyncConnectionsSurvivePrimaryStepDown(AsyncIntegrationTest): listener: CMAPListener coll: AsyncCollection - @classmethod @async_client_context.require_replica_set - async def _setup_class(cls): - await super()._setup_class() - cls.listener = CMAPListener() - cls.client = await cls.unmanaged_async_rs_or_single_client( - event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + async def asyncSetUp(self): + self.listener = CMAPListener() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 ) # Ensure connections to all servers in replica set. This is to test # that the is_writable flag is properly updated for connections that # survive a replica set election. - await async_ensure_all_connected(cls.client) - cls.listener.reset() - - cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) - cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) - - @classmethod - async def _tearDown_class(cls): - await cls.client.close() - - async def asyncSetUp(self): + await async_ensure_all_connected(self.client) + self.db = self.client.get_database("step-down", write_concern=WriteConcern("majority")) + self.coll = self.db.get_collection("step-down", write_concern=WriteConcern("majority")) # Note that all ops use same write-concern as self.db (majority). await self.db.drop_collection("step-down") await self.db.create_collection("step-down") diff --git a/test/asynchronous/test_create_entities.py b/test/asynchronous/test_create_entities.py index cb2ec63f4c..1f68cf6ddc 100644 --- a/test/asynchronous/test_create_entities.py +++ b/test/asynchronous/test_create_entities.py @@ -56,6 +56,9 @@ async def test_store_events_as_entities(self): self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: self.assertIn("PoolCreatedEvent", event["name"]) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + await client.close() async def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() @@ -122,6 +125,9 @@ async def test_store_all_others_as_entities(self): self.assertEqual(entity_map["failures"], []) self.assertEqual(entity_map["successes"], 2) self.assertEqual(entity_map["iterations"], 5) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + await client.close() if __name__ == "__main__": diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 787da3d957..d216479451 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -34,9 +34,9 @@ AllowListEventListener, EventListener, OvertCommandListener, + async_wait_until, delay, ignore_deprecations, - wait_until, ) from bson import decode_all @@ -1324,8 +1324,8 @@ async def test_timeout_kills_cursor_asynchronously(self): with self.assertRaises(ExecutionTimeout): await cursor.next() - def assertCursorKilled(): - wait_until( + async def assertCursorKilled(): + await async_wait_until( lambda: len(listener.succeeded_events), "find successful killCursors command", ) @@ -1335,7 +1335,7 @@ def assertCursorKilled(): self.assertEqual(1, len(listener.succeeded_events)) self.assertEqual("killCursors", listener.succeeded_events[0].command_name) - assertCursorKilled() + await assertCursorKilled() listener.reset() cursor = await coll.aggregate([], batchSize=1) @@ -1345,7 +1345,7 @@ def assertCursorKilled(): with self.assertRaises(ExecutionTimeout): await cursor.next() - assertCursorKilled() + await assertCursorKilled() def test_delete_not_initialized(self): # Creating a cursor with invalid arguments will not run __init__ @@ -1647,10 +1647,6 @@ async def test_monitoring(self): class TestRawBatchCommandCursor(AsyncIntegrationTest): - @classmethod - async def _setup_class(cls): - await super()._setup_class() - async def test_aggregate_raw(self): c = self.db.test await c.drop() diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 61369c8542..b5a5960420 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -717,7 +717,8 @@ def test_with_options(self): class TestDatabaseAggregation(AsyncIntegrationTest): - def setUp(self): + async def asyncSetUp(self): + await super().asyncSetUp() self.pipeline: List[Mapping[str, Any]] = [ {"$listLocalSessions": {}}, {"$limit": 1}, diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 767b3ecf0a..048db2d501 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -211,11 +211,10 @@ async def test_kwargs(self): class AsyncEncryptionIntegrationTest(AsyncIntegrationTest): """Base class for encryption integration tests.""" - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @async_client_context.require_version_min(4, 2, -1) - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -430,10 +429,9 @@ async def test_upsert_uuid_standard_encrypt(self): class TestClientMaxWireVersion(AsyncIntegrationTest): - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self): + await super().asyncSetUp() @async_client_context.require_version_max(4, 0, 99) async def test_raise_max_wire_version_error(self): @@ -818,17 +816,16 @@ class TestDataKeyDoubleEncryption(AsyncEncryptionIntegrationTest): "local": None, } - @classmethod @unittest.skipUnless( any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - async def _setup_class(cls): - await super()._setup_class() - cls.listener = OvertCommandListener() - cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) - await cls.client.db.coll.drop() - cls.vault = await create_key_vault(cls.client.keyvault.datakeys) + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + await self.client.db.coll.drop() + self.vault = await create_key_vault(self.client.keyvault.datakeys) # Configure the encrypted field via the local schema_map option. schemas = { @@ -846,25 +843,22 @@ async def _setup_class(cls): } } opts = AutoEncryptionOpts( - cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS + self.KMS_PROVIDERS, + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS, ) - cls.client_encrypted = await cls.unmanaged_async_rs_or_single_client( + self.client_encrypted = await self.async_rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - cls.client_encryption = cls.unmanaged_create_client_encryption( - cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS + self.client_encryption = self.create_client_encryption( + self.KMS_PROVIDERS, "keyvault.datakeys", self.client, OPTS, kms_tls_options=KMS_TLS_OPTS ) - - @classmethod - async def _tearDown_class(cls): - await cls.vault.drop() - await cls.client.close() - await cls.client_encrypted.close() - await cls.client_encryption.close() - - def setUp(self): self.listener.reset() + async def asyncTearDown(self) -> None: + await self.vault.drop() + async def run_test(self, provider_name): # Create data key. master_key: Any = self.MASTER_KEYS[provider_name] @@ -1011,10 +1005,9 @@ async def test_views_are_prohibited(self): class TestCorpus(AsyncEncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self): + await super().asyncSetUp() @staticmethod def kms_providers(): @@ -1188,12 +1181,11 @@ class TestBsonSizeBatches(AsyncEncryptionIntegrationTest): client_encrypted: AsyncMongoClient listener: OvertCommandListener - @classmethod - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self): + await super().asyncSetUp() db = async_client_context.client.db - cls.coll = db.coll - await cls.coll.drop() + self.coll = db.coll + await self.coll.drop() # Configure the encrypted 'db.coll' collection via jsonSchema. json_schema = json_data("limits", "limits-schema.json") await db.create_collection( @@ -1211,17 +1203,14 @@ async def _setup_class(cls): await coll.insert_one(json_data("limits", "limits-key.json")) opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") - cls.listener = OvertCommandListener() - cls.client_encrypted = await cls.unmanaged_async_rs_or_single_client( - auto_encryption_opts=opts, event_listeners=[cls.listener] + self.listener = OvertCommandListener() + self.client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[self.listener] ) - cls.coll_encrypted = cls.client_encrypted.db.coll + self.coll_encrypted = self.client_encrypted.db.coll - @classmethod - async def _tearDown_class(cls): - await cls.coll_encrypted.drop() - await cls.client_encrypted.close() - await super()._tearDown_class() + async def asyncTearDown(self) -> None: + await self.coll_encrypted.drop() async def test_01_insert_succeeds_under_2MiB(self): doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} @@ -1245,7 +1234,9 @@ async def test_03_bulk_batch_split(self): doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} self.listener.reset() await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) async def test_04_bulk_batch_split(self): limits_doc = json_data("limits", "limits-doc.json") @@ -1255,7 +1246,9 @@ async def test_04_bulk_batch_split(self): doc2.update(limits_doc) self.listener.reset() await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) async def test_05_insert_succeeds_just_under_16MiB(self): doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} @@ -1285,15 +1278,12 @@ async def test_06_insert_fails_over_16MiB(self): class TestCustomEndpoint(AsyncEncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" - @classmethod @unittest.skipUnless( any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - async def _setup_class(cls): - await super()._setup_class() - - def setUp(self): + async def asyncSetUp(self): + await super().asyncSetUp() kms_providers = { "aws": AWS_CREDS, "azure": AZURE_CREDS, @@ -1322,10 +1312,6 @@ def setUp(self): self._kmip_host_error = None self._invalid_host_error = None - async def asyncTearDown(self): - await self.client_encryption.close() - await self.client_encryption_invalid.close() - async def run_test_expected_success(self, provider_name, master_key): data_key_id = await self.client_encryption.create_data_key( provider_name, master_key=master_key @@ -1500,18 +1486,18 @@ class AzureGCPEncryptionTestMixin(AsyncEncryptionIntegrationTest): KEYVAULT_COLL = "datakeys" client: AsyncMongoClient - async def asyncSetUp(self): + async def _setup(self): keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) await create_key_vault(keyvault, self.DEK) async def _test_explicit(self, expectation): + await self._setup() client_encryption = self.create_client_encryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), async_client_context.client, OPTS, ) - self.addAsyncCleanup(client_encryption.close) ciphertext = await client_encryption.encrypt( "string0", @@ -1523,6 +1509,7 @@ async def _test_explicit(self, expectation): self.assertEqual(await client_encryption.decrypt(ciphertext), "string0") async def _test_automatic(self, expectation_extjson, payload): + await self._setup() encrypted_db = "db" encrypted_coll = "coll" keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) @@ -1537,7 +1524,6 @@ async def _test_automatic(self, expectation_extjson, payload): client = await self.async_rs_or_single_client( auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] ) - self.addAsyncCleanup(client.aclose) coll = client.get_database(encrypted_db).get_collection( encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") @@ -1559,13 +1545,12 @@ async def _test_automatic(self, expectation_extjson, payload): class TestAzureEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") - async def _setup_class(cls): - cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} - cls.DEK = json_data(BASE, "custom", "azure-dek.json") - cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - await super()._setup_class() + async def asyncSetUp(self): + self.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + self.DEK = json_data(BASE, "custom", "azure-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super().asyncSetUp() async def test_explicit(self): return await self._test_explicit( @@ -1585,13 +1570,12 @@ async def test_automatic(self): class TestGCPEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") - async def _setup_class(cls): - cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} - cls.DEK = json_data(BASE, "custom", "gcp-dek.json") - cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - await super()._setup_class() + async def asyncSetUp(self): + self.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + self.DEK = json_data(BASE, "custom", "gcp-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super().asyncSetUp() async def test_explicit(self): return await self._test_explicit( @@ -1613,6 +1597,7 @@ async def test_automatic(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests class TestDeadlockProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): + await super().asyncSetUp() self.client_test = await self.async_rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" ) @@ -1645,7 +1630,6 @@ async def asyncSetUp(self): self.ciphertext = await client_encryption.encrypt( "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" ) - await client_encryption.close() self.client_listener = OvertCommandListener() self.topology_listener = TopologyEventListener() @@ -1840,6 +1824,7 @@ async def test_case_8(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events class TestDecryptProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): + await super().asyncSetUp() self.client = async_client_context.client await self.client.db.drop_collection("decryption_events") await create_key_vault(self.client.keyvault.datakeys) @@ -2275,6 +2260,7 @@ async def test_06_named_kms_providers_apply_tls_options_kmip(self): # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(AsyncEncryptionIntegrationTest): async def asyncSetUp(self): + await super().asyncSetUp() self.client = async_client_context.client await create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} @@ -2624,8 +2610,6 @@ async def AsyncMongoClient(**kwargs): assert isinstance(res["encrypted_indexed"], Binary) assert isinstance(res["encrypted_unindexed"], Binary) - await client_encryption.close() - # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption class TestRangeQueryProse(AsyncEncryptionIntegrationTest): @@ -3089,17 +3073,11 @@ class TestNoSessionsSupport(AsyncEncryptionIntegrationTest): mongocryptd_client: AsyncMongoClient MONGOCRYPTD_PORT = 27020 - @classmethod @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") - async def _setup_class(cls): - await super()._setup_class() - start_mongocryptd(cls.MONGOCRYPTD_PORT) - - @classmethod - async def _tearDown_class(cls): - await super()._tearDown_class() - async def asyncSetUp(self) -> None: + await super().asyncSetUp() + start_mongocryptd(self.MONGOCRYPTD_PORT) + self.listener = OvertCommandListener() self.mongocryptd_client = self.simple_client( f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index 54fcd3abf6..affdacde91 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -97,6 +97,7 @@ def test_grid_in_custom_opts(self): class AsyncTestGridFile(AsyncIntegrationTest): async def asyncSetUp(self): + await super().asyncSetUp() await self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) async def test_basic(self): diff --git a/test/asynchronous/test_locks.py b/test/asynchronous/test_locks.py index e0e7f2fc8d..e5a0adfee6 100644 --- a/test/asynchronous/test_locks.py +++ b/test/asynchronous/test_locks.py @@ -16,498 +16,447 @@ import asyncio import sys -import threading import unittest +from pymongo.lock import _async_create_condition, _async_create_lock + sys.path[0:0] = [""] -from pymongo.lock import _ACondition +if sys.version_info < (3, 13): + # Tests adapted from: https://github.com/python/cpython/blob/v3.13.0rc2/Lib/test/test_asyncio/test_locks.py + # Includes tests for: + # - https://github.com/python/cpython/issues/111693 + # - https://github.com/python/cpython/issues/112202 + class TestConditionStdlib(unittest.IsolatedAsyncioTestCase): + async def test_wait(self): + cond = _async_create_condition(_async_create_lock()) + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait(): + result.append(1) + return True + async def c2(result): + await cond.acquire() + if await cond.wait(): + result.append(2) + return True -# Tests adapted from: https://github.com/python/cpython/blob/v3.13.0rc2/Lib/test/test_asyncio/test_locks.py -# Includes tests for: -# - https://github.com/python/cpython/issues/111693 -# - https://github.com/python/cpython/issues/112202 -class TestConditionStdlib(unittest.IsolatedAsyncioTestCase): - async def test_wait(self): - cond = _ACondition(threading.Condition(threading.Lock())) - result = [] + async def c3(result): + await cond.acquire() + if await cond.wait(): + result.append(3) + return True - async def c1(result): - await cond.acquire() - if await cond.wait(): - result.append(1) - return True + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) - async def c2(result): - await cond.acquire() - if await cond.wait(): - result.append(2) - return True + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertFalse(cond.locked()) - async def c3(result): - await cond.acquire() - if await cond.wait(): - result.append(3) - return True - - t1 = asyncio.create_task(c1(result)) - t2 = asyncio.create_task(c2(result)) - t3 = asyncio.create_task(c3(result)) - - await asyncio.sleep(0) - self.assertEqual([], result) - self.assertFalse(cond.locked()) - - self.assertTrue(await cond.acquire()) - cond.notify() - await asyncio.sleep(0) - self.assertEqual([], result) - self.assertTrue(cond.locked()) - - cond.release() - await asyncio.sleep(0) - self.assertEqual([1], result) - self.assertTrue(cond.locked()) - - cond.notify(2) - await asyncio.sleep(0) - self.assertEqual([1], result) - self.assertTrue(cond.locked()) - - cond.release() - await asyncio.sleep(0) - self.assertEqual([1, 2], result) - self.assertTrue(cond.locked()) - - cond.release() - await asyncio.sleep(0) - self.assertEqual([1, 2, 3], result) - self.assertTrue(cond.locked()) - - self.assertTrue(t1.done()) - self.assertTrue(t1.result()) - self.assertTrue(t2.done()) - self.assertTrue(t2.result()) - self.assertTrue(t3.done()) - self.assertTrue(t3.result()) - - async def test_wait_cancel(self): - cond = _ACondition(threading.Condition(threading.Lock())) - await cond.acquire() - - wait = asyncio.create_task(cond.wait()) - asyncio.get_running_loop().call_soon(wait.cancel) - with self.assertRaises(asyncio.CancelledError): - await wait - self.assertFalse(cond._waiters) - self.assertTrue(cond.locked()) - - async def test_wait_cancel_contested(self): - cond = _ACondition(threading.Condition(threading.Lock())) - - await cond.acquire() - self.assertTrue(cond.locked()) - - wait_task = asyncio.create_task(cond.wait()) - await asyncio.sleep(0) - self.assertFalse(cond.locked()) - - # Notify, but contest the lock before cancelling - await cond.acquire() - self.assertTrue(cond.locked()) - cond.notify() - asyncio.get_running_loop().call_soon(wait_task.cancel) - asyncio.get_running_loop().call_soon(cond.release) - - try: - await wait_task - except asyncio.CancelledError: - # Should not happen, since no cancellation points - pass - - self.assertTrue(cond.locked()) - - async def test_wait_cancel_after_notify(self): - # See bpo-32841 - waited = False - - cond = _ACondition(threading.Condition(threading.Lock())) - - async def wait_on_cond(): - nonlocal waited - async with cond: - waited = True # Make sure this area was reached - await cond.wait() + self.assertTrue(await cond.acquire()) + cond.notify() + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertTrue(cond.locked()) - waiter = asyncio.create_task(wait_on_cond()) - await asyncio.sleep(0) # Start waiting + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.notify(2) + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) - await cond.acquire() - cond.notify() - await asyncio.sleep(0) # Get to acquire() - waiter.cancel() - await asyncio.sleep(0) # Activate cancellation - cond.release() - await asyncio.sleep(0) # Cancellation should occur + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2], result) + self.assertTrue(cond.locked()) - self.assertTrue(waiter.cancelled()) - self.assertTrue(waited) + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2, 3], result) + self.assertTrue(cond.locked()) - async def test_wait_unacquired(self): - cond = _ACondition(threading.Condition(threading.Lock())) - with self.assertRaises(RuntimeError): - await cond.wait() + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) - async def test_wait_for(self): - cond = _ACondition(threading.Condition(threading.Lock())) - presult = False + async def test_wait_cancel(self): + cond = _async_create_condition(_async_create_lock()) + await cond.acquire() - def predicate(): - return presult + wait = asyncio.create_task(cond.wait()) + asyncio.get_running_loop().call_soon(wait.cancel) + with self.assertRaises(asyncio.CancelledError): + await wait + self.assertFalse(cond._waiters) + self.assertTrue(cond.locked()) - result = [] + async def test_wait_cancel_contested(self): + cond = _async_create_condition(_async_create_lock()) - async def c1(result): await cond.acquire() - if await cond.wait_for(predicate): - result.append(1) - cond.release() - return True + self.assertTrue(cond.locked()) - t = asyncio.create_task(c1(result)) + wait_task = asyncio.create_task(cond.wait()) + await asyncio.sleep(0) + self.assertFalse(cond.locked()) - await asyncio.sleep(0) - self.assertEqual([], result) + # Notify, but contest the lock before cancelling + await cond.acquire() + self.assertTrue(cond.locked()) + cond.notify() + asyncio.get_running_loop().call_soon(wait_task.cancel) + asyncio.get_running_loop().call_soon(cond.release) - await cond.acquire() - cond.notify() - cond.release() - await asyncio.sleep(0) - self.assertEqual([], result) + try: + await wait_task + except asyncio.CancelledError: + # Should not happen, since no cancellation points + pass - presult = True - await cond.acquire() - cond.notify() - cond.release() - await asyncio.sleep(0) - self.assertEqual([1], result) + self.assertTrue(cond.locked()) - self.assertTrue(t.done()) - self.assertTrue(t.result()) + async def test_wait_cancel_after_notify(self): + # See bpo-32841 + waited = False - async def test_wait_for_unacquired(self): - cond = _ACondition(threading.Condition(threading.Lock())) + cond = _async_create_condition(_async_create_lock()) - # predicate can return true immediately - res = await cond.wait_for(lambda: [1, 2, 3]) - self.assertEqual([1, 2, 3], res) + async def wait_on_cond(): + nonlocal waited + async with cond: + waited = True # Make sure this area was reached + await cond.wait() - with self.assertRaises(RuntimeError): - await cond.wait_for(lambda: False) + waiter = asyncio.create_task(wait_on_cond()) + await asyncio.sleep(0) # Start waiting - async def test_notify(self): - cond = _ACondition(threading.Condition(threading.Lock())) - result = [] + await cond.acquire() + cond.notify() + await asyncio.sleep(0) # Get to acquire() + waiter.cancel() + await asyncio.sleep(0) # Activate cancellation + cond.release() + await asyncio.sleep(0) # Cancellation should occur + + self.assertTrue(waiter.cancelled()) + self.assertTrue(waited) + + async def test_wait_unacquired(self): + cond = _async_create_condition(_async_create_lock()) + with self.assertRaises(RuntimeError): + await cond.wait() - async def c1(result): - async with cond: - if await cond.wait(): - result.append(1) - return True + async def test_wait_for(self): + cond = _async_create_condition(_async_create_lock()) + presult = False - async def c2(result): - async with cond: - if await cond.wait(): - result.append(2) - return True + def predicate(): + return presult - async def c3(result): - async with cond: - if await cond.wait(): - result.append(3) + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait_for(predicate): + result.append(1) + cond.release() return True - t1 = asyncio.create_task(c1(result)) - t2 = asyncio.create_task(c2(result)) - t3 = asyncio.create_task(c3(result)) + t = asyncio.create_task(c1(result)) - await asyncio.sleep(0) - self.assertEqual([], result) + await asyncio.sleep(0) + self.assertEqual([], result) - async with cond: - cond.notify(1) - await asyncio.sleep(1) - self.assertEqual([1], result) + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([], result) - async with cond: - cond.notify(1) - cond.notify(2048) - await asyncio.sleep(1) - self.assertEqual([1, 2, 3], result) + presult = True + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) - self.assertTrue(t1.done()) - self.assertTrue(t1.result()) - self.assertTrue(t2.done()) - self.assertTrue(t2.result()) - self.assertTrue(t3.done()) - self.assertTrue(t3.result()) + self.assertTrue(t.done()) + self.assertTrue(t.result()) - async def test_notify_all(self): - cond = _ACondition(threading.Condition(threading.Lock())) + async def test_wait_for_unacquired(self): + cond = _async_create_condition(_async_create_lock()) - result = [] + # predicate can return true immediately + res = await cond.wait_for(lambda: [1, 2, 3]) + self.assertEqual([1, 2, 3], res) - async def c1(result): - async with cond: - if await cond.wait(): - result.append(1) - return True + with self.assertRaises(RuntimeError): + await cond.wait_for(lambda: False) - async def c2(result): - async with cond: - if await cond.wait(): - result.append(2) - return True + async def test_notify(self): + cond = _async_create_condition(_async_create_lock()) + result = [] - t1 = asyncio.create_task(c1(result)) - t2 = asyncio.create_task(c2(result)) + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True - await asyncio.sleep(0) - self.assertEqual([], result) + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True - async with cond: - cond.notify_all() - await asyncio.sleep(1) - self.assertEqual([1, 2], result) + async def c3(result): + async with cond: + if await cond.wait(): + result.append(3) + return True - self.assertTrue(t1.done()) - self.assertTrue(t1.result()) - self.assertTrue(t2.done()) - self.assertTrue(t2.result()) + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) - async def test_context_manager(self): - cond = _ACondition(threading.Condition(threading.Lock())) - self.assertFalse(cond.locked()) - async with cond: - self.assertTrue(cond.locked()) - self.assertFalse(cond.locked()) - - async def test_timeout_in_block(self): - condition = _ACondition(threading.Condition(threading.Lock())) - async with condition: - with self.assertRaises(asyncio.TimeoutError): - await asyncio.wait_for(condition.wait(), timeout=0.5) - - @unittest.skipIf( - sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" - ) - async def test_cancelled_error_wakeup(self): - # Test that a cancelled error, received when awaiting wakeup, - # will be re-raised un-modified. - wake = False - raised = None - cond = _ACondition(threading.Condition(threading.Lock())) - - async def func(): - nonlocal raised - async with cond: - with self.assertRaises(asyncio.CancelledError) as err: - await cond.wait_for(lambda: wake) - raised = err.exception - raise raised - - task = asyncio.create_task(func()) - await asyncio.sleep(0) - # Task is waiting on the condition, cancel it there. - task.cancel(msg="foo") # type: ignore[call-arg] - with self.assertRaises(asyncio.CancelledError) as err: - await task - self.assertEqual(err.exception.args, ("foo",)) - # We should have got the _same_ exception instance as the one - # originally raised. - self.assertIs(err.exception, raised) - - @unittest.skipIf( - sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" - ) - async def test_cancelled_error_re_aquire(self): - # Test that a cancelled error, received when re-aquiring lock, - # will be re-raised un-modified. - wake = False - raised = None - cond = _ACondition(threading.Condition(threading.Lock())) - - async def func(): - nonlocal raised - async with cond: - with self.assertRaises(asyncio.CancelledError) as err: - await cond.wait_for(lambda: wake) - raised = err.exception - raise raised - - task = asyncio.create_task(func()) - await asyncio.sleep(0) - # Task is waiting on the condition - await cond.acquire() - wake = True - cond.notify() - await asyncio.sleep(0) - # Task is now trying to re-acquire the lock, cancel it there. - task.cancel(msg="foo") # type: ignore[call-arg] - cond.release() - with self.assertRaises(asyncio.CancelledError) as err: - await task - self.assertEqual(err.exception.args, ("foo",)) - # We should have got the _same_ exception instance as the one - # originally raised. - self.assertIs(err.exception, raised) - - @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11") - async def test_cancelled_wakeup(self): - # Test that a task cancelled at the "same" time as it is woken - # up as part of a Condition.notify() does not result in a lost wakeup. - # This test simulates a cancel while the target task is awaiting initial - # wakeup on the wakeup queue. - condition = _ACondition(threading.Condition(threading.Lock())) - state = 0 - - async def consumer(): - nonlocal state - async with condition: - while True: - await condition.wait_for(lambda: state != 0) - if state < 0: - return - state -= 1 - - # create two consumers - c = [asyncio.create_task(consumer()) for _ in range(2)] - # wait for them to settle - await asyncio.sleep(0.1) - async with condition: - # produce one item and wake up one - state += 1 - condition.notify(1) - - # Cancel it while it is awaiting to be run. - # This cancellation could come from the outside - c[0].cancel() - - # now wait for the item to be consumed - # if it doesn't means that our "notify" didn"t take hold. - # because it raced with a cancel() - try: - async with asyncio.timeout(1): - await condition.wait_for(lambda: state == 0) - except TimeoutError: - pass - self.assertEqual(state, 0) - - # clean up - state = -1 - condition.notify_all() - await c[1] - - @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11") - async def test_cancelled_wakeup_relock(self): - # Test that a task cancelled at the "same" time as it is woken - # up as part of a Condition.notify() does not result in a lost wakeup. - # This test simulates a cancel while the target task is acquiring the lock - # again. - condition = _ACondition(threading.Condition(threading.Lock())) - state = 0 - - async def consumer(): - nonlocal state - async with condition: - while True: - await condition.wait_for(lambda: state != 0) - if state < 0: - return - state -= 1 - - # create two consumers - c = [asyncio.create_task(consumer()) for _ in range(2)] - # wait for them to settle - await asyncio.sleep(0.1) - async with condition: - # produce one item and wake up one - state += 1 - condition.notify(1) - - # now we sleep for a bit. This allows the target task to wake up and - # settle on re-aquiring the lock await asyncio.sleep(0) + self.assertEqual([], result) - # Cancel it while awaiting the lock - # This cancel could come the outside. - c[0].cancel() + async with cond: + cond.notify(1) + await asyncio.sleep(1) + self.assertEqual([1], result) - # now wait for the item to be consumed - # if it doesn't means that our "notify" didn"t take hold. - # because it raced with a cancel() - try: - async with asyncio.timeout(1): - await condition.wait_for(lambda: state == 0) - except TimeoutError: - pass - self.assertEqual(state, 0) + async with cond: + cond.notify(1) + cond.notify(2048) + await asyncio.sleep(1) + self.assertEqual([1, 2, 3], result) - # clean up - state = -1 - condition.notify_all() - await c[1] + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + async def test_notify_all(self): + cond = _async_create_condition(_async_create_lock()) -class TestCondition(unittest.IsolatedAsyncioTestCase): - async def test_multiple_loops_notify(self): - cond = _ACondition(threading.Condition(threading.Lock())) + result = [] - def tmain(cond): - async def atmain(cond): - await asyncio.sleep(1) + async def c1(result): async with cond: - cond.notify(1) - - asyncio.run(atmain(cond)) - - t = threading.Thread(target=tmain, args=(cond,)) - t.start() + if await cond.wait(): + result.append(1) + return True - async with cond: - self.assertTrue(await cond.wait(30)) - t.join() - - async def test_multiple_loops_notify_all(self): - cond = _ACondition(threading.Condition(threading.Lock())) - results = [] - - def tmain(cond, results): - async def atmain(cond, results): - await asyncio.sleep(1) + async def c2(result): async with cond: - res = await cond.wait(30) - results.append(res) - - asyncio.run(atmain(cond, results)) + if await cond.wait(): + result.append(2) + return True - nthreads = 5 - threads = [] - for _ in range(nthreads): - threads.append(threading.Thread(target=tmain, args=(cond, results))) - for t in threads: - t.start() + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) - await asyncio.sleep(2) - async with cond: - cond.notify_all() + await asyncio.sleep(0) + self.assertEqual([], result) - for t in threads: - t.join() + async with cond: + cond.notify_all() + await asyncio.sleep(1) + self.assertEqual([1, 2], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + + async def test_context_manager(self): + cond = _async_create_condition(_async_create_lock()) + self.assertFalse(cond.locked()) + async with cond: + self.assertTrue(cond.locked()) + self.assertFalse(cond.locked()) - self.assertEqual(results, [True] * nthreads) + async def test_timeout_in_block(self): + condition = _async_create_condition(_async_create_lock()) + async with condition: + with self.assertRaises(asyncio.TimeoutError): + await asyncio.wait_for(condition.wait(), timeout=0.5) + + @unittest.skipIf( + sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" + ) + async def test_cancelled_error_wakeup(self): + # Test that a cancelled error, received when awaiting wakeup, + # will be re-raised un-modified. + wake = False + raised = None + cond = _async_create_condition(_async_create_lock()) + + async def func(): + nonlocal raised + async with cond: + with self.assertRaises(asyncio.CancelledError) as err: + await cond.wait_for(lambda: wake) + raised = err.exception + raise raised + task = asyncio.create_task(func()) + await asyncio.sleep(0) + # Task is waiting on the condition, cancel it there. + task.cancel(msg="foo") # type: ignore[call-arg] + with self.assertRaises(asyncio.CancelledError) as err: + await task + self.assertEqual(err.exception.args, ("foo",)) + # We should have got the _same_ exception instance as the one + # originally raised. + self.assertIs(err.exception, raised) + + @unittest.skipIf( + sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" + ) + async def test_cancelled_error_re_aquire(self): + # Test that a cancelled error, received when re-aquiring lock, + # will be re-raised un-modified. + wake = False + raised = None + cond = _async_create_condition(_async_create_lock()) + + async def func(): + nonlocal raised + async with cond: + with self.assertRaises(asyncio.CancelledError) as err: + await cond.wait_for(lambda: wake) + raised = err.exception + raise raised -if __name__ == "__main__": - unittest.main() + task = asyncio.create_task(func()) + await asyncio.sleep(0) + # Task is waiting on the condition + await cond.acquire() + wake = True + cond.notify() + await asyncio.sleep(0) + # Task is now trying to re-acquire the lock, cancel it there. + task.cancel(msg="foo") # type: ignore[call-arg] + cond.release() + with self.assertRaises(asyncio.CancelledError) as err: + await task + self.assertEqual(err.exception.args, ("foo",)) + # We should have got the _same_ exception instance as the one + # originally raised. + self.assertIs(err.exception, raised) + + @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11") + async def test_cancelled_wakeup(self): + # Test that a task cancelled at the "same" time as it is woken + # up as part of a Condition.notify() does not result in a lost wakeup. + # This test simulates a cancel while the target task is awaiting initial + # wakeup on the wakeup queue. + condition = _async_create_condition(_async_create_lock()) + state = 0 + + async def consumer(): + nonlocal state + async with condition: + while True: + await condition.wait_for(lambda: state != 0) + if state < 0: + return + state -= 1 + + # create two consumers + c = [asyncio.create_task(consumer()) for _ in range(2)] + # wait for them to settle + await asyncio.sleep(0.1) + async with condition: + # produce one item and wake up one + state += 1 + condition.notify(1) + + # Cancel it while it is awaiting to be run. + # This cancellation could come from the outside + c[0].cancel() + + # now wait for the item to be consumed + # if it doesn't means that our "notify" didn"t take hold. + # because it raced with a cancel() + try: + async with asyncio.timeout(1): + await condition.wait_for(lambda: state == 0) + except TimeoutError: + pass + self.assertEqual(state, 0) + + # clean up + state = -1 + condition.notify_all() + await c[1] + + @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11") + async def test_cancelled_wakeup_relock(self): + # Test that a task cancelled at the "same" time as it is woken + # up as part of a Condition.notify() does not result in a lost wakeup. + # This test simulates a cancel while the target task is acquiring the lock + # again. + condition = _async_create_condition(_async_create_lock()) + state = 0 + + async def consumer(): + nonlocal state + async with condition: + while True: + await condition.wait_for(lambda: state != 0) + if state < 0: + return + state -= 1 + + # create two consumers + c = [asyncio.create_task(consumer()) for _ in range(2)] + # wait for them to settle + await asyncio.sleep(0.1) + async with condition: + # produce one item and wake up one + state += 1 + condition.notify(1) + + # now we sleep for a bit. This allows the target task to wake up and + # settle on re-aquiring the lock + await asyncio.sleep(0) + + # Cancel it while awaiting the lock + # This cancel could come the outside. + c[0].cancel() + + # now wait for the item to be consumed + # if it doesn't means that our "notify" didn"t take hold. + # because it raced with a cancel() + try: + async with asyncio.timeout(1): + await condition.wait_for(lambda: state == 0) + except TimeoutError: + pass + self.assertEqual(state, 0) + + # clean up + state = -1 + condition.notify_all() + await c[1] + + if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index b0c86ab54e..eaad60beac 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -52,22 +52,16 @@ class AsyncTestCommandMonitoring(AsyncIntegrationTest): listener: EventListener @classmethod - @async_client_context.require_connection - async def _setup_class(cls): - await super()._setup_class() + def setUpClass(cls) -> None: cls.listener = OvertCommandListener() - cls.client = await cls.unmanaged_async_rs_or_single_client( - event_listeners=[cls.listener], retryWrites=False - ) - @classmethod - async def _tearDown_class(cls): - await cls.client.close() - await super()._tearDown_class() - - async def asyncTearDown(self): + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + await super().asyncSetUp() self.listener.reset() - await super().asyncTearDown() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=False + ) async def test_started_simple(self): await self.client.pymongo_test.command("ping") @@ -1140,26 +1134,23 @@ class AsyncTestGlobalListener(AsyncIntegrationTest): saved_listeners: Any @classmethod - @async_client_context.require_connection - async def _setup_class(cls): - await super()._setup_class() + def setUpClass(cls) -> None: cls.listener = OvertCommandListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) - cls.client = await cls.unmanaged_async_single_client() - # Get one (authenticated) socket in the pool. - await cls.client.pymongo_test.command("ping") - - @classmethod - async def _tearDown_class(cls): - monitoring._LISTENERS = cls.saved_listeners - await cls.client.close() - await super()._tearDown_class() + @async_client_context.require_connection async def asyncSetUp(self): await super().asyncSetUp() self.listener.reset() + self.client = await self.async_single_client() + # Get one (authenticated) socket in the pool. + await self.client.pymongo_test.command("ping") + + @classmethod + def tearDownClass(cls): + monitoring._LISTENERS = cls.saved_listeners async def test_simple(self): await self.client.pymongo_test.command("ping") diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py index ca2f0a5422..738ce04192 100644 --- a/test/asynchronous/test_retryable_writes.py +++ b/test/asynchronous/test_retryable_writes.py @@ -132,34 +132,27 @@ class IgnoreDeprecationsTest(AsyncIntegrationTest): RUN_ON_SERVERLESS = True deprecation_filter: DeprecationFilter - @classmethod - async def _setup_class(cls): - await super()._setup_class() - cls.deprecation_filter = DeprecationFilter() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.deprecation_filter = DeprecationFilter() - @classmethod - async def _tearDown_class(cls): - cls.deprecation_filter.stop() - await super()._tearDown_class() + async def asyncTearDown(self) -> None: + self.deprecation_filter.stop() class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): knobs: client_knobs - @classmethod - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - cls.client = await cls.unmanaged_async_rs_or_single_client(retryWrites=True) - cls.db = cls.client.pymongo_test + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.client = await self.async_rs_or_single_client(retryWrites=True) + self.db = self.client.pymongo_test - @classmethod - async def _tearDown_class(cls): - cls.knobs.disable() - await cls.client.close() - await super()._tearDown_class() + async def asyncTearDown(self) -> None: + self.knobs.disable() @async_client_context.require_no_standalone async def test_actionable_error_message(self): @@ -180,26 +173,18 @@ class TestRetryableWrites(IgnoreDeprecationsTest): listener: OvertCommandListener knobs: client_knobs - @classmethod @async_client_context.require_no_mmap - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self) -> None: + await super().asyncSetUp() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - cls.listener = OvertCommandListener() - cls.client = await cls.unmanaged_async_rs_or_single_client( - retryWrites=True, event_listeners=[cls.listener] + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client( + retryWrites=True, event_listeners=[self.listener] ) - cls.db = cls.client.pymongo_test + self.db = self.client.pymongo_test - @classmethod - async def _tearDown_class(cls): - cls.knobs.disable() - await cls.client.close() - await super()._tearDown_class() - - async def asyncSetUp(self): if async_client_context.is_rs and async_client_context.test_commands_enabled: await self.client.admin.command( SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) @@ -210,6 +195,7 @@ async def asyncTearDown(self): await self.client.admin.command( SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) ) + self.knobs.disable() async def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() @@ -438,13 +424,12 @@ class TestWriteConcernError(AsyncIntegrationTest): RUN_ON_SERVERLESS = True fail_insert: dict - @classmethod @async_client_context.require_replica_set @async_client_context.require_no_mmap @async_client_context.require_failCommand_fail_point - async def _setup_class(cls): - await super()._setup_class() - cls.fail_insert = { + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.fail_insert = { "configureFailPoint": "failCommand", "mode": {"times": 2}, "data": { diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index b432621798..42bc253b56 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -38,7 +38,6 @@ ExceptionCatchingThread, OvertCommandListener, async_wait_until, - wait_until, ) from bson import DBRef @@ -83,36 +82,27 @@ class TestSession(AsyncIntegrationTest): client2: AsyncMongoClient sensitive_commands: Set[str] - @classmethod @async_client_context.require_sessions - async def _setup_class(cls): - await super()._setup_class() + async def asyncSetUp(self): + await super().asyncSetUp() # Create a second client so we can make sure clients cannot share # sessions. - cls.client2 = await cls.unmanaged_async_rs_or_single_client() + self.client2 = await self.async_rs_or_single_client() # Redact no commands, so we can test user-admin commands have "lsid". - cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() + self.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() monitoring._SENSITIVE_COMMANDS.clear() - @classmethod - async def _tearDown_class(cls): - monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) - await cls.client2.close() - await super()._tearDown_class() - - async def asyncSetUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() self.client = await self.async_rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener] ) - self.addAsyncCleanup(self.client.close) self.db = self.client.pymongo_test self.initial_lsids = {s["id"] for s in session_ids(self.client)} async def asyncTearDown(self): - """All sessions used in the test must be returned to the pool.""" + monitoring._SENSITIVE_COMMANDS.update(self.sensitive_commands) await self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() for event in self.session_checker_listener.started_events: @@ -122,6 +112,8 @@ async def asyncTearDown(self): current_lsids = {s["id"] for s in session_ids(self.client)} self.assertLessEqual(used_lsids, current_lsids) + await super().asyncTearDown() + async def _test_ops(self, client, *ops): listener = client.options.event_listeners[0] @@ -833,18 +825,11 @@ class TestCausalConsistency(AsyncUnitTest): listener: SessionTestListener client: AsyncMongoClient - @classmethod - async def _setup_class(cls): - cls.listener = SessionTestListener() - cls.client = await cls.unmanaged_async_rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - async def _tearDown_class(cls): - await cls.client.close() - @async_client_context.require_sessions async def asyncSetUp(self): await super().asyncSetUp() + self.listener = SessionTestListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) @async_client_context.require_no_standalone async def test_core(self): diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index b5d0686417..d11d0a9776 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -26,7 +26,7 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.utils import ( OvertCommandListener, - wait_until, + async_wait_until, ) from typing import List @@ -162,7 +162,7 @@ async def test_unpin_for_next_transaction(self): client = await self.async_rs_client( async_client_context.mongos_seeds(), localThresholdMS=1000 ) - wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + await async_wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. await coll.insert_one({}) @@ -191,7 +191,7 @@ async def test_unpin_for_non_transaction_operation(self): client = await self.async_rs_client( async_client_context.mongos_seeds(), localThresholdMS=1000 ) - wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + await async_wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. await coll.insert_one({}) @@ -403,21 +403,12 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(AsyncTransactionsBase): - @classmethod - async def _setup_class(cls): - await super()._setup_class() - cls.mongos_clients = [] + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.mongos_clients = [] if async_client_context.supports_transactions(): for address in async_client_context.mongoses: - cls.mongos_clients.append( - await cls.unmanaged_async_single_client("{}:{}".format(*address)) - ) - - @classmethod - async def _tearDown_class(cls): - for client in cls.mongos_clients: - await client.close() - await super()._tearDown_class() + self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) async def _set_fail_point(self, client, command_args): cmd = {"configureFailPoint": "failCommand"} diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index db5ed81e24..b18b09383e 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -50,6 +50,7 @@ ) from test.utils import ( async_get_pool, + async_wait_until, camel_to_snake, camel_to_snake_args, parse_spec_options, @@ -304,7 +305,6 @@ async def _create_entity(self, entity_spec, uri=None): kwargs["h"] = uri client = await self.test.async_rs_or_single_client(**kwargs) self[spec["id"]] = client - self.test.addAsyncCleanup(client.close) return elif entity_type == "database": client = self[spec["client"]] @@ -479,54 +479,47 @@ async def insert_initial_data(self, initial_data): await db.create_collection(coll_name, write_concern=wc, **opts) @classmethod - async def _setup_class(cls): + def setUpClass(cls) -> None: + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + def tearDownClass(cls) -> None: + cls.knobs.disable() + + async def asyncSetUp(self): # super call creates internal client cls.client - await super()._setup_class() + await super().asyncSetUp() # process file-level runOnRequirements - run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) - if not await cls.should_run_on(run_on_spec): - raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") + run_on_spec = self.TEST_SPEC.get("runOnRequirements", []) + if not await self.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here if async_client_context.storage_engine == "mmapv1": - if "retryable-writes" in cls.TEST_SPEC["description"] or "retryable_writes" in str( - cls.TEST_PATH + if "retryable-writes" in self.TEST_SPEC["description"] or "retryable_writes" in str( + self.TEST_PATH ): raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") # Handle mongos_clients for transactions tests. - cls.mongos_clients = [] + self.mongos_clients = [] if ( async_client_context.supports_transactions() and not async_client_context.load_balancer and not async_client_context.serverless ): for address in async_client_context.mongoses: - cls.mongos_clients.append( - await cls.unmanaged_async_single_client("{}:{}".format(*address)) - ) + self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) - # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs( - heartbeat_frequency=0.1, - min_heartbeat_interval=0.1, - kill_cursor_frequency=0.1, - events_queue_frequency=0.1, - ) - cls.knobs.enable() - - @classmethod - async def _tearDown_class(cls): - cls.knobs.disable() - for client in cls.mongos_clients: - await client.close() - await super()._tearDown_class() - - async def asyncSetUp(self): - await super().asyncSetUp() # process schemaVersion # note: we check major schema version during class generation - # note: we do this here because we cannot run assertions in setUpClass version = Version.from_string(self.TEST_SPEC["schemaVersion"]) self.assertLessEqual( version, @@ -1036,7 +1029,6 @@ async def _testOperation_targetedFailPoint(self, spec): ) client = await self.async_single_client("{}:{}".format(*session._pinned_address)) - self.addAsyncCleanup(client.close) await self.__set_fail_point(client=client, command_args=spec["failPoint"]) async def _testOperation_createEntities(self, spec): @@ -1137,13 +1129,13 @@ def _testOperation_assertEventCount(self, spec): client, event, count = spec["client"], spec["event"], spec["count"] self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") - def _testOperation_waitForEvent(self, spec): + async def _testOperation_waitForEvent(self, spec): """Run the waitForEvent test operation. Wait for a number of events to be published, or fail. """ client, event, count = spec["client"], spec["event"], spec["count"] - wait_until( + await async_wait_until( lambda: self._event_count(client, event) >= count, f"find {count} {event} event(s)", ) diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index f27f52ec2c..b79e5258b5 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -249,30 +249,22 @@ class AsyncSpecRunner(AsyncIntegrationTest): knobs: client_knobs listener: EventListener - @classmethod - async def _setup_class(cls): - await super()._setup_class() - cls.mongos_clients = [] + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - - @classmethod - async def _tearDown_class(cls): - cls.knobs.disable() - for client in cls.mongos_clients: - await client.close() - await super()._tearDown_class() - - def setUp(self): - super().setUp() + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() self.targets = {} self.listener = None # type: ignore self.pool_listener = None self.server_listener = None self.maxDiff = None + async def asyncTearDown(self) -> None: + self.knobs.disable() + async def _set_fail_point(self, client, command_args): cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) @@ -700,8 +692,6 @@ async def run_scenario(self, scenario_def, test): self.listener = listener self.pool_listener = pool_listener self.server_listener = server_listener - # Close the client explicitly to avoid having too many threads open. - self.addAsyncCleanup(client.close) # Create session0 and session1. sessions = {} diff --git a/test/conftest.py b/test/conftest.py index a3d954c7c3..91fad28d0a 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -20,7 +20,7 @@ def event_loop_policy(): return asyncio.get_event_loop_policy() -@pytest.fixture(scope="session", autouse=True) +@pytest.fixture(scope="package", autouse=True) def test_setup_and_teardown(): setup() yield diff --git a/test/test_bulk.py b/test/test_bulk.py index ea2b803804..6d29ff510a 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -42,15 +42,11 @@ class BulkTestBase(IntegrationTest): coll: Collection coll_w0: Collection - @classmethod - def _setup_class(cls): - super()._setup_class() - cls.coll = cls.db.test - cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) - def setUp(self): super().setUp() + self.coll = self.db.test self.coll.drop() + self.coll_w0 = self.coll.with_options(write_concern=WriteConcern(w=0)) def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" @@ -785,12 +781,8 @@ def test_large_inserts_unordered(self): class BulkAuthorizationTestBase(BulkTestBase): - @classmethod @client_context.require_auth @client_context.require_no_api_version - def _setup_class(cls): - super()._setup_class() - def setUp(self): super().setUp() client_context.create_user(self.db.name, "readonly", "pw", ["read"]) @@ -935,21 +927,19 @@ class TestBulkWriteConcern(BulkTestBase): w: Optional[int] secondary: MongoClient - @classmethod - def _setup_class(cls): - super()._setup_class() - cls.w = client_context.w - cls.secondary = None - if cls.w is not None and cls.w > 1: + def setUp(self): + super().setUp() + self.w = client_context.w + self.secondary = None + if self.w is not None and self.w > 1: for member in (client_context.hello)["hosts"]: if member != (client_context.hello)["primary"]: - cls.secondary = cls.unmanaged_single_client(*partition_node(member)) + self.secondary = self.single_client(*partition_node(member)) break - @classmethod - def async_tearDownClass(cls): - if cls.secondary: - cls.secondary.close() + def tearDown(self): + if self.secondary: + self.secondary.close() def cause_wtimeout(self, requests, ordered): if not client_context.test_commands_enabled: diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 3a107122b7..4ed21f55cf 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -820,18 +820,16 @@ def test_split_large_change(self): class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): dbs: list - @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams - def _setup_class(cls): - super()._setup_class() - cls.dbs = [cls.db, cls.client.pymongo_test_2] + def setUp(self) -> None: + super().setUp() + self.dbs = [self.db, self.client.pymongo_test_2] - @classmethod - def _tearDown_class(cls): - for db in cls.dbs: - cls.client.drop_database(db) - super()._tearDown_class() + def tearDown(self): + for db in self.dbs: + self.client.drop_database(db) + super().tearDown() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) @@ -882,11 +880,10 @@ def test_full_pipeline(self): class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): - @classmethod @client_context.require_version_min(4, 0, 0, -1) @client_context.require_change_streams - def _setup_class(cls): - super()._setup_class() + def setUp(self) -> None: + super().setUp() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) @@ -968,12 +965,9 @@ def test_isolation(self): class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): - @classmethod @client_context.require_change_streams - def _setup_class(cls): - super()._setup_class() - def setUp(self): + super().setUp() # Use a new collection for each test. self.watched_collection().drop() self.watched_collection().insert_one({}) @@ -1111,20 +1105,11 @@ class TestAllLegacyScenarios(IntegrationTest): RUN_ON_LOAD_BALANCER = True listener: AllowListEventListener - @classmethod @client_context.require_connection - def _setup_class(cls): - super()._setup_class() - cls.listener = AllowListEventListener("aggregate", "getMore") - cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def _tearDown_class(cls): - cls.client.close() - super()._tearDown_class() - def setUp(self): super().setUp() + self.listener = AllowListEventListener("aggregate", "getMore") + self.client = self.rs_or_single_client(event_listeners=[self.listener]) self.listener.reset() def setUpCluster(self, scenario_dict): diff --git a/test/test_client.py b/test/test_client.py index 5bbb5bd751..5ec425f312 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -129,13 +129,8 @@ class ClientUnitTest(UnitTest): client: MongoClient - @classmethod - def _setup_class(cls): - cls.client = cls.unmanaged_rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) - - @classmethod - def _tearDown_class(cls): - cls.client.close() + def setUp(self) -> None: + self.client = self.rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) @pytest.fixture(autouse=True) def inject_fixtures(self, caplog): @@ -1039,14 +1034,21 @@ def test_uri_connect_option(self): self.assertFalse(client._topology._opened) # Ensure kill cursors thread has not been started. - kc_thread = client._kill_cursors_executor._thread - self.assertFalse(kc_thread and kc_thread.is_alive()) - + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertFalse(kc_task and not kc_task.done()) # Using the client should open topology and start the thread. client.admin.command("ping") self.assertTrue(client._topology._opened) - kc_thread = client._kill_cursors_executor._thread - self.assertTrue(kc_thread and kc_thread.is_alive()) + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertTrue(kc_task and not kc_task.done()) def test_close_does_not_open_servers(self): client = self.rs_client(connect=False) @@ -1241,6 +1243,7 @@ def get_x(db): def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=100, connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + client.close() client = MongoClient(serverSelectionTimeoutMS=0, connect=False) @@ -1251,16 +1254,20 @@ def test_server_selection_timeout(self): self.assertRaises( ConfigurationError, MongoClient, serverSelectionTimeoutMS=None, connect=False ) + client.close() client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + client.close() client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) + client.close() # Test invalid timeout in URI ignored and set to default. client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) + client.close() client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) diff --git a/test/test_collation.py b/test/test_collation.py index b878df2fb4..06436f0638 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -97,26 +97,19 @@ class TestCollation(IntegrationTest): warn_context: Any collation: Collation - @classmethod @client_context.require_connection - def _setup_class(cls): - super()._setup_class() - cls.listener = OvertCommandListener() - cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) - cls.db = cls.client.pymongo_test - cls.collation = Collation("en_US") - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - @classmethod - def _tearDown_class(cls): - cls.warn_context.__exit__() - cls.warn_context = None - cls.client.close() - super()._tearDown_class() - - def tearDown(self): + def setUp(self) -> None: + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + self.collation = Collation("en_US") + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + + def tearDown(self) -> None: + self.warn_context.__exit__() + self.warn_context = None self.listener.reset() super().tearDown() diff --git a/test/test_collection.py b/test/test_collection.py index 84a900d45b..af524bba47 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -87,14 +87,10 @@ class TestCollectionNoConnect(UnitTest): db: Database client: MongoClient - @classmethod - def _setup_class(cls): - cls.client = MongoClient(connect=False) - cls.db = cls.client.pymongo_test - - @classmethod - def _tearDown_class(cls): - cls.client.close() + def setUp(self) -> None: + super().setUp() + self.client = self.simple_client(connect=False) + self.db = self.client.pymongo_test def test_collection(self): self.assertRaises(TypeError, Collection, self.db, 5) @@ -164,27 +160,14 @@ def test_iteration(self): class TestCollection(IntegrationTest): w: int - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.w = client_context.w # type: ignore - - @classmethod - def tearDownClass(cls): - if _IS_SYNC: - cls.db.drop_collection("test_large_limit") # type: ignore[unused-coroutine] - else: - asyncio.run(cls.async_tearDownClass()) - - @classmethod - def async_tearDownClass(cls): - cls.db.drop_collection("test_large_limit") - def setUp(self): - self.db.test.drop() + super().setUp() + self.w = client_context.w # type: ignore def tearDown(self): self.db.test.drop() + self.db.drop_collection("test_large_limit") + super().tearDown() @contextlib.contextmanager def write_concern_collection(self): @@ -1010,7 +993,10 @@ def test_replace_bypass_document_validation(self): db.test.insert_one({"y": 1}, bypass_document_validation=True) db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") + def predicate(): + return db_w0.test.find_one({"x": 1}) + + wait_until(predicate, "find w:0 replaced document") def test_update_bypass_document_validation(self): db = self.db diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 54cc4e0482..84ef6decd5 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -19,7 +19,12 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import ( + IntegrationTest, + client_context, + reset_client_context, + unittest, +) from test.helpers import repl_set_step_down from test.utils import ( CMAPListener, @@ -39,29 +44,19 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): listener: CMAPListener coll: Collection - @classmethod @client_context.require_replica_set - def _setup_class(cls): - super()._setup_class() - cls.listener = CMAPListener() - cls.client = cls.unmanaged_rs_or_single_client( - event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + def setUp(self): + self.listener = CMAPListener() + self.client = self.rs_or_single_client( + event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 ) # Ensure connections to all servers in replica set. This is to test # that the is_writable flag is properly updated for connections that # survive a replica set election. - ensure_all_connected(cls.client) - cls.listener.reset() - - cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) - cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) - - @classmethod - def _tearDown_class(cls): - cls.client.close() - - def setUp(self): + ensure_all_connected(self.client) + self.db = self.client.get_database("step-down", write_concern=WriteConcern("majority")) + self.coll = self.db.get_collection("step-down", write_concern=WriteConcern("majority")) # Note that all ops use same write-concern as self.db (majority). self.db.drop_collection("step-down") self.db.create_collection("step-down") diff --git a/test/test_create_entities.py b/test/test_create_entities.py index ad75fe5702..9d77a08eee 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -56,6 +56,9 @@ def test_store_events_as_entities(self): self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: self.assertIn("PoolCreatedEvent", event["name"]) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + client.close() def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() @@ -122,6 +125,9 @@ def test_store_all_others_as_entities(self): self.assertEqual(entity_map["failures"], []) self.assertEqual(entity_map["successes"], 2) self.assertEqual(entity_map["iterations"], 5) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + client.close() if __name__ == "__main__": diff --git a/test/test_cursor.py b/test/test_cursor.py index 9eac0f1c49..bcc7ed75f1 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1636,10 +1636,6 @@ def test_monitoring(self): class TestRawBatchCommandCursor(IntegrationTest): - @classmethod - def _setup_class(cls): - super()._setup_class() - def test_aggregate_raw(self): c = self.db.test c.drop() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index abaa820cb7..6771ea25f9 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -633,6 +633,7 @@ class MyType(pytype): # type: ignore class TestCollectionWCustomType(IntegrationTest): def setUp(self): + super().setUp() self.db.test.drop() def tearDown(self): @@ -754,6 +755,7 @@ def test_find_one_and__w_custom_type_decoder(self): class TestGridFileCustomType(IntegrationTest): def setUp(self): + super().setUp() self.db.drop_collection("fs.files") self.db.drop_collection("fs.chunks") @@ -917,11 +919,10 @@ def run_test(doc_cls): class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @classmethod @client_context.require_change_streams - def setUpClass(cls): - super().setUpClass() - cls.db.test.delete_many({}) + def setUp(self): + super().setUp() + self.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -935,12 +936,11 @@ def create_targets(self, *args, **kwargs): class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @classmethod @client_context.require_version_min(4, 0, 0) @client_context.require_change_streams - def setUpClass(cls): - super().setUpClass() - cls.db.test.delete_many({}) + def setUp(self): + super().setUp() + self.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -954,12 +954,11 @@ def create_targets(self, *args, **kwargs): class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @classmethod @client_context.require_version_min(4, 0, 0) @client_context.require_change_streams - def setUpClass(cls): - super().setUpClass() - cls.db.test.delete_many({}) + def setUp(self): + super().setUp() + self.db.test.delete_many({}) def tearDown(self): self.input_target.drop() diff --git a/test/test_database.py b/test/test_database.py index 4973ed0134..5e854c941d 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -709,6 +709,7 @@ def test_with_options(self): class TestDatabaseAggregation(IntegrationTest): def setUp(self): + super().setUp() self.pipeline: List[Mapping[str, Any]] = [ {"$listLocalSessions": {}}, {"$limit": 1}, diff --git a/test/test_encryption.py b/test/test_encryption.py index 0806f91a06..cb8bcb74d6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -211,11 +211,10 @@ def test_kwargs(self): class EncryptionIntegrationTest(IntegrationTest): """Base class for encryption integration tests.""" - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) - def _setup_class(cls): - super()._setup_class() + def setUp(self) -> None: + super().setUp() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -430,10 +429,9 @@ def test_upsert_uuid_standard_encrypt(self): class TestClientMaxWireVersion(IntegrationTest): - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def _setup_class(cls): - super()._setup_class() + def setUp(self): + super().setUp() @client_context.require_version_max(4, 0, 99) def test_raise_max_wire_version_error(self): @@ -816,17 +814,16 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): "local": None, } - @classmethod @unittest.skipUnless( any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - def _setup_class(cls): - super()._setup_class() - cls.listener = OvertCommandListener() - cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) - cls.client.db.coll.drop() - cls.vault = create_key_vault(cls.client.keyvault.datakeys) + def setUp(self): + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.client.db.coll.drop() + self.vault = create_key_vault(self.client.keyvault.datakeys) # Configure the encrypted field via the local schema_map option. schemas = { @@ -844,25 +841,22 @@ def _setup_class(cls): } } opts = AutoEncryptionOpts( - cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS + self.KMS_PROVIDERS, + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=KMS_TLS_OPTS, ) - cls.client_encrypted = cls.unmanaged_rs_or_single_client( + self.client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - cls.client_encryption = cls.unmanaged_create_client_encryption( - cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS + self.client_encryption = self.create_client_encryption( + self.KMS_PROVIDERS, "keyvault.datakeys", self.client, OPTS, kms_tls_options=KMS_TLS_OPTS ) - - @classmethod - def _tearDown_class(cls): - cls.vault.drop() - cls.client.close() - cls.client_encrypted.close() - cls.client_encryption.close() - - def setUp(self): self.listener.reset() + def tearDown(self) -> None: + self.vault.drop() + def run_test(self, provider_name): # Create data key. master_key: Any = self.MASTER_KEYS[provider_name] @@ -1007,10 +1001,9 @@ def test_views_are_prohibited(self): class TestCorpus(EncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - def _setup_class(cls): - super()._setup_class() + def setUp(self): + super().setUp() @staticmethod def kms_providers(): @@ -1184,12 +1177,11 @@ class TestBsonSizeBatches(EncryptionIntegrationTest): client_encrypted: MongoClient listener: OvertCommandListener - @classmethod - def _setup_class(cls): - super()._setup_class() + def setUp(self): + super().setUp() db = client_context.client.db - cls.coll = db.coll - cls.coll.drop() + self.coll = db.coll + self.coll.drop() # Configure the encrypted 'db.coll' collection via jsonSchema. json_schema = json_data("limits", "limits-schema.json") db.create_collection( @@ -1207,17 +1199,14 @@ def _setup_class(cls): coll.insert_one(json_data("limits", "limits-key.json")) opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") - cls.listener = OvertCommandListener() - cls.client_encrypted = cls.unmanaged_rs_or_single_client( - auto_encryption_opts=opts, event_listeners=[cls.listener] + self.listener = OvertCommandListener() + self.client_encrypted = self.rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[self.listener] ) - cls.coll_encrypted = cls.client_encrypted.db.coll + self.coll_encrypted = self.client_encrypted.db.coll - @classmethod - def _tearDown_class(cls): - cls.coll_encrypted.drop() - cls.client_encrypted.close() - super()._tearDown_class() + def tearDown(self) -> None: + self.coll_encrypted.drop() def test_01_insert_succeeds_under_2MiB(self): doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} @@ -1241,7 +1230,9 @@ def test_03_bulk_batch_split(self): doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) def test_04_bulk_batch_split(self): limits_doc = json_data("limits", "limits-doc.json") @@ -1251,7 +1242,9 @@ def test_04_bulk_batch_split(self): doc2.update(limits_doc) self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) def test_05_insert_succeeds_just_under_16MiB(self): doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} @@ -1281,15 +1274,12 @@ def test_06_insert_fails_over_16MiB(self): class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests for creating data keys with a custom endpoint.""" - @classmethod @unittest.skipUnless( any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - def _setup_class(cls): - super()._setup_class() - def setUp(self): + super().setUp() kms_providers = { "aws": AWS_CREDS, "azure": AZURE_CREDS, @@ -1318,10 +1308,6 @@ def setUp(self): self._kmip_host_error = None self._invalid_host_error = None - def tearDown(self): - self.client_encryption.close() - self.client_encryption_invalid.close() - def run_test_expected_success(self, provider_name, master_key): data_key_id = self.client_encryption.create_data_key(provider_name, master_key=master_key) encrypted = self.client_encryption.encrypt( @@ -1494,18 +1480,18 @@ class AzureGCPEncryptionTestMixin(EncryptionIntegrationTest): KEYVAULT_COLL = "datakeys" client: MongoClient - def setUp(self): + def _setup(self): keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) create_key_vault(keyvault, self.DEK) def _test_explicit(self, expectation): + self._setup() client_encryption = self.create_client_encryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, OPTS, ) - self.addCleanup(client_encryption.close) ciphertext = client_encryption.encrypt( "string0", @@ -1517,6 +1503,7 @@ def _test_explicit(self, expectation): self.assertEqual(client_encryption.decrypt(ciphertext), "string0") def _test_automatic(self, expectation_extjson, payload): + self._setup() encrypted_db = "db" encrypted_coll = "coll" keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) @@ -1531,7 +1518,6 @@ def _test_automatic(self, expectation_extjson, payload): client = self.rs_or_single_client( auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] ) - self.addCleanup(client.close) coll = client.get_database(encrypted_db).get_collection( encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") @@ -1553,13 +1539,12 @@ def _test_automatic(self, expectation_extjson, payload): class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") - def _setup_class(cls): - cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} - cls.DEK = json_data(BASE, "custom", "azure-dek.json") - cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super()._setup_class() + def setUp(self): + self.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + self.DEK = json_data(BASE, "custom", "azure-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUp() def test_explicit(self): return self._test_explicit( @@ -1579,13 +1564,12 @@ def test_automatic(self): class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") - def _setup_class(cls): - cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} - cls.DEK = json_data(BASE, "custom", "gcp-dek.json") - cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super()._setup_class() + def setUp(self): + self.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + self.DEK = json_data(BASE, "custom", "gcp-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUp() def test_explicit(self): return self._test_explicit( @@ -1607,6 +1591,7 @@ def test_automatic(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests class TestDeadlockProse(EncryptionIntegrationTest): def setUp(self): + super().setUp() self.client_test = self.rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" ) @@ -1637,7 +1622,6 @@ def setUp(self): self.ciphertext = client_encryption.encrypt( "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" ) - client_encryption.close() self.client_listener = OvertCommandListener() self.topology_listener = TopologyEventListener() @@ -1832,6 +1816,7 @@ def test_case_8(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events class TestDecryptProse(EncryptionIntegrationTest): def setUp(self): + super().setUp() self.client = client_context.client self.client.db.drop_collection("decryption_events") create_key_vault(self.client.keyvault.datakeys) @@ -2267,6 +2252,7 @@ def test_06_named_kms_providers_apply_tls_options_kmip(self): # https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): def setUp(self): + super().setUp() self.client = client_context.client create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} @@ -2608,8 +2594,6 @@ def MongoClient(**kwargs): assert isinstance(res["encrypted_indexed"], Binary) assert isinstance(res["encrypted_unindexed"], Binary) - client_encryption.close() - # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption class TestRangeQueryProse(EncryptionIntegrationTest): @@ -3071,17 +3055,11 @@ class TestNoSessionsSupport(EncryptionIntegrationTest): mongocryptd_client: MongoClient MONGOCRYPTD_PORT = 27020 - @classmethod @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") - def _setup_class(cls): - super()._setup_class() - start_mongocryptd(cls.MONGOCRYPTD_PORT) - - @classmethod - def _tearDown_class(cls): - super()._tearDown_class() - def setUp(self) -> None: + super().setUp() + start_mongocryptd(self.MONGOCRYPTD_PORT) + self.listener = OvertCommandListener() self.mongocryptd_client = self.simple_client( f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] diff --git a/test/test_examples.py b/test/test_examples.py index ebf1d784a3..7f98226e7a 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -33,19 +33,14 @@ class TestSampleShellCommands(IntegrationTest): - @classmethod - def setUpClass(cls): - super().setUpClass() - # Run once before any tests run. - cls.db.inventory.drop() - - @classmethod - def tearDownClass(cls): - cls.client.drop_database("pymongo_test") + def setUp(self): + super().setUp() + self.db.inventory.drop() def tearDown(self): # Run after every test. self.db.inventory.drop() + self.client.drop_database("pymongo_test") def test_first_three_examples(self): db = self.db diff --git a/test/test_grid_file.py b/test/test_grid_file.py index c35efccef5..6534bc11bf 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -97,6 +97,7 @@ def test_grid_in_custom_opts(self): class TestGridFile(IntegrationTest): def setUp(self): + super().setUp() self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) def test_basic(self): diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 549dc0b204..a36109f399 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -75,9 +75,9 @@ def run(self): class TestGridfsNoConnect(unittest.TestCase): db: Database - @classmethod - def setUpClass(cls): - cls.db = MongoClient(connect=False).pymongo_test + def setUp(self): + super().setUp() + self.db = MongoClient(connect=False).pymongo_test def test_gridfs(self): self.assertRaises(TypeError, gridfs.GridFS, "foo") @@ -88,13 +88,10 @@ class TestGridfs(IntegrationTest): fs: gridfs.GridFS alt: gridfs.GridFS - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.fs = gridfs.GridFS(cls.db) - cls.alt = gridfs.GridFS(cls.db, "alt") - def setUp(self): + super().setUp() + self.fs = gridfs.GridFS(self.db) + self.alt = gridfs.GridFS(self.db, "alt") self.cleanup_colls( self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks ) @@ -509,10 +506,9 @@ def test_md5(self): class TestGridfsReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super().setUpClass() + def setUp(self): + super().setUp() @classmethod def tearDownClass(cls): diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 28adb7051a..04c7427350 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -79,13 +79,10 @@ class TestGridfs(IntegrationTest): fs: gridfs.GridFSBucket alt: gridfs.GridFSBucket - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.fs = gridfs.GridFSBucket(cls.db) - cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") - def setUp(self): + super().setUp() + self.fs = gridfs.GridFSBucket(self.db) + self.alt = gridfs.GridFSBucket(self.db, bucket_name="alt") self.cleanup_colls( self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks ) @@ -479,10 +476,9 @@ def test_md5(self): class TestGridfsBucketReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super().setUpClass() + def setUp(self): + super().setUp() @classmethod def tearDownClass(cls): diff --git a/test/test_monitor.py b/test/test_monitor.py index f8e9443fae..a704f3d8cb 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -29,7 +29,7 @@ wait_until, ) -from pymongo.synchronous.periodic_executor import _EXECUTORS +from pymongo.periodic_executor import _EXECUTORS def unregistered(ref): diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 75fe5c987a..670558c0a0 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -52,22 +52,14 @@ class TestCommandMonitoring(IntegrationTest): listener: EventListener @classmethod - @client_context.require_connection - def _setup_class(cls): - super()._setup_class() + def setUpClass(cls) -> None: cls.listener = OvertCommandListener() - cls.client = cls.unmanaged_rs_or_single_client( - event_listeners=[cls.listener], retryWrites=False - ) - @classmethod - def _tearDown_class(cls): - cls.client.close() - super()._tearDown_class() - - def tearDown(self): + @client_context.require_connection + def setUp(self) -> None: + super().setUp() self.listener.reset() - super().tearDown() + self.client = self.rs_or_single_client(event_listeners=[self.listener], retryWrites=False) def test_started_simple(self): self.client.pymongo_test.command("ping") @@ -1140,26 +1132,23 @@ class TestGlobalListener(IntegrationTest): saved_listeners: Any @classmethod - @client_context.require_connection - def _setup_class(cls): - super()._setup_class() + def setUpClass(cls) -> None: cls.listener = OvertCommandListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) - cls.client = cls.unmanaged_single_client() - # Get one (authenticated) socket in the pool. - cls.client.pymongo_test.command("ping") - - @classmethod - def _tearDown_class(cls): - monitoring._LISTENERS = cls.saved_listeners - cls.client.close() - super()._tearDown_class() + @client_context.require_connection def setUp(self): super().setUp() self.listener.reset() + self.client = self.single_client() + # Get one (authenticated) socket in the pool. + self.client.pymongo_test.command("ping") + + @classmethod + def tearDownClass(cls): + monitoring._LISTENERS = cls.saved_listeners def test_simple(self): self.client.pymongo_test.command("ping") diff --git a/test/test_read_concern.py b/test/test_read_concern.py index ea9ce49a30..f7c0901422 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -31,24 +31,16 @@ class TestReadConcern(IntegrationTest): listener: OvertCommandListener - @classmethod @client_context.require_connection - def setUpClass(cls): - super().setUpClass() - cls.listener = OvertCommandListener() - cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) - cls.db = cls.client.pymongo_test + def setUp(self): + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test client_context.client.pymongo_test.create_collection("coll") - @classmethod - def tearDownClass(cls): - cls.client.close() - client_context.client.pymongo_test.drop_collection("coll") - super().tearDownClass() - def tearDown(self): - self.listener.reset() - super().tearDown() + client_context.client.pymongo_test.drop_collection("coll") def test_read_concern(self): rc = ReadConcern() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 74f3c23e51..07bd1db0ba 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -132,34 +132,27 @@ class IgnoreDeprecationsTest(IntegrationTest): RUN_ON_SERVERLESS = True deprecation_filter: DeprecationFilter - @classmethod - def _setup_class(cls): - super()._setup_class() - cls.deprecation_filter = DeprecationFilter() + def setUp(self) -> None: + super().setUp() + self.deprecation_filter = DeprecationFilter() - @classmethod - def _tearDown_class(cls): - cls.deprecation_filter.stop() - super()._tearDown_class() + def tearDown(self) -> None: + self.deprecation_filter.stop() class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): knobs: client_knobs - @classmethod - def _setup_class(cls): - super()._setup_class() + def setUp(self) -> None: + super().setUp() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - cls.client = cls.unmanaged_rs_or_single_client(retryWrites=True) - cls.db = cls.client.pymongo_test + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.client = self.rs_or_single_client(retryWrites=True) + self.db = self.client.pymongo_test - @classmethod - def _tearDown_class(cls): - cls.knobs.disable() - cls.client.close() - super()._tearDown_class() + def tearDown(self) -> None: + self.knobs.disable() @client_context.require_no_standalone def test_actionable_error_message(self): @@ -180,26 +173,16 @@ class TestRetryableWrites(IgnoreDeprecationsTest): listener: OvertCommandListener knobs: client_knobs - @classmethod @client_context.require_no_mmap - def _setup_class(cls): - super()._setup_class() + def setUp(self) -> None: + super().setUp() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - cls.listener = OvertCommandListener() - cls.client = cls.unmanaged_rs_or_single_client( - retryWrites=True, event_listeners=[cls.listener] - ) - cls.db = cls.client.pymongo_test - - @classmethod - def _tearDown_class(cls): - cls.knobs.disable() - cls.client.close() - super()._tearDown_class() + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(retryWrites=True, event_listeners=[self.listener]) + self.db = self.client.pymongo_test - def setUp(self): if client_context.is_rs and client_context.test_commands_enabled: self.client.admin.command( SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) @@ -210,6 +193,7 @@ def tearDown(self): self.client.admin.command( SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) ) + self.knobs.disable() def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() @@ -438,13 +422,12 @@ class TestWriteConcernError(IntegrationTest): RUN_ON_SERVERLESS = True fail_insert: dict - @classmethod @client_context.require_replica_set @client_context.require_no_mmap @client_context.require_failCommand_fail_point - def _setup_class(cls): - super()._setup_class() - cls.fail_insert = { + def setUp(self) -> None: + super().setUp() + self.fail_insert = { "configureFailPoint": "failCommand", "mode": {"times": 2}, "data": { diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 81b208d511..6b808b159d 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -270,7 +270,7 @@ class TestSdamMonitoring(IntegrationTest): @classmethod @client_context.require_failCommand_fail_point def setUpClass(cls): - super().setUpClass() + super().setUp(cls) # Speed up the tests by decreasing the event publish frequency. cls.knobs = client_knobs( events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 diff --git a/test/test_session.py b/test/test_session.py index d0bbb075a8..634efa11c0 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -82,36 +82,27 @@ class TestSession(IntegrationTest): client2: MongoClient sensitive_commands: Set[str] - @classmethod @client_context.require_sessions - def _setup_class(cls): - super()._setup_class() + def setUp(self): + super().setUp() # Create a second client so we can make sure clients cannot share # sessions. - cls.client2 = cls.unmanaged_rs_or_single_client() + self.client2 = self.rs_or_single_client() # Redact no commands, so we can test user-admin commands have "lsid". - cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() + self.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() monitoring._SENSITIVE_COMMANDS.clear() - @classmethod - def _tearDown_class(cls): - monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) - cls.client2.close() - super()._tearDown_class() - - def setUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() self.client = self.rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener] ) - self.addCleanup(self.client.close) self.db = self.client.pymongo_test self.initial_lsids = {s["id"] for s in session_ids(self.client)} def tearDown(self): - """All sessions used in the test must be returned to the pool.""" + monitoring._SENSITIVE_COMMANDS.update(self.sensitive_commands) self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() for event in self.session_checker_listener.started_events: @@ -121,6 +112,8 @@ def tearDown(self): current_lsids = {s["id"] for s in session_ids(self.client)} self.assertLessEqual(used_lsids, current_lsids) + super().tearDown() + def _test_ops(self, client, *ops): listener = client.options.event_listeners[0] @@ -832,18 +825,11 @@ class TestCausalConsistency(UnitTest): listener: SessionTestListener client: MongoClient - @classmethod - def _setup_class(cls): - cls.listener = SessionTestListener() - cls.client = cls.unmanaged_rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def _tearDown_class(cls): - cls.client.close() - @client_context.require_sessions def setUp(self): super().setUp() + self.listener = SessionTestListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) @client_context.require_no_standalone def test_core(self): diff --git a/test/test_threads.py b/test/test_threads.py index b3dadbb1a3..3e469e28fe 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -105,6 +105,7 @@ def run(self): class TestThreads(IntegrationTest): def setUp(self): + super().setUp() self.db = self.client.pymongo_test def test_threading(self): diff --git a/test/test_transactions.py b/test/test_transactions.py index 3cecbe9d38..949b88e60b 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -395,19 +395,12 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(TransactionsBase): - @classmethod - def _setup_class(cls): - super()._setup_class() - cls.mongos_clients = [] + def setUp(self) -> None: + super().setUp() + self.mongos_clients = [] if client_context.supports_transactions(): for address in client_context.mongoses: - cls.mongos_clients.append(cls.unmanaged_single_client("{}:{}".format(*address))) - - @classmethod - def _tearDown_class(cls): - for client in cls.mongos_clients: - client.close() - super()._tearDown_class() + self.mongos_clients.append(self.single_client("{}:{}".format(*address))) def _set_fail_point(self, client, command_args): cmd = {"configureFailPoint": "failCommand"} diff --git a/test/test_typing.py b/test/test_typing.py index 441707616e..bfe4d032c1 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -114,10 +114,9 @@ def test_mypy_failures(self) -> None: class TestPymongo(IntegrationTest): coll: Collection - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.coll = cls.client.test.test + def setUp(self): + super().setUp() + self.coll = self.client.test.test def test_insert_find(self) -> None: doc = {"my": "doc"} diff --git a/test/unified_format.py b/test/unified_format.py index 3489a8ac84..5cb268a29d 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -304,7 +304,6 @@ def _create_entity(self, entity_spec, uri=None): kwargs["h"] = uri client = self.test.rs_or_single_client(**kwargs) self[spec["id"]] = client - self.test.addCleanup(client.close) return elif entity_type == "database": client = self[spec["client"]] @@ -479,52 +478,47 @@ def insert_initial_data(self, initial_data): db.create_collection(coll_name, write_concern=wc, **opts) @classmethod - def _setup_class(cls): + def setUpClass(cls) -> None: + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + def tearDownClass(cls) -> None: + cls.knobs.disable() + + def setUp(self): # super call creates internal client cls.client - super()._setup_class() + super().setUp() # process file-level runOnRequirements - run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) - if not cls.should_run_on(run_on_spec): - raise unittest.SkipTest(f"{cls.__name__} runOnRequirements not satisfied") + run_on_spec = self.TEST_SPEC.get("runOnRequirements", []) + if not self.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here if client_context.storage_engine == "mmapv1": - if "retryable-writes" in cls.TEST_SPEC["description"] or "retryable_writes" in str( - cls.TEST_PATH + if "retryable-writes" in self.TEST_SPEC["description"] or "retryable_writes" in str( + self.TEST_PATH ): raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") # Handle mongos_clients for transactions tests. - cls.mongos_clients = [] + self.mongos_clients = [] if ( client_context.supports_transactions() and not client_context.load_balancer and not client_context.serverless ): for address in client_context.mongoses: - cls.mongos_clients.append(cls.unmanaged_single_client("{}:{}".format(*address))) + self.mongos_clients.append(self.single_client("{}:{}".format(*address))) - # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs( - heartbeat_frequency=0.1, - min_heartbeat_interval=0.1, - kill_cursor_frequency=0.1, - events_queue_frequency=0.1, - ) - cls.knobs.enable() - - @classmethod - def _tearDown_class(cls): - cls.knobs.disable() - for client in cls.mongos_clients: - client.close() - super()._tearDown_class() - - def setUp(self): - super().setUp() # process schemaVersion # note: we check major schema version during class generation - # note: we do this here because we cannot run assertions in setUpClass version = Version.from_string(self.TEST_SPEC["schemaVersion"]) self.assertLessEqual( version, @@ -1026,7 +1020,6 @@ def _testOperation_targetedFailPoint(self, spec): ) client = self.single_client("{}:{}".format(*session._pinned_address)) - self.addCleanup(client.close) self.__set_fail_point(client=client, command_args=spec["failPoint"]) def _testOperation_createEntities(self, spec): diff --git a/test/utils.py b/test/utils.py index 9b326e5d73..69154bc63b 100644 --- a/test/utils.py +++ b/test/utils.py @@ -99,6 +99,12 @@ def wait_for_event(self, event, count): """Wait for a number of events to be published, or fail.""" wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") + async def async_wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + await async_wait_until( + lambda: self.event_count(event) >= count, f"find {count} {event} event(s)" + ) + class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): def connection_created(self, event): @@ -644,7 +650,10 @@ async def async_wait_until(predicate, success_description, timeout=10): start = time.time() interval = min(float(timeout) / 100, 0.1) while True: - retval = await predicate() + if iscoroutinefunction(predicate): + retval = await predicate() + else: + retval = predicate() if retval: return retval diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 8b2679d776..4508502cd0 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -249,30 +249,22 @@ class SpecRunner(IntegrationTest): knobs: client_knobs listener: EventListener - @classmethod - def _setup_class(cls): - super()._setup_class() - cls.mongos_clients = [] + def setUp(self) -> None: + super().setUp() + self.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - - @classmethod - def _tearDown_class(cls): - cls.knobs.disable() - for client in cls.mongos_clients: - client.close() - super()._tearDown_class() - - def setUp(self): - super().setUp() + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() self.targets = {} self.listener = None # type: ignore self.pool_listener = None self.server_listener = None self.maxDiff = None + def tearDown(self) -> None: + self.knobs.disable() + def _set_fail_point(self, client, command_args): cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) @@ -697,8 +689,6 @@ def run_scenario(self, scenario_def, test): self.listener = listener self.pool_listener = pool_listener self.server_listener = server_listener - # Close the client explicitly to avoid having too many threads open. - self.addCleanup(client.close) # Create session0 and session1. sessions = {} diff --git a/tools/synchro.py b/tools/synchro.py index 0a7109c6d4..47617365f4 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -110,6 +110,13 @@ "async_set_fail_point": "set_fail_point", "async_ensure_all_connected": "ensure_all_connected", "async_repl_set_step_down": "repl_set_step_down", + "AsyncPeriodicExecutor": "PeriodicExecutor", + "async_wait_for_event": "wait_for_event", + "pymongo_server_monitor_task": "pymongo_server_monitor_thread", + "pymongo_server_rtt_task": "pymongo_server_rtt_thread", + "_async_create_lock": "_create_lock", + "_async_create_condition": "_create_condition", + "_async_cond_wait": "_cond_wait", } docstring_replacements: dict[tuple[str, str], str] = { @@ -130,8 +137,6 @@ ".. warning:: This API is currently in beta, meaning the classes, methods, and behaviors described within may change before the full release." } -type_replacements = {"_Condition": "threading.Condition"} - import_replacements = {"test.synchronous": "test"} _pymongo_base = "./pymongo/asynchronous/" @@ -234,8 +239,6 @@ def process_files(files: list[str]) -> None: lines = translate_async_sleeps(lines) if file in docstring_translate_files: lines = translate_docstrings(lines) - translate_locks(lines) - translate_types(lines) if file in sync_test_files: translate_imports(lines) f.seek(0) @@ -269,34 +272,6 @@ def translate_coroutine_types(lines: list[str]) -> list[str]: return lines -def translate_locks(lines: list[str]) -> list[str]: - lock_lines = [line for line in lines if "_Lock(" in line] - cond_lines = [line for line in lines if "_Condition(" in line] - for line in lock_lines: - res = re.search(r"_Lock\(([^()]*\([^()]*\))\)", line) - if res: - old = res[0] - index = lines.index(line) - lines[index] = line.replace(old, res[1]) - for line in cond_lines: - res = re.search(r"_Condition\(([^()]*\([^()]*\))\)", line) - if res: - old = res[0] - index = lines.index(line) - lines[index] = line.replace(old, res[1]) - - return lines - - -def translate_types(lines: list[str]) -> list[str]: - for k, v in type_replacements.items(): - matches = [line for line in lines if k in line and "import" not in line] - for line in matches: - index = lines.index(line) - lines[index] = line.replace(k, v) - return lines - - def translate_imports(lines: list[str]) -> list[str]: for k, v in import_replacements.items(): matches = [line for line in lines if k in line and "import" in line] From cbeebd01901467a204687a70ca498dad70539502 Mon Sep 17 00:00:00 2001 From: theRealProHacker <77074862+theRealProHacker@users.noreply.github.com> Date: Mon, 2 Dec 2024 17:54:56 +0100 Subject: [PATCH 1632/2111] Small doc fix (#2021) Co-authored-by: Steven Silvester --- pymongo/asynchronous/cursor.py | 2 +- pymongo/synchronous/cursor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 7d7ae4a5db..8193e53282 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -1299,7 +1299,7 @@ async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: >>> await cursor.to_list() - Or, so read at most n items from the cursor:: + Or, to read at most n items from the cursor:: >>> await cursor.to_list(n) diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 9a7637704f..b35098a327 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -1297,7 +1297,7 @@ def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: >>> cursor.to_list() - Or, so read at most n items from the cursor:: + Or, to read at most n items from the cursor:: >>> cursor.to_list(n) From bc66598623c46e072518f6f11347096476b100c1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 2 Dec 2024 12:17:52 -0500 Subject: [PATCH 1633/2111] PYTHON-4965 - Consolidate startup and teardown tasks (#2017) --- .evergreen/combine-coverage.sh | 0 .evergreen/config.yml | 99 ++++++------------- .evergreen/hatch.sh | 0 .evergreen/install-dependencies.sh | 0 .evergreen/run-azurekms-fail-test.sh | 0 .evergreen/run-azurekms-test.sh | 0 .evergreen/run-deployed-lambda-aws-tests.sh | 0 .evergreen/run-gcpkms-test.sh | 0 .evergreen/run-perf-tests.sh | 0 .evergreen/scripts/archive-mongodb-logs.sh | 0 .../scripts/bootstrap-mongo-orchestration.sh | 0 .evergreen/scripts/check-import-time.sh | 0 .evergreen/scripts/cleanup.sh | 0 .evergreen/scripts/configure-env.sh | 0 .../scripts/download-and-merge-coverage.sh | 0 .evergreen/scripts/fix-absolute-paths.sh | 0 .evergreen/scripts/init-test-results.sh | 0 .evergreen/scripts/install-dependencies.sh | 0 .evergreen/scripts/make-files-executable.sh | 0 .evergreen/scripts/prepare-resources.sh | 0 .evergreen/scripts/run-atlas-tests.sh | 0 .evergreen/scripts/run-aws-ecs-auth-test.sh | 0 .evergreen/scripts/run-doctests.sh | 0 .../scripts/run-enterprise-auth-tests.sh | 0 .evergreen/scripts/run-gcpkms-fail-test.sh | 0 .evergreen/scripts/run-getdata.sh | 0 .evergreen/scripts/run-load-balancer.sh | 0 .evergreen/scripts/run-mockupdb-tests.sh | 0 .evergreen/scripts/run-mod-wsgi-tests.sh | 0 .evergreen/scripts/run-ocsp-test.sh | 0 .evergreen/scripts/run-perf-tests.sh | 0 .evergreen/scripts/run-tests.sh | 0 .evergreen/scripts/run-with-env.sh | 0 .evergreen/scripts/setup-encryption.sh | 0 .evergreen/scripts/setup-tests.sh | 0 .evergreen/scripts/stop-load-balancer.sh | 0 .evergreen/scripts/teardown-aws.sh | 7 -- .evergreen/scripts/teardown-docker.sh | 7 -- .evergreen/scripts/upload-coverage-report.sh | 0 .evergreen/scripts/windows-fix.sh | 0 .evergreen/setup-encryption.sh | 0 .evergreen/teardown-encryption.sh | 0 .pre-commit-config.yaml | 12 +++ tools/synchro.sh | 0 44 files changed, 44 insertions(+), 81 deletions(-) mode change 100644 => 100755 .evergreen/combine-coverage.sh mode change 100644 => 100755 .evergreen/hatch.sh mode change 100644 => 100755 .evergreen/install-dependencies.sh mode change 100644 => 100755 .evergreen/run-azurekms-fail-test.sh mode change 100644 => 100755 .evergreen/run-azurekms-test.sh mode change 100644 => 100755 .evergreen/run-deployed-lambda-aws-tests.sh mode change 100644 => 100755 .evergreen/run-gcpkms-test.sh mode change 100644 => 100755 .evergreen/run-perf-tests.sh mode change 100644 => 100755 .evergreen/scripts/archive-mongodb-logs.sh mode change 100644 => 100755 .evergreen/scripts/bootstrap-mongo-orchestration.sh mode change 100644 => 100755 .evergreen/scripts/check-import-time.sh mode change 100644 => 100755 .evergreen/scripts/cleanup.sh mode change 100644 => 100755 .evergreen/scripts/configure-env.sh mode change 100644 => 100755 .evergreen/scripts/download-and-merge-coverage.sh mode change 100644 => 100755 .evergreen/scripts/fix-absolute-paths.sh mode change 100644 => 100755 .evergreen/scripts/init-test-results.sh mode change 100644 => 100755 .evergreen/scripts/install-dependencies.sh mode change 100644 => 100755 .evergreen/scripts/make-files-executable.sh mode change 100644 => 100755 .evergreen/scripts/prepare-resources.sh mode change 100644 => 100755 .evergreen/scripts/run-atlas-tests.sh mode change 100644 => 100755 .evergreen/scripts/run-aws-ecs-auth-test.sh mode change 100644 => 100755 .evergreen/scripts/run-doctests.sh mode change 100644 => 100755 .evergreen/scripts/run-enterprise-auth-tests.sh mode change 100644 => 100755 .evergreen/scripts/run-gcpkms-fail-test.sh mode change 100644 => 100755 .evergreen/scripts/run-getdata.sh mode change 100644 => 100755 .evergreen/scripts/run-load-balancer.sh mode change 100644 => 100755 .evergreen/scripts/run-mockupdb-tests.sh mode change 100644 => 100755 .evergreen/scripts/run-mod-wsgi-tests.sh mode change 100644 => 100755 .evergreen/scripts/run-ocsp-test.sh mode change 100644 => 100755 .evergreen/scripts/run-perf-tests.sh mode change 100644 => 100755 .evergreen/scripts/run-tests.sh mode change 100644 => 100755 .evergreen/scripts/run-with-env.sh mode change 100644 => 100755 .evergreen/scripts/setup-encryption.sh mode change 100644 => 100755 .evergreen/scripts/setup-tests.sh mode change 100644 => 100755 .evergreen/scripts/stop-load-balancer.sh delete mode 100644 .evergreen/scripts/teardown-aws.sh delete mode 100644 .evergreen/scripts/teardown-docker.sh mode change 100644 => 100755 .evergreen/scripts/upload-coverage-report.sh mode change 100644 => 100755 .evergreen/scripts/windows-fix.sh mode change 100644 => 100755 .evergreen/setup-encryption.sh mode change 100644 => 100755 .evergreen/teardown-encryption.sh mode change 100644 => 100755 tools/synchro.sh diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh old mode 100644 new mode 100755 diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 7ca3a72b1a..ac89270d84 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -37,6 +37,8 @@ functions: # Applies the subitted patch, if any # Deprecated. Should be removed. But still needed for certain agents (ZAP) - command: git.apply_patch + + "setup system": # Make an evergreen expansion file with dynamic values - command: subprocess.exec params: @@ -49,13 +51,19 @@ functions: - command: expansions.update params: file: src/expansion.yml - - "prepare resources": - command: subprocess.exec params: + include_expansions_in_env: ["PROJECT_DIRECTORY", "DRIVERS_TOOLS"] binary: bash args: - src/.evergreen/scripts/prepare-resources.sh + # Run drivers-evergreen-tools system setup + - command: subprocess.exec + params: + include_expansions_in_env: ["PROJECT_DIRECTORY", "DRIVERS_TOOLS"] + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/setup.sh "upload coverage" : - command: ec2.assume_role @@ -511,41 +519,32 @@ functions: - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/cleanup.sh - "teardown": + "teardown system": - command: subprocess.exec params: binary: bash working_dir: "src" args: - - ${DRIVERS_TOOLS}/.evergreen/teardown.sh - - "fix absolute paths": + # Ensure the instance profile is reassigned for aws tests. + - ${DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh - command: subprocess.exec params: binary: bash + working_dir: "src" args: - - src/.evergreen/scripts/fix-absolute-paths.sh - - "windows fix": + - ${DRIVERS_TOOLS}/.evergreen/csfle/teardown.sh - command: subprocess.exec params: binary: bash + working_dir: "src" args: - - src/.evergreen/scripts/windows-fix.sh - - "make files executable": - - command: subprocess.exec - params: - binary: bash - args: - - src/.evergreen/scripts/make-files-executable.sh - - "init test-results": + - ${DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh - command: subprocess.exec params: binary: bash + working_dir: "src" args: - - src/.evergreen/scripts/init-test-results.sh + - ${DRIVERS_TOOLS}/.evergreen/teardown.sh "install dependencies": - command: subprocess.exec @@ -621,21 +620,6 @@ functions: args: - src/.evergreen/scripts/stop-load-balancer.sh - "teardown_docker": - - command: subprocess.exec - params: - binary: bash - args: - - src/.evergreen/scripts/teardown-docker.sh - - "teardown_aws": - - command: subprocess.exec - params: - binary: bash - args: - - src/.evergreen/scripts/run-with-env.sh - - src/.evergreen/scripts/teardown-aws.sh - "teardown atlas": - command: subprocess.exec params: @@ -665,25 +649,19 @@ functions: pre: - func: "fetch source" - - func: "prepare resources" - - func: "windows fix" - - func: "fix absolute paths" - - func: "init test-results" - - func: "make files executable" + - func: "setup system" - func: "install dependencies" - func: "assume ec2 role" post: # Disabled, causing timeouts # - func: "upload working dir" - - func: "teardown" + - func: "teardown system" - func: "upload coverage" - func: "upload mo artifacts" - func: "upload test results" - func: "stop mongo-orchestration" - - func: "teardown_aws" - func: "cleanup" - - func: "teardown_docker" task_groups: - name: serverless_task_group @@ -691,7 +669,7 @@ task_groups: setup_group_timeout_secs: 1800 # 30 minutes setup_group: - func: "fetch source" - - func: "prepare resources" + - func: "setup system" - command: subprocess.exec params: binary: bash @@ -714,9 +692,7 @@ task_groups: setup_group_timeout_secs: 1800 # 30 minutes setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - command: subprocess.exec params: binary: bash @@ -735,9 +711,7 @@ task_groups: - name: testazurekms_task_group setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - command: subprocess.exec params: binary: bash @@ -761,9 +735,7 @@ task_groups: - name: testazureoidc_task_group setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - command: subprocess.exec params: binary: bash @@ -785,9 +757,7 @@ task_groups: - name: testgcpoidc_task_group setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - command: subprocess.exec params: binary: bash @@ -809,9 +779,7 @@ task_groups: - name: testk8soidc_task_group setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - command: ec2.assume_role params: role_arn: ${aws_test_secrets_role} @@ -835,9 +803,7 @@ task_groups: - name: testoidc_task_group setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - func: "assume ec2 role" - command: subprocess.exec params: @@ -859,7 +825,7 @@ task_groups: - name: test_aws_lambda_task_group setup_group: - func: fetch source - - func: prepare resources + - func: setup system - func: setup atlas teardown_task: - func: teardown atlas @@ -871,9 +837,7 @@ task_groups: - name: test_atlas_task_group_search_indexes setup_group: - func: fetch source - - func: prepare resources - - func: fix absolute paths - - func: make files executable + - func: setup system - func: setup atlas teardown_task: - func: teardown atlas @@ -1584,7 +1548,7 @@ tasks: - name: testazurekms-fail-task commands: - func: fetch source - - func: make files executable + - func: setup system - func: "bootstrap mongo-orchestration" vars: VERSION: "latest" @@ -1640,6 +1604,7 @@ tasks: params: binary: bash working_dir: src + include_expansions_in_env: ["PYTHON_BINARY"] args: - .evergreen/scripts/check-import-time.sh - ${revision} diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh old mode 100644 new mode 100755 diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh old mode 100644 new mode 100755 diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh old mode 100644 new mode 100755 diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh old mode 100644 new mode 100755 diff --git a/.evergreen/run-deployed-lambda-aws-tests.sh b/.evergreen/run-deployed-lambda-aws-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh old mode 100644 new mode 100755 diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/archive-mongodb-logs.sh b/.evergreen/scripts/archive-mongodb-logs.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/bootstrap-mongo-orchestration.sh b/.evergreen/scripts/bootstrap-mongo-orchestration.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/check-import-time.sh b/.evergreen/scripts/check-import-time.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/download-and-merge-coverage.sh b/.evergreen/scripts/download-and-merge-coverage.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/fix-absolute-paths.sh b/.evergreen/scripts/fix-absolute-paths.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/init-test-results.sh b/.evergreen/scripts/init-test-results.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/make-files-executable.sh b/.evergreen/scripts/make-files-executable.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/prepare-resources.sh b/.evergreen/scripts/prepare-resources.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-aws-ecs-auth-test.sh b/.evergreen/scripts/run-aws-ecs-auth-test.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-doctests.sh b/.evergreen/scripts/run-doctests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-getdata.sh b/.evergreen/scripts/run-getdata.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-load-balancer.sh b/.evergreen/scripts/run-load-balancer.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-mockupdb-tests.sh b/.evergreen/scripts/run-mockupdb-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-mod-wsgi-tests.sh b/.evergreen/scripts/run-mod-wsgi-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-ocsp-test.sh b/.evergreen/scripts/run-ocsp-test.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-perf-tests.sh b/.evergreen/scripts/run-perf-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-tests.sh b/.evergreen/scripts/run-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/run-with-env.sh b/.evergreen/scripts/run-with-env.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/setup-encryption.sh b/.evergreen/scripts/setup-encryption.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/stop-load-balancer.sh b/.evergreen/scripts/stop-load-balancer.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/teardown-aws.sh b/.evergreen/scripts/teardown-aws.sh deleted file mode 100644 index 634d1e5724..0000000000 --- a/.evergreen/scripts/teardown-aws.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" || exit -if [ -f "./aws_e2e_setup.json" ]; then - . ./activate-authawsvenv.sh - python ./lib/aws_assign_instance_profile.py -fi diff --git a/.evergreen/scripts/teardown-docker.sh b/.evergreen/scripts/teardown-docker.sh deleted file mode 100644 index 733779d058..0000000000 --- a/.evergreen/scripts/teardown-docker.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Remove all Docker images -DOCKER=$(command -v docker) || true -if [ -n "$DOCKER" ]; then - docker rmi -f "$(docker images -a -q)" &> /dev/null || true -fi diff --git a/.evergreen/scripts/upload-coverage-report.sh b/.evergreen/scripts/upload-coverage-report.sh old mode 100644 new mode 100755 diff --git a/.evergreen/scripts/windows-fix.sh b/.evergreen/scripts/windows-fix.sh old mode 100644 new mode 100755 diff --git a/.evergreen/setup-encryption.sh b/.evergreen/setup-encryption.sh old mode 100644 new mode 100755 diff --git a/.evergreen/teardown-encryption.sh b/.evergreen/teardown-encryption.sh old mode 100644 new mode 100755 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6e2b497e59..4f6759bc5a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -102,3 +102,15 @@ repos: # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine # - test/test_client.py:188: te ==> the, be, we, to args: ["-L", "fle,fo,infinit,isnt,nin,te,aks"] + +- repo: local + hooks: + - id: executable-shell + name: executable-shell + entry: chmod +x + language: system + types: [shell] + exclude: | + (?x)( + .evergreen/retry-with-backoff.sh + ) diff --git a/tools/synchro.sh b/tools/synchro.sh old mode 100644 new mode 100755 From 0f61ebb1150266a8cc9df70b87c4175b5f23aead Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 2 Dec 2024 12:35:31 -0500 Subject: [PATCH 1634/2111] PYTHON-4995 - Skip TestNoSessionsSupport tests on crypt_shared (#2022) --- test/asynchronous/test_encryption.py | 2 +- test/test_encryption.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 048db2d501..21cd5e2666 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -3069,11 +3069,11 @@ def start_mongocryptd(port) -> None: _spawn_daemon(args) +@unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") class TestNoSessionsSupport(AsyncEncryptionIntegrationTest): mongocryptd_client: AsyncMongoClient MONGOCRYPTD_PORT = 27020 - @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") async def asyncSetUp(self) -> None: await super().asyncSetUp() start_mongocryptd(self.MONGOCRYPTD_PORT) diff --git a/test/test_encryption.py b/test/test_encryption.py index cb8bcb74d6..18e21fe6a7 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -3051,11 +3051,11 @@ def start_mongocryptd(port) -> None: _spawn_daemon(args) +@unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") class TestNoSessionsSupport(EncryptionIntegrationTest): mongocryptd_client: MongoClient MONGOCRYPTD_PORT = 27020 - @unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") def setUp(self) -> None: super().setUp() start_mongocryptd(self.MONGOCRYPTD_PORT) From a9e61f6bed71dbf9a26d46d18d9905a6a0ccdd16 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 2 Dec 2024 10:08:52 -0800 Subject: [PATCH 1635/2111] PYTHON-4292 Improve TLS read performance (#2020) --- pymongo/network_layer.py | 91 +++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 53 deletions(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 6ab6db2f7d..beffba6d18 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -28,7 +28,7 @@ Union, ) -from pymongo import _csot, ssl_support +from pymongo import ssl_support from pymongo._asyncio_task import create_task from pymongo.errors import _OperationCancelled from pymongo.socket_checker import _errno_from_exception @@ -316,62 +316,47 @@ async def _async_receive(conn: socket.socket, length: int, loop: AbstractEventLo return mv -# Sync version: -def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: - """Block until at least one byte is read, or a timeout, or a cancel.""" - sock = conn.conn - timed_out = False - # Check if the connection's socket has been manually closed - if sock.fileno() == -1: - return - while True: - # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, "pending") and sock.pending() > 0: - readable = True - else: - # Wait up to 500ms for the socket to become readable and then - # check for cancellation. - if deadline: - remaining = deadline - time.monotonic() - # When the timeout has expired perform one final check to - # see if the socket is readable. This helps avoid spurious - # timeouts on AWS Lambda and other FaaS environments. - if remaining <= 0: - timed_out = True - timeout = max(min(remaining, _POLL_TIMEOUT), 0) - else: - timeout = _POLL_TIMEOUT - readable = conn.socket_checker.select(sock, read=True, timeout=timeout) - if conn.cancel_context.cancelled: - raise _OperationCancelled("operation cancelled") - if readable: - return - if timed_out: - raise socket.timeout("timed out") - - def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: buf = bytearray(length) mv = memoryview(buf) bytes_read = 0 - while bytes_read < length: - try: - wait_for_read(conn, deadline) - # CSOT: Update timeout. When the timeout has expired perform one - # final non-blocking recv. This helps avoid spurious timeouts when - # the response is actually already buffered on the client. - if _csot.get_timeout() and deadline is not None: - conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) - chunk_length = conn.conn.recv_into(mv[bytes_read:]) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") from None - except OSError as exc: - if _errno_from_exception(exc) == errno.EINTR: + # To support cancelling a network read, we shorten the socket timeout and + # check for the cancellation signal after each timeout. Alternatively we + # could close the socket but that does not reliably cancel recv() calls + # on all OSes. + orig_timeout = conn.conn.gettimeout() + try: + while bytes_read < length: + if deadline is not None: + # CSOT: Update timeout. When the timeout has expired perform one + # final non-blocking recv. This helps avoid spurious timeouts when + # the response is actually already buffered on the client. + short_timeout = min(max(deadline - time.monotonic(), 0), _POLL_TIMEOUT) + else: + short_timeout = _POLL_TIMEOUT + conn.set_conn_timeout(short_timeout) + try: + chunk_length = conn.conn.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + # We reached the true deadline. + raise socket.timeout("timed out") from None + except socket.timeout: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None continue - raise - if chunk_length == 0: - raise OSError("connection closed") - - bytes_read += chunk_length + except OSError as exc: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise OSError("connection closed") + + bytes_read += chunk_length + finally: + conn.set_conn_timeout(orig_timeout) return mv From f45b35a478f5f8a9c07b8e029869db5799113576 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 2 Dec 2024 14:08:22 -0500 Subject: [PATCH 1636/2111] PYTHON-4996 - Ensure all async integration tests call their parent asyncSetup method (#2023) --- test/asynchronous/test_client_bulk_write.py | 2 ++ .../test_connections_survive_primary_stepdown_spec.py | 1 + test/test_client_bulk_write.py | 2 ++ test/test_connections_survive_primary_stepdown_spec.py | 1 + test/test_gridfs.py | 1 + test/test_gridfs_bucket.py | 1 + 6 files changed, 8 insertions(+) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 01294402de..a82629f495 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -102,6 +102,7 @@ async def test_raw_bson_not_inflated(self): # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(AsyncIntegrationTest): async def asyncSetUp(self): + await super().asyncSetUp() self.max_write_batch_size = await async_client_context.max_write_batch_size self.max_bson_object_size = await async_client_context.max_bson_size self.max_message_size_bytes = await async_client_context.max_message_size_bytes @@ -652,6 +653,7 @@ class TestClientBulkWriteCSOT(AsyncIntegrationTest): async def asyncSetUp(self): if os.environ.get("SKIP_CSOT_TESTS", ""): raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") + await super().asyncSetUp() self.max_write_batch_size = await async_client_context.max_write_batch_size self.max_bson_object_size = await async_client_context.max_bson_size self.max_message_size_bytes = await async_client_context.max_message_size_bytes diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py index bc9638b443..4795d3937a 100644 --- a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -46,6 +46,7 @@ class TestAsyncConnectionsSurvivePrimaryStepDown(AsyncIntegrationTest): @async_client_context.require_replica_set async def asyncSetUp(self): + await super().asyncSetUp() self.listener = CMAPListener() self.client = await self.async_rs_or_single_client( event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index f06c07d588..c1cc27c28a 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -102,6 +102,7 @@ def test_raw_bson_not_inflated(self): # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(IntegrationTest): def setUp(self): + super().setUp() self.max_write_batch_size = client_context.max_write_batch_size self.max_bson_object_size = client_context.max_bson_size self.max_message_size_bytes = client_context.max_message_size_bytes @@ -648,6 +649,7 @@ class TestClientBulkWriteCSOT(IntegrationTest): def setUp(self): if os.environ.get("SKIP_CSOT_TESTS", ""): raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") + super().setUp() self.max_write_batch_size = client_context.max_write_batch_size self.max_bson_object_size = client_context.max_bson_size self.max_message_size_bytes = client_context.max_message_size_bytes diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 84ef6decd5..1fb08cbed5 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -46,6 +46,7 @@ class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): @client_context.require_replica_set def setUp(self): + super().setUp() self.listener = CMAPListener() self.client = self.rs_or_single_client( event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 diff --git a/test/test_gridfs.py b/test/test_gridfs.py index a36109f399..ab8950250b 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -511,6 +511,7 @@ def setUp(self): super().setUp() @classmethod + @client_context.require_connection def tearDownClass(cls): client_context.client.drop_database("gfsreplica") diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 04c7427350..0af4dce811 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -481,6 +481,7 @@ def setUp(self): super().setUp() @classmethod + @client_context.require_connection def tearDownClass(cls): client_context.client.drop_database("gfsbucketreplica") From fdcbe2e62237623c20b23f740bc66894532d475a Mon Sep 17 00:00:00 2001 From: Navjot Date: Tue, 3 Dec 2024 14:22:06 +0000 Subject: [PATCH 1637/2111] PYTHON-1982 Update Invalid Document error message to include doc (#1854) Co-authored-by: Navjot Singh Co-authored-by: Navjot Singh Co-authored-by: Steven Silvester --- bson/__init__.py | 5 ++++- bson/_cbsonmodule.c | 35 +++++++++++++++++++++++++++++++++++ doc/contributors.rst | 1 + test/test_bson.py | 13 +++++++++++++ 4 files changed, 53 insertions(+), 1 deletion(-) diff --git a/bson/__init__.py b/bson/__init__.py index e866a99c8d..fc6efe0d59 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1006,7 +1006,10 @@ def _dict_to_bson( elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) for key, value in doc.items(): if not top_level or key != "_id": - elements.append(_element_to_bson(key, value, check_keys, opts)) + try: + elements.append(_element_to_bson(key, value, check_keys, opts)) + except InvalidDocument as err: + raise InvalidDocument(f"Invalid document {doc} | {err}") from err except AttributeError: raise TypeError(f"encoder expected a mapping type but got: {doc!r}") from None diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index a66071c285..d91c7e0536 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1743,6 +1743,41 @@ int write_dict(PyObject* self, buffer_t buffer, while (PyDict_Next(dict, &pos, &key, &value)) { if (!decode_and_write_pair(self, buffer, key, value, check_keys, options, top_level)) { + if (PyErr_Occurred()) { + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyErr_Fetch(&etype, &evalue, &etrace); + PyObject *InvalidDocument = _error("InvalidDocument"); + + if (top_level && InvalidDocument && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { + + Py_DECREF(etype); + etype = InvalidDocument; + + if (evalue) { + PyObject *msg = PyObject_Str(evalue); + Py_DECREF(evalue); + + if (msg) { + // Prepend doc to the existing message + PyObject *dict_str = PyObject_Str(dict); + PyObject *new_msg = PyUnicode_FromFormat("Invalid document %s | %s", PyUnicode_AsUTF8(dict_str), PyUnicode_AsUTF8(msg)); + Py_DECREF(dict_str); + + if (new_msg) { + evalue = new_msg; + } + else { + evalue = msg; + } + } + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } + else { + Py_DECREF(InvalidDocument); + } + PyErr_Restore(etype, evalue, etrace); + } return 0; } } diff --git a/doc/contributors.rst b/doc/contributors.rst index 272b81d6ae..4a7f5424b1 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -102,3 +102,4 @@ The following is a list of people who have contributed to - Ivan Lukyanchikov (ilukyanchikov) - Terry Patterson - Romain Morotti +- Navjot Singh (navjots18) diff --git a/test/test_bson.py b/test/test_bson.py index b431f700dc..e550b538d3 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1099,6 +1099,19 @@ def __repr__(self): ): encode({"t": Wrapper(1)}) + def test_doc_in_invalid_document_error_message(self): + class Wrapper: + def __init__(self, val): + self.val = val + + def __repr__(self): + return repr(self.val) + + self.assertEqual("1", repr(Wrapper(1))) + doc = {"t": Wrapper(1)} + with self.assertRaisesRegex(InvalidDocument, f"Invalid document {doc}"): + encode(doc) + class TestCodecOptions(unittest.TestCase): def test_document_class(self): From ce1c49a668a2dce46c8faafaa3f4be9adfaf4b90 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 3 Dec 2024 14:29:23 -0600 Subject: [PATCH 1638/2111] PYTHON-4646 Improve usage of hatch in evergreen (#2025) --- .evergreen/config.yml | 2 +- .evergreen/generated_configs/variants.yml | 9 --- .evergreen/hatch.sh | 46 +--------------- .evergreen/install-dependencies.sh | 3 + .evergreen/run-mongodb-aws-ecs-test.sh | 6 +- .evergreen/scripts/configure-env.sh | 4 +- .evergreen/scripts/ensure-hatch.sh | 55 +++++++++++++++++++ .evergreen/scripts/generate_config.py | 7 +-- .../scripts/run-enterprise-auth-tests.sh | 3 +- .evergreen/scripts/run-tests.sh | 1 - .gitignore | 2 + 11 files changed, 74 insertions(+), 64 deletions(-) create mode 100755 .evergreen/scripts/ensure-hatch.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac89270d84..5c0e2983ea 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -42,7 +42,7 @@ functions: # Make an evergreen expansion file with dynamic values - command: subprocess.exec params: - include_expansions_in_env: ["is_patch", "project", "version_id", "AUTH", "SSL", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "SETDEFAULTENCODING", "test_loadbalancer", "test_serverless", "SKIP_CSOT_TESTS", "MONGODB_STARTED", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", "COVERAGE", "COMPRESSORS", "TEST_SUITES", "MONGODB_API_VERSION", "SKIP_HATCH", "skip_crypt_shared", "VERSION", "TOPOLOGY", "STORAGE_ENGINE", "ORCHESTRATION_FILE", "REQUIRE_API_VERSION", "LOAD_BALANCER", "skip_web_identity_auth_test", "skip_ECS_auth_test"] + include_expansions_in_env: ["is_patch", "project", "version_id", "AUTH", "SSL", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "SETDEFAULTENCODING", "test_loadbalancer", "test_serverless", "SKIP_CSOT_TESTS", "MONGODB_STARTED", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", "COVERAGE", "COMPRESSORS", "TEST_SUITES", "MONGODB_API_VERSION", "skip_crypt_shared", "VERSION", "TOPOLOGY", "STORAGE_ENGINE", "ORCHESTRATION_FILE", "REQUIRE_API_VERSION", "LOAD_BALANCER", "skip_web_identity_auth_test", "skip_ECS_auth_test"] binary: bash working_dir: "src" args: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 928347f567..226f4238f2 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -8,7 +8,6 @@ buildvariants: - rhel79-small batchtime: 10080 expansions: - SKIP_HATCH: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: other-hosts-rhel9-fips tasks: @@ -17,8 +16,6 @@ buildvariants: run_on: - rhel92-fips batchtime: 10080 - expansions: - SKIP_HATCH: "true" - name: other-hosts-rhel8-zseries tasks: - name: .6.0 .standalone !.sync_async @@ -26,8 +23,6 @@ buildvariants: run_on: - rhel8-zseries-small batchtime: 10080 - expansions: - SKIP_HATCH: "true" - name: other-hosts-rhel8-power8 tasks: - name: .6.0 .standalone !.sync_async @@ -35,8 +30,6 @@ buildvariants: run_on: - rhel8-power-small batchtime: 10080 - expansions: - SKIP_HATCH: "true" - name: other-hosts-rhel8-arm64 tasks: - name: .6.0 .standalone !.sync_async @@ -44,8 +37,6 @@ buildvariants: run_on: - rhel82-arm64-small batchtime: 10080 - expansions: - SKIP_HATCH: "true" # Atlas connect tests - name: atlas-connect-rhel8-python3.9 diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh index 98cd9ed734..c01dfcd19e 100755 --- a/.evergreen/hatch.sh +++ b/.evergreen/hatch.sh @@ -1,45 +1,5 @@ #!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -set -x +set -eu -. .evergreen/utils.sh - -if [ -z "$PYTHON_BINARY" ]; then - PYTHON_BINARY=$(find_python3) -fi - -# Check if we should skip hatch and run the tests directly. -if [ -n "$SKIP_HATCH" ]; then - ENV_NAME=testenv-$RANDOM - createvirtualenv "$PYTHON_BINARY" $ENV_NAME - # shellcheck disable=SC2064 - trap "deactivate; rm -rf $ENV_NAME" EXIT HUP - python -m pip install -e ".[test]" - run_hatch() { - bash ./.evergreen/run-tests.sh - } -else # Set up virtualenv before installing hatch - # Use a random venv name because the encryption tasks run this script multiple times in the same run. - ENV_NAME=hatchenv-$RANDOM - createvirtualenv "$PYTHON_BINARY" $ENV_NAME - # shellcheck disable=SC2064 - trap "deactivate; rm -rf $ENV_NAME" EXIT HUP - python -m pip install -q hatch - - # Ensure hatch does not write to user or global locations. - touch hatch_config.toml - HATCH_CONFIG=$(pwd)/hatch_config.toml - if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") - fi - export HATCH_CONFIG - hatch config restore - hatch config set dirs.data "$(pwd)/.hatch/data" - hatch config set dirs.cache "$(pwd)/.hatch/cache" - - run_hatch() { - python -m hatch run "$@" - } -fi - -run_hatch "${@:1}" +. .evergreen/scripts/ensure-hatch.sh +hatch run "$@" diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh index 9f4bcdbb59..4c0541a4e2 100755 --- a/.evergreen/install-dependencies.sh +++ b/.evergreen/install-dependencies.sh @@ -8,6 +8,9 @@ cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ # Replace MongoOrchestration's client certificate. cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem +# Ensure hatch is installed. +bash ${PROJECT_DIRECTORY}/scripts/ensure-hatch.sh + if [ -w /etc/hosts ]; then SUDO="" else diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 3905a08764..3189a6cc6c 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -22,13 +22,13 @@ set -o xtrace # Install python with pip. PYTHON_VER="python3.9" -apt-get update -apt-get install $PYTHON_VER python3-pip build-essential $PYTHON_VER-dev -y +apt-get -qq update < /dev/null > /dev/null +apt-get -qq install $PYTHON_VER $PYTHON_VER-venv build-essential $PYTHON_VER-dev -y < /dev/null > /dev/null export PYTHON_BINARY=$PYTHON_VER export TEST_AUTH_AWS=1 export AUTH="auth" export SET_XTRACE_ON=1 cd src -$PYTHON_BINARY -m pip install -q --user hatch +rm -rf .venv bash .evergreen/hatch.sh test:test-eg diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 3c0a0436de..313f4c3c92 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -11,11 +11,13 @@ fi PROJECT_DIRECTORY="$(pwd)" DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" +CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) + CARGO_HOME=$(cygpath -m $CARGO_HOME) fi SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" @@ -47,13 +49,13 @@ export NO_EXT="${NO_EXT:-}" export COVERAGE="${COVERAGE:-}" export COMPRESSORS="${COMPRESSORS:-}" export MONGODB_API_VERSION="${MONGODB_API_VERSION:-}" -export SKIP_HATCH="${SKIP_HATCH:-}" export skip_crypt_shared="${skip_crypt_shared:-}" export STORAGE_ENGINE="${STORAGE_ENGINE:-}" export REQUIRE_API_VERSION="${REQUIRE_API_VERSION:-}" export skip_web_identity_auth_test="${skip_web_identity_auth_test:-}" export skip_ECS_auth_test="${skip_ECS_auth_test:-}" +export CARGO_HOME="$CARGO_HOME" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" export PATH="$MONGODB_BINARIES:$PATH" # shellcheck disable=SC2154 diff --git a/.evergreen/scripts/ensure-hatch.sh b/.evergreen/scripts/ensure-hatch.sh new file mode 100755 index 0000000000..a57b705127 --- /dev/null +++ b/.evergreen/scripts/ensure-hatch.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +set -eu + +# Ensure hatch is available. +if [ ! -x "$(command -v hatch)" ]; then + # Install a virtual env with "hatch" + # Ensure there is a python venv. + . .evergreen/utils.sh + + if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) + fi + VENV_DIR=.venv + if [ ! -d $VENV_DIR ]; then + echo "Creating virtual environment..." + createvirtualenv "$PYTHON_BINARY" .venv + echo "Creating virtual environment... done." + fi + if [ -f $VENV_DIR/Scripts/activate ]; then + . $VENV_DIR/Scripts/activate + else + . $VENV_DIR/bin/activate + fi + + python --version + + echo "Installing hatch..." + python -m pip install -U pip + python -m pip install hatch || { + # Install rust and try again. + CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} + # Handle paths on Windows. + if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin + CARGO_HOME=$(cygpath -m $CARGO_HOME) + fi + export RUSTUP_HOME="${CARGO_HOME}/.rustup" + ${DRIVERS_TOOLS}/.evergreen/install-rust.sh + source "${CARGO_HOME}/env" + python -m pip install hatch + } + # Ensure hatch does not write to user or global locations. + touch hatch_config.toml + HATCH_CONFIG=$(pwd)/hatch_config.toml + if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin + HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") + fi + export HATCH_CONFIG + hatch config restore + hatch config set dirs.data "$(pwd)/.hatch/data" + hatch config set dirs.cache "$(pwd)/.hatch/cache" + + echo "Installing hatch... done." +fi +hatch --version diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b7187b50db..c7f55fa946 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -744,7 +744,6 @@ def create_aws_auth_variants(): def create_alternative_hosts_variants(): - expansions = dict(SKIP_HATCH="true") batchtime = BATCHTIME_WEEK variants = [] @@ -752,11 +751,10 @@ def create_alternative_hosts_variants(): variants.append( create_variant( [".5.0 .standalone !.sync_async"], - get_display_name("OpenSSL 1.0.2", host, python=CPYTHONS[0], **expansions), + get_display_name("OpenSSL 1.0.2", host, python=CPYTHONS[0]), host=host, python=CPYTHONS[0], batchtime=batchtime, - expansions=expansions, ) ) @@ -765,8 +763,7 @@ def create_alternative_hosts_variants(): variants.append( create_variant( [".6.0 .standalone !.sync_async"], - display_name=get_display_name("Other hosts", host, **expansions), - expansions=expansions, + display_name=get_display_name("Other hosts", host), batchtime=batchtime, host=host, ) diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh index 31371ead45..11f8db22e1 100755 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -2,5 +2,6 @@ # Disable xtrace for security reasons (just in case it was accidentally set). set +x -bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/enterprise_auth +# Use the default python to bootstrap secrets. +PYTHON_BINARY="" bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/enterprise_auth TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg diff --git a/.evergreen/scripts/run-tests.sh b/.evergreen/scripts/run-tests.sh index 495db83e70..6986a0bbee 100755 --- a/.evergreen/scripts/run-tests.sh +++ b/.evergreen/scripts/run-tests.sh @@ -51,5 +51,4 @@ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ TEST_DATA_LAKE=${TEST_DATA_LAKE:-} \ TEST_SUITES=${TEST_SUITES:-} \ MONGODB_API_VERSION=${MONGODB_API_VERSION} \ - SKIP_HATCH=${SKIP_HATCH} \ bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg diff --git a/.gitignore b/.gitignore index 69dd20efa3..e4587125e8 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,8 @@ secrets-export.sh libmongocrypt.tar.gz libmongocrypt/ libmongocrypt_git/ +hatch_config.toml +.venv # Lambda temp files test/lambda/.aws-sam From ff2f95987f945fed483dcf802082ac3a173eb905 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 3 Dec 2024 16:16:47 -0800 Subject: [PATCH 1639/2111] PYTHON-2560 Retry KMS requests on transient errors (#2024) --- pymongo/asynchronous/encryption.py | 64 +++++++++++++++------ pymongo/synchronous/encryption.py | 64 +++++++++++++++------ test/asynchronous/test_encryption.py | 86 +++++++++++++++++++++++++++- test/test_encryption.py | 86 +++++++++++++++++++++++++++- 4 files changed, 260 insertions(+), 40 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 4802c3f54e..1cf165e6a2 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -19,6 +19,7 @@ import contextlib import enum import socket +import time as time # noqa: PLC0414 # needed in sync version import uuid import weakref from copy import deepcopy @@ -63,7 +64,11 @@ from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.database import AsyncDatabase from pymongo.asynchronous.mongo_client import AsyncMongoClient -from pymongo.asynchronous.pool import _configured_socket, _raise_connection_failure +from pymongo.asynchronous.pool import ( + _configured_socket, + _get_timeout_details, + _raise_connection_failure, +) from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts @@ -72,7 +77,7 @@ EncryptedCollectionError, EncryptionError, InvalidOperation, - PyMongoError, + NetworkTimeout, ServerSelectionTimeoutError, ) from pymongo.network_layer import BLOCKING_IO_ERRORS, async_sendall @@ -88,6 +93,9 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + _IS_SYNC = False @@ -103,6 +111,13 @@ _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) +async def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: + try: + return await _configured_socket(address, opts) + except Exception as exc: + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + + @contextlib.contextmanager def _wrap_encryption_errors() -> Iterator[None]: """Context manager to wrap encryption related errors.""" @@ -166,8 +181,8 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: None, # crlfile False, # allow_invalid_certificates False, # allow_invalid_hostnames - False, - ) # disable_ocsp_endpoint_check + False, # disable_ocsp_endpoint_check + ) # CSOT: set timeout for socket creation. connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) opts = PoolOptions( @@ -175,9 +190,13 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: socket_timeout=connect_timeout, ssl_context=ctx, ) - host, port = parse_host(endpoint, _HTTPS_PORT) + address = parse_host(endpoint, _HTTPS_PORT) + sleep_u = kms_context.usleep + if sleep_u: + sleep_sec = float(sleep_u) / 1e6 + await asyncio.sleep(sleep_sec) try: - conn = await _configured_socket((host, port), opts) + conn = await _connect_kms(address, opts) try: await async_sendall(conn, message) while kms_context.bytes_needed > 0: @@ -194,20 +213,29 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: if not data: raise OSError("KMS connection closed") kms_context.feed(data) - # Async raises an OSError instead of returning empty bytes - except OSError as err: - raise OSError("KMS connection closed") from err - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") from None + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + # Wrap I/O errors in PyMongo exceptions. + if isinstance(exc, BLOCKING_IO_ERRORS): + exc = socket.timeout("timed out") + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) finally: conn.close() - except (PyMongoError, MongoCryptError): - raise # Propagate pymongo errors directly. - except asyncio.CancelledError: - raise - except Exception as error: - # Wrap I/O errors in PyMongo exceptions. - _raise_connection_failure((host, port), error) + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + remaining = _csot.remaining() + if isinstance(exc, NetworkTimeout) or (remaining is not None and remaining <= 0): + raise + # Mark this attempt as failed and defer to libmongocrypt to retry. + try: + kms_context.fail() + except MongoCryptError as final_err: + exc = MongoCryptError( + f"{final_err}, last attempt failed with: {exc}", final_err.code + ) + raise exc from final_err async def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: """Get the collection info for a namespace. diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 09d0c0f2fd..ef49855059 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -19,6 +19,7 @@ import contextlib import enum import socket +import time as time # noqa: PLC0414 # needed in sync version import uuid import weakref from copy import deepcopy @@ -67,7 +68,7 @@ EncryptedCollectionError, EncryptionError, InvalidOperation, - PyMongoError, + NetworkTimeout, ServerSelectionTimeoutError, ) from pymongo.network_layer import BLOCKING_IO_ERRORS, sendall @@ -80,7 +81,11 @@ from pymongo.synchronous.cursor import Cursor from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient -from pymongo.synchronous.pool import _configured_socket, _raise_connection_failure +from pymongo.synchronous.pool import ( + _configured_socket, + _get_timeout_details, + _raise_connection_failure, +) from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser import parse_host from pymongo.write_concern import WriteConcern @@ -88,6 +93,9 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + _IS_SYNC = True @@ -103,6 +111,13 @@ _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) +def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: + try: + return _configured_socket(address, opts) + except Exception as exc: + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + + @contextlib.contextmanager def _wrap_encryption_errors() -> Iterator[None]: """Context manager to wrap encryption related errors.""" @@ -166,8 +181,8 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: None, # crlfile False, # allow_invalid_certificates False, # allow_invalid_hostnames - False, - ) # disable_ocsp_endpoint_check + False, # disable_ocsp_endpoint_check + ) # CSOT: set timeout for socket creation. connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) opts = PoolOptions( @@ -175,9 +190,13 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: socket_timeout=connect_timeout, ssl_context=ctx, ) - host, port = parse_host(endpoint, _HTTPS_PORT) + address = parse_host(endpoint, _HTTPS_PORT) + sleep_u = kms_context.usleep + if sleep_u: + sleep_sec = float(sleep_u) / 1e6 + time.sleep(sleep_sec) try: - conn = _configured_socket((host, port), opts) + conn = _connect_kms(address, opts) try: sendall(conn, message) while kms_context.bytes_needed > 0: @@ -194,20 +213,29 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: if not data: raise OSError("KMS connection closed") kms_context.feed(data) - # Async raises an OSError instead of returning empty bytes - except OSError as err: - raise OSError("KMS connection closed") from err - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") from None + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + # Wrap I/O errors in PyMongo exceptions. + if isinstance(exc, BLOCKING_IO_ERRORS): + exc = socket.timeout("timed out") + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) finally: conn.close() - except (PyMongoError, MongoCryptError): - raise # Propagate pymongo errors directly. - except asyncio.CancelledError: - raise - except Exception as error: - # Wrap I/O errors in PyMongo exceptions. - _raise_connection_failure((host, port), error) + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + remaining = _csot.remaining() + if isinstance(exc, NetworkTimeout) or (remaining is not None and remaining <= 0): + raise + # Mark this attempt as failed and defer to libmongocrypt to retry. + try: + kms_context.fail() + except MongoCryptError as final_err: + exc = MongoCryptError( + f"{final_err}, last attempt failed with: {exc}", final_err.code + ) + raise exc from final_err def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: """Get the collection info for a namespace. diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 21cd5e2666..559b06ddf4 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -17,6 +17,8 @@ import base64 import copy +import http.client +import json import os import pathlib import re @@ -91,6 +93,7 @@ WriteError, ) from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern _IS_SYNC = False @@ -1366,9 +1369,8 @@ async def test_04_aws_endpoint_invalid_port(self): "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:12345", } - with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345") as ctx: + with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345"): await self.client_encryption.create_data_key("aws", master_key=master_key) - self.assertIsInstance(ctx.exception.cause, AutoReconnect) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") async def test_05_aws_endpoint_wrong_region(self): @@ -2853,6 +2855,86 @@ async def test_accepts_trim_factor_0(self): assert len(payload) > len(self.payload_defaults) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#24-kms-retry-tests +class TestKmsRetryProse(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + # 1, create client with only tlsCAFile. + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9003" + providers["gcp"]["endpoint"] = "127.0.0.1:9003" + kms_tls_opts = { + p: {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} for p in providers + } + self.client_encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + + async def http_post(self, path, data=None): + # Note, the connection to the mock server needs to be closed after + # each request because the server is single threaded. + ctx: ssl.SSLContext = get_ssl_context( + CLIENT_PEM, # certfile + None, # passphrase + CA_PEM, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, # disable_ocsp_endpoint_check + ) + conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) + try: + if data is not None: + headers = {"Content-type": "application/json"} + body = json.dumps(data) + else: + headers = {} + body = None + conn.request("POST", path, body, headers) + res = conn.getresponse() + res.read() + finally: + conn.close() + + async def _test(self, provider, master_key): + await self.http_post("/reset") + # Case 1: createDataKey and encrypt with TCP retry + await self.http_post("/set_failpoint/network", {"count": 1}) + key_id = await self.client_encryption.create_data_key(provider, master_key=master_key) + await self.http_post("/set_failpoint/network", {"count": 1}) + await self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 2: createDataKey and encrypt with HTTP retry + await self.http_post("/set_failpoint/http", {"count": 1}) + key_id = await self.client_encryption.create_data_key(provider, master_key=master_key) + await self.http_post("/set_failpoint/http", {"count": 1}) + await self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 3: createDataKey fails after too many retries + await self.http_post("/set_failpoint/network", {"count": 4}) + with self.assertRaisesRegex(EncryptionError, "KMS request failed after"): + await self.client_encryption.create_data_key(provider, master_key=master_key) + + async def test_kms_retry(self): + await self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) + await self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) + await self._test( + "gcp", + { + "projectId": "foo", + "location": "bar", + "keyRing": "baz", + "keyName": "qux", + "endpoint": "127.0.0.1:9003", + }, + ) + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(AsyncEncryptionIntegrationTest): @async_client_context.require_no_standalone diff --git a/test/test_encryption.py b/test/test_encryption.py index 18e21fe6a7..7a9929b7fd 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -17,6 +17,8 @@ import base64 import copy +import http.client +import json import os import pathlib import re @@ -88,6 +90,7 @@ WriteError, ) from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.ssl_support import get_ssl_context from pymongo.synchronous import encryption from pymongo.synchronous.encryption import Algorithm, ClientEncryption, QueryType from pymongo.synchronous.mongo_client import MongoClient @@ -1360,9 +1363,8 @@ def test_04_aws_endpoint_invalid_port(self): "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), "endpoint": "kms.us-east-1.amazonaws.com:12345", } - with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345") as ctx: + with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345"): self.client_encryption.create_data_key("aws", master_key=master_key) - self.assertIsInstance(ctx.exception.cause, AutoReconnect) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): @@ -2835,6 +2837,86 @@ def test_accepts_trim_factor_0(self): assert len(payload) > len(self.payload_defaults) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#24-kms-retry-tests +class TestKmsRetryProse(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def setUp(self): + super().setUp() + # 1, create client with only tlsCAFile. + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9003" + providers["gcp"]["endpoint"] = "127.0.0.1:9003" + kms_tls_opts = { + p: {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} for p in providers + } + self.client_encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + + def http_post(self, path, data=None): + # Note, the connection to the mock server needs to be closed after + # each request because the server is single threaded. + ctx: ssl.SSLContext = get_ssl_context( + CLIENT_PEM, # certfile + None, # passphrase + CA_PEM, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, # disable_ocsp_endpoint_check + ) + conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) + try: + if data is not None: + headers = {"Content-type": "application/json"} + body = json.dumps(data) + else: + headers = {} + body = None + conn.request("POST", path, body, headers) + res = conn.getresponse() + res.read() + finally: + conn.close() + + def _test(self, provider, master_key): + self.http_post("/reset") + # Case 1: createDataKey and encrypt with TCP retry + self.http_post("/set_failpoint/network", {"count": 1}) + key_id = self.client_encryption.create_data_key(provider, master_key=master_key) + self.http_post("/set_failpoint/network", {"count": 1}) + self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 2: createDataKey and encrypt with HTTP retry + self.http_post("/set_failpoint/http", {"count": 1}) + key_id = self.client_encryption.create_data_key(provider, master_key=master_key) + self.http_post("/set_failpoint/http", {"count": 1}) + self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 3: createDataKey fails after too many retries + self.http_post("/set_failpoint/network", {"count": 4}) + with self.assertRaisesRegex(EncryptionError, "KMS request failed after"): + self.client_encryption.create_data_key(provider, master_key=master_key) + + def test_kms_retry(self): + self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) + self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) + self._test( + "gcp", + { + "projectId": "foo", + "location": "bar", + "keyRing": "baz", + "keyName": "qux", + "endpoint": "127.0.0.1:9003", + }, + ) + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): @client_context.require_no_standalone From 89852ba7046a3020075e4008c5b3df25a3c0889f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 4 Dec 2024 07:24:28 -0600 Subject: [PATCH 1640/2111] PYTHON-5001 Fix import time check (#2027) --- .evergreen/run-import-time-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/run-import-time-test.sh b/.evergreen/run-import-time-test.sh index 2b17f5ffeb..e9f6161bcc 100755 --- a/.evergreen/run-import-time-test.sh +++ b/.evergreen/run-import-time-test.sh @@ -25,9 +25,9 @@ function get_import_time() { } get_import_time $HEAD_SHA -git stash +git stash || true git checkout $BASE_SHA get_import_time $BASE_SHA git checkout $HEAD_SHA -git stash apply +git stash apply || true python tools/compare_import_time.py $HEAD_SHA $BASE_SHA From 11287e12570efeb86f4fb927d3170fa8e2cc59df Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 4 Dec 2024 09:18:13 -0600 Subject: [PATCH 1641/2111] PYTHON-5004 Fix handling of TEST_PATH (#2028) --- test/asynchronous/unified_format.py | 1 + test/unified_format.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index b18b09383e..52d964eb3e 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -442,6 +442,7 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any + TEST_PATH = "" # This gets filled in by generate_test_classes mongos_clients: list[AsyncMongoClient] = [] @staticmethod diff --git a/test/unified_format.py b/test/unified_format.py index 5cb268a29d..372eb8abba 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -441,6 +441,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any + TEST_PATH = "" # This gets filled in by generate_test_classes mongos_clients: list[MongoClient] = [] @staticmethod From dc34833d97d1a97ac38fc03117a84ad5f87220c6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 4 Dec 2024 09:26:29 -0600 Subject: [PATCH 1642/2111] PYTHON-5005 Skip more csot tests where applicable (#2029) --- test/asynchronous/test_cursor.py | 5 +++++ test/test_csot.py | 4 ++++ test/test_cursor.py | 5 +++++ 3 files changed, 14 insertions(+) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index d216479451..d843ffb4aa 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -18,6 +18,7 @@ import copy import gc import itertools +import os import random import re import sys @@ -1412,6 +1413,8 @@ async def test_to_list_length(self): self.assertEqual(len(docs), 2) async def test_to_list_csot_applied(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = await self.async_single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey @@ -1453,6 +1456,8 @@ async def test_command_cursor_to_list_length(self): @async_client_context.require_failCommand_blockConnection async def test_command_cursor_to_list_csot_applied(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = await self.async_single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey diff --git a/test/test_csot.py b/test/test_csot.py index 64210b4d64..c075a07d5a 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -39,6 +39,8 @@ class TestCSOT(IntegrationTest): RUN_ON_LOAD_BALANCER = True def test_timeout_nested(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") coll = self.db.coll self.assertEqual(_csot.get_timeout(), None) self.assertEqual(_csot.get_deadline(), float("inf")) @@ -76,6 +78,8 @@ def test_timeout_nested(self): @client_context.require_change_streams def test_change_stream_can_resume_after_timeouts(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") coll = self.db.test coll.insert_one({}) with coll.watch() as stream: diff --git a/test/test_cursor.py b/test/test_cursor.py index bcc7ed75f1..84e431f8cb 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -18,6 +18,7 @@ import copy import gc import itertools +import os import random import re import sys @@ -1403,6 +1404,8 @@ def test_to_list_length(self): self.assertEqual(len(docs), 2) def test_to_list_csot_applied(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = self.single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey @@ -1444,6 +1447,8 @@ def test_command_cursor_to_list_length(self): @client_context.require_failCommand_blockConnection def test_command_cursor_to_list_csot_applied(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = self.single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey From 5204e87ca235ffbc34446dd4f8ba1c6b8d571e17 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 4 Dec 2024 09:35:06 -0600 Subject: [PATCH 1643/2111] PYTHON-5002 Add guard to synchro hook to accidental overwrite (#2026) --- .pre-commit-config.yaml | 1 + CONTRIBUTING.md | 12 ++++++++++++ tools/synchro.py | 15 +++++++++++++++ tools/synchro.sh | 6 ++++-- 4 files changed, 32 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4f6759bc5a..a0b06ab0dc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,6 +24,7 @@ repos: entry: bash ./tools/synchro.sh language: python require_serial: true + fail_fast: true additional_dependencies: - ruff==0.1.3 - unasync diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7516fbc9ed..814e040048 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -246,6 +246,18 @@ you are attempting to validate new spec tests in PyMongo. Follow the [Python Driver Release Process Wiki](https://wiki.corp.mongodb.com/display/DRIVERS/Python+Driver+Release+Process). +## Asyncio considerations + +PyMongo adds asyncio capability by modifying the source files in `*/asynchronous` to `*/synchronous` using +[unasync](https://github.com/python-trio/unasync/) and some custom transforms. + +Where possible, edit the code in `*/asynchronous/*.py` and not the synchronous files. +You can run `pre-commit run --all-files synchro` before running tests if you are testing synchronous code. + +To prevent the `synchro` hook from accidentally overwriting code, it first checks to see whether a sync version +of a file is changing and not its async counterpart, and will fail. +In the unlikely scenario that you want to override this behavior, first export `OVERRIDE_SYNCHRO_CHECK=1`. + ## Converting a test to async The `tools/convert_test_to_async.py` script takes in an existing synchronous test file and outputs a partially-converted asynchronous version of the same name to the `test/asynchronous` directory. diff --git a/tools/synchro.py b/tools/synchro.py index 47617365f4..577e82d14e 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -19,7 +19,9 @@ from __future__ import annotations +import os import re +import sys from os import listdir from pathlib import Path @@ -356,6 +358,19 @@ def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[ def main() -> None: + modified_files = [f"./{f}" for f in sys.argv[1:]] + errored = False + for fname in async_files + gridfs_files: + # If the async file was modified, we don't need to check if the sync file was also modified. + if str(fname) in modified_files: + continue + sync_name = str(fname).replace("asynchronous", "synchronous") + if sync_name in modified_files and "OVERRIDE_SYNCHRO_CHECK" not in os.environ: + print(f"Refusing to overwrite {sync_name}") + errored = True + if errored: + raise ValueError("Aborting synchro due to errors") + unasync_directory(async_files, _pymongo_base, _pymongo_dest_base, replacements) unasync_directory(gridfs_files, _gridfs_base, _gridfs_dest_base, replacements) unasync_directory(test_files, _test_base, _test_dest_base, replacements) diff --git a/tools/synchro.sh b/tools/synchro.sh index 2887509fe9..51c51a9548 100755 --- a/tools/synchro.sh +++ b/tools/synchro.sh @@ -1,5 +1,7 @@ -#!/bin/bash -eu +#!/bin/bash -python ./tools/synchro.py +set -eu + +python ./tools/synchro.py "$@" python -m ruff check pymongo/synchronous/ gridfs/synchronous/ test/ --fix --silent python -m ruff format pymongo/synchronous/ gridfs/synchronous/ test/ --silent From 1b89da4829945359ae99ecfb579104d2d28f7b27 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 4 Dec 2024 10:53:27 -0600 Subject: [PATCH 1644/2111] PYTHON-5006 Skip test_kms_retry when using PyOpenSSL (#2030) --- test/asynchronous/test_encryption.py | 9 +++++++++ test/test_encryption.py | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 559b06ddf4..48f791ac16 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -42,6 +42,11 @@ from pymongo.asynchronous.helpers import anext from pymongo.daemon import _spawn_daemon +try: + from pymongo.pyopenssl_context import IS_PYOPENSSL +except ImportError: + IS_PYOPENSSL = False + sys.path[0:0] = [""] from test import ( @@ -2921,6 +2926,10 @@ async def _test(self, provider, master_key): await self.client_encryption.create_data_key(provider, master_key=master_key) async def test_kms_retry(self): + if IS_PYOPENSSL: + self.skipTest( + "PyOpenSSL does not support a required method for this test, Connection.makefile" + ) await self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) await self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) await self._test( diff --git a/test/test_encryption.py b/test/test_encryption.py index 7a9929b7fd..daa5fd5d4c 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -42,6 +42,11 @@ from pymongo.synchronous.collection import Collection from pymongo.synchronous.helpers import next +try: + from pymongo.pyopenssl_context import IS_PYOPENSSL +except ImportError: + IS_PYOPENSSL = False + sys.path[0:0] = [""] from test import ( @@ -2903,6 +2908,10 @@ def _test(self, provider, master_key): self.client_encryption.create_data_key(provider, master_key=master_key) def test_kms_retry(self): + if IS_PYOPENSSL: + self.skipTest( + "PyOpenSSL does not support a required method for this test, Connection.makefile" + ) self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) self._test( From 30e4cceb24f9c34f4bfb4ba3d00293f98b33b1f6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 6 Dec 2024 08:12:30 -0600 Subject: [PATCH 1645/2111] PYTHON-5008 Do not build c extensions on other hosts (#2031) --- .evergreen/generated_configs/variants.yml | 8 ++++++++ .evergreen/scripts/generate_config.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 226f4238f2..b17a500ade 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -16,6 +16,8 @@ buildvariants: run_on: - rhel92-fips batchtime: 10080 + expansions: + NO_EXT: "1" - name: other-hosts-rhel8-zseries tasks: - name: .6.0 .standalone !.sync_async @@ -23,6 +25,8 @@ buildvariants: run_on: - rhel8-zseries-small batchtime: 10080 + expansions: + NO_EXT: "1" - name: other-hosts-rhel8-power8 tasks: - name: .6.0 .standalone !.sync_async @@ -30,6 +34,8 @@ buildvariants: run_on: - rhel8-power-small batchtime: 10080 + expansions: + NO_EXT: "1" - name: other-hosts-rhel8-arm64 tasks: - name: .6.0 .standalone !.sync_async @@ -37,6 +43,8 @@ buildvariants: run_on: - rhel82-arm64-small batchtime: 10080 + expansions: + NO_EXT: "1" # Atlas connect tests - name: atlas-connect-rhel8-python3.9 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index c7f55fa946..1637ae9711 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -758,6 +758,8 @@ def create_alternative_hosts_variants(): ) ) + expansions = dict() + handle_c_ext(C_EXTS[0], expansions) for host_name in OTHER_HOSTS: host = HOSTS[host_name] variants.append( @@ -766,6 +768,7 @@ def create_alternative_hosts_variants(): display_name=get_display_name("Other hosts", host), batchtime=batchtime, host=host, + expansions=expansions, ) ) return variants From d2fe1ed1542632b0fd8d49b6525a469afb2b6146 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 15:07:34 -0600 Subject: [PATCH 1646/2111] Bump pyright from 1.1.389 to 1.1.390 (#2032) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 613eba7645..404fe00748 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.13.0 -pyright==1.1.389 +pyright==1.1.390 typing_extensions -r ./encryption.txt -r ./ocsp.txt From 2d21035396f63437176965cc8f157505189f2f08 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Fri, 20 Dec 2024 12:58:08 -0500 Subject: [PATCH 1647/2111] PYTHON-2187 Remove easy_install from documentation (#2033) --- doc/examples/authentication.rst | 2 +- doc/installation.rst | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index b319df814c..a92222bafc 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -139,7 +139,7 @@ Unix ~~~~ To authenticate using GSSAPI you must first install the python `kerberos`_ or -`pykerberos`_ module using easy_install or pip. Make sure you run kinit before +`pykerberos`_ module using pip. Make sure you run kinit before using the following authentication methods:: $ kinit mongodbuser@EXAMPLE.COM diff --git a/doc/installation.rst b/doc/installation.rst index dd8eb6ab42..f21a3792ad 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -6,7 +6,7 @@ Installing / Upgrading `_. .. warning:: **Do not install the "bson" package from pypi.** PyMongo comes - with its own bson package; doing "pip install bson" or "easy_install bson" + with its own bson package; doing "pip install bson" installs a third-party package that is incompatible with PyMongo. Installing with pip @@ -134,7 +134,7 @@ Python to fail to build the C extensions if you have Xcode 4 installed. There is a workaround:: # For some Python builds from python.org - $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m easy_install pymongo + $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m pip install pymongo See `http://bugs.python.org/issue11623 `_ for a more detailed explanation. @@ -152,15 +152,9 @@ This may cause C extension builds to fail with an error similar to:: There are workarounds:: # Apple specified workaround for Xcode 5.1 - # easy_install - $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future easy_install pymongo - # or pip $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install pymongo # Alternative workaround using CFLAGS - # easy_install - $ CFLAGS=-Qunused-arguments easy_install pymongo - # or pip $ CFLAGS=-Qunused-arguments pip install pymongo From bdaf43c53df07a97ad9bb7f8f091b66bc7b82b60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Dec 2024 08:18:52 -0600 Subject: [PATCH 1648/2111] Bump pyright from 1.1.390 to 1.1.391 (#2035) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 404fe00748..189f4f8719 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.13.0 -pyright==1.1.390 +pyright==1.1.391 typing_extensions -r ./encryption.txt -r ./ocsp.txt From b3ce9320f00b94047a0160f739e63b0a34427d8c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 31 Dec 2024 13:26:45 -0600 Subject: [PATCH 1649/2111] PYTHON-5016 Create spawn host helper scripts (#2036) --- .evergreen/install-dependencies.sh | 5 ++--- .evergreen/scripts/configure-env.sh | 10 ++++++---- .evergreen/scripts/prepare-resources.sh | 9 +++++++-- .evergreen/scripts/setup-system.sh | 14 ++++++++++++++ .evergreen/setup-spawn-host.sh | 16 ++++++++++++++++ .evergreen/sync-spawn-host.sh | 13 +++++++++++++ 6 files changed, 58 insertions(+), 9 deletions(-) create mode 100755 .evergreen/scripts/setup-system.sh create mode 100755 .evergreen/setup-spawn-host.sh create mode 100755 .evergreen/sync-spawn-host.sh diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh index 4c0541a4e2..d90ff4ab45 100755 --- a/.evergreen/install-dependencies.sh +++ b/.evergreen/install-dependencies.sh @@ -1,6 +1,5 @@ #!/bin/bash -set -o xtrace # Write all commands first to stderr -set -o errexit # Exit the script with error if any of the commands fail +set -eu # Copy PyMongo's test certificates over driver-evergreen-tools' cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ @@ -9,7 +8,7 @@ cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem # Ensure hatch is installed. -bash ${PROJECT_DIRECTORY}/scripts/ensure-hatch.sh +bash ${PROJECT_DIRECTORY}/.evergreen/scripts/ensure-hatch.sh if [ -w /etc/hosts ]; then SUDO="" diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 313f4c3c92..e0c845a333 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -1,8 +1,10 @@ -#!/bin/bash -eux +#!/bin/bash + +set -eu # Get the current unique version of this checkout # shellcheck disable=SC2154 -if [ "$is_patch" = "true" ]; then +if [ "${is_patch:-}" = "true" ]; then # shellcheck disable=SC2154 CURRENT_VERSION="$(git describe)-patch-$version_id" else @@ -14,7 +16,7 @@ DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory -if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin +if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) CARGO_HOME=$(cygpath -m $CARGO_HOME) @@ -59,7 +61,7 @@ export CARGO_HOME="$CARGO_HOME" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" export PATH="$MONGODB_BINARIES:$PATH" # shellcheck disable=SC2154 -export PROJECT="$project" +export PROJECT="${project:-mongo-python-driver}" export PIP_QUIET=1 EOT diff --git a/.evergreen/scripts/prepare-resources.sh b/.evergreen/scripts/prepare-resources.sh index 33394b55ff..3cfa2c4efd 100755 --- a/.evergreen/scripts/prepare-resources.sh +++ b/.evergreen/scripts/prepare-resources.sh @@ -1,7 +1,10 @@ #!/bin/bash +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd $HERE +. env.sh -. src/.evergreen/scripts/env.sh -set -o xtrace rm -rf $DRIVERS_TOOLS if [ "$PROJECT" = "drivers-tools" ]; then # If this was a patch build, doing a fresh clone would not actually test the patch @@ -10,3 +13,5 @@ else git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS fi echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" >$MONGO_ORCHESTRATION_HOME/orchestration.config + +popd diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh new file mode 100755 index 0000000000..d78d924f6b --- /dev/null +++ b/.evergreen/scripts/setup-system.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" +echo "Setting up system..." +bash .evergreen/scripts/configure-env.sh +source .evergreen/scripts/env.sh +bash .evergreen/scripts/prepare-resources.sh +bash $DRIVERS_TOOLS/.evergreen/setup.sh +bash .evergreen/scripts/install-dependencies.sh +popd +echo "Setting up system... done." diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh new file mode 100755 index 0000000000..4de2153d51 --- /dev/null +++ b/.evergreen/setup-spawn-host.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -eu + +if [ -z "$1" ] + then + echo "Must supply a spawn host URL!" +fi + +target=$1 + +echo "Copying files to $target..." +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver +echo "Copying files to $target... done" + +ssh $target /home/ec2-user/mongo-python-driver/.evergreen/scripts/setup-system.sh diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh new file mode 100755 index 0000000000..4c3e276d41 --- /dev/null +++ b/.evergreen/sync-spawn-host.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [ -z "$1" ] + then + echo "Must supply a spawn host URL!" +fi + +target=$1 + +echo "Syncing files to $target..." +# shellcheck disable=SC2034 +fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver; done +echo "Syncing files to $target... done." From 8d27699e758145655cdb5bfc52b03fb81f3b02bb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 31 Dec 2024 16:29:48 -0600 Subject: [PATCH 1650/2111] PYTHON-3096 Finish implementation and tests for GSSAPI options (#1985) --- .../scripts/run-enterprise-auth-tests.sh | 3 +- pymongo/asynchronous/auth.py | 14 ++-- pymongo/synchronous/auth.py | 14 ++-- test/asynchronous/test_auth.py | 66 +++++++++++++++++-- test/test_auth.py | 66 +++++++++++++++++-- 5 files changed, 144 insertions(+), 19 deletions(-) diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh index 11f8db22e1..7f936b1955 100755 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -1,7 +1,8 @@ #!/bin/bash +set -eu # Disable xtrace for security reasons (just in case it was accidentally set). set +x # Use the default python to bootstrap secrets. -PYTHON_BINARY="" bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/enterprise_auth +bash "${DRIVERS_TOOLS}"/.evergreen/secrets_handling/setup-secrets.sh drivers/enterprise_auth TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py index fc563ec48f..48ce4bbd39 100644 --- a/pymongo/asynchronous/auth.py +++ b/pymongo/asynchronous/auth.py @@ -177,13 +177,20 @@ def _auth_key(nonce: str, username: str, password: str) -> str: return md5hash.hexdigest() -def _canonicalize_hostname(hostname: str) -> str: +def _canonicalize_hostname(hostname: str, option: str | bool) -> str: """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + if option in [False, "none"]: + return hostname + af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME )[0] + # For forward just to resolve the cname as dns.lookup() will not return it. + if option == "forward": + return canonname.lower() + try: name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) except socket.gaierror: @@ -205,9 +212,8 @@ async def _authenticate_gssapi(credentials: MongoCredential, conn: AsyncConnecti props = credentials.mechanism_properties # Starting here and continuing through the while loop below - establish # the security context. See RFC 4752, Section 3.1, first paragraph. - host = conn.address[0] - if props.canonicalize_host_name: - host = _canonicalize_hostname(host) + host = props.service_host or conn.address[0] + host = _canonicalize_hostname(host, props.canonicalize_host_name) service = props.service_name + "@" + host if props.service_realm is not None: service = service + "@" + props.service_realm diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py index 7b370843c5..0e51ff8b7f 100644 --- a/pymongo/synchronous/auth.py +++ b/pymongo/synchronous/auth.py @@ -174,13 +174,20 @@ def _auth_key(nonce: str, username: str, password: str) -> str: return md5hash.hexdigest() -def _canonicalize_hostname(hostname: str) -> str: +def _canonicalize_hostname(hostname: str, option: str | bool) -> str: """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + if option in [False, "none"]: + return hostname + af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME )[0] + # For forward just to resolve the cname as dns.lookup() will not return it. + if option == "forward": + return canonname.lower() + try: name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) except socket.gaierror: @@ -202,9 +209,8 @@ def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None props = credentials.mechanism_properties # Starting here and continuing through the while loop below - establish # the security context. See RFC 4752, Section 3.1, first paragraph. - host = conn.address[0] - if props.canonicalize_host_name: - host = _canonicalize_hostname(host) + host = props.service_host or conn.address[0] + host = _canonicalize_hostname(host, props.canonicalize_host_name) service = props.service_name + "@" + host if props.service_realm is not None: service = service + "@" + props.service_realm diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index 4f26200fb0..08dc4d7247 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -35,7 +35,7 @@ import pytest from pymongo import AsyncMongoClient, monitoring -from pymongo.asynchronous.auth import HAVE_KERBEROS +from pymongo.asynchronous.auth import HAVE_KERBEROS, _canonicalize_hostname from pymongo.auth_shared import _build_credentials_tuple from pymongo.errors import OperationFailure from pymongo.hello import HelloCompat @@ -96,10 +96,11 @@ def setUpClass(cls): cls.service_realm_required = ( GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL ) - mech_properties = f"SERVICE_NAME:{GSSAPI_SERVICE_NAME}" - mech_properties += f",CANONICALIZE_HOST_NAME:{GSSAPI_CANONICALIZE}" + mech_properties = dict( + SERVICE_NAME=GSSAPI_SERVICE_NAME, CANONICALIZE_HOST_NAME=GSSAPI_CANONICALIZE + ) if GSSAPI_SERVICE_REALM is not None: - mech_properties += f",SERVICE_REALM:{GSSAPI_SERVICE_REALM}" + mech_properties["SERVICE_REALM"] = GSSAPI_SERVICE_REALM cls.mech_properties = mech_properties async def test_credentials_hashing(self): @@ -167,7 +168,10 @@ async def test_gssapi_simple(self): await client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. - mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" + mech_properties_str = "" + for key, value in self.mech_properties.items(): + mech_properties_str += f"{key}:{value}," + mech_uri = uri + f"&authMechanismProperties={mech_properties_str[:-1]}" client = self.simple_client(mech_uri) await client[GSSAPI_DB].collection.find_one() @@ -268,6 +272,58 @@ async def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) + async def test_gssapi_canonicalize_host_name(self): + # Test the low level method. + assert GSSAPI_HOST is not None + result = _canonicalize_hostname(GSSAPI_HOST, "forward") + if "compute-1.amazonaws.com" not in result: + self.assertEqual(result, GSSAPI_HOST) + result = _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + self.assertEqual(result, GSSAPI_HOST) + + # Use the equivalent named CANONICALIZE_HOST_NAME. + props = self.mech_properties.copy() + if props["CANONICALIZE_HOST_NAME"] == "true": + props["CANONICALIZE_HOST_NAME"] = "forwardAndReverse" + else: + props["CANONICALIZE_HOST_NAME"] = "none" + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=props, + ) + await client.server_info() + + async def test_gssapi_host_name(self): + props = self.mech_properties + props["SERVICE_HOST"] = "example.com" + + # Authenticate with authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + with self.assertRaises(OperationFailure): + await client.server_info() + + props["SERVICE_HOST"] = GSSAPI_HOST + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + await client.server_info() + class TestSASLPlain(AsyncPyMongoTestCase): @classmethod diff --git a/test/test_auth.py b/test/test_auth.py index 70c061b747..345d16121b 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -40,7 +40,7 @@ from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP -from pymongo.synchronous.auth import HAVE_KERBEROS +from pymongo.synchronous.auth import HAVE_KERBEROS, _canonicalize_hostname _IS_SYNC = True @@ -96,10 +96,11 @@ def setUpClass(cls): cls.service_realm_required = ( GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL ) - mech_properties = f"SERVICE_NAME:{GSSAPI_SERVICE_NAME}" - mech_properties += f",CANONICALIZE_HOST_NAME:{GSSAPI_CANONICALIZE}" + mech_properties = dict( + SERVICE_NAME=GSSAPI_SERVICE_NAME, CANONICALIZE_HOST_NAME=GSSAPI_CANONICALIZE + ) if GSSAPI_SERVICE_REALM is not None: - mech_properties += f",SERVICE_REALM:{GSSAPI_SERVICE_REALM}" + mech_properties["SERVICE_REALM"] = GSSAPI_SERVICE_REALM cls.mech_properties = mech_properties def test_credentials_hashing(self): @@ -167,7 +168,10 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. - mech_uri = uri + f"&authMechanismProperties={self.mech_properties}" + mech_properties_str = "" + for key, value in self.mech_properties.items(): + mech_properties_str += f"{key}:{value}," + mech_uri = uri + f"&authMechanismProperties={mech_properties_str[:-1]}" client = self.simple_client(mech_uri) client[GSSAPI_DB].collection.find_one() @@ -268,6 +272,58 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) + def test_gssapi_canonicalize_host_name(self): + # Test the low level method. + assert GSSAPI_HOST is not None + result = _canonicalize_hostname(GSSAPI_HOST, "forward") + if "compute-1.amazonaws.com" not in result: + self.assertEqual(result, GSSAPI_HOST) + result = _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + self.assertEqual(result, GSSAPI_HOST) + + # Use the equivalent named CANONICALIZE_HOST_NAME. + props = self.mech_properties.copy() + if props["CANONICALIZE_HOST_NAME"] == "true": + props["CANONICALIZE_HOST_NAME"] = "forwardAndReverse" + else: + props["CANONICALIZE_HOST_NAME"] = "none" + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=props, + ) + client.server_info() + + def test_gssapi_host_name(self): + props = self.mech_properties + props["SERVICE_HOST"] = "example.com" + + # Authenticate with authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + with self.assertRaises(OperationFailure): + client.server_info() + + props["SERVICE_HOST"] = GSSAPI_HOST + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + client.server_info() + class TestSASLPlain(PyMongoTestCase): @classmethod From 71ef4e0c35417aeaa1dd794711ec11ec0b2f76e0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 2 Jan 2025 10:12:53 -0600 Subject: [PATCH 1651/2111] PYTHON-5018 Use a single script for system setup (#2041) --- .evergreen/config.yml | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 5c0e2983ea..aa0eee3620 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -46,24 +46,11 @@ functions: binary: bash working_dir: "src" args: - - .evergreen/scripts/configure-env.sh + - .evergreen/scripts/setup-system.sh # Load the expansion file to make an evergreen variable with the current unique version - command: expansions.update params: file: src/expansion.yml - - command: subprocess.exec - params: - include_expansions_in_env: ["PROJECT_DIRECTORY", "DRIVERS_TOOLS"] - binary: bash - args: - - src/.evergreen/scripts/prepare-resources.sh - # Run drivers-evergreen-tools system setup - - command: subprocess.exec - params: - include_expansions_in_env: ["PROJECT_DIRECTORY", "DRIVERS_TOOLS"] - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/setup.sh "upload coverage" : - command: ec2.assume_role @@ -546,15 +533,6 @@ functions: args: - ${DRIVERS_TOOLS}/.evergreen/teardown.sh - "install dependencies": - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/install-dependencies.sh - "assume ec2 role": - command: ec2.assume_role params: @@ -650,7 +628,6 @@ functions: pre: - func: "fetch source" - func: "setup system" - - func: "install dependencies" - func: "assume ec2 role" post: @@ -958,7 +935,6 @@ tasks: - name: "test-aws-lambda-deployed" commands: - - func: "install dependencies" - command: ec2.assume_role params: role_arn: ${LAMBDA_AWS_ROLE_ARN} @@ -1547,8 +1523,6 @@ tasks: - name: testazurekms-fail-task commands: - - func: fetch source - - func: setup system - func: "bootstrap mongo-orchestration" vars: VERSION: "latest" From d2d8f6e29be2d6d2a23f61d2c1af6164b0c9e958 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 2 Jan 2025 10:54:04 -0600 Subject: [PATCH 1652/2111] PYTHON-5019 Fix mod_wsgi tests (#2039) --- .evergreen/install-dependencies.sh | 3 --- .evergreen/scripts/ensure-hatch.sh | 4 ++++ .evergreen/setup-spawn-host.sh | 6 ++++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh index d90ff4ab45..e6dceb33fc 100755 --- a/.evergreen/install-dependencies.sh +++ b/.evergreen/install-dependencies.sh @@ -7,9 +7,6 @@ cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ # Replace MongoOrchestration's client certificate. cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem -# Ensure hatch is installed. -bash ${PROJECT_DIRECTORY}/.evergreen/scripts/ensure-hatch.sh - if [ -w /etc/hosts ]; then SUDO="" else diff --git a/.evergreen/scripts/ensure-hatch.sh b/.evergreen/scripts/ensure-hatch.sh index a57b705127..e63d98bb6d 100755 --- a/.evergreen/scripts/ensure-hatch.sh +++ b/.evergreen/scripts/ensure-hatch.sh @@ -2,6 +2,9 @@ set -eu +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" > /dev/null + # Ensure hatch is available. if [ ! -x "$(command -v hatch)" ]; then # Install a virtual env with "hatch" @@ -53,3 +56,4 @@ if [ ! -x "$(command -v hatch)" ]; then echo "Installing hatch... done." fi hatch --version +popd > /dev/null diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh index 4de2153d51..1a526c762c 100755 --- a/.evergreen/setup-spawn-host.sh +++ b/.evergreen/setup-spawn-host.sh @@ -8,9 +8,11 @@ if [ -z "$1" ] fi target=$1 +remote_dir=/home/ec2-user/mongo-python-driver echo "Copying files to $target..." -rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir echo "Copying files to $target... done" -ssh $target /home/ec2-user/mongo-python-driver/.evergreen/scripts/setup-system.sh +ssh $target $remote_dir/.evergreen/scripts/setup-system.sh +ssh $target "PYTHON_BINARY=${PYTHON_BINARY:-} $remote_dir/.evergreen/scripts/ensure-hatch.sh" From 1b3f04c59996379dbaca8ace17e1b49e88432ce3 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 2 Jan 2025 10:26:33 -0800 Subject: [PATCH 1653/2111] PYTHON-5020 Fix behavior of network timeouts on pyopenssl connections (#2037) --- pymongo/pyopenssl_context.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 50d8680a74..a320e94929 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -125,7 +125,8 @@ def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: try: return call(*args, **kwargs) except BLOCKING_IO_ERRORS as exc: - if is_async: + # Do not retry if the connection is in non-blocking mode. + if is_async or timeout == 0: raise exc # Check for closed socket. if self.fileno() == -1: From 163514bce1c71c4f0d785575d9da5a8b4dea9141 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 2 Jan 2025 12:29:20 -0600 Subject: [PATCH 1654/2111] PYTHON-5016 Fix initial sync in spawn host script (#2038) --- .evergreen/sync-spawn-host.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh index 4c3e276d41..3d6c968901 100755 --- a/.evergreen/sync-spawn-host.sh +++ b/.evergreen/sync-spawn-host.sh @@ -8,6 +8,7 @@ fi target=$1 echo "Syncing files to $target..." +rsync -haz -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver # shellcheck disable=SC2034 fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver; done echo "Syncing files to $target... done." From bf415371bbe7eef157ab16662c5edf8fb42d0431 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 3 Jan 2025 06:19:18 -0600 Subject: [PATCH 1655/2111] PYTHON-4754 Add gevent test for Python 3.13 (#2044) --- .evergreen/generated_configs/variants.yml | 12 ++++++------ .evergreen/scripts/generate_config.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index b17a500ade..b77859bc91 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -650,28 +650,28 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: green-eventlet-rhel8-python3.12 + - name: green-eventlet-rhel8-python3.13 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Green Eventlet RHEL8 Python3.12 + display_name: Green Eventlet RHEL8 Python3.13 run_on: - rhel87-small expansions: GREEN_FRAMEWORK: eventlet AUTH: auth SSL: ssl - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: green-gevent-rhel8-python3.12 + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: green-gevent-rhel8-python3.13 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Green Gevent RHEL8 Python3.12 + display_name: Green Gevent RHEL8 Python3.13 run_on: - rhel87-small expansions: GREEN_FRAMEWORK: gevent AUTH: auth SSL: ssl - PYTHON_BINARY: /opt/python/3.12/bin/python3 + PYTHON_BINARY: /opt/python/3.13/bin/python3 # Load balancer tests - name: load-balancer-rhel8-v6.0-python3.9 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 1637ae9711..8a9ba8a206 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -554,7 +554,7 @@ def create_green_framework_variants(): variants = [] tasks = [".standalone .noauth .nossl .sync_async"] host = DEFAULT_HOST - for python, framework in product([CPYTHONS[0], CPYTHONS[-2]], ["eventlet", "gevent"]): + for python, framework in product([CPYTHONS[0], CPYTHONS[-1]], ["eventlet", "gevent"]): expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") display_name = get_display_name(f"Green {framework.capitalize()}", host, python=python) variant = create_variant( From 27039c30bf6bd3e8167d084597cde28eb82a526f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 3 Jan 2025 14:15:49 -0600 Subject: [PATCH 1656/2111] PYTHON-5003 Update pymongo's pymongocrypt version to >=1.12 (#2043) --- doc/changelog.rst | 1 + requirements/encryption.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index d9e6cc3f5b..d185e3c728 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,6 +13,7 @@ PyMongo 4.11 brings a number of changes including: - Dropped support for Python 3.8. - Dropped support for MongoDB 3.6. - Dropped support for the MONGODB-CR authenticate mechanism, which is no longer supported by MongoDB 4.0+. +- pymongocrypt>=1.12 is now required for :ref:`In-Use Encryption` support. - Added support for free-threaded Python with the GIL disabled. For more information see: `Free-threaded CPython `_. - :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and diff --git a/requirements/encryption.txt b/requirements/encryption.txt index 1a8c14844c..5962f5028f 100644 --- a/requirements/encryption.txt +++ b/requirements/encryption.txt @@ -1,3 +1,3 @@ pymongo-auth-aws>=1.1.0,<2.0.0 -pymongocrypt>=1.10.0,<2.0.0 +pymongocrypt>=1.12.0,<2.0.0 certifi;os.name=='nt' or sys_platform=='darwin' From a1e681142b630f2cac295440b53b2b774ba0e5dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 06:57:50 -0600 Subject: [PATCH 1657/2111] Bump supercharge/mongodb-github-action from 1.11.0 to 1.12.0 in the actions group (#2046) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test-python.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 12cfaa4b27..2310b7698d 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -87,7 +87,7 @@ jobs: pip install hatch fi - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.11.0 + uses: supercharge/mongodb-github-action@1.12.0 with: mongodb-version: 6.0 - name: Run tests @@ -115,7 +115,7 @@ jobs: run: | pip install -U hatch pip - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.11.0 + uses: supercharge/mongodb-github-action@1.12.0 with: mongodb-version: '8.0.0-rc4' - name: Run tests @@ -230,7 +230,7 @@ jobs: # Test sdist on lowest supported Python python-version: '3.9' - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.11.0 + uses: supercharge/mongodb-github-action@1.12.0 - name: Run connect test from sdist shell: bash run: | From a0de09efc6f7addaa68c5c2c1fc721690622db16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 06:58:50 -0600 Subject: [PATCH 1658/2111] Bump mypy from 1.13.0 to 1.14.1 (#2045) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 189f4f8719..5a2f76f6bc 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,4 +1,4 @@ -mypy==1.13.0 +mypy==1.14.1 pyright==1.1.391 typing_extensions -r ./encryption.txt From fd5a10599b69338b6f5bf66542e06a550904e27d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 6 Jan 2025 09:55:04 -0600 Subject: [PATCH 1659/2111] PYTHON-5016 Update scripts to handle Windows spawn hosts (#2047) --- .evergreen/setup-spawn-host.sh | 3 ++- .evergreen/sync-spawn-host.sh | 5 +++-- _setup.py | 5 +++++ pyproject.toml | 1 + 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh index 1a526c762c..4109e59183 100755 --- a/.evergreen/setup-spawn-host.sh +++ b/.evergreen/setup-spawn-host.sh @@ -8,7 +8,8 @@ if [ -z "$1" ] fi target=$1 -remote_dir=/home/ec2-user/mongo-python-driver +user=${target%@*} +remote_dir=/home/$user/mongo-python-driver echo "Copying files to $target..." rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh index 3d6c968901..de3374a008 100755 --- a/.evergreen/sync-spawn-host.sh +++ b/.evergreen/sync-spawn-host.sh @@ -6,9 +6,10 @@ if [ -z "$1" ] fi target=$1 +user=${target%@*} echo "Syncing files to $target..." -rsync -haz -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver +rsync -haz -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/$user/mongo-python-driver # shellcheck disable=SC2034 -fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/ec2-user/mongo-python-driver; done +fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/$user/mongo-python-driver; done echo "Syncing files to $target... done." diff --git a/_setup.py b/_setup.py index 65ae1908fe..1a8b9e0246 100644 --- a/_setup.py +++ b/_setup.py @@ -82,6 +82,11 @@ def run(self): ) def build_extension(self, ext): + # "ProgramFiles(x86)" is not a valid environment variable in Cygwin but is needed for + # the MSVCCompiler in distutils. + if os.name == "nt": + if "ProgramFiles" in os.environ and "ProgramFiles(x86)" not in os.environ: + os.environ["ProgramFiles(x86)"] = os.environ["ProgramFiles"] + " (x86)" name = ext.name try: build_ext.build_extension(self, ext) diff --git a/pyproject.toml b/pyproject.toml index 9a29a777fc..a9977a382c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -212,6 +212,7 @@ dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?)|dummy.*)$" "tools/*.py" = ["T201"] "green_framework_test.py" = ["T201"] "hatch_build.py" = ["S"] +"_setup.py" = ["SIM112"] [tool.coverage.run] branch = true From da8c7aa4e057b95a46e1f9c8bedc53872db4dfd4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 6 Jan 2025 14:25:36 -0600 Subject: [PATCH 1660/2111] PYTHON-5017 Use a separate PyPI publish step (#2042) --- .github/workflows/release-python.yml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index cee222d109..78f7ba1faf 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -67,6 +67,23 @@ jobs: publish: needs: [build-dist, static-scan] + name: Upload release to PyPI + runs-on: ubuntu-latest + environment: release + permissions: + id-token: write + steps: + - name: Download all the dists + uses: actions/download-artifact@v4 + with: + name: all-dist-${{ github.run_id }} + path: dist/ + - name: Publish package distributions to PyPI + if: startsWith(inputs.dry_run, 'false') + uses: pypa/gh-action-pypi-publish@release/v1 + + post-publish: + needs: [publish] runs-on: ubuntu-latest environment: release permissions: From c40283ed0926cd5a5191fab36bcb8cbf4d80cf5d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jan 2025 12:48:28 -0600 Subject: [PATCH 1661/2111] PYTHON-4840 Add evergreen tests for free-threaded Python 3.13t (#2048) --- .evergreen/config.yml | 19 ++++++++++++++++- .evergreen/generated_configs/variants.yml | 26 +++++++++++++++++++++++ .evergreen/scripts/generate_config.py | 19 +++++++++++++++++ .evergreen/scripts/run-direct-tests.sh | 10 +++++++++ doc/changelog.rst | 3 +++ 5 files changed, 76 insertions(+), 1 deletion(-) create mode 100755 .evergreen/scripts/run-direct-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index aa0eee3620..f854f6bd3d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -116,7 +116,6 @@ functions: content_type: text/html display_name: "Coverage Report HTML" - "upload mo artifacts": - command: ec2.assume_role params: @@ -300,6 +299,15 @@ functions: - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/run-tests.sh + "run direct tests": + - command: subprocess.exec + type: test + params: + working_dir: "src" + binary: bash + include_expansions_in_env: ["PYTHON_BINARY"] + args: [ .evergreen/scripts/run-direct-tests.sh ] + "run enterprise auth tests": - command: subprocess.exec type: test @@ -920,6 +928,15 @@ tasks: commands: - func: "run tests" + - name: "free-threading" + tags: ["free-threading"] + commands: + - func: "bootstrap mongo-orchestration" + vars: + VERSION: "8.0" + TOPOLOGY: "replica_set" + - func: "run direct tests" + - name: "atlas-connect" tags: ["atlas-connect"] commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index b77859bc91..b1db61d492 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -627,6 +627,32 @@ buildvariants: AUTH: auth PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + # Free threaded tests + - name: free-threaded-rhel8-python3.13t + tasks: + - name: .free-threading + display_name: Free-threaded RHEL8 Python3.13t + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.13t/bin/python3 + - name: free-threaded-macos-python3.13t + tasks: + - name: .free-threading + display_name: Free-threaded macOS Python3.13t + run_on: + - macos-14 + expansions: + PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t + - name: free-threaded-macos-arm64-python3.13t + tasks: + - name: .free-threading + display_name: Free-threaded macOS Arm64 Python3.13t + run_on: + - macos-14-arm64 + expansions: + PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t + # Green framework tests - name: green-eventlet-rhel8-python3.9 tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 8a9ba8a206..e8d0b171bd 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -146,12 +146,16 @@ def get_python_binary(python: str, host: Host) -> str: else: base = "C:/python" python = python.replace(".", "") + if python == "313t": + return f"{base}/Python313/python3.13t.exe" return f"{base}/Python{python}/python.exe" if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: return f"/opt/python/{python}/bin/python3" if name in ["macos", "macos-arm64"]: + if python == "3.13t": + return "/Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t" return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" raise ValueError(f"no match found for python {python} on {name}") @@ -318,6 +322,21 @@ def create_server_variants() -> list[BuildVariant]: return variants +def create_free_threaded_variants() -> list[BuildVariant]: + variants = [] + for host_name in ("rhel8", "macos", "macos-arm64", "win64"): + if host_name == "win64": + # TODO: PYTHON-5027 + continue + tasks = [".free-threading"] + host = HOSTS[host_name] + python = "3.13t" + display_name = get_display_name("Free-threaded", host, python=python) + variant = create_variant(tasks, display_name, python=python, host=host) + variants.append(variant) + return variants + + def create_encryption_variants() -> list[BuildVariant]: variants = [] tags = ["encryption_tag"] diff --git a/.evergreen/scripts/run-direct-tests.sh b/.evergreen/scripts/run-direct-tests.sh new file mode 100755 index 0000000000..a00235311c --- /dev/null +++ b/.evergreen/scripts/run-direct-tests.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -x +. .evergreen/utils.sh + +. .evergreen/scripts/env.sh +createvirtualenv "$PYTHON_BINARY" .venv + +export PYMONGO_C_EXT_MUST_BUILD=1 +pip install -e ".[test]" +pytest -v diff --git a/doc/changelog.rst b/doc/changelog.rst index d185e3c728..22c98f566c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -16,6 +16,7 @@ PyMongo 4.11 brings a number of changes including: - pymongocrypt>=1.12 is now required for :ref:`In-Use Encryption` support. - Added support for free-threaded Python with the GIL disabled. For more information see: `Free-threaded CPython `_. + We do not yet support free-threaded Python on Windows (`PYTHON-5027`_) or with In-Use Encryption (`PYTHON-5024`_). - :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and :attr:`~pymongo.mongo_client.MongoClient.address` now correctly block when called on unconnected clients until either connection succeeds or a server selection timeout error is raised. @@ -42,6 +43,8 @@ See the `PyMongo 4.11 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.11 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40784 +.. _PYTHON-5027: https://jira.mongodb.org/browse/PYTHON-5027 +.. _PYTHON-5024: https://jira.mongodb.org/browse/PYTHON-5024 Changes in Version 4.10.1 (2024/10/01) -------------------------------------- From 1f22139323c5ad863aeffbd25f112b0dd2940786 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Jan 2025 12:49:52 -0600 Subject: [PATCH 1662/2111] PYTHON-4949 Communicate future minWireVersion bump / 4.0 EoL (#2050) --- doc/changelog.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 22c98f566c..fba6713bd9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -7,6 +7,10 @@ Changes in Version 4.11.0 (YYYY/MM/DD) .. warning:: PyMongo 4.11 drops support for Python 3.8: Python 3.9+ or PyPy 3.9+ is now required. .. warning:: PyMongo 4.11 drops support for MongoDB 3.6. PyMongo now supports MongoDB 4.0+. Driver support for MongoDB 3.6 reached end of life in April 2024. +.. warning:: Driver support for MongoDB 4.0 reaches end of life in April 2025. + A future minor release of PyMongo will raise the minimum supported MongoDB Server version from 4.0 to 4.2. + This is in accordance with [MongoDB Software Lifecycle Schedules](https://www.mongodb.com/legal/support-policy/lifecycles). + **Support for MongoDB Server 4.0 will be dropped in a future release!** PyMongo 4.11 brings a number of changes including: From 42df09c4b4cfc63fa02e4fbc5c1930c29aedeec7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 8 Jan 2025 17:07:18 -0600 Subject: [PATCH 1663/2111] PYTHON-5017 Fix post-publish step (#2051) --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 78f7ba1faf..ee4ea32f82 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -102,7 +102,7 @@ jobs: aws_region_name: ${{ vars.AWS_REGION_NAME }} aws_secret_id: ${{ secrets.AWS_SECRET_ID }} artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} - - uses: mongodb-labs/drivers-github-tools/python/publish@v2 + - uses: mongodb-labs/drivers-github-tools/python/post-publish@v2 with: version: ${{ inputs.version }} following_version: ${{ inputs.following_version }} From 53943ac5391d5a9fcd2283dd7a6d061b0537ef99 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 9 Jan 2025 13:36:56 -0800 Subject: [PATCH 1664/2111] PYTHON-5011 Fix behavior of TLS connection errors on PyPy (#2052) --- pymongo/network_layer.py | 64 ++++++++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index beffba6d18..4d21300bc6 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -28,7 +28,7 @@ Union, ) -from pymongo import ssl_support +from pymongo import _csot, ssl_support from pymongo._asyncio_task import create_task from pymongo.errors import _OperationCancelled from pymongo.socket_checker import _errno_from_exception @@ -316,6 +316,42 @@ async def _async_receive(conn: socket.socket, length: int, loop: AbstractEventLo return mv +_PYPY = "PyPy" in sys.version + + +def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: + """Block until at least one byte is read, or a timeout, or a cancel.""" + sock = conn.conn + timed_out = False + # Check if the connection's socket has been manually closed + if sock.fileno() == -1: + return + while True: + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) + else: + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") + + def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: buf = bytearray(length) mv = memoryview(buf) @@ -324,18 +360,25 @@ def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> me # check for the cancellation signal after each timeout. Alternatively we # could close the socket but that does not reliably cancel recv() calls # on all OSes. + # When the timeout has expired we perform one final non-blocking recv. + # This helps avoid spurious timeouts when the response is actually already + # buffered on the client. orig_timeout = conn.conn.gettimeout() try: while bytes_read < length: - if deadline is not None: - # CSOT: Update timeout. When the timeout has expired perform one - # final non-blocking recv. This helps avoid spurious timeouts when - # the response is actually already buffered on the client. - short_timeout = min(max(deadline - time.monotonic(), 0), _POLL_TIMEOUT) - else: - short_timeout = _POLL_TIMEOUT - conn.set_conn_timeout(short_timeout) try: + # Use the legacy wait_for_read cancellation approach on PyPy due to PYTHON-5011. + if _PYPY: + wait_for_read(conn, deadline) + if _csot.get_timeout() and deadline is not None: + conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) + else: + if deadline is not None: + short_timeout = min(max(deadline - time.monotonic(), 0), _POLL_TIMEOUT) + else: + short_timeout = _POLL_TIMEOUT + conn.set_conn_timeout(short_timeout) + chunk_length = conn.conn.recv_into(mv[bytes_read:]) except BLOCKING_IO_ERRORS: if conn.cancel_context.cancelled: @@ -345,6 +388,9 @@ def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> me except socket.timeout: if conn.cancel_context.cancelled: raise _OperationCancelled("operation cancelled") from None + if _PYPY: + # We reached the true deadline. + raise continue except OSError as exc: if conn.cancel_context.cancelled: From 6c9a20a49d37bff2f2163011d1ccd686f83555a6 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 10 Jan 2025 09:32:07 -0800 Subject: [PATCH 1665/2111] PYTHON-5014 Tests that use HTTPSConnection should only use stdlib ssl (#2053) --- test/asynchronous/test_encryption.py | 12 ++---------- test/test_encryption.py | 12 ++---------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 48f791ac16..10c4c8a564 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -98,7 +98,6 @@ WriteError, ) from pymongo.operations import InsertOne, ReplaceOne, UpdateOne -from pymongo.ssl_support import get_ssl_context from pymongo.write_concern import WriteConcern _IS_SYNC = False @@ -2879,15 +2878,8 @@ async def asyncSetUp(self): async def http_post(self, path, data=None): # Note, the connection to the mock server needs to be closed after # each request because the server is single threaded. - ctx: ssl.SSLContext = get_ssl_context( - CLIENT_PEM, # certfile - None, # passphrase - CA_PEM, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False, # disable_ocsp_endpoint_check - ) + ctx = ssl.create_default_context(cafile=CA_PEM) + ctx.load_cert_chain(CLIENT_PEM) conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) try: if data is not None: diff --git a/test/test_encryption.py b/test/test_encryption.py index daa5fd5d4c..7b5aa776e6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -95,7 +95,6 @@ WriteError, ) from pymongo.operations import InsertOne, ReplaceOne, UpdateOne -from pymongo.ssl_support import get_ssl_context from pymongo.synchronous import encryption from pymongo.synchronous.encryption import Algorithm, ClientEncryption, QueryType from pymongo.synchronous.mongo_client import MongoClient @@ -2861,15 +2860,8 @@ def setUp(self): def http_post(self, path, data=None): # Note, the connection to the mock server needs to be closed after # each request because the server is single threaded. - ctx: ssl.SSLContext = get_ssl_context( - CLIENT_PEM, # certfile - None, # passphrase - CA_PEM, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False, # disable_ocsp_endpoint_check - ) + ctx = ssl.create_default_context(cafile=CA_PEM) + ctx.load_cert_chain(CLIENT_PEM) conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) try: if data is not None: From 493fc2ab3e237c2155fde4400002ed2aafe9b2be Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 10 Jan 2025 13:05:57 -0600 Subject: [PATCH 1666/2111] PYTHON-5014 Fix handling of async socket errors in kms request (#2054) --- pymongo/asynchronous/encryption.py | 9 ++++++++- pymongo/synchronous/encryption.py | 9 ++++++++- test/asynchronous/test_encryption.py | 12 ++++++++---- test/test_encryption.py | 12 ++++++++---- 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 1cf165e6a2..98ab68527c 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -219,7 +219,14 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: # Wrap I/O errors in PyMongo exceptions. if isinstance(exc, BLOCKING_IO_ERRORS): exc = socket.timeout("timed out") - _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + # Async raises an OSError instead of returning empty bytes. + if isinstance(exc, OSError): + msg_prefix = "KMS connection closed" + else: + msg_prefix = None + _raise_connection_failure( + address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) + ) finally: conn.close() except MongoCryptError: diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index ef49855059..d41169861f 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -219,7 +219,14 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: # Wrap I/O errors in PyMongo exceptions. if isinstance(exc, BLOCKING_IO_ERRORS): exc = socket.timeout("timed out") - _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + # Async raises an OSError instead of returning empty bytes. + if isinstance(exc, OSError): + msg_prefix = "KMS connection closed" + else: + msg_prefix = None + _raise_connection_failure( + address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) + ) finally: conn.close() except MongoCryptError: diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 10c4c8a564..ef53d8ccd5 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -2162,7 +2162,8 @@ async def test_01_aws(self): # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) key["endpoint"] = "127.0.0.1:9001" with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): await self.client_encryption_invalid_hostname.create_data_key("aws", key) @@ -2179,7 +2180,8 @@ async def test_02_azure(self): await self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): await self.client_encryption_invalid_hostname.create_data_key("azure", key) @@ -2196,7 +2198,8 @@ async def test_03_gcp(self): await self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): await self.client_encryption_invalid_hostname.create_data_key("gcp", key) @@ -2210,7 +2213,8 @@ async def test_04_kmip(self): await self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): await self.client_encryption_invalid_hostname.create_data_key("kmip") diff --git a/test/test_encryption.py b/test/test_encryption.py index 7b5aa776e6..726463c41e 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2154,7 +2154,8 @@ def test_01_aws(self): # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) key["endpoint"] = "127.0.0.1:9001" with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): self.client_encryption_invalid_hostname.create_data_key("aws", key) @@ -2171,7 +2172,8 @@ def test_02_azure(self): self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): self.client_encryption_invalid_hostname.create_data_key("azure", key) @@ -2188,7 +2190,8 @@ def test_03_gcp(self): self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): self.client_encryption_invalid_hostname.create_data_key("gcp", key) @@ -2202,7 +2205,8 @@ def test_04_kmip(self): self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): self.client_encryption_invalid_hostname.create_data_key("kmip") From b9f4f796f1af1787b3fcf6d918abdd603d6d41e9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 13 Jan 2025 09:16:28 -0500 Subject: [PATCH 1667/2111] Revert "PYTHON-4915 - Add guidance on adding _id fields to documents to CRUD spec, reorder client.bulk_write generated _id fields" (#2055) Co-authored-by: Steven Silvester --- pymongo/message.py | 13 +-- test/asynchronous/test_client_bulk_write.py | 14 --- test/mockupdb/test_id_ordering.py | 94 --------------------- test/test_client_bulk_write.py | 14 --- 4 files changed, 1 insertion(+), 134 deletions(-) delete mode 100644 test/mockupdb/test_id_ordering.py diff --git a/pymongo/message.py b/pymongo/message.py index b6c00f06cb..10c9edb5cd 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -24,7 +24,6 @@ import datetime import random import struct -from collections import ChainMap from io import BytesIO as _BytesIO from typing import ( TYPE_CHECKING, @@ -1116,18 +1115,8 @@ def _check_doc_size_limits( # key and the index of its namespace within ns_info as its value. op_doc[op_type] = ns_info[namespace] # type: ignore[index] - # Since the data document itself is nested within the insert document - # it won't be automatically re-ordered by the BSON conversion. - # We use ChainMap here to make the _id field the first field instead. - doc_to_encode = op_doc - if real_op_type == "insert": - doc = op_doc["document"] - if not isinstance(doc, RawBSONDocument): - doc_to_encode = op_doc.copy() # type: ignore[attr-defined] # Shallow copy - doc_to_encode["document"] = ChainMap(doc, {"_id": doc["_id"]}) # type: ignore[index] - # Encode current operation doc and, if newly added, namespace doc. - op_doc_encoded = _dict_to_bson(doc_to_encode, False, opts) + op_doc_encoded = _dict_to_bson(op_doc, False, opts) op_length = len(op_doc_encoded) if ns_doc: ns_doc_encoded = _dict_to_bson(ns_doc, False, opts) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index a82629f495..282009f554 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -18,9 +18,6 @@ import os import sys -from bson import encode -from bson.raw_bson import RawBSONDocument - sys.path[0:0] = [""] from test.asynchronous import ( @@ -87,17 +84,6 @@ async def test_formats_write_error_correctly(self): self.assertEqual(write_error["idx"], 1) self.assertEqual(write_error["op"], {"insert": 0, "document": {"_id": 1}}) - @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless - async def test_raw_bson_not_inflated(self): - doc = RawBSONDocument(encode({"a": "b" * 100})) - models = [ - InsertOne(namespace="db.coll", document=doc), - ] - await self.client.bulk_write(models=models) - - self.assertIsNone(doc._RawBSONDocument__inflated_doc) - # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(AsyncIntegrationTest): diff --git a/test/mockupdb/test_id_ordering.py b/test/mockupdb/test_id_ordering.py deleted file mode 100644 index 7e2c91d592..0000000000 --- a/test/mockupdb/test_id_ordering.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2024-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from test import PyMongoTestCase - -import pytest - -from pymongo import InsertOne - -try: - from mockupdb import MockupDB, OpMsg, go, going - - _HAVE_MOCKUPDB = True -except ImportError: - _HAVE_MOCKUPDB = False - - -from bson.objectid import ObjectId - -pytestmark = pytest.mark.mockupdb - - -# https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#16-generated-document-identifiers-are-the-first-field-in-their-document -class TestIdOrdering(PyMongoTestCase): - def test_16_generated_document_ids_are_first_field(self): - server = MockupDB() - server.autoresponds( - "hello", - isWritablePrimary=True, - msg="isdbgrid", - minWireVersion=0, - maxWireVersion=25, - helloOk=True, - serviceId=ObjectId(), - ) - server.run() - self.addCleanup(server.stop) - - # We also verify that the original document contains an _id field after each insert - document = {"x": 1} - - client = self.simple_client(server.uri, loadBalanced=True) - collection = client.db.coll - with going(collection.insert_one, document): - request = server.receives() - self.assertEqual("_id", next(iter(request["documents"][0]))) - request.reply({"ok": 1}) - self.assertIn("_id", document) - - document = {"x1": 1} - - with going(collection.bulk_write, [InsertOne(document)]): - request = server.receives() - self.assertEqual("_id", next(iter(request["documents"][0]))) - request.reply({"ok": 1}) - self.assertIn("_id", document) - - document = {"x2": 1} - with going(client.bulk_write, [InsertOne(namespace="db.coll", document=document)]): - request = server.receives() - self.assertEqual("_id", next(iter(request["ops"][0]["document"]))) - request.reply({"ok": 1}) - self.assertIn("_id", document) - - # Re-ordering user-supplied _id fields is not required by the spec, but PyMongo does it for performance reasons - with going(collection.insert_one, {"x": 1, "_id": 111}): - request = server.receives() - self.assertEqual("_id", next(iter(request["documents"][0]))) - request.reply({"ok": 1}) - - with going(collection.bulk_write, [InsertOne({"x1": 1, "_id": 1111})]): - request = server.receives() - self.assertEqual("_id", next(iter(request["documents"][0]))) - request.reply({"ok": 1}) - - with going( - client.bulk_write, [InsertOne(namespace="db.coll", document={"x2": 1, "_id": 11111})] - ): - request = server.receives() - self.assertEqual("_id", next(iter(request["ops"][0]["document"]))) - request.reply({"ok": 1}) diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index c1cc27c28a..f8d92668ea 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -18,9 +18,6 @@ import os import sys -from bson import encode -from bson.raw_bson import RawBSONDocument - sys.path[0:0] = [""] from test import ( @@ -87,17 +84,6 @@ def test_formats_write_error_correctly(self): self.assertEqual(write_error["idx"], 1) self.assertEqual(write_error["op"], {"insert": 0, "document": {"_id": 1}}) - @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless - def test_raw_bson_not_inflated(self): - doc = RawBSONDocument(encode({"a": "b" * 100})) - models = [ - InsertOne(namespace="db.coll", document=doc), - ] - self.client.bulk_write(models=models) - - self.assertIsNone(doc._RawBSONDocument__inflated_doc) - # https://github.com/mongodb/specifications/tree/master/source/crud/tests class TestClientBulkWriteCRUD(IntegrationTest): From ecf7ac77702e0ea1710e4d06082f618999ba3398 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 13 Jan 2025 20:34:58 -0600 Subject: [PATCH 1668/2111] PYTHON-5013 Add NULL checks in InvalidDocument bson handling (#2049) --- bson/_cbsonmodule.c | 89 ++++++++++++++++++++++++++++----------------- test/test_bson.py | 28 ++++++++++++++ 2 files changed, 83 insertions(+), 34 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index d91c7e0536..672f5eeda5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1644,6 +1644,56 @@ static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { return bytes_written; } + +/* Update Invalid Document error message to include doc. + */ +void handle_invalid_doc_error(PyObject* dict) { + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *msg = NULL, *dict_str = NULL, *new_msg = NULL; + PyErr_Fetch(&etype, &evalue, &etrace); + PyObject *InvalidDocument = _error("InvalidDocument"); + if (InvalidDocument == NULL) { + goto cleanup; + } + + if (evalue && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { + PyObject *msg = PyObject_Str(evalue); + if (msg) { + // Prepend doc to the existing message + PyObject *dict_str = PyObject_Str(dict); + if (dict_str == NULL) { + goto cleanup; + } + const char * dict_str_utf8 = PyUnicode_AsUTF8(dict_str); + if (dict_str_utf8 == NULL) { + goto cleanup; + } + const char * msg_utf8 = PyUnicode_AsUTF8(msg); + if (msg_utf8 == NULL) { + goto cleanup; + } + PyObject *new_msg = PyUnicode_FromFormat("Invalid document %s | %s", dict_str_utf8, msg_utf8); + Py_DECREF(evalue); + Py_DECREF(etype); + etype = InvalidDocument; + InvalidDocument = NULL; + if (new_msg) { + evalue = new_msg; + } else { + evalue = msg; + } + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } +cleanup: + PyErr_Restore(etype, evalue, etrace); + Py_XDECREF(msg); + Py_XDECREF(InvalidDocument); + Py_XDECREF(dict_str); + Py_XDECREF(new_msg); +} + + /* returns the number of bytes written or 0 on failure */ int write_dict(PyObject* self, buffer_t buffer, PyObject* dict, unsigned char check_keys, @@ -1743,40 +1793,8 @@ int write_dict(PyObject* self, buffer_t buffer, while (PyDict_Next(dict, &pos, &key, &value)) { if (!decode_and_write_pair(self, buffer, key, value, check_keys, options, top_level)) { - if (PyErr_Occurred()) { - PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; - PyErr_Fetch(&etype, &evalue, &etrace); - PyObject *InvalidDocument = _error("InvalidDocument"); - - if (top_level && InvalidDocument && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { - - Py_DECREF(etype); - etype = InvalidDocument; - - if (evalue) { - PyObject *msg = PyObject_Str(evalue); - Py_DECREF(evalue); - - if (msg) { - // Prepend doc to the existing message - PyObject *dict_str = PyObject_Str(dict); - PyObject *new_msg = PyUnicode_FromFormat("Invalid document %s | %s", PyUnicode_AsUTF8(dict_str), PyUnicode_AsUTF8(msg)); - Py_DECREF(dict_str); - - if (new_msg) { - evalue = new_msg; - } - else { - evalue = msg; - } - } - } - PyErr_NormalizeException(&etype, &evalue, &etrace); - } - else { - Py_DECREF(InvalidDocument); - } - PyErr_Restore(etype, evalue, etrace); + if (PyErr_Occurred() && top_level) { + handle_invalid_doc_error(dict); } return 0; } @@ -1796,6 +1814,9 @@ int write_dict(PyObject* self, buffer_t buffer, } if (!decode_and_write_pair(self, buffer, key, value, check_keys, options, top_level)) { + if (PyErr_Occurred() && top_level) { + handle_invalid_doc_error(dict); + } Py_DECREF(key); Py_DECREF(value); Py_DECREF(iter); diff --git a/test/test_bson.py b/test/test_bson.py index e550b538d3..e601be4915 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1112,6 +1112,34 @@ def __repr__(self): with self.assertRaisesRegex(InvalidDocument, f"Invalid document {doc}"): encode(doc) + def test_doc_in_invalid_document_error_message_mapping(self): + class MyMapping(abc.Mapping): + def keys(): + return ["t"] + + def __getitem__(self, name): + if name == "_id": + return None + return Wrapper(name) + + def __len__(self): + return 1 + + def __iter__(self): + return iter(["t"]) + + class Wrapper: + def __init__(self, val): + self.val = val + + def __repr__(self): + return repr(self.val) + + self.assertEqual("1", repr(Wrapper(1))) + doc = MyMapping() + with self.assertRaisesRegex(InvalidDocument, f"Invalid document {doc}"): + encode(doc) + class TestCodecOptions(unittest.TestCase): def test_document_class(self): From 069ebf3e13770e43e0c95f576b6c026209d1549c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 15 Jan 2025 07:54:52 -0500 Subject: [PATCH 1669/2111] PYTHON-5037 - Update pyopenssl_context.__get_options type hint (#2060) --- pymongo/pyopenssl_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index a320e94929..8c643394b2 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -273,7 +273,7 @@ def __set_check_ocsp_endpoint(self, value: bool) -> None: check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint) - def __get_options(self) -> None: + def __get_options(self) -> int: # Calling set_options adds the option to the existing bitmask and # returns the new bitmask. # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_options From f8bd891df4415e9d793c5146a1126cb5a4c14141 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 15 Jan 2025 09:53:45 -0500 Subject: [PATCH 1670/2111] =?UTF-8?q?PYTHON-5039=20-=20Always=20use=20asyn?= =?UTF-8?q?cio.get=5Frunning=5Floop()=20instead=20of=20asynci=E2=80=A6=20(?= =?UTF-8?q?#2063)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/network_layer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 4d21300bc6..c1db31f89c 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -72,7 +72,7 @@ async def async_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: timeout = sock.gettimeout() sock.settimeout(0.0) - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): await asyncio.wait_for(_async_sendall_ssl(sock, buf, loop), timeout=timeout) @@ -259,7 +259,7 @@ async def async_receive_data( timeout = sock_timeout sock.settimeout(0.0) - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() cancellation_task = create_task(_poll_cancellation(conn)) try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): @@ -290,7 +290,7 @@ async def async_receive_data_socket( timeout = sock_timeout sock.settimeout(0.0) - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): return await asyncio.wait_for( From 820701f15ad9a41d0bab01f69291b368798a512d Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 15 Jan 2025 12:23:12 -0800 Subject: [PATCH 1671/2111] PYTHON-5038 Resolve certificate verify failed: Missing Authority Key Identifier (#2062) --- test/asynchronous/test_encryption.py | 2 ++ test/test_encryption.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index ef53d8ccd5..2b22bd8b76 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -2884,6 +2884,8 @@ async def http_post(self, path, data=None): # each request because the server is single threaded. ctx = ssl.create_default_context(cafile=CA_PEM) ctx.load_cert_chain(CLIENT_PEM) + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) try: if data is not None: diff --git a/test/test_encryption.py b/test/test_encryption.py index 726463c41e..9224310144 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2866,6 +2866,8 @@ def http_post(self, path, data=None): # each request because the server is single threaded. ctx = ssl.create_default_context(cafile=CA_PEM) ctx.load_cert_chain(CLIENT_PEM) + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) try: if data is not None: From 8fa6750a7e077c33f304e72f3e17467ced9cd224 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 16 Jan 2025 12:14:25 -0800 Subject: [PATCH 1672/2111] PYTHON-5042 Resync transaction spec tests (#2058) --- .../unified/commit-retry.json | 5 ----- .../unified/commit-writeconcernerror.json | 17 +---------------- .../transactions/unified/findOneAndReplace.json | 8 ++++++-- test/transactions/unified/findOneAndUpdate.json | 16 ++++++++++++---- .../unified/mongos-recovery-token.json | 6 ++++-- test/transactions/unified/pin-mongos.json | 6 ++++-- test/transactions/unified/retryable-commit.json | 5 ----- test/transactions/unified/write-concern.json | 8 ++++++-- 8 files changed, 33 insertions(+), 38 deletions(-) diff --git a/test/transactions-convenient-api/unified/commit-retry.json b/test/transactions-convenient-api/unified/commit-retry.json index cc80201167..928f0167e4 100644 --- a/test/transactions-convenient-api/unified/commit-retry.json +++ b/test/transactions-convenient-api/unified/commit-retry.json @@ -422,11 +422,6 @@ }, { "description": "commit is not retried after MaxTimeMSExpired error", - "runOnRequirements": [ - { - "serverless": "forbid" - } - ], "operations": [ { "name": "failPoint", diff --git a/test/transactions-convenient-api/unified/commit-writeconcernerror.json b/test/transactions-convenient-api/unified/commit-writeconcernerror.json index a455a450bf..a6f6e6bd7f 100644 --- a/test/transactions-convenient-api/unified/commit-writeconcernerror.json +++ b/test/transactions-convenient-api/unified/commit-writeconcernerror.json @@ -1,6 +1,6 @@ { "description": "commit-writeconcernerror", - "schemaVersion": "1.4", + "schemaVersion": "1.3", "runOnRequirements": [ { "minServerVersion": "4.0", @@ -414,11 +414,6 @@ }, { "description": "commitTransaction is not retried after UnknownReplWriteConcern error", - "runOnRequirements": [ - { - "serverless": "forbid" - } - ], "operations": [ { "name": "failPoint", @@ -551,11 +546,6 @@ }, { "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", - "runOnRequirements": [ - { - "serverless": "forbid" - } - ], "operations": [ { "name": "failPoint", @@ -688,11 +678,6 @@ }, { "description": "commitTransaction is not retried after MaxTimeMSExpired error", - "runOnRequirements": [ - { - "serverless": "forbid" - } - ], "operations": [ { "name": "failPoint", diff --git a/test/transactions/unified/findOneAndReplace.json b/test/transactions/unified/findOneAndReplace.json index d9248244b3..f0742f0c60 100644 --- a/test/transactions/unified/findOneAndReplace.json +++ b/test/transactions/unified/findOneAndReplace.json @@ -127,7 +127,9 @@ "update": { "x": 1 }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -299,7 +301,9 @@ "update": { "x": 1 }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, diff --git a/test/transactions/unified/findOneAndUpdate.json b/test/transactions/unified/findOneAndUpdate.json index 34a40bb570..f5308efef3 100644 --- a/test/transactions/unified/findOneAndUpdate.json +++ b/test/transactions/unified/findOneAndUpdate.json @@ -189,7 +189,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -281,7 +283,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -340,7 +344,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -485,7 +491,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, diff --git a/test/transactions/unified/mongos-recovery-token.json b/test/transactions/unified/mongos-recovery-token.json index 00909c4218..bb88aa16bd 100644 --- a/test/transactions/unified/mongos-recovery-token.json +++ b/test/transactions/unified/mongos-recovery-token.json @@ -232,7 +232,8 @@ "id": "client1", "useMultipleMongoses": true, "uriOptions": { - "heartbeatFrequencyMS": 30000 + "heartbeatFrequencyMS": 30000, + "appName": "transactionsClient" }, "observeEvents": [ "commandStartedEvent" @@ -299,7 +300,8 @@ "isMaster", "hello" ], - "closeConnection": true + "closeConnection": true, + "appName": "transactionsClient" } } } diff --git a/test/transactions/unified/pin-mongos.json b/test/transactions/unified/pin-mongos.json index 5f2ecca5c1..c96f3f341f 100644 --- a/test/transactions/unified/pin-mongos.json +++ b/test/transactions/unified/pin-mongos.json @@ -1249,7 +1249,8 @@ "id": "client1", "useMultipleMongoses": true, "uriOptions": { - "heartbeatFrequencyMS": 30000 + "heartbeatFrequencyMS": 30000, + "appName": "transactionsClient" }, "observeEvents": [ "commandStartedEvent" @@ -1316,7 +1317,8 @@ "isMaster", "hello" ], - "closeConnection": true + "closeConnection": true, + "appName": "transactionsClient" } } } diff --git a/test/transactions/unified/retryable-commit.json b/test/transactions/unified/retryable-commit.json index 7d7e52495d..b794c1c55c 100644 --- a/test/transactions/unified/retryable-commit.json +++ b/test/transactions/unified/retryable-commit.json @@ -89,11 +89,6 @@ "tests": [ { "description": "commitTransaction fails after Interrupted", - "runOnRequirements": [ - { - "serverless": "forbid" - } - ], "operations": [ { "object": "testRunner", diff --git a/test/transactions/unified/write-concern.json b/test/transactions/unified/write-concern.json index 7acdd54066..29d1977a82 100644 --- a/test/transactions/unified/write-concern.json +++ b/test/transactions/unified/write-concern.json @@ -1417,7 +1417,9 @@ "update": { "x": 1 }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -1522,7 +1524,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, From e4d84494c321f2532c93cf4d5cc815311f396e03 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 17 Jan 2025 09:46:48 -0500 Subject: [PATCH 1673/2111] PYTHON-5021 - Fix usages of getaddrinfo to be non-blocking (#2059) --- pymongo/asynchronous/auth.py | 18 +++++++++++++----- pymongo/asynchronous/helpers.py | 20 ++++++++++++++++++++ pymongo/asynchronous/pool.py | 8 ++++---- pymongo/synchronous/auth.py | 14 +++++++++++--- pymongo/synchronous/helpers.py | 20 ++++++++++++++++++++ pymongo/synchronous/pool.py | 4 ++-- test/asynchronous/test_auth.py | 4 ++-- 7 files changed, 72 insertions(+), 16 deletions(-) diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py index 48ce4bbd39..b1e6d0125b 100644 --- a/pymongo/asynchronous/auth.py +++ b/pymongo/asynchronous/auth.py @@ -38,6 +38,7 @@ _authenticate_oidc, _get_authenticator, ) +from pymongo.asynchronous.helpers import _getaddrinfo from pymongo.auth_shared import ( MongoCredential, _authenticate_scram_start, @@ -177,15 +178,22 @@ def _auth_key(nonce: str, username: str, password: str) -> str: return md5hash.hexdigest() -def _canonicalize_hostname(hostname: str, option: str | bool) -> str: +async def _canonicalize_hostname(hostname: str, option: str | bool) -> str: """Canonicalize hostname following MIT-krb5 behavior.""" # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 if option in [False, "none"]: return hostname - af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( - hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME - )[0] + af, socktype, proto, canonname, sockaddr = ( + await _getaddrinfo( + hostname, + None, + family=0, + type=0, + proto=socket.IPPROTO_TCP, + flags=socket.AI_CANONNAME, + ) + )[0] # type: ignore[index] # For forward just to resolve the cname as dns.lookup() will not return it. if option == "forward": @@ -213,7 +221,7 @@ async def _authenticate_gssapi(credentials: MongoCredential, conn: AsyncConnecti # Starting here and continuing through the while loop below - establish # the security context. See RFC 4752, Section 3.1, first paragraph. host = props.service_host or conn.address[0] - host = _canonicalize_hostname(host, props.canonicalize_host_name) + host = await _canonicalize_hostname(host, props.canonicalize_host_name) service = props.service_name + "@" + host if props.service_realm is not None: service = service + "@" + props.service_realm diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 1ac8b6630f..d519e8749c 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -15,7 +15,9 @@ """Miscellaneous pieces that need to be synchronized.""" from __future__ import annotations +import asyncio import builtins +import socket import sys from typing import ( Any, @@ -68,6 +70,24 @@ async def inner(*args: Any, **kwargs: Any) -> Any: return cast(F, inner) +async def _getaddrinfo( + host: Any, port: Any, **kwargs: Any +) -> list[ + tuple[ + socket.AddressFamily, + socket.SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int], + ] +]: + if not _IS_SYNC: + loop = asyncio.get_running_loop() + return await loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] + else: + return socket.getaddrinfo(host, port, **kwargs) + + if sys.version_info >= (3, 10): anext = builtins.anext aiter = builtins.aiter diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 5dc5675a0a..bf2f2b4946 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -40,7 +40,7 @@ from bson import DEFAULT_CODEC_OPTIONS from pymongo import _csot, helpers_shared from pymongo.asynchronous.client_session import _validate_session_write_concern -from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.asynchronous.helpers import _getaddrinfo, _handle_reauth from pymongo.asynchronous.network import command, receive_message from pymongo.common import ( MAX_BSON_SIZE, @@ -783,7 +783,7 @@ def __repr__(self) -> str: ) -def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: +async def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: """Given (host, port) and PoolOptions, connect and return a socket object. Can raise socket.error. @@ -814,7 +814,7 @@ def _create_connection(address: _Address, options: PoolOptions) -> socket.socket family = socket.AF_UNSPEC err = None - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + for res in await _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined] af, socktype, proto, dummy, sa = res # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 @@ -863,7 +863,7 @@ async def _configured_socket( Sets socket's SSL and timeout options. """ - sock = _create_connection(address, options) + sock = await _create_connection(address, options) ssl_context = options._ssl_context if ssl_context is None: diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py index 0e51ff8b7f..56860eff3b 100644 --- a/pymongo/synchronous/auth.py +++ b/pymongo/synchronous/auth.py @@ -45,6 +45,7 @@ _authenticate_oidc, _get_authenticator, ) +from pymongo.synchronous.helpers import _getaddrinfo if TYPE_CHECKING: from pymongo.hello import Hello @@ -180,9 +181,16 @@ def _canonicalize_hostname(hostname: str, option: str | bool) -> str: if option in [False, "none"]: return hostname - af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( - hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME - )[0] + af, socktype, proto, canonname, sockaddr = ( + _getaddrinfo( + hostname, + None, + family=0, + type=0, + proto=socket.IPPROTO_TCP, + flags=socket.AI_CANONNAME, + ) + )[0] # type: ignore[index] # For forward just to resolve the cname as dns.lookup() will not return it. if option == "forward": diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index 064583dad3..f800e7dcc8 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -15,7 +15,9 @@ """Miscellaneous pieces that need to be synchronized.""" from __future__ import annotations +import asyncio import builtins +import socket import sys from typing import ( Any, @@ -68,6 +70,24 @@ def inner(*args: Any, **kwargs: Any) -> Any: return cast(F, inner) +def _getaddrinfo( + host: Any, port: Any, **kwargs: Any +) -> list[ + tuple[ + socket.AddressFamily, + socket.SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int], + ] +]: + if not _IS_SYNC: + loop = asyncio.get_running_loop() + return loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] + else: + return socket.getaddrinfo(host, port, **kwargs) + + if sys.version_info >= (3, 10): next = builtins.next iter = builtins.iter diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 1a155c82d7..05f930d480 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -84,7 +84,7 @@ from pymongo.socket_checker import SocketChecker from pymongo.ssl_support import HAS_SNI, SSLError from pymongo.synchronous.client_session import _validate_session_write_concern -from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.helpers import _getaddrinfo, _handle_reauth from pymongo.synchronous.network import command, receive_message if TYPE_CHECKING: @@ -812,7 +812,7 @@ def _create_connection(address: _Address, options: PoolOptions) -> socket.socket family = socket.AF_UNSPEC err = None - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): + for res in _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined] af, socktype, proto, dummy, sa = res # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index 08dc4d7247..7172152d69 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -275,10 +275,10 @@ async def test_gssapi_threaded(self): async def test_gssapi_canonicalize_host_name(self): # Test the low level method. assert GSSAPI_HOST is not None - result = _canonicalize_hostname(GSSAPI_HOST, "forward") + result = await _canonicalize_hostname(GSSAPI_HOST, "forward") if "compute-1.amazonaws.com" not in result: self.assertEqual(result, GSSAPI_HOST) - result = _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + result = await _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") self.assertEqual(result, GSSAPI_HOST) # Use the equivalent named CANONICALIZE_HOST_NAME. From 86084adb29537bd6c432913346f3d14b59c90a69 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 17 Jan 2025 14:48:01 -0600 Subject: [PATCH 1674/2111] PYTHON-4975 Use justfile as the task runner (#2057) Co-authored-by: Noah Stapp --- .evergreen/combine-coverage.sh | 2 +- .evergreen/hatch.sh | 5 -- .evergreen/install-dependencies.sh | 27 +++++++ .evergreen/just.sh | 5 ++ .evergreen/run-azurekms-fail-test.sh | 2 +- .evergreen/run-azurekms-test.sh | 2 +- .evergreen/run-gcpkms-test.sh | 2 +- .evergreen/run-import-time-test.sh | 2 +- .evergreen/run-mongodb-aws-ecs-test.sh | 2 +- .evergreen/run-mongodb-oidc-test.sh | 2 +- .evergreen/run-perf-tests.sh | 2 +- .evergreen/scripts/cleanup.sh | 2 +- .evergreen/scripts/configure-env.sh | 8 ++- .evergreen/scripts/ensure-hatch.sh | 59 --------------- .evergreen/scripts/install-dependencies.sh | 2 +- .evergreen/scripts/run-atlas-tests.sh | 2 +- .evergreen/scripts/run-doctests.sh | 2 +- .../scripts/run-enterprise-auth-tests.sh | 2 +- .evergreen/scripts/run-gcpkms-fail-test.sh | 2 +- .evergreen/scripts/run-mockupdb-tests.sh | 2 +- .evergreen/scripts/run-mongodb-aws-test.sh | 2 +- .evergreen/scripts/run-ocsp-test.sh | 2 +- .evergreen/scripts/run-tests.sh | 2 +- .evergreen/scripts/setup-dev-env.sh | 72 +++++++++++++++++++ .evergreen/scripts/setup-encryption.sh | 2 +- .evergreen/setup-spawn-host.sh | 2 +- .evergreen/utils.sh | 4 +- .github/workflows/test-python.yml | 50 +++++++------ .gitignore | 2 + CONTRIBUTING.md | 37 +++++----- hatch.toml | 13 ---- justfile | 69 ++++++++++++++++++ 32 files changed, 252 insertions(+), 139 deletions(-) delete mode 100755 .evergreen/hatch.sh create mode 100755 .evergreen/just.sh delete mode 100755 .evergreen/scripts/ensure-hatch.sh create mode 100755 .evergreen/scripts/setup-dev-env.sh create mode 100644 justfile diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh index 92d2f1f1f8..c31f755bd9 100755 --- a/.evergreen/combine-coverage.sh +++ b/.evergreen/combine-coverage.sh @@ -8,7 +8,7 @@ set -o errexit # Exit the script with error if any of the commands fail . .evergreen/utils.sh -if [ -z "$PYTHON_BINARY" ]; then +if [ -z "${PYTHON_BINARY:-}" ]; then PYTHON_BINARY=$(find_python3) fi diff --git a/.evergreen/hatch.sh b/.evergreen/hatch.sh deleted file mode 100755 index c01dfcd19e..0000000000 --- a/.evergreen/hatch.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -set -eu - -. .evergreen/scripts/ensure-hatch.sh -hatch run "$@" diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh index e6dceb33fc..8773fa2c6d 100755 --- a/.evergreen/install-dependencies.sh +++ b/.evergreen/install-dependencies.sh @@ -13,6 +13,33 @@ else SUDO="sudo" fi +# Install just. +# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. +if [ "${CI:-}" == "true" ]; then + BIN_DIR=$DRIVERS_TOOLS_BINARIES +else + BIN_DIR=$HOME/.local/bin +fi +if [ ! -f $BIN_DIR/just ]; then + if [ "Windows_NT" = "${OS:-}" ]; then + TARGET="--target x86_64-pc-windows-msvc" + else + TARGET="" + fi + curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $TARGET --to "$BIN_DIR" || { + # CARGO_HOME is defined in configure-env.sh + export CARGO_HOME=${CARGO_HOME:-$HOME/.cargo/} + export RUSTUP_HOME="${CARGO_HOME}/.rustup" + . ${DRIVERS_TOOLS}/.evergreen/install-rust.sh + cargo install just + if [ "Windows_NT" = "${OS:-}" ]; then + mv $CARGO_HOME/just.exe $BIN_DIR/just + else + mv $CARGO_HOME/just $BIN_DIR + fi + } +fi + # Add 'server' and 'hostname_not_in_cert' as a hostnames echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts diff --git a/.evergreen/just.sh b/.evergreen/just.sh new file mode 100755 index 0000000000..bebbca8282 --- /dev/null +++ b/.evergreen/just.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -eu + +. .evergreen/scripts/setup-dev-env.sh +just "$@" diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh index d99c178fb9..d1117dcb32 100755 --- a/.evergreen/run-azurekms-fail-test.sh +++ b/.evergreen/run-azurekms-fail-test.sh @@ -8,5 +8,5 @@ PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ KEY_NAME="${AZUREKMS_KEYNAME}" \ KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ - $HERE/hatch.sh test:test-eg + $HERE/just.sh test-eg bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh index bb515a9386..d5c332fa8d 100755 --- a/.evergreen/run-azurekms-test.sh +++ b/.evergreen/run-azurekms-test.sh @@ -18,7 +18,7 @@ AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/hatch.sh test:test-eg" \ +AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/just.sh test-eg" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Running test ... end" bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh index 7ccc74b453..4c953584b2 100755 --- a/.evergreen/run-gcpkms-test.sh +++ b/.evergreen/run-gcpkms-test.sh @@ -17,6 +17,6 @@ echo "Untarring file ... begin" GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 ./.evergreen/hatch.sh test:test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 ./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Running test ... end" bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-import-time-test.sh b/.evergreen/run-import-time-test.sh index e9f6161bcc..95e3c93d25 100755 --- a/.evergreen/run-import-time-test.sh +++ b/.evergreen/run-import-time-test.sh @@ -5,7 +5,7 @@ set -x . .evergreen/utils.sh -if [ -z "$PYTHON_BINARY" ]; then +if [ -z "${PYTHON_BINARY:-}" ]; then PYTHON_BINARY=$(find_python3) fi diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 3189a6cc6c..91777be226 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -31,4 +31,4 @@ export AUTH="auth" export SET_XTRACE_ON=1 cd src rm -rf .venv -bash .evergreen/hatch.sh test:test-eg +bash .evergreen/just.sh test-eg diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 22864528c0..46c4f24969 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -32,4 +32,4 @@ fi export TEST_AUTH_OIDC=1 export COVERAGE=1 export AUTH="auth" -bash ./.evergreen/hatch.sh test:test-eg -- "${@:1}" +bash ./.evergreen/just.sh test-eg "${@:1}" diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index ff8d81a837..e6a51b3297 100755 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -16,4 +16,4 @@ export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 export PERF_TEST=1 -bash ./.evergreen/hatch.sh test:test-eg +bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh index 9e583e4f1e..a1fd92f04d 100755 --- a/.evergreen/scripts/cleanup.sh +++ b/.evergreen/scripts/cleanup.sh @@ -1,7 +1,7 @@ #!/bin/bash if [ -f "$DRIVERS_TOOLS"/.evergreen/csfle/secrets-export.sh ]; then - . .evergreen/hatch.sh encryption:teardown + bash .evergreen/teardown-encryption.sh fi rm -rf "${DRIVERS_TOOLS}" || true rm -f ./secrets-export.sh || true diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index e0c845a333..ebbffcf1db 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -14,12 +14,14 @@ fi PROJECT_DIRECTORY="$(pwd)" DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} +HATCH_CONFIG=$PROJECT_DIRECTORY/hatch_config.toml # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) CARGO_HOME=$(cygpath -m $CARGO_HOME) + HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") fi SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" @@ -32,15 +34,16 @@ fi export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" +export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS/.bin" cat < "$SCRIPT_DIR"/env.sh -set -o errexit export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" export CURRENT_VERSION="$CURRENT_VERSION" export SKIP_LEGACY_SHELL=1 export DRIVERS_TOOLS="$DRIVERS_TOOLS" export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" +export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS_BINARIES" export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" export SETDEFAULTENCODING="${SETDEFAULTENCODING:-}" export SKIP_CSOT_TESTS="${SKIP_CSOT_TESTS:-}" @@ -59,7 +62,8 @@ export skip_ECS_auth_test="${skip_ECS_auth_test:-}" export CARGO_HOME="$CARGO_HOME" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" -export PATH="$MONGODB_BINARIES:$PATH" +export HATCH_CONFIG="$HATCH_CONFIG" +export PATH="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PATH" # shellcheck disable=SC2154 export PROJECT="${project:-mongo-python-driver}" export PIP_QUIET=1 diff --git a/.evergreen/scripts/ensure-hatch.sh b/.evergreen/scripts/ensure-hatch.sh deleted file mode 100755 index e63d98bb6d..0000000000 --- a/.evergreen/scripts/ensure-hatch.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -set -eu - -HERE=$(dirname ${BASH_SOURCE:-$0}) -pushd "$(dirname "$(dirname $HERE)")" > /dev/null - -# Ensure hatch is available. -if [ ! -x "$(command -v hatch)" ]; then - # Install a virtual env with "hatch" - # Ensure there is a python venv. - . .evergreen/utils.sh - - if [ -z "${PYTHON_BINARY:-}" ]; then - PYTHON_BINARY=$(find_python3) - fi - VENV_DIR=.venv - if [ ! -d $VENV_DIR ]; then - echo "Creating virtual environment..." - createvirtualenv "$PYTHON_BINARY" .venv - echo "Creating virtual environment... done." - fi - if [ -f $VENV_DIR/Scripts/activate ]; then - . $VENV_DIR/Scripts/activate - else - . $VENV_DIR/bin/activate - fi - - python --version - - echo "Installing hatch..." - python -m pip install -U pip - python -m pip install hatch || { - # Install rust and try again. - CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} - # Handle paths on Windows. - if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - CARGO_HOME=$(cygpath -m $CARGO_HOME) - fi - export RUSTUP_HOME="${CARGO_HOME}/.rustup" - ${DRIVERS_TOOLS}/.evergreen/install-rust.sh - source "${CARGO_HOME}/env" - python -m pip install hatch - } - # Ensure hatch does not write to user or global locations. - touch hatch_config.toml - HATCH_CONFIG=$(pwd)/hatch_config.toml - if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") - fi - export HATCH_CONFIG - hatch config restore - hatch config set dirs.data "$(pwd)/.hatch/data" - hatch config set dirs.cache "$(pwd)/.hatch/cache" - - echo "Installing hatch... done." -fi -hatch --version -popd > /dev/null diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index ebcc8f3069..bbbfc745ec 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -o xtrace +set -eu file="$PROJECT_DIRECTORY/.evergreen/install-dependencies.sh" # Don't use ${file} syntax here because evergreen treats it as an empty expansion. [ -f "$file" ] && bash "$file" || echo "$file not available, skipping" diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh index 98a19f047f..30b8d5a615 100755 --- a/.evergreen/scripts/run-atlas-tests.sh +++ b/.evergreen/scripts/run-atlas-tests.sh @@ -4,4 +4,4 @@ set +x set -o errexit bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect -TEST_ATLAS=1 bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg +TEST_ATLAS=1 bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-doctests.sh b/.evergreen/scripts/run-doctests.sh index f7215ad347..5950e2c107 100755 --- a/.evergreen/scripts/run-doctests.sh +++ b/.evergreen/scripts/run-doctests.sh @@ -1,4 +1,4 @@ #!/bin/bash set -o xtrace -PYTHON_BINARY=${PYTHON_BINARY} bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh doctest:test +PYTHON_BINARY=${PYTHON_BINARY} bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh docs-test diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh index 7f936b1955..e015a34ca4 100755 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -5,4 +5,4 @@ set -eu set +x # Use the default python to bootstrap secrets. bash "${DRIVERS_TOOLS}"/.evergreen/secrets_handling/setup-secrets.sh drivers/enterprise_auth -TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg +TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh index dd9d522c8a..594a2984fa 100755 --- a/.evergreen/scripts/run-gcpkms-fail-test.sh +++ b/.evergreen/scripts/run-gcpkms-fail-test.sh @@ -4,4 +4,4 @@ export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz SKIP_SERVERS=1 bash ./.evergreen/setup-encryption.sh -SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/hatch.sh test:test-eg +SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-mockupdb-tests.sh b/.evergreen/scripts/run-mockupdb-tests.sh index 8825a0237d..32594f05d3 100755 --- a/.evergreen/scripts/run-mockupdb-tests.sh +++ b/.evergreen/scripts/run-mockupdb-tests.sh @@ -2,4 +2,4 @@ set -o xtrace export PYTHON_BINARY=${PYTHON_BINARY} -bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-mockupdb +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-mockupdb diff --git a/.evergreen/scripts/run-mongodb-aws-test.sh b/.evergreen/scripts/run-mongodb-aws-test.sh index ec20bfd06b..88c3236b3f 100755 --- a/.evergreen/scripts/run-mongodb-aws-test.sh +++ b/.evergreen/scripts/run-mongodb-aws-test.sh @@ -30,4 +30,4 @@ set -x export TEST_AUTH_AWS=1 export AUTH="auth" export SET_XTRACE_ON=1 -bash ./.evergreen/hatch.sh test:test-eg +bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-ocsp-test.sh b/.evergreen/scripts/run-ocsp-test.sh index 3c6d3b2b3b..328bd2f203 100755 --- a/.evergreen/scripts/run-ocsp-test.sh +++ b/.evergreen/scripts/run-ocsp-test.sh @@ -4,5 +4,5 @@ TEST_OCSP=1 \ PYTHON_BINARY="${PYTHON_BINARY}" \ CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ -bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg bash "${DRIVERS_TOOLS}"/.evergreen/ocsp/teardown.sh diff --git a/.evergreen/scripts/run-tests.sh b/.evergreen/scripts/run-tests.sh index 6986a0bbee..ea923b3f5e 100755 --- a/.evergreen/scripts/run-tests.sh +++ b/.evergreen/scripts/run-tests.sh @@ -51,4 +51,4 @@ GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ TEST_DATA_LAKE=${TEST_DATA_LAKE:-} \ TEST_SUITES=${TEST_SUITES:-} \ MONGODB_API_VERSION=${MONGODB_API_VERSION} \ - bash "${PROJECT_DIRECTORY}"/.evergreen/hatch.sh test:test-eg + bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh new file mode 100755 index 0000000000..7042871942 --- /dev/null +++ b/.evergreen/scripts/setup-dev-env.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" > /dev/null + +# Source the env file to pick up common variables. +if [ -f $HERE/scripts/env.sh ]; then + source $HERE/scripts/env.sh +fi + +# Set the location of the python bin dir. +if [ "Windows_NT" = "${OS:-}" ]; then + BIN_DIR=.venv/Scripts +else + BIN_DIR=.venv/bin +fi + +# Ensure there is a python venv. +if [ ! -d $BIN_DIR ]; then + . .evergreen/utils.sh + + if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) + fi + + echo "Creating virtual environment..." + createvirtualenv "$PYTHON_BINARY" .venv + echo "Creating virtual environment... done." +fi + +# Activate the virtual env. +. $BIN_DIR/activate + +# Ensure there is a local hatch. +if [ ! -f $BIN_DIR/hatch ]; then + echo "Installing hatch..." + python -m pip install hatch || { + # CARGO_HOME is defined in configure-env.sh + export CARGO_HOME=${CARGO_HOME:-$HOME/.cargo/} + export RUSTUP_HOME="${CARGO_HOME}/.rustup" + ${DRIVERS_TOOLS}/.evergreen/install-rust.sh + source "${CARGO_HOME}/env" + python -m pip install hatch + } + echo "Installing hatch... done." +fi + +# Ensure hatch does not write to user or global locations. +HATCH_CONFIG=${HATCH_CONFIG:-hatch_config.toml} +if [ ! -f ${HATCH_CONFIG} ]; then + touch hatch_config.toml + hatch config restore + hatch config set dirs.data "$(pwd)/.hatch/data" + hatch config set dirs.cache "$(pwd)/.hatch/cache" +fi + +# Ensure there is a local pre-commit if there is a git checkout. +if [ -d .git ]; then + if [ ! -f $BIN_DIR/pre-commit ]; then + python -m pip install pre-commit + fi + + # Ensure the pre-commit hook is installed. + if [ ! -f .git/hooks/pre-commit ]; then + pre-commit install + fi +fi + +# Install pymongo and its test deps. +python -m pip install ".[test]" diff --git a/.evergreen/scripts/setup-encryption.sh b/.evergreen/scripts/setup-encryption.sh index 2f167cd20b..5b73240205 100755 --- a/.evergreen/scripts/setup-encryption.sh +++ b/.evergreen/scripts/setup-encryption.sh @@ -1,5 +1,5 @@ #!/bin/bash if [ -n "${test_encryption}" ]; then - ./.evergreen/hatch.sh encryption:setup + bash .evergreen/setup-encryption.sh fi diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh index 4109e59183..c20e1c756e 100755 --- a/.evergreen/setup-spawn-host.sh +++ b/.evergreen/setup-spawn-host.sh @@ -16,4 +16,4 @@ rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_ echo "Copying files to $target... done" ssh $target $remote_dir/.evergreen/scripts/setup-system.sh -ssh $target "PYTHON_BINARY=${PYTHON_BINARY:-} $remote_dir/.evergreen/scripts/ensure-hatch.sh" +ssh $target "cd $remote_dir && PYTHON_BINARY=${PYTHON_BINARY:-} just install" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index d3af2dcc7a..e044b3d766 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -1,6 +1,6 @@ -#!/bin/bash -ex +#!/bin/bash -set -o xtrace +set -eu find_python3() { PYTHON="" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 2310b7698d..a41daaabb1 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -27,12 +27,14 @@ jobs: python-version: "3.9" cache: 'pip' cache-dependency-path: 'pyproject.toml' + - name: Install just + uses: extractions/setup-just@v2 - name: Install Python dependencies run: | - python -m pip install -U pip hatch + just install - name: Run linters run: | - hatch run lint:run-manual + just lint-manual - name: Run compilation run: | export PYMONGO_C_EXT_MUST_BUILD=1 @@ -40,7 +42,7 @@ jobs: python tools/fail_if_no_c.py - name: Run typecheck run: | - hatch run typing:check + just typing - run: | sudo apt-get install -y cppcheck - run: | @@ -73,18 +75,16 @@ jobs: cache: 'pip' cache-dependency-path: 'pyproject.toml' allow-prereleases: true + - name: Install just + uses: extractions/setup-just@v2 - name: Install dependencies run: | - pip install -U pip - if [[ "${{ matrix.python-version }}" == "3.13" ]]; then - pip install --pre cffi setuptools - pip install --no-build-isolation hatch - elif [[ "${{ matrix.python-version }}" == "3.13t" ]]; then - # Hatch can't be installed on 3.13t, use pytest directly. + if [[ "${{ matrix.python-version }}" == "3.13t" ]]; then + # Just can't be installed on 3.13t, use pytest directly. pip install . pip install -r requirements/test.txt else - pip install hatch + just install fi - name: Start MongoDB uses: supercharge/mongodb-github-action@1.12.0 @@ -95,7 +95,7 @@ jobs: if [[ "${{ matrix.python-version }}" == "3.13t" ]]; then pytest -v --durations=5 --maxfail=10 else - hatch run test:test + just test fi doctest: @@ -111,16 +111,18 @@ jobs: python-version: "3.9" cache: 'pip' cache-dependency-path: 'pyproject.toml' - - name: Install dependencies - run: | - pip install -U hatch pip + - name: Install just + uses: extractions/setup-just@v2 - name: Start MongoDB uses: supercharge/mongodb-github-action@1.12.0 with: mongodb-version: '8.0.0-rc4' + - name: Install dependencies + run: | + just install - name: Run tests run: | - hatch run doctest:test + just docs-test docs: name: Docs Checks @@ -135,12 +137,14 @@ jobs: cache-dependency-path: 'pyproject.toml' # Build docs on lowest supported Python for furo python-version: '3.9' + - name: Install just + uses: extractions/setup-just@v2 - name: Install dependencies run: | - pip install -U pip hatch + just install - name: Build docs run: | - hatch run doc:build + just docs linkcheck: name: Link Check @@ -155,12 +159,14 @@ jobs: cache-dependency-path: 'pyproject.toml' # Build docs on lowest supported Python for furo python-version: '3.9' + - name: Install just + uses: extractions/setup-just@v2 - name: Install dependencies run: | - pip install -U pip hatch + just install - name: Build docs run: | - hatch run doc:linkcheck + just docs-linkcheck typing: name: Typing Tests @@ -177,12 +183,14 @@ jobs: python-version: "${{matrix.python}}" cache: 'pip' cache-dependency-path: 'pyproject.toml' + - name: Install just + uses: extractions/setup-just@v2 - name: Install dependencies run: | - pip install -U pip hatch + just install - name: Run typecheck run: | - hatch run typing:check + just typing make_sdist: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index e4587125e8..01f896d316 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,8 @@ libmongocrypt/ libmongocrypt_git/ hatch_config.toml .venv +expansion.yml +.evergreen/scripts/env.sh # Lambda temp files test/lambda/.aws-sam diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 814e040048..5a46151760 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -28,9 +28,10 @@ including 4 space indents and 79 character line limits. - Avoid backward breaking changes if at all possible. - Write inline documentation for new classes and methods. -- We use [hatch](https://hatch.pypa.io/dev/) for our script runner and packaging tool. +- We use [hatch](https://hatch.pypa.io/dev/) for python environment management and packaging. +- We use [just](https://just.systems/man/en/) as our task runner. - Write tests and make sure they pass (make sure you have a mongod - running on the default port, then execute `hatch run test:test` from the cmd + running on the default port, then execute `just test` from the cmd line to run the test suite). - Add yourself to doc/contributors.rst `:)` @@ -148,17 +149,18 @@ To run `pre-commit` manually, run: pre-commit run --all-files ``` -To run a manual hook like `mypy` manually, run: +To run a manual hook like `ruff` manually, run: ```bash -pre-commit run --all-files --hook-stage manual mypy +pre-commit run --all-files --hook-stage manual ruff ``` -Typically we use `hatch` to run the linters, e.g. +Typically we use `just` to run the linters, e.g. ```bash -hatch run typing:check-mypy -hatch run lint:build-manual +just install # this will install a venv with pre-commit installed, and install the pre-commit hook. +just typing-mypy +just run lint-manual ``` ## Documentation @@ -176,13 +178,13 @@ documentation including narrative docs, and the [Sphinx docstring format](https: You can build the documentation locally by running: ```bash -hatch run doc:build +just docs-build ``` When updating docs, it can be helpful to run the live docs server as: ```bash -hatch run doc:serve +just docs-serve ``` Browse to the link provided, and then as you make changes to docstrings or narrative docs, @@ -192,13 +194,14 @@ the pages will re-render and the browser will automatically refresh. ## Running Tests Locally - Ensure you have started the appropriate Mongo Server(s). -- Run `pip install hatch` to use `hatch` for testing or run - `pip install -e ".[test]"` to run `pytest` directly. -- Run `hatch run test:test` or `pytest` to run all of the tests. +- Run `just install` to set up `hatch` in a local virtual environment, or you can manually + create a virtual environment and run `pytest` directly. If you want to use a specific + version of Python, remove the `.venv` folder and set `PYTHON_BINARY` before running `just install`. +- Run `just test` or `pytest` to run all of the tests. - Append `test/.py::::` to run specific tests. You can omit the `` to test a full class and the `` to test a full module. For example: - `hatch run test:test -- test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress`. + `just test test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress`. - Use the `-k` argument to select tests by pattern. ## Running Load Balancer Tests Locally @@ -211,15 +214,15 @@ the pages will re-render and the browser will automatically refresh. - Start the load balancer using: `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start`. - Run the tests from the `pymongo` checkout directory using: - `TEST_LOADBALANCER=1 hatch run test:test-eg`. + `TEST_LOADBALANCER=1 just test-eg`. ## Running Encryption Tests Locally - Clone `drivers-evergreen-tools`: `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. - Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools` -- Run `AWS_PROFILE= hatch run encryption:setup` after setting up your AWS profile with `aws configure sso`. -- Run the tests with `TEST_ENCRYPTION=1 hatch run test:test-eg`. -- When done, run `hatch run encryption:teardown` to clean up. +- Run `AWS_PROFILE= just setup-encryption` after setting up your AWS profile with `aws configure sso`. +- Run the tests with `TEST_ENCRYPTION=1 just test-eg`. +- When done, run `just teardown-encryption` to clean up. ## Re-sync Spec Tests diff --git a/hatch.toml b/hatch.toml index 60bd0af014..15d0f25f07 100644 --- a/hatch.toml +++ b/hatch.toml @@ -30,13 +30,6 @@ check-strict-pyright = [ ] check = ["check-mypy", "check-pyright", "check-strict-pyright"] -[envs.lint] -skip-install = true -dependencies = ["pre-commit"] -[envs.lint.scripts] -run = "pre-commit run --all-files" -run-manual = "pre-commit run --all-files --hook-stage manual" - [envs.test] features = ["test"] [envs.test.scripts] @@ -44,9 +37,3 @@ test = "pytest -v --durations=5 --maxfail=10 {args}" test-eg = "bash ./.evergreen/run-tests.sh {args}" test-async = "pytest -v --durations=5 --maxfail=10 -m default_async {args}" test-mockupdb = ["pip install -U git+https://github.com/mongodb-labs/mongo-mockup-db@master", "test -m mockupdb"] - -[envs.encryption] -skip-install = true -[envs.encryption.scripts] -setup = "bash .evergreen/setup-encryption.sh" -teardown = "bash .evergreen/teardown-encryption.sh" diff --git a/justfile b/justfile new file mode 100644 index 0000000000..23f0993c6b --- /dev/null +++ b/justfile @@ -0,0 +1,69 @@ +# See https://just.systems/man/en/ for instructions +set shell := ["bash", "-c"] +set dotenv-load +set dotenv-filename := "./.evergreen/scripts/env.sh" + +# Handle cross-platform paths to local python cli tools. +python_bin_dir := if os_family() == "windows" { "./.venv/Scripts" } else { "./.venv/bin" } +hatch_bin := python_bin_dir + "/hatch" +pre_commit_bin := python_bin_dir + "/pre-commit" + +# Make the default recipe private so it doesn't show up in the list. +[private] +default: + @just --list + +install: + bash .evergreen/scripts/setup-dev-env.sh + +[group('docs')] +docs: + {{hatch_bin}} run doc:build + +[group('docs')] +docs-serve: + {{hatch_bin}} run doc:serve + +[group('docs')] +docs-linkcheck: + {{hatch_bin}} run doc:linkcheck + +[group('docs')] +docs-test: + {{hatch_bin}} run doctest:test + +[group('typing')] +typing: + {{hatch_bin}} run typing:check + +[group('typing')] +typing-mypy: + {{hatch_bin}} run typing:mypy + +[group('lint')] +lint: + {{pre_commit_bin}} run --all-files + +[group('lint')] +lint-manual: + {{pre_commit_bin}} run --all-files --hook-stage manual + +[group('test')] +test *args: + {{hatch_bin}} run test:test {{args}} + +[group('test')] +test-mockupdb: + {{hatch_bin}} run test:test-mockupdb + +[group('test')] +test-eg *args: + {{hatch_bin}} run test:test-eg {{args}} + +[group('encryption')] +setup-encryption: + bash .evergreen/setup-encryption.sh + +[group('encryption')] +teardown-encryption: + bash .evergreen/teardown-encryption.sh From 14bc1f6be2126addb1cf4028e4654a0c568bfedd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 06:29:11 -0600 Subject: [PATCH 1675/2111] Bump pyright from 1.1.391 to 1.1.392.post0 (#2067) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/typing.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/typing.txt b/requirements/typing.txt index 5a2f76f6bc..b0f0c9c7fc 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,5 +1,5 @@ mypy==1.14.1 -pyright==1.1.391 +pyright==1.1.392.post0 typing_extensions -r ./encryption.txt -r ./ocsp.txt From 85877a0802bc3a3ba40113aa45a8a13e2e5f86c9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 21 Jan 2025 10:26:21 -0500 Subject: [PATCH 1676/2111] PYTHON-5048 - Synchro script should correctly process all files (#2069) --- tools/synchro.py | 49 +++++++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/tools/synchro.py b/tools/synchro.py index 577e82d14e..dbcbbd1351 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -149,6 +149,10 @@ _gridfs_dest_base = "./gridfs/synchronous/" _test_dest_base = "./test/" +if not Path.exists(Path(_pymongo_dest_base)): + Path.mkdir(Path(_pymongo_dest_base)) +if not Path.exists(Path(_gridfs_dest_base)): + Path.mkdir(Path(_gridfs_dest_base)) async_files = [ _pymongo_base + f for f in listdir(_pymongo_base) if (Path(_pymongo_base) / f).is_file() @@ -170,18 +174,6 @@ def async_only_test(f: str) -> bool: if (Path(_test_base) / f).is_file() and not async_only_test(f) ] -sync_files = [ - _pymongo_dest_base + f - for f in listdir(_pymongo_dest_base) - if (Path(_pymongo_dest_base) / f).is_file() -] - -sync_gridfs_files = [ - _gridfs_dest_base + f - for f in listdir(_gridfs_dest_base) - if (Path(_gridfs_dest_base) / f).is_file() -] - # Add each asynchronized test here as part of the converting PR converted_tests = [ "__init__.py", @@ -223,15 +215,10 @@ def async_only_test(f: str) -> bool: "unified_format.py", ] -sync_test_files = [ - _test_dest_base + f for f in converted_tests if (Path(_test_dest_base) / f).is_file() -] - - -docstring_translate_files = sync_files + sync_gridfs_files + sync_test_files - -def process_files(files: list[str]) -> None: +def process_files( + files: list[str], docstring_translate_files: list[str], sync_test_files: list[str] +) -> None: for file in files: if "__init__" not in file or "__init__" and "test" in file: with open(file, "r+") as f: @@ -374,7 +361,27 @@ def main() -> None: unasync_directory(async_files, _pymongo_base, _pymongo_dest_base, replacements) unasync_directory(gridfs_files, _gridfs_base, _gridfs_dest_base, replacements) unasync_directory(test_files, _test_base, _test_dest_base, replacements) - process_files(sync_files + sync_gridfs_files + sync_test_files) + + sync_files = [ + _pymongo_dest_base + f + for f in listdir(_pymongo_dest_base) + if (Path(_pymongo_dest_base) / f).is_file() + ] + + sync_gridfs_files = [ + _gridfs_dest_base + f + for f in listdir(_gridfs_dest_base) + if (Path(_gridfs_dest_base) / f).is_file() + ] + sync_test_files = [ + _test_dest_base + f for f in converted_tests if (Path(_test_dest_base) / f).is_file() + ] + + docstring_translate_files = sync_files + sync_gridfs_files + sync_test_files + + process_files( + sync_files + sync_gridfs_files + sync_test_files, docstring_translate_files, sync_test_files + ) if __name__ == "__main__": From 2ff2fde9111c4c9d2165af4cec358791105b1bda Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 21 Jan 2025 12:38:02 -0600 Subject: [PATCH 1677/2111] PYTHON-5049 Drop support for PyPy 3.9 (#2070) --- .evergreen/generated_configs/variants.yml | 64 +++++------------------ .evergreen/scripts/generate_config.py | 2 +- .github/workflows/test-python.yml | 2 +- README.md | 2 +- doc/changelog.rst | 4 +- doc/faq.rst | 2 +- doc/installation.rst | 2 +- doc/python3.rst | 2 +- test/asynchronous/test_client.py | 5 +- test/asynchronous/test_collection.py | 8 +-- test/asynchronous/test_database.py | 5 +- test/test_client.py | 5 +- test/test_collection.py | 8 +-- test/test_database.py | 5 +- test/test_errors.py | 12 ++--- 15 files changed, 29 insertions(+), 99 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index b1db61d492..79c9b22c93 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -256,15 +256,15 @@ buildvariants: expansions: COMPRESSORS: zstd PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: compression-snappy-rhel8-pypy3.9 + - name: compression-snappy-rhel8-pypy3.10 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 PyPy3.9 + display_name: Compression snappy RHEL8 PyPy3.10 run_on: - rhel87-small expansions: COMPRESSORS: snappy - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - name: compression-zlib-rhel8-pypy3.10 tasks: - name: .standalone .noauth .nossl .sync_async @@ -274,15 +274,15 @@ buildvariants: expansions: COMPRESSORS: zlib PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: compression-zstd-rhel8-pypy3.9 + - name: compression-zstd-rhel8-pypy3.10 tasks: - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 PyPy3.9 + display_name: Compression zstd RHEL8 PyPy3.10 run_on: - rhel87-small expansions: COMPRESSORS: zstd - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Disable test commands tests - name: disable-test-commands-rhel8-python3.9 @@ -460,15 +460,6 @@ buildvariants: test_encryption: "true" test_encryption_pyopenssl: "true" PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: encryption-rhel8-pypy3.9 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - display_name: Encryption RHEL8 PyPy3.9 - run_on: - - rhel87-small - expansions: - test_encryption: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: encryption-macos-python3.9 tasks: - name: .latest .replica_set .sync_async @@ -608,15 +599,6 @@ buildvariants: expansions: AUTH: auth PYTHON_BINARY: C:/python/Python313/python.exe - - name: auth-enterprise-rhel8-pypy3.9-auth - tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 PyPy3.9 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: auth-enterprise-rhel8-pypy3.10-auth tasks: - name: test-enterprise-auth @@ -900,10 +882,10 @@ buildvariants: TOPOLOGY: server VERSION: "8.0" PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: ocsp-rhel8-rapid-pypy3.9 + - name: ocsp-rhel8-rapid-pypy3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 rapid PyPy3.9 + display_name: OCSP RHEL8 rapid PyPy3.10 run_on: - rhel87-small batchtime: 20160 @@ -912,11 +894,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: rapid - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - - name: ocsp-rhel8-latest-pypy3.10 + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - name: ocsp-rhel8-latest-python3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 latest PyPy3.10 + display_name: OCSP RHEL8 latest Python3.9 run_on: - rhel87-small batchtime: 20160 @@ -925,7 +907,7 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: latest - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: ocsp-win64-v4.4-python3.9 tasks: - name: .ocsp-rsa !.ocsp-staple @@ -1061,17 +1043,6 @@ buildvariants: expansions: test_pyopenssl: "true" PYTHON_BINARY: C:/python/Python313/python.exe - - name: pyopenssl-rhel8-pypy3.9 - tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 PyPy3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - test_pyopenssl: "true" - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: pyopenssl-rhel8-pypy3.10 tasks: - name: .replica_set .auth .ssl .sync_async @@ -1164,17 +1135,6 @@ buildvariants: expansions: COVERAGE: coverage PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: test-rhel8-pypy3.9 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 PyPy3.9" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.9/bin/python3 - name: test-macos-python3.9 tasks: - name: .sharded_cluster .auth .ssl !.sync_async diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e8d0b171bd..2917e882d8 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -28,7 +28,7 @@ ALL_VERSIONS = ["4.0", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] -PYPYS = ["pypy3.9", "pypy3.10"] +PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] BATCHTIME_WEEK = 10080 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index a41daaabb1..3760e308a5 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -55,7 +55,7 @@ jobs: strategy: matrix: os: [ubuntu-20.04] - python-version: ["3.9", "pypy-3.9", "3.13", "3.13t"] + python-version: ["3.9", "pypy-3.10", "3.13", "3.13t"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v4 diff --git a/README.md b/README.md index bd0755620e..b8e0078101 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ package that is incompatible with PyMongo. ## Dependencies -PyMongo supports CPython 3.9+ and PyPy3.9+. +PyMongo supports CPython 3.9+ and PyPy3.10+. Required dependencies: diff --git a/doc/changelog.rst b/doc/changelog.rst index fba6713bd9..4942d85de8 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,7 +4,7 @@ Changelog Changes in Version 4.11.0 (YYYY/MM/DD) -------------------------------------- -.. warning:: PyMongo 4.11 drops support for Python 3.8: Python 3.9+ or PyPy 3.9+ is now required. +.. warning:: PyMongo 4.11 drops support for Python 3.8 and PyPy 3.9: Python 3.9+ or PyPy 3.10+ is now required. .. warning:: PyMongo 4.11 drops support for MongoDB 3.6. PyMongo now supports MongoDB 4.0+. Driver support for MongoDB 3.6 reached end of life in April 2024. .. warning:: Driver support for MongoDB 4.0 reaches end of life in April 2025. @@ -14,7 +14,7 @@ Changes in Version 4.11.0 (YYYY/MM/DD) PyMongo 4.11 brings a number of changes including: -- Dropped support for Python 3.8. +- Dropped support for Python 3.8 and PyPy 3.9. - Dropped support for MongoDB 3.6. - Dropped support for the MONGODB-CR authenticate mechanism, which is no longer supported by MongoDB 4.0+. - pymongocrypt>=1.12 is now required for :ref:`In-Use Encryption` support. diff --git a/doc/faq.rst b/doc/faq.rst index 15950e7716..73d0ec8966 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -166,7 +166,7 @@ they are returned to the pool. Does PyMongo support Python 3? ------------------------------ -PyMongo supports CPython 3.9+ and PyPy3.9+. See the :doc:`python3` for details. +PyMongo supports CPython 3.9+ and PyPy3.10+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- diff --git a/doc/installation.rst b/doc/installation.rst index f21a3792ad..abda06db16 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -28,7 +28,7 @@ To upgrade using pip:: Dependencies ------------ -PyMongo supports CPython 3.9+ and PyPy3.9+. +PyMongo supports CPython 3.9+ and PyPy3.10+. Required dependencies ..................... diff --git a/doc/python3.rst b/doc/python3.rst index 1ea43b3ccb..0a63f968a5 100644 --- a/doc/python3.rst +++ b/doc/python3.rst @@ -4,7 +4,7 @@ Python 3 FAQ What Python 3 versions are supported? ------------------------------------- -PyMongo supports CPython 3.9+ and PyPy3.9+. +PyMongo supports CPython 3.9+ and PyPy3.10+. Are there any PyMongo behavior changes with Python 3? ----------------------------------------------------- diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index db232386ee..744a170be2 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -237,10 +237,7 @@ def test_getattr(self): def test_iteration(self): client = self.client - if "PyPy" in sys.version and sys.version_info < (3, 8, 15): - msg = "'NoneType' object is not callable" - else: - msg = "'AsyncMongoClient' object is not iterable" + msg = "'AsyncMongoClient' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in client: # type: ignore[misc] # error: "None" not callable [misc] diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 528919f63c..beb58012a8 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -133,13 +133,7 @@ def test_getattr(self): def test_iteration(self): coll = self.db.coll - if "PyPy" in sys.version and sys.version_info < (3, 8, 15): - msg = "'NoneType' object is not callable" - else: - if _IS_SYNC: - msg = "'Collection' object is not iterable" - else: - msg = "'AsyncCollection' object is not iterable" + msg = "'AsyncCollection' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index b5a5960420..55a8cc3ab2 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -103,10 +103,7 @@ def test_getattr(self): def test_iteration(self): db = self.client.pymongo_test - if "PyPy" in sys.version and sys.version_info < (3, 8, 15): - msg = "'NoneType' object is not callable" - else: - msg = "'AsyncDatabase' object is not iterable" + msg = "'AsyncDatabase' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in db: # type: ignore[misc] # error: "None" not callable [misc] diff --git a/test/test_client.py b/test/test_client.py index 5ec425f312..2a33077f5f 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -234,10 +234,7 @@ def test_getattr(self): def test_iteration(self): client = self.client - if "PyPy" in sys.version and sys.version_info < (3, 8, 15): - msg = "'NoneType' object is not callable" - else: - msg = "'MongoClient' object is not iterable" + msg = "'MongoClient' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in client: # type: ignore[misc] # error: "None" not callable [misc] diff --git a/test/test_collection.py b/test/test_collection.py index af524bba47..8a862646eb 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -133,13 +133,7 @@ def test_getattr(self): def test_iteration(self): coll = self.db.coll - if "PyPy" in sys.version and sys.version_info < (3, 8, 15): - msg = "'NoneType' object is not callable" - else: - if _IS_SYNC: - msg = "'Collection' object is not iterable" - else: - msg = "'Collection' object is not iterable" + msg = "'Collection' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] diff --git a/test/test_database.py b/test/test_database.py index 5e854c941d..aad9089bd8 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -102,10 +102,7 @@ def test_getattr(self): def test_iteration(self): db = self.client.pymongo_test - if "PyPy" in sys.version and sys.version_info < (3, 8, 15): - msg = "'NoneType' object is not callable" - else: - msg = "'Database' object is not iterable" + msg = "'Database' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in db: # type: ignore[misc] # error: "None" not callable [misc] diff --git a/test/test_errors.py b/test/test_errors.py index 2cee7c15d8..d6db6a4ec1 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -47,15 +47,9 @@ def test_operation_failure(self): self.assertIn("full error", traceback.format_exc()) def _test_unicode_strs(self, exc): - if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): - # PyPy used to display unicode in repr differently. - self.assertEqual( - "unicode \U0001f40d, full error: {'errmsg': 'unicode \\U0001f40d'}", str(exc) - ) - else: - self.assertEqual( - "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) - ) + self.assertEqual( + "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) + ) try: raise exc except Exception: From 7dba1e5dd9c8b483e327f295f8b0bf0cad5e3be8 Mon Sep 17 00:00:00 2001 From: Jib Date: Tue, 21 Jan 2025 14:37:52 -0500 Subject: [PATCH 1678/2111] PYTHON-5043: Fix list[int, float] typo in binary.py (#2066) --- bson/binary.py | 7 +++++-- test/test_typing.py | 21 ++++++++++++++++++++- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index 6dc5058c2c..f90dce226c 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -405,14 +405,17 @@ def from_vector(cls: Type[Binary], vector: BinaryVector) -> Binary: @classmethod @overload def from_vector( - cls: Type[Binary], vector: list[int, float], dtype: BinaryVectorDtype, padding: int = 0 + cls: Type[Binary], + vector: Union[list[int], list[float]], + dtype: BinaryVectorDtype, + padding: int = 0, ) -> Binary: ... @classmethod def from_vector( cls: Type[Binary], - vector: Union[BinaryVector, list[int, float]], + vector: Union[BinaryVector, list[int], list[float]], dtype: Optional[BinaryVectorDtype] = None, padding: Optional[int] = None, ) -> Binary: diff --git a/test/test_typing.py b/test/test_typing.py index bfe4d032c1..65937020d2 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -15,6 +15,7 @@ """Test that each file in mypy_fails/ actually fails mypy, and test some sample client code that uses PyMongo typings. """ + from __future__ import annotations import os @@ -37,7 +38,8 @@ if TYPE_CHECKING: from typing_extensions import NotRequired, TypedDict - from bson import ObjectId + from bson import Binary, ObjectId + from bson.binary import BinaryVector, BinaryVectorDtype class Movie(TypedDict): name: str @@ -591,5 +593,22 @@ def test_son_document_type(self) -> None: obj["a"] = 1 +class TestBSONFromVectorType(unittest.TestCase): + @only_type_check + def test_from_vector_binaryvector(self): + list_vector = BinaryVector([127, 7], BinaryVectorDtype.INT8) + Binary.from_vector(list_vector) + + @only_type_check + def test_from_vector_list_int(self): + list_vector = [127, 7] + Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + + @only_type_check + def test_from_vector_list_float(self): + list_vector = [127.0, 7.0] + Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + + if __name__ == "__main__": unittest.main() From 2235b8354cef0acc0b41321fc103d14acf0ef92f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 21 Jan 2025 16:22:14 -0600 Subject: [PATCH 1679/2111] PYTHON-5050 Clean up handling of installed dependencies across deployment targets (#2071) --- .evergreen/install-dependencies.sh | 45 ---------------------- .evergreen/run-azurekms-test.sh | 13 +++++-- .evergreen/run-gcpkms-test.sh | 11 ++++-- .evergreen/scripts/install-dependencies.sh | 42 ++++++++++++++++++-- .evergreen/scripts/prepare-resources.sh | 23 ++++++++--- .evergreen/scripts/setup-dev-env.sh | 2 + 6 files changed, 75 insertions(+), 61 deletions(-) delete mode 100755 .evergreen/install-dependencies.sh diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh deleted file mode 100755 index 8773fa2c6d..0000000000 --- a/.evergreen/install-dependencies.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -eu - -# Copy PyMongo's test certificates over driver-evergreen-tools' -cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ - -# Replace MongoOrchestration's client certificate. -cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem - -if [ -w /etc/hosts ]; then - SUDO="" -else - SUDO="sudo" -fi - -# Install just. -# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. -if [ "${CI:-}" == "true" ]; then - BIN_DIR=$DRIVERS_TOOLS_BINARIES -else - BIN_DIR=$HOME/.local/bin -fi -if [ ! -f $BIN_DIR/just ]; then - if [ "Windows_NT" = "${OS:-}" ]; then - TARGET="--target x86_64-pc-windows-msvc" - else - TARGET="" - fi - curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $TARGET --to "$BIN_DIR" || { - # CARGO_HOME is defined in configure-env.sh - export CARGO_HOME=${CARGO_HOME:-$HOME/.cargo/} - export RUSTUP_HOME="${CARGO_HOME}/.rustup" - . ${DRIVERS_TOOLS}/.evergreen/install-rust.sh - cargo install just - if [ "Windows_NT" = "${OS:-}" ]; then - mv $CARGO_HOME/just.exe $BIN_DIR/just - else - mv $CARGO_HOME/just $BIN_DIR - fi - } -fi - -# Add 'server' and 'hostname_not_in_cert' as a hostnames -echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts -echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh index d5c332fa8d..28a84a52e2 100755 --- a/.evergreen/run-azurekms-test.sh +++ b/.evergreen/run-azurekms-test.sh @@ -8,17 +8,22 @@ export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz SKIP_SERVERS=1 bash $HERE/setup-encryption.sh -tar czf /tmp/mongo-python-driver.tgz . +# Set up the remote files to test. +git add . +git commit -m "add files" || true +git archive -o /tmp/mongo-python-driver.tar HEAD +tar -rf /tmp/mongo-python-driver.tar libmongocrypt +gzip -f /tmp/mongo-python-driver.tar # shellcheck disable=SC2088 -AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" AZUREKMS_DST="~/" \ +AZUREKMS_SRC="/tmp/mongo-python-driver.tar.gz" AZUREKMS_DST="~/" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh echo "Copying files ... end" echo "Untarring file ... begin" -AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ +AZUREKMS_CMD="tar xf mongo-python-driver.tar.gz" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" SUCCESS=true TEST_FLE_AZURE_AUTO=1 ./.evergreen/just.sh test-eg" \ +AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" SUCCESS=true TEST_FLE_AZURE_AUTO=1 bash ./.evergreen/just.sh test-eg" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Running test ... end" bash $HERE/teardown-encryption.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh index 4c953584b2..37ec2bfe56 100755 --- a/.evergreen/run-gcpkms-test.sh +++ b/.evergreen/run-gcpkms-test.sh @@ -10,11 +10,16 @@ export GCPKMS_ZONE=${GCPKMS_ZONE} export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz SKIP_SERVERS=1 bash $HERE/setup-encryption.sh -tar czf /tmp/mongo-python-driver.tgz . -GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh +# Set up the remote files to test. +git add . +git commit -m "add files" || true +git archive -o /tmp/mongo-python-driver.tar HEAD +tar -rf /tmp/mongo-python-driver.tar libmongocrypt +gzip -f /tmp/mongo-python-driver.tar +GCPKMS_SRC=/tmp/mongo-python-driver.tar.gz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh echo "Copying files ... end" echo "Untarring file ... begin" -GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="tar xf mongo-python-driver.tar.gz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 ./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index bbbfc745ec..2b127889aa 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -1,6 +1,42 @@ #!/bin/bash set -eu -file="$PROJECT_DIRECTORY/.evergreen/install-dependencies.sh" -# Don't use ${file} syntax here because evergreen treats it as an empty expansion. -[ -f "$file" ] && bash "$file" || echo "$file not available, skipping" + +# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. +if [ "${CI:-}" == "true" ]; then + _BIN_DIR=${DRIVERS_TOOLS_BINARIES:-} +else + _BIN_DIR=$HOME/.local/bin +fi + + +# Helper function to pip install a dependency using a temporary python env. +function _pip_install() { + _HERE=$(dirname ${BASH_SOURCE:-$0}) + . $_HERE/../utils.sh + _VENV_PATH=$(mktemp -d) + echo "Installing $2 using pip..." + createvirtualenv "$(find_python3)" $_VENV_PATH + python -m pip install $1 + ln -s "$(which $2)" $_BIN_DIR/$2 + echo "Installing $2 using pip... done." +} + + +# Ensure just is installed. +if ! command -v just 2>/dev/null; then + # On most systems we can install directly. + _TARGET="" + if [ "Windows_NT" = "${OS:-}" ]; then + _TARGET="--target x86_64-pc-windows-msvc" + fi + echo "Installing just..." + mkdir -p "$_BIN_DIR" 2>/dev/null || true + curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { + _pip_install rust-just just + } + if ! command -v just 2>/dev/null; then + export PATH="$PATH:$_BIN_DIR" + fi + echo "Installing just... done." +fi diff --git a/.evergreen/scripts/prepare-resources.sh b/.evergreen/scripts/prepare-resources.sh index 3cfa2c4efd..da869e7055 100755 --- a/.evergreen/scripts/prepare-resources.sh +++ b/.evergreen/scripts/prepare-resources.sh @@ -6,12 +6,23 @@ pushd $HERE . env.sh rm -rf $DRIVERS_TOOLS -if [ "$PROJECT" = "drivers-tools" ]; then - # If this was a patch build, doing a fresh clone would not actually test the patch - cp -R $PROJECT_DIRECTORY/ $DRIVERS_TOOLS -else - git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS -fi +git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" >$MONGO_ORCHESTRATION_HOME/orchestration.config popd + +# Copy PyMongo's test certificates over driver-evergreen-tools' +cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ + +# Replace MongoOrchestration's client certificate. +cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem + +if [ -w /etc/hosts ]; then + SUDO="" +else + SUDO="sudo" +fi + +# Add 'server' and 'hostname_not_in_cert' as a hostnames +echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts +echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 7042871942..bfe0bc5b9a 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -17,6 +17,8 @@ else BIN_DIR=.venv/bin fi +. $HERE/install-dependencies.sh + # Ensure there is a python venv. if [ ! -d $BIN_DIR ]; then . .evergreen/utils.sh From f1af9178946c51f8f200cf6960f37b610c294158 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 22 Jan 2025 08:49:16 -0500 Subject: [PATCH 1680/2111] =?UTF-8?q?PYTHON-5044=20-=20Fix=20successive=20?= =?UTF-8?q?AsyncMongoClients=20on=20a=20single=20loop=20always=20ti?= =?UTF-8?q?=E2=80=A6=20(#2065)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/network_layer.py | 31 ++++++++++++------- pymongo/periodic_executor.py | 9 +----- ...nnections_survive_primary_stepdown_spec.py | 1 - ...nnections_survive_primary_stepdown_spec.py | 1 - 4 files changed, 20 insertions(+), 22 deletions(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index c1db31f89c..11c66bf16e 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -267,18 +267,25 @@ async def async_receive_data( else: read_task = create_task(_async_receive(sock, length, loop)) # type: ignore[arg-type] tasks = [read_task, cancellation_task] - done, pending = await asyncio.wait( - tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED - ) - for task in pending: - task.cancel() - if pending: - await asyncio.wait(pending) - if len(done) == 0: - raise socket.timeout("timed out") - if read_task in done: - return read_task.result() - raise _OperationCancelled("operation cancelled") + try: + done, pending = await asyncio.wait( + tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED + ) + for task in pending: + task.cancel() + if pending: + await asyncio.wait(pending) + if len(done) == 0: + raise socket.timeout("timed out") + if read_task in done: + return read_task.result() + raise _OperationCancelled("operation cancelled") + except asyncio.CancelledError: + for task in tasks: + task.cancel() + await asyncio.wait(tasks) + raise + finally: sock.settimeout(sock_timeout) diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 2f89b91deb..9b10f6e7e3 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -78,14 +78,7 @@ def close(self, dummy: Any = None) -> None: async def join(self, timeout: Optional[int] = None) -> None: if self._task is not None: - try: - await asyncio.wait_for(self._task, timeout=timeout) # type-ignore: [arg-type] - except asyncio.TimeoutError: - # Task timed out - pass - except asyncio.exceptions.CancelledError: - # Task was already finished, or not yet started. - raise + await asyncio.wait([self._task], timeout=timeout) # type-ignore: [arg-type] def wake(self) -> None: """Execute the target function soon.""" diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py index 4795d3937a..7c11742a90 100644 --- a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -22,7 +22,6 @@ from test.asynchronous import ( AsyncIntegrationTest, async_client_context, - reset_client_context, unittest, ) from test.asynchronous.helpers import async_repl_set_step_down diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 1fb08cbed5..9cac633301 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -22,7 +22,6 @@ from test import ( IntegrationTest, client_context, - reset_client_context, unittest, ) from test.helpers import repl_set_step_down From cfe7784db952ccad09ab6d3afd9629fca792a85a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 22 Jan 2025 08:48:17 -0600 Subject: [PATCH 1681/2111] PYTHON-4976 Replace hatch with uv as our python environment and workfow tool (#2068) --- .evergreen/run-tests.sh | 74 +- .evergreen/scripts/configure-env.sh | 10 +- .evergreen/scripts/generate_config.py | 2 +- .evergreen/scripts/install-dependencies.sh | 13 + .evergreen/scripts/setup-dev-env.sh | 64 +- .evergreen/teardown-encryption.sh | 2 +- .gitignore | 3 +- CONTRIBUTING.md | 6 +- README.md | 17 +- doc/index.rst | 9 +- hatch.toml | 39 - justfile | 44 +- pymongo/compression_support.py | 2 +- pyproject.toml | 24 + requirements/typing.txt | 7 - strict_pyrightconfig.json | 1 + uv.lock | 2092 ++++++++++++++++++++ 17 files changed, 2234 insertions(+), 175 deletions(-) delete mode 100644 hatch.toml delete mode 100644 requirements/typing.txt create mode 100644 strict_pyrightconfig.json create mode 100644 uv.lock diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 95fe10a6c3..d647955059 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -37,8 +37,7 @@ export PIP_QUIET=1 # Quiet by default export PIP_PREFER_BINARY=1 # Prefer binary dists by default set +x -python -c "import sys; sys.exit(sys.prefix == sys.base_prefix)" || (echo "Not inside a virtual env!"; exit 1) -PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") +PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") # Try to source local Drivers Secrets if [ -f ./secrets-export.sh ]; then @@ -48,9 +47,13 @@ else echo "Not sourcing secrets" fi -# Ensure C extensions have compiled. +# Start compiling the args we'll pass to uv. +# Run in an isolated environment so as not to pollute the base venv. +UV_ARGS=("--isolated --extra test") + +# Ensure C extensions if applicable. if [ -z "${NO_EXT:-}" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - python tools/fail_if_no_c.py + uv run tools/fail_if_no_c.py fi if [ "$AUTH" != "noauth" ]; then @@ -77,7 +80,7 @@ if [ "$AUTH" != "noauth" ]; then fi if [ -n "$TEST_ENTERPRISE_AUTH" ]; then - python -m pip install '.[gssapi]' + UV_ARGS+=("--extra gssapi") if [ "Windows_NT" = "$OS" ]; then echo "Setting GSSAPI_PASS" export GSSAPI_PASS=${SASL_PASS} @@ -118,24 +121,26 @@ if [ "$SSL" != "nossl" ]; then fi if [ "$COMPRESSORS" = "snappy" ]; then - python -m pip install '.[snappy]' + UV_ARGS+=("--extra snappy") elif [ "$COMPRESSORS" = "zstd" ]; then - python -m pip install zstandard + UV_ARGS+=("--extra zstandard") fi # PyOpenSSL test setup. if [ -n "$TEST_PYOPENSSL" ]; then - python -m pip install '.[ocsp]' + UV_ARGS+=("--extra ocsp") fi if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then - # Check for libmongocrypt checkout. + # Check for libmongocrypt download. if [ ! -d "libmongocrypt" ]; then echo "Run encryption setup first!" exit 1 fi - python -m pip install '.[encryption]' + UV_ARGS+=("--extra encryption") + # TODO: Test with 'pip install pymongocrypt' + UV_ARGS+=("--group pymongocrypt_source") # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE=$(pwd)/libmongocrypt/nocrypto @@ -155,21 +160,17 @@ if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE exit 1 fi export PYMONGOCRYPT_LIB - - # TODO: Test with 'pip install pymongocrypt' - if [ ! -d "libmongocrypt_git" ]; then - git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git - fi - python -m pip install -U setuptools - python -m pip install ./libmongocrypt_git/bindings/python - python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" - python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" - # PATH is updated by PREPARE_SHELL for access to mongocryptd. + # Ensure pymongocrypt is working properly. + # shellcheck disable=SC2048 + uv run ${UV_ARGS[*]} python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" + # shellcheck disable=SC2048 + uv run ${UV_ARGS[*]} python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" + # PATH is updated by configure-env.sh for access to mongocryptd. fi if [ -n "$TEST_ENCRYPTION" ]; then if [ -n "$TEST_ENCRYPTION_PYOPENSSL" ]; then - python -m pip install '.[ocsp]' + UV_ARGS+=("--extra ocsp") fi if [ -n "$TEST_CRYPT_SHARED" ]; then @@ -214,22 +215,22 @@ if [ -n "$TEST_ATLAS" ]; then fi if [ -n "$TEST_OCSP" ]; then - python -m pip install ".[ocsp]" + UV_ARGS+=("--extra ocsp") TEST_SUITES="ocsp" fi if [ -n "$TEST_AUTH_AWS" ]; then - python -m pip install ".[aws]" + UV_ARGS+=("--extra aws") TEST_SUITES="auth_aws" fi if [ -n "$TEST_AUTH_OIDC" ]; then - python -m pip install ".[aws]" + UV_ARGS+=("--extra aws") TEST_SUITES="auth_oidc" fi if [ -n "$PERF_TEST" ]; then - python -m pip install simplejson + UV_ARGS+=("--group perf") start_time=$(date +%s) TEST_SUITES="perf" # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively @@ -237,8 +238,8 @@ if [ -n "$PERF_TEST" ]; then TEST_ARGS="test/performance/perf_test.py $TEST_ARGS" fi -echo "Running $AUTH tests over $SSL with python $(which python)" -python -c 'import sys; print(sys.version)' +echo "Running $AUTH tests over $SSL with python $(uv python find)" +uv run python -c 'import sys; print(sys.version)' # Run the tests, and store the results in Evergreen compatible XUnit XML @@ -249,27 +250,30 @@ python -c 'import sys; print(sys.version)' if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then # Keep in sync with combine-coverage.sh. # coverage >=5 is needed for relative_files=true. - python -m pip install pytest-cov "coverage>=5,<=7.5" + UV_ARGS+=("--group coverage") TEST_ARGS="$TEST_ARGS --cov" fi if [ -n "$GREEN_FRAMEWORK" ]; then - python -m pip install $GREEN_FRAMEWORK + UV_ARGS+=("--group $GREEN_FRAMEWORK") fi # Show the installed packages -PIP_QUIET=0 python -m pip list +# shellcheck disable=SC2048 +PIP_QUIET=0 uv run ${UV_ARGS[*]} --with pip pip list if [ -z "$GREEN_FRAMEWORK" ]; then # Use --capture=tee-sys so pytest prints test output inline: # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html - if [ -z "$TEST_SUITES" ]; then - python -m pytest -v --capture=tee-sys --durations=5 $TEST_ARGS - else - python -m pytest -v --capture=tee-sys --durations=5 -m $TEST_SUITES $TEST_ARGS + PYTEST_ARGS="-v --capture=tee-sys --durations=5 $TEST_ARGS" + if [ -n "$TEST_SUITES" ]; then + PYTEST_ARGS="-m $TEST_SUITES $PYTEST_ARGS" fi + # shellcheck disable=SC2048 + uv run ${UV_ARGS[*]} pytest $PYTEST_ARGS else - python green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS + # shellcheck disable=SC2048 + uv run ${UV_ARGS[*]} green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS fi # Handle perf test post actions. diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index ebbffcf1db..cb018d09f0 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -14,14 +14,16 @@ fi PROJECT_DIRECTORY="$(pwd)" DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} -HATCH_CONFIG=$PROJECT_DIRECTORY/hatch_config.toml +UV_TOOL_DIR=$PROJECT_DIRECTORY/.local/uv/tools +UV_CACHE_DIR=$PROJECT_DIRECTORY/.local/uv/cache # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) CARGO_HOME=$(cygpath -m $CARGO_HOME) - HATCH_CONFIG=$(cygpath -m "$HATCH_CONFIG") + UV_TOOL_DIR=$(cygpath -m "$UV_TOOL_DIR") + UV_CACHE_DIR=$(cygpath -m "$UV_CACHE_DIR") fi SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" @@ -62,7 +64,9 @@ export skip_ECS_auth_test="${skip_ECS_auth_test:-}" export CARGO_HOME="$CARGO_HOME" export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" -export HATCH_CONFIG="$HATCH_CONFIG" +export UV_TOOL_DIR="$UV_TOOL_DIR" +export UV_CACHE_DIR="$UV_CACHE_DIR" +export UV_TOOL_BIN_DIR="$DRIVERS_TOOLS_BINARIES" export PATH="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PATH" # shellcheck disable=SC2154 export PROJECT="${project:-mongo-python-driver}" diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 2917e882d8..e9624ab109 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -6,7 +6,7 @@ # ] # /// -# Note: Run this file with `hatch run`, `pipx run`, or `uv run`. +# Note: Run this file with `pipx run`, or `uv run`. from __future__ import annotations import sys diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 2b127889aa..39b77199bb 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -40,3 +40,16 @@ if ! command -v just 2>/dev/null; then fi echo "Installing just... done." fi + +# Install uv. +if ! command -v uv 2>/dev/null; then + echo "Installing uv..." + # On most systems we can install directly. + curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { + _pip_install uv uv + } + if ! command -v uv 2>/dev/null; then + export PATH="$PATH:$_BIN_DIR" + fi + echo "Installing uv... done." +fi diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index bfe0bc5b9a..3f8d0c4292 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -6,10 +6,14 @@ HERE=$(dirname ${BASH_SOURCE:-$0}) pushd "$(dirname "$(dirname $HERE)")" > /dev/null # Source the env file to pick up common variables. -if [ -f $HERE/scripts/env.sh ]; then - source $HERE/scripts/env.sh +if [ -f $HERE/env.sh ]; then + source $HERE/env.sh fi +# Ensure dependencies are installed. +. $HERE/install-dependencies.sh + + # Set the location of the python bin dir. if [ "Windows_NT" = "${OS:-}" ]; then BIN_DIR=.venv/Scripts @@ -17,8 +21,6 @@ else BIN_DIR=.venv/bin fi -. $HERE/install-dependencies.sh - # Ensure there is a python venv. if [ ! -d $BIN_DIR ]; then . .evergreen/utils.sh @@ -26,49 +28,15 @@ if [ ! -d $BIN_DIR ]; then if [ -z "${PYTHON_BINARY:-}" ]; then PYTHON_BINARY=$(find_python3) fi - - echo "Creating virtual environment..." - createvirtualenv "$PYTHON_BINARY" .venv - echo "Creating virtual environment... done." -fi - -# Activate the virtual env. -. $BIN_DIR/activate - -# Ensure there is a local hatch. -if [ ! -f $BIN_DIR/hatch ]; then - echo "Installing hatch..." - python -m pip install hatch || { - # CARGO_HOME is defined in configure-env.sh - export CARGO_HOME=${CARGO_HOME:-$HOME/.cargo/} - export RUSTUP_HOME="${CARGO_HOME}/.rustup" - ${DRIVERS_TOOLS}/.evergreen/install-rust.sh - source "${CARGO_HOME}/env" - python -m pip install hatch - } - echo "Installing hatch... done." -fi - -# Ensure hatch does not write to user or global locations. -HATCH_CONFIG=${HATCH_CONFIG:-hatch_config.toml} -if [ ! -f ${HATCH_CONFIG} ]; then - touch hatch_config.toml - hatch config restore - hatch config set dirs.data "$(pwd)/.hatch/data" - hatch config set dirs.cache "$(pwd)/.hatch/cache" + export UV_PYTHON=${PYTHON_BINARY} + echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh fi - -# Ensure there is a local pre-commit if there is a git checkout. -if [ -d .git ]; then - if [ ! -f $BIN_DIR/pre-commit ]; then - python -m pip install pre-commit - fi - - # Ensure the pre-commit hook is installed. - if [ ! -f .git/hooks/pre-commit ]; then - pre-commit install - fi +echo "Using python $UV_PYTHON" +uv sync +uv run --with pip pip install -e . +echo "Setting up python environment... done." + +# Ensure there is a pre-commit hook if there is a git checkout. +if [ -d .git ] && [ ! -f .git/hooks/pre-commit ]; then + uv run pre-commit install fi - -# Install pymongo and its test deps. -python -m pip install ".[test]" diff --git a/.evergreen/teardown-encryption.sh b/.evergreen/teardown-encryption.sh index 88dc16bba8..5ce2f1d71b 100755 --- a/.evergreen/teardown-encryption.sh +++ b/.evergreen/teardown-encryption.sh @@ -7,4 +7,4 @@ if [ -z "${DRIVERS_TOOLS}" ]; then fi bash ${DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh -rm -rf libmongocrypt/ libmongocrypt_git/ libmongocrypt.tar.gz mongocryptd.pid +rm -rf libmongocrypt/ libmongocrypt.tar.gz mongocryptd.pid diff --git a/.gitignore b/.gitignore index 01f896d316..2582c517fd 100644 --- a/.gitignore +++ b/.gitignore @@ -22,10 +22,9 @@ venv/ secrets-export.sh libmongocrypt.tar.gz libmongocrypt/ -libmongocrypt_git/ -hatch_config.toml .venv expansion.yml +*expansions.yml .evergreen/scripts/env.sh # Lambda temp files diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5a46151760..536110fcfc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,7 @@ be of interest or that has already been addressed. ## Supported Interpreters -PyMongo supports CPython 3.9+ and PyPy3.9+. Language features not +PyMongo supports CPython 3.9+ and PyPy3.10+. Language features not supported by all interpreters can not be used. ## Style Guide @@ -28,7 +28,7 @@ including 4 space indents and 79 character line limits. - Avoid backward breaking changes if at all possible. - Write inline documentation for new classes and methods. -- We use [hatch](https://hatch.pypa.io/dev/) for python environment management and packaging. +- We use [uv](https://docs.astral.sh/uv/) for python environment management and packaging. - We use [just](https://just.systems/man/en/) as our task runner. - Write tests and make sure they pass (make sure you have a mongod running on the default port, then execute `just test` from the cmd @@ -194,7 +194,7 @@ the pages will re-render and the browser will automatically refresh. ## Running Tests Locally - Ensure you have started the appropriate Mongo Server(s). -- Run `just install` to set up `hatch` in a local virtual environment, or you can manually +- Run `just install` to set a local virtual environment, or you can manually create a virtual environment and run `pytest` directly. If you want to use a specific version of Python, remove the `.venv` folder and set `PYTHON_BINARY` before running `just install`. - Run `just test` or `pytest` to run all of the tests. diff --git a/README.md b/README.md index b8e0078101..962d0d958c 100644 --- a/README.md +++ b/README.md @@ -152,11 +152,6 @@ command: python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" ``` -Additional dependencies are: - -- (to generate documentation or run tests) - [hatch](https://hatch.pypa.io/dev/) - ## Examples Here's a basic example (for more see the *examples* section of the @@ -201,8 +196,7 @@ ObjectId('4aba160ee23f6b543e000002') Documentation is available at [pymongo.readthedocs.io](https://pymongo.readthedocs.io/en/stable/). -Documentation can be generated by running **pip install hatch; hatch run doc:build**. Generated -documentation can be found in the `doc/build/html/` directory. +See the [contributing guide](./CONTRIBUTING.md#documentation) for how to build the documentation. ## Learning Resources @@ -213,10 +207,11 @@ Center](https://www.mongodb.com/developer/languages/python/). ## Testing -The easiest way to run the tests is to run *hatch run test:test** in the root -of the distribution. For example, +The easiest way to run the tests is to run the following from the repository root. ```bash -pip install hatch -hatch run test:test +pip install -e ".[test]" +pytest ``` + +For more advanced testing scenarios, see the [contributing guide](./CONTRIBUTING.md#running-tests-locally). diff --git a/doc/index.rst b/doc/index.rst index 0ac8bdec6e..079738314a 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -106,13 +106,8 @@ About This Documentation This documentation is generated using the `Sphinx `_ documentation generator. The source files for the documentation are located in the *doc/* directory of the -**PyMongo** distribution. To generate the docs locally run the -following command from the root directory of the **PyMongo** source: - -.. code-block:: bash - - $ pip install hatch - $ hatch run doc:build +**PyMongo** distribution. See the PyMongo `contributing guide `_ +for instructions on the building the docs from source. Indices and tables ------------------ diff --git a/hatch.toml b/hatch.toml deleted file mode 100644 index 15d0f25f07..0000000000 --- a/hatch.toml +++ /dev/null @@ -1,39 +0,0 @@ -# See https://hatch.pypa.io/dev/config/environment/overview/ - -[envs.doc] -features = ["docs"] -[envs.doc.scripts] -build = "sphinx-build -W -b html doc ./doc/_build/html" -serve = "sphinx-autobuild -W -b html doc --watch ./pymongo --watch ./bson --watch ./gridfs ./doc/_build/serve" -linkcheck = "sphinx-build -E -b linkcheck doc ./doc/_build/linkcheck" - -[envs.doctest] -features = ["docs","test"] -[envs.doctest.scripts] -test = "sphinx-build -E -b doctest doc ./doc/_build/doctest" - -[envs.typing] -pre-install-commands = [ - "pip install -q -r requirements/typing.txt", -] -[envs.typing.scripts] -check-mypy = [ - "mypy --install-types --non-interactive bson gridfs tools pymongo", - "mypy --install-types --non-interactive --config-file mypy_test.ini test", - "mypy --install-types --non-interactive test/test_typing.py test/test_typing_strict.py" -] -check-pyright = ["rm -f pyrightconfig.json", "pyright test/test_typing.py test/test_typing_strict.py"] -check-strict-pyright = [ - "echo '{{\"strict\": [\"tests/test_typing_strict.py\"]}}' > pyrightconfig.json", - "pyright test/test_typing_strict.py", - "rm -f pyrightconfig.json" -] -check = ["check-mypy", "check-pyright", "check-strict-pyright"] - -[envs.test] -features = ["test"] -[envs.test.scripts] -test = "pytest -v --durations=5 --maxfail=10 {args}" -test-eg = "bash ./.evergreen/run-tests.sh {args}" -test-async = "pytest -v --durations=5 --maxfail=10 -m default_async {args}" -test-mockupdb = ["pip install -U git+https://github.com/mongodb-labs/mongo-mockup-db@master", "test -m mockupdb"] diff --git a/justfile b/justfile index 23f0993c6b..6bcfe0c79c 100644 --- a/justfile +++ b/justfile @@ -3,10 +3,12 @@ set shell := ["bash", "-c"] set dotenv-load set dotenv-filename := "./.evergreen/scripts/env.sh" -# Handle cross-platform paths to local python cli tools. -python_bin_dir := if os_family() == "windows" { "./.venv/Scripts" } else { "./.venv/bin" } -hatch_bin := python_bin_dir + "/hatch" -pre_commit_bin := python_bin_dir + "/pre-commit" +# Commonly used command segments. +uv_run := "uv run --isolated " +typing_run := uv_run + "--group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" +docs_run := uv_run + "--extra docs" +doc_build := "./doc/_build" +mypy_args := "--install-types --non-interactive" # Make the default recipe private so it doesn't show up in the list. [private] @@ -18,47 +20,55 @@ install: [group('docs')] docs: - {{hatch_bin}} run doc:build + {{docs_run}} sphinx-build -W -b html doc {{doc_build}}/html [group('docs')] docs-serve: - {{hatch_bin}} run doc:serve + {{docs_run}} sphinx-autobuild -W -b html doc --watch ./pymongo --watch ./bson --watch ./gridfs {{doc_build}}/serve [group('docs')] docs-linkcheck: - {{hatch_bin}} run doc:linkcheck + {{docs_run}} sphinx-build -E -b linkcheck doc {{doc_build}}/linkcheck [group('docs')] docs-test: - {{hatch_bin}} run doctest:test + {{docs_run}} --extra test sphinx-build -E -b doctest doc {{doc_build}}/doctest [group('typing')] typing: - {{hatch_bin}} run typing:check + just typing-mypy + just typing-pyright [group('typing')] typing-mypy: - {{hatch_bin}} run typing:mypy + {{typing_run}} mypy {{mypy_args}} bson gridfs tools pymongo + {{typing_run}} mypy {{mypy_args}} --config-file mypy_test.ini test + {{typing_run}} mypy {{mypy_args}} test/test_typing.py test/test_typing_strict.py + +[group('typing')] +typing-pyright: + {{typing_run}} pyright test/test_typing.py test/test_typing_strict.py + {{typing_run}} pyright -p strict_pyrightconfig.json test/test_typing_strict.py [group('lint')] lint: - {{pre_commit_bin}} run --all-files + {{uv_run}} pre-commit run --all-files [group('lint')] lint-manual: - {{pre_commit_bin}} run --all-files --hook-stage manual + {{uv_run}} pre-commit run --all-files --hook-stage manual [group('test')] -test *args: - {{hatch_bin}} run test:test {{args}} +test *args="-v --durations=5 --maxfail=10": + {{uv_run}} --extra test pytest {{args}} [group('test')] -test-mockupdb: - {{hatch_bin}} run test:test-mockupdb +test-mockupdb *args: + {{uv_run}} -v --extra test --group mockupdb pytest -m mockupdb {{args}} [group('test')] test-eg *args: - {{hatch_bin}} run test:test-eg {{args}} + bash ./.evergreen/run-tests.sh {{args}} [group('encryption')] setup-encryption: diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index c71e4bddcf..f49b56cc96 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -26,7 +26,7 @@ def _have_snappy() -> bool: try: - import snappy # type:ignore[import-not-found] # noqa: F401 + import snappy # type:ignore[import-untyped] # noqa: F401 return True except ImportError: diff --git a/pyproject.toml b/pyproject.toml index a9977a382c..69249ee4c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,30 @@ Documentation = "https://www.mongodb.com/docs/languages/python/pymongo-driver/cu Source = "https://github.com/mongodb/mongo-python-driver" Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" +[dependency-groups] +dev = [ + "pre-commit>=4.0" +] +gevent = ["gevent"] +eventlet = ["eventlet"] +coverage = [ + "pytest-cov", + "coverage>=5,<=7.5" +] +mockupdb = [ + "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" +] +pymongocrypt_source = [ + "pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" +] +perf = ["simplejson"] +typing = [ + "mypy==1.14.1", + "pyright==1.1.392.post0", + "typing_extensions", + "pip" +] + # Used to call hatch_build.py [tool.hatch.build.hooks.custom] diff --git a/requirements/typing.txt b/requirements/typing.txt deleted file mode 100644 index b0f0c9c7fc..0000000000 --- a/requirements/typing.txt +++ /dev/null @@ -1,7 +0,0 @@ -mypy==1.14.1 -pyright==1.1.392.post0 -typing_extensions --r ./encryption.txt --r ./ocsp.txt --r ./zstd.txt --r ./aws.txt diff --git a/strict_pyrightconfig.json b/strict_pyrightconfig.json new file mode 100644 index 0000000000..9684598cd9 --- /dev/null +++ b/strict_pyrightconfig.json @@ -0,0 +1 @@ +{"strict": ["tests/test_typing_strict.py"]} \ No newline at end of file diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000000..e7f09f66fc --- /dev/null +++ b/uv.lock @@ -0,0 +1,2092 @@ +version = 1 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version >= '3.10'", + "python_full_version < '3.10'", +] + +[[package]] +name = "alabaster" +version = "0.7.16" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511 }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929 }, +] + +[[package]] +name = "anyio" +version = "4.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 }, +] + +[[package]] +name = "attrs" +version = "24.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397 }, +] + +[[package]] +name = "babel" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", size = 9348104 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/ca/824b1195773ce6166d388573fc106ce56d4a805bd7427b624e063596ec58/beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", size = 581181 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, +] + +[[package]] +name = "boto3" +version = "1.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/e9/c0b2fa75efc4007ea1af21bc2fcbedf6e545c517fb90904d7f59850e02bf/boto3-1.36.2.tar.gz", hash = "sha256:fde1c29996b77274a60b7bc9f741525afa6267bb1716eb644a764fb7c124a0d2", size = 110998 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/c2/72a92794237b43f64141e156bc3a58bc36d18631f1a614e1e97a48b56447/boto3-1.36.2-py3-none-any.whl", hash = "sha256:76cfc9a705be46e8d22607efacc8d688c064f923d785a01c00b28e9a96425d1a", size = 139166 }, +] + +[[package]] +name = "botocore" +version = "1.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "urllib3", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/93/353b70cea6447e37789fc2d6f761fc12ae36fb4adb6f558055de8cdf655f/botocore-1.36.2.tar.gz", hash = "sha256:a1fe6610983f0214b0c7655fe6990b6a731746baf305b182976fc7b568fc3cb0", size = 13505440 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/fe/c066e8cb069027c12dbcf9066a7a4f3e9d2a31b10c7b174a8455ef1d0f46/botocore-1.36.2-py3-none-any.whl", hash = "sha256:bc3b7e3b573a48af2bd7116b80fe24f9a335b0b67314dcb2697a327d009abf29", size = 13302324 }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191 }, + { url = "https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592 }, + { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024 }, + { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188 }, + { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571 }, + { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687 }, + { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211 }, + { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325 }, + { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784 }, + { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564 }, + { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804 }, + { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299 }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264 }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651 }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259 }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200 }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235 }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721 }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242 }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999 }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242 }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604 }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, + { url = "https://files.pythonhosted.org/packages/b9/ea/8bb50596b8ffbc49ddd7a1ad305035daa770202a6b782fc164647c2673ad/cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", size = 182220 }, + { url = "https://files.pythonhosted.org/packages/ae/11/e77c8cd24f58285a82c23af484cf5b124a376b32644e445960d1a4654c3a/cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", size = 178605 }, + { url = "https://files.pythonhosted.org/packages/ed/65/25a8dc32c53bf5b7b6c2686b42ae2ad58743f7ff644844af7cdb29b49361/cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", size = 424910 }, + { url = "https://files.pythonhosted.org/packages/42/7a/9d086fab7c66bd7c4d0f27c57a1b6b068ced810afc498cc8c49e0088661c/cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", size = 447200 }, + { url = "https://files.pythonhosted.org/packages/da/63/1785ced118ce92a993b0ec9e0d0ac8dc3e5dbfbcaa81135be56c69cabbb6/cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", size = 454565 }, + { url = "https://files.pythonhosted.org/packages/74/06/90b8a44abf3556599cdec107f7290277ae8901a58f75e6fe8f970cd72418/cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", size = 435635 }, + { url = "https://files.pythonhosted.org/packages/bd/62/a1f468e5708a70b1d86ead5bab5520861d9c7eacce4a885ded9faa7729c3/cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", size = 445218 }, + { url = "https://files.pythonhosted.org/packages/5b/95/b34462f3ccb09c2594aa782d90a90b045de4ff1f70148ee79c69d37a0a5a/cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", size = 460486 }, + { url = "https://files.pythonhosted.org/packages/fc/fc/a1e4bebd8d680febd29cf6c8a40067182b64f00c7d105f8f26b5bc54317b/cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", size = 437911 }, + { url = "https://files.pythonhosted.org/packages/e6/c3/21cab7a6154b6a5ea330ae80de386e7665254835b9e98ecc1340b3a7de9a/cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", size = 460632 }, + { url = "https://files.pythonhosted.org/packages/cb/b5/fd9f8b5a84010ca169ee49f4e4ad6f8c05f4e3545b72ee041dbbcb159882/cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", size = 171820 }, + { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290 }, +] + +[[package]] +name = "cfgv" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/58/5580c1716040bc89206c77d8f74418caf82ce519aae06450393ca73475d1/charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de", size = 198013 }, + { url = "https://files.pythonhosted.org/packages/d0/11/00341177ae71c6f5159a08168bcb98c6e6d196d372c94511f9f6c9afe0c6/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176", size = 141285 }, + { url = "https://files.pythonhosted.org/packages/01/09/11d684ea5819e5a8f5100fb0b38cf8d02b514746607934134d31233e02c8/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037", size = 151449 }, + { url = "https://files.pythonhosted.org/packages/08/06/9f5a12939db324d905dc1f70591ae7d7898d030d7662f0d426e2286f68c9/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f", size = 143892 }, + { url = "https://files.pythonhosted.org/packages/93/62/5e89cdfe04584cb7f4d36003ffa2936681b03ecc0754f8e969c2becb7e24/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a", size = 146123 }, + { url = "https://files.pythonhosted.org/packages/a9/ac/ab729a15c516da2ab70a05f8722ecfccc3f04ed7a18e45c75bbbaa347d61/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a", size = 147943 }, + { url = "https://files.pythonhosted.org/packages/03/d2/3f392f23f042615689456e9a274640c1d2e5dd1d52de36ab8f7955f8f050/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247", size = 142063 }, + { url = "https://files.pythonhosted.org/packages/f2/e3/e20aae5e1039a2cd9b08d9205f52142329f887f8cf70da3650326670bddf/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408", size = 150578 }, + { url = "https://files.pythonhosted.org/packages/8d/af/779ad72a4da0aed925e1139d458adc486e61076d7ecdcc09e610ea8678db/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb", size = 153629 }, + { url = "https://files.pythonhosted.org/packages/c2/b6/7aa450b278e7aa92cf7732140bfd8be21f5f29d5bf334ae987c945276639/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d", size = 150778 }, + { url = "https://files.pythonhosted.org/packages/39/f4/d9f4f712d0951dcbfd42920d3db81b00dd23b6ab520419626f4023334056/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807", size = 146453 }, + { url = "https://files.pythonhosted.org/packages/49/2b/999d0314e4ee0cff3cb83e6bc9aeddd397eeed693edb4facb901eb8fbb69/charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f", size = 95479 }, + { url = "https://files.pythonhosted.org/packages/2d/ce/3cbed41cff67e455a386fb5e5dd8906cdda2ed92fbc6297921f2e4419309/charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f", size = 102790 }, + { url = "https://files.pythonhosted.org/packages/72/80/41ef5d5a7935d2d3a773e3eaebf0a9350542f2cab4eac59a7a4741fbbbbe/charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125", size = 194995 }, + { url = "https://files.pythonhosted.org/packages/7a/28/0b9fefa7b8b080ec492110af6d88aa3dea91c464b17d53474b6e9ba5d2c5/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1", size = 139471 }, + { url = "https://files.pythonhosted.org/packages/71/64/d24ab1a997efb06402e3fc07317e94da358e2585165930d9d59ad45fcae2/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3", size = 149831 }, + { url = "https://files.pythonhosted.org/packages/37/ed/be39e5258e198655240db5e19e0b11379163ad7070962d6b0c87ed2c4d39/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd", size = 142335 }, + { url = "https://files.pythonhosted.org/packages/88/83/489e9504711fa05d8dde1574996408026bdbdbd938f23be67deebb5eca92/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00", size = 143862 }, + { url = "https://files.pythonhosted.org/packages/c6/c7/32da20821cf387b759ad24627a9aca289d2822de929b8a41b6241767b461/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12", size = 145673 }, + { url = "https://files.pythonhosted.org/packages/68/85/f4288e96039abdd5aeb5c546fa20a37b50da71b5cf01e75e87f16cd43304/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77", size = 140211 }, + { url = "https://files.pythonhosted.org/packages/28/a3/a42e70d03cbdabc18997baf4f0227c73591a08041c149e710045c281f97b/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146", size = 148039 }, + { url = "https://files.pythonhosted.org/packages/85/e4/65699e8ab3014ecbe6f5c71d1a55d810fb716bbfd74f6283d5c2aa87febf/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd", size = 151939 }, + { url = "https://files.pythonhosted.org/packages/b1/82/8e9fe624cc5374193de6860aba3ea8070f584c8565ee77c168ec13274bd2/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6", size = 149075 }, + { url = "https://files.pythonhosted.org/packages/3d/7b/82865ba54c765560c8433f65e8acb9217cb839a9e32b42af4aa8e945870f/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8", size = 144340 }, + { url = "https://files.pythonhosted.org/packages/b5/b6/9674a4b7d4d99a0d2df9b215da766ee682718f88055751e1e5e753c82db0/charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b", size = 95205 }, + { url = "https://files.pythonhosted.org/packages/1e/ab/45b180e175de4402dcf7547e4fb617283bae54ce35c27930a6f35b6bef15/charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76", size = 102441 }, + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, + { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, + { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, + { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, + { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, + { url = "https://files.pythonhosted.org/packages/7f/c0/b913f8f02836ed9ab32ea643c6fe4d3325c3d8627cf6e78098671cafff86/charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41", size = 197867 }, + { url = "https://files.pythonhosted.org/packages/0f/6c/2bee440303d705b6fb1e2ec789543edec83d32d258299b16eed28aad48e0/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f", size = 141385 }, + { url = "https://files.pythonhosted.org/packages/3d/04/cb42585f07f6f9fd3219ffb6f37d5a39b4fd2db2355b23683060029c35f7/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2", size = 151367 }, + { url = "https://files.pythonhosted.org/packages/54/54/2412a5b093acb17f0222de007cc129ec0e0df198b5ad2ce5699355269dfe/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770", size = 143928 }, + { url = "https://files.pythonhosted.org/packages/5a/6d/e2773862b043dcf8a221342954f375392bb2ce6487bcd9f2c1b34e1d6781/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4", size = 146203 }, + { url = "https://files.pythonhosted.org/packages/b9/f8/ca440ef60d8f8916022859885f231abb07ada3c347c03d63f283bec32ef5/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537", size = 148082 }, + { url = "https://files.pythonhosted.org/packages/04/d2/42fd330901aaa4b805a1097856c2edf5095e260a597f65def493f4b8c833/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496", size = 142053 }, + { url = "https://files.pythonhosted.org/packages/9e/af/3a97a4fa3c53586f1910dadfc916e9c4f35eeada36de4108f5096cb7215f/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78", size = 150625 }, + { url = "https://files.pythonhosted.org/packages/26/ae/23d6041322a3556e4da139663d02fb1b3c59a23ab2e2b56432bd2ad63ded/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7", size = 153549 }, + { url = "https://files.pythonhosted.org/packages/94/22/b8f2081c6a77cb20d97e57e0b385b481887aa08019d2459dc2858ed64871/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6", size = 150945 }, + { url = "https://files.pythonhosted.org/packages/c7/0b/c5ec5092747f801b8b093cdf5610e732b809d6cb11f4c51e35fc28d1d389/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294", size = 146595 }, + { url = "https://files.pythonhosted.org/packages/0c/5a/0b59704c38470df6768aa154cc87b1ac7c9bb687990a1559dc8765e8627e/charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5", size = 95453 }, + { url = "https://files.pythonhosted.org/packages/85/2d/a9790237cb4d01a6d57afadc8573c8b73c609ade20b80f4cda30802009ee/charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765", size = 102811 }, + { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "coverage" +version = "7.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/d3/3ec80acdd57a0d6a1111b978ade388824f37126446fd6750d38bfaca949c/coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8", size = 798314 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/db/08d54dbc12fdfe5857b06105fd1235bdebb7da7c11cd1a0fae936556162a/coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c", size = 210025 }, + { url = "https://files.pythonhosted.org/packages/a8/ff/02c4bcff1025b4a788aa3933e1cd1474d79de43e0d859273b3319ef43cd3/coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b", size = 210499 }, + { url = "https://files.pythonhosted.org/packages/ab/b1/7820a8ef62adeebd37612af9d2369f4467a3bc2641dea1243450def5489e/coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932", size = 238399 }, + { url = "https://files.pythonhosted.org/packages/2c/0e/23a388f3ce16c5ea01a454fef6a9039115abd40b748027d4fef18b3628a7/coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3", size = 236676 }, + { url = "https://files.pythonhosted.org/packages/f8/81/e871b0d58ca5d6cc27d00b2f668ce09c4643ef00512341f3a592a81fb6cd/coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517", size = 237467 }, + { url = "https://files.pythonhosted.org/packages/95/cb/42a6d34d5840635394f1e172aaa0e7cbd9346155e5004a8ee75d8e434c6b/coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a", size = 243539 }, + { url = "https://files.pythonhosted.org/packages/6a/6a/18b3819919fdfd3e2062a75219b363f895f24ae5b80e72ffe5dfb1a7e9c8/coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880", size = 241725 }, + { url = "https://files.pythonhosted.org/packages/b5/3d/a0650978e8b8f78d269358421b7401acaf7cb89e957b2e1be5205ea5940e/coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58", size = 242913 }, + { url = "https://files.pythonhosted.org/packages/8a/fe/95a74158fa0eda56d39783e918edc6fbb3dd3336be390557fc0a2815ecd4/coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4", size = 212381 }, + { url = "https://files.pythonhosted.org/packages/4c/26/b276e0c70cba5059becce2594a268a2731d5b4f2386e9a6afdf37ffa3d44/coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a", size = 213225 }, + { url = "https://files.pythonhosted.org/packages/71/cf/964bb667ea37d64b25f04d4cfaf6232cdb7a6472e1f4a4faf0459ddcec40/coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375", size = 210130 }, + { url = "https://files.pythonhosted.org/packages/aa/56/31edd4baa132fe2b991437e0acf3e36c50418370044a89b65518e5581f4c/coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb", size = 210617 }, + { url = "https://files.pythonhosted.org/packages/26/6d/4cd14bd0221180c307fae4f8ef00dbd86a13507c25081858c620aa6fafd8/coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95", size = 242048 }, + { url = "https://files.pythonhosted.org/packages/84/60/7eb84255bd9947b140e0382721b0a1b25fd670b4f0f176f11f90b5632d02/coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d", size = 239619 }, + { url = "https://files.pythonhosted.org/packages/76/6b/e8f4696194fdf3c19422f2a80ac10e03a9322f93e6c9ef57a89e03a8c8f7/coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743", size = 241321 }, + { url = "https://files.pythonhosted.org/packages/3f/1c/6a6990fd2e6890807775852882b1ed0a8e50519a525252490b0c219aa8a5/coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1", size = 250419 }, + { url = "https://files.pythonhosted.org/packages/1a/be/b6422a1422381704dd015cc23e503acd1a44a6bdc4e59c75f8c6a2b24151/coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de", size = 248794 }, + { url = "https://files.pythonhosted.org/packages/9b/93/e8231000754d4a31fe9a6c550f6a436eacd2e50763ba2b418f10b2308e45/coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff", size = 249873 }, + { url = "https://files.pythonhosted.org/packages/d3/6f/eb5aae80bf9d01d0f293121d4caa660ac968da2cb967f82547a7b5e8d65b/coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d", size = 212380 }, + { url = "https://files.pythonhosted.org/packages/30/73/b70ab57f11b62f5ca9a83f43cae752fbbb4417bea651875235c32eb2fc2e/coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656", size = 213316 }, + { url = "https://files.pythonhosted.org/packages/36/db/f4e17ffb5ac2d125c72ee3b235c2e04f85a4296a6a9e17730e218af113d8/coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9", size = 210340 }, + { url = "https://files.pythonhosted.org/packages/c3/bc/d7e832280f269be9e8d46cff5c4031b4840f1844674dc53ad93c5a9c1da6/coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64", size = 210612 }, + { url = "https://files.pythonhosted.org/packages/54/84/543e2cd6c1de30c7522a0afcb040677957bac756dd8677bade8bdd9274ba/coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af", size = 242926 }, + { url = "https://files.pythonhosted.org/packages/ad/06/570533f747141b4fd727a193317e16c6e677ed7945e23a195b8f64e685a2/coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc", size = 240294 }, + { url = "https://files.pythonhosted.org/packages/fa/d9/ec4ba0913195d240d026670d41b91f3e5b9a8a143a385f93a09e97c90f5c/coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2", size = 242232 }, + { url = "https://files.pythonhosted.org/packages/d9/3f/1a613c32aa1980d20d6ca2f54faf800df04aafad6016d7132b3276d8715d/coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1", size = 249171 }, + { url = "https://files.pythonhosted.org/packages/b9/3b/e16b12693572fd69148453abc6ddcd20cbeae6f0a040b5ed6af2f75b646f/coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb", size = 247073 }, + { url = "https://files.pythonhosted.org/packages/e7/3e/04a05d40bb09f90a312296a32fb2c5ade2dfcf803edf777ad18b97547503/coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2", size = 248812 }, + { url = "https://files.pythonhosted.org/packages/ba/f7/3a8b7b0affe548227f3d45e248c0f22c5b55bff0ee062b49afc165b3ff25/coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4", size = 212634 }, + { url = "https://files.pythonhosted.org/packages/7c/31/5f5286d2a5e21e1fe5670629bb24c79bf46383a092e74e00077e7a178e5c/coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475", size = 213460 }, + { url = "https://files.pythonhosted.org/packages/62/18/5573216d5b8db7d9f29189350dcd81830a03a624966c35f8201ae10df09c/coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1", size = 210014 }, + { url = "https://files.pythonhosted.org/packages/7c/0e/e98d6c6d569d65ff3195f095e6b006b3d7780fd6182322a25e7dfe0d53d3/coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5", size = 210494 }, + { url = "https://files.pythonhosted.org/packages/d3/63/98e5a6b7ed1bfca874729ee309cc49a6d6658ab9e479a2b6d223ccc96e03/coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631", size = 237996 }, + { url = "https://files.pythonhosted.org/packages/76/e4/d3c67a0a092127b8a3dffa2f75334a8cdb2cefc99e3d75a7f42cf1ff98a9/coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46", size = 236287 }, + { url = "https://files.pythonhosted.org/packages/12/7f/9b787ffc31bc39aa9e98c7005b698e7c6539bd222043e4a9c83b83c782a2/coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e", size = 237070 }, + { url = "https://files.pythonhosted.org/packages/31/ee/9998a0d855cad5f8e04062f7428b83c34aa643e5df468409593a480d5585/coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be", size = 243115 }, + { url = "https://files.pythonhosted.org/packages/16/94/1e348cd4445404c588ec8199adde0b45727b1d7989d8fb097d39c93e3da5/coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b", size = 241315 }, + { url = "https://files.pythonhosted.org/packages/28/17/6fe1695d2a706e586b87a407598f4ed82dd218b2b43cdc790f695f259849/coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0", size = 242467 }, + { url = "https://files.pythonhosted.org/packages/81/a2/1e550272c8b1f89b980504230b1a929de83d8f3d5ecb268477b32e5996a6/coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7", size = 212394 }, + { url = "https://files.pythonhosted.org/packages/c9/48/7d3c31064c5adcc743fe5370cf7e198cee06cc0e2d37b5cbe930691a3f54/coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493", size = 213246 }, + { url = "https://files.pythonhosted.org/packages/34/81/f00ce7ef95479085feb01fa9e352b2b5b2b9d24767acf2266d6267a6dba9/coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067", size = 202381 }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "cramjam" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/68/09b6b5603d21a0c7d4362d513217a5079c47b1b7a88967c52dbef13db183/cramjam-2.9.1.tar.gz", hash = "sha256:336cc591d86cbd225d256813779f46624f857bc9c779db126271eff9ddc524ae", size = 47892 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/5d/0b03115fa6a95a6dd9be344cd186879b763f1a6fab57ae55ffe2777aa0a7/cramjam-2.9.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:8e82464d1e00fbbb12958999b8471ba5e9f3d9711954505a0a7b378762332e6f", size = 2136622 }, + { url = "https://files.pythonhosted.org/packages/6f/ac/a17644e182ede7e8e24fb3af038bc2c1cf3dd0447c935cb10409f21d099b/cramjam-2.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d2df8a6511cc08ef1fccd2e0c65e2ebc9f57574ec8376052a76851af5398810", size = 1927947 }, + { url = "https://files.pythonhosted.org/packages/9e/1e/e6c4f9695e4ba7b9c63160dcbfa76428bd3221930eedeb8f16364ab6f642/cramjam-2.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:21ea784e6c3f1843d3523ae0f03651dd06058b39eeb64beb82ee3b100fa83662", size = 2268766 }, + { url = "https://files.pythonhosted.org/packages/ab/37/4c81e5d039bdfc75a695abd426e6cdd9ab18a87f65d57837d78936cfa226/cramjam-2.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e0c5d98a4e791f0bbd0ffcb7dae879baeb2dcc357348a8dc2be0a8c10403a2a", size = 2108762 }, + { url = "https://files.pythonhosted.org/packages/b9/bb/3bf3a8877b9a4105b625d710410bd2bc83ef38d4a7fe4eaeb3895d997b2d/cramjam-2.9.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e076fd87089197cb61117c63dbe7712ad5eccb93968860eb3bae09b767bac813", size = 2086694 }, + { url = "https://files.pythonhosted.org/packages/c3/78/317b7ab6a9b0f24c45d56305a8288cdb6408f855034dc80530ed16a5cc6c/cramjam-2.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d86b44933aea0151e4a2e1e6935448499849045c38167d288ca4c59d5b8cd4e", size = 2441698 }, + { url = "https://files.pythonhosted.org/packages/c5/2d/bc98992c29eb8647196b3bda814fd7ecfba6aff85177d44180be2aa320e8/cramjam-2.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7eb032549dec897b942ddcf80c1cdccbcb40629f15fc902731dbe6362da49326", size = 2759280 }, + { url = "https://files.pythonhosted.org/packages/dd/64/a4e54d74110c22477e467586935167d61fc7bae5284d393e76779b214a3e/cramjam-2.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf29b4def86ec503e329fe138842a9b79a997e3beb6c7809b05665a0d291edff", size = 2385128 }, + { url = "https://files.pythonhosted.org/packages/b0/1a/6ee093bf8a41cf31980175310abbbcdd1a39dadadbe96843112f42cef0fe/cramjam-2.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a36adf7d13b7accfa206e1c917f08924eb905b45aa8e62176509afa7b14db71e", size = 2373494 }, + { url = "https://files.pythonhosted.org/packages/9d/a6/1ae1f1a8ef559c2fab9d6d7f09b19995684e6727e617bf1b73967ee1c6be/cramjam-2.9.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:cf4ea758d98b6fad1b4b2d808d0de690d3162ac56c26968aea0af6524e3eb736", size = 2386900 }, + { url = "https://files.pythonhosted.org/packages/d9/e6/cf18deeaa0a96e7fc87f0eacde3c97e2893b573ac148ec746655570c18fc/cramjam-2.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4826d6d81ea490fa7a3ae7a4b9729866a945ffac1f77fe57b71e49d6e1b21efd", size = 2400609 }, + { url = "https://files.pythonhosted.org/packages/90/97/98a8fa24249dc72a936a9a51a81407a399070ba4ceb528d0af291c760eff/cramjam-2.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:335103317475bf992953c58838152a4761fc3c87354000edbfc4d7e57cf05909", size = 2553159 }, + { url = "https://files.pythonhosted.org/packages/ae/6b/4f71f72bc3405f221ec8bd2ba869e324d5f87ddd58c14bf59f7937ea37ab/cramjam-2.9.1-cp310-cp310-win32.whl", hash = "sha256:258120cb1e3afc3443f756f9de161ed63eed56a2c31f6093e81c571c0f2dc9f6", size = 1817873 }, + { url = "https://files.pythonhosted.org/packages/8e/f4/32639916897d59e94d286b5b22263ce8c2903ecc93a868ebe9443ece8f12/cramjam-2.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c60e5996aa02547d12bc2740d44e90e006b0f93100f53206f7abe6732ad56e69", size = 2092168 }, + { url = "https://files.pythonhosted.org/packages/6c/28/dd2b62be30ffe1fa8df10c99ba7b46abfbfb2fc6ace6acbbf9264a1a6b48/cramjam-2.9.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b9db1debe48060e41a5b91af9193c524e473c57f6105462c5524a41f5aabdb88", size = 2136699 }, + { url = "https://files.pythonhosted.org/packages/03/c9/fcebeb6f06879af4226337715fbc42ffe543158bcba8c244bba144767897/cramjam-2.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f6f18f0242212d3409d26ce3874937b5b979cebd61f08b633a6ea893c32fc7b6", size = 1927934 }, + { url = "https://files.pythonhosted.org/packages/e8/f3/77032e4f5db4dfcc2b0365f92655b7d6f3fc1527ea5b637f9fb9f8156a65/cramjam-2.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b5b1cd7d39242b2b903cf09cd4696b3a6e04dc537ffa9f3ac8668edae76eecb6", size = 2268584 }, + { url = "https://files.pythonhosted.org/packages/38/16/52175e94390f57196382783a3386c122ace7656b57339abaacdc9433b609/cramjam-2.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47de0a68f5f4d9951250ef5af31f2a7228132caa9ed60994234f7eb98090d33", size = 2108599 }, + { url = "https://files.pythonhosted.org/packages/99/25/5f7476d127a8d18cd19a2f3fd25c0fe09ef7848069d23aac70bc96385eb6/cramjam-2.9.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e13c9a697881e5e38148958612dc6856967f5ff8cd7bba5ff751f2d6ac020aa4", size = 2086632 }, + { url = "https://files.pythonhosted.org/packages/7b/97/76ff3e1209add6acb7e2aa7997be48dc1f92ad66ee3e8fa1179eb2bb9b44/cramjam-2.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba560244bc1335b420b74e91e35f9d4e7f307a3be3a4603ce0f0d7e15a0acdf0", size = 2441757 }, + { url = "https://files.pythonhosted.org/packages/69/c4/228e74c30576556d11e54d86f356955cd86ff5e11bbfec74b66ed0dd237d/cramjam-2.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d47fd41ce260cf4f0ff0e788de961fab9e9c6844a05ce55d06ce31e06107bdc", size = 2758144 }, + { url = "https://files.pythonhosted.org/packages/4b/e7/0fd22e12c6a2879abc501979779d4b8cfe8fe692c708c2c0d1664e88fd79/cramjam-2.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84d154fbadece82935396eb6bcb502085d944d2fd13b07a94348364344370c2c", size = 2385062 }, + { url = "https://files.pythonhosted.org/packages/dd/9c/845592ddf9eb7130ae8bc5958a01d469304a43f8071effe164e2d239e3fa/cramjam-2.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:038df668ffb94d64d67b6ecc59cbd206745a425ffc0402897dde12d89fa6a870", size = 2373473 }, + { url = "https://files.pythonhosted.org/packages/10/c2/287cc94b7f8e87e3b0c21819d3a5deead99ebfdcb2b2d85cd04011b37292/cramjam-2.9.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:4125d8cd86fa08495d310e80926c2f0563f157b76862e7479f9b2cf94823ea0c", size = 2386816 }, + { url = "https://files.pythonhosted.org/packages/7c/22/869a1eeea53db4d9fbde6693a2465909762bffeab1a671e193c95b26f99f/cramjam-2.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4206ebdd1d1ef0f3f86c8c2f7c426aa4af6094f4f41e274601fd4c4569f37454", size = 2400713 }, + { url = "https://files.pythonhosted.org/packages/3f/89/ff988bd6427f01041ccb1a9104c05b6373ae476682d317b6844f4b40af92/cramjam-2.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ab687bef5c493732b9a4ab870542ee43f5eae0025f9c684c7cb399c3a85cb380", size = 2553081 }, + { url = "https://files.pythonhosted.org/packages/2e/68/13fa8561335de609f3cd40b132c1a3abbaf26d3c277e8b8a7446de34ef2c/cramjam-2.9.1-cp311-cp311-win32.whl", hash = "sha256:dda7698b6d7caeae1047adafebc4b43b2a82478234f6c2b45bc3edad854e0600", size = 1817782 }, + { url = "https://files.pythonhosted.org/packages/94/75/f3506ee802460e3b86a91e53bba1f67cf457fa04e4316fe7d5823ba5d28b/cramjam-2.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:872b00ff83e84bcbdc7e951af291ebe65eed20b09c47e7c4af21c312f90b796f", size = 2092227 }, + { url = "https://files.pythonhosted.org/packages/56/66/69a1c17331e38b02c78c923262fc315272de7c2618ef7eac8b3358969d90/cramjam-2.9.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:79417957972553502b217a0093532e48893c8b4ca30ccc941cefe9c72379df7c", size = 2132273 }, + { url = "https://files.pythonhosted.org/packages/3d/17/23d0b1d3301480e924545cdd27f2b949c50438949f64c74e800a09c12c37/cramjam-2.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce2b94117f373defc876f88e74e44049a9969223dbca3240415b71752d0422fb", size = 1926919 }, + { url = "https://files.pythonhosted.org/packages/8e/da/e9565f4abbbaa14645ccd7ce83f9631e90955454b87dc3ef9208aebc72e6/cramjam-2.9.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:67040e0fd84404885ec716a806bee6110f9960c3647e0ef1670aab3b7375a70a", size = 2271776 }, + { url = "https://files.pythonhosted.org/packages/88/ac/e6e0794ac01deb52e7a6a3e59720699abdee08d9b9c63a8d8874201d8155/cramjam-2.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bedb84e068b53c944bd08dcb501fd00d67daa8a917922356dd559b484ce7eab", size = 2109248 }, + { url = "https://files.pythonhosted.org/packages/22/0f/c3724b2dcdfbe7e07917803cf7a6db4a874818a6f8d2b95ca1ceaf177170/cramjam-2.9.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:06e3f97a379386d97debf08638a78b3d3850fdf6124755eb270b54905a169930", size = 2088611 }, + { url = "https://files.pythonhosted.org/packages/ce/16/929a5ae899ad6298f58e66622dc223476fe8e1d4e8dae608f4e1a34bfd09/cramjam-2.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11118675e9c7952ececabc62f023290ee4f8ecf0bee0d2c7eb8d1c402ee9769d", size = 2438373 }, + { url = "https://files.pythonhosted.org/packages/2a/2a/ad473f1ca65d3285e8c1d99fc0289f5856224c0d452dabcf856fd4dcdd77/cramjam-2.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b7de6b61b11545570e4d6033713f3599525efc615ee353a822be8f6b0c65b77", size = 2836669 }, + { url = "https://files.pythonhosted.org/packages/9b/5a/e9b4868ee27099a2a21646cf5ea5cf08c660eae90b55a395ada974dcf3fb/cramjam-2.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57ca8f3775324a9de3ee6f05ca172687ba258c0dea79f7e3a6b4112834982f2a", size = 2343995 }, + { url = "https://files.pythonhosted.org/packages/5f/c4/870a9b4524107bf85a207b82a42613318881238b20f2d237e62815af646a/cramjam-2.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9847dd6f288f1c56359f52acb48ff2df848ff3e3bff34d23855bbcf7016427cc", size = 2374270 }, + { url = "https://files.pythonhosted.org/packages/70/4b/b69e8e3951b7cec5e7da2539b7573bb396bed66af07d760b1878b00fd120/cramjam-2.9.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:8d1248dfa7f151e893ce819670f00879e4b7650b8d4c01279ce4f12140d68dd2", size = 2388789 }, + { url = "https://files.pythonhosted.org/packages/05/1a/af02f6192060413314735c0db61259d7279b0d8d99eee29eff2af09c5892/cramjam-2.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9da6d970281083bae91b914362de325414aa03c01fc806f6bb2cc006322ec834", size = 2402459 }, + { url = "https://files.pythonhosted.org/packages/20/9a/a4ab3e90d72eb4f2c1b983fa32b4050ba676f533ba15bd78158f0632295a/cramjam-2.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1c33bc095db5733c841a102b8693062be5db8cdac17b9782ebc00577c6a94480", size = 2518440 }, + { url = "https://files.pythonhosted.org/packages/35/3b/e632dd7e2c5c8a2af2d83144b00d6840f1afcf9c6959ed59ec5b0f925288/cramjam-2.9.1-cp312-cp312-win32.whl", hash = "sha256:9e9193cd4bb57e7acd3af24891526299244bfed88168945efdaa09af4e50720f", size = 1822630 }, + { url = "https://files.pythonhosted.org/packages/0e/a2/d1c46618b81b83578d58a62f3709046c4f3b4ddba10df4b9797cfe096b98/cramjam-2.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:15955dd75e80f66c1ea271167a5347661d9bdc365f894a57698c383c9b7d465c", size = 2094684 }, + { url = "https://files.pythonhosted.org/packages/85/45/f1d1e6ffdceb3b0c18511df2f8e779e03972459fb71d7c1ab0f6a5c063a3/cramjam-2.9.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5a7797a2fff994fc5e323f7a967a35a3e37e3006ed21d64dcded086502f482af", size = 2131814 }, + { url = "https://files.pythonhosted.org/packages/3a/96/36bbd431fbf0fa2ff51fd2db4c3bead66e9e373693a8455d411d45125a68/cramjam-2.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d51b9b140b1df39a44bff7896d98a10da345b7d5f5ce92368d328c1c2c829167", size = 1926380 }, + { url = "https://files.pythonhosted.org/packages/67/c4/99b6507ec697d5f56d32c9c04614775004b05b7fa870725a492dc6b639eb/cramjam-2.9.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:07ac76b7f992556e7aa910244be11ece578cdf84f4d5d5297461f9a895e18312", size = 2271581 }, + { url = "https://files.pythonhosted.org/packages/cb/1b/6d55dff244fb22c0b686dd5a96a754c0638f8a94056beb27c457c6035cc5/cramjam-2.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d90a72608c7550cd7eba914668f6277bfb0b24f074d1f1bd9d061fcb6f2adbd6", size = 2109255 }, + { url = "https://files.pythonhosted.org/packages/ca/fb/b9fcf492a21a8d978c6f999025fce2c6656399448c017ed2fc859425f37f/cramjam-2.9.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:56495975401b1821dbe1f29cf222e23556232209a2fdb809fe8156d120ca9c7f", size = 2088323 }, + { url = "https://files.pythonhosted.org/packages/88/1f/69b523395aeaa201dbd53d203453288205a0c651e7c910161892d694eb4d/cramjam-2.9.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b695259e71fde6d5be66b77a4474523ced9ffe9fe8a34cb9b520ec1241a14d3", size = 2437930 }, + { url = "https://files.pythonhosted.org/packages/b0/2c/d07e802f1786c4082e8286db1087563e4fab31cd6534ed31523f1f9584d1/cramjam-2.9.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab1e69dc4831bbb79b6d547077aae89074c83e8ad94eba1a3d80e94d2424fd02", size = 2836655 }, + { url = "https://files.pythonhosted.org/packages/1f/f5/6b425e82395c078bc95a7437b685e6bdba39d28c2b2986d79374fc1681aa/cramjam-2.9.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:440b489902bfb7a26d3fec1ca888007615336ff763d2a32a2fc40586548a0dbf", size = 2387107 }, + { url = "https://files.pythonhosted.org/packages/33/65/7bf97d89ba7607aaea5464af6f249e3d94c291acf73d72768367a3e361c0/cramjam-2.9.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:217fe22b41f8c3dce03852f828b059abfad11d1344a1df2f43d3eb8634b18d75", size = 2374006 }, + { url = "https://files.pythonhosted.org/packages/29/11/8b6c82eda6d0affbc15d7ab4dc758856eb4308e8ddae73300c1648f5aa0f/cramjam-2.9.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:95f3646ddc98af25af25d5692ae65966488a283813336ea9cf41b22e542e7c0d", size = 2388731 }, + { url = "https://files.pythonhosted.org/packages/48/25/6cdd57c0b1a83c98aec9029310d09a6c1a31e9e9fb8efd9001bd0cbea992/cramjam-2.9.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:6b19fc60ead1cae9795a5b359599da3a1c95d38f869bdfb51c441fd76b04e926", size = 2402131 }, + { url = "https://files.pythonhosted.org/packages/b4/e7/cbf80c9647fa582432aa833c4bdd20cf437917c8066ce653e3b78deff658/cramjam-2.9.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:8dc5207567459d049696f62a1fdfb220f3fe6aa0d722285d44753e12504dac6c", size = 2555296 }, + { url = "https://files.pythonhosted.org/packages/18/a6/fabe1959a980f5d2783a6c138311509dd168bd76e62018624a91cd1cbb41/cramjam-2.9.1-cp313-cp313-win32.whl", hash = "sha256:fbfe35929a61b914de9e5dbacde0cfbba86cbf5122f9285a24c14ed0b645490b", size = 1822484 }, + { url = "https://files.pythonhosted.org/packages/55/d5/24e4562771711711c466768c92097640ed97b0283abe9043ffb6c6d4cf04/cramjam-2.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:06068bd191a82ad4fc1ac23d6f8627fb5e37ec4be0431711b9a2dbacaccfeddb", size = 2094445 }, + { url = "https://files.pythonhosted.org/packages/c7/5a/50523fd478390acb6ca8e57239f7cf79f7260dc0d16be89137d47823e50a/cramjam-2.9.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:af39006faddfc6253beb93ca821d544931cfee7f0177b99ff106dfd8fd6a2cd8", size = 2137158 }, + { url = "https://files.pythonhosted.org/packages/df/83/54eca302e431d51149074d8aad6ec588870c5797060e2142dfe6ca3599a8/cramjam-2.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b3291be0d3f73d5774d69013be4ab33978c777363b5312d14f62f77817c2f75a", size = 1927910 }, + { url = "https://files.pythonhosted.org/packages/6d/e9/5d38ffa5376c5bffcbd16545707d9dac6beffccd00410f0cc19d83d85ef7/cramjam-2.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1539fd758f0e57fad7913cebff8baaee871bb561ddf6fa710a427b74da6b6778", size = 2269458 }, + { url = "https://files.pythonhosted.org/packages/15/f3/99fedc4210db1967256e602fdcb60947585421fd659f8baeeeb4ea16e4c7/cramjam-2.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff362f68bd68ac0eccb445209238d589bba728fb6d7f2e9dc199e0ec3a61d6e0", size = 2109406 }, + { url = "https://files.pythonhosted.org/packages/f2/e9/f380e0c1bd03046c522da4fd6d43ea897ba0b832c78fc4ea5708d8c35c21/cramjam-2.9.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23b9786d1d17686fb8d600ade2a19374c7188d4b8867efa9af0d8274a220aec7", size = 2086677 }, + { url = "https://files.pythonhosted.org/packages/13/a7/3ae887753f6d41f6e4af8e25654d103c56e13dda2f4b4d13acac570c65c1/cramjam-2.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bc9c2c748aaf91863d89c4583f529c1c709485c94f8dfeb3ee48662d88e3258", size = 2442136 }, + { url = "https://files.pythonhosted.org/packages/de/a2/763fd98340936057e44ea0b870c9cdb87ad5f90d49e492e8a11cf74e7b29/cramjam-2.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd0fa9a0e7f18224b6d2d1d69dbdc3aecec80ef1393c59244159b131604a4395", size = 2754985 }, + { url = "https://files.pythonhosted.org/packages/33/31/7c8cdf6b16fcd46bad4a307c8203a58b7a2fddf6cb3aad9dc441c050f62f/cramjam-2.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ceef6e09ee22457997370882aa3c69de01e6dd0aaa2f953e1e87ad11641d042", size = 2385597 }, + { url = "https://files.pythonhosted.org/packages/dd/ba/ec0f3b5a3a90721bdb42f4f4989b60adf823d137f40365e83df0cd299378/cramjam-2.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1376f6fdbf0b30712413a0b4e51663a4938ae2f6b449f8e4635dbb3694db83cf", size = 2374339 }, + { url = "https://files.pythonhosted.org/packages/ff/0a/f5bccdc8d12821aed4473a427e9eb8282a38c9337a30e02ed102b18941bf/cramjam-2.9.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:342fb946f8d3e9e35b837288b03ab23cfbe0bb5a30e582ed805ef79706823a96", size = 2386933 }, + { url = "https://files.pythonhosted.org/packages/a0/6e/ce3ffad2b3b8cb73156a19345e27a2e27fb5be79b64f2c81b0c6d6e16c57/cramjam-2.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a237064a6e2c2256c9a1cf2beb7c971382190c0f1eb2e810e02e971881756132", size = 2400860 }, + { url = "https://files.pythonhosted.org/packages/32/a9/e4509e5dfc8f41d9e7f9fdddbf567967937303621d410197c86b11d6a3e4/cramjam-2.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53145fc9f2319c1245d4329e1da8cfacd6e35e27090c07c0b9d453ae2bbdac3e", size = 2553681 }, + { url = "https://files.pythonhosted.org/packages/0a/83/52401c5c654ddff2850d890b0f1cfc355ff6887c6def420d0c8d8178ff97/cramjam-2.9.1-cp39-cp39-win32.whl", hash = "sha256:8a9f52c27292c21457f43c4ce124939302a9acfb62295e7cda8667310563a5a3", size = 1818130 }, + { url = "https://files.pythonhosted.org/packages/93/b3/1645986d8b915fd0426a7224cd00c2c17c32b4d69bc5faad3fb3f5fd5081/cramjam-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:8097ee39b61c86848a443c0b25b2df1de6b331fd512b20836a4f5cfde51ab255", size = 2092440 }, + { url = "https://files.pythonhosted.org/packages/bc/91/3f7884172573072a4280bc8bc19b7562b2cd66d2a65576b11e72115cd5fe/cramjam-2.9.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:86824c695688fcd06c5ac9bbd3fea9bdfb4cca194b1e706fbf11a629df48d2b4", size = 2159537 }, + { url = "https://files.pythonhosted.org/packages/ef/49/a0a89e9c45413e89a1e408d4ab416c0f88f19f6db7571fd5c517e429e276/cramjam-2.9.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:27571bfa5a5d618604696747d0dc1d2a99b5906c967c8dee53c13a7107edfde6", size = 1936244 }, + { url = "https://files.pythonhosted.org/packages/26/f7/6422b9e4d148f1a351c0358a95d59023f25cab76609b180804f6a3ed17e9/cramjam-2.9.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb01f6e38719818778144d3165a89ea1ad9dc58c6342b7f20aa194c70f34cbd1", size = 2119487 }, + { url = "https://files.pythonhosted.org/packages/b5/59/6fc930217f7ae085eca6d22d3477cd0145a105cdc39e63b834cb0c1b25e3/cramjam-2.9.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b5cef5cf40725fe64592af9ec163e7389855077700678a1d94bec549403a74d", size = 2400910 }, + { url = "https://files.pythonhosted.org/packages/2d/36/7e53cf5aaed4b446490e298f7571e69ce15d0dfb148feabe8bf02e58827f/cramjam-2.9.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ac48b978aa0675f62b642750e798c394a64d25ce852e4e541f69bef9a564c2f0", size = 2100860 }, +] + +[[package]] +name = "cryptography" +version = "44.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = "sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, + { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, + { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, + { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, + { url = "https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, + { url = "https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, + { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, + { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, + { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, + { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, + { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, + { url = "https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, + { url = "https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, + { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, + { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, + { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, + { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, + { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, + { url = "https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, + { url = "https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, + { url = "https://files.pythonhosted.org/packages/77/d4/fea74422326388bbac0c37b7489a0fcb1681a698c3b875959430ba550daa/cryptography-44.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37d76e6863da3774cd9db5b409a9ecfd2c71c981c38788d3fcfaf177f447b731", size = 3338857 }, + { url = "https://files.pythonhosted.org/packages/1a/aa/ba8a7467c206cb7b62f09b4168da541b5109838627f582843bbbe0235e8e/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f677e1268c4e23420c3acade68fac427fffcb8d19d7df95ed7ad17cdef8404f4", size = 3850615 }, + { url = "https://files.pythonhosted.org/packages/89/fa/b160e10a64cc395d090105be14f399b94e617c879efd401188ce0fea39ee/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f5e7cb1e5e56ca0933b4873c0220a78b773b24d40d186b6738080b73d3d0a756", size = 4081622 }, + { url = "https://files.pythonhosted.org/packages/47/8f/20ff0656bb0cf7af26ec1d01f780c5cfbaa7666736063378c5f48558b515/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:8b3e6eae66cf54701ee7d9c83c30ac0a1e3fa17be486033000f2a73a12ab507c", size = 3867546 }, + { url = "https://files.pythonhosted.org/packages/38/d9/28edf32ee2fcdca587146bcde90102a7319b2f2c690edfa627e46d586050/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:be4ce505894d15d5c5037167ffb7f0ae90b7be6f2a98f9a5c3442395501c32fa", size = 4090937 }, + { url = "https://files.pythonhosted.org/packages/cc/9d/37e5da7519de7b0b070a3fedd4230fe76d50d2a21403e0f2153d70ac4163/cryptography-44.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:62901fb618f74d7d81bf408c8719e9ec14d863086efe4185afd07c352aee1d2c", size = 3128774 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "distlib" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973 }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, +] + +[[package]] +name = "eventlet" +version = "0.38.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dnspython" }, + { name = "greenlet" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/4e/f974cc85b8d19b31176e0cca90e1650156f385c9c294a96fc42846ca75e9/eventlet-0.38.2.tar.gz", hash = "sha256:6a46823af1dca7d29cf04c0d680365805435473c3acbffc176765c7f8787edac", size = 561526 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/07/00feb2c708d71796e190a3051a0d530a4922bfb6b346aa8302725840698c/eventlet-0.38.2-py3-none-any.whl", hash = "sha256:4a2e3cbc53917c8f39074ccf689501168563d3a4df59e9cddd5e9d3b7f85c599", size = 363192 }, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 }, +] + +[[package]] +name = "filelock" +version = "3.16.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163 }, +] + +[[package]] +name = "furo" +version = "2024.8.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "pygments" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx-basic-ng" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/e2/d351d69a9a9e4badb4a5be062c2d0e87bd9e6c23b5e57337fef14bef34c8/furo-2024.8.6.tar.gz", hash = "sha256:b63e4cee8abfc3136d3bc03a3d45a76a850bada4d6374d24c1716b0e01394a01", size = 1661506 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/48/e791a7ed487dbb9729ef32bb5d1af16693d8925f4366befef54119b2e576/furo-2024.8.6-py3-none-any.whl", hash = "sha256:6cd97c58b47813d3619e63e9081169880fbe331f0ca883c871ff1f3f11814f5c", size = 341333 }, +] + +[[package]] +name = "gevent" +version = "24.11.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/75/a53f1cb732420f5e5d79b2563fc3504d22115e7ecfe7966e5cf9b3582ae7/gevent-24.11.1.tar.gz", hash = "sha256:8bd1419114e9e4a3ed33a5bad766afff9a3cf765cb440a582a1b3a9bc80c1aca", size = 5976624 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/7d/27ed3603f4bf96b36fb2746e923e033bc600c6684de8fe164d64eb8c4dcc/gevent-24.11.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:92fe5dfee4e671c74ffaa431fd7ffd0ebb4b339363d24d0d944de532409b935e", size = 2998254 }, + { url = "https://files.pythonhosted.org/packages/a8/03/a8f6c70f50a644a79e75d9f15e6f1813115d34c3c55528e4669a9316534d/gevent-24.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7bfcfe08d038e1fa6de458891bca65c1ada6d145474274285822896a858c870", size = 4817711 }, + { url = "https://files.pythonhosted.org/packages/f0/05/4f9bc565520a18f107464d40ac15a91708431362c797e77fbb5e7ff26e64/gevent-24.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7398c629d43b1b6fd785db8ebd46c0a353880a6fab03d1cf9b6788e7240ee32e", size = 4934468 }, + { url = "https://files.pythonhosted.org/packages/4a/7d/f15561eeebecbebc0296dd7bebea10ac4af0065d98249e3d8c4998e68edd/gevent-24.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7886b63ebfb865178ab28784accd32f287d5349b3ed71094c86e4d3ca738af5", size = 5014067 }, + { url = "https://files.pythonhosted.org/packages/67/c1/07eff117a600fc3c9bd4e3a1ff3b726f146ee23ce55981156547ccae0c85/gevent-24.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9ca80711e6553880974898d99357fb649e062f9058418a92120ca06c18c3c59", size = 6625531 }, + { url = "https://files.pythonhosted.org/packages/4b/72/43f76ab6b18e5e56b1003c844829971f3044af08b39b3c9040559be00a2b/gevent-24.11.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e24181d172f50097ac8fc272c8c5b030149b630df02d1c639ee9f878a470ba2b", size = 5249671 }, + { url = "https://files.pythonhosted.org/packages/6b/fc/1a847ada0757cc7690f83959227514b1a52ff6de504619501c81805fa1da/gevent-24.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1d4fadc319b13ef0a3c44d2792f7918cf1bca27cacd4d41431c22e6b46668026", size = 6773903 }, + { url = "https://files.pythonhosted.org/packages/3b/9d/254dcf455f6659ab7e36bec0bc11f51b18ea25eac2de69185e858ccf3c30/gevent-24.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d882faa24f347f761f934786dde6c73aa6c9187ee710189f12dcc3a63ed4a50", size = 1560443 }, + { url = "https://files.pythonhosted.org/packages/ea/fd/86a170f77ef51a15297573c50dbec4cc67ddc98b677cc2d03cc7f2927f4c/gevent-24.11.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:351d1c0e4ef2b618ace74c91b9b28b3eaa0dd45141878a964e03c7873af09f62", size = 2951424 }, + { url = "https://files.pythonhosted.org/packages/7f/0a/987268c9d446f61883bc627c77c5ed4a97869c0f541f76661a62b2c411f6/gevent-24.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5efe72e99b7243e222ba0c2c2ce9618d7d36644c166d63373af239da1036bab", size = 4878504 }, + { url = "https://files.pythonhosted.org/packages/dc/d4/2f77ddd837c0e21b4a4460bcb79318b6754d95ef138b7a29f3221c7e9993/gevent-24.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d3b249e4e1f40c598ab8393fc01ae6a3b4d51fc1adae56d9ba5b315f6b2d758", size = 5007668 }, + { url = "https://files.pythonhosted.org/packages/80/a0/829e0399a1f9b84c344b72d2be9aa60fe2a64e993cac221edcc14f069679/gevent-24.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81d918e952954675f93fb39001da02113ec4d5f4921bf5a0cc29719af6824e5d", size = 5067055 }, + { url = "https://files.pythonhosted.org/packages/1e/67/0e693f9ddb7909c2414f8fcfc2409aa4157884c147bc83dab979e9cf717c/gevent-24.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9c935b83d40c748b6421625465b7308d87c7b3717275acd587eef2bd1c39546", size = 6761883 }, + { url = "https://files.pythonhosted.org/packages/fa/b6/b69883fc069d7148dd23c5dda20826044e54e7197f3c8e72b8cc2cd4035a/gevent-24.11.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff96c5739834c9a594db0e12bf59cb3fa0e5102fc7b893972118a3166733d61c", size = 5440802 }, + { url = "https://files.pythonhosted.org/packages/32/4e/b00094d995ff01fd88b3cf6b9d1d794f935c31c645c431e65cd82d808c9c/gevent-24.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d6c0a065e31ef04658f799215dddae8752d636de2bed61365c358f9c91e7af61", size = 6866992 }, + { url = "https://files.pythonhosted.org/packages/37/ed/58dbe9fb09d36f6477ff8db0459ebd3be9a77dc05ae5d96dc91ad657610d/gevent-24.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:97e2f3999a5c0656f42065d02939d64fffaf55861f7d62b0107a08f52c984897", size = 1543736 }, + { url = "https://files.pythonhosted.org/packages/dd/32/301676f67ffa996ff1c4175092fb0c48c83271cc95e5c67650b87156b6cf/gevent-24.11.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:a3d75fa387b69c751a3d7c5c3ce7092a171555126e136c1d21ecd8b50c7a6e46", size = 2956467 }, + { url = "https://files.pythonhosted.org/packages/6b/84/aef1a598123cef2375b6e2bf9d17606b961040f8a10e3dcc3c3dd2a99f05/gevent-24.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:beede1d1cff0c6fafae3ab58a0c470d7526196ef4cd6cc18e7769f207f2ea4eb", size = 5136486 }, + { url = "https://files.pythonhosted.org/packages/92/7b/04f61187ee1df7a913b3fca63b0a1206c29141ab4d2a57e7645237b6feb5/gevent-24.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85329d556aaedced90a993226d7d1186a539c843100d393f2349b28c55131c85", size = 5299718 }, + { url = "https://files.pythonhosted.org/packages/36/2a/ebd12183ac25eece91d084be2111e582b061f4d15ead32239b43ed47e9ba/gevent-24.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:816b3883fa6842c1cf9d2786722014a0fd31b6312cca1f749890b9803000bad6", size = 5400118 }, + { url = "https://files.pythonhosted.org/packages/ec/c9/f006c0cd59f0720fbb62ee11da0ad4c4c0fd12799afd957dd491137e80d9/gevent-24.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b24d800328c39456534e3bc3e1684a28747729082684634789c2f5a8febe7671", size = 6775163 }, + { url = "https://files.pythonhosted.org/packages/49/f1/5edf00b674b10d67e3b967c2d46b8a124c2bc8cfd59d4722704392206444/gevent-24.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a5f1701ce0f7832f333dd2faf624484cbac99e60656bfbb72504decd42970f0f", size = 5479886 }, + { url = "https://files.pythonhosted.org/packages/22/11/c48e62744a32c0d48984268ae62b99edb81eaf0e03b42de52e2f09855509/gevent-24.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d740206e69dfdfdcd34510c20adcb9777ce2cc18973b3441ab9767cd8948ca8a", size = 6891452 }, + { url = "https://files.pythonhosted.org/packages/11/b2/5d20664ef6a077bec9f27f7a7ee761edc64946d0b1e293726a3d074a9a18/gevent-24.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:68bee86b6e1c041a187347ef84cf03a792f0b6c7238378bf6ba4118af11feaae", size = 1541631 }, + { url = "https://files.pythonhosted.org/packages/a4/8f/4958e70caeaf469c576ecc5b5f2cb49ddaad74336fa82363d89cddb3c284/gevent-24.11.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:d618e118fdb7af1d6c1a96597a5cd6ac84a9f3732b5be8515c6a66e098d498b6", size = 2949601 }, + { url = "https://files.pythonhosted.org/packages/3b/64/79892d250b7b2aa810688dfebe783aec02568e5cecacb1e100acbb9d95c6/gevent-24.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2142704c2adce9cd92f6600f371afb2860a446bfd0be5bd86cca5b3e12130766", size = 5107052 }, + { url = "https://files.pythonhosted.org/packages/66/44/9ee0ed1909b4f41375e32bf10036d5d8624962afcbd901573afdecd2e36a/gevent-24.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92e0d7759de2450a501effd99374256b26359e801b2d8bf3eedd3751973e87f5", size = 5271736 }, + { url = "https://files.pythonhosted.org/packages/e3/48/0184b2622a388a256199c5fadcad6b52b6455019c2a4b19edd6de58e30ba/gevent-24.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca845138965c8c56d1550499d6b923eb1a2331acfa9e13b817ad8305dde83d11", size = 5367782 }, + { url = "https://files.pythonhosted.org/packages/9a/b1/1a2704c346234d889d2e0042efb182534f7d294115f0e9f99d8079fa17eb/gevent-24.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:356b73d52a227d3313f8f828025b665deada57a43d02b1cf54e5d39028dbcf8d", size = 6757533 }, + { url = "https://files.pythonhosted.org/packages/ed/6e/b2eed8dec617264f0046d50a13a42d3f0a06c50071b9fc1eae00285a03f1/gevent-24.11.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:58851f23c4bdb70390f10fc020c973ffcf409eb1664086792c8b1e20f25eef43", size = 5449436 }, + { url = "https://files.pythonhosted.org/packages/63/c2/eca6b95fbf9af287fa91c327494e4b74a8d5bfa0156cd87b233f63f118dc/gevent-24.11.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1ea50009ecb7f1327347c37e9eb6561bdbc7de290769ee1404107b9a9cba7cf1", size = 6866470 }, + { url = "https://files.pythonhosted.org/packages/b7/e6/51824bd1f2c1ce70aa01495aa6ffe04ab789fa819fa7e6f0ad2388fb03c6/gevent-24.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:ec68e270543ecd532c4c1d70fca020f90aa5486ad49c4f3b8b2e64a66f5c9274", size = 1540088 }, + { url = "https://files.pythonhosted.org/packages/a0/73/263d0f63186d27d205b3dc157efe838afe3aba10a3baca15d85e97b90eae/gevent-24.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9347690f4e53de2c4af74e62d6fabc940b6d4a6cad555b5a379f61e7d3f2a8e", size = 6658480 }, + { url = "https://files.pythonhosted.org/packages/8a/fd/ec7b5c764a3d1340160b82f7394fdc1220d18e11ae089c472cf7bcc2fe6a/gevent-24.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8619d5c888cb7aebf9aec6703e410620ef5ad48cdc2d813dd606f8aa7ace675f", size = 6808247 }, + { url = "https://files.pythonhosted.org/packages/95/82/2ce68dc8dbc2c3ed3f4e73f21e1b7a45d80b5225670225a48e695f248850/gevent-24.11.1-cp39-cp39-win32.whl", hash = "sha256:c6b775381f805ff5faf250e3a07c0819529571d19bb2a9d474bee8c3f90d66af", size = 1483133 }, + { url = "https://files.pythonhosted.org/packages/76/96/aa4cbcf1807187b65a9c9ff15b32b08c2014968be852dda34d212cf8cc58/gevent-24.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1c3443b0ed23dcb7c36a748d42587168672953d368f2956b17fad36d43b58836", size = 1566354 }, + { url = "https://files.pythonhosted.org/packages/86/63/197aa67250943b508b34995c2aa6b46402e7e6f11785487740c2057bfb20/gevent-24.11.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:f43f47e702d0c8e1b8b997c00f1601486f9f976f84ab704f8f11536e3fa144c9", size = 1271676 }, +] + +[[package]] +name = "greenlet" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/90/5234a78dc0ef6496a6eb97b67a42a8e96742a56f7dc808cb954a85390448/greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563", size = 271235 }, + { url = "https://files.pythonhosted.org/packages/7c/16/cd631fa0ab7d06ef06387135b7549fdcc77d8d859ed770a0d28e47b20972/greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83", size = 637168 }, + { url = "https://files.pythonhosted.org/packages/2f/b1/aed39043a6fec33c284a2c9abd63ce191f4f1a07319340ffc04d2ed3256f/greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0", size = 648826 }, + { url = "https://files.pythonhosted.org/packages/76/25/40e0112f7f3ebe54e8e8ed91b2b9f970805143efef16d043dfc15e70f44b/greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120", size = 644443 }, + { url = "https://files.pythonhosted.org/packages/fb/2f/3850b867a9af519794784a7eeed1dd5bc68ffbcc5b28cef703711025fd0a/greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc", size = 643295 }, + { url = "https://files.pythonhosted.org/packages/cf/69/79e4d63b9387b48939096e25115b8af7cd8a90397a304f92436bcb21f5b2/greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617", size = 599544 }, + { url = "https://files.pythonhosted.org/packages/46/1d/44dbcb0e6c323bd6f71b8c2f4233766a5faf4b8948873225d34a0b7efa71/greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7", size = 1125456 }, + { url = "https://files.pythonhosted.org/packages/e0/1d/a305dce121838d0278cee39d5bb268c657f10a5363ae4b726848f833f1bb/greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6", size = 1149111 }, + { url = "https://files.pythonhosted.org/packages/96/28/d62835fb33fb5652f2e98d34c44ad1a0feacc8b1d3f1aecab035f51f267d/greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80", size = 298392 }, + { url = "https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479 }, + { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404 }, + { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813 }, + { url = "https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517 }, + { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831 }, + { url = "https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413 }, + { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619 }, + { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198 }, + { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930 }, + { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260 }, + { url = "https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064 }, + { url = "https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420 }, + { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035 }, + { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105 }, + { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077 }, + { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, + { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, + { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, + { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, + { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, + { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, + { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, + { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, + { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, + { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, + { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, + { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, + { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, + { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, + { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, + { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, + { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, + { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, + { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, + { url = "https://files.pythonhosted.org/packages/8c/82/8051e82af6d6b5150aacb6789a657a8afd48f0a44d8e91cb72aaaf28553a/greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3", size = 270027 }, + { url = "https://files.pythonhosted.org/packages/f9/74/f66de2785880293780eebd18a2958aeea7cbe7814af1ccef634f4701f846/greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42", size = 634822 }, + { url = "https://files.pythonhosted.org/packages/68/23/acd9ca6bc412b02b8aa755e47b16aafbe642dde0ad2f929f836e57a7949c/greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f", size = 646866 }, + { url = "https://files.pythonhosted.org/packages/a9/ab/562beaf8a53dc9f6b2459f200e7bc226bb07e51862a66351d8b7817e3efd/greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437", size = 641985 }, + { url = "https://files.pythonhosted.org/packages/03/d3/1006543621f16689f6dc75f6bcf06e3c23e044c26fe391c16c253623313e/greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145", size = 641268 }, + { url = "https://files.pythonhosted.org/packages/2f/c1/ad71ce1b5f61f900593377b3f77b39408bce5dc96754790311b49869e146/greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c", size = 597376 }, + { url = "https://files.pythonhosted.org/packages/f7/ff/183226685b478544d61d74804445589e069d00deb8ddef042699733950c7/greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e", size = 1123359 }, + { url = "https://files.pythonhosted.org/packages/c0/8b/9b3b85a89c22f55f315908b94cd75ab5fed5973f7393bbef000ca8b2c5c1/greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e", size = 1147458 }, + { url = "https://files.pythonhosted.org/packages/b8/1c/248fadcecd1790b0ba793ff81fa2375c9ad6442f4c748bf2cc2e6563346a/greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c", size = 281131 }, + { url = "https://files.pythonhosted.org/packages/ae/02/e7d0aef2354a38709b764df50b2b83608f0621493e47f47694eb80922822/greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22", size = 298306 }, +] + +[[package]] +name = "h11" +version = "0.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, +] + +[[package]] +name = "identify" +version = "2.6.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cf/92/69934b9ef3c31ca2470980423fda3d00f0460ddefdf30a67adf7f17e2e00/identify-2.6.5.tar.gz", hash = "sha256:c10b33f250e5bba374fae86fb57f3adcebf1161bce7cdf92031915fd480c13bc", size = 99213 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/fa/dce098f4cdf7621aa8f7b4f919ce545891f489482f0bfa5102f3eca8608b/identify-2.6.5-py2.py3-none-any.whl", hash = "sha256:14181a47091eb75b337af4c23078c9d09225cd4c48929f521f3bf16b09d02566", size = 99078 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, +] + +[[package]] +name = "importlib-metadata" +version = "8.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "jinja2" +version = "3.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/af/92/b3130cbbf5591acf9ade8708c365f3238046ac7cb8ccba6e81abccb0ccff/jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", size = 244674 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357 }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393 }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732 }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866 }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964 }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977 }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366 }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091 }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065 }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514 }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, + { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344 }, + { url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389 }, + { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607 }, + { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728 }, + { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826 }, + { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843 }, + { url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219 }, + { url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946 }, + { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063 }, + { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506 }, +] + +[[package]] +name = "mockupdb" +version = "1.9.0.dev1" +source = { git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master#317c4e049965f9d99423698a81e52d0ab37b7599" } +dependencies = [ + { name = "pymongo" }, +] + +[[package]] +name = "mypy" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002 }, + { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400 }, + { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172 }, + { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732 }, + { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197 }, + { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836 }, + { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432 }, + { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515 }, + { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791 }, + { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203 }, + { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900 }, + { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869 }, + { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668 }, + { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060 }, + { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167 }, + { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341 }, + { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991 }, + { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016 }, + { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097 }, + { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728 }, + { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965 }, + { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660 }, + { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198 }, + { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276 }, + { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493 }, + { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702 }, + { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104 }, + { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167 }, + { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834 }, + { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231 }, + { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905 }, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "pip" +version = "24.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f4/b1/b422acd212ad7eedddaf7981eee6e5de085154ff726459cf2da7c5a184c1/pip-24.3.1.tar.gz", hash = "sha256:ebcb60557f2aefabc2e0f918751cd24ea0d56d8ec5445fe1807f1d2109660b99", size = 1931073 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/7d/500c9ad20238fcfcb4cb9243eede163594d7020ce87bd9610c9e02771876/pip-24.3.1-py3-none-any.whl", hash = "sha256:3790624780082365f47549d032f3770eeb2b1e8bd1f7b2e02dace1afa361b4ed", size = 1822182 }, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "pre-commit" +version = "4.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgv" }, + { name = "identify" }, + { name = "nodeenv" }, + { name = "pyyaml" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2e/c8/e22c292035f1bac8b9f5237a2622305bc0304e776080b246f3df57c4ff9f/pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2", size = 191678 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/16/8f/496e10d51edd6671ebe0432e33ff800aa86775d2d147ce7d43389324a525/pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878", size = 218713 }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1d/67/6afbf0d507f73c32d21084a79946bfcfca5fbc62a72057e9c23797a737c9/pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c", size = 310028 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/89/bc88a6711935ba795a679ea6ebee07e128050d6382eaa35a0a47c8032bdc/pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", size = 181537 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pykerberos" +version = "1.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5ca1ed745a2c5672dc838a8398101051dd5f255b130d/pykerberos-1.2.4.tar.gz", hash = "sha256:9d701ebd8fc596c99d3155d5ba45813bd5908d26ef83ba0add250edb622abed4", size = 25046 } + +[[package]] +name = "pymongo" +version = "4.11.0.dev0" +source = { editable = "." } +dependencies = [ + { name = "dnspython" }, +] + +[package.optional-dependencies] +aws = [ + { name = "pymongo-auth-aws" }, +] +docs = [ + { name = "furo" }, + { name = "readthedocs-sphinx-search" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx-autobuild" }, + { name = "sphinx-rtd-theme" }, + { name = "sphinxcontrib-shellcheck" }, +] +encryption = [ + { name = "certifi", marker = "os_name == 'nt' or sys_platform == 'darwin'" }, + { name = "pymongo-auth-aws" }, + { name = "pymongocrypt" }, +] +gssapi = [ + { name = "pykerberos", marker = "os_name != 'nt'" }, + { name = "winkerberos", marker = "os_name == 'nt'" }, +] +ocsp = [ + { name = "certifi", marker = "os_name == 'nt' or sys_platform == 'darwin'" }, + { name = "cryptography" }, + { name = "pyopenssl" }, + { name = "requests" }, + { name = "service-identity" }, +] +snappy = [ + { name = "python-snappy" }, +] +test = [ + { name = "pytest" }, + { name = "pytest-asyncio" }, +] +zstd = [ + { name = "zstandard" }, +] + +[package.dev-dependencies] +coverage = [ + { name = "coverage" }, + { name = "pytest-cov" }, +] +dev = [ + { name = "pre-commit" }, +] +eventlet = [ + { name = "eventlet" }, +] +gevent = [ + { name = "gevent" }, +] +mockupdb = [ + { name = "mockupdb" }, +] +perf = [ + { name = "simplejson" }, +] +pymongocrypt-source = [ + { name = "pymongocrypt" }, +] +typing = [ + { name = "mypy" }, + { name = "pip" }, + { name = "pyright" }, + { name = "typing-extensions" }, +] + +[package.metadata] +requires-dist = [ + { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')" }, + { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')" }, + { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, + { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, + { name = "furo", marker = "extra == 'docs'", specifier = "==2024.8.6" }, + { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, + { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongocrypt", marker = "extra == 'encryption'", specifier = ">=1.12.0,<2.0.0" }, + { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, + { name = "pytest", marker = "extra == 'test'", specifier = ">=8.2" }, + { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24.0" }, + { name = "python-snappy", marker = "extra == 'snappy'" }, + { name = "readthedocs-sphinx-search", marker = "extra == 'docs'", specifier = "~=0.3" }, + { name = "requests", marker = "extra == 'ocsp'", specifier = "<3.0.0" }, + { name = "service-identity", marker = "extra == 'ocsp'", specifier = ">=18.1.0" }, + { name = "sphinx", marker = "extra == 'docs'", specifier = ">=5.3,<9" }, + { name = "sphinx-autobuild", marker = "extra == 'docs'", specifier = ">=2020.9.1" }, + { name = "sphinx-rtd-theme", marker = "extra == 'docs'", specifier = ">=2,<4" }, + { name = "sphinxcontrib-shellcheck", marker = "extra == 'docs'", specifier = ">=1,<2" }, + { name = "winkerberos", marker = "os_name == 'nt' and extra == 'gssapi'", specifier = ">=0.5.0" }, + { name = "zstandard", marker = "extra == 'zstd'" }, +] + +[package.metadata.requires-dev] +coverage = [ + { name = "coverage", specifier = ">=5,<=7.5" }, + { name = "pytest-cov" }, +] +dev = [{ name = "pre-commit", specifier = ">=4.0" }] +eventlet = [{ name = "eventlet" }] +gevent = [{ name = "gevent" }] +mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] +perf = [{ name = "simplejson" }] +pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] +typing = [ + { name = "mypy", specifier = "==1.14.1" }, + { name = "pip" }, + { name = "pyright", specifier = "==1.1.392.post0" }, + { name = "typing-extensions" }, +] + +[[package]] +name = "pymongo-auth-aws" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/37/ca8d840f322f0047b71afcec7a489b1ea1f59a5f6d29f91ad8004024736f/pymongo_auth_aws-1.3.0.tar.gz", hash = "sha256:d0fa893958dc525ca29f601c34f2ca73c860f66bc6511ec0a7da6eb7ea44e94f", size = 18559 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/12/a997fc108416f31fac55748e5406c1c8c4e976a4073f07b5553825641611/pymongo_auth_aws-1.3.0-py3-none-any.whl", hash = "sha256:367f6d853da428a02e9e450422756133715d40f8141f47ae5d98f139a88c0ce5", size = 15470 }, +] + +[[package]] +name = "pymongocrypt" +version = "1.13.0.dev0" +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#90476d5db7737bab2ce1c198df5671a12dbaae1a" } +dependencies = [ + { name = "cffi" }, + { name = "cryptography" }, + { name = "httpx" }, + { name = "packaging" }, +] + +[[package]] +name = "pyopenssl" +version = "25.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/26/e25b4a374b4639e0c235527bbe31c0524f26eda701d79456a7e1877f4cc5/pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16", size = 179573 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/d7/eb76863d2060dcbe7c7e6cccfd95ac02ea0b9acc37745a0d99ff6457aefb/pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90", size = 56453 }, +] + +[[package]] +name = "pyright" +version = "1.1.392.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/df/3c6f6b08fba7ccf49b114dfc4bb33e25c299883fd763f93fad47ef8bc58d/pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd", size = 3789911 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/b1/a18de17f40e4f61ca58856b9ef9b0febf74ff88978c3f7776f910071f567/pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2", size = 5595487 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.25.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/df/adcc0d60f1053d74717d21d58c0048479e9cab51464ce0d2965b086bd0e2/pytest_asyncio-0.25.2.tar.gz", hash = "sha256:3f8ef9a98f45948ea91a0ed3dc4268b5326c0e7bce73892acc654df4262ad45f", size = 53950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/d8/defa05ae50dcd6019a95527200d3b3980043df5aa445d40cb0ef9f7f98ab/pytest_asyncio-0.25.2-py3-none-any.whl", hash = "sha256:0d0bb693f7b99da304a0634afc0a4b19e49d5e0de2d670f38dc4bfa5727c5075", size = 19400 }, +] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/45/9b538de8cef30e17c7b45ef42f538a94889ed6a16f2387a6c89e73220651/pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0", size = 66945 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/3b/48e79f2cd6a61dbbd4807b4ed46cb564b4fd50a76166b1c4ea5c1d9e2371/pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35", size = 22949 }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "python-snappy" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cramjam" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/66/9185fbb6605ba92716d9f77fbb13c97eb671cd13c3ad56bd154016fbf08b/python_snappy-0.7.3.tar.gz", hash = "sha256:40216c1badfb2d38ac781ecb162a1d0ec40f8ee9747e610bcfefdfa79486cee3", size = 9337 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199 }, + { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758 }, + { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463 }, + { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280 }, + { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239 }, + { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802 }, + { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527 }, + { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052 }, + { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774 }, + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, + { url = "https://files.pythonhosted.org/packages/65/d8/b7a1db13636d7fb7d4ff431593c510c8b8fca920ade06ca8ef20015493c5/PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", size = 184777 }, + { url = "https://files.pythonhosted.org/packages/0a/02/6ec546cd45143fdf9840b2c6be8d875116a64076218b61d68e12548e5839/PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", size = 172318 }, + { url = "https://files.pythonhosted.org/packages/0e/9a/8cc68be846c972bda34f6c2a93abb644fb2476f4dcc924d52175786932c9/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", size = 720891 }, + { url = "https://files.pythonhosted.org/packages/e9/6c/6e1b7f40181bc4805e2e07f4abc10a88ce4648e7e95ff1abe4ae4014a9b2/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", size = 722614 }, + { url = "https://files.pythonhosted.org/packages/3d/32/e7bd8535d22ea2874cef6a81021ba019474ace0d13a4819c2a4bce79bd6a/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", size = 737360 }, + { url = "https://files.pythonhosted.org/packages/d7/12/7322c1e30b9be969670b672573d45479edef72c9a0deac3bb2868f5d7469/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", size = 699006 }, + { url = "https://files.pythonhosted.org/packages/82/72/04fcad41ca56491995076630c3ec1e834be241664c0c09a64c9a2589b507/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", size = 723577 }, + { url = "https://files.pythonhosted.org/packages/ed/5e/46168b1f2757f1fcd442bc3029cd8767d88a98c9c05770d8b420948743bb/PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", size = 144593 }, + { url = "https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312 }, +] + +[[package]] +name = "readthedocs-sphinx-search" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8f/96/0c51439e3dbc634cf5328ffb173ff759b7fc9abf3276e78bf71d9fc0aa51/readthedocs-sphinx-search-0.3.2.tar.gz", hash = "sha256:277773bfa28566a86694c08e568d5a648cd80f22826545555a764d6d20c365fb", size = 21949 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/3c/41bc9d7d4d936a73e380423f23996bee1691e17598d8a03c062be6aac640/readthedocs_sphinx_search-0.3.2-py3-none-any.whl", hash = "sha256:58716fd21f01581e6e67bf3bc02e79c77e10dc58b5f8e4c7cc1977e013eda173", size = 21379 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "urllib3", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "s3transfer" +version = "0.11.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/aa/fdd958c626b00e3f046d4004363e7f1a2aba4354f78d65ceb3b217fa5eb8/s3transfer-0.11.1.tar.gz", hash = "sha256:3f25c900a367c8b7f7d8f9c34edc87e300bde424f779dc9f0a8ae4f9df9264f6", size = 146952 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ce/22673f4a85ccc640735b4f8d12178a0f41b5d3c6eda7f33756d10ce56901/s3transfer-0.11.1-py3-none-any.whl", hash = "sha256:8fa0aa48177be1f3425176dfe1ab85dcd3d962df603c3dbfc585e6bf857ef0ff", size = 84111 }, +] + +[[package]] +name = "service-identity" +version = "24.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "cryptography" }, + { name = "pyasn1" }, + { name = "pyasn1-modules" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/a5/dfc752b979067947261dbbf2543470c58efe735c3c1301dd870ef27830ee/service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09", size = 39245 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/2c/ca6dd598b384bc1ce581e24aaae0f2bed4ccac57749d5c3befbb5e742081/service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85", size = 11364 }, +] + +[[package]] +name = "setuptools" +version = "75.8.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/92/ec/089608b791d210aec4e7f97488e67ab0d33add3efccb83a056cbafe3a2a6/setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6", size = 1343222 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/8a/b9dc7678803429e4a3bc9ba462fa3dd9066824d3c607490235c6a796be5a/setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3", size = 1228782 }, +] + +[[package]] +name = "simplejson" +version = "3.19.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3d/29/085111f19717f865eceaf0d4397bf3e76b08d60428b076b64e2a1903706d/simplejson-3.19.3.tar.gz", hash = "sha256:8e086896c36210ab6050f2f9f095a5f1e03c83fa0e7f296d6cba425411364680", size = 85237 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/24/260ad03435ce8ef2436031951134659c7161776ec3a78094b35b9375ceea/simplejson-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:50d8b742d74c449c4dcac570d08ce0f21f6a149d2d9cf7652dbf2ba9a1bc729a", size = 93660 }, + { url = "https://files.pythonhosted.org/packages/63/a1/dee207f357bcd6b106f2ca5129ee916c24993ba08b7dfbf9a37c22442ea9/simplejson-3.19.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd011fc3c1d88b779645495fdb8189fb318a26981eebcce14109460e062f209b", size = 75546 }, + { url = "https://files.pythonhosted.org/packages/80/7b/45ef1da43f54d209ce2ef59b7356cda13f810186c381f38ae23a4d2b1337/simplejson-3.19.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:637c4d4b81825c1f4d651e56210bd35b5604034b192b02d2d8f17f7ce8c18f42", size = 75602 }, + { url = "https://files.pythonhosted.org/packages/7f/4b/9a132382982f8127bc7ce5212a5585d83c174707c9dd698d0cb6a0d41882/simplejson-3.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f56eb03bc9e432bb81adc8ecff2486d39feb371abb442964ffb44f6db23b332", size = 138632 }, + { url = "https://files.pythonhosted.org/packages/76/37/012f5ad2f38afa28f8a6ad9da01dc0b64492ffbaf2a3f2f8a0e1fddf9c1d/simplejson-3.19.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ef59a53be400c1fad2c914b8d74c9d42384fed5174f9321dd021b7017fd40270", size = 146740 }, + { url = "https://files.pythonhosted.org/packages/69/b3/89640bd676e26ea2315b5aaf80712a6fbbb4338e4caf872d91448502a19b/simplejson-3.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72e8abbc86fcac83629a030888b45fed3a404d54161118be52cb491cd6975d3e", size = 134440 }, + { url = "https://files.pythonhosted.org/packages/61/20/0035a288deaff05397d6cc0145b33f3dd2429b99cdc880de4c5eca41ca72/simplejson-3.19.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8efb03ca77bd7725dfacc9254df00d73e6f43013cf39bd37ef1a8ed0ebb5165", size = 137949 }, + { url = "https://files.pythonhosted.org/packages/5d/de/5b03fafe3003e32d179588953d38183af6c3747e95c7dcc668c4f9eb886a/simplejson-3.19.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:add8850db04b98507a8b62d248a326ecc8561e6d24336d1ca5c605bbfaab4cad", size = 139992 }, + { url = "https://files.pythonhosted.org/packages/d1/ce/e493116ff49fd215f7baa25195b8f684c91e65c153e2a57e04dc3f3a466b/simplejson-3.19.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fc3dc9fb413fc34c396f52f4c87de18d0bd5023804afa8ab5cc224deeb6a9900", size = 140320 }, + { url = "https://files.pythonhosted.org/packages/86/f3/a18b98a7a27548829f672754dd3940fb637a27981399838128d3e560087f/simplejson-3.19.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dfa420bb9225dd33b6efdabde7c6a671b51150b9b1d9c4e5cd74d3b420b3fe1", size = 148625 }, + { url = "https://files.pythonhosted.org/packages/0f/55/d3da33ee3e708133da079b9d537693d7fef281e6f0d27921cc7e5b3ec523/simplejson-3.19.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7b5c472099b39b274dcde27f1113db8d818c9aa3ba8f78cbb8ad04a4c1ac2118", size = 141287 }, + { url = "https://files.pythonhosted.org/packages/17/e8/56184ab4d66bb64a6ff569f069b3796dfd943f9b961268fe0d403526fc17/simplejson-3.19.3-cp310-cp310-win32.whl", hash = "sha256:817abad79241ed4a507b3caf4d3f2be5079f39d35d4c550a061988986bffd2ec", size = 74143 }, + { url = "https://files.pythonhosted.org/packages/be/8f/a0089eff060f10a925f08b0a0f50854321484f1ac54b1895bbf4c9213dfe/simplejson-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:dd5b9b1783e14803e362a558680d88939e830db2466f3fa22df5c9319f8eea94", size = 75643 }, + { url = "https://files.pythonhosted.org/packages/8c/bb/9ee3959e6929d228cf669b3f13f0edd43c5261b6cd69598640748b19ca35/simplejson-3.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e88abff510dcff903a18d11c2a75f9964e768d99c8d147839913886144b2065e", size = 91930 }, + { url = "https://files.pythonhosted.org/packages/ac/ae/a06523928af3a6783e2638cd4f6035c3e32de1c1063d563d9060c8d2f1ad/simplejson-3.19.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:934a50a614fb831614db5dbfba35127ee277624dda4d15895c957d2f5d48610c", size = 74787 }, + { url = "https://files.pythonhosted.org/packages/c3/58/fea732e48a7540035fe46d39e6fd77679f5810311d31da8661ce7a18210a/simplejson-3.19.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:212fce86a22188b0c7f53533b0f693ea9605c1a0f02c84c475a30616f55a744d", size = 74612 }, + { url = "https://files.pythonhosted.org/packages/ab/4d/15718f20cb0e3875b8af9597d6bb3bfbcf1383834b82b6385ee9ac0b72a9/simplejson-3.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d9e8f836688a8fabe6a6b41b334aa550a6823f7b4ac3d3712fc0ad8655be9a8", size = 143550 }, + { url = "https://files.pythonhosted.org/packages/93/44/815a4343774760f7a82459c8f6a4d8268b4b6d23f81e7b922a5e2ca79171/simplejson-3.19.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23228037dc5d41c36666384062904d74409a62f52283d9858fa12f4c22cffad1", size = 153284 }, + { url = "https://files.pythonhosted.org/packages/9d/52/d3202d9bba95444090d1c98e43da3c10907875babf63ed3c134d1b9437e3/simplejson-3.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0791f64fed7d4abad639491f8a6b1ba56d3c604eb94b50f8697359b92d983f36", size = 141518 }, + { url = "https://files.pythonhosted.org/packages/b7/d4/850948bcbcfe0b4a6c69dfde10e245d3a1ea45252f16a1e2308a3b06b1da/simplejson-3.19.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f614581b61a26fbbba232a1391f6cee82bc26f2abbb6a0b44a9bba25c56a1c", size = 144688 }, + { url = "https://files.pythonhosted.org/packages/58/d2/b8dcb0a07d9cd54c47f9fe8733dbb83891d1efe4fc786d9dfc8781cc04f9/simplejson-3.19.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1df0aaf1cb787fdf34484ed4a1f0c545efd8811f6028623290fef1a53694e597", size = 144534 }, + { url = "https://files.pythonhosted.org/packages/a9/95/1e92d99039041f596e0923ec4f9153244acaf3830944dc69a7c11b23ceaa/simplejson-3.19.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:951095be8d4451a7182403354c22ec2de3e513e0cc40408b689af08d02611588", size = 146565 }, + { url = "https://files.pythonhosted.org/packages/21/04/c96aeb3a74031255e4cbcc0ca1b6ebfb5549902f0a065f06d65ce8447c0c/simplejson-3.19.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a954b30810988feeabde843e3263bf187697e0eb5037396276db3612434049b", size = 155014 }, + { url = "https://files.pythonhosted.org/packages/b7/41/e28a28593afc4a75d8999d057bfb7c73a103e35f927e66f4bb92571787ae/simplejson-3.19.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c40df31a75de98db2cdfead6074d4449cd009e79f54c1ebe5e5f1f153c68ad20", size = 148092 }, + { url = "https://files.pythonhosted.org/packages/2b/82/1c81a3af06f937afb6d2e9d74a465c0e0ae6db444d1bf2a436ea26de1965/simplejson-3.19.3-cp311-cp311-win32.whl", hash = "sha256:7e2a098c21ad8924076a12b6c178965d88a0ad75d1de67e1afa0a66878f277a5", size = 73942 }, + { url = "https://files.pythonhosted.org/packages/65/be/d8ab9717f471be3c114f16abd8be21d9a6a0a09b9b49177d93d64d3717d9/simplejson-3.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:c9bedebdc5fdad48af8783022bae307746d54006b783007d1d3c38e10872a2c6", size = 75469 }, + { url = "https://files.pythonhosted.org/packages/20/15/513fea93fafbdd4993eacfcb762965b2ff3d29e618c029e2956174d68c4b/simplejson-3.19.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:66a0399e21c2112acacfebf3d832ebe2884f823b1c7e6d1363f2944f1db31a99", size = 92921 }, + { url = "https://files.pythonhosted.org/packages/a4/4f/998a907ae1a6c104dc0ee48aa248c2478490152808d34d8e07af57f396c3/simplejson-3.19.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6ef9383c5e05f445be60f1735c1816163c874c0b1ede8bb4390aff2ced34f333", size = 75311 }, + { url = "https://files.pythonhosted.org/packages/db/44/acd6122201e927451869d45952b9ab1d3025cdb5e61548d286d08fbccc08/simplejson-3.19.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:42e5acf80d4d971238d4df97811286a044d720693092b20a56d5e56b7dcc5d09", size = 74964 }, + { url = "https://files.pythonhosted.org/packages/27/ca/d0a1e8f16e1bbdc0b8c6d88166f45f565ed7285f53928cfef3b6ce78f14d/simplejson-3.19.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0b0efc7279d768db7c74d3d07f0b5c81280d16ae3fb14e9081dc903e8360771", size = 150106 }, + { url = "https://files.pythonhosted.org/packages/63/59/0554b78cf26c98e2b9cae3f44723bd72c2394e2afec1a14eedc6211f7187/simplejson-3.19.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0552eb06e7234da892e1d02365cd2b7b2b1f8233aa5aabdb2981587b7cc92ea0", size = 158347 }, + { url = "https://files.pythonhosted.org/packages/b2/fe/9f30890352e431e8508cc569912d3322147d3e7e4f321e48c0adfcb4c97d/simplejson-3.19.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf6a3b9a7d7191471b464fe38f684df10eb491ec9ea454003edb45a011ab187", size = 148456 }, + { url = "https://files.pythonhosted.org/packages/37/e3/663a09542ee021d4131162f7a164cb2e7f04ef48433a67591738afbf12ea/simplejson-3.19.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7017329ca8d4dca94ad5e59f496e5fc77630aecfc39df381ffc1d37fb6b25832", size = 152190 }, + { url = "https://files.pythonhosted.org/packages/31/20/4e0c4d35e10ff6465003bec304316d822a559a1c38c66ef6892ca199c207/simplejson-3.19.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:67a20641afebf4cfbcff50061f07daad1eace6e7b31d7622b6fa2c40d43900ba", size = 149846 }, + { url = "https://files.pythonhosted.org/packages/08/7a/46e2e072cac3987cbb05946f25167f0ad2fe536748e7405953fd6661a486/simplejson-3.19.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:dd6a7dabcc4c32daf601bc45e01b79175dde4b52548becea4f9545b0a4428169", size = 151714 }, + { url = "https://files.pythonhosted.org/packages/7f/7d/dbeeac10eb61d5d8858d0bb51121a21050d281dc83af4c557f86da28746c/simplejson-3.19.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:08f9b443a94e72dd02c87098c96886d35790e79e46b24e67accafbf13b73d43b", size = 158777 }, + { url = "https://files.pythonhosted.org/packages/fc/8f/a98bdbb799c6a4a884b5823db31785a96ba895b4b0f4d8ac345d6fe98bbf/simplejson-3.19.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa97278ae6614346b5ca41a45a911f37a3261b57dbe4a00602048652c862c28b", size = 154230 }, + { url = "https://files.pythonhosted.org/packages/b1/db/852eebceb85f969ae40e06babed1a93d3bacb536f187d7a80ff5823a5979/simplejson-3.19.3-cp312-cp312-win32.whl", hash = "sha256:ef28c3b328d29b5e2756903aed888960bc5df39b4c2eab157ae212f70ed5bf74", size = 74002 }, + { url = "https://files.pythonhosted.org/packages/fe/68/9f0e5df0651cb79ef83cba1378765a00ee8038e6201cc82b8e7178a7778e/simplejson-3.19.3-cp312-cp312-win_amd64.whl", hash = "sha256:1e662336db50ad665777e6548b5076329a94a0c3d4a0472971c588b3ef27de3a", size = 75596 }, + { url = "https://files.pythonhosted.org/packages/93/3a/5896821ed543899fcb9c4256c7e71bb110048047349a00f42bc8b8fb379f/simplejson-3.19.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0959e6cb62e3994b5a40e31047ff97ef5c4138875fae31659bead691bed55896", size = 92931 }, + { url = "https://files.pythonhosted.org/packages/39/15/5d33d269440912ee40d856db0c8be2b91aba7a219690ab01f86cb0edd590/simplejson-3.19.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7a7bfad839c624e139a4863007233a3f194e7c51551081f9789cba52e4da5167", size = 75318 }, + { url = "https://files.pythonhosted.org/packages/2a/8d/2e7483a2bf7ec53acf7e012bafbda79d7b34f90471dda8e424544a59d484/simplejson-3.19.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afab2f7f2486a866ff04d6d905e9386ca6a231379181a3838abce1f32fbdcc37", size = 74971 }, + { url = "https://files.pythonhosted.org/packages/4d/9d/9bdf34437c8834a7cf7246f85e9d5122e30579f512c10a0c2560e994294f/simplejson-3.19.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00313681015ac498e1736b304446ee6d1c72c5b287cd196996dad84369998f7", size = 150112 }, + { url = "https://files.pythonhosted.org/packages/a7/e2/1f2ae2d89eaf85f6163c82150180aae5eaa18085cfaf892f8a57d4c51cbd/simplejson-3.19.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d936ae682d5b878af9d9eb4d8bb1fdd5e41275c8eb59ceddb0aeed857bb264a2", size = 158354 }, + { url = "https://files.pythonhosted.org/packages/60/83/26f610adf234c8492b3f30501e12f2271e67790f946c6898fe0c58aefe99/simplejson-3.19.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c6657485393f2e9b8177c77a7634f13ebe70d5e6de150aae1677d91516ce6b", size = 148455 }, + { url = "https://files.pythonhosted.org/packages/b5/4b/109af50006af77133653c55b5b91b4bd2d579ff8254ce11216c0b75f911b/simplejson-3.19.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a6a750d3c7461b1c47cfc6bba8d9e57a455e7c5f80057d2a82f738040dd1129", size = 152191 }, + { url = "https://files.pythonhosted.org/packages/75/dc/108872a8825cbd99ae6f4334e0490ff1580367baf12198bcaf988f6820ba/simplejson-3.19.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ea7a4a998c87c5674a27089e022110a1a08a7753f21af3baf09efe9915c23c3c", size = 149954 }, + { url = "https://files.pythonhosted.org/packages/eb/be/deec1d947a5d0472276ab4a4d1a9378dc5ee27f3dc9e54d4f62ffbad7a08/simplejson-3.19.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6300680d83a399be2b8f3b0ef7ef90b35d2a29fe6e9c21438097e0938bbc1564", size = 151812 }, + { url = "https://files.pythonhosted.org/packages/e9/58/4ee130702d36b1551ef66e7587eefe56651f3669255bf748cd71691e2434/simplejson-3.19.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ab69f811a660c362651ae395eba8ce84f84c944cea0df5718ea0ba9d1e4e7252", size = 158880 }, + { url = "https://files.pythonhosted.org/packages/0f/e1/59cc6a371b60f89e3498d9f4c8109f6b7359094d453f5fe80b2677b777b0/simplejson-3.19.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:256e09d0f94d9c3d177d9e95fd27a68c875a4baa2046633df387b86b652f5747", size = 154344 }, + { url = "https://files.pythonhosted.org/packages/79/45/1b36044670016f5cb25ebd92497427d2d1711ecb454d00f71eb9a00b77cc/simplejson-3.19.3-cp313-cp313-win32.whl", hash = "sha256:2c78293470313aefa9cfc5e3f75ca0635721fb016fb1121c1c5b0cb8cc74712a", size = 74002 }, + { url = "https://files.pythonhosted.org/packages/e2/58/b06226e6b0612f2b1fa13d5273551da259f894566b1eef32249ddfdcce44/simplejson-3.19.3-cp313-cp313-win_amd64.whl", hash = "sha256:3bbcdc438dc1683b35f7a8dc100960c721f922f9ede8127f63bed7dfded4c64c", size = 75599 }, + { url = "https://files.pythonhosted.org/packages/9a/3d/e7f1caf7fa8c004c30e2c0595a22646a178344a7f53924c11c3d263a8623/simplejson-3.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b5587feda2b65a79da985ae6d116daf6428bf7489992badc29fc96d16cd27b05", size = 93646 }, + { url = "https://files.pythonhosted.org/packages/01/40/ff5cae1b4ff35c7822456ad7d098371d697479d418194064b8aff8142d70/simplejson-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e0d2b00ecbcd1a3c5ea1abc8bb99a26508f758c1759fd01c3be482a3655a176f", size = 75544 }, + { url = "https://files.pythonhosted.org/packages/56/a8/dbe799f3620a08337ff5f3be27df7b5ba5beb1ee06acaf75f3cb46f8d650/simplejson-3.19.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:32a3ada8f3ea41db35e6d37b86dade03760f804628ec22e4fe775b703d567426", size = 75593 }, + { url = "https://files.pythonhosted.org/packages/d5/53/6ed299b9201ea914bb6a178a7e65413ed1969981533f50bfbe8a215be98f/simplejson-3.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f455672f4738b0f47183c5896e3606cd65c9ddee3805a4d18e8c96aa3f47c84", size = 138077 }, + { url = "https://files.pythonhosted.org/packages/1c/73/14306559157a6faedb4ecae28ad907b64b5359be5c9ec79233546acb96a4/simplejson-3.19.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b737a5fefedb8333fa50b8db3dcc9b1d18fd6c598f89fa7debff8b46bf4e511", size = 146307 }, + { url = "https://files.pythonhosted.org/packages/5b/1a/7994abb33e53ec972dd5e6dbb337b9070d3ad96017c4cff9d5dc83678ad4/simplejson-3.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb47ee773ce67476a960e2db4a0a906680c54f662521550828c0cc57d0099426", size = 133922 }, + { url = "https://files.pythonhosted.org/packages/08/15/8b4e1a8c7729b37797d0eab1381f517f928bd323d17efa7f4414c3565e1f/simplejson-3.19.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eed8cd98a7b24861da9d3d937f5fbfb6657350c547528a117297fe49e3960667", size = 137367 }, + { url = "https://files.pythonhosted.org/packages/59/9a/f5b786fe611395564d3e84f58f668242a7a2e674b4fac71b4e6b21d6d2b7/simplejson-3.19.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:619756f1dd634b5bdf57d9a3914300526c3b348188a765e45b8b08eabef0c94e", size = 139513 }, + { url = "https://files.pythonhosted.org/packages/4d/87/c310daf5e2f10306de3720f075f8ed74cbe83396879b8c55e832393233a5/simplejson-3.19.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dd7230d061e755d60a4d5445bae854afe33444cdb182f3815cff26ac9fb29a15", size = 139749 }, + { url = "https://files.pythonhosted.org/packages/fd/89/690880e1639b421a919d36fadf1fc364a38c3bc4f208dc11627426cdbe98/simplejson-3.19.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:101a3c8392028cd704a93c7cba8926594e775ca3c91e0bee82144e34190903f1", size = 148103 }, + { url = "https://files.pythonhosted.org/packages/a3/31/ef13eda5b5a0d8d9555b70151ee2956f63b845e1fac4ff904339dfb4dd89/simplejson-3.19.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e557712fc79f251673aeb3fad3501d7d4da3a27eff0857af2e1d1afbbcf6685", size = 140740 }, + { url = "https://files.pythonhosted.org/packages/39/5f/26b0a036592e45a2cb4be2f53d8827257e169bd5c84744a1aac89b0ff56f/simplejson-3.19.3-cp39-cp39-win32.whl", hash = "sha256:0bc5544e3128891bf613b9f71813ee2ec9c11574806f74dd8bb84e5e95bf64a2", size = 74115 }, + { url = "https://files.pythonhosted.org/packages/32/06/a35e2e1d8850aff1cf1320d4887bd5f97921c8964a1e260983d38d5d6c17/simplejson-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:06662392e4913dc8846d6a71a6d5de86db5fba244831abe1dd741d62a4136764", size = 75636 }, + { url = "https://files.pythonhosted.org/packages/0d/e7/f9fafbd4f39793a20cc52e77bbd766f7384312526d402c382928dc7667f6/simplejson-3.19.3-py3-none-any.whl", hash = "sha256:49cc4c7b940d43bd12bf87ec63f28cbc4964fc4e12c031cc8cd01650f43eb94e", size = 57004 }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1", size = 86699 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a", size = 93002 }, +] + +[[package]] +name = "soupsieve" +version = "2.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/ce/fbaeed4f9fb8b2daa961f90591662df6a86c1abf25c548329a86920aedfb/soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb", size = 101569 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 }, +] + +[[package]] +name = "sphinx" +version = "7.4.7" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "alabaster", version = "0.7.16", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "babel", marker = "python_full_version < '3.10'" }, + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version < '3.10'" }, + { name = "imagesize", marker = "python_full_version < '3.10'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jinja2", marker = "python_full_version < '3.10'" }, + { name = "packaging", marker = "python_full_version < '3.10'" }, + { name = "pygments", marker = "python_full_version < '3.10'" }, + { name = "requests", marker = "python_full_version < '3.10'" }, + { name = "snowballstemmer", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version < '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624 }, +] + +[[package]] +name = "sphinx" +version = "8.1.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.10'", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "babel", marker = "python_full_version >= '3.10'" }, + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version >= '3.10'" }, + { name = "imagesize", marker = "python_full_version >= '3.10'" }, + { name = "jinja2", marker = "python_full_version >= '3.10'" }, + { name = "packaging", marker = "python_full_version >= '3.10'" }, + { name = "pygments", marker = "python_full_version >= '3.10'" }, + { name = "requests", marker = "python_full_version >= '3.10'" }, + { name = "snowballstemmer", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.10'" }, + { name = "tomli", marker = "python_full_version == '3.10.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125 }, +] + +[[package]] +name = "sphinx-autobuild" +version = "2024.10.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "starlette" }, + { name = "uvicorn" }, + { name = "watchfiles" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a5/2c/155e1de2c1ba96a72e5dba152c509a8b41e047ee5c2def9e9f0d812f8be7/sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1", size = 14023 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/c0/eba125db38c84d3c74717008fd3cb5000b68cd7e2cbafd1349c6a38c3d3b/sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa", size = 11908 }, +] + +[[package]] +name = "sphinx-basic-ng" +version = "1.0.0b2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/dd/018ce05c532a22007ac58d4f45232514cd9d6dd0ee1dc374e309db830983/sphinx_basic_ng-1.0.0b2-py3-none-any.whl", hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b", size = 22496 }, +] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinxcontrib-jquery" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561 }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300 }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530 }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705 }, +] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104 }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071 }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743 }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, +] + +[[package]] +name = "sphinxcontrib-shellcheck" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator" }, + { name = "docutils" }, + { name = "six" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/2b/20717a5e0c7ee99dfd5fcdf11a8cf0ab02533cf62775f24d344ea5cf48c1/sphinxcontrib-shellcheck-1.1.2.zip", hash = "sha256:475a3ae12a1cfc1bc26cff57f0dd15561213818e3b470b3eacc4bb8be7b129c0", size = 338739 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/9c/1ff7fe5174f944fac0fcb53bdaac7b98d73a98dd2ca800d95af6af9edb9a/sphinxcontrib_shellcheck-1.1.2-py35-none-any.whl", hash = "sha256:c0449dc9402521ab1d05a1b9eb8c9099707da64824341686dab4f620dc688514", size = 11532 }, + { url = "https://files.pythonhosted.org/packages/9f/8c/833388d3127d8dc0d5558bf52225eb20ed024ac46ef8ef4bffe7298ceb3d/sphinxcontrib_shellcheck-1.1.2-py36-none-any.whl", hash = "sha256:bcd8ffd26e6430deff9ffd10705683b502ace3fc8b4d1ba84496b3752f65fe52", size = 11533 }, + { url = "https://files.pythonhosted.org/packages/9d/b5/cdc74763bcf0916f47d053830c00114f1de65d97ea2281b66bbf2a587b8a/sphinxcontrib_shellcheck-1.1.2-py37-none-any.whl", hash = "sha256:46d1aba8201bbfc7a2c51e08446cab36bdab318c997223c8fc40733a5eedc71f", size = 11533 }, + { url = "https://files.pythonhosted.org/packages/58/ba/cf15480bc238a15e10604ee7f0e3e20ea0bf9a55a4f0b4e50571e8d13e60/sphinxcontrib_shellcheck-1.1.2-py38-none-any.whl", hash = "sha256:4c5f2840418cd1d7d662c0b3f51a07625f1a8f92755b19347ce85e8258e9d847", size = 11532 }, +] + +[[package]] +name = "starlette" +version = "0.45.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/90/4f/e1c9f4ec3dae67a94c9285ed275355d5f7cf0f3a5c34538c8ae5412af550/starlette-0.45.2.tar.gz", hash = "sha256:bba1831d15ae5212b22feab2f218bab6ed3cd0fc2dc1d4442443bb1ee52260e0", size = 2574026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/ab/fe4f57c83620b39dfc9e7687ebad59129ff05170b99422105019d9a65eec/starlette-0.45.2-py3-none-any.whl", hash = "sha256:4daec3356fb0cb1e723a5235e5beaf375d2259af27532958e2d79df549dad9da", size = 71505 }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "urllib3" +version = "1.26.20" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225 }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, +] + +[[package]] +name = "uvicorn" +version = "0.34.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, +] + +[[package]] +name = "virtualenv" +version = "20.29.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/ca/f23dcb02e161a9bba141b1c08aa50e8da6ea25e6d780528f1d385a3efe25/virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35", size = 7658028 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/9b/599bcfc7064fbe5740919e78c5df18e5dceb0887e676256a1061bb5ae232/virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779", size = 4282379 }, +] + +[[package]] +name = "watchfiles" +version = "1.0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f5/26/c705fc77d0a9ecdb9b66f1e2976d95b81df3cae518967431e7dbf9b5e219/watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205", size = 94625 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/02/22fcaed0396730b0d362bc8d1ffb3be2658fd473eecbb2ba84243e157f11/watchfiles-1.0.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08", size = 395212 }, + { url = "https://files.pythonhosted.org/packages/e9/3d/ec5a2369a46edf3ebe092c39d9ae48e8cb6dacbde51c4b4f98936c524269/watchfiles-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1", size = 384815 }, + { url = "https://files.pythonhosted.org/packages/df/b4/898991cececbe171e67142c31905510203649569d9817848f47c4177ee42/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a", size = 450680 }, + { url = "https://files.pythonhosted.org/packages/58/f7/d4aa3000e812cfb5e5c2c6c0a3ec9d0a46a42489a8727edd160631c4e210/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1", size = 455923 }, + { url = "https://files.pythonhosted.org/packages/dd/95/7e2e4c6aba1b02fb5c76d2f6a450b85215921ec5f8f7ad5efd075369563f/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3", size = 482339 }, + { url = "https://files.pythonhosted.org/packages/bb/67/4265b0fabcc2ef2c9e3e8802ba7908cf718a357ebfb49c72e53787156a48/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2", size = 519908 }, + { url = "https://files.pythonhosted.org/packages/0d/96/b57802d5f8164bdf070befb4fd3dec4edba5a364ec0670965a97eb8098ce/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2", size = 501410 }, + { url = "https://files.pythonhosted.org/packages/8b/18/6db0de4e8911ba14e31853201b40c0fa9fea5ecf3feb86b0ad58f006dfc3/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899", size = 452876 }, + { url = "https://files.pythonhosted.org/packages/df/df/092a961815edf723a38ba2638c49491365943919c3526cc9cf82c42786a6/watchfiles-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff", size = 615353 }, + { url = "https://files.pythonhosted.org/packages/f3/cf/b85fe645de4ff82f3f436c5e9032379fce37c303f6396a18f9726cc34519/watchfiles-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f", size = 613187 }, + { url = "https://files.pythonhosted.org/packages/f6/d4/a9fea27aef4dd69689bc3556718c1157a7accb72aa035ece87c1fa8483b5/watchfiles-1.0.4-cp310-cp310-win32.whl", hash = "sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f", size = 270799 }, + { url = "https://files.pythonhosted.org/packages/df/02/dbe9d4439f15dd4ad0720b6e039bde9d66d1f830331f34c18eb70fa6608e/watchfiles-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161", size = 284145 }, + { url = "https://files.pythonhosted.org/packages/0f/bb/8461adc4b1fed009546fb797fc0d5698dcfe5e289cb37e1b8f16a93cdc30/watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19", size = 394869 }, + { url = "https://files.pythonhosted.org/packages/55/88/9ebf36b3547176d1709c320de78c1fa3263a46be31b5b1267571d9102686/watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235", size = 384905 }, + { url = "https://files.pythonhosted.org/packages/03/8a/04335ce23ef78d8c69f0913e8b20cf7d9233e3986543aeef95ef2d6e43d2/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202", size = 449944 }, + { url = "https://files.pythonhosted.org/packages/17/4e/c8d5dcd14fe637f4633616dabea8a4af0a10142dccf3b43e0f081ba81ab4/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6", size = 456020 }, + { url = "https://files.pythonhosted.org/packages/5e/74/3e91e09e1861dd7fbb1190ce7bd786700dc0fbc2ccd33bb9fff5de039229/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317", size = 482983 }, + { url = "https://files.pythonhosted.org/packages/a1/3d/e64de2d1ce4eb6a574fd78ce3a28c279da263be9ef3cfcab6f708df192f2/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee", size = 520320 }, + { url = "https://files.pythonhosted.org/packages/2c/bd/52235f7063b57240c66a991696ed27e2a18bd6fcec8a1ea5a040b70d0611/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49", size = 500988 }, + { url = "https://files.pythonhosted.org/packages/3a/b0/ff04194141a5fe650c150400dd9e42667916bc0f52426e2e174d779b8a74/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c", size = 452573 }, + { url = "https://files.pythonhosted.org/packages/3d/9d/966164332c5a178444ae6d165082d4f351bd56afd9c3ec828eecbf190e6a/watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1", size = 615114 }, + { url = "https://files.pythonhosted.org/packages/94/df/f569ae4c1877f96ad4086c153a8eee5a19a3b519487bf5c9454a3438c341/watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226", size = 613076 }, + { url = "https://files.pythonhosted.org/packages/15/ae/8ce5f29e65d5fa5790e3c80c289819c55e12be2e1b9f5b6a0e55e169b97d/watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105", size = 271013 }, + { url = "https://files.pythonhosted.org/packages/a4/c6/79dc4a7c598a978e5fafa135090aaf7bbb03b8dec7bada437dfbe578e7ed/watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74", size = 284229 }, + { url = "https://files.pythonhosted.org/packages/37/3d/928633723211753f3500bfb138434f080363b87a1b08ca188b1ce54d1e05/watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3", size = 276824 }, + { url = "https://files.pythonhosted.org/packages/5b/1a/8f4d9a1461709756ace48c98f07772bc6d4519b1e48b5fa24a4061216256/watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2", size = 391345 }, + { url = "https://files.pythonhosted.org/packages/bc/d2/6750b7b3527b1cdaa33731438432e7238a6c6c40a9924049e4cebfa40805/watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9", size = 381515 }, + { url = "https://files.pythonhosted.org/packages/4e/17/80500e42363deef1e4b4818729ed939aaddc56f82f4e72b2508729dd3c6b/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712", size = 449767 }, + { url = "https://files.pythonhosted.org/packages/10/37/1427fa4cfa09adbe04b1e97bced19a29a3462cc64c78630787b613a23f18/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12", size = 455677 }, + { url = "https://files.pythonhosted.org/packages/c5/7a/39e9397f3a19cb549a7d380412fd9e507d4854eddc0700bfad10ef6d4dba/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844", size = 482219 }, + { url = "https://files.pythonhosted.org/packages/45/2d/7113931a77e2ea4436cad0c1690c09a40a7f31d366f79c6f0a5bc7a4f6d5/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733", size = 518830 }, + { url = "https://files.pythonhosted.org/packages/f9/1b/50733b1980fa81ef3c70388a546481ae5fa4c2080040100cd7bf3bf7b321/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af", size = 497997 }, + { url = "https://files.pythonhosted.org/packages/2b/b4/9396cc61b948ef18943e7c85ecfa64cf940c88977d882da57147f62b34b1/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a", size = 452249 }, + { url = "https://files.pythonhosted.org/packages/fb/69/0c65a5a29e057ad0dc691c2fa6c23b2983c7dabaa190ba553b29ac84c3cc/watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff", size = 614412 }, + { url = "https://files.pythonhosted.org/packages/7f/b9/319fcba6eba5fad34327d7ce16a6b163b39741016b1996f4a3c96b8dd0e1/watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e", size = 611982 }, + { url = "https://files.pythonhosted.org/packages/f1/47/143c92418e30cb9348a4387bfa149c8e0e404a7c5b0585d46d2f7031b4b9/watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94", size = 271822 }, + { url = "https://files.pythonhosted.org/packages/ea/94/b0165481bff99a64b29e46e07ac2e0df9f7a957ef13bec4ceab8515f44e3/watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c", size = 285441 }, + { url = "https://files.pythonhosted.org/packages/11/de/09fe56317d582742d7ca8c2ca7b52a85927ebb50678d9b0fa8194658f536/watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90", size = 277141 }, + { url = "https://files.pythonhosted.org/packages/08/98/f03efabec64b5b1fa58c0daab25c68ef815b0f320e54adcacd0d6847c339/watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9", size = 390954 }, + { url = "https://files.pythonhosted.org/packages/16/09/4dd49ba0a32a45813debe5fb3897955541351ee8142f586303b271a02b40/watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60", size = 381133 }, + { url = "https://files.pythonhosted.org/packages/76/59/5aa6fc93553cd8d8ee75c6247763d77c02631aed21551a97d94998bf1dae/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407", size = 449516 }, + { url = "https://files.pythonhosted.org/packages/4c/aa/df4b6fe14b6317290b91335b23c96b488d365d65549587434817e06895ea/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d", size = 454820 }, + { url = "https://files.pythonhosted.org/packages/5e/71/185f8672f1094ce48af33252c73e39b48be93b761273872d9312087245f6/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d", size = 481550 }, + { url = "https://files.pythonhosted.org/packages/85/d7/50ebba2c426ef1a5cb17f02158222911a2e005d401caf5d911bfca58f4c4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b", size = 518647 }, + { url = "https://files.pythonhosted.org/packages/f0/7a/4c009342e393c545d68987e8010b937f72f47937731225b2b29b7231428f/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590", size = 497547 }, + { url = "https://files.pythonhosted.org/packages/0f/7c/1cf50b35412d5c72d63b2bf9a4fffee2e1549a245924960dd087eb6a6de4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902", size = 452179 }, + { url = "https://files.pythonhosted.org/packages/d6/a9/3db1410e1c1413735a9a472380e4f431ad9a9e81711cda2aaf02b7f62693/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1", size = 614125 }, + { url = "https://files.pythonhosted.org/packages/f2/e1/0025d365cf6248c4d1ee4c3d2e3d373bdd3f6aff78ba4298f97b4fad2740/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303", size = 611911 }, + { url = "https://files.pythonhosted.org/packages/55/55/035838277d8c98fc8c917ac9beeb0cd6c59d675dc2421df5f9fcf44a0070/watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80", size = 271152 }, + { url = "https://files.pythonhosted.org/packages/f0/e5/96b8e55271685ddbadc50ce8bc53aa2dff278fb7ac4c2e473df890def2dc/watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc", size = 285216 }, + { url = "https://files.pythonhosted.org/packages/15/81/54484fc2fa715abe79694b975692af963f0878fb9d72b8251aa542bf3f10/watchfiles-1.0.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21", size = 394967 }, + { url = "https://files.pythonhosted.org/packages/14/b3/557f0cd90add86586fe3deeebd11e8299db6bc3452b44a534f844c6ab831/watchfiles-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0", size = 384707 }, + { url = "https://files.pythonhosted.org/packages/03/a3/34638e1bffcb85a405e7b005e30bb211fd9be2ab2cb1847f2ceb81bef27b/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff", size = 450442 }, + { url = "https://files.pythonhosted.org/packages/8f/9f/6a97460dd11a606003d634c7158d9fea8517e98daffc6f56d0f5fde2e86a/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a", size = 455959 }, + { url = "https://files.pythonhosted.org/packages/9d/bb/e0648c6364e4d37ec692bc3f0c77507d17d8bb8f75689148819142010bbf/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a", size = 483187 }, + { url = "https://files.pythonhosted.org/packages/dd/ad/d9290586a25288a81dfa8ad6329cf1de32aa1a9798ace45259eb95dcfb37/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8", size = 519733 }, + { url = "https://files.pythonhosted.org/packages/4e/a9/150c1666825cc9637093f8cae7fc6f53b3296311ab8bd65f1389acb717cb/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3", size = 502275 }, + { url = "https://files.pythonhosted.org/packages/44/dc/5bfd21e20a330aca1706ac44713bc322838061938edf4b53130f97a7b211/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf", size = 452907 }, + { url = "https://files.pythonhosted.org/packages/50/fe/8f4fc488f1699f564687b697456eb5c0cb8e2b0b8538150511c234c62094/watchfiles-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a", size = 615927 }, + { url = "https://files.pythonhosted.org/packages/ad/19/2e45f6f6eec89dd97a4d281635e3d73c17e5f692e7432063bdfdf9562c89/watchfiles-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b", size = 613435 }, + { url = "https://files.pythonhosted.org/packages/91/17/dc5ac62ca377827c24321d68050efc2eaee2ebaf3f21d055bbce2206d309/watchfiles-1.0.4-cp39-cp39-win32.whl", hash = "sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27", size = 270810 }, + { url = "https://files.pythonhosted.org/packages/82/2b/dad851342492d538e7ffe72a8c756f747dd147988abb039ac9d6577d2235/watchfiles-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43", size = 284866 }, + { url = "https://files.pythonhosted.org/packages/6f/06/175d5ac6b838fb319008c0cd981d7bf289317c510154d411d3584ca2b67b/watchfiles-1.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18", size = 396269 }, + { url = "https://files.pythonhosted.org/packages/86/ee/5db93b0b57dc0587abdbac4149296ee73275f615d790a82cb5598af0557f/watchfiles-1.0.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817", size = 386010 }, + { url = "https://files.pythonhosted.org/packages/75/61/fe0dc5fedf152bfc085a53711f740701f6bdb8ab6b5c950402b681d4858b/watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0", size = 450913 }, + { url = "https://files.pythonhosted.org/packages/9f/dd/3c7731af3baf1a9957afc643d176f94480921a690ec3237c9f9d11301c08/watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d", size = 453474 }, + { url = "https://files.pythonhosted.org/packages/6b/b4/c3998f54c91a35cee60ee6d3a855a069c5dff2bae6865147a46e9090dccd/watchfiles-1.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3", size = 395565 }, + { url = "https://files.pythonhosted.org/packages/3f/05/ac1a4d235beb9ddfb8ac26ce93a00ba6bd1b1b43051ef12d7da957b4a9d1/watchfiles-1.0.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e", size = 385406 }, + { url = "https://files.pythonhosted.org/packages/4c/ea/36532e7d86525f4e52a10efed182abf33efb106a93d49f5fbc994b256bcd/watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb", size = 450424 }, + { url = "https://files.pythonhosted.org/packages/7a/e9/3cbcf4d70cd0b6d3f30631deae1bf37cc0be39887ca327a44462fe546bf5/watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42", size = 452488 }, +] + +[[package]] +name = "websockets" +version = "14.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f4/1b/380b883ce05bb5f45a905b61790319a28958a9ab1e4b6b95ff5464b60ca1/websockets-14.1.tar.gz", hash = "sha256:398b10c77d471c0aab20a845e7a60076b6390bfdaac7a6d2edb0d2c59d75e8d8", size = 162840 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/91/b1b375dbd856fd5fff3f117de0e520542343ecaf4e8fc60f1ac1e9f5822c/websockets-14.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a0adf84bc2e7c86e8a202537b4fd50e6f7f0e4a6b6bf64d7ccb96c4cd3330b29", size = 161950 }, + { url = "https://files.pythonhosted.org/packages/61/8f/4d52f272d3ebcd35e1325c646e98936099a348374d4a6b83b524bded8116/websockets-14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90b5d9dfbb6d07a84ed3e696012610b6da074d97453bd01e0e30744b472c8179", size = 159601 }, + { url = "https://files.pythonhosted.org/packages/c4/b1/29e87b53eb1937992cdee094a0988aadc94f25cf0b37e90c75eed7123d75/websockets-14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2177ee3901075167f01c5e335a6685e71b162a54a89a56001f1c3e9e3d2ad250", size = 159854 }, + { url = "https://files.pythonhosted.org/packages/3f/e6/752a2f5e8321ae2a613062676c08ff2fccfb37dc837a2ee919178a372e8a/websockets-14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f14a96a0034a27f9d47fd9788913924c89612225878f8078bb9d55f859272b0", size = 168835 }, + { url = "https://files.pythonhosted.org/packages/60/27/ca62de7877596926321b99071639275e94bb2401397130b7cf33dbf2106a/websockets-14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f874ba705deea77bcf64a9da42c1f5fc2466d8f14daf410bc7d4ceae0a9fcb0", size = 167844 }, + { url = "https://files.pythonhosted.org/packages/7e/db/f556a1d06635c680ef376be626c632e3f2bbdb1a0189d1d1bffb061c3b70/websockets-14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9607b9a442392e690a57909c362811184ea429585a71061cd5d3c2b98065c199", size = 168157 }, + { url = "https://files.pythonhosted.org/packages/b3/bc/99e5f511838c365ac6ecae19674eb5e94201aa4235bd1af3e6fa92c12905/websockets-14.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bea45f19b7ca000380fbd4e02552be86343080120d074b87f25593ce1700ad58", size = 168561 }, + { url = "https://files.pythonhosted.org/packages/c6/e7/251491585bad61c79e525ac60927d96e4e17b18447cc9c3cfab47b2eb1b8/websockets-14.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:219c8187b3ceeadbf2afcf0f25a4918d02da7b944d703b97d12fb01510869078", size = 167979 }, + { url = "https://files.pythonhosted.org/packages/ac/98/7ac2e4eeada19bdbc7a3a66a58e3ebdf33648b9e1c5b3f08c3224df168cf/websockets-14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ad2ab2547761d79926effe63de21479dfaf29834c50f98c4bf5b5480b5838434", size = 167925 }, + { url = "https://files.pythonhosted.org/packages/ab/3d/09e65c47ee2396b7482968068f6e9b516221e1032b12dcf843b9412a5dfb/websockets-14.1-cp310-cp310-win32.whl", hash = "sha256:1288369a6a84e81b90da5dbed48610cd7e5d60af62df9851ed1d1d23a9069f10", size = 162831 }, + { url = "https://files.pythonhosted.org/packages/8a/67/59828a3d09740e6a485acccfbb66600632f2178b6ed1b61388ee96f17d5a/websockets-14.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0744623852f1497d825a49a99bfbec9bea4f3f946df6eb9d8a2f0c37a2fec2e", size = 163266 }, + { url = "https://files.pythonhosted.org/packages/97/ed/c0d03cb607b7fe1f7ff45e2cd4bb5cd0f9e3299ced79c2c303a6fff44524/websockets-14.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:449d77d636f8d9c17952628cc7e3b8faf6e92a17ec581ec0c0256300717e1512", size = 161949 }, + { url = "https://files.pythonhosted.org/packages/06/91/bf0a44e238660d37a2dda1b4896235d20c29a2d0450f3a46cd688f43b239/websockets-14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a35f704be14768cea9790d921c2c1cc4fc52700410b1c10948511039be824aac", size = 159606 }, + { url = "https://files.pythonhosted.org/packages/ff/b8/7185212adad274c2b42b6a24e1ee6b916b7809ed611cbebc33b227e5c215/websockets-14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b1f3628a0510bd58968c0f60447e7a692933589b791a6b572fcef374053ca280", size = 159854 }, + { url = "https://files.pythonhosted.org/packages/5a/8a/0849968d83474be89c183d8ae8dcb7f7ada1a3c24f4d2a0d7333c231a2c3/websockets-14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c3deac3748ec73ef24fc7be0b68220d14d47d6647d2f85b2771cb35ea847aa1", size = 169402 }, + { url = "https://files.pythonhosted.org/packages/bd/4f/ef886e37245ff6b4a736a09b8468dae05d5d5c99de1357f840d54c6f297d/websockets-14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7048eb4415d46368ef29d32133134c513f507fff7d953c18c91104738a68c3b3", size = 168406 }, + { url = "https://files.pythonhosted.org/packages/11/43/e2dbd4401a63e409cebddedc1b63b9834de42f51b3c84db885469e9bdcef/websockets-14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cf0ad281c979306a6a34242b371e90e891bce504509fb6bb5246bbbf31e7b6", size = 168776 }, + { url = "https://files.pythonhosted.org/packages/6d/d6/7063e3f5c1b612e9f70faae20ebaeb2e684ffa36cb959eb0862ee2809b32/websockets-14.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cc1fc87428c1d18b643479caa7b15db7d544652e5bf610513d4a3478dbe823d0", size = 169083 }, + { url = "https://files.pythonhosted.org/packages/49/69/e6f3d953f2fa0f8a723cf18cd011d52733bd7f6e045122b24e0e7f49f9b0/websockets-14.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f95ba34d71e2fa0c5d225bde3b3bdb152e957150100e75c86bc7f3964c450d89", size = 168529 }, + { url = "https://files.pythonhosted.org/packages/70/ff/f31fa14561fc1d7b8663b0ed719996cf1f581abee32c8fb2f295a472f268/websockets-14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9481a6de29105d73cf4515f2bef8eb71e17ac184c19d0b9918a3701c6c9c4f23", size = 168475 }, + { url = "https://files.pythonhosted.org/packages/f1/15/b72be0e4bf32ff373aa5baef46a4c7521b8ea93ad8b49ca8c6e8e764c083/websockets-14.1-cp311-cp311-win32.whl", hash = "sha256:368a05465f49c5949e27afd6fbe0a77ce53082185bbb2ac096a3a8afaf4de52e", size = 162833 }, + { url = "https://files.pythonhosted.org/packages/bc/ef/2d81679acbe7057ffe2308d422f744497b52009ea8bab34b6d74a2657d1d/websockets-14.1-cp311-cp311-win_amd64.whl", hash = "sha256:6d24fc337fc055c9e83414c94e1ee0dee902a486d19d2a7f0929e49d7d604b09", size = 163263 }, + { url = "https://files.pythonhosted.org/packages/55/64/55698544ce29e877c9188f1aee9093712411a8fc9732cca14985e49a8e9c/websockets-14.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ed907449fe5e021933e46a3e65d651f641975a768d0649fee59f10c2985529ed", size = 161957 }, + { url = "https://files.pythonhosted.org/packages/a2/b1/b088f67c2b365f2c86c7b48edb8848ac27e508caf910a9d9d831b2f343cb/websockets-14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:87e31011b5c14a33b29f17eb48932e63e1dcd3fa31d72209848652310d3d1f0d", size = 159620 }, + { url = "https://files.pythonhosted.org/packages/c1/89/2a09db1bbb40ba967a1b8225b07b7df89fea44f06de9365f17f684d0f7e6/websockets-14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bc6ccf7d54c02ae47a48ddf9414c54d48af9c01076a2e1023e3b486b6e72c707", size = 159852 }, + { url = "https://files.pythonhosted.org/packages/ca/c1/f983138cd56e7d3079f1966e81f77ce6643f230cd309f73aa156bb181749/websockets-14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9777564c0a72a1d457f0848977a1cbe15cfa75fa2f67ce267441e465717dcf1a", size = 169675 }, + { url = "https://files.pythonhosted.org/packages/c1/c8/84191455d8660e2a0bdb33878d4ee5dfa4a2cedbcdc88bbd097303b65bfa/websockets-14.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a655bde548ca98f55b43711b0ceefd2a88a71af6350b0c168aa77562104f3f45", size = 168619 }, + { url = "https://files.pythonhosted.org/packages/8d/a7/62e551fdcd7d44ea74a006dc193aba370505278ad76efd938664531ce9d6/websockets-14.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3dfff83ca578cada2d19e665e9c8368e1598d4e787422a460ec70e531dbdd58", size = 169042 }, + { url = "https://files.pythonhosted.org/packages/ad/ed/1532786f55922c1e9c4d329608e36a15fdab186def3ca9eb10d7465bc1cc/websockets-14.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6a6c9bcf7cdc0fd41cc7b7944447982e8acfd9f0d560ea6d6845428ed0562058", size = 169345 }, + { url = "https://files.pythonhosted.org/packages/ea/fb/160f66960d495df3de63d9bcff78e1b42545b2a123cc611950ffe6468016/websockets-14.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4b6caec8576e760f2c7dd878ba817653144d5f369200b6ddf9771d64385b84d4", size = 168725 }, + { url = "https://files.pythonhosted.org/packages/cf/53/1bf0c06618b5ac35f1d7906444b9958f8485682ab0ea40dee7b17a32da1e/websockets-14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb6d38971c800ff02e4a6afd791bbe3b923a9a57ca9aeab7314c21c84bf9ff05", size = 168712 }, + { url = "https://files.pythonhosted.org/packages/e5/22/5ec2f39fff75f44aa626f86fa7f20594524a447d9c3be94d8482cd5572ef/websockets-14.1-cp312-cp312-win32.whl", hash = "sha256:1d045cbe1358d76b24d5e20e7b1878efe578d9897a25c24e6006eef788c0fdf0", size = 162838 }, + { url = "https://files.pythonhosted.org/packages/74/27/28f07df09f2983178db7bf6c9cccc847205d2b92ced986cd79565d68af4f/websockets-14.1-cp312-cp312-win_amd64.whl", hash = "sha256:90f4c7a069c733d95c308380aae314f2cb45bd8a904fb03eb36d1a4983a4993f", size = 163277 }, + { url = "https://files.pythonhosted.org/packages/34/77/812b3ba5110ed8726eddf9257ab55ce9e85d97d4aa016805fdbecc5e5d48/websockets-14.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3630b670d5057cd9e08b9c4dab6493670e8e762a24c2c94ef312783870736ab9", size = 161966 }, + { url = "https://files.pythonhosted.org/packages/8d/24/4fcb7aa6986ae7d9f6d083d9d53d580af1483c5ec24bdec0978307a0f6ac/websockets-14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36ebd71db3b89e1f7b1a5deaa341a654852c3518ea7a8ddfdf69cc66acc2db1b", size = 159625 }, + { url = "https://files.pythonhosted.org/packages/f8/47/2a0a3a2fc4965ff5b9ce9324d63220156bd8bedf7f90824ab92a822e65fd/websockets-14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5b918d288958dc3fa1c5a0b9aa3256cb2b2b84c54407f4813c45d52267600cd3", size = 159857 }, + { url = "https://files.pythonhosted.org/packages/dd/c8/d7b425011a15e35e17757e4df75b25e1d0df64c0c315a44550454eaf88fc/websockets-14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00fe5da3f037041da1ee0cf8e308374e236883f9842c7c465aa65098b1c9af59", size = 169635 }, + { url = "https://files.pythonhosted.org/packages/93/39/6e3b5cffa11036c40bd2f13aba2e8e691ab2e01595532c46437b56575678/websockets-14.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8149a0f5a72ca36720981418eeffeb5c2729ea55fa179091c81a0910a114a5d2", size = 168578 }, + { url = "https://files.pythonhosted.org/packages/cf/03/8faa5c9576299b2adf34dcccf278fc6bbbcda8a3efcc4d817369026be421/websockets-14.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77569d19a13015e840b81550922056acabc25e3f52782625bc6843cfa034e1da", size = 169018 }, + { url = "https://files.pythonhosted.org/packages/8c/05/ea1fec05cc3a60defcdf0bb9f760c3c6bd2dd2710eff7ac7f891864a22ba/websockets-14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cf5201a04550136ef870aa60ad3d29d2a59e452a7f96b94193bee6d73b8ad9a9", size = 169383 }, + { url = "https://files.pythonhosted.org/packages/21/1d/eac1d9ed787f80754e51228e78855f879ede1172c8b6185aca8cef494911/websockets-14.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:88cf9163ef674b5be5736a584c999e98daf3aabac6e536e43286eb74c126b9c7", size = 168773 }, + { url = "https://files.pythonhosted.org/packages/0e/1b/e808685530185915299740d82b3a4af3f2b44e56ccf4389397c7a5d95d39/websockets-14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:836bef7ae338a072e9d1863502026f01b14027250a4545672673057997d5c05a", size = 168757 }, + { url = "https://files.pythonhosted.org/packages/b6/19/6ab716d02a3b068fbbeb6face8a7423156e12c446975312f1c7c0f4badab/websockets-14.1-cp313-cp313-win32.whl", hash = "sha256:0d4290d559d68288da9f444089fd82490c8d2744309113fc26e2da6e48b65da6", size = 162834 }, + { url = "https://files.pythonhosted.org/packages/6c/fd/ab6b7676ba712f2fc89d1347a4b5bdc6aa130de10404071f2b2606450209/websockets-14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8621a07991add373c3c5c2cf89e1d277e49dc82ed72c75e3afc74bd0acc446f0", size = 163277 }, + { url = "https://files.pythonhosted.org/packages/4d/23/ac9d8c5ec7b90efc3687d60474ef7e698f8b75cb7c9dfedad72701e797c9/websockets-14.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01bb2d4f0a6d04538d3c5dfd27c0643269656c28045a53439cbf1c004f90897a", size = 161945 }, + { url = "https://files.pythonhosted.org/packages/c5/6b/ffa450e3b736a86ae6b40ce20a758ac9af80c96a18548f6c323ed60329c5/websockets-14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:414ffe86f4d6f434a8c3b7913655a1a5383b617f9bf38720e7c0799fac3ab1c6", size = 159600 }, + { url = "https://files.pythonhosted.org/packages/74/62/f90d1fd57ea7337ecaa99f17c31a544b9dcdb7c7c32a3d3997ccc42d57d3/websockets-14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fda642151d5affdee8a430bd85496f2e2517be3a2b9d2484d633d5712b15c56", size = 159850 }, + { url = "https://files.pythonhosted.org/packages/35/dd/1e71865de1f3c265e11d02b0b4c76178f84351c6611e515fbe3d2bd1b98c/websockets-14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd7c11968bc3860d5c78577f0dbc535257ccec41750675d58d8dc66aa47fe52c", size = 168616 }, + { url = "https://files.pythonhosted.org/packages/ba/ae/0d069b52e26d48402dbe90c7581eb6a5bed5d7dbe3d9ca3cf1033859d58e/websockets-14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a032855dc7db987dff813583d04f4950d14326665d7e714d584560b140ae6b8b", size = 167619 }, + { url = "https://files.pythonhosted.org/packages/1c/3f/d3f2df62704c53e0296f0ce714921b6a15df10e2e463734c737b1d9e2522/websockets-14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7e7ea2f782408c32d86b87a0d2c1fd8871b0399dd762364c731d86c86069a78", size = 167921 }, + { url = "https://files.pythonhosted.org/packages/e0/e2/2dcb295bdae9393070cea58c790d87d1d36149bb4319b1da6014c8a36d42/websockets-14.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:39450e6215f7d9f6f7bc2a6da21d79374729f5d052333da4d5825af8a97e6735", size = 168343 }, + { url = "https://files.pythonhosted.org/packages/6b/fd/fa48e8b4e10e2c165cbfc16dada7405b4008818be490fc6b99a4928e232a/websockets-14.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ceada5be22fa5a5a4cdeec74e761c2ee7db287208f54c718f2df4b7e200b8d4a", size = 167745 }, + { url = "https://files.pythonhosted.org/packages/42/45/79db33f2b744d2014b40946428e6c37ce944fde8791d82e1c2f4d4a67d96/websockets-14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3fc753451d471cff90b8f467a1fc0ae64031cf2d81b7b34e1811b7e2691bc4bc", size = 167705 }, + { url = "https://files.pythonhosted.org/packages/da/27/f66507db34ca9c79562f28fa5983433f7b9080fd471cc188906006d36ba4/websockets-14.1-cp39-cp39-win32.whl", hash = "sha256:14839f54786987ccd9d03ed7f334baec0f02272e7ec4f6e9d427ff584aeea8b4", size = 162828 }, + { url = "https://files.pythonhosted.org/packages/11/25/bb8f81a4ec94f595adb845608c5ec9549cb6b446945b292fe61807c7c95b/websockets-14.1-cp39-cp39-win_amd64.whl", hash = "sha256:d9fd19ecc3a4d5ae82ddbfb30962cf6d874ff943e56e0c81f5169be2fda62979", size = 163271 }, + { url = "https://files.pythonhosted.org/packages/fb/cd/382a05a1ba2a93bd9fb807716a660751295df72e77204fb130a102fcdd36/websockets-14.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e5dc25a9dbd1a7f61eca4b7cb04e74ae4b963d658f9e4f9aad9cd00b688692c8", size = 159633 }, + { url = "https://files.pythonhosted.org/packages/b7/a0/fa7c62e2952ef028b422fbf420f9353d9dd4dfaa425de3deae36e98c0784/websockets-14.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:04a97aca96ca2acedf0d1f332c861c5a4486fdcba7bcef35873820f940c4231e", size = 159867 }, + { url = "https://files.pythonhosted.org/packages/c1/94/954b4924f868db31d5f0935893c7a8446515ee4b36bb8ad75a929469e453/websockets-14.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df174ece723b228d3e8734a6f2a6febbd413ddec39b3dc592f5a4aa0aff28098", size = 161121 }, + { url = "https://files.pythonhosted.org/packages/7a/2e/f12bbb41a8f2abb76428ba4fdcd9e67b5b364a3e7fa97c88f4d6950aa2d4/websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:034feb9f4286476f273b9a245fb15f02c34d9586a5bc936aff108c3ba1b21beb", size = 160731 }, + { url = "https://files.pythonhosted.org/packages/13/97/b76979401f2373af1fe3e08f960b265cecab112e7dac803446fb98351a52/websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c308dabd2b380807ab64b62985eaccf923a78ebc572bd485375b9ca2b7dc7", size = 160681 }, + { url = "https://files.pythonhosted.org/packages/39/9c/16916d9a436c109a1d7ba78817e8fee357b78968be3f6e6f517f43afa43d/websockets-14.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5a42d3ecbb2db5080fc578314439b1d79eef71d323dc661aa616fb492436af5d", size = 163316 }, + { url = "https://files.pythonhosted.org/packages/0f/57/50fd09848a80a1b63a572c610f230f8a17590ca47daf256eb28a0851df73/websockets-14.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ddaa4a390af911da6f680be8be4ff5aaf31c4c834c1a9147bc21cbcbca2d4370", size = 159633 }, + { url = "https://files.pythonhosted.org/packages/d7/2f/db728b0c7962ad6a13ced8286325bf430b59722d943e7f6bdbd8a78e2bfe/websockets-14.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a4c805c6034206143fbabd2d259ec5e757f8b29d0a2f0bf3d2fe5d1f60147a4a", size = 159863 }, + { url = "https://files.pythonhosted.org/packages/fa/e4/21e7481936fbfffee138edb488a6184eb3468b402a8181b95b9e44f6a676/websockets-14.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:205f672a6c2c671a86d33f6d47c9b35781a998728d2c7c2a3e1cf3333fcb62b7", size = 161119 }, + { url = "https://files.pythonhosted.org/packages/64/2d/efb6cf716d4f9da60190756e06f8db2066faf1ae4a4a8657ab136dfcc7a8/websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef440054124728cc49b01c33469de06755e5a7a4e83ef61934ad95fc327fbb0", size = 160724 }, + { url = "https://files.pythonhosted.org/packages/40/b0/a70b972d853c3f26040834fcff3dd45c8a0292af9f5f0b36f9fbb82d5d44/websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7591d6f440af7f73c4bd9404f3772bfee064e639d2b6cc8c94076e71b2471c1", size = 160676 }, + { url = "https://files.pythonhosted.org/packages/4a/76/f9da7f97476cc7b8c74829bb4851f1faf660455839689ffcc354b52860a7/websockets-14.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:25225cc79cfebc95ba1d24cd3ab86aaa35bcd315d12fa4358939bd55e9bd74a5", size = 163311 }, + { url = "https://files.pythonhosted.org/packages/b0/0b/c7e5d11020242984d9d37990310520ed663b942333b83a033c2f20191113/websockets-14.1-py3-none-any.whl", hash = "sha256:4d4fc827a20abe6d544a119896f6b78ee13fe81cbfef416f3f2ddf09a03f0e2e", size = 156277 }, +] + +[[package]] +name = "winkerberos" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/4f/8db9aae372e88031877067a9d8da027d6e67454d233177cb49198ab216a5/winkerberos-0.12.0.tar.gz", hash = "sha256:b19b9b8c87ab9dc76bb325f0dd4e93a2d669abc68d2283eec25ed67176ad7ad3", size = 35572 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/71/69549a95c4077a35819b04f3179292eec7119903ec035995254a41a3622a/winkerberos-0.12.0-cp310-cp310-win32.whl", hash = "sha256:bb37e91f9959adbeb3c6ae25c828c1d033fa2b1b03176037d7bec0adfbb85b8f", size = 25297 }, + { url = "https://files.pythonhosted.org/packages/8d/47/c8e2138e51201f79f9adc73a13a6616c375d0490081b124e2d8eebf21711/winkerberos-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:e479a498ab1f93bde0c0eb880f2c68378272850db51b978c75e9d73148c44f9c", size = 27635 }, + { url = "https://files.pythonhosted.org/packages/bf/5b/5799a0b7b3162b4476443b16c7a12a63ec3dbd9e9e2bf622c5833c27079b/winkerberos-0.12.0-cp311-cp311-win32.whl", hash = "sha256:35ed9eedc2551063758756724c345d906b4a68b8d31bc9fd6e935c1eb37c4a35", size = 25297 }, + { url = "https://files.pythonhosted.org/packages/24/ec/d437a005207d3c66bdb22196f954d25716fea21b79d4873873a2cd836946/winkerberos-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:838fdab8f71905c5a80ee1c868e2c7f3c2fee233113e8e65cd989b353e9a980e", size = 27640 }, + { url = "https://files.pythonhosted.org/packages/12/6f/1cab2c1685c3cb55a5a6b87c75df33def11b25cf01525021fa4f18c2ba24/winkerberos-0.12.0-cp312-cp312-win32.whl", hash = "sha256:f8a9dedd35eda764cd0591d050234a8f381c57a559c16a914de311ed426f6f50", size = 25365 }, + { url = "https://files.pythonhosted.org/packages/01/e9/0408c1abd6d599d61709ceecafdb0f8ff725e015b8c5444db62de6466b37/winkerberos-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:22db6871a842d16bb045d93440d0acc98d7690320acd7d7174ae36509ce78198", size = 27678 }, + { url = "https://files.pythonhosted.org/packages/7a/ff/b6cd850e9bed012d289cbcf1a2c9f70292c6d2664f65c0b6741877f0f7ec/winkerberos-0.12.0-cp39-cp39-win32.whl", hash = "sha256:987a16e5fff8b6e1cd2d1a52db92c51ba657a34e6c55b0b7d96247f512ed7444", size = 25290 }, + { url = "https://files.pythonhosted.org/packages/85/a9/c2319bcf270170ddb9c52105851d7565e6ce7266dc5a3e6cdf97fb6fe43b/winkerberos-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:4ffe1b654884e169c88785aa3960cc8dc4f09b757d242b59b3022c632736d2cd", size = 27629 }, +] + +[[package]] +name = "zipp" +version = "3.21.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630 }, +] + +[[package]] +name = "zope-event" +version = "5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/c2/427f1867bb96555d1d34342f1dd97f8c420966ab564d58d18469a1db8736/zope.event-5.0.tar.gz", hash = "sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd", size = 17350 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/42/f8dbc2b9ad59e927940325a22d6d3931d630c3644dae7e2369ef5d9ba230/zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26", size = 6824 }, +] + +[[package]] +name = "zope-interface" +version = "7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/71/e6177f390e8daa7e75378505c5ab974e0bf59c1d3b19155638c7afbf4b2d/zope.interface-7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce290e62229964715f1011c3dbeab7a4a1e4971fd6f31324c4519464473ef9f2", size = 208243 }, + { url = "https://files.pythonhosted.org/packages/52/db/7e5f4226bef540f6d55acfd95cd105782bc6ee044d9b5587ce2c95558a5e/zope.interface-7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05b910a5afe03256b58ab2ba6288960a2892dfeef01336dc4be6f1b9ed02ab0a", size = 208759 }, + { url = "https://files.pythonhosted.org/packages/28/ea/fdd9813c1eafd333ad92464d57a4e3a82b37ae57c19497bcffa42df673e4/zope.interface-7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:550f1c6588ecc368c9ce13c44a49b8d6b6f3ca7588873c679bd8fd88a1b557b6", size = 254922 }, + { url = "https://files.pythonhosted.org/packages/3b/d3/0000a4d497ef9fbf4f66bb6828b8d0a235e690d57c333be877bec763722f/zope.interface-7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ef9e2f865721553c6f22a9ff97da0f0216c074bd02b25cf0d3af60ea4d6931d", size = 249367 }, + { url = "https://files.pythonhosted.org/packages/3e/e5/0b359e99084f033d413419eff23ee9c2bd33bca2ca9f4e83d11856f22d10/zope.interface-7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f926f0dcb058211a3bb3e0e501c69759613b17a553788b2caeb991bed3b61d", size = 254488 }, + { url = "https://files.pythonhosted.org/packages/7b/90/12d50b95f40e3b2fc0ba7f7782104093b9fd62806b13b98ef4e580f2ca61/zope.interface-7.2-cp310-cp310-win_amd64.whl", hash = "sha256:144964649eba4c5e4410bb0ee290d338e78f179cdbfd15813de1a664e7649b3b", size = 211947 }, + { url = "https://files.pythonhosted.org/packages/98/7d/2e8daf0abea7798d16a58f2f3a2bf7588872eee54ac119f99393fdd47b65/zope.interface-7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1909f52a00c8c3dcab6c4fad5d13de2285a4b3c7be063b239b8dc15ddfb73bd2", size = 208776 }, + { url = "https://files.pythonhosted.org/packages/a0/2a/0c03c7170fe61d0d371e4c7ea5b62b8cb79b095b3d630ca16719bf8b7b18/zope.interface-7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ecf2451596f19fd607bb09953f426588fc1e79e93f5968ecf3367550396b22", size = 209296 }, + { url = "https://files.pythonhosted.org/packages/49/b4/451f19448772b4a1159519033a5f72672221e623b0a1bd2b896b653943d8/zope.interface-7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:033b3923b63474800b04cba480b70f6e6243a62208071fc148354f3f89cc01b7", size = 260997 }, + { url = "https://files.pythonhosted.org/packages/65/94/5aa4461c10718062c8f8711161faf3249d6d3679c24a0b81dd6fc8ba1dd3/zope.interface-7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a102424e28c6b47c67923a1f337ede4a4c2bba3965b01cf707978a801fc7442c", size = 255038 }, + { url = "https://files.pythonhosted.org/packages/9f/aa/1a28c02815fe1ca282b54f6705b9ddba20328fabdc37b8cf73fc06b172f0/zope.interface-7.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25e6a61dcb184453bb00eafa733169ab6d903e46f5c2ace4ad275386f9ab327a", size = 259806 }, + { url = "https://files.pythonhosted.org/packages/a7/2c/82028f121d27c7e68632347fe04f4a6e0466e77bb36e104c8b074f3d7d7b/zope.interface-7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3f6771d1647b1fc543d37640b45c06b34832a943c80d1db214a37c31161a93f1", size = 212305 }, + { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959 }, + { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357 }, + { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235 }, + { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253 }, + { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702 }, + { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466 }, + { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961 }, + { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356 }, + { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196 }, + { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237 }, + { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696 }, + { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472 }, + { url = "https://files.pythonhosted.org/packages/8c/2c/1f49dc8b4843c4f0848d8e43191aed312bad946a1563d1bf9e46cf2816ee/zope.interface-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7bd449c306ba006c65799ea7912adbbfed071089461a19091a228998b82b1fdb", size = 208349 }, + { url = "https://files.pythonhosted.org/packages/ed/7d/83ddbfc8424c69579a90fc8edc2b797223da2a8083a94d8dfa0e374c5ed4/zope.interface-7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a19a6cc9c6ce4b1e7e3d319a473cf0ee989cbbe2b39201d7c19e214d2dfb80c7", size = 208799 }, + { url = "https://files.pythonhosted.org/packages/36/22/b1abd91854c1be03f5542fe092e6a745096d2eca7704d69432e119100583/zope.interface-7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cd1790b48c16db85d51fbbd12d20949d7339ad84fd971427cf00d990c1f137", size = 254267 }, + { url = "https://files.pythonhosted.org/packages/2a/dd/fcd313ee216ad0739ae00e6126bc22a0af62a74f76a9ca668d16cd276222/zope.interface-7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52e446f9955195440e787596dccd1411f543743c359eeb26e9b2c02b077b0519", size = 248614 }, + { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800 }, + { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980 }, +] + +[[package]] +name = "zstandard" +version = "0.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/55/bd0487e86679db1823fc9ee0d8c9c78ae2413d34c0b461193b5f4c31d22f/zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", size = 788701 }, + { url = "https://files.pythonhosted.org/packages/e1/8a/ccb516b684f3ad987dfee27570d635822e3038645b1a950c5e8022df1145/zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", size = 633678 }, + { url = "https://files.pythonhosted.org/packages/12/89/75e633d0611c028e0d9af6df199423bf43f54bea5007e6718ab7132e234c/zstandard-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", size = 4941098 }, + { url = "https://files.pythonhosted.org/packages/4a/7a/bd7f6a21802de358b63f1ee636ab823711c25ce043a3e9f043b4fcb5ba32/zstandard-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", size = 5308798 }, + { url = "https://files.pythonhosted.org/packages/79/3b/775f851a4a65013e88ca559c8ae42ac1352db6fcd96b028d0df4d7d1d7b4/zstandard-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", size = 5341840 }, + { url = "https://files.pythonhosted.org/packages/09/4f/0cc49570141dd72d4d95dd6fcf09328d1b702c47a6ec12fbed3b8aed18a5/zstandard-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", size = 5440337 }, + { url = "https://files.pythonhosted.org/packages/e7/7c/aaa7cd27148bae2dc095191529c0570d16058c54c4597a7d118de4b21676/zstandard-0.23.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", size = 4861182 }, + { url = "https://files.pythonhosted.org/packages/ac/eb/4b58b5c071d177f7dc027129d20bd2a44161faca6592a67f8fcb0b88b3ae/zstandard-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", size = 4932936 }, + { url = "https://files.pythonhosted.org/packages/44/f9/21a5fb9bb7c9a274b05ad700a82ad22ce82f7ef0f485980a1e98ed6e8c5f/zstandard-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", size = 5464705 }, + { url = "https://files.pythonhosted.org/packages/49/74/b7b3e61db3f88632776b78b1db597af3f44c91ce17d533e14a25ce6a2816/zstandard-0.23.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", size = 4857882 }, + { url = "https://files.pythonhosted.org/packages/4a/7f/d8eb1cb123d8e4c541d4465167080bec88481ab54cd0b31eb4013ba04b95/zstandard-0.23.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", size = 4697672 }, + { url = "https://files.pythonhosted.org/packages/5e/05/f7dccdf3d121309b60342da454d3e706453a31073e2c4dac8e1581861e44/zstandard-0.23.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", size = 5206043 }, + { url = "https://files.pythonhosted.org/packages/86/9d/3677a02e172dccd8dd3a941307621c0cbd7691d77cb435ac3c75ab6a3105/zstandard-0.23.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", size = 5667390 }, + { url = "https://files.pythonhosted.org/packages/41/7e/0012a02458e74a7ba122cd9cafe491facc602c9a17f590367da369929498/zstandard-0.23.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", size = 5198901 }, + { url = "https://files.pythonhosted.org/packages/65/3a/8f715b97bd7bcfc7342d8adcd99a026cb2fb550e44866a3b6c348e1b0f02/zstandard-0.23.0-cp310-cp310-win32.whl", hash = "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", size = 430596 }, + { url = "https://files.pythonhosted.org/packages/19/b7/b2b9eca5e5a01111e4fe8a8ffb56bdcdf56b12448a24effe6cfe4a252034/zstandard-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", size = 495498 }, + { url = "https://files.pythonhosted.org/packages/9e/40/f67e7d2c25a0e2dc1744dd781110b0b60306657f8696cafb7ad7579469bd/zstandard-0.23.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", size = 788699 }, + { url = "https://files.pythonhosted.org/packages/e8/46/66d5b55f4d737dd6ab75851b224abf0afe5774976fe511a54d2eb9063a41/zstandard-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", size = 633681 }, + { url = "https://files.pythonhosted.org/packages/63/b6/677e65c095d8e12b66b8f862b069bcf1f1d781b9c9c6f12eb55000d57583/zstandard-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", size = 4944328 }, + { url = "https://files.pythonhosted.org/packages/59/cc/e76acb4c42afa05a9d20827116d1f9287e9c32b7ad58cc3af0721ce2b481/zstandard-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", size = 5311955 }, + { url = "https://files.pythonhosted.org/packages/78/e4/644b8075f18fc7f632130c32e8f36f6dc1b93065bf2dd87f03223b187f26/zstandard-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", size = 5344944 }, + { url = "https://files.pythonhosted.org/packages/76/3f/dbafccf19cfeca25bbabf6f2dd81796b7218f768ec400f043edc767015a6/zstandard-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", size = 5442927 }, + { url = "https://files.pythonhosted.org/packages/0c/c3/d24a01a19b6733b9f218e94d1a87c477d523237e07f94899e1c10f6fd06c/zstandard-0.23.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", size = 4864910 }, + { url = "https://files.pythonhosted.org/packages/1c/a9/cf8f78ead4597264f7618d0875be01f9bc23c9d1d11afb6d225b867cb423/zstandard-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", size = 4935544 }, + { url = "https://files.pythonhosted.org/packages/2c/96/8af1e3731b67965fb995a940c04a2c20997a7b3b14826b9d1301cf160879/zstandard-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", size = 5467094 }, + { url = "https://files.pythonhosted.org/packages/ff/57/43ea9df642c636cb79f88a13ab07d92d88d3bfe3e550b55a25a07a26d878/zstandard-0.23.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", size = 4860440 }, + { url = "https://files.pythonhosted.org/packages/46/37/edb78f33c7f44f806525f27baa300341918fd4c4af9472fbc2c3094be2e8/zstandard-0.23.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", size = 4700091 }, + { url = "https://files.pythonhosted.org/packages/c1/f1/454ac3962671a754f3cb49242472df5c2cced4eb959ae203a377b45b1a3c/zstandard-0.23.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", size = 5208682 }, + { url = "https://files.pythonhosted.org/packages/85/b2/1734b0fff1634390b1b887202d557d2dd542de84a4c155c258cf75da4773/zstandard-0.23.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", size = 5669707 }, + { url = "https://files.pythonhosted.org/packages/52/5a/87d6971f0997c4b9b09c495bf92189fb63de86a83cadc4977dc19735f652/zstandard-0.23.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", size = 5201792 }, + { url = "https://files.pythonhosted.org/packages/79/02/6f6a42cc84459d399bd1a4e1adfc78d4dfe45e56d05b072008d10040e13b/zstandard-0.23.0-cp311-cp311-win32.whl", hash = "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", size = 430586 }, + { url = "https://files.pythonhosted.org/packages/be/a2/4272175d47c623ff78196f3c10e9dc7045c1b9caf3735bf041e65271eca4/zstandard-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", size = 495420 }, + { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713 }, + { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459 }, + { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707 }, + { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545 }, + { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533 }, + { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510 }, + { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973 }, + { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968 }, + { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179 }, + { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577 }, + { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899 }, + { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964 }, + { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398 }, + { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313 }, + { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877 }, + { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595 }, + { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975 }, + { url = "https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448 }, + { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269 }, + { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228 }, + { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891 }, + { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310 }, + { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912 }, + { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946 }, + { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994 }, + { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681 }, + { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239 }, + { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 5200149 }, + { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392 }, + { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299 }, + { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862 }, + { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578 }, + { url = "https://files.pythonhosted.org/packages/fb/96/4fcafeb7e013a2386d22f974b5b97a0b9a65004ed58c87ae001599bfbd48/zstandard-0.23.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", size = 788697 }, + { url = "https://files.pythonhosted.org/packages/83/ff/a52ce725be69b86a2967ecba0497a8184540cc284c0991125515449e54e2/zstandard-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", size = 633679 }, + { url = "https://files.pythonhosted.org/packages/34/0f/3dc62db122f6a9c481c335fff6fc9f4e88d8f6e2d47321ee3937328addb4/zstandard-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", size = 4940416 }, + { url = "https://files.pythonhosted.org/packages/1d/e5/9fe0dd8c85fdc2f635e6660d07872a5dc4b366db566630161e39f9f804e1/zstandard-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", size = 5307693 }, + { url = "https://files.pythonhosted.org/packages/73/bf/fe62c0cd865c171ee8ed5bc83174b5382a2cb729c8d6162edfb99a83158b/zstandard-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", size = 5341236 }, + { url = "https://files.pythonhosted.org/packages/39/86/4fe79b30c794286110802a6cd44a73b6a314ac8196b9338c0fbd78c2407d/zstandard-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", size = 5439101 }, + { url = "https://files.pythonhosted.org/packages/72/ed/cacec235c581ebf8c608c7fb3d4b6b70d1b490d0e5128ea6996f809ecaef/zstandard-0.23.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", size = 4860320 }, + { url = "https://files.pythonhosted.org/packages/f6/1e/2c589a2930f93946b132fc852c574a19d5edc23fad2b9e566f431050c7ec/zstandard-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", size = 4931933 }, + { url = "https://files.pythonhosted.org/packages/8e/f5/30eadde3686d902b5d4692bb5f286977cbc4adc082145eb3f49d834b2eae/zstandard-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", size = 5463878 }, + { url = "https://files.pythonhosted.org/packages/e0/c8/8aed1f0ab9854ef48e5ad4431367fcb23ce73f0304f7b72335a8edc66556/zstandard-0.23.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", size = 4857192 }, + { url = "https://files.pythonhosted.org/packages/a8/c6/55e666cfbcd032b9e271865e8578fec56e5594d4faeac379d371526514f5/zstandard-0.23.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69", size = 4696513 }, + { url = "https://files.pythonhosted.org/packages/dc/bd/720b65bea63ec9de0ac7414c33b9baf271c8de8996e5ff324dc93fc90ff1/zstandard-0.23.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", size = 5204823 }, + { url = "https://files.pythonhosted.org/packages/d8/40/d678db1556e3941d330cd4e95623a63ef235b18547da98fa184cbc028ecf/zstandard-0.23.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", size = 5666490 }, + { url = "https://files.pythonhosted.org/packages/ed/cc/c89329723d7515898a1fc7ef5d251264078548c505719d13e9511800a103/zstandard-0.23.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", size = 5196622 }, + { url = "https://files.pythonhosted.org/packages/78/4c/634289d41e094327a94500dfc919e58841b10ea3a9efdfafbac614797ec2/zstandard-0.23.0-cp39-cp39-win32.whl", hash = "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", size = 430620 }, + { url = "https://files.pythonhosted.org/packages/a2/e2/0b0c5a0f4f7699fecd92c1ba6278ef9b01f2b0b0dd46f62bfc6729c05659/zstandard-0.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", size = 495528 }, +] From a3cc43f60d605739a40680ec962b7204f1908426 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 23 Jan 2025 12:47:19 -0800 Subject: [PATCH 1682/2111] PYTHON-4999 Resync retryable writes tests (#2073) --- test/retryable_writes/unified/aggregate-out-merge.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/retryable_writes/unified/aggregate-out-merge.json b/test/retryable_writes/unified/aggregate-out-merge.json index c46bf8c31f..fd25c345ac 100644 --- a/test/retryable_writes/unified/aggregate-out-merge.json +++ b/test/retryable_writes/unified/aggregate-out-merge.json @@ -1,6 +1,6 @@ { "description": "aggregate with $out/$merge does not set txnNumber", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "3.6", @@ -45,6 +45,11 @@ "tests": [ { "description": "aggregate with $out does not set txnNumber", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "object": "collection0", From dc182310dabff470dbbfc7da3c09eb6a4e08dfed Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Jan 2025 07:47:14 -0600 Subject: [PATCH 1683/2111] PYTHON-5047 Avoid updating the uv lock unintentionally (#2076) --- .evergreen/run-tests.sh | 8 ++++---- .evergreen/scripts/setup-dev-env.sh | 6 +++--- justfile | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index d647955059..fbe310ad1e 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -37,7 +37,7 @@ export PIP_QUIET=1 # Quiet by default export PIP_PREFER_BINARY=1 # Prefer binary dists by default set +x -PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") +PYTHON_IMPL=$(uv run --frozen python -c "import platform; print(platform.python_implementation())") # Try to source local Drivers Secrets if [ -f ./secrets-export.sh ]; then @@ -49,11 +49,11 @@ fi # Start compiling the args we'll pass to uv. # Run in an isolated environment so as not to pollute the base venv. -UV_ARGS=("--isolated --extra test") +UV_ARGS=("--isolated --frozen --extra test") # Ensure C extensions if applicable. if [ -z "${NO_EXT:-}" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - uv run tools/fail_if_no_c.py + uv run --frozen tools/fail_if_no_c.py fi if [ "$AUTH" != "noauth" ]; then @@ -239,7 +239,7 @@ if [ -n "$PERF_TEST" ]; then fi echo "Running $AUTH tests over $SSL with python $(uv python find)" -uv run python -c 'import sys; print(sys.version)' +uv run --frozen python -c 'import sys; print(sys.version)' # Run the tests, and store the results in Evergreen compatible XUnit XML diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 3f8d0c4292..ae4b44c626 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -32,11 +32,11 @@ if [ ! -d $BIN_DIR ]; then echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh fi echo "Using python $UV_PYTHON" -uv sync -uv run --with pip pip install -e . +uv sync --frozen +uv run --frozen --with pip pip install -e . echo "Setting up python environment... done." # Ensure there is a pre-commit hook if there is a git checkout. if [ -d .git ] && [ ! -f .git/hooks/pre-commit ]; then - uv run pre-commit install + uv run --frozen pre-commit install fi diff --git a/justfile b/justfile index 6bcfe0c79c..8a076038a4 100644 --- a/justfile +++ b/justfile @@ -4,7 +4,7 @@ set dotenv-load set dotenv-filename := "./.evergreen/scripts/env.sh" # Commonly used command segments. -uv_run := "uv run --isolated " +uv_run := "uv run --isolated --frozen " typing_run := uv_run + "--group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" docs_run := uv_run + "--extra docs" doc_build := "./doc/_build" From a3208df5c94620228b015ac79cd1548582c65ab1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 24 Jan 2025 14:30:07 -0800 Subject: [PATCH 1684/2111] PYTHON-5059 Update default maxMessageSizeBytes and maxWriteBatchSize (#2078) --- pymongo/common.py | 4 ++-- pymongo/hello.py | 2 +- test/test_server_description.py | 13 ++++++++----- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/pymongo/common.py b/pymongo/common.py index 5661de011c..b442da6a3e 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -60,10 +60,10 @@ # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024**2) -MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE +MAX_MESSAGE_SIZE = 48 * 1000 * 1000 MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 -MAX_WRITE_BATCH_SIZE = 1000 +MAX_WRITE_BATCH_SIZE = 100000 # What this version of PyMongo supports. MIN_SUPPORTED_SERVER_VERSION = "4.0" diff --git a/pymongo/hello.py b/pymongo/hello.py index 62bb799805..c30b825e19 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -133,7 +133,7 @@ def max_bson_size(self) -> int: @property def max_message_size(self) -> int: - return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) + return self._doc.get("maxMessageSizeBytes", common.MAX_MESSAGE_SIZE) @property def max_write_batch_size(self) -> int: diff --git a/test/test_server_description.py b/test/test_server_description.py index fe7a5f7119..e8c0098cb6 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -23,6 +23,7 @@ from bson.int64 import Int64 from bson.objectid import ObjectId +from pymongo import common from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription from pymongo.server_type import SERVER_TYPE @@ -132,11 +133,13 @@ def test_fields(self): self.assertEqual(4, s.min_wire_version) self.assertEqual(25, s.max_wire_version) - def test_default_max_message_size(self): - s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "maxBsonObjectSize": 2}) - - # Twice max_bson_size. - self.assertEqual(4, s.max_message_size) + def test_defaults(self): + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) + self.assertEqual(common.MAX_BSON_SIZE, s.max_bson_size) + self.assertEqual(common.MAX_MESSAGE_SIZE, s.max_message_size) + self.assertEqual(common.MIN_WIRE_VERSION, s.min_wire_version) + self.assertEqual(common.MAX_WIRE_VERSION, s.max_wire_version) + self.assertEqual(common.MAX_WRITE_BATCH_SIZE, s.max_write_batch_size) def test_standalone(self): s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) From 9082a4be23622458ee350c3171bc754cdf1db89a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 24 Jan 2025 17:14:20 -0600 Subject: [PATCH 1685/2111] PYTHON-5058 Build linux aarch64 wheel using native runner and omit ppc64le and s390x wheels (#2077) --- .github/workflows/dist.yml | 7 ++++--- doc/changelog.rst | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index a4c5a8279b..5100c70d43 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -35,9 +35,10 @@ jobs: # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - [ubuntu-20.04, "manylinux_x86_64", "cp3*-manylinux_x86_64"] - - [ubuntu-20.04, "manylinux_aarch64", "cp3*-manylinux_aarch64"] - - [ubuntu-20.04, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] - - [ubuntu-20.04, "manylinux_s390x", "cp3*-manylinux_s390x"] + - [ubuntu-24.04-arm, "manylinux_aarch64", "cp3*-manylinux_aarch64"] + # Disabled pending PYTHON-5058 + # - [ubuntu-24.04, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] + # - [ubuntu-24.04, "manylinux_s390x", "cp3*-manylinux_s390x"] - [ubuntu-20.04, "manylinux_i686", "cp3*-manylinux_i686"] - [windows-2019, "win_amd6", "cp3*-win_amd64"] - [windows-2019, "win32", "cp3*-win32"] diff --git a/doc/changelog.rst b/doc/changelog.rst index 4942d85de8..1f3efb8ad0 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -11,6 +11,7 @@ Changes in Version 4.11.0 (YYYY/MM/DD) A future minor release of PyMongo will raise the minimum supported MongoDB Server version from 4.0 to 4.2. This is in accordance with [MongoDB Software Lifecycle Schedules](https://www.mongodb.com/legal/support-policy/lifecycles). **Support for MongoDB Server 4.0 will be dropped in a future release!** +.. warning:: This version does not include wheels for ``ppc64le`` or ``s390x`` architectures, see `PYTHON-5058`_ for more information. PyMongo 4.11 brings a number of changes including: @@ -49,6 +50,7 @@ in this release. .. _PyMongo 4.11 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40784 .. _PYTHON-5027: https://jira.mongodb.org/browse/PYTHON-5027 .. _PYTHON-5024: https://jira.mongodb.org/browse/PYTHON-5024 +.. _PYTHON-5058: https://jira.mongodb.org/browse/PYTHON-5058 Changes in Version 4.10.1 (2024/10/01) -------------------------------------- From 2225ccadce0033f2ed16a99e44dc98085263da59 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Jan 2025 08:59:29 -0600 Subject: [PATCH 1686/2111] PYTHON-5062 Add GitHub Actions CodeQL scanning (#2079) --- .github/workflows/codeql.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e620cb1801..bb2418cf89 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -34,6 +34,8 @@ jobs: build-mode: manual - language: python build-mode: none + - language: actions + build-mode: none steps: - name: Checkout repository uses: actions/checkout@v4 From 848ab4f7db6e2a151b98765547300198e489172a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Jan 2025 09:24:48 -0600 Subject: [PATCH 1687/2111] PYTHON-5047 Improve testing of publish workflows (#2080) --- .github/workflows/release-python.yml | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index ee4ea32f82..6548a7d1ad 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -13,6 +13,8 @@ on: description: "Dry Run?" default: false type: boolean + schedule: + - cron: '30 5 * * *' env: # Changes per repo @@ -20,6 +22,10 @@ env: # Changes per branch SILK_ASSET_GROUP: mongodb-python-driver EVERGREEN_PROJECT: mongo-python-driver + # Constant + DRY_RUN: ${{ inputs.dry_run || 'true' }} + FOLLOWING_VERSION: ${{ inputs.following_version || '' }} + VERSION: ${{ inputs.version || '10.10.10.10' }} defaults: run: @@ -48,8 +54,8 @@ jobs: - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v2 id: pre-publish with: - version: ${{ inputs.version }} - dry_run: ${{ inputs.dry_run }} + version: ${{ env.VERSION }} + dry_run: ${{ env.DRY_RUN }} build-dist: needs: [pre-publish] @@ -78,8 +84,13 @@ jobs: with: name: all-dist-${{ github.run_id }} path: dist/ + - name: Publish package distributions to TestPyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + skip-existing: true - name: Publish package distributions to PyPI - if: startsWith(inputs.dry_run, 'false') + if: startsWith(env.DRY_RUN, 'false') uses: pypa/gh-action-pypi-publish@release/v1 post-publish: @@ -104,10 +115,10 @@ jobs: artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} - uses: mongodb-labs/drivers-github-tools/python/post-publish@v2 with: - version: ${{ inputs.version }} - following_version: ${{ inputs.following_version }} + version: ${{ env.VERSION }} + following_version: ${{ env.FOLLOWING_VERSION }} product_name: ${{ env.PRODUCT_NAME }} silk_asset_group: ${{ env.SILK_ASSET_GROUP }} evergreen_project: ${{ env.EVERGREEN_PROJECT }} token: ${{ github.token }} - dry_run: ${{ inputs.dry_run }} + dry_run: ${{ env.DRY_RUN }} From 4567f8875eb8ea8c4a1bd2e0bb13e41e4d3da7b2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 27 Jan 2025 13:43:11 -0600 Subject: [PATCH 1688/2111] PYTHON-5047 Fix handling of attestation on testpypi workflows (#2081) --- .github/workflows/release-python.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 6548a7d1ad..88cffcda53 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -89,6 +89,7 @@ jobs: with: repository-url: https://test.pypi.org/legacy/ skip-existing: true + attestations: ${{ env.DRY_RUN }} - name: Publish package distributions to PyPI if: startsWith(env.DRY_RUN, 'false') uses: pypa/gh-action-pypi-publish@release/v1 From dc2993835eaeacb2dcb08969afafcbddf08f41fb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 28 Jan 2025 09:27:09 -0600 Subject: [PATCH 1689/2111] PYTHON-5047 Fix dry run logic in releases (#2083) --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 88cffcda53..bcf37d1a22 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -23,7 +23,7 @@ env: SILK_ASSET_GROUP: mongodb-python-driver EVERGREEN_PROJECT: mongo-python-driver # Constant - DRY_RUN: ${{ inputs.dry_run || 'true' }} + DRY_RUN: ${{ inputs.dry_run == 'true' }} FOLLOWING_VERSION: ${{ inputs.following_version || '' }} VERSION: ${{ inputs.version || '10.10.10.10' }} From cae161ecddbc9c380c21b675da8c042603450c63 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:30:50 +0000 Subject: [PATCH 1690/2111] BUMP 4.11 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 3de24a8e14..22972c5ce4 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.11.0.dev0" +__version__ = "4.11" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 78724cde8d93a29b1ca92ca5211bb4f6e909200d Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:42:56 +0000 Subject: [PATCH 1691/2111] BUMP 4.12.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 22972c5ce4..f7a1f3dcb3 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.11" +__version__ = "4.12.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 4ecf786892b71011b61a03de4cb0a9b17397c96c Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 29 Jan 2025 05:33:03 +0000 Subject: [PATCH 1692/2111] BUMP 10.10.10.10 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index f7a1f3dcb3..07a622cdc6 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.12.0.dev0" +__version__ = "10.10.10.10" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 9603e92894a17954586c1fc68f333d56b2d2c5e2 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 29 Jan 2025 05:43:30 +0000 Subject: [PATCH 1693/2111] BUMP 10.10.11.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 07a622cdc6..983e7373a9 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "10.10.10.10" +__version__ = "10.10.11.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From c2e7fae7d9a45357090cc75f0e3ca34692e72506 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jan 2025 09:33:36 -0600 Subject: [PATCH 1694/2111] BUMP 4.12.0.dev0 --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 983e7373a9..f7a1f3dcb3 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "10.10.11.dev0" +__version__ = "4.12.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 82a8a60af64a6b3fedcfcf7e7e002e8adb2ecc38 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 29 Jan 2025 14:05:59 -0500 Subject: [PATCH 1695/2111] PYTHON-5077 - Convert test.test_data_lake to async (#2091) --- test/asynchronous/test_data_lake.py | 111 ++++++++++++++++++++++++++++ test/test_data_lake.py | 20 ++--- tools/synchro.py | 1 + 3 files changed, 123 insertions(+), 9 deletions(-) create mode 100644 test/asynchronous/test_data_lake.py diff --git a/test/asynchronous/test_data_lake.py b/test/asynchronous/test_data_lake.py new file mode 100644 index 0000000000..0b259fb0d0 --- /dev/null +++ b/test/asynchronous/test_data_lake.py @@ -0,0 +1,111 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test Atlas Data Lake.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, AsyncUnitTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils import ( + OvertCommandListener, +) + +from pymongo.asynchronous.helpers import anext + +_IS_SYNC = False + +pytestmark = pytest.mark.data_lake + + +class TestDataLakeMustConnect(AsyncUnitTest): + async def test_connected_to_data_lake(self): + data_lake = os.environ.get("TEST_DATA_LAKE") + if not data_lake: + self.skipTest("TEST_DATA_LAKE is not set") + + self.assertTrue( + async_client_context.is_data_lake and async_client_context.connected, + "client context must be connected to data lake when DATA_LAKE is set. Failed attempts:\n{}".format( + async_client_context.connection_attempt_info() + ), + ) + + +class TestDataLakeProse(AsyncIntegrationTest): + # Default test database and collection names. + TEST_DB = "test" + TEST_COLLECTION = "driverdata" + + @async_client_context.require_data_lake + async def asyncSetUp(self): + await super().asyncSetUp() + + # Test killCursors + async def test_1(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) + await anext(cursor) + + # find command assertions + find_cmd = listener.succeeded_events[-1] + self.assertEqual(find_cmd.command_name, "find") + cursor_id = find_cmd.reply["cursor"]["id"] + cursor_ns = find_cmd.reply["cursor"]["ns"] + + # killCursors command assertions + await cursor.close() + started = listener.started_events[-1] + self.assertEqual(started.command_name, "killCursors") + succeeded = listener.succeeded_events[-1] + self.assertEqual(succeeded.command_name, "killCursors") + + self.assertIn(cursor_id, started.command["cursors"]) + target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) + self.assertEqual(cursor_ns, target_ns) + + self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) + + # Test no auth + async def test_2(self): + client = await self.async_rs_client_noauth() + await client.admin.command("ping") + + # Test with auth + async def test_3(self): + for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: + client = await self.async_rs_or_single_client(authMechanism=mechanism) + await client[self.TEST_DB][self.TEST_COLLECTION].find_one() + + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = Path(__file__).parent / "data_lake/unified" +else: + TEST_PATH = Path(__file__).parent.parent / "data_lake/unified" + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_data_lake.py b/test/test_data_lake.py index a374db550e..797ef85000 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -23,20 +23,20 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import IntegrationTest, UnitTest, client_context, unittest from test.unified_format import generate_test_classes from test.utils import ( OvertCommandListener, ) -pytestmark = pytest.mark.data_lake +from pymongo.synchronous.helpers import next +_IS_SYNC = True -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") +pytestmark = pytest.mark.data_lake -class TestDataLakeMustConnect(unittest.TestCase): +class TestDataLakeMustConnect(UnitTest): def test_connected_to_data_lake(self): data_lake = os.environ.get("TEST_DATA_LAKE") if not data_lake: @@ -55,10 +55,9 @@ class TestDataLakeProse(IntegrationTest): TEST_DB = "test" TEST_COLLECTION = "driverdata" - @classmethod @client_context.require_data_lake - def setUpClass(cls): - super().setUpClass() + def setUp(self): + super().setUp() # Test killCursors def test_1(self): @@ -99,7 +98,10 @@ def test_3(self): # Location of JSON test specifications. -TEST_PATH = Path(__file__).parent / "data_lake/unified" +if _IS_SYNC: + TEST_PATH = Path(__file__).parent / "data_lake/unified" +else: + TEST_PATH = Path(__file__).parent.parent / "data_lake/unified" # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/tools/synchro.py b/tools/synchro.py index dbcbbd1351..cbac5752cc 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -203,6 +203,7 @@ def async_only_test(f: str) -> bool: "test_crud_unified.py", "test_cursor.py", "test_database.py", + "test_data_lake.py", "test_encryption.py", "test_grid_file.py", "test_logger.py", From cbc3af704f022622def68e1a7752b12e671d6df9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 29 Jan 2025 14:06:09 -0500 Subject: [PATCH 1696/2111] PYTHON-5076 - Convert test.test_custom_types to async (#2090) --- test/asynchronous/test_custom_types.py | 989 +++++++++++++++++++++++++ test/test_custom_types.py | 13 +- tools/synchro.py | 1 + 3 files changed, 998 insertions(+), 5 deletions(-) create mode 100644 test/asynchronous/test_custom_types.py diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py new file mode 100644 index 0000000000..0f9d737afe --- /dev/null +++ b/test/asynchronous/test_custom_types.py @@ -0,0 +1,989 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test support for callbacks to encode/decode custom types.""" +from __future__ import annotations + +import datetime +import sys +import tempfile +from collections import OrderedDict +from decimal import Decimal +from random import random +from typing import Any, Tuple, Type, no_type_check + +from gridfs.asynchronous.grid_file import AsyncGridIn, AsyncGridOut + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) +from bson.errors import InvalidDocument +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from pymongo.asynchronous.collection import ReturnDocument +from pymongo.asynchronous.helpers import anext +from pymongo.errors import DuplicateKeyError +from pymongo.message import _CursorAddress + +_IS_SYNC = False + + +class DecimalEncoder(TypeEncoder): + @property + def python_type(self): + return Decimal + + def transform_python(self, value): + return Decimal128(value) + + +class DecimalDecoder(TypeDecoder): + @property + def bson_type(self): + return Decimal128 + + def transform_bson(self, value): + return value.to_decimal() + + +class DecimalCodec(DecimalDecoder, DecimalEncoder): + pass + + +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) + + +class UndecipherableInt64Type: + def __init__(self, value): + self.value = value + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.value == other.value + # Does not compare equal to integers. + return False + + +class UndecipherableIntDecoder(TypeDecoder): + bson_type = Int64 + + def transform_bson(self, value): + return UndecipherableInt64Type(value) + + +class UndecipherableIntEncoder(TypeEncoder): + python_type = UndecipherableInt64Type + + def transform_python(self, value): + return Int64(value.value) + + +UNINT_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) + + +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) + + +class UppercaseTextDecoder(TypeDecoder): + bson_type = str + + def transform_bson(self, value): + return value.upper() + + +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) + + +def type_obfuscating_decoder_factory(rt_type): + class ResumeTokenToNanDecoder(TypeDecoder): + bson_type = rt_type + + def transform_bson(self, value): + return "NaN" + + return ResumeTokenToNanDecoder + + +class CustomBSONTypeTests: + @no_type_check + def roundtrip(self, doc): + bsonbytes = encode(doc, codec_options=self.codecopts) + rt_document = decode(bsonbytes, codec_options=self.codecopts) + self.assertEqual(doc, rt_document) + + def test_encode_decode_roundtrip(self): + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) + + @no_type_check + def test_decode_all(self): + documents = [] + for dec in range(3): + documents.append({"average": Decimal(f"56.4{dec}")}) + + bsonstream = b"" + for doc in documents: + bsonstream += encode(doc, codec_options=self.codecopts) + + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) + + @no_type_check + def test__bson_to_dict(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + decoded_document = _bson_to_dict(rawbytes, self.codecopts) + self.assertEqual(document, decoded_document) + + @no_type_check + def test__dict_to_bson(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + encoded_document = _dict_to_bson(document, False, self.codecopts) + self.assertEqual(encoded_document, rawbytes) + + def _generate_multidocument_bson_stream(self): + inp_num = [str(random() * 100)[:4] for _ in range(10)] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] + bsonstream = b"" + for doc in docs: + bsonstream += encode(doc) + return edocs, bsonstream + + @no_type_check + def test_decode_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + @no_type_check + def test_decode_file_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + fileobj = tempfile.TemporaryFile() + fileobj.write(bson_data) + fileobj.seek(0) + + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + fileobj.close() + + +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.codecopts = DECIMAL_CODECOPTS + + +class TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + codec_options = CodecOptions( + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) + cls.codecopts = codec_options + + +class TestBSONFallbackEncoder(unittest.TestCase): + def _get_codec_options(self, fallback_encoder): + type_registry = TypeRegistry(fallback_encoder=fallback_encoder) + return CodecOptions(type_registry=type_registry) + + def test_simple(self): + codecopts = self._get_codec_options(lambda x: Decimal128(x)) + document = {"average": Decimal("56.47")} + bsonbytes = encode(document, codec_options=codecopts) + + exp_document = {"average": Decimal128("56.47")} + exp_bsonbytes = encode(exp_document) + self.assertEqual(bsonbytes, exp_bsonbytes) + + def test_erroring_fallback_encoder(self): + codecopts = self._get_codec_options(lambda _: 1 / 0) + + # fallback converter should not be invoked when encoding known types. + encode( + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) + + # expect an error when encoding a custom type. + document = {"average": Decimal("56.47")} + with self.assertRaises(ZeroDivisionError): + encode(document, codec_options=codecopts) + + def test_noop_fallback_encoder(self): + codecopts = self._get_codec_options(lambda x: x) + document = {"average": Decimal("56.47")} + with self.assertRaises(InvalidDocument): + encode(document, codec_options=codecopts) + + def test_type_unencodable_by_fallback_encoder(self): + def fallback_encoder(value): + try: + return Decimal128(value) + except: + raise TypeError("cannot encode type %s" % (type(value))) + + codecopts = self._get_codec_options(fallback_encoder) + document = {"average": Decimal} + with self.assertRaises(TypeError): + encode(document, codec_options=codecopts) + + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + + +class TestBSONTypeEnDeCodecs(unittest.TestCase): + def test_instantiation(self): + msg = "Can't instantiate abstract class" + + def run_test(base, attrs, fail): + codec = type("testcodec", (base,), attrs) + if fail: + with self.assertRaisesRegex(TypeError, msg): + codec() + else: + codec() + + class MyType: + pass + + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) + + def test_type_checks(self): + self.assertTrue(issubclass(TypeCodec, TypeEncoder)) + self.assertTrue(issubclass(TypeCodec, TypeDecoder)) + self.assertFalse(issubclass(TypeDecoder, TypeEncoder)) + self.assertFalse(issubclass(TypeEncoder, TypeDecoder)) + + +class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + + @classmethod + def setUpClass(cls): + class TypeA: + def __init__(self, x): + self.value = x + + class TypeB: + def __init__(self, x): + self.value = x + + # transforms A, and only A into B + def fallback_encoder_A2B(value): + assert isinstance(value, TypeA) + return TypeB(value.value) + + # transforms A, and only A into something encodable + def fallback_encoder_A2BSON(value): + assert isinstance(value, TypeA) + return value.value + + # transforms B into something encodable + class B2BSON(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return value.value + + # transforms A into B + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class A2B(TypeEncoder): + python_type = TypeA + + def transform_python(self, value): + return TypeB(value.value) + + # transforms B into A + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class B2A(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return TypeA(value.value) + + cls.TypeA = TypeA + cls.TypeB = TypeB + cls.fallback_encoder_A2B = staticmethod(fallback_encoder_A2B) + cls.fallback_encoder_A2BSON = staticmethod(fallback_encoder_A2BSON) + cls.B2BSON = B2BSON + cls.B2A = B2A + cls.A2B = A2B + + def test_encode_fallback_then_custom(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_encode_custom_then_fallback(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_chaining_encoders_fails(self): + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) + + with self.assertRaises(InvalidDocument): + encode({"x": self.TypeA(123)}, codec_options=codecopts) + + def test_infinite_loop_exceeds_max_recursion_depth(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) + + # Raises max recursion depth exceeded error + with self.assertRaises(RuntimeError): + encode({"x": self.TypeA(100)}, codec_options=codecopts) + + +class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + + @classmethod + def setUpClass(cls): + class MyIntType: + def __init__(self, x): + assert isinstance(x, int) + self.x = x + + class MyStrType: + def __init__(self, x): + assert isinstance(x, str) + self.x = x + + class MyIntCodec(TypeCodec): + @property + def python_type(self): + return MyIntType + + @property + def bson_type(self): + return int + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): + return MyIntType(value) + + class MyStrCodec(TypeCodec): + @property + def python_type(self): + return MyStrType + + @property + def bson_type(self): + return str + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): + return MyStrType(value) + + def fallback_encoder(value): + return value + + cls.types = (MyIntType, MyStrType) + cls.codecs = (MyIntCodec, MyStrCodec) + cls.fallback_encoder = fallback_encoder + + def test_simple(self): + codec_instances = [codec() for codec in self.codecs] + + def assert_proper_initialization(type_registry, codec_instances): + self.assertEqual( + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) + + type_registry = TypeRegistry(codec_instances, self.fallback_encoder) + assert_proper_initialization(type_registry, codec_instances) + + type_registry = TypeRegistry( + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) + assert_proper_initialization(type_registry, codec_instances) + + # Ensure codec list held by the type registry doesn't change if we + # mutate the initial list. + codec_instances_copy = list(codec_instances) + codec_instances.pop(0) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) + + def test_simple_separate_codecs(self): + class MyIntEncoder(TypeEncoder): + python_type = self.types[0] + + def transform_python(self, value): + return value.x + + class MyIntDecoder(TypeDecoder): + bson_type = int + + def transform_bson(self, value): + return self.types[0](value) + + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] + type_registry = TypeRegistry(codec_instances) + + self.assertEqual( + type_registry._encoder_map, + {MyIntEncoder.python_type: codec_instances[1].transform_python}, + ) + self.assertEqual( + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, + ) + + def test_initialize_fail(self): + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(self.codecs) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([type("AnyType", (object,), {})()]) + + err_msg = f"fallback_encoder {True!r} is not a callable" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([], True) # type: ignore[arg-type] + + err_msg = "fallback_encoder {!r} is not a callable".format("hello") + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + + def test_type_registry_repr(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" + self.assertEqual(r, repr(type_registry)) + + def test_type_registry_eq(self): + codec_instances = [codec() for codec in self.codecs] + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + + codec_instances_2 = [codec() for codec in self.codecs] + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + + def test_builtin_types_override_fails(self): + def run_test(base, attrs): + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) + for pytype in _BUILT_IN_TYPES: + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + # Test only some subtypes as not all can be subclassed. + if pytype in [ + bool, + type(None), + RE_TYPE, + ]: + continue + + class MyType(pytype): # type: ignore + pass + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + run_test(TypeEncoder, {}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) + + +class TestCollectionWCustomType(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.drop() + + async def asyncTearDown(self): + await self.db.test.drop() + + async def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + await collection.insert_one({"_id": 1, "data": 2**520}) + ret = await collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + + async def test_command_errors_w_custom_type_decoder(self): + db = self.db + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + result = await test.insert_one(test_doc) + self.assertEqual(result.inserted_id, test_doc["_id"]) + with self.assertRaises(DuplicateKeyError): + await test.insert_one(test_doc) + + async def test_find_w_custom_type_decoder(self): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + await db.test.insert_one(doc) + + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + async for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + async def test_find_w_custom_type_decoder_and_document_class(self): + async def run_test(doc_cls): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + await db.test.insert_one(doc) + + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) + async for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc, doc_cls) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + for doc_cls in [RawBSONDocument, OrderedDict]: + await run_test(doc_cls) + + async def test_aggregate_w_custom_type_decoder(self): + db = self.db + await db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + pipeline: list = [ + {"$match": {"status": "complete"}}, + {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] + result = await test.aggregate(pipeline) + + res = (await result.to_list())[0] + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) + + async def test_distinct_w_custom_type(self): + await self.db.drop_collection("test") + + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) + values = [ + UndecipherableInt64Type(1), + UndecipherableInt64Type(2), + UndecipherableInt64Type(3), + {"b": UndecipherableInt64Type(3)}, + ] + await test.insert_many({"a": val} for val in values) + + self.assertEqual(values, await test.distinct("a")) + + async def test_find_one_and__w_custom_type_decoder(self): + db = self.db + c = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + await c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = await c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = await c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = await c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertIsNone(await c.find_one()) + + +class TestGridFileCustomType(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + + async def test_grid_out_custom_opts(self): + db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) + one = AsyncGridIn( + db.fs, + _id=5, + filename="my_file", + chunkSize=1000, + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(db.fs, 5) + await two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual(1000, two.chunk_size) + self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) + self.assertEqual(3, two.bar) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + +class ChangeStreamsWCustomTypesTestMixin: + @no_type_check + async def change_stream(self, *args, **kwargs): + stream = await self.watched_target.watch(*args, max_await_time_ms=1, **kwargs) + self.addAsyncCleanup(stream.close) + return stream + + @no_type_check + async def insert_and_check(self, change_stream, insert_doc, expected_doc): + await self.input_target.insert_one(insert_doc) + change = await anext(change_stream) + self.assertEqual(change["fullDocument"], expected_doc) + + @no_type_check + async def kill_change_stream_cursor(self, change_stream): + # Cause a cursor not found error on the next getMore. + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.input_target.database.client + await client._close_cursor_now(cursor.cursor_id, address) + + @no_type_check + async def test_simple(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + await self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [ + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] + + change_stream = await self.change_stream() + + await self.insert_and_check(change_stream, input_docs[0], expected_docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[1], expected_docs[1]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + + @no_type_check + async def test_custom_type_in_pipeline(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + await self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] + + # UndecipherableInt64Type should be encoded with the TypeRegistry. + change_stream = await self.change_stream( + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) + + await self.input_target.insert_one(input_docs[0]) + await self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + + @no_type_check + async def test_break_resume_token(self): + # Get one document from a change stream to determine resumeToken type. + await self.create_targets() + change_stream = await self.change_stream() + await self.input_target.insert_one({"data": "test"}) + change = await anext(change_stream) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) + + # Custom-decoding the resumeToken type breaks resume tokens. + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) + + # Re-create targets, change stream and proceed. + await self.create_targets(codec_options=codecopts) + + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] + + change_stream = await self.change_stream() + await self.insert_and_check(change_stream, docs[0], docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, docs[1], docs[1]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, docs[2], docs[2]) + + @no_type_check + async def test_document_class(self): + async def run_test(doc_cls): + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) + + await self.create_targets(codec_options=codecopts) + change_stream = await self.change_stream() + + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} + await self.input_target.insert_one(doc) + change = await anext(change_stream) + + self.assertIsInstance(change, doc_cls) + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") + + for doc_cls in [OrderedDict, RawBSONDocument]: + await run_test(doc_cls) + + +class TestCollectionChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + + async def create_targets(self, *args, **kwargs): + self.watched_target = self.db.get_collection("test", *args, **kwargs) + self.input_target = self.watched_target + # Ensure the collection exists and is empty. + await self.input_target.insert_one({}) + await self.input_target.delete_many({}) + + +class TestDatabaseChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + await self.client.drop_database(self.watched_target) + + async def create_targets(self, *args, **kwargs): + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) + self.input_target = self.watched_target.test + # Insert a record to ensure db, coll are created. + await self.input_target.insert_one({"data": "dummy"}) + + +class TestClusterChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + await self.client.drop_database(self.db) + + async def create_targets(self, *args, **kwargs): + codec_options = kwargs.pop("codec_options", None) + if codec_options: + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class + self.watched_target = await self.async_rs_client(*args, **kwargs) + self.input_target = self.watched_target[self.db.name].test + # Insert a record to ensure db, coll are created. + await self.input_target.insert_one({"data": "dummy"}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 6771ea25f9..08e2a46f8f 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -23,10 +23,11 @@ from random import random from typing import Any, Tuple, Type, no_type_check +from gridfs.synchronous.grid_file import GridIn, GridOut + sys.path[0:0] = [""] -from test import client_context, unittest -from test.test_client import IntegrationTest +from test import IntegrationTest, client_context, unittest from bson import ( _BUILT_IN_TYPES, @@ -50,10 +51,12 @@ from bson.errors import InvalidDocument from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument -from gridfs import GridIn, GridOut from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.helpers import next + +_IS_SYNC = True class DecimalEncoder(TypeEncoder): @@ -707,7 +710,7 @@ def test_aggregate_w_custom_type_decoder(self): ] result = test.aggregate(pipeline) - res = list(result)[0] + res = (result.to_list())[0] self.assertEqual(res["_id"], "complete") self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) self.assertEqual(res["total_qty"].value, 20) @@ -774,6 +777,7 @@ def test_grid_out_custom_opts(self): one.close() two = GridOut(db.fs, 5) + two.open() self.assertEqual("my_file", two.name) self.assertEqual("my_file", two.filename) @@ -970,7 +974,6 @@ def create_targets(self, *args, **kwargs): kwargs["type_registry"] = codec_options.type_registry kwargs["document_class"] = codec_options.document_class self.watched_target = self.rs_client(*args, **kwargs) - self.addCleanup(self.watched_target.close) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. self.input_target.insert_one({"data": "dummy"}) diff --git a/tools/synchro.py b/tools/synchro.py index cbac5752cc..897e5e8018 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -202,6 +202,7 @@ def async_only_test(f: str) -> bool: "test_create_entities.py", "test_crud_unified.py", "test_cursor.py", + "test_custom_types.py", "test_database.py", "test_data_lake.py", "test_encryption.py", From b4e32a1d8388fe5bf731c0c866b8bb96bbf19870 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jan 2025 13:27:07 -0600 Subject: [PATCH 1697/2111] PYTHON-5047 Fix dry run logic in releases again (#2092) --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index bcf37d1a22..0801d12f59 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -23,7 +23,7 @@ env: SILK_ASSET_GROUP: mongodb-python-driver EVERGREEN_PROJECT: mongo-python-driver # Constant - DRY_RUN: ${{ inputs.dry_run == 'true' }} + DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && inputs.dry_run || 'true' }} FOLLOWING_VERSION: ${{ inputs.following_version || '' }} VERSION: ${{ inputs.version || '10.10.10.10' }} From 1784e2c4b9c7e5efbed1796e81e37fa49f8845f0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 29 Jan 2025 15:35:00 -0500 Subject: [PATCH 1698/2111] PYTHON-5112 - Fix just install (#2095) --- .evergreen/scripts/setup-dev-env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index ae4b44c626..b56897961e 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -30,8 +30,8 @@ if [ ! -d $BIN_DIR ]; then fi export UV_PYTHON=${PYTHON_BINARY} echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh + echo "Using python $UV_PYTHON" fi -echo "Using python $UV_PYTHON" uv sync --frozen uv run --frozen --with pip pip install -e . echo "Setting up python environment... done." From 34ae214e33e922e3478388517a2b37aa4fd64dba Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 29 Jan 2025 17:46:36 -0600 Subject: [PATCH 1699/2111] PYTHON-5047 Fix dry run logic in releases yet again (#2098) --- .github/workflows/release-python.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 0801d12f59..45157bfc2b 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -23,7 +23,9 @@ env: SILK_ASSET_GROUP: mongodb-python-driver EVERGREEN_PROJECT: mongo-python-driver # Constant - DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && inputs.dry_run || 'true' }} + # inputs will be empty on a scheduled run. so, we only set dry_run + # to 'false' when the input is set to 'false'. + DRY_RUN: ${{ ! contains(inputs.dry_run, 'false') }} FOLLOWING_VERSION: ${{ inputs.following_version || '' }} VERSION: ${{ inputs.version || '10.10.10.10' }} From 01f659cd8bd6ae970b044c3043c9ed2ca6d89bf4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 30 Jan 2025 12:34:59 -0800 Subject: [PATCH 1700/2111] PYTHON-5071 Use one event loop for all asyncio tests (#2086) --- test/__init__.py | 119 +++++++++++++++++++++++---------- test/asynchronous/__init__.py | 121 ++++++++++++++++++++++++---------- 2 files changed, 173 insertions(+), 67 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index d3a63db2d5..b49eee99ac 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -17,6 +17,7 @@ import asyncio import gc +import inspect import logging import multiprocessing import os @@ -30,28 +31,6 @@ import unittest import warnings from asyncio import iscoroutinefunction -from test.helpers import ( - COMPRESSORS, - IS_SRV, - MONGODB_API_VERSION, - MULTI_MONGOS_LB_URI, - TEST_LOADBALANCER, - TEST_SERVERLESS, - TLS_OPTIONS, - SystemCertsPatcher, - client_knobs, - db_pwd, - db_user, - global_knobs, - host, - is_server_resolvable, - port, - print_running_topology, - print_thread_stacks, - print_thread_tracebacks, - sanitize_cmd, - sanitize_reply, -) from pymongo.uri_parser import parse_uri @@ -63,7 +42,6 @@ HAVE_IPADDRESS = False from contextlib import contextmanager from functools import partial, wraps -from test.version import Version from typing import Any, Callable, Dict, Generator, overload from unittest import SkipTest from urllib.parse import quote_plus @@ -78,6 +56,32 @@ from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient +sys.path[0:0] = [""] + +from test.helpers import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TEST_SERVERLESS, + TLS_OPTIONS, + SystemCertsPatcher, + client_knobs, + db_pwd, + db_user, + global_knobs, + host, + is_server_resolvable, + port, + print_running_topology, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) +from test.version import Version + _IS_SYNC = True @@ -863,18 +867,66 @@ def max_message_size_bytes(self): # Reusable client context client_context = ClientContext() +# Global event loop for async tests. +LOOP = None -def reset_client_context(): - if _IS_SYNC: - # sync tests don't need to reset a client context - return - elif client_context.client is not None: - client_context.client.close() - client_context.client = None - client_context._init_client() + +def get_loop() -> asyncio.AbstractEventLoop: + """Get the test suite's global event loop.""" + global LOOP + if LOOP is None: + try: + LOOP = asyncio.get_running_loop() + except RuntimeError: + # no running event loop, fallback to get_event_loop. + try: + # Ignore DeprecationWarning: There is no current event loop + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + LOOP = asyncio.get_event_loop() + except RuntimeError: + LOOP = asyncio.new_event_loop() + asyncio.set_event_loop(LOOP) + return LOOP class PyMongoTestCase(unittest.TestCase): + if not _IS_SYNC: + # An async TestCase that uses a single event loop for all tests. + # Inspired by TestCase. + def setUp(self): + pass + + def tearDown(self): + pass + + def addCleanup(self, func, /, *args, **kwargs): + self.addCleanup(*(func, *args), **kwargs) + + def _callSetUp(self): + self.setUp() + self._callAsync(self.setUp) + + def _callTestMethod(self, method): + self._callMaybeAsync(method) + + def _callTearDown(self): + self._callAsync(self.tearDown) + self.tearDown() + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert inspect.iscoroutinefunction(func), f"{func!r} is not an async function" + return get_loop().run_until_complete(func(*args, **kwargs)) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return get_loop().run_until_complete(func(*args, **kwargs)) + else: + return func(*args, **kwargs) + def assertEqualCommand(self, expected, actual, msg=None): self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) @@ -1136,8 +1188,6 @@ class IntegrationTest(PyMongoTestCase): @client_context.require_connection def setUp(self) -> None: - if not _IS_SYNC: - reset_client_context() if client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") if client_context.serverless and not getattr(self, "RUN_ON_SERVERLESS", False): @@ -1186,6 +1236,9 @@ def tearDown(self) -> None: def setup(): + if not _IS_SYNC: + # Set up the event loop. + get_loop() client_context.init() warnings.resetwarnings() warnings.simplefilter("always") diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 73e2824742..76fae407da 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -17,6 +17,7 @@ import asyncio import gc +import inspect import logging import multiprocessing import os @@ -30,28 +31,6 @@ import unittest import warnings from asyncio import iscoroutinefunction -from test.helpers import ( - COMPRESSORS, - IS_SRV, - MONGODB_API_VERSION, - MULTI_MONGOS_LB_URI, - TEST_LOADBALANCER, - TEST_SERVERLESS, - TLS_OPTIONS, - SystemCertsPatcher, - client_knobs, - db_pwd, - db_user, - global_knobs, - host, - is_server_resolvable, - port, - print_running_topology, - print_thread_stacks, - print_thread_tracebacks, - sanitize_cmd, - sanitize_reply, -) from pymongo.uri_parser import parse_uri @@ -63,7 +42,6 @@ HAVE_IPADDRESS = False from contextlib import asynccontextmanager, contextmanager from functools import partial, wraps -from test.version import Version from typing import Any, Callable, Dict, Generator, overload from unittest import SkipTest from urllib.parse import quote_plus @@ -78,6 +56,32 @@ from pymongo.server_api import ServerApi from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +sys.path[0:0] = [""] + +from test.helpers import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TEST_SERVERLESS, + TLS_OPTIONS, + SystemCertsPatcher, + client_knobs, + db_pwd, + db_user, + global_knobs, + host, + is_server_resolvable, + port, + print_running_topology, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) +from test.version import Version + _IS_SYNC = False @@ -865,18 +869,66 @@ async def max_message_size_bytes(self): # Reusable client context async_client_context = AsyncClientContext() +# Global event loop for async tests. +LOOP = None + + +def get_loop() -> asyncio.AbstractEventLoop: + """Get the test suite's global event loop.""" + global LOOP + if LOOP is None: + try: + LOOP = asyncio.get_running_loop() + except RuntimeError: + # no running event loop, fallback to get_event_loop. + try: + # Ignore DeprecationWarning: There is no current event loop + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + LOOP = asyncio.get_event_loop() + except RuntimeError: + LOOP = asyncio.new_event_loop() + asyncio.set_event_loop(LOOP) + return LOOP + + +class AsyncPyMongoTestCase(unittest.TestCase): + if not _IS_SYNC: + # An async TestCase that uses a single event loop for all tests. + # Inspired by IsolatedAsyncioTestCase. + async def asyncSetUp(self): + pass -async def reset_client_context(): - if _IS_SYNC: - # sync tests don't need to reset a client context - return - elif async_client_context.client is not None: - await async_client_context.client.close() - async_client_context.client = None - await async_client_context._init_client() + async def asyncTearDown(self): + pass + def addAsyncCleanup(self, func, /, *args, **kwargs): + self.addCleanup(*(func, *args), **kwargs) + + def _callSetUp(self): + self.setUp() + self._callAsync(self.asyncSetUp) + + def _callTestMethod(self, method): + self._callMaybeAsync(method) + + def _callTearDown(self): + self._callAsync(self.asyncTearDown) + self.tearDown() + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert inspect.iscoroutinefunction(func), f"{func!r} is not an async function" + return get_loop().run_until_complete(func(*args, **kwargs)) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return get_loop().run_until_complete(func(*args, **kwargs)) + else: + return func(*args, **kwargs) -class AsyncPyMongoTestCase(unittest.IsolatedAsyncioTestCase): def assertEqualCommand(self, expected, actual, msg=None): self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) @@ -1154,8 +1206,6 @@ class AsyncIntegrationTest(AsyncPyMongoTestCase): @async_client_context.require_connection async def asyncSetUp(self) -> None: - if not _IS_SYNC: - await reset_client_context() if async_client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") if async_client_context.serverless and not getattr(self, "RUN_ON_SERVERLESS", False): @@ -1204,6 +1254,9 @@ async def asyncTearDown(self) -> None: async def async_setup(): + if not _IS_SYNC: + # Set up the event loop. + get_loop() await async_client_context.init() warnings.resetwarnings() warnings.simplefilter("always") From 94b9a54c8ef829307c04d984858386a4476986e8 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:10:01 -0800 Subject: [PATCH 1701/2111] PYTHON-5083 Convert test.test_gridfs_spec to async (#2104) --- test/asynchronous/test_gridfs_spec.py | 39 +++++++++++++++++++++++++++ test/test_gridfs_spec.py | 8 +++++- tools/synchro.py | 1 + 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_gridfs_spec.py diff --git a/test/asynchronous/test_gridfs_spec.py b/test/asynchronous/test_gridfs_spec.py new file mode 100644 index 0000000000..f3dc14fbdc --- /dev/null +++ b/test/asynchronous/test_gridfs_spec.py @@ -0,0 +1,39 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the AsyncGridFS unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "gridfs") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "gridfs") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index 6840b6ae0c..e84e19725e 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -17,14 +17,20 @@ import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "gridfs") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "gridfs") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/tools/synchro.py b/tools/synchro.py index 897e5e8018..e20a8facda 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -207,6 +207,7 @@ def async_only_test(f: str) -> bool: "test_data_lake.py", "test_encryption.py", "test_grid_file.py", + "test_gridfs_spec.py", "test_logger.py", "test_monitoring.py", "test_raw_bson.py", From 2909e1fc8a1937b3f2ae50f9df17521b623688d1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 30 Jan 2025 16:15:18 -0500 Subject: [PATCH 1702/2111] PYTHON-5085 - Convert test.test_index_management to async (#2101) --- test/asynchronous/test_index_management.py | 383 +++++++++++++++++++++ test/test_index_management.py | 57 +-- tools/synchro.py | 1 + 3 files changed, 417 insertions(+), 24 deletions(-) create mode 100644 test/asynchronous/test_index_management.py diff --git a/test/asynchronous/test_index_management.py b/test/asynchronous/test_index_management.py new file mode 100644 index 0000000000..2920c48b2f --- /dev/null +++ b/test/asynchronous/test_index_management.py @@ -0,0 +1,383 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the auth spec tests.""" +from __future__ import annotations + +import asyncio +import os +import pathlib +import sys +import time +import uuid +from typing import Any, Mapping + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils import AllowListEventListener, OvertCommandListener + +from pymongo.errors import OperationFailure +from pymongo.operations import SearchIndexModel +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +pytestmark = pytest.mark.index_management + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "index_management") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "index_management") + +_NAME = "test-search-index" + + +class TestCreateSearchIndex(AsyncIntegrationTest): + async def test_inputs(self): + if not os.environ.get("TEST_INDEX_MANAGEMENT"): + raise unittest.SkipTest("Skipping index management tests") + listener = AllowListEventListener("createSearchIndexes") + client = self.simple_client(event_listeners=[listener]) + coll = client.test.test + await coll.drop() + definition = dict(mappings=dict(dynamic=True)) + model_kwarg_list: list[Mapping[str, Any]] = [ + dict(definition=definition, name=None), + dict(definition=definition, name="test"), + ] + for model_kwargs in model_kwarg_list: + model = SearchIndexModel(**model_kwargs) + with self.assertRaises(OperationFailure): + await coll.create_search_index(model) + with self.assertRaises(OperationFailure): + await coll.create_search_index(model_kwargs) + + listener.reset() + with self.assertRaises(OperationFailure): + await coll.create_search_index({"definition": definition, "arbitraryOption": 1}) + self.assertEqual( + {"definition": definition, "arbitraryOption": 1}, + listener.events[0].command["indexes"][0], + ) + + listener.reset() + with self.assertRaises(OperationFailure): + await coll.create_search_index({"definition": definition, "type": "search"}) + self.assertEqual( + {"definition": definition, "type": "search"}, listener.events[0].command["indexes"][0] + ) + + +class SearchIndexIntegrationBase(AsyncPyMongoTestCase): + db_name = "test_search_index_base" + + @classmethod + def setUpClass(cls) -> None: + if not os.environ.get("TEST_INDEX_MANAGEMENT"): + raise unittest.SkipTest("Skipping index management tests") + cls.url = os.environ.get("MONGODB_URI") + cls.username = os.environ["DB_USER"] + cls.password = os.environ["DB_PASSWORD"] + cls.listener = OvertCommandListener() + + async def asyncSetUp(self) -> None: + self.client = self.simple_client( + self.url, + username=self.username, + password=self.password, + event_listeners=[self.listener], + ) + await self.client.drop_database(_NAME) + self.db = self.client[self.db_name] + + async def asyncTearDown(self): + await self.client.drop_database(_NAME) + + async def wait_for_ready(self, coll, name=_NAME, predicate=None): + """Wait for a search index to be ready.""" + indices: list[Mapping[str, Any]] = [] + if predicate is None: + predicate = lambda index: index.get("queryable") is True + + while True: + indices = await (await coll.list_search_indexes(name)).to_list() + if len(indices) and predicate(indices[0]): + return indices[0] + await asyncio.sleep(5) + + +class TestSearchIndexIntegration(SearchIndexIntegrationBase): + db_name = "test_search_index" + + async def test_comment_field(self): + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Create a new search index on ``coll0`` that implicitly passes its type. + search_definition = {"mappings": {"dynamic": False}} + self.listener.reset() + implicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition}, comment="foo" + ) + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + # Get the index definition. + self.listener.reset() + await (await coll0.list_search_indexes(name=implicit_search_resp, comment="foo")).next() + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + +class TestSearchIndexProse(SearchIndexIntegrationBase): + db_name = "test_search_index_prose" + + async def test_case_1(self): + """Driver can successfully create and list search indexes.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + + # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition: + model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}} + await coll0.insert_one({}) + resp = await coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index"``. + self.assertEqual(resp, _NAME) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: + # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``. + index = await self.wait_for_ready(coll0) + + # . Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model["definition"]) + + async def test_case_2(self): + """Driver can successfully create multiple indexes in batch.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper. + name1 = "test-search-index-1" + name2 = "test-search-index-2" + definition = {"mappings": {"dynamic": False}} + index_definitions: list[dict[str, Any]] = [ + {"name": name1, "definition": definition}, + {"name": name2, "definition": definition}, + ] + await coll0.create_search_indexes( + [SearchIndexModel(i["definition"], i["name"]) for i in index_definitions] + ) + + # .Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``. + indices = await (await coll0.list_search_indexes()).to_list() + names = [i["name"] for i in indices] + self.assertIn(name1, names) + self.assertIn(name2, names) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied. + # An index with the ``name`` of ``test-search-index-1`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index1``. + # An index with the ``name`` of ``test-search-index-2`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index2``. + index1 = await self.wait_for_ready(coll0, name1) + index2 = await self.wait_for_ready(coll0, name2) + + # Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }`` + for index in [index1, index2]: + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], definition) + + async def test_case_3(self): + """Driver can successfully drop search indexes.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Create a new search index on ``coll0``. + model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}} + resp = await coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index"``. + self.assertEqual(resp, "test-search-index") + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: + # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``. + await self.wait_for_ready(coll0) + + # Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name. + await coll0.drop_search_index(_NAME) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array. + t0 = time.time() + while True: + indices = await (await coll0.list_search_indexes()).to_list() + if indices: + break + if (time.time() - t0) / 60 > 5: + raise TimeoutError("Timed out waiting for index deletion") + await asyncio.sleep(5) + + async def test_case_4(self): + """Driver can update a search index.""" + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Create a new search index on ``coll0``. + model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}} + resp = await coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index"``. + self.assertEqual(resp, _NAME) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: + # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``. + await self.wait_for_ready(coll0) + + # Run a ``updateSearchIndex`` on ``coll0``. + # Assert that the command does not error and the server responds with a success. + model2: dict[str, Any] = {"name": _NAME, "definition": {"mappings": {"dynamic": True}}} + await coll0.update_search_index(_NAME, model2["definition"]) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied: + # An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``. + # The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``. + predicate = lambda index: index.get("queryable") is True and index.get("status") == "READY" + await self.wait_for_ready(coll0, predicate=predicate) + + # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``. + index = (await (await coll0.list_search_indexes(_NAME)).to_list())[0] + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model2["definition"]) + + async def test_case_5(self): + """``dropSearchIndex`` suppresses namespace not found errors.""" + # Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server. + coll0 = self.db[f"col{uuid.uuid4()}"] + + # Run a ``dropSearchIndex`` command and assert that no error is thrown. + await coll0.drop_search_index("foo") + + async def test_case_6(self): + """Driver can successfully create and list search indexes with non-default readConcern and writeConcern.""" + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Apply a write concern ``WriteConcern(w=1)`` and a read concern with ``ReadConcern(level="majority")`` to ``coll0``. + coll0 = coll0.with_options( + write_concern=WriteConcern(w="1"), read_concern=ReadConcern(level="majority") + ) + + # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. + name = "test-search-index-case6" + model = {"name": name, "definition": {"mappings": {"dynamic": False}}} + resp = await coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index-case6"``. + self.assertEqual(resp, name) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: + # - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``. + index = await self.wait_for_ready(coll0, name) + + # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model["definition"]) + + async def test_case_7(self): + """Driver handles index types.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Use these search and vector search definitions for indexes. + search_definition = {"mappings": {"dynamic": False}} + vector_search_definition = { + "fields": [ + { + "type": "vector", + "path": "plot_embedding", + "numDimensions": 1536, + "similarity": "euclidean", + }, + ] + } + + # Create a new search index on ``coll0`` that implicitly passes its type. + implicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition} + ) + + # Get the index definition. + resp = await (await coll0.list_search_indexes(name=implicit_search_resp)).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new search index on ``coll0`` that explicitly passes its type. + explicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-explicit", "type": "search", "definition": search_definition} + ) + + # Get the index definition. + resp = await (await coll0.list_search_indexes(name=explicit_search_resp)).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new vector search index on ``coll0`` that explicitly passes its type. + explicit_vector_resp = await coll0.create_search_index( + model={ + "name": _NAME + "-vector", + "type": "vectorSearch", + "definition": vector_search_definition, + } + ) + + # Get the index definition. + resp = await (await coll0.list_search_indexes(name=explicit_vector_resp)).next() + + # Assert that the index model contains the correct index type: ``"vectorSearch"``. + self.assertEqual(resp["type"], "vectorSearch") + + # Catch the error raised when trying to create a vector search index without specifying the type + with self.assertRaises(OperationFailure) as e: + await coll0.create_search_index( + model={"name": _NAME + "-error", "definition": vector_search_definition} + ) + self.assertIn("Attribute mappings missing.", e.exception.details["errmsg"]) + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_index_management.py b/test/test_index_management.py index 6ca726e2e0..5135e43f1f 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -15,7 +15,9 @@ """Run the auth spec tests.""" from __future__ import annotations +import asyncio import os +import pathlib import sys import time import uuid @@ -27,16 +29,22 @@ from test import IntegrationTest, PyMongoTestCase, unittest from test.unified_format import generate_test_classes -from test.utils import AllowListEventListener, EventListener, OvertCommandListener +from test.utils import AllowListEventListener, OvertCommandListener from pymongo.errors import OperationFailure from pymongo.operations import SearchIndexModel from pymongo.read_concern import ReadConcern from pymongo.write_concern import WriteConcern +_IS_SYNC = True + pytestmark = pytest.mark.index_management -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "index_management") +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "index_management") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "index_management") _NAME = "test-search-index" @@ -82,23 +90,25 @@ class SearchIndexIntegrationBase(PyMongoTestCase): @classmethod def setUpClass(cls) -> None: - super().setUpClass() if not os.environ.get("TEST_INDEX_MANAGEMENT"): raise unittest.SkipTest("Skipping index management tests") - url = os.environ.get("MONGODB_URI") - username = os.environ["DB_USER"] - password = os.environ["DB_PASSWORD"] - cls.listener = listener = OvertCommandListener() - cls.client = cls.unmanaged_simple_client( - url, username=username, password=password, event_listeners=[listener] + cls.url = os.environ.get("MONGODB_URI") + cls.username = os.environ["DB_USER"] + cls.password = os.environ["DB_PASSWORD"] + cls.listener = OvertCommandListener() + + def setUp(self) -> None: + self.client = self.simple_client( + self.url, + username=self.username, + password=self.password, + event_listeners=[self.listener], ) - cls.client.drop_database(_NAME) - cls.db = cls.client[cls.db_name] + self.client.drop_database(_NAME) + self.db = self.client[self.db_name] - @classmethod - def tearDownClass(cls): - cls.client.drop_database(_NAME) - cls.client.close() + def tearDown(self): + self.client.drop_database(_NAME) def wait_for_ready(self, coll, name=_NAME, predicate=None): """Wait for a search index to be ready.""" @@ -107,10 +117,9 @@ def wait_for_ready(self, coll, name=_NAME, predicate=None): predicate = lambda index: index.get("queryable") is True while True: - indices = list(coll.list_search_indexes(name)) + indices = (coll.list_search_indexes(name)).to_list() if len(indices) and predicate(indices[0]): return indices[0] - break time.sleep(5) @@ -133,7 +142,7 @@ def test_comment_field(self): # Get the index definition. self.listener.reset() - coll0.list_search_indexes(name=implicit_search_resp, comment="foo").next() + (coll0.list_search_indexes(name=implicit_search_resp, comment="foo")).next() event = self.listener.events[0] self.assertEqual(event.command["comment"], "foo") @@ -183,7 +192,7 @@ def test_case_2(self): ) # .Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``. - indices = list(coll0.list_search_indexes()) + indices = (coll0.list_search_indexes()).to_list() names = [i["name"] for i in indices] self.assertIn(name1, names) self.assertIn(name2, names) @@ -223,7 +232,7 @@ def test_case_3(self): # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array. t0 = time.time() while True: - indices = list(coll0.list_search_indexes()) + indices = (coll0.list_search_indexes()).to_list() if indices: break if (time.time() - t0) / 60 > 5: @@ -259,7 +268,7 @@ def test_case_4(self): self.wait_for_ready(coll0, predicate=predicate) # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``. - index = list(coll0.list_search_indexes(_NAME))[0] + index = ((coll0.list_search_indexes(_NAME)).to_list())[0] self.assertIn("latestDefinition", index) self.assertEqual(index["latestDefinition"], model2["definition"]) @@ -324,7 +333,7 @@ def test_case_7(self): ) # Get the index definition. - resp = coll0.list_search_indexes(name=implicit_search_resp).next() + resp = (coll0.list_search_indexes(name=implicit_search_resp)).next() # Assert that the index model contains the correct index type: ``"search"``. self.assertEqual(resp["type"], "search") @@ -335,7 +344,7 @@ def test_case_7(self): ) # Get the index definition. - resp = coll0.list_search_indexes(name=explicit_search_resp).next() + resp = (coll0.list_search_indexes(name=explicit_search_resp)).next() # Assert that the index model contains the correct index type: ``"search"``. self.assertEqual(resp["type"], "search") @@ -350,7 +359,7 @@ def test_case_7(self): ) # Get the index definition. - resp = coll0.list_search_indexes(name=explicit_vector_resp).next() + resp = (coll0.list_search_indexes(name=explicit_vector_resp)).next() # Assert that the index model contains the correct index type: ``"vectorSearch"``. self.assertEqual(resp["type"], "vectorSearch") diff --git a/tools/synchro.py b/tools/synchro.py index e20a8facda..08281c73d0 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -206,6 +206,7 @@ def async_only_test(f: str) -> bool: "test_database.py", "test_data_lake.py", "test_encryption.py", + "test_index_management.py", "test_grid_file.py", "test_gridfs_spec.py", "test_logger.py", From 0a1471d8f99c5f48cf7937f7008f942c8eb6c5e4 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 30 Jan 2025 16:29:52 -0500 Subject: [PATCH 1703/2111] PYTHON-5084 - Convert test.test_heartbeat_monitoring to async (#2100) --- .../asynchronous/test_heartbeat_monitoring.py | 97 +++++++++++++++++++ test/test_client.py | 2 +- test/test_heartbeat_monitoring.py | 34 ++++--- test/utils.py | 59 ++++++++++- tools/synchro.py | 3 + 5 files changed, 177 insertions(+), 18 deletions(-) create mode 100644 test/asynchronous/test_heartbeat_monitoring.py diff --git a/test/asynchronous/test_heartbeat_monitoring.py b/test/asynchronous/test_heartbeat_monitoring.py new file mode 100644 index 0000000000..ff595a8144 --- /dev/null +++ b/test/asynchronous/test_heartbeat_monitoring.py @@ -0,0 +1,97 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitoring of the server heartbeats.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, client_knobs, unittest +from test.utils import AsyncMockPool, HeartbeatEventListener, async_wait_until + +from pymongo.asynchronous.monitor import Monitor +from pymongo.errors import ConnectionFailure +from pymongo.hello import Hello, HelloCompat + +_IS_SYNC = False + + +class TestHeartbeatMonitoring(AsyncIntegrationTest): + async def create_mock_monitor(self, responses, uri, expected_results): + listener = HeartbeatEventListener() + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + + class MockMonitor(Monitor): + async def _check_with_socket(self, *args, **kwargs): + if isinstance(responses[1], Exception): + raise responses[1] + return Hello(responses[1]), 99 + + _ = await self.async_single_client( + h=uri, + event_listeners=(listener,), + _monitor_class=MockMonitor, + _pool_class=AsyncMockPool, + connect=True, + ) + + expected_len = len(expected_results) + # Wait for *at least* expected_len number of results. The + # monitor thread may run multiple times during the execution + # of this test. + await async_wait_until( + lambda: len(listener.events) >= expected_len, "publish all events" + ) + + # zip gives us len(expected_results) pairs. + for expected, actual in zip(expected_results, listener.events): + self.assertEqual(expected, actual.__class__.__name__) + self.assertEqual(actual.connection_id, responses[0]) + if expected != "ServerHeartbeatStartedEvent": + if isinstance(actual.reply, Hello): + self.assertEqual(actual.duration, 99) + self.assertEqual(actual.reply._doc, responses[1]) + else: + self.assertEqual(actual.reply, responses[1]) + + async def test_standalone(self): + responses = ( + ("a", 27017), + {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1}, + ) + uri = "mongodb://a:27017" + expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"] + + await self.create_mock_monitor(responses, uri, expected_results) + + async def test_standalone_error(self): + responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE")) + uri = "mongodb://a:27017" + # _check_with_socket failing results in a second attempt. + expected_results = [ + "ServerHeartbeatStartedEvent", + "ServerHeartbeatFailedEvent", + "ServerHeartbeatStartedEvent", + "ServerHeartbeatFailedEvent", + ] + + await self.create_mock_monitor(responses, uri, expected_results) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_client.py b/test/test_client.py index 2a33077f5f..cdc7691c28 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -2399,7 +2399,7 @@ def test_reconnect(self): # MongoClient discovers it's alone. The first attempt raises either # ServerSelectionTimeoutError or AutoReconnect (from - # AsyncMockPool.get_socket). + # MockPool.get_socket). with self.assertRaises(AutoReconnect): c.db.collection.find_one() diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 5e203a33b3..0523d0ba4d 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -26,6 +26,8 @@ from pymongo.hello import Hello, HelloCompat from pymongo.synchronous.monitor import Monitor +_IS_SYNC = True + class TestHeartbeatMonitoring(IntegrationTest): def create_mock_monitor(self, responses, uri, expected_results): @@ -40,8 +42,12 @@ def _check_with_socket(self, *args, **kwargs): raise responses[1] return Hello(responses[1]), 99 - m = self.single_client( - h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool + _ = self.single_client( + h=uri, + event_listeners=(listener,), + _monitor_class=MockMonitor, + _pool_class=MockPool, + connect=True, ) expected_len = len(expected_results) @@ -50,20 +56,16 @@ def _check_with_socket(self, *args, **kwargs): # of this test. wait_until(lambda: len(listener.events) >= expected_len, "publish all events") - try: - # zip gives us len(expected_results) pairs. - for expected, actual in zip(expected_results, listener.events): - self.assertEqual(expected, actual.__class__.__name__) - self.assertEqual(actual.connection_id, responses[0]) - if expected != "ServerHeartbeatStartedEvent": - if isinstance(actual.reply, Hello): - self.assertEqual(actual.duration, 99) - self.assertEqual(actual.reply._doc, responses[1]) - else: - self.assertEqual(actual.reply, responses[1]) - - finally: - m.close() + # zip gives us len(expected_results) pairs. + for expected, actual in zip(expected_results, listener.events): + self.assertEqual(expected, actual.__class__.__name__) + self.assertEqual(actual.connection_id, responses[0]) + if expected != "ServerHeartbeatStartedEvent": + if isinstance(actual.reply, Hello): + self.assertEqual(actual.duration, 99) + self.assertEqual(actual.reply._doc, responses[1]) + else: + self.assertEqual(actual.reply, responses[1]) def test_standalone(self): responses = ( diff --git a/test/utils.py b/test/utils.py index 69154bc63b..91000a636a 100644 --- a/test/utils.py +++ b/test/utils.py @@ -43,7 +43,7 @@ from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat from pymongo.helpers_shared import _SENSITIVE_COMMANDS -from pymongo.lock import _create_lock +from pymongo.lock import _async_create_lock, _create_lock from pymongo.monitoring import ( ConnectionCheckedInEvent, ConnectionCheckedOutEvent, @@ -312,6 +312,22 @@ def failed(self, event): self.event_list.append("serverHeartbeatFailedEvent") +class AsyncMockConnection: + def __init__(self): + self.cancel_context = _CancellationContext() + self.more_to_come = False + self.id = random.randint(0, 100) + + def close_conn(self, reason): + pass + + def __aenter__(self): + return self + + def __aexit__(self, exc_type, exc_val, exc_tb): + pass + + class MockConnection: def __init__(self): self.cancel_context = _CancellationContext() @@ -328,6 +344,47 @@ def __exit__(self, exc_type, exc_val, exc_tb): pass +class AsyncMockPool: + def __init__(self, address, options, handshake=True, client_id=None): + self.gen = _PoolGeneration() + self._lock = _async_create_lock() + self.opts = options + self.operation_count = 0 + self.conns = [] + + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) + + @contextlib.asynccontextmanager + async def checkout(self, handler=None): + yield AsyncMockConnection() + + async def checkin(self, *args, **kwargs): + pass + + async def _reset(self, service_id=None): + async with self._lock: + self.gen.inc(service_id) + + async def ready(self): + pass + + async def reset(self, service_id=None, interrupt_connections=False): + await self._reset() + + async def reset_without_pause(self): + await self._reset() + + async def close(self): + await self._reset() + + async def update_is_writable(self, is_writable): + pass + + async def remove_stale_sockets(self, *args, **kwargs): + pass + + class MockPool: def __init__(self, address, options, handshake=True, client_id=None): self.gen = _PoolGeneration() diff --git a/tools/synchro.py b/tools/synchro.py index 08281c73d0..74b7c80533 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -119,6 +119,8 @@ "_async_create_lock": "_create_lock", "_async_create_condition": "_create_condition", "_async_cond_wait": "_cond_wait", + "AsyncMockConnection": "MockConnection", + "AsyncMockPool": "MockPool", } docstring_replacements: dict[tuple[str, str], str] = { @@ -206,6 +208,7 @@ def async_only_test(f: str) -> bool: "test_database.py", "test_data_lake.py", "test_encryption.py", + "test_heartbeat_monitoring.py", "test_index_management.py", "test_grid_file.py", "test_gridfs_spec.py", From c8d3afdefd627d60dc47201681e4fcd65289815e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 30 Jan 2025 16:30:04 -0500 Subject: [PATCH 1704/2111] PYTHON-5086 - Convert test.json_util integration test to async (#2102) --- .../test_json_util_integration.py | 28 +++++++++++++++++++ test/test_json_util.py | 23 ++------------- test/test_json_util_integration.py | 28 +++++++++++++++++++ tools/synchro.py | 1 + 4 files changed, 59 insertions(+), 21 deletions(-) create mode 100644 test/asynchronous/test_json_util_integration.py create mode 100644 test/test_json_util_integration.py diff --git a/test/asynchronous/test_json_util_integration.py b/test/asynchronous/test_json_util_integration.py new file mode 100644 index 0000000000..4c02792d89 --- /dev/null +++ b/test/asynchronous/test_json_util_integration.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from test.asynchronous import AsyncIntegrationTest +from typing import Any, List, MutableMapping + +from bson import Binary, Code, DBRef, ObjectId, json_util +from bson.binary import USER_DEFINED_SUBTYPE + +_IS_SYNC = False + + +class TestJsonUtilRoundtrip(AsyncIntegrationTest): + async def test_cursor(self): + db = self.db + + await db.drop_collection("test") + docs: List[MutableMapping[str, Any]] = [ + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, + ] + + await db.test.insert_many(docs) + reloaded_docs = json_util.loads(json_util.dumps(await (db.test.find()).to_list())) + for doc in docs: + self.assertTrue(doc in reloaded_docs) diff --git a/test/test_json_util.py b/test/test_json_util.py index 821ca76da0..8aed4a82bc 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -21,13 +21,13 @@ import sys import uuid from collections import OrderedDict -from typing import Any, List, MutableMapping, Tuple, Type +from typing import Any, Tuple, Type from bson.codec_options import CodecOptions, DatetimeConversion sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import unittest from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, DatetimeMS, json_util from bson.binary import ( @@ -636,24 +636,5 @@ class MyBinary(Binary): self.assertEqual(json_util.dumps(MyBinary(b"bin", USER_DEFINED_SUBTYPE)), expected_json) -class TestJsonUtilRoundtrip(IntegrationTest): - def test_cursor(self): - db = self.db - - db.drop_collection("test") - docs: List[MutableMapping[str, Any]] = [ - {"foo": [1, 2]}, - {"bar": {"hello": "world"}}, - {"code": Code("function x() { return 1; }")}, - {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, - {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, - ] - - db.test.insert_many(docs) - reloaded_docs = json_util.loads(json_util.dumps(db.test.find())) - for doc in docs: - self.assertTrue(doc in reloaded_docs) - - if __name__ == "__main__": unittest.main() diff --git a/test/test_json_util_integration.py b/test/test_json_util_integration.py new file mode 100644 index 0000000000..acab4f3182 --- /dev/null +++ b/test/test_json_util_integration.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from test import IntegrationTest +from typing import Any, List, MutableMapping + +from bson import Binary, Code, DBRef, ObjectId, json_util +from bson.binary import USER_DEFINED_SUBTYPE + +_IS_SYNC = True + + +class TestJsonUtilRoundtrip(IntegrationTest): + def test_cursor(self): + db = self.db + + db.drop_collection("test") + docs: List[MutableMapping[str, Any]] = [ + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, + ] + + db.test.insert_many(docs) + reloaded_docs = json_util.loads(json_util.dumps((db.test.find()).to_list())) + for doc in docs: + self.assertTrue(doc in reloaded_docs) diff --git a/tools/synchro.py b/tools/synchro.py index 74b7c80533..dc272929ad 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -211,6 +211,7 @@ def async_only_test(f: str) -> bool: "test_heartbeat_monitoring.py", "test_index_management.py", "test_grid_file.py", + "test_json_util_integration.py", "test_gridfs_spec.py", "test_logger.py", "test_monitoring.py", From 19fdf7ccebf4c5ec45f54a976abeecb4ebcae1da Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 31 Jan 2025 11:39:48 -0500 Subject: [PATCH 1705/2111] PYTHON-5093 - Convert test.test_read_concern to async (#2109) --- test/asynchronous/test_read_concern.py | 122 +++++++++++++++++++++++++ test/test_read_concern.py | 12 ++- tools/synchro.py | 1 + 3 files changed, 130 insertions(+), 5 deletions(-) create mode 100644 test/asynchronous/test_read_concern.py diff --git a/test/asynchronous/test_read_concern.py b/test/asynchronous/test_read_concern.py new file mode 100644 index 0000000000..fbc07a5c36 --- /dev/null +++ b/test/asynchronous/test_read_concern.py @@ -0,0 +1,122 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the read_concern module.""" +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils import OvertCommandListener + +from bson.son import SON +from pymongo.errors import OperationFailure +from pymongo.read_concern import ReadConcern + +_IS_SYNC = False + + +class TestReadConcern(AsyncIntegrationTest): + listener: OvertCommandListener + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + await async_client_context.client.pymongo_test.create_collection("coll") + + async def asyncTearDown(self): + await async_client_context.client.pymongo_test.drop_collection("coll") + + def test_read_concern(self): + rc = ReadConcern() + self.assertIsNone(rc.level) + self.assertTrue(rc.ok_for_legacy) + + rc = ReadConcern("majority") + self.assertEqual("majority", rc.level) + self.assertFalse(rc.ok_for_legacy) + + rc = ReadConcern("local") + self.assertEqual("local", rc.level) + self.assertTrue(rc.ok_for_legacy) + + self.assertRaises(TypeError, ReadConcern, 42) + + async def test_read_concern_uri(self): + uri = f"mongodb://{await async_client_context.pair}/?readConcernLevel=majority" + client = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(ReadConcern("majority"), client.read_concern) + + async def test_invalid_read_concern(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) + # We rely on the server to validate read concern. + with self.assertRaises(OperationFailure): + await coll.find_one() + + async def test_find_command(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + await coll.find({"field": "value"}).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await coll.find({"field": "value"}).to_list() + self.assertEqualCommand( + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.started_events[0].command, + ) + + async def test_command_cursor(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + await (await coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await (await coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) + + async def test_aggregate_out(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await ( + await coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}]) + ).to_list() + + # Aggregate with $out supports readConcern MongoDB 4.2 onwards. + if async_client_context.version >= (4, 1): + self.assertIn("readConcern", self.listener.started_events[0].command) + else: + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_concern.py b/test/test_read_concern.py index f7c0901422..8ec9865eaa 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -27,6 +27,8 @@ from pymongo.errors import OperationFailure from pymongo.read_concern import ReadConcern +_IS_SYNC = True + class TestReadConcern(IntegrationTest): listener: OvertCommandListener @@ -71,14 +73,14 @@ def test_invalid_read_concern(self): def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.find({"field": "value"})) + coll.find({"field": "value"}).to_list() self.assertNotIn("readConcern", self.listener.started_events[0].command) self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) - tuple(coll.find({"field": "value"})) + coll.find({"field": "value"}).to_list() self.assertEqualCommand( SON( [ @@ -93,19 +95,19 @@ def test_find_command(self): def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.aggregate([{"$match": {"field": "value"}}])) + (coll.aggregate([{"$match": {"field": "value"}}])).to_list() self.assertNotIn("readConcern", self.listener.started_events[0].command) self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) - tuple(coll.aggregate([{"$match": {"field": "value"}}])) + (coll.aggregate([{"$match": {"field": "value"}}])).to_list() self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) def test_aggregate_out(self): coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) - tuple(coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])) + (coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])).to_list() # Aggregate with $out supports readConcern MongoDB 4.2 onwards. if client_context.version >= (4, 1): diff --git a/tools/synchro.py b/tools/synchro.py index dc272929ad..59c5e1ad4f 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -216,6 +216,7 @@ def async_only_test(f: str) -> bool: "test_logger.py", "test_monitoring.py", "test_raw_bson.py", + "test_read_concern.py", "test_retryable_reads.py", "test_retryable_writes.py", "test_session.py", From 8f6249e2f9528895d1cb7f9d760095df60c58e96 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 31 Jan 2025 11:40:05 -0500 Subject: [PATCH 1706/2111] PYTHON-5091 - Convert test.test_on_demand_csfle to async (#2108) --- test/asynchronous/test_on_demand_csfle.py | 115 ++++++++++++++++++++++ test/test_on_demand_csfle.py | 16 ++- tools/synchro.py | 1 + 3 files changed, 123 insertions(+), 9 deletions(-) create mode 100644 test/asynchronous/test_on_demand_csfle.py diff --git a/test/asynchronous/test_on_demand_csfle.py b/test/asynchronous/test_on_demand_csfle.py new file mode 100644 index 0000000000..617e2ed8d6 --- /dev/null +++ b/test/asynchronous/test_on_demand_csfle.py @@ -0,0 +1,115 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption with on demand credentials.""" +from __future__ import annotations + +import os +import sys +import unittest + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context + +from bson.codec_options import CodecOptions +from pymongo.asynchronous.encryption import ( + _HAVE_PYMONGOCRYPT, + AsyncClientEncryption, + EncryptionError, +) + +_IS_SYNC = False + +pytestmark = pytest.mark.csfle + + +class TestonDemandGCPCredentials(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + async def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = AsyncClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + async def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = AsyncClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + await self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "keyVaultEndpoint": os.environ["KEY_VAULT_ENDPOINT"], + "keyName": os.environ["KEY_NAME"], + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + async def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = AsyncClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + async def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = AsyncClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + await self.client_encryption.create_data_key("azure", self.master_key) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index 023feca8c2..023d44f641 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -26,18 +26,20 @@ from test import IntegrationTest, client_context from bson.codec_options import CodecOptions -from pymongo.synchronous.encryption import _HAVE_PYMONGOCRYPT, ClientEncryption, EncryptionError +from pymongo.synchronous.encryption import ( + _HAVE_PYMONGOCRYPT, + ClientEncryption, + EncryptionError, +) + +_IS_SYNC = True pytestmark = pytest.mark.csfle class TestonDemandGCPCredentials(IntegrationTest): - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) - def setUpClass(cls): - super().setUpClass() - def setUp(self): super().setUp() self.master_key = { @@ -74,12 +76,8 @@ def test_02_success(self): class TestonDemandAzureCredentials(IntegrationTest): - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) - def setUpClass(cls): - super().setUpClass() - def setUp(self): super().setUp() self.master_key = { diff --git a/tools/synchro.py b/tools/synchro.py index 59c5e1ad4f..2cfce1f01c 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -215,6 +215,7 @@ def async_only_test(f: str) -> bool: "test_gridfs_spec.py", "test_logger.py", "test_monitoring.py", + "test_on_demand_csfle.py", "test_raw_bson.py", "test_read_concern.py", "test_retryable_reads.py", From c42f3d64213610be63abfa51c0225c6c12c5a6ba Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Fri, 31 Jan 2025 08:43:35 -0800 Subject: [PATCH 1707/2111] PYTHON-5079 Convert test.test_dns to async (#2096) --- test/asynchronous/test_dns.py | 221 ++++++++++++++++++++++++++++++++++ test/test_dns.py | 45 +++++-- tools/synchro.py | 1 + 3 files changed, 256 insertions(+), 11 deletions(-) create mode 100644 test/asynchronous/test_dns.py diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py new file mode 100644 index 0000000000..e24e0fb5ce --- /dev/null +++ b/test/asynchronous/test_dns.py @@ -0,0 +1,221 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the SRV support tests.""" +from __future__ import annotations + +import glob +import json +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + async_client_context, + unittest, +) +from test.utils import async_wait_until + +from pymongo.common import validate_read_preference_tags +from pymongo.errors import ConfigurationError +from pymongo.uri_parser import parse_uri, split_hosts + +_IS_SYNC = False + + +class TestDNSRepl(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "replica-set" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "replica-set" + ) + load_balanced = False + + @async_client_context.require_replica_set + def asyncSetUp(self): + pass + + +class TestDNSLoadBalanced(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "load-balanced" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "load-balanced" + ) + load_balanced = True + + @async_client_context.require_load_balancer + def asyncSetUp(self): + pass + + +class TestDNSSharded(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "srv_seedlist", "sharded") + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "sharded" + ) + load_balanced = False + + @async_client_context.require_mongos + def asyncSetUp(self): + pass + + +def create_test(test_case): + async def run_test(self): + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") + num_hosts = test_case.get("numHosts", len(hosts or [])) + + options = test_case.get("options", {}) + if "ssl" in options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") + # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. + needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) + if needs_tls and not async_client_context.tls: + self.skipTest("this test requires a TLS cluster") + if not needs_tls and async_client_context.tls: + self.skipTest("this test requires a non-TLS cluster") + + if seeds: + seeds = split_hosts(",".join(seeds)) + if hosts: + hosts = frozenset(split_hosts(",".join(hosts))) + + if seeds or num_seeds: + result = parse_uri(uri, validate=True) + if seeds is not None: + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) + if num_seeds is not None: + self.assertEqual(len(result["nodelist"]), num_seeds) + if options: + opts = result["options"] + if "readpreferencetags" in opts: + rpts = validate_read_preference_tags( + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) + if parsed_options: + for opt, expected in parsed_options.items(): + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) + + hostname = next(iter(async_client_context.client.nodes))[0] + # The replica set members must be configured as 'localhost'. + if hostname == "localhost": + copts = async_client_context.default_client_options.copy() + # Remove tls since SRV parsing should add it automatically. + copts.pop("tls", None) + if async_client_context.tls: + # Our test certs don't support the SRV hosts used in these + # tests. + copts["tlsAllowInvalidHostnames"] = True + + client = self.simple_client(uri, **copts) + if client._options.connect: + await client.aconnect() + if num_seeds is not None: + self.assertEqual(len(client._topology_settings.seeds), num_seeds) + if hosts is not None: + await async_wait_until( + lambda: hosts == client.nodes, "match test hosts to client nodes" + ) + if num_hosts is not None: + await async_wait_until( + lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" + ) + if test_case.get("ping", True): + await client.admin.command("ping") + # XXX: we should block until SRV poller runs at least once + # and re-run these assertions. + else: + try: + parse_uri(uri) + except (ConfigurationError, ValueError): + pass + else: + self.fail("failed to raise an exception") + + return run_test + + +def create_tests(cls): + for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as dns_test_file: + test_method = create_test(json.load(dns_test_file)) + setattr(cls, "test_" + test_suffix, test_method) + + +create_tests(TestDNSRepl) +create_tests(TestDNSLoadBalanced) +create_tests(TestDNSSharded) + + +class TestParsingErrors(AsyncPyMongoTestCase): + async def test_invalid_host(self): + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: mongodb is not", + self.simple_client, + "mongodb+srv://mongodb", + ) + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: mongodb.com is not", + self.simple_client, + "mongodb+srv://mongodb.com", + ) + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: an IP address is not", + self.simple_client, + "mongodb+srv://127.0.0.1", + ) + self.assertRaisesRegex( + ConfigurationError, + "Invalid URI host: an IP address is not", + self.simple_client, + "mongodb+srv://[::1]", + ) + + +class IsolatedAsyncioTestCaseInsensitive(AsyncIntegrationTest): + async def test_connect_case_insensitive(self): + client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_dns.py b/test/test_dns.py index f2185efb1b..6f4736fd5e 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -18,22 +18,35 @@ import glob import json import os +import pathlib import sys sys.path[0:0] = [""] -from test import IntegrationTest, PyMongoTestCase, client_context, unittest +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + unittest, +) from test.utils import wait_until from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError from pymongo.uri_parser import parse_uri, split_hosts +_IS_SYNC = True + class TestDNSRepl(PyMongoTestCase): - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "replica-set" - ) + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "replica-set" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "replica-set" + ) load_balanced = False @client_context.require_replica_set @@ -42,9 +55,14 @@ def setUp(self): class TestDNSLoadBalanced(PyMongoTestCase): - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "load-balanced" - ) + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "load-balanced" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "load-balanced" + ) load_balanced = True @client_context.require_load_balancer @@ -53,7 +71,12 @@ def setUp(self): class TestDNSSharded(PyMongoTestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "sharded") + if _IS_SYNC: + TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "srv_seedlist", "sharded") + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "sharded" + ) load_balanced = False @client_context.require_mongos @@ -119,7 +142,9 @@ def run_test(self): # tests. copts["tlsAllowInvalidHostnames"] = True - client = PyMongoTestCase.unmanaged_simple_client(uri, **copts) + client = self.simple_client(uri, **copts) + if client._options.connect: + client._connect() if num_seeds is not None: self.assertEqual(len(client._topology_settings.seeds), num_seeds) if hosts is not None: @@ -132,7 +157,6 @@ def run_test(self): client.admin.command("ping") # XXX: we should block until SRV poller runs at least once # and re-run these assertions. - client.close() else: try: parse_uri(uri) @@ -188,7 +212,6 @@ def test_invalid_host(self): class TestCaseInsensitive(IntegrationTest): def test_connect_case_insensitive(self): client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") - self.addCleanup(client.close) self.assertGreater(len(client.topology_description.server_descriptions()), 1) diff --git a/tools/synchro.py b/tools/synchro.py index 2cfce1f01c..ef82db756d 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -207,6 +207,7 @@ def async_only_test(f: str) -> bool: "test_custom_types.py", "test_database.py", "test_data_lake.py", + "test_dns.py", "test_encryption.py", "test_heartbeat_monitoring.py", "test_index_management.py", From 44d1d40d6574d1cb51479bdf68a766d0ade37079 Mon Sep 17 00:00:00 2001 From: The Light <59693377+tejaschauhan36912@users.noreply.github.com> Date: Sat, 1 Feb 2025 01:31:58 +0530 Subject: [PATCH 1708/2111] PYTHON-5115 Type validation errors should include the invalid type name (#2085) Co-authored-by: Iris Ho --- bson/__init__.py | 2 +- bson/binary.py | 6 ++-- bson/code.py | 4 +-- bson/codec_options.py | 12 ++++++-- bson/dbref.py | 4 +-- bson/decimal128.py | 2 +- bson/timestamp.py | 4 +-- gridfs/asynchronous/grid_file.py | 12 +++++--- gridfs/synchronous/grid_file.py | 12 +++++--- pymongo/__init__.py | 2 +- pymongo/_asyncio_lock.py | 2 +- pymongo/_azure_helpers.py | 2 +- pymongo/asynchronous/auth.py | 2 +- pymongo/asynchronous/auth_oidc.py | 4 ++- pymongo/asynchronous/client_session.py | 12 ++++++-- pymongo/asynchronous/collection.py | 10 +++---- pymongo/asynchronous/command_cursor.py | 6 ++-- pymongo/asynchronous/cursor.py | 28 +++++++++--------- pymongo/asynchronous/database.py | 8 ++++-- pymongo/asynchronous/encryption.py | 8 ++++-- pymongo/asynchronous/mongo_client.py | 12 +++++--- pymongo/auth_shared.py | 4 +-- pymongo/collation.py | 2 +- pymongo/common.py | 40 ++++++++++++++++---------- pymongo/compression_support.py | 2 +- pymongo/driver_info.py | 2 +- pymongo/encryption_options.py | 4 ++- pymongo/helpers_shared.py | 9 ++++-- pymongo/monitoring.py | 8 ++++-- pymongo/read_concern.py | 2 +- pymongo/ssl_support.py | 2 +- pymongo/synchronous/auth.py | 2 +- pymongo/synchronous/auth_oidc.py | 4 ++- pymongo/synchronous/client_session.py | 12 ++++++-- pymongo/synchronous/collection.py | 10 +++---- pymongo/synchronous/command_cursor.py | 6 ++-- pymongo/synchronous/cursor.py | 28 +++++++++--------- pymongo/synchronous/database.py | 8 ++++-- pymongo/synchronous/encryption.py | 8 ++++-- pymongo/synchronous/mongo_client.py | 12 +++++--- pymongo/uri_parser.py | 10 +++---- pymongo/write_concern.py | 4 +-- 42 files changed, 204 insertions(+), 129 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index fc6efe0d59..790ac06ef1 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1386,7 +1386,7 @@ def is_valid(bson: bytes) -> bool: :param bson: the data to be validated """ if not isinstance(bson, bytes): - raise TypeError("BSON data must be an instance of a subclass of bytes") + raise TypeError(f"BSON data must be an instance of a subclass of bytes, not {type(bson)}") try: _bson_to_dict(bson, DEFAULT_CODEC_OPTIONS) diff --git a/bson/binary.py b/bson/binary.py index f90dce226c..aab59cccbc 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -290,7 +290,7 @@ def __new__( subtype: int = BINARY_SUBTYPE, ) -> Binary: if not isinstance(subtype, int): - raise TypeError("subtype must be an instance of int") + raise TypeError(f"subtype must be an instance of int, not {type(subtype)}") if subtype >= 256 or subtype < 0: raise ValueError("subtype must be contained in [0, 256)") # Support any type that implements the buffer protocol. @@ -321,7 +321,7 @@ def from_uuid( .. versionadded:: 3.11 """ if not isinstance(uuid, UUID): - raise TypeError("uuid must be an instance of uuid.UUID") + raise TypeError(f"uuid must be an instance of uuid.UUID, not {type(uuid)}") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( @@ -470,7 +470,7 @@ def as_vector(self) -> BinaryVector: """ if self.subtype != VECTOR_SUBTYPE: - raise ValueError(f"Cannot decode subtype {self.subtype} as a vector.") + raise ValueError(f"Cannot decode subtype {self.subtype} as a vector") position = 0 dtype, padding = struct.unpack_from(" Code: if not isinstance(code, str): - raise TypeError("code must be an instance of str") + raise TypeError(f"code must be an instance of str, not {type(code)}") self = str.__new__(cls, code) @@ -67,7 +67,7 @@ def __new__( if scope is not None: if not isinstance(scope, _Mapping): - raise TypeError("scope must be an instance of dict") + raise TypeError(f"scope must be an instance of dict, not {type(scope)}") if self.__scope is not None: self.__scope.update(scope) # type: ignore else: diff --git a/bson/codec_options.py b/bson/codec_options.py index 3a0b83b7be..258a777a1b 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -401,17 +401,23 @@ def __new__( "uuid_representation must be a value from bson.binary.UuidRepresentation" ) if not isinstance(unicode_decode_error_handler, str): - raise ValueError("unicode_decode_error_handler must be a string") + raise ValueError( + f"unicode_decode_error_handler must be a string, not {type(unicode_decode_error_handler)}" + ) if tzinfo is not None: if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError("tzinfo must be an instance of datetime.tzinfo") + raise TypeError( + f"tzinfo must be an instance of datetime.tzinfo, not {type(tzinfo)}" + ) if not tz_aware: raise ValueError("cannot specify tzinfo without also setting tz_aware=True") type_registry = type_registry or TypeRegistry() if not isinstance(type_registry, TypeRegistry): - raise TypeError("type_registry must be an instance of TypeRegistry") + raise TypeError( + f"type_registry must be an instance of TypeRegistry, not {type(type_registry)}" + ) return tuple.__new__( cls, diff --git a/bson/dbref.py b/bson/dbref.py index 6c21b8162c..40bdb73cff 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -56,9 +56,9 @@ def __init__( .. seealso:: The MongoDB documentation on `dbrefs `_. """ if not isinstance(collection, str): - raise TypeError("collection must be an instance of str") + raise TypeError(f"collection must be an instance of str, not {type(collection)}") if database is not None and not isinstance(database, str): - raise TypeError("database must be an instance of str") + raise TypeError(f"database must be an instance of str, not {type(database)}") self.__collection = collection self.__id = id diff --git a/bson/decimal128.py b/bson/decimal128.py index 016afb5eb8..92c054d878 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -277,7 +277,7 @@ def from_bid(cls: Type[Decimal128], value: bytes) -> Decimal128: point in Binary Integer Decimal (BID) format). """ if not isinstance(value, bytes): - raise TypeError("value must be an instance of bytes") + raise TypeError(f"value must be an instance of bytes, not {type(value)}") if len(value) != 16: raise ValueError("value must be exactly 16 bytes") return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) # type: ignore diff --git a/bson/timestamp.py b/bson/timestamp.py index 3e76e7baad..949bd7b36c 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -58,9 +58,9 @@ def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): - raise TypeError("time must be an instance of int") + raise TypeError(f"time must be an instance of int, not {type(time)}") if not isinstance(inc, int): - raise TypeError("inc must be an instance of int") + raise TypeError(f"inc must be an instance of int, not {type(inc)}") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") if not 0 <= inc < UPPERBOUND: diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index a49d51d304..d15713c51b 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -100,7 +100,7 @@ def __init__(self, database: AsyncDatabase, collection: str = "fs"): .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(database, AsyncDatabase): - raise TypeError("database must be an instance of Database") + raise TypeError(f"database must be an instance of Database, not {type(database)}") database = _clear_entity_type_registry(database) @@ -503,7 +503,7 @@ def __init__( .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(db, AsyncDatabase): - raise TypeError("database must be an instance of AsyncDatabase") + raise TypeError(f"database must be an instance of AsyncDatabase, not {type(db)}") db = _clear_entity_type_registry(db) @@ -1082,7 +1082,9 @@ def __init__( :attr:`~pymongo.collection.AsyncCollection.write_concern` """ if not isinstance(root_collection, AsyncCollection): - raise TypeError("root_collection must be an instance of AsyncCollection") + raise TypeError( + f"root_collection must be an instance of AsyncCollection, not {type(root_collection)}" + ) if not root_collection.write_concern.acknowledged: raise ConfigurationError("root_collection must use acknowledged write_concern") @@ -1436,7 +1438,9 @@ def __init__( from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, AsyncCollection): - raise TypeError("root_collection must be an instance of AsyncCollection") + raise TypeError( + f"root_collection must be an instance of AsyncCollection, not {type(root_collection)}" + ) _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index 655f05f57a..ea0b53cfb7 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -100,7 +100,7 @@ def __init__(self, database: Database, collection: str = "fs"): .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(database, Database): - raise TypeError("database must be an instance of Database") + raise TypeError(f"database must be an instance of Database, not {type(database)}") database = _clear_entity_type_registry(database) @@ -501,7 +501,7 @@ def __init__( .. seealso:: The MongoDB documentation on `gridfs `_. """ if not isinstance(db, Database): - raise TypeError("database must be an instance of Database") + raise TypeError(f"database must be an instance of Database, not {type(db)}") db = _clear_entity_type_registry(db) @@ -1076,7 +1076,9 @@ def __init__( :attr:`~pymongo.collection.Collection.write_concern` """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an instance of Collection") + raise TypeError( + f"root_collection must be an instance of Collection, not {type(root_collection)}" + ) if not root_collection.write_concern.acknowledged: raise ConfigurationError("root_collection must use acknowledged write_concern") @@ -1426,7 +1428,9 @@ def __init__( from the server. Metadata is fetched when first needed. """ if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an instance of Collection") + raise TypeError( + f"root_collection must be an instance of Collection, not {type(root_collection)}" + ) _disallow_transactions(session) root_collection = _clear_entity_type_registry(root_collection) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 58f6ff338b..8d6def1606 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -160,7 +160,7 @@ def timeout(seconds: Optional[float]) -> ContextManager[None]: .. versionadded:: 4.2 """ if not isinstance(seconds, (int, float, type(None))): - raise TypeError("timeout must be None, an int, or a float") + raise TypeError(f"timeout must be None, an int, or a float, not {type(seconds)}") if seconds and seconds < 0: raise ValueError("timeout cannot be negative") if seconds is not None: diff --git a/pymongo/_asyncio_lock.py b/pymongo/_asyncio_lock.py index 669b0f63a7..a9c409d486 100644 --- a/pymongo/_asyncio_lock.py +++ b/pymongo/_asyncio_lock.py @@ -160,7 +160,7 @@ def release(self) -> None: self._locked = False self._wake_up_first() else: - raise RuntimeError("Lock is not acquired.") + raise RuntimeError("Lock is not acquired") def _wake_up_first(self) -> None: """Ensure that the first waiter will wake up.""" diff --git a/pymongo/_azure_helpers.py b/pymongo/_azure_helpers.py index 704c561cd5..6e86ab5670 100644 --- a/pymongo/_azure_helpers.py +++ b/pymongo/_azure_helpers.py @@ -46,7 +46,7 @@ def _get_azure_response( try: data = json.loads(body) except Exception: - raise ValueError("Azure IMDS response must be in JSON format.") from None + raise ValueError("Azure IMDS response must be in JSON format") from None for key in ["access_token", "expires_in"]: if not data.get(key): diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py index b1e6d0125b..8cc4edf19c 100644 --- a/pymongo/asynchronous/auth.py +++ b/pymongo/asynchronous/auth.py @@ -161,7 +161,7 @@ def _password_digest(username: str, password: str) -> str: if len(password) == 0: raise ValueError("password can't be empty") if not isinstance(username, str): - raise TypeError("username must be an instance of str") + raise TypeError(f"username must be an instance of str, not {type(username)}") md5hash = hashlib.md5() # noqa: S324 data = f"{username}:mongo:{password}" diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py index f1c15045de..38346648c5 100644 --- a/pymongo/asynchronous/auth_oidc.py +++ b/pymongo/asynchronous/auth_oidc.py @@ -213,7 +213,9 @@ def _get_access_token(self) -> Optional[str]: ) resp = cb.fetch(context) if not isinstance(resp, OIDCCallbackResult): - raise ValueError("Callback result must be of type OIDCCallbackResult") + raise ValueError( + f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" + ) self.refresh_token = resp.refresh_token self.access_token = resp.access_token self.token_gen_id += 1 diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index d80495d804..4c5171a350 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -310,7 +310,9 @@ def __init__( ) if max_commit_time_ms is not None: if not isinstance(max_commit_time_ms, int): - raise TypeError("max_commit_time_ms must be an integer or None") + raise TypeError( + f"max_commit_time_ms must be an integer or None, not {type(max_commit_time_ms)}" + ) @property def read_concern(self) -> Optional[ReadConcern]: @@ -902,7 +904,9 @@ def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: another `AsyncClientSession` instance. """ if not isinstance(cluster_time, _Mapping): - raise TypeError("cluster_time must be a subclass of collections.Mapping") + raise TypeError( + f"cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}" + ) if not isinstance(cluster_time.get("clusterTime"), Timestamp): raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) @@ -923,7 +927,9 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: another `AsyncClientSession` instance. """ if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") + raise TypeError( + f"operation_time must be an instance of bson.timestamp.Timestamp, not {type(operation_time)}" + ) self._advance_operation_time(operation_time) def _process_response(self, reply: Mapping[str, Any]) -> None: diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 9b73423627..e83a391439 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -228,7 +228,7 @@ def __init__( read_concern or database.read_concern, ) if not isinstance(name, str): - raise TypeError("name must be an instance of str") + raise TypeError(f"name must be an instance of str, not {type(name)}") from pymongo.asynchronous.database import AsyncDatabase if not isinstance(database, AsyncDatabase): @@ -2475,7 +2475,7 @@ async def _drop_index( name = helpers_shared._gen_index_name(index_or_name) if not isinstance(name, str): - raise TypeError("index_or_name must be an instance of str or list") + raise TypeError(f"index_or_name must be an instance of str or list, not {type(name)}") cmd = {"dropIndexes": self._name, "index": name} cmd.update(kwargs) @@ -3078,7 +3078,7 @@ async def rename( """ if not isinstance(new_name, str): - raise TypeError("new_name must be an instance of str") + raise TypeError(f"new_name must be an instance of str, not {type(new_name)}") if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") @@ -3148,7 +3148,7 @@ async def distinct( """ if not isinstance(key, str): - raise TypeError("key must be an instance of str") + raise TypeError(f"key must be an instance of str, not {type(key)}") cmd = {"distinct": self._name, "key": key} if filter is not None: if "query" in kwargs: @@ -3196,7 +3196,7 @@ async def _find_and_modify( common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): raise ValueError( - "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" + f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd = {"findAndModify": self._name, "query": filter, "new": return_document} diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py index 5a4559bd77..353c5e91c2 100644 --- a/pymongo/asynchronous/command_cursor.py +++ b/pymongo/asynchronous/command_cursor.py @@ -94,7 +94,9 @@ def __init__( self.batch_size(batch_size) if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) def __del__(self) -> None: self._die_no_lock() @@ -115,7 +117,7 @@ def batch_size(self, batch_size: int) -> AsyncCommandCursor[_DocumentType]: :param batch_size: The size of each batch of results requested. """ if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") if batch_size < 0: raise ValueError("batch_size must be >= 0") diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 8193e53282..9101197ce2 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -146,9 +146,9 @@ def __init__( spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) if not isinstance(skip, int): - raise TypeError("skip must be an instance of int") + raise TypeError(f"skip must be an instance of int, not {type(skip)}") if not isinstance(limit, int): - raise TypeError("limit must be an instance of int") + raise TypeError(f"limit must be an instance of int, not {type(limit)}") validate_boolean("no_cursor_timeout", no_cursor_timeout) if no_cursor_timeout and not self._explicit_session: warnings.warn( @@ -171,7 +171,7 @@ def __init__( validate_boolean("allow_partial_results", allow_partial_results) validate_boolean("oplog_replay", oplog_replay) if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") if batch_size < 0: raise ValueError("batch_size must be >= 0") # Only set if allow_disk_use is provided by the user, else None. @@ -388,7 +388,7 @@ async def add_option(self, mask: int) -> AsyncCursor[_DocumentType]: cursor.add_option(2) """ if not isinstance(mask, int): - raise TypeError("mask must be an int") + raise TypeError(f"mask must be an int, not {type(mask)}") self._check_okay_to_chain() if mask & _QUERY_OPTIONS["exhaust"]: @@ -408,7 +408,7 @@ def remove_option(self, mask: int) -> AsyncCursor[_DocumentType]: cursor.remove_option(2) """ if not isinstance(mask, int): - raise TypeError("mask must be an int") + raise TypeError(f"mask must be an int, not {type(mask)}") self._check_okay_to_chain() if mask & _QUERY_OPTIONS["exhaust"]: @@ -432,7 +432,7 @@ def allow_disk_use(self, allow_disk_use: bool) -> AsyncCursor[_DocumentType]: .. versionadded:: 3.11 """ if not isinstance(allow_disk_use, bool): - raise TypeError("allow_disk_use must be a bool") + raise TypeError(f"allow_disk_use must be a bool, not {type(allow_disk_use)}") self._check_okay_to_chain() self._allow_disk_use = allow_disk_use @@ -451,7 +451,7 @@ def limit(self, limit: int) -> AsyncCursor[_DocumentType]: .. seealso:: The MongoDB documentation on `limit `_. """ if not isinstance(limit, int): - raise TypeError("limit must be an integer") + raise TypeError(f"limit must be an integer, not {type(limit)}") if self._exhaust: raise InvalidOperation("Can't use limit and exhaust together.") self._check_okay_to_chain() @@ -479,7 +479,7 @@ def batch_size(self, batch_size: int) -> AsyncCursor[_DocumentType]: :param batch_size: The size of each batch of results requested. """ if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") if batch_size < 0: raise ValueError("batch_size must be >= 0") self._check_okay_to_chain() @@ -499,7 +499,7 @@ def skip(self, skip: int) -> AsyncCursor[_DocumentType]: :param skip: the number of results to skip """ if not isinstance(skip, int): - raise TypeError("skip must be an integer") + raise TypeError(f"skip must be an integer, not {type(skip)}") if skip < 0: raise ValueError("skip must be >= 0") self._check_okay_to_chain() @@ -520,7 +520,7 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> AsyncCursor[_DocumentType]: :param max_time_ms: the time limit after which the operation is aborted """ if not isinstance(max_time_ms, int) and max_time_ms is not None: - raise TypeError("max_time_ms must be an integer or None") + raise TypeError(f"max_time_ms must be an integer or None, not {type(max_time_ms)}") self._check_okay_to_chain() self._max_time_ms = max_time_ms @@ -543,7 +543,9 @@ def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> AsyncCursor[_Do .. versionadded:: 3.2 """ if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) self._check_okay_to_chain() # Ignore max_await_time_ms if not tailable or await_data is False. @@ -679,7 +681,7 @@ def max(self, spec: _Sort) -> AsyncCursor[_DocumentType]: .. versionadded:: 2.7 """ if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") self._check_okay_to_chain() self._max = dict(spec) @@ -701,7 +703,7 @@ def min(self, spec: _Sort) -> AsyncCursor[_DocumentType]: .. versionadded:: 2.7 """ if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") self._check_okay_to_chain() self._min = dict(spec) diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index 98a0a6ff3b..4aba9ab0e9 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -122,7 +122,7 @@ def __init__( from pymongo.asynchronous.mongo_client import AsyncMongoClient if not isinstance(name, str): - raise TypeError("name must be an instance of str") + raise TypeError(f"name must be an instance of str, not {type(name)}") if not isinstance(client, AsyncMongoClient): # This is for compatibility with mocked and subclassed types, such as in Motor. @@ -1310,7 +1310,7 @@ async def drop_collection( name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str") + raise TypeError(f"name_or_collection must be an instance of str, not {type(name)}") encrypted_fields = await self._get_encrypted_fields( {"encryptedFields": encrypted_fields}, name, @@ -1374,7 +1374,9 @@ async def validate_collection( name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or AsyncCollection") + raise TypeError( + f"name_or_collection must be an instance of str or AsyncCollection, not {type(name)}" + ) cmd = {"validate": name, "scandata": scandata, "full": full} if comment is not None: cmd["comment"] = comment diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 98ab68527c..f777104cf5 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -322,7 +322,9 @@ async def insert_data_key(self, data_key: bytes) -> Binary: raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) data_key_id = raw_doc.get("_id") if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: - raise TypeError("data_key _id must be Binary with a UUID subtype") + raise TypeError( + f"data_key _id must be Binary with a UUID subtype, not {type(data_key_id)}" + ) assert self.key_vault_coll is not None await self.key_vault_coll.insert_one(raw_doc) @@ -644,7 +646,9 @@ def __init__( ) if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + raise TypeError( + f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}" + ) if not isinstance(key_vault_client, AsyncMongoClient): # This is for compatibility with mocked and subclassed types, such as in Motor. diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 1600e50628..cf7de19c2f 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -750,7 +750,7 @@ def __init__( if port is None: port = self.PORT if not isinstance(port, int): - raise TypeError("port must be an instance of int") + raise TypeError(f"port must be an instance of int, not {type(port)}") # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. @@ -1971,7 +1971,7 @@ async def _close_cursor_now( The cursor is closed synchronously on the current thread. """ if not isinstance(cursor_id, int): - raise TypeError("cursor_id must be an instance of int") + raise TypeError(f"cursor_id must be an instance of int, not {type(cursor_id)}") try: if conn_mgr: @@ -2093,7 +2093,9 @@ async def _tmp_session( """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.AsyncClientSession): - raise ValueError("'session' argument must be an AsyncClientSession or None.") + raise ValueError( + f"'session' argument must be an AsyncClientSession or None, not {type(session)}" + ) # Don't call end_session. yield session return @@ -2247,7 +2249,9 @@ async def drop_database( name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance of str or a AsyncDatabase") + raise TypeError( + f"name_or_database must be an instance of str or a AsyncDatabase, not {type(name)}" + ) async with await self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: await self[name]._command( diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py index 9534bd74ad..410521d73a 100644 --- a/pymongo/auth_shared.py +++ b/pymongo/auth_shared.py @@ -107,7 +107,7 @@ def _build_credentials_tuple( ) -> MongoCredential: """Build and return a mechanism specific credentials tuple.""" if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: - raise ConfigurationError(f"{mech} requires a username.") + raise ConfigurationError(f"{mech} requires a username") if mech == "GSSAPI": if source is not None and source != "$external": raise ValueError("authentication source must be $external or None for GSSAPI") @@ -219,7 +219,7 @@ def _build_credentials_tuple( else: source_database = source or database or "admin" if passwd is None: - raise ConfigurationError("A password is required.") + raise ConfigurationError("A password is required") return MongoCredential(mech, source_database, user, passwd, None, _Cache()) diff --git a/pymongo/collation.py b/pymongo/collation.py index 9adcb2e408..fc84b937f2 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -223,4 +223,4 @@ def validate_collation_or_none( return value.document if isinstance(value, dict): return value - raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") + raise TypeError("collation must be a dict, an instance of collation.Collation, or None") diff --git a/pymongo/common.py b/pymongo/common.py index b442da6a3e..4be7a3122a 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -202,7 +202,7 @@ def validate_integer(option: str, value: Any) -> int: return int(value) except ValueError: raise ValueError(f"The value of {option} must be an integer") from None - raise TypeError(f"Wrong type for {option}, value must be an integer") + raise TypeError(f"Wrong type for {option}, value must be an integer, not {type(value)}") def validate_positive_integer(option: str, value: Any) -> int: @@ -250,7 +250,7 @@ def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError(f"Wrong type for {option}, value must be an instance of str") + raise TypeError(f"Wrong type for {option}, value must be an instance of str, not {type(value)}") def validate_string_or_none(option: str, value: Any) -> Optional[str]: @@ -269,7 +269,9 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError(f"Wrong type for {option}, value must be an integer or a string") + raise TypeError( + f"Wrong type for {option}, value must be an integer or a string, not {type(value)}" + ) def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: @@ -282,7 +284,9 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in except ValueError: return value return validate_non_negative_integer(option, val) - raise TypeError(f"Wrong type for {option}, value must be an non negative integer or a string") + raise TypeError( + f"Wrong type for {option}, value must be an non negative integer or a string, not {type(value)}" + ) def validate_positive_float(option: str, value: Any) -> float: @@ -365,7 +369,7 @@ def validate_max_staleness(option: str, value: Any) -> int: def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: """Validate a read preference.""" if not isinstance(value, _ServerMode): - raise TypeError(f"{value!r} is not a read preference.") + raise TypeError(f"{value!r} is not a read preference") return value @@ -441,7 +445,9 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni props: dict[str, Any] = {} if not isinstance(value, str): if not isinstance(value, dict): - raise ValueError("Auth mechanism properties must be given as a string or a dictionary") + raise ValueError( + f"Auth mechanism properties must be given as a string or a dictionary, not {type(value)}" + ) for key, value in value.items(): # noqa: B020 if isinstance(value, str): props[key] = value @@ -453,7 +459,7 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni from pymongo.auth_oidc_shared import OIDCCallback if not isinstance(value, OIDCCallback): - raise ValueError("callback must be an OIDCCallback object") + raise ValueError(f"callback must be an OIDCCallback object, not {type(value)}") props[key] = value else: raise ValueError(f"Invalid type for auth mechanism property {key}, {type(value)}") @@ -476,7 +482,7 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni raise ValueError( f"{key} is not a supported auth " "mechanism property. Must be one of " - f"{tuple(_MECHANISM_PROPS)}." + f"{tuple(_MECHANISM_PROPS)}" ) if key == "CANONICALIZE_HOST_NAME": @@ -520,7 +526,7 @@ def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: def validate_list(option: str, value: Any) -> list: """Validates that 'value' is a list.""" if not isinstance(value, list): - raise TypeError(f"{option} must be a list") + raise TypeError(f"{option} must be a list, not {type(value)}") return value @@ -587,7 +593,7 @@ def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: if value is None: return value if not isinstance(value, ServerApi): - raise TypeError(f"{option} must be an instance of ServerApi") + raise TypeError(f"{option} must be an instance of ServerApi, not {type(value)}") return value @@ -596,7 +602,7 @@ def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: if value is None: return value if not callable(value): - raise ValueError(f"{option} must be a callable") + raise ValueError(f"{option} must be a callable, not {type(value)}") return value @@ -651,7 +657,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A from pymongo.encryption_options import AutoEncryptionOpts if not isinstance(value, AutoEncryptionOpts): - raise TypeError(f"{option} must be an instance of AutoEncryptionOpts") + raise TypeError(f"{option} must be an instance of AutoEncryptionOpts, not {type(value)}") return value @@ -668,7 +674,9 @@ def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeCo elif isinstance(value, int): return DatetimeConversion(value) - raise TypeError(f"{option} must be a str or int representing DatetimeConversion") + raise TypeError( + f"{option} must be a str or int representing DatetimeConversion, not {type(value)}" + ) def validate_server_monitoring_mode(option: str, value: str) -> str: @@ -928,12 +936,14 @@ def __init__( if not isinstance(write_concern, WriteConcern): raise TypeError( - "write_concern must be an instance of pymongo.write_concern.WriteConcern" + f"write_concern must be an instance of pymongo.write_concern.WriteConcern, not {type(write_concern)}" ) self._write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") + raise TypeError( + f"read_concern must be an instance of pymongo.read_concern.ReadConcern, not {type(read_concern)}" + ) self._read_concern = read_concern @property diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index f49b56cc96..7486451730 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -91,7 +91,7 @@ def validate_zlib_compression_level(option: str, value: Any) -> int: try: level = int(value) except Exception: - raise TypeError(f"{option} must be an integer, not {value!r}.") from None + raise TypeError(f"{option} must be an integer, not {value!r}") from None if level < -1 or level > 9: raise ValueError("%s must be between -1 and 9, not %d." % (option, level)) return level diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 5ca3f952cd..724a6f20d5 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -39,7 +39,7 @@ def __new__( for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): raise TypeError( - f"Wrong type for DriverInfo {key} option, value must be an instance of str" + f"Wrong type for DriverInfo {key} option, value must be an instance of str, not {type(value)}" ) return self diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index ee749e7ac1..26dfbf5f03 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -225,7 +225,9 @@ def __init__( mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): - raise TypeError("mongocryptd_spawn_args must be a list") + raise TypeError( + f"mongocryptd_spawn_args must be a list, not {type(self._mongocryptd_spawn_args)}" + ) if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. diff --git a/pymongo/helpers_shared.py b/pymongo/helpers_shared.py index 83ea2ddf78..c6b820c1c2 100644 --- a/pymongo/helpers_shared.py +++ b/pymongo/helpers_shared.py @@ -122,7 +122,7 @@ def _index_list( """ if direction is not None: if not isinstance(key_or_list, str): - raise TypeError("Expected a string and a direction") + raise TypeError(f"Expected a string and a direction, not {type(key_or_list)}") return [(key_or_list, direction)] else: if isinstance(key_or_list, str): @@ -132,7 +132,9 @@ def _index_list( elif isinstance(key_or_list, abc.Mapping): return list(key_or_list.items()) elif not isinstance(key_or_list, (list, tuple)): - raise TypeError("if no direction is specified, key_or_list must be an instance of list") + raise TypeError( + f"if no direction is specified, key_or_list must be an instance of list, not {type(key_or_list)}" + ) values: list[tuple[str, int]] = [] for item in key_or_list: if isinstance(item, str): @@ -172,11 +174,12 @@ def _index_document(index_list: _IndexList) -> dict[str, Any]: def _validate_index_key_pair(key: Any, value: Any) -> None: if not isinstance(key, str): - raise TypeError("first item in each key pair must be an instance of str") + raise TypeError(f"first item in each key pair must be an instance of str, not {type(key)}") if not isinstance(value, (str, int, abc.Mapping)): raise TypeError( "second item in each key pair must be 1, -1, " "'2d', or another valid MongoDB index specifier." + f", not {type(value)}" ) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 96f88597d2..38d6e3a22a 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -472,14 +472,15 @@ def _validate_event_listeners( ) -> Sequence[_EventListeners]: """Validate event listeners""" if not isinstance(listeners, abc.Sequence): - raise TypeError(f"{option} must be a list or tuple") + raise TypeError(f"{option} must be a list or tuple, not {type(listeners)}") for listener in listeners: if not isinstance(listener, _EventListener): raise TypeError( f"Listeners for {option} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." + "ConnectionPoolListener," + f"not {type(listener)}" ) return listeners @@ -496,7 +497,8 @@ def register(listener: _EventListener) -> None: f"Listeners for {listener} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." + "ConnectionPoolListener," + f"not {type(listener)}" ) if isinstance(listener, CommandListener): _LISTENERS.command_listeners.append(listener) diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index fa2f4a318a..17f3a46edb 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -38,7 +38,7 @@ def __init__(self, level: Optional[str] = None) -> None: if level is None or isinstance(level, str): self.__level = level else: - raise TypeError("level must be a string or None.") + raise TypeError(f"level must be a string or None, not {type(level)}") @property def level(self) -> Optional[str]: diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 580d71f9b0..0faf21ba8f 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -115,4 +115,4 @@ class SSLError(Exception): # type: ignore def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" - raise ConfigurationError("The ssl module is not available.") + raise ConfigurationError("The ssl module is not available") diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py index 56860eff3b..6041ebdbe3 100644 --- a/pymongo/synchronous/auth.py +++ b/pymongo/synchronous/auth.py @@ -158,7 +158,7 @@ def _password_digest(username: str, password: str) -> str: if len(password) == 0: raise ValueError("password can't be empty") if not isinstance(username, str): - raise TypeError("username must be an instance of str") + raise TypeError(f"username must be an instance of str, not {type(username)}") md5hash = hashlib.md5() # noqa: S324 data = f"{username}:mongo:{password}" diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py index 5a8967d96b..c5efdd5fcc 100644 --- a/pymongo/synchronous/auth_oidc.py +++ b/pymongo/synchronous/auth_oidc.py @@ -213,7 +213,9 @@ def _get_access_token(self) -> Optional[str]: ) resp = cb.fetch(context) if not isinstance(resp, OIDCCallbackResult): - raise ValueError("Callback result must be of type OIDCCallbackResult") + raise ValueError( + f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" + ) self.refresh_token = resp.refresh_token self.access_token = resp.access_token self.token_gen_id += 1 diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index f1d680fc0a..298dd7b357 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -309,7 +309,9 @@ def __init__( ) if max_commit_time_ms is not None: if not isinstance(max_commit_time_ms, int): - raise TypeError("max_commit_time_ms must be an integer or None") + raise TypeError( + f"max_commit_time_ms must be an integer or None, not {type(max_commit_time_ms)}" + ) @property def read_concern(self) -> Optional[ReadConcern]: @@ -897,7 +899,9 @@ def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: another `ClientSession` instance. """ if not isinstance(cluster_time, _Mapping): - raise TypeError("cluster_time must be a subclass of collections.Mapping") + raise TypeError( + f"cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}" + ) if not isinstance(cluster_time.get("clusterTime"), Timestamp): raise ValueError("Invalid cluster_time") self._advance_cluster_time(cluster_time) @@ -918,7 +922,9 @@ def advance_operation_time(self, operation_time: Timestamp) -> None: another `ClientSession` instance. """ if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") + raise TypeError( + f"operation_time must be an instance of bson.timestamp.Timestamp, not {type(operation_time)}" + ) self._advance_operation_time(operation_time) def _process_response(self, reply: Mapping[str, Any]) -> None: diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 6edfddc9a9..b956ac58a5 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -231,7 +231,7 @@ def __init__( read_concern or database.read_concern, ) if not isinstance(name, str): - raise TypeError("name must be an instance of str") + raise TypeError(f"name must be an instance of str, not {type(name)}") from pymongo.synchronous.database import Database if not isinstance(database, Database): @@ -2472,7 +2472,7 @@ def _drop_index( name = helpers_shared._gen_index_name(index_or_name) if not isinstance(name, str): - raise TypeError("index_or_name must be an instance of str or list") + raise TypeError(f"index_or_name must be an instance of str or list, not {type(name)}") cmd = {"dropIndexes": self._name, "index": name} cmd.update(kwargs) @@ -3071,7 +3071,7 @@ def rename( """ if not isinstance(new_name, str): - raise TypeError("new_name must be an instance of str") + raise TypeError(f"new_name must be an instance of str, not {type(new_name)}") if not new_name or ".." in new_name: raise InvalidName("collection names cannot be empty") @@ -3141,7 +3141,7 @@ def distinct( """ if not isinstance(key, str): - raise TypeError("key must be an instance of str") + raise TypeError(f"key must be an instance of str, not {type(key)}") cmd = {"distinct": self._name, "key": key} if filter is not None: if "query" in kwargs: @@ -3189,7 +3189,7 @@ def _find_and_modify( common.validate_is_mapping("filter", filter) if not isinstance(return_document, bool): raise ValueError( - "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" + f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) cmd = {"findAndModify": self._name, "query": filter, "new": return_document} diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index 3a4372856a..e23519d740 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -94,7 +94,9 @@ def __init__( self.batch_size(batch_size) if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) def __del__(self) -> None: self._die_no_lock() @@ -115,7 +117,7 @@ def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: :param batch_size: The size of each batch of results requested. """ if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") if batch_size < 0: raise ValueError("batch_size must be >= 0") diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index b35098a327..cda093ee19 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -146,9 +146,9 @@ def __init__( spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) if not isinstance(skip, int): - raise TypeError("skip must be an instance of int") + raise TypeError(f"skip must be an instance of int, not {type(skip)}") if not isinstance(limit, int): - raise TypeError("limit must be an instance of int") + raise TypeError(f"limit must be an instance of int, not {type(limit)}") validate_boolean("no_cursor_timeout", no_cursor_timeout) if no_cursor_timeout and not self._explicit_session: warnings.warn( @@ -171,7 +171,7 @@ def __init__( validate_boolean("allow_partial_results", allow_partial_results) validate_boolean("oplog_replay", oplog_replay) if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") if batch_size < 0: raise ValueError("batch_size must be >= 0") # Only set if allow_disk_use is provided by the user, else None. @@ -388,7 +388,7 @@ def add_option(self, mask: int) -> Cursor[_DocumentType]: cursor.add_option(2) """ if not isinstance(mask, int): - raise TypeError("mask must be an int") + raise TypeError(f"mask must be an int, not {type(mask)}") self._check_okay_to_chain() if mask & _QUERY_OPTIONS["exhaust"]: @@ -408,7 +408,7 @@ def remove_option(self, mask: int) -> Cursor[_DocumentType]: cursor.remove_option(2) """ if not isinstance(mask, int): - raise TypeError("mask must be an int") + raise TypeError(f"mask must be an int, not {type(mask)}") self._check_okay_to_chain() if mask & _QUERY_OPTIONS["exhaust"]: @@ -432,7 +432,7 @@ def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: .. versionadded:: 3.11 """ if not isinstance(allow_disk_use, bool): - raise TypeError("allow_disk_use must be a bool") + raise TypeError(f"allow_disk_use must be a bool, not {type(allow_disk_use)}") self._check_okay_to_chain() self._allow_disk_use = allow_disk_use @@ -451,7 +451,7 @@ def limit(self, limit: int) -> Cursor[_DocumentType]: .. seealso:: The MongoDB documentation on `limit `_. """ if not isinstance(limit, int): - raise TypeError("limit must be an integer") + raise TypeError(f"limit must be an integer, not {type(limit)}") if self._exhaust: raise InvalidOperation("Can't use limit and exhaust together.") self._check_okay_to_chain() @@ -479,7 +479,7 @@ def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: :param batch_size: The size of each batch of results requested. """ if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") if batch_size < 0: raise ValueError("batch_size must be >= 0") self._check_okay_to_chain() @@ -499,7 +499,7 @@ def skip(self, skip: int) -> Cursor[_DocumentType]: :param skip: the number of results to skip """ if not isinstance(skip, int): - raise TypeError("skip must be an integer") + raise TypeError(f"skip must be an integer, not {type(skip)}") if skip < 0: raise ValueError("skip must be >= 0") self._check_okay_to_chain() @@ -520,7 +520,7 @@ def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: :param max_time_ms: the time limit after which the operation is aborted """ if not isinstance(max_time_ms, int) and max_time_ms is not None: - raise TypeError("max_time_ms must be an integer or None") + raise TypeError(f"max_time_ms must be an integer or None, not {type(max_time_ms)}") self._check_okay_to_chain() self._max_time_ms = max_time_ms @@ -543,7 +543,9 @@ def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_Documen .. versionadded:: 3.2 """ if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) self._check_okay_to_chain() # Ignore max_await_time_ms if not tailable or await_data is False. @@ -677,7 +679,7 @@ def max(self, spec: _Sort) -> Cursor[_DocumentType]: .. versionadded:: 2.7 """ if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") self._check_okay_to_chain() self._max = dict(spec) @@ -699,7 +701,7 @@ def min(self, spec: _Sort) -> Cursor[_DocumentType]: .. versionadded:: 2.7 """ if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") self._check_okay_to_chain() self._min = dict(spec) diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index a0bef55343..0dc03cb746 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -122,7 +122,7 @@ def __init__( from pymongo.synchronous.mongo_client import MongoClient if not isinstance(name, str): - raise TypeError("name must be an instance of str") + raise TypeError(f"name must be an instance of str, not {type(name)}") if not isinstance(client, MongoClient): # This is for compatibility with mocked and subclassed types, such as in Motor. @@ -1303,7 +1303,7 @@ def drop_collection( name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str") + raise TypeError(f"name_or_collection must be an instance of str, not {type(name)}") encrypted_fields = self._get_encrypted_fields( {"encryptedFields": encrypted_fields}, name, @@ -1367,7 +1367,9 @@ def validate_collection( name = name.name if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or Collection") + raise TypeError( + f"name_or_collection must be an instance of str or Collection, not {type(name)}" + ) cmd = {"validate": name, "scandata": scandata, "full": full} if comment is not None: cmd["comment"] = comment diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index d41169861f..59f38e1913 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -320,7 +320,9 @@ def insert_data_key(self, data_key: bytes) -> Binary: raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) data_key_id = raw_doc.get("_id") if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: - raise TypeError("data_key _id must be Binary with a UUID subtype") + raise TypeError( + f"data_key _id must be Binary with a UUID subtype, not {type(data_key_id)}" + ) assert self.key_vault_coll is not None self.key_vault_coll.insert_one(raw_doc) @@ -642,7 +644,9 @@ def __init__( ) if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") + raise TypeError( + f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}" + ) if not isinstance(key_vault_client, MongoClient): # This is for compatibility with mocked and subclassed types, such as in Motor. diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index a694a58c1e..706623c214 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -748,7 +748,7 @@ def __init__( if port is None: port = self.PORT if not isinstance(port, int): - raise TypeError("port must be an instance of int") + raise TypeError(f"port must be an instance of int, not {type(port)}") # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. @@ -1965,7 +1965,7 @@ def _close_cursor_now( The cursor is closed synchronously on the current thread. """ if not isinstance(cursor_id, int): - raise TypeError("cursor_id must be an instance of int") + raise TypeError(f"cursor_id must be an instance of int, not {type(cursor_id)}") try: if conn_mgr: @@ -2087,7 +2087,9 @@ def _tmp_session( """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.ClientSession): - raise ValueError("'session' argument must be a ClientSession or None.") + raise ValueError( + f"'session' argument must be a ClientSession or None, not {type(session)}" + ) # Don't call end_session. yield session return @@ -2235,7 +2237,9 @@ def drop_database( name = name.name if not isinstance(name, str): - raise TypeError("name_or_database must be an instance of str or a Database") + raise TypeError( + f"name_or_database must be an instance of str or a Database, not {type(name)}" + ) with self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: self[name]._command( diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 7018dad7d8..8f56ae4093 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -91,7 +91,7 @@ def parse_userinfo(userinfo: str) -> tuple[str, str]: user, _, passwd = userinfo.partition(":") # No password is expected with GSSAPI authentication. if not user: - raise InvalidURI("The empty string is not valid username.") + raise InvalidURI("The empty string is not valid username") return unquote_plus(user), unquote_plus(passwd) @@ -347,7 +347,7 @@ def split_options( semi_idx = opts.find(";") try: if and_idx >= 0 and semi_idx >= 0: - raise InvalidURI("Can not mix '&' and ';' for option separators.") + raise InvalidURI("Can not mix '&' and ';' for option separators") elif and_idx >= 0: options = _parse_options(opts, "&") elif semi_idx >= 0: @@ -357,7 +357,7 @@ def split_options( else: raise ValueError except ValueError: - raise InvalidURI("MongoDB URI options are key=value pairs.") from None + raise InvalidURI("MongoDB URI options are key=value pairs") from None options = _handle_security_options(options) @@ -389,7 +389,7 @@ def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[ nodes = [] for entity in hosts.split(","): if not entity: - raise ConfigurationError("Empty host (or extra comma in host list).") + raise ConfigurationError("Empty host (or extra comma in host list)") port = default_port # Unix socket entities don't have ports if entity.endswith(".sock"): @@ -502,7 +502,7 @@ def parse_uri( raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") if not scheme_free: - raise InvalidURI("Must provide at least one hostname or IP.") + raise InvalidURI("Must provide at least one hostname or IP") user = None passwd = None diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 67c9549897..21faeebed0 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -74,7 +74,7 @@ def __init__( if wtimeout is not None: if not isinstance(wtimeout, int): - raise TypeError("wtimeout must be an integer") + raise TypeError(f"wtimeout must be an integer, not {type(wtimeout)}") if wtimeout < 0: raise ValueError("wtimeout cannot be less than 0") self.__document["wtimeout"] = wtimeout @@ -98,7 +98,7 @@ def __init__( raise ValueError("w cannot be less than 0") self.__acknowledged = w > 0 elif not isinstance(w, str): - raise TypeError("w must be an integer or string") + raise TypeError(f"w must be an integer or string, not {type(w)}") self.__document["w"] = w self.__server_default = not self.__document From 3e783f5489c5ac899be5e1bbbe0d26f1fe4f1f73 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Fri, 31 Jan 2025 12:13:21 -0800 Subject: [PATCH 1709/2111] PYTHON-5088 Convert test.test_max_staleness to async (#2105) --- test/asynchronous/test_max_staleness.py | 149 ++++++++++++++++++++++++ test/test_max_staleness.py | 11 +- tools/synchro.py | 1 + 3 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 test/asynchronous/test_max_staleness.py diff --git a/test/asynchronous/test_max_staleness.py b/test/asynchronous/test_max_staleness.py new file mode 100644 index 0000000000..7dbf17021f --- /dev/null +++ b/test/asynchronous/test_max_staleness.py @@ -0,0 +1,149 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test maxStalenessSeconds support.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +import warnings +from pathlib import Path + +from pymongo import AsyncMongoClient +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest +from test.utils_selection_tests import create_selection_tests + +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "max_staleness") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "max_staleness") + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestMaxStaleness(AsyncPyMongoTestCase): + async def test_max_staleness(self): + client = self.simple_client() + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary") + self.assertEqual(-1, client.read_preference.max_staleness) + + # These tests are specified in max-staleness-tests.rst. + with self.assertRaises(ConfigurationError): + # Default read pref "primary" can't be used with max staleness. + self.simple_client("mongodb://a/?maxStalenessSeconds=120") + + with self.assertRaises(ConfigurationError): + # Read pref "primary" can't be used with max staleness. + self.simple_client("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") + + client = self.simple_client("mongodb://host/?maxStalenessSeconds=-1") + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client( + "mongodb://host/?readPreference=secondary&maxStalenessSeconds=120" + ) + self.assertEqual(120, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") + self.assertEqual(1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client(maxStalenessSeconds=-1, readPreference="nearest") + self.assertEqual(-1, client.read_preference.max_staleness) + + with self.assertRaises(TypeError): + # Prohibit None. + self.simple_client(maxStalenessSeconds=None, readPreference="nearest") + + async def test_max_staleness_float(self): + with self.assertRaises(TypeError) as ctx: + await self.async_rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") + + self.assertIn("must be an integer", str(ctx.exception)) + + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest" + ) + + # Option was ignored. + self.assertEqual(-1, client.read_preference.max_staleness) + self.assertIn("must be an integer", str(ctx[0])) + + async def test_max_staleness_zero(self): + # Zero is too small. + with self.assertRaises(ValueError) as ctx: + await self.async_rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") + + self.assertIn("must be a positive integer", str(ctx.exception)) + + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=0&readPreference=nearest" + ) + + # Option was ignored. + self.assertEqual(-1, client.read_preference.max_staleness) + self.assertIn("must be a positive integer", str(ctx[0])) + + @async_client_context.require_replica_set + async def test_last_write_date(self): + # From max-staleness-tests.rst, "Parse lastWriteDate". + client = await self.async_rs_or_single_client(heartbeatFrequencyMS=500) + await client.pymongo_test.test.insert_one({}) + # Wait for the server description to be updated. + await asyncio.sleep(1) + server = await client._topology.select_server(writable_server_selector, _Op.TEST) + first = server.description.last_write_date + self.assertTrue(first) + # The first last_write_date may correspond to a internal server write, + # sleep so that the next write does not occur within the same second. + await asyncio.sleep(1) + await client.pymongo_test.test.insert_one({}) + # Wait for the server description to be updated. + await asyncio.sleep(1) + server = await client._topology.select_server(writable_server_selector, _Op.TEST) + second = server.description.last_write_date + assert first is not None + + assert second is not None + self.assertGreater(second, first) + self.assertLess(second, first + 10) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 32d09ada9a..56e047fd4b 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -15,10 +15,12 @@ """Test maxStalenessSeconds support.""" from __future__ import annotations +import asyncio import os import sys import time import warnings +from pathlib import Path from pymongo import MongoClient from pymongo.operations import _Op @@ -31,11 +33,16 @@ from pymongo.errors import ConfigurationError from pymongo.server_selectors import writable_server_selector +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "max_staleness") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "max_staleness") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "max_staleness") -class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore pass diff --git a/tools/synchro.py b/tools/synchro.py index ef82db756d..2a2969679e 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -215,6 +215,7 @@ def async_only_test(f: str) -> bool: "test_json_util_integration.py", "test_gridfs_spec.py", "test_logger.py", + "test_max_staleness.py", "test_monitoring.py", "test_on_demand_csfle.py", "test_raw_bson.py", From 3b5788906ddeb326c0407f7b490aadde0f88c2ee Mon Sep 17 00:00:00 2001 From: Jib Date: Fri, 31 Jan 2025 15:16:17 -0500 Subject: [PATCH 1710/2111] Update ReadTheDocs to include django-mongodb-backend (#2084) --- doc/tools.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/tools.rst b/doc/tools.rst index 6dd0df8a4d..7ec3ddb443 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -67,6 +67,14 @@ uMongo mongomock. The source `is available on GitHub `_ +Django MongoDB Backend + `Django MongoDB Backend `_ is a + database backend library specifically made for Django. The integration takes + advantage of MongoDB's unique document model capabilities, which align + naturally with Django's philosophy of simplified data modeling and + reduced development complexity. The source is available + `on GitHub `_. + No longer maintained """""""""""""""""""" From acc437af57aca88fc2b333ca6ca5dead28819fe6 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 31 Jan 2025 15:50:46 -0500 Subject: [PATCH 1711/2111] PYTHON-5097 - Convert test.test_retryable_writes_unified to async (#2113) --- .../test_retryable_writes_unified.py | 39 +++++++++++++++++++ test/test_retryable_writes_unified.py | 8 +++- tools/synchro.py | 1 + 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_retryable_writes_unified.py diff --git a/test/asynchronous/test_retryable_writes_unified.py b/test/asynchronous/test_retryable_writes_unified.py new file mode 100644 index 0000000000..bb493e6010 --- /dev/null +++ b/test/asynchronous/test_retryable_writes_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_writes/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_writes/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py index da16166ec6..036c410e24 100644 --- a/test/test_retryable_writes_unified.py +++ b/test/test_retryable_writes_unified.py @@ -17,14 +17,20 @@ import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "unified") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_writes/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_writes/unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/tools/synchro.py b/tools/synchro.py index 2a2969679e..e0b208377e 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -222,6 +222,7 @@ def async_only_test(f: str) -> bool: "test_read_concern.py", "test_retryable_reads.py", "test_retryable_writes.py", + "test_retryable_writes_unified.py", "test_session.py", "test_transactions.py", "unified_format.py", From 6b141d1f5bb2fb9a301932ce45870b21ddd8ea21 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 31 Jan 2025 15:51:00 -0500 Subject: [PATCH 1712/2111] PYTHON-5096 - Convert test.test_retryable_reads_unified to async (#2112) --- .../test_retryable_reads_unified.py | 46 +++++++++++++++++++ test/test_retryable_reads_unified.py | 8 +++- tools/synchro.py | 1 + 3 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_retryable_reads_unified.py diff --git a/test/asynchronous/test_retryable_reads_unified.py b/test/asynchronous/test_retryable_reads_unified.py new file mode 100644 index 0000000000..e62d606810 --- /dev/null +++ b/test/asynchronous/test_retryable_reads_unified.py @@ -0,0 +1,46 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Reads unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_reads/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_reads/unified") + +# Generate unified tests. +# PyMongo does not support MapReduce, ListDatabaseObjects or ListCollectionObjects. +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + expected_failures=["ListDatabaseObjects .*", "ListCollectionObjects .*", "MapReduce .*"], + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py index 3f8740cf4b..b1c6435c9a 100644 --- a/test/test_retryable_reads_unified.py +++ b/test/test_retryable_reads_unified.py @@ -15,6 +15,7 @@ """Test the Retryable Reads unified spec tests.""" from __future__ import annotations +import os import sys from pathlib import Path @@ -23,8 +24,13 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = Path(__file__).parent / "retryable_reads/unified" +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_reads/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_reads/unified") # Generate unified tests. # PyMongo does not support MapReduce, ListDatabaseObjects or ListCollectionObjects. diff --git a/tools/synchro.py b/tools/synchro.py index e0b208377e..eb44ef4ac0 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -221,6 +221,7 @@ def async_only_test(f: str) -> bool: "test_raw_bson.py", "test_read_concern.py", "test_retryable_reads.py", + "test_retryable_reads_unified.py", "test_retryable_writes.py", "test_retryable_writes_unified.py", "test_session.py", From 702c86c02cbf989fc38df0ddc916260ccf43ac43 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 3 Feb 2025 08:52:54 -0500 Subject: [PATCH 1713/2111] PYTHON-5095 - Convert test_read_write_concern_spec to async (#2111) --- test/asynchronous/__init__.py | 8 +- .../test_read_write_concern_spec.py | 344 ++++++++++++++++++ test/test_read_write_concern_spec.py | 22 +- tools/synchro.py | 1 + 4 files changed, 364 insertions(+), 11 deletions(-) create mode 100644 test/asynchronous/test_read_write_concern_spec.py diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 76fae407da..a6ba29baaa 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -1176,15 +1176,15 @@ def unmanaged_simple_client( async def disable_replication(self, client): """Disable replication on all secondaries.""" - for h, p in client.secondaries: + for h, p in await client.secondaries: secondary = await self.async_single_client(h, p) - secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + await secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") async def enable_replication(self, client): """Enable replication on all secondaries.""" - for h, p in client.secondaries: + for h, p in await client.secondaries: secondary = await self.async_single_client(h, p) - secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + await secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") class AsyncUnitTest(AsyncPyMongoTestCase): diff --git a/test/asynchronous/test_read_write_concern_spec.py b/test/asynchronous/test_read_write_concern_spec.py new file mode 100644 index 0000000000..3fb13ba194 --- /dev/null +++ b/test/asynchronous/test_read_write_concern_spec.py @@ -0,0 +1,344 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the read and write concern tests.""" +from __future__ import annotations + +import json +import os +import sys +import warnings +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils import OvertCommandListener + +from pymongo import DESCENDING +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "read_write_concern") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "read_write_concern") + + +class TestReadWriteConcernSpec(AsyncIntegrationTest): + async def test_omit_default_read_write_concern(self): + listener = OvertCommandListener() + # Client with default readConcern and writeConcern + client = await self.async_rs_or_single_client(event_listeners=[listener]) + collection = client.pymongo_test.collection + # Prepare for tests of find() and aggregate(). + await collection.insert_many([{} for _ in range(10)]) + self.addAsyncCleanup(collection.drop) + self.addAsyncCleanup(client.pymongo_test.collection2.drop) + # Commands MUST NOT send the default read/write concern to the server. + + async def rename_and_drop(): + # Ensure collection exists. + await collection.insert_one({}) + await collection.rename("collection2") + await client.pymongo_test.collection2.drop() + + async def insert_command_default_write_concern(): + await collection.database.command( + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) + + async def aggregate_op(): + await (await collection.aggregate([])).to_list() + + ops = [ + ("aggregate", aggregate_op), + ("find", lambda: collection.find().to_list()), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), + ] + + for name, f in ops: + listener.reset() + await f() + + self.assertGreaterEqual(len(listener.started_events), 1) + for _i, event in enumerate(listener.started_events): + self.assertNotIn( + "readConcern", + event.command, + f"{name} sent default readConcern with {event.command_name}", + ) + self.assertNotIn( + "writeConcern", + event.command, + f"{name} sent default writeConcern with {event.command_name}", + ) + + async def assertWriteOpsRaise(self, write_concern, expected_exception): + wc = write_concern.document + # Set socket timeout to avoid indefinite stalls + client = await self.async_rs_or_single_client( + w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000 + ) + db = client.get_database("pymongo_test") + coll = db.test + + async def insert_command(): + await coll.database.command( + "insert", + "new_collection", + documents=[{}], + writeConcern=write_concern.document, + parse_write_concern_error=True, + ) + + ops = [ + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + ("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), + # SERVER-46668 Delete all the documents in the collection to + # workaround a hang in createIndexes. + ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), + ] + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. + if async_client_context.version[:2] != (3, 6): + ops.append(("drop_database", lambda: client.drop_database(db))) + + for name, f in ops: + # Ensure insert_many and bulk_write still raise BulkWriteError. + if name in ("insert_many", "bulk_write"): + expected = BulkWriteError + else: + expected = expected_exception + with self.assertRaises(expected, msg=name) as cm: + await f() + if expected == BulkWriteError: + bulk_result = cm.exception.details + assert bulk_result is not None + wc_errors = bulk_result["writeConcernErrors"] + self.assertTrue(wc_errors) + + @async_client_context.require_replica_set + async def test_raise_write_concern_error(self): + self.addAsyncCleanup(async_client_context.client.drop_database, "pymongo_test") + assert async_client_context.w is not None + await self.assertWriteOpsRaise( + WriteConcern(w=async_client_context.w + 1, wtimeout=1), WriteConcernError + ) + + @async_client_context.require_secondaries_count(1) + @async_client_context.require_test_commands + async def test_raise_wtimeout(self): + self.addAsyncCleanup(async_client_context.client.drop_database, "pymongo_test") + self.addAsyncCleanup(self.enable_replication, async_client_context.client) + # Disable replication to guarantee a wtimeout error. + await self.disable_replication(async_client_context.client) + await self.assertWriteOpsRaise( + WriteConcern(w=async_client_context.w, wtimeout=1), WTimeoutError + ) + + @async_client_context.require_failCommand_fail_point + async def test_error_includes_errInfo(self): + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + async with self.fail_point(cause_wce): + # Write concern error on insert includes errInfo. + with self.assertRaises(WriteConcernError) as ctx: + await self.db.test.insert_one({}) + self.assertEqual(ctx.exception.details, expected_wce) + + # Test bulk_write as well. + with self.assertRaises(BulkWriteError) as ctx: + await self.db.test.bulk_write([InsertOne({})]) + expected_details = { + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + self.assertEqual(ctx.exception.details, expected_details) + + @async_client_context.require_version_min(4, 9) + async def test_write_error_details_exposes_errinfo(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client.errinfotest + self.addAsyncCleanup(client.drop_database, "errinfotest") + validator = {"x": {"$type": "string"}} + await db.create_collection("test", validator=validator) + with self.assertRaises(WriteError) as ctx: + await db.test.insert_one({"x": 1}) + self.assertEqual(ctx.exception.code, 121) + self.assertIsNotNone(ctx.exception.details) + assert ctx.exception.details is not None + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.succeeded_events: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) + break + else: + self.fail("Couldn't find insert event.") + + +def normalize_write_concern(concern): + result = {} + for key in concern: + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] + else: + result[key] = concern[key] + return result + + +def create_connection_string_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] + + if not valid: + if warning is False: + self.assertRaises( + (ConfigurationError, ValueError), AsyncMongoClient, uri, connect=False + ) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + self.assertRaises(UserWarning, AsyncMongoClient, uri, connect=False) + else: + client = AsyncMongoClient(uri, connect=False) + if "writeConcern" in test_case: + document = client.write_concern.document + self.assertEqual(document, normalize_write_concern(test_case["writeConcern"])) + if "readConcern" in test_case: + document = client.read_concern.document + self.assertEqual(document, test_case["readConcern"]) + + return run_test + + +def create_document_test(test_case): + def run_test(self): + valid = test_case["valid"] + + if "writeConcern" in test_case: + normalized = normalize_write_concern(test_case["writeConcern"]) + if not valid: + self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized) + else: + write_concern = WriteConcern(**normalized) + self.assertEqual(write_concern.document, test_case["writeConcernDocument"]) + self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) + self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) + if "readConcern" in test_case: + # Any string for 'level' is equally valid + read_concern = ReadConcern(**test_case["readConcern"]) + self.assertEqual(read_concern.document, test_case["readConcernDocument"]) + self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) + + return run_test + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + if dirname == "operation": + # This directory is tested by TestOperations. + continue + elif dirname == "connection-string": + create_test = create_connection_string_test + else: + create_test = create_document_test + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as test_stream: + test_cases = json.load(test_stream)["tests"] + + fname = os.path.splitext(filename)[0] + for test_case in test_cases: + new_test = create_test(test_case) + test_name = "test_{}_{}_{}".format( + dirname.replace("-", "_"), + fname.replace("-", "_"), + str(test_case["description"].lower().replace(" ", "_")), + ) + + new_test.__name__ = test_name + setattr(TestReadWriteConcernSpec, new_test.__name__, new_test) + + +create_tests() + + +# Generate unified tests. +# PyMongo does not support MapReduce. +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "operation"), + module=__name__, + expected_failures=["MapReduce .*"], + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index db53b67ae4..8543991f72 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -19,6 +19,7 @@ import os import sys import warnings +from pathlib import Path sys.path[0:0] = [""] @@ -39,7 +40,13 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "read_write_concern") +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "read_write_concern") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "read_write_concern") class TestReadWriteConcernSpec(IntegrationTest): @@ -47,7 +54,6 @@ def test_omit_default_read_write_concern(self): listener = OvertCommandListener() # Client with default readConcern and writeConcern client = self.rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) @@ -66,9 +72,12 @@ def insert_command_default_write_concern(): "insert", "collection", documents=[{}], write_concern=WriteConcern() ) + def aggregate_op(): + (collection.aggregate([])).to_list() + ops = [ - ("aggregate", lambda: list(collection.aggregate([]))), - ("find", lambda: list(collection.find())), + ("aggregate", aggregate_op), + ("find", lambda: collection.find().to_list()), ("insert_one", lambda: collection.insert_one({})), ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), @@ -207,7 +216,6 @@ def test_error_includes_errInfo(self): def test_write_error_details_exposes_errinfo(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) db = client.errinfotest self.addCleanup(client.drop_database, "errinfotest") validator = {"x": {"$type": "string"}} @@ -286,7 +294,7 @@ def run_test(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(TEST_PATH): dirname = os.path.split(dirpath)[-1] if dirname == "operation": @@ -321,7 +329,7 @@ def create_tests(): # PyMongo does not support MapReduce. globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "operation"), + os.path.join(TEST_PATH, "operation"), module=__name__, expected_failures=["MapReduce .*"], ) diff --git a/tools/synchro.py b/tools/synchro.py index eb44ef4ac0..ba0a4712e3 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -220,6 +220,7 @@ def async_only_test(f: str) -> bool: "test_on_demand_csfle.py", "test_raw_bson.py", "test_read_concern.py", + "test_read_write_concern_spec.py", "test_retryable_reads.py", "test_retryable_reads_unified.py", "test_retryable_writes.py", From 665eb9a4b83029b43266e0de45339a2ae8764dee Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 3 Feb 2025 14:37:37 -0500 Subject: [PATCH 1714/2111] PYTHON-5105 - Convert test.test_srv_polling to async (#2124) --- test/asynchronous/test_srv_polling.py | 361 ++++++++++++++++++++++++++ test/test_srv_polling.py | 23 +- tools/synchro.py | 1 + 3 files changed, 378 insertions(+), 7 deletions(-) create mode 100644 test/asynchronous/test_srv_polling.py diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py new file mode 100644 index 0000000000..763c80e665 --- /dev/null +++ b/test/asynchronous/test_srv_polling.py @@ -0,0 +1,361 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the SRV support tests.""" +from __future__ import annotations + +import asyncio +import sys +import time +from typing import Any + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, client_knobs, unittest +from test.utils import FunctionCallRecorder, async_wait_until + +import pymongo +from pymongo import common +from pymongo.errors import ConfigurationError +from pymongo.srv_resolver import _have_dnspython + +_IS_SYNC = False + +WAIT_TIME = 0.1 + + +class SrvPollingKnobs: + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): + self.ttl_time = ttl_time + self.min_srv_rescan_interval = min_srv_rescan_interval + self.nodelist_callback = nodelist_callback + self.count_resolver_calls = count_resolver_calls + + self.old_min_srv_rescan_interval = None + self.old_dns_resolver_response = None + + def enable(self): + self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL + self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + + if self.min_srv_rescan_interval is not None: + common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval + + def mock_get_hosts_and_min_ttl(resolver, *args): + assert self.old_dns_resolver_response is not None + nodes, ttl = self.old_dns_resolver_response(resolver) + if self.nodelist_callback is not None: + nodes = self.nodelist_callback() + if self.ttl_time is not None: + ttl = self.ttl_time + return nodes, ttl + + patch_func: Any + if self.count_resolver_calls: + patch_func = FunctionCallRecorder(mock_get_hosts_and_min_ttl) + else: + patch_func = mock_get_hosts_and_min_ttl + + pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + + def __enter__(self): + self.enable() + + def disable(self): + common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore + pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response + ) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + +class TestSrvPolling(AsyncPyMongoTestCase): + BASE_SRV_RESPONSE = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27018), + ] + + CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" + + async def asyncSetUp(self): + # Patch timeouts to ensure short rescan SRV interval. + self.client_knobs = client_knobs( + heartbeat_frequency=WAIT_TIME, + min_heartbeat_interval=WAIT_TIME, + events_queue_frequency=WAIT_TIME, + ) + self.client_knobs.enable() + + async def asyncTearDown(self): + self.client_knobs.disable() + + def get_nodelist(self, client): + return client._topology.description.server_descriptions().keys() + + async def assert_nodelist_change(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): + """Check if the client._topology eventually sees all nodes in the + expected_nodelist. + """ + + def predicate(): + nodelist = self.get_nodelist(client) + if set(expected_nodelist) == set(nodelist): + return True + return False + + await async_wait_until(predicate, "see expected nodelist", timeout=timeout) + + async def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): + """Check if the client._topology ever deviates from seeing all nodes + in the expected_nodelist. Consistency is checked after sleeping for + (WAIT_TIME * 10) seconds. Also check that the resolver is called at + least once. + """ + + def predicate(): + if set(expected_nodelist) == set(self.get_nodelist(client)): + return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return False + + await async_wait_until(predicate, "Node list equals expected nodelist", timeout=timeout) + + nodelist = self.get_nodelist(client) + if set(expected_nodelist) != set(nodelist): + msg = "Client nodelist %s changed unexpectedly (expected %s)" + raise self.fail(msg % (nodelist, expected_nodelist)) + self.assertGreaterEqual( + pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore + 1, + "resolver was never called", + ) + return True + + async def run_scenario(self, dns_response, expect_change): + self.assertEqual(_have_dnspython(), True) + if callable(dns_response): + dns_resolver_response = dns_response + else: + + def dns_resolver_response(): + return dns_response + + if expect_change: + assertion_method = self.assert_nodelist_change + count_resolver_calls = False + expected_response = dns_response + else: + assertion_method = self.assert_nodelist_nochange + count_resolver_calls = True + expected_response = self.BASE_SRV_RESPONSE + + # Patch timeouts to ensure short test running times. + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + await self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) + # Patch list of hosts returned by DNS query. + with SrvPollingKnobs( + nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls + ): + await assertion_method(expected_response, client) + + async def test_addition(self): + response = self.BASE_SRV_RESPONSE[:] + response.append(("localhost.test.build.10gen.cc", 27019)) + await self.run_scenario(response, True) + + async def test_removal(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + await self.run_scenario(response, True) + + async def test_replace_one(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) + await self.run_scenario(response, True) + + async def test_replace_both_with_one(self): + response = [("localhost.test.build.10gen.cc", 27019)] + await self.run_scenario(response, True) + + async def test_replace_both_with_two(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + await self.run_scenario(response, True) + + async def test_dns_failures(self): + from dns import exception + + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + + def response_callback(*args): + raise exc("DNS Failure!") + + await self.run_scenario(response_callback, False) + + async def test_dns_record_lookup_empty(self): + response: list = [] + await self.run_scenario(response, False) + + async def _test_recover_from_initial(self, initial_callback): + # Construct a valid final response callback distinct from base. + response_final = self.BASE_SRV_RESPONSE[:] + response_final.pop() + + def final_callback(): + return response_final + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): + # Client uses unpatched method to get initial nodelist + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + # Invalid DNS resolver response should not change nodelist. + await self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): + # Nodelist should reflect new valid DNS resolver response. + await self.assert_nodelist_change(response_final, client) + + async def test_recover_from_initially_empty_seedlist(self): + def empty_seedlist(): + return [] + + await self._test_recover_from_initial(empty_seedlist) + + async def test_recover_from_initially_erroring_seedlist(self): + def erroring_seedlist(): + raise ConfigurationError + + await self._test_recover_from_initial(erroring_seedlist) + + async def test_10_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_11_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_12_new_dns_randomly_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await asyncio.sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set(client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) + self.assertEqual(len(final_topology), 2) + + async def test_does_not_flipflop(self): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1) + await client.aconnect() + old = set(client.topology_description.server_descriptions()) + await asyncio.sleep(4 * WAIT_TIME) + new = set(client.topology_description.server_descriptions()) + self.assertSetEqual(old, new) + + async def test_srv_service_name(self): + # Construct a valid final response callback distinct from base. + response = [ + ("localhost.test.build.10gen.cc.", 27019), + ("localhost.test.build.10gen.cc.", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client( + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" + ) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_srv_waits_to_poll(self): + modified = [("localhost.test.build.10gen.cc", 27019)] + + def resolver_response(): + return modified + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=resolver_response, + ): + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + with self.assertRaises(AssertionError): + await self.assert_nodelist_change(modified, client, timeout=WAIT_TIME / 2) + + def test_import_dns_resolver(self): + # Regression test for PYTHON-4407 + import dns.resolver + + self.assertTrue(hasattr(dns.resolver, "resolve")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index e01552bf7d..86fad6d90e 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -15,8 +15,9 @@ """Run the SRV support tests.""" from __future__ import annotations +import asyncio import sys -from time import sleep +import time from typing import Any sys.path[0:0] = [""] @@ -28,7 +29,8 @@ from pymongo import common from pymongo.errors import ConfigurationError from pymongo.srv_resolver import _have_dnspython -from pymongo.synchronous.mongo_client import MongoClient + +_IS_SYNC = True WAIT_TIME = 0.1 @@ -168,6 +170,7 @@ def dns_resolver_response(): # Patch timeouts to ensure short test running times. with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = self.simple_client(self.CONNECTION_STRING) + client._connect() self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. with SrvPollingKnobs( @@ -232,6 +235,7 @@ def final_callback(): ): # Client uses unpatched method to get initial nodelist client = self.simple_client(self.CONNECTION_STRING) + client._connect() # Invalid DNS resolver response should not change nodelist. self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) @@ -265,6 +269,7 @@ def nodelist_callback(): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -279,6 +284,7 @@ def nodelist_callback(): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -294,8 +300,9 @@ def nodelist_callback(): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): - sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + time.sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) final_topology = set(client.topology_description.server_descriptions()) self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) self.assertEqual(len(final_topology), 2) @@ -303,8 +310,9 @@ def nodelist_callback(): def test_does_not_flipflop(self): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1) + client._connect() old = set(client.topology_description.server_descriptions()) - sleep(4 * WAIT_TIME) + time.sleep(4 * WAIT_TIME) new = set(client.topology_description.server_descriptions()) self.assertSetEqual(old, new) @@ -322,6 +330,7 @@ def nodelist_callback(): client = self.simple_client( "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" ) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -337,9 +346,9 @@ def resolver_response(): nodelist_callback=resolver_response, ): client = self.simple_client(self.CONNECTION_STRING) - self.assertRaises( - AssertionError, self.assert_nodelist_change, modified, client, timeout=WAIT_TIME / 2 - ) + client._connect() + with self.assertRaises(AssertionError): + self.assert_nodelist_change(modified, client, timeout=WAIT_TIME / 2) def test_import_dns_resolver(self): # Regression test for PYTHON-4407 diff --git a/tools/synchro.py b/tools/synchro.py index ba0a4712e3..6317cb84fb 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -226,6 +226,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes.py", "test_retryable_writes_unified.py", "test_session.py", + "test_srv_polling.py", "test_transactions.py", "unified_format.py", ] From 1fda6a2310511a1e39e0476418f6477495441101 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 3 Feb 2025 15:48:04 -0500 Subject: [PATCH 1715/2111] PYTHON-5110 - Convert test.test_unified_format to async (#2130) --- test/asynchronous/test_unified_format.py | 99 ++++++++++++++++++++++++ test/asynchronous/unified_format.py | 2 +- test/test_unified_format.py | 17 ++-- test/unified_format.py | 2 +- tools/synchro.py | 1 + 5 files changed, 114 insertions(+), 7 deletions(-) create mode 100644 test/asynchronous/test_unified_format.py diff --git a/test/asynchronous/test_unified_format.py b/test/asynchronous/test_unified_format.py new file mode 100644 index 0000000000..a005739e95 --- /dev/null +++ b/test/asynchronous/test_unified_format.py @@ -0,0 +1,99 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from typing import Any + +sys.path[0:0] = [""] + +from test import UnitTest, unittest +from test.asynchronous.unified_format import MatchEvaluatorUtil, generate_test_classes + +from bson import ObjectId + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "unified-test-format") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "unified-test-format") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + RUN_ON_SERVERLESS=False, + ) +) + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + RUN_ON_SERVERLESS=False, + ) +) + + +class TestMatchEvaluatorUtil(UnitTest): + def setUp(self): + self.match_evaluator = MatchEvaluatorUtil(self) + + def test_unsetOrMatches(self): + spec: dict[str, Any] = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: + self.match_evaluator.match_result(spec, actual) + + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: + self.match_evaluator.match_result(spec, actual) + + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + + def test_type(self): + self.match_evaluator.match_result( + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 52d964eb3e..37248e9ad8 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -711,7 +711,7 @@ async def _databaseOperation_runCommand(self, target, **kwargs): return await target.command(**kwargs) async def _databaseOperation_runCursorCommand(self, target, **kwargs): - return list(await self._databaseOperation_createCommandCursor(target, **kwargs)) + return await (await self._databaseOperation_createCommandCursor(target, **kwargs)).to_list() async def _databaseOperation_createCommandCursor(self, target, **kwargs): self.__raise_if_unsupported("createCommandCursor", target, AsyncDatabase) diff --git a/test/test_unified_format.py b/test/test_unified_format.py index 1b3a134237..05f58d5d06 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -15,21 +15,28 @@ import os import sys +from pathlib import Path from typing import Any sys.path[0:0] = [""] -from test import unittest +from test import UnitTest, unittest from test.unified_format import MatchEvaluatorUtil, generate_test_classes from bson import ObjectId -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unified-test-format") +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "unified-test-format") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "unified-test-format") globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "valid-pass"), + os.path.join(TEST_PATH, "valid-pass"), module=__name__, class_name_prefix="UnifiedTestFormat", expected_failures=[ @@ -42,7 +49,7 @@ globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "valid-fail"), + os.path.join(TEST_PATH, "valid-fail"), module=__name__, class_name_prefix="UnifiedTestFormat", bypass_test_generation_errors=True, @@ -54,7 +61,7 @@ ) -class TestMatchEvaluatorUtil(unittest.TestCase): +class TestMatchEvaluatorUtil(UnitTest): def setUp(self): self.match_evaluator = MatchEvaluatorUtil(self) diff --git a/test/unified_format.py b/test/unified_format.py index 372eb8abba..e66b57f9db 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -708,7 +708,7 @@ def _databaseOperation_runCommand(self, target, **kwargs): return target.command(**kwargs) def _databaseOperation_runCursorCommand(self, target, **kwargs): - return list(self._databaseOperation_createCommandCursor(target, **kwargs)) + return (self._databaseOperation_createCommandCursor(target, **kwargs)).to_list() def _databaseOperation_createCommandCursor(self, target, **kwargs): self.__raise_if_unsupported("createCommandCursor", target, Database) diff --git a/tools/synchro.py b/tools/synchro.py index 6317cb84fb..f83c5d4caf 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -228,6 +228,7 @@ def async_only_test(f: str) -> bool: "test_session.py", "test_srv_polling.py", "test_transactions.py", + "test_unified_format.py", "unified_format.py", ] From b47143cd1047f388d2a76df09f52b19a728ac4ab Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 4 Feb 2025 07:42:13 -0500 Subject: [PATCH 1716/2111] PYTHON-4864 - Create async version of SpecRunnerThread (#2094) --- test/asynchronous/helpers.py | 37 ++++++++++++++++++++++++++ test/asynchronous/unified_format.py | 16 +++++------ test/asynchronous/utils_spec_runner.py | 29 ++++++++++---------- test/helpers.py | 37 ++++++++++++++++++++++++++ test/unified_format.py | 2 +- test/utils_spec_runner.py | 13 +++++---- tools/synchro.py | 1 + 7 files changed, 104 insertions(+), 31 deletions(-) diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index b5fc5d8ac4..7758f281e1 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -15,6 +15,7 @@ """Shared constants and helper methods for pymongo, bson, and gridfs test suites.""" from __future__ import annotations +import asyncio import base64 import gc import multiprocessing @@ -30,6 +31,8 @@ import warnings from asyncio import iscoroutinefunction +from pymongo._asyncio_task import create_task + try: import ipaddress @@ -369,3 +372,37 @@ def disable(self): os.environ.pop("SSL_CERT_FILE") else: os.environ["SSL_CERT_FILE"] = self.original_certs + + +if _IS_SYNC: + PARENT = threading.Thread +else: + PARENT = object + + +class ConcurrentRunner(PARENT): + def __init__(self, name, *args, **kwargs): + if _IS_SYNC: + super().__init__(*args, **kwargs) + self.name = name + self.stopped = False + self.task = None + if "target" in kwargs: + self.target = kwargs["target"] + + if not _IS_SYNC: + + async def start(self): + self.task = create_task(self.run(), name=self.name) + + async def join(self, timeout: float | None = 0): # type: ignore[override] + if self.task is not None: + await asyncio.wait([self.task], timeout=timeout) + + def is_alive(self): + return not self.stopped + + async def run(self): + if self.target: + await self.target() + self.stopped = True diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 37248e9ad8..149aad9786 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -35,6 +35,7 @@ client_knobs, unittest, ) +from test.asynchronous.utils_spec_runner import SpecRunnerTask from test.unified_format_shared import ( KMS_TLS_OPTS, PLACEHOLDER_MAP, @@ -58,7 +59,6 @@ snake_to_camel, wait_until, ) -from test.utils_spec_runner import SpecRunnerThread from test.version import Version from typing import Any, Dict, List, Mapping, Optional @@ -382,8 +382,8 @@ async def drop(self: AsyncGridFSBucket, *args: Any, **kwargs: Any) -> None: return elif entity_type == "thread": name = spec["id"] - thread = SpecRunnerThread(name) - thread.start() + thread = SpecRunnerTask(name) + await thread.start() self[name] = thread return @@ -1177,16 +1177,16 @@ def primary_changed() -> bool: wait_until(primary_changed, "change primary", timeout=timeout) - def _testOperation_runOnThread(self, spec): + async def _testOperation_runOnThread(self, spec): """Run the 'runOnThread' operation.""" thread = self.entity_map[spec["thread"]] - thread.schedule(lambda: self.run_entity_operation(spec["operation"])) + await thread.schedule(functools.partial(self.run_entity_operation, spec["operation"])) - def _testOperation_waitForThread(self, spec): + async def _testOperation_waitForThread(self, spec): """Run the 'waitForThread' operation.""" thread = self.entity_map[spec["thread"]] - thread.stop() - thread.join(10) + await thread.stop() + await thread.join(10) if thread.exc: raise thread.exc self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index b79e5258b5..d103374313 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -18,11 +18,11 @@ import asyncio import functools import os -import threading import unittest from asyncio import iscoroutinefunction from collections import abc from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs +from test.asynchronous.helpers import ConcurrentRunner from test.utils import ( CMAPListener, CompareType, @@ -47,6 +47,7 @@ from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError +from pymongo.lock import _async_cond_wait, _async_create_condition, _async_create_lock from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult, _WriteResult @@ -55,38 +56,36 @@ _IS_SYNC = False -class SpecRunnerThread(threading.Thread): +class SpecRunnerTask(ConcurrentRunner): def __init__(self, name): - super().__init__() - self.name = name + super().__init__(name) self.exc = None self.daemon = True - self.cond = threading.Condition() + self.cond = _async_create_condition(_async_create_lock()) self.ops = [] - self.stopped = False - def schedule(self, work): + async def schedule(self, work): self.ops.append(work) - with self.cond: + async with self.cond: self.cond.notify() - def stop(self): + async def stop(self): self.stopped = True - with self.cond: + async with self.cond: self.cond.notify() - def run(self): + async def run(self): while not self.stopped or self.ops: if not self.ops: - with self.cond: - self.cond.wait(10) + async with self.cond: + await _async_cond_wait(self.cond, 10) if self.ops: try: work = self.ops.pop(0) - work() + await work() except Exception as exc: self.exc = exc - self.stop() + await self.stop() class AsyncSpecTestCreator: diff --git a/test/helpers.py b/test/helpers.py index 11d5ab0374..bd9e23bba4 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -15,6 +15,7 @@ """Shared constants and helper methods for pymongo, bson, and gridfs test suites.""" from __future__ import annotations +import asyncio import base64 import gc import multiprocessing @@ -30,6 +31,8 @@ import warnings from asyncio import iscoroutinefunction +from pymongo._asyncio_task import create_task + try: import ipaddress @@ -369,3 +372,37 @@ def disable(self): os.environ.pop("SSL_CERT_FILE") else: os.environ["SSL_CERT_FILE"] = self.original_certs + + +if _IS_SYNC: + PARENT = threading.Thread +else: + PARENT = object + + +class ConcurrentRunner(PARENT): + def __init__(self, name, *args, **kwargs): + if _IS_SYNC: + super().__init__(*args, **kwargs) + self.name = name + self.stopped = False + self.task = None + if "target" in kwargs: + self.target = kwargs["target"] + + if not _IS_SYNC: + + def start(self): + self.task = create_task(self.run(), name=self.name) + + def join(self, timeout: float | None = 0): # type: ignore[override] + if self.task is not None: + asyncio.wait([self.task], timeout=timeout) + + def is_alive(self): + return not self.stopped + + def run(self): + if self.target: + self.target() + self.stopped = True diff --git a/test/unified_format.py b/test/unified_format.py index e66b57f9db..b2e6ae1e83 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1167,7 +1167,7 @@ def primary_changed() -> bool: def _testOperation_runOnThread(self, spec): """Run the 'runOnThread' operation.""" thread = self.entity_map[spec["thread"]] - thread.schedule(lambda: self.run_entity_operation(spec["operation"])) + thread.schedule(functools.partial(self.run_entity_operation, spec["operation"])) def _testOperation_waitForThread(self, spec): """Run the 'waitForThread' operation.""" diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 4508502cd0..6a62112afb 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -18,11 +18,11 @@ import asyncio import functools import os -import threading import unittest from asyncio import iscoroutinefunction from collections import abc from test import IntegrationTest, client_context, client_knobs +from test.helpers import ConcurrentRunner from test.utils import ( CMAPListener, CompareType, @@ -44,6 +44,7 @@ from gridfs import GridFSBucket from gridfs.synchronous.grid_file import GridFSBucket from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError +from pymongo.lock import _cond_wait, _create_condition, _create_lock from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult, _WriteResult @@ -55,15 +56,13 @@ _IS_SYNC = True -class SpecRunnerThread(threading.Thread): +class SpecRunnerThread(ConcurrentRunner): def __init__(self, name): - super().__init__() - self.name = name + super().__init__(name) self.exc = None self.daemon = True - self.cond = threading.Condition() + self.cond = _create_condition(_create_lock()) self.ops = [] - self.stopped = False def schedule(self, work): self.ops.append(work) @@ -79,7 +78,7 @@ def run(self): while not self.stopped or self.ops: if not self.ops: with self.cond: - self.cond.wait(10) + _cond_wait(self.cond, 10) if self.ops: try: work = self.ops.pop(0) diff --git a/tools/synchro.py b/tools/synchro.py index f83c5d4caf..8c03a346e8 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -119,6 +119,7 @@ "_async_create_lock": "_create_lock", "_async_create_condition": "_create_condition", "_async_cond_wait": "_cond_wait", + "SpecRunnerTask": "SpecRunnerThread", "AsyncMockConnection": "MockConnection", "AsyncMockPool": "MockPool", } From 68237f78ecda3399518c3700b3593b028eb9eeef Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 4 Feb 2025 09:42:21 -0500 Subject: [PATCH 1717/2111] PYTHON-5098 - Convert test.test_run_command to async (#2114) --- test/asynchronous/test_run_command.py | 41 +++++++++++++++++++++++++++ test/test_run_command.py | 26 +++++++++++++++-- tools/synchro.py | 1 + 3 files changed, 66 insertions(+), 2 deletions(-) create mode 100644 test/asynchronous/test_run_command.py diff --git a/test/asynchronous/test_run_command.py b/test/asynchronous/test_run_command.py new file mode 100644 index 0000000000..3ac8c32706 --- /dev/null +++ b/test/asynchronous/test_run_command.py @@ -0,0 +1,41 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Command unified tests.""" +from __future__ import annotations + +import os +import unittest +from pathlib import Path +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "run_command") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "run_command") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_run_command.py b/test/test_run_command.py index 486a4c7e39..d2ef43b97e 100644 --- a/test/test_run_command.py +++ b/test/test_run_command.py @@ -1,15 +1,37 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Command unified tests.""" from __future__ import annotations import os import unittest +from pathlib import Path from test.unified_format import generate_test_classes -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "run_command") +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "run_command") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "run_command") globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "unified"), + os.path.join(TEST_PATH, "unified"), module=__name__, ) ) diff --git a/tools/synchro.py b/tools/synchro.py index 8c03a346e8..4ac5604f21 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -226,6 +226,7 @@ def async_only_test(f: str) -> bool: "test_retryable_reads_unified.py", "test_retryable_writes.py", "test_retryable_writes_unified.py", + "test_run_command.py", "test_session.py", "test_srv_polling.py", "test_transactions.py", From 554e1fddb8ac83d19237975cdfa7682c6b0f491c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 4 Feb 2025 09:43:35 -0500 Subject: [PATCH 1718/2111] PYTHON-5106 - Convert test.test_ssl to async (#2125) --- test/asynchronous/test_ssl.py | 662 ++++++++++++++++++++++++++++++++++ test/test_ssl.py | 30 +- tools/synchro.py | 1 + 3 files changed, 677 insertions(+), 16 deletions(-) create mode 100644 test/asynchronous/test_ssl.py diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py new file mode 100644 index 0000000000..d50bb220b1 --- /dev/null +++ b/test/asynchronous/test_ssl.py @@ -0,0 +1,662 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for SSL support.""" +from __future__ import annotations + +import os +import pathlib +import socket +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import ( + HAVE_IPADDRESS, + AsyncIntegrationTest, + AsyncPyMongoTestCase, + SkipTest, + async_client_context, + connected, + remove_all_users, + unittest, +) +from test.utils import ( + EventListener, + OvertCommandListener, + cat_files, + ignore_deprecations, +) +from urllib.parse import quote_plus + +from pymongo import AsyncMongoClient, ssl_support +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context +from pymongo.write_concern import WriteConcern + +_HAVE_PYOPENSSL = False +try: + # All of these must be available to use PyOpenSSL + import OpenSSL + import requests + import service_identity + + # Ensure service_identity>=18.1 is installed + from service_identity.pyopenssl import verify_ip_address + + from pymongo.ocsp_support import _load_trusted_ca_certs + + _HAVE_PYOPENSSL = True +except ImportError: + _load_trusted_ca_certs = None # type: ignore + + +if HAVE_SSL: + import ssl + +_IS_SYNC = False + +if _IS_SYNC: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "certificates") +else: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "certificates") + +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" + +# To fully test this start a mongod instance (built with SSL support) like so: +# mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ +# --sslPEMKeyFile /path/to/pymongo/test/certificates/server.pem \ +# --sslCAFile /path/to/pymongo/test/certificates/ca.pem \ +# --sslWeakCertificateValidation +# Also, make sure you have 'server' as an alias for localhost in /etc/hosts +# +# Note: For all replica set tests to pass, the replica set configuration must +# use 'localhost' for the hostname of all hosts. + + +class TestClientSSL(AsyncPyMongoTestCase): + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") + def test_no_ssl_module(self): + # Explicit + self.assertRaises(ConfigurationError, self.simple_client, ssl=True) + + # Implied + self.assertRaises(ConfigurationError, self.simple_client, tlsCertificateKeyFile=CLIENT_PEM) + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + @ignore_deprecations + def test_config_ssl(self): + # Tests various ssl configurations + self.assertRaises(ValueError, self.simple_client, ssl="foo") + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(TypeError, self.simple_client, ssl=0) + self.assertRaises(TypeError, self.simple_client, ssl=5.5) + self.assertRaises(TypeError, self.simple_client, ssl=[]) + + self.assertRaises(IOError, self.simple_client, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=[]) + + # Test invalid combinations + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidCertificates=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsDisableOCSPEndpointCheck=False + ) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + def test_use_pyopenssl_when_available(self): + self.assertTrue(_ssl.IS_PYOPENSSL) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") + def test_load_trusted_ca_certs(self): + trusted_ca_certs = _load_trusted_ca_certs(CA_BUNDLE_PEM) + self.assertEqual(2, len(trusted_ca_certs)) + + +class TestSSL(AsyncIntegrationTest): + saved_port: int + + async def assertClientWorks(self, client): + coll = client.pymongo_test.ssl_test.with_options( + write_concern=WriteConcern(w=async_client_context.w) + ) + await coll.drop() + await coll.insert_one({"ssl": True}) + self.assertTrue((await coll.find_one())["ssl"]) + await coll.drop() + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + async def asyncSetUp(self): + await super().asyncSetUp() + # MongoClient should connect to the primary by default. + self.saved_port = AsyncMongoClient.PORT + AsyncMongoClient.PORT = await async_client_context.port + + async def asyncTearDown(self): + AsyncMongoClient.PORT = self.saved_port + + @async_client_context.require_tls + async def test_simple_ssl(self): + # Expects the server to be running with ssl and with + # no --sslPEMKeyFile or with --sslWeakCertificateValidation + await self.assertClientWorks(self.client) + + @async_client_context.require_tlsCertificateKeyFile + @ignore_deprecations + async def test_tlsCertificateKeyFilePassword(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + await connected( + self.simple_client(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_implicitly_set(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + + # test that setting tlsCertificateKeyFile causes ssl to be set to True + client = self.simple_client( + await async_client_context.host, + await async_client_context.port, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + response = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + client = self.simple_client( + await async_client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_validation(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + client = self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + response = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. " + "Cannot validate hostname in the certificate" + ) + + client = self.simple_client( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + + await self.assertClientWorks(client) + + if HAVE_IPADDRESS: + client = self.simple_client( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_uri_support(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_server_resolvable + @ignore_deprecations + async def test_cert_ssl_validation_hostname_matching(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + ctx = get_ssl_context(None, None, None, None, True, True, False) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, True, False, False) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, True, False) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, False, False) + self.assertTrue(ctx.check_hostname) + + response = await self.client.admin.command(HelloCompat.LEGACY_CMD) + + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + await connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + if "setName" in response: + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + await connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + @async_client_context.require_tlsCertificateKeyFile + @ignore_deprecations + async def test_tlsCRLFile_support(self): + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + await connected(self.simple_client(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_server_resolvable + @ignore_deprecations + async def test_validation_with_system_ca_certs(self): + # Expects the server to be running with server.pem and ca.pem. + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # --sslWeakCertificateValidation + # + self.patch_system_certs(CA_PEM) + with self.assertRaises(ConnectionFailure): + # Server cert is verified but hostname matching fails + await connected( + self.simple_client( + "server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert is verified. Disable hostname matching. + await connected( + self.simple_client( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + # Server cert and hostname are verified. + await connected( + self.simple_client( + "localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert and hostname are verified. + await connected( + self.simple_client( + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", + **self.credentials, # type: ignore[arg-type] + ) + ) + + def test_system_certs_config_error(self): + ctx = get_ssl_context(None, None, None, None, True, True, False) + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") + + have_certifi = ssl_support.HAVE_CERTIFI + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test regardless of environment. + ssl_support.HAVE_CERTIFI = False + ssl_support.HAVE_WINCERTSTORE = False + try: + with self.assertRaises(ConfigurationError): + self.simple_client("mongodb://localhost/?ssl=true") + finally: + ssl_support.HAVE_CERTIFI = have_certifi + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_certifi_support(self): + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_CERTIFI: + raise SkipTest("Need certifi to test certifi support.") + + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test on Windows, regardless of environment. + ssl_support.HAVE_WINCERTSTORE = False + try: + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) + finally: + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_wincertstore(self): + if sys.platform != "win32": + raise SkipTest("Only valid on Windows.") + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_WINCERTSTORE: + raise SkipTest("Need wincertstore to test wincertstore.") + + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) + + @async_client_context.require_auth + @async_client_context.require_tlsCertificateKeyFile + @ignore_deprecations + async def test_mongodb_x509_auth(self): + host, port = await async_client_context.host, await async_client_context.port + self.addAsyncCleanup(remove_all_users, async_client_context.client["$external"]) + + # Give x509 user all necessary privileges. + await async_client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) + + noauth = self.simple_client( + await async_client_context.pair, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + await noauth.pymongo_test.test.find_one() + + listener = EventListener() + auth = self.simple_client( + await async_client_context.pair, + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + event_listeners=[listener], + ) + + # No error + await auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if async_client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. + self.assertEqual(names, ["find"]) + else: + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + await client.pymongo_test.test.find_one() + + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + await client.pymongo_test.test.find_one() + # Auth should fail if username and certificate do not match + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) + + bad_client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + bad_client = self.simple_client( + await async_client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + # Invalid certificate (using CA certificate as client certificate) + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + try: + await connected( + self.simple_client( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + ) + except (ConnectionFailure, ConfigurationError): + pass + else: + self.fail("Invalid certificate accepted.") + + @async_client_context.require_tlsCertificateKeyFile + @ignore_deprecations + async def test_connect_with_ca_bundle(self): + def remove(path): + try: + os.remove(path) + except OSError: + pass + + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") + self.addCleanup(remove, temp_ca_bundle) + # Add the CA cert file to the bundle. + cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) + async with self.simple_client( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(await client.admin.command("ping")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index 04db9b61a4..7d6c3f7cd1 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -16,6 +16,7 @@ from __future__ import annotations import os +import pathlib import socket import sys @@ -65,7 +66,13 @@ if HAVE_SSL: import ssl -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +_IS_SYNC = True + +if _IS_SYNC: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "certificates") +else: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "certificates") + CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") CA_PEM = os.path.join(CERT_PATH, "ca.pem") @@ -144,21 +151,18 @@ def assertClientWorks(self, client): ) coll.drop() coll.insert_one({"ssl": True}) - self.assertTrue(coll.find_one()["ssl"]) + self.assertTrue((coll.find_one())["ssl"]) coll.drop() - @classmethod @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") - def setUpClass(cls): - super().setUpClass() + def setUp(self): + super().setUp() # MongoClient should connect to the primary by default. - cls.saved_port = MongoClient.PORT + self.saved_port = MongoClient.PORT MongoClient.PORT = client_context.port - @classmethod - def tearDownClass(cls): - MongoClient.PORT = cls.saved_port - super().tearDownClass() + def tearDown(self): + MongoClient.PORT = self.saved_port @client_context.require_tls def test_simple_ssl(self): @@ -548,7 +552,6 @@ def test_mongodb_x509_auth(self): tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, ) - self.addCleanup(noauth.close) with self.assertRaises(OperationFailure): noauth.pymongo_test.test.find_one() @@ -562,7 +565,6 @@ def test_mongodb_x509_auth(self): tlsCertificateKeyFile=CLIENT_PEM, event_listeners=[listener], ) - self.addCleanup(auth.close) # No error auth.pymongo_test.test.find_one() @@ -581,7 +583,6 @@ def test_mongodb_x509_auth(self): client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) - self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() @@ -589,7 +590,6 @@ def test_mongodb_x509_auth(self): client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) - self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match @@ -602,7 +602,6 @@ def test_mongodb_x509_auth(self): bad_client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) - self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() @@ -615,7 +614,6 @@ def test_mongodb_x509_auth(self): tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, ) - self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() diff --git a/tools/synchro.py b/tools/synchro.py index 4ac5604f21..9f59448bb7 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -229,6 +229,7 @@ def async_only_test(f: str) -> bool: "test_run_command.py", "test_session.py", "test_srv_polling.py", + "test_ssl.py", "test_transactions.py", "test_unified_format.py", "unified_format.py", From 097a853805f986bf02e75b5a92e5b12d470d570d Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 4 Feb 2025 09:52:26 -0800 Subject: [PATCH 1719/2111] PYTHON 5104 - Convert test.test_sessions_unified to async (#2123) Co-authored-by: Noah Stapp --- test/asynchronous/test_sessions_unified.py | 40 ++++++++++++++++++++++ test/test_sessions_unified.py | 9 ++++- tools/synchro.py | 1 + 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_sessions_unified.py diff --git a/test/asynchronous/test_sessions_unified.py b/test/asynchronous/test_sessions_unified.py new file mode 100644 index 0000000000..b4cbac5704 --- /dev/null +++ b/test/asynchronous/test_sessions_unified.py @@ -0,0 +1,40 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Sessions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sessions") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sessions") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py index c51b4642e7..3c80c70d38 100644 --- a/test/test_sessions_unified.py +++ b/test/test_sessions_unified.py @@ -17,14 +17,21 @@ import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sessions") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sessions") + # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/tools/synchro.py b/tools/synchro.py index 9f59448bb7..7b5892f276 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -228,6 +228,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes_unified.py", "test_run_command.py", "test_session.py", + "test_sessions_unified.py", "test_srv_polling.py", "test_ssl.py", "test_transactions.py", From 2c492155a6b2284746e53ededf65ac30800b5536 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 4 Feb 2025 10:30:35 -0800 Subject: [PATCH 1720/2111] PYTHON-5103 Convert test.test_server_selection_rtt to async (#2122) Co-authored-by: Noah Stapp --- .../asynchronous/test_server_selection_rtt.py | 77 +++++++++++++++++++ test/test_server_selection_rtt.py | 14 +++- tools/synchro.py | 1 + 3 files changed, 88 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_server_selection_rtt.py diff --git a/test/asynchronous/test_server_selection_rtt.py b/test/asynchronous/test_server_selection_rtt.py new file mode 100644 index 0000000000..1f8f6bc7df --- /dev/null +++ b/test/asynchronous/test_server_selection_rtt.py @@ -0,0 +1,77 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module.""" +from __future__ import annotations + +import json +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import AsyncPyMongoTestCase + +from pymongo.read_preferences import MovingAverage + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection/rtt") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection/rtt") + + +class TestAllScenarios(AsyncPyMongoTestCase): + pass + + +def create_test(scenario_def): + def run_scenario(self): + moving_average = MovingAverage() + + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) + + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) + + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json.load(scenario_stream) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index a129af4585..2aef36a585 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -18,18 +18,24 @@ import json import os import sys +from pathlib import Path sys.path[0:0] = [""] -from test import unittest +from test import PyMongoTestCase, unittest from pymongo.read_preferences import MovingAverage +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection/rtt") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection/rtt") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection/rtt") -class TestAllScenarios(unittest.TestCase): +class TestAllScenarios(PyMongoTestCase): pass @@ -49,7 +55,7 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(TEST_PATH): dirname = os.path.split(dirpath)[-1] for filename in filenames: diff --git a/tools/synchro.py b/tools/synchro.py index 7b5892f276..f9a4c5208a 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -228,6 +228,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes_unified.py", "test_run_command.py", "test_session.py", + "test_server_selection_rtt.py", "test_sessions_unified.py", "test_srv_polling.py", "test_ssl.py", From 8ae9a0432a867e8d0d9dbb81d15830323cc3c7ae Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 4 Feb 2025 10:31:11 -0800 Subject: [PATCH 1721/2111] PYTHON-5102 Convert test.test_server_selection_logging to async (#2121) Co-authored-by: Noah Stapp --- .../test_server_selection_logging.py | 45 +++++++++++++++++++ test/test_server_selection_logging.py | 10 ++++- tools/synchro.py | 1 + 3 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 test/asynchronous/test_server_selection_logging.py diff --git a/test/asynchronous/test_server_selection_logging.py b/test/asynchronous/test_server_selection_logging.py new file mode 100644 index 0000000000..6b0975318a --- /dev/null +++ b/test/asynchronous/test_server_selection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the server selection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection_logging") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection_logging") + + +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_logging.py b/test/test_server_selection_logging.py index 2df749cb10..d53d8dc84f 100644 --- a/test/test_server_selection_logging.py +++ b/test/test_server_selection_logging.py @@ -17,19 +17,25 @@ import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection_logging") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection_logging") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection_logging") globals().update( generate_test_classes( - _TEST_PATH, + TEST_PATH, module=__name__, ) ) diff --git a/tools/synchro.py b/tools/synchro.py index f9a4c5208a..06dc708e08 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -227,6 +227,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes.py", "test_retryable_writes_unified.py", "test_run_command.py", + "test_server_selection_logging.py", "test_session.py", "test_server_selection_rtt.py", "test_sessions_unified.py", From 7a4150ac17859444eea38dd98682672c0d5935bb Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 5 Feb 2025 08:48:54 -0500 Subject: [PATCH 1722/2111] PYTHON-5080 - Convert test.test_examples to async (#2097) --- test/asynchronous/helpers.py | 17 +- test/asynchronous/test_examples.py | 1461 ++++++++++++++++++++++++ test/asynchronous/utils_spec_runner.py | 2 +- test/helpers.py | 17 +- test/test_examples.py | 163 +-- test/utils_spec_runner.py | 2 +- tools/synchro.py | 1 + 7 files changed, 1580 insertions(+), 83 deletions(-) create mode 100644 test/asynchronous/test_examples.py diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index 7758f281e1..a35c71b107 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -381,14 +381,14 @@ def disable(self): class ConcurrentRunner(PARENT): - def __init__(self, name, *args, **kwargs): + def __init__(self, **kwargs): if _IS_SYNC: - super().__init__(*args, **kwargs) - self.name = name + super().__init__(**kwargs) + self.name = kwargs.get("name", "ConcurrentRunner") self.stopped = False self.task = None - if "target" in kwargs: - self.target = kwargs["target"] + self.target = kwargs.get("target", None) + self.args = kwargs.get("args", []) if not _IS_SYNC: @@ -403,6 +403,7 @@ def is_alive(self): return not self.stopped async def run(self): - if self.target: - await self.target() - self.stopped = True + try: + await self.target(*self.args) + finally: + self.stopped = True diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py new file mode 100644 index 0000000000..7fea9d41af --- /dev/null +++ b/test/asynchronous/test_examples.py @@ -0,0 +1,1461 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MongoDB documentation examples in Python.""" +from __future__ import annotations + +import asyncio +import datetime +import functools +import sys +import threading +import time +from test.asynchronous.helpers import ConcurrentRunner + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import async_wait_until + +import pymongo +from pymongo.asynchronous.helpers import anext +from pymongo.errors import ConnectionFailure, OperationFailure +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestSampleShellCommands(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.inventory.drop() + + async def asyncTearDown(self): + # Run after every test. + await self.db.inventory.drop() + await self.client.drop_database("pymongo_test") + + async def test_first_three_examples(self): + db = self.db + + # Start Example 1 + await db.inventory.insert_one( + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) + # End Example 1 + + self.assertEqual(await db.inventory.count_documents({}), 1) + + # Start Example 2 + cursor = db.inventory.find({"item": "canvas"}) + # End Example 2 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 3 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": "mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) + # End Example 3 + + self.assertEqual(await db.inventory.count_documents({}), 4) + + async def test_query_top_level_fields(self): + db = self.db + + # Start Example 6 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 6 + + self.assertEqual(await db.inventory.count_documents({}), 5) + + # Start Example 7 + cursor = db.inventory.find({}) + # End Example 7 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 9 + cursor = db.inventory.find({"status": "D"}) + # End Example 9 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 10 + cursor = db.inventory.find({"status": {"$in": ["A", "D"]}}) + # End Example 10 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 11 + cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}}) + # End Example 11 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 12 + cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) + # End Example 12 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 13 + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) + # End Example 13 + + self.assertEqual(len(await cursor.to_list()), 2) + + async def test_query_embedded_documents(self): + db = self.db + + # Start Example 14 + # Subdocument key order matters in a few of these examples so we have + # to use bson.son.SON instead of a Python dict. + from bson.son import SON + + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "status": "A", + }, + ] + ) + # End Example 14 + + # Start Example 15 + cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + # End Example 15 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 16 + cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + # End Example 16 + + self.assertEqual(len(await cursor.to_list()), 0) + + # Start Example 17 + cursor = db.inventory.find({"size.uom": "in"}) + # End Example 17 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 18 + cursor = db.inventory.find({"size.h": {"$lt": 15}}) + # End Example 18 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 19 + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + # End Example 19 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_query_arrays(self): + db = self.db + + # Start Example 20 + await db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) + # End Example 20 + + # Start Example 21 + cursor = db.inventory.find({"tags": ["red", "blank"]}) + # End Example 21 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 22 + cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}}) + # End Example 22 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 23 + cursor = db.inventory.find({"tags": "red"}) + # End Example 23 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 24 + cursor = db.inventory.find({"dim_cm": {"$gt": 25}}) + # End Example 24 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 25 + cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}}) + # End Example 25 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 26 + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + # End Example 26 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 27 + cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) + # End Example 27 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 28 + cursor = db.inventory.find({"tags": {"$size": 3}}) + # End Example 28 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_query_array_of_documents(self): + db = self.db + + # Start Example 29 + # Subdocument key order matters in a few of these examples so we have + # to use bson.son.SON instead of a Python dict. + from bson.son import SON + + await db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + SON([("warehouse", "A"), ("qty", 5)]), + SON([("warehouse", "C"), ("qty", 15)]), + ], + }, + {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + { + "item": "paper", + "instock": [ + SON([("warehouse", "A"), ("qty", 60)]), + SON([("warehouse", "B"), ("qty", 15)]), + ], + }, + { + "item": "planner", + "instock": [ + SON([("warehouse", "A"), ("qty", 40)]), + SON([("warehouse", "B"), ("qty", 5)]), + ], + }, + { + "item": "postcard", + "instock": [ + SON([("warehouse", "B"), ("qty", 15)]), + SON([("warehouse", "C"), ("qty", 35)]), + ], + }, + ] + ) + # End Example 29 + + # Start Example 30 + cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) + # End Example 30 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 31 + cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) + # End Example 31 + + self.assertEqual(len(await cursor.to_list()), 0) + + # Start Example 32 + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) + # End Example 32 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 33 + cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) + # End Example 33 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 34 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + # End Example 34 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 35 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + # End Example 35 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 36 + cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) + # End Example 36 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 37 + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) + # End Example 37 + + self.assertEqual(len(await cursor.to_list()), 2) + + async def test_query_null(self): + db = self.db + + # Start Example 38 + await db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}]) + # End Example 38 + + # Start Example 39 + cursor = db.inventory.find({"item": None}) + # End Example 39 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 40 + cursor = db.inventory.find({"item": {"$type": 10}}) + # End Example 40 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 41 + cursor = db.inventory.find({"item": {"$exists": False}}) + # End Example 41 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_projection(self): + db = self.db + + # Start Example 42 + await db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) + # End Example 42 + + # Start Example 43 + cursor = db.inventory.find({"status": "A"}) + # End Example 43 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 44 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) + # End Example 44 + + async for doc in cursor: + self.assertTrue("_id" in doc) + self.assertTrue("item" in doc) + self.assertTrue("status" in doc) + self.assertFalse("size" in doc) + self.assertFalse("instock" in doc) + + # Start Example 45 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + # End Example 45 + + async for doc in cursor: + self.assertFalse("_id" in doc) + self.assertTrue("item" in doc) + self.assertTrue("status" in doc) + self.assertFalse("size" in doc) + self.assertFalse("instock" in doc) + + # Start Example 46 + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) + # End Example 46 + + async for doc in cursor: + self.assertTrue("_id" in doc) + self.assertTrue("item" in doc) + self.assertFalse("status" in doc) + self.assertTrue("size" in doc) + self.assertFalse("instock" in doc) + + # Start Example 47 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + # End Example 47 + + async for doc in cursor: + self.assertTrue("_id" in doc) + self.assertTrue("item" in doc) + self.assertTrue("status" in doc) + self.assertTrue("size" in doc) + self.assertFalse("instock" in doc) + size = doc["size"] + self.assertTrue("uom" in size) + self.assertFalse("h" in size) + self.assertFalse("w" in size) + + # Start Example 48 + cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) + # End Example 48 + + async for doc in cursor: + self.assertTrue("_id" in doc) + self.assertTrue("item" in doc) + self.assertTrue("status" in doc) + self.assertTrue("size" in doc) + self.assertTrue("instock" in doc) + size = doc["size"] + self.assertFalse("uom" in size) + self.assertTrue("h" in size) + self.assertTrue("w" in size) + + # Start Example 49 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + # End Example 49 + + async for doc in cursor: + self.assertTrue("_id" in doc) + self.assertTrue("item" in doc) + self.assertTrue("status" in doc) + self.assertFalse("size" in doc) + self.assertTrue("instock" in doc) + for subdoc in doc["instock"]: + self.assertFalse("warehouse" in subdoc) + self.assertTrue("qty" in subdoc) + + # Start Example 50 + cursor = db.inventory.find( + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) + # End Example 50 + + async for doc in cursor: + self.assertTrue("_id" in doc) + self.assertTrue("item" in doc) + self.assertTrue("status" in doc) + self.assertFalse("size" in doc) + self.assertTrue("instock" in doc) + self.assertEqual(len(doc["instock"]), 1) + + async def test_update_and_replace(self): + db = self.db + + # Start Example 51 + await db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 51 + + # Start Example 52 + await db.inventory.update_one( + {"item": "paper"}, + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 52 + + async for doc in db.inventory.find({"item": "paper"}): + self.assertEqual(doc["size"]["uom"], "cm") + self.assertEqual(doc["status"], "P") + self.assertTrue("lastModified" in doc) + + # Start Example 53 + await db.inventory.update_many( + {"qty": {"$lt": 50}}, + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 53 + + async for doc in db.inventory.find({"qty": {"$lt": 50}}): + self.assertEqual(doc["size"]["uom"], "in") + self.assertEqual(doc["status"], "P") + self.assertTrue("lastModified" in doc) + + # Start Example 54 + await db.inventory.replace_one( + {"item": "paper"}, + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) + # End Example 54 + + async for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): + self.assertEqual(len(doc.keys()), 2) + self.assertTrue("item" in doc) + self.assertTrue("instock" in doc) + self.assertEqual(len(doc["instock"]), 2) + + async def test_delete(self): + db = self.db + + # Start Example 55 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 55 + + self.assertEqual(await db.inventory.count_documents({}), 5) + + # Start Example 57 + await db.inventory.delete_many({"status": "A"}) + # End Example 57 + + self.assertEqual(await db.inventory.count_documents({}), 3) + + # Start Example 58 + await db.inventory.delete_one({"status": "D"}) + # End Example 58 + + self.assertEqual(await db.inventory.count_documents({}), 2) + + # Start Example 56 + await db.inventory.delete_many({}) + # End Example 56 + + self.assertEqual(await db.inventory.count_documents({}), 0) + + @async_client_context.require_change_streams + async def test_change_streams(self): + db = self.db + done = False + + async def insert_docs(): + nonlocal done + while not done: + await db.inventory.insert_one({"username": "alice"}) + await db.inventory.delete_one({"username": "alice"}) + await asyncio.sleep(0.005) + + t = ConcurrentRunner(target=insert_docs) + await t.start() + + try: + # 1. The database for reactive, real-time applications + # Start Changestream Example 1 + cursor = await db.inventory.watch() + await anext(cursor) + # End Changestream Example 1 + await cursor.close() + + # Start Changestream Example 2 + cursor = await db.inventory.watch(full_document="updateLookup") + await anext(cursor) + # End Changestream Example 2 + await cursor.close() + + # Start Changestream Example 3 + resume_token = cursor.resume_token + cursor = await db.inventory.watch(resume_after=resume_token) + await anext(cursor) + # End Changestream Example 3 + await cursor.close() + + # Start Changestream Example 4 + pipeline = [ + {"$match": {"fullDocument.username": "alice"}}, + {"$addFields": {"newField": "this is an added field!"}}, + ] + cursor = await db.inventory.watch(pipeline=pipeline) + await anext(cursor) + # End Changestream Example 4 + await cursor.close() + finally: + done = True + await t.join() + + async def test_aggregate_examples(self): + db = self.db + + # Start Aggregation Example 1 + await db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}]) + # End Aggregation Example 1 + + # Start Aggregation Example 2 + await db.sales.aggregate( + [ + {"$unwind": "$items"}, + {"$match": {"items.fruit": "banana"}}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "count": {"$sum": "$items.quantity"}, + } + }, + {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}}, + {"$sort": {"numberSold": 1}}, + ] + ) + # End Aggregation Example 2 + + # Start Aggregation Example 3 + await db.sales.aggregate( + [ + {"$unwind": "$items"}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "items_sold": {"$sum": "$items.quantity"}, + "revenue": {"$sum": {"$multiply": ["$items.quantity", "$items.price"]}}, + } + }, + { + "$project": { + "day": "$_id.day", + "revenue": 1, + "items_sold": 1, + "discount": { + "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0} + }, + } + }, + ] + ) + # End Aggregation Example 3 + + # Start Aggregation Example 4 + await db.air_alliances.aggregate( + [ + { + "$lookup": { + "from": "air_airlines", + "let": {"constituents": "$airlines"}, + "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}], + "as": "airlines", + } + }, + { + "$project": { + "_id": 0, + "name": 1, + "airlines": { + "$filter": { + "input": "$airlines", + "as": "airline", + "cond": {"$eq": ["$$airline.country", "Canada"]}, + } + }, + } + }, + ] + ) + # End Aggregation Example 4 + + @async_client_context.require_version_min(4, 4) + async def test_aggregate_projection_example(self): + db = self.db + + # Start Aggregation Projection Example 1 + db.inventory.find( + {}, + { + "_id": 0, + "item": 1, + "status": { + "$switch": { + "branches": [ + {"case": {"$eq": ["$status", "A"]}, "then": "Available"}, + {"case": {"$eq": ["$status", "D"]}, "then": "Discontinued"}, + ], + "default": "No status found", + } + }, + "area": { + "$concat": [ + {"$toString": {"$multiply": ["$size.h", "$size.w"]}}, + " ", + "$size.uom", + ] + }, + "reportNumber": {"$literal": 1}, + }, + ) + + # End Aggregation Projection Example 1 + + async def test_commands(self): + db = self.db + await db.restaurants.insert_one({}) + + # Start runCommand Example 1 + await db.command("buildInfo") + # End runCommand Example 1 + + # Start runCommand Example 2 + await db.command("count", "restaurants") + # End runCommand Example 2 + + async def test_index_management(self): + db = self.db + + # Start Index Example 1 + await db.records.create_index("score") + # End Index Example 1 + + # Start Index Example 1 + await db.restaurants.create_index( + [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)], + partialFilterExpression={"rating": {"$gt": 5}}, + ) + # End Index Example 1 + + @async_client_context.require_replica_set + async def test_misc(self): + # Marketing examples + client = self.client + self.addAsyncCleanup(client.drop_database, "test") + self.addAsyncCleanup(client.drop_database, "my_database") + + # 2. Tunable consistency controls + collection = client.my_database.my_collection + async with client.start_session() as session: + await collection.insert_one({"_id": 1}, session=session) + await collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) + async for _doc in collection.find({}, session=session): + pass + + # 3. Exploiting the power of arrays + collection = client.test.array_updates_test + await collection.update_one( + {"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}] + ) + + +class TestTransactionExamples(AsyncIntegrationTest): + @async_client_context.require_transactions + async def test_transactions(self): + # Transaction examples + client = self.client + self.addAsyncCleanup(client.drop_database, "hr") + self.addAsyncCleanup(client.drop_database, "reporting") + + employees = client.hr.employees + events = client.reporting.events + await employees.insert_one({"employee": 3, "status": "Active"}) + await events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}}) + + # Start Transactions Intro Example 1 + + async def update_employee_info(session): + employees_coll = session.client.hr.employees + events_coll = session.client.reporting.events + + async with await session.start_transaction( + read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority") + ): + await employees_coll.update_one( + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) + await events_coll.insert_one( + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) + + while True: + try: + # Commit uses write concern set at transaction start. + await session.commit_transaction() + print("Transaction committed.") + break + except (ConnectionFailure, OperationFailure) as exc: + # Can retry commit + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") + continue + else: + print("Error during commit ...") + raise + + # End Transactions Intro Example 1 + + async with client.start_session() as session: + await update_employee_info(session) + + employee = await employees.find_one({"employee": 3}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Inactive") + + # Start Transactions Retry Example 1 + async def run_transaction_with_retry(txn_func, session): + while True: + try: + await txn_func(session) # performs transaction + break + except (ConnectionFailure, OperationFailure) as exc: + print("Transaction aborted. Caught exception during transaction.") + + # If transient error, retry the whole transaction + if exc.has_error_label("TransientTransactionError"): + print("TransientTransactionError, retrying transaction ...") + continue + else: + raise + + # End Transactions Retry Example 1 + + async with client.start_session() as session: + await run_transaction_with_retry(update_employee_info, session) + + employee = await employees.find_one({"employee": 3}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Inactive") + + # Start Transactions Retry Example 2 + async def commit_with_retry(session): + while True: + try: + # Commit uses write concern set at transaction start. + await session.commit_transaction() + print("Transaction committed.") + break + except (ConnectionFailure, OperationFailure) as exc: + # Can retry commit + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") + continue + else: + print("Error during commit ...") + raise + + # End Transactions Retry Example 2 + + # Test commit_with_retry from the previous examples + async def _insert_employee_retry_commit(session): + async with await session.start_transaction(): + await employees.insert_one({"employee": 4, "status": "Active"}, session=session) + await events.insert_one( + {"employee": 4, "status": {"new": "Active", "old": None}}, session=session + ) + + await commit_with_retry(session) + + async with client.start_session() as session: + await run_transaction_with_retry(_insert_employee_retry_commit, session) + + employee = await employees.find_one({"employee": 4}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Active") + + # Start Transactions Retry Example 3 + + async def run_transaction_with_retry(txn_func, session): + while True: + try: + await txn_func(session) # performs transaction + break + except (ConnectionFailure, OperationFailure) as exc: + # If transient error, retry the whole transaction + if exc.has_error_label("TransientTransactionError"): + print("TransientTransactionError, retrying transaction ...") + continue + else: + raise + + async def commit_with_retry(session): + while True: + try: + # Commit uses write concern set at transaction start. + await session.commit_transaction() + print("Transaction committed.") + break + except (ConnectionFailure, OperationFailure) as exc: + # Can retry commit + if exc.has_error_label("UnknownTransactionCommitResult"): + print("UnknownTransactionCommitResult, retrying commit operation ...") + continue + else: + print("Error during commit ...") + raise + + # Updates two collections in a transactions + + async def update_employee_info(session): + employees_coll = session.client.hr.employees + events_coll = session.client.reporting.events + + async with await session.start_transaction( + read_concern=ReadConcern("snapshot"), + write_concern=WriteConcern(w="majority"), + read_preference=ReadPreference.PRIMARY, + ): + await employees_coll.update_one( + {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session + ) + await events_coll.insert_one( + {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session + ) + + await commit_with_retry(session) + + # Start a session. + async with client.start_session() as session: + try: + await run_transaction_with_retry(update_employee_info, session) + except Exception: + # Do something with error. + raise + + # End Transactions Retry Example 3 + + employee = await employees.find_one({"employee": 3}) + assert employee is not None + self.assertIsNotNone(employee) + self.assertEqual(employee["status"], "Inactive") + + async def MongoClient(_): + return await self.async_rs_client() + + uriString = None + + # Start Transactions withTxn API Example 1 + + # For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g. + # uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl' + # For a sharded cluster, connect to the mongos instances; e.g. + # uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/' + + client = await MongoClient(uriString) + wc_majority = WriteConcern("majority", wtimeout=1000) + + # Prereq: Create collections. + await client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0}) + await client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0}) + + # Step 1: Define the callback that specifies the sequence of operations to perform inside the transactions. + async def callback(session): + collection_one = session.client.mydb1.foo + collection_two = session.client.mydb2.bar + + # Important:: You must pass the session to the operations. + await collection_one.insert_one({"abc": 1}, session=session) + await collection_two.insert_one({"xyz": 999}, session=session) + + # Step 2: Start a client session. + async with client.start_session() as session: + # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). + await session.with_transaction( + callback, + read_concern=ReadConcern("local"), + write_concern=wc_majority, + read_preference=ReadPreference.PRIMARY, + ) + + # End Transactions withTxn API Example 1 + + +class TestCausalConsistencyExamples(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + @async_client_context.require_no_mmap + async def test_causal_consistency(self): + # Causal consistency examples + client = self.client + self.addAsyncCleanup(client.drop_database, "test") + await client.test.drop_collection("items") + await client.test.items.insert_one( + {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()} + ) + + # Start Causal Consistency Example 1 + async with client.start_session(causal_consistency=True) as s1: + current_date = datetime.datetime.today() + items = client.get_database( + "test", + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + await items.update_one( + {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1 + ) + await items.insert_one( + {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1 + ) + # End Causal Consistency Example 1 + + assert s1.cluster_time is not None + assert s1.operation_time is not None + + # Start Causal Consistency Example 2 + async with client.start_session(causal_consistency=True) as s2: + s2.advance_cluster_time(s1.cluster_time) + s2.advance_operation_time(s1.operation_time) + + items = client.get_database( + "test", + read_preference=ReadPreference.SECONDARY, + read_concern=ReadConcern("majority"), + write_concern=WriteConcern("majority", wtimeout=1000), + ).items + async for item in items.find({"end": None}, session=s2): + print(item) + # End Causal Consistency Example 2 + + +class TestVersionedApiExamples(AsyncIntegrationTest): + @async_client_context.require_version_min(4, 7) + async def test_versioned_api(self): + # Versioned API examples + async def MongoClient(_, server_api): + return await self.async_rs_client(server_api=server_api, connect=False) + + uri = None + + # Start Versioned API Example 1 + from pymongo.server_api import ServerApi + + await MongoClient(uri, server_api=ServerApi("1")) + # End Versioned API Example 1 + + # Start Versioned API Example 2 + await MongoClient(uri, server_api=ServerApi("1", strict=True)) + # End Versioned API Example 2 + + # Start Versioned API Example 3 + await MongoClient(uri, server_api=ServerApi("1", strict=False)) + # End Versioned API Example 3 + + # Start Versioned API Example 4 + await MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + # End Versioned API Example 4 + + @unittest.skip("PYTHON-3167 count has been added to API version 1") + @async_client_context.require_version_min(4, 7) + async def test_versioned_api_migration(self): + # SERVER-58785 + if await async_client_context.is_topology_type( + ["sharded"] + ) and not async_client_context.version.at_least(5, 0, 2): + self.skipTest("This test needs MongoDB 5.0.2 or newer") + + client = await self.async_rs_client(server_api=ServerApi("1", strict=True)) + await client.db.sales.drop() + + # Start Versioned API Example 5 + def strptime(s): + return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") + + await client.db.sales.insert_many( + [ + { + "_id": 1, + "item": "abc", + "price": 10, + "quantity": 2, + "date": strptime("2021-01-01T08:00:00Z"), + }, + { + "_id": 2, + "item": "jkl", + "price": 20, + "quantity": 1, + "date": strptime("2021-02-03T09:00:00Z"), + }, + { + "_id": 3, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-03T09:05:00Z"), + }, + { + "_id": 4, + "item": "abc", + "price": 10, + "quantity": 10, + "date": strptime("2021-02-15T08:00:00Z"), + }, + { + "_id": 5, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T09:05:00Z"), + }, + { + "_id": 6, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-15T12:05:10Z"), + }, + { + "_id": 7, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T14:12:12Z"), + }, + { + "_id": 8, + "item": "abc", + "price": 10, + "quantity": 5, + "date": strptime("2021-03-16T20:20:13Z"), + }, + ] + ) + # End Versioned API Example 5 + + with self.assertRaisesRegex( + OperationFailure, + "Provided apiStrict:true, but the command count is not in API Version 1", + ): + await client.db.command("count", "sales", query={}) + # Start Versioned API Example 6 + # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} + # End Versioned API Example 6 + + # Start Versioned API Example 7 + await client.db.sales.count_documents({}) + # End Versioned API Example 7 + + # Start Versioned API Example 8 + # 8 + # End Versioned API Example 8 + + +class TestSnapshotQueryExamples(AsyncIntegrationTest): + @async_client_context.require_version_min(5, 0) + async def test_snapshot_query(self): + client = self.client + + if not await async_client_context.is_topology_type(["replicaset", "sharded"]): + self.skipTest("Must be a sharded or replicaset") + + self.addAsyncCleanup(client.drop_database, "pets") + db = client.pets + await db.drop_collection("cats") + await db.drop_collection("dogs") + await db.cats.insert_one( + {"name": "Whiskers", "color": "white", "age": 10, "adoptable": True} + ) + await db.dogs.insert_one( + {"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True} + ) + + async def predicate_one(): + return await self.check_for_snapshot(db.cats) + + async def predicate_two(): + return await self.check_for_snapshot(db.dogs) + + await async_wait_until(predicate_two, "success") + await async_wait_until(predicate_one, "success") + + # Start Snapshot Query Example 1 + + db = client.pets + async with client.start_session(snapshot=True) as s: + adoptablePetsCount = ( + await ( + await db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], + session=s, + ) + ).next() + )["adoptableCatsCount"] + + adoptablePetsCount += ( + await ( + await db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], + session=s, + ) + ).next() + )["adoptableDogsCount"] + + print(adoptablePetsCount) + + # End Snapshot Query Example 1 + db = client.retail + self.addAsyncCleanup(client.drop_database, "retail") + await db.drop_collection("sales") + + saleDate = datetime.datetime.now() + await db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) + + async def predicate_three(): + return await self.check_for_snapshot(db.sales) + + await async_wait_until(predicate_three, "success") + + # Start Snapshot Query Example 2 + db = client.retail + async with client.start_session(snapshot=True) as s: + _ = ( + await ( + await db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ) + ).next() + )["totalDailySales"] + + # End Snapshot Query Example 2 + + async def check_for_snapshot(self, collection): + """Wait for snapshot reads to become available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + async with self.client.start_session(snapshot=True) as s: + try: + if await collection.find_one(session=s): + return True + return False + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index d103374313..d433f1a7e6 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -58,7 +58,7 @@ class SpecRunnerTask(ConcurrentRunner): def __init__(self, name): - super().__init__(name) + super().__init__(name=name) self.exc = None self.daemon = True self.cond = _async_create_condition(_async_create_lock()) diff --git a/test/helpers.py b/test/helpers.py index bd9e23bba4..705843efcd 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -381,14 +381,14 @@ def disable(self): class ConcurrentRunner(PARENT): - def __init__(self, name, *args, **kwargs): + def __init__(self, **kwargs): if _IS_SYNC: - super().__init__(*args, **kwargs) - self.name = name + super().__init__(**kwargs) + self.name = kwargs.get("name", "ConcurrentRunner") self.stopped = False self.task = None - if "target" in kwargs: - self.target = kwargs["target"] + self.target = kwargs.get("target", None) + self.args = kwargs.get("args", []) if not _IS_SYNC: @@ -403,6 +403,7 @@ def is_alive(self): return not self.stopped def run(self): - if self.target: - self.target() - self.stopped = True + try: + self.target(*self.args) + finally: + self.stopped = True diff --git a/test/test_examples.py b/test/test_examples.py index 7f98226e7a..9bcc276248 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -15,9 +15,13 @@ """MongoDB documentation examples in Python.""" from __future__ import annotations +import asyncio import datetime +import functools import sys import threading +import time +from test.helpers import ConcurrentRunner sys.path[0:0] = [""] @@ -29,8 +33,11 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_api import ServerApi +from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestSampleShellCommands(IntegrationTest): def setUp(self): @@ -62,7 +69,7 @@ def test_first_three_examples(self): cursor = db.inventory.find({"item": "canvas"}) # End Example 2 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 3 db.inventory.insert_many( @@ -137,31 +144,31 @@ def test_query_top_level_fields(self): cursor = db.inventory.find({}) # End Example 7 - self.assertEqual(len(list(cursor)), 5) + self.assertEqual(len(cursor.to_list()), 5) # Start Example 9 cursor = db.inventory.find({"status": "D"}) # End Example 9 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) # Start Example 10 cursor = db.inventory.find({"status": {"$in": ["A", "D"]}}) # End Example 10 - self.assertEqual(len(list(cursor)), 5) + self.assertEqual(len(cursor.to_list()), 5) # Start Example 11 cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}}) # End Example 11 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 12 cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) # End Example 12 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 13 cursor = db.inventory.find( @@ -169,7 +176,7 @@ def test_query_top_level_fields(self): ) # End Example 13 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) def test_query_embedded_documents(self): db = self.db @@ -219,31 +226,31 @@ def test_query_embedded_documents(self): cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) # End Example 15 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 16 cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) # End Example 16 - self.assertEqual(len(list(cursor)), 0) + self.assertEqual(len(cursor.to_list()), 0) # Start Example 17 cursor = db.inventory.find({"size.uom": "in"}) # End Example 17 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) # Start Example 18 cursor = db.inventory.find({"size.h": {"$lt": 15}}) # End Example 18 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 19 cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) # End Example 19 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) def test_query_arrays(self): db = self.db @@ -269,49 +276,49 @@ def test_query_arrays(self): cursor = db.inventory.find({"tags": ["red", "blank"]}) # End Example 21 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 22 cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}}) # End Example 22 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 23 cursor = db.inventory.find({"tags": "red"}) # End Example 23 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 24 cursor = db.inventory.find({"dim_cm": {"$gt": 25}}) # End Example 24 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 25 cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}}) # End Example 25 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 26 cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) # End Example 26 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 27 cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) # End Example 27 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 28 cursor = db.inventory.find({"tags": {"$size": 3}}) # End Example 28 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) def test_query_array_of_documents(self): db = self.db @@ -360,49 +367,49 @@ def test_query_array_of_documents(self): cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) # End Example 30 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 31 cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) # End Example 31 - self.assertEqual(len(list(cursor)), 0) + self.assertEqual(len(cursor.to_list()), 0) # Start Example 32 cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) # End Example 32 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 33 cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) # End Example 33 - self.assertEqual(len(list(cursor)), 5) + self.assertEqual(len(cursor.to_list()), 5) # Start Example 34 cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) # End Example 34 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 35 cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) # End Example 35 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 36 cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) # End Example 36 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 37 cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) # End Example 37 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) def test_query_null(self): db = self.db @@ -415,19 +422,19 @@ def test_query_null(self): cursor = db.inventory.find({"item": None}) # End Example 39 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) # Start Example 40 cursor = db.inventory.find({"item": {"$type": 10}}) # End Example 40 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 41 cursor = db.inventory.find({"item": {"$exists": False}}) # End Example 41 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) def test_projection(self): db = self.db @@ -473,7 +480,7 @@ def test_projection(self): cursor = db.inventory.find({"status": "A"}) # End Example 43 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 44 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) @@ -746,8 +753,9 @@ def insert_docs(): while not done: db.inventory.insert_one({"username": "alice"}) db.inventory.delete_one({"username": "alice"}) + time.sleep(0.005) - t = threading.Thread(target=insert_docs) + t = ConcurrentRunner(target=insert_docs) t.start() try: @@ -1347,20 +1355,37 @@ def test_snapshot_query(self): db.drop_collection("dogs") db.cats.insert_one({"name": "Whiskers", "color": "white", "age": 10, "adoptable": True}) db.dogs.insert_one({"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True}) - wait_until(lambda: self.check_for_snapshot(db.cats), "success") - wait_until(lambda: self.check_for_snapshot(db.dogs), "success") + + def predicate_one(): + return self.check_for_snapshot(db.cats) + + def predicate_two(): + return self.check_for_snapshot(db.dogs) + + wait_until(predicate_two, "success") + wait_until(predicate_one, "success") # Start Snapshot Query Example 1 db = client.pets with client.start_session(snapshot=True) as s: - adoptablePetsCount = db.cats.aggregate( - [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], session=s - ).next()["adoptableCatsCount"] - - adoptablePetsCount += db.dogs.aggregate( - [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], session=s - ).next()["adoptableDogsCount"] + adoptablePetsCount = ( + ( + db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], + session=s, + ) + ).next() + )["adoptableCatsCount"] + + adoptablePetsCount += ( + ( + db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], + session=s, + ) + ).next() + )["adoptableDogsCount"] print(adoptablePetsCount) @@ -1371,33 +1396,41 @@ def test_snapshot_query(self): saleDate = datetime.datetime.now() db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) - wait_until(lambda: self.check_for_snapshot(db.sales), "success") + + def predicate_three(): + return self.check_for_snapshot(db.sales) + + wait_until(predicate_three, "success") # Start Snapshot Query Example 2 db = client.retail with client.start_session(snapshot=True) as s: - db.sales.aggregate( - [ - { - "$match": { - "$expr": { - "$gt": [ - "$saleDate", - { - "$dateSubtract": { - "startDate": "$$NOW", - "unit": "day", - "amount": 1, - } - }, - ] - } - } - }, - {"$count": "totalDailySales"}, - ], - session=s, - ).next()["totalDailySales"] + _ = ( + ( + db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ) + ).next() + )["totalDailySales"] # End Snapshot Query Example 2 diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 6a62112afb..98949431d0 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -58,7 +58,7 @@ class SpecRunnerThread(ConcurrentRunner): def __init__(self, name): - super().__init__(name) + super().__init__(name=name) self.exc = None self.daemon = True self.cond = _create_condition(_create_lock()) diff --git a/tools/synchro.py b/tools/synchro.py index 06dc708e08..ffbea4e537 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -210,6 +210,7 @@ def async_only_test(f: str) -> bool: "test_data_lake.py", "test_dns.py", "test_encryption.py", + "test_examples.py", "test_heartbeat_monitoring.py", "test_index_management.py", "test_grid_file.py", From 02d6cc9cfdcac8f52a140c8549b19d5edc34d8f1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 5 Feb 2025 12:10:44 -0500 Subject: [PATCH 1723/2111] PYTHON-5107 - Convert test.test_streaming_protocol to async (#2126) --- test/asynchronous/test_streaming_protocol.py | 228 +++++++++++++++++++ test/test_streaming_protocol.py | 8 +- tools/synchro.py | 1 + 3 files changed, 232 insertions(+), 5 deletions(-) create mode 100644 test/asynchronous/test_streaming_protocol.py diff --git a/test/asynchronous/test_streaming_protocol.py b/test/asynchronous/test_streaming_protocol.py new file mode 100644 index 0000000000..fd890d29fb --- /dev/null +++ b/test/asynchronous/test_streaming_protocol.py @@ -0,0 +1,228 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" +from __future__ import annotations + +import sys +import time + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import ( + HeartbeatEventListener, + ServerEventListener, + async_wait_until, +) + +from pymongo import monitoring +from pymongo.hello import HelloCompat + +_IS_SYNC = False + + +class TestStreamingProtocol(AsyncIntegrationTest): + @async_client_context.require_failCommand_appName + async def test_failCommand_streaming(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + client = await self.async_rs_or_single_client( + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) + # Force a connection. + await client.admin.command("ping") + address = await client.address + listener.reset() + + fail_hello = { + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", + }, + } + async with self.fail_point(fail_hello): + + def _marked_unknown(event): + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) + + def _discovered_node(event): + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown(): + return len(listener.matching(_marked_unknown)) >= 1 + + def rediscovered(): + return len(listener.matching(_discovered_node)) >= 1 + + # Topology events are not published synchronously + await async_wait_until(marked_unknown, "mark node unknown") + await async_wait_until(rediscovered, "rediscover node") + + # Server should be selectable. + await client.admin.command("ping") + + @async_client_context.require_failCommand_appName + async def test_streaming_rtt(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + # On Windows, RTT can actually be 0.0 because time.time() only has + # 1-15 millisecond resolution. We need to delay the initial hello + # to ensure that RTT is never zero. + name = "streamingRttTest" + delay_hello: dict = { + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, + # This can be uncommented after SERVER-49220 is fixed. + # 'appName': name, + }, + } + async with self.fail_point(delay_hello): + client = await self.async_rs_or_single_client( + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) + # Force a connection. + await client.admin.command("ping") + address = await client.address + + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name + async with self.fail_point(delay_hello): + + def rtt_exceeds_250_ms(): + # XXX: Add a public TopologyDescription getter to MongoClient? + topology = client._topology + sd = topology.description.server_descriptions()[address] + assert sd.round_trip_time is not None + return sd.round_trip_time > 0.250 + + await async_wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") + + # Server should be selectable. + await client.admin.command("ping") + + def changed_event(event): + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) + + # There should only be one event published, for the initial discovery. + events = listener.matching(changed_event) + self.assertEqual(1, len(events)) + self.assertGreater(events[0].new_description.round_trip_time, 0) + + @async_client_context.require_failCommand_appName + async def test_monitor_waits_after_server_check_error(self): + # This test implements: + # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + fail_hello = { + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", + }, + } + async with self.fail_point(fail_hello): + start = time.time() + client = await self.async_single_client( + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) + # Force a connection. + await client.admin.command("ping") + duration = time.time() - start + # Explanation of the expected events: + # 0ms: run configureFailPoint + # 1ms: create MongoClient + # 2ms: failed monitor handshake, 1 + # 502ms: failed monitor handshake, 2 + # 1002ms: failed monitor handshake, 3 + # 1502ms: failed monitor handshake, 4 + # 2002ms: failed monitor handshake, 5 + # 2502ms: monitor handshake succeeds + # 2503ms: run awaitable hello + # 2504ms: application handshake succeeds + # 2505ms: ping command succeeds + self.assertGreaterEqual(duration, 2) + self.assertLessEqual(duration, 3.5) + + @async_client_context.require_failCommand_appName + async def test_heartbeat_awaited_flag(self): + hb_listener = HeartbeatEventListener() + client = await self.async_single_client( + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) + # Force a connection. + await client.admin.command("ping") + + def hb_succeeded(event): + return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + + def hb_failed(event): + return isinstance(event, monitoring.ServerHeartbeatFailedEvent) + + fail_heartbeat = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", + }, + } + async with self.fail_point(fail_heartbeat): + await async_wait_until( + lambda: hb_listener.matching(hb_failed), "published failed event" + ) + # Reconnect. + await client.admin.command("ping") + + hb_succeeded_events = hb_listener.matching(hb_succeeded) + hb_failed_events = hb_listener.matching(hb_failed) + self.assertFalse(hb_succeeded_events[0].awaited) + self.assertTrue(hb_failed_events[0].awaited) + # Depending on thread scheduling, the failed heartbeat could occur on + # the second or third check. + events = [type(e) for e in hb_listener.events[:4]] + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: + self.assertFalse(hb_succeeded_events[1].awaited) + else: + self.assertTrue(hb_succeeded_events[1].awaited) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index d782aa1dd7..894e89e208 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -30,6 +30,8 @@ from pymongo import monitoring from pymongo.hello import HelloCompat +_IS_SYNC = True + class TestStreamingProtocol(IntegrationTest): @client_context.require_failCommand_appName @@ -41,7 +43,6 @@ def test_failCommand_streaming(self): heartbeatFrequencyMS=500, appName="failingHeartbeatTest", ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") address = client.address @@ -78,7 +79,7 @@ def marked_unknown(): def rediscovered(): return len(listener.matching(_discovered_node)) >= 1 - # Topology events are published asynchronously + # Topology events are not published synchronously wait_until(marked_unknown, "mark node unknown") wait_until(rediscovered, "rediscover node") @@ -108,7 +109,6 @@ def test_streaming_rtt(self): client = self.rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") address = client.address @@ -156,7 +156,6 @@ def test_monitor_waits_after_server_check_error(self): client = self.single_client( appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") duration = time.time() - start @@ -183,7 +182,6 @@ def test_heartbeat_awaited_flag(self): heartbeatFrequencyMS=500, appName="heartbeatEventAwaitedFlag", ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") diff --git a/tools/synchro.py b/tools/synchro.py index ffbea4e537..d1fc032ebf 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -234,6 +234,7 @@ def async_only_test(f: str) -> bool: "test_sessions_unified.py", "test_srv_polling.py", "test_ssl.py", + "test_streaming_protocol.py", "test_transactions.py", "test_unified_format.py", "unified_format.py", From 7108c2199d1b61b3132d89cd1cceaa3928792b8b Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 5 Feb 2025 12:11:03 -0500 Subject: [PATCH 1724/2111] PYTHON-5108 - Convert test.test_transactions_unified to async (#2128) --- .../asynchronous/test_transactions_unified.py | 56 +++++++++++++++++++ test/test_transactions_unified.py | 17 ++++-- tools/synchro.py | 1 + 3 files changed, 70 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_transactions_unified.py diff --git a/test/asynchronous/test_transactions_unified.py b/test/asynchronous/test_transactions_unified.py new file mode 100644 index 0000000000..4519a0e39a --- /dev/null +++ b/test/asynchronous/test_transactions_unified.py @@ -0,0 +1,56 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Transactions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import client_context, unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + + +@client_context.require_no_mmap +def setUpModule(): + pass + + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "transactions/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +# Location of JSON test specifications for transactions-convenient-api. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions-convenient-api/unified") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "transactions-convenient-api/unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py index 81137bf658..641e05108a 100644 --- a/test/test_transactions_unified.py +++ b/test/test_transactions_unified.py @@ -17,12 +17,15 @@ import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import client_context, unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + @client_context.require_no_mmap def setUpModule(): @@ -30,15 +33,21 @@ def setUpModule(): # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "unified") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "transactions/unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) # Location of JSON test specifications for transactions-convenient-api. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api", "unified" -) +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions-convenient-api/unified") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "transactions-convenient-api/unified" + ) # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/tools/synchro.py b/tools/synchro.py index d1fc032ebf..fc6b160821 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -236,6 +236,7 @@ def async_only_test(f: str) -> bool: "test_ssl.py", "test_streaming_protocol.py", "test_transactions.py", + "test_transactions_unified.py", "test_unified_format.py", "unified_format.py", ] From ac8fa2d645eaa22dd6346320bfee039294139dbe Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 5 Feb 2025 12:13:56 -0500 Subject: [PATCH 1725/2111] PYTHON-5094 - Convert test.test_read_preferences to async (#2110) --- test/__init__.py | 2 +- test/asynchronous/__init__.py | 4 +- test/asynchronous/test_read_preferences.py | 730 +++++++++++++++++++++ test/test_read_preferences.py | 108 +-- tools/synchro.py | 1 + 5 files changed, 795 insertions(+), 50 deletions(-) create mode 100644 test/asynchronous/test_read_preferences.py diff --git a/test/__init__.py b/test/__init__.py index b49eee99ac..6eda00bdec 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -593,7 +593,7 @@ def supports_secondary_read_pref(self): if self.has_secondaries: return True if self.is_mongos: - shard = self.client.config.shards.find_one()["host"] # type:ignore[index] + shard = (self.client.config.shards.find_one())["host"] # type:ignore[index] num_members = shard.count(",") + 1 return num_members > 1 return False diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index a6ba29baaa..b3b0ca93e1 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -592,10 +592,10 @@ async def check(): @property async def supports_secondary_read_pref(self): - if self.has_secondaries: + if await self.has_secondaries: return True if self.is_mongos: - shard = await self.client.config.shards.find_one()["host"] # type:ignore[index] + shard = (await self.client.config.shards.find_one())["host"] # type:ignore[index] num_members = shard.count(",") + 1 return num_members > 1 return False diff --git a/test/asynchronous/test_read_preferences.py b/test/asynchronous/test_read_preferences.py new file mode 100644 index 0000000000..077bc21eaf --- /dev/null +++ b/test/asynchronous/test_read_preferences.py @@ -0,0 +1,730 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the replica_set_connection module.""" +from __future__ import annotations + +import contextlib +import copy +import pickle +import random +import sys +from typing import Any + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + SkipTest, + async_client_context, + connected, + unittest, +) +from test.utils import ( + OvertCommandListener, + async_wait_until, + one, +) +from test.version import Version + +from bson.son import SON +from pymongo.asynchronous.helpers import anext +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.message import _maybe_add_read_preference +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, readable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestSelections(AsyncIntegrationTest): + @async_client_context.require_connection + async def test_bool(self): + client = await self.async_single_client() + + async def predicate(): + return await client.address + + await async_wait_until(predicate, "discover primary") + selection = Selection.from_topology_description(client._topology.description) + + self.assertTrue(selection) + self.assertFalse(selection.with_server_descriptions([])) + + +class TestReadPreferenceObjects(unittest.TestCase): + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] + + def test_pickle(self): + for pref in self.prefs: + self.assertEqual(pref, pickle.loads(pickle.dumps(pref))) + + def test_copy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.copy(pref)) + + def test_deepcopy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.deepcopy(pref)) + + +class TestReadPreferencesBase(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + # Insert some data so we can use cursors in read_from_which_host + await self.client.pymongo_test.test.drop() + await self.client.get_database( + "pymongo_test", write_concern=WriteConcern(w=async_client_context.w) + ).test.insert_many([{"_id": i} for i in range(10)]) + + self.addAsyncCleanup(self.client.pymongo_test.test.drop) + + async def read_from_which_host(self, client): + """Do a find() on the client and return which host was used""" + cursor = client.pymongo_test.test.find() + await anext(cursor) + return cursor.address + + async def read_from_which_kind(self, client): + """Do a find() on the client and return 'primary' or 'secondary' + depending on which the client used. + """ + address = await self.read_from_which_host(client) + if address == await client.primary: + return "primary" + elif address in await client.secondaries: + return "secondary" + else: + self.fail( + f"Cursor used address {address}, expected either primary " + f"{client.primary} or secondaries {client.secondaries}" + ) + + async def assertReadsFrom(self, expected, **kwargs): + c = await self.async_rs_client(**kwargs) + + async def predicate(): + return len(c.nodes - await c.arbiters) == async_client_context.w + + await async_wait_until(predicate, "discovered all nodes") + + used = await self.read_from_which_kind(c) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") + + +class TestSingleSecondaryOk(TestReadPreferencesBase): + async def test_reads_from_secondary(self): + host, port = next(iter(await self.client.secondaries)) + # Direct connection to a secondary. + client = await self.async_single_client(host, port) + self.assertFalse(await client.is_primary) + + # Regardless of read preference, we should be able to do + # "reads" with a direct connection to a secondary. + # See server-selection.rst#topology-type-single. + self.assertEqual(client.read_preference, ReadPreference.PRIMARY) + + db = client.pymongo_test + coll = db.test + + # Test find and find_one. + self.assertIsNotNone(await coll.find_one()) + self.assertEqual(10, len(await coll.find().to_list())) + + # Test some database helpers. + self.assertIsNotNone(await db.list_collection_names()) + self.assertIsNotNone(await db.validate_collection("test")) + self.assertIsNotNone(await db.command("ping")) + + # Test some collection helpers. + self.assertEqual(10, await coll.count_documents({})) + self.assertEqual(10, len(await coll.distinct("_id"))) + self.assertIsNotNone(await coll.aggregate([])) + self.assertIsNotNone(await coll.index_information()) + + +class TestReadPreferences(TestReadPreferencesBase): + async def test_mode_validation(self): + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual( + mode, (await self.async_rs_client(read_preference=mode)).read_preference + ) + + with self.assertRaises(TypeError): + await self.async_rs_client(read_preference="foo") + + async def test_tag_sets_validation(self): + S = Secondary(tag_sets=[{}]) + self.assertEqual( + [{}], (await self.async_rs_client(read_preference=S)).read_preference.tag_sets + ) + + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual( + [{"k": "v"}], (await self.async_rs_client(read_preference=S)).read_preference.tag_sets + ) + + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual( + [{"k": "v"}, {}], + (await self.async_rs_client(read_preference=S)).read_preference.tag_sets, + ) + + self.assertRaises(ValueError, Secondary, tag_sets=[]) + + # One dict not ok, must be a list of dicts + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) + + self.assertRaises(TypeError, Secondary, tag_sets="foo") + + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) + + async def test_threshold_validation(self): + self.assertEqual( + 17, + ( + await self.async_rs_client(localThresholdMS=17, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 42, + ( + await self.async_rs_client(localThresholdMS=42, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 666, + ( + await self.async_rs_client(localThresholdMS=666, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 0, + ( + await self.async_rs_client(localThresholdMS=0, connect=False) + ).options.local_threshold_ms, + ) + + with self.assertRaises(ValueError): + await self.async_rs_client(localthresholdms=-1) + + async def test_zero_latency(self): + ping_times: set = set() + # Generate unique ping times. + while len(ping_times) < len(self.client.nodes): + ping_times.add(random.random()) + for ping_time, host in zip(ping_times, self.client.nodes): + ServerDescription._host_to_round_trip_time[host] = ping_time + try: + client = await connected( + await self.async_rs_client(readPreference="nearest", localThresholdMS=0) + ) + await async_wait_until( + lambda: client.nodes == self.client.nodes, "discovered all nodes" + ) + host = await self.read_from_which_host(client) + for _ in range(5): + self.assertEqual(host, await self.read_from_which_host(client)) + finally: + ServerDescription._host_to_round_trip_time.clear() + + async def test_primary(self): + await self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) + + async def test_primary_with_tags(self): + # Tags not allowed with PRIMARY + with self.assertRaises(ConfigurationError): + await self.async_rs_client(tag_sets=[{"dc": "ny"}]) + + async def test_primary_preferred(self): + await self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) + + async def test_secondary(self): + await self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) + + async def test_secondary_preferred(self): + await self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) + + async def test_nearest(self): + # With high localThresholdMS, expect to read from any + # member + c = await self.async_rs_client( + read_preference=ReadPreference.NEAREST, localThresholdMS=10000 + ) # 10 seconds + + data_members = {await self.client.primary} | await self.client.secondaries + + # This is a probabilistic test; track which members we've read from so + # far, and keep reading until we've used all the members or give up. + # Chance of using only 2 of 3 members 10k times if there's no bug = + # 3 * (2/3)**10000, very low. + used: set = set() + i = 0 + while data_members.difference(used) and i < 10000: + address = await self.read_from_which_host(c) + used.add(address) + i += 1 + + not_used = data_members.difference(used) + latencies = ", ".join( + "%s: %sms" % (server.description.address, server.description.round_trip_time) + for server in await (await c._get_topology()).select_servers( + readable_server_selector, _Op.TEST + ) + ) + + self.assertFalse( + not_used, + "Expected to use primary and all secondaries for mode NEAREST," + f" but didn't use {not_used}\nlatencies: {latencies}", + ) + + +class ReadPrefTester(AsyncMongoClient): + def __init__(self, *args, **kwargs): + self.has_read_from = set() + client_options = async_client_context.client_options + client_options.update(kwargs) + super().__init__(*args, **client_options) + + async def _conn_for_reads(self, read_preference, session, operation): + context = await super()._conn_for_reads(read_preference, session, operation) + return context + + @contextlib.asynccontextmanager + async def _conn_from_server(self, read_preference, server, session): + context = super()._conn_from_server(read_preference, server, session) + async with context as (conn, read_preference): + await self.record_a_read(conn.address) + yield conn, read_preference + + async def record_a_read(self, address): + server = await (await self._get_topology()).select_server_by_address(address, _Op.TEST, 0) + self.has_read_from.add(server) + + +_PREF_MAP = [ + (Primary, SERVER_TYPE.RSPrimary), + (PrimaryPreferred, SERVER_TYPE.RSPrimary), + (Secondary, SERVER_TYPE.RSSecondary), + (SecondaryPreferred, SERVER_TYPE.RSSecondary), + (Nearest, "any"), +] + + +class TestCommandAndReadPreference(AsyncIntegrationTest): + c: ReadPrefTester + client_version: Version + + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.c = ReadPrefTester( + # Ignore round trip times, to test ReadPreference modes only. + localThresholdMS=1000 * 1000, + ) + self.client_version = await Version.async_from_client(self.c) + # mapReduce fails if the collection does not exist. + coll = self.c.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=async_client_context.w) + ) + await coll.insert_one({}) + + async def asyncTearDown(self): + await self.c.drop_database("pymongo_test") + await self.c.close() + + async def executed_on_which_server(self, client, fn, *args, **kwargs): + """Execute fn(*args, **kwargs) and return the Server instance used.""" + client.has_read_from.clear() + await fn(*args, **kwargs) + self.assertEqual(1, len(client.has_read_from)) + return one(client.has_read_from) + + async def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): + server = await self.executed_on_which_server(client, fn, *args, **kwargs) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) + + async def _test_fn(self, server_type, fn): + for _ in range(10): + if server_type == "any": + used = set() + for _ in range(1000): + server = await self.executed_on_which_server(self.c, fn) + used.add(server.description.address) + if len(used) == len(await self.c.secondaries) + 1: + # Success + break + + assert await self.c.primary is not None + unused = (await self.c.secondaries).union({await self.c.primary}).difference(used) + if unused: + self.fail("Some members not used for NEAREST: %s" % (unused)) + else: + await self.assertExecutedOn(server_type, self.c, fn) + + async def _test_primary_helper(self, func): + # Helpers that ignore read preference. + await self._test_fn(SERVER_TYPE.RSPrimary, func) + + async def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): + for mode, server_type in _PREF_MAP: + new_coll = coll.with_options(read_preference=mode()) + + async def func(): + return await getattr(new_coll, meth)(*args, **kwargs) + + if secondary_ok: + await self._test_fn(server_type, func) + else: + await self._test_fn(SERVER_TYPE.RSPrimary, func) + + async def test_command(self): + # Test that the generic command helper obeys the read preference + # passed to it. + for mode, server_type in _PREF_MAP: + + async def func(): + return await self.c.pymongo_test.command("dbStats", read_preference=mode()) + + await self._test_fn(server_type, func) + + async def test_create_collection(self): + # create_collection runs listCollections on the primary to check if + # the collection already exists. + async def func(): + return await self.c.pymongo_test.create_collection( + "some_collection%s" % random.randint(0, sys.maxsize) + ) + + await self._test_primary_helper(func) + + async def test_count_documents(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) + + async def test_estimated_document_count(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") + + async def test_distinct(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") + + async def test_aggregate(self): + await self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) + + async def test_aggregate_write(self): + # 5.0 servers support $out on secondaries. + secondary_ok = async_client_context.version.at_least(5, 0) + await self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) + + +class TestMovingAverage(unittest.TestCase): + def test_moving_average(self): + avg = MovingAverage() + self.assertIsNone(avg.get()) + avg.add_sample(10) + self.assertAlmostEqual(10, avg.get()) # type: ignore + avg.add_sample(20) + self.assertAlmostEqual(12, avg.get()) # type: ignore + avg.add_sample(30) + self.assertAlmostEqual(15.6, avg.get()) # type: ignore + + +class TestMongosAndReadPreference(AsyncIntegrationTest): + def test_read_preference_document(self): + pref = Primary() + self.assertEqual(pref.document, {"mode": "primary"}) + + pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + with self.assertRaises(TypeError): + # Float is prohibited. + Nearest(max_staleness=1.5) # type: ignore + + with self.assertRaises(ValueError): + Nearest(max_staleness=0) + + with self.assertRaises(ValueError): + Nearest(max_staleness=-2) + + def test_read_preference_document_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for mode, cls in cases.items(): + with self.assertRaises(TypeError): + cls(hedge=[]) # type: ignore + + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. + self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + async def test_send_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + if await async_client_context.supports_secondary_read_pref: + cases["secondary"] = Secondary + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + await client.admin.command("ping") + for _mode, cls in cases.items(): + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) + listener.reset() + await coll.find_one() + started = listener.started_events + self.assertEqual(len(started), 1, started) + cmd = started[0].command + if async_client_context.is_rs or async_client_context.is_mongos: + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) + else: + self.assertNotIn("$readPreference", cmd) + + def test_maybe_add_read_preference(self): + # Primary doesn't add $readPreference + out = _maybe_add_read_preference({}, Primary()) + self.assertEqual(out, {}) + + pref = PrimaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Secondary() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + # SecondaryPreferred without tag_sets or max_staleness doesn't add + # $readPreference + pref = SecondaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, {}) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = SecondaryPreferred(max_staleness=120) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Nearest() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) + pref = Nearest() + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + + @async_client_context.require_mongos + async def test_mongos(self): + res = await async_client_context.client.config.shards.find_one() + assert res is not None + shard = res["host"] + num_members = shard.count(",") + 1 + if num_members == 1: + raise SkipTest("Need a replica set shard to test.") + coll = async_client_context.client.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=num_members) + ) + await coll.drop() + res = await coll.insert_many([{} for _ in range(5)]) + first_id = res.inserted_ids[0] + last_id = res.inserted_ids[-1] + + # Note - this isn't a perfect test since there's no way to + # tell what shard member a query ran on. + for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): + qcoll = coll.with_options(read_preference=pref) + results = await qcoll.find().sort([("_id", 1)]).to_list() + self.assertEqual(first_id, results[0]["_id"]) + self.assertEqual(last_id, results[-1]["_id"]) + results = await qcoll.find().sort([("_id", -1)]).to_list() + self.assertEqual(first_id, results[-1]["_id"]) + self.assertEqual(last_id, results[0]["_id"]) + + @async_client_context.require_mongos + async def test_mongos_max_staleness(self): + # Sanity check that we're sending maxStalenessSeconds + coll = async_client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) + # No error + await coll.find_one() + + coll = async_client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) + try: + await coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + coll = ( + await self.async_single_client( + readPreference="secondaryPreferred", maxStalenessSeconds=120 + ) + ).pymongo_test.test + # No error + await coll.find_one() + + coll = ( + await self.async_single_client( + readPreference="secondaryPreferred", maxStalenessSeconds=10 + ) + ).pymongo_test.test + try: + await coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 32883399e1..0d38f3f00d 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -26,7 +26,13 @@ sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, connected, unittest +from test import ( + IntegrationTest, + SkipTest, + client_context, + connected, + unittest, +) from test.utils import ( OvertCommandListener, one, @@ -49,16 +55,22 @@ from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection, readable_server_selector from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.helpers import next from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestSelections(IntegrationTest): @client_context.require_connection def test_bool(self): client = self.single_client() - wait_until(lambda: client.address, "discover primary") + def predicate(): + return client.address + + wait_until(predicate, "discover primary") selection = Selection.from_topology_description(client._topology.description) self.assertTrue(selection) @@ -88,11 +100,7 @@ def test_deepcopy(self): class TestReadPreferencesBase(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super().setUpClass() - def setUp(self): super().setUp() # Insert some data so we can use cursors in read_from_which_host @@ -123,11 +131,14 @@ def read_from_which_kind(self, client): f"Cursor used address {address}, expected either primary " f"{client.primary} or secondaries {client.secondaries}" ) - return None def assertReadsFrom(self, expected, **kwargs): c = self.rs_client(**kwargs) - wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") + + def predicate(): + return len(c.nodes - c.arbiters) == client_context.w + + wait_until(predicate, "discovered all nodes") used = self.read_from_which_kind(c) self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") @@ -150,7 +161,7 @@ def test_reads_from_secondary(self): # Test find and find_one. self.assertIsNotNone(coll.find_one()) - self.assertEqual(10, len(list(coll.find()))) + self.assertEqual(10, len(coll.find().to_list())) # Test some database helpers. self.assertIsNotNone(db.list_collection_names()) @@ -173,20 +184,22 @@ def test_mode_validation(self): ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST, ): - self.assertEqual(mode, self.rs_client(read_preference=mode).read_preference) + self.assertEqual(mode, (self.rs_client(read_preference=mode)).read_preference) - self.assertRaises(TypeError, self.rs_client, read_preference="foo") + with self.assertRaises(TypeError): + self.rs_client(read_preference="foo") def test_tag_sets_validation(self): S = Secondary(tag_sets=[{}]) - self.assertEqual([{}], self.rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{}], (self.rs_client(read_preference=S)).read_preference.tag_sets) S = Secondary(tag_sets=[{"k": "v"}]) - self.assertEqual([{"k": "v"}], self.rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{"k": "v"}], (self.rs_client(read_preference=S)).read_preference.tag_sets) S = Secondary(tag_sets=[{"k": "v"}, {}]) self.assertEqual( - [{"k": "v"}, {}], self.rs_client(read_preference=S).read_preference.tag_sets + [{"k": "v"}, {}], + (self.rs_client(read_preference=S)).read_preference.tag_sets, ) self.assertRaises(ValueError, Secondary, tag_sets=[]) @@ -200,22 +213,27 @@ def test_tag_sets_validation(self): def test_threshold_validation(self): self.assertEqual( - 17, self.rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms + 17, + (self.rs_client(localThresholdMS=17, connect=False)).options.local_threshold_ms, ) self.assertEqual( - 42, self.rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms + 42, + (self.rs_client(localThresholdMS=42, connect=False)).options.local_threshold_ms, ) self.assertEqual( - 666, self.rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms + 666, + (self.rs_client(localThresholdMS=666, connect=False)).options.local_threshold_ms, ) self.assertEqual( - 0, self.rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms + 0, + (self.rs_client(localThresholdMS=0, connect=False)).options.local_threshold_ms, ) - self.assertRaises(ValueError, self.rs_client, localthresholdms=-1) + with self.assertRaises(ValueError): + self.rs_client(localthresholdms=-1) def test_zero_latency(self): ping_times: set = set() @@ -238,7 +256,8 @@ def test_primary(self): def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises(ConfigurationError, self.rs_client, tag_sets=[{"dc": "ny"}]) + with self.assertRaises(ConfigurationError): + self.rs_client(tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) @@ -272,7 +291,7 @@ def test_nearest(self): not_used = data_members.difference(used) latencies = ", ".join( "%s: %sms" % (server.description.address, server.description.round_trip_time) - for server in c._get_topology().select_servers(readable_server_selector, _Op.TEST) + for server in (c._get_topology()).select_servers(readable_server_selector, _Op.TEST) ) self.assertFalse( @@ -289,12 +308,9 @@ def __init__(self, *args, **kwargs): client_options.update(kwargs) super().__init__(*args, **client_options) - @contextlib.contextmanager def _conn_for_reads(self, read_preference, session, operation): context = super()._conn_for_reads(read_preference, session, operation) - with context as (conn, read_preference): - self.record_a_read(conn.address) - yield conn, read_preference + return context @contextlib.contextmanager def _conn_from_server(self, read_preference, server, session): @@ -304,7 +320,7 @@ def _conn_from_server(self, read_preference, server, session): yield conn, read_preference def record_a_read(self, address): - server = self._get_topology().select_server_by_address(address, _Op.TEST, 0) + server = (self._get_topology()).select_server_by_address(address, _Op.TEST, 0) self.has_read_from.add(server) @@ -321,25 +337,23 @@ class TestCommandAndReadPreference(IntegrationTest): c: ReadPrefTester client_version: Version - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super().setUpClass() - cls.c = ReadPrefTester( + def setUp(self): + super().setUp() + self.c = ReadPrefTester( # Ignore round trip times, to test ReadPreference modes only. localThresholdMS=1000 * 1000, ) - cls.client_version = Version.from_client(cls.c) + self.client_version = Version.from_client(self.c) # mapReduce fails if the collection does not exist. - coll = cls.c.pymongo_test.get_collection( + coll = self.c.pymongo_test.get_collection( "test", write_concern=WriteConcern(w=client_context.w) ) coll.insert_one({}) - @classmethod - def tearDownClass(cls): - cls.c.drop_database("pymongo_test") - cls.c.close() + def tearDown(self): + self.c.drop_database("pymongo_test") + self.c.close() def executed_on_which_server(self, client, fn, *args, **kwargs): """Execute fn(*args, **kwargs) and return the Server instance used.""" @@ -366,7 +380,7 @@ def _test_fn(self, server_type, fn): break assert self.c.primary is not None - unused = self.c.secondaries.union({self.c.primary}).difference(used) + unused = (self.c.secondaries).union({self.c.primary}).difference(used) if unused: self.fail("Some members not used for NEAREST: %s" % (unused)) else: @@ -401,11 +415,12 @@ def func(): def test_create_collection(self): # create_collection runs listCollections on the primary to check if # the collection already exists. - self._test_primary_helper( - lambda: self.c.pymongo_test.create_collection( + def func(): + return self.c.pymongo_test.create_collection( "some_collection%s" % random.randint(0, sys.maxsize) ) - ) + + self._test_primary_helper(func) def test_count_documents(self): self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) @@ -545,7 +560,6 @@ def test_send_hedge(self): cases["secondary"] = Secondary listener = OvertCommandListener() client = self.rs_client(event_listeners=[listener]) - self.addCleanup(client.close) client.admin.command("ping") for _mode, cls in cases.items(): pref = cls(hedge={"enabled": True}) @@ -645,10 +659,10 @@ def test_mongos(self): # tell what shard member a query ran on. for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): qcoll = coll.with_options(read_preference=pref) - results = list(qcoll.find().sort([("_id", 1)])) + results = qcoll.find().sort([("_id", 1)]).to_list() self.assertEqual(first_id, results[0]["_id"]) self.assertEqual(last_id, results[-1]["_id"]) - results = list(qcoll.find().sort([("_id", -1)])) + results = qcoll.find().sort([("_id", -1)]).to_list() self.assertEqual(first_id, results[-1]["_id"]) self.assertEqual(last_id, results[0]["_id"]) @@ -671,14 +685,14 @@ def test_mongos_max_staleness(self): else: self.fail("mongos accepted invalid staleness") - coll = self.single_client( - readPreference="secondaryPreferred", maxStalenessSeconds=120 + coll = ( + self.single_client(readPreference="secondaryPreferred", maxStalenessSeconds=120) ).pymongo_test.test # No error coll.find_one() - coll = self.single_client( - readPreference="secondaryPreferred", maxStalenessSeconds=10 + coll = ( + self.single_client(readPreference="secondaryPreferred", maxStalenessSeconds=10) ).pymongo_test.test try: coll.find_one() diff --git a/tools/synchro.py b/tools/synchro.py index fc6b160821..443d57d414 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -222,6 +222,7 @@ def async_only_test(f: str) -> bool: "test_on_demand_csfle.py", "test_raw_bson.py", "test_read_concern.py", + "test_read_preferences.py", "test_read_write_concern_spec.py", "test_retryable_reads.py", "test_retryable_reads_unified.py", From f344eb7965a95d0d949c58f65d71bff9a07f6adb Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 5 Feb 2025 12:14:12 -0500 Subject: [PATCH 1726/2111] PYTHON-5109 - Convert test.test_versioned_api to async (#2129) --- .../test_versioned_api_integration.py | 86 +++++++++++++++++++ test/test_versioned_api.py | 47 +--------- test/test_versioned_api_integration.py | 82 ++++++++++++++++++ tools/synchro.py | 1 + 4 files changed, 173 insertions(+), 43 deletions(-) create mode 100644 test/asynchronous/test_versioned_api_integration.py create mode 100644 test/test_versioned_api_integration.py diff --git a/test/asynchronous/test_versioned_api_integration.py b/test/asynchronous/test_versioned_api_integration.py new file mode 100644 index 0000000000..7e9a79da90 --- /dev/null +++ b/test/asynchronous/test_versioned_api_integration.py @@ -0,0 +1,86 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from test.asynchronous.unified_format import generate_test_classes + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import OvertCommandListener + +from pymongo.server_api import ServerApi + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "versioned-api") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "versioned-api") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApiIntegration(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @async_client_context.require_version_min(4, 7) + async def test_command_options(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_api=ServerApi("1"), event_listeners=[listener] + ) + coll = client.test.test + await coll.insert_many([{} for _ in range(100)]) + self.addAsyncCleanup(coll.delete_many, {}) + await coll.find(batch_size=25).to_list() + await client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @async_client_context.require_version_min(4, 7) + @async_client_context.require_transactions + async def test_command_options_txn(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_api=ServerApi("1"), event_listeners=[listener] + ) + coll = client.test.test + await coll.insert_many([{} for _ in range(100)]) + self.addAsyncCleanup(coll.delete_many, {}) + + listener.reset() + async with client.start_session() as s, await s.start_transaction(): + await coll.insert_many([{} for _ in range(100)], session=s) + await coll.find(batch_size=25, session=s).to_list() + await client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index 7a25a507dc..19b125770f 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -13,28 +13,18 @@ # limitations under the License. from __future__ import annotations -import os import sys +from test import UnitTest sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest -from test.unified_format import generate_test_classes -from test.utils import OvertCommandListener +from test import unittest +from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi, ServerApiVersion -from pymongo.synchronous.mongo_client import MongoClient -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "versioned-api") - -# Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) - - -class TestServerApi(IntegrationTest): - RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True +class TestServerApi(UnitTest): def test_server_api_defaults(self): api = ServerApi(ServerApiVersion.V1) self.assertEqual(api.version, "1") @@ -74,35 +64,6 @@ def assertServerApiInAllCommands(self, events): for event in events: self.assertServerApi(event) - @client_context.require_version_min(4, 7) - def test_command_options(self): - listener = OvertCommandListener() - client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) - self.addCleanup(client.close) - coll = client.test.test - coll.insert_many([{} for _ in range(100)]) - self.addCleanup(coll.delete_many, {}) - list(coll.find(batch_size=25)) - client.admin.command("ping") - self.assertServerApiInAllCommands(listener.started_events) - - @client_context.require_version_min(4, 7) - @client_context.require_transactions - def test_command_options_txn(self): - listener = OvertCommandListener() - client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) - self.addCleanup(client.close) - coll = client.test.test - coll.insert_many([{} for _ in range(100)]) - self.addCleanup(coll.delete_many, {}) - - listener.reset() - with client.start_session() as s, s.start_transaction(): - coll.insert_many([{} for _ in range(100)], session=s) - list(coll.find(batch_size=25, session=s)) - client.test.command("find", "test", session=s) - self.assertServerApiInAllCommands(listener.started_events) - if __name__ == "__main__": unittest.main() diff --git a/test/test_versioned_api_integration.py b/test/test_versioned_api_integration.py new file mode 100644 index 0000000000..502198576a --- /dev/null +++ b/test/test_versioned_api_integration.py @@ -0,0 +1,82 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from test.unified_format import generate_test_classes + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils import OvertCommandListener + +from pymongo.server_api import ServerApi + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "versioned-api") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "versioned-api") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApiIntegration(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @client_context.require_version_min(4, 7) + def test_command_options(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + coll.find(batch_size=25).to_list() + client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @client_context.require_version_min(4, 7) + @client_context.require_transactions + def test_command_options_txn(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + coll.insert_many([{} for _ in range(100)], session=s) + coll.find(batch_size=25, session=s).to_list() + client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/tools/synchro.py b/tools/synchro.py index 443d57d414..4b6326a49c 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -239,6 +239,7 @@ def async_only_test(f: str) -> bool: "test_transactions.py", "test_transactions_unified.py", "test_unified_format.py", + "test_versioned_api_integration.py", "unified_format.py", ] From 1b818470fcb14fd8307f33127262bbaeeafab3f9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 5 Feb 2025 15:05:41 -0500 Subject: [PATCH 1727/2111] PYTHON-5053 - AsyncMongoClient.close() should await all background tasks (#2127) --- pymongo/asynchronous/mongo_client.py | 6 ++++++ pymongo/asynchronous/monitor.py | 9 +++++++-- pymongo/asynchronous/topology.py | 29 +++++++++++++++++++++++++++- pymongo/periodic_executor.py | 2 ++ pymongo/synchronous/mongo_client.py | 6 ++++++ pymongo/synchronous/monitor.py | 7 +++++-- pymongo/synchronous/topology.py | 29 +++++++++++++++++++++++++++- 7 files changed, 82 insertions(+), 6 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index cf7de19c2f..365fc62100 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -1565,6 +1565,12 @@ async def close(self) -> None: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. await self._encrypter.close() self._closed = True + if not _IS_SYNC: + await asyncio.gather( + self._topology.cleanup_monitors(), # type: ignore[func-returns-value] + self._kill_cursors_executor.join(), # type: ignore[func-returns-value] + return_exceptions=True, + ) if not _IS_SYNC: # Add support for contextlib.aclosing. diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index ad1bc70aba..abde7a9055 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -112,9 +112,9 @@ async def close(self) -> None: """ self.gc_safe_close() - async def join(self, timeout: Optional[int] = None) -> None: + async def join(self) -> None: """Wait for the monitor to stop.""" - await self._executor.join(timeout) + await self._executor.join() def request_check(self) -> None: """If the monitor is sleeping, wake it soon.""" @@ -189,6 +189,11 @@ def gc_safe_close(self) -> None: self._rtt_monitor.gc_safe_close() self.cancel_check() + async def join(self) -> None: + await asyncio.gather( + self._executor.join(), self._rtt_monitor.join(), return_exceptions=True + ) # type: ignore[func-returns-value] + async def close(self) -> None: self.gc_safe_close() await self._rtt_monitor.close() diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 6d67710a7e..3033377de5 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -16,6 +16,7 @@ from __future__ import annotations +import asyncio import logging import os import queue @@ -29,7 +30,7 @@ from pymongo import _csot, common, helpers_shared, periodic_executor from pymongo.asynchronous.client_session import _ServerSession, _ServerSessionPool -from pymongo.asynchronous.monitor import SrvMonitor +from pymongo.asynchronous.monitor import MonitorBase, SrvMonitor from pymongo.asynchronous.pool import Pool from pymongo.asynchronous.server import Server from pymongo.errors import ( @@ -207,6 +208,9 @@ async def target() -> bool: if self._settings.fqdn is not None and not self._settings.load_balanced: self._srv_monitor = SrvMonitor(self, self._settings) + # Stores all monitor tasks that need to be joined on close or server selection + self._monitor_tasks: list[MonitorBase] = [] + async def open(self) -> None: """Start monitoring, or restart after a fork. @@ -241,6 +245,8 @@ async def open(self) -> None: # Close servers and clear the pools. for server in self._servers.values(): await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) # Reset the session pool to avoid duplicate sessions in # the child process. self._session_pool.reset() @@ -283,6 +289,10 @@ async def select_servers( else: server_timeout = server_selection_timeout + # Cleanup any completed monitor tasks safely + if not _IS_SYNC and self._monitor_tasks: + await self.cleanup_monitors() + async with self._lock: server_descriptions = await self._select_servers_loop( selector, server_timeout, operation, operation_id, address @@ -520,6 +530,8 @@ async def _process_change( and self._description.topology_type not in SRV_POLLING_TOPOLOGIES ): await self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) # Clear the pool from a failed heartbeat. if reset_pool: @@ -695,6 +707,8 @@ async def close(self) -> None: old_td = self._description for server in self._servers.values(): await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) # Mark all servers Unknown. self._description = self._description.reset() @@ -705,6 +719,8 @@ async def close(self) -> None: # Stop SRV polling thread. if self._srv_monitor: await self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) self._opened = False self._closed = True @@ -944,6 +960,8 @@ async def _update_servers(self) -> None: for address, server in list(self._servers.items()): if not self._description.has_server(address): await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) self._servers.pop(address) def _create_pool_for_server(self, address: _Address) -> Pool: @@ -1031,6 +1049,15 @@ def _error_message(self, selector: Callable[[Selection], Selection]) -> str: else: return ",".join(str(server.error) for server in servers if server.error) + async def cleanup_monitors(self) -> None: + tasks = [] + try: + while self._monitor_tasks: + tasks.append(self._monitor_tasks.pop()) + except IndexError: + pass + await asyncio.gather(*[t.join() for t in tasks], return_exceptions=True) # type: ignore[func-returns-value] + def __repr__(self) -> str: msg = "" if not self._opened: diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 9b10f6e7e3..f51a988728 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -75,6 +75,8 @@ def close(self, dummy: Any = None) -> None: callback; see monitor.py. """ self._stopped = True + if self._task is not None: + self._task.cancel() async def join(self, timeout: Optional[int] = None) -> None: if self._task is not None: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 706623c214..8cd08ab725 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -1559,6 +1559,12 @@ def close(self) -> None: # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. self._encrypter.close() self._closed = True + if not _IS_SYNC: + asyncio.gather( + self._topology.cleanup_monitors(), # type: ignore[func-returns-value] + self._kill_cursors_executor.join(), # type: ignore[func-returns-value] + return_exceptions=True, + ) if not _IS_SYNC: # Add support for contextlib.closing. diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index df4130d4ab..211635d8b8 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -112,9 +112,9 @@ def close(self) -> None: """ self.gc_safe_close() - def join(self, timeout: Optional[int] = None) -> None: + def join(self) -> None: """Wait for the monitor to stop.""" - self._executor.join(timeout) + self._executor.join() def request_check(self) -> None: """If the monitor is sleeping, wake it soon.""" @@ -189,6 +189,9 @@ def gc_safe_close(self) -> None: self._rtt_monitor.gc_safe_close() self.cancel_check() + def join(self) -> None: + asyncio.gather(self._executor.join(), self._rtt_monitor.join(), return_exceptions=True) # type: ignore[func-returns-value] + def close(self) -> None: self.gc_safe_close() self._rtt_monitor.close() diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index b03269ae43..09b61f6d05 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -16,6 +16,7 @@ from __future__ import annotations +import asyncio import logging import os import queue @@ -61,7 +62,7 @@ writable_server_selector, ) from pymongo.synchronous.client_session import _ServerSession, _ServerSessionPool -from pymongo.synchronous.monitor import SrvMonitor +from pymongo.synchronous.monitor import MonitorBase, SrvMonitor from pymongo.synchronous.pool import Pool from pymongo.synchronous.server import Server from pymongo.topology_description import ( @@ -207,6 +208,9 @@ def target() -> bool: if self._settings.fqdn is not None and not self._settings.load_balanced: self._srv_monitor = SrvMonitor(self, self._settings) + # Stores all monitor tasks that need to be joined on close or server selection + self._monitor_tasks: list[MonitorBase] = [] + def open(self) -> None: """Start monitoring, or restart after a fork. @@ -241,6 +245,8 @@ def open(self) -> None: # Close servers and clear the pools. for server in self._servers.values(): server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) # Reset the session pool to avoid duplicate sessions in # the child process. self._session_pool.reset() @@ -283,6 +289,10 @@ def select_servers( else: server_timeout = server_selection_timeout + # Cleanup any completed monitor tasks safely + if not _IS_SYNC and self._monitor_tasks: + self.cleanup_monitors() + with self._lock: server_descriptions = self._select_servers_loop( selector, server_timeout, operation, operation_id, address @@ -520,6 +530,8 @@ def _process_change( and self._description.topology_type not in SRV_POLLING_TOPOLOGIES ): self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) # Clear the pool from a failed heartbeat. if reset_pool: @@ -693,6 +705,8 @@ def close(self) -> None: old_td = self._description for server in self._servers.values(): server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) # Mark all servers Unknown. self._description = self._description.reset() @@ -703,6 +717,8 @@ def close(self) -> None: # Stop SRV polling thread. if self._srv_monitor: self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) self._opened = False self._closed = True @@ -942,6 +958,8 @@ def _update_servers(self) -> None: for address, server in list(self._servers.items()): if not self._description.has_server(address): server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) self._servers.pop(address) def _create_pool_for_server(self, address: _Address) -> Pool: @@ -1029,6 +1047,15 @@ def _error_message(self, selector: Callable[[Selection], Selection]) -> str: else: return ",".join(str(server.error) for server in servers if server.error) + def cleanup_monitors(self) -> None: + tasks = [] + try: + while self._monitor_tasks: + tasks.append(self._monitor_tasks.pop()) + except IndexError: + pass + asyncio.gather(*[t.join() for t in tasks], return_exceptions=True) # type: ignore[func-returns-value] + def __repr__(self) -> str: msg = "" if not self._opened: From 3dd44e6e5f89ffc6171c8ffc7ed052b620e35065 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 6 Feb 2025 10:07:24 -0500 Subject: [PATCH 1728/2111] PYTHON-5087 - Convert test.test_load_balancer to async (#2103) --- test/asynchronous/helpers.py | 19 ++- test/asynchronous/test_load_balancer.py | 199 ++++++++++++++++++++++++ test/asynchronous/test_session.py | 38 ++--- test/helpers.py | 19 ++- test/test_bson.py | 6 +- test/test_load_balancer.py | 72 ++++++--- test/test_session.py | 32 ++-- test/utils.py | 24 ++- tools/synchro.py | 2 + 9 files changed, 331 insertions(+), 80 deletions(-) create mode 100644 test/asynchronous/test_load_balancer.py diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index a35c71b107..28260d0a52 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -40,7 +40,7 @@ except ImportError: HAVE_IPADDRESS = False from functools import wraps -from typing import Any, Callable, Dict, Generator, no_type_check +from typing import Any, Callable, Dict, Generator, Optional, no_type_check from unittest import SkipTest from bson.son import SON @@ -395,7 +395,7 @@ def __init__(self, **kwargs): async def start(self): self.task = create_task(self.run(), name=self.name) - async def join(self, timeout: float | None = 0): # type: ignore[override] + async def join(self, timeout: Optional[float] = None): # type: ignore[override] if self.task is not None: await asyncio.wait([self.task], timeout=timeout) @@ -407,3 +407,18 @@ async def run(self): await self.target(*self.args) finally: self.stopped = True + + +class ExceptionCatchingTask(ConcurrentRunner): + """A Task that stores any exception encountered while running.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.exc = None + + async def run(self): + try: + await super().run() + except BaseException as exc: + self.exc = exc + raise diff --git a/test/asynchronous/test_load_balancer.py b/test/asynchronous/test_load_balancer.py new file mode 100644 index 0000000000..fd50841c87 --- /dev/null +++ b/test/asynchronous/test_load_balancer.py @@ -0,0 +1,199 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" +from __future__ import annotations + +import asyncio +import gc +import os +import pathlib +import sys +import threading +from asyncio import Event +from test.asynchronous.helpers import ConcurrentRunner, ExceptionCatchingTask + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils import ( + async_get_pool, + async_wait_until, + create_async_event, +) + +from pymongo.asynchronous.helpers import anext + +_IS_SYNC = False + +pytestmark = pytest.mark.load_balancer + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "load_balancer") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "load_balancer") + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +class TestLB(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + RUN_ON_SERVERLESS = True + + async def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") + pool = await async_get_pool(self.client) + n_conns = len(pool.conns) + await self.db.test.find_one({}) + self.assertEqual(len(pool.conns), n_conns) + await (await self.db.test.aggregate([{"$limit": 1}])).to_list() + self.assertEqual(len(pool.conns), n_conns) + + @async_client_context.require_load_balancer + async def test_unpin_committed_transaction(self): + client = await self.async_rs_client() + pool = await async_get_pool(client) + coll = client[self.db.name].test + async with client.start_session() as session: + async with await session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + await coll.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + @async_client_context.require_failCommand_fail_point + async def test_cursor_gc(self): + async def create_resource(coll): + cursor = coll.find({}, batch_size=3) + await anext(cursor) + return cursor + + await self._test_no_gc_deadlock(create_resource) + + @async_client_context.require_failCommand_fail_point + async def test_command_cursor_gc(self): + async def create_resource(coll): + cursor = await coll.aggregate([], batchSize=3) + await anext(cursor) + return cursor + + await self._test_no_gc_deadlock(create_resource) + + async def _test_no_gc_deadlock(self, create_resource): + client = await self.async_rs_client() + pool = await async_get_pool(client) + coll = client[self.db.name].test + await coll.insert_many([{} for _ in range(10)]) + self.assertEqual(pool.active_sockets, 0) + # Cause the initial find attempt to fail to induce a reference cycle. + args = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, + } + async with self.fail_point(args): + resource = await create_resource(coll) + if async_client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + task = PoolLocker(pool) + await task.start() + self.assertTrue(await task.wait(task.locked, 5), "timed out") + # Garbage collect the resource while the pool is locked to ensure we + # don't deadlock. + del resource + # On PyPy it can take a few rounds to collect the cursor. + for _ in range(3): + gc.collect() + task.unlock.set() + await task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + await coll.delete_many({}) + + @async_client_context.require_transactions + async def test_session_gc(self): + client = await self.async_rs_client() + pool = await async_get_pool(client) + session = client.start_session() + await session.start_transaction() + await client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server unless we're + # testing serverless which does not support killSessions. + if not async_client_context.serverless: + self.addAsyncCleanup(self.client.admin.command, "killSessions", [session.session_id]) + if async_client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + task = PoolLocker(pool) + await task.start() + self.assertTrue(await task.wait(task.locked, 5), "timed out") + # Garbage collect the session while the pool is locked to ensure we + # don't deadlock. + del session + # On PyPy it can take a few rounds to collect the session. + for _ in range(3): + gc.collect() + task.unlock.set() + await task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + await client[self.db.name].test.delete_many({}) + + +class PoolLocker(ExceptionCatchingTask): + def __init__(self, pool): + super().__init__(target=self.lock_pool) + self.pool = pool + self.daemon = True + self.locked = create_async_event() + self.unlock = create_async_event() + + async def lock_pool(self): + async with self.pool.lock: + self.locked.set() + # Wait for the unlock flag. + unlock_pool = await self.wait(self.unlock, 10) + if not unlock_pool: + raise Exception("timed out waiting for unlock signal: deadlock?") + + async def wait(self, event: Event, timeout: int): + if _IS_SYNC: + return event.wait(timeout) # type: ignore[call-arg] + else: + try: + await asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return False + return True + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 42bc253b56..03d1032b5b 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -15,10 +15,13 @@ """Test the client_session module.""" from __future__ import annotations +import asyncio import copy import sys import time +from asyncio import iscoroutinefunction from io import BytesIO +from test.asynchronous.helpers import ExceptionCatchingTask from typing import Any, Callable, List, Set, Tuple from pymongo.synchronous.mongo_client import MongoClient @@ -35,7 +38,6 @@ ) from test.utils import ( EventListener, - ExceptionCatchingThread, OvertCommandListener, async_wait_until, ) @@ -184,8 +186,7 @@ async def _test_ops(self, client, *ops): f"{f.__name__} did not return implicit session to pool", ) - @async_client_context.require_sync - def test_implicit_sessions_checkout(self): + async def test_implicit_sessions_checkout(self): # "To confirm that implicit sessions only allocate their server session after a # successful connection checkout" test from Driver Sessions Spec. succeeded = False @@ -193,7 +194,7 @@ def test_implicit_sessions_checkout(self): failures = 0 for _ in range(5): listener = OvertCommandListener() - client = self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + client = await self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -210,26 +211,27 @@ def test_implicit_sessions_checkout(self): (cursor.distinct, ["_id"]), (client.db.list_collections, []), ] - threads = [] + tasks = [] listener.reset() - def thread_target(op, *args): - res = op(*args) + async def target(op, *args): + if iscoroutinefunction(op): + res = await op(*args) + else: + res = op(*args) if isinstance(res, (AsyncCursor, AsyncCommandCursor)): - list(res) # type: ignore[call-overload] + await res.to_list() for op, args in ops: - threads.append( - ExceptionCatchingThread( - target=thread_target, args=[op, *args], name=op.__name__ - ) + tasks.append( + ExceptionCatchingTask(target=target, args=[op, *args], name=op.__name__) ) - threads[-1].start() - self.assertEqual(len(threads), len(ops)) - for thread in threads: - thread.join() - self.assertIsNone(thread.exc) - client.close() + await tasks[-1].start() + self.assertEqual(len(tasks), len(ops)) + for t in tasks: + await t.join() + self.assertIsNone(t.exc) + await client.close() lsid_set.clear() for i in listener.started_events: if i.command.get("lsid"): diff --git a/test/helpers.py b/test/helpers.py index 705843efcd..3f51fde08c 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -40,7 +40,7 @@ except ImportError: HAVE_IPADDRESS = False from functools import wraps -from typing import Any, Callable, Dict, Generator, no_type_check +from typing import Any, Callable, Dict, Generator, Optional, no_type_check from unittest import SkipTest from bson.son import SON @@ -395,7 +395,7 @@ def __init__(self, **kwargs): def start(self): self.task = create_task(self.run(), name=self.name) - def join(self, timeout: float | None = 0): # type: ignore[override] + def join(self, timeout: Optional[float] = None): # type: ignore[override] if self.task is not None: asyncio.wait([self.task], timeout=timeout) @@ -407,3 +407,18 @@ def run(self): self.target(*self.args) finally: self.stopped = True + + +class ExceptionCatchingTask(ConcurrentRunner): + """A Task that stores any exception encountered while running.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.exc = None + + def run(self): + try: + super().run() + except BaseException as exc: + self.exc = exc + raise diff --git a/test/test_bson.py b/test/test_bson.py index e601be4915..e704efe451 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] from test import qcheck, unittest -from test.utils import ExceptionCatchingThread +from test.helpers import ExceptionCatchingTask import bson from bson import ( @@ -1075,7 +1075,7 @@ def target(i): my_int = type(f"MyInt_{i}_{j}", (int,), {}) bson.encode({"my_int": my_int()}) - threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)] + threads = [ExceptionCatchingTask(target=target, args=(i,)) for i in range(3)] for t in threads: t.start() @@ -1114,7 +1114,7 @@ def __repr__(self): def test_doc_in_invalid_document_error_message_mapping(self): class MyMapping(abc.Mapping): - def keys(): + def keys(self): return ["t"] def __getitem__(self, name): diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 23bea4d984..7db19b46b5 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -15,10 +15,14 @@ """Test the Load Balancer unified spec tests.""" from __future__ import annotations +import asyncio import gc import os +import pathlib import sys import threading +from asyncio import Event +from test.helpers import ConcurrentRunner, ExceptionCatchingTask import pytest @@ -26,15 +30,26 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import ExceptionCatchingThread, get_pool, wait_until +from test.utils import ( + create_event, + get_pool, + wait_until, +) + +from pymongo.synchronous.helpers import next + +_IS_SYNC = True pytestmark = pytest.mark.load_balancer # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "load_balancer") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "load_balancer") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "load_balancer") # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) class TestLB(IntegrationTest): @@ -49,13 +64,12 @@ def test_connections_are_only_returned_once(self): n_conns = len(pool.conns) self.db.test.find_one({}) self.assertEqual(len(pool.conns), n_conns) - list(self.db.test.aggregate([{"$limit": 1}])) + (self.db.test.aggregate([{"$limit": 1}])).to_list() self.assertEqual(len(pool.conns), n_conns) @client_context.require_load_balancer def test_unpin_committed_transaction(self): client = self.rs_client() - self.addCleanup(client.close) pool = get_pool(client) coll = client[self.db.name].test with client.start_session() as session: @@ -86,7 +100,6 @@ def create_resource(coll): def _test_no_gc_deadlock(self, create_resource): client = self.rs_client() - self.addCleanup(client.close) pool = get_pool(client) coll = client[self.db.name].test coll.insert_many([{} for _ in range(10)]) @@ -104,19 +117,19 @@ def _test_no_gc_deadlock(self, create_resource): if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. - thread = PoolLocker(pool) - thread.start() - self.assertTrue(thread.locked.wait(5), "timed out") + task = PoolLocker(pool) + task.start() + self.assertTrue(task.wait(task.locked, 5), "timed out") # Garbage collect the resource while the pool is locked to ensure we # don't deadlock. del resource # On PyPy it can take a few rounds to collect the cursor. for _ in range(3): gc.collect() - thread.unlock.set() - thread.join(5) - self.assertFalse(thread.is_alive()) - self.assertIsNone(thread.exc) + task.unlock.set() + task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. @@ -125,7 +138,6 @@ def _test_no_gc_deadlock(self, create_resource): @client_context.require_transactions def test_session_gc(self): client = self.rs_client() - self.addCleanup(client.close) pool = get_pool(client) session = client.start_session() session.start_transaction() @@ -137,41 +149,51 @@ def test_session_gc(self): if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. - thread = PoolLocker(pool) - thread.start() - self.assertTrue(thread.locked.wait(5), "timed out") + task = PoolLocker(pool) + task.start() + self.assertTrue(task.wait(task.locked, 5), "timed out") # Garbage collect the session while the pool is locked to ensure we # don't deadlock. del session # On PyPy it can take a few rounds to collect the session. for _ in range(3): gc.collect() - thread.unlock.set() - thread.join(5) - self.assertFalse(thread.is_alive()) - self.assertIsNone(thread.exc) + task.unlock.set() + task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. client[self.db.name].test.delete_many({}) -class PoolLocker(ExceptionCatchingThread): +class PoolLocker(ExceptionCatchingTask): def __init__(self, pool): super().__init__(target=self.lock_pool) self.pool = pool self.daemon = True - self.locked = threading.Event() - self.unlock = threading.Event() + self.locked = create_event() + self.unlock = create_event() def lock_pool(self): with self.pool.lock: self.locked.set() # Wait for the unlock flag. - unlock_pool = self.unlock.wait(10) + unlock_pool = self.wait(self.unlock, 10) if not unlock_pool: raise Exception("timed out waiting for unlock signal: deadlock?") + def wait(self, event: Event, timeout: int): + if _IS_SYNC: + return event.wait(timeout) # type: ignore[call-arg] + else: + try: + asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return False + return True + if __name__ == "__main__": unittest.main() diff --git a/test/test_session.py b/test/test_session.py index 634efa11c0..175a282495 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -15,10 +15,13 @@ """Test the client_session module.""" from __future__ import annotations +import asyncio import copy import sys import time +from asyncio import iscoroutinefunction from io import BytesIO +from test.helpers import ExceptionCatchingTask from typing import Any, Callable, List, Set, Tuple from pymongo.synchronous.mongo_client import MongoClient @@ -35,7 +38,6 @@ ) from test.utils import ( EventListener, - ExceptionCatchingThread, OvertCommandListener, wait_until, ) @@ -184,7 +186,6 @@ def _test_ops(self, client, *ops): f"{f.__name__} did not return implicit session to pool", ) - @client_context.require_sync def test_implicit_sessions_checkout(self): # "To confirm that implicit sessions only allocate their server session after a # successful connection checkout" test from Driver Sessions Spec. @@ -210,25 +211,26 @@ def test_implicit_sessions_checkout(self): (cursor.distinct, ["_id"]), (client.db.list_collections, []), ] - threads = [] + tasks = [] listener.reset() - def thread_target(op, *args): - res = op(*args) + def target(op, *args): + if iscoroutinefunction(op): + res = op(*args) + else: + res = op(*args) if isinstance(res, (Cursor, CommandCursor)): - list(res) # type: ignore[call-overload] + res.to_list() for op, args in ops: - threads.append( - ExceptionCatchingThread( - target=thread_target, args=[op, *args], name=op.__name__ - ) + tasks.append( + ExceptionCatchingTask(target=target, args=[op, *args], name=op.__name__) ) - threads[-1].start() - self.assertEqual(len(threads), len(ops)) - for thread in threads: - thread.join() - self.assertIsNone(thread.exc) + tasks[-1].start() + self.assertEqual(len(tasks), len(ops)) + for t in tasks: + t.join() + self.assertIsNone(t.exc) client.close() lsid_set.clear() for i in listener.started_events: diff --git a/test/utils.py b/test/utils.py index 91000a636a..5c1e0bfb7c 100644 --- a/test/utils.py +++ b/test/utils.py @@ -39,6 +39,7 @@ from bson.objectid import ObjectId from bson.son import SON from pymongo import AsyncMongoClient, monitoring, operations, read_preferences +from pymongo._asyncio_task import create_task from pymongo.cursor_shared import CursorType from pymongo.errors import ConfigurationError, OperationFailure from pymongo.hello import HelloCompat @@ -912,21 +913,6 @@ def is_greenthread_patched(): return gevent_monkey_patched() or eventlet_monkey_patched() -class ExceptionCatchingThread(threading.Thread): - """A thread that stores any exception encountered from run().""" - - def __init__(self, *args, **kwargs): - self.exc = None - super().__init__(*args, **kwargs) - - def run(self): - try: - super().run() - except BaseException as exc: - self.exc = exc - raise - - def parse_read_preference(pref): # Make first letter lowercase to match read_pref's modes. mode_string = pref.get("mode", "primary") @@ -1079,3 +1065,11 @@ async def async_set_fail_point(client, command_args): cmd = SON([("configureFailPoint", "failCommand")]) cmd.update(command_args) await client.admin.command(cmd) + + +def create_async_event(): + return asyncio.Event() + + +def create_event(): + return threading.Event() diff --git a/tools/synchro.py b/tools/synchro.py index 4b6326a49c..fe38b4dcfe 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -122,6 +122,7 @@ "SpecRunnerTask": "SpecRunnerThread", "AsyncMockConnection": "MockConnection", "AsyncMockPool": "MockPool", + "create_async_event": "create_event", } docstring_replacements: dict[tuple[str, str], str] = { @@ -214,6 +215,7 @@ def async_only_test(f: str) -> bool: "test_heartbeat_monitoring.py", "test_index_management.py", "test_grid_file.py", + "test_load_balancer.py", "test_json_util_integration.py", "test_gridfs_spec.py", "test_logger.py", From 25c9b90842b23f072557455a7ffd7961f8f3483a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 6 Feb 2025 10:24:37 -0800 Subject: [PATCH 1729/2111] PYTHON-5099 Convert test.test_sdam_monitoring_spec to async (#2117) --- .../asynchronous/test_sdam_monitoring_spec.py | 374 ++++++++++++++++++ test/test_sdam_monitoring_spec.py | 35 +- tools/synchro.py | 1 + 3 files changed, 397 insertions(+), 13 deletions(-) create mode 100644 test/asynchronous/test_sdam_monitoring_spec.py diff --git a/test/asynchronous/test_sdam_monitoring_spec.py b/test/asynchronous/test_sdam_monitoring_spec.py new file mode 100644 index 0000000000..8b0ec63cfe --- /dev/null +++ b/test/asynchronous/test_sdam_monitoring_spec.py @@ -0,0 +1,374 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the sdam monitoring spec tests.""" +from __future__ import annotations + +import asyncio +import json +import os +import sys +import time +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest +from test.utils import ( + ServerAndTopologyEventListener, + async_wait_until, + server_name_to_type, +) + +from bson.json_util import object_hook +from pymongo import AsyncMongoClient, monitoring +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.monitor import Monitor +from pymongo.common import clean_node +from pymongo.errors import ConnectionFailure, NotPrimaryError +from pymongo.hello import Hello +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sdam_monitoring") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sdam_monitoring") + + +def compare_server_descriptions(expected, actual): + if (expected["address"] != "{}:{}".format(*actual.address)) or ( + server_name_to_type(expected["type"]) != actual.server_type + ): + return False + expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) + return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts} + + +def compare_topology_descriptions(expected, actual): + if TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) != actual.topology_type: + return False + expected = expected["servers"] + actual = actual.server_descriptions() + if len(expected) != len(actual): + return False + for exp_server in expected: + for _address, actual_server in actual.items(): + if compare_server_descriptions(exp_server, actual_server): + break + else: + return False + return True + + +def compare_events(expected_dict, actual): + if not expected_dict: + return False, "Error: Bad expected value in YAML test" + if not actual: + return False, "Error: Event published was None" + + expected_type, expected = list(expected_dict.items())[0] + + if expected_type == "server_opening_event": + if not isinstance(actual, monitoring.ServerOpeningEvent): + return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerOpeningEvent published with wrong address (expected" " {}, got {}".format( + expected["address"], actual.server_address + ), + ) + + elif expected_type == "server_description_changed_event": + if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): + return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerDescriptionChangedEvent has wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), + ) + + if not compare_server_descriptions(expected["newDescription"], actual.new_description): + return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent") + if not compare_server_descriptions( + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous ServerDescription incorrect in ServerDescriptionChangedEvent", + ) + + elif expected_type == "server_closed_event": + if not isinstance(actual, monitoring.ServerClosedEvent): + return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) + if expected["address"] != "{}:{}".format(*actual.server_address): + return ( + False, + "ServerClosedEvent published with wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), + ) + + elif expected_type == "topology_opening_event": + if not isinstance(actual, monitoring.TopologyOpenedEvent): + return False, "Expected TopologyOpenedEvent, got %s" % (actual.__class__) + + elif expected_type == "topology_description_changed_event": + if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): + return ( + False, + "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__), + ) + if not compare_topology_descriptions(expected["newDescription"], actual.new_description): + return ( + False, + "New TopologyDescription incorrect in TopologyDescriptionChangedEvent", + ) + if not compare_topology_descriptions( + expected["previousDescription"], actual.previous_description + ): + return ( + False, + "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent", + ) + + elif expected_type == "topology_await aclosed_event": + if not isinstance(actual, monitoring.TopologyClosedEvent): + return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) + + else: + return False, f"Incorrect event: expected {expected_type}, actual {actual}" + + return True, "" + + +def compare_multiple_events(i, expected_results, actual_results): + events_in_a_row = [] + j = i + while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__): + events_in_a_row.append(actual_results[j]) + j += 1 + message = "" + for event in events_in_a_row: + for k in range(i, j): + passed, message = compare_events(expected_results[k], event) + if passed: + expected_results[k] = None + break + else: + return i, False, message + return j, True, "" + + +class TestAllScenarios(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.all_listener = ServerAndTopologyEventListener() + + +def create_test(scenario_def): + async def run_scenario(self): + with client_knobs(events_queue_frequency=0.05, min_heartbeat_interval=0.05): + await _run_scenario(self) + + async def _run_scenario(self): + class NoopMonitor(Monitor): + """Override the _run method to do nothing.""" + + async def _run(self): + await asyncio.sleep(0.05) + + m = AsyncMongoClient( + host=scenario_def["uri"], + port=27017, + event_listeners=[self.all_listener], + _monitor_class=NoopMonitor, + ) + topology = await m._get_topology() + + try: + for phase in scenario_def["phases"]: + for source, response in phase.get("responses", []): + source_address = clean_node(source) + await topology.on_change( + ServerDescription( + address=source_address, hello=Hello(response), round_trip_time=0 + ) + ) + + expected_results = phase["outcome"]["events"] + expected_len = len(expected_results) + await async_wait_until( + lambda: len(self.all_listener.results) >= expected_len, + "publish all events", + timeout=15, + ) + + # Wait some time to catch possible lagging extra events. + await async_wait_until(lambda: topology._events.empty(), "publish lagging events") + + i = 0 + while i < expected_len: + result = ( + self.all_listener.results[i] if len(self.all_listener.results) > i else None + ) + # The order of ServerOpening/ClosedEvents doesn't matter + if isinstance( + result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent) + ): + i, passed, message = compare_multiple_events( + i, expected_results, self.all_listener.results + ) + self.assertTrue(passed, message) + else: + self.assertTrue(*compare_events(expected_results[i], result)) + i += 1 + + # Assert no extra events. + extra_events = self.all_listener.results[expected_len:] + if extra_events: + self.fail(f"Extra events {extra_events!r}") + + self.all_listener.reset() + finally: + await m.close() + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json.load(scenario_stream, object_hook=object_hook) + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{os.path.splitext(filename)[0]}" + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestSdamMonitoring(AsyncIntegrationTest): + knobs: client_knobs + listener: ServerAndTopologyEventListener + test_client: AsyncMongoClient + coll: AsyncCollection + + @classmethod + def setUpClass(cls): + # Speed up the tests by decreasing the event publish frequency. + cls.knobs = client_knobs( + events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 + ) + cls.knobs.enable() + cls.listener = ServerAndTopologyEventListener() + + @classmethod + def tearDownClass(cls): + cls.knobs.disable() + + @async_client_context.require_failCommand_fail_point + async def asyncSetUp(self): + await super().asyncSetUp() + + retry_writes = async_client_context.supports_transactions() + self.test_client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=retry_writes + ) + self.coll = self.test_client[self.client.db.name].test + await self.coll.insert_one({}) + self.listener.reset() + + async def asyncTearDown(self): + await super().asyncTearDown() + + async def _test_app_error(self, fail_command_opts, expected_error): + address = await self.test_client.address + + # Test that an application error causes a ServerDescriptionChangedEvent + # to be published. + data = {"failCommands": ["insert"]} + data.update(fail_command_opts) + fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, + } + async with self.fail_point(fail_insert): + if self.test_client.options.retry_writes: + await self.coll.insert_one({}) + else: + with self.assertRaises(expected_error): + await self.coll.insert_one({}) + await self.coll.insert_one({}) + + def marked_unknown(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.new_description.is_server_type_known + ) + + def discovered_node(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown_and_rediscovered(): + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) + + # Topology events are not published synchronously + await async_wait_until(marked_unknown_and_rediscovered, "rediscover node") + + # Expect a single ServerDescriptionChangedEvent for the network error. + marked_unknown_events = self.listener.matching(marked_unknown) + self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) + + async def test_network_error_publishes_events(self): + await self._test_app_error({"closeConnection": True}, ConnectionFailure) + + # In 4.4+, not primary errors from failCommand don't cause SDAM state + # changes because topologyVersion is not incremented. + @async_client_context.require_version_max(4, 3) + async def test_not_primary_error_publishes_events(self): + await self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + async def test_shutdown_error_publishes_events(self): + await self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 6b808b159d..6a53c062cc 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -15,10 +15,12 @@ """Run the sdam monitoring spec tests.""" from __future__ import annotations +import asyncio import json import os import sys import time +from pathlib import Path sys.path[0:0] = [""] @@ -39,8 +41,13 @@ from pymongo.synchronous.monitor import Monitor from pymongo.topology_description import TOPOLOGY_TYPE +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sdam_monitoring") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sdam_monitoring") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sdam_monitoring") def compare_server_descriptions(expected, actual): @@ -247,7 +254,7 @@ def _run(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json.load(scenario_stream, object_hook=object_hook) @@ -268,31 +275,33 @@ class TestSdamMonitoring(IntegrationTest): coll: Collection @classmethod - @client_context.require_failCommand_fail_point def setUpClass(cls): - super().setUp(cls) # Speed up the tests by decreasing the event publish frequency. cls.knobs = client_knobs( events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 ) cls.knobs.enable() cls.listener = ServerAndTopologyEventListener() - retry_writes = client_context.supports_transactions() - cls.test_client = cls.unmanaged_rs_or_single_client( - event_listeners=[cls.listener], retryWrites=retry_writes - ) - cls.coll = cls.test_client[cls.client.db.name].test - cls.coll.insert_one({}) @classmethod def tearDownClass(cls): - cls.test_client.close() cls.knobs.disable() - super().tearDownClass() + @client_context.require_failCommand_fail_point def setUp(self): + super().setUp() + + retry_writes = client_context.supports_transactions() + self.test_client = self.rs_or_single_client( + event_listeners=[self.listener], retryWrites=retry_writes + ) + self.coll = self.test_client[self.client.db.name].test + self.coll.insert_one({}) self.listener.reset() + def tearDown(self): + super().tearDown() + def _test_app_error(self, fail_command_opts, expected_error): address = self.test_client.address @@ -334,7 +343,7 @@ def marked_unknown_and_rediscovered(): and len(self.listener.matching(discovered_node)) >= 1 ) - # Topology events are published asynchronously + # Topology events are not published synchronously wait_until(marked_unknown_and_rediscovered, "rediscover node") # Expect a single ServerDescriptionChangedEvent for the network error. diff --git a/tools/synchro.py b/tools/synchro.py index fe38b4dcfe..10030a275d 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -231,6 +231,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes.py", "test_retryable_writes_unified.py", "test_run_command.py", + "test_sdam_monitoring_spec.py", "test_server_selection_logging.py", "test_session.py", "test_server_selection_rtt.py", From a1a21099edee3e22b0fcf864687a4a0c1ee54a15 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 6 Feb 2025 10:29:16 -0800 Subject: [PATCH 1730/2111] PYTHON-5089 Convert test.test_mongos_load_balancing to async (#2107) Co-authored-by: Noah Stapp --- .../test_mongos_load_balancing.py | 199 ++++++++++++++++++ test/test_mongos_load_balancing.py | 40 ++-- tools/synchro.py | 1 + 3 files changed, 223 insertions(+), 17 deletions(-) create mode 100644 test/asynchronous/test_mongos_load_balancing.py diff --git a/test/asynchronous/test_mongos_load_balancing.py b/test/asynchronous/test_mongos_load_balancing.py new file mode 100644 index 0000000000..0bc6a405f4 --- /dev/null +++ b/test/asynchronous/test_mongos_load_balancing.py @@ -0,0 +1,199 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test AsyncMongoClient's mongos load balancing using a mock.""" +from __future__ import annotations + +import asyncio +import sys +import threading +from test.asynchronous.helpers import ConcurrentRunner + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncMockClientTest, async_client_context, connected, unittest +from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.utils import async_wait_until + +from pymongo.errors import AutoReconnect, InvalidOperation +from pymongo.server_selectors import writable_server_selector +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + + +class SimpleOp(ConcurrentRunner): + def __init__(self, client): + super().__init__() + self.client = client + self.passed = False + + async def run(self): + await self.client.db.command("ping") + self.passed = True # No exception raised. + + +async def do_simple_op(client, ntasks): + tasks = [SimpleOp(client) for _ in range(ntasks)] + for t in tasks: + await t.start() + + for t in tasks: + await t.join() + + for t in tasks: + assert t.passed + + +async def writable_addresses(topology): + return { + server.description.address + for server in await topology.select_servers(writable_server_selector, _Op.TEST) + } + + +class TestMongosLoadBalancing(AsyncMockClientTest): + @async_client_context.require_connection + @async_client_context.require_no_load_balancer + async def asyncSetUp(self): + await super().asyncSetUp() + + def mock_client(self, **kwargs): + mock_client = AsyncMockClient( + standalones=[], + members=[], + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", + connect=False, + **kwargs, + ) + self.addAsyncCleanup(mock_client.aclose) + + # Latencies in seconds. + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 + return mock_client + + async def test_lazy_connect(self): + # While connected() ensures we can trigger connection from the main + # thread and wait for the monitors, this test triggers connection from + # several threads at once to check for data races. + nthreads = 10 + client = self.mock_client() + self.assertEqual(0, len(client.nodes)) + + # Trigger initial connection. + await do_simple_op(client, nthreads) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + async def test_failover(self): + ntasks = 10 + client = await connected(self.mock_client(localThresholdMS=0.001)) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Our chosen mongos goes down. + client.kill_host("a:1") + + # Trigger failover to higher-latency nodes. AutoReconnect should be + # raised at most once in each thread. + passed = [] + + async def f(): + try: + await client.db.command("ping") + except AutoReconnect: + # Second attempt succeeds. + await client.db.command("ping") + + passed.append(True) + + tasks = [ConcurrentRunner(target=f) for _ in range(ntasks)] + for t in tasks: + await t.start() + + for t in tasks: + await t.join() + + self.assertEqual(ntasks, len(passed)) + + # Down host removed from list. + self.assertEqual(2, len(client.nodes)) + + async def test_local_threshold(self): + client = await connected(self.mock_client(localThresholdMS=30)) + self.assertEqual(30, client.options.local_threshold_ms) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + topology = client._topology + + # All are within a 30-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, await writable_addresses(topology)) + + # No error + await client.admin.command("ping") + + client = await connected(self.mock_client(localThresholdMS=0)) + self.assertEqual(0, client.options.local_threshold_ms) + # No error + await client.db.command("ping") + # Our chosen mongos goes down. + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) + try: + await client.db.command("ping") + except: + pass + + # We eventually connect to a new mongos. + async def connect_to_new_mongos(): + try: + return await client.db.command("ping") + except AutoReconnect: + pass + + await async_wait_until(connect_to_new_mongos, "connect to a new mongos") + + async def test_load_balancing(self): + # Although the server selection JSON tests already prove that + # select_servers works for sharded topologies, here we do an end-to-end + # test of discovering servers' round trip times and configuring + # localThresholdMS. + client = await connected(self.mock_client()) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Prohibited for topology type Sharded. + with self.assertRaises(InvalidOperation): + await client.address + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) + + # a and b are within the 15-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2)}, await writable_addresses(topology)) + + client.mock_rtts["a:1"] = 0.045 + + # Discover only b is within latency window. + async def predicate(): + return {("b", 2)} == await writable_addresses(topology) + + await async_wait_until( + predicate, + 'discover server "a" is too far', + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index 7bc8225465..ca2f3cfd1e 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -15,8 +15,10 @@ """Test MongoClient's mongos load balancing using a mock.""" from __future__ import annotations +import asyncio import sys import threading +from test.helpers import ConcurrentRunner from pymongo.operations import _Op @@ -30,14 +32,10 @@ from pymongo.server_selectors import writable_server_selector from pymongo.topology_description import TOPOLOGY_TYPE +_IS_SYNC = True -@client_context.require_connection -@client_context.require_no_load_balancer -def setUpModule(): - pass - -class SimpleOp(threading.Thread): +class SimpleOp(ConcurrentRunner): def __init__(self, client): super().__init__() self.client = client @@ -48,15 +46,15 @@ def run(self): self.passed = True # No exception raised. -def do_simple_op(client, nthreads): - threads = [SimpleOp(client) for _ in range(nthreads)] - for t in threads: +def do_simple_op(client, ntasks): + tasks = [SimpleOp(client) for _ in range(ntasks)] + for t in tasks: t.start() - for t in threads: + for t in tasks: t.join() - for t in threads: + for t in tasks: assert t.passed @@ -68,6 +66,11 @@ def writable_addresses(topology): class TestMongosLoadBalancing(MockClientTest): + @client_context.require_connection + @client_context.require_no_load_balancer + def setUp(self): + super().setUp() + def mock_client(self, **kwargs): mock_client = MockClient( standalones=[], @@ -98,7 +101,7 @@ def test_lazy_connect(self): wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") def test_failover(self): - nthreads = 10 + ntasks = 10 client = connected(self.mock_client(localThresholdMS=0.001)) wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") @@ -118,14 +121,14 @@ def f(): passed.append(True) - threads = [threading.Thread(target=f) for _ in range(nthreads)] - for t in threads: + tasks = [ConcurrentRunner(target=f) for _ in range(ntasks)] + for t in tasks: t.start() - for t in threads: + for t in tasks: t.join() - self.assertEqual(nthreads, len(passed)) + self.assertEqual(ntasks, len(passed)) # Down host removed from list. self.assertEqual(2, len(client.nodes)) @@ -183,8 +186,11 @@ def test_load_balancing(self): client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. + def predicate(): + return {("b", 2)} == writable_addresses(topology) + wait_until( - lambda: {("b", 2)} == writable_addresses(topology), + predicate, 'discover server "a" is too far', ) diff --git a/tools/synchro.py b/tools/synchro.py index 10030a275d..7e7aeec3a4 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -221,6 +221,7 @@ def async_only_test(f: str) -> bool: "test_logger.py", "test_max_staleness.py", "test_monitoring.py", + "test_mongos_load_balancing.py", "test_on_demand_csfle.py", "test_raw_bson.py", "test_read_concern.py", From 041edbecba28b793e194479688a750e23c9317c2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 6 Feb 2025 13:31:55 -0600 Subject: [PATCH 1731/2111] PYTHON-5058 Restore alternate architecture builds (#2133) --- .github/workflows/dist.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 5100c70d43..3dee8f581c 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -35,10 +35,9 @@ jobs: # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - [ubuntu-20.04, "manylinux_x86_64", "cp3*-manylinux_x86_64"] - - [ubuntu-24.04-arm, "manylinux_aarch64", "cp3*-manylinux_aarch64"] - # Disabled pending PYTHON-5058 - # - [ubuntu-24.04, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] - # - [ubuntu-24.04, "manylinux_s390x", "cp3*-manylinux_s390x"] + - [ubuntu-20.04, "manylinux_aarch64", "cp3*-manylinux_aarch64"] + - [ubuntu-20.04, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] + - [ubuntu-20.04, "manylinux_s390x", "cp3*-manylinux_s390x"] - [ubuntu-20.04, "manylinux_i686", "cp3*-manylinux_i686"] - [windows-2019, "win_amd6", "cp3*-win_amd64"] - [windows-2019, "win32", "cp3*-win32"] @@ -63,6 +62,10 @@ jobs: if: runner.os == 'Linux' uses: docker/setup-qemu-action@v3 with: + # setup-qemu-action by default uses `tonistiigi/binfmt:latest` image, + # which is out of date. This causes seg faults during build. + # Here we manually fix the version. + image: tonistiigi/binfmt:qemu-v8.1.5 platforms: all - name: Install cibuildwheel From a641337b5c98087884c0197628c772ced0f59965 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 6 Feb 2025 13:59:26 -0600 Subject: [PATCH 1732/2111] PYTHON-5047 Do not run nightly release check on forks (#2134) --- .github/workflows/release-python.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 45157bfc2b..d8c900e77b 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -37,6 +37,7 @@ jobs: pre-publish: environment: release runs-on: ubuntu-latest + if: github.repository_owner == 'mongodb' || github.event_name == 'workflow_dispatch' permissions: id-token: write contents: write From 0fe1691b748a358c38045b8ec9134ba61c0fcaf3 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 10 Feb 2025 10:05:56 -0500 Subject: [PATCH 1733/2111] PYTHON-5118 - Improve contributing documentation of synchro (#2139) --- CONTRIBUTING.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 536110fcfc..c3aa420fa4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -261,6 +261,11 @@ To prevent the `synchro` hook from accidentally overwriting code, it first check of a file is changing and not its async counterpart, and will fail. In the unlikely scenario that you want to override this behavior, first export `OVERRIDE_SYNCHRO_CHECK=1`. +Sometimes, the `synchro` hook will fail and introduce changes many previously unmodified files. This is due to static +Python errors, such as missing imports, incorrect syntax, or other fatal typos. To resolve these issues, +run `pre-commit run --all-files --hook-stage manual ruff` and fix all reported errors before running the `synchro` +hook again. + ## Converting a test to async The `tools/convert_test_to_async.py` script takes in an existing synchronous test file and outputs a partially-converted asynchronous version of the same name to the `test/asynchronous` directory. From b9228684a42b4df2ed3318b8dfab47d9a829c6b9 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 10 Feb 2025 11:02:44 -0500 Subject: [PATCH 1734/2111] PYTHON-5116 - Add MongoDB 4.2 back to our tested versions (#2140) --- .evergreen/generated_configs/tasks.yml | 540 ++++++++++++++++++++++ .evergreen/generated_configs/variants.yml | 56 ++- .evergreen/scripts/generate_config.py | 2 +- uv.lock | 2 +- 4 files changed, 577 insertions(+), 23 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index c666c6901a..37b7a622d5 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -227,6 +227,186 @@ tasks: - noauth - nossl - sync_async + - name: test-4.2-standalone-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - standalone + - auth + - ssl + - sync + - name: test-4.2-standalone-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - standalone + - auth + - ssl + - async + - name: test-4.2-standalone-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - standalone + - auth + - ssl + - sync_async + - name: test-4.2-standalone-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - standalone + - noauth + - ssl + - sync + - name: test-4.2-standalone-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - standalone + - noauth + - ssl + - async + - name: test-4.2-standalone-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - standalone + - noauth + - ssl + - sync_async + - name: test-4.2-standalone-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - standalone + - noauth + - nossl + - sync + - name: test-4.2-standalone-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - standalone + - noauth + - nossl + - async + - name: test-4.2-standalone-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: server + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - standalone + - noauth + - nossl + - sync_async - name: test-4.4-standalone-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -1667,6 +1847,186 @@ tasks: - noauth - nossl - sync_async + - name: test-4.2-replica_set-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - replica_set + - auth + - ssl + - sync + - name: test-4.2-replica_set-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - replica_set + - auth + - ssl + - async + - name: test-4.2-replica_set-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - replica_set + - auth + - ssl + - sync_async + - name: test-4.2-replica_set-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - replica_set + - noauth + - ssl + - sync + - name: test-4.2-replica_set-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - replica_set + - noauth + - ssl + - async + - name: test-4.2-replica_set-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - replica_set + - noauth + - ssl + - sync_async + - name: test-4.2-replica_set-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - replica_set + - noauth + - nossl + - sync + - name: test-4.2-replica_set-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - replica_set + - noauth + - nossl + - async + - name: test-4.2-replica_set-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: replica_set + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - replica_set + - noauth + - nossl + - sync_async - name: test-4.4-replica_set-auth-ssl-sync commands: - func: bootstrap mongo-orchestration @@ -3107,6 +3467,186 @@ tasks: - noauth - nossl - sync_async + - name: test-4.2-sharded_cluster-auth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - sharded_cluster + - auth + - ssl + - sync + - name: test-4.2-sharded_cluster-auth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - sharded_cluster + - auth + - ssl + - async + - name: test-4.2-sharded_cluster-auth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + - func: run tests + vars: + AUTH: auth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - sharded_cluster + - auth + - ssl + - sync_async + - name: test-4.2-sharded_cluster-noauth-ssl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - sharded_cluster + - noauth + - ssl + - sync + - name: test-4.2-sharded_cluster-noauth-ssl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - sharded_cluster + - noauth + - ssl + - async + - name: test-4.2-sharded_cluster-noauth-ssl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + - func: run tests + vars: + AUTH: noauth + SSL: ssl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - sharded_cluster + - noauth + - ssl + - sync_async + - name: test-4.2-sharded_cluster-noauth-nossl-sync + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync + TEST_SUITES: default + tags: + - "4.2" + - sharded_cluster + - noauth + - nossl + - sync + - name: test-4.2-sharded_cluster-noauth-nossl-async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: async + TEST_SUITES: default_async + tags: + - "4.2" + - sharded_cluster + - noauth + - nossl + - async + - name: test-4.2-sharded_cluster-noauth-nossl-sync_async + commands: + - func: bootstrap mongo-orchestration + vars: + VERSION: "4.2" + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + - func: run tests + vars: + AUTH: noauth + SSL: nossl + SYNC: sync_async + TEST_SUITES: "" + tags: + - "4.2" + - sharded_cluster + - noauth + - nossl + - sync_async - name: test-4.4-sharded_cluster-auth-ssl-sync commands: - func: bootstrap mongo-orchestration diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 79c9b22c93..8f5d833e90 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -817,10 +817,10 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Ocsp tests - - name: ocsp-rhel8-v4.4-python3.9 + - name: ocsp-rhel8-v4.2-python3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 v4.4 Python3.9 + display_name: OCSP RHEL8 v4.2 Python3.9 run_on: - rhel87-small batchtime: 20160 @@ -828,12 +828,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: "4.4" + VERSION: "4.2" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: ocsp-rhel8-v5.0-python3.10 + - name: ocsp-rhel8-v4.4-python3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 v5.0 Python3.10 + display_name: OCSP RHEL8 v4.4 Python3.10 run_on: - rhel87-small batchtime: 20160 @@ -841,12 +841,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: "5.0" + VERSION: "4.4" PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: ocsp-rhel8-v6.0-python3.11 + - name: ocsp-rhel8-v5.0-python3.11 tasks: - name: .ocsp - display_name: OCSP RHEL8 v6.0 Python3.11 + display_name: OCSP RHEL8 v5.0 Python3.11 run_on: - rhel87-small batchtime: 20160 @@ -854,12 +854,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: "6.0" + VERSION: "5.0" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: ocsp-rhel8-v7.0-python3.12 + - name: ocsp-rhel8-v6.0-python3.12 tasks: - name: .ocsp - display_name: OCSP RHEL8 v7.0 Python3.12 + display_name: OCSP RHEL8 v6.0 Python3.12 run_on: - rhel87-small batchtime: 20160 @@ -867,12 +867,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: "7.0" + VERSION: "6.0" PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: ocsp-rhel8-v8.0-python3.13 + - name: ocsp-rhel8-v7.0-python3.13 tasks: - name: .ocsp - display_name: OCSP RHEL8 v8.0 Python3.13 + display_name: OCSP RHEL8 v7.0 Python3.13 run_on: - rhel87-small batchtime: 20160 @@ -880,12 +880,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: "8.0" + VERSION: "7.0" PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: ocsp-rhel8-rapid-pypy3.10 + - name: ocsp-rhel8-v8.0-pypy3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 rapid PyPy3.10 + display_name: OCSP RHEL8 v8.0 PyPy3.10 run_on: - rhel87-small batchtime: 20160 @@ -893,12 +893,12 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: rapid + VERSION: "8.0" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: ocsp-rhel8-latest-python3.9 + - name: ocsp-rhel8-rapid-python3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 latest Python3.9 + display_name: OCSP RHEL8 rapid Python3.9 run_on: - rhel87-small batchtime: 20160 @@ -906,8 +906,21 @@ buildvariants: AUTH: noauth SSL: ssl TOPOLOGY: server - VERSION: latest + VERSION: rapid PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: ocsp-rhel8-latest-python3.10 + tasks: + - name: .ocsp + display_name: OCSP RHEL8 latest Python3.10 + run_on: + - rhel87-small + batchtime: 20160 + expansions: + AUTH: noauth + SSL: ssl + TOPOLOGY: server + VERSION: latest + PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: ocsp-win64-v4.4-python3.9 tasks: - name: .ocsp-rsa !.ocsp-staple @@ -1338,6 +1351,7 @@ buildvariants: - name: storage-inmemory-rhel8-python3.9 tasks: - name: .standalone .noauth .nossl .4.0 .sync_async + - name: .standalone .noauth .nossl .4.2 .sync_async - name: .standalone .noauth .nossl .4.4 .sync_async - name: .standalone .noauth .nossl .5.0 .sync_async - name: .standalone .noauth .nossl .6.0 .sync_async diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e9624ab109..41b1266a70 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -26,7 +26,7 @@ # Globals ############## -ALL_VERSIONS = ["4.0", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +ALL_VERSIONS = ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS diff --git a/uv.lock b/uv.lock index e7f09f66fc..a2e951e76c 100644 --- a/uv.lock +++ b/uv.lock @@ -997,7 +997,7 @@ sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5 [[package]] name = "pymongo" -version = "4.11.0.dev0" +version = "4.12.0.dev0" source = { editable = "." } dependencies = [ { name = "dnspython" }, From c6ffa1e95115dea5a015c75d84d8279359c3b15a Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 10 Feb 2025 13:29:11 -0500 Subject: [PATCH 1735/2111] PYTHON-5129 - Fix async transaction docstrings (#2138) --- pymongo/asynchronous/client_session.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index 4c5171a350..4f354001c2 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -21,7 +21,7 @@ .. code-block:: python - with client.start_session(causal_consistency=True) as session: + async with client.start_session(causal_consistency=True) as session: collection = client.db.collection await collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session) secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY) @@ -53,8 +53,8 @@ orders = client.db.orders inventory = client.db.inventory - with client.start_session() as session: - async with session.start_transaction(): + async with client.start_session() as session: + async with await session.start_transaction(): await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) await inventory.update_one( {"sku": "abc123", "qty": {"$gte": 100}}, @@ -62,7 +62,7 @@ session=session, ) -Upon normal completion of ``async with session.start_transaction()`` block, the +Upon normal completion of ``async with await session.start_transaction()`` block, the transaction automatically calls :meth:`AsyncClientSession.commit_transaction`. If the block exits with an exception, the transaction automatically calls :meth:`AsyncClientSession.abort_transaction`. @@ -113,7 +113,7 @@ .. code-block:: python # Each read using this session reads data from the same point in time. - with client.start_session(snapshot=True) as session: + async with client.start_session(snapshot=True) as session: order = await orders.find_one({"sku": "abc123"}, session=session) inventory = await inventory.find_one({"sku": "abc123"}, session=session) @@ -619,7 +619,7 @@ async def callback(session): await inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, {"$inc": {"qty": -100}}, session=session) - with client.start_session() as session: + async with client.start_session() as session: await session.with_transaction(callback) To pass arbitrary arguments to the ``callback``, wrap your callable @@ -628,7 +628,7 @@ async def callback(session): async def callback(session, custom_arg, custom_kwarg=None): # Transaction operations... - with client.start_session() as session: + async with client.start_session() as session: await session.with_transaction( lambda s: callback(s, "custom_arg", custom_kwarg=1)) From 7a7ffa615d4d49742a18589bf196a7bae1a58161 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Mon, 10 Feb 2025 15:00:30 -0500 Subject: [PATCH 1736/2111] PYTHON-5111 Update datetime_conversion in docstrings of MongoClients (#2135) --- CONTRIBUTING.md | 2 +- pymongo/asynchronous/mongo_client.py | 7 +++---- pymongo/synchronous/mongo_client.py | 7 +++---- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3aa420fa4..f67077e57d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -178,7 +178,7 @@ documentation including narrative docs, and the [Sphinx docstring format](https: You can build the documentation locally by running: ```bash -just docs-build +just docs ``` When updating docs, it can be helpful to run the live docs server as: diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 365fc62100..d0be6ee21e 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -276,7 +276,9 @@ def __init__( :param type_registry: instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. - :param datetime_conversion: Specifies how UTC datetimes should be decoded + :param kwargs: **Additional optional parameters available as keyword arguments:** + + - `datetime_conversion` (optional): Specifies how UTC datetimes should be decoded within BSON. Valid options include 'datetime_ms' to return as a DatetimeMS, 'datetime' to return as a datetime.datetime and raising a ValueError for out-of-range values, 'datetime_auto' to @@ -284,9 +286,6 @@ def __init__( out-of-range and 'datetime_clamp' to clamp to the minimum and maximum possible datetimes. Defaults to 'datetime'. See :ref:`handling-out-of-range-datetimes` for details. - - | **Other optional parameters can be passed as keyword arguments:** - - `directConnection` (optional): if ``True``, forces this client to connect directly to the specified MongoDB host as a standalone. If ``false``, the client connects to the entire replica set of diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 8cd08ab725..3ed5a49ac0 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -274,7 +274,9 @@ def __init__( :param type_registry: instance of :class:`~bson.codec_options.TypeRegistry` to enable encoding and decoding of custom types. - :param datetime_conversion: Specifies how UTC datetimes should be decoded + :param kwargs: **Additional optional parameters available as keyword arguments:** + + - `datetime_conversion` (optional): Specifies how UTC datetimes should be decoded within BSON. Valid options include 'datetime_ms' to return as a DatetimeMS, 'datetime' to return as a datetime.datetime and raising a ValueError for out-of-range values, 'datetime_auto' to @@ -282,9 +284,6 @@ def __init__( out-of-range and 'datetime_clamp' to clamp to the minimum and maximum possible datetimes. Defaults to 'datetime'. See :ref:`handling-out-of-range-datetimes` for details. - - | **Other optional parameters can be passed as keyword arguments:** - - `directConnection` (optional): if ``True``, forces this client to connect directly to the specified MongoDB host as a standalone. If ``false``, the client connects to the entire replica set of From b94dd8e12b13d6ad5ab88bd9677784eab95837ed Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 10 Feb 2025 16:50:40 -0500 Subject: [PATCH 1737/2111] PYTHON-4745 - Test behavior of async task cancellation (#2136) --- pymongo/asynchronous/change_stream.py | 3 +- pymongo/asynchronous/client_session.py | 3 +- pymongo/asynchronous/cursor.py | 3 +- pymongo/asynchronous/pool.py | 8 +- pymongo/periodic_executor.py | 2 + pymongo/synchronous/change_stream.py | 3 +- pymongo/synchronous/client_session.py | 3 +- pymongo/synchronous/cursor.py | 3 +- pymongo/synchronous/pool.py | 8 +- test/asynchronous/test_async_cancellation.py | 126 +++++++++++++++++++ tools/synchro.py | 2 +- 11 files changed, 155 insertions(+), 9 deletions(-) create mode 100644 test/asynchronous/test_async_cancellation.py diff --git a/pymongo/asynchronous/change_stream.py b/pymongo/asynchronous/change_stream.py index 719020c409..f405e91161 100644 --- a/pymongo/asynchronous/change_stream.py +++ b/pymongo/asynchronous/change_stream.py @@ -391,7 +391,8 @@ async def try_next(self) -> Optional[_DocumentType]: if not _resumable(exc) and not exc.timeout: await self.close() raise - except Exception: + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: await self.close() raise diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index 4f354001c2..e9548b0ec4 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -697,7 +697,8 @@ async def callback(session, custom_arg, custom_kwarg=None): ) try: ret = await callback(self) - except Exception as exc: + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as exc: if self.in_transaction: await self.abort_transaction() if ( diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 9101197ce2..1b25bf4ee8 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -1126,7 +1126,8 @@ async def _send_message(self, operation: Union[_Query, _GetMore]) -> None: self._killed = True await self.close() raise - except Exception: + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: await self.close() raise diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index bf2f2b4946..39b3bfc042 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -559,7 +559,7 @@ async def command( ) except (OperationFailure, NotPrimaryError): raise - # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. + # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. except BaseException as error: self._raise_connection_failure(error) @@ -576,6 +576,7 @@ async def send_message(self, message: bytes, max_doc_size: int) -> None: try: await async_sendall(self.conn, message) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: self._raise_connection_failure(error) @@ -586,6 +587,7 @@ async def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _O """ try: return await receive_message(self, request_id, self.max_message_size) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: self._raise_connection_failure(error) @@ -1269,6 +1271,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A try: sock = await _configured_socket(self.address, self.opts) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: async with self.lock: self.active_contexts.discard(tmp_context) @@ -1308,6 +1311,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A handler.contribute_socket(conn, completed_handshake=False) await conn.authenticate() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: async with self.lock: self.active_contexts.discard(conn.cancel_context) @@ -1369,6 +1373,7 @@ async def checkout( async with self.lock: self.active_contexts.add(conn.cancel_context) yield conn + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: # Exception in caller. Ensure the connection gets returned. # Note that when pinned is True, the session owns the @@ -1515,6 +1520,7 @@ async def _get_conn( async with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: if conn: # We checked out a socket but authentication failed. diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index f51a988728..5f54b243ec 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -100,6 +100,7 @@ async def _run(self) -> None: if not await self._target(): self._stopped = True break + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: self._stopped = True raise @@ -232,6 +233,7 @@ def _run(self) -> None: if not self._target(): self._stopped = True break + # Catch KeyboardInterrupt, etc. and cleanup. except BaseException: with self._lock: self._stopped = True diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py index a971ad08c0..43aab39ee1 100644 --- a/pymongo/synchronous/change_stream.py +++ b/pymongo/synchronous/change_stream.py @@ -389,7 +389,8 @@ def try_next(self) -> Optional[_DocumentType]: if not _resumable(exc) and not exc.timeout: self.close() raise - except Exception: + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: self.close() raise diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index 298dd7b357..af7ff59b3d 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -694,7 +694,8 @@ def callback(session, custom_arg, custom_kwarg=None): self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) try: ret = callback(self) - except Exception as exc: + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as exc: if self.in_transaction: self.abort_transaction() if ( diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index cda093ee19..31c4604f89 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -1124,7 +1124,8 @@ def _send_message(self, operation: Union[_Query, _GetMore]) -> None: self._killed = True self.close() raise - except Exception: + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: self.close() raise diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 05f930d480..7c55e04b22 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -559,7 +559,7 @@ def command( ) except (OperationFailure, NotPrimaryError): raise - # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. + # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. except BaseException as error: self._raise_connection_failure(error) @@ -576,6 +576,7 @@ def send_message(self, message: bytes, max_doc_size: int) -> None: try: sendall(self.conn, message) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: self._raise_connection_failure(error) @@ -586,6 +587,7 @@ def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: """ try: return receive_message(self, request_id, self.max_message_size) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: self._raise_connection_failure(error) @@ -1263,6 +1265,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect try: sock = _configured_socket(self.address, self.opts) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: with self.lock: self.active_contexts.discard(tmp_context) @@ -1302,6 +1305,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect handler.contribute_socket(conn, completed_handshake=False) conn.authenticate() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: with self.lock: self.active_contexts.discard(conn.cancel_context) @@ -1363,6 +1367,7 @@ def checkout( with self.lock: self.active_contexts.add(conn.cancel_context) yield conn + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: # Exception in caller. Ensure the connection gets returned. # Note that when pinned is True, the session owns the @@ -1509,6 +1514,7 @@ def _get_conn( with self._max_connecting_cond: self._pending -= 1 self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException: if conn: # We checked out a socket but authentication failed. diff --git a/test/asynchronous/test_async_cancellation.py b/test/asynchronous/test_async_cancellation.py new file mode 100644 index 0000000000..b73c7a8084 --- /dev/null +++ b/test/asynchronous/test_async_cancellation.py @@ -0,0 +1,126 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that async cancellation performed by users clean up resources correctly.""" +from __future__ import annotations + +import asyncio +import sys +from test.utils import async_get_pool, delay, one + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected + + +class TestAsyncCancellation(AsyncIntegrationTest): + async def test_async_cancellation_closes_connection(self): + pool = await async_get_pool(self.client) + await self.client.db.test.insert_one({"x": 1}) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + conn = one(pool.conns) + + async def task(): + await self.client.db.test.find_one({"$where": delay(0.2)}) + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(conn.closed) + + @async_client_context.require_transactions + async def test_async_cancellation_aborts_transaction(self): + await self.client.db.test.insert_one({"x": 1}) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + session = self.client.start_session() + + async def callback(session): + await self.client.db.test.find_one({"$where": delay(0.2)}, session=session) + + async def task(): + await session.with_transaction(callback) + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertFalse(session.in_transaction) + + @async_client_context.require_failCommand_blockConnection + async def test_async_cancellation_closes_cursor(self): + await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + cursor = self.client.db.test.find({}, batch_size=1) + await cursor.next() + + # Make sure getMore commands block + fail_command = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 200}, + } + + async def task(): + async with self.fail_point(fail_command): + await cursor.next() + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(cursor._killed) + + @async_client_context.require_change_streams + @async_client_context.require_failCommand_blockConnection + async def test_async_cancellation_closes_change_stream(self): + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + change_stream = await self.client.db.test.watch(batch_size=2) + + # Make sure getMore commands block + fail_command = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 200}, + } + + async def task(): + async with self.fail_point(fail_command): + await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + await change_stream.next() + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(change_stream._closed) diff --git a/tools/synchro.py b/tools/synchro.py index 7e7aeec3a4..77fdcce5ae 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -169,7 +169,7 @@ def async_only_test(f: str) -> bool: """Return True for async tests that should not be converted to sync.""" - return f in ["test_locks.py", "test_concurrency.py"] + return f in ["test_locks.py", "test_concurrency.py", "test_async_cancellation.py"] test_files = [ From 1a7239c5ac84637bdf8d71d864c8d36dbb378dca Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 11 Feb 2025 11:45:23 -0500 Subject: [PATCH 1738/2111] PYTHON-4745 - Update Async Cancellation documentation (#2141) --- doc/async-tutorial.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/async-tutorial.rst b/doc/async-tutorial.rst index 2ccf011d8e..7a3a987111 100644 --- a/doc/async-tutorial.rst +++ b/doc/async-tutorial.rst @@ -420,3 +420,10 @@ the collection: DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } .. seealso:: The MongoDB documentation on `indexes `_ + +Task Cancellation +----------------- +`Cancelling `_ an asyncio Task +that is running a PyMongo operation is treated as a fatal interrupt. Any connections, cursors, and transactions +involved in a cancelled Task will be safely closed and cleaned up as part of the cancellation. If those resources are +also used elsewhere, attempting to utilize them after the cancellation will result in an error. From 13fa3614210b7cfc7fc12b28993b111aa1bb78b2 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 11 Feb 2025 10:11:41 -0800 Subject: [PATCH 1739/2111] PYTHON-5101 Convert test.test_server_selection_in_window to async (#2119) Co-authored-by: Noah Stapp --- test/asynchronous/test_encryption.py | 2 +- .../test_server_selection_in_window.py | 179 +++++++++++++++ test/asynchronous/utils_selection_tests.py | 203 ++++++++++++++++++ test/asynchronous/utils_spec_runner.py | 2 +- test/test_server_selection_in_window.py | 33 +-- test/utils_selection_tests.py | 86 +------- test/utils_selection_tests_shared.py | 100 +++++++++ tools/synchro.py | 2 + 8 files changed, 515 insertions(+), 92 deletions(-) create mode 100644 test/asynchronous/test_server_selection_in_window.py create mode 100644 test/asynchronous/utils_selection_tests.py create mode 100644 test/utils_selection_tests_shared.py diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 2b22bd8b76..335aa9d81c 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -739,7 +739,7 @@ def allowable_errors(self, op): return errors -async def create_test(scenario_def, test, name): +def create_test(scenario_def, test, name): @async_client_context.require_test_commands async def run_scenario(self): await self.run_scenario(scenario_def, test) diff --git a/test/asynchronous/test_server_selection_in_window.py b/test/asynchronous/test_server_selection_in_window.py new file mode 100644 index 0000000000..e2ae92a27c --- /dev/null +++ b/test/asynchronous/test_server_selection_in_window.py @@ -0,0 +1,179 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import asyncio +import os +import threading +from pathlib import Path +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils_selection_tests import create_topology +from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator +from test.utils import ( + CMAPListener, + OvertCommandListener, + async_get_pool, + async_wait_until, +) + +from pymongo.common import clean_node +from pymongo.monitoring import ConnectionReadyEvent +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection", "in_window") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "in_window" + ) + + +class TestAllScenarios(unittest.IsolatedAsyncioTestCase): + async def run_scenario(self, scenario_def): + topology = await create_topology(scenario_def) + + # Update mock operation_count state: + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) + server = topology.get_server_by_address(address) + server.pool.operation_count = mock["operation_count"] + + pref = ReadPreference.NEAREST + counts = {address: 0 for address in topology._description.server_descriptions()} + + # Number of times to repeat server selection + iterations = scenario_def["iterations"] + for _ in range(iterations): + server = await topology.select_server(pref, _Op.TEST, server_selection_timeout=0) + counts[server.description.address] += 1 + + # Verify expected_frequencies + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] + for host_str, freq in expected_frequencies.items(): + address = clean_node(host_str) + actual_freq = float(counts[address]) / iterations + if freq == 0: + # Should be exactly 0. + self.assertEqual(actual_freq, 0) + else: + # Should be within 'tolerance'. + self.assertAlmostEqual(actual_freq, freq, delta=tolerance) + + +def create_test(scenario_def, test, name): + async def run_scenario(self): + await self.run_scenario(scenario_def) + + return run_scenario + + +class CustomSpecTestCreator(AsyncSpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + Server selection in_window tests do not have a 'tests' field. + The whole file represents a single test case. + """ + return [scenario_def] + + +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() + + +class FinderTask(ConcurrentRunner): + def __init__(self, collection, iterations): + super().__init__() + self.daemon = True + self.collection = collection + self.iterations = iterations + self.passed = False + + async def run(self): + for _ in range(self.iterations): + await self.collection.find_one({}) + self.passed = True + + +class TestProse(AsyncIntegrationTest): + async def frequencies(self, client, listener, n_finds=10): + coll = client.test.test + N_TASKS = 10 + tasks = [FinderTask(coll, n_finds) for _ in range(N_TASKS)] + for task in tasks: + await task.start() + for task in tasks: + await task.join() + for task in tasks: + self.assertTrue(task.passed) + + events = listener.started_events + self.assertEqual(len(events), n_finds * N_TASKS) + nodes = client.nodes + self.assertEqual(len(nodes), 2) + freqs = {address: 0.0 for address in nodes} + for event in events: + freqs[event.connection_id] += 1 + for address in freqs: + freqs[address] = freqs[address] / float(len(events)) + return freqs + + @async_client_context.require_failCommand_appName + @async_client_context.require_multiple_mongoses + async def test_load_balancing(self): + listener = OvertCommandListener() + cmap_listener = CMAPListener() + # PYTHON-2584: Use a large localThresholdMS to avoid the impact of + # varying RTTs. + client = await self.async_rs_client( + async_client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener, cmap_listener], + localThresholdMS=30000, + minPoolSize=10, + ) + await async_wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + # Wait for both pools to be populated. + await cmap_listener.async_wait_for_event(ConnectionReadyEvent, 20) + # Delay find commands on only one mongos. + delay_finds = { + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", + }, + } + async with self.fail_point(delay_finds): + nodes = async_client_context.client.nodes + self.assertEqual(len(nodes), 1) + delayed_server = next(iter(nodes)) + freqs = await self.frequencies(client, listener) + self.assertLessEqual(freqs[delayed_server], 0.25) + listener.reset() + freqs = await self.frequencies(client, listener, n_finds=150) + self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/utils_selection_tests.py b/test/asynchronous/utils_selection_tests.py new file mode 100644 index 0000000000..71e287569a --- /dev/null +++ b/test/asynchronous/utils_selection_tests.py @@ -0,0 +1,203 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys +from test.asynchronous import AsyncPyMongoTestCase + +sys.path[0:0] = [""] + +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils import AsyncMockPool, parse_read_preference +from test.utils_selection_tests_shared import ( + get_addresses, + get_topology_type_name, + make_server_description, +) + +from bson import json_util +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology +from pymongo.common import HEARTBEAT_FREQUENCY +from pymongo.errors import AutoReconnect, ConfigurationError +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = False + + +def get_topology_settings_dict(**kwargs): + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": AsyncMockPool, + } + settings.update(kwargs) + return settings + + +async def create_topology(scenario_def, **kwargs): + # Initialize topologies. + if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 + else: + frequency = HEARTBEAT_FREQUENCY + + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + + topology_type = get_topology_type_name(scenario_def) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) + # Force topology description to ReplicaSet + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) + + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + topology = Topology(TopologySettings(**settings)) + await topology.open() + + # Update topologies with server descriptions. + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Assert that descriptions match + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name + + return topology + + +def create_test(scenario_def): + async def run_scenario(self): + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + top_latency = await create_topology(scenario_def) + + # "In latency window" is defined in the server selection + # spec as the subset of suitable_servers that falls within the + # allowable latency window. + top_suitable = await create_topology(scenario_def, local_threshold_ms=1000000) + + # Create server selector. + if scenario_def.get("operation") == "write": + pref = writable_server_selector + else: + # Make first letter lowercase to match read_pref's modes. + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): + with self.assertRaises((ConfigurationError, ValueError)): + # Error can be raised when making Read Pref or selecting. + pref = parse_read_preference(pref_def) + await top_latency.select_server(pref, _Op.TEST) + return + + pref = parse_read_preference(pref_def) + + # Select servers. + if not scenario_def.get("suitable_servers"): + with self.assertRaises(AutoReconnect): + await top_suitable.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + if not scenario_def["in_latency_window"]: + with self.assertRaises(AutoReconnect): + await top_latency.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + actual_suitable_s = await top_suitable.select_servers( + pref, _Op.TEST, server_selection_timeout=0 + ) + actual_latency_s = await top_latency.select_servers( + pref, _Op.TEST, server_selection_timeout=0 + ) + + expected_suitable_servers = {} + for server in scenario_def["suitable_servers"]: + server_description = make_server_description(server, hosts) + expected_suitable_servers[server["address"]] = server_description + + actual_suitable_servers = {} + for s in actual_suitable_s: + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) + for k, actual in actual_suitable_servers.items(): + expected = expected_suitable_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + expected_latency_servers = {} + for server in scenario_def["in_latency_window"]: + server_description = make_server_description(server, hosts) + expected_latency_servers[server["address"]] = server_description + + actual_latency_servers = {} + for s in actual_latency_s: + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) + for k, actual in actual_latency_servers.items(): + expected = expected_latency_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + return run_scenario + + +def create_selection_tests(test_dir): + class TestAllScenarios(AsyncPyMongoTestCase): + pass + + for dirpath, _, filenames in os.walk(test_dir): + dirname = os.path.split(dirpath) + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + return TestAllScenarios diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index d433f1a7e6..11d88850fc 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -229,7 +229,7 @@ async def _create_tests(self): str(test_def["description"].replace(" ", "_").replace(".", "_")), ) - new_test = await self._create_test(scenario_def, test_def, test_name) + new_test = self._create_test(scenario_def, test_def, test_name) new_test = self._ensure_min_max_server_version(scenario_def, new_test) new_test = self.ensure_run_on(scenario_def, new_test) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 05772fa385..7ccd4b529e 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -15,9 +15,12 @@ """Test the topology module's Server Selection Spec implementation.""" from __future__ import annotations +import asyncio import os import threading +from pathlib import Path from test import IntegrationTest, client_context, unittest +from test.helpers import ConcurrentRunner from test.utils import ( CMAPListener, OvertCommandListener, @@ -32,10 +35,14 @@ from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference +_IS_SYNC = True # Location of JSON test specifications. -TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), os.path.join("server_selection", "in_window") -) +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection", "in_window") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "in_window" + ) class TestAllScenarios(unittest.TestCase): @@ -92,7 +99,7 @@ def tests(self, scenario_def): CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() -class FinderThread(threading.Thread): +class FinderTask(ConcurrentRunner): def __init__(self, collection, iterations): super().__init__() self.daemon = True @@ -109,17 +116,17 @@ def run(self): class TestProse(IntegrationTest): def frequencies(self, client, listener, n_finds=10): coll = client.test.test - N_THREADS = 10 - threads = [FinderThread(coll, n_finds) for _ in range(N_THREADS)] - for thread in threads: - thread.start() - for thread in threads: - thread.join() - for thread in threads: - self.assertTrue(thread.passed) + N_TASKS = 10 + tasks = [FinderTask(coll, n_finds) for _ in range(N_TASKS)] + for task in tasks: + task.start() + for task in tasks: + task.join() + for task in tasks: + self.assertTrue(task.passed) events = listener.started_events - self.assertEqual(len(events), n_finds * N_THREADS) + self.assertEqual(len(events), n_finds * N_TASKS) nodes = client.nodes self.assertEqual(len(nodes), 2) freqs = {address: 0.0 for address in nodes} diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 2d21888e27..9667ea701b 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -18,96 +18,28 @@ import datetime import os import sys +from test import PyMongoTestCase sys.path[0:0] = [""] from test import unittest from test.pymongo_mocks import DummyMonitor from test.utils import MockPool, parse_read_preference +from test.utils_selection_tests_shared import ( + get_addresses, + get_topology_type_name, + make_server_description, +) from bson import json_util -from pymongo.common import HEARTBEAT_FREQUENCY, MIN_SUPPORTED_WIRE_VERSION, clean_node +from pymongo.common import HEARTBEAT_FREQUENCY from pymongo.errors import AutoReconnect, ConfigurationError -from pymongo.hello import Hello, HelloCompat from pymongo.operations import _Op -from pymongo.server_description import ServerDescription from pymongo.server_selectors import writable_server_selector from pymongo.synchronous.settings import TopologySettings from pymongo.synchronous.topology import Topology - -def get_addresses(server_list): - seeds = [] - hosts = [] - for server in server_list: - seeds.append(clean_node(server["address"])) - hosts.append(server["address"]) - return seeds, hosts - - -def make_last_write_date(server): - epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) - millis = server.get("lastWrite", {}).get("lastWriteDate") - if millis: - diff = ((millis % 1000) + 1000) % 1000 - seconds = (millis - diff) / 1000 - micros = diff * 1000 - return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) - else: - # "Unknown" server. - return epoch - - -def make_server_description(server, hosts): - """Make a ServerDescription from server info in a JSON test.""" - server_type = server["type"] - if server_type in ("Unknown", "PossiblePrimary"): - return ServerDescription(clean_node(server["address"]), Hello({})) - - hello_response = {"ok": True, "hosts": hosts} - if server_type not in ("Standalone", "Mongos", "RSGhost"): - hello_response["setName"] = "rs" - - if server_type == "RSPrimary": - hello_response[HelloCompat.LEGACY_CMD] = True - elif server_type == "RSSecondary": - hello_response["secondary"] = True - elif server_type == "Mongos": - hello_response["msg"] = "isdbgrid" - elif server_type == "RSGhost": - hello_response["isreplicaset"] = True - elif server_type == "RSArbiter": - hello_response["arbiterOnly"] = True - - hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} - - for field in "maxWireVersion", "tags", "idleWritePeriodMillis": - if field in server: - hello_response[field] = server[field] - - hello_response.setdefault("maxWireVersion", MIN_SUPPORTED_WIRE_VERSION) - - # Sets _last_update_time to now. - sd = ServerDescription( - clean_node(server["address"]), - Hello(hello_response), - round_trip_time=server["avg_rtt_ms"] / 1000.0, - ) - - if "lastUpdateTime" in server: - sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. - - return sd - - -def get_topology_type_name(scenario_def): - td = scenario_def["topology_description"] - name = td["type"] - if name == "Unknown": - # PyMongo never starts a topology in type Unknown. - return "Sharded" if len(td["servers"]) > 1 else "Single" - else: - return name +_IS_SYNC = True def get_topology_settings_dict(**kwargs): @@ -244,7 +176,7 @@ def run_scenario(self): def create_selection_tests(test_dir): - class TestAllScenarios(unittest.TestCase): + class TestAllScenarios(PyMongoTestCase): pass for dirpath, _, filenames in os.walk(test_dir): diff --git a/test/utils_selection_tests_shared.py b/test/utils_selection_tests_shared.py new file mode 100644 index 0000000000..dbaed1034f --- /dev/null +++ b/test/utils_selection_tests_shared.py @@ -0,0 +1,100 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys + +sys.path[0:0] = [""] + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION, clean_node +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription + + +def get_addresses(server_list): + seeds = [] + hosts = [] + for server in server_list: + seeds.append(clean_node(server["address"])) + hosts.append(server["address"]) + return seeds, hosts + + +def make_last_write_date(server): + epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) + millis = server.get("lastWrite", {}).get("lastWriteDate") + if millis: + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) / 1000 + micros = diff * 1000 + return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) + else: + # "Unknown" server. + return epoch + + +def make_server_description(server, hosts): + """Make a ServerDescription from server info in a JSON test.""" + server_type = server["type"] + if server_type in ("Unknown", "PossiblePrimary"): + return ServerDescription(clean_node(server["address"]), Hello({})) + + hello_response = {"ok": True, "hosts": hosts} + if server_type not in ("Standalone", "Mongos", "RSGhost"): + hello_response["setName"] = "rs" + + if server_type == "RSPrimary": + hello_response[HelloCompat.LEGACY_CMD] = True + elif server_type == "RSSecondary": + hello_response["secondary"] = True + elif server_type == "Mongos": + hello_response["msg"] = "isdbgrid" + elif server_type == "RSGhost": + hello_response["isreplicaset"] = True + elif server_type == "RSArbiter": + hello_response["arbiterOnly"] = True + + hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} + + for field in "maxWireVersion", "tags", "idleWritePeriodMillis": + if field in server: + hello_response[field] = server[field] + + hello_response.setdefault("maxWireVersion", MIN_SUPPORTED_WIRE_VERSION) + + # Sets _last_update_time to now. + sd = ServerDescription( + clean_node(server["address"]), + Hello(hello_response), + round_trip_time=server["avg_rtt_ms"] / 1000.0, + ) + + if "lastUpdateTime" in server: + sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. + + return sd + + +def get_topology_type_name(scenario_def): + td = scenario_def["topology_description"] + name = td["type"] + if name == "Unknown": + # PyMongo never starts a topology in type Unknown. + return "Sharded" if len(td["servers"]) > 1 else "Single" + else: + return name diff --git a/tools/synchro.py b/tools/synchro.py index 77fdcce5ae..f8d0be80fd 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -233,6 +233,7 @@ def async_only_test(f: str) -> bool: "test_retryable_writes_unified.py", "test_run_command.py", "test_sdam_monitoring_spec.py", + "test_server_selection_in_window.py", "test_server_selection_logging.py", "test_session.py", "test_server_selection_rtt.py", @@ -245,6 +246,7 @@ def async_only_test(f: str) -> bool: "test_unified_format.py", "test_versioned_api_integration.py", "unified_format.py", + "utils_selection_tests.py", ] From 8b6be4ab7191b9b8916688ebd3b646963ba576ea Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 11 Feb 2025 13:13:14 -0500 Subject: [PATCH 1740/2111] PYTHON-4983 - Restore no C extension coverage variants (#2142) --- .evergreen/generated_configs/variants.yml | 39 +++++++++++++++++++++++ .evergreen/scripts/generate_config.py | 3 +- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 8f5d833e90..20b89f7e69 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1079,6 +1079,19 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Server tests + - name: test-rhel8-python3.9-cov-no-c + tasks: + - name: .standalone .sync_async + - name: .replica_set .sync_async + - name: .sharded_cluster .sync_async + display_name: "* Test RHEL8 Python3.9 cov No C" + run_on: + - rhel87-small + expansions: + COVERAGE: coverage + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.9/bin/python3 + tags: [coverage_tag] - name: test-rhel8-python3.9-cov tasks: - name: .standalone .sync_async @@ -1091,6 +1104,19 @@ buildvariants: COVERAGE: coverage PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [coverage_tag] + - name: test-rhel8-python3.13-cov-no-c + tasks: + - name: .standalone .sync_async + - name: .replica_set .sync_async + - name: .sharded_cluster .sync_async + display_name: "* Test RHEL8 Python3.13 cov No C" + run_on: + - rhel87-small + expansions: + COVERAGE: coverage + NO_EXT: "1" + PYTHON_BINARY: /opt/python/3.13/bin/python3 + tags: [coverage_tag] - name: test-rhel8-python3.13-cov tasks: - name: .standalone .sync_async @@ -1103,6 +1129,19 @@ buildvariants: COVERAGE: coverage PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [coverage_tag] + - name: test-rhel8-pypy3.10-cov-no-c + tasks: + - name: .standalone .sync_async + - name: .replica_set .sync_async + - name: .sharded_cluster .sync_async + display_name: "* Test RHEL8 PyPy3.10 cov No C" + run_on: + - rhel87-small + expansions: + COVERAGE: coverage + NO_EXT: "1" + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + tags: [coverage_tag] - name: test-rhel8-pypy3.10-cov tasks: - name: .standalone .sync_async diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 41b1266a70..1337836379 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -279,8 +279,9 @@ def create_server_variants() -> list[BuildVariant]: host = DEFAULT_HOST # Prefix the display name with an asterisk so it is sorted first. base_display_name = "* Test" - for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: + for python, c_ext in product([*MIN_MAX_PYTHON, PYPYS[-1]], C_EXTS): expansions = dict(COVERAGE="coverage") + handle_c_ext(c_ext, expansions) display_name = get_display_name(base_display_name, host, python=python, **expansions) variant = create_variant( [f".{t} .sync_async" for t in TOPOLOGIES], From 1f7f8a9e0f41e32c290bf0ee1c6721fee6e2f8d8 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 11 Feb 2025 10:27:43 -0800 Subject: [PATCH 1741/2111] PYTHON-5081 Convert test.test_gridfs to async (#2099) --- gridfs/asynchronous/grid_file.py | 2 +- test/asynchronous/test_gridfs.py | 602 +++++++++++++++++++++++++++++++ test/test_gridfs.py | 184 ++++++---- test/utils.py | 5 + tools/synchro.py | 9 +- 5 files changed, 722 insertions(+), 80 deletions(-) create mode 100644 test/asynchronous/test_gridfs.py diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index d15713c51b..baa88d4808 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -231,7 +231,7 @@ async def get_version( try: doc = await anext(cursor) return AsyncGridOut(self._collection, file_document=doc, session=session) - except StopIteration: + except StopAsyncIteration: raise NoFile("no version %d for filename %r" % (version, filename)) from None async def get_last_version( diff --git a/test/asynchronous/test_gridfs.py b/test/asynchronous/test_gridfs.py new file mode 100644 index 0000000000..b1c1e754ff --- /dev/null +++ b/test/asynchronous/test_gridfs.py @@ -0,0 +1,602 @@ +# +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import sys +import threading +import time +from io import BytesIO +from test.asynchronous.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import async_joinall, one + +import gridfs +from bson.binary import Binary +from gridfs.asynchronous.grid_file import DEFAULT_CHUNK_SIZE, AsyncGridOutCursor +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +class JustWrite(ConcurrentRunner): + def __init__(self, fs, n): + super().__init__() + self.fs = fs + self.n = n + self.daemon = True + + async def run(self): + for _ in range(self.n): + file = self.fs.new_file(filename="test") + await file.write(b"hello") + await file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, fs, n, results): + super().__init__() + self.fs = fs + self.n = n + self.results = results + self.daemon = True + + async def run(self): + for _ in range(self.n): + file = await self.fs.get("test") + data = await file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfsNoConnect(unittest.IsolatedAsyncioTestCase): + db: AsyncDatabase + + async def asyncSetUp(self): + await super().asyncSetUp() + self.db = AsyncMongoClient(connect=False).pymongo_test + + async def test_gridfs(self): + self.assertRaises(TypeError, gridfs.AsyncGridFS, "foo") + self.assertRaises(TypeError, gridfs.AsyncGridFS, self.db, 5) + + +class TestGridfs(AsyncIntegrationTest): + fs: gridfs.AsyncGridFS + alt: gridfs.AsyncGridFS + + async def asyncSetUp(self): + await super().asyncSetUp() + self.fs = gridfs.AsyncGridFS(self.db) + self.alt = gridfs.AsyncGridFS(self.db, "alt") + await self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + async def test_basic(self): + oid = await self.fs.put(b"hello world") + self.assertEqual(b"hello world", await (await self.fs.get(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + await self.fs.delete(oid) + with self.assertRaises(NoFile): + await self.fs.get(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.fs.get("foo") + oid = await self.fs.put(b"hello world", _id="foo") + self.assertEqual("foo", oid) + self.assertEqual(b"hello world", await (await self.fs.get("foo")).read()) + + async def test_multi_chunk_delete(self): + await self.db.fs.drop() + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFS(self.db) + oid = await gfs.put(b"hello", chunkSize=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_list(self): + self.assertEqual([], await self.fs.list()) + await self.fs.put(b"hello world") + self.assertEqual([], await self.fs.list()) + + # PYTHON-598: in server versions before 2.5.x, creating an index on + # filename, uploadDate causes list() to include None. + await self.fs.get_last_version() + self.assertEqual([], await self.fs.list()) + + await self.fs.put(b"", filename="mike") + await self.fs.put(b"foo", filename="test") + await self.fs.put(b"", filename="hello world") + + self.assertEqual({"mike", "test", "hello world"}, set(await self.fs.list())) + + async def test_empty_file(self): + oid = await self.fs.put(b"") + self.assertEqual(b"", await (await self.fs.get(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + raw = await self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + async def test_corrupt_chunk(self): + files_id = await self.fs.put(b"foobar") + await self.db.fs.chunks.update_one( + {"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}} + ) + try: + out = await self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + await out.read() + + out = await self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + await out.readline() + finally: + await self.fs.delete(files_id) + + async def test_put_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. + await chunks.drop() + await files.drop() + await self.fs.put(b"junk") + + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in (await chunks.index_information()).values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in (await files.index_information()).values() + ) + ) + + async def test_alt_collection(self): + oid = await self.alt.put(b"hello world") + self.assertEqual(b"hello world", await (await self.alt.get(oid)).read()) + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + await self.alt.delete(oid) + with self.assertRaises(NoFile): + await self.alt.get(oid) + self.assertEqual(0, await self.db.alt.files.count_documents({})) + self.assertEqual(0, await self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.alt.get("foo") + oid = await self.alt.put(b"hello world", _id="foo") + self.assertEqual("foo", oid) + self.assertEqual(b"hello world", await (await self.alt.get("foo")).read()) + + await self.alt.put(b"", filename="mike") + await self.alt.put(b"foo", filename="test") + await self.alt.put(b"", filename="hello world") + + self.assertEqual({"mike", "test", "hello world"}, set(await self.alt.list())) + + async def test_threaded_reads(self): + await self.fs.put(b"hello", _id="test") + + tasks = [] + results: list = [] + for i in range(10): + tasks.append(JustRead(self.fs, 10, results)) + await tasks[i].start() + + await async_joinall(tasks) + + self.assertEqual(100 * [b"hello"], results) + + async def test_threaded_writes(self): + tasks = [] + for i in range(10): + tasks.append(JustWrite(self.fs, 10)) + await tasks[i].start() + + await async_joinall(tasks) + + f = await self.fs.get_last_version("test") + self.assertEqual(await f.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, await self.db.fs.files.count_documents({"filename": "test"})) + + async def test_get_last_version(self): + one = await self.fs.put(b"foo", filename="test") + await asyncio.sleep(0.01) + two = self.fs.new_file(filename="test") + await two.write(b"bar") + await two.close() + await asyncio.sleep(0.01) + two = two._id + three = await self.fs.put(b"baz", filename="test") + + self.assertEqual(b"baz", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(three) + self.assertEqual(b"bar", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(one) + with self.assertRaises(NoFile): + await self.fs.get_last_version("test") + + async def test_get_last_version_with_metadata(self): + one = await self.fs.put(b"foo", filename="test", author="author") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author") + + self.assertEqual(b"bar", await (await self.fs.get_last_version(author="author")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.get_last_version(author="author")).read()) + await self.fs.delete(one) + + one = await self.fs.put(b"foo", filename="test", author="author1") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author2") + + self.assertEqual(b"foo", await (await self.fs.get_last_version(author="author1")).read()) + self.assertEqual(b"bar", await (await self.fs.get_last_version(author="author2")).read()) + self.assertEqual(b"bar", await (await self.fs.get_last_version(filename="test")).read()) + + with self.assertRaises(NoFile): + await self.fs.get_last_version(author="author3") + with self.assertRaises(NoFile): + await self.fs.get_last_version(filename="nottest", author="author1") + + await self.fs.delete(one) + await self.fs.delete(two) + + async def test_get_version(self): + await self.fs.put(b"foo", filename="test") + await asyncio.sleep(0.01) + await self.fs.put(b"bar", filename="test") + await asyncio.sleep(0.01) + await self.fs.put(b"baz", filename="test") + await asyncio.sleep(0.01) + + self.assertEqual(b"foo", await (await self.fs.get_version("test", 0)).read()) + self.assertEqual(b"bar", await (await self.fs.get_version("test", 1)).read()) + self.assertEqual(b"baz", await (await self.fs.get_version("test", 2)).read()) + + self.assertEqual(b"baz", await (await self.fs.get_version("test", -1)).read()) + self.assertEqual(b"bar", await (await self.fs.get_version("test", -2)).read()) + self.assertEqual(b"foo", await (await self.fs.get_version("test", -3)).read()) + + with self.assertRaises(NoFile): + await self.fs.get_version("test", 3) + with self.assertRaises(NoFile): + await self.fs.get_version("test", -4) + + async def test_get_version_with_metadata(self): + one = await self.fs.put(b"foo", filename="test", author="author1") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author1") + await asyncio.sleep(0.01) + three = await self.fs.put(b"baz", filename="test", author="author2") + + self.assertEqual( + b"foo", + await (await self.fs.get_version(filename="test", author="author1", version=-2)).read(), + ) + self.assertEqual( + b"bar", + await (await self.fs.get_version(filename="test", author="author1", version=-1)).read(), + ) + self.assertEqual( + b"foo", + await (await self.fs.get_version(filename="test", author="author1", version=0)).read(), + ) + self.assertEqual( + b"bar", + await (await self.fs.get_version(filename="test", author="author1", version=1)).read(), + ) + self.assertEqual( + b"baz", + await (await self.fs.get_version(filename="test", author="author2", version=0)).read(), + ) + self.assertEqual( + b"baz", await (await self.fs.get_version(filename="test", version=-1)).read() + ) + self.assertEqual( + b"baz", await (await self.fs.get_version(filename="test", version=2)).read() + ) + + with self.assertRaises(NoFile): + await self.fs.get_version(filename="test", author="author3") + with self.assertRaises(NoFile): + await self.fs.get_version(filename="test", author="author1", version=2) + + await self.fs.delete(one) + await self.fs.delete(two) + await self.fs.delete(three) + + async def test_put_filelike(self): + oid = await self.fs.put(BytesIO(b"hello world"), chunk_size=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", await (await self.fs.get(oid)).read()) + + async def test_file_exists(self): + oid = await self.fs.put(b"hello") + with self.assertRaises(FileExists): + await self.fs.put(b"world", _id=oid) + + one = self.fs.new_file(_id=123) + await one.write(b"some content") + await one.close() + + # Attempt to upload a file with more chunks to the same _id. + with patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + two = self.fs.new_file(_id=123) + with self.assertRaises(FileExists): + await two.write(b"x" * DEFAULT_CHUNK_SIZE * 3) + # Original file is still readable (no extra chunks were uploaded). + self.assertEqual(await (await self.fs.get(123)).read(), b"some content") + + two = self.fs.new_file(_id=123) + await two.write(b"some content") + with self.assertRaises(FileExists): + await two.close() + # Original file is still readable. + self.assertEqual(await (await self.fs.get(123)).read(), b"some content") + + async def test_exists(self): + oid = await self.fs.put(b"hello") + self.assertTrue(await self.fs.exists(oid)) + self.assertTrue(await self.fs.exists({"_id": oid})) + self.assertTrue(await self.fs.exists(_id=oid)) + + self.assertFalse(await self.fs.exists(filename="mike")) + self.assertFalse(await self.fs.exists("mike")) + + oid = await self.fs.put(b"hello", filename="mike", foo=12) + self.assertTrue(await self.fs.exists(oid)) + self.assertTrue(await self.fs.exists({"_id": oid})) + self.assertTrue(await self.fs.exists(_id=oid)) + self.assertTrue(await self.fs.exists(filename="mike")) + self.assertTrue(await self.fs.exists({"filename": "mike"})) + self.assertTrue(await self.fs.exists(foo=12)) + self.assertTrue(await self.fs.exists({"foo": 12})) + self.assertTrue(await self.fs.exists(foo={"$gt": 11})) + self.assertTrue(await self.fs.exists({"foo": {"$gt": 11}})) + + self.assertFalse(await self.fs.exists(foo=13)) + self.assertFalse(await self.fs.exists({"foo": 13})) + self.assertFalse(await self.fs.exists(foo={"$gt": 12})) + self.assertFalse(await self.fs.exists({"foo": {"$gt": 12}})) + + async def test_put_unicode(self): + with self.assertRaises(TypeError): + await self.fs.put("hello") + + oid = await self.fs.put("hello", encoding="utf-8") + self.assertEqual(b"hello", await (await self.fs.get(oid)).read()) + self.assertEqual("utf-8", (await self.fs.get(oid)).encoding) + + oid = await self.fs.put("aé", encoding="iso-8859-1") + self.assertEqual("aé".encode("iso-8859-1"), await (await self.fs.get(oid)).read()) + self.assertEqual("iso-8859-1", (await self.fs.get(oid)).encoding) + + async def test_missing_length_iter(self): + # Test fix that guards against PHP-237 + await self.fs.put(b"", filename="empty") + doc = await self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None + doc.pop("length") + await self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) + f = await self.fs.get_last_version(filename="empty") + + async def iterate_file(grid_file): + async for _chunk in grid_file: + pass + return True + + self.assertTrue(await iterate_file(f)) + + async def test_gridfs_lazy_connect(self): + client = await self.async_single_client( + "badhost", connect=False, serverSelectionTimeoutMS=10 + ) + db = client.db + gfs = gridfs.AsyncGridFS(db) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.list() + + fs = gridfs.AsyncGridFS(db) + f = fs.new_file() + with self.assertRaises(ServerSelectionTimeoutError): + await f.close() + + async def test_gridfs_find(self): + await self.fs.put(b"test2", filename="two") + await asyncio.sleep(0.01) + await self.fs.put(b"test2+", filename="two") + await asyncio.sleep(0.01) + await self.fs.put(b"test1", filename="one") + await asyncio.sleep(0.01) + await self.fs.put(b"test2++", filename="two") + files = self.db.fs.files + self.assertEqual(3, await files.count_documents({"filename": "two"})) + self.assertEqual(4, await files.count_documents({})) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + await cursor.rewind() + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + gout = await cursor.next() + self.assertEqual(b"test2+", await gout.read()) + with self.assertRaises(StopAsyncIteration): + await cursor.__anext__() + await cursor.rewind() + items = await cursor.to_list() + self.assertEqual(len(items), 2) + await cursor.rewind() + items = await cursor.to_list(1) + self.assertEqual(len(items), 1) + await cursor.close() + self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + + async def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__. + cursor = AsyncGridOutCursor.__new__(AsyncGridOutCursor) # Skip calling __init__ + with self.assertRaises(TypeError): + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore + cursor.__del__() # no error + + async def test_gridfs_find_one(self): + self.assertEqual(None, await self.fs.find_one()) + + id1 = await self.fs.put(b"test1", filename="file1") + res = await self.fs.find_one() + assert res is not None + self.assertEqual(b"test1", await res.read()) + + id2 = await self.fs.put(b"test2", filename="file2", meta="data") + res1 = await self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b"test1", await res1.read()) + res2 = await self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b"test2", await res2.read()) + + res3 = await self.fs.find_one({"filename": "file1"}) + assert res3 is not None + self.assertEqual(b"test1", await res3.read()) + + res4 = await self.fs.find_one(id2) + assert res4 is not None + self.assertEqual("data", res4.meta) + + async def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy AsyncGridFS clients, store size as a float. + data = b"data" + await self.fs.put(data, filename="f") + await self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, await (await self.fs.get_version("f")).read()) + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + gridfs.AsyncGridFS((await self.async_rs_or_single_client(w=0)).pymongo_test) + + async def test_md5(self): + gin = self.fs.new_file() + await gin.write(b"no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.get(gin._id) + self.assertIsNone(gout.md5) + + _id = await self.fs.put(b"still no md5 sum") + gout = await self.fs.get(_id) + self.assertIsNone(gout.md5) + + +class TestGridfsReplicaSet(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + + @classmethod + @async_client_context.require_connection + async def asyncTearDownClass(cls): + await async_client_context.client.drop_database("gfsreplica") + + async def test_gridfs_replica_set(self): + rsc = await self.async_rs_client( + w=async_client_context.w, read_preference=ReadPreference.SECONDARY + ) + + fs = gridfs.AsyncGridFS(rsc.gfsreplica, "gfsreplicatest") + + gin = fs.new_file() + self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) + + oid = await fs.put(b"foo") + content = await (await fs.get(oid)).read() + self.assertEqual(b"foo", content) + + async def test_gridfs_secondary(self): + secondary_host, secondary_port = one(await self.client.secondaries) + secondary_connection = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + fs = gridfs.AsyncGridFS(secondary_connection.gfsreplica, "gfssecondarytest") + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + await fs.put(b"foo") + + async def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not attempt to + # create index. + secondary_host, secondary_port = one(await self.client.secondaries) + client = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + fs = gridfs.AsyncGridFS(client.gfsreplica, "gfssecondarylazytest") + + # Connects, doesn't create index. + with self.assertRaises(NoFile): + await fs.get_last_version() + with self.assertRaises(NotPrimaryError): + await fs.put("data", encoding="utf-8") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_gridfs.py b/test/test_gridfs.py index ab8950250b..47e38141b2 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -16,11 +16,13 @@ """Tests for the gridfs package.""" from __future__ import annotations +import asyncio import datetime import sys import threading import time from io import BytesIO +from test.helpers import ConcurrentRunner from unittest.mock import patch sys.path[0:0] = [""] @@ -41,10 +43,12 @@ from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient +_IS_SYNC = True -class JustWrite(threading.Thread): + +class JustWrite(ConcurrentRunner): def __init__(self, fs, n): - threading.Thread.__init__(self) + super().__init__() self.fs = fs self.n = n self.daemon = True @@ -56,9 +60,9 @@ def run(self): file.close() -class JustRead(threading.Thread): +class JustRead(ConcurrentRunner): def __init__(self, fs, n, results): - threading.Thread.__init__(self) + super().__init__() self.fs = fs self.n = n self.results = results @@ -98,19 +102,21 @@ def setUp(self): def test_basic(self): oid = self.fs.put(b"hello world") - self.assertEqual(b"hello world", self.fs.get(oid).read()) + self.assertEqual(b"hello world", (self.fs.get(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) self.fs.delete(oid) - self.assertRaises(NoFile, self.fs.get, oid) + with self.assertRaises(NoFile): + self.fs.get(oid) self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) - self.assertRaises(NoFile, self.fs.get, "foo") + with self.assertRaises(NoFile): + self.fs.get("foo") oid = self.fs.put(b"hello world", _id="foo") self.assertEqual("foo", oid) - self.assertEqual(b"hello world", self.fs.get("foo").read()) + self.assertEqual(b"hello world", (self.fs.get("foo")).read()) def test_multi_chunk_delete(self): self.db.fs.drop() @@ -142,7 +148,7 @@ def test_list(self): def test_empty_file(self): oid = self.fs.put(b"") - self.assertEqual(b"", self.fs.get(oid).read()) + self.assertEqual(b"", (self.fs.get(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -159,10 +165,12 @@ def test_corrupt_chunk(self): self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.get(files_id) - self.assertRaises(CorruptGridFile, out.read) + with self.assertRaises(CorruptGridFile): + out.read() out = self.fs.get(files_id) - self.assertRaises(CorruptGridFile, out.readline) + with self.assertRaises(CorruptGridFile): + out.readline() finally: self.fs.delete(files_id) @@ -177,31 +185,33 @@ def test_put_ensures_index(self): self.assertTrue( any( info.get("key") == [("files_id", 1), ("n", 1)] - for info in chunks.index_information().values() + for info in (chunks.index_information()).values() ) ) self.assertTrue( any( info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in files.index_information().values() + for info in (files.index_information()).values() ) ) def test_alt_collection(self): oid = self.alt.put(b"hello world") - self.assertEqual(b"hello world", self.alt.get(oid).read()) + self.assertEqual(b"hello world", (self.alt.get(oid)).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) self.alt.delete(oid) - self.assertRaises(NoFile, self.alt.get, oid) + with self.assertRaises(NoFile): + self.alt.get(oid) self.assertEqual(0, self.db.alt.files.count_documents({})) self.assertEqual(0, self.db.alt.chunks.count_documents({})) - self.assertRaises(NoFile, self.alt.get, "foo") + with self.assertRaises(NoFile): + self.alt.get("foo") oid = self.alt.put(b"hello world", _id="foo") self.assertEqual("foo", oid) - self.assertEqual(b"hello world", self.alt.get("foo").read()) + self.assertEqual(b"hello world", (self.alt.get("foo")).read()) self.alt.put(b"", filename="mike") self.alt.put(b"foo", filename="test") @@ -212,23 +222,23 @@ def test_alt_collection(self): def test_threaded_reads(self): self.fs.put(b"hello", _id="test") - threads = [] + tasks = [] results: list = [] for i in range(10): - threads.append(JustRead(self.fs, 10, results)) - threads[i].start() + tasks.append(JustRead(self.fs, 10, results)) + tasks[i].start() - joinall(threads) + joinall(tasks) self.assertEqual(100 * [b"hello"], results) def test_threaded_writes(self): - threads = [] + tasks = [] for i in range(10): - threads.append(JustWrite(self.fs, 10)) - threads[i].start() + tasks.append(JustWrite(self.fs, 10)) + tasks[i].start() - joinall(threads) + joinall(tasks) f = self.fs.get_last_version("test") self.assertEqual(f.read(), b"hello") @@ -246,34 +256,37 @@ def test_get_last_version(self): two = two._id three = self.fs.put(b"baz", filename="test") - self.assertEqual(b"baz", self.fs.get_last_version("test").read()) + self.assertEqual(b"baz", (self.fs.get_last_version("test")).read()) self.fs.delete(three) - self.assertEqual(b"bar", self.fs.get_last_version("test").read()) + self.assertEqual(b"bar", (self.fs.get_last_version("test")).read()) self.fs.delete(two) - self.assertEqual(b"foo", self.fs.get_last_version("test").read()) + self.assertEqual(b"foo", (self.fs.get_last_version("test")).read()) self.fs.delete(one) - self.assertRaises(NoFile, self.fs.get_last_version, "test") + with self.assertRaises(NoFile): + self.fs.get_last_version("test") def test_get_last_version_with_metadata(self): one = self.fs.put(b"foo", filename="test", author="author") time.sleep(0.01) two = self.fs.put(b"bar", filename="test", author="author") - self.assertEqual(b"bar", self.fs.get_last_version(author="author").read()) + self.assertEqual(b"bar", (self.fs.get_last_version(author="author")).read()) self.fs.delete(two) - self.assertEqual(b"foo", self.fs.get_last_version(author="author").read()) + self.assertEqual(b"foo", (self.fs.get_last_version(author="author")).read()) self.fs.delete(one) one = self.fs.put(b"foo", filename="test", author="author1") time.sleep(0.01) two = self.fs.put(b"bar", filename="test", author="author2") - self.assertEqual(b"foo", self.fs.get_last_version(author="author1").read()) - self.assertEqual(b"bar", self.fs.get_last_version(author="author2").read()) - self.assertEqual(b"bar", self.fs.get_last_version(filename="test").read()) + self.assertEqual(b"foo", (self.fs.get_last_version(author="author1")).read()) + self.assertEqual(b"bar", (self.fs.get_last_version(author="author2")).read()) + self.assertEqual(b"bar", (self.fs.get_last_version(filename="test")).read()) - self.assertRaises(NoFile, self.fs.get_last_version, author="author3") - self.assertRaises(NoFile, self.fs.get_last_version, filename="nottest", author="author1") + with self.assertRaises(NoFile): + self.fs.get_last_version(author="author3") + with self.assertRaises(NoFile): + self.fs.get_last_version(filename="nottest", author="author1") self.fs.delete(one) self.fs.delete(two) @@ -286,16 +299,18 @@ def test_get_version(self): self.fs.put(b"baz", filename="test") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.get_version("test", 0).read()) - self.assertEqual(b"bar", self.fs.get_version("test", 1).read()) - self.assertEqual(b"baz", self.fs.get_version("test", 2).read()) + self.assertEqual(b"foo", (self.fs.get_version("test", 0)).read()) + self.assertEqual(b"bar", (self.fs.get_version("test", 1)).read()) + self.assertEqual(b"baz", (self.fs.get_version("test", 2)).read()) - self.assertEqual(b"baz", self.fs.get_version("test", -1).read()) - self.assertEqual(b"bar", self.fs.get_version("test", -2).read()) - self.assertEqual(b"foo", self.fs.get_version("test", -3).read()) + self.assertEqual(b"baz", (self.fs.get_version("test", -1)).read()) + self.assertEqual(b"bar", (self.fs.get_version("test", -2)).read()) + self.assertEqual(b"foo", (self.fs.get_version("test", -3)).read()) - self.assertRaises(NoFile, self.fs.get_version, "test", 3) - self.assertRaises(NoFile, self.fs.get_version, "test", -4) + with self.assertRaises(NoFile): + self.fs.get_version("test", 3) + with self.assertRaises(NoFile): + self.fs.get_version("test", -4) def test_get_version_with_metadata(self): one = self.fs.put(b"foo", filename="test", author="author1") @@ -305,25 +320,32 @@ def test_get_version_with_metadata(self): three = self.fs.put(b"baz", filename="test", author="author2") self.assertEqual( - b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read() + b"foo", + (self.fs.get_version(filename="test", author="author1", version=-2)).read(), ) self.assertEqual( - b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read() + b"bar", + (self.fs.get_version(filename="test", author="author1", version=-1)).read(), ) self.assertEqual( - b"foo", self.fs.get_version(filename="test", author="author1", version=0).read() + b"foo", + (self.fs.get_version(filename="test", author="author1", version=0)).read(), ) self.assertEqual( - b"bar", self.fs.get_version(filename="test", author="author1", version=1).read() + b"bar", + (self.fs.get_version(filename="test", author="author1", version=1)).read(), ) self.assertEqual( - b"baz", self.fs.get_version(filename="test", author="author2", version=0).read() + b"baz", + (self.fs.get_version(filename="test", author="author2", version=0)).read(), ) - self.assertEqual(b"baz", self.fs.get_version(filename="test", version=-1).read()) - self.assertEqual(b"baz", self.fs.get_version(filename="test", version=2).read()) + self.assertEqual(b"baz", (self.fs.get_version(filename="test", version=-1)).read()) + self.assertEqual(b"baz", (self.fs.get_version(filename="test", version=2)).read()) - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) + with self.assertRaises(NoFile): + self.fs.get_version(filename="test", author="author3") + with self.assertRaises(NoFile): + self.fs.get_version(filename="test", author="author1", version=2) self.fs.delete(one) self.fs.delete(two) @@ -332,11 +354,12 @@ def test_get_version_with_metadata(self): def test_put_filelike(self): oid = self.fs.put(BytesIO(b"hello world"), chunk_size=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", self.fs.get(oid).read()) + self.assertEqual(b"hello world", (self.fs.get(oid)).read()) def test_file_exists(self): oid = self.fs.put(b"hello") - self.assertRaises(FileExists, self.fs.put, b"world", _id=oid) + with self.assertRaises(FileExists): + self.fs.put(b"world", _id=oid) one = self.fs.new_file(_id=123) one.write(b"some content") @@ -345,15 +368,17 @@ def test_file_exists(self): # Attempt to upload a file with more chunks to the same _id. with patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): two = self.fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b"x" * DEFAULT_CHUNK_SIZE * 3) + with self.assertRaises(FileExists): + two.write(b"x" * DEFAULT_CHUNK_SIZE * 3) # Original file is still readable (no extra chunks were uploaded). - self.assertEqual(self.fs.get(123).read(), b"some content") + self.assertEqual((self.fs.get(123)).read(), b"some content") two = self.fs.new_file(_id=123) two.write(b"some content") - self.assertRaises(FileExists, two.close) + with self.assertRaises(FileExists): + two.close() # Original file is still readable. - self.assertEqual(self.fs.get(123).read(), b"some content") + self.assertEqual((self.fs.get(123)).read(), b"some content") def test_exists(self): oid = self.fs.put(b"hello") @@ -381,15 +406,16 @@ def test_exists(self): self.assertFalse(self.fs.exists({"foo": {"$gt": 12}})) def test_put_unicode(self): - self.assertRaises(TypeError, self.fs.put, "hello") + with self.assertRaises(TypeError): + self.fs.put("hello") oid = self.fs.put("hello", encoding="utf-8") - self.assertEqual(b"hello", self.fs.get(oid).read()) - self.assertEqual("utf-8", self.fs.get(oid).encoding) + self.assertEqual(b"hello", (self.fs.get(oid)).read()) + self.assertEqual("utf-8", (self.fs.get(oid)).encoding) oid = self.fs.put("aé", encoding="iso-8859-1") - self.assertEqual("aé".encode("iso-8859-1"), self.fs.get(oid).read()) - self.assertEqual("iso-8859-1", self.fs.get(oid).encoding) + self.assertEqual("aé".encode("iso-8859-1"), (self.fs.get(oid)).read()) + self.assertEqual("iso-8859-1", (self.fs.get(oid)).encoding) def test_missing_length_iter(self): # Test fix that guards against PHP-237 @@ -411,11 +437,13 @@ def test_gridfs_lazy_connect(self): client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db gfs = gridfs.GridFS(db) - self.assertRaises(ServerSelectionTimeoutError, gfs.list) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.list() fs = gridfs.GridFS(db) f = fs.new_file() - self.assertRaises(ServerSelectionTimeoutError, f.close) + with self.assertRaises(ServerSelectionTimeoutError): + f.close() def test_gridfs_find(self): self.fs.put(b"test2", filename="two") @@ -429,14 +457,15 @@ def test_gridfs_find(self): self.assertEqual(3, files.count_documents({"filename": "two"})) self.assertEqual(4, files.count_documents({})) cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) cursor.rewind() - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test2+", gout.read()) - self.assertRaises(StopIteration, cursor.__next__) + with self.assertRaises(StopIteration): + cursor.__next__() cursor.rewind() items = cursor.to_list() self.assertEqual(len(items), 2) @@ -484,12 +513,12 @@ def test_grid_in_non_int_chunksize(self): self.fs.put(data, filename="f") self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.get_version("f").read()) + self.assertEqual(data, (self.fs.get_version("f")).read()) def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - gridfs.GridFS(self.rs_or_single_client(w=0).pymongo_test) + gridfs.GridFS((self.rs_or_single_client(w=0)).pymongo_test) def test_md5(self): gin = self.fs.new_file() @@ -524,7 +553,7 @@ def test_gridfs_replica_set(self): self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) oid = fs.put(b"foo") - content = fs.get(oid).read() + content = (fs.get(oid)).read() self.assertEqual(b"foo", content) def test_gridfs_secondary(self): @@ -538,7 +567,8 @@ def test_gridfs_secondary(self): fs = gridfs.GridFS(secondary_connection.gfsreplica, "gfssecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, fs.put, b"foo") + with self.assertRaises(NotPrimaryError): + fs.put(b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to @@ -552,8 +582,10 @@ def test_gridfs_secondary_lazy(self): fs = gridfs.GridFS(client.gfsreplica, "gfssecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotPrimaryError, fs.put, "data", encoding="utf-8") + with self.assertRaises(NoFile): + fs.get_last_version() + with self.assertRaises(NotPrimaryError): + fs.put("data", encoding="utf-8") if __name__ == "__main__": diff --git a/test/utils.py b/test/utils.py index 5c1e0bfb7c..40eec01cb4 100644 --- a/test/utils.py +++ b/test/utils.py @@ -666,6 +666,11 @@ def joinall(threads): assert not t.is_alive(), "Thread %s hung" % t +async def async_joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + await asyncio.wait([t.task for t in tasks if t is not None], timeout=300) + + def wait_until(predicate, success_description, timeout=10): """Wait up to 10 seconds (by default) for predicate to be true. diff --git a/tools/synchro.py b/tools/synchro.py index f8d0be80fd..5c09dcff4d 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -122,7 +122,9 @@ "SpecRunnerTask": "SpecRunnerThread", "AsyncMockConnection": "MockConnection", "AsyncMockPool": "MockPool", + "StopAsyncIteration": "StopIteration", "create_async_event": "create_event", + "async_joinall": "joinall", } docstring_replacements: dict[tuple[str, str], str] = { @@ -212,12 +214,13 @@ def async_only_test(f: str) -> bool: "test_dns.py", "test_encryption.py", "test_examples.py", + "test_grid_file.py", + "test_gridfs.py", + "test_gridfs_spec.py", "test_heartbeat_monitoring.py", "test_index_management.py", - "test_grid_file.py", - "test_load_balancer.py", "test_json_util_integration.py", - "test_gridfs_spec.py", + "test_load_balancer.py", "test_logger.py", "test_max_staleness.py", "test_monitoring.py", From 61c3ddda828e7560850d17f5ba65cc34673c8163 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 11 Feb 2025 10:58:49 -0800 Subject: [PATCH 1742/2111] PYTHON-5100 Convert test.test_server_selection to async (#2120) --- test/asynchronous/test_server_selection.py | 211 +++++++++++++++++++++ test/test_server_selection.py | 21 +- tools/synchro.py | 3 +- 3 files changed, 226 insertions(+), 9 deletions(-) create mode 100644 test/asynchronous/test_server_selection.py diff --git a/test/asynchronous/test_server_selection.py b/test/asynchronous/test_server_selection.py new file mode 100644 index 0000000000..f0451841cd --- /dev/null +++ b/test/asynchronous/test_server_selection.py @@ -0,0 +1,211 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +from pymongo import AsyncMongoClient, ReadPreference +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology +from pymongo.errors import ServerSelectionTimeoutError +from pymongo.hello import HelloCompat +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.typings import strip_optional + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils_selection_tests import ( + create_selection_tests, + get_addresses, + get_topology_settings_dict, + make_server_description, +) +from test.utils import ( + EventListener, + FunctionCallRecorder, + OvertCommandListener, + async_wait_until, +) + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent, "server_selection", "server_selection" + ) +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "server_selection" + ) + + +class SelectionStoreSelector: + """No-op selector that keeps track of what was passed to it.""" + + def __init__(self): + self.selection = None + + def __call__(self, selection): + self.selection = selection + return selection + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestCustomServerSelectorFunction(AsyncIntegrationTest): + @async_client_context.require_replica_set + async def test_functional_select_max_port_number_host(self): + # Selector that returns server with highest port number. + def custom_selector(servers): + ports = [s.address[1] for s in servers] + idx = ports.index(max(ports)) + return [servers[idx]] + + # Initialize client with appropriate listeners. + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_selector=custom_selector, event_listeners=[listener] + ) + coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll + self.addAsyncCleanup(client.drop_database, "testdb") + + # Wait the node list to be fully populated. + async def all_hosts_started(): + return len((await client.admin.command(HelloCompat.LEGACY_CMD))["hosts"]) == len( + client._topology._description.readable_servers + ) + + await async_wait_until(all_hosts_started, "receive heartbeat from all hosts") + + expected_port = max( + [strip_optional(n.address[1]) for n in client._topology._description.readable_servers] + ) + + # Insert 1 record and access it 10 times. + await coll.insert_one({"name": "John Doe"}) + for _ in range(10): + await coll.find_one({"name": "John Doe"}) + + # Confirm all find commands are run against appropriate host. + for command in listener.started_events: + if command.command_name == "find": + self.assertEqual(command.connection_id[1], expected_port) + + async def test_invalid_server_selector(self): + # Client initialization must fail if server_selector is not callable. + for selector_candidate in [[], 10, "string", {}]: + with self.assertRaisesRegex(ValueError, "must be a callable"): + AsyncMongoClient(connect=False, server_selector=selector_candidate) + + # None value for server_selector is OK. + AsyncMongoClient(connect=False, server_selector=None) + + @async_client_context.require_replica_set + async def test_selector_called(self): + selector = FunctionCallRecorder(lambda x: x) + + # Client setup. + mongo_client = await self.async_rs_or_single_client(server_selector=selector) + test_collection = mongo_client.testdb.test_collection + self.addAsyncCleanup(mongo_client.drop_database, "testdb") + + # Do N operations and test selector is called at least N times. + await test_collection.insert_one({"age": 20, "name": "John"}) + await test_collection.insert_one({"age": 31, "name": "Jane"}) + await test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) + await test_collection.find_one({"name": "Roe"}) + self.assertGreaterEqual(selector.call_count, 4) + + @async_client_context.require_replica_set + async def test_latency_threshold_application(self): + selector = SelectionStoreSelector() + + scenario_def: dict = { + "topology_description": { + "type": "ReplicaSetWithPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}}, + ], + } + } + + # Create & populate Topology such that all but one server is too slow. + rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]] + min_rtt_idx = rtt_times.index(min(rtt_times)) + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + settings = get_topology_settings_dict( + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) + topology = Topology(TopologySettings(**settings)) + await topology.open() + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Invoke server selection and assert no filtering based on latency + # prior to custom server selection logic kicking in. + server = await topology.select_server(ReadPreference.NEAREST, _Op.TEST) + assert selector.selection is not None + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) + + # Ensure proper filtering based on latency after custom selection. + self.assertEqual(server.description.address, seeds[min_rtt_idx]) + + @async_client_context.require_replica_set + async def test_server_selector_bypassed(self): + selector = FunctionCallRecorder(lambda x: x) + + scenario_def = { + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } + + # Create & populate Topology such that no server is writeable. + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + settings = get_topology_settings_dict( + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) + topology = Topology(TopologySettings(**settings)) + await topology.open() + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Invoke server selection and assert no calls to our custom selector. + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + await topology.select_server( + writable_server_selector, _Op.TEST, server_selection_timeout=0.1 + ) + self.assertEqual(selector.call_count, 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 984b967f50..3e7f9a8671 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -17,6 +17,7 @@ import os import sys +from pathlib import Path from pymongo import MongoClient, ReadPreference from pymongo.errors import ServerSelectionTimeoutError @@ -43,11 +44,17 @@ make_server_description, ) +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join("server_selection", "server_selection"), -) +if _IS_SYNC: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent, "server_selection", "server_selection" + ) +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "server_selection" + ) class SelectionStoreSelector: @@ -61,7 +68,7 @@ def __call__(self, selection): return selection -class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore pass @@ -79,13 +86,12 @@ def custom_selector(servers): client = self.rs_or_single_client( server_selector=custom_selector, event_listeners=[listener] ) - self.addCleanup(client.close) coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll self.addCleanup(client.drop_database, "testdb") # Wait the node list to be fully populated. def all_hosts_started(): - return len(client.admin.command(HelloCompat.LEGACY_CMD)["hosts"]) == len( + return len((client.admin.command(HelloCompat.LEGACY_CMD))["hosts"]) == len( client._topology._description.readable_servers ) @@ -121,7 +127,6 @@ def test_selector_called(self): # Client setup. mongo_client = self.rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection - self.addCleanup(mongo_client.close) self.addCleanup(mongo_client.drop_database, "testdb") # Do N operations and test selector is called at least N times. diff --git a/tools/synchro.py b/tools/synchro.py index 5c09dcff4d..bc1fcd7869 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -236,10 +236,11 @@ def async_only_test(f: str) -> bool: "test_retryable_writes_unified.py", "test_run_command.py", "test_sdam_monitoring_spec.py", + "test_server_selection.py", "test_server_selection_in_window.py", "test_server_selection_logging.py", - "test_session.py", "test_server_selection_rtt.py", + "test_session.py", "test_sessions_unified.py", "test_srv_polling.py", "test_ssl.py", From 0e6aa6fa15e43ec1b3bc58c3ad1432496df45d80 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:01:39 -0800 Subject: [PATCH 1743/2111] PYTHON-5073 Convert test.test_connection_monitoring to async (#2087) --- test/asynchronous/pymongo_mocks.py | 4 +- .../test_connection_monitoring.py | 479 ++++++++++++++++++ test/test_connection_monitoring.py | 20 +- test/utils.py | 2 +- tools/synchro.py | 1 + 5 files changed, 493 insertions(+), 13 deletions(-) create mode 100644 test/asynchronous/test_connection_monitoring.py diff --git a/test/asynchronous/pymongo_mocks.py b/test/asynchronous/pymongo_mocks.py index ed2395bc98..40beb3c0dc 100644 --- a/test/asynchronous/pymongo_mocks.py +++ b/test/asynchronous/pymongo_mocks.py @@ -66,7 +66,7 @@ def __init__(self, server_description, topology, pool, topology_settings): def cancel_check(self): pass - def join(self): + async def join(self): pass def open(self): @@ -75,7 +75,7 @@ def open(self): def request_check(self): pass - def close(self): + async def close(self): self.opened = False diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py new file mode 100644 index 0000000000..a68b2a90cb --- /dev/null +++ b/test/asynchronous/test_connection_monitoring.py @@ -0,0 +1,479 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, client_knobs, unittest +from test.asynchronous.pymongo_mocks import DummyMonitor +from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator, SpecRunnerTask +from test.utils import ( + CMAPListener, + async_client_context, + async_get_pool, + async_get_pools, + async_wait_until, + camel_to_snake, +) + +from bson.objectid import ObjectId +from bson.son import SON +from pymongo.asynchronous.pool import PoolState, _PoolClosedError +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_preferences import ReadPreference +from pymongo.topology_description import updated_topology_description + +_IS_SYNC = False + +OBJECT_TYPES = { + # Event types. + "ConnectionCheckedIn": ConnectionCheckedInEvent, + "ConnectionCheckedOut": ConnectionCheckedOutEvent, + "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent, + "ConnectionClosed": ConnectionClosedEvent, + "ConnectionCreated": ConnectionCreatedEvent, + "ConnectionReady": ConnectionReadyEvent, + "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent, + "ConnectionPoolCreated": PoolCreatedEvent, + "ConnectionPoolReady": PoolReadyEvent, + "ConnectionPoolCleared": PoolClearedEvent, + "ConnectionPoolClosed": PoolClosedEvent, + # Error types. + "PoolClosedError": _PoolClosedError, + "WaitQueueTimeoutError": WaitQueueTimeoutError, +} + + +class AsyncTestCMAP(AsyncIntegrationTest): + # Location of JSON test specifications. + if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "connection_monitoring") + else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "connection_monitoring") + + # Test operations: + + async def start(self, op): + """Run the 'start' thread operation.""" + target = op["target"] + thread = SpecRunnerTask(target) + await thread.start() + self.targets[target] = thread + + async def wait(self, op): + """Run the 'wait' operation.""" + await asyncio.sleep(op["ms"] / 1000.0) + + async def wait_for_thread(self, op): + """Run the 'waitForThread' operation.""" + target = op["target"] + thread = self.targets[target] + await thread.stop() + await thread.join() + if thread.exc: + raise thread.exc + self.assertFalse(thread.ops) + + async def wait_for_event(self, op): + """Run the 'waitForEvent' operation.""" + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + await async_wait_until( + lambda: self.listener.event_count(event) >= count, + f"find {count} {event} event(s)", + timeout=timeout, + ) + + async def check_out(self, op): + """Run the 'checkOut' operation.""" + label = op["label"] + async with self.pool.checkout() as conn: + # Call 'pin_cursor' so we can hold the socket. + conn.pin_cursor() + if label: + self.labels[label] = conn + else: + self.addAsyncCleanup(conn.close_conn, None) + + async def check_in(self, op): + """Run the 'checkIn' operation.""" + label = op["connection"] + conn = self.labels[label] + await self.pool.checkin(conn) + + async def ready(self, op): + """Run the 'ready' operation.""" + await self.pool.ready() + + async def clear(self, op): + """Run the 'clear' operation.""" + if "interruptInUseConnections" in op: + await self.pool.reset(interrupt_connections=op["interruptInUseConnections"]) + else: + await self.pool.reset() + + async def close(self, op): + """Run the 'close' operation.""" + await self.pool.close() + + async def run_operation(self, op): + """Run a single operation in a test.""" + op_name = camel_to_snake(op["name"]) + thread = op["thread"] + meth = getattr(self, op_name) + if thread: + await self.targets[thread].schedule(lambda: meth(op)) + else: + await meth(op) + + async def run_operations(self, ops): + """Run a test's operations.""" + for op in ops: + self._ops.append(op) + await self.run_operation(op) + + def check_object(self, actual, expected): + """Assert that the actual object matches the expected object.""" + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) + for attr, expected_val in expected.items(): + if attr == "type": + continue + c2s = camel_to_snake(attr) + if c2s == "interrupt_in_use_connections": + c2s = "interrupt_connections" + actual_val = getattr(actual, c2s) + if expected_val == 42: + self.assertIsNotNone(actual_val) + else: + self.assertEqual(actual_val, expected_val) + + def check_event(self, actual, expected): + """Assert that the actual event matches the expected event.""" + self.check_object(actual, expected) + + def actual_events(self, ignore): + """Return all the non-ignored events.""" + ignore = tuple(OBJECT_TYPES[name] for name in ignore) + return [event for event in self.listener.events if not isinstance(event, ignore)] + + def check_events(self, events, ignore): + """Check the events of a test.""" + actual_events = self.actual_events(ignore) + for actual, expected in zip(actual_events, events): + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") + self.check_event(actual, expected) + + if len(events) > len(actual_events): + self.fail(f"missing events: {events[len(actual_events) :]!r}") + + def check_error(self, actual, expected): + message = expected.pop("message") + self.check_object(actual, expected) + self.assertIn(message, str(actual)) + + async def _set_fail_point(self, client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await client.admin.command(cmd) + + async def set_fail_point(self, command_args): + if not async_client_context.supports_failCommand_fail_point: + self.skipTest("failCommand fail point must be supported") + await self._set_fail_point(self.client, command_args) + + async def run_scenario(self, scenario_def, test): + """Run a CMAP spec test.""" + self.logs: list = [] + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) + self.listener = CMAPListener() + self._ops: list = [] + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + await self.set_fail_point(fp) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False + # Support backgroundThreadIntervalMS, default to 50ms. + interval = opts.pop("backgroundThreadIntervalMS", 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): + client = await self.async_single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = async_client_context.client._topology.description + sd = td.server_descriptions()[ + (await async_client_context.host, await async_client_context.port) + ] + client._topology._description = updated_topology_description( + client._topology._description, sd + ) + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + await client._topology.open() + else: + await client._get_topology() + self.pool = list(client._topology._servers.values())[0].pool + + # Map of target names to Thread objects. + self.targets: dict = {} + # Map of label names to AsyncConnection objects + self.labels: dict = {} + + async def cleanup(): + for t in self.targets.values(): + await t.stop() + for t in self.targets.values(): + await t.join(5) + for conn in self.labels.values(): + conn.close_conn(None) + + self.addAsyncCleanup(cleanup) + + try: + if test["error"]: + with self.assertRaises(PyMongoError) as ctx: + await self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) + else: + await self.run_operations(test["operations"]) + + self.check_events(test["events"], test["ignore"]) + except Exception: + # Print the events after a test failure. + print("\nFailed test: {!r}".format(test["description"])) + print("Operations:") + for op in self._ops: + print(op) + print("Threads:") + print(self.targets) + print("AsyncConnections:") + print(self.labels) + print("Events:") + for event in self.listener.events: + print(event) + print("Log:") + for log in self.logs: + print(log) + raise + + POOL_OPTIONS = { + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, + } + + # + # Prose tests. Numbers correspond to the prose test number in the spec. + # + async def test_1_client_connection_pool_options(self): + client = await self.async_rs_or_single_client(**self.POOL_OPTIONS) + pool_opts = (await async_get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + async def test_2_all_client_pools_have_same_options(self): + client = await self.async_rs_or_single_client(**self.POOL_OPTIONS) + await client.admin.command("ping") + # Discover at least one secondary. + if await async_client_context.has_secondaries: + await client.admin.command("ping", read_preference=ReadPreference.SECONDARY) + pools = await async_get_pools(client) + pool_opts = pools[0].opts + + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + for pool in pools[1:]: + self.assertEqual(pool.opts, pool_opts) + + async def test_3_uri_connection_pool_options(self): + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{await async_client_context.pair}/?{opts}" + client = await self.async_rs_or_single_client(uri) + pool_opts = (await async_get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + async def test_4_subscribe_to_events(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + self.assertEqual(listener.event_count(PoolCreatedEvent), 1) + + # Creates a new connection. + await client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) + self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) + self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) + + # Uses the existing connection. + await client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) + + await client.close() + self.assertEqual(listener.event_count(PoolClosedEvent), 1) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) + + async def test_5_check_out_fails_connection_error(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + pool = await async_get_pool(client) + + def mock_connect(*args, **kwargs): + raise ConnectionFailure("connect failed") + + pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + # Attempt to create a new connection. + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + await client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) + + failed_event = listener.events[3] + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + @async_client_context.require_no_fips + async def test_5_check_out_fails_auth_error(self): + listener = CMAPListener() + client = await self.async_single_client_noauth( + username="notauser", password="fail", event_listeners=[listener] + ) + + # Attempt to create a new connection. + with self.assertRaisesRegex(OperationFailure, "failed"): + await client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) + # Error happens here. + self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + # + # Extra non-spec tests + # + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + async def test_events_repr(self): + host = ("localhost", 27017) + self.assertRepr(ConnectionCheckedInEvent(host, 1)) + self.assertRepr(ConnectionCheckedOutEvent(host, 1, time.monotonic())) + self.assertRepr( + ConnectionCheckOutFailedEvent( + host, ConnectionCheckOutFailedReason.POOL_CLOSED, time.monotonic() + ) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr(ConnectionCreatedEvent(host, 1)) + self.assertRepr(ConnectionReadyEvent(host, 1, time.monotonic())) + self.assertRepr(ConnectionCheckOutStartedEvent(host)) + self.assertRepr(PoolCreatedEvent(host, {})) + self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) + self.assertRepr(PoolClosedEvent(host)) + + async def test_close_leaves_pool_unpaused(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + await client.admin.command("ping") + pool = await async_get_pool(client) + await client.close() + self.assertEqual(1, listener.event_count(PoolClosedEvent)) + self.assertEqual(PoolState.CLOSED, pool.state) + # Checking out a connection should fail + with self.assertRaises(_PoolClosedError): + async with pool.checkout(): + pass + + +def create_test(scenario_def, test, name): + async def run_scenario(self): + await self.run_scenario(scenario_def, test) + + return run_scenario + + +class CMAPSpecTestCreator(AsyncSpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + CMAP tests do not have a 'tests' field. The whole file represents + a single test case. + """ + return [scenario_def] + + +test_creator = CMAPSpecTestCreator(create_test, AsyncTestCMAP, AsyncTestCMAP.TEST_PATH) +test_creator.create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 05411d17ba..810d440932 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -15,9 +15,11 @@ """Execute Transactions Spec tests.""" from __future__ import annotations +import asyncio import os import sys import time +from pathlib import Path sys.path[0:0] = [""] @@ -60,6 +62,8 @@ from pymongo.synchronous.pool import PoolState, _PoolClosedError from pymongo.topology_description import updated_topology_description +_IS_SYNC = True + OBJECT_TYPES = { # Event types. "ConnectionCheckedIn": ConnectionCheckedInEvent, @@ -81,7 +85,10 @@ class TestCMAP(IntegrationTest): # Location of JSON test specifications. - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "connection_monitoring") + if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "connection_monitoring") + else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "connection_monitoring") # Test operations: @@ -258,7 +265,6 @@ def run_scenario(self, scenario_def, test): client._topology.open() else: client._get_topology() - self.addCleanup(client.close) self.pool = list(client._topology._servers.values())[0].pool # Map of target names to Thread objects. @@ -315,13 +321,11 @@ def cleanup(): # def test_1_client_connection_pool_options(self): client = self.rs_or_single_client(**self.POOL_OPTIONS) - self.addCleanup(client.close) - pool_opts = get_pool(client).opts + pool_opts = (get_pool(client)).opts self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) def test_2_all_client_pools_have_same_options(self): client = self.rs_or_single_client(**self.POOL_OPTIONS) - self.addCleanup(client.close) client.admin.command("ping") # Discover at least one secondary. if client_context.has_secondaries: @@ -337,14 +341,12 @@ def test_3_uri_connection_pool_options(self): opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) uri = f"mongodb://{client_context.pair}/?{opts}" client = self.rs_or_single_client(uri) - self.addCleanup(client.close) - pool_opts = get_pool(client).opts + pool_opts = (get_pool(client)).opts self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) def test_4_subscribe_to_events(self): listener = CMAPListener() client = self.single_client(event_listeners=[listener]) - self.addCleanup(client.close) self.assertEqual(listener.event_count(PoolCreatedEvent), 1) # Creates a new connection. @@ -368,7 +370,6 @@ def test_4_subscribe_to_events(self): def test_5_check_out_fails_connection_error(self): listener = CMAPListener() client = self.single_client(event_listeners=[listener]) - self.addCleanup(client.close) pool = get_pool(client) def mock_connect(*args, **kwargs): @@ -397,7 +398,6 @@ def test_5_check_out_fails_auth_error(self): client = self.single_client_noauth( username="notauser", password="fail", event_listeners=[listener] ) - self.addCleanup(client.close) # Attempt to create a new connection. with self.assertRaisesRegex(OperationFailure, "failed"): diff --git a/test/utils.py b/test/utils.py index 40eec01cb4..e089b3fc2f 100644 --- a/test/utils.py +++ b/test/utils.py @@ -832,7 +832,7 @@ async def async_get_pools(client): """Get all pools.""" return [ server.pool - async for server in await (await client._get_topology()).select_servers( + for server in await (await client._get_topology()).select_servers( any_server_selector, _Op.TEST ) ] diff --git a/tools/synchro.py b/tools/synchro.py index bc1fcd7869..aa681df54e 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -204,6 +204,7 @@ def async_only_test(f: str) -> bool: "test_comment.py", "test_common.py", "test_connection_logging.py", + "test_connection_monitoring.py", "test_connections_survive_primary_stepdown_spec.py", "test_create_entities.py", "test_crud_unified.py", From 42d7ec274942a18dcc97f676e2895b94206a9ec6 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:07:47 -0800 Subject: [PATCH 1744/2111] PYTHON-5082 Convert test.test_gridfs_bucket to async (#2143) --- test/asynchronous/test_gridfs_bucket.py | 574 ++++++++++++++++++++++++ test/test_gridfs_bucket.py | 120 ++--- tools/synchro.py | 1 + 3 files changed, 644 insertions(+), 51 deletions(-) create mode 100644 test/asynchronous/test_gridfs_bucket.py diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py new file mode 100644 index 0000000000..5d1cf5beff --- /dev/null +++ b/test/asynchronous/test_gridfs_bucket.py @@ -0,0 +1,574 @@ +# +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import itertools +import sys +import threading +import time +from io import BytesIO +from test.asynchronous.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils import async_joinall, joinall, one + +import gridfs +from bson.binary import Binary +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +class JustWrite(ConcurrentRunner): + def __init__(self, gfs, num): + super().__init__() + self.gfs = gfs + self.num = num + self.daemon = True + + async def run(self): + for _ in range(self.num): + file = self.gfs.open_upload_stream("test") + await file.write(b"hello") + await file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, gfs, num, results): + super().__init__() + self.gfs = gfs + self.num = num + self.results = results + self.daemon = True + + async def run(self): + for _ in range(self.num): + file = await self.gfs.open_download_stream_by_name("test") + data = await file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfs(AsyncIntegrationTest): + fs: gridfs.AsyncGridFSBucket + alt: gridfs.AsyncGridFSBucket + + async def asyncSetUp(self): + await super().asyncSetUp() + self.fs = gridfs.AsyncGridFSBucket(self.db) + self.alt = gridfs.AsyncGridFSBucket(self.db, bucket_name="alt") + await self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + async def test_basic(self): + oid = await self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", await (await self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + await self.fs.delete(oid) + with self.assertRaises(NoFile): + await self.fs.open_download_stream(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_multi_chunk_delete(self): + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFSBucket(self.db) + oid = await gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_empty_file(self): + oid = await self.fs.upload_from_stream("test_filename", b"") + self.assertEqual(b"", await (await self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + raw = await self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + async def test_corrupt_chunk(self): + files_id = await self.fs.upload_from_stream("test_filename", b"foobar") + await self.db.fs.chunks.update_one( + {"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}} + ) + try: + out = await self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + await out.read() + + out = await self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + await out.readline() + finally: + await self.fs.delete(files_id) + + async def test_upload_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. + await chunks.drop() + await files.drop() + await self.fs.upload_from_stream("filename", b"junk") + + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in (await chunks.index_information()).values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in (await files.index_information()).values() + ) + ) + + async def test_ensure_index_shell_compat(self): + files = self.db.fs.files + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): + # Create the index with different numeric types (as might be done + # from the mongo shell). + shell_index = [("filename", i), ("uploadDate", j)] + await self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) + + # No error. + await self.fs.upload_from_stream("filename", b"data") + + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in (await files.index_information()).values() + ) + ) + await files.drop() + + async def test_alt_collection(self): + oid = await self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", await (await self.alt.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + await self.alt.delete(oid) + with self.assertRaises(NoFile): + await self.alt.open_download_stream(oid) + self.assertEqual(0, await self.db.alt.files.count_documents({})) + self.assertEqual(0, await self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.alt.open_download_stream("foo") + await self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual( + b"hello world", await (await self.alt.open_download_stream_by_name("foo")).read() + ) + + await self.alt.upload_from_stream("mike", b"") + await self.alt.upload_from_stream("test", b"foo") + await self.alt.upload_from_stream("hello world", b"") + + self.assertEqual( + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in await self.db.alt.files.find().to_list()}, + ) + + async def test_threaded_reads(self): + await self.fs.upload_from_stream("test", b"hello") + + threads = [] + results: list = [] + for i in range(10): + threads.append(JustRead(self.fs, 10, results)) + await threads[i].start() + + await async_joinall(threads) + + self.assertEqual(100 * [b"hello"], results) + + async def test_threaded_writes(self): + threads = [] + for i in range(10): + threads.append(JustWrite(self.fs, 10)) + await threads[i].start() + + await async_joinall(threads) + + fstr = await self.fs.open_download_stream_by_name("test") + self.assertEqual(await fstr.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, await self.db.fs.files.count_documents({"filename": "test"})) + + async def test_get_last_version(self): + one = await self.fs.upload_from_stream("test", b"foo") + await asyncio.sleep(0.01) + two = self.fs.open_upload_stream("test") + await two.write(b"bar") + await two.close() + await asyncio.sleep(0.01) + two = two._id + three = await self.fs.upload_from_stream("test", b"baz") + + self.assertEqual(b"baz", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(three) + self.assertEqual(b"bar", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(one) + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test") + + async def test_get_version(self): + await self.fs.upload_from_stream("test", b"foo") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("test", b"bar") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("test", b"baz") + await asyncio.sleep(0.01) + + self.assertEqual( + b"foo", await (await self.fs.open_download_stream_by_name("test", revision=0)).read() + ) + self.assertEqual( + b"bar", await (await self.fs.open_download_stream_by_name("test", revision=1)).read() + ) + self.assertEqual( + b"baz", await (await self.fs.open_download_stream_by_name("test", revision=2)).read() + ) + + self.assertEqual( + b"baz", await (await self.fs.open_download_stream_by_name("test", revision=-1)).read() + ) + self.assertEqual( + b"bar", await (await self.fs.open_download_stream_by_name("test", revision=-2)).read() + ) + self.assertEqual( + b"foo", await (await self.fs.open_download_stream_by_name("test", revision=-3)).read() + ) + + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test", revision=3) + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test", revision=-4) + + async def test_upload_from_stream(self): + oid = await self.fs.upload_from_stream( + "test_file", BytesIO(b"hello world"), chunk_size_bytes=1 + ) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", await (await self.fs.open_download_stream(oid)).read()) + + async def test_upload_from_stream_with_id(self): + oid = ObjectId() + await self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", await (await self.fs.open_download_stream(oid)).read()) + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @async_client_context.require_failCommand_fail_point + async def test_upload_bulk_write_error(self): + # Test BulkWriteError from insert_many is converted to an insert_one style error. + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + gin = self.fs.open_upload_stream("test_file", chunk_size_bytes=1) + async with self.fail_point(cause_wce): + # Assert we raise WriteConcernError, not BulkWriteError. + with self.assertRaises(WriteConcernError): + await gin.write(b"hello world") + # 3 chunks were uploaded. + self.assertEqual(3, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.abort() + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + async def test_upload_batching(self): + async with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: + await gin.write(b"s" * (10 - 1)) + # No chunks were uploaded yet. + self.assertEqual(0, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.write(b"s") + # All chunks were uploaded since we hit the _UPLOAD_BUFFER_CHUNKS limit. + self.assertEqual(10, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + + async def test_open_upload_stream(self): + gin = self.fs.open_upload_stream("from_stream") + await gin.write(b"from stream") + await gin.close() + self.assertEqual(b"from stream", await (await self.fs.open_download_stream(gin._id)).read()) + + async def test_open_upload_stream_with_id(self): + oid = ObjectId() + gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") + await gin.write(b"from stream with custom id") + await gin.close() + self.assertEqual( + b"from stream with custom id", await (await self.fs.open_download_stream(oid)).read() + ) + + async def test_missing_length_iter(self): + # Test fix that guards against PHP-237 + await self.fs.upload_from_stream("empty", b"") + doc = await self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None + doc.pop("length") + await self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) + fstr = await self.fs.open_download_stream_by_name("empty") + + async def iterate_file(grid_file): + async for _ in grid_file: + pass + return True + + self.assertTrue(await iterate_file(fstr)) + + async def test_gridfs_lazy_connect(self): + client = await self.async_single_client( + "badhost", connect=False, serverSelectionTimeoutMS=0 + ) + cdb = client.db + gfs = gridfs.AsyncGridFSBucket(cdb) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.delete(0) + + gfs = gridfs.AsyncGridFSBucket(cdb) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.upload_from_stream("test", b"") # Still no connection. + + async def test_gridfs_find(self): + await self.fs.upload_from_stream("two", b"test2") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("two", b"test2+") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("one", b"test1") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("two", b"test2++") + files = self.db.fs.files + self.assertEqual(3, await files.count_documents({"filename": "two"})) + self.assertEqual(4, await files.count_documents({})) + cursor = self.fs.find( + {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 + ) + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + await cursor.rewind() + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + gout = await cursor.next() + self.assertEqual(b"test2+", await gout.read()) + with self.assertRaises(StopAsyncIteration): + await cursor.next() + await cursor.close() + self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + + async def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy AsyncGridFS clients, store size as a float. + data = b"data" + await self.fs.upload_from_stream("f", data) + await self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, await (await self.fs.open_download_stream_by_name("f")).read()) + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + gridfs.AsyncGridFSBucket((await self.async_rs_or_single_client(w=0)).pymongo_test) + + async def test_rename(self): + _id = await self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("first_name")).read() + ) + + await self.fs.rename(_id, "second_name") + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("first_name") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() + ) + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) + async def test_abort(self): + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) + await gin.write(b"test1") + await gin.write(b"test2") + await gin.write(b"test3") + self.assertEqual(3, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.abort() + self.assertTrue(gin.closed) + with self.assertRaises(ValueError): + await gin.write(b"test4") + self.assertEqual(0, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + + async def test_download_to_stream(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + oid = await self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + file1.seek(0) + oid = await self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + async def test_download_to_stream_by_name(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + _ = await self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream_by_name("one_chunk", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + file1.seek(0) + await self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + + file2 = BytesIO() + await self.fs.download_to_stream_by_name("many_chunks", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + async def test_md5(self): + gin = self.fs.open_upload_stream("no md5") + await gin.write(b"no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5") + await gin.write(b"also no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + +class TestGridfsBucketReplicaSet(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + + @classmethod + @async_client_context.require_connection + async def asyncTearDownClass(cls): + await async_client_context.client.drop_database("gfsbucketreplica") + + async def test_gridfs_replica_set(self): + rsc = await self.async_rs_client( + w=async_client_context.w, read_preference=ReadPreference.SECONDARY + ) + + gfs = gridfs.AsyncGridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") + oid = await gfs.upload_from_stream("test_filename", b"foo") + content = await (await gfs.open_download_stream(oid)).read() + self.assertEqual(b"foo", content) + + async def test_gridfs_secondary(self): + secondary_host, secondary_port = one(await self.client.secondaries) + secondary_connection = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + gfs = gridfs.AsyncGridFSBucket( + secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest" + ) + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + await gfs.upload_from_stream("test_filename", b"foo") + + async def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not attempt to + # create index. + secondary_host, secondary_port = one(await self.client.secondaries) + client = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + gfs = gridfs.AsyncGridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") + + # Connects, doesn't create index. + with self.assertRaises(NoFile): + await gfs.open_download_stream_by_name("test_filename") + with self.assertRaises(NotPrimaryError): + await gfs.upload_from_stream("test_filename", b"data") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 0af4dce811..e7486cb237 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -16,12 +16,14 @@ """Tests for the gridfs package.""" from __future__ import annotations +import asyncio import datetime import itertools import sys import threading import time from io import BytesIO +from test.helpers import ConcurrentRunner from unittest.mock import patch sys.path[0:0] = [""] @@ -44,10 +46,12 @@ from pymongo.read_preferences import ReadPreference from pymongo.synchronous.mongo_client import MongoClient +_IS_SYNC = True -class JustWrite(threading.Thread): + +class JustWrite(ConcurrentRunner): def __init__(self, gfs, num): - threading.Thread.__init__(self) + super().__init__() self.gfs = gfs self.num = num self.daemon = True @@ -59,9 +63,9 @@ def run(self): file.close() -class JustRead(threading.Thread): +class JustRead(ConcurrentRunner): def __init__(self, gfs, num, results): - threading.Thread.__init__(self) + super().__init__() self.gfs = gfs self.num = num self.results = results @@ -89,12 +93,13 @@ def setUp(self): def test_basic(self): oid = self.fs.upload_from_stream("test_filename", b"hello world") - self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", (self.fs.open_download_stream(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) self.fs.delete(oid) - self.assertRaises(NoFile, self.fs.open_download_stream, oid) + with self.assertRaises(NoFile): + self.fs.open_download_stream(oid) self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -111,7 +116,7 @@ def test_multi_chunk_delete(self): def test_empty_file(self): oid = self.fs.upload_from_stream("test_filename", b"") - self.assertEqual(b"", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"", (self.fs.open_download_stream(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -128,10 +133,12 @@ def test_corrupt_chunk(self): self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.open_download_stream(files_id) - self.assertRaises(CorruptGridFile, out.read) + with self.assertRaises(CorruptGridFile): + out.read() out = self.fs.open_download_stream(files_id) - self.assertRaises(CorruptGridFile, out.readline) + with self.assertRaises(CorruptGridFile): + out.readline() finally: self.fs.delete(files_id) @@ -146,13 +153,13 @@ def test_upload_ensures_index(self): self.assertTrue( any( info.get("key") == [("files_id", 1), ("n", 1)] - for info in chunks.index_information().values() + for info in (chunks.index_information()).values() ) ) self.assertTrue( any( info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in files.index_information().values() + for info in (files.index_information()).values() ) ) @@ -174,25 +181,27 @@ def test_ensure_index_shell_compat(self): self.assertTrue( any( info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in files.index_information().values() + for info in (files.index_information()).values() ) ) files.drop() def test_alt_collection(self): oid = self.alt.upload_from_stream("test_filename", b"hello world") - self.assertEqual(b"hello world", self.alt.open_download_stream(oid).read()) + self.assertEqual(b"hello world", (self.alt.open_download_stream(oid)).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) self.alt.delete(oid) - self.assertRaises(NoFile, self.alt.open_download_stream, oid) + with self.assertRaises(NoFile): + self.alt.open_download_stream(oid) self.assertEqual(0, self.db.alt.files.count_documents({})) self.assertEqual(0, self.db.alt.chunks.count_documents({})) - self.assertRaises(NoFile, self.alt.open_download_stream, "foo") + with self.assertRaises(NoFile): + self.alt.open_download_stream("foo") self.alt.upload_from_stream("foo", b"hello world") - self.assertEqual(b"hello world", self.alt.open_download_stream_by_name("foo").read()) + self.assertEqual(b"hello world", (self.alt.open_download_stream_by_name("foo")).read()) self.alt.upload_from_stream("mike", b"") self.alt.upload_from_stream("test", b"foo") @@ -200,7 +209,7 @@ def test_alt_collection(self): self.assertEqual( {"mike", "test", "hello world", "foo"}, - {k["filename"] for k in list(self.db.alt.files.find())}, + {k["filename"] for k in self.db.alt.files.find().to_list()}, ) def test_threaded_reads(self): @@ -240,13 +249,14 @@ def test_get_last_version(self): two = two._id three = self.fs.upload_from_stream("test", b"baz") - self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test")).read()) self.fs.delete(three) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test")).read()) self.fs.delete(two) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test")).read()) self.fs.delete(one) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test") + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test") def test_get_version(self): self.fs.upload_from_stream("test", b"foo") @@ -256,28 +266,30 @@ def test_get_version(self): self.fs.upload_from_stream("test", b"baz") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=0).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=1).read()) - self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=2).read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test", revision=0)).read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test", revision=1)).read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test", revision=2)).read()) - self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=-1).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=-2).read()) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=-3).read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test", revision=-1)).read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test", revision=-2)).read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test", revision=-3)).read()) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=3) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=-4) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test", revision=3) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test", revision=-4) def test_upload_from_stream(self): oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", (self.fs.open_download_stream(oid)).read()) def test_upload_from_stream_with_id(self): oid = ObjectId() self.fs.upload_from_stream_with_id( oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 ) - self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"custom id", (self.fs.open_download_stream(oid)).read()) @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) @client_context.require_failCommand_fail_point @@ -316,14 +328,14 @@ def test_open_upload_stream(self): gin = self.fs.open_upload_stream("from_stream") gin.write(b"from stream") gin.close() - self.assertEqual(b"from stream", self.fs.open_download_stream(gin._id).read()) + self.assertEqual(b"from stream", (self.fs.open_download_stream(gin._id)).read()) def test_open_upload_stream_with_id(self): oid = ObjectId() gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") gin.write(b"from stream with custom id") gin.close() - self.assertEqual(b"from stream with custom id", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"from stream with custom id", (self.fs.open_download_stream(oid)).read()) def test_missing_length_iter(self): # Test fix that guards against PHP-237 @@ -345,12 +357,12 @@ def test_gridfs_lazy_connect(self): client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=0) cdb = client.db gfs = gridfs.GridFSBucket(cdb) - self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.delete(0) gfs = gridfs.GridFSBucket(cdb) - self.assertRaises( - ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b"" - ) # Still no connection. + with self.assertRaises(ServerSelectionTimeoutError): + gfs.upload_from_stream("test", b"") # Still no connection. def test_gridfs_find(self): self.fs.upload_from_stream("two", b"test2") @@ -366,14 +378,15 @@ def test_gridfs_find(self): cursor = self.fs.find( {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 ) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) cursor.rewind() - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test2+", gout.read()) - self.assertRaises(StopIteration, cursor.__next__) + with self.assertRaises(StopIteration): + cursor.next() cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) @@ -383,20 +396,21 @@ def test_grid_in_non_int_chunksize(self): self.fs.upload_from_stream("f", data) self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.open_download_stream_by_name("f").read()) + self.assertEqual(data, (self.fs.open_download_stream_by_name("f")).read()) def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - gridfs.GridFSBucket(self.rs_or_single_client(w=0).pymongo_test) + gridfs.GridFSBucket((self.rs_or_single_client(w=0)).pymongo_test) def test_rename(self): _id = self.fs.upload_from_stream("first_name", b"testing") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name("first_name").read()) + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("first_name")).read()) self.fs.rename(_id, "second_name") - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("first_name") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) def test_abort(self): @@ -407,7 +421,8 @@ def test_abort(self): self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) gin.abort() self.assertTrue(gin.closed) - self.assertRaises(ValueError, gin.write, b"test4") + with self.assertRaises(ValueError): + gin.write(b"test4") self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) def test_download_to_stream(self): @@ -490,7 +505,7 @@ def test_gridfs_replica_set(self): gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") oid = gfs.upload_from_stream("test_filename", b"foo") - content = gfs.open_download_stream(oid).read() + content = (gfs.open_download_stream(oid)).read() self.assertEqual(b"foo", content) def test_gridfs_secondary(self): @@ -504,7 +519,8 @@ def test_gridfs_secondary(self): gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"foo") + with self.assertRaises(NotPrimaryError): + gfs.upload_from_stream("test_filename", b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to @@ -518,8 +534,10 @@ def test_gridfs_secondary_lazy(self): gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"data") + with self.assertRaises(NoFile): + gfs.open_download_stream_by_name("test_filename") + with self.assertRaises(NotPrimaryError): + gfs.upload_from_stream("test_filename", b"data") if __name__ == "__main__": diff --git a/tools/synchro.py b/tools/synchro.py index aa681df54e..69a2f07ba6 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -217,6 +217,7 @@ def async_only_test(f: str) -> bool: "test_examples.py", "test_grid_file.py", "test_gridfs.py", + "test_gridfs_bucket.py", "test_gridfs_spec.py", "test_heartbeat_monitoring.py", "test_index_management.py", From 8496d58faa090b007ceca736ffc52cd90a2f96cf Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 12 Feb 2025 06:59:32 -0500 Subject: [PATCH 1745/2111] PYTHON-4993 - Reevaluate handling of asyncio.CancelledError (#2132) --- pymongo/asynchronous/encryption.py | 4 ---- pymongo/asynchronous/mongo_client.py | 6 ------ pymongo/asynchronous/monitor.py | 6 ------ pymongo/asynchronous/pool.py | 2 -- pymongo/synchronous/encryption.py | 5 ----- pymongo/synchronous/mongo_client.py | 6 ------ pymongo/synchronous/monitor.py | 6 ------ pymongo/synchronous/pool.py | 2 -- 8 files changed, 37 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index f777104cf5..9d3ea67191 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -127,8 +127,6 @@ def _wrap_encryption_errors() -> Iterator[None]: # BSON encoding/decoding errors are unrelated to encryption so # we should propagate them unchanged. raise - except asyncio.CancelledError: - raise except Exception as exc: raise EncryptionError(exc) from exc @@ -766,8 +764,6 @@ async def create_encrypted_collection( await database.create_collection(name=name, **kwargs), encrypted_fields, ) - except asyncio.CancelledError: - raise except Exception as exc: raise EncryptedCollectionError(exc, encrypted_fields) from exc diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index d0be6ee21e..37be9a194c 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2043,8 +2043,6 @@ async def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: await self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) - except asyncio.CancelledError: - raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it @@ -2059,8 +2057,6 @@ async def _process_kill_cursors(self) -> None: for address, cursor_ids in address_to_cursor_ids.items(): try: await self._kill_cursors(cursor_ids, address, topology, session=None) - except asyncio.CancelledError: - raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: raise @@ -2075,8 +2071,6 @@ async def _process_periodic_tasks(self) -> None: try: await self._process_kill_cursors() await self._topology.update_pool() - except asyncio.CancelledError: - raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: return diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index abde7a9055..15289af4dc 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -262,8 +262,6 @@ async def _check_server(self) -> ServerDescription: details = cast(Mapping[str, Any], exc.details) await self._topology.receive_cluster_time(details.get("$clusterTime")) raise - except asyncio.CancelledError: - raise except ReferenceError: raise except Exception as error: @@ -429,8 +427,6 @@ def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception - except asyncio.CancelledError: - raise except Exception: # As per the spec, upon encountering an error: # - An error must not be raised @@ -494,8 +490,6 @@ async def _run(self) -> None: except ReferenceError: # Topology was garbage-collected. await self.close() - except asyncio.CancelledError: - raise except Exception: await self._pool.reset() diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 39b3bfc042..1da695c5c8 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -706,8 +706,6 @@ def _close_conn(self) -> None: # shutdown. try: self.conn.close() - except asyncio.CancelledError: - raise except Exception: # noqa: S110 pass diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 59f38e1913..7cbac1c509 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -15,7 +15,6 @@ """Support for explicit client-side field level encryption.""" from __future__ import annotations -import asyncio import contextlib import enum import socket @@ -127,8 +126,6 @@ def _wrap_encryption_errors() -> Iterator[None]: # BSON encoding/decoding errors are unrelated to encryption so # we should propagate them unchanged. raise - except asyncio.CancelledError: - raise except Exception as exc: raise EncryptionError(exc) from exc @@ -760,8 +757,6 @@ def create_encrypted_collection( database.create_collection(name=name, **kwargs), encrypted_fields, ) - except asyncio.CancelledError: - raise except Exception as exc: raise EncryptedCollectionError(exc, encrypted_fields) from exc diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 3ed5a49ac0..373deabd4e 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2037,8 +2037,6 @@ def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) - except asyncio.CancelledError: - raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it @@ -2053,8 +2051,6 @@ def _process_kill_cursors(self) -> None: for address, cursor_ids in address_to_cursor_ids.items(): try: self._kill_cursors(cursor_ids, address, topology, session=None) - except asyncio.CancelledError: - raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: raise @@ -2069,8 +2065,6 @@ def _process_periodic_tasks(self) -> None: try: self._process_kill_cursors() self._topology.update_pool() - except asyncio.CancelledError: - raise except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: return diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index 211635d8b8..802ba4742f 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -260,8 +260,6 @@ def _check_server(self) -> ServerDescription: details = cast(Mapping[str, Any], exc.details) self._topology.receive_cluster_time(details.get("$clusterTime")) raise - except asyncio.CancelledError: - raise except ReferenceError: raise except Exception as error: @@ -427,8 +425,6 @@ def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception - except asyncio.CancelledError: - raise except Exception: # As per the spec, upon encountering an error: # - An error must not be raised @@ -492,8 +488,6 @@ def _run(self) -> None: except ReferenceError: # Topology was garbage-collected. self.close() - except asyncio.CancelledError: - raise except Exception: self._pool.reset() diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 7c55e04b22..978f0ae391 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -704,8 +704,6 @@ def _close_conn(self) -> None: # shutdown. try: self.conn.close() - except asyncio.CancelledError: - raise except Exception: # noqa: S110 pass From 9a7bac7d4512bd2450e5b2609b3629f013d2cd5c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 12 Feb 2025 08:15:55 -0500 Subject: [PATCH 1746/2111] PYTHON-4865 - Re-enable TestBulkWriteConcern tests (#2144) --- test/asynchronous/test_bulk.py | 1 - test/test_bulk.py | 1 - 2 files changed, 2 deletions(-) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 7191a412c1..86568b666b 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -961,7 +961,6 @@ async def cause_wtimeout(self, requests, ordered): @async_client_context.require_replica_set @async_client_context.require_secondaries_count(1) async def test_write_concern_failure_ordered(self): - self.skipTest("Skipping until PYTHON-4865 is resolved.") details = None # Ensure we don't raise on wnote. diff --git a/test/test_bulk.py b/test/test_bulk.py index 6d29ff510a..6a72bddfc0 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -959,7 +959,6 @@ def cause_wtimeout(self, requests, ordered): @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_ordered(self): - self.skipTest("Skipping until PYTHON-4865 is resolved.") details = None # Ensure we don't raise on wnote. From 61988056479ae02acd48c7744b0939dd4c62294b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Feb 2025 11:15:54 -0600 Subject: [PATCH 1747/2111] PYTHON-4540 Cleaner separation of test lifecycle (#2082) --- .evergreen/config.yml | 112 +------ .evergreen/generated_configs/tasks.yml | 9 +- .evergreen/generated_configs/variants.yml | 80 ++--- .evergreen/run-azurekms-fail-test.sh | 5 +- .evergreen/run-azurekms-test.sh | 17 +- .evergreen/run-gcpkms-test.sh | 16 +- .evergreen/run-mongodb-aws-ecs-test.sh | 1 + .evergreen/run-mongodb-oidc-test.sh | 4 +- .evergreen/run-perf-tests.sh | 1 + .evergreen/run-tests.sh | 288 +++-------------- .evergreen/scripts/configure-env.sh | 15 +- .evergreen/scripts/generate_config.py | 17 +- .evergreen/scripts/install-dependencies.sh | 22 +- .evergreen/scripts/run-atlas-tests.sh | 3 +- .../scripts/run-enterprise-auth-tests.sh | 3 +- .evergreen/scripts/run-gcpkms-fail-test.sh | 9 +- .evergreen/scripts/run-load-balancer.sh | 3 - .evergreen/scripts/run-mongodb-aws-test.sh | 7 +- .evergreen/scripts/run-ocsp-test.sh | 1 + .evergreen/scripts/run-tests.sh | 54 ---- .evergreen/scripts/setup-dev-env.sh | 17 +- .evergreen/scripts/setup-encryption.sh | 5 - .../setup-libmongocrypt.sh} | 14 +- .evergreen/scripts/setup-tests.py | 299 ++++++++++++++++++ .evergreen/scripts/setup-tests.sh | 80 +++-- .evergreen/scripts/stop-load-balancer.sh | 5 - .evergreen/scripts/teardown-tests.sh | 29 ++ .evergreen/setup-spawn-host.sh | 2 +- .evergreen/sync-spawn-host.sh | 5 +- .evergreen/teardown-encryption.sh | 10 - .github/workflows/test-python.yml | 98 ++---- .gitignore | 1 + CONTRIBUTING.md | 12 +- justfile | 12 +- 34 files changed, 612 insertions(+), 644 deletions(-) delete mode 100755 .evergreen/scripts/run-load-balancer.sh delete mode 100755 .evergreen/scripts/run-tests.sh delete mode 100755 .evergreen/scripts/setup-encryption.sh rename .evergreen/{setup-encryption.sh => scripts/setup-libmongocrypt.sh} (79%) create mode 100644 .evergreen/scripts/setup-tests.py delete mode 100755 .evergreen/scripts/stop-load-balancer.sh create mode 100755 .evergreen/scripts/teardown-tests.sh delete mode 100755 .evergreen/teardown-encryption.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f854f6bd3d..028caf4d9b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -42,7 +42,7 @@ functions: # Make an evergreen expansion file with dynamic values - command: subprocess.exec params: - include_expansions_in_env: ["is_patch", "project", "version_id", "AUTH", "SSL", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "SETDEFAULTENCODING", "test_loadbalancer", "test_serverless", "SKIP_CSOT_TESTS", "MONGODB_STARTED", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", "COVERAGE", "COMPRESSORS", "TEST_SUITES", "MONGODB_API_VERSION", "skip_crypt_shared", "VERSION", "TOPOLOGY", "STORAGE_ENGINE", "ORCHESTRATION_FILE", "REQUIRE_API_VERSION", "LOAD_BALANCER", "skip_web_identity_auth_test", "skip_ECS_auth_test"] + include_expansions_in_env: ["is_patch", "project", "version_id", "AUTH", "SSL", "TEST_ENCRYPTION", "TEST_ENCRYPTION_PYOPENSSL", "TEST_CRYPT_SHARED", "TEST_PYOPENSSL", "SETDEFAULTENCODING", "TEST_LOADBALANCER", "TEST_SEVERLESS", "SKIP_CSOT_TESTS", "MONGODB_STARTED", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", "COVERAGE", "COMPRESSORS", "MONGODB_API_VERSION", "skip_crypt_shared", "VERSION", "TOPOLOGY", "STORAGE_ENGINE", "ORCHESTRATION_FILE", "REQUIRE_API_VERSION", "LOAD_BALANCER", "skip_web_identity_auth_test", "skip_ECS_auth_test"] binary: bash working_dir: "src" args: @@ -274,39 +274,22 @@ functions: "run tests": - command: subprocess.exec + type: test params: - include_expansions_in_env: ["TEST_DATA_LAKE", "PYTHON_BINARY", "AUTH", "SSL", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE"] + include_expansions_in_env: ["TEST_DATA_LAKE", "PYTHON_BINARY", "AUTH", "SSL", + "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "TEST_SUITES", + "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "TEST_ENCRYPTION", "TEST_ENCRYPTION_PYOPENSSL", + "TEST_CRYPT_SHARED", "TEST_PYOPENSSL", "TEST_LOADBALANCER", "TEST_SEVERLESS", "MONGODB_URI"] binary: bash working_dir: "src" args: - .evergreen/scripts/setup-tests.sh - - command: subprocess.exec - params: - working_dir: "src" - binary: bash - background: true - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/setup-encryption.sh - - command: subprocess.exec - type: test - params: - working_dir: "src" - binary: bash - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "PYTHON_BINARY", "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "SINGLE_MONGOS_LB_URI", "MULTI_MONGOS_LB_URI", "TEST_SUITES"] - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-tests.sh - - "run direct tests": - command: subprocess.exec type: test params: working_dir: "src" binary: bash - include_expansions_in_env: ["PYTHON_BINARY"] - args: [ .evergreen/scripts/run-direct-tests.sh ] + args: [.evergreen/just.sh, test-eg] "run enterprise auth tests": - command: subprocess.exec @@ -340,13 +323,6 @@ functions: - ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup-secrets.sh "run aws auth test with regular aws credentials": - - command: subprocess.exec - params: - include_expansions_in_env: ["TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -359,13 +335,6 @@ functions: - regular "run aws auth test with assume role credentials": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -378,13 +347,6 @@ functions: - assume-role "run aws auth test with aws EC2 credentials": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -397,13 +359,6 @@ functions: - ec2 "run aws auth test with aws web identity credentials": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - # Test with and without AWS_ROLE_SESSION_NAME set. - command: subprocess.exec type: test @@ -429,13 +384,6 @@ functions: - web-identity "run aws auth test with aws credentials as environment variables": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -448,13 +396,6 @@ functions: - env-creds "run aws auth test with aws credentials and session token as environment variables": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -467,13 +408,6 @@ functions: - session-creds "run oidc auth test with test credentials": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -561,13 +495,6 @@ functions: file: atlas-expansion.yml "run-ocsp-test": - - command: subprocess.exec - params: - include_expansions_in_env: [ "TEST_DATA_LAKE", "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "test_encryption", "test_encryption_pyopenssl", "test_crypt_shared", "test_pyopenssl", "test_loadbalancer", "test_serverless", "ORCHESTRATION_FILE" ] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh - command: subprocess.exec type: test params: @@ -587,25 +514,6 @@ functions: args: - ${DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh - "run load-balancer": - - command: subprocess.exec - params: - binary: bash - include_expansions_in_env: ["MONGODB_URI"] - args: - - src/.evergreen/scripts/run-with-env.sh - - src/.evergreen/scripts/run-load-balancer.sh - - command: expansions.update - params: - file: lb-expansion.yml - - "stop load-balancer": - - command: subprocess.exec - params: - binary: bash - args: - - src/.evergreen/scripts/stop-load-balancer.sh - "teardown atlas": - command: subprocess.exec params: @@ -882,6 +790,7 @@ tasks: - func: "run tests" vars: TEST_INDEX_MANAGEMENT: "1" + AUTH: "auth" - name: "mod-wsgi-standalone" tags: ["mod_wsgi"] @@ -935,7 +844,7 @@ tasks: vars: VERSION: "8.0" TOPOLOGY: "replica_set" - - func: "run direct tests" + - func: "run tests" - name: "atlas-connect" tags: ["atlas-connect"] @@ -1503,7 +1412,7 @@ tasks: - name: "testgcpkms-task" commands: - command: subprocess.exec - type: setup + type: test params: working_dir: "src" binary: bash @@ -1531,6 +1440,7 @@ tasks: - name: testazurekms-task commands: - command: subprocess.exec + type: test params: binary: bash working_dir: src diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 37b7a622d5..6b17035748 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -8,12 +8,11 @@ tasks: AUTH: auth SSL: ssl LOAD_BALANCER: "true" - - func: run load-balancer - func: run tests vars: AUTH: auth SSL: ssl - test_loadbalancer: "true" + TEST_LOADBALANCER: "true" tags: [load-balancer, auth, ssl] - name: test-load-balancer-noauth-ssl commands: @@ -23,12 +22,11 @@ tasks: AUTH: noauth SSL: ssl LOAD_BALANCER: "true" - - func: run load-balancer - func: run tests vars: AUTH: noauth SSL: ssl - test_loadbalancer: "true" + TEST_LOADBALANCER: "true" tags: [load-balancer, noauth, ssl] - name: test-load-balancer-noauth-nossl commands: @@ -38,12 +36,11 @@ tasks: AUTH: noauth SSL: nossl LOAD_BALANCER: "true" - - func: run load-balancer - func: run tests vars: AUTH: noauth SSL: nossl - test_loadbalancer: "true" + TEST_LOADBALANCER: "true" tags: [load-balancer, noauth, nossl] # Server tests diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 20b89f7e69..531f23eb66 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -318,7 +318,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - name: encryption-rhel8-python3.13 @@ -331,7 +331,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - name: encryption-rhel8-pypy3.10 @@ -344,7 +344,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-python3.9 @@ -357,8 +357,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-python3.13 @@ -371,8 +371,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-pypy3.10 @@ -385,8 +385,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-python3.9 @@ -399,8 +399,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" - test_encryption_pyopenssl: "true" + TEST_ENCRYPTION: "true" + TEST_ENCRYPTION_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-python3.13 @@ -413,8 +413,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" - test_encryption_pyopenssl: "true" + TEST_ENCRYPTION: "true" + TEST_ENCRYPTION_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-pypy3.10 @@ -427,8 +427,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_encryption: "true" - test_encryption_pyopenssl: "true" + TEST_ENCRYPTION: "true" + TEST_ENCRYPTION_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - name: encryption-rhel8-python3.10 @@ -438,7 +438,7 @@ buildvariants: run_on: - rhel87-small expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: encryption-crypt_shared-rhel8-python3.11 tasks: @@ -447,8 +447,8 @@ buildvariants: run_on: - rhel87-small expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: encryption-pyopenssl-rhel8-python3.12 tasks: @@ -457,8 +457,8 @@ buildvariants: run_on: - rhel87-small expansions: - test_encryption: "true" - test_encryption_pyopenssl: "true" + TEST_ENCRYPTION: "true" + TEST_ENCRYPTION_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: encryption-macos-python3.9 tasks: @@ -468,7 +468,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - name: encryption-macos-python3.13 @@ -479,7 +479,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-macos-python3.9 @@ -490,8 +490,8 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-macos-python3.13 @@ -502,8 +502,8 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - name: encryption-win64-python3.9 @@ -514,7 +514,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - name: encryption-win64-python3.13 @@ -525,7 +525,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - test_encryption: "true" + TEST_ENCRYPTION: "true" PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] - name: encryption-crypt_shared-win64-python3.9 @@ -536,8 +536,8 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - name: encryption-crypt_shared-win64-python3.13 @@ -548,8 +548,8 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - test_encryption: "true" - test_crypt_shared: "true" + TEST_ENCRYPTION: "true" + TEST_CRYPT_SHARED: "true" PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] @@ -1010,7 +1010,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - test_pyopenssl: "true" + TEST_PYOPENSSL: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-python3.10 tasks: @@ -1021,7 +1021,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_pyopenssl: "true" + TEST_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-python3.11 tasks: @@ -1032,7 +1032,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_pyopenssl: "true" + TEST_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-python3.12 tasks: @@ -1043,7 +1043,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_pyopenssl: "true" + TEST_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-python3.13 tasks: @@ -1054,7 +1054,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - test_pyopenssl: "true" + TEST_PYOPENSSL: "true" PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.10 tasks: @@ -1065,7 +1065,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_pyopenssl: "true" + TEST_PYOPENSSL: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Search index tests @@ -1301,7 +1301,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_serverless: "true" + TEST_SERVERLESS: "true" AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 @@ -1313,7 +1313,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - test_serverless: "true" + TEST_SERVERLESS: "true" AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.13/bin/python3 diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh index d1117dcb32..31ca30b3e2 100755 --- a/.evergreen/run-azurekms-fail-test.sh +++ b/.evergreen/run-azurekms-fail-test.sh @@ -3,10 +3,9 @@ set -o errexit # Exit the script with error if any of the commands fail HERE=$(dirname ${BASH_SOURCE:-$0}) . $DRIVERS_TOOLS/.evergreen/csfle/azurekms/setup-secrets.sh export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz -SKIP_SERVERS=1 bash $HERE/setup-encryption.sh +SUCCESS=false TEST_FLE_AZURE_AUTO=1 bash $HERE/scripts/setup-tests.sh PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ KEY_NAME="${AZUREKMS_KEYNAME}" \ KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ - SUCCESS=false TEST_FLE_AZURE_AUTO=1 \ $HERE/just.sh test-eg -bash $HERE/teardown-encryption.sh +bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh index 28a84a52e2..27cb3fb315 100755 --- a/.evergreen/run-azurekms-test.sh +++ b/.evergreen/run-azurekms-test.sh @@ -6,24 +6,23 @@ echo "Copying files ... begin" export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey -export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz -SKIP_SERVERS=1 bash $HERE/setup-encryption.sh +LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz # Set up the remote files to test. git add . git commit -m "add files" || true -git archive -o /tmp/mongo-python-driver.tar HEAD -tar -rf /tmp/mongo-python-driver.tar libmongocrypt -gzip -f /tmp/mongo-python-driver.tar +git archive -o /tmp/mongo-python-driver.tgz HEAD # shellcheck disable=SC2088 -AZUREKMS_SRC="/tmp/mongo-python-driver.tar.gz" AZUREKMS_DST="~/" \ +AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" AZUREKMS_DST="~/" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh echo "Copying files ... end" echo "Untarring file ... begin" -AZUREKMS_CMD="tar xf mongo-python-driver.tar.gz" \ +AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" SUCCESS=true TEST_FLE_AZURE_AUTO=1 bash ./.evergreen/just.sh test-eg" \ +AZUREKMS_CMD="SUCCESS=true TEST_FLE_AZURE_AUTO=1 LIBMONGOCRYPT_URL=$LIBMONGOCRYPT_URL bash .evergreen/just.sh setup-test" \ + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh +AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" bash ./.evergreen/just.sh test-eg" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Running test ... end" -bash $HERE/teardown-encryption.sh +bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh index 37ec2bfe56..077ca0cb9f 100755 --- a/.evergreen/run-gcpkms-test.sh +++ b/.evergreen/run-gcpkms-test.sh @@ -8,20 +8,18 @@ export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} export GCPKMS_PROJECT=${GCPKMS_PROJECT} export GCPKMS_ZONE=${GCPKMS_ZONE} export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} -export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz -SKIP_SERVERS=1 bash $HERE/setup-encryption.sh +LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz # Set up the remote files to test. git add . git commit -m "add files" || true -git archive -o /tmp/mongo-python-driver.tar HEAD -tar -rf /tmp/mongo-python-driver.tar libmongocrypt -gzip -f /tmp/mongo-python-driver.tar -GCPKMS_SRC=/tmp/mongo-python-driver.tar.gz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh +git archive -o /tmp/mongo-python-driver.tgz HEAD +GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh echo "Copying files ... end" echo "Untarring file ... begin" -GCPKMS_CMD="tar xf mongo-python-driver.tar.gz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 ./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=$LIBMONGOCRYPT_URL bash ./.evergreen/just.sh setup-test" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Running test ... end" -bash $HERE/teardown-encryption.sh +bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 91777be226..96d3c0611e 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -31,4 +31,5 @@ export AUTH="auth" export SET_XTRACE_ON=1 cd src rm -rf .venv +bash ./.evergreen/just.sh setup-test bash .evergreen/just.sh test-eg diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 46c4f24969..552f9ef08e 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -29,7 +29,5 @@ else exit 1 fi -export TEST_AUTH_OIDC=1 -export COVERAGE=1 -export AUTH="auth" +TEST_AUTH_OIDC=1 COVERAGE=1 AUTH="auth" bash ./.evergreen/just.sh setup-test bash ./.evergreen/just.sh test-eg "${@:1}" diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index e6a51b3297..d0e001c5fc 100755 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -16,4 +16,5 @@ export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 export PERF_TEST=1 +bash ./.evergreen/just.sh setup-test bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index fbe310ad1e..12935b25a0 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,283 +1,81 @@ #!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -set -o xtrace +set -eu -# Note: It is assumed that you have already set up a virtual environment before running this file. - -# Supported/used environment variables: -# AUTH Set to enable authentication. Defaults to "noauth" -# SSL Set to enable SSL. Defaults to "nossl" -# GREEN_FRAMEWORK The green framework to test with, if any. -# COVERAGE If non-empty, run the test suite with coverage. -# COMPRESSORS If non-empty, install appropriate compressor. -# LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# TEST_DATA_LAKE If non-empty, run data lake tests. -# TEST_ENCRYPTION If non-empty, run encryption tests. -# TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. -# TEST_SERVERLESS If non-empy, test on serverless. -# TEST_LOADBALANCER If non-empy, test load balancing. -# TEST_FLE_AZURE_AUTO If non-empy, test auto FLE on Azure -# TEST_FLE_GCP_AUTO If non-empy, test auto FLE on GCP -# TEST_PYOPENSSL If non-empy, test with PyOpenSSL -# TEST_ENTERPRISE_AUTH If non-empty, test with Enterprise Auth -# TEST_AUTH_AWS If non-empty, test AWS Auth Mechanism -# TEST_AUTH_OIDC If non-empty, test OIDC Auth Mechanism -# TEST_PERF If non-empty, run performance tests -# TEST_OCSP If non-empty, run OCSP tests -# TEST_ATLAS If non-empty, test Atlas connections -# TEST_INDEX_MANAGEMENT If non-empty, run index management tests -# TEST_ENCRYPTION_PYOPENSSL If non-empy, test encryption with PyOpenSSL - -AUTH=${AUTH:-noauth} -SSL=${SSL:-nossl} -TEST_SUITES=${TEST_SUITES:-} -TEST_ARGS="${*:1}" +SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) +ROOT_DIR="$(dirname "$(dirname $SCRIPT_DIR)")" export PIP_QUIET=1 # Quiet by default export PIP_PREFER_BINARY=1 # Prefer binary dists by default +export UV_FROZEN=1 # Do not modify lock files -set +x -PYTHON_IMPL=$(uv run --frozen python -c "import platform; print(platform.python_implementation())") - -# Try to source local Drivers Secrets -if [ -f ./secrets-export.sh ]; then - echo "Sourcing secrets" - source ./secrets-export.sh +# Try to source the env file. +if [ -f $SCRIPT_DIR/scripts/env.sh ]; then + echo "Sourcing env inputs" + . $SCRIPT_DIR/scripts/env.sh else - echo "Not sourcing secrets" + echo "Not sourcing env inputs" fi -# Start compiling the args we'll pass to uv. -# Run in an isolated environment so as not to pollute the base venv. -UV_ARGS=("--isolated --frozen --extra test") - -# Ensure C extensions if applicable. -if [ -z "${NO_EXT:-}" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - uv run --frozen tools/fail_if_no_c.py -fi - -if [ "$AUTH" != "noauth" ]; then - if [ -n "$TEST_DATA_LAKE" ]; then - export DB_USER="mhuser" - export DB_PASSWORD="pencil" - elif [ -n "$TEST_SERVERLESS" ]; then - source "${DRIVERS_TOOLS}"/.evergreen/serverless/secrets-export.sh - export DB_USER=$SERVERLESS_ATLAS_USER - export DB_PASSWORD=$SERVERLESS_ATLAS_PASSWORD - export MONGODB_URI="$SERVERLESS_URI" - echo "MONGODB_URI=$MONGODB_URI" - export SINGLE_MONGOS_LB_URI=$MONGODB_URI - export MULTI_MONGOS_LB_URI=$MONGODB_URI - elif [ -n "$TEST_AUTH_OIDC" ]; then - export DB_USER=$OIDC_ADMIN_USER - export DB_PASSWORD=$OIDC_ADMIN_PWD - export DB_IP="$MONGODB_URI" - else - export DB_USER="bob" - export DB_PASSWORD="pwd123" - fi - echo "Added auth, DB_USER: $DB_USER" -fi - -if [ -n "$TEST_ENTERPRISE_AUTH" ]; then - UV_ARGS+=("--extra gssapi") - if [ "Windows_NT" = "$OS" ]; then - echo "Setting GSSAPI_PASS" - export GSSAPI_PASS=${SASL_PASS} - export GSSAPI_CANONICALIZE="true" - else - # BUILD-3830 - touch krb5.conf.empty - export KRB5_CONFIG=${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - - echo "Writing keytab" - echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab - echo "Running kinit" - kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p ${PRINCIPAL} - fi - echo "Setting GSSAPI variables" - export GSSAPI_HOST=${SASL_HOST} - export GSSAPI_PORT=${SASL_PORT} - export GSSAPI_PRINCIPAL=${PRINCIPAL} - - export TEST_SUITES="auth" -fi - -if [ -n "$TEST_LOADBALANCER" ]; then - export LOAD_BALANCER=1 - export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI:-mongodb://127.0.0.1:8000/?loadBalanced=true}" - export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI:-mongodb://127.0.0.1:8001/?loadBalanced=true}" - export TEST_SUITES="load_balancer" +# Ensure there are test inputs. +if [ -f $SCRIPT_DIR/scripts/test-env.sh ]; then + echo "Sourcing test inputs" + . $SCRIPT_DIR/scripts/test-env.sh +else + echo "Missing test inputs, please run 'just setup-test'" fi -if [ "$SSL" != "nossl" ]; then - export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" - export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" - - if [ -n "$TEST_LOADBALANCER" ]; then - export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}&tls=true" - export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}&tls=true" - fi +# Source the local secrets export file if available. +if [ -f "$ROOT_DIR/secrets-export.sh" ]; then + . "$ROOT_DIR/secrets-export.sh" fi -if [ "$COMPRESSORS" = "snappy" ]; then - UV_ARGS+=("--extra snappy") -elif [ "$COMPRESSORS" = "zstd" ]; then - UV_ARGS+=("--extra zstandard") -fi +PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") -# PyOpenSSL test setup. -if [ -n "$TEST_PYOPENSSL" ]; then - UV_ARGS+=("--extra ocsp") +# Ensure C extensions if applicable. +if [ -z "${NO_EXT:-}" ] && [ "$PYTHON_IMPL" = "CPython" ]; then + uv run --frozen tools/fail_if_no_c.py fi -if [ -n "$TEST_ENCRYPTION" ] || [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then - # Check for libmongocrypt download. - if [ ! -d "libmongocrypt" ]; then - echo "Run encryption setup first!" - exit 1 - fi - - UV_ARGS+=("--extra encryption") - # TODO: Test with 'pip install pymongocrypt' - UV_ARGS+=("--group pymongocrypt_source") - - # Use the nocrypto build to avoid dependency issues with older windows/python versions. - BASE=$(pwd)/libmongocrypt/nocrypto - if [ -f "${BASE}/lib/libmongocrypt.so" ]; then - PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so - elif [ -f "${BASE}/lib/libmongocrypt.dylib" ]; then - PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib - elif [ -f "${BASE}/bin/mongocrypt.dll" ]; then - PYMONGOCRYPT_LIB=${BASE}/bin/mongocrypt.dll - # libmongocrypt's windows dll is not marked executable. - chmod +x $PYMONGOCRYPT_LIB - PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) - elif [ -f "${BASE}/lib64/libmongocrypt.so" ]; then - PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so - else - echo "Cannot find libmongocrypt shared object file" - exit 1 - fi - export PYMONGOCRYPT_LIB +if [ -n "${PYMONGOCRYPT_LIB:-}" ]; then # Ensure pymongocrypt is working properly. # shellcheck disable=SC2048 - uv run ${UV_ARGS[*]} python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" + uv run ${UV_ARGS} python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" # shellcheck disable=SC2048 - uv run ${UV_ARGS[*]} python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" + uv run ${UV_ARGS} python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" # PATH is updated by configure-env.sh for access to mongocryptd. fi -if [ -n "$TEST_ENCRYPTION" ]; then - if [ -n "$TEST_ENCRYPTION_PYOPENSSL" ]; then - UV_ARGS+=("--extra ocsp") - fi - - if [ -n "$TEST_CRYPT_SHARED" ]; then - CRYPT_SHARED_DIR=`dirname $CRYPT_SHARED_LIB_PATH` - echo "using crypt_shared_dir $CRYPT_SHARED_DIR" - export DYLD_FALLBACK_LIBRARY_PATH=$CRYPT_SHARED_DIR:$DYLD_FALLBACK_LIBRARY_PATH - export LD_LIBRARY_PATH=$CRYPT_SHARED_DIR:$LD_LIBRARY_PATH - export PATH=$CRYPT_SHARED_DIR:$PATH - fi - # Only run the encryption tests. - TEST_SUITES="encryption" -fi - -if [ -n "$TEST_FLE_AZURE_AUTO" ] || [ -n "$TEST_FLE_GCP_AUTO" ]; then - if [[ -z "$SUCCESS" ]]; then - echo "Must define SUCCESS" - exit 1 - fi - - if echo "$MONGODB_URI" | grep -q "@"; then - echo "MONGODB_URI unexpectedly contains user credentials in FLE test!"; - exit 1 - fi - TEST_SUITES="csfle" -fi - -if [ -n "$TEST_INDEX_MANAGEMENT" ]; then - source $DRIVERS_TOOLS/.evergreen/atlas/secrets-export.sh - export DB_USER="${DRIVERS_ATLAS_LAMBDA_USER}" - set +x - export DB_PASSWORD="${DRIVERS_ATLAS_LAMBDA_PASSWORD}" - set -x - TEST_SUITES="index_management" -fi - -if [ -n "$TEST_DATA_LAKE" ] && [ -z "$TEST_ARGS" ]; then - TEST_SUITES="data_lake" -fi - -if [ -n "$TEST_ATLAS" ]; then - TEST_SUITES="atlas" -fi - -if [ -n "$TEST_OCSP" ]; then - UV_ARGS+=("--extra ocsp") - TEST_SUITES="ocsp" -fi - -if [ -n "$TEST_AUTH_AWS" ]; then - UV_ARGS+=("--extra aws") - TEST_SUITES="auth_aws" -fi +PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") +echo "Running ${AUTH:-noauth} tests over ${SSL:-nossl} with python $(uv python find)" +uv run python -c 'import sys; print(sys.version)' -if [ -n "$TEST_AUTH_OIDC" ]; then - UV_ARGS+=("--extra aws") - TEST_SUITES="auth_oidc" -fi +# Show the installed packages +# shellcheck disable=SC2048 +PIP_QUIET=0 uv run ${UV_ARGS} --with pip pip list -if [ -n "$PERF_TEST" ]; then - UV_ARGS+=("--group perf") +# Record the start time for a perf test. +if [ -n "${PERF_TEST:-}" ]; then start_time=$(date +%s) - TEST_SUITES="perf" - # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively - # affects the benchmark results. - TEST_ARGS="test/performance/perf_test.py $TEST_ARGS" fi -echo "Running $AUTH tests over $SSL with python $(uv python find)" -uv run --frozen python -c 'import sys; print(sys.version)' - - # Run the tests, and store the results in Evergreen compatible XUnit XML # files in the xunit-results/ directory. - -# Run the tests with coverage if requested and coverage is installed. -# Only cover CPython. PyPy reports suspiciously low coverage. -if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - # Keep in sync with combine-coverage.sh. - # coverage >=5 is needed for relative_files=true. - UV_ARGS+=("--group coverage") - TEST_ARGS="$TEST_ARGS --cov" +TEST_ARGS=${TEST_ARGS} +if [ "$#" -ne 0 ]; then + TEST_ARGS="$*" fi - -if [ -n "$GREEN_FRAMEWORK" ]; then - UV_ARGS+=("--group $GREEN_FRAMEWORK") -fi - -# Show the installed packages -# shellcheck disable=SC2048 -PIP_QUIET=0 uv run ${UV_ARGS[*]} --with pip pip list - -if [ -z "$GREEN_FRAMEWORK" ]; then - # Use --capture=tee-sys so pytest prints test output inline: - # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html - PYTEST_ARGS="-v --capture=tee-sys --durations=5 $TEST_ARGS" - if [ -n "$TEST_SUITES" ]; then - PYTEST_ARGS="-m $TEST_SUITES $PYTEST_ARGS" - fi +echo "Running tests with $TEST_ARGS and uv args $UV_ARGS..." +if [ -z "${GREEN_FRAMEWORK:-}" ]; then # shellcheck disable=SC2048 - uv run ${UV_ARGS[*]} pytest $PYTEST_ARGS + uv run ${UV_ARGS} pytest $TEST_ARGS else # shellcheck disable=SC2048 - uv run ${UV_ARGS[*]} green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS + uv run ${UV_ARGS} green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS fi +echo "Running tests with $TEST_ARGS... done." # Handle perf test post actions. -if [ -n "$PERF_TEST" ]; then +if [ -n "${PERF_TEST:-}" ]; then end_time=$(date +%s) elapsed_secs=$((end_time-start_time)) @@ -289,6 +87,6 @@ if [ -n "$PERF_TEST" ]; then fi # Handle coverage post actions. -if [ -n "$COVERAGE" ]; then +if [ -n "${COVERAGE:-}" ]; then rm -rf .pytest_cache fi diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index cb018d09f0..a008499ea2 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -16,6 +16,15 @@ DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools" CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} UV_TOOL_DIR=$PROJECT_DIRECTORY/.local/uv/tools UV_CACHE_DIR=$PROJECT_DIRECTORY/.local/uv/cache +DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS/.bin" + +# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. +if [ "${CI:-}" == "true" ]; then + PYMONGO_BIN_DIR=${DRIVERS_TOOLS_BINARIES:-} +# We want to use a path that's already on PATH on spawn hosts. +else + PYMONGO_BIN_DIR=$HOME/cli_bin +fi # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin @@ -24,6 +33,8 @@ if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin CARGO_HOME=$(cygpath -m $CARGO_HOME) UV_TOOL_DIR=$(cygpath -m "$UV_TOOL_DIR") UV_CACHE_DIR=$(cygpath -m "$UV_CACHE_DIR") + DRIVERS_TOOLS_BINARIES=$(cygpath -m "$DRIVERS_TOOLS_BINARIES") + PYMONGO_BIN_DIR=$(cygpath -m "$PYMONGO_BIN_DIR") fi SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" @@ -36,7 +47,6 @@ fi export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" -export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS/.bin" cat < "$SCRIPT_DIR"/env.sh export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" @@ -67,7 +77,8 @@ export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" export UV_TOOL_DIR="$UV_TOOL_DIR" export UV_CACHE_DIR="$UV_CACHE_DIR" export UV_TOOL_BIN_DIR="$DRIVERS_TOOLS_BINARIES" -export PATH="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PATH" +export PYMONGO_BIN_DIR="$PYMONGO_BIN_DIR" +export PATH="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PYMONGO_BIN_DIR:$PATH" # shellcheck disable=SC2154 export PROJECT="${project:-mongo-python-driver}" export PIP_QUIET=1 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 1337836379..59c3c720bf 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -344,11 +344,11 @@ def create_encryption_variants() -> list[BuildVariant]: batchtime = BATCHTIME_WEEK def get_encryption_expansions(encryption): - expansions = dict(test_encryption="true") + expansions = dict(TEST_ENCRYPTION="true") if "crypt_shared" in encryption: - expansions["test_crypt_shared"] = "true" + expansions["TEST_CRYPT_SHARED"] = "true" if "PyOpenSSL" in encryption: - expansions["test_encryption_pyopenssl"] = "true" + expansions["TEST_ENCRYPTION_PYOPENSSL"] = "true" return expansions host = DEFAULT_HOST @@ -487,7 +487,7 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" batchtime = BATCHTIME_WEEK - expansions = dict(test_pyopenssl="true") + expansions = dict(TEST_PYOPENSSL="true") variants = [] for python in ALL_PYTHONS: @@ -645,7 +645,7 @@ def create_disable_test_commands_variants(): def create_serverless_variants(): host = DEFAULT_HOST batchtime = BATCHTIME_WEEK - expansions = dict(test_serverless="true", AUTH="auth", SSL="ssl") + expansions = dict(TEST_SERVERLESS="true", AUTH="auth", SSL="ssl") tasks = ["serverless_task_group"] base_name = "Serverless" return [ @@ -834,12 +834,9 @@ def create_load_balancer_tasks(): tags = ["load-balancer", auth, ssl] bootstrap_vars = dict(TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, LOAD_BALANCER="true") bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) - balancer_func = FunctionCall(func="run load-balancer") - test_vars = dict(AUTH=auth, SSL=ssl, test_loadbalancer="true") + test_vars = dict(AUTH=auth, SSL=ssl, TEST_LOADBALANCER="true") test_func = FunctionCall(func="run tests", vars=test_vars) - tasks.append( - EvgTask(name=name, tags=tags, commands=[bootstrap_func, balancer_func, test_func]) - ) + tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) return tasks diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 39b77199bb..e2598edcad 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -2,13 +2,16 @@ set -eu -# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. -if [ "${CI:-}" == "true" ]; then - _BIN_DIR=${DRIVERS_TOOLS_BINARIES:-} -else - _BIN_DIR=$HOME/.local/bin +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" > /dev/null + +# Source the env files to pick up common variables. +if [ -f $HERE/env.sh ]; then + . $HERE/env.sh fi +_BIN_DIR=${PYMONGO_BIN_DIR:-$HOME/.local/bin} +export PATH="$PATH:${_BIN_DIR}" # Helper function to pip install a dependency using a temporary python env. function _pip_install() { @@ -19,6 +22,7 @@ function _pip_install() { createvirtualenv "$(find_python3)" $_VENV_PATH python -m pip install $1 ln -s "$(which $2)" $_BIN_DIR/$2 + echo "Installed to ${_BIN_DIR}" echo "Installing $2 using pip... done." } @@ -35,9 +39,6 @@ if ! command -v just 2>/dev/null; then curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { _pip_install rust-just just } - if ! command -v just 2>/dev/null; then - export PATH="$PATH:$_BIN_DIR" - fi echo "Installing just... done." fi @@ -48,8 +49,7 @@ if ! command -v uv 2>/dev/null; then curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { _pip_install uv uv } - if ! command -v uv 2>/dev/null; then - export PATH="$PATH:$_BIN_DIR" - fi echo "Installing uv... done." fi + +popd > /dev/null diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh index 30b8d5a615..e5684b7cb4 100755 --- a/.evergreen/scripts/run-atlas-tests.sh +++ b/.evergreen/scripts/run-atlas-tests.sh @@ -4,4 +4,5 @@ set +x set -o errexit bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect -TEST_ATLAS=1 bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg +TEST_ATLAS=1 bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh index e015a34ca4..6c300325d2 100755 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -5,4 +5,5 @@ set -eu set +x # Use the default python to bootstrap secrets. bash "${DRIVERS_TOOLS}"/.evergreen/secrets_handling/setup-secrets.sh drivers/enterprise_auth -TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg +TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh index 594a2984fa..61f5e30ccc 100755 --- a/.evergreen/scripts/run-gcpkms-fail-test.sh +++ b/.evergreen/scripts/run-gcpkms-fail-test.sh @@ -1,7 +1,8 @@ #!/bin/bash - -. .evergreen/scripts/env.sh +set -eu +HERE=$(dirname ${BASH_SOURCE:-$0}) +. $HERE/env.sh export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz -SKIP_SERVERS=1 bash ./.evergreen/setup-encryption.sh -SUCCESS=false TEST_FLE_GCP_AUTO=1 ./.evergreen/just.sh test-eg +SUCCESS=false TEST_FLE_GCP_AUTO=1 bash $HERE/setup-tests.sh +bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-load-balancer.sh b/.evergreen/scripts/run-load-balancer.sh deleted file mode 100755 index 7d431777e5..0000000000 --- a/.evergreen/scripts/run-load-balancer.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -MONGODB_URI=${MONGODB_URI} bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh start diff --git a/.evergreen/scripts/run-mongodb-aws-test.sh b/.evergreen/scripts/run-mongodb-aws-test.sh index 88c3236b3f..255f84f295 100755 --- a/.evergreen/scripts/run-mongodb-aws-test.sh +++ b/.evergreen/scripts/run-mongodb-aws-test.sh @@ -24,10 +24,5 @@ echo "Running MONGODB-AWS authentication tests for $1" # Handle credentials and environment setup. . "$DRIVERS_TOOLS"/.evergreen/auth_aws/aws_setup.sh "$1" -# show test output -set -x - -export TEST_AUTH_AWS=1 -export AUTH="auth" -export SET_XTRACE_ON=1 +TEST_AUTH_AWS=1 AUTH="auth" bash ./.evergreen/just.sh setup-test bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-ocsp-test.sh b/.evergreen/scripts/run-ocsp-test.sh index 328bd2f203..2b9cbd476d 100755 --- a/.evergreen/scripts/run-ocsp-test.sh +++ b/.evergreen/scripts/run-ocsp-test.sh @@ -4,5 +4,6 @@ TEST_OCSP=1 \ PYTHON_BINARY="${PYTHON_BINARY}" \ CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg bash "${DRIVERS_TOOLS}"/.evergreen/ocsp/teardown.sh diff --git a/.evergreen/scripts/run-tests.sh b/.evergreen/scripts/run-tests.sh deleted file mode 100755 index ea923b3f5e..0000000000 --- a/.evergreen/scripts/run-tests.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# Disable xtrace -set +x -if [ -n "${MONGODB_STARTED}" ]; then - export PYMONGO_MUST_CONNECT=true -fi -if [ -n "${DISABLE_TEST_COMMANDS}" ]; then - export PYMONGO_DISABLE_TEST_COMMANDS=1 -fi -if [ -n "${test_encryption}" ]; then - # Disable xtrace (just in case it was accidentally set). - set +x - bash "${DRIVERS_TOOLS}"/.evergreen/csfle/await-servers.sh - export TEST_ENCRYPTION=1 - if [ -n "${test_encryption_pyopenssl}" ]; then - export TEST_ENCRYPTION_PYOPENSSL=1 - fi -fi -if [ -n "${test_crypt_shared}" ]; then - export TEST_CRYPT_SHARED=1 - export CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} -fi -if [ -n "${test_pyopenssl}" ]; then - export TEST_PYOPENSSL=1 -fi -if [ -n "${SETDEFAULTENCODING}" ]; then - export SETDEFAULTENCODING="${SETDEFAULTENCODING}" -fi -if [ -n "${test_loadbalancer}" ]; then - export TEST_LOADBALANCER=1 - export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" - export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" -fi -if [ -n "${test_serverless}" ]; then - export TEST_SERVERLESS=1 -fi -if [ -n "${TEST_INDEX_MANAGEMENT:-}" ]; then - export TEST_INDEX_MANAGEMENT=1 -fi -if [ -n "${SKIP_CSOT_TESTS}" ]; then - export SKIP_CSOT_TESTS=1 -fi -GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ - PYTHON_BINARY=${PYTHON_BINARY} \ - NO_EXT=${NO_EXT} \ - COVERAGE=${COVERAGE} \ - COMPRESSORS=${COMPRESSORS} \ - AUTH=${AUTH} \ - SSL=${SSL} \ - TEST_DATA_LAKE=${TEST_DATA_LAKE:-} \ - TEST_SUITES=${TEST_SUITES:-} \ - MONGODB_API_VERSION=${MONGODB_API_VERSION} \ - bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index b56897961e..0cbf53e19e 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -5,14 +5,17 @@ set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) pushd "$(dirname "$(dirname $HERE)")" > /dev/null -# Source the env file to pick up common variables. +# Source the env files to pick up common variables. if [ -f $HERE/env.sh ]; then - source $HERE/env.sh + . $HERE/env.sh +fi +# PYTHON_BINARY may be defined in test-env.sh. +if [ -f $HERE/test-env.sh ]; then + . $HERE/test-env.sh fi # Ensure dependencies are installed. -. $HERE/install-dependencies.sh - +bash $HERE/install-dependencies.sh # Set the location of the python bin dir. if [ "Windows_NT" = "${OS:-}" ]; then @@ -32,6 +35,12 @@ if [ ! -d $BIN_DIR ]; then echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh echo "Using python $UV_PYTHON" fi + +# Add the default install path to the path if needed. +if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + export PATH="$PATH:$HOME/.local/bin" +fi + uv sync --frozen uv run --frozen --with pip pip install -e . echo "Setting up python environment... done." diff --git a/.evergreen/scripts/setup-encryption.sh b/.evergreen/scripts/setup-encryption.sh deleted file mode 100755 index 5b73240205..0000000000 --- a/.evergreen/scripts/setup-encryption.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -if [ -n "${test_encryption}" ]; then - bash .evergreen/setup-encryption.sh -fi diff --git a/.evergreen/setup-encryption.sh b/.evergreen/scripts/setup-libmongocrypt.sh similarity index 79% rename from .evergreen/setup-encryption.sh rename to .evergreen/scripts/setup-libmongocrypt.sh index b403ef9ca8..775db07cb0 100755 --- a/.evergreen/setup-encryption.sh +++ b/.evergreen/scripts/setup-libmongocrypt.sh @@ -2,11 +2,6 @@ set -o errexit # Exit the script with error if any of the commands fail set -o xtrace -if [ -z "${DRIVERS_TOOLS}" ]; then - echo "Missing environment variable DRIVERS_TOOLS" - exit 1 -fi - TARGET="" if [ "Windows_NT" = "${OS:-''}" ]; then # Magic variable in cygwin @@ -51,10 +46,7 @@ tar xzf libmongocrypt.tar.gz -C ./libmongocrypt ls -la libmongocrypt ls -la libmongocrypt/nocrypto -if [ -z "${SKIP_SERVERS:-}" ]; then - PYTHON_BINARY_OLD=${PYTHON_BINARY} - export PYTHON_BINARY="" - bash "${DRIVERS_TOOLS}"/.evergreen/csfle/setup-secrets.sh - export PYTHON_BINARY=$PYTHON_BINARY_OLD - bash "${DRIVERS_TOOLS}"/.evergreen/csfle/start-servers.sh +if [ "Windows_NT" = "${OS:-''}" ]; then + # libmongocrypt's windows dll is not marked executable. + chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll fi diff --git a/.evergreen/scripts/setup-tests.py b/.evergreen/scripts/setup-tests.py new file mode 100644 index 0000000000..4e14ff9830 --- /dev/null +++ b/.evergreen/scripts/setup-tests.py @@ -0,0 +1,299 @@ +from __future__ import annotations + +import base64 +import logging +import os +import platform +import shlex +import stat +import subprocess +import sys +from pathlib import Path +from typing import Any + +HERE = Path(__file__).absolute().parent +ROOT = HERE.parent.parent +ENV_FILE = HERE / "test-env.sh" +DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +EXPECTED_VARS = [ + "TEST_ENCRYPTION", + "TEST_ENCRYPTION_PYOPENSSL", + "TEST_CRYPT_SHARED", + "TEST_PYOPENSSL", + "TEST_LOAD_BALANCER", + "TEST_SERVERLESS", + "TEST_INDEX_MANAGEMENT", + "TEST_ENTERPRISE_AUTH", + "TEST_FLE_AZURE_AUTO", + "TEST_FLE_GCP_AUTO", + "TEST_LOADBALANCER", + "TEST_DATA_LAKE", + "TEST_ATLAS", + "TEST_OCSP", + "TEST_AUTH_AWS", + "TEST_AUTH_OIDC", + "COMPRESSORS", + "MONGODB_URI", + "PERF_TEST", + "GREEN_FRAMEWORK", + "PYTHON_BINARY", + "LIBMONGOCRYPT_URL", +] + +# Handle the test suite based on the presence of env variables. +TEST_SUITE_MAP = dict( + TEST_DATA_LAKE="data_lake", + TEST_AUTH_OIDC="auth_oidc", + TEST_INDEX_MANAGEMENT="index_management", + TEST_ENTERPRISE_AUTH="auth", + TEST_LOADBALANCER="load_balancer", + TEST_ENCRYPTION="encryption", + TEST_FLE_AZURE_AUTO="csfle", + TEST_FLE_GCP_AUTO="csfle", + TEST_ATLAS="atlas", + TEST_OCSP="ocsp", + TEST_AUTH_AWS="auth_aws", + PERF_TEST="perf", +) + +# Handle extras based on the presence of env variables. +EXTRAS_MAP = dict( + TEST_AUTH_OIDC="aws", + TEST_AUTH_AWS="aws", + TEST_OCSP="ocsp", + TEST_PYOPENSSL="ocsp", + TEST_ENTERPRISE_AUTH="gssapi", + TEST_ENCRYPTION="encryption", + TEST_FLE_AZURE_AUTO="encryption", + TEST_FLE_GCP_AUTO="encryption", + TEST_ENCRYPTION_PYOPENSSL="ocsp", +) + + +def write_env(name: str, value: Any) -> None: + with ENV_FILE.open("a", newline="\n") as fid: + # Remove any existing quote chars. + value = str(value).replace('"', "") + fid.write(f'export {name}="{value}"\n') + + +def is_set(var: str) -> bool: + value = os.environ.get(var, "") + return len(value.strip()) > 0 + + +def run_command(cmd: str) -> None: + LOGGER.info("Running command %s...", cmd) + subprocess.check_call(shlex.split(cmd)) # noqa: S603 + LOGGER.info("Running command %s... done.", cmd) + + +def handle_test_env() -> None: + AUTH = os.environ.get("AUTH", "noauth") + SSL = os.environ.get("SSL", "nossl") + TEST_SUITES = os.environ.get("TEST_SUITES", "") + TEST_ARGS = "" + # Start compiling the args we'll pass to uv. + # Run in an isolated environment so as not to pollute the base venv. + UV_ARGS = ["--isolated --extra test"] + + # Save variables in EXPECTED_VARS that have values. + with ENV_FILE.open("w", newline="\n") as fid: + fid.write("#!/usr/bin/env bash\n") + fid.write("set +x\n") + fid.write(f"export AUTH={AUTH}\n") + fid.write(f"export SSL={SSL}\n") + for var in EXPECTED_VARS: + value = os.environ.get(var, "") + # Remove any existing quote chars. + value = value.replace('"', "") + if value: + fid.write(f'export {var}="{value}"\n') + ENV_FILE.chmod(ENV_FILE.stat().st_mode | stat.S_IEXEC) + + for env_var, extra in EXTRAS_MAP.items(): + if env_var in os.environ: + UV_ARGS.append(f"--extra {extra}") + + for env_var, suite in TEST_SUITE_MAP.items(): + if TEST_SUITES: + break + if env_var in os.environ: + TEST_SUITES = suite + + if AUTH != "noauth": + if is_set("TEST_DATA_LAKE"): + DB_USER = os.environ["ADL_USERNAME"] + DB_PASSWORD = os.environ["ADL_PASSWORD"] + elif is_set("TEST_SERVERLESS"): + DB_USER = os.environ("SERVERLESS_ATLAS_USER") + DB_PASSWORD = os.environ("SERVERLESS_ATLAS_PASSWORD") + write_env("MONGODB_URI", os.environ("SERVERLESS_URI")) + write_env("SINGLE_MONGOS_LB_URI", os.environ("SERVERLESS_URI")) + write_env("MULTI_MONGOS_LB_URI", os.environ("SERVERLESS_URI")) + elif is_set("TEST_AUTH_OIDC"): + DB_USER = os.environ["OIDC_ADMIN_USER"] + DB_PASSWORD = os.environ["OIDC_ADMIN_PWD"] + write_env("DB_IP", os.environ["MONGODB_URI"]) + elif is_set("TEST_INDEX_MANAGEMENT"): + DB_USER = os.environ["DRIVERS_ATLAS_LAMBDA_USER"] + DB_PASSWORD = os.environ["DRIVERS_ATLAS_LAMBDA_PASSWORD"] + else: + DB_USER = "bob" + DB_PASSWORD = "pwd123" # noqa: S105 + write_env("DB_USER", DB_USER) + write_env("DB_PASSWORD", DB_PASSWORD) + LOGGER.info("Added auth, DB_USER: %s", DB_USER) + + if is_set("MONGODB_STARTED"): + write_env("PYMONGO_MUST_CONNECT", "true") + + if is_set("DISABLE_TEST_COMMANDS"): + write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") + + if is_set("TEST_ENTERPRISE_AUTH"): + if os.name == "nt": + LOGGER.info("Setting GSSAPI_PASS") + write_env("GSSAPI_PASS", os.environ["SASL_PASS"]) + write_env("GSSAPI_CANONICALIZE", "true") + else: + # BUILD-3830 + krb_conf = ROOT / ".evergreen/krb5.conf.empty" + krb_conf.touch() + write_env("KRB5_CONFIG", krb_conf) + LOGGER.info("Writing keytab") + keytab = base64.b64decode(os.environ["KEYTAB_BASE64"]) + keytab_file = ROOT / ".evergreen/drivers.keytab" + with keytab_file.open("wb") as fid: + fid.write(keytab) + principal = os.environ["PRINCIPAL"] + LOGGER.info("Running kinit") + os.environ["KRB5_CONFIG"] = str(krb_conf) + cmd = f"kinit -k -t {keytab_file} -p {principal}" + run_command(cmd) + + LOGGER.info("Setting GSSAPI variables") + write_env("GSSAPI_HOST", os.environ["SASL_HOST"]) + write_env("GSSAPI_PORT", os.environ["SASL_PORT"]) + write_env("GSSAPI_PRINCIPAL", os.environ["PRINCIPAL"]) + + if is_set("TEST_LOADBALANCER"): + write_env("LOAD_BALANCER", "1") + SINGLE_MONGOS_LB_URI = os.environ.get( + "SINGLE_MONGOS_LB_URI", "mongodb://127.0.0.1:8000/?loadBalanced=true" + ) + MULTI_MONGOS_LB_URI = os.environ.get( + "MULTI_MONGOS_LB_URI", "mongodb://127.0.0.1:8001/?loadBalanced=true" + ) + if SSL != "nossl": + SINGLE_MONGOS_LB_URI += "&tls=true" + MULTI_MONGOS_LB_URI += "&tls=true" + write_env("SINGLE_MONGOS_LB_URI", SINGLE_MONGOS_LB_URI) + write_env("MULTI_MONGOS_LB_URI", MULTI_MONGOS_LB_URI) + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + cmd = f'bash "{DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh" start' + run_command(cmd) + + if SSL != "nossl": + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + write_env("CLIENT_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/client.pem") + write_env("CA_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem") + + compressors = os.environ.get("COMPRESSORS") + if compressors == "snappy": + UV_ARGS.append("--extra snappy") + elif compressors == "zstd": + UV_ARGS.append("--extra zstandard") + + if is_set("TEST_ENCRYPTION") or is_set("TEST_FLE_AZURE_AUTO") or is_set("TEST_FLE_GCP_AUTO"): + # Check for libmongocrypt download. + if not (ROOT / "libmongocrypt").exists(): + run_command(f"bash {HERE.as_posix()}/setup-libmongocrypt.sh") + + # TODO: Test with 'pip install pymongocrypt' + UV_ARGS.append("--group pymongocrypt_source") + + # Use the nocrypto build to avoid dependency issues with older windows/python versions. + BASE = ROOT / "libmongocrypt/nocrypto" + if sys.platform == "linux": + if (BASE / "lib/libmongocrypt.so").exists(): + PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.so" + else: + PYMONGOCRYPT_LIB = BASE / "lib64/libmongocrypt.so" + elif sys.platform == "darwin": + PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.dylib" + else: + PYMONGOCRYPT_LIB = BASE / "bin/mongocrypt.dll" + if not PYMONGOCRYPT_LIB.exists(): + raise RuntimeError("Cannot find libmongocrypt shared object file") + write_env("PYMONGOCRYPT_LIB", PYMONGOCRYPT_LIB.as_posix()) + # PATH is updated by configure-env.sh for access to mongocryptd. + + if is_set("TEST_ENCRYPTION"): + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/setup-secrets.sh") + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/start-servers.sh") + + if is_set("TEST_CRYPT_SHARED"): + CRYPT_SHARED_DIR = Path(os.environ["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() + LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) + if os.name == "nt": + write_env("PATH", f"{CRYPT_SHARED_DIR}:$PATH") + else: + write_env( + "DYLD_FALLBACK_LIBRARY_PATH", + f"{CRYPT_SHARED_DIR}:${{DYLD_FALLBACK_LIBRARY_PATH:-}}", + ) + write_env("LD_LIBRARY_PATH", f"{CRYPT_SHARED_DIR}:${{LD_LIBRARY_PATH:-}}") + + if is_set("TEST_FLE_AZURE_AUTO") or is_set("TEST_FLE_GCP_AUTO"): + if "SUCCESS" not in os.environ: + raise RuntimeError("Must define SUCCESS") + + write_env("SUCCESS", os.environ["SUCCESS"]) + MONGODB_URI = os.environ.get("MONGODB_URI", "") + if "@" in MONGODB_URI: + raise RuntimeError("MONGODB_URI unexpectedly contains user credentials in FLE test!") + + if is_set("TEST_OCSP"): + write_env("CA_FILE", os.environ["CA_FILE"]) + write_env("OCSP_TLS_SHOULD_SUCCEED", os.environ["OCSP_TLS_SHOULD_SUCCEED"]) + + if is_set("PERF_TEST"): + UV_ARGS.append("--group perf") + # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively + # affects the benchmark results. + TEST_ARGS = f"test/performance/perf_test.py {TEST_ARGS}" + + # Add coverage if requested. + # Only cover CPython. PyPy reports suspiciously low coverage. + if is_set("COVERAGE") and platform.python_implementation() == "CPython": + # Keep in sync with combine-coverage.sh. + # coverage >=5 is needed for relative_files=true. + UV_ARGS.append("--group coverage") + TEST_ARGS = f"{TEST_ARGS} --cov" + + if is_set("GREEN_FRAMEWORK"): + framework = os.environ["GREEN_FRAMEWORK"] + UV_ARGS.append(f"--group {framework}") + + else: + # Use --capture=tee-sys so pytest prints test output inline: + # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html + TEST_ARGS = f"-v --capture=tee-sys --durations=5 {TEST_ARGS}" + if TEST_SUITES: + TEST_ARGS = f"-m {TEST_SUITES} {TEST_ARGS}" + + write_env("TEST_ARGS", TEST_ARGS) + write_env("UV_ARGS", " ".join(UV_ARGS)) + + +if __name__ == "__main__": + handle_test_env() diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh index 65462b2a68..80fc717047 100755 --- a/.evergreen/scripts/setup-tests.sh +++ b/.evergreen/scripts/setup-tests.sh @@ -1,27 +1,61 @@ -#!/bin/bash -eux +#!/bin/bash +set -eu -PROJECT_DIRECTORY="$(pwd)" -SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts" +# Supported/used environment variables: +# AUTH Set to enable authentication. Defaults to "noauth" +# SSL Set to enable SSL. Defaults to "nossl" +# GREEN_FRAMEWORK The green framework to test with, if any. +# COVERAGE If non-empty, run the test suite with coverage. +# COMPRESSORS If non-empty, install appropriate compressor. +# LIBMONGOCRYPT_URL The URL to download libmongocrypt. +# TEST_DATA_LAKE If non-empty, run data lake tests. +# TEST_ENCRYPTION If non-empty, run encryption tests. +# TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. +# TEST_SERVERLESS If non-empy, test on serverless. +# TEST_LOADBALANCER If non-empy, test load balancing. +# TEST_FLE_AZURE_AUTO If non-empy, test auto FLE on Azure +# TEST_FLE_GCP_AUTO If non-empy, test auto FLE on GCP +# TEST_PYOPENSSL If non-empy, test with PyOpenSSL +# TEST_ENTERPRISE_AUTH If non-empty, test with Enterprise Auth +# TEST_AUTH_AWS If non-empty, test AWS Auth Mechanism +# TEST_AUTH_OIDC If non-empty, test OIDC Auth Mechanism +# TEST_PERF If non-empty, run performance tests +# TEST_OCSP If non-empty, run OCSP tests +# TEST_ATLAS If non-empty, test Atlas connections +# TEST_INDEX_MANAGEMENT If non-empty, run index management tests +# TEST_ENCRYPTION_PYOPENSSL If non-empy, test encryption with PyOpenSSL +# PERF_TEST If non-empty, run the performance tests. +# MONGODB_URI If non-empty, use as the MONGODB_URI in tests. +# PYTHON_BINARY The python binary to use in tests. -if [ -f "$SCRIPT_DIR/test-env.sh" ]; then - echo "Reading $SCRIPT_DIR/test-env.sh file" - . "$SCRIPT_DIR/test-env.sh" - exit 0 +SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) +ROOT_DIR="$(dirname "$(dirname $SCRIPT_DIR)")" + +# Try to source the env file. +if [ -f $SCRIPT_DIR/env.sh ]; then + source $SCRIPT_DIR/env.sh +fi + +# Source serverless secrets if applicable. +if [ -n "${TEST_SERVERLESS:-}" ]; then + source $DRIVERS_TOOLS/.evergreen/serverless/secrets-export.sh +fi + +# Source atlas secrets if applicable. +if [ -n "${TEST_INDEX_MANAGEMENT:-}" ]; then + source $DRIVERS_TOOLS/.evergreen/atlas/secrets-export.sh +fi + +# Source ADL secrets if applicable. +if [ -n "${TEST_DATA_LAKE:-}" ]; then + source ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh +fi + +# Source local secrets if applicable. +if [ -f "$ROOT_DIR/secrets-export.sh" ]; then + source "$ROOT_DIR/secrets-export.sh" fi -cat < "$SCRIPT_DIR"/test-env.sh -export test_encryption="${test_encryption:-}" -export test_encryption_pyopenssl="${test_encryption_pyopenssl:-}" -export test_crypt_shared="${test_crypt_shared:-}" -export test_pyopenssl="${test_pyopenssl:-}" -export test_loadbalancer="${test_loadbalancer:-}" -export test_serverless="${test_serverless:-}" -export TEST_INDEX_MANAGEMENT="${TEST_INDEX_MANAGEMENT:-}" -export TEST_DATA_LAKE="${TEST_DATA_LAKE:-}" -export ORCHESTRATION_FILE="${ORCHESTRATION_FILE:-}" -export AUTH="${AUTH:-noauth}" -export SSL="${SSL:-nossl}" -export PYTHON_BINARY="${PYTHON_BINARY:-}" -EOT - -chmod +x "$SCRIPT_DIR"/test-env.sh +. $ROOT_DIR/.evergreen/utils.sh +PYTHON=${PYTHON_BINARY:-$(find_python3)} +$PYTHON $SCRIPT_DIR/setup-tests.py diff --git a/.evergreen/scripts/stop-load-balancer.sh b/.evergreen/scripts/stop-load-balancer.sh deleted file mode 100755 index 2d3c5366ec..0000000000 --- a/.evergreen/scripts/stop-load-balancer.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -cd "${DRIVERS_TOOLS}"/.evergreen || exit -DRIVERS_TOOLS=${DRIVERS_TOOLS} -bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh stop diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh new file mode 100755 index 0000000000..9c78c0965c --- /dev/null +++ b/.evergreen/scripts/teardown-tests.sh @@ -0,0 +1,29 @@ +#!/bin/bash +set -eu + +SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) +ROOT_DIR="$(dirname "$(dirname $SCRIPT_DIR)")" + +# Remove temporary test files. +pushd $ROOT_DIR > /dev/null +rm -rf libmongocrypt/ libmongocrypt.tar.gz mongocryptd.pid > /dev/null +popd > /dev/null + +if [ ! -f $SCRIPT_DIR/test-env.sh ]; then + exit 0 +fi +if [ -f $SCRIPT_DIR/env.sh ]; then + source $SCRIPT_DIR/env.sh +fi + +source $SCRIPT_DIR/test-env.sh + +# Shut down csfle servers if applicable +if [ -n "${TEST_ENCRYPTION:-}" ]; then + bash ${DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh +fi + +# Shut down load balancer if applicable. +if [ -n "${TEST_LOADBALANCER:-}" ]; then + bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh stop +fi diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh index c20e1c756e..4c8fa65e2e 100755 --- a/.evergreen/setup-spawn-host.sh +++ b/.evergreen/setup-spawn-host.sh @@ -16,4 +16,4 @@ rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_ echo "Copying files to $target... done" ssh $target $remote_dir/.evergreen/scripts/setup-system.sh -ssh $target "cd $remote_dir && PYTHON_BINARY=${PYTHON_BINARY:-} just install" +ssh $target "cd $remote_dir && PYTHON_BINARY=${PYTHON_BINARY:-} .evergreen/scripts/setup-dev-env.sh" diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh index de3374a008..3f6540ad4d 100755 --- a/.evergreen/sync-spawn-host.sh +++ b/.evergreen/sync-spawn-host.sh @@ -7,9 +7,12 @@ fi target=$1 user=${target%@*} +remote_dir=/home/$user/mongo-python-driver +echo "Copying files to $target..." +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir +echo "Copying files to $target... done." echo "Syncing files to $target..." -rsync -haz -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/$user/mongo-python-driver # shellcheck disable=SC2034 fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/$user/mongo-python-driver; done echo "Syncing files to $target... done." diff --git a/.evergreen/teardown-encryption.sh b/.evergreen/teardown-encryption.sh deleted file mode 100755 index 5ce2f1d71b..0000000000 --- a/.evergreen/teardown-encryption.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -set -o xtrace - -if [ -z "${DRIVERS_TOOLS}" ]; then - echo "Missing environment variable DRIVERS_TOOLS" -fi - -bash ${DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh -rm -rf libmongocrypt/ libmongocrypt.tar.gz mongocryptd.pid diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 3760e308a5..5f95aa4a5e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -22,13 +22,13 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - uses: actions/setup-python@v5 - with: - python-version: "3.9" - cache: 'pip' - cache-dependency-path: 'pyproject.toml' - name: Install just uses: extractions/setup-just@v2 + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + enable-cache: true + python-version: "3.9" - name: Install Python dependencies run: | just install @@ -61,42 +61,21 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - if: ${{ matrix.python-version == '3.13t' }} - name: Setup free-threaded Python - uses: deadsnakes/action@v3.2.0 - with: - python-version: 3.13 - nogil: true - - if: ${{ matrix.python-version != '3.13t' }} - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python-version }} - cache: 'pip' - cache-dependency-path: 'pyproject.toml' - allow-prereleases: true - name: Install just uses: extractions/setup-just@v2 + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + enable-cache: true + python-version: ${{ matrix.python-version }} - name: Install dependencies - run: | - if [[ "${{ matrix.python-version }}" == "3.13t" ]]; then - # Just can't be installed on 3.13t, use pytest directly. - pip install . - pip install -r requirements/test.txt - else - just install - fi + run: just install - name: Start MongoDB uses: supercharge/mongodb-github-action@1.12.0 with: mongodb-version: 6.0 - name: Run tests - run: | - if [[ "${{ matrix.python-version }}" == "3.13t" ]]; then - pytest -v --durations=5 --maxfail=10 - else - just test - fi + run: just test doctest: runs-on: ubuntu-latest @@ -105,24 +84,21 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.9" - cache: 'pip' - cache-dependency-path: 'pyproject.toml' - name: Install just uses: extractions/setup-just@v2 + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + enable-cache: true + python-version: "3.9" - name: Start MongoDB uses: supercharge/mongodb-github-action@1.12.0 with: mongodb-version: '8.0.0-rc4' - name: Install dependencies - run: | - just install + run: just install - name: Run tests - run: | - just docs-test + run: just docs-test docs: name: Docs Checks @@ -131,20 +107,17 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - uses: actions/setup-python@v5 + - name: Install uv + uses: astral-sh/setup-uv@v5 with: - cache: 'pip' - cache-dependency-path: 'pyproject.toml' - # Build docs on lowest supported Python for furo - python-version: '3.9' + enable-cache: true + python-version: "3.9" - name: Install just uses: extractions/setup-just@v2 - name: Install dependencies - run: | - just install + run: just install - name: Build docs - run: | - just docs + run: just docs linkcheck: name: Link Check @@ -153,20 +126,17 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - uses: actions/setup-python@v5 + - name: Install uv + uses: astral-sh/setup-uv@v5 with: - cache: 'pip' - cache-dependency-path: 'pyproject.toml' - # Build docs on lowest supported Python for furo - python-version: '3.9' + enable-cache: true + python-version: "3.9" - name: Install just uses: extractions/setup-just@v2 - name: Install dependencies - run: | - just install + run: just install - name: Build docs - run: | - just docs-linkcheck + run: just docs-linkcheck typing: name: Typing Tests @@ -178,11 +148,11 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - uses: actions/setup-python@v5 + - name: Install uv + uses: astral-sh/setup-uv@v5 with: + enable-cache: true python-version: "${{matrix.python}}" - cache: 'pip' - cache-dependency-path: 'pyproject.toml' - name: Install just uses: extractions/setup-just@v2 - name: Install dependencies diff --git a/.gitignore b/.gitignore index 2582c517fd..8c095c2157 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ libmongocrypt/ expansion.yml *expansions.yml .evergreen/scripts/env.sh +.evergreen/scripts/test-env.sh # Lambda temp files test/lambda/.aws-sam diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f67077e57d..ce5bebf181 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -211,18 +211,18 @@ the pages will re-render and the browser will automatically refresh. `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. - Start the servers using `LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh`. -- Start the load balancer using: - `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start`. +- Set up the test using: + `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' TEST_LOADBALANCER=1 just setup-test`. - Run the tests from the `pymongo` checkout directory using: - `TEST_LOADBALANCER=1 just test-eg`. + `just test-eg`. ## Running Encryption Tests Locally - Clone `drivers-evergreen-tools`: `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. - Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools` -- Run `AWS_PROFILE= just setup-encryption` after setting up your AWS profile with `aws configure sso`. -- Run the tests with `TEST_ENCRYPTION=1 just test-eg`. -- When done, run `just teardown-encryption` to clean up. +- Run `TEST_ENCRYPTION=1 AWS_PROFILE= just setup-test` after setting up your AWS profile with `aws configure sso`. +- Run the tests with `just test-eg`. +- When done, run `just teardown-test` to clean up. ## Re-sync Spec Tests diff --git a/justfile b/justfile index 8a076038a4..bf1355576d 100644 --- a/justfile +++ b/justfile @@ -70,10 +70,10 @@ test-mockupdb *args: test-eg *args: bash ./.evergreen/run-tests.sh {{args}} -[group('encryption')] -setup-encryption: - bash .evergreen/setup-encryption.sh +[group('test')] +setup-test: + bash .evergreen/scripts/setup-tests.sh -[group('encryption')] -teardown-encryption: - bash .evergreen/teardown-encryption.sh +[group('test')] +teardown-test: + bash .evergreen/scripts/teardown-tests.sh From 338b7dcd494f31f03e07c631840214107ce91f93 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 13 Feb 2025 13:01:42 -0800 Subject: [PATCH 1748/2111] PYTHON-5092 - Convert test.test_pooling to async (#2145) --- test/asynchronous/test_pooling.py | 595 ++++++++++++++++++++++++++++++ test/test_pooling.py | 107 +++--- tools/synchro.py | 1 + 3 files changed, 645 insertions(+), 58 deletions(-) create mode 100644 test/asynchronous/test_pooling.py diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py new file mode 100644 index 0000000000..09b8fb0853 --- /dev/null +++ b/test/asynchronous/test_pooling.py @@ -0,0 +1,595 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test built in connection-pooling with threads.""" +from __future__ import annotations + +import asyncio +import gc +import random +import socket +import sys +import time + +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.son import SON +from pymongo import AsyncMongoClient, message, timeout +from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError +from pymongo.hello import HelloCompat +from pymongo.lock import _async_create_lock + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.helpers import ConcurrentRunner +from test.utils import async_get_pool, async_joinall, delay + +from pymongo.asynchronous.pool import Pool, PoolOptions +from pymongo.socket_checker import SocketChecker + +_IS_SYNC = False + + +N = 10 +DB = "pymongo-pooling-tests" + + +async def gc_collect_until_done(tasks, timeout=60): + start = time.time() + running = list(tasks) + while running: + assert (time.time() - start) < timeout, "Tasks timed out" + for t in running: + await t.join(0.1) + if not t.is_alive(): + running.remove(t) + gc.collect() + + +class MongoTask(ConcurrentRunner): + """A thread/Task that uses a AsyncMongoClient.""" + + def __init__(self, client): + super().__init__() + self.daemon = True # Don't hang whole test if task hangs. + self.client = client + self.db = self.client[DB] + self.passed = False + + async def run(self): + await self.run_mongo_thread() + self.passed = True + + async def run_mongo_thread(self): + raise NotImplementedError + + +class InsertOneAndFind(MongoTask): + async def run_mongo_thread(self): + for _ in range(N): + rand = random.randint(0, N) + _id = (await self.db.sf.insert_one({"x": rand})).inserted_id + assert rand == (await self.db.sf.find_one(_id))["x"] + + +class Unique(MongoTask): + async def run_mongo_thread(self): + for _ in range(N): + await self.db.unique.insert_one({}) # no error + + +class NonUnique(MongoTask): + async def run_mongo_thread(self): + for _ in range(N): + try: + await self.db.unique.insert_one({"_id": "jesse"}) + except DuplicateKeyError: + pass + else: + raise AssertionError("Should have raised DuplicateKeyError") + + +class SocketGetter(MongoTask): + """Utility for TestPooling. + + Checks out a socket and holds it forever. Used in + test_no_wait_queue_timeout. + """ + + def __init__(self, client, pool): + super().__init__(client) + self.state = "init" + self.pool = pool + self.sock = None + + async def run_mongo_thread(self): + self.state = "get_socket" + + # Call 'pin_cursor' so we can hold the socket. + async with self.pool.checkout() as sock: + sock.pin_cursor() + self.sock = sock + + self.state = "connection" + + def __del__(self): + if self.sock: + self.sock.close_conn(None) + + +async def run_cases(client, cases): + tasks = [] + n_runs = 5 + + for case in cases: + for _i in range(n_runs): + t = case(client) + await t.start() + tasks.append(t) + + for t in tasks: + await t.join() + + for t in tasks: + assert t.passed, "%s.run() threw an exception" % repr(t) + + +class _TestPoolingBase(AsyncIntegrationTest): + """Base class for all connection-pool tests.""" + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.c = await self.async_rs_or_single_client() + db = self.c[DB] + await db.unique.drop() + await db.test.drop() + await db.unique.insert_one({"_id": "jesse"}) + await db.test.insert_many([{} for _ in range(10)]) + + async def create_pool(self, pair=None, *args, **kwargs): + if pair is None: + pair = (await async_client_context.host, await async_client_context.port) + # Start the pool with the correct ssl options. + pool_options = async_client_context.client._topology_settings.pool_options + kwargs["ssl_context"] = pool_options._ssl_context + kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames + kwargs["server_api"] = pool_options.server_api + pool = Pool(pair, PoolOptions(*args, **kwargs)) + await pool.ready() + return pool + + +class TestPooling(_TestPoolingBase): + async def test_max_pool_size_validation(self): + host, port = await async_client_context.host, await async_client_context.port + self.assertRaises(ValueError, AsyncMongoClient, host=host, port=port, maxPoolSize=-1) + + self.assertRaises(ValueError, AsyncMongoClient, host=host, port=port, maxPoolSize="foo") + + c = AsyncMongoClient(host=host, port=port, maxPoolSize=100, connect=False) + self.assertEqual(c.options.pool_options.max_pool_size, 100) + + async def test_no_disconnect(self): + await run_cases(self.c, [NonUnique, Unique, InsertOneAndFind]) + + async def test_pool_reuses_open_socket(self): + # Test Pool's _check_closed() method doesn't close a healthy socket. + cx_pool = await self.create_pool(max_pool_size=10) + cx_pool._check_interval_seconds = 0 # Always check. + async with cx_pool.checkout() as conn: + pass + + async with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + async def test_get_socket_and_exception(self): + # get_socket() returns socket after a non-network error. + cx_pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=1) + with self.assertRaises(ZeroDivisionError): + async with cx_pool.checkout() as conn: + 1 / 0 + + # Socket was returned, not closed. + async with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + async def test_pool_removes_closed_socket(self): + # Test that Pool removes explicitly closed socket. + cx_pool = await self.create_pool() + + async with cx_pool.checkout() as conn: + # Use Connection's API to close the socket. + conn.close_conn(None) + + self.assertEqual(0, len(cx_pool.conns)) + + async def test_pool_removes_dead_socket(self): + # Test that Pool removes dead socket and the socket doesn't return + # itself PYTHON-344 + cx_pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=1) + cx_pool._check_interval_seconds = 0 # Always check. + + async with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's + # closed. + conn.conn.close() + self.assertTrue(conn.conn_closed()) + + async with cx_pool.checkout() as new_connection: + self.assertEqual(0, len(cx_pool.conns)) + self.assertNotEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + # Semaphore was released. + async with cx_pool.checkout(): + pass + + async def test_socket_closed(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((await async_client_context.host, await async_client_context.port)) + socket_checker = SocketChecker() + self.assertFalse(socket_checker.socket_closed(s)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + + async def test_socket_checker(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((await async_client_context.host, await async_client_context.port)) + socket_checker = SocketChecker() + # Socket has nothing to read. + self.assertFalse(socket_checker.select(s, read=True)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + # Make the socket readable + _, msg, _ = message._query( + 0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS + ) + s.sendall(msg) + # Block until the socket is readable. + self.assertTrue(socket_checker.select(s, read=True, timeout=None)) + self.assertTrue(socket_checker.select(s, read=True)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is still writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + + async def test_return_socket_after_reset(self): + pool = await self.create_pool() + async with pool.checkout() as sock: + self.assertEqual(pool.active_sockets, 1) + self.assertEqual(pool.operation_count, 1) + await pool.reset() + + self.assertTrue(sock.closed) + self.assertEqual(0, len(pool.conns)) + self.assertEqual(pool.active_sockets, 0) + self.assertEqual(pool.operation_count, 0) + + async def test_pool_check(self): + # Test that Pool recovers from two connection failures in a row. + # This exercises code at the end of Pool._check(). + cx_pool = await self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) + cx_pool._check_interval_seconds = 0 # Always check. + self.addAsyncCleanup(cx_pool.close) + + async with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's + # closed. + conn.conn.close() + + # Swap pool's address with a bad one. + address, cx_pool.address = cx_pool.address, ("foo.com", 1234) + with self.assertRaises(AutoReconnect): + async with cx_pool.checkout(): + pass + + # Back to normal, semaphore was correctly released. + cx_pool.address = address + async with cx_pool.checkout(): + pass + + async def test_wait_queue_timeout(self): + wait_queue_timeout = 2 # Seconds + pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) + self.addAsyncCleanup(pool.close) + + async with pool.checkout(): + start = time.time() + with self.assertRaises(ConnectionFailure): + async with pool.checkout(): + pass + + duration = time.time() - start + self.assertTrue( + abs(wait_queue_timeout - duration) < 1, + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", + ) + + async def test_no_wait_queue_timeout(self): + # Verify get_socket() with no wait_queue_timeout blocks forever. + pool = await self.create_pool(max_pool_size=1) + self.addAsyncCleanup(pool.close) + + # Reach max_size. + async with pool.checkout() as s1: + t = SocketGetter(self.c, pool) + await t.start() + while t.state != "get_socket": + await asyncio.sleep(0.1) + + await asyncio.sleep(1) + self.assertEqual(t.state, "get_socket") + + while t.state != "connection": + await asyncio.sleep(0.1) + + self.assertEqual(t.state, "connection") + self.assertEqual(t.sock, s1) + + async def test_checkout_more_than_max_pool_size(self): + pool = await self.create_pool(max_pool_size=2) + + socks = [] + for _ in range(2): + # Call 'pin_cursor' so we can hold the socket. + async with pool.checkout() as sock: + sock.pin_cursor() + socks.append(sock) + + tasks = [] + for _ in range(30): + t = SocketGetter(self.c, pool) + await t.start() + tasks.append(t) + await asyncio.sleep(1) + for t in tasks: + self.assertEqual(t.state, "get_socket") + + for socket_info in socks: + socket_info.close_conn(None) + + async def test_maxConnecting(self): + client = await self.async_rs_or_single_client() + await self.client.test.test.insert_one({}) + self.addAsyncCleanup(self.client.test.test.delete_many, {}) + pool = await async_get_pool(client) + docs = [] + + # Run 50 short running operations + async def find_one(): + docs.append(await client.test.test.find_one({})) + + tasks = [ConcurrentRunner(target=find_one) for _ in range(50)] + for task in tasks: + await task.start() + for task in tasks: + await task.join(10) + + self.assertEqual(len(docs), 50) + self.assertLessEqual(len(pool.conns), 50) + # TLS and auth make connection establishment more expensive than + # the query which leads to more threads hitting maxConnecting. + # The end result is fewer total connections and better latency. + if async_client_context.tls and async_client_context.auth_enabled: + self.assertLessEqual(len(pool.conns), 30) + else: + self.assertLessEqual(len(pool.conns), 50) + # MongoDB 4.4.1 with auth + ssl: + # maxConnecting = 2: 6 connections in ~0.231+ seconds + # maxConnecting = unbounded: 50 connections in ~0.642+ seconds + # + # MongoDB 4.4.1 with no-auth no-ssl Python 3.8: + # maxConnecting = 2: 15-22 connections in ~0.108+ seconds + # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds + print(len(pool.conns)) + + @async_client_context.require_failCommand_appName + async def test_csot_timeout_message(self): + client = await self.async_rs_or_single_client(appName="connectionTimeoutApp") + # Mock an operation failing due to pymongo.timeout(). + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + await client.db.t.insert_one({"x": 1}) + + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + with timeout(0.5): + await client.db.t.find_one({"$where": delay(2)}) + + self.assertTrue("(configured timeouts: timeoutMS: 500.0ms" in str(error.exception)) + + @async_client_context.require_failCommand_appName + async def test_socket_timeout_message(self): + client = await self.async_rs_or_single_client( + socketTimeoutMS=500, appName="connectionTimeoutApp" + ) + # Mock an operation failing due to socketTimeoutMS. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + await client.db.t.insert_one({"x": 1}) + + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + await client.db.t.find_one({"$where": delay(2)}) + + self.assertTrue( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)" + in str(error.exception) + ) + + @async_client_context.require_failCommand_appName + async def test_connection_timeout_message(self): + # Mock a connection creation failing due to timeout. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "appName": "connectionTimeoutApp", + }, + } + + client = await self.async_rs_or_single_client( + connectTimeoutMS=500, + socketTimeoutMS=500, + appName="connectionTimeoutApp", + heartbeatFrequencyMS=1000000, + ) + await client.admin.command("ping") + pool = await async_get_pool(client) + await pool.reset_without_pause() + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + await client.admin.command("ping") + + self.assertTrue( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)" + in str(error.exception) + ) + + +class TestPoolMaxSize(_TestPoolingBase): + async def test_max_pool_size(self): + max_pool_size = 4 + c = await self.async_rs_or_single_client(maxPoolSize=max_pool_size) + collection = c[DB].test + + # Need one document. + await collection.drop() + await collection.insert_one({}) + + # ntasks had better be much larger than max_pool_size to ensure that + # max_pool_size connections are actually required at some point in this + # test's execution. + cx_pool = await async_get_pool(c) + ntasks = 10 + tasks = [] + lock = _async_create_lock() + self.n_passed = 0 + + async def f(): + for _ in range(5): + await collection.find_one({"$where": delay(0.1)}) + assert len(cx_pool.conns) <= max_pool_size + + async with lock: + self.n_passed += 1 + + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) + await t.start() + + await async_joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertTrue(len(cx_pool.conns) > 1) + self.assertEqual(0, cx_pool.requests) + + async def test_max_pool_size_none(self): + c = await self.async_rs_or_single_client(maxPoolSize=None) + collection = c[DB].test + + # Need one document. + await collection.drop() + await collection.insert_one({}) + + cx_pool = await async_get_pool(c) + ntasks = 10 + tasks = [] + lock = _async_create_lock() + self.n_passed = 0 + + async def f(): + for _ in range(5): + await collection.find_one({"$where": delay(0.1)}) + + async with lock: + self.n_passed += 1 + + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) + await t.start() + + await async_joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertTrue(len(cx_pool.conns) > 1) + self.assertEqual(cx_pool.max_pool_size, float("inf")) + + async def test_max_pool_size_zero(self): + c = await self.async_rs_or_single_client(maxPoolSize=0) + pool = await async_get_pool(c) + self.assertEqual(pool.max_pool_size, float("inf")) + + async def test_max_pool_size_with_connection_failure(self): + # The pool acquires its semaphore before attempting to connect; ensure + # it releases the semaphore on connection failure. + test_pool = Pool( + ("somedomainthatdoesntexist.org", 27017), + PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1), + ) + await test_pool.ready() + + # First call to get_socket fails; if pool doesn't release its semaphore + # then the second call raises "ConnectionFailure: Timed out waiting for + # socket from pool" instead of AutoReconnect. + for _i in range(2): + with self.assertRaises(AutoReconnect) as context: + async with test_pool.checkout(): + pass + + # Testing for AutoReconnect instead of ConnectionFailure, above, + # is sufficient right *now* to catch a semaphore leak. But that + # seems error-prone, so check the message too. + self.assertNotIn("waiting for socket from pool", str(context.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_pooling.py b/test/test_pooling.py index 3b867965bd..5d23b85f23 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -15,11 +15,11 @@ """Test built in connection-pooling with threads.""" from __future__ import annotations +import asyncio import gc import random import socket import sys -import threading import time from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -27,30 +27,29 @@ from pymongo import MongoClient, message, timeout from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest +from test.helpers import ConcurrentRunner from test.utils import delay, get_pool, joinall from pymongo.socket_checker import SocketChecker from pymongo.synchronous.pool import Pool, PoolOptions - -@client_context.require_connection -def setUpModule(): - pass +_IS_SYNC = True N = 10 DB = "pymongo-pooling-tests" -def gc_collect_until_done(threads, timeout=60): +def gc_collect_until_done(tasks, timeout=60): start = time.time() - running = list(threads) + running = list(tasks) while running: - assert (time.time() - start) < timeout, "Threads timed out" + assert (time.time() - start) < timeout, "Tasks timed out" for t in running: t.join(0.1) if not t.is_alive(): @@ -58,12 +57,12 @@ def gc_collect_until_done(threads, timeout=60): gc.collect() -class MongoThread(threading.Thread): - """A thread that uses a MongoClient.""" +class MongoTask(ConcurrentRunner): + """A thread/Task that uses a MongoClient.""" def __init__(self, client): super().__init__() - self.daemon = True # Don't hang whole test if thread hangs. + self.daemon = True # Don't hang whole test if task hangs. self.client = client self.db = self.client[DB] self.passed = False @@ -76,21 +75,21 @@ def run_mongo_thread(self): raise NotImplementedError -class InsertOneAndFind(MongoThread): +class InsertOneAndFind(MongoTask): def run_mongo_thread(self): for _ in range(N): rand = random.randint(0, N) - _id = self.db.sf.insert_one({"x": rand}).inserted_id - assert rand == self.db.sf.find_one(_id)["x"] + _id = (self.db.sf.insert_one({"x": rand})).inserted_id + assert rand == (self.db.sf.find_one(_id))["x"] -class Unique(MongoThread): +class Unique(MongoTask): def run_mongo_thread(self): for _ in range(N): self.db.unique.insert_one({}) # no error -class NonUnique(MongoThread): +class NonUnique(MongoTask): def run_mongo_thread(self): for _ in range(N): try: @@ -101,7 +100,7 @@ def run_mongo_thread(self): raise AssertionError("Should have raised DuplicateKeyError") -class SocketGetter(MongoThread): +class SocketGetter(MongoTask): """Utility for TestPooling. Checks out a socket and holds it forever. Used in @@ -130,25 +129,26 @@ def __del__(self): def run_cases(client, cases): - threads = [] + tasks = [] n_runs = 5 for case in cases: for _i in range(n_runs): t = case(client) t.start() - threads.append(t) + tasks.append(t) - for t in threads: + for t in tasks: t.join() - for t in threads: + for t in tasks: assert t.passed, "%s.run() threw an exception" % repr(t) class _TestPoolingBase(IntegrationTest): """Base class for all connection-pool tests.""" + @client_context.require_connection def setUp(self): super().setUp() self.c = self.rs_or_single_client() @@ -158,11 +158,9 @@ def setUp(self): db.unique.insert_one({"_id": "jesse"}) db.test.insert_many([{} for _ in range(10)]) - def tearDown(self): - self.c.close() - super().tearDown() - - def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): + def create_pool(self, pair=None, *args, **kwargs): + if pair is None: + pair = (client_context.host, client_context.port) # Start the pool with the correct ssl options. pool_options = client_context.client._topology_settings.pool_options kwargs["ssl_context"] = pool_options._ssl_context @@ -365,13 +363,13 @@ def test_checkout_more_than_max_pool_size(self): sock.pin_cursor() socks.append(sock) - threads = [] + tasks = [] for _ in range(30): t = SocketGetter(self.c, pool) t.start() - threads.append(t) + tasks.append(t) time.sleep(1) - for t in threads: + for t in tasks: self.assertEqual(t.state, "get_socket") for socket_info in socks: @@ -379,7 +377,6 @@ def test_checkout_more_than_max_pool_size(self): def test_maxConnecting(self): client = self.rs_or_single_client() - self.addCleanup(client.close) self.client.test.test.insert_one({}) self.addCleanup(self.client.test.test.delete_many, {}) pool = get_pool(client) @@ -389,11 +386,11 @@ def test_maxConnecting(self): def find_one(): docs.append(client.test.test.find_one({})) - threads = [threading.Thread(target=find_one) for _ in range(50)] - for thread in threads: - thread.start() - for thread in threads: - thread.join(10) + tasks = [ConcurrentRunner(target=find_one) for _ in range(50)] + for task in tasks: + task.start() + for task in tasks: + task.join(10) self.assertEqual(len(docs), 50) self.assertLessEqual(len(pool.conns), 50) @@ -416,7 +413,6 @@ def find_one(): @client_context.require_failCommand_appName def test_csot_timeout_message(self): client = self.rs_or_single_client(appName="connectionTimeoutApp") - self.addCleanup(client.close) # Mock an operation failing due to pymongo.timeout(). mock_connection_timeout = { "configureFailPoint": "failCommand", @@ -441,7 +437,6 @@ def test_csot_timeout_message(self): @client_context.require_failCommand_appName def test_socket_timeout_message(self): client = self.rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") - self.addCleanup(client.close) # Mock an operation failing due to socketTimeoutMS. mock_connection_timeout = { "configureFailPoint": "failCommand", @@ -485,7 +480,6 @@ def test_connection_timeout_message(self): appName="connectionTimeoutApp", heartbeatFrequencyMS=1000000, ) - self.addCleanup(client.close) client.admin.command("ping") pool = get_pool(client) pool.reset_without_pause() @@ -503,20 +497,19 @@ class TestPoolMaxSize(_TestPoolingBase): def test_max_pool_size(self): max_pool_size = 4 c = self.rs_or_single_client(maxPoolSize=max_pool_size) - self.addCleanup(c.close) collection = c[DB].test # Need one document. collection.drop() collection.insert_one({}) - # nthreads had better be much larger than max_pool_size to ensure that + # ntasks had better be much larger than max_pool_size to ensure that # max_pool_size connections are actually required at some point in this # test's execution. cx_pool = get_pool(c) - nthreads = 10 - threads = [] - lock = threading.Lock() + ntasks = 10 + tasks = [] + lock = _create_lock() self.n_passed = 0 def f(): @@ -527,19 +520,18 @@ def f(): with lock: self.n_passed += 1 - for _i in range(nthreads): - t = threading.Thread(target=f) - threads.append(t) + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) t.start() - joinall(threads) - self.assertEqual(nthreads, self.n_passed) + joinall(tasks) + self.assertEqual(ntasks, self.n_passed) self.assertTrue(len(cx_pool.conns) > 1) self.assertEqual(0, cx_pool.requests) def test_max_pool_size_none(self): c = self.rs_or_single_client(maxPoolSize=None) - self.addCleanup(c.close) collection = c[DB].test # Need one document. @@ -547,9 +539,9 @@ def test_max_pool_size_none(self): collection.insert_one({}) cx_pool = get_pool(c) - nthreads = 10 - threads = [] - lock = threading.Lock() + ntasks = 10 + tasks = [] + lock = _create_lock() self.n_passed = 0 def f(): @@ -559,19 +551,18 @@ def f(): with lock: self.n_passed += 1 - for _i in range(nthreads): - t = threading.Thread(target=f) - threads.append(t) + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) t.start() - joinall(threads) - self.assertEqual(nthreads, self.n_passed) + joinall(tasks) + self.assertEqual(ntasks, self.n_passed) self.assertTrue(len(cx_pool.conns) > 1) self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): c = self.rs_or_single_client(maxPoolSize=0) - self.addCleanup(c.close) pool = get_pool(c) self.assertEqual(pool.max_pool_size, float("inf")) diff --git a/tools/synchro.py b/tools/synchro.py index 69a2f07ba6..519ebb102b 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -228,6 +228,7 @@ def async_only_test(f: str) -> bool: "test_monitoring.py", "test_mongos_load_balancing.py", "test_on_demand_csfle.py", + "test_pooling.py", "test_raw_bson.py", "test_read_concern.py", "test_read_preferences.py", From 3e2967147e0fb857160205341e207b03c2eac87b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Feb 2025 15:45:08 -0600 Subject: [PATCH 1749/2111] PYTHON-5134 Fix binary installation and remove unused scripts (#2146) --- .evergreen/scripts/archive-mongodb-logs.sh | 8 -------- .evergreen/scripts/configure-env.sh | 19 +++++++++++++++++-- .evergreen/scripts/fix-absolute-paths.sh | 8 -------- .evergreen/scripts/init-test-results.sh | 5 ----- .evergreen/scripts/install-dependencies.sh | 12 +++++++++++- .evergreen/scripts/make-files-executable.sh | 8 -------- .evergreen/scripts/prepare-resources.sh | 4 ---- .evergreen/scripts/setup-dev-env.sh | 2 +- .evergreen/scripts/windows-fix.sh | 11 ----------- justfile | 2 -- 10 files changed, 29 insertions(+), 50 deletions(-) delete mode 100755 .evergreen/scripts/archive-mongodb-logs.sh delete mode 100755 .evergreen/scripts/fix-absolute-paths.sh delete mode 100755 .evergreen/scripts/init-test-results.sh delete mode 100755 .evergreen/scripts/make-files-executable.sh delete mode 100755 .evergreen/scripts/windows-fix.sh diff --git a/.evergreen/scripts/archive-mongodb-logs.sh b/.evergreen/scripts/archive-mongodb-logs.sh deleted file mode 100755 index 70a337cd11..0000000000 --- a/.evergreen/scripts/archive-mongodb-logs.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -o xtrace -mkdir out_dir -# shellcheck disable=SC2156 -find "$MONGO_ORCHESTRATION_HOME" -name \*.log -exec sh -c 'x="{}"; mv $x $PWD/out_dir/$(basename $(dirname $x))_$(basename $x)' \; -tar zcvf mongodb-logs.tar.gz -C out_dir/ . -rm -rf out_dir diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index a008499ea2..16212ad3b1 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -17,6 +17,7 @@ CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo} UV_TOOL_DIR=$PROJECT_DIRECTORY/.local/uv/tools UV_CACHE_DIR=$PROJECT_DIRECTORY/.local/uv/cache DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS/.bin" +MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" # On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME. if [ "${CI:-}" == "true" ]; then @@ -26,6 +27,8 @@ else PYMONGO_BIN_DIR=$HOME/cli_bin fi +PATH_EXT="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PYMONGO_BIN_DIR:\$PATH" + # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) @@ -34,6 +37,7 @@ if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin UV_TOOL_DIR=$(cygpath -m "$UV_TOOL_DIR") UV_CACHE_DIR=$(cygpath -m "$UV_CACHE_DIR") DRIVERS_TOOLS_BINARIES=$(cygpath -m "$DRIVERS_TOOLS_BINARIES") + MONGODB_BINARIES=$(cygpath -m "$MONGODB_BINARIES") PYMONGO_BIN_DIR=$(cygpath -m "$PYMONGO_BIN_DIR") fi @@ -73,17 +77,28 @@ export skip_web_identity_auth_test="${skip_web_identity_auth_test:-}" export skip_ECS_auth_test="${skip_ECS_auth_test:-}" export CARGO_HOME="$CARGO_HOME" -export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" export UV_TOOL_DIR="$UV_TOOL_DIR" export UV_CACHE_DIR="$UV_CACHE_DIR" export UV_TOOL_BIN_DIR="$DRIVERS_TOOLS_BINARIES" export PYMONGO_BIN_DIR="$PYMONGO_BIN_DIR" -export PATH="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PYMONGO_BIN_DIR:$PATH" +export PATH="$PATH_EXT" # shellcheck disable=SC2154 export PROJECT="${project:-mongo-python-driver}" export PIP_QUIET=1 EOT +# Write the .env file for drivers-tools. +rm -rf $DRIVERS_TOOLS +git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS + +cat < ${DRIVERS_TOOLS}/.env +SKIP_LEGACY_SHELL=1 +DRIVERS_TOOLS="$DRIVERS_TOOLS" +MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" +MONGODB_BINARIES="$MONGODB_BINARIES" +TMPDIR="$MONGO_ORCHESTRATION_HOME/db" +EOT + # Skip CSOT tests on non-linux platforms. if [ "$(uname -s)" != "Linux" ]; then echo "export SKIP_CSOT_TESTS=1" >> $SCRIPT_DIR/env.sh diff --git a/.evergreen/scripts/fix-absolute-paths.sh b/.evergreen/scripts/fix-absolute-paths.sh deleted file mode 100755 index eb9433c673..0000000000 --- a/.evergreen/scripts/fix-absolute-paths.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set +x -. src/.evergreen/scripts/env.sh -# shellcheck disable=SC2044 -for filename in $(find $DRIVERS_TOOLS -name \*.json); do - perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|$DRIVERS_TOOLS|g" $filename -done diff --git a/.evergreen/scripts/init-test-results.sh b/.evergreen/scripts/init-test-results.sh deleted file mode 100755 index 666ac60620..0000000000 --- a/.evergreen/scripts/init-test-results.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set +x -. src/.evergreen/scripts/env.sh -echo '{"results": [{ "status": "FAIL", "test_file": "Build", "log_raw": "No test-results.json found was created" } ]}' >$PROJECT_DIRECTORY/test-results.json diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index e2598edcad..31ae4c1735 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -18,10 +18,17 @@ function _pip_install() { _HERE=$(dirname ${BASH_SOURCE:-$0}) . $_HERE/../utils.sh _VENV_PATH=$(mktemp -d) + if [ "Windows_NT" = "${OS:-}" ]; then + _VENV_PATH=$(cygpath -m $_VENV_PATH) + fi echo "Installing $2 using pip..." createvirtualenv "$(find_python3)" $_VENV_PATH python -m pip install $1 - ln -s "$(which $2)" $_BIN_DIR/$2 + if [ "Windows_NT" = "${OS:-}" ]; then + ln -s "$(which $2)" $_BIN_DIR/$2.exe + else + ln -s "$(which $2)" $_BIN_DIR/$2 + fi echo "Installed to ${_BIN_DIR}" echo "Installing $2 using pip... done." } @@ -49,6 +56,9 @@ if ! command -v uv 2>/dev/null; then curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { _pip_install uv uv } + if [ "Windows_NT" = "${OS:-}" ]; then + chmod +x "$(cygpath -u $_BIN_DIR)/uv.exe" + fi echo "Installing uv... done." fi diff --git a/.evergreen/scripts/make-files-executable.sh b/.evergreen/scripts/make-files-executable.sh deleted file mode 100755 index 806be7c599..0000000000 --- a/.evergreen/scripts/make-files-executable.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set +x -. src/.evergreen/scripts/env.sh -# shellcheck disable=SC2044 -for i in $(find "$DRIVERS_TOOLS"/.evergreen "$PROJECT_DIRECTORY"/.evergreen -name \*.sh); do - chmod +x "$i" -done diff --git a/.evergreen/scripts/prepare-resources.sh b/.evergreen/scripts/prepare-resources.sh index da869e7055..f5285a39de 100755 --- a/.evergreen/scripts/prepare-resources.sh +++ b/.evergreen/scripts/prepare-resources.sh @@ -5,10 +5,6 @@ HERE=$(dirname ${BASH_SOURCE:-$0}) pushd $HERE . env.sh -rm -rf $DRIVERS_TOOLS -git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS -echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" >$MONGO_ORCHESTRATION_HOME/orchestration.config - popd # Copy PyMongo's test certificates over driver-evergreen-tools' diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 0cbf53e19e..b26dc3ae0e 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -eu +set -eux HERE=$(dirname ${BASH_SOURCE:-$0}) pushd "$(dirname "$(dirname $HERE)")" > /dev/null diff --git a/.evergreen/scripts/windows-fix.sh b/.evergreen/scripts/windows-fix.sh deleted file mode 100755 index cb4fa44130..0000000000 --- a/.evergreen/scripts/windows-fix.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set +x -. src/.evergreen/scripts/env.sh -# shellcheck disable=SC2044 -for i in $(find "$DRIVERS_TOOLS"/.evergreen "$PROJECT_DIRECTORY"/.evergreen -name \*.sh); do - < "$i" tr -d '\r' >"$i".new - mv "$i".new "$i" -done -# Copy client certificate because symlinks do not work on Windows. -cp "$DRIVERS_TOOLS"/.evergreen/x509gen/client.pem "$MONGO_ORCHESTRATION_HOME"/lib/client.pem diff --git a/justfile b/justfile index bf1355576d..3840484bc5 100644 --- a/justfile +++ b/justfile @@ -1,7 +1,5 @@ # See https://just.systems/man/en/ for instructions set shell := ["bash", "-c"] -set dotenv-load -set dotenv-filename := "./.evergreen/scripts/env.sh" # Commonly used command segments. uv_run := "uv run --isolated --frozen " From 4e672bd5497aacc7122405de9b3c8491443d76a0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Feb 2025 13:15:48 -0600 Subject: [PATCH 1750/2111] PYTHON-4540 Convert libmongocrypt download to python (#2148) --- .evergreen/run-azurekms-fail-test.sh | 4 +- .evergreen/run-azurekms-test.sh | 3 +- .evergreen/run-gcpkms-test.sh | 3 +- .evergreen/scripts/run-gcpkms-fail-test.sh | 2 - .evergreen/scripts/setup-libmongocrypt.sh | 52 ----------- .evergreen/scripts/setup-tests.sh | 2 +- .../{setup-tests.py => setup_tests.py} | 86 +++++++++++++++++-- 7 files changed, 85 insertions(+), 67 deletions(-) delete mode 100755 .evergreen/scripts/setup-libmongocrypt.sh rename .evergreen/scripts/{setup-tests.py => setup_tests.py} (79%) diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh index 31ca30b3e2..eea84d42a9 100755 --- a/.evergreen/run-azurekms-fail-test.sh +++ b/.evergreen/run-azurekms-fail-test.sh @@ -2,10 +2,8 @@ set -o errexit # Exit the script with error if any of the commands fail HERE=$(dirname ${BASH_SOURCE:-$0}) . $DRIVERS_TOOLS/.evergreen/csfle/azurekms/setup-secrets.sh -export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz SUCCESS=false TEST_FLE_AZURE_AUTO=1 bash $HERE/scripts/setup-tests.sh -PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 \ - KEY_NAME="${AZUREKMS_KEYNAME}" \ +KEY_NAME="${AZUREKMS_KEYNAME}" \ KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ $HERE/just.sh test-eg bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh index 27cb3fb315..8e6b050cb6 100755 --- a/.evergreen/run-azurekms-test.sh +++ b/.evergreen/run-azurekms-test.sh @@ -6,7 +6,6 @@ echo "Copying files ... begin" export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey -LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz # Set up the remote files to test. git add . git commit -m "add files" || true @@ -20,7 +19,7 @@ AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -AZUREKMS_CMD="SUCCESS=true TEST_FLE_AZURE_AUTO=1 LIBMONGOCRYPT_URL=$LIBMONGOCRYPT_URL bash .evergreen/just.sh setup-test" \ +AZUREKMS_CMD="SUCCESS=true TEST_FLE_AZURE_AUTO=1 bash .evergreen/just.sh setup-test" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" bash ./.evergreen/just.sh test-eg" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh index 077ca0cb9f..a430f4e4f9 100755 --- a/.evergreen/run-gcpkms-test.sh +++ b/.evergreen/run-gcpkms-test.sh @@ -8,7 +8,6 @@ export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} export GCPKMS_PROJECT=${GCPKMS_PROJECT} export GCPKMS_ZONE=${GCPKMS_ZONE} export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} -LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz # Set up the remote files to test. git add . git commit -m "add files" || true @@ -19,7 +18,7 @@ echo "Untarring file ... begin" GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 LIBMONGOCRYPT_URL=$LIBMONGOCRYPT_URL bash ./.evergreen/just.sh setup-test" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 bash ./.evergreen/just.sh setup-test" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh GCPKMS_CMD="./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Running test ... end" bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh index 61f5e30ccc..8675c7c242 100755 --- a/.evergreen/scripts/run-gcpkms-fail-test.sh +++ b/.evergreen/scripts/run-gcpkms-fail-test.sh @@ -2,7 +2,5 @@ set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) . $HERE/env.sh -export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 -export LIBMONGOCRYPT_URL=https://s3.amazonaws.com/mciuploads/libmongocrypt/debian11/master/latest/libmongocrypt.tar.gz SUCCESS=false TEST_FLE_GCP_AUTO=1 bash $HERE/setup-tests.sh bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/setup-libmongocrypt.sh b/.evergreen/scripts/setup-libmongocrypt.sh deleted file mode 100755 index 775db07cb0..0000000000 --- a/.evergreen/scripts/setup-libmongocrypt.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -set -o xtrace - -TARGET="" - -if [ "Windows_NT" = "${OS:-''}" ]; then # Magic variable in cygwin - # PYTHON-2808 Ensure this machine has the CA cert for google KMS. - powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true - TARGET="windows-test" -fi - -if [ "$(uname -s)" = "Darwin" ]; then - TARGET="macos" -fi - -if [ "$(uname -s)" = "Linux" ]; then - rhel_ver=$(awk -F'=' '/VERSION_ID/{ gsub(/"/,""); print $2}' /etc/os-release) - arch=$(uname -m) - echo "RHEL $rhel_ver $arch" - if [[ $rhel_ver =~ 7 ]]; then - TARGET="rhel-70-64-bit" - elif [[ $rhel_ver =~ 8 ]]; then - if [ "$arch" = "x86_64" ]; then - TARGET="rhel-80-64-bit" - elif [ "$arch" = "arm" ]; then - TARGET="rhel-82-arm64" - fi - fi -fi - -if [ -z "$LIBMONGOCRYPT_URL" ] && [ -n "$TARGET" ]; then - LIBMONGOCRYPT_URL="https://s3.amazonaws.com/mciuploads/libmongocrypt/$TARGET/master/latest/libmongocrypt.tar.gz" -fi - -if [ -z "$LIBMONGOCRYPT_URL" ]; then - echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" - exit 1 -fi -rm -rf libmongocrypt libmongocrypt.tar.gz -echo "Fetching $LIBMONGOCRYPT_URL..." -curl -O "$LIBMONGOCRYPT_URL" -echo "Fetching $LIBMONGOCRYPT_URL...done" -mkdir libmongocrypt -tar xzf libmongocrypt.tar.gz -C ./libmongocrypt -ls -la libmongocrypt -ls -la libmongocrypt/nocrypto - -if [ "Windows_NT" = "${OS:-''}" ]; then - # libmongocrypt's windows dll is not marked executable. - chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll -fi diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh index 80fc717047..330469711c 100755 --- a/.evergreen/scripts/setup-tests.sh +++ b/.evergreen/scripts/setup-tests.sh @@ -58,4 +58,4 @@ fi . $ROOT_DIR/.evergreen/utils.sh PYTHON=${PYTHON_BINARY:-$(find_python3)} -$PYTHON $SCRIPT_DIR/setup-tests.py +$PYTHON $SCRIPT_DIR/setup_tests.py diff --git a/.evergreen/scripts/setup-tests.py b/.evergreen/scripts/setup_tests.py similarity index 79% rename from .evergreen/scripts/setup-tests.py rename to .evergreen/scripts/setup_tests.py index 4e14ff9830..07693d1e99 100644 --- a/.evergreen/scripts/setup-tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -1,20 +1,26 @@ from __future__ import annotations import base64 +import dataclasses +import io import logging import os import platform import shlex +import shutil import stat import subprocess import sys +import tarfile from pathlib import Path from typing import Any +from urllib import request HERE = Path(__file__).absolute().parent ROOT = HERE.parent.parent ENV_FILE = HERE / "test-env.sh" DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") +PLATFORM = "windows" if os.name == "nt" else sys.platform logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) @@ -74,6 +80,13 @@ ) +@dataclasses.dataclass +class Distro: + name: str + version_id: str + arch: str + + def write_env(name: str, value: Any) -> None: with ENV_FILE.open("a", newline="\n") as fid: # Remove any existing quote chars. @@ -92,6 +105,69 @@ def run_command(cmd: str) -> None: LOGGER.info("Running command %s... done.", cmd) +def get_distro() -> Distro: + name = "" + version_id = "" + arch = platform.machine() + with open("/etc/os-release") as fid: + for line in fid.readlines(): + line = line.replace('"', "") # noqa: PLW2901 + if line.startswith("NAME="): + _, _, name = line.strip().partition("=") + if line.startswith("VERSION_ID="): + _, _, version_id = line.strip().partition("=") + return Distro(name=name, version_id=version_id, arch=arch) + + +def setup_libmongocrypt(): + target = "" + if PLATFORM == "windows": + # PYTHON-2808 Ensure this machine has the CA cert for google KMS. + if is_set("TEST_FLE_GCP_AUTO"): + run_command('powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/"') + target = "windows-test" + + elif PLATFORM == "darwin": + target = "macos" + + else: + distro = get_distro() + if distro.name.startswith("Debian"): + target = f"debian{distro.version_id}" + elif distro.name.startswith("Red Hat"): + if distro.version_id.startswith("7"): + target = "rhel-70-64-bit" + elif distro.version_id.startswith("8"): + if distro.arch == "aarch64": + target = "rhel-82-arm64" + else: + target = "rhel-80-64-bit" + + if not is_set("LIBMONGOCRYPT_URL"): + if not target: + raise ValueError("Cannot find libmongocrypt target for current platform!") + url = f"https://s3.amazonaws.com/mciuploads/libmongocrypt/{target}/master/latest/libmongocrypt.tar.gz" + else: + url = os.environ["LIBMONGOCRYPT_URL"] + + shutil.rmtree(HERE / "libmongocrypt", ignore_errors=True) + + LOGGER.info(f"Fetching {url}...") + with request.urlopen(request.Request(url), timeout=15.0) as response: # noqa: S310 + if response.status == 200: + fileobj = io.BytesIO(response.read()) + with tarfile.open("libmongocrypt.tar.gz", fileobj=fileobj) as fid: + fid.extractall(Path.cwd() / "libmongocrypt") + LOGGER.info(f"Fetching {url}... done.") + + run_command("ls -la libmongocrypt") + run_command("ls -la libmongocrypt/nocrypto") + + if PLATFORM == "windows": + # libmongocrypt's windows dll is not marked executable. + run_command("chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll") + + def handle_test_env() -> None: AUTH = os.environ.get("AUTH", "noauth") SSL = os.environ.get("SSL", "nossl") @@ -156,7 +232,7 @@ def handle_test_env() -> None: write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") if is_set("TEST_ENTERPRISE_AUTH"): - if os.name == "nt": + if PLATFORM == "windows": LOGGER.info("Setting GSSAPI_PASS") write_env("GSSAPI_PASS", os.environ["SASL_PASS"]) write_env("GSSAPI_CANONICALIZE", "true") @@ -214,19 +290,19 @@ def handle_test_env() -> None: if is_set("TEST_ENCRYPTION") or is_set("TEST_FLE_AZURE_AUTO") or is_set("TEST_FLE_GCP_AUTO"): # Check for libmongocrypt download. if not (ROOT / "libmongocrypt").exists(): - run_command(f"bash {HERE.as_posix()}/setup-libmongocrypt.sh") + setup_libmongocrypt() # TODO: Test with 'pip install pymongocrypt' UV_ARGS.append("--group pymongocrypt_source") # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE = ROOT / "libmongocrypt/nocrypto" - if sys.platform == "linux": + if PLATFORM == "linux": if (BASE / "lib/libmongocrypt.so").exists(): PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.so" else: PYMONGOCRYPT_LIB = BASE / "lib64/libmongocrypt.so" - elif sys.platform == "darwin": + elif PLATFORM == "darwin": PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.dylib" else: PYMONGOCRYPT_LIB = BASE / "bin/mongocrypt.dll" @@ -244,7 +320,7 @@ def handle_test_env() -> None: if is_set("TEST_CRYPT_SHARED"): CRYPT_SHARED_DIR = Path(os.environ["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) - if os.name == "nt": + if PLATFORM == "windows": write_env("PATH", f"{CRYPT_SHARED_DIR}:$PATH") else: write_env( From f32e2bc3727f14ab60ca37b02e407a19666182df Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 19 Feb 2025 11:21:51 -0800 Subject: [PATCH 1751/2111] PYTHON-5075 Convert test.test_csot to async (#2088) Co-authored-by: Noah Stapp Co-authored-by: Noah Stapp --- test/asynchronous/test_csot.py | 118 ++++++++++++++++++++++++++++ test/asynchronous/unified_format.py | 7 +- test/test_csot.py | 8 +- test/unified_format.py | 7 +- tools/synchro.py | 1 + 5 files changed, 136 insertions(+), 5 deletions(-) create mode 100644 test/asynchronous/test_csot.py diff --git a/test/asynchronous/test_csot.py b/test/asynchronous/test_csot.py new file mode 100644 index 0000000000..9e928c2251 --- /dev/null +++ b/test/asynchronous/test_csot.py @@ -0,0 +1,118 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes + +import pymongo +from pymongo import _csot +from pymongo.errors import PyMongoError + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "csot") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestCSOT(AsyncIntegrationTest): + RUN_ON_SERVERLESS = True + RUN_ON_LOAD_BALANCER = True + + async def test_timeout_nested(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + # Capped at the original 10 deadline. + with pymongo.timeout(15): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertEqual(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + await coll.find_one() + + with pymongo.timeout(5): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + await coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + @async_client_context.require_change_streams + async def test_change_stream_can_resume_after_timeouts(self): + if os.environ.get("SKIP_CSOT_TESTS", ""): + raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") + coll = self.db.test + await coll.insert_one({}) + async with await coll.watch() as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + await stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + await stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if async_client_context.version < (4, 0): + await stream.try_next() + await coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(await stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. + with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + await stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 149aad9786..695f58ee27 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -1387,7 +1387,6 @@ async def run_scenario(self, spec, uri=None): # transaction (from a test failure) from blocking collection/database # operations during test set up and tear down. await self.kill_all_sessions() - self.addAsyncCleanup(self.kill_all_sessions) if "csot" in self.id().lower(): # Retry CSOT tests up to 2 times to deal with flakey tests. @@ -1395,7 +1394,11 @@ async def run_scenario(self, spec, uri=None): for i in range(attempts): try: return await self._run_scenario(spec, uri) - except AssertionError: + except (AssertionError, OperationFailure) as exc: + if isinstance(exc, OperationFailure) and ( + _IS_SYNC or "failpoint" not in exc._message + ): + raise if i < attempts - 1: print( f"Retrying after attempt {i+1} of {self.id()} failed with:\n" diff --git a/test/test_csot.py b/test/test_csot.py index c075a07d5a..5201156a1d 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -17,6 +17,7 @@ import os import sys +from pathlib import Path sys.path[0:0] = [""] @@ -27,8 +28,13 @@ from pymongo import _csot from pymongo.errors import PyMongoError +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "csot") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "csot") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/unified_format.py b/test/unified_format.py index b2e6ae1e83..73dee10ddf 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1374,7 +1374,6 @@ def run_scenario(self, spec, uri=None): # transaction (from a test failure) from blocking collection/database # operations during test set up and tear down. self.kill_all_sessions() - self.addCleanup(self.kill_all_sessions) if "csot" in self.id().lower(): # Retry CSOT tests up to 2 times to deal with flakey tests. @@ -1382,7 +1381,11 @@ def run_scenario(self, spec, uri=None): for i in range(attempts): try: return self._run_scenario(spec, uri) - except AssertionError: + except (AssertionError, OperationFailure) as exc: + if isinstance(exc, OperationFailure) and ( + _IS_SYNC or "failpoint" not in exc._message + ): + raise if i < attempts - 1: print( f"Retrying after attempt {i+1} of {self.id()} failed with:\n" diff --git a/tools/synchro.py b/tools/synchro.py index 519ebb102b..39c53b435f 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -208,6 +208,7 @@ def async_only_test(f: str) -> bool: "test_connections_survive_primary_stepdown_spec.py", "test_create_entities.py", "test_crud_unified.py", + "test_csot.py", "test_cursor.py", "test_custom_types.py", "test_database.py", From 5456f1ec045beaeee3a9b008b9522266fc9f43c1 Mon Sep 17 00:00:00 2001 From: Kevin Albertson Date: Wed, 19 Feb 2025 15:22:28 -0500 Subject: [PATCH 1752/2111] PYTHON-5142 Sync `non-lb-connection-establishment` test (#2150) --- .../non-lb-connection-establishment.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/load_balancer/non-lb-connection-establishment.json b/test/load_balancer/non-lb-connection-establishment.json index 6aaa7bdf98..f4fed13cc2 100644 --- a/test/load_balancer/non-lb-connection-establishment.json +++ b/test/load_balancer/non-lb-connection-establishment.json @@ -57,6 +57,19 @@ "tests": [ { "description": "operations against non-load balanced clusters fail if URI contains loadBalanced=true", + "runOnRequirements": [ + { + "maxServerVersion": "8.0.99", + "topologies": [ + "single" + ] + }, + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { "name": "runCommand", From 691ab8e783f01324b91f2178658957849d94ffeb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 20 Feb 2025 09:37:47 -0600 Subject: [PATCH 1753/2111] PYTHON-5146 Fix handling of AWS ECS test (#2152) --- .evergreen/run-mongodb-aws-ecs-test.sh | 1 + .evergreen/scripts/run-aws-ecs-auth-test.sh | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 96d3c0611e..f141792420 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -31,5 +31,6 @@ export AUTH="auth" export SET_XTRACE_ON=1 cd src rm -rf .venv +rm -f .evergreen/scripts/test-env.sh || true bash ./.evergreen/just.sh setup-test bash .evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-aws-ecs-auth-test.sh b/.evergreen/scripts/run-aws-ecs-auth-test.sh index 787e0a710b..b8197c4da5 100755 --- a/.evergreen/scripts/run-aws-ecs-auth-test.sh +++ b/.evergreen/scripts/run-aws-ecs-auth-test.sh @@ -9,7 +9,4 @@ set -ex cd "$DRIVERS_TOOLS"/.evergreen/auth_aws . ./activate-authawsvenv.sh . aws_setup.sh ecs -export MONGODB_BINARIES="$MONGODB_BINARIES" -export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" -python aws_tester.py ecs cd - From b56605cc1fe922d8d07947e40389921c2fca0a34 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 20 Feb 2025 10:54:20 -0600 Subject: [PATCH 1754/2111] PYTHON-5147 Do not run OCSP on MongoDB 4.2 (#2153) --- .evergreen/generated_configs/variants.yml | 55 +++++++++-------------- .evergreen/scripts/generate_config.py | 2 +- 2 files changed, 22 insertions(+), 35 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 531f23eb66..da149a3a1b 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -817,23 +817,10 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Ocsp tests - - name: ocsp-rhel8-v4.2-python3.9 + - name: ocsp-rhel8-v4.4-python3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 v4.2 Python3.9 - run_on: - - rhel87-small - batchtime: 20160 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.2" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: ocsp-rhel8-v4.4-python3.10 - tasks: - - name: .ocsp - display_name: OCSP RHEL8 v4.4 Python3.10 + display_name: OCSP RHEL8 v4.4 Python3.9 run_on: - rhel87-small batchtime: 20160 @@ -842,11 +829,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: "4.4" - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: ocsp-rhel8-v5.0-python3.11 + PYTHON_BINARY: /opt/python/3.9/bin/python3 + - name: ocsp-rhel8-v5.0-python3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 v5.0 Python3.11 + display_name: OCSP RHEL8 v5.0 Python3.10 run_on: - rhel87-small batchtime: 20160 @@ -855,11 +842,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: "5.0" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: ocsp-rhel8-v6.0-python3.12 + PYTHON_BINARY: /opt/python/3.10/bin/python3 + - name: ocsp-rhel8-v6.0-python3.11 tasks: - name: .ocsp - display_name: OCSP RHEL8 v6.0 Python3.12 + display_name: OCSP RHEL8 v6.0 Python3.11 run_on: - rhel87-small batchtime: 20160 @@ -868,11 +855,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: "6.0" - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: ocsp-rhel8-v7.0-python3.13 + PYTHON_BINARY: /opt/python/3.11/bin/python3 + - name: ocsp-rhel8-v7.0-python3.12 tasks: - name: .ocsp - display_name: OCSP RHEL8 v7.0 Python3.13 + display_name: OCSP RHEL8 v7.0 Python3.12 run_on: - rhel87-small batchtime: 20160 @@ -881,11 +868,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: "7.0" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: ocsp-rhel8-v8.0-pypy3.10 + PYTHON_BINARY: /opt/python/3.12/bin/python3 + - name: ocsp-rhel8-v8.0-python3.13 tasks: - name: .ocsp - display_name: OCSP RHEL8 v8.0 PyPy3.10 + display_name: OCSP RHEL8 v8.0 Python3.13 run_on: - rhel87-small batchtime: 20160 @@ -894,11 +881,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: "8.0" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: ocsp-rhel8-rapid-python3.9 + PYTHON_BINARY: /opt/python/3.13/bin/python3 + - name: ocsp-rhel8-rapid-pypy3.10 tasks: - name: .ocsp - display_name: OCSP RHEL8 rapid Python3.9 + display_name: OCSP RHEL8 rapid PyPy3.10 run_on: - rhel87-small batchtime: 20160 @@ -907,11 +894,11 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: rapid - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: ocsp-rhel8-latest-python3.10 + PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + - name: ocsp-rhel8-latest-python3.9 tasks: - name: .ocsp - display_name: OCSP RHEL8 latest Python3.10 + display_name: OCSP RHEL8 latest Python3.9 run_on: - rhel87-small batchtime: 20160 @@ -920,7 +907,7 @@ buildvariants: SSL: ssl TOPOLOGY: server VERSION: latest - PYTHON_BINARY: /opt/python/3.10/bin/python3 + PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: ocsp-win64-v4.4-python3.9 tasks: - name: .ocsp-rsa !.ocsp-staple diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 59c3c720bf..7236ee6793 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -239,7 +239,7 @@ def create_ocsp_variants() -> list[BuildVariant]: base_display = "OCSP" # OCSP tests on default host with all servers v4.4+ and all python versions. - versions = [v for v in ALL_VERSIONS if v != "4.0"] + versions = get_versions_from("4.4") for version, python in zip_cycle(versions, ALL_PYTHONS): host = DEFAULT_HOST variant = create_variant( From 25b2d77b6377ea42ebd767abb64af6b91cb5c637 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 21 Feb 2025 14:27:33 -0600 Subject: [PATCH 1755/2111] PYTHON-5138 Convert setup_tests.py to a cli (#2154) --- .evergreen/config.yml | 42 +- .evergreen/generated_configs/tasks.yml | 411 +++++++----------- .evergreen/generated_configs/variants.yml | 74 ++-- .evergreen/run-azurekms-fail-test.sh | 2 +- .evergreen/run-azurekms-test.sh | 2 +- .evergreen/run-gcpkms-test.sh | 2 +- .evergreen/run-mongodb-aws-ecs-test.sh | 4 +- .evergreen/run-mongodb-oidc-test.sh | 2 +- .evergreen/run-perf-tests.sh | 3 +- .evergreen/run-tests.sh | 20 +- .../scripts/bootstrap-mongo-orchestration.sh | 5 +- .evergreen/scripts/configure-env.sh | 19 - .evergreen/scripts/generate_config.py | 25 +- .evergreen/scripts/run-atlas-tests.sh | 2 +- .../scripts/run-enterprise-auth-tests.sh | 2 +- .evergreen/scripts/run-gcpkms-fail-test.sh | 2 +- .evergreen/scripts/run-mockupdb-tests.sh | 5 - .evergreen/scripts/run-mongodb-aws-test.sh | 2 +- .evergreen/scripts/run-ocsp-test.sh | 13 +- .evergreen/scripts/setup-dev-env.sh | 8 +- .evergreen/scripts/setup-tests.sh | 43 +- .evergreen/scripts/setup_tests.py | 285 +++++++----- CONTRIBUTING.md | 4 +- justfile | 8 +- test/asynchronous/test_client_context.py | 12 +- test/asynchronous/test_data_lake.py | 4 - test/asynchronous/test_index_management.py | 4 - test/test_client_context.py | 12 +- test/test_data_lake.py | 4 - test/test_index_management.py | 4 - 30 files changed, 452 insertions(+), 573 deletions(-) delete mode 100755 .evergreen/scripts/run-mockupdb-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 028caf4d9b..72cab17dc9 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -42,7 +42,7 @@ functions: # Make an evergreen expansion file with dynamic values - command: subprocess.exec params: - include_expansions_in_env: ["is_patch", "project", "version_id", "AUTH", "SSL", "TEST_ENCRYPTION", "TEST_ENCRYPTION_PYOPENSSL", "TEST_CRYPT_SHARED", "TEST_PYOPENSSL", "SETDEFAULTENCODING", "TEST_LOADBALANCER", "TEST_SEVERLESS", "SKIP_CSOT_TESTS", "MONGODB_STARTED", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", "COVERAGE", "COMPRESSORS", "MONGODB_API_VERSION", "skip_crypt_shared", "VERSION", "TOPOLOGY", "STORAGE_ENGINE", "ORCHESTRATION_FILE", "REQUIRE_API_VERSION", "LOAD_BALANCER", "skip_web_identity_auth_test", "skip_ECS_auth_test"] + include_expansions_in_env: ["is_patch", "project", "version_id", "skip_web_identity_auth_test", "skip_ECS_auth_test"] binary: bash working_dir: "src" args: @@ -205,18 +205,13 @@ functions: - command: subprocess.exec params: binary: bash - include_expansions_in_env: ["VERSION", "TOPOLOGY", "AUTH", "SSL", "ORCHESTRATION_FILE", "LOAD_BALANCER"] + include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, LOAD_BALANCER, + STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED] args: - - src/.evergreen/scripts/run-with-env.sh - src/.evergreen/scripts/bootstrap-mongo-orchestration.sh - command: expansions.update params: file: mo-expansion.yml - - command: expansions.update - params: - updates: - - key: MONGODB_STARTED - value: "1" "bootstrap data lake": - command: subprocess.exec @@ -250,17 +245,6 @@ functions: - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/run-mod-wsgi-tests.sh - "run mockupdb tests": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["PYTHON_BINARY"] - working_dir: "src" - binary: bash - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mockupdb-tests.sh - "run doctests": - command: subprocess.exec type: test @@ -276,14 +260,12 @@ functions: - command: subprocess.exec type: test params: - include_expansions_in_env: ["TEST_DATA_LAKE", "PYTHON_BINARY", "AUTH", "SSL", - "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "TEST_SUITES", - "TEST_INDEX_MANAGEMENT", "CRYPT_SHARED_LIB_PATH", "TEST_ENCRYPTION", "TEST_ENCRYPTION_PYOPENSSL", - "TEST_CRYPT_SHARED", "TEST_PYOPENSSL", "TEST_LOADBALANCER", "TEST_SEVERLESS", "MONGODB_URI"] + include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, + AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, + DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS] binary: bash working_dir: "src" - args: - - .evergreen/scripts/setup-tests.sh + args: [.evergreen/just.sh, setup-test, "${TEST_NAME}", "${SUB_TEST_NAME}"] - command: subprocess.exec type: test params: @@ -755,7 +737,9 @@ tasks: - name: "mockupdb" tags: ["mockupdb"] commands: - - func: "run mockupdb tests" + - func: "run tests" + vars: + TEST_NAME: mockupdb - name: "doctests" tags: ["doctests"] @@ -770,6 +754,8 @@ tasks: tags: ["serverless"] commands: - func: "run tests" + vars: + TEST_NAME: serverless - name: "test-enterprise-auth" tags: ["enterprise-auth"] @@ -789,7 +775,7 @@ tasks: TOPOLOGY: "replica_set" - func: "run tests" vars: - TEST_INDEX_MANAGEMENT: "1" + TEST_NAME: index_management AUTH: "auth" - name: "mod-wsgi-standalone" @@ -857,7 +843,7 @@ tasks: - func: "bootstrap data lake" - func: "run tests" vars: - TEST_DATA_LAKE: "true" + TEST_NAME: "data_lake" - name: "test-aws-lambda-deployed" commands: diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 6b17035748..5495ad3470 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -12,7 +12,7 @@ tasks: vars: AUTH: auth SSL: ssl - TEST_LOADBALANCER: "true" + TEST_NAME: load_balancer tags: [load-balancer, auth, ssl] - name: test-load-balancer-noauth-ssl commands: @@ -26,7 +26,7 @@ tasks: vars: AUTH: noauth SSL: ssl - TEST_LOADBALANCER: "true" + TEST_NAME: load_balancer tags: [load-balancer, noauth, ssl] - name: test-load-balancer-noauth-nossl commands: @@ -40,7 +40,7 @@ tasks: vars: AUTH: noauth SSL: nossl - TEST_LOADBALANCER: "true" + TEST_NAME: load_balancer tags: [load-balancer, noauth, nossl] # Server tests @@ -57,7 +57,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - standalone @@ -77,7 +77,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - standalone @@ -97,7 +97,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - standalone @@ -117,7 +116,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - standalone @@ -137,7 +136,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - standalone @@ -157,7 +156,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - standalone @@ -177,7 +175,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - standalone @@ -197,7 +195,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - standalone @@ -217,7 +215,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - standalone @@ -237,7 +234,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - standalone @@ -257,7 +254,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - standalone @@ -277,7 +274,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - standalone @@ -297,7 +293,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - standalone @@ -317,7 +313,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - standalone @@ -337,7 +333,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - standalone @@ -357,7 +352,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - standalone @@ -377,7 +372,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - standalone @@ -397,7 +392,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - standalone @@ -417,7 +411,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - standalone @@ -437,7 +431,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - standalone @@ -457,7 +451,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - standalone @@ -477,7 +470,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - standalone @@ -497,7 +490,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - standalone @@ -517,7 +510,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - standalone @@ -537,7 +529,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - standalone @@ -557,7 +549,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - standalone @@ -577,7 +569,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - standalone @@ -597,7 +588,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - standalone @@ -617,7 +608,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - standalone @@ -637,7 +628,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - standalone @@ -657,7 +647,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - standalone @@ -677,7 +667,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - standalone @@ -697,7 +687,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - standalone @@ -717,7 +706,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - standalone @@ -737,7 +726,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - standalone @@ -757,7 +746,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - standalone @@ -777,7 +765,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - standalone @@ -797,7 +785,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - standalone @@ -817,7 +805,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - standalone @@ -837,7 +824,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - standalone @@ -857,7 +844,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - standalone @@ -877,7 +864,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - standalone @@ -897,7 +883,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - standalone @@ -917,7 +903,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - standalone @@ -937,7 +923,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - standalone @@ -957,7 +942,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - standalone @@ -977,7 +962,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - standalone @@ -997,7 +982,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - standalone @@ -1017,7 +1001,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - standalone @@ -1037,7 +1021,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - standalone @@ -1057,7 +1041,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - standalone @@ -1077,7 +1060,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - standalone @@ -1097,7 +1080,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - standalone @@ -1117,7 +1100,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - standalone @@ -1137,7 +1119,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - standalone @@ -1157,7 +1139,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - standalone @@ -1177,7 +1159,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - standalone @@ -1197,7 +1178,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - standalone @@ -1217,7 +1198,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - standalone @@ -1237,7 +1218,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - standalone @@ -1257,7 +1237,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - standalone @@ -1277,7 +1257,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - standalone @@ -1297,7 +1277,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - standalone @@ -1317,7 +1296,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - standalone @@ -1337,7 +1316,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - standalone @@ -1357,7 +1336,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - standalone @@ -1377,7 +1355,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - standalone @@ -1397,7 +1375,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - standalone @@ -1417,7 +1395,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - standalone @@ -1437,7 +1414,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - standalone @@ -1457,7 +1434,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - standalone @@ -1477,7 +1454,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - standalone @@ -1497,7 +1473,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - standalone @@ -1517,7 +1493,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - standalone @@ -1537,7 +1513,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - latest - standalone @@ -1557,7 +1532,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - standalone @@ -1577,7 +1552,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - standalone @@ -1597,7 +1572,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - latest - standalone @@ -1617,7 +1591,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - standalone @@ -1637,7 +1611,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - standalone @@ -1657,7 +1631,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - latest - standalone @@ -1677,7 +1650,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - replica_set @@ -1697,7 +1670,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - replica_set @@ -1717,7 +1690,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - replica_set @@ -1737,7 +1709,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - replica_set @@ -1757,7 +1729,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - replica_set @@ -1777,7 +1749,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - replica_set @@ -1797,7 +1768,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - replica_set @@ -1817,7 +1788,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - replica_set @@ -1837,7 +1808,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - replica_set @@ -1857,7 +1827,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - replica_set @@ -1877,7 +1847,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - replica_set @@ -1897,7 +1867,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - replica_set @@ -1917,7 +1886,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - replica_set @@ -1937,7 +1906,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - replica_set @@ -1957,7 +1926,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - replica_set @@ -1977,7 +1945,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - replica_set @@ -1997,7 +1965,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - replica_set @@ -2017,7 +1985,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - replica_set @@ -2037,7 +2004,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - replica_set @@ -2057,7 +2024,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - replica_set @@ -2077,7 +2044,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - replica_set @@ -2097,7 +2063,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - replica_set @@ -2117,7 +2083,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - replica_set @@ -2137,7 +2103,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - replica_set @@ -2157,7 +2122,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - replica_set @@ -2177,7 +2142,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - replica_set @@ -2197,7 +2162,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - replica_set @@ -2217,7 +2181,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - replica_set @@ -2237,7 +2201,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - replica_set @@ -2257,7 +2221,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - replica_set @@ -2277,7 +2240,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - replica_set @@ -2297,7 +2260,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - replica_set @@ -2317,7 +2280,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - replica_set @@ -2337,7 +2299,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - replica_set @@ -2357,7 +2319,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - replica_set @@ -2377,7 +2339,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - replica_set @@ -2397,7 +2358,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - replica_set @@ -2417,7 +2378,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - replica_set @@ -2437,7 +2398,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - replica_set @@ -2457,7 +2417,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - replica_set @@ -2477,7 +2437,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - replica_set @@ -2497,7 +2457,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - replica_set @@ -2517,7 +2476,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - replica_set @@ -2537,7 +2496,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - replica_set @@ -2557,7 +2516,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - replica_set @@ -2577,7 +2535,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - replica_set @@ -2597,7 +2555,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - replica_set @@ -2617,7 +2575,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - replica_set @@ -2637,7 +2594,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - replica_set @@ -2657,7 +2614,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - replica_set @@ -2677,7 +2634,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - replica_set @@ -2697,7 +2653,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - replica_set @@ -2717,7 +2673,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - replica_set @@ -2737,7 +2693,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - replica_set @@ -2757,7 +2712,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - replica_set @@ -2777,7 +2732,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - replica_set @@ -2797,7 +2752,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - replica_set @@ -2817,7 +2771,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - replica_set @@ -2837,7 +2791,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - replica_set @@ -2857,7 +2811,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - replica_set @@ -2877,7 +2830,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - replica_set @@ -2897,7 +2850,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - replica_set @@ -2917,7 +2870,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - replica_set @@ -2937,7 +2889,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - replica_set @@ -2957,7 +2909,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - replica_set @@ -2977,7 +2929,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - replica_set @@ -2997,7 +2948,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - replica_set @@ -3017,7 +2968,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - replica_set @@ -3037,7 +2988,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - replica_set @@ -3057,7 +3007,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - replica_set @@ -3077,7 +3027,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - replica_set @@ -3097,7 +3047,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - replica_set @@ -3117,7 +3066,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - replica_set @@ -3137,7 +3086,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - replica_set @@ -3157,7 +3106,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - latest - replica_set @@ -3177,7 +3125,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - replica_set @@ -3197,7 +3145,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - replica_set @@ -3217,7 +3165,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - latest - replica_set @@ -3237,7 +3184,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - replica_set @@ -3257,7 +3204,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - replica_set @@ -3277,7 +3224,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - latest - replica_set @@ -3297,7 +3243,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - sharded_cluster @@ -3317,7 +3263,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - sharded_cluster @@ -3337,7 +3283,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - sharded_cluster @@ -3357,7 +3302,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - sharded_cluster @@ -3377,7 +3322,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - sharded_cluster @@ -3397,7 +3342,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - sharded_cluster @@ -3417,7 +3361,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.0" - sharded_cluster @@ -3437,7 +3381,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.0" - sharded_cluster @@ -3457,7 +3401,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.0" - sharded_cluster @@ -3477,7 +3420,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - sharded_cluster @@ -3497,7 +3440,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - sharded_cluster @@ -3517,7 +3460,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - sharded_cluster @@ -3537,7 +3479,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - sharded_cluster @@ -3557,7 +3499,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - sharded_cluster @@ -3577,7 +3519,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - sharded_cluster @@ -3597,7 +3538,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.2" - sharded_cluster @@ -3617,7 +3558,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.2" - sharded_cluster @@ -3637,7 +3578,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.2" - sharded_cluster @@ -3657,7 +3597,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - sharded_cluster @@ -3677,7 +3617,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - sharded_cluster @@ -3697,7 +3637,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - sharded_cluster @@ -3717,7 +3656,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - sharded_cluster @@ -3737,7 +3676,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - sharded_cluster @@ -3757,7 +3696,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - sharded_cluster @@ -3777,7 +3715,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "4.4" - sharded_cluster @@ -3797,7 +3735,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "4.4" - sharded_cluster @@ -3817,7 +3755,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "4.4" - sharded_cluster @@ -3837,7 +3774,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - sharded_cluster @@ -3857,7 +3794,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - sharded_cluster @@ -3877,7 +3814,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - sharded_cluster @@ -3897,7 +3833,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - sharded_cluster @@ -3917,7 +3853,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - sharded_cluster @@ -3937,7 +3873,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - sharded_cluster @@ -3957,7 +3892,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "5.0" - sharded_cluster @@ -3977,7 +3912,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "5.0" - sharded_cluster @@ -3997,7 +3932,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "5.0" - sharded_cluster @@ -4017,7 +3951,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - sharded_cluster @@ -4037,7 +3971,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - sharded_cluster @@ -4057,7 +3991,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - sharded_cluster @@ -4077,7 +4010,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - sharded_cluster @@ -4097,7 +4030,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - sharded_cluster @@ -4117,7 +4050,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - sharded_cluster @@ -4137,7 +4069,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "6.0" - sharded_cluster @@ -4157,7 +4089,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "6.0" - sharded_cluster @@ -4177,7 +4109,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "6.0" - sharded_cluster @@ -4197,7 +4128,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - sharded_cluster @@ -4217,7 +4148,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - sharded_cluster @@ -4237,7 +4168,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - sharded_cluster @@ -4257,7 +4187,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - sharded_cluster @@ -4277,7 +4207,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - sharded_cluster @@ -4297,7 +4227,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - sharded_cluster @@ -4317,7 +4246,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "7.0" - sharded_cluster @@ -4337,7 +4266,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "7.0" - sharded_cluster @@ -4357,7 +4286,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "7.0" - sharded_cluster @@ -4377,7 +4305,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - sharded_cluster @@ -4397,7 +4325,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - sharded_cluster @@ -4417,7 +4345,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - sharded_cluster @@ -4437,7 +4364,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - sharded_cluster @@ -4457,7 +4384,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - sharded_cluster @@ -4477,7 +4404,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - sharded_cluster @@ -4497,7 +4423,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - "8.0" - sharded_cluster @@ -4517,7 +4443,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - "8.0" - sharded_cluster @@ -4537,7 +4463,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - "8.0" - sharded_cluster @@ -4557,7 +4482,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - sharded_cluster @@ -4577,7 +4502,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - sharded_cluster @@ -4597,7 +4522,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - sharded_cluster @@ -4617,7 +4541,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - sharded_cluster @@ -4637,7 +4561,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - sharded_cluster @@ -4657,7 +4581,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - sharded_cluster @@ -4677,7 +4600,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - rapid - sharded_cluster @@ -4697,7 +4620,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - rapid - sharded_cluster @@ -4717,7 +4640,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - rapid - sharded_cluster @@ -4737,7 +4659,7 @@ tasks: AUTH: auth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - sharded_cluster @@ -4757,7 +4679,7 @@ tasks: AUTH: auth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - sharded_cluster @@ -4777,7 +4699,6 @@ tasks: AUTH: auth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - latest - sharded_cluster @@ -4797,7 +4718,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - sharded_cluster @@ -4817,7 +4738,7 @@ tasks: AUTH: noauth SSL: ssl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - sharded_cluster @@ -4837,7 +4758,6 @@ tasks: AUTH: noauth SSL: ssl SYNC: sync_async - TEST_SUITES: "" tags: - latest - sharded_cluster @@ -4857,7 +4777,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync - TEST_SUITES: default + TEST_NAME: default_sync tags: - latest - sharded_cluster @@ -4877,7 +4797,7 @@ tasks: AUTH: noauth SSL: nossl SYNC: async - TEST_SUITES: default_async + TEST_NAME: default_async tags: - latest - sharded_cluster @@ -4897,7 +4817,6 @@ tasks: AUTH: noauth SSL: nossl SYNC: sync_async - TEST_SUITES: "" tags: - latest - sharded_cluster diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index da149a3a1b..88c564909f 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -318,7 +318,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - name: encryption-rhel8-python3.13 @@ -331,7 +331,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - name: encryption-rhel8-pypy3.10 @@ -344,7 +344,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-rhel8-python3.9 @@ -357,7 +357,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] @@ -371,7 +371,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] @@ -385,7 +385,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] @@ -399,8 +399,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" - TEST_ENCRYPTION_PYOPENSSL: "true" + TEST_NAME: encryption + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.9/bin/python3 tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-python3.13 @@ -413,8 +413,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" - TEST_ENCRYPTION_PYOPENSSL: "true" + TEST_NAME: encryption + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-pypy3.10 @@ -427,8 +427,8 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" - TEST_ENCRYPTION_PYOPENSSL: "true" + TEST_NAME: encryption + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - name: encryption-rhel8-python3.10 @@ -438,7 +438,7 @@ buildvariants: run_on: - rhel87-small expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: encryption-crypt_shared-rhel8-python3.11 tasks: @@ -447,7 +447,7 @@ buildvariants: run_on: - rhel87-small expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: encryption-pyopenssl-rhel8-python3.12 @@ -457,8 +457,8 @@ buildvariants: run_on: - rhel87-small expansions: - TEST_ENCRYPTION: "true" - TEST_ENCRYPTION_PYOPENSSL: "true" + TEST_NAME: encryption + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: encryption-macos-python3.9 tasks: @@ -468,7 +468,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - name: encryption-macos-python3.13 @@ -479,7 +479,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - name: encryption-crypt_shared-macos-python3.9 @@ -490,7 +490,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] @@ -502,7 +502,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] @@ -514,7 +514,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - name: encryption-win64-python3.13 @@ -525,7 +525,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] - name: encryption-crypt_shared-win64-python3.9 @@ -536,7 +536,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] @@ -548,7 +548,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - TEST_ENCRYPTION: "true" + TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] @@ -772,7 +772,7 @@ buildvariants: # No c ext tests - name: no-c-ext-rhel8-python3.9 tasks: - - name: .standalone .noauth .nossl .sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: No C Ext RHEL8 Python3.9 run_on: - rhel87-small @@ -781,7 +781,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: no-c-ext-rhel8-python3.10 tasks: - - name: .replica_set .noauth .nossl .sync_async + - name: .replica_set .noauth .nossl !.sync_async display_name: No C Ext RHEL8 Python3.10 run_on: - rhel87-small @@ -790,7 +790,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: no-c-ext-rhel8-python3.11 tasks: - - name: .sharded_cluster .noauth .nossl .sync_async + - name: .sharded_cluster .noauth .nossl !.sync_async display_name: No C Ext RHEL8 Python3.11 run_on: - rhel87-small @@ -799,7 +799,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: no-c-ext-rhel8-python3.12 tasks: - - name: .standalone .noauth .nossl .sync_async + - name: .standalone .noauth .nossl !.sync_async display_name: No C Ext RHEL8 Python3.12 run_on: - rhel87-small @@ -808,7 +808,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: no-c-ext-rhel8-python3.13 tasks: - - name: .replica_set .noauth .nossl .sync_async + - name: .replica_set .noauth .nossl !.sync_async display_name: No C Ext RHEL8 Python3.13 run_on: - rhel87-small @@ -997,7 +997,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - TEST_PYOPENSSL: "true" + TEST_NAME: pyopenssl PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-python3.10 tasks: @@ -1008,7 +1008,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_PYOPENSSL: "true" + TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-python3.11 tasks: @@ -1019,7 +1019,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_PYOPENSSL: "true" + TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-python3.12 tasks: @@ -1030,7 +1030,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_PYOPENSSL: "true" + TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-python3.13 tasks: @@ -1041,7 +1041,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - TEST_PYOPENSSL: "true" + TEST_NAME: pyopenssl PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.10 tasks: @@ -1052,7 +1052,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_PYOPENSSL: "true" + TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Search index tests @@ -1288,7 +1288,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_SERVERLESS: "true" + TEST_NAME: serverless AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 @@ -1300,7 +1300,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_SERVERLESS: "true" + TEST_NAME: serverless AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.13/bin/python3 diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh index eea84d42a9..0804c34d66 100755 --- a/.evergreen/run-azurekms-fail-test.sh +++ b/.evergreen/run-azurekms-fail-test.sh @@ -2,7 +2,7 @@ set -o errexit # Exit the script with error if any of the commands fail HERE=$(dirname ${BASH_SOURCE:-$0}) . $DRIVERS_TOOLS/.evergreen/csfle/azurekms/setup-secrets.sh -SUCCESS=false TEST_FLE_AZURE_AUTO=1 bash $HERE/scripts/setup-tests.sh +bash $HERE/just.sh setup-test kms azure-fail KEY_NAME="${AZUREKMS_KEYNAME}" \ KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ $HERE/just.sh test-eg diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh index 8e6b050cb6..b40b07b019 100755 --- a/.evergreen/run-azurekms-test.sh +++ b/.evergreen/run-azurekms-test.sh @@ -19,7 +19,7 @@ AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -AZUREKMS_CMD="SUCCESS=true TEST_FLE_AZURE_AUTO=1 bash .evergreen/just.sh setup-test" \ +AZUREKMS_CMD="bash .evergreen/just.sh setup-test kms azure" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" bash ./.evergreen/just.sh test-eg" \ $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh index a430f4e4f9..ee2e8d8a57 100755 --- a/.evergreen/run-gcpkms-test.sh +++ b/.evergreen/run-gcpkms-test.sh @@ -18,7 +18,7 @@ echo "Untarring file ... begin" GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Untarring file ... end" echo "Running test ... begin" -GCPKMS_CMD="SUCCESS=true TEST_FLE_GCP_AUTO=1 bash ./.evergreen/just.sh setup-test" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh +GCPKMS_CMD="bash ./.evergreen/just.sh setup-test kms gcp" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh GCPKMS_CMD="./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh echo "Running test ... end" bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index f141792420..fc3a092bd4 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -26,11 +26,9 @@ apt-get -qq update < /dev/null > /dev/null apt-get -qq install $PYTHON_VER $PYTHON_VER-venv build-essential $PYTHON_VER-dev -y < /dev/null > /dev/null export PYTHON_BINARY=$PYTHON_VER -export TEST_AUTH_AWS=1 -export AUTH="auth" export SET_XTRACE_ON=1 cd src rm -rf .venv rm -f .evergreen/scripts/test-env.sh || true -bash ./.evergreen/just.sh setup-test +bash ./.evergreen/just.sh setup-test auth_aws ecs bash .evergreen/just.sh test-eg diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 552f9ef08e..c789d6d147 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -29,5 +29,5 @@ else exit 1 fi -TEST_AUTH_OIDC=1 COVERAGE=1 AUTH="auth" bash ./.evergreen/just.sh setup-test +COVERAGE=1 bash ./.evergreen/just.sh setup-test auth_oidc bash ./.evergreen/just.sh test-eg "${@:1}" diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index d0e001c5fc..85a82c2a5a 100755 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -14,7 +14,6 @@ export TEST_PATH="${PROJECT_DIRECTORY}/specifications/source/benchmarking/data" export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 -export PERF_TEST=1 -bash ./.evergreen/just.sh setup-test +bash ./.evergreen/just.sh setup-test perf bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 12935b25a0..f3c71b41ff 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,8 +1,11 @@ #!/bin/bash -set -eu +set -eux SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) -ROOT_DIR="$(dirname "$(dirname $SCRIPT_DIR)")" +SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )" +ROOT_DIR="$(dirname $SCRIPT_DIR)" + +pushd $ROOT_DIR export PIP_QUIET=1 # Quiet by default export PIP_PREFER_BINARY=1 # Prefer binary dists by default @@ -16,7 +19,7 @@ else echo "Not sourcing env inputs" fi -# Ensure there are test inputs. +# Handle test inputs. if [ -f $SCRIPT_DIR/scripts/test-env.sh ]; then echo "Sourcing test inputs" . $SCRIPT_DIR/scripts/test-env.sh @@ -24,9 +27,10 @@ else echo "Missing test inputs, please run 'just setup-test'" fi + # Source the local secrets export file if available. -if [ -f "$ROOT_DIR/secrets-export.sh" ]; then - . "$ROOT_DIR/secrets-export.sh" +if [ -f "./secrets-export.sh" ]; then + . "./secrets-export.sh" fi PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") @@ -54,7 +58,7 @@ uv run python -c 'import sys; print(sys.version)' PIP_QUIET=0 uv run ${UV_ARGS} --with pip pip list # Record the start time for a perf test. -if [ -n "${PERF_TEST:-}" ]; then +if [ -n "${TEST_PERF:-}" ]; then start_time=$(date +%s) fi @@ -75,7 +79,7 @@ fi echo "Running tests with $TEST_ARGS... done." # Handle perf test post actions. -if [ -n "${PERF_TEST:-}" ]; then +if [ -n "${TEST_PERF:-}" ]; then end_time=$(date +%s) elapsed_secs=$((end_time-start_time)) @@ -90,3 +94,5 @@ fi if [ -n "${COVERAGE:-}" ]; then rm -rf .pytest_cache fi + +popd diff --git a/.evergreen/scripts/bootstrap-mongo-orchestration.sh b/.evergreen/scripts/bootstrap-mongo-orchestration.sh index 1d2b145de8..5c6387d4b1 100755 --- a/.evergreen/scripts/bootstrap-mongo-orchestration.sh +++ b/.evergreen/scripts/bootstrap-mongo-orchestration.sh @@ -1,6 +1,7 @@ #!/bin/bash -set -o xtrace +set -eu + # Enable core dumps if enabled on the machine # Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml @@ -29,7 +30,7 @@ if [ "$(uname -s)" = "Darwin" ]; then fi fi -if [ -n "${skip_crypt_shared}" ]; then +if [ -z "${TEST_CRYPT_SHARED:-}" ]; then export SKIP_CRYPT_SHARED=1 fi diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 16212ad3b1..5515413562 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -55,24 +55,11 @@ export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" cat < "$SCRIPT_DIR"/env.sh export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" export CURRENT_VERSION="$CURRENT_VERSION" -export SKIP_LEGACY_SHELL=1 export DRIVERS_TOOLS="$DRIVERS_TOOLS" export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS_BINARIES" export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" -export SETDEFAULTENCODING="${SETDEFAULTENCODING:-}" -export SKIP_CSOT_TESTS="${SKIP_CSOT_TESTS:-}" -export MONGODB_STARTED="${MONGODB_STARTED:-}" -export DISABLE_TEST_COMMANDS="${DISABLE_TEST_COMMANDS:-}" -export GREEN_FRAMEWORK="${GREEN_FRAMEWORK:-}" -export NO_EXT="${NO_EXT:-}" -export COVERAGE="${COVERAGE:-}" -export COMPRESSORS="${COMPRESSORS:-}" -export MONGODB_API_VERSION="${MONGODB_API_VERSION:-}" -export skip_crypt_shared="${skip_crypt_shared:-}" -export STORAGE_ENGINE="${STORAGE_ENGINE:-}" -export REQUIRE_API_VERSION="${REQUIRE_API_VERSION:-}" export skip_web_identity_auth_test="${skip_web_identity_auth_test:-}" export skip_ECS_auth_test="${skip_ECS_auth_test:-}" @@ -96,14 +83,8 @@ SKIP_LEGACY_SHELL=1 DRIVERS_TOOLS="$DRIVERS_TOOLS" MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" MONGODB_BINARIES="$MONGODB_BINARIES" -TMPDIR="$MONGO_ORCHESTRATION_HOME/db" EOT -# Skip CSOT tests on non-linux platforms. -if [ "$(uname -s)" != "Linux" ]; then - echo "export SKIP_CSOT_TESTS=1" >> $SCRIPT_DIR/env.sh -fi - # Add these expansions to make it easier to call out tests scripts from the EVG yaml cat < expansion.yml DRIVERS_TOOLS: "$DRIVERS_TOOLS" diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 7236ee6793..d1c3b92262 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -344,11 +344,11 @@ def create_encryption_variants() -> list[BuildVariant]: batchtime = BATCHTIME_WEEK def get_encryption_expansions(encryption): - expansions = dict(TEST_ENCRYPTION="true") + expansions = dict(TEST_NAME="encryption") if "crypt_shared" in encryption: expansions["TEST_CRYPT_SHARED"] = "true" if "PyOpenSSL" in encryption: - expansions["TEST_ENCRYPTION_PYOPENSSL"] = "true" + expansions["SUB_TEST_NAME"] = "pyopenssl" return expansions host = DEFAULT_HOST @@ -487,7 +487,7 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" batchtime = BATCHTIME_WEEK - expansions = dict(TEST_PYOPENSSL="true") + expansions = dict(TEST_NAME="pyopenssl") variants = [] for python in ALL_PYTHONS: @@ -588,7 +588,7 @@ def create_no_c_ext_variants(): variants = [] host = DEFAULT_HOST for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): - tasks = [f".{topology} .noauth .nossl .sync_async"] + tasks = [f".{topology} .noauth .nossl !.sync_async"] expansions = dict() handle_c_ext(C_EXTS[0], expansions) display_name = get_display_name("No C Ext", host, python=python) @@ -645,7 +645,7 @@ def create_disable_test_commands_variants(): def create_serverless_variants(): host = DEFAULT_HOST batchtime = BATCHTIME_WEEK - expansions = dict(TEST_SERVERLESS="true", AUTH="auth", SSL="ssl") + expansions = dict(TEST_NAME="serverless", AUTH="auth", SSL="ssl") tasks = ["serverless_task_group"] base_name = "Serverless" return [ @@ -811,17 +811,11 @@ def create_server_tasks(): SSL=ssl, ) bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) - test_suites = "" + test_vars = dict(AUTH=auth, SSL=ssl, SYNC=sync) if sync == "sync": - test_suites = "default" + test_vars["TEST_NAME"] = "default_sync" elif sync == "async": - test_suites = "default_async" - test_vars = dict( - AUTH=auth, - SSL=ssl, - SYNC=sync, - TEST_SUITES=test_suites, - ) + test_vars["TEST_NAME"] = "default_async" test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) return tasks @@ -834,9 +828,10 @@ def create_load_balancer_tasks(): tags = ["load-balancer", auth, ssl] bootstrap_vars = dict(TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, LOAD_BALANCER="true") bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) - test_vars = dict(AUTH=auth, SSL=ssl, TEST_LOADBALANCER="true") + test_vars = dict(AUTH=auth, SSL=ssl, TEST_NAME="load_balancer") test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) + return tasks diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh index e5684b7cb4..5f9d447c3d 100755 --- a/.evergreen/scripts/run-atlas-tests.sh +++ b/.evergreen/scripts/run-atlas-tests.sh @@ -4,5 +4,5 @@ set +x set -o errexit bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect -TEST_ATLAS=1 bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test atlas bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh index 6c300325d2..21a7fef301 100755 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -5,5 +5,5 @@ set -eu set +x # Use the default python to bootstrap secrets. bash "${DRIVERS_TOOLS}"/.evergreen/secrets_handling/setup-secrets.sh drivers/enterprise_auth -TEST_ENTERPRISE_AUTH=1 AUTH=auth bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test enterprise_auth bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh index 8675c7c242..746ea4103a 100755 --- a/.evergreen/scripts/run-gcpkms-fail-test.sh +++ b/.evergreen/scripts/run-gcpkms-fail-test.sh @@ -2,5 +2,5 @@ set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) . $HERE/env.sh -SUCCESS=false TEST_FLE_GCP_AUTO=1 bash $HERE/setup-tests.sh +./.evergreen/just.sh setup-test kms gcp-fail bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-mockupdb-tests.sh b/.evergreen/scripts/run-mockupdb-tests.sh deleted file mode 100755 index 32594f05d3..0000000000 --- a/.evergreen/scripts/run-mockupdb-tests.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -o xtrace -export PYTHON_BINARY=${PYTHON_BINARY} -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-mockupdb diff --git a/.evergreen/scripts/run-mongodb-aws-test.sh b/.evergreen/scripts/run-mongodb-aws-test.sh index 255f84f295..917482eaa2 100755 --- a/.evergreen/scripts/run-mongodb-aws-test.sh +++ b/.evergreen/scripts/run-mongodb-aws-test.sh @@ -24,5 +24,5 @@ echo "Running MONGODB-AWS authentication tests for $1" # Handle credentials and environment setup. . "$DRIVERS_TOOLS"/.evergreen/auth_aws/aws_setup.sh "$1" -TEST_AUTH_AWS=1 AUTH="auth" bash ./.evergreen/just.sh setup-test +bash ./.evergreen/just.sh setup-test auth_aws $1 bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-ocsp-test.sh b/.evergreen/scripts/run-ocsp-test.sh index 2b9cbd476d..9c48867041 100755 --- a/.evergreen/scripts/run-ocsp-test.sh +++ b/.evergreen/scripts/run-ocsp-test.sh @@ -1,9 +1,12 @@ #!/bin/bash +set -eu -TEST_OCSP=1 \ -PYTHON_BINARY="${PYTHON_BINARY}" \ +pushd "${PROJECT_DIRECTORY}/.evergreen" +bash scripts/setup-dev-env.sh CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ -OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg + OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ + bash scripts/setup-tests.sh ocsp +bash run-tests.sh bash "${DRIVERS_TOOLS}"/.evergreen/ocsp/teardown.sh + +popd diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index b26dc3ae0e..04a377a2d2 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -3,7 +3,9 @@ set -eux HERE=$(dirname ${BASH_SOURCE:-$0}) -pushd "$(dirname "$(dirname $HERE)")" > /dev/null +HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" +ROOT=$(dirname "$(dirname $HERE)") +pushd $ROOT > /dev/null # Source the env files to pick up common variables. if [ -f $HERE/env.sh ]; then @@ -26,7 +28,7 @@ fi # Ensure there is a python venv. if [ ! -d $BIN_DIR ]; then - . .evergreen/utils.sh + . $ROOT/.evergreen/utils.sh if [ -z "${PYTHON_BINARY:-}" ]; then PYTHON_BINARY=$(find_python3) @@ -49,3 +51,5 @@ echo "Setting up python environment... done." if [ -d .git ] && [ ! -f .git/hooks/pre-commit ]; then uv run --frozen pre-commit install fi + +popd > /dev/null diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh index 330469711c..8e073dcec9 100755 --- a/.evergreen/scripts/setup-tests.sh +++ b/.evergreen/scripts/setup-tests.sh @@ -8,54 +8,15 @@ set -eu # COVERAGE If non-empty, run the test suite with coverage. # COMPRESSORS If non-empty, install appropriate compressor. # LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# TEST_DATA_LAKE If non-empty, run data lake tests. -# TEST_ENCRYPTION If non-empty, run encryption tests. # TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. -# TEST_SERVERLESS If non-empy, test on serverless. -# TEST_LOADBALANCER If non-empy, test load balancing. -# TEST_FLE_AZURE_AUTO If non-empy, test auto FLE on Azure -# TEST_FLE_GCP_AUTO If non-empy, test auto FLE on GCP -# TEST_PYOPENSSL If non-empy, test with PyOpenSSL -# TEST_ENTERPRISE_AUTH If non-empty, test with Enterprise Auth -# TEST_AUTH_AWS If non-empty, test AWS Auth Mechanism -# TEST_AUTH_OIDC If non-empty, test OIDC Auth Mechanism -# TEST_PERF If non-empty, run performance tests -# TEST_OCSP If non-empty, run OCSP tests -# TEST_ATLAS If non-empty, test Atlas connections -# TEST_INDEX_MANAGEMENT If non-empty, run index management tests -# TEST_ENCRYPTION_PYOPENSSL If non-empy, test encryption with PyOpenSSL -# PERF_TEST If non-empty, run the performance tests. +# MONGODB_API_VERSION The mongodb api version to use in tests. # MONGODB_URI If non-empty, use as the MONGODB_URI in tests. -# PYTHON_BINARY The python binary to use in tests. SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) -ROOT_DIR="$(dirname "$(dirname $SCRIPT_DIR)")" # Try to source the env file. if [ -f $SCRIPT_DIR/env.sh ]; then source $SCRIPT_DIR/env.sh fi -# Source serverless secrets if applicable. -if [ -n "${TEST_SERVERLESS:-}" ]; then - source $DRIVERS_TOOLS/.evergreen/serverless/secrets-export.sh -fi - -# Source atlas secrets if applicable. -if [ -n "${TEST_INDEX_MANAGEMENT:-}" ]; then - source $DRIVERS_TOOLS/.evergreen/atlas/secrets-export.sh -fi - -# Source ADL secrets if applicable. -if [ -n "${TEST_DATA_LAKE:-}" ]; then - source ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh -fi - -# Source local secrets if applicable. -if [ -f "$ROOT_DIR/secrets-export.sh" ]; then - source "$ROOT_DIR/secrets-export.sh" -fi - -. $ROOT_DIR/.evergreen/utils.sh -PYTHON=${PYTHON_BINARY:-$(find_python3)} -$PYTHON $SCRIPT_DIR/setup_tests.py +uv run $SCRIPT_DIR/setup_tests.py "$@" diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 07693d1e99..96c138b4ae 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -1,5 +1,6 @@ from __future__ import annotations +import argparse import base64 import dataclasses import io @@ -20,64 +21,52 @@ ROOT = HERE.parent.parent ENV_FILE = HERE / "test-env.sh" DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") -PLATFORM = "windows" if os.name == "nt" else sys.platform +PLATFORM = "windows" if os.name == "nt" else sys.platform.lower() -logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) - -EXPECTED_VARS = [ - "TEST_ENCRYPTION", - "TEST_ENCRYPTION_PYOPENSSL", - "TEST_CRYPT_SHARED", - "TEST_PYOPENSSL", - "TEST_LOAD_BALANCER", - "TEST_SERVERLESS", - "TEST_INDEX_MANAGEMENT", - "TEST_ENTERPRISE_AUTH", - "TEST_FLE_AZURE_AUTO", - "TEST_FLE_GCP_AUTO", - "TEST_LOADBALANCER", - "TEST_DATA_LAKE", - "TEST_ATLAS", - "TEST_OCSP", - "TEST_AUTH_AWS", - "TEST_AUTH_OIDC", - "COMPRESSORS", - "MONGODB_URI", - "PERF_TEST", - "GREEN_FRAMEWORK", - "PYTHON_BINARY", - "LIBMONGOCRYPT_URL", -] - -# Handle the test suite based on the presence of env variables. -TEST_SUITE_MAP = dict( - TEST_DATA_LAKE="data_lake", - TEST_AUTH_OIDC="auth_oidc", - TEST_INDEX_MANAGEMENT="index_management", - TEST_ENTERPRISE_AUTH="auth", - TEST_LOADBALANCER="load_balancer", - TEST_ENCRYPTION="encryption", - TEST_FLE_AZURE_AUTO="csfle", - TEST_FLE_GCP_AUTO="csfle", - TEST_ATLAS="atlas", - TEST_OCSP="ocsp", - TEST_AUTH_AWS="auth_aws", - PERF_TEST="perf", -) - -# Handle extras based on the presence of env variables. -EXTRAS_MAP = dict( - TEST_AUTH_OIDC="aws", - TEST_AUTH_AWS="aws", - TEST_OCSP="ocsp", - TEST_PYOPENSSL="ocsp", - TEST_ENTERPRISE_AUTH="gssapi", - TEST_ENCRYPTION="encryption", - TEST_FLE_AZURE_AUTO="encryption", - TEST_FLE_GCP_AUTO="encryption", - TEST_ENCRYPTION_PYOPENSSL="ocsp", -) +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") + +# Passthrough environment variables. +PASS_THROUGH_ENV = ["GREEN_FRAMEWORK", "NO_EXT", "MONGODB_API_VERSION"] + +# Map the test name to a test suite. +TEST_SUITE_MAP = { + "atlas": "atlas", + "auth_aws": "auth_aws", + "auth_oidc": "auth_oidc", + "data_lake": "data_lake", + "default": "", + "default_async": "default_async", + "default_sync": "default", + "encryption": "encryption", + "enterprise_auth": "auth", + "index_management": "index_management", + "kms": "csfle", + "load_balancer": "load_balancer", + "mockupdb": "mockupdb", + "pyopenssl": "", + "ocsp": "ocsp", + "perf": "perf", + "serverless": "", +} + +# Tests that require a sub test suite. +SUB_TEST_REQUIRED = ["auth_aws", "kms"] + +# Map the test name to test extra. +EXTRAS_MAP = { + "auth_aws": "aws", + "auth_oidc": "aws", + "encryption": "encryption", + "enterprise_auth": "gssapi", + "kms": "encryption", + "ocsp": "ocsp", + "pyopenssl": "ocsp", +} + + +# Map the test name to test group. +GROUP_MAP = dict(mockupdb="mockupdb", perf="perf") @dataclasses.dataclass @@ -87,7 +76,7 @@ class Distro: arch: str -def write_env(name: str, value: Any) -> None: +def write_env(name: str, value: Any = "1") -> None: with ENV_FILE.open("a", newline="\n") as fid: # Remove any existing quote chars. value = str(value).replace('"', "") @@ -105,6 +94,43 @@ def run_command(cmd: str) -> None: LOGGER.info("Running command %s... done.", cmd) +def read_env(path: Path | str) -> dict[str, Any]: + config = dict() + with Path(path).open() as fid: + for line in fid.readlines(): + if "=" not in line: + continue + name, _, value = line.strip().partition("=") + if value.startswith(('"', "'")): + value = value[1:-1] + name = name.replace("export ", "") + config[name] = value + return config + + +def get_options(): + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("test_name", choices=sorted(TEST_SUITE_MAP), nargs="?", default="default") + parser.add_argument("sub_test_name", nargs="?") + parser.add_argument( + "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level" + ) + parser.add_argument( + "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level" + ) + parser.add_argument("--auth", action="store_true", help="Whether to add authentication") + parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration") + # Get the options. + opts = parser.parse_args() + if opts.verbose: + LOGGER.setLevel(logging.DEBUG) + elif opts.quiet: + LOGGER.setLevel(logging.WARNING) + return opts + + def get_distro() -> Distro: name = "" version_id = "" @@ -169,55 +195,80 @@ def setup_libmongocrypt(): def handle_test_env() -> None: + opts = get_options() + test_name = opts.test_name + sub_test_name = opts.sub_test_name + if test_name in SUB_TEST_REQUIRED and not sub_test_name: + raise ValueError(f"Test '{test_name}' requires a sub_test_name") AUTH = os.environ.get("AUTH", "noauth") + if opts.auth or "auth" in test_name: + AUTH = "auth" + # 'auth_aws ecs' shouldn't have extra auth set. + if test_name == "auth_aws" and sub_test_name == "ecs": + AUTH = "noauth" SSL = os.environ.get("SSL", "nossl") - TEST_SUITES = os.environ.get("TEST_SUITES", "") + if opts.ssl: + SSL = "ssl" TEST_ARGS = "" + # Start compiling the args we'll pass to uv. # Run in an isolated environment so as not to pollute the base venv. UV_ARGS = ["--isolated --extra test"] - # Save variables in EXPECTED_VARS that have values. + test_title = test_name + if sub_test_name: + test_title += f" {sub_test_name}" + LOGGER.info(f"Setting up '{test_title}' with {AUTH=} and {SSL=}...") + + # Create the test env file with the initial set of values. with ENV_FILE.open("w", newline="\n") as fid: fid.write("#!/usr/bin/env bash\n") fid.write("set +x\n") - fid.write(f"export AUTH={AUTH}\n") - fid.write(f"export SSL={SSL}\n") - for var in EXPECTED_VARS: - value = os.environ.get(var, "") - # Remove any existing quote chars. - value = value.replace('"', "") - if value: - fid.write(f'export {var}="{value}"\n') ENV_FILE.chmod(ENV_FILE.stat().st_mode | stat.S_IEXEC) - for env_var, extra in EXTRAS_MAP.items(): - if env_var in os.environ: - UV_ARGS.append(f"--extra {extra}") + write_env("AUTH", AUTH) + write_env("SSL", SSL) + + # Skip CSOT tests on non-linux platforms. + if PLATFORM != "linux": + write_env("SKIP_CSOT_TESTS") - for env_var, suite in TEST_SUITE_MAP.items(): - if TEST_SUITES: - break - if env_var in os.environ: - TEST_SUITES = suite + # Set an environment variable for the test name and sub test name. + write_env(f"TEST_{test_name.upper()}") + write_env("SUB_TEST_NAME", sub_test_name) + + # Handle pass through env vars. + for var in PASS_THROUGH_ENV: + if is_set(var): + write_env(var, os.environ[var]) + + if extra := EXTRAS_MAP.get(test_name, ""): + UV_ARGS.append(f"--extra {extra}") + + if group := GROUP_MAP.get(test_name, ""): + UV_ARGS.append(f"--group {group}") if AUTH != "noauth": - if is_set("TEST_DATA_LAKE"): - DB_USER = os.environ["ADL_USERNAME"] - DB_PASSWORD = os.environ["ADL_PASSWORD"] - elif is_set("TEST_SERVERLESS"): - DB_USER = os.environ("SERVERLESS_ATLAS_USER") - DB_PASSWORD = os.environ("SERVERLESS_ATLAS_PASSWORD") - write_env("MONGODB_URI", os.environ("SERVERLESS_URI")) - write_env("SINGLE_MONGOS_LB_URI", os.environ("SERVERLESS_URI")) - write_env("MULTI_MONGOS_LB_URI", os.environ("SERVERLESS_URI")) - elif is_set("TEST_AUTH_OIDC"): + if test_name == "data_lake": + config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh") + DB_USER = config["ADL_USERNAME"] + DB_PASSWORD = config["ADL_PASSWORD"] + elif test_name == "serverless": + config = read_env(f"{DRIVERS_TOOLS}/.evergreen/serverless/secrets-export.sh") + DB_USER = config["SERVERLESS_ATLAS_USER"] + DB_PASSWORD = config["SERVERLESS_ATLAS_PASSWORD"] + write_env("MONGODB_URI", config["SERVERLESS_URI"]) + write_env("SINGLE_MONGOS_LB_URI", config["SERVERLESS_URI"]) + write_env("MULTI_MONGOS_LB_URI", config["SERVERLESS_URI"]) + elif test_name == "auth_oidc": DB_USER = os.environ["OIDC_ADMIN_USER"] DB_PASSWORD = os.environ["OIDC_ADMIN_PWD"] write_env("DB_IP", os.environ["MONGODB_URI"]) - elif is_set("TEST_INDEX_MANAGEMENT"): - DB_USER = os.environ["DRIVERS_ATLAS_LAMBDA_USER"] - DB_PASSWORD = os.environ["DRIVERS_ATLAS_LAMBDA_PASSWORD"] + elif test_name == "index_management": + config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas/secrets-export.sh") + DB_USER = config["DRIVERS_ATLAS_LAMBDA_USER"] + DB_PASSWORD = config["DRIVERS_ATLAS_LAMBDA_PASSWORD"] + write_env("MONGODB_URI", config["MONGODB_URI"]) else: DB_USER = "bob" DB_PASSWORD = "pwd123" # noqa: S105 @@ -225,16 +276,17 @@ def handle_test_env() -> None: write_env("DB_PASSWORD", DB_PASSWORD) LOGGER.info("Added auth, DB_USER: %s", DB_USER) - if is_set("MONGODB_STARTED"): + if is_set("MONGODB_URI"): write_env("PYMONGO_MUST_CONNECT", "true") if is_set("DISABLE_TEST_COMMANDS"): write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") - if is_set("TEST_ENTERPRISE_AUTH"): + if test_name == "enterprise_auth": + config = read_env(f"{ROOT}/secrets-export.sh") if PLATFORM == "windows": LOGGER.info("Setting GSSAPI_PASS") - write_env("GSSAPI_PASS", os.environ["SASL_PASS"]) + write_env("GSSAPI_PASS", config["SASL_PASS"]) write_env("GSSAPI_CANONICALIZE", "true") else: # BUILD-3830 @@ -242,23 +294,22 @@ def handle_test_env() -> None: krb_conf.touch() write_env("KRB5_CONFIG", krb_conf) LOGGER.info("Writing keytab") - keytab = base64.b64decode(os.environ["KEYTAB_BASE64"]) + keytab = base64.b64decode(config["KEYTAB_BASE64"]) keytab_file = ROOT / ".evergreen/drivers.keytab" with keytab_file.open("wb") as fid: fid.write(keytab) - principal = os.environ["PRINCIPAL"] + principal = config["PRINCIPAL"] LOGGER.info("Running kinit") os.environ["KRB5_CONFIG"] = str(krb_conf) cmd = f"kinit -k -t {keytab_file} -p {principal}" run_command(cmd) LOGGER.info("Setting GSSAPI variables") - write_env("GSSAPI_HOST", os.environ["SASL_HOST"]) - write_env("GSSAPI_PORT", os.environ["SASL_PORT"]) - write_env("GSSAPI_PRINCIPAL", os.environ["PRINCIPAL"]) + write_env("GSSAPI_HOST", config["SASL_HOST"]) + write_env("GSSAPI_PORT", config["SASL_PORT"]) + write_env("GSSAPI_PRINCIPAL", config["PRINCIPAL"]) - if is_set("TEST_LOADBALANCER"): - write_env("LOAD_BALANCER", "1") + if test_name == "load_balancer": SINGLE_MONGOS_LB_URI = os.environ.get( "SINGLE_MONGOS_LB_URI", "mongodb://127.0.0.1:8000/?loadBalanced=true" ) @@ -285,9 +336,9 @@ def handle_test_env() -> None: if compressors == "snappy": UV_ARGS.append("--extra snappy") elif compressors == "zstd": - UV_ARGS.append("--extra zstandard") + UV_ARGS.append("--extra zstd") - if is_set("TEST_ENCRYPTION") or is_set("TEST_FLE_AZURE_AUTO") or is_set("TEST_FLE_GCP_AUTO"): + if test_name in ["encryption", "kms"]: # Check for libmongocrypt download. if not (ROOT / "libmongocrypt").exists(): setup_libmongocrypt() @@ -311,14 +362,18 @@ def handle_test_env() -> None: write_env("PYMONGOCRYPT_LIB", PYMONGOCRYPT_LIB.as_posix()) # PATH is updated by configure-env.sh for access to mongocryptd. - if is_set("TEST_ENCRYPTION"): + if test_name == "encryption": if not DRIVERS_TOOLS: raise RuntimeError("Missing DRIVERS_TOOLS") run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/setup-secrets.sh") run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/start-servers.sh") + if sub_test_name == "pyopenssl": + UV_ARGS.append("--extra ocsp") + if is_set("TEST_CRYPT_SHARED"): - CRYPT_SHARED_DIR = Path(os.environ["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() + config = read_env(f"{DRIVERS_TOOLS}/mo-expansion.sh") + CRYPT_SHARED_DIR = Path(config["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) if PLATFORM == "windows": write_env("PATH", f"{CRYPT_SHARED_DIR}:$PATH") @@ -329,21 +384,25 @@ def handle_test_env() -> None: ) write_env("LD_LIBRARY_PATH", f"{CRYPT_SHARED_DIR}:${{LD_LIBRARY_PATH:-}}") - if is_set("TEST_FLE_AZURE_AUTO") or is_set("TEST_FLE_GCP_AUTO"): - if "SUCCESS" not in os.environ: - raise RuntimeError("Must define SUCCESS") + if test_name == "kms": + if sub_test_name.startswith("azure"): + write_env("TEST_FLE_AZURE_AUTO") + else: + write_env("TEST_FLE_GCP_AUTO") - write_env("SUCCESS", os.environ["SUCCESS"]) + write_env("SUCCESS", "fail" not in sub_test_name) MONGODB_URI = os.environ.get("MONGODB_URI", "") if "@" in MONGODB_URI: raise RuntimeError("MONGODB_URI unexpectedly contains user credentials in FLE test!") - if is_set("TEST_OCSP"): + if test_name == "ocsp": write_env("CA_FILE", os.environ["CA_FILE"]) write_env("OCSP_TLS_SHOULD_SUCCEED", os.environ["OCSP_TLS_SHOULD_SUCCEED"]) - if is_set("PERF_TEST"): - UV_ARGS.append("--group perf") + if test_name == "auth_aws": + write_env("MONGODB_URI", os.environ["MONGODB_URI"]) + + if test_name == "perf": # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively # affects the benchmark results. TEST_ARGS = f"test/performance/perf_test.py {TEST_ARGS}" @@ -355,6 +414,7 @@ def handle_test_env() -> None: # coverage >=5 is needed for relative_files=true. UV_ARGS.append("--group coverage") TEST_ARGS = f"{TEST_ARGS} --cov" + write_env("COVERAGE") if is_set("GREEN_FRAMEWORK"): framework = os.environ["GREEN_FRAMEWORK"] @@ -364,12 +424,15 @@ def handle_test_env() -> None: # Use --capture=tee-sys so pytest prints test output inline: # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html TEST_ARGS = f"-v --capture=tee-sys --durations=5 {TEST_ARGS}" - if TEST_SUITES: - TEST_ARGS = f"-m {TEST_SUITES} {TEST_ARGS}" + TEST_SUITE = TEST_SUITE_MAP[test_name] + if TEST_SUITE: + TEST_ARGS = f"-m {TEST_SUITE} {TEST_ARGS}" write_env("TEST_ARGS", TEST_ARGS) write_env("UV_ARGS", " ".join(UV_ARGS)) + LOGGER.info(f"Setting up test '{test_title}' with {AUTH=} and {SSL=}... done.") + if __name__ == "__main__": handle_test_env() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ce5bebf181..0b8b77fc6e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -212,7 +212,7 @@ the pages will re-render and the browser will automatically refresh. - Start the servers using `LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh`. - Set up the test using: - `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' TEST_LOADBALANCER=1 just setup-test`. + `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' just setup-test load-balancer`. - Run the tests from the `pymongo` checkout directory using: `just test-eg`. @@ -220,7 +220,7 @@ the pages will re-render and the browser will automatically refresh. - Clone `drivers-evergreen-tools`: `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. - Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools` -- Run `TEST_ENCRYPTION=1 AWS_PROFILE= just setup-test` after setting up your AWS profile with `aws configure sso`. +- Run `AWS_PROFILE= just setup-test encryption` after setting up your AWS profile with `aws configure sso`. - Run the tests with `just test-eg`. - When done, run `just teardown-test` to clean up. diff --git a/justfile b/justfile index 3840484bc5..5de578ecc7 100644 --- a/justfile +++ b/justfile @@ -60,17 +60,13 @@ lint-manual: test *args="-v --durations=5 --maxfail=10": {{uv_run}} --extra test pytest {{args}} -[group('test')] -test-mockupdb *args: - {{uv_run}} -v --extra test --group mockupdb pytest -m mockupdb {{args}} - [group('test')] test-eg *args: bash ./.evergreen/run-tests.sh {{args}} [group('test')] -setup-test: - bash .evergreen/scripts/setup-tests.sh +setup-test *args="": + bash .evergreen/scripts/setup-tests.sh {{args}} [group('test')] teardown-test: diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py index 6a195eb6b8..afca1c0b26 100644 --- a/test/asynchronous/test_client_context.py +++ b/test/asynchronous/test_client_context.py @@ -47,20 +47,14 @@ def test_serverless(self): ) def test_enableTestCommands_is_disabled(self): - if not os.environ.get("PYMONGO_DISABLE_TEST_COMMANDS"): - raise SkipTest("PYMONGO_DISABLE_TEST_COMMANDS is not set") + if not os.environ.get("DISABLE_TEST_COMMANDS"): + raise SkipTest("DISABLE_TEST_COMMANDS is not set") self.assertFalse( async_client_context.test_commands_enabled, - "enableTestCommands must be disabled when PYMONGO_DISABLE_TEST_COMMANDS is set.", + "enableTestCommands must be disabled when DISABLE_TEST_COMMANDS is set.", ) - def test_setdefaultencoding_worked(self): - if not os.environ.get("SETDEFAULTENCODING"): - raise SkipTest("SETDEFAULTENCODING is not set") - - self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) - def test_free_threading_is_enabled(self): if "free-threading build" not in sys.version: raise SkipTest("this test requires the Python free-threading build") diff --git a/test/asynchronous/test_data_lake.py b/test/asynchronous/test_data_lake.py index 0b259fb0d0..e67782ad3f 100644 --- a/test/asynchronous/test_data_lake.py +++ b/test/asynchronous/test_data_lake.py @@ -38,10 +38,6 @@ class TestDataLakeMustConnect(AsyncUnitTest): async def test_connected_to_data_lake(self): - data_lake = os.environ.get("TEST_DATA_LAKE") - if not data_lake: - self.skipTest("TEST_DATA_LAKE is not set") - self.assertTrue( async_client_context.is_data_lake and async_client_context.connected, "client context must be connected to data lake when DATA_LAKE is set. Failed attempts:\n{}".format( diff --git a/test/asynchronous/test_index_management.py b/test/asynchronous/test_index_management.py index 2920c48b2f..c155047089 100644 --- a/test/asynchronous/test_index_management.py +++ b/test/asynchronous/test_index_management.py @@ -51,8 +51,6 @@ class TestCreateSearchIndex(AsyncIntegrationTest): async def test_inputs(self): - if not os.environ.get("TEST_INDEX_MANAGEMENT"): - raise unittest.SkipTest("Skipping index management tests") listener = AllowListEventListener("createSearchIndexes") client = self.simple_client(event_listeners=[listener]) coll = client.test.test @@ -90,8 +88,6 @@ class SearchIndexIntegrationBase(AsyncPyMongoTestCase): @classmethod def setUpClass(cls) -> None: - if not os.environ.get("TEST_INDEX_MANAGEMENT"): - raise unittest.SkipTest("Skipping index management tests") cls.url = os.environ.get("MONGODB_URI") cls.username = os.environ["DB_USER"] cls.password = os.environ["DB_PASSWORD"] diff --git a/test/test_client_context.py b/test/test_client_context.py index e807ac5f5f..ef3633a8b0 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -47,20 +47,14 @@ def test_serverless(self): ) def test_enableTestCommands_is_disabled(self): - if not os.environ.get("PYMONGO_DISABLE_TEST_COMMANDS"): - raise SkipTest("PYMONGO_DISABLE_TEST_COMMANDS is not set") + if not os.environ.get("DISABLE_TEST_COMMANDS"): + raise SkipTest("DISABLE_TEST_COMMANDS is not set") self.assertFalse( client_context.test_commands_enabled, - "enableTestCommands must be disabled when PYMONGO_DISABLE_TEST_COMMANDS is set.", + "enableTestCommands must be disabled when DISABLE_TEST_COMMANDS is set.", ) - def test_setdefaultencoding_worked(self): - if not os.environ.get("SETDEFAULTENCODING"): - raise SkipTest("SETDEFAULTENCODING is not set") - - self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) - def test_free_threading_is_enabled(self): if "free-threading build" not in sys.version: raise SkipTest("this test requires the Python free-threading build") diff --git a/test/test_data_lake.py b/test/test_data_lake.py index 797ef85000..c8b76eb1ca 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -38,10 +38,6 @@ class TestDataLakeMustConnect(UnitTest): def test_connected_to_data_lake(self): - data_lake = os.environ.get("TEST_DATA_LAKE") - if not data_lake: - self.skipTest("TEST_DATA_LAKE is not set") - self.assertTrue( client_context.is_data_lake and client_context.connected, "client context must be connected to data lake when DATA_LAKE is set. Failed attempts:\n{}".format( diff --git a/test/test_index_management.py b/test/test_index_management.py index 5135e43f1f..e4b931cf00 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -51,8 +51,6 @@ class TestCreateSearchIndex(IntegrationTest): def test_inputs(self): - if not os.environ.get("TEST_INDEX_MANAGEMENT"): - raise unittest.SkipTest("Skipping index management tests") listener = AllowListEventListener("createSearchIndexes") client = self.simple_client(event_listeners=[listener]) coll = client.test.test @@ -90,8 +88,6 @@ class SearchIndexIntegrationBase(PyMongoTestCase): @classmethod def setUpClass(cls) -> None: - if not os.environ.get("TEST_INDEX_MANAGEMENT"): - raise unittest.SkipTest("Skipping index management tests") cls.url = os.environ.get("MONGODB_URI") cls.username = os.environ["DB_USER"] cls.password = os.environ["DB_PASSWORD"] From f27e8e123a2740b955ea6af64b53b09dfb3cab41 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 24 Feb 2025 09:14:10 -0600 Subject: [PATCH 1756/2111] PYTHON-5149 Convert run-tests.sh to a Python script (#2155) --- .evergreen/config.yml | 2 +- .evergreen/run-tests.sh | 70 ++-------------- .evergreen/scripts/run_tests.py | 119 +++++++++++++++++++++++++++ .evergreen/scripts/setup_tests.py | 3 + .evergreen/scripts/teardown-tests.sh | 2 +- green_framework_test.py | 117 -------------------------- pyproject.toml | 1 - test/asynchronous/helpers.py | 2 +- test/helpers.py | 2 +- tools/fail_if_no_c.py | 44 +++++----- 10 files changed, 151 insertions(+), 211 deletions(-) create mode 100644 .evergreen/scripts/run_tests.py delete mode 100644 green_framework_test.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 72cab17dc9..d00261cc7e 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -262,7 +262,7 @@ functions: params: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, - DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS] + DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION] binary: bash working_dir: "src" args: [.evergreen/just.sh, setup-test, "${TEST_NAME}", "${SUB_TEST_NAME}"] diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index f3c71b41ff..e1b3c779ff 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,5 +1,5 @@ #!/bin/bash -set -eux +set -eu SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )" @@ -7,10 +7,6 @@ ROOT_DIR="$(dirname $SCRIPT_DIR)" pushd $ROOT_DIR -export PIP_QUIET=1 # Quiet by default -export PIP_PREFER_BINARY=1 # Prefer binary dists by default -export UV_FROZEN=1 # Do not modify lock files - # Try to source the env file. if [ -f $SCRIPT_DIR/scripts/env.sh ]; then echo "Sourcing env inputs" @@ -25,74 +21,18 @@ if [ -f $SCRIPT_DIR/scripts/test-env.sh ]; then . $SCRIPT_DIR/scripts/test-env.sh else echo "Missing test inputs, please run 'just setup-test'" + exit 1 fi - # Source the local secrets export file if available. if [ -f "./secrets-export.sh" ]; then . "./secrets-export.sh" fi -PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") - -# Ensure C extensions if applicable. -if [ -z "${NO_EXT:-}" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - uv run --frozen tools/fail_if_no_c.py -fi - -if [ -n "${PYMONGOCRYPT_LIB:-}" ]; then - # Ensure pymongocrypt is working properly. - # shellcheck disable=SC2048 - uv run ${UV_ARGS} python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" - # shellcheck disable=SC2048 - uv run ${UV_ARGS} python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" - # PATH is updated by configure-env.sh for access to mongocryptd. -fi - -PYTHON_IMPL=$(uv run python -c "import platform; print(platform.python_implementation())") -echo "Running ${AUTH:-noauth} tests over ${SSL:-nossl} with python $(uv python find)" -uv run python -c 'import sys; print(sys.version)' - -# Show the installed packages -# shellcheck disable=SC2048 +# List the packages. PIP_QUIET=0 uv run ${UV_ARGS} --with pip pip list -# Record the start time for a perf test. -if [ -n "${TEST_PERF:-}" ]; then - start_time=$(date +%s) -fi - -# Run the tests, and store the results in Evergreen compatible XUnit XML -# files in the xunit-results/ directory. -TEST_ARGS=${TEST_ARGS} -if [ "$#" -ne 0 ]; then - TEST_ARGS="$*" -fi -echo "Running tests with $TEST_ARGS and uv args $UV_ARGS..." -if [ -z "${GREEN_FRAMEWORK:-}" ]; then - # shellcheck disable=SC2048 - uv run ${UV_ARGS} pytest $TEST_ARGS -else - # shellcheck disable=SC2048 - uv run ${UV_ARGS} green_framework_test.py $GREEN_FRAMEWORK -v $TEST_ARGS -fi -echo "Running tests with $TEST_ARGS... done." - -# Handle perf test post actions. -if [ -n "${TEST_PERF:-}" ]; then - end_time=$(date +%s) - elapsed_secs=$((end_time-start_time)) - - cat results.json - - echo "{\"failures\": 0, \"results\": [{\"status\": \"pass\", \"exit_code\": 0, \"test_file\": \"BenchMarkTests\", \"start\": $start_time, \"end\": $end_time, \"elapsed\": $elapsed_secs}]}" > report.json - - cat report.json -fi - -# Handle coverage post actions. -if [ -n "${COVERAGE:-}" ]; then - rm -rf .pytest_cache -fi +# Start the test runner. +uv run ${UV_ARGS} .evergreen/scripts/run_tests.py popd diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py new file mode 100644 index 0000000000..e41691ca81 --- /dev/null +++ b/.evergreen/scripts/run_tests.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import json +import logging +import os +import platform +import shutil +import sys +from datetime import datetime +from pathlib import Path + +import pytest + +HERE = Path(__file__).absolute().parent +ROOT = HERE.parent.parent +AUTH = os.environ.get("AUTH", "noauth") +SSL = os.environ.get("SSL", "nossl") +UV_ARGS = os.environ.get("UV_ARGS", "") +TEST_PERF = os.environ.get("TEST_PERF") +GREEN_FRAMEWORK = os.environ.get("GREEN_FRAMEWORK") +TEST_ARGS = os.environ.get("TEST_ARGS", "").split() + +LOGGER = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") + + +def handle_perf(start_time: datetime): + end_time = datetime.now() + elapsed_secs = (end_time - start_time).total_seconds() + with open("results.json") as fid: + results = json.load(fid) + LOGGER.info("results.json:\n%s", json.dumps(results, indent=2)) + + results = dict( + status="PASS", + exit_code=0, + test_file="BenchMarkTests", + start=int(start_time.timestamp()), + end=int(end_time.timestamp()), + elapsed=elapsed_secs, + ) + report = dict(failures=0, results=[results]) + LOGGER.info("report.json\n%s", json.dumps(report, indent=2)) + + with open("report.json", "w", newline="\n") as fid: + json.dump(report, fid) + + +def handle_green_framework() -> None: + if GREEN_FRAMEWORK == "eventlet": + import eventlet + + # https://github.com/eventlet/eventlet/issues/401 + eventlet.sleep() + eventlet.monkey_patch() + elif GREEN_FRAMEWORK == "gevent": + from gevent import monkey + + monkey.patch_all() + + # Never run async tests with a framework. + if len(TEST_ARGS) <= 1: + TEST_ARGS.extend(["-m", "not default_async and default"]) + else: + for i in range(len(TEST_ARGS) - 1): + if "-m" in TEST_ARGS[i]: + TEST_ARGS[i + 1] = f"not default_async and {TEST_ARGS[i + 1]}" + + LOGGER.info(f"Running tests with {GREEN_FRAMEWORK}...") + + +def handle_c_ext() -> None: + if platform.python_implementation() != "CPython": + return + sys.path.insert(0, str(ROOT / "tools")) + from fail_if_no_c import main as fail_if_no_c + + fail_if_no_c() + + +def handle_pymongocrypt() -> None: + import pymongocrypt + + LOGGER.info(f"pymongocrypt version: {pymongocrypt.__version__})") + LOGGER.info(f"libmongocrypt version: {pymongocrypt.libmongocrypt_version()})") + + +def run() -> None: + # Handle green framework first so they can patch modules. + if GREEN_FRAMEWORK: + handle_green_framework() + + # Ensure C extensions if applicable. + if not os.environ.get("NO_EXT"): + handle_c_ext() + + if os.environ.get("PYMONGOCRYPT_LIB"): + handle_pymongocrypt() + + LOGGER.info(f"Test setup:\n{AUTH=}\n{SSL=}\n{UV_ARGS=}\n{TEST_ARGS=}") + + # Record the start time for a perf test. + if TEST_PERF: + start_time = datetime.now() + + # Run the tests. + pytest.main(TEST_ARGS) + + # Handle perf test post actions. + if TEST_PERF: + handle_perf(start_time) + + # Handle coverage post actions. + if os.environ.get("COVERAGE"): + shutil.rmtree(".pytest_cache", ignore_errors=True) + + +if __name__ == "__main__": + run() diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 96c138b4ae..78bfad7224 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -228,6 +228,9 @@ def handle_test_env() -> None: write_env("AUTH", AUTH) write_env("SSL", SSL) + write_env("PIP_QUIET") # Quiet by default. + write_env("PIP_PREFER_BINARY") # Prefer binary dists by default. + write_env("UV_FROZEN") # Do not modify lock files. # Skip CSOT tests on non-linux platforms. if PLATFORM != "linux": diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh index 9c78c0965c..be1b88390f 100755 --- a/.evergreen/scripts/teardown-tests.sh +++ b/.evergreen/scripts/teardown-tests.sh @@ -24,6 +24,6 @@ if [ -n "${TEST_ENCRYPTION:-}" ]; then fi # Shut down load balancer if applicable. -if [ -n "${TEST_LOADBALANCER:-}" ]; then +if [ -n "${TEST_LOAD_BALANCER:-}" ]; then bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh stop fi diff --git a/green_framework_test.py b/green_framework_test.py deleted file mode 100644 index 037d0279c3..0000000000 --- a/green_framework_test.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test PyMongo with a variety of greenlet-based monkey-patching frameworks.""" -from __future__ import annotations - -import getopt -import sys - -import pytest - - -def run_gevent(): - """Prepare to run tests with Gevent. Can raise ImportError.""" - from gevent import monkey - - monkey.patch_all() - - -def run_eventlet(): - """Prepare to run tests with Eventlet. Can raise ImportError.""" - import eventlet - - # https://github.com/eventlet/eventlet/issues/401 - eventlet.sleep() - eventlet.monkey_patch() - - -FRAMEWORKS = { - "gevent": run_gevent, - "eventlet": run_eventlet, -} - - -def list_frameworks(): - """Tell the user what framework names are valid.""" - sys.stdout.write( - """Testable frameworks: %s - -Note that membership in this list means the framework can be tested with -PyMongo, not necessarily that it is officially supported. -""" - % ", ".join(sorted(FRAMEWORKS)) - ) - - -def run(framework_name, *args): - """Run tests with monkey-patching enabled. Can raise ImportError.""" - # Monkey-patch. - FRAMEWORKS[framework_name]() - - arg_list = list(args) - - # Never run async tests with a framework - if len(arg_list) <= 1: - arg_list.extend(["-m", "not default_async and default"]) - else: - for i in range(len(arg_list) - 1): - if "-m" in arg_list[i]: - arg_list[i + 1] = f"not default_async and {arg_list[i + 1]}" - - # Run the tests. - sys.exit(pytest.main(arg_list)) - - -def main(): - """Parse options and run tests.""" - usage = f"""python {sys.argv[0]} FRAMEWORK_NAME - -Test PyMongo with a variety of greenlet-based monkey-patching frameworks. See -python {sys.argv[0]} --help-frameworks.""" - - try: - opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "help-frameworks"]) - except getopt.GetoptError as err: - print(str(err)) - print(usage) - sys.exit(2) - - for option_name, _ in opts: - if option_name in ("-h", "--help"): - print(usage) - sys.exit() - elif option_name == "--help-frameworks": - list_frameworks() - sys.exit() - else: - raise AssertionError("unhandled option") - - if not args: - print(usage) - sys.exit(1) - - if args[0] not in FRAMEWORKS: - print("%r is not a testable framework.\n" % args[0]) - list_frameworks() - sys.exit(1) - - run( - args[0], - *args[1:], # Framework name. - ) # Command line args to pytest, like what test to run. - - -if __name__ == "__main__": - main() diff --git a/pyproject.toml b/pyproject.toml index 69249ee4c6..b86e9df6ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -234,7 +234,6 @@ dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?)|dummy.*)$" "RET", "ARG", "F405", "B028", "PGH001", "B018", "F403", "RUF015", "E731", "B007", "UP031", "F401", "B023", "F811"] "tools/*.py" = ["T201"] -"green_framework_test.py" = ["T201"] "hatch_build.py" = ["S"] "_setup.py" = ["SIM112"] diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index 28260d0a52..98e00e9385 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -81,7 +81,7 @@ COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") -TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") diff --git a/test/helpers.py b/test/helpers.py index 3f51fde08c..627be182b5 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -81,7 +81,7 @@ COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") -TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 6848e155aa..64280a81d2 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -18,34 +18,30 @@ """ from __future__ import annotations -import os -import subprocess +import logging import sys -from pathlib import Path + +LOGGER = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") sys.path[0:0] = [""] import bson # noqa: E402 import pymongo # noqa: E402 -if not pymongo.has_c() or not bson.has_c(): - try: - from pymongo import _cmessage # type:ignore[attr-defined] # noqa: F401 - except Exception as e: - print(e) - try: - from bson import _cbson # type:ignore[attr-defined] # noqa: F401 - except Exception as e: - print(e) - sys.exit("could not load C extensions") - -if os.environ.get("ENSURE_UNIVERSAL2") == "1": - parent_dir = Path(pymongo.__path__[0]).parent - for pkg in ["pymongo", "bson", "grifs"]: - for so_file in Path(f"{parent_dir}/{pkg}").glob("*.so"): - print(f"Checking universal2 compatibility in {so_file}...") - output = subprocess.check_output(["file", so_file]) # noqa: S603, S607 - if "arm64" not in output.decode("utf-8"): - sys.exit("Universal wheel was not compiled with arm64 support") - if "x86_64" not in output.decode("utf-8"): - sys.exit("Universal wheel was not compiled with x86_64 support") + +def main() -> None: + if not pymongo.has_c() or not bson.has_c(): + try: + from pymongo import _cmessage # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + LOGGER.exception(e) + try: + from bson import _cbson # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + LOGGER.exception(e) + sys.exit("could not load C extensions") + + +if __name__ == "__main__": + main() From 0ac56a3019dd9f913aa4083f9311d93ad310a793 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 25 Feb 2025 11:12:42 -0800 Subject: [PATCH 1757/2111] PYTHON-5155 Fix FAQ link for fork safety (#2156) --- doc/changelog.rst | 2 +- pymongo/asynchronous/topology.py | 4 +--- pymongo/synchronous/topology.py | 4 +--- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 1f3efb8ad0..ee66bb178f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -189,7 +189,7 @@ Issues Resolved See the `PyMongo 4.9 release notes in JIRA`_ for the list of resolved issues in this release. -.. _Is PyMongo Fork-Safe : https://www.mongodb.com/docs/languages/python/pymongo-driver/current/faq/#is-pymongo-fork-safe- +.. _Is PyMongo Fork-Safe: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/troubleshooting/#forking-a-process-causes-a-deadlock .. _PyMongo 4.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39940 diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 3033377de5..19fc76b0d3 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -236,9 +236,7 @@ async def open(self) -> None: warnings.warn( # type: ignore[call-overload] # noqa: B028 "AsyncMongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " - "https://www.mongodb.com/docs/languages/" - "python/pymongo-driver/current/faq/" - "#is-pymongo-fork-safe-", + "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/troubleshooting/#forking-a-process-causes-a-deadlock", **kwargs, ) async with self._lock: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 09b61f6d05..6a8503c6c0 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -236,9 +236,7 @@ def open(self) -> None: warnings.warn( # type: ignore[call-overload] # noqa: B028 "MongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " - "https://www.mongodb.com/docs/languages/" - "python/pymongo-driver/current/faq/" - "#is-pymongo-fork-safe-", + "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/troubleshooting/#forking-a-process-causes-a-deadlock", **kwargs, ) with self._lock: From eaae22c63ba6673460d18c7782b65716f9e18ea4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Feb 2025 16:31:02 -0600 Subject: [PATCH 1758/2111] PYTHON-5151 Convert kms tests to use python scripts (#2158) Co-authored-by: Noah Stapp --- .evergreen/config.yml | 120 ++-------------- .evergreen/generated_configs/tasks.yml | 28 ++++ .evergreen/run-azurekms-fail-test.sh | 9 -- .evergreen/run-azurekms-test.sh | 27 ---- .evergreen/run-gcpkms-test.sh | 24 ---- .evergreen/run-mongodb-aws-ecs-test.sh | 4 +- .evergreen/run-mongodb-oidc-test.sh | 4 +- .evergreen/run-perf-tests.sh | 4 +- .evergreen/run-tests.sh | 2 +- .evergreen/scripts/__init__.py | 0 .../scripts/bootstrap-mongo-orchestration.sh | 8 +- .evergreen/scripts/cleanup.sh | 3 - .evergreen/scripts/generate_config.py | 19 +++ .evergreen/scripts/kms_tester.py | 130 ++++++++++++++++++ .evergreen/scripts/run-atlas-tests.sh | 4 +- .../scripts/run-enterprise-auth-tests.sh | 4 +- .evergreen/scripts/run-gcpkms-fail-test.sh | 6 - .evergreen/scripts/run-mongodb-aws-test.sh | 4 +- .evergreen/scripts/run_tests.py | 19 +-- .evergreen/scripts/setup-dev-env.sh | 2 +- .evergreen/scripts/setup_tests.py | 82 ++++------- .evergreen/scripts/teardown-tests.sh | 34 ++--- .evergreen/scripts/teardown_tests.py | 26 ++++ .evergreen/scripts/utils.py | 54 ++++++++ CONTRIBUTING.md | 10 +- justfile | 6 +- pyproject.toml | 2 +- test/asynchronous/test_on_demand_csfle.py | 2 +- test/test_on_demand_csfle.py | 2 +- 29 files changed, 348 insertions(+), 291 deletions(-) delete mode 100755 .evergreen/run-azurekms-fail-test.sh delete mode 100755 .evergreen/run-azurekms-test.sh delete mode 100755 .evergreen/run-gcpkms-test.sh create mode 100644 .evergreen/scripts/__init__.py create mode 100644 .evergreen/scripts/kms_tester.py delete mode 100755 .evergreen/scripts/run-gcpkms-fail-test.sh create mode 100644 .evergreen/scripts/teardown_tests.py create mode 100644 .evergreen/scripts/utils.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d00261cc7e..4c12e8bdb2 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -265,13 +265,13 @@ functions: DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION] binary: bash working_dir: "src" - args: [.evergreen/just.sh, setup-test, "${TEST_NAME}", "${SUB_TEST_NAME}"] + args: [.evergreen/just.sh, setup-tests, "${TEST_NAME}", "${SUB_TEST_NAME}"] - command: subprocess.exec type: test params: working_dir: "src" binary: bash - args: [.evergreen/just.sh, test-eg] + args: [.evergreen/just.sh, run-tests] "run enterprise auth tests": - command: subprocess.exec @@ -443,13 +443,12 @@ functions: binary: bash working_dir: "src" args: - - ${DRIVERS_TOOLS}/.evergreen/csfle/teardown.sh + - ${DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh - command: subprocess.exec params: - binary: bash - working_dir: "src" - args: - - ${DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh + binary: bash + working_dir: "src" + args: [.evergreen/just.sh, teardown-tests] - command: subprocess.exec params: binary: bash @@ -562,51 +561,6 @@ task_groups: tasks: - ".serverless" - - name: testgcpkms_task_group - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 # 30 minutes - setup_group: - - func: fetch source - - func: setup system - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/create-and-setup-instance.sh - teardown_task: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/delete-instance.sh - - func: "upload test results" - tasks: - - testgcpkms-task - - - name: testazurekms_task_group - setup_group: - - func: fetch source - - func: setup system - - command: subprocess.exec - params: - binary: bash - env: - AZUREKMS_VMNAME_PREFIX: "PYTHON_DRIVER" - args: - - ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/create-and-setup-vm.sh - teardown_group: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/delete-vm.sh - - func: "upload test results" - setup_group_can_fail_task: true - teardown_task_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - testazurekms-task - - name: testazureoidc_task_group setup_group: - func: fetch source @@ -1395,60 +1349,6 @@ tasks: commands: - func: "download and merge coverage" - - name: "testgcpkms-task" - commands: - - command: subprocess.exec - type: test - params: - working_dir: "src" - binary: bash - include_expansions_in_env: ["DRIVERS_TOOLS"] - args: - - .evergreen/run-gcpkms-test.sh - - - name: "testgcpkms-fail-task" - # testgcpkms-fail-task runs in a non-GCE environment. - # It is expected to fail to obtain GCE credentials. - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["PYTHON_BINARY"] - working_dir: "src" - binary: "bash" - args: - - .evergreen/scripts/run-gcpkms-fail-test.sh - - - name: testazurekms-task - commands: - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: src - include_expansions_in_env: ["DRIVERS_TOOLS"] - args: - - .evergreen/run-azurekms-test.sh - - - name: testazurekms-fail-task - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: src - include_expansions_in_env: ["DRIVERS_TOOLS"] - args: - - .evergreen/run-azurekms-fail-test.sh - - name: "perf-6.0-standalone" tags: ["perf"] commands: @@ -1529,12 +1429,12 @@ buildvariants: run_on: - debian11-small tasks: - - name: testgcpkms_task_group + - name: test-gcpkms batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - - testgcpkms-fail-task - - name: testazurekms_task_group + - name: test-gcpkms-fail + - name: test-azurekms batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README - - testazurekms-fail-task + - name: test-azurekms-fail - name: rhel8-test-lambda display_name: FaaS Lambda diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 5495ad3470..b7aab80b30 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1,4 +1,32 @@ tasks: + # Kms tests + - name: test-gcpkms + commands: + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: gcp + - name: test-gcpkms-fail + commands: + - func: bootstrap mongo-orchestration + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: gcp-fail + - name: test-azurekms + commands: + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: azure + - name: test-azurekms-fail + commands: + - func: bootstrap mongo-orchestration + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: azure-fail + # Load balancer tests - name: test-load-balancer-auth-ssl commands: diff --git a/.evergreen/run-azurekms-fail-test.sh b/.evergreen/run-azurekms-fail-test.sh deleted file mode 100755 index 0804c34d66..0000000000 --- a/.evergreen/run-azurekms-fail-test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -HERE=$(dirname ${BASH_SOURCE:-$0}) -. $DRIVERS_TOOLS/.evergreen/csfle/azurekms/setup-secrets.sh -bash $HERE/just.sh setup-test kms azure-fail -KEY_NAME="${AZUREKMS_KEYNAME}" \ - KEY_VAULT_ENDPOINT="${AZUREKMS_KEYVAULTENDPOINT}" \ - $HERE/just.sh test-eg -bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-azurekms-test.sh b/.evergreen/run-azurekms-test.sh deleted file mode 100755 index b40b07b019..0000000000 --- a/.evergreen/run-azurekms-test.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -HERE=$(dirname ${BASH_SOURCE:-$0}) -source ${DRIVERS_TOOLS}/.evergreen/csfle/azurekms/secrets-export.sh -echo "Copying files ... begin" -export AZUREKMS_RESOURCEGROUP=${AZUREKMS_RESOURCEGROUP} -export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} -export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey -# Set up the remote files to test. -git add . -git commit -m "add files" || true -git archive -o /tmp/mongo-python-driver.tgz HEAD -# shellcheck disable=SC2088 -AZUREKMS_SRC="/tmp/mongo-python-driver.tgz" AZUREKMS_DST="~/" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh -echo "Copying files ... end" -echo "Untarring file ... begin" -AZUREKMS_CMD="tar xf mongo-python-driver.tgz" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh -echo "Untarring file ... end" -echo "Running test ... begin" -AZUREKMS_CMD="bash .evergreen/just.sh setup-test kms azure" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh -AZUREKMS_CMD="KEY_NAME=\"$AZUREKMS_KEYNAME\" KEY_VAULT_ENDPOINT=\"$AZUREKMS_KEYVAULTENDPOINT\" bash ./.evergreen/just.sh test-eg" \ - $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh -echo "Running test ... end" -bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-gcpkms-test.sh b/.evergreen/run-gcpkms-test.sh deleted file mode 100755 index ee2e8d8a57..0000000000 --- a/.evergreen/run-gcpkms-test.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -HERE=$(dirname ${BASH_SOURCE:-$0}) - -source ${DRIVERS_TOOLS}/.evergreen/csfle/gcpkms/secrets-export.sh -echo "Copying files ... begin" -export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} -export GCPKMS_PROJECT=${GCPKMS_PROJECT} -export GCPKMS_ZONE=${GCPKMS_ZONE} -export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} -# Set up the remote files to test. -git add . -git commit -m "add files" || true -git archive -o /tmp/mongo-python-driver.tgz HEAD -GCPKMS_SRC=/tmp/mongo-python-driver.tgz GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh -echo "Copying files ... end" -echo "Untarring file ... begin" -GCPKMS_CMD="tar xf mongo-python-driver.tgz" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh -echo "Untarring file ... end" -echo "Running test ... begin" -GCPKMS_CMD="bash ./.evergreen/just.sh setup-test kms gcp" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh -GCPKMS_CMD="./.evergreen/just.sh test-eg" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh -echo "Running test ... end" -bash $HERE/scripts/teardown-tests.sh diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index fc3a092bd4..ef7e0ba333 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -30,5 +30,5 @@ export SET_XTRACE_ON=1 cd src rm -rf .venv rm -f .evergreen/scripts/test-env.sh || true -bash ./.evergreen/just.sh setup-test auth_aws ecs -bash .evergreen/just.sh test-eg +bash ./.evergreen/just.sh setup-tests auth_aws ecs +bash .evergreen/just.sh run-tests diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index c789d6d147..759ac5d2bb 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -29,5 +29,5 @@ else exit 1 fi -COVERAGE=1 bash ./.evergreen/just.sh setup-test auth_oidc -bash ./.evergreen/just.sh test-eg "${@:1}" +COVERAGE=1 bash ./.evergreen/just.sh setup-tests auth_oidc +bash ./.evergreen/just.sh run-tests "${@:1}" diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index 85a82c2a5a..5e423caa23 100755 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -15,5 +15,5 @@ export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 -bash ./.evergreen/just.sh setup-test perf -bash ./.evergreen/just.sh test-eg +bash ./.evergreen/just.sh setup-tests perf +bash ./.evergreen/just.sh run-tests diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index e1b3c779ff..1c453c1d6d 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -20,7 +20,7 @@ if [ -f $SCRIPT_DIR/scripts/test-env.sh ]; then echo "Sourcing test inputs" . $SCRIPT_DIR/scripts/test-env.sh else - echo "Missing test inputs, please run 'just setup-test'" + echo "Missing test inputs, please run 'just setup-tests'" exit 1 fi diff --git a/.evergreen/scripts/__init__.py b/.evergreen/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.evergreen/scripts/bootstrap-mongo-orchestration.sh b/.evergreen/scripts/bootstrap-mongo-orchestration.sh index 5c6387d4b1..8f7d9d0aea 100755 --- a/.evergreen/scripts/bootstrap-mongo-orchestration.sh +++ b/.evergreen/scripts/bootstrap-mongo-orchestration.sh @@ -34,10 +34,10 @@ if [ -z "${TEST_CRYPT_SHARED:-}" ]; then export SKIP_CRYPT_SHARED=1 fi -MONGODB_VERSION=${VERSION} \ - TOPOLOGY=${TOPOLOGY} \ - AUTH=${AUTH:-noauth} \ - SSL=${SSL:-nossl} \ +MONGODB_VERSION=${VERSION:-} \ + TOPOLOGY=${TOPOLOGY:-} \ + AUTH=${AUTH:-} \ + SSL=${SSL:-} \ STORAGE_ENGINE=${STORAGE_ENGINE:-} \ DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS:-} \ ORCHESTRATION_FILE=${ORCHESTRATION_FILE:-} \ diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh index a1fd92f04d..c58d2163dd 100755 --- a/.evergreen/scripts/cleanup.sh +++ b/.evergreen/scripts/cleanup.sh @@ -1,7 +1,4 @@ #!/bin/bash -if [ -f "$DRIVERS_TOOLS"/.evergreen/csfle/secrets-export.sh ]; then - bash .evergreen/teardown-encryption.sh -fi rm -rf "${DRIVERS_TOOLS}" || true rm -f ./secrets-export.sh || true diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index d1c3b92262..4d4d29c6dd 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -835,6 +835,25 @@ def create_load_balancer_tasks(): return tasks +def create_kms_tasks(): + tasks = [] + for kms_type in ["gcp", "azure"]: + for success in [True, False]: + name = f"test-{kms_type}kms" + sub_test_name = kms_type + if not success: + name += "-fail" + sub_test_name += "-fail" + commands = [] + if not success: + commands.append(FunctionCall(func="bootstrap mongo-orchestration")) + test_vars = dict(TEST_NAME="kms", SUB_TEST_NAME=sub_test_name) + test_func = FunctionCall(func="run tests", vars=test_vars) + commands.append(test_func) + tasks.append(EvgTask(name=name, commands=commands)) + return tasks + + ################## # Generate Config ################## diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py new file mode 100644 index 0000000000..d38ec3a69e --- /dev/null +++ b/.evergreen/scripts/kms_tester.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import os + +from utils import DRIVERS_TOOLS, LOGGER, ROOT, read_env, run_command, write_env + +TMP_DRIVER_FILE = "/tmp/mongo-python-driver.tgz" # noqa: S108 +DIRS = dict( + gcp=f"{DRIVERS_TOOLS}/.evergreen/csfle/gcpkms", + azure=f"{DRIVERS_TOOLS}/.evergreen/csfle/azurekms", +) + + +def _setup_azure_vm(base_env: dict[str, str]) -> None: + LOGGER.info("Setting up Azure VM...") + azure_dir = DIRS["azure"] + env = base_env.copy() + env["AZUREKMS_SRC"] = TMP_DRIVER_FILE + env["AZUREKMS_DST"] = "~/" + run_command(f"{azure_dir}/copy-file.sh", env=env) + + env = base_env.copy() + env["AZUREKMS_CMD"] = "tar xf mongo-python-driver.tgz" + run_command(f"{azure_dir}/run-command.sh", env=env) + + env["AZUREKMS_CMD"] = "bash .evergreen/just.sh setup-tests kms azure-remote" + run_command(f"{azure_dir}/run-command.sh", env=env) + LOGGER.info("Setting up Azure VM... done.") + + +def _setup_gcp_vm(base_env: dict[str, str]) -> None: + LOGGER.info("Setting up GCP VM...") + gcp_dir = DIRS["gcp"] + env = base_env.copy() + env["GCPKMS_SRC"] = TMP_DRIVER_FILE + env["GCPKMS_DST"] = f"{env['GCPKMS_INSTANCENAME']}:" + run_command(f"{gcp_dir}/copy-file.sh", env=env) + + env = base_env.copy() + env["GCPKMS_CMD"] = "tar xf mongo-python-driver.tgz" + run_command(f"{gcp_dir}/run-command.sh", env=env) + + env["GCPKMS_CMD"] = "bash ./.evergreen/just.sh setup-tests kms gcp-remote" + run_command(f"{gcp_dir}/run-command.sh", env=env) + LOGGER.info("Setting up GCP VM...") + + +def _create_archive() -> None: + run_command("git add .", cwd=ROOT) + run_command('git commit -m "add files"', check=False, cwd=ROOT) + run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) + + +def _load_kms_config(sub_test_target: str) -> dict[str, str]: + target_dir = DIRS[sub_test_target] + config = read_env(f"{target_dir}/secrets-export.sh") + base_env = os.environ.copy() + for key, value in config.items(): + base_env[key] = str(value) + return base_env + + +def setup_kms(sub_test_name: str) -> None: + if "-" in sub_test_name: + sub_test_target, sub_test_type = sub_test_name.split("-") + else: + sub_test_target = sub_test_name + sub_test_type = "" + + assert sub_test_target in ["azure", "gcp"], sub_test_target + assert sub_test_type in ["", "remote", "fail"], sub_test_type + success = sub_test_type != "fail" + kms_dir = DIRS[sub_test_target] + + if sub_test_target == "azure": + write_env("TEST_FLE_AZURE_AUTO") + else: + write_env("TEST_FLE_GCP_AUTO") + + write_env("SUCCESS", success) + + # For remote tests, there is no further work required. + if sub_test_type == "remote": + return + + if sub_test_target == "azure": + run_command("./setup-secrets.sh", cwd=kms_dir) + + if success: + _create_archive() + if sub_test_target == "azure": + os.environ["AZUREKMS_VMNAME_PREFIX"] = "PYTHON_DRIVER" + + run_command("./setup.sh", cwd=kms_dir) + base_env = _load_kms_config(sub_test_target) + + if sub_test_target == "azure": + _setup_azure_vm(base_env) + else: + _setup_gcp_vm(base_env) + + if sub_test_target == "azure": + config = read_env(f"{kms_dir}/secrets-export.sh") + if success: + write_env("AZUREKMS_VMNAME", config["AZUREKMS_VMNAME"]) + + write_env("KEY_NAME", config["AZUREKMS_KEYNAME"]) + write_env("KEY_VAULT_ENDPOINT", config["AZUREKMS_KEYVAULTENDPOINT"]) + + +def test_kms_remote(sub_test_name: str) -> None: + env = _load_kms_config(sub_test_name) + if sub_test_name == "azure": + key_name = os.environ["KEY_NAME"] + key_vault_endpoint = os.environ["KEY_VAULT_ENDPOINT"] + env[ + "AZUREKMS_CMD" + ] = f'KEY_NAME="{key_name}" KEY_VAULT_ENDPOINT="{key_vault_endpoint}" bash ./.evergreen/just.sh run-tests' + else: + env["GCPKMS_CMD"] = "./.evergreen/just.sh run-tests" + cmd = f"{DIRS[sub_test_name]}/run-command.sh" + run_command(cmd, env=env) + + +def teardown_kms(sub_test_name: str) -> None: + run_command(f"{DIRS[sub_test_name]}/teardown.sh") + + +if __name__ == "__main__": + setup_kms() diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh index 5f9d447c3d..99968063bd 100755 --- a/.evergreen/scripts/run-atlas-tests.sh +++ b/.evergreen/scripts/run-atlas-tests.sh @@ -4,5 +4,5 @@ set +x set -o errexit bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test atlas -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-tests atlas +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh index 21a7fef301..65aafde2df 100755 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ b/.evergreen/scripts/run-enterprise-auth-tests.sh @@ -5,5 +5,5 @@ set -eu set +x # Use the default python to bootstrap secrets. bash "${DRIVERS_TOOLS}"/.evergreen/secrets_handling/setup-secrets.sh drivers/enterprise_auth -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-test enterprise_auth -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh test-eg +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-tests enterprise_auth +bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/run-gcpkms-fail-test.sh b/.evergreen/scripts/run-gcpkms-fail-test.sh deleted file mode 100755 index 746ea4103a..0000000000 --- a/.evergreen/scripts/run-gcpkms-fail-test.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -eu -HERE=$(dirname ${BASH_SOURCE:-$0}) -. $HERE/env.sh -./.evergreen/just.sh setup-test kms gcp-fail -bash ./.evergreen/just.sh test-eg diff --git a/.evergreen/scripts/run-mongodb-aws-test.sh b/.evergreen/scripts/run-mongodb-aws-test.sh index 917482eaa2..fd38574db8 100755 --- a/.evergreen/scripts/run-mongodb-aws-test.sh +++ b/.evergreen/scripts/run-mongodb-aws-test.sh @@ -24,5 +24,5 @@ echo "Running MONGODB-AWS authentication tests for $1" # Handle credentials and environment setup. . "$DRIVERS_TOOLS"/.evergreen/auth_aws/aws_setup.sh "$1" -bash ./.evergreen/just.sh setup-test auth_aws $1 -bash ./.evergreen/just.sh test-eg +bash ./.evergreen/just.sh setup-tests auth_aws $1 +bash ./.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index e41691ca81..ceae46d343 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -1,27 +1,23 @@ from __future__ import annotations import json -import logging import os import platform import shutil import sys from datetime import datetime -from pathlib import Path import pytest +from utils import LOGGER, ROOT -HERE = Path(__file__).absolute().parent -ROOT = HERE.parent.parent AUTH = os.environ.get("AUTH", "noauth") SSL = os.environ.get("SSL", "nossl") UV_ARGS = os.environ.get("UV_ARGS", "") TEST_PERF = os.environ.get("TEST_PERF") GREEN_FRAMEWORK = os.environ.get("GREEN_FRAMEWORK") TEST_ARGS = os.environ.get("TEST_ARGS", "").split() - -LOGGER = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") +TEST_NAME = os.environ.get("TEST_NAME") +SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") def handle_perf(start_time: datetime): @@ -103,7 +99,14 @@ def run() -> None: if TEST_PERF: start_time = datetime.now() - # Run the tests. + # Run remote kms tests. + if TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]: + from kms_tester import test_kms_remote + + test_kms_remote(SUB_TEST_NAME) + return + + # Run local tests. pytest.main(TEST_ARGS) # Handle perf test post actions. diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 04a377a2d2..f158c71320 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -eux +set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 78bfad7224..615f07320a 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -2,29 +2,28 @@ import argparse import base64 -import dataclasses import io import logging import os import platform -import shlex import shutil import stat -import subprocess -import sys import tarfile from pathlib import Path -from typing import Any from urllib import request -HERE = Path(__file__).absolute().parent -ROOT = HERE.parent.parent -ENV_FILE = HERE / "test-env.sh" -DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") -PLATFORM = "windows" if os.name == "nt" else sys.platform.lower() - -LOGGER = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") +from utils import ( + DRIVERS_TOOLS, + ENV_FILE, + HERE, + LOGGER, + PLATFORM, + ROOT, + Distro, + read_env, + run_command, + write_env, +) # Passthrough environment variables. PASS_THROUGH_ENV = ["GREEN_FRAMEWORK", "NO_EXT", "MONGODB_API_VERSION"] @@ -41,7 +40,7 @@ "encryption": "encryption", "enterprise_auth": "auth", "index_management": "index_management", - "kms": "csfle", + "kms": "kms", "load_balancer": "load_balancer", "mockupdb": "mockupdb", "pyopenssl": "", @@ -69,51 +68,23 @@ GROUP_MAP = dict(mockupdb="mockupdb", perf="perf") -@dataclasses.dataclass -class Distro: - name: str - version_id: str - arch: str - - -def write_env(name: str, value: Any = "1") -> None: - with ENV_FILE.open("a", newline="\n") as fid: - # Remove any existing quote chars. - value = str(value).replace('"', "") - fid.write(f'export {name}="{value}"\n') - - def is_set(var: str) -> bool: value = os.environ.get(var, "") return len(value.strip()) > 0 -def run_command(cmd: str) -> None: - LOGGER.info("Running command %s...", cmd) - subprocess.check_call(shlex.split(cmd)) # noqa: S603 - LOGGER.info("Running command %s... done.", cmd) - - -def read_env(path: Path | str) -> dict[str, Any]: - config = dict() - with Path(path).open() as fid: - for line in fid.readlines(): - if "=" not in line: - continue - name, _, value = line.strip().partition("=") - if value.startswith(('"', "'")): - value = value[1:-1] - name = name.replace("export ", "") - config[name] = value - return config - - def get_options(): parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) - parser.add_argument("test_name", choices=sorted(TEST_SUITE_MAP), nargs="?", default="default") - parser.add_argument("sub_test_name", nargs="?") + parser.add_argument( + "test_name", + choices=sorted(TEST_SUITE_MAP), + nargs="?", + default="default", + help="The name of the test suite to set up, typically the same name as a pytest marker.", + ) + parser.add_argument("sub_test_name", nargs="?", help="The sub test name, for example 'azure'") parser.add_argument( "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level" ) @@ -238,6 +209,7 @@ def handle_test_env() -> None: # Set an environment variable for the test name and sub test name. write_env(f"TEST_{test_name.upper()}") + write_env("TEST_NAME", test_name) write_env("SUB_TEST_NAME", sub_test_name) # Handle pass through env vars. @@ -388,15 +360,9 @@ def handle_test_env() -> None: write_env("LD_LIBRARY_PATH", f"{CRYPT_SHARED_DIR}:${{LD_LIBRARY_PATH:-}}") if test_name == "kms": - if sub_test_name.startswith("azure"): - write_env("TEST_FLE_AZURE_AUTO") - else: - write_env("TEST_FLE_GCP_AUTO") + from kms_tester import setup_kms - write_env("SUCCESS", "fail" not in sub_test_name) - MONGODB_URI = os.environ.get("MONGODB_URI", "") - if "@" in MONGODB_URI: - raise RuntimeError("MONGODB_URI unexpectedly contains user credentials in FLE test!") + setup_kms(sub_test_name) if test_name == "ocsp": write_env("CA_FILE", os.environ["CA_FILE"]) diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh index be1b88390f..cd705c6b35 100755 --- a/.evergreen/scripts/teardown-tests.sh +++ b/.evergreen/scripts/teardown-tests.sh @@ -2,28 +2,28 @@ set -eu SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) -ROOT_DIR="$(dirname "$(dirname $SCRIPT_DIR)")" +SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )" +ROOT_DIR="$(dirname $SCRIPT_DIR)" -# Remove temporary test files. pushd $ROOT_DIR > /dev/null -rm -rf libmongocrypt/ libmongocrypt.tar.gz mongocryptd.pid > /dev/null -popd > /dev/null -if [ ! -f $SCRIPT_DIR/test-env.sh ]; then - exit 0 -fi +# Try to source the env file. if [ -f $SCRIPT_DIR/env.sh ]; then - source $SCRIPT_DIR/env.sh + echo "Sourcing env inputs" + . $SCRIPT_DIR/env.sh +else + echo "Not sourcing env inputs" fi -source $SCRIPT_DIR/test-env.sh - -# Shut down csfle servers if applicable -if [ -n "${TEST_ENCRYPTION:-}" ]; then - bash ${DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh +# Handle test inputs. +if [ -f $SCRIPT_DIR/test-env.sh ]; then + echo "Sourcing test inputs" + . $SCRIPT_DIR/test-env.sh +else + echo "Missing test inputs, please run 'just setup-tests'" fi -# Shut down load balancer if applicable. -if [ -n "${TEST_LOAD_BALANCER:-}" ]; then - bash "${DRIVERS_TOOLS}"/.evergreen/run-load-balancer.sh stop -fi +# Teardown the test runner. +uv run $SCRIPT_DIR/teardown_tests.py + +popd > /dev/null diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py new file mode 100644 index 0000000000..fc1a937de0 --- /dev/null +++ b/.evergreen/scripts/teardown_tests.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import os + +from utils import DRIVERS_TOOLS, LOGGER, run_command + +TEST_NAME = os.environ.get("TEST_NAME", "unconfigured") +SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") + +LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'...") + +# Shut down csfle servers if applicable +if TEST_NAME == "encryption": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh") + +# Shut down load balancer if applicable. +elif TEST_NAME == "load-balancer": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop") + +# Tear down kms VM if applicable. +elif TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]: + from kms_tester import teardown_kms + + teardown_kms(SUB_TEST_NAME) + +LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py new file mode 100644 index 0000000000..d830275def --- /dev/null +++ b/.evergreen/scripts/utils.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import dataclasses +import logging +import os +import shlex +import subprocess +import sys +from pathlib import Path +from typing import Any + +HERE = Path(__file__).absolute().parent +ROOT = HERE.parent.parent +DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") + +LOGGER = logging.getLogger("test") +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") +ENV_FILE = HERE / "test-env.sh" +PLATFORM = "windows" if os.name == "nt" else sys.platform.lower() + + +@dataclasses.dataclass +class Distro: + name: str + version_id: str + arch: str + + +def read_env(path: Path | str) -> dict[str, Any]: + config = dict() + with Path(path).open() as fid: + for line in fid.readlines(): + if "=" not in line: + continue + name, _, value = line.strip().partition("=") + if value.startswith(('"', "'")): + value = value[1:-1] + name = name.replace("export ", "") + config[name] = value + return config + + +def write_env(name: str, value: Any = "1") -> None: + with ENV_FILE.open("a", newline="\n") as fid: + # Remove any existing quote chars. + value = str(value).replace('"', "") + fid.write(f'export {name}="{value}"\n') + + +def run_command(cmd: str, **kwargs: Any) -> None: + LOGGER.info("Running command %s...", cmd) + kwargs.setdefault("check", True) + subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 + LOGGER.info("Running command %s... done.", cmd) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0b8b77fc6e..4b1d139684 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -212,17 +212,17 @@ the pages will re-render and the browser will automatically refresh. - Start the servers using `LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh`. - Set up the test using: - `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' just setup-test load-balancer`. + `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' just setup-tests load-balancer`. - Run the tests from the `pymongo` checkout directory using: - `just test-eg`. + `just run-tests`. ## Running Encryption Tests Locally - Clone `drivers-evergreen-tools`: `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. - Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools` -- Run `AWS_PROFILE= just setup-test encryption` after setting up your AWS profile with `aws configure sso`. -- Run the tests with `just test-eg`. -- When done, run `just teardown-test` to clean up. +- Run `AWS_PROFILE= just setup-tests encryption` after setting up your AWS profile with `aws configure sso`. +- Run the tests with `just run-tests`. +- When done, run `just teardown-tests` to clean up. ## Re-sync Spec Tests diff --git a/justfile b/justfile index 5de578ecc7..5a5a05cd5a 100644 --- a/justfile +++ b/justfile @@ -61,13 +61,13 @@ test *args="-v --durations=5 --maxfail=10": {{uv_run}} --extra test pytest {{args}} [group('test')] -test-eg *args: +run-tests *args: bash ./.evergreen/run-tests.sh {{args}} [group('test')] -setup-test *args="": +setup-tests *args="": bash .evergreen/scripts/setup-tests.sh {{args}} [group('test')] -teardown-test: +teardown-tests: bash .evergreen/scripts/teardown-tests.sh diff --git a/pyproject.toml b/pyproject.toml index b86e9df6ad..c898169895 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -129,7 +129,7 @@ markers = [ "data_lake: tests that rely on atlas data lake", "perf: benchmark tests", "index_management: index management tests", - "csfle: client-side field-level encryption tests", + "kms: client-side field-level encryption tests using kms", "encryption: encryption tests", "load_balancer: load balancer tests", "mockupdb: tests that rely on mockupdb", diff --git a/test/asynchronous/test_on_demand_csfle.py b/test/asynchronous/test_on_demand_csfle.py index 617e2ed8d6..55394ddeb8 100644 --- a/test/asynchronous/test_on_demand_csfle.py +++ b/test/asynchronous/test_on_demand_csfle.py @@ -34,7 +34,7 @@ _IS_SYNC = False -pytestmark = pytest.mark.csfle +pytestmark = pytest.mark.kms class TestonDemandGCPCredentials(AsyncIntegrationTest): diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py index 023d44f641..648e46815a 100644 --- a/test/test_on_demand_csfle.py +++ b/test/test_on_demand_csfle.py @@ -34,7 +34,7 @@ _IS_SYNC = True -pytestmark = pytest.mark.csfle +pytestmark = pytest.mark.kms class TestonDemandGCPCredentials(IntegrationTest): From 2b667df14f55f669d2b4cb4126f4501b4c30420e Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 26 Feb 2025 11:16:44 -0800 Subject: [PATCH 1759/2111] PYTHON-5120 Reduce configureFailPoint duplication in tests (#2131) --- test/__init__.py | 17 +++++++++++------ test/asynchronous/__init__.py | 17 +++++++++++------ test/asynchronous/test_connection_monitoring.py | 7 +------ test/asynchronous/test_transactions.py | 7 +------ test/asynchronous/unified_format.py | 8 ++------ test/asynchronous/utils_spec_runner.py | 9 ++------- test/test_connection_monitoring.py | 7 +------ test/test_transactions.py | 7 +------ test/unified_format.py | 8 ++------ test/utils_spec_runner.py | 9 ++------- 10 files changed, 34 insertions(+), 62 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 6eda00bdec..307780271d 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -933,17 +933,22 @@ def assertEqualCommand(self, expected, actual, msg=None): def assertEqualReply(self, expected, actual, msg=None): self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + @staticmethod + def configure_fail_point(client, command_args, off=False): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + if off: + cmd["mode"] = "off" + cmd.pop("data", None) + client.admin.command(cmd) + @contextmanager def fail_point(self, command_args): - cmd_on = SON([("configureFailPoint", "failCommand")]) - cmd_on.update(command_args) - client_context.client.admin.command(cmd_on) + self.configure_fail_point(client_context.client, command_args) try: yield finally: - client_context.client.admin.command( - "configureFailPoint", cmd_on["configureFailPoint"], mode="off" - ) + self.configure_fail_point(client_context.client, command_args, off=True) @contextmanager def fork( diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index b3b0ca93e1..f03fcf4eeb 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -935,17 +935,22 @@ def assertEqualCommand(self, expected, actual, msg=None): def assertEqualReply(self, expected, actual, msg=None): self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + @staticmethod + async def configure_fail_point(client, command_args, off=False): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + if off: + cmd["mode"] = "off" + cmd.pop("data", None) + await client.admin.command(cmd) + @asynccontextmanager async def fail_point(self, command_args): - cmd_on = SON([("configureFailPoint", "failCommand")]) - cmd_on.update(command_args) - await async_client_context.client.admin.command(cmd_on) + await self.configure_fail_point(async_client_context.client, command_args) try: yield finally: - await async_client_context.client.admin.command( - "configureFailPoint", cmd_on["configureFailPoint"], mode="off" - ) + await self.configure_fail_point(async_client_context.client, command_args, off=True) @contextmanager def fork( diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py index a68b2a90cb..cdf4887ba3 100644 --- a/test/asynchronous/test_connection_monitoring.py +++ b/test/asynchronous/test_connection_monitoring.py @@ -211,15 +211,10 @@ def check_error(self, actual, expected): self.check_object(actual, expected) self.assertIn(message, str(actual)) - async def _set_fail_point(self, client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - await client.admin.command(cmd) - async def set_fail_point(self, command_args): if not async_client_context.supports_failCommand_fail_point: self.skipTest("failCommand fail point must be supported") - await self._set_fail_point(self.client, command_args) + await self.configure_fail_point(self.client, command_args) async def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index d11d0a9776..5f75746a4d 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -410,15 +410,10 @@ async def asyncSetUp(self) -> None: for address in async_client_context.mongoses: self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) - async def _set_fail_point(self, client, command_args): - cmd = {"configureFailPoint": "failCommand"} - cmd.update(command_args) - await client.admin.command(cmd) - async def set_fail_point(self, command_args): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - await self._set_fail_point(client, command_args) + await self.configure_fail_point(client, command_args) @async_client_context.require_transactions async def test_callback_raises_custom_error(self): diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 695f58ee27..c315e86945 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -1008,12 +1008,8 @@ async def __set_fail_point(self, client, command_args): if not async_client_context.test_commands_enabled: self.skipTest("Test commands must be enabled") - cmd_on = SON([("configureFailPoint", "failCommand")]) - cmd_on.update(command_args) - await client.admin.command(cmd_on) - self.addAsyncCleanup( - client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off" - ) + await self.configure_fail_point(client, command_args) + self.addAsyncCleanup(self.configure_fail_point, client, command_args, off=True) async def _testOperation_failPoint(self, spec): await self.__set_fail_point( diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index 11d88850fc..7530ba36a7 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -264,15 +264,10 @@ async def asyncSetUp(self) -> None: async def asyncTearDown(self) -> None: self.knobs.disable() - async def _set_fail_point(self, client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - await client.admin.command(cmd) - async def set_fail_point(self, command_args): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - await self._set_fail_point(client, command_args) + await self.configure_fail_point(client, command_args) async def targeted_fail_point(self, session, fail_point): """Run the targetedFailPoint test operation. @@ -281,7 +276,7 @@ async def targeted_fail_point(self, session, fail_point): """ clients = {c.address: c for c in self.mongos_clients} client = clients[session._pinned_address] - await self._set_fail_point(client, fail_point) + await self.configure_fail_point(client, fail_point) self.addAsyncCleanup(self.set_fail_point, {"mode": "off"}) def assert_session_pinned(self, session): diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 810d440932..3987f2b68b 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -211,15 +211,10 @@ def check_error(self, actual, expected): self.check_object(actual, expected) self.assertIn(message, str(actual)) - def _set_fail_point(self, client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - client.admin.command(cmd) - def set_fail_point(self, command_args): if not client_context.supports_failCommand_fail_point: self.skipTest("failCommand fail point must be supported") - self._set_fail_point(self.client, command_args) + self.configure_fail_point(self.client, command_args) def run_scenario(self, scenario_def, test): """Run a CMAP spec test.""" diff --git a/test/test_transactions.py b/test/test_transactions.py index 949b88e60b..7a8dcd0f00 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -402,15 +402,10 @@ def setUp(self) -> None: for address in client_context.mongoses: self.mongos_clients.append(self.single_client("{}:{}".format(*address))) - def _set_fail_point(self, client, command_args): - cmd = {"configureFailPoint": "failCommand"} - cmd.update(command_args) - client.admin.command(cmd) - def set_fail_point(self, command_args): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - self._set_fail_point(client, command_args) + self.configure_fail_point(client, command_args) @client_context.require_transactions def test_callback_raises_custom_error(self): diff --git a/test/unified_format.py b/test/unified_format.py index 73dee10ddf..d5698f5a77 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -999,12 +999,8 @@ def __set_fail_point(self, client, command_args): if not client_context.test_commands_enabled: self.skipTest("Test commands must be enabled") - cmd_on = SON([("configureFailPoint", "failCommand")]) - cmd_on.update(command_args) - client.admin.command(cmd_on) - self.addCleanup( - client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off" - ) + self.configure_fail_point(client, command_args) + self.addCleanup(self.configure_fail_point, client, command_args, off=True) def _testOperation_failPoint(self, spec): self.__set_fail_point( diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 98949431d0..ac4031e821 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -264,15 +264,10 @@ def setUp(self) -> None: def tearDown(self) -> None: self.knobs.disable() - def _set_fail_point(self, client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - client.admin.command(cmd) - def set_fail_point(self, command_args): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - self._set_fail_point(client, command_args) + self.configure_fail_point(client, command_args) def targeted_fail_point(self, session, fail_point): """Run the targetedFailPoint test operation. @@ -281,7 +276,7 @@ def targeted_fail_point(self, session, fail_point): """ clients = {c.address: c for c in self.mongos_clients} client = clients[session._pinned_address] - self._set_fail_point(client, fail_point) + self.configure_fail_point(client, fail_point) self.addCleanup(self.set_fail_point, {"mode": "off"}) def assert_session_pinned(self, session): From 324ed1730ff0c7146b8b49e08e27cd4ae74d8b07 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 26 Feb 2025 13:21:07 -0600 Subject: [PATCH 1760/2111] DRIVERS-3119 Add options to provide certificate and CA files (#2159) --- .../scripts/bootstrap-mongo-orchestration.sh | 37 +++++-------------- .evergreen/scripts/configure-env.sh | 4 +- .evergreen/scripts/prepare-resources.sh | 24 ------------ .evergreen/scripts/setup-system.sh | 29 ++++++++++++++- 4 files changed, 41 insertions(+), 53 deletions(-) delete mode 100755 .evergreen/scripts/prepare-resources.sh diff --git a/.evergreen/scripts/bootstrap-mongo-orchestration.sh b/.evergreen/scripts/bootstrap-mongo-orchestration.sh index 8f7d9d0aea..af38edd095 100755 --- a/.evergreen/scripts/bootstrap-mongo-orchestration.sh +++ b/.evergreen/scripts/bootstrap-mongo-orchestration.sh @@ -2,38 +2,21 @@ set -eu - -# Enable core dumps if enabled on the machine -# Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml -if [ -f /proc/self/coredump_filter ]; then - # Set the shell process (and its children processes) to dump ELF headers (bit 4), - # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). - echo 0x13 >/proc/self/coredump_filter - - if [ -f /sbin/sysctl ]; then - # Check that the core pattern is set explicitly on our distro image instead - # of being the OS's default value. This ensures that coredump names are consistent - # across distros and can be picked up by Evergreen. - core_pattern=$(/sbin/sysctl -n "kernel.core_pattern") - if [ "$core_pattern" = "dump_%e.%p.core" ]; then - echo "Enabling coredumps" - ulimit -c unlimited - fi - fi -fi - -if [ "$(uname -s)" = "Darwin" ]; then - core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") - if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then - echo "Enabling coredumps" - ulimit -c unlimited - fi -fi +HERE=$(dirname ${BASH_SOURCE:-$0}) +HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" +ROOT=$(dirname "$(dirname $HERE)") if [ -z "${TEST_CRYPT_SHARED:-}" ]; then export SKIP_CRYPT_SHARED=1 fi +# Override the tls files if applicable. +if [ "${SSL:-}" == "ssl" ]; then + export TLS_CERT_KEY_FILE=${ROOT}/test/certificates/client.pem + export TLS_PEM_KEY_FILE=${ROOT}/test/certificates/server.pem + export TLS_CA_FILE=${ROOT}/test/certificates/ca.pem +fi + MONGODB_VERSION=${VERSION:-} \ TOPOLOGY=${TOPOLOGY:-} \ AUTH=${AUTH:-} \ diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 5515413562..f23af8a811 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -76,7 +76,9 @@ EOT # Write the .env file for drivers-tools. rm -rf $DRIVERS_TOOLS -git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS +BRANCH=master +ORG=mongodb-labs +git clone --branch $BRANCH https://github.com/$ORG/drivers-evergreen-tools.git $DRIVERS_TOOLS cat < ${DRIVERS_TOOLS}/.env SKIP_LEGACY_SHELL=1 diff --git a/.evergreen/scripts/prepare-resources.sh b/.evergreen/scripts/prepare-resources.sh deleted file mode 100755 index f5285a39de..0000000000 --- a/.evergreen/scripts/prepare-resources.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -set -eu - -HERE=$(dirname ${BASH_SOURCE:-$0}) -pushd $HERE -. env.sh - -popd - -# Copy PyMongo's test certificates over driver-evergreen-tools' -cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ - -# Replace MongoOrchestration's client certificate. -cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem - -if [ -w /etc/hosts ]; then - SUDO="" -else - SUDO="sudo" -fi - -# Add 'server' and 'hostname_not_in_cert' as a hostnames -echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts -echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh index d78d924f6b..0ab08ff01c 100755 --- a/.evergreen/scripts/setup-system.sh +++ b/.evergreen/scripts/setup-system.sh @@ -7,8 +7,35 @@ pushd "$(dirname "$(dirname $HERE)")" echo "Setting up system..." bash .evergreen/scripts/configure-env.sh source .evergreen/scripts/env.sh -bash .evergreen/scripts/prepare-resources.sh bash $DRIVERS_TOOLS/.evergreen/setup.sh bash .evergreen/scripts/install-dependencies.sh popd + +# Enable core dumps if enabled on the machine +# Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml +if [ -f /proc/self/coredump_filter ]; then + # Set the shell process (and its children processes) to dump ELF headers (bit 4), + # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). + echo 0x13 >/proc/self/coredump_filter + + if [ -f /sbin/sysctl ]; then + # Check that the core pattern is set explicitly on our distro image instead + # of being the OS's default value. This ensures that coredump names are consistent + # across distros and can be picked up by Evergreen. + core_pattern=$(/sbin/sysctl -n "kernel.core_pattern") + if [ "$core_pattern" = "dump_%e.%p.core" ]; then + echo "Enabling coredumps" + ulimit -c unlimited + fi + fi +fi + +if [ "$(uname -s)" = "Darwin" ]; then + core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") + if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then + echo "Enabling coredumps" + ulimit -c unlimited + fi +fi + echo "Setting up system... done." From f5aeac3cccde06ab1166d44f63d5ebae116d554e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 26 Feb 2025 13:21:46 -0600 Subject: [PATCH 1761/2111] DRIVERS-3058 Include nsType field in ChangeStreamDocument (#2157) --- .../unified/change-streams-nsType.json | 145 ++++++++++++++++++ 1 file changed, 145 insertions(+) create mode 100644 test/change_streams/unified/change-streams-nsType.json diff --git a/test/change_streams/unified/change-streams-nsType.json b/test/change_streams/unified/change-streams-nsType.json new file mode 100644 index 0000000000..1861c9a5e0 --- /dev/null +++ b/test/change_streams/unified/change-streams-nsType.json @@ -0,0 +1,145 @@ +{ + "description": "change-streams-nsType", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.1.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + } + ], + "tests": [ + { + "description": "nsType is present when creating collections", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "collection" + } + } + ] + }, + { + "description": "nsType is present when creating timeseries", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "timeseries" + } + } + ] + }, + { + "description": "nsType is present when creating views", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "view" + } + } + ] + } + ] +} From 61feccacfefaf342ad673bf320b68d839bbdf66c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 26 Feb 2025 13:23:02 -0600 Subject: [PATCH 1762/2111] DRIVERS-2915 Add ENVIRONMENT auth mechanism property to test URIs (#2160) --- test/connection_string/test/valid-options.json | 7 ++++--- test/connection_string/test/valid-warnings.json | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json index 6c86172d08..e094bcf606 100644 --- a/test/connection_string/test/valid-options.json +++ b/test/connection_string/test/valid-options.json @@ -40,7 +40,7 @@ }, { "description": "Colon in a key value pair", - "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster", + "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster,ENVIRONMENT:azure", "valid": true, "warning": false, "hosts": [ @@ -53,9 +53,10 @@ "auth": null, "options": { "authmechanismProperties": { - "TOKEN_RESOURCE": "mongodb://test-cluster" + "TOKEN_RESOURCE": "mongodb://test-cluster", + "ENVIRONMENT": "azure" } } } ] -} +} \ No newline at end of file diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json index daf814a75f..c46a8311c5 100644 --- a/test/connection_string/test/valid-warnings.json +++ b/test/connection_string/test/valid-warnings.json @@ -96,7 +96,7 @@ }, { "description": "Comma in a key value pair causes a warning", - "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2", + "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2,ENVIRONMENT:azure", "valid": true, "warning": true, "hosts": [ @@ -112,4 +112,4 @@ } } ] -} +} \ No newline at end of file From 85ca6f1d9fa71badeeee2b80db7ec89dc4bef0f4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 26 Feb 2025 13:18:04 -0800 Subject: [PATCH 1763/2111] PYTHON-4579 Stop gossiping $clusterTime on SDAM connections (#1925) --- pymongo/asynchronous/monitor.py | 16 +++------- pymongo/asynchronous/network.py | 4 +++ pymongo/asynchronous/pool.py | 13 +++++---- pymongo/asynchronous/topology.py | 1 - pymongo/synchronous/monitor.py | 16 +++------- pymongo/synchronous/network.py | 4 +++ pymongo/synchronous/pool.py | 13 +++++---- pymongo/synchronous/topology.py | 1 - test/asynchronous/test_session.py | 42 ++++++++++++++++++++++++--- test/test_discovery_and_monitoring.py | 20 ++++++------- test/test_session.py | 38 ++++++++++++++++++++++-- 11 files changed, 113 insertions(+), 55 deletions(-) diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index 15289af4dc..d7f87b718a 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -21,11 +21,11 @@ import logging import time import weakref -from typing import TYPE_CHECKING, Any, Mapping, Optional, cast +from typing import TYPE_CHECKING, Any, Optional from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum -from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled +from pymongo.errors import NetworkTimeout, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _async_create_lock from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage @@ -255,13 +255,7 @@ async def _check_server(self) -> ServerDescription: self._conn_id = None start = time.monotonic() try: - try: - return await self._check_once() - except (OperationFailure, NotPrimaryError) as exc: - # Update max cluster time even when hello fails. - details = cast(Mapping[str, Any], exc.details) - await self._topology.receive_cluster_time(details.get("$clusterTime")) - raise + return await self._check_once() except ReferenceError: raise except Exception as error: @@ -358,7 +352,6 @@ async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float] Can raise ConnectionFailure or OperationFailure. """ - cluster_time = self._topology.max_cluster_time() start = time.monotonic() if conn.more_to_come: # Read the next streaming hello (MongoDB 4.4+). @@ -368,13 +361,12 @@ async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float] ): # Initiate streaming hello (MongoDB 4.4+). response = await conn._hello( - cluster_time, self._server_description.topology_version, self._settings.heartbeat_frequency, ) else: # New connection handshake or polling hello (MongoDB <4.4). - response = await conn._hello(cluster_time, None, None) + response = await conn._hello(None, None) duration = _monotonic_duration(start) return response, duration diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index d17aead120..c7a5580eca 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -207,6 +207,10 @@ async def command( ) response_doc = unpacked_docs[0] + if not conn.ready: + cluster_time = response_doc.get("$clusterTime") + if cluster_time: + conn._cluster_time = cluster_time if client: await client._process_response(response_doc, session) if check: diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 1da695c5c8..698558aa5d 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -102,7 +102,7 @@ from pymongo.pyopenssl_context import _sslConn from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _ServerMode - from pymongo.typings import ClusterTime, _Address, _CollationIn + from pymongo.typings import _Address, _CollationIn from pymongo.write_concern import WriteConcern try: @@ -310,6 +310,8 @@ def __init__( self.connect_rtt = 0.0 self._client_id = pool._client_id self.creation_time = time.monotonic() + # For gossiping $clusterTime from the connection handshake to the client. + self._cluster_time = None def set_conn_timeout(self, timeout: Optional[float]) -> None: """Cache last timeout to avoid duplicate calls to conn.settimeout.""" @@ -374,11 +376,10 @@ def hello_cmd(self) -> dict[str, Any]: return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} async def hello(self) -> Hello: - return await self._hello(None, None, None) + return await self._hello(None, None) async def _hello( self, - cluster_time: Optional[ClusterTime], topology_version: Optional[Any], heartbeat_frequency: Optional[int], ) -> Hello[dict[str, Any]]: @@ -401,9 +402,6 @@ async def _hello( if self.opts.connect_timeout: self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) - if not performing_handshake and cluster_time is not None: - cmd["$clusterTime"] = cluster_time - creds = self.opts._credentials if creds: if creds.mechanism == "DEFAULT" and creds.username: @@ -1316,6 +1314,9 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A conn.close_conn(ConnectionClosedReason.ERROR) raise + if handler: + await handler.client._topology.receive_cluster_time(conn._cluster_time) + return conn @contextlib.asynccontextmanager diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 19fc76b0d3..bb003bbfde 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -501,7 +501,6 @@ async def _process_change( self._description = new_td await self._update_servers() - self._receive_cluster_time_no_lock(server_description.cluster_time) if self._publish_tp and not suppress_event: assert self._events is not None diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index 802ba4742f..c39a57c392 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -21,11 +21,11 @@ import logging import time import weakref -from typing import TYPE_CHECKING, Any, Mapping, Optional, cast +from typing import TYPE_CHECKING, Any, Optional from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum -from pymongo.errors import NetworkTimeout, NotPrimaryError, OperationFailure, _OperationCancelled +from pymongo.errors import NetworkTimeout, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _create_lock from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage @@ -253,13 +253,7 @@ def _check_server(self) -> ServerDescription: self._conn_id = None start = time.monotonic() try: - try: - return self._check_once() - except (OperationFailure, NotPrimaryError) as exc: - # Update max cluster time even when hello fails. - details = cast(Mapping[str, Any], exc.details) - self._topology.receive_cluster_time(details.get("$clusterTime")) - raise + return self._check_once() except ReferenceError: raise except Exception as error: @@ -356,7 +350,6 @@ def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: Can raise ConnectionFailure or OperationFailure. """ - cluster_time = self._topology.max_cluster_time() start = time.monotonic() if conn.more_to_come: # Read the next streaming hello (MongoDB 4.4+). @@ -366,13 +359,12 @@ def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: ): # Initiate streaming hello (MongoDB 4.4+). response = conn._hello( - cluster_time, self._server_description.topology_version, self._settings.heartbeat_frequency, ) else: # New connection handshake or polling hello (MongoDB <4.4). - response = conn._hello(cluster_time, None, None) + response = conn._hello(None, None) duration = _monotonic_duration(start) return response, duration diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py index 7206dca735..543b069bfc 100644 --- a/pymongo/synchronous/network.py +++ b/pymongo/synchronous/network.py @@ -207,6 +207,10 @@ def command( ) response_doc = unpacked_docs[0] + if not conn.ready: + cluster_time = response_doc.get("$clusterTime") + if cluster_time: + conn._cluster_time = cluster_time if client: client._process_response(response_doc, session) if check: diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 978f0ae391..e575710ff5 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -102,7 +102,7 @@ from pymongo.synchronous.auth import _AuthContext from pymongo.synchronous.client_session import ClientSession from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler - from pymongo.typings import ClusterTime, _Address, _CollationIn + from pymongo.typings import _Address, _CollationIn from pymongo.write_concern import WriteConcern try: @@ -310,6 +310,8 @@ def __init__( self.connect_rtt = 0.0 self._client_id = pool._client_id self.creation_time = time.monotonic() + # For gossiping $clusterTime from the connection handshake to the client. + self._cluster_time = None def set_conn_timeout(self, timeout: Optional[float]) -> None: """Cache last timeout to avoid duplicate calls to conn.settimeout.""" @@ -374,11 +376,10 @@ def hello_cmd(self) -> dict[str, Any]: return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} def hello(self) -> Hello: - return self._hello(None, None, None) + return self._hello(None, None) def _hello( self, - cluster_time: Optional[ClusterTime], topology_version: Optional[Any], heartbeat_frequency: Optional[int], ) -> Hello[dict[str, Any]]: @@ -401,9 +402,6 @@ def _hello( if self.opts.connect_timeout: self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) - if not performing_handshake and cluster_time is not None: - cmd["$clusterTime"] = cluster_time - creds = self.opts._credentials if creds: if creds.mechanism == "DEFAULT" and creds.username: @@ -1310,6 +1308,9 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect conn.close_conn(ConnectionClosedReason.ERROR) raise + if handler: + handler.client._topology.receive_cluster_time(conn._cluster_time) + return conn @contextlib.contextmanager diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 6a8503c6c0..2bc8934540 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -501,7 +501,6 @@ def _process_change( self._description = new_td self._update_servers() - self._receive_cluster_time_no_lock(server_description.cluster_time) if self._publish_tp and not suppress_event: assert self._events is not None diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 03d1032b5b..568d392cd5 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -36,8 +36,10 @@ async_client_context, unittest, ) +from test.asynchronous.helpers import client_knobs from test.utils import ( EventListener, + HeartbeatEventListener, OvertCommandListener, async_wait_until, ) @@ -1135,12 +1137,10 @@ async def asyncSetUp(self): if "$clusterTime" not in (await async_client_context.hello): raise SkipTest("$clusterTime not supported") + # Sessions prose test: 3) $clusterTime in commands async def test_cluster_time(self): listener = SessionTestListener() - # Prevent heartbeats from updating $clusterTime between operations. - client = await self.async_rs_or_single_client( - event_listeners=[listener], heartbeatFrequencyMS=999999 - ) + client = await self.async_rs_or_single_client(event_listeners=[listener]) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). await collection.insert_many([{} for _ in range(10)]) @@ -1219,6 +1219,40 @@ async def aggregate(): f"{f.__name__} sent wrong $clusterTime with {event.command_name}", ) + # Sessions prose test: 20) Drivers do not gossip `$clusterTime` on SDAM commands + async def test_cluster_time_not_used_by_sdam(self): + heartbeat_listener = HeartbeatEventListener() + cmd_listener = OvertCommandListener() + with client_knobs(min_heartbeat_interval=0.01): + c1 = await self.async_single_client( + event_listeners=[heartbeat_listener, cmd_listener], heartbeatFrequencyMS=10 + ) + cluster_time = (await c1.admin.command({"ping": 1}))["$clusterTime"] + self.assertEqual(c1._topology.max_cluster_time(), cluster_time) + + # Advance the server's $clusterTime by performing an insert via another client. + await self.db.test.insert_one({"advance": "$clusterTime"}) + # Wait until the client C1 processes the next pair of SDAM heartbeat started + succeeded events. + heartbeat_listener.reset() + + async def next_heartbeat(): + events = heartbeat_listener.events + for i in range(len(events) - 1): + if isinstance(events[i], monitoring.ServerHeartbeatStartedEvent): + if isinstance(events[i + 1], monitoring.ServerHeartbeatSucceededEvent): + return True + return False + + await async_wait_until( + next_heartbeat, "never found pair of heartbeat started + succeeded events" + ) + # Assert that C1's max $clusterTime is still the same and has not been updated by SDAM. + cmd_listener.reset() + await c1.admin.command({"ping": 1}) + started = cmd_listener.started_events[0] + self.assertEqual(started.command_name, "ping") + self.assertEqual(started.command["$clusterTime"], cluster_time) + if __name__ == "__main__": unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index ce7a52f1a0..70dcfc5b48 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -244,7 +244,7 @@ class TestClusterTimeComparison(unittest.TestCase): def test_cluster_time_comparison(self): t = create_mock_topology("mongodb://host") - def send_cluster_time(time, inc, should_update): + def send_cluster_time(time, inc): old = t.max_cluster_time() new = {"clusterTime": Timestamp(time, inc)} got_hello( @@ -259,16 +259,14 @@ def send_cluster_time(time, inc, should_update): ) actual = t.max_cluster_time() - if should_update: - self.assertEqual(actual, new) - else: - self.assertEqual(actual, old) - - send_cluster_time(0, 1, True) - send_cluster_time(2, 2, True) - send_cluster_time(2, 1, False) - send_cluster_time(1, 3, False) - send_cluster_time(2, 3, True) + # We never update $clusterTime from monitoring connections. + self.assertEqual(actual, old) + + send_cluster_time(0, 1) + send_cluster_time(2, 2) + send_cluster_time(2, 1) + send_cluster_time(1, 3) + send_cluster_time(2, 3) class TestIgnoreStaleErrors(IntegrationTest): diff --git a/test/test_session.py b/test/test_session.py index 175a282495..e80ab41896 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -36,8 +36,10 @@ client_context, unittest, ) +from test.helpers import client_knobs from test.utils import ( EventListener, + HeartbeatEventListener, OvertCommandListener, wait_until, ) @@ -1121,10 +1123,10 @@ def setUp(self): if "$clusterTime" not in (client_context.hello): raise SkipTest("$clusterTime not supported") + # Sessions prose test: 3) $clusterTime in commands def test_cluster_time(self): listener = SessionTestListener() - # Prevent heartbeats from updating $clusterTime between operations. - client = self.rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) @@ -1203,6 +1205,38 @@ def aggregate(): f"{f.__name__} sent wrong $clusterTime with {event.command_name}", ) + # Sessions prose test: 20) Drivers do not gossip `$clusterTime` on SDAM commands + def test_cluster_time_not_used_by_sdam(self): + heartbeat_listener = HeartbeatEventListener() + cmd_listener = OvertCommandListener() + with client_knobs(min_heartbeat_interval=0.01): + c1 = self.single_client( + event_listeners=[heartbeat_listener, cmd_listener], heartbeatFrequencyMS=10 + ) + cluster_time = (c1.admin.command({"ping": 1}))["$clusterTime"] + self.assertEqual(c1._topology.max_cluster_time(), cluster_time) + + # Advance the server's $clusterTime by performing an insert via another client. + self.db.test.insert_one({"advance": "$clusterTime"}) + # Wait until the client C1 processes the next pair of SDAM heartbeat started + succeeded events. + heartbeat_listener.reset() + + def next_heartbeat(): + events = heartbeat_listener.events + for i in range(len(events) - 1): + if isinstance(events[i], monitoring.ServerHeartbeatStartedEvent): + if isinstance(events[i + 1], monitoring.ServerHeartbeatSucceededEvent): + return True + return False + + wait_until(next_heartbeat, "never found pair of heartbeat started + succeeded events") + # Assert that C1's max $clusterTime is still the same and has not been updated by SDAM. + cmd_listener.reset() + c1.admin.command({"ping": 1}) + started = cmd_listener.started_events[0] + self.assertEqual(started.command_name, "ping") + self.assertEqual(started.command["$clusterTime"], cluster_time) + if __name__ == "__main__": unittest.main() From c9a85ad321f98caf314fc9da7a367e94d661abac Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 27 Feb 2025 08:05:23 -0800 Subject: [PATCH 1764/2111] PYTHON-5090 Convert test.test_monitor to async (#2106) --- test/asynchronous/test_monitor.py | 121 ++++++++++++++++++++++++++++++ test/test_monitor.py | 29 ++++++- tools/synchro.py | 1 + 3 files changed, 147 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_monitor.py diff --git a/test/asynchronous/test_monitor.py b/test/asynchronous/test_monitor.py new file mode 100644 index 0000000000..2705fbda3b --- /dev/null +++ b/test/asynchronous/test_monitor.py @@ -0,0 +1,121 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitor module.""" +from __future__ import annotations + +import asyncio +import gc +import subprocess +import sys +import warnings +from functools import partial + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest +from test.utils import ( + ServerAndTopologyEventListener, + async_wait_until, +) + +from pymongo.periodic_executor import _EXECUTORS + +_IS_SYNC = False + + +def unregistered(ref): + gc.collect() + return ref not in _EXECUTORS + + +def get_executors(client): + executors = [] + for server in client._topology._servers.values(): + executors.append(server._monitor._executor) + executors.append(server._monitor._rtt_monitor._executor) + executors.append(client._kill_cursors_executor) + executors.append(client._topology._Topology__events_executor) + return [e for e in executors if e is not None] + + +class TestMonitor(AsyncIntegrationTest): + async def create_client(self): + listener = ServerAndTopologyEventListener() + client = await self.unmanaged_async_single_client(event_listeners=[listener]) + await connected(client) + return client + + async def test_cleanup_executors_on_client_del(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + client = await self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + # Each executor stores a weakref to itself in _EXECUTORS. + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + + del executors + del client + + for ref, name in executor_refs: + await async_wait_until( + partial(unregistered, ref), f"unregister executor: {name}", timeout=5 + ) + + def resource_warning_caught(): + gc.collect() + for warning in w: + if ( + issubclass(warning.category, ResourceWarning) + and "Call AsyncMongoClient.close() to safely shut down your client and free up resources." + in str(warning.message) + ): + return True + return False + + await async_wait_until(resource_warning_caught, "catch resource warning") + + async def test_cleanup_executors_on_client_close(self): + client = await self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + await client.close() + + for executor in executors: + await async_wait_until( + lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5 + ) + + @async_client_context.require_sync + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the AsyncMongoClient spawns a new thread + on process shutdown.""" + command = [ + sys.executable, + "-c", + "from pymongo import AsyncMongoClient; c = AsyncMongoClient()", + ] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_monitor.py b/test/test_monitor.py index a704f3d8cb..0fb7eb9cae 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -15,6 +15,7 @@ """Test the monitor module.""" from __future__ import annotations +import asyncio import gc import subprocess import sys @@ -23,7 +24,7 @@ sys.path[0:0] = [""] -from test import IntegrationTest, connected, unittest +from test import IntegrationTest, client_context, connected, unittest from test.utils import ( ServerAndTopologyEventListener, wait_until, @@ -31,6 +32,8 @@ from pymongo.periodic_executor import _EXECUTORS +_IS_SYNC = True + def unregistered(ref): gc.collect() @@ -55,8 +58,8 @@ def create_client(self): return client def test_cleanup_executors_on_client_del(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") client = self.create_client() executors = get_executors(client) self.assertEqual(len(executors), 4) @@ -70,6 +73,19 @@ def test_cleanup_executors_on_client_del(self): for ref, name in executor_refs: wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) + def resource_warning_caught(): + gc.collect() + for warning in w: + if ( + issubclass(warning.category, ResourceWarning) + and "Call MongoClient.close() to safely shut down your client and free up resources." + in str(warning.message) + ): + return True + return False + + wait_until(resource_warning_caught, "catch resource warning") + def test_cleanup_executors_on_client_close(self): client = self.create_client() executors = get_executors(client) @@ -80,10 +96,15 @@ def test_cleanup_executors_on_client_close(self): for executor in executors: wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) + @client_context.require_sync def test_no_thread_start_runtime_err_on_shutdown(self): """Test we silence noisy runtime errors fired when the MongoClient spawns a new thread on process shutdown.""" - command = [sys.executable, "-c", "from pymongo import MongoClient; c = MongoClient()"] + command = [ + sys.executable, + "-c", + "from pymongo import MongoClient; c = MongoClient()", + ] completed_process: subprocess.CompletedProcess = subprocess.run( command, capture_output=True ) diff --git a/tools/synchro.py b/tools/synchro.py index 39c53b435f..877a683531 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -226,6 +226,7 @@ def async_only_test(f: str) -> bool: "test_load_balancer.py", "test_logger.py", "test_max_staleness.py", + "test_monitor.py", "test_monitoring.py", "test_mongos_load_balancing.py", "test_on_demand_csfle.py", From e52965eea4f578d0359c43e8ef4b49711d2133f1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 27 Feb 2025 15:07:50 -0800 Subject: [PATCH 1765/2111] Remove redundant branch in GridFS (#2064) --- gridfs/asynchronous/grid_file.py | 17 +++++++---------- gridfs/synchronous/grid_file.py | 17 +++++++---------- 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index baa88d4808..3f3179c45c 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -1301,11 +1301,8 @@ async def write(self, data: Any) -> None: raise ValueError("cannot write to a closed file") try: - if isinstance(data, AsyncGridOut): - read = data.read - else: - # file-like - read = data.read + # file-like + read = data.read except AttributeError: # string if not isinstance(data, (str, bytes)): @@ -1317,7 +1314,7 @@ async def write(self, data: Any) -> None: raise TypeError( "must specify an encoding for file in order to write str" ) from None - read = io.BytesIO(data).read # type: ignore[assignment] + read = io.BytesIO(data).read if inspect.iscoroutinefunction(read): await self._write_async(read) @@ -1331,15 +1328,15 @@ async def write(self, data: Any) -> None: except BaseException: await self.abort() raise - self._buffer.write(to_write) # type: ignore - if len(to_write) < space: # type: ignore + self._buffer.write(to_write) + if len(to_write) < space: return # EOF or incomplete await self._flush_buffer() to_write = read(self.chunk_size) - while to_write and len(to_write) == self.chunk_size: # type: ignore + while to_write and len(to_write) == self.chunk_size: await self._flush_data(to_write) to_write = read(self.chunk_size) - self._buffer.write(to_write) # type: ignore + self._buffer.write(to_write) async def _write_async(self, read: Any) -> None: if self._buffer.tell() > 0: diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index ea0b53cfb7..35386857d6 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -1291,11 +1291,8 @@ def write(self, data: Any) -> None: raise ValueError("cannot write to a closed file") try: - if isinstance(data, GridOut): - read = data.read - else: - # file-like - read = data.read + # file-like + read = data.read except AttributeError: # string if not isinstance(data, (str, bytes)): @@ -1307,7 +1304,7 @@ def write(self, data: Any) -> None: raise TypeError( "must specify an encoding for file in order to write str" ) from None - read = io.BytesIO(data).read # type: ignore[assignment] + read = io.BytesIO(data).read if inspect.iscoroutinefunction(read): self._write_async(read) @@ -1321,15 +1318,15 @@ def write(self, data: Any) -> None: except BaseException: self.abort() raise - self._buffer.write(to_write) # type: ignore - if len(to_write) < space: # type: ignore + self._buffer.write(to_write) + if len(to_write) < space: return # EOF or incomplete self._flush_buffer() to_write = read(self.chunk_size) - while to_write and len(to_write) == self.chunk_size: # type: ignore + while to_write and len(to_write) == self.chunk_size: self._flush_data(to_write) to_write = read(self.chunk_size) - self._buffer.write(to_write) # type: ignore + self._buffer.write(to_write) def _write_async(self, read: Any) -> None: if self._buffer.tell() > 0: From 080c1c61212594e4de4792cf055b904a570c4359 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Feb 2025 10:48:36 -0800 Subject: [PATCH 1766/2111] PYTHON-5166 Allow Database.command to run bulkWrite commands (#2164) --- doc/changelog.rst | 21 ++++++++++++++++++++- pymongo/message.py | 2 +- test/asynchronous/test_database.py | 15 +++++++++++++++ test/test_database.py | 15 +++++++++++++++ 4 files changed, 51 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index ee66bb178f..fcad842def 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,7 +1,26 @@ Changelog ========= -Changes in Version 4.11.0 (YYYY/MM/DD) +Changes in Version 4.11.2 (YYYY/MM/DD) +-------------------------------------- + +Version 4.11.2 is a bug fix release. + +- Fixed a bug where :meth:`~pymongo.database.Database.command` would fail when attempting to run the bulkWrite command. + +Issues Resolved +............... + +See the `PyMongo 4.11.2 release notes in JIRA`_ for the list of resolved issues in this release. + +.. _PyMongo 4.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=42506 + +Changes in Version 4.11.1 (2025/02/10) +-------------------------------------- + +- Fixed support for prebuilt ``ppc64le`` and ``s390x`` wheels. + +Changes in Version 4.11.0 (2025/01/28) -------------------------------------- .. warning:: PyMongo 4.11 drops support for Python 3.8 and PyPy 3.9: Python 3.9+ or PyPy 3.10+ is now required. diff --git a/pymongo/message.py b/pymongo/message.py index 10c9edb5cd..8e2fd6f990 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -105,7 +105,7 @@ "insert": "documents", "update": "updates", "delete": "deletes", - "bulkWrite": "bulkWrite", + "bulkWrite": "ops", } _UNICODE_REPLACE_CODEC_OPTIONS: CodecOptions[Mapping[str, Any]] = CodecOptions( diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 55a8cc3ab2..2bbf763ab3 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -430,6 +430,21 @@ async def test_command_with_regex(self): for doc in result["cursor"]["firstBatch"]: self.assertTrue(isinstance(doc["r"], Regex)) + async def test_command_bulkWrite(self): + # Ensure bulk write commands can be run directly via db.command(). + if async_client_context.version.at_least(8, 0): + await self.client.admin.command( + { + "bulkWrite": 1, + "nsInfo": [{"ns": self.db.test.full_name}], + "ops": [{"insert": 0, "document": {}}], + } + ) + await self.db.command({"insert": "test", "documents": [{}]}) + await self.db.command({"update": "test", "updates": [{"q": {}, "u": {"$set": {"x": 1}}}]}) + await self.db.command({"delete": "test", "deletes": [{"q": {}, "limit": 1}]}) + await self.db.test.drop() + async def test_cursor_command(self): db = self.client.pymongo_test await db.test.drop() diff --git a/test/test_database.py b/test/test_database.py index aad9089bd8..48cca921b1 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -425,6 +425,21 @@ def test_command_with_regex(self): for doc in result["cursor"]["firstBatch"]: self.assertTrue(isinstance(doc["r"], Regex)) + def test_command_bulkWrite(self): + # Ensure bulk write commands can be run directly via db.command(). + if client_context.version.at_least(8, 0): + self.client.admin.command( + { + "bulkWrite": 1, + "nsInfo": [{"ns": self.db.test.full_name}], + "ops": [{"insert": 0, "document": {}}], + } + ) + self.db.command({"insert": "test", "documents": [{}]}) + self.db.command({"update": "test", "updates": [{"q": {}, "u": {"$set": {"x": 1}}}]}) + self.db.command({"delete": "test", "deletes": [{"q": {}, "limit": 1}]}) + self.db.test.drop() + def test_cursor_command(self): db = self.client.pymongo_test db.test.drop() From e28f49c51098592268ace56c49b8110691c178b7 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 28 Feb 2025 15:24:28 -0800 Subject: [PATCH 1767/2111] PYTHON-5164 Fix mockupdb TestClusterTime (#2163) --- test/mockupdb/test_cluster_time.py | 49 +++--------------------------- 1 file changed, 5 insertions(+), 44 deletions(-) diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index ea879b7ea3..42ca916971 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -123,50 +123,11 @@ def test_monitor(self): client = self.simple_client(server.uri, heartbeatFrequencyMS=500) - request = server.receives("ismaster") - # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn("$clusterTime", request) - request.ok(reply) - - # Next exchange: client returns first clusterTime, we send the second. - request = server.receives("ismaster") - self.assertIn("$clusterTime", request) - self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) - cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) - reply["$clusterTime"] = {"clusterTime": cluster_time} - request.reply(reply) - - # Third exchange: client returns second clusterTime. - request = server.receives("ismaster") - self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) - - # Return command error with a new clusterTime. - cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) - error = { - "ok": 0, - "code": 211, - "errmsg": "Cache Reader No keys found for HMAC ...", - "$clusterTime": {"clusterTime": cluster_time}, - } - request.reply(error) - - # PyMongo 3.11+ closes the monitoring connection on command errors. - - # Fourth exchange: the Monitor closes the connection and runs the - # handshake on a new connection. - request = server.receives("ismaster") - # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn("$clusterTime", request) - - # Reply without $clusterTime. - reply.pop("$clusterTime") - request.reply(reply) - - # Fifth exchange: the Monitor attempt uses the clusterTime from - # the previous isMaster error. - request = server.receives("ismaster") - self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) - request.reply(reply) + for _ in range(3): + request = server.receives("ismaster") + # No $clusterTime in heartbeats or handshakes. + self.assertNotIn("$clusterTime", request) + request.ok(reply) client.close() def test_collection_bulk_error(self): From 38f97a3b49dc0a00cb117b7b60a45c5e24c9d195 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 3 Mar 2025 11:20:44 -0500 Subject: [PATCH 1768/2111] PYTHON-5177 - Add instructions for enabling debug logs to CONTRIBUTING.md (#2170) --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4b1d139684..d22874faf8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -224,6 +224,10 @@ the pages will re-render and the browser will automatically refresh. - Run the tests with `just run-tests`. - When done, run `just teardown-tests` to clean up. +## Enable Debug Logs +- Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. +- Add `log_cli_level = "DEBUG` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine. + ## Re-sync Spec Tests If you would like to re-sync the copy of the specification tests in the From 6da1fdbed9c1dd06ad87e642ed9a696b2847d61b Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Mon, 3 Mar 2025 12:51:32 -0500 Subject: [PATCH 1769/2111] PYTHON-5126 Resync bson vector spec tests following additions (#2161) --- bson/binary.py | 4 +++ test/bson_binary_vector/float32.json | 27 +++++++++++++++++-- test/bson_binary_vector/int8.json | 4 +-- test/bson_binary_vector/packed_bit.json | 35 ++++++++++++++++++++++++- test/test_bson_binary_vector.py | 18 ++++++++++--- 5 files changed, 79 insertions(+), 9 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index aab59cccbc..ee481fa1a5 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -450,6 +450,10 @@ def from_vector( raise ValueError(f"padding does not apply to {dtype=}") elif dtype == BinaryVectorDtype.PACKED_BIT: # pack ints in [0, 255] as unsigned uint8 format_str = "B" + if 0 <= padding > 7: + raise ValueError(f"{padding=}. It must be in [0,1, ..7].") + if padding and not vector: + raise ValueError("Empty vector with non-zero padding.") elif dtype == BinaryVectorDtype.FLOAT32: # pack floats as float32 format_str = "f" if padding: diff --git a/test/bson_binary_vector/float32.json b/test/bson_binary_vector/float32.json index bbbe00b758..845f504ff3 100644 --- a/test/bson_binary_vector/float32.json +++ b/test/bson_binary_vector/float32.json @@ -11,6 +11,15 @@ "padding": 0, "canonical_bson": "1C00000005766563746F72000A0000000927000000FE420000E04000" }, + { + "description": "Vector with decimals and negative value FLOAT32", + "valid": true, + "vector": [127.7, -7.7], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1C00000005766563746F72000A0000000927006666FF426666F6C000" + }, { "description": "Empty Vector FLOAT32", "valid": true, @@ -35,8 +44,22 @@ "vector": [127.0, 7.0], "dtype_hex": "0x27", "dtype_alias": "FLOAT32", - "padding": 3 + "padding": 3, + "canonical_bson": "1C00000005766563746F72000A0000000927030000FE420000E04000" + }, + { + "description": "Insufficient vector data with 3 bytes FLOAT32", + "valid": false, + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "canonical_bson": "1700000005766563746F7200050000000927002A2A2A00" + }, + { + "description": "Insufficient vector data with 5 bytes FLOAT32", + "valid": false, + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "canonical_bson": "1900000005766563746F7200070000000927002A2A2A2A2A00" } ] } - diff --git a/test/bson_binary_vector/int8.json b/test/bson_binary_vector/int8.json index 7529721e5e..29524fb617 100644 --- a/test/bson_binary_vector/int8.json +++ b/test/bson_binary_vector/int8.json @@ -42,7 +42,8 @@ "vector": [127, 7], "dtype_hex": "0x03", "dtype_alias": "INT8", - "padding": 3 + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000903037F0700" }, { "description": "INT8 with float inputs", @@ -54,4 +55,3 @@ } ] } - diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json index a41cd593f5..a220e7e318 100644 --- a/test/bson_binary_vector/packed_bit.json +++ b/test/bson_binary_vector/packed_bit.json @@ -2,6 +2,15 @@ "description": "Tests of Binary subtype 9, Vectors, with dtype PACKED_BIT", "test_key": "vector", "tests": [ + { + "description": "Padding specified with no vector data PACKED_BIT", + "valid": false, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 1, + "canonical_bson": "1400000005766563746F72000200000009100100" + }, { "description": "Simple Vector PACKED_BIT", "valid": true, @@ -44,7 +53,31 @@ "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", "padding": 0 + }, + { + "description": "Vector with float values PACKED_BIT", + "valid": false, + "vector": [127.5], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Exceeding maximum padding PACKED_BIT", + "valid": false, + "vector": [1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 8, + "canonical_bson": "1500000005766563746F7200030000000910080100" + }, + { + "description": "Negative padding PACKED_BIT", + "valid": false, + "vector": [1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": -1 } ] } - diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py index 00c82bbb65..a49f515fea 100644 --- a/test/test_bson_binary_vector.py +++ b/test/test_bson_binary_vector.py @@ -49,7 +49,7 @@ def create_test(case_spec): def run_test(self): for test_case in case_spec.get("tests", []): description = test_case["description"] - vector_exp = test_case["vector"] + vector_exp = test_case.get("vector", []) dtype_hex_exp = test_case["dtype_hex"] dtype_alias_exp = test_case.get("dtype_alias") padding_exp = test_case.get("padding", 0) @@ -76,9 +76,13 @@ def run_test(self): self.assertEqual( vector_obs.dtype, BinaryVectorDtype[dtype_alias_exp], description ) - self.assertEqual(vector_obs.data, vector_exp, description) - self.assertEqual(vector_obs.padding, padding_exp, description) - + if dtype_exp in [BinaryVectorDtype.FLOAT32]: + [ + self.assertAlmostEqual(vector_obs.data[i], vector_exp[i], delta=1e-5) + for i in range(len(vector_exp)) + ] + else: + self.assertEqual(vector_obs.data, vector_exp, description) # Test Binary Vector to BSON vector_exp = Binary.from_vector(vector_exp, dtype_exp, padding_exp) cB_obs = binascii.hexlify(encode({test_key: vector_exp})).decode().upper() @@ -86,7 +90,13 @@ def run_test(self): else: with self.assertRaises((struct.error, ValueError), msg=description): + # Tests Binary.from_vector Binary.from_vector(vector_exp, dtype_exp, padding_exp) + # Tests Binary.as_vector + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + binary_obs.as_vector() return run_test From 150a3ba756ecea63499b986e9abebd60f8cc6c9b Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 3 Mar 2025 10:14:04 -0800 Subject: [PATCH 1770/2111] PYTHON-5078 Convert test.test_discovery_and_monitoring to async (#2093) Co-authored-by: Noah Stapp --- .../test_discovery_and_monitoring.py | 503 ++++++++++++++++++ test/asynchronous/unified_format.py | 16 +- test/test_discovery_and_monitoring.py | 125 +++-- test/unified_format.py | 8 + test/utils.py | 16 + tools/synchro.py | 3 + 6 files changed, 631 insertions(+), 40 deletions(-) create mode 100644 test/asynchronous/test_discovery_and_monitoring.py diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py new file mode 100644 index 0000000000..7c4095ebb8 --- /dev/null +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -0,0 +1,503 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module.""" +from __future__ import annotations + +import asyncio +import os +import socketserver +import sys +import threading +from asyncio import StreamReader, StreamWriter +from pathlib import Path +from test.asynchronous.helpers import ConcurrentRunner + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, AsyncUnitTest, unittest +from test.asynchronous.pymongo_mocks import DummyMonitor +from test.asynchronous.unified_format import generate_test_classes +from test.utils import ( + CMAPListener, + HeartbeatEventListener, + HeartbeatEventsListListener, + assertion_context, + async_barrier_wait, + async_client_context, + async_create_barrier, + async_get_pool, + async_wait_until, + server_name_to_type, +) +from unittest.mock import patch + +from bson import Timestamp, json_util +from pymongo import AsyncMongoClient, common, monitoring +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology, _ErrorContext +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _check_command_response, _check_write_command_response +from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent +from pymongo.server_description import SERVER_TYPE, ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE +from pymongo.uri_parser import parse_uri + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + SDAM_PATH = os.path.join(Path(__file__).resolve().parent, "discovery_and_monitoring") +else: + SDAM_PATH = os.path.join( + Path(__file__).resolve().parent.parent, + "discovery_and_monitoring", + ) + + +async def create_mock_topology(uri, monitor_class=DummyMonitor): + parsed_uri = parse_uri(uri) + replica_set_name = None + direct_connection = None + load_balanced = None + if "replicaset" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaset"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] + + topology_settings = TopologySettings( + parsed_uri["nodelist"], + replica_set_name=replica_set_name, + monitor_class=monitor_class, + direct_connection=direct_connection, + load_balanced=load_balanced, + ) + + c = Topology(topology_settings) + await c.open() + return c + + +async def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) + await topology.on_change(server_description) + + +async def got_app_error(topology, app_error): + server_address = common.partition_node(app_error["address"]) + server = topology.get_server_by_address(server_address) + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] + # XXX: We could get better test coverage by mocking the errors on the + # Pool/AsyncConnection. + try: + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") + else: + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError + except (AutoReconnect, NotPrimaryError, OperationFailure) as e: + if when == "beforeHandshakeCompletes": + completed_handshake = False + elif when == "afterHandshakeCompletes": + completed_handshake = True + else: + raise AssertionError(f"Unknown when field {when}") + + await topology.handle_error( + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) + + +def get_type(topology, hostname): + description = topology.get_server_by_address((hostname, 27017)).description + return description.server_type + + +class TestAllScenarios(AsyncUnitTest): + pass + + +def topology_type_name(topology_type): + return TOPOLOGY_TYPE._fields[topology_type] + + +def server_type_name(server_type): + return SERVER_TYPE._fields[server_type] + + +def check_outcome(self, topology, outcome): + expected_servers = outcome["servers"] + + # Check weak equality before proceeding. + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) + + if outcome.get("compatible") is False: + with self.assertRaises(ConfigurationError): + topology.description.check_compatible() + else: + # No error. + topology.description.check_compatible() + + # Since lengths are equal, every actual server must have a corresponding + # expected server. + for expected_server_address, expected_server in expected_servers.items(): + node = common.partition_node(expected_server_address) + self.assertTrue(topology.has_server(node)) + actual_server = topology.get_server_by_address(node) + actual_server_description = actual_server.description + expected_server_type = server_name_to_type(expected_server["type"]) + + self.assertEqual( + server_type_name(expected_server_type), + server_type_name(actual_server_description.server_type), + ) + + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) + + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) + + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) + + self.assertEqual( + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) + + expected_pool = expected_server.get("pool") + if expected_pool: + self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) + + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) + + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) + + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) + + +def create_test(scenario_def): + async def run_scenario(self): + c = await create_mock_topology(scenario_def["uri"]) + + for i, phase in enumerate(scenario_def["phases"]): + # Including the phase description makes failures easier to debug. + description = phase.get("description", str(i)) + with assertion_context(f"phase: {description}"): + for response in phase.get("responses", []): + await got_hello(c, common.partition_node(response[0]), response[1]) + + for app_error in phase.get("applicationErrors", []): + await got_app_error(c, app_error) + + check_outcome(self, c, phase["outcome"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(SDAM_PATH): + dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestClusterTimeComparison(AsyncPyMongoTestCase): + async def test_cluster_time_comparison(self): + t = await create_mock_topology("mongodb://host") + + async def send_cluster_time(time, inc): + old = t.max_cluster_time() + new = {"clusterTime": Timestamp(time, inc)} + await got_hello( + t, + ("host", 27017), + { + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "$clusterTime": new, + }, + ) + + actual = t.max_cluster_time() + # We never update $clusterTime from monitoring connections. + self.assertEqual(actual, old) + + await send_cluster_time(0, 1) + await send_cluster_time(2, 2) + await send_cluster_time(2, 1) + await send_cluster_time(1, 3) + await send_cluster_time(2, 3) + + +class TestIgnoreStaleErrors(AsyncIntegrationTest): + async def test_ignore_stale_connection_errors(self): + if not _IS_SYNC and sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") + N_TASKS = 5 + barrier = async_create_barrier(N_TASKS, timeout=30) + client = await self.async_rs_or_single_client(minPoolSize=N_TASKS) + + # Wait for initial discovery. + await client.admin.command("ping") + pool = await async_get_pool(client) + starting_generation = pool.gen.get_overall() + await async_wait_until(lambda: len(pool.conns) == N_TASKS, "created conns") + + async def mock_command(*args, **kwargs): + # Synchronize all tasks to ensure they use the same generation. + await async_barrier_wait(barrier, timeout=30) + raise AutoReconnect("mock AsyncConnection.command error") + + for conn in pool.conns: + conn.command = mock_command + + async def insert_command(i): + try: + await client.test.command("insert", "test", documents=[{"i": i}]) + except AutoReconnect: + pass + + tasks = [] + for i in range(N_TASKS): + tasks.append(ConcurrentRunner(target=insert_command, args=(i,))) + for t in tasks: + await t.start() + for t in tasks: + await t.join() + + # Expect a single pool reset for the network error + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) + + # Server should be selectable. + await client.admin.command("ping") + + +class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): + pass + + +class TestPoolManagement(AsyncIntegrationTest): + @async_client_context.require_failCommand_appName + async def test_pool_unpause(self): + # This test implements the prose test "AsyncConnection Pool Management" + listener = CMAPHeartbeatListener() + _ = await self.async_single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) + # Assert that AsyncConnectionPoolReadyEvent occurs after the first + # ServerHeartbeatSucceededEvent. + await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) + + listener.reset() + fail_hello = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", + }, + } + async with self.fail_point(fail_hello): + await listener.async_wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + await listener.async_wait_for_event(monitoring.PoolClearedEvent, 1) + await listener.async_wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) + await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + + +class TestServerMonitoringMode(AsyncIntegrationTest): + @async_client_context.require_no_serverless + @async_client_context.require_no_load_balancer + async def asyncSetUp(self): + await super().asyncSetUp() + + async def test_rtt_connection_is_enabled_stream(self): + client = await self.async_rs_or_single_client(serverMonitoringMode="stream") + await client.admin.command("ping") + + def predicate(): + for _, server in client._topology._servers.items(): + monitor = server._monitor + if not monitor._stream: + return False + if async_client_context.version >= (4, 4): + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is None: + return False + else: + if monitor._rtt_monitor._executor._task is None: + return False + else: + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is not None: + return False + else: + if monitor._rtt_monitor._executor._task is not None: + return False + return True + + await async_wait_until(predicate, "find all RTT monitors") + + async def test_rtt_connection_is_disabled_poll(self): + client = await self.async_rs_or_single_client(serverMonitoringMode="poll") + + await self.assert_rtt_connection_is_disabled(client) + + async def test_rtt_connection_is_disabled_auto(self): + envs = [ + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9"}, + {"FUNCTIONS_WORKER_RUNTIME": "python"}, + {"K_SERVICE": "gcpservicename"}, + {"FUNCTION_NAME": "gcpfunctionname"}, + {"VERCEL": "1"}, + ] + for env in envs: + with patch.dict("os.environ", env): + client = await self.async_rs_or_single_client(serverMonitoringMode="auto") + await self.assert_rtt_connection_is_disabled(client) + + async def assert_rtt_connection_is_disabled(self, client): + await client.admin.command("ping") + for _, server in client._topology._servers.items(): + monitor = server._monitor + self.assertFalse(monitor._stream) + if _IS_SYNC: + self.assertIsNone(monitor._rtt_monitor._executor._thread) + else: + self.assertIsNone(monitor._rtt_monitor._executor._task) + + +class MockTCPHandler(socketserver.BaseRequestHandler): + def handle(self): + self.server.events.append("client connected") + if self.request.recv(1024).strip(): + self.server.events.append("client hello received") + self.request.close() + + +class TCPServer(socketserver.TCPServer): + allow_reuse_address = True + + def handle_request_and_shutdown(self): + self.handle_request() + self.server_close() + + +class TestHeartbeatStartOrdering(AsyncPyMongoTestCase): + async def test_heartbeat_start_ordering(self): + events = [] + listener = HeartbeatEventsListListener(events) + + if _IS_SYNC: + server = TCPServer(("localhost", 9999), MockTCPHandler) + server.events = events + server_thread = ConcurrentRunner(target=server.handle_request_and_shutdown) + await server_thread.start() + _c = await self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + await server_thread.join() + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + else: + + async def handle_client(reader: StreamReader, writer: StreamWriter): + events.append("client connected") + if (await reader.read(1024)).strip(): + events.append("client hello received") + writer.close() + await writer.wait_closed() + + server = await asyncio.start_server(handle_client, "localhost", 9999) + server.events = events + await server.start_serving() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + await _c.aconnect() + + await listener.async_wait_for_event(ServerHeartbeatStartedEvent, 1) + await listener.async_wait_for_event(ServerHeartbeatFailedEvent, 1) + + server.close() + await server.wait_closed() + await _c.close() + + self.assertEqual( + events, + [ + "serverHeartbeatStartedEvent", + "client connected", + "client hello received", + "serverHeartbeatFailedEvent", + ], + ) + + +# Generate unified tests. +globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index c315e86945..c3931da936 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -544,6 +544,14 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if ( + "Error returned from connection pool clear with interruptInUseConnections=true is retryable" + in spec["description"] + and not _IS_SYNC + ): + self.skipTest("PYTHON-5170 tests are flakey") + if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: + self.skipTest("PYTHON-5174 tests are flakey") class_name = self.__class__.__name__.lower() description = spec["description"].lower() @@ -1151,7 +1159,7 @@ def _testOperation_assertTopologyType(self, spec): self.assertIsInstance(description, TopologyDescription) self.assertEqual(description.topology_type_name, spec["topologyType"]) - def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + async def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: """Run the waitForPrimaryChange test operation.""" client = self.entity_map[spec["client"]] old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] @@ -1165,13 +1173,13 @@ def get_primary(td: TopologyDescription) -> Optional[_Address]: old_primary = get_primary(old_description) - def primary_changed() -> bool: - primary = client.primary + async def primary_changed() -> bool: + primary = await client.primary if primary is None: return False return primary != old_primary - wait_until(primary_changed, "change primary", timeout=timeout) + await async_wait_until(primary_changed, "change primary", timeout=timeout) async def _testOperation_runOnThread(self, spec): """Run the 'runOnThread' operation.""" diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 70dcfc5b48..2eb278383c 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -15,14 +15,18 @@ """Test the topology module.""" from __future__ import annotations +import asyncio import os import socketserver import sys import threading +from asyncio import StreamReader, StreamWriter +from pathlib import Path +from test.helpers import ConcurrentRunner sys.path[0:0] = [""] -from test import IntegrationTest, PyMongoTestCase, unittest +from test import IntegrationTest, PyMongoTestCase, UnitTest, unittest from test.pymongo_mocks import DummyMonitor from test.unified_format import generate_test_classes from test.utils import ( @@ -30,7 +34,9 @@ HeartbeatEventListener, HeartbeatEventsListListener, assertion_context, + barrier_wait, client_context, + create_barrier, get_pool, server_name_to_type, wait_until, @@ -55,8 +61,16 @@ from pymongo.topology_description import TOPOLOGY_TYPE from pymongo.uri_parser import parse_uri +_IS_SYNC = True + # Location of JSON test specifications. -SDAM_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") +if _IS_SYNC: + SDAM_PATH = os.path.join(Path(__file__).resolve().parent, "discovery_and_monitoring") +else: + SDAM_PATH = os.path.join( + Path(__file__).resolve().parent.parent, + "discovery_and_monitoring", + ) def create_mock_topology(uri, monitor_class=DummyMonitor): @@ -128,7 +142,7 @@ def get_type(topology, hostname): return description.server_type -class TestAllScenarios(unittest.TestCase): +class TestAllScenarios(UnitTest): pass @@ -240,7 +254,7 @@ def create_tests(): create_tests() -class TestClusterTimeComparison(unittest.TestCase): +class TestClusterTimeComparison(PyMongoTestCase): def test_cluster_time_comparison(self): t = create_mock_topology("mongodb://host") @@ -271,20 +285,21 @@ def send_cluster_time(time, inc): class TestIgnoreStaleErrors(IntegrationTest): def test_ignore_stale_connection_errors(self): - N_THREADS = 5 - barrier = threading.Barrier(N_THREADS, timeout=30) - client = self.rs_or_single_client(minPoolSize=N_THREADS) - self.addCleanup(client.close) + if not _IS_SYNC and sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") + N_TASKS = 5 + barrier = create_barrier(N_TASKS, timeout=30) + client = self.rs_or_single_client(minPoolSize=N_TASKS) # Wait for initial discovery. client.admin.command("ping") pool = get_pool(client) starting_generation = pool.gen.get_overall() - wait_until(lambda: len(pool.conns) == N_THREADS, "created conns") + wait_until(lambda: len(pool.conns) == N_TASKS, "created conns") def mock_command(*args, **kwargs): - # Synchronize all threads to ensure they use the same generation. - barrier.wait() + # Synchronize all tasks to ensure they use the same generation. + barrier_wait(barrier, timeout=30) raise AutoReconnect("mock Connection.command error") for conn in pool.conns: @@ -296,12 +311,12 @@ def insert_command(i): except AutoReconnect: pass - threads = [] - for i in range(N_THREADS): - threads.append(threading.Thread(target=insert_command, args=(i,))) - for t in threads: + tasks = [] + for i in range(N_TASKS): + tasks.append(ConcurrentRunner(target=insert_command, args=(i,))) + for t in tasks: t.start() - for t in threads: + for t in tasks: t.join() # Expect a single pool reset for the network error @@ -320,10 +335,9 @@ class TestPoolManagement(IntegrationTest): def test_pool_unpause(self): # This test implements the prose test "Connection Pool Management" listener = CMAPHeartbeatListener() - client = self.single_client( + _ = self.single_client( appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] ) - self.addCleanup(client.close) # Assert that ConnectionPoolReadyEvent occurs after the first # ServerHeartbeatSucceededEvent. listener.wait_for_event(monitoring.PoolReadyEvent, 1) @@ -355,7 +369,6 @@ def setUp(self): def test_rtt_connection_is_enabled_stream(self): client = self.rs_or_single_client(serverMonitoringMode="stream") - self.addCleanup(client.close) client.admin.command("ping") def predicate(): @@ -364,18 +377,26 @@ def predicate(): if not monitor._stream: return False if client_context.version >= (4, 4): - if monitor._rtt_monitor._executor._thread is None: - return False + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is None: + return False + else: + if monitor._rtt_monitor._executor._task is None: + return False else: - if monitor._rtt_monitor._executor._thread is not None: - return False + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is not None: + return False + else: + if monitor._rtt_monitor._executor._task is not None: + return False return True wait_until(predicate, "find all RTT monitors") def test_rtt_connection_is_disabled_poll(self): client = self.rs_or_single_client(serverMonitoringMode="poll") - self.addCleanup(client.close) + self.assert_rtt_connection_is_disabled(client) def test_rtt_connection_is_disabled_auto(self): @@ -389,7 +410,6 @@ def test_rtt_connection_is_disabled_auto(self): for env in envs: with patch.dict("os.environ", env): client = self.rs_or_single_client(serverMonitoringMode="auto") - self.addCleanup(client.close) self.assert_rtt_connection_is_disabled(client) def assert_rtt_connection_is_disabled(self, client): @@ -397,7 +417,10 @@ def assert_rtt_connection_is_disabled(self, client): for _, server in client._topology._servers.items(): monitor = server._monitor self.assertFalse(monitor._stream) - self.assertIsNone(monitor._rtt_monitor._executor._thread) + if _IS_SYNC: + self.assertIsNone(monitor._rtt_monitor._executor._thread) + else: + self.assertIsNone(monitor._rtt_monitor._executor._task) class MockTCPHandler(socketserver.BaseRequestHandler): @@ -420,16 +443,46 @@ class TestHeartbeatStartOrdering(PyMongoTestCase): def test_heartbeat_start_ordering(self): events = [] listener = HeartbeatEventsListListener(events) - server = TCPServer(("localhost", 9999), MockTCPHandler) - server.events = events - server_thread = threading.Thread(target=server.handle_request_and_shutdown) - server_thread.start() - _c = self.simple_client( - "mongodb://localhost:9999", serverSelectionTimeoutMS=500, event_listeners=(listener,) - ) - server_thread.join() - listener.wait_for_event(ServerHeartbeatStartedEvent, 1) - listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + if _IS_SYNC: + server = TCPServer(("localhost", 9999), MockTCPHandler) + server.events = events + server_thread = ConcurrentRunner(target=server.handle_request_and_shutdown) + server_thread.start() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + server_thread.join() + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + else: + + def handle_client(reader: StreamReader, writer: StreamWriter): + events.append("client connected") + if (reader.read(1024)).strip(): + events.append("client hello received") + writer.close() + writer.wait_closed() + + server = asyncio.start_server(handle_client, "localhost", 9999) + server.events = events + server.start_serving() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + _c._connect() + + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) + + server.close() + server.wait_closed() + _c.close() self.assertEqual( events, diff --git a/test/unified_format.py b/test/unified_format.py index d5698f5a77..8ed9e214bb 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -543,6 +543,14 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if ( + "Error returned from connection pool clear with interruptInUseConnections=true is retryable" + in spec["description"] + and not _IS_SYNC + ): + self.skipTest("PYTHON-5170 tests are flakey") + if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: + self.skipTest("PYTHON-5174 tests are flakey") class_name = self.__class__.__name__.lower() description = spec["description"].lower() diff --git a/test/utils.py b/test/utils.py index e089b3fc2f..ae316d0387 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1078,3 +1078,19 @@ def create_async_event(): def create_event(): return threading.Event() + + +def async_create_barrier(N_TASKS, timeout: float | None = None): + return asyncio.Barrier(N_TASKS) + + +def create_barrier(N_TASKS, timeout: float | None = None): + return threading.Barrier(N_TASKS, timeout=timeout) + + +async def async_barrier_wait(barrier, timeout: float | None = None): + await asyncio.wait_for(barrier.wait(), timeout=timeout) + + +def barrier_wait(barrier, timeout: float | None = None): + barrier.wait() diff --git a/tools/synchro.py b/tools/synchro.py index 877a683531..42d5694f47 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -124,6 +124,8 @@ "AsyncMockPool": "MockPool", "StopAsyncIteration": "StopIteration", "create_async_event": "create_event", + "async_create_barrier": "create_barrier", + "async_barrier_wait": "barrier_wait", "async_joinall": "joinall", } @@ -213,6 +215,7 @@ def async_only_test(f: str) -> bool: "test_custom_types.py", "test_database.py", "test_data_lake.py", + "test_discovery_and_monitoring.py", "test_dns.py", "test_encryption.py", "test_examples.py", From 9d5d4fa735be939f88c4e4610ec1da27dc025d6f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Mar 2025 12:19:51 -0600 Subject: [PATCH 1771/2111] PYTHON-5136 Add check-json to pre-commit checks (#2167) --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a0b06ab0dc..335bf97490 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,6 +6,7 @@ repos: - id: check-added-large-files - id: check-case-conflict - id: check-toml + - id: check-json - id: check-yaml exclude: template.yaml - id: debug-statements From 5ac262783f7fae2bc95ba354aad0ab27d5c23423 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 3 Mar 2025 12:07:43 -0800 Subject: [PATCH 1772/2111] PYTHON-5155 Use dochub link for fork warning (#2173) --- doc/changelog.rst | 3 +-- pymongo/asynchronous/topology.py | 2 +- pymongo/synchronous/topology.py | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index fcad842def..cf5d5e8ff7 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -199,7 +199,7 @@ PyMongo 4.9 brings a number of improvements including: unction-as-a-service (FaaS) like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions. On some FaaS systems, there is a ``fork()`` operation at function startup. By delaying the connection to the first operation, we avoid a deadlock. See - `Is PyMongo Fork-Safe`_ for more information. + :ref:`pymongo-fork-safe` for more information. Issues Resolved @@ -208,7 +208,6 @@ Issues Resolved See the `PyMongo 4.9 release notes in JIRA`_ for the list of resolved issues in this release. -.. _Is PyMongo Fork-Safe: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/troubleshooting/#forking-a-process-causes-a-deadlock .. _PyMongo 4.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39940 diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index bb003bbfde..76f0fb6cde 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -236,7 +236,7 @@ async def open(self) -> None: warnings.warn( # type: ignore[call-overload] # noqa: B028 "AsyncMongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " - "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/troubleshooting/#forking-a-process-causes-a-deadlock", + "https://dochub.mongodb.org/core/pymongo-fork-deadlock", **kwargs, ) async with self._lock: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 2bc8934540..ea0edae919 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -236,7 +236,7 @@ def open(self) -> None: warnings.warn( # type: ignore[call-overload] # noqa: B028 "MongoClient opened before fork. May not be entirely fork-safe, " "proceed with caution. See PyMongo's documentation for details: " - "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/troubleshooting/#forking-a-process-causes-a-deadlock", + "https://dochub.mongodb.org/core/pymongo-fork-deadlock", **kwargs, ) with self._lock: From f1fe49784b5ec7364b009fe64ab17ff7f86e7111 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 3 Mar 2025 15:09:29 -0600 Subject: [PATCH 1773/2111] PYTHON-5178 Make test_async_cancellation_closes_change_stream more robust (#2175) --- test/asynchronous/test_async_cancellation.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/asynchronous/test_async_cancellation.py b/test/asynchronous/test_async_cancellation.py index b73c7a8084..b7fde834a2 100644 --- a/test/asynchronous/test_async_cancellation.py +++ b/test/asynchronous/test_async_cancellation.py @@ -102,6 +102,7 @@ async def task(): async def test_async_cancellation_closes_change_stream(self): self.addAsyncCleanup(self.client.db.test.delete_many, {}) change_stream = await self.client.db.test.watch(batch_size=2) + event = asyncio.Event() # Make sure getMore commands block fail_command = { @@ -113,11 +114,12 @@ async def test_async_cancellation_closes_change_stream(self): async def task(): async with self.fail_point(fail_command): await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + event.set() await change_stream.next() task = asyncio.create_task(task()) - await asyncio.sleep(0.1) + await event.wait() task.cancel() with self.assertRaises(asyncio.CancelledError): From 8927cfe79b981f8ea2177e3c41061d4388485c82 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 4 Mar 2025 08:32:39 -0600 Subject: [PATCH 1774/2111] PYTHON-5181 Make it easier to set debugging logging in an Evergreen patch (#2177) --- .evergreen/config.yml | 2 +- .evergreen/run-tests.sh | 2 +- .evergreen/scripts/run_tests.py | 6 +++++- .evergreen/scripts/setup_tests.py | 2 +- CONTRIBUTING.md | 2 ++ 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4c12e8bdb2..2d357c8229 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -262,7 +262,7 @@ functions: params: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, - DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION] + DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG] binary: bash working_dir: "src" args: [.evergreen/just.sh, setup-tests, "${TEST_NAME}", "${SUB_TEST_NAME}"] diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 1c453c1d6d..61d505d45a 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -33,6 +33,6 @@ fi PIP_QUIET=0 uv run ${UV_ARGS} --with pip pip list # Start the test runner. -uv run ${UV_ARGS} .evergreen/scripts/run_tests.py +uv run ${UV_ARGS} .evergreen/scripts/run_tests.py "$@" popd diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index ceae46d343..830f5190e0 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import logging import os import platform import shutil @@ -106,8 +107,11 @@ def run() -> None: test_kms_remote(SUB_TEST_NAME) return + if os.environ.get("DEBUG_LOG"): + TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG} -o log_cli=1".split()) + # Run local tests. - pytest.main(TEST_ARGS) + pytest.main(TEST_ARGS + sys.argv[1:]) # Handle perf test post actions. if TEST_PERF: diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 615f07320a..c874263a5d 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -26,7 +26,7 @@ ) # Passthrough environment variables. -PASS_THROUGH_ENV = ["GREEN_FRAMEWORK", "NO_EXT", "MONGODB_API_VERSION"] +PASS_THROUGH_ENV = ["GREEN_FRAMEWORK", "NO_EXT", "MONGODB_API_VERSION", "DEBUG_LOG"] # Map the test name to a test suite. TEST_SUITE_MAP = { diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d22874faf8..ccce0c1ae7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -227,6 +227,8 @@ the pages will re-render and the browser will automatically refresh. ## Enable Debug Logs - Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. - Add `log_cli_level = "DEBUG` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine. +- You can also set `DEBUG_LOG=1` and run either `just setup-tests` or `just-test`. +- For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for the patch. ## Re-sync Spec Tests From 9a123bb631a268cdc6cac0f899e4d687ca7ea821 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 4 Mar 2025 09:14:10 -0600 Subject: [PATCH 1775/2111] PYTHON-5183 Fix C Extension building for Windows spawn hosts (#2178) --- .evergreen/scripts/setup-dev-env.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index f158c71320..d1c4be3494 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -43,8 +43,18 @@ if [ -z "${PYMONGO_BIN_DIR:-}" ]; then export PATH="$PATH:$HOME/.local/bin" fi +# Set up venv, making sure c extensions build unless disabled. +if [ -z "${NO_EXT:-}" ]; then + export PYMONGO_C_EXT_MUST_BUILD=1 +fi +# Set up visual studio env on Windows spawn hosts. +if [ -f $HOME/.visualStudioEnv.sh ]; then + set +u + SSH_TTY=1 source $HOME/.visualStudioEnv.sh + set -u +fi uv sync --frozen -uv run --frozen --with pip pip install -e . + echo "Setting up python environment... done." # Ensure there is a pre-commit hook if there is a git checkout. From 3f5d6c1539e66436b5eeda7e257d1804ca9aa13d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 4 Mar 2025 11:23:39 -0600 Subject: [PATCH 1776/2111] PYTHON-5131 Migrate off of Ubuntu 20.04 GitHub Actions Runners (#2169) --- .github/workflows/dist.yml | 10 +++++----- .github/workflows/test-python.yml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 3dee8f581c..81f86721ef 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -34,11 +34,11 @@ jobs: # Github Actions doesn't support pairing matrix values together, let's improvise # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - - [ubuntu-20.04, "manylinux_x86_64", "cp3*-manylinux_x86_64"] - - [ubuntu-20.04, "manylinux_aarch64", "cp3*-manylinux_aarch64"] - - [ubuntu-20.04, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] - - [ubuntu-20.04, "manylinux_s390x", "cp3*-manylinux_s390x"] - - [ubuntu-20.04, "manylinux_i686", "cp3*-manylinux_i686"] + - [ubuntu-latest, "manylinux_x86_64", "cp3*-manylinux_x86_64"] + - [ubuntu-latest, "manylinux_aarch64", "cp3*-manylinux_aarch64"] + - [ubuntu-latest, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] + - [ubuntu-latest, "manylinux_s390x", "cp3*-manylinux_s390x"] + - [ubuntu-latest, "manylinux_i686", "cp3*-manylinux_i686"] - [windows-2019, "win_amd6", "cp3*-win_amd64"] - [windows-2019, "win32", "cp3*-win32"] - [macos-14, "macos", "cp*-macosx_*"] diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 5f95aa4a5e..4f9ed398ad 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -54,7 +54,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-20.04] + os: [ubuntu-latest] python-version: ["3.9", "pypy-3.10", "3.13", "3.13t"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: From 9edfc626ed3251be4079a2186abec9410689c2ce Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 4 Mar 2025 11:25:15 -0800 Subject: [PATCH 1777/2111] PYTHON-5167 Properly cleanup test SocketGetter tasks (#2176) --- test/asynchronous/test_pooling.py | 27 ++++++++++++++++++++++----- test/test_pooling.py | 27 ++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 09b8fb0853..812b5a48e0 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -123,9 +123,12 @@ async def run_mongo_thread(self): self.state = "connection" - def __del__(self): + async def release_conn(self): if self.sock: - self.sock.close_conn(None) + await self.sock.unpin() + self.sock = None + return True + return False async def run_cases(client, cases): @@ -352,6 +355,10 @@ async def test_no_wait_queue_timeout(self): self.assertEqual(t.state, "connection") self.assertEqual(t.sock, s1) + # Cleanup + await t.release_conn() + await t.join() + await pool.close() async def test_checkout_more_than_max_pool_size(self): pool = await self.create_pool(max_pool_size=2) @@ -364,16 +371,26 @@ async def test_checkout_more_than_max_pool_size(self): socks.append(sock) tasks = [] - for _ in range(30): + for _ in range(10): t = SocketGetter(self.c, pool) await t.start() tasks.append(t) await asyncio.sleep(1) for t in tasks: self.assertEqual(t.state, "get_socket") - + # Cleanup for socket_info in socks: - socket_info.close_conn(None) + await socket_info.unpin() + while tasks: + to_remove = [] + for t in tasks: + if await t.release_conn(): + to_remove.append(t) + await t.join() + for t in to_remove: + tasks.remove(t) + await asyncio.sleep(0.05) + await pool.close() async def test_maxConnecting(self): client = await self.async_rs_or_single_client() diff --git a/test/test_pooling.py b/test/test_pooling.py index 5d23b85f23..1755365f80 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -123,9 +123,12 @@ def run_mongo_thread(self): self.state = "connection" - def __del__(self): + def release_conn(self): if self.sock: - self.sock.close_conn(None) + self.sock.unpin() + self.sock = None + return True + return False def run_cases(client, cases): @@ -352,6 +355,10 @@ def test_no_wait_queue_timeout(self): self.assertEqual(t.state, "connection") self.assertEqual(t.sock, s1) + # Cleanup + t.release_conn() + t.join() + pool.close() def test_checkout_more_than_max_pool_size(self): pool = self.create_pool(max_pool_size=2) @@ -364,16 +371,26 @@ def test_checkout_more_than_max_pool_size(self): socks.append(sock) tasks = [] - for _ in range(30): + for _ in range(10): t = SocketGetter(self.c, pool) t.start() tasks.append(t) time.sleep(1) for t in tasks: self.assertEqual(t.state, "get_socket") - + # Cleanup for socket_info in socks: - socket_info.close_conn(None) + socket_info.unpin() + while tasks: + to_remove = [] + for t in tasks: + if t.release_conn(): + to_remove.append(t) + t.join() + for t in to_remove: + tasks.remove(t) + time.sleep(0.05) + pool.close() def test_maxConnecting(self): client = self.rs_or_single_client() From 74b85d51d5e91b2c39fe912b9218101ecd0ed156 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 4 Mar 2025 15:09:57 -0600 Subject: [PATCH 1778/2111] PYTHON-5180 Use a standard batchtime of one week (#2179) --- .evergreen/config.yml | 4 ++-- .evergreen/generated_configs/variants.yml | 28 +++++++++++------------ .evergreen/scripts/generate_config.py | 4 ++-- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 2d357c8229..a06a93dab3 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -1430,10 +1430,10 @@ buildvariants: - debian11-small tasks: - name: test-gcpkms - batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + batchtime: 10080 # 7 days - name: test-gcpkms-fail - name: test-azurekms - batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + batchtime: 10080 # 7 days - name: test-azurekms-fail - name: rhel8-test-lambda diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 88c564909f..153b2310c2 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -823,7 +823,7 @@ buildvariants: display_name: OCSP RHEL8 v4.4 Python3.9 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -836,7 +836,7 @@ buildvariants: display_name: OCSP RHEL8 v5.0 Python3.10 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -849,7 +849,7 @@ buildvariants: display_name: OCSP RHEL8 v6.0 Python3.11 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -862,7 +862,7 @@ buildvariants: display_name: OCSP RHEL8 v7.0 Python3.12 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -875,7 +875,7 @@ buildvariants: display_name: OCSP RHEL8 v8.0 Python3.13 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -888,7 +888,7 @@ buildvariants: display_name: OCSP RHEL8 rapid PyPy3.10 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -901,7 +901,7 @@ buildvariants: display_name: OCSP RHEL8 latest Python3.9 run_on: - rhel87-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -914,7 +914,7 @@ buildvariants: display_name: OCSP Win64 v4.4 Python3.9 run_on: - windows-64-vsMulti-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -927,7 +927,7 @@ buildvariants: display_name: OCSP Win64 v8.0 Python3.13 run_on: - windows-64-vsMulti-small - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -940,7 +940,7 @@ buildvariants: display_name: OCSP macOS v4.4 Python3.9 run_on: - macos-14 - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -953,7 +953,7 @@ buildvariants: display_name: OCSP macOS v8.0 Python3.13 run_on: - macos-14 - batchtime: 20160 + batchtime: 10080 expansions: AUTH: noauth SSL: ssl @@ -971,21 +971,21 @@ buildvariants: display_name: Auth OIDC Ubuntu-22 run_on: - ubuntu2204-small - batchtime: 20160 + batchtime: 10080 - name: auth-oidc-macos tasks: - name: testoidc_task_group display_name: Auth OIDC macOS run_on: - macos-14 - batchtime: 20160 + batchtime: 10080 - name: auth-oidc-win64 tasks: - name: testoidc_task_group display_name: Auth OIDC Win64 run_on: - windows-64-vsMulti-small - batchtime: 20160 + batchtime: 10080 # Pyopenssl tests - name: pyopenssl-macos-python3.9 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 4d4d29c6dd..8052e33b6b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -234,7 +234,7 @@ def generate_yaml(tasks=None, variants=None): def create_ocsp_variants() -> list[BuildVariant]: variants = [] - batchtime = BATCHTIME_WEEK * 2 + batchtime = BATCHTIME_WEEK expansions = dict(AUTH="noauth", SSL="ssl", TOPOLOGY="server") base_display = "OCSP" @@ -674,7 +674,7 @@ def create_oidc_auth_variants(): tasks, get_display_name("Auth OIDC", host), host=host, - batchtime=BATCHTIME_WEEK * 2, + batchtime=BATCHTIME_WEEK, ) ) return variants From baf0344446ecfd4cbab118038165fffb61f020d4 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 4 Mar 2025 22:03:56 -0800 Subject: [PATCH 1779/2111] PYTHON-5167 Properly cleanup test SpecRunnerTask (#2181) --- test/asynchronous/unified_format.py | 1 + test/unified_format.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index c3931da936..d4c3d40d20 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -384,6 +384,7 @@ async def drop(self: AsyncGridFSBucket, *args: Any, **kwargs: Any) -> None: name = spec["id"] thread = SpecRunnerTask(name) await thread.start() + self.test.addAsyncCleanup(thread.join, 5) self[name] = thread return diff --git a/test/unified_format.py b/test/unified_format.py index 8ed9e214bb..293fbd97ca 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -383,6 +383,7 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: name = spec["id"] thread = SpecRunnerThread(name) thread.start() + self.test.addCleanup(thread.join, 5) self[name] = thread return From de09181b1cf45fd2bec6448d744d6dfbdae86407 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Mar 2025 10:30:52 -0800 Subject: [PATCH 1780/2111] PYTHON-4960 More informative error message for stale primary (#2115) --- pymongo/topology_description.py | 18 ++++++++++++++---- .../test_discovery_and_monitoring.py | 3 +++ .../rs/new_primary.json | 3 ++- .../rs/new_primary_new_electionid.json | 6 ++++-- .../rs/new_primary_new_setversion.json | 6 ++++-- .../rs/primary_disconnect_electionid.json | 4 +++- .../rs/primary_disconnect_setversion.json | 4 +++- ...ion_greaterthan_max_without_electionid.json | 3 ++- .../setversion_without_electionid-pre-6.0.json | 3 ++- ..._setversion_without_electionid-pre-6.0.json | 6 ++++-- .../rs/use_setversion_without_electionid.json | 6 ++++-- test/test_discovery_and_monitoring.py | 3 +++ 12 files changed, 48 insertions(+), 17 deletions(-) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index f669fefd2e..742bbf8c6e 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -33,7 +33,7 @@ from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common -from pymongo.errors import ConfigurationError +from pymongo.errors import ConfigurationError, PyMongoError from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection @@ -563,7 +563,11 @@ def _update_rs_from_primary( if None not in new_election_tuple: if None not in max_election_tuple and new_election_tuple < max_election_tuple: # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() + sds[server_description.address] = server_description.to_unknown( + PyMongoError( + f"primary marked stale due to electionId/setVersion mismatch, {new_election_tuple} is stale compared to {max_election_tuple}" + ) + ) return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id max_election_id = server_description.election_id @@ -578,7 +582,11 @@ def _update_rs_from_primary( max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) if new_election_safe < max_election_safe: # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() + sds[server_description.address] = server_description.to_unknown( + PyMongoError( + f"primary marked stale due to electionId/setVersion mismatch, {new_election_tuple} is stale compared to {max_election_tuple}" + ) + ) return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id else: max_election_id = server_description.election_id @@ -591,7 +599,9 @@ def _update_rs_from_primary( and server.address != server_description.address ): # Reset old primary's type to Unknown. - sds[server.address] = server.to_unknown() + sds[server.address] = server.to_unknown( + PyMongoError("primary marked stale due to discovery of newer primary") + ) # There can be only one prior primary. break diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index 7c4095ebb8..c3c2bb1a6c 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -180,6 +180,9 @@ def check_outcome(self, topology, outcome): server_type_name(expected_server_type), server_type_name(actual_server_description.server_type), ) + expected_error = expected_server.get("error") + if expected_error: + self.assertIn(expected_error, str(actual_server_description.error)) self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json index 1a84c69c91..69b07516b9 100644 --- a/test/discovery_and_monitoring/rs/new_primary.json +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -58,7 +58,8 @@ "servers": { "a:27017": { "type": "Unknown", - "setName": null + "setName": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index 509720d445..90ef0ce8dc 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -76,7 +76,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -123,7 +124,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index 96533c61ee..9c1e2d4bdd 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -76,7 +76,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -123,7 +124,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index 5a91188ea8..b030bd2c53 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -48,7 +48,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -124,6 +125,7 @@ "a:27017": { "type": "Unknown", "setName": null, + "error": "primary marked stale due to electionId/setVersion mismatch", "electionId": null }, "b:27017": { diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index f7417ad77b..653a5f29e8 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -48,7 +48,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -124,6 +125,7 @@ "a:27017": { "type": "Unknown", "setName": null, + "error": "primary marked stale due to electionId/setVersion mismatch", "electionId": null }, "b:27017": { diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json index 97870d71d5..06c89609f5 100644 --- a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -65,7 +65,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json index e62c6963ed..87029e578b 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -65,7 +65,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json index 2f9b567b85..a63efeac12 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -73,7 +73,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -117,7 +118,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 551f3e12c2..eaf586d728 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -81,7 +81,8 @@ "b:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" } }, "topologyType": "ReplicaSetWithPrimary", @@ -128,7 +129,8 @@ "b:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" } }, "topologyType": "ReplicaSetWithPrimary", diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 2eb278383c..bfe0b24387 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -180,6 +180,9 @@ def check_outcome(self, topology, outcome): server_type_name(expected_server_type), server_type_name(actual_server_description.server_type), ) + expected_error = expected_server.get("error") + if expected_error: + self.assertIn(expected_error, str(actual_server_description.error)) self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) From df7304ddf003c516f7d37a274ee312306ee72673 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 5 Mar 2025 13:34:33 -0600 Subject: [PATCH 1781/2111] PYTHON-5148 Update SBOM usage for Kondukto (#2168) --- .github/workflows/release-python.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index d8c900e77b..21c7ca5f7a 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -20,7 +20,6 @@ env: # Changes per repo PRODUCT_NAME: PyMongo # Changes per branch - SILK_ASSET_GROUP: mongodb-python-driver EVERGREEN_PROJECT: mongo-python-driver # Constant # inputs will be empty on a scheduled run. so, we only set dry_run @@ -122,7 +121,6 @@ jobs: version: ${{ env.VERSION }} following_version: ${{ env.FOLLOWING_VERSION }} product_name: ${{ env.PRODUCT_NAME }} - silk_asset_group: ${{ env.SILK_ASSET_GROUP }} evergreen_project: ${{ env.EVERGREEN_PROJECT }} token: ${{ github.token }} dry_run: ${{ env.DRY_RUN }} From 4ed621b3e724b3a43e08057e8b0e88b1b6074f0c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Mar 2025 13:42:36 -0800 Subject: [PATCH 1782/2111] PYTHON-5173 Adjust test_continuous_network_errors to be less flaky (#2183) --- test/asynchronous/test_client.py | 10 +++++----- test/test_client.py | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 744a170be2..4758ce3dda 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -1824,20 +1824,20 @@ def server_description_count(): return i gc.collect() - with client_knobs(min_heartbeat_interval=0.003): + with client_knobs(min_heartbeat_interval=0.002): client = self.simple_client( - "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 + "invalid:27017", heartbeatFrequencyMS=2, serverSelectionTimeoutMS=200 ) initial_count = server_description_count() with self.assertRaises(ServerSelectionTimeoutError): await client.test.test.find_one() gc.collect() final_count = server_description_count() + await client.close() # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: - # AssertionError: 19 != 46 within 15 delta (27 difference) - # On Python 3.11 we seem to get more of a delta. - self.assertAlmostEqual(initial_count, final_count, delta=20) + # AssertionError: 11 != 47 within 20 delta (36 difference) + self.assertAlmostEqual(initial_count, final_count, delta=30) @async_client_context.require_failCommand_fail_point async def test_network_error_message(self): diff --git a/test/test_client.py b/test/test_client.py index cdc7691c28..aa321d2925 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1781,20 +1781,20 @@ def server_description_count(): return i gc.collect() - with client_knobs(min_heartbeat_interval=0.003): + with client_knobs(min_heartbeat_interval=0.002): client = self.simple_client( - "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 + "invalid:27017", heartbeatFrequencyMS=2, serverSelectionTimeoutMS=200 ) initial_count = server_description_count() with self.assertRaises(ServerSelectionTimeoutError): client.test.test.find_one() gc.collect() final_count = server_description_count() + client.close() # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: - # AssertionError: 19 != 46 within 15 delta (27 difference) - # On Python 3.11 we seem to get more of a delta. - self.assertAlmostEqual(initial_count, final_count, delta=20) + # AssertionError: 11 != 47 within 20 delta (36 difference) + self.assertAlmostEqual(initial_count, final_count, delta=30) @client_context.require_failCommand_fail_point def test_network_error_message(self): From 4ffebb04e9d219021588e0d16ddea31982775b5a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 5 Mar 2025 14:05:21 -0800 Subject: [PATCH 1783/2111] PYTHON-5168 Use logging for client background task errors (#2166) --- pymongo/asynchronous/mongo_client.py | 8 ++++---- pymongo/logger.py | 8 ++++++++ pymongo/synchronous/mongo_client.py | 8 ++++---- test/asynchronous/test_client.py | 23 +++++++++++++++++++++++ test/test_client.py | 23 +++++++++++++++++++++++ 5 files changed, 62 insertions(+), 8 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 37be9a194c..eefafd5fda 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -88,7 +88,7 @@ _async_create_lock, _release_locks, ) -from pymongo.logger import _CLIENT_LOGGER, _log_or_warn +from pymongo.logger import _CLIENT_LOGGER, _log_client_error, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason from pymongo.operations import ( @@ -2049,7 +2049,7 @@ async def _process_kill_cursors(self) -> None: # can be caught in _process_periodic_tasks raise else: - helpers_shared._handle_exception() + _log_client_error() # Don't re-open topology if it's closed and there's no pending cursors. if address_to_cursor_ids: @@ -2061,7 +2061,7 @@ async def _process_kill_cursors(self) -> None: if isinstance(exc, InvalidOperation) and self._topology._closed: raise else: - helpers_shared._handle_exception() + _log_client_error() # This method is run periodically by a background thread. async def _process_periodic_tasks(self) -> None: @@ -2075,7 +2075,7 @@ async def _process_periodic_tasks(self) -> None: if isinstance(exc, InvalidOperation) and self._topology._closed: return else: - helpers_shared._handle_exception() + _log_client_error() def _return_server_session( self, server_session: Union[_ServerSession, _EmptyServerSession] diff --git a/pymongo/logger.py b/pymongo/logger.py index 2ff35328b4..9079dc3f3d 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -96,6 +96,14 @@ class _SDAMStatusMessage(str, enum.Enum): } +def _log_client_error() -> None: + # This is called from a daemon thread so check for None to account for interpreter shutdown. + logger = _CLIENT_LOGGER + if logger: + # logger.exception includes the full traceback. + logger.exception("MongoClient background task encountered an error:") + + def _debug_log(logger: logging.Logger, **fields: Any) -> None: logger.debug(LogMessage(**fields)) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 373deabd4e..b101636066 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -80,7 +80,7 @@ _create_lock, _release_locks, ) -from pymongo.logger import _CLIENT_LOGGER, _log_or_warn +from pymongo.logger import _CLIENT_LOGGER, _log_client_error, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason from pymongo.operations import ( @@ -2043,7 +2043,7 @@ def _process_kill_cursors(self) -> None: # can be caught in _process_periodic_tasks raise else: - helpers_shared._handle_exception() + _log_client_error() # Don't re-open topology if it's closed and there's no pending cursors. if address_to_cursor_ids: @@ -2055,7 +2055,7 @@ def _process_kill_cursors(self) -> None: if isinstance(exc, InvalidOperation) and self._topology._closed: raise else: - helpers_shared._handle_exception() + _log_client_error() # This method is run periodically by a background thread. def _process_periodic_tasks(self) -> None: @@ -2069,7 +2069,7 @@ def _process_periodic_tasks(self) -> None: if isinstance(exc, InvalidOperation) and self._topology._closed: return else: - helpers_shared._handle_exception() + _log_client_error() def _return_server_session( self, server_session: Union[_ServerSession, _EmptyServerSession] diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 4758ce3dda..acc815c8a4 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -1790,6 +1790,29 @@ async def stall_connect(*args, **kwargs): # Each ping command should not take more than 2 seconds self.assertLess(total, 2) + async def test_background_connections_log_on_error(self): + with self.assertLogs("pymongo.client", level="ERROR") as cm: + client = await self.async_rs_or_single_client(minPoolSize=1) + # Create a single connection in the pool. + await client.admin.command("ping") + + # Cause new connections to fail. + pool = await async_get_pool(client) + + async def fail_connect(*args, **kwargs): + raise Exception("failed to connect") + + pool.connect = fail_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + await pool.reset_without_pause() + + await async_wait_until( + lambda: "failed to connect" in "".join(cm.output), "start creating connections" + ) + self.assertIn("MongoClient background task encountered an error", "".join(cm.output)) + @async_client_context.require_replica_set async def test_direct_connection(self): # direct_connection=True should result in Single topology. diff --git a/test/test_client.py b/test/test_client.py index aa321d2925..8e99866cc8 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1747,6 +1747,29 @@ def stall_connect(*args, **kwargs): # Each ping command should not take more than 2 seconds self.assertLess(total, 2) + def test_background_connections_log_on_error(self): + with self.assertLogs("pymongo.client", level="ERROR") as cm: + client = self.rs_or_single_client(minPoolSize=1) + # Create a single connection in the pool. + client.admin.command("ping") + + # Cause new connections to fail. + pool = get_pool(client) + + def fail_connect(*args, **kwargs): + raise Exception("failed to connect") + + pool.connect = fail_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + pool.reset_without_pause() + + wait_until( + lambda: "failed to connect" in "".join(cm.output), "start creating connections" + ) + self.assertIn("MongoClient background task encountered an error", "".join(cm.output)) + @client_context.require_replica_set def test_direct_connection(self): # direct_connection=True should result in Single topology. From 85b6f182ae58109472a30de63ddfdb680d1beef4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 5 Mar 2025 20:03:06 -0600 Subject: [PATCH 1784/2111] PYTHON-5157 Convert aws tests to use python scripts (#2180) --- .evergreen/config.yml | 239 +------ .evergreen/generated_configs/tasks.yml | 681 ++++++++++++++++++++ .evergreen/generated_configs/variants.yml | 56 +- .evergreen/run-mongodb-aws-ecs-test.sh | 2 +- .evergreen/scripts/generate_config.py | 54 +- .evergreen/scripts/run-aws-ecs-auth-test.sh | 12 - .evergreen/scripts/run-mongodb-aws-test.sh | 28 - .evergreen/scripts/run_tests.py | 11 +- .evergreen/scripts/setup_tests.py | 14 +- .evergreen/scripts/teardown-tests.sh | 6 - .evergreen/scripts/teardown_tests.py | 9 +- .github/workflows/test-python.yml | 1 + pyproject.toml | 2 +- 13 files changed, 761 insertions(+), 354 deletions(-) delete mode 100755 .evergreen/scripts/run-aws-ecs-auth-test.sh delete mode 100755 .evergreen/scripts/run-mongodb-aws-test.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a06a93dab3..5ebe4ec65a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -206,7 +206,7 @@ functions: params: binary: bash include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, LOAD_BALANCER, - STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED] + STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS] args: - src/.evergreen/scripts/bootstrap-mongo-orchestration.sh - command: expansions.update @@ -295,100 +295,6 @@ functions: - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/run-atlas-tests.sh - "get aws auth secrets": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_aws/setup-secrets.sh - - "run aws auth test with regular aws credentials": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - regular - - "run aws auth test with assume role credentials": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - assume-role - - "run aws auth test with aws EC2 credentials": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - ec2 - - "run aws auth test with aws web identity credentials": - - # Test with and without AWS_ROLE_SESSION_NAME set. - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - web-identity - - command: subprocess.exec - type: test - params: - include_expansions_in_env: [ "DRIVERS_TOOLS", "skip_EC2_auth_test" ] - binary: bash - working_dir: "src" - env: - AWS_ROLE_SESSION_NAME: test - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - web-identity - - "run aws auth test with aws credentials as environment variables": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - env-creds - - "run aws auth test with aws credentials and session token as environment variables": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["DRIVERS_TOOLS", "skip_EC2_auth_test"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mongodb-aws-test.sh - - session-creds - "run oidc auth test with test credentials": - command: subprocess.exec type: test @@ -411,16 +317,6 @@ functions: args: - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh - "run aws ECS auth test": - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-aws-ecs-auth-test.sh - "cleanup": - command: subprocess.exec params: @@ -431,13 +327,6 @@ functions: - .evergreen/scripts/cleanup.sh "teardown system": - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - args: - # Ensure the instance profile is reassigned for aws tests. - - ${DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh - command: subprocess.exec params: binary: bash @@ -1159,132 +1048,6 @@ tasks: OCSP_ALGORITHM: "ecdsa" OCSP_TLS_SHOULD_SUCCEED: "false" - - name: "aws-auth-test-4.4" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "4.4" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-5.0" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "5.0" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-6.0" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "6.0" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-7.0" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "7.0" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-8.0" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "8.0" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-rapid" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "rapid" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-latest" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "latest" - - func: "assume ec2 role" - - func: "get aws auth secrets" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws auth test with aws web identity credentials" - - func: "run aws ECS auth test" - - name: "oidc-auth-test" commands: - func: "run oidc auth test with test credentials" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index b7aab80b30..56fdc1aa67 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1,4 +1,685 @@ tasks: + # Aws tests + - name: test-auth-aws-4.4-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-4.4-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-4.4-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-4.4-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-4.4-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-4.4-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-4.4-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-4.4-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-5.0-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-5.0-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-5.0-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-5.0-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-5.0-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-5.0-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-5.0-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-5.0-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-6.0-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-6.0-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-6.0-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-6.0-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-6.0-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-6.0-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-6.0-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-6.0-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-7.0-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-7.0-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-7.0-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-7.0-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-7.0-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-7.0-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-7.0-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-7.0-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-8.0-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-8.0-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-8.0-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-8.0-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-8.0-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-8.0-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-8.0-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-8.0-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-rapid-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-rapid-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-rapid-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-rapid-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-rapid-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-rapid-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-rapid-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-rapid-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-latest-regular + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-latest-assume-role + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-latest-ec2 + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-latest-env-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-latest-session-creds + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + tags: [auth-aws, auth-aws-session-creds] + - name: test-auth-aws-latest-web-identity + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-latest-ecs + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + tags: [auth-aws, auth-aws-ecs] + - name: test-auth-aws-latest-web-identity-session-name + commands: + - func: bootstrap mongo-orchestration + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + tags: [auth-aws, auth-aws-web-identity] + # Kms tests - name: test-gcpkms commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 153b2310c2..80f08bc7a4 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -107,13 +107,7 @@ buildvariants: # Aws auth tests - name: auth-aws-ubuntu-20-python3.9 tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest + - name: .auth-aws display_name: Auth AWS Ubuntu-20 Python3.9 run_on: - ubuntu2004-small @@ -121,13 +115,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: auth-aws-ubuntu-20-python3.13 tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest + - name: .auth-aws display_name: Auth AWS Ubuntu-20 Python3.13 run_on: - ubuntu2004-small @@ -135,67 +123,35 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 - name: auth-aws-win64-python3.9 tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest + - name: .auth-aws !.auth-aws-ecs display_name: Auth AWS Win64 Python3.9 run_on: - windows-64-vsMulti-small expansions: - skip_ECS_auth_test: "true" PYTHON_BINARY: C:/python/Python39/python.exe - name: auth-aws-win64-python3.13 tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest + - name: .auth-aws !.auth-aws-ecs display_name: Auth AWS Win64 Python3.13 run_on: - windows-64-vsMulti-small expansions: - skip_ECS_auth_test: "true" PYTHON_BINARY: C:/python/Python313/python.exe - name: auth-aws-macos-python3.9 tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest + - name: .auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 display_name: Auth AWS macOS Python3.9 run_on: - macos-14 expansions: - skip_ECS_auth_test: "true" - skip_EC2_auth_test: "true" - skip_web_identity_auth_test: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: auth-aws-macos-python3.13 tasks: - - name: aws-auth-test-4.4 - - name: aws-auth-test-5.0 - - name: aws-auth-test-6.0 - - name: aws-auth-test-7.0 - - name: aws-auth-test-8.0 - - name: aws-auth-test-rapid - - name: aws-auth-test-latest + - name: .auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 display_name: Auth AWS macOS Python3.13 run_on: - macos-14 expansions: - skip_ECS_auth_test: "true" - skip_EC2_auth_test: "true" - skip_web_identity_auth_test: "true" PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 # Compression tests diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index ef7e0ba333..09fa571959 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -30,5 +30,5 @@ export SET_XTRACE_ON=1 cd src rm -rf .venv rm -f .evergreen/scripts/test-env.sh || true -bash ./.evergreen/just.sh setup-tests auth_aws ecs +bash ./.evergreen/just.sh setup-tests auth_aws ecs-remote bash .evergreen/just.sh run-tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 8052e33b6b..3c731a1f9a 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -734,23 +734,14 @@ def create_atlas_connect_variants(): def create_aws_auth_variants(): variants = [] - tasks = [ - "aws-auth-test-4.4", - "aws-auth-test-5.0", - "aws-auth-test-6.0", - "aws-auth-test-7.0", - "aws-auth-test-8.0", - "aws-auth-test-rapid", - "aws-auth-test-latest", - ] for host_name, python in product(["ubuntu20", "win64", "macos"], MIN_MAX_PYTHON): expansions = dict() - if host_name != "ubuntu20": - expansions["skip_ECS_auth_test"] = "true" + tasks = [".auth-aws"] if host_name == "macos": - expansions["skip_EC2_auth_test"] = "true" - expansions["skip_web_identity_auth_test"] = "true" + tasks = [".auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2"] + elif host_name == "win64": + tasks = [".auth-aws !.auth-aws-ecs"] host = HOSTS[host_name] variant = create_variant( tasks, @@ -854,6 +845,43 @@ def create_kms_tasks(): return tasks +def create_aws_tasks(): + tasks = [] + aws_test_types = [ + "regular", + "assume-role", + "ec2", + "env-creds", + "session-creds", + "web-identity", + "ecs", + ] + for version in get_versions_from("4.4"): + base_name = f"test-auth-aws-{version}" + base_tags = ["auth-aws"] + bootstrap_vars = dict(AUTH_AWS="1", VERSION=version) + bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + assume_func = FunctionCall(func="assume ec2 role") + for test_type in aws_test_types: + tags = [*base_tags, f"auth-aws-{test_type}"] + name = f"{base_name}-{test_type}" + test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type) + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [bootstrap_func, assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + + tags = [*base_tags, "auth-aws-web-identity"] + name = f"{base_name}-web-identity-session-name" + test_vars = dict( + TEST_NAME="auth_aws", SUB_TEST_NAME="web-identity", AWS_ROLE_SESSION_NAME="test" + ) + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [bootstrap_func, assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + + return tasks + + ################## # Generate Config ################## diff --git a/.evergreen/scripts/run-aws-ecs-auth-test.sh b/.evergreen/scripts/run-aws-ecs-auth-test.sh deleted file mode 100755 index b8197c4da5..0000000000 --- a/.evergreen/scripts/run-aws-ecs-auth-test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# shellcheck disable=SC2154 -if [ "${skip_ECS_auth_test}" = "true" ]; then - echo "This platform does not support the ECS auth test, skipping..." - exit 0 -fi -set -ex -cd "$DRIVERS_TOOLS"/.evergreen/auth_aws -. ./activate-authawsvenv.sh -. aws_setup.sh ecs -cd - diff --git a/.evergreen/scripts/run-mongodb-aws-test.sh b/.evergreen/scripts/run-mongodb-aws-test.sh deleted file mode 100755 index fd38574db8..0000000000 --- a/.evergreen/scripts/run-mongodb-aws-test.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit # Exit the script with error if any of the commands fail - -############################################ -# Main Program # -############################################ - -# Supported/used environment variables: -# MONGODB_URI Set the URI, including an optional username/password to use -# to connect to the server via MONGODB-AWS authentication -# mechanism. -# PYTHON_BINARY The Python version to use. - -# shellcheck disable=SC2154 -if [ "${skip_EC2_auth_test:-}" = "true" ] && { [ "$1" = "ec2" ] || [ "$1" = "web-identity" ]; }; then - echo "This platform does not support the EC2 auth test, skipping..." - exit 0 -fi - -echo "Running MONGODB-AWS authentication tests for $1" - -# Handle credentials and environment setup. -. "$DRIVERS_TOOLS"/.evergreen/auth_aws/aws_setup.sh "$1" - -bash ./.evergreen/just.sh setup-tests auth_aws $1 -bash ./.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 830f5190e0..cd781ccd70 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -9,7 +9,7 @@ from datetime import datetime import pytest -from utils import LOGGER, ROOT +from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command AUTH = os.environ.get("AUTH", "noauth") SSL = os.environ.get("SSL", "nossl") @@ -107,11 +107,18 @@ def run() -> None: test_kms_remote(SUB_TEST_NAME) return + # Run remote ecs tests. + if TEST_NAME == "auth_aws" and SUB_TEST_NAME == "ecs": + run_command(f"{DRIVERS_TOOLS}/.evergreen/auth_aws/aws_setup.sh ecs") + return + if os.environ.get("DEBUG_LOG"): TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG} -o log_cli=1".split()) # Run local tests. - pytest.main(TEST_ARGS + sys.argv[1:]) + ret = pytest.main(TEST_ARGS + sys.argv[1:]) + if ret != 0: + sys.exit(ret) # Handle perf test post actions. if TEST_PERF: diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index c874263a5d..eff7bed773 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -368,8 +368,18 @@ def handle_test_env() -> None: write_env("CA_FILE", os.environ["CA_FILE"]) write_env("OCSP_TLS_SHOULD_SUCCEED", os.environ["OCSP_TLS_SHOULD_SUCCEED"]) - if test_name == "auth_aws": - write_env("MONGODB_URI", os.environ["MONGODB_URI"]) + if test_name == "auth_aws" and sub_test_name != "ecs-remote": + auth_aws_dir = f"{DRIVERS_TOOLS}/.evergreen/auth_aws" + if "AWS_ROLE_SESSION_NAME" in os.environ: + write_env("AWS_ROLE_SESSION_NAME") + if sub_test_name != "ecs": + aws_setup = f"{auth_aws_dir}/aws_setup.sh" + run_command(f"bash {aws_setup} {sub_test_name}") + creds = read_env(f"{auth_aws_dir}/test-env.sh") + for name, value in creds.items(): + write_env(name, value) + else: + run_command(f"bash {auth_aws_dir}/setup-secrets.sh") if test_name == "perf": # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh index cd705c6b35..f9e76a20cf 100755 --- a/.evergreen/scripts/teardown-tests.sh +++ b/.evergreen/scripts/teardown-tests.sh @@ -2,10 +2,6 @@ set -eu SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) -SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )" -ROOT_DIR="$(dirname $SCRIPT_DIR)" - -pushd $ROOT_DIR > /dev/null # Try to source the env file. if [ -f $SCRIPT_DIR/env.sh ]; then @@ -25,5 +21,3 @@ fi # Teardown the test runner. uv run $SCRIPT_DIR/teardown_tests.py - -popd > /dev/null diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index fc1a937de0..824fc2c9bb 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -1,6 +1,7 @@ from __future__ import annotations import os +import sys from utils import DRIVERS_TOOLS, LOGGER, run_command @@ -9,7 +10,7 @@ LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'...") -# Shut down csfle servers if applicable +# Shut down csfle servers if applicable. if TEST_NAME == "encryption": run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh") @@ -23,4 +24,10 @@ teardown_kms(SUB_TEST_NAME) +# Tear down auth_aws if applicable. +# We do not run web-identity hosts on macos, because the hosts lack permissions, +# so there is no reason to run the teardown, which would error with a 401. +elif TEST_NAME == "auth_aws" and sys.platform != "darwin": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh") + LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.") diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 4f9ed398ad..c8ecb80091 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -53,6 +53,7 @@ jobs: # supercharge/mongodb-github-action requires containers so we don't test other platforms runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: [ubuntu-latest] python-version: ["3.9", "pypy-3.10", "3.13", "3.13t"] diff --git a/pyproject.toml b/pyproject.toml index c898169895..ca76cfa2c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,7 +118,7 @@ filterwarnings = [ # https://github.com/eventlet/eventlet/issues/818 "module:please use dns.resolver.Resolver.resolve:DeprecationWarning", # https://github.com/dateutil/dateutil/issues/1314 - "module:datetime.datetime.utc:DeprecationWarning:dateutil", + "module:datetime.datetime.utc:DeprecationWarning", ] markers = [ "auth_aws: tests that rely on pymongo-auth-aws", From 3653984f21880317e73df490ae237c4d44675f52 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 6 Mar 2025 16:15:14 -0600 Subject: [PATCH 1785/2111] PYTHON-5187 Add scripts to start and stop a server (#2184) --- .evergreen/config.yml | 105 ++- .evergreen/generated_configs/tasks.yml | 614 +++++++++--------- .../scripts/bootstrap-mongo-orchestration.sh | 30 - .evergreen/scripts/generate_config.py | 24 +- .evergreen/scripts/run-server.sh | 13 + .evergreen/scripts/run_server.py | 50 ++ .evergreen/scripts/setup_tests.py | 72 +- .evergreen/scripts/stop-server.sh | 14 + .evergreen/scripts/utils.py | 92 ++- CONTRIBUTING.md | 63 +- justfile | 8 + 11 files changed, 587 insertions(+), 498 deletions(-) delete mode 100755 .evergreen/scripts/bootstrap-mongo-orchestration.sh create mode 100755 .evergreen/scripts/run-server.sh create mode 100644 .evergreen/scripts/run_server.py create mode 100755 .evergreen/scripts/stop-server.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 5ebe4ec65a..3f29391373 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -201,17 +201,17 @@ functions: params: file: "src/xunit-results/TEST-*.xml" - "bootstrap mongo-orchestration": + "run-server": - command: subprocess.exec params: binary: bash - include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, LOAD_BALANCER, - STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS] - args: - - src/.evergreen/scripts/bootstrap-mongo-orchestration.sh + working_dir: "src" + include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, + STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER] + args: [.evergreen/just.sh, run-server, "${TEST_NAME}", "${SUB_TEST_NAME}"] - command: expansions.update params: - file: mo-expansion.yml + file: ${DRIVERS_TOOLS}/mo-expansion.yml "bootstrap data lake": - command: subprocess.exec @@ -227,13 +227,6 @@ functions: args: - ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh - "stop mongo-orchestration": - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh - "run mod_wsgi tests": - command: subprocess.exec type: test @@ -423,7 +416,6 @@ post: - func: "upload coverage" - func: "upload mo artifacts" - func: "upload test results" - - func: "stop mongo-orchestration" - func: "cleanup" task_groups: @@ -587,10 +579,7 @@ tasks: - name: "doctests" tags: ["doctests"] commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" + - func: "run-server" - func: "run doctests" - name: "test-serverless" @@ -603,16 +592,13 @@ tasks: - name: "test-enterprise-auth" tags: ["enterprise-auth"] commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" + - func: "run-server" - func: "assume ec2 role" - func: "run enterprise auth tests" - name: "test-search-index-helpers" commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: VERSION: "6.0" TOPOLOGY: "replica_set" @@ -624,28 +610,23 @@ tasks: - name: "mod-wsgi-standalone" tags: ["mod_wsgi"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: - VERSION: "latest" TOPOLOGY: "server" - func: "run mod_wsgi tests" - name: "mod-wsgi-replica-set" tags: ["mod_wsgi"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: - VERSION: "latest" TOPOLOGY: "replica_set" - func: "run mod_wsgi tests" - name: "mod-wsgi-embedded-mode-standalone" tags: ["mod_wsgi"] commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" + - func: "run-server" - func: "run mod_wsgi tests" vars: MOD_WSGI_EMBEDDED: "1" @@ -653,9 +634,8 @@ tasks: - name: "mod-wsgi-embedded-mode-replica-set" tags: ["mod_wsgi"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: - VERSION: "latest" TOPOLOGY: "replica_set" - func: "run mod_wsgi tests" vars: @@ -669,7 +649,7 @@ tasks: - name: "free-threading" tags: ["free-threading"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: VERSION: "8.0" TOPOLOGY: "replica_set" @@ -711,7 +691,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: "valid" - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -726,7 +706,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: "revoked" - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -741,7 +721,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: valid - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -756,7 +736,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: revoked - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -767,7 +747,7 @@ tasks: - name: test-ocsp-rsa-soft-fail tags: ["ocsp", "ocsp-rsa"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -782,7 +762,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: revoked - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - func: run-ocsp-test @@ -793,7 +773,7 @@ tasks: - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple tags: ["ocsp", "ocsp-rsa"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - func: run-ocsp-test @@ -808,7 +788,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: valid-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -823,7 +803,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: revoked-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -838,7 +818,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: valid-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -853,7 +833,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: revoked-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -868,7 +848,7 @@ tasks: vars: OCSP_ALGORITHM: "rsa" SERVER_TYPE: revoked-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - func: run-ocsp-test @@ -883,7 +863,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: valid - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -898,7 +878,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: revoked - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -913,7 +893,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: valid - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -928,7 +908,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: revoked - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -939,7 +919,7 @@ tasks: - name: test-ocsp-ecdsa-soft-fail tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -954,7 +934,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: revoked - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - func: run-ocsp-test @@ -965,7 +945,7 @@ tasks: - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple tags: ["ocsp", "ocsp-ecdsa"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - func: run-ocsp-test @@ -980,7 +960,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: valid-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -995,7 +975,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: revoked-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - func: run-ocsp-test @@ -1010,7 +990,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: valid-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -1025,7 +1005,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: revoked-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - func: run-ocsp-test @@ -1040,7 +1020,7 @@ tasks: vars: OCSP_ALGORITHM: "ecdsa" SERVER_TYPE: valid-delegate - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - func: run-ocsp-test @@ -1115,10 +1095,9 @@ tasks: - name: "perf-6.0-standalone" tags: ["perf"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: VERSION: "v6.0-perf" - TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" - func: "send dashboard data" @@ -1126,10 +1105,9 @@ tasks: - name: "perf-6.0-standalone-ssl" tags: ["perf"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: VERSION: "v6.0-perf" - TOPOLOGY: "server" SSL: "ssl" - func: "run perf tests" - func: "attach benchmark test results" @@ -1138,10 +1116,9 @@ tasks: - name: "perf-8.0-standalone" tags: ["perf"] commands: - - func: "bootstrap mongo-orchestration" + - func: "run-server" vars: VERSION: "8.0" - TOPOLOGY: "server" - func: "run perf tests" - func: "attach benchmark test results" - func: "send dashboard data" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 56fdc1aa67..04e1451d45 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -2,7 +2,7 @@ tasks: # Aws tests - name: test-auth-aws-4.4-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -14,7 +14,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-4.4-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -26,7 +26,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-4.4-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -38,7 +38,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-4.4-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -50,7 +50,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-4.4-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -62,7 +62,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-4.4-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -74,7 +74,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-4.4-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -86,7 +86,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-4.4-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -99,7 +99,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-5.0-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -111,7 +111,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-5.0-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -123,7 +123,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-5.0-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -135,7 +135,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-5.0-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -147,7 +147,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-5.0-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -159,7 +159,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-5.0-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -171,7 +171,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-5.0-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -183,7 +183,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-5.0-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -196,7 +196,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-6.0-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -208,7 +208,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-6.0-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -220,7 +220,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-6.0-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -232,7 +232,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-6.0-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -244,7 +244,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-6.0-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -256,7 +256,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-6.0-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -268,7 +268,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-6.0-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -280,7 +280,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-6.0-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -293,7 +293,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-7.0-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -305,7 +305,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-7.0-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -317,7 +317,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-7.0-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -329,7 +329,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-7.0-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -341,7 +341,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-7.0-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -353,7 +353,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-7.0-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -365,7 +365,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-7.0-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -377,7 +377,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-7.0-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -390,7 +390,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-8.0-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -402,7 +402,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-8.0-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -414,7 +414,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-8.0-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -426,7 +426,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-8.0-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -438,7 +438,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-8.0-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -450,7 +450,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-8.0-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -462,7 +462,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-8.0-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -474,7 +474,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-8.0-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -487,7 +487,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-rapid-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -499,7 +499,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-rapid-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -511,7 +511,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-rapid-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -523,7 +523,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-rapid-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -535,7 +535,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-rapid-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -547,7 +547,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-rapid-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -559,7 +559,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-rapid-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -571,7 +571,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-rapid-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: rapid @@ -584,7 +584,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-latest-regular commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -596,7 +596,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-latest-assume-role commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -608,7 +608,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-latest-ec2 commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -620,7 +620,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-latest-env-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -632,7 +632,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-latest-session-creds commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -644,7 +644,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-latest-web-identity commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -656,7 +656,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-latest-ecs commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -668,7 +668,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-latest-web-identity-session-name commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: AUTH_AWS: "1" VERSION: latest @@ -689,7 +689,7 @@ tasks: SUB_TEST_NAME: gcp - name: test-gcpkms-fail commands: - - func: bootstrap mongo-orchestration + - func: run-server - func: run tests vars: TEST_NAME: kms @@ -702,7 +702,7 @@ tasks: SUB_TEST_NAME: azure - name: test-azurekms-fail commands: - - func: bootstrap mongo-orchestration + - func: run-server - func: run tests vars: TEST_NAME: kms @@ -711,12 +711,12 @@ tasks: # Load balancer tests - name: test-load-balancer-auth-ssl commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: TOPOLOGY: sharded_cluster AUTH: auth SSL: ssl - LOAD_BALANCER: "true" + TEST_NAME: load_balancer - func: run tests vars: AUTH: auth @@ -725,12 +725,12 @@ tasks: tags: [load-balancer, auth, ssl] - name: test-load-balancer-noauth-ssl commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: TOPOLOGY: sharded_cluster AUTH: noauth SSL: ssl - LOAD_BALANCER: "true" + TEST_NAME: load_balancer - func: run tests vars: AUTH: noauth @@ -739,12 +739,12 @@ tasks: tags: [load-balancer, noauth, ssl] - name: test-load-balancer-noauth-nossl commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: TOPOLOGY: sharded_cluster AUTH: noauth SSL: nossl - LOAD_BALANCER: "true" + TEST_NAME: load_balancer - func: run tests vars: AUTH: noauth @@ -755,7 +755,7 @@ tasks: # Server tests - name: test-4.0-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -775,7 +775,7 @@ tasks: - sync - name: test-4.0-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -795,7 +795,7 @@ tasks: - async - name: test-4.0-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -814,7 +814,7 @@ tasks: - sync_async - name: test-4.0-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -834,7 +834,7 @@ tasks: - sync - name: test-4.0-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -854,7 +854,7 @@ tasks: - async - name: test-4.0-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -873,7 +873,7 @@ tasks: - sync_async - name: test-4.0-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -893,7 +893,7 @@ tasks: - sync - name: test-4.0-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -913,7 +913,7 @@ tasks: - async - name: test-4.0-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: server @@ -932,7 +932,7 @@ tasks: - sync_async - name: test-4.2-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -952,7 +952,7 @@ tasks: - sync - name: test-4.2-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -972,7 +972,7 @@ tasks: - async - name: test-4.2-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -991,7 +991,7 @@ tasks: - sync_async - name: test-4.2-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -1011,7 +1011,7 @@ tasks: - sync - name: test-4.2-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -1031,7 +1031,7 @@ tasks: - async - name: test-4.2-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -1050,7 +1050,7 @@ tasks: - sync_async - name: test-4.2-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -1070,7 +1070,7 @@ tasks: - sync - name: test-4.2-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -1090,7 +1090,7 @@ tasks: - async - name: test-4.2-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: server @@ -1109,7 +1109,7 @@ tasks: - sync_async - name: test-4.4-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1129,7 +1129,7 @@ tasks: - sync - name: test-4.4-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1149,7 +1149,7 @@ tasks: - async - name: test-4.4-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1168,7 +1168,7 @@ tasks: - sync_async - name: test-4.4-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1188,7 +1188,7 @@ tasks: - sync - name: test-4.4-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1208,7 +1208,7 @@ tasks: - async - name: test-4.4-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1227,7 +1227,7 @@ tasks: - sync_async - name: test-4.4-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1247,7 +1247,7 @@ tasks: - sync - name: test-4.4-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1267,7 +1267,7 @@ tasks: - async - name: test-4.4-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: server @@ -1286,7 +1286,7 @@ tasks: - sync_async - name: test-5.0-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1306,7 +1306,7 @@ tasks: - sync - name: test-5.0-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1326,7 +1326,7 @@ tasks: - async - name: test-5.0-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1345,7 +1345,7 @@ tasks: - sync_async - name: test-5.0-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1365,7 +1365,7 @@ tasks: - sync - name: test-5.0-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1385,7 +1385,7 @@ tasks: - async - name: test-5.0-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1404,7 +1404,7 @@ tasks: - sync_async - name: test-5.0-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1424,7 +1424,7 @@ tasks: - sync - name: test-5.0-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1444,7 +1444,7 @@ tasks: - async - name: test-5.0-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: server @@ -1463,7 +1463,7 @@ tasks: - sync_async - name: test-6.0-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1483,7 +1483,7 @@ tasks: - sync - name: test-6.0-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1503,7 +1503,7 @@ tasks: - async - name: test-6.0-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1522,7 +1522,7 @@ tasks: - sync_async - name: test-6.0-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1542,7 +1542,7 @@ tasks: - sync - name: test-6.0-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1562,7 +1562,7 @@ tasks: - async - name: test-6.0-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1581,7 +1581,7 @@ tasks: - sync_async - name: test-6.0-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1601,7 +1601,7 @@ tasks: - sync - name: test-6.0-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1621,7 +1621,7 @@ tasks: - async - name: test-6.0-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: server @@ -1640,7 +1640,7 @@ tasks: - sync_async - name: test-7.0-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1660,7 +1660,7 @@ tasks: - sync - name: test-7.0-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1680,7 +1680,7 @@ tasks: - async - name: test-7.0-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1699,7 +1699,7 @@ tasks: - sync_async - name: test-7.0-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1719,7 +1719,7 @@ tasks: - sync - name: test-7.0-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1739,7 +1739,7 @@ tasks: - async - name: test-7.0-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1758,7 +1758,7 @@ tasks: - sync_async - name: test-7.0-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1778,7 +1778,7 @@ tasks: - sync - name: test-7.0-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1798,7 +1798,7 @@ tasks: - async - name: test-7.0-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: server @@ -1817,7 +1817,7 @@ tasks: - sync_async - name: test-8.0-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1837,7 +1837,7 @@ tasks: - sync - name: test-8.0-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1857,7 +1857,7 @@ tasks: - async - name: test-8.0-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1876,7 +1876,7 @@ tasks: - sync_async - name: test-8.0-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1896,7 +1896,7 @@ tasks: - sync - name: test-8.0-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1916,7 +1916,7 @@ tasks: - async - name: test-8.0-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1935,7 +1935,7 @@ tasks: - sync_async - name: test-8.0-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1955,7 +1955,7 @@ tasks: - sync - name: test-8.0-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1975,7 +1975,7 @@ tasks: - async - name: test-8.0-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: server @@ -1994,7 +1994,7 @@ tasks: - sync_async - name: test-rapid-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2014,7 +2014,7 @@ tasks: - sync - name: test-rapid-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2034,7 +2034,7 @@ tasks: - async - name: test-rapid-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2053,7 +2053,7 @@ tasks: - sync_async - name: test-rapid-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2073,7 +2073,7 @@ tasks: - sync - name: test-rapid-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2093,7 +2093,7 @@ tasks: - async - name: test-rapid-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2112,7 +2112,7 @@ tasks: - sync_async - name: test-rapid-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2132,7 +2132,7 @@ tasks: - sync - name: test-rapid-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2152,7 +2152,7 @@ tasks: - async - name: test-rapid-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: server @@ -2171,7 +2171,7 @@ tasks: - sync_async - name: test-latest-standalone-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2191,7 +2191,7 @@ tasks: - sync - name: test-latest-standalone-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2211,7 +2211,7 @@ tasks: - async - name: test-latest-standalone-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2230,7 +2230,7 @@ tasks: - sync_async - name: test-latest-standalone-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2250,7 +2250,7 @@ tasks: - sync - name: test-latest-standalone-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2270,7 +2270,7 @@ tasks: - async - name: test-latest-standalone-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2289,7 +2289,7 @@ tasks: - sync_async - name: test-latest-standalone-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2309,7 +2309,7 @@ tasks: - sync - name: test-latest-standalone-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2329,7 +2329,7 @@ tasks: - async - name: test-latest-standalone-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: server @@ -2348,7 +2348,7 @@ tasks: - sync_async - name: test-4.0-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2368,7 +2368,7 @@ tasks: - sync - name: test-4.0-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2388,7 +2388,7 @@ tasks: - async - name: test-4.0-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2407,7 +2407,7 @@ tasks: - sync_async - name: test-4.0-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2427,7 +2427,7 @@ tasks: - sync - name: test-4.0-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2447,7 +2447,7 @@ tasks: - async - name: test-4.0-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2466,7 +2466,7 @@ tasks: - sync_async - name: test-4.0-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2486,7 +2486,7 @@ tasks: - sync - name: test-4.0-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2506,7 +2506,7 @@ tasks: - async - name: test-4.0-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2525,7 +2525,7 @@ tasks: - sync_async - name: test-4.2-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2545,7 +2545,7 @@ tasks: - sync - name: test-4.2-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2565,7 +2565,7 @@ tasks: - async - name: test-4.2-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2584,7 +2584,7 @@ tasks: - sync_async - name: test-4.2-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2604,7 +2604,7 @@ tasks: - sync - name: test-4.2-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2624,7 +2624,7 @@ tasks: - async - name: test-4.2-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2643,7 +2643,7 @@ tasks: - sync_async - name: test-4.2-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2663,7 +2663,7 @@ tasks: - sync - name: test-4.2-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2683,7 +2683,7 @@ tasks: - async - name: test-4.2-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2702,7 +2702,7 @@ tasks: - sync_async - name: test-4.4-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2722,7 +2722,7 @@ tasks: - sync - name: test-4.4-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2742,7 +2742,7 @@ tasks: - async - name: test-4.4-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2761,7 +2761,7 @@ tasks: - sync_async - name: test-4.4-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2781,7 +2781,7 @@ tasks: - sync - name: test-4.4-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2801,7 +2801,7 @@ tasks: - async - name: test-4.4-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2820,7 +2820,7 @@ tasks: - sync_async - name: test-4.4-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2840,7 +2840,7 @@ tasks: - sync - name: test-4.4-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2860,7 +2860,7 @@ tasks: - async - name: test-4.4-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2879,7 +2879,7 @@ tasks: - sync_async - name: test-5.0-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2899,7 +2899,7 @@ tasks: - sync - name: test-5.0-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2919,7 +2919,7 @@ tasks: - async - name: test-5.0-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2938,7 +2938,7 @@ tasks: - sync_async - name: test-5.0-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2958,7 +2958,7 @@ tasks: - sync - name: test-5.0-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2978,7 +2978,7 @@ tasks: - async - name: test-5.0-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2997,7 +2997,7 @@ tasks: - sync_async - name: test-5.0-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -3017,7 +3017,7 @@ tasks: - sync - name: test-5.0-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -3037,7 +3037,7 @@ tasks: - async - name: test-5.0-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -3056,7 +3056,7 @@ tasks: - sync_async - name: test-6.0-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3076,7 +3076,7 @@ tasks: - sync - name: test-6.0-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3096,7 +3096,7 @@ tasks: - async - name: test-6.0-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3115,7 +3115,7 @@ tasks: - sync_async - name: test-6.0-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3135,7 +3135,7 @@ tasks: - sync - name: test-6.0-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3155,7 +3155,7 @@ tasks: - async - name: test-6.0-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3174,7 +3174,7 @@ tasks: - sync_async - name: test-6.0-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3194,7 +3194,7 @@ tasks: - sync - name: test-6.0-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3214,7 +3214,7 @@ tasks: - async - name: test-6.0-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3233,7 +3233,7 @@ tasks: - sync_async - name: test-7.0-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3253,7 +3253,7 @@ tasks: - sync - name: test-7.0-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3273,7 +3273,7 @@ tasks: - async - name: test-7.0-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3292,7 +3292,7 @@ tasks: - sync_async - name: test-7.0-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3312,7 +3312,7 @@ tasks: - sync - name: test-7.0-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3332,7 +3332,7 @@ tasks: - async - name: test-7.0-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3351,7 +3351,7 @@ tasks: - sync_async - name: test-7.0-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3371,7 +3371,7 @@ tasks: - sync - name: test-7.0-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3391,7 +3391,7 @@ tasks: - async - name: test-7.0-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3410,7 +3410,7 @@ tasks: - sync_async - name: test-8.0-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3430,7 +3430,7 @@ tasks: - sync - name: test-8.0-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3450,7 +3450,7 @@ tasks: - async - name: test-8.0-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3469,7 +3469,7 @@ tasks: - sync_async - name: test-8.0-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3489,7 +3489,7 @@ tasks: - sync - name: test-8.0-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3509,7 +3509,7 @@ tasks: - async - name: test-8.0-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3528,7 +3528,7 @@ tasks: - sync_async - name: test-8.0-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3548,7 +3548,7 @@ tasks: - sync - name: test-8.0-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3568,7 +3568,7 @@ tasks: - async - name: test-8.0-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3587,7 +3587,7 @@ tasks: - sync_async - name: test-rapid-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3607,7 +3607,7 @@ tasks: - sync - name: test-rapid-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3627,7 +3627,7 @@ tasks: - async - name: test-rapid-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3646,7 +3646,7 @@ tasks: - sync_async - name: test-rapid-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3666,7 +3666,7 @@ tasks: - sync - name: test-rapid-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3686,7 +3686,7 @@ tasks: - async - name: test-rapid-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3705,7 +3705,7 @@ tasks: - sync_async - name: test-rapid-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3725,7 +3725,7 @@ tasks: - sync - name: test-rapid-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3745,7 +3745,7 @@ tasks: - async - name: test-rapid-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3764,7 +3764,7 @@ tasks: - sync_async - name: test-latest-replica_set-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3784,7 +3784,7 @@ tasks: - sync - name: test-latest-replica_set-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3804,7 +3804,7 @@ tasks: - async - name: test-latest-replica_set-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3823,7 +3823,7 @@ tasks: - sync_async - name: test-latest-replica_set-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3843,7 +3843,7 @@ tasks: - sync - name: test-latest-replica_set-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3863,7 +3863,7 @@ tasks: - async - name: test-latest-replica_set-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3882,7 +3882,7 @@ tasks: - sync_async - name: test-latest-replica_set-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3902,7 +3902,7 @@ tasks: - sync - name: test-latest-replica_set-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3922,7 +3922,7 @@ tasks: - async - name: test-latest-replica_set-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: replica_set @@ -3941,7 +3941,7 @@ tasks: - sync_async - name: test-4.0-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -3961,7 +3961,7 @@ tasks: - sync - name: test-4.0-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -3981,7 +3981,7 @@ tasks: - async - name: test-4.0-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4000,7 +4000,7 @@ tasks: - sync_async - name: test-4.0-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4020,7 +4020,7 @@ tasks: - sync - name: test-4.0-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4040,7 +4040,7 @@ tasks: - async - name: test-4.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4059,7 +4059,7 @@ tasks: - sync_async - name: test-4.0-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4079,7 +4079,7 @@ tasks: - sync - name: test-4.0-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4099,7 +4099,7 @@ tasks: - async - name: test-4.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4118,7 +4118,7 @@ tasks: - sync_async - name: test-4.2-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4138,7 +4138,7 @@ tasks: - sync - name: test-4.2-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4158,7 +4158,7 @@ tasks: - async - name: test-4.2-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4177,7 +4177,7 @@ tasks: - sync_async - name: test-4.2-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4197,7 +4197,7 @@ tasks: - sync - name: test-4.2-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4217,7 +4217,7 @@ tasks: - async - name: test-4.2-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4236,7 +4236,7 @@ tasks: - sync_async - name: test-4.2-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4256,7 +4256,7 @@ tasks: - sync - name: test-4.2-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4276,7 +4276,7 @@ tasks: - async - name: test-4.2-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4295,7 +4295,7 @@ tasks: - sync_async - name: test-4.4-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4315,7 +4315,7 @@ tasks: - sync - name: test-4.4-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4335,7 +4335,7 @@ tasks: - async - name: test-4.4-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4354,7 +4354,7 @@ tasks: - sync_async - name: test-4.4-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4374,7 +4374,7 @@ tasks: - sync - name: test-4.4-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4394,7 +4394,7 @@ tasks: - async - name: test-4.4-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4413,7 +4413,7 @@ tasks: - sync_async - name: test-4.4-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4433,7 +4433,7 @@ tasks: - sync - name: test-4.4-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4453,7 +4453,7 @@ tasks: - async - name: test-4.4-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4472,7 +4472,7 @@ tasks: - sync_async - name: test-5.0-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4492,7 +4492,7 @@ tasks: - sync - name: test-5.0-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4512,7 +4512,7 @@ tasks: - async - name: test-5.0-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4531,7 +4531,7 @@ tasks: - sync_async - name: test-5.0-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4551,7 +4551,7 @@ tasks: - sync - name: test-5.0-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4571,7 +4571,7 @@ tasks: - async - name: test-5.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4590,7 +4590,7 @@ tasks: - sync_async - name: test-5.0-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4610,7 +4610,7 @@ tasks: - sync - name: test-5.0-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4630,7 +4630,7 @@ tasks: - async - name: test-5.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4649,7 +4649,7 @@ tasks: - sync_async - name: test-6.0-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4669,7 +4669,7 @@ tasks: - sync - name: test-6.0-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4689,7 +4689,7 @@ tasks: - async - name: test-6.0-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4708,7 +4708,7 @@ tasks: - sync_async - name: test-6.0-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4728,7 +4728,7 @@ tasks: - sync - name: test-6.0-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4748,7 +4748,7 @@ tasks: - async - name: test-6.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4767,7 +4767,7 @@ tasks: - sync_async - name: test-6.0-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4787,7 +4787,7 @@ tasks: - sync - name: test-6.0-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4807,7 +4807,7 @@ tasks: - async - name: test-6.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4826,7 +4826,7 @@ tasks: - sync_async - name: test-7.0-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4846,7 +4846,7 @@ tasks: - sync - name: test-7.0-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4866,7 +4866,7 @@ tasks: - async - name: test-7.0-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4885,7 +4885,7 @@ tasks: - sync_async - name: test-7.0-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4905,7 +4905,7 @@ tasks: - sync - name: test-7.0-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4925,7 +4925,7 @@ tasks: - async - name: test-7.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4944,7 +4944,7 @@ tasks: - sync_async - name: test-7.0-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4964,7 +4964,7 @@ tasks: - sync - name: test-7.0-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4984,7 +4984,7 @@ tasks: - async - name: test-7.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -5003,7 +5003,7 @@ tasks: - sync_async - name: test-8.0-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5023,7 +5023,7 @@ tasks: - sync - name: test-8.0-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5043,7 +5043,7 @@ tasks: - async - name: test-8.0-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5062,7 +5062,7 @@ tasks: - sync_async - name: test-8.0-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5082,7 +5082,7 @@ tasks: - sync - name: test-8.0-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5102,7 +5102,7 @@ tasks: - async - name: test-8.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5121,7 +5121,7 @@ tasks: - sync_async - name: test-8.0-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5141,7 +5141,7 @@ tasks: - sync - name: test-8.0-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5161,7 +5161,7 @@ tasks: - async - name: test-8.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5180,7 +5180,7 @@ tasks: - sync_async - name: test-rapid-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5200,7 +5200,7 @@ tasks: - sync - name: test-rapid-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5220,7 +5220,7 @@ tasks: - async - name: test-rapid-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5239,7 +5239,7 @@ tasks: - sync_async - name: test-rapid-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5259,7 +5259,7 @@ tasks: - sync - name: test-rapid-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5279,7 +5279,7 @@ tasks: - async - name: test-rapid-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5298,7 +5298,7 @@ tasks: - sync_async - name: test-rapid-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5318,7 +5318,7 @@ tasks: - sync - name: test-rapid-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5338,7 +5338,7 @@ tasks: - async - name: test-rapid-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5357,7 +5357,7 @@ tasks: - sync_async - name: test-latest-sharded_cluster-auth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5377,7 +5377,7 @@ tasks: - sync - name: test-latest-sharded_cluster-auth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5397,7 +5397,7 @@ tasks: - async - name: test-latest-sharded_cluster-auth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5416,7 +5416,7 @@ tasks: - sync_async - name: test-latest-sharded_cluster-noauth-ssl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5436,7 +5436,7 @@ tasks: - sync - name: test-latest-sharded_cluster-noauth-ssl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5456,7 +5456,7 @@ tasks: - async - name: test-latest-sharded_cluster-noauth-ssl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5475,7 +5475,7 @@ tasks: - sync_async - name: test-latest-sharded_cluster-noauth-nossl-sync commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5495,7 +5495,7 @@ tasks: - sync - name: test-latest-sharded_cluster-noauth-nossl-async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5515,7 +5515,7 @@ tasks: - async - name: test-latest-sharded_cluster-noauth-nossl-sync_async commands: - - func: bootstrap mongo-orchestration + - func: run-server vars: VERSION: latest TOPOLOGY: sharded_cluster diff --git a/.evergreen/scripts/bootstrap-mongo-orchestration.sh b/.evergreen/scripts/bootstrap-mongo-orchestration.sh deleted file mode 100755 index af38edd095..0000000000 --- a/.evergreen/scripts/bootstrap-mongo-orchestration.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -set -eu - -HERE=$(dirname ${BASH_SOURCE:-$0}) -HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" -ROOT=$(dirname "$(dirname $HERE)") - -if [ -z "${TEST_CRYPT_SHARED:-}" ]; then - export SKIP_CRYPT_SHARED=1 -fi - -# Override the tls files if applicable. -if [ "${SSL:-}" == "ssl" ]; then - export TLS_CERT_KEY_FILE=${ROOT}/test/certificates/client.pem - export TLS_PEM_KEY_FILE=${ROOT}/test/certificates/server.pem - export TLS_CA_FILE=${ROOT}/test/certificates/ca.pem -fi - -MONGODB_VERSION=${VERSION:-} \ - TOPOLOGY=${TOPOLOGY:-} \ - AUTH=${AUTH:-} \ - SSL=${SSL:-} \ - STORAGE_ENGINE=${STORAGE_ENGINE:-} \ - DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS:-} \ - ORCHESTRATION_FILE=${ORCHESTRATION_FILE:-} \ - REQUIRE_API_VERSION=${REQUIRE_API_VERSION:-} \ - LOAD_BALANCER=${LOAD_BALANCER:-} \ - bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh -# run-orchestration generates expansion file with the MONGODB_URI for the cluster diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 3c731a1f9a..03b4619899 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -795,20 +795,20 @@ def create_server_tasks(): for topo, version, (auth, ssl), sync in product(TOPOLOGIES, ALL_VERSIONS, AUTH_SSLS, SYNCS): name = f"test-{version}-{topo}-{auth}-{ssl}-{sync}".lower() tags = [version, topo, auth, ssl, sync] - bootstrap_vars = dict( + server_vars = dict( VERSION=version, TOPOLOGY=topo if topo != "standalone" else "server", AUTH=auth, SSL=ssl, ) - bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + server_func = FunctionCall(func="run-server", vars=server_vars) test_vars = dict(AUTH=auth, SSL=ssl, SYNC=sync) if sync == "sync": test_vars["TEST_NAME"] = "default_sync" elif sync == "async": test_vars["TEST_NAME"] = "default_async" test_func = FunctionCall(func="run tests", vars=test_vars) - tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) return tasks @@ -817,11 +817,13 @@ def create_load_balancer_tasks(): for auth, ssl in AUTH_SSLS: name = f"test-load-balancer-{auth}-{ssl}".lower() tags = ["load-balancer", auth, ssl] - bootstrap_vars = dict(TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, LOAD_BALANCER="true") - bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + server_vars = dict( + TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, TEST_NAME="load_balancer" + ) + server_func = FunctionCall(func="run-server", vars=server_vars) test_vars = dict(AUTH=auth, SSL=ssl, TEST_NAME="load_balancer") test_func = FunctionCall(func="run tests", vars=test_vars) - tasks.append(EvgTask(name=name, tags=tags, commands=[bootstrap_func, test_func])) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) return tasks @@ -837,7 +839,7 @@ def create_kms_tasks(): sub_test_name += "-fail" commands = [] if not success: - commands.append(FunctionCall(func="bootstrap mongo-orchestration")) + commands.append(FunctionCall(func="run-server")) test_vars = dict(TEST_NAME="kms", SUB_TEST_NAME=sub_test_name) test_func = FunctionCall(func="run tests", vars=test_vars) commands.append(test_func) @@ -859,15 +861,15 @@ def create_aws_tasks(): for version in get_versions_from("4.4"): base_name = f"test-auth-aws-{version}" base_tags = ["auth-aws"] - bootstrap_vars = dict(AUTH_AWS="1", VERSION=version) - bootstrap_func = FunctionCall(func="bootstrap mongo-orchestration", vars=bootstrap_vars) + server_vars = dict(AUTH_AWS="1", VERSION=version) + server_func = FunctionCall(func="run-server", vars=server_vars) assume_func = FunctionCall(func="assume ec2 role") for test_type in aws_test_types: tags = [*base_tags, f"auth-aws-{test_type}"] name = f"{base_name}-{test_type}" test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type) test_func = FunctionCall(func="run tests", vars=test_vars) - funcs = [bootstrap_func, assume_func, test_func] + funcs = [server_func, assume_func, test_func] tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) tags = [*base_tags, "auth-aws-web-identity"] @@ -876,7 +878,7 @@ def create_aws_tasks(): TEST_NAME="auth_aws", SUB_TEST_NAME="web-identity", AWS_ROLE_SESSION_NAME="test" ) test_func = FunctionCall(func="run tests", vars=test_vars) - funcs = [bootstrap_func, assume_func, test_func] + funcs = [server_func, assume_func, test_func] tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) return tasks diff --git a/.evergreen/scripts/run-server.sh b/.evergreen/scripts/run-server.sh new file mode 100755 index 0000000000..298eedcd3e --- /dev/null +++ b/.evergreen/scripts/run-server.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) + +# Try to source the env file. +if [ -f $HERE/env.sh ]; then + echo "Sourcing env file" + source $HERE/env.sh +fi + +uv run $HERE/run_server.py "$@" diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py new file mode 100644 index 0000000000..51fe8a67f1 --- /dev/null +++ b/.evergreen/scripts/run_server.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import os +from typing import Any + +from utils import DRIVERS_TOOLS, ROOT, get_test_options, run_command + + +def set_env(name: str, value: Any = "1") -> None: + os.environ[name] = str(value) + + +def start_server(): + opts, extra_opts = get_test_options( + "Run a MongoDB server. All given flags will be passed to run-orchestration.sh in DRIVERS_TOOLS.", + require_sub_test_name=False, + allow_extra_opts=True, + ) + test_name = opts.test_name + + if opts.auth: + extra_opts.append("--auth") + + if opts.verbose: + extra_opts.append("-v") + elif opts.quiet: + extra_opts.append("-q") + + if test_name == "auth_aws": + set_env("AUTH_AWS") + + elif test_name == "load_balancer": + set_env("LOAD_BALANCER") + + if not os.environ.get("TEST_CRYPT_SHARED"): + set_env("SKIP_CRYPT_SHARED") + + if opts.ssl: + extra_opts.append("--ssl") + certs = ROOT / "test/certificates" + set_env("TLS_CERT_KEY_FILE", certs / "client.pem") + set_env("TLS_PEM_KEY_FILE", certs / "server.pem") + set_env("TLS_CA_FILE", certs / "ca.pem") + + cmd = ["bash", f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", *extra_opts] + run_command(cmd, cwd=DRIVERS_TOOLS) + + +if __name__ == "__main__": + start_server() diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index eff7bed773..2fa5e69cbc 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -1,9 +1,7 @@ from __future__ import annotations -import argparse import base64 import io -import logging import os import platform import shutil @@ -19,7 +17,9 @@ LOGGER, PLATFORM, ROOT, + TEST_SUITE_MAP, Distro, + get_test_options, read_env, run_command, write_env, @@ -28,30 +28,6 @@ # Passthrough environment variables. PASS_THROUGH_ENV = ["GREEN_FRAMEWORK", "NO_EXT", "MONGODB_API_VERSION", "DEBUG_LOG"] -# Map the test name to a test suite. -TEST_SUITE_MAP = { - "atlas": "atlas", - "auth_aws": "auth_aws", - "auth_oidc": "auth_oidc", - "data_lake": "data_lake", - "default": "", - "default_async": "default_async", - "default_sync": "default", - "encryption": "encryption", - "enterprise_auth": "auth", - "index_management": "index_management", - "kms": "kms", - "load_balancer": "load_balancer", - "mockupdb": "mockupdb", - "pyopenssl": "", - "ocsp": "ocsp", - "perf": "perf", - "serverless": "", -} - -# Tests that require a sub test suite. -SUB_TEST_REQUIRED = ["auth_aws", "kms"] - # Map the test name to test extra. EXTRAS_MAP = { "auth_aws": "aws", @@ -73,35 +49,6 @@ def is_set(var: str) -> bool: return len(value.strip()) > 0 -def get_options(): - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter - ) - parser.add_argument( - "test_name", - choices=sorted(TEST_SUITE_MAP), - nargs="?", - default="default", - help="The name of the test suite to set up, typically the same name as a pytest marker.", - ) - parser.add_argument("sub_test_name", nargs="?", help="The sub test name, for example 'azure'") - parser.add_argument( - "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level" - ) - parser.add_argument( - "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level" - ) - parser.add_argument("--auth", action="store_true", help="Whether to add authentication") - parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration") - # Get the options. - opts = parser.parse_args() - if opts.verbose: - LOGGER.setLevel(logging.DEBUG) - elif opts.quiet: - LOGGER.setLevel(logging.WARNING) - return opts - - def get_distro() -> Distro: name = "" version_id = "" @@ -166,20 +113,11 @@ def setup_libmongocrypt(): def handle_test_env() -> None: - opts = get_options() + opts, _ = get_test_options("Set up the test environment and services.") test_name = opts.test_name sub_test_name = opts.sub_test_name - if test_name in SUB_TEST_REQUIRED and not sub_test_name: - raise ValueError(f"Test '{test_name}' requires a sub_test_name") - AUTH = os.environ.get("AUTH", "noauth") - if opts.auth or "auth" in test_name: - AUTH = "auth" - # 'auth_aws ecs' shouldn't have extra auth set. - if test_name == "auth_aws" and sub_test_name == "ecs": - AUTH = "noauth" - SSL = os.environ.get("SSL", "nossl") - if opts.ssl: - SSL = "ssl" + AUTH = "auth" if opts.auth else "noauth" + SSL = "ssl" if opts.ssl else "nossl" TEST_ARGS = "" # Start compiling the args we'll pass to uv. diff --git a/.evergreen/scripts/stop-server.sh b/.evergreen/scripts/stop-server.sh new file mode 100755 index 0000000000..7db20d4bf3 --- /dev/null +++ b/.evergreen/scripts/stop-server.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" + +# Try to source the env file. +if [ -f $HERE/env.sh ]; then + echo "Sourcing env file" + source $HERE/env.sh +fi + +bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index d830275def..dcb50cc4dc 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -1,5 +1,6 @@ from __future__ import annotations +import argparse import dataclasses import logging import os @@ -26,6 +27,89 @@ class Distro: arch: str +# Map the test name to a test suite. +TEST_SUITE_MAP = { + "atlas": "atlas", + "auth_aws": "auth_aws", + "auth_oidc": "auth_oidc", + "data_lake": "data_lake", + "default": "", + "default_async": "default_async", + "default_sync": "default", + "encryption": "encryption", + "enterprise_auth": "auth", + "index_management": "index_management", + "kms": "kms", + "load_balancer": "load_balancer", + "mockupdb": "mockupdb", + "pyopenssl": "", + "ocsp": "ocsp", + "perf": "perf", + "serverless": "", +} + +# Tests that require a sub test suite. +SUB_TEST_REQUIRED = ["auth_aws", "kms"] + + +def get_test_options( + description, require_sub_test_name=True, allow_extra_opts=False +) -> tuple[argparse.Namespace, list[str]]: + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) + if require_sub_test_name: + parser.add_argument( + "test_name", + choices=sorted(TEST_SUITE_MAP), + nargs="?", + default="default", + help="The optional name of the test suite to set up, typically the same name as a pytest marker.", + ) + parser.add_argument( + "sub_test_name", nargs="?", help="The optional sub test name, for example 'azure'." + ) + else: + parser.add_argument( + "test_name", + choices=sorted(TEST_SUITE_MAP), + nargs="?", + default="default", + help="The optional name of the test suite to be run, which informs the server configuration.", + ) + parser.add_argument( + "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level" + ) + parser.add_argument( + "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level" + ) + parser.add_argument("--auth", action="store_true", help="Whether to add authentication") + parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration") + # Get the options. + if not allow_extra_opts: + opts, extra_opts = parser.parse_args(), [] + else: + opts, extra_opts = parser.parse_known_args() + if opts.verbose: + LOGGER.setLevel(logging.DEBUG) + elif opts.quiet: + LOGGER.setLevel(logging.WARNING) + + # Handle validation and environment variable overrides. + test_name = opts.test_name + sub_test_name = opts.sub_test_name if require_sub_test_name else "" + if require_sub_test_name and test_name in SUB_TEST_REQUIRED and not sub_test_name: + raise ValueError(f"Test '{test_name}' requires a sub_test_name") + if "auth" in test_name or os.environ.get("AUTH") == "auth": + opts.auth = True + # 'auth_aws ecs' shouldn't have extra auth set. + if test_name == "auth_aws" and sub_test_name == "ecs": + opts.auth = False + if os.environ.get("SSL") == "ssl": + opts.ssl = True + return opts, extra_opts + + def read_env(path: Path | str) -> dict[str, Any]: config = dict() with Path(path).open() as fid: @@ -47,8 +131,10 @@ def write_env(name: str, value: Any = "1") -> None: fid.write(f'export {name}="{value}"\n') -def run_command(cmd: str, **kwargs: Any) -> None: - LOGGER.info("Running command %s...", cmd) +def run_command(cmd: str | list[str], **kwargs: Any) -> None: + if isinstance(cmd, list): + cmd = " ".join(cmd) + LOGGER.info("Running command '%s'...", cmd) kwargs.setdefault("check", True) subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 - LOGGER.info("Running command %s... done.", cmd) + LOGGER.info("Running command '%s'... done.", cmd) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ccce0c1ae7..b3423ed402 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -204,25 +204,56 @@ the pages will re-render and the browser will automatically refresh. `just test test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress`. - Use the `-k` argument to select tests by pattern. -## Running Load Balancer Tests Locally -- Install `haproxy` (available as `brew install haproxy` on macOS). -- Clone `drivers-evergreen-tools`: - `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. -- Start the servers using - `LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh`. -- Set up the test using: - `MONGODB_URI='mongodb://localhost:27017,localhost:27018/' just setup-tests load-balancer`. -- Run the tests from the `pymongo` checkout directory using: - `just run-tests`. - -## Running Encryption Tests Locally +## Running tests that require secrets, services, or other configuration + +### Prerequisites + - Clone `drivers-evergreen-tools`: - `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. -- Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools` -- Run `AWS_PROFILE= just setup-tests encryption` after setting up your AWS profile with `aws configure sso`. + `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. +- Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools`. This can be put into a `.bashrc` file + for convenience. +- Set up access to [Drivers test secrets](https://github.com/mongodb-labs/drivers-evergreen-tools/tree/master/.evergreen/secrets_handling#secrets-handling). + +### Usage + +- Run `just run-server` with optional args to set up the server. + All given flags will be passed to `run-orchestration.sh` in `DRIVERS_TOOLS`. +- Run `just setup-tests` with optional args to set up the test environment, secrets, etc. +- Run `just run-tests` to run the tests in an appropriate Python environment. +- When done, run `just teardown-tests` to clean up and `just stop-server` to stop the server. + +## Encryption tests + +- Run `just run-server` to start the server. +- Run `just setup-tests encryption`. - Run the tests with `just run-tests`. -- When done, run `just teardown-tests` to clean up. + +### Load balancer tests + +- Install `haproxy` (available as `brew install haproxy` on macOS). +- Start the server with `just run-server load_balancer`. +- Set up the test with `just setup-tests load_balancer`. +- Run the tests with `just run-tests`. + +## AWS tests + +- Run `just run-server auth_aws` to start the server. +- Run `just setup-tests auth_aws ` to set up the AWS test. +- Run the tests with `just run-tests`. + +## KMS tests + +For KMS tests that are run locally, and expected to fail, in this case using `azure`: + +- Run `just run-server`. +- Run `just setup-tests kms azure-fail`. +- Run `just run-tests`. + +For KMS tests that run remotely and are expected to pass, in this case using `gcp`: + +- Run `just setup-tests kms gcp`. +- Run `just run-tests`. ## Enable Debug Logs - Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. diff --git a/justfile b/justfile index 5a5a05cd5a..43aefb3f1a 100644 --- a/justfile +++ b/justfile @@ -71,3 +71,11 @@ setup-tests *args="": [group('test')] teardown-tests: bash .evergreen/scripts/teardown-tests.sh + +[group('server')] +run-server *args="": + bash .evergreen/scripts/run-server.sh {{args}} + +[group('server')] +stop-server: + bash .evergreen/scripts/stop-server.sh From f69e1f6f0485d0b6b99b5359c0291a562bfc0d7b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 6 Mar 2025 16:53:47 -0600 Subject: [PATCH 1786/2111] PYTHON-5184 Revert skip to non-lb-connection-establishment (#2185) --- .../non-lb-connection-establishment.json | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/test/load_balancer/non-lb-connection-establishment.json b/test/load_balancer/non-lb-connection-establishment.json index f4fed13cc2..6aaa7bdf98 100644 --- a/test/load_balancer/non-lb-connection-establishment.json +++ b/test/load_balancer/non-lb-connection-establishment.json @@ -57,19 +57,6 @@ "tests": [ { "description": "operations against non-load balanced clusters fail if URI contains loadBalanced=true", - "runOnRequirements": [ - { - "maxServerVersion": "8.0.99", - "topologies": [ - "single" - ] - }, - { - "topologies": [ - "sharded" - ] - } - ], "operations": [ { "name": "runCommand", From 38127f458ba0c6aea8c0f2484d6d06ae99719c68 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Sat, 8 Mar 2025 13:02:03 -0600 Subject: [PATCH 1787/2111] PYTHON-5193 & PYTHON-5192 Fix run-server usage (#2187) --- .evergreen/config.yml | 4 ++-- .evergreen/scripts/run-mod-wsgi-tests.sh | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3f29391373..3f498ba3fa 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -206,9 +206,9 @@ functions: params: binary: bash working_dir: "src" - include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, + include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER] - args: [.evergreen/just.sh, run-server, "${TEST_NAME}", "${SUB_TEST_NAME}"] + args: [.evergreen/just.sh, run-server, "${TEST_NAME}"] - command: expansions.update params: file: ${DRIVERS_TOOLS}/mo-expansion.yml diff --git a/.evergreen/scripts/run-mod-wsgi-tests.sh b/.evergreen/scripts/run-mod-wsgi-tests.sh index 607458b8c6..f59ace8116 100755 --- a/.evergreen/scripts/run-mod-wsgi-tests.sh +++ b/.evergreen/scripts/run-mod-wsgi-tests.sh @@ -22,7 +22,8 @@ PYTHON_VERSION=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('.'.join(str( ${PYTHON_BINARY} -m venv --system-site-packages .venv source .venv/bin/activate pip install -U pip -python -m pip install -e . +export PYMONGO_C_EXT_MUST_BUILD=1 +python -m pip install -v -e . export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_version/$MOD_WSGI_VERSION/mod_wsgi.so export PYTHONHOME=/opt/python/$PYTHON_VERSION From a548f7a3d45f8827e88e8f2c060ffb1045e5883e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 10 Mar 2025 10:25:27 -0500 Subject: [PATCH 1788/2111] PYTHON-5195 Convert OCSP tests to use new test scripts (#2190) Co-authored-by: Noah Stapp --- .evergreen/config.yml | 397 +---------- .evergreen/generated_configs/tasks.yml | 898 ++++++++++++++++--------- .evergreen/scripts/generate_config.py | 62 +- .evergreen/scripts/run-ocsp-test.sh | 12 - .evergreen/scripts/run_server.py | 19 +- .evergreen/scripts/setup_tests.py | 28 +- .evergreen/scripts/teardown_tests.py | 4 + CONTRIBUTING.md | 19 +- 8 files changed, 724 insertions(+), 715 deletions(-) delete mode 100755 .evergreen/scripts/run-ocsp-test.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 3f498ba3fa..54931dcb48 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -201,7 +201,7 @@ functions: params: file: "src/xunit-results/TEST-*.xml" - "run-server": + "run server": - command: subprocess.exec params: binary: bash @@ -255,7 +255,8 @@ functions: params: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, - DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG] + DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG, + ORCHESTRATION_FILE, OCSP_SERVER_TYPE] binary: bash working_dir: "src" args: [.evergreen/just.sh, setup-tests, "${TEST_NAME}", "${SUB_TEST_NAME}"] @@ -320,12 +321,6 @@ functions: - .evergreen/scripts/cleanup.sh "teardown system": - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - args: - - ${DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh - command: subprocess.exec params: binary: bash @@ -357,26 +352,6 @@ functions: params: file: atlas-expansion.yml - "run-ocsp-test": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: ["OCSP_ALGORITHM", "OCSP_TLS_SHOULD_SUCCEED", "PYTHON_BINARY"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-ocsp-test.sh - - "run-ocsp-server": - - command: subprocess.exec - params: - background: true - binary: bash - include_expansions_in_env: [SERVER_TYPE, OCSP_ALGORITHM] - args: - - ${DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh - "teardown atlas": - command: subprocess.exec params: @@ -579,7 +554,7 @@ tasks: - name: "doctests" tags: ["doctests"] commands: - - func: "run-server" + - func: "run server" - func: "run doctests" - name: "test-serverless" @@ -592,13 +567,13 @@ tasks: - name: "test-enterprise-auth" tags: ["enterprise-auth"] commands: - - func: "run-server" + - func: "run server" - func: "assume ec2 role" - func: "run enterprise auth tests" - name: "test-search-index-helpers" commands: - - func: "run-server" + - func: "run server" vars: VERSION: "6.0" TOPOLOGY: "replica_set" @@ -610,7 +585,7 @@ tasks: - name: "mod-wsgi-standalone" tags: ["mod_wsgi"] commands: - - func: "run-server" + - func: "run server" vars: TOPOLOGY: "server" - func: "run mod_wsgi tests" @@ -618,7 +593,7 @@ tasks: - name: "mod-wsgi-replica-set" tags: ["mod_wsgi"] commands: - - func: "run-server" + - func: "run server" vars: TOPOLOGY: "replica_set" - func: "run mod_wsgi tests" @@ -626,7 +601,7 @@ tasks: - name: "mod-wsgi-embedded-mode-standalone" tags: ["mod_wsgi"] commands: - - func: "run-server" + - func: "run server" - func: "run mod_wsgi tests" vars: MOD_WSGI_EMBEDDED: "1" @@ -634,7 +609,7 @@ tasks: - name: "mod-wsgi-embedded-mode-replica-set" tags: ["mod_wsgi"] commands: - - func: "run-server" + - func: "run server" vars: TOPOLOGY: "replica_set" - func: "run mod_wsgi tests" @@ -649,7 +624,7 @@ tasks: - name: "free-threading" tags: ["free-threading"] commands: - - func: "run-server" + - func: "run server" vars: VERSION: "8.0" TOPOLOGY: "replica_set" @@ -684,350 +659,6 @@ tasks: env: TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/test/lambda - - name: test-ocsp-rsa-valid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: "valid" - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-invalid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: "revoked" - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: valid - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: revoked - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-soft-fail - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: revoked - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-delegate-valid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: valid-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: revoked-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: valid-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: revoked-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - SERVER_TYPE: revoked-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-valid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: valid - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-invalid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: revoked - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: valid - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: revoked - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-soft-fail - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: revoked - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: valid-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: revoked-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: valid-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: revoked-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - SERVER_TYPE: valid-delegate - - func: "run-server" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - name: "oidc-auth-test" commands: - func: "run oidc auth test with test credentials" @@ -1095,7 +726,7 @@ tasks: - name: "perf-6.0-standalone" tags: ["perf"] commands: - - func: "run-server" + - func: "run server" vars: VERSION: "v6.0-perf" - func: "run perf tests" @@ -1105,7 +736,7 @@ tasks: - name: "perf-6.0-standalone-ssl" tags: ["perf"] commands: - - func: "run-server" + - func: "run server" vars: VERSION: "v6.0-perf" SSL: "ssl" @@ -1116,7 +747,7 @@ tasks: - name: "perf-8.0-standalone" tags: ["perf"] commands: - - func: "run-server" + - func: "run server" vars: VERSION: "8.0" - func: "run perf tests" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 04e1451d45..02ee29e6ed 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -2,7 +2,7 @@ tasks: # Aws tests - name: test-auth-aws-4.4-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -14,7 +14,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-4.4-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -26,7 +26,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-4.4-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -38,7 +38,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-4.4-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -50,7 +50,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-4.4-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -62,7 +62,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-4.4-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -74,7 +74,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-4.4-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -86,7 +86,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-4.4-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "4.4" @@ -99,7 +99,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-5.0-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -111,7 +111,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-5.0-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -123,7 +123,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-5.0-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -135,7 +135,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-5.0-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -147,7 +147,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-5.0-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -159,7 +159,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-5.0-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -171,7 +171,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-5.0-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -183,7 +183,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-5.0-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "5.0" @@ -196,7 +196,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-6.0-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -208,7 +208,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-6.0-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -220,7 +220,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-6.0-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -232,7 +232,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-6.0-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -244,7 +244,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-6.0-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -256,7 +256,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-6.0-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -268,7 +268,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-6.0-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -280,7 +280,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-6.0-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "6.0" @@ -293,7 +293,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-7.0-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -305,7 +305,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-7.0-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -317,7 +317,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-7.0-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -329,7 +329,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-7.0-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -341,7 +341,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-7.0-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -353,7 +353,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-7.0-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -365,7 +365,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-7.0-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -377,7 +377,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-7.0-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "7.0" @@ -390,7 +390,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-8.0-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -402,7 +402,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-8.0-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -414,7 +414,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-8.0-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -426,7 +426,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-8.0-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -438,7 +438,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-8.0-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -450,7 +450,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-8.0-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -462,7 +462,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-8.0-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -474,7 +474,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-8.0-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" @@ -487,7 +487,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-rapid-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -499,7 +499,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-rapid-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -511,7 +511,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-rapid-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -523,7 +523,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-rapid-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -535,7 +535,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-rapid-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -547,7 +547,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-rapid-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -559,7 +559,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-rapid-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -571,7 +571,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-rapid-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: rapid @@ -584,7 +584,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-latest-regular commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -596,7 +596,7 @@ tasks: tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-latest-assume-role commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -608,7 +608,7 @@ tasks: tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-latest-ec2 commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -620,7 +620,7 @@ tasks: tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-latest-env-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -632,7 +632,7 @@ tasks: tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-latest-session-creds commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -644,7 +644,7 @@ tasks: tags: [auth-aws, auth-aws-session-creds] - name: test-auth-aws-latest-web-identity commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -656,7 +656,7 @@ tasks: tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-latest-ecs commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -668,7 +668,7 @@ tasks: tags: [auth-aws, auth-aws-ecs] - name: test-auth-aws-latest-web-identity-session-name commands: - - func: run-server + - func: run server vars: AUTH_AWS: "1" VERSION: latest @@ -689,7 +689,7 @@ tasks: SUB_TEST_NAME: gcp - name: test-gcpkms-fail commands: - - func: run-server + - func: run server - func: run tests vars: TEST_NAME: kms @@ -702,7 +702,7 @@ tasks: SUB_TEST_NAME: azure - name: test-azurekms-fail commands: - - func: run-server + - func: run server - func: run tests vars: TEST_NAME: kms @@ -711,7 +711,7 @@ tasks: # Load balancer tests - name: test-load-balancer-auth-ssl commands: - - func: run-server + - func: run server vars: TOPOLOGY: sharded_cluster AUTH: auth @@ -725,7 +725,7 @@ tasks: tags: [load-balancer, auth, ssl] - name: test-load-balancer-noauth-ssl commands: - - func: run-server + - func: run server vars: TOPOLOGY: sharded_cluster AUTH: noauth @@ -739,7 +739,7 @@ tasks: tags: [load-balancer, noauth, ssl] - name: test-load-balancer-noauth-nossl commands: - - func: run-server + - func: run server vars: TOPOLOGY: sharded_cluster AUTH: noauth @@ -752,10 +752,300 @@ tasks: TEST_NAME: load_balancer tags: [load-balancer, noauth, nossl] + # Ocsp tests + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-soft-fail + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-valid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa, ocsp-staple] + - name: test-ocsp-ecdsa-invalid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa, ocsp-staple] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa, ocsp-staple] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa, ocsp-staple] + - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + tags: [ocsp, ocsp-ecdsa] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-soft-fail + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-valid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa, ocsp-staple] + - name: test-ocsp-rsa-invalid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa, ocsp-staple] + - name: test-ocsp-rsa-delegate-valid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa, ocsp-staple] + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa, ocsp-staple] + - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple + commands: + - func: run server + vars: + TEST_NAME: ocsp + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + tags: [ocsp, ocsp-rsa] + # Server tests - name: test-4.0-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -775,7 +1065,7 @@ tasks: - sync - name: test-4.0-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -795,7 +1085,7 @@ tasks: - async - name: test-4.0-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -814,7 +1104,7 @@ tasks: - sync_async - name: test-4.0-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -834,7 +1124,7 @@ tasks: - sync - name: test-4.0-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -854,7 +1144,7 @@ tasks: - async - name: test-4.0-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -873,7 +1163,7 @@ tasks: - sync_async - name: test-4.0-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -893,7 +1183,7 @@ tasks: - sync - name: test-4.0-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -913,7 +1203,7 @@ tasks: - async - name: test-4.0-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: server @@ -932,7 +1222,7 @@ tasks: - sync_async - name: test-4.2-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -952,7 +1242,7 @@ tasks: - sync - name: test-4.2-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -972,7 +1262,7 @@ tasks: - async - name: test-4.2-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -991,7 +1281,7 @@ tasks: - sync_async - name: test-4.2-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -1011,7 +1301,7 @@ tasks: - sync - name: test-4.2-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -1031,7 +1321,7 @@ tasks: - async - name: test-4.2-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -1050,7 +1340,7 @@ tasks: - sync_async - name: test-4.2-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -1070,7 +1360,7 @@ tasks: - sync - name: test-4.2-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -1090,7 +1380,7 @@ tasks: - async - name: test-4.2-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: server @@ -1109,7 +1399,7 @@ tasks: - sync_async - name: test-4.4-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1129,7 +1419,7 @@ tasks: - sync - name: test-4.4-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1149,7 +1439,7 @@ tasks: - async - name: test-4.4-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1168,7 +1458,7 @@ tasks: - sync_async - name: test-4.4-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1188,7 +1478,7 @@ tasks: - sync - name: test-4.4-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1208,7 +1498,7 @@ tasks: - async - name: test-4.4-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1227,7 +1517,7 @@ tasks: - sync_async - name: test-4.4-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1247,7 +1537,7 @@ tasks: - sync - name: test-4.4-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1267,7 +1557,7 @@ tasks: - async - name: test-4.4-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: server @@ -1286,7 +1576,7 @@ tasks: - sync_async - name: test-5.0-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1306,7 +1596,7 @@ tasks: - sync - name: test-5.0-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1326,7 +1616,7 @@ tasks: - async - name: test-5.0-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1345,7 +1635,7 @@ tasks: - sync_async - name: test-5.0-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1365,7 +1655,7 @@ tasks: - sync - name: test-5.0-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1385,7 +1675,7 @@ tasks: - async - name: test-5.0-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1404,7 +1694,7 @@ tasks: - sync_async - name: test-5.0-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1424,7 +1714,7 @@ tasks: - sync - name: test-5.0-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1444,7 +1734,7 @@ tasks: - async - name: test-5.0-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: server @@ -1463,7 +1753,7 @@ tasks: - sync_async - name: test-6.0-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1483,7 +1773,7 @@ tasks: - sync - name: test-6.0-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1503,7 +1793,7 @@ tasks: - async - name: test-6.0-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1522,7 +1812,7 @@ tasks: - sync_async - name: test-6.0-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1542,7 +1832,7 @@ tasks: - sync - name: test-6.0-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1562,7 +1852,7 @@ tasks: - async - name: test-6.0-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1581,7 +1871,7 @@ tasks: - sync_async - name: test-6.0-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1601,7 +1891,7 @@ tasks: - sync - name: test-6.0-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1621,7 +1911,7 @@ tasks: - async - name: test-6.0-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: server @@ -1640,7 +1930,7 @@ tasks: - sync_async - name: test-7.0-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1660,7 +1950,7 @@ tasks: - sync - name: test-7.0-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1680,7 +1970,7 @@ tasks: - async - name: test-7.0-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1699,7 +1989,7 @@ tasks: - sync_async - name: test-7.0-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1719,7 +2009,7 @@ tasks: - sync - name: test-7.0-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1739,7 +2029,7 @@ tasks: - async - name: test-7.0-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1758,7 +2048,7 @@ tasks: - sync_async - name: test-7.0-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1778,7 +2068,7 @@ tasks: - sync - name: test-7.0-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1798,7 +2088,7 @@ tasks: - async - name: test-7.0-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: server @@ -1817,7 +2107,7 @@ tasks: - sync_async - name: test-8.0-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1837,7 +2127,7 @@ tasks: - sync - name: test-8.0-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1857,7 +2147,7 @@ tasks: - async - name: test-8.0-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1876,7 +2166,7 @@ tasks: - sync_async - name: test-8.0-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1896,7 +2186,7 @@ tasks: - sync - name: test-8.0-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1916,7 +2206,7 @@ tasks: - async - name: test-8.0-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1935,7 +2225,7 @@ tasks: - sync_async - name: test-8.0-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1955,7 +2245,7 @@ tasks: - sync - name: test-8.0-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1975,7 +2265,7 @@ tasks: - async - name: test-8.0-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: server @@ -1994,7 +2284,7 @@ tasks: - sync_async - name: test-rapid-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2014,7 +2304,7 @@ tasks: - sync - name: test-rapid-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2034,7 +2324,7 @@ tasks: - async - name: test-rapid-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2053,7 +2343,7 @@ tasks: - sync_async - name: test-rapid-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2073,7 +2363,7 @@ tasks: - sync - name: test-rapid-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2093,7 +2383,7 @@ tasks: - async - name: test-rapid-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2112,7 +2402,7 @@ tasks: - sync_async - name: test-rapid-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2132,7 +2422,7 @@ tasks: - sync - name: test-rapid-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2152,7 +2442,7 @@ tasks: - async - name: test-rapid-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: server @@ -2171,7 +2461,7 @@ tasks: - sync_async - name: test-latest-standalone-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2191,7 +2481,7 @@ tasks: - sync - name: test-latest-standalone-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2211,7 +2501,7 @@ tasks: - async - name: test-latest-standalone-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2230,7 +2520,7 @@ tasks: - sync_async - name: test-latest-standalone-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2250,7 +2540,7 @@ tasks: - sync - name: test-latest-standalone-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2270,7 +2560,7 @@ tasks: - async - name: test-latest-standalone-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2289,7 +2579,7 @@ tasks: - sync_async - name: test-latest-standalone-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2309,7 +2599,7 @@ tasks: - sync - name: test-latest-standalone-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2329,7 +2619,7 @@ tasks: - async - name: test-latest-standalone-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: server @@ -2348,7 +2638,7 @@ tasks: - sync_async - name: test-4.0-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2368,7 +2658,7 @@ tasks: - sync - name: test-4.0-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2388,7 +2678,7 @@ tasks: - async - name: test-4.0-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2407,7 +2697,7 @@ tasks: - sync_async - name: test-4.0-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2427,7 +2717,7 @@ tasks: - sync - name: test-4.0-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2447,7 +2737,7 @@ tasks: - async - name: test-4.0-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2466,7 +2756,7 @@ tasks: - sync_async - name: test-4.0-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2486,7 +2776,7 @@ tasks: - sync - name: test-4.0-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2506,7 +2796,7 @@ tasks: - async - name: test-4.0-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: replica_set @@ -2525,7 +2815,7 @@ tasks: - sync_async - name: test-4.2-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2545,7 +2835,7 @@ tasks: - sync - name: test-4.2-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2565,7 +2855,7 @@ tasks: - async - name: test-4.2-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2584,7 +2874,7 @@ tasks: - sync_async - name: test-4.2-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2604,7 +2894,7 @@ tasks: - sync - name: test-4.2-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2624,7 +2914,7 @@ tasks: - async - name: test-4.2-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2643,7 +2933,7 @@ tasks: - sync_async - name: test-4.2-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2663,7 +2953,7 @@ tasks: - sync - name: test-4.2-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2683,7 +2973,7 @@ tasks: - async - name: test-4.2-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: replica_set @@ -2702,7 +2992,7 @@ tasks: - sync_async - name: test-4.4-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2722,7 +3012,7 @@ tasks: - sync - name: test-4.4-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2742,7 +3032,7 @@ tasks: - async - name: test-4.4-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2761,7 +3051,7 @@ tasks: - sync_async - name: test-4.4-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2781,7 +3071,7 @@ tasks: - sync - name: test-4.4-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2801,7 +3091,7 @@ tasks: - async - name: test-4.4-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2820,7 +3110,7 @@ tasks: - sync_async - name: test-4.4-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2840,7 +3130,7 @@ tasks: - sync - name: test-4.4-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2860,7 +3150,7 @@ tasks: - async - name: test-4.4-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: replica_set @@ -2879,7 +3169,7 @@ tasks: - sync_async - name: test-5.0-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2899,7 +3189,7 @@ tasks: - sync - name: test-5.0-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2919,7 +3209,7 @@ tasks: - async - name: test-5.0-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2938,7 +3228,7 @@ tasks: - sync_async - name: test-5.0-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2958,7 +3248,7 @@ tasks: - sync - name: test-5.0-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2978,7 +3268,7 @@ tasks: - async - name: test-5.0-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -2997,7 +3287,7 @@ tasks: - sync_async - name: test-5.0-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -3017,7 +3307,7 @@ tasks: - sync - name: test-5.0-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -3037,7 +3327,7 @@ tasks: - async - name: test-5.0-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: replica_set @@ -3056,7 +3346,7 @@ tasks: - sync_async - name: test-6.0-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3076,7 +3366,7 @@ tasks: - sync - name: test-6.0-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3096,7 +3386,7 @@ tasks: - async - name: test-6.0-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3115,7 +3405,7 @@ tasks: - sync_async - name: test-6.0-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3135,7 +3425,7 @@ tasks: - sync - name: test-6.0-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3155,7 +3445,7 @@ tasks: - async - name: test-6.0-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3174,7 +3464,7 @@ tasks: - sync_async - name: test-6.0-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3194,7 +3484,7 @@ tasks: - sync - name: test-6.0-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3214,7 +3504,7 @@ tasks: - async - name: test-6.0-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: replica_set @@ -3233,7 +3523,7 @@ tasks: - sync_async - name: test-7.0-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3253,7 +3543,7 @@ tasks: - sync - name: test-7.0-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3273,7 +3563,7 @@ tasks: - async - name: test-7.0-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3292,7 +3582,7 @@ tasks: - sync_async - name: test-7.0-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3312,7 +3602,7 @@ tasks: - sync - name: test-7.0-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3332,7 +3622,7 @@ tasks: - async - name: test-7.0-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3351,7 +3641,7 @@ tasks: - sync_async - name: test-7.0-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3371,7 +3661,7 @@ tasks: - sync - name: test-7.0-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3391,7 +3681,7 @@ tasks: - async - name: test-7.0-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: replica_set @@ -3410,7 +3700,7 @@ tasks: - sync_async - name: test-8.0-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3430,7 +3720,7 @@ tasks: - sync - name: test-8.0-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3450,7 +3740,7 @@ tasks: - async - name: test-8.0-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3469,7 +3759,7 @@ tasks: - sync_async - name: test-8.0-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3489,7 +3779,7 @@ tasks: - sync - name: test-8.0-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3509,7 +3799,7 @@ tasks: - async - name: test-8.0-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3528,7 +3818,7 @@ tasks: - sync_async - name: test-8.0-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3548,7 +3838,7 @@ tasks: - sync - name: test-8.0-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3568,7 +3858,7 @@ tasks: - async - name: test-8.0-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: replica_set @@ -3587,7 +3877,7 @@ tasks: - sync_async - name: test-rapid-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3607,7 +3897,7 @@ tasks: - sync - name: test-rapid-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3627,7 +3917,7 @@ tasks: - async - name: test-rapid-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3646,7 +3936,7 @@ tasks: - sync_async - name: test-rapid-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3666,7 +3956,7 @@ tasks: - sync - name: test-rapid-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3686,7 +3976,7 @@ tasks: - async - name: test-rapid-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3705,7 +3995,7 @@ tasks: - sync_async - name: test-rapid-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3725,7 +4015,7 @@ tasks: - sync - name: test-rapid-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3745,7 +4035,7 @@ tasks: - async - name: test-rapid-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: replica_set @@ -3764,7 +4054,7 @@ tasks: - sync_async - name: test-latest-replica_set-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3784,7 +4074,7 @@ tasks: - sync - name: test-latest-replica_set-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3804,7 +4094,7 @@ tasks: - async - name: test-latest-replica_set-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3823,7 +4113,7 @@ tasks: - sync_async - name: test-latest-replica_set-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3843,7 +4133,7 @@ tasks: - sync - name: test-latest-replica_set-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3863,7 +4153,7 @@ tasks: - async - name: test-latest-replica_set-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3882,7 +4172,7 @@ tasks: - sync_async - name: test-latest-replica_set-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3902,7 +4192,7 @@ tasks: - sync - name: test-latest-replica_set-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3922,7 +4212,7 @@ tasks: - async - name: test-latest-replica_set-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: replica_set @@ -3941,7 +4231,7 @@ tasks: - sync_async - name: test-4.0-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -3961,7 +4251,7 @@ tasks: - sync - name: test-4.0-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -3981,7 +4271,7 @@ tasks: - async - name: test-4.0-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4000,7 +4290,7 @@ tasks: - sync_async - name: test-4.0-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4020,7 +4310,7 @@ tasks: - sync - name: test-4.0-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4040,7 +4330,7 @@ tasks: - async - name: test-4.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4059,7 +4349,7 @@ tasks: - sync_async - name: test-4.0-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4079,7 +4369,7 @@ tasks: - sync - name: test-4.0-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4099,7 +4389,7 @@ tasks: - async - name: test-4.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.0" TOPOLOGY: sharded_cluster @@ -4118,7 +4408,7 @@ tasks: - sync_async - name: test-4.2-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4138,7 +4428,7 @@ tasks: - sync - name: test-4.2-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4158,7 +4448,7 @@ tasks: - async - name: test-4.2-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4177,7 +4467,7 @@ tasks: - sync_async - name: test-4.2-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4197,7 +4487,7 @@ tasks: - sync - name: test-4.2-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4217,7 +4507,7 @@ tasks: - async - name: test-4.2-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4236,7 +4526,7 @@ tasks: - sync_async - name: test-4.2-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4256,7 +4546,7 @@ tasks: - sync - name: test-4.2-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4276,7 +4566,7 @@ tasks: - async - name: test-4.2-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.2" TOPOLOGY: sharded_cluster @@ -4295,7 +4585,7 @@ tasks: - sync_async - name: test-4.4-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4315,7 +4605,7 @@ tasks: - sync - name: test-4.4-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4335,7 +4625,7 @@ tasks: - async - name: test-4.4-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4354,7 +4644,7 @@ tasks: - sync_async - name: test-4.4-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4374,7 +4664,7 @@ tasks: - sync - name: test-4.4-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4394,7 +4684,7 @@ tasks: - async - name: test-4.4-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4413,7 +4703,7 @@ tasks: - sync_async - name: test-4.4-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4433,7 +4723,7 @@ tasks: - sync - name: test-4.4-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4453,7 +4743,7 @@ tasks: - async - name: test-4.4-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "4.4" TOPOLOGY: sharded_cluster @@ -4472,7 +4762,7 @@ tasks: - sync_async - name: test-5.0-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4492,7 +4782,7 @@ tasks: - sync - name: test-5.0-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4512,7 +4802,7 @@ tasks: - async - name: test-5.0-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4531,7 +4821,7 @@ tasks: - sync_async - name: test-5.0-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4551,7 +4841,7 @@ tasks: - sync - name: test-5.0-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4571,7 +4861,7 @@ tasks: - async - name: test-5.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4590,7 +4880,7 @@ tasks: - sync_async - name: test-5.0-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4610,7 +4900,7 @@ tasks: - sync - name: test-5.0-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4630,7 +4920,7 @@ tasks: - async - name: test-5.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "5.0" TOPOLOGY: sharded_cluster @@ -4649,7 +4939,7 @@ tasks: - sync_async - name: test-6.0-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4669,7 +4959,7 @@ tasks: - sync - name: test-6.0-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4689,7 +4979,7 @@ tasks: - async - name: test-6.0-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4708,7 +4998,7 @@ tasks: - sync_async - name: test-6.0-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4728,7 +5018,7 @@ tasks: - sync - name: test-6.0-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4748,7 +5038,7 @@ tasks: - async - name: test-6.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4767,7 +5057,7 @@ tasks: - sync_async - name: test-6.0-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4787,7 +5077,7 @@ tasks: - sync - name: test-6.0-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4807,7 +5097,7 @@ tasks: - async - name: test-6.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "6.0" TOPOLOGY: sharded_cluster @@ -4826,7 +5116,7 @@ tasks: - sync_async - name: test-7.0-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4846,7 +5136,7 @@ tasks: - sync - name: test-7.0-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4866,7 +5156,7 @@ tasks: - async - name: test-7.0-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4885,7 +5175,7 @@ tasks: - sync_async - name: test-7.0-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4905,7 +5195,7 @@ tasks: - sync - name: test-7.0-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4925,7 +5215,7 @@ tasks: - async - name: test-7.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4944,7 +5234,7 @@ tasks: - sync_async - name: test-7.0-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4964,7 +5254,7 @@ tasks: - sync - name: test-7.0-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -4984,7 +5274,7 @@ tasks: - async - name: test-7.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "7.0" TOPOLOGY: sharded_cluster @@ -5003,7 +5293,7 @@ tasks: - sync_async - name: test-8.0-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5023,7 +5313,7 @@ tasks: - sync - name: test-8.0-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5043,7 +5333,7 @@ tasks: - async - name: test-8.0-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5062,7 +5352,7 @@ tasks: - sync_async - name: test-8.0-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5082,7 +5372,7 @@ tasks: - sync - name: test-8.0-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5102,7 +5392,7 @@ tasks: - async - name: test-8.0-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5121,7 +5411,7 @@ tasks: - sync_async - name: test-8.0-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5141,7 +5431,7 @@ tasks: - sync - name: test-8.0-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5161,7 +5451,7 @@ tasks: - async - name: test-8.0-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: "8.0" TOPOLOGY: sharded_cluster @@ -5180,7 +5470,7 @@ tasks: - sync_async - name: test-rapid-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5200,7 +5490,7 @@ tasks: - sync - name: test-rapid-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5220,7 +5510,7 @@ tasks: - async - name: test-rapid-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5239,7 +5529,7 @@ tasks: - sync_async - name: test-rapid-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5259,7 +5549,7 @@ tasks: - sync - name: test-rapid-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5279,7 +5569,7 @@ tasks: - async - name: test-rapid-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5298,7 +5588,7 @@ tasks: - sync_async - name: test-rapid-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5318,7 +5608,7 @@ tasks: - sync - name: test-rapid-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5338,7 +5628,7 @@ tasks: - async - name: test-rapid-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: rapid TOPOLOGY: sharded_cluster @@ -5357,7 +5647,7 @@ tasks: - sync_async - name: test-latest-sharded_cluster-auth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5377,7 +5667,7 @@ tasks: - sync - name: test-latest-sharded_cluster-auth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5397,7 +5687,7 @@ tasks: - async - name: test-latest-sharded_cluster-auth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5416,7 +5706,7 @@ tasks: - sync_async - name: test-latest-sharded_cluster-noauth-ssl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5436,7 +5726,7 @@ tasks: - sync - name: test-latest-sharded_cluster-noauth-ssl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5456,7 +5746,7 @@ tasks: - async - name: test-latest-sharded_cluster-noauth-ssl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5475,7 +5765,7 @@ tasks: - sync_async - name: test-latest-sharded_cluster-noauth-nossl-sync commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5495,7 +5785,7 @@ tasks: - sync - name: test-latest-sharded_cluster-noauth-nossl-async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster @@ -5515,7 +5805,7 @@ tasks: - async - name: test-latest-sharded_cluster-noauth-nossl-sync_async commands: - - func: run-server + - func: run server vars: VERSION: latest TOPOLOGY: sharded_cluster diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 03b4619899..505c6de060 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -801,7 +801,7 @@ def create_server_tasks(): AUTH=auth, SSL=ssl, ) - server_func = FunctionCall(func="run-server", vars=server_vars) + server_func = FunctionCall(func="run server", vars=server_vars) test_vars = dict(AUTH=auth, SSL=ssl, SYNC=sync) if sync == "sync": test_vars["TEST_NAME"] = "default_sync" @@ -820,7 +820,7 @@ def create_load_balancer_tasks(): server_vars = dict( TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, TEST_NAME="load_balancer" ) - server_func = FunctionCall(func="run-server", vars=server_vars) + server_func = FunctionCall(func="run server", vars=server_vars) test_vars = dict(AUTH=auth, SSL=ssl, TEST_NAME="load_balancer") test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) @@ -839,7 +839,7 @@ def create_kms_tasks(): sub_test_name += "-fail" commands = [] if not success: - commands.append(FunctionCall(func="run-server")) + commands.append(FunctionCall(func="run server")) test_vars = dict(TEST_NAME="kms", SUB_TEST_NAME=sub_test_name) test_func = FunctionCall(func="run tests", vars=test_vars) commands.append(test_func) @@ -862,7 +862,7 @@ def create_aws_tasks(): base_name = f"test-auth-aws-{version}" base_tags = ["auth-aws"] server_vars = dict(AUTH_AWS="1", VERSION=version) - server_func = FunctionCall(func="run-server", vars=server_vars) + server_func = FunctionCall(func="run server", vars=server_vars) assume_func = FunctionCall(func="assume ec2 role") for test_type in aws_test_types: tags = [*base_tags, f"auth-aws-{test_type}"] @@ -884,6 +884,60 @@ def create_aws_tasks(): return tasks +def _create_ocsp_task(algo, variant, server_type, base_task_name): + file_name = f"{algo}-basic-tls-ocsp-{variant}.json" + + vars = dict(TEST_NAME="ocsp", ORCHESTRATION_FILE=file_name) + server_func = FunctionCall(func="run server", vars=vars) + + vars = dict(ORCHESTRATION_FILE=file_name, OCSP_SERVER_TYPE=server_type, TEST_NAME="ocsp") + test_func = FunctionCall(func="run tests", vars=vars) + + tags = ["ocsp", f"ocsp-{algo}"] + if "disableStapling" not in variant: + tags.append("ocsp-staple") + + task_name = f"test-ocsp-{algo}-{base_task_name}" + commands = [server_func, test_func] + return EvgTask(name=task_name, tags=tags, commands=commands) + + +def create_ocsp_tasks(): + tasks = [] + tests = [ + ("disableStapling", "valid", "valid-cert-server-does-not-staple"), + ("disableStapling", "revoked", "invalid-cert-server-does-not-staple"), + ("disableStapling", "valid-delegate", "delegate-valid-cert-server-does-not-staple"), + ("disableStapling", "revoked-delegate", "delegate-invalid-cert-server-does-not-staple"), + ("disableStapling", "no-responder", "soft-fail"), + ("mustStaple", "valid", "valid-cert-server-staples"), + ("mustStaple", "revoked", "invalid-cert-server-staples"), + ("mustStaple", "valid-delegate", "delegate-valid-cert-server-staples"), + ("mustStaple", "revoked-delegate", "delegate-invalid-cert-server-staples"), + ( + "mustStaple-disableStapling", + "revoked", + "malicious-invalid-cert-mustStaple-server-does-not-staple", + ), + ( + "mustStaple-disableStapling", + "revoked-delegate", + "delegate-malicious-invalid-cert-mustStaple-server-does-not-staple", + ), + ( + "mustStaple-disableStapling", + "no-responder", + "malicious-no-responder-mustStaple-server-does-not-staple", + ), + ] + for algo in ["ecdsa", "rsa"]: + for variant, server_type, base_task_name in tests: + task = _create_ocsp_task(algo, variant, server_type, base_task_name) + tasks.append(task) + + return tasks + + ################## # Generate Config ################## diff --git a/.evergreen/scripts/run-ocsp-test.sh b/.evergreen/scripts/run-ocsp-test.sh deleted file mode 100755 index 9c48867041..0000000000 --- a/.evergreen/scripts/run-ocsp-test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -eu - -pushd "${PROJECT_DIRECTORY}/.evergreen" -bash scripts/setup-dev-env.sh -CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ - OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ - bash scripts/setup-tests.sh ocsp -bash run-tests.sh -bash "${DRIVERS_TOOLS}"/.evergreen/ocsp/teardown.sh - -popd diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py index 51fe8a67f1..f6a45c23a4 100644 --- a/.evergreen/scripts/run_server.py +++ b/.evergreen/scripts/run_server.py @@ -32,15 +32,26 @@ def start_server(): elif test_name == "load_balancer": set_env("LOAD_BALANCER") + elif test_name == "ocsp": + opts.ssl = True + if "ORCHESTRATION_FILE" not in os.environ: + found = False + for opt in extra_opts: + if opt.startswith("--orchestration-file"): + found = True + if not found: + raise ValueError("Please provide an orchestration file") + if not os.environ.get("TEST_CRYPT_SHARED"): set_env("SKIP_CRYPT_SHARED") if opts.ssl: extra_opts.append("--ssl") - certs = ROOT / "test/certificates" - set_env("TLS_CERT_KEY_FILE", certs / "client.pem") - set_env("TLS_PEM_KEY_FILE", certs / "server.pem") - set_env("TLS_CA_FILE", certs / "ca.pem") + if test_name != "ocsp": + certs = ROOT / "test/certificates" + set_env("TLS_CERT_KEY_FILE", certs / "client.pem") + set_env("TLS_PEM_KEY_FILE", certs / "server.pem") + set_env("TLS_CA_FILE", certs / "ca.pem") cmd = ["bash", f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", *extra_opts] run_command(cmd, cwd=DRIVERS_TOOLS) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 2fa5e69cbc..b75a821c3a 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -239,6 +239,30 @@ def handle_test_env() -> None: cmd = f'bash "{DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh" start' run_command(cmd) + if test_name == "ocsp": + if sub_test_name: + os.environ["OCSP_SERVER_TYPE"] = sub_test_name + for name in ["OCSP_SERVER_TYPE", "ORCHESTRATION_FILE"]: + if name not in os.environ: + raise ValueError(f"Please set {name}") + + server_type = os.environ["OCSP_SERVER_TYPE"] + orch_file = os.environ["ORCHESTRATION_FILE"] + ocsp_algo = orch_file.split("-")[0] + if server_type == "no-responder": + tls_should_succeed = "false" if "mustStaple-disableStapling" in orch_file else "true" + else: + tls_should_succeed = "true" if "valid" in server_type else "false" + + write_env("OCSP_TLS_SHOULD_SUCCEED", tls_should_succeed) + write_env("CA_FILE", f"{DRIVERS_TOOLS}/.evergreen/ocsp/{ocsp_algo}/ca.pem") + + if server_type != "no-responder": + env = os.environ.copy() + env["SERVER_TYPE"] = server_type + env["OCSP_ALGORITHM"] = ocsp_algo + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh", env=env) + if SSL != "nossl": if not DRIVERS_TOOLS: raise RuntimeError("Missing DRIVERS_TOOLS") @@ -302,10 +326,6 @@ def handle_test_env() -> None: setup_kms(sub_test_name) - if test_name == "ocsp": - write_env("CA_FILE", os.environ["CA_FILE"]) - write_env("OCSP_TLS_SHOULD_SUCCEED", os.environ["OCSP_TLS_SHOULD_SUCCEED"]) - if test_name == "auth_aws" and sub_test_name != "ecs-remote": auth_aws_dir = f"{DRIVERS_TOOLS}/.evergreen/auth_aws" if "AWS_ROLE_SESSION_NAME" in os.environ: diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index 824fc2c9bb..fedbdc2fe8 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -24,6 +24,10 @@ teardown_kms(SUB_TEST_NAME) +# Tear down ocsp if applicable. +elif TEST_NAME == "ocsp": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/teardown.sh") + # Tear down auth_aws if applicable. # We do not run web-identity hosts on macos, because the hosts lack permissions, # so there is no reason to run the teardown, which would error with a 401. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b3423ed402..1d8783d9d1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -218,12 +218,12 @@ the pages will re-render and the browser will automatically refresh. ### Usage - Run `just run-server` with optional args to set up the server. - All given flags will be passed to `run-orchestration.sh` in `DRIVERS_TOOLS`. + All given flags will be passed to `run-orchestration.sh` in `$DRIVERS_TOOLS`. - Run `just setup-tests` with optional args to set up the test environment, secrets, etc. - Run `just run-tests` to run the tests in an appropriate Python environment. - When done, run `just teardown-tests` to clean up and `just stop-server` to stop the server. -## Encryption tests +### Encryption tests - Run `just run-server` to start the server. - Run `just setup-tests encryption`. @@ -236,13 +236,13 @@ the pages will re-render and the browser will automatically refresh. - Set up the test with `just setup-tests load_balancer`. - Run the tests with `just run-tests`. -## AWS tests +### AWS tests - Run `just run-server auth_aws` to start the server. - Run `just setup-tests auth_aws ` to set up the AWS test. - Run the tests with `just run-tests`. -## KMS tests +### KMS tests For KMS tests that are run locally, and expected to fail, in this case using `azure`: @@ -255,6 +255,17 @@ For KMS tests that run remotely and are expected to pass, in this case using `gc - Run `just setup-tests kms gcp`. - Run `just run-tests`. +### OCSP tests + + - Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. + This corresponds to a config file in `$DRIVERS_TOOLS/.evergreen/orchestration/configs/servers`. + MongoDB servers on MacOS and Windows do not staple OCSP responses and only support RSA. + - Run `just run-server ocsp`. + - Run `just setup-tests ocsp ` (options are "valid", "revoked", "valid-delegate", "revoked-delegate"). + - Run `just run-tests` + + If you are running one of the `no-responder` tests, omit the `run-server` step. + ## Enable Debug Logs - Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. - Add `log_cli_level = "DEBUG` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine. From b66a5cb673499626e660102a984c5e1036717ab3 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Mon, 10 Mar 2025 15:45:32 -0400 Subject: [PATCH 1789/2111] PYTHON-5172 bugfix: Add __repr__ and __eq__ to bson.binary.BinaryVector (#2162) --- bson/binary.py | 12 ++++++++-- test/test_bson.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index ee481fa1a5..6698e55ccc 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -14,7 +14,6 @@ from __future__ import annotations import struct -from dataclasses import dataclass from enum import Enum from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union, overload from uuid import UUID @@ -227,7 +226,6 @@ class BinaryVectorDtype(Enum): PACKED_BIT = b"\x10" -@dataclass class BinaryVector: """Vector of numbers along with metadata for binary interoperability. .. versionadded:: 4.10 @@ -247,6 +245,16 @@ def __init__(self, data: Sequence[float | int], dtype: BinaryVectorDtype, paddin self.dtype = dtype self.padding = padding + def __repr__(self) -> str: + return f"BinaryVector(dtype={self.dtype}, padding={self.padding}, data={self.data})" + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, BinaryVector): + return False + return ( + self.dtype == other.dtype and self.padding == other.padding and self.data == other.data + ) + class Binary(bytes): """Representation of BSON binary data. diff --git a/test/test_bson.py b/test/test_bson.py index e704efe451..6f26856b00 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -809,6 +809,64 @@ def test_vector(self): dtype=BinaryVectorDtype.PACKED_BIT, ) # type: ignore[call-overload] + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + def test_binaryvector_repr(self): + """Tests of repr(BinaryVector)""" + + data = [1 / 127, -7 / 6] + one = BinaryVector(data, BinaryVectorDtype.FLOAT32) + self.assertEqual( + repr(one), f"BinaryVector(dtype=BinaryVectorDtype.FLOAT32, padding=0, data={data})" + ) + self.assertRepr(one) + + data = [127, 7] + two = BinaryVector(data, BinaryVectorDtype.INT8) + self.assertEqual( + repr(two), f"BinaryVector(dtype=BinaryVectorDtype.INT8, padding=0, data={data})" + ) + self.assertRepr(two) + + three = BinaryVector(data, BinaryVectorDtype.INT8, padding=0) + self.assertEqual( + repr(three), f"BinaryVector(dtype=BinaryVectorDtype.INT8, padding=0, data={data})" + ) + self.assertRepr(three) + + four = BinaryVector(data, BinaryVectorDtype.PACKED_BIT, padding=3) + self.assertEqual( + repr(four), f"BinaryVector(dtype=BinaryVectorDtype.PACKED_BIT, padding=3, data={data})" + ) + self.assertRepr(four) + + zero = BinaryVector([], BinaryVectorDtype.INT8) + self.assertEqual( + repr(zero), "BinaryVector(dtype=BinaryVectorDtype.INT8, padding=0, data=[])" + ) + self.assertRepr(zero) + + def test_binaryvector_equality(self): + """Tests of == __eq__""" + self.assertEqual( + BinaryVector([1.2, 1 - 1 / 3], BinaryVectorDtype.FLOAT32, 0), + BinaryVector([1.2, 1 - 1.0 / 3.0], BinaryVectorDtype.FLOAT32, 0), + ) + self.assertNotEqual( + BinaryVector([1.2, 1 - 1 / 3], BinaryVectorDtype.FLOAT32, 0), + BinaryVector([1.2, 6.0 / 9.0], BinaryVectorDtype.FLOAT32, 0), + ) + self.assertEqual( + BinaryVector([], BinaryVectorDtype.FLOAT32, 0), + BinaryVector([], BinaryVectorDtype.FLOAT32, 0), + ) + self.assertNotEqual( + BinaryVector([1], BinaryVectorDtype.INT8), BinaryVector([2], BinaryVectorDtype.INT8) + ) + def test_unicode_regex(self): """Tests we do not get a segfault for C extension on unicode RegExs. This had been happening. From 61d435408e05693fa57801738c3180fffb74508b Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Mar 2025 14:19:16 -0700 Subject: [PATCH 1790/2111] PYTHON-5194 Test secondary with IPv6 literal in SDAM (#2189) --- .../rs/secondary_ipv6_literal.json | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 test/discovery_and_monitoring/rs/secondary_ipv6_literal.json diff --git a/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json b/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json new file mode 100644 index 0000000000..c23d8dc4c9 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json @@ -0,0 +1,38 @@ +{ + "description": "Secondary with IPv6 literal", + "uri": "mongodb://[::1]/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "[::1]:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "me": "[::1]:27017", + "hosts": [ + "[::1]:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 26 + } + ] + ], + "outcome": { + "servers": { + "[::1]:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} From 7ef18af49b35335d8cf92746c8bcfde65e0d989f Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 10 Mar 2025 17:11:38 -0700 Subject: [PATCH 1791/2111] PYTHON-4580 Add key_expiration_ms option for DEK cache lifetime (#2186) --- doc/changelog.rst | 18 +- pymongo/asynchronous/encryption.py | 26 +- pymongo/encryption_options.py | 10 +- pymongo/synchronous/encryption.py | 26 +- test/asynchronous/unified_format.py | 3 +- test/asynchronous/utils_spec_runner.py | 5 + .../spec/legacy/fle2v2-Rangev2-Compact.json | 3 +- .../spec/legacy/keyCache.json | 270 ++++++++++++++++++ .../spec/legacy/timeoutMS.json | 4 +- .../spec/unified/keyCache.json | 198 +++++++++++++ test/unified_format.py | 3 +- test/utils_spec_runner.py | 5 + uv.lock | 2 +- 13 files changed, 549 insertions(+), 24 deletions(-) create mode 100644 test/client-side-encryption/spec/legacy/keyCache.json create mode 100644 test/client-side-encryption/spec/unified/keyCache.json diff --git a/doc/changelog.rst b/doc/changelog.rst index cf5d5e8ff7..21e86953c6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,7 +1,23 @@ Changelog ========= -Changes in Version 4.11.2 (YYYY/MM/DD) +Changes in Version 4.12.0 (YYYY/MM/DD) +-------------------------------------- + +PyMongo 4.12 brings a number of changes including: + +- Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to + :class:`~pymongo.encryption_options.AutoEncryptionOpts`. + +Issues Resolved +............... + +See the `PyMongo 4.12 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.12 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=41916 + +Changes in Version 4.11.2 (2025/03/05) -------------------------------------- Version 4.11.2 is a bug fix release. diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 9d3ea67191..ef8d817b2c 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -445,6 +445,7 @@ def _get_internal_client( bypass_encryption=opts._bypass_auto_encryption, encrypted_fields_map=encrypted_fields_map, bypass_query_analysis=opts._bypass_query_analysis, + key_expiration_ms=opts._key_expiration_ms, ), ) self._closed = False @@ -547,11 +548,10 @@ class QueryType(str, enum.Enum): def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: - opts = MongoCryptOptions(**kwargs) - # Opt into range V2 encryption. - if hasattr(opts, "enable_range_v2"): - opts.enable_range_v2 = True - return opts + # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. + if kwargs.get("key_expiration_ms") is None: + kwargs.pop("key_expiration_ms", None) + return MongoCryptOptions(**kwargs) class AsyncClientEncryption(Generic[_DocumentType]): @@ -564,6 +564,7 @@ def __init__( key_vault_client: AsyncMongoClient[_DocumentTypeArg], codec_options: CodecOptions[_DocumentTypeArg], kms_tls_options: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, ) -> None: """Explicit client-side field level encryption. @@ -630,7 +631,12 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -666,14 +672,19 @@ def __init__( key_vault_coll = key_vault_client[db][coll] opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options + kms_providers, + key_vault_namespace, + kms_tls_options=kms_tls_options, + key_expiration_ms=key_expiration_ms, ) self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( None, key_vault_coll, None, opts ) self._encryption = AsyncExplicitEncrypter( self._io_callbacks, - _create_mongocrypt_options(kms_providers=kms_providers, schema_map=None), + _create_mongocrypt_options( + kms_providers=kms_providers, schema_map=None, key_expiration_ms=key_expiration_ms + ), ) # Use the same key vault collection as the callback. assert self._io_callbacks.key_vault_coll is not None @@ -700,6 +711,7 @@ async def create_encrypted_collection( creation. :class:`~pymongo.errors.EncryptionError` will be raised if the collection already exists. + :param database: the database to create the collection :param name: the name of the collection to create :param encrypted_fields: Document that describes the encrypted fields for Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 26dfbf5f03..a1c40dc7b2 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -57,6 +57,7 @@ def __init__( crypt_shared_lib_required: bool = False, bypass_query_analysis: bool = False, encrypted_fields_map: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, ) -> None: """Options to configure automatic client-side field level encryption. @@ -191,9 +192,14 @@ def __init__( ] } } + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. .. versionchanged:: 4.2 - Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, + Added the `encrypted_fields_map`, `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` parameters. .. versionchanged:: 4.0 @@ -210,7 +216,6 @@ def __init__( if encrypted_fields_map: validate_is_mapping("encrypted_fields_map", encrypted_fields_map) self._encrypted_fields_map = encrypted_fields_map - self._bypass_query_analysis = bypass_query_analysis self._crypt_shared_lib_path = crypt_shared_lib_path self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers @@ -233,6 +238,7 @@ def __init__( # Maps KMS provider name to a SSLContext. self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) self._bypass_query_analysis = bypass_query_analysis + self._key_expiration_ms = key_expiration_ms class RangeOpts: diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 7cbac1c509..a97534ed41 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -442,6 +442,7 @@ def _get_internal_client( bypass_encryption=opts._bypass_auto_encryption, encrypted_fields_map=encrypted_fields_map, bypass_query_analysis=opts._bypass_query_analysis, + key_expiration_ms=opts._key_expiration_ms, ), ) self._closed = False @@ -544,11 +545,10 @@ class QueryType(str, enum.Enum): def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: - opts = MongoCryptOptions(**kwargs) - # Opt into range V2 encryption. - if hasattr(opts, "enable_range_v2"): - opts.enable_range_v2 = True - return opts + # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. + if kwargs.get("key_expiration_ms") is None: + kwargs.pop("key_expiration_ms", None) + return MongoCryptOptions(**kwargs) class ClientEncryption(Generic[_DocumentType]): @@ -561,6 +561,7 @@ def __init__( key_vault_client: MongoClient[_DocumentTypeArg], codec_options: CodecOptions[_DocumentTypeArg], kms_tls_options: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, ) -> None: """Explicit client-side field level encryption. @@ -627,7 +628,12 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. .. versionchanged:: 4.0 Added the `kms_tls_options` parameter and the "kmip" KMS provider. @@ -659,14 +665,19 @@ def __init__( key_vault_coll = key_vault_client[db][coll] opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options + kms_providers, + key_vault_namespace, + kms_tls_options=kms_tls_options, + key_expiration_ms=key_expiration_ms, ) self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( None, key_vault_coll, None, opts ) self._encryption = ExplicitEncrypter( self._io_callbacks, - _create_mongocrypt_options(kms_providers=kms_providers, schema_map=None), + _create_mongocrypt_options( + kms_providers=kms_providers, schema_map=None, key_expiration_ms=key_expiration_ms + ), ) # Use the same key vault collection as the callback. assert self._io_callbacks.key_vault_coll is not None @@ -693,6 +704,7 @@ def create_encrypted_collection( creation. :class:`~pymongo.errors.EncryptionError` will be raised if the collection already exists. + :param database: the database to create the collection :param name: the name of the collection to create :param encrypted_fields: Document that describes the encrypted fields for Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index d4c3d40d20..ce0b9979e2 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -378,6 +378,7 @@ async def drop(self: AsyncGridFSBucket, *args: Any, **kwargs: Any) -> None: opts["key_vault_client"], DEFAULT_CODEC_OPTIONS, opts.get("kms_tls_options", kms_tls_options), + opts.get("key_expiration_ms"), ) return elif entity_type == "thread": @@ -439,7 +440,7 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.21") + SCHEMA_VERSION = Version.from_string("1.22") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index 7530ba36a7..f1c6deb690 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -18,6 +18,7 @@ import asyncio import functools import os +import time import unittest from asyncio import iscoroutinefunction from collections import abc @@ -314,6 +315,10 @@ async def assert_index_not_exists(self, database, collection, index): coll = self.client[database][collection] self.assertNotIn(index, [doc["name"] async for doc in await coll.list_indexes()]) + async def wait(self, ms): + """Run the "wait" test operation.""" + await asyncio.sleep(ms / 1000.0) + def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] self.assertEqual(labels, expected_labels) diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json index bba9f25535..59241927ca 100644 --- a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json @@ -6,8 +6,7 @@ "replicaset", "sharded", "load-balanced" - ], - "serverless": "forbid" + ] } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/keyCache.json b/test/client-side-encryption/spec/legacy/keyCache.json new file mode 100644 index 0000000000..912ce80020 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/keyCache.json @@ -0,0 +1,270 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "keyExpirationMS": 1 + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 50 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json index 8411306224..b667767cfc 100644 --- a/test/client-side-encryption/spec/legacy/timeoutMS.json +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -110,7 +110,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 600 + "blockTimeMS": 60 } }, "clientOptions": { @@ -119,7 +119,7 @@ "aws": {} } }, - "timeoutMS": 500 + "timeoutMS": 50 }, "operations": [ { diff --git a/test/client-side-encryption/spec/unified/keyCache.json b/test/client-side-encryption/spec/unified/keyCache.json new file mode 100644 index 0000000000..a39701e286 --- /dev/null +++ b/test/client-side-encryption/spec/unified/keyCache.json @@ -0,0 +1,198 @@ +{ + "description": "keyCache-explicit", + "schemaVersion": "1.22", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "OCTP9uKPPmvuqpHlqq83gPk4U6rUPxKVRRyVtrjFmVjdoa4Xzm1SzUbr7aIhNI42czkUBmrCtZKF31eaaJnxEBkqf0RFukA9Mo3NEHQWgAQ2cn9duOcRbaFUQo2z0/rB" + } + }, + "keyExpirationMS": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + }, + "keyAltNames": [], + "keyMaterial": { + "$binary": { + "base64": "iocBkhO3YBokiJ+FtxDTS71/qKXQ7tSWhWbcnFTXBcMjarsepvALeJ5li+SdUd9ePuatjidxAdMo7vh1V2ZESLMkQWdpPJ9PaJjA67gKQKbbbB4Ik5F2uKjULvrMBnFNVRMup4JNUwWFQJpqbfMveXnUVcD06+pUpAkml/f+DSXrV3e5rxciiNVtz03dAG8wJrsKsFXWj6vTjFhsfknyBA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "decrypt, wait, and decrypt again", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AWvmFs3coEwButv3DVJKmYkCJ6lUzRX9R28WNlw5uyndb+8gurA+p8q14s7GZ04K2ZvghieRlAr5UwZbow3PMq27u5EIhDDczwBFcbdP1amllw==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 50 + } + }, + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AWvmFs3coEwButv3DVJKmYkCJ6lUzRX9R28WNlw5uyndb+8gurA+p8q14s7GZ04K2ZvghieRlAr5UwZbow3PMq27u5EIhDDczwBFcbdP1amllw==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 293fbd97ca..682a6105f3 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -377,6 +377,7 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: opts["key_vault_client"], DEFAULT_CODEC_OPTIONS, opts.get("kms_tls_options", kms_tls_options), + opts.get("key_expiration_ms"), ) return elif entity_type == "thread": @@ -438,7 +439,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.21") + SCHEMA_VERSION = Version.from_string("1.22") RUN_ON_LOAD_BALANCER = True RUN_ON_SERVERLESS = True TEST_SPEC: Any diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index ac4031e821..fe0ba6eb44 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -18,6 +18,7 @@ import asyncio import functools import os +import time import unittest from asyncio import iscoroutinefunction from collections import abc @@ -314,6 +315,10 @@ def assert_index_not_exists(self, database, collection, index): coll = self.client[database][collection] self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()]) + def wait(self, ms): + """Run the "wait" test operation.""" + time.sleep(ms / 1000.0) + def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] self.assertEqual(labels, expected_labels) diff --git a/uv.lock b/uv.lock index a2e951e76c..8b5d592dc0 100644 --- a/uv.lock +++ b/uv.lock @@ -1133,7 +1133,7 @@ wheels = [ [[package]] name = "pymongocrypt" version = "1.13.0.dev0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#90476d5db7737bab2ce1c198df5671a12dbaae1a" } +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#1e96c283162aa7789cf01f99f211e0ace8e6d49f" } dependencies = [ { name = "cffi" }, { name = "cryptography" }, From 4322fdf7ce67e3f4d72487fd6057aaab9f19550c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Mar 2025 10:17:15 -0500 Subject: [PATCH 1792/2111] PYTHON-5199 Fix handling of MongoDB version in run-server (#2193) --- .evergreen/scripts/run_server.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py index f6a45c23a4..4ef1568add 100644 --- a/.evergreen/scripts/run_server.py +++ b/.evergreen/scripts/run_server.py @@ -18,6 +18,10 @@ def start_server(): ) test_name = opts.test_name + # drivers-evergreen-tools expects the version variable to be named MONGODB_VERSION. + if "VERSION" in os.environ: + os.environ["MONGODB_VERSION"] = os.environ["VERSION"] + if opts.auth: extra_opts.append("--auth") From 34ca759f85039ea133e75aba9420a757a53bebd2 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 12 Mar 2025 12:59:56 -0400 Subject: [PATCH 1793/2111] PYTHON-5113 - Refactor test utils for async (#2149) --- test/asynchronous/test_async_cancellation.py | 3 +- test/asynchronous/test_auth.py | 2 +- test/asynchronous/test_bulk.py | 2 +- test/asynchronous/test_change_stream.py | 2 +- test/asynchronous/test_client.py | 10 +- test/asynchronous/test_client_bulk_write.py | 2 +- test/asynchronous/test_collation.py | 2 +- test/asynchronous/test_collection.py | 5 +- test/asynchronous/test_comment.py | 2 +- test/asynchronous/test_concurrency.py | 2 +- .../test_connection_monitoring.py | 8 +- ...nnections_survive_primary_stepdown_spec.py | 4 +- test/asynchronous/test_cursor.py | 2 +- test/asynchronous/test_data_lake.py | 2 +- test/asynchronous/test_database.py | 2 +- .../test_discovery_and_monitoring.py | 19 +- test/asynchronous/test_dns.py | 2 +- test/asynchronous/test_encryption.py | 2 +- test/asynchronous/test_examples.py | 2 +- test/asynchronous/test_grid_file.py | 2 +- test/asynchronous/test_gridfs.py | 3 +- test/asynchronous/test_gridfs_bucket.py | 3 +- .../asynchronous/test_heartbeat_monitoring.py | 3 +- test/asynchronous/test_index_management.py | 2 +- test/asynchronous/test_load_balancer.py | 4 +- test/asynchronous/test_max_staleness.py | 2 +- .../test_mongos_load_balancing.py | 2 +- test/asynchronous/test_monitor.py | 4 +- test/asynchronous/test_monitoring.py | 2 +- test/asynchronous/test_pooling.py | 3 +- test/asynchronous/test_read_concern.py | 2 +- test/asynchronous/test_read_preferences.py | 2 +- .../test_read_write_concern_spec.py | 2 +- test/asynchronous/test_retryable_reads.py | 4 +- test/asynchronous/test_retryable_writes.py | 4 +- .../asynchronous/test_sdam_monitoring_spec.py | 2 +- test/asynchronous/test_server_selection.py | 9 +- .../test_server_selection_in_window.py | 3 +- test/asynchronous/test_session.py | 3 +- test/asynchronous/test_srv_polling.py | 3 +- test/asynchronous/test_ssl.py | 2 +- test/asynchronous/test_streaming_protocol.py | 2 +- test/asynchronous/test_transactions.py | 2 +- .../test_versioned_api_integration.py | 2 +- test/asynchronous/unified_format.py | 4 +- test/asynchronous/utils.py | 211 ++++ test/asynchronous/utils_selection_tests.py | 3 +- test/asynchronous/utils_spec_runner.py | 2 +- test/auth_oidc/test_auth_oidc.py | 2 +- test/test_auth.py | 2 +- test/test_bulk.py | 2 +- test/test_change_stream.py | 2 +- test/test_client.py | 8 +- test/test_client_bulk_write.py | 2 +- test/test_collation.py | 2 +- test/test_collection.py | 5 +- test/test_comment.py | 2 +- test/test_connection_monitoring.py | 8 +- ...nnections_survive_primary_stepdown_spec.py | 4 +- test/test_cursor.py | 2 +- test/test_data_lake.py | 2 +- test/test_database.py | 2 +- test/test_discovery_and_monitoring.py | 17 +- test/test_dns.py | 2 +- test/test_encryption.py | 2 +- test/test_examples.py | 2 +- test/test_fork.py | 2 +- test/test_grid_file.py | 2 +- test/test_gridfs.py | 3 +- test/test_gridfs_bucket.py | 3 +- test/test_heartbeat_monitoring.py | 3 +- test/test_index_management.py | 2 +- test/test_load_balancer.py | 4 +- test/test_mongos_load_balancing.py | 2 +- test/test_monitor.py | 2 +- test/test_monitoring.py | 2 +- test/test_objectid.py | 2 +- test/test_pooling.py | 3 +- test/test_read_concern.py | 2 +- test/test_read_preferences.py | 2 +- test/test_read_write_concern_spec.py | 2 +- test/test_replica_set_reconfig.py | 2 +- test/test_retryable_reads.py | 4 +- test/test_retryable_writes.py | 4 +- test/test_sdam_monitoring_spec.py | 2 +- test/test_server_selection.py | 15 +- test/test_server_selection_in_window.py | 5 +- test/test_session.py | 3 +- test/test_srv_polling.py | 3 +- test/test_ssl.py | 2 +- test/test_streaming_protocol.py | 2 +- test/test_topology.py | 3 +- test/test_transactions.py | 2 +- test/test_versioned_api_integration.py | 2 +- test/unified_format.py | 4 +- test/unified_format_shared.py | 2 +- test/utils.py | 1077 ++--------------- test/utils_selection_tests.py | 3 +- test/utils_shared.py | 705 +++++++++++ test/utils_spec_runner.py | 2 +- tools/synchro.py | 1 + 101 files changed, 1184 insertions(+), 1130 deletions(-) create mode 100644 test/asynchronous/utils.py create mode 100644 test/utils_shared.py diff --git a/test/asynchronous/test_async_cancellation.py b/test/asynchronous/test_async_cancellation.py index b7fde834a2..f450ea23cc 100644 --- a/test/asynchronous/test_async_cancellation.py +++ b/test/asynchronous/test_async_cancellation.py @@ -17,7 +17,8 @@ import asyncio import sys -from test.utils import async_get_pool, delay, one +from test.asynchronous.utils import async_get_pool +from test.utils_shared import delay, one sys.path[0:0] = [""] diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py index 7172152d69..904674db16 100644 --- a/test/asynchronous/test_auth.py +++ b/test/asynchronous/test_auth.py @@ -30,7 +30,7 @@ async_client_context, unittest, ) -from test.utils import AllowListEventListener, delay, ignore_deprecations +from test.utils_shared import AllowListEventListener, delay, ignore_deprecations import pytest diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 86568b666b..5573c3987f 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -24,7 +24,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, remove_all_users, unittest -from test.utils import async_wait_until +from test.utils_shared import async_wait_until from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 08da00cc1e..4025c13730 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -36,7 +36,7 @@ unittest, ) from test.asynchronous.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, EventListener, OvertCommandListener, diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index acc815c8a4..f9678b11e2 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -60,14 +60,16 @@ unittest, ) from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.asynchronous.utils import ( + async_get_pool, + async_wait_until, + asyncAssertRaisesExactly, +) from test.test_binary import BinaryData -from test.utils import ( +from test.utils_shared import ( NTHREADS, CMAPListener, FunctionCallRecorder, - async_get_pool, - async_wait_until, - asyncAssertRaisesExactly, delay, gevent_monkey_patched, is_greenthread_patched, diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 282009f554..f8b9465b09 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -25,7 +25,7 @@ async_client_context, unittest, ) -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, ) from unittest.mock import patch diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py index d7fd85b168..05e548c79e 100644 --- a/test/asynchronous/test_collation.py +++ b/test/asynchronous/test_collation.py @@ -18,7 +18,7 @@ import functools import warnings from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import EventListener, OvertCommandListener +from test.utils_shared import EventListener, OvertCommandListener from typing import Any from pymongo.asynchronous.helpers import anext diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index beb58012a8..00ed020d88 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -21,6 +21,7 @@ import sys from codecs import utf_8_decode from collections import defaultdict +from test.asynchronous.utils import async_get_pool, async_is_mongos from typing import Any, Iterable, no_type_check from pymongo.asynchronous.database import AsyncDatabase @@ -33,12 +34,10 @@ AsyncUnitTest, async_client_context, ) -from test.utils import ( +from test.utils_shared import ( IMPOSSIBLE_WRITE_CONCERN, EventListener, OvertCommandListener, - async_get_pool, - async_is_mongos, async_wait_until, ) diff --git a/test/asynchronous/test_comment.py b/test/asynchronous/test_comment.py index be3626a8b8..d3ddaf2b65 100644 --- a/test/asynchronous/test_comment.py +++ b/test/asynchronous/test_comment.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] from asyncio import iscoroutinefunction from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from bson.dbref import DBRef from pymongo.asynchronous.command_cursor import AsyncCommandCursor diff --git a/test/asynchronous/test_concurrency.py b/test/asynchronous/test_concurrency.py index 1683b8413b..193ecf05c8 100644 --- a/test/asynchronous/test_concurrency.py +++ b/test/asynchronous/test_concurrency.py @@ -18,7 +18,7 @@ import asyncio import time from test.asynchronous import AsyncIntegrationTest, async_client_context -from test.utils import delay +from test.utils_shared import delay _IS_SYNC = False diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py index cdf4887ba3..359346d984 100644 --- a/test/asynchronous/test_connection_monitoring.py +++ b/test/asynchronous/test_connection_monitoring.py @@ -20,17 +20,15 @@ import sys import time from pathlib import Path +from test.asynchronous.utils import async_get_pool, async_get_pools sys.path[0:0] = [""] -from test.asynchronous import AsyncIntegrationTest, client_knobs, unittest +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest from test.asynchronous.pymongo_mocks import DummyMonitor from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator, SpecRunnerTask -from test.utils import ( +from test.utils_shared import ( CMAPListener, - async_client_context, - async_get_pool, - async_get_pools, async_wait_until, camel_to_snake, ) diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py index 7c11742a90..92c750c4fe 100644 --- a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -16,6 +16,7 @@ from __future__ import annotations import sys +from test.asynchronous.utils import async_ensure_all_connected sys.path[0:0] = [""] @@ -25,9 +26,8 @@ unittest, ) from test.asynchronous.helpers import async_repl_set_step_down -from test.utils import ( +from test.utils_shared import ( CMAPListener, - async_ensure_all_connected, ) from bson import SON diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index d843ffb4aa..90d5e7801e 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -31,7 +31,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, EventListener, OvertCommandListener, diff --git a/test/asynchronous/test_data_lake.py b/test/asynchronous/test_data_lake.py index e67782ad3f..689bf38534 100644 --- a/test/asynchronous/test_data_lake.py +++ b/test/asynchronous/test_data_lake.py @@ -25,7 +25,7 @@ from test.asynchronous import AsyncIntegrationTest, AsyncUnitTest, async_client_context, unittest from test.asynchronous.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, ) diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 2bbf763ab3..b2ddd4122d 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -26,7 +26,7 @@ from test import unittest from test.asynchronous import AsyncIntegrationTest, async_client_context from test.test_custom_types import DECIMAL_CODECOPTS -from test.utils import ( +from test.utils_shared import ( IMPOSSIBLE_WRITE_CONCERN, OvertCommandListener, async_wait_until, diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index c3c2bb1a6c..b3de2c5a4d 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -26,25 +26,32 @@ sys.path[0:0] = [""] -from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, AsyncUnitTest, unittest +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + AsyncUnitTest, + async_client_context, + unittest, +) from test.asynchronous.pymongo_mocks import DummyMonitor from test.asynchronous.unified_format import generate_test_classes -from test.utils import ( +from test.asynchronous.utils import ( + async_get_pool, +) +from test.utils_shared import ( CMAPListener, HeartbeatEventListener, HeartbeatEventsListListener, assertion_context, async_barrier_wait, - async_client_context, async_create_barrier, - async_get_pool, async_wait_until, server_name_to_type, ) from unittest.mock import patch from bson import Timestamp, json_util -from pymongo import AsyncMongoClient, common, monitoring +from pymongo import common, monitoring from pymongo.asynchronous.settings import TopologySettings from pymongo.asynchronous.topology import Topology, _ErrorContext from pymongo.errors import ( @@ -291,7 +298,7 @@ async def test_ignore_stale_connection_errors(self): if not _IS_SYNC and sys.version_info < (3, 11): self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") N_TASKS = 5 - barrier = async_create_barrier(N_TASKS, timeout=30) + barrier = async_create_barrier(N_TASKS) client = await self.async_rs_or_single_client(minPoolSize=N_TASKS) # Wait for initial discovery. diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py index e24e0fb5ce..a622062fec 100644 --- a/test/asynchronous/test_dns.py +++ b/test/asynchronous/test_dns.py @@ -29,7 +29,7 @@ async_client_context, unittest, ) -from test.utils import async_wait_until +from test.utils_shared import async_wait_until from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 335aa9d81c..000d98a111 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -64,7 +64,7 @@ KMIP_CREDS, LOCAL_MASTER_KEY, ) -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, OvertCommandListener, TopologyEventListener, diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py index 7fea9d41af..1312f1e215 100644 --- a/test/asynchronous/test_examples.py +++ b/test/asynchronous/test_examples.py @@ -26,7 +26,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import async_wait_until +from test.utils_shared import async_wait_until import pymongo from pymongo.asynchronous.helpers import anext diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index affdacde91..3f864367de 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from bson.objectid import ObjectId from gridfs.asynchronous.grid_file import ( diff --git a/test/asynchronous/test_gridfs.py b/test/asynchronous/test_gridfs.py index b1c1e754ff..f886601f36 100644 --- a/test/asynchronous/test_gridfs.py +++ b/test/asynchronous/test_gridfs.py @@ -28,7 +28,8 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import async_joinall, one +from test.asynchronous.utils import async_joinall +from test.utils_shared import one import gridfs from bson.binary import Binary diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py index 5d1cf5beff..29877ee9c4 100644 --- a/test/asynchronous/test_gridfs_bucket.py +++ b/test/asynchronous/test_gridfs_bucket.py @@ -29,7 +29,8 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import async_joinall, joinall, one +from test.asynchronous.utils import async_joinall +from test.utils_shared import one import gridfs from bson.binary import Binary diff --git a/test/asynchronous/test_heartbeat_monitoring.py b/test/asynchronous/test_heartbeat_monitoring.py index ff595a8144..aa8a205021 100644 --- a/test/asynchronous/test_heartbeat_monitoring.py +++ b/test/asynchronous/test_heartbeat_monitoring.py @@ -16,11 +16,12 @@ from __future__ import annotations import sys +from test.asynchronous.utils import AsyncMockPool sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, client_knobs, unittest -from test.utils import AsyncMockPool, HeartbeatEventListener, async_wait_until +from test.utils_shared import HeartbeatEventListener, async_wait_until from pymongo.asynchronous.monitor import Monitor from pymongo.errors import ConnectionFailure diff --git a/test/asynchronous/test_index_management.py b/test/asynchronous/test_index_management.py index c155047089..4b218de130 100644 --- a/test/asynchronous/test_index_management.py +++ b/test/asynchronous/test_index_management.py @@ -29,7 +29,7 @@ from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, unittest from test.asynchronous.unified_format import generate_test_classes -from test.utils import AllowListEventListener, OvertCommandListener +from test.utils_shared import AllowListEventListener, OvertCommandListener from pymongo.errors import OperationFailure from pymongo.operations import SearchIndexModel diff --git a/test/asynchronous/test_load_balancer.py b/test/asynchronous/test_load_balancer.py index fd50841c87..127fdfd24d 100644 --- a/test/asynchronous/test_load_balancer.py +++ b/test/asynchronous/test_load_balancer.py @@ -23,6 +23,7 @@ import threading from asyncio import Event from test.asynchronous.helpers import ConcurrentRunner, ExceptionCatchingTask +from test.asynchronous.utils import async_get_pool import pytest @@ -30,8 +31,7 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.asynchronous.unified_format import generate_test_classes -from test.utils import ( - async_get_pool, +from test.utils_shared import ( async_wait_until, create_async_event, ) diff --git a/test/asynchronous/test_max_staleness.py b/test/asynchronous/test_max_staleness.py index 7dbf17021f..b6e15f9158 100644 --- a/test/asynchronous/test_max_staleness.py +++ b/test/asynchronous/test_max_staleness.py @@ -28,7 +28,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest -from test.utils_selection_tests import create_selection_tests +from test.asynchronous.utils_selection_tests import create_selection_tests from pymongo.errors import ConfigurationError from pymongo.server_selectors import writable_server_selector diff --git a/test/asynchronous/test_mongos_load_balancing.py b/test/asynchronous/test_mongos_load_balancing.py index 0bc6a405f4..97170aa9e0 100644 --- a/test/asynchronous/test_mongos_load_balancing.py +++ b/test/asynchronous/test_mongos_load_balancing.py @@ -26,7 +26,7 @@ from test.asynchronous import AsyncMockClientTest, async_client_context, connected, unittest from test.asynchronous.pymongo_mocks import AsyncMockClient -from test.utils import async_wait_until +from test.utils_shared import async_wait_until from pymongo.errors import AutoReconnect, InvalidOperation from pymongo.server_selectors import writable_server_selector diff --git a/test/asynchronous/test_monitor.py b/test/asynchronous/test_monitor.py index 2705fbda3b..195f6f9fac 100644 --- a/test/asynchronous/test_monitor.py +++ b/test/asynchronous/test_monitor.py @@ -25,10 +25,10 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest -from test.utils import ( - ServerAndTopologyEventListener, +from test.asynchronous.utils import ( async_wait_until, ) +from test.utils_shared import ServerAndTopologyEventListener from pymongo.periodic_executor import _EXECUTORS diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index eaad60beac..a7d56a8cf7 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -29,7 +29,7 @@ sanitize_cmd, unittest, ) -from test.utils import ( +from test.utils_shared import ( EventListener, OvertCommandListener, async_wait_until, diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 812b5a48e0..8213c794fe 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -21,6 +21,7 @@ import socket import sys import time +from test.asynchronous.utils import async_get_pool, async_joinall from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -33,7 +34,7 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.asynchronous.helpers import ConcurrentRunner -from test.utils import async_get_pool, async_joinall, delay +from test.utils_shared import delay from pymongo.asynchronous.pool import Pool, PoolOptions from pymongo.socket_checker import SocketChecker diff --git a/test/asynchronous/test_read_concern.py b/test/asynchronous/test_read_concern.py index fbc07a5c36..8659bf80b2 100644 --- a/test/asynchronous/test_read_concern.py +++ b/test/asynchronous/test_read_concern.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from bson.son import SON from pymongo.errors import OperationFailure diff --git a/test/asynchronous/test_read_preferences.py b/test/asynchronous/test_read_preferences.py index 077bc21eaf..5bea174058 100644 --- a/test/asynchronous/test_read_preferences.py +++ b/test/asynchronous/test_read_preferences.py @@ -33,7 +33,7 @@ connected, unittest, ) -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, async_wait_until, one, diff --git a/test/asynchronous/test_read_write_concern_spec.py b/test/asynchronous/test_read_write_concern_spec.py index 3fb13ba194..86f79fd28d 100644 --- a/test/asynchronous/test_read_write_concern_spec.py +++ b/test/asynchronous/test_read_write_concern_spec.py @@ -25,7 +25,7 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.asynchronous.unified_format import generate_test_classes -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from pymongo import DESCENDING from pymongo.asynchronous.mongo_client import AsyncMongoClient diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py index bde7a9f2ee..10d9e738b4 100644 --- a/test/asynchronous/test_retryable_reads.py +++ b/test/asynchronous/test_retryable_reads.py @@ -19,6 +19,7 @@ import pprint import sys import threading +from test.asynchronous.utils import async_set_fail_point from pymongo.errors import AutoReconnect @@ -31,10 +32,9 @@ client_knobs, unittest, ) -from test.utils import ( +from test.utils_shared import ( CMAPListener, OvertCommandListener, - async_set_fail_point, ) from pymongo.monitoring import ( diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py index 738ce04192..2f6cb2b575 100644 --- a/test/asynchronous/test_retryable_writes.py +++ b/test/asynchronous/test_retryable_writes.py @@ -20,6 +20,7 @@ import pprint import sys import threading +from test.asynchronous.utils import async_set_fail_point sys.path[0:0] = [""] @@ -30,12 +31,11 @@ unittest, ) from test.asynchronous.helpers import client_knobs -from test.utils import ( +from test.utils_shared import ( CMAPListener, DeprecationFilter, EventListener, OvertCommandListener, - async_set_fail_point, ) from test.version import Version diff --git a/test/asynchronous/test_sdam_monitoring_spec.py b/test/asynchronous/test_sdam_monitoring_spec.py index 8b0ec63cfe..71ec6c6b46 100644 --- a/test/asynchronous/test_sdam_monitoring_spec.py +++ b/test/asynchronous/test_sdam_monitoring_spec.py @@ -25,7 +25,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest -from test.utils import ( +from test.utils_shared import ( ServerAndTopologyEventListener, async_wait_until, server_name_to_type, diff --git a/test/asynchronous/test_server_selection.py b/test/asynchronous/test_server_selection.py index f0451841cd..f98a05ee91 100644 --- a/test/asynchronous/test_server_selection.py +++ b/test/asynchronous/test_server_selection.py @@ -31,17 +31,18 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_wait_until from test.asynchronous.utils_selection_tests import ( create_selection_tests, - get_addresses, get_topology_settings_dict, +) +from test.utils_selection_tests_shared import ( + get_addresses, make_server_description, ) -from test.utils import ( - EventListener, +from test.utils_shared import ( FunctionCallRecorder, OvertCommandListener, - async_wait_until, ) _IS_SYNC = False diff --git a/test/asynchronous/test_server_selection_in_window.py b/test/asynchronous/test_server_selection_in_window.py index e2ae92a27c..3fe448d4dd 100644 --- a/test/asynchronous/test_server_selection_in_window.py +++ b/test/asynchronous/test_server_selection_in_window.py @@ -23,10 +23,9 @@ from test.asynchronous.helpers import ConcurrentRunner from test.asynchronous.utils_selection_tests import create_topology from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator -from test.utils import ( +from test.utils_shared import ( CMAPListener, OvertCommandListener, - async_get_pool, async_wait_until, ) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 568d392cd5..4431cbcb16 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -30,14 +30,13 @@ from test.asynchronous import ( AsyncIntegrationTest, - AsyncPyMongoTestCase, AsyncUnitTest, SkipTest, async_client_context, unittest, ) from test.asynchronous.helpers import client_knobs -from test.utils import ( +from test.utils_shared import ( EventListener, HeartbeatEventListener, OvertCommandListener, diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index 763c80e665..bf7807eb97 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -18,12 +18,13 @@ import asyncio import sys import time +from test.utils_shared import FunctionCallRecorder from typing import Any sys.path[0:0] = [""] from test.asynchronous import AsyncPyMongoTestCase, client_knobs, unittest -from test.utils import FunctionCallRecorder, async_wait_until +from test.asynchronous.utils import async_wait_until import pymongo from pymongo import common diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py index d50bb220b1..d920b77ac2 100644 --- a/test/asynchronous/test_ssl.py +++ b/test/asynchronous/test_ssl.py @@ -32,7 +32,7 @@ remove_all_users, unittest, ) -from test.utils import ( +from test.utils_shared import ( EventListener, OvertCommandListener, cat_files, diff --git a/test/asynchronous/test_streaming_protocol.py b/test/asynchronous/test_streaming_protocol.py index fd890d29fb..1206e7b2fa 100644 --- a/test/asynchronous/test_streaming_protocol.py +++ b/test/asynchronous/test_streaming_protocol.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import ( +from test.utils_shared import ( HeartbeatEventListener, ServerEventListener, async_wait_until, diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 5f75746a4d..884110cd45 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -24,7 +24,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, async_wait_until, ) diff --git a/test/asynchronous/test_versioned_api_integration.py b/test/asynchronous/test_versioned_api_integration.py index 7e9a79da90..46e62d5c14 100644 --- a/test/asynchronous/test_versioned_api_integration.py +++ b/test/asynchronous/test_versioned_api_integration.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from pymongo.server_api import ServerApi diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index ce0b9979e2..886b31e4a6 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -35,6 +35,7 @@ client_knobs, unittest, ) +from test.asynchronous.utils import async_get_pool from test.asynchronous.utils_spec_runner import SpecRunnerTask from test.unified_format_shared import ( KMS_TLS_OPTS, @@ -49,8 +50,7 @@ parse_collection_or_database_options, with_metaclass, ) -from test.utils import ( - async_get_pool, +from test.utils_shared import ( async_wait_until, camel_to_snake, camel_to_snake_args, diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py new file mode 100644 index 0000000000..4b68595397 --- /dev/null +++ b/test/asynchronous/utils.py @@ -0,0 +1,211 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing pymongo that require synchronization.""" +from __future__ import annotations + +import asyncio +import contextlib +import random +import threading # Used in the synchronized version of this file +import time +from asyncio import iscoroutinefunction + +from bson.son import SON +from pymongo import AsyncMongoClient +from pymongo.errors import ConfigurationError +from pymongo.hello import HelloCompat +from pymongo.lock import _async_create_lock +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration + +_IS_SYNC = False + + +async def async_get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = await client._get_topology() + server = await topology._select_server(writable_server_selector, _Op.TEST) + return server.pool + + +async def async_get_pools(client): + """Get all pools.""" + return [ + server.pool + for server in await (await client._get_topology()).select_servers( + any_server_selector, _Op.TEST + ) + ] + + +async def async_wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. + + E.g.: + + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') + + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). + + Returns the predicate's first true value. + """ + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + if iscoroutinefunction(predicate): + retval = await predicate() + else: + retval = predicate() + if retval: + return retval + + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) + + await asyncio.sleep(interval) + + +async def async_is_mongos(client): + res = await client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" + + +async def async_ensure_all_connected(client: AsyncMongoClient) -> None: + """Ensure that the client's connection pool has socket connections to all + members of a replica set. Raises ConfigurationError when called with a + non-replica set client. + + Depending on the use-case, the caller may need to clear any event listeners + that are configured on the client. + """ + hello: dict = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: + raise ConfigurationError("cluster is not a replica set") + + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} + + # Run hello until we have connected to each host at least once. + async def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = await client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + + async def predicate(): + return target_host_list == await discover() + + await async_wait_until(predicate, "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) + + +async def asyncAssertRaisesExactly(cls, fn, *args, **kwargs): + """ + Unlike the standard assertRaises, this checks that a function raises a + specific class of exception, and not a subclass. E.g., check that + MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. + """ + try: + await fn(*args, **kwargs) + except Exception as e: + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" + else: + raise AssertionError("%s not raised" % cls) + + +async def async_set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await client.admin.command(cmd) + + +async def async_joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + if _IS_SYNC: + for t in tasks: + t.join(300) + assert not t.is_alive(), "Thread %s hung" % t + else: + await asyncio.wait([t.task for t in tasks if t is not None], timeout=300) + + +class AsyncMockConnection: + def __init__(self): + self.cancel_context = _CancellationContext() + self.more_to_come = False + self.id = random.randint(0, 100) + + def close_conn(self, reason): + pass + + def __aenter__(self): + return self + + def __aexit__(self, exc_type, exc_val, exc_tb): + pass + + +class AsyncMockPool: + def __init__(self, address, options, handshake=True, client_id=None): + self.gen = _PoolGeneration() + self._lock = _async_create_lock() + self.opts = options + self.operation_count = 0 + self.conns = [] + + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) + + @contextlib.asynccontextmanager + async def checkout(self, handler=None): + yield AsyncMockConnection() + + async def checkin(self, *args, **kwargs): + pass + + async def _reset(self, service_id=None): + async with self._lock: + self.gen.inc(service_id) + + async def ready(self): + pass + + async def reset(self, service_id=None, interrupt_connections=False): + await self._reset() + + async def reset_without_pause(self): + await self._reset() + + async def close(self): + await self._reset() + + async def update_is_writable(self, is_writable): + pass + + async def remove_stale_sockets(self, *args, **kwargs): + pass diff --git a/test/asynchronous/utils_selection_tests.py b/test/asynchronous/utils_selection_tests.py index 71e287569a..d6b92fadb4 100644 --- a/test/asynchronous/utils_selection_tests.py +++ b/test/asynchronous/utils_selection_tests.py @@ -19,17 +19,18 @@ import os import sys from test.asynchronous import AsyncPyMongoTestCase +from test.asynchronous.utils import AsyncMockPool sys.path[0:0] = [""] from test import unittest from test.pymongo_mocks import DummyMonitor -from test.utils import AsyncMockPool, parse_read_preference from test.utils_selection_tests_shared import ( get_addresses, get_topology_type_name, make_server_description, ) +from test.utils_shared import parse_read_preference from bson import json_util from pymongo.asynchronous.settings import TopologySettings diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index f1c6deb690..c83636a734 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -24,7 +24,7 @@ from collections import abc from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs from test.asynchronous.helpers import ConcurrentRunner -from test.utils import ( +from test.utils_shared import ( CMAPListener, CompareType, EventListener, diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 7a78f3d2f6..a5334d79bd 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -31,7 +31,7 @@ sys.path[0:0] = [""] from test.unified_format import generate_test_classes -from test.utils import EventListener, OvertCommandListener +from test.utils_shared import EventListener, OvertCommandListener from bson import SON from pymongo import MongoClient diff --git a/test/test_auth.py b/test/test_auth.py index 345d16121b..27f6743fae 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -30,7 +30,7 @@ client_context, unittest, ) -from test.utils import AllowListEventListener, delay, ignore_deprecations +from test.utils_shared import AllowListEventListener, delay, ignore_deprecations import pytest diff --git a/test/test_bulk.py b/test/test_bulk.py index 6a72bddfc0..8a863cc49b 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -24,7 +24,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, remove_all_users, unittest -from test.utils import wait_until +from test.utils_shared import wait_until from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 4ed21f55cf..e50f4667f6 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -36,7 +36,7 @@ unittest, ) from test.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, EventListener, OvertCommandListener, diff --git a/test/test_client.py b/test/test_client.py index 8e99866cc8..a340263937 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -61,17 +61,19 @@ from test.pymongo_mocks import MockClient from test.test_binary import BinaryData from test.utils import ( + assertRaisesExactly, + get_pool, + wait_until, +) +from test.utils_shared import ( NTHREADS, CMAPListener, FunctionCallRecorder, - assertRaisesExactly, delay, - get_pool, gevent_monkey_patched, is_greenthread_patched, lazy_client_trial, one, - wait_until, ) import bson diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index f8d92668ea..b00b2c1b03 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -25,7 +25,7 @@ client_context, unittest, ) -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, ) from unittest.mock import patch diff --git a/test/test_collation.py b/test/test_collation.py index 06436f0638..5425551dc6 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -18,7 +18,7 @@ import functools import warnings from test import IntegrationTest, client_context, unittest -from test.utils import EventListener, OvertCommandListener +from test.utils_shared import EventListener, OvertCommandListener from typing import Any from pymongo.collation import ( diff --git a/test/test_collection.py b/test/test_collection.py index 8a862646eb..75c11383d0 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -21,6 +21,7 @@ import sys from codecs import utf_8_decode from collections import defaultdict +from test.utils import get_pool, is_mongos from typing import Any, Iterable, no_type_check from pymongo.synchronous.database import Database @@ -33,12 +34,10 @@ client_context, unittest, ) -from test.utils import ( +from test.utils_shared import ( IMPOSSIBLE_WRITE_CONCERN, EventListener, OvertCommandListener, - get_pool, - is_mongos, wait_until, ) diff --git a/test/test_comment.py b/test/test_comment.py index 9f9bf98640..b6c17c14fe 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -22,7 +22,7 @@ sys.path[0:0] = [""] from asyncio import iscoroutinefunction from test import IntegrationTest, client_context, unittest -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from bson.dbref import DBRef from pymongo.operations import IndexModel diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py index 3987f2b68b..1405824453 100644 --- a/test/test_connection_monitoring.py +++ b/test/test_connection_monitoring.py @@ -20,17 +20,15 @@ import sys import time from pathlib import Path +from test.utils import get_pool, get_pools sys.path[0:0] = [""] -from test import IntegrationTest, client_knobs, unittest +from test import IntegrationTest, client_context, client_knobs, unittest from test.pymongo_mocks import DummyMonitor -from test.utils import ( +from test.utils_shared import ( CMAPListener, camel_to_snake, - client_context, - get_pool, - get_pools, wait_until, ) from test.utils_spec_runner import SpecRunnerThread, SpecTestCreator diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index 9cac633301..d923a477b5 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -16,6 +16,7 @@ from __future__ import annotations import sys +from test.utils import ensure_all_connected sys.path[0:0] = [""] @@ -25,9 +26,8 @@ unittest, ) from test.helpers import repl_set_step_down -from test.utils import ( +from test.utils_shared import ( CMAPListener, - ensure_all_connected, ) from bson import SON diff --git a/test/test_cursor.py b/test/test_cursor.py index 84e431f8cb..a9cbe99942 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -31,7 +31,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, EventListener, OvertCommandListener, diff --git a/test/test_data_lake.py b/test/test_data_lake.py index c8b76eb1ca..d6d2007007 100644 --- a/test/test_data_lake.py +++ b/test/test_data_lake.py @@ -25,7 +25,7 @@ from test import IntegrationTest, UnitTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, ) diff --git a/test/test_database.py b/test/test_database.py index 48cca921b1..4c09b421cf 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -25,7 +25,7 @@ from test import IntegrationTest, client_context, unittest from test.test_custom_types import DECIMAL_CODECOPTS -from test.utils import ( +from test.utils_shared import ( IMPOSSIBLE_WRITE_CONCERN, OvertCommandListener, wait_until, diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index bfe0b24387..00021310c9 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -26,25 +26,32 @@ sys.path[0:0] = [""] -from test import IntegrationTest, PyMongoTestCase, UnitTest, unittest +from test import ( + IntegrationTest, + PyMongoTestCase, + UnitTest, + client_context, + unittest, +) from test.pymongo_mocks import DummyMonitor from test.unified_format import generate_test_classes from test.utils import ( + get_pool, +) +from test.utils_shared import ( CMAPListener, HeartbeatEventListener, HeartbeatEventsListListener, assertion_context, barrier_wait, - client_context, create_barrier, - get_pool, server_name_to_type, wait_until, ) from unittest.mock import patch from bson import Timestamp, json_util -from pymongo import MongoClient, common, monitoring +from pymongo import common, monitoring from pymongo.errors import ( AutoReconnect, ConfigurationError, @@ -291,7 +298,7 @@ def test_ignore_stale_connection_errors(self): if not _IS_SYNC and sys.version_info < (3, 11): self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") N_TASKS = 5 - barrier = create_barrier(N_TASKS, timeout=30) + barrier = create_barrier(N_TASKS) client = self.rs_or_single_client(minPoolSize=N_TASKS) # Wait for initial discovery. diff --git a/test/test_dns.py b/test/test_dns.py index 6f4736fd5e..71326ae49e 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -29,7 +29,7 @@ client_context, unittest, ) -from test.utils import wait_until +from test.utils_shared import wait_until from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError diff --git a/test/test_encryption.py b/test/test_encryption.py index 9224310144..6efb167442 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -63,7 +63,7 @@ ) from test.test_bulk import BulkTestBase from test.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, OvertCommandListener, TopologyEventListener, diff --git a/test/test_examples.py b/test/test_examples.py index 9bcc276248..ef06a77b9a 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -26,7 +26,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import wait_until +from test.utils_shared import wait_until import pymongo from pymongo.errors import ConnectionFailure, OperationFailure diff --git a/test/test_fork.py b/test/test_fork.py index 1a89159435..fe88d778d2 100644 --- a/test/test_fork.py +++ b/test/test_fork.py @@ -24,7 +24,7 @@ sys.path[0:0] = [""] from test import IntegrationTest -from test.utils import is_greenthread_patched +from test.utils_shared import is_greenthread_patched from bson.objectid import ObjectId diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 6534bc11bf..0baeb5ae19 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -33,7 +33,7 @@ sys.path[0:0] = [""] -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from bson.objectid import ObjectId from gridfs.errors import NoFile diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 47e38141b2..75342ee437 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -28,7 +28,8 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import joinall, one +from test.utils import joinall +from test.utils_shared import one import gridfs from bson.binary import Binary diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index e7486cb237..d68c9f6ba2 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -29,7 +29,8 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import joinall, one +from test.utils import joinall +from test.utils_shared import one import gridfs from bson.binary import Binary diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index 0523d0ba4d..7864caf6e1 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -16,11 +16,12 @@ from __future__ import annotations import sys +from test.utils import MockPool sys.path[0:0] = [""] from test import IntegrationTest, client_knobs, unittest -from test.utils import HeartbeatEventListener, MockPool, wait_until +from test.utils_shared import HeartbeatEventListener, wait_until from pymongo.errors import ConnectionFailure from pymongo.hello import Hello, HelloCompat diff --git a/test/test_index_management.py b/test/test_index_management.py index e4b931cf00..3a2b17cd3d 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -29,7 +29,7 @@ from test import IntegrationTest, PyMongoTestCase, unittest from test.unified_format import generate_test_classes -from test.utils import AllowListEventListener, OvertCommandListener +from test.utils_shared import AllowListEventListener, OvertCommandListener from pymongo.errors import OperationFailure from pymongo.operations import SearchIndexModel diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 7db19b46b5..d7f1d596cc 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -23,6 +23,7 @@ import threading from asyncio import Event from test.helpers import ConcurrentRunner, ExceptionCatchingTask +from test.utils import get_pool import pytest @@ -30,9 +31,8 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( create_event, - get_pool, wait_until, ) diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index ca2f3cfd1e..8c31854343 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -26,7 +26,7 @@ from test import MockClientTest, client_context, connected, unittest from test.pymongo_mocks import MockClient -from test.utils import wait_until +from test.utils_shared import wait_until from pymongo.errors import AutoReconnect, InvalidOperation from pymongo.server_selectors import writable_server_selector diff --git a/test/test_monitor.py b/test/test_monitor.py index 0fb7eb9cae..25620a99e8 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -26,9 +26,9 @@ from test import IntegrationTest, client_context, connected, unittest from test.utils import ( - ServerAndTopologyEventListener, wait_until, ) +from test.utils_shared import ServerAndTopologyEventListener from pymongo.periodic_executor import _EXECUTORS diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 670558c0a0..ae3e50db77 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -29,7 +29,7 @@ sanitize_cmd, unittest, ) -from test.utils import ( +from test.utils_shared import ( EventListener, OvertCommandListener, wait_until, diff --git a/test/test_objectid.py b/test/test_objectid.py index 26670832f6..d7db7229ea 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -23,7 +23,7 @@ sys.path[0:0] = [""] from test import SkipTest, unittest -from test.utils import oid_generated_on_process +from test.utils_shared import oid_generated_on_process from bson.errors import InvalidId from bson.objectid import _MAX_COUNTER_VALUE, ObjectId diff --git a/test/test_pooling.py b/test/test_pooling.py index 1755365f80..44e8c4afe5 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -21,6 +21,7 @@ import socket import sys import time +from test.utils import get_pool, joinall from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -33,7 +34,7 @@ from test import IntegrationTest, client_context, unittest from test.helpers import ConcurrentRunner -from test.utils import delay, get_pool, joinall +from test.utils_shared import delay from pymongo.socket_checker import SocketChecker from pymongo.synchronous.pool import Pool, PoolOptions diff --git a/test/test_read_concern.py b/test/test_read_concern.py index 8ec9865eaa..62b2491475 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from bson.son import SON from pymongo.errors import OperationFailure diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index 0d38f3f00d..e754c896ad 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -33,7 +33,7 @@ connected, unittest, ) -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, one, wait_until, diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 8543991f72..383dc70902 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -25,7 +25,7 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from pymongo import DESCENDING from pymongo.errors import ( diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 4c23d71b69..3371543f27 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -21,7 +21,7 @@ from test import MockClientTest, client_context, client_knobs, unittest from test.pymongo_mocks import MockClient -from test.utils import wait_until +from test.utils_shared import wait_until from pymongo import ReadPreference from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 9c3f6b170f..7ae4c41e70 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -19,6 +19,7 @@ import pprint import sys import threading +from test.utils import set_fail_point from pymongo.errors import AutoReconnect @@ -31,10 +32,9 @@ client_knobs, unittest, ) -from test.utils import ( +from test.utils_shared import ( CMAPListener, OvertCommandListener, - set_fail_point, ) from pymongo.monitoring import ( diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 07bd1db0ba..b099820a45 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -20,6 +20,7 @@ import pprint import sys import threading +from test.utils import set_fail_point sys.path[0:0] = [""] @@ -30,12 +31,11 @@ unittest, ) from test.helpers import client_knobs -from test.utils import ( +from test.utils_shared import ( CMAPListener, DeprecationFilter, EventListener, OvertCommandListener, - set_fail_point, ) from test.version import Version diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index 6a53c062cc..2167e561cf 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -25,7 +25,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, client_knobs, unittest -from test.utils import ( +from test.utils_shared import ( ServerAndTopologyEventListener, server_name_to_type, wait_until, diff --git a/test/test_server_selection.py b/test/test_server_selection.py index 3e7f9a8671..aec8e2e47a 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -31,18 +31,19 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( - EventListener, - FunctionCallRecorder, - OvertCommandListener, - wait_until, -) +from test.utils import wait_until from test.utils_selection_tests import ( create_selection_tests, - get_addresses, get_topology_settings_dict, +) +from test.utils_selection_tests_shared import ( + get_addresses, make_server_description, ) +from test.utils_shared import ( + FunctionCallRecorder, + OvertCommandListener, +) _IS_SYNC = True diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 7ccd4b529e..4aad34050c 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -21,13 +21,12 @@ from pathlib import Path from test import IntegrationTest, client_context, unittest from test.helpers import ConcurrentRunner -from test.utils import ( +from test.utils_selection_tests import create_topology +from test.utils_shared import ( CMAPListener, OvertCommandListener, - get_pool, wait_until, ) -from test.utils_selection_tests import create_topology from test.utils_spec_runner import SpecTestCreator from pymongo.common import clean_node diff --git a/test/test_session.py b/test/test_session.py index e80ab41896..905539a1f8 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -30,14 +30,13 @@ from test import ( IntegrationTest, - PyMongoTestCase, SkipTest, UnitTest, client_context, unittest, ) from test.helpers import client_knobs -from test.utils import ( +from test.utils_shared import ( EventListener, HeartbeatEventListener, OvertCommandListener, diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 86fad6d90e..6812465074 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -18,12 +18,13 @@ import asyncio import sys import time +from test.utils_shared import FunctionCallRecorder from typing import Any sys.path[0:0] = [""] from test import PyMongoTestCase, client_knobs, unittest -from test.utils import FunctionCallRecorder, wait_until +from test.utils import wait_until import pymongo from pymongo import common diff --git a/test/test_ssl.py b/test/test_ssl.py index 7d6c3f7cd1..a66fe21be5 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -32,7 +32,7 @@ remove_all_users, unittest, ) -from test.utils import ( +from test.utils_shared import ( EventListener, OvertCommandListener, cat_files, diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 894e89e208..acf7610c94 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test.utils_shared import ( HeartbeatEventListener, ServerEventListener, wait_until, diff --git a/test/test_topology.py b/test/test_topology.py index 86aa87c2cc..22e94739ee 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -23,7 +23,8 @@ from test import client_knobs, unittest from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool, wait_until +from test.utils import MockPool +from test.utils_shared import wait_until from bson.objectid import ObjectId from pymongo import common diff --git a/test/test_transactions.py b/test/test_transactions.py index 7a8dcd0f00..80b3e3765e 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -24,7 +24,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test.utils_shared import ( OvertCommandListener, wait_until, ) diff --git a/test/test_versioned_api_integration.py b/test/test_versioned_api_integration.py index 502198576a..0066ecd977 100644 --- a/test/test_versioned_api_integration.py +++ b/test/test_versioned_api_integration.py @@ -21,7 +21,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import OvertCommandListener +from test.utils_shared import OvertCommandListener from pymongo.server_api import ServerApi diff --git a/test/unified_format.py b/test/unified_format.py index 682a6105f3..471a067bee 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -48,10 +48,10 @@ parse_collection_or_database_options, with_metaclass, ) -from test.utils import ( +from test.utils import get_pool +from test.utils_shared import ( camel_to_snake, camel_to_snake_args, - get_pool, parse_spec_options, prepare_spec_arguments, snake_to_camel, diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 0c685366f4..009c5c7e28 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -35,7 +35,7 @@ KMIP_CREDS, LOCAL_MASTER_KEY, ) -from test.utils import CMAPListener, camel_to_snake, parse_collection_options +from test.utils_shared import CMAPListener, camel_to_snake, parse_collection_options from typing import Any, Union from bson import ( diff --git a/test/utils.py b/test/utils.py index ae316d0387..1459a8fba7 100644 --- a/test/utils.py +++ b/test/utils.py @@ -12,476 +12,76 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for testing pymongo""" +"""Utilities for testing pymongo that require synchronization.""" from __future__ import annotations import asyncio import contextlib -import copy -import functools -import os import random -import re -import shutil -import sys -import threading +import threading # Used in the synchronized version of this file import time -import unittest -import warnings from asyncio import iscoroutinefunction -from collections import abc, defaultdict -from functools import partial -from test import client_context, db_pwd, db_user -from test.asynchronous import async_client_context -from typing import Any, List -from bson import json_util -from bson.objectid import ObjectId from bson.son import SON -from pymongo import AsyncMongoClient, monitoring, operations, read_preferences -from pymongo._asyncio_task import create_task -from pymongo.cursor_shared import CursorType -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo import MongoClient +from pymongo.errors import ConfigurationError from pymongo.hello import HelloCompat -from pymongo.helpers_shared import _SENSITIVE_COMMANDS -from pymongo.lock import _async_create_lock, _create_lock -from pymongo.monitoring import ( - ConnectionCheckedInEvent, - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutStartedEvent, - ConnectionClosedEvent, - ConnectionCreatedEvent, - ConnectionReadyEvent, - PoolClearedEvent, - PoolClosedEvent, - PoolCreatedEvent, - PoolReadyEvent, -) +from pymongo.lock import _create_lock from pymongo.operations import _Op -from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import any_server_selector, writable_server_selector -from pymongo.server_type import SERVER_TYPE -from pymongo.synchronous.collection import ReturnDocument -from pymongo.synchronous.mongo_client import MongoClient from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration -from pymongo.uri_parser import parse_uri -from pymongo.write_concern import WriteConcern -IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) +_IS_SYNC = True -class BaseListener: - def __init__(self): - self.events = [] - - def reset(self): - self.events = [] - - def add_event(self, event): - self.events.append(event) - - def event_count(self, event_type): - return len(self.events_by_type(event_type)) - - def events_by_type(self, event_type): - """Return the matching events by event class. - - event_type can be a single class or a tuple of classes. - """ - return self.matching(lambda e: isinstance(e, event_type)) - - def matching(self, matcher): - """Return the matching events.""" - return [event for event in self.events[:] if matcher(event)] - - def wait_for_event(self, event, count): - """Wait for a number of events to be published, or fail.""" - wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") - - async def async_wait_for_event(self, event, count): - """Wait for a number of events to be published, or fail.""" - await async_wait_until( - lambda: self.event_count(event) >= count, f"find {count} {event} event(s)" - ) - - -class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): - def connection_created(self, event): - assert isinstance(event, ConnectionCreatedEvent) - self.add_event(event) - - def connection_ready(self, event): - assert isinstance(event, ConnectionReadyEvent) - self.add_event(event) - - def connection_closed(self, event): - assert isinstance(event, ConnectionClosedEvent) - self.add_event(event) - - def connection_check_out_started(self, event): - assert isinstance(event, ConnectionCheckOutStartedEvent) - self.add_event(event) - - def connection_check_out_failed(self, event): - assert isinstance(event, ConnectionCheckOutFailedEvent) - self.add_event(event) - - def connection_checked_out(self, event): - assert isinstance(event, ConnectionCheckedOutEvent) - self.add_event(event) - - def connection_checked_in(self, event): - assert isinstance(event, ConnectionCheckedInEvent) - self.add_event(event) - - def pool_created(self, event): - assert isinstance(event, PoolCreatedEvent) - self.add_event(event) - - def pool_ready(self, event): - assert isinstance(event, PoolReadyEvent) - self.add_event(event) - - def pool_cleared(self, event): - assert isinstance(event, PoolClearedEvent) - self.add_event(event) - - def pool_closed(self, event): - assert isinstance(event, PoolClosedEvent) - self.add_event(event) - - -class EventListener(BaseListener, monitoring.CommandListener): - def __init__(self): - super().__init__() - self.results = defaultdict(list) - - @property - def started_events(self) -> List[monitoring.CommandStartedEvent]: - return self.results["started"] - - @property - def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: - return self.results["succeeded"] - - @property - def failed_events(self) -> List[monitoring.CommandFailedEvent]: - return self.results["failed"] - - def started(self, event: monitoring.CommandStartedEvent) -> None: - self.started_events.append(event) - self.add_event(event) - - def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: - self.succeeded_events.append(event) - self.add_event(event) - - def failed(self, event: monitoring.CommandFailedEvent) -> None: - self.failed_events.append(event) - self.add_event(event) - - def started_command_names(self) -> List[str]: - """Return list of command names started.""" - return [event.command_name for event in self.started_events] - - def reset(self) -> None: - """Reset the state of this listener.""" - self.results.clear() - super().reset() - - -class TopologyEventListener(monitoring.TopologyListener): - def __init__(self): - self.results = defaultdict(list) - - def closed(self, event): - self.results["closed"].append(event) - - def description_changed(self, event): - self.results["description_changed"].append(event) - - def opened(self, event): - self.results["opened"].append(event) - - def reset(self): - """Reset the state of this listener.""" - self.results.clear() - - -class AllowListEventListener(EventListener): - def __init__(self, *commands): - self.commands = set(commands) - super().__init__() - - def started(self, event): - if event.command_name in self.commands: - super().started(event) - - def succeeded(self, event): - if event.command_name in self.commands: - super().succeeded(event) - - def failed(self, event): - if event.command_name in self.commands: - super().failed(event) - - -class OvertCommandListener(EventListener): - """A CommandListener that ignores sensitive commands.""" - - ignore_list_collections = False - - def started(self, event): - if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super().started(event) - - def succeeded(self, event): - if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super().succeeded(event) - - def failed(self, event): - if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super().failed(event) - - -class _ServerEventListener: - """Listens to all events.""" - - def __init__(self): - self.results = [] - - def opened(self, event): - self.results.append(event) - - def description_changed(self, event): - self.results.append(event) - - def closed(self, event): - self.results.append(event) - - def matching(self, matcher): - """Return the matching events.""" - results = self.results[:] - return [event for event in results if matcher(event)] - - def reset(self): - self.results = [] - - -class ServerEventListener(_ServerEventListener, monitoring.ServerListener): - """Listens to Server events.""" - - -class ServerAndTopologyEventListener( # type: ignore[misc] - ServerEventListener, monitoring.TopologyListener -): - """Listens to Server and Topology events.""" - - -class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): - """Listens to only server heartbeat events.""" - - def started(self, event): - self.add_event(event) - - def succeeded(self, event): - self.add_event(event) - - def failed(self, event): - self.add_event(event) - - -class HeartbeatEventsListListener(HeartbeatEventListener): - """Listens to only server heartbeat events and publishes them to a provided list.""" - - def __init__(self, events): - super().__init__() - self.event_list = events - - def started(self, event): - self.add_event(event) - self.event_list.append("serverHeartbeatStartedEvent") - - def succeeded(self, event): - self.add_event(event) - self.event_list.append("serverHeartbeatSucceededEvent") - - def failed(self, event): - self.add_event(event) - self.event_list.append("serverHeartbeatFailedEvent") - - -class AsyncMockConnection: - def __init__(self): - self.cancel_context = _CancellationContext() - self.more_to_come = False - self.id = random.randint(0, 100) - - def close_conn(self, reason): - pass - - def __aenter__(self): - return self - - def __aexit__(self, exc_type, exc_val, exc_tb): - pass - - -class MockConnection: - def __init__(self): - self.cancel_context = _CancellationContext() - self.more_to_come = False - self.id = random.randint(0, 100) - - def close_conn(self, reason): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -class AsyncMockPool: - def __init__(self, address, options, handshake=True, client_id=None): - self.gen = _PoolGeneration() - self._lock = _async_create_lock() - self.opts = options - self.operation_count = 0 - self.conns = [] - - def stale_generation(self, gen, service_id): - return self.gen.stale(gen, service_id) - - @contextlib.asynccontextmanager - async def checkout(self, handler=None): - yield AsyncMockConnection() - - async def checkin(self, *args, **kwargs): - pass - - async def _reset(self, service_id=None): - async with self._lock: - self.gen.inc(service_id) - - async def ready(self): - pass - - async def reset(self, service_id=None, interrupt_connections=False): - await self._reset() - - async def reset_without_pause(self): - await self._reset() - - async def close(self): - await self._reset() - - async def update_is_writable(self, is_writable): - pass - - async def remove_stale_sockets(self, *args, **kwargs): - pass - - -class MockPool: - def __init__(self, address, options, handshake=True, client_id=None): - self.gen = _PoolGeneration() - self._lock = _create_lock() - self.opts = options - self.operation_count = 0 - self.conns = [] - - def stale_generation(self, gen, service_id): - return self.gen.stale(gen, service_id) - - def checkout(self, handler=None): - return MockConnection() - - def checkin(self, *args, **kwargs): - pass - - def _reset(self, service_id=None): - with self._lock: - self.gen.inc(service_id) - - def ready(self): - pass - - def reset(self, service_id=None, interrupt_connections=False): - self._reset() - - def reset_without_pause(self): - self._reset() - - def close(self): - self._reset() - - def update_is_writable(self, is_writable): - pass - - def remove_stale_sockets(self, *args, **kwargs): - pass - - -class ScenarioDict(dict): - """Dict that returns {} for any unknown key, recursively.""" - - def __init__(self, data): - def convert(v): - if isinstance(v, abc.Mapping): - return ScenarioDict(v) - if isinstance(v, (str, bytes)): - return v - if isinstance(v, abc.Sequence): - return [convert(item) for item in v] - return v - - dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) +def get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = client._get_topology() + server = topology._select_server(writable_server_selector, _Op.TEST) + return server.pool - def __getitem__(self, item): - try: - return dict.__getitem__(self, item) - except KeyError: - # Unlike a defaultdict, don't set the key, just return a dict. - return ScenarioDict({}) +def get_pools(client): + """Get all pools.""" + return [ + server.pool + for server in (client._get_topology()).select_servers(any_server_selector, _Op.TEST) + ] -class CompareType: - """Class that compares equal to any object of the given type(s).""" - def __init__(self, types): - self.types = types +def wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. - def __eq__(self, other): - return isinstance(other, self.types) + E.g.: + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') -class FunctionCallRecorder: - """Utility class to wrap a callable and record its invocations.""" + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). - def __init__(self, function): - self._function = function - self._call_list = [] + Returns the predicate's first true value. + """ + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + if iscoroutinefunction(predicate): + retval = predicate() + else: + retval = predicate() + if retval: + return retval - def __call__(self, *args, **kwargs): - self._call_list.append((args, kwargs)) - return self._function(*args, **kwargs) + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) - def reset(self): - """Wipes the call list.""" - self._call_list = [] + time.sleep(interval) - def call_list(self): - """Returns a copy of the call list.""" - return self._call_list[:] - @property - def call_count(self): - """Returns the number of times the function has been called.""" - return len(self._call_list) +def is_mongos(client): + res = client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" def ensure_all_connected(client: MongoClient) -> None: @@ -511,231 +111,17 @@ def discover(): return connected_host_list try: - wait_until(lambda: target_host_list == discover(), "connected to all hosts") - except AssertionError as exc: - raise AssertionError( - f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" - ) - - -async def async_ensure_all_connected(client: AsyncMongoClient) -> None: - """Ensure that the client's connection pool has socket connections to all - members of a replica set. Raises ConfigurationError when called with a - non-replica set client. - Depending on the use-case, the caller may need to clear any event listeners - that are configured on the client. - """ - hello: dict = await client.admin.command(HelloCompat.LEGACY_CMD) - if "setName" not in hello: - raise ConfigurationError("cluster is not a replica set") - - target_host_list = set(hello["hosts"] + hello.get("passives", [])) - connected_host_list = {hello["me"]} + def predicate(): + return target_host_list == discover() - # Run hello until we have connected to each host at least once. - async def discover(): - i = 0 - while i < 100 and connected_host_list != target_host_list: - hello: dict = await client.admin.command( - HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY - ) - connected_host_list.update([hello["me"]]) - i += 1 - return connected_host_list - - try: - - async def predicate(): - return target_host_list == await discover() - - await async_wait_until(predicate, "connected to all hosts") + wait_until(predicate, "connected to all hosts") except AssertionError as exc: raise AssertionError( f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" ) -def one(s): - """Get one element of a set""" - return next(iter(s)) - - -def oid_generated_on_process(oid): - """Makes a determination as to whether the given ObjectId was generated - by the current process, based on the 5-byte random number in the ObjectId. - """ - return ObjectId._random() == oid.binary[4:9] - - -def delay(sec): - return """function() { sleep(%f * 1000); return true; }""" % sec - - -def get_command_line(client): - command_line = client.admin.command("getCmdLineOpts") - assert command_line["ok"] == 1, "getCmdLineOpts() failed" - return command_line - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -def camel_to_upper_camel(camel): - return camel[0].upper() + camel[1:] - - -def camel_to_snake_args(arguments): - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - arguments[c2s] = arguments.pop(arg_name) - return arguments - - -def snake_to_camel(snake): - # Regex to convert snake_case to lowerCamelCase. - return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) - - -def parse_collection_options(opts): - if "readPreference" in opts: - opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - - if "writeConcern" in opts: - opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - - if "readConcern" in opts: - opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - - if "timeoutMS" in opts: - opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 - return opts - - -def server_started_with_option(client, cmdline_opt, config_opt): - """Check if the server was started with a particular option. - - :Parameters: - - `cmdline_opt`: The command line option (i.e. --nojournal) - - `config_opt`: The config file option (i.e. nojournal) - """ - command_line = get_command_line(client) - if "parsed" in command_line: - parsed = command_line["parsed"] - if config_opt in parsed: - return parsed[config_opt] - argv = command_line["argv"] - return cmdline_opt in argv - - -def server_started_with_auth(client): - try: - command_line = get_command_line(client) - except OperationFailure as e: - assert e.details is not None - msg = e.details.get("errmsg", "") - if e.code == 13 or "unauthorized" in msg or "login" in msg: - # Unauthorized. - return True - raise - - # MongoDB >= 2.0 - if "parsed" in command_line: - parsed = command_line["parsed"] - # MongoDB >= 2.6 - if "security" in parsed: - security = parsed["security"] - # >= rc3 - if "authorization" in security: - return security["authorization"] == "enabled" - # < rc3 - return security.get("auth", False) or bool(security.get("keyFile")) - return parsed.get("auth", False) or bool(parsed.get("keyFile")) - # Legacy - argv = command_line["argv"] - return "--auth" in argv or "--keyFile" in argv - - -def joinall(threads): - """Join threads with a 5-minute timeout, assert joins succeeded""" - for t in threads: - t.join(300) - assert not t.is_alive(), "Thread %s hung" % t - - -async def async_joinall(tasks): - """Join threads with a 5-minute timeout, assert joins succeeded""" - await asyncio.wait([t.task for t in tasks if t is not None], timeout=300) - - -def wait_until(predicate, success_description, timeout=10): - """Wait up to 10 seconds (by default) for predicate to be true. - - E.g.: - - wait_until(lambda: client.primary == ('a', 1), - 'connect to the primary') - - If the lambda-expression isn't true after 10 seconds, we raise - AssertionError("Didn't ever connect to the primary"). - - Returns the predicate's first true value. - """ - start = time.time() - interval = min(float(timeout) / 100, 0.1) - while True: - retval = predicate() - if retval: - return retval - - if time.time() - start > timeout: - raise AssertionError("Didn't ever %s" % success_description) - - time.sleep(interval) - - -async def async_wait_until(predicate, success_description, timeout=10): - """Wait up to 10 seconds (by default) for predicate to be true. - - E.g.: - - wait_until(lambda: client.primary == ('a', 1), - 'connect to the primary') - - If the lambda-expression isn't true after 10 seconds, we raise - AssertionError("Didn't ever connect to the primary"). - - Returns the predicate's first true value. - """ - start = time.time() - interval = min(float(timeout) / 100, 0.1) - while True: - if iscoroutinefunction(predicate): - retval = await predicate() - else: - retval = predicate() - if retval: - return retval - - if time.time() - start > timeout: - raise AssertionError("Didn't ever %s" % success_description) - - await asyncio.sleep(interval) - - -def is_mongos(client): - res = client.admin.command(HelloCompat.LEGACY_CMD) - return res.get("msg", "") == "isdbgrid" - - -async def async_is_mongos(client): - res = await client.admin.command(HelloCompat.LEGACY_CMD) - return res.get("msg", "") == "isdbgrid" - - def assertRaisesExactly(cls, fn, *args, **kwargs): """ Unlike the standard assertRaises, this checks that a function raises a @@ -750,347 +136,74 @@ def assertRaisesExactly(cls, fn, *args, **kwargs): raise AssertionError("%s not raised" % cls) -async def asyncAssertRaisesExactly(cls, fn, *args, **kwargs): - """ - Unlike the standard assertRaises, this checks that a function raises a - specific class of exception, and not a subclass. E.g., check that - MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. - """ - try: - await fn(*args, **kwargs) - except Exception as e: - assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" - else: - raise AssertionError("%s not raised" % cls) - - -@contextlib.contextmanager -def _ignore_deprecations(): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - yield - - -def ignore_deprecations(wrapped=None): - """A context manager or a decorator.""" - if wrapped: - if iscoroutinefunction(wrapped): - - @functools.wraps(wrapped) - async def wrapper(*args, **kwargs): - with _ignore_deprecations(): - return await wrapped(*args, **kwargs) - else: - - @functools.wraps(wrapped) - def wrapper(*args, **kwargs): - with _ignore_deprecations(): - return wrapped(*args, **kwargs) +def set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + client.admin.command(cmd) - return wrapper +def joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + if _IS_SYNC: + for t in tasks: + t.join(300) + assert not t.is_alive(), "Thread %s hung" % t else: - return _ignore_deprecations() - - -class DeprecationFilter: - def __init__(self, action="ignore"): - """Start filtering deprecations.""" - self.warn_context = warnings.catch_warnings() - self.warn_context.__enter__() - warnings.simplefilter(action, DeprecationWarning) - - def stop(self): - """Stop filtering deprecations.""" - self.warn_context.__exit__() # type: ignore - self.warn_context = None # type: ignore + asyncio.wait([t.task for t in tasks if t is not None], timeout=300) -def get_pool(client): - """Get the standalone, primary, or mongos pool.""" - topology = client._get_topology() - server = topology._select_server(writable_server_selector, _Op.TEST) - return server.pool - - -async def async_get_pool(client): - """Get the standalone, primary, or mongos pool.""" - topology = await client._get_topology() - server = await topology._select_server(writable_server_selector, _Op.TEST) - return server.pool - - -def get_pools(client): - """Get all pools.""" - return [ - server.pool - for server in client._get_topology().select_servers(any_server_selector, _Op.TEST) - ] - - -async def async_get_pools(client): - """Get all pools.""" - return [ - server.pool - for server in await (await client._get_topology()).select_servers( - any_server_selector, _Op.TEST - ) - ] - - -# Constants for run_threads and lazy_client_trial. -NTRIALS = 5 -NTHREADS = 10 - - -def run_threads(collection, target): - """Run a target function in many threads. - - target is a function taking a Collection and an integer. - """ - threads = [] - for i in range(NTHREADS): - bound_target = partial(target, collection, i) - threads.append(threading.Thread(target=bound_target)) - - for t in threads: - t.start() - - for t in threads: - t.join(60) - assert not t.is_alive() - - -@contextlib.contextmanager -def frequent_thread_switches(): - """Make concurrency bugs more likely to manifest.""" - interval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) - - try: - yield - finally: - sys.setswitchinterval(interval) - - -def lazy_client_trial(reset, target, test, get_client): - """Test concurrent operations on a lazily-connecting client. - - `reset` takes a collection and resets it for the next trial. - - `target` takes a lazily-connecting collection and an index from - 0 to NTHREADS, and performs some operation, e.g. an insert. - - `test` takes the lazily-connecting collection and asserts a - post-condition to prove `target` succeeded. - """ - collection = client_context.client.pymongo_test.test - - with frequent_thread_switches(): - for _i in range(NTRIALS): - reset(collection) - lazy_client = get_client() - lazy_collection = lazy_client.pymongo_test.test - run_threads(lazy_collection, target) - test(lazy_collection) - - -def gevent_monkey_patched(): - """Check if gevent's monkey patching is active.""" - try: - import socket - - import gevent.socket # type:ignore[import] - - return socket.socket is gevent.socket.socket - except ImportError: - return False - - -def eventlet_monkey_patched(): - """Check if eventlet's monkey patching is active.""" - import threading - - return threading.current_thread.__module__ == "eventlet.green.threading" - - -def is_greenthread_patched(): - return gevent_monkey_patched() or eventlet_monkey_patched() - - -def parse_read_preference(pref): - # Make first letter lowercase to match read_pref's modes. - mode_string = pref.get("mode", "primary") - mode_string = mode_string[:1].lower() + mode_string[1:] - mode = read_preferences.read_pref_mode_from_name(mode_string) - max_staleness = pref.get("maxStalenessSeconds", -1) - tag_sets = pref.get("tagSets") or pref.get("tag_sets") - return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness - ) - - -def server_name_to_type(name): - """Convert a ServerType name to the corresponding value. For SDAM tests.""" - # Special case, some tests in the spec include the PossiblePrimary - # type, but only single-threaded drivers need that type. We call - # possible primaries Unknown. - if name == "PossiblePrimary": - return SERVER_TYPE.Unknown - return getattr(SERVER_TYPE, name) - - -def cat_files(dest, *sources): - """Cat multiple files into dest.""" - with open(dest, "wb") as fdst: - for src in sources: - with open(src, "rb") as fsrc: - shutil.copyfileobj(fsrc, fdst) - - -@contextlib.contextmanager -def assertion_context(msg): - """A context manager that adds info to an assertion failure.""" - try: - yield - except AssertionError as exc: - raise AssertionError(f"{msg}: {exc}") - - -def parse_spec_options(opts): - if "readPreference" in opts: - opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - - if "writeConcern" in opts: - w_opts = opts.pop("writeConcern") - if "journal" in w_opts: - w_opts["j"] = w_opts.pop("journal") - if "wtimeoutMS" in w_opts: - w_opts["wtimeout"] = w_opts.pop("wtimeoutMS") - opts["write_concern"] = WriteConcern(**dict(w_opts)) - - if "readConcern" in opts: - opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - - if "timeoutMS" in opts: - assert isinstance(opts["timeoutMS"], int) - opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 - - if "maxTimeMS" in opts: - opts["max_time_ms"] = opts.pop("maxTimeMS") - - if "maxCommitTimeMS" in opts: - opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - - return dict(opts) - - -def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - # Named "key" instead not fieldName. - if arg_name == "fieldName": - arguments["key"] = arguments.pop(arg_name) - # Aggregate uses "batchSize", while find uses batch_size. - elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": - continue - elif arg_name == "timeoutMode": - raise unittest.SkipTest("PyMongo does not support timeoutMode") - # Requires boolean returnDocument. - elif arg_name == "returnDocument": - arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) - elif "bulk_write" in opname and (c2s == "requests" or c2s == "models"): - # Parse each request into a bulk write model. - requests = [] - for request in arguments[c2s]: - if "name" in request: - # CRUD v2 format - bulk_model = camel_to_upper_camel(request["name"]) - bulk_class = getattr(operations, bulk_model) - bulk_arguments = camel_to_snake_args(request["arguments"]) - else: - # Unified test format - bulk_model, spec = next(iter(request.items())) - bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) - bulk_arguments = camel_to_snake_args(spec) - requests.append(bulk_class(**dict(bulk_arguments))) - arguments[c2s] = requests - elif arg_name == "session": - arguments["session"] = entity_map[arguments["session"]] - elif opname == "open_download_stream" and arg_name == "id": - arguments["file_id"] = arguments.pop(arg_name) - elif opname not in ("find", "find_one") and c2s == "max_time_ms": - # find is the only method that accepts snake_case max_time_ms. - # All other methods take kwargs which must use the server's - # camelCase maxTimeMS. See PYTHON-1855. - arguments["maxTimeMS"] = arguments.pop("max_time_ms") - elif opname == "with_transaction" and arg_name == "callback": - if "operations" in arguments[arg_name]: - # CRUD v2 format - callback_ops = arguments[arg_name]["operations"] - else: - # Unified test format - callback_ops = arguments[arg_name] - arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) - elif opname == "drop_collection" and arg_name == "collection": - arguments["name_or_collection"] = arguments.pop(arg_name) - elif opname == "create_collection": - if arg_name == "collection": - arguments["name"] = arguments.pop(arg_name) - arguments["check_exists"] = False - # Any other arguments to create_collection are passed through - # **kwargs. - elif opname == "create_index" and arg_name == "keys": - arguments["keys"] = list(arguments.pop(arg_name).items()) - elif opname == "drop_index" and arg_name == "name": - arguments["index_or_name"] = arguments.pop(arg_name) - elif opname == "rename" and arg_name == "to": - arguments["new_name"] = arguments.pop(arg_name) - elif opname == "rename" and arg_name == "dropTarget": - arguments["dropTarget"] = arguments.pop(arg_name) - elif arg_name == "cursorType": - cursor_type = arguments.pop(arg_name) - if cursor_type == "tailable": - arguments["cursor_type"] = CursorType.TAILABLE - elif cursor_type == "tailableAwait": - arguments["cursor_type"] = CursorType.TAILABLE - else: - raise AssertionError(f"Unsupported cursorType: {cursor_type}") - else: - arguments[c2s] = arguments.pop(arg_name) - +class MockConnection: + def __init__(self): + self.cancel_context = _CancellationContext() + self.more_to_come = False + self.id = random.randint(0, 100) -def set_fail_point(client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - client.admin.command(cmd) + def close_conn(self, reason): + pass + def __enter__(self): + return self -async def async_set_fail_point(client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - await client.admin.command(cmd) + def __exit__(self, exc_type, exc_val, exc_tb): + pass -def create_async_event(): - return asyncio.Event() +class MockPool: + def __init__(self, address, options, handshake=True, client_id=None): + self.gen = _PoolGeneration() + self._lock = _create_lock() + self.opts = options + self.operation_count = 0 + self.conns = [] + def stale_generation(self, gen, service_id): + return self.gen.stale(gen, service_id) -def create_event(): - return threading.Event() + @contextlib.contextmanager + def checkout(self, handler=None): + yield MockConnection() + def checkin(self, *args, **kwargs): + pass -def async_create_barrier(N_TASKS, timeout: float | None = None): - return asyncio.Barrier(N_TASKS) + def _reset(self, service_id=None): + with self._lock: + self.gen.inc(service_id) + def ready(self): + pass -def create_barrier(N_TASKS, timeout: float | None = None): - return threading.Barrier(N_TASKS, timeout=timeout) + def reset(self, service_id=None, interrupt_connections=False): + self._reset() + def reset_without_pause(self): + self._reset() -async def async_barrier_wait(barrier, timeout: float | None = None): - await asyncio.wait_for(barrier.wait(), timeout=timeout) + def close(self): + self._reset() + def update_is_writable(self, is_writable): + pass -def barrier_wait(barrier, timeout: float | None = None): - barrier.wait() + def remove_stale_sockets(self, *args, **kwargs): + pass diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index 9667ea701b..2772f06070 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -19,17 +19,18 @@ import os import sys from test import PyMongoTestCase +from test.utils import MockPool sys.path[0:0] = [""] from test import unittest from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool, parse_read_preference from test.utils_selection_tests_shared import ( get_addresses, get_topology_type_name, make_server_description, ) +from test.utils_shared import parse_read_preference from bson import json_util from pymongo.common import HEARTBEAT_FREQUENCY diff --git a/test/utils_shared.py b/test/utils_shared.py new file mode 100644 index 0000000000..2c52445968 --- /dev/null +++ b/test/utils_shared.py @@ -0,0 +1,705 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared utilities for testing pymongo""" +from __future__ import annotations + +import asyncio +import contextlib +import copy +import functools +import random +import re +import shutil +import sys +import threading +import unittest +import warnings +from asyncio import iscoroutinefunction +from collections import abc, defaultdict +from functools import partial +from test import client_context +from test.asynchronous.utils import async_wait_until +from test.utils import wait_until +from typing import List + +from bson.objectid import ObjectId +from pymongo import monitoring, operations, read_preferences +from pymongo.cursor_shared import CursorType +from pymongo.errors import OperationFailure +from pymongo.helpers_shared import _SENSITIVE_COMMANDS +from pymongo.lock import _async_create_lock, _create_lock +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_concern import ReadConcern +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration +from pymongo.write_concern import WriteConcern + +IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) + + +class BaseListener: + def __init__(self): + self.events = [] + + def reset(self): + self.events = [] + + def add_event(self, event): + self.events.append(event) + + def event_count(self, event_type): + return len(self.events_by_type(event_type)) + + def events_by_type(self, event_type): + """Return the matching events by event class. + + event_type can be a single class or a tuple of classes. + """ + return self.matching(lambda e: isinstance(e, event_type)) + + def matching(self, matcher): + """Return the matching events.""" + return [event for event in self.events[:] if matcher(event)] + + def wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") + + async def async_wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + await async_wait_until( + lambda: self.event_count(event) >= count, f"find {count} {event} event(s)" + ) + + +class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): + def connection_created(self, event): + assert isinstance(event, ConnectionCreatedEvent) + self.add_event(event) + + def connection_ready(self, event): + assert isinstance(event, ConnectionReadyEvent) + self.add_event(event) + + def connection_closed(self, event): + assert isinstance(event, ConnectionClosedEvent) + self.add_event(event) + + def connection_check_out_started(self, event): + assert isinstance(event, ConnectionCheckOutStartedEvent) + self.add_event(event) + + def connection_check_out_failed(self, event): + assert isinstance(event, ConnectionCheckOutFailedEvent) + self.add_event(event) + + def connection_checked_out(self, event): + assert isinstance(event, ConnectionCheckedOutEvent) + self.add_event(event) + + def connection_checked_in(self, event): + assert isinstance(event, ConnectionCheckedInEvent) + self.add_event(event) + + def pool_created(self, event): + assert isinstance(event, PoolCreatedEvent) + self.add_event(event) + + def pool_ready(self, event): + assert isinstance(event, PoolReadyEvent) + self.add_event(event) + + def pool_cleared(self, event): + assert isinstance(event, PoolClearedEvent) + self.add_event(event) + + def pool_closed(self, event): + assert isinstance(event, PoolClosedEvent) + self.add_event(event) + + +class EventListener(BaseListener, monitoring.CommandListener): + def __init__(self): + super().__init__() + self.results = defaultdict(list) + + @property + def started_events(self) -> List[monitoring.CommandStartedEvent]: + return self.results["started"] + + @property + def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: + return self.results["succeeded"] + + @property + def failed_events(self) -> List[monitoring.CommandFailedEvent]: + return self.results["failed"] + + def started(self, event: monitoring.CommandStartedEvent) -> None: + self.started_events.append(event) + self.add_event(event) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + self.succeeded_events.append(event) + self.add_event(event) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + self.failed_events.append(event) + self.add_event(event) + + def started_command_names(self) -> List[str]: + """Return list of command names started.""" + return [event.command_name for event in self.started_events] + + def reset(self) -> None: + """Reset the state of this listener.""" + self.results.clear() + super().reset() + + +class TopologyEventListener(monitoring.TopologyListener): + def __init__(self): + self.results = defaultdict(list) + + def closed(self, event): + self.results["closed"].append(event) + + def description_changed(self, event): + self.results["description_changed"].append(event) + + def opened(self, event): + self.results["opened"].append(event) + + def reset(self): + """Reset the state of this listener.""" + self.results.clear() + + +class AllowListEventListener(EventListener): + def __init__(self, *commands): + self.commands = set(commands) + super().__init__() + + def started(self, event): + if event.command_name in self.commands: + super().started(event) + + def succeeded(self, event): + if event.command_name in self.commands: + super().succeeded(event) + + def failed(self, event): + if event.command_name in self.commands: + super().failed(event) + + +class OvertCommandListener(EventListener): + """A CommandListener that ignores sensitive commands.""" + + ignore_list_collections = False + + def started(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().started(event) + + def succeeded(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().succeeded(event) + + def failed(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().failed(event) + + +class _ServerEventListener: + """Listens to all events.""" + + def __init__(self): + self.results = [] + + def opened(self, event): + self.results.append(event) + + def description_changed(self, event): + self.results.append(event) + + def closed(self, event): + self.results.append(event) + + def matching(self, matcher): + """Return the matching events.""" + results = self.results[:] + return [event for event in results if matcher(event)] + + def reset(self): + self.results = [] + + +class ServerEventListener(_ServerEventListener, monitoring.ServerListener): + """Listens to Server events.""" + + +class ServerAndTopologyEventListener( # type: ignore[misc] + ServerEventListener, monitoring.TopologyListener +): + """Listens to Server and Topology events.""" + + +class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): + """Listens to only server heartbeat events.""" + + def started(self, event): + self.add_event(event) + + def succeeded(self, event): + self.add_event(event) + + def failed(self, event): + self.add_event(event) + + +class HeartbeatEventsListListener(HeartbeatEventListener): + """Listens to only server heartbeat events and publishes them to a provided list.""" + + def __init__(self, events): + super().__init__() + self.event_list = events + + def started(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatStartedEvent") + + def succeeded(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatSucceededEvent") + + def failed(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatFailedEvent") + + +class ScenarioDict(dict): + """Dict that returns {} for any unknown key, recursively.""" + + def __init__(self, data): + def convert(v): + if isinstance(v, abc.Mapping): + return ScenarioDict(v) + if isinstance(v, (str, bytes)): + return v + if isinstance(v, abc.Sequence): + return [convert(item) for item in v] + return v + + dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) + + def __getitem__(self, item): + try: + return dict.__getitem__(self, item) + except KeyError: + # Unlike a defaultdict, don't set the key, just return a dict. + return ScenarioDict({}) + + +class CompareType: + """Class that compares equal to any object of the given type(s).""" + + def __init__(self, types): + self.types = types + + def __eq__(self, other): + return isinstance(other, self.types) + + +class FunctionCallRecorder: + """Utility class to wrap a callable and record its invocations.""" + + def __init__(self, function): + self._function = function + self._call_list = [] + + def __call__(self, *args, **kwargs): + self._call_list.append((args, kwargs)) + if iscoroutinefunction(self._function): + return self._function(*args, **kwargs) + else: + return self._function(*args, **kwargs) + + def reset(self): + """Wipes the call list.""" + self._call_list = [] + + def call_list(self): + """Returns a copy of the call list.""" + return self._call_list[:] + + @property + def call_count(self): + """Returns the number of times the function has been called.""" + return len(self._call_list) + + +def one(s): + """Get one element of a set""" + return next(iter(s)) + + +def oid_generated_on_process(oid): + """Makes a determination as to whether the given ObjectId was generated + by the current process, based on the 5-byte random number in the ObjectId. + """ + return ObjectId._random() == oid.binary[4:9] + + +def delay(sec): + return """function() { sleep(%f * 1000); return true; }""" % sec + + +def camel_to_snake(camel): + # Regex to convert CamelCase to snake_case. + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() + + +def camel_to_upper_camel(camel): + return camel[0].upper() + camel[1:] + + +def camel_to_snake_args(arguments): + for arg_name in list(arguments): + c2s = camel_to_snake(arg_name) + arguments[c2s] = arguments.pop(arg_name) + return arguments + + +def snake_to_camel(snake): + # Regex to convert snake_case to lowerCamelCase. + return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) + + +def parse_collection_options(opts): + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) + + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) + + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + return opts + + +@contextlib.contextmanager +def _ignore_deprecations(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + yield + + +def ignore_deprecations(wrapped=None): + """A context manager or a decorator.""" + if wrapped: + if iscoroutinefunction(wrapped): + + @functools.wraps(wrapped) + async def wrapper(*args, **kwargs): + with _ignore_deprecations(): + return await wrapped(*args, **kwargs) + else: + + @functools.wraps(wrapped) + def wrapper(*args, **kwargs): + with _ignore_deprecations(): + return wrapped(*args, **kwargs) + + return wrapper + + else: + return _ignore_deprecations() + + +class DeprecationFilter: + def __init__(self, action="ignore"): + """Start filtering deprecations.""" + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + warnings.simplefilter(action, DeprecationWarning) + + def stop(self): + """Stop filtering deprecations.""" + self.warn_context.__exit__() # type: ignore + self.warn_context = None # type: ignore + + +# Constants for run_threads and lazy_client_trial. +NTRIALS = 5 +NTHREADS = 10 + + +def run_threads(collection, target): + """Run a target function in many threads. + + target is a function taking a Collection and an integer. + """ + threads = [] + for i in range(NTHREADS): + bound_target = partial(target, collection, i) + threads.append(threading.Thread(target=bound_target)) + + for t in threads: + t.start() + + for t in threads: + t.join(60) + assert not t.is_alive() + + +@contextlib.contextmanager +def frequent_thread_switches(): + """Make concurrency bugs more likely to manifest.""" + interval = sys.getswitchinterval() + sys.setswitchinterval(1e-6) + + try: + yield + finally: + sys.setswitchinterval(interval) + + +def lazy_client_trial(reset, target, test, get_client): + """Test concurrent operations on a lazily-connecting client. + + `reset` takes a collection and resets it for the next trial. + + `target` takes a lazily-connecting collection and an index from + 0 to NTHREADS, and performs some operation, e.g. an insert. + + `test` takes the lazily-connecting collection and asserts a + post-condition to prove `target` succeeded. + """ + collection = client_context.client.pymongo_test.test + + with frequent_thread_switches(): + for _i in range(NTRIALS): + reset(collection) + lazy_client = get_client() + lazy_collection = lazy_client.pymongo_test.test + run_threads(lazy_collection, target) + test(lazy_collection) + + +def gevent_monkey_patched(): + """Check if gevent's monkey patching is active.""" + try: + import socket + + import gevent.socket # type:ignore[import] + + return socket.socket is gevent.socket.socket + except ImportError: + return False + + +def eventlet_monkey_patched(): + """Check if eventlet's monkey patching is active.""" + import threading + + return threading.current_thread.__module__ == "eventlet.green.threading" + + +def is_greenthread_patched(): + return gevent_monkey_patched() or eventlet_monkey_patched() + + +def parse_read_preference(pref): + # Make first letter lowercase to match read_pref's modes. + mode_string = pref.get("mode", "primary") + mode_string = mode_string[:1].lower() + mode_string[1:] + mode = read_preferences.read_pref_mode_from_name(mode_string) + max_staleness = pref.get("maxStalenessSeconds", -1) + tag_sets = pref.get("tagSets") or pref.get("tag_sets") + return read_preferences.make_read_preference( + mode, tag_sets=tag_sets, max_staleness=max_staleness + ) + + +def server_name_to_type(name): + """Convert a ServerType name to the corresponding value. For SDAM tests.""" + # Special case, some tests in the spec include the PossiblePrimary + # type, but only single-threaded drivers need that type. We call + # possible primaries Unknown. + if name == "PossiblePrimary": + return SERVER_TYPE.Unknown + return getattr(SERVER_TYPE, name) + + +def cat_files(dest, *sources): + """Cat multiple files into dest.""" + with open(dest, "wb") as fdst: + for src in sources: + with open(src, "rb") as fsrc: + shutil.copyfileobj(fsrc, fdst) + + +@contextlib.contextmanager +def assertion_context(msg): + """A context manager that adds info to an assertion failure.""" + try: + yield + except AssertionError as exc: + raise AssertionError(f"{msg}: {exc}") + + +def parse_spec_options(opts): + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) + + if "writeConcern" in opts: + w_opts = opts.pop("writeConcern") + if "journal" in w_opts: + w_opts["j"] = w_opts.pop("journal") + if "wtimeoutMS" in w_opts: + w_opts["wtimeout"] = w_opts.pop("wtimeoutMS") + opts["write_concern"] = WriteConcern(**dict(w_opts)) + + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + assert isinstance(opts["timeoutMS"], int) + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + + if "maxTimeMS" in opts: + opts["max_time_ms"] = opts.pop("maxTimeMS") + + if "maxCommitTimeMS" in opts: + opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") + + return dict(opts) + + +def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): + for arg_name in list(arguments): + c2s = camel_to_snake(arg_name) + # Named "key" instead not fieldName. + if arg_name == "fieldName": + arguments["key"] = arguments.pop(arg_name) + # Aggregate uses "batchSize", while find uses batch_size. + elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": + continue + elif arg_name == "timeoutMode": + raise unittest.SkipTest("PyMongo does not support timeoutMode") + # Requires boolean returnDocument. + elif arg_name == "returnDocument": + arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) + elif "bulk_write" in opname and (c2s == "requests" or c2s == "models"): + # Parse each request into a bulk write model. + requests = [] + for request in arguments[c2s]: + if "name" in request: + # CRUD v2 format + bulk_model = camel_to_upper_camel(request["name"]) + bulk_class = getattr(operations, bulk_model) + bulk_arguments = camel_to_snake_args(request["arguments"]) + else: + # Unified test format + bulk_model, spec = next(iter(request.items())) + bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) + bulk_arguments = camel_to_snake_args(spec) + requests.append(bulk_class(**dict(bulk_arguments))) + arguments[c2s] = requests + elif arg_name == "session": + arguments["session"] = entity_map[arguments["session"]] + elif opname == "open_download_stream" and arg_name == "id": + arguments["file_id"] = arguments.pop(arg_name) + elif opname not in ("find", "find_one") and c2s == "max_time_ms": + # find is the only method that accepts snake_case max_time_ms. + # All other methods take kwargs which must use the server's + # camelCase maxTimeMS. See PYTHON-1855. + arguments["maxTimeMS"] = arguments.pop("max_time_ms") + elif opname == "with_transaction" and arg_name == "callback": + if "operations" in arguments[arg_name]: + # CRUD v2 format + callback_ops = arguments[arg_name]["operations"] + else: + # Unified test format + callback_ops = arguments[arg_name] + arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) + elif opname == "drop_collection" and arg_name == "collection": + arguments["name_or_collection"] = arguments.pop(arg_name) + elif opname == "create_collection": + if arg_name == "collection": + arguments["name"] = arguments.pop(arg_name) + arguments["check_exists"] = False + # Any other arguments to create_collection are passed through + # **kwargs. + elif opname == "create_index" and arg_name == "keys": + arguments["keys"] = list(arguments.pop(arg_name).items()) + elif opname == "drop_index" and arg_name == "name": + arguments["index_or_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "to": + arguments["new_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "dropTarget": + arguments["dropTarget"] = arguments.pop(arg_name) + elif arg_name == "cursorType": + cursor_type = arguments.pop(arg_name) + if cursor_type == "tailable": + arguments["cursor_type"] = CursorType.TAILABLE + elif cursor_type == "tailableAwait": + arguments["cursor_type"] = CursorType.TAILABLE + else: + raise AssertionError(f"Unsupported cursorType: {cursor_type}") + else: + arguments[c2s] = arguments.pop(arg_name) + + +def create_async_event(): + return asyncio.Event() + + +def create_event(): + return threading.Event() + + +def async_create_barrier(n_tasks: int): + return asyncio.Barrier(n_tasks) + + +def create_barrier(n_tasks: int, timeout: float | None = None): + return threading.Barrier(n_tasks, timeout=timeout) + + +async def async_barrier_wait(barrier, timeout: float | None = None): + await asyncio.wait_for(barrier.wait(), timeout=timeout) + + +def barrier_wait(barrier, timeout: float | None = None): + barrier.wait(timeout=timeout) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index fe0ba6eb44..580e7cc120 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -24,7 +24,7 @@ from collections import abc from test import IntegrationTest, client_context, client_knobs from test.helpers import ConcurrentRunner -from test.utils import ( +from test.utils_shared import ( CMAPListener, CompareType, EventListener, diff --git a/tools/synchro.py b/tools/synchro.py index 42d5694f47..e65270733e 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -259,6 +259,7 @@ def async_only_test(f: str) -> bool: "test_versioned_api_integration.py", "unified_format.py", "utils_selection_tests.py", + "utils.py", ] From e48365c5f14b3f6bd4ba7c76648f2d9322653960 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 12 Mar 2025 11:21:19 -0700 Subject: [PATCH 1794/2111] PYTHON-5202 WaitQueueTimeoutError should not clear the pool (#2192) --- pymongo/asynchronous/topology.py | 3 +++ pymongo/synchronous/topology.py | 3 +++ test/asynchronous/test_client.py | 13 +++++++++++-- test/test_client.py | 13 +++++++++++-- 4 files changed, 28 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 76f0fb6cde..f00f62ffe5 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -41,6 +41,7 @@ OperationFailure, PyMongoError, ServerSelectionTimeoutError, + WaitQueueTimeoutError, WriteError, ) from pymongo.hello import Hello @@ -892,6 +893,8 @@ async def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None # Clear the pool. await server.reset(service_id) elif isinstance(error, ConnectionFailure): + if isinstance(error, WaitQueueTimeoutError): + return # "Client MUST replace the server's description with type Unknown # ... MUST NOT request an immediate check of the server." if not self._settings.load_balanced: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index ea0edae919..0af793a969 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -37,6 +37,7 @@ OperationFailure, PyMongoError, ServerSelectionTimeoutError, + WaitQueueTimeoutError, WriteError, ) from pymongo.hello import Hello @@ -890,6 +891,8 @@ def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: # Clear the pool. server.reset(service_id) elif isinstance(error, ConnectionFailure): + if isinstance(error, WaitQueueTimeoutError): + return # "Client MUST replace the server's description with type Unknown # ... MUST NOT request an immediate check of the server." if not self._settings.load_balanced: diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index f9678b11e2..f529dcce14 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -113,6 +113,7 @@ NetworkTimeout, OperationFailure, ServerSelectionTimeoutError, + WaitQueueTimeoutError, WriteConcernError, ) from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent @@ -1313,8 +1314,16 @@ async def test_server_selection_timeout(self): self.assertAlmostEqual(30, client.options.server_selection_timeout) async def test_waitQueueTimeoutMS(self): - client = await self.async_rs_or_single_client(waitQueueTimeoutMS=2000) - self.assertEqual((await async_get_pool(client)).opts.wait_queue_timeout, 2) + listener = CMAPListener() + client = await self.async_rs_or_single_client( + waitQueueTimeoutMS=10, maxPoolSize=1, event_listeners=[listener] + ) + pool = await async_get_pool(client) + self.assertEqual(pool.opts.wait_queue_timeout, 0.01) + async with pool.checkout(): + with self.assertRaises(WaitQueueTimeoutError): + await client.test.command("ping") + self.assertFalse(listener.events_by_type(monitoring.PoolClearedEvent)) async def test_socketKeepAlive(self): pool = await async_get_pool(self.client) diff --git a/test/test_client.py b/test/test_client.py index a340263937..e445fa632a 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -102,6 +102,7 @@ NetworkTimeout, OperationFailure, ServerSelectionTimeoutError, + WaitQueueTimeoutError, WriteConcernError, ) from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent @@ -1272,8 +1273,16 @@ def test_server_selection_timeout(self): self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): - client = self.rs_or_single_client(waitQueueTimeoutMS=2000) - self.assertEqual((get_pool(client)).opts.wait_queue_timeout, 2) + listener = CMAPListener() + client = self.rs_or_single_client( + waitQueueTimeoutMS=10, maxPoolSize=1, event_listeners=[listener] + ) + pool = get_pool(client) + self.assertEqual(pool.opts.wait_queue_timeout, 0.01) + with pool.checkout(): + with self.assertRaises(WaitQueueTimeoutError): + client.test.command("ping") + self.assertFalse(listener.events_by_type(monitoring.PoolClearedEvent)) def test_socketKeepAlive(self): pool = get_pool(self.client) From 6e5126d6bbb1a64f40b0b517c7a43901b474d8a9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 12 Mar 2025 15:47:56 -0500 Subject: [PATCH 1795/2111] PYTHON-5196 Convert OIDC tests to use new test scripts (#2194) --- .evergreen/config.yml | 153 --------------------- .evergreen/generated_configs/tasks.yml | 44 ++++++ .evergreen/generated_configs/variants.yml | 9 +- .evergreen/run-mongodb-oidc-remote-test.sh | 60 -------- .evergreen/run-mongodb-oidc-test.sh | 31 +---- .evergreen/run-tests.sh | 1 + .evergreen/scripts/generate_config.py | 19 ++- .evergreen/scripts/kms_tester.py | 21 +-- .evergreen/scripts/oidc_tester.py | 99 +++++++++++++ .evergreen/scripts/run_server.py | 5 + .evergreen/scripts/run_tests.py | 19 ++- .evergreen/scripts/setup_tests.py | 12 +- .evergreen/scripts/teardown_tests.py | 6 + .evergreen/scripts/utils.py | 9 +- CONTRIBUTING.md | 9 +- test/auth_oidc/test_auth_oidc.py | 5 + 16 files changed, 237 insertions(+), 265 deletions(-) delete mode 100755 .evergreen/run-mongodb-oidc-remote-test.sh create mode 100644 .evergreen/scripts/oidc_tester.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 54931dcb48..25ea2c4b90 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -289,28 +289,6 @@ functions: - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/run-atlas-tests.sh - "run oidc auth test with test credentials": - - command: subprocess.exec - type: test - params: - working_dir: "src" - binary: bash - include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - args: - - .evergreen/run-mongodb-oidc-test.sh - - "run oidc k8s auth test": - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: src - env: - OIDC_ENV: k8s - include_expansions_in_env: ["DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "K8S_VARIANT"] - args: - - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh - "cleanup": - command: subprocess.exec params: @@ -417,96 +395,6 @@ task_groups: tasks: - ".serverless" - - name: testazureoidc_task_group - setup_group: - - func: fetch source - - func: setup system - - command: subprocess.exec - params: - binary: bash - env: - AZUREOIDC_VMNAME_PREFIX: "PYTHON_DRIVER" - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/create-and-setup-vm.sh - teardown_task: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/delete-vm.sh - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - oidc-auth-test-azure - - - name: testgcpoidc_task_group - setup_group: - - func: fetch source - - func: setup system - - command: subprocess.exec - params: - binary: bash - env: - GCPOIDC_VMNAME_PREFIX: "PYTHON_DRIVER" - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/setup.sh - teardown_task: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/teardown.sh - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - oidc-auth-test-gcp - - - name: testk8soidc_task_group - setup_group: - - func: fetch source - - func: setup system - - command: ec2.assume_role - params: - role_arn: ${aws_test_secrets_role} - duration_seconds: 1800 - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/setup.sh - teardown_task: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/teardown.sh - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - oidc-auth-test-k8s - - - name: testoidc_task_group - setup_group: - - func: fetch source - - func: setup system - - func: "assume ec2 role" - - command: subprocess.exec - params: - binary: bash - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/setup.sh - teardown_task: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/teardown.sh - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - oidc-auth-test - - name: test_aws_lambda_task_group setup_group: - func: fetch source @@ -659,47 +547,6 @@ tasks: env: TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/test/lambda - - name: "oidc-auth-test" - commands: - - func: "run oidc auth test with test credentials" - - - name: "oidc-auth-test-azure" - commands: - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: src - env: - OIDC_ENV: azure - include_expansions_in_env: ["DRIVERS_TOOLS"] - args: - - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh - - - name: "oidc-auth-test-gcp" - commands: - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: src - env: - OIDC_ENV: gcp - include_expansions_in_env: ["DRIVERS_TOOLS"] - args: - - ${PROJECT_DIRECTORY}/.evergreen/run-mongodb-oidc-remote-test.sh - - - name: "oidc-auth-test-k8s" - commands: - - func: "run oidc k8s auth test" - vars: - K8S_VARIANT: eks - - func: "run oidc k8s auth test" - vars: - K8S_VARIANT: gke - - func: "run oidc k8s auth test" - vars: - K8S_VARIANT: aks # }}} - name: "coverage-report" tags: ["coverage"] diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 02ee29e6ed..9d52cf957d 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1042,6 +1042,50 @@ tasks: TEST_NAME: ocsp tags: [ocsp, ocsp-rsa] + # Oidc tests + - name: test-auth-oidc-default + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: default + tags: [auth_oidc] + - name: test-auth-oidc-azure + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: azure + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-gcp + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: gcp + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-eks + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: eks + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-aks + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: aks + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-gke + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: gke + tags: [auth_oidc, auth_oidc_remote] + # Server tests - name: test-4.0-standalone-auth-ssl-sync commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 80f08bc7a4..cf3e0cc903 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -920,24 +920,21 @@ buildvariants: # Oidc auth tests - name: auth-oidc-ubuntu-22 tasks: - - name: testoidc_task_group - - name: testazureoidc_task_group - - name: testgcpoidc_task_group - - name: testk8soidc_task_group + - name: .auth_oidc display_name: Auth OIDC Ubuntu-22 run_on: - ubuntu2204-small batchtime: 10080 - name: auth-oidc-macos tasks: - - name: testoidc_task_group + - name: .auth_oidc !.auth_oidc_remote display_name: Auth OIDC macOS run_on: - macos-14 batchtime: 10080 - name: auth-oidc-win64 tasks: - - name: testoidc_task_group + - name: .auth_oidc !.auth_oidc_remote display_name: Auth OIDC Win64 run_on: - windows-64-vsMulti-small diff --git a/.evergreen/run-mongodb-oidc-remote-test.sh b/.evergreen/run-mongodb-oidc-remote-test.sh deleted file mode 100755 index bb90bddf07..0000000000 --- a/.evergreen/run-mongodb-oidc-remote-test.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -set +x # Disable debug trace -set -eu - -echo "Running MONGODB-OIDC remote tests" - -OIDC_ENV=${OIDC_ENV:-"test"} - -# Make sure DRIVERS_TOOLS is set. -if [ -z "$DRIVERS_TOOLS" ]; then - echo "Must specify DRIVERS_TOOLS" - exit 1 -fi - -# Set up the remote files to test. -git add . -git commit -m "add files" || true -export TEST_TAR_FILE=/tmp/mongo-python-driver.tgz -git archive -o $TEST_TAR_FILE HEAD - -pushd $DRIVERS_TOOLS - -if [ $OIDC_ENV == "test" ]; then - echo "Test OIDC environment does not support remote test!" - exit 1 - -elif [ $OIDC_ENV == "azure" ]; then - export AZUREOIDC_DRIVERS_TAR_FILE=$TEST_TAR_FILE - export AZUREOIDC_TEST_CMD="OIDC_ENV=azure ./.evergreen/run-mongodb-oidc-test.sh" - bash ./.evergreen/auth_oidc/azure/run-driver-test.sh - -elif [ $OIDC_ENV == "gcp" ]; then - export GCPOIDC_DRIVERS_TAR_FILE=$TEST_TAR_FILE - export GCPOIDC_TEST_CMD="OIDC_ENV=gcp ./.evergreen/run-mongodb-oidc-test.sh" - bash ./.evergreen/auth_oidc/gcp/run-driver-test.sh - -elif [ $OIDC_ENV == "k8s" ]; then - # Make sure K8S_VARIANT is set. - if [ -z "$K8S_VARIANT" ]; then - echo "Must specify K8S_VARIANT" - popd - exit 1 - fi - - bash ./.evergreen/auth_oidc/k8s/setup-pod.sh - bash ./.evergreen/auth_oidc/k8s/run-self-test.sh - export K8S_DRIVERS_TAR_FILE=$TEST_TAR_FILE - export K8S_TEST_CMD="OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh" - source ./.evergreen/auth_oidc/k8s/secrets-export.sh # for MONGODB_URI - bash ./.evergreen/auth_oidc/k8s/run-driver-test.sh - bash ./.evergreen/auth_oidc/k8s/teardown-pod.sh - -else - echo "Unrecognized OIDC_ENV $OIDC_ENV" - pod - exit 1 -fi - -popd diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 759ac5d2bb..bd67106a36 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -3,31 +3,14 @@ set +x # Disable debug trace set -eu -echo "Running MONGODB-OIDC authentication tests" - -OIDC_ENV=${OIDC_ENV:-"test"} - -if [ $OIDC_ENV == "test" ]; then - # Make sure DRIVERS_TOOLS is set. - if [ -z "$DRIVERS_TOOLS" ]; then - echo "Must specify DRIVERS_TOOLS" - exit 1 - fi - source ${DRIVERS_TOOLS}/.evergreen/auth_oidc/secrets-export.sh - -elif [ $OIDC_ENV == "azure" ]; then - source ./env.sh - -elif [ $OIDC_ENV == "gcp" ]; then - source ./secrets-export.sh - -elif [ $OIDC_ENV == "k8s" ]; then - echo "Running oidc on k8s" +echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}..." +if [ ${OIDC_ENV} == "k8s" ]; then + SUB_TEST_NAME=$K8S_VARIANT-remote else - echo "Unrecognized OIDC_ENV $OIDC_ENV" - exit 1 + SUB_TEST_NAME=$OIDC_ENV-remote fi - -COVERAGE=1 bash ./.evergreen/just.sh setup-tests auth_oidc +bash ./.evergreen/just.sh setup-tests auth_oidc $SUB_TEST_NAME bash ./.evergreen/just.sh run-tests "${@:1}" + +echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}... done." diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 61d505d45a..f9a853f27c 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -26,6 +26,7 @@ fi # Source the local secrets export file if available. if [ -f "./secrets-export.sh" ]; then + echo "Sourcing local secrets file" . "./secrets-export.sh" fi diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 505c6de060..14f30fed91 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -663,11 +663,11 @@ def create_serverless_variants(): def create_oidc_auth_variants(): variants = [] - other_tasks = ["testazureoidc_task_group", "testgcpoidc_task_group", "testk8soidc_task_group"] for host_name in ["ubuntu22", "macos", "win64"]: - tasks = ["testoidc_task_group"] if host_name == "ubuntu22": - tasks += other_tasks + tasks = [".auth_oidc"] + else: + tasks = [".auth_oidc !.auth_oidc_remote"] host = HOSTS[host_name] variants.append( create_variant( @@ -884,6 +884,19 @@ def create_aws_tasks(): return tasks +def create_oidc_tasks(): + tasks = [] + for sub_test in ["default", "azure", "gcp", "eks", "aks", "gke"]: + vars = dict(TEST_NAME="auth_oidc", SUB_TEST_NAME=sub_test) + test_func = FunctionCall(func="run tests", vars=vars) + task_name = f"test-auth-oidc-{sub_test}" + tags = ["auth_oidc"] + if sub_test != "default": + tags.append("auth_oidc_remote") + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + return tasks + + def _create_ocsp_task(algo, variant, server_type, base_task_name): file_name = f"{algo}-basic-tls-ocsp-{variant}.json" diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py index d38ec3a69e..40fd65919d 100644 --- a/.evergreen/scripts/kms_tester.py +++ b/.evergreen/scripts/kms_tester.py @@ -2,9 +2,16 @@ import os -from utils import DRIVERS_TOOLS, LOGGER, ROOT, read_env, run_command, write_env +from utils import ( + DRIVERS_TOOLS, + LOGGER, + TMP_DRIVER_FILE, + create_archive, + read_env, + run_command, + write_env, +) -TMP_DRIVER_FILE = "/tmp/mongo-python-driver.tgz" # noqa: S108 DIRS = dict( gcp=f"{DRIVERS_TOOLS}/.evergreen/csfle/gcpkms", azure=f"{DRIVERS_TOOLS}/.evergreen/csfle/azurekms", @@ -45,12 +52,6 @@ def _setup_gcp_vm(base_env: dict[str, str]) -> None: LOGGER.info("Setting up GCP VM...") -def _create_archive() -> None: - run_command("git add .", cwd=ROOT) - run_command('git commit -m "add files"', check=False, cwd=ROOT) - run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) - - def _load_kms_config(sub_test_target: str) -> dict[str, str]: target_dir = DIRS[sub_test_target] config = read_env(f"{target_dir}/secrets-export.sh") @@ -87,7 +88,7 @@ def setup_kms(sub_test_name: str) -> None: run_command("./setup-secrets.sh", cwd=kms_dir) if success: - _create_archive() + create_archive() if sub_test_target == "azure": os.environ["AZUREKMS_VMNAME_PREFIX"] = "PYTHON_DRIVER" @@ -108,7 +109,7 @@ def setup_kms(sub_test_name: str) -> None: write_env("KEY_VAULT_ENDPOINT", config["AZUREKMS_KEYVAULTENDPOINT"]) -def test_kms_remote(sub_test_name: str) -> None: +def test_kms_send_to_remote(sub_test_name: str) -> None: env = _load_kms_config(sub_test_name) if sub_test_name == "azure": key_name = os.environ["KEY_NAME"] diff --git a/.evergreen/scripts/oidc_tester.py b/.evergreen/scripts/oidc_tester.py new file mode 100644 index 0000000000..fd702cf1d1 --- /dev/null +++ b/.evergreen/scripts/oidc_tester.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import os + +from utils import DRIVERS_TOOLS, TMP_DRIVER_FILE, create_archive, read_env, run_command, write_env + +K8S_NAMES = ["aks", "gke", "eks"] +K8S_REMOTE_NAMES = [f"{n}-remote" for n in K8S_NAMES] + + +def _get_target_dir(sub_test_name: str) -> str: + if sub_test_name == "default": + target_dir = "auth_oidc" + elif sub_test_name.startswith("azure"): + target_dir = "auth_oidc/azure" + elif sub_test_name.startswith("gcp"): + target_dir = "auth_oidc/gcp" + elif sub_test_name in K8S_NAMES + K8S_REMOTE_NAMES: + target_dir = "auth_oidc/k8s" + else: + raise ValueError(f"Invalid sub test name '{sub_test_name}'") + return f"{DRIVERS_TOOLS}/.evergreen/{target_dir}" + + +def setup_oidc(sub_test_name: str) -> dict[str, str] | None: + target_dir = _get_target_dir(sub_test_name) + env = os.environ.copy() + + if sub_test_name == "eks" and "AWS_ACCESS_KEY_ID" in os.environ: + # Store AWS creds for kubectl access. + for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]: + if key in os.environ: + write_env(key, os.environ[key]) + + if sub_test_name == "azure": + env["AZUREOIDC_VMNAME_PREFIX"] = "PYTHON_DRIVER" + if "-remote" not in sub_test_name: + run_command(f"bash {target_dir}/setup.sh", env=env) + if sub_test_name in K8S_NAMES: + run_command(f"bash {target_dir}/setup-pod.sh {sub_test_name}") + run_command(f"bash {target_dir}/run-self-test.sh") + return None + + source_file = None + if sub_test_name == "default": + source_file = f"{target_dir}/secrets-export.sh" + elif sub_test_name in ["azure-remote", "gcp-remote"]: + source_file = "./secrets-export.sh" + if sub_test_name in K8S_REMOTE_NAMES: + return os.environ.copy() + if source_file is None: + return None + + config = read_env(source_file) + write_env("MONGODB_URI_SINGLE", config["MONGODB_URI_SINGLE"]) + write_env("MONGODB_URI", config["MONGODB_URI"]) + write_env("DB_IP", config["MONGODB_URI"]) + + if sub_test_name == "default": + write_env("OIDC_TOKEN_FILE", config["OIDC_TOKEN_FILE"]) + write_env("OIDC_TOKEN_DIR", config["OIDC_TOKEN_DIR"]) + if "OIDC_DOMAIN" in config: + write_env("OIDC_DOMAIN", config["OIDC_DOMAIN"]) + elif sub_test_name == "azure-remote": + write_env("AZUREOIDC_RESOURCE", config["AZUREOIDC_RESOURCE"]) + elif sub_test_name == "gcp-remote": + write_env("GCPOIDC_AUDIENCE", config["GCPOIDC_AUDIENCE"]) + return config + + +def test_oidc_send_to_remote(sub_test_name: str) -> None: + env = os.environ.copy() + target_dir = _get_target_dir(sub_test_name) + create_archive() + if sub_test_name in ["azure", "gcp"]: + upper_name = sub_test_name.upper() + env[f"{upper_name}OIDC_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE + env[ + f"{upper_name}OIDC_TEST_CMD" + ] = f"OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh" + elif sub_test_name in K8S_NAMES: + env["K8S_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE + env["K8S_TEST_CMD"] = "OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh" + run_command(f"bash {target_dir}/run-driver-test.sh", env=env) + + +def teardown_oidc(sub_test_name: str) -> None: + target_dir = _get_target_dir(sub_test_name) + # For k8s, make sure an error while tearing down the pod doesn't prevent + # the Altas server teardown. + error = None + if sub_test_name in K8S_NAMES: + try: + run_command(f"bash {target_dir}/teardown-pod.sh") + except Exception as e: + error = e + run_command(f"bash {target_dir}/teardown.sh") + if error: + raise error diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py index 4ef1568add..f43ada4bbb 100644 --- a/.evergreen/scripts/run_server.py +++ b/.evergreen/scripts/run_server.py @@ -36,6 +36,11 @@ def start_server(): elif test_name == "load_balancer": set_env("LOAD_BALANCER") + elif test_name == "auth_oidc": + raise ValueError( + "OIDC auth does not use run-orchestration directly, do not use run-server!" + ) + elif test_name == "ocsp": opts.ssl = True if "ORCHESTRATION_FILE" not in os.environ: diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index cd781ccd70..38fd3c67cb 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -100,18 +100,29 @@ def run() -> None: if TEST_PERF: start_time = datetime.now() - # Run remote kms tests. + # Send kms tests to run remotely. if TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]: - from kms_tester import test_kms_remote + from kms_tester import test_kms_send_to_remote - test_kms_remote(SUB_TEST_NAME) + test_kms_send_to_remote(SUB_TEST_NAME) return - # Run remote ecs tests. + # Send ecs tests to run remotely. if TEST_NAME == "auth_aws" and SUB_TEST_NAME == "ecs": run_command(f"{DRIVERS_TOOLS}/.evergreen/auth_aws/aws_setup.sh ecs") return + # Send OIDC tests to run remotely. + if ( + TEST_NAME == "auth_oidc" + and SUB_TEST_NAME != "default" + and not SUB_TEST_NAME.endswith("-remote") + ): + from oidc_tester import test_oidc_send_to_remote + + test_oidc_send_to_remote(SUB_TEST_NAME) + return + if os.environ.get("DEBUG_LOG"): TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG} -o log_cli=1".split()) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index b75a821c3a..8432eacd5a 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -161,6 +161,13 @@ def handle_test_env() -> None: if group := GROUP_MAP.get(test_name, ""): UV_ARGS.append(f"--group {group}") + if test_name == "auth_oidc": + from oidc_tester import setup_oidc + + config = setup_oidc(sub_test_name) + if not config: + AUTH = "noauth" + if AUTH != "noauth": if test_name == "data_lake": config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh") @@ -174,9 +181,8 @@ def handle_test_env() -> None: write_env("SINGLE_MONGOS_LB_URI", config["SERVERLESS_URI"]) write_env("MULTI_MONGOS_LB_URI", config["SERVERLESS_URI"]) elif test_name == "auth_oidc": - DB_USER = os.environ["OIDC_ADMIN_USER"] - DB_PASSWORD = os.environ["OIDC_ADMIN_PWD"] - write_env("DB_IP", os.environ["MONGODB_URI"]) + DB_USER = config["OIDC_ADMIN_USER"] + DB_PASSWORD = config["OIDC_ADMIN_PWD"] elif test_name == "index_management": config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas/secrets-export.sh") DB_USER = config["DRIVERS_ATLAS_LAMBDA_USER"] diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index fedbdc2fe8..988d7ec48a 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -24,6 +24,12 @@ teardown_kms(SUB_TEST_NAME) +# Tear down OIDC if applicable. +elif TEST_NAME == "auth_oidc": + from oidc_tester import teardown_oidc + + teardown_oidc(SUB_TEST_NAME) + # Tear down ocsp if applicable. elif TEST_NAME == "ocsp": run_command(f"bash {DRIVERS_TOOLS}/.evergreen/teardown.sh") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index dcb50cc4dc..70a527028b 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -13,6 +13,7 @@ HERE = Path(__file__).absolute().parent ROOT = HERE.parent.parent DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") +TMP_DRIVER_FILE = "/tmp/mongo-python-driver.tgz" # noqa: S108 LOGGER = logging.getLogger("test") logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") @@ -49,7 +50,7 @@ class Distro: } # Tests that require a sub test suite. -SUB_TEST_REQUIRED = ["auth_aws", "kms"] +SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms"] def get_test_options( @@ -138,3 +139,9 @@ def run_command(cmd: str | list[str], **kwargs: Any) -> None: kwargs.setdefault("check", True) subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 LOGGER.info("Running command '%s'... done.", cmd) + + +def create_archive() -> None: + run_command("git add .", cwd=ROOT) + run_command('git commit -m "add files"', check=False, cwd=ROOT) + run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1d8783d9d1..7e70c025ed 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -236,12 +236,19 @@ the pages will re-render and the browser will automatically refresh. - Set up the test with `just setup-tests load_balancer`. - Run the tests with `just run-tests`. -### AWS tests +### AWS auth tests - Run `just run-server auth_aws` to start the server. - Run `just setup-tests auth_aws ` to set up the AWS test. - Run the tests with `just run-tests`. +### OIDC auth tests + +- Run `just setup-tests auth_oidc ` to set up the OIDC test. +- Run the tests with `just run-tests`. + +The supported types are [`default`, `azure`, `gcp`, `eks`, `aks`, and `gke`]. + ### KMS tests For KMS tests that are run locally, and expected to fail, in this case using `azure`: diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index a5334d79bd..6dc36dc8a4 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -70,6 +70,11 @@ def setUpClass(cls): cls.uri_single = os.environ["MONGODB_URI_SINGLE"] cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI") cls.uri_admin = os.environ["MONGODB_URI"] + if ENVIRON == "test": + if not TOKEN_DIR: + raise ValueError("Please set OIDC_TOKEN_DIR") + if not TOKEN_FILE: + raise ValueError("Please set OIDC_TOKEN_FILE") def setUp(self): self.request_called = 0 From 189923f7c3821965ad92b65b1b137f171f9e4842 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Wed, 12 Mar 2025 15:19:40 -0700 Subject: [PATCH 1796/2111] PYTHON-5198 Fix test_03_invalid_keyid (#2195) --- test/asynchronous/test_encryption.py | 3 ++- test/test_encryption.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 000d98a111..728a6f9139 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -2982,9 +2982,10 @@ async def test_02_no_fields(self): ) async def test_03_invalid_keyid(self): + # checkAuthForCreateCollection can be removed when SERVER-102101 is fixed. with self.assertRaisesRegex( EncryptedCollectionError, - "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + "(create|checkAuthForCreateCollection).encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", ): await self.client_encryption.create_encrypted_collection( database=self.db, diff --git a/test/test_encryption.py b/test/test_encryption.py index 6efb167442..36c0ab0e24 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2964,9 +2964,10 @@ def test_02_no_fields(self): ) def test_03_invalid_keyid(self): + # checkAuthForCreateCollection can be removed when SERVER-102101 is fixed. with self.assertRaisesRegex( EncryptedCollectionError, - "create.encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + "(create|checkAuthForCreateCollection).encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", ): self.client_encryption.create_encrypted_collection( database=self.db, From 0351992ddb7db34f4700c5d8eb824d1599dfe80a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Mar 2025 09:28:34 -0500 Subject: [PATCH 1797/2111] PYTHON-5204 Convert Serverless tests to use new test scripts (#2197) --- .evergreen/config.yml | 30 ----------------------- .evergreen/generated_configs/tasks.yml | 10 ++++++++ .evergreen/generated_configs/variants.yml | 10 ++------ .evergreen/scripts/generate_config.py | 12 ++++++--- .evergreen/scripts/setup_tests.py | 1 + .evergreen/scripts/teardown_tests.py | 6 ++++- 6 files changed, 27 insertions(+), 42 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 25ea2c4b90..f563a1aced 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -372,29 +372,6 @@ post: - func: "cleanup" task_groups: - - name: serverless_task_group - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 # 30 minutes - setup_group: - - func: "fetch source" - - func: "setup system" - - command: subprocess.exec - params: - binary: bash - env: - VAULT_NAME: ${VAULT_NAME} - args: - - ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh - teardown_task: - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh - - func: "upload test results" - tasks: - - ".serverless" - - name: test_aws_lambda_task_group setup_group: - func: fetch source @@ -445,13 +422,6 @@ tasks: - func: "run server" - func: "run doctests" - - name: "test-serverless" - tags: ["serverless"] - commands: - - func: "run tests" - vars: - TEST_NAME: serverless - - name: "test-enterprise-auth" tags: ["enterprise-auth"] commands: diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 9d52cf957d..de18b4df01 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -5866,3 +5866,13 @@ tasks: - noauth - nossl - sync_async + + # Serverless tests + - name: test-serverless + commands: + - func: run tests + vars: + TEST_NAME: serverless + AUTH: auth + SSL: ssl + tags: [serverless] diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index cf3e0cc903..938f98e43b 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1235,27 +1235,21 @@ buildvariants: # Serverless tests - name: serverless-rhel8-python3.9 tasks: - - name: serverless_task_group + - name: .serverless display_name: Serverless RHEL8 Python3.9 run_on: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: serverless - AUTH: auth - SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: serverless-rhel8-python3.13 tasks: - - name: serverless_task_group + - name: .serverless display_name: Serverless RHEL8 Python3.13 run_on: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: serverless - AUTH: auth - SSL: ssl PYTHON_BINARY: /opt/python/3.13/bin/python3 # Stable api tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 14f30fed91..84cd1075b9 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -645,8 +645,7 @@ def create_disable_test_commands_variants(): def create_serverless_variants(): host = DEFAULT_HOST batchtime = BATCHTIME_WEEK - expansions = dict(TEST_NAME="serverless", AUTH="auth", SSL="ssl") - tasks = ["serverless_task_group"] + tasks = [".serverless"] base_name = "Serverless" return [ create_variant( @@ -654,7 +653,6 @@ def create_serverless_variants(): get_display_name(base_name, host, python=python), host=host, python=python, - expansions=expansions, batchtime=batchtime, ) for python in MIN_MAX_PYTHON @@ -951,6 +949,14 @@ def create_ocsp_tasks(): return tasks +def create_serverless_tasks(): + vars = dict(TEST_NAME="serverless", AUTH="auth", SSL="ssl") + test_func = FunctionCall(func="run tests", vars=vars) + tags = ["serverless"] + task_name = "test-serverless" + return [EvgTask(name=task_name, tags=tags, commands=[test_func])] + + ################## # Generate Config ################## diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 8432eacd5a..3ba6d61758 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -174,6 +174,7 @@ def handle_test_env() -> None: DB_USER = config["ADL_USERNAME"] DB_PASSWORD = config["ADL_PASSWORD"] elif test_name == "serverless": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/serverless/setup.sh") config = read_env(f"{DRIVERS_TOOLS}/.evergreen/serverless/secrets-export.sh") DB_USER = config["SERVERLESS_ATLAS_USER"] DB_PASSWORD = config["SERVERLESS_ATLAS_PASSWORD"] diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index 988d7ec48a..3920180422 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -32,7 +32,11 @@ # Tear down ocsp if applicable. elif TEST_NAME == "ocsp": - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/teardown.sh") + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh") + +# Tear down serverless if applicable. +elif TEST_NAME == "serverless": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/serverless/teardown.sh") # Tear down auth_aws if applicable. # We do not run web-identity hosts on macos, because the hosts lack permissions, From 8274db27226ce17607ab64dc92ff34d126e655f0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Mar 2025 10:18:17 -0500 Subject: [PATCH 1798/2111] PYTHON-5203 Use uv from Python toolchain if available (#2200) --- .evergreen/scripts/configure-env.sh | 21 +++++++++++++++++ .evergreen/scripts/install-dependencies.sh | 10 ++++++--- .evergreen/scripts/utils.py | 7 +++++- .evergreen/utils.sh | 26 ++++++++++------------ 4 files changed, 46 insertions(+), 18 deletions(-) diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index f23af8a811..fa37b8fb08 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -92,3 +92,24 @@ cat < expansion.yml DRIVERS_TOOLS: "$DRIVERS_TOOLS" PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" EOT + +# If the toolchain is available, symlink binaries to the bin dir. This has to be done +# after drivers-tools is cloned, since we might be using its binary dir. +_bin_path="" +if [ "Windows_NT" == "${OS:-}" ]; then + _bin_path="/cygdrive/c/Python/Current/Scripts" +elif [ "$(uname -s)" != "Darwin" ]; then + _bin_path="/Library/Frameworks/Python.Framework/Versions/Current/bin" +else + _bin_path="/opt/python/Current/bin" +fi +if [ -d "${_bin_path}" ]; then + _suffix="" + if [ "Windows_NT" == "${OS:-}" ]; then + _suffix=".exe" + fi + mkdir -p $PYMONGO_BIN_DIR + ln -s ${_bin_path}/just${_suffix} $PYMONGO_BIN_DIR/just${_suffix} + ln -s ${_bin_path}/uv${_suffix} $PYMONGO_BIN_DIR/uv${_suffix} + ln -s ${_bin_path}/uvx${_suffix} $PYMONGO_BIN_DIR/uvx${_suffix} +fi diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 31ae4c1735..5ec06a87df 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -24,10 +24,14 @@ function _pip_install() { echo "Installing $2 using pip..." createvirtualenv "$(find_python3)" $_VENV_PATH python -m pip install $1 + _suffix="" if [ "Windows_NT" = "${OS:-}" ]; then - ln -s "$(which $2)" $_BIN_DIR/$2.exe - else - ln -s "$(which $2)" $_BIN_DIR/$2 + _suffix=".exe" + fi + ln -s "$(which $2)" $_BIN_DIR/${2}${_suffix} + # uv also comes with a uvx binary. + if [ $2 == "uv" ]; then + ln -s "$(which uvx)" $_BIN_DIR/uvx${_suffix} fi echo "Installed to ${_BIN_DIR}" echo "Installing $2 using pip... done." diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 70a527028b..039eec2436 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -137,7 +137,12 @@ def run_command(cmd: str | list[str], **kwargs: Any) -> None: cmd = " ".join(cmd) LOGGER.info("Running command '%s'...", cmd) kwargs.setdefault("check", True) - subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 + try: + subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 + except subprocess.CalledProcessError as e: + LOGGER.error(e.output) + LOGGER.error(str(e)) + sys.exit(e.returncode) LOGGER.info("Running command '%s'... done.", cmd) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index e044b3d766..bb3ed8dabd 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -4,31 +4,29 @@ set -eu find_python3() { PYTHON="" - # Add a fallback system python3 if it is available and Python 3.9+. - if is_python_39 "$(command -v python3)"; then - PYTHON="$(command -v python3)" - fi # Find a suitable toolchain version, if available. if [ "$(uname -s)" = "Darwin" ]; then - # macos 11.00 - if [ -d "/Library/Frameworks/Python.Framework/Versions/3.10" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" - # macos 10.14 - elif [ -d "/Library/Frameworks/Python.Framework/Versions/3.9" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/3.9/bin/python3" - fi + PYTHON="/Library/Frameworks/Python.Framework/Versions/Current/bin/python3" elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - PYTHON="C:/python/Python39/python.exe" + PYTHON="C:/python/Current/python.exe" else # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.9+. - if [ -f "/opt/python/3.9/bin/python3" ]; then - PYTHON="/opt/python/3.9/bin/python3" + if [ -f "/opt/python/Current/bin/python3" ]; then + PYTHON="/opt/python/Current/bin/python3" + elif is_python_39 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v5/bin/python3" elif is_python_39 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v4/bin/python3" elif is_python_39 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v3/bin/python3" fi fi + # Add a fallback system python3 if it is available and Python 3.9+. + if [ -z "$PYTHON" ]; then + if is_python_39 "$(command -v python3)"; then + PYTHON="$(command -v python3)" + fi + fi if [ -z "$PYTHON" ]; then echo "Cannot test without python3.9+ installed!" exit 1 From 5e055eea0f09b0a2856063af80af810134d23849 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 13 Mar 2025 12:00:56 -0500 Subject: [PATCH 1799/2111] PYTHON-5206 Convert Atlas Connect and Enterprise Auth tests to use new test scripts (#2201) Co-authored-by: Noah Stapp --- .evergreen/config.yml | 35 -------------- .evergreen/generated_configs/tasks.yml | 23 ++++++++++ .evergreen/generated_configs/variants.yml | 46 ++++++++----------- .evergreen/scripts/generate_config.py | 28 ++++++++--- .evergreen/scripts/run-atlas-tests.sh | 8 ---- .evergreen/scripts/run-direct-tests.sh | 10 ---- .../scripts/run-enterprise-auth-tests.sh | 9 ---- .evergreen/scripts/setup_tests.py | 10 ++++ .evergreen/scripts/utils.py | 2 +- CONTRIBUTING.md | 13 ++++++ pyproject.toml | 2 +- test/atlas/test_connection.py | 2 +- 12 files changed, 91 insertions(+), 97 deletions(-) delete mode 100755 .evergreen/scripts/run-atlas-tests.sh delete mode 100755 .evergreen/scripts/run-direct-tests.sh delete mode 100755 .evergreen/scripts/run-enterprise-auth-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index f563a1aced..4562f1d2be 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -267,28 +267,6 @@ functions: binary: bash args: [.evergreen/just.sh, run-tests] - "run enterprise auth tests": - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: "src" - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "PYTHON_BINARY"] - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-enterprise-auth-tests.sh - - "run atlas tests": - - command: subprocess.exec - type: test - params: - binary: bash - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "PYTHON_BINARY"] - working_dir: "src" - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-atlas-tests.sh - "cleanup": - command: subprocess.exec params: @@ -422,13 +400,6 @@ tasks: - func: "run server" - func: "run doctests" - - name: "test-enterprise-auth" - tags: ["enterprise-auth"] - commands: - - func: "run server" - - func: "assume ec2 role" - - func: "run enterprise auth tests" - - name: "test-search-index-helpers" commands: - func: "run server" @@ -488,12 +459,6 @@ tasks: TOPOLOGY: "replica_set" - func: "run tests" - - name: "atlas-connect" - tags: ["atlas-connect"] - commands: - - func: "assume ec2 role" - - func: "run atlas tests" - - name: atlas-data-lake-tests commands: - func: "bootstrap data lake" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index de18b4df01..0b0f09329a 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1,4 +1,13 @@ tasks: + # Atlas connect tests + - name: test-atlas-connect + commands: + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: atlas_connect + tags: [atlas_connect] + # Aws tests - name: test-auth-aws-4.4-regular commands: @@ -680,6 +689,20 @@ tasks: AWS_ROLE_SESSION_NAME: test tags: [auth-aws, auth-aws-web-identity] + # Enterprise auth tests + - name: test-enterprise-auth + commands: + - func: run server + vars: + TEST_NAME: enterprise_auth + AUTH: auth + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: enterprise_auth + AUTH: auth + tags: [enterprise_auth] + # Kms tests - name: test-gcpkms commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 938f98e43b..4c54abf4b9 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -49,7 +49,7 @@ buildvariants: # Atlas connect tests - name: atlas-connect-rhel8-python3.9 tasks: - - name: atlas-connect + - name: .atlas_connect display_name: Atlas connect RHEL8 Python3.9 run_on: - rhel87-small @@ -57,7 +57,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: atlas-connect-rhel8-python3.13 tasks: - - name: atlas-connect + - name: .atlas_connect display_name: Atlas connect RHEL8 Python3.13 run_on: - rhel87-small @@ -510,59 +510,53 @@ buildvariants: tags: [encryption_tag] # Enterprise auth tests - - name: auth-enterprise-macos-python3.9-auth + - name: auth-enterprise-macos-python3.9 tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise macOS Python3.9 Auth + - name: .enterprise_auth + display_name: Auth Enterprise macOS Python3.9 run_on: - macos-14 expansions: - AUTH: auth PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: auth-enterprise-rhel8-python3.10-auth + - name: auth-enterprise-rhel8-python3.10 tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 Python3.10 Auth + - name: .enterprise_auth + display_name: Auth Enterprise RHEL8 Python3.10 run_on: - rhel87-small expansions: - AUTH: auth PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: auth-enterprise-rhel8-python3.11-auth + - name: auth-enterprise-rhel8-python3.11 tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 Python3.11 Auth + - name: .enterprise_auth + display_name: Auth Enterprise RHEL8 Python3.11 run_on: - rhel87-small expansions: - AUTH: auth PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: auth-enterprise-rhel8-python3.12-auth + - name: auth-enterprise-rhel8-python3.12 tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 Python3.12 Auth + - name: .enterprise_auth + display_name: Auth Enterprise RHEL8 Python3.12 run_on: - rhel87-small expansions: - AUTH: auth PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: auth-enterprise-win64-python3.13-auth + - name: auth-enterprise-win64-python3.13 tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise Win64 Python3.13 Auth + - name: .enterprise_auth + display_name: Auth Enterprise Win64 Python3.13 run_on: - windows-64-vsMulti-small expansions: - AUTH: auth PYTHON_BINARY: C:/python/Python313/python.exe - - name: auth-enterprise-rhel8-pypy3.10-auth + - name: auth-enterprise-rhel8-pypy3.10 tasks: - - name: test-enterprise-auth - display_name: Auth Enterprise RHEL8 PyPy3.10 Auth + - name: .enterprise_auth + display_name: Auth Enterprise RHEL8 PyPy3.10 run_on: - rhel87-small expansions: - AUTH: auth PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Free threaded tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 84cd1075b9..d91e0e6ded 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -464,7 +464,6 @@ def create_compression_variants(): def create_enterprise_auth_variants(): - expansions = dict(AUTH="auth") variants = [] # All python versions across platforms. @@ -475,10 +474,8 @@ def create_enterprise_auth_variants(): host = HOSTS["win64"] else: host = DEFAULT_HOST - display_name = get_display_name("Auth Enterprise", host, python=python, **expansions) - variant = create_variant( - ["test-enterprise-auth"], display_name, host=host, python=python, expansions=expansions - ) + display_name = get_display_name("Auth Enterprise", host, python=python) + variant = create_variant([".enterprise_auth"], display_name, host=host, python=python) variants.append(variant) return variants @@ -721,7 +718,7 @@ def create_atlas_connect_variants(): host = DEFAULT_HOST return [ create_variant( - ["atlas-connect"], + [".atlas_connect"], get_display_name("Atlas connect", host, python=python), python=python, host=host, @@ -913,6 +910,25 @@ def _create_ocsp_task(algo, variant, server_type, base_task_name): return EvgTask(name=task_name, tags=tags, commands=commands) +def create_atlas_connect_tasks(): + vars = dict(TEST_NAME="atlas_connect") + assume_func = FunctionCall(func="assume ec2 role") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-atlas-connect" + tags = ["atlas_connect"] + return [EvgTask(name=task_name, tags=tags, commands=[assume_func, test_func])] + + +def create_enterprise_auth_tasks(): + vars = dict(TEST_NAME="enterprise_auth", AUTH="auth") + server_func = FunctionCall(func="run server", vars=vars) + assume_func = FunctionCall(func="assume ec2 role") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-enterprise-auth" + tags = ["enterprise_auth"] + return [EvgTask(name=task_name, tags=tags, commands=[server_func, assume_func, test_func])] + + def create_ocsp_tasks(): tasks = [] tests = [ diff --git a/.evergreen/scripts/run-atlas-tests.sh b/.evergreen/scripts/run-atlas-tests.sh deleted file mode 100755 index 99968063bd..0000000000 --- a/.evergreen/scripts/run-atlas-tests.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Disable xtrace for security reasons (just in case it was accidentally set). -set +x -set -o errexit -bash "${DRIVERS_TOOLS}"/.evergreen/auth_aws/setup_secrets.sh drivers/atlas_connect -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-tests atlas -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/run-direct-tests.sh b/.evergreen/scripts/run-direct-tests.sh deleted file mode 100755 index a00235311c..0000000000 --- a/.evergreen/scripts/run-direct-tests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -x -. .evergreen/utils.sh - -. .evergreen/scripts/env.sh -createvirtualenv "$PYTHON_BINARY" .venv - -export PYMONGO_C_EXT_MUST_BUILD=1 -pip install -e ".[test]" -pytest -v diff --git a/.evergreen/scripts/run-enterprise-auth-tests.sh b/.evergreen/scripts/run-enterprise-auth-tests.sh deleted file mode 100755 index 65aafde2df..0000000000 --- a/.evergreen/scripts/run-enterprise-auth-tests.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -set -eu - -# Disable xtrace for security reasons (just in case it was accidentally set). -set +x -# Use the default python to bootstrap secrets. -bash "${DRIVERS_TOOLS}"/.evergreen/secrets_handling/setup-secrets.sh drivers/enterprise_auth -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh setup-tests enterprise_auth -bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 3ba6d61758..868ac419b5 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -112,6 +112,10 @@ def setup_libmongocrypt(): run_command("chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll") +def get_secrets(name: str) -> None: + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh {name}") + + def handle_test_env() -> None: opts, _ = get_test_options("Set up the test environment and services.") test_name = opts.test_name @@ -203,6 +207,7 @@ def handle_test_env() -> None: write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") if test_name == "enterprise_auth": + get_secrets("drivers/enterprise_auth") config = read_env(f"{ROOT}/secrets-export.sh") if PLATFORM == "windows": LOGGER.info("Setting GSSAPI_PASS") @@ -346,6 +351,11 @@ def handle_test_env() -> None: else: run_command(f"bash {auth_aws_dir}/setup-secrets.sh") + if test_name == "atlas_connect": + get_secrets("drivers/atlas_connect") + # We do not want the default client_context to be initialized. + write_env("DISABLE_CONTEXT") + if test_name == "perf": # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively # affects the benchmark results. diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 039eec2436..08d376461e 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -30,7 +30,7 @@ class Distro: # Map the test name to a test suite. TEST_SUITE_MAP = { - "atlas": "atlas", + "atlas_connect": "atlas_connect", "auth_aws": "auth_aws", "auth_oidc": "auth_oidc", "data_lake": "data_lake", diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e70c025ed..8844565d31 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -262,6 +262,19 @@ For KMS tests that run remotely and are expected to pass, in this case using `gc - Run `just setup-tests kms gcp`. - Run `just run-tests`. +### Enterprise Auth tests + +Note: these tests can only be run from an Evergreen host. + +- Run `just run-server enterprise_auth`. +- Run `just setup-tests enterprise_auth`. +- Run `just run-tests`. + +### Atlas Connect tests + +- Run `just setup-tests atlas_connect`. +- Run `just run-tests`. + ### OCSP tests - Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. diff --git a/pyproject.toml b/pyproject.toml index ca76cfa2c0..993b3e5aee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -125,7 +125,7 @@ markers = [ "auth_oidc: tests that rely on oidc auth", "auth: tests that rely on authentication", "ocsp: tests that rely on ocsp", - "atlas: tests that rely on atlas", + "atlas_connect: tests that rely on an atlas connection", "data_lake: tests that rely on atlas data lake", "perf: benchmark tests", "index_management: index management tests", diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 4dcbba6d11..3d34ff326e 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -28,7 +28,7 @@ import pymongo from pymongo.ssl_support import HAS_SNI -pytestmark = pytest.mark.atlas +pytestmark = pytest.mark.atlas_connect URIS = { From e6e8650cc95314e742a30244b5db0239f5259f68 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 13 Mar 2025 15:08:41 -0400 Subject: [PATCH 1800/2111] PYTHON-5144 - Add async performance benchmarks (#2188) --- .evergreen/config.yml | 65 +++- .evergreen/run-perf-tests.sh | 2 +- .evergreen/scripts/run-perf-tests.sh | 2 +- .evergreen/scripts/setup_tests.py | 5 +- test/performance/async_perf_test.py | 466 +++++++++++++++++++++++++++ 5 files changed, 523 insertions(+), 17 deletions(-) create mode 100644 test/performance/async_perf_test.py diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 4562f1d2be..24f08d67af 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -321,6 +321,7 @@ functions: params: working_dir: "src" binary: bash + include_expansions_in_env: [SUB_TEST_NAME] args: - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/run-perf-tests.sh @@ -512,6 +513,8 @@ tasks: vars: VERSION: "v6.0-perf" - func: "run perf tests" + vars: + SUB_TEST_NAME: "sync" - func: "attach benchmark test results" - func: "send dashboard data" @@ -523,6 +526,8 @@ tasks: VERSION: "v6.0-perf" SSL: "ssl" - func: "run perf tests" + vars: + SUB_TEST_NAME: "sync" - func: "attach benchmark test results" - func: "send dashboard data" @@ -533,9 +538,52 @@ tasks: vars: VERSION: "8.0" - func: "run perf tests" + vars: + SUB_TEST_NAME: "sync" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "perf-6.0-standalone-async" + tags: [ "perf" ] + commands: + - func: "run server" + vars: + VERSION: "v6.0-perf" + TOPOLOGY: "server" + - func: "run perf tests" + vars: + SUB_TEST_NAME: "async" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "perf-6.0-standalone-ssl-async" + tags: [ "perf" ] + commands: + - func: "run server" + vars: + VERSION: "v6.0-perf" + TOPOLOGY: "server" + SSL: "ssl" + - func: "run perf tests" + vars: + SUB_TEST_NAME: "async" + - func: "attach benchmark test results" + - func: "send dashboard data" + + - name: "perf-8.0-standalone-async" + tags: [ "perf" ] + commands: + - func: "run server" + vars: + VERSION: "8.0" + TOPOLOGY: "server" + - func: "run perf tests" + vars: + SUB_TEST_NAME: "async" - func: "attach benchmark test results" - func: "send dashboard data" + - name: "check-import-time" tags: ["pr"] commands: @@ -616,17 +664,6 @@ buildvariants: - name: "perf-6.0-standalone" - name: "perf-6.0-standalone-ssl" - name: "perf-8.0-standalone" - - # Platform notes - # i386 builds of OpenSSL or Cyrus SASL are not available - # Debian 8.1 only supports MongoDB 3.4+ - # SUSE12 s390x is only supported by MongoDB 3.4+ - # No enterprise build for Archlinux, SSL not available - # RHEL 7.6 and RHEL 8.4 only supports 3.6+. - # RHEL 7 only supports 2.6+ - # RHEL 7.1 ppc64le is only supported by MongoDB 3.2+ - # RHEL 7.2 s390x is only supported by MongoDB 3.4+ - # Solaris MongoDB SSL builds are not available - # Darwin MongoDB SSL builds are not available for 2.6 - # SUSE12 x86_64 is only supported by MongoDB 3.2+ - # vim: set et sw=2 ts=2 : + - name: "perf-6.0-standalone-async" + - name: "perf-6.0-standalone-ssl-async" + - name: "perf-8.0-standalone-async" diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh index 5e423caa23..cf88b93710 100755 --- a/.evergreen/run-perf-tests.sh +++ b/.evergreen/run-perf-tests.sh @@ -15,5 +15,5 @@ export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 -bash ./.evergreen/just.sh setup-tests perf +bash ./.evergreen/just.sh setup-tests perf "${SUB_TEST_NAME}" bash ./.evergreen/just.sh run-tests diff --git a/.evergreen/scripts/run-perf-tests.sh b/.evergreen/scripts/run-perf-tests.sh index 69a369fee1..e1c1311d67 100755 --- a/.evergreen/scripts/run-perf-tests.sh +++ b/.evergreen/scripts/run-perf-tests.sh @@ -1,4 +1,4 @@ #!/bin/bash PROJECT_DIRECTORY=${PROJECT_DIRECTORY} -bash "${PROJECT_DIRECTORY}"/.evergreen/run-perf-tests.sh +SUB_TEST_NAME=${SUB_TEST_NAME} bash "${PROJECT_DIRECTORY}"/.evergreen/run-perf-tests.sh diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 868ac419b5..53e3a568ba 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -359,7 +359,10 @@ def handle_test_env() -> None: if test_name == "perf": # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively # affects the benchmark results. - TEST_ARGS = f"test/performance/perf_test.py {TEST_ARGS}" + if sub_test_name == "sync": + TEST_ARGS = f"test/performance/perf_test.py {TEST_ARGS}" + else: + TEST_ARGS = f"test/performance/async_perf_test.py {TEST_ARGS}" # Add coverage if requested. # Only cover CPython. PyPy reports suspiciously low coverage. diff --git a/test/performance/async_perf_test.py b/test/performance/async_perf_test.py new file mode 100644 index 0000000000..2ceee45bf9 --- /dev/null +++ b/test/performance/async_perf_test.py @@ -0,0 +1,466 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Asynchronous Tests for the MongoDB Driver Performance Benchmarking Spec. + +See https://github.com/mongodb/specifications/blob/master/source/benchmarking/benchmarking.md + + +To set up the benchmarks locally:: + + python -m pip install simplejson + git clone --depth 1 https://github.com/mongodb/specifications.git + pushd specifications/source/benchmarking/data + tar xf extended_bson.tgz + tar xf parallel.tgz + tar xf single_and_multi_document.tgz + popd + export TEST_PATH="specifications/source/benchmarking/data" + export OUTPUT_FILE="results.json" + +Then to run all benchmarks quickly:: + + FASTBENCH=1 python test/performance/async_perf_test.py -v + +To run individual benchmarks quickly:: + + FASTBENCH=1 python test/performance/async_perf_test.py -v TestRunCommand TestFindManyAndEmptyCursor +""" +from __future__ import annotations + +import asyncio +import os +import sys +import tempfile +import time +import warnings +from typing import Any, List, Optional, Union + +import pytest + +try: + import simplejson as json +except ImportError: + import json # type: ignore[no-redef] + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest + +from bson import encode +from gridfs import AsyncGridFSBucket +from pymongo import ( + DeleteOne, + InsertOne, + ReplaceOne, +) + +pytestmark = pytest.mark.perf + +# Spec says to use at least 1 minute cumulative execution time and up to 100 iterations or 5 minutes but that +# makes the benchmarks too slow. Instead, we use at least 30 seconds and at most 60 seconds. +NUM_ITERATIONS = 100 +MIN_ITERATION_TIME = 30 +MAX_ITERATION_TIME = 120 +NUM_DOCS = 10000 +# When debugging or prototyping it's often useful to run the benchmarks locally, set FASTBENCH=1 to run quickly. +if bool(os.getenv("FASTBENCH")): + NUM_ITERATIONS = 2 + MIN_ITERATION_TIME = 1 + MAX_ITERATION_TIME = 30 + NUM_DOCS = 1000 + +TEST_PATH = os.environ.get( + "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) +) + +OUTPUT_FILE = os.environ.get("OUTPUT_FILE") + +result_data: List = [] + + +def tearDownModule(): + output = json.dumps(result_data, indent=4) + if OUTPUT_FILE: + with open(OUTPUT_FILE, "w") as opf: + opf.write(output) + else: + print(output) + + +class Timer: + def __enter__(self): + self.start = time.monotonic() + return self + + def __exit__(self, *args): + self.end = time.monotonic() + self.interval = self.end - self.start + + +async def concurrent(n_tasks, func): + tasks = [func() for _ in range(n_tasks)] + await asyncio.gather(*tasks) + + +class PerformanceTest: + dataset: str + data_size: int + fail: Any + n_tasks: int = 1 + did_init: bool = False + + async def asyncSetUp(self): + await async_client_context.init() + self.setup_time = time.monotonic() + + async def asyncTearDown(self): + duration = time.monotonic() - self.setup_time + # Remove "Test" so that TestFlatEncoding is reported as "FlatEncoding". + name = self.__class__.__name__[4:] + median = self.percentile(50) + megabytes_per_sec = (self.data_size * self.n_tasks) / median / 1000000 + print( + f"Completed {self.__class__.__name__} {megabytes_per_sec:.3f} MB/s, MEDIAN={self.percentile(50):.3f}s, " + f"total time={duration:.3f}s, iterations={len(self.results)}" + ) + result_data.append( + { + "info": { + "test_name": name, + "args": { + "tasks": self.n_tasks, + }, + }, + "metrics": [ + {"name": "megabytes_per_sec", "type": "MEDIAN", "value": megabytes_per_sec}, + ], + } + ) + + async def before(self): + pass + + async def do_task(self): + raise NotImplementedError + + async def after(self): + pass + + def percentile(self, percentile): + if hasattr(self, "results"): + sorted_results = sorted(self.results) + percentile_index = int(len(sorted_results) * percentile / 100) - 1 + return sorted_results[percentile_index] + else: + self.fail("Test execution failed") + return None + + async def runTest(self): + results = [] + start = time.monotonic() + i = 0 + while True: + i += 1 + await self.before() + with Timer() as timer: + if self.n_tasks == 1: + await self.do_task() + else: + await concurrent(self.n_tasks, self.do_task) + await self.after() + results.append(timer.interval) + duration = time.monotonic() - start + if duration > MIN_ITERATION_TIME and i >= NUM_ITERATIONS: + break + if i >= NUM_ITERATIONS: + break + if duration > MAX_ITERATION_TIME: + with warnings.catch_warnings(): + warnings.simplefilter("default") + warnings.warn( + f"{self.__class__.__name__} timed out after {MAX_ITERATION_TIME}s, completed {i}/{NUM_ITERATIONS} iterations." + ) + + break + + self.results = results + + +# SINGLE-DOC BENCHMARKS +class TestRunCommand(PerformanceTest, AsyncPyMongoTestCase): + data_size = len(encode({"hello": True})) * NUM_DOCS + + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await self.client.drop_database("perftest") + + async def do_task(self): + command = self.client.perftest.command + for _ in range(NUM_DOCS): + await command("hello", True) + + +class TestRunCommand8Tasks(TestRunCommand): + n_tasks = 8 + + +class TestRunCommand80Tasks(TestRunCommand): + n_tasks = 80 + + +class TestRunCommandUnlimitedTasks(TestRunCommand): + async def do_task(self): + command = self.client.perftest.command + await asyncio.gather(*[command("hello", True) for _ in range(NUM_DOCS)]) + + +class TestDocument(PerformanceTest): + async def asyncSetUp(self): + await super().asyncSetUp() + # Location of test data. + with open( # noqa: ASYNC101 + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) + ) as data: + self.document = json.loads(data.read()) + + self.client = async_client_context.client + await self.client.drop_database("perftest") + + async def asyncTearDown(self): + await super().asyncTearDown() + await self.client.drop_database("perftest") + + async def before(self): + self.corpus = await self.client.perftest.create_collection("corpus") + + async def after(self): + await self.client.perftest.drop_collection("corpus") + + +class FindTest(TestDocument): + dataset = "tweet.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + documents = [self.document.copy() for _ in range(NUM_DOCS)] + self.corpus = self.client.perftest.corpus + result = await self.corpus.insert_many(documents) + self.inserted_ids = result.inserted_ids + + async def before(self): + pass + + async def after(self): + pass + + +class TestFindOneByID(FindTest, AsyncPyMongoTestCase): + async def do_task(self): + find_one = self.corpus.find_one + for _id in self.inserted_ids: + await find_one({"_id": _id}) + + +class TestFindOneByID8Tasks(TestFindOneByID): + n_tasks = 8 + + +class TestFindOneByID80Tasks(TestFindOneByID): + n_tasks = 80 + + +class TestFindOneByIDUnlimitedTasks(TestFindOneByID): + async def do_task(self): + find_one = self.corpus.find_one + await asyncio.gather(*[find_one({"_id": _id}) for _id in self.inserted_ids]) + + +class SmallDocInsertTest(TestDocument): + dataset = "small_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class SmallDocMixedTest(TestDocument): + dataset = "small_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS * 2 + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class TestSmallDocInsertOne(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + await insert_one(doc) + + +class TestSmallDocInsertOneUnlimitedTasks(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + await asyncio.gather(*[insert_one(doc) for doc in self.documents]) + + +class LargeDocInsertTest(TestDocument): + dataset = "large_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + n_docs = 10 + self.data_size = len(encode(self.document)) * n_docs + self.documents = [self.document.copy() for _ in range(n_docs)] + + +class TestLargeDocInsertOne(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + await insert_one(doc) + + +class TestLargeDocInsertOneUnlimitedTasks(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + await asyncio.gather(*[insert_one(doc) for doc in self.documents]) + + +# MULTI-DOC BENCHMARKS +class TestFindManyAndEmptyCursor(FindTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.find().to_list() + + +class TestFindManyAndEmptyCursor8Tasks(TestFindManyAndEmptyCursor): + n_tasks = 8 + + +class TestFindManyAndEmptyCursor80Tasks(TestFindManyAndEmptyCursor): + n_tasks = 80 + + +class TestSmallDocBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.insert_many(self.documents, ordered=True) + + +class TestSmallDocClientBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class TestSmallDocBulkMixedOps(SmallDocMixedTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(document=doc)) + self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) + self.models.append(DeleteOne(filter={})) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkMixedOps(SmallDocMixedTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + self.models.append( + ReplaceOne( + namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True + ) + ) + self.models.append(DeleteOne(namespace="perftest.corpus", filter={})) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class TestLargeDocBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.insert_many(self.documents, ordered=True) + + +class TestLargeDocClientBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class GridFsTest(PerformanceTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await self.client.drop_database("perftest") + + gridfs_path = os.path.join( + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: # noqa: ASYNC101 + self.document = data.read() + self.data_size = len(self.document) + self.bucket = AsyncGridFSBucket(self.client.perftest) + + async def asyncTearDown(self): + await super().asyncTearDown() + await self.client.drop_database("perftest") + + +class TestGridFsUpload(GridFsTest, AsyncPyMongoTestCase): + async def before(self): + # Create the bucket. + await self.bucket.upload_from_stream("init", b"x") + + async def do_task(self): + await self.bucket.upload_from_stream("gridfstest", self.document) + + +class TestGridFsDownload(GridFsTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.uploaded_id = await self.bucket.upload_from_stream("gridfstest", self.document) + + async def do_task(self): + await (await self.bucket.open_download_stream(self.uploaded_id)).read() + + +if __name__ == "__main__": + unittest.main() From 72ed1029be70b6f7d2e7bfd2476dbefcc84173a8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 14 Mar 2025 06:05:21 -0500 Subject: [PATCH 1801/2111] PYTHON-5210 Prevent overriding the python used by other tools (#2203) --- .evergreen/scripts/utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 08d376461e..be17d756e3 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -137,6 +137,11 @@ def run_command(cmd: str | list[str], **kwargs: Any) -> None: cmd = " ".join(cmd) LOGGER.info("Running command '%s'...", cmd) kwargs.setdefault("check", True) + # Prevent overriding the python used by other tools. + env = kwargs.pop("env", os.environ).copy() + if "UV_PYTHON" in env: + del env["UV_PYTHON"] + kwargs["env"] = env try: subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 except subprocess.CalledProcessError as e: From 4353278dc8655f7c12e624e9c9b774bc66c2060b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 14 Mar 2025 10:38:30 -0500 Subject: [PATCH 1802/2111] PYTHON-5207 Convert mod_wsgi tests to use the new test runner (#2202) --- .evergreen/config.yml | 45 ----------- .evergreen/generated_configs/tasks.yml | 42 ++++++++++ .evergreen/generated_configs/variants.yml | 10 +-- .evergreen/scripts/generate_config.py | 25 ++++-- .evergreen/scripts/mod_wsgi_tester.py | 93 +++++++++++++++++++++++ .evergreen/scripts/run-mod-wsgi-tests.sh | 53 ------------- .evergreen/scripts/run_tests.py | 7 ++ .evergreen/scripts/setup_tests.py | 7 +- .evergreen/scripts/teardown_tests.py | 6 ++ .evergreen/scripts/utils.py | 6 +- CONTRIBUTING.md | 11 +++ test/mod_wsgi_test/test_client.py | 10 +-- 12 files changed, 195 insertions(+), 120 deletions(-) create mode 100644 .evergreen/scripts/mod_wsgi_tester.py delete mode 100755 .evergreen/scripts/run-mod-wsgi-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 24f08d67af..c54c688e46 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -227,17 +227,6 @@ functions: args: - ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh - "run mod_wsgi tests": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: [MOD_WSGI_VERSION, MOD_WSGI_EMBEDDED, "PYTHON_BINARY"] - working_dir: "src" - binary: bash - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-mod-wsgi-tests.sh - "run doctests": - command: subprocess.exec type: test @@ -412,40 +401,6 @@ tasks: TEST_NAME: index_management AUTH: "auth" - - name: "mod-wsgi-standalone" - tags: ["mod_wsgi"] - commands: - - func: "run server" - vars: - TOPOLOGY: "server" - - func: "run mod_wsgi tests" - - - name: "mod-wsgi-replica-set" - tags: ["mod_wsgi"] - commands: - - func: "run server" - vars: - TOPOLOGY: "replica_set" - - func: "run mod_wsgi tests" - - - name: "mod-wsgi-embedded-mode-standalone" - tags: ["mod_wsgi"] - commands: - - func: "run server" - - func: "run mod_wsgi tests" - vars: - MOD_WSGI_EMBEDDED: "1" - - - name: "mod-wsgi-embedded-mode-replica-set" - tags: ["mod_wsgi"] - commands: - - func: "run server" - vars: - TOPOLOGY: "replica_set" - - func: "run mod_wsgi tests" - vars: - MOD_WSGI_EMBEDDED: "1" - - name: "no-server" tags: ["no-server"] commands: diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 0b0f09329a..070b163e90 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -775,6 +775,48 @@ tasks: TEST_NAME: load_balancer tags: [load-balancer, noauth, nossl] + # Mod wsgi tests + - name: mod-wsgi-standalone + commands: + - func: run server + vars: + TOPOLOGY: standalone + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: standalone + tags: [mod_wsgi] + - name: mod-wsgi-replica-set + commands: + - func: run server + vars: + TOPOLOGY: replica_set + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: standalone + tags: [mod_wsgi] + - name: mod-wsgi-embedded-mode-standalone + commands: + - func: run server + vars: + TOPOLOGY: standalone + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + tags: [mod_wsgi] + - name: mod-wsgi-embedded-mode-replica-set + commands: + - func: run server + vars: + TOPOLOGY: replica_set + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + tags: [mod_wsgi] + # Ocsp tests - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 4c54abf4b9..d70afa2bdd 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -696,10 +696,7 @@ buildvariants: # Mod wsgi tests - name: mod_wsgi-ubuntu-22-python3.9 tasks: - - name: mod-wsgi-standalone - - name: mod-wsgi-replica-set - - name: mod-wsgi-embedded-mode-standalone - - name: mod-wsgi-embedded-mode-replica-set + - name: .mod_wsgi display_name: mod_wsgi Ubuntu-22 Python3.9 run_on: - ubuntu2204-small @@ -708,10 +705,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: mod_wsgi-ubuntu-22-python3.13 tasks: - - name: mod-wsgi-standalone - - name: mod-wsgi-replica-set - - name: mod-wsgi-embedded-mode-standalone - - name: mod-wsgi-embedded-mode-replica-set + - name: .mod_wsgi display_name: mod_wsgi Ubuntu-22 Python3.13 run_on: - ubuntu2204-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index d91e0e6ded..b90a6af437 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -614,12 +614,7 @@ def create_atlas_data_lake_variants(): def create_mod_wsgi_variants(): variants = [] host = HOSTS["ubuntu22"] - tasks = [ - "mod-wsgi-standalone", - "mod-wsgi-replica-set", - "mod-wsgi-embedded-mode-standalone", - "mod-wsgi-embedded-mode-replica-set", - ] + tasks = [".mod_wsgi"] expansions = dict(MOD_WSGI_VERSION="4") for python in MIN_MAX_PYTHON: display_name = get_display_name("mod_wsgi", host, python=python) @@ -892,6 +887,24 @@ def create_oidc_tasks(): return tasks +def create_mod_wsgi_tasks(): + tasks = [] + for test, topology in product(["standalone", "embedded-mode"], ["standalone", "replica_set"]): + if test == "standalone": + task_name = "mod-wsgi-" + else: + task_name = "mod-wsgi-embedded-mode-" + task_name += topology.replace("_", "-") + server_vars = dict(TOPOLOGY=topology) + server_func = FunctionCall(func="run server", vars=server_vars) + vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0]) + test_func = FunctionCall(func="run tests", vars=vars) + tags = ["mod_wsgi"] + commands = [server_func, test_func] + tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) + return tasks + + def _create_ocsp_task(algo, variant, server_type, base_task_name): file_name = f"{algo}-basic-tls-ocsp-{variant}.json" diff --git a/.evergreen/scripts/mod_wsgi_tester.py b/.evergreen/scripts/mod_wsgi_tester.py new file mode 100644 index 0000000000..5968849068 --- /dev/null +++ b/.evergreen/scripts/mod_wsgi_tester.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import os +import sys +import time +import urllib.error +import urllib.request +from pathlib import Path +from shutil import which + +from utils import LOGGER, ROOT, run_command, write_env + + +def make_request(url, timeout=10): + for _ in range(int(timeout)): + try: + urllib.request.urlopen(url) # noqa: S310 + return + except urllib.error.HTTPError: + pass + time.sleep(1) + raise TimeoutError(f"Failed to access {url}") + + +def setup_mod_wsgi(sub_test_name: str) -> None: + env = os.environ.copy() + if sub_test_name == "embedded": + env["MOD_WSGI_CONF"] = "mod_wsgi_test_embedded.conf" + elif sub_test_name == "standalone": + env["MOD_WSGI_CONF"] = "mod_wsgi_test.conf" + else: + raise ValueError("mod_wsgi sub test must be either 'standalone' or 'embedded'") + write_env("MOD_WSGI_CONF", env["MOD_WSGI_CONF"]) + apache = which("apache2") + if not apache and Path("/usr/lib/apache2/mpm-prefork/apache2").exists(): + apache = "/usr/lib/apache2/mpm-prefork/apache2" + if apache: + apache_config = "apache24ubuntu161404.conf" + else: + apache = which("httpd") + if not apache: + raise ValueError("Could not find apache2 or httpd") + apache_config = "apache22amazon.conf" + python_version = ".".join(str(val) for val in sys.version_info[:2]) + mod_wsgi_version = 4 + so_file = f"/opt/python/mod_wsgi/python_version/{python_version}/mod_wsgi_version/{mod_wsgi_version}/mod_wsgi.so" + write_env("MOD_WSGI_SO", so_file) + env["MOD_WSGI_SO"] = so_file + env["PYTHONHOME"] = f"/opt/python/{python_version}" + env["PROJECT_DIRECTORY"] = project_directory = str(ROOT) + write_env("APACHE_BINARY", apache) + write_env("APACHE_CONFIG", apache_config) + uri1 = f"http://localhost:8080/interpreter1{project_directory}" + write_env("TEST_URI1", uri1) + uri2 = f"http://localhost:8080/interpreter2{project_directory}" + write_env("TEST_URI2", uri2) + run_command(f"{apache} -k start -f {ROOT}/test/mod_wsgi_test/{apache_config}", env=env) + + # Wait for the endpoints to be available. + try: + make_request(uri1, 10) + make_request(uri2, 10) + except Exception as e: + LOGGER.error(Path("error_log").read_text()) + raise e + + +def test_mod_wsgi() -> None: + sys.path.insert(0, ROOT) + from test.mod_wsgi_test.test_client import main, parse_args + + uri1 = os.environ["TEST_URI1"] + uri2 = os.environ["TEST_URI2"] + args = f"-n 25000 -t 100 parallel {uri1} {uri2}" + try: + main(*parse_args(args.split())) + + args = f"-n 25000 serial {uri1} {uri2}" + main(*parse_args(args.split())) + except Exception as e: + LOGGER.error(Path("error_log").read_text()) + raise e + + +def teardown_mod_wsgi() -> None: + apache = os.environ["APACHE_BINARY"] + apache_config = os.environ["APACHE_CONFIG"] + + run_command(f"{apache} -k stop -f {ROOT}/test/mod_wsgi_test/{apache_config}") + + +if __name__ == "__main__": + setup_mod_wsgi() diff --git a/.evergreen/scripts/run-mod-wsgi-tests.sh b/.evergreen/scripts/run-mod-wsgi-tests.sh deleted file mode 100755 index f59ace8116..0000000000 --- a/.evergreen/scripts/run-mod-wsgi-tests.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -o xtrace -set -o errexit - -APACHE=$(command -v apache2 || command -v /usr/lib/apache2/mpm-prefork/apache2) || true -if [ -n "$APACHE" ]; then - APACHE_CONFIG=apache24ubuntu161404.conf -else - APACHE=$(command -v httpd) || true - if [ -z "$APACHE" ]; then - echo "Could not find apache2 binary" - exit 1 - else - APACHE_CONFIG=apache22amazon.conf - fi -fi - - -PYTHON_VERSION=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") - -# Ensure the C extensions are installed. -${PYTHON_BINARY} -m venv --system-site-packages .venv -source .venv/bin/activate -pip install -U pip -export PYMONGO_C_EXT_MUST_BUILD=1 -python -m pip install -v -e . - -export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_version/$MOD_WSGI_VERSION/mod_wsgi.so -export PYTHONHOME=/opt/python/$PYTHON_VERSION -# If MOD_WSGI_EMBEDDED is set use the default embedded mode behavior instead -# of daemon mode (WSGIDaemonProcess). -if [ -n "${MOD_WSGI_EMBEDDED:-}" ]; then - export MOD_WSGI_CONF=mod_wsgi_test_embedded.conf -else - export MOD_WSGI_CONF=mod_wsgi_test.conf -fi - -cd .. -$APACHE -k start -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG} -trap '$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}' EXIT HUP - -wget -t 1 -T 10 -O - "http://localhost:8080/interpreter1${PROJECT_DIRECTORY}" || (cat error_log && exit 1) -wget -t 1 -T 10 -O - "http://localhost:8080/interpreter2${PROJECT_DIRECTORY}" || (cat error_log && exit 1) - -python ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel \ - http://localhost:8080/interpreter1${PROJECT_DIRECTORY} http://localhost:8080/interpreter2${PROJECT_DIRECTORY} || \ - (tail -n 100 error_log && exit 1) - -python ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial \ - http://localhost:8080/interpreter1${PROJECT_DIRECTORY} http://localhost:8080/interpreter2${PROJECT_DIRECTORY} || \ - (tail -n 100 error_log && exit 1) - -rm -rf .venv diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 38fd3c67cb..13a510475f 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -100,6 +100,13 @@ def run() -> None: if TEST_PERF: start_time = datetime.now() + # Run mod_wsgi tests using the helper. + if TEST_NAME == "mod_wsgi": + from mod_wsgi_tester import test_mod_wsgi + + test_mod_wsgi() + return + # Send kms tests to run remotely. if TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]: from kms_tester import test_kms_send_to_remote diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 53e3a568ba..59928271c9 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -251,6 +251,11 @@ def handle_test_env() -> None: cmd = f'bash "{DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh" start' run_command(cmd) + if test_name == "mod_wsgi": + from mod_wsgi_tester import setup_mod_wsgi + + setup_mod_wsgi(sub_test_name) + if test_name == "ocsp": if sub_test_name: os.environ["OCSP_SERVER_TYPE"] = sub_test_name @@ -381,7 +386,7 @@ def handle_test_env() -> None: # Use --capture=tee-sys so pytest prints test output inline: # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html TEST_ARGS = f"-v --capture=tee-sys --durations=5 {TEST_ARGS}" - TEST_SUITE = TEST_SUITE_MAP[test_name] + TEST_SUITE = TEST_SUITE_MAP.get(test_name) if TEST_SUITE: TEST_ARGS = f"-m {TEST_SUITE} {TEST_ARGS}" diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index 3920180422..750d2a0652 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -44,4 +44,10 @@ elif TEST_NAME == "auth_aws" and sys.platform != "darwin": run_command(f"bash {DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh") +# Tear down mog_wsgi if applicable. +elif TEST_NAME == "mod_wsgi": + from mod_wsgi_tester import teardown_mod_wsgi + + teardown_mod_wsgi() + LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index be17d756e3..cd55410cf6 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -50,7 +50,9 @@ class Distro: } # Tests that require a sub test suite. -SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms"] +SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi"] + +EXTRA_TESTS = ["mod_wsgi"] def get_test_options( @@ -62,7 +64,7 @@ def get_test_options( if require_sub_test_name: parser.add_argument( "test_name", - choices=sorted(TEST_SUITE_MAP), + choices=sorted(list(TEST_SUITE_MAP) + EXTRA_TESTS), nargs="?", default="default", help="The optional name of the test suite to set up, typically the same name as a pytest marker.", diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8844565d31..d2a833d874 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -275,6 +275,17 @@ Note: these tests can only be run from an Evergreen host. - Run `just setup-tests atlas_connect`. - Run `just run-tests`. +### mod_wsgi tests + +Note: these tests can only be run from an Evergreen Linux host that has the Python toolchain. + +- Run `just run-server`. +- Run `just setup-tests mod_wsgi `. +- Run `just run-tests`. + +The `mode` can be `standalone` or `embedded`. For the `replica_set` version of the tests, use +`TOPOLOGY=replica_set just run-server`. + ### OCSP tests - Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index 88eeb7a57e..c122863bfa 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -24,7 +24,7 @@ from urllib.request import urlopen -def parse_args(): +def parse_args(args=None): parser = OptionParser( """usage: %prog [options] mode url [...] @@ -70,7 +70,7 @@ def parse_args(): ) try: - options, args = parser.parse_args() + options, args = parser.parse_args(args or sys.argv[1:]) mode, urls = args[0], args[1:] except (ValueError, IndexError): parser.print_usage() @@ -103,11 +103,11 @@ def __init__(self, options, urls, nrequests_per_thread): def run(self): for _i in range(self.nrequests_per_thread): try: - get(urls) + get(self.urls) except Exception as e: print(e) - if not options.continue_: + if not self.options.continue_: thread.interrupt_main() thread.exit() @@ -117,7 +117,7 @@ def run(self): URLGetterThread.counter += 1 counter = URLGetterThread.counter - should_print = options.verbose and not counter % 1000 + should_print = self.options.verbose and not counter % 1000 if should_print: print(counter) From 166821f22c54175aa1af72ecdbd8c159537ec577 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Fri, 14 Mar 2025 21:37:11 -0400 Subject: [PATCH 1803/2111] PYTHON-5205 Replace http with https in doc links (#2204) --- doc/api/index.rst | 2 +- doc/async-tutorial.rst | 2 +- doc/changelog.rst | 10 +++---- doc/compatibility-policy.rst | 2 +- doc/conf.py | 2 +- doc/examples/aggregation.rst | 2 +- doc/examples/authentication.rst | 2 +- doc/examples/copydb.rst | 2 +- doc/examples/gevent.rst | 2 +- doc/examples/gridfs.rst | 2 +- doc/examples/high_availability.rst | 8 +++--- doc/examples/index.rst | 2 +- doc/examples/tls.rst | 2 +- doc/faq.rst | 14 ++++----- doc/index.rst | 6 ++-- doc/installation.rst | 6 ++-- doc/make.bat | 2 +- doc/tools.rst | 40 ++++++++++++-------------- doc/tutorial.rst | 2 +- pymongo/__init__.py | 10 +++---- pymongo/_asyncio_task.py | 2 +- pymongo/_azure_helpers.py | 2 +- pymongo/_client_bulk_shared.py | 2 +- pymongo/_cmessagemodule.c | 2 +- pymongo/_gcp_helpers.py | 2 +- pymongo/_version.py | 2 +- pymongo/asynchronous/aggregation.py | 2 +- pymongo/asynchronous/auth.py | 2 +- pymongo/asynchronous/auth_aws.py | 2 +- pymongo/asynchronous/auth_oidc.py | 2 +- pymongo/asynchronous/bulk.py | 2 +- pymongo/asynchronous/change_stream.py | 2 +- pymongo/asynchronous/client_bulk.py | 2 +- pymongo/asynchronous/client_session.py | 2 +- pymongo/asynchronous/collection.py | 2 +- pymongo/asynchronous/database.py | 2 +- pymongo/asynchronous/encryption.py | 2 +- pymongo/asynchronous/helpers.py | 2 +- pymongo/asynchronous/mongo_client.py | 4 +-- pymongo/asynchronous/monitor.py | 2 +- pymongo/asynchronous/network.py | 2 +- pymongo/asynchronous/pool.py | 4 +-- pymongo/asynchronous/server.py | 2 +- pymongo/asynchronous/settings.py | 2 +- pymongo/asynchronous/topology.py | 2 +- pymongo/auth.py | 2 +- pymongo/auth_oidc.py | 2 +- pymongo/auth_oidc_shared.py | 2 +- pymongo/auth_shared.py | 2 +- pymongo/bulk_shared.py | 2 +- pymongo/change_stream.py | 2 +- pymongo/client_options.py | 2 +- pymongo/client_session.py | 2 +- pymongo/collation.py | 2 +- pymongo/collection.py | 2 +- pymongo/common.py | 4 +-- pymongo/compression_support.py | 2 +- pymongo/daemon.py | 2 +- pymongo/database.py | 2 +- pymongo/database_shared.py | 2 +- pymongo/driver_info.py | 2 +- pymongo/encryption.py | 2 +- pymongo/encryption_options.py | 2 +- pymongo/errors.py | 2 +- pymongo/event_loggers.py | 2 +- pymongo/hello.py | 2 +- pymongo/helpers_shared.py | 2 +- pymongo/lock.py | 2 +- pymongo/logger.py | 2 +- pymongo/max_staleness_selectors.py | 2 +- pymongo/message.py | 2 +- pymongo/mongo_client.py | 2 +- pymongo/monitoring.py | 2 +- pymongo/network_layer.py | 2 +- pymongo/ocsp_cache.py | 2 +- pymongo/ocsp_support.py | 2 +- pymongo/operations.py | 2 +- pymongo/periodic_executor.py | 2 +- pymongo/pool.py | 2 +- pymongo/pool_options.py | 2 +- pymongo/pyopenssl_context.py | 2 +- pymongo/read_concern.py | 2 +- pymongo/read_preferences.py | 2 +- pymongo/response.py | 2 +- pymongo/results.py | 2 +- pymongo/saslprep.py | 2 +- pymongo/server_api.py | 2 +- pymongo/server_description.py | 2 +- pymongo/server_selectors.py | 2 +- pymongo/server_type.py | 2 +- pymongo/ssl_context.py | 2 +- pymongo/ssl_support.py | 2 +- pymongo/synchronous/aggregation.py | 2 +- pymongo/synchronous/auth.py | 2 +- pymongo/synchronous/auth_aws.py | 2 +- pymongo/synchronous/auth_oidc.py | 2 +- pymongo/synchronous/bulk.py | 2 +- pymongo/synchronous/change_stream.py | 2 +- pymongo/synchronous/client_bulk.py | 2 +- pymongo/synchronous/client_session.py | 2 +- pymongo/synchronous/collection.py | 2 +- pymongo/synchronous/database.py | 2 +- pymongo/synchronous/encryption.py | 2 +- pymongo/synchronous/helpers.py | 2 +- pymongo/synchronous/mongo_client.py | 4 +-- pymongo/synchronous/monitor.py | 2 +- pymongo/synchronous/network.py | 2 +- pymongo/synchronous/pool.py | 4 +-- pymongo/synchronous/server.py | 2 +- pymongo/synchronous/settings.py | 2 +- pymongo/synchronous/topology.py | 2 +- pymongo/topology_description.py | 2 +- pymongo/typings.py | 2 +- pymongo/uri_parser.py | 4 +-- pymongo/write_concern.py | 2 +- 115 files changed, 160 insertions(+), 162 deletions(-) diff --git a/doc/api/index.rst b/doc/api/index.rst index 437f2cc6a6..339f5843bf 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -3,7 +3,7 @@ API Documentation The PyMongo distribution contains three top-level packages for interacting with MongoDB. :mod:`bson` is an implementation of the -`BSON format `_, :mod:`pymongo` is a +`BSON format `_, :mod:`pymongo` is a full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools for working with the `GridFS `_ storage diff --git a/doc/async-tutorial.rst b/doc/async-tutorial.rst index 7a3a987111..1884631ec3 100644 --- a/doc/async-tutorial.rst +++ b/doc/async-tutorial.rst @@ -385,7 +385,7 @@ Indexing Adding indexes can help accelerate certain queries and can also add additional functionality to querying and storing documents. In this example, we'll demonstrate how to create a `unique index -`_ on a key that rejects +`_ on a key that rejects documents whose value for that key already exists in the index. First, we'll need to create the index: diff --git a/doc/changelog.rst b/doc/changelog.rst index 21e86953c6..a54d229075 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -3067,7 +3067,7 @@ fixes. Highlights include: :class:`~gridfs.grid_file.GridOutCursor`. - Greatly improved :doc:`support for mod_wsgi ` when using PyMongo's C extensions. Read `Jesse's blog post - `_ for details. + `_ for details. - Improved C extension support for ARM little endian. Breaking changes @@ -3322,7 +3322,7 @@ Important New Features: - Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework - `_. + `_. - Support for legacy Java and C# byte order when encoding and decoding UUIDs. - Support for connecting directly to an arbiter. @@ -3686,7 +3686,7 @@ Changes in Version 1.9 (2010/09/28) Version 1.9 adds a new package to the PyMongo distribution, :mod:`bson`. :mod:`bson` contains all of the `BSON -`_ encoding and decoding logic, and the BSON +`_ encoding and decoding logic, and the BSON types that were formerly in the :mod:`pymongo` package. The following modules have been renamed: @@ -3819,7 +3819,7 @@ Changes in Version 1.7 (2010/06/17) Version 1.7 is a recommended upgrade for all PyMongo users. The full release notes are below, and some more in depth discussion of the highlights is `here -`_. +`_. - no longer attempt to build the C extension on big-endian systems. - added :class:`~bson.min_key.MinKey` and @@ -3870,7 +3870,7 @@ The biggest change in version 1.6 is a complete re-implementation of :mod:`gridfs` with a lot of improvements over the old implementation. There are many details and examples of using the new API in `this blog post -`_. The +`_. The old API has been removed in this version, so existing code will need to be modified before upgrading to 1.6. diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst index 834f86ce54..9721877d4d 100644 --- a/doc/compatibility-policy.rst +++ b/doc/compatibility-policy.rst @@ -52,7 +52,7 @@ deprecated PyMongo features. .. seealso:: The Python documentation on `the warnings module`_, and `the -W command line option`_. -.. _semantic versioning: http://semver.org/ +.. _semantic versioning: https://semver.org/ .. _DeprecationWarning: https://docs.python.org/3/library/exceptions.html#DeprecationWarning diff --git a/doc/conf.py b/doc/conf.py index f82c719361..c3ee5d8900 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -88,7 +88,7 @@ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", - r"http://sourceforge.net/", + r"https://sourceforge.net/", ] # -- Options for extensions ---------------------------------------------------- diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst index 9b1a89fba7..e7e3df6ce1 100644 --- a/doc/examples/aggregation.rst +++ b/doc/examples/aggregation.rst @@ -87,4 +87,4 @@ you can add computed fields, create new virtual sub-objects, and extract sub-fields into the top-level of results. .. seealso:: The full documentation for MongoDB's `aggregation framework - `_ + `_ diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst index a92222bafc..3f1137969d 100644 --- a/doc/examples/authentication.rst +++ b/doc/examples/authentication.rst @@ -191,7 +191,7 @@ Two extra ``authMechanismProperties`` are supported on Windows platforms: >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_REALM:otherrealm" -.. _kerberos: http://pypi.python.org/pypi/kerberos +.. _kerberos: https://pypi.python.org/pypi/kerberos .. _pykerberos: https://pypi.python.org/pypi/pykerberos .. _winkerberos: https://pypi.python.org/pypi/winkerberos/ diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst index b37677b5c2..c8026ba05f 100644 --- a/doc/examples/copydb.rst +++ b/doc/examples/copydb.rst @@ -67,7 +67,7 @@ Versions of PyMongo before 3.0 included a ``copy_database`` helper method, but it has been removed. .. _copyDatabase function in the mongo shell: - http://mongodb.com/docs/manual/reference/method/db.copyDatabase/ + https://mongodb.com/docs/manual/reference/method/db.copyDatabase/ .. _Copy a Database: https://www.mongodb.com/docs/database-tools/mongodump/mongodump-examples/#copy-and-clone-databases diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst index 0ab41c1ec6..f62697d19f 100644 --- a/doc/examples/gevent.rst +++ b/doc/examples/gevent.rst @@ -1,7 +1,7 @@ Gevent ====== -PyMongo supports `Gevent `_. Simply call Gevent's +PyMongo supports `Gevent `_. Simply call Gevent's ``monkey.patch_all()`` before loading any other modules: .. code-block:: pycon diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst index 5f40805d79..52920adbda 100644 --- a/doc/examples/gridfs.rst +++ b/doc/examples/gridfs.rst @@ -14,7 +14,7 @@ objects (e.g. files) in MongoDB. .. seealso:: The API docs for :mod:`gridfs`. .. seealso:: `This blog post - `_ + `_ for some motivation behind this API. Setup diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst index 8f94aba074..80026153f8 100644 --- a/doc/examples/high_availability.rst +++ b/doc/examples/high_availability.rst @@ -2,7 +2,7 @@ High Availability and PyMongo ============================= PyMongo makes it easy to write highly available applications whether -you use a `single replica set `_ +you use a `single replica set `_ or a `large sharded cluster `_. @@ -10,17 +10,17 @@ Connecting to a Replica Set --------------------------- PyMongo makes working with `replica sets -`_ easy. Here we'll launch a new +`_ easy. Here we'll launch a new replica set and show how to handle both initialization and normal connections with PyMongo. -.. seealso:: The MongoDB documentation on `replication `_. +.. seealso:: The MongoDB documentation on `replication `_. Starting a Replica Set ~~~~~~~~~~~~~~~~~~~~~~ The main `replica set documentation -`_ contains extensive information +`_ contains extensive information about setting up a new replica set or migrating an existing MongoDB setup, be sure to check that out. Here, we'll just do the bare minimum to get a three node replica set setup locally. diff --git a/doc/examples/index.rst b/doc/examples/index.rst index ac450470ef..57682fa1af 100644 --- a/doc/examples/index.rst +++ b/doc/examples/index.rst @@ -6,7 +6,7 @@ of how to accomplish specific tasks with MongoDB and PyMongo. Unless otherwise noted, all examples assume that a MongoDB instance is running on the default host and port. Assuming you have `downloaded -and installed `_ +and installed `_ MongoDB, you can start it like so: .. code-block:: bash diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst index 9241ac23e7..ee4d75027e 100644 --- a/doc/examples/tls.rst +++ b/doc/examples/tls.rst @@ -3,7 +3,7 @@ TLS/SSL and PyMongo PyMongo supports connecting to MongoDB over TLS/SSL. This guide covers the configuration options supported by PyMongo. See `the server documentation -`_ to configure +`_ to configure MongoDB. .. warning:: Industry best practices recommend, and some regulations require, diff --git a/doc/faq.rst b/doc/faq.rst index 73d0ec8966..7656481d89 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -53,9 +53,9 @@ a non `async-signal-safe`_ function. For examples of deadlocks or crashes that could occur see `PYTHON-3406`_. For a long but interesting read about the problems of Python locks in -multithreaded contexts with ``fork()``, see http://bugs.python.org/issue6721. +multithreaded contexts with ``fork()``, see https://bugs.python.org/issue6721. -.. _not fork-safe: http://bugs.python.org/issue6721 +.. _not fork-safe: https://bugs.python.org/issue6721 .. _OpenSSL: https://github.com/openssl/openssl/issues/19066 .. _fork(): https://man7.org/linux/man-pages/man2/fork.2.html .. _signal-safety(7): https://man7.org/linux/man-pages/man7/signal-safety.7.html @@ -174,10 +174,10 @@ Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or T PyMongo fully supports :doc:`Gevent `. To use MongoDB with `asyncio `_ -or `Tornado `_, see the +or `Tornado `_, see the `Motor `_ project. -For `Twisted `_, see `TxMongo +For `Twisted `_, see `TxMongo `_. Its stated mission is to keep feature parity with PyMongo. @@ -381,7 +381,7 @@ Can you add attribute style access for documents? ------------------------------------------------- This request has come up a number of times but we've decided not to implement anything like this. The relevant `jira case -`_ has some information +`_ has some information about the decision, but here is a brief summary: 1. This will pollute the attribute namespace for documents, so could @@ -451,7 +451,7 @@ in Flask_ (other web frameworks are similar):: How can I use PyMongo from Django? ---------------------------------- -`Django `_ is a popular Python web +`Django `_ is a popular Python web framework. Django includes an ORM, :mod:`django.db`. Currently, there's no official MongoDB backend for Django. @@ -468,7 +468,7 @@ using just MongoDB, but most of what Django provides can still be used. One project which should make working with MongoDB and Django easier -is `mango `_. Mango is a set of +is `mango `_. Mango is a set of MongoDB backends for Django sessions and authentication (bypassing :mod:`django.db` entirely). diff --git a/doc/index.rst b/doc/index.rst index 079738314a..c7616ca795 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -9,7 +9,7 @@ PyMongo |release| Documentation Overview -------- **PyMongo** is a Python distribution containing tools for working with -`MongoDB `_, and is the recommended way to +`MongoDB `_, and is the recommended way to work with MongoDB from Python. This documentation attempts to explain everything you need to know to use **PyMongo**. @@ -81,7 +81,7 @@ Issues ------ All issues should be reported (and can be tracked / voted for / commented on) at the main `MongoDB JIRA bug tracker -`_, in the "Python Driver" +`_, in the "Python Driver" project. Feature Requests / Feedback @@ -94,7 +94,7 @@ Contributing **PyMongo** has a large :doc:`community ` and contributions are always encouraged. Contributions can be as simple as minor tweaks to this documentation. To contribute, fork the project on -`GitHub `_ and send a +`GitHub `_ and send a pull request. Changes diff --git a/doc/installation.rst b/doc/installation.rst index abda06db16..837cbf4d97 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -3,7 +3,7 @@ Installing / Upgrading .. highlight:: bash **PyMongo** is in the `Python Package Index -`_. +`_. .. warning:: **Do not install the "bson" package from pypi.** PyMongo comes with its own bson package; doing "pip install bson" @@ -12,7 +12,7 @@ Installing / Upgrading Installing with pip ------------------- -We recommend using `pip `_ +We recommend using `pip `_ to install pymongo on all platforms:: $ python3 -m pip install pymongo @@ -136,7 +136,7 @@ is a workaround:: # For some Python builds from python.org $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m pip install pymongo -See `http://bugs.python.org/issue11623 `_ +See `https://bugs.python.org/issue11623 `_ for a more detailed explanation. **Lion (10.7) and newer** - PyMongo's C extensions can be built against diff --git a/doc/make.bat b/doc/make.bat index 2119f51099..aa1adb91a6 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -21,7 +21,7 @@ if errorlevel 9009 ( echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ + echo.https://sphinx-doc.org/ exit /b 1 ) diff --git a/doc/tools.rst b/doc/tools.rst index 7ec3ddb443..a3f167d024 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -31,7 +31,7 @@ MongoEngine layer on top of PyMongo. It allows you to define schemas for documents and query collections using syntax inspired by the Django ORM. The code is available on `GitHub - `_; for more information, see + `_; for more information, see the `tutorial `_. MincePy @@ -47,17 +47,15 @@ Ming `Ming `_ is a library that allows you to enforce schemas on a MongoDB database in your Python application. It was developed by `SourceForge - `_ in the course of their migration to - MongoDB. See the `introductory blog post - `_ - for more details. + `_ in the course of their migration to + MongoDB. MotorEngine `MotorEngine `_ is a port of MongoEngine to Motor, for asynchronous access with Tornado. It implements the same modeling APIs to be data-portable, meaning that a model defined in MongoEngine can be read in MotorEngine. The source is - `available on GitHub `_. + `available on GitHub `_. uMongo `uMongo `_ is a Python MongoDB ODM. @@ -89,12 +87,12 @@ PyMODM `_. MongoKit - The `MongoKit `_ framework + The `MongoKit `_ framework is an ORM-like layer on top of PyMongo. There is also a MongoKit - `google group `_. + `google group `_. Minimongo - `minimongo `_ is a lightweight, + `minimongo `_ is a lightweight, pythonic interface to MongoDB. It retains pymongo's query and update API, and provides a number of additional features, including a simple document-oriented interface, connection pooling, index management, and @@ -102,15 +100,15 @@ Minimongo `_. Manga - `Manga `_ aims to be a simpler ORM-like + `Manga `_ aims to be a simpler ORM-like layer on top of PyMongo. The syntax for defining schema is inspired by the Django ORM, but Pymongo's query language is maintained. The source `is on - GitHub `_. + GitHub `_. Humongolus `Humongolus `_ is a lightweight ORM framework for Python and MongoDB. The name comes from the combination of - MongoDB and `Homunculus `_ (the + MongoDB and `Homunculus `_ (the concept of a miniature though fully formed human body). Humongolus allows you to create models/schemas with robust validation. It attempts to be as pythonic as possible and exposes the pymongo cursor objects whenever @@ -133,30 +131,30 @@ various Python frameworks and libraries. database backend for Django that completely integrates with its ORM. For more information `see the tutorial `_. -* `mango `_ provides MongoDB backends for +* `mango `_ provides MongoDB backends for Django sessions and authentication (bypassing :mod:`django.db` entirely). * `Django MongoEngine `_ is a MongoDB backend for Django, an `example: `_. For more information see ``_ -* `mongodb_beaker `_ is a +* `mongodb_beaker `_ is a project to enable using MongoDB as a backend for `beakers `_ caching / session system. - `The source is on GitHub `_. + `The source is on GitHub `_. * `Log4Mongo `_ is a flexible Python logging handler that can store logs in MongoDB using normal and capped collections. -* `MongoLog `_ is a Python logging +* `MongoLog `_ is a Python logging handler that stores logs in MongoDB using a capped collection. -* `rod.recipe.mongodb `_ is a +* `rod.recipe.mongodb `_ is a ZC Buildout recipe for downloading and installing MongoDB. -* `mongobox `_ is a tool to run a sandboxed +* `mongobox `_ is a tool to run a sandboxed MongoDB instance from within a python app. -* `Flask-MongoAlchemy `_ Add +* `Flask-MongoAlchemy `_ Add Flask support for MongoDB using MongoAlchemy. -* `Flask-MongoKit `_ Flask extension +* `Flask-MongoKit `_ Flask extension to better integrate MongoKit into Flask. -* `Flask-PyMongo `_ Flask-PyMongo +* `Flask-PyMongo `_ Flask-PyMongo bridges Flask and PyMongo. Alternative Drivers diff --git a/doc/tutorial.rst b/doc/tutorial.rst index e33936363d..46bde3035d 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -375,7 +375,7 @@ Indexing Adding indexes can help accelerate certain queries and can also add additional functionality to querying and storing documents. In this example, we'll demonstrate how to create a `unique index -`_ on a key that rejects +`_ on a key that rejects documents whose value for that key already exists in the index. First, we'll need to create the index: diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 8d6def1606..e392508912 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -55,7 +55,7 @@ GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`_. -.. _geospatial index: http://mongodb.com/docs/manual/core/2d/ +.. _geospatial index: https://mongodb.com/docs/manual/core/2d/ """ GEOSPHERE = "2dsphere" @@ -63,7 +63,7 @@ .. versionadded:: 2.5 -.. _spherical geospatial index: http://mongodb.com/docs/manual/core/2dsphere/ +.. _spherical geospatial index: https://mongodb.com/docs/manual/core/2dsphere/ """ HASHED = "hashed" @@ -71,7 +71,7 @@ .. versionadded:: 2.5 -.. _hashed index: http://mongodb.com/docs/manual/core/index-hashed/ +.. _hashed index: https://mongodb.com/docs/manual/core/index-hashed/ """ TEXT = "text" @@ -83,7 +83,7 @@ .. versionadded:: 2.7.1 -.. _text index: http://mongodb.com/docs/manual/core/index-text/ +.. _text index: https://mongodb.com/docs/manual/core/index-text/ """ from pymongo import _csot diff --git a/pymongo/_asyncio_task.py b/pymongo/_asyncio_task.py index 8e457763d9..7a528f027d 100644 --- a/pymongo/_asyncio_task.py +++ b/pymongo/_asyncio_task.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/_azure_helpers.py b/pymongo/_azure_helpers.py index 6e86ab5670..8a7af0b407 100644 --- a/pymongo/_azure_helpers.py +++ b/pymongo/_azure_helpers.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/_client_bulk_shared.py b/pymongo/_client_bulk_shared.py index 649f1c6aa0..5814025566 100644 --- a/pymongo/_client_bulk_shared.py +++ b/pymongo/_client_bulk_shared.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index eb457b341c..a506863737 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/_gcp_helpers.py b/pymongo/_gcp_helpers.py index d90f3cc217..7979d1e807 100644 --- a/pymongo/_gcp_helpers.py +++ b/pymongo/_gcp_helpers.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/_version.py b/pymongo/_version.py index f7a1f3dcb3..985acfd81b 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/aggregation.py b/pymongo/asynchronous/aggregation.py index 7684151897..daccd1bcb0 100644 --- a/pymongo/asynchronous/aggregation.py +++ b/pymongo/asynchronous/aggregation.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py index 8cc4edf19c..c1321f1d90 100644 --- a/pymongo/asynchronous/auth.py +++ b/pymongo/asynchronous/auth.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/auth_aws.py b/pymongo/asynchronous/auth_aws.py index 9dcc625d19..210d306046 100644 --- a/pymongo/asynchronous/auth_aws.py +++ b/pymongo/asynchronous/auth_aws.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py index 38346648c5..217c8104a2 100644 --- a/pymongo/asynchronous/auth_oidc.py +++ b/pymongo/asynchronous/auth_oidc.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index 6770d7b34e..1ea6fd60d9 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/change_stream.py b/pymongo/asynchronous/change_stream.py index f405e91161..6c37f9d05f 100644 --- a/pymongo/asynchronous/change_stream.py +++ b/pymongo/asynchronous/change_stream.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 45824256da..1100527552 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index e9548b0ec4..98dd6a4706 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index e83a391439..aef3539e8c 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index 4aba9ab0e9..d0089eb4ee 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index ef8d817b2c..b18ed53f92 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index d519e8749c..88b710345b 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index eefafd5fda..ecd57a1886 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -192,7 +192,7 @@ def __init__( execute. The `host` parameter can be a full `mongodb URI - `_, in addition to + `_, in addition to a simple hostname. It can also be a list of hostnames but no more than one URI. Any port specified in the host string(s) will override the `port` parameter. For username and diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index d7f87b718a..5cb42f4d46 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index c7a5580eca..e529a52ee9 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 698558aa5d..d06c528e78 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -964,7 +964,7 @@ class PoolState: # Do *not* explicitly inherit from object or Jython won't call __del__ -# http://bugs.jython.org/issue1057 +# https://bugs.jython.org/issue1057 class Pool: def __init__( self, diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index 72f22584e2..3ad8374b00 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/settings.py b/pymongo/asynchronous/settings.py index 1103e1bd18..62be853fba 100644 --- a/pymongo/asynchronous/settings.py +++ b/pymongo/asynchronous/settings.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index f00f62ffe5..d83ceca55b 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/auth.py b/pymongo/auth.py index a65113841d..a36f3f4233 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py index 4ac266de5f..61764b8111 100644 --- a/pymongo/auth_oidc.py +++ b/pymongo/auth_oidc.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/auth_oidc_shared.py b/pymongo/auth_oidc_shared.py index 9e0acaf6c8..d33397f52d 100644 --- a/pymongo/auth_oidc_shared.py +++ b/pymongo/auth_oidc_shared.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py index 410521d73a..5a9a2b6732 100644 --- a/pymongo/auth_shared.py +++ b/pymongo/auth_shared.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/bulk_shared.py b/pymongo/bulk_shared.py index 7aa6340d55..9276419d8a 100644 --- a/pymongo/bulk_shared.py +++ b/pymongo/bulk_shared.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index b96a1750cf..f9abddec44 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 9b9b88a736..a66e87c9f6 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 1a3af44e12..db72b0b2e1 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/collation.py b/pymongo/collation.py index fc84b937f2..8a1eca7aff 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/collection.py b/pymongo/collection.py index f726ed0376..16063425a7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/common.py b/pymongo/common.py index 4be7a3122a..3d8095eedf 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -160,7 +160,7 @@ def clean_node(node: str) -> tuple[str, int]: host, port = partition_node(node) # Normalize hostname to lowercase, since DNS is case-insensitive: - # http://tools.ietf.org/html/rfc4343 + # https://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the hello response. return host.lower(), port diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 7486451730..db14b8d83f 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/daemon.py b/pymongo/daemon.py index b40384df13..be976decd9 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/database.py b/pymongo/database.py index bbd05702dc..f85b312f91 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/database_shared.py b/pymongo/database_shared.py index 2d4e37feef..d6563a4b3d 100644 --- a/pymongo/database_shared.py +++ b/pymongo/database_shared.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 724a6f20d5..f24321d973 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 5bc2a75909..71c1d4b723 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index a1c40dc7b2..02fcc98e46 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/errors.py b/pymongo/errors.py index 2cd1081e3b..794b5a9398 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 86b53c6376..80acaa10c0 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/hello.py b/pymongo/hello.py index c30b825e19..1eb40ed929 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/helpers_shared.py b/pymongo/helpers_shared.py index c6b820c1c2..a664e87a69 100644 --- a/pymongo/helpers_shared.py +++ b/pymongo/helpers_shared.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/lock.py b/pymongo/lock.py index 6bf7138017..ad990fce3f 100644 --- a/pymongo/lock.py +++ b/pymongo/lock.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/logger.py b/pymongo/logger.py index 9079dc3f3d..1b3fe43b86 100644 --- a/pymongo/logger.py +++ b/pymongo/logger.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 89bfa65281..5f1e404720 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/message.py b/pymongo/message.py index 8e2fd6f990..d51c77a174 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index a815cbc8a9..778abe27ef 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 38d6e3a22a..101a8fbc37 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 11c66bf16e..4512aba59f 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 3facefe350..2df232848f 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index ee359b71c2..8322f821fb 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/operations.py b/pymongo/operations.py index 482ab68003..300f1ba123 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 5f54b243ec..323debdce2 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/pool.py b/pymongo/pool.py index fbbb70fc68..456ff3df0a 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index 038dbb3b5d..a2e309cc56 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 8c643394b2..0cc35c4f66 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index 17f3a46edb..2adc403366 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 8c6e6de45d..581f7ca66f 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/response.py b/pymongo/response.py index e47749423f..211ddf2354 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/results.py b/pymongo/results.py index d17ff1c3ea..bcce121fe7 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index 7fb546f61b..9cef22419e 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/server_api.py b/pymongo/server_api.py index 4a746008c4..40bb1aac3e 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 064ad43375..afc5346bb7 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index c22ad599ee..0d1425ab31 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/server_type.py b/pymongo/server_type.py index 937855cc7a..7a6d2aaf14 100644 --- a/pymongo/server_type.py +++ b/pymongo/server_type.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index ee32145c02..2ff7428cab 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 0faf21ba8f..2e6a509e3e 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/aggregation.py b/pymongo/synchronous/aggregation.py index 7c7e6252f7..3eb0c8bf54 100644 --- a/pymongo/synchronous/aggregation.py +++ b/pymongo/synchronous/aggregation.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py index 6041ebdbe3..650e25234d 100644 --- a/pymongo/synchronous/auth.py +++ b/pymongo/synchronous/auth.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/auth_aws.py b/pymongo/synchronous/auth_aws.py index 7c0d24f3a1..c7ea47886f 100644 --- a/pymongo/synchronous/auth_aws.py +++ b/pymongo/synchronous/auth_aws.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py index c5efdd5fcc..8a8703c142 100644 --- a/pymongo/synchronous/auth_oidc.py +++ b/pymongo/synchronous/auth_oidc.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index 0b709f1acf..f54dcdd42d 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py index 43aab39ee1..304427b89b 100644 --- a/pymongo/synchronous/change_stream.py +++ b/pymongo/synchronous/change_stream.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 9f6e3f7cf0..e6de22d237 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index af7ff59b3d..60c15a9ec0 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index b956ac58a5..fe869a622d 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 0dc03cb746..a11674b9aa 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index a97534ed41..724131fa93 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index f800e7dcc8..bc69a49e80 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index b101636066..79b6cf6ed9 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -187,7 +187,7 @@ def __init__( execute. The `host` parameter can be a full `mongodb URI - `_, in addition to + `_, in addition to a simple hostname. It can also be a list of hostnames but no more than one URI. Any port specified in the host string(s) will override the `port` parameter. For username and diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index c39a57c392..5b45ed9a4d 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py index 543b069bfc..0e53e806b0 100644 --- a/pymongo/synchronous/network.py +++ b/pymongo/synchronous/network.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index e575710ff5..cd78e26fea 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -960,7 +960,7 @@ class PoolState: # Do *not* explicitly inherit from object or Jython won't call __del__ -# http://bugs.jython.org/issue1057 +# https://bugs.jython.org/issue1057 class Pool: def __init__( self, diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py index ed48cc6cc8..5b8a8e3919 100644 --- a/pymongo/synchronous/server.py +++ b/pymongo/synchronous/server.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/settings.py b/pymongo/synchronous/settings.py index 040776713f..bb17de1874 100644 --- a/pymongo/synchronous/settings.py +++ b/pymongo/synchronous/settings.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 0af793a969..bf9011830d 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 742bbf8c6e..29293b2314 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/typings.py b/pymongo/typings.py index 68962eb540..ce6f369d1f 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index 8f56ae4093..ee7ca9c205 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -164,7 +164,7 @@ def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Addr port = int(port) # Normalize hostname to lowercase, since DNS is case-insensitive: - # http://tools.ietf.org/html/rfc4343 + # https://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the hello response. return host.lower(), port diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index 21faeebed0..ff31c6730d 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, From 4a9e90a24b24c9b17f56ea896eba0e5e7fc101d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 06:25:51 -0500 Subject: [PATCH 1804/2111] Bump extractions/setup-just from 2 to 3 in the actions group (#2207) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test-python.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index c8ecb80091..3c3eef989e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -23,7 +23,7 @@ jobs: with: persist-credentials: false - name: Install just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 - name: Install uv uses: astral-sh/setup-uv@v5 with: @@ -63,7 +63,7 @@ jobs: with: persist-credentials: false - name: Install just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 - name: Install uv uses: astral-sh/setup-uv@v5 with: @@ -86,7 +86,7 @@ jobs: with: persist-credentials: false - name: Install just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 - name: Install uv uses: astral-sh/setup-uv@v5 with: @@ -114,7 +114,7 @@ jobs: enable-cache: true python-version: "3.9" - name: Install just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 - name: Install dependencies run: just install - name: Build docs @@ -133,7 +133,7 @@ jobs: enable-cache: true python-version: "3.9" - name: Install just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 - name: Install dependencies run: just install - name: Build docs @@ -155,7 +155,7 @@ jobs: enable-cache: true python-version: "${{matrix.python}}" - name: Install just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 - name: Install dependencies run: | just install From 1d866b3cb4ed6de0444f5126f370905d45ef1d43 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 17 Mar 2025 18:27:43 -0500 Subject: [PATCH 1805/2111] PYTHON-5216 & PYTHON-5185 Convert perf tests to use new scripts and use specific python version (#2206) --- .evergreen/config.yml | 101 ---------------------- .evergreen/generated_configs/tasks.yml | 54 ++++++++++++ .evergreen/generated_configs/variants.yml | 9 ++ .evergreen/run-perf-tests.sh | 19 ---- .evergreen/run-tests.sh | 5 +- .evergreen/scripts/generate_config.py | 28 ++++++ .evergreen/scripts/run-perf-tests.sh | 4 - .evergreen/scripts/run_tests.py | 5 -- .evergreen/scripts/setup_tests.py | 16 ++++ .evergreen/scripts/teardown_tests.py | 13 ++- .evergreen/scripts/utils.py | 2 +- .gitignore | 2 + CONTRIBUTING.md | 6 ++ 13 files changed, 131 insertions(+), 133 deletions(-) delete mode 100755 .evergreen/run-perf-tests.sh delete mode 100755 .evergreen/scripts/run-perf-tests.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index c54c688e46..ee29d65f2b 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -304,17 +304,6 @@ functions: args: - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh - "run perf tests": - - command: subprocess.exec - type: test - params: - working_dir: "src" - binary: bash - include_expansions_in_env: [SUB_TEST_NAME] - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-perf-tests.sh - "attach benchmark test results": - command: attach.results params: @@ -461,84 +450,6 @@ tasks: commands: - func: "download and merge coverage" - - name: "perf-6.0-standalone" - tags: ["perf"] - commands: - - func: "run server" - vars: - VERSION: "v6.0-perf" - - func: "run perf tests" - vars: - SUB_TEST_NAME: "sync" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-6.0-standalone-ssl" - tags: ["perf"] - commands: - - func: "run server" - vars: - VERSION: "v6.0-perf" - SSL: "ssl" - - func: "run perf tests" - vars: - SUB_TEST_NAME: "sync" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-8.0-standalone" - tags: ["perf"] - commands: - - func: "run server" - vars: - VERSION: "8.0" - - func: "run perf tests" - vars: - SUB_TEST_NAME: "sync" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-6.0-standalone-async" - tags: [ "perf" ] - commands: - - func: "run server" - vars: - VERSION: "v6.0-perf" - TOPOLOGY: "server" - - func: "run perf tests" - vars: - SUB_TEST_NAME: "async" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-6.0-standalone-ssl-async" - tags: [ "perf" ] - commands: - - func: "run server" - vars: - VERSION: "v6.0-perf" - TOPOLOGY: "server" - SSL: "ssl" - - func: "run perf tests" - vars: - SUB_TEST_NAME: "async" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-8.0-standalone-async" - tags: [ "perf" ] - commands: - - func: "run server" - vars: - VERSION: "8.0" - TOPOLOGY: "server" - - func: "run perf tests" - vars: - SUB_TEST_NAME: "async" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "check-import-time" tags: ["pr"] commands: @@ -610,15 +521,3 @@ buildvariants: - rhel8.7-small tasks: - name: "backport-pr" - -- name: "perf-tests" - display_name: "Performance Benchmarks" - batchtime: 10080 # 7 days - run_on: rhel90-dbx-perf-large - tasks: - - name: "perf-6.0-standalone" - - name: "perf-6.0-standalone-ssl" - - name: "perf-8.0-standalone" - - name: "perf-6.0-standalone-async" - - name: "perf-6.0-standalone-ssl-async" - - name: "perf-8.0-standalone-async" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 070b163e90..5b5cf92d68 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1151,6 +1151,60 @@ tasks: SUB_TEST_NAME: gke tags: [auth_oidc, auth_oidc_remote] + # Perf tests + - name: perf-8.0-standalone-ssl + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: ssl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: sync + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + - name: perf-8.0-standalone-ssl-async + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: ssl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: async + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + - name: perf-8.0-standalone + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: nossl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: sync + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + - name: perf-8.0-standalone-async + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: nossl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: async + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + # Server tests - name: test-4.0-standalone-auth-ssl-sync commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index d70afa2bdd..864b061a15 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -928,6 +928,15 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 + # Perf tests + - name: performance-benchmarks + tasks: + - name: .perf + display_name: Performance Benchmarks + run_on: + - rhel90-dbx-perf-large + batchtime: 10080 + # Pyopenssl tests - name: pyopenssl-macos-python3.9 tasks: diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh deleted file mode 100755 index cf88b93710..0000000000 --- a/.evergreen/run-perf-tests.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -git clone --depth 1 https://github.com/mongodb/specifications.git -pushd specifications/source/benchmarking/data -tar xf extended_bson.tgz -tar xf parallel.tgz -tar xf single_and_multi_document.tgz -popd - -export TEST_PATH="${PROJECT_DIRECTORY}/specifications/source/benchmarking/data" -export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" - -export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3 - -bash ./.evergreen/just.sh setup-tests perf "${SUB_TEST_NAME}" -bash ./.evergreen/just.sh run-tests diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index f9a853f27c..40336c6d2d 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -31,9 +31,10 @@ if [ -f "./secrets-export.sh" ]; then fi # List the packages. -PIP_QUIET=0 uv run ${UV_ARGS} --with pip pip list +uv sync ${UV_ARGS} --reinstall +uv pip list # Start the test runner. -uv run ${UV_ARGS} .evergreen/scripts/run_tests.py "$@" +uv run .evergreen/scripts/run_tests.py "$@" popd diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b90a6af437..b9f9377066 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -69,6 +69,7 @@ class Host: HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict()) HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) +HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict()) DEFAULT_HOST = HOSTS["rhel8"] # Other hosts @@ -722,6 +723,13 @@ def create_atlas_connect_variants(): ] +def create_perf_variants(): + host = HOSTS["perf"] + return [ + create_variant([".perf"], "Performance Benchmarks", host=host, batchtime=BATCHTIME_WEEK) + ] + + def create_aws_auth_variants(): variants = [] @@ -942,6 +950,26 @@ def create_enterprise_auth_tasks(): return [EvgTask(name=task_name, tags=tags, commands=[server_func, assume_func, test_func])] +def create_perf_tasks(): + tasks = [] + for version, ssl, sync in product(["8.0"], ["ssl", "nossl"], ["sync", "async"]): + vars = dict(VERSION=f"v{version}-perf", SSL=ssl) + server_func = FunctionCall(func="run server", vars=vars) + vars = dict(TEST_NAME="perf", SUB_TEST_NAME=sync) + test_func = FunctionCall(func="run tests", vars=vars) + attach_func = FunctionCall(func="attach benchmark test results") + send_func = FunctionCall(func="send dashboard data") + task_name = f"perf-{version}-standalone" + if ssl == "ssl": + task_name += "-ssl" + if sync == "async": + task_name += "-async" + tags = ["perf"] + commands = [server_func, test_func, attach_func, send_func] + tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) + return tasks + + def create_ocsp_tasks(): tasks = [] tests = [ diff --git a/.evergreen/scripts/run-perf-tests.sh b/.evergreen/scripts/run-perf-tests.sh deleted file mode 100755 index e1c1311d67..0000000000 --- a/.evergreen/scripts/run-perf-tests.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -PROJECT_DIRECTORY=${PROJECT_DIRECTORY} -SUB_TEST_NAME=${SUB_TEST_NAME} bash "${PROJECT_DIRECTORY}"/.evergreen/run-perf-tests.sh diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 13a510475f..2e23a366b1 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -4,7 +4,6 @@ import logging import os import platform -import shutil import sys from datetime import datetime @@ -142,10 +141,6 @@ def run() -> None: if TEST_PERF: handle_perf(start_time) - # Handle coverage post actions. - if os.environ.get("COVERAGE"): - shutil.rmtree(".pytest_cache", ignore_errors=True) - if __name__ == "__main__": run() diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 59928271c9..6d7c8037c7 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -43,6 +43,9 @@ # Map the test name to test group. GROUP_MAP = dict(mockupdb="mockupdb", perf="perf") +# The python version used for perf tests. +PERF_PYTHON_VERSION = "3.9.13" + def is_set(var: str) -> bool: value = os.environ.get(var, "") @@ -362,6 +365,19 @@ def handle_test_env() -> None: write_env("DISABLE_CONTEXT") if test_name == "perf": + data_dir = ROOT / "specifications/source/benchmarking/data" + if not data_dir.exists(): + run_command("git clone --depth 1 https://github.com/mongodb/specifications.git") + run_command("tar xf extended_bson.tgz", cwd=data_dir) + run_command("tar xf parallel.tgz", cwd=data_dir) + run_command("tar xf single_and_multi_document.tgz", cwd=data_dir) + write_env("TEST_PATH", str(data_dir)) + write_env("OUTPUT_FILE", str(ROOT / "results.json")) + # Overwrite the UV_PYTHON from the env.sh file. + write_env("UV_PYTHON", "") + + UV_ARGS.append(f"--python={PERF_PYTHON_VERSION}") + # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively # affects the benchmark results. if sub_test_name == "sync": diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index 750d2a0652..b081478423 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -1,9 +1,11 @@ from __future__ import annotations import os +import shutil import sys +from pathlib import Path -from utils import DRIVERS_TOOLS, LOGGER, run_command +from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command TEST_NAME = os.environ.get("TEST_NAME", "unconfigured") SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") @@ -44,10 +46,19 @@ elif TEST_NAME == "auth_aws" and sys.platform != "darwin": run_command(f"bash {DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh") +# Tear down perf if applicable. +elif TEST_NAME == "perf": + shutil.rmtree(ROOT / "specifications", ignore_errors=True) + Path(os.environ["OUTPUT_FILE"]).unlink(missing_ok=True) + # Tear down mog_wsgi if applicable. elif TEST_NAME == "mod_wsgi": from mod_wsgi_tester import teardown_mod_wsgi teardown_mod_wsgi() +# Tear down coverage if applicable. +if os.environ.get("COVERAGE"): + shutil.rmtree(".pytest_cache", ignore_errors=True) + LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index cd55410cf6..535e392ea2 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -50,7 +50,7 @@ class Distro: } # Tests that require a sub test suite. -SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi"] +SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"] EXTRA_TESTS = ["mod_wsgi"] diff --git a/.gitignore b/.gitignore index 8c095c2157..966059e693 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,8 @@ expansion.yml *expansions.yml .evergreen/scripts/env.sh .evergreen/scripts/test-env.sh +specifications/ +results.json # Lambda temp files test/lambda/.aws-sam diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d2a833d874..47eb01dbf0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -297,6 +297,12 @@ The `mode` can be `standalone` or `embedded`. For the `replica_set` version of If you are running one of the `no-responder` tests, omit the `run-server` step. +### Perf Tests + +- Start the appropriate server, e.g. `just run-server --version=v8.0-perf --ssl`. +- Set up the tests with `sync` or `async`: `just setup-tests perf sync`. +- Run the tests: `just run-tests`. + ## Enable Debug Logs - Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. - Add `log_cli_level = "DEBUG` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine. From dc44b4912849e7e5c72ba0bd85d31957d1fff1dd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 18 Mar 2025 20:45:47 -0500 Subject: [PATCH 1806/2111] PYTHON-5217 Update Atlas Data Lake tests (#2209) --- .evergreen/config.yml | 21 -------------- .evergreen/generated_configs/tasks.yml | 15 ++++++++++ .evergreen/generated_configs/variants.yml | 34 ++++------------------- .evergreen/scripts/generate_config.py | 26 +++++++++++------ .evergreen/scripts/setup_tests.py | 6 ++++ .evergreen/scripts/teardown_tests.py | 4 +++ CONTRIBUTING.md | 21 +++++++++----- 7 files changed, 62 insertions(+), 65 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ee29d65f2b..15bdac315d 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -213,20 +213,6 @@ functions: params: file: ${DRIVERS_TOOLS}/mo-expansion.yml - "bootstrap data lake": - - command: subprocess.exec - type: setup - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh - - command: subprocess.exec - type: setup - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh - "run doctests": - command: subprocess.exec type: test @@ -404,13 +390,6 @@ tasks: TOPOLOGY: "replica_set" - func: "run tests" - - name: atlas-data-lake-tests - commands: - - func: "bootstrap data lake" - - func: "run tests" - vars: - TEST_NAME: "data_lake" - - name: "test-aws-lambda-deployed" commands: - command: ec2.assume_role diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 5b5cf92d68..7d03822a6f 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -8,6 +8,21 @@ tasks: TEST_NAME: atlas_connect tags: [atlas_connect] + # Atlas data lake tests + - name: test-atlas-data-lake-with_ext + commands: + - func: run tests + vars: + TEST_NAME: data_lake + NO_EXT: "1" + tags: [atlas_data_lake] + - name: test-atlas-data-lake-without_ext + commands: + - func: run tests + vars: + TEST_NAME: data_lake + tags: [atlas_data_lake] + # Aws tests - name: test-auth-aws-4.4-regular commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 864b061a15..78f181fc64 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -65,43 +65,21 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Atlas data lake tests - - name: atlas-data-lake-ubuntu-22-python3.9-auth-no-c + - name: atlas-data-lake-ubuntu-22-python3.9 tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 Python3.9 Auth No C + - name: .atlas_data_lake + display_name: Atlas Data Lake Ubuntu-22 Python3.9 run_on: - ubuntu2204-small expansions: - AUTH: auth - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-ubuntu-22-python3.9-auth - tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 Python3.9 Auth - run_on: - - ubuntu2204-small - expansions: - AUTH: auth PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-ubuntu-22-python3.13-auth-no-c + - name: atlas-data-lake-ubuntu-22-python3.13 tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 Python3.13 Auth No C + - name: .atlas_data_lake + display_name: Atlas Data Lake Ubuntu-22 Python3.13 run_on: - ubuntu2204-small expansions: - AUTH: auth - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: atlas-data-lake-ubuntu-22-python3.13-auth - tasks: - - name: atlas-data-lake-tests - display_name: Atlas Data Lake Ubuntu-22 Python3.13 Auth - run_on: - - ubuntu2204-small - expansions: - AUTH: auth PYTHON_BINARY: /opt/python/3.13/bin/python3 # Aws auth tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b9f9377066..ebb4b5d31c 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -213,7 +213,7 @@ def zip_cycle(*iterables, empty_default=None): yield tuple(next(i, empty_default) for i in cycles) -def handle_c_ext(c_ext, expansions): +def handle_c_ext(c_ext, expansions) -> None: """Handle c extension option.""" if c_ext == C_EXTS[0]: expansions["NO_EXT"] = "1" @@ -600,14 +600,10 @@ def create_no_c_ext_variants(): def create_atlas_data_lake_variants(): variants = [] host = HOSTS["ubuntu22"] - for python, c_ext in product(MIN_MAX_PYTHON, C_EXTS): - tasks = ["atlas-data-lake-tests"] - expansions = dict(AUTH="auth") - handle_c_ext(c_ext, expansions) - display_name = get_display_name("Atlas Data Lake", host, python=python, **expansions) - variant = create_variant( - tasks, display_name, host=host, python=python, expansions=expansions - ) + for python in MIN_MAX_PYTHON: + tasks = [".atlas_data_lake"] + display_name = get_display_name("Atlas Data Lake", host, python=python) + variant = create_variant(tasks, display_name, host=host, python=python) variants.append(variant) return variants @@ -970,6 +966,18 @@ def create_perf_tasks(): return tasks +def create_atlas_data_lake_tasks(): + tags = ["atlas_data_lake"] + tasks = [] + for c_ext in C_EXTS: + vars = dict(TEST_NAME="data_lake") + handle_c_ext(c_ext, vars) + test_func = FunctionCall(func="run tests", vars=vars) + task_name = f"test-atlas-data-lake-{c_ext}" + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + return tasks + + def create_ocsp_tasks(): tasks = [] tests = [ diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 6d7c8037c7..56011341a3 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -175,6 +175,12 @@ def handle_test_env() -> None: if not config: AUTH = "noauth" + if test_name == "data_lake": + # Stop any running mongo-orchestration which might be using the port. + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh") + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/atlas_data_lake/setup.sh") + AUTH = "auth" + if AUTH != "noauth": if test_name == "data_lake": config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh") diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index b081478423..d27cb8682e 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -57,6 +57,10 @@ teardown_mod_wsgi() +# Tear down data_lake if applicable. +elif TEST_NAME == "data_lake": + run_command(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/teardown.sh") + # Tear down coverage if applicable. if os.environ.get("COVERAGE"): shutil.rmtree(".pytest_cache", ignore_errors=True) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 47eb01dbf0..e466c6847e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -286,16 +286,23 @@ Note: these tests can only be run from an Evergreen Linux host that has the Pyth The `mode` can be `standalone` or `embedded`. For the `replica_set` version of the tests, use `TOPOLOGY=replica_set just run-server`. +### Atlas Data Lake tests. + +You must have `docker` or `podman` installed locally. + +- Run `just setup-tests data_lake`. +- Run `just run-tests`. + ### OCSP tests - - Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. - This corresponds to a config file in `$DRIVERS_TOOLS/.evergreen/orchestration/configs/servers`. - MongoDB servers on MacOS and Windows do not staple OCSP responses and only support RSA. - - Run `just run-server ocsp`. - - Run `just setup-tests ocsp ` (options are "valid", "revoked", "valid-delegate", "revoked-delegate"). - - Run `just run-tests` +- Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. +This corresponds to a config file in `$DRIVERS_TOOLS/.evergreen/orchestration/configs/servers`. +MongoDB servers on MacOS and Windows do not staple OCSP responses and only support RSA. +- Run `just run-server ocsp`. +- Run `just setup-tests ocsp ` (options are "valid", "revoked", "valid-delegate", "revoked-delegate"). +- Run `just run-tests` - If you are running one of the `no-responder` tests, omit the `run-server` step. +If you are running one of the `no-responder` tests, omit the `run-server` step. ### Perf Tests From 134f52ff23947c801b2a5b77bb60c2bea7e1b373 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 19 Mar 2025 15:58:12 -0500 Subject: [PATCH 1807/2111] PYTHON-5213 Convert AWS Lambda and Search Index tests to use new test scripts (#2205) --- .evergreen/config.yml | 80 +-------------------- .evergreen/generated_configs/tasks.yml | 21 ++++++ .evergreen/generated_configs/variants.yml | 10 ++- .evergreen/run-deployed-lambda-aws-tests.sh | 10 --- .evergreen/scripts/generate_config.py | 28 +++++++- .evergreen/scripts/run_server.py | 20 +++--- .evergreen/scripts/run_tests.py | 44 ++++++++++++ .evergreen/scripts/setup_tests.py | 24 ++++++- .evergreen/scripts/teardown_tests.py | 4 ++ .evergreen/scripts/utils.py | 7 +- .gitignore | 3 +- CONTRIBUTING.md | 14 ++++ pyproject.toml | 2 +- test/asynchronous/test_index_management.py | 2 +- test/lambda/build.sh | 28 -------- test/test_index_management.py | 2 +- 16 files changed, 164 insertions(+), 135 deletions(-) delete mode 100755 .evergreen/run-deployed-lambda-aws-tests.sh delete mode 100755 test/lambda/build.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 15bdac315d..6b2b332a5c 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -268,27 +268,7 @@ functions: - command: ec2.assume_role params: role_arn: ${aws_test_secrets_role} - - "setup atlas": - - command: subprocess.exec - params: - binary: bash - include_expansions_in_env: ["task_id", "execution"] - env: - MONGODB_VERSION: "7.0" - LAMBDA_STACK_NAME: dbx-python-lambda - args: - - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh - - command: expansions.update - params: - file: atlas-expansion.yml - - "teardown atlas": - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + duration_seconds: 3600 "attach benchmark test results": - command: attach.results @@ -314,31 +294,6 @@ post: - func: "upload test results" - func: "cleanup" -task_groups: - - name: test_aws_lambda_task_group - setup_group: - - func: fetch source - - func: setup system - - func: setup atlas - teardown_task: - - func: teardown atlas - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - test-aws-lambda-deployed - - - name: test_atlas_task_group_search_indexes - setup_group: - - func: fetch source - - func: setup system - - func: setup atlas - teardown_task: - - func: teardown atlas - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 - tasks: - - test-search-index-helpers - tasks: # Wildcard task. Do you need to find out what tools are available and where? # Throw it here, and execute this task on all buildvariants @@ -365,17 +320,6 @@ tasks: - func: "run server" - func: "run doctests" - - name: "test-search-index-helpers" - commands: - - func: "run server" - vars: - VERSION: "6.0" - TOPOLOGY: "replica_set" - - func: "run tests" - vars: - TEST_NAME: index_management - AUTH: "auth" - - name: "no-server" tags: ["no-server"] commands: @@ -390,22 +334,6 @@ tasks: TOPOLOGY: "replica_set" - func: "run tests" - - name: "test-aws-lambda-deployed" - commands: - - command: ec2.assume_role - params: - role_arn: ${LAMBDA_AWS_ROLE_ARN} - duration_seconds: 3600 - - command: subprocess.exec - params: - working_dir: src - binary: bash - add_expansions_to_env: true - args: - - .evergreen/run-deployed-lambda-aws-tests.sh - env: - TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/test/lambda - # }}} - name: "coverage-report" tags: ["coverage"] @@ -482,12 +410,6 @@ buildvariants: batchtime: 10080 # 7 days - name: test-azurekms-fail -- name: rhel8-test-lambda - display_name: FaaS Lambda - run_on: rhel87-small - tasks: - - name: test_aws_lambda_task_group - - name: rhel8-import-time display_name: Import Time run_on: rhel87-small diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 7d03822a6f..c692ec31db 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -23,6 +23,15 @@ tasks: TEST_NAME: data_lake tags: [atlas_data_lake] + # Aws lambda tests + - name: test-aws-lambda-deployed + commands: + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: aws_lambda + tags: [aws_lambda] + # Aws tests - name: test-auth-aws-4.4-regular commands: @@ -1220,6 +1229,18 @@ tasks: - func: send dashboard data tags: [perf] + # Search index tests + - name: test-search-index-helpers + commands: + - func: assume ec2 role + - func: run server + vars: + TEST_NAME: search_index + - func: run tests + vars: + TEST_NAME: search_index + tags: [search_index] + # Server tests - name: test-4.0-standalone-auth-ssl-sync commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 78f181fc64..361a0c9dde 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -132,6 +132,14 @@ buildvariants: expansions: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 + # Aws lambda tests + - name: faas-lambda + tasks: + - name: .aws_lambda + display_name: FaaS Lambda + run_on: + - rhel87-small + # Compression tests - name: compression-snappy-rhel8-python3.9-no-c tasks: @@ -986,7 +994,7 @@ buildvariants: # Search index tests - name: search-index-helpers-rhel8-python3.9 tasks: - - name: test_atlas_task_group_search_indexes + - name: .search_index display_name: Search Index Helpers RHEL8 Python3.9 run_on: - rhel87-small diff --git a/.evergreen/run-deployed-lambda-aws-tests.sh b/.evergreen/run-deployed-lambda-aws-tests.sh deleted file mode 100755 index aa16d62650..0000000000 --- a/.evergreen/run-deployed-lambda-aws-tests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail - -export PATH="/opt/python/3.9/bin:${PATH}" -python --version -pushd ./test/lambda - -. build.sh -popd -. ${DRIVERS_TOOLS}/.evergreen/aws_lambda/run-deployed-lambda-aws-tests.sh diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index ebb4b5d31c..dad4f27bcb 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -672,7 +672,7 @@ def create_search_index_variants(): python = CPYTHONS[0] return [ create_variant( - ["test_atlas_task_group_search_indexes"], + [".search_index"], get_display_name("Search Index Helpers", host, python=python), python=python, host=host, @@ -779,6 +779,11 @@ def create_alternative_hosts_variants(): return variants +def create_aws_lambda_variants(): + host = HOSTS["rhel8"] + return [create_variant([".aws_lambda"], display_name="FaaS Lambda", host=host)] + + ############## # Tasks ############## @@ -927,6 +932,27 @@ def _create_ocsp_task(algo, variant, server_type, base_task_name): return EvgTask(name=task_name, tags=tags, commands=commands) +def create_aws_lambda_tasks(): + assume_func = FunctionCall(func="assume ec2 role") + vars = dict(TEST_NAME="aws_lambda") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-aws-lambda-deployed" + tags = ["aws_lambda"] + commands = [assume_func, test_func] + return [EvgTask(name=task_name, tags=tags, commands=commands)] + + +def create_search_index_tasks(): + assume_func = FunctionCall(func="assume ec2 role") + server_func = FunctionCall(func="run server", vars=dict(TEST_NAME="search_index")) + vars = dict(TEST_NAME="search_index") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-search-index-helpers" + tags = ["search_index"] + commands = [assume_func, server_func, test_func] + return [EvgTask(name=task_name, tags=tags, commands=commands)] + + def create_atlas_connect_tasks(): vars = dict(TEST_NAME="atlas_connect") assume_func = FunctionCall(func="assume ec2 role") diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py index f43ada4bbb..f85207daa4 100644 --- a/.evergreen/scripts/run_server.py +++ b/.evergreen/scripts/run_server.py @@ -22,14 +22,6 @@ def start_server(): if "VERSION" in os.environ: os.environ["MONGODB_VERSION"] = os.environ["VERSION"] - if opts.auth: - extra_opts.append("--auth") - - if opts.verbose: - extra_opts.append("-v") - elif opts.quiet: - extra_opts.append("-q") - if test_name == "auth_aws": set_env("AUTH_AWS") @@ -51,6 +43,10 @@ def start_server(): if not found: raise ValueError("Please provide an orchestration file") + elif test_name == "search_index": + os.environ["TOPOLOGY"] = "replica_set" + os.environ["MONGODB_VERSION"] = "7.0" + if not os.environ.get("TEST_CRYPT_SHARED"): set_env("SKIP_CRYPT_SHARED") @@ -62,6 +58,14 @@ def start_server(): set_env("TLS_PEM_KEY_FILE", certs / "server.pem") set_env("TLS_CA_FILE", certs / "ca.pem") + if opts.auth: + extra_opts.append("--auth") + + if opts.verbose: + extra_opts.append("-v") + elif opts.quiet: + extra_opts.append("-q") + cmd = ["bash", f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", *extra_opts] run_command(cmd, cwd=DRIVERS_TOOLS) diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 2e23a366b1..9f700d70e0 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -4,8 +4,11 @@ import logging import os import platform +import shutil import sys from datetime import datetime +from pathlib import Path +from shutil import which import pytest from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command @@ -81,6 +84,42 @@ def handle_pymongocrypt() -> None: LOGGER.info(f"libmongocrypt version: {pymongocrypt.libmongocrypt_version()})") +def handle_aws_lambda() -> None: + env = os.environ.copy() + target_dir = ROOT / "test/lambda" + env["TEST_LAMBDA_DIRECTORY"] = str(target_dir) + env.setdefault("AWS_REGION", "us-east-1") + dirs = ["pymongo", "gridfs", "bson"] + # Store the original .so files. + before_sos = [] + for dname in dirs: + before_sos.extend(f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")) + # Build the c extensions. + docker = which("docker") or which("podman") + if not docker: + raise ValueError("Could not find docker!") + image = "quay.io/pypa/manylinux2014_x86_64:latest" + run_command( + f'{docker} run --rm -v "{ROOT}:/src" --platform linux/amd64 {image} /src/test/lambda/build_internal.sh' + ) + for dname in dirs: + target = ROOT / "test/lambda/mongodb" / dname + shutil.rmtree(target, ignore_errors=True) + shutil.copytree(ROOT / dname, target) + # Remove the original so files from the lambda directory. + for so_path in before_sos: + (ROOT / "test/lambda/mongodb" / so_path).unlink() + # Remove the new so files from the ROOT directory. + for dname in dirs: + so_paths = [f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")] + for so_path in list(so_paths): + if so_path not in before_sos: + Path(so_path).unlink() + + script_name = "run-deployed-lambda-aws-tests.sh" + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/aws_lambda/{script_name}", env=env) + + def run() -> None: # Handle green framework first so they can patch modules. if GREEN_FRAMEWORK: @@ -129,6 +168,11 @@ def run() -> None: test_oidc_send_to_remote(SUB_TEST_NAME) return + # Run deployed aws lambda tests. + if TEST_NAME == "aws_lambda": + handle_aws_lambda() + return + if os.environ.get("DEBUG_LOG"): TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG} -o log_cli=1".split()) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 56011341a3..ae7fde5efc 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -175,6 +175,28 @@ def handle_test_env() -> None: if not config: AUTH = "noauth" + if test_name in ["aws_lambda", "search_index"]: + env = os.environ.copy() + env["MONGODB_VERSION"] = "7.0" + env["LAMBDA_STACK_NAME"] = "dbx-python-lambda" + write_env("LAMBDA_STACK_NAME", env["LAMBDA_STACK_NAME"]) + run_command( + f"bash {DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh", + env=env, + cwd=DRIVERS_TOOLS, + ) + + if test_name == "search_index": + AUTH = "auth" + + if test_name == "aws_lambda": + UV_ARGS.append("--with pip") + # Store AWS creds if they were given. + if "AWS_ACCESS_KEY_ID" in os.environ: + for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]: + if key in os.environ: + write_env(key, os.environ[key]) + if test_name == "data_lake": # Stop any running mongo-orchestration which might be using the port. run_command(f"bash {DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh") @@ -197,7 +219,7 @@ def handle_test_env() -> None: elif test_name == "auth_oidc": DB_USER = config["OIDC_ADMIN_USER"] DB_PASSWORD = config["OIDC_ADMIN_PWD"] - elif test_name == "index_management": + elif test_name == "search_index": config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas/secrets-export.sh") DB_USER = config["DRIVERS_ATLAS_LAMBDA_USER"] DB_PASSWORD = config["DRIVERS_ATLAS_LAMBDA_PASSWORD"] diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index d27cb8682e..390e0a68eb 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -40,6 +40,10 @@ elif TEST_NAME == "serverless": run_command(f"bash {DRIVERS_TOOLS}/.evergreen/serverless/teardown.sh") +# Tear down atlas cluster if applicable. +if TEST_NAME in ["aws_lambda", "search_index"]: + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh") + # Tear down auth_aws if applicable. # We do not run web-identity hosts on macos, because the hosts lack permissions, # so there is no reason to run the teardown, which would error with a 401. diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 535e392ea2..0ff3b76a5f 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -39,7 +39,7 @@ class Distro: "default_sync": "default", "encryption": "encryption", "enterprise_auth": "auth", - "index_management": "index_management", + "search_index": "search_index", "kms": "kms", "load_balancer": "load_balancer", "mockupdb": "mockupdb", @@ -52,7 +52,7 @@ class Distro: # Tests that require a sub test suite. SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"] -EXTRA_TESTS = ["mod_wsgi"] +EXTRA_TESTS = ["mod_wsgi", "aws_lambda", "search_index"] def get_test_options( @@ -153,7 +153,8 @@ def run_command(cmd: str | list[str], **kwargs: Any) -> None: LOGGER.info("Running command '%s'... done.", cmd) -def create_archive() -> None: +def create_archive() -> str: run_command("git add .", cwd=ROOT) run_command('git commit -m "add files"', check=False, cwd=ROOT) run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) + return TMP_DRIVER_FILE diff --git a/.gitignore b/.gitignore index 966059e693..a88a7556e2 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,7 @@ mongocryptd.pid .idea/ .vscode/ .nova/ +.temp/ venv/ secrets-export.sh libmongocrypt.tar.gz @@ -32,10 +33,10 @@ results.json # Lambda temp files test/lambda/.aws-sam -test/lambda/env.json test/lambda/mongodb/pymongo/* test/lambda/mongodb/gridfs/* test/lambda/mongodb/bson/* +test/lambda/*.json # test results and logs xunit-results/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e466c6847e..5f55e56684 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -248,6 +248,7 @@ the pages will re-render and the browser will automatically refresh. - Run the tests with `just run-tests`. The supported types are [`default`, `azure`, `gcp`, `eks`, `aks`, and `gke`]. +For the `eks` test, you will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). ### KMS tests @@ -275,6 +276,19 @@ Note: these tests can only be run from an Evergreen host. - Run `just setup-tests atlas_connect`. - Run `just run-tests`. +### Search Index tests + +- Run `just run-server search_index`. +- Run `just setup-tests search_index`. +- Run `just run-tests`. + +### AWS Lambda tests + +You will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). + +- Run `just setup-tests aws_lambda`. +- Run `just run-tests`. + ### mod_wsgi tests Note: these tests can only be run from an Evergreen Linux host that has the Python toolchain. diff --git a/pyproject.toml b/pyproject.toml index 993b3e5aee..f8c25ed602 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,7 +128,7 @@ markers = [ "atlas_connect: tests that rely on an atlas connection", "data_lake: tests that rely on atlas data lake", "perf: benchmark tests", - "index_management: index management tests", + "search_index: search index helper tests", "kms: client-side field-level encryption tests using kms", "encryption: encryption tests", "load_balancer: load balancer tests", diff --git a/test/asynchronous/test_index_management.py b/test/asynchronous/test_index_management.py index 4b218de130..890788fc56 100644 --- a/test/asynchronous/test_index_management.py +++ b/test/asynchronous/test_index_management.py @@ -38,7 +38,7 @@ _IS_SYNC = False -pytestmark = pytest.mark.index_management +pytestmark = pytest.mark.search_index # Location of JSON test specifications. if _IS_SYNC: diff --git a/test/lambda/build.sh b/test/lambda/build.sh deleted file mode 100755 index c7cc24eab2..0000000000 --- a/test/lambda/build.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail -set -o xtrace - -rm -rf mongodb/pymongo -rm -rf mongodb/gridfs -rm -rf mongodb/bson - -pushd ../.. -rm -f pymongo/*.so -rm -f bson/*.so -image="quay.io/pypa/manylinux2014_x86_64:latest" - -DOCKER=$(command -v docker) || true -if [ -z "$DOCKER" ]; then - PODMAN=$(command -v podman) || true - if [ -z "$PODMAN" ]; then - echo "docker or podman are required!" - exit 1 - fi - DOCKER=podman -fi - -$DOCKER run --rm -v "`pwd`:/src" $image /src/test/lambda/build_internal.sh -cp -r pymongo ./test/lambda/mongodb/pymongo -cp -r bson ./test/lambda/mongodb/bson -cp -r gridfs ./test/lambda/mongodb/gridfs -popd diff --git a/test/test_index_management.py b/test/test_index_management.py index 3a2b17cd3d..dea8c0e2be 100644 --- a/test/test_index_management.py +++ b/test/test_index_management.py @@ -38,7 +38,7 @@ _IS_SYNC = True -pytestmark = pytest.mark.index_management +pytestmark = pytest.mark.search_index # Location of JSON test specifications. if _IS_SYNC: From 737a1b73443223cc8c986826f3426d583f243b09 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 19 Mar 2025 20:53:35 -0500 Subject: [PATCH 1808/2111] PYTHON-5220 Convert remaining tests to use standard test setup (#2211) --- .evergreen/config.yml | 56 ++++------------------- .evergreen/generated_configs/tasks.yml | 33 +++++++++++++ .evergreen/generated_configs/variants.yml | 12 ++++- .evergreen/run-tests.sh | 5 +- .evergreen/scripts/cleanup.sh | 8 ++++ .evergreen/scripts/configure-env.sh | 2 - .evergreen/scripts/generate_config.py | 40 +++++++++++++++- .evergreen/scripts/run-with-env.sh | 21 --------- .evergreen/scripts/setup_tests.py | 3 +- CONTRIBUTING.md | 21 +++++++++ uv.lock | 2 + 11 files changed, 126 insertions(+), 77 deletions(-) delete mode 100755 .evergreen/scripts/run-with-env.sh diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 6b2b332a5c..97845b86d4 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -42,7 +42,7 @@ functions: # Make an evergreen expansion file with dynamic values - command: subprocess.exec params: - include_expansions_in_env: ["is_patch", "project", "version_id", "skip_web_identity_auth_test", "skip_ECS_auth_test"] + include_expansions_in_env: ["is_patch", "project", "version_id"] binary: bash working_dir: "src" args: @@ -213,16 +213,14 @@ functions: params: file: ${DRIVERS_TOOLS}/mo-expansion.yml - "run doctests": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: [ "PYTHON_BINARY" ] - working_dir: "src" - binary: bash - args: - - .evergreen/scripts/run-with-env.sh - - .evergreen/scripts/run-doctests.sh + "run just script": + - command: subprocess.exec + type: test + params: + include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN] + binary: bash + working_dir: "src" + args: [.evergreen/just.sh, "${JUSTFILE_TARGET}"] "run tests": - command: subprocess.exec @@ -248,7 +246,6 @@ functions: binary: bash working_dir: "src" args: - - .evergreen/scripts/run-with-env.sh - .evergreen/scripts/cleanup.sh "teardown system": @@ -305,36 +302,7 @@ tasks: params: args: - src/.evergreen/scripts/run-getdata.sh -# Standard test tasks {{{ - - - name: "mockupdb" - tags: ["mockupdb"] - commands: - - func: "run tests" - vars: - TEST_NAME: mockupdb - - - name: "doctests" - tags: ["doctests"] - commands: - - func: "run server" - - func: "run doctests" - - name: "no-server" - tags: ["no-server"] - commands: - - func: "run tests" - - - name: "free-threading" - tags: ["free-threading"] - commands: - - func: "run server" - vars: - VERSION: "8.0" - TOPOLOGY: "replica_set" - - func: "run tests" - -# }}} - name: "coverage-report" tags: ["coverage"] depends_on: @@ -384,12 +352,6 @@ tasks: - ${github.amrom.workers.devmit} buildvariants: -- name: "no-server" - display_name: "No server" - run_on: - - rhel84-small - tasks: - - name: "no-server" - name: "Coverage Report" display_name: "Coverage Report" diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index c692ec31db..efc7844061 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -713,6 +713,15 @@ tasks: AWS_ROLE_SESSION_NAME: test tags: [auth-aws, auth-aws-web-identity] + # Doctest tests + - name: test-doctests + commands: + - func: run server + - func: run just script + vars: + JUSTFILE_TARGET: docs-test + tags: [doctests] + # Enterprise auth tests - name: test-enterprise-auth commands: @@ -727,6 +736,16 @@ tasks: AUTH: auth tags: [enterprise_auth] + # Free threading tests + - name: test-free-threading + commands: + - func: run server + vars: + VERSION: "8.0" + TOPOLOGY: replica_set + - func: run tests + tags: [free-threading] + # Kms tests - name: test-gcpkms commands: @@ -799,6 +818,14 @@ tasks: TEST_NAME: load_balancer tags: [load-balancer, noauth, nossl] + # Mockupdb tests + - name: test-mockupdb + commands: + - func: run tests + vars: + TEST_NAME: mockupdb + tags: [mockupdb] + # Mod wsgi tests - name: mod-wsgi-standalone commands: @@ -841,6 +868,12 @@ tasks: SUB_TEST_NAME: embedded tags: [mod_wsgi] + # No server tests + - name: test-no-server + commands: + - func: run tests + tags: [no-server] + # Ocsp tests - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 361a0c9dde..aa20fef895 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -242,7 +242,7 @@ buildvariants: # Doctests tests - name: doctests-rhel8-python3.9 tasks: - - name: doctests + - name: .doctests display_name: Doctests RHEL8 Python3.9 run_on: - rhel87-small @@ -672,7 +672,7 @@ buildvariants: # Mockupdb tests - name: mockupdb-rhel8-python3.9 tasks: - - name: mockupdb + - name: .mockupdb display_name: MockupDB RHEL8 Python3.9 run_on: - rhel87-small @@ -746,6 +746,14 @@ buildvariants: NO_EXT: "1" PYTHON_BINARY: /opt/python/3.13/bin/python3 + # No server tests + - name: no-server + tasks: + - name: .no-server + display_name: No server + run_on: + - rhel87-small + # Ocsp tests - name: ocsp-rhel8-v4.4-python3.9 tasks: diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 40336c6d2d..6f53ced61c 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -34,7 +34,10 @@ fi uv sync ${UV_ARGS} --reinstall uv pip list +# Ensure we go back to base environment after the test. +trap "uv sync" EXIT HUP + # Start the test runner. -uv run .evergreen/scripts/run_tests.py "$@" +uv run ${UV_ARGS} .evergreen/scripts/run_tests.py "$@" popd diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh index c58d2163dd..6bb4b3ce5f 100755 --- a/.evergreen/scripts/cleanup.sh +++ b/.evergreen/scripts/cleanup.sh @@ -1,4 +1,12 @@ #!/bin/bash +HERE=$(dirname ${BASH_SOURCE:-$0}) + +# Try to source the env file. +if [ -f $HERE/env.sh ]; then + echo "Sourcing env file" + source $HERE/env.sh +fi + rm -rf "${DRIVERS_TOOLS}" || true rm -f ./secrets-export.sh || true diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index fa37b8fb08..9ec98bb5be 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -60,8 +60,6 @@ export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS_BINARIES" export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" -export skip_web_identity_auth_test="${skip_web_identity_auth_test:-}" -export skip_ECS_auth_test="${skip_ECS_auth_test:-}" export CARGO_HOME="$CARGO_HOME" export UV_TOOL_DIR="$UV_TOOL_DIR" diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index dad4f27bcb..0a2496c66d 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -685,7 +685,7 @@ def create_mockupdb_variants(): python = CPYTHONS[0] return [ create_variant( - ["mockupdb"], + [".mockupdb"], get_display_name("MockupDB", host, python=python), python=python, host=host, @@ -698,7 +698,7 @@ def create_doctests_variants(): python = CPYTHONS[0] return [ create_variant( - ["doctests"], + [".doctests"], get_display_name("Doctests", host, python=python), python=python, host=host, @@ -748,6 +748,11 @@ def create_aws_auth_variants(): return variants +def create_no_server_variants(): + host = HOSTS["rhel8"] + return [create_variant([".no-server"], "No server", host=host)] + + def create_alternative_hosts_variants(): batchtime = BATCHTIME_WEEK variants = [] @@ -1040,6 +1045,37 @@ def create_ocsp_tasks(): return tasks +def create_mockupdb_tasks(): + test_func = FunctionCall(func="run tests", vars=dict(TEST_NAME="mockupdb")) + task_name = "test-mockupdb" + tags = ["mockupdb"] + return [EvgTask(name=task_name, tags=tags, commands=[test_func])] + + +def create_doctest_tasks(): + server_func = FunctionCall(func="run server") + test_func = FunctionCall(func="run just script", vars=dict(JUSTFILE_TARGET="docs-test")) + task_name = "test-doctests" + tags = ["doctests"] + return [EvgTask(name=task_name, tags=tags, commands=[server_func, test_func])] + + +def create_no_server_tasks(): + test_func = FunctionCall(func="run tests") + task_name = "test-no-server" + tags = ["no-server"] + return [EvgTask(name=task_name, tags=tags, commands=[test_func])] + + +def create_free_threading_tasks(): + vars = dict(VERSION="8.0", TOPOLOGY="replica_set") + server_func = FunctionCall(func="run server", vars=vars) + test_func = FunctionCall(func="run tests") + task_name = "test-free-threading" + tags = ["free-threading"] + return [EvgTask(name=task_name, tags=tags, commands=[server_func, test_func])] + + def create_serverless_tasks(): vars = dict(TEST_NAME="serverless", AUTH="auth", SSL="ssl") test_func = FunctionCall(func="run tests", vars=vars) diff --git a/.evergreen/scripts/run-with-env.sh b/.evergreen/scripts/run-with-env.sh deleted file mode 100755 index 2fd073605d..0000000000 --- a/.evergreen/scripts/run-with-env.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -eu - -# Example use: bash run-with-env.sh run-tests.sh {args...} - -# Parameter expansion to get just the current directory's name -if [ "${PWD##*/}" == "src" ]; then - . .evergreen/scripts/env.sh - if [ -f ".evergreen/scripts/test-env.sh" ]; then - . .evergreen/scripts/test-env.sh - fi -else - . src/.evergreen/scripts/env.sh - if [ -f "src/.evergreen/scripts/test-env.sh" ]; then - . src/.evergreen/scripts/test-env.sh - fi -fi - -set -eu - -# shellcheck source=/dev/null -. "$@" diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index ae7fde5efc..17f9de1a71 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -128,8 +128,7 @@ def handle_test_env() -> None: TEST_ARGS = "" # Start compiling the args we'll pass to uv. - # Run in an isolated environment so as not to pollute the base venv. - UV_ARGS = ["--isolated --extra test"] + UV_ARGS = ["--extra test --no-group dev"] test_title = test_name if sub_test_name: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5f55e56684..e75510171e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -282,6 +282,27 @@ Note: these tests can only be run from an Evergreen host. - Run `just setup-tests search_index`. - Run `just run-tests`. +### MockupDB tests + +- Run `just setup-tests mockupdb`. +- Run `just run-tests`. + +### Doc tests + +The doc tests require a running server. + +- Run `just run-server`. +- Run `just docs-test`. + +### Free-threaded Python Tests + +In the evergreen builds, the tests are configured to use the free-threaded python from the toolchain. +Locally you can run: + +- Run `just run-server`. +- Run `just setup-tests`. +- Run `UV_PYTHON=3.13t just run-tests`. + ### AWS Lambda tests You will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). diff --git a/uv.lock b/uv.lock index 8b5d592dc0..7dae5c9136 100644 --- a/uv.lock +++ b/uv.lock @@ -1036,6 +1036,7 @@ snappy = [ { name = "python-snappy" }, ] test = [ + { name = "pip" }, { name = "pytest" }, { name = "pytest-asyncio" }, ] @@ -1080,6 +1081,7 @@ requires-dist = [ { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, { name = "furo", marker = "extra == 'docs'", specifier = "==2024.8.6" }, + { name = "pip", marker = "extra == 'test'" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, From 1145c9de543bb5748a61dbf338385ff2ff6330c6 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 20 Mar 2025 13:55:52 -0700 Subject: [PATCH 1809/2111] PYTHON-5046 Support $lookup in CSFLE and QE (#2210) --- doc/changelog.rst | 1 + pymongo/asynchronous/encryption.py | 10 +- pymongo/synchronous/encryption.py | 10 +- test/asynchronous/test_encryption.py | 307 +++++++++++++++++- .../etc/data/lookup/key-doc.json | 30 ++ .../etc/data/lookup/schema-csfle.json | 19 ++ .../etc/data/lookup/schema-csfle2.json | 19 ++ .../etc/data/lookup/schema-qe.json | 20 ++ .../etc/data/lookup/schema-qe2.json | 20 ++ test/test_encryption.py | 307 +++++++++++++++++- uv.lock | 2 +- 11 files changed, 730 insertions(+), 15 deletions(-) create mode 100644 test/client-side-encryption/etc/data/lookup/key-doc.json create mode 100644 test/client-side-encryption/etc/data/lookup/schema-csfle.json create mode 100644 test/client-side-encryption/etc/data/lookup/schema-csfle2.json create mode 100644 test/client-side-encryption/etc/data/lookup/schema-qe.json create mode 100644 test/client-side-encryption/etc/data/lookup/schema-qe2.json diff --git a/doc/changelog.rst b/doc/changelog.rst index a54d229075..b172da6b8e 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,6 +8,7 @@ PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. +- Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. Issues Resolved ............... diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index b18ed53f92..3582bec9ab 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -242,7 +242,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: ) raise exc from final_err - async def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: + async def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: """Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads @@ -251,14 +251,12 @@ async def collection_info(self, database: str, filter: bytes) -> Optional[bytes] :param database: The database on which to run listCollections. :param filter: The filter to pass to listCollections. - :return: The first document from the listCollections command response as BSON. + :return: All documents from the listCollections command response as BSON. """ async with await self.client_ref()[database].list_collections( filter=RawBSONDocument(filter) ) as cursor: - async for doc in cursor: - return _dict_to_bson(doc, False, _DATA_KEY_OPTS) - return None + return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) async for doc in cursor] def spawn(self) -> None: """Spawn mongocryptd. @@ -551,7 +549,7 @@ def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. if kwargs.get("key_expiration_ms") is None: kwargs.pop("key_expiration_ms", None) - return MongoCryptOptions(**kwargs) + return MongoCryptOptions(**kwargs, enable_multiple_collinfo=True) class AsyncClientEncryption(Generic[_DocumentType]): diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 724131fa93..ebffc7d74c 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -241,7 +241,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: ) raise exc from final_err - def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: + def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: """Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads @@ -250,12 +250,10 @@ def collection_info(self, database: str, filter: bytes) -> Optional[bytes]: :param database: The database on which to run listCollections. :param filter: The filter to pass to listCollections. - :return: The first document from the listCollections command response as BSON. + :return: All documents from the listCollections command response as BSON. """ with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: - for doc in cursor: - return _dict_to_bson(doc, False, _DATA_KEY_OPTS) - return None + return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) for doc in cursor] def spawn(self) -> None: """Spawn mongocryptd. @@ -548,7 +546,7 @@ def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. if kwargs.get("key_expiration_ms") is None: kwargs.pop("key_expiration_ms", None) - return MongoCryptOptions(**kwargs) + return MongoCryptOptions(**kwargs, enable_multiple_collinfo=True) class ClientEncryption(Generic[_DocumentType]): diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 728a6f9139..3b9096ef6a 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -73,7 +73,7 @@ is_greenthread_patched, ) -from bson import DatetimeMS, Decimal128, encode, json_util +from bson import BSON, DatetimeMS, Decimal128, encode, json_util from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError @@ -94,6 +94,7 @@ EncryptionError, InvalidOperation, OperationFailure, + PyMongoError, ServerSelectionTimeoutError, WriteError, ) @@ -2419,6 +2420,310 @@ async def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) +# https://github.com/mongodb/specifications/blob/527e22d5090ec48bf1e144c45fc831de0f1935f6/source/client-side-encryption/tests/README.md#25-test-lookup +class TestLookupProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + await encrypted_client.drop_database("db") + + key_doc = json_data("etc", "data", "lookup", "key-doc.json") + await create_key_vault(encrypted_client.db.keyvault, key_doc) + self.addAsyncCleanup(async_client_context.client.drop_database, "db") + + await encrypted_client.db.create_collection( + "csfle", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle.json")}, + ) + await encrypted_client.db.create_collection( + "csfle2", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle2.json")}, + ) + await encrypted_client.db.create_collection( + "qe", encryptedFields=json_data("etc", "data", "lookup", "schema-qe.json") + ) + await encrypted_client.db.create_collection( + "qe2", encryptedFields=json_data("etc", "data", "lookup", "schema-qe2.json") + ) + await encrypted_client.db.create_collection("no_schema") + await encrypted_client.db.create_collection("no_schema2") + + unencrypted_client = await self.async_rs_or_single_client() + + await encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) + doc = await unencrypted_client.db.csfle.find_one() + self.assertTrue(isinstance(doc["csfle"], Binary)) + await encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) + doc = await unencrypted_client.db.csfle2.find_one() + self.assertTrue(isinstance(doc["csfle2"], Binary)) + await encrypted_client.db.qe.insert_one({"qe": "qe"}) + doc = await unencrypted_client.db.qe.find_one() + self.assertTrue(isinstance(doc["qe"], Binary)) + await encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) + doc = await unencrypted_client.db.qe2.find_one() + self.assertTrue(isinstance(doc["qe2"], Binary)) + await encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) + await encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) + + await encrypted_client.close() + await unencrypted_client.close() + + @async_client_context.require_version_min(8, 1, -1) + async def test_1_csfle_joins_no_schema(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"no_schema": "no_schema"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_2_qe_joins_no_schema(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"no_schema": "no_schema"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_3_no_schema_joins_csfle(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "csfle", + "as": "matched", + "pipeline": [{"$match": {"csfle": "csfle"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"csfle": "csfle"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_4_no_schema_joins_qe(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [ + {"$match": {"qe": "qe"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"qe": "qe"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_5_csfle_joins_csfle2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "csfle2", + "as": "matched", + "pipeline": [ + {"$match": {"csfle2": "csfle2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"csfle2": "csfle2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_6_qe_joins_qe2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "qe2", + "as": "matched", + "pipeline": [ + {"$match": {"qe2": "qe2"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"qe2": "qe2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_7_no_schema_joins_no_schema2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "no_schema2", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema2": "no_schema2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"no_schema2": "no_schema2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_8_csfle_joins_qe(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "qe"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [{"$match": {"qe": "qe"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertTrue("not supported" in str(exc)) + + @async_client_context.require_version_max(8, 1, -1) + async def test_9_error(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertTrue("Upgrade" in str(exc)) + + # https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap class TestRewrapWithSeparateClientEncryption(AsyncEncryptionIntegrationTest): MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { diff --git a/test/client-side-encryption/etc/data/lookup/key-doc.json b/test/client-side-encryption/etc/data/lookup/key-doc.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/key-doc.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-csfle.json b/test/client-side-encryption/etc/data/lookup/schema-csfle.json new file mode 100644 index 0000000000..29ac9ad5da --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-csfle.json @@ -0,0 +1,19 @@ +{ + "properties": { + "csfle": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-csfle2.json b/test/client-side-encryption/etc/data/lookup/schema-csfle2.json new file mode 100644 index 0000000000..3f1c02781c --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-csfle2.json @@ -0,0 +1,19 @@ +{ + "properties": { + "csfle2": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-qe.json b/test/client-side-encryption/etc/data/lookup/schema-qe.json new file mode 100644 index 0000000000..9428ea1b45 --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-qe.json @@ -0,0 +1,20 @@ +{ + "escCollection": "enxcol_.qe.esc", + "ecocCollection": "enxcol_.qe.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "qe", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": 0 + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-qe2.json b/test/client-side-encryption/etc/data/lookup/schema-qe2.json new file mode 100644 index 0000000000..77d5bd37cb --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-qe2.json @@ -0,0 +1,20 @@ +{ + "escCollection": "enxcol_.qe2.esc", + "ecocCollection": "enxcol_.qe2.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "qe2", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": 0 + } + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index 36c0ab0e24..6d669a538d 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -73,7 +73,7 @@ ) from test.utils_spec_runner import SpecRunner -from bson import DatetimeMS, Decimal128, encode, json_util +from bson import BSON, DatetimeMS, Decimal128, encode, json_util from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError @@ -91,6 +91,7 @@ EncryptionError, InvalidOperation, OperationFailure, + PyMongoError, ServerSelectionTimeoutError, WriteError, ) @@ -2403,6 +2404,310 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) +# https://github.com/mongodb/specifications/blob/527e22d5090ec48bf1e144c45fc831de0f1935f6/source/client-side-encryption/tests/README.md#25-test-lookup +class TestLookupProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + encrypted_client.drop_database("db") + + key_doc = json_data("etc", "data", "lookup", "key-doc.json") + create_key_vault(encrypted_client.db.keyvault, key_doc) + self.addCleanup(client_context.client.drop_database, "db") + + encrypted_client.db.create_collection( + "csfle", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle.json")}, + ) + encrypted_client.db.create_collection( + "csfle2", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle2.json")}, + ) + encrypted_client.db.create_collection( + "qe", encryptedFields=json_data("etc", "data", "lookup", "schema-qe.json") + ) + encrypted_client.db.create_collection( + "qe2", encryptedFields=json_data("etc", "data", "lookup", "schema-qe2.json") + ) + encrypted_client.db.create_collection("no_schema") + encrypted_client.db.create_collection("no_schema2") + + unencrypted_client = self.rs_or_single_client() + + encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) + doc = unencrypted_client.db.csfle.find_one() + self.assertTrue(isinstance(doc["csfle"], Binary)) + encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) + doc = unencrypted_client.db.csfle2.find_one() + self.assertTrue(isinstance(doc["csfle2"], Binary)) + encrypted_client.db.qe.insert_one({"qe": "qe"}) + doc = unencrypted_client.db.qe.find_one() + self.assertTrue(isinstance(doc["qe"], Binary)) + encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) + doc = unencrypted_client.db.qe2.find_one() + self.assertTrue(isinstance(doc["qe2"], Binary)) + encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) + encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) + + encrypted_client.close() + unencrypted_client.close() + + @client_context.require_version_min(8, 1, -1) + def test_1_csfle_joins_no_schema(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"no_schema": "no_schema"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_2_qe_joins_no_schema(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"no_schema": "no_schema"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_3_no_schema_joins_csfle(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "csfle", + "as": "matched", + "pipeline": [{"$match": {"csfle": "csfle"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"csfle": "csfle"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_4_no_schema_joins_qe(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [ + {"$match": {"qe": "qe"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"qe": "qe"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_5_csfle_joins_csfle2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "csfle2", + "as": "matched", + "pipeline": [ + {"$match": {"csfle2": "csfle2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"csfle2": "csfle2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_6_qe_joins_qe2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "qe2", + "as": "matched", + "pipeline": [ + {"$match": {"qe2": "qe2"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"qe2": "qe2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_7_no_schema_joins_no_schema2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "no_schema2", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema2": "no_schema2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"no_schema2": "no_schema2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_8_csfle_joins_qe(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "qe"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [{"$match": {"qe": "qe"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertTrue("not supported" in str(exc)) + + @client_context.require_version_max(8, 1, -1) + def test_9_error(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertTrue("Upgrade" in str(exc)) + + # https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { diff --git a/uv.lock b/uv.lock index 7dae5c9136..758036fe5d 100644 --- a/uv.lock +++ b/uv.lock @@ -1135,7 +1135,7 @@ wheels = [ [[package]] name = "pymongocrypt" version = "1.13.0.dev0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#1e96c283162aa7789cf01f99f211e0ace8e6d49f" } +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#1cad4ad1c4cd6c11c6a4710da2127dab6a374471" } dependencies = [ { name = "cffi" }, { name = "cryptography" }, From e99818df0830f36a3d23c04f7e543d8139b50df0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 24 Mar 2025 11:14:59 -0400 Subject: [PATCH 1810/2111] PYTHON-5222 - Revise assertion for unacknowledged client bulkWrite result (#2212) --- .../unacknowledged-client-bulkWrite.json | 51 ++++++++++--------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/test/command_monitoring/unacknowledged-client-bulkWrite.json b/test/command_monitoring/unacknowledged-client-bulkWrite.json index 61bb00726c..14740cea34 100644 --- a/test/command_monitoring/unacknowledged-client-bulkWrite.json +++ b/test/command_monitoring/unacknowledged-client-bulkWrite.json @@ -95,29 +95,34 @@ "ordered": false }, "expectResult": { - "insertedCount": { - "$$unsetOrMatches": 0 - }, - "upsertedCount": { - "$$unsetOrMatches": 0 - }, - "matchedCount": { - "$$unsetOrMatches": 0 - }, - "modifiedCount": { - "$$unsetOrMatches": 0 - }, - "deletedCount": { - "$$unsetOrMatches": 0 - }, - "insertResults": { - "$$unsetOrMatches": {} - }, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + }, + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } }, From f77e1ac119f5bc8d9b3f0864338edef17ff56ac4 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 24 Mar 2025 11:45:12 -0400 Subject: [PATCH 1811/2111] PYTHON-4961 - Split updateWithPipelines.yml by operation (#2217) --- .../bulkWrite-updateMany-pipeline.json | 148 ++++++ .../unified/bulkWrite-updateOne-pipeline.json | 156 ++++++ .../unified/findOneAndUpdate-pipeline.json | 130 +++++ test/crud/unified/updateMany-pipeline.json | 142 +++++ test/crud/unified/updateOne-pipeline.json | 150 ++++++ test/crud/unified/updateWithPipelines.json | 494 ------------------ 6 files changed, 726 insertions(+), 494 deletions(-) create mode 100644 test/crud/unified/bulkWrite-updateMany-pipeline.json create mode 100644 test/crud/unified/bulkWrite-updateOne-pipeline.json create mode 100644 test/crud/unified/findOneAndUpdate-pipeline.json create mode 100644 test/crud/unified/updateMany-pipeline.json create mode 100644 test/crud/unified/updateOne-pipeline.json delete mode 100644 test/crud/unified/updateWithPipelines.json diff --git a/test/crud/unified/bulkWrite-updateMany-pipeline.json b/test/crud/unified/bulkWrite-updateMany-pipeline.json new file mode 100644 index 0000000000..e938ea7535 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-pipeline.json @@ -0,0 +1,148 @@ +{ + "description": "bulkWrite-updateMany-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-pipeline.json b/test/crud/unified/bulkWrite-updateOne-pipeline.json new file mode 100644 index 0000000000..769bd106f8 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-pipeline.json @@ -0,0 +1,156 @@ +{ + "description": "bulkWrite-updateOne-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-pipeline.json b/test/crud/unified/findOneAndUpdate-pipeline.json new file mode 100644 index 0000000000..81dba9ae93 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-pipeline.json @@ -0,0 +1,130 @@ +{ + "description": "findOneAndUpdate-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate using pipelines", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "commandName": "findAndModify", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-pipeline.json b/test/crud/unified/updateMany-pipeline.json new file mode 100644 index 0000000000..e0f6d9d4a4 --- /dev/null +++ b/test/crud/unified/updateMany-pipeline.json @@ -0,0 +1,142 @@ +{ + "description": "updateMany-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-pipeline.json b/test/crud/unified/updateOne-pipeline.json new file mode 100644 index 0000000000..1348c6b53b --- /dev/null +++ b/test/crud/unified/updateOne-pipeline.json @@ -0,0 +1,150 @@ +{ + "description": "updateOne-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateWithPipelines.json b/test/crud/unified/updateWithPipelines.json deleted file mode 100644 index 164f2f6a19..0000000000 --- a/test/crud/unified/updateWithPipelines.json +++ /dev/null @@ -1,494 +0,0 @@ -{ - "description": "updateWithPipelines", - "schemaVersion": "1.0", - "runOnRequirements": [ - { - "minServerVersion": "4.1.11" - } - ], - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "test" - } - } - ], - "initialData": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "y": 1, - "t": { - "u": { - "v": 1 - } - } - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ], - "tests": [ - { - "description": "UpdateOne using pipelines", - "operations": [ - { - "object": "collection0", - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "expectResult": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": { - "$$unsetOrMatches": false - }, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ] - }, - { - "description": "UpdateMany using pipelines", - "operations": [ - { - "object": "collection0", - "name": "updateMany", - "arguments": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "expectResult": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - ] - }, - { - "description": "FindOneAndUpdate using pipelines", - "operations": [ - { - "object": "collection0", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "findAndModify": "test", - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "commandName": "findAndModify", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ] - }, - { - "description": "UpdateOne in bulk write using pipelines", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ] - }, - "expectResult": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": { - "$$unsetOrMatches": false - }, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ] - }, - { - "description": "UpdateMany in bulk write using pipelines", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ] - }, - "expectResult": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - ] - } - ] -} From dec13a80209ba3129a9cec63c17bc6fa1da08bd1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 24 Mar 2025 14:14:53 -0400 Subject: [PATCH 1812/2111] PYTHON-4990 - Remove deprecated field from GridFS unified tests (#2216) --- test/gridfs/delete.json | 60 ---------------------------- test/gridfs/download.json | 18 --------- test/gridfs/downloadByName.json | 15 ------- test/gridfs/upload.json | 69 --------------------------------- 4 files changed, 162 deletions(-) diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json index 7a4ec27f88..277b9ed7e1 100644 --- a/test/gridfs/delete.json +++ b/test/gridfs/delete.json @@ -49,10 +49,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -64,10 +61,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -79,10 +73,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -94,10 +85,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -197,10 +185,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -212,10 +197,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -227,10 +209,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -330,10 +309,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -345,10 +321,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -360,10 +333,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -448,10 +418,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -463,10 +430,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -478,10 +442,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -554,10 +515,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -569,10 +527,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -584,10 +539,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -599,10 +551,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -719,10 +668,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -734,10 +680,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -749,10 +692,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] diff --git a/test/gridfs/download.json b/test/gridfs/download.json index 48d3246218..f0cb851708 100644 --- a/test/gridfs/download.json +++ b/test/gridfs/download.json @@ -49,10 +49,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -64,10 +61,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -79,10 +73,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -94,10 +85,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -109,10 +97,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -124,9 +109,6 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json index cd44663957..7b20933c16 100644 --- a/test/gridfs/downloadByName.json +++ b/test/gridfs/downloadByName.json @@ -49,10 +49,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "47ed733b8d10be225eceba344d533586", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -64,10 +61,7 @@ "uploadDate": { "$date": "1970-01-02T00:00:00.000Z" }, - "md5": "b15835f133ff2e27c7cb28117bfae8f4", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -79,10 +73,7 @@ "uploadDate": { "$date": "1970-01-03T00:00:00.000Z" }, - "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -94,10 +85,7 @@ "uploadDate": { "$date": "1970-01-04T00:00:00.000Z" }, - "md5": "f623e75af30e62bbd73d6df5b50bb7b5", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -109,10 +97,7 @@ "uploadDate": { "$date": "1970-01-05T00:00:00.000Z" }, - "md5": "4c614360da93c0a041b22e537de151eb", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json index 97e18d2bc2..3c1644653a 100644 --- a/test/gridfs/upload.json +++ b/test/gridfs/upload.json @@ -470,75 +470,6 @@ } ] }, - { - "description": "upload when contentType is provided", - "operations": [ - { - "name": "upload", - "object": "bucket0", - "arguments": { - "filename": "filename", - "source": { - "$$hexBytes": "11" - }, - "chunkSizeBytes": 4, - "contentType": "image/jpeg" - }, - "expectResult": { - "$$type": "objectId" - }, - "saveResultAsEntity": "uploadedObjectId" - }, - { - "name": "find", - "object": "bucket0_files_collection", - "arguments": { - "filter": {} - }, - "expectResult": [ - { - "_id": { - "$$matchesEntity": "uploadedObjectId" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$$type": "date" - }, - "md5": { - "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" - }, - "filename": "filename", - "contentType": "image/jpeg" - } - ] - }, - { - "name": "find", - "object": "bucket0_chunks_collection", - "arguments": { - "filter": {} - }, - "expectResult": [ - { - "_id": { - "$$type": "objectId" - }, - "files_id": { - "$$matchesEntity": "uploadedObjectId" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - } - ] - }, { "description": "upload when metadata is provided", "operations": [ From df30eff3901439f04e27f9861fd93665047c947c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 24 Mar 2025 15:37:00 -0400 Subject: [PATCH 1813/2111] PYTHON-5135 - Rename WriteConcernFailed code name to WriteConcernTimeout (#2214) --- pymongo/asynchronous/client_session.py | 4 ++-- pymongo/synchronous/client_session.py | 4 ++-- test/retryable_writes/unified/insertOne-serverErrors.json | 3 +-- .../unified/commit-writeconcernerror.json | 6 ++---- test/transactions/unified/error-labels.json | 5 ++--- 5 files changed, 9 insertions(+), 13 deletions(-) diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index 98dd6a4706..b808684dd4 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -458,10 +458,10 @@ def _max_time_expired_error(exc: PyMongoError) -> bool: # From the transactions spec, all the retryable writes errors plus -# WriteConcernFailed. +# WriteConcernTimeout. _UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( [ - 64, # WriteConcernFailed + 64, # WriteConcernTimeout 50, # MaxTimeMSExpired ] ) diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index 60c15a9ec0..aaf2d7574f 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -457,10 +457,10 @@ def _max_time_expired_error(exc: PyMongoError) -> bool: # From the transactions spec, all the retryable writes errors plus -# WriteConcernFailed. +# WriteConcernTimeout. _UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( [ - 64, # WriteConcernFailed + 64, # WriteConcernTimeout 50, # MaxTimeMSExpired ] ) diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json b/test/retryable_writes/unified/insertOne-serverErrors.json index f404adcaf4..8edafb7029 100644 --- a/test/retryable_writes/unified/insertOne-serverErrors.json +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -739,7 +739,7 @@ ] }, { - "description": "InsertOne fails after WriteConcernError WriteConcernFailed", + "description": "InsertOne fails after WriteConcernError WriteConcernTimeout", "operations": [ { "name": "failPoint", @@ -757,7 +757,6 @@ ], "writeConcernError": { "code": 64, - "codeName": "WriteConcernFailed", "errmsg": "waiting for replication timed out", "errInfo": { "wtimeout": true diff --git a/test/transactions-convenient-api/unified/commit-writeconcernerror.json b/test/transactions-convenient-api/unified/commit-writeconcernerror.json index a6f6e6bd7f..568f7ede42 100644 --- a/test/transactions-convenient-api/unified/commit-writeconcernerror.json +++ b/test/transactions-convenient-api/unified/commit-writeconcernerror.json @@ -56,7 +56,7 @@ ], "tests": [ { - "description": "commitTransaction is retried after WriteConcernFailed timeout error", + "description": "commitTransaction is retried after WriteConcernTimeout timeout error", "operations": [ { "name": "failPoint", @@ -74,7 +74,6 @@ ], "writeConcernError": { "code": 64, - "codeName": "WriteConcernFailed", "errmsg": "waiting for replication timed out", "errInfo": { "wtimeout": true @@ -236,7 +235,7 @@ ] }, { - "description": "commitTransaction is retried after WriteConcernFailed non-timeout error", + "description": "commitTransaction is retried after WriteConcernTimeout non-timeout error", "operations": [ { "name": "failPoint", @@ -254,7 +253,6 @@ ], "writeConcernError": { "code": 64, - "codeName": "WriteConcernFailed", "errmsg": "multiple errors reported" } } diff --git a/test/transactions/unified/error-labels.json b/test/transactions/unified/error-labels.json index be8df10ed3..74ed750b07 100644 --- a/test/transactions/unified/error-labels.json +++ b/test/transactions/unified/error-labels.json @@ -1176,7 +1176,7 @@ ] }, { - "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed", + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernTimeout", "operations": [ { "object": "testRunner", @@ -1338,7 +1338,7 @@ ] }, { - "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed with wtimeout", + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernTimeout with wtimeout", "operations": [ { "object": "testRunner", @@ -1356,7 +1356,6 @@ ], "writeConcernError": { "code": 64, - "codeName": "WriteConcernFailed", "errmsg": "waiting for replication timed out", "errInfo": { "wtimeout": true From 296046cc380959def92c917dcf0e755c105fc4df Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 24 Mar 2025 16:02:10 -0400 Subject: [PATCH 1814/2111] =?UTF-8?q?PYTHON-5121=20-=20Use=20canonical=20E?= =?UTF-8?q?xtended=20JSON=20for=20BSON=20binary=20vector=20spec=E2=80=A6?= =?UTF-8?q?=20(#2215)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/bson_binary_vector/float32.json | 2 +- test/test_bson_binary_vector.py | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/test/bson_binary_vector/float32.json b/test/bson_binary_vector/float32.json index 845f504ff3..72dafce10f 100644 --- a/test/bson_binary_vector/float32.json +++ b/test/bson_binary_vector/float32.json @@ -32,7 +32,7 @@ { "description": "Infinity Vector FLOAT32", "valid": true, - "vector": ["-inf", 0.0, "inf"], + "vector": [{"$numberDouble": "-Infinity"}, 0.0, {"$numberDouble": "Infinity"} ], "dtype_hex": "0x27", "dtype_alias": "FLOAT32", "padding": 0, diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py index a49f515fea..9bfdcbfb9a 100644 --- a/test/test_bson_binary_vector.py +++ b/test/test_bson_binary_vector.py @@ -16,12 +16,11 @@ import binascii import codecs -import json import struct from pathlib import Path from test import unittest -from bson import decode, encode +from bson import decode, encode, json_util from bson.binary import Binary, BinaryVectorDtype _TEST_PATH = Path(__file__).parent / "bson_binary_vector" @@ -62,9 +61,6 @@ def run_test(self): cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) decoded_doc = decode(cB_exp) binary_obs = decoded_doc[test_key] - # Handle special float cases like '-inf' - if dtype_exp in [BinaryVectorDtype.FLOAT32]: - vector_exp = [float(x) for x in vector_exp] # Test round-tripping canonical bson. self.assertEqual(encode(decoded_doc), cB_exp, description) @@ -104,7 +100,7 @@ def run_test(self): def create_tests(): for filename in _TEST_PATH.glob("*.json"): with codecs.open(str(filename), encoding="utf-8") as test_file: - test_method = create_test(json.load(test_file)) + test_method = create_test(json_util.loads(test_file.read())) setattr(TestBSONBinaryVector, "test_" + filename.stem, test_method) From fa5e637da8f97e4f58b8e841d4d74d00aeb9c13f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 25 Mar 2025 11:01:07 -0400 Subject: [PATCH 1815/2111] PYTHON-4937 - Add support for 'number' alias in $$type operator (#2223) --- .../operator-type-number_alias.json | 174 ++++++++++++++++++ test/unified_format_shared.py | 1 + 2 files changed, 175 insertions(+) create mode 100644 test/unified-test-format/valid-pass/operator-type-number_alias.json diff --git a/test/unified-test-format/valid-pass/operator-type-number_alias.json b/test/unified-test-format/valid-pass/operator-type-number_alias.json new file mode 100644 index 0000000000..e628d0d777 --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-type-number_alias.json @@ -0,0 +1,174 @@ +{ + "description": "operator-type-number_alias", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "type number alias matches int32", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberInt": "2147483647" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches int64", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberLong": "9223372036854775807" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches double", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDouble": "2.71828" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches decimal128", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDecimal": "3.14159" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 009c5c7e28..ea0f2f233e 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -363,6 +363,7 @@ def closed(self, event: Union[ServerClosedEvent, TopologyClosedEvent]) -> None: "decimal": (Decimal128,), "maxKey": (MaxKey,), "minKey": (MinKey,), + "number": (float, int, Int64, Decimal128), } From 440316982122e1fbd2d95429e9a09407b28113fc Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 25 Mar 2025 11:20:10 -0400 Subject: [PATCH 1816/2111] PYTHON-4940 - Add index hint as an explicit parameter for distinct command. (#2225) --- doc/changelog.rst | 3 +++ pymongo/asynchronous/collection.py | 12 ++++++++++++ pymongo/synchronous/collection.py | 12 ++++++++++++ 3 files changed, 27 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index b172da6b8e..12991eeb29 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,9 @@ PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. - Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. +- Added index hinting support to the + :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` and + :meth:`~pymongo.collection.Collection.distinct` commands. Issues Resolved ............... diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index aef3539e8c..b87f207760 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -3111,6 +3111,7 @@ async def distinct( filter: Optional[Mapping[str, Any]] = None, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, + hint: Optional[_IndexKeyHint] = None, **kwargs: Any, ) -> list: """Get a list of distinct values for `key` among all documents @@ -3138,8 +3139,15 @@ async def distinct( :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param comment: A user-provided comment to attach to this command. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``). :param kwargs: See list of options above. + .. versionchanged:: 4.12 + Added ``hint`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -3158,6 +3166,10 @@ async def distinct( cmd.update(kwargs) if comment is not None: cmd["comment"] = comment + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + cmd["hint"] = hint # type: ignore[assignment] async def _cmd( session: Optional[AsyncClientSession], diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index fe869a622d..e63ed70fc2 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -3104,6 +3104,7 @@ def distinct( filter: Optional[Mapping[str, Any]] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, + hint: Optional[_IndexKeyHint] = None, **kwargs: Any, ) -> list: """Get a list of distinct values for `key` among all documents @@ -3131,8 +3132,15 @@ def distinct( :class:`~pymongo.client_session.ClientSession`. :param comment: A user-provided comment to attach to this command. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). :param kwargs: See list of options above. + .. versionchanged:: 4.12 + Added ``hint`` parameter. + .. versionchanged:: 3.6 Added ``session`` parameter. @@ -3151,6 +3159,10 @@ def distinct( cmd.update(kwargs) if comment is not None: cmd["comment"] = comment + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + cmd["hint"] = hint # type: ignore[assignment] def _cmd( session: Optional[ClientSession], From 43fa11cb8023336dd553c53fd788ac407389c048 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 25 Mar 2025 13:46:44 -0400 Subject: [PATCH 1817/2111] PYTHON-4939 - Sync CSOT runCursorCommand test (#2221) --- test/asynchronous/unified_format.py | 6 +- test/csot/runCursorCommand.json | 583 ++++++++++++++++++++++++++++ test/unified_format.py | 6 +- 3 files changed, 593 insertions(+), 2 deletions(-) create mode 100644 test/csot/runCursorCommand.json diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 886b31e4a6..c6884a6d16 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -568,7 +568,11 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") - if "tailable" in class_name: + if ( + "tailable" in class_name + or "tailable" in description + and "non-tailable" not in description + ): self.skipTest("CSOT not implemented for tailable cursors") if "sessions" in class_name: self.skipTest("CSOT not implemented for sessions") diff --git a/test/csot/runCursorCommand.json b/test/csot/runCursorCommand.json new file mode 100644 index 0000000000..36f774fb5a --- /dev/null +++ b/test/csot/runCursorCommand.json @@ -0,0 +1,583 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "commandClient", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "commandDb", + "client": "commandClient", + "databaseName": "commandDb" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "errors if timeoutMode is set without timeoutMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if timeoutMode is cursorLifetime and cursorType is tailableAwait", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "timeoutMS": 100, + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "cappedCollection", + "batchSize": 1, + "tailable": true + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 1, + "cursorType": "tailable" + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "foo": "bar" + }, + { + "fizz": "buzz" + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true + }, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 471a067bee..4aec2ad729 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -567,7 +567,11 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") - if "tailable" in class_name: + if ( + "tailable" in class_name + or "tailable" in description + and "non-tailable" not in description + ): self.skipTest("CSOT not implemented for tailable cursors") if "sessions" in class_name: self.skipTest("CSOT not implemented for sessions") From 894d5e1c7fbc8e884cff81789be4e0dec9d42f31 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Mar 2025 12:53:23 -0500 Subject: [PATCH 1818/2111] PYTHON-5231 Finish up test scripts and add documentation for creating a new test suite (#2224) --- .evergreen/run-tests.sh | 6 ----- .evergreen/scripts/run_server.py | 5 ---- .evergreen/scripts/setup_tests.py | 36 ++++++++++++++++--------- .evergreen/scripts/utils.py | 45 ++++++++++++++++++++++++++----- CONTRIBUTING.md | 23 ++++++++++++++-- 5 files changed, 82 insertions(+), 33 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 6f53ced61c..04dd16d34f 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -24,12 +24,6 @@ else exit 1 fi -# Source the local secrets export file if available. -if [ -f "./secrets-export.sh" ]; then - echo "Sourcing local secrets file" - . "./secrets-export.sh" -fi - # List the packages. uv sync ${UV_ARGS} --reinstall uv pip list diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py index f85207daa4..5d9aa54e11 100644 --- a/.evergreen/scripts/run_server.py +++ b/.evergreen/scripts/run_server.py @@ -28,11 +28,6 @@ def start_server(): elif test_name == "load_balancer": set_env("LOAD_BALANCER") - elif test_name == "auth_oidc": - raise ValueError( - "OIDC auth does not use run-orchestration directly, do not use run-server!" - ) - elif test_name == "ocsp": opts.ssl = True if "ORCHESTRATION_FILE" not in os.environ: diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 17f9de1a71..74971bca76 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -115,8 +115,17 @@ def setup_libmongocrypt(): run_command("chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll") -def get_secrets(name: str) -> None: - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh {name}") +def load_config_from_file(path: str | Path) -> dict[str, str]: + config = read_env(path) + for key, value in config.items(): + write_env(key, value) + return config + + +def get_secrets(name: str) -> dict[str, str]: + secrets_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/secrets_handling") + run_command(f"bash {secrets_dir}/setup-secrets.sh {name}", cwd=secrets_dir) + return load_config_from_file(secrets_dir / "secrets-export.sh") def handle_test_env() -> None: @@ -158,7 +167,7 @@ def handle_test_env() -> None: # Handle pass through env vars. for var in PASS_THROUGH_ENV: - if is_set(var): + if is_set(var) or getattr(opts, var.lower()): write_env(var, os.environ[var]) if extra := EXTRAS_MAP.get(test_name, ""): @@ -233,12 +242,11 @@ def handle_test_env() -> None: if is_set("MONGODB_URI"): write_env("PYMONGO_MUST_CONNECT", "true") - if is_set("DISABLE_TEST_COMMANDS"): + if is_set("DISABLE_TEST_COMMANDS") or opts.disable_test_commands: write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") if test_name == "enterprise_auth": - get_secrets("drivers/enterprise_auth") - config = read_env(f"{ROOT}/secrets-export.sh") + config = get_secrets("drivers/enterprise_auth") if PLATFORM == "windows": LOGGER.info("Setting GSSAPI_PASS") write_env("GSSAPI_PASS", config["SASL_PASS"]) @@ -316,7 +324,7 @@ def handle_test_env() -> None: write_env("CLIENT_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/client.pem") write_env("CA_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem") - compressors = os.environ.get("COMPRESSORS") + compressors = os.environ.get("COMPRESSORS") or opts.compressor if compressors == "snappy": UV_ARGS.append("--extra snappy") elif compressors == "zstd": @@ -349,13 +357,15 @@ def handle_test_env() -> None: if test_name == "encryption": if not DRIVERS_TOOLS: raise RuntimeError("Missing DRIVERS_TOOLS") - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/setup-secrets.sh") - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/start-servers.sh") + csfle_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/csfle") + run_command(f"bash {csfle_dir}/setup-secrets.sh", cwd=csfle_dir) + load_config_from_file(csfle_dir / "secrets-export.sh") + run_command(f"bash {csfle_dir}/start-servers.sh") if sub_test_name == "pyopenssl": UV_ARGS.append("--extra ocsp") - if is_set("TEST_CRYPT_SHARED"): + if is_set("TEST_CRYPT_SHARED") or opts.crypt_shared: config = read_env(f"{DRIVERS_TOOLS}/mo-expansion.sh") CRYPT_SHARED_DIR = Path(config["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) @@ -414,15 +424,15 @@ def handle_test_env() -> None: # Add coverage if requested. # Only cover CPython. PyPy reports suspiciously low coverage. - if is_set("COVERAGE") and platform.python_implementation() == "CPython": + if (is_set("COVERAGE") or opts.cov) and platform.python_implementation() == "CPython": # Keep in sync with combine-coverage.sh. # coverage >=5 is needed for relative_files=true. UV_ARGS.append("--group coverage") TEST_ARGS = f"{TEST_ARGS} --cov" write_env("COVERAGE") - if is_set("GREEN_FRAMEWORK"): - framework = os.environ["GREEN_FRAMEWORK"] + if is_set("GREEN_FRAMEWORK") or opts.green_framework: + framework = opts.green_framework or os.environ["GREEN_FRAMEWORK"] UV_ARGS.append(f"--group {framework}") else: diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 0ff3b76a5f..3eb44f2ab9 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -52,7 +52,10 @@ class Distro: # Tests that require a sub test suite. SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"] -EXTRA_TESTS = ["mod_wsgi", "aws_lambda", "search_index"] +EXTRA_TESTS = ["mod_wsgi", "aws_lambda"] + +# Tests that do not use run-orchestration. +NO_RUN_ORCHESTRATION = ["auth_oidc", "atlas_connect", "data_lake", "mockupdb", "serverless"] def get_test_options( @@ -75,19 +78,47 @@ def get_test_options( else: parser.add_argument( "test_name", - choices=sorted(TEST_SUITE_MAP), + choices=set(TEST_SUITE_MAP) - set(NO_RUN_ORCHESTRATION), nargs="?", default="default", help="The optional name of the test suite to be run, which informs the server configuration.", ) parser.add_argument( - "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level" + "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level." ) parser.add_argument( - "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level" + "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level." ) - parser.add_argument("--auth", action="store_true", help="Whether to add authentication") - parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration") + parser.add_argument("--auth", action="store_true", help="Whether to add authentication.") + parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration.") + + # Add the test modifiers. + if require_sub_test_name: + parser.add_argument( + "--debug-log", action="store_true", help="Enable pymongo standard logging." + ) + parser.add_argument("--cov", action="store_true", help="Add test coverage.") + parser.add_argument( + "--green-framework", + nargs=1, + choices=["eventlet", "gevent"], + help="Optional green framework to test against.", + ) + parser.add_argument( + "--compressor", + nargs=1, + choices=["zlib", "zstd", "snappy"], + help="Optional compression algorithm.", + ) + parser.add_argument("--crypt-shared", action="store_true", help="Test with crypt_shared.") + parser.add_argument("--no-ext", action="store_true", help="Run without c extensions.") + parser.add_argument( + "--mongodb-api-version", choices=["1"], help="MongoDB stable API version to use." + ) + parser.add_argument( + "--disable-test-commands", action="store_true", help="Disable test commands." + ) + # Get the options. if not allow_extra_opts: opts, extra_opts = parser.parse_args(), [] @@ -113,7 +144,7 @@ def get_test_options( return opts, extra_opts -def read_env(path: Path | str) -> dict[str, Any]: +def read_env(path: Path | str) -> dict[str, str]: config = dict() with Path(path).open() as fid: for line in fid.readlines(): diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e75510171e..d8cc8c8bcd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -217,9 +217,11 @@ the pages will re-render and the browser will automatically refresh. ### Usage -- Run `just run-server` with optional args to set up the server. - All given flags will be passed to `run-orchestration.sh` in `$DRIVERS_TOOLS`. +- Run `just run-server` with optional args to set up the server. All given options will be passed to + `run-orchestration.sh` in `$DRIVERS_TOOLS`. See `$DRIVERS_TOOLS/evergreen/run-orchestration.sh -h` + for a full list of options. - Run `just setup-tests` with optional args to set up the test environment, secrets, etc. + See `just setup-tests -h` for a full list of available options. - Run `just run-tests` to run the tests in an appropriate Python environment. - When done, run `just teardown-tests` to clean up and `just stop-server` to stop the server. @@ -346,11 +348,28 @@ If you are running one of the `no-responder` tests, omit the `run-server` step. - Run the tests: `just run-tests`. ## Enable Debug Logs + - Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. - Add `log_cli_level = "DEBUG` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine. - You can also set `DEBUG_LOG=1` and run either `just setup-tests` or `just-test`. +- Finally, you can use `just setup-tests --debug-log`. - For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for the patch. +## Adding a new test suite + +- If adding new tests files that should only be run for that test suite, add a pytest marker to the file and add + to the list of pytest markers in `pyproject.toml`. Then add the test suite to the `TEST_SUITE_MAP` in `.evergreen/scripts/utils.py`. If for some reason it is not a pytest-runnable test, add it to the list of `EXTRA_TESTS` instead. +- If the test uses Atlas or otherwise doesn't use `run-orchestration.sh`, add it to the `NO_RUN_ORCHESTRATION` list in + `.evergreen/scripts/utils.py`. +- If there is something special required to run the local server or there is an extra flag that should always be set + like `AUTH`, add that logic to `.evergreen/scripts/run_server.py`. +- The bulk of the logic will typically be in `.evergreen/scripts/setup_tests.py`. This is where you should fetch secrets and make them available using `write_env`, start services, and write other env vars needed using `write_env`. +- If there are any special test considerations, including not running `pytest` at all, handle it in `.evergreen/scripts/run_tests.py`. +- If there are any services or atlas clusters to teardown, handle them in `.evergreen/scripts/teardown_tests.py`. +- Add functions to generate the test variant(s) and task(s) to the `.evergreen/scripts/generate_config.py`. +- Regenerate the test variants and tasks using the instructions in `.evergreen/scripts/generate_config.py`. +- Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. + ## Re-sync Spec Tests If you would like to re-sync the copy of the specification tests in the From 95cedeefb85fc010e50a2797b1dd461ee36247e8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Mar 2025 13:40:03 -0500 Subject: [PATCH 1819/2111] PYTHON-5232 Fix aws lambda test setup (#2226) --- .evergreen/scripts/setup_tests.py | 2 +- pyproject.toml | 1 + uv.lock | 6 ++++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 74971bca76..d02cd8759a 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -198,7 +198,7 @@ def handle_test_env() -> None: AUTH = "auth" if test_name == "aws_lambda": - UV_ARGS.append("--with pip") + UV_ARGS.append("--group pip") # Store AWS creds if they were given. if "AWS_ACCESS_KEY_ID" in os.environ: for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]: diff --git a/pyproject.toml b/pyproject.toml index f8c25ed602..353f527879 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" dev = [ "pre-commit>=4.0" ] +pip = ["pip"] gevent = ["gevent"] eventlet = ["eventlet"] coverage = [ diff --git a/uv.lock b/uv.lock index 758036fe5d..39aae339ee 100644 --- a/uv.lock +++ b/uv.lock @@ -1036,7 +1036,6 @@ snappy = [ { name = "python-snappy" }, ] test = [ - { name = "pip" }, { name = "pytest" }, { name = "pytest-asyncio" }, ] @@ -1064,6 +1063,9 @@ mockupdb = [ perf = [ { name = "simplejson" }, ] +pip = [ + { name = "pip" }, +] pymongocrypt-source = [ { name = "pymongocrypt" }, ] @@ -1081,7 +1083,6 @@ requires-dist = [ { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, { name = "furo", marker = "extra == 'docs'", specifier = "==2024.8.6" }, - { name = "pip", marker = "extra == 'test'" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, @@ -1111,6 +1112,7 @@ eventlet = [{ name = "eventlet" }] gevent = [{ name = "gevent" }] mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] perf = [{ name = "simplejson" }] +pip = [{ name = "pip" }] pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] typing = [ { name = "mypy", specifier = "==1.14.1" }, From 38ceda4c09d0945b50d296abf9c59e0389c455db Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Mar 2025 13:42:29 -0500 Subject: [PATCH 1820/2111] PYTHON-5189 Explicitly test drivers on Graviton processors (#2222) --- .evergreen/generated_configs/variants.yml | 11 +++++++++++ .evergreen/scripts/generate_config.py | 16 +++++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index aa20fef895..53e178bd19 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -45,6 +45,17 @@ buildvariants: batchtime: 10080 expansions: NO_EXT: "1" + - name: other-hosts-amazon2023 + tasks: + - name: .latest !.sync_async .sharded_cluster .auth .ssl + - name: .latest !.sync_async .replica_set .noauth .ssl + - name: .latest !.sync_async .standalone .noauth .nossl + display_name: Other hosts Amazon2023 + run_on: + - amazon2023-arm64-latest-large-m8g + batchtime: 10080 + expansions: + NO_EXT: "1" # Atlas connect tests - name: atlas-connect-rhel8-python3.9 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 0a2496c66d..50c81a5840 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -73,9 +73,16 @@ class Host: DEFAULT_HOST = HOSTS["rhel8"] # Other hosts -OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64"] +OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64", "Amazon2023"] for name, run_on in zip( - OTHER_HOSTS, ["rhel92-fips", "rhel8-zseries-small", "rhel8-power-small", "rhel82-arm64-small"] + OTHER_HOSTS, + [ + "rhel92-fips", + "rhel8-zseries-small", + "rhel8-power-small", + "rhel82-arm64-small", + "amazon2023-arm64-latest-large-m8g", + ], ): HOSTS[name] = Host(name, run_on, name, dict()) @@ -772,9 +779,12 @@ def create_alternative_hosts_variants(): handle_c_ext(C_EXTS[0], expansions) for host_name in OTHER_HOSTS: host = HOSTS[host_name] + tags = [".6.0 .standalone !.sync_async"] + if host_name == "Amazon2023": + tags = [f".latest !.sync_async {t}" for t in SUB_TASKS] variants.append( create_variant( - [".6.0 .standalone !.sync_async"], + tags, display_name=get_display_name("Other hosts", host), batchtime=batchtime, host=host, From eea8a3725799c6e4d7286336ceb229c5025ebb25 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 25 Mar 2025 13:45:06 -0700 Subject: [PATCH 1821/2111] PYTHON-3636 AsyncMongoClient should perform SRV resolution lazily (#2191) Co-authored-by: Noah Stapp Co-authored-by: Shane Harvey --- doc/changelog.rst | 2 + pymongo/asynchronous/encryption.py | 2 +- pymongo/asynchronous/mongo_client.py | 257 +++++-- pymongo/asynchronous/monitor.py | 8 +- pymongo/asynchronous/srv_resolver.py | 160 +++++ pymongo/asynchronous/uri_parser.py | 188 ++++++ pymongo/encryption_options.py | 2 +- pymongo/synchronous/encryption.py | 2 +- pymongo/synchronous/mongo_client.py | 257 +++++-- pymongo/synchronous/monitor.py | 2 +- pymongo/{ => synchronous}/srv_resolver.py | 24 +- pymongo/synchronous/uri_parser.py | 188 ++++++ pymongo/uri_parser.py | 629 +----------------- pymongo/uri_parser_shared.py | 549 +++++++++++++++ test/__init__.py | 2 +- test/asynchronous/__init__.py | 6 +- test/asynchronous/helpers.py | 2 +- test/asynchronous/test_client.py | 28 +- .../test_discovery_and_monitoring.py | 4 +- test/asynchronous/test_dns.py | 44 +- test/asynchronous/test_srv_polling.py | 21 +- test/auth_aws/test_auth_aws.py | 2 +- test/auth_oidc/test_auth_oidc.py | 2 +- test/helpers.py | 2 +- test/test_client.py | 28 +- test/test_discovery_and_monitoring.py | 2 +- test/test_dns.py | 40 +- test/test_srv_polling.py | 17 +- test/test_uri_parser.py | 4 +- test/test_uri_spec.py | 2 +- tools/synchro.py | 1 + 31 files changed, 1627 insertions(+), 850 deletions(-) create mode 100644 pymongo/asynchronous/srv_resolver.py create mode 100644 pymongo/asynchronous/uri_parser.py rename pymongo/{ => synchronous}/srv_resolver.py (88%) create mode 100644 pymongo/synchronous/uri_parser.py create mode 100644 pymongo/uri_parser_shared.py diff --git a/doc/changelog.rst b/doc/changelog.rst index 12991eeb29..5683fcaaca 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,8 @@ PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. - Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. +- AsyncMongoClient no longer performs DNS resolution for "mongodb+srv://" connection strings on creation. + To avoid blocking the asyncio loop, the resolution is now deferred until the client is first connected. - Added index hinting support to the :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` and :meth:`~pymongo.collection.Collection.distinct` commands. diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 3582bec9ab..68de42db84 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -87,7 +87,7 @@ from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import get_ssl_context from pymongo.typings import _DocumentType, _DocumentTypeArg -from pymongo.uri_parser import parse_host +from pymongo.uri_parser_shared import parse_host from pymongo.write_concern import WriteConcern if TYPE_CHECKING: diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index ecd57a1886..754b8325ed 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -44,6 +44,7 @@ AsyncContextManager, AsyncGenerator, Callable, + Collection, Coroutine, FrozenSet, Generic, @@ -60,8 +61,8 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.timestamp import Timestamp -from pymongo import _csot, common, helpers_shared, periodic_executor, uri_parser -from pymongo.asynchronous import client_session, database +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.asynchronous import client_session, database, uri_parser from pymongo.asynchronous.change_stream import AsyncChangeStream, AsyncClusterChangeStream from pymongo.asynchronous.client_bulk import _AsyncClientBulk from pymongo.asynchronous.client_session import _EmptyServerSession @@ -113,11 +114,14 @@ _DocumentTypeArg, _Pipeline, ) -from pymongo.uri_parser import ( +from pymongo.uri_parser_shared import ( + SRV_SCHEME, _check_options, _handle_option_deprecations, _handle_security_options, _normalize_options, + _validate_uri, + split_hosts, ) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern @@ -128,6 +132,7 @@ from pymongo.asynchronous.bulk import _AsyncBulk from pymongo.asynchronous.client_session import AsyncClientSession, _ServerSession from pymongo.asynchronous.cursor import _ConnectionManager + from pymongo.asynchronous.encryption import _Encrypter from pymongo.asynchronous.pool import AsyncConnection from pymongo.asynchronous.server import Server from pymongo.read_concern import ReadConcern @@ -750,6 +755,9 @@ def __init__( port = self.PORT if not isinstance(port, int): raise TypeError(f"port must be an instance of int, not {type(port)}") + self._host = host + self._port = port + self._topology: Topology = None # type: ignore[assignment] # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. @@ -760,8 +768,10 @@ def __init__( # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) keyword_opts["document_class"] = doc_class + self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} seeds = set() + is_srv = False username = None password = None dbase = None @@ -769,29 +779,22 @@ def __init__( fqdn = None srv_service_name = keyword_opts.get("srvservicename") srv_max_hosts = keyword_opts.get("srvmaxhosts") - if len([h for h in host if "/" in h]) > 1: + if len([h for h in self._host if "/" in h]) > 1: raise ConfigurationError("host must not contain multiple MongoDB URIs") - for entity in host: + for entity in self._host: # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' # it must be a URI, # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names if "/" in entity: - # Determine connection timeout from kwargs. - timeout = keyword_opts.get("connecttimeoutms") - if timeout is not None: - timeout = common.validate_timeout_or_none_or_zero( - keyword_opts.cased_key("connecttimeoutms"), timeout - ) - res = uri_parser.parse_uri( + res = _validate_uri( entity, port, validate=True, warn=True, normalize=False, - connect_timeout=timeout, - srv_service_name=srv_service_name, srv_max_hosts=srv_max_hosts, ) + is_srv = entity.startswith(SRV_SCHEME) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -799,7 +802,7 @@ def __init__( opts = res["options"] fqdn = res["fqdn"] else: - seeds.update(uri_parser.split_hosts(entity, port)) + seeds.update(split_hosts(entity, self._port)) if not seeds: raise ConfigurationError("need to specify at least one host") @@ -820,80 +823,179 @@ def __init__( keyword_opts["tz_aware"] = tz_aware keyword_opts["connect"] = connect - # Handle deprecated options in kwarg options. - keyword_opts = _handle_option_deprecations(keyword_opts) - # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary( - dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) - ) - - # Override connection string options with kwarg options. - opts.update(keyword_opts) + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) if srv_service_name is None: srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") - # Handle security-option conflicts in combined options. - opts = _handle_security_options(opts) - # Normalize combined options. - opts = _normalize_options(opts) - _check_options(seeds, opts) + opts = self._normalize_and_validate_options(opts, seeds) # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self._options = options = ClientOptions(username, password, dbase, opts, _IS_SYNC) + self._options = ClientOptions(username, password, dbase, opts, _IS_SYNC) self._default_database_name = dbase self._lock = _async_create_lock() self._kill_cursors_queue: list = [] - self._event_listeners = options.pool_options._event_listeners - super().__init__( - options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern, + self._encrypter: Optional[_Encrypter] = None + + self._resolve_srv_info.update( + { + "is_srv": is_srv, + "username": username, + "password": password, + "dbase": dbase, + "seeds": seeds, + "fqdn": fqdn, + "srv_service_name": srv_service_name, + "pool_class": pool_class, + "monitor_class": monitor_class, + "condition_class": condition_class, + } ) - self._topology_settings = TopologySettings( - seeds=seeds, - replica_set_name=options.replica_set_name, - pool_class=pool_class, - pool_options=options.pool_options, - monitor_class=monitor_class, - condition_class=condition_class, - local_threshold_ms=options.local_threshold_ms, - server_selection_timeout=options.server_selection_timeout, - server_selector=options.server_selector, - heartbeat_frequency=options.heartbeat_frequency, - fqdn=fqdn, - direct_connection=options.direct_connection, - load_balanced=options.load_balanced, - srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts, - server_monitoring_mode=options.server_monitoring_mode, + super().__init__( + self._options.codec_options, + self._options.read_preference, + self._options.write_concern, + self._options.read_concern, ) + if not is_srv: + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + self._opened = False self._closed = False - self._init_background() + if not is_srv: + self._init_background() if _IS_SYNC and connect: self._get_topology() # type: ignore[unused-coroutine] - self._encrypter = None + async def _resolve_srv(self) -> None: + keyword_opts = self._resolve_srv_info["keyword_opts"] + seeds = set() + opts = common._CaseInsensitiveDictionary() + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = await uri_parser._parse_srv( + entity, + self._port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + opts = res["options"] + else: + seeds.update(split_hosts(entity, self._port)) + + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + tz_aware = keyword_opts["tz_aware"] + connect = keyword_opts["connect"] + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, seeds) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", self._resolve_srv_info["username"]) + password = opts.get("password", self._resolve_srv_info["password"]) + self._options = ClientOptions( + username, password, self._resolve_srv_info["dbase"], opts, _IS_SYNC + ) + + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + + def _init_based_on_options( + self, seeds: Collection[tuple[str, int]], srv_max_hosts: Any, srv_service_name: Any + ) -> None: + self._event_listeners = self._options.pool_options._event_listeners + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=self._options.replica_set_name, + pool_class=self._resolve_srv_info["pool_class"], + pool_options=self._options.pool_options, + monitor_class=self._resolve_srv_info["monitor_class"], + condition_class=self._resolve_srv_info["condition_class"], + local_threshold_ms=self._options.local_threshold_ms, + server_selection_timeout=self._options.server_selection_timeout, + server_selector=self._options.server_selector, + heartbeat_frequency=self._options.heartbeat_frequency, + fqdn=self._resolve_srv_info["fqdn"], + direct_connection=self._options.direct_connection, + load_balanced=self._options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=self._options.server_monitoring_mode, + ) if self._options.auto_encryption_opts: from pymongo.asynchronous.encryption import _Encrypter self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) self._timeout = self._options.timeout - if _HAS_REGISTER_AT_FORK: - # Add this client to the list of weakly referenced items. - # This will be used later if we fork. - AsyncMongoClient._clients[self._topology._topology_id] = self + def _normalize_and_validate_options( + self, opts: common._CaseInsensitiveDictionary, seeds: set[tuple[str, int | None]] + ) -> common._CaseInsensitiveDictionary: + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + return opts + + def _validate_kwargs_and_update_opts( + self, + keyword_opts: common._CaseInsensitiveDictionary, + opts: common._CaseInsensitiveDictionary, + ) -> common._CaseInsensitiveDictionary: + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + # Override connection string options with kwarg options. + opts.update(keyword_opts) + return opts async def aconnect(self) -> None: """Explicitly connect to MongoDB asynchronously instead of on the first operation.""" @@ -901,6 +1003,10 @@ async def aconnect(self) -> None: def _init_background(self, old_pid: Optional[int] = None) -> None: self._topology = Topology(self._topology_settings) + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + AsyncMongoClient._clients[self._topology._topology_id] = self # Seed the topology with the old one's pid so we can detect clients # that are opened before a fork and used after. self._topology._pid = old_pid @@ -1115,16 +1221,24 @@ def options(self) -> ClientOptions: """ return self._options + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + return ( + tuple(sorted(self._resolve_srv_info["seeds"])), + self._options.replica_set_name, + self._resolve_srv_info["fqdn"], + self._resolve_srv_info["srv_service_name"], + ) + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): - return self._topology == other._topology + return self.eq_props() == other.eq_props() return NotImplemented def __ne__(self, other: Any) -> bool: return not self == other def __hash__(self) -> int: - return hash(self._topology) + return hash(self.eq_props()) def _repr_helper(self) -> str: def option_repr(option: str, value: Any) -> str: @@ -1140,13 +1254,16 @@ def option_repr(option: str, value: Any) -> str: return f"{option}={value!r}" # Host first... - options = [ - "host=%r" - % [ - "%s:%d" % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds + if self._topology is None: + options = [f"host='mongodb+srv://{self._resolve_srv_info['fqdn']}'"] + else: + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] ] - ] # ... then everything in self._constructor_args... options.extend( option_repr(key, self._options._options[key]) for key in self._constructor_args @@ -1552,6 +1669,8 @@ async def close(self) -> None: .. versionchanged:: 3.6 End all server sessions created by this client. """ + if self._topology is None: + return session_ids = self._topology.pop_all_sessions() if session_ids: await self._end_sessions(session_ids) @@ -1582,6 +1701,9 @@ async def _get_topology(self) -> Topology: launches the connection process in the background. """ if not self._opened: + if self._resolve_srv_info["is_srv"]: + await self._resolve_srv() + self._init_background() await self._topology.open() async with self._lock: self._kill_cursors_executor.open() @@ -2511,6 +2633,7 @@ async def handle( self.completed_handshake, self.service_id, ) + assert self.client._topology is not None await self.client._topology.handle_error(self.server_address, err_ctx) async def __aenter__(self) -> _MongoClientErrorHandler: diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index 5cb42f4d46..1b0799e1c4 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -25,6 +25,7 @@ from pymongo import common, periodic_executor from pymongo._csot import MovingMinimum +from pymongo.asynchronous.srv_resolver import _SrvResolver from pymongo.errors import NetworkTimeout, _OperationCancelled from pymongo.hello import Hello from pymongo.lock import _async_create_lock @@ -33,7 +34,6 @@ from pymongo.pool_options import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription -from pymongo.srv_resolver import _SrvResolver if TYPE_CHECKING: from pymongo.asynchronous.pool import AsyncConnection, Pool, _CancellationContext @@ -395,7 +395,7 @@ async def _run(self) -> None: # Don't poll right after creation, wait 60 seconds first if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: return - seedlist = self._get_seedlist() + seedlist = await self._get_seedlist() if seedlist: self._seedlist = seedlist try: @@ -404,7 +404,7 @@ async def _run(self) -> None: # Topology was garbage-collected. await self.close() - def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: + async def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: """Poll SRV records for a seedlist. Returns a list of ServerDescriptions. @@ -415,7 +415,7 @@ def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: self._settings.pool_options.connect_timeout, self._settings.srv_service_name, ) - seedlist, ttl = resolver.get_hosts_and_min_ttl() + seedlist, ttl = await resolver.get_hosts_and_min_ttl() if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py new file mode 100644 index 0000000000..8b811e5dc2 --- /dev/null +++ b/pymongo/asynchronous/srv_resolver.py @@ -0,0 +1,160 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from dns import resolver + +_IS_SYNC = False + + +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False + + +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +async def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + if _IS_SYNC: + from dns import resolver + + if hasattr(resolver, "resolve"): + # dnspython >= 2 + return resolver.resolve(*args, **kwargs) + # dnspython 1.X + return resolver.query(*args, **kwargs) + else: + from dns import asyncresolver + + if hasattr(asyncresolver, "resolve"): + # dnspython >= 2 + return await asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] + raise ConfigurationError( + "Upgrade to dnspython version >= 2.0 to use AsyncMongoClient with mongodb+srv:// connections." + ) + + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) + + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): + self.__fqdn = fqdn + self.__srv = srv_service_name + self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT + self.__srv_max_hosts = srv_max_hosts or 0 + # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + + try: + self.__plist = self.__fqdn.split(".")[1:] + except Exception: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None + self.__slen = len(self.__plist) + if self.__slen < 2: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) + + async def get_options(self) -> Optional[str]: + from dns import resolver + + try: + results = await _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) + except (resolver.NoAnswer, resolver.NXDOMAIN): + # No TXT records + return None + except Exception as exc: + raise ConfigurationError(str(exc)) from None + if len(results) > 1: + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] + + async def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: + try: + results = await _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) + except Exception as exc: + if not encapsulate_errors: + # Raise the original error. + raise + # Else, raise all errors as ConfigurationError. + raise ConfigurationError(str(exc)) from None + return results + + async def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: + results = await self._resolve_uri(encapsulate_errors) + + # Construct address tuples + nodes = [ + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) # type: ignore[attr-defined] + for res in results + ] + + # Validate hosts + for node in nodes: + try: + nlist = node[0].lower().split(".")[1:][-self.__slen :] + except Exception: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None + if self.__plist != nlist: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) + return results, nodes + + async def get_hosts(self) -> list[tuple[str, Any]]: + _, nodes = await self._get_srv_response_and_hosts(True) + return nodes + + async def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: + results, nodes = await self._get_srv_response_and_hosts(False) + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/asynchronous/uri_parser.py b/pymongo/asynchronous/uri_parser.py new file mode 100644 index 0000000000..47c6d72031 --- /dev/null +++ b/pymongo/asynchronous/uri_parser.py @@ -0,0 +1,188 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI.""" +from __future__ import annotations + +from typing import Any, Optional +from urllib.parse import unquote_plus + +from pymongo.asynchronous.srv_resolver import _SrvResolver +from pymongo.common import SRV_SERVICE_NAME, _CaseInsensitiveDictionary +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.uri_parser_shared import ( + _ALLOWED_TXT_OPTS, + DEFAULT_PORT, + SCHEME, + SCHEME_LEN, + SRV_SCHEME_LEN, + _check_options, + _validate_uri, + split_hosts, + split_options, +) + +_IS_SYNC = False + + +async def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + """Parse and validate a MongoDB URI. + + Returns a dict of the form:: + + { + 'nodelist': , + 'username': or None, + 'password': or None, + 'database': or None, + 'collection': or None, + 'options': , + 'fqdn': or None + } + + If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done + to build nodelist and options. + + :param uri: The MongoDB URI to parse. + :param default_port: The port number to use when one wasn't specified + for a host in the URI. + :param validate: If ``True`` (the default), validate and + normalize all options. Default: ``True``. + :param warn: When validating, if ``True`` then will warn + the user then ignore any invalid options or values. If ``False``, + validation will error when options are unsupported or values are + invalid. Default: ``False``. + :param normalize: If ``True``, convert names of URI options + to their internally-used names. Default: ``True``. + :param connect_timeout: The maximum time in milliseconds to + wait for a response from the DNS server. + :param srv_service_name: A custom SRV service name + + .. versionchanged:: 4.6 + The delimiting slash (``/``) between hosts and connection options is now optional. + For example, "mongodb://example.com?tls=true" is now a valid URI. + + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported. + + .. versionchanged:: 3.9 + Added the ``normalize`` parameter. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + result = _validate_uri(uri, default_port, validate, warn, normalize, srv_max_hosts) + result.update( + await _parse_srv( + uri, + default_port, + validate, + warn, + normalize, + connect_timeout, + srv_service_name, + srv_max_hosts, + ) + ) + return result + + +async def _parse_srv( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + else: + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, _ = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + _, _, hosts = host_part.rpartition("@") + else: + hosts = host_part + + hosts = unquote_plus(hosts) + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + nodes = split_hosts(hosts, default_port=None) + fqdn, port = nodes[0] + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = await dns_resolver.get_hosts() + dns_options = await dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "options": options, + } diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 02fcc98e46..4cb94cba30 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -32,7 +32,7 @@ from bson import int64 from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError -from pymongo.uri_parser import _parse_kms_tls_options +from pymongo.uri_parser_shared import _parse_kms_tls_options if TYPE_CHECKING: from pymongo.typings import _AgnosticMongoClient, _DocumentTypeArg diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index ebffc7d74c..38c28de91e 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -86,7 +86,7 @@ _raise_connection_failure, ) from pymongo.typings import _DocumentType, _DocumentTypeArg -from pymongo.uri_parser import parse_host +from pymongo.uri_parser_shared import parse_host from pymongo.write_concern import WriteConcern if TYPE_CHECKING: diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 79b6cf6ed9..1cedbfe1e2 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -42,6 +42,7 @@ TYPE_CHECKING, Any, Callable, + Collection, ContextManager, FrozenSet, Generator, @@ -59,7 +60,7 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry from bson.timestamp import Timestamp -from pymongo import _csot, common, helpers_shared, periodic_executor, uri_parser +from pymongo import _csot, common, helpers_shared, periodic_executor from pymongo.client_options import ClientOptions from pymongo.errors import ( AutoReconnect, @@ -96,7 +97,7 @@ from pymongo.results import ClientBulkWriteResult from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.synchronous import client_session, database +from pymongo.synchronous import client_session, database, uri_parser from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream from pymongo.synchronous.client_bulk import _ClientBulk from pymongo.synchronous.client_session import _EmptyServerSession @@ -112,11 +113,14 @@ _DocumentTypeArg, _Pipeline, ) -from pymongo.uri_parser import ( +from pymongo.uri_parser_shared import ( + SRV_SCHEME, _check_options, _handle_option_deprecations, _handle_security_options, _normalize_options, + _validate_uri, + split_hosts, ) from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern @@ -130,6 +134,7 @@ from pymongo.synchronous.bulk import _Bulk from pymongo.synchronous.client_session import ClientSession, _ServerSession from pymongo.synchronous.cursor import _ConnectionManager + from pymongo.synchronous.encryption import _Encrypter from pymongo.synchronous.pool import Connection from pymongo.synchronous.server import Server @@ -748,6 +753,9 @@ def __init__( port = self.PORT if not isinstance(port, int): raise TypeError(f"port must be an instance of int, not {type(port)}") + self._host = host + self._port = port + self._topology: Topology = None # type: ignore[assignment] # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. @@ -758,8 +766,10 @@ def __init__( # Parse options passed as kwargs. keyword_opts = common._CaseInsensitiveDictionary(kwargs) keyword_opts["document_class"] = doc_class + self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} seeds = set() + is_srv = False username = None password = None dbase = None @@ -767,29 +777,22 @@ def __init__( fqdn = None srv_service_name = keyword_opts.get("srvservicename") srv_max_hosts = keyword_opts.get("srvmaxhosts") - if len([h for h in host if "/" in h]) > 1: + if len([h for h in self._host if "/" in h]) > 1: raise ConfigurationError("host must not contain multiple MongoDB URIs") - for entity in host: + for entity in self._host: # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' # it must be a URI, # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names if "/" in entity: - # Determine connection timeout from kwargs. - timeout = keyword_opts.get("connecttimeoutms") - if timeout is not None: - timeout = common.validate_timeout_or_none_or_zero( - keyword_opts.cased_key("connecttimeoutms"), timeout - ) - res = uri_parser.parse_uri( + res = _validate_uri( entity, port, validate=True, warn=True, normalize=False, - connect_timeout=timeout, - srv_service_name=srv_service_name, srv_max_hosts=srv_max_hosts, ) + is_srv = entity.startswith(SRV_SCHEME) seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password @@ -797,7 +800,7 @@ def __init__( opts = res["options"] fqdn = res["fqdn"] else: - seeds.update(uri_parser.split_hosts(entity, port)) + seeds.update(split_hosts(entity, self._port)) if not seeds: raise ConfigurationError("need to specify at least one host") @@ -818,80 +821,179 @@ def __init__( keyword_opts["tz_aware"] = tz_aware keyword_opts["connect"] = connect - # Handle deprecated options in kwarg options. - keyword_opts = _handle_option_deprecations(keyword_opts) - # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary( - dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) - ) - - # Override connection string options with kwarg options. - opts.update(keyword_opts) + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) if srv_service_name is None: srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") - # Handle security-option conflicts in combined options. - opts = _handle_security_options(opts) - # Normalize combined options. - opts = _normalize_options(opts) - _check_options(seeds, opts) + opts = self._normalize_and_validate_options(opts, seeds) # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) password = opts.get("password", password) - self._options = options = ClientOptions(username, password, dbase, opts, _IS_SYNC) + self._options = ClientOptions(username, password, dbase, opts, _IS_SYNC) self._default_database_name = dbase self._lock = _create_lock() self._kill_cursors_queue: list = [] - self._event_listeners = options.pool_options._event_listeners - super().__init__( - options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern, + self._encrypter: Optional[_Encrypter] = None + + self._resolve_srv_info.update( + { + "is_srv": is_srv, + "username": username, + "password": password, + "dbase": dbase, + "seeds": seeds, + "fqdn": fqdn, + "srv_service_name": srv_service_name, + "pool_class": pool_class, + "monitor_class": monitor_class, + "condition_class": condition_class, + } ) - self._topology_settings = TopologySettings( - seeds=seeds, - replica_set_name=options.replica_set_name, - pool_class=pool_class, - pool_options=options.pool_options, - monitor_class=monitor_class, - condition_class=condition_class, - local_threshold_ms=options.local_threshold_ms, - server_selection_timeout=options.server_selection_timeout, - server_selector=options.server_selector, - heartbeat_frequency=options.heartbeat_frequency, - fqdn=fqdn, - direct_connection=options.direct_connection, - load_balanced=options.load_balanced, - srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts, - server_monitoring_mode=options.server_monitoring_mode, + super().__init__( + self._options.codec_options, + self._options.read_preference, + self._options.write_concern, + self._options.read_concern, ) + if not is_srv: + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + self._opened = False self._closed = False - self._init_background() + if not is_srv: + self._init_background() if _IS_SYNC and connect: self._get_topology() # type: ignore[unused-coroutine] - self._encrypter = None + def _resolve_srv(self) -> None: + keyword_opts = self._resolve_srv_info["keyword_opts"] + seeds = set() + opts = common._CaseInsensitiveDictionary() + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = uri_parser._parse_srv( + entity, + self._port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + opts = res["options"] + else: + seeds.update(split_hosts(entity, self._port)) + + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + tz_aware = keyword_opts["tz_aware"] + connect = keyword_opts["connect"] + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, seeds) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", self._resolve_srv_info["username"]) + password = opts.get("password", self._resolve_srv_info["password"]) + self._options = ClientOptions( + username, password, self._resolve_srv_info["dbase"], opts, _IS_SYNC + ) + + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + + def _init_based_on_options( + self, seeds: Collection[tuple[str, int]], srv_max_hosts: Any, srv_service_name: Any + ) -> None: + self._event_listeners = self._options.pool_options._event_listeners + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=self._options.replica_set_name, + pool_class=self._resolve_srv_info["pool_class"], + pool_options=self._options.pool_options, + monitor_class=self._resolve_srv_info["monitor_class"], + condition_class=self._resolve_srv_info["condition_class"], + local_threshold_ms=self._options.local_threshold_ms, + server_selection_timeout=self._options.server_selection_timeout, + server_selector=self._options.server_selector, + heartbeat_frequency=self._options.heartbeat_frequency, + fqdn=self._resolve_srv_info["fqdn"], + direct_connection=self._options.direct_connection, + load_balanced=self._options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=self._options.server_monitoring_mode, + ) if self._options.auto_encryption_opts: from pymongo.synchronous.encryption import _Encrypter self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) self._timeout = self._options.timeout - if _HAS_REGISTER_AT_FORK: - # Add this client to the list of weakly referenced items. - # This will be used later if we fork. - MongoClient._clients[self._topology._topology_id] = self + def _normalize_and_validate_options( + self, opts: common._CaseInsensitiveDictionary, seeds: set[tuple[str, int | None]] + ) -> common._CaseInsensitiveDictionary: + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + return opts + + def _validate_kwargs_and_update_opts( + self, + keyword_opts: common._CaseInsensitiveDictionary, + opts: common._CaseInsensitiveDictionary, + ) -> common._CaseInsensitiveDictionary: + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + # Override connection string options with kwarg options. + opts.update(keyword_opts) + return opts def _connect(self) -> None: """Explicitly connect to MongoDB synchronously instead of on the first operation.""" @@ -899,6 +1001,10 @@ def _connect(self) -> None: def _init_background(self, old_pid: Optional[int] = None) -> None: self._topology = Topology(self._topology_settings) + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self # Seed the topology with the old one's pid so we can detect clients # that are opened before a fork and used after. self._topology._pid = old_pid @@ -1113,16 +1219,24 @@ def options(self) -> ClientOptions: """ return self._options + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + return ( + tuple(sorted(self._resolve_srv_info["seeds"])), + self._options.replica_set_name, + self._resolve_srv_info["fqdn"], + self._resolve_srv_info["srv_service_name"], + ) + def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): - return self._topology == other._topology + return self.eq_props() == other.eq_props() return NotImplemented def __ne__(self, other: Any) -> bool: return not self == other def __hash__(self) -> int: - return hash(self._topology) + return hash(self.eq_props()) def _repr_helper(self) -> str: def option_repr(option: str, value: Any) -> str: @@ -1138,13 +1252,16 @@ def option_repr(option: str, value: Any) -> str: return f"{option}={value!r}" # Host first... - options = [ - "host=%r" - % [ - "%s:%d" % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds + if self._topology is None: + options = [f"host='mongodb+srv://{self._resolve_srv_info['fqdn']}'"] + else: + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] ] - ] # ... then everything in self._constructor_args... options.extend( option_repr(key, self._options._options[key]) for key in self._constructor_args @@ -1546,6 +1663,8 @@ def close(self) -> None: .. versionchanged:: 3.6 End all server sessions created by this client. """ + if self._topology is None: + return session_ids = self._topology.pop_all_sessions() if session_ids: self._end_sessions(session_ids) @@ -1576,6 +1695,9 @@ def _get_topology(self) -> Topology: launches the connection process in the background. """ if not self._opened: + if self._resolve_srv_info["is_srv"]: + self._resolve_srv() + self._init_background() self._topology.open() with self._lock: self._kill_cursors_executor.open() @@ -2497,6 +2619,7 @@ def handle( self.completed_handshake, self.service_id, ) + assert self.client._topology is not None self.client._topology.handle_error(self.server_address, err_ctx) def __enter__(self) -> _MongoClientErrorHandler: diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index 5b45ed9a4d..a2b76c4e8a 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -33,7 +33,7 @@ from pymongo.pool_options import _is_faas from pymongo.read_preferences import MovingAverage from pymongo.server_description import ServerDescription -from pymongo.srv_resolver import _SrvResolver +from pymongo.synchronous.srv_resolver import _SrvResolver if TYPE_CHECKING: from pymongo.synchronous.pool import Connection, Pool, _CancellationContext diff --git a/pymongo/srv_resolver.py b/pymongo/synchronous/srv_resolver.py similarity index 88% rename from pymongo/srv_resolver.py rename to pymongo/synchronous/srv_resolver.py index 5be6cb98db..1b36efd1c9 100644 --- a/pymongo/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -25,6 +25,8 @@ if TYPE_CHECKING: from dns import resolver +_IS_SYNC = True + def _have_dnspython() -> bool: try: @@ -45,13 +47,23 @@ def maybe_decode(text: Union[str, bytes]) -> str: # PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: - from dns import resolver + if _IS_SYNC: + from dns import resolver - if hasattr(resolver, "resolve"): - # dnspython >= 2 - return resolver.resolve(*args, **kwargs) - # dnspython 1.X - return resolver.query(*args, **kwargs) + if hasattr(resolver, "resolve"): + # dnspython >= 2 + return resolver.resolve(*args, **kwargs) + # dnspython 1.X + return resolver.query(*args, **kwargs) + else: + from dns import asyncresolver + + if hasattr(asyncresolver, "resolve"): + # dnspython >= 2 + return asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] + raise ConfigurationError( + "Upgrade to dnspython version >= 2.0 to use MongoClient with mongodb+srv:// connections." + ) _INVALID_HOST_MSG = ( diff --git a/pymongo/synchronous/uri_parser.py b/pymongo/synchronous/uri_parser.py new file mode 100644 index 0000000000..52b59b8fe8 --- /dev/null +++ b/pymongo/synchronous/uri_parser.py @@ -0,0 +1,188 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI.""" +from __future__ import annotations + +from typing import Any, Optional +from urllib.parse import unquote_plus + +from pymongo.common import SRV_SERVICE_NAME, _CaseInsensitiveDictionary +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.synchronous.srv_resolver import _SrvResolver +from pymongo.uri_parser_shared import ( + _ALLOWED_TXT_OPTS, + DEFAULT_PORT, + SCHEME, + SCHEME_LEN, + SRV_SCHEME_LEN, + _check_options, + _validate_uri, + split_hosts, + split_options, +) + +_IS_SYNC = True + + +def parse_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + """Parse and validate a MongoDB URI. + + Returns a dict of the form:: + + { + 'nodelist': , + 'username': or None, + 'password': or None, + 'database': or None, + 'collection': or None, + 'options': , + 'fqdn': or None + } + + If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done + to build nodelist and options. + + :param uri: The MongoDB URI to parse. + :param default_port: The port number to use when one wasn't specified + for a host in the URI. + :param validate: If ``True`` (the default), validate and + normalize all options. Default: ``True``. + :param warn: When validating, if ``True`` then will warn + the user then ignore any invalid options or values. If ``False``, + validation will error when options are unsupported or values are + invalid. Default: ``False``. + :param normalize: If ``True``, convert names of URI options + to their internally-used names. Default: ``True``. + :param connect_timeout: The maximum time in milliseconds to + wait for a response from the DNS server. + :param srv_service_name: A custom SRV service name + + .. versionchanged:: 4.6 + The delimiting slash (``/``) between hosts and connection options is now optional. + For example, "mongodb://example.com?tls=true" is now a valid URI. + + .. versionchanged:: 4.0 + To better follow RFC 3986, unquoted percent signs ("%") are no longer + supported. + + .. versionchanged:: 3.9 + Added the ``normalize`` parameter. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + result = _validate_uri(uri, default_port, validate, warn, normalize, srv_max_hosts) + result.update( + _parse_srv( + uri, + default_port, + validate, + warn, + normalize, + connect_timeout, + srv_service_name, + srv_max_hosts, + ) + ) + return result + + +def _parse_srv( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + else: + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, _ = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + _, _, hosts = host_part.rpartition("@") + else: + hosts = host_part + + hosts = unquote_plus(hosts) + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + nodes = split_hosts(hosts, default_port=None) + fqdn, port = nodes[0] + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = dns_resolver.get_hosts() + dns_options = dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "options": options, + } diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index ee7ca9c205..fe253b9bbf 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -13,627 +13,32 @@ # permissions and limitations under the License. -"""Tools to parse and validate a MongoDB URI. - -.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. -""" +"""Re-import of synchronous URI Parser API for compatibility.""" from __future__ import annotations -import re import sys -import warnings -from typing import ( - TYPE_CHECKING, - Any, - Mapping, - MutableMapping, - Optional, - Sized, - Union, - cast, -) -from urllib.parse import unquote_plus - -from pymongo.client_options import _parse_ssl_options -from pymongo.common import ( - INTERNAL_URI_OPTION_NAME_MAP, - SRV_SERVICE_NAME, - URI_OPTIONS_DEPRECATION_MAP, - _CaseInsensitiveDictionary, - get_validated_options, -) -from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.srv_resolver import _have_dnspython, _SrvResolver -from pymongo.typings import _Address - -if TYPE_CHECKING: - from pymongo.pyopenssl_context import SSLContext - -SCHEME = "mongodb://" -SCHEME_LEN = len(SCHEME) -SRV_SCHEME = "mongodb+srv://" -SRV_SCHEME_LEN = len(SRV_SCHEME) -DEFAULT_PORT = 27017 - - -def _unquoted_percent(s: str) -> bool: - """Check for unescaped percent signs. - - :param s: A string. `s` can have things like '%25', '%2525', - and '%E2%85%A8' but cannot have unquoted percent like '%foo'. - """ - for i in range(len(s)): - if s[i] == "%": - sub = s[i : i + 3] - # If unquoting yields the same string this means there was an - # unquoted %. - if unquote_plus(sub) == sub: - return True - return False - - -def parse_userinfo(userinfo: str) -> tuple[str, str]: - """Validates the format of user information in a MongoDB URI. - Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", - "]", "@") as per RFC 3986 must be escaped. - - Returns a 2-tuple containing the unescaped username followed - by the unescaped password. - - :param userinfo: A string of the form : - """ - if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): - raise InvalidURI( - "Username and password must be escaped according to " - "RFC 3986, use urllib.parse.quote_plus" - ) - - user, _, passwd = userinfo.partition(":") - # No password is expected with GSSAPI authentication. - if not user: - raise InvalidURI("The empty string is not valid username") - - return unquote_plus(user), unquote_plus(passwd) - - -def parse_ipv6_literal_host( - entity: str, default_port: Optional[int] -) -> tuple[str, Optional[Union[str, int]]]: - """Validates an IPv6 literal host:port string. - - Returns a 2-tuple of IPv6 literal followed by port where - port is default_port if it wasn't specified in entity. - - :param entity: A string that represents an IPv6 literal enclosed - in braces (e.g. '[::1]' or '[::1]:27017'). - :param default_port: The port number to use when one wasn't - specified in entity. - """ - if entity.find("]") == -1: - raise ValueError( - "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." - ) - i = entity.find("]:") - if i == -1: - return entity[1:-1], default_port - return entity[1:i], entity[i + 2 :] - - -def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: - """Validates a host string - - Returns a 2-tuple of host followed by port where port is default_port - if it wasn't specified in the string. - - :param entity: A host or host:port string where host could be a - hostname or IP address. - :param default_port: The port number to use when one wasn't - specified in entity. - """ - host = entity - port: Optional[Union[str, int]] = default_port - if entity[0] == "[": - host, port = parse_ipv6_literal_host(entity, default_port) - elif entity.endswith(".sock"): - return entity, default_port - elif entity.find(":") != -1: - if entity.count(":") > 1: - raise ValueError( - "Reserved characters such as ':' must be " - "escaped according RFC 2396. An IPv6 " - "address literal must be enclosed in '[' " - "and ']' according to RFC 2732." - ) - host, port = host.split(":", 1) - if isinstance(port, str): - if not port.isdigit(): - # Special case check for mistakes like "mongodb://localhost:27017 ". - if all(c.isspace() or c.isdigit() for c in port): - for c in port: - if c.isspace(): - raise ValueError(f"Port contains whitespace character: {c!r}") - - # A non-digit port indicates that the URI is invalid, likely because the password - # or username were not escaped. - raise ValueError( - "Port contains non-digit characters. Hint: username and password must be escaped according to " - "RFC 3986, use urllib.parse.quote_plus" - ) - if int(port) > 65535 or int(port) <= 0: - raise ValueError("Port must be an integer between 0 and 65535") - port = int(port) - - # Normalize hostname to lowercase, since DNS is case-insensitive: - # https://tools.ietf.org/html/rfc4343 - # This prevents useless rediscovery if "foo.com" is in the seed list but - # "FOO.com" is in the hello response. - return host.lower(), port - - -# Options whose values are implicitly determined by tlsInsecure. -_IMPLICIT_TLSINSECURE_OPTS = { - "tlsallowinvalidcertificates", - "tlsallowinvalidhostnames", - "tlsdisableocspendpointcheck", -} - - -def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: - """Helper method for split_options which creates the options dict. - Also handles the creation of a list for the URI tag_sets/ - readpreferencetags portion, and the use of a unicode options string. - """ - options = _CaseInsensitiveDictionary() - for uriopt in opts.split(delim): - key, value = uriopt.split("=") - if key.lower() == "readpreferencetags": - options.setdefault(key, []).append(value) - else: - if key in options: - warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) - if key.lower() == "authmechanismproperties": - val = value - else: - val = unquote_plus(value) - options[key] = val - - return options - - -def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: - """Raise appropriate errors when conflicting TLS options are present in - the options dictionary. - - :param options: Instance of _CaseInsensitiveDictionary containing - MongoDB URI options. - """ - # Implicitly defined options must not be explicitly specified. - tlsinsecure = options.get("tlsinsecure") - if tlsinsecure is not None: - for opt in _IMPLICIT_TLSINSECURE_OPTS: - if opt in options: - err_msg = "URI options %s and %s cannot be specified simultaneously." - raise InvalidURI( - err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) - ) - - # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. - tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") - if tlsallowinvalidcerts is not None: - if "tlsdisableocspendpointcheck" in options: - err_msg = "URI options %s and %s cannot be specified simultaneously." - raise InvalidURI( - err_msg - % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) - ) - if tlsallowinvalidcerts is True: - options["tlsdisableocspendpointcheck"] = True - - # Handle co-occurence of CRL and OCSP-related options. - tlscrlfile = options.get("tlscrlfile") - if tlscrlfile is not None: - for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): - if options.get(opt) is True: - err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." - raise InvalidURI(err_msg % (opt,)) - - if "ssl" in options and "tls" in options: - - def truth_value(val: Any) -> Any: - if val in ("true", "false"): - return val == "true" - if isinstance(val, bool): - return val - return val - - if truth_value(options.get("ssl")) != truth_value(options.get("tls")): - err_msg = "Can not specify conflicting values for URI options %s and %s." - raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) - - return options - - -def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: - """Issue appropriate warnings when deprecated options are present in the - options dictionary. Removes deprecated option key, value pairs if the - options dictionary is found to also have the renamed option. - - :param options: Instance of _CaseInsensitiveDictionary containing - MongoDB URI options. - """ - for optname in list(options): - if optname in URI_OPTIONS_DEPRECATION_MAP: - mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] - if mode == "renamed": - newoptname = message - if newoptname in options: - warn_msg = "Deprecated option '%s' ignored in favor of '%s'." - warnings.warn( - warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), - DeprecationWarning, - stacklevel=2, - ) - options.pop(optname) - continue - warn_msg = "Option '%s' is deprecated, use '%s' instead." - warnings.warn( - warn_msg % (options.cased_key(optname), newoptname), - DeprecationWarning, - stacklevel=2, - ) - elif mode == "removed": - warn_msg = "Option '%s' is deprecated. %s." - warnings.warn( - warn_msg % (options.cased_key(optname), message), - DeprecationWarning, - stacklevel=2, - ) - - return options - - -def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: - """Normalizes option names in the options dictionary by converting them to - their internally-used names. - - :param options: Instance of _CaseInsensitiveDictionary containing - MongoDB URI options. - """ - # Expand the tlsInsecure option. - tlsinsecure = options.get("tlsinsecure") - if tlsinsecure is not None: - for opt in _IMPLICIT_TLSINSECURE_OPTS: - # Implicit options are logically the same as tlsInsecure. - options[opt] = tlsinsecure - - for optname in list(options): - intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) - if intname is not None: - options[intname] = options.pop(optname) - - return options - - -def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: - """Validates and normalizes options passed in a MongoDB URI. - - Returns a new dictionary of validated and normalized options. If warn is - False then errors will be thrown for invalid options, otherwise they will - be ignored and a warning will be issued. - - :param opts: A dict of MongoDB URI options. - :param warn: If ``True`` then warnings will be logged and - invalid options will be ignored. Otherwise invalid options will - cause errors. - """ - return get_validated_options(opts, warn) - - -def split_options( - opts: str, validate: bool = True, warn: bool = False, normalize: bool = True -) -> MutableMapping[str, Any]: - """Takes the options portion of a MongoDB URI, validates each option - and returns the options in a dictionary. - - :param opt: A string representing MongoDB URI options. - :param validate: If ``True`` (the default), validate and normalize all - options. - :param warn: If ``False`` (the default), suppress all warnings raised - during validation of options. - :param normalize: If ``True`` (the default), renames all options to their - internally-used names. - """ - and_idx = opts.find("&") - semi_idx = opts.find(";") - try: - if and_idx >= 0 and semi_idx >= 0: - raise InvalidURI("Can not mix '&' and ';' for option separators") - elif and_idx >= 0: - options = _parse_options(opts, "&") - elif semi_idx >= 0: - options = _parse_options(opts, ";") - elif opts.find("=") != -1: - options = _parse_options(opts, None) - else: - raise ValueError - except ValueError: - raise InvalidURI("MongoDB URI options are key=value pairs") from None - - options = _handle_security_options(options) - - options = _handle_option_deprecations(options) - - if normalize: - options = _normalize_options(options) - - if validate: - options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) - if options.get("authsource") == "": - raise InvalidURI("the authSource database cannot be an empty string") - - return options - - -def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: - """Takes a string of the form host1[:port],host2[:port]... and - splits it into (host, port) tuples. If [:port] isn't present the - default_port is used. - - Returns a set of 2-tuples containing the host name (or IP) followed by - port number. - - :param hosts: A string of the form host1[:port],host2[:port],... - :param default_port: The port number to use when one wasn't specified - for a host. - """ - nodes = [] - for entity in hosts.split(","): - if not entity: - raise ConfigurationError("Empty host (or extra comma in host list)") - port = default_port - # Unix socket entities don't have ports - if entity.endswith(".sock"): - port = None - nodes.append(parse_host(entity, port)) - return nodes - - -# Prohibited characters in database name. DB names also can't have ".", but for -# backward-compat we allow "db.collection" in URI. -_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") - -_ALLOWED_TXT_OPTS = frozenset( - ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] -) - - -def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: - # Ensure directConnection was not True if there are multiple seeds. - if len(nodes) > 1 and options.get("directconnection"): - raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") - - if options.get("loadbalanced"): - if len(nodes) > 1: - raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") - if options.get("directconnection"): - raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") - if options.get("replicaset"): - raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") - - -def parse_uri( - uri: str, - default_port: Optional[int] = DEFAULT_PORT, - validate: bool = True, - warn: bool = False, - normalize: bool = True, - connect_timeout: Optional[float] = None, - srv_service_name: Optional[str] = None, - srv_max_hosts: Optional[int] = None, -) -> dict[str, Any]: - """Parse and validate a MongoDB URI. - - Returns a dict of the form:: - - { - 'nodelist': , - 'username': or None, - 'password': or None, - 'database': or None, - 'collection': or None, - 'options': , - 'fqdn': or None - } - - If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done - to build nodelist and options. - - :param uri: The MongoDB URI to parse. - :param default_port: The port number to use when one wasn't specified - for a host in the URI. - :param validate: If ``True`` (the default), validate and - normalize all options. Default: ``True``. - :param warn: When validating, if ``True`` then will warn - the user then ignore any invalid options or values. If ``False``, - validation will error when options are unsupported or values are - invalid. Default: ``False``. - :param normalize: If ``True``, convert names of URI options - to their internally-used names. Default: ``True``. - :param connect_timeout: The maximum time in milliseconds to - wait for a response from the DNS server. - :param srv_service_name: A custom SRV service name - - .. versionchanged:: 4.6 - The delimiting slash (``/``) between hosts and connection options is now optional. - For example, "mongodb://example.com?tls=true" is now a valid URI. - - .. versionchanged:: 4.0 - To better follow RFC 3986, unquoted percent signs ("%") are no longer - supported. - - .. versionchanged:: 3.9 - Added the ``normalize`` parameter. - - .. versionchanged:: 3.6 - Added support for mongodb+srv:// URIs. - - .. versionchanged:: 3.5 - Return the original value of the ``readPreference`` MongoDB URI option - instead of the validated read preference mode. - - .. versionchanged:: 3.1 - ``warn`` added so invalid options can be ignored. - """ - if uri.startswith(SCHEME): - is_srv = False - scheme_free = uri[SCHEME_LEN:] - elif uri.startswith(SRV_SCHEME): - if not _have_dnspython(): - python_path = sys.executable or "python" - raise ConfigurationError( - 'The "dnspython" module must be ' - "installed to use mongodb+srv:// URIs. " - "To fix this error install pymongo again:\n " - "%s -m pip install pymongo>=4.3" % (python_path) - ) - is_srv = True - scheme_free = uri[SRV_SCHEME_LEN:] - else: - raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") - - if not scheme_free: - raise InvalidURI("Must provide at least one hostname or IP") - - user = None - passwd = None - dbase = None - collection = None - options = _CaseInsensitiveDictionary() - - host_plus_db_part, _, opts = scheme_free.partition("?") - if "/" in host_plus_db_part: - host_part, _, dbase = host_plus_db_part.partition("/") - else: - host_part = host_plus_db_part - - if dbase: - dbase = unquote_plus(dbase) - if "." in dbase: - dbase, collection = dbase.split(".", 1) - if _BAD_DB_CHARS.search(dbase): - raise InvalidURI('Bad database name "%s"' % dbase) - else: - dbase = None - - if opts: - options.update(split_options(opts, validate, warn, normalize)) - if srv_service_name is None: - srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) - if "@" in host_part: - userinfo, _, hosts = host_part.rpartition("@") - user, passwd = parse_userinfo(userinfo) - else: - hosts = host_part - - if "/" in hosts: - raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) - - hosts = unquote_plus(hosts) - fqdn = None - srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") - if is_srv: - if options.get("directConnection"): - raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") - nodes = split_hosts(hosts, default_port=None) - if len(nodes) != 1: - raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") - fqdn, port = nodes[0] - if port is not None: - raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") - - # Use the connection timeout. connectTimeoutMS passed as a keyword - # argument overrides the same option passed in the connection string. - connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) - nodes = dns_resolver.get_hosts() - dns_options = dns_resolver.get_options() - if dns_options: - parsed_dns_options = split_options(dns_options, validate, warn, normalize) - if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: - raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are supported from DNS" - ) - for opt, val in parsed_dns_options.items(): - if opt not in options: - options[opt] = val - if options.get("loadBalanced") and srv_max_hosts: - raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") - if options.get("replicaSet") and srv_max_hosts: - raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") - if "tls" not in options and "ssl" not in options: - options["tls"] = True if validate else "true" - elif not is_srv and options.get("srvServiceName") is not None: - raise ConfigurationError( - "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" - ) - elif not is_srv and srv_max_hosts: - raise ConfigurationError( - "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" - ) - else: - nodes = split_hosts(hosts, default_port=default_port) - - _check_options(nodes, options) - - return { - "nodelist": nodes, - "username": user, - "password": passwd, - "database": dbase, - "collection": collection, - "options": options, - "fqdn": fqdn, - } - - -def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: - """Parse KMS TLS connection options.""" - if not kms_tls_options: - return {} - if not isinstance(kms_tls_options, dict): - raise TypeError("kms_tls_options must be a dict") - contexts = {} - for provider, options in kms_tls_options.items(): - if not isinstance(options, dict): - raise TypeError(f'kms_tls_options["{provider}"] must be a dict') - options.setdefault("tls", True) - opts = _CaseInsensitiveDictionary(options) - opts = _handle_security_options(opts) - opts = _normalize_options(opts) - opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) - ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) - if ssl_context is None: - raise ConfigurationError("TLS is required for KMS providers") - if allow_invalid_hostnames: - raise ConfigurationError("Insecure TLS options prohibited") - - for n in [ - "tlsInsecure", - "tlsAllowInvalidCertificates", - "tlsAllowInvalidHostnames", - "tlsDisableCertificateRevocationCheck", - ]: - if n in opts: - raise ConfigurationError(f"Insecure TLS options prohibited: {n}") - contexts[provider] = ssl_context - return contexts +from pymongo.errors import InvalidURI +from pymongo.synchronous.uri_parser import * # noqa: F403 +from pymongo.synchronous.uri_parser import __doc__ as original_doc +from pymongo.uri_parser_shared import * # noqa: F403 + +__doc__ = original_doc +__all__ = [ # noqa: F405 + "parse_userinfo", + "parse_ipv6_literal_host", + "parse_host", + "validate_options", + "split_options", + "split_hosts", + "parse_uri", +] if __name__ == "__main__": import pprint try: - pprint.pprint(parse_uri(sys.argv[1])) # noqa: T203 + pprint.pprint(parse_uri(sys.argv[1])) # noqa: F405, T203 except InvalidURI as exc: print(exc) # noqa: T201 sys.exit(0) diff --git a/pymongo/uri_parser_shared.py b/pymongo/uri_parser_shared.py new file mode 100644 index 0000000000..e7ba4c9fb5 --- /dev/null +++ b/pymongo/uri_parser_shared.py @@ -0,0 +1,549 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +import re +import sys +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sized, + Union, + cast, +) +from urllib.parse import unquote_plus + +from pymongo.asynchronous.srv_resolver import _have_dnspython +from pymongo.client_options import _parse_ssl_options +from pymongo.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.typings import _Address + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext + +SCHEME = "mongodb://" +SCHEME_LEN = len(SCHEME) +SRV_SCHEME = "mongodb+srv://" +SRV_SCHEME_LEN = len(SRV_SCHEME) +DEFAULT_PORT = 27017 + + +def _unquoted_percent(s: str) -> bool: + """Check for unescaped percent signs. + + :param s: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. + """ + for i in range(len(s)): + if s[i] == "%": + sub = s[i : i + 3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote_plus(sub) == sub: + return True + return False + + +def parse_userinfo(userinfo: str) -> tuple[str, str]: + """Validates the format of user information in a MongoDB URI. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. + + Returns a 2-tuple containing the unescaped username followed + by the unescaped password. + + :param userinfo: A string of the form : + """ + if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo): + raise InvalidURI( + "Username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + + user, _, passwd = userinfo.partition(":") + # No password is expected with GSSAPI authentication. + if not user: + raise InvalidURI("The empty string is not valid username") + + return unquote_plus(user), unquote_plus(passwd) + + +def parse_ipv6_literal_host( + entity: str, default_port: Optional[int] +) -> tuple[str, Optional[Union[str, int]]]: + """Validates an IPv6 literal host:port string. + + Returns a 2-tuple of IPv6 literal followed by port where + port is default_port if it wasn't specified in entity. + + :param entity: A string that represents an IPv6 literal enclosed + in braces (e.g. '[::1]' or '[::1]:27017'). + :param default_port: The port number to use when one wasn't + specified in entity. + """ + if entity.find("]") == -1: + raise ValueError( + "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732." + ) + i = entity.find("]:") + if i == -1: + return entity[1:-1], default_port + return entity[1:i], entity[i + 2 :] + + +def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address: + """Validates a host string + + Returns a 2-tuple of host followed by port where port is default_port + if it wasn't specified in the string. + + :param entity: A host or host:port string where host could be a + hostname or IP address. + :param default_port: The port number to use when one wasn't + specified in entity. + """ + host = entity + port: Optional[Union[str, int]] = default_port + if entity[0] == "[": + host, port = parse_ipv6_literal_host(entity, default_port) + elif entity.endswith(".sock"): + return entity, default_port + elif entity.find(":") != -1: + if entity.count(":") > 1: + raise ValueError( + "Reserved characters such as ':' must be " + "escaped according RFC 2396. An IPv6 " + "address literal must be enclosed in '[' " + "and ']' according to RFC 2732." + ) + host, port = host.split(":", 1) + if isinstance(port, str): + if not port.isdigit(): + # Special case check for mistakes like "mongodb://localhost:27017 ". + if all(c.isspace() or c.isdigit() for c in port): + for c in port: + if c.isspace(): + raise ValueError(f"Port contains whitespace character: {c!r}") + + # A non-digit port indicates that the URI is invalid, likely because the password + # or username were not escaped. + raise ValueError( + "Port contains non-digit characters. Hint: username and password must be escaped according to " + "RFC 3986, use urllib.parse.quote_plus" + ) + if int(port) > 65535 or int(port) <= 0: + raise ValueError("Port must be an integer between 0 and 65535") + port = int(port) + + # Normalize hostname to lowercase, since DNS is case-insensitive: + # https://tools.ietf.org/html/rfc4343 + # This prevents useless rediscovery if "foo.com" is in the seed list but + # "FOO.com" is in the hello response. + return host.lower(), port + + +# Options whose values are implicitly determined by tlsInsecure. +_IMPLICIT_TLSINSECURE_OPTS = { + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlsdisableocspendpointcheck", +} + + +def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary: + """Helper method for split_options which creates the options dict. + Also handles the creation of a list for the URI tag_sets/ + readpreferencetags portion, and the use of a unicode options string. + """ + options = _CaseInsensitiveDictionary() + for uriopt in opts.split(delim): + key, value = uriopt.split("=") + if key.lower() == "readpreferencetags": + options.setdefault(key, []).append(value) + else: + if key in options: + warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2) + if key.lower() == "authmechanismproperties": + val = value + else: + val = unquote_plus(value) + options[key] = val + + return options + + +def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Raise appropriate errors when conflicting TLS options are present in + the options dictionary. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Implicitly defined options must not be explicitly specified. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + if opt in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt)) + ) + + # Handle co-occurence of OCSP & tlsAllowInvalidCertificates options. + tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates") + if tlsallowinvalidcerts is not None: + if "tlsdisableocspendpointcheck" in options: + err_msg = "URI options %s and %s cannot be specified simultaneously." + raise InvalidURI( + err_msg + % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck")) + ) + if tlsallowinvalidcerts is True: + options["tlsdisableocspendpointcheck"] = True + + # Handle co-occurence of CRL and OCSP-related options. + tlscrlfile = options.get("tlscrlfile") + if tlscrlfile is not None: + for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"): + if options.get(opt) is True: + err_msg = "URI option %s=True cannot be specified when CRL checking is enabled." + raise InvalidURI(err_msg % (opt,)) + + if "ssl" in options and "tls" in options: + + def truth_value(val: Any) -> Any: + if val in ("true", "false"): + return val == "true" + if isinstance(val, bool): + return val + return val + + if truth_value(options.get("ssl")) != truth_value(options.get("tls")): + err_msg = "Can not specify conflicting values for URI options %s and %s." + raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls"))) + + return options + + +def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Issue appropriate warnings when deprecated options are present in the + options dictionary. Removes deprecated option key, value pairs if the + options dictionary is found to also have the renamed option. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + for optname in list(options): + if optname in URI_OPTIONS_DEPRECATION_MAP: + mode, message = URI_OPTIONS_DEPRECATION_MAP[optname] + if mode == "renamed": + newoptname = message + if newoptname in options: + warn_msg = "Deprecated option '%s' ignored in favor of '%s'." + warnings.warn( + warn_msg % (options.cased_key(optname), options.cased_key(newoptname)), + DeprecationWarning, + stacklevel=2, + ) + options.pop(optname) + continue + warn_msg = "Option '%s' is deprecated, use '%s' instead." + warnings.warn( + warn_msg % (options.cased_key(optname), newoptname), + DeprecationWarning, + stacklevel=2, + ) + elif mode == "removed": + warn_msg = "Option '%s' is deprecated. %s." + warnings.warn( + warn_msg % (options.cased_key(optname), message), + DeprecationWarning, + stacklevel=2, + ) + + return options + + +def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary: + """Normalizes option names in the options dictionary by converting them to + their internally-used names. + + :param options: Instance of _CaseInsensitiveDictionary containing + MongoDB URI options. + """ + # Expand the tlsInsecure option. + tlsinsecure = options.get("tlsinsecure") + if tlsinsecure is not None: + for opt in _IMPLICIT_TLSINSECURE_OPTS: + # Implicit options are logically the same as tlsInsecure. + options[opt] = tlsinsecure + + for optname in list(options): + intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None) + if intname is not None: + options[intname] = options.pop(optname) + + return options + + +def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]: + """Validates and normalizes options passed in a MongoDB URI. + + Returns a new dictionary of validated and normalized options. If warn is + False then errors will be thrown for invalid options, otherwise they will + be ignored and a warning will be issued. + + :param opts: A dict of MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and + invalid options will be ignored. Otherwise invalid options will + cause errors. + """ + return get_validated_options(opts, warn) + + +def split_options( + opts: str, validate: bool = True, warn: bool = False, normalize: bool = True +) -> MutableMapping[str, Any]: + """Takes the options portion of a MongoDB URI, validates each option + and returns the options in a dictionary. + + :param opt: A string representing MongoDB URI options. + :param validate: If ``True`` (the default), validate and normalize all + options. + :param warn: If ``False`` (the default), suppress all warnings raised + during validation of options. + :param normalize: If ``True`` (the default), renames all options to their + internally-used names. + """ + and_idx = opts.find("&") + semi_idx = opts.find(";") + try: + if and_idx >= 0 and semi_idx >= 0: + raise InvalidURI("Can not mix '&' and ';' for option separators") + elif and_idx >= 0: + options = _parse_options(opts, "&") + elif semi_idx >= 0: + options = _parse_options(opts, ";") + elif opts.find("=") != -1: + options = _parse_options(opts, None) + else: + raise ValueError + except ValueError: + raise InvalidURI("MongoDB URI options are key=value pairs") from None + + options = _handle_security_options(options) + + options = _handle_option_deprecations(options) + + if normalize: + options = _normalize_options(options) + + if validate: + options = cast(_CaseInsensitiveDictionary, validate_options(options, warn)) + if options.get("authsource") == "": + raise InvalidURI("the authSource database cannot be an empty string") + + return options + + +def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]: + """Takes a string of the form host1[:port],host2[:port]... and + splits it into (host, port) tuples. If [:port] isn't present the + default_port is used. + + Returns a set of 2-tuples containing the host name (or IP) followed by + port number. + + :param hosts: A string of the form host1[:port],host2[:port],... + :param default_port: The port number to use when one wasn't specified + for a host. + """ + nodes = [] + for entity in hosts.split(","): + if not entity: + raise ConfigurationError("Empty host (or extra comma in host list)") + port = default_port + # Unix socket entities don't have ports + if entity.endswith(".sock"): + port = None + nodes.append(parse_host(entity, port)) + return nodes + + +# Prohibited characters in database name. DB names also can't have ".", but for +# backward-compat we allow "db.collection" in URI. +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") + +_ALLOWED_TXT_OPTS = frozenset( + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) + + +def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: + # Ensure directConnection was not True if there are multiple seeds. + if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") + + if options.get("loadbalanced"): + if len(nodes) > 1: + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") + + +def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError("kms_tls_options must be a dict") + contexts = {} + for provider, options in kms_tls_options.items(): + if not isinstance(options, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + options.setdefault("tls", True) + opts = _CaseInsensitiveDictionary(options) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + if ssl_context is None: + raise ConfigurationError("TLS is required for KMS providers") + if allow_invalid_hostnames: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableCertificateRevocationCheck", + ]: + if n in opts: + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") + contexts[provider] = ssl_context + return contexts + + +def _validate_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + elif uri.startswith(SRV_SCHEME): + if not _have_dnspython(): + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + "installed to use mongodb+srv:// URIs. " + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) + ) + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + else: + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") + + if not scheme_free: + raise InvalidURI("Must provide at least one hostname or IP") + + user = None + passwd = None + dbase = None + collection = None + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, dbase = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if dbase: + dbase = unquote_plus(dbase) + if "." in dbase: + dbase, collection = dbase.split(".", 1) + if _BAD_DB_CHARS.search(dbase): + raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") + user, passwd = parse_userinfo(userinfo) + else: + hosts = host_part + + if "/" in hosts: + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) + + hosts = unquote_plus(hosts) + fqdn = None + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + if options.get("directConnection"): + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") + nodes = split_hosts(hosts, default_port=None) + if len(nodes) != 1: + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") + fqdn, port = nodes[0] + if port is not None: + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError( + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" + ) + elif not is_srv and srv_max_hosts: + raise ConfigurationError( + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" + ) + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, + } diff --git a/test/__init__.py b/test/__init__.py index 307780271d..8e362de5ad 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -32,7 +32,7 @@ import warnings from asyncio import iscoroutinefunction -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri try: import ipaddress diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index f03fcf4eeb..b3f65e5d3c 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -32,7 +32,7 @@ import warnings from asyncio import iscoroutinefunction -from pymongo.uri_parser import parse_uri +from pymongo.asynchronous.uri_parser import parse_uri try: import ipaddress @@ -1027,7 +1027,7 @@ async def _unmanaged_async_mongo_client( auth_mech = kwargs.get("authMechanism", "") if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": # Only add the default username or password if one is not provided. - res = parse_uri(uri) + res = await parse_uri(uri) if ( not res["username"] and not res["password"] @@ -1058,7 +1058,7 @@ async def _async_mongo_client( auth_mech = kwargs.get("authMechanism", "") if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": # Only add the default username or password if one is not provided. - res = parse_uri(uri) + res = await parse_uri(uri) if ( not res["username"] and not res["password"] diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index 98e00e9385..7b021e8b44 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -47,7 +47,7 @@ from pymongo import common, message from pymongo.read_preferences import ReadPreference from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri if HAVE_SSL: import ssl diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index f529dcce14..7f70b84825 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -512,13 +512,13 @@ async def test_uri_option_precedence(self): async def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. - from pymongo.srv_resolver import _resolve + from pymongo.asynchronous.srv_resolver import _resolve patched_resolver = FunctionCallRecorder(_resolve) - pymongo.srv_resolver._resolve = patched_resolver + pymongo.asynchronous.srv_resolver._resolve = patched_resolver def reset_resolver(): - pymongo.srv_resolver._resolve = _resolve + pymongo.asynchronous.srv_resolver._resolve = _resolve self.addCleanup(reset_resolver) @@ -607,7 +607,7 @@ def test_validate_suggestion(self): with self.assertRaisesRegex(ConfigurationError, expected): AsyncMongoClient(**{typo: "standard"}) # type: ignore[arg-type] - @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") def test_detected_environment_logging(self, mock_get_hosts): normal_hosts = [ "normal.host.com", @@ -629,7 +629,7 @@ def test_detected_environment_logging(self, mock_get_hosts): logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) - @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") async def test_detected_environment_warning(self, mock_get_hosts): with self._caplog.at_level(logging.WARN): normal_hosts = [ @@ -933,6 +933,15 @@ async def test_repr(self): async with eval(the_repr) as client_two: self.assertEqual(client_two, client) + async def test_repr_srv_host(self): + client = AsyncMongoClient("mongodb+srv://test1.test.build.10gen.cc/", connect=False) + # before srv resolution + self.assertIn("host='mongodb+srv://test1.test.build.10gen.cc'", repr(client)) + await client.aconnect() + # after srv resolution + self.assertIn("host=['localhost.test.build.10gen.cc:", repr(client)) + await client.close() + async def test_getters(self): await async_wait_until( lambda: async_client_context.nodes == self.client.nodes, "find all nodes" @@ -1911,28 +1920,37 @@ async def test_service_name_from_kwargs(self): srvServiceName="customname", connect=False, ) + await client.aconnect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() client = AsyncMongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=shouldbeoverriden", srvServiceName="customname", connect=False, ) + await client.aconnect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() client = AsyncMongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", connect=False, ) + await client.aconnect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() async def test_srv_max_hosts_kwarg(self): client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") + await client.aconnect() self.assertGreater(len(client.topology_description.server_descriptions()), 1) client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + await client.aconnect() self.assertEqual(len(client.topology_description.server_descriptions()), 1) client = self.simple_client( "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 ) + await client.aconnect() self.assertEqual(len(client.topology_description.server_descriptions()), 2) @unittest.skipIf( diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index b3de2c5a4d..fa62b25dd1 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -54,6 +54,7 @@ from pymongo import common, monitoring from pymongo.asynchronous.settings import TopologySettings from pymongo.asynchronous.topology import Topology, _ErrorContext +from pymongo.asynchronous.uri_parser import parse_uri from pymongo.errors import ( AutoReconnect, ConfigurationError, @@ -66,7 +67,6 @@ from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent from pymongo.server_description import SERVER_TYPE, ServerDescription from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo.uri_parser import parse_uri _IS_SYNC = False @@ -81,7 +81,7 @@ async def create_mock_topology(uri, monitor_class=DummyMonitor): - parsed_uri = parse_uri(uri) + parsed_uri = await parse_uri(uri) replica_set_name = None direct_connection = None load_balanced = None diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py index a622062fec..d0e801e123 100644 --- a/test/asynchronous/test_dns.py +++ b/test/asynchronous/test_dns.py @@ -31,9 +31,10 @@ ) from test.utils_shared import async_wait_until +from pymongo.asynchronous.uri_parser import parse_uri from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError -from pymongo.uri_parser import parse_uri, split_hosts +from pymongo.uri_parser_shared import split_hosts _IS_SYNC = False @@ -109,7 +110,7 @@ async def run_test(self): hosts = frozenset(split_hosts(",".join(hosts))) if seeds or num_seeds: - result = parse_uri(uri, validate=True) + result = await parse_uri(uri, validate=True) if seeds is not None: self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) if num_seeds is not None: @@ -161,7 +162,7 @@ async def run_test(self): # and re-run these assertions. else: try: - parse_uri(uri) + await parse_uri(uri) except (ConfigurationError, ValueError): pass else: @@ -185,35 +186,24 @@ def create_tests(cls): class TestParsingErrors(AsyncPyMongoTestCase): async def test_invalid_host(self): - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: mongodb is not", - self.simple_client, - "mongodb+srv://mongodb", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: mongodb.com is not", - self.simple_client, - "mongodb+srv://mongodb.com", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: an IP address is not", - self.simple_client, - "mongodb+srv://127.0.0.1", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: an IP address is not", - self.simple_client, - "mongodb+srv://[::1]", - ) + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb is not"): + client = self.simple_client("mongodb+srv://mongodb") + await client.aconnect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb.com is not"): + client = self.simple_client("mongodb+srv://mongodb.com") + await client.aconnect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://127.0.0.1") + await client.aconnect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://[::1]") + await client.aconnect() class IsolatedAsyncioTestCaseInsensitive(AsyncIntegrationTest): async def test_connect_case_insensitive(self): client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + await client.aconnect() self.assertGreater(len(client.topology_description.server_descriptions()), 1) diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index bf7807eb97..3dcd21ef1d 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -28,8 +28,8 @@ import pymongo from pymongo import common +from pymongo.asynchronous.srv_resolver import _have_dnspython from pymongo.errors import ConfigurationError -from pymongo.srv_resolver import _have_dnspython _IS_SYNC = False @@ -54,14 +54,16 @@ def __init__( def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = ( + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval - def mock_get_hosts_and_min_ttl(resolver, *args): + async def mock_get_hosts_and_min_ttl(resolver, *args): assert self.old_dns_resolver_response is not None - nodes, ttl = self.old_dns_resolver_response(resolver) + nodes, ttl = await self.old_dns_resolver_response(resolver) if self.nodelist_callback is not None: nodes = self.nodelist_callback() if self.ttl_time is not None: @@ -74,14 +76,14 @@ def mock_get_hosts_and_min_ttl(resolver, *args): else: patch_func = mock_get_hosts_and_min_ttl - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore def __enter__(self): self.enable() def disable(self): common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore self.old_dns_resolver_response ) @@ -134,7 +136,10 @@ async def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 def predicate(): if set(expected_nodelist) == set(self.get_nodelist(client)): - return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return ( + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count + >= 1 + ) return False await async_wait_until(predicate, "Node list equals expected nodelist", timeout=timeout) @@ -144,7 +149,7 @@ def predicate(): msg = "Client nodelist %s changed unexpectedly (expected %s)" raise self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore 1, "resolver was never called", ) diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index a7660f2f67..9738694d85 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -32,7 +32,7 @@ from pymongo import MongoClient from pymongo.errors import OperationFailure -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri pytestmark = pytest.mark.auth_aws diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 6dc36dc8a4..0c8431a1e8 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -49,7 +49,7 @@ OIDCCallbackResult, _get_authenticator, ) -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri ROOT = Path(__file__).parent.parent.resolve() TEST_PATH = ROOT / "auth" / "unified" diff --git a/test/helpers.py b/test/helpers.py index 627be182b5..12c55ade1b 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -47,7 +47,7 @@ from pymongo import common, message from pymongo.read_preferences import ReadPreference from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri if HAVE_SSL: import ssl diff --git a/test/test_client.py b/test/test_client.py index e445fa632a..cd4ceb3299 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -505,13 +505,13 @@ def test_uri_option_precedence(self): def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. - from pymongo.srv_resolver import _resolve + from pymongo.synchronous.srv_resolver import _resolve patched_resolver = FunctionCallRecorder(_resolve) - pymongo.srv_resolver._resolve = patched_resolver + pymongo.synchronous.srv_resolver._resolve = patched_resolver def reset_resolver(): - pymongo.srv_resolver._resolve = _resolve + pymongo.synchronous.srv_resolver._resolve = _resolve self.addCleanup(reset_resolver) @@ -600,7 +600,7 @@ def test_validate_suggestion(self): with self.assertRaisesRegex(ConfigurationError, expected): MongoClient(**{typo: "standard"}) # type: ignore[arg-type] - @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") def test_detected_environment_logging(self, mock_get_hosts): normal_hosts = [ "normal.host.com", @@ -622,7 +622,7 @@ def test_detected_environment_logging(self, mock_get_hosts): logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) - @patch("pymongo.srv_resolver._SrvResolver.get_hosts") + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") def test_detected_environment_warning(self, mock_get_hosts): with self._caplog.at_level(logging.WARN): normal_hosts = [ @@ -908,6 +908,15 @@ def test_repr(self): with eval(the_repr) as client_two: self.assertEqual(client_two, client) + def test_repr_srv_host(self): + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", connect=False) + # before srv resolution + self.assertIn("host='mongodb+srv://test1.test.build.10gen.cc'", repr(client)) + client._connect() + # after srv resolution + self.assertIn("host=['localhost.test.build.10gen.cc:", repr(client)) + client.close() + def test_getters(self): wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") @@ -1868,28 +1877,37 @@ def test_service_name_from_kwargs(self): srvServiceName="customname", connect=False, ) + client._connect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() client = MongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=shouldbeoverriden", srvServiceName="customname", connect=False, ) + client._connect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() client = MongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", connect=False, ) + client._connect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() def test_srv_max_hosts_kwarg(self): client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") + client._connect() self.assertGreater(len(client.topology_description.server_descriptions()), 1) client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + client._connect() self.assertEqual(len(client.topology_description.server_descriptions()), 1) client = self.simple_client( "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 ) + client._connect() self.assertEqual(len(client.topology_description.server_descriptions()), 2) @unittest.skipIf( diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 00021310c9..07720473ca 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -65,8 +65,8 @@ from pymongo.server_description import SERVER_TYPE, ServerDescription from pymongo.synchronous.settings import TopologySettings from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.synchronous.uri_parser import parse_uri from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo.uri_parser import parse_uri _IS_SYNC = True diff --git a/test/test_dns.py b/test/test_dns.py index 71326ae49e..0290eb16d9 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -33,7 +33,8 @@ from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError -from pymongo.uri_parser import parse_uri, split_hosts +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.uri_parser_shared import split_hosts _IS_SYNC = True @@ -183,35 +184,24 @@ def create_tests(cls): class TestParsingErrors(PyMongoTestCase): def test_invalid_host(self): - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: mongodb is not", - self.simple_client, - "mongodb+srv://mongodb", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: mongodb.com is not", - self.simple_client, - "mongodb+srv://mongodb.com", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: an IP address is not", - self.simple_client, - "mongodb+srv://127.0.0.1", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: an IP address is not", - self.simple_client, - "mongodb+srv://[::1]", - ) + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb is not"): + client = self.simple_client("mongodb+srv://mongodb") + client._connect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb.com is not"): + client = self.simple_client("mongodb+srv://mongodb.com") + client._connect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://127.0.0.1") + client._connect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://[::1]") + client._connect() class TestCaseInsensitive(IntegrationTest): def test_connect_case_insensitive(self): client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + client._connect() self.assertGreater(len(client.topology_description.server_descriptions()), 1) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 6812465074..df802acb43 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -29,7 +29,7 @@ import pymongo from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.srv_resolver import _have_dnspython +from pymongo.synchronous.srv_resolver import _have_dnspython _IS_SYNC = True @@ -54,7 +54,9 @@ def __init__( def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval @@ -74,14 +76,14 @@ def mock_get_hosts_and_min_ttl(resolver, *args): else: patch_func = mock_get_hosts_and_min_ttl - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore def __enter__(self): self.enable() def disable(self): common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore self.old_dns_resolver_response ) @@ -134,7 +136,10 @@ def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAI def predicate(): if set(expected_nodelist) == set(self.get_nodelist(client)): - return pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count >= 1 + return ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count + >= 1 + ) return False wait_until(predicate, "Node list equals expected nodelist", timeout=timeout) @@ -144,7 +149,7 @@ def predicate(): msg = "Client nodelist %s changed unexpectedly (expected %s)" raise self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore 1, "resolver was never called", ) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index f95717e95f..0baefa0c3a 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -28,8 +28,8 @@ from bson.binary import JAVA_LEGACY from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.uri_parser import ( - parse_uri, +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.uri_parser_shared import ( parse_userinfo, split_hosts, split_options, diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 29cde7e078..aeb0be94b5 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -29,7 +29,7 @@ from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate from pymongo.compression_support import _have_snappy -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri CONN_STRING_TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") diff --git a/tools/synchro.py b/tools/synchro.py index e65270733e..d8760b83bc 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -127,6 +127,7 @@ "async_create_barrier": "create_barrier", "async_barrier_wait": "barrier_wait", "async_joinall": "joinall", + "pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts": "pymongo.synchronous.srv_resolver._SrvResolver.get_hosts", } docstring_replacements: dict[tuple[str, str], str] = { From 2c1a1608f28ca19ded7df583aa1473e7df63e7ce Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 25 Mar 2025 17:00:19 -0400 Subject: [PATCH 1822/2111] PYTHON-5169 - Deprecate Hedged Reads option (#2213) Co-authored-by: Shane Harvey --- doc/changelog.rst | 5 ++ pymongo/read_preferences.py | 24 +++++++-- test/asynchronous/test_read_preferences.py | 61 +++++++++++++--------- test/test_read_preferences.py | 61 +++++++++++++--------- 4 files changed, 98 insertions(+), 53 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 5683fcaaca..0633049857 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -14,6 +14,11 @@ PyMongo 4.12 brings a number of changes including: - Added index hinting support to the :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` and :meth:`~pymongo.collection.Collection.distinct` commands. +- Deprecated the ``hedge`` parameter for + :class:`~pymongo.read_preferences.PrimaryPreferred`, + :class:`~pymongo.read_preferences.Secondary`, + :class:`~pymongo.read_preferences.SecondaryPreferred`, + :class:`~pymongo.read_preferences.Nearest`. Support for ``hedge`` will be removed in PyMongo 5.0. Issues Resolved ............... diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index 581f7ca66f..dae414c37c 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -19,6 +19,7 @@ from __future__ import annotations +import warnings from collections import abc from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence @@ -103,6 +104,11 @@ def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: if not isinstance(hedge, dict): raise TypeError(f"hedge must be a dictionary, not {hedge!r}") + warnings.warn( + "The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0.", + DeprecationWarning, + stacklevel=4, + ) return hedge @@ -183,7 +189,9 @@ def max_staleness(self) -> int: @property def hedge(self) -> Optional[_Hedge]: - """The read preference ``hedge`` parameter. + """**DEPRECATED** - The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0. + + The read preference ``hedge`` parameter. A dictionary that configures how the server will perform hedged reads. It consists of the following keys: @@ -203,6 +211,12 @@ def hedge(self) -> Optional[_Hedge]: .. versionadded:: 3.11 """ + if self.__hedge is not None: + warnings.warn( + "The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0.", + DeprecationWarning, + stacklevel=2, + ) return self.__hedge @property @@ -312,7 +326,7 @@ class PrimaryPreferred(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - :param hedge: The :attr:`~hedge` to use if the primary is not available. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -354,7 +368,7 @@ class Secondary(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - :param hedge: The :attr:`~hedge` for this read preference. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -397,7 +411,7 @@ class SecondaryPreferred(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - :param hedge: The :attr:`~hedge` for this read preference. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -441,7 +455,7 @@ class Nearest(_ServerMode): replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - :param hedge: The :attr:`~hedge` for this read preference. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. diff --git a/test/asynchronous/test_read_preferences.py b/test/asynchronous/test_read_preferences.py index 5bea174058..72dd809db0 100644 --- a/test/asynchronous/test_read_preferences.py +++ b/test/asynchronous/test_read_preferences.py @@ -35,6 +35,7 @@ ) from test.utils_shared import ( OvertCommandListener, + _ignore_deprecations, async_wait_until, one, ) @@ -542,33 +543,44 @@ def test_read_preference_document_hedge(self): for mode, cls in cases.items(): with self.assertRaises(TypeError): cls(hedge=[]) # type: ignore - - pref = cls(hedge={}) - self.assertEqual(pref.document, {"mode": mode}) - out = _maybe_add_read_preference({}, pref) - if cls == SecondaryPreferred: - # SecondaryPreferred without hedge doesn't add $readPreference. - self.assertEqual(out, {}) - else: + with _ignore_deprecations(): + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. + self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge: dict[str, Any] = {"enabled": True} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": False} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": False, "extra": "option"} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + def test_read_preference_hedge_deprecated(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for _, cls in cases.items(): + with self.assertRaises(DeprecationWarning): + cls(hedge={"enabled": True}) async def test_send_hedge(self): cases = { @@ -582,7 +594,8 @@ async def test_send_hedge(self): client = await self.async_rs_client(event_listeners=[listener]) await client.admin.command("ping") for _mode, cls in cases.items(): - pref = cls(hedge={"enabled": True}) + with _ignore_deprecations(): + pref = cls(hedge={"enabled": True}) coll = client.test.get_collection("test", read_preference=pref) listener.reset() await coll.find_one() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index e754c896ad..afde01723d 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -35,6 +35,7 @@ ) from test.utils_shared import ( OvertCommandListener, + _ignore_deprecations, one, wait_until, ) @@ -522,33 +523,44 @@ def test_read_preference_document_hedge(self): for mode, cls in cases.items(): with self.assertRaises(TypeError): cls(hedge=[]) # type: ignore - - pref = cls(hedge={}) - self.assertEqual(pref.document, {"mode": mode}) - out = _maybe_add_read_preference({}, pref) - if cls == SecondaryPreferred: - # SecondaryPreferred without hedge doesn't add $readPreference. - self.assertEqual(out, {}) - else: + with _ignore_deprecations(): + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. + self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge: dict[str, Any] = {"enabled": True} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": False} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": False, "extra": "option"} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + def test_read_preference_hedge_deprecated(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for _, cls in cases.items(): + with self.assertRaises(DeprecationWarning): + cls(hedge={"enabled": True}) def test_send_hedge(self): cases = { @@ -562,7 +574,8 @@ def test_send_hedge(self): client = self.rs_client(event_listeners=[listener]) client.admin.command("ping") for _mode, cls in cases.items(): - pref = cls(hedge={"enabled": True}) + with _ignore_deprecations(): + pref = cls(hedge={"enabled": True}) coll = client.test.get_collection("test", read_preference=pref) listener.reset() coll.find_one() From 2149567ed373e70656ae1f48b6c2ffe82e0a6848 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 25 Mar 2025 17:28:08 -0500 Subject: [PATCH 1823/2111] PYTHON-5236 Test sharded clusters with requireApiVersion=1 (#2229) --- .evergreen/generated_configs/variants.yml | 24 +++++++++++------------ .evergreen/scripts/generate_config.py | 7 ++++++- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 53e178bd19..ca8c0e1966 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1257,12 +1257,12 @@ buildvariants: # Stable api tests - name: stable-api-require-v1-rhel8-python3.9-auth tasks: - - name: .standalone .5.0 .noauth .nossl .sync_async - - name: .standalone .6.0 .noauth .nossl .sync_async - - name: .standalone .7.0 .noauth .nossl .sync_async - - name: .standalone .8.0 .noauth .nossl .sync_async - - name: .standalone .rapid .noauth .nossl .sync_async - - name: .standalone .latest .noauth .nossl .sync_async + - name: "!.replica_set .5.0 .noauth .nossl .sync_async" + - name: "!.replica_set .6.0 .noauth .nossl .sync_async" + - name: "!.replica_set .7.0 .noauth .nossl .sync_async" + - name: "!.replica_set .8.0 .noauth .nossl .sync_async" + - name: "!.replica_set .rapid .noauth .nossl .sync_async" + - name: "!.replica_set .latest .noauth .nossl .sync_async" display_name: Stable API require v1 RHEL8 Python3.9 Auth run_on: - rhel87-small @@ -1290,12 +1290,12 @@ buildvariants: tags: [versionedApi_tag] - name: stable-api-require-v1-rhel8-python3.13-auth tasks: - - name: .standalone .5.0 .noauth .nossl .sync_async - - name: .standalone .6.0 .noauth .nossl .sync_async - - name: .standalone .7.0 .noauth .nossl .sync_async - - name: .standalone .8.0 .noauth .nossl .sync_async - - name: .standalone .rapid .noauth .nossl .sync_async - - name: .standalone .latest .noauth .nossl .sync_async + - name: "!.replica_set .5.0 .noauth .nossl .sync_async" + - name: "!.replica_set .6.0 .noauth .nossl .sync_async" + - name: "!.replica_set .7.0 .noauth .nossl .sync_async" + - name: "!.replica_set .8.0 .noauth .nossl .sync_async" + - name: "!.replica_set .rapid .noauth .nossl .sync_async" + - name: "!.replica_set .latest .noauth .nossl .sync_async" display_name: Stable API require v1 RHEL8 Python3.13 Auth run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 50c81a5840..54d5e4efe8 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -546,7 +546,6 @@ def create_storage_engine_variants(): def create_stable_api_variants(): host = DEFAULT_HOST tags = ["versionedApi_tag"] - tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0")] variants = [] types = ["require v1", "accept v2"] @@ -560,11 +559,17 @@ def create_stable_api_variants(): expansions["REQUIRE_API_VERSION"] = "1" # MONGODB_API_VERSION is the apiVersion to use in the test suite. expansions["MONGODB_API_VERSION"] = "1" + tasks = [ + f"!.replica_set .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0") + ] else: # Test against a cluster with acceptApiVersion2 but without # requireApiVersion, and don't automatically add apiVersion to # clients created in the test suite. expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" + tasks = [ + f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0") + ] base_display_name = f"Stable API {test_type}" display_name = get_display_name(base_display_name, host, python=python, **expansions) variant = create_variant( From 4e5166b29a5a8fe93b90bceacf411edfb7500b3a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 26 Mar 2025 17:39:10 -0500 Subject: [PATCH 1824/2111] PYTHON-3712 Deprecate sharded-replicaset topology type and clean up redundant runOnRequirements (#2232) --- test/__init__.py | 11 -- test/asynchronous/__init__.py | 11 -- .../unified/change-streams-clusterTime.json | 1 - .../change-streams-disambiguatedPaths.json | 1 - .../unified/change-streams-errors.json | 4 +- .../change-streams-pre_and_post_images.json | 2 +- .../change-streams-resume-allowlist.json | 2 +- .../change-streams-resume-errorLabels.json | 2 +- .../change-streams-showExpandedEvents.json | 5 +- test/csot/tailable-awaitData.json | 138 +++++++++++++++++- .../unified/serverMonitoringMode.json | 3 +- test/run_command/unified/runCommand.json | 3 +- .../driver-sessions-dirty-session-errors.json | 2 +- .../snapshot-sessions-unsupported-ops.json | 2 +- test/sessions/snapshot-sessions.json | 2 +- .../valid-pass/operator-lte.json | 16 +- test/versioned-api/transaction-handling.json | 6 +- 17 files changed, 165 insertions(+), 46 deletions(-) diff --git a/test/__init__.py b/test/__init__.py index 8e362de5ad..d8686e3257 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -678,7 +678,6 @@ def is_topology_type(self, topologies): "single", "replicaset", "sharded", - "sharded-replicaset", "load-balanced", } if unknown: @@ -693,16 +692,6 @@ def is_topology_type(self, topologies): return True if "sharded" in topologies and self.is_mongos: return True - if "sharded-replicaset" in topologies and self.is_mongos: - shards = client_context.client.config.shards.find().to_list() - for shard in shards: - # For a 3-member RS-backed sharded cluster, shard['host'] - # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' - # Otherwise it will be 'ip1:port1' - host_spec = shard["host"] - if not len(host_spec.split("/")) > 1: - return False - return True return False def require_cluster_type(self, topologies=None): diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index b3f65e5d3c..9e9cb9316d 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -680,7 +680,6 @@ async def is_topology_type(self, topologies): "single", "replicaset", "sharded", - "sharded-replicaset", "load-balanced", } if unknown: @@ -695,16 +694,6 @@ async def is_topology_type(self, topologies): return True if "sharded" in topologies and self.is_mongos: return True - if "sharded-replicaset" in topologies and self.is_mongos: - shards = await async_client_context.client.config.shards.find().to_list() - for shard in shards: - # For a 3-member RS-backed sharded cluster, shard['host'] - # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' - # Otherwise it will be 'ip1:port1' - host_spec = shard["host"] - if not len(host_spec.split("/")) > 1: - return False - return True return False def require_cluster_type(self, topologies=None): diff --git a/test/change_streams/unified/change-streams-clusterTime.json b/test/change_streams/unified/change-streams-clusterTime.json index 55b4ae3fbc..2b09e548f1 100644 --- a/test/change_streams/unified/change-streams-clusterTime.json +++ b/test/change_streams/unified/change-streams-clusterTime.json @@ -28,7 +28,6 @@ "minServerVersion": "4.0.0", "topologies": [ "replicaset", - "sharded-replicaset", "load-balanced", "sharded" ], diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json index 91d8e66da2..e6cc5ef66e 100644 --- a/test/change_streams/unified/change-streams-disambiguatedPaths.json +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -28,7 +28,6 @@ "minServerVersion": "6.1.0", "topologies": [ "replicaset", - "sharded-replicaset", "load-balanced", "sharded" ], diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json index 04fe8f04f3..65e99e541e 100644 --- a/test/change_streams/unified/change-streams-errors.json +++ b/test/change_streams/unified/change-streams-errors.json @@ -145,7 +145,7 @@ "minServerVersion": "4.1.11", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -190,7 +190,7 @@ "minServerVersion": "4.2", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } diff --git a/test/change_streams/unified/change-streams-pre_and_post_images.json b/test/change_streams/unified/change-streams-pre_and_post_images.json index 8beefb2bc8..e62fc03459 100644 --- a/test/change_streams/unified/change-streams-pre_and_post_images.json +++ b/test/change_streams/unified/change-streams-pre_and_post_images.json @@ -6,7 +6,7 @@ "minServerVersion": "6.0.0", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/test/change_streams/unified/change-streams-resume-allowlist.json b/test/change_streams/unified/change-streams-resume-allowlist.json index b4953ec736..1ec72b432b 100644 --- a/test/change_streams/unified/change-streams-resume-allowlist.json +++ b/test/change_streams/unified/change-streams-resume-allowlist.json @@ -6,7 +6,7 @@ "minServerVersion": "3.6", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json index f5f4505a9f..7fd70108f0 100644 --- a/test/change_streams/unified/change-streams-resume-errorLabels.json +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -6,7 +6,7 @@ "minServerVersion": "4.3.1", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/test/change_streams/unified/change-streams-showExpandedEvents.json b/test/change_streams/unified/change-streams-showExpandedEvents.json index 3eed2f534a..b9594e0c1e 100644 --- a/test/change_streams/unified/change-streams-showExpandedEvents.json +++ b/test/change_streams/unified/change-streams-showExpandedEvents.json @@ -6,9 +6,9 @@ "minServerVersion": "6.0.0", "topologies": [ "replicaset", - "sharded-replicaset", "sharded" - ] + ], + "serverless": "forbid" } ], "createEntities": [ @@ -462,7 +462,6 @@ "runOnRequirements": [ { "topologies": [ - "sharded-replicaset", "sharded" ] } diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json index 535fb69243..81683d3993 100644 --- a/test/csot/tailable-awaitData.json +++ b/test/csot/tailable-awaitData.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ @@ -417,6 +418,141 @@ ] } ] + }, + { + "description": "apply remaining timeoutMS if less than maxAwaitTimeMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "cursorType": "tailableAwait", + "batchSize": 1, + "maxAwaitTimeMS": 100, + "timeoutMS": 200 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 100 + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 70 + } + } + } + } + ] + } + ] + }, + { + "description": "apply maxAwaitTimeMS if less than remaining timeout", + "operations": [ + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1, + "maxAwaitTimeMS": 100, + "timeoutMS": 200 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 100 + } + } + } + } + ] + } + ] } ] } diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json index 4b492f7d85..e44fad1bcd 100644 --- a/test/discovery_and_monitoring/unified/serverMonitoringMode.json +++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json @@ -5,8 +5,7 @@ { "topologies": [ "single", - "sharded", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/run_command/unified/runCommand.json b/test/run_command/unified/runCommand.json index 007e514bd7..fde9de92e6 100644 --- a/test/run_command/unified/runCommand.json +++ b/test/run_command/unified/runCommand.json @@ -229,7 +229,6 @@ { "topologies": [ "replicaset", - "sharded-replicaset", "load-balanced", "sharded" ] @@ -493,7 +492,7 @@ { "minServerVersion": "4.2", "topologies": [ - "sharded-replicaset", + "sharded", "load-balanced" ] } diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json index 361ea83d7b..6aa1da1df5 100644 --- a/test/sessions/driver-sessions-dirty-session-errors.json +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/sessions/snapshot-sessions-unsupported-ops.json b/test/sessions/snapshot-sessions-unsupported-ops.json index 1021b7f264..c41f74d337 100644 --- a/test/sessions/snapshot-sessions-unsupported-ops.json +++ b/test/sessions/snapshot-sessions-unsupported-ops.json @@ -6,7 +6,7 @@ "minServerVersion": "5.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/sessions/snapshot-sessions.json b/test/sessions/snapshot-sessions.json index 75b577b039..260f8b6f48 100644 --- a/test/sessions/snapshot-sessions.json +++ b/test/sessions/snapshot-sessions.json @@ -6,7 +6,7 @@ "minServerVersion": "5.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/operator-lte.json b/test/unified-test-format/valid-pass/operator-lte.json index 4a13b16d15..7a6a8057ad 100644 --- a/test/unified-test-format/valid-pass/operator-lte.json +++ b/test/unified-test-format/valid-pass/operator-lte.json @@ -42,7 +42,9 @@ "arguments": { "document": { "_id": 1, - "y": 1 + "x": 2, + "y": 3, + "z": 4 } } } @@ -58,10 +60,18 @@ "documents": [ { "_id": { - "$$lte": 1 + "$$lte": 2 + }, + "x": { + "$$lte": 2.1 }, "y": { - "$$lte": 2 + "$$lte": { + "$numberLong": "3" + } + }, + "z": { + "$$lte": 4 } } ] diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json index c00c5240ae..32031296af 100644 --- a/test/versioned-api/transaction-handling.json +++ b/test/versioned-api/transaction-handling.json @@ -6,7 +6,7 @@ "minServerVersion": "4.9", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -92,7 +92,7 @@ { "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -221,7 +221,7 @@ { "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } From 3a3f3d22145dc3beb78caa5584cf30617ea70a24 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 27 Mar 2025 08:39:12 -0500 Subject: [PATCH 1825/2111] PYTHON-5240 Add pre-commit hook for config generation (#2237) --- .evergreen/scripts/generate-config.sh | 6 ++++++ .evergreen/scripts/generate_config.py | 14 +++----------- .pre-commit-config.yaml | 6 ++++++ CONTRIBUTING.md | 2 +- 4 files changed, 16 insertions(+), 12 deletions(-) create mode 100755 .evergreen/scripts/generate-config.sh diff --git a/.evergreen/scripts/generate-config.sh b/.evergreen/scripts/generate-config.sh new file mode 100755 index 0000000000..70b4578cf9 --- /dev/null +++ b/.evergreen/scripts/generate-config.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# Entry point for the generate-config pre-commit hook. + +set -eu + +python .evergreen/scripts/generate_config.py diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 54d5e4efe8..5d067923b6 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -1,12 +1,4 @@ -# /// script -# requires-python = ">=3.9" -# dependencies = [ -# "shrub.py>=3.2.0", -# "pyyaml>=6.0.2" -# ] -# /// - -# Note: Run this file with `pipx run`, or `uv run`. +# Note: See CONTRIBUTING.md for how to update/run this file. from __future__ import annotations import sys @@ -1113,7 +1105,7 @@ def write_variants_to_file(): with target.open("w") as fid: fid.write("buildvariants:\n") - for name, func in getmembers(mod, isfunction): + for name, func in sorted(getmembers(mod, isfunction)): if not name.endswith("_variants"): continue if not name.startswith("create_"): @@ -1143,7 +1135,7 @@ def write_tasks_to_file(): with target.open("w") as fid: fid.write("tasks:\n") - for name, func in getmembers(mod, isfunction): + for name, func in sorted(getmembers(mod, isfunction)): if not name.endswith("_tasks"): continue if not name.startswith("create_"): diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 335bf97490..bab2ea47da 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -116,3 +116,9 @@ repos: (?x)( .evergreen/retry-with-backoff.sh ) + - id: generate-config + name: generate-config + entry: .evergreen/scripts/generate-config.sh + language: python + require_serial: true + additional_dependencies: ["shrub.py>=3.2.0", "pyyaml>=6.0.2"] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d8cc8c8bcd..c31d2d1c96 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -367,7 +367,7 @@ If you are running one of the `no-responder` tests, omit the `run-server` step. - If there are any special test considerations, including not running `pytest` at all, handle it in `.evergreen/scripts/run_tests.py`. - If there are any services or atlas clusters to teardown, handle them in `.evergreen/scripts/teardown_tests.py`. - Add functions to generate the test variant(s) and task(s) to the `.evergreen/scripts/generate_config.py`. -- Regenerate the test variants and tasks using the instructions in `.evergreen/scripts/generate_config.py`. +- Regenerate the test variants and tasks using `pre-commit run --all-files generate-config`. - Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. ## Re-sync Spec Tests From 53c4694cf9bb09b48669d74c9ba59e6374f51680 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 27 Mar 2025 08:57:41 -0500 Subject: [PATCH 1826/2111] PYTHON-5233 Allow python version to be set at the task level (#2228) --- .evergreen/config.yml | 4 +- .evergreen/generated_configs/tasks.yml | 32 +++++- .evergreen/generated_configs/variants.yml | 46 ++------- .evergreen/scripts/generate_config.py | 116 ++++++++++++---------- .evergreen/scripts/setup-dev-env.sh | 6 +- .evergreen/scripts/setup_tests.py | 2 +- .evergreen/utils.sh | 21 ++++ 7 files changed, 132 insertions(+), 95 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 97845b86d4..30c4fe2002 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -206,7 +206,7 @@ functions: params: binary: bash working_dir: "src" - include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, + include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, PYTHON_VERSION, STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER] args: [.evergreen/just.sh, run-server, "${TEST_NAME}"] - command: expansions.update @@ -227,7 +227,7 @@ functions: type: test params: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, - AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, + AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, PYTHON_VERSION, DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG, ORCHESTRATION_FILE, OCSP_SERVER_TYPE] binary: bash diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index efc7844061..f859169ed4 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -723,18 +723,48 @@ tasks: tags: [doctests] # Enterprise auth tests - - name: test-enterprise-auth + - name: test-enterprise-auth-python3.9 commands: - func: run server vars: TEST_NAME: enterprise_auth AUTH: auth + PYTHON_VERSION: "3.9" - func: assume ec2 role - func: run tests vars: TEST_NAME: enterprise_auth AUTH: auth + PYTHON_VERSION: "3.9" tags: [enterprise_auth] + - name: test-enterprise-auth-python3.13 + commands: + - func: run server + vars: + TEST_NAME: enterprise_auth + AUTH: auth + PYTHON_VERSION: "3.13" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: enterprise_auth + AUTH: auth + PYTHON_VERSION: "3.13" + tags: [enterprise_auth] + - name: test-enterprise-auth-pypy3.10 + commands: + - func: run server + vars: + TEST_NAME: enterprise_auth + AUTH: auth + PYTHON_VERSION: pypy3.10 + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: enterprise_auth + AUTH: auth + PYTHON_VERSION: pypy3.10 + tags: [enterprise_auth, pypy] # Free threading tests - name: test-free-threading diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index ca8c0e1966..2242e9c338 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -507,54 +507,24 @@ buildvariants: tags: [encryption_tag] # Enterprise auth tests - - name: auth-enterprise-macos-python3.9 + - name: auth-enterprise-macos tasks: - - name: .enterprise_auth - display_name: Auth Enterprise macOS Python3.9 + - name: .enterprise_auth !.pypy + display_name: Auth Enterprise macOS run_on: - macos-14 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: auth-enterprise-rhel8-python3.10 - tasks: - - name: .enterprise_auth - display_name: Auth Enterprise RHEL8 Python3.10 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: auth-enterprise-rhel8-python3.11 - tasks: - - name: .enterprise_auth - display_name: Auth Enterprise RHEL8 Python3.11 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: auth-enterprise-rhel8-python3.12 + - name: auth-enterprise-win64 tasks: - - name: .enterprise_auth - display_name: Auth Enterprise RHEL8 Python3.12 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: auth-enterprise-win64-python3.13 - tasks: - - name: .enterprise_auth - display_name: Auth Enterprise Win64 Python3.13 + - name: .enterprise_auth !.pypy + display_name: Auth Enterprise Win64 run_on: - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/Python313/python.exe - - name: auth-enterprise-rhel8-pypy3.10 + - name: auth-enterprise-rhel8 tasks: - name: .enterprise_auth - display_name: Auth Enterprise RHEL8 PyPy3.10 + display_name: Auth Enterprise RHEL8 run_on: - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Free threaded tests - name: free-threaded-rhel8-python3.13t diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 5d067923b6..24aadfbe04 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -179,17 +179,14 @@ def get_versions_until(max_version: str) -> list[str]: return versions -def get_display_name(base: str, host: Host | None = None, **kwargs) -> str: - """Get the display name of a variant.""" +def get_common_name(base: str, sep: str, **kwargs) -> str: display_name = base - if host is not None: - display_name += f" {host.display_name}" version = kwargs.pop("VERSION", None) version = version or kwargs.pop("version", None) if version: if version not in ["rapid", "latest"]: version = f"v{version}" - display_name = f"{display_name} {version}" + display_name = f"{display_name}{sep}{version}" for key, value in kwargs.items(): name = value if key.lower() == "python": @@ -201,10 +198,22 @@ def get_display_name(base: str, host: Host | None = None, **kwargs) -> str: name = DISPLAY_LOOKUP[key.lower()][value] else: continue - display_name = f"{display_name} {name}" + display_name = f"{display_name}{sep}{name}" return display_name +def get_variant_name(base: str, host: Host | None = None, **kwargs) -> str: + """Get the display name of a variant.""" + display_name = base + if host is not None: + display_name += f" {host.display_name}" + return get_common_name(display_name, " ", **kwargs) + + +def get_task_name(base: str, **kwargs): + return get_common_name(base, "-", **kwargs).lower() + + def zip_cycle(*iterables, empty_default=None): """Get all combinations of the inputs, cycling over the shorter list(s).""" cycles = [cycle(i) for i in iterables] @@ -244,7 +253,7 @@ def create_ocsp_variants() -> list[BuildVariant]: host = DEFAULT_HOST variant = create_variant( [".ocsp"], - get_display_name(base_display, host, version=version, python=python), + get_variant_name(base_display, host, version=version, python=python), python=python, version=version, host=host, @@ -260,7 +269,7 @@ def create_ocsp_variants() -> list[BuildVariant]: python = CPYTHONS[0] if version == "4.4" else CPYTHONS[-1] variant = create_variant( [".ocsp-rsa !.ocsp-staple"], - get_display_name(base_display, host, version=version, python=python), + get_variant_name(base_display, host, version=version, python=python), python=python, version=version, host=host, @@ -282,7 +291,7 @@ def create_server_variants() -> list[BuildVariant]: for python, c_ext in product([*MIN_MAX_PYTHON, PYPYS[-1]], C_EXTS): expansions = dict(COVERAGE="coverage") handle_c_ext(c_ext, expansions) - display_name = get_display_name(base_display_name, host, python=python, **expansions) + display_name = get_variant_name(base_display_name, host, python=python, **expansions) variant = create_variant( [f".{t} .sync_async" for t in TOPOLOGIES], display_name, @@ -296,7 +305,7 @@ def create_server_variants() -> list[BuildVariant]: # Test the rest of the pythons. for python in CPYTHONS[1:-1] + PYPYS[:-1]: display_name = f"Test {host}" - display_name = get_display_name(base_display_name, host, python=python) + display_name = get_variant_name(base_display_name, host, python=python) variant = create_variant( [f"{t} .sync_async" for t in SUB_TASKS], display_name, @@ -316,7 +325,7 @@ def create_server_variants() -> list[BuildVariant]: for version in get_versions_from("6.0"): tasks.extend(f"{t} .{version} !.sync_async" for t in SUB_TASKS) host = HOSTS[host_name] - display_name = get_display_name(base_display_name, host, python=python) + display_name = get_variant_name(base_display_name, host, python=python) variant = create_variant(tasks, display_name, python=python, host=host) variants.append(variant) @@ -332,7 +341,7 @@ def create_free_threaded_variants() -> list[BuildVariant]: tasks = [".free-threading"] host = HOSTS[host_name] python = "3.13t" - display_name = get_display_name("Free-threaded", host, python=python) + display_name = get_variant_name("Free-threaded", host, python=python) variant = create_variant(tasks, display_name, python=python, host=host) variants.append(variant) return variants @@ -357,7 +366,7 @@ def get_encryption_expansions(encryption): encryptions = ["Encryption", "Encryption crypt_shared", "Encryption PyOpenSSL"] for encryption, python in product(encryptions, [*MIN_MAX_PYTHON, PYPYS[-1]]): expansions = get_encryption_expansions(encryption) - display_name = get_display_name(encryption, host, python=python, **expansions) + display_name = get_variant_name(encryption, host, python=python, **expansions) variant = create_variant( [f"{t} .sync_async" for t in SUB_TASKS], display_name, @@ -372,7 +381,7 @@ def get_encryption_expansions(encryption): # Test the rest of the pythons on linux for all server versions. for encryption, python, task in zip_cycle(encryptions, CPYTHONS[1:-1] + PYPYS[:-1], SUB_TASKS): expansions = get_encryption_expansions(encryption) - display_name = get_display_name(encryption, host, python=python, **expansions) + display_name = get_variant_name(encryption, host, python=python, **expansions) variant = create_variant( [f"{task} .sync_async"], display_name, @@ -388,7 +397,7 @@ def get_encryption_expansions(encryption): for host_name, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): host = HOSTS[host_name] expansions = get_encryption_expansions(encryption) - display_name = get_display_name(encryption, host, python=python, **expansions) + display_name = get_variant_name(encryption, host, python=python, **expansions) variant = create_variant( task_names, display_name, @@ -410,7 +419,7 @@ def create_load_balancer_variants(): variants = [] for version in versions: python = CPYTHONS[0] - display_name = get_display_name("Load Balancer", host, python=python, version=version) + display_name = get_variant_name("Load Balancer", host, python=python, version=version) variant = create_variant( [".load-balancer"], display_name, @@ -435,7 +444,7 @@ def create_compression_variants(): handle_c_ext(c_ext, expansions) base_name = f"Compression {compressor}" python = CPYTHONS[ind % len(CPYTHONS)] - display_name = get_display_name(base_name, host, python=python, **expansions) + display_name = get_variant_name(base_name, host, python=python, **expansions) variant = create_variant( task_names[compressor], display_name, @@ -450,7 +459,7 @@ def create_compression_variants(): expansions = dict(COMPRESSORS=compressor) handle_c_ext(c_ext, expansions) base_name = f"Compression {compressor}" - display_name = get_display_name(base_name, host, python=python, **expansions) + display_name = get_variant_name(base_name, host, python=python, **expansions) variant = create_variant( task_names[compressor], display_name, @@ -465,17 +474,13 @@ def create_compression_variants(): def create_enterprise_auth_variants(): variants = [] - - # All python versions across platforms. - for python in ALL_PYTHONS: - if python == CPYTHONS[0]: - host = HOSTS["macos"] - elif python == CPYTHONS[-1]: - host = HOSTS["win64"] + for host in [HOSTS["macos"], HOSTS["win64"], DEFAULT_HOST]: + display_name = get_variant_name("Auth Enterprise", host) + if host == DEFAULT_HOST: + tags = [".enterprise_auth"] else: - host = DEFAULT_HOST - display_name = get_display_name("Auth Enterprise", host, python=python) - variant = create_variant([".enterprise_auth"], display_name, host=host, python=python) + tags = [".enterprise_auth !.pypy"] + variant = create_variant(tags, display_name, host=host) variants.append(variant) return variants @@ -498,7 +503,7 @@ def create_pyopenssl_variants(): else: host = DEFAULT_HOST - display_name = get_display_name(base_name, host, python=python) + display_name = get_variant_name(base_name, host, python=python) variant = create_variant( [f".replica_set .{auth} .{ssl} .sync_async", f".7.0 .{auth} .{ssl} .sync_async"], display_name, @@ -527,7 +532,7 @@ def create_storage_engine_variants(): tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in versions] + [ f".replica_set .{v} .noauth .nossl .sync_async" for v in versions ] - display_name = get_display_name(f"Storage {engine}", host, python=python) + display_name = get_variant_name(f"Storage {engine}", host, python=python) variant = create_variant( tasks, display_name, host=host, python=python, expansions=expansions ) @@ -563,7 +568,7 @@ def create_stable_api_variants(): f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0") ] base_display_name = f"Stable API {test_type}" - display_name = get_display_name(base_display_name, host, python=python, **expansions) + display_name = get_variant_name(base_display_name, host, python=python, **expansions) variant = create_variant( tasks, display_name, host=host, python=python, tags=tags, expansions=expansions ) @@ -578,7 +583,7 @@ def create_green_framework_variants(): host = DEFAULT_HOST for python, framework in product([CPYTHONS[0], CPYTHONS[-1]], ["eventlet", "gevent"]): expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") - display_name = get_display_name(f"Green {framework.capitalize()}", host, python=python) + display_name = get_variant_name(f"Green {framework.capitalize()}", host, python=python) variant = create_variant( tasks, display_name, host=host, python=python, expansions=expansions ) @@ -593,7 +598,7 @@ def create_no_c_ext_variants(): tasks = [f".{topology} .noauth .nossl !.sync_async"] expansions = dict() handle_c_ext(C_EXTS[0], expansions) - display_name = get_display_name("No C Ext", host, python=python) + display_name = get_variant_name("No C Ext", host, python=python) variant = create_variant( tasks, display_name, host=host, python=python, expansions=expansions ) @@ -606,7 +611,7 @@ def create_atlas_data_lake_variants(): host = HOSTS["ubuntu22"] for python in MIN_MAX_PYTHON: tasks = [".atlas_data_lake"] - display_name = get_display_name("Atlas Data Lake", host, python=python) + display_name = get_variant_name("Atlas Data Lake", host, python=python) variant = create_variant(tasks, display_name, host=host, python=python) variants.append(variant) return variants @@ -618,7 +623,7 @@ def create_mod_wsgi_variants(): tasks = [".mod_wsgi"] expansions = dict(MOD_WSGI_VERSION="4") for python in MIN_MAX_PYTHON: - display_name = get_display_name("mod_wsgi", host, python=python) + display_name = get_variant_name("mod_wsgi", host, python=python) variant = create_variant( tasks, display_name, host=host, python=python, expansions=expansions ) @@ -630,7 +635,7 @@ def create_disable_test_commands_variants(): host = DEFAULT_HOST expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") python = CPYTHONS[0] - display_name = get_display_name("Disable test commands", host, python=python) + display_name = get_variant_name("Disable test commands", host, python=python) tasks = [".latest .sync_async"] return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] @@ -643,7 +648,7 @@ def create_serverless_variants(): return [ create_variant( tasks, - get_display_name(base_name, host, python=python), + get_variant_name(base_name, host, python=python), host=host, python=python, batchtime=batchtime, @@ -663,7 +668,7 @@ def create_oidc_auth_variants(): variants.append( create_variant( tasks, - get_display_name("Auth OIDC", host), + get_variant_name("Auth OIDC", host), host=host, batchtime=BATCHTIME_WEEK, ) @@ -677,7 +682,7 @@ def create_search_index_variants(): return [ create_variant( [".search_index"], - get_display_name("Search Index Helpers", host, python=python), + get_variant_name("Search Index Helpers", host, python=python), python=python, host=host, ) @@ -690,7 +695,7 @@ def create_mockupdb_variants(): return [ create_variant( [".mockupdb"], - get_display_name("MockupDB", host, python=python), + get_variant_name("MockupDB", host, python=python), python=python, host=host, ) @@ -703,7 +708,7 @@ def create_doctests_variants(): return [ create_variant( [".doctests"], - get_display_name("Doctests", host, python=python), + get_variant_name("Doctests", host, python=python), python=python, host=host, ) @@ -715,7 +720,7 @@ def create_atlas_connect_variants(): return [ create_variant( [".atlas_connect"], - get_display_name("Atlas connect", host, python=python), + get_variant_name("Atlas connect", host, python=python), python=python, host=host, ) @@ -743,7 +748,7 @@ def create_aws_auth_variants(): host = HOSTS[host_name] variant = create_variant( tasks, - get_display_name("Auth AWS", host, python=python), + get_variant_name("Auth AWS", host, python=python), host=host, python=python, expansions=expansions, @@ -765,7 +770,7 @@ def create_alternative_hosts_variants(): variants.append( create_variant( [".5.0 .standalone !.sync_async"], - get_display_name("OpenSSL 1.0.2", host, python=CPYTHONS[0]), + get_variant_name("OpenSSL 1.0.2", host, python=CPYTHONS[0]), host=host, python=CPYTHONS[0], batchtime=batchtime, @@ -782,7 +787,7 @@ def create_alternative_hosts_variants(): variants.append( create_variant( tags, - display_name=get_display_name("Other hosts", host), + display_name=get_variant_name("Other hosts", host), batchtime=batchtime, host=host, expansions=expansions, @@ -975,13 +980,20 @@ def create_atlas_connect_tasks(): def create_enterprise_auth_tasks(): - vars = dict(TEST_NAME="enterprise_auth", AUTH="auth") - server_func = FunctionCall(func="run server", vars=vars) - assume_func = FunctionCall(func="assume ec2 role") - test_func = FunctionCall(func="run tests", vars=vars) - task_name = "test-enterprise-auth" - tags = ["enterprise_auth"] - return [EvgTask(name=task_name, tags=tags, commands=[server_func, assume_func, test_func])] + tasks = [] + for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: + vars = dict(TEST_NAME="enterprise_auth", AUTH="auth", PYTHON_VERSION=python) + server_func = FunctionCall(func="run server", vars=vars) + assume_func = FunctionCall(func="assume ec2 role") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = get_task_name("test-enterprise-auth", python=python) + tags = ["enterprise_auth"] + if python in PYPYS: + tags += ["pypy"] + tasks.append( + EvgTask(name=task_name, tags=tags, commands=[server_func, assume_func, test_func]) + ) + return tasks def create_perf_tasks(): diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index d1c4be3494..d9b88e3385 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -31,7 +31,11 @@ if [ ! -d $BIN_DIR ]; then . $ROOT/.evergreen/utils.sh if [ -z "${PYTHON_BINARY:-}" ]; then - PYTHON_BINARY=$(find_python3) + if [ -n "${PYTHON_VERSION:-}" ]; then + PYTHON_BINARY=$(get_python_binary $PYTHON_VERSION) + else + PYTHON_BINARY=$(find_python3) + fi fi export UV_PYTHON=${PYTHON_BINARY} echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index d02cd8759a..8f4299a6d0 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -124,7 +124,7 @@ def load_config_from_file(path: str | Path) -> dict[str, str]: def get_secrets(name: str) -> dict[str, str]: secrets_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/secrets_handling") - run_command(f"bash {secrets_dir}/setup-secrets.sh {name}", cwd=secrets_dir) + run_command(f"bash {secrets_dir.as_posix()}/setup-secrets.sh {name}", cwd=secrets_dir) return load_config_from_file(secrets_dir / "secrets-export.sh") diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index bb3ed8dabd..8dc7dd72f0 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -113,3 +113,24 @@ is_python_39() { return 1 fi } + + +# Function that gets a python binary given a python version string. +# Versions can be of the form 3.xx or pypy3.xx. +get_python_binary() { + version=$1 + if [ "$(uname -s)" = "Darwin" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/$version/bin/python3" + elif [ "Windows_NT" = "${OS:-}" ]; then + version=$(echo $version | cut -d. -f1,2 | sed 's/\.//g') + PYTHON="C:/python/Python$version/python.exe" + else + PYTHON="/opt/python/$version/bin/python3" + fi + if is_python_39 "$(command -v $PYTHON)"; then + echo "$PYTHON" + else + echo "Could not find suitable python binary for '$version'" >&2 + return 1 + fi +} From 3875cc6e1b8db0abdcedcc0be2c43b77035cd580 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 27 Mar 2025 11:43:37 -0500 Subject: [PATCH 1827/2111] PYTHON-5421 Fix handling of client timeout property (#2240) --- pymongo/asynchronous/mongo_client.py | 1 + pymongo/synchronous/mongo_client.py | 1 + 2 files changed, 2 insertions(+) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 754b8325ed..43dbd49f76 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -758,6 +758,7 @@ def __init__( self._host = host self._port = port self._topology: Topology = None # type: ignore[assignment] + self._timeout: float | None = None # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 1cedbfe1e2..2d8d6d730b 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -756,6 +756,7 @@ def __init__( self._host = host self._port = port self._topology: Topology = None # type: ignore[assignment] + self._timeout: float | None = None # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. From 33843d285b104dbae913c90f8ce6d4dae08f9d10 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 27 Mar 2025 12:56:21 -0500 Subject: [PATCH 1828/2111] PYTHON-5234 Clean up load balancer variants (#2241) --- .evergreen/generated_configs/tasks.yml | 189 +++++++++++++++++++++- .evergreen/generated_configs/variants.yml | 47 +----- .evergreen/scripts/generate_config.py | 30 ++-- 3 files changed, 199 insertions(+), 67 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index f859169ed4..3450e491a7 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -805,7 +805,7 @@ tasks: SUB_TEST_NAME: azure-fail # Load balancer tests - - name: test-load-balancer-auth-ssl + - name: test-load-balancer-auth-ssl-v6.0 commands: - func: run server vars: @@ -813,13 +813,74 @@ tasks: AUTH: auth SSL: ssl TEST_NAME: load_balancer + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, auth, ssl] + - name: test-load-balancer-auth-ssl-v7.0 + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, auth, ssl] + - name: test-load-balancer-auth-ssl-v8.0 + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, auth, ssl] + - name: test-load-balancer-auth-ssl-rapid + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + VERSION: rapid - func: run tests vars: AUTH: auth SSL: ssl TEST_NAME: load_balancer tags: [load-balancer, auth, ssl] - - name: test-load-balancer-noauth-ssl + - name: test-load-balancer-auth-ssl-latest + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, auth, ssl] + - name: test-load-balancer-noauth-ssl-v6.0 commands: - func: run server vars: @@ -827,13 +888,74 @@ tasks: AUTH: noauth SSL: ssl TEST_NAME: load_balancer + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, ssl] + - name: test-load-balancer-noauth-ssl-v7.0 + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, ssl] + - name: test-load-balancer-noauth-ssl-v8.0 + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: ssl TEST_NAME: load_balancer tags: [load-balancer, noauth, ssl] - - name: test-load-balancer-noauth-nossl + - name: test-load-balancer-noauth-ssl-rapid + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, ssl] + - name: test-load-balancer-noauth-ssl-latest + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, ssl] + - name: test-load-balancer-noauth-nossl-v6.0 commands: - func: run server vars: @@ -841,6 +963,67 @@ tasks: AUTH: noauth SSL: nossl TEST_NAME: load_balancer + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, nossl] + - name: test-load-balancer-noauth-nossl-v7.0 + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, nossl] + - name: test-load-balancer-noauth-nossl-v8.0 + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, nossl] + - name: test-load-balancer-noauth-nossl-rapid + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + tags: [load-balancer, noauth, nossl] + - name: test-load-balancer-noauth-nossl-latest + commands: + - func: run server + vars: + TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TEST_NAME: load_balancer + VERSION: latest - func: run tests vars: AUTH: noauth diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 2242e9c338..9d299fc7fa 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -599,56 +599,13 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Load balancer tests - - name: load-balancer-rhel8-v6.0-python3.9 + - name: load-balancer tasks: - name: .load-balancer - display_name: Load Balancer RHEL8 v6.0 Python3.9 + display_name: Load Balancer run_on: - rhel87-small batchtime: 10080 - expansions: - VERSION: "6.0" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: load-balancer-rhel8-v7.0-python3.9 - tasks: - - name: .load-balancer - display_name: Load Balancer RHEL8 v7.0 Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "7.0" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: load-balancer-rhel8-v8.0-python3.9 - tasks: - - name: .load-balancer - display_name: Load Balancer RHEL8 v8.0 Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: "8.0" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: load-balancer-rhel8-rapid-python3.9 - tasks: - - name: .load-balancer - display_name: Load Balancer RHEL8 rapid Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: rapid - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: load-balancer-rhel8-latest-python3.9 - tasks: - - name: .load-balancer - display_name: Load Balancer RHEL8 latest Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - VERSION: latest - PYTHON_BINARY: /opt/python/3.9/bin/python3 # Mockupdb tests - name: mockupdb-rhel8-python3.9 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 24aadfbe04..ca8146460b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -413,23 +413,11 @@ def get_encryption_expansions(encryption): def create_load_balancer_variants(): # Load balancer tests - run all supported server versions using the lowest supported python. - host = DEFAULT_HOST - batchtime = BATCHTIME_WEEK - versions = get_versions_from("6.0") - variants = [] - for version in versions: - python = CPYTHONS[0] - display_name = get_variant_name("Load Balancer", host, python=python, version=version) - variant = create_variant( - [".load-balancer"], - display_name, - python=python, - host=host, - version=version, - batchtime=batchtime, + return [ + create_variant( + [".load-balancer"], "Load Balancer", host=DEFAULT_HOST, batchtime=BATCHTIME_WEEK ) - variants.append(variant) - return variants + ] def create_compression_variants(): @@ -830,11 +818,15 @@ def create_server_tasks(): def create_load_balancer_tasks(): tasks = [] - for auth, ssl in AUTH_SSLS: - name = f"test-load-balancer-{auth}-{ssl}".lower() + for (auth, ssl), version in product(AUTH_SSLS, get_versions_from("6.0")): + name = get_task_name(f"test-load-balancer-{auth}-{ssl}", version=version) tags = ["load-balancer", auth, ssl] server_vars = dict( - TOPOLOGY="sharded_cluster", AUTH=auth, SSL=ssl, TEST_NAME="load_balancer" + TOPOLOGY="sharded_cluster", + AUTH=auth, + SSL=ssl, + TEST_NAME="load_balancer", + VERSION=version, ) server_func = FunctionCall(func="run server", vars=server_vars) test_vars = dict(AUTH=auth, SSL=ssl, TEST_NAME="load_balancer") From 0c6f84642c4b9ecfa667d03b842ff5228487e7c8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 27 Mar 2025 13:09:09 -0500 Subject: [PATCH 1829/2111] PYTHON-4942 & PYTHON-4936 Test that isClientError considers network errors and operations may be an empty array (#2236) --- .../unified/estimatedDocumentCount.json | 4 +- .../expectedError-isClientError.json | 74 +++++++++++++++++++ .../valid-pass/operation-empty_array.json | 10 +++ 3 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 test/unified-test-format/valid-pass/expectedError-isClientError.json create mode 100644 test/unified-test-format/valid-pass/operation-empty_array.json diff --git a/test/retryable_reads/unified/estimatedDocumentCount.json b/test/retryable_reads/unified/estimatedDocumentCount.json index 75a676b9b6..2ee29f6799 100644 --- a/test/retryable_reads/unified/estimatedDocumentCount.json +++ b/test/retryable_reads/unified/estimatedDocumentCount.json @@ -195,7 +195,7 @@ "object": "collection1", "name": "estimatedDocumentCount", "expectError": { - "isError": true + "isClientError": true } } ], @@ -241,7 +241,7 @@ "object": "collection0", "name": "estimatedDocumentCount", "expectError": { - "isError": true + "isClientError": true } } ], diff --git a/test/unified-test-format/valid-pass/expectedError-isClientError.json b/test/unified-test-format/valid-pass/expectedError-isClientError.json new file mode 100644 index 0000000000..9c6beda588 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedError-isClientError.json @@ -0,0 +1,74 @@ +{ + "description": "expectedError-isClientError", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "isClientError considers network errors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operation-empty_array.json b/test/unified-test-format/valid-pass/operation-empty_array.json new file mode 100644 index 0000000000..93b25c983c --- /dev/null +++ b/test/unified-test-format/valid-pass/operation-empty_array.json @@ -0,0 +1,10 @@ +{ + "description": "operation-empty_array", + "schemaVersion": "1.0", + "tests": [ + { + "description": "Empty operations array", + "operations": [] + } + ] +} From a1b4a7481cf0933b8b62313b8696319be682e37f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 28 Mar 2025 08:38:52 -0500 Subject: [PATCH 1830/2111] PYTHON-5243 Migrate remaining variants to generated config (#2243) --- .evergreen/config.yml | 34 --------------------- .evergreen/generated_configs/variants.yml | 37 +++++++++++++++++++++++ .evergreen/scripts/generate_config.py | 35 ++++++++++++++++++--- 3 files changed, 68 insertions(+), 38 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 30c4fe2002..887bdfcd9a 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -350,37 +350,3 @@ tasks: - mongodb - mongo-python-driver - ${github.amrom.workers.devmit} - -buildvariants: - -- name: "Coverage Report" - display_name: "Coverage Report" - run_on: - - rhel84-small - tasks: - - name: "coverage-report" - -- name: testkms-variant - display_name: "KMS" - run_on: - - debian11-small - tasks: - - name: test-gcpkms - batchtime: 10080 # 7 days - - name: test-gcpkms-fail - - name: test-azurekms - batchtime: 10080 # 7 days - - name: test-azurekms-fail - -- name: rhel8-import-time - display_name: Import Time - run_on: rhel87-small - tasks: - - name: "check-import-time" - -- name: backport-pr - display_name: "Backport PR" - run_on: - - rhel8.7-small - tasks: - - name: "backport-pr" diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 9d299fc7fa..e7c1ed88c4 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -151,6 +151,14 @@ buildvariants: run_on: - rhel87-small + # Backport pr tests + - name: backport-pr + tasks: + - name: backport-pr + display_name: Backport PR + run_on: + - rhel87-small + # Compression tests - name: compression-snappy-rhel8-python3.9-no-c tasks: @@ -237,6 +245,14 @@ buildvariants: COMPRESSORS: zstd PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + # Coverage report tests + - name: coverage-report + tasks: + - name: coverage-report + display_name: Coverage Report + run_on: + - rhel87-small + # Disable test commands tests - name: disable-test-commands-rhel8-python3.9 tasks: @@ -598,6 +614,27 @@ buildvariants: SSL: ssl PYTHON_BINARY: /opt/python/3.13/bin/python3 + # Import time tests + - name: import-time + tasks: + - name: check-import-time + display_name: Import Time + run_on: + - rhel87-small + + # Kms tests + - name: kms + tasks: + - name: test-gcpkms + batchtime: 10080 + - name: test-gcpkms-fail + - name: test-azurekms + batchtime: 10080 + - name: test-azurekms-fail + display_name: KMS + run_on: + - debian11-small + # Load balancer tests - name: load-balancer tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index ca8146460b..09370bc2b1 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -62,6 +62,7 @@ class Host: HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict()) +HOSTS["debian11"] = Host("debian11", "debian11-small", "Debian11", dict()) DEFAULT_HOST = HOSTS["rhel8"] # Other hosts @@ -85,7 +86,7 @@ class Host: def create_variant_generic( - task_names: list[str], + tasks: list[str | EvgTaskRef], display_name: str, *, host: Host | None = None, @@ -94,7 +95,12 @@ def create_variant_generic( **kwargs: Any, ) -> BuildVariant: """Create a build variant for the given inputs.""" - task_refs = [EvgTaskRef(name=n) for n in task_names] + task_refs = [] + for t in tasks: + if isinstance(t, EvgTaskRef): + task_refs.append(t) + else: + task_refs.append(EvgTaskRef(name=t)) expansions = expansions and expansions.copy() or dict() if "run_on" in kwargs: run_on = kwargs.pop("run_on") @@ -118,7 +124,7 @@ def create_variant_generic( def create_variant( - task_names: list[str], + tasks: list[str | EvgTaskRef], display_name: str, *, version: str | None = None, @@ -133,7 +139,7 @@ def create_variant( if python: expansions["PYTHON_BINARY"] = get_python_binary(python, host) return create_variant_generic( - task_names, display_name, version=version, host=host, expansions=expansions, **kwargs + tasks, display_name, version=version, host=host, expansions=expansions, **kwargs ) @@ -716,6 +722,27 @@ def create_atlas_connect_variants(): ] +def create_coverage_report_variants(): + return [create_variant(["coverage-report"], "Coverage Report", host=DEFAULT_HOST)] + + +def create_kms_variants(): + tasks = [] + tasks.append(EvgTaskRef(name="test-gcpkms", batchtime=BATCHTIME_WEEK)) + tasks.append("test-gcpkms-fail") + tasks.append(EvgTaskRef(name="test-azurekms", batchtime=BATCHTIME_WEEK)) + tasks.append("test-azurekms-fail") + return [create_variant(tasks, "KMS", host=HOSTS["debian11"])] + + +def create_import_time_variants(): + return [create_variant(["check-import-time"], "Import Time", host=DEFAULT_HOST)] + + +def create_backport_pr_variants(): + return [create_variant(["backport-pr"], "Backport PR", host=DEFAULT_HOST)] + + def create_perf_variants(): host = HOSTS["perf"] return [ From a0951515f4439cfb7bd93a7b66d4be00b46c9af0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 28 Mar 2025 11:12:49 -0400 Subject: [PATCH 1831/2111] PYTHON-5238 - Add async GridFS API docs (#2235) --- doc/api/gridfs/asynchronous/grid_file.rst | 19 ++++++++++ doc/api/gridfs/asynchronous/index.rst | 18 ++++++++++ doc/api/gridfs/index.rst | 3 +- gridfs/asynchronous/__init__.py | 42 +++++++++++++++++++++++ 4 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 doc/api/gridfs/asynchronous/grid_file.rst create mode 100644 doc/api/gridfs/asynchronous/index.rst create mode 100644 gridfs/asynchronous/__init__.py diff --git a/doc/api/gridfs/asynchronous/grid_file.rst b/doc/api/gridfs/asynchronous/grid_file.rst new file mode 100644 index 0000000000..fbf34adc8a --- /dev/null +++ b/doc/api/gridfs/asynchronous/grid_file.rst @@ -0,0 +1,19 @@ +:mod:`grid_file` -- Async tools for representing files stored in GridFS +======================================================================= + +.. automodule:: gridfs.asynchronous.grid_file + :synopsis: Async tools for representing files stored in GridFS + + .. autoclass:: AsyncGridIn + :members: + + .. autoattribute:: _id + + .. autoclass:: AsyncGridOut + :members: + + .. autoattribute:: _id + .. automethod:: __aiter__ + + .. autoclass:: AsyncGridOutCursor + :members: diff --git a/doc/api/gridfs/asynchronous/index.rst b/doc/api/gridfs/asynchronous/index.rst new file mode 100644 index 0000000000..0904d10f98 --- /dev/null +++ b/doc/api/gridfs/asynchronous/index.rst @@ -0,0 +1,18 @@ +:mod:`gridfs async` -- Async tools for working with GridFS +========================================================== + +.. warning:: This API is currently in beta, meaning the classes, methods, + and behaviors described within may change before the full release. + If you come across any bugs during your use of this API, + please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. + +.. automodule:: gridfs.asynchronous + :synopsis: Async tools for working with GridFS + :members: AsyncGridFS, AsyncGridFSBucket + +Sub-modules: + +.. toctree:: + :maxdepth: 2 + + grid_file diff --git a/doc/api/gridfs/index.rst b/doc/api/gridfs/index.rst index b81fbde782..190c561d05 100644 --- a/doc/api/gridfs/index.rst +++ b/doc/api/gridfs/index.rst @@ -8,7 +8,8 @@ Sub-modules: .. toctree:: - :maxdepth: 2 + :maxdepth: 3 + asynchronous/index errors grid_file diff --git a/gridfs/asynchronous/__init__.py b/gridfs/asynchronous/__init__.py new file mode 100644 index 0000000000..0826145b11 --- /dev/null +++ b/gridfs/asynchronous/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GridFS is a specification for storing large objects in Mongo. + +The :mod:`gridfs` package is an implementation of GridFS on top of +:mod:`pymongo`, exposing a file-like interface. + +.. seealso:: The MongoDB documentation on `gridfs `_. +""" +from __future__ import annotations + +from gridfs.asynchronous.grid_file import ( + AsyncGridFS, + AsyncGridFSBucket, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE + +__all__ = [ + "AsyncGridFS", + "AsyncGridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "AsyncGridIn", + "AsyncGridOut", + "AsyncGridOutCursor", +] From f3ca1e03727ad0cc93495cbe4099dc2cafd43b4c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 28 Mar 2025 13:48:46 -0400 Subject: [PATCH 1832/2111] PYTHON-4833 - Add Collection bulk_write benchmarks (#2245) --- test/performance/async_perf_test.py | 22 ++++++++++++++++++++++ test/performance/perf_test.py | 22 ++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/test/performance/async_perf_test.py b/test/performance/async_perf_test.py index 2ceee45bf9..969437f9c9 100644 --- a/test/performance/async_perf_test.py +++ b/test/performance/async_perf_test.py @@ -362,6 +362,17 @@ async def do_task(self): await self.corpus.insert_many(self.documents, ordered=True) +class TestSmallDocCollectionBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + class TestSmallDocClientBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): @async_client_context.require_version_min(8, 0, 0, -24) async def asyncSetUp(self): @@ -412,6 +423,17 @@ async def do_task(self): await self.corpus.insert_many(self.documents, ordered=True) +class TestLargeDocCollectionBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + class TestLargeDocClientBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): @async_client_context.require_version_min(8, 0, 0, -24) async def asyncSetUp(self): diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 6e269e25b0..39487eff6d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -443,6 +443,17 @@ def do_task(self): self.corpus.insert_many(self.documents, ordered=True) +class TestSmallDocCollectionBulkInsert(SmallDocInsertTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + class TestSmallDocClientBulkInsert(SmallDocInsertTest, unittest.TestCase): @client_context.require_version_min(8, 0, 0, -24) def setUp(self): @@ -493,6 +504,17 @@ def do_task(self): self.corpus.insert_many(self.documents, ordered=True) +class TestLargeDocCollectionBulkInsert(LargeDocInsertTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + class TestLargeDocClientBulkInsert(LargeDocInsertTest, unittest.TestCase): @client_context.require_version_min(8, 0, 0, -24) def setUp(self): From e51ad27d20a523b834e91d8e80a2810594a58f9f Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 28 Mar 2025 15:02:40 -0400 Subject: [PATCH 1833/2111] PYTHON-4493 - Use asyncio protocols instead of sockets for network IO (#2151) Co-authored-by: Shane Harvey --- pymongo/asynchronous/encryption.py | 18 +- pymongo/asynchronous/mongo_client.py | 2 +- pymongo/asynchronous/monitor.py | 6 +- pymongo/asynchronous/network.py | 59 +- pymongo/asynchronous/pool.py | 361 ++---------- pymongo/network_layer.py | 482 ++++++++++++++-- pymongo/pool_shared.py | 546 ++++++++++++++++++ pymongo/synchronous/encryption.py | 14 +- pymongo/synchronous/monitor.py | 6 +- pymongo/synchronous/network.py | 55 +- pymongo/synchronous/pool.py | 311 +--------- pyproject.toml | 1 + test/asynchronous/test_auth_spec.py | 4 + test/asynchronous/test_bulk.py | 14 +- test/asynchronous/test_client.py | 13 +- test/asynchronous/test_client_bulk_write.py | 1 - .../test_connection_monitoring.py | 2 +- test/asynchronous/test_cursor.py | 5 +- test/asynchronous/test_pooling.py | 6 +- test/asynchronous/test_retryable_writes.py | 2 + test/asynchronous/utils.py | 1 + test/test_auth_spec.py | 4 + test/test_client.py | 3 +- test/test_client_bulk_write.py | 1 - test/test_cursor.py | 3 +- test/test_retryable_writes.py | 2 + test/utils.py | 1 + tools/synchro.py | 5 + 28 files changed, 1127 insertions(+), 801 deletions(-) create mode 100644 pymongo/pool_shared.py diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 68de42db84..71a694a619 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -64,11 +64,6 @@ from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.database import AsyncDatabase from pymongo.asynchronous.mongo_client import AsyncMongoClient -from pymongo.asynchronous.pool import ( - _configured_socket, - _get_timeout_details, - _raise_connection_failure, -) from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts @@ -80,12 +75,17 @@ NetworkTimeout, ServerSelectionTimeoutError, ) -from pymongo.network_layer import BLOCKING_IO_ERRORS, async_sendall +from pymongo.network_layer import async_socket_sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _async_configured_socket, + _get_timeout_details, + _raise_connection_failure, +) from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult -from pymongo.ssl_support import get_ssl_context +from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser_shared import parse_host from pymongo.write_concern import WriteConcern @@ -113,7 +113,7 @@ async def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: try: - return await _configured_socket(address, opts) + return await _async_configured_socket(address, opts) except Exception as exc: _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) @@ -196,7 +196,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: try: conn = await _connect_kms(address, opts) try: - await async_sendall(conn, message) + await async_socket_sendall(conn, message) while kms_context.bytes_needed > 0: # CSOT: update timeout. conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 43dbd49f76..16753420c0 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2079,7 +2079,7 @@ async def _cleanup_cursor_lock( # exhausted the result set we *must* close the socket # to stop the server from sending more data. assert conn_mgr.conn is not None - conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + await conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) else: await self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) if conn_mgr: diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index 1b0799e1c4..479ca1a314 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -36,7 +36,11 @@ from pymongo.server_description import ServerDescription if TYPE_CHECKING: - from pymongo.asynchronous.pool import AsyncConnection, Pool, _CancellationContext + from pymongo.asynchronous.pool import ( # type: ignore[attr-defined] + AsyncConnection, + Pool, + _CancellationContext, + ) from pymongo.asynchronous.settings import TopologySettings from pymongo.asynchronous.topology import Topology diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index e529a52ee9..5f14bef45d 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -17,7 +17,6 @@ import datetime import logging -import time from typing import ( TYPE_CHECKING, Any, @@ -31,20 +30,16 @@ from bson import _decode_all_selective from pymongo import _csot, helpers_shared, message -from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.compression_support import _NO_COMPRESSION, decompress +from pymongo.compression_support import _NO_COMPRESSION from pymongo.errors import ( NotPrimaryError, OperationFailure, - ProtocolError, ) from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log -from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.message import _OpMsg from pymongo.monitoring import _is_speculative_authenticate from pymongo.network_layer import ( - _UNPACK_COMPRESSION_HEADER, - _UNPACK_HEADER, - async_receive_data, + async_receive_message, async_sendall, ) @@ -194,13 +189,13 @@ async def command( ) try: - await async_sendall(conn.conn, msg) + await async_sendall(conn.conn.get_conn, msg) if use_op_msg and unacknowledged: # Unacknowledged, fake a successful command response. reply = None response_doc: _DocumentOut = {"ok": 1} else: - reply = await receive_message(conn, request_id) + reply = await async_receive_message(conn, request_id) conn.more_to_come = reply.more_to_come unpacked_docs = reply.unpack_response( codec_options=codec_options, user_fields=user_fields @@ -301,47 +296,3 @@ async def command( ) return response_doc # type: ignore[return-value] - - -async def receive_message( - conn: AsyncConnection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE -) -> Union[_OpReply, _OpMsg]: - """Receive a raw BSON message or raise socket.error.""" - if _csot.get_timeout(): - deadline = _csot.get_deadline() - else: - timeout = conn.conn.gettimeout() - if timeout: - deadline = time.monotonic() + timeout - else: - deadline = None - # Ignore the response's request id. - length, _, response_to, op_code = _UNPACK_HEADER(await async_receive_data(conn, 16, deadline)) - # No request_id for exhaust cursor "getMore". - if request_id is not None: - if request_id != response_to: - raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") - if length <= 16: - raise ProtocolError( - f"Message length ({length!r}) not longer than standard message header size (16)" - ) - if length > max_message_size: - raise ProtocolError( - f"Message length ({length!r}) is larger than server max " - f"message size ({max_message_size!r})" - ) - if op_code == 2012: - op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - await async_receive_data(conn, 9, deadline) - ) - data = decompress(await async_receive_data(conn, length - 25, deadline), compressor_id) - else: - data = await async_receive_data(conn, length - 16, deadline) - - try: - unpack_reply = _UNPACK_REPLY[op_code] - except KeyError: - raise ProtocolError( - f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" - ) from None - return unpack_reply(data) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index d06c528e78..6ebdb5cb20 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -14,14 +14,10 @@ from __future__ import annotations -import asyncio import collections import contextlib -import functools import logging import os -import socket -import ssl import sys import time import weakref @@ -40,8 +36,8 @@ from bson import DEFAULT_CODEC_OPTIONS from pymongo import _csot, helpers_shared from pymongo.asynchronous.client_session import _validate_session_write_concern -from pymongo.asynchronous.helpers import _getaddrinfo, _handle_reauth -from pymongo.asynchronous.network import command, receive_message +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.asynchronous.network import command from pymongo.common import ( MAX_BSON_SIZE, MAX_MESSAGE_SIZE, @@ -52,16 +48,13 @@ from pymongo.errors import ( # type:ignore[attr-defined] AutoReconnect, ConfigurationError, - ConnectionFailure, DocumentTooLarge, ExecutionTimeout, InvalidOperation, - NetworkTimeout, NotPrimaryError, OperationFailure, PyMongoError, WaitQueueTimeoutError, - _CertificateError, ) from pymongo.hello import Hello, HelloCompat from pymongo.lock import ( @@ -79,13 +72,20 @@ ConnectionCheckOutFailedReason, ConnectionClosedReason, ) -from pymongo.network_layer import async_sendall +from pymongo.network_layer import AsyncNetworkingInterface, async_receive_message, async_sendall from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _CancellationContext, + _configured_protocol_interface, + _get_timeout_details, + _raise_connection_failure, + format_timeout_details, +) from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import HAS_SNI, SSLError +from pymongo.ssl_support import SSLError if TYPE_CHECKING: from bson import CodecOptions @@ -99,7 +99,6 @@ ZstdContext, ) from pymongo.message import _OpMsg, _OpReply - from pymongo.pyopenssl_context import _sslConn from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _ServerMode from pymongo.typings import _Address, _CollationIn @@ -123,133 +122,6 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 _IS_SYNC = False -_MAX_TCP_KEEPIDLE = 120 -_MAX_TCP_KEEPINTVL = 10 -_MAX_TCP_KEEPCNT = 9 - -if sys.platform == "win32": - try: - import _winreg as winreg - except ImportError: - import winreg - - def _query(key, name, default): - try: - value, _ = winreg.QueryValueEx(key, name) - # Ensure the value is a number or raise ValueError. - return int(value) - except (OSError, ValueError): - # QueryValueEx raises OSError when the key does not exist (i.e. - # the system is using the Windows default value). - return default - - try: - with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" - ) as key: - _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) - _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) - except OSError: - # We could not check the default values because winreg.OpenKey failed. - # Assume the system is using the default values. - _WINDOWS_TCP_IDLE_MS = 7200000 - _WINDOWS_TCP_INTERVAL_MS = 1000 - - def _set_keepalive_times(sock): - idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) - if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: - sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) - -else: - - def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: - if hasattr(socket, tcp_option): - sockopt = getattr(socket, tcp_option) - try: - # PYTHON-1350 - NetBSD doesn't implement getsockopt for - # TCP_KEEPIDLE and friends. Don't attempt to set the - # values there. - default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) - if default > max_value: - sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except OSError: - pass - - def _set_keepalive_times(sock: socket.socket) -> None: - _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) - - -def _raise_connection_failure( - address: Any, - error: Exception, - msg_prefix: Optional[str] = None, - timeout_details: Optional[dict[str, float]] = None, -) -> NoReturn: - """Convert a socket.error to ConnectionFailure and raise it.""" - host, port = address - # If connecting to a Unix socket, port will be None. - if port is not None: - msg = "%s:%d: %s" % (host, port, error) - else: - msg = f"{host}: {error}" - if msg_prefix: - msg = msg_prefix + msg - if "configured timeouts" not in msg: - msg += format_timeout_details(timeout_details) - if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) from error - elif isinstance(error, SSLError) and "timed out" in str(error): - # Eventlet does not distinguish TLS network timeouts from other - # SSLErrors (https://github.com/eventlet/eventlet/issues/692). - # Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised. - raise NetworkTimeout(msg) from error - else: - raise AutoReconnect(msg) from error - - -def _get_timeout_details(options: PoolOptions) -> dict[str, float]: - details = {} - timeout = _csot.get_timeout() - socket_timeout = options.socket_timeout - connect_timeout = options.connect_timeout - if timeout: - details["timeoutMS"] = timeout * 1000 - if socket_timeout and not timeout: - details["socketTimeoutMS"] = socket_timeout * 1000 - if connect_timeout: - details["connectTimeoutMS"] = connect_timeout * 1000 - return details - - -def format_timeout_details(details: Optional[dict[str, float]]) -> str: - result = "" - if details: - result += " (configured timeouts:" - for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: - if timeout in details: - result += f" {timeout}: {details[timeout]}ms," - result = result[:-1] - result += ")" - return result - - -class _CancellationContext: - def __init__(self) -> None: - self._cancelled = False - - def cancel(self) -> None: - """Cancel this context.""" - self._cancelled = True - - @property - def cancelled(self) -> bool: - """Was cancel called?""" - return self._cancelled - class AsyncConnection: """Store a connection with some metadata. @@ -261,7 +133,11 @@ class AsyncConnection: """ def __init__( - self, conn: Union[socket.socket, _sslConn], pool: Pool, address: tuple[str, int], id: int + self, + conn: AsyncNetworkingInterface, + pool: Pool, + address: tuple[str, int], + id: int, ): self.pool_ref = weakref.ref(pool) self.conn = conn @@ -318,7 +194,7 @@ def set_conn_timeout(self, timeout: Optional[float]) -> None: if timeout == self.last_timeout: return self.last_timeout = timeout - self.conn.settimeout(timeout) + self.conn.get_conn.settimeout(timeout) def apply_timeout( self, client: AsyncMongoClient, cmd: Optional[MutableMapping[str, Any]] @@ -364,7 +240,7 @@ async def unpin(self) -> None: if pool: await pool.checkin(self) else: - self.close_conn(ConnectionClosedReason.STALE) + await self.close_conn(ConnectionClosedReason.STALE) def hello_cmd(self) -> dict[str, Any]: # Handshake spec requires us to use OP_MSG+hello command for the @@ -559,7 +435,7 @@ async def command( raise # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. except BaseException as error: - self._raise_connection_failure(error) + await self._raise_connection_failure(error) async def send_message(self, message: bytes, max_doc_size: int) -> None: """Send a raw BSON message or raise ConnectionFailure. @@ -573,10 +449,10 @@ async def send_message(self, message: bytes, max_doc_size: int) -> None: ) try: - await async_sendall(self.conn, message) + await async_sendall(self.conn.get_conn, message) # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: - self._raise_connection_failure(error) + await self._raise_connection_failure(error) async def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: """Receive a raw BSON message or raise ConnectionFailure. @@ -584,10 +460,10 @@ async def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _O If any exception is raised, the socket is closed. """ try: - return await receive_message(self, request_id, self.max_message_size) + return await async_receive_message(self, request_id, self.max_message_size) # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: - self._raise_connection_failure(error) + await self._raise_connection_failure(error) def _raise_if_not_writable(self, unacknowledged: bool) -> None: """Raise NotPrimaryError on unacknowledged write if this socket is not @@ -673,11 +549,11 @@ def validate_session( "Can only use session with the AsyncMongoClient that started it" ) - def close_conn(self, reason: Optional[str]) -> None: + async def close_conn(self, reason: Optional[str]) -> None: """Close this connection with a reason.""" if self.closed: return - self._close_conn() + await self._close_conn() if reason: if self.enabled_for_cmap: assert self.listeners is not None @@ -694,7 +570,7 @@ def close_conn(self, reason: Optional[str]) -> None: error=reason, ) - def _close_conn(self) -> None: + async def _close_conn(self) -> None: """Close this connection.""" if self.closed: return @@ -703,13 +579,16 @@ def _close_conn(self) -> None: # Note: We catch exceptions to avoid spurious errors on interpreter # shutdown. try: - self.conn.close() + await self.conn.close() except Exception: # noqa: S110 pass def conn_closed(self) -> bool: """Return True if we know socket has been closed, False otherwise.""" - return self.socket_checker.socket_closed(self.conn) + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() def send_cluster_time( self, @@ -736,7 +615,7 @@ def idle_time_seconds(self) -> float: """Seconds since this socket was last checked into its pool.""" return time.monotonic() - self.last_checkin_time - def _raise_connection_failure(self, error: BaseException) -> NoReturn: + async def _raise_connection_failure(self, error: BaseException) -> NoReturn: # Catch *all* exceptions from socket methods and close the socket. In # regular Python, socket operations only raise socket.error, even if # the underlying cause was a Ctrl-C: a signal raised during socket.recv @@ -756,7 +635,7 @@ def _raise_connection_failure(self, error: BaseException) -> NoReturn: reason = None else: reason = ConnectionClosedReason.ERROR - self.close_conn(reason) + await self.close_conn(reason) # SSLError from PyOpenSSL inherits directly from Exception. if isinstance(error, (IOError, OSError, SSLError)): details = _get_timeout_details(self.opts) @@ -781,145 +660,6 @@ def __repr__(self) -> str: ) -async def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: - """Given (host, port) and PoolOptions, connect and return a socket object. - - Can raise socket.error. - - This is a modified version of create_connection from CPython >= 2.7. - """ - host, port = address - - # Check if dealing with a unix domain socket - if host.endswith(".sock"): - if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported on this system") - sock = socket.socket(socket.AF_UNIX) - # SOCK_CLOEXEC not supported for Unix sockets. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.connect(host) - return sock - except OSError: - sock.close() - raise - - # Don't try IPv6 if we don't support it. Also skip it if host - # is 'localhost' (::1 is fine). Avoids slow connect issues - # like PYTHON-356. - family = socket.AF_INET - if socket.has_ipv6 and host != "localhost": - family = socket.AF_UNSPEC - - err = None - for res in await _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined] - af, socktype, proto, dummy, sa = res - # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited - # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 - # all file descriptors are created non-inheritable. See PEP 446. - try: - sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) - except OSError: - # Can SOCK_CLOEXEC be defined even if the kernel doesn't support - # it? - sock = socket.socket(af, socktype, proto) - # Fallback when SOCK_CLOEXEC isn't available. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - # CSOT: apply timeout to socket connect. - timeout = _csot.remaining() - if timeout is None: - timeout = options.connect_timeout - elif timeout <= 0: - raise socket.timeout("timed out") - sock.settimeout(timeout) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) - _set_keepalive_times(sock) - sock.connect(sa) - return sock - except OSError as e: - err = e - sock.close() - - if err is not None: - raise err - else: - # This likely means we tried to connect to an IPv6 only - # host with an OS/kernel or Python interpreter that doesn't - # support IPv6. The test case is Jython2.5.1 which doesn't - # support IPv6 at all. - raise OSError("getaddrinfo failed") - - -async def _configured_socket( - address: _Address, options: PoolOptions -) -> Union[socket.socket, _sslConn]: - """Given (host, port) and PoolOptions, return a configured socket. - - Can raise socket.error, ConnectionFailure, or _CertificateError. - - Sets socket's SSL and timeout options. - """ - sock = await _create_connection(address, options) - ssl_context = options._ssl_context - - if ssl_context is None: - sock.settimeout(options.socket_timeout) - return sock - - host = address[0] - try: - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if HAS_SNI: - if _IS_SYNC: - ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) - else: - if hasattr(ssl_context, "a_wrap_socket"): - ssl_sock = await ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc] - else: - loop = asyncio.get_running_loop() - ssl_sock = await loop.run_in_executor( - None, - functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc] - ) - else: - if _IS_SYNC: - ssl_sock = ssl_context.wrap_socket(sock) - else: - if hasattr(ssl_context, "a_wrap_socket"): - ssl_sock = await ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc] - else: - loop = asyncio.get_running_loop() - ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc] - except _CertificateError: - sock.close() - # Raise _CertificateError directly like we do after match_hostname - # below. - raise - except (OSError, SSLError) as exc: - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - details = _get_timeout_details(options) - _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) - if ( - ssl_context.verify_mode - and not ssl_context.check_hostname - and not options.tls_allow_invalid_hostnames - ): - try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined] - except _CertificateError: - ssl_sock.close() - raise - - ssl_sock.settimeout(options.socket_timeout) - return ssl_sock - - class _PoolClosedError(PyMongoError): """Internal error raised when a thread tries to get a connection from a closed pool. @@ -1121,7 +861,7 @@ async def _reset( # publishing the PoolClearedEvent. if close: for conn in sockets: - conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) if self.enabled_for_cmap: assert listeners is not None listeners.publish_pool_closed(self.address) @@ -1152,7 +892,7 @@ async def _reset( serviceId=service_id, ) for conn in sockets: - conn.close_conn(ConnectionClosedReason.STALE) + await conn.close_conn(ConnectionClosedReason.STALE) async def update_is_writable(self, is_writable: Optional[bool]) -> None: """Updates the is_writable attribute on all sockets currently in the @@ -1197,7 +937,7 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds ): conn = self.conns.pop() - conn.close_conn(ConnectionClosedReason.IDLE) + await conn.close_conn(ConnectionClosedReason.IDLE) while True: async with self.size_cond: @@ -1221,7 +961,7 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. if self.gen.get_overall() != reference_generation: - conn.close_conn(ConnectionClosedReason.STALE) + await conn.close_conn(ConnectionClosedReason.STALE) return self.conns.appendleft(conn) self.active_contexts.discard(conn.cancel_context) @@ -1266,7 +1006,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A ) try: - sock = await _configured_socket(self.address, self.opts) + networking_interface = await _configured_protocol_interface(self.address, self.opts) # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: async with self.lock: @@ -1293,7 +1033,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A raise - conn = AsyncConnection(sock, self, self.address, conn_id) # type: ignore[arg-type] + conn = AsyncConnection(networking_interface, self, self.address, conn_id) # type: ignore[arg-type] async with self.lock: self.active_contexts.add(conn.cancel_context) self.active_contexts.discard(tmp_context) @@ -1311,7 +1051,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A except BaseException: async with self.lock: self.active_contexts.discard(conn.cancel_context) - conn.close_conn(ConnectionClosedReason.ERROR) + await conn.close_conn(ConnectionClosedReason.ERROR) raise if handler: @@ -1509,7 +1249,7 @@ async def _get_conn( except IndexError: self._pending += 1 if conn: # We got a socket from the pool - if self._perished(conn): + if await self._perished(conn): conn = None continue else: # We need to create a new connection @@ -1523,7 +1263,7 @@ async def _get_conn( except BaseException: if conn: # We checked out a socket but authentication failed. - conn.close_conn(ConnectionClosedReason.ERROR) + await conn.close_conn(ConnectionClosedReason.ERROR) async with self.size_cond: self.requests -= 1 if incremented: @@ -1583,7 +1323,7 @@ async def checkin(self, conn: AsyncConnection) -> None: await self.reset_without_pause() else: if self.closed: - conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) elif conn.closed: # CMAP requires the closed event be emitted after the check in. if self.enabled_for_cmap: @@ -1607,7 +1347,7 @@ async def checkin(self, conn: AsyncConnection) -> None: # Hold the lock to ensure this section does not race with # Pool.reset(). if self.stale_generation(conn.generation, conn.service_id): - conn.close_conn(ConnectionClosedReason.STALE) + await conn.close_conn(ConnectionClosedReason.STALE) else: conn.update_last_checkin_time() conn.update_is_writable(bool(self.is_writable)) @@ -1625,7 +1365,7 @@ async def checkin(self, conn: AsyncConnection) -> None: self.operation_count -= 1 self.size_cond.notify() - def _perished(self, conn: AsyncConnection) -> bool: + async def _perished(self, conn: AsyncConnection) -> bool: """Return True and close the connection if it is "perished". This side-effecty function checks if this socket has been idle for @@ -1645,18 +1385,18 @@ def _perished(self, conn: AsyncConnection) -> bool: self.opts.max_idle_time_seconds is not None and idle_time_seconds > self.opts.max_idle_time_seconds ): - conn.close_conn(ConnectionClosedReason.IDLE) + await conn.close_conn(ConnectionClosedReason.IDLE) return True if self._check_interval_seconds is not None and ( self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds ): if conn.conn_closed(): - conn.close_conn(ConnectionClosedReason.ERROR) + await conn.close_conn(ConnectionClosedReason.ERROR) return True if self.stale_generation(conn.generation, conn.service_id): - conn.close_conn(ConnectionClosedReason.STALE) + await conn.close_conn(ConnectionClosedReason.STALE) return True return False @@ -1704,5 +1444,6 @@ def __del__(self) -> None: # Avoid ResourceWarnings in Python 3 # Close all sockets without calling reset() or close() because it is # not safe to acquire a lock in __del__. - for conn in self.conns: - conn.close_conn(None) + if _IS_SYNC: + for conn in self.conns: + conn.close_conn(None) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 4512aba59f..e287655c61 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -16,21 +16,26 @@ from __future__ import annotations import asyncio +import collections import errno import socket import struct import sys import time -from asyncio import AbstractEventLoop, Future +from asyncio import AbstractEventLoop, BaseTransport, BufferedProtocol, Future, Transport from typing import ( TYPE_CHECKING, + Any, Optional, Union, ) from pymongo import _csot, ssl_support from pymongo._asyncio_task import create_task -from pymongo.errors import _OperationCancelled +from pymongo.common import MAX_MESSAGE_SIZE +from pymongo.compression_support import decompress +from pymongo.errors import ProtocolError, _OperationCancelled +from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply from pymongo.socket_checker import _errno_from_exception try: @@ -69,13 +74,15 @@ BLOCKING_IO_ERRORS = (BlockingIOError, BLOCKING_IO_LOOKUP_ERROR, *ssl_support.BLOCKING_IO_ERRORS) -async def async_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: +# These socket-based I/O methods are for KMS requests and any other network operations that do not use +# the MongoDB wire protocol +async def async_socket_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: timeout = sock.gettimeout() sock.settimeout(0.0) loop = asyncio.get_running_loop() try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): - await asyncio.wait_for(_async_sendall_ssl(sock, buf, loop), timeout=timeout) + await asyncio.wait_for(_async_socket_sendall_ssl(sock, buf, loop), timeout=timeout) else: await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] except asyncio.TimeoutError as exc: @@ -87,7 +94,7 @@ async def async_sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> Non if sys.platform != "win32": - async def _async_sendall_ssl( + async def _async_socket_sendall_ssl( sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop ) -> None: view = memoryview(buf) @@ -130,7 +137,7 @@ def _is_ready(fut: Future) -> None: loop.remove_reader(fd) loop.remove_writer(fd) - async def _async_receive_ssl( + async def _async_socket_receive_ssl( conn: _sslConn, length: int, loop: AbstractEventLoop, once: Optional[bool] = False ) -> memoryview: mv = memoryview(bytearray(length)) @@ -184,7 +191,7 @@ def _is_ready(fut: Future) -> None: # The default Windows asyncio event loop does not support loop.add_reader/add_writer: # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support # Note: In PYTHON-4493 we plan to replace this code with asyncio streams. - async def _async_sendall_ssl( + async def _async_socket_sendall_ssl( sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop ) -> None: view = memoryview(buf) @@ -205,7 +212,7 @@ async def _async_sendall_ssl( backoff = min(backoff * 2, 0.512) total_sent += sent - async def _async_receive_ssl( + async def _async_socket_receive_ssl( conn: _sslConn, length: int, dummy: AbstractEventLoop, once: Optional[bool] = False ) -> memoryview: mv = memoryview(bytearray(length)) @@ -244,52 +251,6 @@ async def _poll_cancellation(conn: AsyncConnection) -> None: await asyncio.sleep(_POLL_TIMEOUT) -async def async_receive_data( - conn: AsyncConnection, length: int, deadline: Optional[float] -) -> memoryview: - sock = conn.conn - sock_timeout = sock.gettimeout() - timeout: Optional[Union[float, int]] - if deadline: - # When the timeout has expired perform one final check to - # see if the socket is readable. This helps avoid spurious - # timeouts on AWS Lambda and other FaaS environments. - timeout = max(deadline - time.monotonic(), 0) - else: - timeout = sock_timeout - - sock.settimeout(0.0) - loop = asyncio.get_running_loop() - cancellation_task = create_task(_poll_cancellation(conn)) - try: - if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): - read_task = create_task(_async_receive_ssl(sock, length, loop)) # type: ignore[arg-type] - else: - read_task = create_task(_async_receive(sock, length, loop)) # type: ignore[arg-type] - tasks = [read_task, cancellation_task] - try: - done, pending = await asyncio.wait( - tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED - ) - for task in pending: - task.cancel() - if pending: - await asyncio.wait(pending) - if len(done) == 0: - raise socket.timeout("timed out") - if read_task in done: - return read_task.result() - raise _OperationCancelled("operation cancelled") - except asyncio.CancelledError: - for task in tasks: - task.cancel() - await asyncio.wait(tasks) - raise - - finally: - sock.settimeout(sock_timeout) - - async def async_receive_data_socket( sock: Union[socket.socket, _sslConn], length: int ) -> memoryview: @@ -301,18 +262,23 @@ async def async_receive_data_socket( try: if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): return await asyncio.wait_for( - _async_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] + _async_socket_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] timeout=timeout, ) else: - return await asyncio.wait_for(_async_receive(sock, length, loop), timeout=timeout) # type: ignore[arg-type] + return await asyncio.wait_for( + _async_socket_receive(sock, length, loop), # type: ignore[arg-type] + timeout=timeout, + ) except asyncio.TimeoutError as err: raise socket.timeout("timed out") from err finally: sock.settimeout(sock_timeout) -async def _async_receive(conn: socket.socket, length: int, loop: AbstractEventLoop) -> memoryview: +async def _async_socket_receive( + conn: socket.socket, length: int, loop: AbstractEventLoop +) -> memoryview: mv = memoryview(bytearray(length)) bytes_read = 0 while bytes_read < length: @@ -328,7 +294,7 @@ async def _async_receive(conn: socket.socket, length: int, loop: AbstractEventLo def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: """Block until at least one byte is read, or a timeout, or a cancel.""" - sock = conn.conn + sock = conn.conn.sock timed_out = False # Check if the connection's socket has been manually closed if sock.fileno() == -1: @@ -413,3 +379,403 @@ def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> me conn.set_conn_timeout(orig_timeout) return mv + + +class NetworkingInterfaceBase: + def __init__(self, conn: Any): + self.conn = conn + + @property + def gettimeout(self) -> Any: + raise NotImplementedError + + def settimeout(self, timeout: float | None) -> None: + raise NotImplementedError + + def close(self) -> Any: + raise NotImplementedError + + def is_closing(self) -> bool: + raise NotImplementedError + + @property + def get_conn(self) -> Any: + raise NotImplementedError + + @property + def sock(self) -> Any: + raise NotImplementedError + + +class AsyncNetworkingInterface(NetworkingInterfaceBase): + def __init__(self, conn: tuple[Transport, PyMongoProtocol]): + super().__init__(conn) + + @property + def gettimeout(self) -> float | None: + return self.conn[1].gettimeout + + def settimeout(self, timeout: float | None) -> None: + self.conn[1].settimeout(timeout) + + async def close(self) -> None: + self.conn[1].close() + await self.conn[1].wait_closed() + + def is_closing(self) -> bool: + return self.conn[0].is_closing() + + @property + def get_conn(self) -> PyMongoProtocol: + return self.conn[1] + + @property + def sock(self) -> socket.socket: + return self.conn[0].get_extra_info("socket") + + +class NetworkingInterface(NetworkingInterfaceBase): + def __init__(self, conn: Union[socket.socket, _sslConn]): + super().__init__(conn) + + def gettimeout(self) -> float | None: + return self.conn.gettimeout() + + def settimeout(self, timeout: float | None) -> None: + self.conn.settimeout(timeout) + + def close(self) -> None: + self.conn.close() + + def is_closing(self) -> bool: + return self.conn.is_closing() + + @property + def get_conn(self) -> Union[socket.socket, _sslConn]: + return self.conn + + @property + def sock(self) -> Union[socket.socket, _sslConn]: + return self.conn + + def fileno(self) -> int: + return self.conn.fileno() + + def recv_into(self, buffer: bytes) -> int: + return self.conn.recv_into(buffer) + + +class PyMongoProtocol(BufferedProtocol): + def __init__(self, timeout: Optional[float] = None): + self.transport: Transport = None # type: ignore[assignment] + # Each message is reader in 2-3 parts: header, compression header, and message body + # The message buffer is allocated after the header is read. + self._header = memoryview(bytearray(16)) + self._header_index = 0 + self._compression_header = memoryview(bytearray(9)) + self._compression_index = 0 + self._message: Optional[memoryview] = None + self._message_index = 0 + # State. TODO: replace booleans with an enum? + self._expecting_header = True + self._expecting_compression = False + self._message_size = 0 + self._op_code = 0 + self._connection_lost = False + self._read_waiter: Optional[Future] = None + self._timeout = timeout + self._is_compressed = False + self._compressor_id: Optional[int] = None + self._max_message_size = MAX_MESSAGE_SIZE + self._response_to: Optional[int] = None + self._closed = asyncio.get_running_loop().create_future() + self._pending_messages: collections.deque[Future] = collections.deque() + self._done_messages: collections.deque[Future] = collections.deque() + + def settimeout(self, timeout: float | None) -> None: + self._timeout = timeout + + @property + def gettimeout(self) -> float | None: + """The configured timeout for the socket that underlies our protocol pair.""" + return self._timeout + + def connection_made(self, transport: BaseTransport) -> None: + """Called exactly once when a connection is made. + The transport argument is the transport representing the write side of the connection. + """ + self.transport = transport # type: ignore[assignment] + self.transport.set_write_buffer_limits(MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE) + + async def write(self, message: bytes) -> None: + """Write a message to this connection's transport.""" + if self.transport.is_closing(): + raise OSError("Connection is closed") + self.transport.write(message) + self.transport.resume_reading() + + async def read(self, request_id: Optional[int], max_message_size: int) -> tuple[bytes, int]: + """Read a single MongoDB Wire Protocol message from this connection.""" + if self.transport: + try: + self.transport.resume_reading() + # Known bug in SSL Protocols, fixed in Python 3.11: https://github.com/python/cpython/issues/89322 + except AttributeError: + raise OSError("connection is already closed") from None + self._max_message_size = max_message_size + if self._done_messages: + message = await self._done_messages.popleft() + else: + if self.transport and self.transport.is_closing(): + raise OSError("connection is already closed") + read_waiter = asyncio.get_running_loop().create_future() + self._pending_messages.append(read_waiter) + try: + message = await read_waiter + finally: + if read_waiter in self._done_messages: + self._done_messages.remove(read_waiter) + if message: + op_code, compressor_id, response_to, data = message + # No request_id for exhaust cursor "getMore". + if request_id is not None: + if request_id != response_to: + raise ProtocolError( + f"Got response id {response_to!r} but expected {request_id!r}" + ) + if compressor_id is not None: + data = decompress(data, compressor_id) + return data, op_code + raise OSError("connection closed") + + def get_buffer(self, sizehint: int) -> memoryview: + """Called to allocate a new receive buffer. + The asyncio loop calls this method expecting to receive a non-empty buffer to fill with data. + If any data does not fit into the returned buffer, this method will be called again until + either no data remains or an empty buffer is returned. + """ + # Due to a bug, Python <=3.11 will call get_buffer() even after we raise + # ProtocolError in buffer_updated() and call connection_lost(). We allocate + # a temp buffer to drain the waiting data. + if self._connection_lost: + if not self._message: + self._message = memoryview(bytearray(2**14)) + return self._message + # TODO: optimize this by caching pointers to the buffers. + # return self._buffer[self._index:] + if self._expecting_header: + return self._header[self._header_index :] + if self._expecting_compression: + return self._compression_header[self._compression_index :] + return self._message[self._message_index :] # type: ignore[index] + + def buffer_updated(self, nbytes: int) -> None: + """Called when the buffer was updated with the received data""" + # Wrote 0 bytes into a non-empty buffer, signal connection closed + if nbytes == 0: + self.close(OSError("connection closed")) + return + if self._connection_lost: + return + if self._expecting_header: + self._header_index += nbytes + if self._header_index >= 16: + self._expecting_header = False + try: + ( + self._message_size, + self._op_code, + self._response_to, + self._expecting_compression, + ) = self.process_header() + except ProtocolError as exc: + self.close(exc) + return + self._message = memoryview(bytearray(self._message_size)) + return + if self._expecting_compression: + self._compression_index += nbytes + if self._compression_index >= 9: + self._expecting_compression = False + self._op_code, self._compressor_id = self.process_compression_header() + return + + self._message_index += nbytes + if self._message_index >= self._message_size: + self._expecting_header = True + # Pause reading to avoid storing an arbitrary number of messages in memory. + self.transport.pause_reading() + if self._pending_messages: + result = self._pending_messages.popleft() + else: + result = asyncio.get_running_loop().create_future() + # Future has been cancelled, close this connection + if result.done(): + self.close(None) + return + # Necessary values to reconstruct and verify message + result.set_result( + (self._op_code, self._compressor_id, self._response_to, self._message) + ) + self._done_messages.append(result) + # Reset internal state to expect a new message + self._header_index = 0 + self._compression_index = 0 + self._message_index = 0 + self._message_size = 0 + self._message = None + self._op_code = 0 + self._compressor_id = None + self._response_to = None + + def process_header(self) -> tuple[int, int, int, bool]: + """Unpack a MongoDB Wire Protocol header.""" + length, _, response_to, op_code = _UNPACK_HEADER(self._header) + expecting_compression = False + if op_code == 2012: # OP_COMPRESSED + if length <= 25: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard OP_COMPRESSED message header size (25)" + ) + expecting_compression = True + length -= 9 + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > self._max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({self._max_message_size!r})" + ) + + return length - 16, op_code, response_to, expecting_compression + + def process_compression_header(self) -> tuple[int, int]: + """Unpack a MongoDB Wire Protocol compression header.""" + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(self._compression_header) + return op_code, compressor_id + + def _resolve_pending_messages(self, exc: Optional[Exception] = None) -> None: + pending = list(self._pending_messages) + for msg in pending: + if not msg.done(): + if exc is None: + msg.set_result(None) + else: + msg.set_exception(exc) + self._done_messages.append(msg) + + def close(self, exc: Optional[Exception] = None) -> None: + self.transport.abort() + self._resolve_pending_messages(exc) + self._connection_lost = True + + def connection_lost(self, exc: Optional[Exception] = None) -> None: + self._resolve_pending_messages(exc) + if not self._closed.done(): + self._closed.set_result(None) + + async def wait_closed(self) -> None: + await self._closed + + +async def async_sendall(conn: PyMongoProtocol, buf: bytes) -> None: + try: + await asyncio.wait_for(conn.write(buf), timeout=conn.gettimeout) + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc + + +async def async_receive_message( + conn: AsyncConnection, + request_id: Optional[int], + max_message_size: int = MAX_MESSAGE_SIZE, +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + timeout: Optional[Union[float, int]] + timeout = conn.conn.gettimeout + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + if deadline: + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + timeout = max(deadline - time.monotonic(), 0) + + cancellation_task = create_task(_poll_cancellation(conn)) + read_task = create_task(conn.conn.get_conn.read(request_id, max_message_size)) + tasks = [read_task, cancellation_task] + try: + done, pending = await asyncio.wait( + tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED + ) + for task in pending: + task.cancel() + if pending: + await asyncio.wait(pending) + if len(done) == 0: + raise socket.timeout("timed out") + if read_task in done: + data, op_code = read_task.result() + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) + raise _OperationCancelled("operation cancelled") + except asyncio.CancelledError: + for task in tasks: + task.cancel() + await asyncio.wait(tasks) + raise + + +def receive_message( + conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + timeout = conn.conn.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + # Ignore the response's request id. + length, _, response_to, op_code = _UNPACK_HEADER(receive_data(conn, 16, deadline)) + # No request_id for exhaust cursor "getMore". + if request_id is not None: + if request_id != response_to: + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({max_message_size!r})" + ) + if op_code == 2012: + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) + data = decompress(receive_data(conn, length - 25, deadline), compressor_id) + else: + data = receive_data(conn, length - 16, deadline) + + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py new file mode 100644 index 0000000000..42b330b1e2 --- /dev/null +++ b/pymongo/pool_shared.py @@ -0,0 +1,546 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Pool utilities and shared helper methods.""" +from __future__ import annotations + +import asyncio +import functools +import socket +import ssl +import sys +from typing import ( + TYPE_CHECKING, + Any, + NoReturn, + Optional, + Union, +) + +from pymongo import _csot +from pymongo.asynchronous.helpers import _getaddrinfo +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConnectionFailure, + NetworkTimeout, + _CertificateError, +) +from pymongo.network_layer import AsyncNetworkingInterface, NetworkingInterface, PyMongoProtocol +from pymongo.pool_options import PoolOptions +from pymongo.ssl_support import HAS_SNI, SSLError + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. + def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 + """Dummy function for platforms that don't provide fcntl.""" + + +_MAX_TCP_KEEPIDLE = 120 +_MAX_TCP_KEEPINTVL = 10 +_MAX_TCP_KEEPCNT = 9 + +if sys.platform == "win32": + try: + import _winreg as winreg + except ImportError: + import winreg + + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). + return default + + try: + with winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + +else: + + def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: + if hasattr(socket, tcp_option): + sockopt = getattr(socket, tcp_option) + try: + # PYTHON-1350 - NetBSD doesn't implement getsockopt for + # TCP_KEEPIDLE and friends. Don't attempt to set the + # values there. + default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) + if default > max_value: + sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) + except OSError: + pass + + def _set_keepalive_times(sock: socket.socket) -> None: + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) + + +def _raise_connection_failure( + address: Any, + error: Exception, + msg_prefix: Optional[str] = None, + timeout_details: Optional[dict[str, float]] = None, +) -> NoReturn: + """Convert a socket.error to ConnectionFailure and raise it.""" + host, port = address + # If connecting to a Unix socket, port will be None. + if port is not None: + msg = "%s:%d: %s" % (host, port, error) + else: + msg = f"{host}: {error}" + if msg_prefix: + msg = msg_prefix + msg + if "configured timeouts" not in msg: + msg += format_timeout_details(timeout_details) + if isinstance(error, socket.timeout): + raise NetworkTimeout(msg) from error + elif isinstance(error, SSLError) and "timed out" in str(error): + # Eventlet does not distinguish TLS network timeouts from other + # SSLErrors (https://github.com/eventlet/eventlet/issues/692). + # Luckily, we can work around this limitation because the phrase + # 'timed out' appears in all the timeout related SSLErrors raised. + raise NetworkTimeout(msg) from error + else: + raise AutoReconnect(msg) from error + + +def _get_timeout_details(options: PoolOptions) -> dict[str, float]: + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details + + +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result + + +class _CancellationContext: + def __init__(self) -> None: + self._cancelled = False + + def cancel(self) -> None: + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self) -> bool: + """Was cancel called?""" + return self._cancelled + + +async def _async_create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a raw socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.connect(host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in await _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + sock.connect(sa) + return sock + except OSError as e: + err = e + sock.close() + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +async def _async_configured_socket( + address: _Address, options: PoolOptions +) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if HAS_SNI: + if hasattr(ssl_context, "a_wrap_socket"): + ssl_sock = await ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] + else: + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor( + None, + functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] + ) + else: + if hasattr(ssl_context, "a_wrap_socket"): + ssl_sock = await ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] + else: + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, SSLError) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +async def _configured_protocol_interface( + address: _Address, options: PoolOptions +) -> AsyncNetworkingInterface: + """Given (host, port) and PoolOptions, return a configured AsyncNetworkingInterface. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets protocol's SSL and timeout options. + """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + timeout = options.socket_timeout + + if ssl_context is None: + return AsyncNetworkingInterface( + await asyncio.get_running_loop().create_connection( + lambda: PyMongoProtocol(timeout=timeout), sock=sock + ) + ) + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + transport, protocol = await asyncio.get_running_loop().create_connection( # type: ignore[call-overload] + lambda: PyMongoProtocol(timeout=timeout), + sock=sock, + server_hostname=host, + ssl=ssl_context, + ) + except _CertificateError: + transport.abort() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, SSLError) as exc: + transport.abort() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(transport.get_extra_info("peercert"), hostname=host) # type:ignore[attr-defined,unused-ignore] + except _CertificateError: + transport.abort() + raise + + return AsyncNetworkingInterface((transport, protocol)) + + +def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a raw socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.connect(host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in socket.getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined, unused-ignore] + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. + timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + sock.connect(sa) + return sock + except OSError as e: + err = e + sock.close() + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if HAS_SNI: + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] + else: + ssl_sock = ssl_context.wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, SSLError) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +def _configured_socket_interface(address: _Address, options: PoolOptions) -> NetworkingInterface: + """Given (host, port) and PoolOptions, return a NetworkingInterface wrapping a configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return NetworkingInterface(sock) + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if HAS_SNI: + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + ssl_sock = ssl_context.wrap_socket(sock) + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, SSLError) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined,unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return NetworkingInterface(ssl_sock) diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 38c28de91e..ed631e135d 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -70,21 +70,21 @@ NetworkTimeout, ServerSelectionTimeoutError, ) -from pymongo.network_layer import BLOCKING_IO_ERRORS, sendall +from pymongo.network_layer import sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _configured_socket, + _get_timeout_details, + _raise_connection_failure, +) from pymongo.read_concern import ReadConcern from pymongo.results import BulkWriteResult, DeleteResult -from pymongo.ssl_support import get_ssl_context +from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context from pymongo.synchronous.collection import Collection from pymongo.synchronous.cursor import Cursor from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient -from pymongo.synchronous.pool import ( - _configured_socket, - _get_timeout_details, - _raise_connection_failure, -) from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser_shared import parse_host from pymongo.write_concern import WriteConcern diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index a2b76c4e8a..1413bb1437 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -36,7 +36,11 @@ from pymongo.synchronous.srv_resolver import _SrvResolver if TYPE_CHECKING: - from pymongo.synchronous.pool import Connection, Pool, _CancellationContext + from pymongo.synchronous.pool import ( # type: ignore[attr-defined] + Connection, + Pool, + _CancellationContext, + ) from pymongo.synchronous.settings import TopologySettings from pymongo.synchronous.topology import Topology diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py index 0e53e806b0..786edb7003 100644 --- a/pymongo/synchronous/network.py +++ b/pymongo/synchronous/network.py @@ -17,7 +17,6 @@ import datetime import logging -import time from typing import ( TYPE_CHECKING, Any, @@ -31,20 +30,16 @@ from bson import _decode_all_selective from pymongo import _csot, helpers_shared, message -from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.compression_support import _NO_COMPRESSION, decompress +from pymongo.compression_support import _NO_COMPRESSION from pymongo.errors import ( NotPrimaryError, OperationFailure, - ProtocolError, ) from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log -from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.message import _OpMsg from pymongo.monitoring import _is_speculative_authenticate from pymongo.network_layer import ( - _UNPACK_COMPRESSION_HEADER, - _UNPACK_HEADER, - receive_data, + receive_message, sendall, ) @@ -194,7 +189,7 @@ def command( ) try: - sendall(conn.conn, msg) + sendall(conn.conn.get_conn, msg) if use_op_msg and unacknowledged: # Unacknowledged, fake a successful command response. reply = None @@ -301,45 +296,3 @@ def command( ) return response_doc # type: ignore[return-value] - - -def receive_message( - conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE -) -> Union[_OpReply, _OpMsg]: - """Receive a raw BSON message or raise socket.error.""" - if _csot.get_timeout(): - deadline = _csot.get_deadline() - else: - timeout = conn.conn.gettimeout() - if timeout: - deadline = time.monotonic() + timeout - else: - deadline = None - # Ignore the response's request id. - length, _, response_to, op_code = _UNPACK_HEADER(receive_data(conn, 16, deadline)) - # No request_id for exhaust cursor "getMore". - if request_id is not None: - if request_id != response_to: - raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") - if length <= 16: - raise ProtocolError( - f"Message length ({length!r}) not longer than standard message header size (16)" - ) - if length > max_message_size: - raise ProtocolError( - f"Message length ({length!r}) is larger than server max " - f"message size ({max_message_size!r})" - ) - if op_code == 2012: - op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) - data = decompress(receive_data(conn, length - 25, deadline), compressor_id) - else: - data = receive_data(conn, length - 16, deadline) - - try: - unpack_reply = _UNPACK_REPLY[op_code] - except KeyError: - raise ProtocolError( - f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" - ) from None - return unpack_reply(data) diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index cd78e26fea..6a302e2728 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -14,14 +14,10 @@ from __future__ import annotations -import asyncio import collections import contextlib -import functools import logging import os -import socket -import ssl import sys import time import weakref @@ -49,16 +45,13 @@ from pymongo.errors import ( # type:ignore[attr-defined] AutoReconnect, ConfigurationError, - ConnectionFailure, DocumentTooLarge, ExecutionTimeout, InvalidOperation, - NetworkTimeout, NotPrimaryError, OperationFailure, PyMongoError, WaitQueueTimeoutError, - _CertificateError, ) from pymongo.hello import Hello, HelloCompat from pymongo.lock import ( @@ -76,16 +69,23 @@ ConnectionCheckOutFailedReason, ConnectionClosedReason, ) -from pymongo.network_layer import sendall +from pymongo.network_layer import NetworkingInterface, receive_message, sendall from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _CancellationContext, + _configured_socket_interface, + _get_timeout_details, + _raise_connection_failure, + format_timeout_details, +) from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import HAS_SNI, SSLError +from pymongo.ssl_support import SSLError from pymongo.synchronous.client_session import _validate_session_write_concern -from pymongo.synchronous.helpers import _getaddrinfo, _handle_reauth -from pymongo.synchronous.network import command, receive_message +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.network import command if TYPE_CHECKING: from bson import CodecOptions @@ -96,7 +96,6 @@ ZstdContext, ) from pymongo.message import _OpMsg, _OpReply - from pymongo.pyopenssl_context import _sslConn from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _ServerMode from pymongo.synchronous.auth import _AuthContext @@ -123,133 +122,6 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 _IS_SYNC = True -_MAX_TCP_KEEPIDLE = 120 -_MAX_TCP_KEEPINTVL = 10 -_MAX_TCP_KEEPCNT = 9 - -if sys.platform == "win32": - try: - import _winreg as winreg - except ImportError: - import winreg - - def _query(key, name, default): - try: - value, _ = winreg.QueryValueEx(key, name) - # Ensure the value is a number or raise ValueError. - return int(value) - except (OSError, ValueError): - # QueryValueEx raises OSError when the key does not exist (i.e. - # the system is using the Windows default value). - return default - - try: - with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" - ) as key: - _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) - _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) - except OSError: - # We could not check the default values because winreg.OpenKey failed. - # Assume the system is using the default values. - _WINDOWS_TCP_IDLE_MS = 7200000 - _WINDOWS_TCP_INTERVAL_MS = 1000 - - def _set_keepalive_times(sock): - idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) - if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: - sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) - -else: - - def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: - if hasattr(socket, tcp_option): - sockopt = getattr(socket, tcp_option) - try: - # PYTHON-1350 - NetBSD doesn't implement getsockopt for - # TCP_KEEPIDLE and friends. Don't attempt to set the - # values there. - default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) - if default > max_value: - sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except OSError: - pass - - def _set_keepalive_times(sock: socket.socket) -> None: - _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) - - -def _raise_connection_failure( - address: Any, - error: Exception, - msg_prefix: Optional[str] = None, - timeout_details: Optional[dict[str, float]] = None, -) -> NoReturn: - """Convert a socket.error to ConnectionFailure and raise it.""" - host, port = address - # If connecting to a Unix socket, port will be None. - if port is not None: - msg = "%s:%d: %s" % (host, port, error) - else: - msg = f"{host}: {error}" - if msg_prefix: - msg = msg_prefix + msg - if "configured timeouts" not in msg: - msg += format_timeout_details(timeout_details) - if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) from error - elif isinstance(error, SSLError) and "timed out" in str(error): - # Eventlet does not distinguish TLS network timeouts from other - # SSLErrors (https://github.com/eventlet/eventlet/issues/692). - # Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised. - raise NetworkTimeout(msg) from error - else: - raise AutoReconnect(msg) from error - - -def _get_timeout_details(options: PoolOptions) -> dict[str, float]: - details = {} - timeout = _csot.get_timeout() - socket_timeout = options.socket_timeout - connect_timeout = options.connect_timeout - if timeout: - details["timeoutMS"] = timeout * 1000 - if socket_timeout and not timeout: - details["socketTimeoutMS"] = socket_timeout * 1000 - if connect_timeout: - details["connectTimeoutMS"] = connect_timeout * 1000 - return details - - -def format_timeout_details(details: Optional[dict[str, float]]) -> str: - result = "" - if details: - result += " (configured timeouts:" - for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: - if timeout in details: - result += f" {timeout}: {details[timeout]}ms," - result = result[:-1] - result += ")" - return result - - -class _CancellationContext: - def __init__(self) -> None: - self._cancelled = False - - def cancel(self) -> None: - """Cancel this context.""" - self._cancelled = True - - @property - def cancelled(self) -> bool: - """Was cancel called?""" - return self._cancelled - class Connection: """Store a connection with some metadata. @@ -261,7 +133,11 @@ class Connection: """ def __init__( - self, conn: Union[socket.socket, _sslConn], pool: Pool, address: tuple[str, int], id: int + self, + conn: NetworkingInterface, + pool: Pool, + address: tuple[str, int], + id: int, ): self.pool_ref = weakref.ref(pool) self.conn = conn @@ -318,7 +194,7 @@ def set_conn_timeout(self, timeout: Optional[float]) -> None: if timeout == self.last_timeout: return self.last_timeout = timeout - self.conn.settimeout(timeout) + self.conn.get_conn.settimeout(timeout) def apply_timeout( self, client: MongoClient, cmd: Optional[MutableMapping[str, Any]] @@ -573,7 +449,7 @@ def send_message(self, message: bytes, max_doc_size: int) -> None: ) try: - sendall(self.conn, message) + sendall(self.conn.get_conn, message) # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: self._raise_connection_failure(error) @@ -707,7 +583,10 @@ def _close_conn(self) -> None: def conn_closed(self) -> bool: """Return True if we know socket has been closed, False otherwise.""" - return self.socket_checker.socket_closed(self.conn) + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() def send_cluster_time( self, @@ -779,143 +658,6 @@ def __repr__(self) -> str: ) -def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: - """Given (host, port) and PoolOptions, connect and return a socket object. - - Can raise socket.error. - - This is a modified version of create_connection from CPython >= 2.7. - """ - host, port = address - - # Check if dealing with a unix domain socket - if host.endswith(".sock"): - if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported on this system") - sock = socket.socket(socket.AF_UNIX) - # SOCK_CLOEXEC not supported for Unix sockets. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.connect(host) - return sock - except OSError: - sock.close() - raise - - # Don't try IPv6 if we don't support it. Also skip it if host - # is 'localhost' (::1 is fine). Avoids slow connect issues - # like PYTHON-356. - family = socket.AF_INET - if socket.has_ipv6 and host != "localhost": - family = socket.AF_UNSPEC - - err = None - for res in _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined] - af, socktype, proto, dummy, sa = res - # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited - # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 - # all file descriptors are created non-inheritable. See PEP 446. - try: - sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) - except OSError: - # Can SOCK_CLOEXEC be defined even if the kernel doesn't support - # it? - sock = socket.socket(af, socktype, proto) - # Fallback when SOCK_CLOEXEC isn't available. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - # CSOT: apply timeout to socket connect. - timeout = _csot.remaining() - if timeout is None: - timeout = options.connect_timeout - elif timeout <= 0: - raise socket.timeout("timed out") - sock.settimeout(timeout) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) - _set_keepalive_times(sock) - sock.connect(sa) - return sock - except OSError as e: - err = e - sock.close() - - if err is not None: - raise err - else: - # This likely means we tried to connect to an IPv6 only - # host with an OS/kernel or Python interpreter that doesn't - # support IPv6. The test case is Jython2.5.1 which doesn't - # support IPv6 at all. - raise OSError("getaddrinfo failed") - - -def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: - """Given (host, port) and PoolOptions, return a configured socket. - - Can raise socket.error, ConnectionFailure, or _CertificateError. - - Sets socket's SSL and timeout options. - """ - sock = _create_connection(address, options) - ssl_context = options._ssl_context - - if ssl_context is None: - sock.settimeout(options.socket_timeout) - return sock - - host = address[0] - try: - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if HAS_SNI: - if _IS_SYNC: - ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) - else: - if hasattr(ssl_context, "a_wrap_socket"): - ssl_sock = ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc] - else: - loop = asyncio.get_running_loop() - ssl_sock = loop.run_in_executor( - None, - functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc] - ) - else: - if _IS_SYNC: - ssl_sock = ssl_context.wrap_socket(sock) - else: - if hasattr(ssl_context, "a_wrap_socket"): - ssl_sock = ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc] - else: - loop = asyncio.get_running_loop() - ssl_sock = loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc] - except _CertificateError: - sock.close() - # Raise _CertificateError directly like we do after match_hostname - # below. - raise - except (OSError, SSLError) as exc: - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - details = _get_timeout_details(options) - _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) - if ( - ssl_context.verify_mode - and not ssl_context.check_hostname - and not options.tls_allow_invalid_hostnames - ): - try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined] - except _CertificateError: - ssl_sock.close() - raise - - ssl_sock.settimeout(options.socket_timeout) - return ssl_sock - - class _PoolClosedError(PyMongoError): """Internal error raised when a thread tries to get a connection from a closed pool. @@ -1260,7 +1002,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect ) try: - sock = _configured_socket(self.address, self.opts) + networking_interface = _configured_socket_interface(self.address, self.opts) # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. except BaseException as error: with self.lock: @@ -1287,7 +1029,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect raise - conn = Connection(sock, self, self.address, conn_id) # type: ignore[arg-type] + conn = Connection(networking_interface, self, self.address, conn_id) # type: ignore[arg-type] with self.lock: self.active_contexts.add(conn.cancel_context) self.active_contexts.discard(tmp_context) @@ -1698,5 +1440,6 @@ def __del__(self) -> None: # Avoid ResourceWarnings in Python 3 # Close all sockets without calling reset() or close() because it is # not safe to acquire a lock in __del__. - for conn in self.conns: - conn.close_conn(None) + if _IS_SYNC: + for conn in self.conns: + conn.close_conn(None) diff --git a/pyproject.toml b/pyproject.toml index 353f527879..611cac13aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -116,6 +116,7 @@ filterwarnings = [ "module:unclosed None: self.deprecation_filter = DeprecationFilter() async def asyncTearDown(self) -> None: + await super().asyncTearDown() self.deprecation_filter.stop() @@ -196,6 +197,7 @@ async def asyncTearDown(self): SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) ) self.knobs.disable() + await super().asyncTearDown() async def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py index 4b68595397..f653c575e9 100644 --- a/test/asynchronous/utils.py +++ b/test/asynchronous/utils.py @@ -159,6 +159,7 @@ def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False self.id = random.randint(0, 100) + self.server_connection_id = random.randint(0, 100) def close_conn(self, reason): pass diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 3c3a1a67ae..9ba15e8d78 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -22,6 +22,8 @@ import warnings from test import PyMongoTestCase +import pytest + sys.path[0:0] = [""] from test import unittest @@ -30,6 +32,8 @@ from pymongo import MongoClient from pymongo.synchronous.auth_oidc import OIDCCallback +pytestmark = pytest.mark.auth + _IS_SYNC = True _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") diff --git a/test/test_client.py b/test/test_client.py index cd4ceb3299..038ba2241b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1233,7 +1233,6 @@ def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 timeout = self.rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) - self.addCleanup(timeout.close) no_timeout.pymongo_test.drop_collection("test") no_timeout.pymongo_test.test.insert_one({"x": 1}) @@ -1296,7 +1295,7 @@ def test_waitQueueTimeoutMS(self): def test_socketKeepAlive(self): pool = get_pool(self.client) with pool.checkout() as conn: - keepalive = conn.conn.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) + keepalive = conn.conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) self.assertTrue(keepalive) @no_type_check diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index b00b2c1b03..866b179c9e 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -647,7 +647,6 @@ def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 internal_client = self.rs_or_single_client(timeoutMS=None) - self.addCleanup(internal_client.close) collection = internal_client.db["coll"] self.addCleanup(collection.drop) diff --git a/test/test_cursor.py b/test/test_cursor.py index a9cbe99942..7b75f4ddc4 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1801,6 +1801,7 @@ def test_monitoring(self): @client_context.require_version_min(5, 0, -1) @client_context.require_no_mongos + @client_context.require_sync def test_exhaust_cursor_db_set(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -1810,7 +1811,7 @@ def test_exhaust_cursor_db_set(self): listener.reset() - result = c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1).to_list() + result = list(c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1)) self.assertEqual(len(result), 3) diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index b099820a45..598fc3fd76 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -137,6 +137,7 @@ def setUp(self) -> None: self.deprecation_filter = DeprecationFilter() def tearDown(self) -> None: + super().tearDown() self.deprecation_filter.stop() @@ -194,6 +195,7 @@ def tearDown(self): SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) ) self.knobs.disable() + super().tearDown() def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() diff --git a/test/utils.py b/test/utils.py index 1459a8fba7..3027ed7517 100644 --- a/test/utils.py +++ b/test/utils.py @@ -157,6 +157,7 @@ def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False self.id = random.randint(0, 100) + self.server_connection_id = random.randint(0, 100) def close_conn(self, reason): pass diff --git a/tools/synchro.py b/tools/synchro.py index d8760b83bc..f451d09a26 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -47,6 +47,7 @@ "async_receive_message": "receive_message", "async_receive_data": "receive_data", "async_sendall": "sendall", + "async_socket_sendall": "sendall", "asynchronous": "synchronous", "Asynchronous": "Synchronous", "AsyncBulkTestBase": "BulkTestBase", @@ -119,6 +120,9 @@ "_async_create_lock": "_create_lock", "_async_create_condition": "_create_condition", "_async_cond_wait": "_cond_wait", + "AsyncNetworkingInterface": "NetworkingInterface", + "_configured_protocol_interface": "_configured_socket_interface", + "_async_configured_socket": "_configured_socket", "SpecRunnerTask": "SpecRunnerThread", "AsyncMockConnection": "MockConnection", "AsyncMockPool": "MockPool", @@ -127,6 +131,7 @@ "async_create_barrier": "create_barrier", "async_barrier_wait": "barrier_wait", "async_joinall": "joinall", + "_async_create_connection": "_create_connection", "pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts": "pymongo.synchronous.srv_resolver._SrvResolver.get_hosts", } From c326161379f1c7850ca7765cd413dea70cb3ed9e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 28 Mar 2025 15:56:37 -0400 Subject: [PATCH 1834/2111] =?UTF-8?q?PYTHON-4933=20-=20Allow=20drivers=20t?= =?UTF-8?q?o=20set=20bypassDocumentValidation:=20false=20on=E2=80=A6=20(#2?= =?UTF-8?q?227)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/asynchronous/bulk.py | 6 +- pymongo/asynchronous/collection.py | 25 +- pymongo/synchronous/bulk.py | 6 +- pymongo/synchronous/collection.py | 25 +- .../unified/bypassDocumentValidation.json | 493 ++++++++++++++++++ test/utils_shared.py | 4 + 6 files changed, 529 insertions(+), 30 deletions(-) create mode 100644 test/crud/unified/bypassDocumentValidation.json diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index 1ea6fd60d9..91e08f61b3 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -87,7 +87,7 @@ def __init__( self, collection: AsyncCollection[_DocumentType], ordered: bool, - bypass_document_validation: bool, + bypass_document_validation: Optional[bool], comment: Optional[str] = None, let: Optional[Any] = None, ) -> None: @@ -516,8 +516,8 @@ async def _execute_command( if self.comment: cmd["comment"] = self.comment _csot.apply_write_concern(cmd, write_concern) - if self.bypass_doc_val: - cmd["bypassDocumentValidation"] = True + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val if self.let is not None and run.op_type in (_DELETE, _UPDATE): cmd["let"] = self.let if session: diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index b87f207760..7fb20b7ab3 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -701,7 +701,7 @@ async def bulk_write( self, requests: Sequence[_WriteOp[_DocumentType]], ordered: bool = True, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, let: Optional[Mapping] = None, @@ -800,7 +800,7 @@ async def _insert_one( ordered: bool, write_concern: WriteConcern, op_id: Optional[int], - bypass_doc_val: bool, + bypass_doc_val: Optional[bool], session: Optional[AsyncClientSession], comment: Optional[Any] = None, ) -> Any: @@ -814,8 +814,8 @@ async def _insert_one( async def _insert_command( session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool ) -> None: - if bypass_doc_val: - command["bypassDocumentValidation"] = True + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val result = await conn.command( self._database.name, @@ -840,7 +840,7 @@ async def _insert_command( async def insert_one( self, document: Union[_DocumentType, RawBSONDocument], - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, ) -> InsertOneResult: @@ -906,7 +906,7 @@ async def insert_many( self, documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, ) -> InsertManyResult: @@ -986,7 +986,7 @@ async def _update( write_concern: Optional[WriteConcern] = None, op_id: Optional[int] = None, ordered: bool = True, - bypass_doc_val: Optional[bool] = False, + bypass_doc_val: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, @@ -1041,8 +1041,8 @@ async def _update( if comment is not None: command["comment"] = comment # Update command. - if bypass_doc_val: - command["bypassDocumentValidation"] = True + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val # The command result has to be published for APM unmodified # so we make a shallow copy here before adding updatedExisting. @@ -1082,7 +1082,7 @@ async def _update_retryable( write_concern: Optional[WriteConcern] = None, op_id: Optional[int] = None, ordered: bool = True, - bypass_doc_val: Optional[bool] = False, + bypass_doc_val: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, @@ -1128,7 +1128,7 @@ async def replace_one( filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional[AsyncClientSession] = None, @@ -1237,7 +1237,7 @@ async def update_one( filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, @@ -2948,6 +2948,7 @@ async def aggregate( returning aggregate results using a cursor. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. + - `bypassDocumentValidation` (bool): If ``True``, allows the write to opt-out of document level validation. :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index f54dcdd42d..3823ef354d 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -87,7 +87,7 @@ def __init__( self, collection: Collection[_DocumentType], ordered: bool, - bypass_document_validation: bool, + bypass_document_validation: Optional[bool], comment: Optional[str] = None, let: Optional[Any] = None, ) -> None: @@ -516,8 +516,8 @@ def _execute_command( if self.comment: cmd["comment"] = self.comment _csot.apply_write_concern(cmd, write_concern) - if self.bypass_doc_val: - cmd["bypassDocumentValidation"] = True + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val if self.let is not None and run.op_type in (_DELETE, _UPDATE): cmd["let"] = self.let if session: diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index e63ed70fc2..8a71768318 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -700,7 +700,7 @@ def bulk_write( self, requests: Sequence[_WriteOp[_DocumentType]], ordered: bool = True, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, let: Optional[Mapping] = None, @@ -799,7 +799,7 @@ def _insert_one( ordered: bool, write_concern: WriteConcern, op_id: Optional[int], - bypass_doc_val: bool, + bypass_doc_val: Optional[bool], session: Optional[ClientSession], comment: Optional[Any] = None, ) -> Any: @@ -813,8 +813,8 @@ def _insert_one( def _insert_command( session: Optional[ClientSession], conn: Connection, retryable_write: bool ) -> None: - if bypass_doc_val: - command["bypassDocumentValidation"] = True + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val result = conn.command( self._database.name, @@ -839,7 +839,7 @@ def _insert_command( def insert_one( self, document: Union[_DocumentType, RawBSONDocument], - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> InsertOneResult: @@ -905,7 +905,7 @@ def insert_many( self, documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> InsertManyResult: @@ -985,7 +985,7 @@ def _update( write_concern: Optional[WriteConcern] = None, op_id: Optional[int] = None, ordered: bool = True, - bypass_doc_val: Optional[bool] = False, + bypass_doc_val: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, @@ -1040,8 +1040,8 @@ def _update( if comment is not None: command["comment"] = comment # Update command. - if bypass_doc_val: - command["bypassDocumentValidation"] = True + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val # The command result has to be published for APM unmodified # so we make a shallow copy here before adding updatedExisting. @@ -1081,7 +1081,7 @@ def _update_retryable( write_concern: Optional[WriteConcern] = None, op_id: Optional[int] = None, ordered: bool = True, - bypass_doc_val: Optional[bool] = False, + bypass_doc_val: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, @@ -1127,7 +1127,7 @@ def replace_one( filter: Mapping[str, Any], replacement: Mapping[str, Any], upsert: bool = False, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional[ClientSession] = None, @@ -1236,7 +1236,7 @@ def update_one( filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], upsert: bool = False, - bypass_document_validation: bool = False, + bypass_document_validation: Optional[bool] = None, collation: Optional[_CollationIn] = None, array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, @@ -2941,6 +2941,7 @@ def aggregate( returning aggregate results using a cursor. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. + - `bypassDocumentValidation` (bool): If ``True``, allows the write to opt-out of document level validation. :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result diff --git a/test/crud/unified/bypassDocumentValidation.json b/test/crud/unified/bypassDocumentValidation.json new file mode 100644 index 0000000000..aff2d37f81 --- /dev/null +++ b/test/crud/unified/bypassDocumentValidation.json @@ -0,0 +1,493 @@ +{ + "description": "bypassDocumentValidation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + }, + "commandName": "aggregate", + "databaseName": "crud" + } + } + ] + } + ] + }, + { + "description": "BulkWrite passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndReplace passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4, + "x": 44 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "ReplaceOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 32 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/utils_shared.py b/test/utils_shared.py index 2c52445968..e0789b6632 100644 --- a/test/utils_shared.py +++ b/test/utils_shared.py @@ -615,6 +615,10 @@ def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callbac # Aggregate uses "batchSize", while find uses batch_size. elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": continue + elif arg_name == "bypassDocumentValidation" and ( + opname == "aggregate" or "find_one_and" in opname + ): + continue elif arg_name == "timeoutMode": raise unittest.SkipTest("PyMongo does not support timeoutMode") # Requires boolean returnDocument. From 58a41ae7f5811e4a76ee66aecc2fe875321e294e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 28 Mar 2025 15:31:56 -0500 Subject: [PATCH 1835/2111] PYTHON-4020 Drivers should unpin connections when ending a session (#2239) --- test/load_balancer/transactions.json | 44 ++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/test/load_balancer/transactions.json b/test/load_balancer/transactions.json index 0dd04ee854..ca9c145217 100644 --- a/test/load_balancer/transactions.json +++ b/test/load_balancer/transactions.json @@ -1616,6 +1616,50 @@ ] } ] + }, + { + "description": "pinned connection is released when session ended", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "endSession", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] } ] } From 8675a163df130007909ea36ad3828b645bb292c1 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 31 Mar 2025 09:07:53 -0400 Subject: [PATCH 1836/2111] =?UTF-8?q?PYTHON-4947=20-=20GridFS=20spec:=20Ad?= =?UTF-8?q?d=20performant=20'delete=20revisions=20by=20filena=E2=80=A6=20(?= =?UTF-8?q?#2218)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/changelog.rst | 2 + gridfs/asynchronous/grid_file.py | 29 +++ gridfs/synchronous/grid_file.py | 27 +++ test/asynchronous/test_gridfs_bucket.py | 11 ++ test/asynchronous/test_session.py | 4 +- test/asynchronous/test_transactions.py | 3 +- test/asynchronous/unified_format.py | 4 +- test/gridfs/deleteByName.json | 230 ++++++++++++++++++++++++ test/test_gridfs_bucket.py | 11 ++ test/test_session.py | 4 +- test/test_transactions.py | 3 +- test/unified_format.py | 4 +- 12 files changed, 322 insertions(+), 10 deletions(-) create mode 100644 test/gridfs/deleteByName.json diff --git a/doc/changelog.rst b/doc/changelog.rst index 0633049857..d25aff5655 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,8 @@ PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. - Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. +- Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.delete_by_name` and :meth:`gridfs.grid_file.GridFSBucket.delete_by_name` + for more performant deletion of a file with multiple revisions. - AsyncMongoClient no longer performs DNS resolution for "mongodb+srv://" connection strings on creation. To avoid blocking the asyncio loop, the resolution is now deferred until the client is first connected. - Added index hinting support to the diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index 3f3179c45c..d634eb745a 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -834,6 +834,35 @@ async def delete(self, file_id: Any, session: Optional[AsyncClientSession] = Non if not res.deleted_count: raise NoFile("no file could be deleted because none matched %s" % file_id) + @_csot.apply + async def delete_by_name( + self, filename: str, session: Optional[AsyncClientSession] = None + ) -> None: + """Given a filename, delete this stored file's files collection document(s) + and associated chunks from a GridFS bucket. + + For example:: + + my_db = AsyncMongoClient().test + fs = AsyncGridFSBucket(my_db) + await fs.upload_from_stream("test_file", "data I want to store!") + await fs.delete_by_name("test_file") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The name of the file to be deleted. + :param session: a :class:`~pymongo.client_session.AsyncClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + files = self._files.find({"filename": filename}, {"_id": 1}, session=session) + file_ids = [file["_id"] async for file in files] + res = await self._files.delete_many({"_id": {"$in": file_ids}}, session=session) + await self._chunks.delete_many({"files_id": {"$in": file_ids}}, session=session) + if not res.deleted_count: + raise NoFile(f"no file could be deleted because none matched filename {filename!r}") + def find(self, *args: Any, **kwargs: Any) -> AsyncGridOutCursor: """Find and return the files collection documents that match ``filter`` diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index 35386857d6..c5c3c62cde 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -830,6 +830,33 @@ def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: if not res.deleted_count: raise NoFile("no file could be deleted because none matched %s" % file_id) + @_csot.apply + def delete_by_name(self, filename: str, session: Optional[ClientSession] = None) -> None: + """Given a filename, delete this stored file's files collection document(s) + and associated chunks from a GridFS bucket. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + fs.upload_from_stream("test_file", "data I want to store!") + fs.delete_by_name("test_file") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The name of the file to be deleted. + :param session: a :class:`~pymongo.client_session.ClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + files = self._files.find({"filename": filename}, {"_id": 1}, session=session) + file_ids = [file["_id"] for file in files] + res = self._files.delete_many({"_id": {"$in": file_ids}}, session=session) + self._chunks.delete_many({"files_id": {"$in": file_ids}}, session=session) + if not res.deleted_count: + raise NoFile(f"no file could be deleted because none matched filename {filename!r}") + def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: """Find and return the files collection documents that match ``filter`` diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py index 29877ee9c4..03d49d5c3d 100644 --- a/test/asynchronous/test_gridfs_bucket.py +++ b/test/asynchronous/test_gridfs_bucket.py @@ -115,6 +115,17 @@ async def test_multi_chunk_delete(self): self.assertEqual(0, await self.db.fs.files.count_documents({})) self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + async def test_delete_by_name(self): + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFSBucket(self.db) + await gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete_by_name("test_filename") + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + async def test_empty_file(self): oid = await self.fs.upload_from_stream("test_filename", b"") self.assertEqual(b"", await (await self.fs.open_download_stream(oid)).read()) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 4431cbcb16..3c249718ce 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -45,7 +45,7 @@ from bson import DBRef from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket -from pymongo import ASCENDING, AsyncMongoClient, monitoring +from pymongo import ASCENDING, AsyncMongoClient, _csot, monitoring from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.helpers import anext @@ -543,7 +543,7 @@ async def find(session=None): (bucket.rename, [1, "f2"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), - (bucket.delete, [2], {}), + (bucket.delete_by_name, ["f"], {}), ) async def test_gridfsbucket_cursor(self): diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 884110cd45..ea4d1e3e6c 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -32,7 +32,7 @@ from bson import encode from bson.raw_bson import RawBSONDocument -from pymongo import WriteConcern +from pymongo import WriteConcern, _csot from pymongo.asynchronous import client_session from pymongo.asynchronous.client_session import TransactionOptions from pymongo.asynchronous.command_cursor import AsyncCommandCursor @@ -295,6 +295,7 @@ async def gridfs_open_upload_stream(*args, **kwargs): "new-name", ), ), + (bucket.delete_by_name, ("new-name",)), ] async with client.start_session() as s, await s.start_transaction(): diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index c6884a6d16..cc516ee822 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -66,7 +66,7 @@ from bson import SON, json_util from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId -from gridfs import AsyncGridFSBucket, GridOut +from gridfs import AsyncGridFSBucket, GridOut, NoFile from pymongo import ASCENDING, AsyncMongoClient, CursorType, _csot from pymongo.asynchronous.change_stream import AsyncChangeStream from pymongo.asynchronous.client_session import AsyncClientSession, TransactionOptions, _TxnState @@ -632,7 +632,7 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. if isinstance(error, ConnectionFailure): self.assertNotIsInstance(error, NotPrimaryError) - elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError)): + elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)): pass else: self.assertNotIsInstance(error, PyMongoError) diff --git a/test/gridfs/deleteByName.json b/test/gridfs/deleteByName.json new file mode 100644 index 0000000000..884d0300ce --- /dev/null +++ b/test/gridfs/deleteByName.json @@ -0,0 +1,230 @@ +{ + "description": "gridfs-deleteByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "delete when multiple revisions of the file exist", + "operations": [ + { + "name": "deleteByName", + "object": "bucket0", + "arguments": { + "filename": "filename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when file name does not exist", + "operations": [ + { + "name": "deleteByName", + "object": "bucket0", + "arguments": { + "filename": "missing-file" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index d68c9f6ba2..04063a213d 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -115,6 +115,17 @@ def test_multi_chunk_delete(self): self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) + def test_delete_by_name(self): + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + gfs = gridfs.GridFSBucket(self.db) + gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(5, self.db.fs.chunks.count_documents({})) + gfs.delete_by_name("test_filename") + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + def test_empty_file(self): oid = self.fs.upload_from_stream("test_filename", b"") self.assertEqual(b"", (self.fs.open_download_stream(oid)).read()) diff --git a/test/test_session.py b/test/test_session.py index 905539a1f8..ec25a735e7 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -45,7 +45,7 @@ from bson import DBRef from gridfs.synchronous.grid_file import GridFS, GridFSBucket -from pymongo import ASCENDING, MongoClient, monitoring +from pymongo import ASCENDING, MongoClient, _csot, monitoring from pymongo.common import _MAX_END_SESSIONS from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure from pymongo.operations import IndexModel, InsertOne, UpdateOne @@ -543,7 +543,7 @@ def find(session=None): (bucket.rename, [1, "f2"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), - (bucket.delete, [2], {}), + (bucket.delete_by_name, ["f"], {}), ) def test_gridfsbucket_cursor(self): diff --git a/test/test_transactions.py b/test/test_transactions.py index 80b3e3765e..c549b743be 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -32,7 +32,7 @@ from bson import encode from bson.raw_bson import RawBSONDocument -from pymongo import WriteConcern +from pymongo import WriteConcern, _csot from pymongo.errors import ( CollectionInvalid, ConfigurationError, @@ -287,6 +287,7 @@ def gridfs_open_upload_stream(*args, **kwargs): "new-name", ), ), + (bucket.delete_by_name, ("new-name",)), ] with client.start_session() as s, s.start_transaction(): diff --git a/test/unified_format.py b/test/unified_format.py index 4aec2ad729..fd7f92909e 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -65,7 +65,7 @@ from bson import SON, json_util from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId -from gridfs import GridFSBucket, GridOut +from gridfs import GridFSBucket, GridOut, NoFile from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( @@ -631,7 +631,7 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. if isinstance(error, ConnectionFailure): self.assertNotIsInstance(error, NotPrimaryError) - elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError)): + elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)): pass else: self.assertNotIsInstance(error, PyMongoError) From a3f3ec52bca56bb016b4d2616b9c1dad0df76e2a Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 31 Mar 2025 14:02:06 -0400 Subject: [PATCH 1837/2111] PYTHON-4946 - Add GridFSBucket.rename_by_name (#2219) --- doc/changelog.rst | 2 + gridfs/asynchronous/grid_file.py | 29 +++ gridfs/synchronous/grid_file.py | 29 +++ test/asynchronous/test_gridfs_bucket.py | 13 + test/asynchronous/test_session.py | 1 + test/asynchronous/test_transactions.py | 9 +- test/gridfs/renameByName.json | 313 ++++++++++++++++++++++++ test/test_gridfs_bucket.py | 9 + test/test_session.py | 1 + test/test_transactions.py | 9 +- 10 files changed, 413 insertions(+), 2 deletions(-) create mode 100644 test/gridfs/renameByName.json diff --git a/doc/changelog.rst b/doc/changelog.rst index d25aff5655..351bd38dfc 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,8 @@ PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. - Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. +- Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.rename_by_name` and :meth:`gridfs.grid_file.GridFSBucket.rename_by_name` + for more performant renaming of a file with multiple revisions. - Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.delete_by_name` and :meth:`gridfs.grid_file.GridFSBucket.delete_by_name` for more performant deletion of a file with multiple revisions. - AsyncMongoClient no longer performs DNS resolution for "mongodb+srv://" connection strings on creation. diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index d634eb745a..3c7d4ef0e9 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -1050,6 +1050,35 @@ async def rename( "matched file_id %i" % (new_filename, file_id) ) + async def rename_by_name( + self, filename: str, new_filename: str, session: Optional[AsyncClientSession] = None + ) -> None: + """Renames the stored file with the specified filename. + + For example:: + + my_db = AsyncMongoClient().test + fs = AsyncGridFSBucket(my_db) + await fs.upload_from_stream("test_file", "data I want to store!") + await fs.rename_by_name("test_file", "new_test_name") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The filename of the file to be renamed. + :param new_filename: The new name of the file. + :param session: a :class:`~pymongo.client_session.AsyncClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + result = await self._files.update_many( + {"filename": filename}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + f"no files could be renamed {new_filename!r} because none matched filename {filename!r}" + ) + class AsyncGridIn: """Class to write data to GridFS.""" diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index c5c3c62cde..d0a4c7fc7f 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -1042,6 +1042,35 @@ def rename( "matched file_id %i" % (new_filename, file_id) ) + def rename_by_name( + self, filename: str, new_filename: str, session: Optional[ClientSession] = None + ) -> None: + """Renames the stored file with the specified filename. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + fs.upload_from_stream("test_file", "data I want to store!") + fs.rename_by_name("test_file", "new_test_name") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The filename of the file to be renamed. + :param new_filename: The new name of the file. + :param session: a :class:`~pymongo.client_session.ClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + result = self._files.update_many( + {"filename": filename}, {"$set": {"filename": new_filename}}, session=session + ) + if not result.matched_count: + raise NoFile( + f"no files could be renamed {new_filename!r} because none matched filename {filename!r}" + ) + class GridIn: """Class to write data to GridFS.""" diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py index 03d49d5c3d..e8d063b712 100644 --- a/test/asynchronous/test_gridfs_bucket.py +++ b/test/asynchronous/test_gridfs_bucket.py @@ -450,6 +450,19 @@ async def test_rename(self): b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() ) + async def test_rename_by_name(self): + _id = await self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("first_name")).read() + ) + + await self.fs.rename_by_name("first_name", "second_name") + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("first_name") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() + ) + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) async def test_abort(self): gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 3c249718ce..3655f49aab 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -541,6 +541,7 @@ async def find(session=None): (bucket.download_to_stream_by_name, ["f", sio], {}), (find, [], {}), (bucket.rename, [1, "f2"], {}), + (bucket.rename_by_name, ["f2", "f3"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), (bucket.delete_by_name, ["f"], {}), diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index ea4d1e3e6c..e9ce16fe61 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -295,7 +295,14 @@ async def gridfs_open_upload_stream(*args, **kwargs): "new-name", ), ), - (bucket.delete_by_name, ("new-name",)), + ( + bucket.rename_by_name, + ( + "new-name", + "new-name2", + ), + ), + (bucket.delete_by_name, ("new-name2",)), ] async with client.start_session() as s, await s.start_transaction(): diff --git a/test/gridfs/renameByName.json b/test/gridfs/renameByName.json new file mode 100644 index 0000000000..26f04fb9e0 --- /dev/null +++ b/test/gridfs/renameByName.json @@ -0,0 +1,313 @@ +{ + "description": "gridfs-renameByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "rename when multiple revisions of the file exist", + "operations": [ + { + "name": "renameByName", + "object": "bucket0", + "arguments": { + "filename": "filename", + "newFilename": "newfilename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "rename when file name does not exist", + "operations": [ + { + "name": "renameByName", + "object": "bucket0", + "arguments": { + "filename": "missing-file", + "newFilename": "newfilename" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 04063a213d..e941369f99 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -424,6 +424,15 @@ def test_rename(self): self.fs.open_download_stream_by_name("first_name") self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) + def test_rename_by_name(self): + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("first_name")).read()) + + self.fs.rename_by_name("first_name", "second_name") + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("first_name") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) def test_abort(self): gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) diff --git a/test/test_session.py b/test/test_session.py index ec25a735e7..a6266884aa 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -541,6 +541,7 @@ def find(session=None): (bucket.download_to_stream_by_name, ["f", sio], {}), (find, [], {}), (bucket.rename, [1, "f2"], {}), + (bucket.rename_by_name, ["f2", "f3"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), (bucket.delete_by_name, ["f"], {}), diff --git a/test/test_transactions.py b/test/test_transactions.py index c549b743be..a524f6fce5 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -287,7 +287,14 @@ def gridfs_open_upload_stream(*args, **kwargs): "new-name", ), ), - (bucket.delete_by_name, ("new-name",)), + ( + bucket.rename_by_name, + ( + "new-name", + "new-name2", + ), + ), + (bucket.delete_by_name, ("new-name2",)), ] with client.start_session() as s, s.start_transaction(): From d1c14150dc49a66db4a97f2c07929046279c5e29 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 31 Mar 2025 15:04:05 -0400 Subject: [PATCH 1838/2111] PYTHON-5154 - Remove PyOpenSSL support from Async PyMongo (#2246) --- .evergreen/generated_configs/variants.yml | 47 +++++++++-------- .evergreen/scripts/generate_config.py | 19 ++++++- doc/changelog.rst | 1 + pymongo/pool_shared.py | 20 +++----- pymongo/pyopenssl_context.py | 61 ++--------------------- 5 files changed, 51 insertions(+), 97 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index e7c1ed88c4..4b3b8e28b0 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -360,9 +360,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-python3.9 tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async + - name: .sharded_cluster .auth .ssl .sync + - name: .replica_set .noauth .ssl .sync + - name: .standalone .noauth .nossl .sync display_name: Encryption PyOpenSSL RHEL8 Python3.9 run_on: - rhel87-small @@ -374,9 +374,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-python3.13 tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async + - name: .sharded_cluster .auth .ssl .sync + - name: .replica_set .noauth .ssl .sync + - name: .standalone .noauth .nossl .sync display_name: Encryption PyOpenSSL RHEL8 Python3.13 run_on: - rhel87-small @@ -388,9 +388,9 @@ buildvariants: tags: [encryption_tag] - name: encryption-pyopenssl-rhel8-pypy3.10 tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async + - name: .sharded_cluster .auth .ssl .sync + - name: .replica_set .noauth .ssl .sync + - name: .standalone .noauth .nossl .sync display_name: Encryption PyOpenSSL RHEL8 PyPy3.10 run_on: - rhel87-small @@ -419,15 +419,14 @@ buildvariants: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: encryption-pyopenssl-rhel8-python3.12 + - name: encryption-rhel8-python3.12 tasks: - name: .standalone .noauth .nossl .sync_async - display_name: Encryption PyOpenSSL RHEL8 Python3.12 + display_name: Encryption RHEL8 Python3.12 run_on: - rhel87-small expansions: TEST_NAME: encryption - SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: encryption-macos-python3.9 tasks: @@ -909,8 +908,8 @@ buildvariants: # Pyopenssl tests - name: pyopenssl-macos-python3.9 tasks: - - name: .replica_set .noauth .nossl .sync_async - - name: .7.0 .noauth .nossl .sync_async + - name: .replica_set .noauth .nossl .sync + - name: .7.0 .noauth .nossl .sync display_name: PyOpenSSL macOS Python3.9 run_on: - macos-14 @@ -920,8 +919,8 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-python3.10 tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async + - name: .replica_set .auth .ssl .sync + - name: .7.0 .auth .ssl .sync display_name: PyOpenSSL RHEL8 Python3.10 run_on: - rhel87-small @@ -931,8 +930,8 @@ buildvariants: PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-python3.11 tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async + - name: .replica_set .auth .ssl .sync + - name: .7.0 .auth .ssl .sync display_name: PyOpenSSL RHEL8 Python3.11 run_on: - rhel87-small @@ -942,8 +941,8 @@ buildvariants: PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-python3.12 tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async + - name: .replica_set .auth .ssl .sync + - name: .7.0 .auth .ssl .sync display_name: PyOpenSSL RHEL8 Python3.12 run_on: - rhel87-small @@ -953,8 +952,8 @@ buildvariants: PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-python3.13 tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async + - name: .replica_set .auth .ssl .sync + - name: .7.0 .auth .ssl .sync display_name: PyOpenSSL Win64 Python3.13 run_on: - windows-64-vsMulti-small @@ -964,8 +963,8 @@ buildvariants: PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.10 tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async + - name: .replica_set .auth .ssl .sync + - name: .7.0 .auth .ssl .sync display_name: PyOpenSSL RHEL8 PyPy3.10 run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 09370bc2b1..d1880c7644 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -369,7 +369,7 @@ def get_encryption_expansions(encryption): host = DEFAULT_HOST # Test against all server versions for the three main python versions. - encryptions = ["Encryption", "Encryption crypt_shared", "Encryption PyOpenSSL"] + encryptions = ["Encryption", "Encryption crypt_shared"] for encryption, python in product(encryptions, [*MIN_MAX_PYTHON, PYPYS[-1]]): expansions = get_encryption_expansions(encryption) display_name = get_variant_name(encryption, host, python=python, **expansions) @@ -384,6 +384,21 @@ def get_encryption_expansions(encryption): ) variants.append(variant) + # Test PyOpenSSL against on all server versions for all python versions. + for encryption, python in product(["Encryption PyOpenSSL"], [*MIN_MAX_PYTHON, PYPYS[-1]]): + expansions = get_encryption_expansions(encryption) + display_name = get_variant_name(encryption, host, python=python, **expansions) + variant = create_variant( + [f"{t} .sync" for t in SUB_TASKS], + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + # Test the rest of the pythons on linux for all server versions. for encryption, python, task in zip_cycle(encryptions, CPYTHONS[1:-1] + PYPYS[:-1], SUB_TASKS): expansions = get_encryption_expansions(encryption) @@ -499,7 +514,7 @@ def create_pyopenssl_variants(): display_name = get_variant_name(base_name, host, python=python) variant = create_variant( - [f".replica_set .{auth} .{ssl} .sync_async", f".7.0 .{auth} .{ssl} .sync_async"], + [f".replica_set .{auth} .{ssl} .sync", f".7.0 .{auth} .{ssl} .sync"], display_name, python=python, host=host, diff --git a/doc/changelog.rst b/doc/changelog.rst index 351bd38dfc..4d8b26a5e2 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -23,6 +23,7 @@ PyMongo 4.12 brings a number of changes including: :class:`~pymongo.read_preferences.Secondary`, :class:`~pymongo.read_preferences.SecondaryPreferred`, :class:`~pymongo.read_preferences.Nearest`. Support for ``hedge`` will be removed in PyMongo 5.0. +- Removed PyOpenSSL support from the asynchronous API due to limitations of the CPython asyncio.Protocol SSL implementation. Issues Resolved ............... diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index 42b330b1e2..a46a4d2300 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -280,20 +280,14 @@ async def _async_configured_socket( # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. if HAS_SNI: - if hasattr(ssl_context, "a_wrap_socket"): - ssl_sock = await ssl_context.a_wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] - else: - loop = asyncio.get_running_loop() - ssl_sock = await loop.run_in_executor( - None, - functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] - ) + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor( + None, + functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] + ) else: - if hasattr(ssl_context, "a_wrap_socket"): - ssl_sock = await ssl_context.a_wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] - else: - loop = asyncio.get_running_loop() - ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] except _CertificateError: sock.close() # Raise _CertificateError directly like we do after match_hostname diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 0cc35c4f66..0d4f27cf55 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -14,10 +14,11 @@ """A CPython compatible SSLContext implementation wrapping PyOpenSSL's context. + +Due to limitations of the CPython asyncio.Protocol implementation for SSL, the async API does not support PyOpenSSL. """ from __future__ import annotations -import asyncio import socket as _socket import ssl as _stdlibssl import sys as _sys @@ -109,15 +110,12 @@ def __init__( ctx: _SSL.Context, sock: Optional[_socket.socket], suppress_ragged_eofs: bool, - is_async: bool = False, ): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs super().__init__(ctx, sock) - self._is_async = is_async def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: - is_async = kwargs.pop("allow_async", True) and self._is_async timeout = self.gettimeout() if timeout: start = _time.monotonic() @@ -126,7 +124,7 @@ def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: return call(*args, **kwargs) except BLOCKING_IO_ERRORS as exc: # Do not retry if the connection is in non-blocking mode. - if is_async or timeout == 0: + if timeout == 0: raise exc # Check for closed socket. if self.fileno() == -1: @@ -148,7 +146,6 @@ def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: continue def do_handshake(self, *args: Any, **kwargs: Any) -> None: - kwargs["allow_async"] = False return self._call(super().do_handshake, *args, **kwargs) def recv(self, *args: Any, **kwargs: Any) -> bytes: @@ -379,58 +376,6 @@ def set_default_verify_paths(self) -> None: # but not that same as CPython's. self._ctx.set_default_verify_paths() - async def a_wrap_socket( - self, - sock: _socket.socket, - server_side: bool = False, - do_handshake_on_connect: bool = True, - suppress_ragged_eofs: bool = True, - server_hostname: Optional[str] = None, - session: Optional[_SSL.Session] = None, - ) -> _sslConn: - """Wrap an existing Python socket connection and return a TLS socket - object. - """ - ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs, True) - loop = asyncio.get_running_loop() - if session: - ssl_conn.set_session(session) - if server_side is True: - ssl_conn.set_accept_state() - else: - # SNI - if server_hostname and not _is_ip_address(server_hostname): - # XXX: Do this in a callback registered with - # SSLContext.set_info_callback? See Twisted for an example. - ssl_conn.set_tlsext_host_name(server_hostname.encode("idna")) - if self.verify_mode != _stdlibssl.CERT_NONE: - # Request a stapled OCSP response. - await loop.run_in_executor(None, ssl_conn.request_ocsp) - ssl_conn.set_connect_state() - # If this wasn't true the caller of wrap_socket would call - # do_handshake() - if do_handshake_on_connect: - # XXX: If we do hostname checking in a callback we can get rid - # of this call to do_handshake() since the handshake - # will happen automatically later. - await loop.run_in_executor(None, ssl_conn.do_handshake) - # XXX: Do this in a callback registered with - # SSLContext.set_info_callback? See Twisted for an example. - if self.check_hostname and server_hostname is not None: - from service_identity import pyopenssl - - try: - if _is_ip_address(server_hostname): - pyopenssl.verify_ip_address(ssl_conn, server_hostname) - else: - pyopenssl.verify_hostname(ssl_conn, server_hostname) - except ( # type:ignore[misc] - service_identity.SICertificateError, - service_identity.SIVerificationError, - ) as exc: - raise _CertificateError(str(exc)) from None - return ssl_conn - def wrap_socket( self, sock: _socket.socket, From 4bffc4e492d6e5153366f8a4a36305b6f6d8c512 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 31 Mar 2025 16:06:31 -0400 Subject: [PATCH 1839/2111] PYTHON-4471 - Logging records should have a standard field order (#2247) --- pymongo/asynchronous/bulk.py | 12 +++++------ pymongo/asynchronous/client_bulk.py | 12 +++++------ pymongo/asynchronous/monitor.py | 6 +++--- pymongo/asynchronous/network.py | 6 +++--- pymongo/asynchronous/pool.py | 32 ++++++++++++++--------------- pymongo/asynchronous/server.py | 8 ++++---- pymongo/asynchronous/topology.py | 14 ++++++------- pymongo/synchronous/bulk.py | 12 +++++------ pymongo/synchronous/client_bulk.py | 12 +++++------ pymongo/synchronous/monitor.py | 6 +++--- pymongo/synchronous/network.py | 6 +++--- pymongo/synchronous/pool.py | 32 ++++++++++++++--------------- pymongo/synchronous/server.py | 8 ++++---- pymongo/synchronous/topology.py | 14 ++++++------- 14 files changed, 90 insertions(+), 90 deletions(-) diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index 91e08f61b3..ac514db98f 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -255,8 +255,8 @@ async def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -276,8 +276,8 @@ async def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -302,8 +302,8 @@ async def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), @@ -340,8 +340,8 @@ async def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -366,8 +366,8 @@ async def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -393,8 +393,8 @@ async def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 1100527552..5f7ac013e9 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -241,8 +241,8 @@ async def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -262,8 +262,8 @@ async def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -289,8 +289,8 @@ async def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), @@ -330,8 +330,8 @@ async def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -356,8 +356,8 @@ async def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -383,8 +383,8 @@ async def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index 479ca1a314..32b545380a 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -274,6 +274,7 @@ async def _check_server(self) -> ServerDescription: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, topologyId=self._topology._topology_id, serverHost=address[0], serverPort=address[1], @@ -281,7 +282,6 @@ async def _check_server(self) -> ServerDescription: durationMS=duration * 1000, failure=error, driverConnectionId=self._conn_id, - message=_SDAMStatusMessage.HEARTBEAT_FAIL, ) await self._reset_connection() if isinstance(error, _OperationCancelled): @@ -313,13 +313,13 @@ async def _check_once(self) -> ServerDescription: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_START, topologyId=self._topology._topology_id, driverConnectionId=conn.id, serverConnectionId=conn.server_connection_id, serverHost=address[0], serverPort=address[1], awaited=awaited, - message=_SDAMStatusMessage.HEARTBEAT_START, ) self._cancel_context = conn.cancel_context @@ -339,6 +339,7 @@ async def _check_once(self) -> ServerDescription: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, topologyId=self._topology._topology_id, driverConnectionId=conn.id, serverConnectionId=conn.server_connection_id, @@ -347,7 +348,6 @@ async def _check_once(self) -> ServerDescription: awaited=awaited, durationMS=round_trip_time * 1000, reply=response.document, - message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, ) return sd diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index 5f14bef45d..1605efe92d 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -163,8 +163,8 @@ async def command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=spec, commandName=next(iter(spec)), databaseName=dbname, @@ -225,8 +225,8 @@ async def command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(spec)), @@ -259,8 +259,8 @@ async def command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=response_doc, commandName=next(iter(spec)), diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 6ebdb5cb20..18644cf7de 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -528,8 +528,8 @@ async def authenticate(self, reauthenticate: bool = False) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_READY, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=self.id, @@ -561,8 +561,8 @@ async def close_conn(self, reason: Optional[str]) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=self.id, @@ -777,8 +777,8 @@ def __init__( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_CREATED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], **self.opts.non_default_options, @@ -803,8 +803,8 @@ async def ready(self) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_READY, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], ) @@ -868,8 +868,8 @@ async def _reset( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], ) @@ -885,8 +885,8 @@ async def _reset( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_CLEARED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], serviceId=service_id, @@ -998,8 +998,8 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CREATED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn_id, @@ -1019,8 +1019,8 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn_id, @@ -1086,8 +1086,8 @@ async def checkout( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_STARTED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], ) @@ -1101,8 +1101,8 @@ async def checkout( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn.id, @@ -1150,8 +1150,8 @@ def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="An error occurred while trying to establish a new connection", @@ -1184,8 +1184,8 @@ async def _get_conn( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="Connection pool was closed", @@ -1280,8 +1280,8 @@ async def _get_conn( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="An error occurred while trying to establish a new connection", @@ -1313,8 +1313,8 @@ async def checkin(self, conn: AsyncConnection) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKEDIN, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn.id, @@ -1334,8 +1334,8 @@ async def checkin(self, conn: AsyncConnection) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn.id, @@ -1412,8 +1412,8 @@ def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="Wait queue timeout elapsed without a connection becoming available", diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index 3ad8374b00..0e0d53b96f 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -108,10 +108,10 @@ async def close(self) -> None: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.STOP_SERVER, topologyId=self._topology_id, serverHost=self._description.address[0], serverPort=self._description.address[1], - message=_SDAMStatusMessage.STOP_SERVER, ) await self._monitor.close() @@ -173,8 +173,8 @@ async def run_operation( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=dbn, @@ -234,8 +234,8 @@ async def run_operation( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), @@ -278,8 +278,8 @@ async def run_operation( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=res, commandName=next(iter(cmd)), diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index d83ceca55b..9de069af7e 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -120,8 +120,8 @@ def __init__(self, topology_settings: TopologySettings): if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, - topologyId=self._topology_id, message=_SDAMStatusMessage.START_TOPOLOGY, + topologyId=self._topology_id, ) if self._publish_tp: @@ -152,10 +152,10 @@ def __init__(self, topology_settings: TopologySettings): if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=initial_td, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) for seed in topology_settings.seeds: @@ -165,10 +165,10 @@ def __init__(self, topology_settings: TopologySettings): if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.START_SERVER, topologyId=self._topology_id, serverHost=seed[0], serverPort=seed[1], - message=_SDAMStatusMessage.START_SERVER, ) # Store the seed list to help diagnose errors in _error_message(). @@ -514,10 +514,10 @@ async def _process_change( if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event: _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=td_old, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) # Shutdown SRV polling for unsupported cluster types. @@ -582,10 +582,10 @@ async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=td_old, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) async def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: @@ -748,13 +748,13 @@ async def close(self) -> None: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=old_td, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) _debug_log( - _SDAM_LOGGER, topologyId=self._topology_id, message=_SDAMStatusMessage.STOP_TOPOLOGY + _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id ) if self._publish_server or self._publish_tp: diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index 3823ef354d..a528b09add 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -255,8 +255,8 @@ def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -276,8 +276,8 @@ def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -302,8 +302,8 @@ def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), @@ -340,8 +340,8 @@ def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -366,8 +366,8 @@ def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -393,8 +393,8 @@ def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index e6de22d237..d73bfb2a2b 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -241,8 +241,8 @@ def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -262,8 +262,8 @@ def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -289,8 +289,8 @@ def write_command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), @@ -330,8 +330,8 @@ def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=bwc.db_name, @@ -356,8 +356,8 @@ def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=reply, commandName=next(iter(cmd)), @@ -383,8 +383,8 @@ def unack_write( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index 1413bb1437..f41040801f 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -272,6 +272,7 @@ def _check_server(self) -> ServerDescription: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, topologyId=self._topology._topology_id, serverHost=address[0], serverPort=address[1], @@ -279,7 +280,6 @@ def _check_server(self) -> ServerDescription: durationMS=duration * 1000, failure=error, driverConnectionId=self._conn_id, - message=_SDAMStatusMessage.HEARTBEAT_FAIL, ) self._reset_connection() if isinstance(error, _OperationCancelled): @@ -311,13 +311,13 @@ def _check_once(self) -> ServerDescription: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_START, topologyId=self._topology._topology_id, driverConnectionId=conn.id, serverConnectionId=conn.server_connection_id, serverHost=address[0], serverPort=address[1], awaited=awaited, - message=_SDAMStatusMessage.HEARTBEAT_START, ) self._cancel_context = conn.cancel_context @@ -337,6 +337,7 @@ def _check_once(self) -> ServerDescription: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, topologyId=self._topology._topology_id, driverConnectionId=conn.id, serverConnectionId=conn.server_connection_id, @@ -345,7 +346,6 @@ def _check_once(self) -> ServerDescription: awaited=awaited, durationMS=round_trip_time * 1000, reply=response.document, - message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, ) return sd diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py index 786edb7003..9559a5a542 100644 --- a/pymongo/synchronous/network.py +++ b/pymongo/synchronous/network.py @@ -163,8 +163,8 @@ def command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=spec, commandName=next(iter(spec)), databaseName=dbname, @@ -225,8 +225,8 @@ def command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(spec)), @@ -259,8 +259,8 @@ def command( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=response_doc, commandName=next(iter(spec)), diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 6a302e2728..1151776b94 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -528,8 +528,8 @@ def authenticate(self, reauthenticate: bool = False) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_READY, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=self.id, @@ -559,8 +559,8 @@ def close_conn(self, reason: Optional[str]) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=self.id, @@ -775,8 +775,8 @@ def __init__( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_CREATED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], **self.opts.non_default_options, @@ -801,8 +801,8 @@ def ready(self) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_READY, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], ) @@ -866,8 +866,8 @@ def _reset( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], ) @@ -883,8 +883,8 @@ def _reset( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.POOL_CLEARED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], serviceId=service_id, @@ -994,8 +994,8 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CREATED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn_id, @@ -1015,8 +1015,8 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn_id, @@ -1082,8 +1082,8 @@ def checkout( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_STARTED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], ) @@ -1097,8 +1097,8 @@ def checkout( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn.id, @@ -1146,8 +1146,8 @@ def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="An error occurred while trying to establish a new connection", @@ -1180,8 +1180,8 @@ def _get_conn( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="Connection pool was closed", @@ -1276,8 +1276,8 @@ def _get_conn( if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="An error occurred while trying to establish a new connection", @@ -1309,8 +1309,8 @@ def checkin(self, conn: Connection) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKEDIN, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn.id, @@ -1330,8 +1330,8 @@ def checkin(self, conn: Connection) -> None: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], driverConnectionId=conn.id, @@ -1408,8 +1408,8 @@ def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _CONNECTION_LOGGER, - clientId=self._client_id, message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, serverHost=self.address[0], serverPort=self.address[1], reason="Wait queue timeout elapsed without a connection becoming available", diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py index 5b8a8e3919..c3643ba815 100644 --- a/pymongo/synchronous/server.py +++ b/pymongo/synchronous/server.py @@ -108,10 +108,10 @@ def close(self) -> None: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.STOP_SERVER, topologyId=self._topology_id, serverHost=self._description.address[0], serverPort=self._description.address[1], - message=_SDAMStatusMessage.STOP_SERVER, ) self._monitor.close() @@ -173,8 +173,8 @@ def run_operation( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, command=cmd, commandName=next(iter(cmd)), databaseName=dbn, @@ -234,8 +234,8 @@ def run_operation( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, durationMS=duration, failure=failure, commandName=next(iter(cmd)), @@ -278,8 +278,8 @@ def run_operation( if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _COMMAND_LOGGER, - clientId=client._topology_settings._topology_id, message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, durationMS=duration, reply=res, commandName=next(iter(cmd)), diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index bf9011830d..bccc8a2eb7 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -120,8 +120,8 @@ def __init__(self, topology_settings: TopologySettings): if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, - topologyId=self._topology_id, message=_SDAMStatusMessage.START_TOPOLOGY, + topologyId=self._topology_id, ) if self._publish_tp: @@ -152,10 +152,10 @@ def __init__(self, topology_settings: TopologySettings): if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=initial_td, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) for seed in topology_settings.seeds: @@ -165,10 +165,10 @@ def __init__(self, topology_settings: TopologySettings): if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.START_SERVER, topologyId=self._topology_id, serverHost=seed[0], serverPort=seed[1], - message=_SDAMStatusMessage.START_SERVER, ) # Store the seed list to help diagnose errors in _error_message(). @@ -514,10 +514,10 @@ def _process_change( if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event: _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=td_old, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) # Shutdown SRV polling for unsupported cluster types. @@ -582,10 +582,10 @@ def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=td_old, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: @@ -746,13 +746,13 @@ def close(self) -> None: if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log( _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, previousDescription=old_td, newDescription=self._description, - message=_SDAMStatusMessage.TOPOLOGY_CHANGE, ) _debug_log( - _SDAM_LOGGER, topologyId=self._topology_id, message=_SDAMStatusMessage.STOP_TOPOLOGY + _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id ) if self._publish_server or self._publish_tp: From 711a45a0e9c040357698499fcbcf2c9850bf89cd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 31 Mar 2025 19:23:32 -0500 Subject: [PATCH 1840/2111] PYTHON-4938 Clarify write concern rules in the transactions spec (#2231) --- test/asynchronous/test_transactions.py | 24 ++++++++++++++++++++++++ test/test_transactions.py | 24 ++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index e9ce16fe61..f151755217 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -586,5 +586,29 @@ async def callback(session): self.assertFalse(s.in_transaction) +class TestOptionsInsideTransactionProse(AsyncTransactionsBase): + @async_client_context.require_transactions + @async_client_context.require_no_standalone + async def test_case_1(self): + # Write concern not inherited from collection object inside transaction + # Create a MongoClient running against a configured sharded/replica set/load balanced cluster. + client = async_client_context.client + coll = client[self.db.name].test + await coll.delete_many({}) + # Start a new session on the client. + async with client.start_session() as s: + # Start a transaction on the session. + await s.start_transaction() + # Instantiate a collection object in the driver with a default write concern of { w: 0 }. + inner_coll = coll.with_options(write_concern=WriteConcern(w=0)) + # Insert the document { n: 1 } on the instantiated collection. + result = await inner_coll.insert_one({"n": 1}, session=s) + # Commit the transaction. + await s.commit_transaction() + # End the session. + # Ensure the document was inserted and no error was thrown from the transaction. + assert result.inserted_id is not None + + if __name__ == "__main__": unittest.main() diff --git a/test/test_transactions.py b/test/test_transactions.py index a524f6fce5..63ea5c74fe 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -574,5 +574,29 @@ def callback(session): self.assertFalse(s.in_transaction) +class TestOptionsInsideTransactionProse(TransactionsBase): + @client_context.require_transactions + @client_context.require_no_standalone + def test_case_1(self): + # Write concern not inherited from collection object inside transaction + # Create a MongoClient running against a configured sharded/replica set/load balanced cluster. + client = client_context.client + coll = client[self.db.name].test + coll.delete_many({}) + # Start a new session on the client. + with client.start_session() as s: + # Start a transaction on the session. + s.start_transaction() + # Instantiate a collection object in the driver with a default write concern of { w: 0 }. + inner_coll = coll.with_options(write_concern=WriteConcern(w=0)) + # Insert the document { n: 1 } on the instantiated collection. + result = inner_coll.insert_one({"n": 1}, session=s) + # Commit the transaction. + s.commit_transaction() + # End the session. + # Ensure the document was inserted and no error was thrown from the transaction. + assert result.inserted_id is not None + + if __name__ == "__main__": unittest.main() From 9ff5a1755cb626c62fd4f61ea4950c4655a4f301 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 31 Mar 2025 19:26:18 -0500 Subject: [PATCH 1841/2111] PYTHON-3674 Simplify transaction options in convenient API doc example code (#2230) --- test/asynchronous/test_examples.py | 7 +------ test/test_examples.py | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py index 1312f1e215..9e9b208f51 100644 --- a/test/asynchronous/test_examples.py +++ b/test/asynchronous/test_examples.py @@ -1163,12 +1163,7 @@ async def callback(session): # Step 2: Start a client session. async with client.start_session() as session: # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). - await session.with_transaction( - callback, - read_concern=ReadConcern("local"), - write_concern=wc_majority, - read_preference=ReadPreference.PRIMARY, - ) + await session.with_transaction(callback) # End Transactions withTxn API Example 1 diff --git a/test/test_examples.py b/test/test_examples.py index ef06a77b9a..28fe1beaff 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1161,12 +1161,7 @@ def callback(session): # Step 2: Start a client session. with client.start_session() as session: # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). - session.with_transaction( - callback, - read_concern=ReadConcern("local"), - write_concern=wc_majority, - read_preference=ReadPreference.PRIMARY, - ) + session.with_transaction(callback) # End Transactions withTxn API Example 1 From 02fc85f63505060ba5d786b5b5c274c88b7f7a80 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 31 Mar 2025 19:27:56 -0500 Subject: [PATCH 1842/2111] PYTHON-5239 Audit bash scripts for consistency (#2238) --- .evergreen/combine-coverage.sh | 3 +- .evergreen/resync-specs.sh | 4 +- .evergreen/run-import-time-test.sh | 33 ------------- .evergreen/run-mongodb-aws-ecs-test.sh | 5 +- .evergreen/run-mongodb-oidc-test.sh | 3 +- .evergreen/run-tests.sh | 1 + .evergreen/scripts/check-import-time.sh | 46 +++++++++++++++++-- .evergreen/scripts/cleanup.sh | 4 +- .evergreen/scripts/configure-env.sh | 2 +- .../scripts/download-and-merge-coverage.sh | 2 +- .evergreen/scripts/install-dependencies.sh | 2 +- .evergreen/scripts/run-doctests.sh | 4 -- .evergreen/scripts/run-getdata.sh | 8 +++- .evergreen/scripts/setup-dev-env.sh | 2 +- .evergreen/scripts/setup-system.sh | 2 +- .evergreen/scripts/setup-tests.sh | 1 + .evergreen/scripts/stop-server.sh | 2 +- .evergreen/scripts/teardown-tests.sh | 1 + .evergreen/scripts/upload-coverage-report.sh | 3 +- .evergreen/setup-spawn-host.sh | 2 +- .evergreen/sync-spawn-host.sh | 2 + .evergreen/utils.sh | 2 +- tools/synchro.sh | 2 +- 23 files changed, 72 insertions(+), 64 deletions(-) delete mode 100755 .evergreen/run-import-time-test.sh delete mode 100755 .evergreen/scripts/run-doctests.sh diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh index c31f755bd9..36266c1842 100755 --- a/.evergreen/combine-coverage.sh +++ b/.evergreen/combine-coverage.sh @@ -3,8 +3,7 @@ # Coverage combine merges (and removes) all the coverage files and # generates a new .coverage file in the current directory. -set -o xtrace # Write all commands first to stderr -set -o errexit # Exit the script with error if any of the commands fail +set -eu . .evergreen/utils.sh diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index dca116c2d3..1f70940aa0 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -1,6 +1,6 @@ #!/bin/bash -# exit when any command fails -set -e +# Resync test files from the specifications repo. +set -eu PYMONGO=$(dirname "$(cd "$(dirname "$0")"; pwd)") SPECS=${MDB_SPECS:-~/Work/specifications} diff --git a/.evergreen/run-import-time-test.sh b/.evergreen/run-import-time-test.sh deleted file mode 100755 index 95e3c93d25..0000000000 --- a/.evergreen/run-import-time-test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -ex - -set -o errexit # Exit the script with error if any of the commands fail -set -x - -. .evergreen/utils.sh - -if [ -z "${PYTHON_BINARY:-}" ]; then - PYTHON_BINARY=$(find_python3) -fi - -# Use the previous commit if this was not a PR run. -if [ "$BASE_SHA" == "$HEAD_SHA" ]; then - BASE_SHA=$(git rev-parse HEAD~1) -fi - -function get_import_time() { - local log_file - createvirtualenv "$PYTHON_BINARY" import-venv - python -m pip install -q ".[aws,encryption,gssapi,ocsp,snappy,zstd]" - # Import once to cache modules - python -c "import pymongo" - log_file="pymongo-$1.log" - python -X importtime -c "import pymongo" 2> $log_file -} - -get_import_time $HEAD_SHA -git stash || true -git checkout $BASE_SHA -get_import_time $BASE_SHA -git checkout $HEAD_SHA -git stash apply || true -python tools/compare_import_time.py $HEAD_SHA $BASE_SHA diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 09fa571959..c55c423e49 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -1,7 +1,6 @@ #!/bin/bash - -# Don't trace since the URI contains a password that shouldn't show up in the logs -set -o errexit # Exit the script with error if any of the commands fail +# Script run on an ECS host to test MONGODB-AWS. +set -eu ############################################ # Main Program # diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index bd67106a36..a60b112bcb 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -1,6 +1,5 @@ #!/bin/bash - -set +x # Disable debug trace +# Script run on a remote host to test MONGODB-OIDC. set -eu echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}..." diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 04dd16d34f..2b7d856d41 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,4 +1,5 @@ #!/bin/bash +# Run a test suite that was configured with setup-tests.sh. set -eu SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) diff --git a/.evergreen/scripts/check-import-time.sh b/.evergreen/scripts/check-import-time.sh index cdd2025d59..f7a1117b97 100755 --- a/.evergreen/scripts/check-import-time.sh +++ b/.evergreen/scripts/check-import-time.sh @@ -1,7 +1,43 @@ #!/bin/bash +# Check for regressions in the import time of pymongo. +set -eu -. .evergreen/scripts/env.sh -set -x -export BASE_SHA="$1" -export HEAD_SHA="$2" -bash .evergreen/run-import-time-test.sh +HERE=$(dirname ${BASH_SOURCE:-$0}) + +source $HERE/env.sh + +pushd $HERE/../.. >/dev/null + +BASE_SHA="$1" +HEAD_SHA="$2" + +. .evergreen/utils.sh + +if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) +fi + +# Use the previous commit if this was not a PR run. +if [ "$BASE_SHA" == "$HEAD_SHA" ]; then + BASE_SHA=$(git rev-parse HEAD~1) +fi + +function get_import_time() { + local log_file + createvirtualenv "$PYTHON_BINARY" import-venv + python -m pip install -q ".[aws,encryption,gssapi,ocsp,snappy,zstd]" + # Import once to cache modules + python -c "import pymongo" + log_file="pymongo-$1.log" + python -X importtime -c "import pymongo" 2> $log_file +} + +get_import_time $HEAD_SHA +git stash || true +git checkout $BASE_SHA +get_import_time $BASE_SHA +git checkout $HEAD_SHA +git stash apply || true +python tools/compare_import_time.py $HEAD_SHA $BASE_SHA + +popd >/dev/null diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh index 6bb4b3ce5f..f04a936fd2 100755 --- a/.evergreen/scripts/cleanup.sh +++ b/.evergreen/scripts/cleanup.sh @@ -1,4 +1,6 @@ #!/bin/bash +# Clean up resources at the end of an evergreen run. +set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) @@ -9,4 +11,4 @@ if [ -f $HERE/env.sh ]; then fi rm -rf "${DRIVERS_TOOLS}" || true -rm -f ./secrets-export.sh || true +rm -f $HERE/../../secrets-export.sh || true diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 9ec98bb5be..81713f4191 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Configure an evergreen test environment. set -eu # Get the current unique version of this checkout diff --git a/.evergreen/scripts/download-and-merge-coverage.sh b/.evergreen/scripts/download-and-merge-coverage.sh index 808bb957ef..c006813ba9 100755 --- a/.evergreen/scripts/download-and-merge-coverage.sh +++ b/.evergreen/scripts/download-and-merge-coverage.sh @@ -1,4 +1,4 @@ #!/bin/bash - # Download all the task coverage files. +set -eu aws s3 cp --recursive s3://"$1"/coverage/"$2"/"$3"/coverage/ coverage/ diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 5ec06a87df..1347374bf5 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Install the dependencies needed for an evergreen run. set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) diff --git a/.evergreen/scripts/run-doctests.sh b/.evergreen/scripts/run-doctests.sh deleted file mode 100755 index 5950e2c107..0000000000 --- a/.evergreen/scripts/run-doctests.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -set -o xtrace -PYTHON_BINARY=${PYTHON_BINARY} bash "${PROJECT_DIRECTORY}"/.evergreen/just.sh docs-test diff --git a/.evergreen/scripts/run-getdata.sh b/.evergreen/scripts/run-getdata.sh index b2d6ecb476..9435a5fcc3 100755 --- a/.evergreen/scripts/run-getdata.sh +++ b/.evergreen/scripts/run-getdata.sh @@ -1,11 +1,14 @@ #!/bin/bash +# Get the debug data for an evergreen task. +set -eu -set -o xtrace -. ${DRIVERS_TOOLS}/.evergreen/download-mongodb.sh || true +. ${DRIVERS_TOOLS}/.evergreen/get-distro.sh || true get_distro || true echo $DISTRO echo $MARCH echo $OS + +set -x uname -a || true ls /etc/*release* || true cc --version || true @@ -20,3 +23,4 @@ ls -la /usr/local/Cellar/ || true scan-build --version || true genhtml --version || true valgrind --version || true +set +x diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index d9b88e3385..1db54156dd 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Set up a development environment on an evergreen host. set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh index 0ab08ff01c..d8552e0ad2 100755 --- a/.evergreen/scripts/setup-system.sh +++ b/.evergreen/scripts/setup-system.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Set up the system on an evergreen host. set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh index 8e073dcec9..0b75051a68 100755 --- a/.evergreen/scripts/setup-tests.sh +++ b/.evergreen/scripts/setup-tests.sh @@ -1,4 +1,5 @@ #!/bin/bash +# Set up the test environment, including secrets and services. set -eu # Supported/used environment variables: diff --git a/.evergreen/scripts/stop-server.sh b/.evergreen/scripts/stop-server.sh index 7db20d4bf3..7599387f5f 100755 --- a/.evergreen/scripts/stop-server.sh +++ b/.evergreen/scripts/stop-server.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Stop a server that was started using run-orchestration.sh in DRIVERS_TOOLS. set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh index f9e76a20cf..898425b6cf 100755 --- a/.evergreen/scripts/teardown-tests.sh +++ b/.evergreen/scripts/teardown-tests.sh @@ -1,4 +1,5 @@ #!/bin/bash +# Tear down any services that were used by tests. set -eu SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) diff --git a/.evergreen/scripts/upload-coverage-report.sh b/.evergreen/scripts/upload-coverage-report.sh index 71a2a80bb8..895664cbf2 100755 --- a/.evergreen/scripts/upload-coverage-report.sh +++ b/.evergreen/scripts/upload-coverage-report.sh @@ -1,3 +1,4 @@ #!/bin/bash - +# Upload a coverate report to s3. +set -eu aws s3 cp htmlcov/ s3://"$1"/coverage/"$2"/"$3"/htmlcov/ --recursive --acl public-read --region us-east-1 diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh index 4c8fa65e2e..bada61e568 100755 --- a/.evergreen/setup-spawn-host.sh +++ b/.evergreen/setup-spawn-host.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Set up a remote evergreen spawn host. set -eu if [ -z "$1" ] diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh index 3f6540ad4d..61dd84ec22 100755 --- a/.evergreen/sync-spawn-host.sh +++ b/.evergreen/sync-spawn-host.sh @@ -1,4 +1,6 @@ #!/bin/bash +# Synchronize local files to a remote Evergreen spawn host. +set -eu if [ -z "$1" ] then diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 8dc7dd72f0..17f5d8a1d1 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Utility functions used by pymongo evergreen scripts. set -eu find_python3() { diff --git a/tools/synchro.sh b/tools/synchro.sh index 51c51a9548..28b9c6d6c4 100755 --- a/tools/synchro.sh +++ b/tools/synchro.sh @@ -1,5 +1,5 @@ #!/bin/bash - +# Keep the synchronous folders in sync with there async counterparts. set -eu python ./tools/synchro.py "$@" From 894782e1b3f488f20c1f7ae706e0f8e82698b23b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Apr 2025 12:19:06 -0500 Subject: [PATCH 1843/2111] PYTHON-5255 Fix OIDC allowed_hosts test (#2251) --- .evergreen/utils.sh | 6 +++--- test/auth_oidc/test_auth_oidc.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 17f5d8a1d1..faecde05fd 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -6,12 +6,12 @@ find_python3() { PYTHON="" # Find a suitable toolchain version, if available. if [ "$(uname -s)" = "Darwin" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/Current/bin/python3" + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.9/bin/python3" elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - PYTHON="C:/python/Current/python.exe" + PYTHON="C:/python/Python39/python.exe" else # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.9+. - if [ -f "/opt/python/Current/bin/python3" ]; then + if [ -f "/opt/python/3.9/bin/python3" ]; then PYTHON="/opt/python/Current/bin/python3" elif is_python_39 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v5/bin/python3" diff --git a/test/auth_oidc/test_auth_oidc.py b/test/auth_oidc/test_auth_oidc.py index 0c8431a1e8..7dbf817cce 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/auth_oidc/test_auth_oidc.py @@ -242,9 +242,9 @@ def test_1_6_allowed_hosts_blocked(self): authmechanismproperties=props, connect=False, ) - # Assert that a find operation fails with a client-side error. - with self.assertRaises(ConfigurationError): - client.test.test.find_one() + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + client.test.test.find_one() # Close the client. client.close() From 7424f6c09812d6bca1320767f6e5473d094fb1a9 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 1 Apr 2025 11:41:33 -0700 Subject: [PATCH 1844/2111] PYTHON-5242 MongoClient does not define all attributes in __init__ (#2249) --- pymongo/asynchronous/mongo_client.py | 4 +++- pymongo/synchronous/mongo_client.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 16753420c0..98b8204d93 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -91,7 +91,7 @@ ) from pymongo.logger import _CLIENT_LOGGER, _log_client_error, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query -from pymongo.monitoring import ConnectionClosedReason +from pymongo.monitoring import ConnectionClosedReason, _EventListeners from pymongo.operations import ( DeleteMany, DeleteOne, @@ -759,6 +759,8 @@ def __init__( self._port = port self._topology: Topology = None # type: ignore[assignment] self._timeout: float | None = None + self._topology_settings: TopologySettings = None # type: ignore[assignment] + self._event_listeners: _EventListeners | None = None # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 2d8d6d730b..170798e9f9 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -83,7 +83,7 @@ ) from pymongo.logger import _CLIENT_LOGGER, _log_client_error, _log_or_warn from pymongo.message import _CursorAddress, _GetMore, _Query -from pymongo.monitoring import ConnectionClosedReason +from pymongo.monitoring import ConnectionClosedReason, _EventListeners from pymongo.operations import ( DeleteMany, DeleteOne, @@ -757,6 +757,8 @@ def __init__( self._port = port self._topology: Topology = None # type: ignore[assignment] self._timeout: float | None = None + self._topology_settings: TopologySettings = None # type: ignore[assignment] + self._event_listeners: _EventListeners | None = None # _pool_class, _monitor_class, and _condition_class are for deep # customization of PyMongo, e.g. Motor. From e724d66bb524052684c107db736d2b74e4098d66 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 1 Apr 2025 15:01:38 -0400 Subject: [PATCH 1845/2111] PYTHON-5250 - Change streams expanded events present by default in 8.2+ (#2252) --- test/asynchronous/test_change_stream.py | 9 ++- .../change-streams-disambiguatedPaths.json | 64 ------------------- .../unified/change-streams.json | 12 +++- test/test_change_stream.py | 9 ++- 4 files changed, 27 insertions(+), 67 deletions(-) diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 4025c13730..0260cb7a82 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -410,7 +410,14 @@ async def test_change_operations(self): expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} if async_client_context.version.at_least(4, 5, 0): expected_update_description["truncatedArrays"] = [] - self.assertEqual(expected_update_description, change["updateDescription"]) + self.assertEqual( + expected_update_description, + { + k: v + for k, v in change["updateDescription"].items() + if k in expected_update_description + }, + ) # Replace. await self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) change = await change_stream.next() diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json index e6cc5ef66e..a8667b5436 100644 --- a/test/change_streams/unified/change-streams-disambiguatedPaths.json +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -42,70 +42,6 @@ } ], "tests": [ - { - "description": "disambiguatedPaths is not present when showExpandedEvents is false/unset", - "operations": [ - { - "name": "insertOne", - "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "a": { - "1": 1 - } - } - } - }, - { - "name": "createChangeStream", - "object": "collection0", - "arguments": { - "pipeline": [] - }, - "saveResultAsEntity": "changeStream0" - }, - { - "name": "updateOne", - "object": "collection0", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "a.1": 2 - } - } - } - }, - { - "name": "iterateUntilDocumentOrError", - "object": "changeStream0", - "expectResult": { - "operationType": "update", - "ns": { - "db": "database0", - "coll": "collection0" - }, - "updateDescription": { - "updatedFields": { - "$$exists": true - }, - "removedFields": { - "$$exists": true - }, - "truncatedArrays": { - "$$exists": true - }, - "disambiguatedPaths": { - "$$exists": false - } - } - } - } - ] - }, { "description": "disambiguatedPaths is present on updateDescription when an ambiguous path is present", "operations": [ diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index c8b60ed4e2..a155d85b6e 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -181,7 +181,12 @@ "field": "array", "newSize": 2 } - ] + ], + "disambiguatedPaths": { + "$$unsetOrMatches": { + "$$exists": true + } + } } } } @@ -1408,6 +1413,11 @@ "$$unsetOrMatches": { "$$exists": true } + }, + "disambiguatedPaths": { + "$$unsetOrMatches": { + "$$exists": true + } } } } diff --git a/test/test_change_stream.py b/test/test_change_stream.py index e50f4667f6..6099829031 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -406,7 +406,14 @@ def test_change_operations(self): expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} if client_context.version.at_least(4, 5, 0): expected_update_description["truncatedArrays"] = [] - self.assertEqual(expected_update_description, change["updateDescription"]) + self.assertEqual( + expected_update_description, + { + k: v + for k, v in change["updateDescription"].items() + if k in expected_update_description + }, + ) # Replace. self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) change = change_stream.next() From 04ebbcde875e5fab4b39a31e1d7a0a6daf6c3027 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 2 Apr 2025 08:22:25 -0400 Subject: [PATCH 1846/2111] PYTHON-5259 - Better test assertions for error substrings (#2253) --- test/asynchronous/test_encryption.py | 4 ++-- test/asynchronous/test_pooling.py | 14 +++++++------- test/test_bson.py | 2 +- test/test_encryption.py | 4 ++-- test/test_pooling.py | 14 +++++++------- test/test_server.py | 2 +- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 3b9096ef6a..f9b03f6303 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -2692,7 +2692,7 @@ async def test_8_csfle_joins_qe(self): ] ) ) - self.assertTrue("not supported" in str(exc)) + self.assertIn("not supported", str(exc)) @async_client_context.require_version_max(8, 1, -1) async def test_9_error(self): @@ -2721,7 +2721,7 @@ async def test_9_error(self): ] ) ) - self.assertTrue("Upgrade" in str(exc)) + self.assertIn("Upgrade", str(exc)) # https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 81ed16f2d5..64c5738dba 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -450,7 +450,7 @@ async def test_csot_timeout_message(self): with timeout(0.5): await client.db.t.find_one({"$where": delay(2)}) - self.assertTrue("(configured timeouts: timeoutMS: 500.0ms" in str(error.exception)) + self.assertIn("(configured timeouts: timeoutMS: 500.0ms", str(error.exception)) @async_client_context.require_failCommand_appName async def test_socket_timeout_message(self): @@ -475,9 +475,9 @@ async def test_socket_timeout_message(self): with self.assertRaises(Exception) as error: await client.db.t.find_one({"$where": delay(2)}) - self.assertTrue( - "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)" - in str(error.exception) + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)", + str(error.exception), ) @async_client_context.require_failCommand_appName @@ -507,9 +507,9 @@ async def test_connection_timeout_message(self): with self.assertRaises(Exception) as error: await client.admin.command("ping") - self.assertTrue( - "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)" - in str(error.exception) + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)", + str(error.exception), ) diff --git a/test/test_bson.py b/test/test_bson.py index 6f26856b00..1616c513c2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -558,7 +558,7 @@ def test_unknown_type(self): decode(bs) except Exception as exc: self.assertTrue(isinstance(exc, InvalidBSON)) - self.assertTrue(part in str(exc)) + self.assertIn(part, str(exc)) else: self.fail("Failed to raise an exception.") diff --git a/test/test_encryption.py b/test/test_encryption.py index 6d669a538d..5bbf8c8ad8 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2676,7 +2676,7 @@ def test_8_csfle_joins_qe(self): ] ) ) - self.assertTrue("not supported" in str(exc)) + self.assertIn("not supported", str(exc)) @client_context.require_version_max(8, 1, -1) def test_9_error(self): @@ -2705,7 +2705,7 @@ def test_9_error(self): ] ) ) - self.assertTrue("Upgrade" in str(exc)) + self.assertIn("Upgrade", str(exc)) # https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap diff --git a/test/test_pooling.py b/test/test_pooling.py index 44e8c4afe5..05513afe12 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -450,7 +450,7 @@ def test_csot_timeout_message(self): with timeout(0.5): client.db.t.find_one({"$where": delay(2)}) - self.assertTrue("(configured timeouts: timeoutMS: 500.0ms" in str(error.exception)) + self.assertIn("(configured timeouts: timeoutMS: 500.0ms", str(error.exception)) @client_context.require_failCommand_appName def test_socket_timeout_message(self): @@ -473,9 +473,9 @@ def test_socket_timeout_message(self): with self.assertRaises(Exception) as error: client.db.t.find_one({"$where": delay(2)}) - self.assertTrue( - "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)" - in str(error.exception) + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)", + str(error.exception), ) @client_context.require_failCommand_appName @@ -505,9 +505,9 @@ def test_connection_timeout_message(self): with self.assertRaises(Exception) as error: client.admin.command("ping") - self.assertTrue( - "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)" - in str(error.exception) + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)", + str(error.exception), ) diff --git a/test/test_server.py b/test/test_server.py index 45d01c10de..ab5a40a79b 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -31,7 +31,7 @@ def test_repr(self): hello = Hello({"ok": 1}) sd = ServerDescription(("localhost", 27017), hello) server = Server(sd, pool=object(), monitor=object()) # type: ignore[arg-type] - self.assertTrue("Standalone" in str(server)) + self.assertIn("Standalone", str(server)) if __name__ == "__main__": From 3210b175dd672ad20623d7cc2994594cd7d94991 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 2 Apr 2025 08:29:23 -0400 Subject: [PATCH 1847/2111] PYTHON-4557 - Add log message for retried commands (#2248) --- pymongo/asynchronous/mongo_client.py | 27 ++++++++++++++++++++++++++- pymongo/synchronous/mongo_client.py | 27 ++++++++++++++++++++++++++- test/asynchronous/test_logger.py | 18 +++++++++++++++++- test/test_logger.py | 18 +++++++++++++++++- 4 files changed, 86 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 98b8204d93..a0ff8741a5 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -89,7 +89,13 @@ _async_create_lock, _release_locks, ) -from pymongo.logger import _CLIENT_LOGGER, _log_client_error, _log_or_warn +from pymongo.logger import ( + _CLIENT_LOGGER, + _COMMAND_LOGGER, + _debug_log, + _log_client_error, + _log_or_warn, +) from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason, _EventListeners from pymongo.operations import ( @@ -2686,6 +2692,7 @@ def __init__( self._deprioritized_servers: list[Server] = [] self._operation = operation self._operation_id = operation_id + self._attempt_number = 0 async def run(self) -> T: """Runs the supplied func() and attempts a retry @@ -2728,6 +2735,7 @@ async def run(self) -> T: raise self._retrying = True self._last_error = exc + self._attempt_number += 1 else: raise @@ -2749,6 +2757,7 @@ async def run(self) -> T: raise self._last_error from exc else: raise + self._attempt_number += 1 if self._bulk: self._bulk.retrying = True else: @@ -2827,6 +2836,14 @@ async def _write(self) -> T: # not support sessions raise the last error. self._check_last_error() self._retryable = False + if self._retrying: + _debug_log( + _COMMAND_LOGGER, + message=f"Retrying write attempt number {self._attempt_number}", + clientId=self._client.client_id, + commandName=self._operation, + operationId=self._operation_id, + ) return await self._func(self._session, conn, self._retryable) # type: ignore except PyMongoError as exc: if not self._retryable: @@ -2848,6 +2865,14 @@ async def _read(self) -> T: ): if self._retrying and not self._retryable: self._check_last_error() + if self._retrying: + _debug_log( + _COMMAND_LOGGER, + message=f"Retrying read attempt number {self._attempt_number}", + clientId=self._client._topology_settings._topology_id, + commandName=self._operation, + operationId=self._operation_id, + ) return await self._func(self._session, self._server, conn, read_pref) # type: ignore diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 170798e9f9..a674bfb667 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -81,7 +81,13 @@ _create_lock, _release_locks, ) -from pymongo.logger import _CLIENT_LOGGER, _log_client_error, _log_or_warn +from pymongo.logger import ( + _CLIENT_LOGGER, + _COMMAND_LOGGER, + _debug_log, + _log_client_error, + _log_or_warn, +) from pymongo.message import _CursorAddress, _GetMore, _Query from pymongo.monitoring import ConnectionClosedReason, _EventListeners from pymongo.operations import ( @@ -2672,6 +2678,7 @@ def __init__( self._deprioritized_servers: list[Server] = [] self._operation = operation self._operation_id = operation_id + self._attempt_number = 0 def run(self) -> T: """Runs the supplied func() and attempts a retry @@ -2714,6 +2721,7 @@ def run(self) -> T: raise self._retrying = True self._last_error = exc + self._attempt_number += 1 else: raise @@ -2735,6 +2743,7 @@ def run(self) -> T: raise self._last_error from exc else: raise + self._attempt_number += 1 if self._bulk: self._bulk.retrying = True else: @@ -2813,6 +2822,14 @@ def _write(self) -> T: # not support sessions raise the last error. self._check_last_error() self._retryable = False + if self._retrying: + _debug_log( + _COMMAND_LOGGER, + message=f"Retrying write attempt number {self._attempt_number}", + clientId=self._client.client_id, + commandName=self._operation, + operationId=self._operation_id, + ) return self._func(self._session, conn, self._retryable) # type: ignore except PyMongoError as exc: if not self._retryable: @@ -2834,6 +2851,14 @@ def _read(self) -> T: ): if self._retrying and not self._retryable: self._check_last_error() + if self._retrying: + _debug_log( + _COMMAND_LOGGER, + message=f"Retrying read attempt number {self._attempt_number}", + clientId=self._client._topology_settings._topology_id, + commandName=self._operation, + operationId=self._operation_id, + ) return self._func(self._session, self._server, conn, read_pref) # type: ignore diff --git a/test/asynchronous/test_logger.py b/test/asynchronous/test_logger.py index a2e8b35c5f..92c29e1117 100644 --- a/test/asynchronous/test_logger.py +++ b/test/asynchronous/test_logger.py @@ -15,7 +15,7 @@ import os from test import unittest -from test.asynchronous import AsyncIntegrationTest +from test.asynchronous import AsyncIntegrationTest, async_client_context from unittest.mock import patch from bson import json_util @@ -97,6 +97,22 @@ async def test_logging_without_listeners(self): await c.db.test.insert_one({"x": "1"}) self.assertGreater(len(cm.records), 0) + @async_client_context.require_failCommand_fail_point + async def test_logging_retry_read_attempts(self): + await self.db.test.insert_one({"x": "1"}) + + async with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.find_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() + ] + print(retry_messages) + self.assertEqual(len(retry_messages), 1) + if __name__ == "__main__": unittest.main() diff --git a/test/test_logger.py b/test/test_logger.py index b3c8e6d176..398f768c9d 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -14,7 +14,7 @@ from __future__ import annotations import os -from test import IntegrationTest, unittest +from test import IntegrationTest, client_context, unittest from unittest.mock import patch from bson import json_util @@ -96,6 +96,22 @@ def test_logging_without_listeners(self): c.db.test.insert_one({"x": "1"}) self.assertGreater(len(cm.records), 0) + @client_context.require_failCommand_fail_point + def test_logging_retry_read_attempts(self): + self.db.test.insert_one({"x": "1"}) + + with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.find_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() + ] + print(retry_messages) + self.assertEqual(len(retry_messages), 1) + if __name__ == "__main__": unittest.main() From 61033760e53af9bcae29390d1e2f2aa7c337fa1c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Apr 2025 09:53:49 -0500 Subject: [PATCH 1848/2111] PYTHON-5260 Fix OCSP test setup (#2254) --- .evergreen/config.yml | 4 +- .evergreen/generated_configs/tasks.yml | 1934 +++++++++++++++++++-- .evergreen/generated_configs/variants.yml | 140 +- .evergreen/scripts/generate_config.py | 81 +- .evergreen/scripts/run_server.py | 10 - .evergreen/scripts/setup-dev-env.sh | 36 +- .evergreen/scripts/setup_tests.py | 26 +- .evergreen/scripts/utils.py | 4 +- CONTRIBUTING.md | 4 +- test/ocsp/test_ocsp.py | 10 +- 10 files changed, 1869 insertions(+), 380 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 887bdfcd9a..1d9de12c35 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -207,7 +207,7 @@ functions: binary: bash working_dir: "src" include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, PYTHON_VERSION, - STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER] + STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER, LOCAL_ATLAS] args: [.evergreen/just.sh, run-server, "${TEST_NAME}"] - command: expansions.update params: @@ -229,7 +229,7 @@ functions: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, PYTHON_VERSION, DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG, - ORCHESTRATION_FILE, OCSP_SERVER_TYPE] + ORCHESTRATION_FILE, OCSP_SERVER_TYPE, VERSION] binary: bash working_dir: "src" args: [.evergreen/just.sh, setup-tests, "${TEST_NAME}", "${SUB_TEST_NAME}"] diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 3450e491a7..e70f0cbdfb 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1088,294 +1088,1910 @@ tasks: tags: [no-server] # Ocsp tests - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v5.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: revoked + OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v6.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: valid-delegate + OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v7.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: revoked-delegate + OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-soft-fail + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v8.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: no-responder + OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-valid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-rapid-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa, ocsp-staple] - - name: test-ocsp-ecdsa-invalid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.13 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa, ocsp-staple] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v5.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json - OCSP_SERVER_TYPE: valid-delegate + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa, ocsp-staple] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v6.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json - OCSP_SERVER_TYPE: revoked-delegate + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa, ocsp-staple] - - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v7.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v8.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json - OCSP_SERVER_TYPE: revoked-delegate + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-rapid-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.13 + commands: - func: run tests vars: - ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json - OCSP_SERVER_TYPE: no-responder + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - tags: [ocsp, ocsp-ecdsa] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: valid + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: revoked + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-rapid-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.13 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-soft-fail + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json - OCSP_SERVER_TYPE: no-responder + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-valid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json - OCSP_SERVER_TYPE: valid + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa, ocsp-staple] - - name: test-ocsp-rsa-invalid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json - OCSP_SERVER_TYPE: revoked + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa, ocsp-staple] - - name: test-ocsp-rsa-delegate-valid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.9 commands: - - func: run server - vars: - TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json - OCSP_SERVER_TYPE: valid-delegate + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa, ocsp-staple] - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.13 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa, ocsp-staple] - - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-soft-fail-v5.0-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json - OCSP_SERVER_TYPE: revoked + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-soft-fail-v6.0-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-soft-fail-v7.0-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json - OCSP_SERVER_TYPE: revoked-delegate + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] - - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-soft-fail-v8.0-python3.9 commands: - - func: run server + - func: run tests vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-soft-fail-rapid-python3.9 + commands: - func: run tests vars: - ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - tags: [ocsp, ocsp-rsa] + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-soft-fail-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-soft-fail-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-soft-fail-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-soft-fail-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-soft-fail-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-soft-fail-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-soft-fail-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-soft-fail-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.9 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.9" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.13 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.13" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] # Oidc tests - name: test-auth-oidc-default diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 4b3b8e28b0..892e50f9da 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -729,149 +729,29 @@ buildvariants: - rhel87-small # Ocsp tests - - name: ocsp-rhel8-v4.4-python3.9 + - name: ocsp-rhel8 tasks: - name: .ocsp - display_name: OCSP RHEL8 v4.4 Python3.9 + display_name: OCSP RHEL8 run_on: - rhel87-small batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.4" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: ocsp-rhel8-v5.0-python3.10 - tasks: - - name: .ocsp - display_name: OCSP RHEL8 v5.0 Python3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "5.0" - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: ocsp-rhel8-v6.0-python3.11 - tasks: - - name: .ocsp - display_name: OCSP RHEL8 v6.0 Python3.11 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "6.0" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: ocsp-rhel8-v7.0-python3.12 - tasks: - - name: .ocsp - display_name: OCSP RHEL8 v7.0 Python3.12 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "7.0" - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: ocsp-rhel8-v8.0-python3.13 + - name: ocsp-win64 tasks: - - name: .ocsp - display_name: OCSP RHEL8 v8.0 Python3.13 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "8.0" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: ocsp-rhel8-rapid-pypy3.10 - tasks: - - name: .ocsp - display_name: OCSP RHEL8 rapid PyPy3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: rapid - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: ocsp-rhel8-latest-python3.9 - tasks: - - name: .ocsp - display_name: OCSP RHEL8 latest Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: latest - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: ocsp-win64-v4.4-python3.9 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP Win64 v4.4 Python3.9 + - name: .ocsp-rsa !.ocsp-staple .latest + - name: .ocsp-rsa !.ocsp-staple .4.4 + display_name: OCSP Win64 run_on: - windows-64-vsMulti-small batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.4" - PYTHON_BINARY: C:/python/Python39/python.exe - - name: ocsp-win64-v8.0-python3.13 + - name: ocsp-macos tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP Win64 v8.0 Python3.13 - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "8.0" - PYTHON_BINARY: C:/python/Python313/python.exe - - name: ocsp-macos-v4.4-python3.9 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP macOS v4.4 Python3.9 + - name: .ocsp-rsa !.ocsp-staple .latest + - name: .ocsp-rsa !.ocsp-staple .4.4 + display_name: OCSP macOS run_on: - macos-14 batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "4.4" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: ocsp-macos-v8.0-python3.13 - tasks: - - name: .ocsp-rsa !.ocsp-staple - display_name: OCSP macOS v8.0 Python3.13 - run_on: - - macos-14 - batchtime: 10080 - expansions: - AUTH: noauth - SSL: ssl - TOPOLOGY: server - VERSION: "8.0" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 # Oidc auth tests - name: auth-oidc-ubuntu-22 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index d1880c7644..419bead92b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -249,41 +249,22 @@ def generate_yaml(tasks=None, variants=None): def create_ocsp_variants() -> list[BuildVariant]: variants = [] - batchtime = BATCHTIME_WEEK - expansions = dict(AUTH="noauth", SSL="ssl", TOPOLOGY="server") - base_display = "OCSP" - - # OCSP tests on default host with all servers v4.4+ and all python versions. - versions = get_versions_from("4.4") - for version, python in zip_cycle(versions, ALL_PYTHONS): - host = DEFAULT_HOST - variant = create_variant( - [".ocsp"], - get_variant_name(base_display, host, version=version, python=python), - python=python, - version=version, - host=host, - expansions=expansions, - batchtime=batchtime, - ) - variants.append(variant) - - # OCSP tests on Windows and MacOS. - # MongoDB servers on these hosts do not staple OCSP responses and only support RSA. - for host_name, version in product(["win64", "macos"], ["4.4", "8.0"]): + # OCSP tests on default host with all servers v4.4+. + # MongoDB servers on Windows and MacOS do not staple OCSP responses and only support RSA. + # Only test with MongoDB 4.4 and latest. + for host_name in ["rhel8", "win64", "macos"]: host = HOSTS[host_name] - python = CPYTHONS[0] if version == "4.4" else CPYTHONS[-1] + if host == DEFAULT_HOST: + tasks = [".ocsp"] + else: + tasks = [".ocsp-rsa !.ocsp-staple .latest", ".ocsp-rsa !.ocsp-staple .4.4"] variant = create_variant( - [".ocsp-rsa !.ocsp-staple"], - get_variant_name(base_display, host, version=version, python=python), - python=python, - version=version, + tasks, + get_variant_name("OCSP", host), host=host, - expansions=expansions, - batchtime=batchtime, + batchtime=BATCHTIME_WEEK, ) variants.append(variant) - return variants @@ -965,22 +946,34 @@ def create_mod_wsgi_tasks(): return tasks -def _create_ocsp_task(algo, variant, server_type, base_task_name): +def _create_ocsp_tasks(algo, variant, server_type, base_task_name): + tasks = [] file_name = f"{algo}-basic-tls-ocsp-{variant}.json" - vars = dict(TEST_NAME="ocsp", ORCHESTRATION_FILE=file_name) - server_func = FunctionCall(func="run server", vars=vars) + for version in get_versions_from("4.4"): + if version == "latest": + python = MIN_MAX_PYTHON[-1] + else: + python = MIN_MAX_PYTHON[0] - vars = dict(ORCHESTRATION_FILE=file_name, OCSP_SERVER_TYPE=server_type, TEST_NAME="ocsp") - test_func = FunctionCall(func="run tests", vars=vars) + vars = dict( + ORCHESTRATION_FILE=file_name, + OCSP_SERVER_TYPE=server_type, + TEST_NAME="ocsp", + PYTHON_VERSION=python, + VERSION=version, + ) + test_func = FunctionCall(func="run tests", vars=vars) - tags = ["ocsp", f"ocsp-{algo}"] - if "disableStapling" not in variant: - tags.append("ocsp-staple") + tags = ["ocsp", f"ocsp-{algo}", version] + if "disableStapling" not in variant: + tags.append("ocsp-staple") - task_name = f"test-ocsp-{algo}-{base_task_name}" - commands = [server_func, test_func] - return EvgTask(name=task_name, tags=tags, commands=commands) + task_name = get_task_name( + f"test-ocsp-{algo}-{base_task_name}", python=python, version=version + ) + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + return tasks def create_aws_lambda_tasks(): @@ -1092,8 +1085,8 @@ def create_ocsp_tasks(): ] for algo in ["ecdsa", "rsa"]: for variant, server_type, base_task_name in tests: - task = _create_ocsp_task(algo, variant, server_type, base_task_name) - tasks.append(task) + new_tasks = _create_ocsp_tasks(algo, variant, server_type, base_task_name) + tasks.extend(new_tasks) return tasks @@ -1182,7 +1175,7 @@ def write_tasks_to_file(): fid.write("tasks:\n") for name, func in sorted(getmembers(mod, isfunction)): - if not name.endswith("_tasks"): + if name.startswith("_") or not name.endswith("_tasks"): continue if not name.startswith("create_"): raise ValueError("Task creators must start with create_") diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py index 5d9aa54e11..a35fbb57a8 100644 --- a/.evergreen/scripts/run_server.py +++ b/.evergreen/scripts/run_server.py @@ -28,16 +28,6 @@ def start_server(): elif test_name == "load_balancer": set_env("LOAD_BALANCER") - elif test_name == "ocsp": - opts.ssl = True - if "ORCHESTRATION_FILE" not in os.environ: - found = False - for opt in extra_opts: - if opt.startswith("--orchestration-file"): - found = True - if not found: - raise ValueError("Please provide an orchestration file") - elif test_name == "search_index": os.environ["TOPOLOGY"] = "replica_set" os.environ["MONGODB_VERSION"] = "7.0" diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 1db54156dd..3051c9aad7 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -19,28 +19,20 @@ fi # Ensure dependencies are installed. bash $HERE/install-dependencies.sh -# Set the location of the python bin dir. -if [ "Windows_NT" = "${OS:-}" ]; then - BIN_DIR=.venv/Scripts -else - BIN_DIR=.venv/bin -fi - -# Ensure there is a python venv. -if [ ! -d $BIN_DIR ]; then - . $ROOT/.evergreen/utils.sh - - if [ -z "${PYTHON_BINARY:-}" ]; then - if [ -n "${PYTHON_VERSION:-}" ]; then - PYTHON_BINARY=$(get_python_binary $PYTHON_VERSION) - else - PYTHON_BINARY=$(find_python3) - fi - fi - export UV_PYTHON=${PYTHON_BINARY} - echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh - echo "Using python $UV_PYTHON" -fi +# Get the appropriate UV_PYTHON. +. $ROOT/.evergreen/utils.sh +set -x + +if [ -z "${PYTHON_BINARY:-}" ]; then + if [ -n "${PYTHON_VERSION:-}" ]; then + PYTHON_BINARY=$(get_python_binary $PYTHON_VERSION) + else + PYTHON_BINARY=$(find_python3) + fi +fi +export UV_PYTHON=${PYTHON_BINARY} +echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh +echo "Using python $UV_PYTHON" # Add the default install path to the path if needed. if [ -z "${PYMONGO_BIN_DIR:-}" ]; then diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 8f4299a6d0..fc2cadf61d 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -142,7 +142,6 @@ def handle_test_env() -> None: test_title = test_name if sub_test_name: test_title += f" {sub_test_name}" - LOGGER.info(f"Setting up '{test_title}' with {AUTH=} and {SSL=}...") # Create the test env file with the initial set of values. with ENV_FILE.open("w", newline="\n") as fid: @@ -150,8 +149,6 @@ def handle_test_env() -> None: fid.write("set +x\n") ENV_FILE.chmod(ENV_FILE.stat().st_mode | stat.S_IEXEC) - write_env("AUTH", AUTH) - write_env("SSL", SSL) write_env("PIP_QUIET") # Quiet by default. write_env("PIP_PREFER_BINARY") # Prefer binary dists by default. write_env("UV_FROZEN") # Do not modify lock files. @@ -197,6 +194,13 @@ def handle_test_env() -> None: if test_name == "search_index": AUTH = "auth" + if test_name == "ocsp": + SSL = "ssl" + + write_env("AUTH", AUTH) + write_env("SSL", SSL) + LOGGER.info(f"Setting up '{test_title}' with {AUTH=} and {SSL=}...") + if test_name == "aws_lambda": UV_ARGS.append("--group pip") # Store AWS creds if they were given. @@ -318,6 +322,22 @@ def handle_test_env() -> None: env["OCSP_ALGORITHM"] = ocsp_algo run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh", env=env) + # The mock OCSP responder MUST BE started before the mongod as the mongod expects that + # a responder will be available upon startup. + version = os.environ.get("VERSION", "latest") + cmd = [ + "bash", + f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", + "--ssl", + "--version", + version, + ] + if opts.verbose: + cmd.append("-v") + elif opts.quiet: + cmd.append("-q") + run_command(cmd, cwd=DRIVERS_TOOLS) + if SSL != "nossl": if not DRIVERS_TOOLS: raise RuntimeError("Missing DRIVERS_TOOLS") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 3eb44f2ab9..214a1fc347 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -54,8 +54,8 @@ class Distro: EXTRA_TESTS = ["mod_wsgi", "aws_lambda"] -# Tests that do not use run-orchestration. -NO_RUN_ORCHESTRATION = ["auth_oidc", "atlas_connect", "data_lake", "mockupdb", "serverless"] +# Tests that do not use run-orchestration directly. +NO_RUN_ORCHESTRATION = ["auth_oidc", "atlas_connect", "data_lake", "mockupdb", "serverless", "ocsp"] def get_test_options( diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c31d2d1c96..60583022b7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -335,7 +335,9 @@ You must have `docker` or `podman` installed locally. - Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. This corresponds to a config file in `$DRIVERS_TOOLS/.evergreen/orchestration/configs/servers`. MongoDB servers on MacOS and Windows do not staple OCSP responses and only support RSA. -- Run `just run-server ocsp`. +NOTE: because the mock ocsp responder MUST be started prior to the server starting, the ocsp tests start the server +as part of `setup-tests`. + - Run `just setup-tests ocsp ` (options are "valid", "revoked", "valid-delegate", "revoked-delegate"). - Run `just run-tests` diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index a42b3a34ee..b20eaa35d6 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -19,6 +19,7 @@ import os import sys import unittest +from pathlib import Path import pytest @@ -38,15 +39,10 @@ FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) -if sys.platform == "win32": - # The non-stapled OCSP endpoint check is slow on Windows. - TIMEOUT_MS = 5000 -else: - TIMEOUT_MS = 500 - def _connect(options): - uri = f"mongodb://localhost:27017/?serverSelectionTimeoutMS={TIMEOUT_MS}&tlsCAFile={CA_FILE}&{options}" + assert CA_FILE is not None + uri = f"mongodb://localhost:27017/?serverSelectionTimeoutMS=10000&tlsCAFile={Path(CA_FILE).as_posix()}&{options}" print(uri) try: client = pymongo.MongoClient(uri) From 7243b43e635d30da8816e4bbe5b6b30cb96fa807 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Apr 2025 10:42:43 -0500 Subject: [PATCH 1849/2111] PYTHON-5245 Convert remaining tasks to generated config (#2255) --- .evergreen/config.yml | 60 -------------------------- .evergreen/generated_configs/tasks.yml | 58 +++++++++++++++++++++++++ .evergreen/scripts/generate_config.py | 54 ++++++++++++++++++++++- .pre-commit-config.yaml | 2 +- 4 files changed, 111 insertions(+), 63 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 1d9de12c35..a297c49162 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -290,63 +290,3 @@ post: - func: "upload mo artifacts" - func: "upload test results" - func: "cleanup" - -tasks: - # Wildcard task. Do you need to find out what tools are available and where? - # Throw it here, and execute this task on all buildvariants - - name: getdata - commands: - - command: subprocess.exec - binary: bash - type: test - params: - args: - - src/.evergreen/scripts/run-getdata.sh - - - name: "coverage-report" - tags: ["coverage"] - depends_on: - # BUILD-3165: We can't use "*" (all tasks) and specify "variant". - # Instead list out all coverage tasks using tags. - - name: ".standalone" - variant: ".coverage_tag" - # Run the coverage task even if some tasks fail. - status: "*" - # Run the coverage task even if some tasks are not scheduled in a patch build. - patch_optional: true - - name: ".replica_set" - variant: ".coverage_tag" - status: "*" - patch_optional: true - - name: ".sharded_cluster" - variant: ".coverage_tag" - status: "*" - patch_optional: true - commands: - - func: "download and merge coverage" - - - name: "check-import-time" - tags: ["pr"] - commands: - - command: subprocess.exec - type: test - params: - binary: bash - working_dir: src - include_expansions_in_env: ["PYTHON_BINARY"] - args: - - .evergreen/scripts/check-import-time.sh - - ${revision} - - ${github.amrom.workers.devmit} - - name: "backport-pr" - allowed_requesters: ["commit"] - commands: - - command: subprocess.exec - type: test - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh - - mongodb - - mongo-python-driver - - ${github.amrom.workers.devmit} diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index e70f0cbdfb..c42cf444b1 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -713,6 +713,39 @@ tasks: AWS_ROLE_SESSION_NAME: test tags: [auth-aws, auth-aws-web-identity] + # Backport pr tests + - name: backport-pr + commands: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh + - mongodb + - mongo-python-driver + - ${github.amrom.workers.devmit} + working_dir: src + type: test + + # Coverage report tests + - name: coverage-report + commands: + - func: download and merge coverage + depends_on: + - name: .standalone + variant: .coverage_tag + status: "*" + patch_optional: true + - name: .replica_set + variant: .coverage_tag + status: "*" + patch_optional: true + - name: .sharded_cluster + variant: .coverage_tag + status: "*" + patch_optional: true + tags: [coverage] + # Doctest tests - name: test-doctests commands: @@ -776,6 +809,31 @@ tasks: - func: run tests tags: [free-threading] + # Getdata tests + - name: getdata + commands: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/run-getdata.sh + working_dir: src + type: test + + # Import time tests + - name: check-import-time + commands: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/check-import-time.sh + - ${revision} + - ${github.amrom.workers.devmit} + working_dir: src + type: test + tags: [pr] + # Kms tests - name: test-gcpkms commands: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 419bead92b..824786ed47 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -9,9 +9,9 @@ from typing import Any from shrub.v3.evg_build_variant import BuildVariant -from shrub.v3.evg_command import FunctionCall +from shrub.v3.evg_command import EvgCommandType, FunctionCall, subprocess_exec from shrub.v3.evg_project import EvgProject -from shrub.v3.evg_task import EvgTask, EvgTaskRef +from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef from shrub.v3.shrub_service import ShrubService ############## @@ -233,6 +233,13 @@ def handle_c_ext(c_ext, expansions) -> None: expansions["NO_EXT"] = "1" +def get_subprocess_exec(**kwargs): + kwargs.setdefault("binary", "bash") + kwargs.setdefault("working_dir", "src") + kwargs.setdefault("command_type", EvgCommandType.TEST) + return subprocess_exec(**kwargs) + + def generate_yaml(tasks=None, variants=None): """Generate the yaml for a given set of tasks and variants.""" project = EvgProject(tasks=tasks, buildvariants=variants) @@ -1055,6 +1062,49 @@ def create_atlas_data_lake_tasks(): return tasks +def create_getdata_tasks(): + # Wildcard task. Do you need to find out what tools are available and where? + # Throw it here, and execute this task on all buildvariants + cmd = get_subprocess_exec(args=[".evergreen/scripts/run-getdata.sh"]) + return [EvgTask(name="getdata", commands=[cmd])] + + +def create_coverage_report_tasks(): + tags = ["coverage"] + task_name = "coverage-report" + # BUILD-3165: We can't use "*" (all tasks) and specify "variant". + # Instead list out all coverage tasks using tags. + # Run the coverage task even if some tasks fail. + # Run the coverage task even if some tasks are not scheduled in a patch build. + task_deps = [] + for name in [".standalone", ".replica_set", ".sharded_cluster"]: + task_deps.append( + EvgTaskDependency(name=name, variant=".coverage_tag", status="*", patch_optional=True) + ) + cmd = FunctionCall(func="download and merge coverage") + return [EvgTask(name=task_name, tags=tags, depends_on=task_deps, commands=[cmd])] + + +def create_import_time_tasks(): + name = "check-import-time" + tags = ["pr"] + args = [".evergreen/scripts/check-import-time.sh", "${revision}", "${github.amrom.workers.devmit}"] + cmd = get_subprocess_exec(args=args) + return [EvgTask(name=name, tags=tags, commands=[cmd])] + + +def create_backport_pr_tasks(): + name = "backport-pr" + args = [ + "${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh", + "mongodb", + "mongo-python-driver", + "${github.amrom.workers.devmit}", + ] + cmd = get_subprocess_exec(args=args) + return [EvgTask(name=name, commands=[cmd], allowed_requesters=["commit"])] + + def create_ocsp_tasks(): tasks = [] tests = [ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bab2ea47da..deab0724a4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -121,4 +121,4 @@ repos: entry: .evergreen/scripts/generate-config.sh language: python require_serial: true - additional_dependencies: ["shrub.py>=3.2.0", "pyyaml>=6.0.2"] + additional_dependencies: ["shrub.py>=3.8.0", "pyyaml>=6.0.2"] From 5177e4ec5305ffff983449ad41e8da84b782a283 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Apr 2025 14:20:44 -0500 Subject: [PATCH 1850/2111] PYTHON-5261 Clean up compression variants (#2257) --- .evergreen/generated_configs/tasks.yml | 93 ++++++++++++++++++++++- .evergreen/generated_configs/variants.yml | 84 +++----------------- .evergreen/scripts/generate_config.py | 83 +++++++++++--------- 3 files changed, 151 insertions(+), 109 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index c42cf444b1..b2b8dc1191 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -9,14 +9,14 @@ tasks: tags: [atlas_connect] # Atlas data lake tests - - name: test-atlas-data-lake-with_ext + - name: test-atlas-data-lake-without_ext commands: - func: run tests vars: TEST_NAME: data_lake NO_EXT: "1" tags: [atlas_data_lake] - - name: test-atlas-data-lake-without_ext + - name: test-atlas-data-lake-with_ext commands: - func: run tests vars: @@ -727,6 +727,95 @@ tasks: working_dir: src type: test + # Compression tests + - name: test-compression-v4.0-python3.9 + commands: + - func: run server + vars: + VERSION: "4.0" + - func: run tests + tags: [compression, "4.0"] + - name: test-compression-v4.2-python3.9 + commands: + - func: run server + vars: + VERSION: "4.2" + - func: run tests + tags: [compression, "4.2"] + - name: test-compression-v4.4-python3.9 + commands: + - func: run server + vars: + VERSION: "4.4" + - func: run tests + tags: [compression, "4.4"] + - name: test-compression-v5.0-python3.9 + commands: + - func: run server + vars: + VERSION: "5.0" + - func: run tests + tags: [compression, "5.0"] + - name: test-compression-v6.0-python3.9 + commands: + - func: run server + vars: + VERSION: "6.0" + - func: run tests + tags: [compression, "6.0"] + - name: test-compression-v7.0-python3.9 + commands: + - func: run server + vars: + VERSION: "7.0" + - func: run tests + tags: [compression, "7.0"] + - name: test-compression-v8.0-python3.9 + commands: + - func: run server + vars: + VERSION: "8.0" + - func: run tests + tags: [compression, "8.0"] + - name: test-compression-rapid-python3.9 + commands: + - func: run server + vars: + VERSION: rapid + - func: run tests + tags: [compression, rapid] + - name: test-compression-latest-python3.9 + commands: + - func: run server + vars: + VERSION: latest + - func: run tests + tags: [compression, latest] + - name: test-compression-latest-python3.13-no-c + commands: + - func: run server + vars: + VERSION: latest + - func: run tests + vars: + NO_EXT: "1" + tags: [compression, latest] + - name: test-compression-latest-python3.13 + commands: + - func: run server + vars: + VERSION: latest + - func: run tests + vars: {} + tags: [compression, latest] + - name: test-compression-latest-pypy3.10 + commands: + - func: run server + vars: + VERSION: latest + - func: run tests + tags: [compression, latest] + # Coverage report tests - name: coverage-report commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 892e50f9da..7082dda44d 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -160,90 +160,30 @@ buildvariants: - rhel87-small # Compression tests - - name: compression-snappy-rhel8-python3.9-no-c + - name: compression-snappy-rhel8 tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 Python3.9 No C - run_on: - - rhel87-small - expansions: - COMPRESSORS: snappy - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: compression-snappy-rhel8-python3.10 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 Python3.10 - run_on: - - rhel87-small - expansions: - COMPRESSORS: snappy - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: compression-zlib-rhel8-python3.11-no-c - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Compression zlib RHEL8 Python3.11 No C - run_on: - - rhel87-small - expansions: - COMPRESSORS: zlib - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: compression-zlib-rhel8-python3.12 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Compression zlib RHEL8 Python3.12 + - name: .compression + display_name: Compression snappy RHEL8 run_on: - rhel87-small expansions: - COMPRESSORS: zlib - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: compression-zstd-rhel8-python3.13-no-c + COMPRESSOR: snappy + - name: compression-zlib-rhel8 tasks: - - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 Python3.13 No C + - name: .compression + display_name: Compression zlib RHEL8 run_on: - rhel87-small expansions: - COMPRESSORS: zstd - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: compression-zstd-rhel8-python3.9 + COMPRESSOR: zlib + - name: compression-zstd-rhel8 tasks: - - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 Python3.9 + - name: .compression !.4.0 + display_name: Compression zstd RHEL8 run_on: - rhel87-small expansions: - COMPRESSORS: zstd - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: compression-snappy-rhel8-pypy3.10 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Compression snappy RHEL8 PyPy3.10 - run_on: - - rhel87-small - expansions: - COMPRESSORS: snappy - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: compression-zlib-rhel8-pypy3.10 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Compression zlib RHEL8 PyPy3.10 - run_on: - - rhel87-small - expansions: - COMPRESSORS: zlib - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - - name: compression-zstd-rhel8-pypy3.10 - tasks: - - name: .standalone .noauth .nossl .sync_async !.4.0 - display_name: Compression zstd RHEL8 PyPy3.10 - run_on: - - rhel87-small - expansions: - COMPRESSORS: zstd - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 + COMPRESSOR: zstd # Coverage report tests - name: coverage-report diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 824786ed47..723ef6ba31 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -26,7 +26,7 @@ BATCHTIME_WEEK = 10080 AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] -C_EXTS = ["with_ext", "without_ext"] +C_EXTS = ["without_ext", "with_ext"] # By default test each of the topologies with a subset of auth/ssl. SUB_TASKS = [ ".sharded_cluster .auth .ssl", @@ -217,7 +217,7 @@ def get_variant_name(base: str, host: Host | None = None, **kwargs) -> str: def get_task_name(base: str, **kwargs): - return get_common_name(base, "-", **kwargs).lower() + return get_common_name(base, "-", **kwargs).replace(" ", "-").lower() def zip_cycle(*iterables, empty_default=None): @@ -430,42 +430,22 @@ def create_load_balancer_variants(): def create_compression_variants(): - # Compression tests - standalone versions of each server, across python versions, with and without c extensions. - # PyPy interpreters are always tested without extensions. + # Compression tests - standalone versions of each server, across python versions. host = DEFAULT_HOST - base_task = ".standalone .noauth .nossl .sync_async" - task_names = dict(snappy=[base_task], zlib=[base_task], zstd=[f"{base_task} !.4.0"]) + base_task = ".compression" variants = [] - for ind, (compressor, c_ext) in enumerate(product(["snappy", "zlib", "zstd"], C_EXTS)): - expansions = dict(COMPRESSORS=compressor) - handle_c_ext(c_ext, expansions) - base_name = f"Compression {compressor}" - python = CPYTHONS[ind % len(CPYTHONS)] - display_name = get_variant_name(base_name, host, python=python, **expansions) - variant = create_variant( - task_names[compressor], - display_name, - python=python, - host=host, - expansions=expansions, - ) - variants.append(variant) - - other_pythons = PYPYS + CPYTHONS[ind:] - for compressor, python in zip_cycle(["snappy", "zlib", "zstd"], other_pythons): - expansions = dict(COMPRESSORS=compressor) - handle_c_ext(c_ext, expansions) - base_name = f"Compression {compressor}" - display_name = get_variant_name(base_name, host, python=python, **expansions) - variant = create_variant( - task_names[compressor], - display_name, - python=python, - host=host, - expansions=expansions, + for compressor in "snappy", "zlib", "zstd": + expansions = dict(COMPRESSOR=compressor) + tasks = [base_task] if compressor != "zstd" else [f"{base_task} !.4.0"] + display_name = get_variant_name(f"Compression {compressor}", host) + variants.append( + create_variant( + tasks, + display_name, + host=host, + expansions=expansions, + ) ) - variants.append(variant) - return variants @@ -866,6 +846,39 @@ def create_load_balancer_tasks(): return tasks +def create_compression_tasks(): + tasks = [] + versions = get_versions_from("4.0") + # Test all server versions with min python. + for version in versions: + python = CPYTHONS[0] + tags = ["compression", version] + name = get_task_name("test-compression", python=python, version=version) + server_func = FunctionCall(func="run server", vars=dict(VERSION=version)) + test_func = FunctionCall(func="run tests") + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + + # Test latest with max python, with and without c exts. + version = "latest" + tags = ["compression", "latest"] + for c_ext in C_EXTS: + python = CPYTHONS[-1] + expansions = dict() + handle_c_ext(c_ext, expansions) + name = get_task_name("test-compression", python=python, version=version, **expansions) + server_func = FunctionCall(func="run server", vars=dict(VERSION=version)) + test_func = FunctionCall(func="run tests", vars=expansions) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + + # Test on latest with pypy. + python = PYPYS[-1] + name = get_task_name("test-compression", python=python, version=version) + server_func = FunctionCall(func="run server", vars=dict(VERSION=version)) + test_func = FunctionCall(func="run tests") + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_kms_tasks(): tasks = [] for kms_type in ["gcp", "azure"]: From 8b668898b880e848a638f84c6d024e0640079f14 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Thu, 3 Apr 2025 12:05:45 -0700 Subject: [PATCH 1851/2111] PYTHON-5208 Add spec test for wait queue timeout errors do not clear the pool (#2199) Also stop running the ping command to advance session cluster times in the unified tests. --- test/asynchronous/unified_format.py | 16 ++- test/csot/waitQueueTimeout.json | 176 ++++++++++++++++++++++++++++ test/unified_format.py | 16 ++- 3 files changed, 190 insertions(+), 18 deletions(-) create mode 100644 test/csot/waitQueueTimeout.json diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index cc516ee822..9099efbf0f 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -222,7 +222,6 @@ def __init__(self, test_class): self._listeners: Dict[str, EventListenerUtil] = {} self._session_lsids: Dict[str, Mapping[str, Any]] = {} self.test: UnifiedSpecTestMixinV1 = test_class - self._cluster_time: Mapping[str, Any] = {} def __contains__(self, item): return item in self._entities @@ -421,13 +420,11 @@ def get_lsid_for_session(self, session_name): # session has been closed. return self._session_lsids[session_name] - async def advance_cluster_times(self) -> None: + async def advance_cluster_times(self, cluster_time) -> None: """Manually synchronize entities when desired""" - if not self._cluster_time: - self._cluster_time = (await self.test.client.admin.command("ping")).get("$clusterTime") for entity in self._entities.values(): - if isinstance(entity, AsyncClientSession) and self._cluster_time: - entity.advance_cluster_time(self._cluster_time) + if isinstance(entity, AsyncClientSession) and cluster_time: + entity.advance_cluster_time(cluster_time) class UnifiedSpecTestMixinV1(AsyncIntegrationTest): @@ -1044,7 +1041,7 @@ async def _testOperation_targetedFailPoint(self, spec): async def _testOperation_createEntities(self, spec): await self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) - await self.entity_map.advance_cluster_times() + await self.entity_map.advance_cluster_times(self._cluster_time) def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec["session"]] @@ -1443,11 +1440,12 @@ async def _run_scenario(self, spec, uri=None): await self.entity_map.create_entities_from_spec( self.TEST_SPEC.get("createEntities", []), uri=uri ) + self._cluster_time = None # process initialData if "initialData" in self.TEST_SPEC: await self.insert_initial_data(self.TEST_SPEC["initialData"]) - self._cluster_time = (await self.client.admin.command("ping")).get("$clusterTime") - await self.entity_map.advance_cluster_times() + self._cluster_time = self.client._topology.max_cluster_time() + await self.entity_map.advance_cluster_times(self._cluster_time) if "expectLogMessages" in spec: expect_log_messages = spec["expectLogMessages"] diff --git a/test/csot/waitQueueTimeout.json b/test/csot/waitQueueTimeout.json new file mode 100644 index 0000000000..138d5cc161 --- /dev/null +++ b/test/csot/waitQueueTimeout.json @@ -0,0 +1,176 @@ +{ + "description": "WaitQueueTimeoutError does not clear the pool", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "maxPoolSize": 1, + "appname": "waitQueueTimeoutErrorTest" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "WaitQueueTimeoutError does not clear the pool", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "waitQueueTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "commandStartedEvent": { + "commandName": "ping" + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100, + "command": { + "hello": 1 + }, + "commandName": "hello" + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "hello": 1 + }, + "commandName": "hello" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "hello", + "databaseName": "test", + "command": { + "hello": 1 + } + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index fd7f92909e..71d6cd50d4 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -221,7 +221,6 @@ def __init__(self, test_class): self._listeners: Dict[str, EventListenerUtil] = {} self._session_lsids: Dict[str, Mapping[str, Any]] = {} self.test: UnifiedSpecTestMixinV1 = test_class - self._cluster_time: Mapping[str, Any] = {} def __contains__(self, item): return item in self._entities @@ -420,13 +419,11 @@ def get_lsid_for_session(self, session_name): # session has been closed. return self._session_lsids[session_name] - def advance_cluster_times(self) -> None: + def advance_cluster_times(self, cluster_time) -> None: """Manually synchronize entities when desired""" - if not self._cluster_time: - self._cluster_time = (self.test.client.admin.command("ping")).get("$clusterTime") for entity in self._entities.values(): - if isinstance(entity, ClientSession) and self._cluster_time: - entity.advance_cluster_time(self._cluster_time) + if isinstance(entity, ClientSession) and cluster_time: + entity.advance_cluster_time(cluster_time) class UnifiedSpecTestMixinV1(IntegrationTest): @@ -1035,7 +1032,7 @@ def _testOperation_targetedFailPoint(self, spec): def _testOperation_createEntities(self, spec): self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) - self.entity_map.advance_cluster_times() + self.entity_map.advance_cluster_times(self._cluster_time) def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec["session"]] @@ -1428,11 +1425,12 @@ def _run_scenario(self, spec, uri=None): self._uri = uri self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) + self._cluster_time = None # process initialData if "initialData" in self.TEST_SPEC: self.insert_initial_data(self.TEST_SPEC["initialData"]) - self._cluster_time = (self.client.admin.command("ping")).get("$clusterTime") - self.entity_map.advance_cluster_times() + self._cluster_time = self.client._topology.max_cluster_time() + self.entity_map.advance_cluster_times(self._cluster_time) if "expectLogMessages" in spec: expect_log_messages = spec["expectLogMessages"] From b40223938c9646e0b1a8f60d22e36120991b394d Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 3 Apr 2025 15:32:47 -0400 Subject: [PATCH 1852/2111] PYTHON-5219 - Avoid awaiting coroutines when holding locks (#2250) --- pymongo/asynchronous/pool.py | 23 ++++++++++++++++------- pymongo/asynchronous/topology.py | 2 -- pymongo/synchronous/pool.py | 23 ++++++++++++++++------- pymongo/synchronous/topology.py | 2 -- 4 files changed, 32 insertions(+), 18 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 18644cf7de..a67cc5f3c8 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -931,13 +931,15 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: return if self.opts.max_idle_time_seconds is not None: + close_conns = [] async with self.lock: while ( self.conns and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds ): - conn = self.conns.pop() - await conn.close_conn(ConnectionClosedReason.IDLE) + close_conns.append(self.conns.pop()) + for conn in close_conns: + await conn.close_conn(ConnectionClosedReason.IDLE) while True: async with self.size_cond: @@ -957,14 +959,18 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: self._pending += 1 incremented = True conn = await self.connect() + close_conn = False async with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. if self.gen.get_overall() != reference_generation: - await conn.close_conn(ConnectionClosedReason.STALE) - return - self.conns.appendleft(conn) - self.active_contexts.discard(conn.cancel_context) + close_conn = True + if not close_conn: + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + if close_conn: + await conn.close_conn(ConnectionClosedReason.STALE) + return finally: if incremented: # Notify after adding the socket to the pool. @@ -1343,17 +1349,20 @@ async def checkin(self, conn: AsyncConnection) -> None: error=ConnectionClosedReason.ERROR, ) else: + close_conn = False async with self.lock: # Hold the lock to ensure this section does not race with # Pool.reset(). if self.stale_generation(conn.generation, conn.service_id): - await conn.close_conn(ConnectionClosedReason.STALE) + close_conn = True else: conn.update_last_checkin_time() conn.update_is_writable(bool(self.is_writable)) self.conns.appendleft(conn) # Notify any threads waiting to create a connection. self._max_connecting_cond.notify() + if close_conn: + await conn.close_conn(ConnectionClosedReason.STALE) async with self.size_cond: if txn: diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 9de069af7e..b315cc33b7 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -244,8 +244,6 @@ async def open(self) -> None: # Close servers and clear the pools. for server in self._servers.values(): await server.close() - if not _IS_SYNC: - self._monitor_tasks.append(server._monitor) # Reset the session pool to avoid duplicate sessions in # the child process. self._session_pool.reset() diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 1151776b94..224834af31 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -927,13 +927,15 @@ def remove_stale_sockets(self, reference_generation: int) -> None: return if self.opts.max_idle_time_seconds is not None: + close_conns = [] with self.lock: while ( self.conns and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds ): - conn = self.conns.pop() - conn.close_conn(ConnectionClosedReason.IDLE) + close_conns.append(self.conns.pop()) + for conn in close_conns: + conn.close_conn(ConnectionClosedReason.IDLE) while True: with self.size_cond: @@ -953,14 +955,18 @@ def remove_stale_sockets(self, reference_generation: int) -> None: self._pending += 1 incremented = True conn = self.connect() + close_conn = False with self.lock: # Close connection and return if the pool was reset during # socket creation or while acquiring the pool lock. if self.gen.get_overall() != reference_generation: - conn.close_conn(ConnectionClosedReason.STALE) - return - self.conns.appendleft(conn) - self.active_contexts.discard(conn.cancel_context) + close_conn = True + if not close_conn: + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + if close_conn: + conn.close_conn(ConnectionClosedReason.STALE) + return finally: if incremented: # Notify after adding the socket to the pool. @@ -1339,17 +1345,20 @@ def checkin(self, conn: Connection) -> None: error=ConnectionClosedReason.ERROR, ) else: + close_conn = False with self.lock: # Hold the lock to ensure this section does not race with # Pool.reset(). if self.stale_generation(conn.generation, conn.service_id): - conn.close_conn(ConnectionClosedReason.STALE) + close_conn = True else: conn.update_last_checkin_time() conn.update_is_writable(bool(self.is_writable)) self.conns.appendleft(conn) # Notify any threads waiting to create a connection. self._max_connecting_cond.notify() + if close_conn: + conn.close_conn(ConnectionClosedReason.STALE) with self.size_cond: if txn: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index bccc8a2eb7..7df475b4c8 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -244,8 +244,6 @@ def open(self) -> None: # Close servers and clear the pools. for server in self._servers.values(): server.close() - if not _IS_SYNC: - self._monitor_tasks.append(server._monitor) # Reset the session pool to avoid duplicate sessions in # the child process. self._session_pool.reset() From e7c0814512ef4bc104bd17919acd80319460e1a0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 3 Apr 2025 15:33:11 -0400 Subject: [PATCH 1853/2111] PYTHON-4557 - Fix write log messages for retried commands (#2260) --- pymongo/asynchronous/mongo_client.py | 2 +- pymongo/synchronous/mongo_client.py | 2 +- test/asynchronous/test_logger.py | 31 ++++++++++++++++++++++++++-- test/test_logger.py | 31 ++++++++++++++++++++++++++-- 4 files changed, 60 insertions(+), 6 deletions(-) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index a0ff8741a5..7c8f7180bd 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2840,7 +2840,7 @@ async def _write(self) -> T: _debug_log( _COMMAND_LOGGER, message=f"Retrying write attempt number {self._attempt_number}", - clientId=self._client.client_id, + clientId=self._client._topology_settings._topology_id, commandName=self._operation, operationId=self._operation_id, ) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index a674bfb667..14fdefcb6f 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2826,7 +2826,7 @@ def _write(self) -> T: _debug_log( _COMMAND_LOGGER, message=f"Retrying write attempt number {self._attempt_number}", - clientId=self._client.client_id, + clientId=self._client._topology_settings._topology_id, commandName=self._operation, operationId=self._operation_id, ) diff --git a/test/asynchronous/test_logger.py b/test/asynchronous/test_logger.py index 92c29e1117..d024735fd8 100644 --- a/test/asynchronous/test_logger.py +++ b/test/asynchronous/test_logger.py @@ -102,7 +102,14 @@ async def test_logging_retry_read_attempts(self): await self.db.test.insert_one({"x": "1"}) async with self.fail_point( - {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + }, + } ): with self.assertLogs("pymongo.command", level="DEBUG") as cm: await self.db.test.find_one({"x": "1"}) @@ -110,7 +117,27 @@ async def test_logging_retry_read_attempts(self): retry_messages = [ r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() ] - print(retry_messages) + self.assertEqual(len(retry_messages), 1) + + @async_client_context.require_failCommand_fail_point + @async_client_context.require_retryable_writes + async def test_logging_retry_write_attempts(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + "failCommands": ["insert"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.insert_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying write attempt" in r.getMessage() + ] self.assertEqual(len(retry_messages), 1) diff --git a/test/test_logger.py b/test/test_logger.py index 398f768c9d..a7d97927fa 100644 --- a/test/test_logger.py +++ b/test/test_logger.py @@ -101,7 +101,14 @@ def test_logging_retry_read_attempts(self): self.db.test.insert_one({"x": "1"}) with self.fail_point( - {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + }, + } ): with self.assertLogs("pymongo.command", level="DEBUG") as cm: self.db.test.find_one({"x": "1"}) @@ -109,7 +116,27 @@ def test_logging_retry_read_attempts(self): retry_messages = [ r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() ] - print(retry_messages) + self.assertEqual(len(retry_messages), 1) + + @client_context.require_failCommand_fail_point + @client_context.require_retryable_writes + def test_logging_retry_write_attempts(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + "failCommands": ["insert"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.insert_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying write attempt" in r.getMessage() + ] self.assertEqual(len(retry_messages), 1) From 1c813dc6489214bfab50aed04dfd63455bb4d231 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Fri, 4 Apr 2025 13:09:04 -0400 Subject: [PATCH 1854/2111] PYTHON-4575 Allow valid SRV hostnames with less than 3 parts (#2234) --- doc/changelog.rst | 1 + pymongo/asynchronous/srv_resolver.py | 10 +-- pymongo/synchronous/srv_resolver.py | 10 +-- test/asynchronous/test_dns.py | 95 ++++++++++++++++++++++++++-- test/test_dns.py | 95 ++++++++++++++++++++++++++-- test/test_uri_parser.py | 1 + tools/synchro.py | 1 + 7 files changed, 193 insertions(+), 20 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4d8b26a5e2..1ab3bc49af 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -24,6 +24,7 @@ PyMongo 4.12 brings a number of changes including: :class:`~pymongo.read_preferences.SecondaryPreferred`, :class:`~pymongo.read_preferences.Nearest`. Support for ``hedge`` will be removed in PyMongo 5.0. - Removed PyOpenSSL support from the asynchronous API due to limitations of the CPython asyncio.Protocol SSL implementation. +- Allow valid SRV hostnames with less than 3 parts. Issues Resolved ............... diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py index 8b811e5dc2..f7c67af3e1 100644 --- a/pymongo/asynchronous/srv_resolver.py +++ b/pymongo/asynchronous/srv_resolver.py @@ -90,14 +90,12 @@ def __init__( raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) except ValueError: pass - try: - self.__plist = self.__fqdn.split(".")[1:] + split_fqdn = self.__fqdn.split(".") + self.__plist = split_fqdn[1:] if len(split_fqdn) > 2 else split_fqdn except Exception: raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None self.__slen = len(self.__plist) - if self.__slen < 2: - raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) async def get_options(self) -> Optional[str]: from dns import resolver @@ -139,6 +137,10 @@ async def _get_srv_response_and_hosts( # Validate hosts for node in nodes: + if self.__fqdn == node[0].lower(): + raise ConfigurationError( + "Invalid SRV host: return address is identical to SRV hostname" + ) try: nlist = node[0].lower().split(".")[1:][-self.__slen :] except Exception: diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py index 1b36efd1c9..cf7b0842ab 100644 --- a/pymongo/synchronous/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -90,14 +90,12 @@ def __init__( raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) except ValueError: pass - try: - self.__plist = self.__fqdn.split(".")[1:] + split_fqdn = self.__fqdn.split(".") + self.__plist = split_fqdn[1:] if len(split_fqdn) > 2 else split_fqdn except Exception: raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None self.__slen = len(self.__plist) - if self.__slen < 2: - raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) def get_options(self) -> Optional[str]: from dns import resolver @@ -139,6 +137,10 @@ def _get_srv_response_and_hosts( # Validate hosts for node in nodes: + if self.__fqdn == node[0].lower(): + raise ConfigurationError( + "Invalid SRV host: return address is identical to SRV hostname" + ) try: nlist = node[0].lower().split(".")[1:][-self.__slen :] except Exception: diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py index d0e801e123..01c8d7b40b 100644 --- a/test/asynchronous/test_dns.py +++ b/test/asynchronous/test_dns.py @@ -30,6 +30,7 @@ unittest, ) from test.utils_shared import async_wait_until +from unittest.mock import MagicMock, patch from pymongo.asynchronous.uri_parser import parse_uri from pymongo.common import validate_read_preference_tags @@ -186,12 +187,6 @@ def create_tests(cls): class TestParsingErrors(AsyncPyMongoTestCase): async def test_invalid_host(self): - with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb is not"): - client = self.simple_client("mongodb+srv://mongodb") - await client.aconnect() - with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb.com is not"): - client = self.simple_client("mongodb+srv://mongodb.com") - await client.aconnect() with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): client = self.simple_client("mongodb+srv://127.0.0.1") await client.aconnect() @@ -207,5 +202,93 @@ async def test_connect_case_insensitive(self): self.assertGreater(len(client.topology_description.server_descriptions()), 1) +class TestInitialDnsSeedlistDiscovery(AsyncPyMongoTestCase): + """ + Initial DNS Seedlist Discovery prose tests + https://github.com/mongodb/specifications/blob/0a7a8b5/source/initial-dns-seedlist-discovery/tests/README.md#prose-tests + """ + + async def run_initial_dns_seedlist_discovery_prose_tests(self, test_cases): + for case in test_cases: + with patch("dns.asyncresolver.resolve") as mock_resolver: + + async def mock_resolve(query, record_type, *args, **kwargs): + mock_srv = MagicMock() + mock_srv.target.to_text.return_value = case["mock_target"] + return [mock_srv] + + mock_resolver.side_effect = mock_resolve + domain = case["query"].split("._tcp.")[1] + connection_string = f"mongodb+srv://{domain}" + try: + await parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") + + async def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): + with patch("dns.asyncresolver.resolve"): + await parse_uri("mongodb+srv://localhost/") + await parse_uri("mongodb+srv://mongo.local/") + + async def test_2_throw_when_return_address_does_not_end_with_srv_domain(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost.mongodb", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.evil.com", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongo.local", + "mock_target": "test_1.evil.com", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def test_3_throw_when_return_address_is_identical_to_srv_hostname(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "mongo.local", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part_of_domain( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "test_1.cluster_1localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "test_1.my_hostmongo.local", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "cluster.testmongodb.com", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_dns.py b/test/test_dns.py index 0290eb16d9..9360f3f289 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -30,6 +30,7 @@ unittest, ) from test.utils_shared import wait_until +from unittest.mock import MagicMock, patch from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError @@ -184,12 +185,6 @@ def create_tests(cls): class TestParsingErrors(PyMongoTestCase): def test_invalid_host(self): - with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb is not"): - client = self.simple_client("mongodb+srv://mongodb") - client._connect() - with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: mongodb.com is not"): - client = self.simple_client("mongodb+srv://mongodb.com") - client._connect() with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): client = self.simple_client("mongodb+srv://127.0.0.1") client._connect() @@ -205,5 +200,93 @@ def test_connect_case_insensitive(self): self.assertGreater(len(client.topology_description.server_descriptions()), 1) +class TestInitialDnsSeedlistDiscovery(PyMongoTestCase): + """ + Initial DNS Seedlist Discovery prose tests + https://github.com/mongodb/specifications/blob/0a7a8b5/source/initial-dns-seedlist-discovery/tests/README.md#prose-tests + """ + + def run_initial_dns_seedlist_discovery_prose_tests(self, test_cases): + for case in test_cases: + with patch("dns.resolver.resolve") as mock_resolver: + + def mock_resolve(query, record_type, *args, **kwargs): + mock_srv = MagicMock() + mock_srv.target.to_text.return_value = case["mock_target"] + return [mock_srv] + + mock_resolver.side_effect = mock_resolve + domain = case["query"].split("._tcp.")[1] + connection_string = f"mongodb+srv://{domain}" + try: + parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") + + def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): + with patch("dns.resolver.resolve"): + parse_uri("mongodb+srv://localhost/") + parse_uri("mongodb+srv://mongo.local/") + + def test_2_throw_when_return_address_does_not_end_with_srv_domain(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost.mongodb", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.evil.com", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongo.local", + "mock_target": "test_1.evil.com", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_3_throw_when_return_address_is_identical_to_srv_hostname(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "mongo.local", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part_of_domain( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "test_1.cluster_1localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "test_1.my_hostmongo.local", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "cluster.testmongodb.com", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 0baefa0c3a..d4d17ac211 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -24,6 +24,7 @@ sys.path[0:0] = [""] from test import unittest +from unittest.mock import patch from bson.binary import JAVA_LEGACY from pymongo import ReadPreference diff --git a/tools/synchro.py b/tools/synchro.py index f451d09a26..37bf9bc740 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -133,6 +133,7 @@ "async_joinall": "joinall", "_async_create_connection": "_create_connection", "pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts": "pymongo.synchronous.srv_resolver._SrvResolver.get_hosts", + "dns.asyncresolver.resolve": "dns.resolver.resolve", } docstring_replacements: dict[tuple[str, str], str] = { From 708ce16961f077b7a03fc97e66ccfaccaa0d847a Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 4 Apr 2025 13:22:22 -0400 Subject: [PATCH 1855/2111] PYTHON-4724 - Prohibit AsyncMongoClient from being used across multiple event loops (#2256) --- pymongo/asynchronous/mongo_client.py | 8 +++++ pymongo/synchronous/mongo_client.py | 8 +++++ pyproject.toml | 2 ++ test/asynchronous/test_async_loop_safety.py | 36 +++++++++++++++++++++ tools/synchro.py | 7 +++- 5 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 test/asynchronous/test_async_loop_safety.py diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 7c8f7180bd..7744a75d9c 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -878,6 +878,7 @@ def __init__( self._opened = False self._closed = False + self._loop: Optional[asyncio.AbstractEventLoop] = None if not is_srv: self._init_background() @@ -1709,6 +1710,13 @@ async def _get_topology(self) -> Topology: If this client was created with "connect=False", calling _get_topology launches the connection process in the background. """ + if not _IS_SYNC: + if self._loop is None: + self._loop = asyncio.get_running_loop() + elif self._loop != asyncio.get_running_loop(): + raise RuntimeError( + "Cannot use AsyncMongoClient in different event loop. AsyncMongoClient uses low-level asyncio APIs that bind it to the event loop it was created on." + ) if not self._opened: if self._resolve_srv_info["is_srv"]: await self._resolve_srv() diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 14fdefcb6f..1c0adb5d6b 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -876,6 +876,7 @@ def __init__( self._opened = False self._closed = False + self._loop: Optional[asyncio.AbstractEventLoop] = None if not is_srv: self._init_background() @@ -1703,6 +1704,13 @@ def _get_topology(self) -> Topology: If this client was created with "connect=False", calling _get_topology launches the connection process in the background. """ + if not _IS_SYNC: + if self._loop is None: + self._loop = asyncio.get_running_loop() + elif self._loop != asyncio.get_running_loop(): + raise RuntimeError( + "Cannot use MongoClient in different event loop. MongoClient uses low-level asyncio APIs that bind it to the event loop it was created on." + ) if not self._opened: if self._resolve_srv_info["is_srv"]: self._resolve_srv() diff --git a/pyproject.toml b/pyproject.toml index 611cac13aa..4da75b4c13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,6 +117,8 @@ filterwarnings = [ "module:unclosed bool: """Return True for async tests that should not be converted to sync.""" - return f in ["test_locks.py", "test_concurrency.py", "test_async_cancellation.py"] + return f in [ + "test_locks.py", + "test_concurrency.py", + "test_async_cancellation.py", + "test_async_loop_safety.py", + ] test_files = [ From bf0aa56fbbfa33c3bfbdf3c03a4ab55aceb0cda4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 4 Apr 2025 20:00:41 -0500 Subject: [PATCH 1856/2111] PYTHON-5252 Add dependency on pymongocrypt 1.13 (#2258) --- .evergreen/scripts/setup_tests.py | 2 +- requirements/encryption.txt | 2 +- uv.lock | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index fc2cadf61d..0764d24643 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -165,7 +165,7 @@ def handle_test_env() -> None: # Handle pass through env vars. for var in PASS_THROUGH_ENV: if is_set(var) or getattr(opts, var.lower()): - write_env(var, os.environ[var]) + write_env(var, os.environ.get(var, getattr(opts, var.lower()))) if extra := EXTRAS_MAP.get(test_name, ""): UV_ARGS.append(f"--extra {extra}") diff --git a/requirements/encryption.txt b/requirements/encryption.txt index 5962f5028f..321aba5bac 100644 --- a/requirements/encryption.txt +++ b/requirements/encryption.txt @@ -1,3 +1,3 @@ pymongo-auth-aws>=1.1.0,<2.0.0 -pymongocrypt>=1.12.0,<2.0.0 +pymongocrypt>=1.13.0,<2.0.0 certifi;os.name=='nt' or sys_platform=='darwin' diff --git a/uv.lock b/uv.lock index 39aae339ee..aa23663a84 100644 --- a/uv.lock +++ b/uv.lock @@ -1,4 +1,5 @@ version = 1 +revision = 1 requires-python = ">=3.9" resolution-markers = [ "python_full_version >= '3.10'", @@ -997,7 +998,6 @@ sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5 [[package]] name = "pymongo" -version = "4.12.0.dev0" source = { editable = "." } dependencies = [ { name = "dnspython" }, @@ -1086,7 +1086,7 @@ requires-dist = [ { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, - { name = "pymongocrypt", marker = "extra == 'encryption'", specifier = ">=1.12.0,<2.0.0" }, + { name = "pymongocrypt", marker = "extra == 'encryption'", specifier = ">=1.13.0,<2.0.0" }, { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, { name = "pytest", marker = "extra == 'test'", specifier = ">=8.2" }, { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24.0" }, @@ -1101,6 +1101,7 @@ requires-dist = [ { name = "winkerberos", marker = "os_name == 'nt' and extra == 'gssapi'", specifier = ">=0.5.0" }, { name = "zstandard", marker = "extra == 'zstd'" }, ] +provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "test", "zstd"] [package.metadata.requires-dev] coverage = [ @@ -1136,8 +1137,8 @@ wheels = [ [[package]] name = "pymongocrypt" -version = "1.13.0.dev0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#1cad4ad1c4cd6c11c6a4710da2127dab6a374471" } +version = "1.14.0.dev0" +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#af621673c46d3d8fd2a2fe9d5540e24a79d9357a" } dependencies = [ { name = "cffi" }, { name = "cryptography" }, From 79e5d601397f6d3891d9eaf881b43fd40a712ee2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 7 Apr 2025 09:48:05 -0500 Subject: [PATCH 1857/2111] PYTHON-5268 Fix handling of PYTHON_BINARY (#2264) --- .evergreen/scripts/install-dependencies.sh | 4 ++-- .evergreen/scripts/setup-dev-env.sh | 4 +--- .evergreen/scripts/setup_tests.py | 13 ++++++++++--- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 1347374bf5..780d250a2b 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -39,7 +39,7 @@ function _pip_install() { # Ensure just is installed. -if ! command -v just 2>/dev/null; then +if ! command -v just >/dev/null 2>&1; then # On most systems we can install directly. _TARGET="" if [ "Windows_NT" = "${OS:-}" ]; then @@ -54,7 +54,7 @@ if ! command -v just 2>/dev/null; then fi # Install uv. -if ! command -v uv 2>/dev/null; then +if ! command -v uv >/dev/null 2>&1; then echo "Installing uv..." # On most systems we can install directly. curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 3051c9aad7..6e6b5965bd 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -11,7 +11,7 @@ pushd $ROOT > /dev/null if [ -f $HERE/env.sh ]; then . $HERE/env.sh fi -# PYTHON_BINARY may be defined in test-env.sh. +# PYTHON_BINARY or PYTHON_VERSION may be defined in test-env.sh. if [ -f $HERE/test-env.sh ]; then . $HERE/test-env.sh fi @@ -21,7 +21,6 @@ bash $HERE/install-dependencies.sh # Get the appropriate UV_PYTHON. . $ROOT/.evergreen/utils.sh -set -x if [ -z "${PYTHON_BINARY:-}" ]; then if [ -n "${PYTHON_VERSION:-}" ]; then @@ -31,7 +30,6 @@ if [ -z "${PYTHON_BINARY:-}" ]; then fi fi export UV_PYTHON=${PYTHON_BINARY} -echo "export UV_PYTHON=$UV_PYTHON" >> $HERE/env.sh echo "Using python $UV_PYTHON" # Add the default install path to the path if needed. diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 0764d24643..2ee8aa12ee 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -26,7 +26,14 @@ ) # Passthrough environment variables. -PASS_THROUGH_ENV = ["GREEN_FRAMEWORK", "NO_EXT", "MONGODB_API_VERSION", "DEBUG_LOG"] +PASS_THROUGH_ENV = [ + "GREEN_FRAMEWORK", + "NO_EXT", + "MONGODB_API_VERSION", + "DEBUG_LOG", + "PYTHON_BINARY", + "PYTHON_VERSION", +] # Map the test name to test extra. EXTRAS_MAP = { @@ -164,8 +171,8 @@ def handle_test_env() -> None: # Handle pass through env vars. for var in PASS_THROUGH_ENV: - if is_set(var) or getattr(opts, var.lower()): - write_env(var, os.environ.get(var, getattr(opts, var.lower()))) + if is_set(var) or getattr(opts, var.lower(), ""): + write_env(var, os.environ.get(var, getattr(opts, var.lower(), ""))) if extra := EXTRAS_MAP.get(test_name, ""): UV_ARGS.append(f"--extra {extra}") From 7a4218f0ad653af5091c1cb6a8dfc5d377f3df37 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 7 Apr 2025 10:36:35 -0500 Subject: [PATCH 1858/2111] PYTHON-5263 Convert s3-related functions to generated config (#2259) --- .evergreen/config.yml | 142 +-------------------- .evergreen/generated_configs/functions.yml | 117 +++++++++++++++++ .evergreen/scripts/generate_config.py | 135 +++++++++++++++++++- .pre-commit-config.yaml | 2 +- 4 files changed, 253 insertions(+), 143 deletions(-) create mode 100644 .evergreen/generated_configs/functions.yml diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a297c49162..d83a5620df 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -25,6 +25,7 @@ timeout: binary: ls -la include: + - filename: .evergreen/generated_configs/functions.yml - filename: .evergreen/generated_configs/tasks.yml - filename: .evergreen/generated_configs/variants.yml @@ -52,147 +53,6 @@ functions: params: file: src/expansion.yml - "upload coverage" : - - command: ec2.assume_role - params: - role_arn: ${assume_role_arn} - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: src/.coverage - optional: true - # Upload the coverage report for all tasks in a single build to the same directory. - remote_file: coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} - bucket: ${bucket_name} - permissions: public-read - content_type: text/html - display_name: "Raw Coverage Report" - - "download and merge coverage" : - - command: ec2.assume_role - params: - role_arn: ${assume_role_arn} - - command: subprocess.exec - params: - silent: true - binary: bash - working_dir: "src" - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - args: - - .evergreen/scripts/download-and-merge-coverage.sh - - ${bucket_name} - - ${revision} - - ${version_id} - - command: subprocess.exec - params: - working_dir: "src" - binary: bash - args: - - .evergreen/combine-coverage.sh - # Upload the resulting html coverage report. - - command: subprocess.exec - params: - silent: true - binary: bash - working_dir: "src" - include_expansions_in_env: ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - args: - - .evergreen/scripts/upload-coverage-report.sh - - ${bucket_name} - - ${revision} - - ${version_id} - # Attach the index.html with s3.put so it shows up in the Evergreen UI. - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: src/htmlcov/index.html - remote_file: coverage/${revision}/${version_id}/htmlcov/index.html - bucket: ${bucket_name} - permissions: public-read - content_type: text/html - display_name: "Coverage Report HTML" - - "upload mo artifacts": - - command: ec2.assume_role - params: - role_arn: ${assume_role_arn} - - command: archive.targz_pack - params: - target: "mongo-coredumps.tgz" - source_dir: "./" - include: - - "./**.core" - - "./**.mdmp" # Windows: minidumps - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: mongo-coredumps.tgz - remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Core Dumps - Execution - optional: true - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: ${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz - remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|application/x-gzip} - display_name: "drivers-tools-logs.tar.gz" - - "upload working dir": - - command: ec2.assume_role - params: - role_arn: ${assume_role_arn} - - command: archive.targz_pack - params: - target: "working-dir.tar.gz" - source_dir: ${PROJECT_DIRECTORY}/ - include: - - "./**" - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: working-dir.tar.gz - remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-working-dir.tar.gz - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|application/x-gzip} - display_name: "working-dir.tar.gz" - - command: archive.targz_pack - params: - target: "drivers-dir.tar.gz" - source_dir: ${DRIVERS_TOOLS} - include: - - "./**" - exclude_files: - # Windows cannot read the mongod *.lock files because they are locked. - - "*.lock" - - command: s3.put - params: - aws_key: ${AWS_ACCESS_KEY_ID} - aws_secret: ${AWS_SECRET_ACCESS_KEY} - aws_session_token: ${AWS_SESSION_TOKEN} - local_file: drivers-dir.tar.gz - remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-drivers-dir.tar.gz - bucket: ${bucket_name} - permissions: public-read - content_type: ${content_type|application/x-gzip} - display_name: "drivers-dir.tar.gz" - "upload test results": - command: attach.results params: diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml new file mode 100644 index 0000000000..afd7f11374 --- /dev/null +++ b/.evergreen/generated_configs/functions.yml @@ -0,0 +1,117 @@ +functions: + # Download and merge coverage + download and merge coverage: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/download-and-merge-coverage.sh + - ${bucket_name} + - ${revision} + - ${version_id} + working_dir: src + silent: true + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/combine-coverage.sh + working_dir: src + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/upload-coverage-report.sh + - ${bucket_name} + - ${revision} + - ${version_id} + working_dir: src + silent: true + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + - command: s3.put + params: + remote_file: coverage/${revision}/${version_id}/htmlcov/index.html + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: src/htmlcov/index.html + permissions: public-read + content_type: text/html + display_name: Coverage Report HTML + optional: "true" + type: setup + + # Upload coverage + upload coverage: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: s3.put + params: + remote_file: coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: src/.coverage + permissions: public-read + content_type: text/html + display_name: Raw Coverage Report + optional: "true" + type: setup + + # Upload mo artifacts + upload mo artifacts: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: archive.targz_pack + params: + target: mongo-coredumps.tgz + source_dir: ./ + include: + - ./**.core + - ./**.mdmp + - command: s3.put + params: + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: mongo-coredumps.tgz + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: Core Dumps - Execution + optional: "true" + type: setup + - command: s3.put + params: + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: ${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: drivers-tools-logs.tar.gz + optional: "true" + type: setup diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 723ef6ba31..5a5f6e93db 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -9,7 +9,14 @@ from typing import Any from shrub.v3.evg_build_variant import BuildVariant -from shrub.v3.evg_command import EvgCommandType, FunctionCall, subprocess_exec +from shrub.v3.evg_command import ( + EvgCommandType, + FunctionCall, + archive_targz_pack, + ec2_assume_role, + s3_put, + subprocess_exec, +) from shrub.v3.evg_project import EvgProject from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef from shrub.v3.shrub_service import ShrubService @@ -233,6 +240,12 @@ def handle_c_ext(c_ext, expansions) -> None: expansions["NO_EXT"] = "1" +def get_assume_role(**kwargs): + kwargs.setdefault("command_type", EvgCommandType.SETUP) + kwargs.setdefault("role_arn", "${assume_role_arn}") + return ec2_assume_role(**kwargs) + + def get_subprocess_exec(**kwargs): kwargs.setdefault("binary", "bash") kwargs.setdefault("working_dir", "src") @@ -240,6 +253,18 @@ def get_subprocess_exec(**kwargs): return subprocess_exec(**kwargs) +def get_s3_put(**kwargs): + kwargs["aws_key"] = "${AWS_ACCESS_KEY_ID}" + kwargs["aws_secret"] = "${AWS_SECRET_ACCESS_KEY}" # noqa:S105 + kwargs["aws_session_token"] = "${AWS_SESSION_TOKEN}" # noqa:S105 + kwargs["bucket"] = "${bucket_name}" + kwargs.setdefault("optional", "true") + kwargs.setdefault("permissions", "public-read") + kwargs.setdefault("content_type", "${content_type|application/x-gzip}") + kwargs.setdefault("command_type", EvgCommandType.SETUP) + return s3_put(**kwargs) + + def generate_yaml(tasks=None, variants=None): """Generate the yaml for a given set of tasks and variants.""" project = EvgProject(tasks=tasks, buildvariants=variants) @@ -1193,6 +1218,79 @@ def create_serverless_tasks(): return [EvgTask(name=task_name, tags=tags, commands=[test_func])] +############## +# Functions +############## + + +def create_upload_coverage_func(): + # Upload the coverage report for all tasks in a single build to the same directory. + remote_file = ( + "coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name}" + ) + display_name = "Raw Coverage Report" + cmd = get_s3_put( + local_file="src/.coverage", + remote_file=remote_file, + display_name=display_name, + content_type="text/html", + ) + return "upload coverage", [get_assume_role(), cmd] + + +def create_download_and_merge_coverage_func(): + include_expansions = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + args = [ + ".evergreen/scripts/download-and-merge-coverage.sh", + "${bucket_name}", + "${revision}", + "${version_id}", + ] + merge_cmd = get_subprocess_exec( + silent=True, include_expansions_in_env=include_expansions, args=args + ) + combine_cmd = get_subprocess_exec(args=[".evergreen/combine-coverage.sh"]) + # Upload the resulting html coverage report. + args = [ + ".evergreen/scripts/upload-coverage-report.sh", + "${bucket_name}", + "${revision}", + "${version_id}", + ] + upload_cmd = get_subprocess_exec( + silent=True, include_expansions_in_env=include_expansions, args=args + ) + display_name = "Coverage Report HTML" + remote_file = "coverage/${revision}/${version_id}/htmlcov/index.html" + put_cmd = get_s3_put( + local_file="src/htmlcov/index.html", + remote_file=remote_file, + display_name=display_name, + content_type="text/html", + ) + cmds = [get_assume_role(), merge_cmd, combine_cmd, upload_cmd, put_cmd] + return "download and merge coverage", cmds + + +def create_upload_mo_artifacts_func(): + include = ["./**.core", "./**.mdmp"] # Windows: minidumps + archive_cmd = archive_targz_pack(target="mongo-coredumps.tgz", source_dir="./", include=include) + display_name = "Core Dumps - Execution" + remote_file = "${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz" + s3_dumps = get_s3_put( + local_file="mongo-coredumps.tgz", remote_file=remote_file, display_name=display_name + ) + display_name = "drivers-tools-logs.tar.gz" + remote_file = "${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz" + s3_logs = get_s3_put( + local_file="${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz", + remote_file=remote_file, + display_name=display_name, + ) + cmds = [get_assume_role(), archive_cmd, s3_dumps, s3_logs] + return "upload mo artifacts", cmds + + ################## # Generate Config ################## @@ -1258,5 +1356,40 @@ def write_tasks_to_file(): fid.write(f"{line}\n") +def write_functions_to_file(): + mod = sys.modules[__name__] + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "functions.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("functions:\n") + + functions = dict() + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_func"): + continue + if not name.startswith("create_"): + raise ValueError("Function creators must start with create_") + title = name.replace("create_", "").replace("_func", "").replace("_", " ").capitalize() + func_name, cmds = func() + functions = dict() + functions[func_name] = cmds + project = EvgProject(functions=functions, tasks=None, buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title}\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + write_variants_to_file() write_tasks_to_file() +write_functions_to_file() diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index deab0724a4..a570e55ad1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -121,4 +121,4 @@ repos: entry: .evergreen/scripts/generate-config.sh language: python require_serial: true - additional_dependencies: ["shrub.py>=3.8.0", "pyyaml>=6.0.2"] + additional_dependencies: ["shrub.py>=3.9.0", "pyyaml>=6.0.2"] From 4d4a26cbbe994675551234b15953bd2cf27d6d8a Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 7 Apr 2025 11:34:01 -0700 Subject: [PATCH 1859/2111] PYTHON-5270 Server selection should log remainingTimeMS as milliseconds (#2263) --- pymongo/asynchronous/topology.py | 2 +- pymongo/synchronous/topology.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index b315cc33b7..32776bf7b9 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -354,7 +354,7 @@ async def _select_servers_loop( operationId=operation_id, topologyDescription=self.description, clientId=self.description._topology_settings._topology_id, - remainingTimeMS=int(end_time - time.monotonic()), + remainingTimeMS=int(1000 * (end_time - time.monotonic())), ) logged_waiting = True diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 7df475b4c8..df23bff28c 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -354,7 +354,7 @@ def _select_servers_loop( operationId=operation_id, topologyDescription=self.description, clientId=self.description._topology_settings._topology_id, - remainingTimeMS=int(end_time - time.monotonic()), + remainingTimeMS=int(1000 * (end_time - time.monotonic())), ) logged_waiting = True From 92970d39fd0fd9060c95c35a6081676b7b6af548 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 7 Apr 2025 19:34:44 -0500 Subject: [PATCH 1860/2111] PYTHON-5248 Update changelog for 4.12 release (#2265) --- doc/changelog.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 1ab3bc49af..077c85bb4b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,14 +1,18 @@ Changelog ========= -Changes in Version 4.12.0 (YYYY/MM/DD) +Changes in Version 4.12.0 (2025/04/08) -------------------------------------- +.. warning:: Driver support for MongoDB 4.0 reached end of life in April 2025. + PyMongo 4.12 will be the last release to support MongoDB 4.0. + PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. - Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. +- pymongocrypt>=1.13 is now required for :ref:`In-Use Encryption` support. - Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.rename_by_name` and :meth:`gridfs.grid_file.GridFSBucket.rename_by_name` for more performant renaming of a file with multiple revisions. - Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.delete_by_name` and :meth:`gridfs.grid_file.GridFSBucket.delete_by_name` From 98b656f286790ecbfc98acd5bbdf5bb128ad2804 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 12:52:55 +0000 Subject: [PATCH 1861/2111] BUMP 4.12.0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 985acfd81b..9fcd4783a4 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.12.0.dev0" +__version__ = "4.12.0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 2c077ba8a480f68035084e2a6eca46c608945cc2 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 13:12:35 +0000 Subject: [PATCH 1862/2111] BUMP 4.13.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 9fcd4783a4..e49406e755 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.12.0" +__version__ = "4.13.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From bc2cc1ed58815e26a86328bc4f0795cad7de95be Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 8 Apr 2025 11:39:06 -0400 Subject: [PATCH 1863/2111] =?UTF-8?q?PYTHON-4924=20-=20PoolClearedError=20?= =?UTF-8?q?should=20have=20TransientTransactionError=20=E2=80=A6=20(#2244)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/asynchronous/test_transactions.py | 19 +++++++++++++++++++ test/test_transactions.py | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index f151755217..e1b6001edb 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -20,6 +20,8 @@ from test.asynchronous.utils_spec_runner import AsyncSpecRunner from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket +from pymongo.asynchronous.pool import PoolState +from pymongo.server_selectors import writable_server_selector sys.path[0:0] = [""] @@ -39,6 +41,7 @@ from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.helpers import anext from pymongo.errors import ( + AutoReconnect, CollectionInvalid, ConfigurationError, ConnectionFailure, @@ -394,6 +397,22 @@ async def find_raw_batches(*args, **kwargs): if isinstance(res, (AsyncCommandCursor, AsyncCursor)): await res.to_list() + @async_client_context.require_transactions + async def test_transaction_pool_cleared_error_labelled_transient(self): + c = await self.async_single_client() + + with self.assertRaises(AutoReconnect) as context: + async with c.start_session() as session: + async with await session.start_transaction(): + server = await c._select_server(writable_server_selector, session, "test") + # Pause the server's pool, causing it to fail connection checkout. + server.pool.state = PoolState.PAUSED + async with c._checkout(server, session): + pass + + # Verify that the TransientTransactionError label is present in the error. + self.assertTrue(context.exception.has_error_label("TransientTransactionError")) + class PatchSessionTimeout: """Patches the client_session's with_transaction timeout for testing.""" diff --git a/test/test_transactions.py b/test/test_transactions.py index 63ea5c74fe..93bcdbaeb1 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -20,6 +20,8 @@ from test.utils_spec_runner import SpecRunner from gridfs.synchronous.grid_file import GridFS, GridFSBucket +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.pool import PoolState sys.path[0:0] = [""] @@ -34,6 +36,7 @@ from bson.raw_bson import RawBSONDocument from pymongo import WriteConcern, _csot from pymongo.errors import ( + AutoReconnect, CollectionInvalid, ConfigurationError, ConnectionFailure, @@ -386,6 +389,22 @@ def find_raw_batches(*args, **kwargs): if isinstance(res, (CommandCursor, Cursor)): res.to_list() + @client_context.require_transactions + def test_transaction_pool_cleared_error_labelled_transient(self): + c = self.single_client() + + with self.assertRaises(AutoReconnect) as context: + with c.start_session() as session: + with session.start_transaction(): + server = c._select_server(writable_server_selector, session, "test") + # Pause the server's pool, causing it to fail connection checkout. + server.pool.state = PoolState.PAUSED + with c._checkout(server, session): + pass + + # Verify that the TransientTransactionError label is present in the error. + self.assertTrue(context.exception.has_error_label("TransientTransactionError")) + class PatchSessionTimeout: """Patches the client_session's with_transaction timeout for testing.""" From 93886286a3ee2339fa23a839d7f60c319c75369e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 8 Apr 2025 11:30:17 -0500 Subject: [PATCH 1864/2111] PYTHON-5275 Fix handlig of FIPS build (#2266) --- .evergreen/config.yml | 4 ++-- .evergreen/generated_configs/variants.yml | 1 + .evergreen/scripts/generate_config.py | 6 ++++-- .evergreen/scripts/setup_tests.py | 1 + test/__init__.py | 2 ++ test/asynchronous/__init__.py | 2 ++ 6 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index d83a5620df..a1d6284713 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -67,7 +67,7 @@ functions: binary: bash working_dir: "src" include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, PYTHON_VERSION, - STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER, LOCAL_ATLAS] + STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER, LOCAL_ATLAS, NO_EXT] args: [.evergreen/just.sh, run-server, "${TEST_NAME}"] - command: expansions.update params: @@ -89,7 +89,7 @@ functions: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, PYTHON_VERSION, DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG, - ORCHESTRATION_FILE, OCSP_SERVER_TYPE, VERSION] + ORCHESTRATION_FILE, OCSP_SERVER_TYPE, VERSION, REQUIRE_FIPS] binary: bash working_dir: "src" args: [.evergreen/just.sh, setup-tests, "${TEST_NAME}", "${SUB_TEST_NAME}"] diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 7082dda44d..940c4e9b0c 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -18,6 +18,7 @@ buildvariants: batchtime: 10080 expansions: NO_EXT: "1" + REQUIRE_FIPS: "1" - name: other-hosts-rhel8-zseries tasks: - name: .6.0 .standalone !.sync_async diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 5a5f6e93db..5957c3b1e3 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -800,10 +800,12 @@ def create_alternative_hosts_variants(): ) ) - expansions = dict() - handle_c_ext(C_EXTS[0], expansions) for host_name in OTHER_HOSTS: + expansions = dict() + handle_c_ext(C_EXTS[0], expansions) host = HOSTS[host_name] + if "fips" in host_name.lower(): + expansions["REQUIRE_FIPS"] = "1" tags = [".6.0 .standalone !.sync_async"] if host_name == "Amazon2023": tags = [f".latest !.sync_async {t}" for t in SUB_TASKS] diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 2ee8aa12ee..2fa1fc47fc 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -33,6 +33,7 @@ "DEBUG_LOG", "PYTHON_BINARY", "PYTHON_VERSION", + "REQUIRE_FIPS", ] # Map the test name to test extra. diff --git a/test/__init__.py b/test/__init__.py index d8686e3257..a1c5091f3b 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -389,6 +389,8 @@ def fips_enabled(self): self._fips_enabled = True except (subprocess.SubprocessError, FileNotFoundError): self._fips_enabled = False + if os.environ.get("REQUIRE_FIPS") and not self._fips_enabled: + raise RuntimeError("Expected FIPS to be enabled") return self._fips_enabled def check_auth_type(self, auth_type): diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 9e9cb9316d..f8d04f0d5d 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -391,6 +391,8 @@ def fips_enabled(self): self._fips_enabled = True except (subprocess.SubprocessError, FileNotFoundError): self._fips_enabled = False + if os.environ.get("REQUIRE_FIPS") and not self._fips_enabled: + raise RuntimeError("Expected FIPS to be enabled") return self._fips_enabled def check_auth_type(self, auth_type): From 175481e35d39d851e64441c2121b6c2cf8b335c6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 8 Apr 2025 11:31:13 -0500 Subject: [PATCH 1865/2111] PYTHON-5282 Move config utility functions to separate file (#2267) --- .evergreen/scripts/generate_config.py | 402 ++------------------ .evergreen/scripts/generate_config_utils.py | 365 ++++++++++++++++++ 2 files changed, 400 insertions(+), 367 deletions(-) create mode 100644 .evergreen/scripts/generate_config_utils.py diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 5957c3b1e3..afa97f3906 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -2,277 +2,43 @@ from __future__ import annotations import sys -from dataclasses import dataclass -from inspect import getmembers, isfunction -from itertools import cycle, product, zip_longest -from pathlib import Path -from typing import Any - +from itertools import product + +from generate_config_utils import ( + ALL_PYTHONS, + ALL_VERSIONS, + AUTH_SSLS, + BATCHTIME_WEEK, + C_EXTS, + CPYTHONS, + DEFAULT_HOST, + HOSTS, + MIN_MAX_PYTHON, + OTHER_HOSTS, + PYPYS, + SUB_TASKS, + SYNCS, + TOPOLOGIES, + create_variant, + get_assume_role, + get_s3_put, + get_subprocess_exec, + get_task_name, + get_variant_name, + get_versions_from, + get_versions_until, + handle_c_ext, + write_functions_to_file, + write_tasks_to_file, + write_variants_to_file, + zip_cycle, +) from shrub.v3.evg_build_variant import BuildVariant from shrub.v3.evg_command import ( - EvgCommandType, FunctionCall, archive_targz_pack, - ec2_assume_role, - s3_put, - subprocess_exec, ) -from shrub.v3.evg_project import EvgProject from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef -from shrub.v3.shrub_service import ShrubService - -############## -# Globals -############## - -ALL_VERSIONS = ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] -CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] -PYPYS = ["pypy3.10"] -ALL_PYTHONS = CPYTHONS + PYPYS -MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] -BATCHTIME_WEEK = 10080 -AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] -TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] -C_EXTS = ["without_ext", "with_ext"] -# By default test each of the topologies with a subset of auth/ssl. -SUB_TASKS = [ - ".sharded_cluster .auth .ssl", - ".replica_set .noauth .ssl", - ".standalone .noauth .nossl", -] -SYNCS = ["sync", "async", "sync_async"] -DISPLAY_LOOKUP = dict( - ssl=dict(ssl="SSL", nossl="NoSSL"), - auth=dict(auth="Auth", noauth="NoAuth"), - test_suites=dict(default="Sync", default_async="Async"), - coverage=dict(coverage="cov"), - no_ext={"1": "No C"}, -) -HOSTS = dict() - - -@dataclass -class Host: - name: str - run_on: str - display_name: str - variables: dict[str, str] | None - - -# Hosts with toolchains. -HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) -HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", dict()) -HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", dict()) -HOSTS["macos"] = Host("macos", "macos-14", "macOS", dict()) -HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", dict()) -HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict()) -HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) -HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) -HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict()) -HOSTS["debian11"] = Host("debian11", "debian11-small", "Debian11", dict()) -DEFAULT_HOST = HOSTS["rhel8"] - -# Other hosts -OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64", "Amazon2023"] -for name, run_on in zip( - OTHER_HOSTS, - [ - "rhel92-fips", - "rhel8-zseries-small", - "rhel8-power-small", - "rhel82-arm64-small", - "amazon2023-arm64-latest-large-m8g", - ], -): - HOSTS[name] = Host(name, run_on, name, dict()) - - -############## -# Helpers -############## - - -def create_variant_generic( - tasks: list[str | EvgTaskRef], - display_name: str, - *, - host: Host | None = None, - default_run_on="rhel87-small", - expansions: dict | None = None, - **kwargs: Any, -) -> BuildVariant: - """Create a build variant for the given inputs.""" - task_refs = [] - for t in tasks: - if isinstance(t, EvgTaskRef): - task_refs.append(t) - else: - task_refs.append(EvgTaskRef(name=t)) - expansions = expansions and expansions.copy() or dict() - if "run_on" in kwargs: - run_on = kwargs.pop("run_on") - elif host: - run_on = [host.run_on] - if host.variables: - expansions.update(host.variables) - else: - run_on = [default_run_on] - if isinstance(run_on, str): - run_on = [run_on] - name = display_name.replace(" ", "-").replace("*-", "").lower() - return BuildVariant( - name=name, - display_name=display_name, - tasks=task_refs, - expansions=expansions or None, - run_on=run_on, - **kwargs, - ) - - -def create_variant( - tasks: list[str | EvgTaskRef], - display_name: str, - *, - version: str | None = None, - host: Host | None = None, - python: str | None = None, - expansions: dict | None = None, - **kwargs: Any, -) -> BuildVariant: - expansions = expansions and expansions.copy() or dict() - if version: - expansions["VERSION"] = version - if python: - expansions["PYTHON_BINARY"] = get_python_binary(python, host) - return create_variant_generic( - tasks, display_name, version=version, host=host, expansions=expansions, **kwargs - ) - - -def get_python_binary(python: str, host: Host) -> str: - """Get the appropriate python binary given a python version and host.""" - name = host.name - if name in ["win64", "win32"]: - if name == "win32": - base = "C:/python/32" - else: - base = "C:/python" - python = python.replace(".", "") - if python == "313t": - return f"{base}/Python313/python3.13t.exe" - return f"{base}/Python{python}/python.exe" - - if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: - return f"/opt/python/{python}/bin/python3" - - if name in ["macos", "macos-arm64"]: - if python == "3.13t": - return "/Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t" - return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" - - raise ValueError(f"no match found for python {python} on {name}") - - -def get_versions_from(min_version: str) -> list[str]: - """Get all server versions starting from a minimum version.""" - min_version_float = float(min_version) - rapid_latest = ["rapid", "latest"] - versions = [v for v in ALL_VERSIONS if v not in rapid_latest] - return [v for v in versions if float(v) >= min_version_float] + rapid_latest - - -def get_versions_until(max_version: str) -> list[str]: - """Get all server version up to a max version.""" - max_version_float = float(max_version) - versions = [v for v in ALL_VERSIONS if v not in ["rapid", "latest"]] - versions = [v for v in versions if float(v) <= max_version_float] - if not len(versions): - raise ValueError(f"No server versions found less <= {max_version}") - return versions - - -def get_common_name(base: str, sep: str, **kwargs) -> str: - display_name = base - version = kwargs.pop("VERSION", None) - version = version or kwargs.pop("version", None) - if version: - if version not in ["rapid", "latest"]: - version = f"v{version}" - display_name = f"{display_name}{sep}{version}" - for key, value in kwargs.items(): - name = value - if key.lower() == "python": - if not value.startswith("pypy"): - name = f"Python{value}" - else: - name = f"PyPy{value.replace('pypy', '')}" - elif key.lower() in DISPLAY_LOOKUP: - name = DISPLAY_LOOKUP[key.lower()][value] - else: - continue - display_name = f"{display_name}{sep}{name}" - return display_name - - -def get_variant_name(base: str, host: Host | None = None, **kwargs) -> str: - """Get the display name of a variant.""" - display_name = base - if host is not None: - display_name += f" {host.display_name}" - return get_common_name(display_name, " ", **kwargs) - - -def get_task_name(base: str, **kwargs): - return get_common_name(base, "-", **kwargs).replace(" ", "-").lower() - - -def zip_cycle(*iterables, empty_default=None): - """Get all combinations of the inputs, cycling over the shorter list(s).""" - cycles = [cycle(i) for i in iterables] - for _ in zip_longest(*iterables): - yield tuple(next(i, empty_default) for i in cycles) - - -def handle_c_ext(c_ext, expansions) -> None: - """Handle c extension option.""" - if c_ext == C_EXTS[0]: - expansions["NO_EXT"] = "1" - - -def get_assume_role(**kwargs): - kwargs.setdefault("command_type", EvgCommandType.SETUP) - kwargs.setdefault("role_arn", "${assume_role_arn}") - return ec2_assume_role(**kwargs) - - -def get_subprocess_exec(**kwargs): - kwargs.setdefault("binary", "bash") - kwargs.setdefault("working_dir", "src") - kwargs.setdefault("command_type", EvgCommandType.TEST) - return subprocess_exec(**kwargs) - - -def get_s3_put(**kwargs): - kwargs["aws_key"] = "${AWS_ACCESS_KEY_ID}" - kwargs["aws_secret"] = "${AWS_SECRET_ACCESS_KEY}" # noqa:S105 - kwargs["aws_session_token"] = "${AWS_SESSION_TOKEN}" # noqa:S105 - kwargs["bucket"] = "${bucket_name}" - kwargs.setdefault("optional", "true") - kwargs.setdefault("permissions", "public-read") - kwargs.setdefault("content_type", "${content_type|application/x-gzip}") - kwargs.setdefault("command_type", EvgCommandType.SETUP) - return s3_put(**kwargs) - - -def generate_yaml(tasks=None, variants=None): - """Generate the yaml for a given set of tasks and variants.""" - project = EvgProject(tasks=tasks, buildvariants=variants) - out = ShrubService.generate_yaml(project) - # Dedent by two spaces to match what we use in config.yml - lines = [line[2:] for line in out.splitlines()] - print("\n".join(lines)) # noqa: T201 - ############## # Variants @@ -1293,105 +1059,7 @@ def create_upload_mo_artifacts_func(): return "upload mo artifacts", cmds -################## -# Generate Config -################## - - -def write_variants_to_file(): - mod = sys.modules[__name__] - here = Path(__file__).absolute().parent - target = here.parent / "generated_configs" / "variants.yml" - if target.exists(): - target.unlink() - with target.open("w") as fid: - fid.write("buildvariants:\n") - - for name, func in sorted(getmembers(mod, isfunction)): - if not name.endswith("_variants"): - continue - if not name.startswith("create_"): - raise ValueError("Variant creators must start with create_") - title = name.replace("create_", "").replace("_variants", "").replace("_", " ").capitalize() - project = EvgProject(tasks=None, buildvariants=func()) - out = ShrubService.generate_yaml(project).splitlines() - with target.open("a") as fid: - fid.write(f" # {title} tests\n") - for line in out[1:]: - fid.write(f"{line}\n") - fid.write("\n") - - # Remove extra trailing newline: - data = target.read_text().splitlines() - with target.open("w") as fid: - for line in data[:-1]: - fid.write(f"{line}\n") - - -def write_tasks_to_file(): - mod = sys.modules[__name__] - here = Path(__file__).absolute().parent - target = here.parent / "generated_configs" / "tasks.yml" - if target.exists(): - target.unlink() - with target.open("w") as fid: - fid.write("tasks:\n") - - for name, func in sorted(getmembers(mod, isfunction)): - if name.startswith("_") or not name.endswith("_tasks"): - continue - if not name.startswith("create_"): - raise ValueError("Task creators must start with create_") - title = name.replace("create_", "").replace("_tasks", "").replace("_", " ").capitalize() - project = EvgProject(tasks=func(), buildvariants=None) - out = ShrubService.generate_yaml(project).splitlines() - with target.open("a") as fid: - fid.write(f" # {title} tests\n") - for line in out[1:]: - fid.write(f"{line}\n") - fid.write("\n") - - # Remove extra trailing newline: - data = target.read_text().splitlines() - with target.open("w") as fid: - for line in data[:-1]: - fid.write(f"{line}\n") - - -def write_functions_to_file(): - mod = sys.modules[__name__] - here = Path(__file__).absolute().parent - target = here.parent / "generated_configs" / "functions.yml" - if target.exists(): - target.unlink() - with target.open("w") as fid: - fid.write("functions:\n") - - functions = dict() - for name, func in sorted(getmembers(mod, isfunction)): - if name.startswith("_") or not name.endswith("_func"): - continue - if not name.startswith("create_"): - raise ValueError("Function creators must start with create_") - title = name.replace("create_", "").replace("_func", "").replace("_", " ").capitalize() - func_name, cmds = func() - functions = dict() - functions[func_name] = cmds - project = EvgProject(functions=functions, tasks=None, buildvariants=None) - out = ShrubService.generate_yaml(project).splitlines() - with target.open("a") as fid: - fid.write(f" # {title}\n") - for line in out[1:]: - fid.write(f"{line}\n") - fid.write("\n") - - # Remove extra trailing newline: - data = target.read_text().splitlines() - with target.open("w") as fid: - for line in data[:-1]: - fid.write(f"{line}\n") - - -write_variants_to_file() -write_tasks_to_file() -write_functions_to_file() +mod = sys.modules[__name__] +write_variants_to_file(mod) +write_tasks_to_file(mod) +write_functions_to_file(mod) diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py new file mode 100644 index 0000000000..a91501cf25 --- /dev/null +++ b/.evergreen/scripts/generate_config_utils.py @@ -0,0 +1,365 @@ +from __future__ import annotations + +from dataclasses import dataclass +from inspect import getmembers, isfunction +from itertools import cycle, zip_longest +from pathlib import Path +from typing import Any + +from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_command import ( + EvgCommandType, + ec2_assume_role, + s3_put, + subprocess_exec, +) +from shrub.v3.evg_project import EvgProject +from shrub.v3.evg_task import EvgTaskRef +from shrub.v3.shrub_service import ShrubService + +############## +# Globals +############## + +ALL_VERSIONS = ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] +PYPYS = ["pypy3.10"] +ALL_PYTHONS = CPYTHONS + PYPYS +MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] +BATCHTIME_WEEK = 10080 +AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] +TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] +C_EXTS = ["without_ext", "with_ext"] +# By default test each of the topologies with a subset of auth/ssl. +SUB_TASKS = [ + ".sharded_cluster .auth .ssl", + ".replica_set .noauth .ssl", + ".standalone .noauth .nossl", +] +SYNCS = ["sync", "async", "sync_async"] +DISPLAY_LOOKUP = dict( + ssl=dict(ssl="SSL", nossl="NoSSL"), + auth=dict(auth="Auth", noauth="NoAuth"), + test_suites=dict(default="Sync", default_async="Async"), + coverage=dict(coverage="cov"), + no_ext={"1": "No C"}, +) +HOSTS = dict() + + +@dataclass +class Host: + name: str + run_on: str + display_name: str + variables: dict[str, str] | None + + +# Hosts with toolchains. +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", dict()) +HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", dict()) +HOSTS["macos"] = Host("macos", "macos-14", "macOS", dict()) +HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", dict()) +HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict()) +HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) +HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) +HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict()) +HOSTS["debian11"] = Host("debian11", "debian11-small", "Debian11", dict()) +DEFAULT_HOST = HOSTS["rhel8"] + +# Other hosts +OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64", "Amazon2023"] +for name, run_on in zip( + OTHER_HOSTS, + [ + "rhel92-fips", + "rhel8-zseries-small", + "rhel8-power-small", + "rhel82-arm64-small", + "amazon2023-arm64-latest-large-m8g", + ], +): + HOSTS[name] = Host(name, run_on, name, dict()) + +############## +# Helpers +############## + + +def create_variant_generic( + tasks: list[str | EvgTaskRef], + display_name: str, + *, + host: Host | None = None, + default_run_on="rhel87-small", + expansions: dict | None = None, + **kwargs: Any, +) -> BuildVariant: + """Create a build variant for the given inputs.""" + task_refs = [] + for t in tasks: + if isinstance(t, EvgTaskRef): + task_refs.append(t) + else: + task_refs.append(EvgTaskRef(name=t)) + expansions = expansions and expansions.copy() or dict() + if "run_on" in kwargs: + run_on = kwargs.pop("run_on") + elif host: + run_on = [host.run_on] + if host.variables: + expansions.update(host.variables) + else: + run_on = [default_run_on] + if isinstance(run_on, str): + run_on = [run_on] + name = display_name.replace(" ", "-").replace("*-", "").lower() + return BuildVariant( + name=name, + display_name=display_name, + tasks=task_refs, + expansions=expansions or None, + run_on=run_on, + **kwargs, + ) + + +def create_variant( + tasks: list[str | EvgTaskRef], + display_name: str, + *, + version: str | None = None, + host: Host | None = None, + python: str | None = None, + expansions: dict | None = None, + **kwargs: Any, +) -> BuildVariant: + expansions = expansions and expansions.copy() or dict() + if version: + expansions["VERSION"] = version + if python: + expansions["PYTHON_BINARY"] = get_python_binary(python, host) + return create_variant_generic( + tasks, display_name, version=version, host=host, expansions=expansions, **kwargs + ) + + +def get_python_binary(python: str, host: Host) -> str: + """Get the appropriate python binary given a python version and host.""" + name = host.name + if name in ["win64", "win32"]: + if name == "win32": + base = "C:/python/32" + else: + base = "C:/python" + python = python.replace(".", "") + if python == "313t": + return f"{base}/Python313/python3.13t.exe" + return f"{base}/Python{python}/python.exe" + + if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: + return f"/opt/python/{python}/bin/python3" + + if name in ["macos", "macos-arm64"]: + if python == "3.13t": + return "/Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t" + return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" + + raise ValueError(f"no match found for python {python} on {name}") + + +def get_versions_from(min_version: str) -> list[str]: + """Get all server versions starting from a minimum version.""" + min_version_float = float(min_version) + rapid_latest = ["rapid", "latest"] + versions = [v for v in ALL_VERSIONS if v not in rapid_latest] + return [v for v in versions if float(v) >= min_version_float] + rapid_latest + + +def get_versions_until(max_version: str) -> list[str]: + """Get all server version up to a max version.""" + max_version_float = float(max_version) + versions = [v for v in ALL_VERSIONS if v not in ["rapid", "latest"]] + versions = [v for v in versions if float(v) <= max_version_float] + if not len(versions): + raise ValueError(f"No server versions found less <= {max_version}") + return versions + + +def get_common_name(base: str, sep: str, **kwargs) -> str: + display_name = base + version = kwargs.pop("VERSION", None) + version = version or kwargs.pop("version", None) + if version: + if version not in ["rapid", "latest"]: + version = f"v{version}" + display_name = f"{display_name}{sep}{version}" + for key, value in kwargs.items(): + name = value + if key.lower() == "python": + if not value.startswith("pypy"): + name = f"Python{value}" + else: + name = f"PyPy{value.replace('pypy', '')}" + elif key.lower() in DISPLAY_LOOKUP: + name = DISPLAY_LOOKUP[key.lower()][value] + else: + continue + display_name = f"{display_name}{sep}{name}" + return display_name + + +def get_variant_name(base: str, host: Host | None = None, **kwargs) -> str: + """Get the display name of a variant.""" + display_name = base + if host is not None: + display_name += f" {host.display_name}" + return get_common_name(display_name, " ", **kwargs) + + +def get_task_name(base: str, **kwargs): + return get_common_name(base, "-", **kwargs).replace(" ", "-").lower() + + +def zip_cycle(*iterables, empty_default=None): + """Get all combinations of the inputs, cycling over the shorter list(s).""" + cycles = [cycle(i) for i in iterables] + for _ in zip_longest(*iterables): + yield tuple(next(i, empty_default) for i in cycles) + + +def handle_c_ext(c_ext, expansions) -> None: + """Handle c extension option.""" + if c_ext == C_EXTS[0]: + expansions["NO_EXT"] = "1" + + +def get_assume_role(**kwargs): + kwargs.setdefault("command_type", EvgCommandType.SETUP) + kwargs.setdefault("role_arn", "${assume_role_arn}") + return ec2_assume_role(**kwargs) + + +def get_subprocess_exec(**kwargs): + kwargs.setdefault("binary", "bash") + kwargs.setdefault("working_dir", "src") + kwargs.setdefault("command_type", EvgCommandType.TEST) + return subprocess_exec(**kwargs) + + +def get_s3_put(**kwargs): + kwargs["aws_key"] = "${AWS_ACCESS_KEY_ID}" + kwargs["aws_secret"] = "${AWS_SECRET_ACCESS_KEY}" # noqa:S105 + kwargs["aws_session_token"] = "${AWS_SESSION_TOKEN}" # noqa:S105 + kwargs["bucket"] = "${bucket_name}" + kwargs.setdefault("optional", "true") + kwargs.setdefault("permissions", "public-read") + kwargs.setdefault("content_type", "${content_type|application/x-gzip}") + kwargs.setdefault("command_type", EvgCommandType.SETUP) + return s3_put(**kwargs) + + +def generate_yaml(tasks=None, variants=None): + """Generate the yaml for a given set of tasks and variants.""" + project = EvgProject(tasks=tasks, buildvariants=variants) + out = ShrubService.generate_yaml(project) + # Dedent by two spaces to match what we use in config.yml + lines = [line[2:] for line in out.splitlines()] + print("\n".join(lines)) # noqa: T201 + + +################## +# Generate Config +################## + + +def write_variants_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "variants.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("buildvariants:\n") + + for name, func in sorted(getmembers(mod, isfunction)): + if not name.endswith("_variants"): + continue + if not name.startswith("create_"): + raise ValueError("Variant creators must start with create_") + title = name.replace("create_", "").replace("_variants", "").replace("_", " ").capitalize() + project = EvgProject(tasks=None, buildvariants=func()) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +def write_tasks_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "tasks.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("tasks:\n") + + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_tasks"): + continue + if not name.startswith("create_"): + raise ValueError("Task creators must start with create_") + title = name.replace("create_", "").replace("_tasks", "").replace("_", " ").capitalize() + project = EvgProject(tasks=func(), buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +def write_functions_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "functions.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("functions:\n") + + functions = dict() + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_func"): + continue + if not name.startswith("create_"): + raise ValueError("Function creators must start with create_") + title = name.replace("create_", "").replace("_func", "").replace("_", " ").capitalize() + func_name, cmds = func() + functions = dict() + functions[func_name] = cmds + project = EvgProject(functions=functions, tasks=None, buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title}\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") From fafa00e9e3448cf44f1504c907e6400b24612cb8 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Wed, 9 Apr 2025 09:09:42 -0400 Subject: [PATCH 1866/2111] PYTHON-5126 & PYTHON-5280 Addresses issues raised in DRIVERS-3097 and DRIVERS-3123 (#2261) --- bson/binary.py | 15 ++++++++++++ test/bson_binary_vector/packed_bit.json | 21 ++++++++++++----- test/test_bson.py | 8 +++---- test/test_bson_binary_vector.py | 31 +++++++++++++++++-------- 4 files changed, 55 insertions(+), 20 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index 6698e55ccc..48f1f58512 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -462,6 +462,10 @@ def from_vector( raise ValueError(f"{padding=}. It must be in [0,1, ..7].") if padding and not vector: raise ValueError("Empty vector with non-zero padding.") + if padding and not (vector[-1] & ((1 << padding) - 1)) == 0: # type: ignore + raise ValueError( + "If padding p is provided, all bits in the final byte lower than p must be 0." + ) elif dtype == BinaryVectorDtype.FLOAT32: # pack floats as float32 format_str = "f" if padding: @@ -490,6 +494,11 @@ def as_vector(self) -> BinaryVector: dtype = BinaryVectorDtype(dtype) n_values = len(self) - position + if padding and dtype != BinaryVectorDtype.PACKED_BIT: + raise ValueError( + f"Corrupt data. Padding ({padding}) must be 0 for all but PACKED_BIT dtypes. ({dtype=})" + ) + if dtype == BinaryVectorDtype.INT8: dtype_format = "b" format_string = f"<{n_values}{dtype_format}" @@ -513,6 +522,12 @@ def as_vector(self) -> BinaryVector: dtype_format = "B" format_string = f"<{n_values}{dtype_format}" unpacked_uint8s = list(struct.unpack_from(format_string, self, position)) + if padding and not n_values: + raise ValueError("Corrupt data. Vector has a padding P, but no data.") + if padding and n_values and not (unpacked_uint8s[-1] & ((1 << padding) - 1)) == 0: + raise ValueError( + "Corrupt data. Vector has a padding P, but bits in the final byte lower than P are non-zero." + ) return BinaryVector(unpacked_uint8s, dtype, padding) else: diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json index a220e7e318..3015acba66 100644 --- a/test/bson_binary_vector/packed_bit.json +++ b/test/bson_binary_vector/packed_bit.json @@ -21,23 +21,32 @@ "canonical_bson": "1600000005766563746F7200040000000910007F0700" }, { - "description": "Empty Vector PACKED_BIT", + "description": "PACKED_BIT with padding", "valid": true, - "vector": [], + "vector": [127, 8], "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", - "padding": 0, - "canonical_bson": "1400000005766563746F72000200000009100000" + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000910037F0800" }, { - "description": "PACKED_BIT with padding", - "valid": true, + "description": "PACKED_BIT with inconsistent padding", + "valid": false, "vector": [127, 7], "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", "padding": 3, "canonical_bson": "1600000005766563746F7200040000000910037F0700" }, + { + "description": "Empty Vector PACKED_BIT", + "valid": true, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009100000" + }, { "description": "Overflow Vector PACKED_BIT", "valid": false, diff --git a/test/test_bson.py b/test/test_bson.py index 1616c513c2..522945d5f4 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -739,7 +739,7 @@ def test_vector(self): """Tests of subtype 9""" # We start with valid cases, across the 3 dtypes implemented. # Work with a simple vector that can be interpreted as int8, float32, or ubyte - list_vector = [127, 7] + list_vector = [127, 8] # As INT8, vector has length 2 binary_vector = Binary.from_vector(list_vector, BinaryVectorDtype.INT8) vector = binary_vector.as_vector() @@ -764,18 +764,18 @@ def test_vector(self): uncompressed = "" for val in list_vector: uncompressed += format(val, "08b") - assert uncompressed[:-padding] == "0111111100000" + assert uncompressed[:-padding] == "0111111100001" # It is worthwhile explicitly showing the values encoded to BSON padded_doc = {"padded_vec": padded_vec} assert ( encode(padded_doc) - == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x07\x00" + == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x08\x00" ) # and dumped to json assert ( json_util.dumps(padded_doc) - == '{"padded_vec": {"$binary": {"base64": "EAN/Bw==", "subType": "09"}}}' + == '{"padded_vec": {"$binary": {"base64": "EAN/CA==", "subType": "09"}}}' ) # FLOAT32 is also implemented diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py index 9bfdcbfb9a..afe01f42bf 100644 --- a/test/test_bson_binary_vector.py +++ b/test/test_bson_binary_vector.py @@ -48,11 +48,11 @@ def create_test(case_spec): def run_test(self): for test_case in case_spec.get("tests", []): description = test_case["description"] - vector_exp = test_case.get("vector", []) + vector_exp = test_case.get("vector", None) dtype_hex_exp = test_case["dtype_hex"] dtype_alias_exp = test_case.get("dtype_alias") padding_exp = test_case.get("padding", 0) - canonical_bson_exp = test_case.get("canonical_bson") + canonical_bson_exp = test_case.get("canonical_bson", None) # Convert dtype hex string into bytes dtype_exp = BinaryVectorDtype(int(dtype_hex_exp, 16).to_bytes(1, byteorder="little")) @@ -85,14 +85,25 @@ def run_test(self): self.assertEqual(cB_obs, canonical_bson_exp, description) else: - with self.assertRaises((struct.error, ValueError), msg=description): - # Tests Binary.from_vector - Binary.from_vector(vector_exp, dtype_exp, padding_exp) - # Tests Binary.as_vector - cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) - decoded_doc = decode(cB_exp) - binary_obs = decoded_doc[test_key] - binary_obs.as_vector() + """ + #### To prove correct in an invalid case (`valid:false`), one MUST + - if the vector field is present, raise an exception when attempting to encode a document from the numeric values, + dtype, and padding. + - if the canonical_bson field is present, raise an exception when attempting to deserialize it into the corresponding + numeric values, as the field contains corrupted data. + """ + # Tests Binary.from_vector() + if vector_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + Binary.from_vector(vector_exp, dtype_exp, padding_exp) + + # Tests Binary.as_vector() + if canonical_bson_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + binary_obs.as_vector() return run_test From 3c2ce16ad85d96b30009197dbbbdbe123c5a4248 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 9 Apr 2025 15:14:35 -0400 Subject: [PATCH 1867/2111] =?UTF-8?q?PYTHON-5283=20-=20Skip=20test.test=5F?= =?UTF-8?q?monitor.TestMonitor.test=5Fcleanup=5Fexecuto=E2=80=A6=20(#2268)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/asynchronous/test_monitor.py | 1 + test/test_monitor.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/asynchronous/test_monitor.py b/test/asynchronous/test_monitor.py index 195f6f9fac..55a20d7643 100644 --- a/test/asynchronous/test_monitor.py +++ b/test/asynchronous/test_monitor.py @@ -57,6 +57,7 @@ async def create_client(self): await connected(client) return client + @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") async def test_cleanup_executors_on_client_del(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") diff --git a/test/test_monitor.py b/test/test_monitor.py index 25620a99e8..8bcdf7130a 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -57,6 +57,7 @@ def create_client(self): connected(client) return client + @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") def test_cleanup_executors_on_client_del(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") From 86e221eb5cc68f6c6feed399572e40f151154658 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Wed, 9 Apr 2025 18:00:04 -0400 Subject: [PATCH 1868/2111] PYTHON-5288: SRV hostname validation fails when resolver and resolved hostnames are identical with three domain levels (#2272) --- doc/changelog.rst | 16 ++++++++++++++++ pymongo/asynchronous/srv_resolver.py | 6 ++++-- pymongo/synchronous/srv_resolver.py | 6 ++++-- test/asynchronous/test_dns.py | 22 ++++++++++++++++++---- test/test_dns.py | 22 ++++++++++++++++++---- 5 files changed, 60 insertions(+), 12 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 077c85bb4b..3c307564b1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,22 @@ Changelog ========= +Changes in Version 4.12.1 (XXXX/XX/XX) +-------------------------------------- + +Version 4.12.1 is a bug fix release. + +- Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. + +Issues Resolved +............... + +See the `PyMongo 4.12 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.12 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=41916 +.. _PYTHON-5288: https://jira.mongodb.org/browse/PYTHON-5288 + Changes in Version 4.12.0 (2025/04/08) -------------------------------------- diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py index f7c67af3e1..9d1b8fe141 100644 --- a/pymongo/asynchronous/srv_resolver.py +++ b/pymongo/asynchronous/srv_resolver.py @@ -96,6 +96,7 @@ def __init__( except Exception: raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None self.__slen = len(self.__plist) + self.nparts = len(split_fqdn) async def get_options(self) -> Optional[str]: from dns import resolver @@ -137,12 +138,13 @@ async def _get_srv_response_and_hosts( # Validate hosts for node in nodes: - if self.__fqdn == node[0].lower(): + srv_host = node[0].lower() + if self.__fqdn == srv_host and self.nparts < 3: raise ConfigurationError( "Invalid SRV host: return address is identical to SRV hostname" ) try: - nlist = node[0].lower().split(".")[1:][-self.__slen :] + nlist = srv_host.split(".")[1:][-self.__slen :] except Exception: raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None if self.__plist != nlist: diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py index cf7b0842ab..0817c6dcd7 100644 --- a/pymongo/synchronous/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -96,6 +96,7 @@ def __init__( except Exception: raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None self.__slen = len(self.__plist) + self.nparts = len(split_fqdn) def get_options(self) -> Optional[str]: from dns import resolver @@ -137,12 +138,13 @@ def _get_srv_response_and_hosts( # Validate hosts for node in nodes: - if self.__fqdn == node[0].lower(): + srv_host = node[0].lower() + if self.__fqdn == srv_host and self.nparts < 3: raise ConfigurationError( "Invalid SRV host: return address is identical to SRV hostname" ) try: - nlist = node[0].lower().split(".")[1:][-self.__slen :] + nlist = srv_host.split(".")[1:][-self.__slen :] except Exception: raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None if self.__plist != nlist: diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py index 01c8d7b40b..5666612218 100644 --- a/test/asynchronous/test_dns.py +++ b/test/asynchronous/test_dns.py @@ -220,12 +220,15 @@ async def mock_resolve(query, record_type, *args, **kwargs): mock_resolver.side_effect = mock_resolve domain = case["query"].split("._tcp.")[1] connection_string = f"mongodb+srv://{domain}" - try: + if "expected_error" not in case: await parse_uri(connection_string) - except ConfigurationError as e: - self.assertIn(case["expected_error"], str(e)) else: - self.fail(f"ConfigurationError was not raised for query: {case['query']}") + try: + await parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") async def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): with patch("dns.asyncresolver.resolve"): @@ -289,6 +292,17 @@ async def test_4_throw_when_return_address_does_not_contain_dot_separating_share ] await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + async def test_5_when_srv_hostname_has_two_dot_separated_parts_it_is_valid_for_the_returned_hostname_to_be_identical( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.mongodb.com", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + if __name__ == "__main__": unittest.main() diff --git a/test/test_dns.py b/test/test_dns.py index 9360f3f289..8f88562e3f 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -218,12 +218,15 @@ def mock_resolve(query, record_type, *args, **kwargs): mock_resolver.side_effect = mock_resolve domain = case["query"].split("._tcp.")[1] connection_string = f"mongodb+srv://{domain}" - try: + if "expected_error" not in case: parse_uri(connection_string) - except ConfigurationError as e: - self.assertIn(case["expected_error"], str(e)) else: - self.fail(f"ConfigurationError was not raised for query: {case['query']}") + try: + parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): with patch("dns.resolver.resolve"): @@ -287,6 +290,17 @@ def test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part ] self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + def test_5_when_srv_hostname_has_two_dot_separated_parts_it_is_valid_for_the_returned_hostname_to_be_identical( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.mongodb.com", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + if __name__ == "__main__": unittest.main() From 5b0862e78ef05cdfe4015c0e81b6596943f102de Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 10 Apr 2025 10:30:09 -0400 Subject: [PATCH 1869/2111] PYTHON-5297 - AsyncMongoClient connection error causes UnboundLocalError (#2273) --- doc/changelog.rst | 7 ++++--- pymongo/pool_shared.py | 2 -- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 3c307564b1..e82804565f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,21 +1,22 @@ Changelog ========= + Changes in Version 4.12.1 (XXXX/XX/XX) -------------------------------------- Version 4.12.1 is a bug fix release. +- Fixed a bug that could raise ``UnboundLocalError`` when creating asynchronous connections over SSL. - Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. Issues Resolved ............... -See the `PyMongo 4.12 release notes in JIRA`_ for the list of resolved issues +See the `PyMongo 4.12.1 release notes in JIRA`_ for the list of resolved issues in this release. -.. _PyMongo 4.12 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=41916 -.. _PYTHON-5288: https://jira.mongodb.org/browse/PYTHON-5288 +.. _PyMongo 4.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43094 Changes in Version 4.12.0 (2025/04/08) -------------------------------------- diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index a46a4d2300..be7c416dc5 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -346,12 +346,10 @@ async def _configured_protocol_interface( ssl=ssl_context, ) except _CertificateError: - transport.abort() # Raise _CertificateError directly like we do after match_hostname # below. raise except (OSError, SSLError) as exc: - transport.abort() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol # mismatch, will be turned into ServerSelectionTimeoutErrors later. From 5b42ed8cacc1f5af6cc2e4d99364256e7a2df51b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Apr 2025 09:31:50 -0500 Subject: [PATCH 1870/2111] PYTHON-5286 Create server version variants (#2270) --- .evergreen/generated_configs/tasks.yml | 282 +++++++++++++++++++- .evergreen/generated_configs/variants.yml | 173 +++++------- .evergreen/scripts/generate_config.py | 71 ++--- .evergreen/scripts/generate_config_utils.py | 5 +- 4 files changed, 377 insertions(+), 154 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index b2b8dc1191..89d8ff8d6c 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -820,19 +820,7 @@ tasks: - name: coverage-report commands: - func: download and merge coverage - depends_on: - - name: .standalone - variant: .coverage_tag - status: "*" - patch_optional: true - - name: .replica_set - variant: .coverage_tag - status: "*" - patch_optional: true - - name: .sharded_cluster - variant: .coverage_tag - status: "*" - patch_optional: true + depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] tags: [coverage] # Doctest tests @@ -8031,6 +8019,274 @@ tasks: - nossl - sync_async + # Server version tests + - name: test-python3.9-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.9" + tags: [server-version, "3.9", sharded_cluster-auth-ssl] + - name: test-python3.10-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.10" + tags: [server-version, "3.10", sharded_cluster-auth-ssl] + - name: test-python3.11-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.11" + tags: [server-version, "3.11", sharded_cluster-auth-ssl] + - name: test-python3.12-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + tags: [server-version, "3.12", sharded_cluster-auth-ssl] + - name: test-python3.13-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.13" + tags: [server-version, "3.13", sharded_cluster-auth-ssl] + - name: test-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + tags: [server-version, pypy3.10, sharded_cluster-auth-ssl] + - name: test-python3.9-auth-ssl-standalone-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.9" + tags: [server-version, "3.9", standalone-auth-ssl] + - name: test-python3.10-auth-nossl-standalone-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.10" + tags: [server-version, "3.10", standalone-auth-nossl] + - name: test-python3.11-noauth-ssl-standalone-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.11" + tags: [server-version, "3.11", standalone-noauth-ssl] + - name: test-python3.12-noauth-nossl-standalone-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.12" + tags: [server-version, "3.12", standalone-noauth-nossl] + - name: test-python3.13-auth-ssl-replica-set-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.13" + tags: [server-version, "3.13", replica_set-auth-ssl] + - name: test-pypy3.10-auth-nossl-replica-set + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: replica_set + PYTHON_VERSION: pypy3.10 + tags: [server-version, pypy3.10, replica_set-auth-nossl] + - name: test-python3.9-noauth-ssl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.9" + tags: [server-version, "3.9", replica_set-noauth-ssl] + - name: test-python3.10-noauth-nossl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.10" + tags: [server-version, "3.10", replica_set-noauth-nossl] + - name: test-python3.12-auth-nossl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + tags: [server-version, "3.12", sharded_cluster-auth-nossl] + - name: test-python3.13-noauth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.13" + tags: [server-version, "3.13", sharded_cluster-noauth-ssl] + - name: test-pypy3.10-noauth-nossl-sharded-cluster + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + tags: [server-version, pypy3.10, sharded_cluster-noauth-nossl] + # Serverless tests - name: test-serverless commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 940c4e9b0c..0aa2ac7454 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -805,114 +805,6 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Server tests - - name: test-rhel8-python3.9-cov-no-c - tasks: - - name: .standalone .sync_async - - name: .replica_set .sync_async - - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 Python3.9 cov No C" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-python3.9-cov - tasks: - - name: .standalone .sync_async - - name: .replica_set .sync_async - - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 Python3.9 cov" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-python3.13-cov-no-c - tasks: - - name: .standalone .sync_async - - name: .replica_set .sync_async - - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 Python3.13 cov No C" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-python3.13-cov - tasks: - - name: .standalone .sync_async - - name: .replica_set .sync_async - - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 Python3.13 cov" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-pypy3.10-cov-no-c - tasks: - - name: .standalone .sync_async - - name: .replica_set .sync_async - - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 PyPy3.10 cov No C" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - NO_EXT: "1" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-pypy3.10-cov - tasks: - - name: .standalone .sync_async - - name: .replica_set .sync_async - - name: .sharded_cluster .sync_async - display_name: "* Test RHEL8 PyPy3.10 cov" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [coverage_tag] - - name: test-rhel8-python3.10 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 Python3.10" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: test-rhel8-python3.11 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 Python3.11" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: test-rhel8-python3.12 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: "* Test RHEL8 Python3.12" - run_on: - - rhel87-small - expansions: - COVERAGE: coverage - PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: test-macos-python3.9 tasks: - name: .sharded_cluster .auth .ssl !.sync_async @@ -1018,6 +910,71 @@ buildvariants: expansions: PYTHON_BINARY: C:/python/32/Python313/python.exe + # Server version tests + - name: mongodb-v4.0 + tasks: + - name: .server-version + display_name: "* MongoDB v4.0" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-v4.2 + tasks: + - name: .server-version + display_name: "* MongoDB v4.2" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-v4.4 + tasks: + - name: .server-version + display_name: "* MongoDB v4.4" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-v5.0 + tasks: + - name: .server-version + display_name: "* MongoDB v5.0" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-v6.0 + tasks: + - name: .server-version + display_name: "* MongoDB v6.0" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-v7.0 + tasks: + - name: .server-version + display_name: "* MongoDB v7.0" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-v8.0 + tasks: + - name: .server-version + display_name: "* MongoDB v8.0" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-rapid + tasks: + - name: .server-version + display_name: "* MongoDB rapid" + run_on: + - rhel87-small + tags: [coverage_tag] + - name: mongodb-latest + tasks: + - name: .server-version + display_name: "* MongoDB latest" + run_on: + - rhel87-small + tags: [coverage_tag] + # Serverless tests - name: serverless-rhel8-python3.9 tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index afa97f3906..2f1e978252 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -66,39 +66,20 @@ def create_ocsp_variants() -> list[BuildVariant]: return variants -def create_server_variants() -> list[BuildVariant]: +def create_server_version_variants() -> list[BuildVariant]: variants = [] - - # Run the full matrix on linux with min and max CPython, and latest pypy. - host = DEFAULT_HOST - # Prefix the display name with an asterisk so it is sorted first. - base_display_name = "* Test" - for python, c_ext in product([*MIN_MAX_PYTHON, PYPYS[-1]], C_EXTS): - expansions = dict(COVERAGE="coverage") - handle_c_ext(c_ext, expansions) - display_name = get_variant_name(base_display_name, host, python=python, **expansions) + for version in ALL_VERSIONS: + display_name = get_variant_name("* MongoDB", version=version) variant = create_variant( - [f".{t} .sync_async" for t in TOPOLOGIES], - display_name, - python=python, - host=host, - tags=["coverage_tag"], - expansions=expansions, + [".server-version"], display_name, host=DEFAULT_HOST, tags=["coverage_tag"] ) variants.append(variant) + return variants - # Test the rest of the pythons. - for python in CPYTHONS[1:-1] + PYPYS[:-1]: - display_name = f"Test {host}" - display_name = get_variant_name(base_display_name, host, python=python) - variant = create_variant( - [f"{t} .sync_async" for t in SUB_TASKS], - display_name, - python=python, - host=host, - expansions=expansions, - ) - variants.append(variant) + +def create_server_variants() -> list[BuildVariant]: + variants = [] + base_display_name = "* Test" # Test a subset on each of the other platforms. for host_name in ("macos", "macos-arm64", "win64", "win32"): @@ -597,6 +578,32 @@ def create_aws_lambda_variants(): ############## +def create_server_version_tasks(): + tasks = [] + # Test all pythons with sharded_cluster, auth, and ssl. + task_types = [(p, "sharded_cluster", "auth", "ssl") for p in ALL_PYTHONS] + # Test all combinations of topology, auth, and ssl, with rotating pythons. + for (topology, auth, ssl), python in zip_cycle( + list(product(TOPOLOGIES, ["auth", "noauth"], ["ssl", "nossl"])), ALL_PYTHONS + ): + # Skip the ones we already have. + if topology == "sharded_cluster" and auth == "auth" and ssl == "ssl": + continue + task_types.append((python, topology, auth, ssl)) + for python, topology, auth, ssl in task_types: + tags = ["server-version", python, f"{topology}-{auth}-{ssl}"] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + if python not in PYPYS: + expansions["COVERAGE"] = "1" + name = get_task_name("test", python=python, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_server_tasks(): tasks = [] for topo, version, (auth, ssl), sync in product(TOPOLOGIES, ALL_VERSIONS, AUTH_SSLS, SYNCS): @@ -882,11 +889,11 @@ def create_coverage_report_tasks(): # Instead list out all coverage tasks using tags. # Run the coverage task even if some tasks fail. # Run the coverage task even if some tasks are not scheduled in a patch build. - task_deps = [] - for name in [".standalone", ".replica_set", ".sharded_cluster"]: - task_deps.append( - EvgTaskDependency(name=name, variant=".coverage_tag", status="*", patch_optional=True) + task_deps = [ + EvgTaskDependency( + name=".server-version", variant=".coverage_tag", status="*", patch_optional=True ) + ] cmd = FunctionCall(func="download and merge coverage") return [EvgTask(name=task_name, tags=tags, depends_on=task_deps, commands=[cmd])] diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index a91501cf25..59de5beb70 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -40,8 +40,11 @@ DISPLAY_LOOKUP = dict( ssl=dict(ssl="SSL", nossl="NoSSL"), auth=dict(auth="Auth", noauth="NoAuth"), + topology=dict( + standalone="Standalone", replica_set="Replica Set", sharded_cluster="Sharded Cluster" + ), test_suites=dict(default="Sync", default_async="Async"), - coverage=dict(coverage="cov"), + coverage={"1": "cov"}, no_ext={"1": "No C"}, ) HOSTS = dict() From cce4a0d17923bc5b96a32abba08350b92906bcec Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Apr 2025 09:55:46 -0500 Subject: [PATCH 1871/2111] PYTHON-5295 Update lockfile for compat with older versions of uv (#2271) --- uv.lock | 1 + 1 file changed, 1 insertion(+) diff --git a/uv.lock b/uv.lock index aa23663a84..6bc0839795 100644 --- a/uv.lock +++ b/uv.lock @@ -998,6 +998,7 @@ sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5 [[package]] name = "pymongo" +version = "4.13.0.dev0" source = { editable = "." } dependencies = [ { name = "dnspython" }, From 7a0afcf0b9432f0f01ccfae428aa98ffff4a7656 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 10 Apr 2025 11:08:23 -0400 Subject: [PATCH 1872/2111] PYTHON-5302 - Run ruff before synchro in pre-commit hooks (#2274) --- .pre-commit-config.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a570e55ad1..6c5fa764b9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,6 +18,14 @@ repos: exclude: .patch exclude_types: [json] +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.1.3 + hooks: + - id: ruff + args: ["--fix", "--show-fixes"] + - id: ruff-format + - repo: local hooks: - id: synchro @@ -30,14 +38,6 @@ repos: - ruff==0.1.3 - unasync -- repo: https://github.com/astral-sh/ruff-pre-commit - # Ruff version. - rev: v0.1.3 - hooks: - - id: ruff - args: ["--fix", "--show-fixes"] - - id: ruff-format - - repo: https://github.com/adamchainz/blacken-docs rev: "1.16.0" hooks: From 7ec9c07081fdff3b5155e39a5200092a835b89c4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 11 Apr 2025 16:20:08 -0500 Subject: [PATCH 1873/2111] PYTHON-5303 Add missing gridfs synchronous init file (#2279) --- gridfs/synchronous/__init__.py | 42 ++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 gridfs/synchronous/__init__.py diff --git a/gridfs/synchronous/__init__.py b/gridfs/synchronous/__init__.py new file mode 100644 index 0000000000..bc2704364b --- /dev/null +++ b/gridfs/synchronous/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GridFS is a specification for storing large objects in Mongo. + +The :mod:`gridfs` package is an implementation of GridFS on top of +:mod:`pymongo`, exposing a file-like interface. + +.. seealso:: The MongoDB documentation on `gridfs `_. +""" +from __future__ import annotations + +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE +from gridfs.synchronous.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutCursor, +) + +__all__ = [ + "GridFS", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "GridIn", + "GridOut", + "GridOutCursor", +] From a8197a792e3f04c24301a3725db9733ec5e5dbf1 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 11 Apr 2025 14:58:03 -0700 Subject: [PATCH 1874/2111] PYTHON-5308 Remove SON from doc examples (#2280) --- test/asynchronous/test_examples.py | 44 ++++++++++++------------------ test/test_examples.py | 44 ++++++++++++------------------ 2 files changed, 36 insertions(+), 52 deletions(-) diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py index 9e9b208f51..a334a3ed1d 100644 --- a/test/asynchronous/test_examples.py +++ b/test/asynchronous/test_examples.py @@ -182,40 +182,36 @@ async def test_query_embedded_documents(self): db = self.db # Start Example 14 - # Subdocument key order matters in a few of these examples so we have - # to use bson.son.SON instead of a Python dict. - from bson.son import SON - await db.inventory.insert_many( [ { "item": "journal", "qty": 25, - "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "size": {"h": 14, "w": 21, "uom": "cm"}, "status": "A", }, { "item": "notebook", "qty": 50, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "A", }, { "item": "paper", "qty": 100, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "D", }, { "item": "planner", "qty": 75, - "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "size": {"h": 22.85, "w": 30, "uom": "cm"}, "status": "D", }, { "item": "postcard", "qty": 45, - "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "size": {"h": 10, "w": 15.25, "uom": "cm"}, "status": "A", }, ] @@ -223,13 +219,13 @@ async def test_query_embedded_documents(self): # End Example 14 # Start Example 15 - cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + cursor = db.inventory.find({"size": {"h": 14, "w": 21, "uom": "cm"}}) # End Example 15 self.assertEqual(len(await cursor.to_list()), 1) # Start Example 16 - cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + cursor = db.inventory.find({"size": {"w": 21, "h": 14, "uom": "cm"}}) # End Example 16 self.assertEqual(len(await cursor.to_list()), 0) @@ -324,39 +320,35 @@ async def test_query_array_of_documents(self): db = self.db # Start Example 29 - # Subdocument key order matters in a few of these examples so we have - # to use bson.son.SON instead of a Python dict. - from bson.son import SON - await db.inventory.insert_many( [ { "item": "journal", "instock": [ - SON([("warehouse", "A"), ("qty", 5)]), - SON([("warehouse", "C"), ("qty", 15)]), + {"warehouse": "A", "qty": 5}, + {"warehouse": "C", "qty": 15}, ], }, - {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + {"item": "notebook", "instock": [{"warehouse": "C", "qty": 5}]}, { "item": "paper", "instock": [ - SON([("warehouse", "A"), ("qty", 60)]), - SON([("warehouse", "B"), ("qty", 15)]), + {"warehouse": "A", "qty": 60}, + {"warehouse": "B", "qty": 15}, ], }, { "item": "planner", "instock": [ - SON([("warehouse", "A"), ("qty", 40)]), - SON([("warehouse", "B"), ("qty", 5)]), + {"warehouse": "A", "qty": 40}, + {"warehouse": "B", "qty": 5}, ], }, { "item": "postcard", "instock": [ - SON([("warehouse", "B"), ("qty", 15)]), - SON([("warehouse", "C"), ("qty", 35)]), + {"warehouse": "B", "qty": 15}, + {"warehouse": "C", "qty": 35}, ], }, ] @@ -364,13 +356,13 @@ async def test_query_array_of_documents(self): # End Example 29 # Start Example 30 - cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) + cursor = db.inventory.find({"instock": {"warehouse": "A", "qty": 5}}) # End Example 30 self.assertEqual(len(await cursor.to_list()), 1) # Start Example 31 - cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) + cursor = db.inventory.find({"instock": {"qty": 5, "warehouse": "A"}}) # End Example 31 self.assertEqual(len(await cursor.to_list()), 0) diff --git a/test/test_examples.py b/test/test_examples.py index 28fe1beaff..0585d1e057 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -182,40 +182,36 @@ def test_query_embedded_documents(self): db = self.db # Start Example 14 - # Subdocument key order matters in a few of these examples so we have - # to use bson.son.SON instead of a Python dict. - from bson.son import SON - db.inventory.insert_many( [ { "item": "journal", "qty": 25, - "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "size": {"h": 14, "w": 21, "uom": "cm"}, "status": "A", }, { "item": "notebook", "qty": 50, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "A", }, { "item": "paper", "qty": 100, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "D", }, { "item": "planner", "qty": 75, - "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "size": {"h": 22.85, "w": 30, "uom": "cm"}, "status": "D", }, { "item": "postcard", "qty": 45, - "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "size": {"h": 10, "w": 15.25, "uom": "cm"}, "status": "A", }, ] @@ -223,13 +219,13 @@ def test_query_embedded_documents(self): # End Example 14 # Start Example 15 - cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + cursor = db.inventory.find({"size": {"h": 14, "w": 21, "uom": "cm"}}) # End Example 15 self.assertEqual(len(cursor.to_list()), 1) # Start Example 16 - cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + cursor = db.inventory.find({"size": {"w": 21, "h": 14, "uom": "cm"}}) # End Example 16 self.assertEqual(len(cursor.to_list()), 0) @@ -324,39 +320,35 @@ def test_query_array_of_documents(self): db = self.db # Start Example 29 - # Subdocument key order matters in a few of these examples so we have - # to use bson.son.SON instead of a Python dict. - from bson.son import SON - db.inventory.insert_many( [ { "item": "journal", "instock": [ - SON([("warehouse", "A"), ("qty", 5)]), - SON([("warehouse", "C"), ("qty", 15)]), + {"warehouse": "A", "qty": 5}, + {"warehouse": "C", "qty": 15}, ], }, - {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + {"item": "notebook", "instock": [{"warehouse": "C", "qty": 5}]}, { "item": "paper", "instock": [ - SON([("warehouse", "A"), ("qty", 60)]), - SON([("warehouse", "B"), ("qty", 15)]), + {"warehouse": "A", "qty": 60}, + {"warehouse": "B", "qty": 15}, ], }, { "item": "planner", "instock": [ - SON([("warehouse", "A"), ("qty", 40)]), - SON([("warehouse", "B"), ("qty", 5)]), + {"warehouse": "A", "qty": 40}, + {"warehouse": "B", "qty": 5}, ], }, { "item": "postcard", "instock": [ - SON([("warehouse", "B"), ("qty", 15)]), - SON([("warehouse", "C"), ("qty", 35)]), + {"warehouse": "B", "qty": 15}, + {"warehouse": "C", "qty": 35}, ], }, ] @@ -364,13 +356,13 @@ def test_query_array_of_documents(self): # End Example 29 # Start Example 30 - cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) + cursor = db.inventory.find({"instock": {"warehouse": "A", "qty": 5}}) # End Example 30 self.assertEqual(len(cursor.to_list()), 1) # Start Example 31 - cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) + cursor = db.inventory.find({"instock": {"qty": 5, "warehouse": "A"}}) # End Example 31 self.assertEqual(len(cursor.to_list()), 0) From 5d14b3458eff5292e04eb3fd6974e73b03d767dd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Apr 2025 08:09:08 -0500 Subject: [PATCH 1875/2111] PYTHON-5304 Create standard non-linux tests (#2275) --- .evergreen/config.yml | 4 +- .evergreen/generated_configs/tasks.yml | 630 +++++++++++++++++++- .evergreen/generated_configs/variants.yml | 142 ++--- .evergreen/scripts/generate_config.py | 59 +- .evergreen/scripts/generate_config_utils.py | 3 +- .evergreen/scripts/setup_tests.py | 1 + .evergreen/utils.sh | 6 +- 7 files changed, 704 insertions(+), 141 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index a1d6284713..28f8bcfcca 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -66,7 +66,7 @@ functions: params: binary: bash working_dir: "src" - include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, PYTHON_VERSION, + include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, PYTHON_VERSION, IS_WIN32, STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER, LOCAL_ATLAS, NO_EXT] args: [.evergreen/just.sh, run-server, "${TEST_NAME}"] - command: expansions.update @@ -87,7 +87,7 @@ functions: type: test params: include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, - AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, PYTHON_VERSION, + AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, PYTHON_VERSION, IS_WIN32, DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG, ORCHESTRATION_FILE, OCSP_SERVER_TYPE, VERSION, REQUIRE_FIPS] binary: bash diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 89d8ff8d6c..8d80b113a3 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -8035,7 +8035,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.9" - tags: [server-version, "3.9", sharded_cluster-auth-ssl] + tags: [server-version, python-3.9, sharded_cluster-auth-ssl] - name: test-python3.10-auth-ssl-sharded-cluster-cov commands: - func: run server @@ -8051,7 +8051,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.10" - tags: [server-version, "3.10", sharded_cluster-auth-ssl] + tags: [server-version, python-3.10, sharded_cluster-auth-ssl] - name: test-python3.11-auth-ssl-sharded-cluster-cov commands: - func: run server @@ -8067,7 +8067,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.11" - tags: [server-version, "3.11", sharded_cluster-auth-ssl] + tags: [server-version, python-3.11, sharded_cluster-auth-ssl] - name: test-python3.12-auth-ssl-sharded-cluster-cov commands: - func: run server @@ -8083,7 +8083,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.12" - tags: [server-version, "3.12", sharded_cluster-auth-ssl] + tags: [server-version, python-3.12, sharded_cluster-auth-ssl] - name: test-python3.13-auth-ssl-sharded-cluster-cov commands: - func: run server @@ -8099,7 +8099,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.13" - tags: [server-version, "3.13", sharded_cluster-auth-ssl] + tags: [server-version, python-3.13, sharded_cluster-auth-ssl] - name: test-pypy3.10-auth-ssl-sharded-cluster commands: - func: run server @@ -8113,7 +8113,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster PYTHON_VERSION: pypy3.10 - tags: [server-version, pypy3.10, sharded_cluster-auth-ssl] + tags: [server-version, python-pypy3.10, sharded_cluster-auth-ssl] - name: test-python3.9-auth-ssl-standalone-cov commands: - func: run server @@ -8129,7 +8129,7 @@ tasks: TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.9" - tags: [server-version, "3.9", standalone-auth-ssl] + tags: [server-version, python-3.9, standalone-auth-ssl] - name: test-python3.10-auth-nossl-standalone-cov commands: - func: run server @@ -8145,7 +8145,7 @@ tasks: TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.10" - tags: [server-version, "3.10", standalone-auth-nossl] + tags: [server-version, python-3.10, standalone-auth-nossl] - name: test-python3.11-noauth-ssl-standalone-cov commands: - func: run server @@ -8161,7 +8161,7 @@ tasks: TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.11" - tags: [server-version, "3.11", standalone-noauth-ssl] + tags: [server-version, python-3.11, standalone-noauth-ssl] - name: test-python3.12-noauth-nossl-standalone-cov commands: - func: run server @@ -8177,7 +8177,7 @@ tasks: TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.12" - tags: [server-version, "3.12", standalone-noauth-nossl] + tags: [server-version, python-3.12, standalone-noauth-nossl] - name: test-python3.13-auth-ssl-replica-set-cov commands: - func: run server @@ -8193,7 +8193,7 @@ tasks: TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.13" - tags: [server-version, "3.13", replica_set-auth-ssl] + tags: [server-version, python-3.13, replica_set-auth-ssl] - name: test-pypy3.10-auth-nossl-replica-set commands: - func: run server @@ -8207,7 +8207,7 @@ tasks: SSL: nossl TOPOLOGY: replica_set PYTHON_VERSION: pypy3.10 - tags: [server-version, pypy3.10, replica_set-auth-nossl] + tags: [server-version, python-pypy3.10, replica_set-auth-nossl] - name: test-python3.9-noauth-ssl-replica-set-cov commands: - func: run server @@ -8223,7 +8223,7 @@ tasks: TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.9" - tags: [server-version, "3.9", replica_set-noauth-ssl] + tags: [server-version, python-3.9, replica_set-noauth-ssl] - name: test-python3.10-noauth-nossl-replica-set-cov commands: - func: run server @@ -8239,7 +8239,7 @@ tasks: TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.10" - tags: [server-version, "3.10", replica_set-noauth-nossl] + tags: [server-version, python-3.10, replica_set-noauth-nossl] - name: test-python3.12-auth-nossl-sharded-cluster-cov commands: - func: run server @@ -8255,7 +8255,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.12" - tags: [server-version, "3.12", sharded_cluster-auth-nossl] + tags: [server-version, python-3.12, sharded_cluster-auth-nossl] - name: test-python3.13-noauth-ssl-sharded-cluster-cov commands: - func: run server @@ -8271,7 +8271,7 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.13" - tags: [server-version, "3.13", sharded_cluster-noauth-ssl] + tags: [server-version, python-3.13, sharded_cluster-noauth-ssl] - name: test-pypy3.10-noauth-nossl-sharded-cluster commands: - func: run server @@ -8285,7 +8285,7 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster PYTHON_VERSION: pypy3.10 - tags: [server-version, pypy3.10, sharded_cluster-noauth-nossl] + tags: [server-version, python-pypy3.10, sharded_cluster-noauth-nossl] # Serverless tests - name: test-serverless @@ -8296,3 +8296,599 @@ tasks: AUTH: auth SSL: ssl tags: [serverless] + + # Standard non linux tests + - name: test-v4.0-python3.9-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-4.0 + - python-3.9 + - standalone-noauth-nossl + - sync + - name: test-v4.0-python3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-4.0 + - python-3.10 + - replica_set-noauth-ssl + - async + - name: test-v4.0-python3.11-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-4.0 + - python-3.11 + - sharded_cluster-auth-ssl + - sync + - name: test-v4.2-python3.12-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-4.2 + - python-3.12 + - standalone-noauth-nossl + - async + - name: test-v4.2-python3.13-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-4.2 + - python-3.13 + - replica_set-noauth-ssl + - sync + - name: test-v4.2-python3.9-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: "3.9" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-4.2 + - python-3.9 + - sharded_cluster-auth-ssl + - async + - name: test-v4.4-python3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-4.4 + - python-3.10 + - standalone-noauth-nossl + - sync + - name: test-v4.4-python3.11-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-4.4 + - python-3.11 + - replica_set-noauth-ssl + - async + - name: test-v4.4-python3.12-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-4.4 + - python-3.12 + - sharded_cluster-auth-ssl + - sync + - name: test-v5.0-python3.13-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-5.0 + - python-3.13 + - standalone-noauth-nossl + - async + - name: test-v5.0-python3.9-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-5.0 + - python-3.9 + - replica_set-noauth-ssl + - sync + - name: test-v5.0-python3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-5.0 + - python-3.10 + - sharded_cluster-auth-ssl + - async + - name: test-v6.0-python3.11-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-6.0 + - python-3.11 + - standalone-noauth-nossl + - sync + - name: test-v6.0-python3.12-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-6.0 + - python-3.12 + - replica_set-noauth-ssl + - async + - name: test-v6.0-python3.13-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-6.0 + - python-3.13 + - sharded_cluster-auth-ssl + - sync + - name: test-v7.0-python3.9-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-7.0 + - python-3.9 + - standalone-noauth-nossl + - async + - name: test-v7.0-python3.10-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-7.0 + - python-3.10 + - replica_set-noauth-ssl + - sync + - name: test-v7.0-python3.11-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-7.0 + - python-3.11 + - sharded_cluster-auth-ssl + - async + - name: test-v8.0-python3.12-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-8.0 + - python-3.12 + - standalone-noauth-nossl + - sync + - name: test-v8.0-python3.13-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-8.0 + - python-3.13 + - replica_set-noauth-ssl + - async + - name: test-v8.0-python3.9-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-8.0 + - python-3.9 + - sharded_cluster-auth-ssl + - sync + - name: test-rapid-python3.10-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-rapid + - python-3.10 + - standalone-noauth-nossl + - async + - name: test-rapid-python3.11-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-rapid + - python-3.11 + - replica_set-noauth-ssl + - sync + - name: test-rapid-python3.12-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-rapid + - python-3.12 + - sharded_cluster-auth-ssl + - async + - name: test-latest-python3.13-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-latest + - python-3.13 + - standalone-noauth-nossl + - sync + - name: test-latest-python3.9-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + PYTHON_VERSION: "3.9" + TEST_NAME: default_async + tags: + - standard-non-linux + - server-latest + - python-3.9 + - replica_set-noauth-ssl + - async + - name: test-latest-python3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - standard-non-linux + - server-latest + - python-3.10 + - sharded_cluster-auth-ssl + - sync diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 0aa2ac7454..0ad366dbea 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -804,112 +804,6 @@ buildvariants: expansions: PYTHON_BINARY: /opt/python/3.9/bin/python3 - # Server tests - - name: test-macos-python3.9 - tasks: - - name: .sharded_cluster .auth .ssl !.sync_async - - name: .replica_set .noauth .ssl !.sync_async - - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test macOS Python3.9" - run_on: - - macos-14 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl !.sync_async - - name: .replica_set .noauth .ssl !.sync_async - - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test macOS Python3.13" - run_on: - - macos-14 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-macos-arm64-python3.9 - tasks: - - name: .sharded_cluster .auth .ssl .6.0 !.sync_async - - name: .replica_set .noauth .ssl .6.0 !.sync_async - - name: .standalone .noauth .nossl .6.0 !.sync_async - - name: .sharded_cluster .auth .ssl .7.0 !.sync_async - - name: .replica_set .noauth .ssl .7.0 !.sync_async - - name: .standalone .noauth .nossl .7.0 !.sync_async - - name: .sharded_cluster .auth .ssl .8.0 !.sync_async - - name: .replica_set .noauth .ssl .8.0 !.sync_async - - name: .standalone .noauth .nossl .8.0 !.sync_async - - name: .sharded_cluster .auth .ssl .rapid !.sync_async - - name: .replica_set .noauth .ssl .rapid !.sync_async - - name: .standalone .noauth .nossl .rapid !.sync_async - - name: .sharded_cluster .auth .ssl .latest !.sync_async - - name: .replica_set .noauth .ssl .latest !.sync_async - - name: .standalone .noauth .nossl .latest !.sync_async - display_name: "* Test macOS Arm64 Python3.9" - run_on: - - macos-14-arm64 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: test-macos-arm64-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl .6.0 !.sync_async - - name: .replica_set .noauth .ssl .6.0 !.sync_async - - name: .standalone .noauth .nossl .6.0 !.sync_async - - name: .sharded_cluster .auth .ssl .7.0 !.sync_async - - name: .replica_set .noauth .ssl .7.0 !.sync_async - - name: .standalone .noauth .nossl .7.0 !.sync_async - - name: .sharded_cluster .auth .ssl .8.0 !.sync_async - - name: .replica_set .noauth .ssl .8.0 !.sync_async - - name: .standalone .noauth .nossl .8.0 !.sync_async - - name: .sharded_cluster .auth .ssl .rapid !.sync_async - - name: .replica_set .noauth .ssl .rapid !.sync_async - - name: .standalone .noauth .nossl .rapid !.sync_async - - name: .sharded_cluster .auth .ssl .latest !.sync_async - - name: .replica_set .noauth .ssl .latest !.sync_async - - name: .standalone .noauth .nossl .latest !.sync_async - display_name: "* Test macOS Arm64 Python3.13" - run_on: - - macos-14-arm64 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - - name: test-win64-python3.9 - tasks: - - name: .sharded_cluster .auth .ssl !.sync_async - - name: .replica_set .noauth .ssl !.sync_async - - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win64 Python3.9" - run_on: - - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/Python39/python.exe - - name: test-win64-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl !.sync_async - - name: .replica_set .noauth .ssl !.sync_async - - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win64 Python3.13" - run_on: - - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/Python313/python.exe - - name: test-win32-python3.9 - tasks: - - name: .sharded_cluster .auth .ssl !.sync_async - - name: .replica_set .noauth .ssl !.sync_async - - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win32 Python3.9" - run_on: - - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/32/Python39/python.exe - - name: test-win32-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl !.sync_async - - name: .replica_set .noauth .ssl !.sync_async - - name: .standalone .noauth .nossl !.sync_async - display_name: "* Test Win32 Python3.13" - run_on: - - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/32/Python313/python.exe - # Server version tests - name: mongodb-v4.0 tasks: @@ -1063,6 +957,42 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [versionedApi_tag] + # Standard nonlinux tests + - name: test-macos + tasks: + - name: .standard-non-linux + display_name: "* Test macOS" + run_on: + - macos-14 + tags: [standard-non-linux] + - name: test-macos-arm64 + tasks: + - name: .standard-non-linux .server-6.0 + - name: .standard-non-linux .server-7.0 + - name: .standard-non-linux .server-8.0 + - name: .standard-non-linux .server-rapid + - name: .standard-non-linux .server-latest + display_name: "* Test macOS Arm64" + run_on: + - macos-14-arm64 + tags: [standard-non-linux] + - name: test-win64 + tasks: + - name: .standard-non-linux + display_name: "* Test Win64" + run_on: + - windows-64-vsMulti-small + tags: [standard-non-linux] + - name: test-win32 + tasks: + - name: .standard-non-linux + display_name: "* Test Win32" + run_on: + - windows-64-vsMulti-small + expansions: + IS_WIN32: "1" + tags: [standard-non-linux] + # Storage engine tests - name: storage-inmemory-rhel8-python3.9 tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 2f1e978252..1aea20c8f3 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -77,23 +77,26 @@ def create_server_version_variants() -> list[BuildVariant]: return variants -def create_server_variants() -> list[BuildVariant]: +def create_standard_nonlinux_variants() -> list[BuildVariant]: variants = [] base_display_name = "* Test" # Test a subset on each of the other platforms. for host_name in ("macos", "macos-arm64", "win64", "win32"): - for python in MIN_MAX_PYTHON: - tasks = [f"{t} !.sync_async" for t in SUB_TASKS] - # MacOS arm64 only works on server versions 6.0+ - if host_name == "macos-arm64": - tasks = [] - for version in get_versions_from("6.0"): - tasks.extend(f"{t} .{version} !.sync_async" for t in SUB_TASKS) - host = HOSTS[host_name] - display_name = get_variant_name(base_display_name, host, python=python) - variant = create_variant(tasks, display_name, python=python, host=host) - variants.append(variant) + tasks = [".standard-non-linux"] + # MacOS arm64 only works on server versions 6.0+ + if host_name == "macos-arm64": + tasks = [ + f".standard-non-linux .server-{version}" for version in get_versions_from("6.0") + ] + host = HOSTS[host_name] + tags = ["standard-non-linux"] + expansions = dict() + if host_name == "win32": + expansions["IS_WIN32"] = "1" + display_name = get_variant_name(base_display_name, host) + variant = create_variant(tasks, display_name, host=host, tags=tags, expansions=expansions) + variants.append(variant) return variants @@ -591,7 +594,7 @@ def create_server_version_tasks(): continue task_types.append((python, topology, auth, ssl)) for python, topology, auth, ssl in task_types: - tags = ["server-version", python, f"{topology}-{auth}-{ssl}"] + tags = ["server-version", f"python-{python}", f"{topology}-{auth}-{ssl}"] expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) if python not in PYPYS: expansions["COVERAGE"] = "1" @@ -604,9 +607,37 @@ def create_server_version_tasks(): return tasks +def create_standard_non_linux_tasks(): + tasks = [] + + for (version, topology), python, sync in zip_cycle( + list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS, SYNCS + ): + auth = "auth" if topology == "sharded_cluster" else "noauth" + ssl = "nossl" if topology == "standalone" else "ssl" + tags = [ + "standard-non-linux", + f"server-{version}", + f"python-{python}", + f"{topology}-{auth}-{ssl}", + sync, + ] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) + name = get_task_name("test", python=python, sync=sync, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_server_tasks(): tasks = [] - for topo, version, (auth, ssl), sync in product(TOPOLOGIES, ALL_VERSIONS, AUTH_SSLS, SYNCS): + for topo, version, (auth, ssl), sync in product( + TOPOLOGIES, ALL_VERSIONS, AUTH_SSLS, [*SYNCS, "sync_async"] + ): name = f"test-{version}-{topo}-{auth}-{ssl}-{sync}".lower() tags = [version, topo, auth, ssl, sync] server_vars = dict( diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 59de5beb70..aa43af9a68 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -36,7 +36,7 @@ ".replica_set .noauth .ssl", ".standalone .noauth .nossl", ] -SYNCS = ["sync", "async", "sync_async"] +SYNCS = ["sync", "async"] DISPLAY_LOOKUP = dict( ssl=dict(ssl="SSL", nossl="NoSSL"), auth=dict(auth="Auth", noauth="NoAuth"), @@ -44,6 +44,7 @@ standalone="Standalone", replica_set="Replica Set", sharded_cluster="Sharded Cluster" ), test_suites=dict(default="Sync", default_async="Async"), + sync={"sync": "Sync", "async": "Async"}, coverage={"1": "cov"}, no_ext={"1": "No C"}, ) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 2fa1fc47fc..9b657507c1 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -34,6 +34,7 @@ "PYTHON_BINARY", "PYTHON_VERSION", "REQUIRE_FIPS", + "IS_WIN32", ] # Map the test name to test extra. diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index faecde05fd..354d18dbf7 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -123,7 +123,11 @@ get_python_binary() { PYTHON="/Library/Frameworks/Python.Framework/Versions/$version/bin/python3" elif [ "Windows_NT" = "${OS:-}" ]; then version=$(echo $version | cut -d. -f1,2 | sed 's/\.//g') - PYTHON="C:/python/Python$version/python.exe" + if [ -n "${IS_WIN32:-}" ]; then + PYTHON="C:/python/32/Python$version/python.exe" + else + PYTHON="C:/python/Python$version/python.exe" + fi else PYTHON="/opt/python/$version/bin/python3" fi From 3723edc199a45e9dd0012ce449671eddedae40ee Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Apr 2025 09:35:37 -0500 Subject: [PATCH 1876/2111] PYTHON-5277 Convert remaining Evergreen functions to generated config (#2281) --- .evergreen/config.yml | 108 ------------- .evergreen/generated_configs/functions.yml | 168 +++++++++++++++++++++ .evergreen/scripts/generate_config.py | 115 ++++++++++++++ .pre-commit-config.yaml | 2 +- 4 files changed, 284 insertions(+), 109 deletions(-) diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 28f8bcfcca..46c86103ad 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -29,114 +29,6 @@ include: - filename: .evergreen/generated_configs/tasks.yml - filename: .evergreen/generated_configs/variants.yml -functions: - "fetch source": - # Executes clone and applies the submitted patch, if any - - command: git.get_project - params: - directory: "src" - # Applies the subitted patch, if any - # Deprecated. Should be removed. But still needed for certain agents (ZAP) - - command: git.apply_patch - - "setup system": - # Make an evergreen expansion file with dynamic values - - command: subprocess.exec - params: - include_expansions_in_env: ["is_patch", "project", "version_id"] - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/setup-system.sh - # Load the expansion file to make an evergreen variable with the current unique version - - command: expansions.update - params: - file: src/expansion.yml - - "upload test results": - - command: attach.results - params: - file_location: "${DRIVERS_TOOLS}/results.json" - - command: attach.xunit_results - params: - file: "src/xunit-results/TEST-*.xml" - - "run server": - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - include_expansions_in_env: [VERSION, TOPOLOGY, AUTH, SSL, ORCHESTRATION_FILE, PYTHON_BINARY, PYTHON_VERSION, IS_WIN32, - STORAGE_ENGINE, REQUIRE_API_VERSION, DRIVERS_TOOLS, TEST_CRYPT_SHARED, AUTH_AWS, LOAD_BALANCER, LOCAL_ATLAS, NO_EXT] - args: [.evergreen/just.sh, run-server, "${TEST_NAME}"] - - command: expansions.update - params: - file: ${DRIVERS_TOOLS}/mo-expansion.yml - - "run just script": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN] - binary: bash - working_dir: "src" - args: [.evergreen/just.sh, "${JUSTFILE_TARGET}"] - - "run tests": - - command: subprocess.exec - type: test - params: - include_expansions_in_env: [AUTH, SSL, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, - AWS_SESSION_TOKEN, COVERAGE, PYTHON_BINARY, LIBMONGOCRYPT_URL, MONGODB_URI, PYTHON_VERSION, IS_WIN32, - DISABLE_TEST_COMMANDS, GREEN_FRAMEWORK, NO_EXT, COMPRESSORS, MONGODB_API_VERSION, DEBUG_LOG, - ORCHESTRATION_FILE, OCSP_SERVER_TYPE, VERSION, REQUIRE_FIPS] - binary: bash - working_dir: "src" - args: [.evergreen/just.sh, setup-tests, "${TEST_NAME}", "${SUB_TEST_NAME}"] - - command: subprocess.exec - type: test - params: - working_dir: "src" - binary: bash - args: [.evergreen/just.sh, run-tests] - - "cleanup": - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - args: - - .evergreen/scripts/cleanup.sh - - "teardown system": - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - args: [.evergreen/just.sh, teardown-tests] - - command: subprocess.exec - params: - binary: bash - working_dir: "src" - args: - - ${DRIVERS_TOOLS}/.evergreen/teardown.sh - - "assume ec2 role": - - command: ec2.assume_role - params: - role_arn: ${aws_test_secrets_role} - duration_seconds: 3600 - - "attach benchmark test results": - - command: attach.results - params: - file_location: src/report.json - - "send dashboard data": - - command: perf.send - params: - file: src/results.json - pre: - func: "fetch source" - func: "setup system" diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index afd7f11374..bc8942efc5 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -1,4 +1,27 @@ functions: + # Assume ec2 role + assume ec2 role: + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + duration_seconds: 3600 + + # Attach benchmark test results + attach benchmark test results: + - command: attach.results + params: + file_location: src/report.json + + # Cleanup + cleanup: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/cleanup.sh + working_dir: src + type: test + # Download and merge coverage download and merge coverage: - command: ec2.assume_role @@ -56,6 +79,142 @@ functions: optional: "true" type: setup + # Fetch source + fetch source: + - command: git.get_project + params: + directory: src + + # Run just script + run just script: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - ${JUSTFILE_TARGET} + working_dir: src + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + + # Run server + run server: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - run-server + - ${TEST_NAME} + working_dir: src + include_expansions_in_env: + - VERSION + - TOPOLOGY + - AUTH + - SSL + - ORCHESTRATION_FILE + - PYTHON_BINARY + - PYTHON_VERSION + - STORAGE_ENGINE + - REQUIRE_API_VERSION + - DRIVERS_TOOLS + - TEST_CRYPT_SHARED + - AUTH_AWS + - LOAD_BALANCER + - LOCAL_ATLAS + - NO_EXT + type: test + - command: expansions.update + params: + file: ${DRIVERS_TOOLS}/mo-expansion.yml + + # Run tests + run tests: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - setup-tests + - ${TEST_NAME} + - ${SUB_TEST_NAME} + working_dir: src + include_expansions_in_env: + - AUTH + - SSL + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + - COVERAGE + - PYTHON_BINARY + - LIBMONGOCRYPT_URL + - MONGODB_URI + - PYTHON_VERSION + - DISABLE_TEST_COMMANDS + - GREEN_FRAMEWORK + - NO_EXT + - COMPRESSORS + - MONGODB_API_VERSION + - DEBUG_LOG + - ORCHESTRATION_FILE + - OCSP_SERVER_TYPE + - VERSION + - IS_WIN32 + - REQUIRE_FIPS + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - run-tests + working_dir: src + type: test + + # Send dashboard data + send dashboard data: + - command: perf.send + params: + file: src/results.json + + # Setup system + setup system: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/setup-system.sh + working_dir: src + include_expansions_in_env: + - is_patch + - project + - version_id + type: test + - command: expansions.update + params: + file: src/expansion.yml + + # Teardown system + teardown system: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - teardown-tests + working_dir: src + type: test + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/teardown.sh + working_dir: src + type: test + # Upload coverage upload coverage: - command: ec2.assume_role @@ -115,3 +274,12 @@ functions: display_name: drivers-tools-logs.tar.gz optional: "true" type: setup + + # Upload test results + upload test results: + - command: attach.results + params: + file_location: ${DRIVERS_TOOLS}/results.json + - command: attach.xunit_results + params: + file: src/xunit-results/TEST-*.xml diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 1aea20c8f3..e99a9a3980 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -37,6 +37,12 @@ from shrub.v3.evg_command import ( FunctionCall, archive_targz_pack, + attach_results, + attach_xunit_results, + ec2_assume_role, + expansions_update, + git_get_project, + perf_send, ) from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef @@ -1097,6 +1103,115 @@ def create_upload_mo_artifacts_func(): return "upload mo artifacts", cmds +def create_fetch_source_func(): + # Executes clone and applies the submitted patch, if any. + cmd = git_get_project(directory="src") + return "fetch source", [cmd] + + +def create_setup_system_func(): + # Make an evergreen expansion file with dynamic values. + includes = ["is_patch", "project", "version_id"] + args = [".evergreen/scripts/setup-system.sh"] + setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + # Load the expansion file to make an evergreen variable with the current unique version. + expansion_cmd = expansions_update(file="src/expansion.yml") + return "setup system", [setup_cmd, expansion_cmd] + + +def create_upload_test_results_func(): + results_cmd = attach_results(file_location="${DRIVERS_TOOLS}/results.json") + xresults_cmd = attach_xunit_results(file="src/xunit-results/TEST-*.xml") + return "upload test results", [results_cmd, xresults_cmd] + + +def create_run_server_func(): + includes = [ + "VERSION", + "TOPOLOGY", + "AUTH", + "SSL", + "ORCHESTRATION_FILE", + "PYTHON_BINARY", + "PYTHON_VERSION", + "STORAGE_ENGINE", + "REQUIRE_API_VERSION", + "DRIVERS_TOOLS", + "TEST_CRYPT_SHARED", + "AUTH_AWS", + "LOAD_BALANCER", + "LOCAL_ATLAS", + "NO_EXT", + ] + args = [".evergreen/just.sh", "run-server", "${TEST_NAME}"] + sub_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + expansion_cmd = expansions_update(file="${DRIVERS_TOOLS}/mo-expansion.yml") + return "run server", [sub_cmd, expansion_cmd] + + +def create_run_just_script_func(): + includes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + args = [".evergreen/just.sh", "${JUSTFILE_TARGET}"] + cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + return "run just script", [cmd] + + +def create_run_tests_func(): + includes = [ + "AUTH", + "SSL", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + "COVERAGE", + "PYTHON_BINARY", + "LIBMONGOCRYPT_URL", + "MONGODB_URI", + "PYTHON_VERSION", + "DISABLE_TEST_COMMANDS", + "GREEN_FRAMEWORK", + "NO_EXT", + "COMPRESSORS", + "MONGODB_API_VERSION", + "DEBUG_LOG", + "ORCHESTRATION_FILE", + "OCSP_SERVER_TYPE", + "VERSION", + "IS_WIN32", + "REQUIRE_FIPS", + ] + args = [".evergreen/just.sh", "setup-tests", "${TEST_NAME}", "${SUB_TEST_NAME}"] + setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + test_cmd = get_subprocess_exec(args=[".evergreen/just.sh", "run-tests"]) + return "run tests", [setup_cmd, test_cmd] + + +def create_cleanup_func(): + cmd = get_subprocess_exec(args=[".evergreen/scripts/cleanup.sh"]) + return "cleanup", [cmd] + + +def create_teardown_system_func(): + tests_cmd = get_subprocess_exec(args=[".evergreen/just.sh", "teardown-tests"]) + drivers_cmd = get_subprocess_exec(args=["${DRIVERS_TOOLS}/.evergreen/teardown.sh"]) + return "teardown system", [tests_cmd, drivers_cmd] + + +def create_assume_ec2_role_func(): + cmd = ec2_assume_role(role_arn="${aws_test_secrets_role}", duration_seconds=3600) + return "assume ec2 role", [cmd] + + +def create_attach_benchmark_test_results_func(): + cmd = attach_results(file_location="src/report.json") + return "attach benchmark test results", [cmd] + + +def create_send_dashboard_data_func(): + cmd = perf_send(file="src/results.json") + return "send dashboard data", [cmd] + + mod = sys.modules[__name__] write_variants_to_file(mod) write_tasks_to_file(mod) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6c5fa764b9..9c67b8283b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -121,4 +121,4 @@ repos: entry: .evergreen/scripts/generate-config.sh language: python require_serial: true - additional_dependencies: ["shrub.py>=3.9.0", "pyyaml>=6.0.2"] + additional_dependencies: ["shrub.py>=3.10.0", "pyyaml>=6.0.2"] From e6a4a7145eccb0b6c76c84aa539f29b40f3424b3 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 15 Apr 2025 08:05:20 -0400 Subject: [PATCH 1877/2111] PYTHON-5310 - Fix uri_parser AttributeError when used directly (#2283) --- doc/changelog.rst | 1 + pymongo/__init__.py | 3 +++ test/test_default_exports.py | 7 +++++++ 3 files changed, 11 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index e82804565f..597f75f873 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,7 @@ Version 4.12.1 is a bug fix release. - Fixed a bug that could raise ``UnboundLocalError`` when creating asynchronous connections over SSL. - Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. +- Fixed a bug that caused direct use of ``pymongo.uri_parser`` to raise an ``AttributeError``. Issues Resolved ............... diff --git a/pymongo/__init__.py b/pymongo/__init__.py index e392508912..9a35750811 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -105,6 +105,9 @@ from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +# Public module compatibility imports +import pymongo.uri_parser # noqa: F401 # isort: skip + version = __version__ """Current version of PyMongo.""" diff --git a/test/test_default_exports.py b/test/test_default_exports.py index d9301d2223..5f3e749d36 100644 --- a/test/test_default_exports.py +++ b/test/test_default_exports.py @@ -209,6 +209,13 @@ def test_pymongo_imports(self): ) from pymongo.write_concern import WriteConcern, validate_boolean + def test_pymongo_submodule_attributes(self): + import pymongo + + self.assertTrue(hasattr(pymongo, "uri_parser")) + self.assertTrue(pymongo.uri_parser) + self.assertTrue(pymongo.uri_parser.parse_uri) + def test_gridfs_imports(self): import gridfs from gridfs.errors import CorruptGridFile, FileExists, GridFSError, NoFile From b83389d6bce914e9b1283edcb1713a89317b300a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Apr 2025 14:44:09 -0500 Subject: [PATCH 1878/2111] PYTHON-5311 Create standard linux evergreen tasks (#2282) --- .evergreen/generated_configs/tasks.yml | 631 +++++++++++++++++++--- .evergreen/generated_configs/variants.yml | 6 +- .evergreen/scripts/generate_config.py | 65 +-- 3 files changed, 574 insertions(+), 128 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 8d80b113a3..5b09ce20f7 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -727,95 +727,6 @@ tasks: working_dir: src type: test - # Compression tests - - name: test-compression-v4.0-python3.9 - commands: - - func: run server - vars: - VERSION: "4.0" - - func: run tests - tags: [compression, "4.0"] - - name: test-compression-v4.2-python3.9 - commands: - - func: run server - vars: - VERSION: "4.2" - - func: run tests - tags: [compression, "4.2"] - - name: test-compression-v4.4-python3.9 - commands: - - func: run server - vars: - VERSION: "4.4" - - func: run tests - tags: [compression, "4.4"] - - name: test-compression-v5.0-python3.9 - commands: - - func: run server - vars: - VERSION: "5.0" - - func: run tests - tags: [compression, "5.0"] - - name: test-compression-v6.0-python3.9 - commands: - - func: run server - vars: - VERSION: "6.0" - - func: run tests - tags: [compression, "6.0"] - - name: test-compression-v7.0-python3.9 - commands: - - func: run server - vars: - VERSION: "7.0" - - func: run tests - tags: [compression, "7.0"] - - name: test-compression-v8.0-python3.9 - commands: - - func: run server - vars: - VERSION: "8.0" - - func: run tests - tags: [compression, "8.0"] - - name: test-compression-rapid-python3.9 - commands: - - func: run server - vars: - VERSION: rapid - - func: run tests - tags: [compression, rapid] - - name: test-compression-latest-python3.9 - commands: - - func: run server - vars: - VERSION: latest - - func: run tests - tags: [compression, latest] - - name: test-compression-latest-python3.13-no-c - commands: - - func: run server - vars: - VERSION: latest - - func: run tests - vars: - NO_EXT: "1" - tags: [compression, latest] - - name: test-compression-latest-python3.13 - commands: - - func: run server - vars: - VERSION: latest - - func: run tests - vars: {} - tags: [compression, latest] - - name: test-compression-latest-pypy3.10 - commands: - - func: run server - vars: - VERSION: latest - - func: run tests - tags: [compression, latest] - # Coverage report tests - name: coverage-report commands: @@ -8297,6 +8208,548 @@ tasks: SSL: ssl tags: [serverless] + # Standard linux tests + - name: test-v4.0-python3.9-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + PYTHON_VERSION: "3.9" + tags: + - standard-linux + - server-4.0 + - python-3.9 + - standalone-noauth-nossl + - name: test-v4.0-python3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.0" + PYTHON_VERSION: "3.10" + tags: + - standard-linux + - server-4.0 + - python-3.10 + - replica_set-noauth-ssl + - name: test-v4.0-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.0" + PYTHON_VERSION: "3.11" + tags: + - standard-linux + - server-4.0 + - python-3.11 + - sharded_cluster-auth-ssl + - name: test-v4.2-python3.12-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: "3.12" + tags: + - standard-linux + - server-4.2 + - python-3.12 + - standalone-noauth-nossl + - name: test-v4.2-python3.13-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: "3.13" + tags: + - standard-linux + - server-4.2 + - python-3.13 + - replica_set-noauth-ssl + - name: test-v4.2-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + tags: + - standard-linux + - server-4.2 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - name: test-v4.4-python3.9-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.9" + tags: + - standard-linux + - server-4.4 + - python-3.9 + - standalone-noauth-nossl + - name: test-v4.4-python3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: "3.10" + tags: + - standard-linux + - server-4.4 + - python-3.10 + - replica_set-noauth-ssl + - name: test-v4.4-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.11" + tags: + - standard-linux + - server-4.4 + - python-3.11 + - sharded_cluster-auth-ssl + - name: test-v5.0-python3.12-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + PYTHON_VERSION: "3.12" + tags: + - standard-linux + - server-5.0 + - python-3.12 + - standalone-noauth-nossl + - name: test-v5.0-python3.13-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + PYTHON_VERSION: "3.13" + tags: + - standard-linux + - server-5.0 + - python-3.13 + - replica_set-noauth-ssl + - name: test-v5.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + tags: + - standard-linux + - server-5.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - name: test-v6.0-python3.9-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + PYTHON_VERSION: "3.9" + tags: + - standard-linux + - server-6.0 + - python-3.9 + - standalone-noauth-nossl + - name: test-v6.0-python3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + PYTHON_VERSION: "3.10" + tags: + - standard-linux + - server-6.0 + - python-3.10 + - replica_set-noauth-ssl + - name: test-v6.0-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.11" + tags: + - standard-linux + - server-6.0 + - python-3.11 + - sharded_cluster-auth-ssl + - name: test-v7.0-python3.12-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: "3.12" + tags: + - standard-linux + - server-7.0 + - python-3.12 + - standalone-noauth-nossl + - name: test-v7.0-python3.13-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + PYTHON_VERSION: "3.13" + tags: + - standard-linux + - server-7.0 + - python-3.13 + - replica_set-noauth-ssl + - name: test-v7.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + tags: + - standard-linux + - server-7.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - name: test-v8.0-python3.9-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: "3.9" + tags: + - standard-linux + - server-8.0 + - python-3.9 + - standalone-noauth-nossl + - name: test-v8.0-python3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + PYTHON_VERSION: "3.10" + tags: + - standard-linux + - server-8.0 + - python-3.10 + - replica_set-noauth-ssl + - name: test-v8.0-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.11" + tags: + - standard-linux + - server-8.0 + - python-3.11 + - sharded_cluster-auth-ssl + - name: test-rapid-python3.12-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: "3.12" + tags: + - standard-linux + - server-rapid + - python-3.12 + - standalone-noauth-nossl + - name: test-rapid-python3.13-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: "3.13" + tags: + - standard-linux + - server-rapid + - python-3.13 + - replica_set-noauth-ssl + - name: test-rapid-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: pypy3.10 + tags: + - standard-linux + - server-rapid + - python-pypy3.10 + - sharded_cluster-auth-ssl + - name: test-latest-python3.9-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + PYTHON_VERSION: "3.9" + tags: + - standard-linux + - server-latest + - python-3.9 + - standalone-noauth-nossl + - name: test-latest-python3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + PYTHON_VERSION: "3.10" + tags: + - standard-linux + - server-latest + - python-3.10 + - replica_set-noauth-ssl + - name: test-latest-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.11" + tags: + - standard-linux + - server-latest + - python-3.11 + - sharded_cluster-auth-ssl + # Standard non linux tests - name: test-v4.0-python3.9-sync-noauth-nossl-standalone commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 0ad366dbea..dc46ae6bf9 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -163,7 +163,7 @@ buildvariants: # Compression tests - name: compression-snappy-rhel8 tasks: - - name: .compression + - name: .standard-linux display_name: Compression snappy RHEL8 run_on: - rhel87-small @@ -171,7 +171,7 @@ buildvariants: COMPRESSOR: snappy - name: compression-zlib-rhel8 tasks: - - name: .compression + - name: .standard-linux display_name: Compression zlib RHEL8 run_on: - rhel87-small @@ -179,7 +179,7 @@ buildvariants: COMPRESSOR: zlib - name: compression-zstd-rhel8 tasks: - - name: .compression !.4.0 + - name: .standard-linux !.server-4.0 display_name: Compression zstd RHEL8 run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e99a9a3980..75ee52de68 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -211,13 +211,15 @@ def create_load_balancer_variants(): def create_compression_variants(): - # Compression tests - standalone versions of each server, across python versions. + # Compression tests - use the standard linux tests. host = DEFAULT_HOST - base_task = ".compression" variants = [] for compressor in "snappy", "zlib", "zstd": expansions = dict(COMPRESSOR=compressor) - tasks = [base_task] if compressor != "zstd" else [f"{base_task} !.4.0"] + if compressor == "zstd": + tasks = [".standard-linux !.server-4.0"] + else: + tasks = [".standard-linux"] display_name = get_variant_name(f"Compression {compressor}", host) variants.append( create_variant( @@ -613,6 +615,30 @@ def create_server_version_tasks(): return tasks +def create_standard_linux_tasks(): + tasks = [] + + for (version, topology), python in zip_cycle( + list(product(ALL_VERSIONS, TOPOLOGIES)), ALL_PYTHONS + ): + auth = "auth" if topology == "sharded_cluster" else "noauth" + ssl = "nossl" if topology == "standalone" else "ssl" + tags = [ + "standard-linux", + f"server-{version}", + f"python-{python}", + f"{topology}-{auth}-{ssl}", + ] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) + name = get_task_name("test", python=python, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_standard_non_linux_tasks(): tasks = [] @@ -683,39 +709,6 @@ def create_load_balancer_tasks(): return tasks -def create_compression_tasks(): - tasks = [] - versions = get_versions_from("4.0") - # Test all server versions with min python. - for version in versions: - python = CPYTHONS[0] - tags = ["compression", version] - name = get_task_name("test-compression", python=python, version=version) - server_func = FunctionCall(func="run server", vars=dict(VERSION=version)) - test_func = FunctionCall(func="run tests") - tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) - - # Test latest with max python, with and without c exts. - version = "latest" - tags = ["compression", "latest"] - for c_ext in C_EXTS: - python = CPYTHONS[-1] - expansions = dict() - handle_c_ext(c_ext, expansions) - name = get_task_name("test-compression", python=python, version=version, **expansions) - server_func = FunctionCall(func="run server", vars=dict(VERSION=version)) - test_func = FunctionCall(func="run tests", vars=expansions) - tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) - - # Test on latest with pypy. - python = PYPYS[-1] - name = get_task_name("test-compression", python=python, version=version) - server_func = FunctionCall(func="run server", vars=dict(VERSION=version)) - test_func = FunctionCall(func="run tests") - tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) - return tasks - - def create_kms_tasks(): tasks = [] for kms_type in ["gcp", "azure"]: From 846b1fc25ced5979ade934bb27d6ffbb60f68267 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Apr 2025 08:50:51 -0500 Subject: [PATCH 1879/2111] PYTHON-5316 Update tests for other hosts (#2287) --- .evergreen/generated_configs/tasks.yml | 44 ++++++++++++++++++++ .evergreen/generated_configs/variants.yml | 45 ++++++++++++--------- .evergreen/scripts/generate_config.py | 42 +++++++++++++------ .evergreen/scripts/generate_config_utils.py | 6 +++ 4 files changed, 105 insertions(+), 32 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 5b09ce20f7..fddd725af6 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -3083,6 +3083,50 @@ tasks: SUB_TEST_NAME: gke tags: [auth_oidc, auth_oidc_remote] + # Other hosts tests + - name: test-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + TEST_NAME: default_sync + tags: [other-hosts, standalone-noauth-nossl] + - name: test-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_NAME: default_async + tags: [other-hosts, replica_set-noauth-ssl] + - name: test-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_NAME: default_sync + tags: [other-hosts, sharded_cluster-auth-ssl] + # Perf tests - name: perf-8.0-standalone-ssl commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index dc46ae6bf9..fe315ab9c0 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1,61 +1,66 @@ buildvariants: # Alternative hosts tests - - name: openssl-1.0.2-rhel7-python3.9 + - name: openssl-1.0.2-rhel7-v5.0-python3.9 tasks: - - name: .5.0 .standalone !.sync_async - display_name: OpenSSL 1.0.2 RHEL7 Python3.9 + - name: .other-hosts + display_name: OpenSSL 1.0.2 RHEL7 v5.0 Python3.9 run_on: - rhel79-small batchtime: 10080 expansions: + VERSION: "5.0" + PYTHON_VERSION: "3.9" PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: other-hosts-rhel9-fips + - name: other-hosts-rhel9-fips-latest tasks: - - name: .6.0 .standalone !.sync_async - display_name: Other hosts RHEL9-FIPS + - name: .other-hosts + display_name: Other hosts RHEL9-FIPS latest run_on: - rhel92-fips batchtime: 10080 expansions: + VERSION: latest NO_EXT: "1" REQUIRE_FIPS: "1" - - name: other-hosts-rhel8-zseries + - name: other-hosts-rhel8-zseries-latest tasks: - - name: .6.0 .standalone !.sync_async - display_name: Other hosts RHEL8-zseries + - name: .other-hosts + display_name: Other hosts RHEL8-zseries latest run_on: - rhel8-zseries-small batchtime: 10080 expansions: + VERSION: latest NO_EXT: "1" - - name: other-hosts-rhel8-power8 + - name: other-hosts-rhel8-power8-latest tasks: - - name: .6.0 .standalone !.sync_async - display_name: Other hosts RHEL8-POWER8 + - name: .other-hosts + display_name: Other hosts RHEL8-POWER8 latest run_on: - rhel8-power-small batchtime: 10080 expansions: + VERSION: latest NO_EXT: "1" - - name: other-hosts-rhel8-arm64 + - name: other-hosts-rhel8-arm64-latest tasks: - - name: .6.0 .standalone !.sync_async - display_name: Other hosts RHEL8-arm64 + - name: .other-hosts + display_name: Other hosts RHEL8-arm64 latest run_on: - rhel82-arm64-small batchtime: 10080 expansions: + VERSION: latest NO_EXT: "1" - - name: other-hosts-amazon2023 + - name: other-hosts-amazon2023-latest tasks: - - name: .latest !.sync_async .sharded_cluster .auth .ssl - - name: .latest !.sync_async .replica_set .noauth .ssl - - name: .latest !.sync_async .standalone .noauth .nossl - display_name: Other hosts Amazon2023 + - name: .other-hosts + display_name: Other hosts Amazon2023 latest run_on: - amazon2023-arm64-latest-large-m8g batchtime: 10080 expansions: + VERSION: latest NO_EXT: "1" # Atlas connect tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 75ee52de68..ebdbdd9669 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -22,6 +22,7 @@ create_variant, get_assume_role, get_s3_put, + get_standard_auth_ssl, get_subprocess_exec, get_task_name, get_variant_name, @@ -548,29 +549,29 @@ def create_alternative_hosts_variants(): variants = [] host = HOSTS["rhel7"] + version = "5.0" variants.append( create_variant( - [".5.0 .standalone !.sync_async"], - get_variant_name("OpenSSL 1.0.2", host, python=CPYTHONS[0]), + [".other-hosts"], + get_variant_name("OpenSSL 1.0.2", host, python=CPYTHONS[0], version=version), host=host, python=CPYTHONS[0], batchtime=batchtime, + expansions=dict(VERSION=version, PYTHON_VERSION=CPYTHONS[0]), ) ) + version = "latest" for host_name in OTHER_HOSTS: - expansions = dict() + expansions = dict(VERSION="latest") handle_c_ext(C_EXTS[0], expansions) host = HOSTS[host_name] if "fips" in host_name.lower(): expansions["REQUIRE_FIPS"] = "1" - tags = [".6.0 .standalone !.sync_async"] - if host_name == "Amazon2023": - tags = [f".latest !.sync_async {t}" for t in SUB_TASKS] variants.append( create_variant( - tags, - display_name=get_variant_name("Other hosts", host), + [".other-hosts"], + display_name=get_variant_name("Other hosts", host, version=version), batchtime=batchtime, host=host, expansions=expansions, @@ -615,14 +616,32 @@ def create_server_version_tasks(): return tasks +def create_other_hosts_tasks(): + tasks = [] + + for topology, sync in zip_cycle(TOPOLOGIES, SYNCS): + auth, ssl = get_standard_auth_ssl(topology) + tags = [ + "other-hosts", + f"{topology}-{auth}-{ssl}", + ] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + name = get_task_name("test", sync=sync, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_standard_linux_tasks(): tasks = [] for (version, topology), python in zip_cycle( list(product(ALL_VERSIONS, TOPOLOGIES)), ALL_PYTHONS ): - auth = "auth" if topology == "sharded_cluster" else "noauth" - ssl = "nossl" if topology == "standalone" else "ssl" + auth, ssl = get_standard_auth_ssl(topology) tags = [ "standard-linux", f"server-{version}", @@ -645,8 +664,7 @@ def create_standard_non_linux_tasks(): for (version, topology), python, sync in zip_cycle( list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS, SYNCS ): - auth = "auth" if topology == "sharded_cluster" else "noauth" - ssl = "nossl" if topology == "standalone" else "ssl" + auth, ssl = get_standard_auth_ssl(topology) tags = [ "standard-non-linux", f"server-{version}", diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index aa43af9a68..facb832633 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -239,6 +239,12 @@ def handle_c_ext(c_ext, expansions) -> None: expansions["NO_EXT"] = "1" +def get_standard_auth_ssl(topology): + auth = "auth" if topology == "sharded_cluster" else "noauth" + ssl = "nossl" if topology == "standalone" else "ssl" + return auth, ssl + + def get_assume_role(**kwargs): kwargs.setdefault("command_type", EvgCommandType.SETUP) kwargs.setdefault("role_arn", "${assume_role_arn}") From 149fe390d49de755522c35df4d974c9bae15043e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Apr 2025 10:40:25 -0500 Subject: [PATCH 1880/2111] PYTHON-5188 Make version setting a part of the release process (#2288) --- .github/workflows/release-python.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 21c7ca5f7a..a684ff79b0 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -3,12 +3,8 @@ name: Release on: workflow_dispatch: inputs: - version: - description: "The new version to set" - required: true following_version: description: "The post (dev) version to set" - required: true dry_run: description: "Dry Run?" default: false @@ -26,7 +22,6 @@ env: # to 'false' when the input is set to 'false'. DRY_RUN: ${{ ! contains(inputs.dry_run, 'false') }} FOLLOWING_VERSION: ${{ inputs.following_version || '' }} - VERSION: ${{ inputs.version || '10.10.10.10' }} defaults: run: @@ -56,7 +51,6 @@ jobs: - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v2 id: pre-publish with: - version: ${{ env.VERSION }} dry_run: ${{ env.DRY_RUN }} build-dist: @@ -118,7 +112,6 @@ jobs: artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} - uses: mongodb-labs/drivers-github-tools/python/post-publish@v2 with: - version: ${{ env.VERSION }} following_version: ${{ env.FOLLOWING_VERSION }} product_name: ${{ env.PRODUCT_NAME }} evergreen_project: ${{ env.EVERGREEN_PROJECT }} From f476d8bd975a40f6f5907bd2265a203424726bea Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 16 Apr 2025 13:06:09 -0400 Subject: [PATCH 1881/2111] PYTHON-5324 - Fix Windows encryption test secrets path (#2289) --- .evergreen/scripts/setup_tests.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 9b657507c1..bc9f97f1e2 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -387,9 +387,9 @@ def handle_test_env() -> None: if not DRIVERS_TOOLS: raise RuntimeError("Missing DRIVERS_TOOLS") csfle_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/csfle") - run_command(f"bash {csfle_dir}/setup-secrets.sh", cwd=csfle_dir) + run_command(f"bash {csfle_dir.as_posix()}/setup-secrets.sh", cwd=csfle_dir) load_config_from_file(csfle_dir / "secrets-export.sh") - run_command(f"bash {csfle_dir}/start-servers.sh") + run_command(f"bash {csfle_dir.as_posix()}/start-servers.sh") if sub_test_name == "pyopenssl": UV_ARGS.append("--extra ocsp") From aa6fa7a6966be207c26bf70357504a79e4a73add Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 16 Apr 2025 14:10:10 -0400 Subject: [PATCH 1882/2111] PYTHON-5284 - Remove eventlet tests for CPython > 3.9 (#2290) --- .evergreen/generated_configs/variants.yml | 11 ----------- .evergreen/scripts/generate_config.py | 4 ++++ 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index fe315ab9c0..d36548f72f 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -536,17 +536,6 @@ buildvariants: AUTH: auth SSL: ssl PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: green-eventlet-rhel8-python3.13 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Green Eventlet RHEL8 Python3.13 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: eventlet - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.13/bin/python3 - name: green-gevent-rhel8-python3.13 tasks: - name: .standalone .noauth .nossl .sync_async diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index ebdbdd9669..7e88f82316 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -343,6 +343,10 @@ def create_green_framework_variants(): tasks = [".standalone .noauth .nossl .sync_async"] host = DEFAULT_HOST for python, framework in product([CPYTHONS[0], CPYTHONS[-1]], ["eventlet", "gevent"]): + if framework == "eventlet" and python == CPYTHONS[-1]: + # Eventlet has issues with dnspython > 2.0 and newer versions of CPython + # https://jira.mongodb.org/browse/PYTHON-5284 + continue expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") display_name = get_variant_name(f"Green {framework.capitalize()}", host, python=python) variant = create_variant( From 4cac781530238e1830a1af67440ae68ceff663ec Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 16 Apr 2025 14:31:05 -0400 Subject: [PATCH 1883/2111] PYTHON-5326 - Skip serverless tests with known issue (#2292) --- test/asynchronous/unified_format.py | 6 ++++++ test/unified_format.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 9099efbf0f..10b247d1fa 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -551,6 +551,12 @@ def maybe_skip_test(self, spec): self.skipTest("PYTHON-5170 tests are flakey") if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: self.skipTest("PYTHON-5174 tests are flakey") + if ( + "inserting _id with type null via clientBulkWrite" in spec["description"] + or "commitTransaction fails after Interrupted" in spec["description"] + or "commit is not retried after MaxTimeMSExpired error" in spec["description"] + ) and async_client_context.serverless: + self.skipTest("PYTHON-5326 known serverless failures") class_name = self.__class__.__name__.lower() description = spec["description"].lower() diff --git a/test/unified_format.py b/test/unified_format.py index 71d6cd50d4..d3da2b3a82 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -550,6 +550,12 @@ def maybe_skip_test(self, spec): self.skipTest("PYTHON-5170 tests are flakey") if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: self.skipTest("PYTHON-5174 tests are flakey") + if ( + "inserting _id with type null via clientBulkWrite" in spec["description"] + or "commitTransaction fails after Interrupted" in spec["description"] + or "commit is not retried after MaxTimeMSExpired error" in spec["description"] + ) and client_context.serverless: + self.skipTest("PYTHON-5326 known serverless failures") class_name = self.__class__.__name__.lower() description = spec["description"].lower() From 448c8e83264fcfacac6af83c8d7cce1a73524cbc Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 16 Apr 2025 16:16:25 -0400 Subject: [PATCH 1884/2111] PYTHON-5325 - Decrease TestAsyncConcurrency.test_concurrency threshold (#2291) --- test/asynchronous/test_concurrency.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_concurrency.py b/test/asynchronous/test_concurrency.py index 193ecf05c8..65ea90c03f 100644 --- a/test/asynchronous/test_concurrency.py +++ b/test/asynchronous/test_concurrency.py @@ -50,5 +50,5 @@ async def test_concurrency(self): concurrent_time = time.time() - start percent_faster = (sequential_time - concurrent_time) / concurrent_time * 100 - # We expect the concurrent tasks to be at least 75% faster on all platforms as a conservative benchmark - self.assertGreaterEqual(percent_faster, 75) + # We expect the concurrent tasks to be at least 50% faster on all platforms as a conservative benchmark + self.assertGreaterEqual(percent_faster, 50) From db1449b79f4b11b4d2b6903e963fafa0b9ab4a05 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Apr 2025 09:09:49 -0500 Subject: [PATCH 1885/2111] PYTHON-5330 Convert no c extensions and doctests to use the standard test pattern (#2294) --- .evergreen/generated_configs/functions.yml | 15 ------ .evergreen/generated_configs/tasks.yml | 9 ---- .evergreen/generated_configs/variants.yml | 53 +++------------------- .evergreen/scripts/generate_config.py | 39 ++++------------ .evergreen/scripts/run_tests.py | 7 +++ .evergreen/scripts/setup_tests.py | 3 ++ .evergreen/scripts/utils.py | 14 ++++-- .github/workflows/test-python.yml | 4 +- CONTRIBUTING.md | 3 +- doc/conf.py | 1 + doc/examples/client_bulk.rst | 4 ++ justfile | 4 -- 12 files changed, 47 insertions(+), 109 deletions(-) diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index bc8942efc5..06baa32e8d 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -85,21 +85,6 @@ functions: params: directory: src - # Run just script - run just script: - - command: subprocess.exec - params: - binary: bash - args: - - .evergreen/just.sh - - ${JUSTFILE_TARGET} - working_dir: src - include_expansions_in_env: - - AWS_ACCESS_KEY_ID - - AWS_SECRET_ACCESS_KEY - - AWS_SESSION_TOKEN - type: test - # Run server run server: - command: subprocess.exec diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index fddd725af6..4676ab80f3 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -734,15 +734,6 @@ tasks: depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] tags: [coverage] - # Doctest tests - - name: test-doctests - commands: - - func: run server - - func: run just script - vars: - JUSTFILE_TARGET: docs-test - tags: [doctests] - # Enterprise auth tests - name: test-enterprise-auth-python3.9 commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index d36548f72f..0855a27636 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -213,14 +213,14 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Doctests tests - - name: doctests-rhel8-python3.9 + - name: doctests-rhel8 tasks: - - name: .doctests - display_name: Doctests RHEL8 Python3.9 + - name: .standard-linux .standalone-noauth-nossl + display_name: Doctests RHEL8 run_on: - rhel87-small expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 + TEST_NAME: doctest # Encryption tests - name: encryption-rhel8-python3.9 @@ -609,51 +609,12 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # No c ext tests - - name: no-c-ext-rhel8-python3.9 - tasks: - - name: .standalone .noauth .nossl !.sync_async - display_name: No C Ext RHEL8 Python3.9 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: no-c-ext-rhel8-python3.10 + - name: no-c-ext-rhel8 tasks: - - name: .replica_set .noauth .nossl !.sync_async - display_name: No C Ext RHEL8 Python3.10 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: no-c-ext-rhel8-python3.11 - tasks: - - name: .sharded_cluster .noauth .nossl !.sync_async - display_name: No C Ext RHEL8 Python3.11 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: no-c-ext-rhel8-python3.12 - tasks: - - name: .standalone .noauth .nossl !.sync_async - display_name: No C Ext RHEL8 Python3.12 - run_on: - - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: no-c-ext-rhel8-python3.13 - tasks: - - name: .replica_set .noauth .nossl !.sync_async - display_name: No C Ext RHEL8 Python3.13 + - name: .standard-linux + display_name: No C Ext RHEL8 run_on: - rhel87-small - expansions: - NO_EXT: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 # No server tests - name: no-server diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 7e88f82316..b6bd5dce26 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -357,18 +357,12 @@ def create_green_framework_variants(): def create_no_c_ext_variants(): - variants = [] host = DEFAULT_HOST - for python, topology in zip_cycle(CPYTHONS, TOPOLOGIES): - tasks = [f".{topology} .noauth .nossl !.sync_async"] - expansions = dict() - handle_c_ext(C_EXTS[0], expansions) - display_name = get_variant_name("No C Ext", host, python=python) - variant = create_variant( - tasks, display_name, host=host, python=python, expansions=expansions - ) - variants.append(variant) - return variants + tasks = [".standard-linux"] + expansions = dict() + handle_c_ext(C_EXTS[0], expansions) + display_name = get_variant_name("No C Ext", host) + return [create_variant(tasks, display_name, host=host)] def create_atlas_data_lake_variants(): @@ -469,13 +463,13 @@ def create_mockupdb_variants(): def create_doctests_variants(): host = DEFAULT_HOST - python = CPYTHONS[0] + expansions = dict(TEST_NAME="doctest") return [ create_variant( - [".doctests"], - get_variant_name("Doctests", host, python=python), - python=python, + [".standard-linux .standalone-noauth-nossl"], + get_variant_name("Doctests", host), host=host, + expansions=expansions, ) ] @@ -1013,14 +1007,6 @@ def create_mockupdb_tasks(): return [EvgTask(name=task_name, tags=tags, commands=[test_func])] -def create_doctest_tasks(): - server_func = FunctionCall(func="run server") - test_func = FunctionCall(func="run just script", vars=dict(JUSTFILE_TARGET="docs-test")) - task_name = "test-doctests" - tags = ["doctests"] - return [EvgTask(name=task_name, tags=tags, commands=[server_func, test_func])] - - def create_no_server_tasks(): test_func = FunctionCall(func="run tests") task_name = "test-no-server" @@ -1164,13 +1150,6 @@ def create_run_server_func(): return "run server", [sub_cmd, expansion_cmd] -def create_run_just_script_func(): - includes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] - args = [".evergreen/just.sh", "${JUSTFILE_TARGET}"] - cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) - return "run just script", [cmd] - - def create_run_tests_func(): includes = [ "AUTH", diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 9f700d70e0..ae073c2666 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -152,6 +152,13 @@ def run() -> None: test_kms_send_to_remote(SUB_TEST_NAME) return + # Handle doctests. + if TEST_NAME == "doctest": + from sphinx.cmd.build import main + + result = main("-E -b doctest doc ./doc/_build/doctest".split()) + sys.exit(result) + # Send ecs tests to run remotely. if TEST_NAME == "auth_aws" and SUB_TEST_NAME == "ecs": run_command(f"{DRIVERS_TOOLS}/.evergreen/auth_aws/aws_setup.sh ecs") diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index bc9f97f1e2..bd307a4e10 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -285,6 +285,9 @@ def handle_test_env() -> None: write_env("GSSAPI_PORT", config["SASL_PORT"]) write_env("GSSAPI_PRINCIPAL", config["PRINCIPAL"]) + if test_name == "doctest": + UV_ARGS.append("--extra docs") + if test_name == "load_balancer": SINGLE_MONGOS_LB_URI = os.environ.get( "SINGLE_MONGOS_LB_URI", "mongodb://127.0.0.1:8000/?loadBalanced=true" diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 214a1fc347..bd93532600 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -52,10 +52,18 @@ class Distro: # Tests that require a sub test suite. SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"] -EXTRA_TESTS = ["mod_wsgi", "aws_lambda"] +EXTRA_TESTS = ["mod_wsgi", "aws_lambda", "doctest"] # Tests that do not use run-orchestration directly. -NO_RUN_ORCHESTRATION = ["auth_oidc", "atlas_connect", "data_lake", "mockupdb", "serverless", "ocsp"] +NO_RUN_ORCHESTRATION = [ + "auth_oidc", + "atlas_connect", + "aws_lambda", + "data_lake", + "mockupdb", + "serverless", + "ocsp", +] def get_test_options( @@ -78,7 +86,7 @@ def get_test_options( else: parser.add_argument( "test_name", - choices=set(TEST_SUITE_MAP) - set(NO_RUN_ORCHESTRATION), + choices=set(list(TEST_SUITE_MAP) + EXTRA_TESTS) - set(NO_RUN_ORCHESTRATION), nargs="?", default="default", help="The optional name of the test suite to be run, which informs the server configuration.", diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 3c3eef989e..6d215bed0e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -99,7 +99,9 @@ jobs: - name: Install dependencies run: just install - name: Run tests - run: just docs-test + run: | + just setup-tests doctest + just run-tests docs: name: Docs Checks diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 60583022b7..1125113681 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -294,7 +294,8 @@ Note: these tests can only be run from an Evergreen host. The doc tests require a running server. - Run `just run-server`. -- Run `just docs-test`. +- Run `just setup-tests doctest`. +- Run `just run-tests`. ### Free-threaded Python Tests diff --git a/doc/conf.py b/doc/conf.py index c3ee5d8900..387b939344 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -105,6 +105,7 @@ client = MongoClient() client.drop_database("doctest_test") db = client.doctest_test +server_major_version = int(client.server_info()['version'].split()[-1][0]) """ # -- Options for HTML output --------------------------------------------------- diff --git a/doc/examples/client_bulk.rst b/doc/examples/client_bulk.rst index 447f09688f..ad435fa2e4 100644 --- a/doc/examples/client_bulk.rst +++ b/doc/examples/client_bulk.rst @@ -46,6 +46,7 @@ summary of the types of operations performed in the bulk write, along with their .. doctest:: :options: +NORMALIZE_WHITESPACE + :skipif: server_major_version < 8 >>> from pymongo import InsertOne, DeleteOne, UpdateOne >>> models = [ @@ -79,6 +80,7 @@ instance will also include detailed results about each successful operation perf .. doctest:: :options: +NORMALIZE_WHITESPACE + :skipif: server_major_version < 8 >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateMany >>> models = [ @@ -125,6 +127,7 @@ For example, a duplicate key error on the third operation below aborts the remai .. doctest:: :options: +NORMALIZE_WHITESPACE + :skipif: server_major_version < 8 >>> from pymongo import InsertOne, DeleteOne >>> from pymongo.errors import ClientBulkWriteException @@ -161,6 +164,7 @@ For example, the fourth and fifth write operations below get executed successful .. doctest:: :options: +NORMALIZE_WHITESPACE + :skipif: server_major_version < 8 >>> from pymongo import InsertOne, DeleteOne >>> from pymongo.errors import ClientBulkWriteException diff --git a/justfile b/justfile index 43aefb3f1a..74ebb48823 100644 --- a/justfile +++ b/justfile @@ -28,10 +28,6 @@ docs-serve: docs-linkcheck: {{docs_run}} sphinx-build -E -b linkcheck doc {{doc_build}}/linkcheck -[group('docs')] -docs-test: - {{docs_run}} --extra test sphinx-build -E -b doctest doc {{doc_build}}/doctest - [group('typing')] typing: just typing-mypy From 094a320817738c09f105223e606ab4ba6c49a0fb Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 17 Apr 2025 13:01:27 -0400 Subject: [PATCH 1886/2111] PYTHON-5284 - Update changelog for Eventlet testing removal (#2293) --- doc/changelog.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 597f75f873..2fb225e2e1 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -10,6 +10,9 @@ Version 4.12.1 is a bug fix release. - Fixed a bug that could raise ``UnboundLocalError`` when creating asynchronous connections over SSL. - Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. - Fixed a bug that caused direct use of ``pymongo.uri_parser`` to raise an ``AttributeError``. +- Removed Eventlet testing against Python versions newer than 3.9 since + Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. + Issues Resolved ............... From 5f956210f845a5df98b7bc3de580ea9f05221271 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Apr 2025 09:29:20 -0500 Subject: [PATCH 1887/2111] PYTHON-5332 Update AWS, mod_wsgi, and green framework variants (#2297) --- .evergreen/generated_configs/tasks.yml | 646 ++-------------------- .evergreen/generated_configs/variants.yml | 81 +-- .evergreen/scripts/generate_config.py | 69 ++- 3 files changed, 93 insertions(+), 703 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 4676ab80f3..980dbfbe51 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -33,7 +33,7 @@ tasks: tags: [aws_lambda] # Aws tests - - name: test-auth-aws-4.4-regular + - name: test-auth-aws-4.4-regular-python3.9 commands: - func: run server vars: @@ -44,541 +44,61 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: regular + PYTHON_VERSION: "3.9" tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-4.4-assume-role - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: assume-role - tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-4.4-ec2 - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ec2 - tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-4.4-env-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: env-creds - tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-4.4-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: session-creds - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-4.4-web-identity - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-4.4-ecs - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ecs - tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-4.4-web-identity-session-name - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "4.4" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - AWS_ROLE_SESSION_NAME: test - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-5.0-regular - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: regular - tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-5.0-assume-role - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: assume-role - tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-5.0-ec2 - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ec2 - tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-5.0-env-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: env-creds - tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-5.0-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: session-creds - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-5.0-web-identity - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-5.0-ecs - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ecs - tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-5.0-web-identity-session-name - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "5.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - AWS_ROLE_SESSION_NAME: test - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-6.0-regular - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: regular - tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-6.0-assume-role - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: assume-role - tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-6.0-ec2 - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ec2 - tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-6.0-env-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: env-creds - tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-6.0-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: session-creds - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-6.0-web-identity - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-6.0-ecs - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ecs - tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-6.0-web-identity-session-name - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "6.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - AWS_ROLE_SESSION_NAME: test - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-7.0-regular - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: regular - tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-7.0-assume-role - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: assume-role - tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-7.0-ec2 - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ec2 - tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-7.0-env-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: env-creds - tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-7.0-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: session-creds - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-7.0-web-identity - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-7.0-ecs - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ecs - tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-7.0-web-identity-session-name - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "7.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - AWS_ROLE_SESSION_NAME: test - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-8.0-regular - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "8.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: regular - tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-8.0-assume-role + - name: test-auth-aws-5.0-assume-role-python3.10 commands: - func: run server vars: AUTH_AWS: "1" - VERSION: "8.0" + VERSION: "5.0" - func: assume ec2 role - func: run tests vars: TEST_NAME: auth_aws SUB_TEST_NAME: assume-role + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-8.0-ec2 + - name: test-auth-aws-6.0-ec2-python3.11 commands: - func: run server vars: AUTH_AWS: "1" - VERSION: "8.0" + VERSION: "6.0" - func: assume ec2 role - func: run tests vars: TEST_NAME: auth_aws SUB_TEST_NAME: ec2 + PYTHON_VERSION: "3.11" tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-8.0-env-creds + - name: test-auth-aws-7.0-env-creds-python3.12 commands: - func: run server vars: AUTH_AWS: "1" - VERSION: "8.0" + VERSION: "7.0" - func: assume ec2 role - func: run tests vars: TEST_NAME: auth_aws SUB_TEST_NAME: env-creds + PYTHON_VERSION: "3.12" tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-8.0-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "8.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: session-creds - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-8.0-web-identity - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "8.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-8.0-ecs - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: "8.0" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ecs - tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-8.0-web-identity-session-name + - name: test-auth-aws-8.0-session-creds-python3.13 commands: - func: run server vars: AUTH_AWS: "1" VERSION: "8.0" - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - AWS_ROLE_SESSION_NAME: test - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-rapid-regular - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: rapid - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: regular - tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-rapid-assume-role - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: rapid - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: assume-role - tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-rapid-ec2 - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: rapid - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ec2 - tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-rapid-env-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: rapid - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: env-creds - tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-rapid-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: rapid - - func: assume ec2 role - func: run tests vars: TEST_NAME: auth_aws SUB_TEST_NAME: session-creds + PYTHON_VERSION: "3.13" tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-rapid-web-identity + - name: test-auth-aws-rapid-web-identity-python3.9 commands: - func: run server vars: @@ -589,20 +109,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity + PYTHON_VERSION: "3.9" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-rapid-ecs - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: rapid - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ecs - tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-rapid-web-identity-session-name + - name: test-auth-aws-rapid-web-identity-session-name-python3.9 commands: - func: run server vars: @@ -614,80 +123,9 @@ tasks: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity AWS_ROLE_SESSION_NAME: test + PYTHON_VERSION: "3.9" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-latest-regular - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: regular - tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-latest-assume-role - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: assume-role - tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-latest-ec2 - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: ec2 - tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-latest-env-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: env-creds - tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-latest-session-creds - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: session-creds - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-latest-web-identity - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-latest-ecs + - name: test-auth-aws-latest-ecs-python3.10 commands: - func: run server vars: @@ -698,20 +136,8 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ecs + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-ecs] - - name: test-auth-aws-latest-web-identity-session-name - commands: - - func: run server - vars: - AUTH_AWS: "1" - VERSION: latest - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: auth_aws - SUB_TEST_NAME: web-identity - AWS_ROLE_SESSION_NAME: test - tags: [auth-aws, auth-aws-web-identity] # Backport pr tests - name: backport-pr @@ -1077,45 +503,65 @@ tasks: tags: [mockupdb] # Mod wsgi tests - - name: mod-wsgi-standalone + - name: mod-wsgi-replica-set-python3.9 commands: - func: run server vars: - TOPOLOGY: standalone + TOPOLOGY: replica_set + PYTHON_VERSION: "3.9" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.9" + tags: [mod_wsgi] + - name: mod-wsgi-embedded-mode-replica-set-python3.10 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.10" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.10" tags: [mod_wsgi] - - name: mod-wsgi-replica-set + - name: mod-wsgi-replica-set-python3.11 commands: - func: run server vars: TOPOLOGY: replica_set + PYTHON_VERSION: "3.11" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.11" tags: [mod_wsgi] - - name: mod-wsgi-embedded-mode-standalone + - name: mod-wsgi-embedded-mode-replica-set-python3.12 commands: - func: run server vars: - TOPOLOGY: standalone + TOPOLOGY: replica_set + PYTHON_VERSION: "3.12" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.12" tags: [mod_wsgi] - - name: mod-wsgi-embedded-mode-replica-set + - name: mod-wsgi-replica-set-python3.13 commands: - func: run server vars: TOPOLOGY: replica_set + PYTHON_VERSION: "3.13" - func: run tests vars: TEST_NAME: mod_wsgi - SUB_TEST_NAME: embedded + SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.13" tags: [mod_wsgi] # No server tests diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 0855a27636..275de4c2fd 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -100,54 +100,24 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Aws auth tests - - name: auth-aws-ubuntu-20-python3.9 + - name: auth-aws-ubuntu-20 tasks: - name: .auth-aws - display_name: Auth AWS Ubuntu-20 Python3.9 + display_name: Auth AWS Ubuntu-20 run_on: - ubuntu2004-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: auth-aws-ubuntu-20-python3.13 - tasks: - - name: .auth-aws - display_name: Auth AWS Ubuntu-20 Python3.13 - run_on: - - ubuntu2004-small - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 - - name: auth-aws-win64-python3.9 + - name: auth-aws-win64 tasks: - name: .auth-aws !.auth-aws-ecs - display_name: Auth AWS Win64 Python3.9 + display_name: Auth AWS Win64 run_on: - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/Python39/python.exe - - name: auth-aws-win64-python3.13 - tasks: - - name: .auth-aws !.auth-aws-ecs - display_name: Auth AWS Win64 Python3.13 - run_on: - - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/Python313/python.exe - - name: auth-aws-macos-python3.9 + - name: auth-aws-macos tasks: - name: .auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 - display_name: Auth AWS macOS Python3.9 + display_name: Auth AWS macOS run_on: - macos-14 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: auth-aws-macos-python3.13 - tasks: - - name: .auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 - display_name: Auth AWS macOS Python3.13 - run_on: - - macos-14 - expansions: - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 # Aws lambda tests - name: faas-lambda @@ -514,39 +484,26 @@ buildvariants: PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t # Green framework tests - - name: green-eventlet-rhel8-python3.9 + - name: green-eventlet-rhel8 tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Green Eventlet RHEL8 Python3.9 + - name: .standard-linux .standalone-noauth-nossl .python-3.9 + display_name: Green Eventlet RHEL8 run_on: - rhel87-small expansions: GREEN_FRAMEWORK: eventlet AUTH: auth SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: green-gevent-rhel8-python3.9 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Green Gevent RHEL8 Python3.9 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: gevent - AUTH: auth - SSL: ssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: green-gevent-rhel8-python3.13 + - name: green-gevent-rhel8 tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Green Gevent RHEL8 Python3.13 + - name: .standard-linux .standalone-noauth-nossl + display_name: Green Gevent RHEL8 run_on: - rhel87-small expansions: GREEN_FRAMEWORK: gevent AUTH: auth SSL: ssl - PYTHON_BINARY: /opt/python/3.13/bin/python3 # Import time tests - name: import-time @@ -589,24 +546,14 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Mod wsgi tests - - name: mod_wsgi-ubuntu-22-python3.9 + - name: mod_wsgi-ubuntu-22 tasks: - name: .mod_wsgi - display_name: mod_wsgi Ubuntu-22 Python3.9 + display_name: mod_wsgi Ubuntu-22 run_on: - ubuntu2204-small expansions: MOD_WSGI_VERSION: "4" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: mod_wsgi-ubuntu-22-python3.13 - tasks: - - name: .mod_wsgi - display_name: mod_wsgi Ubuntu-22 Python3.13 - run_on: - - ubuntu2204-small - expansions: - MOD_WSGI_VERSION: "4" - PYTHON_BINARY: /opt/python/3.13/bin/python3 # No c ext tests - name: no-c-ext-rhel8 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b6bd5dce26..c610bc2353 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -340,18 +340,16 @@ def create_stable_api_variants(): def create_green_framework_variants(): variants = [] - tasks = [".standalone .noauth .nossl .sync_async"] host = DEFAULT_HOST - for python, framework in product([CPYTHONS[0], CPYTHONS[-1]], ["eventlet", "gevent"]): - if framework == "eventlet" and python == CPYTHONS[-1]: + for framework in ["eventlet", "gevent"]: + tasks = [".standard-linux .standalone-noauth-nossl"] + if framework == "eventlet": # Eventlet has issues with dnspython > 2.0 and newer versions of CPython # https://jira.mongodb.org/browse/PYTHON-5284 - continue + tasks = [".standard-linux .standalone-noauth-nossl .python-3.9"] expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") - display_name = get_variant_name(f"Green {framework.capitalize()}", host, python=python) - variant = create_variant( - tasks, display_name, host=host, python=python, expansions=expansions - ) + display_name = get_variant_name(f"Green {framework.capitalize()}", host) + variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) return variants @@ -377,17 +375,11 @@ def create_atlas_data_lake_variants(): def create_mod_wsgi_variants(): - variants = [] host = HOSTS["ubuntu22"] tasks = [".mod_wsgi"] expansions = dict(MOD_WSGI_VERSION="4") - for python in MIN_MAX_PYTHON: - display_name = get_variant_name("mod_wsgi", host, python=python) - variant = create_variant( - tasks, display_name, host=host, python=python, expansions=expansions - ) - variants.append(variant) - return variants + display_name = get_variant_name("mod_wsgi", host) + return [create_variant(tasks, display_name, host=host, expansions=expansions)] def create_disable_test_commands_variants(): @@ -518,7 +510,7 @@ def create_perf_variants(): def create_aws_auth_variants(): variants = [] - for host_name, python in product(["ubuntu20", "win64", "macos"], MIN_MAX_PYTHON): + for host_name in ["ubuntu20", "win64", "macos"]: expansions = dict() tasks = [".auth-aws"] if host_name == "macos": @@ -528,9 +520,8 @@ def create_aws_auth_variants(): host = HOSTS[host_name] variant = create_variant( tasks, - get_variant_name("Auth AWS", host, python=python), + get_variant_name("Auth AWS", host), host=host, - python=python, expansions=expansions, ) variants.append(variant) @@ -755,29 +746,32 @@ def create_aws_tasks(): "web-identity", "ecs", ] - for version in get_versions_from("4.4"): + for version, test_type, python in zip_cycle(get_versions_from("4.4"), aws_test_types, CPYTHONS): base_name = f"test-auth-aws-{version}" base_tags = ["auth-aws"] server_vars = dict(AUTH_AWS="1", VERSION=version) server_func = FunctionCall(func="run server", vars=server_vars) assume_func = FunctionCall(func="assume ec2 role") - for test_type in aws_test_types: - tags = [*base_tags, f"auth-aws-{test_type}"] - name = f"{base_name}-{test_type}" - test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type) - test_func = FunctionCall(func="run tests", vars=test_vars) - funcs = [server_func, assume_func, test_func] - tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) - - tags = [*base_tags, "auth-aws-web-identity"] - name = f"{base_name}-web-identity-session-name" - test_vars = dict( - TEST_NAME="auth_aws", SUB_TEST_NAME="web-identity", AWS_ROLE_SESSION_NAME="test" - ) + tags = [*base_tags, f"auth-aws-{test_type}"] + name = get_task_name(f"{base_name}-{test_type}", python=python) + test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type, PYTHON_VERSION=python) test_func = FunctionCall(func="run tests", vars=test_vars) funcs = [server_func, assume_func, test_func] tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + if test_type == "web-identity": + tags = [*base_tags, "auth-aws-web-identity"] + name = get_task_name(f"{base_name}-web-identity-session-name", python=python) + test_vars = dict( + TEST_NAME="auth_aws", + SUB_TEST_NAME="web-identity", + AWS_ROLE_SESSION_NAME="test", + PYTHON_VERSION=python, + ) + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [server_func, assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + return tasks @@ -796,15 +790,18 @@ def create_oidc_tasks(): def create_mod_wsgi_tasks(): tasks = [] - for test, topology in product(["standalone", "embedded-mode"], ["standalone", "replica_set"]): + for (test, topology), python in zip_cycle( + product(["standalone", "embedded-mode"], ["standalone", "replica_set"]), CPYTHONS + ): if test == "standalone": task_name = "mod-wsgi-" else: task_name = "mod-wsgi-embedded-mode-" task_name += topology.replace("_", "-") - server_vars = dict(TOPOLOGY=topology) + task_name = get_task_name(task_name, python=python) + server_vars = dict(TOPOLOGY=topology, PYTHON_VERSION=python) server_func = FunctionCall(func="run server", vars=server_vars) - vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0]) + vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0], PYTHON_VERSION=python) test_func = FunctionCall(func="run tests", vars=vars) tags = ["mod_wsgi"] commands = [server_func, test_func] From 0f37bfd7a1052e6ca91476cd656e2c5bf0f880be Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Apr 2025 10:02:50 -0500 Subject: [PATCH 1888/2111] PYTHON-5331 Convert stable api and storage tests to new task pattern (#2295) --- .evergreen/generated_configs/functions.yml | 1 + .evergreen/generated_configs/variants.yml | 90 ++++++---------------- .evergreen/scripts/generate_config.py | 28 +++---- 3 files changed, 35 insertions(+), 84 deletions(-) diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index 06baa32e8d..a28cd2596e 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -143,6 +143,7 @@ functions: - NO_EXT - COMPRESSORS - MONGODB_API_VERSION + - REQUIRE_API_VERSION - DEBUG_LOG - ORCHESTRATION_FILE - OCSP_SERVER_TYPE diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 275de4c2fd..972de3118e 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -792,71 +792,36 @@ buildvariants: PYTHON_BINARY: /opt/python/3.13/bin/python3 # Stable api tests - - name: stable-api-require-v1-rhel8-python3.9-auth + - name: stable-api-require-v1-rhel8-auth tasks: - - name: "!.replica_set .5.0 .noauth .nossl .sync_async" - - name: "!.replica_set .6.0 .noauth .nossl .sync_async" - - name: "!.replica_set .7.0 .noauth .nossl .sync_async" - - name: "!.replica_set .8.0 .noauth .nossl .sync_async" - - name: "!.replica_set .rapid .noauth .nossl .sync_async" - - name: "!.replica_set .latest .noauth .nossl .sync_async" - display_name: Stable API require v1 RHEL8 Python3.9 Auth + - name: .standard-linux !.replica_set-noauth-ssl .server-5.0 + - name: .standard-linux !.replica_set-noauth-ssl .server-6.0 + - name: .standard-linux !.replica_set-noauth-ssl .server-7.0 + - name: .standard-linux !.replica_set-noauth-ssl .server-8.0 + - name: .standard-linux !.replica_set-noauth-ssl .server-rapid + - name: .standard-linux !.replica_set-noauth-ssl .server-latest + display_name: Stable API require v1 RHEL8 Auth run_on: - rhel87-small expansions: AUTH: auth REQUIRE_API_VERSION: "1" MONGODB_API_VERSION: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [versionedApi_tag] - - name: stable-api-accept-v2-rhel8-python3.9-auth - tasks: - - name: .standalone .5.0 .noauth .nossl .sync_async - - name: .standalone .6.0 .noauth .nossl .sync_async - - name: .standalone .7.0 .noauth .nossl .sync_async - - name: .standalone .8.0 .noauth .nossl .sync_async - - name: .standalone .rapid .noauth .nossl .sync_async - - name: .standalone .latest .noauth .nossl .sync_async - display_name: Stable API accept v2 RHEL8 Python3.9 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - ORCHESTRATION_FILE: versioned-api-testing.json - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [versionedApi_tag] - - name: stable-api-require-v1-rhel8-python3.13-auth - tasks: - - name: "!.replica_set .5.0 .noauth .nossl .sync_async" - - name: "!.replica_set .6.0 .noauth .nossl .sync_async" - - name: "!.replica_set .7.0 .noauth .nossl .sync_async" - - name: "!.replica_set .8.0 .noauth .nossl .sync_async" - - name: "!.replica_set .rapid .noauth .nossl .sync_async" - - name: "!.replica_set .latest .noauth .nossl .sync_async" - display_name: Stable API require v1 RHEL8 Python3.13 Auth - run_on: - - rhel87-small - expansions: - AUTH: auth - REQUIRE_API_VERSION: "1" - MONGODB_API_VERSION: "1" - PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [versionedApi_tag] - - name: stable-api-accept-v2-rhel8-python3.13-auth + - name: stable-api-accept-v2-rhel8-auth tasks: - - name: .standalone .5.0 .noauth .nossl .sync_async - - name: .standalone .6.0 .noauth .nossl .sync_async - - name: .standalone .7.0 .noauth .nossl .sync_async - - name: .standalone .8.0 .noauth .nossl .sync_async - - name: .standalone .rapid .noauth .nossl .sync_async - - name: .standalone .latest .noauth .nossl .sync_async - display_name: Stable API accept v2 RHEL8 Python3.13 Auth + - name: .standard-linux .server-5.0 .standalone-noauth-nossl + - name: .standard-linux .server-6.0 .standalone-noauth-nossl + - name: .standard-linux .server-7.0 .standalone-noauth-nossl + - name: .standard-linux .server-8.0 .standalone-noauth-nossl + - name: .standard-linux .server-rapid .standalone-noauth-nossl + - name: .standard-linux .server-latest .standalone-noauth-nossl + display_name: Stable API accept v2 RHEL8 Auth run_on: - rhel87-small expansions: AUTH: auth ORCHESTRATION_FILE: versioned-api-testing.json - PYTHON_BINARY: /opt/python/3.13/bin/python3 tags: [versionedApi_tag] # Standard nonlinux tests @@ -896,30 +861,19 @@ buildvariants: tags: [standard-non-linux] # Storage engine tests - - name: storage-inmemory-rhel8-python3.9 + - name: storage-inmemory-rhel8 tasks: - - name: .standalone .noauth .nossl .4.0 .sync_async - - name: .standalone .noauth .nossl .4.2 .sync_async - - name: .standalone .noauth .nossl .4.4 .sync_async - - name: .standalone .noauth .nossl .5.0 .sync_async - - name: .standalone .noauth .nossl .6.0 .sync_async - - name: .standalone .noauth .nossl .7.0 .sync_async - - name: .standalone .noauth .nossl .8.0 .sync_async - - name: .standalone .noauth .nossl .rapid .sync_async - - name: .standalone .noauth .nossl .latest .sync_async - display_name: Storage InMemory RHEL8 Python3.9 + - name: .standard-linux .standalone-noauth-nossl + display_name: Storage InMemory RHEL8 run_on: - rhel87-small expansions: STORAGE_ENGINE: inmemory - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: storage-mmapv1-rhel8-python3.9 + - name: storage-mmapv1-rhel8 tasks: - - name: .standalone .4.0 .noauth .nossl .sync_async - - name: .replica_set .4.0 .noauth .nossl .sync_async - display_name: Storage MMAPv1 RHEL8 Python3.9 + - name: .standard-linux !.sharded_cluster-auth-ssl .server-4.0 + display_name: Storage MMAPv1 RHEL8 run_on: - rhel87-small expansions: STORAGE_ENGINE: mmapv1 - PYTHON_BINARY: /opt/python/3.9/bin/python3 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index c610bc2353..4d197dec2f 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -283,20 +283,15 @@ def create_storage_engine_variants(): engines = ["InMemory", "MMAPv1"] variants = [] for engine in engines: - python = CPYTHONS[0] expansions = dict(STORAGE_ENGINE=engine.lower()) if engine == engines[0]: - tasks = [f".standalone .noauth .nossl .{v} .sync_async" for v in ALL_VERSIONS] + tasks = [".standard-linux .standalone-noauth-nossl"] else: # MongoDB 4.2 drops support for MMAPv1 versions = get_versions_until("4.0") - tasks = [f".standalone .{v} .noauth .nossl .sync_async" for v in versions] + [ - f".replica_set .{v} .noauth .nossl .sync_async" for v in versions - ] - display_name = get_variant_name(f"Storage {engine}", host, python=python) - variant = create_variant( - tasks, display_name, host=host, python=python, expansions=expansions - ) + tasks = [f".standard-linux !.sharded_cluster-auth-ssl .server-{v}" for v in versions] + display_name = get_variant_name(f"Storage {engine}", host) + variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) return variants @@ -308,7 +303,7 @@ def create_stable_api_variants(): types = ["require v1", "accept v2"] # All python versions across platforms. - for python, test_type in product(MIN_MAX_PYTHON, types): + for test_type in types: expansions = dict(AUTH="auth") # Test against a cluster with requireApiVersion=1. if test_type == types[0]: @@ -318,7 +313,8 @@ def create_stable_api_variants(): # MONGODB_API_VERSION is the apiVersion to use in the test suite. expansions["MONGODB_API_VERSION"] = "1" tasks = [ - f"!.replica_set .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0") + f".standard-linux !.replica_set-noauth-ssl .server-{v}" + for v in get_versions_from("5.0") ] else: # Test against a cluster with acceptApiVersion2 but without @@ -326,13 +322,12 @@ def create_stable_api_variants(): # clients created in the test suite. expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" tasks = [ - f".standalone .{v} .noauth .nossl .sync_async" for v in get_versions_from("5.0") + f".standard-linux .server-{v} .standalone-noauth-nossl" + for v in get_versions_from("5.0") ] base_display_name = f"Stable API {test_type}" - display_name = get_variant_name(base_display_name, host, python=python, **expansions) - variant = create_variant( - tasks, display_name, host=host, python=python, tags=tags, expansions=expansions - ) + display_name = get_variant_name(base_display_name, host, **expansions) + variant = create_variant(tasks, display_name, host=host, tags=tags, expansions=expansions) variants.append(variant) return variants @@ -1164,6 +1159,7 @@ def create_run_tests_func(): "NO_EXT", "COMPRESSORS", "MONGODB_API_VERSION", + "REQUIRE_API_VERSION", "DEBUG_LOG", "ORCHESTRATION_FILE", "OCSP_SERVER_TYPE", From 6ed3533b7343130b58555eb3226834da08b6ad80 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Apr 2025 13:03:53 -0500 Subject: [PATCH 1889/2111] PYTHON-5313 Create Evergreen tests that do not run orchestration (#2284) --- .evergreen/generated_configs/tasks.yml | 55 ++++++--------- .evergreen/generated_configs/variants.yml | 46 ++++--------- .evergreen/scripts/generate_config.py | 81 ++++++++--------------- test/__init__.py | 1 + test/asynchronous/__init__.py | 1 + 5 files changed, 65 insertions(+), 119 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 980dbfbe51..32754af9af 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -1,28 +1,4 @@ tasks: - # Atlas connect tests - - name: test-atlas-connect - commands: - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: atlas_connect - tags: [atlas_connect] - - # Atlas data lake tests - - name: test-atlas-data-lake-without_ext - commands: - - func: run tests - vars: - TEST_NAME: data_lake - NO_EXT: "1" - tags: [atlas_data_lake] - - name: test-atlas-data-lake-with_ext - commands: - - func: run tests - vars: - TEST_NAME: data_lake - tags: [atlas_data_lake] - # Aws lambda tests - name: test-aws-lambda-deployed commands: @@ -494,14 +470,6 @@ tasks: TEST_NAME: load_balancer tags: [load-balancer, noauth, nossl] - # Mockupdb tests - - name: test-mockupdb - commands: - - func: run tests - vars: - TEST_NAME: mockupdb - tags: [mockupdb] - # Mod wsgi tests - name: mod-wsgi-replica-set-python3.9 commands: @@ -564,11 +532,28 @@ tasks: PYTHON_VERSION: "3.13" tags: [mod_wsgi] - # No server tests - - name: test-no-server + # No orchestration tests + - name: test-no-orchestration-python3.9 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: "3.9" + tags: [no-orchestration, python-3.9] + - name: test-no-orchestration-python3.13 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: "3.13" + tags: [no-orchestration, python-3.13] + - name: test-no-orchestration-pypy3.10 commands: + - func: assume ec2 role - func: run tests - tags: [no-server] + vars: + PYTHON_VERSION: pypy3.10 + tags: [no-orchestration, python-pypy3.10] # Ocsp tests - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.9 diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 972de3118e..bcce7818ac 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -64,40 +64,22 @@ buildvariants: NO_EXT: "1" # Atlas connect tests - - name: atlas-connect-rhel8-python3.9 + - name: atlas-connect-rhel8 tasks: - - name: .atlas_connect - display_name: Atlas connect RHEL8 Python3.9 + - name: .no-orchestration + display_name: Atlas connect RHEL8 run_on: - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-connect-rhel8-python3.13 - tasks: - - name: .atlas_connect - display_name: Atlas connect RHEL8 Python3.13 - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 # Atlas data lake tests - - name: atlas-data-lake-ubuntu-22-python3.9 + - name: atlas-data-lake-ubuntu-22 tasks: - - name: .atlas_data_lake - display_name: Atlas Data Lake Ubuntu-22 Python3.9 + - name: .no-orchestration + display_name: Atlas Data Lake Ubuntu-22 run_on: - ubuntu2204-small expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: atlas-data-lake-ubuntu-22-python3.13 - tasks: - - name: .atlas_data_lake - display_name: Atlas Data Lake Ubuntu-22 Python3.13 - run_on: - - ubuntu2204-small - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 + TEST_NAME: data_lake # Aws auth tests - name: auth-aws-ubuntu-20 @@ -536,14 +518,14 @@ buildvariants: batchtime: 10080 # Mockupdb tests - - name: mockupdb-rhel8-python3.9 + - name: mockupdb-rhel8 tasks: - - name: .mockupdb - display_name: MockupDB RHEL8 Python3.9 + - name: .no-orchestration + display_name: MockupDB RHEL8 run_on: - rhel87-small expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 + TEST_NAME: mockupdb # Mod wsgi tests - name: mod_wsgi-ubuntu-22 @@ -564,10 +546,10 @@ buildvariants: - rhel87-small # No server tests - - name: no-server + - name: no-server-rhel8 tasks: - - name: .no-server - display_name: No server + - name: .no-orchestration + display_name: No server RHEL8 run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 4d197dec2f..80eb248f35 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -359,14 +359,11 @@ def create_no_c_ext_variants(): def create_atlas_data_lake_variants(): - variants = [] host = HOSTS["ubuntu22"] - for python in MIN_MAX_PYTHON: - tasks = [".atlas_data_lake"] - display_name = get_variant_name("Atlas Data Lake", host, python=python) - variant = create_variant(tasks, display_name, host=host, python=python) - variants.append(variant) - return variants + tasks = [".no-orchestration"] + expansions = dict(TEST_NAME="data_lake") + display_name = get_variant_name("Atlas Data Lake", host) + return [create_variant(tasks, display_name, host=host, expansions=expansions)] def create_mod_wsgi_variants(): @@ -437,13 +434,13 @@ def create_search_index_variants(): def create_mockupdb_variants(): host = DEFAULT_HOST - python = CPYTHONS[0] + expansions = dict(TEST_NAME="mockupdb") return [ create_variant( - [".mockupdb"], - get_variant_name("MockupDB", host, python=python), - python=python, + [".no-orchestration"], + get_variant_name("MockupDB", host), host=host, + expansions=expansions, ) ] @@ -465,12 +462,10 @@ def create_atlas_connect_variants(): host = DEFAULT_HOST return [ create_variant( - [".atlas_connect"], - get_variant_name("Atlas connect", host, python=python), - python=python, - host=host, + [".no-orchestration"], + get_variant_name("Atlas connect", host), + host=DEFAULT_HOST, ) - for python in MIN_MAX_PYTHON ] @@ -525,7 +520,8 @@ def create_aws_auth_variants(): def create_no_server_variants(): host = HOSTS["rhel8"] - return [create_variant([".no-server"], "No server", host=host)] + name = get_variant_name("No server", host=host) + return [create_variant([".no-orchestration"], name, host=host)] def create_alternative_hosts_variants(): @@ -691,6 +687,22 @@ def create_server_tasks(): return tasks +def create_no_orchestration_tasks(): + tasks = [] + for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: + tags = [ + "no-orchestration", + f"python-{python}", + ] + name = get_task_name("test-no-orchestration", python=python) + assume_func = FunctionCall(func="assume ec2 role") + test_vars = dict(PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=test_vars) + commands = [assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=commands)) + return tasks + + def create_load_balancer_tasks(): tasks = [] for (auth, ssl), version in product(AUTH_SSLS, get_versions_from("6.0")): @@ -855,15 +867,6 @@ def create_search_index_tasks(): return [EvgTask(name=task_name, tags=tags, commands=commands)] -def create_atlas_connect_tasks(): - vars = dict(TEST_NAME="atlas_connect") - assume_func = FunctionCall(func="assume ec2 role") - test_func = FunctionCall(func="run tests", vars=vars) - task_name = "test-atlas-connect" - tags = ["atlas_connect"] - return [EvgTask(name=task_name, tags=tags, commands=[assume_func, test_func])] - - def create_enterprise_auth_tasks(): tasks = [] for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: @@ -901,18 +904,6 @@ def create_perf_tasks(): return tasks -def create_atlas_data_lake_tasks(): - tags = ["atlas_data_lake"] - tasks = [] - for c_ext in C_EXTS: - vars = dict(TEST_NAME="data_lake") - handle_c_ext(c_ext, vars) - test_func = FunctionCall(func="run tests", vars=vars) - task_name = f"test-atlas-data-lake-{c_ext}" - tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) - return tasks - - def create_getdata_tasks(): # Wildcard task. Do you need to find out what tools are available and where? # Throw it here, and execute this task on all buildvariants @@ -992,20 +983,6 @@ def create_ocsp_tasks(): return tasks -def create_mockupdb_tasks(): - test_func = FunctionCall(func="run tests", vars=dict(TEST_NAME="mockupdb")) - task_name = "test-mockupdb" - tags = ["mockupdb"] - return [EvgTask(name=task_name, tags=tags, commands=[test_func])] - - -def create_no_server_tasks(): - test_func = FunctionCall(func="run tests") - task_name = "test-no-server" - tags = ["no-server"] - return [EvgTask(name=task_name, tags=tags, commands=[test_func])] - - def create_free_threading_tasks(): vars = dict(VERSION="8.0", TOPOLOGY="replica_set") server_func = FunctionCall(func="run server", vars=vars) diff --git a/test/__init__.py b/test/__init__.py index a1c5091f3b..7ae3432062 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -206,6 +206,7 @@ def _init_client(self): if os.environ.get("TEST_DATA_LAKE"): self.is_data_lake = True self.auth_enabled = True + self.client.close() self.client = self._connect(host, port, username=db_user, password=db_pwd) self.connected = True return diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index f8d04f0d5d..c57bf2a880 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -206,6 +206,7 @@ async def _init_client(self): if os.environ.get("TEST_DATA_LAKE"): self.is_data_lake = True self.auth_enabled = True + await self.client.close() self.client = await self._connect(host, port, username=db_user, password=db_pwd) self.connected = True return From 0ee8e585c2e1f43ddf50a86503f4bcea2ebdf389 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 21 Apr 2025 09:43:58 -0400 Subject: [PATCH 1890/2111] PYTHON-5292 - Debug logs should only print on failed tests (#2296) --- .evergreen/scripts/run_tests.py | 2 +- .evergreen/scripts/setup_tests.py | 4 +--- CONTRIBUTING.md | 8 ++++---- test/asynchronous/test_client.py | 3 ++- test/test_client.py | 3 ++- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index ae073c2666..5c1ba25a97 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -181,7 +181,7 @@ def run() -> None: return if os.environ.get("DEBUG_LOG"): - TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG} -o log_cli=1".split()) + TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG}".split()) # Run local tests. ret = pytest.main(TEST_ARGS + sys.argv[1:]) diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index bd307a4e10..b8262df58f 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -468,9 +468,7 @@ def handle_test_env() -> None: UV_ARGS.append(f"--group {framework}") else: - # Use --capture=tee-sys so pytest prints test output inline: - # https://docs.pytest.org/en/stable/how-to/capture-stdout-stderr.html - TEST_ARGS = f"-v --capture=tee-sys --durations=5 {TEST_ARGS}" + TEST_ARGS = f"-v --durations=5 {TEST_ARGS}" TEST_SUITE = TEST_SUITE_MAP.get(test_name) if TEST_SUITE: TEST_ARGS = f"-m {TEST_SUITE} {TEST_ARGS}" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1125113681..369e688b1e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -352,11 +352,11 @@ If you are running one of the `no-responder` tests, omit the `run-server` step. ## Enable Debug Logs -- Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`. -- Add `log_cli_level = "DEBUG` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine. -- You can also set `DEBUG_LOG=1` and run either `just setup-tests` or `just-test`. +- Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest` to output all debug logs to the terminal. **Warning**: This will output a huge amount of logs. +- Add `log_cli=1` and `log_cli_level="DEBUG"` to the `tool.pytest.ini_options` section in `pyproject.toml` to enable debug logs in this manner by default on your machine. +- Set `DEBUG_LOG=1` and run `just setup-tests`, `just-test`, or `pytest` to enable debug logs only for failed tests. - Finally, you can use `just setup-tests --debug-log`. -- For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for the patch. +- For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for failed tests in the patch. ## Adding a new test suite diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index c9cfca81fc..143cccc3c8 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -34,7 +34,7 @@ import time import uuid from typing import Any, Iterable, Type, no_type_check -from unittest import mock +from unittest import mock, skipIf from unittest.mock import patch import pytest @@ -629,6 +629,7 @@ def test_detected_environment_logging(self, mock_get_hosts): logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) + @skipIf(os.environ.get("DEBUG_LOG"), "Enabling debug logs breaks this test") @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") async def test_detected_environment_warning(self, mock_get_hosts): with self._caplog.at_level(logging.WARN): diff --git a/test/test_client.py b/test/test_client.py index 038ba2241b..8ef95699ca 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -34,7 +34,7 @@ import time import uuid from typing import Any, Iterable, Type, no_type_check -from unittest import mock +from unittest import mock, skipIf from unittest.mock import patch import pytest @@ -622,6 +622,7 @@ def test_detected_environment_logging(self, mock_get_hosts): logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] self.assertEqual(len(logs), 7) + @skipIf(os.environ.get("DEBUG_LOG"), "Enabling debug logs breaks this test") @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") def test_detected_environment_warning(self, mock_get_hosts): with self._caplog.at_level(logging.WARN): From d51c70b401d8f9b4dfb24c78ed94d3302799fd10 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 21 Apr 2025 10:48:26 -0500 Subject: [PATCH 1891/2111] PYTHON-5337 Evergreen PyOpenSSL variants should use PyOpenSSL (#2299) --- .evergreen/generated_configs/variants.yml | 12 ++++++------ .evergreen/scripts/generate_config.py | 2 +- .evergreen/scripts/setup-tests.sh | 2 ++ .evergreen/scripts/setup_tests.py | 4 ++-- .evergreen/scripts/utils.py | 1 - 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index bcce7818ac..08c0dbbf36 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -620,7 +620,7 @@ buildvariants: - macos-14 batchtime: 10080 expansions: - TEST_NAME: pyopenssl + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-python3.10 tasks: @@ -631,7 +631,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: pyopenssl + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-python3.11 tasks: @@ -642,7 +642,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: pyopenssl + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-python3.12 tasks: @@ -653,7 +653,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: pyopenssl + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-python3.13 tasks: @@ -664,7 +664,7 @@ buildvariants: - windows-64-vsMulti-small batchtime: 10080 expansions: - TEST_NAME: pyopenssl + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.10 tasks: @@ -675,7 +675,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: pyopenssl + SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Search index tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 80eb248f35..7b88be85b7 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -250,7 +250,7 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" batchtime = BATCHTIME_WEEK - expansions = dict(TEST_NAME="pyopenssl") + expansions = dict(SUB_TEST_NAME="pyopenssl") variants = [] for python in ALL_PYTHONS: diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh index 0b75051a68..1074c7eaaf 100755 --- a/.evergreen/scripts/setup-tests.sh +++ b/.evergreen/scripts/setup-tests.sh @@ -20,4 +20,6 @@ if [ -f $SCRIPT_DIR/env.sh ]; then source $SCRIPT_DIR/env.sh fi +echo "Setting up tests with args \"$*\"..." uv run $SCRIPT_DIR/setup_tests.py "$@" +echo "Setting up tests with args \"$*\"... done." diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index b8262df58f..98c382ff60 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -394,8 +394,8 @@ def handle_test_env() -> None: load_config_from_file(csfle_dir / "secrets-export.sh") run_command(f"bash {csfle_dir.as_posix()}/start-servers.sh") - if sub_test_name == "pyopenssl": - UV_ARGS.append("--extra ocsp") + if sub_test_name == "pyopenssl": + UV_ARGS.append("--extra ocsp") if is_set("TEST_CRYPT_SHARED") or opts.crypt_shared: config = read_env(f"{DRIVERS_TOOLS}/mo-expansion.sh") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index bd93532600..c9195b638a 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -43,7 +43,6 @@ class Distro: "kms": "kms", "load_balancer": "load_balancer", "mockupdb": "mockupdb", - "pyopenssl": "", "ocsp": "ocsp", "perf": "perf", "serverless": "", From 412d0005b805588261ed8dd97aae5d448b2e41f7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 21 Apr 2025 15:53:21 -0400 Subject: [PATCH 1892/2111] PYTHON-5306 - Fix use of public MongoClient attributes before connection (#2285) --- doc/changelog.rst | 5 ++- pymongo/asynchronous/mongo_client.py | 39 ++++++++++++++++----- pymongo/asynchronous/settings.py | 7 ++-- pymongo/synchronous/mongo_client.py | 39 ++++++++++++++++----- pymongo/synchronous/settings.py | 7 ++-- test/asynchronous/test_client.py | 52 ++++++++++++++++++++++++++++ test/test_client.py | 52 ++++++++++++++++++++++++++++ 7 files changed, 178 insertions(+), 23 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 2fb225e2e1..4fff06c9cb 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -10,10 +10,13 @@ Version 4.12.1 is a bug fix release. - Fixed a bug that could raise ``UnboundLocalError`` when creating asynchronous connections over SSL. - Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. - Fixed a bug that caused direct use of ``pymongo.uri_parser`` to raise an ``AttributeError``. +- Fixed a bug where clients created with connect=False and a "mongodb+srv://" connection string + could cause public ``pymongo.MongoClient`` and ``pymongo.AsyncMongoClient`` attributes (topology_description, + nodes, address, primary, secondaries, arbiters) to incorrectly return a Database, leading to type + errors such as: "NotImplementedError: Database objects do not implement truth value testing or bool()". - Removed Eventlet testing against Python versions newer than 3.9 since Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. - Issues Resolved ............... diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 7744a75d9c..a236b21348 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -109,6 +109,7 @@ ) from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.results import ClientBulkWriteResult +from pymongo.server_description import ServerDescription from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription @@ -779,7 +780,7 @@ def __init__( keyword_opts["document_class"] = doc_class self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} - seeds = set() + self._seeds = set() is_srv = False username = None password = None @@ -804,18 +805,18 @@ def __init__( srv_max_hosts=srv_max_hosts, ) is_srv = entity.startswith(SRV_SCHEME) - seeds.update(res["nodelist"]) + self._seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password dbase = res["database"] or dbase opts = res["options"] fqdn = res["fqdn"] else: - seeds.update(split_hosts(entity, self._port)) - if not seeds: + self._seeds.update(split_hosts(entity, self._port)) + if not self._seeds: raise ConfigurationError("need to specify at least one host") - for hostname in [node[0] for node in seeds]: + for hostname in [node[0] for node in self._seeds]: if _detect_external_db(hostname): break @@ -838,7 +839,7 @@ def __init__( srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") - opts = self._normalize_and_validate_options(opts, seeds) + opts = self._normalize_and_validate_options(opts, self._seeds) # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) @@ -857,7 +858,7 @@ def __init__( "username": username, "password": password, "dbase": dbase, - "seeds": seeds, + "seeds": self._seeds, "fqdn": fqdn, "srv_service_name": srv_service_name, "pool_class": pool_class, @@ -873,8 +874,7 @@ def __init__( self._options.read_concern, ) - if not is_srv: - self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + self._init_based_on_options(self._seeds, srv_max_hosts, srv_service_name) self._opened = False self._closed = False @@ -975,6 +975,7 @@ def _init_based_on_options( srv_service_name=srv_service_name, srv_max_hosts=srv_max_hosts, server_monitoring_mode=self._options.server_monitoring_mode, + topology_id=self._topology_settings._topology_id if self._topology_settings else None, ) if self._options.auto_encryption_opts: from pymongo.asynchronous.encryption import _Encrypter @@ -1205,6 +1206,16 @@ def topology_description(self) -> TopologyDescription: .. versionadded:: 4.0 """ + if self._topology is None: + servers = {(host, port): ServerDescription((host, port)) for host, port in self._seeds} + return TopologyDescription( + TOPOLOGY_TYPE.Unknown, + servers, + None, + None, + None, + self._topology_settings, + ) return self._topology.description @property @@ -1218,6 +1229,8 @@ def nodes(self) -> FrozenSet[_Address]: to any servers, or a network partition causes it to lose connection to all servers. """ + if self._topology is None: + return frozenset() description = self._topology.description return frozenset(s.address for s in description.known_servers) @@ -1576,6 +1589,8 @@ async def address(self) -> Optional[tuple[str, int]]: .. versionadded:: 3.0 """ + if self._topology is None: + await self._get_topology() topology_type = self._topology._description.topology_type if ( topology_type == TOPOLOGY_TYPE.Sharded @@ -1598,6 +1613,8 @@ async def primary(self) -> Optional[tuple[str, int]]: .. versionadded:: 3.0 AsyncMongoClient gained this property in version 3.0. """ + if self._topology is None: + await self._get_topology() return await self._topology.get_primary() # type: ignore[return-value] @property @@ -1611,6 +1628,8 @@ async def secondaries(self) -> set[_Address]: .. versionadded:: 3.0 AsyncMongoClient gained this property in version 3.0. """ + if self._topology is None: + await self._get_topology() return await self._topology.get_secondaries() @property @@ -1621,6 +1640,8 @@ async def arbiters(self) -> set[_Address]: connected to a replica set, there are no arbiters, or this client was created without the `replicaSet` option. """ + if self._topology is None: + await self._get_topology() return await self._topology.get_arbiters() @property diff --git a/pymongo/asynchronous/settings.py b/pymongo/asynchronous/settings.py index 62be853fba..9c2331971a 100644 --- a/pymongo/asynchronous/settings.py +++ b/pymongo/asynchronous/settings.py @@ -51,6 +51,7 @@ def __init__( srv_service_name: str = common.SRV_SERVICE_NAME, srv_max_hosts: int = 0, server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + topology_id: Optional[ObjectId] = None, ): """Represent MongoClient's configuration. @@ -78,8 +79,10 @@ def __init__( self._srv_service_name = srv_service_name self._srv_max_hosts = srv_max_hosts or 0 self._server_monitoring_mode = server_monitoring_mode - - self._topology_id = ObjectId() + if topology_id is not None: + self._topology_id = topology_id + else: + self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. self._stack = "".join(traceback.format_stack()[:-2]) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 1c0adb5d6b..99a517e5c1 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -101,6 +101,7 @@ ) from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.results import ClientBulkWriteResult +from pymongo.server_description import ServerDescription from pymongo.server_selectors import writable_server_selector from pymongo.server_type import SERVER_TYPE from pymongo.synchronous import client_session, database, uri_parser @@ -777,7 +778,7 @@ def __init__( keyword_opts["document_class"] = doc_class self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} - seeds = set() + self._seeds = set() is_srv = False username = None password = None @@ -802,18 +803,18 @@ def __init__( srv_max_hosts=srv_max_hosts, ) is_srv = entity.startswith(SRV_SCHEME) - seeds.update(res["nodelist"]) + self._seeds.update(res["nodelist"]) username = res["username"] or username password = res["password"] or password dbase = res["database"] or dbase opts = res["options"] fqdn = res["fqdn"] else: - seeds.update(split_hosts(entity, self._port)) - if not seeds: + self._seeds.update(split_hosts(entity, self._port)) + if not self._seeds: raise ConfigurationError("need to specify at least one host") - for hostname in [node[0] for node in seeds]: + for hostname in [node[0] for node in self._seeds]: if _detect_external_db(hostname): break @@ -836,7 +837,7 @@ def __init__( srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") - opts = self._normalize_and_validate_options(opts, seeds) + opts = self._normalize_and_validate_options(opts, self._seeds) # Username and password passed as kwargs override user info in URI. username = opts.get("username", username) @@ -855,7 +856,7 @@ def __init__( "username": username, "password": password, "dbase": dbase, - "seeds": seeds, + "seeds": self._seeds, "fqdn": fqdn, "srv_service_name": srv_service_name, "pool_class": pool_class, @@ -871,8 +872,7 @@ def __init__( self._options.read_concern, ) - if not is_srv: - self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + self._init_based_on_options(self._seeds, srv_max_hosts, srv_service_name) self._opened = False self._closed = False @@ -973,6 +973,7 @@ def _init_based_on_options( srv_service_name=srv_service_name, srv_max_hosts=srv_max_hosts, server_monitoring_mode=self._options.server_monitoring_mode, + topology_id=self._topology_settings._topology_id if self._topology_settings else None, ) if self._options.auto_encryption_opts: from pymongo.synchronous.encryption import _Encrypter @@ -1203,6 +1204,16 @@ def topology_description(self) -> TopologyDescription: .. versionadded:: 4.0 """ + if self._topology is None: + servers = {(host, port): ServerDescription((host, port)) for host, port in self._seeds} + return TopologyDescription( + TOPOLOGY_TYPE.Unknown, + servers, + None, + None, + None, + self._topology_settings, + ) return self._topology.description @property @@ -1216,6 +1227,8 @@ def nodes(self) -> FrozenSet[_Address]: to any servers, or a network partition causes it to lose connection to all servers. """ + if self._topology is None: + return frozenset() description = self._topology.description return frozenset(s.address for s in description.known_servers) @@ -1570,6 +1583,8 @@ def address(self) -> Optional[tuple[str, int]]: .. versionadded:: 3.0 """ + if self._topology is None: + self._get_topology() topology_type = self._topology._description.topology_type if ( topology_type == TOPOLOGY_TYPE.Sharded @@ -1592,6 +1607,8 @@ def primary(self) -> Optional[tuple[str, int]]: .. versionadded:: 3.0 MongoClient gained this property in version 3.0. """ + if self._topology is None: + self._get_topology() return self._topology.get_primary() # type: ignore[return-value] @property @@ -1605,6 +1622,8 @@ def secondaries(self) -> set[_Address]: .. versionadded:: 3.0 MongoClient gained this property in version 3.0. """ + if self._topology is None: + self._get_topology() return self._topology.get_secondaries() @property @@ -1615,6 +1634,8 @@ def arbiters(self) -> set[_Address]: connected to a replica set, there are no arbiters, or this client was created without the `replicaSet` option. """ + if self._topology is None: + self._get_topology() return self._topology.get_arbiters() @property diff --git a/pymongo/synchronous/settings.py b/pymongo/synchronous/settings.py index bb17de1874..61b86fa18d 100644 --- a/pymongo/synchronous/settings.py +++ b/pymongo/synchronous/settings.py @@ -51,6 +51,7 @@ def __init__( srv_service_name: str = common.SRV_SERVICE_NAME, srv_max_hosts: int = 0, server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + topology_id: Optional[ObjectId] = None, ): """Represent MongoClient's configuration. @@ -78,8 +79,10 @@ def __init__( self._srv_service_name = srv_service_name self._srv_max_hosts = srv_max_hosts or 0 self._server_monitoring_mode = server_monitoring_mode - - self._topology_id = ObjectId() + if topology_id is not None: + self._topology_id = topology_id + else: + self._topology_id = ObjectId() # Store the allocation traceback to catch unclosed clients in the # test suite. self._stack = "".join(traceback.format_stack()[:-2]) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 143cccc3c8..b9deb985bd 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -850,6 +850,58 @@ async def test_init_disconnected_with_auth(self): with self.assertRaises(ConnectionFailure): await c.pymongo_test.test.find_one() + @async_client_context.require_no_standalone + @async_client_context.require_no_load_balancer + @async_client_context.require_tls + async def test_init_disconnected_with_srv(self): + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # nodes returns an empty set if not connected + self.assertEqual(c.nodes, frozenset()) + # topology_description returns the initial seed description if not connected + topology_description = c.topology_description + self.assertEqual(topology_description.topology_type, TOPOLOGY_TYPE.Unknown) + self.assertEqual( + { + ("test1.test.build.10gen.cc", None): ServerDescription( + ("test1.test.build.10gen.cc", None) + ) + }, + topology_description.server_descriptions(), + ) + + # address causes client to block until connected + self.assertIsNotNone(await c.address) + # Initial seed topology and connected topology have the same ID + self.assertEqual( + c._topology._topology_id, topology_description._topology_settings._topology_id + ) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # primary causes client to block until connected + await c.primary + self.assertIsNotNone(c._topology) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # secondaries causes client to block until connected + await c.secondaries + self.assertIsNotNone(c._topology) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # arbiters causes client to block until connected + await c.arbiters + self.assertIsNotNone(c._topology) + async def test_equality(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = await self.async_rs_or_single_client(seed, connect=False) diff --git a/test/test_client.py b/test/test_client.py index 8ef95699ca..c2df8ab2b6 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -825,6 +825,58 @@ def test_init_disconnected_with_auth(self): with self.assertRaises(ConnectionFailure): c.pymongo_test.test.find_one() + @client_context.require_no_standalone + @client_context.require_no_load_balancer + @client_context.require_tls + def test_init_disconnected_with_srv(self): + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # nodes returns an empty set if not connected + self.assertEqual(c.nodes, frozenset()) + # topology_description returns the initial seed description if not connected + topology_description = c.topology_description + self.assertEqual(topology_description.topology_type, TOPOLOGY_TYPE.Unknown) + self.assertEqual( + { + ("test1.test.build.10gen.cc", None): ServerDescription( + ("test1.test.build.10gen.cc", None) + ) + }, + topology_description.server_descriptions(), + ) + + # address causes client to block until connected + self.assertIsNotNone(c.address) + # Initial seed topology and connected topology have the same ID + self.assertEqual( + c._topology._topology_id, topology_description._topology_settings._topology_id + ) + c.close() + + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # primary causes client to block until connected + c.primary + self.assertIsNotNone(c._topology) + c.close() + + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # secondaries causes client to block until connected + c.secondaries + self.assertIsNotNone(c._topology) + c.close() + + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # arbiters causes client to block until connected + c.arbiters + self.assertIsNotNone(c._topology) + def test_equality(self): seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) c = self.rs_or_single_client(seed, connect=False) From e2e673edeb711e24d7feb620e7552a2a7af2f0b0 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 22 Apr 2025 11:44:58 -0700 Subject: [PATCH 1893/2111] PYTHON-5314 Fix default imports for modules that worked in v4.8 (#2300) --- pymongo/__init__.py | 9 ++++++++- test/test_default_exports.py | 6 ++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 9a35750811..95eabef242 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -106,7 +106,14 @@ from pymongo.write_concern import WriteConcern # Public module compatibility imports -import pymongo.uri_parser # noqa: F401 # isort: skip +# isort: off +from pymongo import uri_parser # noqa: F401 +from pymongo import change_stream # noqa: F401 +from pymongo import client_session # noqa: F401 +from pymongo import collection # noqa: F401 +from pymongo import command_cursor # noqa: F401 +from pymongo import database # noqa: F401 +# isort: on version = __version__ """Current version of PyMongo.""" diff --git a/test/test_default_exports.py b/test/test_default_exports.py index 5f3e749d36..adc3882a36 100644 --- a/test/test_default_exports.py +++ b/test/test_default_exports.py @@ -215,6 +215,12 @@ def test_pymongo_submodule_attributes(self): self.assertTrue(hasattr(pymongo, "uri_parser")) self.assertTrue(pymongo.uri_parser) self.assertTrue(pymongo.uri_parser.parse_uri) + self.assertTrue(pymongo.change_stream) + self.assertTrue(pymongo.client_session) + self.assertTrue(pymongo.collection) + self.assertTrue(pymongo.cursor) + self.assertTrue(pymongo.command_cursor) + self.assertTrue(pymongo.database) def test_gridfs_imports(self): import gridfs From 09897b698e0ef6cf429c93ac726c4e65dccb53f7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 23 Apr 2025 15:13:38 -0400 Subject: [PATCH 1894/2111] PYTHON-5212 - Do not hold Topology lock while resetting pool (#2301) --- pymongo/asynchronous/pool.py | 31 ++++++-- pymongo/asynchronous/topology.py | 11 ++- pymongo/synchronous/pool.py | 31 ++++++-- pymongo/synchronous/topology.py | 11 ++- test/__init__.py | 8 ++ test/asynchronous/__init__.py | 8 ++ .../test_discovery_and_monitoring.py | 73 +++++++++++++++++++ test/test_discovery_and_monitoring.py | 71 ++++++++++++++++++ tools/synchro.py | 11 ++- 9 files changed, 230 insertions(+), 25 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index a67cc5f3c8..8b18ab927b 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -14,6 +14,7 @@ from __future__ import annotations +import asyncio import collections import contextlib import logging @@ -860,8 +861,14 @@ async def _reset( # PoolClosedEvent but that reset() SHOULD close sockets *after* # publishing the PoolClearedEvent. if close: - for conn in sockets: - await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], + return_exceptions=True, + ) + else: + for conn in sockets: + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) if self.enabled_for_cmap: assert listeners is not None listeners.publish_pool_closed(self.address) @@ -891,8 +898,14 @@ async def _reset( serverPort=self.address[1], serviceId=service_id, ) - for conn in sockets: - await conn.close_conn(ConnectionClosedReason.STALE) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], + return_exceptions=True, + ) + else: + for conn in sockets: + await conn.close_conn(ConnectionClosedReason.STALE) async def update_is_writable(self, is_writable: Optional[bool]) -> None: """Updates the is_writable attribute on all sockets currently in the @@ -938,8 +951,14 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds ): close_conns.append(self.conns.pop()) - for conn in close_conns: - await conn.close_conn(ConnectionClosedReason.IDLE) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], + return_exceptions=True, + ) + else: + for conn in close_conns: + await conn.close_conn(ConnectionClosedReason.IDLE) while True: async with self.size_cond: diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 32776bf7b9..438dd1e352 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -529,12 +529,6 @@ async def _process_change( if not _IS_SYNC: self._monitor_tasks.append(self._srv_monitor) - # Clear the pool from a failed heartbeat. - if reset_pool: - server = self._servers.get(server_description.address) - if server: - await server.pool.reset(interrupt_connections=interrupt_connections) - # Wake anything waiting in select_servers(). self._condition.notify_all() @@ -557,6 +551,11 @@ async def on_change( # that didn't include this server. if self._opened and self._description.has_server(server_description.address): await self._process_change(server_description, reset_pool, interrupt_connections) + # Clear the pool from a failed heartbeat, done outside the lock to avoid blocking on connection close. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + await server.pool.reset(interrupt_connections=interrupt_connections) async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 224834af31..b3eec64f27 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -14,6 +14,7 @@ from __future__ import annotations +import asyncio import collections import contextlib import logging @@ -858,8 +859,14 @@ def _reset( # PoolClosedEvent but that reset() SHOULD close sockets *after* # publishing the PoolClearedEvent. if close: - for conn in sockets: - conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], + return_exceptions=True, + ) + else: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) if self.enabled_for_cmap: assert listeners is not None listeners.publish_pool_closed(self.address) @@ -889,8 +896,14 @@ def _reset( serverPort=self.address[1], serviceId=service_id, ) - for conn in sockets: - conn.close_conn(ConnectionClosedReason.STALE) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], + return_exceptions=True, + ) + else: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.STALE) def update_is_writable(self, is_writable: Optional[bool]) -> None: """Updates the is_writable attribute on all sockets currently in the @@ -934,8 +947,14 @@ def remove_stale_sockets(self, reference_generation: int) -> None: and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds ): close_conns.append(self.conns.pop()) - for conn in close_conns: - conn.close_conn(ConnectionClosedReason.IDLE) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], + return_exceptions=True, + ) + else: + for conn in close_conns: + conn.close_conn(ConnectionClosedReason.IDLE) while True: with self.size_cond: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index df23bff28c..1e99adf726 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -529,12 +529,6 @@ def _process_change( if not _IS_SYNC: self._monitor_tasks.append(self._srv_monitor) - # Clear the pool from a failed heartbeat. - if reset_pool: - server = self._servers.get(server_description.address) - if server: - server.pool.reset(interrupt_connections=interrupt_connections) - # Wake anything waiting in select_servers(). self._condition.notify_all() @@ -557,6 +551,11 @@ def on_change( # that didn't include this server. if self._opened and self._description.has_server(server_description.address): self._process_change(server_description, reset_pool, interrupt_connections) + # Clear the pool from a failed heartbeat, done outside the lock to avoid blocking on connection close. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + server.pool.reset(interrupt_connections=interrupt_connections) def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: """Process a new seedlist on an opened topology. diff --git a/test/__init__.py b/test/__init__.py index 7ae3432062..39b4045e66 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -826,6 +826,14 @@ def require_sync(self, func): lambda: _IS_SYNC, "This test only works with the synchronous API", func=func ) + def require_async(self, func): + """Run a test only if using the asynchronous API.""" # unasync: off + return self._require( + lambda: not _IS_SYNC, + "This test only works with the asynchronous API", # unasync: off + func=func, + ) + def mongos_seeds(self): return ",".join("{}:{}".format(*address) for address in self.mongoses) diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index c57bf2a880..882cb6110f 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -828,6 +828,14 @@ def require_sync(self, func): lambda: _IS_SYNC, "This test only works with the synchronous API", func=func ) + def require_async(self, func): + """Run a test only if using the asynchronous API.""" # unasync: off + return self._require( + lambda: not _IS_SYNC, + "This test only works with the asynchronous API", # unasync: off + func=func, + ) + def mongos_seeds(self): return ",".join("{}:{}".format(*address) for address in self.mongoses) diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index fa62b25dd1..cf26faf248 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -20,10 +20,15 @@ import socketserver import sys import threading +import time from asyncio import StreamReader, StreamWriter from pathlib import Path from test.asynchronous.helpers import ConcurrentRunner +from pymongo.asynchronous.pool import AsyncConnection +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector + sys.path[0:0] = [""] from test.asynchronous import ( @@ -370,6 +375,74 @@ async def test_pool_unpause(self): await listener.async_wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + @async_client_context.require_failCommand_appName + @async_client_context.require_test_commands + @async_client_context.require_async + async def test_connection_close_does_not_block_other_operations(self): + listener = CMAPHeartbeatListener() + client = await self.async_single_client( + appName="SDAMConnectionCloseTest", + event_listeners=[listener], + heartbeatFrequencyMS=500, + minPoolSize=10, + ) + server = await (await client._get_topology()).select_server( + writable_server_selector, _Op.TEST + ) + await async_wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) + + await client.db.test.insert_one({"x": 1}) + close_delay = 0.1 + latencies = [] + should_exit = [] + + async def run_task(): + while True: + start_time = time.monotonic() + await client.db.test.find_one({}) + elapsed = time.monotonic() - start_time + latencies.append(elapsed) + if should_exit: + break + await asyncio.sleep(0.001) + + task = ConcurrentRunner(target=run_task) + await task.start() + original_close = AsyncConnection.close_conn + try: + # Artificially delay the close operation to simulate a slow close + async def mock_close(self, reason): + await asyncio.sleep(close_delay) + await original_close(self, reason) + + AsyncConnection.close_conn = mock_close + + fail_hello = { + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 91, + "appName": "SDAMConnectionCloseTest", + }, + } + async with self.fail_point(fail_hello): + # Wait for server heartbeat to fail + await listener.async_wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + # Wait until all idle connections are closed to simulate real-world conditions + await listener.async_wait_for_event(monitoring.ConnectionClosedEvent, 10) + # Wait for one more find to complete after the pool has been reset, then shutdown the task + n = len(latencies) + await async_wait_until(lambda: len(latencies) >= n + 1, "run one more find") + should_exit.append(True) + await task.join() + # No operation latency should not significantly exceed close_delay + self.assertLessEqual(max(latencies), close_delay * 5.0) + finally: + AsyncConnection.close_conn = original_close + class TestServerMonitoringMode(AsyncIntegrationTest): @async_client_context.require_no_serverless diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 07720473ca..9d6c945707 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -20,10 +20,15 @@ import socketserver import sys import threading +import time from asyncio import StreamReader, StreamWriter from pathlib import Path from test.helpers import ConcurrentRunner +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.pool import Connection + sys.path[0:0] = [""] from test import ( @@ -370,6 +375,72 @@ def test_pool_unpause(self): listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) listener.wait_for_event(monitoring.PoolReadyEvent, 1) + @client_context.require_failCommand_appName + @client_context.require_test_commands + @client_context.require_async + def test_connection_close_does_not_block_other_operations(self): + listener = CMAPHeartbeatListener() + client = self.single_client( + appName="SDAMConnectionCloseTest", + event_listeners=[listener], + heartbeatFrequencyMS=500, + minPoolSize=10, + ) + server = (client._get_topology()).select_server(writable_server_selector, _Op.TEST) + wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) + + client.db.test.insert_one({"x": 1}) + close_delay = 0.1 + latencies = [] + should_exit = [] + + def run_task(): + while True: + start_time = time.monotonic() + client.db.test.find_one({}) + elapsed = time.monotonic() - start_time + latencies.append(elapsed) + if should_exit: + break + time.sleep(0.001) + + task = ConcurrentRunner(target=run_task) + task.start() + original_close = Connection.close_conn + try: + # Artificially delay the close operation to simulate a slow close + def mock_close(self, reason): + time.sleep(close_delay) + original_close(self, reason) + + Connection.close_conn = mock_close + + fail_hello = { + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 91, + "appName": "SDAMConnectionCloseTest", + }, + } + with self.fail_point(fail_hello): + # Wait for server heartbeat to fail + listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + # Wait until all idle connections are closed to simulate real-world conditions + listener.wait_for_event(monitoring.ConnectionClosedEvent, 10) + # Wait for one more find to complete after the pool has been reset, then shutdown the task + n = len(latencies) + wait_until(lambda: len(latencies) >= n + 1, "run one more find") + should_exit.append(True) + task.join() + # No operation latency should not significantly exceed close_delay + self.assertLessEqual(max(latencies), close_delay * 5.0) + finally: + Connection.close_conn = original_close + class TestServerMonitoringMode(IntegrationTest): @client_context.require_no_serverless diff --git a/tools/synchro.py b/tools/synchro.py index f6176e2038..bfe8f71125 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -288,7 +288,8 @@ def process_files( if file in docstring_translate_files: lines = translate_docstrings(lines) if file in sync_test_files: - translate_imports(lines) + lines = translate_imports(lines) + lines = process_ignores(lines) f.seek(0) f.writelines(lines) f.truncate() @@ -390,6 +391,14 @@ def translate_docstrings(lines: list[str]) -> list[str]: return [line for line in lines if line != "DOCSTRING_REMOVED"] +def process_ignores(lines: list[str]) -> list[str]: + for i in range(len(lines)): + for k, v in replacements.items(): + if "unasync: off" in lines[i] and v in lines[i]: + lines[i] = lines[i].replace(v, k) + return lines + + def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[str, str]) -> None: unasync_files( files, From 42cb70e9ab4300cd27eb4340bf6e59dbc5f973e0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 23 Apr 2025 14:43:49 -0500 Subject: [PATCH 1895/2111] PYTHON-5341 Fix handling of SSL tests with Stable API (#2305) --- test/asynchronous/test_ssl.py | 6 ++++++ test/test_ssl.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py index d920b77ac2..4d7566a61d 100644 --- a/test/asynchronous/test_ssl.py +++ b/test/asynchronous/test_ssl.py @@ -166,11 +166,14 @@ async def asyncTearDown(self): @async_client_context.require_tls async def test_simple_ssl(self): + if "PyPy" in sys.version: + self.skipTest("Test is flaky on PyPy") # Expects the server to be running with ssl and with # no --sslPEMKeyFile or with --sslWeakCertificateValidation await self.assertClientWorks(self.client) @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version @ignore_deprecations async def test_tlsCertificateKeyFilePassword(self): # Expects the server to be running with server.pem and ca.pem @@ -376,6 +379,7 @@ async def test_cert_ssl_validation_hostname_matching(self): ) @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version @ignore_deprecations async def test_tlsCRLFile_support(self): if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: @@ -531,6 +535,7 @@ def test_wincertstore(self): @async_client_context.require_auth @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version @ignore_deprecations async def test_mongodb_x509_auth(self): host, port = await async_client_context.host, await async_client_context.port @@ -640,6 +645,7 @@ async def test_mongodb_x509_auth(self): self.fail("Invalid certificate accepted.") @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version @ignore_deprecations async def test_connect_with_ca_bundle(self): def remove(path): diff --git a/test/test_ssl.py b/test/test_ssl.py index a66fe21be5..7decc8203d 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -166,11 +166,14 @@ def tearDown(self): @client_context.require_tls def test_simple_ssl(self): + if "PyPy" in sys.version: + self.skipTest("Test is flaky on PyPy") # Expects the server to be running with ssl and with # no --sslPEMKeyFile or with --sslWeakCertificateValidation self.assertClientWorks(self.client) @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_tlsCertificateKeyFilePassword(self): # Expects the server to be running with server.pem and ca.pem @@ -376,6 +379,7 @@ def test_cert_ssl_validation_hostname_matching(self): ) @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_tlsCRLFile_support(self): if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: @@ -531,6 +535,7 @@ def test_wincertstore(self): @client_context.require_auth @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port @@ -640,6 +645,7 @@ def test_mongodb_x509_auth(self): self.fail("Invalid certificate accepted.") @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_connect_with_ca_bundle(self): def remove(path): From 1bdf035802479afd232b0d41fbe3823345773e9d Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 23 Apr 2025 16:32:08 -0400 Subject: [PATCH 1896/2111] PYTHON-5212 changelog update (#2306) --- doc/changelog.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4fff06c9cb..46e7364f53 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -16,6 +16,8 @@ Version 4.12.1 is a bug fix release. errors such as: "NotImplementedError: Database objects do not implement truth value testing or bool()". - Removed Eventlet testing against Python versions newer than 3.9 since Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. +- Fixed a bug where MongoDB cluster topology changes could cause asynchronous operations to take much longer to complete + due to holding the Topology lock while closing stale connections. Issues Resolved ............... From 34f7d7ee4c2cbe8ba23c91a489a938af2cb46386 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 23 Apr 2025 16:32:39 -0400 Subject: [PATCH 1897/2111] =?UTF-8?q?PYTHON-5346=20-=20test=5Finit=5Fdisco?= =?UTF-8?q?nnected=5Fwith=5Fsrv=20cannot=20run=20against=20shar=E2=80=A6?= =?UTF-8?q?=20(#2304)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- test/asynchronous/test_client.py | 2 +- test/test_client.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index b9deb985bd..1e1faf0a2a 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -850,7 +850,7 @@ async def test_init_disconnected_with_auth(self): with self.assertRaises(ConnectionFailure): await c.pymongo_test.test.find_one() - @async_client_context.require_no_standalone + @async_client_context.require_replica_set @async_client_context.require_no_load_balancer @async_client_context.require_tls async def test_init_disconnected_with_srv(self): diff --git a/test/test_client.py b/test/test_client.py index c2df8ab2b6..189e58e803 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -825,7 +825,7 @@ def test_init_disconnected_with_auth(self): with self.assertRaises(ConnectionFailure): c.pymongo_test.test.find_one() - @client_context.require_no_standalone + @client_context.require_replica_set @client_context.require_no_load_balancer @client_context.require_tls def test_init_disconnected_with_srv(self): From dae4f7f1590c333acb52c2af73205f4287367596 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 24 Apr 2025 09:28:10 -0500 Subject: [PATCH 1898/2111] PYTHON-5348 Fix CodeQL Scanning for GitHub Actions (#2308) --- .github/workflows/codeql.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index bb2418cf89..98cfa2f43f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -54,7 +54,6 @@ jobs: queries: security-extended config: | paths-ignore: - - '.github/**' - 'doc/**' - 'tools/**' - 'test/**' From c3e3373df2bf93c616dca0d53c7fbe3f7064d7ba Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 24 Apr 2025 16:19:09 -0700 Subject: [PATCH 1899/2111] PYTHON-5309 Ensure AsyncMongoClient doesn't use PyOpenSSL (#2286) Co-authored-by: Noah Stapp --- .evergreen/generated_configs/variants.yml | 14 +++-- .evergreen/scripts/generate_config.py | 29 +++++++--- doc/changelog.rst | 2 + pymongo/asynchronous/encryption.py | 9 ++- pymongo/asynchronous/pool.py | 6 +- pymongo/client_options.py | 7 ++- pymongo/encryption_options.py | 18 +++++- pymongo/network_layer.py | 22 +++----- pymongo/pool_shared.py | 19 ++++--- pymongo/ssl_support.py | 69 +++++++++++++++-------- pymongo/synchronous/encryption.py | 9 ++- pymongo/synchronous/pool.py | 6 +- pymongo/uri_parser_shared.py | 7 ++- test/asynchronous/test_encryption.py | 30 ++++++---- test/asynchronous/test_ssl.py | 37 +++++++----- test/atlas/test_connection.py | 6 +- test/test_encryption.py | 28 ++++++--- test/test_ssl.py | 35 +++++++----- tools/ocsptest.py | 1 + 19 files changed, 232 insertions(+), 122 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 08c0dbbf36..8ba16273de 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -620,17 +620,19 @@ buildvariants: - macos-14 batchtime: 10080 expansions: + TEST_NAME: default SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - name: pyopenssl-rhel8-python3.10 tasks: - - name: .replica_set .auth .ssl .sync - - name: .7.0 .auth .ssl .sync + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL RHEL8 Python3.10 run_on: - rhel87-small batchtime: 10080 expansions: + TEST_NAME: default SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.10/bin/python3 - name: pyopenssl-rhel8-python3.11 @@ -642,6 +644,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: + TEST_NAME: default SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.11/bin/python3 - name: pyopenssl-rhel8-python3.12 @@ -653,17 +656,19 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: + TEST_NAME: default SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/3.12/bin/python3 - name: pyopenssl-win64-python3.13 tasks: - - name: .replica_set .auth .ssl .sync - - name: .7.0 .auth .ssl .sync + - name: .replica_set .auth .ssl .sync_async + - name: .7.0 .auth .ssl .sync_async display_name: PyOpenSSL Win64 Python3.13 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: + TEST_NAME: default SUB_TEST_NAME: pyopenssl PYTHON_BINARY: C:/python/Python313/python.exe - name: pyopenssl-rhel8-pypy3.10 @@ -675,6 +680,7 @@ buildvariants: - rhel87-small batchtime: 10080 expansions: + TEST_NAME: default SUB_TEST_NAME: pyopenssl PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 7b88be85b7..be1a960db2 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -250,7 +250,7 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" batchtime = BATCHTIME_WEEK - expansions = dict(SUB_TEST_NAME="pyopenssl") + expansions = dict(TEST_NAME="default", SUB_TEST_NAME="pyopenssl") variants = [] for python in ALL_PYTHONS: @@ -265,14 +265,25 @@ def create_pyopenssl_variants(): host = DEFAULT_HOST display_name = get_variant_name(base_name, host, python=python) - variant = create_variant( - [f".replica_set .{auth} .{ssl} .sync", f".7.0 .{auth} .{ssl} .sync"], - display_name, - python=python, - host=host, - expansions=expansions, - batchtime=batchtime, - ) + # only need to run some on async + if python in (CPYTHONS[1], CPYTHONS[-1]): + variant = create_variant( + [f".replica_set .{auth} .{ssl} .sync_async", f".7.0 .{auth} .{ssl} .sync_async"], + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + ) + else: + variant = create_variant( + [f".replica_set .{auth} .{ssl} .sync", f".7.0 .{auth} .{ssl} .sync"], + display_name, + python=python, + host=host, + expansions=expansions, + batchtime=batchtime, + ) variants.append(variant) return variants diff --git a/doc/changelog.rst b/doc/changelog.rst index 46e7364f53..db344d4872 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -18,6 +18,8 @@ Version 4.12.1 is a bug fix release. Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. - Fixed a bug where MongoDB cluster topology changes could cause asynchronous operations to take much longer to complete due to holding the Topology lock while closing stale connections. +- Fixed a bug that would cause AsyncMongoClient to attempt to use PyOpenSSL when available, resulting in errors such as + "pymongo.errors.ServerSelectionTimeoutError: 'SSLContext' object has no attribute 'wrap_bio'". Issues Resolved ............... diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 71a694a619..9b0757b1a5 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -87,7 +87,7 @@ from pymongo.results import BulkWriteResult, DeleteResult from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context from pymongo.typings import _DocumentType, _DocumentTypeArg -from pymongo.uri_parser_shared import parse_host +from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host from pymongo.write_concern import WriteConcern if TYPE_CHECKING: @@ -157,6 +157,7 @@ def __init__( self.mongocryptd_client = mongocryptd_client self.opts = opts self._spawned = False + self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC) async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: """Complete a KMS request. @@ -168,7 +169,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: endpoint = kms_context.endpoint message = kms_context.message provider = kms_context.kms_provider - ctx = self.opts._kms_ssl_contexts.get(provider) + ctx = self._kms_ssl_contexts.get(provider) if ctx is None: # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. @@ -180,6 +181,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: False, # allow_invalid_certificates False, # allow_invalid_hostnames False, # disable_ocsp_endpoint_check + _IS_SYNC, ) # CSOT: set timeout for socket creation. connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) @@ -396,6 +398,8 @@ def __init__(self, client: AsyncMongoClient[_DocumentTypeArg], opts: AutoEncrypt encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) self._bypass_auto_encryption = opts._bypass_auto_encryption self._internal_client = None + # parsing kms_ssl_contexts here so that parsing errors will be raised before internal clients are created + opts._kms_ssl_contexts(_IS_SYNC) def _get_internal_client( encrypter: _Encrypter, mongo_client: AsyncMongoClient[_DocumentTypeArg] @@ -675,6 +679,7 @@ def __init__( kms_tls_options=kms_tls_options, key_expiration_ms=key_expiration_ms, ) + self._kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( None, key_vault_coll, None, opts ) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 8b18ab927b..f4d5b174fa 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -76,6 +76,7 @@ from pymongo.network_layer import AsyncNetworkingInterface, async_receive_message, async_sendall from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( + SSLErrors, _CancellationContext, _configured_protocol_interface, _get_timeout_details, @@ -86,7 +87,6 @@ from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import SSLError if TYPE_CHECKING: from bson import CodecOptions @@ -638,7 +638,7 @@ async def _raise_connection_failure(self, error: BaseException) -> NoReturn: reason = ConnectionClosedReason.ERROR await self.close_conn(reason) # SSLError from PyOpenSSL inherits directly from Exception. - if isinstance(error, (IOError, OSError, SSLError)): + if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) else: @@ -1052,7 +1052,7 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), error=ConnectionClosedReason.ERROR, ) - if isinstance(error, (IOError, OSError, SSLError)): + if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index a66e87c9f6..bd27dd4eb0 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -84,7 +84,9 @@ def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: return ReadConcern(concern) -def _parse_ssl_options(options: Mapping[str, Any]) -> tuple[Optional[SSLContext], bool]: +def _parse_ssl_options( + options: Mapping[str, Any], is_sync: bool +) -> tuple[Optional[SSLContext], bool]: """Parse ssl options.""" use_tls = options.get("tls") if use_tls is not None: @@ -138,6 +140,7 @@ def _parse_ssl_options(options: Mapping[str, Any]) -> tuple[Optional[SSLContext] allow_invalid_certificates, allow_invalid_hostnames, disable_ocsp_endpoint_check, + is_sync, ) return ctx, allow_invalid_hostnames return None, allow_invalid_hostnames @@ -167,7 +170,7 @@ def _parse_pool_options( compression_settings = CompressionSettings( options.get("compressors", []), options.get("zlibcompressionlevel", -1) ) - ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) + ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options, is_sync) load_balanced = options.get("loadbalanced") max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) return PoolOptions( diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index 4cb94cba30..e9ad1c1e01 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -20,6 +20,8 @@ from typing import TYPE_CHECKING, Any, Mapping, Optional +from pymongo.uri_parser_shared import _parse_kms_tls_options + try: import pymongocrypt # type:ignore[import-untyped] # noqa: F401 @@ -32,9 +34,9 @@ from bson import int64 from pymongo.common import validate_is_mapping from pymongo.errors import ConfigurationError -from pymongo.uri_parser_shared import _parse_kms_tls_options if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext from pymongo.typings import _AgnosticMongoClient, _DocumentTypeArg @@ -236,10 +238,22 @@ def __init__( if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. - self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) + self._kms_tls_options = kms_tls_options + self._sync_kms_ssl_contexts: Optional[dict[str, SSLContext]] = None + self._async_kms_ssl_contexts: Optional[dict[str, SSLContext]] = None self._bypass_query_analysis = bypass_query_analysis self._key_expiration_ms = key_expiration_ms + def _kms_ssl_contexts(self, is_sync: bool) -> dict[str, SSLContext]: + if is_sync: + if self._sync_kms_ssl_contexts is None: + self._sync_kms_ssl_contexts = _parse_kms_tls_options(self._kms_tls_options, True) + return self._sync_kms_ssl_contexts + else: + if self._async_kms_ssl_contexts is None: + self._async_kms_ssl_contexts = _parse_kms_tls_options(self._kms_tls_options, False) + return self._async_kms_ssl_contexts + class RangeOpts: """Options to configure encrypted queries using the range algorithm.""" diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index e287655c61..3fa180bf7a 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -46,22 +46,18 @@ _HAVE_SSL = False try: - from pymongo.pyopenssl_context import ( - BLOCKING_IO_LOOKUP_ERROR, - BLOCKING_IO_READ_ERROR, - BLOCKING_IO_WRITE_ERROR, - _sslConn, - ) + from pymongo.pyopenssl_context import _sslConn _HAVE_PYOPENSSL = True except ImportError: _HAVE_PYOPENSSL = False - _sslConn = SSLSocket # type: ignore - from pymongo.ssl_support import ( # type: ignore[assignment] - BLOCKING_IO_LOOKUP_ERROR, - BLOCKING_IO_READ_ERROR, - BLOCKING_IO_WRITE_ERROR, - ) + _sslConn = SSLSocket # type: ignore[assignment, misc] + +from pymongo.ssl_support import ( + BLOCKING_IO_LOOKUP_ERROR, + BLOCKING_IO_READ_ERROR, + BLOCKING_IO_WRITE_ERROR, +) if TYPE_CHECKING: from pymongo.asynchronous.pool import AsyncConnection @@ -71,7 +67,7 @@ _UNPACK_COMPRESSION_HEADER = struct.Struct(" Union[socket. try: # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. - if HAS_SNI: + if _has_sni(True): ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] else: ssl_sock = ssl_context.wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] @@ -467,7 +468,7 @@ def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket. # Raise _CertificateError directly like we do after match_hostname # below. raise - except (OSError, SSLError) as exc: + except (OSError, *SSLErrors) as exc: sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol @@ -507,7 +508,7 @@ def _configured_socket_interface(address: _Address, options: PoolOptions) -> Net try: # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. - if HAS_SNI: + if _has_sni(True): ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) else: ssl_sock = ssl_context.wrap_socket(sock) @@ -516,7 +517,7 @@ def _configured_socket_interface(address: _Address, options: PoolOptions) -> Net # Raise _CertificateError directly like we do after match_hostname # below. raise - except (OSError, SSLError) as exc: + except (OSError, *SSLErrors) as exc: sock.close() # We raise AutoReconnect for transient and permanent SSL handshake # failures alike. Permanent handshake failures, like protocol diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 2e6a509e3e..beafc717eb 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -15,16 +15,19 @@ """Support for SSL in PyMongo.""" from __future__ import annotations +import types import warnings -from typing import Optional +from typing import Any, Optional, Union from pymongo.errors import ConfigurationError HAVE_SSL = True +HAVE_PYSSL = True try: - import pymongo.pyopenssl_context as _ssl + import pymongo.pyopenssl_context as _pyssl except (ImportError, AttributeError) as exc: + HAVE_PYSSL = False if isinstance(exc, AttributeError): warnings.warn( "Failed to use the installed version of PyOpenSSL. " @@ -35,10 +38,10 @@ UserWarning, stacklevel=2, ) - try: - import pymongo.ssl_context as _ssl # type: ignore[no-redef] - except ImportError: - HAVE_SSL = False +try: + import pymongo.ssl_context as _ssl +except ImportError: + HAVE_SSL = False if HAVE_SSL: @@ -49,14 +52,29 @@ import ssl as _stdlibssl # noqa: F401 from ssl import CERT_NONE, CERT_REQUIRED - HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = True + + if HAVE_PYSSL: + PYSSLError: Any = _pyssl.SSLError + BLOCKING_IO_ERRORS: tuple = _ssl.BLOCKING_IO_ERRORS + _pyssl.BLOCKING_IO_ERRORS + BLOCKING_IO_READ_ERROR: tuple = (_pyssl.BLOCKING_IO_READ_ERROR, _ssl.BLOCKING_IO_READ_ERROR) + BLOCKING_IO_WRITE_ERROR: tuple = ( + _pyssl.BLOCKING_IO_WRITE_ERROR, + _ssl.BLOCKING_IO_WRITE_ERROR, + ) + else: + PYSSLError = _ssl.SSLError + BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS + BLOCKING_IO_READ_ERROR = (_ssl.BLOCKING_IO_READ_ERROR,) + BLOCKING_IO_WRITE_ERROR = (_ssl.BLOCKING_IO_WRITE_ERROR,) SSLError = _ssl.SSLError - BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS - BLOCKING_IO_READ_ERROR = _ssl.BLOCKING_IO_READ_ERROR - BLOCKING_IO_WRITE_ERROR = _ssl.BLOCKING_IO_WRITE_ERROR BLOCKING_IO_LOOKUP_ERROR = BLOCKING_IO_READ_ERROR + def _has_sni(is_sync: bool) -> bool: + if is_sync and HAVE_PYSSL: + return _pyssl.HAS_SNI + return _ssl.HAS_SNI + def get_ssl_context( certfile: Optional[str], passphrase: Optional[str], @@ -65,10 +83,15 @@ def get_ssl_context( allow_invalid_certificates: bool, allow_invalid_hostnames: bool, disable_ocsp_endpoint_check: bool, - ) -> _ssl.SSLContext: + is_sync: bool, + ) -> Union[_pyssl.SSLContext, _ssl.SSLContext]: # type: ignore[name-defined] """Create and return an SSLContext object.""" + if is_sync and HAVE_PYSSL: + ssl: types.ModuleType = _pyssl + else: + ssl = _ssl verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED - ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) + ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if verify_mode != CERT_NONE: ctx.check_hostname = not allow_invalid_hostnames else: @@ -80,22 +103,20 @@ def get_ssl_context( # up to date versions of MongoDB 2.4 and above already disable # SSLv2 and SSLv3, python disables SSLv2 by default in >= 2.7.7 # and >= 3.3.4 and SSLv3 in >= 3.4.3. - ctx.options |= _ssl.OP_NO_SSLv2 - ctx.options |= _ssl.OP_NO_SSLv3 - ctx.options |= _ssl.OP_NO_COMPRESSION - ctx.options |= _ssl.OP_NO_RENEGOTIATION + ctx.options |= ssl.OP_NO_SSLv2 + ctx.options |= ssl.OP_NO_SSLv3 + ctx.options |= ssl.OP_NO_COMPRESSION + ctx.options |= ssl.OP_NO_RENEGOTIATION if certfile is not None: try: ctx.load_cert_chain(certfile, None, passphrase) - except _ssl.SSLError as exc: + except ssl.SSLError as exc: raise ConfigurationError(f"Private key doesn't match certificate: {exc}") from None if crlfile is not None: - if _ssl.IS_PYOPENSSL: + if ssl.IS_PYOPENSSL: raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - ctx.verify_flags = getattr( # type:ignore[attr-defined] - _ssl, "VERIFY_CRL_CHECK_LEAF", 0 - ) + ctx.verify_flags = getattr(ssl, "VERIFY_CRL_CHECK_LEAF", 0) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) @@ -109,9 +130,11 @@ def get_ssl_context( class SSLError(Exception): # type: ignore pass - HAS_SNI = False IPADDR_SAFE = False - BLOCKING_IO_ERRORS = () # type:ignore[assignment] + BLOCKING_IO_ERRORS = () + + def _has_sni(is_sync: bool) -> bool: # noqa: ARG001 + return False def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index ed631e135d..5f9bdac4b7 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -86,7 +86,7 @@ from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient from pymongo.typings import _DocumentType, _DocumentTypeArg -from pymongo.uri_parser_shared import parse_host +from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host from pymongo.write_concern import WriteConcern if TYPE_CHECKING: @@ -156,6 +156,7 @@ def __init__( self.mongocryptd_client = mongocryptd_client self.opts = opts self._spawned = False + self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC) def kms_request(self, kms_context: MongoCryptKmsContext) -> None: """Complete a KMS request. @@ -167,7 +168,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: endpoint = kms_context.endpoint message = kms_context.message provider = kms_context.kms_provider - ctx = self.opts._kms_ssl_contexts.get(provider) + ctx = self._kms_ssl_contexts.get(provider) if ctx is None: # Enable strict certificate verification, OCSP, match hostname, and # SNI using the system default CA certificates. @@ -179,6 +180,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: False, # allow_invalid_certificates False, # allow_invalid_hostnames False, # disable_ocsp_endpoint_check + _IS_SYNC, ) # CSOT: set timeout for socket creation. connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) @@ -393,6 +395,8 @@ def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOp encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) self._bypass_auto_encryption = opts._bypass_auto_encryption self._internal_client = None + # parsing kms_ssl_contexts here so that parsing errors will be raised before internal clients are created + opts._kms_ssl_contexts(_IS_SYNC) def _get_internal_client( encrypter: _Encrypter, mongo_client: MongoClient[_DocumentTypeArg] @@ -668,6 +672,7 @@ def __init__( kms_tls_options=kms_tls_options, key_expiration_ms=key_expiration_ms, ) + self._kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( None, key_vault_coll, None, opts ) diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index b3eec64f27..44aec31a86 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -73,6 +73,7 @@ from pymongo.network_layer import NetworkingInterface, receive_message, sendall from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( + SSLErrors, _CancellationContext, _configured_socket_interface, _get_timeout_details, @@ -83,7 +84,6 @@ from pymongo.server_api import _add_to_command from pymongo.server_type import SERVER_TYPE from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import SSLError from pymongo.synchronous.client_session import _validate_session_write_concern from pymongo.synchronous.helpers import _handle_reauth from pymongo.synchronous.network import command @@ -636,7 +636,7 @@ def _raise_connection_failure(self, error: BaseException) -> NoReturn: reason = ConnectionClosedReason.ERROR self.close_conn(reason) # SSLError from PyOpenSSL inherits directly from Exception. - if isinstance(error, (IOError, OSError, SSLError)): + if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) else: @@ -1048,7 +1048,7 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), error=ConnectionClosedReason.ERROR, ) - if isinstance(error, (IOError, OSError, SSLError)): + if isinstance(error, (IOError, OSError, *SSLErrors)): details = _get_timeout_details(self.opts) _raise_connection_failure(self.address, error, timeout_details=details) diff --git a/pymongo/uri_parser_shared.py b/pymongo/uri_parser_shared.py index e7ba4c9fb5..0cef176bf1 100644 --- a/pymongo/uri_parser_shared.py +++ b/pymongo/uri_parser_shared.py @@ -420,7 +420,10 @@ def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") -def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict[str, SSLContext]: +def _parse_kms_tls_options( + kms_tls_options: Optional[Mapping[str, Any]], + is_sync: bool, +) -> dict[str, SSLContext]: """Parse KMS TLS connection options.""" if not kms_tls_options: return {} @@ -435,7 +438,7 @@ def _parse_kms_tls_options(kms_tls_options: Optional[Mapping[str, Any]]) -> dict opts = _handle_security_options(opts) opts = _normalize_options(opts) opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) - ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts, is_sync) if ssl_context is None: raise ConfigurationError("TLS is required for KMS providers") if allow_invalid_hostnames: diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index f9b03f6303..9e8758a1cd 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -41,6 +41,7 @@ from pymongo.asynchronous.collection import AsyncCollection from pymongo.asynchronous.helpers import anext from pymongo.daemon import _spawn_daemon +from pymongo.uri_parser_shared import _parse_kms_tls_options try: from pymongo.pyopenssl_context import IS_PYOPENSSL @@ -141,7 +142,7 @@ def test_init(self): self.assertEqual(opts._mongocryptd_bypass_spawn, False) self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) - self.assertEqual(opts._kms_ssl_contexts, {}) + self.assertEqual(opts._kms_tls_options, None) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_spawn_args(self): @@ -165,30 +166,38 @@ def test_init_spawn_args(self): ) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def test_init_kms_tls_options(self): + async def test_init_kms_tls_options(self): # Error cases: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): - AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + AsyncMongoClient(auto_encryption_opts=opts) + tls_opts: Any for tls_opts in [ {"kmip": {"tls": True, "tlsInsecure": True}}, {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, ]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): - opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + AsyncMongoClient(auto_encryption_opts=opts) + opts = AutoEncryptionOpts( + {}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}} + ) with self.assertRaises(FileNotFoundError): - AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}}) + AsyncMongoClient(auto_encryption_opts=opts) # Success cases: tls_opts: Any for tls_opts in [None, {}]: opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) - self.assertEqual(opts._kms_ssl_contexts, {}) + kms_tls_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self.assertEqual(kms_tls_contexts, {}) opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) - ctx = opts._kms_ssl_contexts["kmip"] + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) - ctx = opts._kms_ssl_contexts["aws"] + ctx = _kms_ssl_contexts["aws"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) opts = AutoEncryptionOpts( @@ -196,7 +205,8 @@ def test_init_kms_tls_options(self): "k.d", kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, ) - ctx = opts._kms_ssl_contexts["kmip"] + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) @@ -2225,7 +2235,7 @@ async def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): encryption = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options ) - ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] + ctx = encryption._io_callbacks._kms_ssl_contexts["aws"] if not hasattr(ctx, "check_ocsp_endpoint"): raise self.skipTest("OCSP not enabled") self.assertFalse(ctx.check_ocsp_endpoint) diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py index 4d7566a61d..023ee91680 100644 --- a/test/asynchronous/test_ssl.py +++ b/test/asynchronous/test_ssl.py @@ -43,7 +43,7 @@ from pymongo import AsyncMongoClient, ssl_support from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure from pymongo.hello import HelloCompat -from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context +from pymongo.ssl_support import HAVE_PYSSL, HAVE_SSL, _ssl, get_ssl_context from pymongo.write_concern import WriteConcern _HAVE_PYOPENSSL = False @@ -134,7 +134,7 @@ def test_config_ssl(self): @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") def test_use_pyopenssl_when_available(self): - self.assertTrue(_ssl.IS_PYOPENSSL) + self.assertTrue(HAVE_PYSSL) @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") def test_load_trusted_ca_certs(self): @@ -180,7 +180,7 @@ async def test_tlsCertificateKeyFilePassword(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "SSLContext") and not HAVE_PYSSL: self.assertRaises( ConfigurationError, self.simple_client, @@ -312,13 +312,13 @@ async def test_cert_ssl_validation_hostname_matching(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - ctx = get_ssl_context(None, None, None, None, True, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, True, False, False) + ctx = get_ssl_context(None, None, None, None, True, False, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, False, True, False) + ctx = get_ssl_context(None, None, None, None, False, True, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) self.assertTrue(ctx.check_hostname) response = await self.client.admin.command(HelloCompat.LEGACY_CMD) @@ -379,10 +379,11 @@ async def test_cert_ssl_validation_hostname_matching(self): ) @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_sync @async_client_context.require_no_api_version @ignore_deprecations async def test_tlsCRLFile_support(self): - if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or HAVE_PYSSL: self.assertRaises( ConfigurationError, self.simple_client, @@ -473,7 +474,7 @@ async def test_validation_with_system_ca_certs(self): ) def test_system_certs_config_error(self): - ctx = get_ssl_context(None, None, None, None, True, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( ctx, "load_default_certs" ): @@ -504,11 +505,11 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) finally: @@ -525,11 +526,11 @@ def test_wincertstore(self): if not ssl_support.HAVE_WINCERTSTORE: raise SkipTest("Need wincertstore to test wincertstore.") - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) @@ -663,6 +664,16 @@ def remove(path): ) as client: self.assertTrue(await client.admin.command("ping")) + @async_client_context.require_async + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + async def test_pyopenssl_ignored_in_async(self): + client = AsyncMongoClient( + "mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true" + ) + await client.admin.command("ping") # command doesn't matter, just needs it to connect + await client.close() + if __name__ == "__main__": unittest.main() diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 3d34ff326e..a3e8b0b1d5 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -26,7 +26,7 @@ sys.path[0:0] = [""] import pymongo -from pymongo.ssl_support import HAS_SNI +from pymongo.ssl_support import _has_sni pytestmark = pytest.mark.atlas_connect @@ -57,7 +57,7 @@ def connect(self, uri): # No auth error client.test.test.count_documents({}) - @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + @unittest.skipUnless(_has_sni(True), "Free tier requires SNI support") def test_free_tier(self): self.connect(URIS["ATLAS_FREE"]) @@ -80,7 +80,7 @@ def connect_srv(self, uri): self.connect(uri) self.assertIn("mongodb+srv://", uri) - @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + @unittest.skipUnless(_has_sni(True), "Free tier requires SNI support") def test_srv_free_tier(self): self.connect_srv(URIS["ATLAS_SRV_FREE"]) diff --git a/test/test_encryption.py b/test/test_encryption.py index 5bbf8c8ad8..4b055b68d3 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -41,6 +41,7 @@ from pymongo.daemon import _spawn_daemon from pymongo.synchronous.collection import Collection from pymongo.synchronous.helpers import next +from pymongo.uri_parser_shared import _parse_kms_tls_options try: from pymongo.pyopenssl_context import IS_PYOPENSSL @@ -141,7 +142,7 @@ def test_init(self): self.assertEqual(opts._mongocryptd_bypass_spawn, False) self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) - self.assertEqual(opts._kms_ssl_contexts, {}) + self.assertEqual(opts._kms_tls_options, None) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_spawn_args(self): @@ -167,28 +168,36 @@ def test_init_spawn_args(self): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_kms_tls_options(self): # Error cases: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): - AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + MongoClient(auto_encryption_opts=opts) + tls_opts: Any for tls_opts in [ {"kmip": {"tls": True, "tlsInsecure": True}}, {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, ]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): - opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + MongoClient(auto_encryption_opts=opts) + opts = AutoEncryptionOpts( + {}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}} + ) with self.assertRaises(FileNotFoundError): - AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}}) + MongoClient(auto_encryption_opts=opts) # Success cases: tls_opts: Any for tls_opts in [None, {}]: opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) - self.assertEqual(opts._kms_ssl_contexts, {}) + kms_tls_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self.assertEqual(kms_tls_contexts, {}) opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) - ctx = opts._kms_ssl_contexts["kmip"] + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) - ctx = opts._kms_ssl_contexts["aws"] + ctx = _kms_ssl_contexts["aws"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) opts = AutoEncryptionOpts( @@ -196,7 +205,8 @@ def test_init_kms_tls_options(self): "k.d", kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, ) - ctx = opts._kms_ssl_contexts["kmip"] + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) @@ -2217,7 +2227,7 @@ def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): encryption = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options ) - ctx = encryption._io_callbacks.opts._kms_ssl_contexts["aws"] + ctx = encryption._io_callbacks._kms_ssl_contexts["aws"] if not hasattr(ctx, "check_ocsp_endpoint"): raise self.skipTest("OCSP not enabled") self.assertFalse(ctx.check_ocsp_endpoint) diff --git a/test/test_ssl.py b/test/test_ssl.py index 7decc8203d..93a4b4e6ec 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -43,7 +43,7 @@ from pymongo import MongoClient, ssl_support from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure from pymongo.hello import HelloCompat -from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context +from pymongo.ssl_support import HAVE_PYSSL, HAVE_SSL, _ssl, get_ssl_context from pymongo.write_concern import WriteConcern _HAVE_PYOPENSSL = False @@ -134,7 +134,7 @@ def test_config_ssl(self): @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") def test_use_pyopenssl_when_available(self): - self.assertTrue(_ssl.IS_PYOPENSSL) + self.assertTrue(HAVE_PYSSL) @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") def test_load_trusted_ca_certs(self): @@ -180,7 +180,7 @@ def test_tlsCertificateKeyFilePassword(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "SSLContext") and not HAVE_PYSSL: self.assertRaises( ConfigurationError, self.simple_client, @@ -312,13 +312,13 @@ def test_cert_ssl_validation_hostname_matching(self): # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - ctx = get_ssl_context(None, None, None, None, True, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, True, False, False) + ctx = get_ssl_context(None, None, None, None, True, False, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, False, True, False) + ctx = get_ssl_context(None, None, None, None, False, True, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) self.assertTrue(ctx.check_hostname) response = self.client.admin.command(HelloCompat.LEGACY_CMD) @@ -379,10 +379,11 @@ def test_cert_ssl_validation_hostname_matching(self): ) @client_context.require_tlsCertificateKeyFile + @client_context.require_sync @client_context.require_no_api_version @ignore_deprecations def test_tlsCRLFile_support(self): - if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or HAVE_PYSSL: self.assertRaises( ConfigurationError, self.simple_client, @@ -473,7 +474,7 @@ def test_validation_with_system_ca_certs(self): ) def test_system_certs_config_error(self): - ctx = get_ssl_context(None, None, None, None, True, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( ctx, "load_default_certs" ): @@ -504,11 +505,11 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) finally: @@ -525,11 +526,11 @@ def test_wincertstore(self): if not ssl_support.HAVE_WINCERTSTORE: raise SkipTest("Need wincertstore to test wincertstore.") - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) @@ -663,6 +664,14 @@ def remove(path): ) as client: self.assertTrue(client.admin.command("ping")) + @client_context.require_async + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + def test_pyopenssl_ignored_in_async(self): + client = MongoClient("mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true") + client.admin.command("ping") # command doesn't matter, just needs it to connect + client.close() + if __name__ == "__main__": unittest.main() diff --git a/tools/ocsptest.py b/tools/ocsptest.py index 521d048f79..8596db226d 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -35,6 +35,7 @@ def check_ocsp(host: str, port: int, capath: str) -> None: False, # allow_invalid_certificates False, # allow_invalid_hostnames False, + True, # is sync ) # disable_ocsp_endpoint_check # Ensure we're using pyOpenSSL. From 2ebd2aaecd0794d46ea0cba51ea97a82b97c6545 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Fri, 25 Apr 2025 12:24:22 -0400 Subject: [PATCH 1900/2111] PYTHON-5336 Added VECTOR_SUBTYPE line to API docs (#2313) --- doc/api/bson/binary.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index 084fd02d50..7084a45b4e 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -16,6 +16,7 @@ .. autodata:: MD5_SUBTYPE .. autodata:: COLUMN_SUBTYPE .. autodata:: SENSITIVE_SUBTYPE + .. autodata:: VECTOR_SUBTYPE .. autodata:: USER_DEFINED_SUBTYPE .. autoclass:: UuidRepresentation From 1dc45fddc105153edaa1bcffa721022c27717a0e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 25 Apr 2025 13:27:27 -0400 Subject: [PATCH 1901/2111] PYTHON-5322 - Increase test_streaming_protocol.TestStreamingProtocol test_monitor_waits_after_server_check_error timeout (#2315) --- test/asynchronous/test_streaming_protocol.py | 2 +- test/test_streaming_protocol.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_streaming_protocol.py b/test/asynchronous/test_streaming_protocol.py index 1206e7b2fa..70ec49de80 100644 --- a/test/asynchronous/test_streaming_protocol.py +++ b/test/asynchronous/test_streaming_protocol.py @@ -172,7 +172,7 @@ async def test_monitor_waits_after_server_check_error(self): # 2504ms: application handshake succeeds # 2505ms: ping command succeeds self.assertGreaterEqual(duration, 2) - self.assertLessEqual(duration, 3.5) + self.assertLessEqual(duration, 4.0) @async_client_context.require_failCommand_appName async def test_heartbeat_awaited_flag(self): diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index acf7610c94..927230091f 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -172,7 +172,7 @@ def test_monitor_waits_after_server_check_error(self): # 2504ms: application handshake succeeds # 2505ms: ping command succeeds self.assertGreaterEqual(duration, 2) - self.assertLessEqual(duration, 3.5) + self.assertLessEqual(duration, 4.0) @client_context.require_failCommand_appName def test_heartbeat_awaited_flag(self): From 9a2f5678dee96b138fd4776962280798d4a6621f Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Mon, 28 Apr 2025 11:48:32 -0400 Subject: [PATCH 1902/2111] PYTHON-5353 Pin github actions (#2318) --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/dist.yml | 2 +- .github/workflows/release-python.yml | 4 ++-- .github/workflows/test-python.yml | 30 ++++++++++++++-------------- .github/workflows/zizmor.yml | 4 ++-- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 98cfa2f43f..f7cfea144e 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 81f86721ef..be172864dc 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -60,7 +60,7 @@ jobs: - name: Set up QEMU if: runner.os == 'Linux' - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 with: # setup-qemu-action by default uses `tonistiigi/binfmt:latest` image, # which is out of date. This causes seg faults during build. diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index a684ff79b0..9cce310d91 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -81,14 +81,14 @@ jobs: name: all-dist-${{ github.run_id }} path: dist/ - name: Publish package distributions to TestPyPI - uses: pypa/gh-action-pypi-publish@release/v1 + uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # release/v1 with: repository-url: https://test.pypi.org/legacy/ skip-existing: true attestations: ${{ env.DRY_RUN }} - name: Publish package distributions to PyPI if: startsWith(env.DRY_RUN, 'false') - uses: pypa/gh-action-pypi-publish@release/v1 + uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # release/v1 post-publish: needs: [publish] diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 6d215bed0e..05b87f4f81 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -23,9 +23,9 @@ jobs: with: persist-credentials: false - name: Install just - uses: extractions/setup-just@v3 + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 with: enable-cache: true python-version: "3.9" @@ -63,16 +63,16 @@ jobs: with: persist-credentials: false - name: Install just - uses: extractions/setup-just@v3 + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} - name: Install dependencies run: just install - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.12.0 + uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 with: mongodb-version: 6.0 - name: Run tests @@ -86,14 +86,14 @@ jobs: with: persist-credentials: false - name: Install just - uses: extractions/setup-just@v3 + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 with: enable-cache: true python-version: "3.9" - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.12.0 + uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 with: mongodb-version: '8.0.0-rc4' - name: Install dependencies @@ -111,12 +111,12 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 with: enable-cache: true python-version: "3.9" - name: Install just - uses: extractions/setup-just@v3 + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install dependencies run: just install - name: Build docs @@ -130,12 +130,12 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 with: enable-cache: true python-version: "3.9" - name: Install just - uses: extractions/setup-just@v3 + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install dependencies run: just install - name: Build docs @@ -152,12 +152,12 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 with: enable-cache: true python-version: "${{matrix.python}}" - name: Install just - uses: extractions/setup-just@v3 + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install dependencies run: | just install @@ -211,7 +211,7 @@ jobs: # Test sdist on lowest supported Python python-version: '3.9' - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.12.0 + uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 - name: Run connect test from sdist shell: bash run: | diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 31afeb6655..16f2ba2cbb 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,7 +18,7 @@ jobs: with: persist-credentials: false - name: Setup Rust - uses: actions-rust-lang/setup-rust-toolchain@v1 + uses: actions-rust-lang/setup-rust-toolchain@9d7e65c320fdb52dcd45ffaa68deb6c02c8754d9 # v1 - name: Get zizmor run: cargo install zizmor - name: Run zizmor 🌈 @@ -26,7 +26,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 with: sarif_file: results.sarif category: zizmor From 02c3df6fc99332be87a5c79a9608c98ddb30afe7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 28 Apr 2025 13:59:36 -0500 Subject: [PATCH 1903/2111] PYTHON-5298 Update lock file and clean up dependency installation (#2317) --- .evergreen/scripts/configure-env.sh | 3 ++- .evergreen/scripts/install-dependencies.sh | 26 +++++++++++++--------- uv.lock | 1 - 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh index 81713f4191..8dc328aab3 100755 --- a/.evergreen/scripts/configure-env.sh +++ b/.evergreen/scripts/configure-env.sh @@ -96,7 +96,7 @@ EOT _bin_path="" if [ "Windows_NT" == "${OS:-}" ]; then _bin_path="/cygdrive/c/Python/Current/Scripts" -elif [ "$(uname -s)" != "Darwin" ]; then +elif [ "$(uname -s)" == "Darwin" ]; then _bin_path="/Library/Frameworks/Python.Framework/Versions/Current/bin" else _bin_path="/opt/python/Current/bin" @@ -106,6 +106,7 @@ if [ -d "${_bin_path}" ]; then if [ "Windows_NT" == "${OS:-}" ]; then _suffix=".exe" fi + echo "Symlinking binaries from toolchain" mkdir -p $PYMONGO_BIN_DIR ln -s ${_bin_path}/just${_suffix} $PYMONGO_BIN_DIR/just${_suffix} ln -s ${_bin_path}/uv${_suffix} $PYMONGO_BIN_DIR/uv${_suffix} diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 780d250a2b..ec389690ca 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -10,9 +10,6 @@ if [ -f $HERE/env.sh ]; then . $HERE/env.sh fi -_BIN_DIR=${PYMONGO_BIN_DIR:-$HOME/.local/bin} -export PATH="$PATH:${_BIN_DIR}" - # Helper function to pip install a dependency using a temporary python env. function _pip_install() { _HERE=$(dirname ${BASH_SOURCE:-$0}) @@ -28,23 +25,27 @@ function _pip_install() { if [ "Windows_NT" = "${OS:-}" ]; then _suffix=".exe" fi - ln -s "$(which $2)" $_BIN_DIR/${2}${_suffix} + ln -s "$(which $2)" $PYMONGO_BIN_DIR/${2}${_suffix} # uv also comes with a uvx binary. if [ $2 == "uv" ]; then - ln -s "$(which uvx)" $_BIN_DIR/uvx${_suffix} + ln -s "$(which uvx)" $PYMONGO_BIN_DIR/uvx${_suffix} fi - echo "Installed to ${_BIN_DIR}" + echo "Installed to ${PYMONGO_BIN_DIR}" echo "Installing $2 using pip... done." } - # Ensure just is installed. -if ! command -v just >/dev/null 2>&1; then +if ! command -v just &>/dev/null; then # On most systems we can install directly. _TARGET="" if [ "Windows_NT" = "${OS:-}" ]; then _TARGET="--target x86_64-pc-windows-msvc" fi + if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + echo "Please install just!" + exit 1 + fi + _BIN_DIR=$PYMONGO_BIN_DIR echo "Installing just..." mkdir -p "$_BIN_DIR" 2>/dev/null || true curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { @@ -53,8 +54,13 @@ if ! command -v just >/dev/null 2>&1; then echo "Installing just... done." fi -# Install uv. -if ! command -v uv >/dev/null 2>&1; then +# Ensure uv is installed. +if ! command -v uv &>/dev/null; then + if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + echo "Please install uv!" + exit 1 + fi + _BIN_DIR=$PYMONGO_BIN_DIR echo "Installing uv..." # On most systems we can install directly. curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { diff --git a/uv.lock b/uv.lock index 6bc0839795..aa23663a84 100644 --- a/uv.lock +++ b/uv.lock @@ -998,7 +998,6 @@ sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5 [[package]] name = "pymongo" -version = "4.13.0.dev0" source = { editable = "." } dependencies = [ { name = "dnspython" }, From e7db0e34aadeeccd320683fe33434100ef4c1540 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 29 Apr 2025 08:42:22 -0400 Subject: [PATCH 1904/2111] PYTHON-5342 - Skip async test_srv_polling tests on Windows (#2320) --- test/asynchronous/test_srv_polling.py | 2 ++ test/test_srv_polling.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index 3dcd21ef1d..b40aa90cfa 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -20,6 +20,7 @@ import time from test.utils_shared import FunctionCallRecorder from typing import Any +from unittest import skipIf sys.path[0:0] = [""] @@ -91,6 +92,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.disable() +@skipIf(not _IS_SYNC and sys.platform == "win32", "PYTHON-5342 known issue on Windows") class TestSrvPolling(AsyncPyMongoTestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index df802acb43..0d84d41241 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -20,6 +20,7 @@ import time from test.utils_shared import FunctionCallRecorder from typing import Any +from unittest import skipIf sys.path[0:0] = [""] @@ -91,6 +92,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.disable() +@skipIf(not _IS_SYNC and sys.platform == "win32", "PYTHON-5342 known issue on Windows") class TestSrvPolling(PyMongoTestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), From 08e7f036a22b1ee38bed9793d494b7f5fe174c04 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Tue, 29 Apr 2025 09:26:40 -0400 Subject: [PATCH 1905/2111] PYTHON-5357 Update changelog for 4.12.1 release (#2321) --- doc/changelog.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index db344d4872..87918639cd 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -2,7 +2,7 @@ Changelog ========= -Changes in Version 4.12.1 (XXXX/XX/XX) +Changes in Version 4.12.1 (2025/04/29) -------------------------------------- Version 4.12.1 is a bug fix release. From 85c5ee45b532adc5004dac00d920fa5378f28833 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Wed, 30 Apr 2025 08:45:46 -0400 Subject: [PATCH 1906/2111] PYTHON-5364 Update package description (#2324) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4da75b4c13..fb2dd58131 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "hatchling.build" [project] name = "pymongo" dynamic = ["version", "dependencies", "optional-dependencies"] -description = "Python driver for MongoDB " +description = "PyMongo - the Official MongoDB Python driver" readme = "README.md" license = {file="LICENSE"} requires-python = ">=3.9" From 0ec57781d14160531887f61e4edf083acbef7686 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 1 May 2025 09:08:48 -0500 Subject: [PATCH 1907/2111] PYTHON-5345 Streamline the standard tasks (#2312) --- .evergreen/generated_configs/tasks.yml | 1885 ++++++++++++++------- .evergreen/generated_configs/variants.yml | 86 +- .evergreen/scripts/generate_config.py | 146 +- 3 files changed, 1371 insertions(+), 746 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 32754af9af..504cad4882 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -243,233 +243,6 @@ tasks: TEST_NAME: kms SUB_TEST_NAME: azure-fail - # Load balancer tests - - name: test-load-balancer-auth-ssl-v6.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - VERSION: "6.0" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, auth, ssl] - - name: test-load-balancer-auth-ssl-v7.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - VERSION: "7.0" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, auth, ssl] - - name: test-load-balancer-auth-ssl-v8.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - VERSION: "8.0" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, auth, ssl] - - name: test-load-balancer-auth-ssl-rapid - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - VERSION: rapid - - func: run tests - vars: - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, auth, ssl] - - name: test-load-balancer-auth-ssl-latest - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - VERSION: latest - - func: run tests - vars: - AUTH: auth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, auth, ssl] - - name: test-load-balancer-noauth-ssl-v6.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - VERSION: "6.0" - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, ssl] - - name: test-load-balancer-noauth-ssl-v7.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - VERSION: "7.0" - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, ssl] - - name: test-load-balancer-noauth-ssl-v8.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - VERSION: "8.0" - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, ssl] - - name: test-load-balancer-noauth-ssl-rapid - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - VERSION: rapid - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, ssl] - - name: test-load-balancer-noauth-ssl-latest - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - VERSION: latest - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, ssl] - - name: test-load-balancer-noauth-nossl-v6.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - VERSION: "6.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, nossl] - - name: test-load-balancer-noauth-nossl-v7.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - VERSION: "7.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, nossl] - - name: test-load-balancer-noauth-nossl-v8.0 - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - VERSION: "8.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, nossl] - - name: test-load-balancer-noauth-nossl-rapid - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - VERSION: rapid - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, nossl] - - name: test-load-balancer-noauth-nossl-latest - commands: - - func: run server - vars: - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - VERSION: latest - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TEST_NAME: load_balancer - tags: [load-balancer, noauth, nossl] - # Mod wsgi tests - name: mod-wsgi-replica-set-python3.9 commands: @@ -539,21 +312,65 @@ tasks: - func: run tests vars: PYTHON_VERSION: "3.9" - tags: [no-orchestration, python-3.9] + tags: [test-no-orchestration, python-3.9] - name: test-no-orchestration-python3.13 commands: - func: assume ec2 role - func: run tests vars: PYTHON_VERSION: "3.13" - tags: [no-orchestration, python-3.13] + tags: [test-no-orchestration, python-3.13] - name: test-no-orchestration-pypy3.10 commands: - func: assume ec2 role - func: run tests vars: PYTHON_VERSION: pypy3.10 - tags: [no-orchestration, python-pypy3.10] + tags: [test-no-orchestration, python-pypy3.10] + + # No toolchain tests + - name: test-no-toolchain-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + TEST_NAME: default_sync + tags: [test-no-toolchain, standalone-noauth-nossl] + - name: test-no-toolchain-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_NAME: default_async + tags: [test-no-toolchain, replica_set-noauth-ssl] + - name: test-no-toolchain-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_NAME: default_sync + tags: [test-no-toolchain, sharded_cluster-auth-ssl] # Ocsp tests - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.9 @@ -2505,50 +2322,6 @@ tasks: SUB_TEST_NAME: gke tags: [auth_oidc, auth_oidc_remote] - # Other hosts tests - - name: test-sync-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - TEST_NAME: default_sync - tags: [other-hosts, standalone-noauth-nossl] - - name: test-async-noauth-ssl-replica-set - commands: - - func: run server - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - TEST_NAME: default_async - tags: [other-hosts, replica_set-noauth-ssl] - - name: test-sync-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - TEST_NAME: default_sync - tags: [other-hosts, sharded_cluster-auth-ssl] - # Perf tests - name: perf-8.0-standalone-ssl commands: @@ -7397,181 +7170,236 @@ tasks: - sync_async # Server version tests - - name: test-python3.9-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.9-sync-auth-ssl-standalone-cov commands: - func: run server vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.9" - tags: [server-version, python-3.9, sharded_cluster-auth-ssl] - - name: test-python3.10-auth-ssl-sharded-cluster-cov + TEST_NAME: default_sync + tags: + - server-version + - python-3.9 + - standalone-auth-ssl + - sync + - name: test-server-version-python3.10-async-auth-ssl-standalone-cov commands: - func: run server vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.10" - tags: [server-version, python-3.10, sharded_cluster-auth-ssl] - - name: test-python3.11-auth-ssl-sharded-cluster-cov + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - standalone-auth-ssl + - async + - name: test-server-version-python3.11-sync-auth-nossl-standalone-cov commands: - func: run server vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.11" - tags: [server-version, python-3.11, sharded_cluster-auth-ssl] - - name: test-python3.12-auth-ssl-sharded-cluster-cov + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - standalone-auth-nossl + - sync + - name: test-server-version-python3.12-async-auth-nossl-standalone-cov commands: - func: run server vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.12" - tags: [server-version, python-3.12, sharded_cluster-auth-ssl] - - name: test-python3.13-auth-ssl-sharded-cluster-cov + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - standalone-auth-nossl + - async + - name: test-server-version-python3.13-sync-noauth-ssl-standalone-cov commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.13" - tags: [server-version, python-3.13, sharded_cluster-auth-ssl] - - name: test-pypy3.10-auth-ssl-sharded-cluster + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - standalone-noauth-ssl + - sync + - name: test-server-version-pypy3.10-async-noauth-ssl-standalone commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone PYTHON_VERSION: pypy3.10 - tags: [server-version, python-pypy3.10, sharded_cluster-auth-ssl] - - name: test-python3.9-auth-ssl-standalone-cov + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - standalone-noauth-ssl + - async + - name: test-server-version-python3.9-sync-noauth-nossl-standalone-cov commands: - func: run server vars: - AUTH: auth - SSL: ssl + AUTH: noauth + SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: - AUTH: auth - SSL: ssl + AUTH: noauth + SSL: nossl TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.9" - tags: [server-version, python-3.9, standalone-auth-ssl] - - name: test-python3.10-auth-nossl-standalone-cov + TEST_NAME: default_sync + tags: + - server-version + - python-3.9 + - standalone-noauth-nossl + - sync + - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: nossl TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.10" - tags: [server-version, python-3.10, standalone-auth-nossl] - - name: test-python3.11-noauth-ssl-standalone-cov + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - standalone-noauth-nossl + - async + - name: test-server-version-python3.11-sync-auth-ssl-replica-set-cov commands: - func: run server vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: standalone + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: standalone + TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.11" - tags: [server-version, python-3.11, standalone-noauth-ssl] - - name: test-python3.12-noauth-nossl-standalone-cov + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - replica_set-auth-ssl + - sync + - name: test-server-version-python3.12-async-auth-ssl-replica-set-cov commands: - func: run server vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.12" - tags: [server-version, python-3.12, standalone-noauth-nossl] - - name: test-python3.13-auth-ssl-replica-set-cov + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - replica_set-auth-ssl + - async + - name: test-server-version-python3.13-sync-auth-nossl-replica-set-cov commands: - func: run server vars: AUTH: auth - SSL: ssl + SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl + SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.13" - tags: [server-version, python-3.13, replica_set-auth-ssl] - - name: test-pypy3.10-auth-nossl-replica-set + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - replica_set-auth-nossl + - sync + - name: test-server-version-pypy3.10-async-auth-nossl-replica-set commands: - func: run server vars: @@ -7584,8 +7412,13 @@ tasks: SSL: nossl TOPOLOGY: replica_set PYTHON_VERSION: pypy3.10 - tags: [server-version, python-pypy3.10, replica_set-auth-nossl] - - name: test-python3.9-noauth-ssl-replica-set-cov + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - replica_set-auth-nossl + - async + - name: test-server-version-python3.9-sync-noauth-ssl-replica-set-cov commands: - func: run server vars: @@ -7600,8 +7433,34 @@ tasks: TOPOLOGY: replica_set COVERAGE: "1" PYTHON_VERSION: "3.9" - tags: [server-version, python-3.9, replica_set-noauth-ssl] - - name: test-python3.10-noauth-nossl-replica-set-cov + TEST_NAME: default_sync + tags: + - server-version + - python-3.9 + - replica_set-noauth-ssl + - sync + - name: test-server-version-python3.10-async-noauth-ssl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - replica_set-noauth-ssl + - async + - name: test-server-version-python3.11-sync-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -7615,9 +7474,75 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" - tags: [server-version, python-3.10, replica_set-noauth-nossl] - - name: test-python3.12-auth-nossl-sharded-cluster-cov + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - replica_set-noauth-nossl + - sync + - name: test-server-version-python3.12-async-noauth-nossl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - replica_set-noauth-nossl + - async + - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.9-sync-auth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -7631,9 +7556,35 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" - tags: [server-version, python-3.12, sharded_cluster-auth-nossl] - - name: test-python3.13-noauth-ssl-sharded-cluster-cov + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - server-version + - python-3.9 + - sharded_cluster-auth-nossl + - sync + - name: test-server-version-python3.10-async-auth-nossl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - sharded_cluster-auth-nossl + - async + - name: test-server-version-python3.11-sync-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -7647,555 +7598,1073 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" - tags: [server-version, python-3.13, sharded_cluster-noauth-ssl] - - name: test-pypy3.10-noauth-nossl-sharded-cluster + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - sharded_cluster-noauth-ssl + - sync + - name: test-server-version-python3.12-async-noauth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - sharded_cluster-noauth-ssl + - async + - name: test-server-version-python3.13-sync-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 - tags: [server-version, python-pypy3.10, sharded_cluster-noauth-nossl] - - # Serverless tests - - name: test-serverless + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - sharded_cluster-noauth-nossl + - sync + - name: test-server-version-pypy3.10-async-noauth-nossl-sharded-cluster commands: - - func: run tests + - func: run server vars: - TEST_NAME: serverless + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-noauth-nossl + - async + - name: test-server-version-python3.9-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - server-version + - python-3.9 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.9-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.9" + TEST_NAME: default_async + tags: + - server-version + - python-3.9 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.10-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - server-version + - python-3.10 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - server-version + - python-3.11 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - server-version + - python-3.12 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - server-version + - python-3.13 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-auth-ssl + - sync + + # Serverless tests + - name: test-serverless + commands: + - func: run tests + vars: + TEST_NAME: serverless AUTH: auth SSL: ssl tags: [serverless] - # Standard linux tests - - name: test-v4.0-python3.9-noauth-nossl-standalone + # Standard tests + - name: test-standard-v4.0-python3.9-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.0 + - python-3.9 + - standalone-noauth-nossl + - sync + - name: test-standard-v4.0-python3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - test-standard + - server-4.0 + - python-3.10 + - replica_set-noauth-ssl + - async + - name: test-standard-v4.0-python3.11-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.0 + - python-3.11 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v4.2-python3.12-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - test-standard + - server-4.2 + - python-3.12 + - standalone-noauth-nossl + - async + - name: test-standard-v4.2-python3.13-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.13 + - replica_set-noauth-ssl + - sync + - name: test-standard-v4.2-python3.9-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: "3.9" + TEST_NAME: default_async + tags: + - test-standard + - server-4.2 + - python-3.9 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v4.4-python3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.4 + - python-3.10 + - standalone-noauth-nossl + - sync + - name: test-standard-v4.4-python3.11-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.11 + - replica_set-noauth-ssl + - async + - name: test-standard-v4.4-python3.12-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.4 + - python-3.12 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v5.0-python3.13-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" - PYTHON_VERSION: "3.9" + VERSION: "5.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async tags: - - standard-linux - - server-4.0 - - python-3.9 + - test-standard + - server-5.0 + - python-3.13 - standalone-noauth-nossl - - name: test-v4.0-python3.10-noauth-ssl-replica-set + - async + - name: test-standard-v5.0-python3.9-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.0" + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.0" - PYTHON_VERSION: "3.10" + VERSION: "5.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync tags: - - standard-linux - - server-4.0 - - python-3.10 + - test-standard + - server-5.0 + - python-3.9 - replica_set-noauth-ssl - - name: test-v4.0-python3.11-auth-ssl-sharded-cluster + - sync + - name: test-standard-v5.0-python3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.0" + VERSION: "5.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.0" - PYTHON_VERSION: "3.11" + VERSION: "5.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async tags: - - standard-linux - - server-4.0 - - python-3.11 + - test-standard + - server-5.0 + - python-3.10 - sharded_cluster-auth-ssl - - name: test-v4.2-python3.12-noauth-nossl-standalone + - async + - name: test-standard-v6.0-python3.11-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.2" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.2" - PYTHON_VERSION: "3.12" + VERSION: "6.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync tags: - - standard-linux - - server-4.2 - - python-3.12 + - test-standard + - server-6.0 + - python-3.11 - standalone-noauth-nossl - - name: test-v4.2-python3.13-noauth-ssl-replica-set + - sync + - name: test-standard-v6.0-python3.12-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" - PYTHON_VERSION: "3.13" + VERSION: "6.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async tags: - - standard-linux - - server-4.2 - - python-3.13 + - test-standard + - server-6.0 + - python-3.12 - replica_set-noauth-ssl - - name: test-v4.2-pypy3.10-auth-ssl-sharded-cluster + - async + - name: test-standard-v6.0-python3.13-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.2" + VERSION: "6.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + VERSION: "6.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync tags: - - standard-linux - - server-4.2 - - python-pypy3.10 + - test-standard + - server-6.0 + - python-3.13 - sharded_cluster-auth-ssl - - name: test-v4.4-python3.9-noauth-nossl-standalone + - sync + - name: test-standard-v7.0-python3.9-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.4" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.4" + VERSION: "7.0" PYTHON_VERSION: "3.9" + TEST_NAME: default_async tags: - - standard-linux - - server-4.4 + - test-standard + - server-7.0 - python-3.9 - standalone-noauth-nossl - - name: test-v4.4-python3.10-noauth-ssl-replica-set + - async + - name: test-standard-v7.0-python3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.4" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.4" + VERSION: "7.0" PYTHON_VERSION: "3.10" + TEST_NAME: default_sync tags: - - standard-linux - - server-4.4 + - test-standard + - server-7.0 - python-3.10 - replica_set-noauth-ssl - - name: test-v4.4-python3.11-auth-ssl-sharded-cluster + - sync + - name: test-standard-v7.0-python3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "7.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "7.0" PYTHON_VERSION: "3.11" + TEST_NAME: default_async tags: - - standard-linux - - server-4.4 + - test-standard + - server-7.0 - python-3.11 - sharded_cluster-auth-ssl - - name: test-v5.0-python3.12-noauth-nossl-standalone + - async + - name: test-standard-v8.0-python3.12-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "8.0" PYTHON_VERSION: "3.12" + TEST_NAME: default_sync tags: - - standard-linux - - server-5.0 + - test-standard + - server-8.0 - python-3.12 - standalone-noauth-nossl - - name: test-v5.0-python3.13-noauth-ssl-replica-set + - sync + - name: test-standard-v8.0-python3.13-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "5.0" + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "5.0" + VERSION: "8.0" PYTHON_VERSION: "3.13" + TEST_NAME: default_async tags: - - standard-linux - - server-5.0 + - test-standard + - server-8.0 - python-3.13 - replica_set-noauth-ssl - - name: test-v5.0-pypy3.10-auth-ssl-sharded-cluster + - async + - name: test-standard-v8.0-python3.9-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "5.0" + VERSION: "8.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + VERSION: "8.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync tags: - - standard-linux - - server-5.0 - - python-pypy3.10 + - test-standard + - server-8.0 + - python-3.9 - sharded_cluster-auth-ssl - - name: test-v6.0-python3.9-noauth-nossl-standalone + - sync + - name: test-standard-rapid-python3.10-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" - PYTHON_VERSION: "3.9" + VERSION: rapid + PYTHON_VERSION: "3.10" + TEST_NAME: default_async tags: - - standard-linux - - server-6.0 - - python-3.9 + - test-standard + - server-rapid + - python-3.10 - standalone-noauth-nossl - - name: test-v6.0-python3.10-noauth-ssl-replica-set + - async + - name: test-standard-rapid-python3.11-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" - PYTHON_VERSION: "3.10" + VERSION: rapid + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync tags: - - standard-linux - - server-6.0 - - python-3.10 + - test-standard + - server-rapid + - python-3.11 - replica_set-noauth-ssl - - name: test-v6.0-python3.11-auth-ssl-sharded-cluster + - sync + - name: test-standard-rapid-python3.12-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "6.0" + VERSION: rapid - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "6.0" - PYTHON_VERSION: "3.11" + VERSION: rapid + PYTHON_VERSION: "3.12" + TEST_NAME: default_async tags: - - standard-linux - - server-6.0 - - python-3.11 + - test-standard + - server-rapid + - python-3.12 - sharded_cluster-auth-ssl - - name: test-v7.0-python3.12-noauth-nossl-standalone + - async + - name: test-standard-latest-python3.13-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" + VERSION: latest - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" - PYTHON_VERSION: "3.12" + VERSION: latest + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync tags: - - standard-linux - - server-7.0 - - python-3.12 + - test-standard + - server-latest + - python-3.13 - standalone-noauth-nossl - - name: test-v7.0-python3.13-noauth-ssl-replica-set + - sync + - name: test-standard-latest-python3.9-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" + VERSION: latest - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" - PYTHON_VERSION: "3.13" + VERSION: latest + PYTHON_VERSION: "3.9" + TEST_NAME: default_async tags: - - standard-linux - - server-7.0 - - python-3.13 + - test-standard + - server-latest + - python-3.9 - replica_set-noauth-ssl - - name: test-v7.0-pypy3.10-auth-ssl-sharded-cluster + - async + - name: test-standard-latest-python3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: latest - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + VERSION: latest + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync tags: - - standard-linux - - server-7.0 - - python-pypy3.10 + - test-standard + - server-latest + - python-3.10 - sharded_cluster-auth-ssl - - name: test-v8.0-python3.9-noauth-nossl-standalone + - sync + - name: test-standard-v4.0-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: "4.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" - PYTHON_VERSION: "3.9" + VERSION: "4.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - - standard-linux - - server-8.0 - - python-3.9 + - test-standard + - server-4.0 + - python-pypy3.10 - standalone-noauth-nossl - - name: test-v8.0-python3.10-noauth-ssl-replica-set + - sync + - pypy + - name: test-standard-v4.2-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "8.0" + VERSION: "4.2" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "8.0" - PYTHON_VERSION: "3.10" + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async tags: - - standard-linux - - server-8.0 - - python-3.10 + - test-standard + - server-4.2 + - python-pypy3.10 - replica_set-noauth-ssl - - name: test-v8.0-python3.11-auth-ssl-sharded-cluster + - async + - pypy + - name: test-standard-v4.4-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" + VERSION: "4.4" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" - PYTHON_VERSION: "3.11" + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - - standard-linux - - server-8.0 - - python-3.11 + - test-standard + - server-4.4 + - python-pypy3.10 - sharded_cluster-auth-ssl - - name: test-rapid-python3.12-noauth-nossl-standalone + - sync + - pypy + - name: test-standard-v5.0-pypy3.10-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: rapid + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: rapid - PYTHON_VERSION: "3.12" + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async tags: - - standard-linux - - server-rapid - - python-3.12 + - test-standard + - server-5.0 + - python-pypy3.10 - standalone-noauth-nossl - - name: test-rapid-python3.13-noauth-ssl-replica-set + - async + - pypy + - name: test-standard-v6.0-pypy3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: rapid + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: rapid - PYTHON_VERSION: "3.13" + VERSION: "6.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - - standard-linux - - server-rapid - - python-3.13 + - test-standard + - server-6.0 + - python-pypy3.10 - replica_set-noauth-ssl - - name: test-rapid-pypy3.10-auth-ssl-sharded-cluster + - sync + - pypy + - name: test-standard-v7.0-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: rapid + VERSION: "7.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: rapid + VERSION: "7.0" PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async tags: - - standard-linux - - server-rapid + - test-standard + - server-7.0 - python-pypy3.10 - sharded_cluster-auth-ssl - - name: test-latest-python3.9-noauth-nossl-standalone + - async + - pypy + - name: test-standard-v8.0-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: latest + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: latest - PYTHON_VERSION: "3.9" + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - - standard-linux - - server-latest - - python-3.9 + - test-standard + - server-8.0 + - python-pypy3.10 - standalone-noauth-nossl - - name: test-latest-python3.10-noauth-ssl-replica-set + - sync + - pypy + - name: test-standard-rapid-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: latest + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: latest - PYTHON_VERSION: "3.10" + VERSION: rapid + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async tags: - - standard-linux - - server-latest - - python-3.10 + - test-standard + - server-rapid + - python-pypy3.10 - replica_set-noauth-ssl - - name: test-latest-python3.11-auth-ssl-sharded-cluster + - async + - pypy + - name: test-standard-latest-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8209,15 +8678,18 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.11" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - - standard-linux + - test-standard - server-latest - - python-3.11 + - python-pypy3.10 - sharded_cluster-auth-ssl + - sync + - pypy - # Standard non linux tests - - name: test-v4.0-python3.9-sync-noauth-nossl-standalone + # Test non standard tests + - name: test-non-standard-v4.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -8232,14 +8704,12 @@ tasks: TOPOLOGY: standalone VERSION: "4.0" PYTHON_VERSION: "3.9" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-4.0 - python-3.9 - standalone-noauth-nossl - - sync - - name: test-v4.0-python3.10-async-noauth-ssl-replica-set + - name: test-non-standard-v4.0-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -8254,14 +8724,12 @@ tasks: TOPOLOGY: replica_set VERSION: "4.0" PYTHON_VERSION: "3.10" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-4.0 - python-3.10 - replica_set-noauth-ssl - - async - - name: test-v4.0-python3.11-sync-auth-ssl-sharded-cluster + - name: test-non-standard-v4.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8276,14 +8744,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "4.0" PYTHON_VERSION: "3.11" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-4.0 - python-3.11 - sharded_cluster-auth-ssl - - sync - - name: test-v4.2-python3.12-async-noauth-nossl-standalone + - name: test-non-standard-v4.2-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -8298,14 +8764,12 @@ tasks: TOPOLOGY: standalone VERSION: "4.2" PYTHON_VERSION: "3.12" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-4.2 - python-3.12 - standalone-noauth-nossl - - async - - name: test-v4.2-python3.13-sync-noauth-ssl-replica-set + - name: test-non-standard-v4.2-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -8320,14 +8784,12 @@ tasks: TOPOLOGY: replica_set VERSION: "4.2" PYTHON_VERSION: "3.13" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-4.2 - python-3.13 - replica_set-noauth-ssl - - sync - - name: test-v4.2-python3.9-async-auth-ssl-sharded-cluster + - name: test-non-standard-v4.2-python3.9-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8342,14 +8804,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "4.2" PYTHON_VERSION: "3.9" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-4.2 - python-3.9 - sharded_cluster-auth-ssl - - async - - name: test-v4.4-python3.10-sync-noauth-nossl-standalone + - name: test-non-standard-v4.4-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -8364,14 +8824,12 @@ tasks: TOPOLOGY: standalone VERSION: "4.4" PYTHON_VERSION: "3.10" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-4.4 - python-3.10 - standalone-noauth-nossl - - sync - - name: test-v4.4-python3.11-async-noauth-ssl-replica-set + - name: test-non-standard-v4.4-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -8386,14 +8844,12 @@ tasks: TOPOLOGY: replica_set VERSION: "4.4" PYTHON_VERSION: "3.11" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-4.4 - python-3.11 - replica_set-noauth-ssl - - async - - name: test-v4.4-python3.12-sync-auth-ssl-sharded-cluster + - name: test-non-standard-v4.4-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8408,14 +8864,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "4.4" PYTHON_VERSION: "3.12" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-4.4 - python-3.12 - sharded_cluster-auth-ssl - - sync - - name: test-v5.0-python3.13-async-noauth-nossl-standalone + - name: test-non-standard-v5.0-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -8430,14 +8884,12 @@ tasks: TOPOLOGY: standalone VERSION: "5.0" PYTHON_VERSION: "3.13" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-5.0 - python-3.13 - standalone-noauth-nossl - - async - - name: test-v5.0-python3.9-sync-noauth-ssl-replica-set + - name: test-non-standard-v5.0-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -8452,14 +8904,12 @@ tasks: TOPOLOGY: replica_set VERSION: "5.0" PYTHON_VERSION: "3.9" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-5.0 - python-3.9 - replica_set-noauth-ssl - - sync - - name: test-v5.0-python3.10-async-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-python3.10-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8474,14 +8924,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "5.0" PYTHON_VERSION: "3.10" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-5.0 - python-3.10 - sharded_cluster-auth-ssl - - async - - name: test-v6.0-python3.11-sync-noauth-nossl-standalone + - name: test-non-standard-v6.0-python3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -8496,14 +8944,12 @@ tasks: TOPOLOGY: standalone VERSION: "6.0" PYTHON_VERSION: "3.11" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-6.0 - python-3.11 - standalone-noauth-nossl - - sync - - name: test-v6.0-python3.12-async-noauth-ssl-replica-set + - name: test-non-standard-v6.0-python3.12-noauth-ssl-replica-set commands: - func: run server vars: @@ -8518,14 +8964,12 @@ tasks: TOPOLOGY: replica_set VERSION: "6.0" PYTHON_VERSION: "3.12" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-6.0 - python-3.12 - replica_set-noauth-ssl - - async - - name: test-v6.0-python3.13-sync-auth-ssl-sharded-cluster + - name: test-non-standard-v6.0-python3.13-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8540,14 +8984,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "6.0" PYTHON_VERSION: "3.13" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-6.0 - python-3.13 - sharded_cluster-auth-ssl - - sync - - name: test-v7.0-python3.9-async-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -8562,14 +9004,12 @@ tasks: TOPOLOGY: standalone VERSION: "7.0" PYTHON_VERSION: "3.9" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-7.0 - python-3.9 - standalone-noauth-nossl - - async - - name: test-v7.0-python3.10-sync-noauth-ssl-replica-set + - name: test-non-standard-v7.0-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -8584,14 +9024,12 @@ tasks: TOPOLOGY: replica_set VERSION: "7.0" PYTHON_VERSION: "3.10" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-7.0 - python-3.10 - replica_set-noauth-ssl - - sync - - name: test-v7.0-python3.11-async-auth-ssl-sharded-cluster + - name: test-non-standard-v7.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8606,14 +9044,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "7.0" PYTHON_VERSION: "3.11" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-7.0 - python-3.11 - sharded_cluster-auth-ssl - - async - - name: test-v8.0-python3.12-sync-noauth-nossl-standalone + - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -8628,14 +9064,12 @@ tasks: TOPOLOGY: standalone VERSION: "8.0" PYTHON_VERSION: "3.12" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-8.0 - python-3.12 - standalone-noauth-nossl - - sync - - name: test-v8.0-python3.13-async-noauth-ssl-replica-set + - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -8650,14 +9084,12 @@ tasks: TOPOLOGY: replica_set VERSION: "8.0" PYTHON_VERSION: "3.13" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-8.0 - python-3.13 - replica_set-noauth-ssl - - async - - name: test-v8.0-python3.9-sync-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-python3.9-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8672,14 +9104,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: "8.0" PYTHON_VERSION: "3.9" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-8.0 - python-3.9 - sharded_cluster-auth-ssl - - sync - - name: test-rapid-python3.10-async-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -8694,14 +9124,12 @@ tasks: TOPOLOGY: standalone VERSION: rapid PYTHON_VERSION: "3.10" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-rapid - python-3.10 - standalone-noauth-nossl - - async - - name: test-rapid-python3.11-sync-noauth-ssl-replica-set + - name: test-non-standard-rapid-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -8716,14 +9144,12 @@ tasks: TOPOLOGY: replica_set VERSION: rapid PYTHON_VERSION: "3.11" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-rapid - python-3.11 - replica_set-noauth-ssl - - sync - - name: test-rapid-python3.12-async-auth-ssl-sharded-cluster + - name: test-non-standard-rapid-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8738,14 +9164,12 @@ tasks: TOPOLOGY: sharded_cluster VERSION: rapid PYTHON_VERSION: "3.12" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-rapid - python-3.12 - sharded_cluster-auth-ssl - - async - - name: test-latest-python3.13-sync-noauth-nossl-standalone + - name: test-non-standard-latest-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -8760,14 +9184,12 @@ tasks: TOPOLOGY: standalone VERSION: latest PYTHON_VERSION: "3.13" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-latest - python-3.13 - standalone-noauth-nossl - - sync - - name: test-latest-python3.9-async-noauth-ssl-replica-set + - name: test-non-standard-latest-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -8782,14 +9204,12 @@ tasks: TOPOLOGY: replica_set VERSION: latest PYTHON_VERSION: "3.9" - TEST_NAME: default_async tags: - - standard-non-linux + - test-non-standard - server-latest - python-3.9 - replica_set-noauth-ssl - - async - - name: test-latest-python3.10-sync-auth-ssl-sharded-cluster + - name: test-non-standard-latest-python3.10-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -8804,10 +9224,197 @@ tasks: TOPOLOGY: sharded_cluster VERSION: latest PYTHON_VERSION: "3.10" - TEST_NAME: default_sync tags: - - standard-non-linux + - test-non-standard - server-latest - python-3.10 - sharded_cluster-auth-ssl - - sync + - name: test-non-standard-v4.0-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.0 + - python-pypy3.10 + - standalone-noauth-nossl + - pypy + - name: test-non-standard-v4.2-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.2 + - python-pypy3.10 + - replica_set-noauth-ssl + - pypy + - name: test-non-standard-v4.4-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.4 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - pypy + - name: test-non-standard-v5.0-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-5.0 + - python-pypy3.10 + - standalone-noauth-nossl + - pypy + - name: test-non-standard-v6.0-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-6.0 + - python-pypy3.10 + - replica_set-noauth-ssl + - pypy + - name: test-non-standard-v7.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-7.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - pypy + - name: test-non-standard-v8.0-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-8.0 + - python-pypy3.10 + - standalone-noauth-nossl + - pypy + - name: test-non-standard-rapid-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-rapid + - python-pypy3.10 + - replica_set-noauth-ssl + - pypy + - name: test-non-standard-latest-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-latest + - python-pypy3.10 + - sharded_cluster-auth-ssl + - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 8ba16273de..3cb14c716d 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -2,7 +2,7 @@ buildvariants: # Alternative hosts tests - name: openssl-1.0.2-rhel7-v5.0-python3.9 tasks: - - name: .other-hosts + - name: .test-no-toolchain display_name: OpenSSL 1.0.2 RHEL7 v5.0 Python3.9 run_on: - rhel79-small @@ -13,7 +13,7 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: other-hosts-rhel9-fips-latest tasks: - - name: .other-hosts + - name: .test-no-toolchain display_name: Other hosts RHEL9-FIPS latest run_on: - rhel92-fips @@ -24,7 +24,7 @@ buildvariants: REQUIRE_FIPS: "1" - name: other-hosts-rhel8-zseries-latest tasks: - - name: .other-hosts + - name: .test-no-toolchain display_name: Other hosts RHEL8-zseries latest run_on: - rhel8-zseries-small @@ -34,7 +34,7 @@ buildvariants: NO_EXT: "1" - name: other-hosts-rhel8-power8-latest tasks: - - name: .other-hosts + - name: .test-no-toolchain display_name: Other hosts RHEL8-POWER8 latest run_on: - rhel8-power-small @@ -44,7 +44,7 @@ buildvariants: NO_EXT: "1" - name: other-hosts-rhel8-arm64-latest tasks: - - name: .other-hosts + - name: .test-no-toolchain display_name: Other hosts RHEL8-arm64 latest run_on: - rhel82-arm64-small @@ -54,7 +54,7 @@ buildvariants: NO_EXT: "1" - name: other-hosts-amazon2023-latest tasks: - - name: .other-hosts + - name: .test-no-toolchain display_name: Other hosts Amazon2023 latest run_on: - amazon2023-arm64-latest-large-m8g @@ -66,7 +66,7 @@ buildvariants: # Atlas connect tests - name: atlas-connect-rhel8 tasks: - - name: .no-orchestration + - name: .test-no-orchestration display_name: Atlas connect RHEL8 run_on: - rhel87-small @@ -74,7 +74,7 @@ buildvariants: # Atlas data lake tests - name: atlas-data-lake-ubuntu-22 tasks: - - name: .no-orchestration + - name: .test-no-orchestration display_name: Atlas Data Lake Ubuntu-22 run_on: - ubuntu2204-small @@ -120,7 +120,7 @@ buildvariants: # Compression tests - name: compression-snappy-rhel8 tasks: - - name: .standard-linux + - name: .test-standard display_name: Compression snappy RHEL8 run_on: - rhel87-small @@ -128,7 +128,7 @@ buildvariants: COMPRESSOR: snappy - name: compression-zlib-rhel8 tasks: - - name: .standard-linux + - name: .test-standard display_name: Compression zlib RHEL8 run_on: - rhel87-small @@ -136,7 +136,7 @@ buildvariants: COMPRESSOR: zlib - name: compression-zstd-rhel8 tasks: - - name: .standard-linux !.server-4.0 + - name: .test-standard !.server-4.0 display_name: Compression zstd RHEL8 run_on: - rhel87-small @@ -167,7 +167,7 @@ buildvariants: # Doctests tests - name: doctests-rhel8 tasks: - - name: .standard-linux .standalone-noauth-nossl + - name: .test-non-standard .standalone-noauth-nossl display_name: Doctests RHEL8 run_on: - rhel87-small @@ -468,7 +468,7 @@ buildvariants: # Green framework tests - name: green-eventlet-rhel8 tasks: - - name: .standard-linux .standalone-noauth-nossl .python-3.9 + - name: .test-standard .standalone-noauth-nossl .python-3.9 display_name: Green Eventlet RHEL8 run_on: - rhel87-small @@ -478,7 +478,7 @@ buildvariants: SSL: ssl - name: green-gevent-rhel8 tasks: - - name: .standard-linux .standalone-noauth-nossl + - name: .test-standard .standalone-noauth-nossl display_name: Green Gevent RHEL8 run_on: - rhel87-small @@ -511,16 +511,22 @@ buildvariants: # Load balancer tests - name: load-balancer tasks: - - name: .load-balancer + - name: .test-non-standard .server-6.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-7.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-8.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-rapid .sharded_cluster-auth-ssl + - name: .test-non-standard .server-latest .sharded_cluster-auth-ssl display_name: Load Balancer run_on: - rhel87-small batchtime: 10080 + expansions: + TEST_NAME: load_balancer # Mockupdb tests - name: mockupdb-rhel8 tasks: - - name: .no-orchestration + - name: .test-no-orchestration display_name: MockupDB RHEL8 run_on: - rhel87-small @@ -540,7 +546,7 @@ buildvariants: # No c ext tests - name: no-c-ext-rhel8 tasks: - - name: .standard-linux + - name: .test-standard display_name: No C Ext RHEL8 run_on: - rhel87-small @@ -548,7 +554,7 @@ buildvariants: # No server tests - name: no-server-rhel8 tasks: - - name: .no-orchestration + - name: .test-no-orchestration display_name: No server RHEL8 run_on: - rhel87-small @@ -782,12 +788,12 @@ buildvariants: # Stable api tests - name: stable-api-require-v1-rhel8-auth tasks: - - name: .standard-linux !.replica_set-noauth-ssl .server-5.0 - - name: .standard-linux !.replica_set-noauth-ssl .server-6.0 - - name: .standard-linux !.replica_set-noauth-ssl .server-7.0 - - name: .standard-linux !.replica_set-noauth-ssl .server-8.0 - - name: .standard-linux !.replica_set-noauth-ssl .server-rapid - - name: .standard-linux !.replica_set-noauth-ssl .server-latest + - name: .test-standard !.replica_set-noauth-ssl .server-5.0 + - name: .test-standard !.replica_set-noauth-ssl .server-6.0 + - name: .test-standard !.replica_set-noauth-ssl .server-7.0 + - name: .test-standard !.replica_set-noauth-ssl .server-8.0 + - name: .test-standard !.replica_set-noauth-ssl .server-rapid + - name: .test-standard !.replica_set-noauth-ssl .server-latest display_name: Stable API require v1 RHEL8 Auth run_on: - rhel87-small @@ -798,12 +804,12 @@ buildvariants: tags: [versionedApi_tag] - name: stable-api-accept-v2-rhel8-auth tasks: - - name: .standard-linux .server-5.0 .standalone-noauth-nossl - - name: .standard-linux .server-6.0 .standalone-noauth-nossl - - name: .standard-linux .server-7.0 .standalone-noauth-nossl - - name: .standard-linux .server-8.0 .standalone-noauth-nossl - - name: .standard-linux .server-rapid .standalone-noauth-nossl - - name: .standard-linux .server-latest .standalone-noauth-nossl + - name: .test-standard .server-5.0 .standalone-noauth-nossl + - name: .test-standard .server-6.0 .standalone-noauth-nossl + - name: .test-standard .server-7.0 .standalone-noauth-nossl + - name: .test-standard .server-8.0 .standalone-noauth-nossl + - name: .test-standard .server-rapid .standalone-noauth-nossl + - name: .test-standard .server-latest .standalone-noauth-nossl display_name: Stable API accept v2 RHEL8 Auth run_on: - rhel87-small @@ -815,32 +821,32 @@ buildvariants: # Standard nonlinux tests - name: test-macos tasks: - - name: .standard-non-linux + - name: .test-standard !.pypy display_name: "* Test macOS" run_on: - macos-14 tags: [standard-non-linux] - name: test-macos-arm64 tasks: - - name: .standard-non-linux .server-6.0 - - name: .standard-non-linux .server-7.0 - - name: .standard-non-linux .server-8.0 - - name: .standard-non-linux .server-rapid - - name: .standard-non-linux .server-latest + - name: .test-standard !.pypy .server-6.0 + - name: .test-standard !.pypy .server-7.0 + - name: .test-standard !.pypy .server-8.0 + - name: .test-standard !.pypy .server-rapid + - name: .test-standard !.pypy .server-latest display_name: "* Test macOS Arm64" run_on: - macos-14-arm64 tags: [standard-non-linux] - name: test-win64 tasks: - - name: .standard-non-linux + - name: .test-standard !.pypy display_name: "* Test Win64" run_on: - windows-64-vsMulti-small tags: [standard-non-linux] - name: test-win32 tasks: - - name: .standard-non-linux + - name: .test-standard !.pypy display_name: "* Test Win32" run_on: - windows-64-vsMulti-small @@ -851,7 +857,7 @@ buildvariants: # Storage engine tests - name: storage-inmemory-rhel8 tasks: - - name: .standard-linux .standalone-noauth-nossl + - name: .test-standard .standalone-noauth-nossl display_name: Storage InMemory RHEL8 run_on: - rhel87-small @@ -859,7 +865,7 @@ buildvariants: STORAGE_ENGINE: inmemory - name: storage-mmapv1-rhel8 tasks: - - name: .standard-linux !.sharded_cluster-auth-ssl .server-4.0 + - name: .test-standard !.sharded_cluster-auth-ssl .server-4.0 display_name: Storage MMAPv1 RHEL8 run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index be1a960db2..a0e39e2579 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -90,11 +90,11 @@ def create_standard_nonlinux_variants() -> list[BuildVariant]: # Test a subset on each of the other platforms. for host_name in ("macos", "macos-arm64", "win64", "win32"): - tasks = [".standard-non-linux"] + tasks = [".test-standard !.pypy"] # MacOS arm64 only works on server versions 6.0+ if host_name == "macos-arm64": tasks = [ - f".standard-non-linux .server-{version}" for version in get_versions_from("6.0") + f".test-standard !.pypy .server-{version}" for version in get_versions_from("6.0") ] host = HOSTS[host_name] tags = ["standard-non-linux"] @@ -203,10 +203,18 @@ def get_encryption_expansions(encryption): def create_load_balancer_variants(): - # Load balancer tests - run all supported server versions using the lowest supported python. + tasks = [ + f".test-non-standard .server-{v} .sharded_cluster-auth-ssl" + for v in get_versions_from("6.0") + ] + expansions = dict(TEST_NAME="load_balancer") return [ create_variant( - [".load-balancer"], "Load Balancer", host=DEFAULT_HOST, batchtime=BATCHTIME_WEEK + tasks, + "Load Balancer", + host=DEFAULT_HOST, + batchtime=BATCHTIME_WEEK, + expansions=expansions, ) ] @@ -218,9 +226,9 @@ def create_compression_variants(): for compressor in "snappy", "zlib", "zstd": expansions = dict(COMPRESSOR=compressor) if compressor == "zstd": - tasks = [".standard-linux !.server-4.0"] + tasks = [".test-standard !.server-4.0"] else: - tasks = [".standard-linux"] + tasks = [".test-standard"] display_name = get_variant_name(f"Compression {compressor}", host) variants.append( create_variant( @@ -296,11 +304,11 @@ def create_storage_engine_variants(): for engine in engines: expansions = dict(STORAGE_ENGINE=engine.lower()) if engine == engines[0]: - tasks = [".standard-linux .standalone-noauth-nossl"] + tasks = [".test-standard .standalone-noauth-nossl"] else: # MongoDB 4.2 drops support for MMAPv1 versions = get_versions_until("4.0") - tasks = [f".standard-linux !.sharded_cluster-auth-ssl .server-{v}" for v in versions] + tasks = [f".test-standard !.sharded_cluster-auth-ssl .server-{v}" for v in versions] display_name = get_variant_name(f"Storage {engine}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) @@ -324,7 +332,7 @@ def create_stable_api_variants(): # MONGODB_API_VERSION is the apiVersion to use in the test suite. expansions["MONGODB_API_VERSION"] = "1" tasks = [ - f".standard-linux !.replica_set-noauth-ssl .server-{v}" + f".test-standard !.replica_set-noauth-ssl .server-{v}" for v in get_versions_from("5.0") ] else: @@ -333,7 +341,7 @@ def create_stable_api_variants(): # clients created in the test suite. expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" tasks = [ - f".standard-linux .server-{v} .standalone-noauth-nossl" + f".test-standard .server-{v} .standalone-noauth-nossl" for v in get_versions_from("5.0") ] base_display_name = f"Stable API {test_type}" @@ -348,11 +356,11 @@ def create_green_framework_variants(): variants = [] host = DEFAULT_HOST for framework in ["eventlet", "gevent"]: - tasks = [".standard-linux .standalone-noauth-nossl"] + tasks = [".test-standard .standalone-noauth-nossl"] if framework == "eventlet": # Eventlet has issues with dnspython > 2.0 and newer versions of CPython # https://jira.mongodb.org/browse/PYTHON-5284 - tasks = [".standard-linux .standalone-noauth-nossl .python-3.9"] + tasks = [".test-standard .standalone-noauth-nossl .python-3.9"] expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") display_name = get_variant_name(f"Green {framework.capitalize()}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) @@ -362,7 +370,7 @@ def create_green_framework_variants(): def create_no_c_ext_variants(): host = DEFAULT_HOST - tasks = [".standard-linux"] + tasks = [".test-standard"] expansions = dict() handle_c_ext(C_EXTS[0], expansions) display_name = get_variant_name("No C Ext", host) @@ -371,7 +379,7 @@ def create_no_c_ext_variants(): def create_atlas_data_lake_variants(): host = HOSTS["ubuntu22"] - tasks = [".no-orchestration"] + tasks = [".test-no-orchestration"] expansions = dict(TEST_NAME="data_lake") display_name = get_variant_name("Atlas Data Lake", host) return [create_variant(tasks, display_name, host=host, expansions=expansions)] @@ -448,7 +456,7 @@ def create_mockupdb_variants(): expansions = dict(TEST_NAME="mockupdb") return [ create_variant( - [".no-orchestration"], + [".test-no-orchestration"], get_variant_name("MockupDB", host), host=host, expansions=expansions, @@ -461,7 +469,7 @@ def create_doctests_variants(): expansions = dict(TEST_NAME="doctest") return [ create_variant( - [".standard-linux .standalone-noauth-nossl"], + [".test-non-standard .standalone-noauth-nossl"], get_variant_name("Doctests", host), host=host, expansions=expansions, @@ -473,7 +481,7 @@ def create_atlas_connect_variants(): host = DEFAULT_HOST return [ create_variant( - [".no-orchestration"], + [".test-no-orchestration"], get_variant_name("Atlas connect", host), host=DEFAULT_HOST, ) @@ -532,7 +540,7 @@ def create_aws_auth_variants(): def create_no_server_variants(): host = HOSTS["rhel8"] name = get_variant_name("No server", host=host) - return [create_variant([".no-orchestration"], name, host=host)] + return [create_variant([".test-no-orchestration"], name, host=host)] def create_alternative_hosts_variants(): @@ -543,7 +551,7 @@ def create_alternative_hosts_variants(): version = "5.0" variants.append( create_variant( - [".other-hosts"], + [".test-no-toolchain"], get_variant_name("OpenSSL 1.0.2", host, python=CPYTHONS[0], version=version), host=host, python=CPYTHONS[0], @@ -561,7 +569,7 @@ def create_alternative_hosts_variants(): expansions["REQUIRE_FIPS"] = "1" variants.append( create_variant( - [".other-hosts"], + [".test-no-toolchain"], display_name=get_variant_name("Other hosts", host, version=version), batchtime=batchtime, host=host, @@ -583,41 +591,46 @@ def create_aws_lambda_variants(): def create_server_version_tasks(): tasks = [] - # Test all pythons with sharded_cluster, auth, and ssl. - task_types = [(p, "sharded_cluster", "auth", "ssl") for p in ALL_PYTHONS] - # Test all combinations of topology, auth, and ssl, with rotating pythons. - for (topology, auth, ssl), python in zip_cycle( - list(product(TOPOLOGIES, ["auth", "noauth"], ["ssl", "nossl"])), ALL_PYTHONS + task_inputs = [] + # All combinations of topology, auth, ssl, and sync should be tested. + for (topology, auth, ssl, sync), python in zip_cycle( + list(product(TOPOLOGIES, ["auth", "noauth"], ["ssl", "nossl"], SYNCS)), ALL_PYTHONS ): - # Skip the ones we already have. - if topology == "sharded_cluster" and auth == "auth" and ssl == "ssl": - continue - task_types.append((python, topology, auth, ssl)) - for python, topology, auth, ssl in task_types: - tags = ["server-version", f"python-{python}", f"{topology}-{auth}-{ssl}"] + task_inputs.append((topology, auth, ssl, sync, python)) + + # Every python should be tested with sharded cluster, auth, ssl, with sync and async. + for python, sync in product(ALL_PYTHONS, SYNCS): + task_input = ("sharded_cluster", "auth", "ssl", sync, python) + if task_input not in task_inputs: + task_inputs.append(task_input) + + # Assemble the tasks. + for topology, auth, ssl, sync, python in task_inputs: + tags = ["server-version", f"python-{python}", f"{topology}-{auth}-{ssl}", sync] expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) if python not in PYPYS: expansions["COVERAGE"] = "1" - name = get_task_name("test", python=python, **expansions) + name = get_task_name("test-server-version", python=python, sync=sync, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() test_vars["PYTHON_VERSION"] = python + test_vars["TEST_NAME"] = f"default_{sync}" test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) return tasks -def create_other_hosts_tasks(): +def create_no_toolchain_tasks(): tasks = [] for topology, sync in zip_cycle(TOPOLOGIES, SYNCS): auth, ssl = get_standard_auth_ssl(topology) tags = [ - "other-hosts", + "test-no-toolchain", f"{topology}-{auth}-{ssl}", ] expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) - name = get_task_name("test", sync=sync, **expansions) + name = get_task_name("test-no-toolchain", sync=sync, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() test_vars["TEST_NAME"] = f"default_{sync}" @@ -626,21 +639,28 @@ def create_other_hosts_tasks(): return tasks -def create_standard_linux_tasks(): +def create_test_non_standard_tasks(): + """For variants that set a TEST_NAME.""" tasks = [] - - for (version, topology), python in zip_cycle( - list(product(ALL_VERSIONS, TOPOLOGIES)), ALL_PYTHONS - ): + task_combos = [] + # For each version and topology, rotate through the CPythons. + for (version, topology), python in zip_cycle(list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS): + task_combos.append((version, topology, python)) + # For each PyPy and topology, rotate through the the versions. + for (python, topology), version in zip_cycle(list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS): + task_combos.append((version, topology, python)) + for version, topology, python in task_combos: auth, ssl = get_standard_auth_ssl(topology) tags = [ - "standard-linux", + "test-non-standard", f"server-{version}", f"python-{python}", f"{topology}-{auth}-{ssl}", ] + if python in PYPYS: + tags.append("pypy") expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) - name = get_task_name("test", python=python, **expansions) + name = get_task_name("test-non-standard", python=python, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() test_vars["PYTHON_VERSION"] = python @@ -649,22 +669,34 @@ def create_standard_linux_tasks(): return tasks -def create_standard_non_linux_tasks(): +def create_standard_tasks(): + """For variants that do not set a TEST_NAME.""" tasks = [] - + task_combos = [] + # For each version and topology, rotate through the CPythons and sync/async. for (version, topology), python, sync in zip_cycle( list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS, SYNCS ): + task_combos.append((version, topology, python, sync)) + # For each PyPy and topology, rotate through the the versions and sync/async. + for (python, topology), version, sync in zip_cycle( + list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS, SYNCS + ): + task_combos.append((version, topology, python, sync)) + + for version, topology, python, sync in task_combos: auth, ssl = get_standard_auth_ssl(topology) tags = [ - "standard-non-linux", + "test-standard", f"server-{version}", f"python-{python}", f"{topology}-{auth}-{ssl}", sync, ] + if python in PYPYS: + tags.append("pypy") expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) - name = get_task_name("test", python=python, sync=sync, **expansions) + name = get_task_name("test-standard", python=python, sync=sync, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() test_vars["PYTHON_VERSION"] = python @@ -702,7 +734,7 @@ def create_no_orchestration_tasks(): tasks = [] for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: tags = [ - "no-orchestration", + "test-no-orchestration", f"python-{python}", ] name = get_task_name("test-no-orchestration", python=python) @@ -714,26 +746,6 @@ def create_no_orchestration_tasks(): return tasks -def create_load_balancer_tasks(): - tasks = [] - for (auth, ssl), version in product(AUTH_SSLS, get_versions_from("6.0")): - name = get_task_name(f"test-load-balancer-{auth}-{ssl}", version=version) - tags = ["load-balancer", auth, ssl] - server_vars = dict( - TOPOLOGY="sharded_cluster", - AUTH=auth, - SSL=ssl, - TEST_NAME="load_balancer", - VERSION=version, - ) - server_func = FunctionCall(func="run server", vars=server_vars) - test_vars = dict(AUTH=auth, SSL=ssl, TEST_NAME="load_balancer") - test_func = FunctionCall(func="run tests", vars=test_vars) - tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) - - return tasks - - def create_kms_tasks(): tasks = [] for kms_type in ["gcp", "azure"]: From 000391c440c4c0256c51f48b20f5322a75e9c0bc Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 2 May 2025 10:35:34 -0500 Subject: [PATCH 1908/2111] PYTHON-5333 Update encryption and pyopenssl variants (#2328) --- .evergreen/generated_configs/tasks.yml | 4781 ------------------- .evergreen/generated_configs/variants.yml | 287 +- .evergreen/scripts/generate_config.py | 143 +- .evergreen/scripts/generate_config_utils.py | 16 +- CONTRIBUTING.md | 11 + 5 files changed, 90 insertions(+), 5148 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 504cad4882..32e139c2df 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -2388,4787 +2388,6 @@ tasks: TEST_NAME: search_index tags: [search_index] - # Server tests - - name: test-4.0-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - standalone - - auth - - ssl - - sync - - name: test-4.0-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - standalone - - auth - - ssl - - async - - name: test-4.0-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.0" - - standalone - - auth - - ssl - - sync_async - - name: test-4.0-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - standalone - - noauth - - ssl - - sync - - name: test-4.0-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - standalone - - noauth - - ssl - - async - - name: test-4.0-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.0" - - standalone - - noauth - - ssl - - sync_async - - name: test-4.0-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - standalone - - noauth - - nossl - - sync - - name: test-4.0-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - standalone - - noauth - - nossl - - async - - name: test-4.0-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.0" - - standalone - - noauth - - nossl - - sync_async - - name: test-4.2-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - standalone - - auth - - ssl - - sync - - name: test-4.2-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - standalone - - auth - - ssl - - async - - name: test-4.2-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.2" - - standalone - - auth - - ssl - - sync_async - - name: test-4.2-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - standalone - - noauth - - ssl - - sync - - name: test-4.2-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - standalone - - noauth - - ssl - - async - - name: test-4.2-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.2" - - standalone - - noauth - - ssl - - sync_async - - name: test-4.2-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - standalone - - noauth - - nossl - - sync - - name: test-4.2-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - standalone - - noauth - - nossl - - async - - name: test-4.2-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.2" - - standalone - - noauth - - nossl - - sync_async - - name: test-4.4-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - standalone - - auth - - ssl - - sync - - name: test-4.4-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - standalone - - auth - - ssl - - async - - name: test-4.4-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.4" - - standalone - - auth - - ssl - - sync_async - - name: test-4.4-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - standalone - - noauth - - ssl - - sync - - name: test-4.4-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - standalone - - noauth - - ssl - - async - - name: test-4.4-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.4" - - standalone - - noauth - - ssl - - sync_async - - name: test-4.4-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - standalone - - noauth - - nossl - - sync - - name: test-4.4-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - standalone - - noauth - - nossl - - async - - name: test-4.4-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.4" - - standalone - - noauth - - nossl - - sync_async - - name: test-5.0-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - standalone - - auth - - ssl - - sync - - name: test-5.0-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - standalone - - auth - - ssl - - async - - name: test-5.0-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "5.0" - - standalone - - auth - - ssl - - sync_async - - name: test-5.0-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - standalone - - noauth - - ssl - - sync - - name: test-5.0-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - standalone - - noauth - - ssl - - async - - name: test-5.0-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "5.0" - - standalone - - noauth - - ssl - - sync_async - - name: test-5.0-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - standalone - - noauth - - nossl - - sync - - name: test-5.0-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - standalone - - noauth - - nossl - - async - - name: test-5.0-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "5.0" - - standalone - - noauth - - nossl - - sync_async - - name: test-6.0-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - standalone - - auth - - ssl - - sync - - name: test-6.0-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - standalone - - auth - - ssl - - async - - name: test-6.0-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "6.0" - - standalone - - auth - - ssl - - sync_async - - name: test-6.0-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - standalone - - noauth - - ssl - - sync - - name: test-6.0-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - standalone - - noauth - - ssl - - async - - name: test-6.0-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "6.0" - - standalone - - noauth - - ssl - - sync_async - - name: test-6.0-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - standalone - - noauth - - nossl - - sync - - name: test-6.0-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - standalone - - noauth - - nossl - - async - - name: test-6.0-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "6.0" - - standalone - - noauth - - nossl - - sync_async - - name: test-7.0-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - standalone - - auth - - ssl - - sync - - name: test-7.0-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - standalone - - auth - - ssl - - async - - name: test-7.0-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "7.0" - - standalone - - auth - - ssl - - sync_async - - name: test-7.0-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - standalone - - noauth - - ssl - - sync - - name: test-7.0-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - standalone - - noauth - - ssl - - async - - name: test-7.0-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "7.0" - - standalone - - noauth - - ssl - - sync_async - - name: test-7.0-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - standalone - - noauth - - nossl - - sync - - name: test-7.0-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - standalone - - noauth - - nossl - - async - - name: test-7.0-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "7.0" - - standalone - - noauth - - nossl - - sync_async - - name: test-8.0-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - standalone - - auth - - ssl - - sync - - name: test-8.0-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - standalone - - auth - - ssl - - async - - name: test-8.0-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "8.0" - - standalone - - auth - - ssl - - sync_async - - name: test-8.0-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - standalone - - noauth - - ssl - - sync - - name: test-8.0-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - standalone - - noauth - - ssl - - async - - name: test-8.0-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "8.0" - - standalone - - noauth - - ssl - - sync_async - - name: test-8.0-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - standalone - - noauth - - nossl - - sync - - name: test-8.0-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - standalone - - noauth - - nossl - - async - - name: test-8.0-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "8.0" - - standalone - - noauth - - nossl - - sync_async - - name: test-rapid-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - standalone - - auth - - ssl - - sync - - name: test-rapid-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - standalone - - auth - - ssl - - async - - name: test-rapid-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - rapid - - standalone - - auth - - ssl - - sync_async - - name: test-rapid-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - standalone - - noauth - - ssl - - sync - - name: test-rapid-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - standalone - - noauth - - ssl - - async - - name: test-rapid-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - rapid - - standalone - - noauth - - ssl - - sync_async - - name: test-rapid-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - standalone - - noauth - - nossl - - sync - - name: test-rapid-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - standalone - - noauth - - nossl - - async - - name: test-rapid-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - rapid - - standalone - - noauth - - nossl - - sync_async - - name: test-latest-standalone-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - standalone - - auth - - ssl - - sync - - name: test-latest-standalone-auth-ssl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - standalone - - auth - - ssl - - async - - name: test-latest-standalone-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - latest - - standalone - - auth - - ssl - - sync_async - - name: test-latest-standalone-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - standalone - - noauth - - ssl - - sync - - name: test-latest-standalone-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - standalone - - noauth - - ssl - - async - - name: test-latest-standalone-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - latest - - standalone - - noauth - - ssl - - sync_async - - name: test-latest-standalone-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - standalone - - noauth - - nossl - - sync - - name: test-latest-standalone-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - standalone - - noauth - - nossl - - async - - name: test-latest-standalone-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: server - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - latest - - standalone - - noauth - - nossl - - sync_async - - name: test-4.0-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - replica_set - - auth - - ssl - - sync - - name: test-4.0-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - replica_set - - auth - - ssl - - async - - name: test-4.0-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.0" - - replica_set - - auth - - ssl - - sync_async - - name: test-4.0-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - replica_set - - noauth - - ssl - - sync - - name: test-4.0-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - replica_set - - noauth - - ssl - - async - - name: test-4.0-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.0" - - replica_set - - noauth - - ssl - - sync_async - - name: test-4.0-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - replica_set - - noauth - - nossl - - sync - - name: test-4.0-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - replica_set - - noauth - - nossl - - async - - name: test-4.0-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.0" - - replica_set - - noauth - - nossl - - sync_async - - name: test-4.2-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - replica_set - - auth - - ssl - - sync - - name: test-4.2-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - replica_set - - auth - - ssl - - async - - name: test-4.2-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.2" - - replica_set - - auth - - ssl - - sync_async - - name: test-4.2-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - replica_set - - noauth - - ssl - - sync - - name: test-4.2-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - replica_set - - noauth - - ssl - - async - - name: test-4.2-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.2" - - replica_set - - noauth - - ssl - - sync_async - - name: test-4.2-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - replica_set - - noauth - - nossl - - sync - - name: test-4.2-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - replica_set - - noauth - - nossl - - async - - name: test-4.2-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.2" - - replica_set - - noauth - - nossl - - sync_async - - name: test-4.4-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - replica_set - - auth - - ssl - - sync - - name: test-4.4-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - replica_set - - auth - - ssl - - async - - name: test-4.4-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.4" - - replica_set - - auth - - ssl - - sync_async - - name: test-4.4-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - replica_set - - noauth - - ssl - - sync - - name: test-4.4-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - replica_set - - noauth - - ssl - - async - - name: test-4.4-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.4" - - replica_set - - noauth - - ssl - - sync_async - - name: test-4.4-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - replica_set - - noauth - - nossl - - sync - - name: test-4.4-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - replica_set - - noauth - - nossl - - async - - name: test-4.4-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.4" - - replica_set - - noauth - - nossl - - sync_async - - name: test-5.0-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - replica_set - - auth - - ssl - - sync - - name: test-5.0-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - replica_set - - auth - - ssl - - async - - name: test-5.0-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "5.0" - - replica_set - - auth - - ssl - - sync_async - - name: test-5.0-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - replica_set - - noauth - - ssl - - sync - - name: test-5.0-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - replica_set - - noauth - - ssl - - async - - name: test-5.0-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "5.0" - - replica_set - - noauth - - ssl - - sync_async - - name: test-5.0-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - replica_set - - noauth - - nossl - - sync - - name: test-5.0-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - replica_set - - noauth - - nossl - - async - - name: test-5.0-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "5.0" - - replica_set - - noauth - - nossl - - sync_async - - name: test-6.0-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - replica_set - - auth - - ssl - - sync - - name: test-6.0-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - replica_set - - auth - - ssl - - async - - name: test-6.0-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "6.0" - - replica_set - - auth - - ssl - - sync_async - - name: test-6.0-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - replica_set - - noauth - - ssl - - sync - - name: test-6.0-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - replica_set - - noauth - - ssl - - async - - name: test-6.0-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "6.0" - - replica_set - - noauth - - ssl - - sync_async - - name: test-6.0-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - replica_set - - noauth - - nossl - - sync - - name: test-6.0-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - replica_set - - noauth - - nossl - - async - - name: test-6.0-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "6.0" - - replica_set - - noauth - - nossl - - sync_async - - name: test-7.0-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - replica_set - - auth - - ssl - - sync - - name: test-7.0-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - replica_set - - auth - - ssl - - async - - name: test-7.0-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "7.0" - - replica_set - - auth - - ssl - - sync_async - - name: test-7.0-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - replica_set - - noauth - - ssl - - sync - - name: test-7.0-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - replica_set - - noauth - - ssl - - async - - name: test-7.0-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "7.0" - - replica_set - - noauth - - ssl - - sync_async - - name: test-7.0-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - replica_set - - noauth - - nossl - - sync - - name: test-7.0-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - replica_set - - noauth - - nossl - - async - - name: test-7.0-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "7.0" - - replica_set - - noauth - - nossl - - sync_async - - name: test-8.0-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - replica_set - - auth - - ssl - - sync - - name: test-8.0-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - replica_set - - auth - - ssl - - async - - name: test-8.0-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "8.0" - - replica_set - - auth - - ssl - - sync_async - - name: test-8.0-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - replica_set - - noauth - - ssl - - sync - - name: test-8.0-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - replica_set - - noauth - - ssl - - async - - name: test-8.0-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "8.0" - - replica_set - - noauth - - ssl - - sync_async - - name: test-8.0-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - replica_set - - noauth - - nossl - - sync - - name: test-8.0-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - replica_set - - noauth - - nossl - - async - - name: test-8.0-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "8.0" - - replica_set - - noauth - - nossl - - sync_async - - name: test-rapid-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - replica_set - - auth - - ssl - - sync - - name: test-rapid-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - replica_set - - auth - - ssl - - async - - name: test-rapid-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - rapid - - replica_set - - auth - - ssl - - sync_async - - name: test-rapid-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - replica_set - - noauth - - ssl - - sync - - name: test-rapid-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - replica_set - - noauth - - ssl - - async - - name: test-rapid-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - rapid - - replica_set - - noauth - - ssl - - sync_async - - name: test-rapid-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - replica_set - - noauth - - nossl - - sync - - name: test-rapid-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - replica_set - - noauth - - nossl - - async - - name: test-rapid-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - rapid - - replica_set - - noauth - - nossl - - sync_async - - name: test-latest-replica_set-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - replica_set - - auth - - ssl - - sync - - name: test-latest-replica_set-auth-ssl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - replica_set - - auth - - ssl - - async - - name: test-latest-replica_set-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - latest - - replica_set - - auth - - ssl - - sync_async - - name: test-latest-replica_set-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - replica_set - - noauth - - ssl - - sync - - name: test-latest-replica_set-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - replica_set - - noauth - - ssl - - async - - name: test-latest-replica_set-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - latest - - replica_set - - noauth - - ssl - - sync_async - - name: test-latest-replica_set-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - replica_set - - noauth - - nossl - - sync - - name: test-latest-replica_set-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - replica_set - - noauth - - nossl - - async - - name: test-latest-replica_set-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: replica_set - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - latest - - replica_set - - noauth - - nossl - - sync_async - - name: test-4.0-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - sharded_cluster - - auth - - ssl - - sync - - name: test-4.0-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - sharded_cluster - - auth - - ssl - - async - - name: test-4.0-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.0" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-4.0-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-4.0-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - sharded_cluster - - noauth - - ssl - - async - - name: test-4.0-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.0" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-4.0-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.0" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-4.0-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.0" - - sharded_cluster - - noauth - - nossl - - async - - name: test-4.0-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.0" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-4.2-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - sharded_cluster - - auth - - ssl - - sync - - name: test-4.2-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - sharded_cluster - - auth - - ssl - - async - - name: test-4.2-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.2" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-4.2-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-4.2-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - sharded_cluster - - noauth - - ssl - - async - - name: test-4.2-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.2" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-4.2-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.2" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-4.2-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.2" - - sharded_cluster - - noauth - - nossl - - async - - name: test-4.2-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.2" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.2" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-4.4-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - sharded_cluster - - auth - - ssl - - sync - - name: test-4.4-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - sharded_cluster - - auth - - ssl - - async - - name: test-4.4-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "4.4" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-4.4-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-4.4-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - sharded_cluster - - noauth - - ssl - - async - - name: test-4.4-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "4.4" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-4.4-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "4.4" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-4.4-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "4.4" - - sharded_cluster - - noauth - - nossl - - async - - name: test-4.4-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "4.4" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "4.4" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-5.0-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - sharded_cluster - - auth - - ssl - - sync - - name: test-5.0-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - sharded_cluster - - auth - - ssl - - async - - name: test-5.0-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "5.0" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-5.0-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-5.0-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - sharded_cluster - - noauth - - ssl - - async - - name: test-5.0-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "5.0" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-5.0-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "5.0" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-5.0-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "5.0" - - sharded_cluster - - noauth - - nossl - - async - - name: test-5.0-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "5.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "5.0" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-6.0-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - sharded_cluster - - auth - - ssl - - sync - - name: test-6.0-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - sharded_cluster - - auth - - ssl - - async - - name: test-6.0-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "6.0" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-6.0-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-6.0-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - sharded_cluster - - noauth - - ssl - - async - - name: test-6.0-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "6.0" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-6.0-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "6.0" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-6.0-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "6.0" - - sharded_cluster - - noauth - - nossl - - async - - name: test-6.0-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "6.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "6.0" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-7.0-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - sharded_cluster - - auth - - ssl - - sync - - name: test-7.0-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - sharded_cluster - - auth - - ssl - - async - - name: test-7.0-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "7.0" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-7.0-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-7.0-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - sharded_cluster - - noauth - - ssl - - async - - name: test-7.0-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "7.0" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-7.0-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "7.0" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-7.0-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "7.0" - - sharded_cluster - - noauth - - nossl - - async - - name: test-7.0-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "7.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "7.0" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-8.0-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - sharded_cluster - - auth - - ssl - - sync - - name: test-8.0-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - sharded_cluster - - auth - - ssl - - async - - name: test-8.0-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - "8.0" - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-8.0-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - sharded_cluster - - noauth - - ssl - - sync - - name: test-8.0-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - sharded_cluster - - noauth - - ssl - - async - - name: test-8.0-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - "8.0" - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-8.0-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - "8.0" - - sharded_cluster - - noauth - - nossl - - sync - - name: test-8.0-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - "8.0" - - sharded_cluster - - noauth - - nossl - - async - - name: test-8.0-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - "8.0" - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-rapid-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - sharded_cluster - - auth - - ssl - - sync - - name: test-rapid-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - sharded_cluster - - auth - - ssl - - async - - name: test-rapid-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - rapid - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-rapid-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - sharded_cluster - - noauth - - ssl - - sync - - name: test-rapid-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - sharded_cluster - - noauth - - ssl - - async - - name: test-rapid-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - rapid - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-rapid-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - rapid - - sharded_cluster - - noauth - - nossl - - sync - - name: test-rapid-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - rapid - - sharded_cluster - - noauth - - nossl - - async - - name: test-rapid-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: rapid - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - rapid - - sharded_cluster - - noauth - - nossl - - sync_async - - name: test-latest-sharded_cluster-auth-ssl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - sharded_cluster - - auth - - ssl - - sync - - name: test-latest-sharded_cluster-auth-ssl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - sharded_cluster - - auth - - ssl - - async - - name: test-latest-sharded_cluster-auth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: auth - SSL: ssl - - func: run tests - vars: - AUTH: auth - SSL: ssl - SYNC: sync_async - tags: - - latest - - sharded_cluster - - auth - - ssl - - sync_async - - name: test-latest-sharded_cluster-noauth-ssl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - sharded_cluster - - noauth - - ssl - - sync - - name: test-latest-sharded_cluster-noauth-ssl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - sharded_cluster - - noauth - - ssl - - async - - name: test-latest-sharded_cluster-noauth-ssl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: ssl - - func: run tests - vars: - AUTH: noauth - SSL: ssl - SYNC: sync_async - tags: - - latest - - sharded_cluster - - noauth - - ssl - - sync_async - - name: test-latest-sharded_cluster-noauth-nossl-sync - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync - TEST_NAME: default_sync - tags: - - latest - - sharded_cluster - - noauth - - nossl - - sync - - name: test-latest-sharded_cluster-noauth-nossl-async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: async - TEST_NAME: default_async - tags: - - latest - - sharded_cluster - - noauth - - nossl - - async - - name: test-latest-sharded_cluster-noauth-nossl-sync_async - commands: - - func: run server - vars: - VERSION: latest - TOPOLOGY: sharded_cluster - AUTH: noauth - SSL: nossl - - func: run tests - vars: - AUTH: noauth - SSL: nossl - SYNC: sync_async - tags: - - latest - - sharded_cluster - - noauth - - nossl - - sync_async - # Server version tests - name: test-server-version-python3.9-sync-auth-ssl-standalone-cov commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 3cb14c716d..45266f0dff 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -154,7 +154,7 @@ buildvariants: # Disable test commands tests - name: disable-test-commands-rhel8-python3.9 tasks: - - name: .latest .sync_async + - name: .test-standard .server-latest display_name: Disable test commands RHEL8 Python3.9 run_on: - rhel87-small @@ -175,248 +175,79 @@ buildvariants: TEST_NAME: doctest # Encryption tests - - name: encryption-rhel8-python3.9 + - name: encryption-rhel8 tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 Python3.9 + - name: .test-non-standard + display_name: Encryption RHEL8 run_on: - rhel87-small batchtime: 10080 expansions: TEST_NAME: encryption - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [encryption_tag] - - name: encryption-rhel8-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 Python3.13 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [encryption_tag] - - name: encryption-rhel8-pypy3.10 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 PyPy3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-python3.9 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption crypt_shared RHEL8 Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption crypt_shared RHEL8 Python3.13 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [encryption_tag] - - name: encryption-crypt_shared-rhel8-pypy3.10 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - - name: .replica_set .noauth .ssl .sync_async - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption crypt_shared RHEL8 PyPy3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 - tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-python3.9 - tasks: - - name: .sharded_cluster .auth .ssl .sync - - name: .replica_set .noauth .ssl .sync - - name: .standalone .noauth .nossl .sync - display_name: Encryption PyOpenSSL RHEL8 Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/3.9/bin/python3 - tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-python3.13 - tasks: - - name: .sharded_cluster .auth .ssl .sync - - name: .replica_set .noauth .ssl .sync - - name: .standalone .noauth .nossl .sync - display_name: Encryption PyOpenSSL RHEL8 Python3.13 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/3.13/bin/python3 - tags: [encryption_tag] - - name: encryption-pyopenssl-rhel8-pypy3.10 - tasks: - - name: .sharded_cluster .auth .ssl .sync - - name: .replica_set .noauth .ssl .sync - - name: .standalone .noauth .nossl .sync - display_name: Encryption PyOpenSSL RHEL8 PyPy3.10 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 tags: [encryption_tag] - - name: encryption-rhel8-python3.10 - tasks: - - name: .sharded_cluster .auth .ssl .sync_async - display_name: Encryption RHEL8 Python3.10 - run_on: - - rhel87-small - expansions: - TEST_NAME: encryption - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: encryption-crypt_shared-rhel8-python3.11 + - name: encryption-macos tasks: - - name: .replica_set .noauth .ssl .sync_async - display_name: Encryption crypt_shared RHEL8 Python3.11 - run_on: - - rhel87-small - expansions: - TEST_NAME: encryption - TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: encryption-rhel8-python3.12 - tasks: - - name: .standalone .noauth .nossl .sync_async - display_name: Encryption RHEL8 Python3.12 - run_on: - - rhel87-small - expansions: - TEST_NAME: encryption - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: encryption-macos-python3.9 - tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption macOS Python3.9 + - name: .test-non-standard !.pypy + display_name: Encryption macOS run_on: - macos-14 batchtime: 10080 expansions: TEST_NAME: encryption - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-macos-python3.13 + - name: encryption-win64 tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption macOS Python3.13 + - name: .test-non-standard !.pypy + display_name: Encryption Win64 run_on: - - macos-14 + - windows-64-vsMulti-small batchtime: 10080 expansions: TEST_NAME: encryption - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-macos-python3.9 + - name: encryption-crypt_shared-rhel8 tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared macOS Python3.9 + - name: .test-non-standard + display_name: Encryption crypt_shared RHEL8 run_on: - - macos-14 + - rhel87-small batchtime: 10080 expansions: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 tags: [encryption_tag] - - name: encryption-crypt_shared-macos-python3.13 + - name: encryption-crypt_shared-macos tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared macOS Python3.13 + - name: .test-non-standard !.pypy + display_name: Encryption crypt_shared macOS run_on: - macos-14 batchtime: 10080 expansions: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.13/bin/python3 - tags: [encryption_tag] - - name: encryption-win64-python3.9 - tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption Win64 Python3.9 - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - PYTHON_BINARY: C:/python/Python39/python.exe - tags: [encryption_tag] - - name: encryption-win64-python3.13 - tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption Win64 Python3.13 - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - TEST_NAME: encryption - PYTHON_BINARY: C:/python/Python313/python.exe tags: [encryption_tag] - - name: encryption-crypt_shared-win64-python3.9 + - name: encryption-crypt_shared-win64 tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared Win64 Python3.9 + - name: .test-non-standard !.pypy + display_name: Encryption crypt_shared Win64 run_on: - windows-64-vsMulti-small batchtime: 10080 expansions: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: C:/python/Python39/python.exe tags: [encryption_tag] - - name: encryption-crypt_shared-win64-python3.13 + - name: encryption-pyopenssl-rhel8 tasks: - - name: .latest .replica_set .sync_async - display_name: Encryption crypt_shared Win64 Python3.13 + - name: .test-non-standard + display_name: Encryption PyOpenSSL RHEL8 run_on: - - windows-64-vsMulti-small + - rhel87-small batchtime: 10080 expansions: TEST_NAME: encryption - TEST_CRYPT_SHARED: "true" - PYTHON_BINARY: C:/python/Python313/python.exe + SUB_TEST_NAME: pyopenssl tags: [encryption_tag] # Enterprise auth tests @@ -617,78 +448,36 @@ buildvariants: batchtime: 10080 # Pyopenssl tests - - name: pyopenssl-macos-python3.9 + - name: pyopenssl-rhel8 tasks: - - name: .replica_set .noauth .nossl .sync - - name: .7.0 .noauth .nossl .sync - display_name: PyOpenSSL macOS Python3.9 - run_on: - - macos-14 - batchtime: 10080 - expansions: - TEST_NAME: default - SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /Library/Frameworks/Python.Framework/Versions/3.9/bin/python3 - - name: pyopenssl-rhel8-python3.10 - tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL RHEL8 Python3.10 + - name: .test-standard .sync + - name: .test-standard .async .replica_set-noauth-ssl + display_name: PyOpenSSL RHEL8 run_on: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: default SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/3.10/bin/python3 - - name: pyopenssl-rhel8-python3.11 + - name: pyopenssl-macos tasks: - - name: .replica_set .auth .ssl .sync - - name: .7.0 .auth .ssl .sync - display_name: PyOpenSSL RHEL8 Python3.11 + - name: .test-standard !.pypy .sync + - name: .test-standard !.pypy .async .replica_set-noauth-ssl + display_name: PyOpenSSL macOS run_on: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: default - SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/3.11/bin/python3 - - name: pyopenssl-rhel8-python3.12 - tasks: - - name: .replica_set .auth .ssl .sync - - name: .7.0 .auth .ssl .sync - display_name: PyOpenSSL RHEL8 Python3.12 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - TEST_NAME: default - SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/3.12/bin/python3 - - name: pyopenssl-win64-python3.13 - tasks: - - name: .replica_set .auth .ssl .sync_async - - name: .7.0 .auth .ssl .sync_async - display_name: PyOpenSSL Win64 Python3.13 - run_on: - - windows-64-vsMulti-small - batchtime: 10080 - expansions: - TEST_NAME: default SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: C:/python/Python313/python.exe - - name: pyopenssl-rhel8-pypy3.10 + - name: pyopenssl-win64 tasks: - - name: .replica_set .auth .ssl .sync - - name: .7.0 .auth .ssl .sync - display_name: PyOpenSSL RHEL8 PyPy3.10 + - name: .test-standard !.pypy .sync + - name: .test-standard !.pypy .async .replica_set-noauth-ssl + display_name: PyOpenSSL Win64 run_on: - rhel87-small batchtime: 10080 expansions: - TEST_NAME: default SUB_TEST_NAME: pyopenssl - PYTHON_BINARY: /opt/python/pypy3.10/bin/python3 # Search index tests - name: search-index-helpers-rhel8-python3.9 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index a0e39e2579..b92e957143 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -7,7 +7,6 @@ from generate_config_utils import ( ALL_PYTHONS, ALL_VERSIONS, - AUTH_SSLS, BATCHTIME_WEEK, C_EXTS, CPYTHONS, @@ -16,7 +15,6 @@ MIN_MAX_PYTHON, OTHER_HOSTS, PYPYS, - SUB_TASKS, SYNCS, TOPOLOGIES, create_variant, @@ -136,32 +134,18 @@ def get_encryption_expansions(encryption): expansions["SUB_TEST_NAME"] = "pyopenssl" return expansions - host = DEFAULT_HOST - - # Test against all server versions for the three main python versions. - encryptions = ["Encryption", "Encryption crypt_shared"] - for encryption, python in product(encryptions, [*MIN_MAX_PYTHON, PYPYS[-1]]): - expansions = get_encryption_expansions(encryption) - display_name = get_variant_name(encryption, host, python=python, **expansions) - variant = create_variant( - [f"{t} .sync_async" for t in SUB_TASKS], - display_name, - python=python, - host=host, - expansions=expansions, - batchtime=batchtime, - tags=tags, - ) - variants.append(variant) - - # Test PyOpenSSL against on all server versions for all python versions. - for encryption, python in product(["Encryption PyOpenSSL"], [*MIN_MAX_PYTHON, PYPYS[-1]]): + # Test encryption on all hosts. + for encryption, host in product( + ["Encryption", "Encryption crypt_shared"], ["rhel8", "macos", "win64"] + ): expansions = get_encryption_expansions(encryption) - display_name = get_variant_name(encryption, host, python=python, **expansions) + display_name = get_variant_name(encryption, host, **expansions) + tasks = [".test-non-standard"] + if host != "rhel8": + tasks = [".test-non-standard !.pypy"] variant = create_variant( - [f"{t} .sync" for t in SUB_TASKS], + tasks, display_name, - python=python, host=host, expansions=expansions, batchtime=batchtime, @@ -169,36 +153,20 @@ def get_encryption_expansions(encryption): ) variants.append(variant) - # Test the rest of the pythons on linux for all server versions. - for encryption, python, task in zip_cycle(encryptions, CPYTHONS[1:-1] + PYPYS[:-1], SUB_TASKS): - expansions = get_encryption_expansions(encryption) - display_name = get_variant_name(encryption, host, python=python, **expansions) - variant = create_variant( - [f"{task} .sync_async"], - display_name, - python=python, - host=host, - expansions=expansions, - ) - variants.append(variant) - - # Test on macos and linux on one server version and topology for min and max python. - encryptions = ["Encryption", "Encryption crypt_shared"] - task_names = [".latest .replica_set .sync_async"] - for host_name, encryption, python in product(["macos", "win64"], encryptions, MIN_MAX_PYTHON): - host = HOSTS[host_name] - expansions = get_encryption_expansions(encryption) - display_name = get_variant_name(encryption, host, python=python, **expansions) - variant = create_variant( - task_names, - display_name, - python=python, - host=host, - expansions=expansions, - batchtime=batchtime, - tags=tags, - ) - variants.append(variant) + # Test PyOpenSSL on linux. + host = DEFAULT_HOST + encryption = "Encryption PyOpenSSL" + expansions = get_encryption_expansions(encryption) + display_name = get_variant_name(encryption, host, **expansions) + variant = create_variant( + [".test-non-standard"], + display_name, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) return variants @@ -258,41 +226,22 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" batchtime = BATCHTIME_WEEK - expansions = dict(TEST_NAME="default", SUB_TEST_NAME="pyopenssl") + expansions = dict(SUB_TEST_NAME="pyopenssl") variants = [] - for python in ALL_PYTHONS: - # Only test "noauth" with min python. - auth = "noauth" if python == CPYTHONS[0] else "auth" - ssl = "nossl" if auth == "noauth" else "ssl" - if python == CPYTHONS[0]: - host = HOSTS["macos"] - elif python == CPYTHONS[-1]: - host = HOSTS["win64"] - else: - host = DEFAULT_HOST - - display_name = get_variant_name(base_name, host, python=python) - # only need to run some on async - if python in (CPYTHONS[1], CPYTHONS[-1]): - variant = create_variant( - [f".replica_set .{auth} .{ssl} .sync_async", f".7.0 .{auth} .{ssl} .sync_async"], - display_name, - python=python, - host=host, - expansions=expansions, - batchtime=batchtime, - ) - else: - variant = create_variant( - [f".replica_set .{auth} .{ssl} .sync", f".7.0 .{auth} .{ssl} .sync"], + for host in ["rhel8", "macos", "win64"]: + display_name = get_variant_name(base_name, host) + base_task = ".test-standard" if host == "rhel8" else ".test-standard !.pypy" + # We only need to run a subset on async. + tasks = [f"{base_task} .sync", f"{base_task} .async .replica_set-noauth-ssl"] + variants.append( + create_variant( + tasks, display_name, - python=python, - host=host, expansions=expansions, batchtime=batchtime, ) - variants.append(variant) + ) return variants @@ -398,7 +347,7 @@ def create_disable_test_commands_variants(): expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") python = CPYTHONS[0] display_name = get_variant_name("Disable test commands", host, python=python) - tasks = [".latest .sync_async"] + tasks = [".test-standard .server-latest"] return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] @@ -706,30 +655,6 @@ def create_standard_tasks(): return tasks -def create_server_tasks(): - tasks = [] - for topo, version, (auth, ssl), sync in product( - TOPOLOGIES, ALL_VERSIONS, AUTH_SSLS, [*SYNCS, "sync_async"] - ): - name = f"test-{version}-{topo}-{auth}-{ssl}-{sync}".lower() - tags = [version, topo, auth, ssl, sync] - server_vars = dict( - VERSION=version, - TOPOLOGY=topo if topo != "standalone" else "server", - AUTH=auth, - SSL=ssl, - ) - server_func = FunctionCall(func="run server", vars=server_vars) - test_vars = dict(AUTH=auth, SSL=ssl, SYNC=sync) - if sync == "sync": - test_vars["TEST_NAME"] = "default_sync" - elif sync == "async": - test_vars["TEST_NAME"] = "default_async" - test_func = FunctionCall(func="run tests", vars=test_vars) - tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) - return tasks - - def create_no_orchestration_tasks(): tasks = [] for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index facb832633..ad092983fa 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -30,12 +30,6 @@ AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] C_EXTS = ["without_ext", "with_ext"] -# By default test each of the topologies with a subset of auth/ssl. -SUB_TASKS = [ - ".sharded_cluster .auth .ssl", - ".replica_set .noauth .ssl", - ".standalone .noauth .nossl", -] SYNCS = ["sync", "async"] DISPLAY_LOOKUP = dict( ssl=dict(ssl="SSL", nossl="NoSSL"), @@ -95,13 +89,15 @@ def create_variant_generic( tasks: list[str | EvgTaskRef], display_name: str, *, - host: Host | None = None, + host: Host | str | None = None, default_run_on="rhel87-small", expansions: dict | None = None, **kwargs: Any, ) -> BuildVariant: """Create a build variant for the given inputs.""" task_refs = [] + if isinstance(host, str): + host = HOSTS[host] for t in tasks: if isinstance(t, EvgTaskRef): task_refs.append(t) @@ -134,7 +130,7 @@ def create_variant( display_name: str, *, version: str | None = None, - host: Host | None = None, + host: Host | str | None = None, python: str | None = None, expansions: dict | None = None, **kwargs: Any, @@ -214,9 +210,11 @@ def get_common_name(base: str, sep: str, **kwargs) -> str: return display_name -def get_variant_name(base: str, host: Host | None = None, **kwargs) -> str: +def get_variant_name(base: str, host: str | Host | None = None, **kwargs) -> str: """Get the display name of a variant.""" display_name = base + if isinstance(host, str): + host = HOSTS[host] if host is not None: display_name += f" {host.display_name}" return get_common_name(display_name, " ", **kwargs) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 369e688b1e..e0b6260e21 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -231,6 +231,17 @@ the pages will re-render and the browser will automatically refresh. - Run `just setup-tests encryption`. - Run the tests with `just run-tests`. +To test with `encryption` and `PyOpenSSL`, use `just setup-tests encryption pyopenssl`. + +### PyOpenSSL tests + +- Run `just run-server` to start the server. +- Run `just setup-tests default_sync pyopenssl`. +- Run the tests with `just run-tests`. + +Note: `PyOpenSSL` is not used in async tests, but you can use `just setup-tests default_async pyopenssl` +to verify that PyMongo falls back to the standard library `OpenSSL`. + ### Load balancer tests - Install `haproxy` (available as `brew install haproxy` on macOS). From 2b4ab2a9ad82526e18e78d4e39667e7471f943f9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 2 May 2025 11:54:03 -0500 Subject: [PATCH 1909/2111] PYTHON-5365 Fix handing of remote tests (#2327) --- .evergreen/scripts/install-dependencies.sh | 14 ++++++-------- .evergreen/scripts/utils.py | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index ec389690ca..5425d10c8c 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -10,6 +10,12 @@ if [ -f $HERE/env.sh ]; then . $HERE/env.sh fi +# Set up the default bin directory. +if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + PYMONGO_BIN_DIR="$HOME/.local/bin" + export PATH="$PYMONGO_BIN_DIR:$PATH" +fi + # Helper function to pip install a dependency using a temporary python env. function _pip_install() { _HERE=$(dirname ${BASH_SOURCE:-$0}) @@ -41,10 +47,6 @@ if ! command -v just &>/dev/null; then if [ "Windows_NT" = "${OS:-}" ]; then _TARGET="--target x86_64-pc-windows-msvc" fi - if [ -z "${PYMONGO_BIN_DIR:-}" ]; then - echo "Please install just!" - exit 1 - fi _BIN_DIR=$PYMONGO_BIN_DIR echo "Installing just..." mkdir -p "$_BIN_DIR" 2>/dev/null || true @@ -56,10 +58,6 @@ fi # Ensure uv is installed. if ! command -v uv &>/dev/null; then - if [ -z "${PYMONGO_BIN_DIR:-}" ]; then - echo "Please install uv!" - exit 1 - fi _BIN_DIR=$PYMONGO_BIN_DIR echo "Installing uv..." # On most systems we can install directly. diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index c9195b638a..7a8f9640f8 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -193,6 +193,6 @@ def run_command(cmd: str | list[str], **kwargs: Any) -> None: def create_archive() -> str: run_command("git add .", cwd=ROOT) - run_command('git commit -m "add files"', check=False, cwd=ROOT) + run_command('git commit --no-verify -m "add files"', check=False, cwd=ROOT) run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) return TMP_DRIVER_FILE From cf9b68c6f468a8c58d23c2df7bf49c6684d85b7a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 2 May 2025 11:54:54 -0500 Subject: [PATCH 1910/2111] Convert Enterprise Auth Variants to use common tasks (#2330) --- .evergreen/generated_configs/tasks.yml | 80 ++++++++++------------- .evergreen/generated_configs/variants.yml | 27 +++++--- .evergreen/scripts/generate_config.py | 33 +++------- 3 files changed, 62 insertions(+), 78 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 32e139c2df..5083b456c5 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -136,50 +136,6 @@ tasks: depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] tags: [coverage] - # Enterprise auth tests - - name: test-enterprise-auth-python3.9 - commands: - - func: run server - vars: - TEST_NAME: enterprise_auth - AUTH: auth - PYTHON_VERSION: "3.9" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: enterprise_auth - AUTH: auth - PYTHON_VERSION: "3.9" - tags: [enterprise_auth] - - name: test-enterprise-auth-python3.13 - commands: - - func: run server - vars: - TEST_NAME: enterprise_auth - AUTH: auth - PYTHON_VERSION: "3.13" - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: enterprise_auth - AUTH: auth - PYTHON_VERSION: "3.13" - tags: [enterprise_auth] - - name: test-enterprise-auth-pypy3.10 - commands: - - func: run server - vars: - TEST_NAME: enterprise_auth - AUTH: auth - PYTHON_VERSION: pypy3.10 - - func: assume ec2 role - - func: run tests - vars: - TEST_NAME: enterprise_auth - AUTH: auth - PYTHON_VERSION: pypy3.10 - tags: [enterprise_auth, pypy] - # Free threading tests - name: test-free-threading commands: @@ -3928,6 +3884,7 @@ tasks: - server-4.0 - python-3.9 - standalone-noauth-nossl + - noauth - name: test-non-standard-v4.0-python3.10-noauth-ssl-replica-set commands: - func: run server @@ -3948,6 +3905,7 @@ tasks: - server-4.0 - python-3.10 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v4.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server @@ -3968,6 +3926,7 @@ tasks: - server-4.0 - python-3.11 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v4.2-python3.12-noauth-nossl-standalone commands: - func: run server @@ -3988,6 +3947,7 @@ tasks: - server-4.2 - python-3.12 - standalone-noauth-nossl + - noauth - name: test-non-standard-v4.2-python3.13-noauth-ssl-replica-set commands: - func: run server @@ -4008,6 +3968,7 @@ tasks: - server-4.2 - python-3.13 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v4.2-python3.9-auth-ssl-sharded-cluster commands: - func: run server @@ -4028,6 +3989,7 @@ tasks: - server-4.2 - python-3.9 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v4.4-python3.10-noauth-nossl-standalone commands: - func: run server @@ -4048,6 +4010,7 @@ tasks: - server-4.4 - python-3.10 - standalone-noauth-nossl + - noauth - name: test-non-standard-v4.4-python3.11-noauth-ssl-replica-set commands: - func: run server @@ -4068,6 +4031,7 @@ tasks: - server-4.4 - python-3.11 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v4.4-python3.12-auth-ssl-sharded-cluster commands: - func: run server @@ -4088,6 +4052,7 @@ tasks: - server-4.4 - python-3.12 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v5.0-python3.13-noauth-nossl-standalone commands: - func: run server @@ -4108,6 +4073,7 @@ tasks: - server-5.0 - python-3.13 - standalone-noauth-nossl + - noauth - name: test-non-standard-v5.0-python3.9-noauth-ssl-replica-set commands: - func: run server @@ -4128,6 +4094,7 @@ tasks: - server-5.0 - python-3.9 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v5.0-python3.10-auth-ssl-sharded-cluster commands: - func: run server @@ -4148,6 +4115,7 @@ tasks: - server-5.0 - python-3.10 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v6.0-python3.11-noauth-nossl-standalone commands: - func: run server @@ -4168,6 +4136,7 @@ tasks: - server-6.0 - python-3.11 - standalone-noauth-nossl + - noauth - name: test-non-standard-v6.0-python3.12-noauth-ssl-replica-set commands: - func: run server @@ -4188,6 +4157,7 @@ tasks: - server-6.0 - python-3.12 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v6.0-python3.13-auth-ssl-sharded-cluster commands: - func: run server @@ -4208,6 +4178,7 @@ tasks: - server-6.0 - python-3.13 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone commands: - func: run server @@ -4228,6 +4199,7 @@ tasks: - server-7.0 - python-3.9 - standalone-noauth-nossl + - noauth - name: test-non-standard-v7.0-python3.10-noauth-ssl-replica-set commands: - func: run server @@ -4248,6 +4220,7 @@ tasks: - server-7.0 - python-3.10 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v7.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server @@ -4268,6 +4241,7 @@ tasks: - server-7.0 - python-3.11 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone commands: - func: run server @@ -4288,6 +4262,7 @@ tasks: - server-8.0 - python-3.12 - standalone-noauth-nossl + - noauth - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set commands: - func: run server @@ -4308,6 +4283,7 @@ tasks: - server-8.0 - python-3.13 - replica_set-noauth-ssl + - noauth - name: test-non-standard-v8.0-python3.9-auth-ssl-sharded-cluster commands: - func: run server @@ -4328,6 +4304,7 @@ tasks: - server-8.0 - python-3.9 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone commands: - func: run server @@ -4348,6 +4325,7 @@ tasks: - server-rapid - python-3.10 - standalone-noauth-nossl + - noauth - name: test-non-standard-rapid-python3.11-noauth-ssl-replica-set commands: - func: run server @@ -4368,6 +4346,7 @@ tasks: - server-rapid - python-3.11 - replica_set-noauth-ssl + - noauth - name: test-non-standard-rapid-python3.12-auth-ssl-sharded-cluster commands: - func: run server @@ -4388,6 +4367,7 @@ tasks: - server-rapid - python-3.12 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-latest-python3.13-noauth-nossl-standalone commands: - func: run server @@ -4408,6 +4388,7 @@ tasks: - server-latest - python-3.13 - standalone-noauth-nossl + - noauth - name: test-non-standard-latest-python3.9-noauth-ssl-replica-set commands: - func: run server @@ -4428,6 +4409,7 @@ tasks: - server-latest - python-3.9 - replica_set-noauth-ssl + - noauth - name: test-non-standard-latest-python3.10-auth-ssl-sharded-cluster commands: - func: run server @@ -4448,6 +4430,7 @@ tasks: - server-latest - python-3.10 - sharded_cluster-auth-ssl + - auth - name: test-non-standard-v4.0-pypy3.10-noauth-nossl-standalone commands: - func: run server @@ -4468,6 +4451,7 @@ tasks: - server-4.0 - python-pypy3.10 - standalone-noauth-nossl + - noauth - pypy - name: test-non-standard-v4.2-pypy3.10-noauth-ssl-replica-set commands: @@ -4489,6 +4473,7 @@ tasks: - server-4.2 - python-pypy3.10 - replica_set-noauth-ssl + - noauth - pypy - name: test-non-standard-v4.4-pypy3.10-auth-ssl-sharded-cluster commands: @@ -4510,6 +4495,7 @@ tasks: - server-4.4 - python-pypy3.10 - sharded_cluster-auth-ssl + - auth - pypy - name: test-non-standard-v5.0-pypy3.10-noauth-nossl-standalone commands: @@ -4531,6 +4517,7 @@ tasks: - server-5.0 - python-pypy3.10 - standalone-noauth-nossl + - noauth - pypy - name: test-non-standard-v6.0-pypy3.10-noauth-ssl-replica-set commands: @@ -4552,6 +4539,7 @@ tasks: - server-6.0 - python-pypy3.10 - replica_set-noauth-ssl + - noauth - pypy - name: test-non-standard-v7.0-pypy3.10-auth-ssl-sharded-cluster commands: @@ -4573,6 +4561,7 @@ tasks: - server-7.0 - python-pypy3.10 - sharded_cluster-auth-ssl + - auth - pypy - name: test-non-standard-v8.0-pypy3.10-noauth-nossl-standalone commands: @@ -4594,6 +4583,7 @@ tasks: - server-8.0 - python-pypy3.10 - standalone-noauth-nossl + - noauth - pypy - name: test-non-standard-rapid-pypy3.10-noauth-ssl-replica-set commands: @@ -4615,6 +4605,7 @@ tasks: - server-rapid - python-pypy3.10 - replica_set-noauth-ssl + - noauth - pypy - name: test-non-standard-latest-pypy3.10-auth-ssl-sharded-cluster commands: @@ -4636,4 +4627,5 @@ tasks: - server-latest - python-pypy3.10 - sharded_cluster-auth-ssl + - auth - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 45266f0dff..4ec9f5a3a8 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -251,24 +251,33 @@ buildvariants: tags: [encryption_tag] # Enterprise auth tests + - name: auth-enterprise-rhel8 + tasks: + - name: .test-non-standard .auth + display_name: Auth Enterprise RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: enterprise_auth + AUTH: auth - name: auth-enterprise-macos tasks: - - name: .enterprise_auth !.pypy + - name: .test-non-standard !.pypy .auth display_name: Auth Enterprise macOS run_on: - macos-14 + expansions: + TEST_NAME: enterprise_auth + AUTH: auth - name: auth-enterprise-win64 tasks: - - name: .enterprise_auth !.pypy + - name: .test-non-standard !.pypy .auth display_name: Auth Enterprise Win64 run_on: - windows-64-vsMulti-small - - name: auth-enterprise-rhel8 - tasks: - - name: .enterprise_auth - display_name: Auth Enterprise RHEL8 - run_on: - - rhel87-small + expansions: + TEST_NAME: enterprise_auth + AUTH: auth # Free threaded tests - name: free-threaded-rhel8-python3.13t @@ -368,7 +377,7 @@ buildvariants: - name: mod_wsgi-ubuntu-22 tasks: - name: .mod_wsgi - display_name: mod_wsgi Ubuntu-22 + display_name: Mod_WSGI Ubuntu-22 run_on: - ubuntu2204-small expansions: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index b92e957143..e13976d8c7 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -211,15 +211,14 @@ def create_compression_variants(): def create_enterprise_auth_variants(): variants = [] - for host in [HOSTS["macos"], HOSTS["win64"], DEFAULT_HOST]: + for host in ["rhel8", "macos", "win64"]: + expansions = dict(TEST_NAME="enterprise_auth", AUTH="auth") display_name = get_variant_name("Auth Enterprise", host) - if host == DEFAULT_HOST: - tags = [".enterprise_auth"] - else: - tags = [".enterprise_auth !.pypy"] - variant = create_variant(tags, display_name, host=host) + tasks = [".test-non-standard .auth"] + if host != "rhel8": + tasks = [".test-non-standard !.pypy .auth"] + variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) - return variants @@ -338,7 +337,7 @@ def create_mod_wsgi_variants(): host = HOSTS["ubuntu22"] tasks = [".mod_wsgi"] expansions = dict(MOD_WSGI_VERSION="4") - display_name = get_variant_name("mod_wsgi", host) + display_name = get_variant_name("Mod_WSGI", host) return [create_variant(tasks, display_name, host=host, expansions=expansions)] @@ -605,6 +604,7 @@ def create_test_non_standard_tasks(): f"server-{version}", f"python-{python}", f"{topology}-{auth}-{ssl}", + auth, ] if python in PYPYS: tags.append("pypy") @@ -815,23 +815,6 @@ def create_search_index_tasks(): return [EvgTask(name=task_name, tags=tags, commands=commands)] -def create_enterprise_auth_tasks(): - tasks = [] - for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: - vars = dict(TEST_NAME="enterprise_auth", AUTH="auth", PYTHON_VERSION=python) - server_func = FunctionCall(func="run server", vars=vars) - assume_func = FunctionCall(func="assume ec2 role") - test_func = FunctionCall(func="run tests", vars=vars) - task_name = get_task_name("test-enterprise-auth", python=python) - tags = ["enterprise_auth"] - if python in PYPYS: - tags += ["pypy"] - tasks.append( - EvgTask(name=task_name, tags=tags, commands=[server_func, assume_func, test_func]) - ) - return tasks - - def create_perf_tasks(): tasks = [] for version, ssl, sync in product(["8.0"], ["ssl", "nossl"], ["sync", "async"]): From b0667b11d3bad7eb4cc7369a310c67bbec523a0c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 6 May 2025 09:00:30 -0400 Subject: [PATCH 1911/2111] PYTHON-5358 - Switch to supported Perf usage in EVG (#2334) --- .evergreen/generated_configs/functions.yml | 40 +++++++++++++++++++-- .evergreen/scripts/generate_config.py | 25 +++++++++++-- .evergreen/scripts/perf-submission-setup.sh | 15 ++++++++ .evergreen/scripts/perf-submission.sh | 25 +++++++++++++ test/performance/async_perf_test.py | 10 +++++- test/performance/perf_test.py | 10 +++++- 6 files changed, 118 insertions(+), 7 deletions(-) create mode 100755 .evergreen/scripts/perf-submission-setup.sh create mode 100755 .evergreen/scripts/perf-submission.sh diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index a28cd2596e..7d9ab2df3b 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -162,9 +162,45 @@ functions: # Send dashboard data send dashboard data: - - command: perf.send + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/perf-submission-setup.sh + working_dir: src + include_expansions_in_env: + - requester + - revision_order_id + - project_id + - version_id + - build_variant + - parsed_order_id + - task_name + - task_id + - execution + - is_mainline + type: test + - command: expansions.update params: - file: src/results.json + file: src/expansion.yml + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/perf-submission.sh + working_dir: src + include_expansions_in_env: + - requester + - revision_order_id + - project_id + - version_id + - build_variant + - parsed_order_id + - task_name + - task_id + - execution + - is_mainline + type: test # Setup system setup system: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e13976d8c7..c54908f5d7 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -41,7 +41,6 @@ ec2_assume_role, expansions_update, git_get_project, - perf_send, ) from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef @@ -1103,8 +1102,28 @@ def create_attach_benchmark_test_results_func(): def create_send_dashboard_data_func(): - cmd = perf_send(file="src/results.json") - return "send dashboard data", [cmd] + includes = [ + "requester", + "revision_order_id", + "project_id", + "version_id", + "build_variant", + "parsed_order_id", + "task_name", + "task_id", + "execution", + "is_mainline", + ] + cmds = [ + get_subprocess_exec( + include_expansions_in_env=includes, args=[".evergreen/scripts/perf-submission-setup.sh"] + ), + expansions_update(file="src/expansion.yml"), + get_subprocess_exec( + include_expansions_in_env=includes, args=[".evergreen/scripts/perf-submission.sh"] + ), + ] + return "send dashboard data", cmds mod = sys.modules[__name__] diff --git a/.evergreen/scripts/perf-submission-setup.sh b/.evergreen/scripts/perf-submission-setup.sh new file mode 100755 index 0000000000..ecb38751a5 --- /dev/null +++ b/.evergreen/scripts/perf-submission-setup.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# We use the requester expansion to determine whether the data is from a mainline evergreen run or not + +set -eu + +# shellcheck disable=SC2154 +if [ "${requester}" == "commit" ]; then + echo "is_mainline: true" >> expansion.yml +else + echo "is_mainline: false" >> expansion.yml +fi + +# We parse the username out of the order_id as patches append that in and SPS does not need that information +# shellcheck disable=SC2154 +echo "parsed_order_id: $(echo "${revision_order_id}" | awk -F'_' '{print $NF}')" >> expansion.yml diff --git a/.evergreen/scripts/perf-submission.sh b/.evergreen/scripts/perf-submission.sh new file mode 100755 index 0000000000..f7c3ea6664 --- /dev/null +++ b/.evergreen/scripts/perf-submission.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# We use the requester expansion to determine whether the data is from a mainline evergreen run or not + +set -eu + +# Submit the performance data to the SPS endpoint +# shellcheck disable=SC2154 +response=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \ + "https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=${parsed_order_id}&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=${is_mainline}" \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d @results.json) + +http_status=$(echo "$response" | grep "HTTP_STATUS" | awk -F':' '{print $2}') +response_body=$(echo "$response" | sed '/HTTP_STATUS/d') + +# We want to throw an error if the data was not successfully submitted +if [ "$http_status" -ne 200 ]; then + echo "Error: Received HTTP status $http_status" + echo "Response Body: $response_body" + exit 1 +fi + +echo "Response Body: $response_body" +echo "HTTP Status: $http_status" diff --git a/test/performance/async_perf_test.py b/test/performance/async_perf_test.py index 969437f9c9..6eb31ea4fe 100644 --- a/test/performance/async_perf_test.py +++ b/test/performance/async_perf_test.py @@ -144,7 +144,15 @@ async def asyncTearDown(self): }, }, "metrics": [ - {"name": "megabytes_per_sec", "type": "MEDIAN", "value": megabytes_per_sec}, + { + "name": "megabytes_per_sec", + "type": "MEDIAN", + "value": megabytes_per_sec, + "metadata": { + "improvement_direction": "up", + "measurement_unit": "megabytes_per_second", + }, + }, ], } ) diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 39487eff6d..5688d28d2d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -151,7 +151,15 @@ def tearDown(self): }, }, "metrics": [ - {"name": "megabytes_per_sec", "type": "MEDIAN", "value": megabytes_per_sec}, + { + "name": "megabytes_per_sec", + "type": "MEDIAN", + "value": megabytes_per_sec, + "metadata": { + "improvement_direction": "up", + "measurement_unit": "megabytes_per_second", + }, + }, ], } ) From 12b4fe36446f15cad610a1faafde7d3463c7da1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 08:32:53 -0500 Subject: [PATCH 1912/2111] Bump the actions group with 2 updates (#2333) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/test-python.yml | 12 ++++++------ .github/workflows/zizmor.yml | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index f7cfea144e..3becfd72b5 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 05b87f4f81..f6056f6ca2 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -25,7 +25,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -88,7 +88,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 with: enable-cache: true python-version: "3.9" @@ -111,7 +111,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 with: enable-cache: true python-version: "3.9" @@ -130,7 +130,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 with: enable-cache: true python-version: "3.9" @@ -152,7 +152,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 with: enable-cache: true python-version: "${{matrix.python}}" diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 16f2ba2cbb..b0d4e7cf2e 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -26,7 +26,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v3 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 with: sarif_file: results.sarif category: zizmor From d0b0dc351238c44912d291898bff209bfba4229d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 6 May 2025 13:40:12 -0500 Subject: [PATCH 1913/2111] PYTHON-5339 Clean up GitHub PR definitions in Evergreen Project (#2331) --- .evergreen/generated_configs/tasks.yml | 24 +++++++++++++----- .evergreen/scripts/generate_config.py | 35 +++++++++++++++++++------- 2 files changed, 44 insertions(+), 15 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 5083b456c5..eb5b49e6f2 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -134,7 +134,7 @@ tasks: commands: - func: download and merge coverage depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] - tags: [coverage] + tags: [coverage, pr] # Free threading tests - name: test-free-threading @@ -211,7 +211,7 @@ tasks: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone PYTHON_VERSION: "3.9" - tags: [mod_wsgi] + tags: [mod_wsgi, pr] - name: mod-wsgi-embedded-mode-replica-set-python3.10 commands: - func: run server @@ -223,7 +223,7 @@ tasks: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded PYTHON_VERSION: "3.10" - tags: [mod_wsgi] + tags: [mod_wsgi, pr] - name: mod-wsgi-replica-set-python3.11 commands: - func: run server @@ -235,7 +235,7 @@ tasks: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone PYTHON_VERSION: "3.11" - tags: [mod_wsgi] + tags: [mod_wsgi, pr] - name: mod-wsgi-embedded-mode-replica-set-python3.12 commands: - func: run server @@ -247,7 +247,7 @@ tasks: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded PYTHON_VERSION: "3.12" - tags: [mod_wsgi] + tags: [mod_wsgi, pr] - name: mod-wsgi-replica-set-python3.13 commands: - func: run server @@ -259,7 +259,7 @@ tasks: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone PYTHON_VERSION: "3.13" - tags: [mod_wsgi] + tags: [mod_wsgi, pr] # No orchestration tests - name: test-no-orchestration-python3.9 @@ -2490,6 +2490,7 @@ tasks: - python-3.9 - standalone-noauth-nossl - sync + - pr - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov commands: - func: run server @@ -2511,6 +2512,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - async + - pr - name: test-server-version-python3.11-sync-auth-ssl-replica-set-cov commands: - func: run server @@ -2656,6 +2658,7 @@ tasks: - python-3.11 - replica_set-noauth-nossl - sync + - pr - name: test-server-version-python3.12-async-noauth-nossl-replica-set-cov commands: - func: run server @@ -2677,6 +2680,7 @@ tasks: - python-3.12 - replica_set-noauth-nossl - async + - pr - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov commands: - func: run server @@ -2698,6 +2702,7 @@ tasks: - python-3.13 - sharded_cluster-auth-ssl - sync + - pr - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server @@ -2717,6 +2722,7 @@ tasks: - python-pypy3.10 - sharded_cluster-auth-ssl - async + - pr - name: test-server-version-python3.9-sync-auth-nossl-sharded-cluster-cov commands: - func: run server @@ -3611,6 +3617,7 @@ tasks: - python-3.13 - standalone-noauth-nossl - sync + - pr - name: test-standard-latest-python3.9-async-noauth-ssl-replica-set commands: - func: run server @@ -3633,6 +3640,7 @@ tasks: - python-3.9 - replica_set-noauth-ssl - async + - pr - name: test-standard-latest-python3.10-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -3655,6 +3663,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - sync + - pr - name: test-standard-v4.0-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server @@ -4389,6 +4398,7 @@ tasks: - python-3.13 - standalone-noauth-nossl - noauth + - pr - name: test-non-standard-latest-python3.9-noauth-ssl-replica-set commands: - func: run server @@ -4410,6 +4420,7 @@ tasks: - python-3.9 - replica_set-noauth-ssl - noauth + - pr - name: test-non-standard-latest-python3.10-auth-ssl-sharded-cluster commands: - func: run server @@ -4431,6 +4442,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - auth + - pr - name: test-non-standard-v4.0-pypy3.10-noauth-nossl-standalone commands: - func: run server diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index c54908f5d7..8bb4d5b21f 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -552,8 +552,19 @@ def create_server_version_tasks(): task_inputs.append(task_input) # Assemble the tasks. + seen = set() for topology, auth, ssl, sync, python in task_inputs: - tags = ["server-version", f"python-{python}", f"{topology}-{auth}-{ssl}", sync] + combo = f"{topology}-{auth}-{ssl}" + tags = ["server-version", f"python-{python}", combo, sync] + if combo in [ + "standalone-noauth-nossl", + "replica_set-noauth-nossl", + "sharded_cluster-auth-ssl", + ]: + combo = f"{combo}-{sync}" + if combo not in seen: + seen.add(combo) + tags.append("pr") expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) if python not in PYPYS: expansions["COVERAGE"] = "1" @@ -592,11 +603,12 @@ def create_test_non_standard_tasks(): task_combos = [] # For each version and topology, rotate through the CPythons. for (version, topology), python in zip_cycle(list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS): - task_combos.append((version, topology, python)) + pr = version == "latest" + task_combos.append((version, topology, python, pr)) # For each PyPy and topology, rotate through the the versions. for (python, topology), version in zip_cycle(list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS): - task_combos.append((version, topology, python)) - for version, topology, python in task_combos: + task_combos.append((version, topology, python, False)) + for version, topology, python, pr in task_combos: auth, ssl = get_standard_auth_ssl(topology) tags = [ "test-non-standard", @@ -607,6 +619,8 @@ def create_test_non_standard_tasks(): ] if python in PYPYS: tags.append("pypy") + if pr: + tags.append("pr") expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) name = get_task_name("test-non-standard", python=python, **expansions) server_func = FunctionCall(func="run server", vars=expansions) @@ -625,14 +639,15 @@ def create_standard_tasks(): for (version, topology), python, sync in zip_cycle( list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS, SYNCS ): - task_combos.append((version, topology, python, sync)) + pr = version == "latest" + task_combos.append((version, topology, python, sync, pr)) # For each PyPy and topology, rotate through the the versions and sync/async. for (python, topology), version, sync in zip_cycle( list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS, SYNCS ): - task_combos.append((version, topology, python, sync)) + task_combos.append((version, topology, python, sync, False)) - for version, topology, python, sync in task_combos: + for version, topology, python, sync, pr in task_combos: auth, ssl = get_standard_auth_ssl(topology) tags = [ "test-standard", @@ -643,6 +658,8 @@ def create_standard_tasks(): ] if python in PYPYS: tags.append("pypy") + if pr: + tags.append("pr") expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) name = get_task_name("test-standard", python=python, sync=sync, **expansions) server_func = FunctionCall(func="run server", vars=expansions) @@ -757,7 +774,7 @@ def create_mod_wsgi_tasks(): server_func = FunctionCall(func="run server", vars=server_vars) vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0], PYTHON_VERSION=python) test_func = FunctionCall(func="run tests", vars=vars) - tags = ["mod_wsgi"] + tags = ["mod_wsgi", "pr"] commands = [server_func, test_func] tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) return tasks @@ -842,7 +859,7 @@ def create_getdata_tasks(): def create_coverage_report_tasks(): - tags = ["coverage"] + tags = ["coverage", "pr"] task_name = "coverage-report" # BUILD-3165: We can't use "*" (all tasks) and specify "variant". # Instead list out all coverage tasks using tags. From 5914ea0ff42b4bbacdb172f8b55321726553472f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 8 May 2025 13:10:11 -0500 Subject: [PATCH 1914/2111] PYTHON-5342 Fix test_dns_failures test (#2336) --- test/asynchronous/test_srv_polling.py | 5 +++-- test/test_srv_polling.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index b40aa90cfa..3ba50e77a8 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -20,7 +20,6 @@ import time from test.utils_shared import FunctionCallRecorder from typing import Any -from unittest import skipIf sys.path[0:0] = [""] @@ -92,7 +91,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.disable() -@skipIf(not _IS_SYNC and sys.platform == "win32", "PYTHON-5342 known issue on Windows") class TestSrvPolling(AsyncPyMongoTestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), @@ -186,6 +184,9 @@ def dns_resolver_response(): ): await assertion_method(expected_response, client) + # Close the client early to avoid affecting the next scenario run. + await client.close() + async def test_addition(self): response = self.BASE_SRV_RESPONSE[:] response.append(("localhost.test.build.10gen.cc", 27019)) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 0d84d41241..971c3bad50 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -20,7 +20,6 @@ import time from test.utils_shared import FunctionCallRecorder from typing import Any -from unittest import skipIf sys.path[0:0] = [""] @@ -92,7 +91,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.disable() -@skipIf(not _IS_SYNC and sys.platform == "win32", "PYTHON-5342 known issue on Windows") class TestSrvPolling(PyMongoTestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), @@ -186,6 +184,9 @@ def dns_resolver_response(): ): assertion_method(expected_response, client) + # Close the client early to avoid affecting the next scenario run. + client.close() + def test_addition(self): response = self.BASE_SRV_RESPONSE[:] response.append(("localhost.test.build.10gen.cc", 27019)) From 775b6832769a32752ef998dc84eb37c3cf52ca59 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 8 May 2025 14:20:11 -0400 Subject: [PATCH 1915/2111] PYTHON-5371 - Pass repr(ServerDescription) to logging (#2329) --- pymongo/asynchronous/topology.py | 16 ++++++++-------- pymongo/synchronous/topology.py | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 438dd1e352..99b30fed1e 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -154,8 +154,8 @@ def __init__(self, topology_settings: TopologySettings): _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=initial_td, - newDescription=self._description, + previousDescription=repr(initial_td), + newDescription=repr(self._description), ) for seed in topology_settings.seeds: @@ -514,8 +514,8 @@ async def _process_change( _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=td_old, - newDescription=self._description, + previousDescription=repr(td_old), + newDescription=repr(self._description), ) # Shutdown SRV polling for unsupported cluster types. @@ -581,8 +581,8 @@ async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=td_old, - newDescription=self._description, + previousDescription=repr(td_old), + newDescription=repr(self._description), ) async def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: @@ -747,8 +747,8 @@ async def close(self) -> None: _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=old_td, - newDescription=self._description, + previousDescription=repr(old_td), + newDescription=repr(self._description), ) _debug_log( _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 1e99adf726..10d41def6e 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -154,8 +154,8 @@ def __init__(self, topology_settings: TopologySettings): _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=initial_td, - newDescription=self._description, + previousDescription=repr(initial_td), + newDescription=repr(self._description), ) for seed in topology_settings.seeds: @@ -514,8 +514,8 @@ def _process_change( _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=td_old, - newDescription=self._description, + previousDescription=repr(td_old), + newDescription=repr(self._description), ) # Shutdown SRV polling for unsupported cluster types. @@ -581,8 +581,8 @@ def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=td_old, - newDescription=self._description, + previousDescription=repr(td_old), + newDescription=repr(self._description), ) def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: @@ -745,8 +745,8 @@ def close(self) -> None: _SDAM_LOGGER, message=_SDAMStatusMessage.TOPOLOGY_CHANGE, topologyId=self._topology_id, - previousDescription=old_td, - newDescription=self._description, + previousDescription=repr(old_td), + newDescription=repr(self._description), ) _debug_log( _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id From 98b030af947537b34ed5c11c20b763057632d37a Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 8 May 2025 15:19:31 -0400 Subject: [PATCH 1916/2111] PYTHON-5356 - Init unified test client SDAM for all unified tests (#2325) --- test/asynchronous/unified_format.py | 1 + test/unified_format.py | 1 + 2 files changed, 2 insertions(+) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 10b247d1fa..23707b942f 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -303,6 +303,7 @@ async def _create_entity(self, entity_spec, uri=None): if uri: kwargs["h"] = uri client = await self.test.async_rs_or_single_client(**kwargs) + await client.aconnect() self[spec["id"]] = client return elif entity_type == "database": diff --git a/test/unified_format.py b/test/unified_format.py index d3da2b3a82..84881800a2 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -302,6 +302,7 @@ def _create_entity(self, entity_spec, uri=None): if uri: kwargs["h"] = uri client = self.test.rs_or_single_client(**kwargs) + client._connect() self[spec["id"]] = client return elif entity_type == "database": From 2655bb4d864daeeb73cd5af0c5499c6a0874b590 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Thu, 8 May 2025 17:14:26 -0400 Subject: [PATCH 1917/2111] PYTHON-5033 Use PyModule_Add on >= 3.13 (#2332) --- bson/_cbsonmodule.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index 672f5eeda5..be91e41734 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -3227,11 +3227,18 @@ _cbson_exec(PyObject *m) INITERROR; } +#if PY_VERSION_HEX >= 0x030D0000 + if (PyModule_Add(m, "_C_API", c_api_object) < 0) { + Py_DECREF(m); + INITERROR; + } +# else if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { Py_DECREF(c_api_object); Py_DECREF(m); INITERROR; } +#endif return 0; } From aa41e70523e77c4bffeab4bbd625a9fc20566c3e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 12 May 2025 09:28:05 -0400 Subject: [PATCH 1918/2111] =?UTF-8?q?PYTHON-5369=20-=20Re-raise=20socket.t?= =?UTF-8?q?imeout=20errors=20if=20the=20deadline=20has=20alre=E2=80=A6=20(?= =?UTF-8?q?#2326)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/asynchronous/pool.py | 17 ++++++++++------- pymongo/asynchronous/topology.py | 2 +- pymongo/network_layer.py | 7 ++++++- pymongo/synchronous/pool.py | 17 ++++++++++------- pymongo/synchronous/topology.py | 2 +- test/asynchronous/utils.py | 3 ++- test/test_topology.py | 2 +- test/utils.py | 3 ++- 8 files changed, 33 insertions(+), 20 deletions(-) diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index f4d5b174fa..9a39883fc2 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -131,6 +131,7 @@ class AsyncConnection: :param pool: a Pool instance :param address: the server's (host, port) :param id: the id of this socket in it's pool + :param is_sdam: SDAM connections do not call hello on creation """ def __init__( @@ -139,11 +140,13 @@ def __init__( pool: Pool, address: tuple[str, int], id: int, + is_sdam: bool, ): self.pool_ref = weakref.ref(pool) self.conn = conn self.address = address self.id = id + self.is_sdam = is_sdam self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False @@ -711,13 +714,13 @@ def __init__( self, address: _Address, options: PoolOptions, - handshake: bool = True, + is_sdam: bool = False, client_id: Optional[ObjectId] = None, ): """ :param address: a (hostname, port) tuple :param options: a PoolOptions instance - :param handshake: whether to call hello for each new AsyncConnection + :param is_sdam: whether to call hello for each new AsyncConnection """ if options.pause_enabled: self.state = PoolState.PAUSED @@ -746,14 +749,14 @@ def __init__( self.pid = os.getpid() self.address = address self.opts = options - self.handshake = handshake + self.is_sdam = is_sdam # Don't publish events or logs in Monitor pools. self.enabled_for_cmap = ( - self.handshake + not self.is_sdam and self.opts._event_listeners is not None and self.opts._event_listeners.enabled_for_cmap ) - self.enabled_for_logging = self.handshake + self.enabled_for_logging = not self.is_sdam # The first portion of the wait queue. # Enforces: maxPoolSize @@ -1058,14 +1061,14 @@ async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> A raise - conn = AsyncConnection(networking_interface, self, self.address, conn_id) # type: ignore[arg-type] + conn = AsyncConnection(networking_interface, self, self.address, conn_id, self.is_sdam) # type: ignore[arg-type] async with self.lock: self.active_contexts.add(conn.cancel_context) self.active_contexts.discard(tmp_context) if tmp_context.cancelled: conn.cancel_context.cancel() try: - if self.handshake: + if not self.is_sdam: await conn.hello() self.is_writable = conn.is_writable if handler: diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 99b30fed1e..052f91afee 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -985,7 +985,7 @@ def _create_pool_for_monitor(self, address: _Address) -> Pool: ) return self._settings.pool_class( - address, monitor_pool_options, handshake=False, client_id=self._topology_id + address, monitor_pool_options, is_sdam=True, client_id=self._topology_id ) def _error_message(self, selector: Callable[[Selection], Selection]) -> str: diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 3fa180bf7a..6f1bb9a357 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -357,7 +357,12 @@ def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> me except socket.timeout: if conn.cancel_context.cancelled: raise _OperationCancelled("operation cancelled") from None - if _PYPY: + if ( + _PYPY + or not conn.is_sdam + and deadline is not None + and deadline - time.monotonic() < 0 + ): # We reached the true deadline. raise continue diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 44aec31a86..505f58c60f 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -131,6 +131,7 @@ class Connection: :param pool: a Pool instance :param address: the server's (host, port) :param id: the id of this socket in it's pool + :param is_sdam: SDAM connections do not call hello on creation """ def __init__( @@ -139,11 +140,13 @@ def __init__( pool: Pool, address: tuple[str, int], id: int, + is_sdam: bool, ): self.pool_ref = weakref.ref(pool) self.conn = conn self.address = address self.id = id + self.is_sdam = is_sdam self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False @@ -709,13 +712,13 @@ def __init__( self, address: _Address, options: PoolOptions, - handshake: bool = True, + is_sdam: bool = False, client_id: Optional[ObjectId] = None, ): """ :param address: a (hostname, port) tuple :param options: a PoolOptions instance - :param handshake: whether to call hello for each new Connection + :param is_sdam: whether to call hello for each new Connection """ if options.pause_enabled: self.state = PoolState.PAUSED @@ -744,14 +747,14 @@ def __init__( self.pid = os.getpid() self.address = address self.opts = options - self.handshake = handshake + self.is_sdam = is_sdam # Don't publish events or logs in Monitor pools. self.enabled_for_cmap = ( - self.handshake + not self.is_sdam and self.opts._event_listeners is not None and self.opts._event_listeners.enabled_for_cmap ) - self.enabled_for_logging = self.handshake + self.enabled_for_logging = not self.is_sdam # The first portion of the wait queue. # Enforces: maxPoolSize @@ -1054,14 +1057,14 @@ def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connect raise - conn = Connection(networking_interface, self, self.address, conn_id) # type: ignore[arg-type] + conn = Connection(networking_interface, self, self.address, conn_id, self.is_sdam) # type: ignore[arg-type] with self.lock: self.active_contexts.add(conn.cancel_context) self.active_contexts.discard(tmp_context) if tmp_context.cancelled: conn.cancel_context.cancel() try: - if self.handshake: + if not self.is_sdam: conn.hello() self.is_writable = conn.is_writable if handler: diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 10d41def6e..28370d4adc 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -983,7 +983,7 @@ def _create_pool_for_monitor(self, address: _Address) -> Pool: ) return self._settings.pool_class( - address, monitor_pool_options, handshake=False, client_id=self._topology_id + address, monitor_pool_options, is_sdam=True, client_id=self._topology_id ) def _error_message(self, selector: Callable[[Selection], Selection]) -> str: diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py index f653c575e9..ca80d1f6dd 100644 --- a/test/asynchronous/utils.py +++ b/test/asynchronous/utils.py @@ -159,6 +159,7 @@ def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False self.id = random.randint(0, 100) + self.is_sdam = False self.server_connection_id = random.randint(0, 100) def close_conn(self, reason): @@ -172,7 +173,7 @@ def __aexit__(self, exc_type, exc_val, exc_tb): class AsyncMockPool: - def __init__(self, address, options, handshake=True, client_id=None): + def __init__(self, address, options, is_sdam=False, client_id=None): self.gen = _PoolGeneration() self._lock = _async_create_lock() self.opts = options diff --git a/test/test_topology.py b/test/test_topology.py index 22e94739ee..141b2d7f21 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -121,7 +121,7 @@ def test_timeout_configuration(self): self.assertEqual(1, monitor._pool.opts.socket_timeout) # The monitor, not its pool, is responsible for calling hello. - self.assertFalse(monitor._pool.handshake) + self.assertTrue(monitor._pool.is_sdam) class TestSingleServerTopology(TopologyTest): diff --git a/test/utils.py b/test/utils.py index 3027ed7517..25d95d1d3c 100644 --- a/test/utils.py +++ b/test/utils.py @@ -157,6 +157,7 @@ def __init__(self): self.cancel_context = _CancellationContext() self.more_to_come = False self.id = random.randint(0, 100) + self.is_sdam = False self.server_connection_id = random.randint(0, 100) def close_conn(self, reason): @@ -170,7 +171,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class MockPool: - def __init__(self, address, options, handshake=True, client_id=None): + def __init__(self, address, options, is_sdam=False, client_id=None): self.gen = _PoolGeneration() self._lock = _create_lock() self.opts = options From 75f6a3718ea29ffb6d896e8c5064c4cc984aa4d2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 12 May 2025 09:35:08 -0500 Subject: [PATCH 1919/2111] Revert "PYTHON-5126 & PYTHON-5280 Addresses issues raised in DRIVERS-3097 and DRIVERS-3123 " (#2337) --- bson/binary.py | 15 ------------ test/bson_binary_vector/packed_bit.json | 21 +++++------------ test/test_bson.py | 8 +++---- test/test_bson_binary_vector.py | 31 ++++++++----------------- 4 files changed, 20 insertions(+), 55 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index 48f1f58512..6698e55ccc 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -462,10 +462,6 @@ def from_vector( raise ValueError(f"{padding=}. It must be in [0,1, ..7].") if padding and not vector: raise ValueError("Empty vector with non-zero padding.") - if padding and not (vector[-1] & ((1 << padding) - 1)) == 0: # type: ignore - raise ValueError( - "If padding p is provided, all bits in the final byte lower than p must be 0." - ) elif dtype == BinaryVectorDtype.FLOAT32: # pack floats as float32 format_str = "f" if padding: @@ -494,11 +490,6 @@ def as_vector(self) -> BinaryVector: dtype = BinaryVectorDtype(dtype) n_values = len(self) - position - if padding and dtype != BinaryVectorDtype.PACKED_BIT: - raise ValueError( - f"Corrupt data. Padding ({padding}) must be 0 for all but PACKED_BIT dtypes. ({dtype=})" - ) - if dtype == BinaryVectorDtype.INT8: dtype_format = "b" format_string = f"<{n_values}{dtype_format}" @@ -522,12 +513,6 @@ def as_vector(self) -> BinaryVector: dtype_format = "B" format_string = f"<{n_values}{dtype_format}" unpacked_uint8s = list(struct.unpack_from(format_string, self, position)) - if padding and not n_values: - raise ValueError("Corrupt data. Vector has a padding P, but no data.") - if padding and n_values and not (unpacked_uint8s[-1] & ((1 << padding) - 1)) == 0: - raise ValueError( - "Corrupt data. Vector has a padding P, but bits in the final byte lower than P are non-zero." - ) return BinaryVector(unpacked_uint8s, dtype, padding) else: diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json index 3015acba66..a220e7e318 100644 --- a/test/bson_binary_vector/packed_bit.json +++ b/test/bson_binary_vector/packed_bit.json @@ -21,32 +21,23 @@ "canonical_bson": "1600000005766563746F7200040000000910007F0700" }, { - "description": "PACKED_BIT with padding", + "description": "Empty Vector PACKED_BIT", "valid": true, - "vector": [127, 8], + "vector": [], "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", - "padding": 3, - "canonical_bson": "1600000005766563746F7200040000000910037F0800" + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009100000" }, { - "description": "PACKED_BIT with inconsistent padding", - "valid": false, + "description": "PACKED_BIT with padding", + "valid": true, "vector": [127, 7], "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", "padding": 3, "canonical_bson": "1600000005766563746F7200040000000910037F0700" }, - { - "description": "Empty Vector PACKED_BIT", - "valid": true, - "vector": [], - "dtype_hex": "0x10", - "dtype_alias": "PACKED_BIT", - "padding": 0, - "canonical_bson": "1400000005766563746F72000200000009100000" - }, { "description": "Overflow Vector PACKED_BIT", "valid": false, diff --git a/test/test_bson.py b/test/test_bson.py index 522945d5f4..1616c513c2 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -739,7 +739,7 @@ def test_vector(self): """Tests of subtype 9""" # We start with valid cases, across the 3 dtypes implemented. # Work with a simple vector that can be interpreted as int8, float32, or ubyte - list_vector = [127, 8] + list_vector = [127, 7] # As INT8, vector has length 2 binary_vector = Binary.from_vector(list_vector, BinaryVectorDtype.INT8) vector = binary_vector.as_vector() @@ -764,18 +764,18 @@ def test_vector(self): uncompressed = "" for val in list_vector: uncompressed += format(val, "08b") - assert uncompressed[:-padding] == "0111111100001" + assert uncompressed[:-padding] == "0111111100000" # It is worthwhile explicitly showing the values encoded to BSON padded_doc = {"padded_vec": padded_vec} assert ( encode(padded_doc) - == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x08\x00" + == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x07\x00" ) # and dumped to json assert ( json_util.dumps(padded_doc) - == '{"padded_vec": {"$binary": {"base64": "EAN/CA==", "subType": "09"}}}' + == '{"padded_vec": {"$binary": {"base64": "EAN/Bw==", "subType": "09"}}}' ) # FLOAT32 is also implemented diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py index afe01f42bf..9bfdcbfb9a 100644 --- a/test/test_bson_binary_vector.py +++ b/test/test_bson_binary_vector.py @@ -48,11 +48,11 @@ def create_test(case_spec): def run_test(self): for test_case in case_spec.get("tests", []): description = test_case["description"] - vector_exp = test_case.get("vector", None) + vector_exp = test_case.get("vector", []) dtype_hex_exp = test_case["dtype_hex"] dtype_alias_exp = test_case.get("dtype_alias") padding_exp = test_case.get("padding", 0) - canonical_bson_exp = test_case.get("canonical_bson", None) + canonical_bson_exp = test_case.get("canonical_bson") # Convert dtype hex string into bytes dtype_exp = BinaryVectorDtype(int(dtype_hex_exp, 16).to_bytes(1, byteorder="little")) @@ -85,25 +85,14 @@ def run_test(self): self.assertEqual(cB_obs, canonical_bson_exp, description) else: - """ - #### To prove correct in an invalid case (`valid:false`), one MUST - - if the vector field is present, raise an exception when attempting to encode a document from the numeric values, - dtype, and padding. - - if the canonical_bson field is present, raise an exception when attempting to deserialize it into the corresponding - numeric values, as the field contains corrupted data. - """ - # Tests Binary.from_vector() - if vector_exp is not None: - with self.assertRaises((struct.error, ValueError), msg=description): - Binary.from_vector(vector_exp, dtype_exp, padding_exp) - - # Tests Binary.as_vector() - if canonical_bson_exp is not None: - with self.assertRaises((struct.error, ValueError), msg=description): - cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) - decoded_doc = decode(cB_exp) - binary_obs = decoded_doc[test_key] - binary_obs.as_vector() + with self.assertRaises((struct.error, ValueError), msg=description): + # Tests Binary.from_vector + Binary.from_vector(vector_exp, dtype_exp, padding_exp) + # Tests Binary.as_vector + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + binary_obs.as_vector() return run_test From 2374f3811aec6d7e785d3a5f5fc16d51c5873a3c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 13 May 2025 11:40:00 -0500 Subject: [PATCH 1920/2111] PYTHON-5379 Run more variants on pull requests (#2340) --- .evergreen/generated_configs/tasks.yml | 4 +++ .evergreen/generated_configs/variants.yml | 29 ++++++++++++++-- .evergreen/scripts/generate_config.py | 41 +++++++++++++++++++---- 3 files changed, 65 insertions(+), 9 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index eb5b49e6f2..8bc758890a 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -178,6 +178,7 @@ tasks: vars: TEST_NAME: kms SUB_TEST_NAME: gcp + tags: [] - name: test-gcpkms-fail commands: - func: run server @@ -185,12 +186,14 @@ tasks: vars: TEST_NAME: kms SUB_TEST_NAME: gcp-fail + tags: [pr] - name: test-azurekms commands: - func: run tests vars: TEST_NAME: kms SUB_TEST_NAME: azure + tags: [] - name: test-azurekms-fail commands: - func: run server @@ -198,6 +201,7 @@ tasks: vars: TEST_NAME: kms SUB_TEST_NAME: azure-fail + tags: [pr] # Mod wsgi tests - name: mod-wsgi-replica-set-python3.9 diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 4ec9f5a3a8..673bb111cd 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -22,6 +22,7 @@ buildvariants: VERSION: latest NO_EXT: "1" REQUIRE_FIPS: "1" + tags: [] - name: other-hosts-rhel8-zseries-latest tasks: - name: .test-no-toolchain @@ -32,6 +33,7 @@ buildvariants: expansions: VERSION: latest NO_EXT: "1" + tags: [] - name: other-hosts-rhel8-power8-latest tasks: - name: .test-no-toolchain @@ -42,6 +44,7 @@ buildvariants: expansions: VERSION: latest NO_EXT: "1" + tags: [] - name: other-hosts-rhel8-arm64-latest tasks: - name: .test-no-toolchain @@ -52,6 +55,7 @@ buildvariants: expansions: VERSION: latest NO_EXT: "1" + tags: [] - name: other-hosts-amazon2023-latest tasks: - name: .test-no-toolchain @@ -62,6 +66,7 @@ buildvariants: expansions: VERSION: latest NO_EXT: "1" + tags: [pr] # Atlas connect tests - name: atlas-connect-rhel8 @@ -70,6 +75,7 @@ buildvariants: display_name: Atlas connect RHEL8 run_on: - rhel87-small + tags: [pr] # Atlas data lake tests - name: atlas-data-lake-ubuntu-22 @@ -80,6 +86,7 @@ buildvariants: - ubuntu2204-small expansions: TEST_NAME: data_lake + tags: [pr] # Aws auth tests - name: auth-aws-ubuntu-20 @@ -88,18 +95,21 @@ buildvariants: display_name: Auth AWS Ubuntu-20 run_on: - ubuntu2004-small + tags: [] - name: auth-aws-win64 tasks: - name: .auth-aws !.auth-aws-ecs display_name: Auth AWS Win64 run_on: - windows-64-vsMulti-small + tags: [] - name: auth-aws-macos tasks: - name: .auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 display_name: Auth AWS macOS run_on: - macos-14 + tags: [pr] # Aws lambda tests - name: faas-lambda @@ -288,6 +298,7 @@ buildvariants: - rhel87-small expansions: PYTHON_BINARY: /opt/python/3.13t/bin/python3 + tags: [pr] - name: free-threaded-macos-python3.13t tasks: - name: .free-threading @@ -296,6 +307,7 @@ buildvariants: - macos-14 expansions: PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t + tags: [] - name: free-threaded-macos-arm64-python3.13t tasks: - name: .free-threading @@ -304,6 +316,7 @@ buildvariants: - macos-14-arm64 expansions: PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t + tags: [] # Green framework tests - name: green-eventlet-rhel8 @@ -372,6 +385,7 @@ buildvariants: - rhel87-small expansions: TEST_NAME: mockupdb + tags: [pr] # Mod wsgi tests - name: mod_wsgi-ubuntu-22 @@ -398,6 +412,7 @@ buildvariants: display_name: No server RHEL8 run_on: - rhel87-small + tags: [pr] # Ocsp tests - name: ocsp-rhel8 @@ -427,21 +442,29 @@ buildvariants: # Oidc auth tests - name: auth-oidc-ubuntu-22 tasks: - - name: .auth_oidc + - name: .auth_oidc_remote display_name: Auth OIDC Ubuntu-22 run_on: - ubuntu2204-small batchtime: 10080 + - name: auth-oidc-local-ubuntu-22 + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC Local Ubuntu-22 + run_on: + - ubuntu2204-small + batchtime: 10080 + tags: [pr] - name: auth-oidc-macos tasks: - - name: .auth_oidc !.auth_oidc_remote + - name: "!.auth_oidc_remote .auth_oidc" display_name: Auth OIDC macOS run_on: - macos-14 batchtime: 10080 - name: auth-oidc-win64 tasks: - - name: .auth_oidc !.auth_oidc_remote + - name: "!.auth_oidc_remote .auth_oidc" display_name: Auth OIDC Win64 run_on: - windows-64-vsMulti-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 8bb4d5b21f..9f42fb0a4b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -112,10 +112,13 @@ def create_free_threaded_variants() -> list[BuildVariant]: # TODO: PYTHON-5027 continue tasks = [".free-threading"] + tags = [] + if host_name == "rhel8": + tags.append("pr") host = HOSTS[host_name] python = "3.13t" display_name = get_variant_name("Free-threaded", host, python=python) - variant = create_variant(tasks, display_name, python=python, host=host) + variant = create_variant(tasks, display_name, tags=tags, python=python, host=host) variants.append(variant) return variants @@ -329,7 +332,7 @@ def create_atlas_data_lake_variants(): tasks = [".test-no-orchestration"] expansions = dict(TEST_NAME="data_lake") display_name = get_variant_name("Atlas Data Lake", host) - return [create_variant(tasks, display_name, host=host, expansions=expansions)] + return [create_variant(tasks, display_name, tags=["pr"], host=host, expansions=expansions)] def create_mod_wsgi_variants(): @@ -370,9 +373,9 @@ def create_oidc_auth_variants(): variants = [] for host_name in ["ubuntu22", "macos", "win64"]: if host_name == "ubuntu22": - tasks = [".auth_oidc"] + tasks = [".auth_oidc_remote"] else: - tasks = [".auth_oidc !.auth_oidc_remote"] + tasks = ["!.auth_oidc_remote .auth_oidc"] host = HOSTS[host_name] variants.append( create_variant( @@ -382,6 +385,18 @@ def create_oidc_auth_variants(): batchtime=BATCHTIME_WEEK, ) ) + # Add a specific local test to run on PRs. + if host_name == "ubuntu22": + tasks = ["!.auth_oidc_remote .auth_oidc"] + variants.append( + create_variant( + tasks, + get_variant_name("Auth OIDC Local", host), + tags=["pr"], + host=host, + batchtime=BATCHTIME_WEEK, + ) + ) return variants @@ -406,6 +421,7 @@ def create_mockupdb_variants(): [".test-no-orchestration"], get_variant_name("MockupDB", host), host=host, + tags=["pr"], expansions=expansions, ) ] @@ -430,6 +446,7 @@ def create_atlas_connect_variants(): create_variant( [".test-no-orchestration"], get_variant_name("Atlas connect", host), + tags=["pr"], host=DEFAULT_HOST, ) ] @@ -469,8 +486,10 @@ def create_aws_auth_variants(): for host_name in ["ubuntu20", "win64", "macos"]: expansions = dict() tasks = [".auth-aws"] + tags = [] if host_name == "macos": tasks = [".auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2"] + tags = ["pr"] elif host_name == "win64": tasks = [".auth-aws !.auth-aws-ecs"] host = HOSTS[host_name] @@ -478,6 +497,7 @@ def create_aws_auth_variants(): tasks, get_variant_name("Auth AWS", host), host=host, + tags=tags, expansions=expansions, ) variants.append(variant) @@ -487,7 +507,7 @@ def create_aws_auth_variants(): def create_no_server_variants(): host = HOSTS["rhel8"] name = get_variant_name("No server", host=host) - return [create_variant([".test-no-orchestration"], name, host=host)] + return [create_variant([".test-no-orchestration"], name, host=host, tags=["pr"])] def create_alternative_hosts_variants(): @@ -512,14 +532,18 @@ def create_alternative_hosts_variants(): expansions = dict(VERSION="latest") handle_c_ext(C_EXTS[0], expansions) host = HOSTS[host_name] + tags = [] if "fips" in host_name.lower(): expansions["REQUIRE_FIPS"] = "1" + if "amazon" in host_name.lower(): + tags.append("pr") variants.append( create_variant( [".test-no-toolchain"], display_name=get_variant_name("Other hosts", host, version=version), batchtime=batchtime, host=host, + tags=tags, expansions=expansions, ) ) @@ -693,16 +717,18 @@ def create_kms_tasks(): for success in [True, False]: name = f"test-{kms_type}kms" sub_test_name = kms_type + tags = [] if not success: name += "-fail" sub_test_name += "-fail" + tags.append("pr") commands = [] if not success: commands.append(FunctionCall(func="run server")) test_vars = dict(TEST_NAME="kms", SUB_TEST_NAME=sub_test_name) test_func = FunctionCall(func="run tests", vars=test_vars) commands.append(test_func) - tasks.append(EvgTask(name=name, commands=commands)) + tasks.append(EvgTask(name=name, tags=tags, commands=commands)) return tasks @@ -756,6 +782,7 @@ def create_oidc_tasks(): if sub_test != "default": tags.append("auth_oidc_remote") tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + return tasks @@ -802,6 +829,8 @@ def _create_ocsp_tasks(algo, variant, server_type, base_task_name): tags = ["ocsp", f"ocsp-{algo}", version] if "disableStapling" not in variant: tags.append("ocsp-staple") + if algo == "valid-cert-server-staples" and version == "latest": + tags.append("pr") task_name = get_task_name( f"test-ocsp-{algo}-{base_task_name}", python=python, version=version From 4cc5e89ebf67a10b4187b7c5fd08f55f6f9fa6a5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Tue, 13 May 2025 13:37:18 -0700 Subject: [PATCH 1921/2111] PYTHON-5362 WriteConcern repr should be eval-able (#2338) --- doc/changelog.rst | 15 +++++++++++++++ pymongo/write_concern.py | 2 +- test/test_write_concern.py | 13 +++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 87918639cd..9b0275dbd8 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,21 @@ Changelog ========= +Changes in Version 4.13.0 (2025/05/14) +-------------------------------------- + +PyMongo 4.13 brings a number of changes including: + +- Fixed a bug where :class:`pymongo.write_concern.WriteConcern` repr was not eval-able + when using ``w="majority"``. + +Issues Resolved +............... + +See the `PyMongo 4.13 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=42509 Changes in Version 4.12.1 (2025/04/29) -------------------------------------- diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ff31c6730d..1f9da7af2e 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -127,7 +127,7 @@ def acknowledged(self) -> bool: def __repr__(self) -> str: return "WriteConcern({})".format( - ", ".join("{}={}".format(*kvt) for kvt in self.__document.items()) + ", ".join(f"{k}={v!r}" for k, v in self.__document.items()) ) def __eq__(self, other: Any) -> bool: diff --git a/test/test_write_concern.py b/test/test_write_concern.py index e22c7e7a8c..02a7cb6e5c 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -67,6 +67,19 @@ def test_equality_incompatible_type(self): _fake_type = collections.namedtuple("NotAWriteConcern", ["document"]) # type: ignore self.assertNotEqual(WriteConcern(j=True), _fake_type({"j": True})) + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + def test_repr(self): + concern = WriteConcern(j=True, wtimeout=3000, w="majority", fsync=False) + self.assertRepr(concern) + self.assertEqual( + repr(concern), + "WriteConcern(wtimeout=3000, j=True, fsync=False, w='majority')", + ) + if __name__ == "__main__": unittest.main() From 92a562388641d8598af73ffd7dc29e1c7a4ad5f6 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 14 May 2025 07:45:37 -0400 Subject: [PATCH 1922/2111] PYTHON-5377 - Update assets to align with GA release of Async PyMongo (#2339) --- README.md | 2 +- doc/faq.rst | 7 ++----- doc/tools.rst | 3 +++ 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 962d0d958c..9bee92c568 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ The PyMongo distribution contains tools for interacting with MongoDB database from Python. The `bson` package is an implementation of the [BSON format](http://bsonspec.org) for Python. The `pymongo` package is -a native Python driver for MongoDB. The `gridfs` package is a +a native Python driver for MongoDB, offering both synchronous and asynchronous APIs. The `gridfs` package is a [gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.md/) implementation on top of `pymongo`. diff --git a/doc/faq.rst b/doc/faq.rst index 7656481d89..cb67ea7fe5 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -170,12 +170,9 @@ PyMongo supports CPython 3.9+ and PyPy3.10+. See the :doc:`python3` for details. Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? --------------------------------------------------------------------------------------- +As of PyMongo v4.13, PyMongo fully supports asyncio and `Tornado `_. See `the official docs `_ for more details. -PyMongo fully supports :doc:`Gevent `. - -To use MongoDB with `asyncio `_ -or `Tornado `_, see the -`Motor `_ project. +PyMongo also fully supports :doc:`Gevent `. For `Twisted `_, see `TxMongo `_. Its stated mission is to keep feature diff --git a/doc/tools.rst b/doc/tools.rst index a3f167d024..5a9297ad64 100644 --- a/doc/tools.rst +++ b/doc/tools.rst @@ -163,6 +163,9 @@ These are alternatives to PyMongo. * `Motor `_ is a full-featured, non-blocking MongoDB driver for Python Tornado applications. + As of PyMongo v4.13, Motor's features have been merged into PyMongo via the new AsyncMongoClient API. + As a result of this merger, Motor will be officially deprecated on May 14th, 2026. + For more information, see `the official PyMongo docs `_. * `TxMongo `_ is an asynchronous Twisted Python driver for MongoDB. * `MongoMock `_ is a small From 60faca02530eefc654d24834fc9447a72a1ad81d Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 14 May 2025 14:00:46 -0400 Subject: [PATCH 1923/2111] Update changelog for v4.13 release (#2341) --- doc/api/gridfs/asynchronous/index.rst | 4 ---- doc/api/pymongo/asynchronous/change_stream.rst | 4 ---- doc/api/pymongo/asynchronous/client_session.rst | 4 ---- doc/api/pymongo/asynchronous/collection.rst | 4 ---- doc/api/pymongo/asynchronous/command_cursor.rst | 4 ---- doc/api/pymongo/asynchronous/cursor.rst | 4 ---- doc/api/pymongo/asynchronous/database.rst | 4 ---- doc/api/pymongo/asynchronous/index.rst | 4 ---- doc/api/pymongo/asynchronous/mongo_client.rst | 4 ---- doc/async-tutorial.rst | 4 ---- doc/changelog.rst | 3 +++ pymongo/asynchronous/mongo_client.py | 2 -- 12 files changed, 3 insertions(+), 42 deletions(-) diff --git a/doc/api/gridfs/asynchronous/index.rst b/doc/api/gridfs/asynchronous/index.rst index 0904d10f98..7b6ebb28b8 100644 --- a/doc/api/gridfs/asynchronous/index.rst +++ b/doc/api/gridfs/asynchronous/index.rst @@ -1,10 +1,6 @@ :mod:`gridfs async` -- Async tools for working with GridFS ========================================================== -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: gridfs.asynchronous :synopsis: Async tools for working with GridFS diff --git a/doc/api/pymongo/asynchronous/change_stream.rst b/doc/api/pymongo/asynchronous/change_stream.rst index df4f5dee41..1b506fdb55 100644 --- a/doc/api/pymongo/asynchronous/change_stream.rst +++ b/doc/api/pymongo/asynchronous/change_stream.rst @@ -1,10 +1,6 @@ :mod:`change_stream` -- Watch changes on a collection, database, or cluster =========================================================================== -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.change_stream :members: diff --git a/doc/api/pymongo/asynchronous/client_session.rst b/doc/api/pymongo/asynchronous/client_session.rst index c4bbd8edd2..d8403325d7 100644 --- a/doc/api/pymongo/asynchronous/client_session.rst +++ b/doc/api/pymongo/asynchronous/client_session.rst @@ -1,10 +1,6 @@ :mod:`client_session` -- Logical sessions for sequential operations =================================================================== -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.client_session :members: diff --git a/doc/api/pymongo/asynchronous/collection.rst b/doc/api/pymongo/asynchronous/collection.rst index ce1fe3ca04..779295ced1 100644 --- a/doc/api/pymongo/asynchronous/collection.rst +++ b/doc/api/pymongo/asynchronous/collection.rst @@ -1,10 +1,6 @@ :mod:`collection` -- Collection level operations ================================================ -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.collection :synopsis: Collection level operations diff --git a/doc/api/pymongo/asynchronous/command_cursor.rst b/doc/api/pymongo/asynchronous/command_cursor.rst index 7058563eee..1f94c6e525 100644 --- a/doc/api/pymongo/asynchronous/command_cursor.rst +++ b/doc/api/pymongo/asynchronous/command_cursor.rst @@ -1,10 +1,6 @@ :mod:`command_cursor` -- Tools for iterating over MongoDB command results ========================================================================= -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.command_cursor :synopsis: Tools for iterating over MongoDB command results diff --git a/doc/api/pymongo/asynchronous/cursor.rst b/doc/api/pymongo/asynchronous/cursor.rst index d357b84514..f511734de4 100644 --- a/doc/api/pymongo/asynchronous/cursor.rst +++ b/doc/api/pymongo/asynchronous/cursor.rst @@ -1,10 +1,6 @@ :mod:`cursor` -- Tools for iterating over MongoDB query results =============================================================== -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.cursor :synopsis: Tools for iterating over MongoDB query results diff --git a/doc/api/pymongo/asynchronous/database.rst b/doc/api/pymongo/asynchronous/database.rst index b45fe457e7..7b043ab0d1 100644 --- a/doc/api/pymongo/asynchronous/database.rst +++ b/doc/api/pymongo/asynchronous/database.rst @@ -1,10 +1,6 @@ :mod:`database` -- Database level operations ============================================ -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.database :synopsis: Database level operations diff --git a/doc/api/pymongo/asynchronous/index.rst b/doc/api/pymongo/asynchronous/index.rst index 1b41fb8222..b7fc985415 100644 --- a/doc/api/pymongo/asynchronous/index.rst +++ b/doc/api/pymongo/asynchronous/index.rst @@ -1,10 +1,6 @@ :mod:`pymongo async` -- Async Python driver for MongoDB ======================================================= -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous :synopsis: Asynchronous Python driver for MongoDB diff --git a/doc/api/pymongo/asynchronous/mongo_client.rst b/doc/api/pymongo/asynchronous/mongo_client.rst index d0729da78b..899ca687d5 100644 --- a/doc/api/pymongo/asynchronous/mongo_client.rst +++ b/doc/api/pymongo/asynchronous/mongo_client.rst @@ -1,10 +1,6 @@ :mod:`mongo_client` -- Tools for connecting to MongoDB ====================================================== -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. automodule:: pymongo.asynchronous.mongo_client :synopsis: Tools for connecting to MongoDB diff --git a/doc/async-tutorial.rst b/doc/async-tutorial.rst index 1884631ec3..b3e33e4b5c 100644 --- a/doc/async-tutorial.rst +++ b/doc/async-tutorial.rst @@ -1,10 +1,6 @@ Async Tutorial ============== -.. warning:: This API is currently in beta, meaning the classes, methods, - and behaviors described within may change before the full release. - If you come across any bugs during your use of this API, - please file a Jira ticket in the "Python Driver" project at https://jira.mongodb.org/browse/PYTHON. .. code-block:: pycon diff --git a/doc/changelog.rst b/doc/changelog.rst index 9b0275dbd8..80d1c4e2f0 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,9 @@ Changes in Version 4.13.0 (2025/05/14) PyMongo 4.13 brings a number of changes including: +- The asynchronous API is now stable and no longer in beta. + See the :mod:`pymongo.asynchronous` docs + or the `migration guide `_ for more information. - Fixed a bug where :class:`pymongo.write_concern.WriteConcern` repr was not eval-able when using ``w="majority"``. diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index a236b21348..72755263c9 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -194,8 +194,6 @@ def __init__( For more details, see the relevant section of the PyMongo 4.x migration guide: :ref:`pymongo4-migration-direct-connection`. - .. warning:: This API is currently in beta, meaning the classes, methods, and behaviors described within may change before the full release. - The client object is thread-safe and has connection-pooling built in. If an operation fails because of a network error, :class:`~pymongo.errors.ConnectionFailure` is raised and the client From 397c280217c347bead06c6f7892cb921789c6eb7 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 14 May 2025 18:23:10 +0000 Subject: [PATCH 1924/2111] BUMP 4.13.0.dev1 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index e49406e755..a070cba8ee 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.13.0.dev0" +__version__ = "4.13.0.dev1" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 84411b91190e44a1ebf1eec40df63ae0d431fe22 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 14 May 2025 14:51:36 -0400 Subject: [PATCH 1925/2111] Bump version to 4.13.0 for release (#2342) --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index a070cba8ee..d3f1a7529d 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.13.0.dev1" +__version__ = "4.13.0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From a435a3e1c385dce4129bf8e1f86d98266569626a Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 14 May 2025 19:14:32 +0000 Subject: [PATCH 1926/2111] BUMP 4.14.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index d3f1a7529d..9e7924773b 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.13.0" +__version__ = "4.14.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 726a6fa98dddc38cf5ffaee22d843d94ddd0244d Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Mon, 19 May 2025 19:12:41 -0400 Subject: [PATCH 1927/2111] PYTHON-5384 Describe MongoDB specifications (#2344) --- CONTRIBUTING.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e0b6260e21..fa500273af 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -384,7 +384,14 @@ If you are running one of the `no-responder` tests, omit the `run-server` step. - Regenerate the test variants and tasks using `pre-commit run --all-files generate-config`. - Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. -## Re-sync Spec Tests +## Specification Tests + +The MongoDB [specifications repository](https://github.com/mongodb/specifications) +holds in progress and completed specifications for features of MongoDB, drivers, +and associated products. PyMongo supports the [Unified Test Format](https://jira.mongodb.org/browse/DRIVERS-709) +for running specification tests to confirm PyMongo behaves as expected. + +### Resynchronizing the Specification Tests If you would like to re-sync the copy of the specification tests in the PyMongo repository with that which is inside the [specifications From 106343a6a2688c9869a7909b31ab227dd077ee1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 May 2025 07:30:00 -0500 Subject: [PATCH 1928/2111] Bump github/codeql-action from 3.28.17 to 3.28.18 in the actions group (#2343) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/zizmor.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3becfd72b5..e88f86f278 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index b0d4e7cf2e..1c6c655055 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -26,7 +26,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3 + uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 with: sarif_file: results.sarif category: zizmor From 717fb47c17ea46ec66cfdbb9f6a6979294b6ac44 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 21 May 2025 13:45:36 -0400 Subject: [PATCH 1929/2111] PYTHON-5061 - Add an API to extend the bson TypeRegistry (#2345) --- bson/codec_options.py | 10 ++++++++++ doc/changelog.rst | 7 +++++++ test/asynchronous/test_custom_types.py | 9 +++++++++ test/test_custom_types.py | 9 +++++++++ 4 files changed, 35 insertions(+) diff --git a/bson/codec_options.py b/bson/codec_options.py index 258a777a1b..0428cf843f 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -160,6 +160,16 @@ def __init__( f"Expected an instance of {TypeEncoder.__name__}, {TypeDecoder.__name__}, or {TypeCodec.__name__}, got {codec!r} instead" ) + @property + def codecs(self) -> list[TypeEncoder | TypeDecoder | TypeCodec]: + """The list of type codecs in this registry.""" + return self.__type_codecs + + @property + def fallback_encoder(self) -> Optional[_Fallback]: + """The fallback encoder in this registry.""" + return self._fallback_encoder + def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES diff --git a/doc/changelog.rst b/doc/changelog.rst index 80d1c4e2f0..c44cfb41a2 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,13 @@ Changelog ========= +Changes in Version 4.14.0 (XXXX/XX/XX) +-------------------------------------- +PyMongo 4.14 brings a number of changes including: + +- Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties + to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. + Changes in Version 4.13.0 (2025/05/14) -------------------------------------- diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py index 0f9d737afe..0ab9e95fe0 100644 --- a/test/asynchronous/test_custom_types.py +++ b/test/asynchronous/test_custom_types.py @@ -579,6 +579,15 @@ def test_initialize_fail(self): with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + def test_type_registry_codecs(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + self.assertEqual(type_registry.codecs, codec_instances) + + def test_type_registry_fallback(self): + type_registry = TypeRegistry(fallback_encoder=self.fallback_encoder) + self.assertEqual(type_registry.fallback_encoder, self.fallback_encoder) + def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 08e2a46f8f..bcdc14f2e9 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -579,6 +579,15 @@ def test_initialize_fail(self): with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + def test_type_registry_codecs(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + self.assertEqual(type_registry.codecs, codec_instances) + + def test_type_registry_fallback(self): + type_registry = TypeRegistry(fallback_encoder=self.fallback_encoder) + self.assertEqual(type_registry.fallback_encoder, self.fallback_encoder) + def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) From 65089ead4cd71dc7cd4c0196a3878c420f77eee1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Thu, 22 May 2025 16:15:44 -0700 Subject: [PATCH 1930/2111] PYTHON-5386 Better test assertions for isinstance (#2347) --- test/asynchronous/test_bulk.py | 8 +- test/asynchronous/test_client.py | 10 +- test/asynchronous/test_collection.py | 100 ++++++++--------- test/asynchronous/test_cursor.py | 2 +- test/asynchronous/test_custom_types.py | 2 +- test/asynchronous/test_database.py | 8 +- test/asynchronous/test_encryption.py | 8 +- test/asynchronous/test_grid_file.py | 10 +- test/asynchronous/test_gridfs.py | 2 +- test/asynchronous/test_gridfs_bucket.py | 2 +- test/asynchronous/test_monitoring.py | 142 ++++++++++++------------ test/test_binary.py | 4 +- test/test_bson.py | 12 +- test/test_bulk.py | 8 +- test/test_client.py | 10 +- test/test_code.py | 4 +- test/test_collection.py | 100 ++++++++--------- test/test_cursor.py | 2 +- test/test_custom_types.py | 2 +- test/test_database.py | 8 +- test/test_encryption.py | 8 +- test/test_grid_file.py | 10 +- test/test_gridfs.py | 2 +- test/test_gridfs_bucket.py | 2 +- test/test_json_util.py | 2 +- test/test_monitoring.py | 142 ++++++++++++------------ test/test_timestamp.py | 2 +- test/test_uri_parser.py | 4 +- 28 files changed, 308 insertions(+), 308 deletions(-) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 65ed6e236a..7c6f032e81 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -281,7 +281,7 @@ async def test_upsert(self): self.assertEqual(1, result.upserted_count) assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) - self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) + self.assertIsInstance(result.upserted_ids.get(0), ObjectId) self.assertEqual(await self.coll.count_documents({"foo": "bar"}), 1) @@ -998,7 +998,7 @@ async def test_write_concern_failure_ordered(self): failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) await self.coll.delete_many({}) await self.coll.create_index("a", unique=True) @@ -1105,12 +1105,12 @@ async def test_write_concern_failure_unordered(self): failed = details["writeErrors"][0] self.assertEqual(2, failed["index"]) self.assertEqual(11000, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) self.assertEqual(1, failed["op"]["a"]) failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) upserts = details["upserted"] self.assertEqual(1, len(upserts)) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 1e1faf0a2a..4d247009e5 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -214,7 +214,7 @@ def make_db(base, name): self.assertRaises(InvalidName, make_db, self.client, "te/t") self.assertRaises(InvalidName, make_db, self.client, "te st") - self.assertTrue(isinstance(self.client.test, AsyncDatabase)) + self.assertIsInstance(self.client.test, AsyncDatabase) self.assertEqual(self.client.test, self.client["test"]) self.assertEqual(self.client.test, AsyncDatabase(self.client, "test")) @@ -228,7 +228,7 @@ def test_get_database(self): self.assertEqual(write_concern, db.write_concern) def test_getattr(self): - self.assertTrue(isinstance(self.client["_does_not_exist"], AsyncDatabase)) + self.assertIsInstance(self.client["_does_not_exist"], AsyncDatabase) with self.assertRaises(AttributeError) as context: self.client._does_not_exist @@ -1274,15 +1274,15 @@ async def test_document_class(self): await db.test.insert_one({"x": 1}) self.assertEqual(dict, c.codec_options.document_class) - self.assertTrue(isinstance(await db.test.find_one(), dict)) - self.assertFalse(isinstance(await db.test.find_one(), SON)) + self.assertIsInstance(await db.test.find_one(), dict) + self.assertNotIsInstance(await db.test.find_one(), SON) c = await self.async_rs_or_single_client(document_class=SON) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) - self.assertTrue(isinstance(await db.test.find_one(), SON)) + self.assertIsInstance(await db.test.find_one(), SON) async def test_timeouts(self): client = await self.async_rs_or_single_client( diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 00ed020d88..9367c43a0b 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -111,7 +111,7 @@ def make_col(base, name): def test_getattr(self): coll = self.db.test - self.assertTrue(isinstance(coll["_does_not_exist"], AsyncCollection)) + self.assertIsInstance(coll["_does_not_exist"], AsyncCollection) with self.assertRaises(AttributeError) as context: coll._does_not_exist @@ -176,7 +176,7 @@ def write_concern_collection(self): yield self.db.test async def test_equality(self): - self.assertTrue(isinstance(self.db.test, AsyncCollection)) + self.assertIsInstance(self.db.test, AsyncCollection) self.assertEqual(self.db.test, self.db["test"]) self.assertEqual(self.db.test, AsyncCollection(self.db, "test")) self.assertEqual(self.db.test.mike, self.db["test.mike"]) @@ -718,8 +718,8 @@ async def test_insert_one(self): document: dict[str, Any] = {"_id": 1000} result = await db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, int)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, int) self.assertEqual(document["_id"], result.inserted_id) self.assertTrue(result.acknowledged) self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) @@ -727,8 +727,8 @@ async def test_insert_one(self): document = {"foo": "bar"} result = await db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) self.assertEqual(document["_id"], result.inserted_id) self.assertTrue(result.acknowledged) self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) @@ -736,8 +736,8 @@ async def test_insert_one(self): db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = await db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) self.assertEqual(document["_id"], result.inserted_id) self.assertFalse(result.acknowledged) # The insert failed duplicate key... @@ -749,7 +749,7 @@ async def async_lambda(): document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) result = await db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(result.inserted_id, None) async def test_insert_many(self): @@ -758,38 +758,38 @@ async def test_insert_many(self): docs: list = [{} for _ in range(5)] result = await db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual(5, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, ObjectId)) + self.assertIsInstance(_id, ObjectId) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [{"_id": i} for i in range(5)] result = await db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual(5, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) + self.assertIsInstance(_id, int) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] result = await db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual([], result.inserted_ids) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) docs: list = [{} for _ in range(5)] result = await db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertFalse(result.acknowledged) self.assertEqual(20, await db.test.count_documents({})) @@ -830,20 +830,20 @@ async def test_delete_one(self): await self.db.test.insert_one({"z": 1}) result = await self.db.test.delete_one({"x": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(1, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(2, await self.db.test.count_documents({})) result = await self.db.test.delete_one({"y": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(1, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(1, await self.db.test.count_documents({})) db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = await db.test.delete_one({"z": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) @@ -861,14 +861,14 @@ async def test_delete_many(self): await self.db.test.insert_one({"y": 1}) result = await self.db.test.delete_many({"x": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(2, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(0, await self.db.test.count_documents({"x": 1})) db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = await db.test.delete_many({"y": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) @@ -920,10 +920,10 @@ async def test_insert_bypass_document_validation(self): with self.assertRaises(OperationFailure): await db.test.insert_one({"_id": 1, "x": 100}) result = await db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(1, result.inserted_id) result = await db.test.insert_one({"_id": 2, "a": 0}) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(2, result.inserted_id) await db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) @@ -938,21 +938,21 @@ async def async_lambda(): with self.assertRaises(OperationFailure): await db.test.insert_many(docs) result = await db.test.insert_many(docs, bypass_document_validation=True) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertTrue(97, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) + self.assertIsInstance(_id, int) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"x": doc["x"]})) self.assertTrue(result.acknowledged) docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] result = await db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertTrue(97, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) + self.assertIsInstance(_id, int) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) @@ -1182,7 +1182,7 @@ async def test_id_can_be_anything(self): await db.test.delete_many({}) auto_id = {"hello": "world"} await db.test.insert_one(auto_id) - self.assertTrue(isinstance(auto_id["_id"], ObjectId)) + self.assertIsInstance(auto_id["_id"], ObjectId) numeric = {"_id": 240, "hello": "world"} await db.test.insert_one(numeric) @@ -1346,7 +1346,7 @@ async def test_replace_one(self): id1 = (await db.test.insert_one({"x": 1})).inserted_id result = await db.test.replace_one({"x": 1}, {"y": 1}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1357,7 +1357,7 @@ async def test_replace_one(self): replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) result = await db.test.replace_one({"y": 1}, replacement, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1367,16 +1367,16 @@ async def test_replace_one(self): self.assertEqual((await db.test.find_one(id1))["z"], 1) # type: ignore result = await db.test.replace_one({"x": 2}, {"y": 2}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) self.assertEqual(1, await db.test.count_documents({"y": 2})) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = await db.test.replace_one({"x": 0}, {"y": 0}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1391,7 +1391,7 @@ async def test_update_one(self): id1 = (await db.test.insert_one({"x": 5})).inserted_id result = await db.test.update_one({}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1400,7 +1400,7 @@ async def test_update_one(self): id2 = (await db.test.insert_one({"x": 1})).inserted_id result = await db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1409,15 +1409,15 @@ async def test_update_one(self): self.assertEqual((await db.test.find_one(id2))["x"], 1) # type: ignore result = await db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1448,7 +1448,7 @@ async def test_update_many(self): await db.test.insert_one({"x": 4, "y": 4}) result = await db.test.update_many({"x": 4}, {"$set": {"y": 5}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(2, result.matched_count) self.assertTrue(result.modified_count in (None, 2)) self.assertIsNone(result.upserted_id) @@ -1456,7 +1456,7 @@ async def test_update_many(self): self.assertEqual(3, await db.test.count_documents({"y": 5})) result = await db.test.update_many({"x": 5}, {"$set": {"y": 6}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1464,15 +1464,15 @@ async def test_update_many(self): self.assertEqual(1, await db.test.count_documents({"y": 6})) result = await db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = await db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1556,7 +1556,7 @@ async def test_aggregate(self): pipeline = {"$project": {"_id": False, "foo": True}} result = await db.test.aggregate([pipeline]) - self.assertTrue(isinstance(result, AsyncCommandCursor)) + self.assertIsInstance(result, AsyncCommandCursor) self.assertEqual([{"foo": [1, 2]}], await result.to_list()) # Test write concern. @@ -1574,7 +1574,7 @@ async def test_aggregate_raw_bson(self): pipeline = {"$project": {"_id": False, "foo": True}} coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) result = await coll.aggregate([pipeline]) - self.assertTrue(isinstance(result, AsyncCommandCursor)) + self.assertIsInstance(result, AsyncCommandCursor) first_result = await anext(result) self.assertIsInstance(first_result, RawBSONDocument) self.assertEqual([1, 2], list(first_result["foo"])) @@ -1583,7 +1583,7 @@ async def test_aggregation_cursor_validation(self): db = self.db projection = {"$project": {"_id": "$_id"}} cursor = await db.test.aggregate([projection], cursor={}) - self.assertTrue(isinstance(cursor, AsyncCommandCursor)) + self.assertIsInstance(cursor, AsyncCommandCursor) async def test_aggregation_cursor(self): db = self.db @@ -2208,9 +2208,9 @@ async def test_find_regex(self): await c.drop() await c.insert_one({"r": re.compile(".*")}) - self.assertTrue(isinstance((await c.find_one())["r"], Regex)) # type: ignore + self.assertIsInstance((await c.find_one())["r"], Regex) # type: ignore async for doc in c.find(): - self.assertTrue(isinstance(doc["r"], Regex)) + self.assertIsInstance(doc["r"], Regex) def test_find_command_generation(self): cmd = _gen_find_command( diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 3c8570f336..5e4e590fef 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -959,7 +959,7 @@ async def test_clone(self): cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) # Internal types are now dict rather than SON by default - self.assertTrue(isinstance(cursor2._hint, dict)) + self.assertIsInstance(cursor2._hint, dict) self.assertEqual(cursor._hint, cursor2._hint) @async_client_context.require_sync diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py index 0ab9e95fe0..b3a51ae712 100644 --- a/test/asynchronous/test_custom_types.py +++ b/test/asynchronous/test_custom_types.py @@ -793,7 +793,7 @@ async def test_grid_out_custom_opts(self): self.assertEqual(5, two._id) self.assertEqual(11, two.length) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index b2ddd4122d..1e0f0c40d6 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -91,7 +91,7 @@ def test_get_collection(self): def test_getattr(self): db = self.client.pymongo_test - self.assertTrue(isinstance(db["_does_not_exist"], AsyncCollection)) + self.assertIsInstance(db["_does_not_exist"], AsyncCollection) with self.assertRaises(AttributeError) as context: db._does_not_exist @@ -428,7 +428,7 @@ async def test_command_with_regex(self): result = await db.command("aggregate", "test", pipeline=[], cursor={}) for doc in result["cursor"]["firstBatch"]: - self.assertTrue(isinstance(doc["r"], Regex)) + self.assertIsInstance(doc["r"], Regex) async def test_command_bulkWrite(self): # Ensure bulk write commands can be run directly via db.command(). @@ -472,7 +472,7 @@ def test_password_digest(self): with self.assertRaises(TypeError): auth._password_digest(None) # type: ignore[arg-type, call-arg] - self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) + self.assertIsInstance(auth._password_digest("mike", "password"), str) self.assertEqual( auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" ) @@ -543,7 +543,7 @@ async def test_insert_find_one(self): a_doc = SON({"hello": "world"}) a_key = (await db.test.insert_one(a_doc)).inserted_id - self.assertTrue(isinstance(a_doc["_id"], ObjectId)) + self.assertIsInstance(a_doc["_id"], ObjectId) self.assertEqual(a_doc["_id"], a_key) self.assertEqual(a_doc, await db.test.find_one({"_id": a_doc["_id"]})) self.assertEqual(a_doc, await db.test.find_one(a_key)) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 9e8758a1cd..9093b97ab4 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -2469,16 +2469,16 @@ async def asyncSetUp(self): await encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) doc = await unencrypted_client.db.csfle.find_one() - self.assertTrue(isinstance(doc["csfle"], Binary)) + self.assertIsInstance(doc["csfle"], Binary) await encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) doc = await unencrypted_client.db.csfle2.find_one() - self.assertTrue(isinstance(doc["csfle2"], Binary)) + self.assertIsInstance(doc["csfle2"], Binary) await encrypted_client.db.qe.insert_one({"qe": "qe"}) doc = await unencrypted_client.db.qe.find_one() - self.assertTrue(isinstance(doc["qe"], Binary)) + self.assertIsInstance(doc["qe"], Binary) await encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) doc = await unencrypted_client.db.qe2.find_one() - self.assertTrue(isinstance(doc["qe2"], Binary)) + self.assertIsInstance(doc["qe2"], Binary) await encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) await encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index 3f864367de..f3ca596142 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -150,7 +150,7 @@ async def test_grid_in_default_opts(self): a = AsyncGridIn(self.db.fs) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual(None, a.filename) @@ -195,7 +195,7 @@ async def test_grid_in_default_opts(self): self.assertEqual(42, a.forty_two) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual("my_file", a.filename) @@ -209,7 +209,7 @@ async def test_grid_in_default_opts(self): self.assertEqual(255 * 1024, a.chunk_size) self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) - self.assertTrue(isinstance(a.upload_date, datetime.datetime)) + self.assertIsInstance(a.upload_date, datetime.datetime) self.assertRaises(AttributeError, setattr, a, "upload_date", 5) self.assertEqual(["foo"], a.aliases) @@ -248,7 +248,7 @@ async def test_grid_out_default_opts(self): self.assertEqual(None, b.name) self.assertEqual(None, b.filename) self.assertEqual(255 * 1024, b.chunk_size) - self.assertTrue(isinstance(b.upload_date, datetime.datetime)) + self.assertIsInstance(b.upload_date, datetime.datetime) self.assertEqual(None, b.aliases) self.assertEqual(None, b.metadata) self.assertEqual(None, b.md5) @@ -309,7 +309,7 @@ async def test_grid_out_custom_opts(self): self.assertEqual(11, two.length) self.assertEqual("text/html", two.content_type) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 1, "bar": 2}, two.metadata) self.assertEqual(3, two.bar) diff --git a/test/asynchronous/test_gridfs.py b/test/asynchronous/test_gridfs.py index f886601f36..f60352f3cb 100644 --- a/test/asynchronous/test_gridfs.py +++ b/test/asynchronous/test_gridfs.py @@ -157,7 +157,7 @@ async def test_empty_file(self): assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) self.assertNotIn("md5", raw) diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py index e8d063b712..4640507e94 100644 --- a/test/asynchronous/test_gridfs_bucket.py +++ b/test/asynchronous/test_gridfs_bucket.py @@ -136,7 +136,7 @@ async def test_empty_file(self): assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) self.assertNotIn("md5", raw) diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index a7d56a8cf7..b2ca1aaaa6 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -68,26 +68,26 @@ async def test_started_simple(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(SON([("ping", 1)]), started.command) self.assertEqual("ping", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) async def test_succeeded_simple(self): await self.client.pymongo_test.command("ping") started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) self.assertEqual("ping", succeeded.command_name) self.assertEqual(await self.client.address, succeeded.connection_id) self.assertEqual(1, succeeded.reply.get("ok")) - self.assertTrue(isinstance(succeeded.request_id, int)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(succeeded.request_id, int) + self.assertIsInstance(succeeded.duration_micros, int) async def test_failed_simple(self): try: @@ -97,21 +97,21 @@ async def test_failed_simple(self): started = self.listener.started_events[0] failed = self.listener.failed_events[0] self.assertEqual(0, len(self.listener.succeeded_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) self.assertEqual("oops!", failed.command_name) self.assertEqual(await self.client.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) - self.assertTrue(isinstance(failed.request_id, int)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) async def test_find_one(self): await self.client.pymongo_test.test.find_one() started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), started.command, @@ -119,7 +119,7 @@ async def test_find_one(self): self.assertEqual("find", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) async def test_find_and_get_more(self): await self.client.pymongo_test.test.drop() @@ -132,7 +132,7 @@ async def test_find_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] @@ -142,11 +142,11 @@ async def test_find_and_get_more(self): self.assertEqual("find", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] self.assertEqual(csr["id"], cursor_id) @@ -161,7 +161,7 @@ async def test_find_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), started.command, @@ -169,11 +169,11 @@ async def test_find_and_get_more(self): self.assertEqual("getMore", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("getMore", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] self.assertEqual(csr["id"], cursor_id) @@ -196,16 +196,16 @@ async def test_find_with_explain(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(cmd, started.command) self.assertEqual("explain", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("explain", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(await self.client.address, succeeded.connection_id) self.assertEqual(res, succeeded.reply) @@ -227,16 +227,16 @@ async def _test_find_options(self, query, expected_cmd): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(expected_cmd, started.command) self.assertEqual("find", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(await self.client.address, succeeded.connection_id) finally: # Exhaust the cursor to avoid kill cursors. @@ -308,7 +308,7 @@ async def test_command_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [ @@ -322,11 +322,11 @@ async def test_command_and_get_more(self): self.assertEqual("aggregate", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("aggregate", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_cursor = { "id": cursor_id, @@ -341,7 +341,7 @@ async def test_command_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), started.command, @@ -349,11 +349,11 @@ async def test_command_and_get_more(self): self.assertEqual("getMore", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("getMore", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { "cursor": { @@ -381,18 +381,18 @@ async def test_get_more_failure(self): started = self.listener.started_events[0] self.assertEqual(0, len(self.listener.succeeded_events)) failed = self.listener.failed_events[0] - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test")]), started.command ) self.assertEqual("getMore", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertIsInstance(failed.duration_micros, int) self.assertEqual("getMore", failed.command_name) - self.assertTrue(isinstance(failed.request_id, int)) + self.assertIsInstance(failed.request_id, int) self.assertEqual(cursor.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) @@ -412,13 +412,13 @@ async def test_not_primary_error(self): started = self.listener.started_events[0] failed = self.listener.failed_events[0] self.assertEqual(0, len(self.listener.succeeded_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) self.assertEqual("findAndModify", failed.command_name) self.assertEqual(address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) - self.assertTrue(isinstance(failed.request_id, int)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) self.assertEqual(error, failed.failure) @async_client_context.require_no_mongos @@ -434,7 +434,7 @@ async def test_exhaust(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] @@ -444,11 +444,11 @@ async def test_exhaust(self): self.assertEqual("find", started.command_name) self.assertEqual(cursor.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { "cursor": { @@ -464,7 +464,7 @@ async def test_exhaust(self): tuple(await cursor.to_list()) self.assertEqual(0, len(self.listener.failed_events)) for event in self.listener.started_events: - self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) + self.assertIsInstance(event, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), event.command, @@ -472,12 +472,12 @@ async def test_exhaust(self): self.assertEqual("getMore", event.command_name) self.assertEqual(cursor.address, event.connection_id) self.assertEqual("pymongo_test", event.database_name) - self.assertTrue(isinstance(event.request_id, int)) + self.assertIsInstance(event.request_id, int) for event in self.listener.succeeded_events: - self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(event.duration_micros, int)) + self.assertIsInstance(event, monitoring.CommandSucceededEvent) + self.assertIsInstance(event.duration_micros, int) self.assertEqual("getMore", event.command_name) - self.assertTrue(isinstance(event.request_id, int)) + self.assertIsInstance(event.request_id, int) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) @@ -495,7 +495,7 @@ async def test_kill_cursors(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) # There could be more than one cursor_id here depending on # when the thread last ran. self.assertIn(cursor_id, started.command["cursors"]) @@ -503,11 +503,11 @@ async def test_kill_cursors(self): self.assertIs(type(started.connection_id), tuple) self.assertEqual(cursor.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("killCursors", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertIs(type(succeeded.connection_id), tuple) self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on @@ -1157,13 +1157,13 @@ async def test_simple(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(SON([("ping", 1)]), started.command) self.assertEqual("ping", started.command_name) self.assertEqual(await self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) class AsyncTestEventClasses(unittest.IsolatedAsyncioTestCase): diff --git a/test/test_binary.py b/test/test_binary.py index 567c5ae92f..a64aa42280 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -82,8 +82,8 @@ def test_binary(self): a_binary = Binary(b"hello world") self.assertTrue(a_binary.startswith(b"hello")) self.assertTrue(a_binary.endswith(b"world")) - self.assertTrue(isinstance(a_binary, Binary)) - self.assertFalse(isinstance(a_string, Binary)) + self.assertIsInstance(a_binary, Binary) + self.assertNotIsInstance(a_string, Binary) def test_exceptions(self): self.assertRaises(TypeError, Binary, None) diff --git a/test/test_bson.py b/test/test_bson.py index 1616c513c2..23e0a29c4f 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -142,7 +142,7 @@ def helper(doc): helper({}) helper({"test": "hello"}) - self.assertTrue(isinstance(decoder(encoder({"hello": "world"}))["hello"], str)) + self.assertIsInstance(decoder(encoder({"hello": "world"}))["hello"], str) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) @@ -557,7 +557,7 @@ def test_unknown_type(self): try: decode(bs) except Exception as exc: - self.assertTrue(isinstance(exc, InvalidBSON)) + self.assertIsInstance(exc, InvalidBSON) self.assertIn(part, str(exc)) else: self.fail("Failed to raise an exception.") @@ -722,7 +722,7 @@ def test_uuid(self): opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) transformed_id = decode(encode({"id": id}, codec_options=opts), codec_options=opts)["id"] - self.assertTrue(isinstance(transformed_id, uuid.UUID)) + self.assertIsInstance(transformed_id, uuid.UUID) self.assertEqual(id, transformed_id) self.assertNotEqual(uuid.uuid4(), transformed_id) @@ -731,7 +731,7 @@ def test_uuid_legacy(self): legacy = Binary.from_uuid(id, UuidRepresentation.PYTHON_LEGACY) self.assertEqual(3, legacy.subtype) bin = decode(encode({"uuid": legacy}))["uuid"] - self.assertTrue(isinstance(bin, Binary)) + self.assertIsInstance(bin, Binary) transformed = bin.as_uuid(UuidRepresentation.PYTHON_LEGACY) self.assertEqual(id, transformed) @@ -787,7 +787,7 @@ def test_vector(self): try: Binary.from_vector([x], BinaryVectorDtype.PACKED_BIT) except Exception as exc: - self.assertTrue(isinstance(exc, struct.error)) + self.assertIsInstance(exc, struct.error) else: self.fail("Failed to raise an exception.") @@ -886,7 +886,7 @@ def test_utf8(self): y = {"hello": iso8859_bytes} # Stored as BSON binary subtype 0. out = decode(encode(y)) - self.assertTrue(isinstance(out["hello"], bytes)) + self.assertIsInstance(out["hello"], bytes) self.assertEqual(out["hello"], iso8859_bytes) def test_null_character(self): diff --git a/test/test_bulk.py b/test/test_bulk.py index 8a863cc49b..00c6c5e649 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -281,7 +281,7 @@ def test_upsert(self): self.assertEqual(1, result.upserted_count) assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) - self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) + self.assertIsInstance(result.upserted_ids.get(0), ObjectId) self.assertEqual(self.coll.count_documents({"foo": "bar"}), 1) @@ -996,7 +996,7 @@ def test_write_concern_failure_ordered(self): failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) self.coll.delete_many({}) self.coll.create_index("a", unique=True) @@ -1101,12 +1101,12 @@ def test_write_concern_failure_unordered(self): failed = details["writeErrors"][0] self.assertEqual(2, failed["index"]) self.assertEqual(11000, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) self.assertEqual(1, failed["op"]["a"]) failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) upserts = details["upserted"] self.assertEqual(1, len(upserts)) diff --git a/test/test_client.py b/test/test_client.py index 189e58e803..4f2e5751ad 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -211,7 +211,7 @@ def make_db(base, name): self.assertRaises(InvalidName, make_db, self.client, "te/t") self.assertRaises(InvalidName, make_db, self.client, "te st") - self.assertTrue(isinstance(self.client.test, Database)) + self.assertIsInstance(self.client.test, Database) self.assertEqual(self.client.test, self.client["test"]) self.assertEqual(self.client.test, Database(self.client, "test")) @@ -225,7 +225,7 @@ def test_get_database(self): self.assertEqual(write_concern, db.write_concern) def test_getattr(self): - self.assertTrue(isinstance(self.client["_does_not_exist"], Database)) + self.assertIsInstance(self.client["_does_not_exist"], Database) with self.assertRaises(AttributeError) as context: self.client._does_not_exist @@ -1237,15 +1237,15 @@ def test_document_class(self): db.test.insert_one({"x": 1}) self.assertEqual(dict, c.codec_options.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) + self.assertIsInstance(db.test.find_one(), dict) + self.assertNotIsInstance(db.test.find_one(), SON) c = self.rs_or_single_client(document_class=SON) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) + self.assertIsInstance(db.test.find_one(), SON) def test_timeouts(self): client = self.rs_or_single_client( diff --git a/test/test_code.py b/test/test_code.py index c564e3e04e..23f0af5cef 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -46,8 +46,8 @@ def test_code(self): a_code = Code("hello world") self.assertTrue(a_code.startswith("hello")) self.assertTrue(a_code.endswith("world")) - self.assertTrue(isinstance(a_code, Code)) - self.assertFalse(isinstance(a_string, Code)) + self.assertIsInstance(a_code, Code) + self.assertNotIsInstance(a_string, Code) self.assertIsNone(a_code.scope) with_scope = Code("hello world", {"my_var": 5}) self.assertEqual({"my_var": 5}, with_scope.scope) diff --git a/test/test_collection.py b/test/test_collection.py index 75c11383d0..6d1b02f1ac 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -111,7 +111,7 @@ def make_col(base, name): def test_getattr(self): coll = self.db.test - self.assertTrue(isinstance(coll["_does_not_exist"], Collection)) + self.assertIsInstance(coll["_does_not_exist"], Collection) with self.assertRaises(AttributeError) as context: coll._does_not_exist @@ -176,7 +176,7 @@ def write_concern_collection(self): yield self.db.test def test_equality(self): - self.assertTrue(isinstance(self.db.test, Collection)) + self.assertIsInstance(self.db.test, Collection) self.assertEqual(self.db.test, self.db["test"]) self.assertEqual(self.db.test, Collection(self.db, "test")) self.assertEqual(self.db.test.mike, self.db["test.mike"]) @@ -706,8 +706,8 @@ def test_insert_one(self): document: dict[str, Any] = {"_id": 1000} result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, int)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, int) self.assertEqual(document["_id"], result.inserted_id) self.assertTrue(result.acknowledged) self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) @@ -715,8 +715,8 @@ def test_insert_one(self): document = {"foo": "bar"} result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) self.assertEqual(document["_id"], result.inserted_id) self.assertTrue(result.acknowledged) self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) @@ -724,8 +724,8 @@ def test_insert_one(self): db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) self.assertEqual(document["_id"], result.inserted_id) self.assertFalse(result.acknowledged) # The insert failed duplicate key... @@ -737,7 +737,7 @@ def async_lambda(): document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(result.inserted_id, None) def test_insert_many(self): @@ -746,38 +746,38 @@ def test_insert_many(self): docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual(5, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, ObjectId)) + self.assertIsInstance(_id, ObjectId) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [{"_id": i} for i in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual(5, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) + self.assertIsInstance(_id, int) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual([], result.inserted_ids) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertFalse(result.acknowledged) self.assertEqual(20, db.test.count_documents({})) @@ -818,20 +818,20 @@ def test_delete_one(self): self.db.test.insert_one({"z": 1}) result = self.db.test.delete_one({"x": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(1, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(2, self.db.test.count_documents({})) result = self.db.test.delete_one({"y": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(1, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(1, self.db.test.count_documents({})) db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_one({"z": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) @@ -849,14 +849,14 @@ def test_delete_many(self): self.db.test.insert_one({"y": 1}) result = self.db.test.delete_many({"x": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(2, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(0, self.db.test.count_documents({"x": 1})) db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_many({"y": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) @@ -908,10 +908,10 @@ def test_insert_bypass_document_validation(self): with self.assertRaises(OperationFailure): db.test.insert_one({"_id": 1, "x": 100}) result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(1, result.inserted_id) result = db.test.insert_one({"_id": 2, "a": 0}) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(2, result.inserted_id) db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) @@ -926,21 +926,21 @@ def async_lambda(): with self.assertRaises(OperationFailure): db.test.insert_many(docs) result = db.test.insert_many(docs, bypass_document_validation=True) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertTrue(97, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) + self.assertIsInstance(_id, int) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, db.test.count_documents({"x": doc["x"]})) self.assertTrue(result.acknowledged) docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertTrue(97, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) + self.assertIsInstance(_id, int) self.assertTrue(_id in result.inserted_ids) self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) @@ -1168,7 +1168,7 @@ def test_id_can_be_anything(self): db.test.delete_many({}) auto_id = {"hello": "world"} db.test.insert_one(auto_id) - self.assertTrue(isinstance(auto_id["_id"], ObjectId)) + self.assertIsInstance(auto_id["_id"], ObjectId) numeric = {"_id": 240, "hello": "world"} db.test.insert_one(numeric) @@ -1332,7 +1332,7 @@ def test_replace_one(self): id1 = (db.test.insert_one({"x": 1})).inserted_id result = db.test.replace_one({"x": 1}, {"y": 1}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1343,7 +1343,7 @@ def test_replace_one(self): replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) result = db.test.replace_one({"y": 1}, replacement, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1353,16 +1353,16 @@ def test_replace_one(self): self.assertEqual((db.test.find_one(id1))["z"], 1) # type: ignore result = db.test.replace_one({"x": 2}, {"y": 2}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 2})) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.replace_one({"x": 0}, {"y": 0}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1377,7 +1377,7 @@ def test_update_one(self): id1 = (db.test.insert_one({"x": 5})).inserted_id result = db.test.update_one({}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1386,7 +1386,7 @@ def test_update_one(self): id2 = (db.test.insert_one({"x": 1})).inserted_id result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1395,15 +1395,15 @@ def test_update_one(self): self.assertEqual((db.test.find_one(id2))["x"], 1) # type: ignore result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1434,7 +1434,7 @@ def test_update_many(self): db.test.insert_one({"x": 4, "y": 4}) result = db.test.update_many({"x": 4}, {"$set": {"y": 5}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(2, result.matched_count) self.assertTrue(result.modified_count in (None, 2)) self.assertIsNone(result.upserted_id) @@ -1442,7 +1442,7 @@ def test_update_many(self): self.assertEqual(3, db.test.count_documents({"y": 5})) result = db.test.update_many({"x": 5}, {"$set": {"y": 6}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) self.assertTrue(result.modified_count in (None, 1)) self.assertIsNone(result.upserted_id) @@ -1450,15 +1450,15 @@ def test_update_many(self): self.assertEqual(1, db.test.count_documents({"y": 6})) result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1538,7 +1538,7 @@ def test_aggregate(self): pipeline = {"$project": {"_id": False, "foo": True}} result = db.test.aggregate([pipeline]) - self.assertTrue(isinstance(result, CommandCursor)) + self.assertIsInstance(result, CommandCursor) self.assertEqual([{"foo": [1, 2]}], result.to_list()) # Test write concern. @@ -1556,7 +1556,7 @@ def test_aggregate_raw_bson(self): pipeline = {"$project": {"_id": False, "foo": True}} coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) result = coll.aggregate([pipeline]) - self.assertTrue(isinstance(result, CommandCursor)) + self.assertIsInstance(result, CommandCursor) first_result = next(result) self.assertIsInstance(first_result, RawBSONDocument) self.assertEqual([1, 2], list(first_result["foo"])) @@ -1565,7 +1565,7 @@ def test_aggregation_cursor_validation(self): db = self.db projection = {"$project": {"_id": "$_id"}} cursor = db.test.aggregate([projection], cursor={}) - self.assertTrue(isinstance(cursor, CommandCursor)) + self.assertIsInstance(cursor, CommandCursor) def test_aggregation_cursor(self): db = self.db @@ -2186,9 +2186,9 @@ def test_find_regex(self): c.drop() c.insert_one({"r": re.compile(".*")}) - self.assertTrue(isinstance((c.find_one())["r"], Regex)) # type: ignore + self.assertIsInstance((c.find_one())["r"], Regex) # type: ignore for doc in c.find(): - self.assertTrue(isinstance(doc["r"], Regex)) + self.assertIsInstance(doc["r"], Regex) def test_find_command_generation(self): cmd = _gen_find_command( diff --git a/test/test_cursor.py b/test/test_cursor.py index 7b75f4ddc4..d5845e39b0 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -950,7 +950,7 @@ def test_clone(self): cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) # Internal types are now dict rather than SON by default - self.assertTrue(isinstance(cursor2._hint, dict)) + self.assertIsInstance(cursor2._hint, dict) self.assertEqual(cursor._hint, cursor2._hint) @client_context.require_sync diff --git a/test/test_custom_types.py b/test/test_custom_types.py index bcdc14f2e9..9e8dbcfbeb 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -793,7 +793,7 @@ def test_grid_out_custom_opts(self): self.assertEqual(5, two._id) self.assertEqual(11, two.length) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) diff --git a/test/test_database.py b/test/test_database.py index 4c09b421cf..0cb016e269 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -90,7 +90,7 @@ def test_get_collection(self): def test_getattr(self): db = self.client.pymongo_test - self.assertTrue(isinstance(db["_does_not_exist"], Collection)) + self.assertIsInstance(db["_does_not_exist"], Collection) with self.assertRaises(AttributeError) as context: db._does_not_exist @@ -423,7 +423,7 @@ def test_command_with_regex(self): result = db.command("aggregate", "test", pipeline=[], cursor={}) for doc in result["cursor"]["firstBatch"]: - self.assertTrue(isinstance(doc["r"], Regex)) + self.assertIsInstance(doc["r"], Regex) def test_command_bulkWrite(self): # Ensure bulk write commands can be run directly via db.command(). @@ -467,7 +467,7 @@ def test_password_digest(self): with self.assertRaises(TypeError): auth._password_digest(None) # type: ignore[arg-type, call-arg] - self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) + self.assertIsInstance(auth._password_digest("mike", "password"), str) self.assertEqual( auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" ) @@ -538,7 +538,7 @@ def test_insert_find_one(self): a_doc = SON({"hello": "world"}) a_key = (db.test.insert_one(a_doc)).inserted_id - self.assertTrue(isinstance(a_doc["_id"], ObjectId)) + self.assertIsInstance(a_doc["_id"], ObjectId) self.assertEqual(a_doc["_id"], a_key) self.assertEqual(a_doc, db.test.find_one({"_id": a_doc["_id"]})) self.assertEqual(a_doc, db.test.find_one(a_key)) diff --git a/test/test_encryption.py b/test/test_encryption.py index 4b055b68d3..3a86838af3 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -2453,16 +2453,16 @@ def setUp(self): encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) doc = unencrypted_client.db.csfle.find_one() - self.assertTrue(isinstance(doc["csfle"], Binary)) + self.assertIsInstance(doc["csfle"], Binary) encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) doc = unencrypted_client.db.csfle2.find_one() - self.assertTrue(isinstance(doc["csfle2"], Binary)) + self.assertIsInstance(doc["csfle2"], Binary) encrypted_client.db.qe.insert_one({"qe": "qe"}) doc = unencrypted_client.db.qe.find_one() - self.assertTrue(isinstance(doc["qe"], Binary)) + self.assertIsInstance(doc["qe"], Binary) encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) doc = unencrypted_client.db.qe2.find_one() - self.assertTrue(isinstance(doc["qe2"], Binary)) + self.assertIsInstance(doc["qe2"], Binary) encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 0baeb5ae19..6fe209f438 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -150,7 +150,7 @@ def test_grid_in_default_opts(self): a = GridIn(self.db.fs) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual(None, a.filename) @@ -195,7 +195,7 @@ def test_grid_in_default_opts(self): self.assertEqual(42, a.forty_two) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual("my_file", a.filename) @@ -209,7 +209,7 @@ def test_grid_in_default_opts(self): self.assertEqual(255 * 1024, a.chunk_size) self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) - self.assertTrue(isinstance(a.upload_date, datetime.datetime)) + self.assertIsInstance(a.upload_date, datetime.datetime) self.assertRaises(AttributeError, setattr, a, "upload_date", 5) self.assertEqual(["foo"], a.aliases) @@ -248,7 +248,7 @@ def test_grid_out_default_opts(self): self.assertEqual(None, b.name) self.assertEqual(None, b.filename) self.assertEqual(255 * 1024, b.chunk_size) - self.assertTrue(isinstance(b.upload_date, datetime.datetime)) + self.assertIsInstance(b.upload_date, datetime.datetime) self.assertEqual(None, b.aliases) self.assertEqual(None, b.metadata) self.assertEqual(None, b.md5) @@ -309,7 +309,7 @@ def test_grid_out_custom_opts(self): self.assertEqual(11, two.length) self.assertEqual("text/html", two.content_type) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 1, "bar": 2}, two.metadata) self.assertEqual(3, two.bar) diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 75342ee437..8bda041447 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -157,7 +157,7 @@ def test_empty_file(self): assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) self.assertNotIn("md5", raw) diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index e941369f99..33902c9cfc 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -136,7 +136,7 @@ def test_empty_file(self): assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) self.assertNotIn("md5", raw) diff --git a/test/test_json_util.py b/test/test_json_util.py index 8aed4a82bc..cf2c0efb93 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -341,7 +341,7 @@ def test_regex_object_hook(self): pat = "a*b" json_re = '{"$regex": "%s", "$options": "u"}' % pat loaded = json_util.object_hook(json.loads(json_re)) - self.assertTrue(isinstance(loaded, Regex)) + self.assertIsInstance(loaded, Regex) self.assertEqual(pat, loaded.pattern) self.assertEqual(re.U, loaded.flags) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index ae3e50db77..fc8be127e3 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -66,26 +66,26 @@ def test_started_simple(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(SON([("ping", 1)]), started.command) self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) def test_succeeded_simple(self): self.client.pymongo_test.command("ping") started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) self.assertEqual("ping", succeeded.command_name) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(1, succeeded.reply.get("ok")) - self.assertTrue(isinstance(succeeded.request_id, int)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(succeeded.request_id, int) + self.assertIsInstance(succeeded.duration_micros, int) def test_failed_simple(self): try: @@ -95,21 +95,21 @@ def test_failed_simple(self): started = self.listener.started_events[0] failed = self.listener.failed_events[0] self.assertEqual(0, len(self.listener.succeeded_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) self.assertEqual("oops!", failed.command_name) self.assertEqual(self.client.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) - self.assertTrue(isinstance(failed.request_id, int)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) def test_find_one(self): self.client.pymongo_test.test.find_one() started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), started.command, @@ -117,7 +117,7 @@ def test_find_one(self): self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) def test_find_and_get_more(self): self.client.pymongo_test.test.drop() @@ -130,7 +130,7 @@ def test_find_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] @@ -140,11 +140,11 @@ def test_find_and_get_more(self): self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] self.assertEqual(csr["id"], cursor_id) @@ -159,7 +159,7 @@ def test_find_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), started.command, @@ -167,11 +167,11 @@ def test_find_and_get_more(self): self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("getMore", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] self.assertEqual(csr["id"], cursor_id) @@ -194,16 +194,16 @@ def test_find_with_explain(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(cmd, started.command) self.assertEqual("explain", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("explain", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(res, succeeded.reply) @@ -225,16 +225,16 @@ def _test_find_options(self, query, expected_cmd): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(expected_cmd, started.command) self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(self.client.address, succeeded.connection_id) finally: # Exhaust the cursor to avoid kill cursors. @@ -306,7 +306,7 @@ def test_command_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [ @@ -320,11 +320,11 @@ def test_command_and_get_more(self): self.assertEqual("aggregate", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("aggregate", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_cursor = { "id": cursor_id, @@ -339,7 +339,7 @@ def test_command_and_get_more(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), started.command, @@ -347,11 +347,11 @@ def test_command_and_get_more(self): self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("getMore", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { "cursor": { @@ -379,18 +379,18 @@ def test_get_more_failure(self): started = self.listener.started_events[0] self.assertEqual(0, len(self.listener.succeeded_events)) failed = self.listener.failed_events[0] - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test")]), started.command ) self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertIsInstance(failed.duration_micros, int) self.assertEqual("getMore", failed.command_name) - self.assertTrue(isinstance(failed.request_id, int)) + self.assertIsInstance(failed.request_id, int) self.assertEqual(cursor.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) @@ -410,13 +410,13 @@ def test_not_primary_error(self): started = self.listener.started_events[0] failed = self.listener.failed_events[0] self.assertEqual(0, len(self.listener.succeeded_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) self.assertEqual("findAndModify", failed.command_name) self.assertEqual(address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) - self.assertTrue(isinstance(failed.request_id, int)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) self.assertEqual(error, failed.failure) @client_context.require_no_mongos @@ -432,7 +432,7 @@ def test_exhaust(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] @@ -442,11 +442,11 @@ def test_exhaust(self): self.assertEqual("find", started.command_name) self.assertEqual(cursor.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { "cursor": { @@ -462,7 +462,7 @@ def test_exhaust(self): tuple(cursor.to_list()) self.assertEqual(0, len(self.listener.failed_events)) for event in self.listener.started_events: - self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) + self.assertIsInstance(event, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), event.command, @@ -470,12 +470,12 @@ def test_exhaust(self): self.assertEqual("getMore", event.command_name) self.assertEqual(cursor.address, event.connection_id) self.assertEqual("pymongo_test", event.database_name) - self.assertTrue(isinstance(event.request_id, int)) + self.assertIsInstance(event.request_id, int) for event in self.listener.succeeded_events: - self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(event.duration_micros, int)) + self.assertIsInstance(event, monitoring.CommandSucceededEvent) + self.assertIsInstance(event.duration_micros, int) self.assertEqual("getMore", event.command_name) - self.assertTrue(isinstance(event.request_id, int)) + self.assertIsInstance(event.request_id, int) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) @@ -493,7 +493,7 @@ def test_kill_cursors(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) # There could be more than one cursor_id here depending on # when the thread last ran. self.assertIn(cursor_id, started.command["cursors"]) @@ -501,11 +501,11 @@ def test_kill_cursors(self): self.assertIs(type(started.connection_id), tuple) self.assertEqual(cursor.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("killCursors", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertIs(type(succeeded.connection_id), tuple) self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on @@ -1155,13 +1155,13 @@ def test_simple(self): started = self.listener.started_events[0] succeeded = self.listener.succeeded_events[0] self.assertEqual(0, len(self.listener.failed_events)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(SON([("ping", 1)]), started.command) self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) class TestEventClasses(unittest.TestCase): diff --git a/test/test_timestamp.py b/test/test_timestamp.py index 7495d2ec9f..ef7d8bde15 100644 --- a/test/test_timestamp.py +++ b/test/test_timestamp.py @@ -33,7 +33,7 @@ def test_timestamp(self): t = Timestamp(123, 456) self.assertEqual(t.time, 123) self.assertEqual(t.inc, 456) - self.assertTrue(isinstance(t, Timestamp)) + self.assertIsInstance(t, Timestamp) def test_datetime(self): d = datetime.datetime(2010, 5, 5, tzinfo=utc) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index d4d17ac211..ec1c6c164c 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -135,8 +135,8 @@ def test_split_options(self): self.assertEqual({"connecttimeoutms": 0.3}, split_options("connectTimeoutMS=300")) self.assertEqual({"connecttimeoutms": 0.0001}, split_options("connectTimeoutMS=0.1")) self.assertTrue(split_options("connectTimeoutMS=300")) - self.assertTrue(isinstance(split_options("w=5")["w"], int)) - self.assertTrue(isinstance(split_options("w=5.5")["w"], str)) + self.assertIsInstance(split_options("w=5")["w"], int) + self.assertIsInstance(split_options("w=5.5")["w"], str) self.assertTrue(split_options("w=foo")) self.assertTrue(split_options("w=majority")) self.assertTrue(split_options("wtimeoutms=500")) From b8460b6001c261ad5704710a6430b194b43ac398 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Fri, 23 May 2025 09:04:32 -0700 Subject: [PATCH 1931/2111] PYTHON-5387 Better test assertions for membership (#2348) --- test/asynchronous/test_bulk.py | 14 +-- test/asynchronous/test_client.py | 20 ++-- test/asynchronous/test_collection.py | 98 +++++++++---------- test/asynchronous/test_cursor.py | 41 ++++---- test/asynchronous/test_database.py | 38 +++---- test/asynchronous/test_examples.py | 94 +++++++++--------- .../test_json_util_integration.py | 2 +- test/asynchronous/test_monitoring.py | 4 +- test/asynchronous/test_session.py | 33 ++++--- test/test_bulk.py | 14 +-- test/test_client.py | 20 ++-- test/test_collection.py | 98 +++++++++---------- test/test_cursor.py | 41 ++++---- test/test_database.py | 38 +++---- test/test_examples.py | 94 +++++++++--------- test/test_json_util_integration.py | 2 +- test/test_monitoring.py | 4 +- test/test_session.py | 33 ++++--- test/test_son.py | 4 +- 19 files changed, 352 insertions(+), 340 deletions(-) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 7c6f032e81..62022de24c 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -94,7 +94,7 @@ def assertEqualUpsert(self, expected, actual): self.assertEqual(expected["index"], actual["index"]) if expected["_id"] == "...": # Unspecified value. - self.assertTrue("_id" in actual) + self.assertIn("_id", actual) else: self.assertEqual(expected["_id"], actual["_id"]) @@ -107,7 +107,7 @@ def assertEqualWriteError(self, expected, actual): self.assertEqual(expected["code"], actual["code"]) if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue("errmsg" in actual) + self.assertIn("errmsg", actual) else: self.assertEqual(expected["errmsg"], actual["errmsg"]) @@ -115,7 +115,7 @@ def assertEqualWriteError(self, expected, actual): actual_op = actual["op"].copy() if expected_op.get("_id") == "...": # Unspecified _id. - self.assertTrue("_id" in actual_op) + self.assertIn("_id", actual_op) actual_op.pop("_id") expected_op.pop("_id") @@ -160,7 +160,7 @@ async def _test_update_many(self, update): result = await self.coll.bulk_write([UpdateMany({}, update)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(2, result.matched_count) - self.assertTrue(result.modified_count in (2, None)) + self.assertIn(result.modified_count, (2, None)) async def test_update_many(self): await self._test_update_many({"$set": {"foo": "bar"}}) @@ -201,7 +201,7 @@ async def _test_update_one(self, update): result = await self.coll.bulk_write([UpdateOne({}, update)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (1, None)) + self.assertIn(result.modified_count, (1, None)) async def test_update_one(self): await self._test_update_one({"$set": {"foo": "bar"}}) @@ -227,7 +227,7 @@ async def test_replace_one(self): result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (1, None)) + self.assertIn(result.modified_count, (1, None)) async def test_remove(self): # Test removing all documents, ordered. @@ -1037,7 +1037,7 @@ async def test_write_concern_failure_ordered(self): self.assertTrue(len(details["writeConcernErrors"]) > 1) failed = details["writeErrors"][0] - self.assertTrue("duplicate" in failed["errmsg"]) + self.assertIn("duplicate", failed["errmsg"]) @async_client_context.require_version_max(7, 1) # PYTHON-4560 @async_client_context.require_replica_set diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 4d247009e5..ad6614711d 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -674,7 +674,7 @@ async def test_max_idle_time_reaper_default(self): async with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) - self.assertTrue(conn in server._pool.conns) + self.assertIn(conn, server._pool.conns) async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): @@ -752,7 +752,7 @@ async def test_min_pool_size(self): lambda: len(server._pool.conns) == 10, "a closed socket gets replaced from the pool", ) - self.assertFalse(conn in server._pool.conns) + self.assertNotIn(conn, server._pool.conns) async def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. @@ -769,8 +769,8 @@ async def test_max_idle_time_checkout(self): async with server._pool.checkout() as new_con: self.assertNotEqual(conn, new_con) self.assertEqual(1, len(server._pool.conns)) - self.assertFalse(conn in server._pool.conns) - self.assertTrue(new_con in server._pool.conns) + self.assertNotIn(conn, server._pool.conns) + self.assertIn(new_con, server._pool.conns) # Test that connections are reused if maxIdleTimeMS is not set. client = await self.async_rs_or_single_client() @@ -1032,8 +1032,8 @@ async def test_list_database_names(self): cmd_names = [doc["name"] for doc in cmd_docs] db_names = await self.client.list_database_names() - self.assertTrue("pymongo_test" in db_names) - self.assertTrue("pymongo_test_mike" in db_names) + self.assertIn("pymongo_test", db_names) + self.assertIn("pymongo_test_mike", db_names) self.assertEqual(db_names, cmd_names) async def test_drop_database(self): @@ -1257,9 +1257,9 @@ async def test_unix_socket(self): client = await self.async_rs_or_single_client(uri) await client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = await client.list_database_names() - self.assertTrue("pymongo_test" in dbs) + self.assertIn("pymongo_test", dbs) - self.assertTrue(mongodb_socket in repr(client)) + self.assertIn(mongodb_socket, repr(client)) # Confirm it fails with a missing socket. with self.assertRaises(ConnectionFailure): @@ -1431,8 +1431,8 @@ async def test_ipv6(self): await client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) dbs = await client.list_database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test_bernie", dbs) async def test_contextlib(self): client = await self.async_rs_or_single_client() diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 9367c43a0b..b6f96cc999 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -212,7 +212,7 @@ async def lambda_test_2(): async def test_drop_nonexistent_collection(self): await self.db.drop_collection("test") - self.assertFalse("test" in await self.db.list_collection_names()) + self.assertNotIn("test", await self.db.list_collection_names()) # No exception await self.db.drop_collection("test") @@ -248,7 +248,7 @@ async def test_create_indexes(self): await db.test.drop_indexes() self.assertEqual(len(await db.test.index_information()), 1) await db.test.create_indexes([IndexModel("hello")]) - self.assertTrue("hello_1" in await db.test.index_information()) + self.assertIn("hello_1", await db.test.index_information()) await db.test.drop_indexes() self.assertEqual(len(await db.test.index_information()), 1) @@ -257,7 +257,7 @@ async def test_create_indexes(self): ) info = await db.test.index_information() for name in names: - self.assertTrue(name in info) + self.assertIn(name, info) await db.test.drop() await db.test.insert_one({"a": 1}) @@ -311,16 +311,16 @@ async def test_create_index(self): await db.test.drop_indexes() self.assertEqual(len(await db.test.index_information()), 1) await db.test.create_index("hello") - self.assertTrue("hello_1" in await db.test.index_information()) + self.assertIn("hello_1", await db.test.index_information()) await db.test.drop_indexes() self.assertEqual(len(await db.test.index_information()), 1) await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) - self.assertTrue("hello_-1_world_1" in await db.test.index_information()) + self.assertIn("hello_-1_world_1", await db.test.index_information()) await db.test.drop_indexes() await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) - self.assertTrue("hello_-1_world_1" in await db.test.index_information()) + self.assertIn("hello_-1_world_1", await db.test.index_information()) await db.test.drop() await db.test.insert_one({"a": 1}) @@ -349,7 +349,7 @@ async def test_drop_index(self): with self.assertRaises(OperationFailure): await db.test.drop_index(name) self.assertEqual(len(await db.test.index_information()), 2) - self.assertTrue("hello_1" in await db.test.index_information()) + self.assertIn("hello_1", await db.test.index_information()) await db.test.drop_indexes() await db.test.create_index("hello") @@ -359,7 +359,7 @@ async def test_drop_index(self): self.assertEqual(name, "goodbye_1") await db.test.drop_index([("goodbye", ASCENDING)]) self.assertEqual(len(await db.test.index_information()), 2) - self.assertTrue("hello_1" in await db.test.index_information()) + self.assertIn("hello_1", await db.test.index_information()) with self.write_concern_collection() as coll: await coll.drop_index("hello_1") @@ -395,7 +395,7 @@ def map_indexes(indexes): indexes = await (await db.test.list_indexes()).to_list() self.assertEqual(len(indexes), 1) - self.assertTrue("_id_" in map_indexes(indexes)) + self.assertIn("_id_", map_indexes(indexes)) await db.test.create_index("hello") indexes = await (await db.test.list_indexes()).to_list() @@ -424,7 +424,7 @@ async def test_index_info(self): await db.test.drop() await db.test.insert_one({}) # create collection self.assertEqual(len(await db.test.index_information()), 1) - self.assertTrue("_id_" in await db.test.index_information()) + self.assertIn("_id_", await db.test.index_information()) await db.test.create_index("hello") self.assertEqual(len(await db.test.index_information()), 2) @@ -488,7 +488,7 @@ async def test_index_text(self): await db.test.drop_indexes() self.assertEqual("t_text", await db.test.create_index([("t", TEXT)])) index_info = (await db.test.index_information())["t_text"] - self.assertTrue("weights" in index_info) + self.assertIn("weights", index_info) await db.test.insert_many( [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] @@ -549,7 +549,7 @@ async def test_index_background(self): await db.test.create_index([("keya", ASCENDING)]) await db.test.create_index([("keyb", ASCENDING)], background=False) await db.test.create_index([("keyc", ASCENDING)], background=True) - self.assertFalse("background" in (await db.test.index_information())["keya_1"]) + self.assertNotIn("background", (await db.test.index_information())["keya_1"]) self.assertFalse((await db.test.index_information())["keyb_1"]["background"]) self.assertTrue((await db.test.index_information())["keyc_1"]["background"]) @@ -702,7 +702,7 @@ async def test_field_selection(self): doc = await anext(db.test.find({}, {"_id": False})) l = list(doc) - self.assertFalse("_id" in l) + self.assertNotIn("_id", l) async def test_options(self): db = self.db @@ -764,7 +764,7 @@ async def test_insert_many(self): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, ObjectId) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) @@ -776,7 +776,7 @@ async def test_insert_many(self): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, int) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) @@ -943,7 +943,7 @@ async def async_lambda(): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, int) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"x": doc["x"]})) self.assertTrue(result.acknowledged) docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] @@ -953,7 +953,7 @@ async def async_lambda(): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, int) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, await db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) @@ -1131,23 +1131,23 @@ async def test_find_w_fields(self): ) self.assertEqual(1, await db.test.count_documents({})) doc = await anext(db.test.find({})) - self.assertTrue("x" in doc) + self.assertIn("x", doc) doc = await anext(db.test.find({})) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = await anext(db.test.find({})) - self.assertTrue("extra thing" in doc) + self.assertIn("extra thing", doc) doc = await anext(db.test.find({}, ["x", "mike"])) - self.assertTrue("x" in doc) + self.assertIn("x", doc) doc = await anext(db.test.find({}, ["x", "mike"])) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = await anext(db.test.find({}, ["x", "mike"])) - self.assertFalse("extra thing" in doc) + self.assertNotIn("extra thing", doc) doc = await anext(db.test.find({}, ["mike"])) - self.assertFalse("x" in doc) + self.assertNotIn("x", doc) doc = await anext(db.test.find({}, ["mike"])) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = await anext(db.test.find({}, ["mike"])) - self.assertFalse("extra thing" in doc) + self.assertNotIn("extra thing", doc) @no_type_check async def test_fields_specifier_as_dict(self): @@ -1158,8 +1158,8 @@ async def test_fields_specifier_as_dict(self): self.assertEqual([1, 2, 3], (await db.test.find_one())["x"]) self.assertEqual([2, 3], (await db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) - self.assertTrue("x" not in await db.test.find_one(projection={"x": 0})) - self.assertTrue("mike" in await db.test.find_one(projection={"x": 0})) + self.assertNotIn("x", await db.test.find_one(projection={"x": 0})) + self.assertIn("mike", await db.test.find_one(projection={"x": 0})) async def test_find_w_regex(self): db = self.db @@ -1194,7 +1194,7 @@ async def test_id_can_be_anything(self): async for x in db.test.find(): self.assertEqual(x["hello"], "world") - self.assertTrue("_id" in x) + self.assertIn("_id", x) async def test_unique_index(self): db = self.db @@ -1314,7 +1314,7 @@ async def test_error_code(self): try: await self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) except OperationFailure as exc: - self.assertTrue(exc.code in (9, 10147, 16840, 17009)) + self.assertIn(exc.code, (9, 10147, 16840, 17009)) # Just check that we set the error document. Fields # vary by MongoDB version. self.assertTrue(exc.details is not None) @@ -1348,7 +1348,7 @@ async def test_replace_one(self): result = await db.test.replace_one({"x": 1}, {"y": 1}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, await db.test.count_documents({"y": 1})) @@ -1359,7 +1359,7 @@ async def test_replace_one(self): result = await db.test.replace_one({"y": 1}, replacement, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, await db.test.count_documents({"z": 1})) @@ -1369,7 +1369,7 @@ async def test_replace_one(self): result = await db.test.replace_one({"x": 2}, {"y": 2}, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) + self.assertIn(result.modified_count, (None, 0)) self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) self.assertEqual(1, await db.test.count_documents({"y": 2})) @@ -1393,7 +1393,7 @@ async def test_update_one(self): result = await db.test.update_one({}, {"$inc": {"x": 1}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual((await db.test.find_one(id1))["x"], 6) # type: ignore @@ -1402,7 +1402,7 @@ async def test_update_one(self): result = await db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual((await db.test.find_one(id1))["x"], 7) # type: ignore @@ -1411,7 +1411,7 @@ async def test_update_one(self): result = await db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) + self.assertIn(result.modified_count, (None, 0)) self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) @@ -1450,7 +1450,7 @@ async def test_update_many(self): result = await db.test.update_many({"x": 4}, {"$set": {"y": 5}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(2, result.matched_count) - self.assertTrue(result.modified_count in (None, 2)) + self.assertIn(result.modified_count, (None, 2)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(3, await db.test.count_documents({"y": 5})) @@ -1458,7 +1458,7 @@ async def test_update_many(self): result = await db.test.update_many({"x": 5}, {"$set": {"y": 6}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, await db.test.count_documents({"y": 6})) @@ -1466,7 +1466,7 @@ async def test_update_many(self): result = await db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) + self.assertIn(result.modified_count, (None, 0)) self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) @@ -1725,21 +1725,21 @@ async def test_find_one(self): self.assertEqual(await db.test.find_one({}), await db.test.find_one()) self.assertEqual(await db.test.find_one({"hello": "world"}), await db.test.find_one()) - self.assertTrue("hello" in await db.test.find_one(projection=["hello"])) - self.assertTrue("hello" not in await db.test.find_one(projection=["foo"])) + self.assertIn("hello", await db.test.find_one(projection=["hello"])) + self.assertNotIn("hello", await db.test.find_one(projection=["foo"])) - self.assertTrue("hello" in await db.test.find_one(projection=("hello",))) - self.assertTrue("hello" not in await db.test.find_one(projection=("foo",))) + self.assertIn("hello", await db.test.find_one(projection=("hello",))) + self.assertNotIn("hello", await db.test.find_one(projection=("foo",))) - self.assertTrue("hello" in await db.test.find_one(projection={"hello"})) - self.assertTrue("hello" not in await db.test.find_one(projection={"foo"})) + self.assertIn("hello", await db.test.find_one(projection={"hello"})) + self.assertNotIn("hello", await db.test.find_one(projection={"foo"})) - self.assertTrue("hello" in await db.test.find_one(projection=frozenset(["hello"]))) - self.assertTrue("hello" not in await db.test.find_one(projection=frozenset(["foo"]))) + self.assertIn("hello", await db.test.find_one(projection=frozenset(["hello"]))) + self.assertNotIn("hello", await db.test.find_one(projection=frozenset(["foo"]))) self.assertEqual(["_id"], list(await db.test.find_one(projection={"_id": True}))) - self.assertTrue("hello" in list(await db.test.find_one(projection={}))) - self.assertTrue("hello" in list(await db.test.find_one(projection=[]))) + self.assertIn("hello", list(await db.test.find_one(projection={}))) + self.assertIn("hello", list(await db.test.find_one(projection=[]))) self.assertEqual(None, await db.test.find_one({"hello": "foo"})) self.assertEqual(None, await db.test.find_one(ObjectId())) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 5e4e590fef..861345cb08 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -174,8 +174,8 @@ async def test_max_time_ms(self): cursor = coll.find().max_time_ms(999) c2 = cursor.clone() self.assertEqual(999, c2._max_time_ms) - self.assertTrue("$maxTimeMS" in cursor._query_spec()) - self.assertTrue("$maxTimeMS" in c2._query_spec()) + self.assertIn("$maxTimeMS", cursor._query_spec()) + self.assertIn("$maxTimeMS", c2._query_spec()) self.assertTrue(await coll.find_one(max_time_ms=1000)) @@ -240,19 +240,19 @@ async def test_max_await_time_ms(self): # Tailable_await defaults. await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() # find - self.assertFalse("maxTimeMS" in listener.started_events[0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) listener.reset() # Tailable_await with max_await_time_ms set. await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertIn("maxTimeMS", listener.started_events[1].command) self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) listener.reset() @@ -263,11 +263,11 @@ async def test_max_await_time_ms(self): await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) listener.reset() # Tailable_await with both max_time_ms and max_await_time_ms @@ -279,11 +279,11 @@ async def test_max_await_time_ms(self): ) # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertIn("maxTimeMS", listener.started_events[1].command) self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) listener.reset() @@ -291,31 +291,31 @@ async def test_max_await_time_ms(self): await coll.find(batch_size=1).max_await_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) listener.reset() # Non tailable_await with max_time_ms await coll.find(batch_size=1).max_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) # Non tailable_await with both max_time_ms and max_await_time_ms await coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) @async_client_context.require_test_commands @async_client_context.require_no_mongos @@ -933,16 +933,19 @@ async def test_clone(self): # Shallow copies can so can mutate cursor2 = copy.copy(cursor) cursor2._projection["cursor2"] = False - self.assertTrue(cursor._projection and "cursor2" in cursor._projection) + self.assertIsNotNone(cursor._projection) + self.assertIn("cursor2", cursor._projection.keys()) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) cursor3._projection["cursor3"] = False - self.assertFalse(cursor._projection and "cursor3" in cursor._projection) + self.assertIsNotNone(cursor._projection) + self.assertNotIn("cursor3", cursor._projection.keys()) cursor4 = cursor.clone() cursor4._projection["cursor4"] = False - self.assertFalse(cursor._projection and "cursor4" in cursor._projection) + self.assertIsNotNone(cursor._projection) + self.assertNotIn("cursor4", cursor._projection.keys()) # Test memo when deepcopying queries query = {"hello": "world"} diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 1e0f0c40d6..3195c74988 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -163,13 +163,13 @@ async def test_create_collection(self): await db.create_collection("coll..ection") # type: ignore[arg-type] test = await db.create_collection("test") - self.assertTrue("test" in await db.list_collection_names()) + self.assertIn("test", await db.list_collection_names()) await test.insert_one({"hello": "world"}) self.assertEqual((await db.test.find_one())["hello"], "world") await db.drop_collection("test.foo") await db.create_collection("test.foo") - self.assertTrue("test.foo" in await db.list_collection_names()) + self.assertIn("test.foo", await db.list_collection_names()) with self.assertRaises(CollectionInvalid): await db.create_collection("test.foo") @@ -179,10 +179,10 @@ async def test_list_collection_names(self): await db.test.mike.insert_one({"dummy": "object"}) colls = await db.list_collection_names() - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + self.assertIn("test", colls) + self.assertIn("test.mike", colls) for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) await db.systemcoll.test.insert_one({}) no_system_collections = await db.list_collection_names( @@ -252,12 +252,12 @@ async def test_list_collections(self): colls = [result["name"] async for result in results] # All the collections present. - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + self.assertIn("test", colls) + self.assertIn("test.mike", colls) # No collection containing a '$'. for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) # Duplicate check. coll_cnt: dict = {} @@ -294,12 +294,12 @@ async def test_list_collections(self): colls = [result["name"] async for result in results] # Checking only capped collections are present - self.assertTrue("test" in colls) - self.assertFalse("test.mike" in colls) + self.assertIn("test", colls) + self.assertNotIn("test.mike", colls) # No collection containing a '$'. for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) # Duplicate check. coll_cnt = {} @@ -339,24 +339,24 @@ async def test_drop_collection(self): await db.drop_collection(None) # type: ignore[arg-type] await db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in await db.list_collection_names()) + self.assertIn("test", await db.list_collection_names()) await db.drop_collection("test") - self.assertFalse("test" in await db.list_collection_names()) + self.assertNotIn("test", await db.list_collection_names()) await db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in await db.list_collection_names()) + self.assertIn("test", await db.list_collection_names()) await db.drop_collection("test") - self.assertFalse("test" in await db.list_collection_names()) + self.assertNotIn("test", await db.list_collection_names()) await db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in await db.list_collection_names()) + self.assertIn("test", await db.list_collection_names()) await db.drop_collection(db.test) - self.assertFalse("test" in await db.list_collection_names()) + self.assertNotIn("test", await db.list_collection_names()) await db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in await db.list_collection_names()) + self.assertIn("test", await db.list_collection_names()) await db.test.drop() - self.assertFalse("test" in await db.list_collection_names()) + self.assertNotIn("test", await db.list_collection_names()) await db.test.drop() await db.drop_collection(db.test.doesnotexist) diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py index a334a3ed1d..a4ebf72df9 100644 --- a/test/asynchronous/test_examples.py +++ b/test/asynchronous/test_examples.py @@ -479,77 +479,77 @@ async def test_projection(self): # End Example 44 async for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 45 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) # End Example 45 async for doc in cursor: - self.assertFalse("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertFalse("instock" in doc) + self.assertNotIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 46 cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) # End Example 46 async for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertFalse("status" in doc) - self.assertTrue("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertNotIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 47 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) # End Example 47 async for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertTrue("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) size = doc["size"] - self.assertTrue("uom" in size) - self.assertFalse("h" in size) - self.assertFalse("w" in size) + self.assertIn("uom", size) + self.assertNotIn("h", size) + self.assertNotIn("w", size) # Start Example 48 cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) # End Example 48 async for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertTrue("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertIn("instock", doc) size = doc["size"] - self.assertFalse("uom" in size) - self.assertTrue("h" in size) - self.assertTrue("w" in size) + self.assertNotIn("uom", size) + self.assertIn("h", size) + self.assertIn("w", size) # Start Example 49 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) # End Example 49 async for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) for subdoc in doc["instock"]: - self.assertFalse("warehouse" in subdoc) - self.assertTrue("qty" in subdoc) + self.assertNotIn("warehouse", subdoc) + self.assertIn("qty", subdoc) # Start Example 50 cursor = db.inventory.find( @@ -558,11 +558,11 @@ async def test_projection(self): # End Example 50 async for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) self.assertEqual(len(doc["instock"]), 1) async def test_update_and_replace(self): @@ -645,7 +645,7 @@ async def test_update_and_replace(self): async for doc in db.inventory.find({"item": "paper"}): self.assertEqual(doc["size"]["uom"], "cm") self.assertEqual(doc["status"], "P") - self.assertTrue("lastModified" in doc) + self.assertIn("lastModified", doc) # Start Example 53 await db.inventory.update_many( @@ -657,7 +657,7 @@ async def test_update_and_replace(self): async for doc in db.inventory.find({"qty": {"$lt": 50}}): self.assertEqual(doc["size"]["uom"], "in") self.assertEqual(doc["status"], "P") - self.assertTrue("lastModified" in doc) + self.assertIn("lastModified", doc) # Start Example 54 await db.inventory.replace_one( @@ -671,8 +671,8 @@ async def test_update_and_replace(self): async for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): self.assertEqual(len(doc.keys()), 2) - self.assertTrue("item" in doc) - self.assertTrue("instock" in doc) + self.assertIn("item", doc) + self.assertIn("instock", doc) self.assertEqual(len(doc["instock"]), 2) async def test_delete(self): diff --git a/test/asynchronous/test_json_util_integration.py b/test/asynchronous/test_json_util_integration.py index 4c02792d89..32312cb9d3 100644 --- a/test/asynchronous/test_json_util_integration.py +++ b/test/asynchronous/test_json_util_integration.py @@ -25,4 +25,4 @@ async def test_cursor(self): await db.test.insert_many(docs) reloaded_docs = json_util.loads(json_util.dumps(await (db.test.find()).to_list())) for doc in docs: - self.assertTrue(doc in reloaded_docs) + self.assertIn(doc, reloaded_docs) diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index b2ca1aaaa6..a44223a725 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -1088,8 +1088,8 @@ async def test_first_batch_helper(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue("cursor" in succeeded.reply) - self.assertTrue("ok" in succeeded.reply) + self.assertIn("cursor", succeeded.reply) + self.assertIn("ok", succeeded.reply) self.listener.reset() diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 3655f49aab..1f6fb0d319 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -134,8 +134,9 @@ async def _test_ops(self, client, *ops): await f(*args, **kw) self.assertGreaterEqual(len(listener.started_events), 1) for event in listener.started_events: - self.assertTrue( - "lsid" in event.command, + self.assertIn( + "lsid", + event.command, f"{f.__name__} sent no lsid with {event.command_name}", ) @@ -170,8 +171,9 @@ async def _test_ops(self, client, *ops): self.assertGreaterEqual(len(listener.started_events), 1) lsids = [] for event in listener.started_events: - self.assertTrue( - "lsid" in event.command, + self.assertIn( + "lsid", + event.command, f"{f.__name__} sent no lsid with {event.command_name}", ) @@ -422,8 +424,9 @@ async def test_cursor(self): await f(session=s) self.assertGreaterEqual(len(listener.started_events), 1) for event in listener.started_events: - self.assertTrue( - "lsid" in event.command, + self.assertIn( + "lsid", + event.command, f"{name} sent no lsid with {event.command_name}", ) @@ -441,15 +444,13 @@ async def test_cursor(self): listener.reset() await f(session=None) event0 = listener.first_command_started() - self.assertTrue( - "lsid" in event0.command, f"{name} sent no lsid with {event0.command_name}" - ) + self.assertIn("lsid", event0.command, f"{name} sent no lsid with {event0.command_name}") lsid = event0.command["lsid"] for event in listener.started_events[1:]: - self.assertTrue( - "lsid" in event.command, f"{name} sent no lsid with {event.command_name}" + self.assertIn( + "lsid", event.command, f"{name} sent no lsid with {event.command_name}" ) self.assertEqual( @@ -1201,15 +1202,17 @@ async def aggregate(): self.assertGreaterEqual(len(listener.started_events), 1) for i, event in enumerate(listener.started_events): - self.assertTrue( - "$clusterTime" in event.command, + self.assertIn( + "$clusterTime", + event.command, f"{f.__name__} sent no $clusterTime with {event.command_name}", ) if i > 0: succeeded = listener.succeeded_events[i - 1] - self.assertTrue( - "$clusterTime" in succeeded.reply, + self.assertIn( + "$clusterTime", + succeeded.reply, f"{f.__name__} received no $clusterTime with {succeeded.command_name}", ) diff --git a/test/test_bulk.py b/test/test_bulk.py index 00c6c5e649..77d0d6c06e 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -94,7 +94,7 @@ def assertEqualUpsert(self, expected, actual): self.assertEqual(expected["index"], actual["index"]) if expected["_id"] == "...": # Unspecified value. - self.assertTrue("_id" in actual) + self.assertIn("_id", actual) else: self.assertEqual(expected["_id"], actual["_id"]) @@ -107,7 +107,7 @@ def assertEqualWriteError(self, expected, actual): self.assertEqual(expected["code"], actual["code"]) if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue("errmsg" in actual) + self.assertIn("errmsg", actual) else: self.assertEqual(expected["errmsg"], actual["errmsg"]) @@ -115,7 +115,7 @@ def assertEqualWriteError(self, expected, actual): actual_op = actual["op"].copy() if expected_op.get("_id") == "...": # Unspecified _id. - self.assertTrue("_id" in actual_op) + self.assertIn("_id", actual_op) actual_op.pop("_id") expected_op.pop("_id") @@ -160,7 +160,7 @@ def _test_update_many(self, update): result = self.coll.bulk_write([UpdateMany({}, update)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(2, result.matched_count) - self.assertTrue(result.modified_count in (2, None)) + self.assertIn(result.modified_count, (2, None)) def test_update_many(self): self._test_update_many({"$set": {"foo": "bar"}}) @@ -201,7 +201,7 @@ def _test_update_one(self, update): result = self.coll.bulk_write([UpdateOne({}, update)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (1, None)) + self.assertIn(result.modified_count, (1, None)) def test_update_one(self): self._test_update_one({"$set": {"foo": "bar"}}) @@ -227,7 +227,7 @@ def test_replace_one(self): result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (1, None)) + self.assertIn(result.modified_count, (1, None)) def test_remove(self): # Test removing all documents, ordered. @@ -1035,7 +1035,7 @@ def test_write_concern_failure_ordered(self): self.assertTrue(len(details["writeConcernErrors"]) > 1) failed = details["writeErrors"][0] - self.assertTrue("duplicate" in failed["errmsg"]) + self.assertIn("duplicate", failed["errmsg"]) @client_context.require_version_max(7, 1) # PYTHON-4560 @client_context.require_replica_set diff --git a/test/test_client.py b/test/test_client.py index 4f2e5751ad..c50082797d 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -665,7 +665,7 @@ def test_max_idle_time_reaper_default(self): with server._pool.checkout() as conn: pass self.assertEqual(1, len(server._pool.conns)) - self.assertTrue(conn in server._pool.conns) + self.assertIn(conn, server._pool.conns) def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): @@ -731,7 +731,7 @@ def test_min_pool_size(self): lambda: len(server._pool.conns) == 10, "a closed socket gets replaced from the pool", ) - self.assertFalse(conn in server._pool.conns) + self.assertNotIn(conn, server._pool.conns) def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. @@ -746,8 +746,8 @@ def test_max_idle_time_checkout(self): with server._pool.checkout() as new_con: self.assertNotEqual(conn, new_con) self.assertEqual(1, len(server._pool.conns)) - self.assertFalse(conn in server._pool.conns) - self.assertTrue(new_con in server._pool.conns) + self.assertNotIn(conn, server._pool.conns) + self.assertIn(new_con, server._pool.conns) # Test that connections are reused if maxIdleTimeMS is not set. client = self.rs_or_single_client() @@ -1005,8 +1005,8 @@ def test_list_database_names(self): cmd_names = [doc["name"] for doc in cmd_docs] db_names = self.client.list_database_names() - self.assertTrue("pymongo_test" in db_names) - self.assertTrue("pymongo_test_mike" in db_names) + self.assertIn("pymongo_test", db_names) + self.assertIn("pymongo_test_mike", db_names) self.assertEqual(db_names, cmd_names) def test_drop_database(self): @@ -1220,9 +1220,9 @@ def test_unix_socket(self): client = self.rs_or_single_client(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() - self.assertTrue("pymongo_test" in dbs) + self.assertIn("pymongo_test", dbs) - self.assertTrue(mongodb_socket in repr(client)) + self.assertIn(mongodb_socket, repr(client)) # Confirm it fails with a missing socket. with self.assertRaises(ConnectionFailure): @@ -1390,8 +1390,8 @@ def test_ipv6(self): client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test_bernie", dbs) def test_contextlib(self): client = self.rs_or_single_client() diff --git a/test/test_collection.py b/test/test_collection.py index 6d1b02f1ac..5643a62022 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -212,7 +212,7 @@ def lambda_test_2(): def test_drop_nonexistent_collection(self): self.db.drop_collection("test") - self.assertFalse("test" in self.db.list_collection_names()) + self.assertNotIn("test", self.db.list_collection_names()) # No exception self.db.drop_collection("test") @@ -248,7 +248,7 @@ def test_create_indexes(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) db.test.create_indexes([IndexModel("hello")]) - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) @@ -257,7 +257,7 @@ def test_create_indexes(self): ) info = db.test.index_information() for name in names: - self.assertTrue(name in info) + self.assertIn(name, info) db.test.drop() db.test.insert_one({"a": 1}) @@ -309,16 +309,16 @@ def test_create_index(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) db.test.create_index("hello") - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) - self.assertTrue("hello_-1_world_1" in db.test.index_information()) + self.assertIn("hello_-1_world_1", db.test.index_information()) db.test.drop_indexes() db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) - self.assertTrue("hello_-1_world_1" in db.test.index_information()) + self.assertIn("hello_-1_world_1", db.test.index_information()) db.test.drop() db.test.insert_one({"a": 1}) @@ -347,7 +347,7 @@ def test_drop_index(self): with self.assertRaises(OperationFailure): db.test.drop_index(name) self.assertEqual(len(db.test.index_information()), 2) - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() db.test.create_index("hello") @@ -357,7 +357,7 @@ def test_drop_index(self): self.assertEqual(name, "goodbye_1") db.test.drop_index([("goodbye", ASCENDING)]) self.assertEqual(len(db.test.index_information()), 2) - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) with self.write_concern_collection() as coll: coll.drop_index("hello_1") @@ -389,7 +389,7 @@ def map_indexes(indexes): indexes = (db.test.list_indexes()).to_list() self.assertEqual(len(indexes), 1) - self.assertTrue("_id_" in map_indexes(indexes)) + self.assertIn("_id_", map_indexes(indexes)) db.test.create_index("hello") indexes = (db.test.list_indexes()).to_list() @@ -418,7 +418,7 @@ def test_index_info(self): db.test.drop() db.test.insert_one({}) # create collection self.assertEqual(len(db.test.index_information()), 1) - self.assertTrue("_id_" in db.test.index_information()) + self.assertIn("_id_", db.test.index_information()) db.test.create_index("hello") self.assertEqual(len(db.test.index_information()), 2) @@ -478,7 +478,7 @@ def test_index_text(self): db.test.drop_indexes() self.assertEqual("t_text", db.test.create_index([("t", TEXT)])) index_info = (db.test.index_information())["t_text"] - self.assertTrue("weights" in index_info) + self.assertIn("weights", index_info) db.test.insert_many( [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] @@ -539,7 +539,7 @@ def test_index_background(self): db.test.create_index([("keya", ASCENDING)]) db.test.create_index([("keyb", ASCENDING)], background=False) db.test.create_index([("keyc", ASCENDING)], background=True) - self.assertFalse("background" in (db.test.index_information())["keya_1"]) + self.assertNotIn("background", (db.test.index_information())["keya_1"]) self.assertFalse((db.test.index_information())["keyb_1"]["background"]) self.assertTrue((db.test.index_information())["keyc_1"]["background"]) @@ -690,7 +690,7 @@ def test_field_selection(self): doc = next(db.test.find({}, {"_id": False})) l = list(doc) - self.assertFalse("_id" in l) + self.assertNotIn("_id", l) def test_options(self): db = self.db @@ -752,7 +752,7 @@ def test_insert_many(self): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, ObjectId) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) @@ -764,7 +764,7 @@ def test_insert_many(self): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, int) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) @@ -931,7 +931,7 @@ def async_lambda(): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, int) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"x": doc["x"]})) self.assertTrue(result.acknowledged) docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] @@ -941,7 +941,7 @@ def async_lambda(): for doc in docs: _id = doc["_id"] self.assertIsInstance(_id, int) - self.assertTrue(_id in result.inserted_ids) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) @@ -1117,23 +1117,23 @@ def test_find_w_fields(self): db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) self.assertEqual(1, db.test.count_documents({})) doc = next(db.test.find({})) - self.assertTrue("x" in doc) + self.assertIn("x", doc) doc = next(db.test.find({})) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = next(db.test.find({})) - self.assertTrue("extra thing" in doc) + self.assertIn("extra thing", doc) doc = next(db.test.find({}, ["x", "mike"])) - self.assertTrue("x" in doc) + self.assertIn("x", doc) doc = next(db.test.find({}, ["x", "mike"])) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = next(db.test.find({}, ["x", "mike"])) - self.assertFalse("extra thing" in doc) + self.assertNotIn("extra thing", doc) doc = next(db.test.find({}, ["mike"])) - self.assertFalse("x" in doc) + self.assertNotIn("x", doc) doc = next(db.test.find({}, ["mike"])) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = next(db.test.find({}, ["mike"])) - self.assertFalse("extra thing" in doc) + self.assertNotIn("extra thing", doc) @no_type_check def test_fields_specifier_as_dict(self): @@ -1144,8 +1144,8 @@ def test_fields_specifier_as_dict(self): self.assertEqual([1, 2, 3], (db.test.find_one())["x"]) self.assertEqual([2, 3], (db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) - self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) - self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) + self.assertNotIn("x", db.test.find_one(projection={"x": 0})) + self.assertIn("mike", db.test.find_one(projection={"x": 0})) def test_find_w_regex(self): db = self.db @@ -1180,7 +1180,7 @@ def test_id_can_be_anything(self): for x in db.test.find(): self.assertEqual(x["hello"], "world") - self.assertTrue("_id" in x) + self.assertIn("_id", x) def test_unique_index(self): db = self.db @@ -1300,7 +1300,7 @@ def test_error_code(self): try: self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) except OperationFailure as exc: - self.assertTrue(exc.code in (9, 10147, 16840, 17009)) + self.assertIn(exc.code, (9, 10147, 16840, 17009)) # Just check that we set the error document. Fields # vary by MongoDB version. self.assertTrue(exc.details is not None) @@ -1334,7 +1334,7 @@ def test_replace_one(self): result = db.test.replace_one({"x": 1}, {"y": 1}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 1})) @@ -1345,7 +1345,7 @@ def test_replace_one(self): result = db.test.replace_one({"y": 1}, replacement, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"z": 1})) @@ -1355,7 +1355,7 @@ def test_replace_one(self): result = db.test.replace_one({"x": 2}, {"y": 2}, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) + self.assertIn(result.modified_count, (None, 0)) self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 2})) @@ -1379,7 +1379,7 @@ def test_update_one(self): result = db.test.update_one({}, {"$inc": {"x": 1}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual((db.test.find_one(id1))["x"], 6) # type: ignore @@ -1388,7 +1388,7 @@ def test_update_one(self): result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual((db.test.find_one(id1))["x"], 7) # type: ignore @@ -1397,7 +1397,7 @@ def test_update_one(self): result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) + self.assertIn(result.modified_count, (None, 0)) self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) @@ -1436,7 +1436,7 @@ def test_update_many(self): result = db.test.update_many({"x": 4}, {"$set": {"y": 5}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(2, result.matched_count) - self.assertTrue(result.modified_count in (None, 2)) + self.assertIn(result.modified_count, (None, 2)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(3, db.test.count_documents({"y": 5})) @@ -1444,7 +1444,7 @@ def test_update_many(self): result = db.test.update_many({"x": 5}, {"$set": {"y": 6}}) self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 6})) @@ -1452,7 +1452,7 @@ def test_update_many(self): result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) + self.assertIn(result.modified_count, (None, 0)) self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) @@ -1707,21 +1707,21 @@ def test_find_one(self): self.assertEqual(db.test.find_one({}), db.test.find_one()) self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) - self.assertTrue("hello" in db.test.find_one(projection=["hello"])) - self.assertTrue("hello" not in db.test.find_one(projection=["foo"])) + self.assertIn("hello", db.test.find_one(projection=["hello"])) + self.assertNotIn("hello", db.test.find_one(projection=["foo"])) - self.assertTrue("hello" in db.test.find_one(projection=("hello",))) - self.assertTrue("hello" not in db.test.find_one(projection=("foo",))) + self.assertIn("hello", db.test.find_one(projection=("hello",))) + self.assertNotIn("hello", db.test.find_one(projection=("foo",))) - self.assertTrue("hello" in db.test.find_one(projection={"hello"})) - self.assertTrue("hello" not in db.test.find_one(projection={"foo"})) + self.assertIn("hello", db.test.find_one(projection={"hello"})) + self.assertNotIn("hello", db.test.find_one(projection={"foo"})) - self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) - self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) + self.assertIn("hello", db.test.find_one(projection=frozenset(["hello"]))) + self.assertNotIn("hello", db.test.find_one(projection=frozenset(["foo"]))) self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": True}))) - self.assertTrue("hello" in list(db.test.find_one(projection={}))) - self.assertTrue("hello" in list(db.test.find_one(projection=[]))) + self.assertIn("hello", list(db.test.find_one(projection={}))) + self.assertIn("hello", list(db.test.find_one(projection=[]))) self.assertEqual(None, db.test.find_one({"hello": "foo"})) self.assertEqual(None, db.test.find_one(ObjectId())) diff --git a/test/test_cursor.py b/test/test_cursor.py index d5845e39b0..c33f509565 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -174,8 +174,8 @@ def test_max_time_ms(self): cursor = coll.find().max_time_ms(999) c2 = cursor.clone() self.assertEqual(999, c2._max_time_ms) - self.assertTrue("$maxTimeMS" in cursor._query_spec()) - self.assertTrue("$maxTimeMS" in c2._query_spec()) + self.assertIn("$maxTimeMS", cursor._query_spec()) + self.assertIn("$maxTimeMS", c2._query_spec()) self.assertTrue(coll.find_one(max_time_ms=1000)) @@ -236,19 +236,19 @@ def test_max_await_time_ms(self): # Tailable_defaults. coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() # find - self.assertFalse("maxTimeMS" in listener.started_events[0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) listener.reset() # Tailable_with max_await_time_ms set. coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertIn("maxTimeMS", listener.started_events[1].command) self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) listener.reset() @@ -259,11 +259,11 @@ def test_max_await_time_ms(self): coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) listener.reset() # Tailable_with both max_time_ms and max_await_time_ms @@ -275,11 +275,11 @@ def test_max_await_time_ms(self): ) # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[1].command) + self.assertIn("maxTimeMS", listener.started_events[1].command) self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) listener.reset() @@ -287,31 +287,31 @@ def test_max_await_time_ms(self): coll.find(batch_size=1).max_await_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) listener.reset() # Non tailable_await with max_time_ms coll.find(batch_size=1).max_time_ms(99).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) # Non tailable_await with both max_time_ms and max_await_time_ms coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88).to_list() # find self.assertEqual("find", listener.started_events[0].command_name) - self.assertTrue("maxTimeMS" in listener.started_events[0].command) + self.assertIn("maxTimeMS", listener.started_events[0].command) self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore self.assertEqual("getMore", listener.started_events[1].command_name) - self.assertFalse("maxTimeMS" in listener.started_events[1].command) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -924,16 +924,19 @@ def test_clone(self): # Shallow copies can so can mutate cursor2 = copy.copy(cursor) cursor2._projection["cursor2"] = False - self.assertTrue(cursor._projection and "cursor2" in cursor._projection) + self.assertIsNotNone(cursor._projection) + self.assertIn("cursor2", cursor._projection.keys()) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) cursor3._projection["cursor3"] = False - self.assertFalse(cursor._projection and "cursor3" in cursor._projection) + self.assertIsNotNone(cursor._projection) + self.assertNotIn("cursor3", cursor._projection.keys()) cursor4 = cursor.clone() cursor4._projection["cursor4"] = False - self.assertFalse(cursor._projection and "cursor4" in cursor._projection) + self.assertIsNotNone(cursor._projection) + self.assertNotIn("cursor4", cursor._projection.keys()) # Test memo when deepcopying queries query = {"hello": "world"} diff --git a/test/test_database.py b/test/test_database.py index 0cb016e269..0fe6c01a9d 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -162,13 +162,13 @@ def test_create_collection(self): db.create_collection("coll..ection") # type: ignore[arg-type] test = db.create_collection("test") - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) test.insert_one({"hello": "world"}) self.assertEqual((db.test.find_one())["hello"], "world") db.drop_collection("test.foo") db.create_collection("test.foo") - self.assertTrue("test.foo" in db.list_collection_names()) + self.assertIn("test.foo", db.list_collection_names()) with self.assertRaises(CollectionInvalid): db.create_collection("test.foo") @@ -178,10 +178,10 @@ def test_list_collection_names(self): db.test.mike.insert_one({"dummy": "object"}) colls = db.list_collection_names() - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + self.assertIn("test", colls) + self.assertIn("test.mike", colls) for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) db.systemcoll.test.insert_one({}) no_system_collections = db.list_collection_names( @@ -251,12 +251,12 @@ def test_list_collections(self): colls = [result["name"] for result in results] # All the collections present. - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + self.assertIn("test", colls) + self.assertIn("test.mike", colls) # No collection containing a '$'. for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) # Duplicate check. coll_cnt: dict = {} @@ -291,12 +291,12 @@ def test_list_collections(self): colls = [result["name"] for result in results] # Checking only capped collections are present - self.assertTrue("test" in colls) - self.assertFalse("test.mike" in colls) + self.assertIn("test", colls) + self.assertNotIn("test.mike", colls) # No collection containing a '$'. for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) # Duplicate check. coll_cnt = {} @@ -336,24 +336,24 @@ def test_drop_collection(self): db.drop_collection(None) # type: ignore[arg-type] db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.drop_collection("test") - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.drop_collection("test") - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.drop_collection(db.test) - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.test.drop() - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.drop() db.drop_collection(db.test.doesnotexist) diff --git a/test/test_examples.py b/test/test_examples.py index 0585d1e057..bda5403200 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -479,77 +479,77 @@ def test_projection(self): # End Example 44 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 45 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) # End Example 45 for doc in cursor: - self.assertFalse("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertFalse("instock" in doc) + self.assertNotIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 46 cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) # End Example 46 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertFalse("status" in doc) - self.assertTrue("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertNotIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 47 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) # End Example 47 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertTrue("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) size = doc["size"] - self.assertTrue("uom" in size) - self.assertFalse("h" in size) - self.assertFalse("w" in size) + self.assertIn("uom", size) + self.assertNotIn("h", size) + self.assertNotIn("w", size) # Start Example 48 cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) # End Example 48 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertTrue("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertIn("instock", doc) size = doc["size"] - self.assertFalse("uom" in size) - self.assertTrue("h" in size) - self.assertTrue("w" in size) + self.assertNotIn("uom", size) + self.assertIn("h", size) + self.assertIn("w", size) # Start Example 49 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) # End Example 49 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) for subdoc in doc["instock"]: - self.assertFalse("warehouse" in subdoc) - self.assertTrue("qty" in subdoc) + self.assertNotIn("warehouse", subdoc) + self.assertIn("qty", subdoc) # Start Example 50 cursor = db.inventory.find( @@ -558,11 +558,11 @@ def test_projection(self): # End Example 50 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) self.assertEqual(len(doc["instock"]), 1) def test_update_and_replace(self): @@ -645,7 +645,7 @@ def test_update_and_replace(self): for doc in db.inventory.find({"item": "paper"}): self.assertEqual(doc["size"]["uom"], "cm") self.assertEqual(doc["status"], "P") - self.assertTrue("lastModified" in doc) + self.assertIn("lastModified", doc) # Start Example 53 db.inventory.update_many( @@ -657,7 +657,7 @@ def test_update_and_replace(self): for doc in db.inventory.find({"qty": {"$lt": 50}}): self.assertEqual(doc["size"]["uom"], "in") self.assertEqual(doc["status"], "P") - self.assertTrue("lastModified" in doc) + self.assertIn("lastModified", doc) # Start Example 54 db.inventory.replace_one( @@ -671,8 +671,8 @@ def test_update_and_replace(self): for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): self.assertEqual(len(doc.keys()), 2) - self.assertTrue("item" in doc) - self.assertTrue("instock" in doc) + self.assertIn("item", doc) + self.assertIn("instock", doc) self.assertEqual(len(doc["instock"]), 2) def test_delete(self): diff --git a/test/test_json_util_integration.py b/test/test_json_util_integration.py index acab4f3182..4ef5f10fe2 100644 --- a/test/test_json_util_integration.py +++ b/test/test_json_util_integration.py @@ -25,4 +25,4 @@ def test_cursor(self): db.test.insert_many(docs) reloaded_docs = json_util.loads(json_util.dumps((db.test.find()).to_list())) for doc in docs: - self.assertTrue(doc in reloaded_docs) + self.assertIn(doc, reloaded_docs) diff --git a/test/test_monitoring.py b/test/test_monitoring.py index fc8be127e3..8b54793a36 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -1086,8 +1086,8 @@ def test_first_batch_helper(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue("cursor" in succeeded.reply) - self.assertTrue("ok" in succeeded.reply) + self.assertIn("cursor", succeeded.reply) + self.assertIn("ok", succeeded.reply) self.listener.reset() diff --git a/test/test_session.py b/test/test_session.py index a6266884aa..49cb9dba91 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -134,8 +134,9 @@ def _test_ops(self, client, *ops): f(*args, **kw) self.assertGreaterEqual(len(listener.started_events), 1) for event in listener.started_events: - self.assertTrue( - "lsid" in event.command, + self.assertIn( + "lsid", + event.command, f"{f.__name__} sent no lsid with {event.command_name}", ) @@ -170,8 +171,9 @@ def _test_ops(self, client, *ops): self.assertGreaterEqual(len(listener.started_events), 1) lsids = [] for event in listener.started_events: - self.assertTrue( - "lsid" in event.command, + self.assertIn( + "lsid", + event.command, f"{f.__name__} sent no lsid with {event.command_name}", ) @@ -422,8 +424,9 @@ def test_cursor(self): f(session=s) self.assertGreaterEqual(len(listener.started_events), 1) for event in listener.started_events: - self.assertTrue( - "lsid" in event.command, + self.assertIn( + "lsid", + event.command, f"{name} sent no lsid with {event.command_name}", ) @@ -441,15 +444,13 @@ def test_cursor(self): listener.reset() f(session=None) event0 = listener.first_command_started() - self.assertTrue( - "lsid" in event0.command, f"{name} sent no lsid with {event0.command_name}" - ) + self.assertIn("lsid", event0.command, f"{name} sent no lsid with {event0.command_name}") lsid = event0.command["lsid"] for event in listener.started_events[1:]: - self.assertTrue( - "lsid" in event.command, f"{name} sent no lsid with {event.command_name}" + self.assertIn( + "lsid", event.command, f"{name} sent no lsid with {event.command_name}" ) self.assertEqual( @@ -1187,15 +1188,17 @@ def aggregate(): self.assertGreaterEqual(len(listener.started_events), 1) for i, event in enumerate(listener.started_events): - self.assertTrue( - "$clusterTime" in event.command, + self.assertIn( + "$clusterTime", + event.command, f"{f.__name__} sent no $clusterTime with {event.command_name}", ) if i > 0: succeeded = listener.succeeded_events[i - 1] - self.assertTrue( - "$clusterTime" in succeeded.reply, + self.assertIn( + "$clusterTime", + succeeded.reply, f"{f.__name__} received no $clusterTime with {succeeded.command_name}", ) diff --git a/test/test_son.py b/test/test_son.py index a06d92bcb2..36a6834889 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -148,8 +148,8 @@ def test_contains_has(self): """has_key and __contains__""" test_son = SON([(1, 100), (2, 200), (3, 300)]) self.assertIn(1, test_son) - self.assertTrue(2 in test_son, "in failed") - self.assertFalse(22 in test_son, "in succeeded when it shouldn't") + self.assertIn(2, test_son, "in failed") + self.assertNotIn(22, test_son, "in succeeded when it shouldn't") self.assertTrue(test_son.has_key(2), "has_key failed") self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") From 27593796fb3deb7c219fddf861b1f62ab0f126ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20=C5=A0im=C3=A1=C4=8Dek?= Date: Fri, 23 May 2025 19:01:30 +0200 Subject: [PATCH 1932/2111] PYTHON-5391 Skip C extension build on GraalPy (#2349) Co-authored-by: Steven Silvester --- _setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/_setup.py b/_setup.py index 1a8b9e0246..f99e9e7dc8 100644 --- a/_setup.py +++ b/_setup.py @@ -130,7 +130,11 @@ def build_extension(self, ext): except ValueError: pass ext_modules = [] -elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: +elif ( + sys.platform.startswith("java") + or sys.platform == "cli" + or sys.implementation.name in ("pypy", "graalpy") +): sys.stdout.write( """ *****************************************************\n From 1366b9132edbf0a807796a965156e93a791b6697 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 28 May 2025 15:44:54 -0400 Subject: [PATCH 1933/2111] PYTHON-5394 - Add native async support for OIDC (#2352) --- pymongo/asynchronous/auth_oidc.py | 23 +- pymongo/asynchronous/cursor.py | 1 - pymongo/asynchronous/helpers.py | 2 +- pymongo/synchronous/auth_oidc.py | 15 +- pymongo/synchronous/cursor.py | 1 - test/asynchronous/test_auth_oidc.py | 1173 ++++++++++++++++++++++++ test/asynchronous/test_auth_spec.py | 2 +- test/{auth_oidc => }/test_auth_oidc.py | 45 +- test/test_auth_spec.py | 2 +- tools/synchro.py | 1 + 10 files changed, 1232 insertions(+), 33 deletions(-) create mode 100644 test/asynchronous/test_auth_oidc.py rename test/{auth_oidc => }/test_auth_oidc.py (97%) diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py index 217c8104a2..20b8340060 100644 --- a/pymongo/asynchronous/auth_oidc.py +++ b/pymongo/asynchronous/auth_oidc.py @@ -15,6 +15,7 @@ """MONGODB-OIDC Authentication helpers.""" from __future__ import annotations +import asyncio import threading import time from dataclasses import dataclass, field @@ -36,6 +37,7 @@ ) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE +from pymongo.lock import Lock, _async_create_lock if TYPE_CHECKING: from pymongo.asynchronous.pool import AsyncConnection @@ -81,7 +83,11 @@ class _OIDCAuthenticator: access_token: Optional[str] = field(default=None) idp_info: Optional[OIDCIdPInfo] = field(default=None) token_gen_id: int = field(default=0) - lock: threading.Lock = field(default_factory=threading.Lock) + if not _IS_SYNC: + lock: Lock = field(default_factory=_async_create_lock) # type: ignore[assignment] + else: + lock: threading.Lock = field(default_factory=_async_create_lock) # type: ignore[assignment, no-redef] + last_call_time: float = field(default=0) async def reauthenticate(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]: @@ -164,7 +170,7 @@ async def _authenticate_human(self, conn: AsyncConnection) -> Optional[Mapping[s # Attempt to authenticate with a JwtStepRequest. return await self._sasl_continue_jwt(conn, start_resp) - def _get_access_token(self) -> Optional[str]: + async def _get_access_token(self) -> Optional[str]: properties = self.properties cb: Union[None, OIDCCallback] resp: OIDCCallbackResult @@ -186,7 +192,7 @@ def _get_access_token(self) -> Optional[str]: return None if not prev_token and cb is not None: - with self.lock: + async with self.lock: # type: ignore[attr-defined] # See if the token was changed while we were waiting for the # lock. new_token = self.access_token @@ -196,7 +202,7 @@ def _get_access_token(self) -> Optional[str]: # Ensure that we are waiting a min time between callback invocations. delta = time.time() - self.last_call_time if delta < TIME_BETWEEN_CALLS_SECONDS: - time.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + await asyncio.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) self.last_call_time = time.time() if is_human: @@ -211,7 +217,10 @@ def _get_access_token(self) -> Optional[str]: idp_info=self.idp_info, username=self.properties.username, ) - resp = cb.fetch(context) + if not _IS_SYNC: + resp = await asyncio.get_running_loop().run_in_executor(None, cb.fetch, context) # type: ignore[assignment] + else: + resp = cb.fetch(context) if not isinstance(resp, OIDCCallbackResult): raise ValueError( f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" @@ -253,13 +262,13 @@ async def _sasl_continue_jwt( start_payload: dict = bson.decode(start_resp["payload"]) if "issuer" in start_payload: self.idp_info = OIDCIdPInfo(**start_payload) - access_token = self._get_access_token() + access_token = await self._get_access_token() conn.oidc_token_gen_id = self.token_gen_id cmd = self._get_continue_command({"jwt": access_token}, start_resp) return await self._run_command(conn, cmd) async def _sasl_start_jwt(self, conn: AsyncConnection) -> Mapping[str, Any]: - access_token = self._get_access_token() + access_token = await self._get_access_token() conn.oidc_token_gen_id = self.token_gen_id cmd = self._get_start_command({"jwt": access_token}) return await self._run_command(conn, cmd) diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 1b25bf4ee8..02954fb559 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -1130,7 +1130,6 @@ async def _send_message(self, operation: Union[_Query, _GetMore]) -> None: except BaseException: await self.close() raise - self._address = response.address if isinstance(response, PinnedResponse): if not self._sock_mgr: diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 88b710345b..54fd64f74a 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -64,7 +64,7 @@ async def inner(*args: Any, **kwargs: Any) -> Any: await conn.authenticate(reauthenticate=True) else: raise - return func(*args, **kwargs) + return await func(*args, **kwargs) raise return cast(F, inner) diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py index 8a8703c142..f4d754687d 100644 --- a/pymongo/synchronous/auth_oidc.py +++ b/pymongo/synchronous/auth_oidc.py @@ -15,6 +15,7 @@ """MONGODB-OIDC Authentication helpers.""" from __future__ import annotations +import asyncio import threading import time from dataclasses import dataclass, field @@ -36,6 +37,7 @@ ) from pymongo.errors import ConfigurationError, OperationFailure from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE +from pymongo.lock import Lock, _create_lock if TYPE_CHECKING: from pymongo.auth_shared import MongoCredential @@ -81,7 +83,11 @@ class _OIDCAuthenticator: access_token: Optional[str] = field(default=None) idp_info: Optional[OIDCIdPInfo] = field(default=None) token_gen_id: int = field(default=0) - lock: threading.Lock = field(default_factory=threading.Lock) + if not _IS_SYNC: + lock: Lock = field(default_factory=_create_lock) # type: ignore[assignment] + else: + lock: threading.Lock = field(default_factory=_create_lock) # type: ignore[assignment, no-redef] + last_call_time: float = field(default=0) def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: @@ -186,7 +192,7 @@ def _get_access_token(self) -> Optional[str]: return None if not prev_token and cb is not None: - with self.lock: + with self.lock: # type: ignore[attr-defined] # See if the token was changed while we were waiting for the # lock. new_token = self.access_token @@ -211,7 +217,10 @@ def _get_access_token(self) -> Optional[str]: idp_info=self.idp_info, username=self.properties.username, ) - resp = cb.fetch(context) + if not _IS_SYNC: + resp = asyncio.get_running_loop().run_in_executor(None, cb.fetch, context) # type: ignore[assignment] + else: + resp = cb.fetch(context) if not isinstance(resp, OIDCCallbackResult): raise ValueError( f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 31c4604f89..ba35316516 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -1128,7 +1128,6 @@ def _send_message(self, operation: Union[_Query, _GetMore]) -> None: except BaseException: self.close() raise - self._address = response.address if isinstance(response, PinnedResponse): if not self._sock_mgr: diff --git a/test/asynchronous/test_auth_oidc.py b/test/asynchronous/test_auth_oidc.py new file mode 100644 index 0000000000..8c06c4a21d --- /dev/null +++ b/test/asynchronous/test_auth_oidc.py @@ -0,0 +1,1173 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" +from __future__ import annotations + +import os +import sys +import time +import unittest +import warnings +from contextlib import asynccontextmanager +from pathlib import Path +from test.asynchronous import AsyncPyMongoTestCase +from test.asynchronous.helpers import ConcurrentRunner +from typing import Dict + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import EventListener, OvertCommandListener + +from bson import SON +from pymongo import AsyncMongoClient +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.asynchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + _get_authenticator, +) +from pymongo.auth_oidc_shared import _get_k8s_token +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.cursor_shared import CursorType +from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne +from pymongo.synchronous.uri_parser import parse_uri + +_IS_SYNC = False + +ROOT = Path(__file__).parent.parent.resolve() +TEST_PATH = ROOT / "auth" / "unified" +ENVIRON = os.environ.get("OIDC_ENV", "test") +DOMAIN = os.environ.get("OIDC_DOMAIN", "") +TOKEN_DIR = os.environ.get("OIDC_TOKEN_DIR", "") +TOKEN_FILE = os.environ.get("OIDC_TOKEN_FILE", "") + +# Generate unified tests. +globals().update(generate_test_classes(str(TEST_PATH), module=__name__)) + +pytestmark = pytest.mark.auth_oidc + + +class OIDCTestBase(AsyncPyMongoTestCase): + @classmethod + def setUpClass(cls): + cls.uri_single = os.environ["MONGODB_URI_SINGLE"] + cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI") + cls.uri_admin = os.environ["MONGODB_URI"] + if ENVIRON == "test": + if not TOKEN_DIR: + raise ValueError("Please set OIDC_TOKEN_DIR") + if not TOKEN_FILE: + raise ValueError("Please set OIDC_TOKEN_FILE") + + async def asyncSetUp(self): + self.request_called = 0 + + def get_token(self, username=None): + """Get a token for the current provider.""" + if ENVIRON == "test": + if username is None: + token_file = TOKEN_FILE + else: + token_file = os.path.join(TOKEN_DIR, username) + with open(token_file) as fid: # noqa: ASYNC101,RUF100 + return fid.read() + elif ENVIRON == "azure": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + return _get_azure_response(token_aud, username)["access_token"] + elif ENVIRON == "gcp": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + return _get_gcp_response(token_aud, username)["access_token"] + elif ENVIRON == "k8s": + return _get_k8s_token() + else: + raise ValueError(f"Unknown ENVIRON: {ENVIRON}") + + @asynccontextmanager + async def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client = AsyncMongoClient(self.uri_admin) + await client.admin.command(cmd_on) + try: + yield + finally: + await client.admin.command( + "configureFailPoint", cmd_on["configureFailPoint"], mode="off" + ) + await client.close() + + +class TestAuthOIDCHuman(OIDCTestBase): + uri: str + + @classmethod + def setUpClass(cls): + if ENVIRON != "test": + raise unittest.SkipTest("Human workflows are only tested with the test environment") + if DOMAIN is None: + raise ValueError("Missing OIDC_DOMAIN") + super().setUpClass() + + async def asyncSetUp(self): + self.refresh_present = 0 + await super().asyncSetUp() + + def create_request_cb(self, username="test_user1", sleep=0): + def request_token(context: OIDCCallbackContext): + # Validate the info. + self.assertIsInstance(context.idp_info.issuer, str) + if context.idp_info.clientId is not None: + self.assertIsInstance(context.idp_info.clientId, str) + + # Validate the timeout. + timeout_seconds = context.timeout_seconds + self.assertEqual(timeout_seconds, 60 * 5) + + if context.refresh_token: + self.refresh_present += 1 + + token = self.get_token(username) + resp = OIDCCallbackResult(access_token=token, refresh_token=token) + + time.sleep(sleep) + self.request_called += 1 + return resp + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + async def create_client(self, *args, **kwargs): + username = kwargs.get("username", "test_user1") + if kwargs.get("username") in ["test_user1", "test_user2"]: + kwargs["username"] = f"{username}@{DOMAIN}" + request_cb = kwargs.pop("request_cb", self.create_request_cb(username=username)) + props = kwargs.pop("authmechanismproperties", {"OIDC_HUMAN_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + + client = self.simple_client(*args, authmechanismproperties=props, **kwargs) + + return client + + async def test_1_1_single_principal_implicit_username(self): + # Create default OIDC client with authMechanism=MONGODB-OIDC. + client = await self.create_client() + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_2_single_principal_explicit_username(self): + # Create a client with MONGODB_URI_SINGLE, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(username="test_user1") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_3_multiple_principal_user_1(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple, username="test_user1") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_4_multiple_principal_user_2(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a human callback that reads in the generated test_user2 token file. + # Create a client with MONGODB_URI_MULTI, a username of test_user2, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple, username="test_user2") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_5_multiple_principal_no_user(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, no username, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple) + # Assert that a find operation fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_6_allowed_hosts_blocked(self): + # Create a default OIDC client, with an ALLOWED_HOSTS that is an empty list. + request_token = self.create_request_cb() + props: Dict = {"OIDC_HUMAN_CALLBACK": request_token, "ALLOWED_HOSTS": []} + client = await self.create_client(authmechanismproperties=props) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + await client.test.test.find_one() + # Close the client. + await client.close() + + # Create a client that uses the URL mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com, + # a human callback, and an ALLOWED_HOSTS that contains ["example.com"]. + props: Dict = { + "OIDC_HUMAN_CALLBACK": request_token, + "ALLOWED_HOSTS": ["example.com"], + } + with warnings.catch_warnings(): + warnings.simplefilter("default") + client = await self.create_client( + self.uri_single + "&ignored=example.com", + authmechanismproperties=props, + connect=False, + ) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_7_allowed_hosts_in_connection_string_ignored(self): + # Create an OIDC configured client with the connection string: `mongodb+srv://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D` and a Human Callback. + # Assert that the creation of the client raises a configuration error. + uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D" + with self.assertRaises(ConfigurationError), warnings.catch_warnings(): + warnings.simplefilter("ignore") + c = AsyncMongoClient( + uri, + authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()), + ) + await c.aconnect() + + async def test_1_8_machine_idp_human_callback(self): + if not os.environ.get("OIDC_IS_LOCAL"): + raise unittest.SkipTest("Test Requires Local OIDC server") + # Create a client with MONGODB_URI_SINGLE, a username of test_machine, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(username="test_machine") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_2_1_valid_callback_inputs(self): + # Create a AsyncMongoClient with a human callback that validates its inputs and returns a valid access token. + client = await self.create_client() + # Perform a find operation that succeeds. Verify that the human callback was called with the appropriate inputs, including the timeout parameter if possible. + # Ensure that there are no unexpected fields. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_2_2_callback_returns_missing_data(self): + # Create a AsyncMongoClient with a human callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCB(OIDCCallback): + def fetch(self, ctx): + return dict() + + client = await self.create_client(request_cb=CustomCB()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_2_3_refresh_token_is_passed_to_the_callback(self): + # Create a AsyncMongoClient with a human callback that checks for the presence of a refresh token. + client = await self.create_client() + + # Perform a find operation that succeeds. + await client.test.test.find_one() + + # Set a fail point for ``find`` commands. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. + await client.test.test.find_one() + + # Assert that the callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the refresh token was used once. + self.assertEqual(self.refresh_present, 1) + + async def test_3_1_uses_speculative_authentication_if_there_is_a_cached_token(self): + # Create a client with a human callback that returns a valid token. + client = await self.create_client() + + # Set a fail point for ``find`` commands. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(AutoReconnect): + await client.test.test.find_one() + + # Set a fail point for ``saslStart`` commands. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that succeeds + await client.test.test.find_one() + + # Close the client. + await client.close() + + async def test_3_2_does_not_use_speculative_authentication_if_there_is_no_cached_token(self): + # Create a ``AsyncMongoClient`` with a human callback that returns a valid token + client = await self.create_client() + + # Set a fail point for ``saslStart`` commands. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + + # Close the client. + await client.close() + + async def test_4_1_reauthenticate_succeeds(self): + # Create a default OIDC client and add an event listener. + # The following assumes that the driver does not emit saslStart or saslContinue events. + # If the driver does emit those events, ignore/filter them for the purposes of this test. + listener = OvertCommandListener() + client = await self.create_client(event_listeners=[listener]) + + # Perform a find operation that succeeds. + await client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Clear the listener state if possible. + listener.reset() + + # Force a reauthenication using a fail point. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform another find operation that succeeds. + await client.test.test.find_one() + + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the ordering of list started events is [find, find]. + # Note that if the listener stat could not be cleared then there will be an extra find command. + started_events = [ + i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") + ] + succeeded_events = [ + i.command_name + for i in listener.succeeded_events + if not i.command_name.startswith("sasl") + ] + failed_events = [ + i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl") + ] + + self.assertEqual( + started_events, + [ + "find", + "find", + ], + ) + # Assert that the list of command succeeded events is [find]. + self.assertEqual(succeeded_events, ["find"]) + # Assert that a find operation failed once during the command execution. + self.assertEqual(failed_events, ["find"]) + # Close the client. + await client.close() + + async def test_4_2_reauthenticate_succeeds_no_refresh(self): + # Create a default OIDC client with a human callback that does not return a refresh token. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = None + return result + + client = await self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds. + await client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthenication using a fail point. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + await client.test.test.find_one() + + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + # Close the client. + await client.close() + + async def test_4_3_reauthenticate_succeeds_after_refresh_fails(self): + # Create a default OIDC client with a human callback that returns an invalid refresh token + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + return result + + client = await self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds. + await client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthenication using a fail point. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + await client.test.test.find_one() + + # Assert that the human callback has been called 2 times. + self.assertEqual(self.request_called, 2) + + # Close the client. + await client.close() + + async def test_4_4_reauthenticate_fails(self): + # Create a default OIDC client with a human callback that returns invalid refresh tokens and + # Returns invalid access tokens after the first access. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + fetch_called = 0 + + def fetch(self, *args, **kwargs): + self.fetch_called += 1 + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + if self.fetch_called > 1: + result.access_token = "bad" + return result + + client = await self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds (to force a speculative auth). + await client.test.test.find_one() + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a failCommand. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + + # Assert that the human callback has been called three times. + self.assertEqual(self.request_called, 3) + + # Close the client. + await client.close() + + async def test_request_callback_returns_null(self): + class RequestTokenNull(OIDCCallback): + def fetch(self, a): + return None + + client = await self.create_client(request_cb=RequestTokenNull()) + with self.assertRaises(ValueError): + await client.test.test.find_one() + await client.close() + + async def test_request_callback_invalid_result(self): + class CallbackInvalidToken(OIDCCallback): + def fetch(self, a): + return {} + + client = await self.create_client(request_cb=CallbackInvalidToken()) + with self.assertRaises(ValueError): + await client.test.test.find_one() + await client.close() + + async def test_reauthentication_succeeds_multiple_connections(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + client1 = await self.create_client(request_cb=request_cb) + client2 = await self.create_client(request_cb=request_cb) + + # Perform an insert operation. + await client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + await client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + await client1.test.test.find_one() + await client2.test.test.find_one() + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + await client1.close() + await client2.close() + + # PyMongo specific tests, since we have multiple code paths for reauth handling. + + async def test_reauthenticate_succeeds_bulk_write(self): + # Create a client. + client = await self.create_client() + + # Perform a find operation. + await client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + await client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_bulk_read(self): + # Create a client. + client = await self.create_client() + + # Perform a find operation. + await client.test.test.find_one() + + # Perform a bulk write operation. + await client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + await cursor.to_list() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_cursor(self): + # Create a client. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(await cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_get_more(self): + # Create a client. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1) + self.assertGreaterEqual(len(await cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_get_more_exhaust(self): + # Ensure no mongos + client = await self.create_client() + hello = await client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") != "isdbgrid": + raise unittest.SkipTest("Must not be a mongos") + + # Create a client with the callback. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST) + self.assertGreaterEqual(len(await cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_command(self): + # Create a client. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["count"], "errorCode": 391}, + } + ): + # Perform a count operation. + cursor = await client.test.command({"count": "test"}) + + self.assertGreaterEqual(len(cursor), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + +class TestAuthOIDCMachine(OIDCTestBase): + uri: str + + async def asyncSetUp(self): + self.request_called = 0 + + def create_request_cb(self, username=None, sleep=0): + def request_token(context): + assert isinstance(context.timeout_seconds, int) + assert context.version == 1 + assert context.refresh_token is None + assert context.idp_info is None + token = self.get_token(username) + time.sleep(sleep) + self.request_called += 1 + return OIDCCallbackResult(access_token=token) + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + async def create_client(self, *args, **kwargs): + request_cb = kwargs.pop("request_cb", self.create_request_cb()) + props = kwargs.pop("authmechanismproperties", {"OIDC_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + client = AsyncMongoClient(*args, authmechanismproperties=props, **kwargs) + self.addAsyncCleanup(client.close) + return client + + async def test_1_1_callback_is_called_during_reauthentication(self): + # Create a ``AsyncMongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = await self.create_client() + # Perform a ``find`` operation that succeeds. + await client.test.test.find_one() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + async def test_1_2_callback_is_called_once_for_multiple_connections(self): + # Create a ``AsyncMongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = await self.create_client() + await client.aconnect() + + # Start 10 tasks and run 100 find operations that all succeed in each task. + async def target(): + for _ in range(100): + await client.test.test.find_one() + + tasks = [] + for i in range(10): + tasks.append(ConcurrentRunner(target=target)) + for t in tasks: + await t.start() + for t in tasks: + await t.join() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + async def test_2_1_valid_callback_inputs(self): + # Create a AsyncMongoClient configured with an OIDC callback that validates its inputs and returns a valid access token. + client = await self.create_client() + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields. + self.assertEqual(self.request_called, 1) + + async def test_2_2_oidc_callback_returns_null(self): + # Create a AsyncMongoClient configured with an OIDC callback that returns null. + class CallbackNullToken(OIDCCallback): + def fetch(self, a): + return None + + client = await self.create_client(request_cb=CallbackNullToken()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_2_3_oidc_callback_returns_missing_data(self): + # Create a AsyncMongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return object() + + client = await self.create_client(request_cb=CustomCallback()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_2_4_invalid_client_configuration_with_callback(self): + # Create a AsyncMongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test. + request_cb = self.create_request_cb() + props: Dict = {"OIDC_CALLBACK": request_cb, "ENVIRONMENT": "test"} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + await self.create_client(authmechanismproperties=props) + + async def test_2_5_invalid_use_of_ALLOWED_HOSTS(self): + # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}`. + props: Dict = {"ENVIRONMENT": "test", "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + await self.create_client(authmechanismproperties=props) + + # Create an OIDC configured client with auth mechanism properties `{"OIDC_CALLBACK": "", "ALLOWED_HOSTS": []}`. + props: Dict = {"OIDC_CALLBACK": self.create_request_cb(), "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + await self.create_client(authmechanismproperties=props) + + async def test_2_6_ALLOWED_HOSTS_defaults_ignored(self): + # Create a MongoCredential for OIDC with a machine callback. + props = {"OIDC_CALLBACK": self.create_request_cb()} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, "foo", None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "foo" + + # Create a MongoCredential for OIDC with an ENVIRONMENT. + props = {"ENVIRONMENT": "test"} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, None, None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "" + + async def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self): + # Create a AsyncMongoClient and an OIDC callback that implements the provider logic. + client = await self.create_client() + await client.aconnect() + # Poison the cache with an invalid access token. + # Set a fail point for ``find`` command. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. This is to force the ``AsyncMongoClient`` + # to cache an access token. + with self.assertRaises(AutoReconnect): + await client.test.test.find_one() + # Poison the cache of the client. + client.options.pool_options._credentials.cache.data.access_token = "bad" + # Reset the request count. + self.request_called = 0 + # Verify that a find succeeds. + await client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + async def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self): + # Create a AsyncMongoClient configured with retryReads=false and an OIDC callback that always returns invalid access tokens. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return OIDCCallbackResult(access_token="bad value") + + callback = CustomCallback() + client = await self.create_client(request_cb=callback) + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(callback.count, 1) + + async def test_3_3_unexpected_error_code_does_not_clear_cache(self): + # Create a ``AsyncMongoClient`` with a human callback that returns a valid token + client = await self.create_client() + + # Set a fail point for ``saslStart`` commands. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 20}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + + # Assert that the callback has been called once. + self.assertEqual(self.request_called, 1) + + # Perform a ``find`` operation that succeeds. + await client.test.test.find_one() + + # Assert that the callback has been called once. + self.assertEqual(self.request_called, 1) + + async def test_4_1_reauthentication_succeeds(self): + # Create a ``AsyncMongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = await self.create_client() + await client.aconnect() + + # Set a fail point for the find command. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. + await client.test.test.find_one() + + # Verify that the callback was called 2 times (once during the connection + # handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + + async def test_4_2_read_commands_fail_if_reauthentication_fails(self): + # Create a ``AsyncMongoClient`` whose OIDC callback returns one good token and then + # bad tokens after the first call. + get_token = self.get_token + + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, _): + self.count += 1 + if self.count == 1: + access_token = get_token() + else: + access_token = "bad value" + return OIDCCallbackResult(access_token=access_token) + + callback = CustomCallback() + client = await self.create_client(request_cb=callback) + + # Perform a read operation that succeeds. + await client.test.test.find_one() + + # Set a fail point for the find command. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + + # Verify that the callback was called 2 times. + self.assertEqual(callback.count, 2) + + async def test_4_3_write_commands_fail_if_reauthentication_fails(self): + # Create a ``AsyncMongoClient`` whose OIDC callback returns one good token and then + # bad token after the first call. + get_token = self.get_token + + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, _): + self.count += 1 + if self.count == 1: + access_token = get_token() + else: + access_token = "bad value" + return OIDCCallbackResult(access_token=access_token) + + callback = CustomCallback() + client = await self.create_client(request_cb=callback) + + # Perform an insert operation that succeeds. + await client.test.test.insert_one({}) + + # Set a fail point for the find command. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a ``insert`` operation that fails. + with self.assertRaises(OperationFailure): + await client.test.test.insert_one({}) + + # Verify that the callback was called 2 times. + self.assertEqual(callback.count, 2) + + async def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self): + # Create an OIDC configured client that can listen for `SaslStart` commands. + listener = EventListener() + client = await self.create_client(event_listeners=[listener]) + await client.aconnect() + + # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication. + client2 = await self.create_client() + await client2.test.test.find_one() + client.options.pool_options._credentials.cache.data = ( + client2.options.pool_options._credentials.cache.data + ) + await client2.close() + self.request_called = 0 + + # Perform an `insert` operation that succeeds. + await client.test.test.insert_one({}) + + # Assert that the callback was not called. + self.assertEqual(self.request_called, 0) + + # Assert there were no `SaslStart` commands executed. + assert not any( + event.command_name.lower() == "saslstart" for event in listener.started_events + ) + listener.reset() + + # Set a fail point for `insert` commands of the form: + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform an `insert` operation that succeeds. + await client.test.test.insert_one({}) + + # Assert that the callback was called once. + self.assertEqual(self.request_called, 1) + + # Assert there were `SaslStart` commands executed. + assert any(event.command_name.lower() == "saslstart" for event in listener.started_events) + + async def test_5_1_azure_with_no_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + opts = parse_uri(self.uri_single)["options"] + resource = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") + client = await self.create_client(authMechanismProperties=props) + await client.test.test.find_one() + + async def test_5_2_azure_with_bad_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") + client = await self.create_client(username="bad", authmechanismproperties=props) + with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_speculative_auth_success(self): + client1 = await self.create_client() + await client1.test.test.find_one() + client2 = await self.create_client() + await client2.aconnect() + + # Prime the cache of the second client. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + # Set a fail point for saslStart commands. + async with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + await client2.test.test.find_one() + + async def test_reauthentication_succeeds_multiple_connections(self): + client1 = await self.create_client() + client2 = await self.create_client() + + # Perform an insert operation. + await client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + await client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + await client1.test.test.find_one() + await client2.test.test.find_one() + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_auth_spec.py b/test/asynchronous/test_auth_spec.py index 0a68658680..7c659c6d93 100644 --- a/test/asynchronous/test_auth_spec.py +++ b/test/asynchronous/test_auth_spec.py @@ -30,7 +30,7 @@ from test.asynchronous.unified_format import generate_test_classes from pymongo import AsyncMongoClient -from pymongo.asynchronous.auth_oidc import OIDCCallback +from pymongo.auth_oidc_shared import OIDCCallback pytestmark = pytest.mark.auth diff --git a/test/auth_oidc/test_auth_oidc.py b/test/test_auth_oidc.py similarity index 97% rename from test/auth_oidc/test_auth_oidc.py rename to test/test_auth_oidc.py index 7dbf817cce..e7a8dce957 100644 --- a/test/auth_oidc/test_auth_oidc.py +++ b/test/test_auth_oidc.py @@ -17,13 +17,13 @@ import os import sys -import threading import time import unittest import warnings from contextlib import contextmanager from pathlib import Path from test import PyMongoTestCase +from test.helpers import ConcurrentRunner from typing import Dict import pytest @@ -51,6 +51,8 @@ ) from pymongo.synchronous.uri_parser import parse_uri +_IS_SYNC = True + ROOT = Path(__file__).parent.parent.resolve() TEST_PATH = ROOT / "auth" / "unified" ENVIRON = os.environ.get("OIDC_ENV", "test") @@ -86,7 +88,7 @@ def get_token(self, username=None): token_file = TOKEN_FILE else: token_file = os.path.join(TOKEN_DIR, username) - with open(token_file) as fid: + with open(token_file) as fid: # noqa: ASYNC101,RUF100 return fid.read() elif ENVIRON == "azure": opts = parse_uri(self.uri_single)["options"] @@ -183,7 +185,7 @@ def test_1_2_single_principal_explicit_username(self): client = self.create_client(username="test_user1") # Perform a find operation that succeeds. client.test.test.find_one() - # Close the client.. + # Close the client. client.close() def test_1_3_multiple_principal_user_1(self): @@ -254,9 +256,11 @@ def test_1_7_allowed_hosts_in_connection_string_ignored(self): uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D" with self.assertRaises(ConfigurationError), warnings.catch_warnings(): warnings.simplefilter("ignore") - _ = MongoClient( - uri, authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()) + c = MongoClient( + uri, + authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()), ) + c._connect() def test_1_8_machine_idp_human_callback(self): if not os.environ.get("OIDC_IS_LOCAL"): @@ -634,7 +638,7 @@ def test_reauthenticate_succeeds_bulk_read(self): ): # Perform a bulk read operation. cursor = client.test.test.find_raw_batches({}) - list(cursor) + cursor.to_list() # Assert that the request callback has been called twice. self.assertEqual(self.request_called, 2) @@ -658,7 +662,7 @@ def test_reauthenticate_succeeds_cursor(self): ): # Perform a find operation. cursor = client.test.test.find({"a": 1}) - self.assertGreaterEqual(len(list(cursor)), 1) + self.assertGreaterEqual(len(cursor.to_list()), 1) # Assert that the request callback has been called twice. self.assertEqual(self.request_called, 2) @@ -682,7 +686,7 @@ def test_reauthenticate_succeeds_get_more(self): ): # Perform a find operation. cursor = client.test.test.find({"a": 1}, batch_size=1) - self.assertGreaterEqual(len(list(cursor)), 1) + self.assertGreaterEqual(len(cursor.to_list()), 1) # Assert that the request callback has been called twice. self.assertEqual(self.request_called, 2) @@ -712,7 +716,7 @@ def test_reauthenticate_succeeds_get_more_exhaust(self): ): # Perform a find operation. cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST) - self.assertGreaterEqual(len(list(cursor)), 1) + self.assertGreaterEqual(len(cursor.to_list()), 1) # Assert that the request callback has been called twice. self.assertEqual(self.request_called, 2) @@ -737,7 +741,7 @@ def test_reauthenticate_succeeds_command(self): # Perform a count operation. cursor = client.test.command({"count": "test"}) - self.assertGreaterEqual(len(list(cursor)), 1) + self.assertGreaterEqual(len(cursor), 1) # Assert that the request callback has been called twice. self.assertEqual(self.request_called, 2) @@ -790,19 +794,20 @@ def test_1_2_callback_is_called_once_for_multiple_connections(self): # Create a ``MongoClient`` configured with a custom OIDC callback that # implements the provider logic. client = self.create_client() + client._connect() - # Start 10 threads and run 100 find operations in each thread that all succeed. + # Start 10 tasks and run 100 find operations that all succeed in each task. def target(): for _ in range(100): client.test.test.find_one() - threads = [] - for _ in range(10): - thread = threading.Thread(target=target) - thread.start() - threads.append(thread) - for thread in threads: - thread.join() + tasks = [] + for i in range(10): + tasks.append(ConcurrentRunner(target=target)) + for t in tasks: + t.start() + for t in tasks: + t.join() # Assert that the callback was called 1 time. self.assertEqual(self.request_called, 1) @@ -880,6 +885,7 @@ def test_2_6_ALLOWED_HOSTS_defaults_ignored(self): def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self): # Create a MongoClient and an OIDC callback that implements the provider logic. client = self.create_client() + client._connect() # Poison the cache with an invalid access token. # Set a fail point for ``find`` command. with self.fail_point( @@ -946,6 +952,7 @@ def test_4_1_reauthentication_succeeds(self): # Create a ``MongoClient`` configured with a custom OIDC callback that # implements the provider logic. client = self.create_client() + client._connect() # Set a fail point for the find command. with self.fail_point( @@ -1037,6 +1044,7 @@ def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(se # Create an OIDC configured client that can listen for `SaslStart` commands. listener = EventListener() client = self.create_client(event_listeners=[listener]) + client._connect() # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication. client2 = self.create_client() @@ -1101,6 +1109,7 @@ def test_speculative_auth_success(self): client1 = self.create_client() client1.test.test.find_one() client2 = self.create_client() + client2._connect() # Prime the cache of the second client. client2.options.pool_options._credentials.cache.data = ( diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 9ba15e8d78..ac6411cd89 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -30,7 +30,7 @@ from test.unified_format import generate_test_classes from pymongo import MongoClient -from pymongo.synchronous.auth_oidc import OIDCCallback +from pymongo.auth_oidc_shared import OIDCCallback pytestmark = pytest.mark.auth diff --git a/tools/synchro.py b/tools/synchro.py index bfe8f71125..1fa8c674a5 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -203,6 +203,7 @@ def async_only_test(f: str) -> bool: "utils_spec_runner.py", "qcheck.py", "test_auth.py", + "test_auth_oidc.py", "test_auth_spec.py", "test_bulk.py", "test_change_stream.py", From 958b3d11dc7d01cfa6caa8dcbb4171e52b305f76 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 30 May 2025 16:30:11 -0500 Subject: [PATCH 1934/2111] PYTHON-5400 Migrate away from Windows Server 2019 runner image on GitHub Actions (#2355) --- .github/workflows/dist.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index be172864dc..14c253fe73 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -39,8 +39,8 @@ jobs: - [ubuntu-latest, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] - [ubuntu-latest, "manylinux_s390x", "cp3*-manylinux_s390x"] - [ubuntu-latest, "manylinux_i686", "cp3*-manylinux_i686"] - - [windows-2019, "win_amd6", "cp3*-win_amd64"] - - [windows-2019, "win32", "cp3*-win32"] + - [windows-2022, "win_amd6", "cp3*-win_amd64"] + - [windows-2022, "win32", "cp3*-win32"] - [macos-14, "macos", "cp*-macosx_*"] steps: From 454c1637880702d616e62ac607a825816fd9de37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Jun 2025 06:29:28 -0500 Subject: [PATCH 1935/2111] Bump astral-sh/setup-uv from 6.0.1 to 6.1.0 in the actions group (#2357) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test-python.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index f6056f6ca2..a2fde83c06 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -25,7 +25,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -88,7 +88,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 with: enable-cache: true python-version: "3.9" @@ -111,7 +111,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 with: enable-cache: true python-version: "3.9" @@ -130,7 +130,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 with: enable-cache: true python-version: "3.9" @@ -152,7 +152,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v5 + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 with: enable-cache: true python-version: "${{matrix.python}}" From 6d33d4fb3456d6919e2d901c341e55ea0dd301fb Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 5 Jun 2025 09:21:10 -0500 Subject: [PATCH 1936/2111] PYTHON-5399 Add a prose test for OIDC reauthentication when a session is involved (#2351) --- test/asynchronous/test_auth_oidc.py | 19 +++++++++++++++++++ test/test_auth_oidc.py | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/test/asynchronous/test_auth_oidc.py b/test/asynchronous/test_auth_oidc.py index 8c06c4a21d..f450c75df7 100644 --- a/test/asynchronous/test_auth_oidc.py +++ b/test/asynchronous/test_auth_oidc.py @@ -1085,6 +1085,25 @@ async def test_4_4_speculative_authentication_should_be_ignored_on_reauthenticat # Assert there were `SaslStart` commands executed. assert any(event.command_name.lower() == "saslstart" for event in listener.started_events) + async def test_4_5_reauthentication_succeeds_when_a_session_is_involved(self): + # Create an OIDC configured client. + client = await self.create_client() + + # Set a fail point for `find` commands of the form: + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Start a new session. + async with client.start_session() as session: + # In the started session perform a `find` operation that succeeds. + await client.test.test.find_one({}, session=session) + + # Assert that the callback was called 2 times (once during the connection handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + async def test_5_1_azure_with_no_username(self): if ENVIRON != "azure": raise unittest.SkipTest("Test is only supported on Azure") diff --git a/test/test_auth_oidc.py b/test/test_auth_oidc.py index e7a8dce957..33a1e55fe2 100644 --- a/test/test_auth_oidc.py +++ b/test/test_auth_oidc.py @@ -1083,6 +1083,25 @@ def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(se # Assert there were `SaslStart` commands executed. assert any(event.command_name.lower() == "saslstart" for event in listener.started_events) + def test_4_5_reauthentication_succeeds_when_a_session_is_involved(self): + # Create an OIDC configured client. + client = self.create_client() + + # Set a fail point for `find` commands of the form: + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Start a new session. + with client.start_session() as session: + # In the started session perform a `find` operation that succeeds. + client.test.test.find_one({}, session=session) + + # Assert that the callback was called 2 times (once during the connection handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + def test_5_1_azure_with_no_username(self): if ENVIRON != "azure": raise unittest.SkipTest("Test is only supported on Azure") From 536b1cb8ab410e9f190707629ff0107ee86feee5 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 6 Jun 2025 13:17:36 -0400 Subject: [PATCH 1937/2111] =?UTF-8?q?PYTHON-5406=20-=20AsyncPeriodicExecut?= =?UTF-8?q?or=20must=20reset=20CSOT=20contextvars=20befor=E2=80=A6=20(#236?= =?UTF-8?q?0)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pymongo/_csot.py | 6 +++ pymongo/periodic_executor.py | 3 ++ .../test_async_contextvars_reset.py | 43 +++++++++++++++++++ tools/synchro.py | 1 + 4 files changed, 53 insertions(+) create mode 100644 test/asynchronous/test_async_contextvars_reset.py diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 06c6b68ac9..c5681e345a 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -32,6 +32,12 @@ DEADLINE: ContextVar[float] = ContextVar("DEADLINE", default=float("inf")) +def reset_all() -> None: + TIMEOUT.set(None) + RTT.set(0.0) + DEADLINE.set(float("inf")) + + def get_timeout() -> Optional[float]: return TIMEOUT.get(None) diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 323debdce2..ed369a2b21 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -23,6 +23,7 @@ import weakref from typing import Any, Optional +from pymongo import _csot from pymongo._asyncio_task import create_task from pymongo.lock import _create_lock @@ -93,6 +94,8 @@ def skip_sleep(self) -> None: self._skip_sleep = True async def _run(self) -> None: + # The CSOT contextvars must be cleared inside the executor task before execution begins + _csot.reset_all() while not self._stopped: if self._task and self._task.cancelling(): # type: ignore[unused-ignore, attr-defined] raise asyncio.CancelledError diff --git a/test/asynchronous/test_async_contextvars_reset.py b/test/asynchronous/test_async_contextvars_reset.py new file mode 100644 index 0000000000..9b0e2dc4dc --- /dev/null +++ b/test/asynchronous/test_async_contextvars_reset.py @@ -0,0 +1,43 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that AsyncPeriodicExecutors do not copy ContextVars from their parents.""" +from __future__ import annotations + +import asyncio +import sys +from test.asynchronous.utils import async_get_pool +from test.utils_shared import delay, one + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest + + +class TestAsyncContextVarsReset(AsyncIntegrationTest): + async def test_context_vars_are_reset_in_executor(self): + if sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Task.get_context (added in Python 3.11)") + + client = self.simple_client() + + await client.db.test.insert_one({"x": 1}) + for server in client._topology._servers.values(): + for context in [ + c + for c in server._monitor._executor._task.get_context() + if c.name in ["TIMEOUT", "RTT", "DEADLINE"] + ]: + self.assertIn(context.get(), [None, float("inf"), 0.0]) + await client.db.test.delete_many({}) diff --git a/tools/synchro.py b/tools/synchro.py index 1fa8c674a5..906bfd00da 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -185,6 +185,7 @@ def async_only_test(f: str) -> bool: "test_concurrency.py", "test_async_cancellation.py", "test_async_loop_safety.py", + "test_async_contextvars_reset.py", ] From 0f6647b49a4722a9257feea3f2810e649d28b982 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 9 Jun 2025 07:55:22 -0400 Subject: [PATCH 1938/2111] PYTHON-5305 - Fix Create Release Branch workflow (#2361) --- .github/workflows/create-release-branch.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/create-release-branch.yml b/.github/workflows/create-release-branch.yml index f24f94179a..72345d4a44 100644 --- a/.github/workflows/create-release-branch.yml +++ b/.github/workflows/create-release-branch.yml @@ -43,6 +43,8 @@ jobs: aws_region_name: ${{ vars.AWS_REGION_NAME }} aws_secret_id: ${{ secrets.AWS_SECRET_ID }} artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} + - name: Get hatch + run: pip install hatch - uses: mongodb-labs/drivers-github-tools/create-branch@v2 id: create-branch with: From 24e9da6a093ac9dc3e797e2f5c4c21e57d21a901 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 08:29:27 -0500 Subject: [PATCH 1939/2111] Bump github/codeql-action from 3.28.18 to 3.28.19 in the actions group (#2362) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/zizmor.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index e88f86f278..7a61ee9c62 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 1c6c655055..90d1eba118 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -26,7 +26,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3 + uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 with: sarif_file: results.sarif category: zizmor From f50ef65dd5f6ed99aa35ee40ab4f750524fee8f5 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Mon, 9 Jun 2025 11:26:07 -0700 Subject: [PATCH 1940/2111] PYTHON-5409 Make test_implicit_sessions_checkout less flaky (#2366) --- test/asynchronous/test_session.py | 13 +++++++------ test/test_session.py | 13 +++++++------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 1f6fb0d319..0ceaea98f9 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -194,10 +194,11 @@ async def test_implicit_sessions_checkout(self): # successful connection checkout" test from Driver Sessions Spec. succeeded = False lsid_set = set() - failures = 0 - for _ in range(5): - listener = OvertCommandListener() - client = await self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + # Retry up to 10 times because there is a known race that can cause multiple + # sessions to be used: connection check in happens before session check in + for _ in range(10): cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -240,9 +241,9 @@ async def target(op, *args): if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) if len(lsid_set) == 1: + # Break on first success. succeeded = True - else: - failures += 1 + break self.assertTrue(succeeded, lsid_set) async def test_pool_lifo(self): diff --git a/test/test_session.py b/test/test_session.py index 49cb9dba91..d70032d15f 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -194,10 +194,11 @@ def test_implicit_sessions_checkout(self): # successful connection checkout" test from Driver Sessions Spec. succeeded = False lsid_set = set() - failures = 0 - for _ in range(5): - listener = OvertCommandListener() - client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + # Retry up to 10 times because there is a known race that can cause multiple + # sessions to be used: connection check in happens before session check in + for _ in range(10): cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -240,9 +241,9 @@ def target(op, *args): if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) if len(lsid_set) == 1: + # Break on first success. succeeded = True - else: - failures += 1 + break self.assertTrue(succeeded, lsid_set) def test_pool_lifo(self): From 0dd5a5c794e2544096cf3e57801e331873f26f8e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 9 Jun 2025 19:36:44 -0500 Subject: [PATCH 1941/2111] PYTHON-5405 Use legacy wait_for_read cancellation approach on Windows (#2363) --- pymongo/network_layer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 6f1bb9a357..78eefc7177 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -286,6 +286,7 @@ async def _async_socket_receive( _PYPY = "PyPy" in sys.version +_WINDOWS = sys.platform == "win32" def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: @@ -337,7 +338,8 @@ def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> me while bytes_read < length: try: # Use the legacy wait_for_read cancellation approach on PyPy due to PYTHON-5011. - if _PYPY: + # also use it on Windows due to PYTHON-5405 + if _PYPY or _WINDOWS: wait_for_read(conn, deadline) if _csot.get_timeout() and deadline is not None: conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) @@ -359,6 +361,7 @@ def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> me raise _OperationCancelled("operation cancelled") from None if ( _PYPY + or _WINDOWS or not conn.is_sdam and deadline is not None and deadline - time.monotonic() < 0 From 1bcb85f1c1dd8bd348ca389098c60510548bf8ed Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 9 Jun 2025 19:46:10 -0500 Subject: [PATCH 1942/2111] PYTHON-5321 Remove Serverless testing (#2359) --- .evergreen/generated_configs/tasks.yml | 10 -- .evergreen/generated_configs/variants.yml | 20 ---- .evergreen/scripts/generate_config.py | 25 ----- .evergreen/scripts/setup_tests.py | 8 -- .evergreen/scripts/teardown_tests.py | 4 - .evergreen/scripts/utils.py | 2 - test/__init__.py | 101 +++++++---------- test/asynchronous/__init__.py | 103 +++++++----------- test/asynchronous/helpers.py | 10 -- test/asynchronous/test_client.py | 2 +- test/asynchronous/test_client_bulk_write.py | 19 ---- test/asynchronous/test_client_context.py | 10 -- test/asynchronous/test_crud_unified.py | 2 +- test/asynchronous/test_csot.py | 1 - .../test_discovery_and_monitoring.py | 1 - test/asynchronous/test_load_balancer.py | 7 +- test/asynchronous/test_retryable_reads.py | 1 - test/asynchronous/test_retryable_writes.py | 3 - test/asynchronous/test_transactions.py | 2 - test/asynchronous/test_unified_format.py | 2 - .../test_versioned_api_integration.py | 1 - test/asynchronous/unified_format.py | 24 +--- test/asynchronous/utils_spec_runner.py | 25 +---- test/atlas/test_connection.py | 8 -- test/helpers.py | 10 -- test/test_client.py | 2 +- test/test_client_bulk_write.py | 19 ---- test/test_client_context.py | 10 -- test/test_crud_unified.py | 2 +- test/test_csot.py | 1 - test/test_discovery_and_monitoring.py | 1 - test/test_load_balancer.py | 7 +- test/test_retryable_reads.py | 1 - test/test_retryable_writes.py | 3 - test/test_transactions.py | 2 - test/test_unified_format.py | 2 - test/test_versioned_api_integration.py | 1 - test/unified_format.py | 24 +--- test/utils_spec_runner.py | 25 +---- 39 files changed, 90 insertions(+), 411 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 8bc758890a..aa700629fb 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -3060,16 +3060,6 @@ tasks: - sharded_cluster-auth-ssl - sync - # Serverless tests - - name: test-serverless - commands: - - func: run tests - vars: - TEST_NAME: serverless - AUTH: auth - SSL: ssl - tags: [serverless] - # Standard tests - name: test-standard-v4.0-python3.9-sync-noauth-nossl-standalone commands: diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 673bb111cd..57de725cb1 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -586,26 +586,6 @@ buildvariants: - rhel87-small tags: [coverage_tag] - # Serverless tests - - name: serverless-rhel8-python3.9 - tasks: - - name: .serverless - display_name: Serverless RHEL8 Python3.9 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 - - name: serverless-rhel8-python3.13 - tasks: - - name: .serverless - display_name: Serverless RHEL8 Python3.13 - run_on: - - rhel87-small - batchtime: 10080 - expansions: - PYTHON_BINARY: /opt/python/3.13/bin/python3 - # Stable api tests - name: stable-api-require-v1-rhel8-auth tasks: diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 9f42fb0a4b..e5c5209bf3 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -352,23 +352,6 @@ def create_disable_test_commands_variants(): return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] -def create_serverless_variants(): - host = DEFAULT_HOST - batchtime = BATCHTIME_WEEK - tasks = [".serverless"] - base_name = "Serverless" - return [ - create_variant( - tasks, - get_variant_name(base_name, host, python=python), - host=host, - python=python, - batchtime=batchtime, - ) - for python in MIN_MAX_PYTHON - ] - - def create_oidc_auth_variants(): variants = [] for host_name in ["ubuntu22", "macos", "win64"]: @@ -968,14 +951,6 @@ def create_free_threading_tasks(): return [EvgTask(name=task_name, tags=tags, commands=[server_func, test_func])] -def create_serverless_tasks(): - vars = dict(TEST_NAME="serverless", AUTH="auth", SSL="ssl") - test_func = FunctionCall(func="run tests", vars=vars) - tags = ["serverless"] - task_name = "test-serverless" - return [EvgTask(name=task_name, tags=tags, commands=[test_func])] - - ############## # Functions ############## diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 98c382ff60..13444fe9ca 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -229,14 +229,6 @@ def handle_test_env() -> None: config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh") DB_USER = config["ADL_USERNAME"] DB_PASSWORD = config["ADL_PASSWORD"] - elif test_name == "serverless": - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/serverless/setup.sh") - config = read_env(f"{DRIVERS_TOOLS}/.evergreen/serverless/secrets-export.sh") - DB_USER = config["SERVERLESS_ATLAS_USER"] - DB_PASSWORD = config["SERVERLESS_ATLAS_PASSWORD"] - write_env("MONGODB_URI", config["SERVERLESS_URI"]) - write_env("SINGLE_MONGOS_LB_URI", config["SERVERLESS_URI"]) - write_env("MULTI_MONGOS_LB_URI", config["SERVERLESS_URI"]) elif test_name == "auth_oidc": DB_USER = config["OIDC_ADMIN_USER"] DB_PASSWORD = config["OIDC_ADMIN_PWD"] diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index 390e0a68eb..d89f513f12 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -36,10 +36,6 @@ elif TEST_NAME == "ocsp": run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh") -# Tear down serverless if applicable. -elif TEST_NAME == "serverless": - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/serverless/teardown.sh") - # Tear down atlas cluster if applicable. if TEST_NAME in ["aws_lambda", "search_index"]: run_command(f"bash {DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 7a8f9640f8..323ec2c567 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -45,7 +45,6 @@ class Distro: "mockupdb": "mockupdb", "ocsp": "ocsp", "perf": "perf", - "serverless": "", } # Tests that require a sub test suite. @@ -60,7 +59,6 @@ class Distro: "aws_lambda", "data_lake", "mockupdb", - "serverless", "ocsp", ] diff --git a/test/__init__.py b/test/__init__.py index 39b4045e66..0e6046b527 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -64,7 +64,6 @@ MONGODB_API_VERSION, MULTI_MONGOS_LB_URI, TEST_LOADBALANCER, - TEST_SERVERLESS, TLS_OPTIONS, SystemCertsPatcher, client_knobs, @@ -123,9 +122,8 @@ def __init__(self): self.conn_lock = threading.Lock() self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER - self.serverless = TEST_SERVERLESS self._fips_enabled = None - if self.load_balancer or self.serverless: + if self.load_balancer: self.default_client_options["loadBalanced"] = True if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS @@ -167,7 +165,7 @@ def uri(self): @property def hello(self): if not self._hello: - if self.serverless or self.load_balancer: + if self.load_balancer: self._hello = self.client.admin.command(HelloCompat.CMD) else: self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) @@ -222,24 +220,21 @@ def _init_client(self): if self.client: self.connected = True - if self.serverless: - self.auth_enabled = True - else: - try: - self.cmd_line = self.client.admin.command("getCmdLineOpts") - except pymongo.errors.OperationFailure as e: - assert e.details is not None - msg = e.details.get("errmsg", "") - if e.code == 13 or "unauthorized" in msg or "login" in msg: - # Unauthorized. - self.auth_enabled = True - else: - raise + try: + self.cmd_line = self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True else: - self.auth_enabled = self._server_started_with_auth() + raise + else: + self.auth_enabled = self._server_started_with_auth() if self.auth_enabled: - if not self.serverless and not IS_SRV: + if not IS_SRV: # See if db_user already exists. if not self._check_user_provided(): _create_user(self.client.admin, db_user, db_pwd) @@ -259,13 +254,10 @@ def _init_client(self): # May not have this if OperationFailure was raised earlier. self.cmd_line = self.client.admin.command("getCmdLineOpts") - if self.serverless: - self.server_status = {} - else: - self.server_status = self.client.admin.command("serverStatus") - if self.storage_engine == "mmapv1": - # MMAPv1 does not support retryWrites=True. - self.default_client_options["retryWrites"] = False + self.server_status = self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False hello = self.hello self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello @@ -302,42 +294,33 @@ def _init_client(self): self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) - if self.serverless: - self.server_parameters = { - "requireApiVersion": False, - "enableTestCommands": True, - } + self.server_parameters = self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: self.test_commands_enabled = True - self.has_ipv6 = False - else: - self.server_parameters = self.client.admin.command("getParameter", "*") - assert self.cmd_line is not None - if self.server_parameters["enableTestCommands"]: + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: self.test_commands_enabled = True - elif "parsed" in self.cmd_line: - params = self.cmd_line["parsed"].get("setParameter", []) - if "enableTestCommands=1" in params: + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": self.test_commands_enabled = True - else: - params = self.cmd_line["parsed"].get("setParameter", {}) - if params.get("enableTestCommands") == "1": - self.test_commands_enabled = True - self.has_ipv6 = self._server_started_with_ipv6() + self.has_ipv6 = self._server_started_with_ipv6() self.is_mongos = (self.hello).get("msg") == "isdbgrid" if self.is_mongos: address = self.client.address self.mongoses.append(address) - if not self.serverless: - # Check for another mongos on the next port. - assert address is not None - next_address = address[0], address[1] + 1 - mongos_client = self._connect(*next_address, **self.default_client_options) - if mongos_client: - hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) - if hello.get("msg") == "isdbgrid": - self.mongoses.append(next_address) - mongos_client.close() + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + mongos_client.close() def init(self): with self.conn_lock: @@ -666,15 +649,9 @@ def require_no_load_balancer(self, func): lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func ) - def require_no_serverless(self, func): - """Run a test only if the client is not connected to serverless.""" - return self._require( - lambda: not self.serverless, "Must not be connected to serverless", func=func - ) - def require_change_streams(self, func): """Run a test only if the server supports change streams.""" - return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func))) + return self.require_no_mmap(self.require_no_standalone(func)) def is_topology_type(self, topologies): unknown = set(topologies) - { @@ -1195,8 +1172,6 @@ class IntegrationTest(PyMongoTestCase): def setUp(self) -> None: if client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") - if client_context.serverless and not getattr(self, "RUN_ON_SERVERLESS", False): - raise SkipTest("this test does not support serverless") self.client = client_context.client self.db = self.client.pymongo_test if client_context.auth_enabled: diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 882cb6110f..52583d30ef 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -64,7 +64,6 @@ MONGODB_API_VERSION, MULTI_MONGOS_LB_URI, TEST_LOADBALANCER, - TEST_SERVERLESS, TLS_OPTIONS, SystemCertsPatcher, client_knobs, @@ -123,9 +122,8 @@ def __init__(self): self.conn_lock = threading.Lock() self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER - self.serverless = TEST_SERVERLESS self._fips_enabled = None - if self.load_balancer or self.serverless: + if self.load_balancer: self.default_client_options["loadBalanced"] = True if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS @@ -167,7 +165,7 @@ async def uri(self): @property async def hello(self): if not self._hello: - if self.serverless or self.load_balancer: + if self.load_balancer: self._hello = await self.client.admin.command(HelloCompat.CMD) else: self._hello = await self.client.admin.command(HelloCompat.LEGACY_CMD) @@ -222,24 +220,21 @@ async def _init_client(self): if self.client: self.connected = True - if self.serverless: - self.auth_enabled = True - else: - try: - self.cmd_line = await self.client.admin.command("getCmdLineOpts") - except pymongo.errors.OperationFailure as e: - assert e.details is not None - msg = e.details.get("errmsg", "") - if e.code == 13 or "unauthorized" in msg or "login" in msg: - # Unauthorized. - self.auth_enabled = True - else: - raise + try: + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True else: - self.auth_enabled = self._server_started_with_auth() + raise + else: + self.auth_enabled = self._server_started_with_auth() if self.auth_enabled: - if not self.serverless and not IS_SRV: + if not IS_SRV: # See if db_user already exists. if not await self._check_user_provided(): await _create_user(self.client.admin, db_user, db_pwd) @@ -259,13 +254,10 @@ async def _init_client(self): # May not have this if OperationFailure was raised earlier. self.cmd_line = await self.client.admin.command("getCmdLineOpts") - if self.serverless: - self.server_status = {} - else: - self.server_status = await self.client.admin.command("serverStatus") - if self.storage_engine == "mmapv1": - # MMAPv1 does not support retryWrites=True. - self.default_client_options["retryWrites"] = False + self.server_status = await self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False hello = await self.hello self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello @@ -302,44 +294,33 @@ async def _init_client(self): self.w = len(hello.get("hosts", [])) or 1 self.version = await Version.async_from_client(self.client) - if self.serverless: - self.server_parameters = { - "requireApiVersion": False, - "enableTestCommands": True, - } + self.server_parameters = await self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: self.test_commands_enabled = True - self.has_ipv6 = False - else: - self.server_parameters = await self.client.admin.command("getParameter", "*") - assert self.cmd_line is not None - if self.server_parameters["enableTestCommands"]: + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: self.test_commands_enabled = True - elif "parsed" in self.cmd_line: - params = self.cmd_line["parsed"].get("setParameter", []) - if "enableTestCommands=1" in params: + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": self.test_commands_enabled = True - else: - params = self.cmd_line["parsed"].get("setParameter", {}) - if params.get("enableTestCommands") == "1": - self.test_commands_enabled = True - self.has_ipv6 = await self._server_started_with_ipv6() + self.has_ipv6 = await self._server_started_with_ipv6() self.is_mongos = (await self.hello).get("msg") == "isdbgrid" if self.is_mongos: address = await self.client.address self.mongoses.append(address) - if not self.serverless: - # Check for another mongos on the next port. - assert address is not None - next_address = address[0], address[1] + 1 - mongos_client = await self._connect( - *next_address, **self.default_client_options - ) - if mongos_client: - hello = await mongos_client.admin.command(HelloCompat.LEGACY_CMD) - if hello.get("msg") == "isdbgrid": - self.mongoses.append(next_address) - await mongos_client.close() + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = await self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = await mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + await mongos_client.close() async def init(self): with self.conn_lock: @@ -668,15 +649,9 @@ def require_no_load_balancer(self, func): lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func ) - def require_no_serverless(self, func): - """Run a test only if the client is not connected to serverless.""" - return self._require( - lambda: not self.serverless, "Must not be connected to serverless", func=func - ) - def require_change_streams(self, func): """Run a test only if the server supports change streams.""" - return self.require_no_mmap(self.require_no_standalone(self.require_no_serverless(func))) + return self.require_no_mmap(self.require_no_standalone(func)) async def is_topology_type(self, topologies): unknown = set(topologies) - { @@ -1213,8 +1188,6 @@ class AsyncIntegrationTest(AsyncPyMongoTestCase): async def asyncSetUp(self) -> None: if async_client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") - if async_client_context.serverless and not getattr(self, "RUN_ON_SERVERLESS", False): - raise SkipTest("this test does not support serverless") self.client = async_client_context.client self.db = self.client.pymongo_test if async_client_context.auth_enabled: diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index 7b021e8b44..49b9af9bf7 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -82,7 +82,6 @@ COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) -TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") @@ -91,15 +90,6 @@ host, port = res["nodelist"][0] db_user = res["username"] or db_user db_pwd = res["password"] or db_pwd -elif TEST_SERVERLESS: - TEST_LOADBALANCER = True - res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res["nodelist"][0] - db_user = res["username"] or db_user - db_pwd = res["password"] or db_pwd - TLS_OPTIONS = {"tls": True} - # Spec says serverless tests must be run with compression. - COMPRESSORS = COMPRESSORS or "zlib" # Shared KMS data. diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index ad6614711d..aaa7e7d56d 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -2006,7 +2006,7 @@ async def test_srv_max_hosts_kwarg(self): self.assertEqual(len(client.topology_description.server_descriptions()), 2) @unittest.skipIf( - async_client_context.load_balancer or async_client_context.serverless, + async_client_context.load_balancer, "loadBalanced clients do not run SDAM", ) @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 9eb15298a6..2f48466af8 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -47,7 +47,6 @@ class TestClientBulkWrite(AsyncIntegrationTest): @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_returns_error_if_no_namespace_provided(self): models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -58,7 +57,6 @@ async def test_returns_error_if_no_namespace_provided(self): ) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_handles_non_pymongo_error(self): with patch.object( _AsyncClientBulk, "write_command", return_value={"error": TypeError("mock type error")} @@ -70,7 +68,6 @@ async def test_handles_non_pymongo_error(self): self.assertFalse(hasattr(context.exception.error, "details")) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_formats_write_error_correctly(self): models = [ InsertOne(namespace="db.coll", document={"_id": 1}), @@ -94,7 +91,6 @@ async def asyncSetUp(self): self.max_message_size_bytes = await async_client_context.max_message_size_bytes @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -119,7 +115,6 @@ async def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -151,7 +146,6 @@ async def test_batch_splits_if_ops_payload_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless @async_client_context.require_failCommand_fail_point async def test_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() @@ -194,7 +188,6 @@ async def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(bulk_write_events), 2) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -224,7 +217,6 @@ async def test_collects_write_errors_across_batches_unordered(self): self.assertEqual(len(bulk_write_events), 2) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -254,7 +246,6 @@ async def test_collects_write_errors_across_batches_ordered(self): self.assertEqual(len(bulk_write_events), 1) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -294,7 +285,6 @@ async def test_handles_cursor_requiring_getMore(self): self.assertTrue(get_more_event) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless @async_client_context.require_no_standalone async def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() @@ -337,7 +327,6 @@ async def test_handles_cursor_requiring_getMore_within_transaction(self): self.assertTrue(get_more_event) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless @async_client_context.require_failCommand_fail_point async def test_handles_getMore_error(self): listener = OvertCommandListener() @@ -392,7 +381,6 @@ async def test_handles_getMore_error(self): self.assertTrue(kill_cursors_event) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -452,7 +440,6 @@ async def _setup_namespace_test_models(self): return num_models, models @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -483,7 +470,6 @@ async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -521,7 +507,6 @@ async def test_batch_splits_if_new_namespace_is_too_large(self): self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = await self.async_rs_or_single_client() @@ -539,7 +524,6 @@ async def test_returns_error_if_no_writes_can_be_added_to_ops(self): await client.bulk_write(models=models) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") async def test_returns_error_if_auto_encryption_configured(self): opts = AutoEncryptionOpts( @@ -556,7 +540,6 @@ async def test_returns_error_if_auto_encryption_configured(self): ) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_upserted_result(self): client = await self.async_rs_or_single_client() @@ -596,7 +579,6 @@ async def test_upserted_result(self): self.assertEqual(result.update_results[2].did_upsert, False) @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless async def test_15_unacknowledged_write_across_batches(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -645,7 +627,6 @@ async def asyncSetUp(self): self.max_message_size_bytes = await async_client_context.max_message_size_bytes @async_client_context.require_version_min(8, 0, 0, -24) - @async_client_context.require_no_serverless @async_client_context.require_failCommand_fail_point async def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py index afca1c0b26..652b32e798 100644 --- a/test/asynchronous/test_client_context.py +++ b/test/asynchronous/test_client_context.py @@ -36,16 +36,6 @@ def test_must_connect(self): ), ) - def test_serverless(self): - if not os.environ.get("TEST_SERVERLESS"): - raise SkipTest("TEST_SERVERLESS is not set") - - self.assertTrue( - async_client_context.connected and async_client_context.serverless, - "client context must be connected to serverless when " - f"TEST_SERVERLESS is set. Failed attempts:\n{async_client_context.connection_attempt_info()}", - ) - def test_enableTestCommands_is_disabled(self): if not os.environ.get("DISABLE_TEST_COMMANDS"): raise SkipTest("DISABLE_TEST_COMMANDS is not set") diff --git a/test/asynchronous/test_crud_unified.py b/test/asynchronous/test_crud_unified.py index e6f42d5bdf..8b1f9b8e38 100644 --- a/test/asynchronous/test_crud_unified.py +++ b/test/asynchronous/test_crud_unified.py @@ -33,7 +33,7 @@ _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") # Generate unified tests. -globals().update(generate_test_classes(_TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/asynchronous/test_csot.py b/test/asynchronous/test_csot.py index 9e928c2251..46c97ce6d3 100644 --- a/test/asynchronous/test_csot.py +++ b/test/asynchronous/test_csot.py @@ -41,7 +41,6 @@ class TestCSOT(AsyncIntegrationTest): - RUN_ON_SERVERLESS = True RUN_ON_LOAD_BALANCER = True async def test_timeout_nested(self): diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index cf26faf248..70348c8daf 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -445,7 +445,6 @@ async def mock_close(self, reason): class TestServerMonitoringMode(AsyncIntegrationTest): - @async_client_context.require_no_serverless @async_client_context.require_no_load_balancer async def asyncSetUp(self): await super().asyncSetUp() diff --git a/test/asynchronous/test_load_balancer.py b/test/asynchronous/test_load_balancer.py index 127fdfd24d..db7ff9183f 100644 --- a/test/asynchronous/test_load_balancer.py +++ b/test/asynchronous/test_load_balancer.py @@ -54,7 +54,6 @@ class TestLB(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True async def test_connections_are_only_returned_once(self): if "PyPy" in sys.version: @@ -142,10 +141,8 @@ async def test_session_gc(self): session = client.start_session() await session.start_transaction() await client.test_session_gc.test.find_one({}, session=session) - # Cleanup the transaction left open on the server unless we're - # testing serverless which does not support killSessions. - if not async_client_context.serverless: - self.addAsyncCleanup(self.client.admin.command, "killSessions", [session.session_id]) + # Cleanup the transaction left open on the server + self.addAsyncCleanup(self.client.admin.command, "killSessions", [session.session_id]) if async_client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py index 10d9e738b4..a563e3974e 100644 --- a/test/asynchronous/test_retryable_reads.py +++ b/test/asynchronous/test_retryable_reads.py @@ -80,7 +80,6 @@ async def run(self): class TestPoolPausedError(AsyncIntegrationTest): # Pools don't get paused in load balanced mode. RUN_ON_LOAD_BALANCER = False - RUN_ON_SERVERLESS = False @async_client_context.require_sync @async_client_context.require_failCommand_blockConnection diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py index 842233a3ef..b399fa50e4 100644 --- a/test/asynchronous/test_retryable_writes.py +++ b/test/asynchronous/test_retryable_writes.py @@ -129,7 +129,6 @@ def non_retryable_single_statement_ops(coll): class IgnoreDeprecationsTest(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True deprecation_filter: DeprecationFilter async def asyncSetUp(self) -> None: @@ -423,7 +422,6 @@ async def test_retryable_writes_in_sharded_cluster_multiple_available(self): class TestWriteConcernError(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True fail_insert: dict @async_client_context.require_replica_set @@ -494,7 +492,6 @@ async def run(self): class TestPoolPausedError(AsyncIntegrationTest): # Pools don't get paused in load balanced mode. RUN_ON_LOAD_BALANCER = False - RUN_ON_SERVERLESS = False @async_client_context.require_sync @async_client_context.require_failCommand_blockConnection diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index e1b6001edb..5c2a4f6fae 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -73,8 +73,6 @@ def maybe_skip_scenario(self, test): class TestTransactions(AsyncTransactionsBase): - RUN_ON_SERVERLESS = True - @async_client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() diff --git a/test/asynchronous/test_unified_format.py b/test/asynchronous/test_unified_format.py index a005739e95..58a1ea3326 100644 --- a/test/asynchronous/test_unified_format.py +++ b/test/asynchronous/test_unified_format.py @@ -42,7 +42,6 @@ expected_failures=[ "Client side error in command starting transaction", # PYTHON-1894 ], - RUN_ON_SERVERLESS=False, ) ) @@ -56,7 +55,6 @@ expected_failures=[ ".*", # All tests expected to fail ], - RUN_ON_SERVERLESS=False, ) ) diff --git a/test/asynchronous/test_versioned_api_integration.py b/test/asynchronous/test_versioned_api_integration.py index 46e62d5c14..0f6b544465 100644 --- a/test/asynchronous/test_versioned_api_integration.py +++ b/test/asynchronous/test_versioned_api_integration.py @@ -40,7 +40,6 @@ class TestServerApiIntegration(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True def assertServerApi(self, event): self.assertIn("apiVersion", event.command) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 23707b942f..fbd1f87755 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -131,14 +131,6 @@ async def is_run_on_requirement_satisfied(requirement): if req_max_server_version: max_version_satisfied = Version.from_string(req_max_server_version) >= server_version - serverless = requirement.get("serverless") - if serverless == "require": - serverless_satisfied = async_client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not async_client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - params_satisfied = True params = requirement.get("serverParameters") if params: @@ -168,7 +160,6 @@ async def is_run_on_requirement_satisfied(requirement): topology_satisfied and min_version_satisfied and max_version_satisfied - and serverless_satisfied and params_satisfied and auth_satisfied and csfle_satisfied @@ -284,7 +275,7 @@ async def _create_entity(self, entity_spec, uri=None): self._listeners[spec["id"]] = listener kwargs["event_listeners"] = [listener] if spec.get("useMultipleMongoses"): - if async_client_context.load_balancer or async_client_context.serverless: + if async_client_context.load_balancer: kwargs["h"] = async_client_context.MULTI_MONGOS_LB_URI elif async_client_context.is_mongos: kwargs["h"] = async_client_context.mongos_seeds() @@ -440,7 +431,6 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): SCHEMA_VERSION = Version.from_string("1.22") RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes mongos_clients: list[AsyncMongoClient] = [] @@ -511,11 +501,7 @@ async def asyncSetUp(self): # Handle mongos_clients for transactions tests. self.mongos_clients = [] - if ( - async_client_context.supports_transactions() - and not async_client_context.load_balancer - and not async_client_context.serverless - ): + if async_client_context.supports_transactions() and not async_client_context.load_balancer: for address in async_client_context.mongoses: self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) @@ -552,12 +538,6 @@ def maybe_skip_test(self, spec): self.skipTest("PYTHON-5170 tests are flakey") if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: self.skipTest("PYTHON-5174 tests are flakey") - if ( - "inserting _id with type null via clientBulkWrite" in spec["description"] - or "commitTransaction fails after Interrupted" in spec["description"] - or "commit is not retried after MaxTimeMSExpired error" in spec["description"] - ) and async_client_context.serverless: - self.skipTest("PYTHON-5326 known serverless failures") class_name = self.__class__.__name__.lower() description = spec["description"].lower() diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index c83636a734..da36209166 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -124,18 +124,6 @@ def _ensure_min_max_server_version(self, scenario_def, method): if max_ver is not None: method = async_client_context.require_version_max(*max_ver)(method) - if "serverless" in scenario_def: - serverless = scenario_def["serverless"] - if serverless == "require": - serverless_satisfied = async_client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not async_client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - method = unittest.skipUnless( - serverless_satisfied, "Serverless requirement not satisfied" - )(method) - return method @staticmethod @@ -168,16 +156,6 @@ def valid_auth_enabled(run_on_req): return not async_client_context.auth_enabled return True - @staticmethod - def serverless_ok(run_on_req): - serverless = run_on_req["serverless"] - if serverless == "require": - return async_client_context.serverless - elif serverless == "forbid": - return not async_client_context.serverless - else: # unset or "allow" - return True - async def should_run_on(self, scenario_def): run_on = scenario_def.get("runOn", []) if not run_on: @@ -190,7 +168,6 @@ async def should_run_on(self, scenario_def): and self.min_server_version(req) and self.max_server_version(req) and self.valid_auth_enabled(req) - and self.serverless_ok(req) ): return True return False @@ -680,7 +657,7 @@ async def run_scenario(self, scenario_def, test): use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: - if async_client_context.load_balancer or async_client_context.serverless: + if async_client_context.load_balancer: host = async_client_context.MULTI_MONGOS_LB_URI elif async_client_context.is_mongos: host = async_client_context.mongos_seeds() diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index a3e8b0b1d5..0961f1084f 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -37,13 +37,11 @@ "ATLAS_FREE": os.environ.get("ATLAS_FREE"), "ATLAS_TLS11": os.environ.get("ATLAS_TLS11"), "ATLAS_TLS12": os.environ.get("ATLAS_TLS12"), - "ATLAS_SERVERLESS": os.environ.get("ATLAS_SERVERLESS"), "ATLAS_SRV_REPL": os.environ.get("ATLAS_SRV_REPL"), "ATLAS_SRV_SHRD": os.environ.get("ATLAS_SRV_SHRD"), "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), - "ATLAS_SRV_SERVERLESS": os.environ.get("ATLAS_SRV_SERVERLESS"), } @@ -73,9 +71,6 @@ def test_tls_11(self): def test_tls_12(self): self.connect(URIS["ATLAS_TLS12"]) - def test_serverless(self): - self.connect(URIS["ATLAS_SERVERLESS"]) - def connect_srv(self, uri): self.connect(uri) self.assertIn("mongodb+srv://", uri) @@ -96,9 +91,6 @@ def test_srv_tls_11(self): def test_srv_tls_12(self): self.connect_srv(URIS["ATLAS_SRV_TLS12"]) - def test_srv_serverless(self): - self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) - def test_uniqueness(self): """Ensure that we don't accidentally duplicate the test URIs.""" uri_to_names = defaultdict(list) diff --git a/test/helpers.py b/test/helpers.py index 12c55ade1b..ccf19a9228 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -82,7 +82,6 @@ COMPRESSORS = os.environ.get("COMPRESSORS") MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) -TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") @@ -91,15 +90,6 @@ host, port = res["nodelist"][0] db_user = res["username"] or db_user db_pwd = res["password"] or db_pwd -elif TEST_SERVERLESS: - TEST_LOADBALANCER = True - res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res["nodelist"][0] - db_user = res["username"] or db_user - db_pwd = res["password"] or db_pwd - TLS_OPTIONS = {"tls": True} - # Spec says serverless tests must be run with compression. - COMPRESSORS = COMPRESSORS or "zlib" # Shared KMS data. diff --git a/test/test_client.py b/test/test_client.py index c50082797d..18624f892c 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -1963,7 +1963,7 @@ def test_srv_max_hosts_kwarg(self): self.assertEqual(len(client.topology_description.server_descriptions()), 2) @unittest.skipIf( - client_context.load_balancer or client_context.serverless, + client_context.load_balancer, "loadBalanced clients do not run SDAM", ) @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 866b179c9e..84313c5be0 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -47,7 +47,6 @@ class TestClientBulkWrite(IntegrationTest): @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_returns_error_if_no_namespace_provided(self): models = [InsertOne(document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: @@ -58,7 +57,6 @@ def test_returns_error_if_no_namespace_provided(self): ) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_handles_non_pymongo_error(self): with patch.object( _ClientBulk, "write_command", return_value={"error": TypeError("mock type error")} @@ -70,7 +68,6 @@ def test_handles_non_pymongo_error(self): self.assertFalse(hasattr(context.exception.error, "details")) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_formats_write_error_correctly(self): models = [ InsertOne(namespace="db.coll", document={"_id": 1}), @@ -94,7 +91,6 @@ def setUp(self): self.max_message_size_bytes = client_context.max_message_size_bytes @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -119,7 +115,6 @@ def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -151,7 +146,6 @@ def test_batch_splits_if_ops_payload_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless @client_context.require_failCommand_fail_point def test_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() @@ -194,7 +188,6 @@ def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(bulk_write_events), 2) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -224,7 +217,6 @@ def test_collects_write_errors_across_batches_unordered(self): self.assertEqual(len(bulk_write_events), 2) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -254,7 +246,6 @@ def test_collects_write_errors_across_batches_ordered(self): self.assertEqual(len(bulk_write_events), 1) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -294,7 +285,6 @@ def test_handles_cursor_requiring_getMore(self): self.assertTrue(get_more_event) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless @client_context.require_no_standalone def test_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() @@ -337,7 +327,6 @@ def test_handles_cursor_requiring_getMore_within_transaction(self): self.assertTrue(get_more_event) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless @client_context.require_failCommand_fail_point def test_handles_getMore_error(self): listener = OvertCommandListener() @@ -392,7 +381,6 @@ def test_handles_getMore_error(self): self.assertTrue(kill_cursors_event) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -448,7 +436,6 @@ def _setup_namespace_test_models(self): return num_models, models @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -479,7 +466,6 @@ def test_no_batch_splits_if_new_namespace_is_not_too_large(self): self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -517,7 +503,6 @@ def test_batch_splits_if_new_namespace_is_too_large(self): self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_returns_error_if_no_writes_can_be_added_to_ops(self): client = self.rs_or_single_client() @@ -535,7 +520,6 @@ def test_returns_error_if_no_writes_can_be_added_to_ops(self): client.bulk_write(models=models) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_returns_error_if_auto_encryption_configured(self): opts = AutoEncryptionOpts( @@ -552,7 +536,6 @@ def test_returns_error_if_auto_encryption_configured(self): ) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_upserted_result(self): client = self.rs_or_single_client() @@ -592,7 +575,6 @@ def test_upserted_result(self): self.assertEqual(result.update_results[2].did_upsert, False) @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless def test_15_unacknowledged_write_across_batches(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -641,7 +623,6 @@ def setUp(self): self.max_message_size_bytes = client_context.max_message_size_bytes @client_context.require_version_min(8, 0, 0, -24) - @client_context.require_no_serverless @client_context.require_failCommand_fail_point def test_timeout_in_multi_batch_bulk_write(self): _OVERHEAD = 500 diff --git a/test/test_client_context.py b/test/test_client_context.py index ef3633a8b0..9c1b21ee78 100644 --- a/test/test_client_context.py +++ b/test/test_client_context.py @@ -36,16 +36,6 @@ def test_must_connect(self): ), ) - def test_serverless(self): - if not os.environ.get("TEST_SERVERLESS"): - raise SkipTest("TEST_SERVERLESS is not set") - - self.assertTrue( - client_context.connected and client_context.serverless, - "client context must be connected to serverless when " - f"TEST_SERVERLESS is set. Failed attempts:\n{client_context.connection_attempt_info()}", - ) - def test_enableTestCommands_is_disabled(self): if not os.environ.get("DISABLE_TEST_COMMANDS"): raise SkipTest("DISABLE_TEST_COMMANDS is not set") diff --git a/test/test_crud_unified.py b/test/test_crud_unified.py index 26f34cba88..1b1abf3600 100644 --- a/test/test_crud_unified.py +++ b/test/test_crud_unified.py @@ -33,7 +33,7 @@ _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") # Generate unified tests. -globals().update(generate_test_classes(_TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/test_csot.py b/test/test_csot.py index 5201156a1d..ff907cc9c5 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -41,7 +41,6 @@ class TestCSOT(IntegrationTest): - RUN_ON_SERVERLESS = True RUN_ON_LOAD_BALANCER = True def test_timeout_nested(self): diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 9d6c945707..a0dabaaf8e 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -443,7 +443,6 @@ def mock_close(self, reason): class TestServerMonitoringMode(IntegrationTest): - @client_context.require_no_serverless @client_context.require_no_load_balancer def setUp(self): super().setUp() diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index d7f1d596cc..364a323627 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -54,7 +54,6 @@ class TestLB(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True def test_connections_are_only_returned_once(self): if "PyPy" in sys.version: @@ -142,10 +141,8 @@ def test_session_gc(self): session = client.start_session() session.start_transaction() client.test_session_gc.test.find_one({}, session=session) - # Cleanup the transaction left open on the server unless we're - # testing serverless which does not support killSessions. - if not client_context.serverless: - self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) + # Cleanup the transaction left open on the server + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 7ae4c41e70..5b87943fcc 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -80,7 +80,6 @@ def run(self): class TestPoolPausedError(IntegrationTest): # Pools don't get paused in load balanced mode. RUN_ON_LOAD_BALANCER = False - RUN_ON_SERVERLESS = False @client_context.require_sync @client_context.require_failCommand_blockConnection diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 598fc3fd76..ad5b0671e7 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -129,7 +129,6 @@ def non_retryable_single_statement_ops(coll): class IgnoreDeprecationsTest(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True deprecation_filter: DeprecationFilter def setUp(self) -> None: @@ -421,7 +420,6 @@ def test_retryable_writes_in_sharded_cluster_multiple_available(self): class TestWriteConcernError(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True fail_insert: dict @client_context.require_replica_set @@ -492,7 +490,6 @@ def run(self): class TestPoolPausedError(IntegrationTest): # Pools don't get paused in load balanced mode. RUN_ON_LOAD_BALANCER = False - RUN_ON_SERVERLESS = False @client_context.require_sync @client_context.require_failCommand_blockConnection diff --git a/test/test_transactions.py b/test/test_transactions.py index 93bcdbaeb1..f4578deddb 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -73,8 +73,6 @@ def maybe_skip_scenario(self, test): class TestTransactions(TransactionsBase): - RUN_ON_SERVERLESS = True - @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() diff --git a/test/test_unified_format.py b/test/test_unified_format.py index 05f58d5d06..f1cfd0139b 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -42,7 +42,6 @@ expected_failures=[ "Client side error in command starting transaction", # PYTHON-1894 ], - RUN_ON_SERVERLESS=False, ) ) @@ -56,7 +55,6 @@ expected_failures=[ ".*", # All tests expected to fail ], - RUN_ON_SERVERLESS=False, ) ) diff --git a/test/test_versioned_api_integration.py b/test/test_versioned_api_integration.py index 0066ecd977..066a1935ca 100644 --- a/test/test_versioned_api_integration.py +++ b/test/test_versioned_api_integration.py @@ -40,7 +40,6 @@ class TestServerApiIntegration(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True def assertServerApi(self, event): self.assertIn("apiVersion", event.command) diff --git a/test/unified_format.py b/test/unified_format.py index 84881800a2..0db037c654 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -130,14 +130,6 @@ def is_run_on_requirement_satisfied(requirement): if req_max_server_version: max_version_satisfied = Version.from_string(req_max_server_version) >= server_version - serverless = requirement.get("serverless") - if serverless == "require": - serverless_satisfied = client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - params_satisfied = True params = requirement.get("serverParameters") if params: @@ -167,7 +159,6 @@ def is_run_on_requirement_satisfied(requirement): topology_satisfied and min_version_satisfied and max_version_satisfied - and serverless_satisfied and params_satisfied and auth_satisfied and csfle_satisfied @@ -283,7 +274,7 @@ def _create_entity(self, entity_spec, uri=None): self._listeners[spec["id"]] = listener kwargs["event_listeners"] = [listener] if spec.get("useMultipleMongoses"): - if client_context.load_balancer or client_context.serverless: + if client_context.load_balancer: kwargs["h"] = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: kwargs["h"] = client_context.mongos_seeds() @@ -439,7 +430,6 @@ class UnifiedSpecTestMixinV1(IntegrationTest): SCHEMA_VERSION = Version.from_string("1.22") RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes mongos_clients: list[MongoClient] = [] @@ -510,11 +500,7 @@ def setUp(self): # Handle mongos_clients for transactions tests. self.mongos_clients = [] - if ( - client_context.supports_transactions() - and not client_context.load_balancer - and not client_context.serverless - ): + if client_context.supports_transactions() and not client_context.load_balancer: for address in client_context.mongoses: self.mongos_clients.append(self.single_client("{}:{}".format(*address))) @@ -551,12 +537,6 @@ def maybe_skip_test(self, spec): self.skipTest("PYTHON-5170 tests are flakey") if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: self.skipTest("PYTHON-5174 tests are flakey") - if ( - "inserting _id with type null via clientBulkWrite" in spec["description"] - or "commitTransaction fails after Interrupted" in spec["description"] - or "commit is not retried after MaxTimeMSExpired error" in spec["description"] - ) and client_context.serverless: - self.skipTest("PYTHON-5326 known serverless failures") class_name = self.__class__.__name__.lower() description = spec["description"].lower() diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 580e7cc120..3278063b4a 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -124,18 +124,6 @@ def _ensure_min_max_server_version(self, scenario_def, method): if max_ver is not None: method = client_context.require_version_max(*max_ver)(method) - if "serverless" in scenario_def: - serverless = scenario_def["serverless"] - if serverless == "require": - serverless_satisfied = client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - method = unittest.skipUnless( - serverless_satisfied, "Serverless requirement not satisfied" - )(method) - return method @staticmethod @@ -168,16 +156,6 @@ def valid_auth_enabled(run_on_req): return not client_context.auth_enabled return True - @staticmethod - def serverless_ok(run_on_req): - serverless = run_on_req["serverless"] - if serverless == "require": - return client_context.serverless - elif serverless == "forbid": - return not client_context.serverless - else: # unset or "allow" - return True - def should_run_on(self, scenario_def): run_on = scenario_def.get("runOn", []) if not run_on: @@ -190,7 +168,6 @@ def should_run_on(self, scenario_def): and self.min_server_version(req) and self.max_server_version(req) and self.valid_auth_enabled(req) - and self.serverless_ok(req) ): return True return False @@ -677,7 +654,7 @@ def run_scenario(self, scenario_def, test): use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: - if client_context.load_balancer or client_context.serverless: + if client_context.load_balancer: host = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: host = client_context.mongos_seeds() From 673f821acbe7ade4cd72cf7dac23eb10b86c28c5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Jun 2025 06:22:28 -0500 Subject: [PATCH 1943/2111] [v4.13] PYTHON-5406 AsyncPeriodicExecutor must reset CSOT contextvars before executing (#2367) Co-authored-by: Noah Stapp --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 9cce310d91..cbb2322fe8 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -16,7 +16,7 @@ env: # Changes per repo PRODUCT_NAME: PyMongo # Changes per branch - EVERGREEN_PROJECT: mongo-python-driver + EVERGREEN_PROJECT: mongo-python-driver-release # Constant # inputs will be empty on a scheduled run. so, we only set dry_run # to 'false' when the input is set to 'false'. From 9145521dfa039600b0cbb86540206430c09394a7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Jun 2025 12:12:05 -0500 Subject: [PATCH 1944/2111] PYTHON-5410 Assume ec2 role in backport task (#2369) --- .evergreen/generated_configs/tasks.yml | 1 + .evergreen/scripts/generate_config.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index aa700629fb..1129408f12 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -118,6 +118,7 @@ tasks: # Backport pr tests - name: backport-pr commands: + - func: assume ec2 role - command: subprocess.exec params: binary: bash diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e5c5209bf3..cb5c5a38a0 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -903,7 +903,8 @@ def create_backport_pr_tasks(): "${github.amrom.workers.devmit}", ] cmd = get_subprocess_exec(args=args) - return [EvgTask(name=name, commands=[cmd], allowed_requesters=["commit"])] + assume_func = FunctionCall(func="assume ec2 role") + return [EvgTask(name=name, commands=[assume_func, cmd], allowed_requesters=["commit"])] def create_ocsp_tasks(): From f645036d71bb4a0d31b86ee16c27b136fccf7a70 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 10 Jun 2025 12:26:25 -0500 Subject: [PATCH 1945/2111] Fix release metadata (#2372) --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index cbb2322fe8..9cce310d91 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -16,7 +16,7 @@ env: # Changes per repo PRODUCT_NAME: PyMongo # Changes per branch - EVERGREEN_PROJECT: mongo-python-driver-release + EVERGREEN_PROJECT: mongo-python-driver # Constant # inputs will be empty on a scheduled run. so, we only set dry_run # to 'false' when the input is set to 'false'. From 7e19515d7b6a85de80e58b199d28e808db8e8df8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 11 Jun 2025 10:44:46 -0500 Subject: [PATCH 1946/2111] PYTHON-5393 Make link checking more robust (#2374) --- doc/changelog.rst | 3 +-- doc/conf.py | 5 ++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index c44cfb41a2..d729f9afed 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1700,8 +1700,7 @@ Changes in Version 3.8.0 (2019/04/22) ------------------------------------- .. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install - Python 2.7 or newer from `Red Hat Software Collections - `_. + Python 2.7 or newer from Red Hat Software Collections. CentOS 6 users should install Python 2.7 or newer from `SCL `_ diff --git a/doc/conf.py b/doc/conf.py index 387b939344..a9711d259f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -82,7 +82,7 @@ # Options for link checking # The anchors on the rendered markdown page are created after the fact, # so those link results in a 404. -# wiki.centos.org has been flakey. +# wiki.centos.org has been flaky. # sourceforge.net is giving a 403 error, but is still accessible from the browser. linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check", @@ -91,6 +91,9 @@ r"https://sourceforge.net/", ] +# Allow for flaky links. +linkcheck_retries = 3 + # -- Options for extensions ---------------------------------------------------- autoclass_content = "init" From 8a8cb6f0afb7d93c5159e0fb89d89740e5f9932e Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 11 Jun 2025 13:45:54 -0400 Subject: [PATCH 1947/2111] PYTHON-5406 - Use correct client for test (#2377) --- test/asynchronous/test_async_contextvars_reset.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/test/asynchronous/test_async_contextvars_reset.py b/test/asynchronous/test_async_contextvars_reset.py index 9b0e2dc4dc..c6e825bbdf 100644 --- a/test/asynchronous/test_async_contextvars_reset.py +++ b/test/asynchronous/test_async_contextvars_reset.py @@ -27,17 +27,15 @@ class TestAsyncContextVarsReset(AsyncIntegrationTest): async def test_context_vars_are_reset_in_executor(self): - if sys.version_info < (3, 11): - self.skipTest("Test requires asyncio.Task.get_context (added in Python 3.11)") + if sys.version_info < (3, 12): + self.skipTest("Test requires asyncio.Task.get_context (added in Python 3.12)") - client = self.simple_client() - - await client.db.test.insert_one({"x": 1}) - for server in client._topology._servers.values(): + await self.client.db.test.insert_one({"x": 1}) + for server in self.client._topology._servers.values(): for context in [ c for c in server._monitor._executor._task.get_context() if c.name in ["TIMEOUT", "RTT", "DEADLINE"] ]: self.assertIn(context.get(), [None, float("inf"), 0.0]) - await client.db.test.delete_many({}) + await self.client.db.test.delete_many({}) From dfd5573c19de8df5d97dde5faf3ab056342610fc Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Wed, 11 Jun 2025 15:54:32 -0400 Subject: [PATCH 1948/2111] PYTHON-5002 Include test/ dir in synchro gaurd (#2379) --- tools/synchro.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/synchro.py b/tools/synchro.py index 906bfd00da..aaf7c6836a 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -417,13 +417,18 @@ def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[ def main() -> None: modified_files = [f"./{f}" for f in sys.argv[1:]] errored = False - for fname in async_files + gridfs_files: + for fname in async_files + gridfs_files + test_files: # If the async file was modified, we don't need to check if the sync file was also modified. if str(fname) in modified_files: continue sync_name = str(fname).replace("asynchronous", "synchronous") - if sync_name in modified_files and "OVERRIDE_SYNCHRO_CHECK" not in os.environ: - print(f"Refusing to overwrite {sync_name}") + test_sync_name = str(fname).replace("/asynchronous", "") + if ( + sync_name in modified_files + or test_sync_name in modified_files + and "OVERRIDE_SYNCHRO_CHECK" not in os.environ + ): + print(f"Refusing to overwrite {test_sync_name}") errored = True if errored: raise ValueError("Aborting synchro due to errors") From a742aa22d4ccde1b0b666efb5fc3868423d8ecf6 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 11 Jun 2025 16:42:18 -0500 Subject: [PATCH 1949/2111] PYTHON-5411 Add 4.13.1 changelog to master (#2380) --- doc/changelog.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index d729f9afed..3f5df8df6c 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,6 +8,22 @@ PyMongo 4.14 brings a number of changes including: - Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. +Changes in Version 4.13.1 (2025/06/10) +-------------------------------------- + +Version 4.13.1 is a bug fix release. + +- Fixed a bug that could raise ``ServerSelectionTimeoutError`` when using timeouts with ``AsyncMongoClient``. +- Fixed a bug that could raise ``NetworkTimeout`` errors on Windows. + +Issues Resolved +............... + +See the `PyMongo 4.13.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43924 + Changes in Version 4.13.0 (2025/05/14) -------------------------------------- From 54846cd11029d6c3302fc55c2ead10041148f6a8 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Wed, 11 Jun 2025 19:24:59 -0400 Subject: [PATCH 1950/2111] PYTHON-5409 Update test_session.py comment (#2381) --- test/asynchronous/test_session.py | 2 +- test/test_session.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 0ceaea98f9..475238dc0d 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -196,7 +196,7 @@ async def test_implicit_sessions_checkout(self): lsid_set = set() listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) - # Retry up to 10 times because there is a known race that can cause multiple + # Retry up to 10 times because there is a known race condition that can cause multiple # sessions to be used: connection check in happens before session check in for _ in range(10): cursor = client.db.test.find({}) diff --git a/test/test_session.py b/test/test_session.py index d70032d15f..98ac442d15 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -196,7 +196,7 @@ def test_implicit_sessions_checkout(self): lsid_set = set() listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) - # Retry up to 10 times because there is a known race that can cause multiple + # Retry up to 10 times because there is a known race condition that can cause multiple # sessions to be used: connection check in happens before session check in for _ in range(10): cursor = client.db.test.find({}) From c2aefc2edab2c34d10e34d69793431b0d6800d24 Mon Sep 17 00:00:00 2001 From: Maarten Sijm <9739541+mpsijm@users.noreply.github.com> Date: Fri, 13 Jun 2025 01:45:18 +0200 Subject: [PATCH 1951/2111] PYTHON-5414 Fix "module service_identity has no attribute SICertificateError" when using pyopenssl (#2382) --- pymongo/pyopenssl_context.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 0d4f27cf55..08fe99c889 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -420,9 +420,9 @@ def wrap_socket( pyopenssl.verify_ip_address(ssl_conn, server_hostname) else: pyopenssl.verify_hostname(ssl_conn, server_hostname) - except ( # type:ignore[misc] - service_identity.SICertificateError, - service_identity.SIVerificationError, + except ( + service_identity.CertificateError, + service_identity.VerificationError, ) as exc: raise _CertificateError(str(exc)) from None return ssl_conn From e51ac1fd1c214b4357890a4bb671db77f06f3512 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 13 Jun 2025 09:33:32 -0700 Subject: [PATCH 1952/2111] PYTHON-5409 Fix test_implicit_sessions_checkout again (#2384) --- test/asynchronous/test_session.py | 1 - test/test_session.py | 1 - 2 files changed, 2 deletions(-) diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 475238dc0d..bf2bce27ea 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -235,7 +235,6 @@ async def target(op, *args): for t in tasks: await t.join() self.assertIsNone(t.exc) - await client.close() lsid_set.clear() for i in listener.started_events: if i.command.get("lsid"): diff --git a/test/test_session.py b/test/test_session.py index 98ac442d15..89670df9ee 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -235,7 +235,6 @@ def target(op, *args): for t in tasks: t.join() self.assertIsNone(t.exc) - client.close() lsid_set.clear() for i in listener.started_events: if i.command.get("lsid"): From c16ef0a13e97dd04d1f4234f21ecda0627aaee8c Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 13 Jun 2025 11:45:47 -0700 Subject: [PATCH 1953/2111] PYTHON-5414 Add test for hostname verification error message regression (#2385) --- test/asynchronous/test_ssl.py | 4 +++- test/test_ssl.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py index 023ee91680..a05bc9379d 100644 --- a/test/asynchronous/test_ssl.py +++ b/test/asynchronous/test_ssl.py @@ -323,7 +323,7 @@ async def test_cert_ssl_validation_hostname_matching(self): response = await self.client.admin.command(HelloCompat.LEGACY_CMD) - with self.assertRaises(ConnectionFailure): + with self.assertRaises(ConnectionFailure) as cm: await connected( self.simple_client( "server", @@ -335,6 +335,8 @@ async def test_cert_ssl_validation_hostname_matching(self): **self.credentials, # type: ignore[arg-type] ) ) + # PYTHON-5414 Check for "module service_identity has no attribute SICertificateError" + self.assertNotIn("has no attribute", str(cm.exception)) await connected( self.simple_client( diff --git a/test/test_ssl.py b/test/test_ssl.py index 93a4b4e6ec..3ac0a4555a 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -323,7 +323,7 @@ def test_cert_ssl_validation_hostname_matching(self): response = self.client.admin.command(HelloCompat.LEGACY_CMD) - with self.assertRaises(ConnectionFailure): + with self.assertRaises(ConnectionFailure) as cm: connected( self.simple_client( "server", @@ -335,6 +335,8 @@ def test_cert_ssl_validation_hostname_matching(self): **self.credentials, # type: ignore[arg-type] ) ) + # PYTHON-5414 Check for "module service_identity has no attribute SICertificateError" + self.assertNotIn("has no attribute", str(cm.exception)) connected( self.simple_client( From 50ea82310d71db2527fe28bfb8e583aa535fa91b Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 13 Jun 2025 15:30:10 -0400 Subject: [PATCH 1954/2111] PYTHON 5212 - Use asyncio.loop.sock_connect in _async_create_connection (#2383) --- doc/changelog.rst | 17 ++++++ pymongo/pool_shared.py | 17 ++++-- .../asynchronous/test_async_loop_unblocked.py | 56 +++++++++++++++++++ tools/synchro.py | 1 + 4 files changed, 87 insertions(+), 4 deletions(-) create mode 100644 test/asynchronous/test_async_loop_unblocked.py diff --git a/doc/changelog.rst b/doc/changelog.rst index 3f5df8df6c..ca4784f919 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,6 +8,23 @@ PyMongo 4.14 brings a number of changes including: - Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. +Changes in Version 4.13.2 (2025/06/17) +-------------------------------------- + +Version 4.13.2 is a bug fix release. + +- Fixed a bug where ``AsyncMongoClient`` would block the event loop while creating new connections, + potentially significantly increasing latency for ongoing operations. + +Issues Resolved +............... + +See the `PyMongo 4.13.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43937 + + Changes in Version 4.13.1 (2025/06/10) -------------------------------------- diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index 308ecef349..905f1a4d18 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -206,7 +206,8 @@ async def _async_create_connection(address: _Address, options: PoolOptions) -> s # SOCK_CLOEXEC not supported for Unix sockets. _set_non_inheritable_non_atomic(sock.fileno()) try: - sock.connect(host) + sock.setblocking(False) + await asyncio.get_running_loop().sock_connect(sock, host) return sock except OSError: sock.close() @@ -241,14 +242,22 @@ async def _async_create_connection(address: _Address, options: PoolOptions) -> s timeout = options.connect_timeout elif timeout <= 0: raise socket.timeout("timed out") - sock.settimeout(timeout) sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) _set_keepalive_times(sock) - sock.connect(sa) + # Socket needs to be non-blocking during connection to not block the event loop + sock.setblocking(False) + await asyncio.wait_for( + asyncio.get_running_loop().sock_connect(sock, sa), timeout=timeout + ) + sock.settimeout(timeout) return sock + except asyncio.TimeoutError as e: + sock.close() + err = socket.timeout("timed out") + err.__cause__ = e except OSError as e: - err = e sock.close() + err = e # type: ignore[assignment] if err is not None: raise err diff --git a/test/asynchronous/test_async_loop_unblocked.py b/test/asynchronous/test_async_loop_unblocked.py new file mode 100644 index 0000000000..86f934b798 --- /dev/null +++ b/test/asynchronous/test_async_loop_unblocked.py @@ -0,0 +1,56 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that the asynchronous API does not block the event loop.""" +from __future__ import annotations + +import asyncio +import time +from test.asynchronous import AsyncIntegrationTest + +from pymongo.errors import ServerSelectionTimeoutError + + +class TestClientLoopUnblocked(AsyncIntegrationTest): + async def test_client_does_not_block_loop(self): + # Use an unreachable TEST-NET host to ensure that the client times out attempting to create a connection. + client = self.simple_client("192.0.2.1", serverSelectionTimeoutMS=500) + latencies = [] + + # If the loop is being blocked, at least one iteration will have a latency much more than 0.1 seconds + async def background_task(): + start = time.monotonic() + try: + while True: + start = time.monotonic() + await asyncio.sleep(0.1) + latencies.append(time.monotonic() - start) + except asyncio.CancelledError: + latencies.append(time.monotonic() - start) + raise + + t = asyncio.create_task(background_task()) + + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No servers found yet"): + await client.admin.command("ping") + + t.cancel() + with self.assertRaises(asyncio.CancelledError): + await t + + self.assertLessEqual( + sorted(latencies, reverse=True)[0], + 1.0, + "Background task was blocked from running", + ) diff --git a/tools/synchro.py b/tools/synchro.py index aaf7c6836a..541231cf71 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -186,6 +186,7 @@ def async_only_test(f: str) -> bool: "test_async_cancellation.py", "test_async_loop_safety.py", "test_async_contextvars_reset.py", + "test_async_loop_unblocked.py", ] From 87c015fbcfd95f35adfc30eeffb37cf2ab9ed4ef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 06:31:12 -0500 Subject: [PATCH 1955/2111] Bump github/codeql-action from 3.28.19 to 3.29.0 in the actions group (#2388) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/zizmor.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 7a61ee9c62..36ed7fa2e9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 90d1eba118..48097316f0 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -26,7 +26,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3 + uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 with: sarif_file: results.sarif category: zizmor From 8a94de1c1b0befa7a7c839e2860a48eedb4b1767 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 16 Jun 2025 11:51:46 -0500 Subject: [PATCH 1956/2111] PYTHON-5343 Clean up contributing docs (#2390) --- CONTRIBUTING.md | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fa500273af..5a2bf4d913 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -190,13 +190,15 @@ just docs-serve Browse to the link provided, and then as you make changes to docstrings or narrative docs, the pages will re-render and the browser will automatically refresh. - ## Running Tests Locally -- Ensure you have started the appropriate Mongo Server(s). - Run `just install` to set a local virtual environment, or you can manually create a virtual environment and run `pytest` directly. If you want to use a specific version of Python, remove the `.venv` folder and set `PYTHON_BINARY` before running `just install`. +- Ensure you have started the appropriate Mongo Server(s). You can run `just run-server` with optional args + to set up the server. All given options will be passed to + [`run-orchestration.sh`](https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-orchestration.sh). Run `$DRIVERS_TOOLS/evergreen/run-orchestration.sh -h` + for a full list of options. - Run `just test` or `pytest` to run all of the tests. - Append `test/.py::::` to run specific tests. You can omit the `` to test a full class @@ -213,18 +215,36 @@ the pages will re-render and the browser will automatically refresh. `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. - Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools`. This can be put into a `.bashrc` file for convenience. -- Set up access to [Drivers test secrets](https://github.com/mongodb-labs/drivers-evergreen-tools/tree/master/.evergreen/secrets_handling#secrets-handling). +- Some tests require access to [Drivers test secrets](https://github.com/mongodb-labs/drivers-evergreen-tools/tree/master/.evergreen/secrets_handling#secrets-handling). ### Usage -- Run `just run-server` with optional args to set up the server. All given options will be passed to - `run-orchestration.sh` in `$DRIVERS_TOOLS`. See `$DRIVERS_TOOLS/evergreen/run-orchestration.sh -h` - for a full list of options. +- Run `just run-server` with optional args to set up the server. - Run `just setup-tests` with optional args to set up the test environment, secrets, etc. See `just setup-tests -h` for a full list of available options. - Run `just run-tests` to run the tests in an appropriate Python environment. - When done, run `just teardown-tests` to clean up and `just stop-server` to stop the server. +### SSL tests + +- Run `just run-server --ssl` to start the server with TLS enabled. +- Run `just setup-tests --ssl`. +- Run `just run-tests`. + +Note: for general testing purposes with an TLS-enabled server, you can use the following (this should ONLY be used +for local testing): + +```python +from pymongo import MongoClient + +client = MongoClient( + "mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true" +) +``` + +If you want to use the actual certificate file then set `tlsCertificateKeyFile` to the local path +to `/test/certificates/client.pem` and `tlsCAFile` to the local path to `/test/certificates/ca.pem`. + ### Encryption tests - Run `just run-server` to start the server. @@ -434,6 +454,7 @@ run `pre-commit run --all-files --hook-stage manual ruff` and fix all reported e hook again. ## Converting a test to async + The `tools/convert_test_to_async.py` script takes in an existing synchronous test file and outputs a partially-converted asynchronous version of the same name to the `test/asynchronous` directory. Use this generated file as a starting point for the completed conversion. From 336163aaa0d2ece592812fad52e561bedc52185b Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Wed, 18 Jun 2025 13:35:23 -0400 Subject: [PATCH 1957/2111] PYTHON-5126 - Implemented new test cases for Binary Vector (#2393) --- bson/binary.py | 9 ++++++++ test/bson_binary_vector/packed_bit.json | 16 ++++++------- test/test_bson_binary_vector.py | 30 +++++++++++++++++-------- 3 files changed, 38 insertions(+), 17 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index 6698e55ccc..a1f63adf27 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -490,6 +490,11 @@ def as_vector(self) -> BinaryVector: dtype = BinaryVectorDtype(dtype) n_values = len(self) - position + if padding and dtype != BinaryVectorDtype.PACKED_BIT: + raise ValueError( + f"Corrupt data. Padding ({padding}) must be 0 for all but PACKED_BIT dtypes. ({dtype=})" + ) + if dtype == BinaryVectorDtype.INT8: dtype_format = "b" format_string = f"<{n_values}{dtype_format}" @@ -510,6 +515,10 @@ def as_vector(self) -> BinaryVector: elif dtype == BinaryVectorDtype.PACKED_BIT: # data packed as uint8 + if padding and not n_values: + raise ValueError("Corrupt data. Vector has a padding P, but no data.") + if padding > 7 or padding < 0: + raise ValueError(f"Corrupt data. Padding ({padding}) must be between 0 and 7.") dtype_format = "B" format_string = f"<{n_values}{dtype_format}" unpacked_uint8s = list(struct.unpack_from(format_string, self, position)) diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json index a220e7e318..7cc272e38b 100644 --- a/test/bson_binary_vector/packed_bit.json +++ b/test/bson_binary_vector/packed_bit.json @@ -21,22 +21,22 @@ "canonical_bson": "1600000005766563746F7200040000000910007F0700" }, { - "description": "Empty Vector PACKED_BIT", + "description": "PACKED_BIT with padding", "valid": true, - "vector": [], + "vector": [127, 8], "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", - "padding": 0, - "canonical_bson": "1400000005766563746F72000200000009100000" + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000910037F0800" }, { - "description": "PACKED_BIT with padding", + "description": "Empty Vector PACKED_BIT", "valid": true, - "vector": [127, 7], + "vector": [], "dtype_hex": "0x10", "dtype_alias": "PACKED_BIT", - "padding": 3, - "canonical_bson": "1600000005766563746F7200040000000910037F0700" + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009100000" }, { "description": "Overflow Vector PACKED_BIT", diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py index 9bfdcbfb9a..ba3eff8bb2 100644 --- a/test/test_bson_binary_vector.py +++ b/test/test_bson_binary_vector.py @@ -48,7 +48,7 @@ def create_test(case_spec): def run_test(self): for test_case in case_spec.get("tests", []): description = test_case["description"] - vector_exp = test_case.get("vector", []) + vector_exp = test_case.get("vector") dtype_hex_exp = test_case["dtype_hex"] dtype_alias_exp = test_case.get("dtype_alias") padding_exp = test_case.get("padding", 0) @@ -85,14 +85,26 @@ def run_test(self): self.assertEqual(cB_obs, canonical_bson_exp, description) else: - with self.assertRaises((struct.error, ValueError), msg=description): - # Tests Binary.from_vector - Binary.from_vector(vector_exp, dtype_exp, padding_exp) - # Tests Binary.as_vector - cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) - decoded_doc = decode(cB_exp) - binary_obs = decoded_doc[test_key] - binary_obs.as_vector() + """ + #### To prove correct in an invalid case (`valid:false`), one MUST + - (encoding case) if the vector field is present, raise an exception + when attempting to encode a document from the numeric values,dtype, and padding. + - (decoding case) if the canonical_bson field is present, raise an exception + when attempting to deserialize it into the corresponding + numeric values, as the field contains corrupted data. + """ + # Tests Binary.from_vector() + if vector_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + Binary.from_vector(vector_exp, dtype_exp, padding_exp) + + # Tests Binary.as_vector() + if canonical_bson_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + binary_obs.as_vector() return run_test From 4ea0288eaadc1387c00c2cc4ea725b18648e01c1 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Fri, 20 Jun 2025 09:40:05 -0400 Subject: [PATCH 1958/2111] PYTHON-5126 Updated changelog to reflect breaking change in bson.binary.BinaryVector (#2394) Co-authored-by: Steven Silvester --- doc/changelog.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index ca4784f919..35a9770a14 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -8,6 +8,9 @@ PyMongo 4.14 brings a number of changes including: - Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. +- Introduces a minor breaking change. When encoding :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised + if the 'padding' metadata field is < 0 or > 7, or non-zero for any type other than PACKED_BIT. + Changes in Version 4.13.2 (2025/06/17) -------------------------------------- From e2bfa9a59038f2551d3e0c161131dde8da119ca7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 20 Jun 2025 14:25:19 -0400 Subject: [PATCH 1959/2111] PYTHON-5248 - Drop support for MongoDB 4.0 (#2353) --- .evergreen/generated_configs/tasks.yml | 642 +++++++----------- .evergreen/generated_configs/variants.yml | 17 +- .evergreen/scripts/generate_config.py | 12 +- .evergreen/scripts/generate_config_utils.py | 2 +- pymongo/common.py | 4 +- test/__init__.py | 20 +- test/asynchronous/__init__.py | 20 +- test/asynchronous/test_bulk.py | 4 +- test/asynchronous/test_change_stream.py | 95 +-- ...nnections_survive_primary_stepdown_spec.py | 10 +- test/asynchronous/test_cursor.py | 10 - test/asynchronous/test_custom_types.py | 4 +- test/asynchronous/test_encryption.py | 14 - test/asynchronous/test_examples.py | 1 - test/asynchronous/test_retryable_writes.py | 32 - test/asynchronous/test_session.py | 9 - .../asynchronous/test_transactions_unified.py | 1 - test/asynchronous/unified_format.py | 27 - test/asynchronous/utils_spec_runner.py | 6 - .../errors/pre-42-InterruptedAtShutdown.json | 70 -- ...re-42-InterruptedDueToReplStateChange.json | 70 -- .../errors/pre-42-LegacyNotPrimary.json | 70 -- .../pre-42-NotPrimaryNoSecondaryOk.json | 70 -- .../errors/pre-42-NotPrimaryOrSecondary.json | 70 -- .../errors/pre-42-NotWritablePrimary.json | 70 -- .../errors/pre-42-PrimarySteppedDown.json | 70 -- .../errors/pre-42-ShutdownInProgress.json | 70 -- .../rs/null_election_id-pre-6.0.json | 8 +- .../rs/primary_mismatched_me_not_removed.json | 4 +- ...setversion_without_electionid-pre-6.0.json | 4 +- ...setversion_without_electionid-pre-6.0.json | 6 +- test/mockupdb/test_cursor_namespace.py | 2 +- test/test_bulk.py | 4 +- test/test_change_stream.py | 93 +-- ...nnections_survive_primary_stepdown_spec.py | 10 +- test/test_cursor.py | 10 - test/test_custom_types.py | 4 +- test/test_encryption.py | 14 - test/test_examples.py | 1 - test/test_retryable_writes.py | 32 - test/test_session.py | 9 - test/test_topology.py | 2 +- test/test_transactions_unified.py | 1 - test/unified_format.py | 27 - test/utils_spec_runner.py | 3 - 45 files changed, 292 insertions(+), 1432 deletions(-) delete mode 100644 test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json delete mode 100644 test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 1129408f12..e91d2c119b 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -3062,558 +3062,492 @@ tasks: - sync # Standard tests - - name: test-standard-v4.0-python3.9-sync-noauth-nossl-standalone + - name: test-standard-v4.2-python3.9-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "4.2" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "4.2" PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - test-standard - - server-4.0 + - server-4.2 - python-3.9 - standalone-noauth-nossl - sync - - name: test-standard-v4.0-python3.10-async-noauth-ssl-replica-set + - name: test-standard-v4.2-python3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.0" + VERSION: "4.2" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.0" + VERSION: "4.2" PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard - - server-4.0 + - server-4.2 - python-3.10 - replica_set-noauth-ssl - async - - name: test-standard-v4.0-python3.11-sync-auth-ssl-sharded-cluster + - name: test-standard-v4.2-python3.11-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.0" + VERSION: "4.2" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.0" + VERSION: "4.2" PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard - - server-4.0 + - server-4.2 - python-3.11 - sharded_cluster-auth-ssl - sync - - name: test-standard-v4.2-python3.12-async-noauth-nossl-standalone + - name: test-standard-v4.4-python3.12-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.2" + VERSION: "4.4" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.2" + VERSION: "4.4" PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard - - server-4.2 + - server-4.4 - python-3.12 - standalone-noauth-nossl - async - - name: test-standard-v4.2-python3.13-sync-noauth-ssl-replica-set + - name: test-standard-v4.4-python3.13-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "4.4" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "4.4" PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - - server-4.2 + - server-4.4 - python-3.13 - replica_set-noauth-ssl - sync - - name: test-standard-v4.2-python3.9-async-auth-ssl-sharded-cluster + - name: test-standard-v4.4-python3.9-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.2" + VERSION: "4.4" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.2" + VERSION: "4.4" PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - test-standard - - server-4.2 + - server-4.4 - python-3.9 - sharded_cluster-auth-ssl - async - - name: test-standard-v4.4-python3.10-sync-noauth-nossl-standalone + - name: test-standard-v5.0-python3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.4" + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.4" + VERSION: "5.0" PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - - server-4.4 + - server-5.0 - python-3.10 - standalone-noauth-nossl - sync - - name: test-standard-v4.4-python3.11-async-noauth-ssl-replica-set + - name: test-standard-v5.0-python3.11-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.4" + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.4" + VERSION: "5.0" PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - - server-4.4 + - server-5.0 - python-3.11 - replica_set-noauth-ssl - async - - name: test-standard-v4.4-python3.12-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-python3.12-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "5.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "5.0" PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard - - server-4.4 + - server-5.0 - python-3.12 - sharded_cluster-auth-ssl - sync - - name: test-standard-v5.0-python3.13-async-noauth-nossl-standalone + - name: test-standard-v6.0-python3.13-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "6.0" PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - - server-5.0 + - server-6.0 - python-3.13 - standalone-noauth-nossl - async - - name: test-standard-v5.0-python3.9-sync-noauth-ssl-replica-set + - name: test-standard-v6.0-python3.9-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "5.0" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "5.0" + VERSION: "6.0" PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - test-standard - - server-5.0 + - server-6.0 - python-3.9 - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.10-async-auth-ssl-sharded-cluster + - name: test-standard-v6.0-python3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "5.0" + VERSION: "6.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "5.0" + VERSION: "6.0" PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard - - server-5.0 + - server-6.0 - python-3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v6.0-python3.11-sync-noauth-nossl-standalone + - name: test-standard-v7.0-python3.11-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" + VERSION: "7.0" PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard - - server-6.0 + - server-7.0 - python-3.11 - standalone-noauth-nossl - sync - - name: test-standard-v6.0-python3.12-async-noauth-ssl-replica-set + - name: test-standard-v7.0-python3.12-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: "7.0" PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard - - server-6.0 + - server-7.0 - python-3.12 - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.13-sync-auth-ssl-sharded-cluster + - name: test-standard-v7.0-python3.13-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "6.0" + VERSION: "7.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "6.0" + VERSION: "7.0" PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - - server-6.0 + - server-7.0 - python-3.13 - sharded_cluster-auth-ssl - sync - - name: test-standard-v7.0-python3.9-async-noauth-nossl-standalone + - name: test-standard-v8.0-python3.9-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" + VERSION: "8.0" PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - test-standard - - server-7.0 + - server-8.0 - python-3.9 - standalone-noauth-nossl - async - - name: test-standard-v7.0-python3.10-sync-noauth-ssl-replica-set + - name: test-standard-v8.0-python3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" + VERSION: "8.0" PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - - server-7.0 + - server-8.0 - python-3.10 - replica_set-noauth-ssl - sync - - name: test-standard-v7.0-python3.11-async-auth-ssl-sharded-cluster + - name: test-standard-v8.0-python3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: "8.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: "8.0" PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - - server-7.0 + - server-8.0 - python-3.11 - sharded_cluster-auth-ssl - async - - name: test-standard-v8.0-python3.12-sync-noauth-nossl-standalone + - name: test-standard-rapid-python3.12-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: rapid PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard - - server-8.0 + - server-rapid - python-3.12 - standalone-noauth-nossl - sync - - name: test-standard-v8.0-python3.13-async-noauth-ssl-replica-set + - name: test-standard-rapid-python3.13-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "8.0" + VERSION: rapid PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - - server-8.0 + - server-rapid - python-3.13 - replica_set-noauth-ssl - async - - name: test-standard-v8.0-python3.9-sync-auth-ssl-sharded-cluster + - name: test-standard-rapid-python3.9-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" + VERSION: rapid PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - test-standard - - server-8.0 + - server-rapid - python-3.9 - sharded_cluster-auth-ssl - sync - - name: test-standard-rapid-python3.10-async-noauth-nossl-standalone + - name: test-standard-latest-python3.10-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: rapid + VERSION: latest - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: rapid + VERSION: latest PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard - - server-rapid + - server-latest - python-3.10 - standalone-noauth-nossl - async - - name: test-standard-rapid-python3.11-sync-noauth-ssl-replica-set - commands: - - func: run server - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync - tags: - - test-standard - - server-rapid - - python-3.11 - - replica_set-noauth-ssl - - sync - - name: test-standard-rapid-python3.12-async-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: rapid - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: rapid - PYTHON_VERSION: "3.12" - TEST_NAME: default_async - tags: - - test-standard - - server-rapid - - python-3.12 - - sharded_cluster-auth-ssl - - async - - name: test-standard-latest-python3.13-sync-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: latest - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: latest - PYTHON_VERSION: "3.13" - TEST_NAME: default_sync - tags: - - test-standard - - server-latest - - python-3.13 - - standalone-noauth-nossl - - sync - pr - - name: test-standard-latest-python3.9-async-noauth-ssl-replica-set + - name: test-standard-latest-python3.11-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3627,16 +3561,16 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.9" - TEST_NAME: default_async + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync tags: - test-standard - server-latest - - python-3.9 + - python-3.11 - replica_set-noauth-ssl - - async + - sync - pr - - name: test-standard-latest-python3.10-sync-auth-ssl-sharded-cluster + - name: test-standard-latest-python3.12-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3650,288 +3584,202 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.10" - TEST_NAME: default_sync + PYTHON_VERSION: "3.12" + TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.10 + - python-3.12 - sharded_cluster-auth-ssl - - sync + - async - pr - - name: test-standard-v4.0-pypy3.10-sync-noauth-nossl-standalone + - name: test-standard-v4.2-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "4.2" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "4.2" PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - - server-4.0 + - server-4.2 - python-pypy3.10 - standalone-noauth-nossl - sync - pypy - - name: test-standard-v4.2-pypy3.10-async-noauth-ssl-replica-set + - name: test-standard-v4.4-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "4.4" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "4.4" PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - - server-4.2 + - server-4.4 - python-pypy3.10 - replica_set-noauth-ssl - async - pypy - - name: test-standard-v4.4-pypy3.10-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "5.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "5.0" PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - - server-4.4 + - server-5.0 - python-pypy3.10 - sharded_cluster-auth-ssl - sync - pypy - - name: test-standard-v5.0-pypy3.10-async-noauth-nossl-standalone + - name: test-standard-v6.0-pypy3.10-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "6.0" PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - - server-5.0 + - server-6.0 - python-pypy3.10 - standalone-noauth-nossl - async - pypy - - name: test-standard-v6.0-pypy3.10-sync-noauth-ssl-replica-set + - name: test-standard-v7.0-pypy3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: "7.0" PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - - server-6.0 + - server-7.0 - python-pypy3.10 - replica_set-noauth-ssl - sync - pypy - - name: test-standard-v7.0-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-standard-v8.0-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: "8.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: "8.0" PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - - server-7.0 + - server-8.0 - python-pypy3.10 - sharded_cluster-auth-ssl - async - pypy - - name: test-standard-v8.0-pypy3.10-sync-noauth-nossl-standalone + - name: test-standard-rapid-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: rapid PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - - server-8.0 + - server-rapid - python-pypy3.10 - standalone-noauth-nossl - sync - pypy - - name: test-standard-rapid-pypy3.10-async-noauth-ssl-replica-set + - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: rapid + VERSION: latest - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: rapid + VERSION: latest PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - - server-rapid + - server-latest - python-pypy3.10 - replica_set-noauth-ssl - async - pypy - - name: test-standard-latest-pypy3.10-sync-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: latest - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: latest - PYTHON_VERSION: pypy3.10 - TEST_NAME: default_sync - tags: - - test-standard - - server-latest - - python-pypy3.10 - - sharded_cluster-auth-ssl - - sync - - pypy # Test non standard tests - - name: test-non-standard-v4.0-python3.9-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.0" - PYTHON_VERSION: "3.9" - tags: - - test-non-standard - - server-4.0 - - python-3.9 - - standalone-noauth-nossl - - noauth - - name: test-non-standard-v4.0-python3.10-noauth-ssl-replica-set - commands: - - func: run server - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: "4.0" - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: "4.0" - PYTHON_VERSION: "3.10" - tags: - - test-non-standard - - server-4.0 - - python-3.10 - - replica_set-noauth-ssl - - noauth - - name: test-non-standard-v4.0-python3.11-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "4.0" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "4.0" - PYTHON_VERSION: "3.11" - tags: - - test-non-standard - - server-4.0 - - python-3.11 - - sharded_cluster-auth-ssl - - auth - - name: test-non-standard-v4.2-python3.12-noauth-nossl-standalone + - name: test-non-standard-v4.2-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -3945,14 +3793,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-4.2 - - python-3.12 + - python-3.9 - standalone-noauth-nossl - noauth - - name: test-non-standard-v4.2-python3.13-noauth-ssl-replica-set + - name: test-non-standard-v4.2-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -3966,14 +3814,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-4.2 - - python-3.13 + - python-3.10 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v4.2-python3.9-auth-ssl-sharded-cluster + - name: test-non-standard-v4.2-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3987,14 +3835,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-4.2 - - python-3.9 + - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v4.4-python3.10-noauth-nossl-standalone + - name: test-non-standard-v4.4-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -4008,14 +3856,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-4.4 - - python-3.10 + - python-3.12 - standalone-noauth-nossl - noauth - - name: test-non-standard-v4.4-python3.11-noauth-ssl-replica-set + - name: test-non-standard-v4.4-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -4029,14 +3877,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-4.4 - - python-3.11 + - python-3.13 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v4.4-python3.12-auth-ssl-sharded-cluster + - name: test-non-standard-v4.4-python3.9-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4050,14 +3898,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-4.4 - - python-3.12 + - python-3.9 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v5.0-python3.13-noauth-nossl-standalone + - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4071,14 +3919,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-5.0 - - python-3.13 + - python-3.10 - standalone-noauth-nossl - noauth - - name: test-non-standard-v5.0-python3.9-noauth-ssl-replica-set + - name: test-non-standard-v5.0-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4092,14 +3940,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-5.0 - - python-3.9 + - python-3.11 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v5.0-python3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4113,14 +3961,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-5.0 - - python-3.10 + - python-3.12 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v6.0-python3.11-noauth-nossl-standalone + - name: test-non-standard-v6.0-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -4134,14 +3982,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-6.0 - - python-3.11 + - python-3.13 - standalone-noauth-nossl - noauth - - name: test-non-standard-v6.0-python3.12-noauth-ssl-replica-set + - name: test-non-standard-v6.0-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -4155,14 +4003,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-6.0 - - python-3.12 + - python-3.9 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v6.0-python3.13-auth-ssl-sharded-cluster + - name: test-non-standard-v6.0-python3.10-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4176,14 +4024,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-6.0 - - python-3.13 + - python-3.10 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4197,14 +4045,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-7.0 - - python-3.9 + - python-3.11 - standalone-noauth-nossl - noauth - - name: test-non-standard-v7.0-python3.10-noauth-ssl-replica-set + - name: test-non-standard-v7.0-python3.12-noauth-ssl-replica-set commands: - func: run server vars: @@ -4218,14 +4066,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-7.0 - - python-3.10 + - python-3.12 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v7.0-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-v7.0-python3.13-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4239,14 +4087,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-7.0 - - python-3.11 + - python-3.13 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone + - name: test-non-standard-v8.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -4260,14 +4108,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-8.0 - - python-3.12 + - python-3.9 - standalone-noauth-nossl - noauth - - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set + - name: test-non-standard-v8.0-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -4281,14 +4129,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-8.0 - - python-3.13 + - python-3.10 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v8.0-python3.9-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4302,14 +4150,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-8.0 - - python-3.9 + - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -4323,14 +4171,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-rapid - - python-3.10 + - python-3.12 - standalone-noauth-nossl - noauth - - name: test-non-standard-rapid-python3.11-noauth-ssl-replica-set + - name: test-non-standard-rapid-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -4344,14 +4192,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-rapid - - python-3.11 + - python-3.13 - replica_set-noauth-ssl - noauth - - name: test-non-standard-rapid-python3.12-auth-ssl-sharded-cluster + - name: test-non-standard-rapid-python3.9-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4365,14 +4213,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-rapid - - python-3.12 + - python-3.9 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-latest-python3.13-noauth-nossl-standalone + - name: test-non-standard-latest-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4386,15 +4234,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-latest - - python-3.13 + - python-3.10 - standalone-noauth-nossl - noauth - pr - - name: test-non-standard-latest-python3.9-noauth-ssl-replica-set + - name: test-non-standard-latest-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4408,15 +4256,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-latest - - python-3.9 + - python-3.11 - replica_set-noauth-ssl - noauth - pr - - name: test-non-standard-latest-python3.10-auth-ssl-sharded-cluster + - name: test-non-standard-latest-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4430,209 +4278,187 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-latest - - python-3.10 + - python-3.12 - sharded_cluster-auth-ssl - auth - pr - - name: test-non-standard-v4.0-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "4.2" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.0" + VERSION: "4.2" PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-4.0 + - server-4.2 - python-pypy3.10 - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v4.2-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "4.4" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.2" + VERSION: "4.4" PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-4.2 + - server-4.4 - python-pypy3.10 - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v4.4-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "5.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "4.4" + VERSION: "5.0" PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-4.4 + - server-5.0 - python-pypy3.10 - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v5.0-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "6.0" PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-5.0 + - server-6.0 - python-pypy3.10 - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v6.0-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "6.0" + VERSION: "7.0" PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-6.0 + - server-7.0 - python-pypy3.10 - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v7.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: "8.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "7.0" + VERSION: "8.0" PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-7.0 + - server-8.0 - python-pypy3.10 - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v8.0-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: rapid PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-8.0 + - server-rapid - python-pypy3.10 - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-rapid-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: rapid + VERSION: latest - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: rapid - PYTHON_VERSION: pypy3.10 - tags: - - test-non-standard - - server-rapid - - python-pypy3.10 - - replica_set-noauth-ssl - - noauth - - pypy - - name: test-non-standard-latest-pypy3.10-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: latest - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster VERSION: latest PYTHON_VERSION: pypy3.10 tags: - test-non-standard - server-latest - python-pypy3.10 - - sharded_cluster-auth-ssl - - auth + - replica_set-noauth-ssl + - noauth - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 57de725cb1..939d7bbdef 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -146,7 +146,7 @@ buildvariants: COMPRESSOR: zlib - name: compression-zstd-rhel8 tasks: - - name: .test-standard !.server-4.0 + - name: .test-standard !.server-4.2 display_name: Compression zstd RHEL8 run_on: - rhel87-small @@ -522,13 +522,6 @@ buildvariants: PYTHON_BINARY: /opt/python/3.9/bin/python3 # Server version tests - - name: mongodb-v4.0 - tasks: - - name: .server-version - display_name: "* MongoDB v4.0" - run_on: - - rhel87-small - tags: [coverage_tag] - name: mongodb-v4.2 tasks: - name: .server-version @@ -664,11 +657,3 @@ buildvariants: - rhel87-small expansions: STORAGE_ENGINE: inmemory - - name: storage-mmapv1-rhel8 - tasks: - - name: .test-standard !.sharded_cluster-auth-ssl .server-4.0 - display_name: Storage MMAPv1 RHEL8 - run_on: - - rhel87-small - expansions: - STORAGE_ENGINE: mmapv1 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index cb5c5a38a0..518d2487eb 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -25,7 +25,6 @@ get_task_name, get_variant_name, get_versions_from, - get_versions_until, handle_c_ext, write_functions_to_file, write_tasks_to_file, @@ -196,7 +195,7 @@ def create_compression_variants(): for compressor in "snappy", "zlib", "zstd": expansions = dict(COMPRESSOR=compressor) if compressor == "zstd": - tasks = [".test-standard !.server-4.0"] + tasks = [".test-standard !.server-4.2"] else: tasks = [".test-standard"] display_name = get_variant_name(f"Compression {compressor}", host) @@ -249,16 +248,11 @@ def create_pyopenssl_variants(): def create_storage_engine_variants(): host = DEFAULT_HOST - engines = ["InMemory", "MMAPv1"] + engines = ["InMemory"] variants = [] for engine in engines: expansions = dict(STORAGE_ENGINE=engine.lower()) - if engine == engines[0]: - tasks = [".test-standard .standalone-noauth-nossl"] - else: - # MongoDB 4.2 drops support for MMAPv1 - versions = get_versions_until("4.0") - tasks = [f".test-standard !.sharded_cluster-auth-ssl .server-{v}" for v in versions] + tasks = [".test-standard .standalone-noauth-nossl"] display_name = get_variant_name(f"Storage {engine}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index ad092983fa..62ea982cd8 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -21,7 +21,7 @@ # Globals ############## -ALL_VERSIONS = ["4.0", "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS diff --git a/pymongo/common.py b/pymongo/common.py index 3d8095eedf..96f9f87459 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -66,8 +66,8 @@ MAX_WRITE_BATCH_SIZE = 100000 # What this version of PyMongo supports. -MIN_SUPPORTED_SERVER_VERSION = "4.0" -MIN_SUPPORTED_WIRE_VERSION = 7 +MIN_SUPPORTED_SERVER_VERSION = "4.2" +MIN_SUPPORTED_WIRE_VERSION = 8 # MongoDB 8.0 MAX_SUPPORTED_WIRE_VERSION = 25 diff --git a/test/__init__.py b/test/__init__.py index 0e6046b527..e0646ce894 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -508,19 +508,6 @@ def require_data_lake(self, func): func=func, ) - def require_no_mmap(self, func): - """Run a test only if the server is not using the MMAPv1 storage - engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters. - """ - - def is_not_mmap(): - if self.is_mongos: - return True - return self.storage_engine != "mmapv1" - - return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) - def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) @@ -651,7 +638,7 @@ def require_no_load_balancer(self, func): def require_change_streams(self, func): """Run a test only if the server supports change streams.""" - return self.require_no_mmap(self.require_no_standalone(func)) + return self.require_no_standalone(func) def is_topology_type(self, topologies): unknown = set(topologies) - { @@ -754,8 +741,6 @@ def require_sessions(self, func): return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) def supports_retryable_writes(self): - if self.storage_engine == "mmapv1": - return False if not self.sessions_enabled: return False return self.is_mongos or self.is_rs @@ -769,9 +754,6 @@ def require_retryable_writes(self, func): ) def supports_transactions(self): - if self.storage_engine == "mmapv1": - return False - if self.version.at_least(4, 1, 8): return self.is_mongos or self.is_rs diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 52583d30ef..48c9dc2920 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -508,19 +508,6 @@ def require_data_lake(self, func): func=func, ) - def require_no_mmap(self, func): - """Run a test only if the server is not using the MMAPv1 storage - engine. Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters. - """ - - def is_not_mmap(): - if self.is_mongos: - return True - return self.storage_engine != "mmapv1" - - return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) - def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) @@ -651,7 +638,7 @@ def require_no_load_balancer(self, func): def require_change_streams(self, func): """Run a test only if the server supports change streams.""" - return self.require_no_mmap(self.require_no_standalone(func)) + return self.require_no_standalone(func) async def is_topology_type(self, topologies): unknown = set(topologies) - { @@ -754,8 +741,6 @@ def require_sessions(self, func): return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) def supports_retryable_writes(self): - if self.storage_engine == "mmapv1": - return False if not self.sessions_enabled: return False return self.is_mongos or self.is_rs @@ -769,9 +754,6 @@ def require_retryable_writes(self, func): ) def supports_transactions(self): - if self.storage_engine == "mmapv1": - return False - if self.version.at_least(4, 1, 8): return self.is_mongos or self.is_rs diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index 62022de24c..b6dedb497c 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -165,7 +165,7 @@ async def _test_update_many(self, update): async def test_update_many(self): await self._test_update_many({"$set": {"foo": "bar"}}) - @async_client_context.require_version_min(4, 1, 11) + @async_client_context.require_version_min(4, 2, 0) async def test_update_many_pipeline(self): await self._test_update_many([{"$set": {"foo": "bar"}}]) @@ -206,7 +206,7 @@ async def _test_update_one(self, update): async def test_update_one(self): await self._test_update_one({"$set": {"foo": "bar"}}) - @async_client_context.require_version_min(4, 1, 11) + @async_client_context.require_version_min(4, 2, 0) async def test_update_one_pipeline(self): await self._test_update_one([{"$set": {"foo": "bar"}}]) diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 0260cb7a82..1be45bee3e 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -267,7 +267,7 @@ async def test_batch_size_is_honored(self): # $changeStream.startAtOperationTime was added in 4.0.0. @no_type_check - @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_min(4, 2, 0) async def test_start_at_operation_time(self): optime = await self.get_start_at_operation_time() @@ -436,7 +436,7 @@ async def test_change_operations(self): await self._test_get_invalidate_event(change_stream) @no_type_check - @async_client_context.require_version_min(4, 1, 1) + @async_client_context.require_version_min(4, 2, 0) async def test_start_after(self): resume_token = await self.get_resume_token(invalidate=True) @@ -452,7 +452,7 @@ async def test_start_after(self): self.assertEqual(change["fullDocument"], {"_id": 2}) @no_type_check - @async_client_context.require_version_min(4, 1, 1) + @async_client_context.require_version_min(4, 2, 0) async def test_start_after_resume_process_with_changes(self): resume_token = await self.get_resume_token(invalidate=True) @@ -563,27 +563,16 @@ async def _test_update_resume_token(self, expected_rt_getter): ) # Prose test no. 1 - @async_client_context.require_version_min(4, 0, 7) + @async_client_context.require_version_min(4, 2, 0) async def test_update_resume_token(self): await self._test_update_resume_token(self._get_expected_resume_token) - # Prose test no. 1 - @async_client_context.require_version_max(4, 0, 7) - async def test_update_resume_token_legacy(self): - await self._test_update_resume_token(self._get_expected_resume_token_legacy) - # Prose test no. 2 - @async_client_context.require_version_min(4, 1, 8) + @async_client_context.require_version_min(4, 2, 0) async def test_raises_error_on_missing_id_418plus(self): # Server returns an error on 4.1.8+ await self._test_raises_error_on_missing_id(OperationFailure) - # Prose test no. 2 - @async_client_context.require_version_max(4, 1, 8) - async def test_raises_error_on_missing_id_418minus(self): - # PyMongo raises an error - await self._test_raises_error_on_missing_id(InvalidOperation) - # Prose test no. 3 @no_type_check async def test_resume_on_error(self): @@ -642,40 +631,12 @@ def raise_error(): cursor.close = raise_error await self.insert_one_and_check(change_stream, {"_id": 2}) - # Prose test no. 9 - @no_type_check - @async_client_context.require_version_min(4, 0, 0) - @async_client_context.require_version_max(4, 0, 7) - async def test_start_at_operation_time_caching(self): - # Case 1: change stream not started with startAtOperationTime - client, listener = self.client_with_listener("aggregate") - async with await self.change_stream_with_client(client) as cs: - await self.kill_change_stream_cursor(cs) - await cs.try_next() - cmd = listener.started_events[-1].command - self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) - - # Case 2: change stream started with startAtOperationTime - listener.reset() - optime = await self.get_start_at_operation_time() - async with await self.change_stream_with_client( - client, start_at_operation_time=optime - ) as cs: - await self.kill_change_stream_cursor(cs) - await cs.try_next() - cmd = listener.started_events[-1].command - self.assertEqual( - cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), - optime, - str([k.command for k in listener.started_events]), - ) - # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. # Prose test no. 11 @no_type_check - @async_client_context.require_version_min(4, 0, 7) + @async_client_context.require_version_min(4, 2, 0) async def test_resumetoken_empty_batch(self): client, listener = await self._client_with_listener("getMore") async with await self.change_stream_with_client(client) as change_stream: @@ -687,7 +648,7 @@ async def test_resumetoken_empty_batch(self): # Prose test no. 11 @no_type_check - @async_client_context.require_version_min(4, 0, 7) + @async_client_context.require_version_min(4, 2, 0) async def test_resumetoken_exhausted_batch(self): client, listener = await self._client_with_listener("getMore") async with await self.change_stream_with_client(client) as change_stream: @@ -697,38 +658,6 @@ async def test_resumetoken_exhausted_batch(self): response = listener.succeeded_events[-1].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) - # Prose test no. 12 - @no_type_check - @async_client_context.require_version_max(4, 0, 7) - async def test_resumetoken_empty_batch_legacy(self): - resume_point = await self.get_resume_token() - - # Empty resume token when neither resumeAfter or startAfter specified. - async with await self.change_stream() as change_stream: - await change_stream.try_next() - self.assertIsNone(change_stream.resume_token) - - # Resume token value is same as resumeAfter. - async with await self.change_stream(resume_after=resume_point) as change_stream: - await change_stream.try_next() - resume_token = change_stream.resume_token - self.assertEqual(resume_token, resume_point) - - # Prose test no. 12 - @no_type_check - @async_client_context.require_version_max(4, 0, 7) - async def test_resumetoken_exhausted_batch_legacy(self): - # Resume token is _id of last change. - async with await self.change_stream() as change_stream: - change = await self._populate_and_exhaust_change_stream(change_stream) - self.assertEqual(change_stream.resume_token, change["_id"]) - resume_point = change["_id"] - - # Resume token is _id of last change even if resumeAfter is specified. - async with await self.change_stream(resume_after=resume_point) as change_stream: - change = await self._populate_and_exhaust_change_stream(change_stream) - self.assertEqual(change_stream.resume_token, change["_id"]) - # Prose test no. 13 @no_type_check async def test_resumetoken_partially_iterated_batch(self): @@ -770,13 +699,13 @@ async def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): # Prose test no. 14 @no_type_check @async_client_context.require_no_mongos - @async_client_context.require_version_min(4, 1, 1) + @async_client_context.require_version_min(4, 2, 0) async def test_resumetoken_uniterated_nonempty_batch_startafter(self): await self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 17 @no_type_check - @async_client_context.require_version_min(4, 1, 1) + @async_client_context.require_version_min(4, 2, 0) async def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. resume_point = await self.get_resume_token() @@ -796,7 +725,7 @@ async def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Prose test no. 18 @no_type_check - @async_client_context.require_version_min(4, 1, 1) + @async_client_context.require_version_min(4, 2, 0) async def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. resume_point = await self.get_resume_token() @@ -843,7 +772,7 @@ async def test_split_large_change(self): class TestClusterAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): dbs: list - @async_client_context.require_version_min(4, 0, 0, -1) + @async_client_context.require_version_min(4, 2, 0) @async_client_context.require_change_streams async def asyncSetUp(self) -> None: await super().asyncSetUp() @@ -903,7 +832,7 @@ async def test_full_pipeline(self): class TestAsyncDatabaseAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): - @async_client_context.require_version_min(4, 0, 0, -1) + @async_client_context.require_version_min(4, 2, 0) @async_client_context.require_change_streams async def asyncSetUp(self) -> None: await super().asyncSetUp() diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py index 92c750c4fe..aed3c1ce7b 100644 --- a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -122,18 +122,12 @@ async def run_scenario(self, error_code, retry, pool_status_checker): async def test_not_primary_keep_connection_pool(self): await self.run_scenario(10107, True, self.verify_pool_not_cleared) - @async_client_context.require_version_min(4, 0, 0) - @async_client_context.require_version_max(4, 1, 0, -1) - @async_client_context.require_test_commands - async def test_not_primary_reset_connection_pool(self): - await self.run_scenario(10107, False, self.verify_pool_cleared) - - @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_min(4, 2, 0) @async_client_context.require_test_commands async def test_shutdown_in_progress(self): await self.run_scenario(91, False, self.verify_pool_cleared) - @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_min(4, 2, 0) @async_client_context.require_test_commands async def test_interrupted_at_shutdown(self): await self.run_scenario(11600, False, self.verify_pool_cleared) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 861345cb08..de836dbf80 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1190,15 +1190,6 @@ async def test_distinct(self): self.assertEqual(["b", "c"], distinct) - @async_client_context.require_version_max(4, 1, 0, -1) - async def test_max_scan(self): - await self.db.drop_collection("test") - await self.db.test.insert_many([{} for _ in range(100)]) - - self.assertEqual(100, len(await self.db.test.find().to_list())) - self.assertEqual(50, len(await self.db.test.find().max_scan(50).to_list())) - self.assertEqual(50, len(await self.db.test.find().max_scan(90).max_scan(50).to_list())) - async def test_with_statement(self): await self.db.drop_collection("test") await self.db.test.insert_many([{} for _ in range(100)]) @@ -1600,7 +1591,6 @@ async def test_get_item(self): async def test_collation(self): await anext(self.db.test.find_raw_batches(collation=Collation("en_US"))) - @async_client_context.require_no_mmap # MMAPv1 does not support read concern async def test_read_concern(self): await self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one( {} diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py index b3a51ae712..385f755a1d 100644 --- a/test/asynchronous/test_custom_types.py +++ b/test/asynchronous/test_custom_types.py @@ -953,7 +953,7 @@ async def create_targets(self, *args, **kwargs): class TestDatabaseChangeStreamsWCustomTypes( AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin ): - @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_min(4, 2, 0) @async_client_context.require_change_streams async def asyncSetUp(self): await super().asyncSetUp() @@ -973,7 +973,7 @@ async def create_targets(self, *args, **kwargs): class TestClusterChangeStreamsWCustomTypes( AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin ): - @async_client_context.require_version_min(4, 0, 0) + @async_client_context.require_version_min(4, 2, 0) @async_client_context.require_change_streams async def asyncSetUp(self): await super().asyncSetUp() diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 9093b97ab4..a766e63915 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -451,20 +451,6 @@ class TestClientMaxWireVersion(AsyncIntegrationTest): async def asyncSetUp(self): await super().asyncSetUp() - @async_client_context.require_version_max(4, 0, 99) - async def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = await self.async_rs_or_single_client(auto_encryption_opts=opts) - msg = "Auto-encryption requires a minimum MongoDB version of 4.2" - with self.assertRaisesRegex(ConfigurationError, msg): - await client.test.test.insert_one({}) - with self.assertRaisesRegex(ConfigurationError, msg): - await client.admin.command("ping") - with self.assertRaisesRegex(ConfigurationError, msg): - await client.test.test.find_one({}) - with self.assertRaisesRegex(ConfigurationError, msg): - await client.test.test.bulk_write([InsertOne({})]) - async def test_raise_unsupported_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = await self.async_rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py index a4ebf72df9..dd27623654 100644 --- a/test/asynchronous/test_examples.py +++ b/test/asynchronous/test_examples.py @@ -1162,7 +1162,6 @@ async def callback(session): class TestCausalConsistencyExamples(AsyncIntegrationTest): @async_client_context.require_secondaries_count(1) - @async_client_context.require_no_mmap async def test_causal_consistency(self): # Causal consistency examples client = self.client diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py index b399fa50e4..4fe5e5e37f 100644 --- a/test/asynchronous/test_retryable_writes.py +++ b/test/asynchronous/test_retryable_writes.py @@ -140,40 +140,10 @@ async def asyncTearDown(self) -> None: self.deprecation_filter.stop() -class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): - knobs: client_knobs - - async def asyncSetUp(self) -> None: - await super().asyncSetUp() - # Speed up the tests by decreasing the heartbeat frequency. - self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - self.knobs.enable() - self.client = await self.async_rs_or_single_client(retryWrites=True) - self.db = self.client.pymongo_test - - async def asyncTearDown(self) -> None: - self.knobs.disable() - - @async_client_context.require_no_standalone - async def test_actionable_error_message(self): - if async_client_context.storage_engine != "mmapv1": - raise SkipTest("This cluster is not running MMAPv1") - - expected_msg = ( - "This MongoDB deployment does not support retryable " - "writes. Please add retryWrites=false to your " - "connection string." - ) - for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - with self.assertRaisesRegex(OperationFailure, expected_msg): - await method(*args, **kwargs) - - class TestRetryableWrites(IgnoreDeprecationsTest): listener: OvertCommandListener knobs: client_knobs - @async_client_context.require_no_mmap async def asyncSetUp(self) -> None: await super().asyncSetUp() # Speed up the tests by decreasing the heartbeat frequency. @@ -425,7 +395,6 @@ class TestWriteConcernError(AsyncIntegrationTest): fail_insert: dict @async_client_context.require_replica_set - @async_client_context.require_no_mmap @async_client_context.require_failCommand_fail_point async def asyncSetUp(self) -> None: await super().asyncSetUp() @@ -596,7 +565,6 @@ async def test_returns_original_error_code( # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): @async_client_context.require_replica_set - @async_client_context.require_no_mmap async def test_increment_transaction_id_without_sending_command(self): """Test that the txnNumber field is properly incremented, even when the first attempt fails before sending the command. diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index bf2bce27ea..d357948ed0 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -1045,14 +1045,6 @@ async def test_writes_do_not_include_read_concern(self): lambda coll, session: coll.find({}, session=session).explain() ) - @async_client_context.require_no_standalone - @async_client_context.require_version_max(4, 1, 0) - async def test_aggregate_out_does_not_include_read_concern(self): - async def alambda(coll, session): - await (await coll.aggregate([{"$out": "aggout"}], session=session)).to_list() - - await self._test_no_read_concern(alambda) - @async_client_context.require_no_standalone async def test_get_more_does_not_include_read_concern(self): coll = self.client.pymongo_test.test @@ -1095,7 +1087,6 @@ async def test_server_not_causal(self): self.assertIsNone(act) @async_client_context.require_no_standalone - @async_client_context.require_no_mmap async def test_read_concern(self): async with self.client.start_session(causal_consistency=True) as s: coll = self.client.pymongo_test.test diff --git a/test/asynchronous/test_transactions_unified.py b/test/asynchronous/test_transactions_unified.py index 4519a0e39a..8e5b1ae181 100644 --- a/test/asynchronous/test_transactions_unified.py +++ b/test/asynchronous/test_transactions_unified.py @@ -27,7 +27,6 @@ _IS_SYNC = False -@client_context.require_no_mmap def setUpModule(): pass diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index fbd1f87755..5f6468b952 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -493,11 +493,6 @@ async def asyncSetUp(self): raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here - if async_client_context.storage_engine == "mmapv1": - if "retryable-writes" in self.TEST_SPEC["description"] or "retryable_writes" in str( - self.TEST_PATH - ): - raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") # Handle mongos_clients for transactions tests. self.mongos_clients = [] @@ -519,13 +514,6 @@ async def asyncSetUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if async_client_context.storage_engine == "mmapv1": - if ( - "Dirty explicit session is discarded" in spec["description"] - or "Dirty implicit session is discarded" in spec["description"] - or "Cancel server check" in spec["description"] - ): - self.skipTest("MMAPv1 does not support retryWrites=True") if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: @@ -544,10 +532,6 @@ def maybe_skip_test(self, spec): if "csot" in class_name: if "gridfs" in class_name and sys.platform == "win32": self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") - if async_client_context.storage_engine == "mmapv1": - self.skipTest( - "MMAPv1 does not support retryable writes which is required for CSOT tests" - ) if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: @@ -572,11 +556,6 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support count()") if name == "listIndexNames": self.skipTest("PyMongo does not support list_index_names()") - if async_client_context.storage_engine == "mmapv1": - if name == "createChangeStream": - self.skipTest("MMAPv1 does not support change streams") - if name == "withTransaction" or name == "startTransaction": - self.skipTest("MMAPv1 does not support document-level locking") if not async_client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") @@ -682,8 +661,6 @@ def __raise_if_unsupported(self, opname, target, *target_types): self.fail(f"Operation {opname} not supported for entity of type {type(target)}") async def __entityOperation_createChangeStream(self, target, *args, **kwargs): - if async_client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support change streams") self.__raise_if_unsupported( "createChangeStream", target, AsyncMongoClient, AsyncDatabase, AsyncCollection ) @@ -810,14 +787,10 @@ async def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): return await (await target.list_search_indexes(name, **agg_kwargs)).to_list() async def _sessionOperation_withTransaction(self, target, *args, **kwargs): - if async_client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support document-level locking") self.__raise_if_unsupported("withTransaction", target, AsyncClientSession) return await target.with_transaction(*args, **kwargs) async def _sessionOperation_startTransaction(self, target, *args, **kwargs): - if async_client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support document-level locking") self.__raise_if_unsupported("startTransaction", target, AsyncClientSession) return await target.start_transaction(*args, **kwargs) diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py index da36209166..3ce2984c62 100644 --- a/test/asynchronous/utils_spec_runner.py +++ b/test/asynchronous/utils_spec_runner.py @@ -648,12 +648,6 @@ async def run_scenario(self, scenario_def, test): server_listener = ServerAndTopologyEventListener() # Create a new client, to avoid interference from pooled sessions. client_options = self.parse_client_options(test["clientOptions"]) - # MMAPv1 does not support retryable writes. - if ( - client_options.get("retryWrites") is True - and async_client_context.storage_engine == "mmapv1" - ): - self.skipTest("MMAPv1 does not support retryWrites=True") use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json deleted file mode 100644 index 9f6ea212e5..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 InterruptedAtShutdown error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 InterruptedAtShutdown error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "InterruptedAtShutdown", - "code": 11600 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json deleted file mode 100644 index 7e5f235713..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 InterruptedDueToReplStateChange error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 InterruptedDueToReplStateChange error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "InterruptedDueToReplStateChange", - "code": 11602 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json deleted file mode 100644 index 1635f1a856..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 LegacyNotPrimary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "LegacyNotPrimary", - "code": 10058 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json deleted file mode 100644 index 0e70ede02c..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotPrimaryNoSecondaryOk error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotPrimaryNoSecondaryOk error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotPrimaryNoSecondaryOk", - "code": 13435 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json deleted file mode 100644 index 3fefb21663..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotPrimaryOrSecondary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotPrimaryOrSecondary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotPrimaryOrSecondary", - "code": 13436 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json deleted file mode 100644 index d010da0a5b..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotWritablePrimary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotWritablePrimary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotWritablePrimary", - "code": 10107 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json deleted file mode 100644 index 02956d201d..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 PrimarySteppedDown error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 PrimarySteppedDown error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "PrimarySteppedDown", - "code": 189 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json deleted file mode 100644 index fc3a5aa6fe..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 ShutdownInProgress error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 ShutdownInProgress error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "ShutdownInProgress", - "code": 91 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json index 7261fbfc2a..8a77f31c50 100644 --- a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -18,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], @@ -66,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], @@ -116,7 +116,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], @@ -167,7 +167,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json index 4c40093659..a55dcfc6d4 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -18,7 +18,7 @@ "primary": "localhost:27017", "me": "a:27017", "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 25 } ] ], @@ -55,7 +55,7 @@ "primary": "localhost:27017", "me": "localhost:27018", "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 25 } ] ], diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json index 87029e578b..9a1ee61399 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json index a63efeac12..03195aacde 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], @@ -64,7 +64,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], @@ -109,7 +109,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 16 } ] ], diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 89b897f479..7538540bda 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -40,7 +40,7 @@ class TestCursorNamespace(PyMongoTestCase): @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={"maxWireVersion": 7}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 8}) cls.server.run() cls.client = cls.unmanaged_simple_client(cls.server.uri) diff --git a/test/test_bulk.py b/test/test_bulk.py index 77d0d6c06e..ac6f760d37 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -165,7 +165,7 @@ def _test_update_many(self, update): def test_update_many(self): self._test_update_many({"$set": {"foo": "bar"}}) - @client_context.require_version_min(4, 1, 11) + @client_context.require_version_min(4, 2, 0) def test_update_many_pipeline(self): self._test_update_many([{"$set": {"foo": "bar"}}]) @@ -206,7 +206,7 @@ def _test_update_one(self, update): def test_update_one(self): self._test_update_one({"$set": {"foo": "bar"}}) - @client_context.require_version_min(4, 1, 11) + @client_context.require_version_min(4, 2, 0) def test_update_one_pipeline(self): self._test_update_one([{"$set": {"foo": "bar"}}]) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 6099829031..59cad8925b 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -263,7 +263,7 @@ def test_batch_size_is_honored(self): # $changeStream.startAtOperationTime was added in 4.0.0. @no_type_check - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() @@ -432,7 +432,7 @@ def test_change_operations(self): self._test_get_invalidate_event(change_stream) @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_start_after(self): resume_token = self.get_resume_token(invalidate=True) @@ -448,7 +448,7 @@ def test_start_after(self): self.assertEqual(change["fullDocument"], {"_id": 2}) @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -553,27 +553,16 @@ def _test_update_resume_token(self, expected_rt_getter): ) # Prose test no. 1 - @client_context.require_version_min(4, 0, 7) + @client_context.require_version_min(4, 2, 0) def test_update_resume_token(self): self._test_update_resume_token(self._get_expected_resume_token) - # Prose test no. 1 - @client_context.require_version_max(4, 0, 7) - def test_update_resume_token_legacy(self): - self._test_update_resume_token(self._get_expected_resume_token_legacy) - # Prose test no. 2 - @client_context.require_version_min(4, 1, 8) + @client_context.require_version_min(4, 2, 0) def test_raises_error_on_missing_id_418plus(self): # Server returns an error on 4.1.8+ self._test_raises_error_on_missing_id(OperationFailure) - # Prose test no. 2 - @client_context.require_version_max(4, 1, 8) - def test_raises_error_on_missing_id_418minus(self): - # PyMongo raises an error - self._test_raises_error_on_missing_id(InvalidOperation) - # Prose test no. 3 @no_type_check def test_resume_on_error(self): @@ -632,38 +621,12 @@ def raise_error(): cursor.close = raise_error self.insert_one_and_check(change_stream, {"_id": 2}) - # Prose test no. 9 - @no_type_check - @client_context.require_version_min(4, 0, 0) - @client_context.require_version_max(4, 0, 7) - def test_start_at_operation_time_caching(self): - # Case 1: change stream not started with startAtOperationTime - client, listener = self.client_with_listener("aggregate") - with self.change_stream_with_client(client) as cs: - self.kill_change_stream_cursor(cs) - cs.try_next() - cmd = listener.started_events[-1].command - self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) - - # Case 2: change stream started with startAtOperationTime - listener.reset() - optime = self.get_start_at_operation_time() - with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: - self.kill_change_stream_cursor(cs) - cs.try_next() - cmd = listener.started_events[-1].command - self.assertEqual( - cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), - optime, - str([k.command for k in listener.started_events]), - ) - # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. # Prose test no. 11 @no_type_check - @client_context.require_version_min(4, 0, 7) + @client_context.require_version_min(4, 2, 0) def test_resumetoken_empty_batch(self): client, listener = self._client_with_listener("getMore") with self.change_stream_with_client(client) as change_stream: @@ -675,7 +638,7 @@ def test_resumetoken_empty_batch(self): # Prose test no. 11 @no_type_check - @client_context.require_version_min(4, 0, 7) + @client_context.require_version_min(4, 2, 0) def test_resumetoken_exhausted_batch(self): client, listener = self._client_with_listener("getMore") with self.change_stream_with_client(client) as change_stream: @@ -685,38 +648,6 @@ def test_resumetoken_exhausted_batch(self): response = listener.succeeded_events[-1].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) - # Prose test no. 12 - @no_type_check - @client_context.require_version_max(4, 0, 7) - def test_resumetoken_empty_batch_legacy(self): - resume_point = self.get_resume_token() - - # Empty resume token when neither resumeAfter or startAfter specified. - with self.change_stream() as change_stream: - change_stream.try_next() - self.assertIsNone(change_stream.resume_token) - - # Resume token value is same as resumeAfter. - with self.change_stream(resume_after=resume_point) as change_stream: - change_stream.try_next() - resume_token = change_stream.resume_token - self.assertEqual(resume_token, resume_point) - - # Prose test no. 12 - @no_type_check - @client_context.require_version_max(4, 0, 7) - def test_resumetoken_exhausted_batch_legacy(self): - # Resume token is _id of last change. - with self.change_stream() as change_stream: - change = self._populate_and_exhaust_change_stream(change_stream) - self.assertEqual(change_stream.resume_token, change["_id"]) - resume_point = change["_id"] - - # Resume token is _id of last change even if resumeAfter is specified. - with self.change_stream(resume_after=resume_point) as change_stream: - change = self._populate_and_exhaust_change_stream(change_stream) - self.assertEqual(change_stream.resume_token, change["_id"]) - # Prose test no. 13 @no_type_check def test_resumetoken_partially_iterated_batch(self): @@ -758,13 +689,13 @@ def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): # Prose test no. 14 @no_type_check @client_context.require_no_mongos - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_resumetoken_uniterated_nonempty_batch_startafter(self): self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 17 @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. resume_point = self.get_resume_token() @@ -782,7 +713,7 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Prose test no. 18 @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. resume_point = self.get_resume_token() @@ -827,7 +758,7 @@ def test_split_large_change(self): class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): dbs: list - @client_context.require_version_min(4, 0, 0, -1) + @client_context.require_version_min(4, 2, 0) @client_context.require_change_streams def setUp(self) -> None: super().setUp() @@ -887,7 +818,7 @@ def test_full_pipeline(self): class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): - @client_context.require_version_min(4, 0, 0, -1) + @client_context.require_version_min(4, 2, 0) @client_context.require_change_streams def setUp(self) -> None: super().setUp() diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index d923a477b5..8e9a3b8e62 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -122,18 +122,12 @@ def run_scenario(self, error_code, retry, pool_status_checker): def test_not_primary_keep_connection_pool(self): self.run_scenario(10107, True, self.verify_pool_not_cleared) - @client_context.require_version_min(4, 0, 0) - @client_context.require_version_max(4, 1, 0, -1) - @client_context.require_test_commands - def test_not_primary_reset_connection_pool(self): - self.run_scenario(10107, False, self.verify_pool_cleared) - - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) @client_context.require_test_commands def test_shutdown_in_progress(self): self.run_scenario(91, False, self.verify_pool_cleared) - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) @client_context.require_test_commands def test_interrupted_at_shutdown(self): self.run_scenario(11600, False, self.verify_pool_cleared) diff --git a/test/test_cursor.py b/test/test_cursor.py index c33f509565..83f2b79316 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1181,15 +1181,6 @@ def test_distinct(self): self.assertEqual(["b", "c"], distinct) - @client_context.require_version_max(4, 1, 0, -1) - def test_max_scan(self): - self.db.drop_collection("test") - self.db.test.insert_many([{} for _ in range(100)]) - - self.assertEqual(100, len(self.db.test.find().to_list())) - self.assertEqual(50, len(self.db.test.find().max_scan(50).to_list())) - self.assertEqual(50, len(self.db.test.find().max_scan(90).max_scan(50).to_list())) - def test_with_statement(self): self.db.drop_collection("test") self.db.test.insert_many([{} for _ in range(100)]) @@ -1591,7 +1582,6 @@ def test_get_item(self): def test_collation(self): next(self.db.test.find_raw_batches(collation=Collation("en_US"))) - @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one({}) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 9e8dbcfbeb..7360f2b18b 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -949,7 +949,7 @@ def create_targets(self, *args, **kwargs): class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) @client_context.require_change_streams def setUp(self): super().setUp() @@ -967,7 +967,7 @@ def create_targets(self, *args, **kwargs): class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) @client_context.require_change_streams def setUp(self): super().setUp() diff --git a/test/test_encryption.py b/test/test_encryption.py index 3a86838af3..baaefa1e73 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -451,20 +451,6 @@ class TestClientMaxWireVersion(IntegrationTest): def setUp(self): super().setUp() - @client_context.require_version_max(4, 0, 99) - def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = self.rs_or_single_client(auto_encryption_opts=opts) - msg = "Auto-encryption requires a minimum MongoDB version of 4.2" - with self.assertRaisesRegex(ConfigurationError, msg): - client.test.test.insert_one({}) - with self.assertRaisesRegex(ConfigurationError, msg): - client.admin.command("ping") - with self.assertRaisesRegex(ConfigurationError, msg): - client.test.test.find_one({}) - with self.assertRaisesRegex(ConfigurationError, msg): - client.test.test.bulk_write([InsertOne({})]) - def test_raise_unsupported_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") client = self.rs_or_single_client(auto_encryption_opts=opts) diff --git a/test/test_examples.py b/test/test_examples.py index bda5403200..13f0c94c56 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -1160,7 +1160,6 @@ def callback(session): class TestCausalConsistencyExamples(IntegrationTest): @client_context.require_secondaries_count(1) - @client_context.require_no_mmap def test_causal_consistency(self): # Causal consistency examples client = self.client diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index ad5b0671e7..2ac08691cf 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -140,40 +140,10 @@ def tearDown(self) -> None: self.deprecation_filter.stop() -class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): - knobs: client_knobs - - def setUp(self) -> None: - super().setUp() - # Speed up the tests by decreasing the heartbeat frequency. - self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - self.knobs.enable() - self.client = self.rs_or_single_client(retryWrites=True) - self.db = self.client.pymongo_test - - def tearDown(self) -> None: - self.knobs.disable() - - @client_context.require_no_standalone - def test_actionable_error_message(self): - if client_context.storage_engine != "mmapv1": - raise SkipTest("This cluster is not running MMAPv1") - - expected_msg = ( - "This MongoDB deployment does not support retryable " - "writes. Please add retryWrites=false to your " - "connection string." - ) - for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - with self.assertRaisesRegex(OperationFailure, expected_msg): - method(*args, **kwargs) - - class TestRetryableWrites(IgnoreDeprecationsTest): listener: OvertCommandListener knobs: client_knobs - @client_context.require_no_mmap def setUp(self) -> None: super().setUp() # Speed up the tests by decreasing the heartbeat frequency. @@ -423,7 +393,6 @@ class TestWriteConcernError(IntegrationTest): fail_insert: dict @client_context.require_replica_set - @client_context.require_no_mmap @client_context.require_failCommand_fail_point def setUp(self) -> None: super().setUp() @@ -592,7 +561,6 @@ def test_returns_original_error_code( # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): @client_context.require_replica_set - @client_context.require_no_mmap def test_increment_transaction_id_without_sending_command(self): """Test that the txnNumber field is properly incremented, even when the first attempt fails before sending the command. diff --git a/test/test_session.py b/test/test_session.py index 89670df9ee..d8add9f3b6 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -1031,14 +1031,6 @@ def test_writes_do_not_include_read_concern(self): # Not a write, but explain also doesn't support readConcern. self._test_no_read_concern(lambda coll, session: coll.find({}, session=session).explain()) - @client_context.require_no_standalone - @client_context.require_version_max(4, 1, 0) - def test_aggregate_out_does_not_include_read_concern(self): - def alambda(coll, session): - (coll.aggregate([{"$out": "aggout"}], session=session)).to_list() - - self._test_no_read_concern(alambda) - @client_context.require_no_standalone def test_get_more_does_not_include_read_concern(self): coll = self.client.pymongo_test.test @@ -1081,7 +1073,6 @@ def test_server_not_causal(self): self.assertIsNone(act) @client_context.require_no_standalone - @client_context.require_no_mmap def test_read_concern(self): with self.client.start_session(causal_consistency=True) as s: coll = self.client.pymongo_test.test diff --git a/test/test_topology.py b/test/test_topology.py index 141b2d7f21..530cecd1f7 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -559,7 +559,7 @@ def test_wire_version(self): ) self.assertEqual(server.description.min_wire_version, 1) - self.assertEqual(server.description.max_wire_version, 7) + self.assertEqual(server.description.max_wire_version, 8) t.select_servers(any_server_selector, _Op.TEST) # Incompatible. diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py index 641e05108a..4ab4885e2a 100644 --- a/test/test_transactions_unified.py +++ b/test/test_transactions_unified.py @@ -27,7 +27,6 @@ _IS_SYNC = True -@client_context.require_no_mmap def setUpModule(): pass diff --git a/test/unified_format.py b/test/unified_format.py index 0db037c654..e45922819d 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -492,11 +492,6 @@ def setUp(self): raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here - if client_context.storage_engine == "mmapv1": - if "retryable-writes" in self.TEST_SPEC["description"] or "retryable_writes" in str( - self.TEST_PATH - ): - raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") # Handle mongos_clients for transactions tests. self.mongos_clients = [] @@ -518,13 +513,6 @@ def setUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if client_context.storage_engine == "mmapv1": - if ( - "Dirty explicit session is discarded" in spec["description"] - or "Dirty implicit session is discarded" in spec["description"] - or "Cancel server check" in spec["description"] - ): - self.skipTest("MMAPv1 does not support retryWrites=True") if "Client side error in command starting transaction" in spec["description"]: self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: @@ -543,10 +531,6 @@ def maybe_skip_test(self, spec): if "csot" in class_name: if "gridfs" in class_name and sys.platform == "win32": self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") - if client_context.storage_engine == "mmapv1": - self.skipTest( - "MMAPv1 does not support retryable writes which is required for CSOT tests" - ) if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: @@ -571,11 +555,6 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support count()") if name == "listIndexNames": self.skipTest("PyMongo does not support list_index_names()") - if client_context.storage_engine == "mmapv1": - if name == "createChangeStream": - self.skipTest("MMAPv1 does not support change streams") - if name == "withTransaction" or name == "startTransaction": - self.skipTest("MMAPv1 does not support document-level locking") if not client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") @@ -681,8 +660,6 @@ def __raise_if_unsupported(self, opname, target, *target_types): self.fail(f"Operation {opname} not supported for entity of type {type(target)}") def __entityOperation_createChangeStream(self, target, *args, **kwargs): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support change streams") self.__raise_if_unsupported("createChangeStream", target, MongoClient, Database, Collection) stream = target.watch(*args, **kwargs) self.addCleanup(stream.close) @@ -807,14 +784,10 @@ def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): return (target.list_search_indexes(name, **agg_kwargs)).to_list() def _sessionOperation_withTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support document-level locking") self.__raise_if_unsupported("withTransaction", target, ClientSession) return target.with_transaction(*args, **kwargs) def _sessionOperation_startTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support document-level locking") self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index 3278063b4a..c0a8c81e30 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -648,9 +648,6 @@ def run_scenario(self, scenario_def, test): server_listener = ServerAndTopologyEventListener() # Create a new client, to avoid interference from pooled sessions. client_options = self.parse_client_options(test["clientOptions"]) - # MMAPv1 does not support retryable writes. - if client_options.get("retryWrites") is True and client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support retryWrites=True") use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: From 65f7c542088356bba78bd70d68b7a4881cab7f8b Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 24 Jun 2025 09:34:53 -0700 Subject: [PATCH 1960/2111] PYTHON-5344 and PYTHON-5403 Allow Instantiated MongoClients to Send Client Metadata On-Demand (#2358) --- .evergreen/resync-specs.sh | 3 + doc/changelog.rst | 3 + pymongo/asynchronous/mongo_client.py | 15 ++ pymongo/pool_options.py | 32 ++- pymongo/synchronous/mongo_client.py | 15 ++ test/asynchronous/test_client_metadata.py | 215 ++++++++++++++++++ test/asynchronous/unified_format.py | 6 + .../unified/metadata-not-propagated.json | 100 ++++++++ test/test_client_metadata.py | 215 ++++++++++++++++++ test/unified_format.py | 6 + tools/synchro.py | 1 + 11 files changed, 599 insertions(+), 12 deletions(-) create mode 100644 test/asynchronous/test_client_metadata.py create mode 100644 test/handshake/unified/metadata-not-propagated.json create mode 100644 test/test_client_metadata.py diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 1f70940aa0..d7dfafbba9 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -131,6 +131,9 @@ do gridfs) cpjson gridfs/tests gridfs ;; + handshake) + cpjson mongodb-handshake/tests handshake + ;; index|index-management) cpjson index-management/tests index_management ;; diff --git a/doc/changelog.rst b/doc/changelog.rst index 35a9770a14..2fd1bdd6b9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -7,6 +7,9 @@ PyMongo 4.14 brings a number of changes including: - Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. +- Added :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and + :meth:`pymongo.mongo_client.MongoClient.append_metadata` to allow instantiated MongoClients to send client metadata + on-demand - Introduces a minor breaking change. When encoding :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised if the 'padding' metadata field is < 0 or > 7, or non-zero for any type other than PACKED_BIT. diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 72755263c9..3488030166 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -70,6 +70,7 @@ from pymongo.asynchronous.settings import TopologySettings from pymongo.asynchronous.topology import Topology, _ErrorContext from pymongo.client_options import ClientOptions +from pymongo.driver_info import DriverInfo from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -1040,6 +1041,20 @@ async def target() -> bool: self._kill_cursors_executor = executor self._opened = False + def append_metadata(self, driver_info: DriverInfo) -> None: + """Appends the given metadata to existing driver metadata. + + :param driver_info: a :class:`~pymongo.driver_info.DriverInfo` + + .. versionadded:: 4.14 + """ + + if not isinstance(driver_info, DriverInfo): + raise TypeError( + f"driver_info must be an instance of DriverInfo, not {type(driver_info)}" + ) + self._options.pool_options._update_metadata(driver_info) + def _should_pin_cursor(self, session: Optional[AsyncClientSession]) -> Optional[bool]: return self._options.load_balanced and not (session and session.in_transaction) diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index a2e309cc56..5c24709b16 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -376,18 +376,7 @@ def __init__( "async", ) if driver: - if driver.name: - self.__metadata["driver"]["name"] = "{}|{}".format( - self.__metadata["driver"]["name"], - driver.name, - ) - if driver.version: - self.__metadata["driver"]["version"] = "{}|{}".format( - _METADATA["driver"]["version"], - driver.version, - ) - if driver.platform: - self.__metadata["platform"] = "{}|{}".format(_METADATA["platform"], driver.platform) + self._update_metadata(driver) env = _metadata_env() if env: @@ -395,6 +384,25 @@ def __init__( _truncate_metadata(self.__metadata) + def _update_metadata(self, driver: DriverInfo) -> None: + """Updates the client's metadata""" + + metadata = copy.deepcopy(self.__metadata) + if driver.name: + metadata["driver"]["name"] = "{}|{}".format( + metadata["driver"]["name"], + driver.name, + ) + if driver.version: + metadata["driver"]["version"] = "{}|{}".format( + metadata["driver"]["version"], + driver.version, + ) + if driver.platform: + metadata["platform"] = "{}|{}".format(metadata["platform"], driver.platform) + + self.__metadata = metadata + @property def _credentials(self) -> Optional[MongoCredential]: """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 99a517e5c1..1fd506e052 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -62,6 +62,7 @@ from bson.timestamp import Timestamp from pymongo import _csot, common, helpers_shared, periodic_executor from pymongo.client_options import ClientOptions +from pymongo.driver_info import DriverInfo from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -1040,6 +1041,20 @@ def target() -> bool: self._kill_cursors_executor = executor self._opened = False + def append_metadata(self, driver_info: DriverInfo) -> None: + """Appends the given metadata to existing driver metadata. + + :param driver_info: a :class:`~pymongo.driver_info.DriverInfo` + + .. versionadded:: 4.14 + """ + + if not isinstance(driver_info, DriverInfo): + raise TypeError( + f"driver_info must be an instance of DriverInfo, not {type(driver_info)}" + ) + self._options.pool_options._update_metadata(driver_info) + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: return self._options.load_balanced and not (session and session.in_transaction) diff --git a/test/asynchronous/test_client_metadata.py b/test/asynchronous/test_client_metadata.py new file mode 100644 index 0000000000..cfecb49748 --- /dev/null +++ b/test/asynchronous/test_client_metadata.py @@ -0,0 +1,215 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import asyncio +import os +import pathlib +import time +import unittest +from test.asynchronous import AsyncIntegrationTest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import CMAPListener +from typing import Any, Optional + +import pytest + +from pymongo import AsyncMongoClient +from pymongo.driver_info import DriverInfo +from pymongo.monitoring import ConnectionClosedEvent + +try: + from mockupdb import MockupDB, OpMsgReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +pytestmark = pytest.mark.mockupdb + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "handshake", "unified") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "handshake", "unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +def _get_handshake_driver_info(request): + assert "client" in request + return request["client"] + + +class TestClientMetadataProse(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.server = MockupDB() + self.handshake_req = None + + def respond(r): + if "ismaster" in r: + # then this is a handshake request + self.handshake_req = r + return r.reply(OpMsgReply(maxWireVersion=13)) + + self.server.autoresponds(respond) + self.server.run() + self.addAsyncCleanup(self.server.stop) + + async def send_ping_and_get_metadata( + self, client: AsyncMongoClient, is_handshake: bool + ) -> tuple[str, Optional[str], Optional[str], dict[str, Any]]: + # reset if handshake request + if is_handshake: + self.handshake_req: Optional[dict] = None + + await client.admin.command("ping") + metadata = _get_handshake_driver_info(self.handshake_req) + driver_metadata = metadata["driver"] + name, version, platform = ( + driver_metadata["name"], + driver_metadata["version"], + metadata["platform"], + ) + return name, version, platform, metadata + + async def check_metadata_added( + self, + client: AsyncMongoClient, + add_name: str, + add_version: Optional[str], + add_platform: Optional[str], + ) -> None: + # send initial metadata + name, version, platform, metadata = await self.send_ping_and_get_metadata(client, True) + # wait for connection to become idle + await asyncio.sleep(0.005) + + # add new metadata + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + new_name, new_version, new_platform, new_metadata = await self.send_ping_and_get_metadata( + client, True + ) + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) + + metadata.pop("driver") + metadata.pop("platform") + new_metadata.pop("driver") + new_metadata.pop("platform") + self.assertEqual(metadata, new_metadata) + + async def test_append_metadata(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + async def test_append_metadata_platform_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", "2.0", None) + + async def test_append_metadata_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", None, "Framework Platform") + + async def test_append_metadata_platform_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", None, None) + + async def test_multiple_successive_metadata_updates(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, maxIdleTimeMS=1, connect=False + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + async def test_multiple_successive_metadata_updates_platform_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", "2.0", None) + + async def test_multiple_successive_metadata_updates_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, "Framework Platform") + + async def test_multiple_successive_metadata_updates_platform_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, None) + + async def test_doesnt_update_established_connections(self): + listener = CMAPListener() + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + event_listeners=[listener], + ) + + # send initial metadata + name, version, platform, metadata = await self.send_ping_and_get_metadata(client, True) + self.assertIsNotNone(name) + self.assertIsNotNone(version) + self.assertIsNotNone(platform) + + # add data + add_name, add_version, add_platform = "framework", "2.0", "Framework Platform" + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + # check new data isn't sent + self.handshake_req: Optional[dict] = None + await client.admin.command("ping") + self.assertIsNone(self.handshake_req) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 5f6468b952..5b66d12813 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -75,6 +75,7 @@ from pymongo.asynchronous.database import AsyncDatabase from pymongo.asynchronous.encryption import AsyncClientEncryption from pymongo.asynchronous.helpers import anext +from pymongo.driver_info import DriverInfo from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( AutoReconnect, @@ -813,6 +814,11 @@ async def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor, AsyncCommandCursor) return await target.close() + async def _clientOperation_appendMetadata(self, target, *args, **kwargs): + info_opts = kwargs["driver_info_options"] + driver_info = DriverInfo(info_opts["name"], info_opts["version"], info_opts["platform"]) + target.append_metadata(driver_info) + async def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): if "opts" in kwargs: kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) diff --git a/test/handshake/unified/metadata-not-propagated.json b/test/handshake/unified/metadata-not-propagated.json new file mode 100644 index 0000000000..500b579b89 --- /dev/null +++ b/test/handshake/unified/metadata-not-propagated.json @@ -0,0 +1,100 @@ +{ + "description": "client metadata is not propagated to the server", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "6.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandSucceededEvent", + "commandFailedEvent", + "connectionClosedEvent", + "connectionCreatedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "metadata append does not create new connections or close existing ones and no hello command is sent", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "appendMetadata", + "object": "client", + "arguments": { + "driverInfoOptions": { + "name": "framework", + "version": "2.0", + "platform": "Framework Platform" + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + } + ] + }, + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandSucceededEvent": { + "commandName": "ping" + } + }, + { + "commandSucceededEvent": { + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/test_client_metadata.py b/test/test_client_metadata.py new file mode 100644 index 0000000000..32cb9b8009 --- /dev/null +++ b/test/test_client_metadata.py @@ -0,0 +1,215 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import asyncio +import os +import pathlib +import time +import unittest +from test import IntegrationTest +from test.unified_format import generate_test_classes +from test.utils_shared import CMAPListener +from typing import Any, Optional + +import pytest + +from pymongo import MongoClient +from pymongo.driver_info import DriverInfo +from pymongo.monitoring import ConnectionClosedEvent + +try: + from mockupdb import MockupDB, OpMsgReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +pytestmark = pytest.mark.mockupdb + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "handshake", "unified") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "handshake", "unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +def _get_handshake_driver_info(request): + assert "client" in request + return request["client"] + + +class TestClientMetadataProse(IntegrationTest): + def setUp(self): + super().setUp() + self.server = MockupDB() + self.handshake_req = None + + def respond(r): + if "ismaster" in r: + # then this is a handshake request + self.handshake_req = r + return r.reply(OpMsgReply(maxWireVersion=13)) + + self.server.autoresponds(respond) + self.server.run() + self.addCleanup(self.server.stop) + + def send_ping_and_get_metadata( + self, client: MongoClient, is_handshake: bool + ) -> tuple[str, Optional[str], Optional[str], dict[str, Any]]: + # reset if handshake request + if is_handshake: + self.handshake_req: Optional[dict] = None + + client.admin.command("ping") + metadata = _get_handshake_driver_info(self.handshake_req) + driver_metadata = metadata["driver"] + name, version, platform = ( + driver_metadata["name"], + driver_metadata["version"], + metadata["platform"], + ) + return name, version, platform, metadata + + def check_metadata_added( + self, + client: MongoClient, + add_name: str, + add_version: Optional[str], + add_platform: Optional[str], + ) -> None: + # send initial metadata + name, version, platform, metadata = self.send_ping_and_get_metadata(client, True) + # wait for connection to become idle + time.sleep(0.005) + + # add new metadata + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + new_name, new_version, new_platform, new_metadata = self.send_ping_and_get_metadata( + client, True + ) + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) + + metadata.pop("driver") + metadata.pop("platform") + new_metadata.pop("driver") + new_metadata.pop("platform") + self.assertEqual(metadata, new_metadata) + + def test_append_metadata(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + def test_append_metadata_platform_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", "2.0", None) + + def test_append_metadata_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", None, "Framework Platform") + + def test_append_metadata_platform_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", None, None) + + def test_multiple_successive_metadata_updates(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, maxIdleTimeMS=1, connect=False + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + def test_multiple_successive_metadata_updates_platform_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", "2.0", None) + + def test_multiple_successive_metadata_updates_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, "Framework Platform") + + def test_multiple_successive_metadata_updates_platform_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, None) + + def test_doesnt_update_established_connections(self): + listener = CMAPListener() + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + event_listeners=[listener], + ) + + # send initial metadata + name, version, platform, metadata = self.send_ping_and_get_metadata(client, True) + self.assertIsNotNone(name) + self.assertIsNotNone(version) + self.assertIsNotNone(platform) + + # add data + add_name, add_version, add_platform = "framework", "2.0", "Framework Platform" + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + # check new data isn't sent + self.handshake_req: Optional[dict] = None + client.admin.command("ping") + self.assertIsNone(self.handshake_req) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/unified_format.py b/test/unified_format.py index e45922819d..d6a1f866c8 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -67,6 +67,7 @@ from bson.objectid import ObjectId from gridfs import GridFSBucket, GridOut, NoFile from pymongo import ASCENDING, CursorType, MongoClient, _csot +from pymongo.driver_info import DriverInfo from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import ( AutoReconnect, @@ -810,6 +811,11 @@ def _cursor_close(self, target, *args, **kwargs): self.__raise_if_unsupported("close", target, NonLazyCursor, CommandCursor) return target.close() + def _clientOperation_appendMetadata(self, target, *args, **kwargs): + info_opts = kwargs["driver_info_options"] + driver_info = DriverInfo(info_opts["name"], info_opts["version"], info_opts["platform"]) + target.append_metadata(driver_info) + def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): if "opts" in kwargs: kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) diff --git a/tools/synchro.py b/tools/synchro.py index 541231cf71..e502f96281 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -212,6 +212,7 @@ def async_only_test(f: str) -> bool: "test_client.py", "test_client_bulk_write.py", "test_client_context.py", + "test_client_metadata.py", "test_collation.py", "test_collection.py", "test_collection_management.py", From 244f17d57b1a28b06756b026372e27bc07b61e67 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 26 Jun 2025 16:37:03 -0400 Subject: [PATCH 1961/2111] PYTHON-5404 - Add docs + justfile target for profiling execution (#2402) --- CONTRIBUTING.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5a2bf4d913..ca98584602 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -460,3 +460,15 @@ partially-converted asynchronous version of the same name to the `test/asynchron Use this generated file as a starting point for the completed conversion. The script is used like so: `python tools/convert_test_to_async.py [test_file.py]` + +## Generating a flame graph using py-spy +To profile a test script and generate a flame graph, follow these steps: +1. Install `py-spy` if you haven't already: + ```bash + pip install py-spy + ``` +2. Inside your test script, perform any required setup and then loop over the code you want to profile for improved sampling. +3. Run `py-spy record -o -r -- python ` to generate a `.svg` file containing the flame graph. + (Note: on macOS you will need to run this command using `sudo` to allow `py-spy` to attach to the Python process.) +4. If you need to include native code (for example the C extensions), profiling should be done on a Linux system, as macOS and Windows do not support the `--native` option of `py-spy`. + Creating an ubuntu Evergreen spawn host and using `scp` to copy the flamegraph `.svg` file back to your local machine is the best way to do this. From 0cb4b2f1a6a913b3cf0a5434349cf3858d31913c Mon Sep 17 00:00:00 2001 From: Jib Date: Fri, 27 Jun 2025 12:58:11 -0400 Subject: [PATCH 1962/2111] PYTHON-5287: create CODEOWNERS (#2408) --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..e21b87ddd3 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @mongodb/dbx-python From 6a672d4dd36808cb65469194dfffa53e7fd2b83a Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 27 Jun 2025 14:41:53 -0400 Subject: [PATCH 1963/2111] PYTHON-5382 - Add a test with min dependencies (#2410) --- .github/workflows/test-python.yml | 52 +++++++++++++++++++++++++++ test/asynchronous/test_srv_polling.py | 2 +- test/test_srv_polling.py | 2 +- 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index a2fde83c06..32dc4ec7f5 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -220,3 +220,55 @@ jobs: which python pip install -e ".[test]" PYMONGO_MUST_CONNECT=1 pytest -v -k client_context + + test_minimum: + permissions: + contents: read + runs-on: ubuntu-latest + name: Test using minimum dependencies and supported Python + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + with: + python-version: '3.9' + - name: Start MongoDB + uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + with: + mongodb-version: 6.0 + # Async and our test_dns do not support dnspython 1.X, so we don't run async or dns tests here + - name: Run tests + shell: bash + run: | + uv venv + source .venv/bin/activate + uv pip install -e ".[test]" --resolution=lowest-direct + pytest -v test/test_srv_polling.py + + test_minimum_for_async: + permissions: + contents: read + runs-on: ubuntu-latest + name: Test async's minimum dependencies and Python + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + with: + python-version: '3.9' + - name: Start MongoDB + uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + with: + mongodb-version: 6.0 + # The lifetime kwarg we use in srv resolution was added to the async resolver API in dnspython 2.1.0 + - name: Run tests + shell: bash + run: | + uv venv + source .venv/bin/activate + uv pip install -e ".[test]" --resolution=lowest-direct dnspython==2.1.0 --force-reinstall + pytest -v test/test_srv_polling.py test/test_dns.py test/asynchronous/test_srv_polling.py test/asynchronous/test_dns.py diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index 3ba50e77a8..2e248628da 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -363,7 +363,7 @@ def test_import_dns_resolver(self): # Regression test for PYTHON-4407 import dns.resolver - self.assertTrue(hasattr(dns.resolver, "resolve")) + self.assertTrue(hasattr(dns.resolver, "resolve") or hasattr(dns.resolver, "query")) if __name__ == "__main__": diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 971c3bad50..16b076c1d3 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -363,7 +363,7 @@ def test_import_dns_resolver(self): # Regression test for PYTHON-4407 import dns.resolver - self.assertTrue(hasattr(dns.resolver, "resolve")) + self.assertTrue(hasattr(dns.resolver, "resolve") or hasattr(dns.resolver, "query")) if __name__ == "__main__": From 0e407351a4630c995d7adad960047c81cc22a153 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Fri, 27 Jun 2025 14:06:00 -0700 Subject: [PATCH 1964/2111] PYTHON-5392 Better test assertions for comparisons (#2350) Co-authored-by: Noah Stapp --- test/asynchronous/test_bulk.py | 8 ++++---- test/asynchronous/test_client.py | 2 +- test/asynchronous/test_collection.py | 2 +- test/asynchronous/test_database.py | 2 +- test/asynchronous/test_pooling.py | 9 +++++---- test/test_bson.py | 2 ++ test/test_bulk.py | 8 ++++---- test/test_client.py | 2 +- test/test_collection.py | 2 +- test/test_database.py | 2 +- test/test_objectid.py | 2 +- test/test_pooling.py | 9 +++++---- 12 files changed, 27 insertions(+), 23 deletions(-) diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py index b6dedb497c..02958e6f0e 100644 --- a/test/asynchronous/test_bulk.py +++ b/test/asynchronous/test_bulk.py @@ -994,7 +994,7 @@ async def test_write_concern_failure_ordered(self): # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 0) + self.assertGreater(len(details["writeConcernErrors"]), 0) failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) @@ -1035,7 +1035,7 @@ async def test_write_concern_failure_ordered(self): details, ) - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) failed = details["writeErrors"][0] self.assertIn("duplicate", failed["errmsg"]) @@ -1073,7 +1073,7 @@ async def test_write_concern_failure_unordered(self): self.assertEqual(0, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) await self.coll.delete_many({}) await self.coll.create_index("a", unique=True) @@ -1100,7 +1100,7 @@ async def test_write_concern_failure_unordered(self): self.assertEqual(1, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) failed = details["writeErrors"][0] self.assertEqual(2, failed["index"]) diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index aaa7e7d56d..52f16e0bcc 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -1005,7 +1005,7 @@ async def test_list_databases(self): cursor = await self.client.list_databases() self.assertIsInstance(cursor, AsyncCommandCursor) helper_docs = await cursor.to_list() - self.assertTrue(len(helper_docs) > 0) + self.assertGreater(len(helper_docs), 0) self.assertEqual(len(helper_docs), len(cmd_docs)) # PYTHON-3529 Some fields may change between calls, just compare names. for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index b6f96cc999..cda8452d1c 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -500,7 +500,7 @@ async def test_index_text(self): # Sort by 'score' field. cursor.sort([("score", {"$meta": "textScore"})]) results = await cursor.to_list() - self.assertTrue(results[0]["score"] >= results[1]["score"]) + self.assertGreaterEqual(results[0]["score"], results[1]["score"]) await db.test.drop_indexes() diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 3195c74988..e6f0c6a532 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -239,7 +239,7 @@ async def test_check_exists(self): listener.reset() await db.drop_collection("unique") await db.create_collection("unique", check_exists=False) - self.assertTrue(len(listener.started_events) > 0) + self.assertGreater(len(listener.started_events), 0) self.assertNotIn("listCollections", listener.started_command_names()) async def test_list_collections(self): diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 64c5738dba..66edf0177f 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -331,8 +331,9 @@ async def test_wait_queue_timeout(self): pass duration = time.time() - start - self.assertTrue( - abs(wait_queue_timeout - duration) < 1, + self.assertLess( + abs(wait_queue_timeout - duration), + 1, f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", ) @@ -547,7 +548,7 @@ async def f(): await async_joinall(tasks) self.assertEqual(ntasks, self.n_passed) - self.assertTrue(len(cx_pool.conns) > 1) + self.assertGreater(len(cx_pool.conns), 1) self.assertEqual(0, cx_pool.requests) async def test_max_pool_size_none(self): @@ -578,7 +579,7 @@ async def f(): await async_joinall(tasks) self.assertEqual(ntasks, self.n_passed) - self.assertTrue(len(cx_pool.conns) > 1) + self.assertGreater(len(cx_pool.conns), 1) self.assertEqual(cx_pool.max_pool_size, float("inf")) async def test_max_pool_size_zero(self): diff --git a/test/test_bson.py b/test/test_bson.py index 23e0a29c4f..e9a1dd1ca7 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1045,6 +1045,8 @@ def test_exception_wrapping(self): def test_minkey_maxkey_comparison(self): # MinKey's <, <=, >, >=, !=, and ==. + # These tests should be kept as assertTrue as opposed to using unittest's built-in comparison assertions because + # MinKey and MaxKey define their own __ge__, __le__, and other comparison attributes, and we want to explicitly test that. self.assertTrue(MinKey() < None) self.assertTrue(MinKey() < 1) self.assertTrue(MinKey() <= 1) diff --git a/test/test_bulk.py b/test/test_bulk.py index ac6f760d37..1de406fca5 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -992,7 +992,7 @@ def test_write_concern_failure_ordered(self): # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 0) + self.assertGreater(len(details["writeConcernErrors"]), 0) failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) @@ -1033,7 +1033,7 @@ def test_write_concern_failure_ordered(self): details, ) - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) failed = details["writeErrors"][0] self.assertIn("duplicate", failed["errmsg"]) @@ -1069,7 +1069,7 @@ def test_write_concern_failure_unordered(self): self.assertEqual(0, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) self.coll.delete_many({}) self.coll.create_index("a", unique=True) @@ -1096,7 +1096,7 @@ def test_write_concern_failure_unordered(self): self.assertEqual(1, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) failed = details["writeErrors"][0] self.assertEqual(2, failed["index"]) diff --git a/test/test_client.py b/test/test_client.py index 18624f892c..dd1bf94cf1 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -978,7 +978,7 @@ def test_list_databases(self): cursor = self.client.list_databases() self.assertIsInstance(cursor, CommandCursor) helper_docs = cursor.to_list() - self.assertTrue(len(helper_docs) > 0) + self.assertGreater(len(helper_docs), 0) self.assertEqual(len(helper_docs), len(cmd_docs)) # PYTHON-3529 Some fields may change between calls, just compare names. for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): diff --git a/test/test_collection.py b/test/test_collection.py index 5643a62022..ccace72bec 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -490,7 +490,7 @@ def test_index_text(self): # Sort by 'score' field. cursor.sort([("score", {"$meta": "textScore"})]) results = cursor.to_list() - self.assertTrue(results[0]["score"] >= results[1]["score"]) + self.assertGreaterEqual(results[0]["score"], results[1]["score"]) db.test.drop_indexes() diff --git a/test/test_database.py b/test/test_database.py index 0fe6c01a9d..56691383b2 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -238,7 +238,7 @@ def test_check_exists(self): listener.reset() db.drop_collection("unique") db.create_collection("unique", check_exists=False) - self.assertTrue(len(listener.started_events) > 0) + self.assertGreater(len(listener.started_events), 0) self.assertNotIn("listCollections", listener.started_command_names()) def test_list_collections(self): diff --git a/test/test_objectid.py b/test/test_objectid.py index d7db7229ea..dbc61951d1 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -92,7 +92,7 @@ def test_generation_time(self): self.assertEqual(utc, d2.tzinfo) d2 = d2.replace(tzinfo=None) - self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) + self.assertLess(d2 - d1, datetime.timedelta(seconds=2)) def test_from_datetime(self): d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) diff --git a/test/test_pooling.py b/test/test_pooling.py index 05513afe12..b995c467c2 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -331,8 +331,9 @@ def test_wait_queue_timeout(self): pass duration = time.time() - start - self.assertTrue( - abs(wait_queue_timeout - duration) < 1, + self.assertLess( + abs(wait_queue_timeout - duration), + 1, f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", ) @@ -545,7 +546,7 @@ def f(): joinall(tasks) self.assertEqual(ntasks, self.n_passed) - self.assertTrue(len(cx_pool.conns) > 1) + self.assertGreater(len(cx_pool.conns), 1) self.assertEqual(0, cx_pool.requests) def test_max_pool_size_none(self): @@ -576,7 +577,7 @@ def f(): joinall(tasks) self.assertEqual(ntasks, self.n_passed) - self.assertTrue(len(cx_pool.conns) > 1) + self.assertGreater(len(cx_pool.conns), 1) self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): From ed269759268f4eae08947eb65faca83d1ddfbbaf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 07:42:55 -0500 Subject: [PATCH 1965/2111] Bump the actions group across 1 directory with 3 updates (#2411) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/test-python.yml | 16 ++++++++-------- .github/workflows/zizmor.yml | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 36ed7fa2e9..4ae38d6a20 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 32dc4ec7f5..fb28f2476f 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -25,7 +25,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -88,7 +88,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: enable-cache: true python-version: "3.9" @@ -111,7 +111,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: enable-cache: true python-version: "3.9" @@ -130,7 +130,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: enable-cache: true python-version: "3.9" @@ -152,7 +152,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: enable-cache: true python-version: "${{matrix.python}}" @@ -231,7 +231,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: python-version: '3.9' - name: Start MongoDB @@ -257,7 +257,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@f0ec1fc3b38f5e7cd731bb6ce540c5af426746bb # v5 + uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 with: python-version: '3.9' - name: Start MongoDB diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 48097316f0..c6237d2bda 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,7 +18,7 @@ jobs: with: persist-credentials: false - name: Setup Rust - uses: actions-rust-lang/setup-rust-toolchain@9d7e65c320fdb52dcd45ffaa68deb6c02c8754d9 # v1 + uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 # v1 - name: Get zizmor run: cargo install zizmor - name: Run zizmor 🌈 @@ -26,7 +26,7 @@ jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3 + uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 with: sarif_file: results.sarif category: zizmor From 578c6c2ad2559c4c939c682151eeeaf3b847ab3e Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 30 Jun 2025 11:08:42 -0700 Subject: [PATCH 1966/2111] PYTHON-5423 Always use subprocess.run instead of subprocess.check_call or subprocess.call (#2412) --- hatch_build.py | 2 +- test/__init__.py | 2 +- test/asynchronous/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hatch_build.py b/hatch_build.py index 91315eb09f..40271972dd 100644 --- a/hatch_build.py +++ b/hatch_build.py @@ -19,7 +19,7 @@ def initialize(self, version, build_data): here = Path(__file__).parent.resolve() sys.path.insert(0, str(here)) - subprocess.check_call([sys.executable, "_setup.py", "build_ext", "-i"]) + subprocess.run([sys.executable, "_setup.py", "build_ext", "-i"], check=True) # Ensure wheel is marked as binary and contains the binary files. build_data["infer_tag"] = True diff --git a/test/__init__.py b/test/__init__.py index e0646ce894..f143730d2c 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -369,7 +369,7 @@ def fips_enabled(self): if self._fips_enabled is not None: return self._fips_enabled try: - subprocess.check_call(["fips-mode-setup", "--is-enabled"]) + subprocess.run(["fips-mode-setup", "--is-enabled"], check=True) self._fips_enabled = True except (subprocess.SubprocessError, FileNotFoundError): self._fips_enabled = False diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 48c9dc2920..af473e7c6a 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -369,7 +369,7 @@ def fips_enabled(self): if self._fips_enabled is not None: return self._fips_enabled try: - subprocess.check_call(["fips-mode-setup", "--is-enabled"]) + subprocess.run(["fips-mode-setup", "--is-enabled"], check=True) self._fips_enabled = True except (subprocess.SubprocessError, FileNotFoundError): self._fips_enabled = False From 0b2900d162ffeca355d7fa43111c4df02cf488be Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 1 Jul 2025 15:42:58 -0500 Subject: [PATCH 1967/2111] PYTHON-5413 Handle flaky tests (#2395) --- .evergreen/generated_configs/functions.yml | 1 + .evergreen/scripts/generate_config.py | 1 + .evergreen/scripts/setup_tests.py | 4 - CONTRIBUTING.md | 9 ++ test/__init__.py | 18 ++-- test/asynchronous/__init__.py | 18 ++-- test/asynchronous/test_client_bulk_write.py | 6 +- test/asynchronous/test_csot.py | 7 +- test/asynchronous/test_cursor.py | 7 +- test/asynchronous/test_encryption.py | 6 ++ test/asynchronous/test_retryable_writes.py | 3 +- .../test_server_selection_in_window.py | 2 + test/asynchronous/test_srv_polling.py | 2 + test/asynchronous/unified_format.py | 88 +++++++++++-------- test/asynchronous/utils.py | 63 +++++++++++++ test/test_client_bulk_write.py | 6 +- test/test_csot.py | 7 +- test/test_cursor.py | 7 +- test/test_encryption.py | 6 ++ test/test_retryable_writes.py | 3 +- test/test_server_selection_in_window.py | 2 + test/test_srv_polling.py | 2 + test/test_topology.py | 3 +- test/unified_format.py | 88 +++++++++++-------- test/unified_format_shared.py | 2 - test/utils.py | 63 +++++++++++++ 26 files changed, 305 insertions(+), 119 deletions(-) diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index 7d9ab2df3b..7f11bc5d06 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -145,6 +145,7 @@ functions: - MONGODB_API_VERSION - REQUIRE_API_VERSION - DEBUG_LOG + - DISABLE_FLAKY - ORCHESTRATION_FILE - OCSP_SERVER_TYPE - VERSION diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 518d2487eb..809792679b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -1084,6 +1084,7 @@ def create_run_tests_func(): "MONGODB_API_VERSION", "REQUIRE_API_VERSION", "DEBUG_LOG", + "DISABLE_FLAKY", "ORCHESTRATION_FILE", "OCSP_SERVER_TYPE", "VERSION", diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 13444fe9ca..a4ff77ab71 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -162,10 +162,6 @@ def handle_test_env() -> None: write_env("PIP_PREFER_BINARY") # Prefer binary dists by default. write_env("UV_FROZEN") # Do not modify lock files. - # Skip CSOT tests on non-linux platforms. - if PLATFORM != "linux": - write_env("SKIP_CSOT_TESTS") - # Set an environment variable for the test name and sub test name. write_env(f"TEST_{test_name.upper()}") write_env("TEST_NAME", test_name) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ca98584602..5b5ddef9a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -404,6 +404,15 @@ If you are running one of the `no-responder` tests, omit the `run-server` step. - Regenerate the test variants and tasks using `pre-commit run --all-files generate-config`. - Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. +## Handling flaky tests + +We have a custom `flaky` decorator in [test/asynchronous/utils.py](test/asynchronous/utils.py) that can be used for +tests that are `flaky`. By default the decorator only applies when not running on CPython on Linux, since other +runtimes tend to have more variation. When using the `flaky` decorator, open a corresponding ticket and +a use the ticket number as the "reason" parameter to the decorator, e.g. `@flaky(reason="PYTHON-1234")`. +When running tests locally (not in CI), the `flaky` decorator will be disabled unless `ENABLE_FLAKY` is set. +To disable the `flaky` decorator in CI, you can use `evergreen patch --param DISABLE_FLAKY=1`. + ## Specification Tests The MongoDB [specifications repository](https://github.com/mongodb/specifications) diff --git a/test/__init__.py b/test/__init__.py index f143730d2c..40d58cff89 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -32,6 +32,7 @@ import warnings from asyncio import iscoroutinefunction +from pymongo.errors import AutoReconnect from pymongo.synchronous.uri_parser import parse_uri try: @@ -1219,12 +1220,17 @@ def teardown(): c = client_context.client if c: if not client_context.is_data_lake: - c.drop_database("pymongo-pooling-tests") - c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - c.drop_database("pymongo_test_mike") - c.drop_database("pymongo_test_bernie") + try: + c.drop_database("pymongo-pooling-tests") + c.drop_database("pymongo_test") + c.drop_database("pymongo_test1") + c.drop_database("pymongo_test2") + c.drop_database("pymongo_test_mike") + c.drop_database("pymongo_test_bernie") + except AutoReconnect: + # PYTHON-4982 + if sys.implementation.name.lower() != "pypy": + raise c.close() print_running_clients() diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index af473e7c6a..5d52c348df 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -33,6 +33,7 @@ from asyncio import iscoroutinefunction from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.errors import AutoReconnect try: import ipaddress @@ -1235,12 +1236,17 @@ async def async_teardown(): c = async_client_context.client if c: if not async_client_context.is_data_lake: - await c.drop_database("pymongo-pooling-tests") - await c.drop_database("pymongo_test") - await c.drop_database("pymongo_test1") - await c.drop_database("pymongo_test2") - await c.drop_database("pymongo_test_mike") - await c.drop_database("pymongo_test_bernie") + try: + await c.drop_database("pymongo-pooling-tests") + await c.drop_database("pymongo_test") + await c.drop_database("pymongo_test1") + await c.drop_database("pymongo_test2") + await c.drop_database("pymongo_test_mike") + await c.drop_database("pymongo_test_bernie") + except AutoReconnect: + # PYTHON-4982 + if sys.implementation.name.lower() != "pypy": + raise await c.close() print_running_clients() diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index 2f48466af8..cf863979b5 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -25,6 +25,7 @@ async_client_context, unittest, ) +from test.asynchronous.utils import flaky from test.utils_shared import ( OvertCommandListener, ) @@ -619,8 +620,6 @@ async def test_15_unacknowledged_write_across_batches(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(AsyncIntegrationTest): async def asyncSetUp(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") await super().asyncSetUp() self.max_write_batch_size = await async_client_context.max_write_batch_size self.max_bson_object_size = await async_client_context.max_bson_size @@ -628,7 +627,10 @@ async def asyncSetUp(self): @async_client_context.require_version_min(8, 0, 0, -24) @async_client_context.require_failCommand_fail_point + @flaky(reason="PYTHON-5290", max_runs=3, affects_cpython_linux=True) async def test_timeout_in_multi_batch_bulk_write(self): + if sys.platform != "linux" and "CI" in os.environ: + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows and MacOS") _OVERHEAD = 500 internal_client = await self.async_rs_or_single_client(timeoutMS=None) diff --git a/test/asynchronous/test_csot.py b/test/asynchronous/test_csot.py index 46c97ce6d3..a978d1ccc0 100644 --- a/test/asynchronous/test_csot.py +++ b/test/asynchronous/test_csot.py @@ -23,6 +23,7 @@ from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils import flaky import pymongo from pymongo import _csot @@ -43,9 +44,8 @@ class TestCSOT(AsyncIntegrationTest): RUN_ON_LOAD_BALANCER = True + @flaky(reason="PYTHON-3522") async def test_timeout_nested(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") coll = self.db.coll self.assertEqual(_csot.get_timeout(), None) self.assertEqual(_csot.get_deadline(), float("inf")) @@ -82,9 +82,8 @@ async def test_timeout_nested(self): self.assertEqual(_csot.get_rtt(), 0.0) @async_client_context.require_change_streams + @flaky(reason="PYTHON-3522") async def test_change_stream_can_resume_after_timeouts(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") coll = self.db.test await coll.insert_one({}) async with await coll.watch() as stream: diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index de836dbf80..984545438e 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -31,6 +31,7 @@ sys.path[0:0] = [""] from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import flaky from test.utils_shared import ( AllowListEventListener, EventListener, @@ -1406,9 +1407,8 @@ async def test_to_list_length(self): docs = await c.to_list(3) self.assertEqual(len(docs), 2) + @flaky(reason="PYTHON-3522") async def test_to_list_csot_applied(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = await self.async_single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey @@ -1449,9 +1449,8 @@ async def test_command_cursor_to_list_length(self): self.assertEqual(len(await result.to_list(1)), 1) @async_client_context.require_failCommand_blockConnection + @flaky(reason="PYTHON-3522") async def test_command_cursor_to_list_csot_applied(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = await self.async_single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index a766e63915..c2ef7c7e33 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -32,6 +32,7 @@ import warnings from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.utils import flaky from test.asynchronous.utils_spec_runner import AsyncSpecRunner, AsyncSpecTestCreator from threading import Thread from typing import Any, Dict, Mapping, Optional @@ -3247,6 +3248,7 @@ async def test_kms_retry(self): class TestAutomaticDecryptionKeys(AsyncEncryptionIntegrationTest): @async_client_context.require_no_standalone @async_client_context.require_version_min(7, 0, -1) + @flaky(reason="PYTHON-4982") async def asyncSetUp(self): await super().asyncSetUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") @@ -3489,6 +3491,8 @@ async def test_implicit_session_ignored_when_unsupported(self): self.assertNotIn("lsid", self.listener.started_events[1].command) + await self.mongocryptd_client.close() + async def test_explicit_session_errors_when_unsupported(self): self.listener.reset() async with self.mongocryptd_client.start_session() as s: @@ -3501,6 +3505,8 @@ async def test_explicit_session_errors_when_unsupported(self): ): await self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s) + await self.mongocryptd_client.close() + if __name__ == "__main__": unittest.main() diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py index 4fe5e5e37f..ddb1d39eb7 100644 --- a/test/asynchronous/test_retryable_writes.py +++ b/test/asynchronous/test_retryable_writes.py @@ -20,7 +20,7 @@ import pprint import sys import threading -from test.asynchronous.utils import async_set_fail_point +from test.asynchronous.utils import async_set_fail_point, flaky sys.path[0:0] = [""] @@ -466,6 +466,7 @@ class TestPoolPausedError(AsyncIntegrationTest): @async_client_context.require_failCommand_blockConnection @async_client_context.require_retryable_writes @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + @flaky(reason="PYTHON-5291") async def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() diff --git a/test/asynchronous/test_server_selection_in_window.py b/test/asynchronous/test_server_selection_in_window.py index 3fe448d4dd..dd0ff734f7 100644 --- a/test/asynchronous/test_server_selection_in_window.py +++ b/test/asynchronous/test_server_selection_in_window.py @@ -21,6 +21,7 @@ from pathlib import Path from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils import flaky from test.asynchronous.utils_selection_tests import create_topology from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator from test.utils_shared import ( @@ -137,6 +138,7 @@ async def frequencies(self, client, listener, n_finds=10): @async_client_context.require_failCommand_appName @async_client_context.require_multiple_mongoses + @flaky(reason="PYTHON-3689") async def test_load_balancing(self): listener = OvertCommandListener() cmap_listener = CMAPListener() diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index 2e248628da..05c4699653 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -18,6 +18,7 @@ import asyncio import sys import time +from test.asynchronous.utils import flaky from test.utils_shared import FunctionCallRecorder from typing import Any @@ -254,6 +255,7 @@ def final_callback(): # Nodelist should reflect new valid DNS resolver response. await self.assert_nodelist_change(response_final, client) + @flaky(reason="PYTHON-5315") async def test_recover_from_initially_empty_seedlist(self): def empty_seedlist(): return [] diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 5b66d12813..5c221a6df0 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -35,12 +35,11 @@ client_knobs, unittest, ) -from test.asynchronous.utils import async_get_pool +from test.asynchronous.utils import async_get_pool, flaky from test.asynchronous.utils_spec_runner import SpecRunnerTask from test.unified_format_shared import ( KMS_TLS_OPTS, PLACEHOLDER_MAP, - SKIP_CSOT_TESTS, EventListenerUtil, MatchEvaluatorUtil, coerce_result, @@ -519,20 +518,38 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") - if ( - "Error returned from connection pool clear with interruptInUseConnections=true is retryable" - in spec["description"] - and not _IS_SYNC - ): - self.skipTest("PYTHON-5170 tests are flakey") - if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: - self.skipTest("PYTHON-5174 tests are flakey") class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: - if "gridfs" in class_name and sys.platform == "win32": - self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") + # Skip tests that are too slow to run on a given platform. + slow_macos = [ + "operation fails after two consecutive socket timeouts.*", + "operation succeeds after one socket timeout.*", + "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + ] + slow_win32 = [ + *slow_macos, + "maxTimeMS value in the command is less than timeoutMS", + "timeoutMS applies to whole operation.*", + ] + slow_pypy = [ + "timeoutMS applies to whole operation.*", + ] + if "CI" in os.environ and sys.platform == "win32" and "gridfs" in class_name: + self.skipTest("PYTHON-3522 CSOT GridFS test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "win32": + for pat in slow_win32: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "darwin": + for pat in slow_macos: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on MacOS") + if "CI" in os.environ and sys.implementation.name.lower() == "pypy": + for pat in slow_pypy: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on PyPy") if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: @@ -1353,38 +1370,31 @@ async def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) async def run_scenario(self, spec, uri=None): - if "csot" in self.id().lower() and SKIP_CSOT_TESTS: - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") - # Kill all sessions before and after each test to prevent an open # transaction (from a test failure) from blocking collection/database # operations during test set up and tear down. await self.kill_all_sessions() - if "csot" in self.id().lower(): - # Retry CSOT tests up to 2 times to deal with flakey tests. - attempts = 3 - for i in range(attempts): - try: - return await self._run_scenario(spec, uri) - except (AssertionError, OperationFailure) as exc: - if isinstance(exc, OperationFailure) and ( - _IS_SYNC or "failpoint" not in exc._message - ): - raise - if i < attempts - 1: - print( - f"Retrying after attempt {i+1} of {self.id()} failed with:\n" - f"{traceback.format_exc()}", - file=sys.stderr, - ) - await self.asyncSetUp() - continue - raise - return None - else: - await self._run_scenario(spec, uri) - return None + # Handle flaky tests. + flaky_tests = [ + ("PYTHON-5170", ".*test_discovery_and_monitoring.*"), + ("PYTHON-5174", ".*Driver_extends_timeout_while_streaming"), + ("PYTHON-5315", ".*TestSrvPolling.test_recover_from_initially_.*"), + ("PYTHON-4987", ".*UnknownTransactionCommitResult_labels_to_connection_errors"), + ("PYTHON-3689", ".*TestProse.test_load_balancing"), + ("PYTHON-3522", ".*csot.*"), + ] + for reason, flaky_test in flaky_tests: + if re.match(flaky_test.lower(), self.id().lower()) is not None: + func_name = self.id() + options = dict(reason=reason, reset_func=self.asyncSetUp, func_name=func_name) + if "csot" in func_name.lower(): + options["max_runs"] = 3 + options["affects_cpython_linux"] = True + decorator = flaky(**options) + await decorator(self._run_scenario)(spec, uri) + return + await self._run_scenario(spec, uri) async def _run_scenario(self, spec, uri=None): # maybe skip test manually diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py index ca80d1f6dd..2b82355fc1 100644 --- a/test/asynchronous/utils.py +++ b/test/asynchronous/utils.py @@ -17,10 +17,14 @@ import asyncio import contextlib +import os import random +import sys import threading # Used in the synchronized version of this file import time +import traceback from asyncio import iscoroutinefunction +from functools import wraps from bson.son import SON from pymongo import AsyncMongoClient @@ -154,6 +158,65 @@ async def async_joinall(tasks): await asyncio.wait([t.task for t in tasks if t is not None], timeout=300) +def flaky( + *, + reason=None, + max_runs=2, + min_passes=1, + delay=1, + affects_cpython_linux=False, + func_name=None, + reset_func=None, +): + """Decorate a test as flaky. + + :param reason: the reason why the test is flaky + :param max_runs: the maximum number of runs before raising an error + :param min_passes: the minimum number of passing runs + :param delay: the delay in seconds between retries + :param affects_cpython_links: whether the test is flaky on CPython on Linux + :param func_name: the name of the function, used for the rety message + :param reset_func: a function to call before retrying + + """ + if reason is None: + raise ValueError("flaky requires a reason input") + is_cpython_linux = sys.platform == "linux" and sys.implementation.name == "cpython" + disable_flaky = "DISABLE_FLAKY" in os.environ + if "CI" not in os.environ and "ENABLE_FLAKY" not in os.environ: + disable_flaky = True + + if disable_flaky or (is_cpython_linux and not affects_cpython_linux): + max_runs = 1 + min_passes = 1 + + def decorator(target_func): + @wraps(target_func) + async def wrapper(*args, **kwargs): + passes = 0 + for i in range(max_runs): + try: + result = await target_func(*args, **kwargs) + passes += 1 + if passes == min_passes: + return result + except Exception as e: + if i == max_runs - 1: + raise e + print( + f"Retrying after attempt {i+1} of {func_name or target_func.__name__} failed with ({reason})):\n" + f"{traceback.format_exc()}", + file=sys.stderr, + ) + await asyncio.sleep(delay) + if reset_func: + await reset_func() + + return wrapper + + return decorator + + class AsyncMockConnection: def __init__(self): self.cancel_context = _CancellationContext() diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 84313c5be0..1614e9f3cf 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -25,6 +25,7 @@ client_context, unittest, ) +from test.utils import flaky from test.utils_shared import ( OvertCommandListener, ) @@ -615,8 +616,6 @@ def test_15_unacknowledged_write_across_batches(self): # https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites class TestClientBulkWriteCSOT(IntegrationTest): def setUp(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") super().setUp() self.max_write_batch_size = client_context.max_write_batch_size self.max_bson_object_size = client_context.max_bson_size @@ -624,7 +623,10 @@ def setUp(self): @client_context.require_version_min(8, 0, 0, -24) @client_context.require_failCommand_fail_point + @flaky(reason="PYTHON-5290", max_runs=3, affects_cpython_linux=True) def test_timeout_in_multi_batch_bulk_write(self): + if sys.platform != "linux" and "CI" in os.environ: + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows and MacOS") _OVERHEAD = 500 internal_client = self.rs_or_single_client(timeoutMS=None) diff --git a/test/test_csot.py b/test/test_csot.py index ff907cc9c5..981af1ed03 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -23,6 +23,7 @@ from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes +from test.utils import flaky import pymongo from pymongo import _csot @@ -43,9 +44,8 @@ class TestCSOT(IntegrationTest): RUN_ON_LOAD_BALANCER = True + @flaky(reason="PYTHON-3522") def test_timeout_nested(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") coll = self.db.coll self.assertEqual(_csot.get_timeout(), None) self.assertEqual(_csot.get_deadline(), float("inf")) @@ -82,9 +82,8 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_rtt(), 0.0) @client_context.require_change_streams + @flaky(reason="PYTHON-3522") def test_change_stream_can_resume_after_timeouts(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") coll = self.db.test coll.insert_one({}) with coll.watch() as stream: diff --git a/test/test_cursor.py b/test/test_cursor.py index 83f2b79316..74fe429269 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -31,6 +31,7 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest +from test.utils import flaky from test.utils_shared import ( AllowListEventListener, EventListener, @@ -1397,9 +1398,8 @@ def test_to_list_length(self): docs = c.to_list(3) self.assertEqual(len(docs), 2) + @flaky(reason="PYTHON-3522") def test_to_list_csot_applied(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = self.single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey @@ -1440,9 +1440,8 @@ def test_command_cursor_to_list_length(self): self.assertEqual(len(result.to_list(1)), 1) @client_context.require_failCommand_blockConnection + @flaky(reason="PYTHON-3522") def test_command_cursor_to_list_csot_applied(self): - if os.environ.get("SKIP_CSOT_TESTS", ""): - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") client = self.single_client(timeoutMS=500, w=1) coll = client.pymongo.test # Initialize the client with a larger timeout to help make test less flakey diff --git a/test/test_encryption.py b/test/test_encryption.py index baaefa1e73..68b24f1729 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -32,6 +32,7 @@ import warnings from test import IntegrationTest, PyMongoTestCase, client_context from test.test_bulk import BulkTestBase +from test.utils import flaky from test.utils_spec_runner import SpecRunner, SpecTestCreator from threading import Thread from typing import Any, Dict, Mapping, Optional @@ -3229,6 +3230,7 @@ def test_kms_retry(self): class TestAutomaticDecryptionKeys(EncryptionIntegrationTest): @client_context.require_no_standalone @client_context.require_version_min(7, 0, -1) + @flaky(reason="PYTHON-4982") def setUp(self): super().setUp() self.key1_document = json_data("etc", "data", "keys", "key1-document.json") @@ -3471,6 +3473,8 @@ def test_implicit_session_ignored_when_unsupported(self): self.assertNotIn("lsid", self.listener.started_events[1].command) + self.mongocryptd_client.close() + def test_explicit_session_errors_when_unsupported(self): self.listener.reset() with self.mongocryptd_client.start_session() as s: @@ -3483,6 +3487,8 @@ def test_explicit_session_errors_when_unsupported(self): ): self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s) + self.mongocryptd_client.close() + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 2ac08691cf..a74a3e8030 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -20,7 +20,7 @@ import pprint import sys import threading -from test.utils import set_fail_point +from test.utils import flaky, set_fail_point sys.path[0:0] = [""] @@ -464,6 +464,7 @@ class TestPoolPausedError(IntegrationTest): @client_context.require_failCommand_blockConnection @client_context.require_retryable_writes @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + @flaky(reason="PYTHON-5291") def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index 4aad34050c..fcf2cce0e0 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -21,6 +21,7 @@ from pathlib import Path from test import IntegrationTest, client_context, unittest from test.helpers import ConcurrentRunner +from test.utils import flaky from test.utils_selection_tests import create_topology from test.utils_shared import ( CMAPListener, @@ -137,6 +138,7 @@ def frequencies(self, client, listener, n_finds=10): @client_context.require_failCommand_appName @client_context.require_multiple_mongoses + @flaky(reason="PYTHON-3689") def test_load_balancing(self): listener = OvertCommandListener() cmap_listener = CMAPListener() diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 16b076c1d3..fd5e58da57 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -18,6 +18,7 @@ import asyncio import sys import time +from test.utils import flaky from test.utils_shared import FunctionCallRecorder from typing import Any @@ -254,6 +255,7 @@ def final_callback(): # Nodelist should reflect new valid DNS resolver response. self.assert_nodelist_change(response_final, client) + @flaky(reason="PYTHON-5315") def test_recover_from_initially_empty_seedlist(self): def empty_seedlist(): return [] diff --git a/test/test_topology.py b/test/test_topology.py index 530cecd1f7..837cf25c62 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -23,7 +23,7 @@ from test import client_knobs, unittest from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool +from test.utils import MockPool, flaky from test.utils_shared import wait_until from bson.objectid import ObjectId @@ -750,6 +750,7 @@ def get_primary(): class TestTopologyErrors(TopologyTest): # Errors when calling hello. + @flaky(reason="PYTHON-5366") def test_pool_reset(self): # hello succeeds at first, then always raises socket error. hello_count = [0] diff --git a/test/unified_format.py b/test/unified_format.py index d6a1f866c8..f1e55d87b9 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -38,7 +38,6 @@ from test.unified_format_shared import ( KMS_TLS_OPTS, PLACEHOLDER_MAP, - SKIP_CSOT_TESTS, EventListenerUtil, MatchEvaluatorUtil, coerce_result, @@ -48,7 +47,7 @@ parse_collection_or_database_options, with_metaclass, ) -from test.utils import get_pool +from test.utils import flaky, get_pool from test.utils_shared import ( camel_to_snake, camel_to_snake_args, @@ -518,20 +517,38 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") - if ( - "Error returned from connection pool clear with interruptInUseConnections=true is retryable" - in spec["description"] - and not _IS_SYNC - ): - self.skipTest("PYTHON-5170 tests are flakey") - if "Driver extends timeout while streaming" in spec["description"] and not _IS_SYNC: - self.skipTest("PYTHON-5174 tests are flakey") class_name = self.__class__.__name__.lower() description = spec["description"].lower() if "csot" in class_name: - if "gridfs" in class_name and sys.platform == "win32": - self.skipTest("PYTHON-3522 CSOT GridFS tests are flaky on Windows") + # Skip tests that are too slow to run on a given platform. + slow_macos = [ + "operation fails after two consecutive socket timeouts.*", + "operation succeeds after one socket timeout.*", + "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + ] + slow_win32 = [ + *slow_macos, + "maxTimeMS value in the command is less than timeoutMS", + "timeoutMS applies to whole operation.*", + ] + slow_pypy = [ + "timeoutMS applies to whole operation.*", + ] + if "CI" in os.environ and sys.platform == "win32" and "gridfs" in class_name: + self.skipTest("PYTHON-3522 CSOT GridFS test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "win32": + for pat in slow_win32: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "darwin": + for pat in slow_macos: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on MacOS") + if "CI" in os.environ and sys.implementation.name.lower() == "pypy": + for pat in slow_pypy: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on PyPy") if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: @@ -1340,38 +1357,31 @@ def verify_outcome(self, spec): self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): - if "csot" in self.id().lower() and SKIP_CSOT_TESTS: - raise unittest.SkipTest("SKIP_CSOT_TESTS is set, skipping...") - # Kill all sessions before and after each test to prevent an open # transaction (from a test failure) from blocking collection/database # operations during test set up and tear down. self.kill_all_sessions() - if "csot" in self.id().lower(): - # Retry CSOT tests up to 2 times to deal with flakey tests. - attempts = 3 - for i in range(attempts): - try: - return self._run_scenario(spec, uri) - except (AssertionError, OperationFailure) as exc: - if isinstance(exc, OperationFailure) and ( - _IS_SYNC or "failpoint" not in exc._message - ): - raise - if i < attempts - 1: - print( - f"Retrying after attempt {i+1} of {self.id()} failed with:\n" - f"{traceback.format_exc()}", - file=sys.stderr, - ) - self.setUp() - continue - raise - return None - else: - self._run_scenario(spec, uri) - return None + # Handle flaky tests. + flaky_tests = [ + ("PYTHON-5170", ".*test_discovery_and_monitoring.*"), + ("PYTHON-5174", ".*Driver_extends_timeout_while_streaming"), + ("PYTHON-5315", ".*TestSrvPolling.test_recover_from_initially_.*"), + ("PYTHON-4987", ".*UnknownTransactionCommitResult_labels_to_connection_errors"), + ("PYTHON-3689", ".*TestProse.test_load_balancing"), + ("PYTHON-3522", ".*csot.*"), + ] + for reason, flaky_test in flaky_tests: + if re.match(flaky_test.lower(), self.id().lower()) is not None: + func_name = self.id() + options = dict(reason=reason, reset_func=self.setUp, func_name=func_name) + if "csot" in func_name.lower(): + options["max_runs"] = 3 + options["affects_cpython_linux"] = True + decorator = flaky(**options) + decorator(self._run_scenario)(spec, uri) + return + self._run_scenario(spec, uri) def _run_scenario(self, spec, uri=None): # maybe skip test manually diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index ea0f2f233e..17dd73ec8c 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -91,8 +91,6 @@ from pymongo.server_description import ServerDescription from pymongo.topology_description import TopologyDescription -SKIP_CSOT_TESTS = os.getenv("SKIP_CSOT_TESTS") - JSON_OPTS = json_util.JSONOptions(tz_aware=False) IS_INTERRUPTED = False diff --git a/test/utils.py b/test/utils.py index 25d95d1d3c..3447440927 100644 --- a/test/utils.py +++ b/test/utils.py @@ -17,10 +17,14 @@ import asyncio import contextlib +import os import random +import sys import threading # Used in the synchronized version of this file import time +import traceback from asyncio import iscoroutinefunction +from functools import wraps from bson.son import SON from pymongo import MongoClient @@ -152,6 +156,65 @@ def joinall(tasks): asyncio.wait([t.task for t in tasks if t is not None], timeout=300) +def flaky( + *, + reason=None, + max_runs=2, + min_passes=1, + delay=1, + affects_cpython_linux=False, + func_name=None, + reset_func=None, +): + """Decorate a test as flaky. + + :param reason: the reason why the test is flaky + :param max_runs: the maximum number of runs before raising an error + :param min_passes: the minimum number of passing runs + :param delay: the delay in seconds between retries + :param affects_cpython_links: whether the test is flaky on CPython on Linux + :param func_name: the name of the function, used for the rety message + :param reset_func: a function to call before retrying + + """ + if reason is None: + raise ValueError("flaky requires a reason input") + is_cpython_linux = sys.platform == "linux" and sys.implementation.name == "cpython" + disable_flaky = "DISABLE_FLAKY" in os.environ + if "CI" not in os.environ and "ENABLE_FLAKY" not in os.environ: + disable_flaky = True + + if disable_flaky or (is_cpython_linux and not affects_cpython_linux): + max_runs = 1 + min_passes = 1 + + def decorator(target_func): + @wraps(target_func) + def wrapper(*args, **kwargs): + passes = 0 + for i in range(max_runs): + try: + result = target_func(*args, **kwargs) + passes += 1 + if passes == min_passes: + return result + except Exception as e: + if i == max_runs - 1: + raise e + print( + f"Retrying after attempt {i+1} of {func_name or target_func.__name__} failed with ({reason})):\n" + f"{traceback.format_exc()}", + file=sys.stderr, + ) + time.sleep(delay) + if reset_func: + reset_func() + + return wrapper + + return decorator + + class MockConnection: def __init__(self): self.cancel_context = _CancellationContext() From 2eb18f18b27318ce1744e04507b42b2b56078c4a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 2 Jul 2025 11:22:12 -0500 Subject: [PATCH 1968/2111] PYTHON-5428 Mark test_connection_close_does_not_block_other_operations as flaky (#2415) --- test/asynchronous/test_discovery_and_monitoring.py | 2 ++ test/test_discovery_and_monitoring.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index 70348c8daf..46799201f1 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -24,6 +24,7 @@ from asyncio import StreamReader, StreamWriter from pathlib import Path from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils import flaky from pymongo.asynchronous.pool import AsyncConnection from pymongo.operations import _Op @@ -378,6 +379,7 @@ async def test_pool_unpause(self): @async_client_context.require_failCommand_appName @async_client_context.require_test_commands @async_client_context.require_async + @flaky(reason="PYTHON-5428") async def test_connection_close_does_not_block_other_operations(self): listener = CMAPHeartbeatListener() client = await self.async_single_client( diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index a0dabaaf8e..83a4adf179 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -24,6 +24,7 @@ from asyncio import StreamReader, StreamWriter from pathlib import Path from test.helpers import ConcurrentRunner +from test.utils import flaky from pymongo.operations import _Op from pymongo.server_selectors import writable_server_selector @@ -378,6 +379,7 @@ def test_pool_unpause(self): @client_context.require_failCommand_appName @client_context.require_test_commands @client_context.require_async + @flaky(reason="PYTHON-5428") def test_connection_close_does_not_block_other_operations(self): listener = CMAPHeartbeatListener() client = self.single_client( From 947fbe33eec1eaffb85a25c87d542f1253f4a946 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Wed, 2 Jul 2025 09:51:50 -0700 Subject: [PATCH 1969/2111] PYTHON-5421 Make parse_uri() return "options" as a dict rather than _CaseInsensitiveDictionary (#2413) --- pymongo/asynchronous/uri_parser.py | 2 + pymongo/synchronous/uri_parser.py | 2 + pymongo/uri_parser_shared.py | 62 +++++++++++++++++++ .../test_discovery_and_monitoring.py | 4 +- test/test_discovery_and_monitoring.py | 4 +- test/test_uri_parser.py | 35 ++++++----- test/test_uri_spec.py | 5 +- 7 files changed, 91 insertions(+), 23 deletions(-) diff --git a/pymongo/asynchronous/uri_parser.py b/pymongo/asynchronous/uri_parser.py index 47c6d72031..11a6a6299c 100644 --- a/pymongo/asynchronous/uri_parser.py +++ b/pymongo/asynchronous/uri_parser.py @@ -29,6 +29,7 @@ SCHEME_LEN, SRV_SCHEME_LEN, _check_options, + _make_options_case_sensitive, _validate_uri, split_hosts, split_options, @@ -113,6 +114,7 @@ async def parse_uri( srv_max_hosts, ) ) + result["options"] = _make_options_case_sensitive(result["options"]) return result diff --git a/pymongo/synchronous/uri_parser.py b/pymongo/synchronous/uri_parser.py index 52b59b8fe8..da0f86d720 100644 --- a/pymongo/synchronous/uri_parser.py +++ b/pymongo/synchronous/uri_parser.py @@ -29,6 +29,7 @@ SCHEME_LEN, SRV_SCHEME_LEN, _check_options, + _make_options_case_sensitive, _validate_uri, split_hosts, split_options, @@ -113,6 +114,7 @@ def parse_uri( srv_max_hosts, ) ) + result["options"] = _make_options_case_sensitive(result["options"]) return result diff --git a/pymongo/uri_parser_shared.py b/pymongo/uri_parser_shared.py index 0cef176bf1..59168d1e9f 100644 --- a/pymongo/uri_parser_shared.py +++ b/pymongo/uri_parser_shared.py @@ -54,6 +54,57 @@ SRV_SCHEME_LEN = len(SRV_SCHEME) DEFAULT_PORT = 27017 +URI_OPTIONS = frozenset( + [ + "appname", + "authMechanism", + "authMechanismProperties", + "authSource", + "compressors", + "connectTimeoutMS", + "directConnection", + "heartbeatFrequencyMS", + "journal", + "loadBalanced", + "localThresholdMS", + "maxIdleTimeMS", + "maxPoolSize", + "maxConnecting", + "maxStalenessSeconds", + "minPoolSize", + "proxyHost", + "proxyPort", + "proxyUsername", + "proxyPassword", + "readConcernLevel", + "readPreference", + "readPreferenceTags", + "replicaSet", + "retryReads", + "retryWrites", + "serverMonitoringMode", + "serverSelectionTimeoutMS", + "serverSelectionTryOnce", + "socketTimeoutMS", + "srvMaxHosts", + "srvServiceName", + "ssl", + "tls", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsCAFile", + "tlsCertificateKeyFile", + "tlsCertificateKeyFilePassword", + "tlsDisableCertificateRevocationCheck", + "tlsDisableOCSPEndpointCheck", + "tlsInsecure", + "w", + "waitQueueTimeoutMS", + "wTimeoutMS", + "zlibCompressionLevel", + ] +) + def _unquoted_percent(s: str) -> bool: """Check for unescaped percent signs. @@ -550,3 +601,14 @@ def _validate_uri( "options": options, "fqdn": fqdn, } + + +def _make_options_case_sensitive(options: _CaseInsensitiveDictionary) -> dict[str, Any]: + case_sensitive = {} + for option in URI_OPTIONS: + if option.lower() in options: + case_sensitive[option] = options[option] + options.pop(option) + for k, v in options.items(): + case_sensitive[k] = v + return case_sensitive diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index 46799201f1..2798afe7df 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -91,8 +91,8 @@ async def create_mock_topology(uri, monitor_class=DummyMonitor): replica_set_name = None direct_connection = None load_balanced = None - if "replicaset" in parsed_uri["options"]: - replica_set_name = parsed_uri["options"]["replicaset"] + if "replicaSet" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaSet"] if "directConnection" in parsed_uri["options"]: direct_connection = parsed_uri["options"]["directConnection"] if "loadBalanced" in parsed_uri["options"]: diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 83a4adf179..4f8ee30d16 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -91,8 +91,8 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): replica_set_name = None direct_connection = None load_balanced = None - if "replicaset" in parsed_uri["options"]: - replica_set_name = parsed_uri["options"]["replicaset"] + if "replicaSet" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaSet"] if "directConnection" in parsed_uri["options"]: direct_connection = parsed_uri["options"]["directConnection"] if "loadBalanced" in parsed_uri["options"]: diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index ec1c6c164c..ed1a53ea26 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -142,9 +142,9 @@ def test_split_options(self): self.assertTrue(split_options("wtimeoutms=500")) self.assertEqual({"fsync": True}, split_options("fsync=true")) self.assertEqual({"fsync": False}, split_options("fsync=false")) - self.assertEqual({"authmechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) + self.assertEqual({"authMechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) self.assertEqual( - {"authmechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") + {"authMechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") ) self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) @@ -290,12 +290,12 @@ def test_parse_uri(self): self.assertEqual(res, parse_uri('mongodb://localhost/test.name/with "delimiters')) res = copy.deepcopy(orig) - res["options"] = {"readpreference": ReadPreference.SECONDARY.mongos_mode} + res["options"] = {"readPreference": ReadPreference.SECONDARY.mongos_mode} self.assertEqual(res, parse_uri("mongodb://localhost/?readPreference=secondary")) # Various authentication tests res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "SCRAM-SHA-256"} + res["options"] = {"authMechanism": "SCRAM-SHA-256"} res["username"] = "user" res["password"] = "password" self.assertEqual( @@ -303,7 +303,7 @@ def test_parse_uri(self): ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "SCRAM-SHA-256", "authsource": "bar"} + res["options"] = {"authMechanism": "SCRAM-SHA-256", "authSource": "bar"} res["username"] = "user" res["password"] = "password" res["database"] = "foo" @@ -315,7 +315,7 @@ def test_parse_uri(self): ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "SCRAM-SHA-256"} + res["options"] = {"authMechanism": "SCRAM-SHA-256"} res["username"] = "user" res["password"] = "" self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=SCRAM-SHA-256")) @@ -327,7 +327,7 @@ def test_parse_uri(self): self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password@localhost/foo")) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "GSSAPI"} + res["options"] = {"authMechanism": "GSSAPI"} res["username"] = "user@domain.com" res["password"] = "password" res["database"] = "foo" @@ -337,7 +337,7 @@ def test_parse_uri(self): ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "GSSAPI"} + res["options"] = {"authMechanism": "GSSAPI"} res["username"] = "user@domain.com" res["password"] = "" res["database"] = "foo" @@ -347,8 +347,8 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["options"] = { - "readpreference": ReadPreference.SECONDARY.mongos_mode, - "readpreferencetags": [ + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ {"dc": "west", "use": "website"}, {"dc": "east", "use": "website"}, ], @@ -368,8 +368,8 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["options"] = { - "readpreference": ReadPreference.SECONDARY.mongos_mode, - "readpreferencetags": [ + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ {"dc": "west", "use": "website"}, {"dc": "east", "use": "website"}, {}, @@ -462,6 +462,7 @@ def test_tlsinsecure_simple(self): "tlsInsecure": True, "tlsDisableOCSPEndpointCheck": True, } + print(parse_uri(uri)["options"]) self.assertEqual(res, parse_uri(uri)["options"]) def test_normalize_options(self): @@ -479,8 +480,8 @@ def test_unquote_during_parsing(self): ) res = parse_uri(uri) options: dict[str, Any] = { - "authmechanism": "MONGODB-AWS", - "authmechanismproperties": {"AWS_SESSION_TOKEN": unquoted_val}, + "authMechanism": "MONGODB-AWS", + "authMechanismProperties": {"AWS_SESSION_TOKEN": unquoted_val}, } self.assertEqual(options, res["options"]) @@ -491,8 +492,8 @@ def test_unquote_during_parsing(self): ) res = parse_uri(uri) options = { - "readpreference": ReadPreference.SECONDARY.mongos_mode, - "readpreferencetags": [ + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ {"dc": "west", unquoted_val: unquoted_val}, {"dc": "east", "use": unquoted_val}, ], @@ -519,7 +520,7 @@ def test_handle_colon(self): ) res = parse_uri(uri) options = { - "authmechanism": "MONGODB-AWS", + "authMechanism": "MONGODB-AWS", "authMechanismProperties": {"AWS_SESSION_TOKEN": token}, } self.assertEqual(options, res["options"]) diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index aeb0be94b5..8f673cff4c 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -27,7 +27,7 @@ from test import unittest from test.helpers import clear_warning_registry -from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate +from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, _CaseInsensitiveDictionary, validate from pymongo.compression_support import _have_snappy from pymongo.synchronous.uri_parser import parse_uri @@ -169,7 +169,8 @@ def run_scenario(self): # Compare URI options. err_msg = "For option %s expected %s but got %s" if test["options"]: - opts = options["options"] + opts = _CaseInsensitiveDictionary() + opts.update(options["options"]) for opt in test["options"]: lopt = opt.lower() optname = INTERNAL_URI_OPTION_NAME_MAP.get(lopt, lopt) From dde8837fb292aeb68df6eccde4eb3bc2047e7be5 Mon Sep 17 00:00:00 2001 From: rishitb-mongodb <160672125+rishitb-mongodb@users.noreply.github.com> Date: Wed, 2 Jul 2025 17:04:40 -0400 Subject: [PATCH 1970/2111] DRIVERS-3105: Update README.md to add mention of SemVer adherence (#2391) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9bee92c568..2acc0fc086 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ a native Python driver for MongoDB, offering both synchronous and asynchronous A [gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.md/) implementation on top of `pymongo`. -PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. +PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. PyMongo follows [semantic versioning](https://semver.org/spec/v2.0.0.html) for its releases. ## Support / Feedback From 1d21d27dda8ec8a7f0e053a7fed5f9fe2b533830 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 3 Jul 2025 12:30:35 -0500 Subject: [PATCH 1971/2111] PYTHON-5430 Use the zizmor action (#2417) --- .github/workflows/zizmor.yml | 13 +------------ README.md | 2 +- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index c6237d2bda..1d58c0d5fb 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -17,16 +17,5 @@ jobs: uses: actions/checkout@v4 with: persist-credentials: false - - name: Setup Rust - uses: actions-rust-lang/setup-rust-toolchain@fb51252c7ba57d633bc668f941da052e410add48 # v1 - - name: Get zizmor - run: cargo install zizmor - name: Run zizmor 🌈 - run: zizmor --format sarif . > results.sarif - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 - with: - sarif_file: results.sarif - category: zizmor + uses: zizmorcore/zizmor-action@1c7106082dbc1753372e3924b7da1b9417011a21 diff --git a/README.md b/README.md index 2acc0fc086..374fc3e4f3 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ a native Python driver for MongoDB, offering both synchronous and asynchronous A [gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.md/) implementation on top of `pymongo`. -PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. PyMongo follows [semantic versioning](https://semver.org/spec/v2.0.0.html) for its releases. +PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. PyMongo follows [semantic versioning](https://semver.org/spec/v2.0.0.html) for its releases. ## Support / Feedback From c788c7e0c1694ee6d7eb7a4a7d5e3ed5f07a5e8b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 3 Jul 2025 13:36:22 -0500 Subject: [PATCH 1972/2111] PYTHON-5431 Include assume role creds in backport task (#2418) --- .evergreen/generated_configs/tasks.yml | 4 ++++ .evergreen/scripts/generate_config.py | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index e91d2c119b..79594da774 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -128,6 +128,10 @@ tasks: - mongo-python-driver - ${github.amrom.workers.devmit} working_dir: src + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN type: test # Coverage report tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 809792679b..1bd54b80ae 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -896,7 +896,8 @@ def create_backport_pr_tasks(): "mongo-python-driver", "${github.amrom.workers.devmit}", ] - cmd = get_subprocess_exec(args=args) + include_expansions = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + cmd = get_subprocess_exec(args=args, include_expansions_in_env=include_expansions) assume_func = FunctionCall(func="assume ec2 role") return [EvgTask(name=name, commands=[assume_func, cmd], allowed_requesters=["commit"])] From d6ab555b81f972de34856c6787f8f807ba84a378 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 07:17:35 -0500 Subject: [PATCH 1973/2111] Bump the actions group with 2 updates (#2422) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/zizmor.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 4ae38d6a20..d1934183df 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 1d58c0d5fb..6d9506776a 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@1c7106082dbc1753372e3924b7da1b9417011a21 + uses: zizmorcore/zizmor-action@0f0557ab4a0b31211d42435e42df31cbd63fdd59 From 11d348802681f5834fda2598acaffc862a46e8ff Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 7 Jul 2025 09:59:48 -0400 Subject: [PATCH 1974/2111] PYTHON-5415 - Unskip tests that rely on server hostname (#2398) --- .evergreen/scripts/setup-system.sh | 10 ++++++++++ test/asynchronous/test_ssl.py | 10 ++++++++++ test/test_ssl.py | 10 ++++++++++ 3 files changed, 30 insertions(+) diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh index d8552e0ad2..9158414cce 100755 --- a/.evergreen/scripts/setup-system.sh +++ b/.evergreen/scripts/setup-system.sh @@ -38,4 +38,14 @@ if [ "$(uname -s)" = "Darwin" ]; then fi fi +if [ -w /etc/hosts ]; then + SUDO="" +else + SUDO="sudo" +fi + +# Add 'server' and 'hostname_not_in_cert' as a hostnames +echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts +echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts + echo "Setting up system... done." diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py index a05bc9379d..0ce3e8bbac 100644 --- a/test/asynchronous/test_ssl.py +++ b/test/asynchronous/test_ssl.py @@ -304,8 +304,13 @@ async def test_cert_ssl_uri_support(self): client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) await self.assertClientWorks(client) + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) @async_client_context.require_tlsCertificateKeyFile @async_client_context.require_server_resolvable + @async_client_context.require_no_api_version @ignore_deprecations async def test_cert_ssl_validation_hostname_matching(self): # Expects the server to be running with server.pem and ca.pem @@ -430,8 +435,13 @@ async def test_tlsCRLFile_support(self): self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] ) + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) @async_client_context.require_tlsCertificateKeyFile @async_client_context.require_server_resolvable + @async_client_context.require_no_api_version @ignore_deprecations async def test_validation_with_system_ca_certs(self): # Expects the server to be running with server.pem and ca.pem. diff --git a/test/test_ssl.py b/test/test_ssl.py index 3ac0a4555a..b1e9a65eb5 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -304,8 +304,13 @@ def test_cert_ssl_uri_support(self): client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) self.assertClientWorks(client) + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable + @client_context.require_no_api_version @ignore_deprecations def test_cert_ssl_validation_hostname_matching(self): # Expects the server to be running with server.pem and ca.pem @@ -430,8 +435,13 @@ def test_tlsCRLFile_support(self): self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] ) + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable + @client_context.require_no_api_version @ignore_deprecations def test_validation_with_system_ca_certs(self): # Expects the server to be running with server.pem and ca.pem. From c77c15e369ddb9d9c63abb0adb9ac7d3f8ab11ef Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 7 Jul 2025 14:00:11 -0700 Subject: [PATCH 1975/2111] PYTHON-5421 continued - update changelog, update docstring, and add testing (#2420) --- doc/changelog.rst | 1 + pymongo/asynchronous/uri_parser.py | 3 +++ pymongo/synchronous/uri_parser.py | 3 +++ test/test_uri_parser.py | 4 ++++ 4 files changed, 11 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 2fd1bdd6b9..933e2922db 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -13,6 +13,7 @@ PyMongo 4.14 brings a number of changes including: - Introduces a minor breaking change. When encoding :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised if the 'padding' metadata field is < 0 or > 7, or non-zero for any type other than PACKED_BIT. +- Changed :meth:`~pymongo.uri_parser.parse_uri`'s ``options`` parameter to be type ``dict`` instead of ``_CaseInsensitiveDictionary``. Changes in Version 4.13.2 (2025/06/17) -------------------------------------- diff --git a/pymongo/asynchronous/uri_parser.py b/pymongo/asynchronous/uri_parser.py index 11a6a6299c..055b04d75a 100644 --- a/pymongo/asynchronous/uri_parser.py +++ b/pymongo/asynchronous/uri_parser.py @@ -80,6 +80,9 @@ async def parse_uri( wait for a response from the DNS server. :param srv_service_name: A custom SRV service name + .. versionchanged:: 4.14 + ``options`` is now type ``dict`` as opposed to a ``_CaseInsensitiveDictionary``. + .. versionchanged:: 4.6 The delimiting slash (``/``) between hosts and connection options is now optional. For example, "mongodb://example.com?tls=true" is now a valid URI. diff --git a/pymongo/synchronous/uri_parser.py b/pymongo/synchronous/uri_parser.py index da0f86d720..45c1752953 100644 --- a/pymongo/synchronous/uri_parser.py +++ b/pymongo/synchronous/uri_parser.py @@ -80,6 +80,9 @@ def parse_uri( wait for a response from the DNS server. :param srv_service_name: A custom SRV service name + .. versionchanged:: 4.14 + ``options`` is now type ``dict`` as opposed to a ``_CaseInsensitiveDictionary``. + .. versionchanged:: 4.6 The delimiting slash (``/``) between hosts and connection options is now optional. For example, "mongodb://example.com?tls=true" is now a valid URI. diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index ed1a53ea26..502faf82b0 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -555,6 +555,10 @@ def test_port_with_whitespace(self): with self.assertRaisesRegex(ValueError, r"Port contains whitespace character: '\\n'"): parse_uri("mongodb://localhost:27\n017") + def test_parse_uri_options_type(self): + opts = parse_uri("mongodb://localhost:27017")["options"] + self.assertIsInstance(opts, dict) + if __name__ == "__main__": unittest.main() From 04f2cc0fa9c073ce400251109e15326f70e95c69 Mon Sep 17 00:00:00 2001 From: Kevin Albertson Date: Wed, 9 Jul 2025 11:56:42 -0400 Subject: [PATCH 1976/2111] PYTHON-5373 test client auth on cloud-dev (#2423) --- .evergreen/generated_configs/variants.yml | 2 ++ .evergreen/scripts/generate_config.py | 1 + .evergreen/scripts/setup_tests.py | 13 ++++++++++++- .gitignore | 1 + test/atlas/test_connection.py | 4 ++++ 5 files changed, 20 insertions(+), 1 deletion(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 939d7bbdef..e98b0d342c 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -75,6 +75,8 @@ buildvariants: display_name: Atlas connect RHEL8 run_on: - rhel87-small + expansions: + TEST_NAME: atlas_connect tags: [pr] # Atlas data lake tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 1bd54b80ae..a30beb1d39 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -425,6 +425,7 @@ def create_atlas_connect_variants(): get_variant_name("Atlas connect", host), tags=["pr"], host=DEFAULT_HOST, + expansions=dict(TEST_NAME="atlas_connect"), ) ] diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index a4ff77ab71..9f383d9425 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -417,7 +417,18 @@ def handle_test_env() -> None: run_command(f"bash {auth_aws_dir}/setup-secrets.sh") if test_name == "atlas_connect": - get_secrets("drivers/atlas_connect") + secrets = get_secrets("drivers/atlas_connect") + + # Write file with Atlas X509 client certificate: + decoded = base64.b64decode(secrets["ATLAS_X509_DEV_CERT_BASE64"]).decode("utf8") + cert_file = ROOT / ".evergreen/atlas_x509_dev_client_certificate.pem" + with cert_file.open("w") as file: + file.write(decoded) + write_env( + "ATLAS_X509_DEV_WITH_CERT", + secrets["ATLAS_X509_DEV"] + "&tlsCertificateKeyFile=" + str(cert_file), + ) + # We do not want the default client_context to be initialized. write_env("DISABLE_CONTEXT") diff --git a/.gitignore b/.gitignore index a88a7556e2..74ed0bbb70 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ expansion.yml .evergreen/scripts/test-env.sh specifications/ results.json +.evergreen/atlas_x509_dev_client_certificate.pem # Lambda temp files test/lambda/.aws-sam diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index 0961f1084f..ac217ab40d 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -42,6 +42,7 @@ "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), + "ATLAS_X509_DEV_WITH_CERT": os.environ.get("ATLAS_X509_DEV_WITH_CERT"), } @@ -91,6 +92,9 @@ def test_srv_tls_11(self): def test_srv_tls_12(self): self.connect_srv(URIS["ATLAS_SRV_TLS12"]) + def test_x509_with_cert(self): + self.connect(URIS["ATLAS_X509_DEV_WITH_CERT"]) + def test_uniqueness(self): """Ensure that we don't accidentally duplicate the test URIs.""" uri_to_names = defaultdict(list) From f29c7b1f154e9f5e33d45f034946fe675c30dbfc Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 10 Jul 2025 06:32:43 -0500 Subject: [PATCH 1977/2111] PYTHON-5315 Mark test_recover_from_initially_erroring_seedlist as flaky (#2424) --- test/asynchronous/test_cursor.py | 4 ++-- test/asynchronous/test_retryable_reads.py | 2 +- test/asynchronous/test_srv_polling.py | 1 + test/test_cursor.py | 4 ++-- test/test_retryable_reads.py | 2 +- test/test_srv_polling.py | 1 + 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 984545438e..8d2dbf532e 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -1411,7 +1411,7 @@ async def test_to_list_length(self): async def test_to_list_csot_applied(self): client = await self.async_single_client(timeoutMS=500, w=1) coll = client.pymongo.test - # Initialize the client with a larger timeout to help make test less flakey + # Initialize the client with a larger timeout to help make test less flaky with pymongo.timeout(10): await coll.insert_many([{} for _ in range(5)]) cursor = coll.find({"$where": delay(1)}) @@ -1453,7 +1453,7 @@ async def test_command_cursor_to_list_length(self): async def test_command_cursor_to_list_csot_applied(self): client = await self.async_single_client(timeoutMS=500, w=1) coll = client.pymongo.test - # Initialize the client with a larger timeout to help make test less flakey + # Initialize the client with a larger timeout to help make test less flaky with pymongo.timeout(10): await coll.insert_many([{} for _ in range(5)]) fail_command = { diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py index a563e3974e..4ac694b58e 100644 --- a/test/asynchronous/test_retryable_reads.py +++ b/test/asynchronous/test_retryable_reads.py @@ -87,7 +87,7 @@ class TestPoolPausedError(AsyncIntegrationTest): async def test_pool_paused_error_is_retryable(self): if "PyPy" in sys.version: # Tracked in PYTHON-3519 - self.skipTest("Test is flakey on PyPy") + self.skipTest("Test is flaky on PyPy") cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() client = await self.async_rs_or_single_client( diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index 05c4699653..18a367a498 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -262,6 +262,7 @@ def empty_seedlist(): await self._test_recover_from_initial(empty_seedlist) + @flaky(reason="PYTHON-5315") async def test_recover_from_initially_erroring_seedlist(self): def erroring_seedlist(): raise ConfigurationError diff --git a/test/test_cursor.py b/test/test_cursor.py index 74fe429269..4902d9e4df 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -1402,7 +1402,7 @@ def test_to_list_length(self): def test_to_list_csot_applied(self): client = self.single_client(timeoutMS=500, w=1) coll = client.pymongo.test - # Initialize the client with a larger timeout to help make test less flakey + # Initialize the client with a larger timeout to help make test less flaky with pymongo.timeout(10): coll.insert_many([{} for _ in range(5)]) cursor = coll.find({"$where": delay(1)}) @@ -1444,7 +1444,7 @@ def test_command_cursor_to_list_length(self): def test_command_cursor_to_list_csot_applied(self): client = self.single_client(timeoutMS=500, w=1) coll = client.pymongo.test - # Initialize the client with a larger timeout to help make test less flakey + # Initialize the client with a larger timeout to help make test less flaky with pymongo.timeout(10): coll.insert_many([{} for _ in range(5)]) fail_command = { diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 5b87943fcc..cfd85b1ac5 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -87,7 +87,7 @@ class TestPoolPausedError(IntegrationTest): def test_pool_paused_error_is_retryable(self): if "PyPy" in sys.version: # Tracked in PYTHON-3519 - self.skipTest("Test is flakey on PyPy") + self.skipTest("Test is flaky on PyPy") cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() client = self.rs_or_single_client( diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index fd5e58da57..87ab418302 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -262,6 +262,7 @@ def empty_seedlist(): self._test_recover_from_initial(empty_seedlist) + @flaky(reason="PYTHON-5315") def test_recover_from_initially_erroring_seedlist(self): def erroring_seedlist(): raise ConfigurationError From 7b82b3582ff652c56954e73fcc1ea707e964989d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Jul 2025 10:03:33 -0500 Subject: [PATCH 1978/2111] PYTHON-5440 Use dochub link for index-wildcard (#2427) --- pymongo/operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/operations.py b/pymongo/operations.py index 300f1ba123..73fb8b5f36 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -781,7 +781,7 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: Added the ``partialFilterExpression`` option to support partial indexes. - .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ """ keys = _index_list(keys) if kwargs.get("name") is None: From e07a6b7e77ab92281d193fe27aafe4c4cd2fb8b1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Jul 2025 10:24:09 -0500 Subject: [PATCH 1979/2111] PYTHON-5439 Remove dead link in PyMongo 4 migration guide (#2428) --- doc/migrate-to-pymongo4.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 3e992a8249..68dc1980b9 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -837,7 +837,6 @@ pymongo.GEOHAYSTACK is removed Removed :attr:`pymongo.GEOHAYSTACK`. Replace with "geoHaystack" or create a 2d index and use $geoNear or $geoWithin instead. -See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack. UUIDLegacy is removed --------------------- From 5ce53dc175789357b94a236e23b8753a12a82ad9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Jul 2025 10:47:38 -0500 Subject: [PATCH 1980/2111] PYTHON-5374 Assert unset BulkWriteException.partialResult in CRUD prose tests (#2425) Co-authored-by: Noah Stapp --- test/asynchronous/test_client_bulk_write.py | 36 +++++++++++-------- .../test_read_write_concern_spec.py | 4 +++ test/test_client_bulk_write.py | 36 +++++++++++-------- test/test_read_write_concern_spec.py | 4 +++ 4 files changed, 50 insertions(+), 30 deletions(-) diff --git a/test/asynchronous/test_client_bulk_write.py b/test/asynchronous/test_client_bulk_write.py index cf863979b5..49f969fa34 100644 --- a/test/asynchronous/test_client_bulk_write.py +++ b/test/asynchronous/test_client_bulk_write.py @@ -84,6 +84,7 @@ async def test_formats_write_error_correctly(self): # https://github.com/mongodb/specifications/tree/master/source/crud/tests +# Note: tests 1 and 2 are in test_read_write_concern_spec.py class TestClientBulkWriteCRUD(AsyncIntegrationTest): async def asyncSetUp(self): await super().asyncSetUp() @@ -92,7 +93,7 @@ async def asyncSetUp(self): self.max_message_size_bytes = await async_client_context.max_message_size_bytes @async_client_context.require_version_min(8, 0, 0, -24) - async def test_batch_splits_if_num_operations_too_large(self): + async def test_3_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -116,7 +117,7 @@ async def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @async_client_context.require_version_min(8, 0, 0, -24) - async def test_batch_splits_if_ops_payload_too_large(self): + async def test_4_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -148,7 +149,7 @@ async def test_batch_splits_if_ops_payload_too_large(self): @async_client_context.require_version_min(8, 0, 0, -24) @async_client_context.require_failCommand_fail_point - async def test_collects_write_concern_errors_across_batches(self): + async def test_5_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client( event_listeners=[listener], @@ -189,7 +190,7 @@ async def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(bulk_write_events), 2) @async_client_context.require_version_min(8, 0, 0, -24) - async def test_collects_write_errors_across_batches_unordered(self): + async def test_6_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -218,7 +219,7 @@ async def test_collects_write_errors_across_batches_unordered(self): self.assertEqual(len(bulk_write_events), 2) @async_client_context.require_version_min(8, 0, 0, -24) - async def test_collects_write_errors_across_batches_ordered(self): + async def test_6_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -247,7 +248,7 @@ async def test_collects_write_errors_across_batches_ordered(self): self.assertEqual(len(bulk_write_events), 1) @async_client_context.require_version_min(8, 0, 0, -24) - async def test_handles_cursor_requiring_getMore(self): + async def test_7_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -287,7 +288,7 @@ async def test_handles_cursor_requiring_getMore(self): @async_client_context.require_version_min(8, 0, 0, -24) @async_client_context.require_no_standalone - async def test_handles_cursor_requiring_getMore_within_transaction(self): + async def test_8_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -329,7 +330,7 @@ async def test_handles_cursor_requiring_getMore_within_transaction(self): @async_client_context.require_version_min(8, 0, 0, -24) @async_client_context.require_failCommand_fail_point - async def test_handles_getMore_error(self): + async def test_9_handles_getMore_error(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -382,7 +383,7 @@ async def test_handles_getMore_error(self): self.assertTrue(kill_cursors_event) @async_client_context.require_version_min(8, 0, 0, -24) - async def test_returns_error_if_unacknowledged_too_large_insert(self): + async def test_10_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -441,7 +442,7 @@ async def _setup_namespace_test_models(self): return num_models, models @async_client_context.require_version_min(8, 0, 0, -24) - async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): + async def test_11_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -471,7 +472,7 @@ async def test_no_batch_splits_if_new_namespace_is_not_too_large(self): self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") @async_client_context.require_version_min(8, 0, 0, -24) - async def test_batch_splits_if_new_namespace_is_too_large(self): + async def test_11_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client(event_listeners=[listener]) @@ -508,25 +509,27 @@ async def test_batch_splits_if_new_namespace_is_too_large(self): self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) @async_client_context.require_version_min(8, 0, 0, -24) - async def test_returns_error_if_no_writes_can_be_added_to_ops(self): + async def test_12_returns_error_if_no_writes_can_be_added_to_ops(self): client = await self.async_rs_or_single_client() # Document too large. b_repeated = "b" * self.max_message_size_bytes models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] - with self.assertRaises(DocumentTooLarge): + with self.assertRaises(DocumentTooLarge) as context: await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) # Namespace too large. c_repeated = "c" * self.max_message_size_bytes namespace = f"db.{c_repeated}" models = [InsertOne(namespace=namespace, document={"a": "b"})] - with self.assertRaises(DocumentTooLarge): + with self.assertRaises(DocumentTooLarge) as context: await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) @async_client_context.require_version_min(8, 0, 0, -24) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - async def test_returns_error_if_auto_encryption_configured(self): + async def test_13_returns_error_if_auto_encryption_configured(self): opts = AutoEncryptionOpts( key_vault_namespace="db.coll", kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, @@ -536,6 +539,7 @@ async def test_returns_error_if_auto_encryption_configured(self): models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) self.assertIn( "bulk_write does not currently support automatic encryption", context.exception._message ) @@ -579,6 +583,8 @@ async def test_upserted_result(self): self.assertEqual(result.update_results[1].did_upsert, True) self.assertEqual(result.update_results[2].did_upsert, False) + # Note: test 14 is optional and intentionally not implemented because we provide multiple APIs to specify explain. + @async_client_context.require_version_min(8, 0, 0, -24) async def test_15_unacknowledged_write_across_batches(self): listener = OvertCommandListener() diff --git a/test/asynchronous/test_read_write_concern_spec.py b/test/asynchronous/test_read_write_concern_spec.py index 86f79fd28d..b5cb32932f 100644 --- a/test/asynchronous/test_read_write_concern_spec.py +++ b/test/asynchronous/test_read_write_concern_spec.py @@ -180,6 +180,8 @@ async def test_raise_wtimeout(self): WriteConcern(w=async_client_context.w, wtimeout=1), WTimeoutError ) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 1 (included here instead of test_client_bulk_write.py) @async_client_context.require_failCommand_fail_point async def test_error_includes_errInfo(self): expected_wce = { @@ -214,6 +216,8 @@ async def test_error_includes_errInfo(self): } self.assertEqual(ctx.exception.details, expected_details) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 2 (included here instead of test_client_bulk_write.py) @async_client_context.require_version_min(4, 9) async def test_write_error_details_exposes_errinfo(self): listener = OvertCommandListener() diff --git a/test/test_client_bulk_write.py b/test/test_client_bulk_write.py index 1614e9f3cf..0cb6845099 100644 --- a/test/test_client_bulk_write.py +++ b/test/test_client_bulk_write.py @@ -84,6 +84,7 @@ def test_formats_write_error_correctly(self): # https://github.com/mongodb/specifications/tree/master/source/crud/tests +# Note: tests 1 and 2 are in test_read_write_concern_spec.py class TestClientBulkWriteCRUD(IntegrationTest): def setUp(self): super().setUp() @@ -92,7 +93,7 @@ def setUp(self): self.max_message_size_bytes = client_context.max_message_size_bytes @client_context.require_version_min(8, 0, 0, -24) - def test_batch_splits_if_num_operations_too_large(self): + def test_3_batch_splits_if_num_operations_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -116,7 +117,7 @@ def test_batch_splits_if_num_operations_too_large(self): self.assertEqual(first_event.operation_id, second_event.operation_id) @client_context.require_version_min(8, 0, 0, -24) - def test_batch_splits_if_ops_payload_too_large(self): + def test_4_batch_splits_if_ops_payload_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -148,7 +149,7 @@ def test_batch_splits_if_ops_payload_too_large(self): @client_context.require_version_min(8, 0, 0, -24) @client_context.require_failCommand_fail_point - def test_collects_write_concern_errors_across_batches(self): + def test_5_collects_write_concern_errors_across_batches(self): listener = OvertCommandListener() client = self.rs_or_single_client( event_listeners=[listener], @@ -189,7 +190,7 @@ def test_collects_write_concern_errors_across_batches(self): self.assertEqual(len(bulk_write_events), 2) @client_context.require_version_min(8, 0, 0, -24) - def test_collects_write_errors_across_batches_unordered(self): + def test_6_collects_write_errors_across_batches_unordered(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -218,7 +219,7 @@ def test_collects_write_errors_across_batches_unordered(self): self.assertEqual(len(bulk_write_events), 2) @client_context.require_version_min(8, 0, 0, -24) - def test_collects_write_errors_across_batches_ordered(self): + def test_6_collects_write_errors_across_batches_ordered(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -247,7 +248,7 @@ def test_collects_write_errors_across_batches_ordered(self): self.assertEqual(len(bulk_write_events), 1) @client_context.require_version_min(8, 0, 0, -24) - def test_handles_cursor_requiring_getMore(self): + def test_7_handles_cursor_requiring_getMore(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -287,7 +288,7 @@ def test_handles_cursor_requiring_getMore(self): @client_context.require_version_min(8, 0, 0, -24) @client_context.require_no_standalone - def test_handles_cursor_requiring_getMore_within_transaction(self): + def test_8_handles_cursor_requiring_getMore_within_transaction(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -329,7 +330,7 @@ def test_handles_cursor_requiring_getMore_within_transaction(self): @client_context.require_version_min(8, 0, 0, -24) @client_context.require_failCommand_fail_point - def test_handles_getMore_error(self): + def test_9_handles_getMore_error(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -382,7 +383,7 @@ def test_handles_getMore_error(self): self.assertTrue(kill_cursors_event) @client_context.require_version_min(8, 0, 0, -24) - def test_returns_error_if_unacknowledged_too_large_insert(self): + def test_10_returns_error_if_unacknowledged_too_large_insert(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -437,7 +438,7 @@ def _setup_namespace_test_models(self): return num_models, models @client_context.require_version_min(8, 0, 0, -24) - def test_no_batch_splits_if_new_namespace_is_not_too_large(self): + def test_11_no_batch_splits_if_new_namespace_is_not_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -467,7 +468,7 @@ def test_no_batch_splits_if_new_namespace_is_not_too_large(self): self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") @client_context.require_version_min(8, 0, 0, -24) - def test_batch_splits_if_new_namespace_is_too_large(self): + def test_11_batch_splits_if_new_namespace_is_too_large(self): listener = OvertCommandListener() client = self.rs_or_single_client(event_listeners=[listener]) @@ -504,25 +505,27 @@ def test_batch_splits_if_new_namespace_is_too_large(self): self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) @client_context.require_version_min(8, 0, 0, -24) - def test_returns_error_if_no_writes_can_be_added_to_ops(self): + def test_12_returns_error_if_no_writes_can_be_added_to_ops(self): client = self.rs_or_single_client() # Document too large. b_repeated = "b" * self.max_message_size_bytes models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] - with self.assertRaises(DocumentTooLarge): + with self.assertRaises(DocumentTooLarge) as context: client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) # Namespace too large. c_repeated = "c" * self.max_message_size_bytes namespace = f"db.{c_repeated}" models = [InsertOne(namespace=namespace, document={"a": "b"})] - with self.assertRaises(DocumentTooLarge): + with self.assertRaises(DocumentTooLarge) as context: client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) @client_context.require_version_min(8, 0, 0, -24) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def test_returns_error_if_auto_encryption_configured(self): + def test_13_returns_error_if_auto_encryption_configured(self): opts = AutoEncryptionOpts( key_vault_namespace="db.coll", kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, @@ -532,6 +535,7 @@ def test_returns_error_if_auto_encryption_configured(self): models = [InsertOne(namespace="db.coll", document={"a": "b"})] with self.assertRaises(InvalidOperation) as context: client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) self.assertIn( "bulk_write does not currently support automatic encryption", context.exception._message ) @@ -575,6 +579,8 @@ def test_upserted_result(self): self.assertEqual(result.update_results[1].did_upsert, True) self.assertEqual(result.update_results[2].did_upsert, False) + # Note: test 14 is optional and intentionally not implemented because we provide multiple APIs to specify explain. + @client_context.require_version_min(8, 0, 0, -24) def test_15_unacknowledged_write_across_batches(self): listener = OvertCommandListener() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 383dc70902..4b816b7af9 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -178,6 +178,8 @@ def test_raise_wtimeout(self): self.disable_replication(client_context.client) self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 1 (included here instead of test_client_bulk_write.py) @client_context.require_failCommand_fail_point def test_error_includes_errInfo(self): expected_wce = { @@ -212,6 +214,8 @@ def test_error_includes_errInfo(self): } self.assertEqual(ctx.exception.details, expected_details) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 2 (included here instead of test_client_bulk_write.py) @client_context.require_version_min(4, 9) def test_write_error_details_exposes_errinfo(self): listener = OvertCommandListener() From 84db915d91437a51a19de478b0fd94ded01e7301 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 14 Jul 2025 12:27:22 -0500 Subject: [PATCH 1981/2111] PYTHON-5361 Fix timeouts in CSE custom endpoint test (#2426) --- test/asynchronous/test_encryption.py | 21 ++++++++------------- test/test_encryption.py | 21 ++++++++------------- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index c2ef7c7e33..f6afa4b2a3 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -1306,7 +1306,7 @@ async def asyncSetUp(self): kms_providers_invalid = copy.deepcopy(kms_providers) kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" - kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.invalid:5698" self.client_encryption_invalid = self.create_client_encryption( kms_providers=kms_providers_invalid, key_vault_namespace="keyvault.datakeys", @@ -1364,15 +1364,10 @@ async def test_03_aws_region_key_endpoint_port(self): }, ) - @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - async def test_04_aws_endpoint_invalid_port(self): - master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:12345", - } - with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345"): - await self.client_encryption.create_data_key("aws", master_key=master_key) + async def test_04_kmip_endpoint_invalid_port(self): + master_key = {"keyId": "1", "endpoint": "localhost:12345"} + with self.assertRaisesRegex(EncryptionError, "localhost:12345"): + await self.client_encryption.create_data_key("kmip", master_key=master_key) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") async def test_05_aws_endpoint_wrong_region(self): @@ -1478,7 +1473,7 @@ async def test_11_kmip_master_key_endpoint(self): self.assertEqual("test", await self.client_encryption_invalid.decrypt(encrypted)) async def test_12_kmip_master_key_invalid_endpoint(self): - key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} + key = {"keyId": "1", "endpoint": "doesnotexist.invalid:5698"} with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): await self.client_encryption.create_data_key("kmip", key) @@ -2166,7 +2161,7 @@ async def test_01_aws(self): await self.client_encryption_invalid_hostname.create_data_key("aws", key) async def test_02_azure(self): - key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): await self.client_encryption_no_client_cert.create_data_key("azure", key) @@ -2241,7 +2236,7 @@ async def test_06_named_kms_providers_apply_tls_options_aws(self): await self.client_encryption_with_names.create_data_key("aws:with_tls", key) async def test_06_named_kms_providers_apply_tls_options_azure(self): - key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): await self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) diff --git a/test/test_encryption.py b/test/test_encryption.py index 68b24f1729..5c8813203d 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1302,7 +1302,7 @@ def setUp(self): kms_providers_invalid = copy.deepcopy(kms_providers) kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" - kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.invalid:5698" self.client_encryption_invalid = self.create_client_encryption( kms_providers=kms_providers_invalid, key_vault_namespace="keyvault.datakeys", @@ -1358,15 +1358,10 @@ def test_03_aws_region_key_endpoint_port(self): }, ) - @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - def test_04_aws_endpoint_invalid_port(self): - master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:12345", - } - with self.assertRaisesRegex(EncryptionError, "kms.us-east-1.amazonaws.com:12345"): - self.client_encryption.create_data_key("aws", master_key=master_key) + def test_04_kmip_endpoint_invalid_port(self): + master_key = {"keyId": "1", "endpoint": "localhost:12345"} + with self.assertRaisesRegex(EncryptionError, "localhost:12345"): + self.client_encryption.create_data_key("kmip", master_key=master_key) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): @@ -1472,7 +1467,7 @@ def test_11_kmip_master_key_endpoint(self): self.assertEqual("test", self.client_encryption_invalid.decrypt(encrypted)) def test_12_kmip_master_key_invalid_endpoint(self): - key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} + key = {"keyId": "1", "endpoint": "doesnotexist.invalid:5698"} with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): self.client_encryption.create_data_key("kmip", key) @@ -2158,7 +2153,7 @@ def test_01_aws(self): self.client_encryption_invalid_hostname.create_data_key("aws", key) def test_02_azure(self): - key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): self.client_encryption_no_client_cert.create_data_key("azure", key) @@ -2233,7 +2228,7 @@ def test_06_named_kms_providers_apply_tls_options_aws(self): self.client_encryption_with_names.create_data_key("aws:with_tls", key) def test_06_named_kms_providers_apply_tls_options_azure(self): - key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) From ca3cbc3f31d46236317a82e29229d55a02a9c307 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 15 Jul 2025 08:34:47 -0700 Subject: [PATCH 1982/2111] PYTHON-5253 Automated Spec Test Sync (#2409) Co-authored-by: Noah Stapp --- .evergreen/config.yml | 20 ++++ .evergreen/remove-unimplemented-tests.sh | 45 +++++++++ .evergreen/resync-specs.sh | 13 +-- .evergreen/scripts/create-spec-pr.sh | 50 ++++++++++ .evergreen/scripts/resync-all-specs.py | 119 +++++++++++++++++++++++ .evergreen/scripts/resync-all-specs.sh | 42 ++++++++ .evergreen/spec-patch/PYTHON-4884.patch | 12 +++ .evergreen/spec-patch/PYTHON-4918.patch | 24 +++++ .evergreen/spec-patch/PYTHON-4931.patch | 93 ++++++++++++++++++ .evergreen/spec-patch/PYTHON-5237.patch | 48 +++++++++ CONTRIBUTING.md | 33 +++++++ 11 files changed, 493 insertions(+), 6 deletions(-) create mode 100755 .evergreen/remove-unimplemented-tests.sh create mode 100755 .evergreen/scripts/create-spec-pr.sh create mode 100644 .evergreen/scripts/resync-all-specs.py create mode 100755 .evergreen/scripts/resync-all-specs.sh create mode 100644 .evergreen/spec-patch/PYTHON-4884.patch create mode 100644 .evergreen/spec-patch/PYTHON-4918.patch create mode 100644 .evergreen/spec-patch/PYTHON-4931.patch create mode 100644 .evergreen/spec-patch/PYTHON-5237.patch diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 46c86103ad..91fa442775 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -42,3 +42,23 @@ post: - func: "upload mo artifacts" - func: "upload test results" - func: "cleanup" + +tasks: + - name: resync_specs + commands: + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN] + args: + - .evergreen/scripts/resync-all-specs.sh + working_dir: src + +buildvariants: + - name: resync_specs + display_name: "Resync Specs" + run_on: rhel80-small + cron: '0 16 * * MON' + patchable: true + tasks: + - name: resync_specs diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh new file mode 100755 index 0000000000..e31011f615 --- /dev/null +++ b/.evergreen/remove-unimplemented-tests.sh @@ -0,0 +1,45 @@ +#!/bin/bash +PYMONGO=$(dirname "$(cd "$(dirname "$0")" || exit; pwd)") + +rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 +rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 +rm $PYMONGO/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json # PYTHON-5143 +rm $PYMONGO/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json # PYTHON-5143 +rm $PYMONGO/test/client-side-encryption/spec/unified/localSchema.json # PYTHON-5143 +rm $PYMONGO/test/client-side-encryption/spec/unified/maxWireVersion.json # PYTHON-5143 +rm $PYMONGO/test/unified-test-format/valid-pass/poc-queryable-encryption.json # PYTHON-5143 +rm $PYMONGO/test/gridfs/rename.json # PYTHON-4931 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-application-error.json # PYTHON-4918 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-checkout-error.json # PYTHON-4918 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-min-pool-size-error.json # PYTHON-4918 + +# Python doesn't implement DRIVERS-3064 +rm $PYMONGO/test/collection_management/listCollections-rawdata.json +rm $PYMONGO/test/crud/unified/aggregate-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-deleteMany-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-deleteOne-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-updateMany-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-updateOne-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-delete-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-update-rawdata.json +rm $PYMONGO/test/crud/unified/count-rawdata.json +rm $PYMONGO/test/crud/unified/countDocuments-rawdata.json +rm $PYMONGO/test/crud/unified/db-aggregate-rawdata.json +rm $PYMONGO/test/crud/unified/deleteMany-rawdata.json +rm $PYMONGO/test/crud/unified/deleteOne-rawdata.json +rm $PYMONGO/test/crud/unified/distinct-rawdata.json +rm $PYMONGO/test/crud/unified/estimatedDocumentCount-rawdata.json +rm $PYMONGO/test/crud/unified/find-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndDelete-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndReplace-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndUpdate-rawdata.json +rm $PYMONGO/test/crud/unified/insertMany-rawdata.json +rm $PYMONGO/test/crud/unified/insertOne-rawdata.json +rm $PYMONGO/test/crud/unified/replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/updateMany-rawdata.json +rm $PYMONGO/test/crud/unified/updateOne-rawdata.json +rm $PYMONGO/test/index_management/index-rawdata.json + +echo "Done removing unimplemented tests\n" diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index d7dfafbba9..765af2a562 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -45,9 +45,12 @@ then fi # Ensure the JSON files are up to date. -cd $SPECS/source -make -cd - +if ! [ -n "${CI:-}" ] +then + cd $SPECS/source + make + cd - +fi # cpjson unified-test-format/tests/invalid unified-test-format/invalid # * param1: Path to spec tests dir in specifications repo # * param2: Path to where the corresponding tests live in Python. @@ -110,7 +113,6 @@ do cmap|CMAP|connection-monitoring-and-pooling) cpjson connection-monitoring-and-pooling/tests/logging connection_logging cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring - rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 ;; apm|APM|command-monitoring|command_monitoring) cpjson command-logging-and-monitoring/tests/monitoring command_monitoring @@ -174,7 +176,7 @@ do ;; server-selection|server_selection) cpjson server-selection/tests/ server_selection - rm -rf $PYMONGO/test/server_selection/logging + rm -rf $PYMONGO/test/server_selection/logging # these tests live in server_selection_logging cpjson server-selection/tests/logging server_selection_logging ;; server-selection-logging|server_selection_logging) @@ -186,7 +188,6 @@ do transactions|transactions-convenient-api) cpjson transactions/tests/ transactions cpjson transactions-convenient-api/tests/ transactions-convenient-api - rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 ;; unified|unified-test-format) cpjson unified-test-format/tests/ unified-test-format/ diff --git a/.evergreen/scripts/create-spec-pr.sh b/.evergreen/scripts/create-spec-pr.sh new file mode 100755 index 0000000000..a5e49bb211 --- /dev/null +++ b/.evergreen/scripts/create-spec-pr.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +tools="$(realpath -s "../drivers-tools")" +pushd $tools/.evergreen/github_app || exit + +owner="mongodb" +repo="mongo-python-driver" + +# Bootstrap the app. +echo "bootstrapping" +source utils.sh +bootstrap drivers/comment-bot + +# Run the app. +source ./secrets-export.sh + +# Get a github access token for the git checkout. +echo "Getting github token..." + +token=$(bash ./get-access-token.sh $repo $owner) +if [ -z "${token}" ]; then + echo "Failed to get github access token!" + popd || exit + exit 1 +fi +echo "Getting github token... done." +popd || exit + +# Make the git checkout and create a new branch. +echo "Creating the git checkout..." +branch="spec-resync-"$(date '+%m-%d-%Y') + +git remote set-url origin https://x-access-token:${token}@github.com/$owner/$repo.git +git checkout -b $branch "origin/master" +git add ./test +git commit -am "resyncing specs $(date '+%m-%d-%Y')" +echo "Creating the git checkout... done." + +git push origin $branch +resp=$(curl -L \ + -X POST \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $token" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + -d "{\"title\":\"[Spec Resync] $(date '+%m-%d-%Y')\",\"body\":\"$(cat "$1")\",\"head\":\"${branch}\",\"base\":\"master\"}" \ + --url https://api.github.com/repos/$owner/$repo/pulls) +echo $resp | jq '.html_url' +echo "Creating the PR... done." + +rm -rf $tools diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py new file mode 100644 index 0000000000..d824211d40 --- /dev/null +++ b/.evergreen/scripts/resync-all-specs.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +import argparse +import os +import pathlib +import subprocess +from argparse import Namespace +from subprocess import CalledProcessError +from typing import Optional + + +def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: + """Actually sync the specs""" + print("Beginning to sync specs") # noqa: T201 + for spec in os.scandir(directory): + if not spec.is_dir(): + continue + + if spec.name in ["asynchronous"]: + continue + try: + subprocess.run( + ["bash", "./.evergreen/resync-specs.sh", spec.name], # noqa: S603, S607 + capture_output=True, + text=True, + check=True, + ) + except CalledProcessError as exc: + errored[spec.name] = exc.stderr + print("Done syncing specs") # noqa: T201 + + +def apply_patches(): + print("Beginning to apply patches") # noqa: T201 + subprocess.run(["bash", "./.evergreen/remove-unimplemented-tests.sh"], check=True) # noqa: S603, S607 + subprocess.run(["git apply -R --allow-empty ./.evergreen/spec-patch/*"], shell=True, check=True) # noqa: S602, S607 + + +def check_new_spec_directories(directory: pathlib.Path) -> list[str]: + """Check to see if there are any directories in the spec repo that don't exist in pymongo/test""" + spec_dir = pathlib.Path(os.environ["MDB_SPECS"]) / "source" + spec_set = { + entry.name.replace("-", "_") + for entry in os.scandir(spec_dir) + if entry.is_dir() + and (pathlib.Path(entry.path) / "tests").is_dir() + and len(list(os.scandir(pathlib.Path(entry.path) / "tests"))) > 1 + } + test_set = {entry.name.replace("-", "_") for entry in os.scandir(directory) if entry.is_dir()} + known_mappings = { + "ocsp_support": "ocsp", + "client_side_operations_timeout": "csot", + "mongodb_handshake": "handshake", + "load_balancers": "load_balancer", + "atlas_data_lake_testing": "atlas", + "connection_monitoring_and_pooling": "connection_monitoring", + "command_logging_and_monitoring": "command_logging", + "initial_dns_seedlist_discovery": "srv_seedlist", + "server_discovery_and_monitoring": "sdam_monitoring", + } + + for k, v in known_mappings.items(): + if k in spec_set: + spec_set.remove(k) + spec_set.add(v) + return list(spec_set - test_set) + + +def write_summary(errored: dict[str, str], new: list[str], filename: Optional[str]) -> None: + """Generate the PR description""" + pr_body = "" + process = subprocess.run( + ["git diff --name-only | awk -F'/' '{print $2}' | sort | uniq"], # noqa: S607 + shell=True, # noqa: S602 + capture_output=True, + text=True, + check=True, + ) + succeeded = process.stdout.strip().split() + if len(succeeded) > 0: + pr_body += "The following specs were changed:\n -" + pr_body += "\n -".join(succeeded) + pr_body += "\n" + if len(errored) > 0: + pr_body += "\n\nThe following spec syncs encountered errors:\n -" + for k, v in errored.items(): + pr_body += f"\n -{k}\n```{v}\n```" + pr_body += "\n" + if len(new) > 0: + pr_body += "\n\nThe following directories are in the specification repository and not in our test directory:\n -" + pr_body += "\n -".join(new) + pr_body += "\n" + if pr_body != "": + if filename is None: + print(f"\n{pr_body}") # noqa: T201 + else: + with open(filename, "w") as f: + # replacements made for proper json + f.write(pr_body.replace("\n", "\\n").replace("\t", "\\t")) + + +def main(args: Namespace): + directory = pathlib.Path("./test") + errored: dict[str, str] = {} + resync_specs(directory, errored) + apply_patches() + new = check_new_spec_directories(directory) + write_summary(errored, new, args.filename) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Python Script to resync all specs and generate summary for PR." + ) + parser.add_argument( + "--filename", help="Name of file for the summary to be written into.", default=None + ) + args = parser.parse_args() + main(args) diff --git a/.evergreen/scripts/resync-all-specs.sh b/.evergreen/scripts/resync-all-specs.sh new file mode 100755 index 0000000000..0f7ae2ccd8 --- /dev/null +++ b/.evergreen/scripts/resync-all-specs.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Run spec syncing script and create PR + +# SETUP +SRC_URL="https://github.com/mongodb/specifications.git" +# needs to be set for resync-specs.sh +SPEC_SRC="$(realpath "../specifications")" +SCRIPT="$(realpath "./.evergreen/resync-specs.sh")" + +# Clone the spec repo if the directory does not exist +if [[ ! -d $SPEC_SRC ]]; then + git clone $SRC_URL $SPEC_SRC + if [[ $? -ne 0 ]]; then + echo "Error: Failed to clone repository." + exit 1 + fi +fi + +# Set environment variable to the cloned spec repo for resync-specs.sh +export MDB_SPECS="$SPEC_SRC" + +# Check that resync-specs.sh exists and is executable +if [[ ! -x $SCRIPT ]]; then + echo "Error: $SCRIPT not found or is not executable." + exit 1 +fi + +PR_DESC="spec_sync.txt" + +# run python script that actually does all the resyncing +if ! [ -n "${CI:-}" ] +then + # we're running locally + python3 ./.evergreen/scripts/resync-all-specs.py +else + /opt/devtools/bin/python3.11 ./.evergreen/scripts/resync-all-specs.py "$PR_DESC" + if [[ -f $PR_DESC ]]; then + # changes were made -> call scrypt to create PR for us + .evergreen/scripts/create-spec-pr.sh "$PR_DESC" + rm "$PR_DESC" + fi +fi diff --git a/.evergreen/spec-patch/PYTHON-4884.patch b/.evergreen/spec-patch/PYTHON-4884.patch new file mode 100644 index 0000000000..0ef66e072a --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4884.patch @@ -0,0 +1,12 @@ +diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json +index f857afdc..1554341d 100644 +--- a/test/bson_corpus/datetime.json ++++ b/test/bson_corpus/datetime.json +@@ -24,6 +24,7 @@ + { + "description" : "Y10K", + "canonical_bson" : "1000000009610000DC1FD277E6000000", ++ "relaxed_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}", + "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" + }, + { diff --git a/.evergreen/spec-patch/PYTHON-4918.patch b/.evergreen/spec-patch/PYTHON-4918.patch new file mode 100644 index 0000000000..5f409c5870 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4918.patch @@ -0,0 +1,24 @@ +diff --git a/test/connection_monitoring/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json +index 1c744b85..509b2a23 100644 +--- a/test/connection_monitoring/pool-create-min-size-error.json ++++ b/test/connection_monitoring/pool-create-min-size-error.json +@@ -49,15 +49,15 @@ + "type": "ConnectionCreated", + "address": 42 + }, ++ { ++ "type": "ConnectionPoolCleared", ++ "address": 42 ++ }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" +- }, +- { +- "type": "ConnectionPoolCleared", +- "address": 42 + } + ], + "ignore": [ diff --git a/.evergreen/spec-patch/PYTHON-4931.patch b/.evergreen/spec-patch/PYTHON-4931.patch new file mode 100644 index 0000000000..ad7086b378 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4931.patch @@ -0,0 +1,93 @@ +diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json +index 277b9ed7..9a9b22fc 100644 +--- a/test/gridfs/delete.json ++++ b/test/gridfs/delete.json +@@ -497,7 +497,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ], +@@ -650,7 +650,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ], +diff --git a/test/gridfs/download.json b/test/gridfs/download.json +index f0cb8517..67658ac5 100644 +--- a/test/gridfs/download.json ++++ b/test/gridfs/download.json +@@ -338,7 +338,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] +@@ -370,7 +370,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] +@@ -402,7 +402,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] +@@ -471,7 +471,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] +@@ -514,7 +514,7 @@ + } + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] +diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json +index 7b20933c..45abaf7b 100644 +--- a/test/gridfs/downloadByName.json ++++ b/test/gridfs/downloadByName.json +@@ -290,7 +290,7 @@ + "filename": "xyz" + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] +@@ -306,7 +306,7 @@ + "revision": 999 + }, + "expectError": { +- "isError": true ++ "isClientError": true + } + } + ] diff --git a/.evergreen/spec-patch/PYTHON-5237.patch b/.evergreen/spec-patch/PYTHON-5237.patch new file mode 100644 index 0000000000..01de56b6c8 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5237.patch @@ -0,0 +1,48 @@ +diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json +index 6aa1da1d..d7a1c6ab 100644 +--- a/test/sessions/driver-sessions-dirty-session-errors.json ++++ b/test/sessions/driver-sessions-dirty-session-errors.json +@@ -347,7 +347,9 @@ + "x": 1 + } + }, +- "new": false, ++ "new": { ++ "$$unsetOrMatches": false ++ }, + "lsid": { + "$$sessionLsid": "session0" + }, +@@ -375,7 +377,9 @@ + "x": 1 + } + }, +- "new": false, ++ "new": { ++ "$$unsetOrMatches": false ++ }, + "lsid": { + "$$sessionLsid": "session0" + }, +@@ -627,7 +631,9 @@ + "x": 1 + } + }, +- "new": false, ++ "new": { ++ "$$unsetOrMatches": false ++ }, + "lsid": { + "$$type": "object" + }, +@@ -655,7 +661,9 @@ + "x": 1 + } + }, +- "new": false, ++ "new": { ++ "$$unsetOrMatches": false ++ }, + "lsid": { + "$$type": "object" + }, diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5b5ddef9a7..dc5ee4fe8f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -441,6 +441,39 @@ update in PyMongo. This is primarily helpful if you are implementing a new feature in PyMongo that has spec tests already implemented, or if you are attempting to validate new spec tests in PyMongo. +### Automated Specification Test Resyncing +The (`/.evergreen/scripts/resync-all-specs.sh`) script +automatically runs once a week to resync all the specs with the [specifications repo](https://github.com/mongodb/specifications). +A PR will be generated by mongodb-drivers-pr-bot containing any changes picked up by this resync. +The PR description will display the name(s) of the updated specs along +with any errors that occurred. + +Spec test changes associated with a behavioral change or bugfix that has yet to be implemented in PyMongo +must be added to a patch file in `/.evergreen/spec-patch`. Each patch +file must be named after the associated PYTHON ticket and contain the +test differences between PyMongo's current tests and the specification. +All changes listed in these patch files will be *undone* by the script and won't +be applied to PyMongo's tests. + +When a new test file or folder is added to the spec repo before the associated code changes are implemented, that test's path must be added to `.evergreen/remove-unimplemented-tests.sh` along with a comment indicating the associated PYTHON ticket for those changes. + +Any PR that implements a PYTHON ticket documented in a patch file or within `.evergreen/remove-unimplemented-tests.sh` must also remove the associated patch file or entry in `remove-unimplemented-tests.sh`. + +#### Adding to a patch file +To add to or create a patch file, run `git diff` to show the desired changes to undo and copy the +results into the patch file. + +For example: the imaginary, unimplemented PYTHON-1234 ticket has associated spec test changes. To add those changes to `PYTHON-1234.patch`), do the following: +```bash +git diff HEAD~1 path/to/file >> .evergreen/spec-patch/PYTHON-1234.patch + +#### Running Locally +Both `resync-all-specs.sh` and `resync-all-specs.py` can be run locally (and won't generate a PR). +```bash +./.evergreen/scripts/resync-all-specs.sh +python3 ./.evergreen/scripts/resync-all-specs.py +``` + ## Making a Release Follow the [Python Driver Release Process Wiki](https://wiki.corp.mongodb.com/display/DRIVERS/Python+Driver+Release+Process). From 1e67c5c02c4f60eb92ec9740340f9c69abe097c3 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Tue, 15 Jul 2025 10:17:30 -0700 Subject: [PATCH 1983/2111] PYTHON-5289 Validate ignored bits are 0 on write for bson.BinaryVector (#2397) --- bson/binary.py | 17 +++++++++++++++++ doc/changelog.rst | 4 ++++ test/test_bson.py | 24 ++++++++++++++---------- 3 files changed, 35 insertions(+), 10 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index a1f63adf27..a43f81bf2d 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -14,6 +14,7 @@ from __future__ import annotations import struct +import warnings from enum import Enum from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union, overload from uuid import UUID @@ -255,6 +256,9 @@ def __eq__(self, other: Any) -> bool: self.dtype == other.dtype and self.padding == other.padding and self.data == other.data ) + def __len__(self) -> int: + return len(self.data) + class Binary(bytes): """Representation of BSON binary data. @@ -439,6 +443,9 @@ def from_vector( :param padding: For fractional bytes, number of bits to ignore at end of vector. :return: Binary packed data identified by dtype and padding. + .. versionchanged:: 4.14 + When padding is non-zero, ignored bits should be zero. Raise exception on encoding, warn on decoding. + .. versionadded:: 4.10 """ if isinstance(vector, BinaryVector): @@ -471,6 +478,10 @@ def from_vector( metadata = struct.pack(" BinaryVector: @@ -522,6 +533,12 @@ def as_vector(self) -> BinaryVector: dtype_format = "B" format_string = f"<{n_values}{dtype_format}" unpacked_uint8s = list(struct.unpack_from(format_string, self, position)) + if padding and n_values and unpacked_uint8s[-1] & (1 << padding) - 1 != 0: + warnings.warn( + "Vector has a padding P, but bits in the final byte lower than P are non-zero. For pymongo>=5.0, they must be zero.", + DeprecationWarning, + stacklevel=2, + ) return BinaryVector(unpacked_uint8s, dtype, padding) else: diff --git a/doc/changelog.rst b/doc/changelog.rst index 933e2922db..e4da112097 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -58,6 +58,10 @@ PyMongo 4.13 brings a number of changes including: or the `migration guide `_ for more information. - Fixed a bug where :class:`pymongo.write_concern.WriteConcern` repr was not eval-able when using ``w="majority"``. +- When padding is set, ignored bits in a BSON BinaryVector of PACKED_BIT dtype should be set to zero. + When encoding, this is enforced and is a breaking change. + It is not yet enforced when decoding, so reading from the database will not fail, however a warning will be triggered. + From PyMongo 5.0, this rule will be enforced for both encoding and decoding. Issues Resolved ............... diff --git a/test/test_bson.py b/test/test_bson.py index e9a1dd1ca7..e4cf85c46c 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -739,7 +739,7 @@ def test_vector(self): """Tests of subtype 9""" # We start with valid cases, across the 3 dtypes implemented. # Work with a simple vector that can be interpreted as int8, float32, or ubyte - list_vector = [127, 7] + list_vector = [127, 8] # As INT8, vector has length 2 binary_vector = Binary.from_vector(list_vector, BinaryVectorDtype.INT8) vector = binary_vector.as_vector() @@ -764,18 +764,18 @@ def test_vector(self): uncompressed = "" for val in list_vector: uncompressed += format(val, "08b") - assert uncompressed[:-padding] == "0111111100000" + assert uncompressed[:-padding] == "0111111100001" # It is worthwhile explicitly showing the values encoded to BSON padded_doc = {"padded_vec": padded_vec} assert ( encode(padded_doc) - == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x07\x00" + == b"\x1a\x00\x00\x00\x05padded_vec\x00\x04\x00\x00\x00\t\x10\x03\x7f\x08\x00" ) # and dumped to json assert ( json_util.dumps(padded_doc) - == '{"padded_vec": {"$binary": {"base64": "EAN/Bw==", "subType": "09"}}}' + == '{"padded_vec": {"$binary": {"base64": "EAN/CA==", "subType": "09"}}}' ) # FLOAT32 is also implemented @@ -784,15 +784,19 @@ def test_vector(self): # Now some invalid cases for x in [-1, 257]: - try: + with self.assertRaises(struct.error): Binary.from_vector([x], BinaryVectorDtype.PACKED_BIT) - except Exception as exc: - self.assertIsInstance(exc, struct.error) - else: - self.fail("Failed to raise an exception.") - # Test form of Binary.from_vector(BinaryVector) + # Test one must pass zeros for all ignored bits + with self.assertRaises(ValueError): + Binary.from_vector([255], BinaryVectorDtype.PACKED_BIT, padding=7) + with self.assertWarns(DeprecationWarning): + meta = struct.pack(" Date: Tue, 15 Jul 2025 11:27:38 -0700 Subject: [PATCH 1984/2111] PYTHON-5289 Fixes indentation in docstring of Binary.from_vector (#2432) --- bson/binary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bson/binary.py b/bson/binary.py index a43f81bf2d..693b838b80 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -444,7 +444,7 @@ def from_vector( :return: Binary packed data identified by dtype and padding. .. versionchanged:: 4.14 - When padding is non-zero, ignored bits should be zero. Raise exception on encoding, warn on decoding. + When padding is non-zero, ignored bits should be zero. Raise exception on encoding, warn on decoding. .. versionadded:: 4.10 """ From 83fcf7cd084f6095c0be394b4f464ffa817fe321 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 15 Jul 2025 12:15:05 -0700 Subject: [PATCH 1985/2111] PYTHON-4931 Add spec tests for GridFS rename (#2431) --- .evergreen/remove-unimplemented-tests.sh | 1 - .evergreen/spec-patch/PYTHON-4931.patch | 93 ------------ test/asynchronous/unified_format.py | 3 + test/gridfs/delete.json | 4 +- test/gridfs/download.json | 10 +- test/gridfs/downloadByName.json | 4 +- test/gridfs/rename.json | 179 +++++++++++++++++++++++ test/unified_format.py | 3 + 8 files changed, 194 insertions(+), 103 deletions(-) delete mode 100644 .evergreen/spec-patch/PYTHON-4931.patch create mode 100644 test/gridfs/rename.json diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh index e31011f615..d4eaff473e 100755 --- a/.evergreen/remove-unimplemented-tests.sh +++ b/.evergreen/remove-unimplemented-tests.sh @@ -8,7 +8,6 @@ rm $PYMONGO/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-E rm $PYMONGO/test/client-side-encryption/spec/unified/localSchema.json # PYTHON-5143 rm $PYMONGO/test/client-side-encryption/spec/unified/maxWireVersion.json # PYTHON-5143 rm $PYMONGO/test/unified-test-format/valid-pass/poc-queryable-encryption.json # PYTHON-5143 -rm $PYMONGO/test/gridfs/rename.json # PYTHON-4931 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-application-error.json # PYTHON-4918 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-checkout-error.json # PYTHON-4918 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-min-pool-size-error.json # PYTHON-4918 diff --git a/.evergreen/spec-patch/PYTHON-4931.patch b/.evergreen/spec-patch/PYTHON-4931.patch deleted file mode 100644 index ad7086b378..0000000000 --- a/.evergreen/spec-patch/PYTHON-4931.patch +++ /dev/null @@ -1,93 +0,0 @@ -diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json -index 277b9ed7..9a9b22fc 100644 ---- a/test/gridfs/delete.json -+++ b/test/gridfs/delete.json -@@ -497,7 +497,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ], -@@ -650,7 +650,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ], -diff --git a/test/gridfs/download.json b/test/gridfs/download.json -index f0cb8517..67658ac5 100644 ---- a/test/gridfs/download.json -+++ b/test/gridfs/download.json -@@ -338,7 +338,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] -@@ -370,7 +370,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] -@@ -402,7 +402,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] -@@ -471,7 +471,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] -@@ -514,7 +514,7 @@ - } - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] -diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json -index 7b20933c..45abaf7b 100644 ---- a/test/gridfs/downloadByName.json -+++ b/test/gridfs/downloadByName.json -@@ -290,7 +290,7 @@ - "filename": "xyz" - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] -@@ -306,7 +306,7 @@ - "revision": 999 - }, - "expectError": { -- "isError": true -+ "isClientError": true - } - } - ] diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 5c221a6df0..5b2e3563f9 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -66,6 +66,7 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId from gridfs import AsyncGridFSBucket, GridOut, NoFile +from gridfs.errors import CorruptGridFile from pymongo import ASCENDING, AsyncMongoClient, CursorType, _csot from pymongo.asynchronous.change_stream import AsyncChangeStream from pymongo.asynchronous.client_session import AsyncClientSession, TransactionOptions, _TxnState @@ -613,6 +614,8 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. if isinstance(error, ConnectionFailure): self.assertNotIsInstance(error, NotPrimaryError) + elif isinstance(error, CorruptGridFile): + pass elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)): pass else: diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json index 277b9ed7e1..9a9b22fc1e 100644 --- a/test/gridfs/delete.json +++ b/test/gridfs/delete.json @@ -497,7 +497,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -650,7 +650,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], diff --git a/test/gridfs/download.json b/test/gridfs/download.json index f0cb851708..67658ac512 100644 --- a/test/gridfs/download.json +++ b/test/gridfs/download.json @@ -338,7 +338,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -370,7 +370,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -402,7 +402,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -471,7 +471,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -514,7 +514,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json index 7b20933c16..45abaf7b42 100644 --- a/test/gridfs/downloadByName.json +++ b/test/gridfs/downloadByName.json @@ -290,7 +290,7 @@ "filename": "xyz" }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -306,7 +306,7 @@ "revision": 999 }, "expectError": { - "isError": true + "isClientError": true } } ] diff --git a/test/gridfs/rename.json b/test/gridfs/rename.json new file mode 100644 index 0000000000..08064d4a5c --- /dev/null +++ b/test/gridfs/rename.json @@ -0,0 +1,179 @@ +{ + "description": "gridfs-rename", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "rename by id", + "operations": [ + { + "name": "rename", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + }, + "newFilename": "newfilename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "rename when file id does not exist", + "operations": [ + { + "name": "rename", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + }, + "newFilename": "newfilename" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index f1e55d87b9..859cb29977 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -65,6 +65,7 @@ from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId from gridfs import GridFSBucket, GridOut, NoFile +from gridfs.errors import CorruptGridFile from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.driver_info import DriverInfo from pymongo.encryption_options import _HAVE_PYMONGOCRYPT @@ -612,6 +613,8 @@ def process_error(self, exception, spec): # Connection errors are considered client errors. if isinstance(error, ConnectionFailure): self.assertNotIsInstance(error, NotPrimaryError) + elif isinstance(error, CorruptGridFile): + pass elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)): pass else: From 3be7f76763464fe55f710ee427369197a21c0843 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Jul 2025 15:38:15 -0500 Subject: [PATCH 1986/2111] PYTHON-4203 Update prose tests for mongos deprioritization during retryable ops (#2430) --- test/asynchronous/test_retryable_reads.py | 52 +++++++++++++++++++---- test/test_retryable_reads.py | 50 ++++++++++++++++++---- 2 files changed, 84 insertions(+), 18 deletions(-) diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py index 4ac694b58e..26454b3823 100644 --- a/test/asynchronous/test_retryable_reads.py +++ b/test/asynchronous/test_retryable_reads.py @@ -21,7 +21,7 @@ import threading from test.asynchronous.utils import async_set_fail_point -from pymongo.errors import AutoReconnect +from pymongo.errors import OperationFailure sys.path[0:0] = [""] @@ -147,15 +147,11 @@ async def test_pool_paused_error_is_retryable(self): class TestRetryableReads(AsyncIntegrationTest): @async_client_context.require_multiple_mongoses @async_client_context.require_failCommand_fail_point - async def test_retryable_reads_in_sharded_cluster_multiple_available(self): + async def test_retryable_reads_are_retried_on_a_different_mongos_when_one_is_available(self): fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 1}, - "data": { - "failCommands": ["find"], - "closeConnection": True, - "appName": "retryableReadTest", - }, + "data": {"failCommands": ["find"], "errorCode": 6}, } mongos_clients = [] @@ -168,12 +164,11 @@ async def test_retryable_reads_in_sharded_cluster_multiple_available(self): listener = OvertCommandListener() client = await self.async_rs_or_single_client( async_client_context.mongos_seeds(), - appName="retryableReadTest", event_listeners=[listener], retryReads=True, ) - with self.assertRaises(AutoReconnect): + with self.assertRaises(OperationFailure): await client.t.t.find_one({}) # Disable failpoints on each mongos @@ -184,6 +179,45 @@ async def test_retryable_reads_in_sharded_cluster_multiple_available(self): self.assertEqual(len(listener.failed_events), 2) self.assertEqual(len(listener.succeeded_events), 0) + # Assert that both events occurred on different mongos. + assert listener.failed_events[0].connection_id != listener.failed_events[1].connection_id + + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_available( + self + ): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + host = async_client_context.mongos_seeds().split(",")[0] + mongos_client = await self.async_rs_or_single_client(host) + await async_set_fail_point(mongos_client, fail_command) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + host, + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + await client.t.t.find_one({}) + + # Disable failpoint. + fail_command["mode"] = "off" + await async_set_fail_point(mongos_client, fail_command) + + # Assert that exactly one failed command event and one succeeded command event occurred. + self.assertEqual(len(listener.failed_events), 1) + self.assertEqual(len(listener.succeeded_events), 1) + + # Assert that both events occurred on the same mongos. + assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index cfd85b1ac5..fb8a374dac 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -21,7 +21,7 @@ import threading from test.utils import set_fail_point -from pymongo.errors import AutoReconnect +from pymongo.errors import OperationFailure sys.path[0:0] = [""] @@ -147,15 +147,11 @@ def test_pool_paused_error_is_retryable(self): class TestRetryableReads(IntegrationTest): @client_context.require_multiple_mongoses @client_context.require_failCommand_fail_point - def test_retryable_reads_in_sharded_cluster_multiple_available(self): + def test_retryable_reads_are_retried_on_a_different_mongos_when_one_is_available(self): fail_command = { "configureFailPoint": "failCommand", "mode": {"times": 1}, - "data": { - "failCommands": ["find"], - "closeConnection": True, - "appName": "retryableReadTest", - }, + "data": {"failCommands": ["find"], "errorCode": 6}, } mongos_clients = [] @@ -168,12 +164,11 @@ def test_retryable_reads_in_sharded_cluster_multiple_available(self): listener = OvertCommandListener() client = self.rs_or_single_client( client_context.mongos_seeds(), - appName="retryableReadTest", event_listeners=[listener], retryReads=True, ) - with self.assertRaises(AutoReconnect): + with self.assertRaises(OperationFailure): client.t.t.find_one({}) # Disable failpoints on each mongos @@ -184,6 +179,43 @@ def test_retryable_reads_in_sharded_cluster_multiple_available(self): self.assertEqual(len(listener.failed_events), 2) self.assertEqual(len(listener.succeeded_events), 0) + # Assert that both events occurred on different mongos. + assert listener.failed_events[0].connection_id != listener.failed_events[1].connection_id + + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + host = client_context.mongos_seeds().split(",")[0] + mongos_client = self.rs_or_single_client(host) + set_fail_point(mongos_client, fail_command) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + host, + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + client.t.t.find_one({}) + + # Disable failpoint. + fail_command["mode"] = "off" + set_fail_point(mongos_client, fail_command) + + # Assert that exactly one failed command event and one succeeded command event occurred. + self.assertEqual(len(listener.failed_events), 1) + self.assertEqual(len(listener.succeeded_events), 1) + + # Assert that both events occurred on the same mongos. + assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + if __name__ == "__main__": unittest.main() From 71514b598972ec93c024e44cb5d4217eacc5913e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Jul 2025 20:25:13 -0500 Subject: [PATCH 1987/2111] PYTHON-5152 Sunset Astrolabe (#2434) --- ...client-storeEventsAsEntities-minItems.json | 18 ----- ...ity-client-storeEventsAsEntities-type.json | 18 ----- ...reEventsAsEntity-additionalProperties.json | 26 ------- .../storeEventsAsEntity-events-enum.json | 25 ------- .../storeEventsAsEntity-events-minItems.json | 23 ------- .../storeEventsAsEntity-events-required.json | 22 ------ .../storeEventsAsEntity-events-type.json | 23 ------- .../storeEventsAsEntity-id-required.json | 24 ------- .../invalid/storeEventsAsEntity-id-type.json | 25 ------- ...ntsAsEntities-conflict_with_client_id.json | 28 -------- ...ities-conflict_within_different_array.json | 43 ------------ ...AsEntities-conflict_within_same_array.json | 36 ---------- .../entity-client-storeEventsAsEntities.json | 67 ------------------- 13 files changed, 378 deletions(-) delete mode 100644 test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json delete mode 100644 test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-required.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-events-type.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-id-required.json delete mode 100644 test/unified-test-format/invalid/storeEventsAsEntity-id-type.json delete mode 100644 test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json delete mode 100644 test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json delete mode 100644 test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json delete mode 100644 test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json deleted file mode 100644 index d94863ed11..0000000000 --- a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-minItems", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json deleted file mode 100644 index 79f6b85ed2..0000000000 --- a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-type", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": 0 - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json b/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json deleted file mode 100644 index 5357da8d8d..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "description": "storeEventsAsEntity-additionalProperties", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [ - "CommandStartedEvent" - ], - "foo": 0 - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json deleted file mode 100644 index ee99a55381..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-enum", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [ - "foo" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json deleted file mode 100644 index ddab042b1b..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-minItems", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json deleted file mode 100644 index 90b45918ce..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-required", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events" - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json deleted file mode 100644 index 1b920ebd5d..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-type", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": 0 - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json deleted file mode 100644 index 71387c5315..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "description": "storeEventsAsEntity-id-required", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "events": [ - "CommandStartedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json deleted file mode 100644 index 4f52dc2533..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "description": "storeEventsAsEntity-id-type", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": 0, - "events": [ - "CommandStartedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json deleted file mode 100644 index 8c0c4d2041..0000000000 --- a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-conflict_with_client_id", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0", - "events": [ - "PoolCreatedEvent", - "PoolReadyEvent", - "PoolClearedEvent", - "PoolClosedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json deleted file mode 100644 index 77bc4abf2e..0000000000 --- a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-conflict_within_different_array", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "events", - "events": [ - "PoolCreatedEvent", - "PoolReadyEvent", - "PoolClearedEvent", - "PoolClosedEvent" - ] - } - ] - } - }, - { - "client": { - "id": "client1", - "storeEventsAsEntities": [ - { - "id": "events", - "events": [ - "CommandStartedEvent", - "CommandSucceededEvent", - "CommandFailedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json deleted file mode 100644 index e1a9499883..0000000000 --- a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-conflict_within_same_array", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "events", - "events": [ - "PoolCreatedEvent", - "PoolReadyEvent", - "PoolClearedEvent", - "PoolClosedEvent" - ] - }, - { - "id": "events", - "events": [ - "CommandStartedEvent", - "CommandSucceededEvent", - "CommandFailedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json b/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json deleted file mode 100644 index e37e5a1acd..0000000000 --- a/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [ - "CommandStartedEvent", - "CommandSucceededEvent", - "CommandFailedEvent" - ] - } - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ] - } - ], - "tests": [ - { - "description": "storeEventsAsEntities captures events", - "operations": [ - { - "name": "find", - "object": "collection0", - "arguments": { - "filter": {} - }, - "expectResult": [ - { - "_id": 1, - "x": 11 - } - ] - } - ] - } - ] -} From 36bb704c76adc735a624c783ea82edc2c2c6cfce Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 15 Jul 2025 20:26:29 -0500 Subject: [PATCH 1988/2111] PYTHON-5237 Relax requirement for optional fields for sessions unified tests (#2435) --- .evergreen/spec-patch/PYTHON-5237.patch | 48 ------------------- .../driver-sessions-dirty-session-errors.json | 16 +++++-- 2 files changed, 12 insertions(+), 52 deletions(-) delete mode 100644 .evergreen/spec-patch/PYTHON-5237.patch diff --git a/.evergreen/spec-patch/PYTHON-5237.patch b/.evergreen/spec-patch/PYTHON-5237.patch deleted file mode 100644 index 01de56b6c8..0000000000 --- a/.evergreen/spec-patch/PYTHON-5237.patch +++ /dev/null @@ -1,48 +0,0 @@ -diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json -index 6aa1da1d..d7a1c6ab 100644 ---- a/test/sessions/driver-sessions-dirty-session-errors.json -+++ b/test/sessions/driver-sessions-dirty-session-errors.json -@@ -347,7 +347,9 @@ - "x": 1 - } - }, -- "new": false, -+ "new": { -+ "$$unsetOrMatches": false -+ }, - "lsid": { - "$$sessionLsid": "session0" - }, -@@ -375,7 +377,9 @@ - "x": 1 - } - }, -- "new": false, -+ "new": { -+ "$$unsetOrMatches": false -+ }, - "lsid": { - "$$sessionLsid": "session0" - }, -@@ -627,7 +631,9 @@ - "x": 1 - } - }, -- "new": false, -+ "new": { -+ "$$unsetOrMatches": false -+ }, - "lsid": { - "$$type": "object" - }, -@@ -655,7 +661,9 @@ - "x": 1 - } - }, -- "new": false, -+ "new": { -+ "$$unsetOrMatches": false -+ }, - "lsid": { - "$$type": "object" - }, diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json index 6aa1da1df5..d7a1c6aba7 100644 --- a/test/sessions/driver-sessions-dirty-session-errors.json +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -347,7 +347,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -375,7 +377,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -627,7 +631,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$type": "object" }, @@ -655,7 +661,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$type": "object" }, From 6ef91357b2c861c8901de0618c9ddc3eb9547e81 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Jul 2025 16:27:26 -0500 Subject: [PATCH 1989/2111] PYTHON-4884 Test encoding dates after year 9999 with Relaxed Extended JSON (#2437) --- .evergreen/spec-patch/PYTHON-4884.patch | 12 ------------ test/bson_corpus/datetime.json | 1 + 2 files changed, 1 insertion(+), 12 deletions(-) delete mode 100644 .evergreen/spec-patch/PYTHON-4884.patch diff --git a/.evergreen/spec-patch/PYTHON-4884.patch b/.evergreen/spec-patch/PYTHON-4884.patch deleted file mode 100644 index 0ef66e072a..0000000000 --- a/.evergreen/spec-patch/PYTHON-4884.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json -index f857afdc..1554341d 100644 ---- a/test/bson_corpus/datetime.json -+++ b/test/bson_corpus/datetime.json -@@ -24,6 +24,7 @@ - { - "description" : "Y10K", - "canonical_bson" : "1000000009610000DC1FD277E6000000", -+ "relaxed_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}", - "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" - }, - { diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json index f857afdc36..1554341d29 100644 --- a/test/bson_corpus/datetime.json +++ b/test/bson_corpus/datetime.json @@ -24,6 +24,7 @@ { "description" : "Y10K", "canonical_bson" : "1000000009610000DC1FD277E6000000", + "relaxed_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}", "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" }, { From fed738df42c5f5e6715c5f6e9aaa822c642f5dec Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 16 Jul 2025 16:27:59 -0500 Subject: [PATCH 1990/2111] PYTHON-5444 Update OIDC tests use camelCase options (#2436) --- test/asynchronous/test_auth_oidc.py | 8 ++++---- test/test_auth_oidc.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/asynchronous/test_auth_oidc.py b/test/asynchronous/test_auth_oidc.py index f450c75df7..639c155e73 100644 --- a/test/asynchronous/test_auth_oidc.py +++ b/test/asynchronous/test_auth_oidc.py @@ -92,11 +92,11 @@ def get_token(self, username=None): return fid.read() elif ENVIRON == "azure": opts = parse_uri(self.uri_single)["options"] - token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] return _get_azure_response(token_aud, username)["access_token"] elif ENVIRON == "gcp": opts = parse_uri(self.uri_single)["options"] - token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] return _get_gcp_response(token_aud, username)["access_token"] elif ENVIRON == "k8s": return _get_k8s_token() @@ -1108,7 +1108,7 @@ async def test_5_1_azure_with_no_username(self): if ENVIRON != "azure": raise unittest.SkipTest("Test is only supported on Azure") opts = parse_uri(self.uri_single)["options"] - resource = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + resource = opts["authMechanismProperties"]["TOKEN_RESOURCE"] props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") client = await self.create_client(authMechanismProperties=props) @@ -1119,7 +1119,7 @@ async def test_5_2_azure_with_bad_username(self): raise unittest.SkipTest("Test is only supported on Azure") opts = parse_uri(self.uri_single)["options"] - token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") client = await self.create_client(username="bad", authmechanismproperties=props) diff --git a/test/test_auth_oidc.py b/test/test_auth_oidc.py index 33a1e55fe2..877a5ca981 100644 --- a/test/test_auth_oidc.py +++ b/test/test_auth_oidc.py @@ -92,11 +92,11 @@ def get_token(self, username=None): return fid.read() elif ENVIRON == "azure": opts = parse_uri(self.uri_single)["options"] - token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] return _get_azure_response(token_aud, username)["access_token"] elif ENVIRON == "gcp": opts = parse_uri(self.uri_single)["options"] - token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] return _get_gcp_response(token_aud, username)["access_token"] elif ENVIRON == "k8s": return _get_k8s_token() @@ -1106,7 +1106,7 @@ def test_5_1_azure_with_no_username(self): if ENVIRON != "azure": raise unittest.SkipTest("Test is only supported on Azure") opts = parse_uri(self.uri_single)["options"] - resource = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + resource = opts["authMechanismProperties"]["TOKEN_RESOURCE"] props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") client = self.create_client(authMechanismProperties=props) @@ -1117,7 +1117,7 @@ def test_5_2_azure_with_bad_username(self): raise unittest.SkipTest("Test is only supported on Azure") opts = parse_uri(self.uri_single)["options"] - token_aud = opts["authmechanismproperties"]["TOKEN_RESOURCE"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") client = self.create_client(username="bad", authmechanismproperties=props) From 55d399b75ab969001904407aee8b7b6ab254afb9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 17 Jul 2025 09:29:11 -0500 Subject: [PATCH 1991/2111] PYTHON-4019 Infinite loop in generic transactional provider due to dup keys (#2438) --- doc/conf.py | 1 + pymongo/asynchronous/client_session.py | 9 +++++++++ pymongo/synchronous/client_session.py | 9 +++++++++ 3 files changed, 19 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index a9711d259f..8a7f418609 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -86,6 +86,7 @@ # sourceforge.net is giving a 403 error, but is still accessible from the browser. linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check", + "https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", r"https://sourceforge.net/", diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index b808684dd4..1225445710 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -660,6 +660,12 @@ async def callback(session, custom_arg, custom_kwarg=None): ``with_transaction`` starts a new transaction and re-executes the ``callback``. + The ``callback`` MUST NOT silently handle command errors + without allowing such errors to propagate. Command errors may abort the + transaction on the server, and an attempt to commit the transaction will + be rejected with a ``NoSuchTransaction`` error. For more information see + the `transactions specification`_. + When :meth:`~AsyncClientSession.commit_transaction` raises an exception with the ``"UnknownTransactionCommitResult"`` error label, ``with_transaction`` retries the commit until the result of the @@ -689,6 +695,9 @@ async def callback(session, custom_arg, custom_kwarg=None): :return: The return value of the ``callback``. .. versionadded:: 3.9 + + .. _transactions specification: + https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback """ start_time = time.monotonic() while True: diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index aaf2d7574f..8d5bf7697b 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -659,6 +659,12 @@ def callback(session, custom_arg, custom_kwarg=None): ``with_transaction`` starts a new transaction and re-executes the ``callback``. + The ``callback`` MUST NOT silently handle command errors + without allowing such errors to propagate. Command errors may abort the + transaction on the server, and an attempt to commit the transaction will + be rejected with a ``NoSuchTransaction`` error. For more information see + the `transactions specification`_. + When :meth:`~ClientSession.commit_transaction` raises an exception with the ``"UnknownTransactionCommitResult"`` error label, ``with_transaction`` retries the commit until the result of the @@ -688,6 +694,9 @@ def callback(session, custom_arg, custom_kwarg=None): :return: The return value of the ``callback``. .. versionadded:: 3.9 + + .. _transactions specification: + https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback """ start_time = time.monotonic() while True: From cf2630148a58e3088a614a5fd37ff932ed6d5e72 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 18 Jul 2025 12:04:10 -0500 Subject: [PATCH 1992/2111] PYTHON-4677 Specify how maxTimeMS can be set for explain helpers (#2439) --- pymongo/asynchronous/cursor.py | 2 ++ pymongo/synchronous/cursor.py | 2 ++ test/asynchronous/test_cursor.py | 18 ++++++++++++++++++ test/test_cursor.py | 18 ++++++++++++++++++ 4 files changed, 40 insertions(+) diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 02954fb559..51efab4f43 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -767,6 +767,8 @@ async def explain(self) -> _DocumentType: :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` to run the explain command directly. + .. note:: The timeout of this method can be set using :func:`pymongo.timeout`. + .. seealso:: The MongoDB documentation on `explain `_. """ c = self.clone() diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index ba35316516..e49141e811 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -765,6 +765,8 @@ def explain(self) -> _DocumentType: :meth:`~pymongo.database.Database.command` to run the explain command directly. + .. note:: The timeout of this method can be set using :func:`pymongo.timeout`. + .. seealso:: The MongoDB documentation on `explain `_. """ c = self.clone() diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 8d2dbf532e..53b3289fb8 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -362,6 +362,24 @@ async def test_explain_with_read_concern(self): self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) + # https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#14-explain-helpers-allow-users-to-specify-maxtimems + async def test_explain_csot(self): + # Create a MongoClient with command monitoring enabled (referred to as client). + listener = AllowListEventListener("explain") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + # Create a collection, referred to as collection, with the namespace explain-test.collection. + collection = client["explain-test"]["collection"] + + # Run an explained find on collection. The find will have the query predicate { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain. + with pymongo.timeout(2.0): + self.assertTrue(await collection.find({"name": "john doe"}).explain()) + + # Obtain the command started event for the explain. Confirm that the top-level explain command should has a maxTimeMS value of 2000. + started = listener.started_events + self.assertEqual(len(started), 1) + assert 1500 < started[0].command["maxTimeMS"] <= 2000 + async def test_hint(self): db = self.db with self.assertRaises(TypeError): diff --git a/test/test_cursor.py b/test/test_cursor.py index 4902d9e4df..d0bd48e747 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -354,6 +354,24 @@ def test_explain_with_read_concern(self): self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) + # https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#14-explain-helpers-allow-users-to-specify-maxtimems + def test_explain_csot(self): + # Create a MongoClient with command monitoring enabled (referred to as client). + listener = AllowListEventListener("explain") + client = self.rs_or_single_client(event_listeners=[listener]) + + # Create a collection, referred to as collection, with the namespace explain-test.collection. + collection = client["explain-test"]["collection"] + + # Run an explained find on collection. The find will have the query predicate { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain. + with pymongo.timeout(2.0): + self.assertTrue(collection.find({"name": "john doe"}).explain()) + + # Obtain the command started event for the explain. Confirm that the top-level explain command should has a maxTimeMS value of 2000. + started = listener.started_events + self.assertEqual(len(started), 1) + assert 1500 < started[0].command["maxTimeMS"] <= 2000 + def test_hint(self): db = self.db with self.assertRaises(TypeError): From 31cca98656d5f28a25b7bbe7efa07f186c6245b1 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 21 Jul 2025 14:18:39 -0700 Subject: [PATCH 1993/2111] PYTHON-5253 Automated Spec Resync Quick Followup/Fix (#2443) --- .evergreen/scripts/resync-all-specs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/scripts/resync-all-specs.sh b/.evergreen/scripts/resync-all-specs.sh index 0f7ae2ccd8..4bcf2cd23b 100755 --- a/.evergreen/scripts/resync-all-specs.sh +++ b/.evergreen/scripts/resync-all-specs.sh @@ -33,7 +33,7 @@ then # we're running locally python3 ./.evergreen/scripts/resync-all-specs.py else - /opt/devtools/bin/python3.11 ./.evergreen/scripts/resync-all-specs.py "$PR_DESC" + /opt/devtools/bin/python3.11 ./.evergreen/scripts/resync-all-specs.py --filename "$PR_DESC" if [[ -f $PR_DESC ]]; then # changes were made -> call scrypt to create PR for us .evergreen/scripts/create-spec-pr.sh "$PR_DESC" From f9b2f711c099a5b54e1ed00c74f431308791df3a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:51:49 -0500 Subject: [PATCH 1994/2111] Bump furo from 2024.8.6 to 2025.7.19 (#2440) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements/docs.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/docs.txt b/requirements/docs.txt index 7d52c1cb3e..5543a62695 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -3,4 +3,4 @@ sphinx_rtd_theme>=2,<4 readthedocs-sphinx-search~=0.3 sphinxcontrib-shellcheck>=1,<2 sphinx-autobuild>=2020.9.1 -furo==2024.8.6 +furo==2025.7.19 From 5a640daf926663fd950fb0b58bbeef0ec3970535 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 07:52:15 -0500 Subject: [PATCH 1995/2111] Bump astral-sh/setup-uv from 6.3.1 to 6.4.1 in the actions group (#2441) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test-python.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index fb28f2476f..27bf7511d0 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -25,7 +25,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -88,7 +88,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: enable-cache: true python-version: "3.9" @@ -111,7 +111,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: enable-cache: true python-version: "3.9" @@ -130,7 +130,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: enable-cache: true python-version: "3.9" @@ -152,7 +152,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: enable-cache: true python-version: "${{matrix.python}}" @@ -231,7 +231,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: python-version: '3.9' - name: Start MongoDB @@ -257,7 +257,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@bd01e18f51369d5a26f1651c3cb451d3417e3bba # v5 + uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 with: python-version: '3.9' - name: Start MongoDB From 06872f7f0314b4c86c0a83889a767444fe644525 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 22 Jul 2025 10:23:26 -0500 Subject: [PATCH 1996/2111] PYTHON-4780 Implement fast path for server selection with Primary (#2416) --- .github/workflows/zizmor.yml | 2 +- doc/changelog.rst | 1 + pymongo/topology_description.py | 13 ++++++++++- test/asynchronous/test_server_selection.py | 4 ++-- test/test_server_selection.py | 4 ++-- test/test_topology.py | 26 ++++++++++++++++++++-- 6 files changed, 42 insertions(+), 8 deletions(-) diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 6d9506776a..1d58c0d5fb 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@0f0557ab4a0b31211d42435e42df31cbd63fdd59 + uses: zizmorcore/zizmor-action@1c7106082dbc1753372e3924b7da1b9417011a21 diff --git a/doc/changelog.rst b/doc/changelog.rst index e4da112097..2e56b2c019 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -10,6 +10,7 @@ PyMongo 4.14 brings a number of changes including: - Added :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and :meth:`pymongo.mongo_client.MongoClient.append_metadata` to allow instantiated MongoClients to send client metadata on-demand +- Improved performance of selecting a server with the Primary selector. - Introduces a minor breaking change. When encoding :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised if the 'padding' metadata field is < 0 or > 7, or non-zero for any type other than PACKED_BIT. diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index 29293b2314..e226992b45 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -34,7 +34,7 @@ from bson.objectid import ObjectId from pymongo import common from pymongo.errors import ConfigurationError, PyMongoError -from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode +from pymongo.read_preferences import Primary, ReadPreference, _AggWritePref, _ServerMode from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE @@ -324,6 +324,17 @@ def apply_selector( description = self.server_descriptions().get(address) return [description] if description else [] + # Primary selection fast path. + if self.topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary and type(selector) is Primary: + for sd in self._server_descriptions.values(): + if sd.server_type == SERVER_TYPE.RSPrimary: + sds = [sd] + if custom_selector: + sds = custom_selector(sds) + return sds + # No primary found, return an empty list. + return [] + selection = Selection.from_topology_description(self) # Ignore read preference for sharded clusters. if self.topology_type != TOPOLOGY_TYPE.Sharded: diff --git a/test/asynchronous/test_server_selection.py b/test/asynchronous/test_server_selection.py index f98a05ee91..f570662b85 100644 --- a/test/asynchronous/test_server_selection.py +++ b/test/asynchronous/test_server_selection.py @@ -130,12 +130,12 @@ async def test_selector_called(self): test_collection = mongo_client.testdb.test_collection self.addAsyncCleanup(mongo_client.drop_database, "testdb") - # Do N operations and test selector is called at least N times. + # Do N operations and test selector is called at least N-1 times due to fast path. await test_collection.insert_one({"age": 20, "name": "John"}) await test_collection.insert_one({"age": 31, "name": "Jane"}) await test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) await test_collection.find_one({"name": "Roe"}) - self.assertGreaterEqual(selector.call_count, 4) + self.assertGreaterEqual(selector.call_count, 3) @async_client_context.require_replica_set async def test_latency_threshold_application(self): diff --git a/test/test_server_selection.py b/test/test_server_selection.py index aec8e2e47a..4384deac2b 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -130,12 +130,12 @@ def test_selector_called(self): test_collection = mongo_client.testdb.test_collection self.addCleanup(mongo_client.drop_database, "testdb") - # Do N operations and test selector is called at least N times. + # Do N operations and test selector is called at least N-1 times due to fast path. test_collection.insert_one({"age": 20, "name": "John"}) test_collection.insert_one({"age": 31, "name": "Jane"}) test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) test_collection.find_one({"name": "Roe"}) - self.assertGreaterEqual(selector.call_count, 4) + self.assertGreaterEqual(selector.call_count, 3) @client_context.require_replica_set def test_latency_threshold_application(self): diff --git a/test/test_topology.py b/test/test_topology.py index 837cf25c62..d3bbcd9060 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -30,7 +30,7 @@ from pymongo import common from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure from pymongo.hello import Hello, HelloCompat -from pymongo.read_preferences import ReadPreference, Secondary +from pymongo.read_preferences import Primary, ReadPreference, Secondary from pymongo.server_description import ServerDescription from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE @@ -51,7 +51,10 @@ def get_topology_type(self): def create_mock_topology( - seeds=None, replica_set_name=None, monitor_class=DummyMonitor, direct_connection=False + seeds=None, + replica_set_name=None, + monitor_class=DummyMonitor, + direct_connection=False, ): partitioned_seeds = list(map(common.partition_node, seeds or ["a"])) topology_settings = TopologySettings( @@ -123,6 +126,25 @@ def test_timeout_configuration(self): # The monitor, not its pool, is responsible for calling hello. self.assertTrue(monitor._pool.is_sdam) + def test_selector_fast_path(self): + topology = create_mock_topology(seeds=["a", "b:27018"], replica_set_name="foo") + description = topology.description + description._topology_type = TOPOLOGY_TYPE.ReplicaSetWithPrimary + + # There is no primary yet, so it should give an empty list. + self.assertEqual(description.apply_selector(Primary()), []) + + # If we set a primary server, we should get it back. + sd = list(description._server_descriptions.values())[0] + sd._server_type = SERVER_TYPE.RSPrimary + self.assertEqual(description.apply_selector(Primary()), [sd]) + + # If there is a custom selector, it should be applied. + def custom_selector(servers): + return [] + + self.assertEqual(description.apply_selector(Primary(), custom_selector=custom_selector), []) + class TestSingleServerTopology(TopologyTest): def test_direct_connection(self): From ffb372aec73d119f03810c1d7816f6278fe14afe Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 24 Jul 2025 13:20:19 -0500 Subject: [PATCH 1997/2111] PYTHON-5027 Test Windows with Python 3.14t (#2444) --- .evergreen/generated_configs/variants.yml | 9 +++++++++ .evergreen/scripts/generate_config.py | 6 +++--- .evergreen/scripts/generate_config_utils.py | 13 ++++++------- pyproject.toml | 3 +++ test/__init__.py | 2 +- test/asynchronous/__init__.py | 2 +- test/asynchronous/helpers.py | 2 +- test/asynchronous/test_comment.py | 2 +- test/asynchronous/test_session.py | 2 +- test/asynchronous/unified_format.py | 2 +- test/asynchronous/utils.py | 2 +- test/asynchronous/utils_spec_runner.py | 2 +- test/helpers.py | 2 +- test/test_bson_binary_vector.py | 3 +-- test/test_bson_corpus.py | 3 +-- test/test_comment.py | 2 +- test/test_session.py | 2 +- test/unified_format.py | 2 +- test/utils.py | 2 +- test/utils_shared.py | 2 +- test/utils_spec_runner.py | 2 +- tools/convert_test_to_async.py | 4 ++-- 22 files changed, 40 insertions(+), 31 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index e98b0d342c..d4726a22a9 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -319,6 +319,15 @@ buildvariants: expansions: PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t tags: [] + - name: free-threaded-win64-python3.14t + tasks: + - name: .free-threading + display_name: Free-threaded Win64 Python3.14t + run_on: + - windows-64-vsMulti-small + expansions: + PYTHON_BINARY: C:/python/Python314/python3.14t.exe + tags: [] # Green framework tests - name: green-eventlet-rhel8 diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index a30beb1d39..c27cfe130f 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -108,14 +108,14 @@ def create_free_threaded_variants() -> list[BuildVariant]: variants = [] for host_name in ("rhel8", "macos", "macos-arm64", "win64"): if host_name == "win64": - # TODO: PYTHON-5027 - continue + python = "3.14t" + else: + python = "3.13t" tasks = [".free-threading"] tags = [] if host_name == "rhel8": tags.append("pr") host = HOSTS[host_name] - python = "3.13t" display_name = get_variant_name("Free-threaded", host, python=python) variant = create_variant(tasks, display_name, tags=tags, python=python, host=host) variants.append(variant) diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 62ea982cd8..e676200d38 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -153,18 +153,17 @@ def get_python_binary(python: str, host: Host) -> str: base = "C:/python/32" else: base = "C:/python" - python = python.replace(".", "") - if python == "313t": - return f"{base}/Python313/python3.13t.exe" - return f"{base}/Python{python}/python.exe" + python_dir = python.replace(".", "").replace("t", "") + return f"{base}/Python{python_dir}/python{python}.exe" if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: return f"/opt/python/{python}/bin/python3" if name in ["macos", "macos-arm64"]: - if python == "3.13t": - return "/Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t" - return f"/Library/Frameworks/Python.Framework/Versions/{python}/bin/python3" + bin_name = "python3t" if "t" in python else "python3" + python_dir = python.replace("t", "") + framework_dir = "PythonT" if "t" in python else "Python" + return f"/Library/Frameworks/{framework_dir}.Framework/Versions/{python_dir}/bin/{bin_name}" raise ValueError(f"no match found for python {python} on {name}") diff --git a/pyproject.toml b/pyproject.toml index fb2dd58131..e7e3161906 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -111,6 +111,9 @@ filterwarnings = [ "module:Wire protocol compression with:UserWarning", "module:GridIn property:DeprecationWarning", "module:GridOut property:DeprecationWarning", + # pytest-asyncio known issue: https://github.com/pytest-dev/pytest-asyncio/issues/1032 + "module:.*WindowsSelectorEventLoopPolicy:DeprecationWarning", + "module:.*et_event_loop_policy:DeprecationWarning", # TODO: Remove as part of PYTHON-3923. "module:unclosed set[str]: for k, v in vars(x).items() if callable(v) and not isinstance(v, classmethod) - and asyncio.iscoroutinefunction(v) + and inspect.iscoroutinefunction(v) and v.__name__[0] != "_" } result = result | methods From 59d94f397bb36202a8292d170b6fd80e5a61abf0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 11:32:04 -0500 Subject: [PATCH 1998/2111] Bump the actions group with 3 updates (#2446) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/test-python.yml | 16 ++++++++-------- .github/workflows/zizmor.yml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index d1934183df..f8588a8cc3 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 + uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 27bf7511d0..b7b8fb5062 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -25,7 +25,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -88,7 +88,7 @@ jobs: - name: Install just uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "3.9" @@ -111,7 +111,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "3.9" @@ -130,7 +130,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "3.9" @@ -152,7 +152,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "${{matrix.python}}" @@ -231,7 +231,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: python-version: '3.9' - name: Start MongoDB @@ -257,7 +257,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@7edac99f961f18b581bbd960d59d049f04c0002f # v5 + uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: python-version: '3.9' - name: Start MongoDB diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 1d58c0d5fb..e45d3e48db 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@1c7106082dbc1753372e3924b7da1b9417011a21 + uses: zizmorcore/zizmor-action@87e33752ad17c7c7fc16fe27c858900c59b18d77 From 9514a6727022d602cfd87bf74ebccd21023252dd Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 31 Jul 2025 08:54:12 -0400 Subject: [PATCH 1999/2111] PYTHON-5441 - Unskip gridfs download chunk tests (#2449) --- test/asynchronous/unified_format.py | 15 ++++++++++----- test/unified_format.py | 15 ++++++++++----- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index b9cebd0fab..964d2df96d 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -519,6 +519,15 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if any( + x in spec["description"] + for x in [ + "First insertOne is never committed", + "Second updateOne is never committed", + "Third updateOne is never committed", + ] + ): + self.skipTest("Implement PYTHON-4597") class_name = self.__class__.__name__.lower() description = spec["description"].lower() @@ -672,7 +681,7 @@ def process_error(self, exception, spec): self.match_evaluator.match_result(expect_result, result) else: self.fail( - f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions" + f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions, got {exception}" ) return exception @@ -982,13 +991,9 @@ async def run_entity_operation(self, spec): if ignore and isinstance(exc, (PyMongoError,)): return exc if expect_error: - if method_name == "_collectionOperation_bulkWrite": - self.skipTest("Skipping test pending PYTHON-4598") return self.process_error(exc, expect_error) raise else: - if method_name == "_collectionOperation_bulkWrite": - self.skipTest("Skipping test pending PYTHON-4598") if expect_error: self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') diff --git a/test/unified_format.py b/test/unified_format.py index 2c10506fc0..c21f29fe19 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -518,6 +518,15 @@ def maybe_skip_test(self, spec): self.skipTest("Implement PYTHON-1894") if "timeoutMS applied to entire download" in spec["description"]: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if any( + x in spec["description"] + for x in [ + "First insertOne is never committed", + "Second updateOne is never committed", + "Third updateOne is never committed", + ] + ): + self.skipTest("Implement PYTHON-4597") class_name = self.__class__.__name__.lower() description = spec["description"].lower() @@ -671,7 +680,7 @@ def process_error(self, exception, spec): self.match_evaluator.match_result(expect_result, result) else: self.fail( - f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions" + f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions, got {exception}" ) return exception @@ -973,13 +982,9 @@ def run_entity_operation(self, spec): if ignore and isinstance(exc, (PyMongoError,)): return exc if expect_error: - if method_name == "_collectionOperation_bulkWrite": - self.skipTest("Skipping test pending PYTHON-4598") return self.process_error(exc, expect_error) raise else: - if method_name == "_collectionOperation_bulkWrite": - self.skipTest("Skipping test pending PYTHON-4598") if expect_error: self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') From 9f64dad6871bd13e359c56698810e93f5a1173be Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 31 Jul 2025 08:57:00 -0400 Subject: [PATCH 2000/2111] PYTHON-5473 - Better test assertions for booleans (#2450) --- test/asynchronous/test_database.py | 23 +++++++------------- test/asynchronous/test_gridfs_bucket.py | 28 ++++++++++++------------- test/asynchronous/test_monitoring.py | 7 +++---- test/test_database.py | 23 +++++++------------- test/test_gridfs_bucket.py | 28 ++++++++++++------------- test/test_monitoring.py | 7 +++---- 6 files changed, 46 insertions(+), 70 deletions(-) diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index e6f0c6a532..3b77330c0e 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -189,7 +189,7 @@ async def test_list_collection_names(self): filter={"name": {"$regex": r"^(?!system\.)"}} ) for coll in no_system_collections: - self.assertTrue(not coll.startswith("system.")) + self.assertFalse(coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) # Force more than one batch. @@ -265,19 +265,13 @@ async def test_list_collections(self): try: # Found duplicate. coll_cnt[coll] += 1 - self.assertTrue(False) + self.fail("Found duplicate") except KeyError: coll_cnt[coll] = 1 coll_cnt: dict = {} - # Checking if is there any collection which don't exists. - if ( - len(set(colls) - {"test", "test.mike"}) == 0 - or len(set(colls) - {"test", "test.mike", "system.indexes"}) == 0 - ): - self.assertTrue(True) - else: - self.assertTrue(False) + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "test.mike", "system.indexes"}) colls = await (await db.list_collections(filter={"name": {"$regex": "^test$"}})).to_list() self.assertEqual(1, len(colls)) @@ -307,16 +301,13 @@ async def test_list_collections(self): try: # Found duplicate. coll_cnt[coll] += 1 - self.assertTrue(False) + self.fail("Found duplicate") except KeyError: coll_cnt[coll] = 1 coll_cnt = {} - # Checking if is there any collection which don't exists. - if len(set(colls) - {"test"}) == 0 or len(set(colls) - {"test", "system.indexes"}) == 0: - self.assertTrue(True) - else: - self.assertTrue(False) + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "system.indexes"}) await self.client.drop_database("pymongo_test") diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py index 4640507e94..fd9b9883bf 100644 --- a/test/asynchronous/test_gridfs_bucket.py +++ b/test/asynchronous/test_gridfs_bucket.py @@ -164,17 +164,16 @@ async def test_upload_ensures_index(self): await files.drop() await self.fs.upload_from_stream("filename", b"junk") - self.assertTrue( - any( - info.get("key") == [("files_id", 1), ("n", 1)] - for info in (await chunks.index_information()).values() - ) + self.assertIn( + [("files_id", 1), ("n", 1)], + [info.get("key") for info in (await chunks.index_information()).values()], + "Missing required index on chunks collection: {files_id: 1, n: 1}", ) - self.assertTrue( - any( - info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in (await files.index_information()).values() - ) + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (await files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", ) async def test_ensure_index_shell_compat(self): @@ -192,11 +191,10 @@ async def test_ensure_index_shell_compat(self): # No error. await self.fs.upload_from_stream("filename", b"data") - self.assertTrue( - any( - info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in (await files.index_information()).values() - ) + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (await files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", ) await files.drop() diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index a44223a725..9b2a3691eb 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -512,9 +512,8 @@ async def test_kill_cursors(self): self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on # when the thread last ran. - self.assertTrue( - cursor_id in succeeded.reply["cursorsUnknown"] - or cursor_id in succeeded.reply["cursorsKilled"] + self.assertIn( + cursor_id, succeeded.reply["cursorsUnknown"] + succeeded.reply["cursorsKilled"] ) async def test_non_bulk_writes(self): @@ -1066,7 +1065,7 @@ async def test_write_errors(self): self.assertEqual(2, len(errors)) fields = {"index", "code", "errmsg"} for error in errors: - self.assertTrue(fields.issubset(set(error))) + self.assertLessEqual(fields, set(error)) async def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch diff --git a/test/test_database.py b/test/test_database.py index 56691383b2..c50e09b6e1 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -188,7 +188,7 @@ def test_list_collection_names(self): filter={"name": {"$regex": r"^(?!system\.)"}} ) for coll in no_system_collections: - self.assertTrue(not coll.startswith("system.")) + self.assertFalse(coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) # Force more than one batch. @@ -264,19 +264,13 @@ def test_list_collections(self): try: # Found duplicate. coll_cnt[coll] += 1 - self.assertTrue(False) + self.fail("Found duplicate") except KeyError: coll_cnt[coll] = 1 coll_cnt: dict = {} - # Checking if is there any collection which don't exists. - if ( - len(set(colls) - {"test", "test.mike"}) == 0 - or len(set(colls) - {"test", "test.mike", "system.indexes"}) == 0 - ): - self.assertTrue(True) - else: - self.assertTrue(False) + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "test.mike", "system.indexes"}) colls = (db.list_collections(filter={"name": {"$regex": "^test$"}})).to_list() self.assertEqual(1, len(colls)) @@ -304,16 +298,13 @@ def test_list_collections(self): try: # Found duplicate. coll_cnt[coll] += 1 - self.assertTrue(False) + self.fail("Found duplicate") except KeyError: coll_cnt[coll] = 1 coll_cnt = {} - # Checking if is there any collection which don't exists. - if len(set(colls) - {"test"}) == 0 or len(set(colls) - {"test", "system.indexes"}) == 0: - self.assertTrue(True) - else: - self.assertTrue(False) + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "system.indexes"}) self.client.drop_database("pymongo_test") diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index 33902c9cfc..9dbb082ee9 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -162,17 +162,16 @@ def test_upload_ensures_index(self): files.drop() self.fs.upload_from_stream("filename", b"junk") - self.assertTrue( - any( - info.get("key") == [("files_id", 1), ("n", 1)] - for info in (chunks.index_information()).values() - ) + self.assertIn( + [("files_id", 1), ("n", 1)], + [info.get("key") for info in (chunks.index_information()).values()], + "Missing required index on chunks collection: {files_id: 1, n: 1}", ) - self.assertTrue( - any( - info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in (files.index_information()).values() - ) + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", ) def test_ensure_index_shell_compat(self): @@ -190,11 +189,10 @@ def test_ensure_index_shell_compat(self): # No error. self.fs.upload_from_stream("filename", b"data") - self.assertTrue( - any( - info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in (files.index_information()).values() - ) + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", ) files.drop() diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 8b54793a36..7cb93adf81 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -510,9 +510,8 @@ def test_kill_cursors(self): self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on # when the thread last ran. - self.assertTrue( - cursor_id in succeeded.reply["cursorsUnknown"] - or cursor_id in succeeded.reply["cursorsKilled"] + self.assertIn( + cursor_id, succeeded.reply["cursorsUnknown"] + succeeded.reply["cursorsKilled"] ) def test_non_bulk_writes(self): @@ -1064,7 +1063,7 @@ def test_write_errors(self): self.assertEqual(2, len(errors)) fields = {"index", "code", "errmsg"} for error in errors: - self.assertTrue(fields.issubset(set(error))) + self.assertLessEqual(fields, set(error)) def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch From bfaab82e2644a9d6867626cff08c1ff7d30517a3 Mon Sep 17 00:00:00 2001 From: "mongodb-drivers-pr-bot[bot]" <147046816+mongodb-drivers-pr-bot[bot]@users.noreply.github.com> Date: Thu, 31 Jul 2025 11:40:02 -0400 Subject: [PATCH 2001/2111] [Spec Resync] 07-28-2025 (#2447) Co-authored-by: Cloud User Co-authored-by: Noah Stapp --- .evergreen/remove-unimplemented-tests.sh | 15 +- .evergreen/scripts/resync-all-specs.py | 6 +- .evergreen/spec-patch/PYTHON-2673.patch | 64 ++ .evergreen/spec-patch/PYTHON-3712.patch | 14 + .evergreen/spec-patch/PYTHON-4261.patch | 61 ++ test/bson_corpus/decimal128-1.json | 24 + .../modifyCollection-pre_and_post_images.json | 111 --- .../timeseries-collection.json | 2 +- .../pre-42-server-connection-id.json | 119 --- .../pre-42-server-connection-id.json | 101 -- .../connection-logging.json | 34 +- ...out-returned-connection-maxConnecting.json | 14 +- .../connection_string/test/valid-options.json | 2 +- .../test/valid-warnings.json | 2 +- .../client-bulkWrite-replaceOne-sort.json | 2 +- .../discovered_standalone.json | 2 +- .../replica_set_with_no_primary.json | 2 +- .../replica_set_with_primary.json | 2 +- .../replica_set_with_removal.json | 2 +- .../sdam_monitoring/required_replica_set.json | 2 +- test/sdam_monitoring/standalone.json | 2 +- ...ne_suppress_equal_description_changes.json | 4 +- test/server_selection_logging/standalone.json | 928 +----------------- 23 files changed, 215 insertions(+), 1300 deletions(-) create mode 100644 .evergreen/spec-patch/PYTHON-2673.patch create mode 100644 .evergreen/spec-patch/PYTHON-3712.patch create mode 100644 .evergreen/spec-patch/PYTHON-4261.patch delete mode 100644 test/collection_management/modifyCollection-pre_and_post_images.json delete mode 100644 test/command_logging/pre-42-server-connection-id.json delete mode 100644 test/command_monitoring/pre-42-server-connection-id.json diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh index d4eaff473e..92685ab2b7 100755 --- a/.evergreen/remove-unimplemented-tests.sh +++ b/.evergreen/remove-unimplemented-tests.sh @@ -41,4 +41,17 @@ rm $PYMONGO/test/crud/unified/updateMany-rawdata.json rm $PYMONGO/test/crud/unified/updateOne-rawdata.json rm $PYMONGO/test/index_management/index-rawdata.json -echo "Done removing unimplemented tests\n" +# PyMongo does not support modifyCollection +rm $PYMONGO/test/collection_management/modifyCollection-*.json + +# PYTHON-5248 - Remove support for MongoDB 4.0 +rm $PYMONGO/test/**/pre-42-*.json + +# PYTHON-3359 - Remove Database and Collection level timeout override +rm $PYMONGO/test/csot/override-collection-timeoutMS.json +rm $PYMONGO/test/csot/override-database-timeoutMS.json + +# PYTHON-2943 - Socks5 Proxy Support +rm $PYMONGO/test/uri_options/proxy-options.json + +echo "Done removing unimplemented tests" diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py index d824211d40..0817e2fc3b 100644 --- a/.evergreen/scripts/resync-all-specs.py +++ b/.evergreen/scripts/resync-all-specs.py @@ -33,7 +33,11 @@ def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: def apply_patches(): print("Beginning to apply patches") # noqa: T201 subprocess.run(["bash", "./.evergreen/remove-unimplemented-tests.sh"], check=True) # noqa: S603, S607 - subprocess.run(["git apply -R --allow-empty ./.evergreen/spec-patch/*"], shell=True, check=True) # noqa: S602, S607 + subprocess.run( + ["git apply -R --allow-empty --whitespace=fix ./.evergreen/spec-patch/*"], # noqa: S607 + shell=True, # noqa: S602 + check=True, + ) def check_new_spec_directories(directory: pathlib.Path) -> list[str]: diff --git a/.evergreen/spec-patch/PYTHON-2673.patch b/.evergreen/spec-patch/PYTHON-2673.patch new file mode 100644 index 0000000000..868538f7b7 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-2673.patch @@ -0,0 +1,64 @@ +diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json +index 43e4fbb4f..4e2a55fd4 100644 +--- a/test/load_balancer/cursors.json ++++ b/test/load_balancer/cursors.json +@@ -376,7 +376,7 @@ + ] + }, + { ++ "description": "pinned connections are not returned after an network error during getMore", +- "description": "pinned connections are returned after an network error during getMore", + "operations": [ + { + "name": "failPoint", +@@ -440,7 +440,7 @@ + "object": "testRunner", + "arguments": { + "client": "client0", ++ "connections": 1 +- "connections": 0 + } + }, + { +@@ -659,7 +659,7 @@ + ] + }, + { ++ "description": "pinned connections are not returned to the pool after a non-network error on getMore", +- "description": "pinned connections are returned to the pool after a non-network error on getMore", + "operations": [ + { + "name": "failPoint", +@@ -715,7 +715,7 @@ + "object": "testRunner", + "arguments": { + "client": "client0", ++ "connections": 1 +- "connections": 0 + } + }, + { +diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json +index 63aabc04d..462fa0aac 100644 +--- a/test/load_balancer/sdam-error-handling.json ++++ b/test/load_balancer/sdam-error-handling.json +@@ -366,6 +366,9 @@ + { + "connectionCreatedEvent": {} + }, ++ { ++ "poolClearedEvent": {} ++ }, + { + "connectionClosedEvent": { + "reason": "error" +@@ -378,9 +375,6 @@ + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } +- }, +- { +- "poolClearedEvent": {} + } + ] + } diff --git a/.evergreen/spec-patch/PYTHON-3712.patch b/.evergreen/spec-patch/PYTHON-3712.patch new file mode 100644 index 0000000000..c746455cd9 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-3712.patch @@ -0,0 +1,14 @@ +diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json +index 4b492f7d8..e44fad1bc 100644 +--- a/test/discovery_and_monitoring/unified/serverMonitoringMode.json ++++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json +@@ -5,8 +5,7 @@ + { + "topologies": [ + "single", ++ "sharded" +- "sharded", +- "sharded-replicaset" + ], + "serverless": "forbid" + } diff --git a/.evergreen/spec-patch/PYTHON-4261.patch b/.evergreen/spec-patch/PYTHON-4261.patch new file mode 100644 index 0000000000..e4ffc5ce9f --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4261.patch @@ -0,0 +1,61 @@ +diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/replica-set.json ++++ b/test/server_selection_logging/replica-set.json +@@ -184,7 +184,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/standalone.json ++++ b/test/server_selection_logging/standalone.json +@@ -191,7 +191,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/sharded.json ++++ b/test/server_selection_logging/sharded.json +@@ -193,7 +193,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/operation-id.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/operation-id.json ++++ b/test/server_selection_logging/operation-id.json +@@ -197,7 +197,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +@@ -383,7 +383,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", diff --git a/test/bson_corpus/decimal128-1.json b/test/bson_corpus/decimal128-1.json index 7eefec6bf7..8e7fbc93c6 100644 --- a/test/bson_corpus/decimal128-1.json +++ b/test/bson_corpus/decimal128-1.json @@ -312,6 +312,30 @@ "canonical_bson": "18000000136400000000000a5bc138938d44c64d31cc3700", "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}" + }, + { + "description": "Clamped zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "Clamped zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "Clamped negative zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "Clamped negative zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" } ] } diff --git a/test/collection_management/modifyCollection-pre_and_post_images.json b/test/collection_management/modifyCollection-pre_and_post_images.json deleted file mode 100644 index 8026faeb17..0000000000 --- a/test/collection_management/modifyCollection-pre_and_post_images.json +++ /dev/null @@ -1,111 +0,0 @@ -{ - "description": "modifyCollection-pre_and_post_images", - "schemaVersion": "1.4", - "runOnRequirements": [ - { - "minServerVersion": "6.0", - "serverless": "forbid" - } - ], - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "papi-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "test" - } - } - ], - "tests": [ - { - "description": "modifyCollection to changeStreamPreAndPostImages enabled", - "operations": [ - { - "name": "dropCollection", - "object": "database0", - "arguments": { - "collection": "test" - } - }, - { - "name": "createCollection", - "object": "database0", - "arguments": { - "collection": "test", - "changeStreamPreAndPostImages": { - "enabled": false - } - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "databaseName": "papi-tests", - "collectionName": "test" - } - }, - { - "name": "modifyCollection", - "object": "database0", - "arguments": { - "collection": "test", - "changeStreamPreAndPostImages": { - "enabled": true - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "drop": "test" - }, - "databaseName": "papi-tests" - } - }, - { - "commandStartedEvent": { - "command": { - "create": "test", - "changeStreamPreAndPostImages": { - "enabled": false - } - } - } - }, - { - "commandStartedEvent": { - "command": { - "collMod": "test", - "changeStreamPreAndPostImages": { - "enabled": true - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json index 8525056fd1..2ee52eac41 100644 --- a/test/collection_management/timeseries-collection.json +++ b/test/collection_management/timeseries-collection.json @@ -255,7 +255,7 @@ "description": "createCollection with bucketing options", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "6.3" } ], "operations": [ diff --git a/test/command_logging/pre-42-server-connection-id.json b/test/command_logging/pre-42-server-connection-id.json deleted file mode 100644 index d5ebd86590..0000000000 --- a/test/command_logging/pre-42-server-connection-id.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "description": "pre-42-server-connection-id", - "schemaVersion": "1.13", - "runOnRequirements": [ - { - "maxServerVersion": "4.0.99" - } - ], - "createEntities": [ - { - "client": { - "id": "client", - "observeLogMessages": { - "command": "debug" - } - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "logging-server-connection-id-tests" - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "logging-tests-collection" - } - } - ], - "initialData": [ - { - "databaseName": "logging-server-connection-id-tests", - "collectionName": "logging-tests-collection", - "documents": [] - } - ], - "tests": [ - { - "description": "command log messages do not include server connection id", - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "$or": true - } - }, - "expectError": { - "isError": true - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "command", - "data": { - "message": "Command started", - "commandName": "insert", - "serverConnectionId": { - "$$exists": false - } - } - }, - { - "level": "debug", - "component": "command", - "data": { - "message": "Command succeeded", - "commandName": "insert", - "serverConnectionId": { - "$$exists": false - } - } - }, - { - "level": "debug", - "component": "command", - "data": { - "message": "Command started", - "commandName": "find", - "serverConnectionId": { - "$$exists": false - } - } - }, - { - "level": "debug", - "component": "command", - "data": { - "message": "Command failed", - "commandName": "find", - "serverConnectionId": { - "$$exists": false - } - } - } - ] - } - ] - } - ] -} diff --git a/test/command_monitoring/pre-42-server-connection-id.json b/test/command_monitoring/pre-42-server-connection-id.json deleted file mode 100644 index 141fbe584f..0000000000 --- a/test/command_monitoring/pre-42-server-connection-id.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "description": "pre-42-server-connection-id", - "schemaVersion": "1.6", - "runOnRequirements": [ - { - "maxServerVersion": "4.0.99" - } - ], - "createEntities": [ - { - "client": { - "id": "client", - "observeEvents": [ - "commandStartedEvent", - "commandSucceededEvent", - "commandFailedEvent" - ] - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "server-connection-id-tests" - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "coll" - } - } - ], - "initialData": [ - { - "databaseName": "server-connection-id-tests", - "collectionName": "coll", - "documents": [] - } - ], - "tests": [ - { - "description": "command events do not include server connection id", - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "x": 1 - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "$or": true - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "insert", - "hasServerConnectionId": false - } - }, - { - "commandSucceededEvent": { - "commandName": "insert", - "hasServerConnectionId": false - } - }, - { - "commandStartedEvent": { - "commandName": "find", - "hasServerConnectionId": false - } - }, - { - "commandFailedEvent": { - "commandName": "find", - "hasServerConnectionId": false - } - } - ] - } - ] - } - ] -} diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index bfbdbe8639..72103b3cab 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -446,6 +446,22 @@ } } }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool cleared", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, { "level": "debug", "component": "connection", @@ -498,26 +514,10 @@ ] } } - }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection pool cleared", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } } ] } ] } ] -} \ No newline at end of file +} diff --git a/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json index 965d56f6d8..10b526e0c3 100644 --- a/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json +++ b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json @@ -23,6 +23,7 @@ } }, "poolOptions": { + "maxConnecting": 2, "maxPoolSize": 10, "waitQueueTimeoutMS": 5000 }, @@ -72,9 +73,8 @@ "connection": "conn0" }, { - "name": "waitForEvent", - "event": "ConnectionCheckedOut", - "count": 4 + "name": "wait", + "ms": 100 } ], "events": [ @@ -104,14 +104,6 @@ "type": "ConnectionCheckedOut", "connectionId": 1, "address": 42 - }, - { - "type": "ConnectionCheckedOut", - "address": 42 - }, - { - "type": "ConnectionCheckedOut", - "address": 42 } ], "ignore": [ diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json index e094bcf606..fce53873a6 100644 --- a/test/connection_string/test/valid-options.json +++ b/test/connection_string/test/valid-options.json @@ -59,4 +59,4 @@ } } ] -} \ No newline at end of file +} diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json index c46a8311c5..e11757eb0e 100644 --- a/test/connection_string/test/valid-warnings.json +++ b/test/connection_string/test/valid-warnings.json @@ -112,4 +112,4 @@ } } ] -} \ No newline at end of file +} diff --git a/test/crud/unified/client-bulkWrite-replaceOne-sort.json b/test/crud/unified/client-bulkWrite-replaceOne-sort.json index b86bc5f942..fc66ec015d 100644 --- a/test/crud/unified/client-bulkWrite-replaceOne-sort.json +++ b/test/crud/unified/client-bulkWrite-replaceOne-sort.json @@ -1,5 +1,5 @@ { - "description": "client bulkWrite updateOne-sort", + "description": "client bulkWrite replaceOne-sort", "schemaVersion": "1.4", "runOnRequirements": [ { diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json index dd8f7fc51e..097203694e 100644 --- a/test/sdam_monitoring/discovered_standalone.json +++ b/test/sdam_monitoring/discovered_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json index 950e32efe1..41d048729d 100644 --- a/test/sdam_monitoring/replica_set_with_no_primary.json +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -19,7 +19,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json index 2ad94d6e6a..3ccc127d1d 100644 --- a/test/sdam_monitoring/replica_set_with_primary.json +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_removal.json b/test/sdam_monitoring/replica_set_with_removal.json index ae28faa30c..dc6fbe7e7d 100644 --- a/test/sdam_monitoring/replica_set_with_removal.json +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -69,7 +69,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json index 401c5d99c5..1f4e5c1d71 100644 --- a/test/sdam_monitoring/required_replica_set.json +++ b/test/sdam_monitoring/required_replica_set.json @@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 821a1525d4..f375a383ca 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json index 5958e2d26c..4d046ff8ed 100644 --- a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -21,7 +21,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json index 3152d0bbf3..fa01ad9911 100644 --- a/test/server_selection_logging/standalone.json +++ b/test/server_selection_logging/standalone.json @@ -47,29 +47,9 @@ } } ], - "initialData": [ - { - "collectionName": "server-selection", - "databaseName": "logging-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], "tests": [ { - "description": "A successful insert operation", + "description": "A successful operation", "operations": [ { "name": "waitForEvent", @@ -250,912 +230,6 @@ ] } ] - }, - { - "description": "A successful find operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - } - } - } - - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful findAndModify operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - }, - "replacement": { - "x": 11 - } - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "findAndModify", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "findAndModify", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful find and getMore operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "batchSize": 3 - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "find", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "getMore", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "getMore", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful aggregate operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - } - ] - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "aggregate", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "aggregate", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful count operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "count", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "count", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful distinct operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": {} - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "distinct", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "distinct", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "Successful collection management operations", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "foo" - } - }, - { - "name": "listCollections", - "object": "database" - }, - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "foo" - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "create", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "create", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "listCollections", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "listCollections", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "drop", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "drop", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "Successful index operations", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "keys": { - "x": 1 - }, - "name": "x_1" - } - }, - { - "name": "listIndexes", - "object": "collection" - }, - { - "name": "dropIndex", - "object": "collection", - "arguments": { - "name": "x_1" - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "createIndexes", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "createIndexes", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "listIndexes", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "listIndexes", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "dropIndexes", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "dropIndexes", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful update operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "update", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "update", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] - }, - { - "description": "A successful delete operation", - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": {} - }, - "count": 2 - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "filter": { - "x": 1 - } - } - } - ], - "expectLogMessages": [ - { - "client": "client", - "messages": [ - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection started", - "selector": { - "$$exists": true - }, - "operation": "delete", - "topologyDescription": { - "$$exists": true - } - } - }, - { - "level": "debug", - "component": "serverSelection", - "data": { - "message": "Server selection succeeded", - "selector": { - "$$exists": true - }, - "operation": "delete", - "topologyDescription": { - "$$exists": true - }, - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - } - ] - } - ] } ] } From 0249a08201529346024d191c7cc098365375d5e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 06:36:28 -0500 Subject: [PATCH 2002/2111] Bump mypy from 1.14.1 to 1.17.1 (#2452) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e7e3161906..da2a425c01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,7 +64,7 @@ pymongocrypt_source = [ ] perf = ["simplejson"] typing = [ - "mypy==1.14.1", + "mypy==1.17.1", "pyright==1.1.392.post0", "typing_extensions", "pip" From cbe1b9e81b62f5a63f83694336fcf466126ce830 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 06:37:25 -0500 Subject: [PATCH 2003/2111] Update coverage requirement from <=7.5,>=5 to >=5,<=7.10.2 (#2453) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index da2a425c01..b3e9683ccd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ gevent = ["gevent"] eventlet = ["eventlet"] coverage = [ "pytest-cov", - "coverage>=5,<=7.5" + "coverage>=5,<=7.10.2" ] mockupdb = [ "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" From 003ff56cbcecaa4860e14403b342967f5ee1336f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 06:37:55 -0500 Subject: [PATCH 2004/2111] Bump the actions group with 2 updates (#2454) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/zizmor.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index f8588a8cc3..fd2808ea19 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@4e828ff8d448a8a6e532957b1811f387a63867e8 # v3 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index e45d3e48db..8a2bccf931 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@87e33752ad17c7c7fc16fe27c858900c59b18d77 + uses: zizmorcore/zizmor-action@383d31df2eb66a2f42db98c9654bdc73231f3e3a From baec1e05f7005dc8a5979028ba5aac74256e3a3e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 07:47:28 -0500 Subject: [PATCH 2005/2111] Bump pyright from 1.1.392.post0 to 1.1.403 (#2455) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b3e9683ccd..4753349e77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,7 +65,7 @@ pymongocrypt_source = [ perf = ["simplejson"] typing = [ "mypy==1.17.1", - "pyright==1.1.392.post0", + "pyright==1.1.403", "typing_extensions", "pip" ] From d11cf20452fbf6418f1612607260faf3ae18753c Mon Sep 17 00:00:00 2001 From: Kevin Albertson Date: Tue, 5 Aug 2025 10:05:22 -0400 Subject: [PATCH 2006/2111] Fix In-Use Encryption examples (#2457) --- doc/examples/encryption.rst | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst index 338b177be3..4b3de8d8d0 100644 --- a/doc/examples/encryption.rst +++ b/doc/examples/encryption.rst @@ -614,7 +614,7 @@ An application using Azure credentials would look like, this time using from pymongo.encryption_options import AutoEncryptionOpts # The empty dictionary enables on-demand credentials. - kms_providers = ({"azure": {}},) + kms_providers = {"azure": {}} key_vault_namespace = "keyvault.datakeys" auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) client = MongoClient(auto_encryption_opts=auto_encryption_opts) @@ -647,7 +647,7 @@ as demonstrated by the following example: import os from bson.codec_options import CodecOptions from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption, QueryType + from pymongo.encryption import ClientEncryption from pymongo.encryption_options import AutoEncryptionOpts local_master_key = os.urandom(96) @@ -670,8 +670,6 @@ as demonstrated by the following example: encrypted_fields_map = { "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "ecocCollection": "encryptedCollection.ecoc", "fields": [ { "path": "firstName", @@ -768,8 +766,6 @@ using an ``encrypted_fields`` mapping, as demonstrated by the following example: unindexed_key_id = client_encryption.create_data_key("local") encrypted_fields = { - "escCollection": "enxcol_.default.esc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": indexed_key_id, From d7074ba9eedc8db2efa9e1929d63f3258b6320bd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 5 Aug 2025 13:01:30 -0500 Subject: [PATCH 2007/2111] PYTHON-5454 & PYTHON-5455 Add preliminary python 3.14 support (#2451) --- .evergreen/generated_configs/tasks.yml | 1117 +++++--- .evergreen/generated_configs/variants.yml | 26 +- .evergreen/scripts/generate_config.py | 30 +- .evergreen/scripts/generate_config_utils.py | 2 +- doc/changelog.rst | 5 + pyproject.toml | 3 +- test/asynchronous/test_cursor.py | 7 +- test/test_cursor.py | 7 +- uv.lock | 2589 +++++++++++-------- 9 files changed, 2184 insertions(+), 1602 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 79594da774..65813db1cf 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -74,7 +74,7 @@ tasks: SUB_TEST_NAME: session-creds PYTHON_VERSION: "3.13" tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-rapid-web-identity-python3.9 + - name: test-auth-aws-rapid-web-identity-python3.14 commands: - func: run server vars: @@ -85,9 +85,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-rapid-web-identity-session-name-python3.9 + - name: test-auth-aws-rapid-web-identity-session-name-python3.14 commands: - func: run server vars: @@ -99,9 +99,9 @@ tasks: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity AWS_ROLE_SESSION_NAME: test - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-latest-ecs-python3.10 + - name: test-auth-aws-latest-ecs-python3.9 commands: - func: run server vars: @@ -112,7 +112,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ecs - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: [auth-aws, auth-aws-ecs] # Backport pr tests @@ -269,6 +269,18 @@ tasks: SUB_TEST_NAME: standalone PYTHON_VERSION: "3.13" tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.14 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.14" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.14" + tags: [mod_wsgi, pr] # No orchestration tests - name: test-no-orchestration-python3.9 @@ -278,13 +290,13 @@ tasks: vars: PYTHON_VERSION: "3.9" tags: [test-no-orchestration, python-3.9] - - name: test-no-orchestration-python3.13 + - name: test-no-orchestration-python3.14 commands: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: "3.13" - tags: [test-no-orchestration, python-3.13] + PYTHON_VERSION: "3.14" + tags: [test-no-orchestration, python-3.14] - name: test-no-orchestration-pypy3.10 commands: - func: assume ec2 role @@ -398,14 +410,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.9 @@ -468,14 +480,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.9 @@ -538,14 +550,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.9 @@ -608,14 +620,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.9 @@ -678,14 +690,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-soft-fail-latest-python3.13 + - name: test-ocsp-ecdsa-soft-fail-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.9 @@ -772,14 +784,14 @@ tasks: - ocsp-ecdsa - rapid - ocsp-staple - - name: test-ocsp-ecdsa-valid-cert-server-staples-latest-python3.13 + - name: test-ocsp-ecdsa-valid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -870,14 +882,14 @@ tasks: - ocsp-ecdsa - rapid - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-latest-python3.13 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -968,14 +980,14 @@ tasks: - ocsp-ecdsa - rapid - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-latest-python3.13 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1066,14 +1078,14 @@ tasks: - ocsp-ecdsa - rapid - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-latest-python3.13 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1140,14 +1152,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 @@ -1210,14 +1222,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.9 @@ -1280,14 +1292,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.13 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.9 @@ -1350,14 +1362,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.9 @@ -1420,14 +1432,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.9 @@ -1490,14 +1502,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.9 @@ -1560,14 +1572,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-soft-fail-v4.4-python3.9 @@ -1630,14 +1642,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-soft-fail-latest-python3.13 + - name: test-ocsp-rsa-soft-fail-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.9 @@ -1724,14 +1736,14 @@ tasks: - ocsp-rsa - rapid - ocsp-staple - - name: test-ocsp-rsa-valid-cert-server-staples-latest-python3.13 + - name: test-ocsp-rsa-valid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1822,14 +1834,14 @@ tasks: - ocsp-rsa - rapid - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-latest-python3.13 + - name: test-ocsp-rsa-invalid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1920,14 +1932,14 @@ tasks: - ocsp-rsa - rapid - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-latest-python3.13 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -2018,14 +2030,14 @@ tasks: - ocsp-rsa - rapid - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-latest-python3.13 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -2092,14 +2104,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 @@ -2162,14 +2174,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.9 @@ -2232,14 +2244,14 @@ tasks: PYTHON_VERSION: "3.9" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.13 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] @@ -2459,48 +2471,48 @@ tasks: - python-3.13 - standalone-noauth-ssl - sync - - name: test-server-version-pypy3.10-async-noauth-ssl-standalone + - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.14 - standalone-noauth-ssl - async - - name: test-server-version-python3.9-sync-noauth-nossl-standalone-cov + - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-pypy3.10 - standalone-noauth-nossl - sync - pr - - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov + - name: test-server-version-python3.9-async-noauth-nossl-standalone-cov commands: - func: run server vars: @@ -2514,15 +2526,15 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-3.9 - standalone-noauth-nossl - async - pr - - name: test-server-version-python3.11-sync-auth-ssl-replica-set-cov + - name: test-server-version-python3.10-sync-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2536,14 +2548,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.11 + - python-3.10 - replica_set-auth-ssl - sync - - name: test-server-version-python3.12-async-auth-ssl-replica-set-cov + - name: test-server-version-python3.11-async-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2557,14 +2569,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.12 + - python-3.11 - replica_set-auth-ssl - async - - name: test-server-version-python3.13-sync-auth-nossl-replica-set-cov + - name: test-server-version-python3.12-sync-auth-nossl-replica-set-cov commands: - func: run server vars: @@ -2578,33 +2590,35 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-3.13 + - python-3.12 - replica_set-auth-nossl - sync - - name: test-server-version-pypy3.10-async-auth-nossl-replica-set + - name: test-server-version-python3.13-async-auth-nossl-replica-set-cov commands: - func: run server vars: AUTH: auth SSL: nossl TOPOLOGY: replica_set + COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: nossl TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.13 - replica_set-auth-nossl - async - - name: test-server-version-python3.9-sync-noauth-ssl-replica-set-cov + - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov commands: - func: run server vars: @@ -2618,35 +2632,33 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-3.14 - replica_set-noauth-ssl - sync - - name: test-server-version-python3.10-async-noauth-ssl-replica-set-cov + - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-pypy3.10 - replica_set-noauth-ssl - async - - name: test-server-version-python3.11-sync-noauth-nossl-replica-set-cov + - name: test-server-version-python3.9-sync-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2660,15 +2672,15 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - server-version - - python-3.11 + - python-3.9 - replica_set-noauth-nossl - sync - pr - - name: test-server-version-python3.12-async-noauth-nossl-replica-set-cov + - name: test-server-version-python3.10-async-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2682,15 +2694,15 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.12 + - python-3.10 - replica_set-noauth-nossl - async - pr - - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2704,35 +2716,37 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version - - python-3.13 + - python-3.11 - sharded_cluster-auth-ssl - sync - pr - - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.12 - sharded_cluster-auth-ssl - async - pr - - name: test-server-version-python3.9-sync-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.13-sync-auth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2746,14 +2760,14 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-3.13 - sharded_cluster-auth-nossl - sync - - name: test-server-version-python3.10-async-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2767,35 +2781,33 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-3.14 - sharded_cluster-auth-nossl - async - - name: test-server-version-python3.11-sync-noauth-ssl-sharded-cluster-cov + - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - server-version - - python-3.11 + - python-pypy3.10 - sharded_cluster-noauth-ssl - sync - - name: test-server-version-python3.12-async-noauth-ssl-sharded-cluster-cov + - name: test-server-version-python3.9-async-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2809,14 +2821,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - server-version - - python-3.12 + - python-3.9 - sharded_cluster-noauth-ssl - async - - name: test-server-version-python3.13-sync-noauth-nossl-sharded-cluster-cov + - name: test-server-version-python3.10-sync-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2830,30 +2842,32 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.13 + - python-3.10 - sharded_cluster-noauth-nossl - sync - - name: test-server-version-pypy3.10-async-noauth-nossl-sharded-cluster + - name: test-server-version-python3.11-async-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.11 - sharded_cluster-noauth-nossl - async - name: test-server-version-python3.9-sync-auth-ssl-sharded-cluster-cov @@ -2940,7 +2954,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - async - - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2955,13 +2969,13 @@ tasks: TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.11" - TEST_NAME: default_sync + TEST_NAME: default_async tags: - server-version - python-3.11 - sharded_cluster-auth-ssl - - sync - - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov + - async + - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2975,14 +2989,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" - TEST_NAME: default_async + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync tags: - server-version - - python-3.11 + - python-3.12 - sharded_cluster-auth-ssl - - async - - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov + - sync + - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2996,14 +3010,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - server-version - - python-3.12 + - python-3.13 - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -3017,14 +3031,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-3.12 + - python-3.13 - sharded_cluster-auth-ssl - async - - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.14-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -3038,11 +3052,32 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - server-version + - python-3.14 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.14-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-3.13 + - python-3.14 - sharded_cluster-auth-ssl - async - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster @@ -3064,31 +3099,50 @@ tasks: - python-pypy3.10 - sharded_cluster-auth-ssl - sync + - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-auth-ssl + - async # Standard tests - - name: test-standard-v4.2-python3.9-sync-noauth-nossl-standalone + - name: test-standard-v4.2-python3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth - SSL: nossl - TOPOLOGY: standalone + SSL: ssl + TOPOLOGY: replica_set VERSION: "4.2" - func: run tests vars: AUTH: noauth - SSL: nossl - TOPOLOGY: standalone + SSL: ssl + TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.9 - - standalone-noauth-nossl + - python-3.10 + - replica_set-noauth-ssl - sync - - name: test-standard-v4.2-python3.10-async-noauth-ssl-replica-set + - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3102,14 +3156,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.10" - TEST_NAME: default_async + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.10 + - python-3.14 - replica_set-noauth-ssl - - async + - sync - name: test-standard-v4.2-python3.11-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -3132,51 +3186,118 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - sync - - name: test-standard-v4.4-python3.12-async-noauth-nossl-standalone + - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.4" + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" - func: run tests vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.4" - PYTHON_VERSION: "3.12" - TEST_NAME: default_async + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - test-standard - - server-4.4 - - python-3.12 - - standalone-noauth-nossl - - async - - name: test-standard-v4.4-python3.13-sync-noauth-ssl-replica-set + - server-4.2 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - sync + - pypy + - name: test-standard-v4.2-python3.13-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: "4.4" + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" - func: run tests vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: "4.4" + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - - server-4.4 + - server-4.2 - python-3.13 - - replica_set-noauth-ssl + - standalone-noauth-nossl - sync - - name: test-standard-v4.4-python3.9-async-auth-ssl-sharded-cluster + - name: test-standard-v4.2-python3.9-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.9 + - standalone-noauth-nossl + - sync + - name: test-standard-v4.4-python3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.10 + - replica_set-noauth-ssl + - async + - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.14 + - replica_set-noauth-ssl + - async + - name: test-standard-v4.4-python3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3190,37 +3311,104 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.9 + - python-3.11 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-pypy3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v5.0-python3.10-sync-noauth-nossl-standalone + - pypy + - name: test-standard-v4.4-python3.13-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "4.4" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.13 + - standalone-noauth-nossl + - async + - name: test-standard-v4.4-python3.9-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.9" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.9 + - standalone-noauth-nossl + - async + - name: test-standard-v5.0-python3.13-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.10" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.10 - - standalone-noauth-nossl + - python-3.13 + - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.11-async-noauth-ssl-replica-set + - name: test-standard-v5.0-python3.9-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3234,15 +3422,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.11" - TEST_NAME: default_async + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.11 + - python-3.9 - replica_set-noauth-ssl - - async - - name: test-standard-v5.0-python3.12-sync-auth-ssl-sharded-cluster + - sync + - name: test-standard-v5.0-python3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3256,27 +3444,71 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.12 + - python-3.10 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.14 - sharded_cluster-auth-ssl - sync - - name: test-standard-v6.0-python3.13-async-noauth-nossl-standalone + - name: test-standard-v5.0-python3.12-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone + VERSION: "5.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.12 + - standalone-noauth-nossl + - sync + - name: test-standard-v6.0-python3.13-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set VERSION: "6.0" PYTHON_VERSION: "3.13" TEST_NAME: default_async @@ -3284,9 +3516,9 @@ tasks: - test-standard - server-6.0 - python-3.13 - - standalone-noauth-nossl + - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.9-sync-noauth-ssl-replica-set + - name: test-standard-v6.0-python3.9-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3301,13 +3533,13 @@ tasks: TOPOLOGY: replica_set VERSION: "6.0" PYTHON_VERSION: "3.9" - TEST_NAME: default_sync + TEST_NAME: default_async tags: - test-standard - server-6.0 - python-3.9 - replica_set-noauth-ssl - - sync + - async - name: test-standard-v6.0-python3.10-async-auth-ssl-sharded-cluster commands: - func: run server @@ -3330,29 +3562,51 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v7.0-python3.11-sync-noauth-nossl-standalone + - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - test-standard + - server-6.0 + - python-3.14 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v6.0-python3.12-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync + VERSION: "6.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async tags: - test-standard - - server-7.0 - - python-3.11 + - server-6.0 + - python-3.12 - standalone-noauth-nossl - - sync - - name: test-standard-v7.0-python3.12-async-noauth-ssl-replica-set + - async + - name: test-standard-v7.0-python3.12-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3367,13 +3621,13 @@ tasks: TOPOLOGY: replica_set VERSION: "7.0" PYTHON_VERSION: "3.12" - TEST_NAME: default_async + TEST_NAME: default_sync tags: - test-standard - server-7.0 - python-3.12 - replica_set-noauth-ssl - - async + - sync - name: test-standard-v7.0-python3.13-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -3396,29 +3650,74 @@ tasks: - python-3.13 - sharded_cluster-auth-ssl - sync - - name: test-standard-v8.0-python3.9-async-noauth-nossl-standalone + - name: test-standard-v7.0-python3.9-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-3.9 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v7.0-python3.11-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" - PYTHON_VERSION: "3.9" - TEST_NAME: default_async + VERSION: "7.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync tags: - test-standard - - server-8.0 - - python-3.9 + - server-7.0 + - python-3.11 - standalone-noauth-nossl - - async - - name: test-standard-v8.0-python3.10-sync-noauth-ssl-replica-set + - sync + - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-pypy3.10 + - standalone-noauth-nossl + - sync + - pypy + - name: test-standard-v8.0-python3.12-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3432,15 +3731,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.10" - TEST_NAME: default_sync + PYTHON_VERSION: "3.12" + TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.10 + - python-3.12 - replica_set-noauth-ssl - - sync - - name: test-standard-v8.0-python3.11-async-auth-ssl-sharded-cluster + - async + - name: test-standard-v8.0-python3.13-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3454,104 +3753,105 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.11 + - python-3.13 - sharded_cluster-auth-ssl - async - - name: test-standard-rapid-python3.12-sync-noauth-nossl-standalone + - name: test-standard-v8.0-python3.9-async-auth-ssl-sharded-cluster commands: - func: run server vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: rapid + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" - func: run tests vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: rapid - PYTHON_VERSION: "3.12" - TEST_NAME: default_sync + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.9" + TEST_NAME: default_async tags: - test-standard - - server-rapid - - python-3.12 - - standalone-noauth-nossl - - sync - - name: test-standard-rapid-python3.13-async-noauth-ssl-replica-set + - server-8.0 + - python-3.9 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v8.0-python3.11-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" - func: run tests vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - PYTHON_VERSION: "3.13" + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - - server-rapid - - python-3.13 - - replica_set-noauth-ssl + - server-8.0 + - python-3.11 + - standalone-noauth-nossl - async - - name: test-standard-rapid-python3.9-sync-auth-ssl-sharded-cluster + - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone commands: - func: run server vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: rapid + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" - func: run tests vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: rapid - PYTHON_VERSION: "3.9" - TEST_NAME: default_sync + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async tags: - test-standard - - server-rapid - - python-3.9 - - sharded_cluster-auth-ssl - - sync - - name: test-standard-latest-python3.10-async-noauth-nossl-standalone + - server-8.0 + - python-pypy3.10 + - standalone-noauth-nossl + - async + - pypy + - name: test-standard-latest-python3.11-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth - SSL: nossl - TOPOLOGY: standalone + SSL: ssl + TOPOLOGY: replica_set VERSION: latest - func: run tests vars: AUTH: noauth - SSL: nossl - TOPOLOGY: standalone + SSL: ssl + TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.10 - - standalone-noauth-nossl + - python-3.11 + - replica_set-noauth-ssl - async - pr - - name: test-standard-latest-python3.11-sync-noauth-ssl-replica-set + - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3565,15 +3865,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.11 + - python-pypy3.10 - replica_set-noauth-ssl - - sync - - pr + - async + - pypy - name: test-standard-latest-python3.12-async-auth-ssl-sharded-cluster commands: - func: run server @@ -3597,145 +3897,120 @@ tasks: - sharded_cluster-auth-ssl - async - pr - - name: test-standard-v4.2-pypy3.10-sync-noauth-nossl-standalone + - name: test-standard-latest-python3.10-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.2" + VERSION: latest - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.2" - PYTHON_VERSION: pypy3.10 - TEST_NAME: default_sync + VERSION: latest + PYTHON_VERSION: "3.10" + TEST_NAME: default_async tags: - test-standard - - server-4.2 - - python-pypy3.10 + - server-latest + - python-3.10 - standalone-noauth-nossl - - sync - - pypy - - name: test-standard-v4.4-pypy3.10-async-noauth-ssl-replica-set + - async + - pr + - name: test-standard-latest-python3.14-async-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: "4.4" + SSL: nossl + TOPOLOGY: standalone + VERSION: latest - func: run tests vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - - server-4.4 - - python-pypy3.10 - - replica_set-noauth-ssl + - server-latest + - python-3.14 + - standalone-noauth-nossl - async - - pypy - - name: test-standard-v5.0-pypy3.10-sync-auth-ssl-sharded-cluster + - pr + - name: test-standard-rapid-python3.11-sync-noauth-ssl-replica-set commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "5.0" + TOPOLOGY: replica_set + VERSION: rapid - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard - - server-5.0 - - python-pypy3.10 - - sharded_cluster-auth-ssl + - server-rapid + - python-3.11 + - replica_set-noauth-ssl - sync - - pypy - - name: test-standard-v6.0-pypy3.10-async-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "6.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "6.0" - PYTHON_VERSION: pypy3.10 - TEST_NAME: default_async - tags: - - test-standard - - server-6.0 - - python-pypy3.10 - - standalone-noauth-nossl - - async - - pypy - - name: test-standard-v7.0-pypy3.10-sync-noauth-ssl-replica-set + - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" + VERSION: rapid PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - - server-7.0 + - server-rapid - python-pypy3.10 - replica_set-noauth-ssl - sync - pypy - - name: test-standard-v8.0-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-standard-rapid-python3.12-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" - PYTHON_VERSION: pypy3.10 - TEST_NAME: default_async + VERSION: rapid + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync tags: - test-standard - - server-8.0 - - python-pypy3.10 + - server-rapid + - python-3.12 - sharded_cluster-auth-ssl - - async - - pypy - - name: test-standard-rapid-pypy3.10-sync-noauth-nossl-standalone + - sync + - name: test-standard-rapid-python3.10-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3749,38 +4024,36 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-pypy3.10 + - python-3.10 - standalone-noauth-nossl - sync - - pypy - - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set + - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: latest + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid - func: run tests vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: latest - PYTHON_VERSION: pypy3.10 - TEST_NAME: default_async + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync tags: - test-standard - - server-latest - - python-pypy3.10 - - replica_set-noauth-ssl - - async - - pypy + - server-rapid + - python-3.14 + - standalone-noauth-nossl + - sync # Test non standard tests - name: test-non-standard-v4.2-python3.9-noauth-nossl-standalone @@ -3888,7 +4161,7 @@ tasks: - python-3.13 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v4.4-python3.9-auth-ssl-sharded-cluster + - name: test-non-standard-v4.4-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3902,14 +4175,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-4.4 - - python-3.9 + - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone + - name: test-non-standard-v5.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -3923,14 +4196,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-5.0 - - python-3.10 + - python-3.9 - standalone-noauth-nossl - noauth - - name: test-non-standard-v5.0-python3.11-noauth-ssl-replica-set + - name: test-non-standard-v5.0-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -3944,14 +4217,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-5.0 - - python-3.11 + - python-3.10 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v5.0-python3.12-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3965,14 +4238,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-5.0 - - python-3.12 + - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v6.0-python3.13-noauth-nossl-standalone + - name: test-non-standard-v6.0-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -3986,14 +4259,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-6.0 - - python-3.13 + - python-3.12 - standalone-noauth-nossl - noauth - - name: test-non-standard-v6.0-python3.9-noauth-ssl-replica-set + - name: test-non-standard-v6.0-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -4007,14 +4280,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-6.0 - - python-3.9 + - python-3.13 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v6.0-python3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v6.0-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4028,14 +4301,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-6.0 - - python-3.10 + - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v7.0-python3.11-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -4049,14 +4322,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-7.0 - - python-3.11 + - python-3.9 - standalone-noauth-nossl - noauth - - name: test-non-standard-v7.0-python3.12-noauth-ssl-replica-set + - name: test-non-standard-v7.0-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -4070,14 +4343,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-7.0 - - python-3.12 + - python-3.10 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v7.0-python3.13-auth-ssl-sharded-cluster + - name: test-non-standard-v7.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4091,14 +4364,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-7.0 - - python-3.13 + - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v8.0-python3.9-noauth-nossl-standalone + - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -4112,14 +4385,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-8.0 - - python-3.9 + - python-3.12 - standalone-noauth-nossl - noauth - - name: test-non-standard-v8.0-python3.10-noauth-ssl-replica-set + - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -4133,14 +4406,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-8.0 - - python-3.10 + - python-3.13 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v8.0-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4154,14 +4427,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-8.0 - - python-3.11 + - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-rapid-python3.12-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -4175,14 +4448,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-rapid - - python-3.12 + - python-3.9 - standalone-noauth-nossl - noauth - - name: test-non-standard-rapid-python3.13-noauth-ssl-replica-set + - name: test-non-standard-rapid-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -4196,14 +4469,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-rapid - - python-3.13 + - python-3.10 - replica_set-noauth-ssl - noauth - - name: test-non-standard-rapid-python3.9-auth-ssl-sharded-cluster + - name: test-non-standard-rapid-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4217,14 +4490,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-rapid - - python-3.9 + - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-latest-python3.10-noauth-nossl-standalone + - name: test-non-standard-latest-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -4238,15 +4511,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-latest - - python-3.10 + - python-3.12 - standalone-noauth-nossl - noauth - pr - - name: test-non-standard-latest-python3.11-noauth-ssl-replica-set + - name: test-non-standard-latest-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -4260,15 +4533,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-latest - - python-3.11 + - python-3.13 - replica_set-noauth-ssl - noauth - pr - - name: test-non-standard-latest-python3.12-auth-ssl-sharded-cluster + - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4282,11 +4555,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-latest - - python-3.12 + - python-3.14 - sharded_cluster-auth-ssl - auth - pr diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index d4726a22a9..4fb5a36250 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -292,32 +292,32 @@ buildvariants: AUTH: auth # Free threaded tests - - name: free-threaded-rhel8-python3.13t + - name: free-threaded-rhel8-python3.14t tasks: - name: .free-threading - display_name: Free-threaded RHEL8 Python3.13t + display_name: Free-threaded RHEL8 Python3.14t run_on: - rhel87-small expansions: - PYTHON_BINARY: /opt/python/3.13t/bin/python3 + PYTHON_BINARY: /opt/python/3.14t/bin/python3 tags: [pr] - - name: free-threaded-macos-python3.13t + - name: free-threaded-macos-python3.14t tasks: - name: .free-threading - display_name: Free-threaded macOS Python3.13t + display_name: Free-threaded macOS Python3.14t run_on: - macos-14 expansions: - PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t + PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.14/bin/python3t tags: [] - - name: free-threaded-macos-arm64-python3.13t + - name: free-threaded-macos-arm64-python3.14t tasks: - name: .free-threading - display_name: Free-threaded macOS Arm64 Python3.13t + display_name: Free-threaded macOS Arm64 Python3.14t run_on: - macos-14-arm64 expansions: - PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.13/bin/python3t + PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.14/bin/python3t tags: [] - name: free-threaded-win64-python3.14t tasks: @@ -332,24 +332,20 @@ buildvariants: # Green framework tests - name: green-eventlet-rhel8 tasks: - - name: .test-standard .standalone-noauth-nossl .python-3.9 + - name: .test-standard .standalone-noauth-nossl .python-3.9 .sync display_name: Green Eventlet RHEL8 run_on: - rhel87-small expansions: GREEN_FRAMEWORK: eventlet - AUTH: auth - SSL: ssl - name: green-gevent-rhel8 tasks: - - name: .test-standard .standalone-noauth-nossl + - name: .test-standard .standalone-noauth-nossl .sync display_name: Green Gevent RHEL8 run_on: - rhel87-small expansions: GREEN_FRAMEWORK: gevent - AUTH: auth - SSL: ssl # Import time tests - name: import-time diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index c27cfe130f..2d160152f7 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -107,10 +107,7 @@ def create_standard_nonlinux_variants() -> list[BuildVariant]: def create_free_threaded_variants() -> list[BuildVariant]: variants = [] for host_name in ("rhel8", "macos", "macos-arm64", "win64"): - if host_name == "win64": - python = "3.14t" - else: - python = "3.13t" + python = "3.14t" tasks = [".free-threading"] tags = [] if host_name == "rhel8": @@ -300,12 +297,12 @@ def create_green_framework_variants(): variants = [] host = DEFAULT_HOST for framework in ["eventlet", "gevent"]: - tasks = [".test-standard .standalone-noauth-nossl"] + tasks = [".test-standard .standalone-noauth-nossl .sync"] if framework == "eventlet": # Eventlet has issues with dnspython > 2.0 and newer versions of CPython # https://jira.mongodb.org/browse/PYTHON-5284 - tasks = [".test-standard .standalone-noauth-nossl .python-3.9"] - expansions = dict(GREEN_FRAMEWORK=framework, AUTH="auth", SSL="ssl") + tasks = [".test-standard .standalone-noauth-nossl .python-3.9 .sync"] + expansions = dict(GREEN_FRAMEWORK=framework) display_name = get_variant_name(f"Green {framework.capitalize()}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) @@ -636,20 +633,15 @@ def create_test_non_standard_tasks(): def create_standard_tasks(): """For variants that do not set a TEST_NAME.""" tasks = [] - task_combos = [] - # For each version and topology, rotate through the CPythons and sync/async. - for (version, topology), python, sync in zip_cycle( - list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS, SYNCS - ): - pr = version == "latest" - task_combos.append((version, topology, python, sync, pr)) - # For each PyPy and topology, rotate through the the versions and sync/async. - for (python, topology), version, sync in zip_cycle( - list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS, SYNCS + task_combos = set() + # For each python and topology and sync/async, rotate through the the versions. + for (python, topology, sync), version in zip_cycle( + list(product(CPYTHONS + PYPYS, TOPOLOGIES, SYNCS)), ALL_VERSIONS ): - task_combos.append((version, topology, python, sync, False)) + pr = version == "latest" and python not in PYPYS + task_combos.add((version, topology, python, sync, pr)) - for version, topology, python, sync, pr in task_combos: + for version, topology, python, sync, pr in sorted(task_combos): auth, ssl = get_standard_auth_ssl(topology) tags = [ "test-standard", diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index e676200d38..632d34ea6f 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -22,7 +22,7 @@ ############## ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] -CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13"] +CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"] PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] diff --git a/doc/changelog.rst b/doc/changelog.rst index 2e56b2c019..d88b114fc6 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -5,6 +5,11 @@ Changes in Version 4.14.0 (XXXX/XX/XX) -------------------------------------- PyMongo 4.14 brings a number of changes including: +- Added preliminary support for Python 3.14 and 3.14 with free-threading. We do not yet support the following with Python 3.14: + - Subinterpreters (``concurrent.interpreters``) + - Free-threading with Encryption + - mod_wsgi +- Removed experimental support for free-threading support in Python 3.13. - Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. - Added :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and diff --git a/pyproject.toml b/pyproject.toml index 4753349e77..a877301226 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,7 +50,8 @@ dev = [ "pre-commit>=4.0" ] pip = ["pip"] -gevent = ["gevent"] +# TODO: PYTHON-5464 +gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] eventlet = ["eventlet"] coverage = [ "pytest-cov", diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 53b3289fb8..e7da40fa19 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -369,7 +369,12 @@ async def test_explain_csot(self): client = await self.async_rs_or_single_client(event_listeners=[listener]) # Create a collection, referred to as collection, with the namespace explain-test.collection. - collection = client["explain-test"]["collection"] + # Workaround for SERVER-108463 + names = await client["explain-test"].list_collection_names() + if "collection" not in names: + collection = await client["explain-test"].create_collection("collection") + else: + collection = client["explain-test"]["collection"] # Run an explained find on collection. The find will have the query predicate { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain. with pymongo.timeout(2.0): diff --git a/test/test_cursor.py b/test/test_cursor.py index d0bd48e747..9a4fb86e93 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -361,7 +361,12 @@ def test_explain_csot(self): client = self.rs_or_single_client(event_listeners=[listener]) # Create a collection, referred to as collection, with the namespace explain-test.collection. - collection = client["explain-test"]["collection"] + # Workaround for SERVER-108463 + names = client["explain-test"].list_collection_names() + if "collection" not in names: + collection = client["explain-test"].create_collection("collection") + else: + collection = client["explain-test"]["collection"] # Run an explained find on collection. The find will have the query predicate { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain. with pymongo.timeout(2.0): diff --git a/uv.lock b/uv.lock index aa23663a84..9c45c4cdb9 100644 --- a/uv.lock +++ b/uv.lock @@ -1,11 +1,25 @@ version = 1 -revision = 1 +revision = 2 requires-python = ">=3.9" resolution-markers = [ - "python_full_version >= '3.10'", + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", "python_full_version < '3.10'", ] +[[package]] +name = "accessible-pygments" +version = "0.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/c1/bbac6a50d02774f91572938964c582fff4270eee73ab822a4aeea4d8b11b/accessible_pygments-0.0.5.tar.gz", hash = "sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872", size = 1377899, upload-time = "2024-05-10T11:23:10.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/3f/95338030883d8c8b91223b4e21744b04d11b161a3ef117295d8241f50ab4/accessible_pygments-0.0.5-py3-none-any.whl", hash = "sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7", size = 1395903, upload-time = "2024-05-10T11:23:08.421Z" }, +] + [[package]] name = "alabaster" version = "0.7.16" @@ -13,9 +27,9 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776 } +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776, upload-time = "2024-01-10T00:56:10.189Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511 }, + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511, upload-time = "2024-01-10T00:56:08.388Z" }, ] [[package]] @@ -23,16 +37,18 @@ name = "alabaster" version = "1.0.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version >= '3.10'", + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", ] -sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210 } +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210, upload-time = "2024-07-26T18:15:03.762Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929 }, + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = "2024-07-26T18:15:02.05Z" }, ] [[package]] name = "anyio" -version = "4.8.0" +version = "4.9.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, @@ -40,299 +56,431 @@ dependencies = [ { name = "sniffio" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126 } +sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041 }, + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, ] [[package]] name = "attrs" -version = "24.3.0" +version = "25.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/48/c8/6260f8ccc11f0917360fc0da435c5c9c7504e3db174d5a12a1494887b045/attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff", size = 805984 } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/aa/ab0f7891a01eeb2d2e338ae8fecbe57fcebea1a24dbb64d45801bfab481d/attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308", size = 63397 }, + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, ] [[package]] name = "babel" -version = "2.16.0" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", size = 9348104 } +sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, + { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, ] [[package]] name = "beautifulsoup4" -version = "4.12.3" +version = "4.13.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "soupsieve" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b3/ca/824b1195773ce6166d388573fc106ce56d4a805bd7427b624e063596ec58/beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", size = 581181 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067, upload-time = "2025-04-15T17:05:13.836Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, + { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285, upload-time = "2025-04-15T17:05:12.221Z" }, ] [[package]] name = "boto3" -version = "1.36.2" +version = "1.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3a/e9/c0b2fa75efc4007ea1af21bc2fcbedf6e545c517fb90904d7f59850e02bf/boto3-1.36.2.tar.gz", hash = "sha256:fde1c29996b77274a60b7bc9f741525afa6267bb1716eb644a764fb7c124a0d2", size = 110998 } +sdist = { url = "https://files.pythonhosted.org/packages/7b/34/298ef2023d7d88069776c9cc26b42ba6f05d143a1c9b44a0f65cd795c65b/boto3-1.40.0.tar.gz", hash = "sha256:fc1b3ca3baf3d8820c6faddf47cbba8ad3cd16f8e8d7e2f76d304bf995932eb7", size = 111847, upload-time = "2025-07-31T19:21:06.735Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/17/c2/72a92794237b43f64141e156bc3a58bc36d18631f1a614e1e97a48b56447/boto3-1.36.2-py3-none-any.whl", hash = "sha256:76cfc9a705be46e8d22607efacc8d688c064f923d785a01c00b28e9a96425d1a", size = 139166 }, + { url = "https://files.pythonhosted.org/packages/5d/44/158581021038c5fc886ffa27fa4731fb4939258da7a23e0bc70b2d5757c9/boto3-1.40.0-py3-none-any.whl", hash = "sha256:959443055d2af676c336cc6033b3f870a8a924384b70d0b2905081d649378179", size = 139882, upload-time = "2025-07-31T19:21:04.65Z" }, ] [[package]] name = "botocore" -version = "1.36.2" +version = "1.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "urllib3", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "urllib3", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c6/93/353b70cea6447e37789fc2d6f761fc12ae36fb4adb6f558055de8cdf655f/botocore-1.36.2.tar.gz", hash = "sha256:a1fe6610983f0214b0c7655fe6990b6a731746baf305b182976fc7b568fc3cb0", size = 13505440 } +sdist = { url = "https://files.pythonhosted.org/packages/8f/e7/770ce910457ac6c68ea79b83892ab7a7cb08528f5d1dd77e51bf02a8529e/botocore-1.40.0.tar.gz", hash = "sha256:850242560dc8e74d542045a81eb6cc15f1b730b4ba55ba5b30e6d686548dfcaf", size = 14262316, upload-time = "2025-07-31T19:20:56.662Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/fe/c066e8cb069027c12dbcf9066a7a4f3e9d2a31b10c7b174a8455ef1d0f46/botocore-1.36.2-py3-none-any.whl", hash = "sha256:bc3b7e3b573a48af2bd7116b80fe24f9a335b0b67314dcb2697a327d009abf29", size = 13302324 }, + { url = "https://files.pythonhosted.org/packages/38/5a/bebc53f022514412613615b09aef20fbe804abb3ea26ec27e504a2d21c8f/botocore-1.40.0-py3-none-any.whl", hash = "sha256:2063e6d035a6a382b2ae37e40f5144044e55d4e091910d0c9f1be3121ad3e4e6", size = 13921768, upload-time = "2025-07-31T19:20:51.487Z" }, ] [[package]] name = "certifi" -version = "2024.12.14" +version = "2025.7.14" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +sdist = { url = "https://files.pythonhosted.org/packages/b3/76/52c535bcebe74590f296d6c77c86dabf761c41980e1347a2422e4aa2ae41/certifi-2025.7.14.tar.gz", hash = "sha256:8ea99dbdfaaf2ba2f9bac77b9249ef62ec5218e7c2b2e903378ed5fccf765995", size = 163981, upload-time = "2025-07-14T03:29:28.449Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, + { url = "https://files.pythonhosted.org/packages/4f/52/34c6cf5bb9285074dc3531c437b3919e825d976fde097a7a73f79e726d03/certifi-2025.7.14-py3-none-any.whl", hash = "sha256:6b31f564a415d79ee77df69d757bb49a5bb53bd9f756cbbe24394ffd6fc1f4b2", size = 162722, upload-time = "2025-07-14T03:29:26.863Z" }, ] [[package]] name = "cffi" version = "1.17.1" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] +dependencies = [ + { name = "pycparser", marker = "python_full_version != '3.14.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191, upload-time = "2024-09-04T20:43:30.027Z" }, + { url = "https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592, upload-time = "2024-09-04T20:43:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024, upload-time = "2024-09-04T20:43:34.186Z" }, + { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188, upload-time = "2024-09-04T20:43:36.286Z" }, + { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571, upload-time = "2024-09-04T20:43:38.586Z" }, + { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687, upload-time = "2024-09-04T20:43:40.084Z" }, + { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211, upload-time = "2024-09-04T20:43:41.526Z" }, + { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325, upload-time = "2024-09-04T20:43:43.117Z" }, + { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784, upload-time = "2024-09-04T20:43:45.256Z" }, + { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564, upload-time = "2024-09-04T20:43:46.779Z" }, + { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804, upload-time = "2024-09-04T20:43:48.186Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299, upload-time = "2024-09-04T20:43:49.812Z" }, + { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" }, + { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" }, + { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" }, + { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" }, + { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" }, + { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" }, + { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" }, + { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" }, + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, + { url = "https://files.pythonhosted.org/packages/b9/ea/8bb50596b8ffbc49ddd7a1ad305035daa770202a6b782fc164647c2673ad/cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", size = 182220, upload-time = "2024-09-04T20:45:01.577Z" }, + { url = "https://files.pythonhosted.org/packages/ae/11/e77c8cd24f58285a82c23af484cf5b124a376b32644e445960d1a4654c3a/cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", size = 178605, upload-time = "2024-09-04T20:45:03.837Z" }, + { url = "https://files.pythonhosted.org/packages/ed/65/25a8dc32c53bf5b7b6c2686b42ae2ad58743f7ff644844af7cdb29b49361/cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", size = 424910, upload-time = "2024-09-04T20:45:05.315Z" }, + { url = "https://files.pythonhosted.org/packages/42/7a/9d086fab7c66bd7c4d0f27c57a1b6b068ced810afc498cc8c49e0088661c/cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", size = 447200, upload-time = "2024-09-04T20:45:06.903Z" }, + { url = "https://files.pythonhosted.org/packages/da/63/1785ced118ce92a993b0ec9e0d0ac8dc3e5dbfbcaa81135be56c69cabbb6/cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", size = 454565, upload-time = "2024-09-04T20:45:08.975Z" }, + { url = "https://files.pythonhosted.org/packages/74/06/90b8a44abf3556599cdec107f7290277ae8901a58f75e6fe8f970cd72418/cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", size = 435635, upload-time = "2024-09-04T20:45:10.64Z" }, + { url = "https://files.pythonhosted.org/packages/bd/62/a1f468e5708a70b1d86ead5bab5520861d9c7eacce4a885ded9faa7729c3/cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", size = 445218, upload-time = "2024-09-04T20:45:12.366Z" }, + { url = "https://files.pythonhosted.org/packages/5b/95/b34462f3ccb09c2594aa782d90a90b045de4ff1f70148ee79c69d37a0a5a/cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", size = 460486, upload-time = "2024-09-04T20:45:13.935Z" }, + { url = "https://files.pythonhosted.org/packages/fc/fc/a1e4bebd8d680febd29cf6c8a40067182b64f00c7d105f8f26b5bc54317b/cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", size = 437911, upload-time = "2024-09-04T20:45:15.696Z" }, + { url = "https://files.pythonhosted.org/packages/e6/c3/21cab7a6154b6a5ea330ae80de386e7665254835b9e98ecc1340b3a7de9a/cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", size = 460632, upload-time = "2024-09-04T20:45:17.284Z" }, + { url = "https://files.pythonhosted.org/packages/cb/b5/fd9f8b5a84010ca169ee49f4e4ad6f8c05f4e3545b72ee041dbbcb159882/cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", size = 171820, upload-time = "2024-09-04T20:45:18.762Z" }, + { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290, upload-time = "2024-09-04T20:45:20.226Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0b1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", +] dependencies = [ - { name = "pycparser" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191 }, - { url = "https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592 }, - { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024 }, - { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188 }, - { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571 }, - { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687 }, - { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211 }, - { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325 }, - { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784 }, - { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564 }, - { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804 }, - { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299 }, - { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264 }, - { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651 }, - { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259 }, - { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200 }, - { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235 }, - { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721 }, - { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242 }, - { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999 }, - { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242 }, - { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604 }, - { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727 }, - { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400 }, - { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, - { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, - { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, - { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, - { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, - { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, - { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, - { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, - { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736 }, - { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, - { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, - { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, - { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, - { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, - { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, - { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, - { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, - { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, - { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, - { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, - { url = "https://files.pythonhosted.org/packages/b9/ea/8bb50596b8ffbc49ddd7a1ad305035daa770202a6b782fc164647c2673ad/cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", size = 182220 }, - { url = "https://files.pythonhosted.org/packages/ae/11/e77c8cd24f58285a82c23af484cf5b124a376b32644e445960d1a4654c3a/cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", size = 178605 }, - { url = "https://files.pythonhosted.org/packages/ed/65/25a8dc32c53bf5b7b6c2686b42ae2ad58743f7ff644844af7cdb29b49361/cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", size = 424910 }, - { url = "https://files.pythonhosted.org/packages/42/7a/9d086fab7c66bd7c4d0f27c57a1b6b068ced810afc498cc8c49e0088661c/cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", size = 447200 }, - { url = "https://files.pythonhosted.org/packages/da/63/1785ced118ce92a993b0ec9e0d0ac8dc3e5dbfbcaa81135be56c69cabbb6/cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", size = 454565 }, - { url = "https://files.pythonhosted.org/packages/74/06/90b8a44abf3556599cdec107f7290277ae8901a58f75e6fe8f970cd72418/cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", size = 435635 }, - { url = "https://files.pythonhosted.org/packages/bd/62/a1f468e5708a70b1d86ead5bab5520861d9c7eacce4a885ded9faa7729c3/cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", size = 445218 }, - { url = "https://files.pythonhosted.org/packages/5b/95/b34462f3ccb09c2594aa782d90a90b045de4ff1f70148ee79c69d37a0a5a/cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", size = 460486 }, - { url = "https://files.pythonhosted.org/packages/fc/fc/a1e4bebd8d680febd29cf6c8a40067182b64f00c7d105f8f26b5bc54317b/cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", size = 437911 }, - { url = "https://files.pythonhosted.org/packages/e6/c3/21cab7a6154b6a5ea330ae80de386e7665254835b9e98ecc1340b3a7de9a/cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", size = 460632 }, - { url = "https://files.pythonhosted.org/packages/cb/b5/fd9f8b5a84010ca169ee49f4e4ad6f8c05f4e3545b72ee041dbbcb159882/cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", size = 171820 }, - { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290 }, + { name = "pycparser", marker = "python_full_version == '3.14.*' and implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/84/7930c3586ca7c66a63b2d7a30d9df649ce8c3660f8da241b0661bba4e566/cffi-2.0.0b1.tar.gz", hash = "sha256:4440de58d19c0bebe6a2f3b721253d67b27aabb34e00ab35756d8699876191ea", size = 521625, upload-time = "2025-07-29T01:11:50.959Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/41/1baf86bc9ebd4a994990ef743d7f625c2e81fc57b3689a7c2f4f4ae32b39/cffi-2.0.0b1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:4b69c24a89c30a7821ecd25bcaff99075d95dd0c85c8845768c340a7736d84cf", size = 184335, upload-time = "2025-07-29T01:10:01.619Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4a/93b0c2fde594dd0be91e78c577174b3380e977a1002710986403528ea0e6/cffi-2.0.0b1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ba9946f292f7ae3a6f1cc72af259c477c291eb10ad3ca74180862e39f46a521", size = 180531, upload-time = "2025-07-29T01:10:03.901Z" }, + { url = "https://files.pythonhosted.org/packages/db/23/d78944312174f6f12921cb27bee5d194664b1577a80ee910446355e24b8e/cffi-2.0.0b1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1f4ca4ac8b9ee620ff5cb4307fae08691a0911bf0eeb488e8d6cf55bd77dfe43", size = 203099, upload-time = "2025-07-29T01:10:05.238Z" }, + { url = "https://files.pythonhosted.org/packages/2e/f7/f59dd3007400d362de620cf7955ed8bf5748fb0d0cddfcb28919b65af5b7/cffi-2.0.0b1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0eb17b22e313c453c940931f5d063ba9e87e5db12d99473477ab1851e66fedb4", size = 203366, upload-time = "2025-07-29T01:10:06.596Z" }, + { url = "https://files.pythonhosted.org/packages/b5/81/52a261b2ca9a30c5f3c7f16b11142fcd827f345550cea51580463594400d/cffi-2.0.0b1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6a1faa47c7fbe0627f6b621dadebed9f532a789a1d3b519731304da1d3ec3d14", size = 217073, upload-time = "2025-07-29T01:10:07.972Z" }, + { url = "https://files.pythonhosted.org/packages/57/ce/ec093352e9e35491579fee73fb0c3600c82bd9fbea92a64fb291f5874c7d/cffi-2.0.0b1-cp310-cp310-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:230a97779cdd6734b6af3bfda4be31406bab58a078f25327b169975be9225a46", size = 208272, upload-time = "2025-07-29T01:10:09.034Z" }, + { url = "https://files.pythonhosted.org/packages/20/07/b01c9e2a8065aaec510fbe67837a7a3c4e05b347d9094e5db2179d084cce/cffi-2.0.0b1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c177aa1cdae420519665da22760f4a4a159551733d4686a4467f579bf7b75470", size = 216698, upload-time = "2025-07-29T01:10:10.439Z" }, + { url = "https://files.pythonhosted.org/packages/f7/30/eed081ff6faad34ee37beb69d0b269f0bd63743772f20412ea69d16e4aee/cffi-2.0.0b1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bdd3ce5e620ff6ee1e89fb7abb620756482fb3e337e5121e441cb0071c11cbd0", size = 218874, upload-time = "2025-07-29T01:10:11.924Z" }, + { url = "https://files.pythonhosted.org/packages/32/b5/e92bd27352dc749a1d286279fbe07de1850b9b674f8f6782294fd7ae8a93/cffi-2.0.0b1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0dbbe4a9bfcc058fccfee33ea5bebe50440767d219c2efa3a722a90ed59e8cfa", size = 211257, upload-time = "2025-07-29T01:10:13.227Z" }, + { url = "https://files.pythonhosted.org/packages/f5/02/c67687aa6b025166f43a2b915cf2e54cf1a32f0b3e849cbfb531f7719548/cffi-2.0.0b1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5f304ce328ecfb7bc36034374c20d0b4ae70423253f8a81c5e0b5efd90e29cd4", size = 218081, upload-time = "2025-07-29T01:10:14.294Z" }, + { url = "https://files.pythonhosted.org/packages/24/d5/926fc2526a452ebe33709fd59a28f4aa241edf3e6cbc7c05b9ed261df8e1/cffi-2.0.0b1-cp310-cp310-win32.whl", hash = "sha256:5acd1da34b96c8881b5df0e3d83cdbecc349b9ad5e9b8c0c589646c241448853", size = 172220, upload-time = "2025-07-29T01:10:15.331Z" }, + { url = "https://files.pythonhosted.org/packages/b8/cc/572111b18a4091a67d53aff91c7c00895cf93da7ed84f30ad304af4f6ff7/cffi-2.0.0b1-cp310-cp310-win_amd64.whl", hash = "sha256:ebb116751a49977c0b130493d3af13c567c4613946d293d4f61601237fabcd5f", size = 182827, upload-time = "2025-07-29T01:10:16.62Z" }, + { url = "https://files.pythonhosted.org/packages/67/90/14deaf13603dfff56bb872a4d53e1043486178ae7a2ce8cc17ea5677d97e/cffi-2.0.0b1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:5f373f9bdc3569acd8aaebb6b521080eeb5a298533a58715537caf74e9e27f6b", size = 184383, upload-time = "2025-07-29T01:10:17.675Z" }, + { url = "https://files.pythonhosted.org/packages/f7/36/0a125a1ab354a95aae2165ce4c2b8fcd057706a85380670e3991052dcfcd/cffi-2.0.0b1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a898f76bac81f9a371df6c8664228a85cdea6b283a721f2493f0df6f80afd208", size = 180599, upload-time = "2025-07-29T01:10:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/bc/cb/27237bcd6c4e883104db737929f02838a7405caed422aeeb76ee5ffa14d9/cffi-2.0.0b1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:314afab228f7b45de7bae55059b4e706296e7d3984d53e643cc0389757216221", size = 203212, upload-time = "2025-07-29T01:10:20.057Z" }, + { url = "https://files.pythonhosted.org/packages/12/94/bbeddca63090c5335ad597310bd6f2011f1c8733bc71e88f53c38ac4ff4c/cffi-2.0.0b1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6de033c73dc89f80139c5a7d135fbd6c1d7b28ebb0d2df98cd1f4ef76991b15c", size = 202714, upload-time = "2025-07-29T01:10:21.401Z" }, + { url = "https://files.pythonhosted.org/packages/f4/9b/b7587a1f3f7f52795a7d125d6c6b844f7a8355cbb54ae8fdef2a03488914/cffi-2.0.0b1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffbbeedd6bac26c0373b71831d3c73181a1c100dc6fc7aadbfcca54cace417db", size = 217093, upload-time = "2025-07-29T01:10:22.481Z" }, + { url = "https://files.pythonhosted.org/packages/a3/b2/af4e0ed2c2aded25ed54107f96d424407839bdfa7e90858f8e0f6fed6ee9/cffi-2.0.0b1-cp311-cp311-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:c5713cac21b2351a53958c765d8e9eda45184bb757c3ccab139608e708788796", size = 209019, upload-time = "2025-07-29T01:10:23.584Z" }, + { url = "https://files.pythonhosted.org/packages/7b/6e/899c5473c3d7cc89815db894abcd81cd976a1f314c142e708aef3c0982a3/cffi-2.0.0b1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:71ab35c6cc375da1e2c06af65bf0b5049199ad9b264f9ed7c90c0fe9450900e3", size = 215662, upload-time = "2025-07-29T01:10:24.997Z" }, + { url = "https://files.pythonhosted.org/packages/1c/8e/953a07806f307bf1089239858013cc81c6d5cc8ca23593704b0530429302/cffi-2.0.0b1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53c780c2ec8ce0e5db9b74e9b0b55ff5d5f70071202740cef073a2771fa1d2ce", size = 219015, upload-time = "2025-07-29T01:10:27.077Z" }, + { url = "https://files.pythonhosted.org/packages/ea/0a/ffd99099d96a911236decff459cb330a1c046483008456b23554f62c81c6/cffi-2.0.0b1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:be957dd266facf8e4925643073159b05021a990b46620b06ca27eaf9d900dbc2", size = 212021, upload-time = "2025-07-29T01:10:28.527Z" }, + { url = "https://files.pythonhosted.org/packages/2f/00/c68c1a1665a28dfb8c848668f128d0f1919dc8e843f2e20ce90bce7b60d8/cffi-2.0.0b1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:16dc303af3630f54186b86aadf1121badf3cba6de17dfeacb84c5091e059a690", size = 217124, upload-time = "2025-07-29T01:10:29.877Z" }, + { url = "https://files.pythonhosted.org/packages/de/a7/194d80668bebc5a6a8d95ec5f3a1f186e8d87c864882c96a9ec2ecbd06a8/cffi-2.0.0b1-cp311-cp311-win32.whl", hash = "sha256:504d264944d0934d7b02164af5c62b175255ef0d39c5142d95968b710c58a8f6", size = 172111, upload-time = "2025-07-29T01:10:30.973Z" }, + { url = "https://files.pythonhosted.org/packages/e2/b6/0002211aab83b6bfbdba09dc8cd354e44c49216e6207999b9f0d1d0053cb/cffi-2.0.0b1-cp311-cp311-win_amd64.whl", hash = "sha256:e2920fa42cf0616c21ea6d3948ad207cf0e420d2d2ef449d86ccad6ef9c13393", size = 182858, upload-time = "2025-07-29T01:10:32.021Z" }, + { url = "https://files.pythonhosted.org/packages/52/9e/c6773b5b91b20c5642166c57503a9c67c6948ae4009aa4d2ce233a6b570f/cffi-2.0.0b1-cp311-cp311-win_arm64.whl", hash = "sha256:142c9c0c75fbc95ce23836e538681bd89e483de37b7cdf251dbdf0975995f8ac", size = 177421, upload-time = "2025-07-29T01:10:33.191Z" }, + { url = "https://files.pythonhosted.org/packages/50/20/432dc366952574ea190bce0a2970f92e676e972c78ef501d58406b459883/cffi-2.0.0b1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9d04b5fc06ba0ce45d7e51dfd8a14dc20708ef301fcf5a215c507f4e084b00c8", size = 185303, upload-time = "2025-07-29T01:10:34.291Z" }, + { url = "https://files.pythonhosted.org/packages/54/2d/e89016a2019212d54be2523756faa5b2c3ab8cb6f520a82e0d6bcacd527d/cffi-2.0.0b1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7b17e92900eb61bce62ea07ea8dd0dc33aa476ee8f977918050e52f90f5b645c", size = 181101, upload-time = "2025-07-29T01:10:35.641Z" }, + { url = "https://files.pythonhosted.org/packages/89/4f/6978a38ee0d8976f3087c09e779f9306ed51b9fb68ce5e3606244f6e2469/cffi-2.0.0b1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2155d2a0819c3fdcaa37832fb69e698d455627c23f83bc9c7adbef699fe4be19", size = 208122, upload-time = "2025-07-29T01:10:36.757Z" }, + { url = "https://files.pythonhosted.org/packages/20/2f/568d19b010aa304f6f55aaf160834e0db9677943b0c268462876c4e1c0ef/cffi-2.0.0b1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4210ddc2b41c20739c64dede1304fb81415220ea671885623063fab44066e376", size = 206747, upload-time = "2025-07-29T01:10:37.837Z" }, + { url = "https://files.pythonhosted.org/packages/bf/7b/171907beef5622bc6164ae9db94eaaa8e56bfb986f375742a9669ecc18f7/cffi-2.0.0b1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:31b8e3204cdef043e59a296383e6a43461d17c5c3d73fa9cebf4716a561291b0", size = 220804, upload-time = "2025-07-29T01:10:39.299Z" }, + { url = "https://files.pythonhosted.org/packages/49/2a/539d6021b1570308159745e775d0bd4164e43957e515bffd33cb6e57cf06/cffi-2.0.0b1-cp312-cp312-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:cbde39be02aa7d8fbcd6bf1a9241cb1d84f2e2f0614970c51a707a9a176b85c6", size = 211912, upload-time = "2025-07-29T01:10:40.767Z" }, + { url = "https://files.pythonhosted.org/packages/87/a9/2cddc8eeabd7b32d494de5bb9db95e3816b47ad00e05269b33e2bb8be9f3/cffi-2.0.0b1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ea57043b545f346b081877737cb0320960012107d0250fa5183a4306f9365d6", size = 219528, upload-time = "2025-07-29T01:10:42.419Z" }, + { url = "https://files.pythonhosted.org/packages/a8/18/49ff9cbe89eae3fff54a7af79474dd897bac44325073a6a7dc9b7ae4b64e/cffi-2.0.0b1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d31ba9f54739dcf98edb87e4881e326fad79e4866137c24afb0da531c1a965ca", size = 223011, upload-time = "2025-07-29T01:10:43.906Z" }, + { url = "https://files.pythonhosted.org/packages/a1/1e/4f10dd0fd9cb8d921620663beb497af0a6175c96cecd87e5baf613d0c947/cffi-2.0.0b1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:27309de8cebf48e056550db6607e2fb2c50109b54fc72c02b3b34811233483be", size = 221408, upload-time = "2025-07-29T01:10:45.385Z" }, + { url = "https://files.pythonhosted.org/packages/00/82/cbbb23951d9890475f151c1137d067a712e7f1e59509def619c5d9a645aa/cffi-2.0.0b1-cp312-cp312-win32.whl", hash = "sha256:f4b5acb4cddcaf0ebb82a226f9fa1d5063505e0c206031ee1f4d173750b592fd", size = 172972, upload-time = "2025-07-29T01:10:46.458Z" }, + { url = "https://files.pythonhosted.org/packages/ea/6b/e52b88ee438acd26fd84963f357a90ce8f4494cc7d94cbde1b26e199bd22/cffi-2.0.0b1-cp312-cp312-win_amd64.whl", hash = "sha256:cf1b2510f1a91c4d7e8f83df6a13404332421e6e4a067059174d455653ae5314", size = 183592, upload-time = "2025-07-29T01:10:47.916Z" }, + { url = "https://files.pythonhosted.org/packages/73/ac/3a5a182637b9a02c16335743b14485cb916ca984dcdc18737851732bff16/cffi-2.0.0b1-cp312-cp312-win_arm64.whl", hash = "sha256:bd7ce5d8224fb5a57bd7f1d9843aa4ecb870ec3f4a2101e1ba8314e91177e184", size = 177583, upload-time = "2025-07-29T01:10:49.091Z" }, + { url = "https://files.pythonhosted.org/packages/8e/5b/d5307bdfac914ec977af904947ead0f22013e066aff82a215a5ff7db5e20/cffi-2.0.0b1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a160995771c54b12dc5a1ef44d6fd59aeea4909e2d58c10169156e9d9a7e2960", size = 185280, upload-time = "2025-07-29T01:10:50.173Z" }, + { url = "https://files.pythonhosted.org/packages/b1/f5/b1fc8c8508e724b824713cd829cb5f0a39e182619ffc4d4bc1a8f142040d/cffi-2.0.0b1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c70c77ec47b96a593477386d7bf23243996c75f1cc7ce383ba35dcedca9bd14", size = 181098, upload-time = "2025-07-29T01:10:51.592Z" }, + { url = "https://files.pythonhosted.org/packages/1a/2e/2fdbdfb2783a103176c78fc9833aff80080b6567e90647e05e35160d4082/cffi-2.0.0b1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:47a91ab8d17ed7caed27e5b2eda3b3478f3d28cecb3939d708545804273e159b", size = 208101, upload-time = "2025-07-29T01:10:53.059Z" }, + { url = "https://files.pythonhosted.org/packages/1f/23/4eea412e3aa8173bad1ad77fc28905aa393bf4738221fc4dc99587157940/cffi-2.0.0b1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fd8f55419576289d7cd8c9349ea46a222379936136754ab4c2b041294b0b48d", size = 206671, upload-time = "2025-07-29T01:10:54.652Z" }, + { url = "https://files.pythonhosted.org/packages/c4/c1/3c334b249ae3faa1b5126c9db797561be3669d29f8096675b5d0e55754e3/cffi-2.0.0b1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:916141ca9ff05e9f67fe73c39a527d96a7101191673dee9985e71cd164b55915", size = 220797, upload-time = "2025-07-29T01:10:55.826Z" }, + { url = "https://files.pythonhosted.org/packages/ff/4a/67cf1060b419ea26ffb79dd645371246cffd3c7cf5fca5c7cd66769e7323/cffi-2.0.0b1-cp313-cp313-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:91fc109a1412dd29657f442a61bb571baaa1d074628145008ceb54dc9bb13941", size = 211900, upload-time = "2025-07-29T01:10:57.298Z" }, + { url = "https://files.pythonhosted.org/packages/de/df/d890a3638e86f9abe533d95bf08b5d5ec140c3a0befad9a3e9edc8546553/cffi-2.0.0b1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b08dd1a826b678d39aa78f30edc1b7d9bd1e5b7e5adc2d47e8f56ab25ac7c13", size = 219467, upload-time = "2025-07-29T01:10:58.819Z" }, + { url = "https://files.pythonhosted.org/packages/e8/2b/079e4e0535b72066029bd58438a3f6c538623742d31f80467d340cbaf8d9/cffi-2.0.0b1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76a19efb88a495bb7377fc542c7f97c9816dfc1d6bb4ad147acb99599a83e248", size = 222974, upload-time = "2025-07-29T01:11:00.179Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e3/3428e9dbf24464bc04af09ad298b28c48a9481f0a89924f619388354734b/cffi-2.0.0b1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:87acb9e2221ed37c385c9cef866377fbaa13180de9ba1cdc4e6dc927b273c87f", size = 221343, upload-time = "2025-07-29T01:11:01.718Z" }, + { url = "https://files.pythonhosted.org/packages/bf/d8/9eba61d92eaf59ce97d85855895ed1961330c2e9a0ba9f922c920808b303/cffi-2.0.0b1-cp313-cp313-win32.whl", hash = "sha256:60c2c1d7adf558b932de9e4633f68e359063d1a748c92a4a3cba832085e9819b", size = 172947, upload-time = "2025-07-29T01:11:02.835Z" }, + { url = "https://files.pythonhosted.org/packages/fb/84/582fc182fe8994b495a0dde875c30ec9202154f13dfc1bbea96233b6ae1b/cffi-2.0.0b1-cp313-cp313-win_amd64.whl", hash = "sha256:6ff1ba153e0740c2ea47d74d015c1a03c3addab1681633be0838103c297b855c", size = 183441, upload-time = "2025-07-29T01:11:04.029Z" }, + { url = "https://files.pythonhosted.org/packages/0e/a5/85855a9ad255edf6be1fcd6e44384daa506a2276ef4f0e6164bc2dd03785/cffi-2.0.0b1-cp313-cp313-win_arm64.whl", hash = "sha256:adbed7d68bc8837eb2c73e01bc284b5af9898e82b6067a6cbffea4f1820626e4", size = 177621, upload-time = "2025-07-29T01:11:05.191Z" }, + { url = "https://files.pythonhosted.org/packages/7a/04/070592956f9818f6ef2c5219410209af08c3b81889da0b36185b535bdb2a/cffi-2.0.0b1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fe8cb43962af8e43facad740930fadc4cf8cdc1e073f59d0f13714711807979f", size = 185398, upload-time = "2025-07-29T01:11:06.337Z" }, + { url = "https://files.pythonhosted.org/packages/f7/68/704fba8db6ece9cb13f48e1c17311f70f49153671e056ae99ea29c549d39/cffi-2.0.0b1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a812e9ab7a0bfef3e89089c0359e631d8521d5efc8d21c7ede3f1568db689920", size = 181540, upload-time = "2025-07-29T01:11:07.4Z" }, + { url = "https://files.pythonhosted.org/packages/aa/f7/5a6f7913430f0e0e5e2ac5b06fd69bb532f1e420404d508936da6117a5b8/cffi-2.0.0b1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bce5ce4790b8347c2d7937312218d0282af344f8a589db163520a02fe8e42281", size = 207806, upload-time = "2025-07-29T01:11:08.543Z" }, + { url = "https://files.pythonhosted.org/packages/79/78/870845b72b8017717826bbfca874115e2dac88b8bf204298edc946691817/cffi-2.0.0b1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:39eedbed09879f6d1591ad155afcc162aa11ebf3271215339b4aef3df5631573", size = 206531, upload-time = "2025-07-29T01:11:09.803Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f4/d65f9a303b97453f19588fd7d336c6e527b8ee9fc3b956296d63c6af5562/cffi-2.0.0b1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7dfd6f8f57e812f3175aa0d4d36ed797b6ff35f7cdfefea05417569b543ddc94", size = 220766, upload-time = "2025-07-29T01:11:10.978Z" }, + { url = "https://files.pythonhosted.org/packages/a1/09/85fa0b2841a16d2c3571661a9c4bb53441e195dda2413cfeab05b9726e56/cffi-2.0.0b1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:782f60714ea2935e5391a0f69ad4705624cdc86243b18dcfafd08565c28e89bd", size = 219317, upload-time = "2025-07-29T01:11:12.148Z" }, + { url = "https://files.pythonhosted.org/packages/75/87/91037b0c976babf124760cae2e0a0ca0ce18f02b5b34146421feecd6558d/cffi-2.0.0b1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f2ebc97ba03b26e9b6b048b6c3981165126905cb20564fbf6584f5e072a1c189", size = 222874, upload-time = "2025-07-29T01:11:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/56/53/1c871477e707c001c30537e8f4807341f1d3b40bd6f094cf054864b41dc6/cffi-2.0.0b1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:fba9546b80f3b275f04915ffbca7b75aa22a353c4f6410469fb1d8c340ec1c31", size = 220973, upload-time = "2025-07-29T01:11:14.528Z" }, + { url = "https://files.pythonhosted.org/packages/81/c7/4cb50e2e7623a41d9416dc8d7d043ba3a69f2424209a1e04c28833216f90/cffi-2.0.0b1-cp314-cp314-win32.whl", hash = "sha256:339e853c75f69c726b1a85f2217db6880422f915770679c47150eea895e02b46", size = 175360, upload-time = "2025-07-29T01:11:31.19Z" }, + { url = "https://files.pythonhosted.org/packages/c1/ba/d0fb6fc597d2d11b77294626c51d3f01f9475c4ec3462687fef5244f09be/cffi-2.0.0b1-cp314-cp314-win_amd64.whl", hash = "sha256:856eb353a42b04d02b0633c71123276710a5390e92a27fbd2446864ca7d27923", size = 185681, upload-time = "2025-07-29T01:11:32.464Z" }, + { url = "https://files.pythonhosted.org/packages/24/0f/12390e59c1cb01a161d24f5ef73f15110c6c8f1e51ba8a42411d3faf5d58/cffi-2.0.0b1-cp314-cp314-win_arm64.whl", hash = "sha256:9e23ac717e8b3767c80198d483c743fe596b055a6e29ef34f9d8cdf61f941f2f", size = 180386, upload-time = "2025-07-29T01:11:33.648Z" }, + { url = "https://files.pythonhosted.org/packages/48/6a/87dfc25b45dcae6e05e342f29ac384b5847256c06b99b5e226d59549bf21/cffi-2.0.0b1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e227627762046204df31c589d7406540778d05622e395d41fc68b7895d40c174", size = 188831, upload-time = "2025-07-29T01:11:15.772Z" }, + { url = "https://files.pythonhosted.org/packages/9d/d9/4c6e38b9837e053f096007c37586be4dc6201664103db3a401618f37159e/cffi-2.0.0b1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2355cd38f375906da70a8bad548eb63f65bed43c1044ed075691fa36e8e8315a", size = 185064, upload-time = "2025-07-29T01:11:16.961Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b4/e3797890685586d764c4bc20947e45cdddfa6dec8a635df84a947c7be8f8/cffi-2.0.0b1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:14c0ade7949f088615450abf884064b4ef11e8c9917b99d53f12e06cdfd2cd36", size = 209488, upload-time = "2025-07-29T01:11:18.258Z" }, + { url = "https://files.pythonhosted.org/packages/85/51/b91f5e8a30ea6b77a9ede74bab40482a86ec0d4c462ef4bc8f2c0775f969/cffi-2.0.0b1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:765c82d4a73ded03bfea961364f4c57dd6cfe7b0d57b7a2d9b95e2e7bd5de6f7", size = 208670, upload-time = "2025-07-29T01:11:19.753Z" }, + { url = "https://files.pythonhosted.org/packages/12/4c/ced2c206f38bd7cc1124aa8d9b4cbbd6db54a7a9220f889ba35a07b4f4b2/cffi-2.0.0b1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:265666e15da6974e6a74110873321e84c7c2288e379aca44a7df4713325b9be4", size = 222420, upload-time = "2025-07-29T01:11:21.043Z" }, + { url = "https://files.pythonhosted.org/packages/c1/8c/49feb0f27d072d7b4f5fe48407451a697015e6cf3197e144ebc5ed6c361f/cffi-2.0.0b1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d88f849d03c9aa2d7bbd710a0e20266f92bf524396c7fce881cd5a1971447812", size = 221747, upload-time = "2025-07-29T01:11:22.362Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ea/f0b0c31e6445767441e8dad5a3fa267de7ffc5a87ebd13bc0fd2efa76f8f/cffi-2.0.0b1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:853e90e942246f9e098f16baa45896f80675f86ab6447823c4030a67c3cc112d", size = 224491, upload-time = "2025-07-29T01:11:23.95Z" }, + { url = "https://files.pythonhosted.org/packages/dc/6e/e5349ac9bf812e9a44914f699999c960c045bbd12b63358a4b583ab6ad85/cffi-2.0.0b1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3b8aee0176d80781a21855832c411cfd3126c34966650693ec1245f0b756498b", size = 223484, upload-time = "2025-07-29T01:11:25.266Z" }, + { url = "https://files.pythonhosted.org/packages/f5/11/b2a10765c287d368f87dd57e2840876609418d4bb2ea6cfc56d05c8cb8e0/cffi-2.0.0b1-cp314-cp314t-win32.whl", hash = "sha256:2da933859e1465a08f36d88e0452194da27b9ff0813e5ba49f02c544682d40e0", size = 180528, upload-time = "2025-07-29T01:11:26.968Z" }, + { url = "https://files.pythonhosted.org/packages/41/e8/b7a5be3b8c2d07627e6c007628cdd58c26b18b27ca110334c375d39c1665/cffi-2.0.0b1-cp314-cp314t-win_amd64.whl", hash = "sha256:53fbcfdb35760bc6fb68096632d29700bcf37fd0d71922dcc577eb6193fc6edc", size = 191764, upload-time = "2025-07-29T01:11:28.464Z" }, + { url = "https://files.pythonhosted.org/packages/1b/f5/5cec5a3462fe50687acf04f820b96f490a2c28acd7857472607839ba2712/cffi-2.0.0b1-cp314-cp314t-win_arm64.whl", hash = "sha256:505bec438236c623d7cfd8cc740598611a1d4883a629a0e33eb9e3c2dcd81b04", size = 183450, upload-time = "2025-07-29T01:11:29.941Z" }, + { url = "https://files.pythonhosted.org/packages/e3/c9/3a4777fe105edfdd6e21aa312213e4511c5265a917f2132b8ea73e01f048/cffi-2.0.0b1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:d2ede96d5de012d74b174082dec44c58a35b42e0ea9f197063ddb5e504ee0c7e", size = 184327, upload-time = "2025-07-29T01:11:34.906Z" }, + { url = "https://files.pythonhosted.org/packages/96/e9/a36e643af2d18aac1ecdf66bd6b384b99879ddd57a435f90d20514356558/cffi-2.0.0b1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:14505e4a82aa84abddab6e493946d3ed6bf6d268b58e4c2f5bcf8ec2dee2ca2d", size = 180553, upload-time = "2025-07-29T01:11:36.126Z" }, + { url = "https://files.pythonhosted.org/packages/26/33/36072caa8edb5abc416dc129cdcdf08577dcddf998238ab596eeac5fdae5/cffi-2.0.0b1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:762dd8db1bd710f7b828b3c6cbb7101b5e190e722eb5633eb79b1a6b751e349a", size = 203083, upload-time = "2025-07-29T01:11:37.333Z" }, + { url = "https://files.pythonhosted.org/packages/cc/98/ff861689fb84c1cbeffa7a4c18148c943a88b6e0c13043d75d740b1d033a/cffi-2.0.0b1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8af08fd246d2a544c8b68c25c171809d08eed9372f2026ae48dad17d26525578", size = 203433, upload-time = "2025-07-29T01:11:38.544Z" }, + { url = "https://files.pythonhosted.org/packages/4d/8c/130f35263b0be08946e06228c602a2012c5075ca838019f0ef2954407f16/cffi-2.0.0b1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e342223ada6b1d34f3719d3612991924cb68fa7f8fb2ec22f5bda254882828ab", size = 217086, upload-time = "2025-07-29T01:11:39.91Z" }, + { url = "https://files.pythonhosted.org/packages/1b/d4/e67a4dd21e34a716aaa71b300de43d654a36c5878678f5a343903d890fa1/cffi-2.0.0b1-cp39-cp39-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:352e1949f7af33c37b060d2c2ea8a8fa1be6695ff94f8d5f7738bacacb9d6de4", size = 208221, upload-time = "2025-07-29T01:11:41.478Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/2126fa7eb0131a6eaef5d13a93c2e9bbfff06271f55b7dd57835915cf460/cffi-2.0.0b1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3cc3245802b4950bc5459a2ef9a650d948972e44df120ecd2c6201814c8edb54", size = 216788, upload-time = "2025-07-29T01:11:42.685Z" }, + { url = "https://files.pythonhosted.org/packages/5b/94/b6646306de2a61c661110ebfb28b31f63d01f28f8ab6e6ec698112b5726a/cffi-2.0.0b1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ab4aea2f93ab6c408f0c6be8ddebe4d1086b4966148f542fe11cf82ca698dc07", size = 218944, upload-time = "2025-07-29T01:11:43.947Z" }, + { url = "https://files.pythonhosted.org/packages/12/6c/77bd877a1cae4234e47128c675478df1c5881b9e156569d9b408f83e9f5e/cffi-2.0.0b1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ecf72cb96106fbde29682db37569c7cee3ebf29ecf9ead46978679057c6df234", size = 211290, upload-time = "2025-07-29T01:11:45.23Z" }, + { url = "https://files.pythonhosted.org/packages/b5/f5/01670d1960b8f76f37e37be31d9e3f7e1473c3e89e9196e7d6c6d4f7688b/cffi-2.0.0b1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:aaec3f41cd6f0ffda5e23365822710d747b8613d3b8f54e12b5d7dcde688300d", size = 218084, upload-time = "2025-07-29T01:11:46.563Z" }, + { url = "https://files.pythonhosted.org/packages/94/03/f5ffb99d7ba1c0b5e48873829bed6349e4bb1e5fa108e0dffd94de23ea5a/cffi-2.0.0b1-cp39-cp39-win32.whl", hash = "sha256:601ddbaa51b1bd96a92a6a26e855060390023ab600377280a9bed7703ed2a088", size = 172173, upload-time = "2025-07-29T01:11:48.184Z" }, + { url = "https://files.pythonhosted.org/packages/72/29/3c890ed3ef27a19cb696fa1032b8ef83e0aa586ec55d4feeb0970e28c673/cffi-2.0.0b1-cp39-cp39-win_amd64.whl", hash = "sha256:cb351fade24f7ba9ca481bee53d4257053b9fa9da55da276fe1187a990a49dde", size = 182827, upload-time = "2025-07-29T01:11:49.444Z" }, ] [[package]] name = "cfgv" version = "3.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114 } +sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114, upload-time = "2023-08-12T20:38:17.776Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249 }, + { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249, upload-time = "2023-08-12T20:38:16.269Z" }, ] [[package]] name = "charset-normalizer" -version = "3.4.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0d/58/5580c1716040bc89206c77d8f74418caf82ce519aae06450393ca73475d1/charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de", size = 198013 }, - { url = "https://files.pythonhosted.org/packages/d0/11/00341177ae71c6f5159a08168bcb98c6e6d196d372c94511f9f6c9afe0c6/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176", size = 141285 }, - { url = "https://files.pythonhosted.org/packages/01/09/11d684ea5819e5a8f5100fb0b38cf8d02b514746607934134d31233e02c8/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037", size = 151449 }, - { url = "https://files.pythonhosted.org/packages/08/06/9f5a12939db324d905dc1f70591ae7d7898d030d7662f0d426e2286f68c9/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f", size = 143892 }, - { url = "https://files.pythonhosted.org/packages/93/62/5e89cdfe04584cb7f4d36003ffa2936681b03ecc0754f8e969c2becb7e24/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a", size = 146123 }, - { url = "https://files.pythonhosted.org/packages/a9/ac/ab729a15c516da2ab70a05f8722ecfccc3f04ed7a18e45c75bbbaa347d61/charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a", size = 147943 }, - { url = "https://files.pythonhosted.org/packages/03/d2/3f392f23f042615689456e9a274640c1d2e5dd1d52de36ab8f7955f8f050/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247", size = 142063 }, - { url = "https://files.pythonhosted.org/packages/f2/e3/e20aae5e1039a2cd9b08d9205f52142329f887f8cf70da3650326670bddf/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408", size = 150578 }, - { url = "https://files.pythonhosted.org/packages/8d/af/779ad72a4da0aed925e1139d458adc486e61076d7ecdcc09e610ea8678db/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb", size = 153629 }, - { url = "https://files.pythonhosted.org/packages/c2/b6/7aa450b278e7aa92cf7732140bfd8be21f5f29d5bf334ae987c945276639/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d", size = 150778 }, - { url = "https://files.pythonhosted.org/packages/39/f4/d9f4f712d0951dcbfd42920d3db81b00dd23b6ab520419626f4023334056/charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807", size = 146453 }, - { url = "https://files.pythonhosted.org/packages/49/2b/999d0314e4ee0cff3cb83e6bc9aeddd397eeed693edb4facb901eb8fbb69/charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f", size = 95479 }, - { url = "https://files.pythonhosted.org/packages/2d/ce/3cbed41cff67e455a386fb5e5dd8906cdda2ed92fbc6297921f2e4419309/charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f", size = 102790 }, - { url = "https://files.pythonhosted.org/packages/72/80/41ef5d5a7935d2d3a773e3eaebf0a9350542f2cab4eac59a7a4741fbbbbe/charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125", size = 194995 }, - { url = "https://files.pythonhosted.org/packages/7a/28/0b9fefa7b8b080ec492110af6d88aa3dea91c464b17d53474b6e9ba5d2c5/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1", size = 139471 }, - { url = "https://files.pythonhosted.org/packages/71/64/d24ab1a997efb06402e3fc07317e94da358e2585165930d9d59ad45fcae2/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3", size = 149831 }, - { url = "https://files.pythonhosted.org/packages/37/ed/be39e5258e198655240db5e19e0b11379163ad7070962d6b0c87ed2c4d39/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd", size = 142335 }, - { url = "https://files.pythonhosted.org/packages/88/83/489e9504711fa05d8dde1574996408026bdbdbd938f23be67deebb5eca92/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00", size = 143862 }, - { url = "https://files.pythonhosted.org/packages/c6/c7/32da20821cf387b759ad24627a9aca289d2822de929b8a41b6241767b461/charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12", size = 145673 }, - { url = "https://files.pythonhosted.org/packages/68/85/f4288e96039abdd5aeb5c546fa20a37b50da71b5cf01e75e87f16cd43304/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77", size = 140211 }, - { url = "https://files.pythonhosted.org/packages/28/a3/a42e70d03cbdabc18997baf4f0227c73591a08041c149e710045c281f97b/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146", size = 148039 }, - { url = "https://files.pythonhosted.org/packages/85/e4/65699e8ab3014ecbe6f5c71d1a55d810fb716bbfd74f6283d5c2aa87febf/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd", size = 151939 }, - { url = "https://files.pythonhosted.org/packages/b1/82/8e9fe624cc5374193de6860aba3ea8070f584c8565ee77c168ec13274bd2/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6", size = 149075 }, - { url = "https://files.pythonhosted.org/packages/3d/7b/82865ba54c765560c8433f65e8acb9217cb839a9e32b42af4aa8e945870f/charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8", size = 144340 }, - { url = "https://files.pythonhosted.org/packages/b5/b6/9674a4b7d4d99a0d2df9b215da766ee682718f88055751e1e5e753c82db0/charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b", size = 95205 }, - { url = "https://files.pythonhosted.org/packages/1e/ab/45b180e175de4402dcf7547e4fb617283bae54ce35c27930a6f35b6bef15/charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76", size = 102441 }, - { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, - { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, - { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, - { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, - { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, - { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, - { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, - { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, - { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, - { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, - { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, - { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, - { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, - { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, - { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, - { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, - { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, - { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, - { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, - { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, - { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, - { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, - { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, - { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, - { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, - { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, - { url = "https://files.pythonhosted.org/packages/7f/c0/b913f8f02836ed9ab32ea643c6fe4d3325c3d8627cf6e78098671cafff86/charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41", size = 197867 }, - { url = "https://files.pythonhosted.org/packages/0f/6c/2bee440303d705b6fb1e2ec789543edec83d32d258299b16eed28aad48e0/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f", size = 141385 }, - { url = "https://files.pythonhosted.org/packages/3d/04/cb42585f07f6f9fd3219ffb6f37d5a39b4fd2db2355b23683060029c35f7/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2", size = 151367 }, - { url = "https://files.pythonhosted.org/packages/54/54/2412a5b093acb17f0222de007cc129ec0e0df198b5ad2ce5699355269dfe/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770", size = 143928 }, - { url = "https://files.pythonhosted.org/packages/5a/6d/e2773862b043dcf8a221342954f375392bb2ce6487bcd9f2c1b34e1d6781/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4", size = 146203 }, - { url = "https://files.pythonhosted.org/packages/b9/f8/ca440ef60d8f8916022859885f231abb07ada3c347c03d63f283bec32ef5/charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537", size = 148082 }, - { url = "https://files.pythonhosted.org/packages/04/d2/42fd330901aaa4b805a1097856c2edf5095e260a597f65def493f4b8c833/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496", size = 142053 }, - { url = "https://files.pythonhosted.org/packages/9e/af/3a97a4fa3c53586f1910dadfc916e9c4f35eeada36de4108f5096cb7215f/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78", size = 150625 }, - { url = "https://files.pythonhosted.org/packages/26/ae/23d6041322a3556e4da139663d02fb1b3c59a23ab2e2b56432bd2ad63ded/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7", size = 153549 }, - { url = "https://files.pythonhosted.org/packages/94/22/b8f2081c6a77cb20d97e57e0b385b481887aa08019d2459dc2858ed64871/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6", size = 150945 }, - { url = "https://files.pythonhosted.org/packages/c7/0b/c5ec5092747f801b8b093cdf5610e732b809d6cb11f4c51e35fc28d1d389/charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294", size = 146595 }, - { url = "https://files.pythonhosted.org/packages/0c/5a/0b59704c38470df6768aa154cc87b1ac7c9bb687990a1559dc8765e8627e/charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5", size = 95453 }, - { url = "https://files.pythonhosted.org/packages/85/2d/a9790237cb4d01a6d57afadc8573c8b73c609ade20b80f4cda30802009ee/charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765", size = 102811 }, - { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/28/9901804da60055b406e1a1c5ba7aac1276fb77f1dde635aabfc7fd84b8ab/charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941", size = 201818, upload-time = "2025-05-02T08:31:46.725Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9b/892a8c8af9110935e5adcbb06d9c6fe741b6bb02608c6513983048ba1a18/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd", size = 144649, upload-time = "2025-05-02T08:31:48.889Z" }, + { url = "https://files.pythonhosted.org/packages/7b/a5/4179abd063ff6414223575e008593861d62abfc22455b5d1a44995b7c101/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6", size = 155045, upload-time = "2025-05-02T08:31:50.757Z" }, + { url = "https://files.pythonhosted.org/packages/3b/95/bc08c7dfeddd26b4be8c8287b9bb055716f31077c8b0ea1cd09553794665/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d", size = 147356, upload-time = "2025-05-02T08:31:52.634Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/7a5b635aa65284bf3eab7653e8b4151ab420ecbae918d3e359d1947b4d61/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86", size = 149471, upload-time = "2025-05-02T08:31:56.207Z" }, + { url = "https://files.pythonhosted.org/packages/ae/38/51fc6ac74251fd331a8cfdb7ec57beba8c23fd5493f1050f71c87ef77ed0/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c", size = 151317, upload-time = "2025-05-02T08:31:57.613Z" }, + { url = "https://files.pythonhosted.org/packages/b7/17/edee1e32215ee6e9e46c3e482645b46575a44a2d72c7dfd49e49f60ce6bf/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0", size = 146368, upload-time = "2025-05-02T08:31:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/2c/ea3e66f2b5f21fd00b2825c94cafb8c326ea6240cd80a91eb09e4a285830/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef", size = 154491, upload-time = "2025-05-02T08:32:01.219Z" }, + { url = "https://files.pythonhosted.org/packages/52/47/7be7fa972422ad062e909fd62460d45c3ef4c141805b7078dbab15904ff7/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6", size = 157695, upload-time = "2025-05-02T08:32:03.045Z" }, + { url = "https://files.pythonhosted.org/packages/2f/42/9f02c194da282b2b340f28e5fb60762de1151387a36842a92b533685c61e/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366", size = 154849, upload-time = "2025-05-02T08:32:04.651Z" }, + { url = "https://files.pythonhosted.org/packages/67/44/89cacd6628f31fb0b63201a618049be4be2a7435a31b55b5eb1c3674547a/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db", size = 150091, upload-time = "2025-05-02T08:32:06.719Z" }, + { url = "https://files.pythonhosted.org/packages/1f/79/4b8da9f712bc079c0f16b6d67b099b0b8d808c2292c937f267d816ec5ecc/charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a", size = 98445, upload-time = "2025-05-02T08:32:08.66Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d7/96970afb4fb66497a40761cdf7bd4f6fca0fc7bafde3a84f836c1f57a926/charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509", size = 105782, upload-time = "2025-05-02T08:32:10.46Z" }, + { url = "https://files.pythonhosted.org/packages/05/85/4c40d00dcc6284a1c1ad5de5e0996b06f39d8232f1031cd23c2f5c07ee86/charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2", size = 198794, upload-time = "2025-05-02T08:32:11.945Z" }, + { url = "https://files.pythonhosted.org/packages/41/d9/7a6c0b9db952598e97e93cbdfcb91bacd89b9b88c7c983250a77c008703c/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645", size = 142846, upload-time = "2025-05-02T08:32:13.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/82/a37989cda2ace7e37f36c1a8ed16c58cf48965a79c2142713244bf945c89/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd", size = 153350, upload-time = "2025-05-02T08:32:15.873Z" }, + { url = "https://files.pythonhosted.org/packages/df/68/a576b31b694d07b53807269d05ec3f6f1093e9545e8607121995ba7a8313/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8", size = 145657, upload-time = "2025-05-02T08:32:17.283Z" }, + { url = "https://files.pythonhosted.org/packages/92/9b/ad67f03d74554bed3aefd56fe836e1623a50780f7c998d00ca128924a499/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f", size = 147260, upload-time = "2025-05-02T08:32:18.807Z" }, + { url = "https://files.pythonhosted.org/packages/a6/e6/8aebae25e328160b20e31a7e9929b1578bbdc7f42e66f46595a432f8539e/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7", size = 149164, upload-time = "2025-05-02T08:32:20.333Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f2/b3c2f07dbcc248805f10e67a0262c93308cfa149a4cd3d1fe01f593e5fd2/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9", size = 144571, upload-time = "2025-05-02T08:32:21.86Z" }, + { url = "https://files.pythonhosted.org/packages/60/5b/c3f3a94bc345bc211622ea59b4bed9ae63c00920e2e8f11824aa5708e8b7/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544", size = 151952, upload-time = "2025-05-02T08:32:23.434Z" }, + { url = "https://files.pythonhosted.org/packages/e2/4d/ff460c8b474122334c2fa394a3f99a04cf11c646da895f81402ae54f5c42/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82", size = 155959, upload-time = "2025-05-02T08:32:24.993Z" }, + { url = "https://files.pythonhosted.org/packages/a2/2b/b964c6a2fda88611a1fe3d4c400d39c66a42d6c169c924818c848f922415/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0", size = 153030, upload-time = "2025-05-02T08:32:26.435Z" }, + { url = "https://files.pythonhosted.org/packages/59/2e/d3b9811db26a5ebf444bc0fa4f4be5aa6d76fc6e1c0fd537b16c14e849b6/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5", size = 148015, upload-time = "2025-05-02T08:32:28.376Z" }, + { url = "https://files.pythonhosted.org/packages/90/07/c5fd7c11eafd561bb51220d600a788f1c8d77c5eef37ee49454cc5c35575/charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a", size = 98106, upload-time = "2025-05-02T08:32:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/a8/05/5e33dbef7e2f773d672b6d79f10ec633d4a71cd96db6673625838a4fd532/charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28", size = 105402, upload-time = "2025-05-02T08:32:32.191Z" }, + { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/28/f8/dfb01ff6cc9af38552c69c9027501ff5a5117c4cc18dcd27cb5259fa1888/charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4", size = 201671, upload-time = "2025-05-02T08:34:12.696Z" }, + { url = "https://files.pythonhosted.org/packages/32/fb/74e26ee556a9dbfe3bd264289b67be1e6d616329403036f6507bb9f3f29c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7", size = 144744, upload-time = "2025-05-02T08:34:14.665Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/8499ee5aa7addc6f6d72e068691826ff093329fe59891e83b092ae4c851c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836", size = 154993, upload-time = "2025-05-02T08:34:17.134Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a2/5e4c187680728219254ef107a6949c60ee0e9a916a5dadb148c7ae82459c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597", size = 147382, upload-time = "2025-05-02T08:34:19.081Z" }, + { url = "https://files.pythonhosted.org/packages/4c/fe/56aca740dda674f0cc1ba1418c4d84534be51f639b5f98f538b332dc9a95/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7", size = 149536, upload-time = "2025-05-02T08:34:21.073Z" }, + { url = "https://files.pythonhosted.org/packages/53/13/db2e7779f892386b589173dd689c1b1e304621c5792046edd8a978cbf9e0/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f", size = 151349, upload-time = "2025-05-02T08:34:23.193Z" }, + { url = "https://files.pythonhosted.org/packages/69/35/e52ab9a276186f729bce7a0638585d2982f50402046e4b0faa5d2c3ef2da/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba", size = 146365, upload-time = "2025-05-02T08:34:25.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d8/af7333f732fc2e7635867d56cb7c349c28c7094910c72267586947561b4b/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12", size = 154499, upload-time = "2025-05-02T08:34:27.359Z" }, + { url = "https://files.pythonhosted.org/packages/7a/3d/a5b2e48acef264d71e036ff30bcc49e51bde80219bb628ba3e00cf59baac/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518", size = 157735, upload-time = "2025-05-02T08:34:29.798Z" }, + { url = "https://files.pythonhosted.org/packages/85/d8/23e2c112532a29f3eef374375a8684a4f3b8e784f62b01da931186f43494/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5", size = 154786, upload-time = "2025-05-02T08:34:31.858Z" }, + { url = "https://files.pythonhosted.org/packages/c7/57/93e0169f08ecc20fe82d12254a200dfaceddc1c12a4077bf454ecc597e33/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3", size = 150203, upload-time = "2025-05-02T08:34:33.88Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9d/9bf2b005138e7e060d7ebdec7503d0ef3240141587651f4b445bdf7286c2/charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471", size = 98436, upload-time = "2025-05-02T08:34:35.907Z" }, + { url = "https://files.pythonhosted.org/packages/6d/24/5849d46cf4311bbf21b424c443b09b459f5b436b1558c04e45dbb7cc478b/charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e", size = 105772, upload-time = "2025-05-02T08:34:37.935Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, ] [[package]] name = "click" version = "8.1.8" source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593 } +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188 }, + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] name = "coverage" version = "7.5.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/52/d3/3ec80acdd57a0d6a1111b978ade388824f37126446fd6750d38bfaca949c/coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8", size = 798314 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/31/db/08d54dbc12fdfe5857b06105fd1235bdebb7da7c11cd1a0fae936556162a/coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c", size = 210025 }, - { url = "https://files.pythonhosted.org/packages/a8/ff/02c4bcff1025b4a788aa3933e1cd1474d79de43e0d859273b3319ef43cd3/coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b", size = 210499 }, - { url = "https://files.pythonhosted.org/packages/ab/b1/7820a8ef62adeebd37612af9d2369f4467a3bc2641dea1243450def5489e/coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932", size = 238399 }, - { url = "https://files.pythonhosted.org/packages/2c/0e/23a388f3ce16c5ea01a454fef6a9039115abd40b748027d4fef18b3628a7/coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3", size = 236676 }, - { url = "https://files.pythonhosted.org/packages/f8/81/e871b0d58ca5d6cc27d00b2f668ce09c4643ef00512341f3a592a81fb6cd/coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517", size = 237467 }, - { url = "https://files.pythonhosted.org/packages/95/cb/42a6d34d5840635394f1e172aaa0e7cbd9346155e5004a8ee75d8e434c6b/coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a", size = 243539 }, - { url = "https://files.pythonhosted.org/packages/6a/6a/18b3819919fdfd3e2062a75219b363f895f24ae5b80e72ffe5dfb1a7e9c8/coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880", size = 241725 }, - { url = "https://files.pythonhosted.org/packages/b5/3d/a0650978e8b8f78d269358421b7401acaf7cb89e957b2e1be5205ea5940e/coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58", size = 242913 }, - { url = "https://files.pythonhosted.org/packages/8a/fe/95a74158fa0eda56d39783e918edc6fbb3dd3336be390557fc0a2815ecd4/coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4", size = 212381 }, - { url = "https://files.pythonhosted.org/packages/4c/26/b276e0c70cba5059becce2594a268a2731d5b4f2386e9a6afdf37ffa3d44/coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a", size = 213225 }, - { url = "https://files.pythonhosted.org/packages/71/cf/964bb667ea37d64b25f04d4cfaf6232cdb7a6472e1f4a4faf0459ddcec40/coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375", size = 210130 }, - { url = "https://files.pythonhosted.org/packages/aa/56/31edd4baa132fe2b991437e0acf3e36c50418370044a89b65518e5581f4c/coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb", size = 210617 }, - { url = "https://files.pythonhosted.org/packages/26/6d/4cd14bd0221180c307fae4f8ef00dbd86a13507c25081858c620aa6fafd8/coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95", size = 242048 }, - { url = "https://files.pythonhosted.org/packages/84/60/7eb84255bd9947b140e0382721b0a1b25fd670b4f0f176f11f90b5632d02/coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d", size = 239619 }, - { url = "https://files.pythonhosted.org/packages/76/6b/e8f4696194fdf3c19422f2a80ac10e03a9322f93e6c9ef57a89e03a8c8f7/coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743", size = 241321 }, - { url = "https://files.pythonhosted.org/packages/3f/1c/6a6990fd2e6890807775852882b1ed0a8e50519a525252490b0c219aa8a5/coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1", size = 250419 }, - { url = "https://files.pythonhosted.org/packages/1a/be/b6422a1422381704dd015cc23e503acd1a44a6bdc4e59c75f8c6a2b24151/coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de", size = 248794 }, - { url = "https://files.pythonhosted.org/packages/9b/93/e8231000754d4a31fe9a6c550f6a436eacd2e50763ba2b418f10b2308e45/coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff", size = 249873 }, - { url = "https://files.pythonhosted.org/packages/d3/6f/eb5aae80bf9d01d0f293121d4caa660ac968da2cb967f82547a7b5e8d65b/coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d", size = 212380 }, - { url = "https://files.pythonhosted.org/packages/30/73/b70ab57f11b62f5ca9a83f43cae752fbbb4417bea651875235c32eb2fc2e/coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656", size = 213316 }, - { url = "https://files.pythonhosted.org/packages/36/db/f4e17ffb5ac2d125c72ee3b235c2e04f85a4296a6a9e17730e218af113d8/coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9", size = 210340 }, - { url = "https://files.pythonhosted.org/packages/c3/bc/d7e832280f269be9e8d46cff5c4031b4840f1844674dc53ad93c5a9c1da6/coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64", size = 210612 }, - { url = "https://files.pythonhosted.org/packages/54/84/543e2cd6c1de30c7522a0afcb040677957bac756dd8677bade8bdd9274ba/coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af", size = 242926 }, - { url = "https://files.pythonhosted.org/packages/ad/06/570533f747141b4fd727a193317e16c6e677ed7945e23a195b8f64e685a2/coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc", size = 240294 }, - { url = "https://files.pythonhosted.org/packages/fa/d9/ec4ba0913195d240d026670d41b91f3e5b9a8a143a385f93a09e97c90f5c/coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2", size = 242232 }, - { url = "https://files.pythonhosted.org/packages/d9/3f/1a613c32aa1980d20d6ca2f54faf800df04aafad6016d7132b3276d8715d/coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1", size = 249171 }, - { url = "https://files.pythonhosted.org/packages/b9/3b/e16b12693572fd69148453abc6ddcd20cbeae6f0a040b5ed6af2f75b646f/coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb", size = 247073 }, - { url = "https://files.pythonhosted.org/packages/e7/3e/04a05d40bb09f90a312296a32fb2c5ade2dfcf803edf777ad18b97547503/coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2", size = 248812 }, - { url = "https://files.pythonhosted.org/packages/ba/f7/3a8b7b0affe548227f3d45e248c0f22c5b55bff0ee062b49afc165b3ff25/coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4", size = 212634 }, - { url = "https://files.pythonhosted.org/packages/7c/31/5f5286d2a5e21e1fe5670629bb24c79bf46383a092e74e00077e7a178e5c/coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475", size = 213460 }, - { url = "https://files.pythonhosted.org/packages/62/18/5573216d5b8db7d9f29189350dcd81830a03a624966c35f8201ae10df09c/coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1", size = 210014 }, - { url = "https://files.pythonhosted.org/packages/7c/0e/e98d6c6d569d65ff3195f095e6b006b3d7780fd6182322a25e7dfe0d53d3/coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5", size = 210494 }, - { url = "https://files.pythonhosted.org/packages/d3/63/98e5a6b7ed1bfca874729ee309cc49a6d6658ab9e479a2b6d223ccc96e03/coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631", size = 237996 }, - { url = "https://files.pythonhosted.org/packages/76/e4/d3c67a0a092127b8a3dffa2f75334a8cdb2cefc99e3d75a7f42cf1ff98a9/coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46", size = 236287 }, - { url = "https://files.pythonhosted.org/packages/12/7f/9b787ffc31bc39aa9e98c7005b698e7c6539bd222043e4a9c83b83c782a2/coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e", size = 237070 }, - { url = "https://files.pythonhosted.org/packages/31/ee/9998a0d855cad5f8e04062f7428b83c34aa643e5df468409593a480d5585/coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be", size = 243115 }, - { url = "https://files.pythonhosted.org/packages/16/94/1e348cd4445404c588ec8199adde0b45727b1d7989d8fb097d39c93e3da5/coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b", size = 241315 }, - { url = "https://files.pythonhosted.org/packages/28/17/6fe1695d2a706e586b87a407598f4ed82dd218b2b43cdc790f695f259849/coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0", size = 242467 }, - { url = "https://files.pythonhosted.org/packages/81/a2/1e550272c8b1f89b980504230b1a929de83d8f3d5ecb268477b32e5996a6/coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7", size = 212394 }, - { url = "https://files.pythonhosted.org/packages/c9/48/7d3c31064c5adcc743fe5370cf7e198cee06cc0e2d37b5cbe930691a3f54/coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493", size = 213246 }, - { url = "https://files.pythonhosted.org/packages/34/81/f00ce7ef95479085feb01fa9e352b2b5b2b9d24767acf2266d6267a6dba9/coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067", size = 202381 }, +sdist = { url = "https://files.pythonhosted.org/packages/52/d3/3ec80acdd57a0d6a1111b978ade388824f37126446fd6750d38bfaca949c/coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8", size = 798314, upload-time = "2024-04-23T17:42:35.508Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/db/08d54dbc12fdfe5857b06105fd1235bdebb7da7c11cd1a0fae936556162a/coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c", size = 210025, upload-time = "2024-04-23T17:40:22.328Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ff/02c4bcff1025b4a788aa3933e1cd1474d79de43e0d859273b3319ef43cd3/coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b", size = 210499, upload-time = "2024-04-23T17:40:25.747Z" }, + { url = "https://files.pythonhosted.org/packages/ab/b1/7820a8ef62adeebd37612af9d2369f4467a3bc2641dea1243450def5489e/coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932", size = 238399, upload-time = "2024-04-23T17:40:27.591Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/23a388f3ce16c5ea01a454fef6a9039115abd40b748027d4fef18b3628a7/coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3", size = 236676, upload-time = "2024-04-23T17:40:30.455Z" }, + { url = "https://files.pythonhosted.org/packages/f8/81/e871b0d58ca5d6cc27d00b2f668ce09c4643ef00512341f3a592a81fb6cd/coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517", size = 237467, upload-time = "2024-04-23T17:40:32.704Z" }, + { url = "https://files.pythonhosted.org/packages/95/cb/42a6d34d5840635394f1e172aaa0e7cbd9346155e5004a8ee75d8e434c6b/coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a", size = 243539, upload-time = "2024-04-23T17:40:35.068Z" }, + { url = "https://files.pythonhosted.org/packages/6a/6a/18b3819919fdfd3e2062a75219b363f895f24ae5b80e72ffe5dfb1a7e9c8/coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880", size = 241725, upload-time = "2024-04-23T17:40:37.251Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/a0650978e8b8f78d269358421b7401acaf7cb89e957b2e1be5205ea5940e/coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58", size = 242913, upload-time = "2024-04-23T17:40:39.992Z" }, + { url = "https://files.pythonhosted.org/packages/8a/fe/95a74158fa0eda56d39783e918edc6fbb3dd3336be390557fc0a2815ecd4/coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4", size = 212381, upload-time = "2024-04-23T17:40:42.632Z" }, + { url = "https://files.pythonhosted.org/packages/4c/26/b276e0c70cba5059becce2594a268a2731d5b4f2386e9a6afdf37ffa3d44/coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a", size = 213225, upload-time = "2024-04-23T17:40:45.175Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/964bb667ea37d64b25f04d4cfaf6232cdb7a6472e1f4a4faf0459ddcec40/coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375", size = 210130, upload-time = "2024-04-23T17:40:47.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/56/31edd4baa132fe2b991437e0acf3e36c50418370044a89b65518e5581f4c/coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb", size = 210617, upload-time = "2024-04-23T17:40:49.82Z" }, + { url = "https://files.pythonhosted.org/packages/26/6d/4cd14bd0221180c307fae4f8ef00dbd86a13507c25081858c620aa6fafd8/coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95", size = 242048, upload-time = "2024-04-23T17:40:52.779Z" }, + { url = "https://files.pythonhosted.org/packages/84/60/7eb84255bd9947b140e0382721b0a1b25fd670b4f0f176f11f90b5632d02/coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d", size = 239619, upload-time = "2024-04-23T17:40:54.847Z" }, + { url = "https://files.pythonhosted.org/packages/76/6b/e8f4696194fdf3c19422f2a80ac10e03a9322f93e6c9ef57a89e03a8c8f7/coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743", size = 241321, upload-time = "2024-04-23T17:40:57.092Z" }, + { url = "https://files.pythonhosted.org/packages/3f/1c/6a6990fd2e6890807775852882b1ed0a8e50519a525252490b0c219aa8a5/coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1", size = 250419, upload-time = "2024-04-23T17:40:59.051Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/b6422a1422381704dd015cc23e503acd1a44a6bdc4e59c75f8c6a2b24151/coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de", size = 248794, upload-time = "2024-04-23T17:41:01.803Z" }, + { url = "https://files.pythonhosted.org/packages/9b/93/e8231000754d4a31fe9a6c550f6a436eacd2e50763ba2b418f10b2308e45/coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff", size = 249873, upload-time = "2024-04-23T17:41:04.719Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/eb5aae80bf9d01d0f293121d4caa660ac968da2cb967f82547a7b5e8d65b/coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d", size = 212380, upload-time = "2024-04-23T17:41:06.879Z" }, + { url = "https://files.pythonhosted.org/packages/30/73/b70ab57f11b62f5ca9a83f43cae752fbbb4417bea651875235c32eb2fc2e/coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656", size = 213316, upload-time = "2024-04-23T17:41:09.233Z" }, + { url = "https://files.pythonhosted.org/packages/36/db/f4e17ffb5ac2d125c72ee3b235c2e04f85a4296a6a9e17730e218af113d8/coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9", size = 210340, upload-time = "2024-04-23T17:41:11.811Z" }, + { url = "https://files.pythonhosted.org/packages/c3/bc/d7e832280f269be9e8d46cff5c4031b4840f1844674dc53ad93c5a9c1da6/coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64", size = 210612, upload-time = "2024-04-23T17:41:14.256Z" }, + { url = "https://files.pythonhosted.org/packages/54/84/543e2cd6c1de30c7522a0afcb040677957bac756dd8677bade8bdd9274ba/coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af", size = 242926, upload-time = "2024-04-23T17:41:16.284Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/570533f747141b4fd727a193317e16c6e677ed7945e23a195b8f64e685a2/coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc", size = 240294, upload-time = "2024-04-23T17:41:19.099Z" }, + { url = "https://files.pythonhosted.org/packages/fa/d9/ec4ba0913195d240d026670d41b91f3e5b9a8a143a385f93a09e97c90f5c/coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2", size = 242232, upload-time = "2024-04-23T17:41:21.05Z" }, + { url = "https://files.pythonhosted.org/packages/d9/3f/1a613c32aa1980d20d6ca2f54faf800df04aafad6016d7132b3276d8715d/coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1", size = 249171, upload-time = "2024-04-23T17:41:23.723Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3b/e16b12693572fd69148453abc6ddcd20cbeae6f0a040b5ed6af2f75b646f/coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb", size = 247073, upload-time = "2024-04-23T17:41:25.719Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3e/04a05d40bb09f90a312296a32fb2c5ade2dfcf803edf777ad18b97547503/coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2", size = 248812, upload-time = "2024-04-23T17:41:27.951Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f7/3a8b7b0affe548227f3d45e248c0f22c5b55bff0ee062b49afc165b3ff25/coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4", size = 212634, upload-time = "2024-04-23T17:41:30.114Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/5f5286d2a5e21e1fe5670629bb24c79bf46383a092e74e00077e7a178e5c/coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475", size = 213460, upload-time = "2024-04-23T17:41:32.683Z" }, + { url = "https://files.pythonhosted.org/packages/62/18/5573216d5b8db7d9f29189350dcd81830a03a624966c35f8201ae10df09c/coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1", size = 210014, upload-time = "2024-04-23T17:41:56.535Z" }, + { url = "https://files.pythonhosted.org/packages/7c/0e/e98d6c6d569d65ff3195f095e6b006b3d7780fd6182322a25e7dfe0d53d3/coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5", size = 210494, upload-time = "2024-04-23T17:41:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/d3/63/98e5a6b7ed1bfca874729ee309cc49a6d6658ab9e479a2b6d223ccc96e03/coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631", size = 237996, upload-time = "2024-04-23T17:42:01.514Z" }, + { url = "https://files.pythonhosted.org/packages/76/e4/d3c67a0a092127b8a3dffa2f75334a8cdb2cefc99e3d75a7f42cf1ff98a9/coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46", size = 236287, upload-time = "2024-04-23T17:42:03.838Z" }, + { url = "https://files.pythonhosted.org/packages/12/7f/9b787ffc31bc39aa9e98c7005b698e7c6539bd222043e4a9c83b83c782a2/coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e", size = 237070, upload-time = "2024-04-23T17:42:06.993Z" }, + { url = "https://files.pythonhosted.org/packages/31/ee/9998a0d855cad5f8e04062f7428b83c34aa643e5df468409593a480d5585/coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be", size = 243115, upload-time = "2024-04-23T17:42:09.281Z" }, + { url = "https://files.pythonhosted.org/packages/16/94/1e348cd4445404c588ec8199adde0b45727b1d7989d8fb097d39c93e3da5/coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b", size = 241315, upload-time = "2024-04-23T17:42:11.836Z" }, + { url = "https://files.pythonhosted.org/packages/28/17/6fe1695d2a706e586b87a407598f4ed82dd218b2b43cdc790f695f259849/coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0", size = 242467, upload-time = "2024-04-23T17:42:14.019Z" }, + { url = "https://files.pythonhosted.org/packages/81/a2/1e550272c8b1f89b980504230b1a929de83d8f3d5ecb268477b32e5996a6/coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7", size = 212394, upload-time = "2024-04-23T17:42:17.655Z" }, + { url = "https://files.pythonhosted.org/packages/c9/48/7d3c31064c5adcc743fe5370cf7e198cee06cc0e2d37b5cbe930691a3f54/coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493", size = 213246, upload-time = "2024-04-23T17:42:19.777Z" }, + { url = "https://files.pythonhosted.org/packages/34/81/f00ce7ef95479085feb01fa9e352b2b5b2b9d24767acf2266d6267a6dba9/coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067", size = 202381, upload-time = "2024-04-23T17:42:22.127Z" }, ] [package.optional-dependencies] @@ -342,339 +490,399 @@ toml = [ [[package]] name = "cramjam" -version = "2.9.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c9/68/09b6b5603d21a0c7d4362d513217a5079c47b1b7a88967c52dbef13db183/cramjam-2.9.1.tar.gz", hash = "sha256:336cc591d86cbd225d256813779f46624f857bc9c779db126271eff9ddc524ae", size = 47892 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/27/5d/0b03115fa6a95a6dd9be344cd186879b763f1a6fab57ae55ffe2777aa0a7/cramjam-2.9.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:8e82464d1e00fbbb12958999b8471ba5e9f3d9711954505a0a7b378762332e6f", size = 2136622 }, - { url = "https://files.pythonhosted.org/packages/6f/ac/a17644e182ede7e8e24fb3af038bc2c1cf3dd0447c935cb10409f21d099b/cramjam-2.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d2df8a6511cc08ef1fccd2e0c65e2ebc9f57574ec8376052a76851af5398810", size = 1927947 }, - { url = "https://files.pythonhosted.org/packages/9e/1e/e6c4f9695e4ba7b9c63160dcbfa76428bd3221930eedeb8f16364ab6f642/cramjam-2.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:21ea784e6c3f1843d3523ae0f03651dd06058b39eeb64beb82ee3b100fa83662", size = 2268766 }, - { url = "https://files.pythonhosted.org/packages/ab/37/4c81e5d039bdfc75a695abd426e6cdd9ab18a87f65d57837d78936cfa226/cramjam-2.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e0c5d98a4e791f0bbd0ffcb7dae879baeb2dcc357348a8dc2be0a8c10403a2a", size = 2108762 }, - { url = "https://files.pythonhosted.org/packages/b9/bb/3bf3a8877b9a4105b625d710410bd2bc83ef38d4a7fe4eaeb3895d997b2d/cramjam-2.9.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e076fd87089197cb61117c63dbe7712ad5eccb93968860eb3bae09b767bac813", size = 2086694 }, - { url = "https://files.pythonhosted.org/packages/c3/78/317b7ab6a9b0f24c45d56305a8288cdb6408f855034dc80530ed16a5cc6c/cramjam-2.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d86b44933aea0151e4a2e1e6935448499849045c38167d288ca4c59d5b8cd4e", size = 2441698 }, - { url = "https://files.pythonhosted.org/packages/c5/2d/bc98992c29eb8647196b3bda814fd7ecfba6aff85177d44180be2aa320e8/cramjam-2.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7eb032549dec897b942ddcf80c1cdccbcb40629f15fc902731dbe6362da49326", size = 2759280 }, - { url = "https://files.pythonhosted.org/packages/dd/64/a4e54d74110c22477e467586935167d61fc7bae5284d393e76779b214a3e/cramjam-2.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf29b4def86ec503e329fe138842a9b79a997e3beb6c7809b05665a0d291edff", size = 2385128 }, - { url = "https://files.pythonhosted.org/packages/b0/1a/6ee093bf8a41cf31980175310abbbcdd1a39dadadbe96843112f42cef0fe/cramjam-2.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a36adf7d13b7accfa206e1c917f08924eb905b45aa8e62176509afa7b14db71e", size = 2373494 }, - { url = "https://files.pythonhosted.org/packages/9d/a6/1ae1f1a8ef559c2fab9d6d7f09b19995684e6727e617bf1b73967ee1c6be/cramjam-2.9.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:cf4ea758d98b6fad1b4b2d808d0de690d3162ac56c26968aea0af6524e3eb736", size = 2386900 }, - { url = "https://files.pythonhosted.org/packages/d9/e6/cf18deeaa0a96e7fc87f0eacde3c97e2893b573ac148ec746655570c18fc/cramjam-2.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4826d6d81ea490fa7a3ae7a4b9729866a945ffac1f77fe57b71e49d6e1b21efd", size = 2400609 }, - { url = "https://files.pythonhosted.org/packages/90/97/98a8fa24249dc72a936a9a51a81407a399070ba4ceb528d0af291c760eff/cramjam-2.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:335103317475bf992953c58838152a4761fc3c87354000edbfc4d7e57cf05909", size = 2553159 }, - { url = "https://files.pythonhosted.org/packages/ae/6b/4f71f72bc3405f221ec8bd2ba869e324d5f87ddd58c14bf59f7937ea37ab/cramjam-2.9.1-cp310-cp310-win32.whl", hash = "sha256:258120cb1e3afc3443f756f9de161ed63eed56a2c31f6093e81c571c0f2dc9f6", size = 1817873 }, - { url = "https://files.pythonhosted.org/packages/8e/f4/32639916897d59e94d286b5b22263ce8c2903ecc93a868ebe9443ece8f12/cramjam-2.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c60e5996aa02547d12bc2740d44e90e006b0f93100f53206f7abe6732ad56e69", size = 2092168 }, - { url = "https://files.pythonhosted.org/packages/6c/28/dd2b62be30ffe1fa8df10c99ba7b46abfbfb2fc6ace6acbbf9264a1a6b48/cramjam-2.9.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b9db1debe48060e41a5b91af9193c524e473c57f6105462c5524a41f5aabdb88", size = 2136699 }, - { url = "https://files.pythonhosted.org/packages/03/c9/fcebeb6f06879af4226337715fbc42ffe543158bcba8c244bba144767897/cramjam-2.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f6f18f0242212d3409d26ce3874937b5b979cebd61f08b633a6ea893c32fc7b6", size = 1927934 }, - { url = "https://files.pythonhosted.org/packages/e8/f3/77032e4f5db4dfcc2b0365f92655b7d6f3fc1527ea5b637f9fb9f8156a65/cramjam-2.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b5b1cd7d39242b2b903cf09cd4696b3a6e04dc537ffa9f3ac8668edae76eecb6", size = 2268584 }, - { url = "https://files.pythonhosted.org/packages/38/16/52175e94390f57196382783a3386c122ace7656b57339abaacdc9433b609/cramjam-2.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47de0a68f5f4d9951250ef5af31f2a7228132caa9ed60994234f7eb98090d33", size = 2108599 }, - { url = "https://files.pythonhosted.org/packages/99/25/5f7476d127a8d18cd19a2f3fd25c0fe09ef7848069d23aac70bc96385eb6/cramjam-2.9.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e13c9a697881e5e38148958612dc6856967f5ff8cd7bba5ff751f2d6ac020aa4", size = 2086632 }, - { url = "https://files.pythonhosted.org/packages/7b/97/76ff3e1209add6acb7e2aa7997be48dc1f92ad66ee3e8fa1179eb2bb9b44/cramjam-2.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba560244bc1335b420b74e91e35f9d4e7f307a3be3a4603ce0f0d7e15a0acdf0", size = 2441757 }, - { url = "https://files.pythonhosted.org/packages/69/c4/228e74c30576556d11e54d86f356955cd86ff5e11bbfec74b66ed0dd237d/cramjam-2.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d47fd41ce260cf4f0ff0e788de961fab9e9c6844a05ce55d06ce31e06107bdc", size = 2758144 }, - { url = "https://files.pythonhosted.org/packages/4b/e7/0fd22e12c6a2879abc501979779d4b8cfe8fe692c708c2c0d1664e88fd79/cramjam-2.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84d154fbadece82935396eb6bcb502085d944d2fd13b07a94348364344370c2c", size = 2385062 }, - { url = "https://files.pythonhosted.org/packages/dd/9c/845592ddf9eb7130ae8bc5958a01d469304a43f8071effe164e2d239e3fa/cramjam-2.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:038df668ffb94d64d67b6ecc59cbd206745a425ffc0402897dde12d89fa6a870", size = 2373473 }, - { url = "https://files.pythonhosted.org/packages/10/c2/287cc94b7f8e87e3b0c21819d3a5deead99ebfdcb2b2d85cd04011b37292/cramjam-2.9.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:4125d8cd86fa08495d310e80926c2f0563f157b76862e7479f9b2cf94823ea0c", size = 2386816 }, - { url = "https://files.pythonhosted.org/packages/7c/22/869a1eeea53db4d9fbde6693a2465909762bffeab1a671e193c95b26f99f/cramjam-2.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4206ebdd1d1ef0f3f86c8c2f7c426aa4af6094f4f41e274601fd4c4569f37454", size = 2400713 }, - { url = "https://files.pythonhosted.org/packages/3f/89/ff988bd6427f01041ccb1a9104c05b6373ae476682d317b6844f4b40af92/cramjam-2.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ab687bef5c493732b9a4ab870542ee43f5eae0025f9c684c7cb399c3a85cb380", size = 2553081 }, - { url = "https://files.pythonhosted.org/packages/2e/68/13fa8561335de609f3cd40b132c1a3abbaf26d3c277e8b8a7446de34ef2c/cramjam-2.9.1-cp311-cp311-win32.whl", hash = "sha256:dda7698b6d7caeae1047adafebc4b43b2a82478234f6c2b45bc3edad854e0600", size = 1817782 }, - { url = "https://files.pythonhosted.org/packages/94/75/f3506ee802460e3b86a91e53bba1f67cf457fa04e4316fe7d5823ba5d28b/cramjam-2.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:872b00ff83e84bcbdc7e951af291ebe65eed20b09c47e7c4af21c312f90b796f", size = 2092227 }, - { url = "https://files.pythonhosted.org/packages/56/66/69a1c17331e38b02c78c923262fc315272de7c2618ef7eac8b3358969d90/cramjam-2.9.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:79417957972553502b217a0093532e48893c8b4ca30ccc941cefe9c72379df7c", size = 2132273 }, - { url = "https://files.pythonhosted.org/packages/3d/17/23d0b1d3301480e924545cdd27f2b949c50438949f64c74e800a09c12c37/cramjam-2.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce2b94117f373defc876f88e74e44049a9969223dbca3240415b71752d0422fb", size = 1926919 }, - { url = "https://files.pythonhosted.org/packages/8e/da/e9565f4abbbaa14645ccd7ce83f9631e90955454b87dc3ef9208aebc72e6/cramjam-2.9.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:67040e0fd84404885ec716a806bee6110f9960c3647e0ef1670aab3b7375a70a", size = 2271776 }, - { url = "https://files.pythonhosted.org/packages/88/ac/e6e0794ac01deb52e7a6a3e59720699abdee08d9b9c63a8d8874201d8155/cramjam-2.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bedb84e068b53c944bd08dcb501fd00d67daa8a917922356dd559b484ce7eab", size = 2109248 }, - { url = "https://files.pythonhosted.org/packages/22/0f/c3724b2dcdfbe7e07917803cf7a6db4a874818a6f8d2b95ca1ceaf177170/cramjam-2.9.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:06e3f97a379386d97debf08638a78b3d3850fdf6124755eb270b54905a169930", size = 2088611 }, - { url = "https://files.pythonhosted.org/packages/ce/16/929a5ae899ad6298f58e66622dc223476fe8e1d4e8dae608f4e1a34bfd09/cramjam-2.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11118675e9c7952ececabc62f023290ee4f8ecf0bee0d2c7eb8d1c402ee9769d", size = 2438373 }, - { url = "https://files.pythonhosted.org/packages/2a/2a/ad473f1ca65d3285e8c1d99fc0289f5856224c0d452dabcf856fd4dcdd77/cramjam-2.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b7de6b61b11545570e4d6033713f3599525efc615ee353a822be8f6b0c65b77", size = 2836669 }, - { url = "https://files.pythonhosted.org/packages/9b/5a/e9b4868ee27099a2a21646cf5ea5cf08c660eae90b55a395ada974dcf3fb/cramjam-2.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57ca8f3775324a9de3ee6f05ca172687ba258c0dea79f7e3a6b4112834982f2a", size = 2343995 }, - { url = "https://files.pythonhosted.org/packages/5f/c4/870a9b4524107bf85a207b82a42613318881238b20f2d237e62815af646a/cramjam-2.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9847dd6f288f1c56359f52acb48ff2df848ff3e3bff34d23855bbcf7016427cc", size = 2374270 }, - { url = "https://files.pythonhosted.org/packages/70/4b/b69e8e3951b7cec5e7da2539b7573bb396bed66af07d760b1878b00fd120/cramjam-2.9.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:8d1248dfa7f151e893ce819670f00879e4b7650b8d4c01279ce4f12140d68dd2", size = 2388789 }, - { url = "https://files.pythonhosted.org/packages/05/1a/af02f6192060413314735c0db61259d7279b0d8d99eee29eff2af09c5892/cramjam-2.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9da6d970281083bae91b914362de325414aa03c01fc806f6bb2cc006322ec834", size = 2402459 }, - { url = "https://files.pythonhosted.org/packages/20/9a/a4ab3e90d72eb4f2c1b983fa32b4050ba676f533ba15bd78158f0632295a/cramjam-2.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1c33bc095db5733c841a102b8693062be5db8cdac17b9782ebc00577c6a94480", size = 2518440 }, - { url = "https://files.pythonhosted.org/packages/35/3b/e632dd7e2c5c8a2af2d83144b00d6840f1afcf9c6959ed59ec5b0f925288/cramjam-2.9.1-cp312-cp312-win32.whl", hash = "sha256:9e9193cd4bb57e7acd3af24891526299244bfed88168945efdaa09af4e50720f", size = 1822630 }, - { url = "https://files.pythonhosted.org/packages/0e/a2/d1c46618b81b83578d58a62f3709046c4f3b4ddba10df4b9797cfe096b98/cramjam-2.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:15955dd75e80f66c1ea271167a5347661d9bdc365f894a57698c383c9b7d465c", size = 2094684 }, - { url = "https://files.pythonhosted.org/packages/85/45/f1d1e6ffdceb3b0c18511df2f8e779e03972459fb71d7c1ab0f6a5c063a3/cramjam-2.9.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5a7797a2fff994fc5e323f7a967a35a3e37e3006ed21d64dcded086502f482af", size = 2131814 }, - { url = "https://files.pythonhosted.org/packages/3a/96/36bbd431fbf0fa2ff51fd2db4c3bead66e9e373693a8455d411d45125a68/cramjam-2.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d51b9b140b1df39a44bff7896d98a10da345b7d5f5ce92368d328c1c2c829167", size = 1926380 }, - { url = "https://files.pythonhosted.org/packages/67/c4/99b6507ec697d5f56d32c9c04614775004b05b7fa870725a492dc6b639eb/cramjam-2.9.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:07ac76b7f992556e7aa910244be11ece578cdf84f4d5d5297461f9a895e18312", size = 2271581 }, - { url = "https://files.pythonhosted.org/packages/cb/1b/6d55dff244fb22c0b686dd5a96a754c0638f8a94056beb27c457c6035cc5/cramjam-2.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d90a72608c7550cd7eba914668f6277bfb0b24f074d1f1bd9d061fcb6f2adbd6", size = 2109255 }, - { url = "https://files.pythonhosted.org/packages/ca/fb/b9fcf492a21a8d978c6f999025fce2c6656399448c017ed2fc859425f37f/cramjam-2.9.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:56495975401b1821dbe1f29cf222e23556232209a2fdb809fe8156d120ca9c7f", size = 2088323 }, - { url = "https://files.pythonhosted.org/packages/88/1f/69b523395aeaa201dbd53d203453288205a0c651e7c910161892d694eb4d/cramjam-2.9.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b695259e71fde6d5be66b77a4474523ced9ffe9fe8a34cb9b520ec1241a14d3", size = 2437930 }, - { url = "https://files.pythonhosted.org/packages/b0/2c/d07e802f1786c4082e8286db1087563e4fab31cd6534ed31523f1f9584d1/cramjam-2.9.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab1e69dc4831bbb79b6d547077aae89074c83e8ad94eba1a3d80e94d2424fd02", size = 2836655 }, - { url = "https://files.pythonhosted.org/packages/1f/f5/6b425e82395c078bc95a7437b685e6bdba39d28c2b2986d79374fc1681aa/cramjam-2.9.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:440b489902bfb7a26d3fec1ca888007615336ff763d2a32a2fc40586548a0dbf", size = 2387107 }, - { url = "https://files.pythonhosted.org/packages/33/65/7bf97d89ba7607aaea5464af6f249e3d94c291acf73d72768367a3e361c0/cramjam-2.9.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:217fe22b41f8c3dce03852f828b059abfad11d1344a1df2f43d3eb8634b18d75", size = 2374006 }, - { url = "https://files.pythonhosted.org/packages/29/11/8b6c82eda6d0affbc15d7ab4dc758856eb4308e8ddae73300c1648f5aa0f/cramjam-2.9.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:95f3646ddc98af25af25d5692ae65966488a283813336ea9cf41b22e542e7c0d", size = 2388731 }, - { url = "https://files.pythonhosted.org/packages/48/25/6cdd57c0b1a83c98aec9029310d09a6c1a31e9e9fb8efd9001bd0cbea992/cramjam-2.9.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:6b19fc60ead1cae9795a5b359599da3a1c95d38f869bdfb51c441fd76b04e926", size = 2402131 }, - { url = "https://files.pythonhosted.org/packages/b4/e7/cbf80c9647fa582432aa833c4bdd20cf437917c8066ce653e3b78deff658/cramjam-2.9.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:8dc5207567459d049696f62a1fdfb220f3fe6aa0d722285d44753e12504dac6c", size = 2555296 }, - { url = "https://files.pythonhosted.org/packages/18/a6/fabe1959a980f5d2783a6c138311509dd168bd76e62018624a91cd1cbb41/cramjam-2.9.1-cp313-cp313-win32.whl", hash = "sha256:fbfe35929a61b914de9e5dbacde0cfbba86cbf5122f9285a24c14ed0b645490b", size = 1822484 }, - { url = "https://files.pythonhosted.org/packages/55/d5/24e4562771711711c466768c92097640ed97b0283abe9043ffb6c6d4cf04/cramjam-2.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:06068bd191a82ad4fc1ac23d6f8627fb5e37ec4be0431711b9a2dbacaccfeddb", size = 2094445 }, - { url = "https://files.pythonhosted.org/packages/c7/5a/50523fd478390acb6ca8e57239f7cf79f7260dc0d16be89137d47823e50a/cramjam-2.9.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:af39006faddfc6253beb93ca821d544931cfee7f0177b99ff106dfd8fd6a2cd8", size = 2137158 }, - { url = "https://files.pythonhosted.org/packages/df/83/54eca302e431d51149074d8aad6ec588870c5797060e2142dfe6ca3599a8/cramjam-2.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b3291be0d3f73d5774d69013be4ab33978c777363b5312d14f62f77817c2f75a", size = 1927910 }, - { url = "https://files.pythonhosted.org/packages/6d/e9/5d38ffa5376c5bffcbd16545707d9dac6beffccd00410f0cc19d83d85ef7/cramjam-2.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1539fd758f0e57fad7913cebff8baaee871bb561ddf6fa710a427b74da6b6778", size = 2269458 }, - { url = "https://files.pythonhosted.org/packages/15/f3/99fedc4210db1967256e602fdcb60947585421fd659f8baeeeb4ea16e4c7/cramjam-2.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff362f68bd68ac0eccb445209238d589bba728fb6d7f2e9dc199e0ec3a61d6e0", size = 2109406 }, - { url = "https://files.pythonhosted.org/packages/f2/e9/f380e0c1bd03046c522da4fd6d43ea897ba0b832c78fc4ea5708d8c35c21/cramjam-2.9.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:23b9786d1d17686fb8d600ade2a19374c7188d4b8867efa9af0d8274a220aec7", size = 2086677 }, - { url = "https://files.pythonhosted.org/packages/13/a7/3ae887753f6d41f6e4af8e25654d103c56e13dda2f4b4d13acac570c65c1/cramjam-2.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8bc9c2c748aaf91863d89c4583f529c1c709485c94f8dfeb3ee48662d88e3258", size = 2442136 }, - { url = "https://files.pythonhosted.org/packages/de/a2/763fd98340936057e44ea0b870c9cdb87ad5f90d49e492e8a11cf74e7b29/cramjam-2.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd0fa9a0e7f18224b6d2d1d69dbdc3aecec80ef1393c59244159b131604a4395", size = 2754985 }, - { url = "https://files.pythonhosted.org/packages/33/31/7c8cdf6b16fcd46bad4a307c8203a58b7a2fddf6cb3aad9dc441c050f62f/cramjam-2.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ceef6e09ee22457997370882aa3c69de01e6dd0aaa2f953e1e87ad11641d042", size = 2385597 }, - { url = "https://files.pythonhosted.org/packages/dd/ba/ec0f3b5a3a90721bdb42f4f4989b60adf823d137f40365e83df0cd299378/cramjam-2.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1376f6fdbf0b30712413a0b4e51663a4938ae2f6b449f8e4635dbb3694db83cf", size = 2374339 }, - { url = "https://files.pythonhosted.org/packages/ff/0a/f5bccdc8d12821aed4473a427e9eb8282a38c9337a30e02ed102b18941bf/cramjam-2.9.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:342fb946f8d3e9e35b837288b03ab23cfbe0bb5a30e582ed805ef79706823a96", size = 2386933 }, - { url = "https://files.pythonhosted.org/packages/a0/6e/ce3ffad2b3b8cb73156a19345e27a2e27fb5be79b64f2c81b0c6d6e16c57/cramjam-2.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a237064a6e2c2256c9a1cf2beb7c971382190c0f1eb2e810e02e971881756132", size = 2400860 }, - { url = "https://files.pythonhosted.org/packages/32/a9/e4509e5dfc8f41d9e7f9fdddbf567967937303621d410197c86b11d6a3e4/cramjam-2.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53145fc9f2319c1245d4329e1da8cfacd6e35e27090c07c0b9d453ae2bbdac3e", size = 2553681 }, - { url = "https://files.pythonhosted.org/packages/0a/83/52401c5c654ddff2850d890b0f1cfc355ff6887c6def420d0c8d8178ff97/cramjam-2.9.1-cp39-cp39-win32.whl", hash = "sha256:8a9f52c27292c21457f43c4ce124939302a9acfb62295e7cda8667310563a5a3", size = 1818130 }, - { url = "https://files.pythonhosted.org/packages/93/b3/1645986d8b915fd0426a7224cd00c2c17c32b4d69bc5faad3fb3f5fd5081/cramjam-2.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:8097ee39b61c86848a443c0b25b2df1de6b331fd512b20836a4f5cfde51ab255", size = 2092440 }, - { url = "https://files.pythonhosted.org/packages/bc/91/3f7884172573072a4280bc8bc19b7562b2cd66d2a65576b11e72115cd5fe/cramjam-2.9.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:86824c695688fcd06c5ac9bbd3fea9bdfb4cca194b1e706fbf11a629df48d2b4", size = 2159537 }, - { url = "https://files.pythonhosted.org/packages/ef/49/a0a89e9c45413e89a1e408d4ab416c0f88f19f6db7571fd5c517e429e276/cramjam-2.9.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:27571bfa5a5d618604696747d0dc1d2a99b5906c967c8dee53c13a7107edfde6", size = 1936244 }, - { url = "https://files.pythonhosted.org/packages/26/f7/6422b9e4d148f1a351c0358a95d59023f25cab76609b180804f6a3ed17e9/cramjam-2.9.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb01f6e38719818778144d3165a89ea1ad9dc58c6342b7f20aa194c70f34cbd1", size = 2119487 }, - { url = "https://files.pythonhosted.org/packages/b5/59/6fc930217f7ae085eca6d22d3477cd0145a105cdc39e63b834cb0c1b25e3/cramjam-2.9.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b5cef5cf40725fe64592af9ec163e7389855077700678a1d94bec549403a74d", size = 2400910 }, - { url = "https://files.pythonhosted.org/packages/2d/36/7e53cf5aaed4b446490e298f7571e69ce15d0dfb148feabe8bf02e58827f/cramjam-2.9.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ac48b978aa0675f62b642750e798c394a64d25ce852e4e541f69bef9a564c2f0", size = 2100860 }, +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/14/12/34bf6e840a79130dfd0da7badfb6f7810b8fcfd60e75b0539372667b41b6/cramjam-2.11.0.tar.gz", hash = "sha256:5c82500ed91605c2d9781380b378397012e25127e89d64f460fea6aeac4389b4", size = 99100, upload-time = "2025-07-27T21:25:07.559Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/d3/20d0402e4e983b66603117ad3dd3b864a05d7997a830206d3ff9cacef9a2/cramjam-2.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d0859c65775e8ebf2cbc084bfd51bd0ffda10266da6f9306451123b89f8e5a63", size = 3558999, upload-time = "2025-07-27T21:21:34.105Z" }, + { url = "https://files.pythonhosted.org/packages/f5/a8/a6e2744288938ccd320a5c6f6f3653faa790f933f5edd088c6e5782a2354/cramjam-2.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:1d77b9b0aca02a3f6eeeff27fcd315ca5972616c0919ee38e522cce257bcd349", size = 1861558, upload-time = "2025-07-27T21:21:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/96/29/7961e09a849eea7d8302e7baa6f829dd3ef3faf199cb25ed29b318ae799b/cramjam-2.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66425bc25b5481359b12a6719b6e7c90ffe76d85d0691f1da7df304bfb8ce45c", size = 1699431, upload-time = "2025-07-27T21:21:38.396Z" }, + { url = "https://files.pythonhosted.org/packages/7a/60/6665e52f01a8919bf37c43dcf0e03b6dd3866f5c4e95440b357d508ee14e/cramjam-2.11.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bd748d3407ec63e049b3aea1595e218814fccab329b7fb10bb51120a30e9fb7e", size = 2025262, upload-time = "2025-07-27T21:21:40.417Z" }, + { url = "https://files.pythonhosted.org/packages/d7/80/79bd84dbeb109e2c6efb74e661b7bd4c3ba393208ebcf69e2ae9454ae80c/cramjam-2.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6d9a23a35b3a105c42a8de60fc2e80281ae6e758f05a3baea0b68eb1ddcb679", size = 1766177, upload-time = "2025-07-27T21:21:42.224Z" }, + { url = "https://files.pythonhosted.org/packages/28/ef/b43280767ebcde022ba31f1e9902137655a956ae30e920d75630fa67e36e/cramjam-2.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:40a75b95e05e38a2a055b2446f09994ce1139151721659315151d4ad6289bbff", size = 1854031, upload-time = "2025-07-27T21:21:43.651Z" }, + { url = "https://files.pythonhosted.org/packages/60/1c/79d522757c494dfd9e9b208b0604cc7e97b481483cc477144f5705a06ab7/cramjam-2.11.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5d042c376d2025300da37d65192d06a457918b63b31140f697f85fd8e310b29", size = 2035812, upload-time = "2025-07-27T21:21:45.473Z" }, + { url = "https://files.pythonhosted.org/packages/c8/70/3bf0670380069b3abd4c6b53f61d3148f4e08935569c08efbeaf7550e87d/cramjam-2.11.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb148b35ab20c75b19a06c27f05732e2a321adbd86fadc93f9466dbd7b1154a7", size = 2067661, upload-time = "2025-07-27T21:21:47.901Z" }, + { url = "https://files.pythonhosted.org/packages/db/7e/4f6ca98a4b474348e965a529b359184785d1119ab7c4c9ec1280b8bea50a/cramjam-2.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee47c220f0f5179ddc923ab91fc9e282c27b29fabc60c433dfe06f08084f798", size = 1981523, upload-time = "2025-07-27T21:21:49.704Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6c/b241511c7ffd5f1da29641429bb0e19b5fbcffafde5ba1bbcbf9394ea456/cramjam-2.11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0cf1b5a81b21ea175c976c3ab09e00494258f4b49b7995efc86060cced3f0b2e", size = 2034251, upload-time = "2025-07-27T21:21:51.252Z" }, + { url = "https://files.pythonhosted.org/packages/14/5c/4ef926c8c3c1bf6da96f9c53450ff334cdb6d0fc1efced0aea97e2090803/cramjam-2.11.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:360c00338ecf48921492455007f904be607fc7818de3d681acbcc542aae2fb36", size = 2155322, upload-time = "2025-07-27T21:21:53.348Z" }, + { url = "https://files.pythonhosted.org/packages/be/fb/eb2aef7fb2730e56c5a2c9000817ee8fb4a95c92f19cc6e441afed42ec29/cramjam-2.11.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f31fcc0d30dc3f3e94ea6b4d8e1a855071757c6abf6a7b1e284050ab7d4c299c", size = 2169094, upload-time = "2025-07-27T21:21:55.187Z" }, + { url = "https://files.pythonhosted.org/packages/3b/80/925a5c668dcee1c6f61775067185c5dc9a63c766d5393e5c60d2af4217a7/cramjam-2.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:033be66fdceb3d63b2c99b257a98380c4ec22c9e4dca54a2bfec3718cd24e184", size = 2159089, upload-time = "2025-07-27T21:21:57.118Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ac/b2819640eef0592a6de7ca832c0d23c69bd1620f765ce88b60dbc8da9ba2/cramjam-2.11.0-cp310-cp310-win32.whl", hash = "sha256:1c6cea67f6000b81f6bd27d14c8a6f62d00336ca7252fd03ee16f6b70eb5c0d2", size = 1605046, upload-time = "2025-07-27T21:21:58.617Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f4/06af04727b9556721049e2127656d727306d275c518e3d97f9ed4cffd0d8/cramjam-2.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:98aa4a351b047b0f7f9e971585982065028adc2c162c5c23c5d5734c5ccc1077", size = 1710647, upload-time = "2025-07-27T21:22:00.279Z" }, + { url = "https://files.pythonhosted.org/packages/d0/89/8001f6a9b6b6e9fa69bec5319789083475d6f26d52aaea209d3ebf939284/cramjam-2.11.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:04cfa39118570e70e920a9b75c733299784b6d269733dbc791d9aaed6edd2615", size = 3559272, upload-time = "2025-07-27T21:22:01.988Z" }, + { url = "https://files.pythonhosted.org/packages/0b/f3/001d00070ca92e5fbe6aacc768e455568b0cde46b0eb944561a4ea132300/cramjam-2.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:66a18f68506290349a256375d7aa2f645b9f7993c10fc4cc211db214e4e61d2b", size = 1861743, upload-time = "2025-07-27T21:22:03.754Z" }, + { url = "https://files.pythonhosted.org/packages/c9/35/041a3af01bf3f6158f120070f798546d4383b962b63c35cd91dcbf193e17/cramjam-2.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:50e7d65533857736cd56f6509cf2c4866f28ad84dd15b5bdbf2f8a81e77fa28a", size = 1699631, upload-time = "2025-07-27T21:22:05.192Z" }, + { url = "https://files.pythonhosted.org/packages/17/eb/5358b238808abebd0c949c42635c3751204ca7cf82b29b984abe9f5e33c8/cramjam-2.11.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1f71989668458fc327ac15396db28d92df22f8024bb12963929798b2729d2df5", size = 2025603, upload-time = "2025-07-27T21:22:06.726Z" }, + { url = "https://files.pythonhosted.org/packages/0e/79/19dba7c03a27408d8d11b5a7a4a7908459cfd4e6f375b73264dc66517bf6/cramjam-2.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee77ac543f1e2b22af1e8be3ae589f729491b6090582340aacd77d1d757d9569", size = 1766283, upload-time = "2025-07-27T21:22:08.568Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ad/40e4b3408501d886d082db465c33971655fe82573c535428e52ab905f4d0/cramjam-2.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad52784120e7e4d8a0b5b0517d185b8bf7f74f5e17272857ddc8951a628d9be1", size = 1854407, upload-time = "2025-07-27T21:22:10.518Z" }, + { url = "https://files.pythonhosted.org/packages/36/6e/c1b60ceb6d7ea6ff8b0bf197520aefe23f878bf2bfb0de65f2b0c2f82cd1/cramjam-2.11.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b86f8e6d9c1b3f9a75b2af870c93ceee0f1b827cd2507387540e053b35d7459", size = 2035793, upload-time = "2025-07-27T21:22:12.504Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ad/32a8d5f4b1e3717787945ec6d71bd1c6e6bccba4b7e903fc0d9d4e4b08c3/cramjam-2.11.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:320d61938950d95da2371b46c406ec433e7955fae9f396c8e1bf148ffc187d11", size = 2067499, upload-time = "2025-07-27T21:22:14.067Z" }, + { url = "https://files.pythonhosted.org/packages/ff/cd/3b5a662736ea62ff7fa4c4a10a85e050bfdaad375cc53dc80427e8afe41c/cramjam-2.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41eafc8c1653a35a5c7e75ad48138f9f60085cc05cd99d592e5298552d944e9f", size = 1981853, upload-time = "2025-07-27T21:22:15.908Z" }, + { url = "https://files.pythonhosted.org/packages/26/8e/1dbcfaaa7a702ee82ee683ec3a81656934dd7e04a7bc4ee854033686f98a/cramjam-2.11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03a7316c6bf763dfa34279335b27702321da44c455a64de58112968c0818ec4a", size = 2034514, upload-time = "2025-07-27T21:22:17.352Z" }, + { url = "https://files.pythonhosted.org/packages/50/62/f11709bfdce74af79a88b410dcb76dedc97612166e759136931bf63cfd7b/cramjam-2.11.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:244c2ed8bd7ccbb294a2abe7ca6498db7e89d7eb5e744691dc511a7dc82e65ca", size = 2155343, upload-time = "2025-07-27T21:22:18.854Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6d/3b98b61841a5376d9a9b8468ae58753a8e6cf22be9534a0fa5af4d8621cc/cramjam-2.11.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:405f8790bad36ce0b4bbdb964ad51507bfc7942c78447f25cb828b870a1d86a0", size = 2169367, upload-time = "2025-07-27T21:22:20.389Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/bd5db5c49dbebc8b002f1c4983101b28d2e7fc9419753db1c31ec22b03ef/cramjam-2.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b1b751a5411032b08fb3ac556160229ca01c6bbe4757bb3a9a40b951ebaac23", size = 2159334, upload-time = "2025-07-27T21:22:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/34/32/203c57acdb6eea727e7078b2219984e64ed4ad043c996ed56321301ba167/cramjam-2.11.0-cp311-cp311-win32.whl", hash = "sha256:5251585608778b9ac8effed544933df7ad85b4ba21ee9738b551f17798b215ac", size = 1605313, upload-time = "2025-07-27T21:22:24.126Z" }, + { url = "https://files.pythonhosted.org/packages/a9/bd/102d6deb87a8524ac11cddcd31a7612b8f20bf9b473c3c645045e3b957c7/cramjam-2.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:dca88bc8b68ce6d35dafd8c4d5d59a238a56c43fa02b74c2ce5f9dfb0d1ccb46", size = 1710991, upload-time = "2025-07-27T21:22:25.661Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0d/7c84c913a5fae85b773a9dcf8874390f9d68ba0fcc6630efa7ff1541b950/cramjam-2.11.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:dba5c14b8b4f73ea1e65720f5a3fe4280c1d27761238378be8274135c60bbc6e", size = 3553368, upload-time = "2025-07-27T21:22:27.162Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cc/4f6d185d8a744776f53035e72831ff8eefc2354f46ab836f4bd3c4f6c138/cramjam-2.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:11eb40722b3fcf3e6890fba46c711bf60f8dc26360a24876c85e52d76c33b25b", size = 1860014, upload-time = "2025-07-27T21:22:28.738Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a8/626c76263085c6d5ded0e71823b411e9522bfc93ba6cc59855a5869296e7/cramjam-2.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aeb26e2898994b6e8319f19a4d37c481512acdcc6d30e1b5ecc9d8ec57e835cb", size = 1693512, upload-time = "2025-07-27T21:22:30.999Z" }, + { url = "https://files.pythonhosted.org/packages/e9/52/0851a16a62447532e30ba95a80e638926fdea869a34b4b5b9d0a020083ba/cramjam-2.11.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f8d82081ed7d8fe52c982bd1f06e4c7631a73fe1fb6d4b3b3f2404f87dc40fe", size = 2025285, upload-time = "2025-07-27T21:22:32.954Z" }, + { url = "https://files.pythonhosted.org/packages/98/76/122e444f59dbc216451d8e3d8282c9665dc79eaf822f5f1470066be1b695/cramjam-2.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:092a3ec26e0a679305018380e4f652eae1b6dfe3fc3b154ee76aa6b92221a17c", size = 1761327, upload-time = "2025-07-27T21:22:34.484Z" }, + { url = "https://files.pythonhosted.org/packages/a3/bc/3a0189aef1af2b29632c039c19a7a1b752bc21a4053582a5464183a0ad3d/cramjam-2.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:529d6d667c65fd105d10bd83d1cd3f9869f8fd6c66efac9415c1812281196a92", size = 1854075, upload-time = "2025-07-27T21:22:36.157Z" }, + { url = "https://files.pythonhosted.org/packages/2e/80/8a6343b13778ce52d94bb8d5365a30c3aa951276b1857201fe79d7e2ad25/cramjam-2.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:555eb9c90c450e0f76e27d9ff064e64a8b8c6478ab1a5594c91b7bc5c82fd9f0", size = 2032710, upload-time = "2025-07-27T21:22:38.17Z" }, + { url = "https://files.pythonhosted.org/packages/df/6b/cd1778a207c29eda10791e3dfa018b588001928086e179fc71254793c625/cramjam-2.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5edf4c9e32493035b514cf2ba0c969d81ccb31de63bd05490cc8bfe3b431674e", size = 2068353, upload-time = "2025-07-27T21:22:39.615Z" }, + { url = "https://files.pythonhosted.org/packages/dc/f0/5c2a5cd5711032f3b191ca50cb786c17689b4a9255f9f768866e6c9f04d9/cramjam-2.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa2fe41f48c4d58d923803383b0737f048918b5a0d10390de9628bb6272b107", size = 1978104, upload-time = "2025-07-27T21:22:41.106Z" }, + { url = "https://files.pythonhosted.org/packages/f9/8b/b363a5fb2c3347504fe9a64f8d0f1e276844f0e532aa7162c061cd1ffee4/cramjam-2.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9ca14cf1cabdb0b77d606db1bb9e9ca593b1dbd421fcaf251ec9a5431ec449f3", size = 2030779, upload-time = "2025-07-27T21:22:42.969Z" }, + { url = "https://files.pythonhosted.org/packages/78/7b/d83dad46adb6c988a74361f81ad9c5c22642be53ad88616a19baedd06243/cramjam-2.11.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:309e95bf898829476bccf4fd2c358ec00e7ff73a12f95a3cdeeba4bb1d3683d5", size = 2155297, upload-time = "2025-07-27T21:22:44.6Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/60d9be4cb33d8740a4aa94c7513f2ef3c4eba4fd13536f086facbafade71/cramjam-2.11.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:86dca35d2f15ef22922411496c220f3c9e315d5512f316fe417461971cc1648d", size = 2169255, upload-time = "2025-07-27T21:22:46.534Z" }, + { url = "https://files.pythonhosted.org/packages/11/b0/4a595f01a243aec8ad272b160b161c44351190c35d98d7787919d962e9e5/cramjam-2.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:193c6488bd2f514cbc0bef5c18fad61a5f9c8d059dd56edf773b3b37f0e85496", size = 2155651, upload-time = "2025-07-27T21:22:48.46Z" }, + { url = "https://files.pythonhosted.org/packages/38/47/7776659aaa677046b77f527106e53ddd47373416d8fcdb1e1a881ec5dc06/cramjam-2.11.0-cp312-cp312-win32.whl", hash = "sha256:514e2c008a8b4fa823122ca3ecab896eac41d9aa0f5fc881bd6264486c204e32", size = 1603568, upload-time = "2025-07-27T21:22:50.084Z" }, + { url = "https://files.pythonhosted.org/packages/75/b1/d53002729cfd94c5844ddfaf1233c86d29f2dbfc1b764a6562c41c044199/cramjam-2.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:53fed080476d5f6ad7505883ec5d1ec28ba36c2273db3b3e92d7224fe5e463db", size = 1709287, upload-time = "2025-07-27T21:22:51.534Z" }, + { url = "https://files.pythonhosted.org/packages/0a/8b/406c5dc0f8e82385519d8c299c40fd6a56d97eca3fcd6f5da8dad48de75b/cramjam-2.11.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2c289729cc1c04e88bafa48b51082fb462b0a57dbc96494eab2be9b14dca62af", size = 3553330, upload-time = "2025-07-27T21:22:53.124Z" }, + { url = "https://files.pythonhosted.org/packages/00/ad/4186884083d6e4125b285903e17841827ab0d6d0cffc86216d27ed91e91d/cramjam-2.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:045201ee17147e36cf43d8ae2fa4b4836944ac672df5874579b81cf6d40f1a1f", size = 1859756, upload-time = "2025-07-27T21:22:54.821Z" }, + { url = "https://files.pythonhosted.org/packages/54/01/91b485cf76a7efef638151e8a7d35784dae2c4ff221b1aec2c083e4b106d/cramjam-2.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:619cd195d74c9e1d2a3ad78d63451d35379c84bd851aec552811e30842e1c67a", size = 1693609, upload-time = "2025-07-27T21:22:56.331Z" }, + { url = "https://files.pythonhosted.org/packages/cd/84/d0c80d279b2976870fc7d10f15dcb90a3c10c06566c6964b37c152694974/cramjam-2.11.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6eb3ae5ab72edb2ed68bdc0f5710f0a6cad7fd778a610ec2c31ee15e32d3921e", size = 2024912, upload-time = "2025-07-27T21:22:57.915Z" }, + { url = "https://files.pythonhosted.org/packages/d6/70/88f2a5cb904281ed5d3c111b8f7d5366639817a5470f059bcd26833fc870/cramjam-2.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7da3f4b19e3078f9635f132d31b0a8196accb2576e3213ddd7a77f93317c20", size = 1760715, upload-time = "2025-07-27T21:22:59.528Z" }, + { url = "https://files.pythonhosted.org/packages/b2/06/cf5b02081132537d28964fb385fcef9ed9f8a017dd7d8c59d317e53ba50d/cramjam-2.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57286b289cd557ac76c24479d8ecfb6c3d5b854cce54ccc7671f9a2f5e2a2708", size = 1853782, upload-time = "2025-07-27T21:23:01.07Z" }, + { url = "https://files.pythonhosted.org/packages/57/27/63525087ed40a53d1867021b9c4858b80cc86274ffe7225deed067d88d92/cramjam-2.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28952fbbf8b32c0cb7fa4be9bcccfca734bf0d0989f4b509dc7f2f70ba79ae06", size = 2032354, upload-time = "2025-07-27T21:23:03.021Z" }, + { url = "https://files.pythonhosted.org/packages/c3/ef/dbba082c6ebfb6410da4dd39a64e654d7194fcfd4567f85991a83fa4ec32/cramjam-2.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78ed2e4099812a438b545dfbca1928ec825e743cd253bc820372d6ef8c3adff4", size = 2068007, upload-time = "2025-07-27T21:23:04.526Z" }, + { url = "https://files.pythonhosted.org/packages/35/ce/d902b9358a46a086938feae83b2251720e030f06e46006f4c1fc0ac9da20/cramjam-2.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9aecd5c3845d415bd6c9957c93de8d93097e269137c2ecb0e5a5256374bdc8", size = 1977485, upload-time = "2025-07-27T21:23:06.058Z" }, + { url = "https://files.pythonhosted.org/packages/e8/03/982f54553244b0afcbdb2ad2065d460f0ab05a72a96896a969a1ca136a1e/cramjam-2.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:362fcf4d6f5e1242a4540812455f5a594949190f6fbc04f2ffbfd7ae0266d788", size = 2030447, upload-time = "2025-07-27T21:23:07.679Z" }, + { url = "https://files.pythonhosted.org/packages/74/5f/748e54cdb665ec098ec519e23caacc65fc5ae58718183b071e33fc1c45b4/cramjam-2.11.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:13240b3dea41b1174456cb9426843b085dc1a2bdcecd9ee2d8f65ac5703374b0", size = 2154949, upload-time = "2025-07-27T21:23:09.366Z" }, + { url = "https://files.pythonhosted.org/packages/69/81/c4e6cb06ed69db0dc81f9a8b1dc74995ebd4351e7a1877143f7031ff2700/cramjam-2.11.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:c54eed83726269594b9086d827decc7d2015696e31b99bf9b69b12d9063584fe", size = 2168925, upload-time = "2025-07-27T21:23:10.976Z" }, + { url = "https://files.pythonhosted.org/packages/13/5b/966365523ce8290a08e163e3b489626c5adacdff2b3da9da1b0823dfb14e/cramjam-2.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f8195006fdd0fc0a85b19df3d64a3ef8a240e483ae1dfc7ac6a4316019eb5df2", size = 2154950, upload-time = "2025-07-27T21:23:12.514Z" }, + { url = "https://files.pythonhosted.org/packages/3a/7d/7f8eb5c534b72b32c6eb79d74585bfee44a9a5647a14040bb65c31c2572d/cramjam-2.11.0-cp313-cp313-win32.whl", hash = "sha256:ccf30e3fe6d770a803dcdf3bb863fa44ba5dc2664d4610ba2746a3c73599f2e4", size = 1603199, upload-time = "2025-07-27T21:23:14.38Z" }, + { url = "https://files.pythonhosted.org/packages/37/05/47b5e0bf7c41a3b1cdd3b7c2147f880c93226a6bef1f5d85183040cbdece/cramjam-2.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:ee36348a204f0a68b03400f4736224e9f61d1c6a1582d7f875c1ca56f0254268", size = 1708924, upload-time = "2025-07-27T21:23:16.332Z" }, + { url = "https://files.pythonhosted.org/packages/de/07/a1051cdbbe6d723df16d756b97f09da7c1adb69e29695c58f0392bc12515/cramjam-2.11.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7ba5e38c9fbd06f086f4a5a64a1a5b7b417cd3f8fc07a20e5c03651f72f36100", size = 3554141, upload-time = "2025-07-27T21:23:17.938Z" }, + { url = "https://files.pythonhosted.org/packages/74/66/58487d2e16ef3d04f51a7c7f0e69823e806744b4c21101e89da4873074bc/cramjam-2.11.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:b8adeee57b41fe08e4520698a4b0bd3cc76dbd81f99424b806d70a5256a391d3", size = 1860353, upload-time = "2025-07-27T21:23:19.593Z" }, + { url = "https://files.pythonhosted.org/packages/67/b4/67f6254d166ffbcc9d5fa1b56876eaa920c32ebc8e9d3d525b27296b693b/cramjam-2.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b96a74fa03a636c8a7d76f700d50e9a8bc17a516d6a72d28711225d641e30968", size = 1693832, upload-time = "2025-07-27T21:23:21.185Z" }, + { url = "https://files.pythonhosted.org/packages/55/a3/4e0b31c0d454ae70c04684ed7c13d3c67b4c31790c278c1e788cb804fa4a/cramjam-2.11.0-cp314-cp314-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c3811a56fa32e00b377ef79121c0193311fd7501f0fb378f254c7f083cc1fbe0", size = 2027080, upload-time = "2025-07-27T21:23:23.303Z" }, + { url = "https://files.pythonhosted.org/packages/d9/c7/5e8eed361d1d3b8be14f38a54852c5370cc0ceb2c2d543b8ba590c34f080/cramjam-2.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5d927e87461f8a0d448e4ab5eb2bca9f31ca5d8ea86d70c6f470bb5bc666d7e", size = 1761543, upload-time = "2025-07-27T21:23:24.991Z" }, + { url = "https://files.pythonhosted.org/packages/09/0c/06b7f8b0ce9fde89470505116a01fc0b6cb92d406c4fb1e46f168b5d3fa5/cramjam-2.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f1f5c450121430fd89cb5767e0a9728ecc65997768fd4027d069cb0368af62f9", size = 1854636, upload-time = "2025-07-27T21:23:26.987Z" }, + { url = "https://files.pythonhosted.org/packages/6f/c6/6ebc02c9d5acdf4e5f2b1ec6e1252bd5feee25762246798ae823b3347457/cramjam-2.11.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:724aa7490be50235d97f07e2ca10067927c5d7f336b786ddbc868470e822aa25", size = 2032715, upload-time = "2025-07-27T21:23:28.603Z" }, + { url = "https://files.pythonhosted.org/packages/a2/77/a122971c23f5ca4b53e4322c647ac7554626c95978f92d19419315dddd05/cramjam-2.11.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:54c4637122e7cfd7aac5c1d3d4c02364f446d6923ea34cf9d0e8816d6e7a4936", size = 2069039, upload-time = "2025-07-27T21:23:30.319Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/f6121b90b86b9093c066889274d26a1de3f29969d45c2ed1ecbe2033cb78/cramjam-2.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17eb39b1696179fb471eea2de958fa21f40a2cd8bf6b40d428312d5541e19dc4", size = 1979566, upload-time = "2025-07-27T21:23:32.002Z" }, + { url = "https://files.pythonhosted.org/packages/e0/a3/f95bc57fd7f4166ce6da816cfa917fb7df4bb80e669eb459d85586498414/cramjam-2.11.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:36aa5a798aa34e11813a80425a30d8e052d8de4a28f27bfc0368cfc454d1b403", size = 2030905, upload-time = "2025-07-27T21:23:33.696Z" }, + { url = "https://files.pythonhosted.org/packages/fc/52/e429de4e8bc86ee65e090dae0f87f45abd271742c63fb2d03c522ffde28a/cramjam-2.11.0-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:449fca52774dc0199545fbf11f5128933e5a6833946707885cf7be8018017839", size = 2155592, upload-time = "2025-07-27T21:23:35.375Z" }, + { url = "https://files.pythonhosted.org/packages/6c/6c/65a7a0207787ad39ad804af4da7f06a60149de19481d73d270b540657234/cramjam-2.11.0-cp314-cp314-musllinux_1_1_i686.whl", hash = "sha256:d87d37b3d476f4f7623c56a232045d25bd9b988314702ea01bd9b4a94948a778", size = 2170839, upload-time = "2025-07-27T21:23:37.197Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c5/5c5db505ba692bc844246b066e23901d5905a32baf2f33719c620e65887f/cramjam-2.11.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:26cb45c47d71982d76282e303931c6dd4baee1753e5d48f9a89b3a63e690b3a3", size = 2157236, upload-time = "2025-07-27T21:23:38.854Z" }, + { url = "https://files.pythonhosted.org/packages/b0/22/88e6693e60afe98901e5bbe91b8dea193e3aa7f42e2770f9c3339f5c1065/cramjam-2.11.0-cp314-cp314-win32.whl", hash = "sha256:4efe919d443c2fd112fe25fe636a52f9628250c9a50d9bddb0488d8a6c09acc6", size = 1604136, upload-time = "2025-07-27T21:23:40.56Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f8/01618801cd59ccedcc99f0f96d20be67d8cfc3497da9ccaaad6b481781dd/cramjam-2.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:ccec3524ea41b9abd5600e3e27001fd774199dbb4f7b9cb248fcee37d4bda84c", size = 1710272, upload-time = "2025-07-27T21:23:42.236Z" }, + { url = "https://files.pythonhosted.org/packages/40/81/6cdb3ed222d13ae86bda77aafe8d50566e81a1169d49ed195b6263610704/cramjam-2.11.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:966ac9358b23d21ecd895c418c048e806fd254e46d09b1ff0cdad2eba195ea3e", size = 3559671, upload-time = "2025-07-27T21:23:44.504Z" }, + { url = "https://files.pythonhosted.org/packages/cb/43/52b7e54fe5ba1ef0270d9fdc43dabd7971f70ea2d7179be918c997820247/cramjam-2.11.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:387f09d647a0d38dcb4539f8a14281f8eb6bb1d3e023471eb18a5974b2121c86", size = 1867876, upload-time = "2025-07-27T21:23:46.987Z" }, + { url = "https://files.pythonhosted.org/packages/9d/28/30d5b8d10acd30db3193bc562a313bff722888eaa45cfe32aa09389f2b24/cramjam-2.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:665b0d8fbbb1a7f300265b43926457ec78385200133e41fef19d85790fc1e800", size = 1695562, upload-time = "2025-07-27T21:23:48.644Z" }, + { url = "https://files.pythonhosted.org/packages/d9/86/ec806f986e01b896a650655024ea52a13e25c3ac8a3a382f493089483cdc/cramjam-2.11.0-cp314-cp314t-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ca905387c7a371531b9622d93471be4d745ef715f2890c3702479cd4fc85aa51", size = 2025056, upload-time = "2025-07-27T21:23:50.404Z" }, + { url = "https://files.pythonhosted.org/packages/09/43/c2c17586b90848d29d63181f7d14b8bd3a7d00975ad46e3edf2af8af7e1f/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1aa56aef2c8af55a21ed39040a94a12b53fb23beea290f94d19a76027e2ffb", size = 1764084, upload-time = "2025-07-27T21:23:52.265Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a9/68bc334fadb434a61df10071dc8606702aa4f5b6cdb2df62474fc21d2845/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5db59c1cdfaa2ab85cc988e602d6919495f735ca8a5fd7603608eb1e23c26d5", size = 1854859, upload-time = "2025-07-27T21:23:54.085Z" }, + { url = "https://files.pythonhosted.org/packages/5b/4e/b48e67835b5811ec5e9cb2e2bcba9c3fd76dab3e732569fe801b542c6ca9/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1f893014f00fe5e89a660a032e813bf9f6d91de74cd1490cdb13b2b59d0c9a3", size = 2035970, upload-time = "2025-07-27T21:23:55.758Z" }, + { url = "https://files.pythonhosted.org/packages/c4/70/d2ac33d572b4d90f7f0f2c8a1d60fb48f06b128fdc2c05f9b49891bb0279/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c26a1eb487947010f5de24943bd7c422dad955b2b0f8650762539778c380ca89", size = 2069320, upload-time = "2025-07-27T21:23:57.494Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4c/85cec77af4a74308ba5fca8e296c4e2f80ec465c537afc7ab1e0ca2f9a00/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d5c8bfb438d94e7b892d1426da5fc4b4a5370cc360df9b8d9d77c33b896c37e", size = 1982668, upload-time = "2025-07-27T21:23:59.126Z" }, + { url = "https://files.pythonhosted.org/packages/55/45/938546d1629e008cc3138df7c424ef892719b1796ff408a2ab8550032e5e/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:cb1fb8c9337ab0da25a01c05d69a0463209c347f16512ac43be5986f3d1ebaf4", size = 2034028, upload-time = "2025-07-27T21:24:00.865Z" }, + { url = "https://files.pythonhosted.org/packages/01/76/b5a53e20505555f1640e66dcf70394bcf51a1a3a072aa18ea35135a0f9ed/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:1f6449f6de52dde3e2f1038284910c8765a397a25e2d05083870f3f5e7fc682c", size = 2155513, upload-time = "2025-07-27T21:24:02.92Z" }, + { url = "https://files.pythonhosted.org/packages/84/12/8d3f6ceefae81bbe45a347fdfa2219d9f3ac75ebc304f92cd5fcb4fbddc5/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_i686.whl", hash = "sha256:382dec4f996be48ed9c6958d4e30c2b89435d7c2c4dbf32480b3b8886293dd65", size = 2170035, upload-time = "2025-07-27T21:24:04.558Z" }, + { url = "https://files.pythonhosted.org/packages/4b/85/3be6f0a1398f976070672be64f61895f8839857618a2d8cc0d3ab529d3dc/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:d388bd5723732c3afe1dd1d181e4213cc4e1be210b080572e7d5749f6e955656", size = 2160229, upload-time = "2025-07-27T21:24:06.729Z" }, + { url = "https://files.pythonhosted.org/packages/57/5e/66cfc3635511b20014bbb3f2ecf0095efb3049e9e96a4a9e478e4f3d7b78/cramjam-2.11.0-cp314-cp314t-win32.whl", hash = "sha256:0a70ff17f8e1d13f322df616505550f0f4c39eda62290acb56f069d4857037c8", size = 1610267, upload-time = "2025-07-27T21:24:08.428Z" }, + { url = "https://files.pythonhosted.org/packages/ce/c6/c71e82e041c95ffe6a92ac707785500aa2a515a4339c2c7dd67e3c449249/cramjam-2.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:028400d699442d40dbda02f74158c73d05cb76587a12490d0bfedd958fd49188", size = 1713108, upload-time = "2025-07-27T21:24:10.147Z" }, + { url = "https://files.pythonhosted.org/packages/8c/33/3d7a7fbfb313614d59ae2e512b9dacfc22efb07c20e4af7deb73d3409f7b/cramjam-2.11.0-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2581e82dca742b55d8b1d7f33892394c06b057a74f2853ffcb0802dcddcbf694", size = 3559843, upload-time = "2025-07-27T21:24:11.928Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b0/ccf09697df7fcc750c4913dc4bf3fb91e5b778dda65fb9fa55dde61c03dc/cramjam-2.11.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a9994a42cd12f07ece04eff94dbf6e127b3986f7af9b26db1eb4545c477a6604", size = 1862081, upload-time = "2025-07-27T21:24:13.8Z" }, + { url = "https://files.pythonhosted.org/packages/41/55/d36255f1a9004a3352469143d2b8a5b769e0eb4e484a8192da41ad67e893/cramjam-2.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a4963dac24213690183110d6b41125fdc4af871a5a213589d6c6606d49e1b949", size = 1699970, upload-time = "2025-07-27T21:24:15.547Z" }, + { url = "https://files.pythonhosted.org/packages/35/52/722a2efbe104903648185411f9c634e5678035476bc556001d6ef811e191/cramjam-2.11.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9af16f0b07d851b968c54e52d19430d820bb47c26d10a09cfb5c7127de26773", size = 2025715, upload-time = "2025-07-27T21:24:17.327Z" }, + { url = "https://files.pythonhosted.org/packages/0a/60/75084f30277d5f2481d20a544654894a32528f98f4415c1bd467823ab5b2/cramjam-2.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e2400c09ba620e2ca91a903dbe907d75f6a1994d8337e9f3026778daa92b08d", size = 1766999, upload-time = "2025-07-27T21:24:19.163Z" }, + { url = "https://files.pythonhosted.org/packages/89/5c/2663bdfcea6ab06fcac97883b5b574a12236c5d9f70691cc05dd49cb10fb/cramjam-2.11.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b820004db8b22715cee2ef154d4b47b3d76c4677ff217c587dd46f694a3052f9", size = 1854352, upload-time = "2025-07-27T21:24:20.953Z" }, + { url = "https://files.pythonhosted.org/packages/b4/df/1db5b57ccf77e923687b2061766e69c2cbdaf41641204207dbf55ef7ebe9/cramjam-2.11.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:261e9200942189d8201a005ffa1e29339479364b5b0013ab0758b03229d9ac67", size = 2036219, upload-time = "2025-07-27T21:24:23.029Z" }, + { url = "https://files.pythonhosted.org/packages/f7/28/fa3b017668a3264068c893e57a6b923dfd8fa851a1c821c4cc1c95cd47a6/cramjam-2.11.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24c61f1fad56ca68aee53bf67b6a84cd762a2c71ee4b71064378547c2411ae6", size = 2077245, upload-time = "2025-07-27T21:24:25.127Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1d/6f6018ee81acec6c4ef6cda6bd0770959992caf2f1c41e7944a135a53eca/cramjam-2.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab86d22f69a21961f35d1a1b02278b5bb9a95c5f5b4722c6904bca343c8d219f", size = 1982235, upload-time = "2025-07-27T21:24:26.851Z" }, + { url = "https://files.pythonhosted.org/packages/31/b4/c38f6077d8ec7c9208d23d4f7f19a618f5b4940170c9deba5d3bdc722eb6/cramjam-2.11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a88bc9b191422cd5b22a1521b28607008590628b6b2a8a7db5c54ec04dc82fa1", size = 2034629, upload-time = "2025-07-27T21:24:28.694Z" }, + { url = "https://files.pythonhosted.org/packages/66/3b/3f46a349b1a7a67e2bda10e99403e9163c87c95e34399cc69f4f86a2461a/cramjam-2.11.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:7855bc4df5ed5f7fb1c98ea3fd98292e9acd3c097b1b21d596a69e1e60455400", size = 2155552, upload-time = "2025-07-27T21:24:30.572Z" }, + { url = "https://files.pythonhosted.org/packages/ed/86/b431a51162d4c8f33b28bdcca047382e1038757d43625e65c8d29ed6c31f/cramjam-2.11.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:19eb43e21db9dc42613599703c1a8e40b0170514a313f11f4c8be380425a1019", size = 2169651, upload-time = "2025-07-27T21:24:32.331Z" }, + { url = "https://files.pythonhosted.org/packages/6e/d5/9aa69784da58b6bd3f5abcaad2eb76ad2a89efde7929821bad17355fd8da/cramjam-2.11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cec977d673ad596bae6bdfc0091ee386cef05b515b23f2ce52f9fadd0156186a", size = 2159740, upload-time = "2025-07-27T21:24:34.108Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e1/75706936eb81605a939e15b8b7a1241b35e805ce76a64838b4586c440f61/cramjam-2.11.0-cp39-cp39-win32.whl", hash = "sha256:dcc3b15b97f3054964b47e2a5fcfb4f5ff569e9af0a7af19f1d4c5f4231bbf3b", size = 1605449, upload-time = "2025-07-27T21:24:36.538Z" }, + { url = "https://files.pythonhosted.org/packages/37/6b/ae7626994c7285bfc0ffa0d9929c3c16f2d0aea5b9e151dad82fd0616762/cramjam-2.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:5eb0603d8f8019451fc00e1daf4022dfc9df59c16d2e68f925c77ac94555493b", size = 1710860, upload-time = "2025-07-27T21:24:38.243Z" }, + { url = "https://files.pythonhosted.org/packages/bf/8f/82e35ec3c5387f1864f46b3c24bce89a07af8bb3ef242ae47281db2c1848/cramjam-2.11.0-pp310-pypy310_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:37bed927abc4a7ae2d2669baa3675e21904d8a038ed8e4313326ea7b3be62b2b", size = 3573104, upload-time = "2025-07-27T21:24:40.069Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4e/0c821918080a32ba1e52c040e12dd02dada67728f07305c5f778b808a807/cramjam-2.11.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:50e4a58635fa8c6897d84847d6e065eb69f92811670fc5e9f2d9e3b6279a02b6", size = 1873441, upload-time = "2025-07-27T21:24:42.333Z" }, + { url = "https://files.pythonhosted.org/packages/a8/fd/848d077bf6abc4ce84273d8e3f3a70d61a2240519a339462f699d8acf829/cramjam-2.11.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d1ba626dd5f81f7f09bbf59f70b534e2b75e0d6582b056b7bd31b397f1c13e9", size = 1702589, upload-time = "2025-07-27T21:24:44.305Z" }, + { url = "https://files.pythonhosted.org/packages/9d/1c/899818999bbdb59c601756b413e87d37fd65875d1315346c10e367bb3505/cramjam-2.11.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c71e140d5eb3145d61d59d0be0bf72f07cc4cf4b32cb136b09f712a3b1040f5f", size = 1773646, upload-time = "2025-07-27T21:24:46.495Z" }, + { url = "https://files.pythonhosted.org/packages/5f/26/c2813c5422c43b3dcd8b6645bc359f08870737c44325ee4accc18f24eee0/cramjam-2.11.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a6ed7926a5cca28edebad7d0fedd2ad492710ae3524d25fc59a2b20546d9ce1", size = 1994179, upload-time = "2025-07-27T21:24:49.131Z" }, + { url = "https://files.pythonhosted.org/packages/2e/4f/af984f8d7f963f0301812cdd620ddcfd8276461ed7a786c0f89e82b14739/cramjam-2.11.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5eb4ed3cea945b164b0513fd491884993acac2153a27b93a84019c522e8eda82", size = 1714790, upload-time = "2025-07-27T21:24:51.045Z" }, + { url = "https://files.pythonhosted.org/packages/81/da/b3301962ccd6fce9fefa1ecd8ea479edaeaa38fadb1f34d5391d2587216a/cramjam-2.11.0-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:52d5db3369f95b27b9f3c14d067acb0b183333613363ed34268c9e04560f997f", size = 3573546, upload-time = "2025-07-27T21:24:52.944Z" }, + { url = "https://files.pythonhosted.org/packages/b6/c2/410ddb8ad4b9dfb129284666293cb6559479645da560f7077dc19d6bee9e/cramjam-2.11.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4820516366d455b549a44d0e2210ee7c4575882dda677564ce79092588321d54", size = 1873654, upload-time = "2025-07-27T21:24:54.958Z" }, + { url = "https://files.pythonhosted.org/packages/d5/99/f68a443c64f7ce7aff5bed369b0aa5b2fac668fa3dfd441837e316e97a1f/cramjam-2.11.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d9e5db525dc0a950a825202f84ee68d89a072479e07da98795a3469df942d301", size = 1702846, upload-time = "2025-07-27T21:24:57.124Z" }, + { url = "https://files.pythonhosted.org/packages/6c/02/0ff358ab773def1ee3383587906c453d289953171e9c92db84fdd01bf172/cramjam-2.11.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62ab4971199b2270005359cdc379bc5736071dc7c9a228581c5122d9ffaac50c", size = 1773683, upload-time = "2025-07-27T21:24:59.28Z" }, + { url = "https://files.pythonhosted.org/packages/e9/31/3298e15f87c9cf2aabdbdd90b153d8644cf989cb42a45d68a1b71e1f7aaf/cramjam-2.11.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24758375cc5414d3035ca967ebb800e8f24604ececcba3c67d6f0218201ebf2d", size = 1994136, upload-time = "2025-07-27T21:25:01.565Z" }, + { url = "https://files.pythonhosted.org/packages/c7/90/20d1747255f1ee69a412e319da51ea594c18cca195e7a4d4c713f045eff5/cramjam-2.11.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6c2eea545fef1065c7dd4eda991666fd9c783fbc1d226592ccca8d8891c02f23", size = 1714982, upload-time = "2025-07-27T21:25:05.79Z" }, ] [[package]] name = "cryptography" -version = "44.0.0" +version = "45.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = "sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, - { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, - { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, - { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, - { url = "https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, - { url = "https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, - { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, - { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, - { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, - { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, - { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, - { url = "https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, - { url = "https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, - { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, - { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, - { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, - { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, - { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, - { url = "https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, - { url = "https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, - { url = "https://files.pythonhosted.org/packages/77/d4/fea74422326388bbac0c37b7489a0fcb1681a698c3b875959430ba550daa/cryptography-44.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37d76e6863da3774cd9db5b409a9ecfd2c71c981c38788d3fcfaf177f447b731", size = 3338857 }, - { url = "https://files.pythonhosted.org/packages/1a/aa/ba8a7467c206cb7b62f09b4168da541b5109838627f582843bbbe0235e8e/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f677e1268c4e23420c3acade68fac427fffcb8d19d7df95ed7ad17cdef8404f4", size = 3850615 }, - { url = "https://files.pythonhosted.org/packages/89/fa/b160e10a64cc395d090105be14f399b94e617c879efd401188ce0fea39ee/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f5e7cb1e5e56ca0933b4873c0220a78b773b24d40d186b6738080b73d3d0a756", size = 4081622 }, - { url = "https://files.pythonhosted.org/packages/47/8f/20ff0656bb0cf7af26ec1d01f780c5cfbaa7666736063378c5f48558b515/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:8b3e6eae66cf54701ee7d9c83c30ac0a1e3fa17be486033000f2a73a12ab507c", size = 3867546 }, - { url = "https://files.pythonhosted.org/packages/38/d9/28edf32ee2fcdca587146bcde90102a7319b2f2c690edfa627e46d586050/cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:be4ce505894d15d5c5037167ffb7f0ae90b7be6f2a98f9a5c3442395501c32fa", size = 4090937 }, - { url = "https://files.pythonhosted.org/packages/cc/9d/37e5da7519de7b0b070a3fedd4230fe76d50d2a21403e0f2153d70ac4163/cryptography-44.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:62901fb618f74d7d81bf408c8719e9ec14d863086efe4185afd07c352aee1d2c", size = 3128774 }, + { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*' and platform_python_implementation != 'PyPy'" }, + { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*' and platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/1e/49527ac611af559665f71cbb8f92b332b5ec9c6fbc4e88b0f8e92f5e85df/cryptography-45.0.5.tar.gz", hash = "sha256:72e76caa004ab63accdf26023fccd1d087f6d90ec6048ff33ad0445abf7f605a", size = 744903, upload-time = "2025-07-02T13:06:25.941Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/fb/09e28bc0c46d2c547085e60897fea96310574c70fb21cd58a730a45f3403/cryptography-45.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:101ee65078f6dd3e5a028d4f19c07ffa4dd22cce6a20eaa160f8b5219911e7d8", size = 7043092, upload-time = "2025-07-02T13:05:01.514Z" }, + { url = "https://files.pythonhosted.org/packages/b1/05/2194432935e29b91fb649f6149c1a4f9e6d3d9fc880919f4ad1bcc22641e/cryptography-45.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a264aae5f7fbb089dbc01e0242d3b67dffe3e6292e1f5182122bdf58e65215d", size = 4205926, upload-time = "2025-07-02T13:05:04.741Z" }, + { url = "https://files.pythonhosted.org/packages/07/8b/9ef5da82350175e32de245646b1884fc01124f53eb31164c77f95a08d682/cryptography-45.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e74d30ec9c7cb2f404af331d5b4099a9b322a8a6b25c4632755c8757345baac5", size = 4429235, upload-time = "2025-07-02T13:05:07.084Z" }, + { url = "https://files.pythonhosted.org/packages/7c/e1/c809f398adde1994ee53438912192d92a1d0fc0f2d7582659d9ef4c28b0c/cryptography-45.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3af26738f2db354aafe492fb3869e955b12b2ef2e16908c8b9cb928128d42c57", size = 4209785, upload-time = "2025-07-02T13:05:09.321Z" }, + { url = "https://files.pythonhosted.org/packages/d0/8b/07eb6bd5acff58406c5e806eff34a124936f41a4fb52909ffa4d00815f8c/cryptography-45.0.5-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e6c00130ed423201c5bc5544c23359141660b07999ad82e34e7bb8f882bb78e0", size = 3893050, upload-time = "2025-07-02T13:05:11.069Z" }, + { url = "https://files.pythonhosted.org/packages/ec/ef/3333295ed58d900a13c92806b67e62f27876845a9a908c939f040887cca9/cryptography-45.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:dd420e577921c8c2d31289536c386aaa30140b473835e97f83bc71ea9d2baf2d", size = 4457379, upload-time = "2025-07-02T13:05:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9d/44080674dee514dbb82b21d6fa5d1055368f208304e2ab1828d85c9de8f4/cryptography-45.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:d05a38884db2ba215218745f0781775806bde4f32e07b135348355fe8e4991d9", size = 4209355, upload-time = "2025-07-02T13:05:15.017Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d8/0749f7d39f53f8258e5c18a93131919ac465ee1f9dccaf1b3f420235e0b5/cryptography-45.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ad0caded895a00261a5b4aa9af828baede54638754b51955a0ac75576b831b27", size = 4456087, upload-time = "2025-07-02T13:05:16.945Z" }, + { url = "https://files.pythonhosted.org/packages/09/d7/92acac187387bf08902b0bf0699816f08553927bdd6ba3654da0010289b4/cryptography-45.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9024beb59aca9d31d36fcdc1604dd9bbeed0a55bface9f1908df19178e2f116e", size = 4332873, upload-time = "2025-07-02T13:05:18.743Z" }, + { url = "https://files.pythonhosted.org/packages/03/c2/840e0710da5106a7c3d4153c7215b2736151bba60bf4491bdb421df5056d/cryptography-45.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:91098f02ca81579c85f66df8a588c78f331ca19089763d733e34ad359f474174", size = 4564651, upload-time = "2025-07-02T13:05:21.382Z" }, + { url = "https://files.pythonhosted.org/packages/2e/92/cc723dd6d71e9747a887b94eb3827825c6c24b9e6ce2bb33b847d31d5eaa/cryptography-45.0.5-cp311-abi3-win32.whl", hash = "sha256:926c3ea71a6043921050eaa639137e13dbe7b4ab25800932a8498364fc1abec9", size = 2929050, upload-time = "2025-07-02T13:05:23.39Z" }, + { url = "https://files.pythonhosted.org/packages/1f/10/197da38a5911a48dd5389c043de4aec4b3c94cb836299b01253940788d78/cryptography-45.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:b85980d1e345fe769cfc57c57db2b59cff5464ee0c045d52c0df087e926fbe63", size = 3403224, upload-time = "2025-07-02T13:05:25.202Z" }, + { url = "https://files.pythonhosted.org/packages/fe/2b/160ce8c2765e7a481ce57d55eba1546148583e7b6f85514472b1d151711d/cryptography-45.0.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f3562c2f23c612f2e4a6964a61d942f891d29ee320edb62ff48ffb99f3de9ae8", size = 7017143, upload-time = "2025-07-02T13:05:27.229Z" }, + { url = "https://files.pythonhosted.org/packages/c2/e7/2187be2f871c0221a81f55ee3105d3cf3e273c0a0853651d7011eada0d7e/cryptography-45.0.5-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3fcfbefc4a7f332dece7272a88e410f611e79458fab97b5efe14e54fe476f4fd", size = 4197780, upload-time = "2025-07-02T13:05:29.299Z" }, + { url = "https://files.pythonhosted.org/packages/b9/cf/84210c447c06104e6be9122661159ad4ce7a8190011669afceeaea150524/cryptography-45.0.5-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:460f8c39ba66af7db0545a8c6f2eabcbc5a5528fc1cf6c3fa9a1e44cec33385e", size = 4420091, upload-time = "2025-07-02T13:05:31.221Z" }, + { url = "https://files.pythonhosted.org/packages/3e/6a/cb8b5c8bb82fafffa23aeff8d3a39822593cee6e2f16c5ca5c2ecca344f7/cryptography-45.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9b4cf6318915dccfe218e69bbec417fdd7c7185aa7aab139a2c0beb7468c89f0", size = 4198711, upload-time = "2025-07-02T13:05:33.062Z" }, + { url = "https://files.pythonhosted.org/packages/04/f7/36d2d69df69c94cbb2473871926daf0f01ad8e00fe3986ac3c1e8c4ca4b3/cryptography-45.0.5-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2089cc8f70a6e454601525e5bf2779e665d7865af002a5dec8d14e561002e135", size = 3883299, upload-time = "2025-07-02T13:05:34.94Z" }, + { url = "https://files.pythonhosted.org/packages/82/c7/f0ea40f016de72f81288e9fe8d1f6748036cb5ba6118774317a3ffc6022d/cryptography-45.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0027d566d65a38497bc37e0dd7c2f8ceda73597d2ac9ba93810204f56f52ebc7", size = 4450558, upload-time = "2025-07-02T13:05:37.288Z" }, + { url = "https://files.pythonhosted.org/packages/06/ae/94b504dc1a3cdf642d710407c62e86296f7da9e66f27ab12a1ee6fdf005b/cryptography-45.0.5-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:be97d3a19c16a9be00edf79dca949c8fa7eff621763666a145f9f9535a5d7f42", size = 4198020, upload-time = "2025-07-02T13:05:39.102Z" }, + { url = "https://files.pythonhosted.org/packages/05/2b/aaf0adb845d5dabb43480f18f7ca72e94f92c280aa983ddbd0bcd6ecd037/cryptography-45.0.5-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:7760c1c2e1a7084153a0f68fab76e754083b126a47d0117c9ed15e69e2103492", size = 4449759, upload-time = "2025-07-02T13:05:41.398Z" }, + { url = "https://files.pythonhosted.org/packages/91/e4/f17e02066de63e0100a3a01b56f8f1016973a1d67551beaf585157a86b3f/cryptography-45.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6ff8728d8d890b3dda5765276d1bc6fb099252915a2cd3aff960c4c195745dd0", size = 4319991, upload-time = "2025-07-02T13:05:43.64Z" }, + { url = "https://files.pythonhosted.org/packages/f2/2e/e2dbd629481b499b14516eed933f3276eb3239f7cee2dcfa4ee6b44d4711/cryptography-45.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7259038202a47fdecee7e62e0fd0b0738b6daa335354396c6ddebdbe1206af2a", size = 4554189, upload-time = "2025-07-02T13:05:46.045Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ea/a78a0c38f4c8736287b71c2ea3799d173d5ce778c7d6e3c163a95a05ad2a/cryptography-45.0.5-cp37-abi3-win32.whl", hash = "sha256:1e1da5accc0c750056c556a93c3e9cb828970206c68867712ca5805e46dc806f", size = 2911769, upload-time = "2025-07-02T13:05:48.329Z" }, + { url = "https://files.pythonhosted.org/packages/79/b3/28ac139109d9005ad3f6b6f8976ffede6706a6478e21c889ce36c840918e/cryptography-45.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:90cb0a7bb35959f37e23303b7eed0a32280510030daba3f7fdfbb65defde6a97", size = 3390016, upload-time = "2025-07-02T13:05:50.811Z" }, + { url = "https://files.pythonhosted.org/packages/f8/8b/34394337abe4566848a2bd49b26bcd4b07fd466afd3e8cce4cb79a390869/cryptography-45.0.5-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:206210d03c1193f4e1ff681d22885181d47efa1ab3018766a7b32a7b3d6e6afd", size = 3575762, upload-time = "2025-07-02T13:05:53.166Z" }, + { url = "https://files.pythonhosted.org/packages/8b/5d/a19441c1e89afb0f173ac13178606ca6fab0d3bd3ebc29e9ed1318b507fc/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c648025b6840fe62e57107e0a25f604db740e728bd67da4f6f060f03017d5097", size = 4140906, upload-time = "2025-07-02T13:05:55.914Z" }, + { url = "https://files.pythonhosted.org/packages/4b/db/daceb259982a3c2da4e619f45b5bfdec0e922a23de213b2636e78ef0919b/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b8fa8b0a35a9982a3c60ec79905ba5bb090fc0b9addcfd3dc2dd04267e45f25e", size = 4374411, upload-time = "2025-07-02T13:05:57.814Z" }, + { url = "https://files.pythonhosted.org/packages/6a/35/5d06ad06402fc522c8bf7eab73422d05e789b4e38fe3206a85e3d6966c11/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:14d96584701a887763384f3c47f0ca7c1cce322aa1c31172680eb596b890ec30", size = 4140942, upload-time = "2025-07-02T13:06:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/65/79/020a5413347e44c382ef1f7f7e7a66817cd6273e3e6b5a72d18177b08b2f/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57c816dfbd1659a367831baca4b775b2a5b43c003daf52e9d57e1d30bc2e1b0e", size = 4374079, upload-time = "2025-07-02T13:06:02.043Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c5/c0e07d84a9a2a8a0ed4f865e58f37c71af3eab7d5e094ff1b21f3f3af3bc/cryptography-45.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b9e38e0a83cd51e07f5a48ff9691cae95a79bea28fe4ded168a8e5c6c77e819d", size = 3321362, upload-time = "2025-07-02T13:06:04.463Z" }, + { url = "https://files.pythonhosted.org/packages/c0/71/9bdbcfd58d6ff5084687fe722c58ac718ebedbc98b9f8f93781354e6d286/cryptography-45.0.5-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8c4a6ff8a30e9e3d38ac0539e9a9e02540ab3f827a3394f8852432f6b0ea152e", size = 3587878, upload-time = "2025-07-02T13:06:06.339Z" }, + { url = "https://files.pythonhosted.org/packages/f0/63/83516cfb87f4a8756eaa4203f93b283fda23d210fc14e1e594bd5f20edb6/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bd4c45986472694e5121084c6ebbd112aa919a25e783b87eb95953c9573906d6", size = 4152447, upload-time = "2025-07-02T13:06:08.345Z" }, + { url = "https://files.pythonhosted.org/packages/22/11/d2823d2a5a0bd5802b3565437add16f5c8ce1f0778bf3822f89ad2740a38/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:982518cd64c54fcada9d7e5cf28eabd3ee76bd03ab18e08a48cad7e8b6f31b18", size = 4386778, upload-time = "2025-07-02T13:06:10.263Z" }, + { url = "https://files.pythonhosted.org/packages/5f/38/6bf177ca6bce4fe14704ab3e93627c5b0ca05242261a2e43ef3168472540/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:12e55281d993a793b0e883066f590c1ae1e802e3acb67f8b442e721e475e6463", size = 4151627, upload-time = "2025-07-02T13:06:13.097Z" }, + { url = "https://files.pythonhosted.org/packages/38/6a/69fc67e5266bff68a91bcb81dff8fb0aba4d79a78521a08812048913e16f/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:5aa1e32983d4443e310f726ee4b071ab7569f58eedfdd65e9675484a4eb67bd1", size = 4385593, upload-time = "2025-07-02T13:06:15.689Z" }, + { url = "https://files.pythonhosted.org/packages/f6/34/31a1604c9a9ade0fdab61eb48570e09a796f4d9836121266447b0eaf7feb/cryptography-45.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:e357286c1b76403dd384d938f93c46b2b058ed4dfcdce64a770f0537ed3feb6f", size = 3331106, upload-time = "2025-07-02T13:06:18.058Z" }, ] [[package]] name = "decorator" -version = "5.1.1" +version = "5.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] [[package]] name = "distlib" -version = "0.3.9" +version = "0.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923 } +sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973 }, + { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, ] [[package]] name = "dnspython" version = "2.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197 } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197, upload-time = "2024-10-05T20:14:59.362Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632 }, + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632, upload-time = "2024-10-05T20:14:57.687Z" }, ] [[package]] name = "docutils" version = "0.21.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444, upload-time = "2024-04-23T18:57:18.24Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408, upload-time = "2024-04-23T18:57:14.835Z" }, ] [[package]] name = "eventlet" -version = "0.38.2" +version = "0.40.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "dnspython" }, { name = "greenlet" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a6/4e/f974cc85b8d19b31176e0cca90e1650156f385c9c294a96fc42846ca75e9/eventlet-0.38.2.tar.gz", hash = "sha256:6a46823af1dca7d29cf04c0d680365805435473c3acbffc176765c7f8787edac", size = 561526 } +sdist = { url = "https://files.pythonhosted.org/packages/bf/a3/500893510ad316fc571d116d407ea17d6007a8ecdb0a456badb66eee42ae/eventlet-0.40.2.tar.gz", hash = "sha256:42636c277f761d026905cd0ba0a11edec7600001be401d6ae7e9546559c8d8b0", size = 565548, upload-time = "2025-07-22T14:49:54.317Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/aa/07/00feb2c708d71796e190a3051a0d530a4922bfb6b346aa8302725840698c/eventlet-0.38.2-py3-none-any.whl", hash = "sha256:4a2e3cbc53917c8f39074ccf689501168563d3a4df59e9cddd5e9d3b7f85c599", size = 363192 }, + { url = "https://files.pythonhosted.org/packages/b6/41/2e2d46f31ed22c1c147936145badb86e0e28ba7fe7d7a54aa69849a93a52/eventlet-0.40.2-py3-none-any.whl", hash = "sha256:590c67b982015bc6b753a5303f3ec7356bc7890a39efd65176179f0113f5d35e", size = 364228, upload-time = "2025-07-22T14:49:52.082Z" }, ] [[package]] name = "exceptiongroup" -version = "1.2.2" +version = "1.3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 }, + { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, ] [[package]] name = "filelock" -version = "3.16.1" +version = "3.18.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037 } +sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163 }, + { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" }, ] [[package]] name = "furo" -version = "2024.8.6" +version = "2025.7.19" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "accessible-pygments" }, { name = "beautifulsoup4" }, { name = "pygments" }, { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "sphinx-basic-ng" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a0/e2/d351d69a9a9e4badb4a5be062c2d0e87bd9e6c23b5e57337fef14bef34c8/furo-2024.8.6.tar.gz", hash = "sha256:b63e4cee8abfc3136d3bc03a3d45a76a850bada4d6374d24c1716b0e01394a01", size = 1661506 } +sdist = { url = "https://files.pythonhosted.org/packages/d0/69/312cd100fa45ddaea5a588334d2defa331ff427bcb61f5fe2ae61bdc3762/furo-2025.7.19.tar.gz", hash = "sha256:4164b2cafcf4023a59bb3c594e935e2516f6b9d35e9a5ea83d8f6b43808fe91f", size = 1662054, upload-time = "2025-07-19T10:52:09.754Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/27/48/e791a7ed487dbb9729ef32bb5d1af16693d8925f4366befef54119b2e576/furo-2024.8.6-py3-none-any.whl", hash = "sha256:6cd97c58b47813d3619e63e9081169880fbe331f0ca883c871ff1f3f11814f5c", size = 341333 }, + { url = "https://files.pythonhosted.org/packages/3a/34/2b07b72bee02a63241d654f5d8af87a2de977c59638eec41ca356ab915cd/furo-2025.7.19-py3-none-any.whl", hash = "sha256:bdea869822dfd2b494ea84c0973937e35d1575af088b6721a29c7f7878adc9e3", size = 342175, upload-time = "2025-07-19T10:52:02.399Z" }, ] [[package]] name = "gevent" -version = "24.11.1" +version = "25.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*' and platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*' and platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, { name = "zope-event" }, { name = "zope-interface" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ab/75/a53f1cb732420f5e5d79b2563fc3504d22115e7ecfe7966e5cf9b3582ae7/gevent-24.11.1.tar.gz", hash = "sha256:8bd1419114e9e4a3ed33a5bad766afff9a3cf765cb440a582a1b3a9bc80c1aca", size = 5976624 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/36/7d/27ed3603f4bf96b36fb2746e923e033bc600c6684de8fe164d64eb8c4dcc/gevent-24.11.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:92fe5dfee4e671c74ffaa431fd7ffd0ebb4b339363d24d0d944de532409b935e", size = 2998254 }, - { url = "https://files.pythonhosted.org/packages/a8/03/a8f6c70f50a644a79e75d9f15e6f1813115d34c3c55528e4669a9316534d/gevent-24.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7bfcfe08d038e1fa6de458891bca65c1ada6d145474274285822896a858c870", size = 4817711 }, - { url = "https://files.pythonhosted.org/packages/f0/05/4f9bc565520a18f107464d40ac15a91708431362c797e77fbb5e7ff26e64/gevent-24.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7398c629d43b1b6fd785db8ebd46c0a353880a6fab03d1cf9b6788e7240ee32e", size = 4934468 }, - { url = "https://files.pythonhosted.org/packages/4a/7d/f15561eeebecbebc0296dd7bebea10ac4af0065d98249e3d8c4998e68edd/gevent-24.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7886b63ebfb865178ab28784accd32f287d5349b3ed71094c86e4d3ca738af5", size = 5014067 }, - { url = "https://files.pythonhosted.org/packages/67/c1/07eff117a600fc3c9bd4e3a1ff3b726f146ee23ce55981156547ccae0c85/gevent-24.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9ca80711e6553880974898d99357fb649e062f9058418a92120ca06c18c3c59", size = 6625531 }, - { url = "https://files.pythonhosted.org/packages/4b/72/43f76ab6b18e5e56b1003c844829971f3044af08b39b3c9040559be00a2b/gevent-24.11.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e24181d172f50097ac8fc272c8c5b030149b630df02d1c639ee9f878a470ba2b", size = 5249671 }, - { url = "https://files.pythonhosted.org/packages/6b/fc/1a847ada0757cc7690f83959227514b1a52ff6de504619501c81805fa1da/gevent-24.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1d4fadc319b13ef0a3c44d2792f7918cf1bca27cacd4d41431c22e6b46668026", size = 6773903 }, - { url = "https://files.pythonhosted.org/packages/3b/9d/254dcf455f6659ab7e36bec0bc11f51b18ea25eac2de69185e858ccf3c30/gevent-24.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:3d882faa24f347f761f934786dde6c73aa6c9187ee710189f12dcc3a63ed4a50", size = 1560443 }, - { url = "https://files.pythonhosted.org/packages/ea/fd/86a170f77ef51a15297573c50dbec4cc67ddc98b677cc2d03cc7f2927f4c/gevent-24.11.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:351d1c0e4ef2b618ace74c91b9b28b3eaa0dd45141878a964e03c7873af09f62", size = 2951424 }, - { url = "https://files.pythonhosted.org/packages/7f/0a/987268c9d446f61883bc627c77c5ed4a97869c0f541f76661a62b2c411f6/gevent-24.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5efe72e99b7243e222ba0c2c2ce9618d7d36644c166d63373af239da1036bab", size = 4878504 }, - { url = "https://files.pythonhosted.org/packages/dc/d4/2f77ddd837c0e21b4a4460bcb79318b6754d95ef138b7a29f3221c7e9993/gevent-24.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d3b249e4e1f40c598ab8393fc01ae6a3b4d51fc1adae56d9ba5b315f6b2d758", size = 5007668 }, - { url = "https://files.pythonhosted.org/packages/80/a0/829e0399a1f9b84c344b72d2be9aa60fe2a64e993cac221edcc14f069679/gevent-24.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81d918e952954675f93fb39001da02113ec4d5f4921bf5a0cc29719af6824e5d", size = 5067055 }, - { url = "https://files.pythonhosted.org/packages/1e/67/0e693f9ddb7909c2414f8fcfc2409aa4157884c147bc83dab979e9cf717c/gevent-24.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9c935b83d40c748b6421625465b7308d87c7b3717275acd587eef2bd1c39546", size = 6761883 }, - { url = "https://files.pythonhosted.org/packages/fa/b6/b69883fc069d7148dd23c5dda20826044e54e7197f3c8e72b8cc2cd4035a/gevent-24.11.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff96c5739834c9a594db0e12bf59cb3fa0e5102fc7b893972118a3166733d61c", size = 5440802 }, - { url = "https://files.pythonhosted.org/packages/32/4e/b00094d995ff01fd88b3cf6b9d1d794f935c31c645c431e65cd82d808c9c/gevent-24.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d6c0a065e31ef04658f799215dddae8752d636de2bed61365c358f9c91e7af61", size = 6866992 }, - { url = "https://files.pythonhosted.org/packages/37/ed/58dbe9fb09d36f6477ff8db0459ebd3be9a77dc05ae5d96dc91ad657610d/gevent-24.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:97e2f3999a5c0656f42065d02939d64fffaf55861f7d62b0107a08f52c984897", size = 1543736 }, - { url = "https://files.pythonhosted.org/packages/dd/32/301676f67ffa996ff1c4175092fb0c48c83271cc95e5c67650b87156b6cf/gevent-24.11.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:a3d75fa387b69c751a3d7c5c3ce7092a171555126e136c1d21ecd8b50c7a6e46", size = 2956467 }, - { url = "https://files.pythonhosted.org/packages/6b/84/aef1a598123cef2375b6e2bf9d17606b961040f8a10e3dcc3c3dd2a99f05/gevent-24.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:beede1d1cff0c6fafae3ab58a0c470d7526196ef4cd6cc18e7769f207f2ea4eb", size = 5136486 }, - { url = "https://files.pythonhosted.org/packages/92/7b/04f61187ee1df7a913b3fca63b0a1206c29141ab4d2a57e7645237b6feb5/gevent-24.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85329d556aaedced90a993226d7d1186a539c843100d393f2349b28c55131c85", size = 5299718 }, - { url = "https://files.pythonhosted.org/packages/36/2a/ebd12183ac25eece91d084be2111e582b061f4d15ead32239b43ed47e9ba/gevent-24.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:816b3883fa6842c1cf9d2786722014a0fd31b6312cca1f749890b9803000bad6", size = 5400118 }, - { url = "https://files.pythonhosted.org/packages/ec/c9/f006c0cd59f0720fbb62ee11da0ad4c4c0fd12799afd957dd491137e80d9/gevent-24.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b24d800328c39456534e3bc3e1684a28747729082684634789c2f5a8febe7671", size = 6775163 }, - { url = "https://files.pythonhosted.org/packages/49/f1/5edf00b674b10d67e3b967c2d46b8a124c2bc8cfd59d4722704392206444/gevent-24.11.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a5f1701ce0f7832f333dd2faf624484cbac99e60656bfbb72504decd42970f0f", size = 5479886 }, - { url = "https://files.pythonhosted.org/packages/22/11/c48e62744a32c0d48984268ae62b99edb81eaf0e03b42de52e2f09855509/gevent-24.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d740206e69dfdfdcd34510c20adcb9777ce2cc18973b3441ab9767cd8948ca8a", size = 6891452 }, - { url = "https://files.pythonhosted.org/packages/11/b2/5d20664ef6a077bec9f27f7a7ee761edc64946d0b1e293726a3d074a9a18/gevent-24.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:68bee86b6e1c041a187347ef84cf03a792f0b6c7238378bf6ba4118af11feaae", size = 1541631 }, - { url = "https://files.pythonhosted.org/packages/a4/8f/4958e70caeaf469c576ecc5b5f2cb49ddaad74336fa82363d89cddb3c284/gevent-24.11.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:d618e118fdb7af1d6c1a96597a5cd6ac84a9f3732b5be8515c6a66e098d498b6", size = 2949601 }, - { url = "https://files.pythonhosted.org/packages/3b/64/79892d250b7b2aa810688dfebe783aec02568e5cecacb1e100acbb9d95c6/gevent-24.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2142704c2adce9cd92f6600f371afb2860a446bfd0be5bd86cca5b3e12130766", size = 5107052 }, - { url = "https://files.pythonhosted.org/packages/66/44/9ee0ed1909b4f41375e32bf10036d5d8624962afcbd901573afdecd2e36a/gevent-24.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92e0d7759de2450a501effd99374256b26359e801b2d8bf3eedd3751973e87f5", size = 5271736 }, - { url = "https://files.pythonhosted.org/packages/e3/48/0184b2622a388a256199c5fadcad6b52b6455019c2a4b19edd6de58e30ba/gevent-24.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca845138965c8c56d1550499d6b923eb1a2331acfa9e13b817ad8305dde83d11", size = 5367782 }, - { url = "https://files.pythonhosted.org/packages/9a/b1/1a2704c346234d889d2e0042efb182534f7d294115f0e9f99d8079fa17eb/gevent-24.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:356b73d52a227d3313f8f828025b665deada57a43d02b1cf54e5d39028dbcf8d", size = 6757533 }, - { url = "https://files.pythonhosted.org/packages/ed/6e/b2eed8dec617264f0046d50a13a42d3f0a06c50071b9fc1eae00285a03f1/gevent-24.11.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:58851f23c4bdb70390f10fc020c973ffcf409eb1664086792c8b1e20f25eef43", size = 5449436 }, - { url = "https://files.pythonhosted.org/packages/63/c2/eca6b95fbf9af287fa91c327494e4b74a8d5bfa0156cd87b233f63f118dc/gevent-24.11.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1ea50009ecb7f1327347c37e9eb6561bdbc7de290769ee1404107b9a9cba7cf1", size = 6866470 }, - { url = "https://files.pythonhosted.org/packages/b7/e6/51824bd1f2c1ce70aa01495aa6ffe04ab789fa819fa7e6f0ad2388fb03c6/gevent-24.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:ec68e270543ecd532c4c1d70fca020f90aa5486ad49c4f3b8b2e64a66f5c9274", size = 1540088 }, - { url = "https://files.pythonhosted.org/packages/a0/73/263d0f63186d27d205b3dc157efe838afe3aba10a3baca15d85e97b90eae/gevent-24.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9347690f4e53de2c4af74e62d6fabc940b6d4a6cad555b5a379f61e7d3f2a8e", size = 6658480 }, - { url = "https://files.pythonhosted.org/packages/8a/fd/ec7b5c764a3d1340160b82f7394fdc1220d18e11ae089c472cf7bcc2fe6a/gevent-24.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8619d5c888cb7aebf9aec6703e410620ef5ad48cdc2d813dd606f8aa7ace675f", size = 6808247 }, - { url = "https://files.pythonhosted.org/packages/95/82/2ce68dc8dbc2c3ed3f4e73f21e1b7a45d80b5225670225a48e695f248850/gevent-24.11.1-cp39-cp39-win32.whl", hash = "sha256:c6b775381f805ff5faf250e3a07c0819529571d19bb2a9d474bee8c3f90d66af", size = 1483133 }, - { url = "https://files.pythonhosted.org/packages/76/96/aa4cbcf1807187b65a9c9ff15b32b08c2014968be852dda34d212cf8cc58/gevent-24.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1c3443b0ed23dcb7c36a748d42587168672953d368f2956b17fad36d43b58836", size = 1566354 }, - { url = "https://files.pythonhosted.org/packages/86/63/197aa67250943b508b34995c2aa6b46402e7e6f11785487740c2057bfb20/gevent-24.11.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:f43f47e702d0c8e1b8b997c00f1601486f9f976f84ab704f8f11536e3fa144c9", size = 1271676 }, +sdist = { url = "https://files.pythonhosted.org/packages/f1/58/267e8160aea00ab00acd2de97197eecfe307064a376fb5c892870a8a6159/gevent-25.5.1.tar.gz", hash = "sha256:582c948fa9a23188b890d0bc130734a506d039a2e5ad87dae276a456cc683e61", size = 6388207, upload-time = "2025-05-12T12:57:59.833Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/a7/438568c37fb255f80e710318bfcad04731b92ce764bc16adee278fdc6b4d/gevent-25.5.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8e5a0fab5e245b15ec1005b3666b0a2e867c26f411c8fe66ae1afe07174a30e9", size = 2922800, upload-time = "2025-05-12T11:11:46.728Z" }, + { url = "https://files.pythonhosted.org/packages/5d/b3/b44d8b1c4a4d01097a7f82ffbc582d054007365c27b28867f0b2d4241d73/gevent-25.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7b80a37f2fb45ee4a8f7e64b77dd8a842d364384046e394227b974a4e9c9a52", size = 1812954, upload-time = "2025-05-12T11:52:27.059Z" }, + { url = "https://files.pythonhosted.org/packages/1e/c6/935b4c973ad827c9ec49c354d68d047da1d23e3018bda63d3723cce43178/gevent-25.5.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29ab729d50ae85077a68e0385f129f5b01052d01a0ae6d7fdc1824f5337905e4", size = 1900169, upload-time = "2025-05-12T11:54:17.797Z" }, + { url = "https://files.pythonhosted.org/packages/38/8a/b745bddfec35fb723cafb036f191e5e0a0013f1698bf0ba4fa2cb8e01879/gevent-25.5.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80d20592aeabcc4e294fd441fd43d45cb537437fd642c374ea9d964622fad229", size = 1849786, upload-time = "2025-05-12T12:00:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/7c/b3/7aa7b09d91207bebe7608699558bbadd34f63e32904351867c29f8be25de/gevent-25.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8ba0257542ccbb72a8229dc34d00844ccdfba110417e4b7b34599548d0e20e9", size = 2139021, upload-time = "2025-05-12T11:32:58.961Z" }, + { url = "https://files.pythonhosted.org/packages/74/da/cf52ae0c84361f4164a04f3338508b1234331ce79719db103e50dbc5598c/gevent-25.5.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cad0821dff998c7c60dd238f92cd61380342c47fb9e92e1a8705d9b5ac7c16e8", size = 1830758, upload-time = "2025-05-12T11:59:55.666Z" }, + { url = "https://files.pythonhosted.org/packages/93/93/73a49b896d78eec27f0895ce3008f9825db748a5aacbca47404d1014da4b/gevent-25.5.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:017a7384c0cd1a5907751c991535a0699596e89725468a7fc39228312e10efa1", size = 2199993, upload-time = "2025-05-12T11:40:50.845Z" }, + { url = "https://files.pythonhosted.org/packages/df/c7/34680b7d2a75492fa032fa8ecaacc03c1940767a35125f6740954a0132a3/gevent-25.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:469c86d02fccad7e2a3d82fe22237e47ecb376fbf4710bc18747b49c50716817", size = 1652665, upload-time = "2025-05-12T12:35:58.105Z" }, + { url = "https://files.pythonhosted.org/packages/c6/eb/015e93f16a718e2f836ecebecae9bcd7b4d2a5695d1c8bd5bba2d5d91548/gevent-25.5.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:12380aba5c316e9ff53cc21d8ab80f4a91c0df3ada58f65d4f5eb2cf693db00e", size = 2877441, upload-time = "2025-05-12T11:14:57.735Z" }, + { url = "https://files.pythonhosted.org/packages/7b/86/42d191a6f6672ca59d6d79b4cd9b89d4a15f59c843fbbad42f2b749f8ea9/gevent-25.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0694daab1a041b69a53f53c2141c12994892b2503870515cabe6a5dbd2a928", size = 1774873, upload-time = "2025-05-12T11:52:29.015Z" }, + { url = "https://files.pythonhosted.org/packages/f5/9f/42dd255849c9ca2e814f5cbe180980594007ba19044a132cf674069e38bf/gevent-25.5.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2797885e9aeffdc98e1846723e5aa212e7ce53007dbef40d6fd2add264235c41", size = 1857911, upload-time = "2025-05-12T11:54:19.523Z" }, + { url = "https://files.pythonhosted.org/packages/3e/fc/8e799a733be48f6114bfc531b94e28812741664d8af89872dd90e117f8a4/gevent-25.5.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cde6aaac36b54332e10ea2a5bc0de6a8aba6c205c92603fe4396e3777c88e05d", size = 1812751, upload-time = "2025-05-12T12:00:03.719Z" }, + { url = "https://files.pythonhosted.org/packages/52/4f/a3f3acd961887da10cb0b49c3d915201973d59ce6bf49e2922eaf2058d5f/gevent-25.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24484f80f14befb8822bf29554cfb3a26a26cb69cd1e5a8be9e23b4bd7a96e25", size = 2087115, upload-time = "2025-05-12T11:33:01.128Z" }, + { url = "https://files.pythonhosted.org/packages/b6/27/bb38e005106a53787c13ad1f9f73ed990e403e462108acae6320ab11d442/gevent-25.5.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc7446895fa184890d8ca5ea61e502691114f9db55c9b76adc33f3086c4368", size = 1793549, upload-time = "2025-05-12T11:59:57.854Z" }, + { url = "https://files.pythonhosted.org/packages/ee/56/da817bc69e1f0ae8438f12f2cd150656b09a8c3576c6d12f992dc9ca64ef/gevent-25.5.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5b6106e2414b1797133786258fa1962a5e836480e4d5e861577f9fc63b673a5a", size = 2145899, upload-time = "2025-05-12T11:40:53.275Z" }, + { url = "https://files.pythonhosted.org/packages/b8/42/989403abbdbb1346a1507083c02018bee3fedaef3f9648940c767d8c0958/gevent-25.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:bc899212d90f311784c58938a9c09c59802fb6dc287a35fabdc36d180f57f575", size = 1635771, upload-time = "2025-05-12T12:26:47.644Z" }, + { url = "https://files.pythonhosted.org/packages/58/c5/cf71423666a0b83db3d7e3f85788bc47d573fca5fe62b798fe2c4273de7c/gevent-25.5.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d87c0a1bd809d8f70f96b9b229779ec6647339830b8888a192beed33ac8d129f", size = 2909333, upload-time = "2025-05-12T11:11:34.883Z" }, + { url = "https://files.pythonhosted.org/packages/26/7e/d2f174ee8bec6eb85d961ca203bc599d059c857b8412e367b8fa206603a5/gevent-25.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87a4b66edb3808d4d07bbdb0deed5a710cf3d3c531e082759afd283758bb649", size = 1788420, upload-time = "2025-05-12T11:52:30.306Z" }, + { url = "https://files.pythonhosted.org/packages/fe/f3/3aba8c147b9108e62ba348c726fe38ae69735a233db425565227336e8ce6/gevent-25.5.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f076779050029a82feb0cb1462021d3404d22f80fa76a181b1a7889cd4d6b519", size = 1868854, upload-time = "2025-05-12T11:54:21.564Z" }, + { url = "https://files.pythonhosted.org/packages/c6/b1/11a5453f8fcebe90a456471fad48bd154c6a62fcb96e3475a5e408d05fc8/gevent-25.5.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb673eb291c19370f69295f7a881a536451408481e2e3deec3f41dedb7c281ec", size = 1833946, upload-time = "2025-05-12T12:00:05.514Z" }, + { url = "https://files.pythonhosted.org/packages/70/1c/37d4a62303f86e6af67660a8df38c1171b7290df61b358e618c6fea79567/gevent-25.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1325ed44225c8309c0dd188bdbbbee79e1df8c11ceccac226b861c7d52e4837", size = 2070583, upload-time = "2025-05-12T11:33:02.803Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/3b14929ff28263aba1d268ea97bcf104be1a86ba6f6bb4633838e7a1905e/gevent-25.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcd5bcad3102bde686d0adcc341fade6245186050ce14386d547ccab4bd54310", size = 1808341, upload-time = "2025-05-12T11:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/2f/fc/674ec819fb8a96e482e4d21f8baa43d34602dba09dfce7bbdc8700899d1b/gevent-25.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a93062609e8fa67ec97cd5fb9206886774b2a09b24887f40148c9c37e6fb71c", size = 2137974, upload-time = "2025-05-12T11:40:54.78Z" }, + { url = "https://files.pythonhosted.org/packages/05/9a/048b7f5e28c54e4595ad4a8ad3c338fa89560e558db2bbe8273f44f030de/gevent-25.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:2534c23dc32bed62b659ed4fd9e198906179e68b26c9276a897e04163bdde806", size = 1638344, upload-time = "2025-05-12T12:08:31.776Z" }, + { url = "https://files.pythonhosted.org/packages/10/25/2162b38d7b48e08865db6772d632bd1648136ce2bb50e340565e45607cad/gevent-25.5.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a022a9de9275ce0b390b7315595454258c525dc8287a03f1a6cacc5878ab7cbc", size = 2928044, upload-time = "2025-05-12T11:11:36.33Z" }, + { url = "https://files.pythonhosted.org/packages/1b/e0/dbd597a964ed00176da122ea759bf2a6c1504f1e9f08e185379f92dc355f/gevent-25.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fae8533f9d0ef3348a1f503edcfb531ef7a0236b57da1e24339aceb0ce52922", size = 1788751, upload-time = "2025-05-12T11:52:32.643Z" }, + { url = "https://files.pythonhosted.org/packages/f1/74/960cc4cf4c9c90eafbe0efc238cdf588862e8e278d0b8c0d15a0da4ed480/gevent-25.5.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c7b32d9c3b5294b39ea9060e20c582e49e1ec81edbfeae6cf05f8ad0829cb13d", size = 1869766, upload-time = "2025-05-12T11:54:23.903Z" }, + { url = "https://files.pythonhosted.org/packages/56/78/fa84b1c7db79b156929685db09a7c18c3127361dca18a09e998e98118506/gevent-25.5.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b95815fe44f318ebbfd733b6428b4cb18cc5e68f1c40e8501dd69cc1f42a83d", size = 1835358, upload-time = "2025-05-12T12:00:06.794Z" }, + { url = "https://files.pythonhosted.org/packages/00/5c/bfefe3822bbca5b83bfad256c82251b3f5be13d52d14e17a786847b9b625/gevent-25.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d316529b70d325b183b2f3f5cde958911ff7be12eb2b532b5c301f915dbbf1e", size = 2073071, upload-time = "2025-05-12T11:33:04.2Z" }, + { url = "https://files.pythonhosted.org/packages/20/e4/08a77a3839a37db96393dea952e992d5846a881b887986dde62ead6b48a1/gevent-25.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f6ba33c13db91ffdbb489a4f3d177a261ea1843923e1d68a5636c53fe98fa5ce", size = 1809805, upload-time = "2025-05-12T12:00:00.537Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ac/28848348f790c1283df74b0fc0a554271d0606676470f848eccf84eae42a/gevent-25.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ee34b77c7553777c0b8379915f75934c3f9c8cd32f7cd098ea43c9323c2276", size = 2138305, upload-time = "2025-05-12T11:40:56.566Z" }, + { url = "https://files.pythonhosted.org/packages/52/9e/0e9e40facd2d714bfb00f71fc6dacaacc82c24c1c2e097bf6461e00dec9f/gevent-25.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fa6aa0da224ed807d3b76cdb4ee8b54d4d4d5e018aed2478098e685baae7896", size = 1637444, upload-time = "2025-05-12T12:17:45.995Z" }, + { url = "https://files.pythonhosted.org/packages/60/16/b71171e97ec7b4ded8669542f4369d88d5a289e2704efbbde51e858e062a/gevent-25.5.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:0bacf89a65489d26c7087669af89938d5bfd9f7afb12a07b57855b9fad6ccbd0", size = 2937113, upload-time = "2025-05-12T11:12:03.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/54/e5908beb092c2745aa8390f15b9559cc3ebd77bf1ba71c81c606f7b1fb92/gevent-25.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e30169ef9cc0a57930bfd8fe14d86bc9d39fb96d278e3891e85cbe7b46058a97", size = 2147450, upload-time = "2025-05-12T11:33:05.883Z" }, + { url = "https://files.pythonhosted.org/packages/ee/39/206c9da2395a7df11c13e2989f7c7c65a7799babdb8b4b055cccae4d5c14/gevent-25.5.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e72ad5f8d9c92df017fb91a1f6a438cfb63b0eff4b40904ff81b40cb8150078c", size = 2210122, upload-time = "2025-05-12T11:40:58.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/18/d10ca3841b686143c1973cac816651a72ff77ad9e79a5300cbbbe310fced/gevent-25.5.1-cp39-cp39-win32.whl", hash = "sha256:e5f358e81e27b1a7f2fb2f5219794e13ab5f59ce05571aa3877cfac63adb97db", size = 1548447, upload-time = "2025-05-12T12:48:21.565Z" }, + { url = "https://files.pythonhosted.org/packages/ac/9d/48c01ff8324ce4bfaba0760c0f1db6f4e2c976838655f6b80333cfd47999/gevent-25.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:b83aff2441c7d4ee93e519989713b7c2607d4510abe990cd1d04f641bc6c03af", size = 1659832, upload-time = "2025-05-12T12:45:00.794Z" }, + { url = "https://files.pythonhosted.org/packages/11/81/834da3c1ea5e71e4dc1a78a034a15f2813d9760d135464aae5d1f058a8c6/gevent-25.5.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:60ad4ca9ca2c4cc8201b607c229cd17af749831e371d006d8a91303bb5568eb1", size = 1291540, upload-time = "2025-05-12T11:11:55.456Z" }, ] [[package]] name = "greenlet" -version = "3.1.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2f/ff/df5fede753cc10f6a5be0931204ea30c35fa2f2ea7a35b25bdaf4fe40e46/greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467", size = 186022 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/90/5234a78dc0ef6496a6eb97b67a42a8e96742a56f7dc808cb954a85390448/greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563", size = 271235 }, - { url = "https://files.pythonhosted.org/packages/7c/16/cd631fa0ab7d06ef06387135b7549fdcc77d8d859ed770a0d28e47b20972/greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83", size = 637168 }, - { url = "https://files.pythonhosted.org/packages/2f/b1/aed39043a6fec33c284a2c9abd63ce191f4f1a07319340ffc04d2ed3256f/greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0", size = 648826 }, - { url = "https://files.pythonhosted.org/packages/76/25/40e0112f7f3ebe54e8e8ed91b2b9f970805143efef16d043dfc15e70f44b/greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120", size = 644443 }, - { url = "https://files.pythonhosted.org/packages/fb/2f/3850b867a9af519794784a7eeed1dd5bc68ffbcc5b28cef703711025fd0a/greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc", size = 643295 }, - { url = "https://files.pythonhosted.org/packages/cf/69/79e4d63b9387b48939096e25115b8af7cd8a90397a304f92436bcb21f5b2/greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617", size = 599544 }, - { url = "https://files.pythonhosted.org/packages/46/1d/44dbcb0e6c323bd6f71b8c2f4233766a5faf4b8948873225d34a0b7efa71/greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7", size = 1125456 }, - { url = "https://files.pythonhosted.org/packages/e0/1d/a305dce121838d0278cee39d5bb268c657f10a5363ae4b726848f833f1bb/greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6", size = 1149111 }, - { url = "https://files.pythonhosted.org/packages/96/28/d62835fb33fb5652f2e98d34c44ad1a0feacc8b1d3f1aecab035f51f267d/greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80", size = 298392 }, - { url = "https://files.pythonhosted.org/packages/28/62/1c2665558618553c42922ed47a4e6d6527e2fa3516a8256c2f431c5d0441/greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70", size = 272479 }, - { url = "https://files.pythonhosted.org/packages/76/9d/421e2d5f07285b6e4e3a676b016ca781f63cfe4a0cd8eaecf3fd6f7a71ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159", size = 640404 }, - { url = "https://files.pythonhosted.org/packages/e5/de/6e05f5c59262a584e502dd3d261bbdd2c97ab5416cc9c0b91ea38932a901/greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e", size = 652813 }, - { url = "https://files.pythonhosted.org/packages/49/93/d5f93c84241acdea15a8fd329362c2c71c79e1a507c3f142a5d67ea435ae/greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1", size = 648517 }, - { url = "https://files.pythonhosted.org/packages/15/85/72f77fc02d00470c86a5c982b8daafdf65d38aefbbe441cebff3bf7037fc/greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383", size = 647831 }, - { url = "https://files.pythonhosted.org/packages/f7/4b/1c9695aa24f808e156c8f4813f685d975ca73c000c2a5056c514c64980f6/greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a", size = 602413 }, - { url = "https://files.pythonhosted.org/packages/76/70/ad6e5b31ef330f03b12559d19fda2606a522d3849cde46b24f223d6d1619/greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511", size = 1129619 }, - { url = "https://files.pythonhosted.org/packages/f4/fb/201e1b932e584066e0f0658b538e73c459b34d44b4bd4034f682423bc801/greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395", size = 1155198 }, - { url = "https://files.pythonhosted.org/packages/12/da/b9ed5e310bb8b89661b80cbcd4db5a067903bbcd7fc854923f5ebb4144f0/greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39", size = 298930 }, - { url = "https://files.pythonhosted.org/packages/7d/ec/bad1ac26764d26aa1353216fcbfa4670050f66d445448aafa227f8b16e80/greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d", size = 274260 }, - { url = "https://files.pythonhosted.org/packages/66/d4/c8c04958870f482459ab5956c2942c4ec35cac7fe245527f1039837c17a9/greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79", size = 649064 }, - { url = "https://files.pythonhosted.org/packages/51/41/467b12a8c7c1303d20abcca145db2be4e6cd50a951fa30af48b6ec607581/greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa", size = 663420 }, - { url = "https://files.pythonhosted.org/packages/27/8f/2a93cd9b1e7107d5c7b3b7816eeadcac2ebcaf6d6513df9abaf0334777f6/greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441", size = 658035 }, - { url = "https://files.pythonhosted.org/packages/57/5c/7c6f50cb12be092e1dccb2599be5a942c3416dbcfb76efcf54b3f8be4d8d/greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36", size = 660105 }, - { url = "https://files.pythonhosted.org/packages/f1/66/033e58a50fd9ec9df00a8671c74f1f3a320564c6415a4ed82a1c651654ba/greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9", size = 613077 }, - { url = "https://files.pythonhosted.org/packages/19/c5/36384a06f748044d06bdd8776e231fadf92fc896bd12cb1c9f5a1bda9578/greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0", size = 1135975 }, - { url = "https://files.pythonhosted.org/packages/38/f9/c0a0eb61bdf808d23266ecf1d63309f0e1471f284300ce6dac0ae1231881/greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942", size = 1163955 }, - { url = "https://files.pythonhosted.org/packages/43/21/a5d9df1d21514883333fc86584c07c2b49ba7c602e670b174bd73cfc9c7f/greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01", size = 299655 }, - { url = "https://files.pythonhosted.org/packages/f3/57/0db4940cd7bb461365ca8d6fd53e68254c9dbbcc2b452e69d0d41f10a85e/greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1", size = 272990 }, - { url = "https://files.pythonhosted.org/packages/1c/ec/423d113c9f74e5e402e175b157203e9102feeb7088cee844d735b28ef963/greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff", size = 649175 }, - { url = "https://files.pythonhosted.org/packages/a9/46/ddbd2db9ff209186b7b7c621d1432e2f21714adc988703dbdd0e65155c77/greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a", size = 663425 }, - { url = "https://files.pythonhosted.org/packages/bc/f9/9c82d6b2b04aa37e38e74f0c429aece5eeb02bab6e3b98e7db89b23d94c6/greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e", size = 657736 }, - { url = "https://files.pythonhosted.org/packages/d9/42/b87bc2a81e3a62c3de2b0d550bf91a86939442b7ff85abb94eec3fc0e6aa/greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4", size = 660347 }, - { url = "https://files.pythonhosted.org/packages/37/fa/71599c3fd06336cdc3eac52e6871cfebab4d9d70674a9a9e7a482c318e99/greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e", size = 615583 }, - { url = "https://files.pythonhosted.org/packages/4e/96/e9ef85de031703ee7a4483489b40cf307f93c1824a02e903106f2ea315fe/greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1", size = 1133039 }, - { url = "https://files.pythonhosted.org/packages/87/76/b2b6362accd69f2d1889db61a18c94bc743e961e3cab344c2effaa4b4a25/greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c", size = 1160716 }, - { url = "https://files.pythonhosted.org/packages/1f/1b/54336d876186920e185066d8c3024ad55f21d7cc3683c856127ddb7b13ce/greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761", size = 299490 }, - { url = "https://files.pythonhosted.org/packages/5f/17/bea55bf36990e1638a2af5ba10c1640273ef20f627962cf97107f1e5d637/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011", size = 643731 }, - { url = "https://files.pythonhosted.org/packages/78/d2/aa3d2157f9ab742a08e0fd8f77d4699f37c22adfbfeb0c610a186b5f75e0/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13", size = 649304 }, - { url = "https://files.pythonhosted.org/packages/f1/8e/d0aeffe69e53ccff5a28fa86f07ad1d2d2d6537a9506229431a2a02e2f15/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475", size = 646537 }, - { url = "https://files.pythonhosted.org/packages/05/79/e15408220bbb989469c8871062c97c6c9136770657ba779711b90870d867/greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b", size = 642506 }, - { url = "https://files.pythonhosted.org/packages/18/87/470e01a940307796f1d25f8167b551a968540fbe0551c0ebb853cb527dd6/greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822", size = 602753 }, - { url = "https://files.pythonhosted.org/packages/e2/72/576815ba674eddc3c25028238f74d7b8068902b3968cbe456771b166455e/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01", size = 1122731 }, - { url = "https://files.pythonhosted.org/packages/ac/38/08cc303ddddc4b3d7c628c3039a61a3aae36c241ed01393d00c2fd663473/greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6", size = 1142112 }, - { url = "https://files.pythonhosted.org/packages/8c/82/8051e82af6d6b5150aacb6789a657a8afd48f0a44d8e91cb72aaaf28553a/greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3", size = 270027 }, - { url = "https://files.pythonhosted.org/packages/f9/74/f66de2785880293780eebd18a2958aeea7cbe7814af1ccef634f4701f846/greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42", size = 634822 }, - { url = "https://files.pythonhosted.org/packages/68/23/acd9ca6bc412b02b8aa755e47b16aafbe642dde0ad2f929f836e57a7949c/greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f", size = 646866 }, - { url = "https://files.pythonhosted.org/packages/a9/ab/562beaf8a53dc9f6b2459f200e7bc226bb07e51862a66351d8b7817e3efd/greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437", size = 641985 }, - { url = "https://files.pythonhosted.org/packages/03/d3/1006543621f16689f6dc75f6bcf06e3c23e044c26fe391c16c253623313e/greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145", size = 641268 }, - { url = "https://files.pythonhosted.org/packages/2f/c1/ad71ce1b5f61f900593377b3f77b39408bce5dc96754790311b49869e146/greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c", size = 597376 }, - { url = "https://files.pythonhosted.org/packages/f7/ff/183226685b478544d61d74804445589e069d00deb8ddef042699733950c7/greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e", size = 1123359 }, - { url = "https://files.pythonhosted.org/packages/c0/8b/9b3b85a89c22f55f315908b94cd75ab5fed5973f7393bbef000ca8b2c5c1/greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e", size = 1147458 }, - { url = "https://files.pythonhosted.org/packages/b8/1c/248fadcecd1790b0ba793ff81fa2375c9ad6442f4c748bf2cc2e6563346a/greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c", size = 281131 }, - { url = "https://files.pythonhosted.org/packages/ae/02/e7d0aef2354a38709b764df50b2b83608f0621493e47f47694eb80922822/greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22", size = 298306 }, +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/db/b4c12cff13ebac2786f4f217f06588bccd8b53d260453404ef22b121fc3a/greenlet-3.2.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:1afd685acd5597349ee6d7a88a8bec83ce13c106ac78c196ee9dde7c04fe87be", size = 268977, upload-time = "2025-06-05T16:10:24.001Z" }, + { url = "https://files.pythonhosted.org/packages/52/61/75b4abd8147f13f70986df2801bf93735c1bd87ea780d70e3b3ecda8c165/greenlet-3.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:761917cac215c61e9dc7324b2606107b3b292a8349bdebb31503ab4de3f559ac", size = 627351, upload-time = "2025-06-05T16:38:50.685Z" }, + { url = "https://files.pythonhosted.org/packages/35/aa/6894ae299d059d26254779a5088632874b80ee8cf89a88bca00b0709d22f/greenlet-3.2.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a433dbc54e4a37e4fff90ef34f25a8c00aed99b06856f0119dcf09fbafa16392", size = 638599, upload-time = "2025-06-05T16:41:34.057Z" }, + { url = "https://files.pythonhosted.org/packages/30/64/e01a8261d13c47f3c082519a5e9dbf9e143cc0498ed20c911d04e54d526c/greenlet-3.2.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:72e77ed69312bab0434d7292316d5afd6896192ac4327d44f3d613ecb85b037c", size = 634482, upload-time = "2025-06-05T16:48:16.26Z" }, + { url = "https://files.pythonhosted.org/packages/47/48/ff9ca8ba9772d083a4f5221f7b4f0ebe8978131a9ae0909cf202f94cd879/greenlet-3.2.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:68671180e3849b963649254a882cd544a3c75bfcd2c527346ad8bb53494444db", size = 633284, upload-time = "2025-06-05T16:13:01.599Z" }, + { url = "https://files.pythonhosted.org/packages/e9/45/626e974948713bc15775b696adb3eb0bd708bec267d6d2d5c47bb47a6119/greenlet-3.2.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49c8cfb18fb419b3d08e011228ef8a25882397f3a859b9fe1436946140b6756b", size = 582206, upload-time = "2025-06-05T16:12:48.51Z" }, + { url = "https://files.pythonhosted.org/packages/b1/8e/8b6f42c67d5df7db35b8c55c9a850ea045219741bb14416255616808c690/greenlet-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:efc6dc8a792243c31f2f5674b670b3a95d46fa1c6a912b8e310d6f542e7b0712", size = 1111412, upload-time = "2025-06-05T16:36:45.479Z" }, + { url = "https://files.pythonhosted.org/packages/05/46/ab58828217349500a7ebb81159d52ca357da747ff1797c29c6023d79d798/greenlet-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:731e154aba8e757aedd0781d4b240f1225b075b4409f1bb83b05ff410582cf00", size = 1135054, upload-time = "2025-06-05T16:12:36.478Z" }, + { url = "https://files.pythonhosted.org/packages/68/7f/d1b537be5080721c0f0089a8447d4ef72839039cdb743bdd8ffd23046e9a/greenlet-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:96c20252c2f792defe9a115d3287e14811036d51e78b3aaddbee23b69b216302", size = 296573, upload-time = "2025-06-05T16:34:26.521Z" }, + { url = "https://files.pythonhosted.org/packages/fc/2e/d4fcb2978f826358b673f779f78fa8a32ee37df11920dc2bb5589cbeecef/greenlet-3.2.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:784ae58bba89fa1fa5733d170d42486580cab9decda3484779f4759345b29822", size = 270219, upload-time = "2025-06-05T16:10:10.414Z" }, + { url = "https://files.pythonhosted.org/packages/16/24/929f853e0202130e4fe163bc1d05a671ce8dcd604f790e14896adac43a52/greenlet-3.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0921ac4ea42a5315d3446120ad48f90c3a6b9bb93dd9b3cf4e4d84a66e42de83", size = 630383, upload-time = "2025-06-05T16:38:51.785Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b2/0320715eb61ae70c25ceca2f1d5ae620477d246692d9cc284c13242ec31c/greenlet-3.2.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d2971d93bb99e05f8c2c0c2f4aa9484a18d98c4c3bd3c62b65b7e6ae33dfcfaf", size = 642422, upload-time = "2025-06-05T16:41:35.259Z" }, + { url = "https://files.pythonhosted.org/packages/bd/49/445fd1a210f4747fedf77615d941444349c6a3a4a1135bba9701337cd966/greenlet-3.2.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c667c0bf9d406b77a15c924ef3285e1e05250948001220368e039b6aa5b5034b", size = 638375, upload-time = "2025-06-05T16:48:18.235Z" }, + { url = "https://files.pythonhosted.org/packages/7e/c8/ca19760cf6eae75fa8dc32b487e963d863b3ee04a7637da77b616703bc37/greenlet-3.2.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:592c12fb1165be74592f5de0d70f82bc5ba552ac44800d632214b76089945147", size = 637627, upload-time = "2025-06-05T16:13:02.858Z" }, + { url = "https://files.pythonhosted.org/packages/65/89/77acf9e3da38e9bcfca881e43b02ed467c1dedc387021fc4d9bd9928afb8/greenlet-3.2.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29e184536ba333003540790ba29829ac14bb645514fbd7e32af331e8202a62a5", size = 585502, upload-time = "2025-06-05T16:12:49.642Z" }, + { url = "https://files.pythonhosted.org/packages/97/c6/ae244d7c95b23b7130136e07a9cc5aadd60d59b5951180dc7dc7e8edaba7/greenlet-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:93c0bb79844a367782ec4f429d07589417052e621aa39a5ac1fb99c5aa308edc", size = 1114498, upload-time = "2025-06-05T16:36:46.598Z" }, + { url = "https://files.pythonhosted.org/packages/89/5f/b16dec0cbfd3070658e0d744487919740c6d45eb90946f6787689a7efbce/greenlet-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:751261fc5ad7b6705f5f76726567375bb2104a059454e0226e1eef6c756748ba", size = 1139977, upload-time = "2025-06-05T16:12:38.262Z" }, + { url = "https://files.pythonhosted.org/packages/66/77/d48fb441b5a71125bcac042fc5b1494c806ccb9a1432ecaa421e72157f77/greenlet-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:83a8761c75312361aa2b5b903b79da97f13f556164a7dd2d5448655425bd4c34", size = 297017, upload-time = "2025-06-05T16:25:05.225Z" }, + { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" }, + { url = "https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = "2025-06-05T16:41:36.343Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = "2025-06-05T16:48:19.604Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" }, + { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" }, + { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055, upload-time = "2025-06-05T16:12:40.457Z" }, + { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817, upload-time = "2025-06-05T16:29:49.244Z" }, + { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, + { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, + { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time = "2025-06-05T16:41:37.89Z" }, + { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, + { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, + { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, + { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, + { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" }, + { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, + { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, + { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, + { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, + { url = "https://files.pythonhosted.org/packages/3d/d9/a3114df5fba2bf9823e0acc01e9e2abdcd8ea4c5487cf1c3dcd4cc0b48cf/greenlet-3.2.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:42efc522c0bd75ffa11a71e09cd8a399d83fafe36db250a87cf1dacfaa15dc64", size = 267769, upload-time = "2025-06-05T16:10:44.802Z" }, + { url = "https://files.pythonhosted.org/packages/bc/da/47dfc50f6e5673116e66a737dc58d1eca651db9a9aa8797c1d27e940e211/greenlet-3.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d760f9bdfe79bff803bad32b4d8ffb2c1d2ce906313fc10a83976ffb73d64ca7", size = 625472, upload-time = "2025-06-05T16:38:56.882Z" }, + { url = "https://files.pythonhosted.org/packages/f5/74/f6ef9f85d981b2fcd665bbee3e69e3c0a10fb962eb4c6a5889ac3b6debfa/greenlet-3.2.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8324319cbd7b35b97990090808fdc99c27fe5338f87db50514959f8059999805", size = 637253, upload-time = "2025-06-05T16:41:40.542Z" }, + { url = "https://files.pythonhosted.org/packages/66/69/4919bb1c9e43bfc16dc886e7a37fe1bc04bfa4101aba177936a10f313cad/greenlet-3.2.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:8c37ef5b3787567d322331d5250e44e42b58c8c713859b8a04c6065f27efbf72", size = 632611, upload-time = "2025-06-05T16:48:24.976Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/97d988d019f40b6b360b0c71c99e5b4c877a3d92666fe48b081d0e1ea1cd/greenlet-3.2.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ce539fb52fb774d0802175d37fcff5c723e2c7d249c65916257f0a940cee8904", size = 631843, upload-time = "2025-06-05T16:13:09.476Z" }, + { url = "https://files.pythonhosted.org/packages/59/24/d5e1504ec00768755d4ccc2168b76d9f4524e96694a14ad45bd87796e9bb/greenlet-3.2.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:003c930e0e074db83559edc8705f3a2d066d4aa8c2f198aff1e454946efd0f26", size = 580781, upload-time = "2025-06-05T16:12:55.029Z" }, + { url = "https://files.pythonhosted.org/packages/9c/df/d009bcca566dbfd2283b306b4e424f4c0e59bf984868f8b789802fe9e607/greenlet-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7e70ea4384b81ef9e84192e8a77fb87573138aa5d4feee541d8014e452b434da", size = 1109903, upload-time = "2025-06-05T16:36:51.491Z" }, + { url = "https://files.pythonhosted.org/packages/33/54/5036097197a78388aa6901a5b90b562f3a154a9fbee89c301a26f56f3942/greenlet-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:22eb5ba839c4b2156f18f76768233fe44b23a31decd9cc0d4cc8141c211fd1b4", size = 1133975, upload-time = "2025-06-05T16:12:43.866Z" }, + { url = "https://files.pythonhosted.org/packages/e2/15/b001456a430805fdd8b600a788d19a790664eee8863739523395f68df752/greenlet-3.2.3-cp39-cp39-win32.whl", hash = "sha256:4532f0d25df67f896d137431b13f4cdce89f7e3d4a96387a41290910df4d3a57", size = 279320, upload-time = "2025-06-05T16:43:34.043Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4c/bf2100cbc1bd07f39bee3b09e7eef39beffe29f5453dc2477a2693737913/greenlet-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:aaa7aae1e7f75eaa3ae400ad98f8644bb81e1dc6ba47ce8a93d3f17274e08322", size = 296444, upload-time = "2025-06-05T16:39:22.664Z" }, ] [[package]] name = "h11" -version = "0.14.0" +version = "0.16.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f5/38/3af3d3633a34a3316095b39c8e8fb4853a28a536e55d347bd8d8e9a14b03/h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", size = 100418 } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761", size = 58259 }, + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, ] [[package]] name = "httpcore" -version = "1.0.7" +version = "1.0.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6a/41/d7d0a89eb493922c37d343b607bc1b5da7f5be7e383740b4753ad8943e90/httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c", size = 85196 } +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd", size = 78551 }, + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, ] [[package]] @@ -687,146 +895,146 @@ dependencies = [ { name = "httpcore" }, { name = "idna" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 } +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 }, + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, ] [[package]] name = "identify" -version = "2.6.5" +version = "2.6.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cf/92/69934b9ef3c31ca2470980423fda3d00f0460ddefdf30a67adf7f17e2e00/identify-2.6.5.tar.gz", hash = "sha256:c10b33f250e5bba374fae86fb57f3adcebf1161bce7cdf92031915fd480c13bc", size = 99213 } +sdist = { url = "https://files.pythonhosted.org/packages/a2/88/d193a27416618628a5eea64e3223acd800b40749a96ffb322a9b55a49ed1/identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6", size = 99254, upload-time = "2025-05-23T20:37:53.3Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/fa/dce098f4cdf7621aa8f7b4f919ce545891f489482f0bfa5102f3eca8608b/identify-2.6.5-py2.py3-none-any.whl", hash = "sha256:14181a47091eb75b337af4c23078c9d09225cd4c48929f521f3bf16b09d02566", size = 99078 }, + { url = "https://files.pythonhosted.org/packages/7a/cd/18f8da995b658420625f7ef13f037be53ae04ec5ad33f9b718240dcfd48c/identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2", size = 99145, upload-time = "2025-05-23T20:37:51.495Z" }, ] [[package]] name = "idna" version = "3.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, ] [[package]] name = "imagesize" version = "1.4.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026 } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026, upload-time = "2022-07-01T12:21:05.687Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" }, ] [[package]] name = "importlib-metadata" -version = "8.5.0" +version = "8.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp", marker = "python_full_version < '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304 } +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514 }, + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, ] [[package]] name = "iniconfig" -version = "2.0.0" +version = "2.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, ] [[package]] name = "jinja2" -version = "3.1.5" +version = "3.1.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markupsafe" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/af/92/b3130cbbf5591acf9ade8708c365f3238046ac7cb8ccba6e81abccb0ccff/jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", size = 244674 } +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bd/0f/2ba5fbcd631e3e88689309dbe978c5769e883e4b84ebfe7da30b43275c5a/jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb", size = 134596 }, + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, ] [[package]] name = "jmespath" version = "1.0.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843 } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256 }, + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, ] [[package]] name = "markupsafe" version = "3.0.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357 }, - { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393 }, - { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732 }, - { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866 }, - { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964 }, - { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977 }, - { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366 }, - { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091 }, - { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065 }, - { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514 }, - { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 }, - { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 }, - { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 }, - { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 }, - { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 }, - { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 }, - { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 }, - { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 }, - { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 }, - { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 }, - { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, - { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, - { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, - { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, - { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, - { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, - { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, - { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, - { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, - { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, - { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, - { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, - { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, - { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, - { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, - { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, - { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, - { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, - { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, - { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, - { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, - { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, - { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, - { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, - { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, - { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, - { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, - { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, - { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, - { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, - { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344 }, - { url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389 }, - { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607 }, - { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728 }, - { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826 }, - { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843 }, - { url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219 }, - { url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946 }, - { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063 }, - { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506 }, +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, + { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344, upload-time = "2024-10-18T15:21:43.721Z" }, + { url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389, upload-time = "2024-10-18T15:21:44.666Z" }, + { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607, upload-time = "2024-10-18T15:21:45.452Z" }, + { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728, upload-time = "2024-10-18T15:21:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826, upload-time = "2024-10-18T15:21:47.134Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843, upload-time = "2024-10-18T15:21:48.334Z" }, + { url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219, upload-time = "2024-10-18T15:21:49.587Z" }, + { url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946, upload-time = "2024-10-18T15:21:50.441Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063, upload-time = "2024-10-18T15:21:51.385Z" }, + { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506, upload-time = "2024-10-18T15:21:52.974Z" }, ] [[package]] @@ -846,98 +1054,98 @@ dependencies = [ { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002 }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400 }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172 }, - { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732 }, - { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197 }, - { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836 }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432 }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515 }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791 }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203 }, - { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900 }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869 }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668 }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060 }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167 }, - { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341 }, - { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991 }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016 }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097 }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728 }, - { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965 }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660 }, - { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198 }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276 }, - { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493 }, - { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702 }, - { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104 }, - { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167 }, - { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834 }, - { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231 }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905 }, +sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, + { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, + { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, + { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, + { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, + { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, + { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, + { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, + { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, + { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, + { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, + { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, + { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, + { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, + { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, + { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, + { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, + { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, + { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, + { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, + { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, + { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, + { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, + { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, + { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, + { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, ] [[package]] name = "mypy-extensions" -version = "1.0.0" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 }, + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, ] [[package]] name = "nodeenv" version = "1.9.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 }, + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, ] [[package]] name = "packaging" -version = "24.2" +version = "25.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] name = "pip" -version = "24.3.1" +version = "25.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f4/b1/b422acd212ad7eedddaf7981eee6e5de085154ff726459cf2da7c5a184c1/pip-24.3.1.tar.gz", hash = "sha256:ebcb60557f2aefabc2e0f918751cd24ea0d56d8ec5445fe1807f1d2109660b99", size = 1931073 } +sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/7d/500c9ad20238fcfcb4cb9243eede163594d7020ce87bd9610c9e02771876/pip-24.3.1-py3-none-any.whl", hash = "sha256:3790624780082365f47549d032f3770eeb2b1e8bd1f7b2e02dace1afa361b4ed", size = 1822182 }, + { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" }, ] [[package]] name = "platformdirs" -version = "4.3.6" +version = "4.3.8" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 } +sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, + { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" }, ] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] [[package]] name = "pre-commit" -version = "4.0.1" +version = "4.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, @@ -946,55 +1154,55 @@ dependencies = [ { name = "pyyaml" }, { name = "virtualenv" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2e/c8/e22c292035f1bac8b9f5237a2622305bc0304e776080b246f3df57c4ff9f/pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2", size = 191678 } +sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424, upload-time = "2025-03-18T21:35:20.987Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/16/8f/496e10d51edd6671ebe0432e33ff800aa86775d2d147ce7d43389324a525/pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878", size = 218713 }, + { url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707, upload-time = "2025-03-18T21:35:19.343Z" }, ] [[package]] name = "pyasn1" version = "0.6.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, ] [[package]] name = "pyasn1-modules" -version = "0.4.1" +version = "0.4.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pyasn1" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1d/67/6afbf0d507f73c32d21084a79946bfcfca5fbc62a72057e9c23797a737c9/pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c", size = 310028 } +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/89/bc88a6711935ba795a679ea6ebee07e128050d6382eaa35a0a47c8032bdc/pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd", size = 181537 }, + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, ] [[package]] name = "pycparser" version = "2.22" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, ] [[package]] name = "pygments" -version = "2.19.1" +version = "2.19.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, ] [[package]] name = "pykerberos" version = "1.2.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5ca1ed745a2c5672dc838a8398101051dd5f255b130d/pykerberos-1.2.4.tar.gz", hash = "sha256:9d701ebd8fc596c99d3155d5ba45813bd5908d26ef83ba0add250edb622abed4", size = 25046 } +sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5ca1ed745a2c5672dc838a8398101051dd5f255b130d/pykerberos-1.2.4.tar.gz", hash = "sha256:9d701ebd8fc596c99d3155d5ba45813bd5908d26ef83ba0add250edb622abed4", size = 25046, upload-time = "2022-03-09T03:54:08.546Z" } [[package]] name = "pymongo" @@ -1011,7 +1219,8 @@ docs = [ { name = "furo" }, { name = "readthedocs-sphinx-search" }, { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "sphinx-autobuild" }, { name = "sphinx-rtd-theme" }, { name = "sphinxcontrib-shellcheck" }, @@ -1055,6 +1264,7 @@ eventlet = [ { name = "eventlet" }, ] gevent = [ + { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, { name = "gevent" }, ] mockupdb = [ @@ -1082,7 +1292,7 @@ requires-dist = [ { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')" }, { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, - { name = "furo", marker = "extra == 'docs'", specifier = "==2024.8.6" }, + { name = "furo", marker = "extra == 'docs'", specifier = "==2025.7.19" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, @@ -1110,7 +1320,10 @@ coverage = [ ] dev = [{ name = "pre-commit", specifier = ">=4.0" }] eventlet = [{ name = "eventlet" }] -gevent = [{ name = "gevent" }] +gevent = [ + { name = "cffi", marker = "python_full_version == '3.14.*'", specifier = ">=2.0.0b1" }, + { name = "gevent" }, +] mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] perf = [{ name = "simplejson" }] pip = [{ name = "pip" }] @@ -1130,17 +1343,18 @@ dependencies = [ { name = "boto3" }, { name = "botocore" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c7/37/ca8d840f322f0047b71afcec7a489b1ea1f59a5f6d29f91ad8004024736f/pymongo_auth_aws-1.3.0.tar.gz", hash = "sha256:d0fa893958dc525ca29f601c34f2ca73c860f66bc6511ec0a7da6eb7ea44e94f", size = 18559 } +sdist = { url = "https://files.pythonhosted.org/packages/c7/37/ca8d840f322f0047b71afcec7a489b1ea1f59a5f6d29f91ad8004024736f/pymongo_auth_aws-1.3.0.tar.gz", hash = "sha256:d0fa893958dc525ca29f601c34f2ca73c860f66bc6511ec0a7da6eb7ea44e94f", size = 18559, upload-time = "2024-09-11T20:29:17.668Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f4/12/a997fc108416f31fac55748e5406c1c8c4e976a4073f07b5553825641611/pymongo_auth_aws-1.3.0-py3-none-any.whl", hash = "sha256:367f6d853da428a02e9e450422756133715d40f8141f47ae5d98f139a88c0ce5", size = 15470 }, + { url = "https://files.pythonhosted.org/packages/f4/12/a997fc108416f31fac55748e5406c1c8c4e976a4073f07b5553825641611/pymongo_auth_aws-1.3.0-py3-none-any.whl", hash = "sha256:367f6d853da428a02e9e450422756133715d40f8141f47ae5d98f139a88c0ce5", size = 15470, upload-time = "2024-09-11T20:29:16.637Z" }, ] [[package]] name = "pymongocrypt" -version = "1.14.0.dev0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#af621673c46d3d8fd2a2fe9d5540e24a79d9357a" } +version = "1.14.2.dev0" +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#56048cf426bfeffa0805934b668a7af5ed8e907c" } dependencies = [ - { name = "cffi" }, + { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*'" }, + { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, { name = "cryptography" }, { name = "httpx" }, { name = "packaging" }, @@ -1148,15 +1362,15 @@ dependencies = [ [[package]] name = "pyopenssl" -version = "25.0.0" +version = "25.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cryptography" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9f/26/e25b4a374b4639e0c235527bbe31c0524f26eda701d79456a7e1877f4cc5/pyopenssl-25.0.0.tar.gz", hash = "sha256:cd2cef799efa3936bb08e8ccb9433a575722b9dd986023f1cabc4ae64e9dac16", size = 179573 } +sdist = { url = "https://files.pythonhosted.org/packages/04/8c/cd89ad05804f8e3c17dea8f178c3f40eeab5694c30e0c9f5bcd49f576fc3/pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b", size = 179937, upload-time = "2025-05-17T16:28:31.31Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ca/d7/eb76863d2060dcbe7c7e6cccfd95ac02ea0b9acc37745a0d99ff6457aefb/pyOpenSSL-25.0.0-py3-none-any.whl", hash = "sha256:424c247065e46e76a37411b9ab1782541c23bb658bf003772c3405fbaa128e90", size = 56453 }, + { url = "https://files.pythonhosted.org/packages/80/28/2659c02301b9500751f8d42f9a6632e1508aa5120de5e43042b8b30f8d5d/pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab", size = 56771, upload-time = "2025-05-17T16:28:29.197Z" }, ] [[package]] @@ -1167,14 +1381,14 @@ dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/66/df/3c6f6b08fba7ccf49b114dfc4bb33e25c299883fd763f93fad47ef8bc58d/pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd", size = 3789911 } +sdist = { url = "https://files.pythonhosted.org/packages/66/df/3c6f6b08fba7ccf49b114dfc4bb33e25c299883fd763f93fad47ef8bc58d/pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd", size = 3789911, upload-time = "2025-01-15T15:01:20.913Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/b1/a18de17f40e4f61ca58856b9ef9b0febf74ff88978c3f7776f910071f567/pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2", size = 5595487 }, + { url = "https://files.pythonhosted.org/packages/e7/b1/a18de17f40e4f61ca58856b9ef9b0febf74ff88978c3f7776f910071f567/pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2", size = 5595487, upload-time = "2025-01-15T15:01:17.775Z" }, ] [[package]] name = "pytest" -version = "8.3.4" +version = "8.4.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -1182,36 +1396,40 @@ dependencies = [ { name = "iniconfig" }, { name = "packaging" }, { name = "pluggy" }, + { name = "pygments" }, { name = "tomli", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, + { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" }, ] [[package]] name = "pytest-asyncio" -version = "0.25.2" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/72/df/adcc0d60f1053d74717d21d58c0048479e9cab51464ce0d2965b086bd0e2/pytest_asyncio-0.25.2.tar.gz", hash = "sha256:3f8ef9a98f45948ea91a0ed3dc4268b5326c0e7bce73892acc654df4262ad45f", size = 53950 } +sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/d8/defa05ae50dcd6019a95527200d3b3980043df5aa445d40cb0ef9f7f98ab/pytest_asyncio-0.25.2-py3-none-any.whl", hash = "sha256:0d0bb693f7b99da304a0634afc0a4b19e49d5e0de2d670f38dc4bfa5727c5075", size = 19400 }, + { url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" }, ] [[package]] name = "pytest-cov" -version = "6.0.0" +version = "6.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "coverage", extra = ["toml"] }, + { name = "pluggy" }, { name = "pytest" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/be/45/9b538de8cef30e17c7b45ef42f538a94889ed6a16f2387a6c89e73220651/pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0", size = 66945 } +sdist = { url = "https://files.pythonhosted.org/packages/18/99/668cade231f434aaa59bbfbf49469068d2ddd945000621d3d165d2e7dd7b/pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2", size = 69432, upload-time = "2025-06-12T10:47:47.684Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/36/3b/48e79f2cd6a61dbbd4807b4ed46cb564b4fd50a76166b1c4ea5c1d9e2371/pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35", size = 22949 }, + { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" }, ] [[package]] @@ -1221,9 +1439,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "six" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, ] [[package]] @@ -1233,99 +1451,108 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cramjam" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/39/66/9185fbb6605ba92716d9f77fbb13c97eb671cd13c3ad56bd154016fbf08b/python_snappy-0.7.3.tar.gz", hash = "sha256:40216c1badfb2d38ac781ecb162a1d0ec40f8ee9747e610bcfefdfa79486cee3", size = 9337 } +sdist = { url = "https://files.pythonhosted.org/packages/39/66/9185fbb6605ba92716d9f77fbb13c97eb671cd13c3ad56bd154016fbf08b/python_snappy-0.7.3.tar.gz", hash = "sha256:40216c1badfb2d38ac781ecb162a1d0ec40f8ee9747e610bcfefdfa79486cee3", size = 9337, upload-time = "2024-08-29T13:16:05.705Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155 }, + { url = "https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155, upload-time = "2024-08-29T13:16:04.773Z" }, ] [[package]] name = "pyyaml" version = "6.0.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199 }, - { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758 }, - { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463 }, - { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280 }, - { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239 }, - { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802 }, - { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527 }, - { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052 }, - { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774 }, - { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, - { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, - { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, - { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, - { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, - { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, - { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, - { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, - { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, - { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, - { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, - { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, - { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, - { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, - { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, - { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, - { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, - { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, - { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, - { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, - { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, - { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, - { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, - { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, - { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, - { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, - { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, - { url = "https://files.pythonhosted.org/packages/65/d8/b7a1db13636d7fb7d4ff431593c510c8b8fca920ade06ca8ef20015493c5/PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", size = 184777 }, - { url = "https://files.pythonhosted.org/packages/0a/02/6ec546cd45143fdf9840b2c6be8d875116a64076218b61d68e12548e5839/PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", size = 172318 }, - { url = "https://files.pythonhosted.org/packages/0e/9a/8cc68be846c972bda34f6c2a93abb644fb2476f4dcc924d52175786932c9/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", size = 720891 }, - { url = "https://files.pythonhosted.org/packages/e9/6c/6e1b7f40181bc4805e2e07f4abc10a88ce4648e7e95ff1abe4ae4014a9b2/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", size = 722614 }, - { url = "https://files.pythonhosted.org/packages/3d/32/e7bd8535d22ea2874cef6a81021ba019474ace0d13a4819c2a4bce79bd6a/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", size = 737360 }, - { url = "https://files.pythonhosted.org/packages/d7/12/7322c1e30b9be969670b672573d45479edef72c9a0deac3bb2868f5d7469/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", size = 699006 }, - { url = "https://files.pythonhosted.org/packages/82/72/04fcad41ca56491995076630c3ec1e834be241664c0c09a64c9a2589b507/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", size = 723577 }, - { url = "https://files.pythonhosted.org/packages/ed/5e/46168b1f2757f1fcd442bc3029cd8767d88a98c9c05770d8b420948743bb/PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", size = 144593 }, - { url = "https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312 }, +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" }, + { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" }, + { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" }, + { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" }, + { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" }, + { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" }, + { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" }, + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" }, + { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, upload-time = "2024-08-06T20:32:18.555Z" }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, + { url = "https://files.pythonhosted.org/packages/65/d8/b7a1db13636d7fb7d4ff431593c510c8b8fca920ade06ca8ef20015493c5/PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", size = 184777, upload-time = "2024-08-06T20:33:25.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/02/6ec546cd45143fdf9840b2c6be8d875116a64076218b61d68e12548e5839/PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", size = 172318, upload-time = "2024-08-06T20:33:27.212Z" }, + { url = "https://files.pythonhosted.org/packages/0e/9a/8cc68be846c972bda34f6c2a93abb644fb2476f4dcc924d52175786932c9/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", size = 720891, upload-time = "2024-08-06T20:33:28.974Z" }, + { url = "https://files.pythonhosted.org/packages/e9/6c/6e1b7f40181bc4805e2e07f4abc10a88ce4648e7e95ff1abe4ae4014a9b2/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", size = 722614, upload-time = "2024-08-06T20:33:34.157Z" }, + { url = "https://files.pythonhosted.org/packages/3d/32/e7bd8535d22ea2874cef6a81021ba019474ace0d13a4819c2a4bce79bd6a/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", size = 737360, upload-time = "2024-08-06T20:33:35.84Z" }, + { url = "https://files.pythonhosted.org/packages/d7/12/7322c1e30b9be969670b672573d45479edef72c9a0deac3bb2868f5d7469/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", size = 699006, upload-time = "2024-08-06T20:33:37.501Z" }, + { url = "https://files.pythonhosted.org/packages/82/72/04fcad41ca56491995076630c3ec1e834be241664c0c09a64c9a2589b507/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", size = 723577, upload-time = "2024-08-06T20:33:39.389Z" }, + { url = "https://files.pythonhosted.org/packages/ed/5e/46168b1f2757f1fcd442bc3029cd8767d88a98c9c05770d8b420948743bb/PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", size = 144593, upload-time = "2024-08-06T20:33:46.63Z" }, + { url = "https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312, upload-time = "2024-08-06T20:33:49.073Z" }, ] [[package]] name = "readthedocs-sphinx-search" version = "0.3.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8f/96/0c51439e3dbc634cf5328ffb173ff759b7fc9abf3276e78bf71d9fc0aa51/readthedocs-sphinx-search-0.3.2.tar.gz", hash = "sha256:277773bfa28566a86694c08e568d5a648cd80f22826545555a764d6d20c365fb", size = 21949 } +sdist = { url = "https://files.pythonhosted.org/packages/8f/96/0c51439e3dbc634cf5328ffb173ff759b7fc9abf3276e78bf71d9fc0aa51/readthedocs-sphinx-search-0.3.2.tar.gz", hash = "sha256:277773bfa28566a86694c08e568d5a648cd80f22826545555a764d6d20c365fb", size = 21949, upload-time = "2024-01-15T16:46:22.565Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/04/3c/41bc9d7d4d936a73e380423f23996bee1691e17598d8a03c062be6aac640/readthedocs_sphinx_search-0.3.2-py3-none-any.whl", hash = "sha256:58716fd21f01581e6e67bf3bc02e79c77e10dc58b5f8e4c7cc1977e013eda173", size = 21379 }, + { url = "https://files.pythonhosted.org/packages/04/3c/41bc9d7d4d936a73e380423f23996bee1691e17598d8a03c062be6aac640/readthedocs_sphinx_search-0.3.2-py3-none-any.whl", hash = "sha256:58716fd21f01581e6e67bf3bc02e79c77e10dc58b5f8e4c7cc1977e013eda173", size = 21379, upload-time = "2024-01-15T16:46:20.552Z" }, ] [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "charset-normalizer" }, { name = "idna" }, { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "urllib3", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "urllib3", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "roman-numerals-py" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/76/48fd56d17c5bdbdf65609abbc67288728a98ed4c02919428d4f52d23b24b/roman_numerals_py-3.1.0.tar.gz", hash = "sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d", size = 9017, upload-time = "2025-02-22T07:34:54.333Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/97/d2cbbaa10c9b826af0e10fdf836e1bf344d9f0abb873ebc34d1f49642d3f/roman_numerals_py-3.1.0-py3-none-any.whl", hash = "sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c", size = 7742, upload-time = "2025-02-22T07:34:52.422Z" }, ] [[package]] name = "s3transfer" -version = "0.11.1" +version = "0.13.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1a/aa/fdd958c626b00e3f046d4004363e7f1a2aba4354f78d65ceb3b217fa5eb8/s3transfer-0.11.1.tar.gz", hash = "sha256:3f25c900a367c8b7f7d8f9c34edc87e300bde424f779dc9f0a8ae4f9df9264f6", size = 146952 } +sdist = { url = "https://files.pythonhosted.org/packages/6d/05/d52bf1e65044b4e5e27d4e63e8d1579dbdec54fce685908ae09bc3720030/s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf", size = 150589, upload-time = "2025-07-18T19:22:42.31Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/ce/22673f4a85ccc640735b4f8d12178a0f41b5d3c6eda7f33756d10ce56901/s3transfer-0.11.1-py3-none-any.whl", hash = "sha256:8fa0aa48177be1f3425176dfe1ab85dcd3d962df603c3dbfc585e6bf857ef0ff", size = 84111 }, + { url = "https://files.pythonhosted.org/packages/6d/4f/d073e09df851cfa251ef7840007d04db3293a0482ce607d2b993926089be/s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724", size = 85308, upload-time = "2025-07-18T19:22:40.947Z" }, ] [[package]] @@ -1338,128 +1565,128 @@ dependencies = [ { name = "pyasn1" }, { name = "pyasn1-modules" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/07/a5/dfc752b979067947261dbbf2543470c58efe735c3c1301dd870ef27830ee/service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09", size = 39245 } +sdist = { url = "https://files.pythonhosted.org/packages/07/a5/dfc752b979067947261dbbf2543470c58efe735c3c1301dd870ef27830ee/service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09", size = 39245, upload-time = "2024-10-26T07:21:57.736Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/08/2c/ca6dd598b384bc1ce581e24aaae0f2bed4ccac57749d5c3befbb5e742081/service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85", size = 11364 }, + { url = "https://files.pythonhosted.org/packages/08/2c/ca6dd598b384bc1ce581e24aaae0f2bed4ccac57749d5c3befbb5e742081/service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85", size = 11364, upload-time = "2024-10-26T07:21:56.302Z" }, ] [[package]] name = "setuptools" -version = "75.8.0" +version = "80.9.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/92/ec/089608b791d210aec4e7f97488e67ab0d33add3efccb83a056cbafe3a2a6/setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6", size = 1343222 } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/8a/b9dc7678803429e4a3bc9ba462fa3dd9066824d3c607490235c6a796be5a/setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3", size = 1228782 }, + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, ] [[package]] name = "simplejson" -version = "3.19.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3d/29/085111f19717f865eceaf0d4397bf3e76b08d60428b076b64e2a1903706d/simplejson-3.19.3.tar.gz", hash = "sha256:8e086896c36210ab6050f2f9f095a5f1e03c83fa0e7f296d6cba425411364680", size = 85237 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/39/24/260ad03435ce8ef2436031951134659c7161776ec3a78094b35b9375ceea/simplejson-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:50d8b742d74c449c4dcac570d08ce0f21f6a149d2d9cf7652dbf2ba9a1bc729a", size = 93660 }, - { url = "https://files.pythonhosted.org/packages/63/a1/dee207f357bcd6b106f2ca5129ee916c24993ba08b7dfbf9a37c22442ea9/simplejson-3.19.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd011fc3c1d88b779645495fdb8189fb318a26981eebcce14109460e062f209b", size = 75546 }, - { url = "https://files.pythonhosted.org/packages/80/7b/45ef1da43f54d209ce2ef59b7356cda13f810186c381f38ae23a4d2b1337/simplejson-3.19.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:637c4d4b81825c1f4d651e56210bd35b5604034b192b02d2d8f17f7ce8c18f42", size = 75602 }, - { url = "https://files.pythonhosted.org/packages/7f/4b/9a132382982f8127bc7ce5212a5585d83c174707c9dd698d0cb6a0d41882/simplejson-3.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f56eb03bc9e432bb81adc8ecff2486d39feb371abb442964ffb44f6db23b332", size = 138632 }, - { url = "https://files.pythonhosted.org/packages/76/37/012f5ad2f38afa28f8a6ad9da01dc0b64492ffbaf2a3f2f8a0e1fddf9c1d/simplejson-3.19.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ef59a53be400c1fad2c914b8d74c9d42384fed5174f9321dd021b7017fd40270", size = 146740 }, - { url = "https://files.pythonhosted.org/packages/69/b3/89640bd676e26ea2315b5aaf80712a6fbbb4338e4caf872d91448502a19b/simplejson-3.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72e8abbc86fcac83629a030888b45fed3a404d54161118be52cb491cd6975d3e", size = 134440 }, - { url = "https://files.pythonhosted.org/packages/61/20/0035a288deaff05397d6cc0145b33f3dd2429b99cdc880de4c5eca41ca72/simplejson-3.19.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8efb03ca77bd7725dfacc9254df00d73e6f43013cf39bd37ef1a8ed0ebb5165", size = 137949 }, - { url = "https://files.pythonhosted.org/packages/5d/de/5b03fafe3003e32d179588953d38183af6c3747e95c7dcc668c4f9eb886a/simplejson-3.19.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:add8850db04b98507a8b62d248a326ecc8561e6d24336d1ca5c605bbfaab4cad", size = 139992 }, - { url = "https://files.pythonhosted.org/packages/d1/ce/e493116ff49fd215f7baa25195b8f684c91e65c153e2a57e04dc3f3a466b/simplejson-3.19.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fc3dc9fb413fc34c396f52f4c87de18d0bd5023804afa8ab5cc224deeb6a9900", size = 140320 }, - { url = "https://files.pythonhosted.org/packages/86/f3/a18b98a7a27548829f672754dd3940fb637a27981399838128d3e560087f/simplejson-3.19.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dfa420bb9225dd33b6efdabde7c6a671b51150b9b1d9c4e5cd74d3b420b3fe1", size = 148625 }, - { url = "https://files.pythonhosted.org/packages/0f/55/d3da33ee3e708133da079b9d537693d7fef281e6f0d27921cc7e5b3ec523/simplejson-3.19.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7b5c472099b39b274dcde27f1113db8d818c9aa3ba8f78cbb8ad04a4c1ac2118", size = 141287 }, - { url = "https://files.pythonhosted.org/packages/17/e8/56184ab4d66bb64a6ff569f069b3796dfd943f9b961268fe0d403526fc17/simplejson-3.19.3-cp310-cp310-win32.whl", hash = "sha256:817abad79241ed4a507b3caf4d3f2be5079f39d35d4c550a061988986bffd2ec", size = 74143 }, - { url = "https://files.pythonhosted.org/packages/be/8f/a0089eff060f10a925f08b0a0f50854321484f1ac54b1895bbf4c9213dfe/simplejson-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:dd5b9b1783e14803e362a558680d88939e830db2466f3fa22df5c9319f8eea94", size = 75643 }, - { url = "https://files.pythonhosted.org/packages/8c/bb/9ee3959e6929d228cf669b3f13f0edd43c5261b6cd69598640748b19ca35/simplejson-3.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e88abff510dcff903a18d11c2a75f9964e768d99c8d147839913886144b2065e", size = 91930 }, - { url = "https://files.pythonhosted.org/packages/ac/ae/a06523928af3a6783e2638cd4f6035c3e32de1c1063d563d9060c8d2f1ad/simplejson-3.19.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:934a50a614fb831614db5dbfba35127ee277624dda4d15895c957d2f5d48610c", size = 74787 }, - { url = "https://files.pythonhosted.org/packages/c3/58/fea732e48a7540035fe46d39e6fd77679f5810311d31da8661ce7a18210a/simplejson-3.19.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:212fce86a22188b0c7f53533b0f693ea9605c1a0f02c84c475a30616f55a744d", size = 74612 }, - { url = "https://files.pythonhosted.org/packages/ab/4d/15718f20cb0e3875b8af9597d6bb3bfbcf1383834b82b6385ee9ac0b72a9/simplejson-3.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d9e8f836688a8fabe6a6b41b334aa550a6823f7b4ac3d3712fc0ad8655be9a8", size = 143550 }, - { url = "https://files.pythonhosted.org/packages/93/44/815a4343774760f7a82459c8f6a4d8268b4b6d23f81e7b922a5e2ca79171/simplejson-3.19.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23228037dc5d41c36666384062904d74409a62f52283d9858fa12f4c22cffad1", size = 153284 }, - { url = "https://files.pythonhosted.org/packages/9d/52/d3202d9bba95444090d1c98e43da3c10907875babf63ed3c134d1b9437e3/simplejson-3.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0791f64fed7d4abad639491f8a6b1ba56d3c604eb94b50f8697359b92d983f36", size = 141518 }, - { url = "https://files.pythonhosted.org/packages/b7/d4/850948bcbcfe0b4a6c69dfde10e245d3a1ea45252f16a1e2308a3b06b1da/simplejson-3.19.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f614581b61a26fbbba232a1391f6cee82bc26f2abbb6a0b44a9bba25c56a1c", size = 144688 }, - { url = "https://files.pythonhosted.org/packages/58/d2/b8dcb0a07d9cd54c47f9fe8733dbb83891d1efe4fc786d9dfc8781cc04f9/simplejson-3.19.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1df0aaf1cb787fdf34484ed4a1f0c545efd8811f6028623290fef1a53694e597", size = 144534 }, - { url = "https://files.pythonhosted.org/packages/a9/95/1e92d99039041f596e0923ec4f9153244acaf3830944dc69a7c11b23ceaa/simplejson-3.19.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:951095be8d4451a7182403354c22ec2de3e513e0cc40408b689af08d02611588", size = 146565 }, - { url = "https://files.pythonhosted.org/packages/21/04/c96aeb3a74031255e4cbcc0ca1b6ebfb5549902f0a065f06d65ce8447c0c/simplejson-3.19.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a954b30810988feeabde843e3263bf187697e0eb5037396276db3612434049b", size = 155014 }, - { url = "https://files.pythonhosted.org/packages/b7/41/e28a28593afc4a75d8999d057bfb7c73a103e35f927e66f4bb92571787ae/simplejson-3.19.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c40df31a75de98db2cdfead6074d4449cd009e79f54c1ebe5e5f1f153c68ad20", size = 148092 }, - { url = "https://files.pythonhosted.org/packages/2b/82/1c81a3af06f937afb6d2e9d74a465c0e0ae6db444d1bf2a436ea26de1965/simplejson-3.19.3-cp311-cp311-win32.whl", hash = "sha256:7e2a098c21ad8924076a12b6c178965d88a0ad75d1de67e1afa0a66878f277a5", size = 73942 }, - { url = "https://files.pythonhosted.org/packages/65/be/d8ab9717f471be3c114f16abd8be21d9a6a0a09b9b49177d93d64d3717d9/simplejson-3.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:c9bedebdc5fdad48af8783022bae307746d54006b783007d1d3c38e10872a2c6", size = 75469 }, - { url = "https://files.pythonhosted.org/packages/20/15/513fea93fafbdd4993eacfcb762965b2ff3d29e618c029e2956174d68c4b/simplejson-3.19.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:66a0399e21c2112acacfebf3d832ebe2884f823b1c7e6d1363f2944f1db31a99", size = 92921 }, - { url = "https://files.pythonhosted.org/packages/a4/4f/998a907ae1a6c104dc0ee48aa248c2478490152808d34d8e07af57f396c3/simplejson-3.19.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6ef9383c5e05f445be60f1735c1816163c874c0b1ede8bb4390aff2ced34f333", size = 75311 }, - { url = "https://files.pythonhosted.org/packages/db/44/acd6122201e927451869d45952b9ab1d3025cdb5e61548d286d08fbccc08/simplejson-3.19.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:42e5acf80d4d971238d4df97811286a044d720693092b20a56d5e56b7dcc5d09", size = 74964 }, - { url = "https://files.pythonhosted.org/packages/27/ca/d0a1e8f16e1bbdc0b8c6d88166f45f565ed7285f53928cfef3b6ce78f14d/simplejson-3.19.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0b0efc7279d768db7c74d3d07f0b5c81280d16ae3fb14e9081dc903e8360771", size = 150106 }, - { url = "https://files.pythonhosted.org/packages/63/59/0554b78cf26c98e2b9cae3f44723bd72c2394e2afec1a14eedc6211f7187/simplejson-3.19.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0552eb06e7234da892e1d02365cd2b7b2b1f8233aa5aabdb2981587b7cc92ea0", size = 158347 }, - { url = "https://files.pythonhosted.org/packages/b2/fe/9f30890352e431e8508cc569912d3322147d3e7e4f321e48c0adfcb4c97d/simplejson-3.19.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf6a3b9a7d7191471b464fe38f684df10eb491ec9ea454003edb45a011ab187", size = 148456 }, - { url = "https://files.pythonhosted.org/packages/37/e3/663a09542ee021d4131162f7a164cb2e7f04ef48433a67591738afbf12ea/simplejson-3.19.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7017329ca8d4dca94ad5e59f496e5fc77630aecfc39df381ffc1d37fb6b25832", size = 152190 }, - { url = "https://files.pythonhosted.org/packages/31/20/4e0c4d35e10ff6465003bec304316d822a559a1c38c66ef6892ca199c207/simplejson-3.19.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:67a20641afebf4cfbcff50061f07daad1eace6e7b31d7622b6fa2c40d43900ba", size = 149846 }, - { url = "https://files.pythonhosted.org/packages/08/7a/46e2e072cac3987cbb05946f25167f0ad2fe536748e7405953fd6661a486/simplejson-3.19.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:dd6a7dabcc4c32daf601bc45e01b79175dde4b52548becea4f9545b0a4428169", size = 151714 }, - { url = "https://files.pythonhosted.org/packages/7f/7d/dbeeac10eb61d5d8858d0bb51121a21050d281dc83af4c557f86da28746c/simplejson-3.19.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:08f9b443a94e72dd02c87098c96886d35790e79e46b24e67accafbf13b73d43b", size = 158777 }, - { url = "https://files.pythonhosted.org/packages/fc/8f/a98bdbb799c6a4a884b5823db31785a96ba895b4b0f4d8ac345d6fe98bbf/simplejson-3.19.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa97278ae6614346b5ca41a45a911f37a3261b57dbe4a00602048652c862c28b", size = 154230 }, - { url = "https://files.pythonhosted.org/packages/b1/db/852eebceb85f969ae40e06babed1a93d3bacb536f187d7a80ff5823a5979/simplejson-3.19.3-cp312-cp312-win32.whl", hash = "sha256:ef28c3b328d29b5e2756903aed888960bc5df39b4c2eab157ae212f70ed5bf74", size = 74002 }, - { url = "https://files.pythonhosted.org/packages/fe/68/9f0e5df0651cb79ef83cba1378765a00ee8038e6201cc82b8e7178a7778e/simplejson-3.19.3-cp312-cp312-win_amd64.whl", hash = "sha256:1e662336db50ad665777e6548b5076329a94a0c3d4a0472971c588b3ef27de3a", size = 75596 }, - { url = "https://files.pythonhosted.org/packages/93/3a/5896821ed543899fcb9c4256c7e71bb110048047349a00f42bc8b8fb379f/simplejson-3.19.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0959e6cb62e3994b5a40e31047ff97ef5c4138875fae31659bead691bed55896", size = 92931 }, - { url = "https://files.pythonhosted.org/packages/39/15/5d33d269440912ee40d856db0c8be2b91aba7a219690ab01f86cb0edd590/simplejson-3.19.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7a7bfad839c624e139a4863007233a3f194e7c51551081f9789cba52e4da5167", size = 75318 }, - { url = "https://files.pythonhosted.org/packages/2a/8d/2e7483a2bf7ec53acf7e012bafbda79d7b34f90471dda8e424544a59d484/simplejson-3.19.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afab2f7f2486a866ff04d6d905e9386ca6a231379181a3838abce1f32fbdcc37", size = 74971 }, - { url = "https://files.pythonhosted.org/packages/4d/9d/9bdf34437c8834a7cf7246f85e9d5122e30579f512c10a0c2560e994294f/simplejson-3.19.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00313681015ac498e1736b304446ee6d1c72c5b287cd196996dad84369998f7", size = 150112 }, - { url = "https://files.pythonhosted.org/packages/a7/e2/1f2ae2d89eaf85f6163c82150180aae5eaa18085cfaf892f8a57d4c51cbd/simplejson-3.19.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d936ae682d5b878af9d9eb4d8bb1fdd5e41275c8eb59ceddb0aeed857bb264a2", size = 158354 }, - { url = "https://files.pythonhosted.org/packages/60/83/26f610adf234c8492b3f30501e12f2271e67790f946c6898fe0c58aefe99/simplejson-3.19.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c6657485393f2e9b8177c77a7634f13ebe70d5e6de150aae1677d91516ce6b", size = 148455 }, - { url = "https://files.pythonhosted.org/packages/b5/4b/109af50006af77133653c55b5b91b4bd2d579ff8254ce11216c0b75f911b/simplejson-3.19.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a6a750d3c7461b1c47cfc6bba8d9e57a455e7c5f80057d2a82f738040dd1129", size = 152191 }, - { url = "https://files.pythonhosted.org/packages/75/dc/108872a8825cbd99ae6f4334e0490ff1580367baf12198bcaf988f6820ba/simplejson-3.19.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ea7a4a998c87c5674a27089e022110a1a08a7753f21af3baf09efe9915c23c3c", size = 149954 }, - { url = "https://files.pythonhosted.org/packages/eb/be/deec1d947a5d0472276ab4a4d1a9378dc5ee27f3dc9e54d4f62ffbad7a08/simplejson-3.19.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6300680d83a399be2b8f3b0ef7ef90b35d2a29fe6e9c21438097e0938bbc1564", size = 151812 }, - { url = "https://files.pythonhosted.org/packages/e9/58/4ee130702d36b1551ef66e7587eefe56651f3669255bf748cd71691e2434/simplejson-3.19.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ab69f811a660c362651ae395eba8ce84f84c944cea0df5718ea0ba9d1e4e7252", size = 158880 }, - { url = "https://files.pythonhosted.org/packages/0f/e1/59cc6a371b60f89e3498d9f4c8109f6b7359094d453f5fe80b2677b777b0/simplejson-3.19.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:256e09d0f94d9c3d177d9e95fd27a68c875a4baa2046633df387b86b652f5747", size = 154344 }, - { url = "https://files.pythonhosted.org/packages/79/45/1b36044670016f5cb25ebd92497427d2d1711ecb454d00f71eb9a00b77cc/simplejson-3.19.3-cp313-cp313-win32.whl", hash = "sha256:2c78293470313aefa9cfc5e3f75ca0635721fb016fb1121c1c5b0cb8cc74712a", size = 74002 }, - { url = "https://files.pythonhosted.org/packages/e2/58/b06226e6b0612f2b1fa13d5273551da259f894566b1eef32249ddfdcce44/simplejson-3.19.3-cp313-cp313-win_amd64.whl", hash = "sha256:3bbcdc438dc1683b35f7a8dc100960c721f922f9ede8127f63bed7dfded4c64c", size = 75599 }, - { url = "https://files.pythonhosted.org/packages/9a/3d/e7f1caf7fa8c004c30e2c0595a22646a178344a7f53924c11c3d263a8623/simplejson-3.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b5587feda2b65a79da985ae6d116daf6428bf7489992badc29fc96d16cd27b05", size = 93646 }, - { url = "https://files.pythonhosted.org/packages/01/40/ff5cae1b4ff35c7822456ad7d098371d697479d418194064b8aff8142d70/simplejson-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e0d2b00ecbcd1a3c5ea1abc8bb99a26508f758c1759fd01c3be482a3655a176f", size = 75544 }, - { url = "https://files.pythonhosted.org/packages/56/a8/dbe799f3620a08337ff5f3be27df7b5ba5beb1ee06acaf75f3cb46f8d650/simplejson-3.19.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:32a3ada8f3ea41db35e6d37b86dade03760f804628ec22e4fe775b703d567426", size = 75593 }, - { url = "https://files.pythonhosted.org/packages/d5/53/6ed299b9201ea914bb6a178a7e65413ed1969981533f50bfbe8a215be98f/simplejson-3.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f455672f4738b0f47183c5896e3606cd65c9ddee3805a4d18e8c96aa3f47c84", size = 138077 }, - { url = "https://files.pythonhosted.org/packages/1c/73/14306559157a6faedb4ecae28ad907b64b5359be5c9ec79233546acb96a4/simplejson-3.19.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b737a5fefedb8333fa50b8db3dcc9b1d18fd6c598f89fa7debff8b46bf4e511", size = 146307 }, - { url = "https://files.pythonhosted.org/packages/5b/1a/7994abb33e53ec972dd5e6dbb337b9070d3ad96017c4cff9d5dc83678ad4/simplejson-3.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb47ee773ce67476a960e2db4a0a906680c54f662521550828c0cc57d0099426", size = 133922 }, - { url = "https://files.pythonhosted.org/packages/08/15/8b4e1a8c7729b37797d0eab1381f517f928bd323d17efa7f4414c3565e1f/simplejson-3.19.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eed8cd98a7b24861da9d3d937f5fbfb6657350c547528a117297fe49e3960667", size = 137367 }, - { url = "https://files.pythonhosted.org/packages/59/9a/f5b786fe611395564d3e84f58f668242a7a2e674b4fac71b4e6b21d6d2b7/simplejson-3.19.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:619756f1dd634b5bdf57d9a3914300526c3b348188a765e45b8b08eabef0c94e", size = 139513 }, - { url = "https://files.pythonhosted.org/packages/4d/87/c310daf5e2f10306de3720f075f8ed74cbe83396879b8c55e832393233a5/simplejson-3.19.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dd7230d061e755d60a4d5445bae854afe33444cdb182f3815cff26ac9fb29a15", size = 139749 }, - { url = "https://files.pythonhosted.org/packages/fd/89/690880e1639b421a919d36fadf1fc364a38c3bc4f208dc11627426cdbe98/simplejson-3.19.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:101a3c8392028cd704a93c7cba8926594e775ca3c91e0bee82144e34190903f1", size = 148103 }, - { url = "https://files.pythonhosted.org/packages/a3/31/ef13eda5b5a0d8d9555b70151ee2956f63b845e1fac4ff904339dfb4dd89/simplejson-3.19.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e557712fc79f251673aeb3fad3501d7d4da3a27eff0857af2e1d1afbbcf6685", size = 140740 }, - { url = "https://files.pythonhosted.org/packages/39/5f/26b0a036592e45a2cb4be2f53d8827257e169bd5c84744a1aac89b0ff56f/simplejson-3.19.3-cp39-cp39-win32.whl", hash = "sha256:0bc5544e3128891bf613b9f71813ee2ec9c11574806f74dd8bb84e5e95bf64a2", size = 74115 }, - { url = "https://files.pythonhosted.org/packages/32/06/a35e2e1d8850aff1cf1320d4887bd5f97921c8964a1e260983d38d5d6c17/simplejson-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:06662392e4913dc8846d6a71a6d5de86db5fba244831abe1dd741d62a4136764", size = 75636 }, - { url = "https://files.pythonhosted.org/packages/0d/e7/f9fafbd4f39793a20cc52e77bbd766f7384312526d402c382928dc7667f6/simplejson-3.19.3-py3-none-any.whl", hash = "sha256:49cc4c7b940d43bd12bf87ec63f28cbc4964fc4e12c031cc8cd01650f43eb94e", size = 57004 }, +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/92/51b417685abd96b31308b61b9acce7ec50d8e1de8fbc39a7fd4962c60689/simplejson-3.20.1.tar.gz", hash = "sha256:e64139b4ec4f1f24c142ff7dcafe55a22b811a74d86d66560c8815687143037d", size = 85591, upload-time = "2025-02-15T05:18:53.15Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/c4/627214fb418cd4a17fb0230ff0b6c3bb4a85cbb48dd69c85dcc3b85df828/simplejson-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e580aa65d5f6c3bf41b9b4afe74be5d5ddba9576701c107c772d936ea2b5043a", size = 93790, upload-time = "2025-02-15T05:15:32.954Z" }, + { url = "https://files.pythonhosted.org/packages/15/ca/56a6a2a33cbcf330c4d71af3f827c47e4e0ba791e78f2642f3d1ab02ff31/simplejson-3.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a586ce4f78cec11f22fe55c5bee0f067e803aab9bad3441afe2181693b5ebb5", size = 75707, upload-time = "2025-02-15T05:15:34.954Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c8/3d92b67e03a3b6207d97202669f9454ed700b35ade9bd4428265a078fb6c/simplejson-3.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74a1608f9e6e8c27a4008d70a54270868306d80ed48c9df7872f9f4b8ac87808", size = 75700, upload-time = "2025-02-15T05:15:37.144Z" }, + { url = "https://files.pythonhosted.org/packages/74/30/20001219d6fdca4aaa3974c96dfb6955a766b4e2cc950505a5b51fd050b0/simplejson-3.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03db8cb64154189a92a7786209f24e391644f3a3fa335658be2df2af1960b8d8", size = 138672, upload-time = "2025-02-15T05:15:38.547Z" }, + { url = "https://files.pythonhosted.org/packages/21/47/50157810876c2a7ebbd6e6346ec25eda841fe061fecaa02538a7742a3d2a/simplejson-3.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eea7e2b7d858f6fdfbf0fe3cb846d6bd8a45446865bc09960e51f3d473c2271b", size = 146616, upload-time = "2025-02-15T05:15:39.871Z" }, + { url = "https://files.pythonhosted.org/packages/95/60/8c97cdc93096437b0aca2745aca63c880fe2315fd7f6a6ce6edbb344a2ae/simplejson-3.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e66712b17d8425bb7ff8968d4c7c7fd5a2dd7bd63728b28356223c000dd2f91f", size = 134344, upload-time = "2025-02-15T05:15:42.091Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9e/da184f0e9bb3a5d7ffcde713bd41b4fe46cca56b6f24d9bd155fac56805a/simplejson-3.20.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2cc4f6486f9f515b62f5831ff1888886619b84fc837de68f26d919ba7bbdcbc", size = 138017, upload-time = "2025-02-15T05:15:43.542Z" }, + { url = "https://files.pythonhosted.org/packages/31/db/00d1a8d9b036db98f678c8a3c69ed17d2894d1768d7a00576e787ad3e546/simplejson-3.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3c2df555ee4016148fa192e2b9cd9e60bc1d40769366134882685e90aee2a1e", size = 140118, upload-time = "2025-02-15T05:15:45.7Z" }, + { url = "https://files.pythonhosted.org/packages/52/21/57fc47eab8c1c73390b933a5ba9271f08e3e1ec83162c580357f28f5b97c/simplejson-3.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:78520f04b7548a5e476b5396c0847e066f1e0a4c0c5e920da1ad65e95f410b11", size = 140314, upload-time = "2025-02-15T05:16:07.949Z" }, + { url = "https://files.pythonhosted.org/packages/ad/cc/7cfd78d1e0fa5e57350b98cfe77353b6dfa13dce21afa4060e1019223852/simplejson-3.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f4bd49ecde87b0fe9f55cc971449a32832bca9910821f7072bbfae1155eaa007", size = 148544, upload-time = "2025-02-15T05:16:09.455Z" }, + { url = "https://files.pythonhosted.org/packages/63/26/1c894a1c2bd95dc8be0cf5a2fa73b0d173105b6ca18c90cb981ff10443d0/simplejson-3.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7eaae2b88eb5da53caaffdfa50e2e12022553949b88c0df4f9a9663609373f72", size = 141172, upload-time = "2025-02-15T05:16:10.966Z" }, + { url = "https://files.pythonhosted.org/packages/93/27/0717dccc10cd9988dbf1314def52ab32678a95a95328bb37cafacf499400/simplejson-3.20.1-cp310-cp310-win32.whl", hash = "sha256:e836fb88902799eac8debc2b642300748f4860a197fa3d9ea502112b6bb8e142", size = 74181, upload-time = "2025-02-15T05:16:12.361Z" }, + { url = "https://files.pythonhosted.org/packages/5f/af/593f896573f306519332d4287b1ab8b7b888c239bbd5159f7054d7055c2d/simplejson-3.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a19b552b212fc3b5b96fc5ce92333d4a9ac0a800803e1f17ebb16dac4be5", size = 75738, upload-time = "2025-02-15T05:16:14.438Z" }, + { url = "https://files.pythonhosted.org/packages/76/59/74bc90d1c051bc2432c96b34bd4e8036875ab58b4fcbe4d6a5a76985f853/simplejson-3.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:325b8c107253d3217e89d7b50c71015b5b31e2433e6c5bf38967b2f80630a8ca", size = 92132, upload-time = "2025-02-15T05:16:15.743Z" }, + { url = "https://files.pythonhosted.org/packages/71/c7/1970916e0c51794fff89f76da2f632aaf0b259b87753c88a8c409623d3e1/simplejson-3.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88a7baa8211089b9e58d78fbc1b0b322103f3f3d459ff16f03a36cece0d0fcf0", size = 74956, upload-time = "2025-02-15T05:16:17.062Z" }, + { url = "https://files.pythonhosted.org/packages/c8/0d/98cc5909180463f1d75fac7180de62d4cdb4e82c4fef276b9e591979372c/simplejson-3.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:299b1007b8101d50d95bc0db1bf5c38dc372e85b504cf77f596462083ee77e3f", size = 74772, upload-time = "2025-02-15T05:16:19.204Z" }, + { url = "https://files.pythonhosted.org/packages/e1/94/a30a5211a90d67725a3e8fcc1c788189f2ae2ed2b96b63ed15d0b7f5d6bb/simplejson-3.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ec618ed65caab48e81e3ed29586236a8e57daef792f1f3bb59504a7e98cd10", size = 143575, upload-time = "2025-02-15T05:16:21.337Z" }, + { url = "https://files.pythonhosted.org/packages/ee/08/cdb6821f1058eb5db46d252de69ff7e6c53f05f1bae6368fe20d5b51d37e/simplejson-3.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2cdead1d3197f0ff43373cf4730213420523ba48697743e135e26f3d179f38", size = 153241, upload-time = "2025-02-15T05:16:22.859Z" }, + { url = "https://files.pythonhosted.org/packages/4c/2d/ca3caeea0bdc5efc5503d5f57a2dfb56804898fb196dfada121323ee0ccb/simplejson-3.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3466d2839fdc83e1af42e07b90bc8ff361c4e8796cd66722a40ba14e458faddd", size = 141500, upload-time = "2025-02-15T05:16:25.068Z" }, + { url = "https://files.pythonhosted.org/packages/e1/33/d3e0779d5c58245e7370c98eb969275af6b7a4a5aec3b97cbf85f09ad328/simplejson-3.20.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d492ed8e92f3a9f9be829205f44b1d0a89af6582f0cf43e0d129fa477b93fe0c", size = 144757, upload-time = "2025-02-15T05:16:28.301Z" }, + { url = "https://files.pythonhosted.org/packages/54/53/2d93128bb55861b2fa36c5944f38da51a0bc6d83e513afc6f7838440dd15/simplejson-3.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f924b485537b640dc69434565463fd6fc0c68c65a8c6e01a823dd26c9983cf79", size = 144409, upload-time = "2025-02-15T05:16:29.687Z" }, + { url = "https://files.pythonhosted.org/packages/99/4c/dac310a98f897ad3435b4bdc836d92e78f09e38c5dbf28211ed21dc59fa2/simplejson-3.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e8eacf6a3491bf76ea91a8d46726368a6be0eb94993f60b8583550baae9439e", size = 146082, upload-time = "2025-02-15T05:16:31.064Z" }, + { url = "https://files.pythonhosted.org/packages/ee/22/d7ba958cfed39827335b82656b1c46f89678faecda9a7677b47e87b48ee6/simplejson-3.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d34d04bf90b4cea7c22d8b19091633908f14a096caa301b24c2f3d85b5068fb8", size = 154339, upload-time = "2025-02-15T05:16:32.719Z" }, + { url = "https://files.pythonhosted.org/packages/b8/c8/b072b741129406a7086a0799c6f5d13096231bf35fdd87a0cffa789687fc/simplejson-3.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69dd28d4ce38390ea4aaf212902712c0fd1093dc4c1ff67e09687c3c3e15a749", size = 147915, upload-time = "2025-02-15T05:16:34.291Z" }, + { url = "https://files.pythonhosted.org/packages/6c/46/8347e61e9cf3db5342a42f7fd30a81b4f5cf85977f916852d7674a540907/simplejson-3.20.1-cp311-cp311-win32.whl", hash = "sha256:dfe7a9da5fd2a3499436cd350f31539e0a6ded5da6b5b3d422df016444d65e43", size = 73972, upload-time = "2025-02-15T05:16:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/01/85/b52f24859237b4e9d523d5655796d911ba3d46e242eb1959c45b6af5aedd/simplejson-3.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:896a6c04d7861d507d800da7642479c3547060bf97419d9ef73d98ced8258766", size = 75595, upload-time = "2025-02-15T05:16:36.957Z" }, + { url = "https://files.pythonhosted.org/packages/8d/eb/34c16a1ac9ba265d024dc977ad84e1659d931c0a700967c3e59a98ed7514/simplejson-3.20.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f31c4a3a7ab18467ee73a27f3e59158255d1520f3aad74315edde7a940f1be23", size = 93100, upload-time = "2025-02-15T05:16:38.801Z" }, + { url = "https://files.pythonhosted.org/packages/41/fc/2c2c007d135894971e6814e7c0806936e5bade28f8db4dd7e2a58b50debd/simplejson-3.20.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884e6183d16b725e113b83a6fc0230152ab6627d4d36cb05c89c2c5bccfa7bc6", size = 75464, upload-time = "2025-02-15T05:16:40.905Z" }, + { url = "https://files.pythonhosted.org/packages/0f/05/2b5ecb33b776c34bb5cace5de5d7669f9b60e3ca13c113037b2ca86edfbd/simplejson-3.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d7a426e416fe0d3337115f04164cd9427eb4256e843a6b8751cacf70abc832", size = 75112, upload-time = "2025-02-15T05:16:42.246Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/1f3609a2792f06cd4b71030485f78e91eb09cfd57bebf3116bf2980a8bac/simplejson-3.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000602141d0bddfcff60ea6a6e97d5e10c9db6b17fd2d6c66199fa481b6214bb", size = 150182, upload-time = "2025-02-15T05:16:43.557Z" }, + { url = "https://files.pythonhosted.org/packages/2f/b0/053fbda38b8b602a77a4f7829def1b4f316cd8deb5440a6d3ee90790d2a4/simplejson-3.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af8377a8af78226e82e3a4349efdde59ffa421ae88be67e18cef915e4023a595", size = 158363, upload-time = "2025-02-15T05:16:45.748Z" }, + { url = "https://files.pythonhosted.org/packages/d1/4b/2eb84ae867539a80822e92f9be4a7200dffba609275faf99b24141839110/simplejson-3.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c7de4c88ab2fbcb8781a3b982ef883696736134e20b1210bca43fb42ff1acf", size = 148415, upload-time = "2025-02-15T05:16:47.861Z" }, + { url = "https://files.pythonhosted.org/packages/e0/bd/400b0bd372a5666addf2540c7358bfc3841b9ce5cdbc5cc4ad2f61627ad8/simplejson-3.20.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:455a882ff3f97d810709f7b620007d4e0aca8da71d06fc5c18ba11daf1c4df49", size = 152213, upload-time = "2025-02-15T05:16:49.25Z" }, + { url = "https://files.pythonhosted.org/packages/50/12/143f447bf6a827ee9472693768dc1a5eb96154f8feb140a88ce6973a3cfa/simplejson-3.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fc0f523ce923e7f38eb67804bc80e0a028c76d7868500aa3f59225574b5d0453", size = 150048, upload-time = "2025-02-15T05:16:51.5Z" }, + { url = "https://files.pythonhosted.org/packages/5e/ea/dd9b3e8e8ed710a66f24a22c16a907c9b539b6f5f45fd8586bd5c231444e/simplejson-3.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76461ec929282dde4a08061071a47281ad939d0202dc4e63cdd135844e162fbc", size = 151668, upload-time = "2025-02-15T05:16:53Z" }, + { url = "https://files.pythonhosted.org/packages/99/af/ee52a8045426a0c5b89d755a5a70cc821815ef3c333b56fbcad33c4435c0/simplejson-3.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19c2da8c043607bde4d4ef3a6b633e668a7d2e3d56f40a476a74c5ea71949f", size = 158840, upload-time = "2025-02-15T05:16:54.851Z" }, + { url = "https://files.pythonhosted.org/packages/68/db/ab32869acea6b5de7d75fa0dac07a112ded795d41eaa7e66c7813b17be95/simplejson-3.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2578bedaedf6294415197b267d4ef678fea336dd78ee2a6d2f4b028e9d07be3", size = 154212, upload-time = "2025-02-15T05:16:56.318Z" }, + { url = "https://files.pythonhosted.org/packages/fa/7a/e3132d454977d75a3bf9a6d541d730f76462ebf42a96fea2621498166f41/simplejson-3.20.1-cp312-cp312-win32.whl", hash = "sha256:339f407373325a36b7fd744b688ba5bae0666b5d340ec6d98aebc3014bf3d8ea", size = 74101, upload-time = "2025-02-15T05:16:57.746Z" }, + { url = "https://files.pythonhosted.org/packages/bc/5d/4e243e937fa3560107c69f6f7c2eed8589163f5ed14324e864871daa2dd9/simplejson-3.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:627d4486a1ea7edf1f66bb044ace1ce6b4c1698acd1b05353c97ba4864ea2e17", size = 75736, upload-time = "2025-02-15T05:16:59.017Z" }, + { url = "https://files.pythonhosted.org/packages/c4/03/0f453a27877cb5a5fff16a975925f4119102cc8552f52536b9a98ef0431e/simplejson-3.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:71e849e7ceb2178344998cbe5ade101f1b329460243c79c27fbfc51c0447a7c3", size = 93109, upload-time = "2025-02-15T05:17:00.377Z" }, + { url = "https://files.pythonhosted.org/packages/74/1f/a729f4026850cabeaff23e134646c3f455e86925d2533463420635ae54de/simplejson-3.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b63fdbab29dc3868d6f009a59797cefaba315fd43cd32ddd998ee1da28e50e29", size = 75475, upload-time = "2025-02-15T05:17:02.544Z" }, + { url = "https://files.pythonhosted.org/packages/e2/14/50a2713fee8ff1f8d655b1a14f4a0f1c0c7246768a1b3b3d12964a4ed5aa/simplejson-3.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1190f9a3ce644fd50ec277ac4a98c0517f532cfebdcc4bd975c0979a9f05e1fb", size = 75112, upload-time = "2025-02-15T05:17:03.875Z" }, + { url = "https://files.pythonhosted.org/packages/45/86/ea9835abb646755140e2d482edc9bc1e91997ed19a59fd77ae4c6a0facea/simplejson-3.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1336ba7bcb722ad487cd265701ff0583c0bb6de638364ca947bb84ecc0015d1", size = 150245, upload-time = "2025-02-15T05:17:06.899Z" }, + { url = "https://files.pythonhosted.org/packages/12/b4/53084809faede45da829fe571c65fbda8479d2a5b9c633f46b74124d56f5/simplejson-3.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e975aac6a5acd8b510eba58d5591e10a03e3d16c1cf8a8624ca177491f7230f0", size = 158465, upload-time = "2025-02-15T05:17:08.707Z" }, + { url = "https://files.pythonhosted.org/packages/a9/7d/d56579468d1660b3841e1f21c14490d103e33cf911886b22652d6e9683ec/simplejson-3.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a6dd11ee282937ad749da6f3b8d87952ad585b26e5edfa10da3ae2536c73078", size = 148514, upload-time = "2025-02-15T05:17:11.323Z" }, + { url = "https://files.pythonhosted.org/packages/19/e3/874b1cca3d3897b486d3afdccc475eb3a09815bf1015b01cf7fcb52a55f0/simplejson-3.20.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab980fcc446ab87ea0879edad41a5c28f2d86020014eb035cf5161e8de4474c6", size = 152262, upload-time = "2025-02-15T05:17:13.543Z" }, + { url = "https://files.pythonhosted.org/packages/32/84/f0fdb3625292d945c2bd13a814584603aebdb38cfbe5fe9be6b46fe598c4/simplejson-3.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f5aee2a4cb6b146bd17333ac623610f069f34e8f31d2f4f0c1a2186e50c594f0", size = 150164, upload-time = "2025-02-15T05:17:15.021Z" }, + { url = "https://files.pythonhosted.org/packages/95/51/6d625247224f01eaaeabace9aec75ac5603a42f8ebcce02c486fbda8b428/simplejson-3.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:652d8eecbb9a3b6461b21ec7cf11fd0acbab144e45e600c817ecf18e4580b99e", size = 151795, upload-time = "2025-02-15T05:17:16.542Z" }, + { url = "https://files.pythonhosted.org/packages/7f/d9/bb921df6b35be8412f519e58e86d1060fddf3ad401b783e4862e0a74c4c1/simplejson-3.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c09948f1a486a89251ee3a67c9f8c969b379f6ffff1a6064b41fea3bce0a112", size = 159027, upload-time = "2025-02-15T05:17:18.083Z" }, + { url = "https://files.pythonhosted.org/packages/03/c5/5950605e4ad023a6621cf4c931b29fd3d2a9c1f36be937230bfc83d7271d/simplejson-3.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cbbd7b215ad4fc6f058b5dd4c26ee5c59f72e031dfda3ac183d7968a99e4ca3a", size = 154380, upload-time = "2025-02-15T05:17:20.334Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/b74149557c5ec1e4e4d55758bda426f5d2ec0123cd01a53ae63b8de51fa3/simplejson-3.20.1-cp313-cp313-win32.whl", hash = "sha256:ae81e482476eaa088ef9d0120ae5345de924f23962c0c1e20abbdff597631f87", size = 74102, upload-time = "2025-02-15T05:17:22.475Z" }, + { url = "https://files.pythonhosted.org/packages/db/a9/25282fdd24493e1022f30b7f5cdf804255c007218b2bfaa655bd7ad34b2d/simplejson-3.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:1b9fd15853b90aec3b1739f4471efbf1ac05066a2c7041bf8db821bb73cd2ddc", size = 75736, upload-time = "2025-02-15T05:17:24.122Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ba/d32fe890a5edaf4a8518adf043bccf7866b600123f512a6de0988cf36810/simplejson-3.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a8011f1dd1d676befcd4d675ebdbfdbbefd3bf350052b956ba8c699fca7d8cef", size = 93773, upload-time = "2025-02-15T05:18:28.231Z" }, + { url = "https://files.pythonhosted.org/packages/48/c7/361e7f6695b56001a04e0a5cc623cd6c82ea2f45e872e61213e405cc8a24/simplejson-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e91703a4c5fec53e36875ae426ad785f4120bd1d93b65bed4752eeccd1789e0c", size = 75697, upload-time = "2025-02-15T05:18:30.006Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2f/d0ff0b772d4ef092876eb85c99bc591c446b0502715551dad7dfc7f7c2c0/simplejson-3.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e39eaa57c7757daa25bcd21f976c46be443b73dd6c3da47fe5ce7b7048ccefe2", size = 75692, upload-time = "2025-02-15T05:18:31.424Z" }, + { url = "https://files.pythonhosted.org/packages/26/94/cab4db9530b6ca9d62f16a260e8311b04130ccd670dab75e958fcb44590e/simplejson-3.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceab2ce2acdc7fbaa433a93006758db6ba9a659e80c4faa13b80b9d2318e9b17", size = 138106, upload-time = "2025-02-15T05:18:32.907Z" }, + { url = "https://files.pythonhosted.org/packages/40/22/11c0f746bdb44c297cea8a37d8f7ccb75ea6681132aadfb9f820d9a52647/simplejson-3.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d4f320c33277a5b715db5bf5b10dae10c19076bd6d66c2843e04bd12d1f1ea5", size = 146242, upload-time = "2025-02-15T05:18:35.223Z" }, + { url = "https://files.pythonhosted.org/packages/78/e9/b7c4c26f29b41cc41ba5f0224c47adbfa7f28427418edfd58ab122f3b584/simplejson-3.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6436c48e64378fa844d8c9e58a5ed0352bbcfd4028369a9b46679b7ab79d2d", size = 133866, upload-time = "2025-02-15T05:18:36.998Z" }, + { url = "https://files.pythonhosted.org/packages/09/68/1e81ed83f38906c8859f2b973afb19302357d6003e724a6105cee0f61ec7/simplejson-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e18345c8dda5d699be8166b61f9d80aaee4545b709f1363f60813dc032dac53", size = 137444, upload-time = "2025-02-15T05:18:38.763Z" }, + { url = "https://files.pythonhosted.org/packages/9a/6b/8d1e076c543277c1d603230eec24f4dd75ebce46d351c0679526d202981f/simplejson-3.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:90b573693d1526bed576f6817e2a492eaaef68f088b57d7a9e83d122bbb49e51", size = 139617, upload-time = "2025-02-15T05:18:40.36Z" }, + { url = "https://files.pythonhosted.org/packages/d1/46/7b74803de10d4157c5cd2e89028897fa733374667bc5520a44b23b6c887a/simplejson-3.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:272cc767826e924a6bd369ea3dbf18e166ded29059c7a4d64d21a9a22424b5b5", size = 139725, upload-time = "2025-02-15T05:18:42.012Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/9991582665a7b6d95415e439bb4fbaa4faf0f77231666675a0fd1de54107/simplejson-3.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:51b41f284d603c4380732d7d619f8b34bd04bc4aa0ed0ed5f4ffd0539b14da44", size = 148010, upload-time = "2025-02-15T05:18:43.749Z" }, + { url = "https://files.pythonhosted.org/packages/54/ee/3c6e91989cdf65ec75e75662d9f15cfe167a792b893806169ea5b1da6fd2/simplejson-3.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6e6697a3067d281f01de0fe96fc7cba4ea870d96d7deb7bfcf85186d74456503", size = 140624, upload-time = "2025-02-15T05:18:45.498Z" }, + { url = "https://files.pythonhosted.org/packages/9d/bd/05e13ebb7ead81c8b555f4ccc741ea7dfa0ef5c2a0c183d6a7bc50a02bca/simplejson-3.20.1-cp39-cp39-win32.whl", hash = "sha256:6dd3a1d5aca87bf947f3339b0f8e8e329f1badf548bdbff37fac63c17936da8e", size = 74148, upload-time = "2025-02-15T05:18:47.27Z" }, + { url = "https://files.pythonhosted.org/packages/88/c9/d8bf87aaebec5a4c3ccfd5228689578e2fe77027d6114a259255d54969bf/simplejson-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:463f1fca8fbf23d088e5850fdd0dd4d5faea8900a9f9680270bd98fd649814ca", size = 75732, upload-time = "2025-02-15T05:18:49.598Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/00f02a0a921556dd5a6db1ef2926a1bc7a8bbbfb1c49cfed68a275b8ab2b/simplejson-3.20.1-py3-none-any.whl", hash = "sha256:8a6c1bbac39fa4a79f83cbf1df6ccd8ff7069582a9fd8db1e52cea073bc2c697", size = 57121, upload-time = "2025-02-15T05:18:51.243Z" }, ] [[package]] name = "six" version = "1.17.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] [[package]] name = "sniffio" version = "1.3.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 }, + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, ] [[package]] name = "snowballstemmer" -version = "2.2.0" +version = "3.0.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1", size = 86699 } +sdist = { url = "https://files.pythonhosted.org/packages/75/a7/9810d872919697c9d01295633f5d574fb416d47e535f258272ca1f01f447/snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895", size = 105575, upload-time = "2025-05-09T16:34:51.843Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a", size = 93002 }, + { url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" }, ] [[package]] name = "soupsieve" -version = "2.6" +version = "2.7" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/ce/fbaeed4f9fb8b2daa961f90591662df6a86c1abf25c548329a86920aedfb/soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb", size = 101569 } +sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418, upload-time = "2025-04-20T18:50:08.518Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 }, + { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" }, ] [[package]] @@ -1489,9 +1716,9 @@ dependencies = [ { name = "sphinxcontrib-serializinghtml", marker = "python_full_version < '3.10'" }, { name = "tomli", marker = "python_full_version < '3.10'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911 } +sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = "sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911, upload-time = "2024-07-20T14:46:56.059Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624 }, + { url = "https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624, upload-time = "2024-07-20T14:46:52.142Z" }, ] [[package]] @@ -1499,30 +1726,62 @@ name = "sphinx" version = "8.1.3" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version >= '3.10'", + "python_full_version == '3.10.*'", ] dependencies = [ - { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, - { name = "babel", marker = "python_full_version >= '3.10'" }, - { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, - { name = "docutils", marker = "python_full_version >= '3.10'" }, - { name = "imagesize", marker = "python_full_version >= '3.10'" }, - { name = "jinja2", marker = "python_full_version >= '3.10'" }, - { name = "packaging", marker = "python_full_version >= '3.10'" }, - { name = "pygments", marker = "python_full_version >= '3.10'" }, - { name = "requests", marker = "python_full_version >= '3.10'" }, - { name = "snowballstemmer", marker = "python_full_version >= '3.10'" }, - { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.10'" }, - { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.10'" }, - { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.10'" }, - { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.10'" }, - { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.10'" }, - { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.10'" }, + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "babel", marker = "python_full_version == '3.10.*'" }, + { name = "colorama", marker = "python_full_version == '3.10.*' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version == '3.10.*'" }, + { name = "imagesize", marker = "python_full_version == '3.10.*'" }, + { name = "jinja2", marker = "python_full_version == '3.10.*'" }, + { name = "packaging", marker = "python_full_version == '3.10.*'" }, + { name = "pygments", marker = "python_full_version == '3.10.*'" }, + { name = "requests", marker = "python_full_version == '3.10.*'" }, + { name = "snowballstemmer", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version == '3.10.*'" }, { name = "tomli", marker = "python_full_version == '3.10.*'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611 } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611, upload-time = "2024-10-13T20:27:13.93Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125 }, + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125, upload-time = "2024-10-13T20:27:10.448Z" }, +] + +[[package]] +name = "sphinx" +version = "8.2.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "babel", marker = "python_full_version >= '3.11'" }, + { name = "colorama", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version >= '3.11'" }, + { name = "imagesize", marker = "python_full_version >= '3.11'" }, + { name = "jinja2", marker = "python_full_version >= '3.11'" }, + { name = "packaging", marker = "python_full_version >= '3.11'" }, + { name = "pygments", marker = "python_full_version >= '3.11'" }, + { name = "requests", marker = "python_full_version >= '3.11'" }, + { name = "roman-numerals-py", marker = "python_full_version >= '3.11'" }, + { name = "snowballstemmer", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/ad/4360e50ed56cb483667b8e6dadf2d3fda62359593faabbe749a27c4eaca6/sphinx-8.2.3.tar.gz", hash = "sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348", size = 8321876, upload-time = "2025-03-02T22:31:59.658Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/53/136e9eca6e0b9dc0e1962e2c908fbea2e5ac000c2a2fbd9a35797958c48b/sphinx-8.2.3-py3-none-any.whl", hash = "sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3", size = 3589741, upload-time = "2025-03-02T22:31:56.836Z" }, ] [[package]] @@ -1532,15 +1791,16 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama" }, { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "starlette" }, { name = "uvicorn" }, { name = "watchfiles" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a5/2c/155e1de2c1ba96a72e5dba152c509a8b41e047ee5c2def9e9f0d812f8be7/sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1", size = 14023 } +sdist = { url = "https://files.pythonhosted.org/packages/a5/2c/155e1de2c1ba96a72e5dba152c509a8b41e047ee5c2def9e9f0d812f8be7/sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1", size = 14023, upload-time = "2024-10-02T23:15:30.172Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/18/c0/eba125db38c84d3c74717008fd3cb5000b68cd7e2cbafd1349c6a38c3d3b/sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa", size = 11908 }, + { url = "https://files.pythonhosted.org/packages/18/c0/eba125db38c84d3c74717008fd3cb5000b68cd7e2cbafd1349c6a38c3d3b/sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa", size = 11908, upload-time = "2024-10-02T23:15:28.739Z" }, ] [[package]] @@ -1549,11 +1809,12 @@ version = "1.0.0b2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736 } +sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736, upload-time = "2023-07-08T18:40:54.166Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/dd/018ce05c532a22007ac58d4f45232514cd9d6dd0ee1dc374e309db830983/sphinx_basic_ng-1.0.0b2-py3-none-any.whl", hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b", size = 22496 }, + { url = "https://files.pythonhosted.org/packages/3c/dd/018ce05c532a22007ac58d4f45232514cd9d6dd0ee1dc374e309db830983/sphinx_basic_ng-1.0.0b2-py3-none-any.whl", hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b", size = 22496, upload-time = "2023-07-08T18:40:52.659Z" }, ] [[package]] @@ -1563,39 +1824,40 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docutils" }, { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "sphinxcontrib-jquery" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463 } +sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463, upload-time = "2024-11-13T11:06:04.545Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561 }, + { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561, upload-time = "2024-11-13T11:06:02.094Z" }, ] [[package]] name = "sphinxcontrib-applehelp" version = "2.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053 } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053, upload-time = "2024-07-29T01:09:00.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300 }, + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300, upload-time = "2024-07-29T01:08:58.99Z" }, ] [[package]] name = "sphinxcontrib-devhelp" version = "2.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967 } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967, upload-time = "2024-07-29T01:09:23.417Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530 }, + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530, upload-time = "2024-07-29T01:09:21.945Z" }, ] [[package]] name = "sphinxcontrib-htmlhelp" version = "2.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617 } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617, upload-time = "2024-07-29T01:09:37.889Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705 }, + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705, upload-time = "2024-07-29T01:09:36.407Z" }, ] [[package]] @@ -1604,38 +1866,39 @@ version = "4.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331 } +sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331, upload-time = "2023-03-14T15:01:01.944Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104 }, + { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104, upload-time = "2023-03-14T15:01:00.356Z" }, ] [[package]] name = "sphinxcontrib-jsmath" version = "1.0.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787 } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787, upload-time = "2019-01-21T16:10:16.347Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071 }, + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" }, ] [[package]] name = "sphinxcontrib-qthelp" version = "2.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165 } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165, upload-time = "2024-07-29T01:09:56.435Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743 }, + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743, upload-time = "2024-07-29T01:09:54.885Z" }, ] [[package]] name = "sphinxcontrib-serializinghtml" version = "2.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080 } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080, upload-time = "2024-07-29T01:10:09.332Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072, upload-time = "2024-07-29T01:10:08.203Z" }, ] [[package]] @@ -1647,75 +1910,76 @@ dependencies = [ { name = "docutils" }, { name = "six" }, { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ef/2b/20717a5e0c7ee99dfd5fcdf11a8cf0ab02533cf62775f24d344ea5cf48c1/sphinxcontrib-shellcheck-1.1.2.zip", hash = "sha256:475a3ae12a1cfc1bc26cff57f0dd15561213818e3b470b3eacc4bb8be7b129c0", size = 338739 } +sdist = { url = "https://files.pythonhosted.org/packages/ef/2b/20717a5e0c7ee99dfd5fcdf11a8cf0ab02533cf62775f24d344ea5cf48c1/sphinxcontrib-shellcheck-1.1.2.zip", hash = "sha256:475a3ae12a1cfc1bc26cff57f0dd15561213818e3b470b3eacc4bb8be7b129c0", size = 338739, upload-time = "2020-03-30T01:51:39.993Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/06/9c/1ff7fe5174f944fac0fcb53bdaac7b98d73a98dd2ca800d95af6af9edb9a/sphinxcontrib_shellcheck-1.1.2-py35-none-any.whl", hash = "sha256:c0449dc9402521ab1d05a1b9eb8c9099707da64824341686dab4f620dc688514", size = 11532 }, - { url = "https://files.pythonhosted.org/packages/9f/8c/833388d3127d8dc0d5558bf52225eb20ed024ac46ef8ef4bffe7298ceb3d/sphinxcontrib_shellcheck-1.1.2-py36-none-any.whl", hash = "sha256:bcd8ffd26e6430deff9ffd10705683b502ace3fc8b4d1ba84496b3752f65fe52", size = 11533 }, - { url = "https://files.pythonhosted.org/packages/9d/b5/cdc74763bcf0916f47d053830c00114f1de65d97ea2281b66bbf2a587b8a/sphinxcontrib_shellcheck-1.1.2-py37-none-any.whl", hash = "sha256:46d1aba8201bbfc7a2c51e08446cab36bdab318c997223c8fc40733a5eedc71f", size = 11533 }, - { url = "https://files.pythonhosted.org/packages/58/ba/cf15480bc238a15e10604ee7f0e3e20ea0bf9a55a4f0b4e50571e8d13e60/sphinxcontrib_shellcheck-1.1.2-py38-none-any.whl", hash = "sha256:4c5f2840418cd1d7d662c0b3f51a07625f1a8f92755b19347ce85e8258e9d847", size = 11532 }, + { url = "https://files.pythonhosted.org/packages/06/9c/1ff7fe5174f944fac0fcb53bdaac7b98d73a98dd2ca800d95af6af9edb9a/sphinxcontrib_shellcheck-1.1.2-py35-none-any.whl", hash = "sha256:c0449dc9402521ab1d05a1b9eb8c9099707da64824341686dab4f620dc688514", size = 11532, upload-time = "2020-03-30T01:51:34.913Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/833388d3127d8dc0d5558bf52225eb20ed024ac46ef8ef4bffe7298ceb3d/sphinxcontrib_shellcheck-1.1.2-py36-none-any.whl", hash = "sha256:bcd8ffd26e6430deff9ffd10705683b502ace3fc8b4d1ba84496b3752f65fe52", size = 11533, upload-time = "2020-03-30T01:51:36.422Z" }, + { url = "https://files.pythonhosted.org/packages/9d/b5/cdc74763bcf0916f47d053830c00114f1de65d97ea2281b66bbf2a587b8a/sphinxcontrib_shellcheck-1.1.2-py37-none-any.whl", hash = "sha256:46d1aba8201bbfc7a2c51e08446cab36bdab318c997223c8fc40733a5eedc71f", size = 11533, upload-time = "2020-03-30T01:51:37.351Z" }, + { url = "https://files.pythonhosted.org/packages/58/ba/cf15480bc238a15e10604ee7f0e3e20ea0bf9a55a4f0b4e50571e8d13e60/sphinxcontrib_shellcheck-1.1.2-py38-none-any.whl", hash = "sha256:4c5f2840418cd1d7d662c0b3f51a07625f1a8f92755b19347ce85e8258e9d847", size = 11532, upload-time = "2020-03-30T01:51:38.858Z" }, ] [[package]] name = "starlette" -version = "0.45.2" +version = "0.47.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, - { name = "typing-extensions", marker = "python_full_version < '3.10'" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/90/4f/e1c9f4ec3dae67a94c9285ed275355d5f7cf0f3a5c34538c8ae5412af550/starlette-0.45.2.tar.gz", hash = "sha256:bba1831d15ae5212b22feab2f218bab6ed3cd0fc2dc1d4442443bb1ee52260e0", size = 2574026 } +sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = "2025-07-20T17:31:58.522Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/aa/ab/fe4f57c83620b39dfc9e7687ebad59129ff05170b99422105019d9a65eec/starlette-0.45.2-py3-none-any.whl", hash = "sha256:4daec3356fb0cb1e723a5235e5beaf375d2259af27532958e2d79df549dad9da", size = 71505 }, + { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" }, ] [[package]] name = "tomli" version = "2.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, - { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, - { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067 }, - { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, - { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, - { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, - { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, - { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, - { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, - { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, - { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, - { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, - { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, - { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, - { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, - { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, - { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, - { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, - { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, - { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, - { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, - { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, - { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, - { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, - { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, - { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, - { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, - { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, - { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, - { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, - { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, ] [[package]] name = "typing-extensions" -version = "4.12.2" +version = "4.14.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, + { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" }, ] [[package]] @@ -1725,243 +1989,283 @@ source = { registry = "https://pypi.org/simple" } resolution-markers = [ "python_full_version < '3.10'", ] -sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380 } +sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380, upload-time = "2024-08-29T15:43:11.37Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225 }, + { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225, upload-time = "2024-08-29T15:43:08.921Z" }, ] [[package]] name = "urllib3" -version = "2.3.0" +version = "2.5.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version >= '3.10'", + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", ] -sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, ] [[package]] name = "uvicorn" -version = "0.34.0" +version = "0.35.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click" }, + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "h11" }, { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4b/4d/938bd85e5bf2edeec766267a5015ad969730bb91e31b44021dfe8b22df6c/uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9", size = 76568 } +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 }, + { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, ] [[package]] name = "virtualenv" -version = "20.29.1" +version = "20.32.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, { name = "filelock" }, { name = "platformdirs" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a7/ca/f23dcb02e161a9bba141b1c08aa50e8da6ea25e6d780528f1d385a3efe25/virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35", size = 7658028 } +sdist = { url = "https://files.pythonhosted.org/packages/a9/96/0834f30fa08dca3738614e6a9d42752b6420ee94e58971d702118f7cfd30/virtualenv-20.32.0.tar.gz", hash = "sha256:886bf75cadfdc964674e6e33eb74d787dff31ca314ceace03ca5810620f4ecf0", size = 6076970, upload-time = "2025-07-21T04:09:50.985Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/89/9b/599bcfc7064fbe5740919e78c5df18e5dceb0887e676256a1061bb5ae232/virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779", size = 4282379 }, + { url = "https://files.pythonhosted.org/packages/5c/c6/f8f28009920a736d0df434b52e9feebfb4d702ba942f15338cb4a83eafc1/virtualenv-20.32.0-py3-none-any.whl", hash = "sha256:2c310aecb62e5aa1b06103ed7c2977b81e042695de2697d01017ff0f1034af56", size = 6057761, upload-time = "2025-07-21T04:09:48.059Z" }, ] [[package]] name = "watchfiles" -version = "1.0.4" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f5/26/c705fc77d0a9ecdb9b66f1e2976d95b81df3cae518967431e7dbf9b5e219/watchfiles-1.0.4.tar.gz", hash = "sha256:6ba473efd11062d73e4f00c2b730255f9c1bdd73cd5f9fe5b5da8dbd4a717205", size = 94625 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/14/02/22fcaed0396730b0d362bc8d1ffb3be2658fd473eecbb2ba84243e157f11/watchfiles-1.0.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ba5bb3073d9db37c64520681dd2650f8bd40902d991e7b4cfaeece3e32561d08", size = 395212 }, - { url = "https://files.pythonhosted.org/packages/e9/3d/ec5a2369a46edf3ebe092c39d9ae48e8cb6dacbde51c4b4f98936c524269/watchfiles-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f25d0ba0fe2b6d2c921cf587b2bf4c451860086534f40c384329fb96e2044d1", size = 384815 }, - { url = "https://files.pythonhosted.org/packages/df/b4/898991cececbe171e67142c31905510203649569d9817848f47c4177ee42/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47eb32ef8c729dbc4f4273baece89398a4d4b5d21a1493efea77a17059f4df8a", size = 450680 }, - { url = "https://files.pythonhosted.org/packages/58/f7/d4aa3000e812cfb5e5c2c6c0a3ec9d0a46a42489a8727edd160631c4e210/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:076f293100db3b0b634514aa0d294b941daa85fc777f9c698adb1009e5aca0b1", size = 455923 }, - { url = "https://files.pythonhosted.org/packages/dd/95/7e2e4c6aba1b02fb5c76d2f6a450b85215921ec5f8f7ad5efd075369563f/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1eacd91daeb5158c598fe22d7ce66d60878b6294a86477a4715154990394c9b3", size = 482339 }, - { url = "https://files.pythonhosted.org/packages/bb/67/4265b0fabcc2ef2c9e3e8802ba7908cf718a357ebfb49c72e53787156a48/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13c2ce7b72026cfbca120d652f02c7750f33b4c9395d79c9790b27f014c8a5a2", size = 519908 }, - { url = "https://files.pythonhosted.org/packages/0d/96/b57802d5f8164bdf070befb4fd3dec4edba5a364ec0670965a97eb8098ce/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90192cdc15ab7254caa7765a98132a5a41471cf739513cc9bcf7d2ffcc0ec7b2", size = 501410 }, - { url = "https://files.pythonhosted.org/packages/8b/18/6db0de4e8911ba14e31853201b40c0fa9fea5ecf3feb86b0ad58f006dfc3/watchfiles-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278aaa395f405972e9f523bd786ed59dfb61e4b827856be46a42130605fd0899", size = 452876 }, - { url = "https://files.pythonhosted.org/packages/df/df/092a961815edf723a38ba2638c49491365943919c3526cc9cf82c42786a6/watchfiles-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a462490e75e466edbb9fc4cd679b62187153b3ba804868452ef0577ec958f5ff", size = 615353 }, - { url = "https://files.pythonhosted.org/packages/f3/cf/b85fe645de4ff82f3f436c5e9032379fce37c303f6396a18f9726cc34519/watchfiles-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8d0d0630930f5cd5af929040e0778cf676a46775753e442a3f60511f2409f48f", size = 613187 }, - { url = "https://files.pythonhosted.org/packages/f6/d4/a9fea27aef4dd69689bc3556718c1157a7accb72aa035ece87c1fa8483b5/watchfiles-1.0.4-cp310-cp310-win32.whl", hash = "sha256:cc27a65069bcabac4552f34fd2dce923ce3fcde0721a16e4fb1b466d63ec831f", size = 270799 }, - { url = "https://files.pythonhosted.org/packages/df/02/dbe9d4439f15dd4ad0720b6e039bde9d66d1f830331f34c18eb70fa6608e/watchfiles-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:8b1f135238e75d075359cf506b27bf3f4ca12029c47d3e769d8593a2024ce161", size = 284145 }, - { url = "https://files.pythonhosted.org/packages/0f/bb/8461adc4b1fed009546fb797fc0d5698dcfe5e289cb37e1b8f16a93cdc30/watchfiles-1.0.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2a9f93f8439639dc244c4d2902abe35b0279102bca7bbcf119af964f51d53c19", size = 394869 }, - { url = "https://files.pythonhosted.org/packages/55/88/9ebf36b3547176d1709c320de78c1fa3263a46be31b5b1267571d9102686/watchfiles-1.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9eea33ad8c418847dd296e61eb683cae1c63329b6d854aefcd412e12d94ee235", size = 384905 }, - { url = "https://files.pythonhosted.org/packages/03/8a/04335ce23ef78d8c69f0913e8b20cf7d9233e3986543aeef95ef2d6e43d2/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31f1a379c9dcbb3f09cf6be1b7e83b67c0e9faabed0471556d9438a4a4e14202", size = 449944 }, - { url = "https://files.pythonhosted.org/packages/17/4e/c8d5dcd14fe637f4633616dabea8a4af0a10142dccf3b43e0f081ba81ab4/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab594e75644421ae0a2484554832ca5895f8cab5ab62de30a1a57db460ce06c6", size = 456020 }, - { url = "https://files.pythonhosted.org/packages/5e/74/3e91e09e1861dd7fbb1190ce7bd786700dc0fbc2ccd33bb9fff5de039229/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc2eb5d14a8e0d5df7b36288979176fbb39672d45184fc4b1c004d7c3ce29317", size = 482983 }, - { url = "https://files.pythonhosted.org/packages/a1/3d/e64de2d1ce4eb6a574fd78ce3a28c279da263be9ef3cfcab6f708df192f2/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f68d8e9d5a321163ddacebe97091000955a1b74cd43724e346056030b0bacee", size = 520320 }, - { url = "https://files.pythonhosted.org/packages/2c/bd/52235f7063b57240c66a991696ed27e2a18bd6fcec8a1ea5a040b70d0611/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9ce064e81fe79faa925ff03b9f4c1a98b0bbb4a1b8c1b015afa93030cb21a49", size = 500988 }, - { url = "https://files.pythonhosted.org/packages/3a/b0/ff04194141a5fe650c150400dd9e42667916bc0f52426e2e174d779b8a74/watchfiles-1.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b77d5622ac5cc91d21ae9c2b284b5d5c51085a0bdb7b518dba263d0af006132c", size = 452573 }, - { url = "https://files.pythonhosted.org/packages/3d/9d/966164332c5a178444ae6d165082d4f351bd56afd9c3ec828eecbf190e6a/watchfiles-1.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1941b4e39de9b38b868a69b911df5e89dc43767feeda667b40ae032522b9b5f1", size = 615114 }, - { url = "https://files.pythonhosted.org/packages/94/df/f569ae4c1877f96ad4086c153a8eee5a19a3b519487bf5c9454a3438c341/watchfiles-1.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f8c4998506241dedf59613082d1c18b836e26ef2a4caecad0ec41e2a15e4226", size = 613076 }, - { url = "https://files.pythonhosted.org/packages/15/ae/8ce5f29e65d5fa5790e3c80c289819c55e12be2e1b9f5b6a0e55e169b97d/watchfiles-1.0.4-cp311-cp311-win32.whl", hash = "sha256:4ebbeca9360c830766b9f0df3640b791be569d988f4be6c06d6fae41f187f105", size = 271013 }, - { url = "https://files.pythonhosted.org/packages/a4/c6/79dc4a7c598a978e5fafa135090aaf7bbb03b8dec7bada437dfbe578e7ed/watchfiles-1.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:05d341c71f3d7098920f8551d4df47f7b57ac5b8dad56558064c3431bdfc0b74", size = 284229 }, - { url = "https://files.pythonhosted.org/packages/37/3d/928633723211753f3500bfb138434f080363b87a1b08ca188b1ce54d1e05/watchfiles-1.0.4-cp311-cp311-win_arm64.whl", hash = "sha256:32b026a6ab64245b584acf4931fe21842374da82372d5c039cba6bf99ef722f3", size = 276824 }, - { url = "https://files.pythonhosted.org/packages/5b/1a/8f4d9a1461709756ace48c98f07772bc6d4519b1e48b5fa24a4061216256/watchfiles-1.0.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:229e6ec880eca20e0ba2f7e2249c85bae1999d330161f45c78d160832e026ee2", size = 391345 }, - { url = "https://files.pythonhosted.org/packages/bc/d2/6750b7b3527b1cdaa33731438432e7238a6c6c40a9924049e4cebfa40805/watchfiles-1.0.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5717021b199e8353782dce03bd8a8f64438832b84e2885c4a645f9723bf656d9", size = 381515 }, - { url = "https://files.pythonhosted.org/packages/4e/17/80500e42363deef1e4b4818729ed939aaddc56f82f4e72b2508729dd3c6b/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0799ae68dfa95136dde7c472525700bd48777875a4abb2ee454e3ab18e9fc712", size = 449767 }, - { url = "https://files.pythonhosted.org/packages/10/37/1427fa4cfa09adbe04b1e97bced19a29a3462cc64c78630787b613a23f18/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43b168bba889886b62edb0397cab5b6490ffb656ee2fcb22dec8bfeb371a9e12", size = 455677 }, - { url = "https://files.pythonhosted.org/packages/c5/7a/39e9397f3a19cb549a7d380412fd9e507d4854eddc0700bfad10ef6d4dba/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb2c46e275fbb9f0c92e7654b231543c7bbfa1df07cdc4b99fa73bedfde5c844", size = 482219 }, - { url = "https://files.pythonhosted.org/packages/45/2d/7113931a77e2ea4436cad0c1690c09a40a7f31d366f79c6f0a5bc7a4f6d5/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:857f5fc3aa027ff5e57047da93f96e908a35fe602d24f5e5d8ce64bf1f2fc733", size = 518830 }, - { url = "https://files.pythonhosted.org/packages/f9/1b/50733b1980fa81ef3c70388a546481ae5fa4c2080040100cd7bf3bf7b321/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55ccfd27c497b228581e2838d4386301227fc0cb47f5a12923ec2fe4f97b95af", size = 497997 }, - { url = "https://files.pythonhosted.org/packages/2b/b4/9396cc61b948ef18943e7c85ecfa64cf940c88977d882da57147f62b34b1/watchfiles-1.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c11ea22304d17d4385067588123658e9f23159225a27b983f343fcffc3e796a", size = 452249 }, - { url = "https://files.pythonhosted.org/packages/fb/69/0c65a5a29e057ad0dc691c2fa6c23b2983c7dabaa190ba553b29ac84c3cc/watchfiles-1.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:74cb3ca19a740be4caa18f238298b9d472c850f7b2ed89f396c00a4c97e2d9ff", size = 614412 }, - { url = "https://files.pythonhosted.org/packages/7f/b9/319fcba6eba5fad34327d7ce16a6b163b39741016b1996f4a3c96b8dd0e1/watchfiles-1.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c7cce76c138a91e720d1df54014a047e680b652336e1b73b8e3ff3158e05061e", size = 611982 }, - { url = "https://files.pythonhosted.org/packages/f1/47/143c92418e30cb9348a4387bfa149c8e0e404a7c5b0585d46d2f7031b4b9/watchfiles-1.0.4-cp312-cp312-win32.whl", hash = "sha256:b045c800d55bc7e2cadd47f45a97c7b29f70f08a7c2fa13241905010a5493f94", size = 271822 }, - { url = "https://files.pythonhosted.org/packages/ea/94/b0165481bff99a64b29e46e07ac2e0df9f7a957ef13bec4ceab8515f44e3/watchfiles-1.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:c2acfa49dd0ad0bf2a9c0bb9a985af02e89345a7189be1efc6baa085e0f72d7c", size = 285441 }, - { url = "https://files.pythonhosted.org/packages/11/de/09fe56317d582742d7ca8c2ca7b52a85927ebb50678d9b0fa8194658f536/watchfiles-1.0.4-cp312-cp312-win_arm64.whl", hash = "sha256:22bb55a7c9e564e763ea06c7acea24fc5d2ee5dfc5dafc5cfbedfe58505e9f90", size = 277141 }, - { url = "https://files.pythonhosted.org/packages/08/98/f03efabec64b5b1fa58c0daab25c68ef815b0f320e54adcacd0d6847c339/watchfiles-1.0.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:8012bd820c380c3d3db8435e8cf7592260257b378b649154a7948a663b5f84e9", size = 390954 }, - { url = "https://files.pythonhosted.org/packages/16/09/4dd49ba0a32a45813debe5fb3897955541351ee8142f586303b271a02b40/watchfiles-1.0.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa216f87594f951c17511efe5912808dfcc4befa464ab17c98d387830ce07b60", size = 381133 }, - { url = "https://files.pythonhosted.org/packages/76/59/5aa6fc93553cd8d8ee75c6247763d77c02631aed21551a97d94998bf1dae/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c9953cf85529c05b24705639ffa390f78c26449e15ec34d5339e8108c7c407", size = 449516 }, - { url = "https://files.pythonhosted.org/packages/4c/aa/df4b6fe14b6317290b91335b23c96b488d365d65549587434817e06895ea/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf684aa9bba4cd95ecb62c822a56de54e3ae0598c1a7f2065d51e24637a3c5d", size = 454820 }, - { url = "https://files.pythonhosted.org/packages/5e/71/185f8672f1094ce48af33252c73e39b48be93b761273872d9312087245f6/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f44a39aee3cbb9b825285ff979ab887a25c5d336e5ec3574f1506a4671556a8d", size = 481550 }, - { url = "https://files.pythonhosted.org/packages/85/d7/50ebba2c426ef1a5cb17f02158222911a2e005d401caf5d911bfca58f4c4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38320582736922be8c865d46520c043bff350956dfc9fbaee3b2df4e1740a4b", size = 518647 }, - { url = "https://files.pythonhosted.org/packages/f0/7a/4c009342e393c545d68987e8010b937f72f47937731225b2b29b7231428f/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39f4914548b818540ef21fd22447a63e7be6e24b43a70f7642d21f1e73371590", size = 497547 }, - { url = "https://files.pythonhosted.org/packages/0f/7c/1cf50b35412d5c72d63b2bf9a4fffee2e1549a245924960dd087eb6a6de4/watchfiles-1.0.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f12969a3765909cf5dc1e50b2436eb2c0e676a3c75773ab8cc3aa6175c16e902", size = 452179 }, - { url = "https://files.pythonhosted.org/packages/d6/a9/3db1410e1c1413735a9a472380e4f431ad9a9e81711cda2aaf02b7f62693/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0986902677a1a5e6212d0c49b319aad9cc48da4bd967f86a11bde96ad9676ca1", size = 614125 }, - { url = "https://files.pythonhosted.org/packages/f2/e1/0025d365cf6248c4d1ee4c3d2e3d373bdd3f6aff78ba4298f97b4fad2740/watchfiles-1.0.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:308ac265c56f936636e3b0e3f59e059a40003c655228c131e1ad439957592303", size = 611911 }, - { url = "https://files.pythonhosted.org/packages/55/55/035838277d8c98fc8c917ac9beeb0cd6c59d675dc2421df5f9fcf44a0070/watchfiles-1.0.4-cp313-cp313-win32.whl", hash = "sha256:aee397456a29b492c20fda2d8961e1ffb266223625346ace14e4b6d861ba9c80", size = 271152 }, - { url = "https://files.pythonhosted.org/packages/f0/e5/96b8e55271685ddbadc50ce8bc53aa2dff278fb7ac4c2e473df890def2dc/watchfiles-1.0.4-cp313-cp313-win_amd64.whl", hash = "sha256:d6097538b0ae5c1b88c3b55afa245a66793a8fec7ada6755322e465fb1a0e8cc", size = 285216 }, - { url = "https://files.pythonhosted.org/packages/15/81/54484fc2fa715abe79694b975692af963f0878fb9d72b8251aa542bf3f10/watchfiles-1.0.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d3452c1ec703aa1c61e15dfe9d482543e4145e7c45a6b8566978fbb044265a21", size = 394967 }, - { url = "https://files.pythonhosted.org/packages/14/b3/557f0cd90add86586fe3deeebd11e8299db6bc3452b44a534f844c6ab831/watchfiles-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7b75fee5a16826cf5c46fe1c63116e4a156924d668c38b013e6276f2582230f0", size = 384707 }, - { url = "https://files.pythonhosted.org/packages/03/a3/34638e1bffcb85a405e7b005e30bb211fd9be2ab2cb1847f2ceb81bef27b/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e997802d78cdb02623b5941830ab06f8860038faf344f0d288d325cc9c5d2ff", size = 450442 }, - { url = "https://files.pythonhosted.org/packages/8f/9f/6a97460dd11a606003d634c7158d9fea8517e98daffc6f56d0f5fde2e86a/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0611d244ce94d83f5b9aff441ad196c6e21b55f77f3c47608dcf651efe54c4a", size = 455959 }, - { url = "https://files.pythonhosted.org/packages/9d/bb/e0648c6364e4d37ec692bc3f0c77507d17d8bb8f75689148819142010bbf/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9745a4210b59e218ce64c91deb599ae8775c8a9da4e95fb2ee6fe745fc87d01a", size = 483187 }, - { url = "https://files.pythonhosted.org/packages/dd/ad/d9290586a25288a81dfa8ad6329cf1de32aa1a9798ace45259eb95dcfb37/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4810ea2ae622add560f4aa50c92fef975e475f7ac4900ce5ff5547b2434642d8", size = 519733 }, - { url = "https://files.pythonhosted.org/packages/4e/a9/150c1666825cc9637093f8cae7fc6f53b3296311ab8bd65f1389acb717cb/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:740d103cd01458f22462dedeb5a3382b7f2c57d07ff033fbc9465919e5e1d0f3", size = 502275 }, - { url = "https://files.pythonhosted.org/packages/44/dc/5bfd21e20a330aca1706ac44713bc322838061938edf4b53130f97a7b211/watchfiles-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdbd912a61543a36aef85e34f212e5d2486e7c53ebfdb70d1e0b060cc50dd0bf", size = 452907 }, - { url = "https://files.pythonhosted.org/packages/50/fe/8f4fc488f1699f564687b697456eb5c0cb8e2b0b8538150511c234c62094/watchfiles-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0bc80d91ddaf95f70258cf78c471246846c1986bcc5fd33ccc4a1a67fcb40f9a", size = 615927 }, - { url = "https://files.pythonhosted.org/packages/ad/19/2e45f6f6eec89dd97a4d281635e3d73c17e5f692e7432063bdfdf9562c89/watchfiles-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab0311bb2ffcd9f74b6c9de2dda1612c13c84b996d032cd74799adb656af4e8b", size = 613435 }, - { url = "https://files.pythonhosted.org/packages/91/17/dc5ac62ca377827c24321d68050efc2eaee2ebaf3f21d055bbce2206d309/watchfiles-1.0.4-cp39-cp39-win32.whl", hash = "sha256:02a526ee5b5a09e8168314c905fc545c9bc46509896ed282aeb5a8ba9bd6ca27", size = 270810 }, - { url = "https://files.pythonhosted.org/packages/82/2b/dad851342492d538e7ffe72a8c756f747dd147988abb039ac9d6577d2235/watchfiles-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:a5ae5706058b27c74bac987d615105da17724172d5aaacc6c362a40599b6de43", size = 284866 }, - { url = "https://files.pythonhosted.org/packages/6f/06/175d5ac6b838fb319008c0cd981d7bf289317c510154d411d3584ca2b67b/watchfiles-1.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdcc92daeae268de1acf5b7befcd6cfffd9a047098199056c72e4623f531de18", size = 396269 }, - { url = "https://files.pythonhosted.org/packages/86/ee/5db93b0b57dc0587abdbac4149296ee73275f615d790a82cb5598af0557f/watchfiles-1.0.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8d3d9203705b5797f0af7e7e5baa17c8588030aaadb7f6a86107b7247303817", size = 386010 }, - { url = "https://files.pythonhosted.org/packages/75/61/fe0dc5fedf152bfc085a53711f740701f6bdb8ab6b5c950402b681d4858b/watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdef5a1be32d0b07dcea3318a0be95d42c98ece24177820226b56276e06b63b0", size = 450913 }, - { url = "https://files.pythonhosted.org/packages/9f/dd/3c7731af3baf1a9957afc643d176f94480921a690ec3237c9f9d11301c08/watchfiles-1.0.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:342622287b5604ddf0ed2d085f3a589099c9ae8b7331df3ae9845571586c4f3d", size = 453474 }, - { url = "https://files.pythonhosted.org/packages/6b/b4/c3998f54c91a35cee60ee6d3a855a069c5dff2bae6865147a46e9090dccd/watchfiles-1.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9fe37a2de80aa785d340f2980276b17ef697ab8db6019b07ee4fd28a8359d2f3", size = 395565 }, - { url = "https://files.pythonhosted.org/packages/3f/05/ac1a4d235beb9ddfb8ac26ce93a00ba6bd1b1b43051ef12d7da957b4a9d1/watchfiles-1.0.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d1ef56b56ed7e8f312c934436dea93bfa3e7368adfcf3df4c0da6d4de959a1e", size = 385406 }, - { url = "https://files.pythonhosted.org/packages/4c/ea/36532e7d86525f4e52a10efed182abf33efb106a93d49f5fbc994b256bcd/watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b42cac65beae3a362629950c444077d1b44f1790ea2772beaea95451c086bb", size = 450424 }, - { url = "https://files.pythonhosted.org/packages/7a/e9/3cbcf4d70cd0b6d3f30631deae1bf37cc0be39887ca327a44462fe546bf5/watchfiles-1.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e0227b8ed9074c6172cf55d85b5670199c99ab11fd27d2c473aa30aec67ee42", size = 452488 }, +sdist = { url = "https://files.pythonhosted.org/packages/2a/9a/d451fcc97d029f5812e898fd30a53fd8c15c7bbd058fd75cfc6beb9bd761/watchfiles-1.1.0.tar.gz", hash = "sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575", size = 94406, upload-time = "2025-06-15T19:06:59.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/dd/579d1dc57f0f895426a1211c4ef3b0cb37eb9e642bb04bdcd962b5df206a/watchfiles-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc", size = 405757, upload-time = "2025-06-15T19:04:51.058Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/7a0318cd874393344d48c34d53b3dd419466adf59a29ba5b51c88dd18b86/watchfiles-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df", size = 397511, upload-time = "2025-06-15T19:04:52.79Z" }, + { url = "https://files.pythonhosted.org/packages/06/be/503514656d0555ec2195f60d810eca29b938772e9bfb112d5cd5ad6f6a9e/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68", size = 450739, upload-time = "2025-06-15T19:04:54.203Z" }, + { url = "https://files.pythonhosted.org/packages/4e/0d/a05dd9e5f136cdc29751816d0890d084ab99f8c17b86f25697288ca09bc7/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc", size = 458106, upload-time = "2025-06-15T19:04:55.607Z" }, + { url = "https://files.pythonhosted.org/packages/f1/fa/9cd16e4dfdb831072b7ac39e7bea986e52128526251038eb481effe9f48e/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97", size = 484264, upload-time = "2025-06-15T19:04:57.009Z" }, + { url = "https://files.pythonhosted.org/packages/32/04/1da8a637c7e2b70e750a0308e9c8e662ada0cca46211fa9ef24a23937e0b/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c", size = 597612, upload-time = "2025-06-15T19:04:58.409Z" }, + { url = "https://files.pythonhosted.org/packages/30/01/109f2762e968d3e58c95731a206e5d7d2a7abaed4299dd8a94597250153c/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5", size = 477242, upload-time = "2025-06-15T19:04:59.786Z" }, + { url = "https://files.pythonhosted.org/packages/b5/b8/46f58cf4969d3b7bc3ca35a98e739fa4085b0657a1540ccc29a1a0bc016f/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9", size = 453148, upload-time = "2025-06-15T19:05:01.103Z" }, + { url = "https://files.pythonhosted.org/packages/a5/cd/8267594263b1770f1eb76914940d7b2d03ee55eca212302329608208e061/watchfiles-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72", size = 626574, upload-time = "2025-06-15T19:05:02.582Z" }, + { url = "https://files.pythonhosted.org/packages/a1/2f/7f2722e85899bed337cba715723e19185e288ef361360718973f891805be/watchfiles-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc", size = 624378, upload-time = "2025-06-15T19:05:03.719Z" }, + { url = "https://files.pythonhosted.org/packages/bf/20/64c88ec43d90a568234d021ab4b2a6f42a5230d772b987c3f9c00cc27b8b/watchfiles-1.1.0-cp310-cp310-win32.whl", hash = "sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587", size = 279829, upload-time = "2025-06-15T19:05:04.822Z" }, + { url = "https://files.pythonhosted.org/packages/39/5c/a9c1ed33de7af80935e4eac09570de679c6e21c07070aa99f74b4431f4d6/watchfiles-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82", size = 292192, upload-time = "2025-06-15T19:05:06.348Z" }, + { url = "https://files.pythonhosted.org/packages/8b/78/7401154b78ab484ccaaeef970dc2af0cb88b5ba8a1b415383da444cdd8d3/watchfiles-1.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2", size = 405751, upload-time = "2025-06-15T19:05:07.679Z" }, + { url = "https://files.pythonhosted.org/packages/76/63/e6c3dbc1f78d001589b75e56a288c47723de28c580ad715eb116639152b5/watchfiles-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c", size = 397313, upload-time = "2025-06-15T19:05:08.764Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a2/8afa359ff52e99af1632f90cbf359da46184207e893a5f179301b0c8d6df/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d", size = 450792, upload-time = "2025-06-15T19:05:09.869Z" }, + { url = "https://files.pythonhosted.org/packages/1d/bf/7446b401667f5c64972a57a0233be1104157fc3abf72c4ef2666c1bd09b2/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7", size = 458196, upload-time = "2025-06-15T19:05:11.91Z" }, + { url = "https://files.pythonhosted.org/packages/58/2f/501ddbdfa3fa874ea5597c77eeea3d413579c29af26c1091b08d0c792280/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c", size = 484788, upload-time = "2025-06-15T19:05:13.373Z" }, + { url = "https://files.pythonhosted.org/packages/61/1e/9c18eb2eb5c953c96bc0e5f626f0e53cfef4bd19bd50d71d1a049c63a575/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575", size = 597879, upload-time = "2025-06-15T19:05:14.725Z" }, + { url = "https://files.pythonhosted.org/packages/8b/6c/1467402e5185d89388b4486745af1e0325007af0017c3384cc786fff0542/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8", size = 477447, upload-time = "2025-06-15T19:05:15.775Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a1/ec0a606bde4853d6c4a578f9391eeb3684a9aea736a8eb217e3e00aa89a1/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f", size = 453145, upload-time = "2025-06-15T19:05:17.17Z" }, + { url = "https://files.pythonhosted.org/packages/90/b9/ef6f0c247a6a35d689fc970dc7f6734f9257451aefb30def5d100d6246a5/watchfiles-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4", size = 626539, upload-time = "2025-06-15T19:05:18.557Z" }, + { url = "https://files.pythonhosted.org/packages/34/44/6ffda5537085106ff5aaa762b0d130ac6c75a08015dd1621376f708c94de/watchfiles-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d", size = 624472, upload-time = "2025-06-15T19:05:19.588Z" }, + { url = "https://files.pythonhosted.org/packages/c3/e3/71170985c48028fa3f0a50946916a14055e741db11c2e7bc2f3b61f4d0e3/watchfiles-1.1.0-cp311-cp311-win32.whl", hash = "sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2", size = 279348, upload-time = "2025-06-15T19:05:20.856Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/3e39c68b68a7a171070f81fc2561d23ce8d6859659406842a0e4bebf3bba/watchfiles-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12", size = 292607, upload-time = "2025-06-15T19:05:21.937Z" }, + { url = "https://files.pythonhosted.org/packages/61/9f/2973b7539f2bdb6ea86d2c87f70f615a71a1fc2dba2911795cea25968aea/watchfiles-1.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a", size = 285056, upload-time = "2025-06-15T19:05:23.12Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b8/858957045a38a4079203a33aaa7d23ea9269ca7761c8a074af3524fbb240/watchfiles-1.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179", size = 402339, upload-time = "2025-06-15T19:05:24.516Z" }, + { url = "https://files.pythonhosted.org/packages/80/28/98b222cca751ba68e88521fabd79a4fab64005fc5976ea49b53fa205d1fa/watchfiles-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5", size = 394409, upload-time = "2025-06-15T19:05:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/50/dee79968566c03190677c26f7f47960aff738d32087087bdf63a5473e7df/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297", size = 450939, upload-time = "2025-06-15T19:05:26.494Z" }, + { url = "https://files.pythonhosted.org/packages/40/45/a7b56fb129700f3cfe2594a01aa38d033b92a33dddce86c8dfdfc1247b72/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0", size = 457270, upload-time = "2025-06-15T19:05:27.466Z" }, + { url = "https://files.pythonhosted.org/packages/b5/c8/fa5ef9476b1d02dc6b5e258f515fcaaecf559037edf8b6feffcbc097c4b8/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e", size = 483370, upload-time = "2025-06-15T19:05:28.548Z" }, + { url = "https://files.pythonhosted.org/packages/98/68/42cfcdd6533ec94f0a7aab83f759ec11280f70b11bfba0b0f885e298f9bd/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee", size = 598654, upload-time = "2025-06-15T19:05:29.997Z" }, + { url = "https://files.pythonhosted.org/packages/d3/74/b2a1544224118cc28df7e59008a929e711f9c68ce7d554e171b2dc531352/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd", size = 478667, upload-time = "2025-06-15T19:05:31.172Z" }, + { url = "https://files.pythonhosted.org/packages/8c/77/e3362fe308358dc9f8588102481e599c83e1b91c2ae843780a7ded939a35/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f", size = 452213, upload-time = "2025-06-15T19:05:32.299Z" }, + { url = "https://files.pythonhosted.org/packages/6e/17/c8f1a36540c9a1558d4faf08e909399e8133599fa359bf52ec8fcee5be6f/watchfiles-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4", size = 626718, upload-time = "2025-06-15T19:05:33.415Z" }, + { url = "https://files.pythonhosted.org/packages/26/45/fb599be38b4bd38032643783d7496a26a6f9ae05dea1a42e58229a20ac13/watchfiles-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f", size = 623098, upload-time = "2025-06-15T19:05:34.534Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e7/fdf40e038475498e160cd167333c946e45d8563ae4dd65caf757e9ffe6b4/watchfiles-1.1.0-cp312-cp312-win32.whl", hash = "sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd", size = 279209, upload-time = "2025-06-15T19:05:35.577Z" }, + { url = "https://files.pythonhosted.org/packages/3f/d3/3ae9d5124ec75143bdf088d436cba39812122edc47709cd2caafeac3266f/watchfiles-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47", size = 292786, upload-time = "2025-06-15T19:05:36.559Z" }, + { url = "https://files.pythonhosted.org/packages/26/2f/7dd4fc8b5f2b34b545e19629b4a018bfb1de23b3a496766a2c1165ca890d/watchfiles-1.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6", size = 284343, upload-time = "2025-06-15T19:05:37.5Z" }, + { url = "https://files.pythonhosted.org/packages/d3/42/fae874df96595556a9089ade83be34a2e04f0f11eb53a8dbf8a8a5e562b4/watchfiles-1.1.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30", size = 402004, upload-time = "2025-06-15T19:05:38.499Z" }, + { url = "https://files.pythonhosted.org/packages/fa/55/a77e533e59c3003d9803c09c44c3651224067cbe7fb5d574ddbaa31e11ca/watchfiles-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a", size = 393671, upload-time = "2025-06-15T19:05:39.52Z" }, + { url = "https://files.pythonhosted.org/packages/05/68/b0afb3f79c8e832e6571022611adbdc36e35a44e14f129ba09709aa4bb7a/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc", size = 449772, upload-time = "2025-06-15T19:05:40.897Z" }, + { url = "https://files.pythonhosted.org/packages/ff/05/46dd1f6879bc40e1e74c6c39a1b9ab9e790bf1f5a2fe6c08b463d9a807f4/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b", size = 456789, upload-time = "2025-06-15T19:05:42.045Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ca/0eeb2c06227ca7f12e50a47a3679df0cd1ba487ea19cf844a905920f8e95/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895", size = 482551, upload-time = "2025-06-15T19:05:43.781Z" }, + { url = "https://files.pythonhosted.org/packages/31/47/2cecbd8694095647406645f822781008cc524320466ea393f55fe70eed3b/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a", size = 597420, upload-time = "2025-06-15T19:05:45.244Z" }, + { url = "https://files.pythonhosted.org/packages/d9/7e/82abc4240e0806846548559d70f0b1a6dfdca75c1b4f9fa62b504ae9b083/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b", size = 477950, upload-time = "2025-06-15T19:05:46.332Z" }, + { url = "https://files.pythonhosted.org/packages/25/0d/4d564798a49bf5482a4fa9416dea6b6c0733a3b5700cb8a5a503c4b15853/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c", size = 451706, upload-time = "2025-06-15T19:05:47.459Z" }, + { url = "https://files.pythonhosted.org/packages/81/b5/5516cf46b033192d544102ea07c65b6f770f10ed1d0a6d388f5d3874f6e4/watchfiles-1.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b", size = 625814, upload-time = "2025-06-15T19:05:48.654Z" }, + { url = "https://files.pythonhosted.org/packages/0c/dd/7c1331f902f30669ac3e754680b6edb9a0dd06dea5438e61128111fadd2c/watchfiles-1.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb", size = 622820, upload-time = "2025-06-15T19:05:50.088Z" }, + { url = "https://files.pythonhosted.org/packages/1b/14/36d7a8e27cd128d7b1009e7715a7c02f6c131be9d4ce1e5c3b73d0e342d8/watchfiles-1.1.0-cp313-cp313-win32.whl", hash = "sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9", size = 279194, upload-time = "2025-06-15T19:05:51.186Z" }, + { url = "https://files.pythonhosted.org/packages/25/41/2dd88054b849aa546dbeef5696019c58f8e0774f4d1c42123273304cdb2e/watchfiles-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7", size = 292349, upload-time = "2025-06-15T19:05:52.201Z" }, + { url = "https://files.pythonhosted.org/packages/c8/cf/421d659de88285eb13941cf11a81f875c176f76a6d99342599be88e08d03/watchfiles-1.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5", size = 283836, upload-time = "2025-06-15T19:05:53.265Z" }, + { url = "https://files.pythonhosted.org/packages/45/10/6faf6858d527e3599cc50ec9fcae73590fbddc1420bd4fdccfebffeedbc6/watchfiles-1.1.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1", size = 400343, upload-time = "2025-06-15T19:05:54.252Z" }, + { url = "https://files.pythonhosted.org/packages/03/20/5cb7d3966f5e8c718006d0e97dfe379a82f16fecd3caa7810f634412047a/watchfiles-1.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339", size = 392916, upload-time = "2025-06-15T19:05:55.264Z" }, + { url = "https://files.pythonhosted.org/packages/8c/07/d8f1176328fa9e9581b6f120b017e286d2a2d22ae3f554efd9515c8e1b49/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633", size = 449582, upload-time = "2025-06-15T19:05:56.317Z" }, + { url = "https://files.pythonhosted.org/packages/66/e8/80a14a453cf6038e81d072a86c05276692a1826471fef91df7537dba8b46/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011", size = 456752, upload-time = "2025-06-15T19:05:57.359Z" }, + { url = "https://files.pythonhosted.org/packages/5a/25/0853b3fe0e3c2f5af9ea60eb2e781eade939760239a72c2d38fc4cc335f6/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670", size = 481436, upload-time = "2025-06-15T19:05:58.447Z" }, + { url = "https://files.pythonhosted.org/packages/fe/9e/4af0056c258b861fbb29dcb36258de1e2b857be4a9509e6298abcf31e5c9/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf", size = 596016, upload-time = "2025-06-15T19:05:59.59Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fa/95d604b58aa375e781daf350897aaaa089cff59d84147e9ccff2447c8294/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4", size = 476727, upload-time = "2025-06-15T19:06:01.086Z" }, + { url = "https://files.pythonhosted.org/packages/65/95/fe479b2664f19be4cf5ceeb21be05afd491d95f142e72d26a42f41b7c4f8/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20", size = 451864, upload-time = "2025-06-15T19:06:02.144Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8a/3c4af14b93a15ce55901cd7a92e1a4701910f1768c78fb30f61d2b79785b/watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef", size = 625626, upload-time = "2025-06-15T19:06:03.578Z" }, + { url = "https://files.pythonhosted.org/packages/da/f5/cf6aa047d4d9e128f4b7cde615236a915673775ef171ff85971d698f3c2c/watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb", size = 622744, upload-time = "2025-06-15T19:06:05.066Z" }, + { url = "https://files.pythonhosted.org/packages/2c/00/70f75c47f05dea6fd30df90f047765f6fc2d6eb8b5a3921379b0b04defa2/watchfiles-1.1.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297", size = 402114, upload-time = "2025-06-15T19:06:06.186Z" }, + { url = "https://files.pythonhosted.org/packages/53/03/acd69c48db4a1ed1de26b349d94077cca2238ff98fd64393f3e97484cae6/watchfiles-1.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018", size = 393879, upload-time = "2025-06-15T19:06:07.369Z" }, + { url = "https://files.pythonhosted.org/packages/2f/c8/a9a2a6f9c8baa4eceae5887fecd421e1b7ce86802bcfc8b6a942e2add834/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0", size = 450026, upload-time = "2025-06-15T19:06:08.476Z" }, + { url = "https://files.pythonhosted.org/packages/fe/51/d572260d98388e6e2b967425c985e07d47ee6f62e6455cefb46a6e06eda5/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12", size = 457917, upload-time = "2025-06-15T19:06:09.988Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/4258e52917bf9f12909b6ec314ff9636276f3542f9d3807d143f27309104/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb", size = 483602, upload-time = "2025-06-15T19:06:11.088Z" }, + { url = "https://files.pythonhosted.org/packages/84/99/bee17a5f341a4345fe7b7972a475809af9e528deba056f8963d61ea49f75/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77", size = 596758, upload-time = "2025-06-15T19:06:12.197Z" }, + { url = "https://files.pythonhosted.org/packages/40/76/e4bec1d59b25b89d2b0716b41b461ed655a9a53c60dc78ad5771fda5b3e6/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92", size = 477601, upload-time = "2025-06-15T19:06:13.391Z" }, + { url = "https://files.pythonhosted.org/packages/1f/fa/a514292956f4a9ce3c567ec0c13cce427c158e9f272062685a8a727d08fc/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e", size = 451936, upload-time = "2025-06-15T19:06:14.656Z" }, + { url = "https://files.pythonhosted.org/packages/32/5d/c3bf927ec3bbeb4566984eba8dd7a8eb69569400f5509904545576741f88/watchfiles-1.1.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b", size = 626243, upload-time = "2025-06-15T19:06:16.232Z" }, + { url = "https://files.pythonhosted.org/packages/e6/65/6e12c042f1a68c556802a84d54bb06d35577c81e29fba14019562479159c/watchfiles-1.1.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259", size = 623073, upload-time = "2025-06-15T19:06:17.457Z" }, + { url = "https://files.pythonhosted.org/packages/89/ab/7f79d9bf57329e7cbb0a6fd4c7bd7d0cee1e4a8ef0041459f5409da3506c/watchfiles-1.1.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f", size = 400872, upload-time = "2025-06-15T19:06:18.57Z" }, + { url = "https://files.pythonhosted.org/packages/df/d5/3f7bf9912798e9e6c516094db6b8932df53b223660c781ee37607030b6d3/watchfiles-1.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e", size = 392877, upload-time = "2025-06-15T19:06:19.55Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c5/54ec7601a2798604e01c75294770dbee8150e81c6e471445d7601610b495/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa", size = 449645, upload-time = "2025-06-15T19:06:20.66Z" }, + { url = "https://files.pythonhosted.org/packages/0a/04/c2f44afc3b2fce21ca0b7802cbd37ed90a29874f96069ed30a36dfe57c2b/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8", size = 457424, upload-time = "2025-06-15T19:06:21.712Z" }, + { url = "https://files.pythonhosted.org/packages/9f/b0/eec32cb6c14d248095261a04f290636da3df3119d4040ef91a4a50b29fa5/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f", size = 481584, upload-time = "2025-06-15T19:06:22.777Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e2/ca4bb71c68a937d7145aa25709e4f5d68eb7698a25ce266e84b55d591bbd/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e", size = 596675, upload-time = "2025-06-15T19:06:24.226Z" }, + { url = "https://files.pythonhosted.org/packages/a1/dd/b0e4b7fb5acf783816bc950180a6cd7c6c1d2cf7e9372c0ea634e722712b/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb", size = 477363, upload-time = "2025-06-15T19:06:25.42Z" }, + { url = "https://files.pythonhosted.org/packages/69/c4/088825b75489cb5b6a761a4542645718893d395d8c530b38734f19da44d2/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147", size = 452240, upload-time = "2025-06-15T19:06:26.552Z" }, + { url = "https://files.pythonhosted.org/packages/10/8c/22b074814970eeef43b7c44df98c3e9667c1f7bf5b83e0ff0201b0bd43f9/watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8", size = 625607, upload-time = "2025-06-15T19:06:27.606Z" }, + { url = "https://files.pythonhosted.org/packages/32/fa/a4f5c2046385492b2273213ef815bf71a0d4c1943b784fb904e184e30201/watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db", size = 623315, upload-time = "2025-06-15T19:06:29.076Z" }, + { url = "https://files.pythonhosted.org/packages/47/8a/a45db804b9f0740f8408626ab2bca89c3136432e57c4673b50180bf85dd9/watchfiles-1.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa", size = 406400, upload-time = "2025-06-15T19:06:30.233Z" }, + { url = "https://files.pythonhosted.org/packages/64/06/a08684f628fb41addd451845aceedc2407dc3d843b4b060a7c4350ddee0c/watchfiles-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433", size = 397920, upload-time = "2025-06-15T19:06:31.315Z" }, + { url = "https://files.pythonhosted.org/packages/79/e6/e10d5675af653b1b07d4156906858041149ca222edaf8995877f2605ba9e/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4", size = 451196, upload-time = "2025-06-15T19:06:32.435Z" }, + { url = "https://files.pythonhosted.org/packages/f6/8a/facd6988100cd0f39e89f6c550af80edb28e3a529e1ee662e750663e6b36/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7", size = 458218, upload-time = "2025-06-15T19:06:33.503Z" }, + { url = "https://files.pythonhosted.org/packages/90/26/34cbcbc4d0f2f8f9cc243007e65d741ae039f7a11ef8ec6e9cd25bee08d1/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f", size = 484851, upload-time = "2025-06-15T19:06:34.541Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1f/f59faa9fc4b0e36dbcdd28a18c430416443b309d295d8b82e18192d120ad/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf", size = 599520, upload-time = "2025-06-15T19:06:35.785Z" }, + { url = "https://files.pythonhosted.org/packages/83/72/3637abecb3bf590529f5154ca000924003e5f4bbb9619744feeaf6f0b70b/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29", size = 477956, upload-time = "2025-06-15T19:06:36.965Z" }, + { url = "https://files.pythonhosted.org/packages/f7/f3/d14ffd9acc0c1bd4790378995e320981423263a5d70bd3929e2e0dc87fff/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e", size = 453196, upload-time = "2025-06-15T19:06:38.024Z" }, + { url = "https://files.pythonhosted.org/packages/7f/38/78ad77bd99e20c0fdc82262be571ef114fc0beef9b43db52adb939768c38/watchfiles-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86", size = 627479, upload-time = "2025-06-15T19:06:39.442Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/549d50a22fcc83f1017c6427b1c76c053233f91b526f4ad7a45971e70c0b/watchfiles-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f", size = 624414, upload-time = "2025-06-15T19:06:40.859Z" }, + { url = "https://files.pythonhosted.org/packages/72/de/57d6e40dc9140af71c12f3a9fc2d3efc5529d93981cd4d265d484d7c9148/watchfiles-1.1.0-cp39-cp39-win32.whl", hash = "sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267", size = 280020, upload-time = "2025-06-15T19:06:41.89Z" }, + { url = "https://files.pythonhosted.org/packages/88/bb/7d287fc2a762396b128a0fca2dbae29386e0a242b81d1046daf389641db3/watchfiles-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc", size = 292758, upload-time = "2025-06-15T19:06:43.251Z" }, + { url = "https://files.pythonhosted.org/packages/be/7c/a3d7c55cfa377c2f62c4ae3c6502b997186bc5e38156bafcb9b653de9a6d/watchfiles-1.1.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5", size = 406748, upload-time = "2025-06-15T19:06:44.2Z" }, + { url = "https://files.pythonhosted.org/packages/38/d0/c46f1b2c0ca47f3667b144de6f0515f6d1c670d72f2ca29861cac78abaa1/watchfiles-1.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d", size = 398801, upload-time = "2025-06-15T19:06:45.774Z" }, + { url = "https://files.pythonhosted.org/packages/70/9c/9a6a42e97f92eeed77c3485a43ea96723900aefa3ac739a8c73f4bff2cd7/watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea", size = 451528, upload-time = "2025-06-15T19:06:46.791Z" }, + { url = "https://files.pythonhosted.org/packages/51/7b/98c7f4f7ce7ff03023cf971cd84a3ee3b790021ae7584ffffa0eb2554b96/watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6", size = 454095, upload-time = "2025-06-15T19:06:48.211Z" }, + { url = "https://files.pythonhosted.org/packages/8c/6b/686dcf5d3525ad17b384fd94708e95193529b460a1b7bf40851f1328ec6e/watchfiles-1.1.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3", size = 406910, upload-time = "2025-06-15T19:06:49.335Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d3/71c2dcf81dc1edcf8af9f4d8d63b1316fb0a2dd90cbfd427e8d9dd584a90/watchfiles-1.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c", size = 398816, upload-time = "2025-06-15T19:06:50.433Z" }, + { url = "https://files.pythonhosted.org/packages/b8/fa/12269467b2fc006f8fce4cd6c3acfa77491dd0777d2a747415f28ccc8c60/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432", size = 451584, upload-time = "2025-06-15T19:06:51.834Z" }, + { url = "https://files.pythonhosted.org/packages/bd/d3/254cea30f918f489db09d6a8435a7de7047f8cb68584477a515f160541d6/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792", size = 454009, upload-time = "2025-06-15T19:06:52.896Z" }, + { url = "https://files.pythonhosted.org/packages/48/93/5c96bdb65e7f88f7da40645f34c0a3c317a2931ed82161e93c91e8eddd27/watchfiles-1.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9", size = 406640, upload-time = "2025-06-15T19:06:54.868Z" }, + { url = "https://files.pythonhosted.org/packages/e3/25/09204836e93e1b99cce88802ce87264a1d20610c7a8f6de24def27ad95b1/watchfiles-1.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a", size = 398543, upload-time = "2025-06-15T19:06:55.95Z" }, + { url = "https://files.pythonhosted.org/packages/5e/dc/6f324a6f32c5ab73b54311b5f393a79df34c1584b8d2404cf7e6d780aa5d/watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866", size = 451787, upload-time = "2025-06-15T19:06:56.998Z" }, + { url = "https://files.pythonhosted.org/packages/45/5d/1d02ef4caa4ec02389e72d5594cdf9c67f1800a7c380baa55063c30c6598/watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277", size = 454272, upload-time = "2025-06-15T19:06:58.055Z" }, ] [[package]] name = "websockets" -version = "14.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f4/1b/380b883ce05bb5f45a905b61790319a28958a9ab1e4b6b95ff5464b60ca1/websockets-14.1.tar.gz", hash = "sha256:398b10c77d471c0aab20a845e7a60076b6390bfdaac7a6d2edb0d2c59d75e8d8", size = 162840 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/af/91/b1b375dbd856fd5fff3f117de0e520542343ecaf4e8fc60f1ac1e9f5822c/websockets-14.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a0adf84bc2e7c86e8a202537b4fd50e6f7f0e4a6b6bf64d7ccb96c4cd3330b29", size = 161950 }, - { url = "https://files.pythonhosted.org/packages/61/8f/4d52f272d3ebcd35e1325c646e98936099a348374d4a6b83b524bded8116/websockets-14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90b5d9dfbb6d07a84ed3e696012610b6da074d97453bd01e0e30744b472c8179", size = 159601 }, - { url = "https://files.pythonhosted.org/packages/c4/b1/29e87b53eb1937992cdee094a0988aadc94f25cf0b37e90c75eed7123d75/websockets-14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2177ee3901075167f01c5e335a6685e71b162a54a89a56001f1c3e9e3d2ad250", size = 159854 }, - { url = "https://files.pythonhosted.org/packages/3f/e6/752a2f5e8321ae2a613062676c08ff2fccfb37dc837a2ee919178a372e8a/websockets-14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f14a96a0034a27f9d47fd9788913924c89612225878f8078bb9d55f859272b0", size = 168835 }, - { url = "https://files.pythonhosted.org/packages/60/27/ca62de7877596926321b99071639275e94bb2401397130b7cf33dbf2106a/websockets-14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f874ba705deea77bcf64a9da42c1f5fc2466d8f14daf410bc7d4ceae0a9fcb0", size = 167844 }, - { url = "https://files.pythonhosted.org/packages/7e/db/f556a1d06635c680ef376be626c632e3f2bbdb1a0189d1d1bffb061c3b70/websockets-14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9607b9a442392e690a57909c362811184ea429585a71061cd5d3c2b98065c199", size = 168157 }, - { url = "https://files.pythonhosted.org/packages/b3/bc/99e5f511838c365ac6ecae19674eb5e94201aa4235bd1af3e6fa92c12905/websockets-14.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bea45f19b7ca000380fbd4e02552be86343080120d074b87f25593ce1700ad58", size = 168561 }, - { url = "https://files.pythonhosted.org/packages/c6/e7/251491585bad61c79e525ac60927d96e4e17b18447cc9c3cfab47b2eb1b8/websockets-14.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:219c8187b3ceeadbf2afcf0f25a4918d02da7b944d703b97d12fb01510869078", size = 167979 }, - { url = "https://files.pythonhosted.org/packages/ac/98/7ac2e4eeada19bdbc7a3a66a58e3ebdf33648b9e1c5b3f08c3224df168cf/websockets-14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ad2ab2547761d79926effe63de21479dfaf29834c50f98c4bf5b5480b5838434", size = 167925 }, - { url = "https://files.pythonhosted.org/packages/ab/3d/09e65c47ee2396b7482968068f6e9b516221e1032b12dcf843b9412a5dfb/websockets-14.1-cp310-cp310-win32.whl", hash = "sha256:1288369a6a84e81b90da5dbed48610cd7e5d60af62df9851ed1d1d23a9069f10", size = 162831 }, - { url = "https://files.pythonhosted.org/packages/8a/67/59828a3d09740e6a485acccfbb66600632f2178b6ed1b61388ee96f17d5a/websockets-14.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0744623852f1497d825a49a99bfbec9bea4f3f946df6eb9d8a2f0c37a2fec2e", size = 163266 }, - { url = "https://files.pythonhosted.org/packages/97/ed/c0d03cb607b7fe1f7ff45e2cd4bb5cd0f9e3299ced79c2c303a6fff44524/websockets-14.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:449d77d636f8d9c17952628cc7e3b8faf6e92a17ec581ec0c0256300717e1512", size = 161949 }, - { url = "https://files.pythonhosted.org/packages/06/91/bf0a44e238660d37a2dda1b4896235d20c29a2d0450f3a46cd688f43b239/websockets-14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a35f704be14768cea9790d921c2c1cc4fc52700410b1c10948511039be824aac", size = 159606 }, - { url = "https://files.pythonhosted.org/packages/ff/b8/7185212adad274c2b42b6a24e1ee6b916b7809ed611cbebc33b227e5c215/websockets-14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b1f3628a0510bd58968c0f60447e7a692933589b791a6b572fcef374053ca280", size = 159854 }, - { url = "https://files.pythonhosted.org/packages/5a/8a/0849968d83474be89c183d8ae8dcb7f7ada1a3c24f4d2a0d7333c231a2c3/websockets-14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c3deac3748ec73ef24fc7be0b68220d14d47d6647d2f85b2771cb35ea847aa1", size = 169402 }, - { url = "https://files.pythonhosted.org/packages/bd/4f/ef886e37245ff6b4a736a09b8468dae05d5d5c99de1357f840d54c6f297d/websockets-14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7048eb4415d46368ef29d32133134c513f507fff7d953c18c91104738a68c3b3", size = 168406 }, - { url = "https://files.pythonhosted.org/packages/11/43/e2dbd4401a63e409cebddedc1b63b9834de42f51b3c84db885469e9bdcef/websockets-14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cf0ad281c979306a6a34242b371e90e891bce504509fb6bb5246bbbf31e7b6", size = 168776 }, - { url = "https://files.pythonhosted.org/packages/6d/d6/7063e3f5c1b612e9f70faae20ebaeb2e684ffa36cb959eb0862ee2809b32/websockets-14.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cc1fc87428c1d18b643479caa7b15db7d544652e5bf610513d4a3478dbe823d0", size = 169083 }, - { url = "https://files.pythonhosted.org/packages/49/69/e6f3d953f2fa0f8a723cf18cd011d52733bd7f6e045122b24e0e7f49f9b0/websockets-14.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f95ba34d71e2fa0c5d225bde3b3bdb152e957150100e75c86bc7f3964c450d89", size = 168529 }, - { url = "https://files.pythonhosted.org/packages/70/ff/f31fa14561fc1d7b8663b0ed719996cf1f581abee32c8fb2f295a472f268/websockets-14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9481a6de29105d73cf4515f2bef8eb71e17ac184c19d0b9918a3701c6c9c4f23", size = 168475 }, - { url = "https://files.pythonhosted.org/packages/f1/15/b72be0e4bf32ff373aa5baef46a4c7521b8ea93ad8b49ca8c6e8e764c083/websockets-14.1-cp311-cp311-win32.whl", hash = "sha256:368a05465f49c5949e27afd6fbe0a77ce53082185bbb2ac096a3a8afaf4de52e", size = 162833 }, - { url = "https://files.pythonhosted.org/packages/bc/ef/2d81679acbe7057ffe2308d422f744497b52009ea8bab34b6d74a2657d1d/websockets-14.1-cp311-cp311-win_amd64.whl", hash = "sha256:6d24fc337fc055c9e83414c94e1ee0dee902a486d19d2a7f0929e49d7d604b09", size = 163263 }, - { url = "https://files.pythonhosted.org/packages/55/64/55698544ce29e877c9188f1aee9093712411a8fc9732cca14985e49a8e9c/websockets-14.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ed907449fe5e021933e46a3e65d651f641975a768d0649fee59f10c2985529ed", size = 161957 }, - { url = "https://files.pythonhosted.org/packages/a2/b1/b088f67c2b365f2c86c7b48edb8848ac27e508caf910a9d9d831b2f343cb/websockets-14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:87e31011b5c14a33b29f17eb48932e63e1dcd3fa31d72209848652310d3d1f0d", size = 159620 }, - { url = "https://files.pythonhosted.org/packages/c1/89/2a09db1bbb40ba967a1b8225b07b7df89fea44f06de9365f17f684d0f7e6/websockets-14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bc6ccf7d54c02ae47a48ddf9414c54d48af9c01076a2e1023e3b486b6e72c707", size = 159852 }, - { url = "https://files.pythonhosted.org/packages/ca/c1/f983138cd56e7d3079f1966e81f77ce6643f230cd309f73aa156bb181749/websockets-14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9777564c0a72a1d457f0848977a1cbe15cfa75fa2f67ce267441e465717dcf1a", size = 169675 }, - { url = "https://files.pythonhosted.org/packages/c1/c8/84191455d8660e2a0bdb33878d4ee5dfa4a2cedbcdc88bbd097303b65bfa/websockets-14.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a655bde548ca98f55b43711b0ceefd2a88a71af6350b0c168aa77562104f3f45", size = 168619 }, - { url = "https://files.pythonhosted.org/packages/8d/a7/62e551fdcd7d44ea74a006dc193aba370505278ad76efd938664531ce9d6/websockets-14.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3dfff83ca578cada2d19e665e9c8368e1598d4e787422a460ec70e531dbdd58", size = 169042 }, - { url = "https://files.pythonhosted.org/packages/ad/ed/1532786f55922c1e9c4d329608e36a15fdab186def3ca9eb10d7465bc1cc/websockets-14.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6a6c9bcf7cdc0fd41cc7b7944447982e8acfd9f0d560ea6d6845428ed0562058", size = 169345 }, - { url = "https://files.pythonhosted.org/packages/ea/fb/160f66960d495df3de63d9bcff78e1b42545b2a123cc611950ffe6468016/websockets-14.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4b6caec8576e760f2c7dd878ba817653144d5f369200b6ddf9771d64385b84d4", size = 168725 }, - { url = "https://files.pythonhosted.org/packages/cf/53/1bf0c06618b5ac35f1d7906444b9958f8485682ab0ea40dee7b17a32da1e/websockets-14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb6d38971c800ff02e4a6afd791bbe3b923a9a57ca9aeab7314c21c84bf9ff05", size = 168712 }, - { url = "https://files.pythonhosted.org/packages/e5/22/5ec2f39fff75f44aa626f86fa7f20594524a447d9c3be94d8482cd5572ef/websockets-14.1-cp312-cp312-win32.whl", hash = "sha256:1d045cbe1358d76b24d5e20e7b1878efe578d9897a25c24e6006eef788c0fdf0", size = 162838 }, - { url = "https://files.pythonhosted.org/packages/74/27/28f07df09f2983178db7bf6c9cccc847205d2b92ced986cd79565d68af4f/websockets-14.1-cp312-cp312-win_amd64.whl", hash = "sha256:90f4c7a069c733d95c308380aae314f2cb45bd8a904fb03eb36d1a4983a4993f", size = 163277 }, - { url = "https://files.pythonhosted.org/packages/34/77/812b3ba5110ed8726eddf9257ab55ce9e85d97d4aa016805fdbecc5e5d48/websockets-14.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3630b670d5057cd9e08b9c4dab6493670e8e762a24c2c94ef312783870736ab9", size = 161966 }, - { url = "https://files.pythonhosted.org/packages/8d/24/4fcb7aa6986ae7d9f6d083d9d53d580af1483c5ec24bdec0978307a0f6ac/websockets-14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:36ebd71db3b89e1f7b1a5deaa341a654852c3518ea7a8ddfdf69cc66acc2db1b", size = 159625 }, - { url = "https://files.pythonhosted.org/packages/f8/47/2a0a3a2fc4965ff5b9ce9324d63220156bd8bedf7f90824ab92a822e65fd/websockets-14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5b918d288958dc3fa1c5a0b9aa3256cb2b2b84c54407f4813c45d52267600cd3", size = 159857 }, - { url = "https://files.pythonhosted.org/packages/dd/c8/d7b425011a15e35e17757e4df75b25e1d0df64c0c315a44550454eaf88fc/websockets-14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00fe5da3f037041da1ee0cf8e308374e236883f9842c7c465aa65098b1c9af59", size = 169635 }, - { url = "https://files.pythonhosted.org/packages/93/39/6e3b5cffa11036c40bd2f13aba2e8e691ab2e01595532c46437b56575678/websockets-14.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8149a0f5a72ca36720981418eeffeb5c2729ea55fa179091c81a0910a114a5d2", size = 168578 }, - { url = "https://files.pythonhosted.org/packages/cf/03/8faa5c9576299b2adf34dcccf278fc6bbbcda8a3efcc4d817369026be421/websockets-14.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77569d19a13015e840b81550922056acabc25e3f52782625bc6843cfa034e1da", size = 169018 }, - { url = "https://files.pythonhosted.org/packages/8c/05/ea1fec05cc3a60defcdf0bb9f760c3c6bd2dd2710eff7ac7f891864a22ba/websockets-14.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cf5201a04550136ef870aa60ad3d29d2a59e452a7f96b94193bee6d73b8ad9a9", size = 169383 }, - { url = "https://files.pythonhosted.org/packages/21/1d/eac1d9ed787f80754e51228e78855f879ede1172c8b6185aca8cef494911/websockets-14.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:88cf9163ef674b5be5736a584c999e98daf3aabac6e536e43286eb74c126b9c7", size = 168773 }, - { url = "https://files.pythonhosted.org/packages/0e/1b/e808685530185915299740d82b3a4af3f2b44e56ccf4389397c7a5d95d39/websockets-14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:836bef7ae338a072e9d1863502026f01b14027250a4545672673057997d5c05a", size = 168757 }, - { url = "https://files.pythonhosted.org/packages/b6/19/6ab716d02a3b068fbbeb6face8a7423156e12c446975312f1c7c0f4badab/websockets-14.1-cp313-cp313-win32.whl", hash = "sha256:0d4290d559d68288da9f444089fd82490c8d2744309113fc26e2da6e48b65da6", size = 162834 }, - { url = "https://files.pythonhosted.org/packages/6c/fd/ab6b7676ba712f2fc89d1347a4b5bdc6aa130de10404071f2b2606450209/websockets-14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8621a07991add373c3c5c2cf89e1d277e49dc82ed72c75e3afc74bd0acc446f0", size = 163277 }, - { url = "https://files.pythonhosted.org/packages/4d/23/ac9d8c5ec7b90efc3687d60474ef7e698f8b75cb7c9dfedad72701e797c9/websockets-14.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:01bb2d4f0a6d04538d3c5dfd27c0643269656c28045a53439cbf1c004f90897a", size = 161945 }, - { url = "https://files.pythonhosted.org/packages/c5/6b/ffa450e3b736a86ae6b40ce20a758ac9af80c96a18548f6c323ed60329c5/websockets-14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:414ffe86f4d6f434a8c3b7913655a1a5383b617f9bf38720e7c0799fac3ab1c6", size = 159600 }, - { url = "https://files.pythonhosted.org/packages/74/62/f90d1fd57ea7337ecaa99f17c31a544b9dcdb7c7c32a3d3997ccc42d57d3/websockets-14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fda642151d5affdee8a430bd85496f2e2517be3a2b9d2484d633d5712b15c56", size = 159850 }, - { url = "https://files.pythonhosted.org/packages/35/dd/1e71865de1f3c265e11d02b0b4c76178f84351c6611e515fbe3d2bd1b98c/websockets-14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd7c11968bc3860d5c78577f0dbc535257ccec41750675d58d8dc66aa47fe52c", size = 168616 }, - { url = "https://files.pythonhosted.org/packages/ba/ae/0d069b52e26d48402dbe90c7581eb6a5bed5d7dbe3d9ca3cf1033859d58e/websockets-14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a032855dc7db987dff813583d04f4950d14326665d7e714d584560b140ae6b8b", size = 167619 }, - { url = "https://files.pythonhosted.org/packages/1c/3f/d3f2df62704c53e0296f0ce714921b6a15df10e2e463734c737b1d9e2522/websockets-14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7e7ea2f782408c32d86b87a0d2c1fd8871b0399dd762364c731d86c86069a78", size = 167921 }, - { url = "https://files.pythonhosted.org/packages/e0/e2/2dcb295bdae9393070cea58c790d87d1d36149bb4319b1da6014c8a36d42/websockets-14.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:39450e6215f7d9f6f7bc2a6da21d79374729f5d052333da4d5825af8a97e6735", size = 168343 }, - { url = "https://files.pythonhosted.org/packages/6b/fd/fa48e8b4e10e2c165cbfc16dada7405b4008818be490fc6b99a4928e232a/websockets-14.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ceada5be22fa5a5a4cdeec74e761c2ee7db287208f54c718f2df4b7e200b8d4a", size = 167745 }, - { url = "https://files.pythonhosted.org/packages/42/45/79db33f2b744d2014b40946428e6c37ce944fde8791d82e1c2f4d4a67d96/websockets-14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3fc753451d471cff90b8f467a1fc0ae64031cf2d81b7b34e1811b7e2691bc4bc", size = 167705 }, - { url = "https://files.pythonhosted.org/packages/da/27/f66507db34ca9c79562f28fa5983433f7b9080fd471cc188906006d36ba4/websockets-14.1-cp39-cp39-win32.whl", hash = "sha256:14839f54786987ccd9d03ed7f334baec0f02272e7ec4f6e9d427ff584aeea8b4", size = 162828 }, - { url = "https://files.pythonhosted.org/packages/11/25/bb8f81a4ec94f595adb845608c5ec9549cb6b446945b292fe61807c7c95b/websockets-14.1-cp39-cp39-win_amd64.whl", hash = "sha256:d9fd19ecc3a4d5ae82ddbfb30962cf6d874ff943e56e0c81f5169be2fda62979", size = 163271 }, - { url = "https://files.pythonhosted.org/packages/fb/cd/382a05a1ba2a93bd9fb807716a660751295df72e77204fb130a102fcdd36/websockets-14.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e5dc25a9dbd1a7f61eca4b7cb04e74ae4b963d658f9e4f9aad9cd00b688692c8", size = 159633 }, - { url = "https://files.pythonhosted.org/packages/b7/a0/fa7c62e2952ef028b422fbf420f9353d9dd4dfaa425de3deae36e98c0784/websockets-14.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:04a97aca96ca2acedf0d1f332c861c5a4486fdcba7bcef35873820f940c4231e", size = 159867 }, - { url = "https://files.pythonhosted.org/packages/c1/94/954b4924f868db31d5f0935893c7a8446515ee4b36bb8ad75a929469e453/websockets-14.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df174ece723b228d3e8734a6f2a6febbd413ddec39b3dc592f5a4aa0aff28098", size = 161121 }, - { url = "https://files.pythonhosted.org/packages/7a/2e/f12bbb41a8f2abb76428ba4fdcd9e67b5b364a3e7fa97c88f4d6950aa2d4/websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:034feb9f4286476f273b9a245fb15f02c34d9586a5bc936aff108c3ba1b21beb", size = 160731 }, - { url = "https://files.pythonhosted.org/packages/13/97/b76979401f2373af1fe3e08f960b265cecab112e7dac803446fb98351a52/websockets-14.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c308dabd2b380807ab64b62985eaccf923a78ebc572bd485375b9ca2b7dc7", size = 160681 }, - { url = "https://files.pythonhosted.org/packages/39/9c/16916d9a436c109a1d7ba78817e8fee357b78968be3f6e6f517f43afa43d/websockets-14.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5a42d3ecbb2db5080fc578314439b1d79eef71d323dc661aa616fb492436af5d", size = 163316 }, - { url = "https://files.pythonhosted.org/packages/0f/57/50fd09848a80a1b63a572c610f230f8a17590ca47daf256eb28a0851df73/websockets-14.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ddaa4a390af911da6f680be8be4ff5aaf31c4c834c1a9147bc21cbcbca2d4370", size = 159633 }, - { url = "https://files.pythonhosted.org/packages/d7/2f/db728b0c7962ad6a13ced8286325bf430b59722d943e7f6bdbd8a78e2bfe/websockets-14.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a4c805c6034206143fbabd2d259ec5e757f8b29d0a2f0bf3d2fe5d1f60147a4a", size = 159863 }, - { url = "https://files.pythonhosted.org/packages/fa/e4/21e7481936fbfffee138edb488a6184eb3468b402a8181b95b9e44f6a676/websockets-14.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:205f672a6c2c671a86d33f6d47c9b35781a998728d2c7c2a3e1cf3333fcb62b7", size = 161119 }, - { url = "https://files.pythonhosted.org/packages/64/2d/efb6cf716d4f9da60190756e06f8db2066faf1ae4a4a8657ab136dfcc7a8/websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef440054124728cc49b01c33469de06755e5a7a4e83ef61934ad95fc327fbb0", size = 160724 }, - { url = "https://files.pythonhosted.org/packages/40/b0/a70b972d853c3f26040834fcff3dd45c8a0292af9f5f0b36f9fbb82d5d44/websockets-14.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7591d6f440af7f73c4bd9404f3772bfee064e639d2b6cc8c94076e71b2471c1", size = 160676 }, - { url = "https://files.pythonhosted.org/packages/4a/76/f9da7f97476cc7b8c74829bb4851f1faf660455839689ffcc354b52860a7/websockets-14.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:25225cc79cfebc95ba1d24cd3ab86aaa35bcd315d12fa4358939bd55e9bd74a5", size = 163311 }, - { url = "https://files.pythonhosted.org/packages/b0/0b/c7e5d11020242984d9d37990310520ed663b942333b83a033c2f20191113/websockets-14.1-py3-none-any.whl", hash = "sha256:4d4fc827a20abe6d544a119896f6b78ee13fe81cbfef416f3f2ddf09a03f0e2e", size = 156277 }, +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" }, + { url = "https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" }, + { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" }, + { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" }, + { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = "2025-03-05T20:01:52.213Z" }, + { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" }, + { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = "2025-03-05T20:02:00.305Z" }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/36/db/3fff0bcbe339a6fa6a3b9e3fbc2bfb321ec2f4cd233692272c5a8d6cf801/websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5", size = 175424, upload-time = "2025-03-05T20:02:56.505Z" }, + { url = "https://files.pythonhosted.org/packages/46/e6/519054c2f477def4165b0ec060ad664ed174e140b0d1cbb9fafa4a54f6db/websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a", size = 173077, upload-time = "2025-03-05T20:02:58.37Z" }, + { url = "https://files.pythonhosted.org/packages/1a/21/c0712e382df64c93a0d16449ecbf87b647163485ca1cc3f6cbadb36d2b03/websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b", size = 173324, upload-time = "2025-03-05T20:02:59.773Z" }, + { url = "https://files.pythonhosted.org/packages/1c/cb/51ba82e59b3a664df54beed8ad95517c1b4dc1a913730e7a7db778f21291/websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770", size = 182094, upload-time = "2025-03-05T20:03:01.827Z" }, + { url = "https://files.pythonhosted.org/packages/fb/0f/bf3788c03fec679bcdaef787518dbe60d12fe5615a544a6d4cf82f045193/websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb", size = 181094, upload-time = "2025-03-05T20:03:03.123Z" }, + { url = "https://files.pythonhosted.org/packages/5e/da/9fb8c21edbc719b66763a571afbaf206cb6d3736d28255a46fc2fe20f902/websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054", size = 181397, upload-time = "2025-03-05T20:03:04.443Z" }, + { url = "https://files.pythonhosted.org/packages/2e/65/65f379525a2719e91d9d90c38fe8b8bc62bd3c702ac651b7278609b696c4/websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee", size = 181794, upload-time = "2025-03-05T20:03:06.708Z" }, + { url = "https://files.pythonhosted.org/packages/d9/26/31ac2d08f8e9304d81a1a7ed2851c0300f636019a57cbaa91342015c72cc/websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed", size = 181194, upload-time = "2025-03-05T20:03:08.844Z" }, + { url = "https://files.pythonhosted.org/packages/98/72/1090de20d6c91994cd4b357c3f75a4f25ee231b63e03adea89671cc12a3f/websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880", size = 181164, upload-time = "2025-03-05T20:03:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/2d/37/098f2e1c103ae8ed79b0e77f08d83b0ec0b241cf4b7f2f10edd0126472e1/websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411", size = 176381, upload-time = "2025-03-05T20:03:12.77Z" }, + { url = "https://files.pythonhosted.org/packages/75/8b/a32978a3ab42cebb2ebdd5b05df0696a09f4d436ce69def11893afa301f0/websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4", size = 176841, upload-time = "2025-03-05T20:03:14.367Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" }, + { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" }, + { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 174599, upload-time = "2025-03-05T20:03:21.1Z" }, + { url = "https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/b7/48/4b67623bac4d79beb3a6bb27b803ba75c1bdedc06bd827e465803690a4b2/websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940", size = 173106, upload-time = "2025-03-05T20:03:29.404Z" }, + { url = "https://files.pythonhosted.org/packages/ed/f0/adb07514a49fe5728192764e04295be78859e4a537ab8fcc518a3dbb3281/websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e", size = 173339, upload-time = "2025-03-05T20:03:30.755Z" }, + { url = "https://files.pythonhosted.org/packages/87/28/bd23c6344b18fb43df40d0700f6d3fffcd7cef14a6995b4f976978b52e62/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9", size = 174597, upload-time = "2025-03-05T20:03:32.247Z" }, + { url = "https://files.pythonhosted.org/packages/6d/79/ca288495863d0f23a60f546f0905ae8f3ed467ad87f8b6aceb65f4c013e4/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b", size = 174205, upload-time = "2025-03-05T20:03:33.731Z" }, + { url = "https://files.pythonhosted.org/packages/04/e4/120ff3180b0872b1fe6637f6f995bcb009fb5c87d597c1fc21456f50c848/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f", size = 174150, upload-time = "2025-03-05T20:03:35.757Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c3/30e2f9c539b8da8b1d76f64012f3b19253271a63413b2d3adb94b143407f/websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123", size = 176877, upload-time = "2025-03-05T20:03:37.199Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, ] [[package]] name = "winkerberos" -version = "0.12.0" +version = "0.12.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/2d/4f/8db9aae372e88031877067a9d8da027d6e67454d233177cb49198ab216a5/winkerberos-0.12.0.tar.gz", hash = "sha256:b19b9b8c87ab9dc76bb325f0dd4e93a2d669abc68d2283eec25ed67176ad7ad3", size = 35572 } +sdist = { url = "https://files.pythonhosted.org/packages/2d/75/86d470935167eb1c40d53498993e14cc021d9611a539d61c9b4202c291ab/winkerberos-0.12.2.tar.gz", hash = "sha256:ff91daed04727a0362892802ee093d8da11f08536393526bdf3bc64e04079faa", size = 35672, upload-time = "2025-04-02T14:41:48.274Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7b/71/69549a95c4077a35819b04f3179292eec7119903ec035995254a41a3622a/winkerberos-0.12.0-cp310-cp310-win32.whl", hash = "sha256:bb37e91f9959adbeb3c6ae25c828c1d033fa2b1b03176037d7bec0adfbb85b8f", size = 25297 }, - { url = "https://files.pythonhosted.org/packages/8d/47/c8e2138e51201f79f9adc73a13a6616c375d0490081b124e2d8eebf21711/winkerberos-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:e479a498ab1f93bde0c0eb880f2c68378272850db51b978c75e9d73148c44f9c", size = 27635 }, - { url = "https://files.pythonhosted.org/packages/bf/5b/5799a0b7b3162b4476443b16c7a12a63ec3dbd9e9e2bf622c5833c27079b/winkerberos-0.12.0-cp311-cp311-win32.whl", hash = "sha256:35ed9eedc2551063758756724c345d906b4a68b8d31bc9fd6e935c1eb37c4a35", size = 25297 }, - { url = "https://files.pythonhosted.org/packages/24/ec/d437a005207d3c66bdb22196f954d25716fea21b79d4873873a2cd836946/winkerberos-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:838fdab8f71905c5a80ee1c868e2c7f3c2fee233113e8e65cd989b353e9a980e", size = 27640 }, - { url = "https://files.pythonhosted.org/packages/12/6f/1cab2c1685c3cb55a5a6b87c75df33def11b25cf01525021fa4f18c2ba24/winkerberos-0.12.0-cp312-cp312-win32.whl", hash = "sha256:f8a9dedd35eda764cd0591d050234a8f381c57a559c16a914de311ed426f6f50", size = 25365 }, - { url = "https://files.pythonhosted.org/packages/01/e9/0408c1abd6d599d61709ceecafdb0f8ff725e015b8c5444db62de6466b37/winkerberos-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:22db6871a842d16bb045d93440d0acc98d7690320acd7d7174ae36509ce78198", size = 27678 }, - { url = "https://files.pythonhosted.org/packages/7a/ff/b6cd850e9bed012d289cbcf1a2c9f70292c6d2664f65c0b6741877f0f7ec/winkerberos-0.12.0-cp39-cp39-win32.whl", hash = "sha256:987a16e5fff8b6e1cd2d1a52db92c51ba657a34e6c55b0b7d96247f512ed7444", size = 25290 }, - { url = "https://files.pythonhosted.org/packages/85/a9/c2319bcf270170ddb9c52105851d7565e6ce7266dc5a3e6cdf97fb6fe43b/winkerberos-0.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:4ffe1b654884e169c88785aa3960cc8dc4f09b757d242b59b3022c632736d2cd", size = 27629 }, + { url = "https://files.pythonhosted.org/packages/59/ac/c6ce495af45371ffd85a6a3d24c2ced679b8dbcf3b8c6beca093706b1620/winkerberos-0.12.2-cp310-cp310-win32.whl", hash = "sha256:f8b751bd5a28e6a9146f154bed395c30ce4f245448addc763f98cb8843879027", size = 25331, upload-time = "2025-04-02T14:41:36.398Z" }, + { url = "https://files.pythonhosted.org/packages/cb/7b/ad32174c3ed4710cd2ad8f20171f5061cb13603f091d714d5aa6b30d51f0/winkerberos-0.12.2-cp310-cp310-win_amd64.whl", hash = "sha256:4be3b0de548b80f52a6544dff9d571da6cdfde590176a01477358b3808b12dfa", size = 27670, upload-time = "2025-04-02T14:41:37.68Z" }, + { url = "https://files.pythonhosted.org/packages/91/12/23b29d359dee9f7a8243cb0040ea1834acd1af8cbc38cfe1c7ca82ab4ec0/winkerberos-0.12.2-cp311-cp311-win32.whl", hash = "sha256:ff2b2ec9b9246bbc05f0d4e6fe5f3f3563237357b9b35eaa58ec1a9ddf349ab8", size = 25332, upload-time = "2025-04-02T14:41:38.671Z" }, + { url = "https://files.pythonhosted.org/packages/23/d2/2bfa1dcdb4a47b7f989a9e758c892bd7393a156b0e1f0df63eca8304e892/winkerberos-0.12.2-cp311-cp311-win_amd64.whl", hash = "sha256:e6ac2b2cc329a68502821905f6ffe48e109d54a46aba7414ea231a30c75bb2d9", size = 27671, upload-time = "2025-04-02T14:41:40.104Z" }, + { url = "https://files.pythonhosted.org/packages/4f/01/26c5b1435654596c07b314653183ffe42b64ea07041c328f0fd4c68fe9f9/winkerberos-0.12.2-cp312-cp312-win32.whl", hash = "sha256:46dac1300e20738cbaf6c17c2e4832062ed7faee346c7a96f0e57f8bbe279c25", size = 25396, upload-time = "2025-04-02T14:41:41.6Z" }, + { url = "https://files.pythonhosted.org/packages/64/b1/6c4a1e4e50553798eb44dbb0d71ba6af48e2a62a0eb01bd0d4e2b41914e3/winkerberos-0.12.2-cp312-cp312-win_amd64.whl", hash = "sha256:2c5c7a70c0d4a43546b20d5654e7e7e5e5e96f42084a7f293864f7ad0fb1e953", size = 27710, upload-time = "2025-04-02T14:41:42.656Z" }, + { url = "https://files.pythonhosted.org/packages/5f/91/cff6750c7c3b2a9f35e12cd7c4df901251fc3be985edef707a3458c43e9a/winkerberos-0.12.2-cp313-cp313-win32.whl", hash = "sha256:482a72500b7822cc8f941d0c6eed668a24c030ac145c97732e175b51441bebbf", size = 25391, upload-time = "2025-04-02T14:41:43.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/98/defb037ad127c4006c4e992dd55ce0df92059626d3df5f5f4c5fc8502c26/winkerberos-0.12.2-cp313-cp313-win_amd64.whl", hash = "sha256:efd65ba54534512070916cb9c91ef9798a0f9fb0b04e12732c9631e71553fd69", size = 27704, upload-time = "2025-04-02T14:41:45.203Z" }, + { url = "https://files.pythonhosted.org/packages/be/17/b16e72e0b896cdf05666994cbc402a66f5911d56ea28d4e858714328b698/winkerberos-0.12.2-cp39-cp39-win32.whl", hash = "sha256:0c80eed53472a38d7f1dd015e27d93705b22a2acd2557bad13d8b5d688037b29", size = 25326, upload-time = "2025-04-02T14:41:46.216Z" }, + { url = "https://files.pythonhosted.org/packages/65/04/ae42e839e8d836fde613f94f30395953292a7b9be388247237196d1e5caa/winkerberos-0.12.2-cp39-cp39-win_amd64.whl", hash = "sha256:4b908aab5ab42e98bee44eca67dfebe4733d210bccf021e42b669bf4af2005a4", size = 27663, upload-time = "2025-04-02T14:41:47.294Z" }, ] [[package]] name = "zipp" -version = "3.21.0" +version = "3.23.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3f/50/bad581df71744867e9468ebd0bcd6505de3b275e06f202c2cb016e3ff56f/zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4", size = 24545 } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/1a/7e4798e9339adc931158c9d69ecc34f5e6791489d469f5e50ec15e35f458/zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931", size = 9630 }, + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, ] [[package]] name = "zope-event" -version = "5.0" +version = "5.1.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "setuptools" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/46/c2/427f1867bb96555d1d34342f1dd97f8c420966ab564d58d18469a1db8736/zope.event-5.0.tar.gz", hash = "sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd", size = 17350 } +sdist = { url = "https://files.pythonhosted.org/packages/5a/9f/c443569a68d3844c044d9fa9711e08adb33649b527b4d432433f4c2a6a02/zope_event-5.1.1.tar.gz", hash = "sha256:c1ac931abf57efba71a2a313c5f4d57768a19b15c37e3f02f50eb1536be12d4e", size = 18811, upload-time = "2025-07-22T07:04:00.924Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fe/42/f8dbc2b9ad59e927940325a22d6d3931d630c3644dae7e2369ef5d9ba230/zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26", size = 6824 }, + { url = "https://files.pythonhosted.org/packages/e9/04/fd55695f6448abd22295fc68b2d3a135389558f0f49a24b0dffe019d0ecb/zope_event-5.1.1-py3-none-any.whl", hash = "sha256:8d5ea7b992c42ce73a6fa9c2ba99a004c52cd9f05d87f3220768ef0329b92df7", size = 7014, upload-time = "2025-07-22T07:03:59.9Z" }, ] [[package]] @@ -1971,38 +2275,38 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "setuptools" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/76/71/e6177f390e8daa7e75378505c5ab974e0bf59c1d3b19155638c7afbf4b2d/zope.interface-7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce290e62229964715f1011c3dbeab7a4a1e4971fd6f31324c4519464473ef9f2", size = 208243 }, - { url = "https://files.pythonhosted.org/packages/52/db/7e5f4226bef540f6d55acfd95cd105782bc6ee044d9b5587ce2c95558a5e/zope.interface-7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05b910a5afe03256b58ab2ba6288960a2892dfeef01336dc4be6f1b9ed02ab0a", size = 208759 }, - { url = "https://files.pythonhosted.org/packages/28/ea/fdd9813c1eafd333ad92464d57a4e3a82b37ae57c19497bcffa42df673e4/zope.interface-7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:550f1c6588ecc368c9ce13c44a49b8d6b6f3ca7588873c679bd8fd88a1b557b6", size = 254922 }, - { url = "https://files.pythonhosted.org/packages/3b/d3/0000a4d497ef9fbf4f66bb6828b8d0a235e690d57c333be877bec763722f/zope.interface-7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ef9e2f865721553c6f22a9ff97da0f0216c074bd02b25cf0d3af60ea4d6931d", size = 249367 }, - { url = "https://files.pythonhosted.org/packages/3e/e5/0b359e99084f033d413419eff23ee9c2bd33bca2ca9f4e83d11856f22d10/zope.interface-7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f926f0dcb058211a3bb3e0e501c69759613b17a553788b2caeb991bed3b61d", size = 254488 }, - { url = "https://files.pythonhosted.org/packages/7b/90/12d50b95f40e3b2fc0ba7f7782104093b9fd62806b13b98ef4e580f2ca61/zope.interface-7.2-cp310-cp310-win_amd64.whl", hash = "sha256:144964649eba4c5e4410bb0ee290d338e78f179cdbfd15813de1a664e7649b3b", size = 211947 }, - { url = "https://files.pythonhosted.org/packages/98/7d/2e8daf0abea7798d16a58f2f3a2bf7588872eee54ac119f99393fdd47b65/zope.interface-7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1909f52a00c8c3dcab6c4fad5d13de2285a4b3c7be063b239b8dc15ddfb73bd2", size = 208776 }, - { url = "https://files.pythonhosted.org/packages/a0/2a/0c03c7170fe61d0d371e4c7ea5b62b8cb79b095b3d630ca16719bf8b7b18/zope.interface-7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ecf2451596f19fd607bb09953f426588fc1e79e93f5968ecf3367550396b22", size = 209296 }, - { url = "https://files.pythonhosted.org/packages/49/b4/451f19448772b4a1159519033a5f72672221e623b0a1bd2b896b653943d8/zope.interface-7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:033b3923b63474800b04cba480b70f6e6243a62208071fc148354f3f89cc01b7", size = 260997 }, - { url = "https://files.pythonhosted.org/packages/65/94/5aa4461c10718062c8f8711161faf3249d6d3679c24a0b81dd6fc8ba1dd3/zope.interface-7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a102424e28c6b47c67923a1f337ede4a4c2bba3965b01cf707978a801fc7442c", size = 255038 }, - { url = "https://files.pythonhosted.org/packages/9f/aa/1a28c02815fe1ca282b54f6705b9ddba20328fabdc37b8cf73fc06b172f0/zope.interface-7.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25e6a61dcb184453bb00eafa733169ab6d903e46f5c2ace4ad275386f9ab327a", size = 259806 }, - { url = "https://files.pythonhosted.org/packages/a7/2c/82028f121d27c7e68632347fe04f4a6e0466e77bb36e104c8b074f3d7d7b/zope.interface-7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3f6771d1647b1fc543d37640b45c06b34832a943c80d1db214a37c31161a93f1", size = 212305 }, - { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959 }, - { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357 }, - { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235 }, - { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253 }, - { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702 }, - { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466 }, - { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961 }, - { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356 }, - { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196 }, - { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237 }, - { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696 }, - { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472 }, - { url = "https://files.pythonhosted.org/packages/8c/2c/1f49dc8b4843c4f0848d8e43191aed312bad946a1563d1bf9e46cf2816ee/zope.interface-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7bd449c306ba006c65799ea7912adbbfed071089461a19091a228998b82b1fdb", size = 208349 }, - { url = "https://files.pythonhosted.org/packages/ed/7d/83ddbfc8424c69579a90fc8edc2b797223da2a8083a94d8dfa0e374c5ed4/zope.interface-7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a19a6cc9c6ce4b1e7e3d319a473cf0ee989cbbe2b39201d7c19e214d2dfb80c7", size = 208799 }, - { url = "https://files.pythonhosted.org/packages/36/22/b1abd91854c1be03f5542fe092e6a745096d2eca7704d69432e119100583/zope.interface-7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cd1790b48c16db85d51fbbd12d20949d7339ad84fd971427cf00d990c1f137", size = 254267 }, - { url = "https://files.pythonhosted.org/packages/2a/dd/fcd313ee216ad0739ae00e6126bc22a0af62a74f76a9ca668d16cd276222/zope.interface-7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52e446f9955195440e787596dccd1411f543743c359eeb26e9b2c02b077b0519", size = 248614 }, - { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800 }, - { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980 }, +sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960, upload-time = "2024-11-28T08:45:39.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/71/e6177f390e8daa7e75378505c5ab974e0bf59c1d3b19155638c7afbf4b2d/zope.interface-7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce290e62229964715f1011c3dbeab7a4a1e4971fd6f31324c4519464473ef9f2", size = 208243, upload-time = "2024-11-28T08:47:29.781Z" }, + { url = "https://files.pythonhosted.org/packages/52/db/7e5f4226bef540f6d55acfd95cd105782bc6ee044d9b5587ce2c95558a5e/zope.interface-7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05b910a5afe03256b58ab2ba6288960a2892dfeef01336dc4be6f1b9ed02ab0a", size = 208759, upload-time = "2024-11-28T08:47:31.908Z" }, + { url = "https://files.pythonhosted.org/packages/28/ea/fdd9813c1eafd333ad92464d57a4e3a82b37ae57c19497bcffa42df673e4/zope.interface-7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:550f1c6588ecc368c9ce13c44a49b8d6b6f3ca7588873c679bd8fd88a1b557b6", size = 254922, upload-time = "2024-11-28T09:18:11.795Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d3/0000a4d497ef9fbf4f66bb6828b8d0a235e690d57c333be877bec763722f/zope.interface-7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ef9e2f865721553c6f22a9ff97da0f0216c074bd02b25cf0d3af60ea4d6931d", size = 249367, upload-time = "2024-11-28T08:48:24.238Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e5/0b359e99084f033d413419eff23ee9c2bd33bca2ca9f4e83d11856f22d10/zope.interface-7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f926f0dcb058211a3bb3e0e501c69759613b17a553788b2caeb991bed3b61d", size = 254488, upload-time = "2024-11-28T08:48:28.816Z" }, + { url = "https://files.pythonhosted.org/packages/7b/90/12d50b95f40e3b2fc0ba7f7782104093b9fd62806b13b98ef4e580f2ca61/zope.interface-7.2-cp310-cp310-win_amd64.whl", hash = "sha256:144964649eba4c5e4410bb0ee290d338e78f179cdbfd15813de1a664e7649b3b", size = 211947, upload-time = "2024-11-28T08:48:18.831Z" }, + { url = "https://files.pythonhosted.org/packages/98/7d/2e8daf0abea7798d16a58f2f3a2bf7588872eee54ac119f99393fdd47b65/zope.interface-7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1909f52a00c8c3dcab6c4fad5d13de2285a4b3c7be063b239b8dc15ddfb73bd2", size = 208776, upload-time = "2024-11-28T08:47:53.009Z" }, + { url = "https://files.pythonhosted.org/packages/a0/2a/0c03c7170fe61d0d371e4c7ea5b62b8cb79b095b3d630ca16719bf8b7b18/zope.interface-7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ecf2451596f19fd607bb09953f426588fc1e79e93f5968ecf3367550396b22", size = 209296, upload-time = "2024-11-28T08:47:57.993Z" }, + { url = "https://files.pythonhosted.org/packages/49/b4/451f19448772b4a1159519033a5f72672221e623b0a1bd2b896b653943d8/zope.interface-7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:033b3923b63474800b04cba480b70f6e6243a62208071fc148354f3f89cc01b7", size = 260997, upload-time = "2024-11-28T09:18:13.935Z" }, + { url = "https://files.pythonhosted.org/packages/65/94/5aa4461c10718062c8f8711161faf3249d6d3679c24a0b81dd6fc8ba1dd3/zope.interface-7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a102424e28c6b47c67923a1f337ede4a4c2bba3965b01cf707978a801fc7442c", size = 255038, upload-time = "2024-11-28T08:48:26.381Z" }, + { url = "https://files.pythonhosted.org/packages/9f/aa/1a28c02815fe1ca282b54f6705b9ddba20328fabdc37b8cf73fc06b172f0/zope.interface-7.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25e6a61dcb184453bb00eafa733169ab6d903e46f5c2ace4ad275386f9ab327a", size = 259806, upload-time = "2024-11-28T08:48:30.78Z" }, + { url = "https://files.pythonhosted.org/packages/a7/2c/82028f121d27c7e68632347fe04f4a6e0466e77bb36e104c8b074f3d7d7b/zope.interface-7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3f6771d1647b1fc543d37640b45c06b34832a943c80d1db214a37c31161a93f1", size = 212305, upload-time = "2024-11-28T08:49:14.525Z" }, + { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959, upload-time = "2024-11-28T08:47:47.788Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357, upload-time = "2024-11-28T08:47:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235, upload-time = "2024-11-28T09:18:15.56Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253, upload-time = "2024-11-28T08:48:29.025Z" }, + { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702, upload-time = "2024-11-28T08:48:37.363Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466, upload-time = "2024-11-28T08:49:14.397Z" }, + { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961, upload-time = "2024-11-28T08:48:29.865Z" }, + { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356, upload-time = "2024-11-28T08:48:33.297Z" }, + { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196, upload-time = "2024-11-28T09:18:17.584Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237, upload-time = "2024-11-28T08:48:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696, upload-time = "2024-11-28T08:48:41.161Z" }, + { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472, upload-time = "2024-11-28T08:49:56.587Z" }, + { url = "https://files.pythonhosted.org/packages/8c/2c/1f49dc8b4843c4f0848d8e43191aed312bad946a1563d1bf9e46cf2816ee/zope.interface-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7bd449c306ba006c65799ea7912adbbfed071089461a19091a228998b82b1fdb", size = 208349, upload-time = "2024-11-28T08:49:28.872Z" }, + { url = "https://files.pythonhosted.org/packages/ed/7d/83ddbfc8424c69579a90fc8edc2b797223da2a8083a94d8dfa0e374c5ed4/zope.interface-7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a19a6cc9c6ce4b1e7e3d319a473cf0ee989cbbe2b39201d7c19e214d2dfb80c7", size = 208799, upload-time = "2024-11-28T08:49:30.616Z" }, + { url = "https://files.pythonhosted.org/packages/36/22/b1abd91854c1be03f5542fe092e6a745096d2eca7704d69432e119100583/zope.interface-7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cd1790b48c16db85d51fbbd12d20949d7339ad84fd971427cf00d990c1f137", size = 254267, upload-time = "2024-11-28T09:18:21.059Z" }, + { url = "https://files.pythonhosted.org/packages/2a/dd/fcd313ee216ad0739ae00e6126bc22a0af62a74f76a9ca668d16cd276222/zope.interface-7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52e446f9955195440e787596dccd1411f543743c359eeb26e9b2c02b077b0519", size = 248614, upload-time = "2024-11-28T08:48:41.953Z" }, + { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800, upload-time = "2024-11-28T08:48:46.637Z" }, + { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980, upload-time = "2024-11-28T08:50:35.681Z" }, ] [[package]] @@ -2010,88 +2314,89 @@ name = "zstandard" version = "0.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/55/bd0487e86679db1823fc9ee0d8c9c78ae2413d34c0b461193b5f4c31d22f/zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", size = 788701 }, - { url = "https://files.pythonhosted.org/packages/e1/8a/ccb516b684f3ad987dfee27570d635822e3038645b1a950c5e8022df1145/zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", size = 633678 }, - { url = "https://files.pythonhosted.org/packages/12/89/75e633d0611c028e0d9af6df199423bf43f54bea5007e6718ab7132e234c/zstandard-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", size = 4941098 }, - { url = "https://files.pythonhosted.org/packages/4a/7a/bd7f6a21802de358b63f1ee636ab823711c25ce043a3e9f043b4fcb5ba32/zstandard-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", size = 5308798 }, - { url = "https://files.pythonhosted.org/packages/79/3b/775f851a4a65013e88ca559c8ae42ac1352db6fcd96b028d0df4d7d1d7b4/zstandard-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", size = 5341840 }, - { url = "https://files.pythonhosted.org/packages/09/4f/0cc49570141dd72d4d95dd6fcf09328d1b702c47a6ec12fbed3b8aed18a5/zstandard-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", size = 5440337 }, - { url = "https://files.pythonhosted.org/packages/e7/7c/aaa7cd27148bae2dc095191529c0570d16058c54c4597a7d118de4b21676/zstandard-0.23.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", size = 4861182 }, - { url = "https://files.pythonhosted.org/packages/ac/eb/4b58b5c071d177f7dc027129d20bd2a44161faca6592a67f8fcb0b88b3ae/zstandard-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", size = 4932936 }, - { url = "https://files.pythonhosted.org/packages/44/f9/21a5fb9bb7c9a274b05ad700a82ad22ce82f7ef0f485980a1e98ed6e8c5f/zstandard-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", size = 5464705 }, - { url = "https://files.pythonhosted.org/packages/49/74/b7b3e61db3f88632776b78b1db597af3f44c91ce17d533e14a25ce6a2816/zstandard-0.23.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", size = 4857882 }, - { url = "https://files.pythonhosted.org/packages/4a/7f/d8eb1cb123d8e4c541d4465167080bec88481ab54cd0b31eb4013ba04b95/zstandard-0.23.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", size = 4697672 }, - { url = "https://files.pythonhosted.org/packages/5e/05/f7dccdf3d121309b60342da454d3e706453a31073e2c4dac8e1581861e44/zstandard-0.23.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", size = 5206043 }, - { url = "https://files.pythonhosted.org/packages/86/9d/3677a02e172dccd8dd3a941307621c0cbd7691d77cb435ac3c75ab6a3105/zstandard-0.23.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", size = 5667390 }, - { url = "https://files.pythonhosted.org/packages/41/7e/0012a02458e74a7ba122cd9cafe491facc602c9a17f590367da369929498/zstandard-0.23.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", size = 5198901 }, - { url = "https://files.pythonhosted.org/packages/65/3a/8f715b97bd7bcfc7342d8adcd99a026cb2fb550e44866a3b6c348e1b0f02/zstandard-0.23.0-cp310-cp310-win32.whl", hash = "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", size = 430596 }, - { url = "https://files.pythonhosted.org/packages/19/b7/b2b9eca5e5a01111e4fe8a8ffb56bdcdf56b12448a24effe6cfe4a252034/zstandard-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", size = 495498 }, - { url = "https://files.pythonhosted.org/packages/9e/40/f67e7d2c25a0e2dc1744dd781110b0b60306657f8696cafb7ad7579469bd/zstandard-0.23.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", size = 788699 }, - { url = "https://files.pythonhosted.org/packages/e8/46/66d5b55f4d737dd6ab75851b224abf0afe5774976fe511a54d2eb9063a41/zstandard-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", size = 633681 }, - { url = "https://files.pythonhosted.org/packages/63/b6/677e65c095d8e12b66b8f862b069bcf1f1d781b9c9c6f12eb55000d57583/zstandard-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", size = 4944328 }, - { url = "https://files.pythonhosted.org/packages/59/cc/e76acb4c42afa05a9d20827116d1f9287e9c32b7ad58cc3af0721ce2b481/zstandard-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", size = 5311955 }, - { url = "https://files.pythonhosted.org/packages/78/e4/644b8075f18fc7f632130c32e8f36f6dc1b93065bf2dd87f03223b187f26/zstandard-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", size = 5344944 }, - { url = "https://files.pythonhosted.org/packages/76/3f/dbafccf19cfeca25bbabf6f2dd81796b7218f768ec400f043edc767015a6/zstandard-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", size = 5442927 }, - { url = "https://files.pythonhosted.org/packages/0c/c3/d24a01a19b6733b9f218e94d1a87c477d523237e07f94899e1c10f6fd06c/zstandard-0.23.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", size = 4864910 }, - { url = "https://files.pythonhosted.org/packages/1c/a9/cf8f78ead4597264f7618d0875be01f9bc23c9d1d11afb6d225b867cb423/zstandard-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", size = 4935544 }, - { url = "https://files.pythonhosted.org/packages/2c/96/8af1e3731b67965fb995a940c04a2c20997a7b3b14826b9d1301cf160879/zstandard-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", size = 5467094 }, - { url = "https://files.pythonhosted.org/packages/ff/57/43ea9df642c636cb79f88a13ab07d92d88d3bfe3e550b55a25a07a26d878/zstandard-0.23.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", size = 4860440 }, - { url = "https://files.pythonhosted.org/packages/46/37/edb78f33c7f44f806525f27baa300341918fd4c4af9472fbc2c3094be2e8/zstandard-0.23.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", size = 4700091 }, - { url = "https://files.pythonhosted.org/packages/c1/f1/454ac3962671a754f3cb49242472df5c2cced4eb959ae203a377b45b1a3c/zstandard-0.23.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", size = 5208682 }, - { url = "https://files.pythonhosted.org/packages/85/b2/1734b0fff1634390b1b887202d557d2dd542de84a4c155c258cf75da4773/zstandard-0.23.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", size = 5669707 }, - { url = "https://files.pythonhosted.org/packages/52/5a/87d6971f0997c4b9b09c495bf92189fb63de86a83cadc4977dc19735f652/zstandard-0.23.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", size = 5201792 }, - { url = "https://files.pythonhosted.org/packages/79/02/6f6a42cc84459d399bd1a4e1adfc78d4dfe45e56d05b072008d10040e13b/zstandard-0.23.0-cp311-cp311-win32.whl", hash = "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", size = 430586 }, - { url = "https://files.pythonhosted.org/packages/be/a2/4272175d47c623ff78196f3c10e9dc7045c1b9caf3735bf041e65271eca4/zstandard-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", size = 495420 }, - { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713 }, - { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459 }, - { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707 }, - { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545 }, - { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533 }, - { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510 }, - { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973 }, - { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968 }, - { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179 }, - { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577 }, - { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899 }, - { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964 }, - { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398 }, - { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313 }, - { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877 }, - { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595 }, - { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975 }, - { url = "https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448 }, - { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269 }, - { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228 }, - { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891 }, - { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310 }, - { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912 }, - { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946 }, - { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994 }, - { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681 }, - { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239 }, - { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 5200149 }, - { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392 }, - { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299 }, - { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862 }, - { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578 }, - { url = "https://files.pythonhosted.org/packages/fb/96/4fcafeb7e013a2386d22f974b5b97a0b9a65004ed58c87ae001599bfbd48/zstandard-0.23.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", size = 788697 }, - { url = "https://files.pythonhosted.org/packages/83/ff/a52ce725be69b86a2967ecba0497a8184540cc284c0991125515449e54e2/zstandard-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", size = 633679 }, - { url = "https://files.pythonhosted.org/packages/34/0f/3dc62db122f6a9c481c335fff6fc9f4e88d8f6e2d47321ee3937328addb4/zstandard-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", size = 4940416 }, - { url = "https://files.pythonhosted.org/packages/1d/e5/9fe0dd8c85fdc2f635e6660d07872a5dc4b366db566630161e39f9f804e1/zstandard-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", size = 5307693 }, - { url = "https://files.pythonhosted.org/packages/73/bf/fe62c0cd865c171ee8ed5bc83174b5382a2cb729c8d6162edfb99a83158b/zstandard-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", size = 5341236 }, - { url = "https://files.pythonhosted.org/packages/39/86/4fe79b30c794286110802a6cd44a73b6a314ac8196b9338c0fbd78c2407d/zstandard-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", size = 5439101 }, - { url = "https://files.pythonhosted.org/packages/72/ed/cacec235c581ebf8c608c7fb3d4b6b70d1b490d0e5128ea6996f809ecaef/zstandard-0.23.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", size = 4860320 }, - { url = "https://files.pythonhosted.org/packages/f6/1e/2c589a2930f93946b132fc852c574a19d5edc23fad2b9e566f431050c7ec/zstandard-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", size = 4931933 }, - { url = "https://files.pythonhosted.org/packages/8e/f5/30eadde3686d902b5d4692bb5f286977cbc4adc082145eb3f49d834b2eae/zstandard-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", size = 5463878 }, - { url = "https://files.pythonhosted.org/packages/e0/c8/8aed1f0ab9854ef48e5ad4431367fcb23ce73f0304f7b72335a8edc66556/zstandard-0.23.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", size = 4857192 }, - { url = "https://files.pythonhosted.org/packages/a8/c6/55e666cfbcd032b9e271865e8578fec56e5594d4faeac379d371526514f5/zstandard-0.23.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69", size = 4696513 }, - { url = "https://files.pythonhosted.org/packages/dc/bd/720b65bea63ec9de0ac7414c33b9baf271c8de8996e5ff324dc93fc90ff1/zstandard-0.23.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", size = 5204823 }, - { url = "https://files.pythonhosted.org/packages/d8/40/d678db1556e3941d330cd4e95623a63ef235b18547da98fa184cbc028ecf/zstandard-0.23.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", size = 5666490 }, - { url = "https://files.pythonhosted.org/packages/ed/cc/c89329723d7515898a1fc7ef5d251264078548c505719d13e9511800a103/zstandard-0.23.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", size = 5196622 }, - { url = "https://files.pythonhosted.org/packages/78/4c/634289d41e094327a94500dfc919e58841b10ea3a9efdfafbac614797ec2/zstandard-0.23.0-cp39-cp39-win32.whl", hash = "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", size = 430620 }, - { url = "https://files.pythonhosted.org/packages/a2/e2/0b0c5a0f4f7699fecd92c1ba6278ef9b01f2b0b0dd46f62bfc6729c05659/zstandard-0.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", size = 495528 }, + { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*' and platform_python_implementation == 'PyPy'" }, + { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*' and platform_python_implementation == 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/55/bd0487e86679db1823fc9ee0d8c9c78ae2413d34c0b461193b5f4c31d22f/zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", size = 788701, upload-time = "2024-07-15T00:13:27.351Z" }, + { url = "https://files.pythonhosted.org/packages/e1/8a/ccb516b684f3ad987dfee27570d635822e3038645b1a950c5e8022df1145/zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", size = 633678, upload-time = "2024-07-15T00:13:30.24Z" }, + { url = "https://files.pythonhosted.org/packages/12/89/75e633d0611c028e0d9af6df199423bf43f54bea5007e6718ab7132e234c/zstandard-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", size = 4941098, upload-time = "2024-07-15T00:13:32.526Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7a/bd7f6a21802de358b63f1ee636ab823711c25ce043a3e9f043b4fcb5ba32/zstandard-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", size = 5308798, upload-time = "2024-07-15T00:13:34.925Z" }, + { url = "https://files.pythonhosted.org/packages/79/3b/775f851a4a65013e88ca559c8ae42ac1352db6fcd96b028d0df4d7d1d7b4/zstandard-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", size = 5341840, upload-time = "2024-07-15T00:13:37.376Z" }, + { url = "https://files.pythonhosted.org/packages/09/4f/0cc49570141dd72d4d95dd6fcf09328d1b702c47a6ec12fbed3b8aed18a5/zstandard-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", size = 5440337, upload-time = "2024-07-15T00:13:39.772Z" }, + { url = "https://files.pythonhosted.org/packages/e7/7c/aaa7cd27148bae2dc095191529c0570d16058c54c4597a7d118de4b21676/zstandard-0.23.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", size = 4861182, upload-time = "2024-07-15T00:13:42.495Z" }, + { url = "https://files.pythonhosted.org/packages/ac/eb/4b58b5c071d177f7dc027129d20bd2a44161faca6592a67f8fcb0b88b3ae/zstandard-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", size = 4932936, upload-time = "2024-07-15T00:13:44.234Z" }, + { url = "https://files.pythonhosted.org/packages/44/f9/21a5fb9bb7c9a274b05ad700a82ad22ce82f7ef0f485980a1e98ed6e8c5f/zstandard-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", size = 5464705, upload-time = "2024-07-15T00:13:46.822Z" }, + { url = "https://files.pythonhosted.org/packages/49/74/b7b3e61db3f88632776b78b1db597af3f44c91ce17d533e14a25ce6a2816/zstandard-0.23.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", size = 4857882, upload-time = "2024-07-15T00:13:49.297Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7f/d8eb1cb123d8e4c541d4465167080bec88481ab54cd0b31eb4013ba04b95/zstandard-0.23.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", size = 4697672, upload-time = "2024-07-15T00:13:51.447Z" }, + { url = "https://files.pythonhosted.org/packages/5e/05/f7dccdf3d121309b60342da454d3e706453a31073e2c4dac8e1581861e44/zstandard-0.23.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", size = 5206043, upload-time = "2024-07-15T00:13:53.587Z" }, + { url = "https://files.pythonhosted.org/packages/86/9d/3677a02e172dccd8dd3a941307621c0cbd7691d77cb435ac3c75ab6a3105/zstandard-0.23.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", size = 5667390, upload-time = "2024-07-15T00:13:56.137Z" }, + { url = "https://files.pythonhosted.org/packages/41/7e/0012a02458e74a7ba122cd9cafe491facc602c9a17f590367da369929498/zstandard-0.23.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", size = 5198901, upload-time = "2024-07-15T00:13:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/65/3a/8f715b97bd7bcfc7342d8adcd99a026cb2fb550e44866a3b6c348e1b0f02/zstandard-0.23.0-cp310-cp310-win32.whl", hash = "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", size = 430596, upload-time = "2024-07-15T00:14:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/19/b7/b2b9eca5e5a01111e4fe8a8ffb56bdcdf56b12448a24effe6cfe4a252034/zstandard-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", size = 495498, upload-time = "2024-07-15T00:14:02.741Z" }, + { url = "https://files.pythonhosted.org/packages/9e/40/f67e7d2c25a0e2dc1744dd781110b0b60306657f8696cafb7ad7579469bd/zstandard-0.23.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", size = 788699, upload-time = "2024-07-15T00:14:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/e8/46/66d5b55f4d737dd6ab75851b224abf0afe5774976fe511a54d2eb9063a41/zstandard-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", size = 633681, upload-time = "2024-07-15T00:14:13.99Z" }, + { url = "https://files.pythonhosted.org/packages/63/b6/677e65c095d8e12b66b8f862b069bcf1f1d781b9c9c6f12eb55000d57583/zstandard-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", size = 4944328, upload-time = "2024-07-15T00:14:16.588Z" }, + { url = "https://files.pythonhosted.org/packages/59/cc/e76acb4c42afa05a9d20827116d1f9287e9c32b7ad58cc3af0721ce2b481/zstandard-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", size = 5311955, upload-time = "2024-07-15T00:14:19.389Z" }, + { url = "https://files.pythonhosted.org/packages/78/e4/644b8075f18fc7f632130c32e8f36f6dc1b93065bf2dd87f03223b187f26/zstandard-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", size = 5344944, upload-time = "2024-07-15T00:14:22.173Z" }, + { url = "https://files.pythonhosted.org/packages/76/3f/dbafccf19cfeca25bbabf6f2dd81796b7218f768ec400f043edc767015a6/zstandard-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", size = 5442927, upload-time = "2024-07-15T00:14:24.825Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c3/d24a01a19b6733b9f218e94d1a87c477d523237e07f94899e1c10f6fd06c/zstandard-0.23.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", size = 4864910, upload-time = "2024-07-15T00:14:26.982Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a9/cf8f78ead4597264f7618d0875be01f9bc23c9d1d11afb6d225b867cb423/zstandard-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", size = 4935544, upload-time = "2024-07-15T00:14:29.582Z" }, + { url = "https://files.pythonhosted.org/packages/2c/96/8af1e3731b67965fb995a940c04a2c20997a7b3b14826b9d1301cf160879/zstandard-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", size = 5467094, upload-time = "2024-07-15T00:14:40.126Z" }, + { url = "https://files.pythonhosted.org/packages/ff/57/43ea9df642c636cb79f88a13ab07d92d88d3bfe3e550b55a25a07a26d878/zstandard-0.23.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", size = 4860440, upload-time = "2024-07-15T00:14:42.786Z" }, + { url = "https://files.pythonhosted.org/packages/46/37/edb78f33c7f44f806525f27baa300341918fd4c4af9472fbc2c3094be2e8/zstandard-0.23.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", size = 4700091, upload-time = "2024-07-15T00:14:45.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/f1/454ac3962671a754f3cb49242472df5c2cced4eb959ae203a377b45b1a3c/zstandard-0.23.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", size = 5208682, upload-time = "2024-07-15T00:14:47.407Z" }, + { url = "https://files.pythonhosted.org/packages/85/b2/1734b0fff1634390b1b887202d557d2dd542de84a4c155c258cf75da4773/zstandard-0.23.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", size = 5669707, upload-time = "2024-07-15T00:15:03.529Z" }, + { url = "https://files.pythonhosted.org/packages/52/5a/87d6971f0997c4b9b09c495bf92189fb63de86a83cadc4977dc19735f652/zstandard-0.23.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", size = 5201792, upload-time = "2024-07-15T00:15:28.372Z" }, + { url = "https://files.pythonhosted.org/packages/79/02/6f6a42cc84459d399bd1a4e1adfc78d4dfe45e56d05b072008d10040e13b/zstandard-0.23.0-cp311-cp311-win32.whl", hash = "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", size = 430586, upload-time = "2024-07-15T00:15:32.26Z" }, + { url = "https://files.pythonhosted.org/packages/be/a2/4272175d47c623ff78196f3c10e9dc7045c1b9caf3735bf041e65271eca4/zstandard-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", size = 495420, upload-time = "2024-07-15T00:15:34.004Z" }, + { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713, upload-time = "2024-07-15T00:15:35.815Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459, upload-time = "2024-07-15T00:15:37.995Z" }, + { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707, upload-time = "2024-07-15T00:15:39.872Z" }, + { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545, upload-time = "2024-07-15T00:15:41.75Z" }, + { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533, upload-time = "2024-07-15T00:15:44.114Z" }, + { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510, upload-time = "2024-07-15T00:15:46.509Z" }, + { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973, upload-time = "2024-07-15T00:15:49.939Z" }, + { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968, upload-time = "2024-07-15T00:15:52.025Z" }, + { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179, upload-time = "2024-07-15T00:15:54.971Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577, upload-time = "2024-07-15T00:15:57.634Z" }, + { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899, upload-time = "2024-07-15T00:16:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964, upload-time = "2024-07-15T00:16:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398, upload-time = "2024-07-15T00:16:06.694Z" }, + { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313, upload-time = "2024-07-15T00:16:09.758Z" }, + { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877, upload-time = "2024-07-15T00:16:11.758Z" }, + { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595, upload-time = "2024-07-15T00:16:13.731Z" }, + { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975, upload-time = "2024-07-15T00:16:16.005Z" }, + { url = "https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448, upload-time = "2024-07-15T00:16:17.897Z" }, + { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269, upload-time = "2024-07-15T00:16:20.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228, upload-time = "2024-07-15T00:16:23.398Z" }, + { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891, upload-time = "2024-07-15T00:16:26.391Z" }, + { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310, upload-time = "2024-07-15T00:16:29.018Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912, upload-time = "2024-07-15T00:16:31.871Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946, upload-time = "2024-07-15T00:16:34.593Z" }, + { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994, upload-time = "2024-07-15T00:16:36.887Z" }, + { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681, upload-time = "2024-07-15T00:16:39.709Z" }, + { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239, upload-time = "2024-07-15T00:16:41.83Z" }, + { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 5200149, upload-time = "2024-07-15T00:16:44.287Z" }, + { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392, upload-time = "2024-07-15T00:16:46.423Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299, upload-time = "2024-07-15T00:16:49.053Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862, upload-time = "2024-07-15T00:16:51.003Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578, upload-time = "2024-07-15T00:16:53.135Z" }, + { url = "https://files.pythonhosted.org/packages/fb/96/4fcafeb7e013a2386d22f974b5b97a0b9a65004ed58c87ae001599bfbd48/zstandard-0.23.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", size = 788697, upload-time = "2024-07-15T00:17:31.236Z" }, + { url = "https://files.pythonhosted.org/packages/83/ff/a52ce725be69b86a2967ecba0497a8184540cc284c0991125515449e54e2/zstandard-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", size = 633679, upload-time = "2024-07-15T00:17:32.911Z" }, + { url = "https://files.pythonhosted.org/packages/34/0f/3dc62db122f6a9c481c335fff6fc9f4e88d8f6e2d47321ee3937328addb4/zstandard-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", size = 4940416, upload-time = "2024-07-15T00:17:34.849Z" }, + { url = "https://files.pythonhosted.org/packages/1d/e5/9fe0dd8c85fdc2f635e6660d07872a5dc4b366db566630161e39f9f804e1/zstandard-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", size = 5307693, upload-time = "2024-07-15T00:17:37.355Z" }, + { url = "https://files.pythonhosted.org/packages/73/bf/fe62c0cd865c171ee8ed5bc83174b5382a2cb729c8d6162edfb99a83158b/zstandard-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", size = 5341236, upload-time = "2024-07-15T00:17:40.213Z" }, + { url = "https://files.pythonhosted.org/packages/39/86/4fe79b30c794286110802a6cd44a73b6a314ac8196b9338c0fbd78c2407d/zstandard-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", size = 5439101, upload-time = "2024-07-15T00:17:42.284Z" }, + { url = "https://files.pythonhosted.org/packages/72/ed/cacec235c581ebf8c608c7fb3d4b6b70d1b490d0e5128ea6996f809ecaef/zstandard-0.23.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", size = 4860320, upload-time = "2024-07-15T00:17:44.21Z" }, + { url = "https://files.pythonhosted.org/packages/f6/1e/2c589a2930f93946b132fc852c574a19d5edc23fad2b9e566f431050c7ec/zstandard-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", size = 4931933, upload-time = "2024-07-15T00:17:46.455Z" }, + { url = "https://files.pythonhosted.org/packages/8e/f5/30eadde3686d902b5d4692bb5f286977cbc4adc082145eb3f49d834b2eae/zstandard-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", size = 5463878, upload-time = "2024-07-15T00:17:48.866Z" }, + { url = "https://files.pythonhosted.org/packages/e0/c8/8aed1f0ab9854ef48e5ad4431367fcb23ce73f0304f7b72335a8edc66556/zstandard-0.23.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", size = 4857192, upload-time = "2024-07-15T00:17:51.558Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/55e666cfbcd032b9e271865e8578fec56e5594d4faeac379d371526514f5/zstandard-0.23.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69", size = 4696513, upload-time = "2024-07-15T00:17:53.924Z" }, + { url = "https://files.pythonhosted.org/packages/dc/bd/720b65bea63ec9de0ac7414c33b9baf271c8de8996e5ff324dc93fc90ff1/zstandard-0.23.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", size = 5204823, upload-time = "2024-07-15T00:17:55.948Z" }, + { url = "https://files.pythonhosted.org/packages/d8/40/d678db1556e3941d330cd4e95623a63ef235b18547da98fa184cbc028ecf/zstandard-0.23.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", size = 5666490, upload-time = "2024-07-15T00:17:58.327Z" }, + { url = "https://files.pythonhosted.org/packages/ed/cc/c89329723d7515898a1fc7ef5d251264078548c505719d13e9511800a103/zstandard-0.23.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", size = 5196622, upload-time = "2024-07-15T00:18:00.404Z" }, + { url = "https://files.pythonhosted.org/packages/78/4c/634289d41e094327a94500dfc919e58841b10ea3a9efdfafbac614797ec2/zstandard-0.23.0-cp39-cp39-win32.whl", hash = "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", size = 430620, upload-time = "2024-07-15T00:18:02.613Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e2/0b0c5a0f4f7699fecd92c1ba6278ef9b01f2b0b0dd46f62bfc6729c05659/zstandard-0.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", size = 495528, upload-time = "2024-07-15T00:18:04.452Z" }, ] From bbb6f88fae5593ab14c36b9578b07d2e0f0fce62 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 6 Aug 2025 14:21:53 -0400 Subject: [PATCH 2008/2111] PYTHON-5257 - Turn on mypy disallow_any_generics (#2456) --- bson/json_util.py | 30 ++++++------- bson/typings.py | 2 +- gridfs/asynchronous/grid_file.py | 24 +++++----- gridfs/synchronous/grid_file.py | 24 +++++----- pymongo/_asyncio_lock.py | 6 +-- pymongo/_asyncio_task.py | 4 +- pymongo/_csot.py | 2 +- pymongo/asynchronous/aggregation.py | 20 ++++----- pymongo/asynchronous/auth_oidc.py | 2 +- pymongo/asynchronous/bulk.py | 8 ++-- pymongo/asynchronous/change_stream.py | 8 ++-- pymongo/asynchronous/client_bulk.py | 6 +-- pymongo/asynchronous/client_session.py | 14 +++--- pymongo/asynchronous/collection.py | 14 +++--- pymongo/asynchronous/command_cursor.py | 4 +- pymongo/asynchronous/cursor.py | 34 +++++++------- pymongo/asynchronous/database.py | 2 +- pymongo/asynchronous/mongo_client.py | 31 +++++++------ pymongo/asynchronous/monitor.py | 2 +- pymongo/asynchronous/network.py | 2 +- pymongo/asynchronous/pool.py | 32 ++++++------- pymongo/asynchronous/server.py | 4 +- pymongo/asynchronous/topology.py | 4 +- pymongo/client_options.py | 2 +- pymongo/common.py | 20 ++++----- pymongo/encryption_options.py | 4 +- pymongo/helpers_shared.py | 8 ++-- pymongo/message.py | 62 +++++++++++++------------- pymongo/monitoring.py | 10 +++-- pymongo/network_layer.py | 10 ++--- pymongo/periodic_executor.py | 2 +- pymongo/server_description.py | 4 +- pymongo/ssl_support.py | 19 +++++--- pymongo/synchronous/aggregation.py | 20 ++++----- pymongo/synchronous/auth_oidc.py | 2 +- pymongo/synchronous/bulk.py | 8 ++-- pymongo/synchronous/change_stream.py | 8 ++-- pymongo/synchronous/client_bulk.py | 6 +-- pymongo/synchronous/client_session.py | 14 +++--- pymongo/synchronous/collection.py | 14 +++--- pymongo/synchronous/command_cursor.py | 4 +- pymongo/synchronous/cursor.py | 34 +++++++------- pymongo/synchronous/database.py | 2 +- pymongo/synchronous/mongo_client.py | 33 ++++++++------ pymongo/synchronous/monitor.py | 2 +- pymongo/synchronous/network.py | 2 +- pymongo/synchronous/pool.py | 32 ++++++------- pymongo/synchronous/server.py | 4 +- pymongo/synchronous/topology.py | 4 +- pymongo/topology_description.py | 4 +- pymongo/typings.py | 2 +- pyproject.toml | 5 ++- 52 files changed, 323 insertions(+), 297 deletions(-) diff --git a/bson/json_util.py b/bson/json_util.py index ecae103b55..1a3b0bd833 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -844,7 +844,7 @@ def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any: return {"$binary": {"base64": base64.b64encode(data).decode(), "subType": "%02x" % subtype}} -def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: +def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: # type: ignore[type-arg] if ( json_options.datetime_representation == DatetimeRepresentation.ISO8601 and 0 <= int(obj) <= _MAX_UTC_MS @@ -855,7 +855,7 @@ def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: return {"$date": {"$numberLong": str(int(obj))}} -def _encode_code(obj: Code, json_options: JSONOptions) -> dict: +def _encode_code(obj: Code, json_options: JSONOptions) -> dict: # type: ignore[type-arg] if obj.scope is None: return {"$code": str(obj)} else: @@ -873,7 +873,7 @@ def _encode_noop(obj: Any, dummy0: Any) -> Any: return obj -def _encode_regex(obj: Any, json_options: JSONOptions) -> dict: +def _encode_regex(obj: Any, json_options: JSONOptions) -> dict: # type: ignore[type-arg] flags = "" if obj.flags & re.IGNORECASE: flags += "i" @@ -918,7 +918,7 @@ def _encode_float(obj: float, json_options: JSONOptions) -> Any: return obj -def _encode_datetime(obj: datetime.datetime, json_options: JSONOptions) -> dict: +def _encode_datetime(obj: datetime.datetime, json_options: JSONOptions) -> dict: # type: ignore[type-arg] if json_options.datetime_representation == DatetimeRepresentation.ISO8601: if not obj.tzinfo: obj = obj.replace(tzinfo=utc) @@ -941,15 +941,15 @@ def _encode_datetime(obj: datetime.datetime, json_options: JSONOptions) -> dict: return {"$date": {"$numberLong": str(millis)}} -def _encode_bytes(obj: bytes, json_options: JSONOptions) -> dict: +def _encode_bytes(obj: bytes, json_options: JSONOptions) -> dict: # type: ignore[type-arg] return _encode_binary(obj, 0, json_options) -def _encode_binary_obj(obj: Binary, json_options: JSONOptions) -> dict: +def _encode_binary_obj(obj: Binary, json_options: JSONOptions) -> dict: # type: ignore[type-arg] return _encode_binary(obj, obj.subtype, json_options) -def _encode_uuid(obj: uuid.UUID, json_options: JSONOptions) -> dict: +def _encode_uuid(obj: uuid.UUID, json_options: JSONOptions) -> dict: # type: ignore[type-arg] if json_options.strict_uuid: binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) return _encode_binary(binval, binval.subtype, json_options) @@ -957,27 +957,27 @@ def _encode_uuid(obj: uuid.UUID, json_options: JSONOptions) -> dict: return {"$uuid": obj.hex} -def _encode_objectid(obj: ObjectId, dummy0: Any) -> dict: +def _encode_objectid(obj: ObjectId, dummy0: Any) -> dict: # type: ignore[type-arg] return {"$oid": str(obj)} -def _encode_timestamp(obj: Timestamp, dummy0: Any) -> dict: +def _encode_timestamp(obj: Timestamp, dummy0: Any) -> dict: # type: ignore[type-arg] return {"$timestamp": {"t": obj.time, "i": obj.inc}} -def _encode_decimal128(obj: Timestamp, dummy0: Any) -> dict: +def _encode_decimal128(obj: Timestamp, dummy0: Any) -> dict: # type: ignore[type-arg] return {"$numberDecimal": str(obj)} -def _encode_dbref(obj: DBRef, json_options: JSONOptions) -> dict: +def _encode_dbref(obj: DBRef, json_options: JSONOptions) -> dict: # type: ignore[type-arg] return _json_convert(obj.as_doc(), json_options=json_options) -def _encode_minkey(dummy0: Any, dummy1: Any) -> dict: +def _encode_minkey(dummy0: Any, dummy1: Any) -> dict: # type: ignore[type-arg] return {"$minKey": 1} -def _encode_maxkey(dummy0: Any, dummy1: Any) -> dict: +def _encode_maxkey(dummy0: Any, dummy1: Any) -> dict: # type: ignore[type-arg] return {"$maxKey": 1} @@ -985,7 +985,7 @@ def _encode_maxkey(dummy0: Any, dummy1: Any) -> dict: # Each encoder function's signature is: # - obj: a Python data type, e.g. a Python int for _encode_int # - json_options: a JSONOptions -_ENCODERS: dict[Type, Callable[[Any, JSONOptions], Any]] = { +_ENCODERS: dict[Type, Callable[[Any, JSONOptions], Any]] = { # type: ignore[type-arg] bool: _encode_noop, bytes: _encode_bytes, datetime.datetime: _encode_datetime, @@ -1056,7 +1056,7 @@ def _get_datetime_size(obj: datetime.datetime) -> int: return 5 + len(str(obj.time())) -def _get_regex_size(obj: Regex) -> int: +def _get_regex_size(obj: Regex) -> int: # type: ignore[type-arg] return 18 + len(obj.pattern) diff --git a/bson/typings.py b/bson/typings.py index b80c661454..55e90b19a5 100644 --- a/bson/typings.py +++ b/bson/typings.py @@ -28,4 +28,4 @@ _DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) _DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) -_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] +_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] # type: ignore[type-arg] diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index 3c7d4ef0e9..e512f796a8 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -70,7 +70,7 @@ def _disallow_transactions(session: Optional[AsyncClientSession]) -> None: class AsyncGridFS: """An instance of GridFS on top of a single Database.""" - def __init__(self, database: AsyncDatabase, collection: str = "fs"): + def __init__(self, database: AsyncDatabase[Any], collection: str = "fs"): """Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of @@ -463,7 +463,7 @@ class AsyncGridFSBucket: def __init__( self, - db: AsyncDatabase, + db: AsyncDatabase[Any], bucket_name: str = "fs", chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, write_concern: Optional[WriteConcern] = None, @@ -513,11 +513,11 @@ def __init__( self._bucket_name = bucket_name self._collection = db[bucket_name] - self._chunks: AsyncCollection = self._collection.chunks.with_options( + self._chunks: AsyncCollection[Any] = self._collection.chunks.with_options( write_concern=write_concern, read_preference=read_preference ) - self._files: AsyncCollection = self._collection.files.with_options( + self._files: AsyncCollection[Any] = self._collection.files.with_options( write_concern=write_concern, read_preference=read_preference ) @@ -1085,7 +1085,7 @@ class AsyncGridIn: def __init__( self, - root_collection: AsyncCollection, + root_collection: AsyncCollection[Any], session: Optional[AsyncClientSession] = None, **kwargs: Any, ) -> None: @@ -1172,7 +1172,7 @@ def __init__( object.__setattr__(self, "_buffered_docs_size", 0) async def _create_index( - self, collection: AsyncCollection, index_key: Any, unique: bool + self, collection: AsyncCollection[Any], index_key: Any, unique: bool ) -> None: doc = await collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: @@ -1456,7 +1456,7 @@ class AsyncGridOut(GRIDOUT_BASE_CLASS): # type: ignore def __init__( self, - root_collection: AsyncCollection, + root_collection: AsyncCollection[Any], file_id: Optional[int] = None, file_document: Optional[Any] = None, session: Optional[AsyncClientSession] = None, @@ -1829,7 +1829,7 @@ class _AsyncGridOutChunkIterator: def __init__( self, grid_out: AsyncGridOut, - chunks: AsyncCollection, + chunks: AsyncCollection[Any], session: Optional[AsyncClientSession], next_chunk: Any, ) -> None: @@ -1842,7 +1842,7 @@ def __init__( self._num_chunks = math.ceil(float(self._length) / self._chunk_size) self._cursor = None - _cursor: Optional[AsyncCursor] + _cursor: Optional[AsyncCursor[Any]] def expected_chunk_length(self, chunk_n: int) -> int: if chunk_n < self._num_chunks - 1: @@ -1921,7 +1921,7 @@ async def close(self) -> None: class AsyncGridOutIterator: def __init__( - self, grid_out: AsyncGridOut, chunks: AsyncCollection, session: AsyncClientSession + self, grid_out: AsyncGridOut, chunks: AsyncCollection[Any], session: AsyncClientSession ): self._chunk_iter = _AsyncGridOutChunkIterator(grid_out, chunks, session, 0) @@ -1935,14 +1935,14 @@ async def next(self) -> bytes: __anext__ = next -class AsyncGridOutCursor(AsyncCursor): +class AsyncGridOutCursor(AsyncCursor): # type: ignore[type-arg] """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ def __init__( self, - collection: AsyncCollection, + collection: AsyncCollection[Any], filter: Optional[Mapping[str, Any]] = None, skip: int = 0, limit: int = 0, diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index d0a4c7fc7f..70a4f80774 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -70,7 +70,7 @@ def _disallow_transactions(session: Optional[ClientSession]) -> None: class GridFS: """An instance of GridFS on top of a single Database.""" - def __init__(self, database: Database, collection: str = "fs"): + def __init__(self, database: Database[Any], collection: str = "fs"): """Create a new instance of :class:`GridFS`. Raises :class:`TypeError` if `database` is not an instance of @@ -461,7 +461,7 @@ class GridFSBucket: def __init__( self, - db: Database, + db: Database[Any], bucket_name: str = "fs", chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, write_concern: Optional[WriteConcern] = None, @@ -511,11 +511,11 @@ def __init__( self._bucket_name = bucket_name self._collection = db[bucket_name] - self._chunks: Collection = self._collection.chunks.with_options( + self._chunks: Collection[Any] = self._collection.chunks.with_options( write_concern=write_concern, read_preference=read_preference ) - self._files: Collection = self._collection.files.with_options( + self._files: Collection[Any] = self._collection.files.with_options( write_concern=write_concern, read_preference=read_preference ) @@ -1077,7 +1077,7 @@ class GridIn: def __init__( self, - root_collection: Collection, + root_collection: Collection[Any], session: Optional[ClientSession] = None, **kwargs: Any, ) -> None: @@ -1163,7 +1163,7 @@ def __init__( object.__setattr__(self, "_buffered_docs", []) object.__setattr__(self, "_buffered_docs_size", 0) - def _create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: + def _create_index(self, collection: Collection[Any], index_key: Any, unique: bool) -> None: doc = collection.find_one(projection={"_id": 1}, session=self._session) if doc is None: try: @@ -1444,7 +1444,7 @@ class GridOut(GRIDOUT_BASE_CLASS): # type: ignore def __init__( self, - root_collection: Collection, + root_collection: Collection[Any], file_id: Optional[int] = None, file_document: Optional[Any] = None, session: Optional[ClientSession] = None, @@ -1817,7 +1817,7 @@ class GridOutChunkIterator: def __init__( self, grid_out: GridOut, - chunks: Collection, + chunks: Collection[Any], session: Optional[ClientSession], next_chunk: Any, ) -> None: @@ -1830,7 +1830,7 @@ def __init__( self._num_chunks = math.ceil(float(self._length) / self._chunk_size) self._cursor = None - _cursor: Optional[Cursor] + _cursor: Optional[Cursor[Any]] def expected_chunk_length(self, chunk_n: int) -> int: if chunk_n < self._num_chunks - 1: @@ -1908,7 +1908,7 @@ def close(self) -> None: class GridOutIterator: - def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): + def __init__(self, grid_out: GridOut, chunks: Collection[Any], session: ClientSession): self._chunk_iter = GridOutChunkIterator(grid_out, chunks, session, 0) def __iter__(self) -> GridOutIterator: @@ -1921,14 +1921,14 @@ def next(self) -> bytes: __next__ = next -class GridOutCursor(Cursor): +class GridOutCursor(Cursor): # type: ignore[type-arg] """A cursor / iterator for returning GridOut objects as the result of an arbitrary query against the GridFS files collection. """ def __init__( self, - collection: Collection, + collection: Collection[Any], filter: Optional[Mapping[str, Any]] = None, skip: int = 0, limit: int = 0, diff --git a/pymongo/_asyncio_lock.py b/pymongo/_asyncio_lock.py index a9c409d486..5ca09982fa 100644 --- a/pymongo/_asyncio_lock.py +++ b/pymongo/_asyncio_lock.py @@ -93,7 +93,7 @@ class Lock(_ContextManagerMixin, _LoopBoundMixin): """ def __init__(self) -> None: - self._waiters: Optional[collections.deque] = None + self._waiters: Optional[collections.deque[Any]] = None self._locked = False def __repr__(self) -> str: @@ -196,7 +196,7 @@ def __init__(self, lock: Optional[Lock] = None) -> None: self.acquire = lock.acquire self.release = lock.release - self._waiters: collections.deque = collections.deque() + self._waiters: collections.deque[Any] = collections.deque() def __repr__(self) -> str: res = super().__repr__() @@ -260,7 +260,7 @@ async def wait(self) -> bool: self._notify(1) raise - async def wait_for(self, predicate: Any) -> Coroutine: + async def wait_for(self, predicate: Any) -> Coroutine[Any, Any, Any]: """Wait until a predicate becomes true. The predicate should be a callable whose result will be diff --git a/pymongo/_asyncio_task.py b/pymongo/_asyncio_task.py index 7a528f027d..118471963a 100644 --- a/pymongo/_asyncio_task.py +++ b/pymongo/_asyncio_task.py @@ -24,7 +24,7 @@ # TODO (https://jira.mongodb.org/browse/PYTHON-4981): Revisit once the underlying cause of the swallowed cancellations is uncovered -class _Task(asyncio.Task): +class _Task(asyncio.Task[Any]): def __init__(self, coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> None: super().__init__(coro, name=name) self._cancel_requests = 0 @@ -43,7 +43,7 @@ def cancelling(self) -> int: return self._cancel_requests -def create_task(coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> asyncio.Task: +def create_task(coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> asyncio.Task[Any]: if sys.version_info >= (3, 11): return asyncio.create_task(coro, name=name) return _Task(coro, name=name) diff --git a/pymongo/_csot.py b/pymongo/_csot.py index c5681e345a..ce72a66486 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -68,7 +68,7 @@ def clamp_remaining(max_timeout: float) -> float: return min(timeout, max_timeout) -class _TimeoutContext(AbstractContextManager): +class _TimeoutContext(AbstractContextManager[Any]): """Internal timeout context manager. Use :func:`pymongo.timeout` instead:: diff --git a/pymongo/asynchronous/aggregation.py b/pymongo/asynchronous/aggregation.py index daccd1bcb0..059d698772 100644 --- a/pymongo/asynchronous/aggregation.py +++ b/pymongo/asynchronous/aggregation.py @@ -46,8 +46,8 @@ class _AggregationCommand: def __init__( self, - target: Union[AsyncDatabase, AsyncCollection], - cursor_class: type[AsyncCommandCursor], + target: Union[AsyncDatabase[Any], AsyncCollection[Any]], + cursor_class: type[AsyncCommandCursor[Any]], pipeline: _Pipeline, options: MutableMapping[str, Any], explicit_session: bool, @@ -111,12 +111,12 @@ def _cursor_namespace(self) -> str: """The namespace in which the aggregate command is run.""" raise NotImplementedError - def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> AsyncCollection: + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> AsyncCollection[Any]: """The AsyncCollection used for the aggregate command cursor.""" raise NotImplementedError @property - def _database(self) -> AsyncDatabase: + def _database(self) -> AsyncDatabase[Any]: """The database against which the aggregation command is run.""" raise NotImplementedError @@ -205,7 +205,7 @@ async def get_cursor( class _CollectionAggregationCommand(_AggregationCommand): - _target: AsyncCollection + _target: AsyncCollection[Any] @property def _aggregation_target(self) -> str: @@ -215,12 +215,12 @@ def _aggregation_target(self) -> str: def _cursor_namespace(self) -> str: return self._target.full_name - def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection: + def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection[Any]: """The AsyncCollection used for the aggregate command cursor.""" return self._target @property - def _database(self) -> AsyncDatabase: + def _database(self) -> AsyncDatabase[Any]: return self._target.database @@ -234,7 +234,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class _DatabaseAggregationCommand(_AggregationCommand): - _target: AsyncDatabase + _target: AsyncDatabase[Any] @property def _aggregation_target(self) -> int: @@ -245,10 +245,10 @@ def _cursor_namespace(self) -> str: return f"{self._target.name}.$cmd.aggregate" @property - def _database(self) -> AsyncDatabase: + def _database(self) -> AsyncDatabase[Any]: return self._target - def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection: + def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection[Any]: """The AsyncCollection used for the aggregate command cursor.""" # AsyncCollection level aggregate may not always return the "ns" field # according to our MockupDB tests. Let's handle that case for db level diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py index 20b8340060..f8f046bd94 100644 --- a/pymongo/asynchronous/auth_oidc.py +++ b/pymongo/asynchronous/auth_oidc.py @@ -259,7 +259,7 @@ async def _sasl_continue_jwt( ) -> Mapping[str, Any]: self.access_token = None self.refresh_token = None - start_payload: dict = bson.decode(start_resp["payload"]) + start_payload: dict[str, Any] = bson.decode(start_resp["payload"]) if "issuer" in start_payload: self.idp_info = OIDCIdPInfo(**start_payload) access_token = await self._get_access_token() diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py index ac514db98f..4a54f9eb3f 100644 --- a/pymongo/asynchronous/bulk.py +++ b/pymongo/asynchronous/bulk.py @@ -248,7 +248,7 @@ async def write_command( request_id: int, msg: bytes, docs: list[Mapping[str, Any]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> dict[str, Any]: """A proxy for SocketInfo.write_command that handles event publishing.""" cmd[bwc.field] = docs @@ -334,7 +334,7 @@ async def unack_write( msg: bytes, max_doc_size: int, docs: list[Mapping[str, Any]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> Optional[Mapping[str, Any]]: """A proxy for AsyncConnection.unack_write that handles event publishing.""" if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): @@ -419,7 +419,7 @@ async def _execute_batch_unack( bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], cmd: dict[str, Any], ops: list[Mapping[str, Any]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> list[Mapping[str, Any]]: if self.is_encrypted: _, batched_cmd, to_send = bwc.batch_command(cmd, ops) @@ -446,7 +446,7 @@ async def _execute_batch( bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], cmd: dict[str, Any], ops: list[Mapping[str, Any]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: if self.is_encrypted: _, batched_cmd, to_send = bwc.batch_command(cmd, ops) diff --git a/pymongo/asynchronous/change_stream.py b/pymongo/asynchronous/change_stream.py index 6c37f9d05f..3940111df2 100644 --- a/pymongo/asynchronous/change_stream.py +++ b/pymongo/asynchronous/change_stream.py @@ -164,7 +164,7 @@ def _aggregation_command_class(self) -> Type[_AggregationCommand]: raise NotImplementedError @property - def _client(self) -> AsyncMongoClient: + def _client(self) -> AsyncMongoClient: # type: ignore[type-arg] """The client against which the aggregation commands for this AsyncChangeStream will be run. """ @@ -206,7 +206,7 @@ def _command_options(self) -> dict[str, Any]: def _aggregation_pipeline(self) -> list[dict[str, Any]]: """Return the full aggregation pipeline for this AsyncChangeStream.""" options = self._change_stream_options() - full_pipeline: list = [{"$changeStream": options}] + full_pipeline: list[dict[str, Any]] = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline @@ -237,7 +237,7 @@ def _process_result(self, result: Mapping[str, Any], conn: AsyncConnection) -> N async def _run_aggregation_cmd( self, session: Optional[AsyncClientSession], explicit_session: bool - ) -> AsyncCommandCursor: + ) -> AsyncCommandCursor: # type: ignore[type-arg] """Run the full aggregation pipeline for this AsyncChangeStream and return the corresponding AsyncCommandCursor. """ @@ -257,7 +257,7 @@ async def _run_aggregation_cmd( operation=_Op.AGGREGATE, ) - async def _create_cursor(self) -> AsyncCommandCursor: + async def _create_cursor(self) -> AsyncCommandCursor: # type: ignore[type-arg] async with self._client._tmp_session(self._session, close=False) as s: return await self._run_aggregation_cmd( session=s, explicit_session=self._session is not None diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 5f7ac013e9..45812b3400 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -88,7 +88,7 @@ class _AsyncClientBulk: def __init__( self, - client: AsyncMongoClient, + client: AsyncMongoClient[Any], write_concern: WriteConcern, ordered: bool = True, bypass_document_validation: Optional[bool] = None, @@ -233,7 +233,7 @@ async def write_command( msg: Union[bytes, dict[str, Any]], op_docs: list[Mapping[str, Any]], ns_docs: list[Mapping[str, Any]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> dict[str, Any]: """A proxy for AsyncConnection.write_command that handles event publishing.""" cmd["ops"] = op_docs @@ -324,7 +324,7 @@ async def unack_write( msg: bytes, op_docs: list[Mapping[str, Any]], ns_docs: list[Mapping[str, Any]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> Optional[Mapping[str, Any]]: """A proxy for AsyncConnection.unack_write that handles event publishing.""" if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index 1225445710..c30fc6679f 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -396,7 +396,7 @@ class _TxnState: class _Transaction: """Internal class to hold transaction information in a AsyncClientSession.""" - def __init__(self, opts: Optional[TransactionOptions], client: AsyncMongoClient): + def __init__(self, opts: Optional[TransactionOptions], client: AsyncMongoClient[Any]): self.opts = opts self.state = _TxnState.NONE self.sharded = False @@ -459,7 +459,7 @@ def _max_time_expired_error(exc: PyMongoError) -> bool: # From the transactions spec, all the retryable writes errors plus # WriteConcernTimeout. -_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( # type: ignore[type-arg] [ 64, # WriteConcernTimeout 50, # MaxTimeMSExpired @@ -499,13 +499,13 @@ class AsyncClientSession: def __init__( self, - client: AsyncMongoClient, + client: AsyncMongoClient[Any], server_session: Any, options: SessionOptions, implicit: bool, ) -> None: # An AsyncMongoClient, a _ServerSession, a SessionOptions, and a set. - self._client: AsyncMongoClient = client + self._client: AsyncMongoClient[Any] = client self._server_session = server_session self._options = options self._cluster_time: Optional[Mapping[str, Any]] = None @@ -551,7 +551,7 @@ async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: await self._end_session(lock=True) @property - def client(self) -> AsyncMongoClient: + def client(self) -> AsyncMongoClient[Any]: """The :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` this session was created from. """ @@ -751,7 +751,7 @@ async def start_transaction( write_concern: Optional[WriteConcern] = None, read_preference: Optional[_ServerMode] = None, max_commit_time_ms: Optional[int] = None, - ) -> AsyncContextManager: + ) -> AsyncContextManager[Any]: """Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. @@ -1123,7 +1123,7 @@ def inc_transaction_id(self) -> None: self._transaction_id += 1 -class _ServerSessionPool(collections.deque): +class _ServerSessionPool(collections.deque): # type: ignore[type-arg] """Pool of _ServerSession objects. This class is thread-safe. diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 7fb20b7ab3..313c8c7c04 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -581,7 +581,7 @@ async def _command( conn: AsyncConnection, command: MutableMapping[str, Any], read_preference: Optional[_ServerMode] = None, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional[CodecOptions[Mapping[str, Any]]] = None, check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_concern: Optional[ReadConcern] = None, @@ -704,7 +704,7 @@ async def bulk_write( bypass_document_validation: Optional[bool] = None, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, - let: Optional[Mapping] = None, + let: Optional[Mapping[str, Any]] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -2525,7 +2525,7 @@ async def _list_indexes( session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, ) -> AsyncCommandCursor[MutableMapping[str, Any]]: - codec_options: CodecOptions = CodecOptions(SON) + codec_options: CodecOptions[Mapping[str, Any]] = CodecOptions(SON) coll = cast( AsyncCollection[MutableMapping[str, Any]], self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), @@ -2871,7 +2871,7 @@ async def _aggregate( self, aggregation_command: Type[_AggregationCommand], pipeline: _Pipeline, - cursor_class: Type[AsyncCommandCursor], + cursor_class: Type[AsyncCommandCursor], # type: ignore[type-arg] session: Optional[AsyncClientSession], explicit_session: bool, let: Optional[Mapping[str, Any]] = None, @@ -3114,7 +3114,7 @@ async def distinct( comment: Optional[Any] = None, hint: Optional[_IndexKeyHint] = None, **kwargs: Any, - ) -> list: + ) -> list[str]: """Get a list of distinct values for `key` among all documents in this collection. @@ -3177,7 +3177,7 @@ async def _cmd( _server: Server, conn: AsyncConnection, read_preference: Optional[_ServerMode], - ) -> list: + ) -> list: # type: ignore[type-arg] return ( await self._command( conn, @@ -3202,7 +3202,7 @@ async def _find_and_modify( array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, session: Optional[AsyncClientSession] = None, - let: Optional[Mapping] = None, + let: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Any: """Internal findAndModify helper.""" diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py index 353c5e91c2..db7c2b6638 100644 --- a/pymongo/asynchronous/command_cursor.py +++ b/pymongo/asynchronous/command_cursor.py @@ -350,7 +350,7 @@ async def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: else: return None - async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] """Get all or some available documents from the cursor.""" if not len(self._data) and not self._killed: await self._refresh() @@ -457,7 +457,7 @@ def _unpack_response( # type: ignore[override] self, response: Union[_OpReply, _OpMsg], cursor_id: Optional[int], - codec_options: CodecOptions, + codec_options: CodecOptions[dict[str, Any]], user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False, ) -> list[Mapping[str, Any]]: diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index 51efab4f43..ab2d0e873c 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -216,7 +216,7 @@ def __init__( # it anytime we change __limit. self._empty = False - self._data: deque = deque() + self._data: deque = deque() # type: ignore[type-arg] self._address: Optional[_Address] = None self._retrieved = 0 @@ -280,7 +280,7 @@ def clone(self) -> AsyncCursor[_DocumentType]: """ return self._clone(True) - def _clone(self, deepcopy: bool = True, base: Optional[AsyncCursor] = None) -> AsyncCursor: + def _clone(self, deepcopy: bool = True, base: Optional[AsyncCursor] = None) -> AsyncCursor: # type: ignore[type-arg] """Internal clone helper.""" if not base: if self._explicit_session: @@ -322,7 +322,7 @@ def _clone(self, deepcopy: bool = True, base: Optional[AsyncCursor] = None) -> A base.__dict__.update(data) return base - def _clone_base(self, session: Optional[AsyncClientSession]) -> AsyncCursor: + def _clone_base(self, session: Optional[AsyncClientSession]) -> AsyncCursor: # type: ignore[type-arg] """Creates an empty AsyncCursor object for information to be copied into.""" return self.__class__(self._collection, session=session) @@ -864,7 +864,7 @@ def where(self, code: Union[str, Code]) -> AsyncCursor[_DocumentType]: if self._has_filter: spec = dict(self._spec) else: - spec = cast(dict, self._spec) + spec = cast(dict, self._spec) # type: ignore[type-arg] spec["$where"] = code self._spec = spec return self @@ -888,7 +888,7 @@ def _unpack_response( self, response: Union[_OpReply, _OpMsg], cursor_id: Optional[int], - codec_options: CodecOptions, + codec_options: CodecOptions, # type: ignore[type-arg] user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False, ) -> Sequence[_DocumentOut]: @@ -964,29 +964,33 @@ def __deepcopy__(self, memo: Any) -> Any: return self._clone(deepcopy=True) @overload - def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: # type: ignore[type-arg] ... @overload def _deepcopy( - self, x: SupportsItems, memo: Optional[dict[int, Union[list, dict]]] = None - ) -> dict: + self, + x: SupportsItems, # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> dict: # type: ignore[type-arg] ... def _deepcopy( - self, x: Union[Iterable, SupportsItems], memo: Optional[dict[int, Union[list, dict]]] = None - ) -> Union[list, dict]: + self, + x: Union[Iterable, SupportsItems], # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> Union[list[Any], dict[str, Any]]: """Deepcopy helper for the data dictionary or list. Regular expressions cannot be deep copied but as they are immutable we don't have to copy them when cloning. """ - y: Union[list, dict] + y: Union[list[Any], dict[str, Any]] iterator: Iterable[tuple[Any, Any]] if not hasattr(x, "items"): y, is_list, iterator = [], True, enumerate(x) else: - y, is_list, iterator = {}, False, cast("SupportsItems", x).items() + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() # type: ignore[type-arg] if memo is None: memo = {} val_id = id(x) @@ -1060,7 +1064,7 @@ async def close(self) -> None: """Explicitly close / kill this cursor.""" await self._die_lock() - async def distinct(self, key: str) -> list: + async def distinct(self, key: str) -> list[str]: """Get a list of distinct values for `key` among all documents in the result set of this query. @@ -1265,7 +1269,7 @@ async def next(self) -> _DocumentType: else: raise StopAsyncIteration - async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] """Get all or some documents from the cursor.""" if not self._exhaust_checked: self._exhaust_checked = True @@ -1325,7 +1329,7 @@ async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: return res -class AsyncRawBatchCursor(AsyncCursor, Generic[_DocumentType]): +class AsyncRawBatchCursor(AsyncCursor, Generic[_DocumentType]): # type: ignore[type-arg] """An asynchronous cursor / iterator over raw batches of BSON data from a query result.""" _query_class = _RawBatchQuery diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index d0089eb4ee..09713c37ec 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -771,7 +771,7 @@ async def _command( self._name, command, read_preference, - codec_options, + codec_options, # type: ignore[arg-type] check, allowable_errors, write_concern=write_concern, diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 3488030166..b988120d7c 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -161,10 +161,10 @@ _IS_SYNC = False _WriteOp = Union[ - InsertOne, + InsertOne, # type: ignore[type-arg] DeleteOne, DeleteMany, - ReplaceOne, + ReplaceOne, # type: ignore[type-arg] UpdateOne, UpdateMany, ] @@ -176,7 +176,7 @@ class AsyncMongoClient(common.BaseObject, Generic[_DocumentType]): # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. _constructor_args = ("document_class", "tz_aware", "connect") - _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() # type: ignore[type-arg] def __init__( self, @@ -847,7 +847,7 @@ def __init__( self._default_database_name = dbase self._lock = _async_create_lock() - self._kill_cursors_queue: list = [] + self._kill_cursors_queue: list = [] # type: ignore[type-arg] self._encrypter: Optional[_Encrypter] = None @@ -1064,7 +1064,7 @@ def _after_fork(self) -> None: # Reset the session pool to avoid duplicate sessions in the child process. self._topology._session_pool.reset() - def _duplicate(self, **kwargs: Any) -> AsyncMongoClient: + def _duplicate(self, **kwargs: Any) -> AsyncMongoClient: # type: ignore[type-arg] args = self._init_kwargs.copy() args.update(kwargs) return AsyncMongoClient(**args) @@ -1548,7 +1548,7 @@ def get_database( self, name, codec_options, read_preference, write_concern, read_concern ) - def _database_default_options(self, name: str) -> database.AsyncDatabase: + def _database_default_options(self, name: str) -> database.AsyncDatabase: # type: ignore[type-arg] """Get a AsyncDatabase instance with the default settings.""" return self.get_database( name, @@ -1887,7 +1887,7 @@ async def _conn_for_reads( async def _run_operation( self, operation: Union[_Query, _GetMore], - unpack_res: Callable, + unpack_res: Callable, # type: ignore[type-arg] address: Optional[_Address] = None, ) -> Response: """Run a _Query/_GetMore operation and return a Response. @@ -2261,7 +2261,7 @@ def _return_server_session( @contextlib.asynccontextmanager async def _tmp_session( self, session: Optional[client_session.AsyncClientSession], close: bool = True - ) -> AsyncGenerator[Optional[client_session.AsyncClientSession], None, None]: + ) -> AsyncGenerator[Optional[client_session.AsyncClientSession], None]: """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.AsyncClientSession): @@ -2308,8 +2308,8 @@ async def server_info( .. versionchanged:: 3.6 Added ``session`` parameter. """ - return cast( - dict, + return cast( # type: ignore[redundant-cast] + dict[str, Any], await self.admin.command( "buildinfo", read_preference=ReadPreference.PRIMARY, session=session ), @@ -2438,13 +2438,13 @@ async def drop_database( @_csot.apply async def bulk_write( self, - models: Sequence[_WriteOp[_DocumentType]], + models: Sequence[_WriteOp], session: Optional[AsyncClientSession] = None, ordered: bool = True, verbose_results: bool = False, bypass_document_validation: Optional[bool] = None, comment: Optional[Any] = None, - let: Optional[Mapping] = None, + let: Optional[Mapping[str, Any]] = None, write_concern: Optional[WriteConcern] = None, ) -> ClientBulkWriteResult: """Send a batch of write operations, potentially across multiple namespaces, to the server. @@ -2631,7 +2631,10 @@ class _MongoClientErrorHandler: ) def __init__( - self, client: AsyncMongoClient, server: Server, session: Optional[AsyncClientSession] + self, + client: AsyncMongoClient, # type: ignore[type-arg] + server: Server, + session: Optional[AsyncClientSession], ): if not isinstance(client, AsyncMongoClient): # This is for compatibility with mocked and subclassed types, such as in Motor. @@ -2704,7 +2707,7 @@ class _ClientConnectionRetryable(Generic[T]): def __init__( self, - mongo_client: AsyncMongoClient, + mongo_client: AsyncMongoClient, # type: ignore[type-arg] func: _WriteCall[T] | _ReadCall[T], bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], operation: str, diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index 32b545380a..e067bd8c54 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -351,7 +351,7 @@ async def _check_once(self) -> ServerDescription: ) return sd - async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float]: + async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float]: # type: ignore[type-arg] """Return (Hello, round_trip_time). Can raise ConnectionFailure or OperationFailure. diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py index 1605efe92d..5a5dc7fa2c 100644 --- a/pymongo/asynchronous/network.py +++ b/pymongo/asynchronous/network.py @@ -66,7 +66,7 @@ async def command( read_preference: Optional[_ServerMode], codec_options: CodecOptions[_DocumentType], session: Optional[AsyncClientSession], - client: Optional[AsyncMongoClient], + client: Optional[AsyncMongoClient[Any]], check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, address: Optional[_Address] = None, diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 9a39883fc2..e215cafdc1 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -201,7 +201,7 @@ def set_conn_timeout(self, timeout: Optional[float]) -> None: self.conn.get_conn.settimeout(timeout) def apply_timeout( - self, client: AsyncMongoClient, cmd: Optional[MutableMapping[str, Any]] + self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]] ) -> Optional[float]: # CSOT: use remaining timeout when set. timeout = _csot.remaining() @@ -255,7 +255,7 @@ def hello_cmd(self) -> dict[str, Any]: else: return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} - async def hello(self) -> Hello: + async def hello(self) -> Hello[dict[str, Any]]: return await self._hello(None, None) async def _hello( @@ -357,7 +357,7 @@ async def command( dbname: str, spec: MutableMapping[str, Any], read_preference: _ServerMode = ReadPreference.PRIMARY, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + codec_options: CodecOptions[Mapping[str, Any]] = DEFAULT_CODEC_OPTIONS, # type: ignore[assignment] check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_concern: Optional[ReadConcern] = None, @@ -365,7 +365,7 @@ async def command( parse_write_concern_error: bool = False, collation: Optional[_CollationIn] = None, session: Optional[AsyncClientSession] = None, - client: Optional[AsyncMongoClient] = None, + client: Optional[AsyncMongoClient[Any]] = None, retryable_write: bool = False, publish_events: bool = True, user_fields: Optional[Mapping[str, Any]] = None, @@ -417,7 +417,7 @@ async def command( spec, self.is_mongos, read_preference, - codec_options, + codec_options, # type: ignore[arg-type] session, client, check, @@ -489,7 +489,7 @@ async def unack_write(self, msg: bytes, max_doc_size: int) -> None: await self.send_message(msg, max_doc_size) async def write_command( - self, request_id: int, msg: bytes, codec_options: CodecOptions + self, request_id: int, msg: bytes, codec_options: CodecOptions[Mapping[str, Any]] ) -> dict[str, Any]: """Send "insert" etc. command, returning response as a dict. @@ -541,7 +541,7 @@ async def authenticate(self, reauthenticate: bool = False) -> None: ) def validate_session( - self, client: Optional[AsyncMongoClient], session: Optional[AsyncClientSession] + self, client: Optional[AsyncMongoClient[Any]], session: Optional[AsyncClientSession] ) -> None: """Validate this session before use with client. @@ -598,7 +598,7 @@ def send_cluster_time( self, command: MutableMapping[str, Any], session: Optional[AsyncClientSession], - client: Optional[AsyncMongoClient], + client: Optional[AsyncMongoClient[Any]], ) -> None: """Add $clusterTime.""" if client: @@ -732,7 +732,7 @@ def __init__( # LIFO pool. Sockets are ordered on idle time. Sockets claimed # and returned to pool from the left side. Stale sockets removed # from the right side. - self.conns: collections.deque = collections.deque() + self.conns: collections.deque[AsyncConnection] = collections.deque() self.active_contexts: set[_CancellationContext] = set() self.lock = _async_create_lock() self._max_connecting_cond = _async_create_condition(self.lock) @@ -839,8 +839,8 @@ async def _reset( if service_id is None: sockets, self.conns = self.conns, collections.deque() else: - discard: collections.deque = collections.deque() - keep: collections.deque = collections.deque() + discard: collections.deque = collections.deque() # type: ignore[type-arg] + keep: collections.deque = collections.deque() # type: ignore[type-arg] for conn in self.conns: if conn.service_id == service_id: discard.append(conn) @@ -866,7 +866,7 @@ async def _reset( if close: if not _IS_SYNC: await asyncio.gather( - *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], # type: ignore[func-returns-value] return_exceptions=True, ) else: @@ -903,7 +903,7 @@ async def _reset( ) if not _IS_SYNC: await asyncio.gather( - *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], # type: ignore[func-returns-value] return_exceptions=True, ) else: @@ -917,7 +917,7 @@ async def update_is_writable(self, is_writable: Optional[bool]) -> None: self.is_writable = is_writable async with self.lock: for _socket in self.conns: - _socket.update_is_writable(self.is_writable) + _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] async def reset( self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False @@ -956,7 +956,7 @@ async def remove_stale_sockets(self, reference_generation: int) -> None: close_conns.append(self.conns.pop()) if not _IS_SYNC: await asyncio.gather( - *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], # type: ignore[func-returns-value] return_exceptions=True, ) else: @@ -1477,4 +1477,4 @@ def __del__(self) -> None: # not safe to acquire a lock in __del__. if _IS_SYNC: for conn in self.conns: - conn.close_conn(None) + conn.close_conn(None) # type: ignore[unused-coroutine] diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index 0e0d53b96f..0f8565f6cc 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -66,7 +66,7 @@ def __init__( monitor: Monitor, topology_id: Optional[ObjectId] = None, listeners: Optional[_EventListeners] = None, - events: Optional[ReferenceType[Queue]] = None, + events: Optional[ReferenceType[Queue[Any]]] = None, ) -> None: """Represent one MongoDB server.""" self._description = server_description @@ -142,7 +142,7 @@ async def run_operation( read_preference: _ServerMode, listeners: Optional[_EventListeners], unpack_res: Callable[..., list[_DocumentOut]], - client: AsyncMongoClient, + client: AsyncMongoClient[Any], ) -> Response: """Run a _Query or _GetMore operation and return a Response object. diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py index 052f91afee..283aabc690 100644 --- a/pymongo/asynchronous/topology.py +++ b/pymongo/asynchronous/topology.py @@ -84,7 +84,7 @@ _pymongo_dir = str(Path(__file__).parent) -def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: # type: ignore[type-arg] q = queue_ref() if not q: return False # Cancel PeriodicExecutor. @@ -186,7 +186,7 @@ def __init__(self, topology_settings: TopologySettings): if self._publish_server or self._publish_tp: assert self._events is not None - weak: weakref.ReferenceType[queue.Queue] + weak: weakref.ReferenceType[queue.Queue[Any]] async def target() -> bool: return process_events_queue(weak) diff --git a/pymongo/client_options.py b/pymongo/client_options.py index bd27dd4eb0..8b4eea7e65 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -247,7 +247,7 @@ def connect(self) -> Optional[bool]: return self.__connect @property - def codec_options(self) -> CodecOptions: + def codec_options(self) -> CodecOptions[Any]: """A :class:`~bson.codec_options.CodecOptions` instance.""" return self.__codec_options diff --git a/pymongo/common.py b/pymongo/common.py index 96f9f87459..5210e72189 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -56,7 +56,7 @@ from pymongo.typings import _AgnosticClientSession -ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) +ORDERED_TYPES: Sequence[Type[Any]] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024**2) @@ -166,7 +166,7 @@ def clean_node(node: str) -> tuple[str, int]: return host.lower(), port -def raise_config_error(key: str, suggestions: Optional[list] = None) -> NoReturn: +def raise_config_error(key: str, suggestions: Optional[list[str]] = None) -> NoReturn: """Raise ConfigurationError with the given key name.""" msg = f"Unknown option: {key}." if suggestions: @@ -411,7 +411,7 @@ def validate_read_preference_tags(name: str, value: Any) -> list[dict[str, str]] if not isinstance(value, list): value = [value] - tag_sets: list = [] + tag_sets: list[dict[str, Any]] = [] for tag_set in value: if tag_set == "": tag_sets.append({}) @@ -497,7 +497,7 @@ def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Uni def validate_document_class( option: str, value: Any -) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: +) -> Union[Type[MutableMapping[str, Any]], Type[RawBSONDocument]]: """Validate the document_class option.""" # issubclass can raise TypeError for generic aliases like SON[str, Any]. # In that case we can use the base class for the comparison. @@ -523,14 +523,14 @@ def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: return value -def validate_list(option: str, value: Any) -> list: +def validate_list(option: str, value: Any) -> list[Any]: """Validates that 'value' is a list.""" if not isinstance(value, list): raise TypeError(f"{option} must be a list, not {type(value)}") return value -def validate_list_or_none(option: Any, value: Any) -> Optional[list]: +def validate_list_or_none(option: Any, value: Any) -> Optional[list[Any]]: """Validates that 'value' is a list or None.""" if value is None: return value @@ -597,7 +597,7 @@ def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: return value -def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: +def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable[..., Any]]: """Validates that 'value' is a callable.""" if value is None: return value @@ -829,7 +829,7 @@ def validate_auth_option(option: str, value: Any) -> tuple[str, Any]: def _get_validator( key: str, validators: dict[str, Callable[[Any, Any], Any]], normed_key: Optional[str] = None -) -> Callable: +) -> Callable[[Any, Any], Any]: normed_key = normed_key or key try: return validators[normed_key] @@ -917,7 +917,7 @@ class BaseObject: def __init__( self, - codec_options: CodecOptions, + codec_options: CodecOptions[Any], read_preference: _ServerMode, write_concern: WriteConcern, read_concern: ReadConcern, @@ -947,7 +947,7 @@ def __init__( self._read_concern = read_concern @property - def codec_options(self) -> CodecOptions: + def codec_options(self) -> CodecOptions[Any]: """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. """ diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index e9ad1c1e01..cf686f6ab5 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -37,7 +37,7 @@ if TYPE_CHECKING: from pymongo.pyopenssl_context import SSLContext - from pymongo.typings import _AgnosticMongoClient, _DocumentTypeArg + from pymongo.typings import _AgnosticMongoClient class AutoEncryptionOpts: @@ -47,7 +47,7 @@ def __init__( self, kms_providers: Mapping[str, Any], key_vault_namespace: str, - key_vault_client: Optional[_AgnosticMongoClient[_DocumentTypeArg]] = None, + key_vault_client: Optional[_AgnosticMongoClient] = None, schema_map: Optional[Mapping[str, Any]] = None, bypass_auto_encryption: bool = False, mongocryptd_uri: str = "mongodb://localhost:27020", diff --git a/pymongo/helpers_shared.py b/pymongo/helpers_shared.py index a664e87a69..9646c0691a 100644 --- a/pymongo/helpers_shared.py +++ b/pymongo/helpers_shared.py @@ -52,7 +52,7 @@ # From the SDAM spec, the "node is shutting down" codes. -_SHUTDOWN_CODES: frozenset = frozenset( +_SHUTDOWN_CODES: frozenset[int] = frozenset( [ 11600, # InterruptedAtShutdown 91, # ShutdownInProgress @@ -61,7 +61,7 @@ # From the SDAM spec, the "not primary" error codes are combined with the # "node is recovering" error codes (of which the "node is shutting down" # errors are a subset). -_NOT_PRIMARY_CODES: frozenset = ( +_NOT_PRIMARY_CODES: frozenset[int] = ( frozenset( [ 10058, # LegacyNotPrimary <=3.2 "not primary" error code @@ -75,7 +75,7 @@ | _SHUTDOWN_CODES ) # From the retryable writes spec. -_RETRYABLE_ERROR_CODES: frozenset = _NOT_PRIMARY_CODES | frozenset( +_RETRYABLE_ERROR_CODES: frozenset[int] = _NOT_PRIMARY_CODES | frozenset( [ 7, # HostNotFound 6, # HostUnreachable @@ -95,7 +95,7 @@ # Note - to avoid bugs from forgetting which if these is all lowercase and # which are camelCase, and at the same time avoid having to add a test for # every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS: set = { +_SENSITIVE_COMMANDS: set[str] = { "authenticate", "saslstart", "saslcontinue", diff --git a/pymongo/message.py b/pymongo/message.py index d51c77a174..b2e5a685af 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -333,7 +333,7 @@ def _op_msg_no_header( command: Mapping[str, Any], identifier: str, docs: Optional[list[Mapping[str, Any]]], - opts: CodecOptions, + opts: CodecOptions[Any], ) -> tuple[bytes, int, int]: """Get a OP_MSG message. @@ -365,7 +365,7 @@ def _op_msg_compressed( command: Mapping[str, Any], identifier: str, docs: Optional[list[Mapping[str, Any]]], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: Union[SnappyContext, ZlibContext, ZstdContext], ) -> tuple[int, bytes, int, int]: """Internal OP_MSG message helper.""" @@ -379,7 +379,7 @@ def _op_msg_uncompressed( command: Mapping[str, Any], identifier: str, docs: Optional[list[Mapping[str, Any]]], - opts: CodecOptions, + opts: CodecOptions[Any], ) -> tuple[int, bytes, int, int]: """Internal compressed OP_MSG message helper.""" data, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts) @@ -396,7 +396,7 @@ def _op_msg( command: MutableMapping[str, Any], dbname: str, read_preference: Optional[_ServerMode], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, ) -> tuple[int, bytes, int, int]: """Get a OP_MSG message.""" @@ -430,7 +430,7 @@ def _query_impl( num_to_return: int, query: Mapping[str, Any], field_selector: Optional[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ) -> tuple[bytes, int]: """Get an OP_QUERY message.""" encoded = _dict_to_bson(query, False, opts) @@ -461,7 +461,7 @@ def _query_compressed( num_to_return: int, query: Mapping[str, Any], field_selector: Optional[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: Union[SnappyContext, ZlibContext, ZstdContext], ) -> tuple[int, bytes, int]: """Internal compressed query message helper.""" @@ -479,7 +479,7 @@ def _query_uncompressed( num_to_return: int, query: Mapping[str, Any], field_selector: Optional[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ) -> tuple[int, bytes, int]: """Internal query message helper.""" op_query, max_bson_size = _query_impl( @@ -500,7 +500,7 @@ def _query( num_to_return: int, query: Mapping[str, Any], field_selector: Optional[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, ) -> tuple[int, bytes, int]: """Get a **query** message.""" @@ -598,7 +598,7 @@ def __init__( listeners: _EventListeners, session: Optional[_AgnosticClientSession], op_type: int, - codec: CodecOptions, + codec: CodecOptions[Any], ): self.db_name = database_name self.conn = conn @@ -679,7 +679,7 @@ def __init__( listeners: _EventListeners, session: Optional[_AgnosticClientSession], op_type: int, - codec: CodecOptions, + codec: CodecOptions[Any], ): super().__init__( database_name, @@ -771,7 +771,7 @@ def _batched_op_msg_impl( command: Mapping[str, Any], docs: list[Mapping[str, Any]], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, buf: _BytesIO, ) -> tuple[list[Mapping[str, Any]], int]: @@ -839,7 +839,7 @@ def _encode_batched_op_msg( command: Mapping[str, Any], docs: list[Mapping[str, Any]], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, ) -> tuple[bytes, list[Mapping[str, Any]]]: """Encode the next batched insert, update, or delete operation @@ -860,7 +860,7 @@ def _batched_op_msg_compressed( command: Mapping[str, Any], docs: list[Mapping[str, Any]], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]]]: """Create the next batched insert, update, or delete operation @@ -878,7 +878,7 @@ def _batched_op_msg( command: Mapping[str, Any], docs: list[Mapping[str, Any]], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]]]: """OP_MSG implementation entry point.""" @@ -910,7 +910,7 @@ def _do_batched_op_msg( operation: int, command: MutableMapping[str, Any], docs: list[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]]]: """Create the next batched insert, update, or delete operation @@ -939,7 +939,7 @@ def __init__( operation_id: int, listeners: _EventListeners, session: Optional[_AgnosticClientSession], - codec: CodecOptions, + codec: CodecOptions[Any], ): super().__init__( database_name, @@ -1043,7 +1043,7 @@ def _client_batched_op_msg_impl( operations: list[tuple[str, Mapping[str, Any]]], namespaces: list[str], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _ClientBulkWriteContext, buf: _BytesIO, ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]], int]: @@ -1161,7 +1161,7 @@ def _client_encode_batched_op_msg( operations: list[tuple[str, Mapping[str, Any]]], namespaces: list[str], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _ClientBulkWriteContext, ) -> tuple[bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Encode the next batched client-level bulkWrite @@ -1180,7 +1180,7 @@ def _client_batched_op_msg_compressed( operations: list[tuple[str, Mapping[str, Any]]], namespaces: list[str], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _ClientBulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Create the next batched client-level bulkWrite operation @@ -1200,7 +1200,7 @@ def _client_batched_op_msg( operations: list[tuple[str, Mapping[str, Any]]], namespaces: list[str], ack: bool, - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _ClientBulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: """OP_MSG implementation entry point for client-level bulkWrite.""" @@ -1229,7 +1229,7 @@ def _client_do_batched_op_msg( command: MutableMapping[str, Any], operations: list[tuple[str, Mapping[str, Any]]], namespaces: list[str], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _ClientBulkWriteContext, ) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: """Create the next batched client-level bulkWrite @@ -1253,7 +1253,7 @@ def _encode_batched_write_command( operation: int, command: MutableMapping[str, Any], docs: list[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, ) -> tuple[bytes, list[Mapping[str, Any]]]: """Encode the next batched insert, update, or delete command.""" @@ -1272,7 +1272,7 @@ def _batched_write_command_impl( operation: int, command: MutableMapping[str, Any], docs: list[Mapping[str, Any]], - opts: CodecOptions, + opts: CodecOptions[Any], ctx: _BulkWriteContext, buf: _BytesIO, ) -> tuple[list[Mapping[str, Any]], int]: @@ -1383,7 +1383,7 @@ def raw_response( errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: - error_object: dict = bson.BSON(self.documents).decode() + error_object: dict[str, Any] = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. error_object.setdefault("ok", 0) if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): @@ -1405,7 +1405,7 @@ def raw_response( def unpack_response( self, cursor_id: Optional[int] = None, - codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, + codec_options: CodecOptions[Any] = _UNICODE_REPLACE_CODEC_OPTIONS, user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False, ) -> list[dict[str, Any]]: @@ -1431,7 +1431,7 @@ def unpack_response( return bson.decode_all(self.documents, codec_options) return bson._decode_all_selective(self.documents, codec_options, user_fields) - def command_response(self, codec_options: CodecOptions) -> dict[str, Any]: + def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]: """Unpack a command response.""" docs = self.unpack_response(codec_options=codec_options) assert self.number_returned == 1 @@ -1491,7 +1491,7 @@ def raw_response( def unpack_response( self, cursor_id: Optional[int] = None, - codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, + codec_options: CodecOptions[Any] = _UNICODE_REPLACE_CODEC_OPTIONS, user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False, ) -> list[dict[str, Any]]: @@ -1508,7 +1508,7 @@ def unpack_response( assert not legacy_response return bson._decode_all_selective(self.payload_document, codec_options, user_fields) - def command_response(self, codec_options: CodecOptions) -> dict[str, Any]: + def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]: """Unpack a command response.""" return self.unpack_response(codec_options=codec_options)[0] @@ -1583,7 +1583,7 @@ def __init__( ntoskip: int, spec: Mapping[str, Any], fields: Optional[Mapping[str, Any]], - codec_options: CodecOptions, + codec_options: CodecOptions[Any], read_preference: _ServerMode, limit: int, batch_size: int, @@ -1757,7 +1757,7 @@ def __init__( coll: str, ntoreturn: int, cursor_id: int, - codec_options: CodecOptions, + codec_options: CodecOptions[Any], read_preference: _ServerMode, session: Optional[_AgnosticClientSession], client: _AgnosticMongoClient, @@ -1871,7 +1871,7 @@ def use_command(self, conn: _AgnosticConnection) -> bool: return False -class _CursorAddress(tuple): +class _CursorAddress(tuple[Any, ...]): """The server address (host, port) of a cursor, with namespace property.""" __namespace: Any diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index 101a8fbc37..46a78aea0b 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -1347,7 +1347,11 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): __slots__ = ("__duration", "__reply") def __init__( - self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + self, + duration: float, + reply: Hello[dict[str, Any]], + connection_id: _Address, + awaited: bool = False, ) -> None: super().__init__(connection_id, awaited) self.__duration = duration @@ -1359,7 +1363,7 @@ def duration(self) -> float: return self.__duration @property - def reply(self) -> Hello: + def reply(self) -> Hello[dict[str, Any]]: """An instance of :class:`~pymongo.hello.Hello`.""" return self.__reply @@ -1647,7 +1651,7 @@ def publish_server_heartbeat_started(self, connection_id: _Address, awaited: boo _handle_exception() def publish_server_heartbeat_succeeded( - self, connection_id: _Address, duration: float, reply: Hello, awaited: bool + self, connection_id: _Address, duration: float, reply: Hello[dict[str, Any]], awaited: bool ) -> None: """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 78eefc7177..2f7f9c320f 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -96,7 +96,7 @@ async def _async_socket_sendall_ssl( view = memoryview(buf) sent = 0 - def _is_ready(fut: Future) -> None: + def _is_ready(fut: Future[Any]) -> None: if fut.done(): return fut.set_result(None) @@ -139,7 +139,7 @@ async def _async_socket_receive_ssl( mv = memoryview(bytearray(length)) total_read = 0 - def _is_ready(fut: Future) -> None: + def _is_ready(fut: Future[Any]) -> None: if fut.done(): return fut.set_result(None) @@ -486,15 +486,15 @@ def __init__(self, timeout: Optional[float] = None): self._message_size = 0 self._op_code = 0 self._connection_lost = False - self._read_waiter: Optional[Future] = None + self._read_waiter: Optional[Future[Any]] = None self._timeout = timeout self._is_compressed = False self._compressor_id: Optional[int] = None self._max_message_size = MAX_MESSAGE_SIZE self._response_to: Optional[int] = None self._closed = asyncio.get_running_loop().create_future() - self._pending_messages: collections.deque[Future] = collections.deque() - self._done_messages: collections.deque[Future] = collections.deque() + self._pending_messages: collections.deque[Future[Any]] = collections.deque() + self._done_messages: collections.deque[Future[Any]] = collections.deque() def settimeout(self, timeout: float | None) -> None: self._timeout = timeout diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index ed369a2b21..82f506f039 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -53,7 +53,7 @@ def __init__( self._min_interval = min_interval self._target = target self._stopped = False - self._task: Optional[asyncio.Task] = None + self._task: Optional[asyncio.Task[Any]] = None self._name = name self._skip_sleep = False diff --git a/pymongo/server_description.py b/pymongo/server_description.py index afc5346bb7..d038c04b1c 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -69,7 +69,7 @@ class ServerDescription: def __init__( self, address: _Address, - hello: Optional[Hello] = None, + hello: Optional[Hello[dict[str, Any]]] = None, round_trip_time: Optional[float] = None, error: Optional[Exception] = None, min_round_trip_time: float = 0.0, @@ -299,4 +299,4 @@ def __repr__(self) -> str: ) # For unittesting only. Use under no circumstances! - _host_to_round_trip_time: dict = {} + _host_to_round_trip_time: dict = {} # type: ignore[type-arg] diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index beafc717eb..7dbd0f2148 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -56,17 +56,22 @@ if HAVE_PYSSL: PYSSLError: Any = _pyssl.SSLError - BLOCKING_IO_ERRORS: tuple = _ssl.BLOCKING_IO_ERRORS + _pyssl.BLOCKING_IO_ERRORS - BLOCKING_IO_READ_ERROR: tuple = (_pyssl.BLOCKING_IO_READ_ERROR, _ssl.BLOCKING_IO_READ_ERROR) - BLOCKING_IO_WRITE_ERROR: tuple = ( + BLOCKING_IO_ERRORS: tuple = ( # type: ignore[type-arg] + _ssl.BLOCKING_IO_ERRORS + _pyssl.BLOCKING_IO_ERRORS + ) + BLOCKING_IO_READ_ERROR: tuple = ( # type: ignore[type-arg] + _pyssl.BLOCKING_IO_READ_ERROR, + _ssl.BLOCKING_IO_READ_ERROR, + ) + BLOCKING_IO_WRITE_ERROR: tuple = ( # type: ignore[type-arg] _pyssl.BLOCKING_IO_WRITE_ERROR, _ssl.BLOCKING_IO_WRITE_ERROR, ) else: PYSSLError = _ssl.SSLError - BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS - BLOCKING_IO_READ_ERROR = (_ssl.BLOCKING_IO_READ_ERROR,) - BLOCKING_IO_WRITE_ERROR = (_ssl.BLOCKING_IO_WRITE_ERROR,) + BLOCKING_IO_ERRORS: tuple = _ssl.BLOCKING_IO_ERRORS # type: ignore[type-arg, no-redef] + BLOCKING_IO_READ_ERROR: tuple = (_ssl.BLOCKING_IO_READ_ERROR,) # type: ignore[type-arg, no-redef] + BLOCKING_IO_WRITE_ERROR: tuple = (_ssl.BLOCKING_IO_WRITE_ERROR,) # type: ignore[type-arg, no-redef] SSLError = _ssl.SSLError BLOCKING_IO_LOOKUP_ERROR = BLOCKING_IO_READ_ERROR @@ -131,7 +136,7 @@ class SSLError(Exception): # type: ignore pass IPADDR_SAFE = False - BLOCKING_IO_ERRORS = () + BLOCKING_IO_ERRORS: tuple = () # type: ignore[type-arg, no-redef] def _has_sni(is_sync: bool) -> bool: # noqa: ARG001 return False diff --git a/pymongo/synchronous/aggregation.py b/pymongo/synchronous/aggregation.py index 3eb0c8bf54..9845f28b08 100644 --- a/pymongo/synchronous/aggregation.py +++ b/pymongo/synchronous/aggregation.py @@ -46,8 +46,8 @@ class _AggregationCommand: def __init__( self, - target: Union[Database, Collection], - cursor_class: type[CommandCursor], + target: Union[Database[Any], Collection[Any]], + cursor_class: type[CommandCursor[Any]], pipeline: _Pipeline, options: MutableMapping[str, Any], explicit_session: bool, @@ -111,12 +111,12 @@ def _cursor_namespace(self) -> str: """The namespace in which the aggregate command is run.""" raise NotImplementedError - def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection: + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection[Any]: """The Collection used for the aggregate command cursor.""" raise NotImplementedError @property - def _database(self) -> Database: + def _database(self) -> Database[Any]: """The database against which the aggregation command is run.""" raise NotImplementedError @@ -205,7 +205,7 @@ def get_cursor( class _CollectionAggregationCommand(_AggregationCommand): - _target: Collection + _target: Collection[Any] @property def _aggregation_target(self) -> str: @@ -215,12 +215,12 @@ def _aggregation_target(self) -> str: def _cursor_namespace(self) -> str: return self._target.full_name - def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection[Any]: """The Collection used for the aggregate command cursor.""" return self._target @property - def _database(self) -> Database: + def _database(self) -> Database[Any]: return self._target.database @@ -234,7 +234,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class _DatabaseAggregationCommand(_AggregationCommand): - _target: Database + _target: Database[Any] @property def _aggregation_target(self) -> int: @@ -245,10 +245,10 @@ def _cursor_namespace(self) -> str: return f"{self._target.name}.$cmd.aggregate" @property - def _database(self) -> Database: + def _database(self) -> Database[Any]: return self._target - def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection: + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection[Any]: """The Collection used for the aggregate command cursor.""" # Collection level aggregate may not always return the "ns" field # according to our MockupDB tests. Let's handle that case for db level diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py index f4d754687d..583ee39f67 100644 --- a/pymongo/synchronous/auth_oidc.py +++ b/pymongo/synchronous/auth_oidc.py @@ -257,7 +257,7 @@ def _sasl_continue_jwt( ) -> Mapping[str, Any]: self.access_token = None self.refresh_token = None - start_payload: dict = bson.decode(start_resp["payload"]) + start_payload: dict[str, Any] = bson.decode(start_resp["payload"]) if "issuer" in start_payload: self.idp_info = OIDCIdPInfo(**start_payload) access_token = self._get_access_token() diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py index a528b09add..22d6a7a76a 100644 --- a/pymongo/synchronous/bulk.py +++ b/pymongo/synchronous/bulk.py @@ -248,7 +248,7 @@ def write_command( request_id: int, msg: bytes, docs: list[Mapping[str, Any]], - client: MongoClient, + client: MongoClient[Any], ) -> dict[str, Any]: """A proxy for SocketInfo.write_command that handles event publishing.""" cmd[bwc.field] = docs @@ -334,7 +334,7 @@ def unack_write( msg: bytes, max_doc_size: int, docs: list[Mapping[str, Any]], - client: MongoClient, + client: MongoClient[Any], ) -> Optional[Mapping[str, Any]]: """A proxy for Connection.unack_write that handles event publishing.""" if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): @@ -419,7 +419,7 @@ def _execute_batch_unack( bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], cmd: dict[str, Any], ops: list[Mapping[str, Any]], - client: MongoClient, + client: MongoClient[Any], ) -> list[Mapping[str, Any]]: if self.is_encrypted: _, batched_cmd, to_send = bwc.batch_command(cmd, ops) @@ -446,7 +446,7 @@ def _execute_batch( bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], cmd: dict[str, Any], ops: list[Mapping[str, Any]], - client: MongoClient, + client: MongoClient[Any], ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: if self.is_encrypted: _, batched_cmd, to_send = bwc.batch_command(cmd, ops) diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py index 304427b89b..f5f6352186 100644 --- a/pymongo/synchronous/change_stream.py +++ b/pymongo/synchronous/change_stream.py @@ -164,7 +164,7 @@ def _aggregation_command_class(self) -> Type[_AggregationCommand]: raise NotImplementedError @property - def _client(self) -> MongoClient: + def _client(self) -> MongoClient: # type: ignore[type-arg] """The client against which the aggregation commands for this ChangeStream will be run. """ @@ -206,7 +206,7 @@ def _command_options(self) -> dict[str, Any]: def _aggregation_pipeline(self) -> list[dict[str, Any]]: """Return the full aggregation pipeline for this ChangeStream.""" options = self._change_stream_options() - full_pipeline: list = [{"$changeStream": options}] + full_pipeline: list[dict[str, Any]] = [{"$changeStream": options}] full_pipeline.extend(self._pipeline) return full_pipeline @@ -237,7 +237,7 @@ def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: def _run_aggregation_cmd( self, session: Optional[ClientSession], explicit_session: bool - ) -> CommandCursor: + ) -> CommandCursor: # type: ignore[type-arg] """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ @@ -257,7 +257,7 @@ def _run_aggregation_cmd( operation=_Op.AGGREGATE, ) - def _create_cursor(self) -> CommandCursor: + def _create_cursor(self) -> CommandCursor: # type: ignore[type-arg] with self._client._tmp_session(self._session, close=False) as s: return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index d73bfb2a2b..1076ceba99 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -88,7 +88,7 @@ class _ClientBulk: def __init__( self, - client: MongoClient, + client: MongoClient[Any], write_concern: WriteConcern, ordered: bool = True, bypass_document_validation: Optional[bool] = None, @@ -233,7 +233,7 @@ def write_command( msg: Union[bytes, dict[str, Any]], op_docs: list[Mapping[str, Any]], ns_docs: list[Mapping[str, Any]], - client: MongoClient, + client: MongoClient[Any], ) -> dict[str, Any]: """A proxy for Connection.write_command that handles event publishing.""" cmd["ops"] = op_docs @@ -324,7 +324,7 @@ def unack_write( msg: bytes, op_docs: list[Mapping[str, Any]], ns_docs: list[Mapping[str, Any]], - client: MongoClient, + client: MongoClient[Any], ) -> Optional[Mapping[str, Any]]: """A proxy for Connection.unack_write that handles event publishing.""" if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index 8d5bf7697b..68a01dd7e7 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -395,7 +395,7 @@ class _TxnState: class _Transaction: """Internal class to hold transaction information in a ClientSession.""" - def __init__(self, opts: Optional[TransactionOptions], client: MongoClient): + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient[Any]): self.opts = opts self.state = _TxnState.NONE self.sharded = False @@ -458,7 +458,7 @@ def _max_time_expired_error(exc: PyMongoError) -> bool: # From the transactions spec, all the retryable writes errors plus # WriteConcernTimeout. -_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( # type: ignore[type-arg] [ 64, # WriteConcernTimeout 50, # MaxTimeMSExpired @@ -498,13 +498,13 @@ class ClientSession: def __init__( self, - client: MongoClient, + client: MongoClient[Any], server_session: Any, options: SessionOptions, implicit: bool, ) -> None: # A MongoClient, a _ServerSession, a SessionOptions, and a set. - self._client: MongoClient = client + self._client: MongoClient[Any] = client self._server_session = server_session self._options = options self._cluster_time: Optional[Mapping[str, Any]] = None @@ -550,7 +550,7 @@ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: self._end_session(lock=True) @property - def client(self) -> MongoClient: + def client(self) -> MongoClient[Any]: """The :class:`~pymongo.mongo_client.MongoClient` this session was created from. """ @@ -748,7 +748,7 @@ def start_transaction( write_concern: Optional[WriteConcern] = None, read_preference: Optional[_ServerMode] = None, max_commit_time_ms: Optional[int] = None, - ) -> ContextManager: + ) -> ContextManager[Any]: """Start a multi-statement transaction. Takes the same arguments as :class:`TransactionOptions`. @@ -1118,7 +1118,7 @@ def inc_transaction_id(self) -> None: self._transaction_id += 1 -class _ServerSessionPool(collections.deque): +class _ServerSessionPool(collections.deque): # type: ignore[type-arg] """Pool of _ServerSession objects. This class is thread-safe. diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 8a71768318..32da83b0c2 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -582,7 +582,7 @@ def _command( conn: Connection, command: MutableMapping[str, Any], read_preference: Optional[_ServerMode] = None, - codec_options: Optional[CodecOptions] = None, + codec_options: Optional[CodecOptions[Mapping[str, Any]]] = None, check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_concern: Optional[ReadConcern] = None, @@ -703,7 +703,7 @@ def bulk_write( bypass_document_validation: Optional[bool] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, - let: Optional[Mapping] = None, + let: Optional[Mapping[str, Any]] = None, ) -> BulkWriteResult: """Send a batch of write operations to the server. @@ -2522,7 +2522,7 @@ def _list_indexes( session: Optional[ClientSession] = None, comment: Optional[Any] = None, ) -> CommandCursor[MutableMapping[str, Any]]: - codec_options: CodecOptions = CodecOptions(SON) + codec_options: CodecOptions[Mapping[str, Any]] = CodecOptions(SON) coll = cast( Collection[MutableMapping[str, Any]], self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), @@ -2864,7 +2864,7 @@ def _aggregate( self, aggregation_command: Type[_AggregationCommand], pipeline: _Pipeline, - cursor_class: Type[CommandCursor], + cursor_class: Type[CommandCursor], # type: ignore[type-arg] session: Optional[ClientSession], explicit_session: bool, let: Optional[Mapping[str, Any]] = None, @@ -3107,7 +3107,7 @@ def distinct( comment: Optional[Any] = None, hint: Optional[_IndexKeyHint] = None, **kwargs: Any, - ) -> list: + ) -> list[str]: """Get a list of distinct values for `key` among all documents in this collection. @@ -3170,7 +3170,7 @@ def _cmd( _server: Server, conn: Connection, read_preference: Optional[_ServerMode], - ) -> list: + ) -> list: # type: ignore[type-arg] return ( self._command( conn, @@ -3195,7 +3195,7 @@ def _find_and_modify( array_filters: Optional[Sequence[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, session: Optional[ClientSession] = None, - let: Optional[Mapping] = None, + let: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> Any: """Internal findAndModify helper.""" diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index e23519d740..bcdeed5f94 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -350,7 +350,7 @@ def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: else: return None - def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] """Get all or some available documents from the cursor.""" if not len(self._data) and not self._killed: self._refresh() @@ -457,7 +457,7 @@ def _unpack_response( # type: ignore[override] self, response: Union[_OpReply, _OpMsg], cursor_id: Optional[int], - codec_options: CodecOptions, + codec_options: CodecOptions[dict[str, Any]], user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False, ) -> list[Mapping[str, Any]]: diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index e49141e811..eb45d9c5d1 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -216,7 +216,7 @@ def __init__( # it anytime we change __limit. self._empty = False - self._data: deque = deque() + self._data: deque = deque() # type: ignore[type-arg] self._address: Optional[_Address] = None self._retrieved = 0 @@ -280,7 +280,7 @@ def clone(self) -> Cursor[_DocumentType]: """ return self._clone(True) - def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: + def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: # type: ignore[type-arg] """Internal clone helper.""" if not base: if self._explicit_session: @@ -322,7 +322,7 @@ def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor base.__dict__.update(data) return base - def _clone_base(self, session: Optional[ClientSession]) -> Cursor: + def _clone_base(self, session: Optional[ClientSession]) -> Cursor: # type: ignore[type-arg] """Creates an empty Cursor object for information to be copied into.""" return self.__class__(self._collection, session=session) @@ -862,7 +862,7 @@ def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: if self._has_filter: spec = dict(self._spec) else: - spec = cast(dict, self._spec) + spec = cast(dict, self._spec) # type: ignore[type-arg] spec["$where"] = code self._spec = spec return self @@ -886,7 +886,7 @@ def _unpack_response( self, response: Union[_OpReply, _OpMsg], cursor_id: Optional[int], - codec_options: CodecOptions, + codec_options: CodecOptions, # type: ignore[type-arg] user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False, ) -> Sequence[_DocumentOut]: @@ -962,29 +962,33 @@ def __deepcopy__(self, memo: Any) -> Any: return self._clone(deepcopy=True) @overload - def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: # type: ignore[type-arg] ... @overload def _deepcopy( - self, x: SupportsItems, memo: Optional[dict[int, Union[list, dict]]] = None - ) -> dict: + self, + x: SupportsItems, # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> dict: # type: ignore[type-arg] ... def _deepcopy( - self, x: Union[Iterable, SupportsItems], memo: Optional[dict[int, Union[list, dict]]] = None - ) -> Union[list, dict]: + self, + x: Union[Iterable, SupportsItems], # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> Union[list[Any], dict[str, Any]]: """Deepcopy helper for the data dictionary or list. Regular expressions cannot be deep copied but as they are immutable we don't have to copy them when cloning. """ - y: Union[list, dict] + y: Union[list[Any], dict[str, Any]] iterator: Iterable[tuple[Any, Any]] if not hasattr(x, "items"): y, is_list, iterator = [], True, enumerate(x) else: - y, is_list, iterator = {}, False, cast("SupportsItems", x).items() + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() # type: ignore[type-arg] if memo is None: memo = {} val_id = id(x) @@ -1058,7 +1062,7 @@ def close(self) -> None: """Explicitly close / kill this cursor.""" self._die_lock() - def distinct(self, key: str) -> list: + def distinct(self, key: str) -> list[str]: """Get a list of distinct values for `key` among all documents in the result set of this query. @@ -1263,7 +1267,7 @@ def next(self) -> _DocumentType: else: raise StopIteration - def _next_batch(self, result: list, total: Optional[int] = None) -> bool: + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] """Get all or some documents from the cursor.""" if not self._exhaust_checked: self._exhaust_checked = True @@ -1323,7 +1327,7 @@ def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: return res -class RawBatchCursor(Cursor, Generic[_DocumentType]): +class RawBatchCursor(Cursor, Generic[_DocumentType]): # type: ignore[type-arg] """A cursor / iterator over raw batches of BSON data from a query result.""" _query_class = _RawBatchQuery diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index a11674b9aa..dd9ea01558 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -771,7 +771,7 @@ def _command( self._name, command, read_preference, - codec_options, + codec_options, # type: ignore[arg-type] check, allowable_errors, write_concern=write_concern, diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 1fd506e052..5d95e9c9d5 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -158,10 +158,10 @@ _IS_SYNC = True _WriteOp = Union[ - InsertOne, + InsertOne, # type: ignore[type-arg] DeleteOne, DeleteMany, - ReplaceOne, + ReplaceOne, # type: ignore[type-arg] UpdateOne, UpdateMany, ] @@ -173,7 +173,7 @@ class MongoClient(common.BaseObject, Generic[_DocumentType]): # Define order to retrieve options from ClientOptions for __repr__. # No host/port; these are retrieved from TopologySettings. _constructor_args = ("document_class", "tz_aware", "connect") - _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() # type: ignore[type-arg] def __init__( self, @@ -847,7 +847,7 @@ def __init__( self._default_database_name = dbase self._lock = _create_lock() - self._kill_cursors_queue: list = [] + self._kill_cursors_queue: list = [] # type: ignore[type-arg] self._encrypter: Optional[_Encrypter] = None @@ -1064,7 +1064,7 @@ def _after_fork(self) -> None: # Reset the session pool to avoid duplicate sessions in the child process. self._topology._session_pool.reset() - def _duplicate(self, **kwargs: Any) -> MongoClient: + def _duplicate(self, **kwargs: Any) -> MongoClient: # type: ignore[type-arg] args = self._init_kwargs.copy() args.update(kwargs) return MongoClient(**args) @@ -1546,7 +1546,7 @@ def get_database( self, name, codec_options, read_preference, write_concern, read_concern ) - def _database_default_options(self, name: str) -> database.Database: + def _database_default_options(self, name: str) -> database.Database: # type: ignore[type-arg] """Get a Database instance with the default settings.""" return self.get_database( name, @@ -1883,7 +1883,7 @@ def _conn_for_reads( def _run_operation( self, operation: Union[_Query, _GetMore], - unpack_res: Callable, + unpack_res: Callable, # type: ignore[type-arg] address: Optional[_Address] = None, ) -> Response: """Run a _Query/_GetMore operation and return a Response. @@ -2257,7 +2257,7 @@ def _return_server_session( @contextlib.contextmanager def _tmp_session( self, session: Optional[client_session.ClientSession], close: bool = True - ) -> Generator[Optional[client_session.ClientSession], None, None]: + ) -> Generator[Optional[client_session.ClientSession], None]: """If provided session is None, lend a temporary session.""" if session is not None: if not isinstance(session, client_session.ClientSession): @@ -2300,8 +2300,8 @@ def server_info(self, session: Optional[client_session.ClientSession] = None) -> .. versionchanged:: 3.6 Added ``session`` parameter. """ - return cast( - dict, + return cast( # type: ignore[redundant-cast] + dict[str, Any], self.admin.command( "buildinfo", read_preference=ReadPreference.PRIMARY, session=session ), @@ -2428,13 +2428,13 @@ def drop_database( @_csot.apply def bulk_write( self, - models: Sequence[_WriteOp[_DocumentType]], + models: Sequence[_WriteOp], session: Optional[ClientSession] = None, ordered: bool = True, verbose_results: bool = False, bypass_document_validation: Optional[bool] = None, comment: Optional[Any] = None, - let: Optional[Mapping] = None, + let: Optional[Mapping[str, Any]] = None, write_concern: Optional[WriteConcern] = None, ) -> ClientBulkWriteResult: """Send a batch of write operations, potentially across multiple namespaces, to the server. @@ -2620,7 +2620,12 @@ class _MongoClientErrorHandler: "handled", ) - def __init__(self, client: MongoClient, server: Server, session: Optional[ClientSession]): + def __init__( + self, + client: MongoClient, # type: ignore[type-arg] + server: Server, + session: Optional[ClientSession], + ): if not isinstance(client, MongoClient): # This is for compatibility with mocked and subclassed types, such as in Motor. if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__): @@ -2692,7 +2697,7 @@ class _ClientConnectionRetryable(Generic[T]): def __init__( self, - mongo_client: MongoClient, + mongo_client: MongoClient, # type: ignore[type-arg] func: _WriteCall[T] | _ReadCall[T], bulk: Optional[Union[_Bulk, _ClientBulk]], operation: str, diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index f41040801f..d5dd5caf82 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -349,7 +349,7 @@ def _check_once(self) -> ServerDescription: ) return sd - def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: + def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: # type: ignore[type-arg] """Return (Hello, round_trip_time). Can raise ConnectionFailure or OperationFailure. diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py index 9559a5a542..7d9bca4d58 100644 --- a/pymongo/synchronous/network.py +++ b/pymongo/synchronous/network.py @@ -66,7 +66,7 @@ def command( read_preference: Optional[_ServerMode], codec_options: CodecOptions[_DocumentType], session: Optional[ClientSession], - client: Optional[MongoClient], + client: Optional[MongoClient[Any]], check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, address: Optional[_Address] = None, diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 505f58c60f..4ea5cb1c1e 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -201,7 +201,7 @@ def set_conn_timeout(self, timeout: Optional[float]) -> None: self.conn.get_conn.settimeout(timeout) def apply_timeout( - self, client: MongoClient, cmd: Optional[MutableMapping[str, Any]] + self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] ) -> Optional[float]: # CSOT: use remaining timeout when set. timeout = _csot.remaining() @@ -255,7 +255,7 @@ def hello_cmd(self) -> dict[str, Any]: else: return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} - def hello(self) -> Hello: + def hello(self) -> Hello[dict[str, Any]]: return self._hello(None, None) def _hello( @@ -357,7 +357,7 @@ def command( dbname: str, spec: MutableMapping[str, Any], read_preference: _ServerMode = ReadPreference.PRIMARY, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + codec_options: CodecOptions[Mapping[str, Any]] = DEFAULT_CODEC_OPTIONS, # type: ignore[assignment] check: bool = True, allowable_errors: Optional[Sequence[Union[str, int]]] = None, read_concern: Optional[ReadConcern] = None, @@ -365,7 +365,7 @@ def command( parse_write_concern_error: bool = False, collation: Optional[_CollationIn] = None, session: Optional[ClientSession] = None, - client: Optional[MongoClient] = None, + client: Optional[MongoClient[Any]] = None, retryable_write: bool = False, publish_events: bool = True, user_fields: Optional[Mapping[str, Any]] = None, @@ -417,7 +417,7 @@ def command( spec, self.is_mongos, read_preference, - codec_options, + codec_options, # type: ignore[arg-type] session, client, check, @@ -489,7 +489,7 @@ def unack_write(self, msg: bytes, max_doc_size: int) -> None: self.send_message(msg, max_doc_size) def write_command( - self, request_id: int, msg: bytes, codec_options: CodecOptions + self, request_id: int, msg: bytes, codec_options: CodecOptions[Mapping[str, Any]] ) -> dict[str, Any]: """Send "insert" etc. command, returning response as a dict. @@ -541,7 +541,7 @@ def authenticate(self, reauthenticate: bool = False) -> None: ) def validate_session( - self, client: Optional[MongoClient], session: Optional[ClientSession] + self, client: Optional[MongoClient[Any]], session: Optional[ClientSession] ) -> None: """Validate this session before use with client. @@ -596,7 +596,7 @@ def send_cluster_time( self, command: MutableMapping[str, Any], session: Optional[ClientSession], - client: Optional[MongoClient], + client: Optional[MongoClient[Any]], ) -> None: """Add $clusterTime.""" if client: @@ -730,7 +730,7 @@ def __init__( # LIFO pool. Sockets are ordered on idle time. Sockets claimed # and returned to pool from the left side. Stale sockets removed # from the right side. - self.conns: collections.deque = collections.deque() + self.conns: collections.deque[Connection] = collections.deque() self.active_contexts: set[_CancellationContext] = set() self.lock = _create_lock() self._max_connecting_cond = _create_condition(self.lock) @@ -837,8 +837,8 @@ def _reset( if service_id is None: sockets, self.conns = self.conns, collections.deque() else: - discard: collections.deque = collections.deque() - keep: collections.deque = collections.deque() + discard: collections.deque = collections.deque() # type: ignore[type-arg] + keep: collections.deque = collections.deque() # type: ignore[type-arg] for conn in self.conns: if conn.service_id == service_id: discard.append(conn) @@ -864,7 +864,7 @@ def _reset( if close: if not _IS_SYNC: asyncio.gather( - *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], # type: ignore[func-returns-value] return_exceptions=True, ) else: @@ -901,7 +901,7 @@ def _reset( ) if not _IS_SYNC: asyncio.gather( - *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], # type: ignore[func-returns-value] return_exceptions=True, ) else: @@ -915,7 +915,7 @@ def update_is_writable(self, is_writable: Optional[bool]) -> None: self.is_writable = is_writable with self.lock: for _socket in self.conns: - _socket.update_is_writable(self.is_writable) + _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] def reset( self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False @@ -952,7 +952,7 @@ def remove_stale_sockets(self, reference_generation: int) -> None: close_conns.append(self.conns.pop()) if not _IS_SYNC: asyncio.gather( - *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], # type: ignore[func-returns-value] return_exceptions=True, ) else: @@ -1473,4 +1473,4 @@ def __del__(self) -> None: # not safe to acquire a lock in __del__. if _IS_SYNC: for conn in self.conns: - conn.close_conn(None) + conn.close_conn(None) # type: ignore[unused-coroutine] diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py index c3643ba815..a85f1b0db7 100644 --- a/pymongo/synchronous/server.py +++ b/pymongo/synchronous/server.py @@ -66,7 +66,7 @@ def __init__( monitor: Monitor, topology_id: Optional[ObjectId] = None, listeners: Optional[_EventListeners] = None, - events: Optional[ReferenceType[Queue]] = None, + events: Optional[ReferenceType[Queue[Any]]] = None, ) -> None: """Represent one MongoDB server.""" self._description = server_description @@ -142,7 +142,7 @@ def run_operation( read_preference: _ServerMode, listeners: Optional[_EventListeners], unpack_res: Callable[..., list[_DocumentOut]], - client: MongoClient, + client: MongoClient[Any], ) -> Response: """Run a _Query or _GetMore operation and return a Response object. diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py index 28370d4adc..a4ca0e6e0f 100644 --- a/pymongo/synchronous/topology.py +++ b/pymongo/synchronous/topology.py @@ -84,7 +84,7 @@ _pymongo_dir = str(Path(__file__).parent) -def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: # type: ignore[type-arg] q = queue_ref() if not q: return False # Cancel PeriodicExecutor. @@ -186,7 +186,7 @@ def __init__(self, topology_settings: TopologySettings): if self._publish_server or self._publish_tp: assert self._events is not None - weak: weakref.ReferenceType[queue.Queue] + weak: weakref.ReferenceType[queue.Queue[Any]] def target() -> bool: return process_events_queue(weak) diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index e226992b45..de67a8f94a 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -569,8 +569,8 @@ def _update_rs_from_primary( return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id if server_description.max_wire_version is None or server_description.max_wire_version < 17: - new_election_tuple: tuple = (server_description.set_version, server_description.election_id) - max_election_tuple: tuple = (max_set_version, max_election_id) + new_election_tuple: tuple = (server_description.set_version, server_description.election_id) # type: ignore[type-arg] + max_election_tuple: tuple = (max_set_version, max_election_id) # type: ignore[type-arg] if None not in new_election_tuple: if None not in max_election_tuple and new_election_tuple < max_election_tuple: # Stale primary, set to type Unknown. diff --git a/pymongo/typings.py b/pymongo/typings.py index ce6f369d1f..e678720db9 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -51,7 +51,7 @@ _T = TypeVar("_T") # Type hinting types for compatibility between async and sync classes -_AgnosticMongoClient = Union["AsyncMongoClient", "MongoClient"] +_AgnosticMongoClient = Union["AsyncMongoClient", "MongoClient"] # type: ignore[type-arg] _AgnosticConnection = Union["AsyncConnection", "Connection"] _AgnosticClientSession = Union["AsyncClientSession", "ClientSession"] _AgnosticBulk = Union["_AsyncBulk", "_Bulk"] diff --git a/pyproject.toml b/pyproject.toml index a877301226..83009ff8cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -149,11 +149,12 @@ markers = [ strict = true show_error_codes = true pretty = true -disable_error_code = ["type-arg", "no-any-return"] +disable_error_code = ["no-any-return"] +disallow_any_generics = true [[tool.mypy.overrides]] module = ["test.*"] -disable_error_code = ["no-untyped-def", "no-untyped-call"] +disable_error_code = ["type-arg", "no-untyped-def", "no-untyped-call"] [[tool.mypy.overrides]] module = ["service_identity.*"] From ad16d6e8800d0dc52ee130436e6bc3e386e809fd Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 7 Aug 2025 12:06:38 -0400 Subject: [PATCH 2009/2111] PYTHON-4431 - Remove ReadTheDocs Documentation in Favor of Official Docs (#2459) --- README.md | 14 +- bson/__init__.py | 6 +- bson/binary.py | 14 +- bson/codec_options.py | 16 +- bson/datetime_ms.py | 2 +- bson/json_util.py | 2 +- doc/async-tutorial.rst | 425 -------------- doc/atlas.rst | 43 -- doc/changelog.rst | 113 ++-- doc/common-issues.rst | 96 --- doc/compatibility-policy.rst | 62 -- doc/developer/index.rst | 9 - doc/developer/periodic_executor.rst | 113 ---- doc/examples/aggregation.rst | 90 --- doc/examples/authentication.rst | 528 ----------------- doc/examples/bulk.rst | 184 ------ doc/examples/client_bulk.rst | 192 ------ doc/examples/collations.rst | 134 ----- doc/examples/copydb.rst | 73 --- doc/examples/custom_type.rst | 436 -------------- doc/examples/datetimes.rst | 177 ------ doc/examples/encryption.rst | 840 --------------------------- doc/examples/geo.rst | 109 ---- doc/examples/gevent.rst | 52 -- doc/examples/gridfs.rst | 84 --- doc/examples/high_availability.rst | 367 ------------ doc/examples/index.rst | 40 -- doc/examples/logging.rst | 63 -- doc/examples/mod_wsgi.rst | 64 -- doc/examples/network_compression.rst | 39 -- doc/examples/server_selection.rst | 108 ---- doc/examples/tailable.rst | 42 -- doc/examples/timeouts.rst | 162 ------ doc/examples/tls.rst | 234 -------- doc/examples/type_hints.rst | 332 ----------- doc/examples/uuid.rst | 512 ---------------- doc/faq.rst | 595 ------------------- doc/index.rst | 77 +-- doc/installation.rst | 197 ------- doc/migrate-to-pymongo4.rst | 9 +- doc/python3.rst | 114 ---- doc/tools.rst | 173 ------ doc/tutorial.rst | 413 ------------- pymongo/__init__.py | 2 +- pymongo/asynchronous/collection.py | 12 +- pymongo/asynchronous/database.py | 4 +- pymongo/asynchronous/encryption.py | 4 +- pymongo/asynchronous/mongo_client.py | 28 +- pymongo/daemon.py | 2 +- pymongo/encryption_options.py | 4 +- pymongo/read_preferences.py | 2 +- pymongo/synchronous/collection.py | 12 +- pymongo/synchronous/database.py | 4 +- pymongo/synchronous/encryption.py | 4 +- pymongo/synchronous/mongo_client.py | 28 +- 55 files changed, 147 insertions(+), 7314 deletions(-) delete mode 100644 doc/async-tutorial.rst delete mode 100644 doc/atlas.rst delete mode 100644 doc/common-issues.rst delete mode 100644 doc/compatibility-policy.rst delete mode 100644 doc/developer/index.rst delete mode 100644 doc/developer/periodic_executor.rst delete mode 100644 doc/examples/aggregation.rst delete mode 100644 doc/examples/authentication.rst delete mode 100644 doc/examples/bulk.rst delete mode 100644 doc/examples/client_bulk.rst delete mode 100644 doc/examples/collations.rst delete mode 100644 doc/examples/copydb.rst delete mode 100644 doc/examples/custom_type.rst delete mode 100644 doc/examples/datetimes.rst delete mode 100644 doc/examples/encryption.rst delete mode 100644 doc/examples/geo.rst delete mode 100644 doc/examples/gevent.rst delete mode 100644 doc/examples/gridfs.rst delete mode 100644 doc/examples/high_availability.rst delete mode 100644 doc/examples/index.rst delete mode 100644 doc/examples/logging.rst delete mode 100644 doc/examples/mod_wsgi.rst delete mode 100644 doc/examples/network_compression.rst delete mode 100644 doc/examples/server_selection.rst delete mode 100644 doc/examples/tailable.rst delete mode 100644 doc/examples/timeouts.rst delete mode 100644 doc/examples/tls.rst delete mode 100644 doc/examples/type_hints.rst delete mode 100644 doc/examples/uuid.rst delete mode 100644 doc/faq.rst delete mode 100644 doc/installation.rst delete mode 100644 doc/python3.rst delete mode 100644 doc/tools.rst delete mode 100644 doc/tutorial.rst diff --git a/README.md b/README.md index 374fc3e4f3..695f00be0a 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,13 @@ implementation on top of `pymongo`. PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. PyMongo follows [semantic versioning](https://semver.org/spec/v2.0.0.html) for its releases. +## Documentation + +Documentation is available at +[mongodb.com](https://www.mongodb.com/docs/languages/python/pymongo-driver/current/). + +[API documentation](https://pymongo.readthedocs.io/en/stable/api/) and the [full changelog](https://pymongo.readthedocs.io/en/stable/changelog.html) for each release is available at [readthedocs.io](https://pymongo.readthedocs.io/en/stable/index.html). + ## Support / Feedback For issues with, questions about, or feedback for PyMongo, please look @@ -191,13 +198,6 @@ ObjectId('4aba160ee23f6b543e000002') [8, 11] ``` -## Documentation - -Documentation is available at -[pymongo.readthedocs.io](https://pymongo.readthedocs.io/en/stable/). - -See the [contributing guide](./CONTRIBUTING.md#documentation) for how to build the documentation. - ## Learning Resources - MongoDB Learn - [Python diff --git a/bson/__init__.py b/bson/__init__.py index 790ac06ef1..b655e30c2c 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -58,10 +58,10 @@ the microsecond field is truncated. .. [#dt2] all datetime.datetime instances are encoded as UTC. By default, they are decoded as *naive* but timezone aware datetimes are also supported. - See :doc:`/examples/datetimes` for examples. + See `Dates and Times `_ for examples. .. [#dt3] To enable decoding a bson UTC datetime to a :class:`~bson.datetime_ms.DatetimeMS` - instance see :ref:`handling-out-of-range-datetimes`. -.. [#uuid] For :py:class:`uuid.UUID` encoding and decoding behavior see :doc:`/examples/uuid`. + instance see `handling out of range datetimes `_. +.. [#uuid] For :py:class:`uuid.UUID` encoding and decoding behavior see ``_. .. [#re] :class:`~bson.regex.Regex` instances and regular expression objects from ``re.compile()`` are both saved as BSON regular expressions. BSON regular expressions are decoded as :class:`~bson.regex.Regex` diff --git a/bson/binary.py b/bson/binary.py index 693b838b80..b48ae4fcc6 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -79,7 +79,7 @@ class UuidRepresentation: :class:`~bson.binary.Binary` instance will be returned instead of a :class:`uuid.UUID` instance. - See :ref:`unspecified-representation-details` for details. + See `unspecified representation details `_ for details. .. versionadded:: 3.11 """ @@ -91,7 +91,7 @@ class UuidRepresentation: and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`UUID_SUBTYPE`. - See :ref:`standard-representation-details` for details. + See `standard representation details `_ for details. .. versionadded:: 3.11 """ @@ -103,7 +103,7 @@ class UuidRepresentation: and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`OLD_UUID_SUBTYPE`. - See :ref:`python-legacy-representation-details` for details. + See `python legacy representation details `_ for details. .. versionadded:: 3.11 """ @@ -115,7 +115,7 @@ class UuidRepresentation: and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the Java driver's legacy byte order. - See :ref:`java-legacy-representation-details` for details. + See `Java Legacy UUID `_ for details. .. versionadded:: 3.11 """ @@ -127,7 +127,7 @@ class UuidRepresentation: and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the C# driver's legacy byte order. - See :ref:`csharp-legacy-representation-details` for details. + See `C# Legacy UUID `_ for details. .. versionadded:: 3.11 """ @@ -328,7 +328,7 @@ def from_uuid( :param uuid_representation: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. - See :ref:`handling-uuid-data-example` for details. + See `UUID representations `_ for details. .. versionadded:: 3.11 """ @@ -377,7 +377,7 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI :param uuid_representation: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. - See :ref:`handling-uuid-data-example` for details. + See `UUID representations `_ for details. .. versionadded:: 3.11 """ diff --git a/bson/codec_options.py b/bson/codec_options.py index 0428cf843f..add5416a5b 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -57,7 +57,7 @@ class TypeEncoder(abc.ABC): Codec classes must implement the ``python_type`` attribute, and the ``transform_python`` method to support encoding. - See :ref:`custom-type-type-codec` documentation for an example. + See `encode data with type codecs `_ documentation for an example. """ @abc.abstractproperty @@ -76,7 +76,7 @@ class TypeDecoder(abc.ABC): Codec classes must implement the ``bson_type`` attribute, and the ``transform_bson`` method to support decoding. - See :ref:`custom-type-type-codec` documentation for an example. + See `encode data with type codecs `_ documentation for an example. """ @abc.abstractproperty @@ -98,7 +98,7 @@ class TypeCodec(TypeEncoder, TypeDecoder): ``bson_type`` attribute, and the ``transform_bson`` method to support decoding. - See :ref:`custom-type-type-codec` documentation for an example. + See `encode data with type codecs `_ documentation for an example. """ @@ -118,7 +118,7 @@ class TypeRegistry: >>> type_registry = TypeRegistry([Codec1, Codec2, Codec3, ...], ... fallback_encoder) - See :ref:`custom-type-type-registry` documentation for an example. + See `add codec to the type registry `_ documentation for an example. :param type_codecs: iterable of type codec instances. If ``type_codecs`` contains multiple codecs that transform a single @@ -128,7 +128,7 @@ class TypeRegistry: type. :param fallback_encoder: callable that accepts a single, unencodable python value and transforms it into a type that - :mod:`bson` can encode. See :ref:`fallback-encoder-callable` + :mod:`bson` can encode. See `define a fallback encoder `_ documentation for an example. """ @@ -327,10 +327,10 @@ def __init__(self, *args, **kwargs): >>> doc._id ObjectId('5b3016359110ea14e8c58b93') - See :doc:`/examples/datetimes` for examples using the `tz_aware` and + See `Dates and Times `_ for examples using the `tz_aware` and `tzinfo` options. - See :doc:`/examples/uuid` for examples using the `uuid_representation` + See `UUID `_ for examples using the `uuid_representation` option. :param document_class: BSON documents returned in queries will be decoded @@ -344,7 +344,7 @@ def __init__(self, *args, **kwargs): :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New applications should consider setting this to :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. + compatibility. See `UUID representations `_ for details. :param unicode_decode_error_handler: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py index 679524cb60..2047bd30b2 100644 --- a/bson/datetime_ms.py +++ b/bson/datetime_ms.py @@ -51,7 +51,7 @@ def __init__(self, value: Union[int, datetime.datetime]): To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in :class:`~bson.codec_options.CodecOptions` must be set to 'datetime_ms' or - 'datetime_auto'. See :ref:`handling-out-of-range-datetimes` for + 'datetime_auto'. See `handling out of range datetimes `_ for details. :param value: An instance of :class:`datetime.datetime` to be diff --git a/bson/json_util.py b/bson/json_util.py index 1a3b0bd833..8151226a26 100644 --- a/bson/json_util.py +++ b/bson/json_util.py @@ -281,7 +281,7 @@ def __init__(self, *args: Any, **kwargs: Any): return DatetimeMS objects when the underlying datetime is out-of-range and 'datetime_clamp' to clamp to the minimum and maximum possible datetimes. Defaults to 'datetime'. See - :ref:`handling-out-of-range-datetimes` for details. + `handling out of range datetimes `_ for details. :param args: arguments to :class:`~bson.codec_options.CodecOptions` :param kwargs: arguments to :class:`~bson.codec_options.CodecOptions` diff --git a/doc/async-tutorial.rst b/doc/async-tutorial.rst deleted file mode 100644 index b3e33e4b5c..0000000000 --- a/doc/async-tutorial.rst +++ /dev/null @@ -1,425 +0,0 @@ -Async Tutorial -============== - - -.. code-block:: pycon - - from pymongo import AsyncMongoClient - - client = AsyncMongoClient() - await client.drop_database("test-database") - -This tutorial is intended as an introduction to working with -**MongoDB** and **PyMongo** using the asynchronous API. - -Prerequisites -------------- -Before we start, make sure that you have the **PyMongo** distribution -:doc:`installed `. In the Python shell, the following -should run without raising an exception: - -.. code-block:: pycon - - >>> import pymongo - -This tutorial also assumes that a MongoDB instance is running on the -default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you -can start it like so: - -.. code-block:: bash - - $ mongod - -Making a Connection with AsyncMongoClient ------------------------------------------ -The first step when working with **PyMongo** is to create a -:class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` to the running **mongod** -instance. Doing so is easy: - -.. code-block:: pycon - - >>> from pymongo import AsyncMongoClient - >>> client = AsyncMongoClient() - -The above code will connect on the default host and port. We can also -specify the host and port explicitly, as follows: - -.. code-block:: pycon - - >>> client = AsyncMongoClient("localhost", 27017) - -Or use the MongoDB URI format: - -.. code-block:: pycon - - >>> client = AsyncMongoClient("mongodb://localhost:27017/") - -By default, :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` only connects to the database on its first operation. -To explicitly connect before performing an operation, use :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.aconnect`: - -.. code-block:: pycon - - >>> client = await AsyncMongoClient().aconnect() - -Getting a Database ------------------- -A single instance of MongoDB can support multiple independent -`databases `_. When -working with PyMongo you access databases using attribute style access -on :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` instances: - -.. code-block:: pycon - - >>> db = client.test_database - -If your database name is such that using attribute style access won't -work (like ``test-database``), you can use dictionary style access -instead: - -.. code-block:: pycon - - >>> db = client["test-database"] - -Getting a Collection --------------------- -A `collection `_ is a -group of documents stored in MongoDB, and can be thought of as roughly -the equivalent of a table in a relational database. Getting a -collection in PyMongo works the same as getting a database: - -.. code-block:: pycon - - >>> collection = db.test_collection - -or (using dictionary style access): - -.. code-block:: pycon - - >>> collection = db["test-collection"] - -An important note about collections (and databases) in MongoDB is that -they are created lazily - none of the above commands have actually -performed any operations on the MongoDB server. Collections and -databases are created when the first document is inserted into them. - -Documents ---------- -Data in MongoDB is represented (and stored) using JSON-style -documents. In PyMongo we use dictionaries to represent documents. As -an example, the following dictionary might be used to represent a blog -post: - -.. code-block:: pycon - - >>> import datetime - >>> post = { - ... "author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.now(tz=datetime.timezone.utc), - ... } - -Note that documents can contain native Python types (like -:class:`datetime.datetime` instances) which will be automatically -converted to and from the appropriate `BSON -`_ types. - -Inserting a Document --------------------- -To insert a document into a collection we can use the -:meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_one` method: - -.. code-block:: pycon - - >>> posts = db.posts - >>> post_id = (await posts.insert_one(post)).inserted_id - >>> post_id - ObjectId('...') - -When a document is inserted a special key, ``"_id"``, is automatically -added if the document doesn't already contain an ``"_id"`` key. The value -of ``"_id"`` must be unique across the -collection. :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_one` returns an -instance of :class:`~pymongo.results.InsertOneResult`. For more information -on ``"_id"``, see the `documentation on _id -`_. - -After inserting the first document, the *posts* collection has -actually been created on the server. We can verify this by listing all -of the collections in our database: - -.. code-block:: pycon - - >>> await db.list_collection_names() - ['posts'] - -Getting a Single Document With :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one` ------------------------------------------------------------------------------------------------- -The most basic type of query that can be performed in MongoDB is -:meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one`. This method returns a -single document matching a query (or ``None`` if there are no -matches). It is useful when you know there is only one matching -document, or are only interested in the first match. Here we use -:meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one` to get the first -document from the posts collection: - -.. code-block:: pycon - - >>> import pprint - >>> pprint.pprint(await posts.find_one()) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -The result is a dictionary matching the one that we inserted previously. - -.. note:: The returned document contains an ``"_id"``, which was - automatically added on insert. - -:meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one` also supports querying -on specific elements that the resulting document must match. To limit -our results to a document with author "Mike" we do: - -.. code-block:: pycon - - >>> pprint.pprint(await posts.find_one({"author": "Mike"})) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -If we try with a different author, like "Eliot", we'll get no result: - -.. code-block:: pycon - - >>> await posts.find_one({"author": "Eliot"}) - >>> - -.. _async-querying-by-objectid: - -Querying By ObjectId --------------------- -We can also find a post by its ``_id``, which in our example is an ObjectId: - -.. code-block:: pycon - - >>> post_id - ObjectId(...) - >>> pprint.pprint(await posts.find_one({"_id": post_id})) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -Note that an ObjectId is not the same as its string representation: - -.. code-block:: pycon - - >>> post_id_as_str = str(post_id) - >>> await posts.find_one({"_id": post_id_as_str}) # No result - >>> - -A common task in web applications is to get an ObjectId from the -request URL and find the matching document. It's necessary in this -case to **convert the ObjectId from a string** before passing it to -``find_one``:: - - from bson.objectid import ObjectId - - # The web framework gets post_id from the URL and passes it as a string - async def get(post_id): - # Convert from string to ObjectId: - document = await client.db.collection.find_one({'_id': ObjectId(post_id)}) - -.. seealso:: :ref:`web-application-querying-by-objectid` - -Bulk Inserts ------------- -In order to make querying a little more interesting, let's insert a -few more documents. In addition to inserting a single document, we can -also perform *bulk insert* operations, by passing a list as the -first argument to :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_many`. -This will insert each document in the list, sending only a single -command to the server: - -.. code-block:: pycon - - >>> new_posts = [ - ... { - ... "author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14), - ... }, - ... { - ... "author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45), - ... }, - ... ] - >>> result = await posts.insert_many(new_posts) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...')] - -There are a couple of interesting things to note about this example: - - - The result from :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_many` now - returns two :class:`~bson.objectid.ObjectId` instances, one for - each inserted document. - - ``new_posts[1]`` has a different "shape" than the other posts - - there is no ``"tags"`` field and we've added a new field, - ``"title"``. This is what we mean when we say that MongoDB is - *schema-free*. - -Querying for More Than One Document ------------------------------------ -To get more than a single document as the result of a query we use the -:meth:`~pymongo.asynchronous.collection.AsyncCollection.find` -method. :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` returns a -:class:`~pymongo.asynchronous.cursor.AsyncCursor` instance, which allows us to iterate -over all matching documents. For example, we can iterate over every -document in the ``posts`` collection: - -.. code-block:: pycon - - >>> async for post in posts.find(): - ... pprint.pprint(post) - ... - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['bulk', 'insert'], - 'text': 'Another post!'} - {'_id': ObjectId('...'), - 'author': 'Eliot', - 'date': datetime.datetime(...), - 'text': 'and pretty easy too!', - 'title': 'MongoDB is fun'} - -Just like we did with :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one`, -we can pass a document to :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` -to limit the returned results. Here, we get only those documents whose -author is "Mike": - -.. code-block:: pycon - - >>> async for post in posts.find({"author": "Mike"}): - ... pprint.pprint(post) - ... - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['bulk', 'insert'], - 'text': 'Another post!'} - -Counting --------- -If we just want to know how many documents match a query we can -perform a :meth:`~pymongo.asynchronous.collection.AsyncCollection.count_documents` operation -instead of a full query. We can get a count of all of the documents -in a collection: - -.. code-block:: pycon - - >>> await posts.count_documents({}) - 3 - -or just of those documents that match a specific query: - -.. code-block:: pycon - - >>> await posts.count_documents({"author": "Mike"}) - 2 - -Range Queries -------------- -MongoDB supports many different types of `advanced queries -`_. As an -example, lets perform a query where we limit results to posts older -than a certain date, but also sort the results by author: - -.. code-block:: pycon - - >>> d = datetime.datetime(2009, 11, 12, 12) - >>> async for post in posts.find({"date": {"$lt": d}}).sort("author"): - ... pprint.pprint(post) - ... - {'_id': ObjectId('...'), - 'author': 'Eliot', - 'date': datetime.datetime(...), - 'text': 'and pretty easy too!', - 'title': 'MongoDB is fun'} - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['bulk', 'insert'], - 'text': 'Another post!'} - -Here we use the special ``"$lt"`` operator to do a range query, and -also call :meth:`~pymongo.asynchronous.cursor.AsyncCursor.sort` to sort the results -by author. - -Indexing --------- - -Adding indexes can help accelerate certain queries and can also add additional -functionality to querying and storing documents. In this example, we'll -demonstrate how to create a `unique index -`_ on a key that rejects -documents whose value for that key already exists in the index. - -First, we'll need to create the index: - -.. code-block:: pycon - - >>> result = await db.profiles.create_index([("user_id", pymongo.ASCENDING)], unique=True) - >>> sorted(list(await db.profiles.index_information())) - ['_id_', 'user_id_1'] - -Notice that we have two indexes now: one is the index on ``_id`` that MongoDB -creates automatically, and the other is the index on ``user_id`` we just -created. - -Now let's set up some user profiles: - -.. code-block:: pycon - - >>> user_profiles = [{"user_id": 211, "name": "Luke"}, {"user_id": 212, "name": "Ziltoid"}] - >>> result = await db.profiles.insert_many(user_profiles) - -The index prevents us from inserting a document whose ``user_id`` is already in -the collection: - -.. code-block:: pycon - - >>> new_profile = {"user_id": 213, "name": "Drew"} - >>> duplicate_profile = {"user_id": 212, "name": "Tommy"} - >>> result = await db.profiles.insert_one(new_profile) # This is fine. - >>> result = await db.profiles.insert_one(duplicate_profile) - Traceback (most recent call last): - DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } - -.. seealso:: The MongoDB documentation on `indexes `_ - -Task Cancellation ------------------ -`Cancelling `_ an asyncio Task -that is running a PyMongo operation is treated as a fatal interrupt. Any connections, cursors, and transactions -involved in a cancelled Task will be safely closed and cleaned up as part of the cancellation. If those resources are -also used elsewhere, attempting to utilize them after the cancellation will result in an error. diff --git a/doc/atlas.rst b/doc/atlas.rst deleted file mode 100644 index 19ba9732f2..0000000000 --- a/doc/atlas.rst +++ /dev/null @@ -1,43 +0,0 @@ -Using PyMongo with MongoDB Atlas -================================ - -`Atlas `_ is MongoDB, Inc.'s hosted MongoDB as a -service offering. To connect to Atlas, pass the connection string provided by -Atlas to :class:`~pymongo.mongo_client.MongoClient`:: - - client = pymongo.MongoClient() - -Connections to Atlas require TLS/SSL. - -.. warning:: Industry best practices recommend, and some regulations require, - the use of TLS 1.1 or newer. Though no application changes are required for - PyMongo to make use of the newest protocols, some operating systems or - versions may not provide an OpenSSL version new enough to support them. - - Users of macOS older than 10.13 (High Sierra) will need to install Python - from `python.org`_, `homebrew`_, `macports`_, or another similar source. - - Users of Linux or other non-macOS Unix can check their OpenSSL version like - this:: - - $ openssl version - - If the version number is less than 1.0.1 support for TLS 1.1 or newer is not - available. Contact your operating system vendor for a solution or upgrade to - a newer distribution. - - You can check your Python interpreter by installing the `requests`_ module - and executing the following command:: - - python -c "import requests; print(requests.get('https://www.howsmyssl.com/a/check', verify=False).json()['tls_version'])" - - You should see "TLS 1.X" where X is >= 1. - - You can read more about TLS versions and their security implications here: - - ``_ - -.. _python.org: https://www.python.org/downloads/ -.. _homebrew: https://brew.sh/ -.. _macports: https://www.macports.org/ -.. _requests: https://pypi.python.org/pypi/requests diff --git a/doc/changelog.rst b/doc/changelog.rst index d88b114fc6..25d412364f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -115,7 +115,7 @@ PyMongo 4.12 brings a number of changes including: - Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to :class:`~pymongo.encryption_options.AutoEncryptionOpts`. - Support for $lookup in CSFLE and QE supported on MongoDB 8.1+. -- pymongocrypt>=1.13 is now required for :ref:`In-Use Encryption` support. +- pymongocrypt>=1.13 is now required for `In-Use Encryption `_ support. - Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.rename_by_name` and :meth:`gridfs.grid_file.GridFSBucket.rename_by_name` for more performant renaming of a file with multiple revisions. - Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.delete_by_name` and :meth:`gridfs.grid_file.GridFSBucket.delete_by_name` @@ -177,7 +177,7 @@ PyMongo 4.11 brings a number of changes including: - Dropped support for Python 3.8 and PyPy 3.9. - Dropped support for MongoDB 3.6. - Dropped support for the MONGODB-CR authenticate mechanism, which is no longer supported by MongoDB 4.0+. -- pymongocrypt>=1.12 is now required for :ref:`In-Use Encryption` support. +- pymongocrypt>=1.12 is now required for `In-Use Encryption `_ support. - Added support for free-threaded Python with the GIL disabled. For more information see: `Free-threaded CPython `_. We do not yet support free-threaded Python on Windows (`PYTHON-5027`_) or with In-Use Encryption (`PYTHON-5024`_). @@ -299,7 +299,7 @@ PyMongo 4.9 brings a number of improvements including: ``sparsity`` and ``trim_factor`` are now optional in :class:`~pymongo.encryption_options.RangeOpts`. - Added support for the "delegated" option for the KMIP ``master_key`` in :meth:`~pymongo.encryption.ClientEncryption.create_data_key`. -- pymongocrypt>=1.10 is now required for :ref:`In-Use Encryption` support. +- pymongocrypt>=1.10 is now required for `In-Use Encryption `_ support. - Added :meth:`~pymongo.cursor.Cursor.to_list` to :class:`~pymongo.cursor.Cursor`, :class:`~pymongo.command_cursor.CommandCursor`, :class:`~pymongo.asynchronous.cursor.AsyncCursor`, @@ -309,7 +309,7 @@ PyMongo 4.9 brings a number of improvements including: and :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient`, enabling users to perform insert, update, and delete operations against mixed namespaces in a minimized number of round trips. - Please see :doc:`examples/client_bulk` for more information. + Please see `Client Bulk Write `_ for more information. - Added support for the ``namespace`` parameter to the :class:`~pymongo.operations.InsertOne`, :class:`~pymongo.operations.ReplaceOne`, @@ -339,7 +339,7 @@ PyMongo 4.9 brings a number of improvements including: unction-as-a-service (FaaS) like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions. On some FaaS systems, there is a ``fork()`` operation at function startup. By delaying the connection to the first operation, we avoid a deadlock. See - :ref:`pymongo-fork-safe` for more information. + `multiple forks `_ for more information. Issues Resolved @@ -446,10 +446,10 @@ PyMongo 4.7 brings a number of improvements including: using an OpenID Connect (OIDC) access token. The driver supports OIDC for workload identity, defined as an identity you assign to a software workload (such as an application, service, script, or container) to authenticate and access other services and resources. - Please see :doc:`examples/authentication` for more information. + Please see `Authentication `_ for more information. - Added support for Python's `native logging library `_, enabling developers to customize the verbosity of log messages for their applications. - Please see :doc:`examples/logging` for more information. + Please see `Logging `_ for more information. - Significantly improved the performance of encoding BSON documents to JSON. - Added support for named KMS providers for client side field level encryption. Previously supported KMS providers were only: aws, azure, gcp, kmip, and local. @@ -608,7 +608,7 @@ PyMongo 4.6 brings a number of improvements including: "mongodb://example.com?tls=true" is now a valid URI. - Fixed a bug where PyMongo would incorrectly promote all cursors to exhaust cursors when connected to load balanced MongoDB clusters or Serverless clusters. -- Added the :ref:`network-compression-example` documentation page. +- Added the `network compression `_ documentation page. - Added more timeout information to network errors. Issues Resolved @@ -633,7 +633,7 @@ PyMongo 4.5 brings a number of improvements including: - Added :meth:`~pymongo.database.Database.cursor_command` and :meth:`~pymongo.command_cursor.CommandCursor.try_next` to support executing an arbitrary command that returns a cursor. -- ``cryptography`` 2.5 or later is now required for :ref:`OCSP` support. +- ``cryptography`` 2.5 or later is now required for `OCSP `_ support. - Improved bson encoding and decoding performance by up to 134%(`PYTHON-3729`_, `PYTHON-3797`_, `PYTHON-3816`_, `PYTHON-3817`_, `PYTHON-3820`_, `PYTHON-3824`_, and `PYTHON-3846`_). .. warning:: PyMongo no longer supports PyPy3 versions older than 3.8. Users @@ -694,7 +694,7 @@ PyMongo 4.4 brings a number of improvements including: :class:`~pymongo.encryption_options.RangeOpts`, and :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW` as part of the experimental Queryable Encryption beta. -- pymongocrypt 1.6.0 or later is now required for :ref:`In-Use Encryption` support. MongoDB +- pymongocrypt 1.6.0 or later is now required for `In-Use Encryption `_ support. MongoDB Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and PyMongo 4.4+. @@ -722,9 +722,9 @@ Changes in Version 4.3.3 (2022/11/17) Version 4.3.3 documents support for the following: -- :ref:`CSFLE on-demand credentials` for cloud KMS providers. -- Authentication support for :ref:`EKS Clusters`. -- Added the :ref:`timeout-example` example page to improve the documentation +- `CSFLE on-demand credentials `_ for cloud KMS providers. +- Authentication support for `EKS Clusters `_. +- Added the `timeout `_ example page to improve the documentation for :func:`pymongo.timeout`. Bug Fixes @@ -759,7 +759,7 @@ PyMongo 4.3 brings a number of improvements including: - Added support for decoding BSON datetimes outside of the range supported by Python's :class:`~datetime.datetime` builtin. See - :ref:`handling-out-of-range-datetimes` for examples, as well as + `handling out of range datetimes `_ for examples, as well as :class:`bson.datetime_ms.DatetimeMS`, :class:`bson.codec_options.DatetimeConversion`, and :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion`` @@ -768,7 +768,7 @@ PyMongo 4.3 brings a number of improvements including: after a :py:func:`os.fork` to reduce the frequency of deadlocks. Note that deadlocks are still possible because libraries that PyMongo depends like OpenSSL cannot be made fork() safe in multithreaded applications. - (`PYTHON-2484`_). For more info see :ref:`pymongo-fork-safe`. + (`PYTHON-2484`_). For more info see `multiple forks `_. - When used with MongoDB 6.0+, :class:`~pymongo.change_stream.ChangeStream` s now allow for new types of events (such as DDL and C2C replication events) to be recorded with the new parameter ``show_expanded_events`` @@ -778,7 +778,7 @@ PyMongo 4.3 brings a number of improvements including: credentials expire or an error is encountered. - When using the ``MONGODB-AWS`` authentication mechanism with the ``aws`` extra, the behavior of credential fetching has changed with - ``pymongo_auth_aws>=1.1.0``. Please see :doc:`examples/authentication` for + ``pymongo_auth_aws>=1.1.0``. Please see `Authentication `_ for more information. Bug fixes @@ -811,9 +811,9 @@ PyMongo 4.2 brings a number of improvements including: - Support for MongoDB 6.0. - Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking - changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. + changes may be made before the final release. See `automatic queryable client-side encryption `_ for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout - to an entire block of pymongo operations. See :ref:`timeout-example` for examples. + to an entire block of pymongo operations. See `timeout `_ for examples. - Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. - Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when the error was caused by a timeout. @@ -861,7 +861,7 @@ Unavoidable breaking changes encryption support. - :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, - the count command was not included in V1 of the :ref:`versioned-api-ref`. + the count command was not included in V1 of the `Stable API `_. Users of the Stable API with estimated_document_count are recommended to upgrade their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors (`PYTHON-3167`_). @@ -924,7 +924,7 @@ Changes in Version 4.1 (2021/12/07) PyMongo 4.1 brings a number of improvements including: -- Type Hinting support (formerly provided by `pymongo-stubs`_). See :doc:`examples/type_hints` for more information. +- Type Hinting support (formerly provided by `pymongo-stubs`_). See `Type Hints `_ for more information. - Added support for the ``comment`` parameter to all helpers. For example see :meth:`~pymongo.collection.Collection.insert_one`. - Added support for the ``let`` parameter to @@ -1013,7 +1013,7 @@ Breaking Changes in 4.0 :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. - See :ref:`handling-uuid-data-example` for details. + See `UUID representations `_ for details. - Removed the ``waitQueueMultiple`` keyword argument to :class:`~pymongo.mongo_client.MongoClient` and removed :exc:`pymongo.errors.ExceededMaxWaiters`. @@ -1352,7 +1352,7 @@ Notable improvements - Added support for MongoDB 5.0. - Support for MongoDB Stable API, see :class:`~pymongo.server_api.ServerApi`. -- Support for snapshot reads on secondaries (see :ref:`snapshot-reads-ref`). +- Support for snapshot reads on secondaries (see `snapshot reads `_). - Support for Azure and GCP KMS providers for client side field level encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`, :class:`~pymongo.encryption_options.AutoEncryptionOpts`, @@ -1409,7 +1409,7 @@ Deprecations same API. - Deprecated the :mod:`pymongo.messeage` module. - Deprecated the ``ssl_keyfile`` and ``ssl_certfile`` URI options in favor - of ``tlsCertificateKeyFile`` (see :doc:`examples/tls`). + of ``tlsCertificateKeyFile`` (see `TLS `_). .. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466 .. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690 @@ -1507,12 +1507,12 @@ Changes in Version 3.11.0 (2020/07/30) Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. Highlights include: -- Support for :ref:`OCSP` (Online Certificate Status Protocol). +- Support for `OCSP `_ (Online Certificate Status Protocol). - Support for `PyOpenSSL `_ as an - alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` + alternative TLS implementation. PyOpenSSL is required for `OCSP `_ support. It will also be installed when using the "tls" extra if the version of Python in use is older than 2.7.9. -- Support for the :ref:`MONGODB-AWS` authentication mechanism. +- Support for the `MONGODB-AWS `_ authentication mechanism. - Support for the ``directConnection`` URI option and kwarg to :class:`~pymongo.mongo_client.MongoClient`. - Support for speculative authentication attempts in connection handshakes @@ -1538,7 +1538,7 @@ Highlights include: - Added support for :data:`bson.binary.UuidRepresentation.UNSPECIFIED` and ``MongoClient(uuidRepresentation='unspecified')`` which will become the default UUID representation starting in PyMongo 4.0. See - :ref:`handling-uuid-data-example` for details. + `UUID representations `_ for details. - New methods :meth:`bson.binary.Binary.from_uuid` and :meth:`bson.binary.Binary.as_uuid`. - Added the ``background`` parameter to @@ -1622,7 +1622,7 @@ Version 3.10 includes a number of improvements and bug fixes. Highlights include: - Support for Client-Side Field Level Encryption with MongoDB 4.2. See - :doc:`examples/encryption` for examples. + `Client-Side Field Level Encryption `_ for examples. - Support for Python 3.8. - Added :attr:`pymongo.client_session.ClientSession.in_transaction`. - Do not hold the Topology lock while creating connections in a MongoClient's @@ -1648,7 +1648,7 @@ Changes in Version 3.9.0 (2019/08/13) Version 3.9 adds support for MongoDB 4.2. Highlights include: - Support for MongoDB 4.2 sharded transactions. Sharded transactions have - the same API as replica set transactions. See :ref:`transactions-ref`. + the same API as replica set transactions. See `Transactions `_. - New method :meth:`pymongo.client_session.ClientSession.with_transaction` to support conveniently running a transaction in a session with automatic retries and at-most-once semantics. @@ -1776,7 +1776,7 @@ Changes in Version 3.8.0 (2019/04/22) - Custom types can now be directly encoded to, and decoded from MongoDB using the :class:`~bson.codec_options.TypeCodec` and :class:`~bson.codec_options.TypeRegistry` APIs. For more information, see - the :doc:`custom type example `. + `Custom Types `_. - Attempting a multi-document transaction on a sharded cluster now raises a :exc:`~pymongo.errors.ConfigurationError`. - :meth:`pymongo.cursor.Cursor.distinct` and @@ -1806,7 +1806,7 @@ Changes in Version 3.8.0 (2019/04/22) - Iterating over a :class:`~bson.raw_bson.RawBSONDocument` now maintains the same field order of the underlying raw BSON document. - Applications can now register a custom server selector. For more information - see the :doc:`server selector example `. + see `Customize Server Selection `_. - The connection pool now implements a LIFO policy. Unavoidable breaking changes: @@ -1874,9 +1874,9 @@ Changes in Version 3.7.0 (2018/06/26) Version 3.7 adds support for MongoDB 4.0. Highlights include: - Support for single replica set multi-document ACID transactions. - See :ref:`transactions-ref`. + See `transactions `_. - Support for wire protocol compression via the new ``compressors`` URI and keyword argument to - :meth:`~pymongo.mongo_client.MongoClient`. See :ref:`network-compression-example` for details. + :meth:`~pymongo.mongo_client.MongoClient`. See `network compression `_ for details. - Support for Python 3.7. - New count methods, :meth:`~pymongo.collection.Collection.count_documents` and :meth:`~pymongo.collection.Collection.estimated_document_count`. @@ -1897,9 +1897,9 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: the following features and changes allow PyMongo to function when MD5 support is disabled in OpenSSL by the FIPS Object Module: - - Support for the :ref:`SCRAM-SHA-256 ` - authentication mechanism. The :ref:`GSSAPI `, - :ref:`PLAIN `, and :ref:`MONGODB-X509 ` + - Support for the `SCRAM-SHA-256 `_ + authentication mechanism. The `GSSAPI `_, + `PLAIN `_, and `MONGODB-X509 `_ mechanisms can also be used to avoid issues with OpenSSL in FIPS environments. - MD5 checksums are now optional in GridFS. See the ``disable_md5`` option @@ -1917,7 +1917,7 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: class which is a subclass of :class:`~pymongo.change_stream.ChangeStream`. - SCRAM client and server keys are cached for improved performance, following `RFC 5802 `_. -- If not specified, the authSource for the :ref:`PLAIN ` +- If not specified, the authSource for the `PLAIN `_ authentication mechanism defaults to $external. - wtimeoutMS is once again supported as a URI option. - When using unacknowledged write concern and connected to MongoDB server @@ -2167,7 +2167,7 @@ Changes and Deprecations: consistent across all MongoDB versions. - In Python 3, :meth:`~bson.json_util.loads` now automatically decodes JSON $binary with a subtype of 0 into :class:`bytes` instead of - :class:`~bson.binary.Binary`. See the :doc:`/python3` for more details. + :class:`~bson.binary.Binary`. - :meth:`~bson.json_util.loads` now raises ``TypeError`` or ``ValueError`` when parsing JSON type wrappers with values of the wrong type or any extra keys. @@ -2196,7 +2196,7 @@ Highlights include: - Complete support for MongoDB 3.4: - - Unicode aware string comparison using :doc:`examples/collations`. + - Unicode aware string comparison using `Collation `_. - Support for the new :class:`~bson.decimal128.Decimal128` BSON type. - A new maxStalenessSeconds read preference option. - A username is no longer required for the MONGODB-X509 authentication @@ -2534,7 +2534,7 @@ In PyMongo 3.0, the ``use_greenlets`` option is gone. To use PyMongo with Gevent simply call ``gevent.monkey.patch_all()``. For more information, -see :doc:`PyMongo's Gevent documentation `. +see `Gevent `_. :class:`~pymongo.mongo_client.MongoClient` changes .................................................. @@ -2578,7 +2578,7 @@ the list, and used it until a network error prompted it to re-evaluate all mongoses' latencies and reconnect to one of them. In PyMongo 3, the client monitors its network latency to all the mongoses continuously, and distributes operations evenly among those with the lowest latency. -See :ref:`mongos-load-balancing` for more information. +See `load balancing `_ for more information. The client methods ``start_request``, ``in_request``, and ``end_request`` are removed, and so is the ``auto_start_request`` option. Requests were @@ -2586,7 +2586,7 @@ designed to make read-your-writes consistency more likely with the ``w=0`` write concern. Additionally, a thread in a request used the same member for all secondary reads in a replica set. To ensure read-your-writes consistency in PyMongo 3.0, do not override the default write concern with ``w=0``, and -do not override the default :ref:`read preference ` of +do not override the default `read preference `_ of PRIMARY. Support for the ``slaveOk`` (or ``slave_okay``), ``safe``, and @@ -2600,8 +2600,7 @@ The ``max_pool_size`` option has been removed. It is replaced by the ``maxPoolSize`` MongoDB URI option. ``maxPoolSize`` is now a supported URI option in PyMongo and can be passed as a keyword argument. -The ``copy_database`` method is removed, see the -:doc:`copy_database examples ` for alternatives. +The ``copy_database`` method is removed, see `Copy and Clone Databases `_ for alternatives. The ``disconnect`` method is removed. Use :meth:`~pymongo.mongo_client.MongoClient.close` instead. @@ -2938,7 +2937,7 @@ Version 2.9.4 fixes issues reported since the release of 2.9.3. - Fixed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` handling of uuidRepresentation. - Fixed building and testing the documentation with python 3.x. -- New documentation for :doc:`examples/tls` and :doc:`atlas`. +- New documentation for `TLS `_ and `Atlas `_. Issues Resolved ............... @@ -3177,7 +3176,7 @@ PyMongo 2.7 is a major release with a large number of new features and bug fixes. Highlights include: - Full support for MongoDB 2.6. -- A new :doc:`bulk write operations API `. +- A new `bulk write operations API `_. - Support for server side query timeouts using :meth:`~pymongo.cursor.Cursor.max_time_ms`. - Support for writing :meth:`~pymongo.collection.Collection.aggregate` @@ -3188,7 +3187,7 @@ fixes. Highlights include: error details from the server. - A new GridFS :meth:`~gridfs.GridFS.find` method that returns a :class:`~gridfs.grid_file.GridOutCursor`. -- Greatly improved :doc:`support for mod_wsgi ` when using +- Greatly improved `support for mod_wsgi `_ when using PyMongo's C extensions. Read `Jesse's blog post `_ for details. - Improved C extension support for ARM little endian. @@ -3268,14 +3267,14 @@ Important new features: ``waitQueueTimeoutMS`` is set, an operation that blocks waiting for a socket will raise :exc:`~pymongo.errors.ConnectionFailure` after the timeout. By default ``waitQueueTimeoutMS`` is not set. - See :ref:`connection-pooling` for more information. + See `connection pooling `_ for more information. - The :meth:`~pymongo.collection.Collection.insert` method automatically splits large batches of documents into multiple insert messages based on :attr:`~pymongo.mongo_client.MongoClient.max_message_size` - Support for the exhaust cursor flag. See :meth:`~pymongo.collection.Collection.find` for details and caveats. - Support for the PLAIN and MONGODB-X509 authentication mechanisms. - See :doc:`the authentication docs ` for more + See `the authentication docs `_ for more information. - Support aggregation output as a :class:`~pymongo.cursor.Cursor`. See :meth:`~pymongo.collection.Collection.aggregate` for details. @@ -3288,7 +3287,7 @@ Important new features: to having a ``max_pool_size`` larger than necessary. Err towards a larger value.) If your application accepts the default, continue to do so. - See :ref:`connection-pooling` for more information. + See `connection pooling `_ for more information. Issues Resolved ............... @@ -3334,7 +3333,7 @@ Version 2.5 includes changes to support new features in MongoDB 2.4. Important new features: -- Support for :ref:`GSSAPI (Kerberos) authentication `. +- Support for `GSSAPI (Kerberos) `_. - Support for SSL certificate validation with hostname matching. - Support for delegated and role based authentication. - New GEOSPHERE (2dsphere) and HASHED index constants. @@ -3441,7 +3440,7 @@ Version 2.3 adds support for new features and behavior changes in MongoDB Important New Features: - Support for expanded read preferences including directing reads to tagged - servers - See :ref:`secondary-reads` for more information. + servers - See `secondary reads `_ for more information. - Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework @@ -3495,10 +3494,10 @@ to this release. Important New Features: -- Support for Python 3 - - See the :doc:`python3` for more information. +- Support for Python 3. + See `Python 3 `_ for more information. - Support for Gevent - - See :doc:`examples/gevent` for more information. + See `Gevent `_ for more information. - Improved connection pooling. See `PYTHON-287 `_. @@ -4104,7 +4103,7 @@ Other changes: - clean up all cases where :class:`~pymongo.errors.ConnectionFailure` is raised. - simplification of connection pooling - makes driver ~2x faster for - simple benchmarks. see :ref:`connection-pooling` for more information. + simple benchmarks. see `connection pooling `_ for more information. - DEPRECATED ``pool_size``, ``auto_start_request`` and ``timeout`` parameters to :class:`~pymongo.connection.Connection`. DEPRECATED :meth:`~pymongo.connection.Connection.start_request`. @@ -4171,7 +4170,7 @@ Changes in Version 1.2 (2009/12/09) get around some issues with queries on fields named ``query`` - enforce 4MB document limit on the client side - added :meth:`~pymongo.collection.Collection.map_reduce` helper - see - :doc:`example ` + `Aggregation `_ - added :meth:`~pymongo.cursor.Cursor.distinct` method on :class:`~pymongo.cursor.Cursor` instances to allow distinct with queries diff --git a/doc/common-issues.rst b/doc/common-issues.rst deleted file mode 100644 index b300bac784..0000000000 --- a/doc/common-issues.rst +++ /dev/null @@ -1,96 +0,0 @@ -Frequently Encountered Issues -============================= - -Also see the :ref:`TLSErrors` section. - -Server reports wire version X, PyMongo requires Y -------------------------------------------------- - -When one attempts to connect to a <=3.6 version server, PyMongo will throw the following error:: - - >>> client.admin.command('ping') - ... - pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 6, but this version of PyMongo requires at least 7 (MongoDB 4.0). - -This is caused by the driver being too new for the server it is being run against. -To resolve this issue either upgrade your database to version >= 4.0 or downgrade to an early version of PyMongo which supports MongoDB < 4.0. - - -'Cursor' object has no attribute '_Cursor__killed' --------------------------------------------------- - -On versions of PyMongo <3.9, when supplying invalid arguments the constructor of Cursor, -there will be a TypeError raised, and an AttributeError printed to ``stderr``. The AttributeError is not relevant, -instead look at the TypeError for debugging information:: - - >>> coll.find(wrong=1) - Exception ignored in: - ... - AttributeError: 'Cursor' object has no attribute '_Cursor__killed' - ... - TypeError: __init__() got an unexpected keyword argument 'wrong' - -To fix this, make sure that you are supplying the correct keyword arguments. -In addition, you can also upgrade to PyMongo >=3.9, which will remove the spurious error. - - -MongoClient fails ConfigurationError ------------------------------------- - -This is a common issue stemming from using incorrect keyword argument names. - - >>> client = MongoClient(wrong=1) - ... - pymongo.errors.ConfigurationError: Unknown option wrong - -To fix this, check your spelling and make sure that the keyword argument you are specifying exists. - - -DeprecationWarning: count is deprecated ---------------------------------------- - -PyMongo no longer supports :meth:`pymongo.cursor.count`. -Instead, use :meth:`pymongo.collection.count_documents`:: - - >>> client = MongoClient() - >>> d = datetime.datetime(2009, 11, 12, 12) - >>> list(client.db.coll.find({"date": {"$lt": d}}, limit=2)) - [{'_id': ObjectId('6247b058cebb8b179b7039f8'), 'date': datetime.datetime(1, 1, 1, 0, 0)}, {'_id': ObjectId('6247b059cebb8b179b7039f9'), 'date': datetime.datetime(1, 1, 1, 0, 0)}] - >>> client.db.coll.count_documents({"date": {"$lt": d}}, limit=2) - 2 - -Note that this is NOT the same as ``Cursor.count_documents`` (which does not exist), -this is a method of the Collection class, so you must call it on a collection object -or you will receive the following error:: - - >>> Cursor(MongoClient().db.coll).count() - Traceback (most recent call last): - File "", line 1, in - AttributeError: 'Cursor' object has no attribute 'count' - >>> - -Timeout when accessing MongoDB from PyMongo with tunneling ----------------------------------------------------------- - -When attempting to connect to a replica set MongoDB instance over an SSH tunnel you -will receive the following error:: - - File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1560, in count - return self._count(cmd, collation, session) - File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1504, in _count - with self._socket_for_reads() as (connection, slave_ok): - File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py", line 17, in __enter__ - return self.gen.next() - File "/Library/Python/2.7/site-packages/pymongo/mongo_client.py", line 982, in _socket_for_reads - server = topology.select_server(read_preference) - File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 224, in select_server - address)) - File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 183, in select_servers - selector, server_timeout, address) - File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 199, in _select_servers_loop - self._error_message(selector)) - pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out - -This is due to the fact that PyMongo discovers replica set members using the response from the isMaster command which -then contains the address and ports of the other members. However, these addresses and ports will not be accessible through the SSH tunnel. Thus, this behavior is unsupported. -You can, however, connect directly to a single MongoDB node using the directConnection=True option with SSH tunneling. diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst deleted file mode 100644 index 9721877d4d..0000000000 --- a/doc/compatibility-policy.rst +++ /dev/null @@ -1,62 +0,0 @@ -Compatibility Policy -==================== - -Semantic Versioning -------------------- - -PyMongo's version numbers follow `semantic versioning`_: each version number -is structured "major.minor.patch". Patch releases fix bugs, minor releases -add features (and may fix bugs), and major releases include API changes that -break backwards compatibility (and may add features and fix bugs). - -Deprecation ------------ - -Before we remove a feature in a major release, PyMongo's maintainers make an -effort to release at least one minor version that *deprecates* it. We add -"**DEPRECATED**" to the feature's documentation, and update the code to raise a -`DeprecationWarning`_. You can ensure your code is future-proof by running -your code with the latest PyMongo release and looking for DeprecationWarnings. - -The interpreter silences DeprecationWarnings by default. For example, the -following code uses the deprecated ``insert`` method but does not raise any -warning: - -.. code-block:: python - - # "insert.py" (with PyMongo 3.X) - from pymongo import MongoClient - - client = MongoClient() - client.test.test.insert({}) - -To print deprecation warnings to stderr, run python with "-Wd":: - - $ python3 -Wd insert.py - insert.py:4: DeprecationWarning: insert is deprecated. Use insert_one or insert_many instead. - client.test.test.insert({}) - -You can turn warnings into exceptions with "python -We":: - - $ python3 -We insert.py - Traceback (most recent call last): - File "insert.py", line 4, in - client.test.test.insert({}) - File "/home/durin/work/mongo-python-driver/pymongo/collection.py", line 2906, in insert - "instead.", DeprecationWarning, stacklevel=2) - DeprecationWarning: insert is deprecated. Use insert_one or insert_many instead. - -If your own code's test suite passes with "python -We" then it uses no -deprecated PyMongo features. - -.. seealso:: The Python documentation on `the warnings module`_, - and `the -W command line option`_. - -.. _semantic versioning: https://semver.org/ - -.. _DeprecationWarning: - https://docs.python.org/3/library/exceptions.html#DeprecationWarning - -.. _the warnings module: https://docs.python.org/3/library/warnings.html - -.. _the -W command line option: https://docs.python.org/3/using/cmdline.html#cmdoption-W diff --git a/doc/developer/index.rst b/doc/developer/index.rst deleted file mode 100644 index 2ce1e0536c..0000000000 --- a/doc/developer/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Developer Guide -=============== - -Technical guide for contributors to PyMongo. - -.. toctree:: - :maxdepth: 1 - - periodic_executor diff --git a/doc/developer/periodic_executor.rst b/doc/developer/periodic_executor.rst deleted file mode 100644 index 67eaa89f10..0000000000 --- a/doc/developer/periodic_executor.rst +++ /dev/null @@ -1,113 +0,0 @@ -Periodic Executors -================== - -.. currentmodule:: pymongo - -PyMongo implements a :class:`~periodic_executor.PeriodicExecutor` for two -purposes: as the background thread for :class:`~monitor.Monitor`, and to -regularly check if there are ``OP_KILL_CURSORS`` messages that must be sent to the server. - -Killing Cursors ---------------- - -An incompletely iterated :class:`~cursor.Cursor` on the client represents an -open cursor object on the server. In code like this, we lose a reference to -the cursor before finishing iteration:: - - for doc in collection.find(): - raise Exception() - -We try to send an ``OP_KILL_CURSORS`` to the server to tell it to clean up the -server-side cursor. But we must not take any locks directly from the cursor's -destructor (see `PYTHON-799`_), so we cannot safely use the PyMongo data -structures required to send a message. The solution is to add the cursor's id -to an array on the :class:`~mongo_client.MongoClient` without taking any locks. - -Each client has a :class:`~periodic_executor.PeriodicExecutor` devoted to -checking the array for cursor ids. Any it sees are the result of cursors that -were freed while the server-side cursor was still open. The executor can safely -take the locks it needs in order to send the ``OP_KILL_CURSORS`` message. - -.. _PYTHON-799: https://jira.mongodb.org/browse/PYTHON-799 - -Stopping Executors ------------------- - -Just as :class:`~cursor.Cursor` must not take any locks from its destructor, -neither can :class:`~mongo_client.MongoClient` and :class:`~topology.Topology`. -Thus, although the client calls :meth:`close` on its kill-cursors thread, and -the topology calls :meth:`close` on all its monitor threads, the :meth:`close` -method cannot actually call :meth:`wake` on the executor, since :meth:`wake` -takes a lock. - -Instead, executors wake periodically to check if ``self.close`` is set, -and if so they exit. - -A thread can log spurious errors if it wakes late in the Python interpreter's -shutdown sequence, so we try to join threads before then. Each periodic -executor (either a monitor or a kill-cursors thread) adds a weakref to itself -to a set called ``_EXECUTORS``, in the ``periodic_executor`` module. - -An `exit handler`_ runs on shutdown and tells all executors to stop, then -tries (with a short timeout) to join all executor threads. - -.. _exit handler: https://docs.python.org/2/library/atexit.html - -Monitoring ----------- - -For each server in the topology, :class:`~topology.Topology` uses a periodic -executor to launch a monitor thread. This thread must not prevent the topology -from being freed, so it weakrefs the topology. Furthermore, it uses a weakref -callback to terminate itself soon after the topology is freed. - -Solid lines represent strong references, dashed lines weak ones: - -.. generated with graphviz: "dot -Tpng periodic-executor-refs.dot > periodic-executor-refs.png" - -.. image:: ../static/periodic-executor-refs.png - -See `Stopping Executors`_ above for an explanation of the ``_EXECUTORS`` set. - -It is a requirement of the `Server Discovery And Monitoring Spec`_ that a -sleeping monitor can be awakened early. Aside from infrequent wakeups to do -their appointed chores, and occasional interruptions, periodic executors also -wake periodically to check if they should terminate. - -Our first implementation of this idea was the obvious one: use the Python -standard library's threading.Condition.wait with a timeout. Another thread -wakes the executor early by signaling the condition variable. - -A topology cannot signal the condition variable to tell the executor to -terminate, because it would risk a deadlock in the garbage collector: no -destructor or weakref callback can take a lock to signal the condition variable -(see `PYTHON-863`_); thus the only way for a dying object to terminate a -periodic executor is to set its "stopped" flag and let the executor see the -flag next time it wakes. - -We erred on the side of prompt cleanup, and set the check interval at 100ms. We -assumed that checking a flag and going back to sleep 10 times a second was -cheap on modern machines. - -Starting in Python 3.2, the builtin C implementation of lock.acquire takes a -timeout parameter, so Python 3.2+ Condition variables sleep simply by calling -lock.acquire; they are implemented as efficiently as expected. - -But in Python 2, lock.acquire has no timeout. To wait with a timeout, a Python -2 condition variable sleeps a millisecond, tries to acquire the lock, sleeps -twice as long, and tries again. This exponential backoff reaches a maximum -sleep time of 50ms. - -If PyMongo calls the condition variable's "wait" method with a short timeout, -the exponential backoff is restarted frequently. Overall, the condition variable -is not waking a few times a second, but hundreds of times. (See `PYTHON-983`_.) - -Thus the current design of periodic executors is surprisingly simple: they -do a simple ``time.sleep`` for a half-second, check if it is time to wake or -terminate, and sleep again. - -.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check - -.. _PYTHON-863: https://jira.mongodb.org/browse/PYTHON-863 - -.. _PYTHON-983: https://jira.mongodb.org/browse/PYTHON-983 diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst deleted file mode 100644 index e7e3df6ce1..0000000000 --- a/doc/examples/aggregation.rst +++ /dev/null @@ -1,90 +0,0 @@ -Aggregation Examples -==================== - -There are several methods of performing aggregations in MongoDB. These -examples cover the new aggregation framework, using map reduce and using the -group method. - -.. testsetup:: - - from pymongo import MongoClient - - client = MongoClient() - client.drop_database("aggregation_example") - -Setup ------ -To start, we'll insert some example data which we can perform -aggregations on: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> db = MongoClient().aggregation_example - >>> result = db.things.insert_many( - ... [ - ... {"x": 1, "tags": ["dog", "cat"]}, - ... {"x": 2, "tags": ["cat"]}, - ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, - ... {"x": 3, "tags": []}, - ... ] - ... ) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] - -.. _aggregate-examples: - -Aggregation Framework ---------------------- - -This example shows how to use the -:meth:`~pymongo.collection.Collection.aggregate` method to use the aggregation -framework. We'll perform a simple aggregation to count the number of -occurrences for each tag in the ``tags`` array, across the entire collection. -To achieve this we need to pass in three operations to the pipeline. -First, we need to unwind the ``tags`` array, then group by the tags and -sum them up, finally we sort by count. - -Python dictionaries prior to 3.7 don't maintain order. You should use :class:`~bson.son.SON` -or :class:`collections.OrderedDict` where explicit ordering is required for an older Python version -eg "$sort": - -.. note:: - - aggregate requires server version **>= 2.1.0**. - -.. doctest:: - - >>> from bson.son import SON - >>> pipeline = [ - ... {"$unwind": "$tags"}, - ... {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - ... {"$sort": SON([("count", -1), ("_id", -1)])}, - ... ] - >>> import pprint - >>> pprint.pprint(list(db.things.aggregate(pipeline))) - [{'_id': 'cat', 'count': 3}, - {'_id': 'dog', 'count': 2}, - {'_id': 'mouse', 'count': 1}] - -To run an explain plan for this aggregation use -`PyMongoExplain `_, -a companion library for PyMongo. It allows you to explain any CRUD operation -by providing a few convenience classes:: - - >>> from pymongoexplain import ExplainableCollection - >>> ExplainableCollection(collection).aggregate(pipeline) - {'ok': 1.0, 'queryPlanner': [...]} - -Or, use the :meth:`~pymongo.database.Database.command` method:: - - >>> db.command('aggregate', 'things', pipeline=pipeline, explain=True) - {'ok': 1.0, 'stages': [...]} - -As well as simple aggregations the aggregation framework provides projection -capabilities to reshape the returned data. Using projections and aggregation, -you can add computed fields, create new virtual sub-objects, and extract -sub-fields into the top-level of results. - -.. seealso:: The full documentation for MongoDB's `aggregation framework - `_ diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst deleted file mode 100644 index 3f1137969d..0000000000 --- a/doc/examples/authentication.rst +++ /dev/null @@ -1,528 +0,0 @@ -Authentication Examples -======================= - -MongoDB supports several different authentication mechanisms. These examples -cover all authentication methods currently supported by PyMongo, documenting -Python module and MongoDB version dependencies. - -.. _percent escaped: - -Percent-Escaping Username and Password --------------------------------------- - -Username and password must be percent-escaped with -:py:func:`urllib.parse.quote_plus`, to be used in a MongoDB URI. For example:: - - >>> from pymongo import MongoClient - >>> import urllib.parse - >>> username = urllib.parse.quote_plus('user') - >>> username - 'user' - >>> password = urllib.parse.quote_plus('pass/word') - >>> password - 'pass%2Fword' - >>> MongoClient('mongodb://%s:%s@127.0.0.1' % (username, password)) - ... - -.. _scram_sha_256: - -SCRAM-SHA-256 (RFC 7677) ------------------------- -.. versionadded:: 3.7 - -SCRAM-SHA-256 is the default authentication mechanism supported by a cluster -configured for authentication with MongoDB 4.0 or later. Authentication -requires a username, a password, and a database name. The default database -name is "admin", this can be overridden with the ``authSource`` option. -Credentials can be specified as arguments to -:class:`~pymongo.mongo_client.MongoClient`:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... username='user', - ... password='password', - ... authSource='the_database', - ... authMechanism='SCRAM-SHA-256') - -Or through the MongoDB URI:: - - >>> uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=SCRAM-SHA-256" - >>> client = MongoClient(uri) - -SCRAM-SHA-1 (RFC 5802) ----------------------- -.. versionadded:: 2.8 - -SCRAM-SHA-1 is the default authentication mechanism supported by a cluster -configured for authentication with MongoDB 3.0 or later. Authentication -requires a username, a password, and a database name. The default database -name is "admin", this can be overridden with the ``authSource`` option. -Credentials can be specified as arguments to -:class:`~pymongo.mongo_client.MongoClient`:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... username='user', - ... password='password', - ... authSource='the_database', - ... authMechanism='SCRAM-SHA-1') - -Or through the MongoDB URI:: - - >>> uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=SCRAM-SHA-1" - >>> client = MongoClient(uri) - -For best performance on Python versions older than 2.7.8 install `backports.pbkdf2`_. - -.. _backports.pbkdf2: https://pypi.python.org/pypi/backports.pbkdf2/ - -Default Authentication Mechanism --------------------------------- - -If no mechanism is specified, PyMongo automatically negotiates the mechanism to use (SCRAM-SHA-1 -or SCRAM-SHA-256) with the MongoDB server. - -Default Database and "authSource" ---------------------------------- - -You can specify both a default database and the authentication database in the -URI:: - - >>> uri = "mongodb://user:password@example.com/default_db?authSource=admin" - >>> client = MongoClient(uri) - -PyMongo will authenticate on the "admin" database, but the default database -will be "default_db":: - - >>> # get_database with no "name" argument chooses the DB from the URI - >>> db = MongoClient(uri).get_database() - >>> print(db.name) - 'default_db' - -.. _mongodb_x509: - -MONGODB-X509 ------------- -.. versionadded:: 2.6 - -The MONGODB-X509 mechanism authenticates via the X.509 certificate presented -by the driver during TLS/SSL negotiation. This authentication method requires -the use of TLS/SSL connections with certificate validation:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... authMechanism="MONGODB-X509", - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... tlsCAFile='/path/to/ca.pem') - -MONGODB-X509 authenticates against the $external virtual database, so you -do not have to specify a database in the URI:: - - >>> uri = "mongodb://example.com/?authMechanism=MONGODB-X509" - >>> client = MongoClient(uri, - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... tlsCAFile='/path/to/ca.pem') - >>> - -.. _gssapi: - -GSSAPI (Kerberos) ------------------ -.. versionadded:: 2.5 - -GSSAPI (Kerberos) authentication is available in the Enterprise Edition of -MongoDB. - -Unix -~~~~ - -To authenticate using GSSAPI you must first install the python `kerberos`_ or -`pykerberos`_ module using pip. Make sure you run kinit before -using the following authentication methods:: - - $ kinit mongodbuser@EXAMPLE.COM - mongodbuser@EXAMPLE.COM's Password: - $ klist - Credentials cache: FILE:/tmp/krb5cc_1000 - Principal: mongodbuser@EXAMPLE.COM - - Issued Expires Principal - Feb 9 13:48:51 2013 Feb 9 23:48:51 2013 krbtgt/EXAMPLE.COM@EXAMPLE.COM - -Now authenticate using the MongoDB URI. GSSAPI authenticates against the -$external virtual database so you do not have to specify a database in the -URI:: - - >>> # Note: the kerberos principal must be url encoded. - >>> from pymongo import MongoClient - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@mongo-server.example.com/?authMechanism=GSSAPI" - >>> client = MongoClient(uri) - >>> - -The default service name used by MongoDB and PyMongo is ``mongodb``. You can -specify a custom service name with the ``authMechanismProperties`` option:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@mongo-server.example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:myservicename" - >>> client = MongoClient(uri) - -Windows (SSPI) -~~~~~~~~~~~~~~ -.. versionadded:: 3.3 - -First install the `winkerberos`_ module. Unlike authentication on Unix kinit is -not used. If the user to authenticate is different from the user that owns the -application process provide a password to authenticate:: - - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM:mongodbuserpassword@example.com/?authMechanism=GSSAPI" - -Two extra ``authMechanismProperties`` are supported on Windows platforms: - -- CANONICALIZE_HOST_NAME - Uses the fully qualified domain name (FQDN) of the - MongoDB host for the server principal (GSSAPI libraries on Unix do this by - default):: - - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI&authMechanismProperties=CANONICALIZE_HOST_NAME:true" - -- SERVICE_REALM - This is used when the user's realm is different from the service's realm:: - - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_REALM:otherrealm" - - -.. _kerberos: https://pypi.python.org/pypi/kerberos -.. _pykerberos: https://pypi.python.org/pypi/pykerberos -.. _winkerberos: https://pypi.python.org/pypi/winkerberos/ - -.. _sasl_plain: - -SASL PLAIN (RFC 4616) ---------------------- -.. versionadded:: 2.6 - -MongoDB Enterprise Edition version 2.6 and newer support the SASL PLAIN -authentication mechanism, initially intended for delegating authentication -to an LDAP server. These examples use the $external virtual database for LDAP support:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" - >>> client = MongoClient(uri) - >>> - -SASL PLAIN is a clear-text authentication mechanism. We **strongly** recommend -that you connect to MongoDB using TLS/SSL with certificate validation when -using the SASL PLAIN mechanism:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" - >>> client = MongoClient(uri, - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... tlsCAFile='/path/to/ca.pem') - >>> - -.. _MONGODB-AWS: - -MONGODB-AWS ------------ -.. versionadded:: 3.11 - -The MONGODB-AWS authentication mechanism is available in MongoDB 4.4+ and -requires extra pymongo dependencies. To use it, install pymongo with the -``aws`` extra:: - - $ python -m pip install 'pymongo[aws]' - -The MONGODB-AWS mechanism authenticates using AWS IAM credentials (an access -key ID and a secret access key), `temporary AWS IAM credentials`_ obtained -from an `AWS Security Token Service (STS)`_ `Assume Role`_ request, -AWS Lambda `environment variables`_, or temporary AWS IAM credentials assigned -to an `EC2 instance`_ or ECS task. The use of temporary credentials, in -addition to an access key ID and a secret access key, also requires a -security (or session) token. - -Credentials can be configured through the MongoDB URI, environment variables, -or the local EC2 or ECS endpoint. The order in which the client searches for -`credentials`_ is the same as the one used by the AWS ``boto3`` library -when using ``pymongo_auth_aws>=1.1.0``. - -Because we are now using ``boto3`` to handle credentials, the order and -locations of credentials are slightly different from before. Particularly, -if you have a shared AWS credentials or config file, -then those credentials will be used by default if AWS auth environment -variables are not set. To override this behavior, set -``AWS_SHARED_CREDENTIALS_FILE=""`` in your shell or add -``os.environ["AWS_SHARED_CREDENTIALS_FILE"] = ""`` to your script or -application. Alternatively, you can create an AWS profile specifically for -your MongoDB credentials and set ``AWS_PROFILE`` to that profile name. - -MONGODB-AWS authenticates against the "$external" virtual database, so none of -the URIs in this section need to include the ``authSource`` URI option. - -.. _credentials: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - -AWS IAM credentials -~~~~~~~~~~~~~~~~~~~ - -Applications can authenticate using AWS IAM credentials by providing a valid -access key id and secret access key pair as the username and password, -respectively, in the MongoDB URI. A sample URI would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: The access_key_id and secret_access_key passed into the URI MUST - be `percent escaped`_. - -AssumeRole -~~~~~~~~~~ - -Applications can authenticate using temporary credentials returned from an -assume role request. These temporary credentials consist of an access key -ID, a secret access key, and a security token passed into the URI. -A sample URI would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb+srv://:@example.mongodb.net/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" - >>> client = MongoClient(uri) - -.. note:: The access_key_id, secret_access_key, and session_token passed into - the URI MUST be `percent escaped`_. - - -AWS Lambda (Environment Variables) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When the username and password are not provided and the MONGODB-AWS mechanism -is set, the client will fallback to using the `environment variables`_ -``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` -for the access key ID, secret access key, and session token, respectively:: - - $ export AWS_ACCESS_KEY_ID= - $ export AWS_SECRET_ACCESS_KEY= - $ export AWS_SESSION_TOKEN= - $ python - >>> from pymongo import MongoClient - >>> uri = "mongodb+srv://example.mongodb.net/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: No username, password, or session token is passed into the URI. - PyMongo will use credentials set via the environment variables. - These environment variables MUST NOT be `percent escaped`_. - - -.. _EKS Clusters: - -EKS Clusters -~~~~~~~~~~~~ - -Applications using the `Authenticating users for your cluster from an OpenID Connect identity provider `_ capability on EKS can now -use the provided credentials, by giving the associated IAM User -`sts:AssumeRoleWithWebIdentity `_ -permission. - -When the username and password are not provided, the MONGODB-AWS mechanism -is set, and ``AWS_WEB_IDENTITY_TOKEN_FILE``, ``AWS_ROLE_ARN``, and -optional ``AWS_ROLE_SESSION_NAME`` are available, the driver will use -an ``AssumeRoleWithWebIdentity`` call to retrieve temporary credentials. -The application must be using ``pymongo_auth_aws`` >= 1.1.0 for EKS support. - -ECS Container -~~~~~~~~~~~~~ - -Applications can authenticate from an ECS container via temporary -credentials assigned to the machine. A sample URI on an ECS container -would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb+srv://example.mongodb.com/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: No username, password, or session token is passed into the URI. - PyMongo will query the ECS container endpoint to obtain these - credentials. - -EC2 Instance -~~~~~~~~~~~~ - -Applications can authenticate from an EC2 instance via temporary -credentials assigned to the machine. A sample URI on an EC2 machine -would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb+srv://example.mongodb.com/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: No username, password, or session token is passed into the URI. - PyMongo will query the EC2 instance endpoint to obtain these - credentials. - -.. _temporary AWS IAM credentials: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html -.. _AWS Security Token Service (STS): https://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html -.. _Assume Role: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html -.. _EC2 instance: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html -.. _environment variables: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime - -MONGODB-OIDC ------------- -.. versionadded:: 4.7 - -The `MONGODB-OIDC authentication mechanism`_ is available in MongoDB 7.0+ on Linux platforms. - -The MONGODB-OIDC mechanism authenticates using an OpenID Connect (OIDC) access token. -The driver supports OIDC for workload identity, defined as an identity you assign to a software workload -(such as an application, service, script, or container) to authenticate and access other services and resources. - -Credentials can be configured through the MongoDB URI or as arguments to -:class:`~pymongo.mongo_client.MongoClient`. - -Built-in Support -~~~~~~~~~~~~~~~~ - -The driver has built-in support for Azure IMDS and GCP IMDS environments. Other environments -are supported with `Custom Callbacks`_. - -Azure IMDS -^^^^^^^^^^ - -For an application running on an Azure VM or otherwise using the `Azure Internal Metadata Service`_, -you can use the built-in support for Azure. If using an Azure managed identity, the "" is -the client ID. If using a service principal to represent an enterprise application, the "" is -the application ID of the service principal. The ```` value is the ``audience`` -`configured on your MongoDB deployment`_. - -.. code-block:: python - - import os - - uri = os.environ["MONGODB_URI"] - - props = {"ENVIRONMENT": "azure", "TOKEN_RESOURCE": ""} - c = MongoClient( - uri, - username="", - authMechanism="MONGODB-OIDC", - authMechanismProperties=props, - ) - c.test.test.insert_one({}) - c.close() - -If the application is running on an Azure VM and only one managed identity is associated with the -VM, ``username`` can be omitted. - -If providing the ``TOKEN_RESOURCE`` as part of a connection string, it can be given as follows. -If the ``TOKEN_RESOURCE`` contains any of the following characters [``,``, ``+``, ``&``], then -it MUST be url-encoded. - -.. code-block:: python - - import os - - uri = f'{os.environ["MONGODB_URI"]}?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:' - c = MongoClient(uri) - c.test.test.insert_one({}) - c.close() - -GCP IMDS -^^^^^^^^ - -For an application running on an GCP VM or otherwise using the `GCP Internal Metadata Service`_, -you can use the built-in support for GCP, where ```` below is the ``audience`` -`configured on your MongoDB deployment`_. - -.. code-block:: python - - import os - - uri = os.environ["MONGODB_URI"] - - props = {"ENVIRONMENT": "gcp", "TOKEN_RESOURCE": ""} - c = MongoClient(uri, authMechanism="MONGODB-OIDC", authMechanismProperties=props) - c.test.test.insert_one({}) - c.close() - -If providing the ``TOKEN_RESOURCE`` as part of a connection string, it can be given as follows. -If the ``TOKEN_RESOURCE`` contains any of the following characters [``,``, ``+``, ``&``], then -it MUST be url-encoded. - -.. code-block:: python - - import os - - uri = f'{os.environ["MONGODB_URI"]}?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:' - c = MongoClient(uri) - c.test.test.insert_one({}) - c.close() - -Custom Callbacks -~~~~~~~~~~~~~~~~ - -For environments that are not directly supported by the driver, you can use :class:`~pymongo.auth_oidc.OIDCCallback`. -Some examples are given below. - -Other Azure Environments -^^^^^^^^^^^^^^^^^^^^^^^^ - -For applications running on Azure Functions, App Service Environment (ASE), or -Azure Kubernetes Service (AKS), you can use the `azure-identity package`_ -to fetch the credentials. This example assumes you have set environment variables for -the ``audience`` `configured on your MongoDB deployment`_, and for the client id of the Azure -managed identity. - -.. code-block:: python - - import os - from azure.identity import DefaultAzureCredential - from pymongo import MongoClient - from pymongo.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult - - audience = os.environ["AZURE_AUDIENCE"] - client_id = os.environ["AZURE_IDENTITY_CLIENT_ID"] - uri = os.environ["MONGODB_URI"] - - - class MyCallback(OIDCCallback): - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - credential = DefaultAzureCredential(managed_identity_client_id=client_id) - token = credential.get_token(f"{audience}/.default").token - return OIDCCallbackResult(access_token=token) - - - props = {"OIDC_CALLBACK": MyCallback()} - c = MongoClient(uri, authMechanism="MONGODB-OIDC", authMechanismProperties=props) - c.test.test.insert_one({}) - c.close() - -GCP GKE -^^^^^^^ - -For a Google Kubernetes Engine cluster with a `configured service account`_, the token can be read from the standard -service account token file location. - -.. code-block:: python - - import os - from pymongo.auth_oidc import OIDCCallback, OIDCCallbackContext, OIDCCallbackResult - - - class MyCallback(OIDCCallback): - def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: - with open("/var/run/secrets/kubernetes.io/serviceaccount/token") as fid: - token = fid.read() - return OIDCCallbackResult(access_token=token) - - - uri = os.environ["MONGODB_URI"] - props = {"OIDC_CALLBACK": MyCallback()} - c = MongoClient(uri, authMechanism="MONGODB-OIDC", authMechanismProperties=props) - c.test.test.insert_one({}) - c.close() - -.. _MONGODB-OIDC authentication mechanism: https://www.mongodb.com/docs/manual/core/security-oidc/ -.. _Azure Internal Metadata Service: https://learn.microsoft.com/en-us/azure/virtual-machines/instance-metadata-service -.. _configured on your MongoDB deployment: https://www.mongodb.com/docs/manual/reference/parameters/#mongodb-parameter-param.oidcIdentityProviders -.. _GCP Internal Metadata Service: https://cloud.google.com/compute/docs/metadata/querying-metadata -.. _azure-identity package: https://pypi.org/project/azure-identity/ -.. _configured service account: https://cloud.google.com/kubernetes-engine/docs/how-to/service-accounts diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst deleted file mode 100644 index 3ed8e09645..0000000000 --- a/doc/examples/bulk.rst +++ /dev/null @@ -1,184 +0,0 @@ -Bulk Write Operations -===================== - -.. testsetup:: - - from pymongo import MongoClient - - client = MongoClient() - client.drop_database("bulk_example") - -This tutorial explains how to take advantage of PyMongo's bulk -write operation features. Executing write operations in batches -reduces the number of network round trips, increasing write -throughput. - -Bulk Insert ------------ - -.. versionadded:: 2.6 - -A batch of documents can be inserted by passing a list to the -:meth:`~pymongo.collection.Collection.insert_many` method. PyMongo -will automatically split the batch into smaller sub-batches based on -the maximum message size accepted by MongoDB, supporting very large -bulk insert operations. - -.. doctest:: - - >>> import pymongo - >>> db = pymongo.MongoClient().bulk_example - >>> db.test.insert_many([{"i": i} for i in range(10000)]).inserted_ids - [...] - >>> db.test.count_documents({}) - 10000 - -Mixed Bulk Write Operations ---------------------------- - -.. versionadded:: 2.7 - -PyMongo also supports executing mixed bulk write operations. A batch -of insert, update, and remove operations can be executed together using -the bulk write operations API. - -.. _ordered_bulk: - -Ordered Bulk Write Operations -............................. - -Ordered bulk write operations are batched and sent to the server in the -order provided for serial execution. The return value is an instance of -:class:`~pymongo.results.BulkWriteResult` describing the type and count -of operations performed. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from pprint import pprint - >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne - >>> result = db.test.bulk_write( - ... [ - ... DeleteMany({}), # Remove all documents from the previous example. - ... InsertOne({"_id": 1}), - ... InsertOne({"_id": 2}), - ... InsertOne({"_id": 3}), - ... UpdateOne({"_id": 1}, {"$set": {"foo": "bar"}}), - ... UpdateOne({"_id": 4}, {"$inc": {"j": 1}}, upsert=True), - ... ReplaceOne({"j": 1}, {"j": 2}), - ... ] - ... ) - >>> pprint(result.bulk_api_result) - {'nInserted': 3, - 'nMatched': 2, - 'nModified': 2, - 'nRemoved': 10000, - 'nUpserted': 1, - 'upserted': [{'_id': 4, 'index': 5}], - 'writeConcernErrors': [], - 'writeErrors': []} - -The first write failure that occurs (e.g. duplicate key error) aborts the -remaining operations, and PyMongo raises -:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attribute of -the exception instance provides the execution results up until the failure -occurred and details about the failure - including the operation that caused -the failure. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from pymongo import InsertOne, DeleteOne, ReplaceOne - >>> from pymongo.errors import BulkWriteError - >>> requests = [ - ... ReplaceOne({"j": 2}, {"i": 5}), - ... InsertOne({"_id": 4}), # Violates the unique key constraint on _id. - ... DeleteOne({"i": 5}), - ... ] - >>> try: - ... db.test.bulk_write(requests) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 0, - 'nMatched': 1, - 'nModified': 1, - 'nRemoved': 0, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [{'code': 11000, - 'errmsg': '...E11000...duplicate key error...', - 'index': 1,... - 'op': {'_id': 4}}]} - -.. _unordered_bulk: - -Unordered Bulk Write Operations -............................... - -Unordered bulk write operations are batched and sent to the server in -**arbitrary order** where they may be executed in parallel. Any errors -that occur are reported after all operations are attempted. - -In the next example the first and third operations fail due to the unique -constraint on _id. Since we are doing unordered execution the second -and fourth operations succeed. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> requests = [ - ... InsertOne({"_id": 1}), - ... DeleteOne({"_id": 2}), - ... InsertOne({"_id": 3}), - ... ReplaceOne({"_id": 4}, {"i": 1}), - ... ] - >>> try: - ... db.test.bulk_write(requests, ordered=False) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 0, - 'nMatched': 1, - 'nModified': 1, - 'nRemoved': 1, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [{'code': 11000, - 'errmsg': '...E11000...duplicate key error...', - 'index': 0,... - 'op': {'_id': 1}}, - {'code': 11000, - 'errmsg': '...', - 'index': 2,... - 'op': {'_id': 3}}]} - -Write Concern -............. - -Bulk operations are executed with the -:attr:`~pymongo.collection.Collection.write_concern` of the collection they -are executed against. Write concern errors (e.g. wtimeout) will be reported -after all operations are attempted, regardless of execution order. - -:: - >>> from pymongo import WriteConcern - >>> coll = db.get_collection( - ... 'test', write_concern=WriteConcern(w=3, wtimeout=1)) - >>> try: - ... coll.bulk_write([InsertOne({'a': i}) for i in range(4)]) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 4, - 'nMatched': 0, - 'nModified': 0, - 'nRemoved': 0, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [{'code': 64... - 'errInfo': {'wtimeout': True}, - 'errmsg': 'waiting for replication timed out'}], - 'writeErrors': []} diff --git a/doc/examples/client_bulk.rst b/doc/examples/client_bulk.rst deleted file mode 100644 index ad435fa2e4..0000000000 --- a/doc/examples/client_bulk.rst +++ /dev/null @@ -1,192 +0,0 @@ -Client Bulk Write Operations -============================= - -.. testsetup:: - - from pymongo import MongoClient - - client = MongoClient() - client.drop_database("client_bulk_example") - db = client.client_bulk_example - client.db.drop_collection("test_one") - client.db.drop_collection("test_two") - client.db.drop_collection("test_three") - client.db.drop_collection("test_four") - client.db.drop_collection("test_five") - client.db.drop_collection("test_six") - -The :meth:`~pymongo.mongo_client.MongoClient.bulk_write` -method has been added to :class:`~pymongo.mongo_client.MongoClient` in PyMongo 4.9. -This method enables users to perform batches of write operations **across -multiple namespaces** in a minimized number of round trips, and -to receive detailed results for each operation performed. - -.. note:: This method requires MongoDB server version 8.0+. - -Basic Usage ------------- - -A list of insert, update, and delete operations can be passed into the -:meth:`~pymongo.mongo_client.MongoClient.bulk_write` method. Each request -must include the namespace on which to perform the operation. - -PyMongo will automatically split the given requests into smaller sub-batches based on -the maximum message size accepted by MongoDB, supporting very large bulk write operations. - -The return value is an instance of -:class:`~pymongo.results.ClientBulkWriteResult`. - -.. _summary_client_bulk: - -Summary Results -................. - -By default, the returned :class:`~pymongo.results.ClientBulkWriteResult` instance will contain a -summary of the types of operations performed in the bulk write, along with their respective counts. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - :skipif: server_major_version < 8 - - >>> from pymongo import InsertOne, DeleteOne, UpdateOne - >>> models = [ - ... InsertOne(namespace="db.test_one", document={"_id": 1}), - ... InsertOne(namespace="db.test_two", document={"_id": 2}), - ... DeleteOne(namespace="db.test_one", filter={"_id": 1}), - ... UpdateOne( - ... namespace="db.test_two", - ... filter={"_id": 4}, - ... update={"$inc": {"j": 1}}, - ... upsert=True, - ... ), - ... ] - >>> result = client.bulk_write(models) - >>> result.inserted_count - 2 - >>> result.deleted_count - 1 - >>> result.modified_count - 0 - >>> result.upserted_count - 1 - -.. _verbose_client_bulk: - -Verbose Results -................. - -If the ``verbose_results`` parameter is set to True, the returned :class:`~pymongo.results.ClientBulkWriteResult` -instance will also include detailed results about each successful operation performed as part of the bulk write. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - :skipif: server_major_version < 8 - - >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateMany - >>> models = [ - ... DeleteMany( - ... namespace="db.test_two", filter={} - ... ), # Delete all documents from the previous example - ... InsertOne(namespace="db.test_one", document={"_id": 1}), - ... InsertOne(namespace="db.test_one", document={"_id": 2}), - ... InsertOne(namespace="db.test_two", document={"_id": 3}), - ... UpdateMany(namespace="db.test_one", filter={}, update={"$set": {"foo": "bar"}}), - ... ReplaceOne( - ... namespace="db.test_two", filter={"j": 1}, replacement={"_id": 4}, upsert=True - ... ), - ... ] - >>> result = client.bulk_write(models, verbose_results=True) - >>> result.delete_results - {0: DeleteResult({'ok': 1.0, 'idx': 0, 'n': 2}, ...)} - >>> result.insert_results - {1: InsertOneResult(1, ...), - 2: InsertOneResult(2, ...), - 3: InsertOneResult(3, ...)} - >>> result.update_results - {4: UpdateResult({'ok': 1.0, 'idx': 4, 'n': 2, 'nModified': 2}, ...), - 5: UpdateResult({'ok': 1.0, 'idx': 5, 'n': 1, 'nModified': 0, 'upserted': {'_id': 4}}, ...)} - - -Handling Errors ----------------- - -If any errors occur during the bulk write, a :class:`~pymongo.errors.ClientBulkWriteException` will be raised. -If a server, connection, or network error occurred, the ``error`` field of the exception will contain -that error. - -Individual write errors or write concern errors get recorded in the ``write_errors`` and ``write_concern_errors`` fields of the exception. -The ``partial_result`` field gets populated with the results of any operations that were successfully completed before the exception was raised. - -.. _ordered_client_bulk: - -Ordered Operations -.................... - -In an ordered bulk write (the default), if an individual write fails, no further operations will get executed. -For example, a duplicate key error on the third operation below aborts the remaining two operations. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - :skipif: server_major_version < 8 - - >>> from pymongo import InsertOne, DeleteOne - >>> from pymongo.errors import ClientBulkWriteException - >>> models = [ - ... InsertOne(namespace="db.test_three", document={"_id": 3}), - ... InsertOne(namespace="db.test_four", document={"_id": 4}), - ... InsertOne(namespace="db.test_three", document={"_id": 3}), # Duplicate _id - ... InsertOne(namespace="db.test_four", document={"_id": 5}), - ... DeleteOne(namespace="db.test_three", filter={"_id": 3}), - ... ] - >>> try: - ... client.bulk_write(models) - ... except ClientBulkWriteException as cbwe: - ... exception = cbwe - ... - >>> exception.write_errors - [{'ok': 0.0, - 'idx': 2, - 'code': 11000, - 'errmsg': 'E11000 duplicate key error ... dup key: { _id: 3 }', ... - 'op': {'insert': 0, 'document': {'_id': 3}}}] - >>> exception.partial_result.inserted_count - 2 - >>> exception.partial_result.deleted_count - 0 - -.. _unordered_client_bulk: - -Unordered Operations -..................... - -If the ``ordered`` parameter is set to False, all operations in the bulk write will be attempted, regardless of any individual write errors that occur. -For example, the fourth and fifth write operations below get executed successfully, despite the duplicate key error on the third operation. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - :skipif: server_major_version < 8 - - >>> from pymongo import InsertOne, DeleteOne - >>> from pymongo.errors import ClientBulkWriteException - >>> models = [ - ... InsertOne(namespace="db.test_five", document={"_id": 5}), - ... InsertOne(namespace="db.test_six", document={"_id": 6}), - ... InsertOne(namespace="db.test_five", document={"_id": 5}), # Duplicate _id - ... InsertOne(namespace="db.test_six", document={"_id": 7}), - ... DeleteOne(namespace="db.test_five", filter={"_id": 5}), - ... ] - >>> try: - ... client.bulk_write(models, ordered=False) - ... except ClientBulkWriteException as cbwe: - ... exception = cbwe - ... - >>> exception.write_errors - [{'ok': 0.0, - 'idx': 2, - 'code': 11000, - 'errmsg': 'E11000 duplicate key error ... dup key: { _id: 5 }', ... - 'op': {'insert': 0, 'document': {'_id': 5}}}] - >>> exception.partial_result.inserted_count - 3 - >>> exception.partial_result.deleted_count - 1 diff --git a/doc/examples/collations.rst b/doc/examples/collations.rst deleted file mode 100644 index 45e647d816..0000000000 --- a/doc/examples/collations.rst +++ /dev/null @@ -1,134 +0,0 @@ -Collations -========== - -.. seealso:: The API docs for :mod:`~pymongo.collation`. - -Collations are a new feature in MongoDB version 3.4. They provide a set of rules -to use when comparing strings that comply with the conventions of a particular -language, such as Spanish or German. If no collation is specified, the server -sorts strings based on a binary comparison. Many languages have specific -ordering rules, and collations allow users to build applications that adhere to -language-specific comparison rules. - -In French, for example, the last accent in a given word determines the sorting -order. The correct sorting order for the following four words in French is:: - - cote < côte < coté < côté - -Specifying a French collation allows users to sort string fields using the -French sort order. - -Usage ------ - -Users can specify a collation for a -:ref:`collection`, an -:ref:`index`, or a -:ref:`CRUD command `. - -Collation Parameters: -~~~~~~~~~~~~~~~~~~~~~ - -Collations can be specified with the :class:`~pymongo.collation.Collation` model -or with plain Python dictionaries. The structure is the same:: - - Collation(locale=, - caseLevel=, - caseFirst=, - strength=, - numericOrdering=, - alternate=, - maxVariable=, - backwards=) - -The only required parameter is ``locale``, which the server parses as -an `ICU format locale ID `_. -For example, set ``locale`` to ``en_US`` to represent US English -or ``fr_CA`` to represent Canadian French. - -For a complete description of the available parameters, see the MongoDB `manual -`_. - -.. COMMENT add link for manual entry. - -.. _collation-on-collection: - -Assign a Default Collation to a Collection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following example demonstrates how to create a new collection called -``contacts`` and assign a default collation with the ``fr_CA`` locale. This -operation ensures that all queries that are run against the ``contacts`` -collection use the ``fr_CA`` collation unless another collation is explicitly -specified:: - - from pymongo import MongoClient - from pymongo.collation import Collation - - db = MongoClient().test - collection = db.create_collection('contacts', - collation=Collation(locale='fr_CA')) - -.. _collation-on-index: - -Assign a Default Collation to an Index -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When creating a new index, you can specify a default collation. - -The following example shows how to create an index on the ``name`` -field of the ``contacts`` collection, with the ``unique`` parameter -enabled and a default collation with ``locale`` set to ``fr_CA``:: - - from pymongo import MongoClient - from pymongo.collation import Collation - - contacts = MongoClient().test.contacts - contacts.create_index('name', - unique=True, - collation=Collation(locale='fr_CA')) - -.. _collation-on-operation: - -Specify a Collation for a Query -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Individual queries can specify a collation to use when sorting -results. The following example demonstrates a query that runs on the -``contacts`` collection in database ``test``. It matches on -documents that contain ``New York`` in the ``city`` field, -and sorts on the ``name`` field with the ``fr_CA`` collation:: - - from pymongo import MongoClient - from pymongo.collation import Collation - - collection = MongoClient().test.contacts - docs = collection.find({'city': 'New York'}).sort('name').collation( - Collation(locale='fr_CA')) - -Other Query Types -~~~~~~~~~~~~~~~~~ - -You can use collations to control document matching rules for several different -types of queries. All the various update and delete methods -(:meth:`~pymongo.collection.Collection.update_one`, -:meth:`~pymongo.collection.Collection.update_many`, -:meth:`~pymongo.collection.Collection.delete_one`, etc.) support collation, and -you can create query filters which employ collations to comply with any of the -languages and variants available to the ``locale`` parameter. - -The following example uses a collation with ``strength`` set to -:const:`~pymongo.collation.CollationStrength.SECONDARY`, which considers only -the base character and character accents in string comparisons, but not case -sensitivity, for example. All documents in the ``contacts`` collection with -``jürgen`` (case-insensitive) in the ``first_name`` field are updated:: - - from pymongo import MongoClient - from pymongo.collation import Collation, CollationStrength - - contacts = MongoClient().test.contacts - result = contacts.update_many( - {'first_name': 'jürgen'}, - {'$set': {'verified': 1}}, - collation=Collation(locale='de', - strength=CollationStrength.SECONDARY)) diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst deleted file mode 100644 index c8026ba05f..0000000000 --- a/doc/examples/copydb.rst +++ /dev/null @@ -1,73 +0,0 @@ -Copying a Database -================== - -MongoDB >= 4.2 --------------- - -Starting in MongoDB version 4.2, the server removes the deprecated ``copydb`` command. -As an alternative, users can use ``mongodump`` and ``mongorestore`` (with the ``mongorestore`` -options ``--nsFrom`` and ``--nsTo``). - -For example, to copy the ``test`` database from a local instance running on the -default port 27017 to the ``examples`` database on the same instance, you can: - -#. Use ``mongodump`` to dump the test database to an archive ``mongodump-test-db``:: - - mongodump --archive="mongodump-test-db" --db=test - -#. Use ``mongorestore`` with ``--nsFrom`` and ``--nsTo`` to restore (with database name change) - from the archive:: - - mongorestore --archive="mongodump-test-db" --nsFrom='test.*' --nsTo='examples.*' - -Include additional options as necessary, such as to specify the uri or host, username, -password and authentication database. - -For more info about using ``mongodump`` and ``mongorestore`` see the `Copy a Database`_ example -in the official ``mongodump`` documentation. - -MongoDB <= 4.0 --------------- - -When using MongoDB <= 4.0, it is possible to use the deprecated ``copydb`` command -to copy a database. To copy a database within a single ``mongod`` process, or -between ``mongod`` servers, connect to the target ``mongod`` and use the -:meth:`~pymongo.database.Database.command` method:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('target.example.com') - >>> client.admin.command('copydb', - fromdb='source_db_name', - todb='target_db_name') - -To copy from a different mongod server that is not password-protected:: - - >>> client.admin.command('copydb', - fromdb='source_db_name', - todb='target_db_name', - fromhost='source.example.com') - -If the target server is password-protected, authenticate to the "admin" -database:: - - >>> client = MongoClient('target.example.com', - ... username='administrator', - ... password='pwd') - >>> client.admin.command('copydb', - fromdb='source_db_name', - todb='target_db_name', - fromhost='source.example.com') - -See the :doc:`authentication examples `. - -If the **source** server is password-protected, use the `copyDatabase -function in the mongo shell`_. - -Versions of PyMongo before 3.0 included a ``copy_database`` helper method, -but it has been removed. - -.. _copyDatabase function in the mongo shell: - https://mongodb.com/docs/manual/reference/method/db.copyDatabase/ - -.. _Copy a Database: - https://www.mongodb.com/docs/database-tools/mongodump/mongodump-examples/#copy-and-clone-databases diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst deleted file mode 100644 index acf706deba..0000000000 --- a/doc/examples/custom_type.rst +++ /dev/null @@ -1,436 +0,0 @@ -Custom Type Example -=================== - -This is an example of using a custom type with PyMongo. The example here shows -how to subclass :class:`~bson.codec_options.TypeCodec` to write a type -codec, which is used to populate a :class:`~bson.codec_options.TypeRegistry`. -The type registry can then be used to create a custom-type-aware -:class:`~pymongo.collection.Collection`. Read and write operations -issued against the resulting collection object transparently manipulate -documents as they are saved to or retrieved from MongoDB. - - -Setting Up ----------- - -We'll start by getting a clean database to use for the example: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> client = MongoClient() - >>> client.drop_database("custom_type_example") - >>> db = client.custom_type_example - - -Since the purpose of the example is to demonstrate working with custom types, -we'll need a custom data type to use. For this example, we will be working with -the :py:class:`~decimal.Decimal` type from Python's standard library. Since the -BSON library's :class:`~bson.decimal128.Decimal128` type (that implements -the IEEE 754 decimal128 decimal-based floating-point numbering format) is -distinct from Python's built-in :py:class:`~decimal.Decimal` type, attempting -to save an instance of ``Decimal`` with PyMongo, results in an -:exc:`~bson.errors.InvalidDocument` exception. - -.. doctest:: - - >>> from decimal import Decimal - >>> num = Decimal("45.321") - >>> db.test.insert_one({"num": num}) - Traceback (most recent call last): - ... - bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: - - -.. _custom-type-type-codec: - -The :class:`~bson.codec_options.TypeCodec` Class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. versionadded:: 3.8 - -In order to encode a custom type, we must first define a **type codec** for -that type. A type codec describes how an instance of a custom type can be -*transformed* to and/or from one of the types :mod:`~bson` already understands. -Depending on the desired functionality, users must choose from the following -base classes when defining type codecs: - -* :class:`~bson.codec_options.TypeEncoder`: subclass this to define a codec that - encodes a custom Python type to a known BSON type. Users must implement the - ``python_type`` property/attribute and the ``transform_python`` method. -* :class:`~bson.codec_options.TypeDecoder`: subclass this to define a codec that - decodes a specified BSON type into a custom Python type. Users must implement - the ``bson_type`` property/attribute and the ``transform_bson`` method. -* :class:`~bson.codec_options.TypeCodec`: subclass this to define a codec that - can both encode and decode a custom type. Users must implement the - ``python_type`` and ``bson_type`` properties/attributes, as well as the - ``transform_python`` and ``transform_bson`` methods. - - -The type codec for our custom type simply needs to define how a -:py:class:`~decimal.Decimal` instance can be converted into a -:class:`~bson.decimal128.Decimal128` instance and vice-versa. Since we are -interested in both encoding and decoding our custom type, we use the -``TypeCodec`` base class to define our codec: - -.. doctest:: - - >>> from bson.decimal128 import Decimal128 - >>> from bson.codec_options import TypeCodec - >>> class DecimalCodec(TypeCodec): - ... python_type = Decimal # the Python type acted upon by this type codec - ... bson_type = Decimal128 # the BSON type acted upon by this type codec - ... def transform_python(self, value): - ... """Function that transforms a custom type value into a type - ... that BSON can encode.""" - ... return Decimal128(value) - ... def transform_bson(self, value): - ... """Function that transforms a vanilla BSON type value into our - ... custom type.""" - ... return value.to_decimal() - ... - >>> decimal_codec = DecimalCodec() - - -.. _custom-type-type-registry: - -The :class:`~bson.codec_options.TypeRegistry` Class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. versionadded:: 3.8 - -Before we can begin encoding and decoding our custom type objects, we must -first inform PyMongo about the corresponding codec. This is done by creating -a :class:`~bson.codec_options.TypeRegistry` instance: - -.. doctest:: - - >>> from bson.codec_options import TypeRegistry - >>> type_registry = TypeRegistry([decimal_codec]) - - -Note that type registries can be instantiated with any number of type codecs. -Once instantiated, registries are immutable and the only way to add codecs -to a registry is to create a new one. - - -Putting It Together -------------------- - -Finally, we can define a :class:`~bson.codec_options.CodecOptions` instance -with our ``type_registry`` and use it to get a -:class:`~pymongo.collection.Collection` object that understands the -:py:class:`~decimal.Decimal` data type: - -.. doctest:: - - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection("test", codec_options=codec_options) - - -Now, we can seamlessly encode and decode instances of -:py:class:`~decimal.Decimal`: - -.. doctest:: - - >>> collection.insert_one({"num": Decimal("45.321")}) - InsertOneResult(ObjectId('...'), acknowledged=True) - >>> mydoc = collection.find_one() - >>> import pprint - >>> pprint.pprint(mydoc) - {'_id': ObjectId('...'), 'num': Decimal('45.321')} - - -We can see what's actually being saved to the database by creating a fresh -collection object without the customized codec options and using that to query -MongoDB: - -.. doctest:: - - >>> vanilla_collection = db.get_collection("test") - >>> pprint.pprint(vanilla_collection.find_one()) - {'_id': ObjectId('...'), 'num': Decimal128('45.321')} - - -Encoding Subtypes -^^^^^^^^^^^^^^^^^ - -Consider the situation where, in addition to encoding -:py:class:`~decimal.Decimal`, we also need to encode a type that subclasses -``Decimal``. PyMongo does this automatically for types that inherit from -Python types that are BSON-encodable by default, but the type codec system -described above does not offer the same flexibility. - -Consider this subtype of ``Decimal`` that has a method to return its value as -an integer: - -.. doctest:: - - >>> class DecimalInt(Decimal): - ... def my_method(self): - ... """Method implementing some custom logic.""" - ... return int(self) - ... - -If we try to save an instance of this type without first registering a type -codec for it, we get an error: - -.. doctest:: - - >>> collection.insert_one({"num": DecimalInt("45.321")}) - Traceback (most recent call last): - ... - bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: - -In order to proceed further, we must define a type codec for ``DecimalInt``. -This is trivial to do since the same transformation as the one used for -``Decimal`` is adequate for encoding ``DecimalInt`` as well: - -.. doctest:: - - >>> class DecimalIntCodec(DecimalCodec): - ... @property - ... def python_type(self): - ... """The Python type acted upon by this type codec.""" - ... return DecimalInt - ... - >>> decimalint_codec = DecimalIntCodec() - - -.. note:: - - No attempt is made to modify decoding behavior because without additional - information, it is impossible to discern which incoming - :class:`~bson.decimal128.Decimal128` value needs to be decoded as ``Decimal`` - and which needs to be decoded as ``DecimalInt``. This example only considers - the situation where a user wants to *encode* documents containing either - of these types. - -After creating a new codec options object and using it to get a collection -object, we can seamlessly encode instances of ``DecimalInt``: - -.. doctest:: - - >>> type_registry = TypeRegistry([decimal_codec, decimalint_codec]) - >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection("test", codec_options=codec_options) - >>> collection.drop() - >>> collection.insert_one({"num": DecimalInt("45.321")}) - InsertOneResult(ObjectId('...'), acknowledged=True) - >>> mydoc = collection.find_one() - >>> pprint.pprint(mydoc) - {'_id': ObjectId('...'), 'num': Decimal('45.321')} - -Note that the ``transform_bson`` method of the base codec class results in -these values being decoded as ``Decimal`` (and not ``DecimalInt``). - - -.. _decoding-binary-types: - -Decoding :class:`~bson.binary.Binary` Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The decoding treatment of :class:`~bson.binary.Binary` types having -``subtype = 0`` by the :mod:`bson` module varies slightly depending on the -version of the Python runtime in use. This must be taken into account while -writing a ``TypeDecoder`` that modifies how this datatype is decoded. - -On Python 3.x, :class:`~bson.binary.Binary` data (``subtype = 0``) is decoded -as a ``bytes`` instance: - -.. code-block:: pycon - - >>> # On Python 3.x. - >>> from bson.binary import Binary - >>> newcoll = db.get_collection("new") - >>> newcoll.insert_one({"_id": 1, "data": Binary(b"123", subtype=0)}) - >>> doc = newcoll.find_one() - >>> type(doc["data"]) - bytes - - -On Python 2.7.x, the same data is decoded as a :class:`~bson.binary.Binary` -instance: - -.. code-block:: pycon - - >>> # On Python 2.7.x - >>> newcoll = db.get_collection("new") - >>> doc = newcoll.find_one() - >>> type(doc["data"]) - bson.binary.Binary - - -As a consequence of this disparity, users must set the ``bson_type`` attribute -on their :class:`~bson.codec_options.TypeDecoder` classes differently, -depending on the python version in use. - - -.. note:: - - For codebases requiring compatibility with both Python 2 and 3, type - decoders will have to be registered for both possible ``bson_type`` values. - - -.. _fallback-encoder-callable: - -The ``fallback_encoder`` Callable ---------------------------------- - -.. versionadded:: 3.8 - - -In addition to type codecs, users can also register a callable to encode types -that BSON doesn't recognize and for which no type codec has been registered. -This callable is the **fallback encoder** and like the ``transform_python`` -method, it accepts an unencodable value as a parameter and returns a -BSON-encodable value. The following fallback encoder encodes python's -:py:class:`~decimal.Decimal` type to a :class:`~bson.decimal128.Decimal128`: - -.. doctest:: - - >>> def fallback_encoder(value): - ... if isinstance(value, Decimal): - ... return Decimal128(value) - ... return value - ... - -After declaring the callback, we must create a type registry and codec options -with this fallback encoder before it can be used for initializing a collection: - -.. doctest:: - - >>> type_registry = TypeRegistry(fallback_encoder=fallback_encoder) - >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection("test", codec_options=codec_options) - >>> collection.drop() - -We can now seamlessly encode instances of :py:class:`~decimal.Decimal`: - -.. doctest:: - - >>> collection.insert_one({"num": Decimal("45.321")}) - InsertOneResult(ObjectId('...'), acknowledged=True) - >>> mydoc = collection.find_one() - >>> pprint.pprint(mydoc) - {'_id': ObjectId('...'), 'num': Decimal128('45.321')} - - -.. note:: - - Fallback encoders are invoked *after* attempts to encode the given value - with standard BSON encoders and any configured type encoders have failed. - Therefore, in a type registry configured with a type encoder and fallback - encoder that both target the same custom type, the behavior specified in - the type encoder will prevail. - - -Because fallback encoders don't need to declare the types that they encode -beforehand, they can be used to support interesting use-cases that cannot be -serviced by ``TypeEncoder``. One such use-case is described in the next -section. - - -Encoding Unknown Types -^^^^^^^^^^^^^^^^^^^^^^ - -In this example, we demonstrate how a fallback encoder can be used to save -arbitrary objects to the database. We will use the the standard library's -:py:mod:`pickle` module to serialize the unknown types and so naturally, this -approach only works for types that are picklable. - -We start by defining some arbitrary custom types: - -.. code-block:: python - - class MyStringType(object): - def __init__(self, value): - self.__value = value - - def __repr__(self): - return "MyStringType('%s')" % (self.__value,) - - - class MyNumberType(object): - def __init__(self, value): - self.__value = value - - def __repr__(self): - return "MyNumberType(%s)" % (self.__value,) - -We also define a fallback encoder that pickles whatever objects it receives -and returns them as :class:`~bson.binary.Binary` instances with a custom -subtype. The custom subtype, in turn, allows us to write a TypeDecoder that -identifies pickled artifacts upon retrieval and transparently decodes them -back into Python objects: - -.. code-block:: python - - import pickle - from bson.binary import Binary, USER_DEFINED_SUBTYPE - - - def fallback_pickle_encoder(value): - return Binary(pickle.dumps(value), USER_DEFINED_SUBTYPE) - - - class PickledBinaryDecoder(TypeDecoder): - bson_type = Binary - - def transform_bson(self, value): - if value.subtype == USER_DEFINED_SUBTYPE: - return pickle.loads(value) - return value - - -.. note:: - - The above example is written assuming the use of Python 3. If you are using - Python 2, ``bson_type`` must be set to ``Binary``. See the - :ref:`decoding-binary-types` section for a detailed explanation. - - -Finally, we create a ``CodecOptions`` instance: - -.. code-block:: python - - codec_options = CodecOptions( - type_registry=TypeRegistry( - [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder - ) - ) - -We can now round trip our custom objects to MongoDB: - -.. code-block:: python - - collection = db.get_collection("test_fe", codec_options=codec_options) - collection.insert_one( - {"_id": 1, "str": MyStringType("hello world"), "num": MyNumberType(2)} - ) - mydoc = collection.find_one() - assert isinstance(mydoc["str"], MyStringType) - assert isinstance(mydoc["num"], MyNumberType) - - -Limitations ------------ - -PyMongo's type codec and fallback encoder features have the following -limitations: - -#. Users cannot customize the encoding behavior of Python types that PyMongo - already understands like ``int`` and ``str`` (the 'built-in types'). - Attempting to instantiate a type registry with one or more codecs that act - upon a built-in type results in a ``TypeError``. This limitation extends - to all subtypes of the standard types. -#. Chaining type encoders is not supported. A custom type value, once - transformed by a codec's ``transform_python`` method, *must* result in a - type that is either BSON-encodable by default, or can be - transformed by the fallback encoder into something BSON-encodable--it - *cannot* be transformed a second time by a different type codec. -#. The :meth:`~pymongo.database.Database.command` method does not apply the - user's TypeDecoders while decoding the command response document. -#. :mod:`gridfs` does not apply custom type encoding or decoding to any - documents received from or to returned to the user. diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst deleted file mode 100644 index a8c0476903..0000000000 --- a/doc/examples/datetimes.rst +++ /dev/null @@ -1,177 +0,0 @@ -Datetimes and Timezones -======================= - -.. testsetup:: - - import datetime - from pymongo import MongoClient - from bson.codec_options import CodecOptions - - client = MongoClient() - client.drop_database("dt_example") - db = client.dt_example - -These examples show how to handle Python :class:`datetime.datetime` objects -correctly in PyMongo. - -Basic Usage ------------ - -PyMongo uses :class:`datetime.datetime` objects for representing dates and times -in MongoDB documents. Because MongoDB assumes that dates and times are in UTC, -care should be taken to ensure that dates and times written to the database -reflect UTC. For example, the following code stores the current UTC date and -time into MongoDB: - -.. doctest:: - - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now(tz=datetime.timezone.utc)} - ... ) - -Always use :meth:`datetime.datetime.now(tz=datetime.timezone.utc)`, which explicitly returns the current time in -UTC, instead of :meth:`datetime.datetime.now`, with no arguments, which returns the current local -time. Avoid doing this: - -.. doctest:: - - >>> result = db.objects.insert_one({"last_modified": datetime.datetime.now()}) - -The value for ``last_modified`` is very different between these two examples, even -though both documents were stored at around the same local time. This will be -confusing to the application that reads them: - -.. doctest:: - - >>> [doc["last_modified"] for doc in db.objects.find()] # doctest: +SKIP - [datetime.datetime(2015, 7, 8, 18, 17, 28, 324000), - datetime.datetime(2015, 7, 8, 11, 17, 42, 911000)] - -:class:`bson.codec_options.CodecOptions` has a ``tz_aware`` option that enables -"aware" :class:`datetime.datetime` objects, i.e., datetimes that know what -timezone they're in. By default, PyMongo retrieves naive datetimes: - -.. doctest:: - - >>> result = db.tzdemo.insert_one({"date": datetime.datetime(2002, 10, 27, 6, 0, 0)}) - >>> db.tzdemo.find_one()["date"] - datetime.datetime(2002, 10, 27, 6, 0) - >>> options = CodecOptions(tz_aware=True) - >>> db.get_collection("tzdemo", codec_options=options).find_one()["date"] # doctest: +SKIP - datetime.datetime(2002, 10, 27, 6, 0, - tzinfo=) - -Saving Datetimes with Timezones -------------------------------- - -When storing :class:`datetime.datetime` objects that specify a timezone -(i.e. they have a ``tzinfo`` property that isn't ``None``), PyMongo will convert -those datetimes to UTC automatically: - -.. doctest:: - - >>> from zoneinfo import ZoneInfo - >>> from datetime import datetime - >>> aware_datetime = datetime(2002, 10, 27, 6, 0, 0, tzinfo=ZoneInfo("US/Pacific")) - >>> result = db.times.insert_one({"date": aware_datetime}) - >>> db.times.find_one()["date"] - datetime.datetime(2002, 10, 27, 14, 0) - -Reading Time ------------- - -As previously mentioned, by default all :class:`datetime.datetime` objects -returned by PyMongo will be naive but reflect UTC (i.e. the time as stored in -MongoDB). By setting the ``tz_aware`` option on -:class:`~bson.codec_options.CodecOptions`, :class:`datetime.datetime` objects -will be timezone-aware and have a ``tzinfo`` property that reflects the UTC -timezone. - -PyMongo 3.1 introduced a ``tzinfo`` property that can be set on -:class:`~bson.codec_options.CodecOptions` to convert :class:`datetime.datetime` -objects to local time automatically. For example, if we wanted to read all times -out of MongoDB in US/Pacific time: - - >>> from bson.codec_options import CodecOptions - >>> db.times.find_one()['date'] - datetime.datetime(2002, 10, 27, 14, 0) - >>> aware_times = db.times.with_options(codec_options=CodecOptions( - ... tz_aware=True, - ... tzinfo=ZoneInfo("US/Pacific"))) - >>> result = aware_times.find_one()['date'] - datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE - tzinfo=) - -.. _handling-out-of-range-datetimes: - -Handling out of range datetimes -------------------------------- - -Python's :class:`~datetime.datetime` can only represent datetimes within the -range allowed by -:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`, whereas -the range of datetimes allowed in BSON can represent any 64-bit number -of milliseconds from the Unix epoch. To deal with this, we can use the -:class:`bson.datetime_ms.DatetimeMS` object, which is a wrapper for the -:class:`int` built-in. - -To decode UTC datetime values as :class:`~bson.datetime_ms.DatetimeMS`, -:class:`~bson.codec_options.CodecOptions` should have its -``datetime_conversion`` parameter set to one of the options available in -:class:`bson.datetime_ms.DatetimeConversion`. These include -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME`, -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS`, -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO`, -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP`. -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME` is the default -option and has the behavior of raising an :class:`~builtin.OverflowError` upon -attempting to decode an out-of-range date. -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_MS` will only return -:class:`~bson.datetime_ms.DatetimeMS` objects, regardless of whether the -represented datetime is in- or out-of-range: - -.. doctest:: - - >>> from datetime import datetime - >>> from bson import encode, decode - >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import CodecOptions, DatetimeConversion - >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> codec_ms = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS) - >>> decode(x, codec_options=codec_ms) - {'x': DatetimeMS(0)} - -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_AUTO` will return -:class:`~datetime.datetime` if the underlying UTC datetime is within range, -or :class:`~bson.datetime_ms.DatetimeMS` if the underlying datetime -cannot be represented using the builtin Python :class:`~datetime.datetime`: - -.. doctest:: - - >>> x = encode({"x": datetime(1970, 1, 1)}) - >>> y = encode({"x": DatetimeMS(-(2**62))}) - >>> codec_auto = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) - >>> decode(x, codec_options=codec_auto) - {'x': datetime.datetime(1970, 1, 1, 0, 0)} - >>> decode(y, codec_options=codec_auto) - {'x': DatetimeMS(-4611686018427387904)} - -:attr:`~bson.datetime_ms.DatetimeConversion.DATETIME_CLAMP` will clamp -resulting :class:`~datetime.datetime` objects to be within -:attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max` -(trimmed to ``999000`` microseconds): - -.. doctest:: - - >>> x = encode({"x": DatetimeMS(2**62)}) - >>> y = encode({"x": DatetimeMS(-(2**62))}) - >>> codec_clamp = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP) - >>> decode(x, codec_options=codec_clamp) - {'x': datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)} - >>> decode(y, codec_options=codec_clamp) - {'x': datetime.datetime(1, 1, 1, 0, 0)} - -:class:`~bson.datetime_ms.DatetimeMS` objects have support for rich comparison -methods against other instances of :class:`~bson.datetime_ms.DatetimeMS`. -They can also be converted to :class:`~datetime.datetime` objects with -:meth:`~bson.datetime_ms.DatetimeMS.to_datetime()`. diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst deleted file mode 100644 index 4b3de8d8d0..0000000000 --- a/doc/examples/encryption.rst +++ /dev/null @@ -1,840 +0,0 @@ -.. _In-Use Encryption: - -In-Use Encryption -================= - -.. _Client-Side Field Level Encryption: - -Client-Side Field Level Encryption ----------------------------------- - -New in MongoDB 4.2, client-side field level encryption allows an application -to encrypt specific data fields in addition to pre-existing MongoDB -encryption features such as `Encryption at Rest -`_ and -`TLS/SSL (Transport Encryption) -`_. - -With field level encryption, applications can encrypt fields in documents -*prior* to transmitting data over the wire to the server. Client-side field -level encryption supports workloads where applications must guarantee that -unauthorized parties, including server administrators, cannot read the -encrypted data. - -.. seealso:: The MongoDB documentation on `Client Side Field Level Encryption `_. - -Dependencies -~~~~~~~~~~~~ - -To get started using client-side field level encryption in your project, -you will need to install the -`pymongocrypt `_ and -`pymongo-auth-aws `_ libraries -as well as the driver itself. Install both the driver and a compatible -version of the dependencies like this:: - - $ python -m pip install 'pymongo[encryption]' - -Note that installing on Linux requires pip 19 or later for manylinux2010 wheel -support. For more information about installing pymongocrypt see -`the installation instructions on the project's PyPI page -`_. - -Additionally, either `crypt_shared`_ or `mongocryptd`_ are required in order -to use *automatic* client-side encryption. - -crypt_shared -```````````` - -The Automatic Encryption Shared Library (crypt_shared) provides the same -functionality as `mongocryptd`_, but does not require you to spawn another -process to perform automatic encryption. - -By default, pymongo attempts to load crypt_shared from the system and if -found uses it automatically. To load crypt_shared from another location, -use the ``crypt_shared_lib_path`` argument to -:class:`~pymongo.encryption_options.AutoEncryptionOpts`. -If pymongo cannot load crypt_shared it will attempt to fallback to using -`mongocryptd`_ by default. Set ``crypt_shared_lib_required=True`` to make -the app always use crypt_shared and fail if it could not be loaded. - -For detailed installation instructions see -`the MongoDB documentation on Automatic Encryption Shared Library -`_. - -mongocryptd -``````````` - -The ``mongocryptd`` binary is required for automatic client-side encryption -and is included as a component in the `MongoDB Enterprise Server package -`_. -For detailed installation instructions see -`the MongoDB documentation on mongocryptd -`_. - -``mongocryptd`` performs the following: - -- Parses the automatic encryption rules specified to the database connection. - If the JSON schema contains invalid automatic encryption syntax or any - document validation syntax, ``mongocryptd`` returns an error. -- Uses the specified automatic encryption rules to mark fields in read and - write operations for encryption. -- Rejects read/write operations that may return unexpected or incorrect results - when applied to an encrypted field. For supported and unsupported operations, - see `Read/Write Support with Automatic Field Level Encryption - `_. - -A MongoClient configured with auto encryption will automatically spawn the -``mongocryptd`` process from the application's ``PATH``. Applications can -control the spawning behavior as part of the automatic encryption options. -For example to set the path to the ``mongocryptd`` process:: - - auto_encryption_opts = AutoEncryptionOpts( - ..., - mongocryptd_spawn_path='/path/to/mongocryptd') - -To control the logging output of ``mongocryptd`` pass options using -``mongocryptd_spawn_args``:: - - auto_encryption_opts = AutoEncryptionOpts( - ..., - mongocryptd_spawn_args=['--logpath=/path/to/mongocryptd.log', '--logappend']) - -If your application wishes to manage the ``mongocryptd`` process manually, -it is possible to disable spawning ``mongocryptd``:: - - auto_encryption_opts = AutoEncryptionOpts( - ..., - mongocryptd_bypass_spawn=True, - # URI of the local ``mongocryptd`` process. - mongocryptd_uri='mongodb://localhost:27020') - -``mongocryptd`` is only responsible for supporting automatic client-side field -level encryption and does not itself perform any encryption or decryption. - -.. _automatic-client-side-encryption: - -Automatic Client-Side Field Level Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Automatic client-side field level encryption is enabled by creating a -:class:`~pymongo.mongo_client.MongoClient` with the ``auto_encryption_opts`` -option set to an instance of -:class:`~pymongo.encryption_options.AutoEncryptionOpts`. The following -examples show how to setup automatic client-side field level encryption -using :class:`~pymongo.encryption.ClientEncryption` to create a new -encryption data key. - -.. note:: Automatic client-side field level encryption requires MongoDB >=4.2 - enterprise or a MongoDB >=4.2 Atlas cluster. The community version of the - server supports automatic decryption as well as - :ref:`explicit-client-side-encryption`. - -Providing Local Automatic Encryption Rules -`````````````````````````````````````````` - -The following example shows how to specify automatic encryption rules via the -``schema_map`` option. The automatic encryption rules are expressed using a -`strict subset of the JSON Schema syntax -`_. - -Supplying a ``schema_map`` provides more security than relying on -JSON Schemas obtained from the server. It protects against a -malicious server advertising a false JSON Schema, which could trick -the client into sending unencrypted data that should be encrypted. - -JSON Schemas supplied in the ``schema_map`` only apply to configuring -automatic client-side field level encryption. Other validation -rules in the JSON schema will not be enforced by the driver and -will result in an error. - -.. code-block:: python - - import os - from bson.codec_options import CodecOptions - from bson import json_util - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption - from pymongo.encryption_options import AutoEncryptionOpts - - - def create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client): - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - key_vault_client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. We will not be calling - # encrypt() or decrypt() in this example so we can use any - # CodecOptions. - CodecOptions(), - ) - - # Create a new data key and json schema for the encryptedField. - # https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules - data_key_id = client_encryption.create_data_key( - "local", key_alt_names=["pymongo_encryption_example_1"] - ) - schema = { - "properties": { - "encryptedField": { - "encrypt": { - "keyId": [data_key_id], - "bsonType": "string", - "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - } - } - }, - "bsonType": "object", - } - # Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be - # able to parse the MongoDB extended JSON file. - json_schema_string = json_util.dumps( - schema, json_options=json_util.CANONICAL_JSON_OPTIONS - ) - - with open("jsonSchema.json", "w") as file: - file.write(json_schema_string) - - - def main(): - # The MongoDB namespace (db.collection) used to store the - # encrypted documents in this example. - encrypted_namespace = "test.coll" - - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # The MongoClient used to access the key vault (key_vault_namespace). - key_vault_client = MongoClient() - key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}, - ) - - create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client) - - # Load the JSON Schema and construct the local schema_map option. - with open("jsonSchema.json", "r") as file: - json_schema_string = file.read() - json_schema = json_util.loads(json_schema_string) - schema_map = {encrypted_namespace: json_schema} - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, schema_map=schema_map - ) - - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - db_name, coll_name = encrypted_namespace.split(".", 1) - coll = client[db_name][coll_name] - # Clear old data - coll.drop() - - coll.insert_one({"encryptedField": "123456789"}) - print("Decrypted document: %s" % (coll.find_one(),)) - unencrypted_coll = MongoClient()[db_name][coll_name] - print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) - - - if __name__ == "__main__": - main() - - -Server-Side Field Level Encryption Enforcement -`````````````````````````````````````````````` - -MongoDB >=4.2 servers supports using schema validation to enforce encryption -of specific fields in a collection. This schema validation will prevent an -application from inserting unencrypted values for any fields marked with the -``"encrypt"`` JSON schema keyword. - -The following example shows how to setup automatic client-side field level -encryption using -:class:`~pymongo.encryption.ClientEncryption` to create a new encryption -data key and create a collection with the -`Automatic Encryption JSON Schema Syntax -`_: - -.. code-block:: python - - import os - - from bson.codec_options import CodecOptions - from bson.binary import STANDARD - - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption - from pymongo.encryption_options import AutoEncryptionOpts - from pymongo.errors import OperationFailure - from pymongo.write_concern import WriteConcern - - - def main(): - # The MongoDB namespace (db.collection) used to store the - # encrypted documents in this example. - encrypted_namespace = "test.coll" - - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # The MongoClient used to access the key vault (key_vault_namespace). - key_vault_client = MongoClient() - key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}, - ) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - key_vault_client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. We will not be calling - # encrypt() or decrypt() in this example so we can use any - # CodecOptions. - CodecOptions(), - ) - - # Create a new data key and json schema for the encryptedField. - data_key_id = client_encryption.create_data_key( - "local", key_alt_names=["pymongo_encryption_example_2"] - ) - json_schema = { - "properties": { - "encryptedField": { - "encrypt": { - "keyId": [data_key_id], - "bsonType": "string", - "algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - } - } - }, - "bsonType": "object", - } - - auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - db_name, coll_name = encrypted_namespace.split(".", 1) - db = client[db_name] - # Clear old data - db.drop_collection(coll_name) - # Create the collection with the encryption JSON Schema. - db.create_collection( - coll_name, - # uuid_representation=STANDARD is required to ensure that any - # UUIDs in the $jsonSchema document are encoded to BSON Binary - # with the standard UUID subtype 4. This is only needed when - # running the "create" collection command with an encryption - # JSON Schema. - codec_options=CodecOptions(uuid_representation=STANDARD), - write_concern=WriteConcern(w="majority"), - validator={"$jsonSchema": json_schema}, - ) - coll = client[db_name][coll_name] - - coll.insert_one({"encryptedField": "123456789"}) - print("Decrypted document: %s" % (coll.find_one(),)) - unencrypted_coll = MongoClient()[db_name][coll_name] - print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) - try: - unencrypted_coll.insert_one({"encryptedField": "123456789"}) - except OperationFailure as exc: - print("Unencrypted insert failed: %s" % (exc.details,)) - - - if __name__ == "__main__": - main() - - -.. _explicit-client-side-encryption: - -Explicit Encryption -~~~~~~~~~~~~~~~~~~~ - -Explicit encryption is a MongoDB community feature and does not use the -``mongocryptd`` process. Explicit encryption is provided by the -:class:`~pymongo.encryption.ClientEncryption` class, for example: - -.. code-block:: python - - import os - - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # The MongoClient used to read/write application data. - client = MongoClient() - coll = client.test.coll - # Clear old data - coll.drop() - - # Set up the key vault (key_vault_namespace) for this example. - key_vault = client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}, - ) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - coll.codec_options, - ) - - # Create a new data key for the encryptedField. - data_key_id = client_encryption.create_data_key( - "local", key_alt_names=["pymongo_encryption_example_3"] - ) - - # Explicitly encrypt a field: - encrypted_field = client_encryption.encrypt( - "123456789", - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id, - ) - coll.insert_one({"encryptedField": encrypted_field}) - doc = coll.find_one() - print("Encrypted document: %s" % (doc,)) - - # Explicitly decrypt the field: - doc["encryptedField"] = client_encryption.decrypt(doc["encryptedField"]) - print("Decrypted document: %s" % (doc,)) - - # Cleanup resources. - client_encryption.close() - client.close() - - - if __name__ == "__main__": - main() - - -Explicit Encryption with Automatic Decryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although automatic encryption requires MongoDB >=4.2 enterprise or a -MongoDB >=4.2 Atlas cluster, automatic *decryption* is supported for all users. -To configure automatic *decryption* without automatic *encryption* set -``bypass_auto_encryption=True`` in -:class:`~pymongo.encryption_options.AutoEncryptionOpts`: - -.. code-block:: python - - import os - - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption - from pymongo.encryption_options import AutoEncryptionOpts - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # bypass_auto_encryption=True disable automatic encryption but keeps - # the automatic _decryption_ behavior. bypass_auto_encryption will - # also disable spawning mongocryptd. - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, bypass_auto_encryption=True - ) - - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - coll = client.test.coll - # Clear old data - coll.drop() - - # Set up the key vault (key_vault_namespace) for this example. - key_vault = client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}, - ) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - coll.codec_options, - ) - - # Create a new data key for the encryptedField. - data_key_id = client_encryption.create_data_key( - "local", key_alt_names=["pymongo_encryption_example_4"] - ) - - # Explicitly encrypt a field: - encrypted_field = client_encryption.encrypt( - "123456789", - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name="pymongo_encryption_example_4", - ) - coll.insert_one({"encryptedField": encrypted_field}) - # Automatically decrypts any encrypted fields. - doc = coll.find_one() - print("Decrypted document: %s" % (doc,)) - unencrypted_coll = MongoClient().test.coll - print("Encrypted document: %s" % (unencrypted_coll.find_one(),)) - - # Cleanup resources. - client_encryption.close() - client.close() - - - if __name__ == "__main__": - main() - - -.. _CSFLE on-demand credentials: - - -CSFLE on-demand credentials -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``pymongocrypt`` 1.4 adds support for fetching on-demand KMS credentials for -AWS, GCP, and Azure cloud environments. - -To enable the driver's behavior to obtain credentials from the environment, add the appropriate key ("aws", "gcp", or "azure") with an empty map to -"kms_providers" in either :class:`~pymongo.encryption_options.AutoEncryptionOpts` or :class:`~pymongo.encryption.ClientEncryption` options. - -An application using AWS credentials would look like: - -.. code-block:: python - - from pymongo import MongoClient - from pymongo.encryption import ClientEncryption - - client = MongoClient() - client_encryption = ClientEncryption( - # The empty dictionary enables on-demand credentials. - kms_providers={"aws": {}}, - key_vault_namespace="keyvault.datakeys", - key_vault_client=client, - codec_options=client.codec_options, - ) - master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:123456789:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - } - client_encryption.create_data_key("aws", master_key) - -The above will enable the same behavior of obtaining AWS credentials from the environment as is used for :ref:`MONGODB-AWS` authentication, including the -caching to avoid rate limiting. - -An application using GCP credentials would look like: - -.. code-block:: python - - from pymongo import MongoClient - from pymongo.encryption import ClientEncryption - - client = MongoClient() - client_encryption = ClientEncryption( - # The empty dictionary enables on-demand credentials. - kms_providers={"gcp": {}}, - key_vault_namespace="keyvault.datakeys", - key_vault_client=client, - codec_options=client.codec_options, - ) - master_key = { - "projectId": "my-project", - "location": "global", - "keyRing": "key-ring-csfle", - "keyName": "key-name-csfle", - } - client_encryption.create_data_key("gcp", master_key) - -The driver will query the `VM instance metadata `_ to obtain credentials. - -An application using Azure credentials would look like, this time using -:class:`~pymongo.encryption_options.AutoEncryptionOpts`: - -.. code-block:: python - - from pymongo import MongoClient - from pymongo.encryption_options import AutoEncryptionOpts - - # The empty dictionary enables on-demand credentials. - kms_providers = {"azure": {}} - key_vault_namespace = "keyvault.datakeys" - auto_encryption_opts = AutoEncryptionOpts(kms_providers, key_vault_namespace) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - coll = client.test.coll - coll.insert_one({"encryptedField": "123456789"}) - -The driver will `acquire an access token `_ from the Azure VM. - -.. _Queryable Encryption: - -Queryable Encryption --------------------- - -.. _automatic-queryable-client-side-encryption: - -Automatic Queryable Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Automatic Queryable Encryption requires MongoDB 7.0+ Enterprise or a MongoDB 7.0+ Atlas cluster. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, -as demonstrated by the following example: - -.. code-block:: python - - import os - from bson.codec_options import CodecOptions - from pymongo import MongoClient - from pymongo.encryption import ClientEncryption - from pymongo.encryption_options import AutoEncryptionOpts - - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - key_vault_namespace = "keyvault.datakeys" - key_vault_client = MongoClient() - client_encryption = ClientEncryption( - kms_providers, key_vault_namespace, key_vault_client, CodecOptions() - ) - key_vault = key_vault_client["keyvault"]["datakeys"] - key_vault.drop() - # Ensure that two data keys cannot share the same keyAltName. - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}, - ) - key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) - key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) - - encrypted_fields_map = { - "default.encryptedCollection": { - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": key1_id, - "queries": [{"queryType": "equality"}], - }, - { - "path": "lastName", - "bsonType": "string", - "keyId": key2_id, - }, - ], - } - } - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, - key_vault_namespace, - encrypted_fields_map=encrypted_fields_map, - ) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - client.default.drop_collection("encryptedCollection") - coll = client.default.create_collection("encryptedCollection") - coll.insert_one({"_id": 1, "firstName": "Jane", "lastName": "Doe"}) - docs = list(coll.find({"firstName": "Jane"})) - print(docs) - -In the above example, the ``firstName`` and ``lastName`` fields are -automatically encrypted and decrypted. - -Explicit Queryable Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Explicit Queryable Encryption requires MongoDB 7.0+. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` -methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured -using an ``encrypted_fields`` mapping, as demonstrated by the following example: - -.. code-block:: python - - import os - from pymongo import MongoClient - from pymongo.encryption import ( - Algorithm, - AutoEncryptionOpts, - ClientEncryption, - QueryType, - ) - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # Set up the key vault (key_vault_namespace) for this example. - client = MongoClient() - key_vault = client[key_vault_db_name][key_vault_coll_name] - - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}, - ) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - client.codec_options, - ) - - # Create a new data key for the encryptedField. - indexed_key_id = client_encryption.create_data_key("local") - unindexed_key_id = client_encryption.create_data_key("local") - - encrypted_fields = { - "fields": [ - { - "keyId": indexed_key_id, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": {"queryType": "equality"}, - }, - { - "keyId": unindexed_key_id, - "path": "encryptedUnindexed", - "bsonType": "string", - }, - ], - } - - opts = AutoEncryptionOpts( - {"local": {"key": local_master_key}}, - key_vault.full_name, - bypass_query_analysis=True, - key_vault_client=client, - ) - - # The MongoClient used to read/write application data. - encrypted_client = MongoClient(auto_encryption_opts=opts) - encrypted_client.drop_database("test") - db = encrypted_client.test - - # Create the collection with encrypted fields. - coll = db.create_collection("coll", encryptedFields=encrypted_fields) - - # Create and encrypt an indexed and unindexed value. - val = "encrypted indexed value" - unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, contention_factor=1 - ) - insert_payload_unindexed = client_encryption.encrypt( - unindexed_val, Algorithm.UNINDEXED, unindexed_key_id - ) - - # Insert the payloads. - coll.insert_one( - { - "encryptedIndexed": insert_payload_indexed, - "encryptedUnindexed": insert_payload_unindexed, - } - ) - - # Encrypt our find payload using QueryType.EQUALITY. - # The value of "indexed_key_id" must be the same as used to encrypt - # the values above. - find_payload = client_encryption.encrypt( - val, - Algorithm.INDEXED, - indexed_key_id, - query_type=QueryType.EQUALITY, - contention_factor=1, - ) - - # Find the document we inserted using the encrypted payload. - # The returned document is automatically decrypted. - doc = coll.find_one({"encryptedIndexed": find_payload}) - print("Returned document: %s" % (doc,)) - - # Cleanup resources. - client_encryption.close() - encrypted_client.close() - client.close() - - - if __name__ == "__main__": - main() diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst deleted file mode 100644 index e7da156720..0000000000 --- a/doc/examples/geo.rst +++ /dev/null @@ -1,109 +0,0 @@ -Geospatial Indexing Example -=========================== - -.. testsetup:: - - from pymongo import MongoClient - - client = MongoClient() - client.drop_database("geo_example") - -This example shows how to create and use a :data:`~pymongo.GEO2D` -index in PyMongo. To create a spherical (earth-like) geospatial index use :data:`~pymongo.GEOSPHERE` instead. - -.. seealso:: The MongoDB documentation on `Geospatial Indexes `_. - -Creating a Geospatial Index ---------------------------- - -Creating a geospatial index in pymongo is easy: - -.. doctest:: - - >>> from pymongo import MongoClient, GEO2D - >>> db = MongoClient().geo_example - >>> db.places.create_index([("loc", GEO2D)]) - 'loc_2d' - -Inserting Places ----------------- - -Locations in MongoDB are represented using either embedded documents -or lists where the first two elements are coordinates. Here, we'll -insert a couple of example locations: - -.. doctest:: - - >>> result = db.places.insert_many( - ... [{"loc": [2, 5]}, {"loc": [30, 5]}, {"loc": [1, 2]}, {"loc": [4, 4]}] - ... ) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] - -.. note:: If specifying latitude and longitude coordinates in :data:`~pymongo.GEOSPHERE`, list the **longitude** first and then **latitude**. - -Querying --------- - -Using the geospatial index we can find documents near another point: - -.. doctest:: - - >>> import pprint - >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - {'_id': ObjectId('...'), 'loc': [1, 2]} - -.. note:: If using :data:`pymongo.GEOSPHERE`, using $nearSphere is recommended. - -The $maxDistance operator requires the use of :class:`~bson.son.SON`: - -.. doctest:: - - >>> from bson.son import SON - >>> query = {"loc": SON([("$near", [3, 6]), ("$maxDistance", 100)])} - >>> for doc in db.places.find(query).limit(3): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - {'_id': ObjectId('...'), 'loc': [1, 2]} - -It's also possible to query for all items within a given rectangle -(specified by lower-left and upper-right coordinates): - -.. doctest:: - - >>> query = {"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}} - >>> for doc in db.places.find(query).sort("_id"): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - -Or circle (specified by center point and radius): - -.. doctest:: - - >>> query = {"loc": {"$within": {"$center": [[0, 0], 6]}}} - >>> for doc in db.places.find(query).sort("_id"): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [1, 2]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - -geoNear queries are also supported using :class:`~bson.son.SON`:: - - >>> from bson.son import SON - >>> db.command(SON([('geoNear', 'places'), ('near', [1, 2])])) - {'ok': 1.0, 'stats': ...} - -.. warning:: Starting in MongoDB version 4.0, MongoDB deprecates the **geoNear** command. Use one of the following operations instead. - - * $geoNear - aggregation stage. - * $near - query operator. - * $nearSphere - query operator. diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst deleted file mode 100644 index f62697d19f..0000000000 --- a/doc/examples/gevent.rst +++ /dev/null @@ -1,52 +0,0 @@ -Gevent -====== - -PyMongo supports `Gevent `_. Simply call Gevent's -``monkey.patch_all()`` before loading any other modules: - -.. code-block:: pycon - - >>> # You must call patch_all() *before* importing any other modules - >>> from gevent import monkey - >>> _ = monkey.patch_all() - >>> from pymongo import MongoClient - >>> client = MongoClient() - -PyMongo uses thread and socket functions from the Python standard library. -Gevent's monkey-patching replaces those standard functions so that PyMongo -does asynchronous I/O with non-blocking sockets, and schedules operations -on greenlets instead of threads. - -Avoid blocking in Hub.join --------------------------- - -By default, PyMongo uses threads to discover and monitor your servers' topology -(see :ref:`health-monitoring`). If you execute ``monkey.patch_all()`` when -your application first begins, PyMongo automatically uses greenlets instead -of threads. - -When shutting down, if your application calls :meth:`~gevent.hub.Hub.join` on -Gevent's :class:`~gevent.hub.Hub` without first terminating these background -greenlets, the call to :meth:`~gevent.hub.Hub.join` blocks indefinitely. You -therefore **must close or dereference** any active -:class:`~pymongo.mongo_client.MongoClient` before exiting. - -An example solution to this issue in some application frameworks is a signal -handler to end background greenlets when your application receives SIGHUP: - -.. code-block:: python - - import signal - - - def graceful_reload(signum, traceback): - """Explicitly close some global MongoClient object.""" - client.close() - - - signal.signal(signal.SIGHUP, graceful_reload) - -Applications using uWSGI prior to 1.9.16 are affected by this issue, -or newer uWSGI versions with the ``-gevent-wait-for-hub`` option. -See `the uWSGI changelog for details -`_. diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst deleted file mode 100644 index 52920adbda..0000000000 --- a/doc/examples/gridfs.rst +++ /dev/null @@ -1,84 +0,0 @@ -GridFS Example -============== - -.. testsetup:: - - from pymongo import MongoClient - - client = MongoClient() - client.drop_database("gridfs_example") - -This example shows how to use :mod:`gridfs` to store large binary -objects (e.g. files) in MongoDB. - -.. seealso:: The API docs for :mod:`gridfs`. - -.. seealso:: `This blog post - `_ - for some motivation behind this API. - -Setup ------ - -We start by creating a :class:`~gridfs.GridFS` instance to use: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> import gridfs - >>> - >>> db = MongoClient().gridfs_example - >>> fs = gridfs.GridFS(db) - -Every :class:`~gridfs.GridFS` instance is created with and will -operate on a specific :class:`~pymongo.database.Database` instance. - -Saving and Retrieving Data --------------------------- - -The simplest way to work with :mod:`gridfs` is to use its key/value -interface (the :meth:`~gridfs.GridFS.put` and -:meth:`~gridfs.GridFS.get` methods). To write data to GridFS, use -:meth:`~gridfs.GridFS.put`: - -.. doctest:: - - >>> a = fs.put(b"hello world") - -:meth:`~gridfs.GridFS.put` creates a new file in GridFS, and returns -the value of the file document's ``"_id"`` key. Given that ``"_id"`` -we can use :meth:`~gridfs.GridFS.get` to get back the contents of the -file: - -.. doctest:: - - >>> fs.get(a).read() - b'hello world' - -:meth:`~gridfs.GridFS.get` returns a file-like object, so we get the -file's contents by calling :meth:`~gridfs.grid_file.GridOut.read`. - -In addition to putting a :class:`str` as a GridFS file, we can also -put any file-like object (an object with a :meth:`read` -method). GridFS will handle reading the file in chunk-sized segments -automatically. We can also add additional attributes to the file as -keyword arguments: - -.. doctest:: - - >>> b = fs.put(fs.get(a), filename="foo", bar="baz") - >>> out = fs.get(b) - >>> out.read() - b'hello world' - >>> out.filename - 'foo' - >>> out.bar - 'baz' - >>> out.upload_date - datetime.datetime(...) - -The attributes we set in :meth:`~gridfs.GridFS.put` are stored in the -file document, and retrievable after calling -:meth:`~gridfs.GridFS.get`. Some attributes (like ``"filename"``) are -special and are defined in the GridFS specification - see that -document for more details. diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst deleted file mode 100644 index 80026153f8..0000000000 --- a/doc/examples/high_availability.rst +++ /dev/null @@ -1,367 +0,0 @@ -High Availability and PyMongo -============================= - -PyMongo makes it easy to write highly available applications whether -you use a `single replica set `_ -or a `large sharded cluster -`_. - -Connecting to a Replica Set ---------------------------- - -PyMongo makes working with `replica sets -`_ easy. Here we'll launch a new -replica set and show how to handle both initialization and normal -connections with PyMongo. - -.. seealso:: The MongoDB documentation on `replication `_. - -Starting a Replica Set -~~~~~~~~~~~~~~~~~~~~~~ - -The main `replica set documentation -`_ contains extensive information -about setting up a new replica set or migrating an existing MongoDB -setup, be sure to check that out. Here, we'll just do the bare minimum -to get a three node replica set setup locally. - -.. warning:: Replica sets should always use multiple nodes in - production - putting all set members on the same physical node is - only recommended for testing and development. - -We start three ``mongod`` processes, each on a different port and with -a different dbpath, but all using the same replica set name "foo". - -.. code-block:: bash - - $ mkdir -p /data/db0 /data/db1 /data/db2 - $ mongod --port 27017 --dbpath /data/db0 --replSet foo - -.. code-block:: bash - - $ mongod --port 27018 --dbpath /data/db1 --replSet foo - -.. code-block:: bash - - $ mongod --port 27019 --dbpath /data/db2 --replSet foo - -Initializing the Set -~~~~~~~~~~~~~~~~~~~~ - -At this point all of our nodes are up and running, but the set has yet -to be initialized. Until the set is initialized no node will become -the primary, and things are essentially "offline". - -To initialize the set we need to connect directly to a single node and run the -initiate command using the ``directConnection`` option:: - - >>> from pymongo import MongoClient - >>> c = MongoClient('localhost', 27017, directConnection=True) - -.. note:: We could have connected to any of the other nodes instead, - but only the node we initiate from is allowed to contain any - initial data. - -After connecting, we run the initiate command to get things started:: - - >>> config = {'_id': 'foo', 'members': [ - ... {'_id': 0, 'host': 'localhost:27017'}, - ... {'_id': 1, 'host': 'localhost:27018'}, - ... {'_id': 2, 'host': 'localhost:27019'}]} - >>> c.admin.command("replSetInitiate", config) - {'ok': 1.0, ...} - -The three ``mongod`` servers we started earlier will now coordinate -and come online as a replica set. - -Connecting to a Replica Set -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The initial connection as made above is a special case for an -uninitialized replica set. Normally we'll want to connect -differently. A connection to a replica set can be made using the -:meth:`~pymongo.mongo_client.MongoClient` constructor, specifying -one or more members of the set and optionally the replica set name. -Any of the following connects to the replica set we just created:: - - >>> MongoClient('localhost') - MongoClient(host=['localhost:27017'], ...) - >>> MongoClient('localhost', replicaset='foo') - MongoClient(host=['localhost:27017'], replicaset='foo', ...) - >>> MongoClient('localhost:27018', replicaset='foo') - MongoClient(['localhost:27018'], replicaset='foo', ...) - >>> MongoClient('localhost', 27019, replicaset='foo') - MongoClient(['localhost:27019'], replicaset='foo', ...) - >>> MongoClient('mongodb://localhost:27017,localhost:27018/') - MongoClient(['localhost:27017', 'localhost:27018'], ...) - >>> MongoClient('mongodb://localhost:27017,localhost:27018/?replicaSet=foo') - MongoClient(['localhost:27017', 'localhost:27018'], replicaset='foo', ...) - -The addresses passed to :meth:`~pymongo.mongo_client.MongoClient` are called -the *seeds*. As long as at least one of the seeds is online, MongoClient -discovers all the members in the replica set, and determines which is the -current primary and which are secondaries or arbiters. Each seed must be the -address of a single mongod. Multihomed and round robin DNS addresses are -**not** supported. - -The :class:`~pymongo.mongo_client.MongoClient` constructor is non-blocking: -the constructor returns immediately while the client connects to the replica -set using background threads. Note how, if you create a client and immediately -print the string representation of its -:attr:`~pymongo.mongo_client.MongoClient.nodes` attribute, the list may be -empty initially. If you wait a moment, MongoClient discovers the whole replica -set:: - - >>> from time import sleep - >>> c = MongoClient(replicaset='foo'); print(c.nodes); sleep(0.1); print(c.nodes) - frozenset([]) - frozenset([('localhost', 27019), ('localhost', 27017), ('localhost', 27018)]) - -You need not wait for replica set discovery in your application, however. -If you need to do any operation with a MongoClient, such as a -:meth:`~pymongo.collection.Collection.find` or an -:meth:`~pymongo.collection.Collection.insert_one`, the client waits to discover -a suitable member before it attempts the operation. - -Handling Failover -~~~~~~~~~~~~~~~~~ - -When a failover occurs, PyMongo will automatically attempt to find the -new primary node and perform subsequent operations on that node. This -can't happen completely transparently, however. Here we'll perform an -example failover to illustrate how everything behaves. First, we'll -connect to the replica set and perform a couple of basic operations:: - - >>> db = MongoClient("localhost", replicaSet='foo').test - >>> db.test.insert_one({"x": 1}).inserted_id - ObjectId('...') - >>> db.test.find_one() - {'x': 1, '_id': ObjectId('...')} - -By checking the host and port, we can see that we're connected to -*localhost:27017*, which is the current primary:: - - >>> db.client.address - ('localhost', 27017) - -Now let's bring down that node and see what happens when we run our -query again:: - - >>> db.test.find_one() - Traceback (most recent call last): - pymongo.errors.AutoReconnect: ... - -We get an :class:`~pymongo.errors.AutoReconnect` exception. This means -that the driver was not able to connect to the old primary (which -makes sense, as we killed the server), but that it will attempt to -automatically reconnect on subsequent operations. When this exception -is raised our application code needs to decide whether to retry the -operation or to simply continue, accepting the fact that the operation -might have failed. - -On subsequent attempts to run the query we might continue to see this -exception. Eventually, however, the replica set will failover and -elect a new primary (this should take no more than a couple of seconds in -general). At that point the driver will connect to the new primary and -the operation will succeed:: - - >>> db.test.find_one() - {'x': 1, '_id': ObjectId('...')} - >>> db.client.address - ('localhost', 27018) - -Bring the former primary back up. It will rejoin the set as a secondary. -Now we can move to the next section: distributing reads to secondaries. - -.. _secondary-reads: - -Secondary Reads -~~~~~~~~~~~~~~~ - -By default an instance of MongoClient sends queries to -the primary member of the replica set. To use secondaries for queries -we have to change the read preference:: - - >>> client = MongoClient( - ... 'localhost:27017', - ... replicaSet='foo', - ... readPreference='secondaryPreferred') - >>> client.read_preference - SecondaryPreferred(tag_sets=None) - -Now all queries will be sent to the secondary members of the set. If there are -no secondary members the primary will be used as a fallback. If you have -queries you would prefer to never send to the primary you can specify that -using the ``secondary`` read preference. - -By default the read preference of a :class:`~pymongo.database.Database` is -inherited from its MongoClient, and the read preference of a -:class:`~pymongo.collection.Collection` is inherited from its Database. To use -a different read preference use the -:meth:`~pymongo.mongo_client.MongoClient.get_database` method, or the -:meth:`~pymongo.database.Database.get_collection` method:: - - >>> from pymongo import ReadPreference - >>> client.read_preference - SecondaryPreferred(tag_sets=None) - >>> db = client.get_database('test', read_preference=ReadPreference.SECONDARY) - >>> db.read_preference - Secondary(tag_sets=None) - >>> coll = db.get_collection('test', read_preference=ReadPreference.PRIMARY) - >>> coll.read_preference - Primary() - -You can also change the read preference of an existing -:class:`~pymongo.collection.Collection` with the -:meth:`~pymongo.collection.Collection.with_options` method:: - - >>> coll2 = coll.with_options(read_preference=ReadPreference.NEAREST) - >>> coll.read_preference - Primary() - >>> coll2.read_preference - Nearest(tag_sets=None) - -Note that since most database commands can only be sent to the primary of a -replica set, the :meth:`~pymongo.database.Database.command` method does not obey -the Database's :attr:`~pymongo.database.Database.read_preference`, but you can -pass an explicit read preference to the method:: - - >>> db.command('dbstats', read_preference=ReadPreference.NEAREST) - {...} - -Reads are configured using three options: **read preference**, **tag sets**, -and **local threshold**. - -**Read preference**: - -Read preference is configured using one of the classes from -:mod:`~pymongo.read_preferences` (:class:`~pymongo.read_preferences.Primary`, -:class:`~pymongo.read_preferences.PrimaryPreferred`, -:class:`~pymongo.read_preferences.Secondary`, -:class:`~pymongo.read_preferences.SecondaryPreferred`, or -:class:`~pymongo.read_preferences.Nearest`). For convenience, we also provide -:class:`~pymongo.read_preferences.ReadPreference` with the following -attributes: - -- ``PRIMARY``: Read from the primary. This is the default read preference, - and provides the strongest consistency. If no primary is available, raise - :class:`~pymongo.errors.AutoReconnect`. - -- ``PRIMARY_PREFERRED``: Read from the primary if available, otherwise read - from a secondary. - -- ``SECONDARY``: Read from a secondary. If no matching secondary is available, - raise :class:`~pymongo.errors.AutoReconnect`. - -- ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise - from the primary. - -- ``NEAREST``: Read from any available member. - -**Tag sets**: - -Replica-set members can be `tagged -`_ according to any -criteria you choose. By default, PyMongo ignores tags when -choosing a member to read from, but your read preference can be configured with -a ``tag_sets`` parameter. ``tag_sets`` must be a list of dictionaries, each -dict providing tag values that the replica set member must match. -PyMongo tries each set of tags in turn until it finds a set of -tags with at least one matching member. For example, to prefer reads from the -New York data center, but fall back to the San Francisco data center, tag your -replica set members according to their location and create a -MongoClient like so:: - - >>> from pymongo.read_preferences import Secondary - >>> db = client.get_database( - ... 'test', read_preference=Secondary([{'dc': 'ny'}, {'dc': 'sf'}])) - >>> db.read_preference - Secondary(tag_sets=[{'dc': 'ny'}, {'dc': 'sf'}]) - -MongoClient tries to find secondaries in New York, then San Francisco, -and raises :class:`~pymongo.errors.AutoReconnect` if none are available. As an -additional fallback, specify a final, empty tag set, ``{}``, which means "read -from any member that matches the mode, ignoring tags." - -See :mod:`~pymongo.read_preferences` for more information. - -.. _distributes reads to secondaries: - -**Local threshold**: - -If multiple members match the read preference and tag sets, PyMongo reads -from among the nearest members, chosen according to ping time. By default, -only members whose ping times are within 15 milliseconds of the nearest -are used for queries. You can choose to distribute reads among members with -higher latencies by setting ``localThresholdMS`` to a larger -number:: - - >>> client = pymongo.MongoClient( - ... replicaSet='repl0', - ... readPreference='secondaryPreferred', - ... localThresholdMS=35) - -In this case, PyMongo distributes reads among matching members within 35 -milliseconds of the closest member's ping time. - -.. note:: ``localThresholdMS`` is ignored when talking to a - replica set *through* a mongos. The equivalent is the localThreshold_ command - line option. - -.. _localThreshold: https://mongodb.com/docs/manual/reference/program/mongos/#std-option-mongos.--localThreshold - -.. _health-monitoring: - -Health Monitoring -''''''''''''''''' - -When MongoClient is initialized it launches background threads to -monitor the replica set for changes in: - -* Health: detect when a member goes down or comes up, or if a different member - becomes primary -* Configuration: detect when members are added or removed, and detect changes - in members' tags -* Latency: track a moving average of each member's ping time - -Replica-set monitoring ensures queries are continually routed to the proper -members as the state of the replica set changes. - -.. _mongos-load-balancing: - -mongos Load Balancing ---------------------- - -An instance of :class:`~pymongo.mongo_client.MongoClient` can be configured -with a list of addresses of mongos servers: - - >>> client = MongoClient('mongodb://host1,host2,host3') - -Each member of the list must be a single mongos server. Multihomed and round -robin DNS addresses are **not** supported. The client continuously -monitors all the mongoses' availability, and its network latency to each. - -PyMongo distributes operations evenly among the set of mongoses within its -``localThresholdMS`` (similar to how it `distributes reads to secondaries`_ -in a replica set). By default the threshold is 15 ms. - -The lowest-latency server, and all servers with latencies no more than -``localThresholdMS`` beyond the lowest-latency server's, receive -operations equally. For example, if we have three mongoses: - - - host1: 20 ms - - host2: 35 ms - - host3: 40 ms - -By default the ``localThresholdMS`` is 15 ms, so PyMongo uses host1 and host2 -evenly. It uses host1 because its network latency to the driver is shortest. It -uses host2 because its latency is within 15 ms of the lowest-latency server's. -But it excuses host3: host3 is 20ms beyond the lowest-latency server. - -If we set ``localThresholdMS`` to 30 ms all servers are within the threshold: - - >>> client = MongoClient('mongodb://host1,host2,host3/?localThresholdMS=30') - -.. warning:: Do **not** connect PyMongo to a pool of mongos instances through a - load balancer. A single socket connection must always be routed to the same - mongos instance for proper cursor support. diff --git a/doc/examples/index.rst b/doc/examples/index.rst deleted file mode 100644 index 57682fa1af..0000000000 --- a/doc/examples/index.rst +++ /dev/null @@ -1,40 +0,0 @@ -Examples -======== - -The examples in this section are intended to give in depth overviews -of how to accomplish specific tasks with MongoDB and PyMongo. - -Unless otherwise noted, all examples assume that a MongoDB instance is -running on the default host and port. Assuming you have `downloaded -and installed `_ -MongoDB, you can start it like so: - -.. code-block:: bash - - $ mongod - -.. toctree:: - :maxdepth: 1 - - aggregation - authentication - collations - copydb - custom_type - bulk - client_bulk - datetimes - geo - gevent - gridfs - high_availability - logging - mod_wsgi - network_compression - server_selection - tailable - timeouts - tls - type_hints - encryption - uuid diff --git a/doc/examples/logging.rst b/doc/examples/logging.rst deleted file mode 100644 index 0cbc8eff09..0000000000 --- a/doc/examples/logging.rst +++ /dev/null @@ -1,63 +0,0 @@ -Logging -======== - -Starting in 4.8, **PyMongo** supports `Python's native logging library `_, -enabling developers to customize the verbosity of log messages for their applications. - -Components -------------- -There are currently three different **PyMongo** components with logging support: ``pymongo.command``, ``pymongo.connection``, and ``pymongo.serverSelection``. -These components deal with command operations, connection management, and server selection, respectively. -Each can be configured separately or they can all be configured together. - -Configuration -------------- -Currently, the above components each support ``DEBUG`` logging. To enable a single component, do the following:: - - import logging - logging.getLogger('pymongo.').setLevel(logging.DEBUG) - - - -For example, to enable command logging:: - - import logging - logging.getLogger('pymongo.command').setLevel(logging.DEBUG) - - -You can also enable all ``DEBUG`` logs at once:: - - import logging - logging.getLogger('pymongo').setLevel(logging.DEBUG) - - -Truncation -------------- -When ``pymongo.command`` debug logs are enabled, every command sent to the server and every response sent back will be included as part of the logs. -By default, these command and response documents are truncated after 1000 bytes. - -You can configure a higher truncation limit by setting the ``MONGOB_LOG_MAX_DOCUMENT_LENGTH`` environment variable to your desired length. - -Note that by default, only sensitive authentication command contents are redacted. -All commands containing user data will be logged, including the actual contents of your queries. -To prevent this behavior, set ``MONGOB_LOG_MAX_DOCUMENT_LENGTH`` to 0. This will omit the command and response bodies from the logs. - -Example -------------- -Here's a simple example that enables ``pymongo.command`` debug logs and performs two database operations:: - - import logging - import pymongo - - # Automatically writes all logs to stdout - logging.basicConfig() - logging.getLogger('pymongo.command').setLevel(logging.DEBUG) - - client = pymongo.MongoClient() - client.db.test.insert_one({"x": 1}) - client.db.test.find_one({"x": 1}) - --------------------------------- - DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command started", "command": "{\"insert\": \"test\", \"ordered\": true, \"lsid\": {\"id\": {\"$binary\": {\"base64\": \"GI7ubVhPSsWd7+OwHEFx6Q==\", \"subType\": \"04\"}}}, \"$db\": \"db\", \"documents\": [{\"x\": 1, \"_id\": {\"$oid\": \"65cbe82614be1fc2beb4e4aa\"}}]}", "commandName": "insert", "databaseName": "db", "requestId": 1144108930, "operationId": 1144108930, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} - DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command succeeded", "durationMS": 0.515, "reply": "{\"n\": 1, \"ok\": 1.0}", "commandName": "insert", "databaseName": "db", "requestId": 1144108930, "operationId": 1144108930, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} - DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command started", "command": "{\"find\": \"test\", \"filter\": {\"x\": 1}, \"limit\": 1, \"singleBatch\": true, \"lsid\": {\"id\": {\"$binary\": {\"base64\": \"GI7ubVhPSsWd7+OwHEFx6Q==\", \"subType\": \"04\"}}}, \"$db\": \"db\"}", "commandName": "find", "databaseName": "db", "requestId": 470211272, "operationId": 470211272, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} - DEBUG:pymongo.command:{"clientId": {"$oid": "65cbe82614be1fc2beb4e4a9"}, "message": "Command succeeded", "durationMS": 0.621, "reply": "{\"cursor\": {\"firstBatch\": [{\"_id\": {\"$oid\": \"65cbdf391a957ed280001417\"}, \"x\": 1}], \"ns\": \"db.test\"}, \"ok\": 1.0}", "commandName": "find", "databaseName": "db", "requestId": 470211272, "operationId": 470211272, "driverConnectionId": 1, "serverConnectionId": 3554, "serverHost": "localhost", "serverPort": 27017} diff --git a/doc/examples/mod_wsgi.rst b/doc/examples/mod_wsgi.rst deleted file mode 100644 index 96d6ce892f..0000000000 --- a/doc/examples/mod_wsgi.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. _pymongo-and-mod_wsgi: - -PyMongo and mod_wsgi -==================== - -To run your application under `mod_wsgi `_, -follow these guidelines: - -* Run ``mod_wsgi`` in daemon mode with the ``WSGIDaemonProcess`` directive. -* Assign each application to a separate daemon with ``WSGIProcessGroup``. -* Use ``WSGIApplicationGroup %{GLOBAL}`` to ensure your application is running - in the daemon's main Python interpreter, not a sub interpreter. - -For example, this ``mod_wsgi`` configuration ensures an application runs in the -main interpreter:: - - - WSGIDaemonProcess my_process - WSGIScriptAlias /my_app /path/to/app.wsgi - WSGIProcessGroup my_process - WSGIApplicationGroup %{GLOBAL} - - -If you have multiple applications that use PyMongo, put each in a separate -daemon, still in the global application group:: - - - WSGIDaemonProcess my_process - WSGIScriptAlias /my_app /path/to/app.wsgi - - WSGIProcessGroup my_process - - - WSGIDaemonProcess my_other_process - WSGIScriptAlias /my_other_app /path/to/other_app.wsgi - - WSGIProcessGroup my_other_process - - - WSGIApplicationGroup %{GLOBAL} - - -Background: ``mod_wsgi`` can run in "embedded" mode when only WSGIScriptAlias -is set, or "daemon" mode with WSGIDaemonProcess. In daemon mode, ``mod_wsgi`` -can run your application in the Python main interpreter, or in sub interpreters. -The correct way to run a PyMongo application is in daemon mode, using the main -interpreter. - -Python C extensions in general have issues running in multiple -Python sub interpreters. These difficulties are explained in the documentation for -`Py_NewInterpreter `_ -and in the `Multiple Python Sub Interpreters -`_ -section of the ``mod_wsgi`` documentation. - -Beginning with PyMongo 2.7, the C extension for BSON detects when it is running -in a sub interpreter and activates a workaround, which adds a small cost to -BSON decoding. To avoid this cost, use ``WSGIApplicationGroup %{GLOBAL}`` to -ensure your application runs in the main interpreter. - -Since your program runs in the main interpreter it should not share its -process with any other applications, lest they interfere with each other's -state. Each application should have its own daemon process, as shown in the -example above. diff --git a/doc/examples/network_compression.rst b/doc/examples/network_compression.rst deleted file mode 100644 index c270dff4b3..0000000000 --- a/doc/examples/network_compression.rst +++ /dev/null @@ -1,39 +0,0 @@ - -.. _network-compression-example: - -Network Compression -=================== - -PyMongo supports network compression where network traffic between the client -and MongoDB server are compressed which reduces the amount of data passed -over the network. By default no compression is used. - -The driver supports the following algorithms: - -- `snappy `_ available in MongoDB 3.4 and later. -- :mod:`zlib` available in MongoDB 3.6 and later. -- `zstandard `_ available in MongoDB 4.2 and later. - -.. note:: snappy and zstandard compression require additional dependencies. See :ref:`optional-deps`. - -Applications can enable wire protocol compression via the ``compressors`` URI and -keyword argument to :meth:`~pymongo.mongo_client.MongoClient`. For example:: - - >>> client = MongoClient(compressors='zlib') - -When multiple compression algorithms are given, the driver selects the first one in the -list supported by the MongoDB instance to which it is connected. For example:: - - >>> client = MongoClient(compressors='snappy,zstandard,zlib') - -The ``compressors`` option can also be set via the URI:: - - >>> client = MongoClient('mongodb://example.com/?compressors=snappy,zstandard,zlib') - -Additionally, zlib compression allows specifying a compression level with supported values from -1 to 9:: - - >>> client = MongoClient(compressors='zlib', zlibCompressionLevel=-1) - -The ``zlibCompressionLevel`` is passed as the ``level`` argument to :func:`zlib.compress`. - -.. seealso:: The MongoDB documentation on `network compression URI options `_. diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst deleted file mode 100644 index 227e849df3..0000000000 --- a/doc/examples/server_selection.rst +++ /dev/null @@ -1,108 +0,0 @@ -Server Selector Example -======================= - -Users can exert fine-grained control over the `server selection algorithm`_ -by setting the ``server_selector`` option on the :class:`~pymongo.MongoClient` -to an appropriate callable. This example shows how to use this functionality -to prefer servers running on ``localhost``. - - -.. warning:: - - Use of custom server selector functions is a power user feature. Misusing - custom server selectors can have unintended consequences such as degraded - read/write performance. - - -.. testsetup:: - - from pymongo import MongoClient - - -.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ - - -Example: Selecting Servers Running on ``localhost`` ---------------------------------------------------- - -To start, we need to write the server selector function that will be used. -The server selector function should accept a list of -:class:`~pymongo.server_description.ServerDescription` objects and return a -list of server descriptions that are suitable for the read or write operation. -A server selector must not create or modify -:class:`~pymongo.server_description.ServerDescription` objects, and must return -the selected instances unchanged. - -In this example, we write a server selector that prioritizes servers running on -``localhost``. This can be desirable when using a sharded cluster with multiple -``mongos``, as locally run queries are likely to see lower latency and higher -throughput. Please note, however, that it is highly dependent on the -application if preferring ``localhost`` is beneficial or not. - -In addition to comparing the hostname with ``localhost``, our server selector -function accounts for the edge case when no servers are running on -``localhost``. In this case, we allow the default server selection logic to -prevail by passing through the received server description list unchanged. -Failure to do this would render the client unable to communicate with MongoDB -in the event that no servers were running on ``localhost``. - - -The described server selection logic is implemented in the following server -selector function: - - -.. doctest:: - - >>> def server_selector(server_descriptions): - ... servers = [ - ... server for server in server_descriptions if server.address[0] == "localhost" - ... ] - ... if not servers: - ... return server_descriptions - ... return servers - ... - - - -Finally, we can create a :class:`~pymongo.MongoClient` instance with this -server selector. - - -.. doctest:: - - >>> client = MongoClient(server_selector=server_selector) - - - -Server Selection Process ------------------------- - -This section dives deeper into the server selection process for reads and -writes. In the case of a write, the driver performs the following operations -(in order) during the selection process: - - -#. Select all writeable servers from the list of known hosts. For a replica set - this is the primary, while for a sharded cluster this is all the known mongoses. - -#. Apply the user-defined server selector function. Note that the custom server - selector is **not** called if there are no servers left from the previous - filtering stage. - -#. Apply the ``localThresholdMS`` setting to the list of remaining hosts. This - whittles the host list down to only contain servers whose latency is at most - ``localThresholdMS`` milliseconds higher than the lowest observed latency. - -#. Select a server at random from the remaining host list. The desired - operation is then performed against the selected server. - - -In the case of **reads** the process is identical except for the first step. -Here, instead of selecting all writeable servers, we select all servers -matching the user's :class:`~pymongo.read_preferences.ReadPreference` from the -list of known hosts. As an example, for a 3-member replica set with a -:class:`~pymongo.read_preferences.Secondary` read preference, we would select -all available secondaries. - - -.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst deleted file mode 100644 index 79458dc2ff..0000000000 --- a/doc/examples/tailable.rst +++ /dev/null @@ -1,42 +0,0 @@ -Tailable Cursors -================ - -By default, MongoDB will automatically close a cursor when the client has -exhausted all results in the cursor. However, for `capped collections -`_ you may -use a `tailable cursor -`_ -that remains open after the client exhausts the results in the initial cursor. - -The following is a basic example of using a tailable cursor to tail the oplog -of a replica set member:: - - import time - - import pymongo - - client = pymongo.MongoClient() - oplog = client.local.oplog.rs - first = oplog.find().sort('$natural', pymongo.ASCENDING).limit(-1).next() - print(first) - ts = first['ts'] - - while True: - # For a regular capped collection CursorType.TAILABLE_AWAIT is the - # only option required to create a tailable cursor. When querying the - # oplog, the oplog_replay option enables an optimization to quickly - # find the 'ts' value we're looking for. The oplog_replay option - # can only be used when querying the oplog. Starting in MongoDB 4.4 - # this option is ignored by the server as queries against the oplog - # are optimized automatically by the MongoDB query engine. - cursor = oplog.find({'ts': {'$gt': ts}}, - cursor_type=pymongo.CursorType.TAILABLE_AWAIT, - oplog_replay=True) - while cursor.alive: - for doc in cursor: - ts = doc['ts'] - print(doc) - # We end up here if the find() returned no documents or if the - # tailable cursor timed out (no new documents were added to the - # collection for more than 1 second). - time.sleep(1) diff --git a/doc/examples/timeouts.rst b/doc/examples/timeouts.rst deleted file mode 100644 index 5171588962..0000000000 --- a/doc/examples/timeouts.rst +++ /dev/null @@ -1,162 +0,0 @@ - -.. _timeout-example: - -Client Side Operation Timeout -============================= - -PyMongo 4.2 introduced :meth:`~pymongo.timeout` and the ``timeoutMS`` -URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. -These features allow applications to more easily limit the amount of time that -one or more operations can execute before control is returned to the app. This -timeout applies to all of the work done to execute the operation, including -but not limited to server selection, connection checkout, serialization, and -server-side execution. - -Basic Usage ------------ - -The following example uses :meth:`~pymongo.timeout` to configure a 10-second -timeout for an :meth:`~pymongo.collection.Collection.insert_one` operation:: - - import pymongo - with pymongo.timeout(10): - coll.insert_one({"name": "Nunu"}) - -The :meth:`~pymongo.timeout` applies to all pymongo operations within the block. -The following example ensures that both the ``insert`` and the ``find`` complete -within 10 seconds total, or raise a timeout error:: - - with pymongo.timeout(10): - coll.insert_one({"name": "Nunu"}) - coll.find_one({"name": "Nunu"}) - -When nesting :func:`~pymongo.timeout`, the nested deadline is capped by the outer -deadline. The deadline can only be shortened, not extended. -When exiting the block, the previous deadline is restored:: - - with pymongo.timeout(5): - coll.find_one() # Uses the 5 second deadline. - with pymongo.timeout(3): - coll.find_one() # Uses the 3 second deadline. - coll.find_one() # Uses the original 5 second deadline. - with pymongo.timeout(10): - coll.find_one() # Still uses the original 5 second deadline. - coll.find_one() # Uses the original 5 second deadline. - -Timeout errors --------------- - -When the :meth:`~pymongo.timeout` with-statement is entered, a deadline is set -for the entire block. When that deadline is exceeded, any blocking pymongo operation -will raise a timeout exception. For example:: - - try: - with pymongo.timeout(10): - coll.insert_one({"name": "Nunu"}) - time.sleep(10) - # The deadline has now expired, the next operation will raise - # a timeout exception. - coll.find_one({"name": "Nunu"}) - except PyMongoError as exc: - if exc.timeout: - print(f"block timed out: {exc!r}") - else: - print(f"failed with non-timeout error: {exc!r}") - -The :attr:`pymongo.errors.PyMongoError.timeout` property (added in PyMongo 4.2) -will be ``True`` when the error was caused by a timeout and ``False`` otherwise. - -The timeoutMS URI option ------------------------- - -PyMongo 4.2 also added support for the ``timeoutMS`` URI and keyword argument to -:class:`~pymongo.mongo_client.MongoClient`. When this option is configured, the -client will automatically apply the timeout to each API call. For example:: - - client = MongoClient("mongodb://localhost/?timeoutMS=10000") - coll = client.test.test - coll.insert_one({"name": "Nunu"}) # Uses a 10-second timeout. - coll.find_one({"name": "Nunu"}) # Also uses a 10-second timeout. - -The above is roughly equivalent to:: - - client = MongoClient() - coll = client.test.test - with pymongo.timeout(10): - coll.insert_one({"name": "Nunu"}) - with pymongo.timeout(10): - coll.find_one({"name": "Nunu"}) - -pymongo.timeout overrides timeoutMS ------------------------------------ - -:meth:`~pymongo.timeout` overrides ``timeoutMS``; within a -:meth:`~pymongo.timeout` block a client's ``timeoutMS`` option is ignored:: - - client = MongoClient("mongodb://localhost/?timeoutMS=10000") - coll = client.test.test - coll.insert_one({"name": "Nunu"}) # Uses the client's 10-second timeout. - # pymongo.timeout overrides the client's timeoutMS. - with pymongo.timeout(20): - coll.insert_one({"name": "Nunu"}) # Uses the 20-second timeout. - with pymongo.timeout(5): - coll.find_one({"name": "Nunu"}) # Uses the 5-second timeout. - -pymongo.timeout is thread safe ------------------------------- - -:meth:`~pymongo.timeout` is thread safe; the timeout only applies to current -thread and multiple threads can configure different timeouts in parallel. - -pymongo.timeout is asyncio safe -------------------------------- - -:meth:`~pymongo.timeout` is asyncio safe; the timeout only applies to current -Task and multiple Tasks can configure different timeouts concurrently. -:meth:`~pymongo.timeout` can be used identically in -`Motor `_, for example:: - - import motor.motor_asyncio - client = motor.motor_asyncio.AsyncIOMotorClient() - coll = client.test.test - with pymongo.timeout(10): - await coll.insert_one({"name": "Nunu"}) - await coll.find_one({"name": "Nunu"}) - -Troubleshooting ---------------- - -There are many timeout errors that can be raised depending on when the timeout -expires. In code, these can be identified with the :attr:`pymongo.errors.PyMongoError.timeout` -property. Some specific timeout errors examples are described below. - -When the client was unable to find an available server to run the operation -within the given timeout:: - - pymongo.errors.ServerSelectionTimeoutError: No servers found yet, Timeout: -0.00202266700216569s, Topology Description: ]> - -When either the client was unable to establish a connection within the given -timeout or the operation was sent but the server was not able to respond in time:: - - pymongo.errors.NetworkTimeout: localhost:27017: timed out - -When the server cancelled the operation because it exceeded the given timeout. -Note that the operation may have partially completed on the server (depending -on the operation):: - - pymongo.errors.ExecutionTimeout: operation exceeded time limit, full error: {'ok': 0.0, 'errmsg': 'operation exceeded time limit', 'code': 50, 'codeName': 'MaxTimeMSExpired'} - -When the client cancelled the operation because it was not possible to complete -within the given timeout:: - - pymongo.errors.ExecutionTimeout: operation would exceed time limit, remaining timeout:0.00196 <= network round trip time:0.00427 - -When the client attempted a write operation but the server could not replicate -that write (according to the configured write concern) within the given timeout:: - - pymongo.errors.WTimeoutError: operation exceeded time limit, full error: {'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}} - -The same error as above but for :meth:`~pymongo.collection.Collection.insert_many` -or :meth:`~pymongo.collection.Collection.bulk_write`:: - - pymongo.errors.BulkWriteError: batch op errors occurred, full error: {'writeErrors': [], 'writeConcernErrors': [{'code': 50, 'codeName': 'MaxTimeMSExpired', 'errmsg': 'operation exceeded time limit', 'errInfo': {'writeConcern': {'w': 1, 'wtimeout': 0}}}], 'nInserted': 2, 'nUpserted': 0, 'nMatched': 0, 'nModified': 0, 'nRemoved': 0, 'upserted': []} diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst deleted file mode 100644 index ee4d75027e..0000000000 --- a/doc/examples/tls.rst +++ /dev/null @@ -1,234 +0,0 @@ -TLS/SSL and PyMongo -=================== - -PyMongo supports connecting to MongoDB over TLS/SSL. This guide covers the -configuration options supported by PyMongo. See `the server documentation -`_ to configure -MongoDB. - -.. warning:: Industry best practices recommend, and some regulations require, - the use of TLS 1.1 or newer. Though no application changes are required for - PyMongo to make use of the newest protocols, some operating systems or - versions may not provide an OpenSSL version new enough to support them. - - Users of macOS older than 10.13 (High Sierra) will need to install Python - from `python.org`_, `homebrew`_, `macports`_, or another similar source. - - Users of Linux or other non-macOS Unix can check their OpenSSL version like - this:: - - $ openssl version - - If the version number is less than 1.0.1 support for TLS 1.1 or newer is not - available. Contact your operating system vendor for a solution or upgrade to - a newer distribution. - - You can check your Python interpreter by installing the `requests`_ module - and executing the following command:: - - python -c "import requests; print(requests.get('https://www.howsmyssl.com/a/check', verify=False).json()['tls_version'])" - - You should see "TLS 1.X" where X is >= 1. - - You can read more about TLS versions and their security implications here: - - ``_ - -.. _python.org: https://www.python.org/downloads/ -.. _homebrew: https://brew.sh/ -.. _macports: https://www.macports.org/ -.. _requests: https://pypi.python.org/pypi/requests - -Basic configuration -................... - -In many cases connecting to MongoDB over TLS/SSL requires nothing more than -passing ``tls=True`` as a keyword argument to -:class:`~pymongo.mongo_client.MongoClient`:: - - >>> client = pymongo.MongoClient('example.com', tls=True) - -Or passing ``tls=true`` in the URI:: - - >>> client = pymongo.MongoClient('mongodb://example.com/?tls=true') - -This configures PyMongo to connect to the server using TLS, verify the server's -certificate and verify that the host you are attempting to connect to is listed -by that certificate. - -Certificate verification policy -............................... - -By default, PyMongo is configured to require a certificate from the server when -TLS is enabled. This is configurable using the ``tlsAllowInvalidCertificates`` -option. To disable this requirement pass ``tlsAllowInvalidCertificates=True`` -as a keyword parameter:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsAllowInvalidCertificates=True) - -Or, in the URI:: - - >>> uri = 'mongodb://example.com/?tls=true&tlsAllowInvalidCertificates=true' - >>> client = pymongo.MongoClient(uri) - -Specifying a CA file -.................... - -In some cases you may want to configure PyMongo to use a specific set of CA -certificates. This is most often the case when you are acting as your own -certificate authority rather than using server certificates signed by a well -known authority. The ``tlsCAFile`` option takes a path to a CA file. It can be -passed as a keyword argument:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCAFile='/path/to/ca.pem') - -Or, in the URI:: - - >>> uri = 'mongodb://example.com/?tls=true&tlsCAFile=/path/to/ca.pem' - >>> client = pymongo.MongoClient(uri) - -Specifying a certificate revocation list -........................................ - -The ``tlsCRLFile`` option takes a path to a CRL file. It can be passed -as a keyword argument:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCRLFile='/path/to/crl.pem') - -Or, in the URI:: - - >>> uri = 'mongodb://example.com/?tls=true&tlsCRLFile=/path/to/crl.pem' - >>> client = pymongo.MongoClient(uri) - -.. note:: Certificate revocation lists and :ref:`OCSP` cannot be used together. - -Client certificates -................... - -PyMongo can be configured to present a client certificate using the -``tlsCertificateKeyFile`` option:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem') - -If the private key for the client certificate is stored in a separate file, -it should be concatenated with the certificate file. For example, to -concatenate a PEM-formatted certificate file ``cert.pem`` and a PEM-formatted -keyfile ``key.pem`` into a single file ``combined.pem``, on Unix systems, -users can run:: - - $ cat key.pem cert.pem > combined.pem - -PyMongo can be configured with the concatenated certificate keyfile using the -``tlsCertificateKeyFile`` option:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCertificateKeyFile='/path/to/combined.pem') - -If the private key contained in the certificate keyfile is encrypted, users -can provide a password or passphrase to decrypt the encrypted private keys -using the ``tlsCertificateKeyFilePassword`` option:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCertificateKeyFile='/path/to/combined.pem', - ... tlsCertificateKeyFilePassword=) - -These options can also be passed as part of the MongoDB URI. - -.. _OCSP: - -OCSP -.... - -Starting with PyMongo 3.11, if PyMongo was installed with the "ocsp" extra:: - - python -m pip install pymongo[ocsp] - -certificate revocation checking is enabled by way of `OCSP (Online Certification -Status Protocol) `_. -MongoDB 4.4+ `staples OCSP responses `_ -to the TLS handshake which PyMongo will verify, failing the TLS handshake if -the stapled OCSP response is invalid or indicates that the peer certificate is -revoked. - -When connecting to a server version older than 4.4, or when a 4.4+ version of -MongoDB does not staple an OCSP response, PyMongo will attempt to connect -directly to an OCSP endpoint if the peer certificate specified one. The TLS -handshake will only fail in this case if the response indicates that the -certificate is revoked. Invalid or malformed responses will be ignored, -favoring availability over maximum security. - -.. _TLSErrors: - -Troubleshooting TLS Errors -.......................... - -TLS errors often fall into three categories - certificate verification failure, -protocol version mismatch or certificate revocation checking failure. An error -message similar to the following means that OpenSSL was not able to verify the -server's certificate:: - - [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed - -This often occurs because OpenSSL does not have access to the system's -root certificates or the certificates are out of date. Linux users should -ensure that they have the latest root certificate updates installed from -their Linux vendor. macOS users using Python 3.7 or newer downloaded -from python.org `may have to run a script included with python -`_ to install -root certificates:: - - open "/Applications/Python /Install Certificates.command" - -Users of older PyPy portable versions may have to `set an environment -variable `_ to tell -OpenSSL where to find root certificates. This is easily done using the `certifi -module `_ from pypi:: - - $ pypy -m pip install certifi - $ export SSL_CERT_FILE=$(pypy -c "import certifi; print(certifi.where())") - -An error message similar to the following message means that the OpenSSL -version used by Python does not support a new enough TLS protocol to connect -to the server:: - - [SSL: TLSV1_ALERT_PROTOCOL_VERSION] tlsv1 alert protocol version - -Industry best practices recommend, and some regulations require, that older -TLS protocols be disabled in some MongoDB deployments. Some deployments may -disable TLS 1.0, others may disable TLS 1.0 and TLS 1.1. See the warning -earlier in this document for troubleshooting steps and solutions. - -An error message similar to the following message means that certificate -revocation checking failed:: - - [('SSL routines', 'tls_process_initial_server_flight', 'invalid status response')] - -See :ref:`OCSP` for more details. - -Python 3.10+ incompatibilities with TLS/SSL on MongoDB <= 4.0 -............................................................. - -Note that `changes made to the ssl module in Python 3.10+ -`_ may cause incompatibilities -with MongoDB <= 4.0. The following are some example errors that may occur with this -combination:: - - SSL handshake failed: localhost:27017: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:997) - SSL handshake failed: localhost:27017: EOF occurred in violation of protocol (_ssl.c:997) - -The MongoDB server logs may show the following error:: - - 2021-06-30T21:22:44.917+0100 E NETWORK [conn16] SSL: error:1408A0C1:SSL routines:ssl3_get_client_hello:no shared cipher - -To resolve this issue, use Python <=3.10, upgrade to MongoDB 4.2+, or install -pymongo with the :ref:`OCSP` extra which relies on PyOpenSSL. diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst deleted file mode 100644 index 375ad14330..0000000000 --- a/doc/examples/type_hints.rst +++ /dev/null @@ -1,332 +0,0 @@ - -.. _type_hints-example: - -Type Hints -========== - -As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python -type checkers can easily find bugs before they reveal themselves in your code. - -If your IDE is configured to use type hints, -it can suggest more appropriate completions and highlight errors in your code. -Some examples include `PyCharm`_, `Sublime Text`_, and `Visual Studio Code`_. - -You can also use the `mypy`_ tool from your command line or in Continuous Integration tests. - -All of the public APIs in PyMongo are fully type hinted, and -several of them support generic parameters for the -type of document object returned when decoding BSON documents. - -Due to `limitations in mypy`_, the default -values for generic document types are not yet provided (they will eventually be ``Dict[str, any]``). - -For a larger set of examples that use types, see the PyMongo `test_typing module`_. - -If you would like to opt out of using the provided types, add the following to -your `mypy config`_: :: - - [mypy-pymongo] - follow_imports = False - - -Basic Usage ------------ - -Note that a type for :class:`~pymongo.mongo_client.MongoClient` must be specified. Here we use the -default, unspecified document type: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> client: MongoClient = MongoClient() - >>> collection = client.test.test - >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) - >>> retrieved = collection.find_one({"x": 1}) - >>> assert isinstance(retrieved, dict) - -For a more accurate typing for document type you can use: - -.. doctest:: - - >>> from typing import Any, Dict - >>> from pymongo import MongoClient - >>> client: MongoClient[Dict[str, Any]] = MongoClient() - >>> collection = client.test.test - >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) - >>> retrieved = collection.find_one({"x": 1}) - >>> assert isinstance(retrieved, dict) - -Typed Client ------------- - -:class:`~pymongo.mongo_client.MongoClient` is generic on the document type used to decode BSON documents. - -You can specify a :class:`~bson.raw_bson.RawBSONDocument` document type: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> from bson.raw_bson import RawBSONDocument - >>> client = MongoClient(document_class=RawBSONDocument) - >>> collection = client.test.test - >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]}) - >>> result = collection.find_one({"x": 1}) - >>> assert isinstance(result, RawBSONDocument) - -Subclasses of :py:class:`collections.abc.Mapping` can also be used, such as :class:`~bson.son.SON`: - -.. doctest:: - - >>> from bson import SON - >>> from pymongo import MongoClient - >>> client = MongoClient(document_class=SON[str, int]) - >>> collection = client.test.test - >>> inserted = collection.insert_one({"x": 1, "y": 2}) - >>> result = collection.find_one({"x": 1}) - >>> assert result is not None - >>> assert result["x"] == 1 - -Note that when using :class:`~bson.son.SON`, the key and value types must be given, e.g. ``SON[str, Any]``. - - -Typed Collection ----------------- - -You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a -:class:`~pymongo.collection.Collection`. Note that all `schema validation`_ for inserts and updates is done on the server. -These methods automatically add an "_id" field. - -.. doctest:: - :pyversion: >= 3.8 - - >>> from typing import TypedDict - >>> from pymongo import MongoClient - >>> from pymongo.collection import Collection - >>> class Movie(TypedDict): - ... name: str - ... year: int - ... - >>> client: MongoClient = MongoClient() - >>> collection: Collection[Movie] = client.test.test - >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) - >>> result = collection.find_one({"name": "Jurassic Park"}) - >>> assert result is not None - >>> assert result["year"] == 1993 - >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. - >>> assert result["_id"] # type:ignore[typeddict-item] - -This same typing scheme works for all of the insert methods (:meth:`~pymongo.collection.Collection.insert_one`, -:meth:`~pymongo.collection.Collection.insert_many`, and :meth:`~pymongo.collection.Collection.bulk_write`). -For ``bulk_write`` both :class:`~pymongo.operations.InsertOne` and :class:`~pymongo.operations.ReplaceOne` operators are generic. - -.. doctest:: - :pyversion: >= 3.8 - - >>> from typing import TypedDict - >>> from pymongo import MongoClient - >>> from pymongo.operations import InsertOne - >>> from pymongo.collection import Collection - >>> client: MongoClient = MongoClient() - >>> collection: Collection[Movie] = client.test.test - >>> inserted = collection.bulk_write([InsertOne(Movie(name="Jurassic Park", year=1993))]) - >>> result = collection.find_one({"name": "Jurassic Park"}) - >>> assert result is not None - >>> assert result["year"] == 1993 - >>> # This will raise a type-checking error, despite being present, because it is added by PyMongo. - >>> assert result["_id"] # type:ignore[typeddict-item] - -Modeling Document Types with TypedDict --------------------------------------- - -You can use :py:class:`~typing.TypedDict` (Python 3.8+) to model structured data. -As noted above, PyMongo will automatically add an ``_id`` field if it is not present. This also applies to TypedDict. -There are three approaches to this: - - 1. Do not specify ``_id`` at all. It will be inserted automatically, and can be retrieved at run-time, but will yield a type-checking error unless explicitly ignored. - - 2. Specify ``_id`` explicitly. This will mean that every instance of your custom TypedDict class will have to pass a value for ``_id``. - - 3. Make use of :py:class:`~typing.NotRequired`. This has the flexibility of option 1, but with the ability to access the ``_id`` field without causing a type-checking error. - -Note: to use :py:class:`~typing.TypedDict` and :py:class:`~typing.NotRequired` in earlier versions of Python (<3.8, <3.11), use the ``typing_extensions`` package. - -.. doctest:: typed-dict-example - :pyversion: >= 3.11 - - >>> from typing import TypedDict, NotRequired - >>> from pymongo import MongoClient - >>> from pymongo.collection import Collection - >>> from bson import ObjectId - >>> class Movie(TypedDict): - ... name: str - ... year: int - ... - >>> class ExplicitMovie(TypedDict): - ... _id: ObjectId - ... name: str - ... year: int - ... - >>> class NotRequiredMovie(TypedDict): - ... _id: NotRequired[ObjectId] - ... name: str - ... year: int - ... - >>> client: MongoClient = MongoClient() - >>> collection: Collection[Movie] = client.test.test - >>> inserted = collection.insert_one(Movie(name="Jurassic Park", year=1993)) - >>> result = collection.find_one({"name": "Jurassic Park"}) - >>> assert result is not None - >>> # This will yield a type-checking error, despite being present, because it is added by PyMongo. - >>> assert result["_id"] # type:ignore[typeddict-item] - >>> collection: Collection[ExplicitMovie] = client.test.test - >>> # Note that the _id keyword argument must be supplied - >>> inserted = collection.insert_one( - ... ExplicitMovie(_id=ObjectId(), name="Jurassic Park", year=1993) - ... ) - >>> result = collection.find_one({"name": "Jurassic Park"}) - >>> assert result is not None - >>> # This will not raise a type-checking error. - >>> assert result["_id"] - >>> collection: Collection[NotRequiredMovie] = client.test.test - >>> # Note the lack of _id, similar to the first example - >>> inserted = collection.insert_one(NotRequiredMovie(name="Jurassic Park", year=1993)) - >>> result = collection.find_one({"name": "Jurassic Park"}) - >>> assert result is not None - >>> # This will not raise a type-checking error, despite not being provided explicitly. - >>> assert result["_id"] - - -Typed Database --------------- - -While less common, you could specify that the documents in an entire database -match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+). - - -.. doctest:: - - >>> from typing import TypedDict - >>> from pymongo import MongoClient - >>> from pymongo.database import Database - >>> class Movie(TypedDict): - ... name: str - ... year: int - ... - >>> client: MongoClient = MongoClient() - >>> db: Database[Movie] = client.test - >>> collection = db.test - >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993}) - >>> result = collection.find_one({"name": "Jurassic Park"}) - >>> assert result is not None - >>> assert result["year"] == 1993 - -Typed Command -------------- -When using the :meth:`~pymongo.database.Database.command`, you can specify the document type by providing a custom :class:`~bson.codec_options.CodecOptions`: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> from bson.raw_bson import RawBSONDocument - >>> from bson import CodecOptions - >>> client: MongoClient = MongoClient() - >>> options = CodecOptions(RawBSONDocument) - >>> result = client.admin.command("ping", codec_options=options) - >>> assert isinstance(result, RawBSONDocument) - -Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported. -For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``. - -Typed BSON Decoding -------------------- -You can specify the document type returned by :mod:`bson` decoding functions by providing :class:`~bson.codec_options.CodecOptions`: - -.. doctest:: - - >>> from typing import Any, Dict - >>> from bson import CodecOptions, encode, decode - >>> class MyDict(Dict[str, Any]): - ... def foo(self): - ... return "bar" - ... - >>> options = CodecOptions(document_class=MyDict) - >>> doc = {"x": 1, "y": 2} - >>> bsonbytes = encode(doc, codec_options=options) - >>> rt_document = decode(bsonbytes, codec_options=options) - >>> assert rt_document.foo() == "bar" - -:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported. -For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``. - - -Troubleshooting ---------------- - -Client Type Annotation -~~~~~~~~~~~~~~~~~~~~~~ -If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the following ``mypy`` error:: - - from pymongo import MongoClient - client = MongoClient() # error: Need type annotation for "client" - -The solution is to annotate the type as ``client: MongoClient`` or ``client: MongoClient[Dict[str, Any]]``. See `Basic Usage`_. - -Incompatible Types -~~~~~~~~~~~~~~~~~~ -If you use the generic form of :class:`~pymongo.mongo_client.MongoClient` you -may encounter a ``mypy`` error like:: - - from pymongo import MongoClient - - client: MongoClient = MongoClient() - client.test.test.insert_many( - {"a": 1} - ) # error: Dict entry 0 has incompatible type "str": "int"; - # expected "Mapping[str, Any]": "int" - - -The solution is to use ``client: MongoClient[Dict[str, Any]]`` as used in -`Basic Usage`_ . - -Actual Type Errors -~~~~~~~~~~~~~~~~~~ - -Other times ``mypy`` will catch an actual error, like the following code:: - - from pymongo import MongoClient - from typing import Mapping - client: MongoClient = MongoClient() - client.test.test.insert_one( - [{}] - ) # error: Argument 1 to "insert_one" of "Collection" has - # incompatible type "List[Dict[, ]]"; - # expected "Mapping[str, Any]" - -In this case the solution is to use ``insert_one({})``, passing a document instead of a list. - -Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocument`, which is read-only.:: - - from bson.raw_bson import RawBSONDocument - from pymongo import MongoClient - - client = MongoClient(document_class=RawBSONDocument) - coll = client.test.test - doc = {"my": "doc"} - coll.insert_one(doc) - retrieved = coll.find_one({"_id": doc["_id"]}) - assert retrieved is not None - assert len(retrieved.raw) > 0 - retrieved[ - "foo" - ] = "bar" # error: Unsupported target for indexed assignment - # ("RawBSONDocument") [index] - -.. _PyCharm: https://www.jetbrains.com/help/pycharm/type-hinting-in-product.html -.. _Visual Studio Code: https://code.visualstudio.com/docs/languages/python -.. _Sublime Text: https://github.com/sublimelsp/LSP-pyright -.. _type hints: https://docs.python.org/3/library/typing.html -.. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html -.. _limitations in mypy: https://github.com/python/mypy/issues/3737 -.. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html -.. _test_typing module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_typing.py -.. _schema validation: https://www.mongodb.com/docs/manual/core/schema-validation/#when-to-use-schema-validation diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst deleted file mode 100644 index 350db14d9a..0000000000 --- a/doc/examples/uuid.rst +++ /dev/null @@ -1,512 +0,0 @@ - -.. _handling-uuid-data-example: - -Handling UUID Data -================== - -PyMongo ships with built-in support for dealing with UUID types. -It is straightforward to store native :class:`uuid.UUID` objects -to MongoDB and retrieve them as native :class:`uuid.UUID` objects:: - - from pymongo import MongoClient - from bson.binary import UuidRepresentation - from uuid import uuid4 - - # use the 'standard' representation for cross-language compatibility. - client = MongoClient(uuidRepresentation='standard') - collection = client.get_database('uuid_db').get_collection('uuid_coll') - - # remove all documents from collection - collection.delete_many({}) - - # create a native uuid object - uuid_obj = uuid4() - - # save the native uuid object to MongoDB - collection.insert_one({'uuid': uuid_obj}) - - # retrieve the stored uuid object from MongoDB - document = collection.find_one({}) - - # check that the retrieved UUID matches the inserted UUID - assert document['uuid'] == uuid_obj - -Native :class:`uuid.UUID` objects can also be used as part of MongoDB -queries:: - - document = collection.find({'uuid': uuid_obj}) - assert document['uuid'] == uuid_obj - -The above examples illustrate the simplest of use-cases - one where the -UUID is generated by, and used in the same application. However, -the situation can be significantly more complex when dealing with a MongoDB -deployment that contains UUIDs created by other drivers as the Java and CSharp -drivers have historically encoded UUIDs using a byte-order that is different -from the one used by PyMongo. Applications that require interoperability across -these drivers must specify the appropriate -:class:`~bson.binary.UuidRepresentation`. - -In the following sections, we describe how drivers have historically differed -in their encoding of UUIDs, and how applications can use the -:class:`~bson.binary.UuidRepresentation` configuration option to maintain -cross-language compatibility. - -.. attention:: New applications that do not share a MongoDB deployment with - any other application and that have never stored UUIDs in MongoDB - should use the ``standard`` UUID representation for cross-language - compatibility. See :ref:`configuring-uuid-representation` for details - on how to configure the :class:`~bson.binary.UuidRepresentation`. - -.. _example-legacy-uuid: - -Legacy Handling of UUID Data ----------------------------- - -Historically, MongoDB Drivers have used different byte-ordering -while serializing UUID types to :class:`~bson.binary.Binary`. -Consider, for instance, a UUID with the following canonical textual -representation:: - - 00112233-4455-6677-8899-aabbccddeeff - -This UUID would historically be serialized by the Python driver as:: - - 00112233-4455-6677-8899-aabbccddeeff - -The same UUID would historically be serialized by the C# driver as:: - - 33221100-5544-7766-8899-aabbccddeeff - -Finally, the same UUID would historically be serialized by the Java driver as:: - - 77665544-3322-1100-ffee-ddccbbaa9988 - -.. note:: For in-depth information about the the byte-order historically - used by different drivers, see the `Handling of Native UUID Types - Specification - `_. - -This difference in the byte-order of UUIDs encoded by different drivers can -result in highly unintuitive behavior in some scenarios. We detail two such -scenarios in the next sections. - -Scenario 1: Applications Share a MongoDB Deployment -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Consider the following situation: - -* Application ``C`` written in C# generates a UUID and uses it as the ``_id`` - of a document that it proceeds to insert into the ``uuid_test`` collection of - the ``example_db`` database. Let's assume that the canonical textual - representation of the generated UUID is:: - - 00112233-4455-6677-8899-aabbccddeeff - -* Application ``P`` written in Python attempts to ``find`` the document - written by application ``C`` in the following manner:: - - from uuid import UUID - collection = client.example_db.uuid_test - result = collection.find_one({'_id': UUID('00112233-4455-6677-8899-aabbccddeeff')}) - - In this instance, ``result`` will never be the document that - was inserted by application ``C`` in the previous step. This is because of - the different byte-order used by the C# driver for representing UUIDs as - BSON Binary. The following query, on the other hand, will successfully find - this document:: - - result = collection.find_one({'_id': UUID('33221100-5544-7766-8899-aabbccddeeff')}) - -This example demonstrates how the differing byte-order used by different -drivers can hamper interoperability. To workaround this problem, users should -configure their ``MongoClient`` with the appropriate -:class:`~bson.binary.UuidRepresentation` (in this case, ``client`` in application -``P`` can be configured to use the -:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation to -avoid the unintuitive behavior) as described in -:ref:`configuring-uuid-representation`. - -Scenario 2: Round-Tripping UUIDs -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -In the following examples, we see how using a misconfigured -:class:`~bson.binary.UuidRepresentation` can cause an application -to inadvertently change the :class:`~bson.binary.Binary` subtype, and in some -cases, the bytes of the :class:`~bson.binary.Binary` field itself when -round-tripping documents containing UUIDs. - -Consider the following situation:: - - from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import Binary, UuidRepresentation - from uuid import uuid4 - - # Using UuidRepresentation.PYTHON_LEGACY stores a Binary subtype-3 UUID - python_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY) - input_uuid = uuid4() - collection = client.testdb.get_collection('test', codec_options=python_opts) - collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) - assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})['_id'] == 'foo' - - # Retrieving this document using UuidRepresentation.STANDARD returns a Binary instance - std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - std_collection = client.testdb.get_collection('test', codec_options=std_opts) - doc = std_collection.find_one({'_id': 'foo'}) - assert isinstance(doc['uuid'], Binary) - - # Round-tripping the retrieved document yields the exact same document - std_collection.replace_one({'_id': 'foo'}, doc) - round_tripped_doc = collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) - assert doc == round_tripped_doc - - -In this example, round-tripping the document using the incorrect -:class:`~bson.binary.UuidRepresentation` (``STANDARD`` instead of -``PYTHON_LEGACY``) changes the :class:`~bson.binary.Binary` subtype as a -side-effect. **Note that this can also happen when the situation is reversed - -i.e. when the original document is written using ``STANDARD`` representation -and then round-tripped using the ``PYTHON_LEGACY`` representation.** - -In the next example, we see the consequences of incorrectly using a -representation that modifies byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``) -when round-tripping documents:: - - from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import Binary, UuidRepresentation - from uuid import uuid4 - - # Using UuidRepresentation.STANDARD stores a Binary subtype-4 UUID - std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - input_uuid = uuid4() - collection = client.testdb.get_collection('test', codec_options=std_opts) - collection.insert_one({'_id': 'baz', 'uuid': input_uuid}) - assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)})['_id'] == 'baz' - - # Retrieving this document using UuidRepresentation.JAVA_LEGACY returns a native UUID - # without modifying the UUID byte-order - java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) - java_collection = client.testdb.get_collection('test', codec_options=java_opts) - doc = java_collection.find_one({'_id': 'baz'}) - assert doc['uuid'] == input_uuid - - # Round-tripping the retrieved document silently changes the Binary bytes and subtype - java_collection.replace_one({'_id': 'baz'}, doc) - assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) is None - assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)}) is None - round_tripped_doc = collection.find_one({'_id': 'baz'}) - assert round_tripped_doc['uuid'] == Binary(input_uuid.bytes, 3).as_uuid(UuidRepresentation.JAVA_LEGACY) - - -In this case, using the incorrect :class:`~bson.binary.UuidRepresentation` -(``JAVA_LEGACY`` instead of ``STANDARD``) changes the -:class:`~bson.binary.Binary` bytes and subtype as a side-effect. -**Note that this happens when any representation that -manipulates byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``) is incorrectly -used to round-trip UUIDs written with ``STANDARD``. When the situation is -reversed - i.e. when the original document is written using ``CSHARP_LEGACY`` -or ``JAVA_LEGACY`` and then round-tripped using ``STANDARD`` - -only the :class:`~bson.binary.Binary` subtype is changed.** - -.. note:: Starting in PyMongo 4.0, these issue will be resolved as - the ``STANDARD`` representation will decode Binary subtype 3 fields as - :class:`~bson.binary.Binary` objects of subtype 3 (instead of - :class:`uuid.UUID`), and each of the ``LEGACY_*`` representations will - decode Binary subtype 4 fields to :class:`~bson.binary.Binary` objects of - subtype 4 (instead of :class:`uuid.UUID`). - -.. _configuring-uuid-representation: - -Configuring a UUID Representation ---------------------------------- - -Users can workaround the problems described above by configuring their -applications with the appropriate :class:`~bson.binary.UuidRepresentation`. -Configuring the representation modifies PyMongo's behavior while -encoding :class:`uuid.UUID` objects to BSON and decoding -Binary subtype 3 and 4 fields from BSON. - -Applications can set the UUID representation in one of the following ways: - -#. At the ``MongoClient`` level using the ``uuidRepresentation`` URI option, - e.g.:: - - client = MongoClient("mongodb://a:27107/?uuidRepresentation=standard") - - Valid values are: - - .. list-table:: - :header-rows: 1 - - * - Value - - UUID Representation - - * - ``unspecified`` - - :ref:`unspecified-representation-details` - - * - ``standard`` - - :ref:`standard-representation-details` - - * - ``pythonLegacy`` - - :ref:`python-legacy-representation-details` - - * - ``javaLegacy`` - - :ref:`java-legacy-representation-details` - - * - ``csharpLegacy`` - - :ref:`csharp-legacy-representation-details` - -#. At the ``MongoClient`` level using the ``uuidRepresentation`` kwarg - option, e.g.:: - - from bson.binary import UuidRepresentation - client = MongoClient(uuidRepresentation=UuidRepresentation.STANDARD) - -#. At the ``Database`` or ``Collection`` level by supplying a suitable - :class:`~bson.codec_options.CodecOptions` instance, e.g.:: - - from bson.codec_options import CodecOptions - csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) - java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) - - # Get database/collection from client with csharpLegacy UUID representation - csharp_database = client.get_database('csharp_db', codec_options=csharp_opts) - csharp_collection = client.testdb.get_collection('csharp_coll', codec_options=csharp_opts) - - # Get database/collection from existing database/collection with javaLegacy UUID representation - java_database = csharp_database.with_options(codec_options=java_opts) - java_collection = csharp_collection.with_options(codec_options=java_opts) - -Supported UUID Representations ------------------------------- - -.. list-table:: - :header-rows: 1 - - * - UUID Representation - - Default? - - Encode :class:`uuid.UUID` to - - Decode :class:`~bson.binary.Binary` subtype 4 to - - Decode :class:`~bson.binary.Binary` subtype 3 to - - * - :ref:`standard-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - - :class:`~bson.binary.Binary` subtype 3 - - * - :ref:`unspecified-representation-details` - - Yes, in PyMongo>=4 - - Raise :exc:`ValueError` - - :class:`~bson.binary.Binary` subtype 4 - - :class:`~bson.binary.Binary` subtype 3 - - * - :ref:`python-legacy-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 3 with standard byte-order - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - - * - :ref:`java-legacy-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 3 with Java legacy byte-order - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - - * - :ref:`csharp-legacy-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 3 with C# legacy byte-order - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - -We now detail the behavior and use-case for each supported UUID -representation. - -.. _unspecified-representation-details: - -``UNSPECIFIED`` -^^^^^^^^^^^^^^^ - -.. attention:: Starting in PyMongo 4.0, - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` is the default - UUID representation used by PyMongo. - -The :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` representation -prevents the incorrect interpretation of UUID bytes by stopping short of -automatically converting UUID fields in BSON to native UUID types. Decoding -a UUID when using this representation returns a :class:`~bson.binary.Binary` -object instead. If required, users can coerce the decoded -:class:`~bson.binary.Binary` objects into native UUIDs using the -:meth:`~bson.binary.Binary.as_uuid` method and specifying the appropriate -representation format. The following example shows -what this might look like for a UUID stored by the C# driver:: - - from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import Binary, UuidRepresentation - from uuid import uuid4 - - # Using UuidRepresentation.CSHARP_LEGACY - csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) - - # Store a legacy C#-formatted UUID - input_uuid = uuid4() - collection = client.testdb.get_collection('test', codec_options=csharp_opts) - collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) - - # Using UuidRepresentation.UNSPECIFIED - unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) - unspec_collection = client.testdb.get_collection('test', codec_options=unspec_opts) - - # UUID fields are decoded as Binary when UuidRepresentation.UNSPECIFIED is configured - document = unspec_collection.find_one({'_id': 'foo'}) - decoded_field = document['uuid'] - assert isinstance(decoded_field, Binary) - - # Binary.as_uuid() can be used to coerce the decoded value to a native UUID - decoded_uuid = decoded_field.as_uuid(UuidRepresentation.CSHARP_LEGACY) - assert decoded_uuid == input_uuid - -Native :class:`uuid.UUID` objects cannot directly be encoded to -:class:`~bson.binary.Binary` when the UUID representation is ``UNSPECIFIED`` -and attempting to do so will result in an exception:: - - unspec_collection.insert_one({'_id': 'bar', 'uuid': uuid4()}) - Traceback (most recent call last): - ... - ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. See the documentation for UuidRepresentation for more information. - -Instead, applications using :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` -must explicitly coerce a native UUID using the -:meth:`~bson.binary.Binary.from_uuid` method:: - - explicit_binary = Binary.from_uuid(uuid4(), UuidRepresentation.STANDARD) - unspec_collection.insert_one({'_id': 'bar', 'uuid': explicit_binary}) - -.. _standard-representation-details: - -``STANDARD`` -^^^^^^^^^^^^ - -.. attention:: This UUID representation should be used by new applications or - applications that are encoding and/or decoding UUIDs in MongoDB for the - first time. - -The :data:`~bson.binary.UuidRepresentation.STANDARD` representation -enables cross-language compatibility by ensuring the same byte-ordering -when encoding UUIDs from all drivers. UUIDs written by a driver with this -representation configured will be handled correctly by every other provided -it is also configured with the ``STANDARD`` representation. - -``STANDARD`` encodes native :class:`uuid.UUID` objects to -:class:`~bson.binary.Binary` subtype 4 objects. - -.. _python-legacy-representation-details: - -``PYTHON_LEGACY`` -^^^^^^^^^^^^^^^^^ - -.. attention:: This uuid representation should be used when reading UUIDs - generated by existing applications that use the Python driver - but **don't** explicitly set a UUID representation. - -.. attention:: :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` - was the default uuid representation in PyMongo 3. - -The :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` representation -corresponds to the legacy representation of UUIDs used by PyMongo. This -representation conforms with -`RFC 4122 Section 4.1.2 `_. - -The following example illustrates the use of this representation:: - - from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import Binary, UuidRepresentation - - # No configured UUID representation - collection = client.python_legacy.get_collection('test', codec_options=DEFAULT_CODEC_OPTIONS) - - # Using UuidRepresentation.PYTHON_LEGACY - pylegacy_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY) - pylegacy_collection = client.python_legacy.get_collection('test', codec_options=pylegacy_opts) - - # UUIDs written by PyMongo 3 with no UuidRepresentation configured - # (or PyMongo 4.0 with PYTHON_LEGACY) can be queried using PYTHON_LEGACY - uuid_1 = uuid4() - pylegacy_collection.insert_one({'uuid': uuid_1}) - document = pylegacy_collection.find_one({'uuid': uuid_1}) - -``PYTHON_LEGACY`` encodes native :class:`uuid.UUID` objects to -:class:`~bson.binary.Binary` subtype 3 objects, preserving the same -byte-order as :attr:`~uuid.UUID.bytes`:: - - from bson.binary import Binary - - document = collection.find_one({'uuid': Binary(uuid_2.bytes, subtype=3)}) - assert document['uuid'] == uuid_2 - -.. _java-legacy-representation-details: - -``JAVA_LEGACY`` -^^^^^^^^^^^^^^^ - -.. attention:: This UUID representation should be used when reading UUIDs - written to MongoDB by the legacy applications (i.e. applications that don't - use the ``STANDARD`` representation) using the Java driver. - -The :data:`~bson.binary.UuidRepresentation.JAVA_LEGACY` representation -corresponds to the legacy representation of UUIDs used by the MongoDB Java -Driver. - -.. note:: The ``JAVA_LEGACY`` representation reverses the order of bytes 0-7, - and bytes 8-15. - -As an example, consider the same UUID described in :ref:`example-legacy-uuid`. -Let us assume that an application used the Java driver without an explicitly -specified UUID representation to insert the example UUID -``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this -value using ``PYTHON_LEGACY``, we end up with an entirely different UUID:: - - UUID('77665544-3322-1100-ffee-ddccbbaa9988') - -However, if we explicitly set the representation to -:data:`~bson.binary.UuidRepresentation.JAVA_LEGACY`, we get the correct result:: - - UUID('00112233-4455-6677-8899-aabbccddeeff') - -PyMongo uses the specified UUID representation to reorder the BSON bytes and -load them correctly. ``JAVA_LEGACY`` encodes native :class:`uuid.UUID` objects -to :class:`~bson.binary.Binary` subtype 3 objects, while performing the same -byte-reordering as the legacy Java driver's UUID to BSON encoder. - -.. _csharp-legacy-representation-details: - -``CSHARP_LEGACY`` -^^^^^^^^^^^^^^^^^ - -.. attention:: This UUID representation should be used when reading UUIDs - written to MongoDB by the legacy applications (i.e. applications that don't - use the ``STANDARD`` representation) using the C# driver. - -The :data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation -corresponds to the legacy representation of UUIDs used by the MongoDB Java -Driver. - -.. note:: The ``CSHARP_LEGACY`` representation reverses the order of bytes 0-3, - bytes 4-5, and bytes 6-7. - -As an example, consider the same UUID described in :ref:`example-legacy-uuid`. -Let us assume that an application used the C# driver without an explicitly -specified UUID representation to insert the example UUID -``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this -value using PYTHON_LEGACY, we end up with an entirely different UUID:: - - UUID('33221100-5544-7766-8899-aabbccddeeff') - -However, if we explicitly set the representation to -:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY`, we get the correct result:: - - UUID('00112233-4455-6677-8899-aabbccddeeff') - -PyMongo uses the specified UUID representation to reorder the BSON bytes and -load them correctly. ``CSHARP_LEGACY`` encodes native :class:`uuid.UUID` -objects to :class:`~bson.binary.Binary` subtype 3 objects, while performing -the same byte-reordering as the legacy C# driver's UUID to BSON encoder. diff --git a/doc/faq.rst b/doc/faq.rst deleted file mode 100644 index cb67ea7fe5..0000000000 --- a/doc/faq.rst +++ /dev/null @@ -1,595 +0,0 @@ -Frequently Asked Questions -========================== - -Is PyMongo thread-safe? ------------------------ - -PyMongo is thread-safe and provides built-in connection pooling -for threaded applications. - -.. _pymongo-fork-safe: - -Is PyMongo fork-safe? ---------------------- - -PyMongo is not fork-safe. Care must be taken when using instances of -:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically, -instances of MongoClient must not be copied from a parent process to -a child process. Instead, the parent process and each child process must -create their own instances of MongoClient. Instances of MongoClient copied from -the parent process have a high probability of deadlock in the child process due -to the inherent incompatibilities between ``fork()``, threads, and locks -described :ref:`below `. PyMongo will attempt to -issue a warning if there is a chance of this deadlock occurring. - -.. _pymongo-fork-safe-details: - -MongoClient spawns multiple threads to run background tasks such as monitoring -connected servers. These threads share state that is protected by instances of -:class:`~threading.Lock`, which are themselves `not fork-safe`_. The -driver is therefore subject to the same limitations as any other multithreaded -code that uses :class:`~threading.Lock` (and mutexes in general). One of these -limitations is that the locks become useless after ``fork()``. During the fork, -all locks are copied over to the child process in the same state as they were -in the parent: if they were locked, the copied locks are also locked. The child -created by ``fork()`` only has one thread, so any locks that were taken out by -other threads in the parent will never be released in the child. The next time -the child process attempts to acquire one of these locks, deadlock occurs. - -Starting in version 4.3, PyMongo utilizes :py:func:`os.register_at_fork` to -reset its locks and other shared state in the child process after a -:py:func:`os.fork` to reduce the frequency of deadlocks. However deadlocks -are still possible because libraries that PyMongo depends on, like `OpenSSL`_ -and `getaddrinfo(3)`_ (on some platforms), are not fork() safe in a -multithreaded application. Linux also imposes the restriction that: - - After a `fork()`_ in a multithreaded program, the child can - safely call only async-signal-safe functions (see - `signal-safety(7)`_) until such time as it calls `execve(2)`_. - -PyMongo relies on functions that are *not* `async-signal-safe`_ and hence the -child process can experience deadlocks or crashes when attempting to call -a non `async-signal-safe`_ function. For examples of deadlocks or crashes -that could occur see `PYTHON-3406`_. - -For a long but interesting read about the problems of Python locks in -multithreaded contexts with ``fork()``, see https://bugs.python.org/issue6721. - -.. _not fork-safe: https://bugs.python.org/issue6721 -.. _OpenSSL: https://github.com/openssl/openssl/issues/19066 -.. _fork(): https://man7.org/linux/man-pages/man2/fork.2.html -.. _signal-safety(7): https://man7.org/linux/man-pages/man7/signal-safety.7.html -.. _async-signal-safe: https://man7.org/linux/man-pages/man7/signal-safety.7.html -.. _execve(2): https://man7.org/linux/man-pages/man2/execve.2.html -.. _getaddrinfo(3): https://man7.org/linux/man-pages/man3/gai_strerror.3.html -.. _PYTHON-3406: https://jira.mongodb.org/browse/PYTHON-3406 - -.. _connection-pooling: - -Can PyMongo help me load the results of my query as a Pandas ``DataFrame``? ---------------------------------------------------------------------------- - -While PyMongo itself does not provide any APIs for working with -numerical or columnar data, -`PyMongoArrow `_ -is a companion library to PyMongo that makes it easy to load MongoDB query result sets as -`Pandas DataFrames `_, -`NumPy ndarrays `_, or -`Apache Arrow Tables `_. - -How does connection pooling work in PyMongo? --------------------------------------------- - -Every :class:`~pymongo.mongo_client.MongoClient` instance has a built-in -connection pool per server in your MongoDB topology. These pools open sockets -on demand to support the number of concurrent MongoDB operations that your -multi-threaded application requires. There is no thread-affinity for sockets. - -The size of each connection pool is capped at ``maxPoolSize``, which defaults -to 100. If there are ``maxPoolSize`` connections to a server and all are in -use, the next request to that server will wait until one of the connections -becomes available. - -The client instance opens two additional sockets per server in your MongoDB -topology for monitoring the server's state. - -For example, a client connected to a 3-node replica set opens 6 monitoring -sockets. It also opens as many sockets as needed to support a multi-threaded -application's concurrent operations on each server, up to ``maxPoolSize``. With -a ``maxPoolSize`` of 100, if the application only uses the primary (the -default), then only the primary connection pool grows and the total connections -is at most 106. If the application uses a -:class:`~pymongo.read_preferences.ReadPreference` to query the secondaries, -their pools also grow and the total connections can reach 306. - -Additionally, the pools are rate limited such that each connection pool can -only create at most 2 connections in parallel at any time. The connection -creation covers covers all the work required to setup a new connection -including DNS, TCP, SSL/TLS, MongoDB handshake, and MongoDB authentication. -For example, if three threads concurrently attempt to check out a connection -from an empty pool, the first two threads will begin creating new connections -while the third thread will wait. The third thread stops waiting when either: - -- one of the first two threads finishes creating a connection, or -- an existing connection is checked back into the pool. - -Rate limiting concurrent connection creation reduces the likelihood of -connection storms and improves the driver's ability to reuse existing -connections. - -It is possible to set the minimum number of concurrent connections to each -server with ``minPoolSize``, which defaults to 0. The connection pool will be -initialized with this number of sockets. If sockets are closed due to any -network errors, causing the total number of sockets (both in use and idle) to -drop below the minimum, more sockets are opened until the minimum is reached. - -The maximum number of milliseconds that a connection can remain idle in the -pool before being removed and replaced can be set with ``maxIdleTimeMS``, which -defaults to ``None`` (no limit). - -The default configuration for a :class:`~pymongo.mongo_client.MongoClient` -works for most applications:: - - client = MongoClient(host, port) - -Create this client **once** for each process, and reuse it for all -operations. It is a common mistake to create a new client for each request, -which is very inefficient. - -To support extremely high numbers of concurrent MongoDB operations within one -process, increase ``maxPoolSize``:: - - client = MongoClient(host, port, maxPoolSize=200) - -... or make it unbounded:: - - client = MongoClient(host, port, maxPoolSize=None) - -Once the pool reaches its maximum size, additional threads have to wait for -sockets to become available. PyMongo does not limit the number of threads -that can wait for sockets to become available and it is the application's -responsibility to limit the size of its thread pool to bound queuing during a -load spike. Threads are allowed to wait for any length of time unless -``waitQueueTimeoutMS`` is defined:: - - client = MongoClient(host, port, waitQueueTimeoutMS=100) - -A thread that waits more than 100ms (in this example) for a socket raises -:exc:`~pymongo.errors.ConnectionFailure`. Use this option if it is more -important to bound the duration of operations during a load spike than it is to -complete every operation. - -When :meth:`~pymongo.mongo_client.MongoClient.close` is called by any thread, -all idle sockets are closed, and all sockets that are in use will be closed as -they are returned to the pool. - -Does PyMongo support Python 3? ------------------------------- - -PyMongo supports CPython 3.9+ and PyPy3.10+. See the :doc:`python3` for details. - -Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? ---------------------------------------------------------------------------------------- -As of PyMongo v4.13, PyMongo fully supports asyncio and `Tornado `_. See `the official docs `_ for more details. - -PyMongo also fully supports :doc:`Gevent `. - -For `Twisted `_, see `TxMongo -`_. Its stated mission is to keep feature -parity with PyMongo. - -.. _writes-and-ids: - -Why does PyMongo add an _id field to all of my documents? ---------------------------------------------------------- - -When a document is inserted to MongoDB using -:meth:`~pymongo.collection.Collection.insert_one`, -:meth:`~pymongo.collection.Collection.insert_many`, or -:meth:`~pymongo.collection.Collection.bulk_write`, and that document does not -include an ``_id`` field, PyMongo automatically adds one for you, set to an -instance of :class:`~bson.objectid.ObjectId`. For example:: - - >>> my_doc = {'x': 1} - >>> collection.insert_one(my_doc) - InsertOneResult(ObjectId('560db337fba522189f171720'), acknowledged=True) - >>> my_doc - {'x': 1, '_id': ObjectId('560db337fba522189f171720')} - -Users often discover this behavior when calling -:meth:`~pymongo.collection.Collection.insert_many` with a list of references -to a single document raises :exc:`~pymongo.errors.BulkWriteError`. Several -Python idioms lead to this pitfall:: - - >>> doc = {} - >>> collection.insert_many(doc for _ in range(10)) - Traceback (most recent call last): - ... - pymongo.errors.BulkWriteError: batch op errors occurred - >>> doc - {'_id': ObjectId('560f171cfba52279f0b0da0c')} - - >>> docs = [{}] - >>> collection.insert_many(docs * 10) - Traceback (most recent call last): - ... - pymongo.errors.BulkWriteError: batch op errors occurred - >>> docs - [{'_id': ObjectId('560f1933fba52279f0b0da0e')}] - -PyMongo adds an ``_id`` field in this manner for a few reasons: - -- All MongoDB documents are required to have an ``_id`` field. -- If PyMongo were to insert a document without an ``_id`` MongoDB would add one - itself, but it would not report the value back to PyMongo. -- Copying the document to insert before adding the ``_id`` field would be - prohibitively expensive for most high write volume applications. - -If you don't want PyMongo to add an ``_id`` to your documents, insert only -documents that already have an ``_id`` field, added by your application. - -Key order in subdocuments -- why does my query work in the shell but not PyMongo? ---------------------------------------------------------------------------------- - -.. - Note: We should rework this section now that Python 3.6+ has ordered dict. - -.. testsetup:: key-order - - from bson.son import SON - from pymongo.mongo_client import MongoClient - - collection = MongoClient().test.collection - collection.drop() - collection.insert_one({"_id": 1.0, "subdocument": SON([("b", 1.0), ("a", 1.0)])}) - -The key-value pairs in a BSON document can have any order (except that ``_id`` -is always first). The mongo shell preserves key order when reading and writing -data. Observe that "b" comes before "a" when we create the document and when it -is displayed: - -.. code-block:: javascript - - > // mongo shell. - > db.collection.insertOne( { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } ) - WriteResult({ "nInserted" : 1 }) - > db.collection.findOne() - { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } - -PyMongo represents BSON documents as Python dicts by default, and the order -of keys in dicts is not defined. That is, a dict declared with the "a" key -first is the same, to Python, as one with "b" first: - - >>> print({'a': 1.0, 'b': 1.0}) - {'a': 1.0, 'b': 1.0} - >>> print({'b': 1.0, 'a': 1.0}) - {'a': 1.0, 'b': 1.0} - -Therefore, Python dicts are not guaranteed to show keys in the order they are -stored in BSON. Here, "a" is shown before "b": - - >>> print(collection.find_one()) - {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} - -To preserve order when reading BSON, use the :class:`~bson.son.SON` class, -which is a dict that remembers its key order. First, get a handle to the -collection, configured to use :class:`~bson.son.SON` instead of dict: - -.. doctest:: key-order - :options: +NORMALIZE_WHITESPACE - - >>> from bson import CodecOptions, SON - >>> opts = CodecOptions(document_class=SON) - >>> opts - CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None), datetime_conversion=DatetimeConversion.DATETIME) - >>> collection_son = collection.with_options(codec_options=opts) - -Now, documents and subdocuments in query results are represented with -:class:`~bson.son.SON` objects: - -.. doctest:: key-order - - >>> print(collection_son.find_one()) - SON([('_id', 1.0), ('subdocument', SON([('b', 1.0), ('a', 1.0)]))]) - -The subdocument's actual storage layout is now visible: "b" is before "a". - -Because a dict's key order is not defined, you cannot predict how it will be -serialized **to** BSON. But MongoDB considers subdocuments equal only if their -keys have the same order. So if you use a dict to query on a subdocument it may -not match: - - >>> collection.find_one({'subdocument': {'a': 1.0, 'b': 1.0}}) is None - True - -Swapping the key order in your query makes no difference: - - >>> collection.find_one({'subdocument': {'b': 1.0, 'a': 1.0}}) is None - True - -... because, as we saw above, Python considers the two dicts the same. - -There are two solutions. First, you can match the subdocument field-by-field: - - >>> collection.find_one({'subdocument.a': 1.0, - ... 'subdocument.b': 1.0}) - {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} - -The query matches any subdocument with an "a" of 1.0 and a "b" of 1.0, -regardless of the order you specify them in Python or the order they are stored -in BSON. Additionally, this query now matches subdocuments with additional -keys besides "a" and "b", whereas the previous query required an exact match. - -The second solution is to use a :class:`~bson.son.SON` to specify the key order: - - >>> query = {'subdocument': SON([('b', 1.0), ('a', 1.0)])} - >>> collection.find_one(query) - {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} - -The key order you use when you create a :class:`~bson.son.SON` is preserved -when it is serialized to BSON and used as a query. Thus you can create a -subdocument that exactly matches the subdocument in the collection. - -.. seealso:: `MongoDB Manual entry on subdocument matching - `_. - -What does *CursorNotFound* cursor id not valid at server mean? --------------------------------------------------------------- -Cursors in MongoDB can timeout on the server if they've been open for -a long time without any operations being performed on them. This can -lead to an :class:`~pymongo.errors.CursorNotFound` exception being -raised when attempting to iterate the cursor. - -How do I change the timeout value for cursors? ----------------------------------------------- -MongoDB doesn't support custom timeouts for cursors, but cursor -timeouts can be turned off entirely. Pass ``no_cursor_timeout=True`` to -:meth:`~pymongo.collection.Collection.find`. - -How can I store :mod:`decimal.Decimal` instances? -------------------------------------------------- - -PyMongo >= 3.4 supports the Decimal128 BSON type introduced in MongoDB 3.4. -See :mod:`~bson.decimal128` for more information. - -MongoDB <= 3.2 only supports IEEE 754 floating points - the same as the -Python float type. The only way PyMongo could store Decimal instances to -these versions of MongoDB would be to convert them to this standard, so -you'd really only be storing floats anyway - we force users to do this -conversion explicitly so that they are aware that it is happening. - -I'm saving ``9.99`` but when I query my document contains ``9.9900000000000002`` - what's going on here? --------------------------------------------------------------------------------------------------------- -The database representation is ``9.99`` as an IEEE floating point (which -is common to MongoDB and Python as well as most other modern -languages). The problem is that ``9.99`` cannot be represented exactly -with a double precision floating point - this is true in some versions of -Python as well: - - >>> 9.99 - 9.9900000000000002 - -The result that you get when you save ``9.99`` with PyMongo is exactly the -same as the result you'd get saving it with the JavaScript shell or -any of the other languages (and as the data you're working with when -you type ``9.99`` into a Python program). - -Can you add attribute style access for documents? -------------------------------------------------- -This request has come up a number of times but we've decided not to -implement anything like this. The relevant `jira case -`_ has some information -about the decision, but here is a brief summary: - -1. This will pollute the attribute namespace for documents, so could - lead to subtle bugs / confusing errors when using a key with the - same name as a dictionary method. - -2. The only reason we even use SON objects instead of regular - dictionaries is to maintain key ordering, since the server - requires this for certain operations. So we're hesitant to - needlessly complicate SON (at some point it's hypothetically - possible we might want to revert back to using dictionaries alone, - without breaking backwards compatibility for everyone). - -3. It's easy (and Pythonic) for new users to deal with documents, - since they behave just like dictionaries. If we start changing - their behavior it adds a barrier to entry for new users - another - class to learn. - -What is the correct way to handle time zones with PyMongo? ----------------------------------------------------------- - -See :doc:`examples/datetimes` for examples on how to handle -:class:`~datetime.datetime` objects correctly. - -How can I save a :mod:`datetime.date` instance? ------------------------------------------------ -PyMongo doesn't support saving :mod:`datetime.date` instances, since -there is no BSON type for dates without times. Rather than having the -driver enforce a convention for converting :mod:`datetime.date` -instances to :mod:`datetime.datetime` instances for you, any -conversion should be performed in your client code. - -.. _web-application-querying-by-objectid: - -When I query for a document by ObjectId in my web application I get no result ------------------------------------------------------------------------------ -It's common in web applications to encode documents' ObjectIds in URLs, like:: - - "/posts/50b3bda58a02fb9a84d8991e" - -Your web framework will pass the ObjectId portion of the URL to your request -handler as a string, so it must be converted to :class:`~bson.objectid.ObjectId` -before it is passed to :meth:`~pymongo.collection.Collection.find_one`. It is a -common mistake to forget to do this conversion. Here's how to do it correctly -in Flask_ (other web frameworks are similar):: - - from pymongo import MongoClient - from bson.objectid import ObjectId - - from flask import Flask, render_template - - client = MongoClient() - app = Flask(__name__) - - @app.route("/posts/<_id>") - def show_post(_id): - # NOTE!: converting _id from string to ObjectId before passing to find_one - post = client.db.posts.find_one({'_id': ObjectId(_id)}) - return render_template('post.html', post=post) - - if __name__ == "__main__": - app.run() - -.. _Flask: http://flask.pocoo.org/ - -.. seealso:: :ref:`querying-by-objectid` - -How can I use PyMongo from Django? ----------------------------------- -`Django `_ is a popular Python web -framework. Django includes an ORM, :mod:`django.db`. Currently, -there's no official MongoDB backend for Django. - -`django-mongodb-engine `_ -is an unofficial MongoDB backend that supports Django aggregations, (atomic) -updates, embedded objects, Map/Reduce and GridFS. It allows you to use most -of Django's built-in features, including the ORM, admin, authentication, site -and session frameworks and caching. - -However, it's easy to use MongoDB (and PyMongo) from Django -without using a Django backend. Certain features of Django that require -:mod:`django.db` (admin, authentication and sessions) will not work -using just MongoDB, but most of what Django provides can still be -used. - -One project which should make working with MongoDB and Django easier -is `mango `_. Mango is a set of -MongoDB backends for Django sessions and authentication (bypassing -:mod:`django.db` entirely). - -.. _using-with-mod-wsgi: - -Does PyMongo work with **mod_wsgi**? ------------------------------------- -Yes. See the configuration guide for :ref:`pymongo-and-mod_wsgi`. - -Does PyMongo work with PythonAnywhere? --------------------------------------- -No. PyMongo creates Python threads which -`PythonAnywhere `_ does not support. For more -information see `PYTHON-1495 `_. - -How can I use something like Python's ``json`` module to encode my documents to JSON? -------------------------------------------------------------------------------------- -:mod:`~bson.json_util` is PyMongo's built in, flexible tool for using -Python's :mod:`json` module with BSON documents and `MongoDB Extended JSON -`_. The -:mod:`json` module won't work out of the box with all documents from PyMongo -as PyMongo supports some special types (like :class:`~bson.objectid.ObjectId` -and :class:`~bson.dbref.DBRef`) that are not supported in JSON. - -`python-bsonjs `_ is a fast -BSON to MongoDB Extended JSON converter built on top of -`libbson `_. ``python-bsonjs`` does not -depend on PyMongo and can offer a nice performance improvement over -:mod:`~bson.json_util`. ``python-bsonjs`` works best with PyMongo when using -:class:`~bson.raw_bson.RawBSONDocument`. - -Why do I get OverflowError decoding dates stored by another language's driver? ------------------------------------------------------------------------------- -PyMongo decodes BSON datetime values to instances of Python's -:class:`datetime.datetime`. Instances of :class:`datetime.datetime` are -limited to years between :data:`datetime.MINYEAR` (usually 1) and -:data:`datetime.MAXYEAR` (usually 9999). Some MongoDB drivers (e.g. the PHP -driver) can store BSON datetimes with year values far outside those supported -by :class:`datetime.datetime`. - -There are a few ways to work around this issue. Starting with PyMongo 4.3, -:func:`bson.decode` can decode BSON datetimes in one of four ways, and can -be specified using the ``datetime_conversion`` parameter of -:class:`~bson.codec_options.CodecOptions`. - -The default option is -:attr:`~bson.codec_options.DatetimeConversion.DATETIME`, which will -attempt to decode as a :class:`datetime.datetime`, allowing -:class:`~builtin.OverflowError` to occur upon out-of-range dates. -:attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` alters -this behavior to instead return :class:`~bson.datetime_ms.DatetimeMS` when -representations are out-of-range, while returning :class:`~datetime.datetime` -objects as before: - -.. doctest:: - - >>> from datetime import datetime - >>> from bson.datetime_ms import DatetimeMS - >>> from bson.codec_options import DatetimeConversion - >>> from pymongo import MongoClient - >>> client = MongoClient(datetime_conversion=DatetimeConversion.DATETIME_AUTO) - >>> client.db.collection.insert_one({"x": datetime(1970, 1, 1)}) - InsertOneResult(ObjectId('...'), acknowledged=True) - >>> client.db.collection.insert_one({"x": DatetimeMS(2**62)}) - InsertOneResult(ObjectId('...'), acknowledged=True) - >>> for x in client.db.collection.find(): - ... print(x) - ... - {'_id': ObjectId('...'), 'x': datetime.datetime(1970, 1, 1, 0, 0)} - {'_id': ObjectId('...'), 'x': DatetimeMS(4611686018427387904)} - -For other options, please refer to -:class:`~bson.codec_options.DatetimeConversion`. - -Another option that does not involve setting ``datetime_conversion`` is to to -filter out documents values outside of the range supported by -:class:`~datetime.datetime`: - - >>> from datetime import datetime - >>> coll = client.test.dates - >>> cur = coll.find({'dt': {'$gte': datetime.min, '$lte': datetime.max}}) - -Another option, assuming you don't need the datetime field, is to filter out -just that field:: - - >>> cur = coll.find({}, projection={'dt': False}) - -.. _multiprocessing: - -Using PyMongo with Multiprocessing ----------------------------------- - -On Unix systems the multiprocessing module spawns processes using ``fork()``. -Care must be taken when using instances of -:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically, -instances of MongoClient must not be copied from a parent process to a child -process. Instead, the parent process and each child process must create their -own instances of MongoClient. For example:: - - # Each process creates its own instance of MongoClient. - def func(): - db = pymongo.MongoClient().mydb - # Do something with db. - - proc = multiprocessing.Process(target=func) - proc.start() - -**Never do this**:: - - client = pymongo.MongoClient() - - # Each child process attempts to copy a global MongoClient - # created in the parent process. Never do this. - def func(): - db = client.mydb - # Do something with db. - - proc = multiprocessing.Process(target=func) - proc.start() - -Instances of MongoClient copied from the parent process have a high probability -of deadlock in the child process due to -:ref:`inherent incompatibilities between fork(), threads, and locks -`. PyMongo will attempt to issue a warning if there -is a chance of this deadlock occurring. - -.. seealso:: :ref:`pymongo-fork-safe` diff --git a/doc/index.rst b/doc/index.rst index c7616ca795..85812d1b14 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -3,70 +3,21 @@ PyMongo |release| Documentation .. note:: The PyMongo documentation has been migrated to the `MongoDB Documentation site `_. - As of PyMongo 4.10, the ReadTheDocs site will contain the detailed changelog and API docs, while the - rest of the documentation will only appear on the MongoDB Documentation site. + This site contains only the detailed changelog and API docs, while the + rest of the documentation appears on the MongoDB Documentation site. Overview -------- **PyMongo** is a Python distribution containing tools for working with `MongoDB `_, and is the recommended way to -work with MongoDB from Python. This documentation attempts to explain -everything you need to know to use **PyMongo**. - -.. todo:: a list of PyMongo's features - -:doc:`installation` - Instructions on how to get the distribution. - -:doc:`tutorial` - Start here for a quick overview. - -:doc:`async-tutorial` - Start here for a quick overview of the asynchronous API. - -:doc:`examples/index` - Examples of how to perform specific tasks. - -:doc:`atlas` - Using PyMongo with MongoDB Atlas. - -:doc:`examples/tls` - Using PyMongo with TLS / SSL. - -:doc:`examples/encryption` - Using PyMongo with In-Use Encryption. - -:doc:`examples/type_hints` - Using PyMongo with type hints. - -:doc:`examples/logging` - Using PyMongo's logging capabilities. - -:doc:`faq` - Some questions that come up often. - -:doc:`migrate-to-pymongo4` - A PyMongo 3.x to 4.x migration guide. - -:doc:`python3` - Frequently asked questions about python 3 support. - -:doc:`compatibility-policy` - Explanation of deprecations, and how to keep pace with changes in PyMongo's - API. +work with MongoDB from Python. :doc:`api/index` The complete API documentation, organized by module. -:doc:`tools` - A listing of Python tools and libraries that have been written for - MongoDB. +:doc:`changelog` + A full list of changes to PyMongo. -:doc:`developer/index` - Developer guide for contributors to PyMongo. - -:doc:`common-issues` - Common issues encountered when using PyMongo. Getting Help ------------ @@ -97,10 +48,6 @@ minor tweaks to this documentation. To contribute, fork the project on `GitHub `_ and send a pull request. -Changes -------- -See the :doc:`changelog` for a full list of changes to PyMongo. - About This Documentation ------------------------ This documentation is generated using the `Sphinx @@ -119,18 +66,6 @@ Indices and tables .. toctree:: :hidden: - atlas - installation - tutorial - async-tutorial - examples/index - faq - compatibility-policy api/index - tools - contributors changelog - python3 - migrate-to-pymongo4 - developer/index - common-issues + contributors diff --git a/doc/installation.rst b/doc/installation.rst deleted file mode 100644 index 837cbf4d97..0000000000 --- a/doc/installation.rst +++ /dev/null @@ -1,197 +0,0 @@ -Installing / Upgrading -====================== -.. highlight:: bash - -**PyMongo** is in the `Python Package Index -`_. - -.. warning:: **Do not install the "bson" package from pypi.** PyMongo comes - with its own bson package; doing "pip install bson" - installs a third-party package that is incompatible with PyMongo. - -Installing with pip -------------------- - -We recommend using `pip `_ -to install pymongo on all platforms:: - - $ python3 -m pip install pymongo - -To get a specific version of pymongo:: - - $ python3 -m pip install pymongo==3.5.1 - -To upgrade using pip:: - - $ python3 -m pip install --upgrade pymongo - -Dependencies ------------- - -PyMongo supports CPython 3.9+ and PyPy3.10+. - -Required dependencies -..................... - -Support for mongodb+srv:// URIs requires `dnspython -`_ - -.. _optional-deps: - -Optional dependencies -..................... - -GSSAPI authentication requires `pykerberos -`_ on Unix or `WinKerberos -`_ on Windows. The correct -dependency can be installed automatically along with PyMongo:: - - $ python3 -m pip install "pymongo[gssapi]" - -:ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws -`_:: - - $ python3 -m pip install "pymongo[aws]" - - - -:ref:`OCSP` requires `PyOpenSSL -`_, `requests -`_ and `service_identity -`_:: - - $ python3 -m pip install "pymongo[ocsp]" - -Wire protocol compression with snappy requires `python-snappy -`_:: - - $ python3 -m pip install "pymongo[snappy]" - -Wire protocol compression with zstandard requires `zstandard -`_:: - - $ python3 -m pip install "pymongo[zstd]" - -:ref:`Client-Side Field Level Encryption` requires `pymongocrypt -`_ and -`pymongo-auth-aws `_:: - - $ python3 -m pip install "pymongo[encryption]" - -You can install all dependencies automatically with the following -command:: - - $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" - -Installing from source ----------------------- - -If you'd rather install directly from the source (i.e. to stay on the -bleeding edge), install the C extension dependencies then check out the -latest source from GitHub and install the driver from the resulting tree:: - - $ git clone https://github.com/mongodb/mongo-python-driver.git pymongo - $ cd pymongo/ - $ pip install . - -Installing from source on Unix -.............................. - -To build the optional C extensions on Linux or another non-macOS Unix you must -have the GNU C compiler (gcc) installed. Depending on your flavor of Unix -(or Linux distribution) you may also need a python development package that -provides the necessary header files for your version of Python. The package -name may vary from distro to distro. - -Debian and Ubuntu users should issue the following command:: - - $ sudo apt-get install build-essential python-dev - -Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, Oracle Linux, -Fedora, etc.) should issue the following command:: - - $ sudo yum install gcc python-devel - -Installing from source on macOS / OSX -..................................... - -If you want to install PyMongo with C extensions from source you will need -the command line developer tools. On modern versions of macOS they can be -installed by running the following in Terminal (found in -/Applications/Utilities/):: - - xcode-select --install - -For older versions of OSX you may need Xcode. See the notes below for various -OSX and Xcode versions. - -**Snow Leopard (10.6)** - Xcode 3 with 'UNIX Development Support'. - -**Snow Leopard Xcode 4**: The Python versions shipped with OSX 10.6.x -are universal binaries. They support i386, PPC, and x86_64. Xcode 4 removed -support for PPC, causing the distutils version shipped with Apple's builds of -Python to fail to build the C extensions if you have Xcode 4 installed. There -is a workaround:: - - # For some Python builds from python.org - $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m pip install pymongo - -See `https://bugs.python.org/issue11623 `_ -for a more detailed explanation. - -**Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.9+ downloaded from python.org. In all cases Xcode must be -installed with 'UNIX Development Support'. - -**Xcode 5.1**: Starting with version 5.1 the version of clang that ships with -Xcode throws an error when it encounters compiler flags it doesn't recognize. -This may cause C extension builds to fail with an error similar to:: - - clang: error: unknown argument: '-mno-fused-madd' [-Wunused-command-line-argument-hard-error-in-future] - -There are workarounds:: - - # Apple specified workaround for Xcode 5.1 - $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install pymongo - - # Alternative workaround using CFLAGS - $ CFLAGS=-Qunused-arguments pip install pymongo - - -Installing from source on Windows -................................. - -If you want to install PyMongo with C extensions from source the following -requirements apply to both CPython and ActiveState's ActivePython: - -Windows -~~~~~~~ - -Install Visual Studio 2015+. - -.. _install-no-c: - -Installing Without C Extensions -------------------------------- - -By default, the driver attempts to build and install optional C -extensions (used for increasing performance) when it is installed. If -any extension fails to build the driver will be installed anyway but a -warning will be printed. - -If you wish to install PyMongo without the C extensions, even if the -extensions build properly, it can be done using a command line option to -*pip install*:: - - $ NO_EXT=1 python -m pip install . - -Installing a beta or release candidate --------------------------------------- - -MongoDB, Inc. may occasionally tag a beta or release candidate for testing by -the community before final release. These releases will not be uploaded to pypi -but can be found on the -`GitHub tags page `_. -They can be installed by passing the full URL for the tag to pip:: - - $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/4.4.0b0.tar.gz diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 68dc1980b9..fda3e2e129 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -1,3 +1,5 @@ +:orphan: + .. _pymongo4-migration-guide: PyMongo 4 Migration Guide @@ -34,7 +36,7 @@ Python 3.6+ PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to upgrade to 4.x must first upgrade to Python 3.6.2+. Users upgrading from -Python 2 should consult the :doc:`python3`. +Python 2 should consult `Python 3 `_. Enable Deprecation Warnings --------------------------- @@ -796,8 +798,7 @@ incoming documents after receiving them from PyMongo. Alternatively, if your application uses the ``SONManipulator`` API to convert custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and :class:`~bson.codec_options.TypeRegistry` APIs may be a suitable alternative. -For more information, see the -:doc:`custom type example `. +For more information, see `Custom Types `_. ``SON().items()`` now returns ``dict_items`` object. ---------------------------------------------------- @@ -982,7 +983,7 @@ you will receive an error like this when attempting to encode a :class:`uuid.UUI ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted... -See :ref:`handling-uuid-data-example` for details. +See `Handling UUIDs `_ for details. Additional BSON classes implement ``__slots__`` ............................................... diff --git a/doc/python3.rst b/doc/python3.rst deleted file mode 100644 index 0a63f968a5..0000000000 --- a/doc/python3.rst +++ /dev/null @@ -1,114 +0,0 @@ -Python 3 FAQ -============ - -What Python 3 versions are supported? -------------------------------------- - -PyMongo supports CPython 3.9+ and PyPy3.10+. - -Are there any PyMongo behavior changes with Python 3? ------------------------------------------------------ - -Only one intentional change. Instances of :class:`bytes` -are encoded as BSON type 5 (Binary data) with subtype 0. -In Python 3 they are decoded back to :class:`bytes`. In -Python 2 they are decoded to :class:`~bson.binary.Binary` -with subtype 0. - -For example, let's insert a :class:`bytes` instance using Python 3 then -read it back. Notice the byte string is decoded back to :class:`bytes`:: - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - >>> c = pymongo.MongoClient() - >>> c.test.bintest.insert_one({'binary': b'this is a byte string'}).inserted_id - ObjectId('4f9086b1fba5222021000000') - >>> c.test.bintest.find_one() - {'binary': b'this is a byte string', '_id': ObjectId('4f9086b1fba5222021000000')} - -Now retrieve the same document in Python 2. Notice the byte string is decoded -to :class:`~bson.binary.Binary`:: - - Python 2.7.6 (default, Feb 26 2014, 10:36:22) - [GCC 4.7.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - >>> c = pymongo.MongoClient() - >>> c.test.bintest.find_one() - {u'binary': Binary('this is a byte string', 0), u'_id': ObjectId('4f9086b1fba5222021000000')} - - -There is a similar change in behavior in parsing JSON binary with subtype 0. -In Python 3 they are decoded into :class:`bytes`. In Python 2 they are -decoded to :class:`~bson.binary.Binary` with subtype 0. - -For example, let's decode a JSON binary subtype 0 using Python 3. Notice the -byte string is decoded to :class:`bytes`:: - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> from bson.json_util import loads - >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') - {'b': b'this is a byte string'} - -Now decode the same JSON in Python 2 . Notice the byte string is decoded -to :class:`~bson.binary.Binary`:: - - Python 2.7.10 (default, Feb 7 2017, 00:08:15) - [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> from bson.json_util import loads - >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') - {u'b': Binary('this is a byte string', 0)} - -Why can't I share pickled ObjectIds between some versions of Python 2 and 3? ----------------------------------------------------------------------------- - -Instances of :class:`~bson.objectid.ObjectId` pickled using Python 2 -can always be unpickled using Python 3. - -If you pickled an ObjectId using Python 2 and want to unpickle it using -Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: - - Python 2.7.6 (default, Feb 26 2014, 10:36:22) - [GCC 4.7.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> oid - ObjectId('4f919ba2fba5225b84000000') - >>> pickle.dumps(oid) - 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') - ObjectId('4f919ba2fba5225b84000000') - - -If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 -you must use ``protocol <= 2``:: - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> oid - ObjectId('4f96f20c430ee6bd06000000') - >>> pickle.dumps(oid, protocol=2) - b'\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c_codecs\nencode\...' - - Python 2.7.15 (default, Jun 21 2018, 15:00:48) - [GCC 7.3.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> pickle.loads('\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c_codecs\nencode\...') - ObjectId('4f96f20c430ee6bd06000000') diff --git a/doc/tools.rst b/doc/tools.rst deleted file mode 100644 index 5a9297ad64..0000000000 --- a/doc/tools.rst +++ /dev/null @@ -1,173 +0,0 @@ -Tools -===== -Many tools have been written for working with **PyMongo**. If you know -of or have created a tool for working with MongoDB from Python please -list it here. - -.. note:: We try to keep this list current. As such, projects that - have not been updated recently or appear to be unmaintained will - occasionally be removed from the list or moved to the back (to keep - the list from becoming too intimidating). - - If a project gets removed that is still being developed or is in active use - please let us know or add it back. - -ORM-like Layers ---------------- -Some people have found that they prefer to work with a layer that -has more features than PyMongo provides. Often, things like models and -validation are desired. To that end, several different ORM-like layers -have been written by various authors. - -It is our recommendation that new users begin by working directly with -PyMongo, as described in the rest of this documentation. Many people -have found that the features of PyMongo are enough for their -needs. Even if you eventually come to the decision to use one of these -layers, the time spent working directly with the driver will have -increased your understanding of how MongoDB actually works. - -MongoEngine - `MongoEngine `_ is another ORM-like - layer on top of PyMongo. It allows you to define schemas for - documents and query collections using syntax inspired by the Django - ORM. The code is available on `GitHub - `_; for more information, see - the `tutorial `_. - -MincePy - `MincePy `_ is an - object-document mapper (ODM) designed to make any Python object storable - and queryable in a MongoDB database. It is designed with machine learning - and big-data computational and experimental science applications in mind - but is entirely general and can be useful to anyone looking to organise, - share, or process large amounts data with as little change to their current - workflow as possible. - -Ming - `Ming `_ is a - library that allows you to enforce schemas on a MongoDB database in - your Python application. It was developed by `SourceForge - `_ in the course of their migration to - MongoDB. - -MotorEngine - `MotorEngine `_ is a port of - MongoEngine to Motor, for asynchronous access with Tornado. - It implements the same modeling APIs to be data-portable, meaning that a - model defined in MongoEngine can be read in MotorEngine. The source is - `available on GitHub `_. - -uMongo - `uMongo `_ is a Python MongoDB ODM. - Its inception comes from two needs: the lack of async ODM and the - difficulty to do document (un)serialization with existing ODMs. - Works with multiple drivers: PyMongo, TxMongo, motor_asyncio, and - mongomock. The source `is available on GitHub - `_ - -Django MongoDB Backend - `Django MongoDB Backend `_ is a - database backend library specifically made for Django. The integration takes - advantage of MongoDB's unique document model capabilities, which align - naturally with Django's philosophy of simplified data modeling and - reduced development complexity. The source is available - `on GitHub `_. - -No longer maintained -"""""""""""""""""""" - -PyMODM - `PyMODM `_ is an ORM-like framework on top - of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick - to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it - provides simple, extensible functionality that can be leveraged by other - libraries to target platforms like Django. At the same time, PyMODM is - powerful enough to be used for developing applications on its own. Complete - documentation is available on `readthedocs - `_. - -MongoKit - The `MongoKit `_ framework - is an ORM-like layer on top of PyMongo. There is also a MongoKit - `google group `_. - -Minimongo - `minimongo `_ is a lightweight, - pythonic interface to MongoDB. It retains pymongo's query and update API, - and provides a number of additional features, including a simple - document-oriented interface, connection pooling, index management, and - collection & database naming helpers. The `source is on GitHub - `_. - -Manga - `Manga `_ aims to be a simpler ORM-like - layer on top of PyMongo. The syntax for defining schema is inspired by the - Django ORM, but Pymongo's query language is maintained. The source `is on - GitHub `_. - -Humongolus - `Humongolus `_ is a lightweight ORM - framework for Python and MongoDB. The name comes from the combination of - MongoDB and `Homunculus `_ (the - concept of a miniature though fully formed human body). Humongolus allows - you to create models/schemas with robust validation. It attempts to be as - pythonic as possible and exposes the pymongo cursor objects whenever - possible. The code is available for download - `at GitHub `_. Tutorials and usage - examples are also available at GitHub. - -Framework Tools ---------------- -This section lists tools and adapters that have been designed to work with -various Python frameworks and libraries. - -* `Djongo `_ is a connector for using - Django with MongoDB as the database backend. Use the Django Admin GUI to add and - modify documents in MongoDB. - The `Djongo Source Code `_ is hosted on GitHub - and the `Djongo package `_ is on pypi. -* `Django MongoDB Engine - `_ is a MongoDB - database backend for Django that completely integrates with its ORM. - For more information `see the tutorial - `_. -* `mango `_ provides MongoDB backends for - Django sessions and authentication (bypassing :mod:`django.db` entirely). -* `Django MongoEngine - `_ is a MongoDB backend for - Django, an `example: - `_. - For more information see ``_ -* `mongodb_beaker `_ is a - project to enable using MongoDB as a backend for `beakers `_ caching / session system. - `The source is on GitHub `_. -* `Log4Mongo `_ is a flexible - Python logging handler that can store logs in MongoDB using normal and capped - collections. -* `MongoLog `_ is a Python logging - handler that stores logs in MongoDB using a capped collection. -* `rod.recipe.mongodb `_ is a - ZC Buildout recipe for downloading and installing MongoDB. -* `mongobox `_ is a tool to run a sandboxed - MongoDB instance from within a python app. -* `Flask-MongoAlchemy `_ Add - Flask support for MongoDB using MongoAlchemy. -* `Flask-MongoKit `_ Flask extension - to better integrate MongoKit into Flask. -* `Flask-PyMongo `_ Flask-PyMongo - bridges Flask and PyMongo. - -Alternative Drivers -------------------- -These are alternatives to PyMongo. - -* `Motor `_ is a full-featured, non-blocking - MongoDB driver for Python Tornado applications. - As of PyMongo v4.13, Motor's features have been merged into PyMongo via the new AsyncMongoClient API. - As a result of this merger, Motor will be officially deprecated on May 14th, 2026. - For more information, see `the official PyMongo docs `_. -* `TxMongo `_ is an asynchronous Twisted - Python driver for MongoDB. -* `MongoMock `_ is a small - library to help testing Python code that interacts with MongoDB via - Pymongo. diff --git a/doc/tutorial.rst b/doc/tutorial.rst deleted file mode 100644 index 46bde3035d..0000000000 --- a/doc/tutorial.rst +++ /dev/null @@ -1,413 +0,0 @@ -Tutorial -======== - -.. testsetup:: - - from pymongo import MongoClient - - client = MongoClient() - client.drop_database("test-database") - -This tutorial is intended as an introduction to working with -**MongoDB** and **PyMongo**. - -Prerequisites -------------- -Before we start, make sure that you have the **PyMongo** distribution -:doc:`installed `. In the Python shell, the following -should run without raising an exception: - -.. doctest:: - - >>> import pymongo - -This tutorial also assumes that a MongoDB instance is running on the -default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you -can start it like so: - -.. code-block:: bash - - $ mongod - -Making a Connection with MongoClient ------------------------------------- -The first step when working with **PyMongo** is to create a -:class:`~pymongo.mongo_client.MongoClient` to the running **mongod** -instance. Doing so is easy: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> client = MongoClient() - -The above code will connect on the default host and port. We can also -specify the host and port explicitly, as follows: - -.. doctest:: - - >>> client = MongoClient("localhost", 27017) - -Or use the MongoDB URI format: - -.. doctest:: - - >>> client = MongoClient("mongodb://localhost:27017/") - -Getting a Database ------------------- -A single instance of MongoDB can support multiple independent -`databases `_. When -working with PyMongo you access databases using attribute style access -on :class:`~pymongo.mongo_client.MongoClient` instances: - -.. doctest:: - - >>> db = client.test_database - -If your database name is such that using attribute style access won't -work (like ``test-database``), you can use dictionary style access -instead: - -.. doctest:: - - >>> db = client["test-database"] - -Getting a Collection --------------------- -A `collection `_ is a -group of documents stored in MongoDB, and can be thought of as roughly -the equivalent of a table in a relational database. Getting a -collection in PyMongo works the same as getting a database: - -.. doctest:: - - >>> collection = db.test_collection - -or (using dictionary style access): - -.. doctest:: - - >>> collection = db["test-collection"] - -An important note about collections (and databases) in MongoDB is that -they are created lazily - none of the above commands have actually -performed any operations on the MongoDB server. Collections and -databases are created when the first document is inserted into them. - -Documents ---------- -Data in MongoDB is represented (and stored) using JSON-style -documents. In PyMongo we use dictionaries to represent documents. As -an example, the following dictionary might be used to represent a blog -post: - -.. doctest:: - - >>> import datetime - >>> post = { - ... "author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.now(tz=datetime.timezone.utc), - ... } - -Note that documents can contain native Python types (like -:class:`datetime.datetime` instances) which will be automatically -converted to and from the appropriate `BSON -`_ types. - -.. todo:: link to table of Python <-> BSON types - -Inserting a Document --------------------- -To insert a document into a collection we can use the -:meth:`~pymongo.collection.Collection.insert_one` method: - -.. doctest:: - - >>> posts = db.posts - >>> post_id = posts.insert_one(post).inserted_id - >>> post_id - ObjectId('...') - -When a document is inserted a special key, ``"_id"``, is automatically -added if the document doesn't already contain an ``"_id"`` key. The value -of ``"_id"`` must be unique across the -collection. :meth:`~pymongo.collection.Collection.insert_one` returns an -instance of :class:`~pymongo.results.InsertOneResult`. For more information -on ``"_id"``, see the `documentation on _id -`_. - -After inserting the first document, the *posts* collection has -actually been created on the server. We can verify this by listing all -of the collections in our database: - -.. doctest:: - - >>> db.list_collection_names() - ['posts'] - -Getting a Single Document With :meth:`~pymongo.collection.Collection.find_one` ------------------------------------------------------------------------------- -The most basic type of query that can be performed in MongoDB is -:meth:`~pymongo.collection.Collection.find_one`. This method returns a -single document matching a query (or ``None`` if there are no -matches). It is useful when you know there is only one matching -document, or are only interested in the first match. Here we use -:meth:`~pymongo.collection.Collection.find_one` to get the first -document from the posts collection: - -.. doctest:: - - >>> import pprint - >>> pprint.pprint(posts.find_one()) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -The result is a dictionary matching the one that we inserted previously. - -.. note:: The returned document contains an ``"_id"``, which was - automatically added on insert. - -:meth:`~pymongo.collection.Collection.find_one` also supports querying -on specific elements that the resulting document must match. To limit -our results to a document with author "Mike" we do: - -.. doctest:: - - >>> pprint.pprint(posts.find_one({"author": "Mike"})) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -If we try with a different author, like "Eliot", we'll get no result: - -.. doctest:: - - >>> posts.find_one({"author": "Eliot"}) - >>> - -.. _querying-by-objectid: - -Querying By ObjectId --------------------- -We can also find a post by its ``_id``, which in our example is an ObjectId: - -.. doctest:: - - >>> post_id - ObjectId(...) - >>> pprint.pprint(posts.find_one({"_id": post_id})) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -Note that an ObjectId is not the same as its string representation: - -.. doctest:: - - >>> post_id_as_str = str(post_id) - >>> posts.find_one({"_id": post_id_as_str}) # No result - >>> - -A common task in web applications is to get an ObjectId from the -request URL and find the matching document. It's necessary in this -case to **convert the ObjectId from a string** before passing it to -``find_one``:: - - from bson.objectid import ObjectId - - # The web framework gets post_id from the URL and passes it as a string - def get(post_id): - # Convert from string to ObjectId: - document = client.db.collection.find_one({'_id': ObjectId(post_id)}) - -.. seealso:: :ref:`web-application-querying-by-objectid` - -Bulk Inserts ------------- -In order to make querying a little more interesting, let's insert a -few more documents. In addition to inserting a single document, we can -also perform *bulk insert* operations, by passing a list as the -first argument to :meth:`~pymongo.collection.Collection.insert_many`. -This will insert each document in the list, sending only a single -command to the server: - -.. doctest:: - - >>> new_posts = [ - ... { - ... "author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14), - ... }, - ... { - ... "author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45), - ... }, - ... ] - >>> result = posts.insert_many(new_posts) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...')] - -There are a couple of interesting things to note about this example: - - - The result from :meth:`~pymongo.collection.Collection.insert_many` now - returns two :class:`~bson.objectid.ObjectId` instances, one for - each inserted document. - - ``new_posts[1]`` has a different "shape" than the other posts - - there is no ``"tags"`` field and we've added a new field, - ``"title"``. This is what we mean when we say that MongoDB is - *schema-free*. - -Querying for More Than One Document ------------------------------------ -To get more than a single document as the result of a query we use the -:meth:`~pymongo.collection.Collection.find` -method. :meth:`~pymongo.collection.Collection.find` returns a -:class:`~pymongo.cursor.Cursor` instance, which allows us to iterate -over all matching documents. For example, we can iterate over every -document in the ``posts`` collection: - -.. doctest:: - - >>> for post in posts.find(): - ... pprint.pprint(post) - ... - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['bulk', 'insert'], - 'text': 'Another post!'} - {'_id': ObjectId('...'), - 'author': 'Eliot', - 'date': datetime.datetime(...), - 'text': 'and pretty easy too!', - 'title': 'MongoDB is fun'} - -Just like we did with :meth:`~pymongo.collection.Collection.find_one`, -we can pass a document to :meth:`~pymongo.collection.Collection.find` -to limit the returned results. Here, we get only those documents whose -author is "Mike": - -.. doctest:: - - >>> for post in posts.find({"author": "Mike"}): - ... pprint.pprint(post) - ... - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['bulk', 'insert'], - 'text': 'Another post!'} - -Counting --------- -If we just want to know how many documents match a query we can -perform a :meth:`~pymongo.collection.Collection.count_documents` operation -instead of a full query. We can get a count of all of the documents -in a collection: - -.. doctest:: - - >>> posts.count_documents({}) - 3 - -or just of those documents that match a specific query: - -.. doctest:: - - >>> posts.count_documents({"author": "Mike"}) - 2 - -Range Queries -------------- -MongoDB supports many different types of `advanced queries -`_. As an -example, lets perform a query where we limit results to posts older -than a certain date, but also sort the results by author: - -.. doctest:: - - >>> d = datetime.datetime(2009, 11, 12, 12) - >>> for post in posts.find({"date": {"$lt": d}}).sort("author"): - ... pprint.pprint(post) - ... - {'_id': ObjectId('...'), - 'author': 'Eliot', - 'date': datetime.datetime(...), - 'text': 'and pretty easy too!', - 'title': 'MongoDB is fun'} - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['bulk', 'insert'], - 'text': 'Another post!'} - -Here we use the special ``"$lt"`` operator to do a range query, and -also call :meth:`~pymongo.cursor.Cursor.sort` to sort the results -by author. - -Indexing --------- - -Adding indexes can help accelerate certain queries and can also add additional -functionality to querying and storing documents. In this example, we'll -demonstrate how to create a `unique index -`_ on a key that rejects -documents whose value for that key already exists in the index. - -First, we'll need to create the index: - -.. doctest:: - - >>> result = db.profiles.create_index([("user_id", pymongo.ASCENDING)], unique=True) - >>> sorted(list(db.profiles.index_information())) - ['_id_', 'user_id_1'] - -Notice that we have two indexes now: one is the index on ``_id`` that MongoDB -creates automatically, and the other is the index on ``user_id`` we just -created. - -Now let's set up some user profiles: - -.. doctest:: - - >>> user_profiles = [{"user_id": 211, "name": "Luke"}, {"user_id": 212, "name": "Ziltoid"}] - >>> result = db.profiles.insert_many(user_profiles) - -The index prevents us from inserting a document whose ``user_id`` is already in -the collection: - -.. doctest:: - :options: +IGNORE_EXCEPTION_DETAIL - - >>> new_profile = {"user_id": 213, "name": "Drew"} - >>> duplicate_profile = {"user_id": 212, "name": "Tommy"} - >>> result = db.profiles.insert_one(new_profile) # This is fine. - >>> result = db.profiles.insert_one(duplicate_profile) - Traceback (most recent call last): - DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } - -.. seealso:: The MongoDB documentation on `indexes `_ diff --git a/pymongo/__init__.py b/pymongo/__init__.py index 95eabef242..ac540d94db 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -165,7 +165,7 @@ def timeout(seconds: Optional[float]) -> ContextManager[None]: :raises: :py:class:`ValueError`: When `seconds` is negative. - See :ref:`timeout-example` for more examples. + See `Limit Server Execution Time `_ for more examples. .. versionadded:: 4.2 """ diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 313c8c7c04..e07c8615dd 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -762,7 +762,7 @@ async def bulk_write( :return: An instance of :class:`~pymongo.results.BulkWriteResult`. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: `bypass_document_validation` requires server version **>= 3.2** @@ -867,7 +867,7 @@ async def insert_one( :return: - An instance of :class:`~pymongo.results.InsertOneResult`. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: `bypass_document_validation` requires server version **>= 3.2** @@ -936,7 +936,7 @@ async def insert_many( :return: An instance of :class:`~pymongo.results.InsertManyResult`. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: `bypass_document_validation` requires server version **>= 3.2** @@ -2041,7 +2041,7 @@ async def estimated_document_count(self, comment: Optional[Any] = None, **kwargs .. versionchanged:: 4.2 This method now always uses the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the - :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are + `versioned API `_. Users of the Stable API with estimated_document_count are recommended to upgrade their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. @@ -2916,7 +2916,7 @@ async def aggregate( .. note:: This method does not support the 'explain' option. Please use `PyMongoExplain `_ - instead. An example is included in the :ref:`aggregate-examples` + instead. An example is included in the `aggregation example `_ documentation. .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of @@ -2977,7 +2977,7 @@ async def aggregate( The :meth:`aggregate` method always returns an AsyncCommandCursor. The pipeline argument must be a list. - .. seealso:: :doc:`/examples/aggregation` + .. seealso:: `Aggregation `_ .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index 09713c37ec..5a1dcfe364 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -893,7 +893,7 @@ async def command( when decoding the command response. .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will + API (see `versioned API `_), then :meth:`command` will automatically add API versioning options to the given command. Explicitly adding API versioning options in the command and declaring an API version on the client is not supported. @@ -994,7 +994,7 @@ async def cursor_command( when decoding the command response. .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will + API (see `versioned API `_), then :meth:`command` will automatically add API versioning options to the given command. Explicitly adding API versioning options in the command and declaring an API version on the client is not supported. diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 9b0757b1a5..149cb3ac85 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -579,7 +579,7 @@ def __init__( creating data keys. It does not provide an API to query keys from the key vault collection, as this can be done directly on the AsyncMongoClient. - See :ref:`explicit-client-side-encryption` for an example. + See `explicit client-side encryption `_ for an example. :param kms_providers: Map of KMS provider options. The `kms_providers` map values differ by provider: @@ -608,7 +608,7 @@ def __init__( KMS providers may be specified with an optional name suffix separated by a colon, for example "kmip:name" or "aws:name". - Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + Named KMS providers do not support `CSFLE on-demand credentials `_. :param key_vault_namespace: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption and decryption. Data keys are stored as documents in this MongoDB diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index b988120d7c..8ca7bca436 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -14,7 +14,7 @@ """Tools for connecting to MongoDB. -.. seealso:: :doc:`/examples/high_availability` for examples of connecting +.. seealso:: `Read and Write Settings `_ for examples of connecting to replica sets or sets of mongos servers. To get a :class:`~pymongo.asynchronous.database.AsyncDatabase` instance from a @@ -263,7 +263,7 @@ def __init__( print("Server not available") .. warning:: When using PyMongo in a multiprocessing context, please - read :ref:`multiprocessing` first. + read `PyMongo multiprocessing `_ first. .. note:: Many of the following options can be passed using a MongoDB URI or keyword parameters. If the same option is passed in a URI and @@ -296,7 +296,7 @@ def __init__( return DatetimeMS objects when the underlying datetime is out-of-range and 'datetime_clamp' to clamp to the minimum and maximum possible datetimes. Defaults to 'datetime'. See - :ref:`handling-out-of-range-datetimes` for details. + `handling out of range datetimes `_ for details. - `directConnection` (optional): if ``True``, forces this client to connect directly to the specified MongoDB host as a standalone. If ``false``, the client connects to the entire replica set of @@ -421,7 +421,7 @@ def __init__( package. By default no compression is used. Compression support must also be enabled on the server. MongoDB 3.6+ supports snappy and zlib compression. MongoDB 4.2+ adds support for zstd. - See :ref:`network-compression-example` for details. + See `compress network traffic `_ for details. - `zlibCompressionLevel`: (int) The zlib compression level to use when zlib is used as the wire protocol compressor. Supported values are -1 through 9. -1 tells the zlib library to use its default @@ -432,7 +432,7 @@ def __init__( values are the strings: "standard", "pythonLegacy", "javaLegacy", "csharpLegacy", and "unspecified" (the default). New applications should consider setting this to "standard" for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. + compatibility. See `handling UUID data `_ for details. - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include @@ -496,7 +496,7 @@ def __init__( is set, it must be a positive integer greater than or equal to 90 seconds. - .. seealso:: :doc:`/examples/server_selection` + .. seealso:: `Customize Server Selection `_ | **Authentication:** @@ -522,7 +522,7 @@ def __init__( To specify the session token for MONGODB-AWS authentication pass ``authMechanismProperties='AWS_SESSION_TOKEN:'``. - .. seealso:: :doc:`/examples/authentication` + .. seealso:: `Authentication `_ | **TLS/SSL configuration:** @@ -585,7 +585,7 @@ def __init__( :class:`~pymongo.encryption_options.AutoEncryptionOpts` which configures this client to automatically encrypt collection commands and automatically decrypt results. See - :ref:`automatic-client-side-encryption` for an example. + `client-side field level encryption `_ for an example. If a :class:`AsyncMongoClient` is configured with ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a separate internal ``AsyncMongoClient`` is created if any of the @@ -601,7 +601,7 @@ def __init__( - `server_api`: A :class:`~pymongo.server_api.ServerApi` which configures this - client to use Stable API. See :ref:`versioned-api-ref` for + client to use Stable API. See `versioned API `_ for details. .. seealso:: The MongoDB documentation on `connections `_. @@ -712,15 +712,15 @@ def __init__( reconnect to one of them. In PyMongo 3, the client monitors its network latency to all the mongoses continuously, and distributes operations evenly among those with the lowest latency. See - :ref:`mongos-load-balancing` for more information. + `load balancing `_ for more information. The ``connect`` option is added. The ``start_request``, ``in_request``, and ``end_request`` methods are removed, as well as the ``auto_start_request`` option. - The ``copy_database`` method is removed, see the - :doc:`copy_database examples ` for alternatives. + The ``copy_database`` method is removed, see + `Copy and Clone Databases `_ for alternatives. The :meth:`AsyncMongoClient.disconnect` method is removed; it was a synonym for :meth:`~pymongo.asynchronous.AsyncMongoClient.close`. @@ -2519,9 +2519,9 @@ async def bulk_write( :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. - .. seealso:: For more info, see :doc:`/examples/client_bulk`. + .. seealso:: For more info, see `Client Bulk Write `_. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: requires MongoDB server version 8.0+. diff --git a/pymongo/daemon.py b/pymongo/daemon.py index be976decd9..c0a01db16d 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -16,7 +16,7 @@ PyMongo only attempts to spawn the mongocryptd daemon process when automatic client-side field level encryption is enabled. See -:ref:`automatic-client-side-encryption` for more info. +`Client-side Field Level Encryption `_ for more info. """ from __future__ import annotations diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index cf686f6ab5..bbc736d1c0 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -75,7 +75,7 @@ def __init__( encryption and explicit decryption is also supported for all users with the :class:`~pymongo.asynchronous.encryption.AsyncClientEncryption` and :class:`~pymongo.encryption.ClientEncryption` classes. - See :ref:`automatic-client-side-encryption` for an example. + See `client-side field level encryption `_ for an example. :param kms_providers: Map of KMS provider options. The `kms_providers` map values differ by provider: @@ -104,7 +104,7 @@ def __init__( KMS providers may be specified with an optional name suffix separated by a colon, for example "kmip:name" or "aws:name". - Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + Named KMS providers do not support `CSFLE on-demand credentials `_. Named KMS providers enables more than one of each KMS provider type to be configured. For example, to configure multiple local KMS providers:: diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index dae414c37c..35b92c4d01 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -551,7 +551,7 @@ class ReadPreference: Nearest(tag_sets=[{"node":"analytics"}]) - See :doc:`/examples/high_availability` for code examples. + See `Read and Write Settings `_ for code examples. A read preference is used in three cases: diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 32da83b0c2..f37c32e938 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -761,7 +761,7 @@ def bulk_write( :return: An instance of :class:`~pymongo.results.BulkWriteResult`. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: `bypass_document_validation` requires server version **>= 3.2** @@ -866,7 +866,7 @@ def insert_one( :return: - An instance of :class:`~pymongo.results.InsertOneResult`. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: `bypass_document_validation` requires server version **>= 3.2** @@ -935,7 +935,7 @@ def insert_many( :return: An instance of :class:`~pymongo.results.InsertManyResult`. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: `bypass_document_validation` requires server version **>= 3.2** @@ -2040,7 +2040,7 @@ def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) .. versionchanged:: 4.2 This method now always uses the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the - :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are + `versioned API `_. Users of the Stable API with estimated_document_count are recommended to upgrade their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. @@ -2909,7 +2909,7 @@ def aggregate( .. note:: This method does not support the 'explain' option. Please use `PyMongoExplain `_ - instead. An example is included in the :ref:`aggregate-examples` + instead. An example is included in the `aggregation example `_ documentation. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of @@ -2970,7 +2970,7 @@ def aggregate( The :meth:`aggregate` method always returns a CommandCursor. The pipeline argument must be a list. - .. seealso:: :doc:`/examples/aggregation` + .. seealso:: `Aggregation `_ .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index dd9ea01558..2874e1c8d9 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -893,7 +893,7 @@ def command( when decoding the command response. .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will + API (see `versioned API `_), then :meth:`command` will automatically add API versioning options to the given command. Explicitly adding API versioning options in the command and declaring an API version on the client is not supported. @@ -992,7 +992,7 @@ def cursor_command( when decoding the command response. .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will + API (see `versioned API `_), then :meth:`command` will automatically add API versioning options to the given command. Explicitly adding API versioning options in the command and declaring an API version on the client is not supported. diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 5f9bdac4b7..ba304e7bd3 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -576,7 +576,7 @@ def __init__( creating data keys. It does not provide an API to query keys from the key vault collection, as this can be done directly on the MongoClient. - See :ref:`explicit-client-side-encryption` for an example. + See `explicit client-side encryption `_ for an example. :param kms_providers: Map of KMS provider options. The `kms_providers` map values differ by provider: @@ -605,7 +605,7 @@ def __init__( KMS providers may be specified with an optional name suffix separated by a colon, for example "kmip:name" or "aws:name". - Named KMS providers do not support :ref:`CSFLE on-demand credentials`. + Named KMS providers do not support `CSFLE on-demand credentials `_. :param key_vault_namespace: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption and decryption. Data keys are stored as documents in this MongoDB diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 5d95e9c9d5..f44368ec5c 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -14,7 +14,7 @@ """Tools for connecting to MongoDB. -.. seealso:: :doc:`/examples/high_availability` for examples of connecting +.. seealso:: `Read and Write Settings `_ for examples of connecting to replica sets or sets of mongos servers. To get a :class:`~pymongo.database.Database` instance from a @@ -260,7 +260,7 @@ def __init__( print("Server not available") .. warning:: When using PyMongo in a multiprocessing context, please - read :ref:`multiprocessing` first. + read `PyMongo multiprocessing `_ first. .. note:: Many of the following options can be passed using a MongoDB URI or keyword parameters. If the same option is passed in a URI and @@ -296,7 +296,7 @@ def __init__( return DatetimeMS objects when the underlying datetime is out-of-range and 'datetime_clamp' to clamp to the minimum and maximum possible datetimes. Defaults to 'datetime'. See - :ref:`handling-out-of-range-datetimes` for details. + `handling out of range datetimes `_ for details. - `directConnection` (optional): if ``True``, forces this client to connect directly to the specified MongoDB host as a standalone. If ``false``, the client connects to the entire replica set of @@ -421,7 +421,7 @@ def __init__( package. By default no compression is used. Compression support must also be enabled on the server. MongoDB 3.6+ supports snappy and zlib compression. MongoDB 4.2+ adds support for zstd. - See :ref:`network-compression-example` for details. + See `compress network traffic `_ for details. - `zlibCompressionLevel`: (int) The zlib compression level to use when zlib is used as the wire protocol compressor. Supported values are -1 through 9. -1 tells the zlib library to use its default @@ -432,7 +432,7 @@ def __init__( values are the strings: "standard", "pythonLegacy", "javaLegacy", "csharpLegacy", and "unspecified" (the default). New applications should consider setting this to "standard" for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. + compatibility. See `handling UUID data `_ for details. - `unicode_decode_error_handler`: The error handler to apply when a Unicode-related error occurs during BSON decoding that would otherwise raise :exc:`UnicodeDecodeError`. Valid options include @@ -496,7 +496,7 @@ def __init__( is set, it must be a positive integer greater than or equal to 90 seconds. - .. seealso:: :doc:`/examples/server_selection` + .. seealso:: `Customize Server Selection `_ | **Authentication:** @@ -522,7 +522,7 @@ def __init__( To specify the session token for MONGODB-AWS authentication pass ``authMechanismProperties='AWS_SESSION_TOKEN:'``. - .. seealso:: :doc:`/examples/authentication` + .. seealso:: `Authentication `_ | **TLS/SSL configuration:** @@ -585,7 +585,7 @@ def __init__( :class:`~pymongo.encryption_options.AutoEncryptionOpts` which configures this client to automatically encrypt collection commands and automatically decrypt results. See - :ref:`automatic-client-side-encryption` for an example. + `client-side field level encryption `_ for an example. If a :class:`MongoClient` is configured with ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a separate internal ``MongoClient`` is created if any of the @@ -601,7 +601,7 @@ def __init__( - `server_api`: A :class:`~pymongo.server_api.ServerApi` which configures this - client to use Stable API. See :ref:`versioned-api-ref` for + client to use Stable API. See `versioned API `_ for details. .. seealso:: The MongoDB documentation on `connections `_. @@ -712,15 +712,15 @@ def __init__( reconnect to one of them. In PyMongo 3, the client monitors its network latency to all the mongoses continuously, and distributes operations evenly among those with the lowest latency. See - :ref:`mongos-load-balancing` for more information. + `load balancing `_ for more information. The ``connect`` option is added. The ``start_request``, ``in_request``, and ``end_request`` methods are removed, as well as the ``auto_start_request`` option. - The ``copy_database`` method is removed, see the - :doc:`copy_database examples ` for alternatives. + The ``copy_database`` method is removed, see + `Copy and Clone Databases `_ for alternatives. The :meth:`MongoClient.disconnect` method is removed; it was a synonym for :meth:`~pymongo.MongoClient.close`. @@ -2509,9 +2509,9 @@ def bulk_write( :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. - .. seealso:: For more info, see :doc:`/examples/client_bulk`. + .. seealso:: For more info, see `Client Bulk Write `_. - .. seealso:: :ref:`writes-and-ids` + .. seealso:: `Writes and ids `_ .. note:: requires MongoDB server version 8.0+. From d88596cef1ec083e27581248849470b09c059394 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 7 Aug 2025 13:32:11 -0400 Subject: [PATCH 2010/2111] PYTHON-5218 - Add logging statement when SRV polling fails (#2463) Co-authored-by: Jib --- pymongo/asynchronous/monitor.py | 3 ++- pymongo/synchronous/monitor.py | 3 ++- test/asynchronous/test_srv_polling.py | 13 +++++++++++++ test/test_srv_polling.py | 13 +++++++++++++ 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py index e067bd8c54..45c12b219f 100644 --- a/pymongo/asynchronous/monitor.py +++ b/pymongo/asynchronous/monitor.py @@ -423,12 +423,13 @@ async def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception - except Exception: + except Exception as exc: # As per the spec, upon encountering an error: # - An error must not be raised # - SRV records must be rescanned every heartbeatFrequencyMS # - Topology must be left unchanged self.request_check() + _debug_log(_SDAM_LOGGER, message="SRV monitor check failed", failure=repr(exc)) return None else: self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py index d5dd5caf82..f395588814 100644 --- a/pymongo/synchronous/monitor.py +++ b/pymongo/synchronous/monitor.py @@ -421,12 +421,13 @@ def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: if len(seedlist) == 0: # As per the spec: this should be treated as a failure. raise Exception - except Exception: + except Exception as exc: # As per the spec, upon encountering an error: # - An error must not be raised # - SRV records must be rescanned every heartbeatFrequencyMS # - Topology must be left unchanged self.request_check() + _debug_log(_SDAM_LOGGER, message="SRV monitor check failed", failure=repr(exc)) return None else: self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index 18a367a498..a89403b473 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -225,6 +225,19 @@ def response_callback(*args): await self.run_scenario(response_callback, False) + async def test_dns_failures_logging(self): + from dns import exception + + with self.assertLogs("pymongo.topology", level="DEBUG") as cm: + + def response_callback(*args): + raise exception.Timeout("DNS Failure!") + + await self.run_scenario(response_callback, False) + + srv_failure_logs = [r for r in cm.records if "SRV monitor check failed" in r.getMessage()] + self.assertEqual(len(srv_failure_logs), 1) + async def test_dns_record_lookup_empty(self): response: list = [] await self.run_scenario(response, False) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 87ab418302..09579eda12 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -225,6 +225,19 @@ def response_callback(*args): self.run_scenario(response_callback, False) + def test_dns_failures_logging(self): + from dns import exception + + with self.assertLogs("pymongo.topology", level="DEBUG") as cm: + + def response_callback(*args): + raise exception.Timeout("DNS Failure!") + + self.run_scenario(response_callback, False) + + srv_failure_logs = [r for r in cm.records if "SRV monitor check failed" in r.getMessage()] + self.assertEqual(len(srv_failure_logs), 1) + def test_dns_record_lookup_empty(self): response: list = [] self.run_scenario(response, False) From 4e9b52b8d6a7d7c5897acf70bd3157cd47a2ab3e Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Thu, 7 Aug 2025 14:01:08 -0400 Subject: [PATCH 2011/2111] PYTHON-5487 Update 4.14 changelog to mention MongoDB 4.0 is no longer supported (#2462) Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Co-authored-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- doc/changelog.rst | 41 +++++++++++++++++++++++++++-------------- pymongo/_version.py | 2 +- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 25d412364f..a553be0144 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,25 +1,38 @@ Changelog ========= -Changes in Version 4.14.0 (XXXX/XX/XX) +Changes in Version 4.14.0 (2025/08/06) -------------------------------------- + +.. warning:: PyMongo 4.14 drops support for MongoDB 4.0. PyMongo now supports + MongoDB 4.2+. + PyMongo 4.14 brings a number of changes including: -- Added preliminary support for Python 3.14 and 3.14 with free-threading. We do not yet support the following with Python 3.14: - - Subinterpreters (``concurrent.interpreters``) - - Free-threading with Encryption - - mod_wsgi +- Dropped support for MongoDB 4.0. +- Added preliminary support for Python 3.14 and 3.14 with free-threading. We do + not yet support the following with Python 3.14: + + - Subinterpreters (``concurrent.interpreters``) + - Free-threading with Encryption + - mod_wsgi + - Removed experimental support for free-threading support in Python 3.13. -- Added :attr:`bson.codec_options.TypeRegistry.codecs` and :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties - to allow users to directly access the type codecs and fallback encoder for a given :class:`bson.codec_options.TypeRegistry`. -- Added :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and - :meth:`pymongo.mongo_client.MongoClient.append_metadata` to allow instantiated MongoClients to send client metadata - on-demand +- Added :attr:`bson.codec_options.TypeRegistry.codecs` and + :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties + to allow users to directly access the type codecs and fallback encoder for a + given :class:`bson.codec_options.TypeRegistry`. +- Added + :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and + :meth:`pymongo.mongo_client.MongoClient.append_metadata` to allow instantiated + MongoClients to send client metadata on-demand - Improved performance of selecting a server with the Primary selector. - -- Introduces a minor breaking change. When encoding :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised - if the 'padding' metadata field is < 0 or > 7, or non-zero for any type other than PACKED_BIT. -- Changed :meth:`~pymongo.uri_parser.parse_uri`'s ``options`` parameter to be type ``dict`` instead of ``_CaseInsensitiveDictionary``. +- Introduces a minor breaking change. When encoding + :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised if the + 'padding' metadata field is < 0 or > 7, or non-zero for any type other than + PACKED_BIT. +- Changed :meth:`~pymongo.uri_parser.parse_uri`'s ``options`` return value to be + type ``dict`` instead of ``_CaseInsensitiveDictionary``. Changes in Version 4.13.2 (2025/06/17) -------------------------------------- diff --git a/pymongo/_version.py b/pymongo/_version.py index 9e7924773b..6eb73ba97c 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.14.0.dev0" +__version__ = "4.15.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 578a5323953d33cbcec966aa3ba6791f90991e15 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 8 Aug 2025 19:52:17 -0500 Subject: [PATCH 2012/2111] PYTHON-5491 Skip non-idempotent dropIndex tests (#2467) --- .evergreen/generated_configs/variants.yml | 16 ++++++++++++++++ .evergreen/scripts/generate_config.py | 6 +++++- test/asynchronous/test_collection.py | 2 ++ test/asynchronous/unified_format.py | 2 ++ test/test_collection.py | 2 ++ test/unified_format.py | 2 ++ 6 files changed, 29 insertions(+), 1 deletion(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 4fb5a36250..a05cc61f05 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -535,6 +535,8 @@ buildvariants: display_name: "* MongoDB v4.2" run_on: - rhel87-small + expansions: + VERSION: "4.2" tags: [coverage_tag] - name: mongodb-v4.4 tasks: @@ -542,6 +544,8 @@ buildvariants: display_name: "* MongoDB v4.4" run_on: - rhel87-small + expansions: + VERSION: "4.4" tags: [coverage_tag] - name: mongodb-v5.0 tasks: @@ -549,6 +553,8 @@ buildvariants: display_name: "* MongoDB v5.0" run_on: - rhel87-small + expansions: + VERSION: "5.0" tags: [coverage_tag] - name: mongodb-v6.0 tasks: @@ -556,6 +562,8 @@ buildvariants: display_name: "* MongoDB v6.0" run_on: - rhel87-small + expansions: + VERSION: "6.0" tags: [coverage_tag] - name: mongodb-v7.0 tasks: @@ -563,6 +571,8 @@ buildvariants: display_name: "* MongoDB v7.0" run_on: - rhel87-small + expansions: + VERSION: "7.0" tags: [coverage_tag] - name: mongodb-v8.0 tasks: @@ -570,6 +580,8 @@ buildvariants: display_name: "* MongoDB v8.0" run_on: - rhel87-small + expansions: + VERSION: "8.0" tags: [coverage_tag] - name: mongodb-rapid tasks: @@ -577,6 +589,8 @@ buildvariants: display_name: "* MongoDB rapid" run_on: - rhel87-small + expansions: + VERSION: rapid tags: [coverage_tag] - name: mongodb-latest tasks: @@ -584,6 +598,8 @@ buildvariants: display_name: "* MongoDB latest" run_on: - rhel87-small + expansions: + VERSION: latest tags: [coverage_tag] # Stable api tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 2d160152f7..3a386be4f7 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -74,7 +74,11 @@ def create_server_version_variants() -> list[BuildVariant]: for version in ALL_VERSIONS: display_name = get_variant_name("* MongoDB", version=version) variant = create_variant( - [".server-version"], display_name, host=DEFAULT_HOST, tags=["coverage_tag"] + [".server-version"], + display_name, + version=version, + host=DEFAULT_HOST, + tags=["coverage_tag"], ) variants.append(variant) return variants diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index cda8452d1c..6a85b63960 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -335,6 +335,8 @@ async def test_create_index(self): await db.test.create_index(["hello", ("world", DESCENDING)]) await db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + # TODO: PYTHON-5491 - remove version max + @async_client_context.require_version_max(8, 0, -1) async def test_drop_index(self): db = self.db await db.test.drop_indexes() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 964d2df96d..09bf7e83ea 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -564,6 +564,8 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") + if "dropindex on collection" in description: + self.skipTest("PYTHON-5491") if ( "tailable" in class_name or "tailable" in description diff --git a/test/test_collection.py b/test/test_collection.py index ccace72bec..0dce88423b 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -333,6 +333,8 @@ def test_create_index(self): db.test.create_index(["hello", ("world", DESCENDING)]) db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + # TODO: PYTHON-5491 - remove version max + @client_context.require_version_max(8, 0, -1) def test_drop_index(self): db = self.db db.test.drop_indexes() diff --git a/test/unified_format.py b/test/unified_format.py index c21f29fe19..3496b2ad44 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -563,6 +563,8 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") + if "dropindex on collection" in description: + self.skipTest("PYTHON-5491") if ( "tailable" in class_name or "tailable" in description From e79c19b4d2aa61a792eba754e942c9ef249e3d71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 10:35:58 -0500 Subject: [PATCH 2013/2111] Bump the actions group with 3 updates (#2469) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/dist.yml | 2 +- .github/workflows/release-python.yml | 2 +- .github/workflows/test-python.yml | 2 +- .github/workflows/zizmor.yml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fd2808ea19..042685bc21 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 14c253fe73..586f862820 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -142,7 +142,7 @@ jobs: name: Download Wheels steps: - name: Download all workflow run artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 - name: Flatten directory working-directory: . run: | diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 9cce310d91..0c10fa2e98 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -76,7 +76,7 @@ jobs: id-token: write steps: - name: Download all the dists - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: name: all-dist-${{ github.run_id }} path: dist/ diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index b7b8fb5062..9a2f2e42ba 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -195,7 +195,7 @@ jobs: timeout-minutes: 20 steps: - name: Download sdist - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 - name: Unpack SDist shell: bash run: | diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 8a2bccf931..14bf81a087 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@383d31df2eb66a2f42db98c9654bdc73231f3e3a + uses: zizmorcore/zizmor-action@c17832b972c15fd5f3d5065a7e16ad761a0a10d2 From 61e90473e6b5b029fa6e83c1fb254d346aa96663 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 10:36:57 -0500 Subject: [PATCH 2014/2111] Update coverage requirement from <=7.10.2,>=5 to >=5,<=7.10.3 (#2470) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 83009ff8cc..b7afbc5473 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] eventlet = ["eventlet"] coverage = [ "pytest-cov", - "coverage>=5,<=7.10.2" + "coverage>=5,<=7.10.3" ] mockupdb = [ "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" From f105789e1245e383550731ca7cdf045003c76c47 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 11 Aug 2025 13:49:29 -0500 Subject: [PATCH 2015/2111] Revert "Bump the actions group with 3 updates" (#2471) --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/dist.yml | 2 +- .github/workflows/release-python.yml | 2 +- .github/workflows/test-python.yml | 2 +- .github/workflows/zizmor.yml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 042685bc21..fd2808ea19 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 586f862820..14c253fe73 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -142,7 +142,7 @@ jobs: name: Download Wheels steps: - name: Download all workflow run artifacts - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v4 - name: Flatten directory working-directory: . run: | diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 0c10fa2e98..9cce310d91 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -76,7 +76,7 @@ jobs: id-token: write steps: - name: Download all the dists - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v4 with: name: all-dist-${{ github.run_id }} path: dist/ diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 9a2f2e42ba..b7b8fb5062 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -195,7 +195,7 @@ jobs: timeout-minutes: 20 steps: - name: Download sdist - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v4 - name: Unpack SDist shell: bash run: | diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 14bf81a087..8a2bccf931 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@c17832b972c15fd5f3d5065a7e16ad761a0a10d2 + uses: zizmorcore/zizmor-action@383d31df2eb66a2f42db98c9654bdc73231f3e3a From 3c786f5cd9ddf434273d18208cab957d7b117924 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 13 Aug 2025 09:46:01 -0400 Subject: [PATCH 2016/2111] PYTHON-3606 - Document best practice for closing MongoClients and cursors (#2465) --- pymongo/asynchronous/collection.py | 36 ++++++++++++++++++++++++++++ pymongo/asynchronous/database.py | 18 ++++++++++++-- pymongo/asynchronous/mongo_client.py | 15 ++++++++++++ pymongo/synchronous/collection.py | 36 ++++++++++++++++++++++++++++ pymongo/synchronous/database.py | 14 +++++++++++ pymongo/synchronous/mongo_client.py | 15 ++++++++++++ 6 files changed, 132 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index e07c8615dd..741c11e551 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -1776,6 +1776,15 @@ def find(self, *args: Any, **kwargs: Any) -> AsyncCursor[_DocumentType]: improper type. Returns an instance of :class:`~pymongo.asynchronous.cursor.AsyncCursor` corresponding to this query. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with collection.find() as cursor: + async for doc in cursor: + print(doc) + The :meth:`find` method obeys the :attr:`read_preference` of this :class:`AsyncCollection`. @@ -2503,6 +2512,15 @@ async def list_indexes( ... SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.list_indexes() as cursor: + async for index in cursor: + print(index) + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param comment: A user-provided comment to attach to this @@ -2620,6 +2638,15 @@ async def list_search_indexes( ) -> AsyncCommandCursor[Mapping[str, Any]]: """Return a cursor over search indexes for the current collection. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.list_search_indexes() as cursor: + async for index in cursor: + print(index) + :param name: If given, the name of the index to search for. Only indexes with matching index names will be returned. If not given, all search indexes for the current collection @@ -2922,6 +2949,15 @@ async def aggregate( .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of this collection is automatically applied to this operation. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.aggregate() as cursor: + async for operation in cursor: + print(operation) + :param pipeline: a list of aggregation pipeline stages :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index 5a1dcfe364..f70c2b403f 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -643,8 +643,8 @@ async def aggregate( .. code-block:: python # Lists all operations currently running on the server. - with client.admin.aggregate([{"$currentOp": {}}]) as cursor: - for operation in cursor: + async with await client.admin.aggregate([{"$currentOp": {}}]) as cursor: + async for operation in cursor: print(operation) The :meth:`aggregate` method obeys the :attr:`read_preference` of this @@ -652,6 +652,11 @@ async def aggregate( which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement. + .. note:: This method does not support the 'explain' option. Please use :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` instead. @@ -1154,6 +1159,15 @@ async def list_collections( ) -> AsyncCommandCursor[MutableMapping[str, Any]]: """Get a cursor over the collections of this database. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await database.list_collections() as cursor: + async for collection in cursor: + print(collection) + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param filter: A query document to filter the list of diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index 8ca7bca436..b616647791 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -202,6 +202,12 @@ def __init__( exception (recognizing that the operation failed) and then continue to execute. + Best practice is to call :meth:`AsyncMongoClient.close` when the client is no longer needed, + or use the client in a with statement:: + + async with AsyncMongoClient(url) as client: + # Use client here. + The `host` parameter can be a full `mongodb URI `_, in addition to a simple hostname. It can also be a list of hostnames but no more @@ -2345,6 +2351,15 @@ async def list_databases( ) -> AsyncCommandCursor[dict[str, Any]]: """Get a cursor over the databases of the connected server. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await client.list_databases() as cursor: + async for database in cursor: + print(database) + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. :param comment: A user-provided comment to attach to this diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index f37c32e938..9f32deb765 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -1775,6 +1775,15 @@ def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: improper type. Returns an instance of :class:`~pymongo.cursor.Cursor` corresponding to this query. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.find() as cursor: + for doc in cursor: + print(doc) + The :meth:`find` method obeys the :attr:`read_preference` of this :class:`Collection`. @@ -2500,6 +2509,15 @@ def list_indexes( ... SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.list_indexes() as cursor: + for index in cursor: + print(index) + :param session: a :class:`~pymongo.client_session.ClientSession`. :param comment: A user-provided comment to attach to this @@ -2617,6 +2635,15 @@ def list_search_indexes( ) -> CommandCursor[Mapping[str, Any]]: """Return a cursor over search indexes for the current collection. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.list_search_indexes() as cursor: + for index in cursor: + print(index) + :param name: If given, the name of the index to search for. Only indexes with matching index names will be returned. If not given, all search indexes for the current collection @@ -2915,6 +2942,15 @@ def aggregate( .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.aggregate() as cursor: + for operation in cursor: + print(operation) + :param pipeline: a list of aggregation pipeline stages :param session: a :class:`~pymongo.client_session.ClientSession`. diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index 2874e1c8d9..e30f97817c 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -652,6 +652,11 @@ def aggregate( which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement. + .. note:: This method does not support the 'explain' option. Please use :meth:`~pymongo.database.Database.command` instead. @@ -1148,6 +1153,15 @@ def list_collections( ) -> CommandCursor[MutableMapping[str, Any]]: """Get a cursor over the collections of this database. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with database.list_collections() as cursor: + for collection in cursor: + print(collection) + :param session: a :class:`~pymongo.client_session.ClientSession`. :param filter: A query document to filter the list of diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index f44368ec5c..ef0663584c 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -199,6 +199,12 @@ def __init__( exception (recognizing that the operation failed) and then continue to execute. + Best practice is to call :meth:`MongoClient.close` when the client is no longer needed, + or use the client in a with statement:: + + with MongoClient(url) as client: + # Use client here. + The `host` parameter can be a full `mongodb URI `_, in addition to a simple hostname. It can also be a list of hostnames but no more @@ -2335,6 +2341,15 @@ def list_databases( ) -> CommandCursor[dict[str, Any]]: """Get a cursor over the databases of the connected server. + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with client.list_databases() as cursor: + for database in cursor: + print(database) + :param session: a :class:`~pymongo.client_session.ClientSession`. :param comment: A user-provided comment to attach to this From 1ffdedc7a4b4ea68663b03562dbe466fd112b73e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 14 Aug 2025 13:54:24 -0500 Subject: [PATCH 2017/2111] PYTHON-5492 Mark test as flaky (#2472) --- test/asynchronous/test_pooling.py | 3 ++- test/test_pooling.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index 66edf0177f..cbf6d336bd 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -21,7 +21,7 @@ import socket import sys import time -from test.asynchronous.utils import async_get_pool, async_joinall +from test.asynchronous.utils import async_get_pool, async_joinall, flaky from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -429,6 +429,7 @@ async def find_one(): # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds print(len(pool.conns)) + @flaky(reason="PYTHON-5492") @async_client_context.require_failCommand_appName async def test_csot_timeout_message(self): client = await self.async_rs_or_single_client(appName="connectionTimeoutApp") diff --git a/test/test_pooling.py b/test/test_pooling.py index b995c467c2..5ce4284e33 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -21,7 +21,7 @@ import socket import sys import time -from test.utils import get_pool, joinall +from test.utils import flaky, get_pool, joinall from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.son import SON @@ -429,6 +429,7 @@ def find_one(): # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds print(len(pool.conns)) + @flaky(reason="PYTHON-5492") @client_context.require_failCommand_appName def test_csot_timeout_message(self): client = self.rs_or_single_client(appName="connectionTimeoutApp") From b83fcbb1a99a65dc05cae6c11952fc3cbe423206 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 15 Aug 2025 09:15:37 -0500 Subject: [PATCH 2018/2111] PYTHON-5349 Use drivers-evergreen-tools to start servers in GitHub Actions (#2474) --- .github/workflows/test-python.yml | 40 +++++++++++++++---------------- .github/zizmor.yml | 7 ++++++ 2 files changed, 26 insertions(+), 21 deletions(-) create mode 100644 .github/zizmor.yml diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index b7b8fb5062..96729e3a6e 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -50,33 +50,31 @@ jobs: cppcheck pymongo build: - # supercharge/mongodb-github-action requires containers so we don't test other platforms runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: + # Tests currently only pass on ubuntu on GitHub Actions. os: [ubuntu-latest] - python-version: ["3.9", "pypy-3.10", "3.13", "3.13t"] + python-version: ["3.9", "pypy-3.10", "3.13t"] + mongodb-version: ["8.0"] + name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - uses: actions/checkout@v4 with: persist-credentials: false - - name: Install just - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: just install - - name: Start MongoDB - uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master with: - mongodb-version: 6.0 + version: "${{ matrix.mongodb-version }}" - name: Run tests - run: just test + run: uv run --extra test pytest -v doctest: runs-on: ubuntu-latest @@ -92,10 +90,10 @@ jobs: with: enable-cache: true python-version: "3.9" - - name: Start MongoDB - uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master with: - mongodb-version: '8.0.0-rc4' + version: "8.0" - name: Install dependencies run: just install - name: Run tests @@ -210,8 +208,8 @@ jobs: cache-dependency-path: 'sdist/test/pyproject.toml' # Test sdist on lowest supported Python python-version: '3.9' - - name: Start MongoDB - uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master - name: Run connect test from sdist shell: bash run: | @@ -234,10 +232,10 @@ jobs: uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: python-version: '3.9' - - name: Start MongoDB - uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master with: - mongodb-version: 6.0 + version: "8.0" # Async and our test_dns do not support dnspython 1.X, so we don't run async or dns tests here - name: Run tests shell: bash @@ -260,10 +258,10 @@ jobs: uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: python-version: '3.9' - - name: Start MongoDB - uses: supercharge/mongodb-github-action@90004df786821b6308fb02299e5835d0dae05d0d # 1.12.0 + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master with: - mongodb-version: 6.0 + version: "8.0" # The lifetime kwarg we use in srv resolution was added to the async resolver API in dnspython 2.1.0 - name: Run tests shell: bash diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 0000000000..10fd4cdfcf --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,7 @@ +rules: + unpinned-uses: + config: + policies: + actions/*: ref-pin + mongodb-labs/drivers-github-tools/*: ref-pin + mongodb-labs/drivers-evergreen-tools: ref-pin From e44ece0b07d4bb81ef8a5678ad01014070c1a374 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 15 Aug 2025 11:58:39 -0500 Subject: [PATCH 2019/2111] PYTHON-5493 Add a patch for the log order difference (#2473) --- .evergreen/spec-patch/PYTHON-5493.patch | 99 +++++++++++++++++++ .../connection-logging.json | 32 +++--- 2 files changed, 115 insertions(+), 16 deletions(-) create mode 100644 .evergreen/spec-patch/PYTHON-5493.patch diff --git a/.evergreen/spec-patch/PYTHON-5493.patch b/.evergreen/spec-patch/PYTHON-5493.patch new file mode 100644 index 0000000000..cf1afbb271 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5493.patch @@ -0,0 +1,99 @@ +diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json +index d40cfbb7e..5799e834d 100644 +--- a/test/connection_logging/connection-logging.json ++++ b/test/connection_logging/connection-logging.json +@@ -272,7 +272,13 @@ + "level": "debug", + "component": "connection", + "data": { +- "message": "Connection pool closed", ++ "message": "Connection closed", ++ "driverConnectionId": { ++ "$$type": [ ++ "int", ++ "long" ++ ] ++ }, + "serverHost": { + "$$type": "string" + }, +@@ -281,20 +287,15 @@ + "int", + "long" + ] +- } ++ }, ++ "reason": "Connection pool was closed" + } + }, + { + "level": "debug", + "component": "connection", + "data": { +- "message": "Connection closed", +- "driverConnectionId": { +- "$$type": [ +- "int", +- "long" +- ] +- }, ++ "message": "Connection pool closed", + "serverHost": { + "$$type": "string" + }, +@@ -303,8 +304,7 @@ + "int", + "long" + ] +- }, +- "reason": "Connection pool was closed" ++ } + } + } + ] +@@ -446,22 +446,6 @@ + } + } + }, +- { +- "level": "debug", +- "component": "connection", +- "data": { +- "message": "Connection pool cleared", +- "serverHost": { +- "$$type": "string" +- }, +- "serverPort": { +- "$$type": [ +- "int", +- "long" +- ] +- } +- } +- }, + { + "level": "debug", + "component": "connection", +@@ -514,6 +498,22 @@ + ] + } + } ++ }, ++ { ++ "level": "debug", ++ "component": "connection", ++ "data": { ++ "message": "Connection pool cleared", ++ "serverHost": { ++ "$$type": "string" ++ }, ++ "serverPort": { ++ "$$type": [ ++ "int", ++ "long" ++ ] ++ } ++ } + } + ] + } diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json index 72103b3cab..5799e834d7 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json @@ -446,22 +446,6 @@ } } }, - { - "level": "debug", - "component": "connection", - "data": { - "message": "Connection pool cleared", - "serverHost": { - "$$type": "string" - }, - "serverPort": { - "$$type": [ - "int", - "long" - ] - } - } - }, { "level": "debug", "component": "connection", @@ -514,6 +498,22 @@ ] } } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool cleared", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } } ] } From 9dbccbee2c93582f360f5b822b000d2c06d1c0bd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 15 Aug 2025 19:13:51 -0500 Subject: [PATCH 2020/2111] PYTHON-5492 Fix handling of MaxTimeMSExpired responses (#2477) --- .evergreen/scripts/install-dependencies.sh | 2 ++ pymongo/asynchronous/server.py | 5 +++++ pymongo/synchronous/server.py | 5 +++++ test/asynchronous/test_pooling.py | 1 - test/test_pooling.py | 1 - 5 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 5425d10c8c..49fc614ca7 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -48,6 +48,7 @@ if ! command -v just &>/dev/null; then _TARGET="--target x86_64-pc-windows-msvc" fi _BIN_DIR=$PYMONGO_BIN_DIR + mkdir -p ${_BIN_DIR} echo "Installing just..." mkdir -p "$_BIN_DIR" 2>/dev/null || true curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { @@ -59,6 +60,7 @@ fi # Ensure uv is installed. if ! command -v uv &>/dev/null; then _BIN_DIR=$PYMONGO_BIN_DIR + mkdir -p ${_BIN_DIR} echo "Installing uv..." # On most systems we can install directly. curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index 0f8565f6cc..cef8bd011c 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -38,6 +38,7 @@ _SDAMStatusMessage, ) from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.pool_shared import _get_timeout_details, format_timeout_details from pymongo.response import PinnedResponse, Response if TYPE_CHECKING: @@ -224,6 +225,10 @@ async def run_operation( if use_cmd: first = docs[0] await operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] + # Append timeout details to MaxTimeMSExpired responses. + if first.get("code") == 50: + timeout_details = _get_timeout_details(conn.opts) # type:ignore[has-type] + first["errmsg"] += format_timeout_details(timeout_details) # type:ignore[index] _check_command_response(first, conn.max_wire_version) except Exception as exc: duration = datetime.now() - start diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py index a85f1b0db7..6651f63a30 100644 --- a/pymongo/synchronous/server.py +++ b/pymongo/synchronous/server.py @@ -37,6 +37,7 @@ _SDAMStatusMessage, ) from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.pool_shared import _get_timeout_details, format_timeout_details from pymongo.response import PinnedResponse, Response from pymongo.synchronous.helpers import _handle_reauth @@ -224,6 +225,10 @@ def run_operation( if use_cmd: first = docs[0] operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] + # Append timeout details to MaxTimeMSExpired responses. + if first.get("code") == 50: + timeout_details = _get_timeout_details(conn.opts) # type:ignore[has-type] + first["errmsg"] += format_timeout_details(timeout_details) # type:ignore[index] _check_command_response(first, conn.max_wire_version) except Exception as exc: duration = datetime.now() - start diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py index cbf6d336bd..3193d9e3d5 100644 --- a/test/asynchronous/test_pooling.py +++ b/test/asynchronous/test_pooling.py @@ -429,7 +429,6 @@ async def find_one(): # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds print(len(pool.conns)) - @flaky(reason="PYTHON-5492") @async_client_context.require_failCommand_appName async def test_csot_timeout_message(self): client = await self.async_rs_or_single_client(appName="connectionTimeoutApp") diff --git a/test/test_pooling.py b/test/test_pooling.py index 5ce4284e33..cb5b206996 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -429,7 +429,6 @@ def find_one(): # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds print(len(pool.conns)) - @flaky(reason="PYTHON-5492") @client_context.require_failCommand_appName def test_csot_timeout_message(self): client = self.rs_or_single_client(appName="connectionTimeoutApp") From bfa01c6a6ccdbb7cb0e4e21b063fabb26e1462a1 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 18 Aug 2025 06:53:15 -0500 Subject: [PATCH 2021/2111] PYTHON-5498 Disable C extensions for Remote KMS Tests (#2478) --- .evergreen/scripts/kms_tester.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py index 40fd65919d..f0438e4eef 100644 --- a/.evergreen/scripts/kms_tester.py +++ b/.evergreen/scripts/kms_tester.py @@ -30,7 +30,7 @@ def _setup_azure_vm(base_env: dict[str, str]) -> None: env["AZUREKMS_CMD"] = "tar xf mongo-python-driver.tgz" run_command(f"{azure_dir}/run-command.sh", env=env) - env["AZUREKMS_CMD"] = "bash .evergreen/just.sh setup-tests kms azure-remote" + env["AZUREKMS_CMD"] = "NO_EXT=1 bash .evergreen/just.sh setup-tests kms azure-remote" run_command(f"{azure_dir}/run-command.sh", env=env) LOGGER.info("Setting up Azure VM... done.") @@ -47,7 +47,7 @@ def _setup_gcp_vm(base_env: dict[str, str]) -> None: env["GCPKMS_CMD"] = "tar xf mongo-python-driver.tgz" run_command(f"{gcp_dir}/run-command.sh", env=env) - env["GCPKMS_CMD"] = "bash ./.evergreen/just.sh setup-tests kms gcp-remote" + env["GCPKMS_CMD"] = "NO_EXT=1 bash ./.evergreen/just.sh setup-tests kms gcp-remote" run_command(f"{gcp_dir}/run-command.sh", env=env) LOGGER.info("Setting up GCP VM...") From de332c553c2835c4626e137abd1423194c666223 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 18 Aug 2025 12:05:26 -0500 Subject: [PATCH 2022/2111] PYTHON-5500 Mark test_dns_failures_logging as flaky (#2480) --- .evergreen/scripts/install-dependencies.sh | 2 ++ test/asynchronous/test_srv_polling.py | 1 + test/test_srv_polling.py | 1 + 3 files changed, 4 insertions(+) diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 49fc614ca7..23d865d0d8 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -52,6 +52,8 @@ if ! command -v just &>/dev/null; then echo "Installing just..." mkdir -p "$_BIN_DIR" 2>/dev/null || true curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { + # Remove just file if it exists (can be created if there was an install error). + rm -f ${_BIN_DIR}/just _pip_install rust-just just } echo "Installing just... done." diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index a89403b473..d6f0f6a18f 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -225,6 +225,7 @@ def response_callback(*args): await self.run_scenario(response_callback, False) + @flaky(reason="PYTHON-5500") async def test_dns_failures_logging(self): from dns import exception diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 09579eda12..09c900cf09 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -225,6 +225,7 @@ def response_callback(*args): self.run_scenario(response_callback, False) + @flaky(reason="PYTHON-5500") def test_dns_failures_logging(self): from dns import exception From 2a1523fa85b4d6d0c4ea55decebd22fb6dfb5f24 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 18 Aug 2025 11:12:48 -0700 Subject: [PATCH 2023/2111] PYTHON-5488 `append_metadata` should not add duplicates (#2461) --- pymongo/pool_options.py | 5 ++++ test/asynchronous/test_client_metadata.py | 35 +++++++++++++++++------ test/test_client_metadata.py | 35 +++++++++++++++++------ 3 files changed, 57 insertions(+), 18 deletions(-) diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py index 5c24709b16..a5d76007b0 100644 --- a/pymongo/pool_options.py +++ b/pymongo/pool_options.py @@ -386,8 +386,13 @@ def __init__( def _update_metadata(self, driver: DriverInfo) -> None: """Updates the client's metadata""" + if driver.name and driver.name.lower() in self.__metadata["driver"]["name"].lower().split( + "|" + ): + return metadata = copy.deepcopy(self.__metadata) + if driver.name: metadata["driver"]["name"] = "{}|{}".format( metadata["driver"]["name"], diff --git a/test/asynchronous/test_client_metadata.py b/test/asynchronous/test_client_metadata.py index cfecb49748..2f175cceed 100644 --- a/test/asynchronous/test_client_metadata.py +++ b/test/asynchronous/test_client_metadata.py @@ -107,15 +107,20 @@ async def check_metadata_added( new_name, new_version, new_platform, new_metadata = await self.send_ping_and_get_metadata( client, True ) - self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) - self.assertEqual( - new_version, - f"{version}|{add_version}" if add_version is not None else version, - ) - self.assertEqual( - new_platform, - f"{platform}|{add_platform}" if add_platform is not None else platform, - ) + if add_name is not None and add_name.lower() in name.lower().split("|"): + self.assertEqual(name, new_name) + self.assertEqual(version, new_version) + self.assertEqual(platform, new_platform) + else: + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) metadata.pop("driver") metadata.pop("platform") @@ -210,6 +215,18 @@ async def test_doesnt_update_established_connections(self): self.assertIsNone(self.handshake_req) self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + async def test_duplicate_driver_name_no_op(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, None) + # wait for connection to become idle + await asyncio.sleep(0.005) + # add same metadata again + await self.check_metadata_added(client, "Framework", None, None) + if __name__ == "__main__": unittest.main() diff --git a/test/test_client_metadata.py b/test/test_client_metadata.py index 32cb9b8009..a94c5aa25e 100644 --- a/test/test_client_metadata.py +++ b/test/test_client_metadata.py @@ -107,15 +107,20 @@ def check_metadata_added( new_name, new_version, new_platform, new_metadata = self.send_ping_and_get_metadata( client, True ) - self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) - self.assertEqual( - new_version, - f"{version}|{add_version}" if add_version is not None else version, - ) - self.assertEqual( - new_platform, - f"{platform}|{add_platform}" if add_platform is not None else platform, - ) + if add_name is not None and add_name.lower() in name.lower().split("|"): + self.assertEqual(name, new_name) + self.assertEqual(version, new_version) + self.assertEqual(platform, new_platform) + else: + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) metadata.pop("driver") metadata.pop("platform") @@ -210,6 +215,18 @@ def test_doesnt_update_established_connections(self): self.assertIsNone(self.handshake_req) self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + def test_duplicate_driver_name_no_op(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, None) + # wait for connection to become idle + time.sleep(0.005) + # add same metadata again + self.check_metadata_added(client, "Framework", None, None) + if __name__ == "__main__": unittest.main() From b32da4b4090456b6988c4f1525b909befb440252 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 18 Aug 2025 18:52:46 -0500 Subject: [PATCH 2024/2111] PYTHON-5492 Fix handling of MaxTimeMS message (#2484) --- pymongo/asynchronous/encryption.py | 2 +- pymongo/asynchronous/pool.py | 3 +-- pymongo/asynchronous/server.py | 7 +----- pymongo/helpers_shared.py | 34 ++++++++++++++++++++++++++++++ pymongo/pool_shared.py | 27 +----------------------- pymongo/synchronous/encryption.py | 2 +- pymongo/synchronous/pool.py | 3 +-- pymongo/synchronous/server.py | 7 +----- test/asynchronous/test_cursor.py | 16 ++++++++++++++ test/test_cursor.py | 16 ++++++++++++++ 10 files changed, 73 insertions(+), 44 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 149cb3ac85..4f7d55cd06 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -75,12 +75,12 @@ NetworkTimeout, ServerSelectionTimeoutError, ) +from pymongo.helpers_shared import _get_timeout_details from pymongo.network_layer import async_socket_sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( _async_configured_socket, - _get_timeout_details, _raise_connection_failure, ) from pymongo.read_concern import ReadConcern diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index e215cafdc1..196ec9040f 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -58,6 +58,7 @@ WaitQueueTimeoutError, ) from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details from pymongo.lock import ( _async_cond_wait, _async_create_condition, @@ -79,9 +80,7 @@ SSLErrors, _CancellationContext, _configured_protocol_interface, - _get_timeout_details, _raise_connection_failure, - format_timeout_details, ) from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py index cef8bd011c..f212306174 100644 --- a/pymongo/asynchronous/server.py +++ b/pymongo/asynchronous/server.py @@ -38,7 +38,6 @@ _SDAMStatusMessage, ) from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query -from pymongo.pool_shared import _get_timeout_details, format_timeout_details from pymongo.response import PinnedResponse, Response if TYPE_CHECKING: @@ -225,11 +224,7 @@ async def run_operation( if use_cmd: first = docs[0] await operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] - # Append timeout details to MaxTimeMSExpired responses. - if first.get("code") == 50: - timeout_details = _get_timeout_details(conn.opts) # type:ignore[has-type] - first["errmsg"] += format_timeout_details(timeout_details) # type:ignore[index] - _check_command_response(first, conn.max_wire_version) + _check_command_response(first, conn.max_wire_version, pool_opts=conn.opts) # type:ignore[has-type] except Exception as exc: duration = datetime.now() - start if isinstance(exc, (NotPrimaryError, OperationFailure)): diff --git a/pymongo/helpers_shared.py b/pymongo/helpers_shared.py index 9646c0691a..c3611df7c8 100644 --- a/pymongo/helpers_shared.py +++ b/pymongo/helpers_shared.py @@ -47,6 +47,7 @@ if TYPE_CHECKING: from pymongo.cursor_shared import _Hint from pymongo.operations import _IndexList + from pymongo.pool_options import PoolOptions from pymongo.typings import _DocumentOut @@ -108,6 +109,34 @@ } +def _get_timeout_details(options: PoolOptions) -> dict[str, float]: + from pymongo import _csot + + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details + + +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result + + def _gen_index_name(keys: _IndexList) -> str: """Generate an index name from the set of fields it is over.""" return "_".join(["{}_{}".format(*item) for item in keys]) @@ -188,6 +217,7 @@ def _check_command_response( max_wire_version: Optional[int], allowable_errors: Optional[Container[Union[int, str]]] = None, parse_write_concern_error: bool = False, + pool_opts: Optional[PoolOptions] = None, ) -> None: """Check the response to a command for errors.""" if "ok" not in response: @@ -243,6 +273,10 @@ def _check_command_response( if code in (11000, 11001, 12582): raise DuplicateKeyError(errmsg, code, response, max_wire_version) elif code == 50: + # Append timeout details to MaxTimeMSExpired responses. + if pool_opts: + timeout_details = _get_timeout_details(pool_opts) + errmsg += format_timeout_details(timeout_details) raise ExecutionTimeout(errmsg, code, response, max_wire_version) elif code == 43: raise CursorNotFound(errmsg, code, response, max_wire_version) diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index 905f1a4d18..ac562af542 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -36,6 +36,7 @@ NetworkTimeout, _CertificateError, ) +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details from pymongo.network_layer import AsyncNetworkingInterface, NetworkingInterface, PyMongoProtocol from pymongo.pool_options import PoolOptions from pymongo.ssl_support import PYSSLError, SSLError, _has_sni @@ -149,32 +150,6 @@ def _raise_connection_failure( raise AutoReconnect(msg) from error -def _get_timeout_details(options: PoolOptions) -> dict[str, float]: - details = {} - timeout = _csot.get_timeout() - socket_timeout = options.socket_timeout - connect_timeout = options.connect_timeout - if timeout: - details["timeoutMS"] = timeout * 1000 - if socket_timeout and not timeout: - details["socketTimeoutMS"] = socket_timeout * 1000 - if connect_timeout: - details["connectTimeoutMS"] = connect_timeout * 1000 - return details - - -def format_timeout_details(details: Optional[dict[str, float]]) -> str: - result = "" - if details: - result += " (configured timeouts:" - for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: - if timeout in details: - result += f" {timeout}: {details[timeout]}ms," - result = result[:-1] - result += ")" - return result - - class _CancellationContext: def __init__(self) -> None: self._cancelled = False diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index ba304e7bd3..d9aebf5ccd 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -70,12 +70,12 @@ NetworkTimeout, ServerSelectionTimeoutError, ) +from pymongo.helpers_shared import _get_timeout_details from pymongo.network_layer import sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( _configured_socket, - _get_timeout_details, _raise_connection_failure, ) from pymongo.read_concern import ReadConcern diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index 4ea5cb1c1e..f7f6a26c68 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -55,6 +55,7 @@ WaitQueueTimeoutError, ) from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details from pymongo.lock import ( _cond_wait, _create_condition, @@ -76,9 +77,7 @@ SSLErrors, _CancellationContext, _configured_socket_interface, - _get_timeout_details, _raise_connection_failure, - format_timeout_details, ) from pymongo.read_preferences import ReadPreference from pymongo.server_api import _add_to_command diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py index 6651f63a30..f57420918b 100644 --- a/pymongo/synchronous/server.py +++ b/pymongo/synchronous/server.py @@ -37,7 +37,6 @@ _SDAMStatusMessage, ) from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query -from pymongo.pool_shared import _get_timeout_details, format_timeout_details from pymongo.response import PinnedResponse, Response from pymongo.synchronous.helpers import _handle_reauth @@ -225,11 +224,7 @@ def run_operation( if use_cmd: first = docs[0] operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] - # Append timeout details to MaxTimeMSExpired responses. - if first.get("code") == 50: - timeout_details = _get_timeout_details(conn.opts) # type:ignore[has-type] - first["errmsg"] += format_timeout_details(timeout_details) # type:ignore[index] - _check_command_response(first, conn.max_wire_version) + _check_command_response(first, conn.max_wire_version, pool_opts=conn.opts) # type:ignore[has-type] except Exception as exc: duration = datetime.now() - start if isinstance(exc, (NotPrimaryError, OperationFailure)): diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index e7da40fa19..08da82762c 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -43,6 +43,7 @@ from bson import decode_all from bson.code import Code +from bson.raw_bson import RawBSONDocument from pymongo import ASCENDING, DESCENDING from pymongo.asynchronous.cursor import AsyncCursor, CursorType from pymongo.asynchronous.helpers import anext @@ -199,6 +200,21 @@ async def test_max_time_ms(self): finally: await client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + async def test_maxtime_ms_message(self): + db = self.db + await db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + await db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + client = await self.async_rs_client(document_class=RawBSONDocument) + await client.db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + await client.db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + async def test_max_await_time_ms(self): db = self.db await db.pymongo_test.drop() diff --git a/test/test_cursor.py b/test/test_cursor.py index 9a4fb86e93..b63638bfab 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -43,6 +43,7 @@ from bson import decode_all from bson.code import Code +from bson.raw_bson import RawBSONDocument from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError @@ -197,6 +198,21 @@ def test_max_time_ms(self): finally: client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + def test_maxtime_ms_message(self): + db = self.db + db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + client = self.rs_client(document_class=RawBSONDocument) + client.db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + client.db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() From 37d327fbd8a53a61d81969b5c80235c61f8817f0 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 19 Aug 2025 08:37:54 -0500 Subject: [PATCH 2025/2111] PYTHON-5502 Fix handling of c extensions in Azure and GCP VMs (#2486) --- .evergreen/run-mongodb-oidc-test.sh | 2 ++ .evergreen/scripts/kms_tester.py | 10 ++++++++-- .evergreen/scripts/oidc_tester.py | 9 ++++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index a60b112bcb..1a1cd81a8b 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -8,7 +8,9 @@ if [ ${OIDC_ENV} == "k8s" ]; then SUB_TEST_NAME=$K8S_VARIANT-remote else SUB_TEST_NAME=$OIDC_ENV-remote + apt-get install -y python3-dev build-essential fi + bash ./.evergreen/just.sh setup-tests auth_oidc $SUB_TEST_NAME bash ./.evergreen/just.sh run-tests "${@:1}" diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py index f0438e4eef..3579e77619 100644 --- a/.evergreen/scripts/kms_tester.py +++ b/.evergreen/scripts/kms_tester.py @@ -30,7 +30,10 @@ def _setup_azure_vm(base_env: dict[str, str]) -> None: env["AZUREKMS_CMD"] = "tar xf mongo-python-driver.tgz" run_command(f"{azure_dir}/run-command.sh", env=env) - env["AZUREKMS_CMD"] = "NO_EXT=1 bash .evergreen/just.sh setup-tests kms azure-remote" + env["AZUREKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential" + run_command(f"{azure_dir}/run-command.sh", env=env) + + env["AZUREKMS_CMD"] = "bash .evergreen/just.sh setup-tests kms azure-remote" run_command(f"{azure_dir}/run-command.sh", env=env) LOGGER.info("Setting up Azure VM... done.") @@ -47,7 +50,10 @@ def _setup_gcp_vm(base_env: dict[str, str]) -> None: env["GCPKMS_CMD"] = "tar xf mongo-python-driver.tgz" run_command(f"{gcp_dir}/run-command.sh", env=env) - env["GCPKMS_CMD"] = "NO_EXT=1 bash ./.evergreen/just.sh setup-tests kms gcp-remote" + env["GCPKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential" + run_command(f"{gcp_dir}/run-command.sh", env=env) + + env["GCPKMS_CMD"] = "bash ./.evergreen/just.sh setup-tests kms gcp-remote" run_command(f"{gcp_dir}/run-command.sh", env=env) LOGGER.info("Setting up GCP VM...") diff --git a/.evergreen/scripts/oidc_tester.py b/.evergreen/scripts/oidc_tester.py index fd702cf1d1..d6f127bbd6 100644 --- a/.evergreen/scripts/oidc_tester.py +++ b/.evergreen/scripts/oidc_tester.py @@ -2,7 +2,14 @@ import os -from utils import DRIVERS_TOOLS, TMP_DRIVER_FILE, create_archive, read_env, run_command, write_env +from utils import ( + DRIVERS_TOOLS, + TMP_DRIVER_FILE, + create_archive, + read_env, + run_command, + write_env, +) K8S_NAMES = ["aks", "gke", "eks"] K8S_REMOTE_NAMES = [f"{n}-remote" for n in K8S_NAMES] From e4b7eb52e6c514218e7b694fb7225aae91770f5d Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 19 Aug 2025 08:45:24 -0500 Subject: [PATCH 2026/2111] PYTHON-5215 Add an asyncio.Protocol implementation for KMS (#2460) --- pymongo/asynchronous/encryption.py | 27 +- pymongo/asynchronous/pool.py | 161 +++++----- pymongo/network_layer.py | 466 +++++++++++------------------ pymongo/pool_shared.py | 127 +------- pymongo/synchronous/encryption.py | 27 +- pymongo/synchronous/pool.py | 161 +++++----- tools/synchro.py | 2 +- 7 files changed, 398 insertions(+), 573 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 4f7d55cd06..f4d66cb956 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -64,6 +64,7 @@ from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.database import AsyncDatabase from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.asynchronous.pool import AsyncBaseConnection from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts @@ -76,11 +77,11 @@ ServerSelectionTimeoutError, ) from pymongo.helpers_shared import _get_timeout_details -from pymongo.network_layer import async_socket_sendall +from pymongo.network_layer import PyMongoKMSProtocol, async_receive_kms, async_sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( - _async_configured_socket, + _configured_protocol_interface, _raise_connection_failure, ) from pymongo.read_concern import ReadConcern @@ -93,10 +94,8 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext - from pymongo.pyopenssl_context import _sslConn from pymongo.typings import _Address - _IS_SYNC = False _HTTPS_PORT = 443 @@ -111,9 +110,10 @@ _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) -async def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: +async def _connect_kms(address: _Address, opts: PoolOptions) -> AsyncBaseConnection: try: - return await _async_configured_socket(address, opts) + interface = await _configured_protocol_interface(address, opts, PyMongoKMSProtocol) + return AsyncBaseConnection(interface, opts) except Exception as exc: _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) @@ -198,18 +198,11 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: try: conn = await _connect_kms(address, opts) try: - await async_socket_sendall(conn, message) + await async_sendall(conn.conn.get_conn, message) while kms_context.bytes_needed > 0: # CSOT: update timeout. - conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - if _IS_SYNC: - data = conn.recv(kms_context.bytes_needed) - else: - from pymongo.network_layer import ( # type: ignore[attr-defined] - async_receive_data_socket, - ) - - data = await async_receive_data_socket(conn, kms_context.bytes_needed) + conn.set_conn_timeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data = await async_receive_kms(conn, kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) @@ -228,7 +221,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) ) finally: - conn.close() + await conn.close_conn(None) except MongoCryptError: raise # Propagate MongoCryptError errors directly. except Exception as exc: diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 196ec9040f..8c169b4c52 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -123,7 +123,89 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 _IS_SYNC = False -class AsyncConnection: +class AsyncBaseConnection: + """A base connection object for server and kms connections.""" + + def __init__(self, conn: AsyncNetworkingInterface, opts: PoolOptions): + self.conn = conn + self.socket_checker: SocketChecker = SocketChecker() + self.cancel_context: _CancellationContext = _CancellationContext() + self.is_sdam = False + self.closed = False + self.last_timeout: float | None = None + self.more_to_come = False + self.opts = opts + self.max_wire_version = -1 + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + if self.max_wire_version != -1: + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + else: + raise TimeoutError(errmsg) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + async def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + await self._close_conn() + + async def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + await self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + + +class AsyncConnection(AsyncBaseConnection): """Store a connection with some metadata. :param conn: a raw connection object @@ -141,29 +223,27 @@ def __init__( id: int, is_sdam: bool, ): + super().__init__(conn, pool.opts) self.pool_ref = weakref.ref(pool) - self.conn = conn - self.address = address - self.id = id + self.address: tuple[str, int] = address + self.id: int = id self.is_sdam = is_sdam - self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False self.is_writable: bool = False self.max_wire_version = MAX_WIRE_VERSION - self.max_bson_size = MAX_BSON_SIZE - self.max_message_size = MAX_MESSAGE_SIZE - self.max_write_batch_size = MAX_WRITE_BATCH_SIZE + self.max_bson_size: int = MAX_BSON_SIZE + self.max_message_size: int = MAX_MESSAGE_SIZE + self.max_write_batch_size: int = MAX_WRITE_BATCH_SIZE self.supports_sessions = False self.hello_ok: bool = False - self.is_mongos = False + self.is_mongos: bool = False self.op_msg_enabled = False self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap self.enabled_for_logging = pool.enabled_for_logging self.compression_settings = pool.opts._compression_settings self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None - self.socket_checker: SocketChecker = SocketChecker() self.oidc_token_gen_id: Optional[int] = None # Support for mechanism negotiation on the initial handshake. self.negotiated_mechs: Optional[list[str]] = None @@ -174,9 +254,6 @@ def __init__( self.pool_gen = pool.gen self.generation = self.pool_gen.get_overall() self.ready = False - self.cancel_context: _CancellationContext = _CancellationContext() - self.opts = pool.opts - self.more_to_come: bool = False # For load balancer support. self.service_id: Optional[ObjectId] = None self.server_connection_id: Optional[int] = None @@ -192,44 +269,6 @@ def __init__( # For gossiping $clusterTime from the connection handshake to the client. self._cluster_time = None - def set_conn_timeout(self, timeout: Optional[float]) -> None: - """Cache last timeout to avoid duplicate calls to conn.settimeout.""" - if timeout == self.last_timeout: - return - self.last_timeout = timeout - self.conn.get_conn.settimeout(timeout) - - def apply_timeout( - self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]] - ) -> Optional[float]: - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - # Reset the socket timeout unless we're performing a streaming monitor check. - if not self.more_to_come: - self.set_conn_timeout(self.opts.socket_timeout) - return None - # RTT validation. - rtt = _csot.get_rtt() - if rtt is None: - rtt = self.connect_rtt - max_time_ms = timeout - rtt - if max_time_ms < 0: - timeout_details = _get_timeout_details(self.opts) - formatted = format_timeout_details(timeout_details) - # CSOT: raise an error without running the command since we know it will time out. - errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" - raise ExecutionTimeout( - errmsg, - 50, - {"ok": 0, "errmsg": errmsg, "code": 50}, - self.max_wire_version, - ) - if cmd is not None: - cmd["maxTimeMS"] = int(max_time_ms * 1000) - self.set_conn_timeout(timeout) - return timeout - def pin_txn(self) -> None: self.pinned_txn = True assert not self.pinned_cursor @@ -573,26 +612,6 @@ async def close_conn(self, reason: Optional[str]) -> None: error=reason, ) - async def _close_conn(self) -> None: - """Close this connection.""" - if self.closed: - return - self.closed = True - self.cancel_context.cancel() - # Note: We catch exceptions to avoid spurious errors on interpreter - # shutdown. - try: - await self.conn.close() - except Exception: # noqa: S110 - pass - - def conn_closed(self) -> bool: - """Return True if we know socket has been closed, False otherwise.""" - if _IS_SYNC: - return self.socket_checker.socket_closed(self.conn.get_conn) - else: - return self.conn.is_closing() - def send_cluster_time( self, command: MutableMapping[str, Any], diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 2f7f9c320f..605b8dde9b 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -22,10 +22,11 @@ import struct import sys import time -from asyncio import AbstractEventLoop, BaseTransport, BufferedProtocol, Future, Transport +from asyncio import BaseTransport, BufferedProtocol, Future, Protocol, Transport from typing import ( TYPE_CHECKING, Any, + Callable, Optional, Union, ) @@ -38,208 +39,30 @@ from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply from pymongo.socket_checker import _errno_from_exception -try: - from ssl import SSLError, SSLSocket - - _HAVE_SSL = True -except ImportError: - _HAVE_SSL = False - -try: - from pymongo.pyopenssl_context import _sslConn - - _HAVE_PYOPENSSL = True -except ImportError: - _HAVE_PYOPENSSL = False - _sslConn = SSLSocket # type: ignore[assignment, misc] - -from pymongo.ssl_support import ( - BLOCKING_IO_LOOKUP_ERROR, - BLOCKING_IO_READ_ERROR, - BLOCKING_IO_WRITE_ERROR, -) - if TYPE_CHECKING: - from pymongo.asynchronous.pool import AsyncConnection - from pymongo.synchronous.pool import Connection + from pymongo.asynchronous.pool import AsyncBaseConnection, AsyncConnection + from pymongo.pyopenssl_context import _sslConn + from pymongo.synchronous.pool import BaseConnection, Connection _UNPACK_HEADER = struct.Struct(" None: - timeout = sock.gettimeout() - sock.settimeout(0.0) - loop = asyncio.get_running_loop() - try: - if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): - await asyncio.wait_for(_async_socket_sendall_ssl(sock, buf, loop), timeout=timeout) - else: - await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] - except asyncio.TimeoutError as exc: - # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. - raise socket.timeout("timed out") from exc - finally: - sock.settimeout(timeout) - - -if sys.platform != "win32": - - async def _async_socket_sendall_ssl( - sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop - ) -> None: - view = memoryview(buf) - sent = 0 - - def _is_ready(fut: Future[Any]) -> None: - if fut.done(): - return - fut.set_result(None) - - while sent < len(buf): - try: - sent += sock.send(view[sent:]) - except BLOCKING_IO_ERRORS as exc: - fd = sock.fileno() - # Check for closed socket. - if fd == -1: - raise SSLError("Underlying socket has been closed") from None - if isinstance(exc, BLOCKING_IO_READ_ERROR): - fut = loop.create_future() - loop.add_reader(fd, _is_ready, fut) - try: - await fut - finally: - loop.remove_reader(fd) - if isinstance(exc, BLOCKING_IO_WRITE_ERROR): - fut = loop.create_future() - loop.add_writer(fd, _is_ready, fut) - try: - await fut - finally: - loop.remove_writer(fd) - if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): - fut = loop.create_future() - loop.add_reader(fd, _is_ready, fut) - try: - loop.add_writer(fd, _is_ready, fut) - await fut - finally: - loop.remove_reader(fd) - loop.remove_writer(fd) - - async def _async_socket_receive_ssl( - conn: _sslConn, length: int, loop: AbstractEventLoop, once: Optional[bool] = False - ) -> memoryview: - mv = memoryview(bytearray(length)) - total_read = 0 - - def _is_ready(fut: Future[Any]) -> None: - if fut.done(): - return - fut.set_result(None) +_PYPY = "PyPy" in sys.version +_WINDOWS = sys.platform == "win32" - while total_read < length: - try: - read = conn.recv_into(mv[total_read:]) - if read == 0: - raise OSError("connection closed") - # KMS responses update their expected size after the first batch, stop reading after one loop - if once: - return mv[:read] - total_read += read - except BLOCKING_IO_ERRORS as exc: - fd = conn.fileno() - # Check for closed socket. - if fd == -1: - raise SSLError("Underlying socket has been closed") from None - if isinstance(exc, BLOCKING_IO_READ_ERROR): - fut = loop.create_future() - loop.add_reader(fd, _is_ready, fut) - try: - await fut - finally: - loop.remove_reader(fd) - if isinstance(exc, BLOCKING_IO_WRITE_ERROR): - fut = loop.create_future() - loop.add_writer(fd, _is_ready, fut) - try: - await fut - finally: - loop.remove_writer(fd) - if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): - fut = loop.create_future() - loop.add_reader(fd, _is_ready, fut) - try: - loop.add_writer(fd, _is_ready, fut) - await fut - finally: - loop.remove_reader(fd) - loop.remove_writer(fd) - return mv - -else: - # The default Windows asyncio event loop does not support loop.add_reader/add_writer: - # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support - # Note: In PYTHON-4493 we plan to replace this code with asyncio streams. - async def _async_socket_sendall_ssl( - sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop - ) -> None: - view = memoryview(buf) - total_length = len(buf) - total_sent = 0 - # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success - # down to 1ms. - backoff = 0.001 - while total_sent < total_length: - try: - sent = sock.send(view[total_sent:]) - except BLOCKING_IO_ERRORS: - await asyncio.sleep(backoff) - sent = 0 - if sent > 0: - backoff = max(backoff / 2, 0.001) - else: - backoff = min(backoff * 2, 0.512) - total_sent += sent - - async def _async_socket_receive_ssl( - conn: _sslConn, length: int, dummy: AbstractEventLoop, once: Optional[bool] = False - ) -> memoryview: - mv = memoryview(bytearray(length)) - total_read = 0 - # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success - # down to 1ms. - backoff = 0.001 - while total_read < length: - try: - read = conn.recv_into(mv[total_read:]) - if read == 0: - raise OSError("connection closed") - # KMS responses update their expected size after the first batch, stop reading after one loop - if once: - return mv[:read] - except BLOCKING_IO_ERRORS: - await asyncio.sleep(backoff) - read = 0 - if read > 0: - backoff = max(backoff / 2, 0.001) - else: - backoff = min(backoff * 2, 0.512) - total_read += read - return mv +# Errors raised by sockets (and TLS sockets) when in non-blocking mode. +BLOCKING_IO_ERRORS = ( + BlockingIOError, + *ssl_support.BLOCKING_IO_LOOKUP_ERROR, + *ssl_support.BLOCKING_IO_ERRORS, +) def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: sock.sendall(buf) -async def _poll_cancellation(conn: AsyncConnection) -> None: +async def _poll_cancellation(conn: AsyncBaseConnection) -> None: while True: if conn.cancel_context.cancelled: return @@ -247,49 +70,7 @@ async def _poll_cancellation(conn: AsyncConnection) -> None: await asyncio.sleep(_POLL_TIMEOUT) -async def async_receive_data_socket( - sock: Union[socket.socket, _sslConn], length: int -) -> memoryview: - sock_timeout = sock.gettimeout() - timeout = sock_timeout - - sock.settimeout(0.0) - loop = asyncio.get_running_loop() - try: - if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): - return await asyncio.wait_for( - _async_socket_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] - timeout=timeout, - ) - else: - return await asyncio.wait_for( - _async_socket_receive(sock, length, loop), # type: ignore[arg-type] - timeout=timeout, - ) - except asyncio.TimeoutError as err: - raise socket.timeout("timed out") from err - finally: - sock.settimeout(sock_timeout) - - -async def _async_socket_receive( - conn: socket.socket, length: int, loop: AbstractEventLoop -) -> memoryview: - mv = memoryview(bytearray(length)) - bytes_read = 0 - while bytes_read < length: - chunk_length = await loop.sock_recv_into(conn, mv[bytes_read:]) - if chunk_length == 0: - raise OSError("connection closed") - bytes_read += chunk_length - return mv - - -_PYPY = "PyPy" in sys.version -_WINDOWS = sys.platform == "win32" - - -def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: +def wait_for_read(conn: BaseConnection, deadline: Optional[float]) -> None: """Block until at least one byte is read, or a timeout, or a cancel.""" sock = conn.conn.sock timed_out = False @@ -322,7 +103,7 @@ def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: raise socket.timeout("timed out") -def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: +def receive_data(conn: BaseConnection, length: int, deadline: Optional[float]) -> memoryview: buf = bytearray(length) mv = memoryview(buf) bytes_read = 0 @@ -412,7 +193,7 @@ def sock(self) -> Any: class AsyncNetworkingInterface(NetworkingInterfaceBase): - def __init__(self, conn: tuple[Transport, PyMongoProtocol]): + def __init__(self, conn: tuple[Transport, PyMongoBaseProtocol]): super().__init__(conn) @property @@ -430,7 +211,7 @@ def is_closing(self) -> bool: return self.conn[0].is_closing() @property - def get_conn(self) -> PyMongoProtocol: + def get_conn(self) -> PyMongoBaseProtocol: return self.conn[1] @property @@ -469,9 +250,51 @@ def recv_into(self, buffer: bytes) -> int: return self.conn.recv_into(buffer) -class PyMongoProtocol(BufferedProtocol): +class PyMongoBaseProtocol(Protocol): def __init__(self, timeout: Optional[float] = None): self.transport: Transport = None # type: ignore[assignment] + self._timeout = timeout + self._closed = asyncio.get_running_loop().create_future() + self._connection_lost = False + + def settimeout(self, timeout: float | None) -> None: + self._timeout = timeout + + @property + def gettimeout(self) -> float | None: + """The configured timeout for the socket that underlies our protocol pair.""" + return self._timeout + + def close(self, exc: Optional[Exception] = None) -> None: + self.transport.abort() + self._resolve_pending(exc) + self._connection_lost = True + + def connection_lost(self, exc: Optional[Exception] = None) -> None: + self._resolve_pending(exc) + if not self._closed.done(): + self._closed.set_result(None) + + def _resolve_pending(self, exc: Optional[Exception] = None) -> None: + pass + + async def wait_closed(self) -> None: + await self._closed + + async def write(self, message: bytes) -> None: + """Write a message to this connection's transport.""" + if self.transport.is_closing(): + raise OSError("Connection is closed") + self.transport.write(message) + self.transport.resume_reading() + + async def read(self, *args: Any) -> Any: + raise NotImplementedError + + +class PyMongoProtocol(PyMongoBaseProtocol, BufferedProtocol): + def __init__(self, timeout: Optional[float] = None): + super().__init__(timeout) # Each message is reader in 2-3 parts: header, compression header, and message body # The message buffer is allocated after the header is read. self._header = memoryview(bytearray(16)) @@ -485,25 +308,14 @@ def __init__(self, timeout: Optional[float] = None): self._expecting_compression = False self._message_size = 0 self._op_code = 0 - self._connection_lost = False self._read_waiter: Optional[Future[Any]] = None - self._timeout = timeout self._is_compressed = False self._compressor_id: Optional[int] = None self._max_message_size = MAX_MESSAGE_SIZE self._response_to: Optional[int] = None - self._closed = asyncio.get_running_loop().create_future() self._pending_messages: collections.deque[Future[Any]] = collections.deque() self._done_messages: collections.deque[Future[Any]] = collections.deque() - def settimeout(self, timeout: float | None) -> None: - self._timeout = timeout - - @property - def gettimeout(self) -> float | None: - """The configured timeout for the socket that underlies our protocol pair.""" - return self._timeout - def connection_made(self, transport: BaseTransport) -> None: """Called exactly once when a connection is made. The transport argument is the transport representing the write side of the connection. @@ -511,13 +323,6 @@ def connection_made(self, transport: BaseTransport) -> None: self.transport = transport # type: ignore[assignment] self.transport.set_write_buffer_limits(MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE) - async def write(self, message: bytes) -> None: - """Write a message to this connection's transport.""" - if self.transport.is_closing(): - raise OSError("Connection is closed") - self.transport.write(message) - self.transport.resume_reading() - async def read(self, request_id: Optional[int], max_message_size: int) -> tuple[bytes, int]: """Read a single MongoDB Wire Protocol message from this connection.""" if self.transport: @@ -660,7 +465,7 @@ def process_compression_header(self) -> tuple[int, int]: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(self._compression_header) return op_code, compressor_id - def _resolve_pending_messages(self, exc: Optional[Exception] = None) -> None: + def _resolve_pending(self, exc: Optional[Exception] = None) -> None: pending = list(self._pending_messages) for msg in pending: if not msg.done(): @@ -670,21 +475,92 @@ def _resolve_pending_messages(self, exc: Optional[Exception] = None) -> None: msg.set_exception(exc) self._done_messages.append(msg) - def close(self, exc: Optional[Exception] = None) -> None: - self.transport.abort() - self._resolve_pending_messages(exc) - self._connection_lost = True - def connection_lost(self, exc: Optional[Exception] = None) -> None: - self._resolve_pending_messages(exc) - if not self._closed.done(): - self._closed.set_result(None) +class PyMongoKMSProtocol(PyMongoBaseProtocol): + def __init__(self, timeout: Optional[float] = None): + super().__init__(timeout) + self._buffers: collections.deque[memoryview[bytes]] = collections.deque() + self._bytes_ready = 0 + self._pending_reads: collections.deque[int] = collections.deque() + self._pending_listeners: collections.deque[Future[Any]] = collections.deque() - async def wait_closed(self) -> None: - await self._closed + def connection_made(self, transport: BaseTransport) -> None: + """Called exactly once when a connection is made. + The transport argument is the transport representing the write side of the connection. + """ + self.transport = transport # type: ignore[assignment] + def data_received(self, data: bytes) -> None: + if self._connection_lost: + return + + self._bytes_ready += len(data) + self._buffers.append(memoryview(data)) + + if not len(self._pending_reads): + return + + bytes_needed = self._pending_reads.popleft() + data = self._read(bytes_needed) + waiter = self._pending_listeners.popleft() + waiter.set_result(data) + + async def read(self, bytes_needed: int) -> bytes: + """Read up to the requested bytes from this connection.""" + # Note: all reads are "up-to" bytes_needed because we don't know if the kms_context + # has processed a Content-Length header and is requesting a response or not. + # Wait for other listeners first. + if len(self._pending_listeners): + await asyncio.gather(*self._pending_listeners) + # If there are bytes ready, then there is no need to wait further. + if self._bytes_ready > 0: + return self._read(bytes_needed) + if self.transport: + try: + self.transport.resume_reading() + # Known bug in SSL Protocols, fixed in Python 3.11: https://github.com/python/cpython/issues/89322 + except AttributeError: + raise OSError("connection is already closed") from None + if self.transport and self.transport.is_closing(): + raise OSError("connection is already closed") + self._pending_reads.append(bytes_needed) + read_waiter = asyncio.get_running_loop().create_future() + self._pending_listeners.append(read_waiter) + return await read_waiter + + def _resolve_pending(self, exc: Optional[Exception] = None) -> None: + while self._pending_listeners: + fut = self._pending_listeners.popleft() + fut.set_result(b"") + + def _read(self, bytes_needed: int) -> memoryview: + """Read bytes.""" + # Send the bytes to the listener. + if self._bytes_ready < bytes_needed: + bytes_needed = self._bytes_ready + self._bytes_ready -= bytes_needed + + output_buf = bytearray(bytes_needed) + n_remaining = bytes_needed + out_index = 0 + while n_remaining > 0: + buffer = self._buffers.popleft() + buf_size = len(buffer) + # if we didn't exhaust the buffer, read the partial data and return the buffer. + if buf_size > n_remaining: + output_buf[out_index : n_remaining + out_index] = buffer[:n_remaining] + buffer = buffer[n_remaining:] + n_remaining = 0 + self._buffers.appendleft(buffer) + # otherwise exhaust the buffer. + else: + output_buf[out_index : out_index + buf_size] = buffer[:] + out_index += buf_size + n_remaining -= buf_size + return memoryview(output_buf) -async def async_sendall(conn: PyMongoProtocol, buf: bytes) -> None: + +async def async_sendall(conn: PyMongoBaseProtocol, buf: bytes) -> None: try: await asyncio.wait_for(conn.write(buf), timeout=conn.gettimeout) except asyncio.TimeoutError as exc: @@ -692,12 +568,18 @@ async def async_sendall(conn: PyMongoProtocol, buf: bytes) -> None: raise socket.timeout("timed out") from exc -async def async_receive_message( - conn: AsyncConnection, - request_id: Optional[int], - max_message_size: int = MAX_MESSAGE_SIZE, -) -> Union[_OpReply, _OpMsg]: - """Receive a raw BSON message or raise socket.error.""" +async def async_receive_kms(conn: AsyncBaseConnection, bytes_needed: int) -> bytes: + """Receive raw bytes from the kms connection.""" + + def callback(result: Any) -> bytes: + return result + + return await _async_receive_data(conn, callback, bytes_needed) + + +async def _async_receive_data( + conn: AsyncBaseConnection, callback: Callable[..., Any], *args: Any +) -> Any: timeout: Optional[Union[float, int]] timeout = conn.conn.gettimeout if _csot.get_timeout(): @@ -713,8 +595,8 @@ async def async_receive_message( # timeouts on AWS Lambda and other FaaS environments. timeout = max(deadline - time.monotonic(), 0) + read_task = create_task(conn.conn.get_conn.read(*args)) cancellation_task = create_task(_poll_cancellation(conn)) - read_task = create_task(conn.conn.get_conn.read(request_id, max_message_size)) tasks = [read_task, cancellation_task] try: done, pending = await asyncio.wait( @@ -727,14 +609,7 @@ async def async_receive_message( if len(done) == 0: raise socket.timeout("timed out") if read_task in done: - data, op_code = read_task.result() - try: - unpack_reply = _UNPACK_REPLY[op_code] - except KeyError: - raise ProtocolError( - f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" - ) from None - return unpack_reply(data) + return callback(read_task.result()) raise _OperationCancelled("operation cancelled") except asyncio.CancelledError: for task in tasks: @@ -743,6 +618,31 @@ async def async_receive_message( raise +async def async_receive_message( + conn: AsyncConnection, + request_id: Optional[int], + max_message_size: int = MAX_MESSAGE_SIZE, +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + + def callback(result: Any) -> _OpMsg | _OpReply: + data, op_code = result + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) + + return await _async_receive_data(conn, callback, request_id, max_message_size) + + +def receive_kms(conn: BaseConnection, bytes_needed: int) -> bytes: + """Receive raw bytes from the kms connection.""" + return conn.conn.sock.recv(bytes_needed) + + def receive_message( conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE ) -> Union[_OpReply, _OpMsg]: diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index ac562af542..0536dc3835 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -16,7 +16,6 @@ from __future__ import annotations import asyncio -import functools import socket import ssl import sys @@ -25,7 +24,6 @@ Any, NoReturn, Optional, - Union, ) from pymongo import _csot @@ -37,13 +35,17 @@ _CertificateError, ) from pymongo.helpers_shared import _get_timeout_details, format_timeout_details -from pymongo.network_layer import AsyncNetworkingInterface, NetworkingInterface, PyMongoProtocol +from pymongo.network_layer import ( + AsyncNetworkingInterface, + NetworkingInterface, + PyMongoBaseProtocol, + PyMongoProtocol, +) from pymongo.pool_options import PoolOptions from pymongo.ssl_support import PYSSLError, SSLError, _has_sni SSLErrors = (PYSSLError, SSLError) if TYPE_CHECKING: - from pymongo.pyopenssl_context import _sslConn from pymongo.typings import _Address try: @@ -244,64 +246,10 @@ async def _async_create_connection(address: _Address, options: PoolOptions) -> s raise OSError("getaddrinfo failed") -async def _async_configured_socket( - address: _Address, options: PoolOptions -) -> Union[socket.socket, _sslConn]: - """Given (host, port) and PoolOptions, return a raw configured socket. - - Can raise socket.error, ConnectionFailure, or _CertificateError. - - Sets socket's SSL and timeout options. - """ - sock = await _async_create_connection(address, options) - ssl_context = options._ssl_context - - if ssl_context is None: - sock.settimeout(options.socket_timeout) - return sock - - host = address[0] - try: - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if _has_sni(False): - loop = asyncio.get_running_loop() - ssl_sock = await loop.run_in_executor( - None, - functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] - ) - else: - loop = asyncio.get_running_loop() - ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] - except _CertificateError: - sock.close() - # Raise _CertificateError directly like we do after match_hostname - # below. - raise - except (OSError, *SSLErrors) as exc: - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - details = _get_timeout_details(options) - _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) - if ( - ssl_context.verify_mode - and not ssl_context.check_hostname - and not options.tls_allow_invalid_hostnames - ): - try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] - except _CertificateError: - ssl_sock.close() - raise - - ssl_sock.settimeout(options.socket_timeout) - return ssl_sock - - async def _configured_protocol_interface( - address: _Address, options: PoolOptions + address: _Address, + options: PoolOptions, + protocol_kls: type[PyMongoBaseProtocol] = PyMongoProtocol, ) -> AsyncNetworkingInterface: """Given (host, port) and PoolOptions, return a configured AsyncNetworkingInterface. @@ -316,7 +264,7 @@ async def _configured_protocol_interface( if ssl_context is None: return AsyncNetworkingInterface( await asyncio.get_running_loop().create_connection( - lambda: PyMongoProtocol(timeout=timeout), sock=sock + lambda: protocol_kls(timeout=timeout), sock=sock ) ) @@ -325,7 +273,7 @@ async def _configured_protocol_interface( # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. transport, protocol = await asyncio.get_running_loop().create_connection( # type: ignore[call-overload] - lambda: PyMongoProtocol(timeout=timeout), + lambda: protocol_kls(timeout=timeout), sock=sock, server_hostname=host, ssl=ssl_context, @@ -425,56 +373,9 @@ def _create_connection(address: _Address, options: PoolOptions) -> socket.socket raise OSError("getaddrinfo failed") -def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: - """Given (host, port) and PoolOptions, return a raw configured socket. - - Can raise socket.error, ConnectionFailure, or _CertificateError. - - Sets socket's SSL and timeout options. - """ - sock = _create_connection(address, options) - ssl_context = options._ssl_context - - if ssl_context is None: - sock.settimeout(options.socket_timeout) - return sock - - host = address[0] - try: - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if _has_sni(True): - ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] - else: - ssl_sock = ssl_context.wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] - except _CertificateError: - sock.close() - # Raise _CertificateError directly like we do after match_hostname - # below. - raise - except (OSError, *SSLErrors) as exc: - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - details = _get_timeout_details(options) - _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) - if ( - ssl_context.verify_mode - and not ssl_context.check_hostname - and not options.tls_allow_invalid_hostnames - ): - try: - ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] - except _CertificateError: - ssl_sock.close() - raise - - ssl_sock.settimeout(options.socket_timeout) - return ssl_sock - - -def _configured_socket_interface(address: _Address, options: PoolOptions) -> NetworkingInterface: +def _configured_socket_interface( + address: _Address, options: PoolOptions, *args: Any +) -> NetworkingInterface: """Given (host, port) and PoolOptions, return a NetworkingInterface wrapping a configured socket. Can raise socket.error, ConnectionFailure, or _CertificateError. diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index d9aebf5ccd..7b98243528 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -71,11 +71,11 @@ ServerSelectionTimeoutError, ) from pymongo.helpers_shared import _get_timeout_details -from pymongo.network_layer import sendall +from pymongo.network_layer import PyMongoKMSProtocol, receive_kms, sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( - _configured_socket, + _configured_socket_interface, _raise_connection_failure, ) from pymongo.read_concern import ReadConcern @@ -85,6 +85,7 @@ from pymongo.synchronous.cursor import Cursor from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.pool import BaseConnection from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host from pymongo.write_concern import WriteConcern @@ -92,10 +93,8 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext - from pymongo.pyopenssl_context import _sslConn from pymongo.typings import _Address - _IS_SYNC = True _HTTPS_PORT = 443 @@ -110,9 +109,10 @@ _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) -def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: +def _connect_kms(address: _Address, opts: PoolOptions) -> BaseConnection: try: - return _configured_socket(address, opts) + interface = _configured_socket_interface(address, opts, PyMongoKMSProtocol) + return BaseConnection(interface, opts) except Exception as exc: _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) @@ -197,18 +197,11 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: try: conn = _connect_kms(address, opts) try: - sendall(conn, message) + sendall(conn.conn.get_conn, message) while kms_context.bytes_needed > 0: # CSOT: update timeout. - conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - if _IS_SYNC: - data = conn.recv(kms_context.bytes_needed) - else: - from pymongo.network_layer import ( # type: ignore[attr-defined] - receive_data_socket, - ) - - data = receive_data_socket(conn, kms_context.bytes_needed) + conn.set_conn_timeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data = receive_kms(conn, kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) @@ -227,7 +220,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) ) finally: - conn.close() + conn.close_conn(None) except MongoCryptError: raise # Propagate MongoCryptError errors directly. except Exception as exc: diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index f7f6a26c68..f35ca4d0fd 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -123,7 +123,89 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 _IS_SYNC = True -class Connection: +class BaseConnection: + """A base connection object for server and kms connections.""" + + def __init__(self, conn: NetworkingInterface, opts: PoolOptions): + self.conn = conn + self.socket_checker: SocketChecker = SocketChecker() + self.cancel_context: _CancellationContext = _CancellationContext() + self.is_sdam = False + self.closed = False + self.last_timeout: float | None = None + self.more_to_come = False + self.opts = opts + self.max_wire_version = -1 + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + if self.max_wire_version != -1: + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + else: + raise TimeoutError(errmsg) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + self._close_conn() + + def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + + +class Connection(BaseConnection): """Store a connection with some metadata. :param conn: a raw connection object @@ -141,29 +223,27 @@ def __init__( id: int, is_sdam: bool, ): + super().__init__(conn, pool.opts) self.pool_ref = weakref.ref(pool) - self.conn = conn - self.address = address - self.id = id + self.address: tuple[str, int] = address + self.id: int = id self.is_sdam = is_sdam - self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False self.is_writable: bool = False self.max_wire_version = MAX_WIRE_VERSION - self.max_bson_size = MAX_BSON_SIZE - self.max_message_size = MAX_MESSAGE_SIZE - self.max_write_batch_size = MAX_WRITE_BATCH_SIZE + self.max_bson_size: int = MAX_BSON_SIZE + self.max_message_size: int = MAX_MESSAGE_SIZE + self.max_write_batch_size: int = MAX_WRITE_BATCH_SIZE self.supports_sessions = False self.hello_ok: bool = False - self.is_mongos = False + self.is_mongos: bool = False self.op_msg_enabled = False self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap self.enabled_for_logging = pool.enabled_for_logging self.compression_settings = pool.opts._compression_settings self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None - self.socket_checker: SocketChecker = SocketChecker() self.oidc_token_gen_id: Optional[int] = None # Support for mechanism negotiation on the initial handshake. self.negotiated_mechs: Optional[list[str]] = None @@ -174,9 +254,6 @@ def __init__( self.pool_gen = pool.gen self.generation = self.pool_gen.get_overall() self.ready = False - self.cancel_context: _CancellationContext = _CancellationContext() - self.opts = pool.opts - self.more_to_come: bool = False # For load balancer support. self.service_id: Optional[ObjectId] = None self.server_connection_id: Optional[int] = None @@ -192,44 +269,6 @@ def __init__( # For gossiping $clusterTime from the connection handshake to the client. self._cluster_time = None - def set_conn_timeout(self, timeout: Optional[float]) -> None: - """Cache last timeout to avoid duplicate calls to conn.settimeout.""" - if timeout == self.last_timeout: - return - self.last_timeout = timeout - self.conn.get_conn.settimeout(timeout) - - def apply_timeout( - self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] - ) -> Optional[float]: - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - # Reset the socket timeout unless we're performing a streaming monitor check. - if not self.more_to_come: - self.set_conn_timeout(self.opts.socket_timeout) - return None - # RTT validation. - rtt = _csot.get_rtt() - if rtt is None: - rtt = self.connect_rtt - max_time_ms = timeout - rtt - if max_time_ms < 0: - timeout_details = _get_timeout_details(self.opts) - formatted = format_timeout_details(timeout_details) - # CSOT: raise an error without running the command since we know it will time out. - errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" - raise ExecutionTimeout( - errmsg, - 50, - {"ok": 0, "errmsg": errmsg, "code": 50}, - self.max_wire_version, - ) - if cmd is not None: - cmd["maxTimeMS"] = int(max_time_ms * 1000) - self.set_conn_timeout(timeout) - return timeout - def pin_txn(self) -> None: self.pinned_txn = True assert not self.pinned_cursor @@ -571,26 +610,6 @@ def close_conn(self, reason: Optional[str]) -> None: error=reason, ) - def _close_conn(self) -> None: - """Close this connection.""" - if self.closed: - return - self.closed = True - self.cancel_context.cancel() - # Note: We catch exceptions to avoid spurious errors on interpreter - # shutdown. - try: - self.conn.close() - except Exception: # noqa: S110 - pass - - def conn_closed(self) -> bool: - """Return True if we know socket has been closed, False otherwise.""" - if _IS_SYNC: - return self.socket_checker.socket_closed(self.conn.get_conn) - else: - return self.conn.is_closing() - def send_cluster_time( self, command: MutableMapping[str, Any], diff --git a/tools/synchro.py b/tools/synchro.py index e502f96281..9a760c0ad7 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -120,9 +120,9 @@ "_async_create_lock": "_create_lock", "_async_create_condition": "_create_condition", "_async_cond_wait": "_cond_wait", + "async_receive_kms": "receive_kms", "AsyncNetworkingInterface": "NetworkingInterface", "_configured_protocol_interface": "_configured_socket_interface", - "_async_configured_socket": "_configured_socket", "SpecRunnerTask": "SpecRunnerThread", "AsyncMockConnection": "MockConnection", "AsyncMockPool": "MockPool", From d24b4a56974b8bbe032e95f1e2ee6bcaf3316b3c Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 19 Aug 2025 11:23:51 -0500 Subject: [PATCH 2027/2111] PYTHON-5503 Use uv to install just in GitHub Actions (#2490) --- .github/workflows/test-python.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 96729e3a6e..11255f9e49 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -22,13 +22,13 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - name: Install just - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "3.9" + - name: Install just + run: uv tool install rust-just - name: Install Python dependencies run: | just install @@ -83,13 +83,13 @@ jobs: - uses: actions/checkout@v4 with: persist-credentials: false - - name: Install just - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 - name: Install uv uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 with: enable-cache: true python-version: "3.9" + - name: Install just + run: uv tool install rust-just - id: setup-mongodb uses: mongodb-labs/drivers-evergreen-tools@master with: @@ -114,7 +114,7 @@ jobs: enable-cache: true python-version: "3.9" - name: Install just - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 + run: uv tool install rust-just - name: Install dependencies run: just install - name: Build docs @@ -133,7 +133,7 @@ jobs: enable-cache: true python-version: "3.9" - name: Install just - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 + run: uv tool install rust-just - name: Install dependencies run: just install - name: Build docs @@ -155,7 +155,7 @@ jobs: enable-cache: true python-version: "${{matrix.python}}" - name: Install just - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 + run: uv tool install rust-just - name: Install dependencies run: | just install From 3a26119eb332fed1cded500a137eb54a129660ad Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 19 Aug 2025 11:26:11 -0500 Subject: [PATCH 2028/2111] PYTHON-5502 Fix c extensions on OIDC VMs (#2489) --- .evergreen/run-mongodb-oidc-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh index 1a1cd81a8b..b34013a6ac 100755 --- a/.evergreen/run-mongodb-oidc-test.sh +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -8,7 +8,7 @@ if [ ${OIDC_ENV} == "k8s" ]; then SUB_TEST_NAME=$K8S_VARIANT-remote else SUB_TEST_NAME=$OIDC_ENV-remote - apt-get install -y python3-dev build-essential + sudo apt-get install -y python3-dev build-essential fi bash ./.evergreen/just.sh setup-tests auth_oidc $SUB_TEST_NAME From db3d3c702225431f74d607e9eb11fbeb528c27cb Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 19 Aug 2025 17:46:25 -0700 Subject: [PATCH 2029/2111] Prep for 4.14.1 release (#2495) [master] (#2496) --- doc/changelog.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index a553be0144..e41ecc7e1b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,5 +1,20 @@ Changelog ========= +Changes in Version 4.14.1 (2025/08/19) +-------------------------------------- + +Version 4.14.1 is a bug fix release. + + - Fixed a bug in ``MongoClient.append_metadata()`` and ``AsyncMongoClient.append_metadata()`` + that allowed duplicate ``DriverInfo.name`` to be appended to the metadata. + +Issues Resolved +............... + +See the `PyMongo 4.14.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.14.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=45256 Changes in Version 4.14.0 (2025/08/06) -------------------------------------- @@ -34,6 +49,14 @@ PyMongo 4.14 brings a number of changes including: - Changed :meth:`~pymongo.uri_parser.parse_uri`'s ``options`` return value to be type ``dict`` instead of ``_CaseInsensitiveDictionary``. +Issues Resolved +............... + +See the `PyMongo 4.14 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.14 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43041 + Changes in Version 4.13.2 (2025/06/17) -------------------------------------- From f7b94be0dbd263c8e2e570c2ae425946a075a08a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 20 Aug 2025 08:58:20 -0500 Subject: [PATCH 2030/2111] PYTHON-5143 Support auto encryption in unified tests (#2488) --- .evergreen/remove-unimplemented-tests.sh | 5 - test/__init__.py | 5 +- test/asynchronous/__init__.py | 5 +- test/asynchronous/helpers.py | 248 +------------ test/asynchronous/test_encryption.py | 46 +-- test/asynchronous/unified_format.py | 40 +- .../unified/fle2v2-BypassQueryAnalysis.json | 322 ++++++++++++++++ ...EncryptedFields-vs-EncryptedFieldsMap.json | 256 +++++++++++++ .../spec/unified/localSchema.json | 343 ++++++++++++++++++ .../spec/unified/maxWireVersion.json | 101 ++++++ test/helpers.py | 248 +------------ test/helpers_shared.py | 271 ++++++++++++++ test/test_encryption.py | 46 +-- test/test_uri_spec.py | 2 +- .../valid-pass/poc-queryable-encryption.json | 193 ++++++++++ test/unified_format.py | 40 +- test/unified_format_shared.py | 15 +- 17 files changed, 1627 insertions(+), 559 deletions(-) create mode 100644 test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json create mode 100644 test/client-side-encryption/spec/unified/localSchema.json create mode 100644 test/client-side-encryption/spec/unified/maxWireVersion.json create mode 100644 test/helpers_shared.py create mode 100644 test/unified-test-format/valid-pass/poc-queryable-encryption.json diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh index 92685ab2b7..fd50010138 100755 --- a/.evergreen/remove-unimplemented-tests.sh +++ b/.evergreen/remove-unimplemented-tests.sh @@ -3,11 +3,6 @@ PYMONGO=$(dirname "$(cd "$(dirname "$0")" || exit; pwd)") rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 -rm $PYMONGO/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json # PYTHON-5143 -rm $PYMONGO/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json # PYTHON-5143 -rm $PYMONGO/test/client-side-encryption/spec/unified/localSchema.json # PYTHON-5143 -rm $PYMONGO/test/client-side-encryption/spec/unified/maxWireVersion.json # PYTHON-5143 -rm $PYMONGO/test/unified-test-format/valid-pass/poc-queryable-encryption.json # PYTHON-5143 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-application-error.json # PYTHON-4918 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-checkout-error.json # PYTHON-4918 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-min-pool-size-error.json # PYTHON-4918 diff --git a/test/__init__.py b/test/__init__.py index 95c2d7ee9d..12660e3a4a 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -59,7 +59,8 @@ sys.path[0:0] = [""] -from test.helpers import ( +from test.helpers import client_knobs, global_knobs +from test.helpers_shared import ( COMPRESSORS, IS_SRV, MONGODB_API_VERSION, @@ -67,10 +68,8 @@ TEST_LOADBALANCER, TLS_OPTIONS, SystemCertsPatcher, - client_knobs, db_pwd, db_user, - global_knobs, host, is_server_resolvable, port, diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 96769dc9c5..7b594b184d 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -59,7 +59,8 @@ sys.path[0:0] = [""] -from test.helpers import ( +from test.asynchronous.helpers import client_knobs, global_knobs +from test.helpers_shared import ( COMPRESSORS, IS_SRV, MONGODB_API_VERSION, @@ -67,10 +68,8 @@ TEST_LOADBALANCER, TLS_OPTIONS, SystemCertsPatcher, - client_knobs, db_pwd, db_user, - global_knobs, host, is_server_resolvable, port, diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py index bcb004af51..892c629631 100644 --- a/test/asynchronous/helpers.py +++ b/test/asynchronous/helpers.py @@ -12,137 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Shared constants and helper methods for pymongo, bson, and gridfs test suites.""" +"""Shared helper methods for pymongo, bson, and gridfs test suites.""" from __future__ import annotations import asyncio -import base64 -import gc -import multiprocessing -import os -import signal -import socket -import subprocess -import sys import threading -import time import traceback -import unittest -import warnings -from inspect import iscoroutinefunction - -from pymongo._asyncio_task import create_task - -try: - import ipaddress - - HAVE_IPADDRESS = True -except ImportError: - HAVE_IPADDRESS = False from functools import wraps -from typing import Any, Callable, Dict, Generator, Optional, no_type_check -from unittest import SkipTest +from typing import Optional, no_type_check -from bson.son import SON -from pymongo import common, message +from bson import SON +from pymongo import common +from pymongo._asyncio_task import create_task from pymongo.read_preferences import ReadPreference -from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] -from pymongo.synchronous.uri_parser import parse_uri - -if HAVE_SSL: - import ssl _IS_SYNC = False -# Enable debug output for uncollectable objects. PyPy does not have set_debug. -if hasattr(gc, "set_debug"): - gc.set_debug( - gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) - ) - -# The host and port of a single mongod or mongos, or the seed host -# for a replica set. -host = os.environ.get("DB_IP", "localhost") -port = int(os.environ.get("DB_PORT", 27017)) -IS_SRV = "mongodb+srv" in host - -db_user = os.environ.get("DB_USER", "user") -db_pwd = os.environ.get("DB_PASSWORD", "password") - -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") -CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) -CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) - -TLS_OPTIONS: Dict = {"tls": True} -if CLIENT_PEM: - TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM -if CA_PEM: - TLS_OPTIONS["tlsCAFile"] = CA_PEM - -COMPRESSORS = os.environ.get("COMPRESSORS") -MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") -TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) -SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") -MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") - -if TEST_LOADBALANCER: - res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res["nodelist"][0] - db_user = res["username"] or db_user - db_pwd = res["password"] or db_pwd - - -# Shared KMS data. -LOCAL_MASTER_KEY = base64.b64decode( - b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" - b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" -) -AWS_CREDS = { - "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), -} -AWS_CREDS_2 = { - "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), -} -AZURE_CREDS = { - "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), - "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), - "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), -} -GCP_CREDS = { - "email": os.environ.get("FLE_GCP_EMAIL", ""), - "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), -} -KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} - -# Ensure Evergreen metadata doesn't result in truncation -os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") - - -def is_server_resolvable(): - """Returns True if 'server' is resolvable.""" - socket_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(1) - try: - try: - socket.gethostbyname("server") - return True - except OSError: - return False - finally: - socket.setdefaulttimeout(socket_timeout) - - -def _create_user(authdb, user, pwd=None, roles=None, **kwargs): - cmd = SON([("createUser", user)]) - # X509 doesn't use a password - if pwd: - cmd["pwd"] = pwd - cmd["roles"] = roles or ["root"] - cmd.update(**kwargs) - return authdb.command(cmd) - async def async_repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" @@ -237,133 +122,10 @@ def __del__(self): raise Exception(msg) -def _all_users(db): - return {u["user"] for u in db.command("usersInfo").get("users", [])} - - -def sanitize_cmd(cmd): - cp = cmd.copy() - cp.pop("$clusterTime", None) - cp.pop("$db", None) - cp.pop("$readPreference", None) - cp.pop("lsid", None) - if MONGODB_API_VERSION: - # Stable API parameters - cp.pop("apiVersion", None) - # OP_MSG encoding may move the payload type one field to the - # end of the command. Do the same here. - name = next(iter(cp)) - try: - identifier = message._FIELD_MAP[name] - docs = cp.pop(identifier) - cp[identifier] = docs - except KeyError: - pass - return cp - - -def sanitize_reply(reply): - cp = reply.copy() - cp.pop("$clusterTime", None) - cp.pop("operationTime", None) - return cp - - -def print_thread_tracebacks() -> None: - """Print all Python thread tracebacks.""" - for thread_id, frame in sys._current_frames().items(): - sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") - traceback.print_stack(frame, file=sys.stderr) - - -def print_thread_stacks(pid: int) -> None: - """Print all C-level thread stacks for a given process id.""" - if sys.platform == "darwin": - cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] - else: - cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] - - try: - res = subprocess.run( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" - ) - except Exception as exc: - sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") - else: - sys.stderr.write(res.stdout) - - # Global knobs to speed up the test suite. global_knobs = client_knobs(events_queue_frequency=0.05) -def _get_executors(topology): - executors = [] - for server in topology._servers.values(): - # Some MockMonitor do not have an _executor. - if hasattr(server._monitor, "_executor"): - executors.append(server._monitor._executor) - if hasattr(server._monitor, "_rtt_monitor"): - executors.append(server._monitor._rtt_monitor._executor) - executors.append(topology._Topology__events_executor) - if topology._srv_monitor: - executors.append(topology._srv_monitor._executor) - - return [e for e in executors if e is not None] - - -def print_running_topology(topology): - running = [e for e in _get_executors(topology) if not e._stopped] - if running: - print( - "WARNING: found Topology with running threads:\n" - f" Threads: {running}\n" - f" Topology: {topology}\n" - f" Creation traceback:\n{topology._settings._stack}" - ) - - -def test_cases(suite): - """Iterator over all TestCases within a TestSuite.""" - for suite_or_case in suite._tests: - if isinstance(suite_or_case, unittest.TestCase): - # unittest.TestCase - yield suite_or_case - else: - # unittest.TestSuite - yield from test_cases(suite_or_case) - - -# Helper method to workaround https://bugs.python.org/issue21724 -def clear_warning_registry(): - """Clear the __warningregistry__ for all modules.""" - for _, module in list(sys.modules.items()): - if hasattr(module, "__warningregistry__"): - module.__warningregistry__ = {} # type:ignore[attr-defined] - - -class SystemCertsPatcher: - def __init__(self, ca_certs): - if ( - ssl.OPENSSL_VERSION.lower().startswith("libressl") - and sys.platform == "darwin" - and not _ssl.IS_PYOPENSSL - ): - raise SkipTest( - "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable." - ) - self.original_certs = os.environ.get("SSL_CERT_FILE") - # Tell OpenSSL where CA certificates live. - os.environ["SSL_CERT_FILE"] = ca_certs - - def disable(self): - if self.original_certs is None: - os.environ.pop("SSL_CERT_FILE") - else: - os.environ["SSL_CERT_FILE"] = self.original_certs - - if _IS_SYNC: PARENT = threading.Thread else: diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index f6afa4b2a3..337dba0f64 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -57,11 +57,14 @@ from test.asynchronous.test_bulk import AsyncBulkTestBase from test.asynchronous.unified_format import generate_test_classes from test.asynchronous.utils_spec_runner import AsyncSpecRunner -from test.helpers import ( +from test.helpers_shared import ( + ALL_KMS_PROVIDERS, AWS_CREDS, + AWS_TEMP_CREDS, AZURE_CREDS, CA_PEM, CLIENT_PEM, + DEFAULT_KMS_TLS, GCP_CREDS, KMIP_CREDS, LOCAL_MASTER_KEY, @@ -204,7 +207,7 @@ async def test_init_kms_tls_options(self): opts = AutoEncryptionOpts( {}, "k.d", - kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, + kms_tls_options=DEFAULT_KMS_TLS, ) _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) ctx = _kms_ssl_contexts["kmip"] @@ -616,17 +619,10 @@ async def test_with_statement(self): # Spec tests -AWS_TEMP_CREDS = { - "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), - "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), - "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), -} - AWS_TEMP_NO_SESSION_CREDS = { "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } -KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} class AsyncTestSpec(AsyncSpecRunner): @@ -663,7 +659,7 @@ def parse_auto_encrypt_opts(self, opts): self.skipTest("GCP environment credentials are not set") if "kmip" in kms_providers: kms_providers["kmip"] = KMIP_CREDS - opts["kms_tls_options"] = KMS_TLS_OPTS + opts["kms_tls_options"] = DEFAULT_KMS_TLS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" if "extra_options" in opts: @@ -757,14 +753,6 @@ async def run_scenario(self): ) # Prose Tests -ALL_KMS_PROVIDERS = { - "aws": AWS_CREDS, - "azure": AZURE_CREDS, - "gcp": GCP_CREDS, - "kmip": KMIP_CREDS, - "local": {"key": LOCAL_MASTER_KEY}, -} - LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) @@ -851,13 +839,17 @@ async def asyncSetUp(self): self.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self.client_encrypted = await self.async_rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) self.client_encryption = self.create_client_encryption( - self.KMS_PROVIDERS, "keyvault.datakeys", self.client, OPTS, kms_tls_options=KMS_TLS_OPTS + self.KMS_PROVIDERS, + "keyvault.datakeys", + self.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self.listener.reset() @@ -1066,7 +1058,7 @@ async def _test_corpus(self, opts): "keyvault.datakeys", async_client_context.client, OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) @@ -1158,7 +1150,7 @@ async def _test_corpus(self, opts): async def test_corpus(self): opts = AutoEncryptionOpts( - self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS + self.kms_providers(), "keyvault.datakeys", kms_tls_options=DEFAULT_KMS_TLS ) await self._test_corpus(opts) @@ -1169,7 +1161,7 @@ async def test_corpus_local_schema(self): self.kms_providers(), "keyvault.datakeys", schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) await self._test_corpus(opts) @@ -1300,7 +1292,7 @@ async def asyncSetUp(self): key_vault_namespace="keyvault.datakeys", key_vault_client=async_client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) kms_providers_invalid = copy.deepcopy(kms_providers) @@ -1312,7 +1304,7 @@ async def asyncSetUp(self): key_vault_namespace="keyvault.datakeys", key_vault_client=async_client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self._kmip_host_error = None self._invalid_host_error = None @@ -2752,7 +2744,7 @@ async def run_test(self, src_provider, dst_provider): key_vault_client=self.client, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, codec_options=OPTS, ) @@ -2772,7 +2764,7 @@ async def run_test(self, src_provider, dst_provider): key_vault_client=client2, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, codec_options=OPTS, ) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 09bf7e83ea..b06654b328 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -37,6 +37,7 @@ ) from test.asynchronous.utils import async_get_pool, flaky from test.asynchronous.utils_spec_runner import SpecRunnerTask +from test.helpers_shared import ALL_KMS_PROVIDERS, DEFAULT_KMS_TLS from test.unified_format_shared import ( KMS_TLS_OPTS, PLACEHOLDER_MAP, @@ -61,6 +62,8 @@ from test.version import Version from typing import Any, Dict, List, Mapping, Optional +import pytest + import pymongo from bson import SON, json_util from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -76,7 +79,7 @@ from pymongo.asynchronous.encryption import AsyncClientEncryption from pymongo.asynchronous.helpers import anext from pymongo.driver_info import DriverInfo -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -259,6 +262,23 @@ async def _create_entity(self, entity_spec, uri=None): kwargs: dict = {} observe_events = spec.get("observeEvents", []) + if "autoEncryptOpts" in spec: + auto_encrypt_opts = spec["autoEncryptOpts"].copy() + auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) + kms_providers = ALL_KMS_PROVIDERS.copy() + key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") + for provider_name, provider_value in auto_encrypt_opts.pop("kmsProviders").items(): + kms_providers[provider_name].update(provider_value) + extra_opts = auto_encrypt_opts.pop("extraOptions", {}) + for key, value in extra_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + for key, value in auto_encrypt_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, **auto_encrypt_kwargs + ) + kwargs["auto_encryption_opts"] = auto_encryption_opts + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent for i in range(len(observe_events)): if "topologyOpeningEvent" == observe_events[i]: @@ -430,7 +450,7 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.22") + SCHEMA_VERSION = Version.from_string("1.23") RUN_ON_LOAD_BALANCER = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes @@ -462,6 +482,13 @@ async def insert_initial_data(self, initial_data): wc = WriteConcern(w="majority") else: wc = WriteConcern(w=1) + + # Remove any encryption collections associated with the collection. + collections = await db.list_collection_names() + for collection in collections: + if collection in [f"enxcol_.{coll_name}.esc", f"enxcol_.{coll_name}.ecoc"]: + await db.drop_collection(collection) + if documents: if opts: await db.create_collection(coll_name, **opts) @@ -1516,7 +1543,14 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore TEST_SPEC = test_spec EXPECTED_FAILURES = expected_failures - return SpecTestBase + base = SpecTestBase + + # Add "encryption" marker if the "csfle" runOnRequirement is set. + for req in test_spec.get("runOnRequirements", []): + if req.get("csfle", False): + base = pytest.mark.encryption(base) + + return base for dirpath, _, filenames in os.walk(test_path): dirname = os.path.split(dirpath)[-1] diff --git a/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..0817508f8f --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,322 @@ +{ + "description": "fle2v2-BypassQueryAnalysis", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "csfle": true, + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "bypassQueryAnalysis": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..b5f848c080 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,256 @@ +{ + "description": "fle2v2-EncryptedFields-vs-EncryptedFieldsMap", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "csfle": true, + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "default", + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "databaseName": "default", + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localSchema.json b/test/client-side-encryption/spec/unified/localSchema.json new file mode 100644 index 0000000000..aee323d949 --- /dev/null +++ b/test/client-side-encryption/spec/unified/localSchema.json @@ -0,0 +1,343 @@ +{ + "description": "localSchema", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "test": { + "bsonType": "string" + } + }, + "bsonType": "object", + "required": [ + "test" + ] + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "database": { + "id": "encryptedDB2", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl2", + "database": "encryptedDB2", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "A local schema should override", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + ] + }, + { + "description": "A local schema with no encryption is an error", + "operations": [ + { + "object": "encryptedColl2", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "expectError": { + "isError": true, + "errorContains": "JSON schema keyword 'required' is only allowed with a remote schema" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/maxWireVersion.json b/test/client-side-encryption/spec/unified/maxWireVersion.json new file mode 100644 index 0000000000..d0af75ac99 --- /dev/null +++ b/test/client-side-encryption/spec/unified/maxWireVersion.json @@ -0,0 +1,101 @@ +{ + "description": "maxWireVersion", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "keyVaultNamespace": "keyvault.datakeys", + "extraOptions": { + "mongocryptdBypassSpawn": true + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "operation fails with maxWireVersion < 8", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "encrypted_string": "string0" + } + }, + "expectError": { + "errorContains": "Auto-encryption requires a minimum MongoDB version of 4.2" + } + } + ] + } + ] +} diff --git a/test/helpers.py b/test/helpers.py index 22bdc0d25d..163bf01c12 100644 --- a/test/helpers.py +++ b/test/helpers.py @@ -12,137 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Shared constants and helper methods for pymongo, bson, and gridfs test suites.""" +"""Shared helper methods for pymongo, bson, and gridfs test suites.""" from __future__ import annotations import asyncio -import base64 -import gc -import multiprocessing -import os -import signal -import socket -import subprocess -import sys import threading -import time import traceback -import unittest -import warnings -from inspect import iscoroutinefunction - -from pymongo._asyncio_task import create_task - -try: - import ipaddress - - HAVE_IPADDRESS = True -except ImportError: - HAVE_IPADDRESS = False from functools import wraps -from typing import Any, Callable, Dict, Generator, Optional, no_type_check -from unittest import SkipTest +from typing import Optional, no_type_check -from bson.son import SON -from pymongo import common, message +from bson import SON +from pymongo import common +from pymongo._asyncio_task import create_task from pymongo.read_preferences import ReadPreference -from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] -from pymongo.synchronous.uri_parser import parse_uri - -if HAVE_SSL: - import ssl _IS_SYNC = True -# Enable debug output for uncollectable objects. PyPy does not have set_debug. -if hasattr(gc, "set_debug"): - gc.set_debug( - gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) - ) - -# The host and port of a single mongod or mongos, or the seed host -# for a replica set. -host = os.environ.get("DB_IP", "localhost") -port = int(os.environ.get("DB_PORT", 27017)) -IS_SRV = "mongodb+srv" in host - -db_user = os.environ.get("DB_USER", "user") -db_pwd = os.environ.get("DB_PASSWORD", "password") - -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") -CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) -CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) - -TLS_OPTIONS: Dict = {"tls": True} -if CLIENT_PEM: - TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM -if CA_PEM: - TLS_OPTIONS["tlsCAFile"] = CA_PEM - -COMPRESSORS = os.environ.get("COMPRESSORS") -MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") -TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) -SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") -MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") - -if TEST_LOADBALANCER: - res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res["nodelist"][0] - db_user = res["username"] or db_user - db_pwd = res["password"] or db_pwd - - -# Shared KMS data. -LOCAL_MASTER_KEY = base64.b64decode( - b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" - b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" -) -AWS_CREDS = { - "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), -} -AWS_CREDS_2 = { - "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), -} -AZURE_CREDS = { - "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), - "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), - "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), -} -GCP_CREDS = { - "email": os.environ.get("FLE_GCP_EMAIL", ""), - "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), -} -KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} - -# Ensure Evergreen metadata doesn't result in truncation -os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") - - -def is_server_resolvable(): - """Returns True if 'server' is resolvable.""" - socket_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(1) - try: - try: - socket.gethostbyname("server") - return True - except OSError: - return False - finally: - socket.setdefaulttimeout(socket_timeout) - - -def _create_user(authdb, user, pwd=None, roles=None, **kwargs): - cmd = SON([("createUser", user)]) - # X509 doesn't use a password - if pwd: - cmd["pwd"] = pwd - cmd["roles"] = roles or ["root"] - cmd.update(**kwargs) - return authdb.command(cmd) - def repl_set_step_down(client, **kwargs): """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" @@ -237,133 +122,10 @@ def __del__(self): raise Exception(msg) -def _all_users(db): - return {u["user"] for u in db.command("usersInfo").get("users", [])} - - -def sanitize_cmd(cmd): - cp = cmd.copy() - cp.pop("$clusterTime", None) - cp.pop("$db", None) - cp.pop("$readPreference", None) - cp.pop("lsid", None) - if MONGODB_API_VERSION: - # Stable API parameters - cp.pop("apiVersion", None) - # OP_MSG encoding may move the payload type one field to the - # end of the command. Do the same here. - name = next(iter(cp)) - try: - identifier = message._FIELD_MAP[name] - docs = cp.pop(identifier) - cp[identifier] = docs - except KeyError: - pass - return cp - - -def sanitize_reply(reply): - cp = reply.copy() - cp.pop("$clusterTime", None) - cp.pop("operationTime", None) - return cp - - -def print_thread_tracebacks() -> None: - """Print all Python thread tracebacks.""" - for thread_id, frame in sys._current_frames().items(): - sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") - traceback.print_stack(frame, file=sys.stderr) - - -def print_thread_stacks(pid: int) -> None: - """Print all C-level thread stacks for a given process id.""" - if sys.platform == "darwin": - cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] - else: - cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] - - try: - res = subprocess.run( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" - ) - except Exception as exc: - sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") - else: - sys.stderr.write(res.stdout) - - # Global knobs to speed up the test suite. global_knobs = client_knobs(events_queue_frequency=0.05) -def _get_executors(topology): - executors = [] - for server in topology._servers.values(): - # Some MockMonitor do not have an _executor. - if hasattr(server._monitor, "_executor"): - executors.append(server._monitor._executor) - if hasattr(server._monitor, "_rtt_monitor"): - executors.append(server._monitor._rtt_monitor._executor) - executors.append(topology._Topology__events_executor) - if topology._srv_monitor: - executors.append(topology._srv_monitor._executor) - - return [e for e in executors if e is not None] - - -def print_running_topology(topology): - running = [e for e in _get_executors(topology) if not e._stopped] - if running: - print( - "WARNING: found Topology with running threads:\n" - f" Threads: {running}\n" - f" Topology: {topology}\n" - f" Creation traceback:\n{topology._settings._stack}" - ) - - -def test_cases(suite): - """Iterator over all TestCases within a TestSuite.""" - for suite_or_case in suite._tests: - if isinstance(suite_or_case, unittest.TestCase): - # unittest.TestCase - yield suite_or_case - else: - # unittest.TestSuite - yield from test_cases(suite_or_case) - - -# Helper method to workaround https://bugs.python.org/issue21724 -def clear_warning_registry(): - """Clear the __warningregistry__ for all modules.""" - for _, module in list(sys.modules.items()): - if hasattr(module, "__warningregistry__"): - module.__warningregistry__ = {} # type:ignore[attr-defined] - - -class SystemCertsPatcher: - def __init__(self, ca_certs): - if ( - ssl.OPENSSL_VERSION.lower().startswith("libressl") - and sys.platform == "darwin" - and not _ssl.IS_PYOPENSSL - ): - raise SkipTest( - "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable." - ) - self.original_certs = os.environ.get("SSL_CERT_FILE") - # Tell OpenSSL where CA certificates live. - os.environ["SSL_CERT_FILE"] = ca_certs - - def disable(self): - if self.original_certs is None: - os.environ.pop("SSL_CERT_FILE") - else: - os.environ["SSL_CERT_FILE"] = self.original_certs - - if _IS_SYNC: PARENT = threading.Thread else: diff --git a/test/helpers_shared.py b/test/helpers_shared.py new file mode 100644 index 0000000000..49cf131808 --- /dev/null +++ b/test/helpers_shared.py @@ -0,0 +1,271 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import base64 +import gc +import os +import socket +import subprocess +import sys +import traceback +import unittest +from pathlib import Path + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from functools import wraps +from typing import no_type_check +from unittest import SkipTest + +from bson.son import SON +from pymongo import message +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.synchronous.uri_parser import parse_uri + +if HAVE_SSL: + import ssl + + +# Enable debug output for uncollectable objects. PyPy does not have set_debug. +if hasattr(gc, "set_debug"): + gc.set_debug( + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) + +# The host and port of a single mongod or mongos, or the seed host +# for a replica set. +host = os.environ.get("DB_IP", "localhost") +port = int(os.environ.get("DB_PORT", 27017)) +IS_SRV = "mongodb+srv" in host + +db_user = os.environ.get("DB_USER", "user") +db_pwd = os.environ.get("DB_PASSWORD", "password") + +HERE = Path(__file__).absolute() +CERT_PATH = str(HERE.parent / "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) + +TLS_OPTIONS: dict = {"tls": True} +if CLIENT_PEM: + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM +if CA_PEM: + TLS_OPTIONS["tlsCAFile"] = CA_PEM + +COMPRESSORS = os.environ.get("COMPRESSORS") +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) +SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") +MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") + +if TEST_LOADBALANCER: + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + + +# Shared KMS data. +LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AWS_CREDS_2 = { + "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} +AWS_TEMP_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), + "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), +} + +ALL_KMS_PROVIDERS = dict( + aws=AWS_CREDS, + azure=AZURE_CREDS, + gcp=GCP_CREDS, + local=dict(key=LOCAL_MASTER_KEY), + kmip=KMIP_CREDS, +) +DEFAULT_KMS_TLS = dict(kmip=dict(tlsCAFile=CA_PEM, tlsCertificateKeyFile=CLIENT_PEM)) + +# Ensure Evergreen metadata doesn't result in truncation +os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") + + +def is_server_resolvable(): + """Returns True if 'server' is resolvable.""" + socket_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(1) + try: + try: + socket.gethostbyname("server") + return True + except OSError: + return False + finally: + socket.setdefaulttimeout(socket_timeout) + + +def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return authdb.command(cmd) + + +def _all_users(db): + return {u["user"] for u in db.command("usersInfo").get("users", [])} + + +def sanitize_cmd(cmd): + cp = cmd.copy() + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) + if MONGODB_API_VERSION: + # Stable API parameters + cp.pop("apiVersion", None) + # OP_MSG encoding may move the payload type one field to the + # end of the command. Do the same here. + name = next(iter(cp)) + try: + identifier = message._FIELD_MAP[name] + docs = cp.pop(identifier) + cp[identifier] = docs + except KeyError: + pass + return cp + + +def sanitize_reply(reply): + cp = reply.copy() + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) + return cp + + +def print_thread_tracebacks() -> None: + """Print all Python thread tracebacks.""" + for thread_id, frame in sys._current_frames().items(): + sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") + traceback.print_stack(frame, file=sys.stderr) + + +def print_thread_stacks(pid: int) -> None: + """Print all C-level thread stacks for a given process id.""" + if sys.platform == "darwin": + cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] + else: + cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] + + try: + res = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" + ) + except Exception as exc: + sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") + else: + sys.stderr.write(res.stdout) + + +def _get_executors(topology): + executors = [] + for server in topology._servers.values(): + # Some MockMonitor do not have an _executor. + if hasattr(server._monitor, "_executor"): + executors.append(server._monitor._executor) + if hasattr(server._monitor, "_rtt_monitor"): + executors.append(server._monitor._rtt_monitor._executor) + executors.append(topology._Topology__events_executor) + if topology._srv_monitor: + executors.append(topology._srv_monitor._executor) + + return [e for e in executors if e is not None] + + +def print_running_topology(topology): + running = [e for e in _get_executors(topology) if not e._stopped] + if running: + print( + "WARNING: found Topology with running threads:\n" + f" Threads: {running}\n" + f" Topology: {topology}\n" + f" Creation traceback:\n{topology._settings._stack}" + ) + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +# Helper method to workaround https://bugs.python.org/issue21724 +def clear_warning_registry(): + """Clear the __warningregistry__ for all modules.""" + for _, module in list(sys.modules.items()): + if hasattr(module, "__warningregistry__"): + module.__warningregistry__ = {} # type:ignore[attr-defined] + + +class SystemCertsPatcher: + def __init__(self, ca_certs): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") + # Tell OpenSSL where CA certificates live. + os.environ["SSL_CERT_FILE"] = ca_certs + + def disable(self): + if self.original_certs is None: + os.environ.pop("SSL_CERT_FILE") + else: + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/test_encryption.py b/test/test_encryption.py index 5c8813203d..46d8c785c4 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -54,11 +54,14 @@ from test import ( unittest, ) -from test.helpers import ( +from test.helpers_shared import ( + ALL_KMS_PROVIDERS, AWS_CREDS, + AWS_TEMP_CREDS, AZURE_CREDS, CA_PEM, CLIENT_PEM, + DEFAULT_KMS_TLS, GCP_CREDS, KMIP_CREDS, LOCAL_MASTER_KEY, @@ -204,7 +207,7 @@ def test_init_kms_tls_options(self): opts = AutoEncryptionOpts( {}, "k.d", - kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, + kms_tls_options=DEFAULT_KMS_TLS, ) _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) ctx = _kms_ssl_contexts["kmip"] @@ -614,17 +617,10 @@ def test_with_statement(self): # Spec tests -AWS_TEMP_CREDS = { - "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), - "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), - "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), -} - AWS_TEMP_NO_SESSION_CREDS = { "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } -KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} class TestSpec(SpecRunner): @@ -661,7 +657,7 @@ def parse_auto_encrypt_opts(self, opts): self.skipTest("GCP environment credentials are not set") if "kmip" in kms_providers: kms_providers["kmip"] = KMIP_CREDS - opts["kms_tls_options"] = KMS_TLS_OPTS + opts["kms_tls_options"] = DEFAULT_KMS_TLS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" if "extra_options" in opts: @@ -755,14 +751,6 @@ def run_scenario(self): ) # Prose Tests -ALL_KMS_PROVIDERS = { - "aws": AWS_CREDS, - "azure": AZURE_CREDS, - "gcp": GCP_CREDS, - "kmip": KMIP_CREDS, - "local": {"key": LOCAL_MASTER_KEY}, -} - LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) @@ -849,13 +837,17 @@ def setUp(self): self.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self.client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) self.client_encryption = self.create_client_encryption( - self.KMS_PROVIDERS, "keyvault.datakeys", self.client, OPTS, kms_tls_options=KMS_TLS_OPTS + self.KMS_PROVIDERS, + "keyvault.datakeys", + self.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self.listener.reset() @@ -1062,7 +1054,7 @@ def _test_corpus(self, opts): "keyvault.datakeys", client_context.client, OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) @@ -1154,7 +1146,7 @@ def _test_corpus(self, opts): def test_corpus(self): opts = AutoEncryptionOpts( - self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS + self.kms_providers(), "keyvault.datakeys", kms_tls_options=DEFAULT_KMS_TLS ) self._test_corpus(opts) @@ -1165,7 +1157,7 @@ def test_corpus_local_schema(self): self.kms_providers(), "keyvault.datakeys", schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self._test_corpus(opts) @@ -1296,7 +1288,7 @@ def setUp(self): key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) kms_providers_invalid = copy.deepcopy(kms_providers) @@ -1308,7 +1300,7 @@ def setUp(self): key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self._kmip_host_error = None self._invalid_host_error = None @@ -2736,7 +2728,7 @@ def run_test(self, src_provider, dst_provider): key_vault_client=self.client, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, codec_options=OPTS, ) @@ -2756,7 +2748,7 @@ def run_test(self, src_provider, dst_provider): key_vault_client=client2, key_vault_namespace="keyvault.datakeys", kms_providers=ALL_KMS_PROVIDERS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, codec_options=OPTS, ) diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index 8f673cff4c..3d8f7b2b75 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -25,7 +25,7 @@ sys.path[0:0] = [""] from test import unittest -from test.helpers import clear_warning_registry +from test.helpers_shared import clear_warning_registry from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, _CaseInsensitiveDictionary, validate from pymongo.compression_support import _have_snappy diff --git a/test/unified-test-format/valid-pass/poc-queryable-encryption.json b/test/unified-test-format/valid-pass/poc-queryable-encryption.json new file mode 100644 index 0000000000..309d1d3b4b --- /dev/null +++ b/test/unified-test-format/valid-pass/poc-queryable-encryption.json @@ -0,0 +1,193 @@ +{ + "description": "poc-queryable-encryption", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "csfle": true, + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "encrypted" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "encrypted" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "poc-queryable-encryption", + "collectionName": "encrypted", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + ], + "tests": [ + { + "description": "insert, replace, and find with queryable encryption", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": 11 + } + } + }, + { + "object": "encryptedColl", + "name": "replaceOne", + "arguments": { + "filter": { + "encryptedInt": 11 + }, + "replacement": { + "encryptedInt": 22 + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "encryptedInt": 22 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": 22 + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhS16TJojgDDBtbluxBokvcotP1mQTGeYpNt8xd3MJQ=", + "subType": "00" + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 3496b2ad44..2cbc581aca 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -35,6 +35,7 @@ client_knobs, unittest, ) +from test.helpers_shared import ALL_KMS_PROVIDERS, DEFAULT_KMS_TLS from test.unified_format_shared import ( KMS_TLS_OPTS, PLACEHOLDER_MAP, @@ -60,6 +61,8 @@ from test.version import Version from typing import Any, Dict, List, Mapping, Optional +import pytest + import pymongo from bson import SON, json_util from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -68,7 +71,7 @@ from gridfs.errors import CorruptGridFile from pymongo import ASCENDING, CursorType, MongoClient, _csot from pymongo.driver_info import DriverInfo -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -258,6 +261,23 @@ def _create_entity(self, entity_spec, uri=None): kwargs: dict = {} observe_events = spec.get("observeEvents", []) + if "autoEncryptOpts" in spec: + auto_encrypt_opts = spec["autoEncryptOpts"].copy() + auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) + kms_providers = ALL_KMS_PROVIDERS.copy() + key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") + for provider_name, provider_value in auto_encrypt_opts.pop("kmsProviders").items(): + kms_providers[provider_name].update(provider_value) + extra_opts = auto_encrypt_opts.pop("extraOptions", {}) + for key, value in extra_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + for key, value in auto_encrypt_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, **auto_encrypt_kwargs + ) + kwargs["auto_encryption_opts"] = auto_encryption_opts + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent for i in range(len(observe_events)): if "topologyOpeningEvent" == observe_events[i]: @@ -429,7 +449,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.22") + SCHEMA_VERSION = Version.from_string("1.23") RUN_ON_LOAD_BALANCER = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes @@ -461,6 +481,13 @@ def insert_initial_data(self, initial_data): wc = WriteConcern(w="majority") else: wc = WriteConcern(w=1) + + # Remove any encryption collections associated with the collection. + collections = db.list_collection_names() + for collection in collections: + if collection in [f"enxcol_.{coll_name}.esc", f"enxcol_.{coll_name}.ecoc"]: + db.drop_collection(collection) + if documents: if opts: db.create_collection(coll_name, **opts) @@ -1501,7 +1528,14 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore TEST_SPEC = test_spec EXPECTED_FAILURES = expected_failures - return SpecTestBase + base = SpecTestBase + + # Add "encryption" marker if the "csfle" runOnRequirement is set. + for req in test_spec.get("runOnRequirements", []): + if req.get("csfle", False): + base = pytest.mark.encryption(base) + + return base for dirpath, _, filenames in os.walk(test_path): dirname = os.path.split(dirpath)[-1] diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 17dd73ec8c..96b037976b 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -25,9 +25,10 @@ import time import types from collections import abc -from test.helpers import ( +from test.helpers_shared import ( AWS_CREDS, AWS_CREDS_2, + AWS_TEMP_CREDS, AZURE_CREDS, CA_PEM, CLIENT_PEM, @@ -118,10 +119,22 @@ ("kmip", KMIP_CREDS), ("kmip:name1", KMIP_CREDS), ]: + # Use the temp aws creds for autoEncryptOpts. + if provider_name == "aws": + for key, value in AWS_TEMP_CREDS.items(): + placeholder = f"/autoEncryptOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + for key, value in provider_data.items(): placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" PLACEHOLDER_MAP[placeholder] = value + if provider_name == "aws": + continue + + placeholder = f"/autoEncryptOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + OIDC_ENV = os.environ.get("OIDC_ENV", "test") if OIDC_ENV == "test": PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "test"} From 9a9a65c6170ac202d618593a7e50165a85dcb1b9 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 20 Aug 2025 18:42:06 -0500 Subject: [PATCH 2031/2111] PYTHON-5496 Update CSOT tests for change in dropIndex behavior in 8.3 (#2498) --- test/asynchronous/unified_format.py | 2 - test/csot/deprecated-options.json | 67 ++++++++++++++--- test/csot/global-timeoutMS.json | 20 ++++- test/csot/override-operation-timeoutMS.json | 40 ++++++++-- test/csot/tailable-awaitData.json | 82 ++++++++++++++++++++- test/unified_format.py | 2 - 6 files changed, 183 insertions(+), 30 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index b06654b328..5f01642a44 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -591,8 +591,6 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") - if "dropindex on collection" in description: - self.skipTest("PYTHON-5491") if ( "tailable" in class_name or "tailable" in description diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json index d3e4631ff4..647e1bf792 100644 --- a/test/csot/deprecated-options.json +++ b/test/csot/deprecated-options.json @@ -6750,16 +6750,23 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 100000, "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ] @@ -6815,16 +6822,23 @@ ] } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 100000, "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ], @@ -6832,6 +6846,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", @@ -6903,6 +6923,16 @@ ] } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", @@ -6910,10 +6940,6 @@ "timeoutMS": 1000, "maxTimeMS": 5000, "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ], @@ -6921,6 +6947,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", @@ -7003,6 +7035,17 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1", + "timeoutMS": 100000 + } + }, { "name": "dropIndexes", "object": "collection", diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json index 740bbad2e2..f1edbe68e3 100644 --- a/test/csot/global-timeoutMS.json +++ b/test/csot/global-timeoutMS.json @@ -5621,15 +5621,21 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ], @@ -5637,6 +5643,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json index 6fa0bd802a..f33f876137 100644 --- a/test/csot/override-operation-timeoutMS.json +++ b/test/csot/override-operation-timeoutMS.json @@ -3378,15 +3378,23 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 1000, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 1000, "name": "x_1" - }, - "expectError": { - "isTimeoutError": false } } ], @@ -3394,6 +3402,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", @@ -3436,15 +3450,23 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 0, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 0, "name": "x_1" - }, - "expectError": { - "isTimeoutError": false } } ], @@ -3452,6 +3474,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json index 81683d3993..80e95ca906 100644 --- a/test/csot/tailable-awaitData.json +++ b/test/csot/tailable-awaitData.json @@ -78,7 +78,7 @@ ] }, { - "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "description": "error on find if maxAwaitTimeMS is greater than timeoutMS", "operations": [ { "name": "find", @@ -90,13 +90,50 @@ "maxAwaitTimeMS": 10 }, "expectError": { - "isClientError": true + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on aggregate if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on watch if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false } } ] }, { - "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "description": "error on find if maxAwaitTimeMS is equal to timeoutMS", "operations": [ { "name": "find", @@ -108,7 +145,44 @@ "maxAwaitTimeMS": 5 }, "expectError": { - "isClientError": true + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on aggregate if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on watch if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false } } ] diff --git a/test/unified_format.py b/test/unified_format.py index 2cbc581aca..375d845783 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -590,8 +590,6 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") - if "dropindex on collection" in description: - self.skipTest("PYTHON-5491") if ( "tailable" in class_name or "tailable" in description From 5e96353797f081ad2090270818a8808d37ebac40 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 21 Aug 2025 06:51:00 -0700 Subject: [PATCH 2032/2111] PYTHON-5508 - Add built-in DecimalEncoder and DecimalDecoder (#2499) --- bson/decimal128.py | 39 ++++++++++++++++++++++++++ doc/changelog.rst | 7 +++++ test/asynchronous/test_custom_types.py | 25 ++--------------- test/test_custom_types.py | 25 ++--------------- 4 files changed, 50 insertions(+), 46 deletions(-) diff --git a/bson/decimal128.py b/bson/decimal128.py index 92c054d878..7480f94d0a 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -20,8 +20,11 @@ import decimal import struct +from decimal import Decimal from typing import Any, Sequence, Tuple, Type, Union +from bson.codec_options import TypeDecoder, TypeEncoder + _PACK_64 = struct.Struct(" Type[Decimal]: + return Decimal + + def transform_python(self, value: Any) -> Decimal128: + return Decimal128(value) + + +class DecimalDecoder(TypeDecoder): + """Converts BSON :class:`Decimal128` to Python :class:`decimal.Decimal`. + + For example:: + opts = CodecOptions(type_registry=TypeRegistry([DecimalDecoder()])) + bson.decode(data, codec_options=opts) + + .. versionadded:: 4.15 + """ + + @property + def bson_type(self) -> Type[Decimal128]: + return Decimal128 + + def transform_bson(self, value: Any) -> decimal.Decimal: + return value.to_decimal() + + def create_decimal128_context() -> decimal.Context: """Returns an instance of :class:`decimal.Context` appropriate for working with IEEE-754 128-bit decimal floating point values. diff --git a/doc/changelog.rst b/doc/changelog.rst index e41ecc7e1b..305c989106 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,5 +1,12 @@ Changelog ========= +Changes in Version 4.15.0 (XXXX/XX/XX) +-------------------------------------- +PyMongo 4.15 brings a number of changes including: + +- Added :class:`bson.decimal128.DecimalEncoder` and :class:`bson.decimal128.DecimalDecoder` + to support encoding and decoding of BSON Decimal128 values to decimal.Decimal values using the TypeRegistry API. + Changes in Version 4.14.1 (2025/08/19) -------------------------------------- diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py index 385f755a1d..82c54512cc 100644 --- a/test/asynchronous/test_custom_types.py +++ b/test/asynchronous/test_custom_types.py @@ -23,6 +23,7 @@ from random import random from typing import Any, Tuple, Type, no_type_check +from bson.decimal128 import DecimalDecoder, DecimalEncoder from gridfs.asynchronous.grid_file import AsyncGridIn, AsyncGridOut sys.path[0:0] = [""] @@ -59,29 +60,7 @@ _IS_SYNC = False -class DecimalEncoder(TypeEncoder): - @property - def python_type(self): - return Decimal - - def transform_python(self, value): - return Decimal128(value) - - -class DecimalDecoder(TypeDecoder): - @property - def bson_type(self): - return Decimal128 - - def transform_bson(self, value): - return value.to_decimal() - - -class DecimalCodec(DecimalDecoder, DecimalEncoder): - pass - - -DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()])) class UndecipherableInt64Type: diff --git a/test/test_custom_types.py b/test/test_custom_types.py index 7360f2b18b..aba6b55119 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -23,6 +23,7 @@ from random import random from typing import Any, Tuple, Type, no_type_check +from bson.decimal128 import DecimalDecoder, DecimalEncoder from gridfs.synchronous.grid_file import GridIn, GridOut sys.path[0:0] = [""] @@ -59,29 +60,7 @@ _IS_SYNC = True -class DecimalEncoder(TypeEncoder): - @property - def python_type(self): - return Decimal - - def transform_python(self, value): - return Decimal128(value) - - -class DecimalDecoder(TypeDecoder): - @property - def bson_type(self): - return Decimal128 - - def transform_bson(self, value): - return value.to_decimal() - - -class DecimalCodec(DecimalDecoder, DecimalEncoder): - pass - - -DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()])) class UndecipherableInt64Type: From e08284bdca4e35121e618443741aae1d25153fc8 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 21 Aug 2025 10:55:48 -0500 Subject: [PATCH 2033/2111] PYTHON-5456 Support text indexes with auto encryption (#2500) --- test/asynchronous/unified_format.py | 12 +- ...-Text-cleanupStructuredEncryptionData.json | 219 +++++++ ...-Text-compactStructuredEncryptionData.json | 261 +++++++++ .../spec/unified/QE-Text-prefixPreview.json | 338 +++++++++++ .../unified/QE-Text-substringPreview.json | 551 ++++++++++++++++++ .../spec/unified/QE-Text-suffixPreview.json | 338 +++++++++++ test/unified_format.py | 12 +- 7 files changed, 1727 insertions(+), 4 deletions(-) create mode 100644 test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json create mode 100644 test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json create mode 100644 test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json create mode 100644 test/client-side-encryption/spec/unified/QE-Text-substringPreview.json create mode 100644 test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 5f01642a44..9bd0fabdb8 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -159,6 +159,14 @@ async def is_run_on_requirement_satisfied(requirement): if req_csfle is True: min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: + csfle_satisfied = False + req_version = req_csfle["minLibmongocryptVersion"] + if _HAVE_PYMONGOCRYPT: + from pymongocrypt import libmongocrypt_version + + if Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version): + csfle_satisfied = True return ( topology_satisfied @@ -450,7 +458,7 @@ class UnifiedSpecTestMixinV1(AsyncIntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.23") + SCHEMA_VERSION = Version.from_string("1.25") RUN_ON_LOAD_BALANCER = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes @@ -1545,7 +1553,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore # Add "encryption" marker if the "csfle" runOnRequirement is set. for req in test_spec.get("runOnRequirements", []): - if req.get("csfle", False): + if "csfle" in req: base = pytest.mark.encryption(base) return base diff --git a/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json b/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json new file mode 100644 index 0000000000..24f33ab3ec --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json @@ -0,0 +1,219 @@ +{ + "description": "QE-Text-cleanupStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text cleanupStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "cleanupStructuredEncryptionData": "coll" + }, + "commandName": "cleanupStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "cleanupStructuredEncryptionData": "coll", + "cleanupTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "cleanupStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json b/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json new file mode 100644 index 0000000000..c7abfe2d4b --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json @@ -0,0 +1,261 @@ +{ + "description": "QE-Text-compactStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text compactStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "coll" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "coll", + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "db.coll": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ], + "strEncodeVersion": { + "$numberInt": "1" + }, + "escCollection": "enxcol_.coll.esc", + "ecocCollection": "enxcol_.coll.ecoc" + } + } + }, + "compactionTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json b/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json new file mode 100644 index 0000000000..7279385743 --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-prefixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE prefixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fmUMXTMV/XRiN0IL3VXxSEn6SQG9E6Po30kJKB8JJlQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vZIDMiFDgjmLNYVrrbnq1zT4hg7sGpe/PMtighSsnRc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "26Z5G+sHTzV3D7F8Y0m08389USZ2afinyFV3ez9UEBQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q/JEq8of7bE0QE5Id0XuOsNQ4qVpANYymcPQDUL2Ywk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Uvvv46LkfbgLoPqZ6xTBzpgoYRTM6FUgRdqZ9eaVojI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nMxdq2lladuBJA3lv3JC2MumIUtRJBNJVLp3PVE6nQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hS3V0qq5CF/SkTl3ZWWWgXcAJ8G5yGtkY2RwcHNc5Oc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McgwYUxfKj5+4D0vskZymy4KA82s71MR25iV/Enutww=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ciqdk1b+t+Vrr6oIlFFk0Zdym5BPmwN3glQ0/VcsVdM=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json b/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json new file mode 100644 index 0000000000..6a8f133eac --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json @@ -0,0 +1,551 @@ +{ + "description": "QE-Text-substringPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "10" + }, + "strMaxLength": { + "$numberLong": "20" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "oba" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IpY3x/jjm8j/74jAdUhgxdM5hk68zR0zv/lTKm/72Vg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G+ky260C6QiOfIxKz14FmaMbAxvui1BKJO/TnLOHlGk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7dv3gAKe9vwJMZmpB40pRCwRTmc7ds9UkGhxH8j084E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o0V+Efn6x8XQdE80F1tztNaT3qxHjcsd9DOQ47BtmQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sJvrCjyVot7PIZFsdRehWFANKAj6fmBaj3FLbz/dZLE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e98auxFmu02h5MfBIARk29MI7hSmvN3F9DaQ0xjqoEM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US83krGNov/ezL6IhsY5eEOCxv1xUPDIEL/nmY0IKi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P2Aq5+OHZPG0CWIdmZvWq9c/18ZKVYW3vbxd+WU/TXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8AdPRPnSzcd5uhq4TZfNvNeF0XjLNVwAsJJMTtktw84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9O6u/G51I4ZHFLhL4ZLuudbr0s202A2QnPfThmOXPhI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N7AjYVyVlv6+lVSTM+cIxRL3SMgs3G5LgxSs+jrgDkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RbGF7dQbPGYQFd9DDO1hPz1UlLOJ77FAC6NsjGwJeos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m7srHMgKm6kZwsNx8rc45pmw0/9Qro6xuQ8lZS3+RYk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K75CNU3JyKFqZWPiIsVi4+n7DhYmcPl/nEhQ3d88mVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c7bwGpUZc/7JzEnMS7qQ/TPuXZyrmMihFaAV6zIqbZc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rDvEdUgEk8u4Srt3ETokWs2FXcnyJaRGQ+NbkFwi2rQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VcdZj9zfveRBRlpCR2OYWau2+GokOFb73TE3gpElNiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOa9o2xfA6OgkbYUxd6wQJicaeN6guhy2V66W3ALsaA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1xGkJh+um70XiRd8lKLDtyHgDqrf7/59Mg7X0+KZh8k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OSvllqHxycbcZN4phR6NDujY3ttA59o7nQJ6V9eJpX0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZTX1pyk8Vdw0BSbJx7GeJNcQf3tGKxbrrNSTqBqUWkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cn7V05zb5iXwYrePGMHztC+GRq+Tj8IMpRDraauPhSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E9bV9KyrZxHJSUmMg0HrDK4gGN+75ruelAnrM6hXQgY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrssTNmdgXoTGpbaF0JLRCGH6cDQuz1XEFNTy98nrb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jZmyOJP35dsxQ/OY5U4ISpVRIYr8iedNfcwZiKt29Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d2mocORMbX9MX+/itAW8r1kxVw2/uii4vzXtc+2CIRQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JBnJy58eRPhDo3DuZvsHbvQDiHXxdtAx1Eif66k5SfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OjbDulC8s62v0pgweBSsQqtJjJBwH5JinfJpj7nVr+A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "85i7KT2GP9nSda3Gsil5LKubhq0LDtc22pxBxHpR+nE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "u9Fvsclwrs9lwIcMPV/fMZD7L3d5anSfJQVjQb9mgLg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LZ32ttmLJGOIw9oFaUCn3Sx5uHPTYJPSFpeGRWNqlUc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMsZvGEePTqtl0FJAL/jAdyWNQIlpwN61YIlZsSIZ6s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XZcu1a/ZGsIzAl3j4MXQlLo4v2p7kvIqRHtIQYFmL6k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Zse27LinlYCEnX6iTmJceI33mEJxFb0LdPxp0RiMOaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOv2Hgb2/sBpnX9XwFbIN6yDxhjchwlmczUf82W2tp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oQxZ9A6j3x5j6x1Jqw/N9tpP4rfWMjcV3y+a3PkrL7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/D7ew3EijyUnmT22awVFspcuyo3JChJcDeCPwpljzVM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BEmmwqyamt9X3bcWDld61P01zquy8fBHAXq3SHAPP0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wygD9/kAo1KsRvtr1v+9/lvqoWdKwgh6gDHvAQfXPPk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pRTKgF/uksrF1c1AcfSTY6ZhqBKVud1vIztQ4/36SLs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C4iUo8oNJsjJ37BqnBgIgSQpf99X2Bb4W5MZEAmakHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "icoE53jIq6Fu/YGKUiSUTYyZ8xdiTQY9jJiGxVJObpw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oubCwk0V6G2RFWtcOnYDU4uUBoXBrhBRi4nZgrYj9JY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IyqhQ9nGhzEi5YW2W6v1kGU5DY2u2qSqbM/qXdLdWVU=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "blah" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json b/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json new file mode 100644 index 0000000000..deec5e63b0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-suffixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uDCWsucUsJemUP7pmeb+Kd8B9qupVzI8wnLFqX1rkiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W3E1x4bHZ8SEHFz4zwXM0G5Z5WSwBhnxE8x5/qdP6JM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6g/TXVDDf6z+ntResIvTKWdmIy4ajQ1rhwdNZIiEG7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hU+u/T3D6dHDpT3d/v5AlgtRoAufCXCAyO2jQlgsnCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vrPnq0AtBIURNgNGA6HJL+5/p5SBWe+qz8505TRo/dE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W5pylBxdv2soY2NcBfPiHDVLTS6tx+0ULkI8gysBeFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oWO3xX3x0bYUJGK2S1aPAmlU3Xtfsgb9lTZ6flGAlsg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SjZGucTEUbdpd86O8yj1pyMyBOOKxvAQ9C8ngZ9C5UE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CEaMZkxVDVbnXr+To0DOyvsva04UQkIYP3KtgYVVwf8=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrEndsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/unified_format.py b/test/unified_format.py index 375d845783..bc21464ab6 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -158,6 +158,14 @@ def is_run_on_requirement_satisfied(requirement): if req_csfle is True: min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: + csfle_satisfied = False + req_version = req_csfle["minLibmongocryptVersion"] + if _HAVE_PYMONGOCRYPT: + from pymongocrypt import libmongocrypt_version + + if Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version): + csfle_satisfied = True return ( topology_satisfied @@ -449,7 +457,7 @@ class UnifiedSpecTestMixinV1(IntegrationTest): a class attribute ``TEST_SPEC``. """ - SCHEMA_VERSION = Version.from_string("1.23") + SCHEMA_VERSION = Version.from_string("1.25") RUN_ON_LOAD_BALANCER = True TEST_SPEC: Any TEST_PATH = "" # This gets filled in by generate_test_classes @@ -1530,7 +1538,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore # Add "encryption" marker if the "csfle" runOnRequirement is set. for req in test_spec.get("runOnRequirements", []): - if req.get("csfle", False): + if "csfle" in req: base = pytest.mark.encryption(base) return base From ddf9508e15d25091ec8f4936ce12865723dc3f29 Mon Sep 17 00:00:00 2001 From: Shane Harvey Date: Fri, 22 Aug 2025 14:51:39 -0700 Subject: [PATCH 2034/2111] PYTHON-5510 Fix server selection log message for commitTransaction (#2503) --- pymongo/asynchronous/client_session.py | 3 +-- pymongo/synchronous/client_session.py | 5 +++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index c30fc6679f..be02295cea 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -167,7 +167,6 @@ WTimeoutError, ) from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES -from pymongo.operations import _Op from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE @@ -868,7 +867,7 @@ async def func( return await self._finish_transaction(conn, command_name) return await self._client._retry_internal( - func, self, None, retryable=True, operation=_Op.ABORT + func, self, None, retryable=True, operation=command_name ) async def _finish_transaction(self, conn: AsyncConnection, command_name: str) -> dict[str, Any]: diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index 68a01dd7e7..72a5b8e885 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -165,7 +165,6 @@ WTimeoutError, ) from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES -from pymongo.operations import _Op from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference, _ServerMode from pymongo.server_type import SERVER_TYPE @@ -864,7 +863,9 @@ def func( ) -> dict[str, Any]: return self._finish_transaction(conn, command_name) - return self._client._retry_internal(func, self, None, retryable=True, operation=_Op.ABORT) + return self._client._retry_internal( + func, self, None, retryable=True, operation=command_name + ) def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]: self._transaction.attempt += 1 From 3ebd93480a8e66be27163c86aa87f01762e55f4a Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 25 Aug 2025 08:54:10 -0700 Subject: [PATCH 2035/2111] PYTHON-5514 Specific assertions for "is" and "is not None" (#2502) --- test/asynchronous/test_collection.py | 2 +- test/asynchronous/test_session.py | 6 +++--- test/test_collection.py | 2 +- test/test_session.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 6a85b63960..90a0518532 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -1319,7 +1319,7 @@ async def test_error_code(self): self.assertIn(exc.code, (9, 10147, 16840, 17009)) # Just check that we set the error document. Fields # vary by MongoDB version. - self.assertTrue(exc.details is not None) + self.assertIsNotNone(exc.details) else: self.fail("OperationFailure was not raised") diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 5ed3597751..19ce868c56 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -378,9 +378,9 @@ async def test_cursor_clone(self): async with self.client.start_session() as s: cursor = coll.find(session=s) - self.assertTrue(cursor.session is s) + self.assertIs(cursor.session, s) clone = cursor.clone() - self.assertTrue(clone.session is s) + self.assertIs(clone.session, s) # No explicit session. cursor = coll.find(batch_size=2) @@ -392,7 +392,7 @@ async def test_cursor_clone(self): await anext(clone) self.assertIsNone(clone.session) self.assertIsNotNone(clone._session) - self.assertFalse(cursor._session is clone._session) + self.assertIsNot(cursor._session, clone._session) await cursor.close() await clone.close() diff --git a/test/test_collection.py b/test/test_collection.py index 0dce88423b..b1947259ba 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1305,7 +1305,7 @@ def test_error_code(self): self.assertIn(exc.code, (9, 10147, 16840, 17009)) # Just check that we set the error document. Fields # vary by MongoDB version. - self.assertTrue(exc.details is not None) + self.assertIsNotNone(exc.details) else: self.fail("OperationFailure was not raised") diff --git a/test/test_session.py b/test/test_session.py index 16a219ae52..40d0a53afb 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -378,9 +378,9 @@ def test_cursor_clone(self): with self.client.start_session() as s: cursor = coll.find(session=s) - self.assertTrue(cursor.session is s) + self.assertIs(cursor.session, s) clone = cursor.clone() - self.assertTrue(clone.session is s) + self.assertIs(clone.session, s) # No explicit session. cursor = coll.find(batch_size=2) @@ -392,7 +392,7 @@ def test_cursor_clone(self): next(clone) self.assertIsNone(clone.session) self.assertIsNotNone(clone._session) - self.assertFalse(cursor._session is clone._session) + self.assertIsNot(cursor._session, clone._session) cursor.close() clone.close() From cd4e5db997515dfddfe48404ed7cf03fc1267cc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 11:57:02 -0500 Subject: [PATCH 2036/2111] Bump pyright from 1.1.403 to 1.1.404 (#2506) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b7afbc5473..391de1bfe5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ pymongocrypt_source = [ perf = ["simplejson"] typing = [ "mypy==1.17.1", - "pyright==1.1.403", + "pyright==1.1.404", "typing_extensions", "pip" ] From 9892e1bbe970dd118f3168fd37077054d447882f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 11:57:35 -0500 Subject: [PATCH 2037/2111] Update coverage requirement from <=7.10.3,>=5 to >=5,<=7.10.5 (#2507) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 391de1bfe5..111136d08b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] eventlet = ["eventlet"] coverage = [ "pytest-cov", - "coverage>=5,<=7.10.3" + "coverage>=5,<=7.10.5" ] mockupdb = [ "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" From 8c361be2190ed6252065c6aff7db89de728c6557 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 08:24:30 -0500 Subject: [PATCH 2038/2111] Bump the actions group with 5 updates (#2505) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/dist.yml | 4 ++-- .github/workflows/test-python.yml | 34 +++++++++++++++---------------- .github/workflows/zizmor.yml | 4 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fd2808ea19..8dffe1fa7b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -38,7 +38,7 @@ jobs: build-mode: none steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: ${{ inputs.ref }} persist-credentials: false @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3 + uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 14c253fe73..f5f8c20c88 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout pymongo - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 persist-credentials: false @@ -108,7 +108,7 @@ jobs: name: Make SDist runs-on: macos-13 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 persist-credentials: false diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 11255f9e49..d55c0d7c7c 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -19,11 +19,11 @@ jobs: static: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: enable-cache: true python-version: "3.9" @@ -61,11 +61,11 @@ jobs: name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -80,11 +80,11 @@ jobs: runs-on: ubuntu-latest name: DocTest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: enable-cache: true python-version: "3.9" @@ -105,11 +105,11 @@ jobs: name: Docs Checks runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: enable-cache: true python-version: "3.9" @@ -124,11 +124,11 @@ jobs: name: Link Check runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: enable-cache: true python-version: "3.9" @@ -146,11 +146,11 @@ jobs: matrix: python: ["3.9", "3.11"] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: enable-cache: true python-version: "${{matrix.python}}" @@ -167,7 +167,7 @@ jobs: runs-on: ubuntu-latest name: "Make an sdist" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - uses: actions/setup-python@v5 @@ -225,11 +225,11 @@ jobs: runs-on: ubuntu-latest name: Test using minimum dependencies and supported Python steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: python-version: '3.9' - id: setup-mongodb @@ -251,11 +251,11 @@ jobs: runs-on: ubuntu-latest name: Test async's minimum dependencies and Python steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@e92bafb6253dcd438e0484186d7669ea7a8ca1cc # v5 + uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 with: python-version: '3.9' - id: setup-mongodb diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 8a2bccf931..e7a39fa39e 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -14,8 +14,8 @@ jobs: security-events: write steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@383d31df2eb66a2f42db98c9654bdc73231f3e3a + uses: zizmorcore/zizmor-action@7f2abfff7488a44086dba64ed2f5a9b431508079 From 0d4c84e86ff8ba7c1ee0e01a1bc2690de502c7e3 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 26 Aug 2025 09:52:09 -0500 Subject: [PATCH 2039/2111] PYTHON-5519 Clean up uv handling (#2510) --- .evergreen/run-tests.sh | 5 +---- justfile | 26 +++++++++++++++----------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 2b7d856d41..a9f2ba2b5c 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -26,12 +26,9 @@ else fi # List the packages. -uv sync ${UV_ARGS} --reinstall +uv sync ${UV_ARGS} --reinstall --quiet uv pip list -# Ensure we go back to base environment after the test. -trap "uv sync" EXIT HUP - # Start the test runner. uv run ${UV_ARGS} .evergreen/scripts/run_tests.py "$@" diff --git a/justfile b/justfile index 74ebb48823..24da94a499 100644 --- a/justfile +++ b/justfile @@ -2,7 +2,7 @@ set shell := ["bash", "-c"] # Commonly used command segments. -uv_run := "uv run --isolated --frozen " +uv_run := "uv run --frozen " typing_run := uv_run + "--group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" docs_run := uv_run + "--extra docs" doc_build := "./doc/_build" @@ -13,51 +13,55 @@ mypy_args := "--install-types --non-interactive" default: @just --list +[private] +resync: + @uv sync --quiet --frozen + install: bash .evergreen/scripts/setup-dev-env.sh [group('docs')] -docs: +docs: && resync {{docs_run}} sphinx-build -W -b html doc {{doc_build}}/html [group('docs')] -docs-serve: +docs-serve: && resync {{docs_run}} sphinx-autobuild -W -b html doc --watch ./pymongo --watch ./bson --watch ./gridfs {{doc_build}}/serve [group('docs')] -docs-linkcheck: +docs-linkcheck: && resync {{docs_run}} sphinx-build -E -b linkcheck doc {{doc_build}}/linkcheck [group('typing')] -typing: +typing: && resync just typing-mypy just typing-pyright [group('typing')] -typing-mypy: +typing-mypy: && resync {{typing_run}} mypy {{mypy_args}} bson gridfs tools pymongo {{typing_run}} mypy {{mypy_args}} --config-file mypy_test.ini test {{typing_run}} mypy {{mypy_args}} test/test_typing.py test/test_typing_strict.py [group('typing')] -typing-pyright: +typing-pyright: && resync {{typing_run}} pyright test/test_typing.py test/test_typing_strict.py {{typing_run}} pyright -p strict_pyrightconfig.json test/test_typing_strict.py [group('lint')] -lint: +lint: && resync {{uv_run}} pre-commit run --all-files [group('lint')] -lint-manual: +lint-manual: && resync {{uv_run}} pre-commit run --all-files --hook-stage manual [group('test')] -test *args="-v --durations=5 --maxfail=10": +test *args="-v --durations=5 --maxfail=10": && resync {{uv_run}} --extra test pytest {{args}} [group('test')] -run-tests *args: +run-tests *args: && resync bash ./.evergreen/run-tests.sh {{args}} [group('test')] From cffb9069fd7b82b80e3a3f131ccd4d730e36e3f4 Mon Sep 17 00:00:00 2001 From: Finn Womack Date: Wed, 27 Aug 2025 05:30:56 -0700 Subject: [PATCH 2040/2111] PYTHON-5520 Add windows arm64 wheel support (#2511) --- .github/workflows/dist.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index f5f8c20c88..e5b36ad7dd 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -41,6 +41,7 @@ jobs: - [ubuntu-latest, "manylinux_i686", "cp3*-manylinux_i686"] - [windows-2022, "win_amd6", "cp3*-win_amd64"] - [windows-2022, "win32", "cp3*-win32"] + - [windows-11-arm, "win_arm64", "cp3*-win_arm64"] - [macos-14, "macos", "cp*-macosx_*"] steps: @@ -54,7 +55,7 @@ jobs: - uses: actions/setup-python@v5 with: cache: 'pip' - python-version: 3.9 + python-version: 3.11 cache-dependency-path: 'pyproject.toml' allow-prereleases: true From 66567678506043db6c24ed869f636401cbe19fbf Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 27 Aug 2025 11:24:47 -0500 Subject: [PATCH 2041/2111] PYTHON-5486 Test Gevent with Auth and SSL (#2508) --- .evergreen/generated_configs/variants.yml | 4 ++-- .evergreen/scripts/generate_config.py | 4 ++-- test/asynchronous/test_monitor.py | 5 ++++- test/test_monitor.py | 5 ++++- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index a05cc61f05..33b8e0ba02 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -332,7 +332,7 @@ buildvariants: # Green framework tests - name: green-eventlet-rhel8 tasks: - - name: .test-standard .standalone-noauth-nossl .python-3.9 .sync + - name: .test-standard .python-3.9 .sync display_name: Green Eventlet RHEL8 run_on: - rhel87-small @@ -340,7 +340,7 @@ buildvariants: GREEN_FRAMEWORK: eventlet - name: green-gevent-rhel8 tasks: - - name: .test-standard .standalone-noauth-nossl .sync + - name: .test-standard .sync display_name: Green Gevent RHEL8 run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 3a386be4f7..f76c0d1e04 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -301,11 +301,11 @@ def create_green_framework_variants(): variants = [] host = DEFAULT_HOST for framework in ["eventlet", "gevent"]: - tasks = [".test-standard .standalone-noauth-nossl .sync"] + tasks = [".test-standard .sync"] if framework == "eventlet": # Eventlet has issues with dnspython > 2.0 and newer versions of CPython # https://jira.mongodb.org/browse/PYTHON-5284 - tasks = [".test-standard .standalone-noauth-nossl .python-3.9 .sync"] + tasks = [".test-standard .python-3.9 .sync"] expansions = dict(GREEN_FRAMEWORK=framework) display_name = get_variant_name(f"Green {framework.capitalize()}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) diff --git a/test/asynchronous/test_monitor.py b/test/asynchronous/test_monitor.py index 55a20d7643..dde8976c06 100644 --- a/test/asynchronous/test_monitor.py +++ b/test/asynchronous/test_monitor.py @@ -28,7 +28,7 @@ from test.asynchronous.utils import ( async_wait_until, ) -from test.utils_shared import ServerAndTopologyEventListener +from test.utils_shared import ServerAndTopologyEventListener, gevent_monkey_patched from pymongo.periodic_executor import _EXECUTORS @@ -58,6 +58,9 @@ async def create_client(self): return client @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") + @unittest.skipIf( + gevent_monkey_patched(), "PYTHON-5516 Resources are not cleared when using gevent" + ) async def test_cleanup_executors_on_client_del(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") diff --git a/test/test_monitor.py b/test/test_monitor.py index 8bcdf7130a..c10662c893 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -28,7 +28,7 @@ from test.utils import ( wait_until, ) -from test.utils_shared import ServerAndTopologyEventListener +from test.utils_shared import ServerAndTopologyEventListener, gevent_monkey_patched from pymongo.periodic_executor import _EXECUTORS @@ -58,6 +58,9 @@ def create_client(self): return client @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") + @unittest.skipIf( + gevent_monkey_patched(), "PYTHON-5516 Resources are not cleared when using gevent" + ) def test_cleanup_executors_on_client_del(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") From b2bba67b61f033be5360bebc1ea260164ae75bb2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 07:30:34 -0500 Subject: [PATCH 2042/2111] Update coverage requirement from <=7.10.5,>=5 to >=5,<=7.10.6 (#2512) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 111136d08b..53025cc3f4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] eventlet = ["eventlet"] coverage = [ "pytest-cov", - "coverage>=5,<=7.10.5" + "coverage>=5,<=7.10.6" ] mockupdb = [ "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" From b756bbd2a392ee9a810dd9b5df97fc4e16cfae37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 08:37:19 -0500 Subject: [PATCH 2043/2111] Bump the actions group with 2 updates (#2513) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- .github/workflows/zizmor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index e7a39fa39e..2db3b43e7f 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@7f2abfff7488a44086dba64ed2f5a9b431508079 + uses: zizmorcore/zizmor-action@a016d81e77496751b5c04eb1e8f00214bd396553 From d63edf7aea260be286a3bc98e28f961db43a71b0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 3 Sep 2025 13:35:43 -0400 Subject: [PATCH 2044/2111] PYTHON-5524 - Fix CSFLE spec test min version checks (#2516) --- test/asynchronous/unified_format.py | 4 +++- test/unified_format.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 9bd0fabdb8..b10879733b 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -157,7 +157,9 @@ async def is_run_on_requirement_satisfied(requirement): csfle_satisfied = True req_csfle = requirement.get("csfle") if req_csfle is True: - min_version_satisfied = Version.from_string("4.2") <= server_version + # Don't overwrite unsatisfied minimum version requirements. + if min_version_satisfied: + min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: csfle_satisfied = False diff --git a/test/unified_format.py b/test/unified_format.py index bc21464ab6..1d47f747d3 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -156,7 +156,9 @@ def is_run_on_requirement_satisfied(requirement): csfle_satisfied = True req_csfle = requirement.get("csfle") if req_csfle is True: - min_version_satisfied = Version.from_string("4.2") <= server_version + # Don't overwrite unsatisfied minimum version requirements. + if min_version_satisfied: + min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: csfle_satisfied = False From c0e0554a3b69b7c3140a792934682c19b43fc269 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 3 Sep 2025 14:18:51 -0400 Subject: [PATCH 2045/2111] PYTHON-5521 - Update TestBsonSizeBatches.test_06_insert_fails_over_16MiB error codes (#2515) --- test/asynchronous/test_encryption.py | 2 +- test/test_encryption.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 337dba0f64..4517c9f917 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -1268,7 +1268,7 @@ async def test_06_insert_fails_over_16MiB(self): with self.assertRaises(BulkWriteError) as ctx: await self.coll_encrypted.bulk_write([InsertOne(doc)]) err = ctx.exception.details["writeErrors"][0] - self.assertEqual(2, err["code"]) + self.assertIn(err["code"], [2, 10334]) self.assertIn("object to insert too large", err["errmsg"]) diff --git a/test/test_encryption.py b/test/test_encryption.py index 46d8c785c4..fb18cc2dd0 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -1264,7 +1264,7 @@ def test_06_insert_fails_over_16MiB(self): with self.assertRaises(BulkWriteError) as ctx: self.coll_encrypted.bulk_write([InsertOne(doc)]) err = ctx.exception.details["writeErrors"][0] - self.assertEqual(2, err["code"]) + self.assertIn(err["code"], [2, 10334]) self.assertIn("object to insert too large", err["errmsg"]) From b84e1a7ce4f53faf3b4898fdf8b5b89251f8c047 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 3 Sep 2025 15:00:04 -0400 Subject: [PATCH 2046/2111] PYTHON-5527 - Unified test typo in 'Expected error' (#2517) --- test/asynchronous/unified_format.py | 2 +- test/unified_format.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index b10879733b..cc8f58477e 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -1032,7 +1032,7 @@ async def run_entity_operation(self, spec): raise else: if expect_error: - self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') + self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}') if expect_result: actual = coerce_result(opname, result) diff --git a/test/unified_format.py b/test/unified_format.py index 1d47f747d3..8945948e69 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -1023,7 +1023,7 @@ def run_entity_operation(self, spec): raise else: if expect_error: - self.fail(f'Excepted error {expect_error} but "{opname}" succeeded: {result}') + self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}') if expect_result: actual = coerce_result(opname, result) From 47c5460d2ec7eced587add33cb6ab0d717218f49 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 09:10:29 -0500 Subject: [PATCH 2047/2111] Bump pyright from 1.1.404 to 1.1.405 (#2518) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 53025cc3f4..890244b688 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ pymongocrypt_source = [ perf = ["simplejson"] typing = [ "mypy==1.17.1", - "pyright==1.1.404", + "pyright==1.1.405", "typing_extensions", "pip" ] From 7580309e993fdda26dc4ea38ccfd0178e78c99e7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 8 Sep 2025 16:01:12 -0500 Subject: [PATCH 2048/2111] PYTHON-4928 Convert CSFLE spec tests to unified test format (#2520) --- .evergreen/remove-unimplemented-tests.sh | 1 + test/asynchronous/test_encryption.py | 3 +- test/asynchronous/unified_format.py | 59 +- .../data/encryptedFields-prefix-suffix.json | 38 + .../etc/data/encryptedFields-substring.json | 30 + .../limits/limits-encryptedFields.json | 14 + .../limits/limits-qe-doc.json | 3 + .../spec/unified/aggregate.json | 433 ++++ .../spec/unified/awsTemporary.json | 313 +++ .../spec/unified/azureKMS.json | 293 +++ .../spec/unified/badQueries.json | 1393 ++++++++++ .../spec/unified/badSchema.json | 393 +++ .../spec/unified/basic.json | 431 ++++ .../spec/unified/bulk.json | 407 +++ .../spec/unified/bypassAutoEncryption.json | 403 +++ .../spec/unified/bypassedCommand.json | 147 ++ .../spec/unified/count.json | 293 +++ .../spec/unified/countDocuments.json | 296 +++ .../unified/create-and-createIndexes.json | 121 + .../spec/unified/delete.json | 396 +++ .../spec/unified/distinct.json | 325 +++ .../spec/unified/explain.json | 293 +++ .../spec/unified/find.json | 458 ++++ .../spec/unified/findOneAndDelete.json | 276 ++ .../spec/unified/findOneAndReplace.json | 282 ++ .../spec/unified/findOneAndUpdate.json | 286 +++ .../unified/fle2v2-BypassQueryAnalysis.json | 8 +- .../spec/unified/fle2v2-Compact.json | 312 +++ .../fle2v2-CreateCollection-OldServer.json | 127 + .../spec/unified/fle2v2-CreateCollection.json | 1748 +++++++++++++ .../unified/fle2v2-DecryptExistingData.json | 186 ++ .../spec/unified/fle2v2-Delete.json | 326 +++ ...EncryptedFields-vs-EncryptedFieldsMap.json | 10 +- .../fle2v2-EncryptedFields-vs-jsonSchema.json | 367 +++ .../fle2v2-EncryptedFieldsMap-defaults.json | 139 + .../spec/unified/fle2v2-FindOneAndUpdate.json | 622 +++++ .../unified/fle2v2-InsertFind-Indexed.json | 361 +++ .../unified/fle2v2-InsertFind-Unindexed.json | 301 +++ .../spec/unified/fle2v2-MissingKey.json | 137 + .../spec/unified/fle2v2-NoEncryption.json | 123 + .../spec/unified/fle2v2-Rangev2-Compact.json | 358 +++ .../fle2v2-Rangev2-Date-Aggregate.json | 574 +++++ .../fle2v2-Rangev2-Date-Correctness.json | 1610 ++++++++++++ .../unified/fle2v2-Rangev2-Date-Delete.json | 505 ++++ .../fle2v2-Rangev2-Date-FindOneAndUpdate.json | 577 +++++ .../fle2v2-Rangev2-Date-InsertFind.json | 562 ++++ .../unified/fle2v2-Rangev2-Date-Update.json | 581 +++++ .../fle2v2-Rangev2-Decimal-Aggregate.json | 1965 ++++++++++++++ .../fle2v2-Rangev2-Decimal-Correctness.json | 1016 ++++++++ .../fle2v2-Rangev2-Decimal-Delete.json | 1179 +++++++++ ...e2v2-Rangev2-Decimal-FindOneAndUpdate.json | 1969 ++++++++++++++ .../fle2v2-Rangev2-Decimal-InsertFind.json | 1956 ++++++++++++++ .../fle2v2-Rangev2-Decimal-Update.json | 1975 ++++++++++++++ ...v2-Rangev2-DecimalPrecision-Aggregate.json | 647 +++++ ...-Rangev2-DecimalPrecision-Correctness.json | 1418 +++++++++++ ...le2v2-Rangev2-DecimalPrecision-Delete.json | 539 ++++ ...ev2-DecimalPrecision-FindOneAndUpdate.json | 651 +++++ ...2-Rangev2-DecimalPrecision-InsertFind.json | 634 +++++ ...le2v2-Rangev2-DecimalPrecision-Update.json | 653 +++++ .../spec/unified/fle2v2-Rangev2-Defaults.json | 444 ++++ .../fle2v2-Rangev2-Double-Aggregate.json | 1195 +++++++++ .../fle2v2-Rangev2-Double-Correctness.json | 1018 ++++++++ .../unified/fle2v2-Rangev2-Double-Delete.json | 795 ++++++ ...le2v2-Rangev2-Double-FindOneAndUpdate.json | 1199 +++++++++ .../fle2v2-Rangev2-Double-InsertFind.json | 1186 +++++++++ .../unified/fle2v2-Rangev2-Double-Update.json | 1205 +++++++++ ...2v2-Rangev2-DoublePrecision-Aggregate.json | 643 +++++ ...2-Rangev2-DoublePrecision-Correctness.json | 1418 +++++++++++ ...fle2v2-Rangev2-DoublePrecision-Delete.json | 537 ++++ ...gev2-DoublePrecision-FindOneAndUpdate.json | 647 +++++ ...v2-Rangev2-DoublePrecision-InsertFind.json | 634 +++++ ...fle2v2-Rangev2-DoublePrecision-Update.json | 653 +++++ .../unified/fle2v2-Rangev2-Int-Aggregate.json | 547 ++++ .../fle2v2-Rangev2-Int-Correctness.json | 1412 ++++++++++ .../unified/fle2v2-Rangev2-Int-Delete.json | 483 ++++ .../fle2v2-Rangev2-Int-FindOneAndUpdate.json | 551 ++++ .../fle2v2-Rangev2-Int-InsertFind.json | 538 ++++ .../unified/fle2v2-Rangev2-Int-Update.json | 557 ++++ .../fle2v2-Rangev2-Long-Aggregate.json | 547 ++++ .../fle2v2-Rangev2-Long-Correctness.json | 1412 ++++++++++ .../unified/fle2v2-Rangev2-Long-Delete.json | 483 ++++ .../fle2v2-Rangev2-Long-FindOneAndUpdate.json | 551 ++++ .../fle2v2-Rangev2-Long-InsertFind.json | 538 ++++ .../unified/fle2v2-Rangev2-Long-Update.json | 557 ++++ .../unified/fle2v2-Rangev2-WrongType.json | 204 ++ .../spec/unified/fle2v2-Update.json | 633 +++++ ...v2-validatorAndPartialFieldExpression.json | 304 +++ .../spec/unified/gcpKMS.json | 292 +++ .../spec/unified/getMore.json | 321 +++ .../spec/unified/insert.json | 421 +++ .../spec/unified/keyAltName.json | 299 +++ .../spec/unified/kmipKMS.json | 415 +++ .../spec/unified/localKMS.json | 261 ++ .../spec/unified/localSchema.json | 22 +- .../spec/unified/malformedCiphertext.json | 241 ++ .../spec/unified/maxWireVersion.json | 11 +- .../spec/unified/missingKey.json | 233 ++ .../spec/unified/namedKMS.json | 241 ++ .../spec/unified/noSchema.json | 115 + .../spec/unified/replaceOne.json | 316 +++ .../spec/unified/timeoutMS.json | 270 ++ .../spec/unified/types.json | 2262 +++++++++++++++++ .../spec/unified/unsupportedCommand.json | 200 ++ .../spec/unified/updateMany.json | 376 +++ .../spec/unified/updateOne.json | 538 ++++ .../validatorAndPartialFieldExpression.json | 323 +++ test/test_encryption.py | 3 +- test/unified_format.py | 59 +- test/unified_format_shared.py | 10 +- 109 files changed, 59585 insertions(+), 58 deletions(-) create mode 100644 test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json create mode 100644 test/client-side-encryption/etc/data/encryptedFields-substring.json create mode 100644 test/client-side-encryption/limits/limits-encryptedFields.json create mode 100644 test/client-side-encryption/limits/limits-qe-doc.json create mode 100644 test/client-side-encryption/spec/unified/aggregate.json create mode 100644 test/client-side-encryption/spec/unified/awsTemporary.json create mode 100644 test/client-side-encryption/spec/unified/azureKMS.json create mode 100644 test/client-side-encryption/spec/unified/badQueries.json create mode 100644 test/client-side-encryption/spec/unified/badSchema.json create mode 100644 test/client-side-encryption/spec/unified/basic.json create mode 100644 test/client-side-encryption/spec/unified/bulk.json create mode 100644 test/client-side-encryption/spec/unified/bypassAutoEncryption.json create mode 100644 test/client-side-encryption/spec/unified/bypassedCommand.json create mode 100644 test/client-side-encryption/spec/unified/count.json create mode 100644 test/client-side-encryption/spec/unified/countDocuments.json create mode 100644 test/client-side-encryption/spec/unified/create-and-createIndexes.json create mode 100644 test/client-side-encryption/spec/unified/delete.json create mode 100644 test/client-side-encryption/spec/unified/distinct.json create mode 100644 test/client-side-encryption/spec/unified/explain.json create mode 100644 test/client-side-encryption/spec/unified/find.json create mode 100644 test/client-side-encryption/spec/unified/findOneAndDelete.json create mode 100644 test/client-side-encryption/spec/unified/findOneAndReplace.json create mode 100644 test/client-side-encryption/spec/unified/findOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Compact.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-MissingKey.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-Update.json create mode 100644 test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json create mode 100644 test/client-side-encryption/spec/unified/gcpKMS.json create mode 100644 test/client-side-encryption/spec/unified/getMore.json create mode 100644 test/client-side-encryption/spec/unified/insert.json create mode 100644 test/client-side-encryption/spec/unified/keyAltName.json create mode 100644 test/client-side-encryption/spec/unified/kmipKMS.json create mode 100644 test/client-side-encryption/spec/unified/localKMS.json create mode 100644 test/client-side-encryption/spec/unified/malformedCiphertext.json create mode 100644 test/client-side-encryption/spec/unified/missingKey.json create mode 100644 test/client-side-encryption/spec/unified/namedKMS.json create mode 100644 test/client-side-encryption/spec/unified/noSchema.json create mode 100644 test/client-side-encryption/spec/unified/replaceOne.json create mode 100644 test/client-side-encryption/spec/unified/timeoutMS.json create mode 100644 test/client-side-encryption/spec/unified/types.json create mode 100644 test/client-side-encryption/spec/unified/unsupportedCommand.json create mode 100644 test/client-side-encryption/spec/unified/updateMany.json create mode 100644 test/client-side-encryption/spec/unified/updateOne.json create mode 100644 test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh index fd50010138..e5e1d02192 100755 --- a/.evergreen/remove-unimplemented-tests.sh +++ b/.evergreen/remove-unimplemented-tests.sh @@ -6,6 +6,7 @@ rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-application-error.json # PYTHON-4918 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-checkout-error.json # PYTHON-4918 rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-min-pool-size-error.json # PYTHON-4918 +rm $PYMONGO/test/client-side-encryption/spec/unified/client-bulkWrite-qe.json # PYTHON-4929 # Python doesn't implement DRIVERS-3064 rm $PYMONGO/test/collection_management/listCollections-rawdata.json diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 4517c9f917..241cb15668 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -747,8 +747,7 @@ async def run_scenario(self): if _HAVE_PYMONGOCRYPT: globals().update( generate_test_classes( - os.path.join(SPEC_PATH, "unified"), - module=__name__, + os.path.join(SPEC_PATH, "unified"), module=__name__, expected_failures=["mapReduce .*"] ) ) diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index cc8f58477e..64659a34d4 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -255,6 +255,10 @@ def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: raise ValueError(f"Could not find a placeholder value for {path}") return PLACEHOLDER_MAP[path] + # Distinguish between temp and non-temp aws credentials. + if path.endswith("/kmsProviders/aws") and "sessionToken" in current: + path = path.replace("aws", "aws_temp") + for key in list(current): value = current[key] if isinstance(value, dict): @@ -275,10 +279,8 @@ async def _create_entity(self, entity_spec, uri=None): if "autoEncryptOpts" in spec: auto_encrypt_opts = spec["autoEncryptOpts"].copy() auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) - kms_providers = ALL_KMS_PROVIDERS.copy() + kms_providers = auto_encrypt_opts.pop("kmsProviders", ALL_KMS_PROVIDERS.copy()) key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") - for provider_name, provider_value in auto_encrypt_opts.pop("kmsProviders").items(): - kms_providers[provider_name].update(provider_value) extra_opts = auto_encrypt_opts.pop("extraOptions", {}) for key, value in extra_opts.items(): auto_encrypt_kwargs[camel_to_snake(key)] = value @@ -552,22 +554,25 @@ async def asyncSetUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if "Client side error in command starting transaction" in spec["description"]: + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + + if "client side error in command starting transaction" in description: self.skipTest("Implement PYTHON-1894") - if "timeoutMS applied to entire download" in spec["description"]: + if "type=symbol" in description: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to entire download" in description: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") if any( - x in spec["description"] + x in description for x in [ - "First insertOne is never committed", - "Second updateOne is never committed", - "Third updateOne is never committed", + "first insertone is never committed", + "second updateone is never committed", + "third updateone is never committed", ] ): self.skipTest("Implement PYTHON-4597") - class_name = self.__class__.__name__.lower() - description = spec["description"].lower() if "csot" in class_name: # Skip tests that are too slow to run on a given platform. slow_macos = [ @@ -785,6 +790,38 @@ async def _databaseOperation_createCommandCursor(self, target, **kwargs): return cursor + async def _collectionOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + async def _collectionOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + async def _collectionOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = await self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + async def _databaseOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + async def _databaseOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + async def _databaseOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = await self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + async def kill_all_sessions(self): if getattr(self, "client", None) is None: return diff --git a/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json b/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json new file mode 100644 index 0000000000..ec4489fa09 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json @@ -0,0 +1,38 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + }, + { + "queryType": "suffixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-substring.json b/test/client-side-encryption/etc/data/encryptedFields-substring.json new file mode 100644 index 0000000000..ee22def77b --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-substring.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "strMaxLength": { + "$numberInt": "10" + }, + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/test/client-side-encryption/limits/limits-encryptedFields.json b/test/client-side-encryption/limits/limits-encryptedFields.json new file mode 100644 index 0000000000..c52a0271e1 --- /dev/null +++ b/test/client-side-encryption/limits/limits-encryptedFields.json @@ -0,0 +1,14 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "path": "foo", + "bsonType": "string" + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/limits/limits-qe-doc.json b/test/client-side-encryption/limits/limits-qe-doc.json new file mode 100644 index 0000000000..71efbf4068 --- /dev/null +++ b/test/client-side-encryption/limits/limits-qe-doc.json @@ -0,0 +1,3 @@ +{ + "foo": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +} \ No newline at end of file diff --git a/test/client-side-encryption/spec/unified/aggregate.json b/test/client-side-encryption/spec/unified/aggregate.json new file mode 100644 index 0000000000..d04ce49d28 --- /dev/null +++ b/test/client-side-encryption/spec/unified/aggregate.json @@ -0,0 +1,433 @@ +{ + "description": "aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with deterministic encryption", + "skipReason": "SERVER-39395", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Aggregate with empty pipeline", + "skipReason": "SERVER-40829 hides agg support behind enableTestCommands flag.", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [], + "cursor": {} + }, + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Aggregate should fail with random encryption", + "skipReason": "SERVER-39395", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "random": "abc" + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "Database aggregate should fail", + "operations": [ + { + "name": "aggregate", + "object": "db", + "arguments": { + "pipeline": [ + { + "$currentOp": { + "allUsers": false, + "idleConnections": false, + "localOps": true + } + }, + { + "$match": { + "command.aggregate": { + "$eq": 1 + } + } + }, + { + "$project": { + "command": 1 + } + }, + { + "$project": { + "command.lsid": 0 + } + } + ] + }, + "expectError": { + "errorContains": "non-collection command not supported for auto encryption: aggregate" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/awsTemporary.json b/test/client-side-encryption/spec/unified/awsTemporary.json new file mode 100644 index 0000000000..24b732a5eb --- /dev/null +++ b/test/client-side-encryption/spec/unified/awsTemporary.json @@ -0,0 +1,313 @@ +{ + "description": "awsTemporary", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": "bad" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using the AWS provider with temporary credentials", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Insert with invalid temporary credentials", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll1", + "expectError": { + "errorContains": "security token" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/azureKMS.json b/test/client-side-encryption/spec/unified/azureKMS.json new file mode 100644 index 0000000000..b70959217f --- /dev/null +++ b/test/client-side-encryption/spec/unified/azureKMS.json @@ -0,0 +1,293 @@ +{ + "description": "azureKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "azure_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using Azure KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_azure": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/badQueries.json b/test/client-side-encryption/spec/unified/badQueries.json new file mode 100644 index 0000000000..7a4f30d5b7 --- /dev/null +++ b/test/client-side-encryption/spec/unified/badQueries.json @@ -0,0 +1,1393 @@ +{ + "description": "badQueries", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "collection": { + "id": "coll_with_encrypted_id", + "database": "db", + "collectionName": "coll_with_encrypted_id" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "default", + "collectionName": "coll_with_encrypted_id", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "_id": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + } + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "$text unconditionally fails", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$text": { + "$search": "search text" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$where unconditionally fails", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$where": { + "$code": "function() { return true }" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$bit operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllClear": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllClear": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllSet": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllSet": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnyClear": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnyClear": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnySet": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnySet": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "geo operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$near": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$near": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "inequality operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gt": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gt": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lt": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lt": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gte": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gte": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lte": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lte": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "other misc operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$regex": "pattern", + "$options": "" + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$regex": "pattern", + "$options": "" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$size": 2 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$size": 2 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$eq": null + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$eq": null + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Illegal equality to null predicate for encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$in": [ + null + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + null + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Illegal equality to null inside $in against an encrypted field" + } + } + ] + }, + { + "description": "$addToSet succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "unencrypted": [ + "a" + ] + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "encrypted_string": [ + "a" + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$addToSet not allowed on encrypted values" + } + } + ] + }, + { + "description": "$inc succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$mul succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$max succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$min succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$currentDate succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "unencrypted": true + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "encrypted_string": true + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$currentDate not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pop succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pop not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pull succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pull not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pullAll succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "unencrypted": [ + 1 + ] + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "encrypted_string": [ + 1 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pullAll not allowed on encrypted values" + } + } + ] + }, + { + "description": "$push succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$push not allowed on encrypted values" + } + } + ] + }, + { + "description": "array filters on encrypted fields does not error in mongocryptd, but errors in mongod", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string.$[i].x": 1 + } + }, + "arrayFilters": [ + { + "i.x": 1 + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "Array update operations not allowed on encrypted values" + } + } + ] + }, + { + "description": "positional operator succeeds on unencrypted, errors on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "unencrypted": 1 + }, + "update": { + "$set": { + "unencrypted.$": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "abc" + }, + "update": { + "$set": { + "encrypted_string.$": "abc" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt fields below '$' positional update operator" + } + } + ] + }, + { + "description": "an update that would produce an array on an encrypted field errors", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string": [ + 1, + 2 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with encrypted field on _id errors", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "object": "coll_with_encrypted_id", + "expectError": { + "errorContains": "Invalid schema containing the 'encrypt' keyword." + } + } + ] + }, + { + "description": "an insert with an array value for an encrypted field fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "encrypted_string": [ + "123", + "456" + ] + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with a Timestamp(0,0) value in the top-level fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "random": { + "$timestamp": { + "t": 0, + "i": 0 + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "A command that inserts cannot supply Timestamp(0, 0) for an encrypted" + } + } + ] + }, + { + "description": "distinct with the key referring to a field where the keyID is a JSON Pointer errors", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": {}, + "fieldName": "encrypted_w_altname" + }, + "object": "coll", + "expectError": { + "errorContains": "The distinct key is not allowed to be marked for encryption with a non-UUID keyId" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/badSchema.json b/test/client-side-encryption/spec/unified/badSchema.json new file mode 100644 index 0000000000..af93d659d4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/badSchema.json @@ -0,0 +1,393 @@ +{ + "description": "badSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "array" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll0", + "database": "db0", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "foo": { + "properties": { + "bar": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + }, + "bsonType": "object" + } + } + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll2", + "database": "db2", + "collectionName": "default" + } + }, + { + "client": { + "id": "client3", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "anyOf": [ + { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + ] + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db3", + "client": "client3", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll3", + "database": "db3", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Schema with an encrypted field in an array", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll0", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema without specifying parent object types", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll1", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema with siblings of encrypt document", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll2", + "expectError": { + "errorContains": "'encrypt' cannot be used in conjunction with 'bsonType'" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema with logical keywords", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll3", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/basic.json b/test/client-side-encryption/spec/unified/basic.json new file mode 100644 index 0000000000..5522f585da --- /dev/null +++ b/test/client-side-encryption/spec/unified/basic.json @@ -0,0 +1,431 @@ +{ + "description": "basic", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Insert with randomized encryption, then find it", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "random": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "random": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bulk.json b/test/client-side-encryption/spec/unified/bulk.json new file mode 100644 index 0000000000..90922b88d0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bulk.json @@ -0,0 +1,407 @@ +{ + "description": "bulk", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Bulk write with encryption", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "encrypted_string": "string1" + } + } + }, + { + "updateOne": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + } + }, + { + "deleteOne": { + "filter": { + "$and": [ + { + "encrypted_string": "string1" + }, + { + "_id": 2 + } + ] + } + } + } + ], + "ordered": true + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "$and": [ + { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + { + "_id": { + "$eq": 2 + } + } + ] + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bypassAutoEncryption.json b/test/client-side-encryption/spec/unified/bypassAutoEncryption.json new file mode 100644 index 0000000000..3254c43781 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bypassAutoEncryption.json @@ -0,0 +1,403 @@ +{ + "description": "bypassAutoEncryption", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "bypassAutoEncryption": true, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with bypassAutoEncryption", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Insert with bypassAutoEncryption for local schema", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bypassedCommand.json b/test/client-side-encryption/spec/unified/bypassedCommand.json new file mode 100644 index 0000000000..b0c4c56322 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bypassedCommand.json @@ -0,0 +1,147 @@ +{ + "description": "bypassedCommand", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "ping is bypassed", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "kill op is not bypassed", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "killOp": 1, + "op": 1234 + }, + "commandName": "killOp" + }, + "expectError": { + "errorContains": "command not supported for auto encryption: killOp" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/count.json b/test/client-side-encryption/spec/unified/count.json new file mode 100644 index 0000000000..d44b3e827d --- /dev/null +++ b/test/client-side-encryption/spec/unified/count.json @@ -0,0 +1,293 @@ +{ + "description": "count", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Count with deterministic encryption", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "cursor": {}, + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + { + "$group": { + "_id": { + "$const": 1 + }, + "n": { + "$sum": { + "$const": 1 + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Count fails when filtering on a random encrypted field", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/countDocuments.json b/test/client-side-encryption/spec/unified/countDocuments.json new file mode 100644 index 0000000000..c0202258b8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/countDocuments.json @@ -0,0 +1,296 @@ +{ + "description": "countDocuments", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with deterministic encryption", + "skipReason": "waiting on SERVER-39395", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": 1 + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/create-and-createIndexes.json b/test/client-side-encryption/spec/unified/create-and-createIndexes.json new file mode 100644 index 0000000000..5debd15945 --- /dev/null +++ b/test/client-side-encryption/spec/unified/create-and-createIndexes.json @@ -0,0 +1,121 @@ +{ + "description": "create-and-createIndexes", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "unencryptedCollection" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "unencryptedCollection", + "documents": [] + } + ], + "tests": [ + { + "description": "create is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createIndex", + "object": "coll", + "arguments": { + "keys": { + "x": 1 + }, + "name": "name" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "unencryptedCollection", + "indexName": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/delete.json b/test/client-side-encryption/spec/unified/delete.json new file mode 100644 index 0000000000..242bcdba8c --- /dev/null +++ b/test/client-side-encryption/spec/unified/delete.json @@ -0,0 +1,396 @@ +{ + "description": "delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with deterministic encryption", + "operations": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "deleteMany with deterministic encryption", + "operations": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/distinct.json b/test/client-side-encryption/spec/unified/distinct.json new file mode 100644 index 0000000000..a7ac0fc7f1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/distinct.json @@ -0,0 +1,325 @@ +{ + "description": "distinct", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "distinct with deterministic encryption", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "fieldName": "encrypted_string" + }, + "object": "coll", + "expectResult": [ + "string0" + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "default", + "key": "encrypted_string", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "Distinct fails when filtering on a random encrypted field", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "random": "abc" + }, + "fieldName": "encrypted_string" + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/explain.json b/test/client-side-encryption/spec/unified/explain.json new file mode 100644 index 0000000000..667f921165 --- /dev/null +++ b/test/client-side-encryption/spec/unified/explain.json @@ -0,0 +1,293 @@ +{ + "description": "explain", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Explain a find with deterministic encryption", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": "string1" + } + } + }, + "commandName": "explain" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "verbosity": "allPlansExecution" + }, + "commandName": "explain" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/find.json b/test/client-side-encryption/spec/unified/find.json new file mode 100644 index 0000000000..7f358d9c08 --- /dev/null +++ b/test/client-side-encryption/spec/unified/find.json @@ -0,0 +1,458 @@ +{ + "description": "find", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Find with deterministic encryption", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Find with $in with deterministic encryption", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1", + "random": "abc" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Find fails when filtering on a random encrypted field", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndDelete.json b/test/client-side-encryption/spec/unified/findOneAndDelete.json new file mode 100644 index 0000000000..ff1103cb9b --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndDelete.json @@ -0,0 +1,276 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with deterministic encryption", + "operations": [ + { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "remove": true + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndReplace.json b/test/client-side-encryption/spec/unified/findOneAndReplace.json new file mode 100644 index 0000000000..c1a89fd2f6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndReplace.json @@ -0,0 +1,282 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with deterministic encryption", + "operations": [ + { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1" + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndUpdate.json b/test/client-side-encryption/spec/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..ffcb0e79e4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndUpdate.json @@ -0,0 +1,286 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with deterministic encryption", + "operations": [ + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json index 0817508f8f..671413b83f 100644 --- a/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json +++ b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json @@ -1,16 +1,18 @@ { "description": "fle2v2-BypassQueryAnalysis", - "schemaVersion": "1.23", + "schemaVersion": "1.25", "runOnRequirements": [ { "minServerVersion": "7.0.0", "serverless": "forbid", - "csfle": true, "topologies": [ "replicaset", "sharded", "load-balanced" - ] + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } } ], "createEntities": [ diff --git a/test/client-side-encryption/spec/unified/fle2v2-Compact.json b/test/client-side-encryption/spec/unified/fle2v2-Compact.json new file mode 100644 index 0000000000..07ebf4351b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Compact.json @@ -0,0 +1,312 @@ +{ + "description": "fle2v2-Compact", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Compact works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + }, + { + "commandSucceededEvent": { + "commandName": "compactStructuredEncryptionData", + "reply": { + "ok": 1 + } + } + } + ] + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "db1", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectError": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..fc069d55b2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,127 @@ +{ + "description": "fle2v2-CreateCollection-OldServer", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + }, + "expectError": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json new file mode 100644 index 0000000000..3dfb76c461 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json @@ -0,0 +1,1748 @@ +{ + "description": "fle2v2-CreateCollection", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": {} + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + } + ], + "tests": [ + { + "description": "state collections and index are created", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "default state collection names are applied", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "drop removes all state collections", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "db", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "plaintextCollection" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "plaintextCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "plaintextCollection" + }, + "databaseName": "default", + "commandName": "create" + } + } + ] + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "operations": [ + { + "name": "dropCollection", + "object": "db1", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db1", + "arguments": { + "collection": "encryptedCollection" + }, + "expectError": { + "errorContains": "Encrypted State Collection name should follow" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json new file mode 100644 index 0000000000..b171c78c00 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json @@ -0,0 +1,186 @@ +{ + "description": "fle2v2-DecryptExistingData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Delete.json new file mode 100644 index 0000000000..305f642ae1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Delete.json @@ -0,0 +1,326 @@ +{ + "description": "fle2v2-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json index b5f848c080..7a6957db0a 100644 --- a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -1,16 +1,18 @@ { "description": "fle2v2-EncryptedFields-vs-EncryptedFieldsMap", - "schemaVersion": "1.23", + "schemaVersion": "1.25", "runOnRequirements": [ { "minServerVersion": "7.0.0", "serverless": "forbid", - "csfle": true, "topologies": [ "replicaset", "sharded", "load-balanced" - ] + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } } ], "createEntities": [ @@ -18,12 +20,12 @@ "client": { "id": "client0", "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { "local": { "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" } }, - "keyVaultNamespace": "keyvault.datakeys", "encryptedFieldsMap": { "default.default": { "fields": [] diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..af24e9b369 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,367 @@ +{ + "description": "fle2v2-EncryptedFields-vs-jsonSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "validator": { + "$jsonSchema": { + "properties": {}, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..3727e43147 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,139 @@ +{ + "description": "fle2v2-EncryptedFieldsMap-defaults", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] + } + } + }, + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json new file mode 100644 index 0000000000..5131dc9fef --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json @@ -0,0 +1,622 @@ +{ + "description": "fle2v2-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "encryptedIndexed": "value456" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json new file mode 100644 index 0000000000..8155797583 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json @@ -0,0 +1,361 @@ +{ + "description": "fle2v2-InsertFind-Indexed", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..a6410bb9d8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json @@ -0,0 +1,301 @@ +{ + "description": "fle2v2-InsertFind-Unindexed", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Query with an unindexed field fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "object": "coll", + "expectError": { + "errorContains": "encrypt" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json b/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json new file mode 100644 index 0000000000..dc8ffc57b2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json @@ -0,0 +1,137 @@ +{ + "description": "fle2v2-MissingKey", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "FLE2 encrypt fails with missing key", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with missing key", + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json new file mode 100644 index 0000000000..4036fe5edd --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json @@ -0,0 +1,123 @@ +{ + "description": "fle2v2-NoEncryption", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [] + } + } + } + ], + "tests": [ + { + "description": "insert with no encryption succeeds", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json new file mode 100644 index 0000000000..8ccbcafc24 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json @@ -0,0 +1,358 @@ +{ + "description": "fle2v2-Rangev2-Compact", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + }, + { + "commandSucceededEvent": { + "commandName": "compactStructuredEncryptionData", + "reply": { + "ok": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json new file mode 100644 index 0000000000..7933cc5600 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json @@ -0,0 +1,574 @@ +{ + "description": "fle2v2-Rangev2-Date-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json new file mode 100644 index 0000000000..9ed541fa8e --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json @@ -0,0 +1,1610 @@ +{ + "description": "fle2v2-Rangev2-Date-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json new file mode 100644 index 0000000000..ad05dd4e17 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json @@ -0,0 +1,505 @@ +{ + "description": "fle2v2-Rangev2-Date-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..55db0279c2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -0,0 +1,577 @@ +{ + "description": "fle2v2-Rangev2-Date-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json new file mode 100644 index 0000000000..1fd1edf191 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json @@ -0,0 +1,562 @@ +{ + "description": "fle2v2-Rangev2-Date-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json new file mode 100644 index 0000000000..d5153270d5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json @@ -0,0 +1,581 @@ +{ + "description": "fle2v2-Rangev2-Date-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json new file mode 100644 index 0000000000..712a68be32 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json @@ -0,0 +1,1965 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json new file mode 100644 index 0000000000..edca7724a7 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json @@ -0,0 +1,1016 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json new file mode 100644 index 0000000000..4b0121ac22 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json @@ -0,0 +1,1179 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..2697549f6a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1969 @@ +{ + "description": "fle2v2-Rangev2-Decimal-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json new file mode 100644 index 0000000000..e3d52f5d04 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json @@ -0,0 +1,1956 @@ +{ + "description": "fle2v2-Rangev2-Decimal-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json new file mode 100644 index 0000000000..8ade3593e6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json @@ -0,0 +1,1975 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..41ba49112b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -0,0 +1,647 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..bc4e1f4508 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -0,0 +1,1418 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..1912f68ee5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -0,0 +1,539 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..9cf4488622 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,651 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..a9c3a8a46a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -0,0 +1,634 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json new file mode 100644 index 0000000000..7f8ea38ae0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -0,0 +1,653 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..cdbd169676 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,444 @@ +{ + "description": "fle2v2-Rangev2-Defaults", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json new file mode 100644 index 0000000000..c0211a1a34 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json @@ -0,0 +1,1195 @@ +{ + "description": "fle2v2-Rangev2-Double-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json new file mode 100644 index 0000000000..3bffc95191 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json @@ -0,0 +1,1018 @@ +{ + "description": "fle2v2-Rangev2-Double-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json new file mode 100644 index 0000000000..ac82c52b14 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json @@ -0,0 +1,795 @@ +{ + "description": "fle2v2-Rangev2-Double-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..ce1be99a3a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -0,0 +1,1199 @@ +{ + "description": "fle2v2-Rangev2-Double-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json new file mode 100644 index 0000000000..cac8bcafea --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json @@ -0,0 +1,1186 @@ +{ + "description": "fle2v2-Rangev2-Double-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json new file mode 100644 index 0000000000..938657c91c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json @@ -0,0 +1,1205 @@ +{ + "description": "fle2v2-Rangev2-Double-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..2046630a7b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -0,0 +1,643 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..939a12c9f8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -0,0 +1,1418 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json new file mode 100644 index 0000000000..db615d6fe3 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -0,0 +1,537 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..a8f87596e8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,647 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..5e4aa5f1e0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -0,0 +1,634 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json new file mode 100644 index 0000000000..10cae6be89 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json @@ -0,0 +1,653 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json new file mode 100644 index 0000000000..77a8f43e9c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json @@ -0,0 +1,547 @@ +{ + "description": "fle2v2-Rangev2-Int-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json new file mode 100644 index 0000000000..dde5ec371b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json @@ -0,0 +1,1412 @@ +{ + "description": "fle2v2-Rangev2-Int-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json new file mode 100644 index 0000000000..1c54c6e0f6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json @@ -0,0 +1,483 @@ +{ + "description": "fle2v2-Rangev2-Int-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..265a0c6f0d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -0,0 +1,551 @@ +{ + "description": "fle2v2-Rangev2-Int-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json new file mode 100644 index 0000000000..08b6d2c2a5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json @@ -0,0 +1,538 @@ +{ + "description": "fle2v2-Rangev2-Int-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json new file mode 100644 index 0000000000..9f28f768bb --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json @@ -0,0 +1,557 @@ +{ + "description": "fle2v2-Rangev2-Int-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json new file mode 100644 index 0000000000..01ff139a55 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json @@ -0,0 +1,547 @@ +{ + "description": "fle2v2-Rangev2-Long-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json new file mode 100644 index 0000000000..cc5388b1f0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json @@ -0,0 +1,1412 @@ +{ + "description": "fle2v2-Rangev2-Long-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json new file mode 100644 index 0000000000..0a8580110c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json @@ -0,0 +1,483 @@ +{ + "description": "fle2v2-Rangev2-Long-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..f014e1a4ac --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -0,0 +1,551 @@ +{ + "description": "fle2v2-Rangev2-Long-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json new file mode 100644 index 0000000000..2896df0032 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json @@ -0,0 +1,538 @@ +{ + "description": "fle2v2-Rangev2-Long-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json new file mode 100644 index 0000000000..4f8cd1d80d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json @@ -0,0 +1,557 @@ +{ + "description": "fle2v2-Rangev2-Long-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json new file mode 100644 index 0000000000..03681947ce --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json @@ -0,0 +1,204 @@ +{ + "description": "fle2v2-Rangev2-WrongType", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "maxServerVersion": "8.99.99", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberLong": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Update.json new file mode 100644 index 0000000000..9c39c4d83d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Update.json @@ -0,0 +1,633 @@ +{ + "description": "fle2v2-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "Update can modify an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "encryptedIndexed": "value456" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..54cc60a3b1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json @@ -0,0 +1,304 @@ +{ + "description": "fle2v2-validatorAndPartialFieldExpression", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "coll", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "coll", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/gcpKMS.json b/test/client-side-encryption/spec/unified/gcpKMS.json new file mode 100644 index 0000000000..6468b5b6ce --- /dev/null +++ b/test/client-side-encryption/spec/unified/gcpKMS.json @@ -0,0 +1,292 @@ +{ + "description": "gcpKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "gcp_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using GCP KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_gcp": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getMore.json b/test/client-side-encryption/spec/unified/getMore.json new file mode 100644 index 0000000000..adaa59b01e --- /dev/null +++ b/test/client-side-encryption/spec/unified/getMore.json @@ -0,0 +1,321 @@ +{ + "description": "getMore", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "getMore with encryption", + "operations": [ + { + "name": "find", + "arguments": { + "batchSize": 2, + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + }, + { + "_id": 3, + "encrypted_string": "string2" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "default", + "batchSize": 2 + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/insert.json b/test/client-side-encryption/spec/unified/insert.json new file mode 100644 index 0000000000..23e4e6c2ae --- /dev/null +++ b/test/client-side-encryption/spec/unified/insert.json @@ -0,0 +1,421 @@ +{ + "description": "insert", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "insertOne with encryption", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany with encryption", + "operations": [ + { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/keyAltName.json b/test/client-side-encryption/spec/unified/keyAltName.json new file mode 100644 index 0000000000..826f43df22 --- /dev/null +++ b/test/client-side-encryption/spec/unified/keyAltName.json @@ -0,0 +1,299 @@ +{ + "description": "keyAltName", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with encryption using key alt name", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_w_altname": "string0", + "altname": "altname" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [] + } + }, + { + "keyAltNames": { + "$in": [ + "altname" + ] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Replace with key alt name fails", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_w_altname": "string0" + } + }, + "upsert": true + }, + "object": "coll", + "expectError": { + "errorContains": "A non-static (JSONPointer) keyId is not supported" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/kmipKMS.json b/test/client-side-encryption/spec/unified/kmipKMS.json new file mode 100644 index 0000000000..e19f85882b --- /dev/null +++ b/test/client-side-encryption/spec/unified/kmipKMS.json @@ -0,0 +1,415 @@ +{ + "description": "kmipKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + }, + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": "11" + }, + "keyAltNames": [ + "delegated" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Insert a document with auto encryption using KMIP delegated KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip_delegated": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localKMS.json b/test/client-side-encryption/spec/unified/localKMS.json new file mode 100644 index 0000000000..03b8486484 --- /dev/null +++ b/test/client-side-encryption/spec/unified/localKMS.json @@ -0,0 +1,261 @@ +{ + "description": "localKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using local KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localSchema.json b/test/client-side-encryption/spec/unified/localSchema.json index aee323d949..685ee39d7c 100644 --- a/test/client-side-encryption/spec/unified/localSchema.json +++ b/test/client-side-encryption/spec/unified/localSchema.json @@ -27,7 +27,7 @@ "keyId": [ { "$binary": { - "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "base64": "OyQRAeK7QlWMr0E2xWapYg==", "subType": "04" } } @@ -41,7 +41,7 @@ "keyId": [ { "$binary": { - "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "base64": "OyQRAeK7QlWMr0E2xWapYg==", "subType": "04" } } @@ -55,7 +55,7 @@ "keyId": [ { "$binary": { - "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "base64": "OyQRAeK7QlWMr0E2xWapYg==", "subType": "04" } } @@ -76,9 +76,6 @@ }, "secretAccessKey": { "$$placeholder": 1 - }, - "sessionToken": { - "$$placeholder": 1 } } } @@ -92,6 +89,7 @@ "client": { "id": "client1", "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", "schemaMap": { "default.default": { "properties": { @@ -105,7 +103,6 @@ ] } }, - "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { "aws": { "accessKeyId": { @@ -113,9 +110,6 @@ }, "secretAccessKey": { "$$placeholder": 1 - }, - "sessionToken": { - "$$placeholder": 1 } } } @@ -163,7 +157,7 @@ "status": 1, "_id": { "$binary": { - "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "base64": "OyQRAeK7QlWMr0E2xWapYg==", "subType": "04" } }, @@ -248,7 +242,7 @@ "$in": [ { "$binary": { - "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "base64": "OyQRAeK7QlWMr0E2xWapYg==", "subType": "04" } } @@ -278,7 +272,7 @@ "_id": 1, "encrypted_string": { "$binary": { - "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", "subType": "06" } } @@ -311,7 +305,7 @@ "_id": 1, "encrypted_string": { "$binary": { - "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", "subType": "06" } } diff --git a/test/client-side-encryption/spec/unified/malformedCiphertext.json b/test/client-side-encryption/spec/unified/malformedCiphertext.json new file mode 100644 index 0000000000..550928f1e0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/malformedCiphertext.json @@ -0,0 +1,241 @@ +{ + "description": "malformedCiphertext", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQ==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAa2V2aW4gYWxiZXJ0c29uCg==", + "subType": "06" + } + } + } + ] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Wrong subtype", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "Empty data", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 2 + } + }, + "object": "coll", + "expectError": { + "errorContains": "malformed ciphertext" + } + } + ] + }, + { + "description": "Malformed data", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 3 + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/maxWireVersion.json b/test/client-side-encryption/spec/unified/maxWireVersion.json index d0af75ac99..f7a5f0b7db 100644 --- a/test/client-side-encryption/spec/unified/maxWireVersion.json +++ b/test/client-side-encryption/spec/unified/maxWireVersion.json @@ -12,10 +12,17 @@ "client": { "id": "client0", "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", "kmsProviders": { - "aws": {} + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } }, - "keyVaultNamespace": "keyvault.datakeys", "extraOptions": { "mongocryptdBypassSpawn": true } diff --git a/test/client-side-encryption/spec/unified/missingKey.json b/test/client-side-encryption/spec/unified/missingKey.json new file mode 100644 index 0000000000..af0fd5812a --- /dev/null +++ b/test/client-side-encryption/spec/unified/missingKey.json @@ -0,0 +1,233 @@ +{ + "description": "missingKey", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.different", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with encryption on a missing key", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "different", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS.json b/test/client-side-encryption/spec/unified/namedKMS.json new file mode 100644 index 0000000000..5e203865fd --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS.json @@ -0,0 +1,241 @@ +{ + "description": "namedKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/noSchema.json b/test/client-side-encryption/spec/unified/noSchema.json new file mode 100644 index 0000000000..c18afa4ed4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/noSchema.json @@ -0,0 +1,115 @@ +{ + "description": "noSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "unencrypted" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "unencrypted", + "documents": [] + } + ], + "tests": [ + { + "description": "Insert on an unencrypted collection", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1 + } + ], + "collectionName": "unencrypted", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/replaceOne.json b/test/client-side-encryption/spec/unified/replaceOne.json new file mode 100644 index 0000000000..a093e238ba --- /dev/null +++ b/test/client-side-encryption/spec/unified/replaceOne.json @@ -0,0 +1,316 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "replaceOne with encryption", + "operations": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1", + "random": "abc" + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/timeoutMS.json b/test/client-side-encryption/spec/unified/timeoutMS.json new file mode 100644 index 0000000000..98dc50e98a --- /dev/null +++ b/test/client-side-encryption/spec/unified/timeoutMS.json @@ -0,0 +1,270 @@ +{ + "description": "timeoutMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "timeoutMS": 500 + } + } + }, + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "cse-timeouts-db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "cse-timeouts-coll" + } + } + ], + "initialData": [ + { + "databaseName": "cse-timeouts-db", + "collectionName": "cse-timeouts-coll", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/types.json b/test/client-side-encryption/spec/unified/types.json new file mode 100644 index 0000000000..3bb49f2a64 --- /dev/null +++ b/test/client-side-encryption/spec/unified/types.json @@ -0,0 +1,2262 @@ +{ + "description": "types", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_objectId": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "objectId", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll0", + "database": "db0", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_symbol": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "symbol", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_int": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll2", + "database": "db2", + "collectionName": "default" + } + }, + { + "client": { + "id": "client3", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_double": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "double", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db3", + "client": "client3", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll3", + "database": "db3", + "collectionName": "default" + } + }, + { + "client": { + "id": "client4", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_decimal": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "decimal", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db4", + "client": "client4", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll4", + "database": "db4", + "collectionName": "default" + } + }, + { + "client": { + "id": "client5", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_binData": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "binData", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db5", + "client": "client5", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll5", + "database": "db5", + "collectionName": "default" + } + }, + { + "client": { + "id": "client6", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascript": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascript", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db6", + "client": "client6", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll6", + "database": "db6", + "collectionName": "default" + } + }, + { + "client": { + "id": "client7", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascriptWithScope": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascriptWithScope", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db7", + "client": "client7", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll7", + "database": "db7", + "collectionName": "default" + } + }, + { + "client": { + "id": "client8", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_object": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "object", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db8", + "client": "client8", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll8", + "database": "db8", + "collectionName": "default" + } + }, + { + "client": { + "id": "client9", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_timestamp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "timestamp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db9", + "client": "client9", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll9", + "database": "db9", + "collectionName": "default" + } + }, + { + "client": { + "id": "client10", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_regex": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "regex", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db10", + "client": "client10", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll10", + "database": "db10", + "collectionName": "default" + } + }, + { + "client": { + "id": "client11", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_date": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "date", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db11", + "client": "client11", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll11", + "database": "db11", + "collectionName": "default" + } + }, + { + "client": { + "id": "client12", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_minKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "minKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db12", + "client": "client12", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll12", + "database": "db12", + "collectionName": "default" + } + }, + { + "client": { + "id": "client13", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_maxKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "maxKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db13", + "client": "client13", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll13", + "database": "db13", + "collectionName": "default" + } + }, + { + "client": { + "id": "client14", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_undefined": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "undefined", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db14", + "client": "client14", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll14", + "database": "db14", + "collectionName": "default" + } + }, + { + "client": { + "id": "client15", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_array": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db15", + "client": "client15", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll15", + "database": "db15", + "collectionName": "default" + } + }, + { + "client": { + "id": "client16", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_bool": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "bool", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db16", + "client": "client16", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll16", + "database": "db16", + "collectionName": "default" + } + }, + { + "client": { + "id": "client17", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_null": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "null", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db17", + "client": "client17", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll17", + "database": "db17", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "type=objectId", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + }, + "object": "coll0" + }, + { + "name": "find", + "object": "coll0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=symbol", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + }, + "object": "coll1" + }, + { + "name": "find", + "object": "coll1", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + }, + "object": "coll2" + }, + { + "name": "find", + "object": "coll2", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_double": { + "$numberDouble": "1.23" + } + } + }, + "object": "coll3", + "expectError": { + "errorContains": "element of type: double" + } + } + ] + }, + { + "description": "type=decimal", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_decimal": { + "$numberDecimal": "1.23" + } + } + }, + "object": "coll4", + "expectError": { + "errorContains": "element of type: decimal" + } + } + ] + }, + { + "description": "type=binData", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + }, + "object": "coll5" + }, + { + "name": "find", + "object": "coll5", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client5", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=javascript", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + }, + "object": "coll6" + }, + { + "name": "find", + "object": "coll6", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client6", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=javascriptWithScope", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascriptWithScope": { + "$code": "var x = 1;", + "$scope": {} + } + } + }, + "object": "coll7", + "expectError": { + "errorContains": "element of type: javascriptWithScope" + } + } + ] + }, + { + "description": "type=object", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_object": {} + } + }, + "object": "coll8", + "expectError": { + "errorContains": "element of type: object" + } + } + ] + }, + { + "description": "type=timestamp", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + }, + "object": "coll9" + }, + { + "name": "find", + "object": "coll9", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client9", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=regex", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + }, + "object": "coll10" + }, + { + "name": "find", + "object": "coll10", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client10", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=date", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + }, + "object": "coll11" + }, + { + "name": "find", + "object": "coll11", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client11", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=minKey", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_minKey": { + "$minKey": 1 + } + } + }, + "object": "coll12", + "expectError": { + "errorContains": "Cannot encrypt element of type: minKey" + } + } + ] + }, + { + "description": "type=maxKey", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_maxKey": { + "$maxKey": 1 + } + } + }, + "object": "coll13", + "expectError": { + "errorContains": "Cannot encrypt element of type: maxKey" + } + } + ] + }, + { + "description": "type=undefined", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_undefined": { + "$undefined": true + } + } + }, + "object": "coll14", + "expectError": { + "errorContains": "Cannot encrypt element of type: undefined" + } + } + ] + }, + { + "description": "type=array", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_array": [] + } + }, + "object": "coll15", + "expectError": { + "errorContains": "element of type: array" + } + } + ] + }, + { + "description": "type=bool", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_bool": true + } + }, + "object": "coll16", + "expectError": { + "errorContains": "element of type: bool" + } + } + ] + }, + { + "description": "type=null", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_null": true + } + }, + "object": "coll17", + "expectError": { + "errorContains": "Cannot encrypt element of type: null" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/unsupportedCommand.json b/test/client-side-encryption/spec/unified/unsupportedCommand.json new file mode 100644 index 0000000000..a91390324a --- /dev/null +++ b/test/client-side-encryption/spec/unified/unsupportedCommand.json @@ -0,0 +1,200 @@ +{ + "description": "unsupportedCommand", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "x": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "x": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "mapReduce deterministic encryption (unsupported)", + "operations": [ + { + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "command not supported for auto encryption: mapreduce" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/updateMany.json b/test/client-side-encryption/spec/unified/updateMany.json new file mode 100644 index 0000000000..cae4c0eaf4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/updateMany.json @@ -0,0 +1,376 @@ +{ + "description": "updateMany", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "updateMany with deterministic encryption", + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + }, + "update": { + "$set": { + "encrypted_string": "string2", + "random": "abc" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": true, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateMany fails when filtering on a random field", + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/updateOne.json b/test/client-side-encryption/spec/unified/updateOne.json new file mode 100644 index 0000000000..6c8fdcbb6e --- /dev/null +++ b/test/client-side-encryption/spec/unified/updateOne.json @@ -0,0 +1,538 @@ +{ + "description": "updateOne", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "updateOne with deterministic encryption", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1", + "random": "abc" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne fails when filtering on a random field", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "$unset works with an encrypted field", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$unset": { + "encrypted_string": "" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1 + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$unset": { + "encrypted_string": "" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "$rename works if target value has same encryption options", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_equivalent": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "$rename fails if target value has different encryption options", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + "encrypted_string": "random" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$rename between two encrypted fields must have the same metadata or both be unencrypted" + } + } + ] + }, + { + "description": "an invalid update (no $ operators) is validated and errors", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "encrypted_string": "random" + } + }, + "object": "coll", + "expectError": { + "errorContains": "" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..c46a193273 --- /dev/null +++ b/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json @@ -0,0 +1,323 @@ +{ + "description": "validatorAndPartialFieldExpression", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/test_encryption.py b/test/test_encryption.py index fb18cc2dd0..be1b7ec1b6 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -745,8 +745,7 @@ def run_scenario(self): if _HAVE_PYMONGOCRYPT: globals().update( generate_test_classes( - os.path.join(SPEC_PATH, "unified"), - module=__name__, + os.path.join(SPEC_PATH, "unified"), module=__name__, expected_failures=["mapReduce .*"] ) ) diff --git a/test/unified_format.py b/test/unified_format.py index 8945948e69..580aed552b 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -254,6 +254,10 @@ def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: raise ValueError(f"Could not find a placeholder value for {path}") return PLACEHOLDER_MAP[path] + # Distinguish between temp and non-temp aws credentials. + if path.endswith("/kmsProviders/aws") and "sessionToken" in current: + path = path.replace("aws", "aws_temp") + for key in list(current): value = current[key] if isinstance(value, dict): @@ -274,10 +278,8 @@ def _create_entity(self, entity_spec, uri=None): if "autoEncryptOpts" in spec: auto_encrypt_opts = spec["autoEncryptOpts"].copy() auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) - kms_providers = ALL_KMS_PROVIDERS.copy() + kms_providers = auto_encrypt_opts.pop("kmsProviders", ALL_KMS_PROVIDERS.copy()) key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") - for provider_name, provider_value in auto_encrypt_opts.pop("kmsProviders").items(): - kms_providers[provider_name].update(provider_value) extra_opts = auto_encrypt_opts.pop("extraOptions", {}) for key, value in extra_opts.items(): auto_encrypt_kwargs[camel_to_snake(key)] = value @@ -551,22 +553,25 @@ def setUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if "Client side error in command starting transaction" in spec["description"]: + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + + if "client side error in command starting transaction" in description: self.skipTest("Implement PYTHON-1894") - if "timeoutMS applied to entire download" in spec["description"]: + if "type=symbol" in description: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to entire download" in description: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") if any( - x in spec["description"] + x in description for x in [ - "First insertOne is never committed", - "Second updateOne is never committed", - "Third updateOne is never committed", + "first insertone is never committed", + "second updateone is never committed", + "third updateone is never committed", ] ): self.skipTest("Implement PYTHON-4597") - class_name = self.__class__.__name__.lower() - description = spec["description"].lower() if "csot" in class_name: # Skip tests that are too slow to run on a given platform. slow_macos = [ @@ -782,6 +787,38 @@ def _databaseOperation_createCommandCursor(self, target, **kwargs): return cursor + def _collectionOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + def _collectionOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + for index in collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + def _collectionOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + def _databaseOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + def _databaseOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + for index in collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + def _databaseOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + def kill_all_sessions(self): if getattr(self, "client", None) is None: return diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 96b037976b..7ebcc6eb20 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -109,6 +109,7 @@ for provider_name, provider_data in [ ("local", {"key": LOCAL_MASTER_KEY}), ("local:name1", {"key": LOCAL_MASTER_KEY}), + ("aws_temp", AWS_TEMP_CREDS), ("aws", AWS_CREDS), ("aws:name1", AWS_CREDS), ("aws:name2", AWS_CREDS_2), @@ -119,19 +120,10 @@ ("kmip", KMIP_CREDS), ("kmip:name1", KMIP_CREDS), ]: - # Use the temp aws creds for autoEncryptOpts. - if provider_name == "aws": - for key, value in AWS_TEMP_CREDS.items(): - placeholder = f"/autoEncryptOpts/kmsProviders/{provider_name}/{key}" - PLACEHOLDER_MAP[placeholder] = value - for key, value in provider_data.items(): placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" PLACEHOLDER_MAP[placeholder] = value - if provider_name == "aws": - continue - placeholder = f"/autoEncryptOpts/kmsProviders/{provider_name}/{key}" PLACEHOLDER_MAP[placeholder] = value From d7316afb632fb16e23214ec2a404b308a1054896 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 10 Sep 2025 10:35:35 -0500 Subject: [PATCH 2049/2111] PYTHON-5328 CRUD Support in Driver for Prefix/Suffix/Substring Indexes (#2521) --- doc/changelog.rst | 7 + pymongo/asynchronous/encryption.py | 40 ++++- pymongo/encryption_options.py | 84 ++++++++- pymongo/synchronous/encryption.py | 40 ++++- test/__init__.py | 14 ++ test/asynchronous/__init__.py | 14 ++ test/asynchronous/test_encryption.py | 257 ++++++++++++++++++++++++++- test/test_encryption.py | 257 ++++++++++++++++++++++++++- uv.lock | 4 +- 9 files changed, 710 insertions(+), 7 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 305c989106..4d95559d69 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -4,6 +4,13 @@ Changes in Version 4.15.0 (XXXX/XX/XX) -------------------------------------- PyMongo 4.15 brings a number of changes including: +- Added :class:`~pymongo.encryption_options.TextOpts`, + :attr:`~pymongo.encryption.Algorithm.TEXTPREVIEW`, + :attr:`~pymongo.encryption.QueryType.PREFIXPREVIEW`, + :attr:`~pymongo.encryption.QueryType.SUFFIXPREVIEW`, + :attr:`~pymongo.encryption.QueryType.SUBSTRINGPREVIEW`, + as part of the experimental Queryable Encryption text queries beta. + ``pymongocrypt>=1.16`` is required for text query support. - Added :class:`bson.decimal128.DecimalEncoder` and :class:`bson.decimal128.DecimalDecoder` to support encoding and decoding of BSON Decimal128 values to decimal.Decimal values using the TypeRegistry API. diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index f4d66cb956..b302631108 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -67,7 +67,7 @@ from pymongo.asynchronous.pool import AsyncBaseConnection from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts +from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts, TextOpts from pymongo.errors import ( ConfigurationError, EncryptedCollectionError, @@ -516,6 +516,11 @@ class Algorithm(str, enum.Enum): .. versionadded:: 4.4 """ + TEXTPREVIEW = "TextPreview" + """**BETA** - TextPreview. + + .. versionadded:: 4.15 + """ class QueryType(str, enum.Enum): @@ -541,6 +546,24 @@ class QueryType(str, enum.Enum): .. versionadded:: 4.4 """ + PREFIXPREVIEW = "prefixPreview" + """**BETA** - Used to encrypt a value for a prefixPreview query. + + .. versionadded:: 4.15 + """ + + SUFFIXPREVIEW = "suffixPreview" + """**BETA** - Used to encrypt a value for a suffixPreview query. + + .. versionadded:: 4.15 + """ + + SUBSTRINGPREVIEW = "substringPreview" + """**BETA** - Used to encrypt a value for a substringPreview query. + + .. versionadded:: 4.15 + """ + def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. @@ -876,6 +899,7 @@ async def _encrypt_helper( contention_factor: Optional[int] = None, range_opts: Optional[RangeOpts] = None, is_expression: bool = False, + text_opts: Optional[TextOpts] = None, ) -> Any: self._check_closed() if isinstance(key_id, uuid.UUID): @@ -895,6 +919,12 @@ async def _encrypt_helper( range_opts.document, codec_options=self._codec_options, ) + text_opts_bytes = None + if text_opts: + text_opts_bytes = encode( + text_opts.document, + codec_options=self._codec_options, + ) with _wrap_encryption_errors(): encrypted_doc = await self._encryption.encrypt( value=doc, @@ -905,6 +935,7 @@ async def _encrypt_helper( contention_factor=contention_factor, range_opts=range_opts_bytes, is_expression=is_expression, + text_opts=text_opts_bytes, ) return decode(encrypted_doc)["v"] @@ -917,6 +948,7 @@ async def encrypt( query_type: Optional[str] = None, contention_factor: Optional[int] = None, range_opts: Optional[RangeOpts] = None, + text_opts: Optional[TextOpts] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -937,9 +969,14 @@ async def encrypt( used. :param range_opts: Index options for `range` queries. See :class:`RangeOpts` for some valid options. + :param text_opts: Index options for `textPreview` queries. See + :class:`TextOpts` for some valid options. :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + .. versionchanged:: 4.9 + Added the `text_opts` parameter. + .. versionchanged:: 4.9 Added the `range_opts` parameter. @@ -960,6 +997,7 @@ async def encrypt( contention_factor=contention_factor, range_opts=range_opts, is_expression=False, + text_opts=text_opts, ), ) diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index bbc736d1c0..da34a3be52 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -18,7 +18,7 @@ """ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Mapping, Optional +from typing import TYPE_CHECKING, Any, Mapping, Optional, TypedDict from pymongo.uri_parser_shared import _parse_kms_tls_options @@ -295,3 +295,85 @@ def document(self) -> dict[str, Any]: if v is not None: doc[k] = v return doc + + +class TextOpts: + """**BETA** Options to configure encrypted queries using the text algorithm. + + TextOpts is currently unstable API and subject to backwards breaking changes.""" + + def __init__( + self, + substring: Optional[SubstringOpts] = None, + prefix: Optional[PrefixOpts] = None, + suffix: Optional[SuffixOpts] = None, + case_sensitive: Optional[bool] = None, + diacritic_sensitive: Optional[bool] = None, + ) -> None: + """Options to configure encrypted queries using the text algorithm. + + :param substring: Further options to support substring queries. + :param prefix: Further options to support prefix queries. + :param suffix: Further options to support suffix queries. + :param case_sensitive: Whether text indexes for this field are case sensitive. + :param diacritic_sensitive: Whether text indexes for this field are diacritic sensitive. + + .. versionadded:: 4.15 + """ + self.substring = substring + self.prefix = prefix + self.suffix = suffix + self.case_sensitive = case_sensitive + self.diacritic_sensitive = diacritic_sensitive + + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("substring", self.substring), + ("prefix", self.prefix), + ("suffix", self.suffix), + ("caseSensitive", self.case_sensitive), + ("diacriticSensitive", self.diacritic_sensitive), + ]: + if v is not None: + doc[k] = v + return doc + + +class SubstringOpts(TypedDict): + """**BETA** Options for substring text queries. + + SubstringOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMaxLength is the maximum allowed length to insert. Inserting longer strings will error. + strMaxLength: int + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int + + +class PrefixOpts(TypedDict): + """**BETA** Options for prefix text queries. + + PrefixOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int + + +class SuffixOpts(TypedDict): + """**BETA** Options for suffix text queries. + + SuffixOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 7b98243528..752026af84 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -61,7 +61,7 @@ from pymongo import _csot from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts +from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts, TextOpts from pymongo.errors import ( ConfigurationError, EncryptedCollectionError, @@ -513,6 +513,11 @@ class Algorithm(str, enum.Enum): .. versionadded:: 4.4 """ + TEXTPREVIEW = "TextPreview" + """**BETA** - TextPreview. + + .. versionadded:: 4.15 + """ class QueryType(str, enum.Enum): @@ -538,6 +543,24 @@ class QueryType(str, enum.Enum): .. versionadded:: 4.4 """ + PREFIXPREVIEW = "prefixPreview" + """**BETA** - Used to encrypt a value for a prefixPreview query. + + .. versionadded:: 4.15 + """ + + SUFFIXPREVIEW = "suffixPreview" + """**BETA** - Used to encrypt a value for a suffixPreview query. + + .. versionadded:: 4.15 + """ + + SUBSTRINGPREVIEW = "substringPreview" + """**BETA** - Used to encrypt a value for a substringPreview query. + + .. versionadded:: 4.15 + """ + def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. @@ -869,6 +892,7 @@ def _encrypt_helper( contention_factor: Optional[int] = None, range_opts: Optional[RangeOpts] = None, is_expression: bool = False, + text_opts: Optional[TextOpts] = None, ) -> Any: self._check_closed() if isinstance(key_id, uuid.UUID): @@ -888,6 +912,12 @@ def _encrypt_helper( range_opts.document, codec_options=self._codec_options, ) + text_opts_bytes = None + if text_opts: + text_opts_bytes = encode( + text_opts.document, + codec_options=self._codec_options, + ) with _wrap_encryption_errors(): encrypted_doc = self._encryption.encrypt( value=doc, @@ -898,6 +928,7 @@ def _encrypt_helper( contention_factor=contention_factor, range_opts=range_opts_bytes, is_expression=is_expression, + text_opts=text_opts_bytes, ) return decode(encrypted_doc)["v"] @@ -910,6 +941,7 @@ def encrypt( query_type: Optional[str] = None, contention_factor: Optional[int] = None, range_opts: Optional[RangeOpts] = None, + text_opts: Optional[TextOpts] = None, ) -> Binary: """Encrypt a BSON value with a given key and algorithm. @@ -930,9 +962,14 @@ def encrypt( used. :param range_opts: Index options for `range` queries. See :class:`RangeOpts` for some valid options. + :param text_opts: Index options for `textPreview` queries. See + :class:`TextOpts` for some valid options. :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. + .. versionchanged:: 4.9 + Added the `text_opts` parameter. + .. versionchanged:: 4.9 Added the `range_opts` parameter. @@ -953,6 +990,7 @@ def encrypt( contention_factor=contention_factor, range_opts=range_opts, is_expression=False, + text_opts=text_opts, ), ) diff --git a/test/__init__.py b/test/__init__.py index 12660e3a4a..d583a72f0f 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -32,6 +32,7 @@ import warnings from inspect import iscoroutinefunction +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import AutoReconnect from pymongo.synchronous.uri_parser import parse_uri @@ -524,6 +525,19 @@ def require_version_max(self, *ver): "Server version must be at most %s" % str(other_version), ) + def require_libmongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import libmongocrypt_version + + version = Version.from_string(libmongocrypt_version()) + return self._require( + lambda: version >= other_version, + "Libmongocrypt version must be at least %s" % str(other_version), + ) + def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" return self._require( diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 7b594b184d..8ab7ff7219 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -33,6 +33,7 @@ from inspect import iscoroutinefunction from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT from pymongo.errors import AutoReconnect try: @@ -524,6 +525,19 @@ def require_version_max(self, *ver): "Server version must be at most %s" % str(other_version), ) + def require_libmongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import libmongocrypt_version + + version = Version.from_string(libmongocrypt_version()) + return self._require( + lambda: version >= other_version, + "Libmongocrypt version must be at least %s" % str(other_version), + ) + def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" return self._require( diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 241cb15668..e510de5631 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -89,7 +89,7 @@ from pymongo.asynchronous.encryption import Algorithm, AsyncClientEncryption, QueryType from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.cursor_shared import CursorType -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts, TextOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -3443,6 +3443,261 @@ async def test_collection_name_collision(self): self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption +class TestExplicitTextEncryptionProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 2, -1) + @async_client_context.require_libmongocrypt_min(1, 15, 1) + async def asyncSetUp(self): + await super().asyncSetUp() + # Load the file key1-document.json as key1Document. + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + # Read the "_id" field of key1Document as key1ID. + self.key1_id = self.key1_document["_id"] + # Drop and create the collection keyvault.datakeys. + # Insert key1Document in keyvault.datakeys with majority write concern. + self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addAsyncCleanup(self.key_vault.drop) + # Create a ClientEncryption object named clientEncryption with these options. + self.kms_providers = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + self.kms_providers, + self.key_vault.full_name, + self.client, + OPTS, + ) + # Create a MongoClient named encryptedClient with these AutoEncryptionOpts. + opts = AutoEncryptionOpts( + self.kms_providers, + "keyvault.datakeys", + bypass_query_analysis=True, + ) + self.client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + # Using QE CreateCollection() and Collection.Drop(), drop and create the following collections with majority write concern: + # db.prefix-suffix using the encryptedFields option set to the contents of encryptedFields-prefix-suffix.json. + db = self.client_encrypted.db + await db.drop_collection("prefix-suffix") + encrypted_fields = json_data("etc", "data", "encryptedFields-prefix-suffix.json") + await self.client_encryption.create_encrypted_collection( + db, "prefix-suffix", kms_provider="local", encrypted_fields=encrypted_fields + ) + # db.substring using the encryptedFields option set to the contents of encryptedFields-substring.json. + await db.drop_collection("substring") + encrypted_fields = json_data("etc", "data", "encryptedFields-substring.json") + await self.client_encryption.create_encrypted_collection( + db, "substring", kms_provider="local", encrypted_fields=encrypted_fields + ) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.prefix-suffix with majority write concern. + coll = self.client_encrypted.db["prefix-suffix"].with_options( + write_concern=WriteConcern(w="majority") + ) + await coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.substring with majority write concern. + coll = self.client_encrypted.db["substring"].with_options( + write_concern=WriteConcern(w="majority") + ) + await coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + async def test_01_can_find_a_document_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter. + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_02_can_find_a_document_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_03_no_document_found_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_04_no_document_found_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_05_can_find_a_document_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "bar" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "bar", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = await self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert the following document is returned: + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_06_no_document_found_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "qux" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "qux", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = await self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_07_contentionFactor_is_required(self): + from pymongocrypt.errors import MongoCryptError + + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + with self.assertRaises(EncryptionError) as ctx: + await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + text_opts=text_opts, + ) + # Expect an error from libmongocrypt with a message containing the string: "contention factor is required for textPreview algorithm". + self.assertIsInstance(ctx.exception.cause, MongoCryptError) + self.assertEqual( + str(ctx.exception), "contention factor is required for textPreview algorithm" + ) + + def start_mongocryptd(port) -> None: args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"] _spawn_daemon(args) diff --git a/test/test_encryption.py b/test/test_encryption.py index be1b7ec1b6..4eb0331715 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -86,7 +86,7 @@ from bson.son import SON from pymongo import ReadPreference from pymongo.cursor_shared import CursorType -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts, TextOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, @@ -3425,6 +3425,261 @@ def test_collection_name_collision(self): self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption +class TestExplicitTextEncryptionProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 2, -1) + @client_context.require_libmongocrypt_min(1, 15, 1) + def setUp(self): + super().setUp() + # Load the file key1-document.json as key1Document. + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + # Read the "_id" field of key1Document as key1ID. + self.key1_id = self.key1_document["_id"] + # Drop and create the collection keyvault.datakeys. + # Insert key1Document in keyvault.datakeys with majority write concern. + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + # Create a ClientEncryption object named clientEncryption with these options. + self.kms_providers = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + self.kms_providers, + self.key_vault.full_name, + self.client, + OPTS, + ) + # Create a MongoClient named encryptedClient with these AutoEncryptionOpts. + opts = AutoEncryptionOpts( + self.kms_providers, + "keyvault.datakeys", + bypass_query_analysis=True, + ) + self.client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + + # Using QE CreateCollection() and Collection.Drop(), drop and create the following collections with majority write concern: + # db.prefix-suffix using the encryptedFields option set to the contents of encryptedFields-prefix-suffix.json. + db = self.client_encrypted.db + db.drop_collection("prefix-suffix") + encrypted_fields = json_data("etc", "data", "encryptedFields-prefix-suffix.json") + self.client_encryption.create_encrypted_collection( + db, "prefix-suffix", kms_provider="local", encrypted_fields=encrypted_fields + ) + # db.substring using the encryptedFields option set to the contents of encryptedFields-substring.json. + db.drop_collection("substring") + encrypted_fields = json_data("etc", "data", "encryptedFields-substring.json") + self.client_encryption.create_encrypted_collection( + db, "substring", kms_provider="local", encrypted_fields=encrypted_fields + ) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.prefix-suffix with majority write concern. + coll = self.client_encrypted.db["prefix-suffix"].with_options( + write_concern=WriteConcern(w="majority") + ) + coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.substring with majority write concern. + coll = self.client_encrypted.db["substring"].with_options( + write_concern=WriteConcern(w="majority") + ) + coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + def test_01_can_find_a_document_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter. + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_02_can_find_a_document_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_03_no_document_found_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_04_no_document_found_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_05_can_find_a_document_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "bar" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "bar", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert the following document is returned: + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_06_no_document_found_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "qux" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "qux", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_07_contentionFactor_is_required(self): + from pymongocrypt.errors import MongoCryptError + + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + with self.assertRaises(EncryptionError) as ctx: + self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + text_opts=text_opts, + ) + # Expect an error from libmongocrypt with a message containing the string: "contention factor is required for textPreview algorithm". + self.assertIsInstance(ctx.exception.cause, MongoCryptError) + self.assertEqual( + str(ctx.exception), "contention factor is required for textPreview algorithm" + ) + + def start_mongocryptd(port) -> None: args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"] _spawn_daemon(args) diff --git a/uv.lock b/uv.lock index 9c45c4cdb9..2e0ef2a151 100644 --- a/uv.lock +++ b/uv.lock @@ -1350,8 +1350,8 @@ wheels = [ [[package]] name = "pymongocrypt" -version = "1.14.2.dev0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#56048cf426bfeffa0805934b668a7af5ed8e907c" } +version = "1.16.0" +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#63d2591b84a9d4348cbe1c74556e266cd560ac5b" } dependencies = [ { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*'" }, { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, From 98e9f5ecc1396abf2fafe070de36e1bed5691930 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 10 Sep 2025 10:36:14 -0500 Subject: [PATCH 2050/2111] PYTHON-5538 Clean up uv lock file handling (#2522) --- justfile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/justfile b/justfile index 24da94a499..7ac5bd33ff 100644 --- a/justfile +++ b/justfile @@ -1,10 +1,11 @@ # See https://just.systems/man/en/ for instructions set shell := ["bash", "-c"] +# Do not modify the lock file when running justfile commands. +export UV_FROZEN := "1" # Commonly used command segments. -uv_run := "uv run --frozen " -typing_run := uv_run + "--group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" -docs_run := uv_run + "--extra docs" +typing_run := "uv run --group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" +docs_run := "uv run --extra docs" doc_build := "./doc/_build" mypy_args := "--install-types --non-interactive" @@ -50,15 +51,15 @@ typing-pyright: && resync [group('lint')] lint: && resync - {{uv_run}} pre-commit run --all-files + uv run pre-commit run --all-files [group('lint')] lint-manual: && resync - {{uv_run}} pre-commit run --all-files --hook-stage manual + uv run pre-commit run --all-files --hook-stage manual [group('test')] test *args="-v --durations=5 --maxfail=10": && resync - {{uv_run}} --extra test pytest {{args}} + uv run --extra test pytest {{args}} [group('test')] run-tests *args: && resync From 1514e9b784ed395e7dad806c7b722fef531ecb15 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Wed, 10 Sep 2025 12:03:54 -0400 Subject: [PATCH 2051/2111] Prepare 4.15 release (#2523) --- doc/changelog.rst | 5 ++++- pymongo/_version.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 4d95559d69..64c61e5877 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,7 +1,9 @@ Changelog ========= -Changes in Version 4.15.0 (XXXX/XX/XX) + +Changes in Version 4.15.0 (2025/09/10) -------------------------------------- + PyMongo 4.15 brings a number of changes including: - Added :class:`~pymongo.encryption_options.TextOpts`, @@ -13,6 +15,7 @@ PyMongo 4.15 brings a number of changes including: ``pymongocrypt>=1.16`` is required for text query support. - Added :class:`bson.decimal128.DecimalEncoder` and :class:`bson.decimal128.DecimalDecoder` to support encoding and decoding of BSON Decimal128 values to decimal.Decimal values using the TypeRegistry API. +- Added support for Windows ``arm64`` wheels. Changes in Version 4.14.1 (2025/08/19) -------------------------------------- diff --git a/pymongo/_version.py b/pymongo/_version.py index 6eb73ba97c..a0a3bf79bb 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.15.0.dev0" +__version__ = "4.15.0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From d2653eecc69fe76599bc4c5fd7ab3835deff1da8 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Wed, 10 Sep 2025 16:50:43 +0000 Subject: [PATCH 2052/2111] BUMP 4.16.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index a0a3bf79bb..c6ba82ab13 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.15.0" +__version__ = "4.16.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 8879f2b9512eec909ca8df017c4755fd530bf89c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Sep 2025 13:27:36 -0500 Subject: [PATCH 2053/2111] Bump the actions group with 5 updates (#2519) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/dist.yml | 6 +++--- .github/workflows/release-python.yml | 6 +++--- .github/workflows/test-python.yml | 24 +++++++++++++----------- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 8dffe1fa7b..1027b20834 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -42,11 +42,11 @@ jobs: with: ref: ${{ inputs.ref }} persist-credentials: false - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/init@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3 + uses: github/codeql-action/analyze@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index e5b36ad7dd..0110d8df41 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -52,7 +52,7 @@ jobs: persist-credentials: false ref: ${{ inputs.ref }} - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: cache: 'pip' python-version: 3.11 @@ -115,7 +115,7 @@ jobs: persist-credentials: false ref: ${{ inputs.ref }} - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: # Build sdist on lowest supported Python python-version: '3.9' @@ -143,7 +143,7 @@ jobs: name: Download Wheels steps: - name: Download all workflow run artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 - name: Flatten directory working-directory: . run: | diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 9cce310d91..a30afbccd5 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -76,19 +76,19 @@ jobs: id-token: write steps: - name: Download all the dists - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 with: name: all-dist-${{ github.run_id }} path: dist/ - name: Publish package distributions to TestPyPI - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # release/v1 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1 with: repository-url: https://test.pypi.org/legacy/ skip-existing: true attestations: ${{ env.DRY_RUN }} - name: Publish package distributions to PyPI if: startsWith(env.DRY_RUN, 'false') - uses: pypa/gh-action-pypi-publish@76f52bc884231f62b9a034ebfe128415bbaabdfc # release/v1 + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1 post-publish: needs: [publish] diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index d55c0d7c7c..89c1e6a95f 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -23,7 +23,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -84,7 +84,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: enable-cache: true python-version: "3.9" @@ -109,7 +109,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: enable-cache: true python-version: "3.9" @@ -128,7 +128,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: enable-cache: true python-version: "3.9" @@ -150,7 +150,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: enable-cache: true python-version: "${{matrix.python}}" @@ -170,7 +170,7 @@ jobs: - uses: actions/checkout@v5 with: persist-credentials: false - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: cache: 'pip' cache-dependency-path: 'pyproject.toml' @@ -193,7 +193,9 @@ jobs: timeout-minutes: 20 steps: - name: Download sdist - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v5 + with: + path: sdist/ - name: Unpack SDist shell: bash run: | @@ -202,7 +204,7 @@ jobs: mkdir test tar --strip-components=1 -zxf *.tar.gz -C ./test ls test - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: cache: 'pip' cache-dependency-path: 'sdist/test/pyproject.toml' @@ -229,7 +231,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: python-version: '3.9' - id: setup-mongodb @@ -255,7 +257,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@4959332f0f014c5280e7eac8b70c90cb574c9f9b # v5 + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 with: python-version: '3.9' - id: setup-mongodb From 527cbdd18a13227c3dae7a263be46c83692cfdc4 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 10 Sep 2025 13:28:02 -0500 Subject: [PATCH 2054/2111] PYTHON-5537 Update typing dependencies (#2524) --- bson/binary.py | 2 +- bson/raw_bson.py | 14 +++-- bson/son.py | 2 +- bson/typings.py | 2 +- pymongo/asynchronous/cursor.py | 2 +- pymongo/asynchronous/encryption.py | 6 +- pymongo/asynchronous/helpers.py | 2 +- pymongo/compression_support.py | 2 +- pymongo/message.py | 16 ++--- pymongo/network_layer.py | 9 +-- pymongo/synchronous/cursor.py | 2 +- pymongo/synchronous/encryption.py | 6 +- pymongo/synchronous/helpers.py | 2 +- test/test_typing.py | 4 +- test/unified_format_shared.py | 6 +- uv.lock | 98 +++++++++++++++++------------- 16 files changed, 100 insertions(+), 75 deletions(-) diff --git a/bson/binary.py b/bson/binary.py index b48ae4fcc6..48eb12b0ac 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -298,7 +298,7 @@ class Binary(bytes): def __new__( cls: Type[Binary], - data: Union[memoryview, bytes, _mmap, _array[Any]], + data: Union[memoryview, bytes, bytearray, _mmap, _array[Any]], subtype: int = BINARY_SUBTYPE, ) -> Binary: if not isinstance(subtype, int): diff --git a/bson/raw_bson.py b/bson/raw_bson.py index 2ce53143c2..9ead0765dc 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -60,7 +60,9 @@ def _inflate_bson( - bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument], raw_array: bool = False + bson_bytes: bytes | memoryview, + codec_options: CodecOptions[RawBSONDocument], + raw_array: bool = False, ) -> dict[str, Any]: """Inflates the top level fields of a BSON document. @@ -85,7 +87,9 @@ class RawBSONDocument(Mapping[str, Any]): __codec_options: CodecOptions[RawBSONDocument] def __init__( - self, bson_bytes: bytes, codec_options: Optional[CodecOptions[RawBSONDocument]] = None + self, + bson_bytes: bytes | memoryview, + codec_options: Optional[CodecOptions[RawBSONDocument]] = None, ) -> None: """Create a new :class:`RawBSONDocument` @@ -135,7 +139,7 @@ class from the standard library so it can be used like a read-only _get_object_size(bson_bytes, 0, len(bson_bytes)) @property - def raw(self) -> bytes: + def raw(self) -> bytes | memoryview: """The raw BSON bytes composing this document.""" return self.__raw @@ -153,7 +157,7 @@ def __inflated(self) -> Mapping[str, Any]: @staticmethod def _inflate_bson( - bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument] + bson_bytes: bytes | memoryview, codec_options: CodecOptions[RawBSONDocument] ) -> Mapping[str, Any]: return _inflate_bson(bson_bytes, codec_options) @@ -180,7 +184,7 @@ class _RawArrayBSONDocument(RawBSONDocument): @staticmethod def _inflate_bson( - bson_bytes: bytes, codec_options: CodecOptions[RawBSONDocument] + bson_bytes: bytes | memoryview, codec_options: CodecOptions[RawBSONDocument] ) -> Mapping[str, Any]: return _inflate_bson(bson_bytes, codec_options, raw_array=True) diff --git a/bson/son.py b/bson/son.py index 24275fce16..8fd4f95cd2 100644 --- a/bson/son.py +++ b/bson/son.py @@ -143,7 +143,7 @@ def popitem(self) -> Tuple[_Key, _Value]: del self[k] return (k, v) - def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type: ignore[override] + def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # Make progressively weaker assumptions about "other" if other is None: pass diff --git a/bson/typings.py b/bson/typings.py index 55e90b19a5..5913860556 100644 --- a/bson/typings.py +++ b/bson/typings.py @@ -28,4 +28,4 @@ _DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] _DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) _DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) -_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] # type: ignore[type-arg] +_ReadableBuffer = Union[bytes, memoryview, bytearray, "mmap", "array"] # type: ignore[type-arg] diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index ab2d0e873c..d9fdd576f4 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -1009,7 +1009,7 @@ def _deepcopy( else: if not isinstance(key, RE_TYPE): key = copy.deepcopy(key, memo) # noqa: PLW2901 - y[key] = value + y[key] = value # type:ignore[index] return y def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index b302631108..7328f91235 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -264,7 +264,7 @@ def spawn(self) -> None: args.extend(self.opts._mongocryptd_spawn_args) _spawn_daemon(args) - async def mark_command(self, database: str, cmd: bytes) -> bytes: + async def mark_command(self, database: str, cmd: bytes) -> bytes | memoryview: """Mark a command for encryption. :param database: The database on which to run this command. @@ -291,7 +291,7 @@ async def mark_command(self, database: str, cmd: bytes) -> bytes: ) return res.raw - async def fetch_keys(self, filter: bytes) -> AsyncGenerator[bytes, None]: + async def fetch_keys(self, filter: bytes) -> AsyncGenerator[bytes | memoryview, None]: """Yields one or more keys from the key vault. :param filter: The filter to pass to find. @@ -463,7 +463,7 @@ async def encrypt( # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - async def decrypt(self, response: bytes) -> Optional[bytes]: + async def decrypt(self, response: bytes | memoryview) -> Optional[bytes]: """Decrypt a MongoDB command response. :param response: A MongoDB command response as BSON. diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index 54fd64f74a..ccda16e28b 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -78,7 +78,7 @@ async def _getaddrinfo( socket.SocketKind, int, str, - tuple[str, int] | tuple[str, int, int, int], + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], ] ]: if not _IS_SYNC: diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index db14b8d83f..64ffe052ec 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -152,7 +152,7 @@ def compress(data: bytes) -> bytes: return zstandard.ZstdCompressor().compress(data) -def decompress(data: bytes, compressor_id: int) -> bytes: +def decompress(data: bytes | memoryview, compressor_id: int) -> bytes: if compressor_id == SnappyContext.compressor_id: # python-snappy doesn't support the buffer interface. # https://github.com/andrix/python-snappy/issues/65 diff --git a/pymongo/message.py b/pymongo/message.py index b2e5a685af..0f3aaaba77 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -1352,7 +1352,9 @@ class _OpReply: UNPACK_FROM = struct.Struct(" list[bytes]: + ) -> list[bytes | memoryview]: """Check the response header from the database, without decoding BSON. Check the response for errors and unpack. @@ -1448,7 +1450,7 @@ def more_to_come(self) -> bool: return False @classmethod - def unpack(cls, msg: bytes) -> _OpReply: + def unpack(cls, msg: bytes | memoryview) -> _OpReply: """Construct an _OpReply from raw bytes.""" # PYTHON-945: ignore starting_from field. flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg) @@ -1470,7 +1472,7 @@ class _OpMsg: MORE_TO_COME = 1 << 1 EXHAUST_ALLOWED = 1 << 16 # Only present on requests. - def __init__(self, flags: int, payload_document: bytes): + def __init__(self, flags: int, payload_document: bytes | memoryview): self.flags = flags self.payload_document = payload_document @@ -1512,7 +1514,7 @@ def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]: """Unpack a command response.""" return self.unpack_response(codec_options=codec_options)[0] - def raw_command_response(self) -> bytes: + def raw_command_response(self) -> bytes | memoryview: """Return the bytes of the command response.""" return self.payload_document @@ -1522,7 +1524,7 @@ def more_to_come(self) -> bool: return bool(self.flags & self.MORE_TO_COME) @classmethod - def unpack(cls, msg: bytes) -> _OpMsg: + def unpack(cls, msg: bytes | memoryview) -> _OpMsg: """Construct an _OpMsg from raw bytes.""" flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: @@ -1541,7 +1543,7 @@ def unpack(cls, msg: bytes) -> _OpMsg: return cls(flags, payload_document) -_UNPACK_REPLY: dict[int, Callable[[bytes], Union[_OpReply, _OpMsg]]] = { +_UNPACK_REPLY: dict[int, Callable[[bytes | memoryview], Union[_OpReply, _OpMsg]]] = { _OpReply.OP_CODE: _OpReply.unpack, _OpMsg.OP_CODE: _OpMsg.unpack, } diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 605b8dde9b..028316de34 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -246,7 +246,7 @@ def sock(self) -> Union[socket.socket, _sslConn]: def fileno(self) -> int: return self.conn.fileno() - def recv_into(self, buffer: bytes) -> int: + def recv_into(self, buffer: bytes | memoryview) -> int: return self.conn.recv_into(buffer) @@ -533,14 +533,14 @@ def _resolve_pending(self, exc: Optional[Exception] = None) -> None: fut = self._pending_listeners.popleft() fut.set_result(b"") - def _read(self, bytes_needed: int) -> memoryview: + def _read(self, bytes_needed: int) -> bytes: """Read bytes.""" # Send the bytes to the listener. if self._bytes_ready < bytes_needed: bytes_needed = self._bytes_ready self._bytes_ready -= bytes_needed - output_buf = bytearray(bytes_needed) + output_buf = memoryview(bytearray(bytes_needed)) n_remaining = bytes_needed out_index = 0 while n_remaining > 0: @@ -557,7 +557,7 @@ def _read(self, bytes_needed: int) -> memoryview: output_buf[out_index : out_index + buf_size] = buffer[:] out_index += buf_size n_remaining -= buf_size - return memoryview(output_buf) + return bytes(output_buf) async def async_sendall(conn: PyMongoBaseProtocol, buf: bytes) -> None: @@ -670,6 +670,7 @@ def receive_message( f"Message length ({length!r}) is larger than server max " f"message size ({max_message_size!r})" ) + data: bytes | memoryview if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) data = decompress(receive_data(conn, length - 25, deadline), compressor_id) diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index eb45d9c5d1..3dd550f4d5 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -1007,7 +1007,7 @@ def _deepcopy( else: if not isinstance(key, RE_TYPE): key = copy.deepcopy(key, memo) # noqa: PLW2901 - y[key] = value + y[key] = value # type:ignore[index] return y def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 752026af84..35adc8eed6 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -261,7 +261,7 @@ def spawn(self) -> None: args.extend(self.opts._mongocryptd_spawn_args) _spawn_daemon(args) - def mark_command(self, database: str, cmd: bytes) -> bytes: + def mark_command(self, database: str, cmd: bytes) -> bytes | memoryview: """Mark a command for encryption. :param database: The database on which to run this command. @@ -288,7 +288,7 @@ def mark_command(self, database: str, cmd: bytes) -> bytes: ) return res.raw - def fetch_keys(self, filter: bytes) -> Generator[bytes, None]: + def fetch_keys(self, filter: bytes) -> Generator[bytes | memoryview, None]: """Yields one or more keys from the key vault. :param filter: The filter to pass to find. @@ -460,7 +460,7 @@ def encrypt( # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - def decrypt(self, response: bytes) -> Optional[bytes]: + def decrypt(self, response: bytes | memoryview) -> Optional[bytes]: """Decrypt a MongoDB command response. :param response: A MongoDB command response as BSON. diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index bc69a49e80..1fff9a0f23 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -78,7 +78,7 @@ def _getaddrinfo( socket.SocketKind, int, str, - tuple[str, int] | tuple[str, int, int, int], + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], ] ]: if not _IS_SYNC: diff --git a/test/test_typing.py b/test/test_typing.py index 65937020d2..8709186e12 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -475,7 +475,7 @@ def test_typeddict_not_required_document_type(self) -> None: # This should fail because the output is a Movie. assert out["foo"] # type:ignore[typeddict-item] # pyright gives reportTypedDictNotRequiredAccess for the following: - assert out["_id"] # type:ignore + assert out["_id"] # type:ignore[unused-ignore] @only_type_check def test_typeddict_empty_document_type(self) -> None: @@ -496,7 +496,7 @@ def test_typeddict_find_notrequired(self): out = coll.find_one({}) assert out is not None # pyright gives reportTypedDictNotRequiredAccess for the following: - assert out["_id"] # type:ignore + assert out["_id"] # type:ignore[unused-ignore] @only_type_check def test_raw_bson_document_type(self) -> None: diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py index 7ebcc6eb20..5aa989cb24 100644 --- a/test/unified_format_shared.py +++ b/test/unified_format_shared.py @@ -37,7 +37,7 @@ LOCAL_MASTER_KEY, ) from test.utils_shared import CMAPListener, camel_to_snake, parse_collection_options -from typing import Any, Union +from typing import Any, MutableMapping, Union from bson import ( RE_TYPE, @@ -162,7 +162,9 @@ def __new__(cls, name, this_bases, d): return meta(name, resolved_bases, d) @classmethod - def __prepare__(cls, name, this_bases): + def __prepare__( + cls, name: str, this_bases: tuple[type, ...], /, **kwds: Any + ) -> MutableMapping[str, object]: return meta.__prepare__(name, bases) return type.__new__(metaclass, "temporary_class", (), {}) diff --git a/uv.lock b/uv.lock index 2e0ef2a151..77f6a46385 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.9" resolution-markers = [ "python_full_version == '3.14.*'", @@ -1047,46 +1047,53 @@ dependencies = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.17.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, + { name = "pathspec" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, - { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, - { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, - { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, - { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, - { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, - { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, - { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, - { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, - { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, - { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, - { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, - { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, - { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/8e/22/ea637422dedf0bf36f3ef238eab4e455e2a0dcc3082b5cc067615347ab8e/mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01", size = 3352570, upload-time = "2025-07-31T07:54:19.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/a9/3d7aa83955617cdf02f94e50aab5c830d205cfa4320cf124ff64acce3a8e/mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972", size = 11003299, upload-time = "2025-07-31T07:54:06.425Z" }, + { url = "https://files.pythonhosted.org/packages/83/e8/72e62ff837dd5caaac2b4a5c07ce769c8e808a00a65e5d8f94ea9c6f20ab/mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7", size = 10125451, upload-time = "2025-07-31T07:53:52.974Z" }, + { url = "https://files.pythonhosted.org/packages/7d/10/f3f3543f6448db11881776f26a0ed079865926b0c841818ee22de2c6bbab/mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df", size = 11916211, upload-time = "2025-07-31T07:53:18.879Z" }, + { url = "https://files.pythonhosted.org/packages/06/bf/63e83ed551282d67bb3f7fea2cd5561b08d2bb6eb287c096539feb5ddbc5/mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390", size = 12652687, upload-time = "2025-07-31T07:53:30.544Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/68f2eeef11facf597143e85b694a161868b3b006a5fbad50e09ea117ef24/mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94", size = 12896322, upload-time = "2025-07-31T07:53:50.74Z" }, + { url = "https://files.pythonhosted.org/packages/a3/87/8e3e9c2c8bd0d7e071a89c71be28ad088aaecbadf0454f46a540bda7bca6/mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b", size = 9507962, upload-time = "2025-07-31T07:53:08.431Z" }, + { url = "https://files.pythonhosted.org/packages/46/cf/eadc80c4e0a70db1c08921dcc220357ba8ab2faecb4392e3cebeb10edbfa/mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58", size = 10921009, upload-time = "2025-07-31T07:53:23.037Z" }, + { url = "https://files.pythonhosted.org/packages/5d/c1/c869d8c067829ad30d9bdae051046561552516cfb3a14f7f0347b7d973ee/mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5", size = 10047482, upload-time = "2025-07-31T07:53:26.151Z" }, + { url = "https://files.pythonhosted.org/packages/98/b9/803672bab3fe03cee2e14786ca056efda4bb511ea02dadcedde6176d06d0/mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd", size = 11832883, upload-time = "2025-07-31T07:53:47.948Z" }, + { url = "https://files.pythonhosted.org/packages/88/fb/fcdac695beca66800918c18697b48833a9a6701de288452b6715a98cfee1/mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b", size = 12566215, upload-time = "2025-07-31T07:54:04.031Z" }, + { url = "https://files.pythonhosted.org/packages/7f/37/a932da3d3dace99ee8eb2043b6ab03b6768c36eb29a02f98f46c18c0da0e/mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5", size = 12751956, upload-time = "2025-07-31T07:53:36.263Z" }, + { url = "https://files.pythonhosted.org/packages/8c/cf/6438a429e0f2f5cab8bc83e53dbebfa666476f40ee322e13cac5e64b79e7/mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b", size = 9507307, upload-time = "2025-07-31T07:53:59.734Z" }, + { url = "https://files.pythonhosted.org/packages/17/a2/7034d0d61af8098ec47902108553122baa0f438df8a713be860f7407c9e6/mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb", size = 11086295, upload-time = "2025-07-31T07:53:28.124Z" }, + { url = "https://files.pythonhosted.org/packages/14/1f/19e7e44b594d4b12f6ba8064dbe136505cec813549ca3e5191e40b1d3cc2/mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403", size = 10112355, upload-time = "2025-07-31T07:53:21.121Z" }, + { url = "https://files.pythonhosted.org/packages/5b/69/baa33927e29e6b4c55d798a9d44db5d394072eef2bdc18c3e2048c9ed1e9/mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056", size = 11875285, upload-time = "2025-07-31T07:53:55.293Z" }, + { url = "https://files.pythonhosted.org/packages/90/13/f3a89c76b0a41e19490b01e7069713a30949d9a6c147289ee1521bcea245/mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341", size = 12737895, upload-time = "2025-07-31T07:53:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/23/a1/c4ee79ac484241301564072e6476c5a5be2590bc2e7bfd28220033d2ef8f/mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb", size = 12931025, upload-time = "2025-07-31T07:54:17.125Z" }, + { url = "https://files.pythonhosted.org/packages/89/b8/7409477be7919a0608900e6320b155c72caab4fef46427c5cc75f85edadd/mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19", size = 9584664, upload-time = "2025-07-31T07:54:12.842Z" }, + { url = "https://files.pythonhosted.org/packages/5b/82/aec2fc9b9b149f372850291827537a508d6c4d3664b1750a324b91f71355/mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7", size = 11075338, upload-time = "2025-07-31T07:53:38.873Z" }, + { url = "https://files.pythonhosted.org/packages/07/ac/ee93fbde9d2242657128af8c86f5d917cd2887584cf948a8e3663d0cd737/mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81", size = 10113066, upload-time = "2025-07-31T07:54:14.707Z" }, + { url = "https://files.pythonhosted.org/packages/5a/68/946a1e0be93f17f7caa56c45844ec691ca153ee8b62f21eddda336a2d203/mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6", size = 11875473, upload-time = "2025-07-31T07:53:14.504Z" }, + { url = "https://files.pythonhosted.org/packages/9f/0f/478b4dce1cb4f43cf0f0d00fba3030b21ca04a01b74d1cd272a528cf446f/mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849", size = 12744296, upload-time = "2025-07-31T07:53:03.896Z" }, + { url = "https://files.pythonhosted.org/packages/ca/70/afa5850176379d1b303f992a828de95fc14487429a7139a4e0bdd17a8279/mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14", size = 12914657, upload-time = "2025-07-31T07:54:08.576Z" }, + { url = "https://files.pythonhosted.org/packages/53/f9/4a83e1c856a3d9c8f6edaa4749a4864ee98486e9b9dbfbc93842891029c2/mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a", size = 9593320, upload-time = "2025-07-31T07:53:01.341Z" }, + { url = "https://files.pythonhosted.org/packages/38/56/79c2fac86da57c7d8c48622a05873eaab40b905096c33597462713f5af90/mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733", size = 11040037, upload-time = "2025-07-31T07:54:10.942Z" }, + { url = "https://files.pythonhosted.org/packages/4d/c3/adabe6ff53638e3cad19e3547268482408323b1e68bf082c9119000cd049/mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd", size = 10131550, upload-time = "2025-07-31T07:53:41.307Z" }, + { url = "https://files.pythonhosted.org/packages/b8/c5/2e234c22c3bdeb23a7817af57a58865a39753bde52c74e2c661ee0cfc640/mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0", size = 11872963, upload-time = "2025-07-31T07:53:16.878Z" }, + { url = "https://files.pythonhosted.org/packages/ab/26/c13c130f35ca8caa5f2ceab68a247775648fdcd6c9a18f158825f2bc2410/mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a", size = 12710189, upload-time = "2025-07-31T07:54:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/82/df/c7d79d09f6de8383fe800521d066d877e54d30b4fb94281c262be2df84ef/mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91", size = 12900322, upload-time = "2025-07-31T07:53:10.551Z" }, + { url = "https://files.pythonhosted.org/packages/b8/98/3d5a48978b4f708c55ae832619addc66d677f6dc59f3ebad71bae8285ca6/mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed", size = 9751879, upload-time = "2025-07-31T07:52:56.683Z" }, + { url = "https://files.pythonhosted.org/packages/29/cb/673e3d34e5d8de60b3a61f44f80150a738bff568cd6b7efb55742a605e98/mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9", size = 10992466, upload-time = "2025-07-31T07:53:57.574Z" }, + { url = "https://files.pythonhosted.org/packages/0c/d0/fe1895836eea3a33ab801561987a10569df92f2d3d4715abf2cfeaa29cb2/mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99", size = 10117638, upload-time = "2025-07-31T07:53:34.256Z" }, + { url = "https://files.pythonhosted.org/packages/97/f3/514aa5532303aafb95b9ca400a31054a2bd9489de166558c2baaeea9c522/mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8", size = 11915673, upload-time = "2025-07-31T07:52:59.361Z" }, + { url = "https://files.pythonhosted.org/packages/ab/c3/c0805f0edec96fe8e2c048b03769a6291523d509be8ee7f56ae922fa3882/mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8", size = 12649022, upload-time = "2025-07-31T07:53:45.92Z" }, + { url = "https://files.pythonhosted.org/packages/45/3e/d646b5a298ada21a8512fa7e5531f664535a495efa672601702398cea2b4/mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259", size = 12895536, upload-time = "2025-07-31T07:53:06.17Z" }, + { url = "https://files.pythonhosted.org/packages/14/55/e13d0dcd276975927d1f4e9e2ec4fd409e199f01bdc671717e673cc63a22/mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d", size = 9512564, upload-time = "2025-07-31T07:53:12.346Z" }, + { url = "https://files.pythonhosted.org/packages/1d/f3/8fcd2af0f5b806f6cf463efaffd3c9548a28f84220493ecd38d127b6b66d/mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9", size = 2283411, upload-time = "2025-07-31T07:53:24.664Z" }, ] [[package]] @@ -1116,6 +1123,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + [[package]] name = "pip" version = "25.2" @@ -1315,7 +1331,7 @@ provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "tes [package.metadata.requires-dev] coverage = [ - { name = "coverage", specifier = ">=5,<=7.5" }, + { name = "coverage", specifier = ">=5,<=7.10.6" }, { name = "pytest-cov" }, ] dev = [{ name = "pre-commit", specifier = ">=4.0" }] @@ -1329,9 +1345,9 @@ perf = [{ name = "simplejson" }] pip = [{ name = "pip" }] pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] typing = [ - { name = "mypy", specifier = "==1.14.1" }, + { name = "mypy", specifier = "==1.17.1" }, { name = "pip" }, - { name = "pyright", specifier = "==1.1.392.post0" }, + { name = "pyright", specifier = "==1.1.405" }, { name = "typing-extensions" }, ] @@ -1375,15 +1391,15 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.392.post0" +version = "1.1.405" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/66/df/3c6f6b08fba7ccf49b114dfc4bb33e25c299883fd763f93fad47ef8bc58d/pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd", size = 3789911, upload-time = "2025-01-15T15:01:20.913Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/6c/ba4bbee22e76af700ea593a1d8701e3225080956753bee9750dcc25e2649/pyright-1.1.405.tar.gz", hash = "sha256:5c2a30e1037af27eb463a1cc0b9f6d65fec48478ccf092c1ac28385a15c55763", size = 4068319, upload-time = "2025-09-04T03:37:06.776Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/b1/a18de17f40e4f61ca58856b9ef9b0febf74ff88978c3f7776f910071f567/pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2", size = 5595487, upload-time = "2025-01-15T15:01:17.775Z" }, + { url = "https://files.pythonhosted.org/packages/d5/1a/524f832e1ff1962a22a1accc775ca7b143ba2e9f5924bb6749dce566784a/pyright-1.1.405-py3-none-any.whl", hash = "sha256:a2cb13700b5508ce8e5d4546034cb7ea4aedb60215c6c33f56cec7f53996035a", size = 5905038, upload-time = "2025-09-04T03:37:04.913Z" }, ] [[package]] From 2b148867e7f127818b8c855b72d5c2ffda2409b5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 10 Sep 2025 16:38:55 -0500 Subject: [PATCH 2055/2111] PYTHON-5540 Fix usage of text_opts for older versions of pymongocrypt (#2525) --- pymongo/asynchronous/encryption.py | 3 ++- pymongo/synchronous/encryption.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 7328f91235..2b1895b832 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -935,7 +935,8 @@ async def _encrypt_helper( contention_factor=contention_factor, range_opts=range_opts_bytes, is_expression=is_expression, - text_opts=text_opts_bytes, + # For compatibility with pymongocrypt < 1.16: + **{"text_opts": text_opts_bytes} if text_opts_bytes else {}, ) return decode(encrypted_doc)["v"] diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index 35adc8eed6..a08302c211 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -928,7 +928,8 @@ def _encrypt_helper( contention_factor=contention_factor, range_opts=range_opts_bytes, is_expression=is_expression, - text_opts=text_opts_bytes, + # For compatibility with pymongocrypt < 1.16: + **{"text_opts": text_opts_bytes} if text_opts_bytes else {}, ) return decode(encrypted_doc)["v"] From 3da6e858d58b7b2cf6038d998d538f0ac75fefb2 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Thu, 11 Sep 2025 16:37:22 -0400 Subject: [PATCH 2056/2111] PYTHON-5543 PyMongoBaseProtocol should inherit from asyncio.BaseProtocol (#2528) Co-authored-by: Noah Stapp --- doc/changelog.rst | 8 ++++++++ pymongo/network_layer.py | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 64c61e5877..8fb2d12f96 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,14 @@ Changelog ========= +Changes in Version 4.15.1 (XXXX/XX/XX) +-------------------------------------- + +Version 4.15.1 is a bug fix release. + +- Fixed a bug in ``AsyncMongoClient`` that caused a + ``ServerSelectionTimeoutError`` when used with ``uvicorn``, ``FastAPI``, or ``uvloop``. + Changes in Version 4.15.0 (2025/09/10) -------------------------------------- diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 028316de34..a3900e30c1 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -22,7 +22,7 @@ import struct import sys import time -from asyncio import BaseTransport, BufferedProtocol, Future, Protocol, Transport +from asyncio import BaseProtocol, BaseTransport, BufferedProtocol, Future, Transport from typing import ( TYPE_CHECKING, Any, @@ -250,7 +250,7 @@ def recv_into(self, buffer: bytes | memoryview) -> int: return self.conn.recv_into(buffer) -class PyMongoBaseProtocol(Protocol): +class PyMongoBaseProtocol(BaseProtocol): def __init__(self, timeout: Optional[float] = None): self.transport: Transport = None # type: ignore[assignment] self._timeout = timeout From 32e183baa78c6c759a2dee7da740c647c35730f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Sep 2025 07:44:32 -0500 Subject: [PATCH 2057/2111] Bump the actions group with 3 updates (#2531) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/test-python.yml | 16 ++++++++-------- .github/workflows/zizmor.yml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1027b20834..3ed1f0d9bb 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3 + uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3 + uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 89c1e6a95f..6499e8ba8d 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -23,7 +23,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true python-version: "3.9" @@ -65,7 +65,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -84,7 +84,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true python-version: "3.9" @@ -109,7 +109,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true python-version: "3.9" @@ -128,7 +128,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true python-version: "3.9" @@ -150,7 +150,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true python-version: "${{matrix.python}}" @@ -231,7 +231,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: python-version: '3.9' - id: setup-mongodb @@ -257,7 +257,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: python-version: '3.9' - id: setup-mongodb diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 2db3b43e7f..31d8c1eef3 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@a016d81e77496751b5c04eb1e8f00214bd396553 + uses: zizmorcore/zizmor-action@873539476a7f9b0da7504d0d9e9a6a5275094d98 From eca38b730b8227c52cd5f0c655f617e969743ee1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Sep 2025 07:45:15 -0500 Subject: [PATCH 2058/2111] Bump mypy from 1.17.1 to 1.18.1 (#2532) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 890244b688..fe277d8ed0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,7 +65,7 @@ pymongocrypt_source = [ ] perf = ["simplejson"] typing = [ - "mypy==1.17.1", + "mypy==1.18.1", "pyright==1.1.405", "typing_extensions", "pip" From 7a07c0281457f16fe807e4e65ef0c13f7b1deb2f Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 16 Sep 2025 09:16:31 -0500 Subject: [PATCH 2059/2111] PYTHON-5544 Revert changes to base protocol layer (#2535) --- doc/changelog.rst | 8 - pymongo/asynchronous/encryption.py | 28 +- pymongo/asynchronous/pool.py | 161 +++++----- pymongo/network_layer.py | 468 +++++++++++++++++------------ pymongo/pool_shared.py | 127 +++++++- pymongo/synchronous/encryption.py | 28 +- pymongo/synchronous/pool.py | 161 +++++----- tools/synchro.py | 2 +- 8 files changed, 576 insertions(+), 407 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 8fb2d12f96..64c61e5877 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,14 +1,6 @@ Changelog ========= -Changes in Version 4.15.1 (XXXX/XX/XX) --------------------------------------- - -Version 4.15.1 is a bug fix release. - -- Fixed a bug in ``AsyncMongoClient`` that caused a - ``ServerSelectionTimeoutError`` when used with ``uvicorn``, ``FastAPI``, or ``uvloop``. - Changes in Version 4.15.0 (2025/09/10) -------------------------------------- diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index 2b1895b832..d32a5b3204 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -64,7 +64,6 @@ from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.database import AsyncDatabase from pymongo.asynchronous.mongo_client import AsyncMongoClient -from pymongo.asynchronous.pool import AsyncBaseConnection from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts, TextOpts @@ -77,11 +76,11 @@ ServerSelectionTimeoutError, ) from pymongo.helpers_shared import _get_timeout_details -from pymongo.network_layer import PyMongoKMSProtocol, async_receive_kms, async_sendall +from pymongo.network_layer import async_socket_sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( - _configured_protocol_interface, + _async_configured_socket, _raise_connection_failure, ) from pymongo.read_concern import ReadConcern @@ -94,8 +93,10 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext + from pymongo.pyopenssl_context import _sslConn from pymongo.typings import _Address + _IS_SYNC = False _HTTPS_PORT = 443 @@ -110,10 +111,9 @@ _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) -async def _connect_kms(address: _Address, opts: PoolOptions) -> AsyncBaseConnection: +async def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: try: - interface = await _configured_protocol_interface(address, opts, PyMongoKMSProtocol) - return AsyncBaseConnection(interface, opts) + return await _async_configured_socket(address, opts) except Exception as exc: _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) @@ -198,11 +198,19 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: try: conn = await _connect_kms(address, opts) try: - await async_sendall(conn.conn.get_conn, message) + await async_socket_sendall(conn, message) while kms_context.bytes_needed > 0: # CSOT: update timeout. - conn.set_conn_timeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = await async_receive_kms(conn, kms_context.bytes_needed) + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data: memoryview | bytes + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + async_receive_data_socket, + ) + + data = await async_receive_data_socket(conn, kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) @@ -221,7 +229,7 @@ async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) ) finally: - await conn.close_conn(None) + conn.close() except MongoCryptError: raise # Propagate MongoCryptError errors directly. except Exception as exc: diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 8c169b4c52..196ec9040f 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -123,89 +123,7 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 _IS_SYNC = False -class AsyncBaseConnection: - """A base connection object for server and kms connections.""" - - def __init__(self, conn: AsyncNetworkingInterface, opts: PoolOptions): - self.conn = conn - self.socket_checker: SocketChecker = SocketChecker() - self.cancel_context: _CancellationContext = _CancellationContext() - self.is_sdam = False - self.closed = False - self.last_timeout: float | None = None - self.more_to_come = False - self.opts = opts - self.max_wire_version = -1 - - def set_conn_timeout(self, timeout: Optional[float]) -> None: - """Cache last timeout to avoid duplicate calls to conn.settimeout.""" - if timeout == self.last_timeout: - return - self.last_timeout = timeout - self.conn.get_conn.settimeout(timeout) - - def apply_timeout( - self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]] - ) -> Optional[float]: - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - # Reset the socket timeout unless we're performing a streaming monitor check. - if not self.more_to_come: - self.set_conn_timeout(self.opts.socket_timeout) - return None - # RTT validation. - rtt = _csot.get_rtt() - if rtt is None: - rtt = self.connect_rtt - max_time_ms = timeout - rtt - if max_time_ms < 0: - timeout_details = _get_timeout_details(self.opts) - formatted = format_timeout_details(timeout_details) - # CSOT: raise an error without running the command since we know it will time out. - errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" - if self.max_wire_version != -1: - raise ExecutionTimeout( - errmsg, - 50, - {"ok": 0, "errmsg": errmsg, "code": 50}, - self.max_wire_version, - ) - else: - raise TimeoutError(errmsg) - if cmd is not None: - cmd["maxTimeMS"] = int(max_time_ms * 1000) - self.set_conn_timeout(timeout) - return timeout - - async def close_conn(self, reason: Optional[str]) -> None: - """Close this connection with a reason.""" - if self.closed: - return - await self._close_conn() - - async def _close_conn(self) -> None: - """Close this connection.""" - if self.closed: - return - self.closed = True - self.cancel_context.cancel() - # Note: We catch exceptions to avoid spurious errors on interpreter - # shutdown. - try: - await self.conn.close() - except Exception: # noqa: S110 - pass - - def conn_closed(self) -> bool: - """Return True if we know socket has been closed, False otherwise.""" - if _IS_SYNC: - return self.socket_checker.socket_closed(self.conn.get_conn) - else: - return self.conn.is_closing() - - -class AsyncConnection(AsyncBaseConnection): +class AsyncConnection: """Store a connection with some metadata. :param conn: a raw connection object @@ -223,27 +141,29 @@ def __init__( id: int, is_sdam: bool, ): - super().__init__(conn, pool.opts) self.pool_ref = weakref.ref(pool) - self.address: tuple[str, int] = address - self.id: int = id + self.conn = conn + self.address = address + self.id = id self.is_sdam = is_sdam + self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False self.is_writable: bool = False self.max_wire_version = MAX_WIRE_VERSION - self.max_bson_size: int = MAX_BSON_SIZE - self.max_message_size: int = MAX_MESSAGE_SIZE - self.max_write_batch_size: int = MAX_WRITE_BATCH_SIZE + self.max_bson_size = MAX_BSON_SIZE + self.max_message_size = MAX_MESSAGE_SIZE + self.max_write_batch_size = MAX_WRITE_BATCH_SIZE self.supports_sessions = False self.hello_ok: bool = False - self.is_mongos: bool = False + self.is_mongos = False self.op_msg_enabled = False self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap self.enabled_for_logging = pool.enabled_for_logging self.compression_settings = pool.opts._compression_settings self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None + self.socket_checker: SocketChecker = SocketChecker() self.oidc_token_gen_id: Optional[int] = None # Support for mechanism negotiation on the initial handshake. self.negotiated_mechs: Optional[list[str]] = None @@ -254,6 +174,9 @@ def __init__( self.pool_gen = pool.gen self.generation = self.pool_gen.get_overall() self.ready = False + self.cancel_context: _CancellationContext = _CancellationContext() + self.opts = pool.opts + self.more_to_come: bool = False # For load balancer support. self.service_id: Optional[ObjectId] = None self.server_connection_id: Optional[int] = None @@ -269,6 +192,44 @@ def __init__( # For gossiping $clusterTime from the connection handshake to the client. self._cluster_time = None + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + def pin_txn(self) -> None: self.pinned_txn = True assert not self.pinned_cursor @@ -612,6 +573,26 @@ async def close_conn(self, reason: Optional[str]) -> None: error=reason, ) + async def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + await self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + def send_cluster_time( self, command: MutableMapping[str, Any], diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index a3900e30c1..2e5b61f8ae 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -22,11 +22,10 @@ import struct import sys import time -from asyncio import BaseProtocol, BaseTransport, BufferedProtocol, Future, Transport +from asyncio import AbstractEventLoop, BaseTransport, BufferedProtocol, Future, Transport from typing import ( TYPE_CHECKING, Any, - Callable, Optional, Union, ) @@ -39,30 +38,208 @@ from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply from pymongo.socket_checker import _errno_from_exception -if TYPE_CHECKING: - from pymongo.asynchronous.pool import AsyncBaseConnection, AsyncConnection +try: + from ssl import SSLError, SSLSocket + + _HAVE_SSL = True +except ImportError: + _HAVE_SSL = False + +try: from pymongo.pyopenssl_context import _sslConn - from pymongo.synchronous.pool import BaseConnection, Connection + + _HAVE_PYOPENSSL = True +except ImportError: + _HAVE_PYOPENSSL = False + _sslConn = SSLSocket # type: ignore[assignment, misc] + +from pymongo.ssl_support import ( + BLOCKING_IO_LOOKUP_ERROR, + BLOCKING_IO_READ_ERROR, + BLOCKING_IO_WRITE_ERROR, +) + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.synchronous.pool import Connection _UNPACK_HEADER = struct.Struct(" None: + timeout = sock.gettimeout() + sock.settimeout(0.0) + loop = asyncio.get_running_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + await asyncio.wait_for(_async_socket_sendall_ssl(sock, buf, loop), timeout=timeout) + else: + await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc + finally: + sock.settimeout(timeout) + + +if sys.platform != "win32": + + async def _async_socket_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop + ) -> None: + view = memoryview(buf) + sent = 0 + + def _is_ready(fut: Future[Any]) -> None: + if fut.done(): + return + fut.set_result(None) + + while sent < len(buf): + try: + sent += sock.send(view[sent:]) # type:ignore[arg-type] + except BLOCKING_IO_ERRORS as exc: + fd = sock.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + + async def _async_socket_receive_ssl( + conn: _sslConn, length: int, loop: AbstractEventLoop, once: Optional[bool] = False + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + + def _is_ready(fut: Future[Any]) -> None: + if fut.done(): + return + fut.set_result(None) + + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] + total_read += read + except BLOCKING_IO_ERRORS as exc: + fd = conn.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + return mv + +else: + # The default Windows asyncio event loop does not support loop.add_reader/add_writer: + # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support + # Note: In PYTHON-4493 we plan to replace this code with asyncio streams. + async def _async_socket_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop + ) -> None: + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. + backoff = 0.001 + while total_sent < total_length: + try: + sent = sock.send(view[total_sent:]) + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + sent = 0 + if sent > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_sent += sent + + async def _async_socket_receive_ssl( + conn: _sslConn, length: int, dummy: AbstractEventLoop, once: Optional[bool] = False + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. + backoff = 0.001 + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + read = 0 + if read > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_read += read + return mv def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: sock.sendall(buf) -async def _poll_cancellation(conn: AsyncBaseConnection) -> None: +async def _poll_cancellation(conn: AsyncConnection) -> None: while True: if conn.cancel_context.cancelled: return @@ -70,7 +247,49 @@ async def _poll_cancellation(conn: AsyncBaseConnection) -> None: await asyncio.sleep(_POLL_TIMEOUT) -def wait_for_read(conn: BaseConnection, deadline: Optional[float]) -> None: +async def async_receive_data_socket( + sock: Union[socket.socket, _sslConn], length: int +) -> memoryview: + sock_timeout = sock.gettimeout() + timeout = sock_timeout + + sock.settimeout(0.0) + loop = asyncio.get_running_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + return await asyncio.wait_for( + _async_socket_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] + timeout=timeout, + ) + else: + return await asyncio.wait_for( + _async_socket_receive(sock, length, loop), # type: ignore[arg-type] + timeout=timeout, + ) + except asyncio.TimeoutError as err: + raise socket.timeout("timed out") from err + finally: + sock.settimeout(sock_timeout) + + +async def _async_socket_receive( + conn: socket.socket, length: int, loop: AbstractEventLoop +) -> memoryview: + mv = memoryview(bytearray(length)) + bytes_read = 0 + while bytes_read < length: + chunk_length = await loop.sock_recv_into(conn, mv[bytes_read:]) + if chunk_length == 0: + raise OSError("connection closed") + bytes_read += chunk_length + return mv + + +_PYPY = "PyPy" in sys.version +_WINDOWS = sys.platform == "win32" + + +def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: """Block until at least one byte is read, or a timeout, or a cancel.""" sock = conn.conn.sock timed_out = False @@ -103,7 +322,7 @@ def wait_for_read(conn: BaseConnection, deadline: Optional[float]) -> None: raise socket.timeout("timed out") -def receive_data(conn: BaseConnection, length: int, deadline: Optional[float]) -> memoryview: +def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: buf = bytearray(length) mv = memoryview(buf) bytes_read = 0 @@ -193,7 +412,7 @@ def sock(self) -> Any: class AsyncNetworkingInterface(NetworkingInterfaceBase): - def __init__(self, conn: tuple[Transport, PyMongoBaseProtocol]): + def __init__(self, conn: tuple[Transport, PyMongoProtocol]): super().__init__(conn) @property @@ -211,7 +430,7 @@ def is_closing(self) -> bool: return self.conn[0].is_closing() @property - def get_conn(self) -> PyMongoBaseProtocol: + def get_conn(self) -> PyMongoProtocol: return self.conn[1] @property @@ -250,51 +469,9 @@ def recv_into(self, buffer: bytes | memoryview) -> int: return self.conn.recv_into(buffer) -class PyMongoBaseProtocol(BaseProtocol): +class PyMongoProtocol(BufferedProtocol): def __init__(self, timeout: Optional[float] = None): self.transport: Transport = None # type: ignore[assignment] - self._timeout = timeout - self._closed = asyncio.get_running_loop().create_future() - self._connection_lost = False - - def settimeout(self, timeout: float | None) -> None: - self._timeout = timeout - - @property - def gettimeout(self) -> float | None: - """The configured timeout for the socket that underlies our protocol pair.""" - return self._timeout - - def close(self, exc: Optional[Exception] = None) -> None: - self.transport.abort() - self._resolve_pending(exc) - self._connection_lost = True - - def connection_lost(self, exc: Optional[Exception] = None) -> None: - self._resolve_pending(exc) - if not self._closed.done(): - self._closed.set_result(None) - - def _resolve_pending(self, exc: Optional[Exception] = None) -> None: - pass - - async def wait_closed(self) -> None: - await self._closed - - async def write(self, message: bytes) -> None: - """Write a message to this connection's transport.""" - if self.transport.is_closing(): - raise OSError("Connection is closed") - self.transport.write(message) - self.transport.resume_reading() - - async def read(self, *args: Any) -> Any: - raise NotImplementedError - - -class PyMongoProtocol(PyMongoBaseProtocol, BufferedProtocol): - def __init__(self, timeout: Optional[float] = None): - super().__init__(timeout) # Each message is reader in 2-3 parts: header, compression header, and message body # The message buffer is allocated after the header is read. self._header = memoryview(bytearray(16)) @@ -308,14 +485,25 @@ def __init__(self, timeout: Optional[float] = None): self._expecting_compression = False self._message_size = 0 self._op_code = 0 + self._connection_lost = False self._read_waiter: Optional[Future[Any]] = None + self._timeout = timeout self._is_compressed = False self._compressor_id: Optional[int] = None self._max_message_size = MAX_MESSAGE_SIZE self._response_to: Optional[int] = None + self._closed = asyncio.get_running_loop().create_future() self._pending_messages: collections.deque[Future[Any]] = collections.deque() self._done_messages: collections.deque[Future[Any]] = collections.deque() + def settimeout(self, timeout: float | None) -> None: + self._timeout = timeout + + @property + def gettimeout(self) -> float | None: + """The configured timeout for the socket that underlies our protocol pair.""" + return self._timeout + def connection_made(self, transport: BaseTransport) -> None: """Called exactly once when a connection is made. The transport argument is the transport representing the write side of the connection. @@ -323,6 +511,13 @@ def connection_made(self, transport: BaseTransport) -> None: self.transport = transport # type: ignore[assignment] self.transport.set_write_buffer_limits(MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE) + async def write(self, message: bytes) -> None: + """Write a message to this connection's transport.""" + if self.transport.is_closing(): + raise OSError("Connection is closed") + self.transport.write(message) + self.transport.resume_reading() + async def read(self, request_id: Optional[int], max_message_size: int) -> tuple[bytes, int]: """Read a single MongoDB Wire Protocol message from this connection.""" if self.transport: @@ -465,7 +660,7 @@ def process_compression_header(self) -> tuple[int, int]: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(self._compression_header) return op_code, compressor_id - def _resolve_pending(self, exc: Optional[Exception] = None) -> None: + def _resolve_pending_messages(self, exc: Optional[Exception] = None) -> None: pending = list(self._pending_messages) for msg in pending: if not msg.done(): @@ -475,92 +670,21 @@ def _resolve_pending(self, exc: Optional[Exception] = None) -> None: msg.set_exception(exc) self._done_messages.append(msg) + def close(self, exc: Optional[Exception] = None) -> None: + self.transport.abort() + self._resolve_pending_messages(exc) + self._connection_lost = True -class PyMongoKMSProtocol(PyMongoBaseProtocol): - def __init__(self, timeout: Optional[float] = None): - super().__init__(timeout) - self._buffers: collections.deque[memoryview[bytes]] = collections.deque() - self._bytes_ready = 0 - self._pending_reads: collections.deque[int] = collections.deque() - self._pending_listeners: collections.deque[Future[Any]] = collections.deque() - - def connection_made(self, transport: BaseTransport) -> None: - """Called exactly once when a connection is made. - The transport argument is the transport representing the write side of the connection. - """ - self.transport = transport # type: ignore[assignment] - - def data_received(self, data: bytes) -> None: - if self._connection_lost: - return - - self._bytes_ready += len(data) - self._buffers.append(memoryview(data)) - - if not len(self._pending_reads): - return + def connection_lost(self, exc: Optional[Exception] = None) -> None: + self._resolve_pending_messages(exc) + if not self._closed.done(): + self._closed.set_result(None) - bytes_needed = self._pending_reads.popleft() - data = self._read(bytes_needed) - waiter = self._pending_listeners.popleft() - waiter.set_result(data) - - async def read(self, bytes_needed: int) -> bytes: - """Read up to the requested bytes from this connection.""" - # Note: all reads are "up-to" bytes_needed because we don't know if the kms_context - # has processed a Content-Length header and is requesting a response or not. - # Wait for other listeners first. - if len(self._pending_listeners): - await asyncio.gather(*self._pending_listeners) - # If there are bytes ready, then there is no need to wait further. - if self._bytes_ready > 0: - return self._read(bytes_needed) - if self.transport: - try: - self.transport.resume_reading() - # Known bug in SSL Protocols, fixed in Python 3.11: https://github.com/python/cpython/issues/89322 - except AttributeError: - raise OSError("connection is already closed") from None - if self.transport and self.transport.is_closing(): - raise OSError("connection is already closed") - self._pending_reads.append(bytes_needed) - read_waiter = asyncio.get_running_loop().create_future() - self._pending_listeners.append(read_waiter) - return await read_waiter - - def _resolve_pending(self, exc: Optional[Exception] = None) -> None: - while self._pending_listeners: - fut = self._pending_listeners.popleft() - fut.set_result(b"") - - def _read(self, bytes_needed: int) -> bytes: - """Read bytes.""" - # Send the bytes to the listener. - if self._bytes_ready < bytes_needed: - bytes_needed = self._bytes_ready - self._bytes_ready -= bytes_needed - - output_buf = memoryview(bytearray(bytes_needed)) - n_remaining = bytes_needed - out_index = 0 - while n_remaining > 0: - buffer = self._buffers.popleft() - buf_size = len(buffer) - # if we didn't exhaust the buffer, read the partial data and return the buffer. - if buf_size > n_remaining: - output_buf[out_index : n_remaining + out_index] = buffer[:n_remaining] - buffer = buffer[n_remaining:] - n_remaining = 0 - self._buffers.appendleft(buffer) - # otherwise exhaust the buffer. - else: - output_buf[out_index : out_index + buf_size] = buffer[:] - out_index += buf_size - n_remaining -= buf_size - return bytes(output_buf) + async def wait_closed(self) -> None: + await self._closed -async def async_sendall(conn: PyMongoBaseProtocol, buf: bytes) -> None: +async def async_sendall(conn: PyMongoProtocol, buf: bytes) -> None: try: await asyncio.wait_for(conn.write(buf), timeout=conn.gettimeout) except asyncio.TimeoutError as exc: @@ -568,18 +692,12 @@ async def async_sendall(conn: PyMongoBaseProtocol, buf: bytes) -> None: raise socket.timeout("timed out") from exc -async def async_receive_kms(conn: AsyncBaseConnection, bytes_needed: int) -> bytes: - """Receive raw bytes from the kms connection.""" - - def callback(result: Any) -> bytes: - return result - - return await _async_receive_data(conn, callback, bytes_needed) - - -async def _async_receive_data( - conn: AsyncBaseConnection, callback: Callable[..., Any], *args: Any -) -> Any: +async def async_receive_message( + conn: AsyncConnection, + request_id: Optional[int], + max_message_size: int = MAX_MESSAGE_SIZE, +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" timeout: Optional[Union[float, int]] timeout = conn.conn.gettimeout if _csot.get_timeout(): @@ -595,8 +713,8 @@ async def _async_receive_data( # timeouts on AWS Lambda and other FaaS environments. timeout = max(deadline - time.monotonic(), 0) - read_task = create_task(conn.conn.get_conn.read(*args)) cancellation_task = create_task(_poll_cancellation(conn)) + read_task = create_task(conn.conn.get_conn.read(request_id, max_message_size)) tasks = [read_task, cancellation_task] try: done, pending = await asyncio.wait( @@ -609,7 +727,14 @@ async def _async_receive_data( if len(done) == 0: raise socket.timeout("timed out") if read_task in done: - return callback(read_task.result()) + data, op_code = read_task.result() + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) raise _OperationCancelled("operation cancelled") except asyncio.CancelledError: for task in tasks: @@ -618,31 +743,6 @@ async def _async_receive_data( raise -async def async_receive_message( - conn: AsyncConnection, - request_id: Optional[int], - max_message_size: int = MAX_MESSAGE_SIZE, -) -> Union[_OpReply, _OpMsg]: - """Receive a raw BSON message or raise socket.error.""" - - def callback(result: Any) -> _OpMsg | _OpReply: - data, op_code = result - try: - unpack_reply = _UNPACK_REPLY[op_code] - except KeyError: - raise ProtocolError( - f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" - ) from None - return unpack_reply(data) - - return await _async_receive_data(conn, callback, request_id, max_message_size) - - -def receive_kms(conn: BaseConnection, bytes_needed: int) -> bytes: - """Receive raw bytes from the kms connection.""" - return conn.conn.sock.recv(bytes_needed) - - def receive_message( conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE ) -> Union[_OpReply, _OpMsg]: @@ -670,7 +770,7 @@ def receive_message( f"Message length ({length!r}) is larger than server max " f"message size ({max_message_size!r})" ) - data: bytes | memoryview + data: memoryview | bytes if op_code == 2012: op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) data = decompress(receive_data(conn, length - 25, deadline), compressor_id) diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index 0536dc3835..ac562af542 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -16,6 +16,7 @@ from __future__ import annotations import asyncio +import functools import socket import ssl import sys @@ -24,6 +25,7 @@ Any, NoReturn, Optional, + Union, ) from pymongo import _csot @@ -35,17 +37,13 @@ _CertificateError, ) from pymongo.helpers_shared import _get_timeout_details, format_timeout_details -from pymongo.network_layer import ( - AsyncNetworkingInterface, - NetworkingInterface, - PyMongoBaseProtocol, - PyMongoProtocol, -) +from pymongo.network_layer import AsyncNetworkingInterface, NetworkingInterface, PyMongoProtocol from pymongo.pool_options import PoolOptions from pymongo.ssl_support import PYSSLError, SSLError, _has_sni SSLErrors = (PYSSLError, SSLError) if TYPE_CHECKING: + from pymongo.pyopenssl_context import _sslConn from pymongo.typings import _Address try: @@ -246,10 +244,64 @@ async def _async_create_connection(address: _Address, options: PoolOptions) -> s raise OSError("getaddrinfo failed") +async def _async_configured_socket( + address: _Address, options: PoolOptions +) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(False): + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor( + None, + functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] + ) + else: + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + async def _configured_protocol_interface( - address: _Address, - options: PoolOptions, - protocol_kls: type[PyMongoBaseProtocol] = PyMongoProtocol, + address: _Address, options: PoolOptions ) -> AsyncNetworkingInterface: """Given (host, port) and PoolOptions, return a configured AsyncNetworkingInterface. @@ -264,7 +316,7 @@ async def _configured_protocol_interface( if ssl_context is None: return AsyncNetworkingInterface( await asyncio.get_running_loop().create_connection( - lambda: protocol_kls(timeout=timeout), sock=sock + lambda: PyMongoProtocol(timeout=timeout), sock=sock ) ) @@ -273,7 +325,7 @@ async def _configured_protocol_interface( # We have to pass hostname / ip address to wrap_socket # to use SSLContext.check_hostname. transport, protocol = await asyncio.get_running_loop().create_connection( # type: ignore[call-overload] - lambda: protocol_kls(timeout=timeout), + lambda: PyMongoProtocol(timeout=timeout), sock=sock, server_hostname=host, ssl=ssl_context, @@ -373,9 +425,56 @@ def _create_connection(address: _Address, options: PoolOptions) -> socket.socket raise OSError("getaddrinfo failed") -def _configured_socket_interface( - address: _Address, options: PoolOptions, *args: Any -) -> NetworkingInterface: +def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(True): + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] + else: + ssl_sock = ssl_context.wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +def _configured_socket_interface(address: _Address, options: PoolOptions) -> NetworkingInterface: """Given (host, port) and PoolOptions, return a NetworkingInterface wrapping a configured socket. Can raise socket.error, ConnectionFailure, or _CertificateError. diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index a08302c211..f9d51a9eab 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -71,11 +71,11 @@ ServerSelectionTimeoutError, ) from pymongo.helpers_shared import _get_timeout_details -from pymongo.network_layer import PyMongoKMSProtocol, receive_kms, sendall +from pymongo.network_layer import sendall from pymongo.operations import UpdateOne from pymongo.pool_options import PoolOptions from pymongo.pool_shared import ( - _configured_socket_interface, + _configured_socket, _raise_connection_failure, ) from pymongo.read_concern import ReadConcern @@ -85,7 +85,6 @@ from pymongo.synchronous.cursor import Cursor from pymongo.synchronous.database import Database from pymongo.synchronous.mongo_client import MongoClient -from pymongo.synchronous.pool import BaseConnection from pymongo.typings import _DocumentType, _DocumentTypeArg from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host from pymongo.write_concern import WriteConcern @@ -93,8 +92,10 @@ if TYPE_CHECKING: from pymongocrypt.mongocrypt import MongoCryptKmsContext + from pymongo.pyopenssl_context import _sslConn from pymongo.typings import _Address + _IS_SYNC = True _HTTPS_PORT = 443 @@ -109,10 +110,9 @@ _KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) -def _connect_kms(address: _Address, opts: PoolOptions) -> BaseConnection: +def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: try: - interface = _configured_socket_interface(address, opts, PyMongoKMSProtocol) - return BaseConnection(interface, opts) + return _configured_socket(address, opts) except Exception as exc: _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) @@ -197,11 +197,19 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: try: conn = _connect_kms(address, opts) try: - sendall(conn.conn.get_conn, message) + sendall(conn, message) while kms_context.bytes_needed > 0: # CSOT: update timeout. - conn.set_conn_timeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = receive_kms(conn, kms_context.bytes_needed) + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data: memoryview | bytes + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + receive_data_socket, + ) + + data = receive_data_socket(conn, kms_context.bytes_needed) if not data: raise OSError("KMS connection closed") kms_context.feed(data) @@ -220,7 +228,7 @@ def kms_request(self, kms_context: MongoCryptKmsContext) -> None: address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) ) finally: - conn.close_conn(None) + conn.close() except MongoCryptError: raise # Propagate MongoCryptError errors directly. except Exception as exc: diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index f35ca4d0fd..f7f6a26c68 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -123,89 +123,7 @@ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 _IS_SYNC = True -class BaseConnection: - """A base connection object for server and kms connections.""" - - def __init__(self, conn: NetworkingInterface, opts: PoolOptions): - self.conn = conn - self.socket_checker: SocketChecker = SocketChecker() - self.cancel_context: _CancellationContext = _CancellationContext() - self.is_sdam = False - self.closed = False - self.last_timeout: float | None = None - self.more_to_come = False - self.opts = opts - self.max_wire_version = -1 - - def set_conn_timeout(self, timeout: Optional[float]) -> None: - """Cache last timeout to avoid duplicate calls to conn.settimeout.""" - if timeout == self.last_timeout: - return - self.last_timeout = timeout - self.conn.get_conn.settimeout(timeout) - - def apply_timeout( - self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] - ) -> Optional[float]: - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - # Reset the socket timeout unless we're performing a streaming monitor check. - if not self.more_to_come: - self.set_conn_timeout(self.opts.socket_timeout) - return None - # RTT validation. - rtt = _csot.get_rtt() - if rtt is None: - rtt = self.connect_rtt - max_time_ms = timeout - rtt - if max_time_ms < 0: - timeout_details = _get_timeout_details(self.opts) - formatted = format_timeout_details(timeout_details) - # CSOT: raise an error without running the command since we know it will time out. - errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" - if self.max_wire_version != -1: - raise ExecutionTimeout( - errmsg, - 50, - {"ok": 0, "errmsg": errmsg, "code": 50}, - self.max_wire_version, - ) - else: - raise TimeoutError(errmsg) - if cmd is not None: - cmd["maxTimeMS"] = int(max_time_ms * 1000) - self.set_conn_timeout(timeout) - return timeout - - def close_conn(self, reason: Optional[str]) -> None: - """Close this connection with a reason.""" - if self.closed: - return - self._close_conn() - - def _close_conn(self) -> None: - """Close this connection.""" - if self.closed: - return - self.closed = True - self.cancel_context.cancel() - # Note: We catch exceptions to avoid spurious errors on interpreter - # shutdown. - try: - self.conn.close() - except Exception: # noqa: S110 - pass - - def conn_closed(self) -> bool: - """Return True if we know socket has been closed, False otherwise.""" - if _IS_SYNC: - return self.socket_checker.socket_closed(self.conn.get_conn) - else: - return self.conn.is_closing() - - -class Connection(BaseConnection): +class Connection: """Store a connection with some metadata. :param conn: a raw connection object @@ -223,27 +141,29 @@ def __init__( id: int, is_sdam: bool, ): - super().__init__(conn, pool.opts) self.pool_ref = weakref.ref(pool) - self.address: tuple[str, int] = address - self.id: int = id + self.conn = conn + self.address = address + self.id = id self.is_sdam = is_sdam + self.closed = False self.last_checkin_time = time.monotonic() self.performed_handshake = False self.is_writable: bool = False self.max_wire_version = MAX_WIRE_VERSION - self.max_bson_size: int = MAX_BSON_SIZE - self.max_message_size: int = MAX_MESSAGE_SIZE - self.max_write_batch_size: int = MAX_WRITE_BATCH_SIZE + self.max_bson_size = MAX_BSON_SIZE + self.max_message_size = MAX_MESSAGE_SIZE + self.max_write_batch_size = MAX_WRITE_BATCH_SIZE self.supports_sessions = False self.hello_ok: bool = False - self.is_mongos: bool = False + self.is_mongos = False self.op_msg_enabled = False self.listeners = pool.opts._event_listeners self.enabled_for_cmap = pool.enabled_for_cmap self.enabled_for_logging = pool.enabled_for_logging self.compression_settings = pool.opts._compression_settings self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None + self.socket_checker: SocketChecker = SocketChecker() self.oidc_token_gen_id: Optional[int] = None # Support for mechanism negotiation on the initial handshake. self.negotiated_mechs: Optional[list[str]] = None @@ -254,6 +174,9 @@ def __init__( self.pool_gen = pool.gen self.generation = self.pool_gen.get_overall() self.ready = False + self.cancel_context: _CancellationContext = _CancellationContext() + self.opts = pool.opts + self.more_to_come: bool = False # For load balancer support. self.service_id: Optional[ObjectId] = None self.server_connection_id: Optional[int] = None @@ -269,6 +192,44 @@ def __init__( # For gossiping $clusterTime from the connection handshake to the client. self._cluster_time = None + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + def pin_txn(self) -> None: self.pinned_txn = True assert not self.pinned_cursor @@ -610,6 +571,26 @@ def close_conn(self, reason: Optional[str]) -> None: error=reason, ) + def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + def send_cluster_time( self, command: MutableMapping[str, Any], diff --git a/tools/synchro.py b/tools/synchro.py index 9a760c0ad7..e502f96281 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -120,9 +120,9 @@ "_async_create_lock": "_create_lock", "_async_create_condition": "_create_condition", "_async_cond_wait": "_cond_wait", - "async_receive_kms": "receive_kms", "AsyncNetworkingInterface": "NetworkingInterface", "_configured_protocol_interface": "_configured_socket_interface", + "_async_configured_socket": "_configured_socket", "SpecRunnerTask": "SpecRunnerThread", "AsyncMockConnection": "MockConnection", "AsyncMockPool": "MockPool", From 8cf65796daf32be283367b2ebc40a41dfe034b9b Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 16 Sep 2025 11:01:47 -0500 Subject: [PATCH 2060/2111] PYTHON-5542 Prepare for 4.15.1 Release (#2537) --- doc/changelog.rst | 33 +++++++++++++++++++++++++++++---- pymongo/_version.py | 2 +- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 64c61e5877..082c22fafc 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,28 @@ Changelog ========= +Changes in Version 4.15.1 (2025/09/16) +-------------------------------------- + +Version 4.15.1 is a bug fix release. + +- Fixed a bug in :meth:`~pymongo.synchronous.encryption.ClientEncryption.encrypt` + and :meth:`~pymongo.asynchronous.encryption.AsyncClientEncryption.encrypt` + that would cause a ``TypeError`` when using ``pymongocrypt<1.16`` by passing + an unsupported ``type_opts`` parameter even if Queryable Encryption text + queries beta was not used. + +- Fixed a bug in ``AsyncMongoClient`` that caused a ``ServerSelectionTimeoutError`` + when used with ``uvicorn``, ``FastAPI``, or ``uvloop``. + +Issues Resolved +............... + +See the `PyMongo 4.15.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=46486 + Changes in Version 4.15.0 (2025/09/10) -------------------------------------- @@ -13,8 +35,10 @@ PyMongo 4.15 brings a number of changes including: :attr:`~pymongo.encryption.QueryType.SUBSTRINGPREVIEW`, as part of the experimental Queryable Encryption text queries beta. ``pymongocrypt>=1.16`` is required for text query support. -- Added :class:`bson.decimal128.DecimalEncoder` and :class:`bson.decimal128.DecimalDecoder` - to support encoding and decoding of BSON Decimal128 values to decimal.Decimal values using the TypeRegistry API. +- Added :class:`bson.decimal128.DecimalEncoder` and + :class:`bson.decimal128.DecimalDecoder` + to support encoding and decoding of BSON Decimal128 values to + decimal.Decimal values using the TypeRegistry API. - Added support for Windows ``arm64`` wheels. Changes in Version 4.14.1 (2025/08/19) @@ -22,8 +46,9 @@ Changes in Version 4.14.1 (2025/08/19) Version 4.14.1 is a bug fix release. - - Fixed a bug in ``MongoClient.append_metadata()`` and ``AsyncMongoClient.append_metadata()`` - that allowed duplicate ``DriverInfo.name`` to be appended to the metadata. +- Fixed a bug in ``MongoClient.append_metadata()`` and + ``AsyncMongoClient.append_metadata()`` + that allowed duplicate ``DriverInfo.name`` to be appended to the metadata. Issues Resolved ............... diff --git a/pymongo/_version.py b/pymongo/_version.py index c6ba82ab13..7abecf4416 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.16.0.dev0" +__version__ = "4.15.1" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 4b4c94999786af9834c6f8bd17e593e60f1901d8 Mon Sep 17 00:00:00 2001 From: "mongodb-dbx-release-bot[bot]" <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> Date: Tue, 16 Sep 2025 16:43:29 +0000 Subject: [PATCH 2061/2111] BUMP 4.16.0.dev0 Signed-off-by: mongodb-dbx-release-bot[bot] <167856002+mongodb-dbx-release-bot[bot]@users.noreply.github.com> --- pymongo/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pymongo/_version.py b/pymongo/_version.py index 7abecf4416..c6ba82ab13 100644 --- a/pymongo/_version.py +++ b/pymongo/_version.py @@ -18,7 +18,7 @@ import re from typing import List, Tuple, Union -__version__ = "4.15.1" +__version__ = "4.16.0.dev0" def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]: From 4b4d74971c58716137e09bc235b9f79f56615759 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 17 Sep 2025 06:38:24 -0500 Subject: [PATCH 2062/2111] PYTHON-5500 Account for extra flakiness in test_dns_failures_logging (#2533) --- test/asynchronous/test_srv_polling.py | 2 +- test/test_srv_polling.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py index d6f0f6a18f..3d4aed1bc1 100644 --- a/test/asynchronous/test_srv_polling.py +++ b/test/asynchronous/test_srv_polling.py @@ -225,7 +225,7 @@ def response_callback(*args): await self.run_scenario(response_callback, False) - @flaky(reason="PYTHON-5500") + @flaky(reason="PYTHON-5500", max_runs=3) async def test_dns_failures_logging(self): from dns import exception diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 09c900cf09..f5096bea01 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -225,7 +225,7 @@ def response_callback(*args): self.run_scenario(response_callback, False) - @flaky(reason="PYTHON-5500") + @flaky(reason="PYTHON-5500", max_runs=3) def test_dns_failures_logging(self): from dns import exception From 5787acc2713986764b6a0f8a3b414b2f2c781553 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 17 Sep 2025 06:38:47 -0500 Subject: [PATCH 2063/2111] PYTHON-5556 Keep uv lock file up to date (#2534) --- .pre-commit-config.yaml | 6 +++ CONTRIBUTING.md | 8 ++++ bson/__init__.py | 4 +- justfile | 8 ++-- test/test_typing.py | 8 ++-- uv.lock | 86 ++++++++++++++++++++--------------------- 6 files changed, 67 insertions(+), 53 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9c67b8283b..ac129f95f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -105,6 +105,12 @@ repos: # - test/test_client.py:188: te ==> the, be, we, to args: ["-L", "fle,fo,infinit,isnt,nin,te,aks"] +- repo: https://github.com/astral-sh/uv-pre-commit + # uv version. + rev: 0.8.17 + hooks: + - id: uv-lock + - repo: local hooks: - id: executable-shell diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dc5ee4fe8f..ed52727765 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -514,3 +514,11 @@ To profile a test script and generate a flame graph, follow these steps: (Note: on macOS you will need to run this command using `sudo` to allow `py-spy` to attach to the Python process.) 4. If you need to include native code (for example the C extensions), profiling should be done on a Linux system, as macOS and Windows do not support the `--native` option of `py-spy`. Creating an ubuntu Evergreen spawn host and using `scp` to copy the flamegraph `.svg` file back to your local machine is the best way to do this. + +## Dependabot updates + +Dependabot will raise PRs at most once per week, grouped by GitHub Actions updates and Python requirement +file updates. We have a pre-commit hook that will update the `uv.lock` file when requirements change. +To update the lock file on a failing PR, you can use a method like `gh pr checkout `, then run +`just lint uv-lock` to update the lock file, and then push the changes. If a typing dependency has changed, +also run `just typing` and handle any new findings. diff --git a/bson/__init__.py b/bson/__init__.py index b655e30c2c..e677d8cfdd 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1327,7 +1327,7 @@ def decode_iter( elements = data[position : position + obj_size] position += obj_size - yield _bson_to_dict(elements, opts) # type:ignore[misc] + yield _bson_to_dict(elements, opts) @overload @@ -1373,7 +1373,7 @@ def decode_file_iter( raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 elements = size_data + file_obj.read(max(0, obj_size)) - yield _bson_to_dict(elements, opts) # type:ignore[arg-type, misc] + yield _bson_to_dict(elements, opts) # type:ignore[misc] def is_valid(bson: bytes) -> bool: diff --git a/justfile b/justfile index 7ac5bd33ff..c129b2c199 100644 --- a/justfile +++ b/justfile @@ -50,12 +50,12 @@ typing-pyright: && resync {{typing_run}} pyright -p strict_pyrightconfig.json test/test_typing_strict.py [group('lint')] -lint: && resync - uv run pre-commit run --all-files +lint *args="": && resync + uv run pre-commit run --all-files {{args}} [group('lint')] -lint-manual: && resync - uv run pre-commit run --all-files --hook-stage manual +lint-manual *args="": && resync + uv run pre-commit run --all-files --hook-stage manual {{args}} [group('test')] test *args="-v --durations=5 --maxfail=10": && resync diff --git a/test/test_typing.py b/test/test_typing.py index 8709186e12..7240e59c06 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -170,7 +170,7 @@ def test_bulk_write_heterogeneous(self): InsertOne(Movie(name="American Graffiti", year=1973)), ReplaceOne( {}, - {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[typeddict-item] + {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[arg-type] ), DeleteOne({}), ] @@ -243,7 +243,7 @@ def test_with_options(self) -> None: assert retrieved is not None assert retrieved["name"] == "foo" # We expect a type error here. - assert retrieved["other"] == 1 # type:ignore[typeddict-item] + assert retrieved["other"] == 1 # type:ignore[misc] class TestDecode(unittest.TestCase): @@ -416,11 +416,11 @@ def test_typeddict_document_type_insertion(self) -> None: bad_mov = {"name": "THX-1138", "year": "WRONG TYPE"} bad_movie = Movie(name="THX-1138", year="WRONG TYPE") # type: ignore[typeddict-item] coll.insert_one(bad_mov) # type:ignore[arg-type] - coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[typeddict-item] + coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[arg-type] coll.insert_one(bad_movie) coll.insert_many([bad_mov]) # type: ignore[list-item] coll.insert_many( - [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[typeddict-item] + [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[list-item] ) coll.insert_many([bad_movie]) diff --git a/uv.lock b/uv.lock index 77f6a46385..ff7bd6fe24 100644 --- a/uv.lock +++ b/uv.lock @@ -1047,7 +1047,7 @@ dependencies = [ [[package]] name = "mypy" -version = "1.17.1" +version = "1.18.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, @@ -1055,45 +1055,45 @@ dependencies = [ { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8e/22/ea637422dedf0bf36f3ef238eab4e455e2a0dcc3082b5cc067615347ab8e/mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01", size = 3352570, upload-time = "2025-07-31T07:54:19.204Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/77/a9/3d7aa83955617cdf02f94e50aab5c830d205cfa4320cf124ff64acce3a8e/mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972", size = 11003299, upload-time = "2025-07-31T07:54:06.425Z" }, - { url = "https://files.pythonhosted.org/packages/83/e8/72e62ff837dd5caaac2b4a5c07ce769c8e808a00a65e5d8f94ea9c6f20ab/mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7", size = 10125451, upload-time = "2025-07-31T07:53:52.974Z" }, - { url = "https://files.pythonhosted.org/packages/7d/10/f3f3543f6448db11881776f26a0ed079865926b0c841818ee22de2c6bbab/mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df", size = 11916211, upload-time = "2025-07-31T07:53:18.879Z" }, - { url = "https://files.pythonhosted.org/packages/06/bf/63e83ed551282d67bb3f7fea2cd5561b08d2bb6eb287c096539feb5ddbc5/mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390", size = 12652687, upload-time = "2025-07-31T07:53:30.544Z" }, - { url = "https://files.pythonhosted.org/packages/69/66/68f2eeef11facf597143e85b694a161868b3b006a5fbad50e09ea117ef24/mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94", size = 12896322, upload-time = "2025-07-31T07:53:50.74Z" }, - { url = "https://files.pythonhosted.org/packages/a3/87/8e3e9c2c8bd0d7e071a89c71be28ad088aaecbadf0454f46a540bda7bca6/mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b", size = 9507962, upload-time = "2025-07-31T07:53:08.431Z" }, - { url = "https://files.pythonhosted.org/packages/46/cf/eadc80c4e0a70db1c08921dcc220357ba8ab2faecb4392e3cebeb10edbfa/mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58", size = 10921009, upload-time = "2025-07-31T07:53:23.037Z" }, - { url = "https://files.pythonhosted.org/packages/5d/c1/c869d8c067829ad30d9bdae051046561552516cfb3a14f7f0347b7d973ee/mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5", size = 10047482, upload-time = "2025-07-31T07:53:26.151Z" }, - { url = "https://files.pythonhosted.org/packages/98/b9/803672bab3fe03cee2e14786ca056efda4bb511ea02dadcedde6176d06d0/mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd", size = 11832883, upload-time = "2025-07-31T07:53:47.948Z" }, - { url = "https://files.pythonhosted.org/packages/88/fb/fcdac695beca66800918c18697b48833a9a6701de288452b6715a98cfee1/mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b", size = 12566215, upload-time = "2025-07-31T07:54:04.031Z" }, - { url = "https://files.pythonhosted.org/packages/7f/37/a932da3d3dace99ee8eb2043b6ab03b6768c36eb29a02f98f46c18c0da0e/mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5", size = 12751956, upload-time = "2025-07-31T07:53:36.263Z" }, - { url = "https://files.pythonhosted.org/packages/8c/cf/6438a429e0f2f5cab8bc83e53dbebfa666476f40ee322e13cac5e64b79e7/mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b", size = 9507307, upload-time = "2025-07-31T07:53:59.734Z" }, - { url = "https://files.pythonhosted.org/packages/17/a2/7034d0d61af8098ec47902108553122baa0f438df8a713be860f7407c9e6/mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb", size = 11086295, upload-time = "2025-07-31T07:53:28.124Z" }, - { url = "https://files.pythonhosted.org/packages/14/1f/19e7e44b594d4b12f6ba8064dbe136505cec813549ca3e5191e40b1d3cc2/mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403", size = 10112355, upload-time = "2025-07-31T07:53:21.121Z" }, - { url = "https://files.pythonhosted.org/packages/5b/69/baa33927e29e6b4c55d798a9d44db5d394072eef2bdc18c3e2048c9ed1e9/mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056", size = 11875285, upload-time = "2025-07-31T07:53:55.293Z" }, - { url = "https://files.pythonhosted.org/packages/90/13/f3a89c76b0a41e19490b01e7069713a30949d9a6c147289ee1521bcea245/mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341", size = 12737895, upload-time = "2025-07-31T07:53:43.623Z" }, - { url = "https://files.pythonhosted.org/packages/23/a1/c4ee79ac484241301564072e6476c5a5be2590bc2e7bfd28220033d2ef8f/mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb", size = 12931025, upload-time = "2025-07-31T07:54:17.125Z" }, - { url = "https://files.pythonhosted.org/packages/89/b8/7409477be7919a0608900e6320b155c72caab4fef46427c5cc75f85edadd/mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19", size = 9584664, upload-time = "2025-07-31T07:54:12.842Z" }, - { url = "https://files.pythonhosted.org/packages/5b/82/aec2fc9b9b149f372850291827537a508d6c4d3664b1750a324b91f71355/mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7", size = 11075338, upload-time = "2025-07-31T07:53:38.873Z" }, - { url = "https://files.pythonhosted.org/packages/07/ac/ee93fbde9d2242657128af8c86f5d917cd2887584cf948a8e3663d0cd737/mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81", size = 10113066, upload-time = "2025-07-31T07:54:14.707Z" }, - { url = "https://files.pythonhosted.org/packages/5a/68/946a1e0be93f17f7caa56c45844ec691ca153ee8b62f21eddda336a2d203/mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6", size = 11875473, upload-time = "2025-07-31T07:53:14.504Z" }, - { url = "https://files.pythonhosted.org/packages/9f/0f/478b4dce1cb4f43cf0f0d00fba3030b21ca04a01b74d1cd272a528cf446f/mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849", size = 12744296, upload-time = "2025-07-31T07:53:03.896Z" }, - { url = "https://files.pythonhosted.org/packages/ca/70/afa5850176379d1b303f992a828de95fc14487429a7139a4e0bdd17a8279/mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14", size = 12914657, upload-time = "2025-07-31T07:54:08.576Z" }, - { url = "https://files.pythonhosted.org/packages/53/f9/4a83e1c856a3d9c8f6edaa4749a4864ee98486e9b9dbfbc93842891029c2/mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a", size = 9593320, upload-time = "2025-07-31T07:53:01.341Z" }, - { url = "https://files.pythonhosted.org/packages/38/56/79c2fac86da57c7d8c48622a05873eaab40b905096c33597462713f5af90/mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733", size = 11040037, upload-time = "2025-07-31T07:54:10.942Z" }, - { url = "https://files.pythonhosted.org/packages/4d/c3/adabe6ff53638e3cad19e3547268482408323b1e68bf082c9119000cd049/mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd", size = 10131550, upload-time = "2025-07-31T07:53:41.307Z" }, - { url = "https://files.pythonhosted.org/packages/b8/c5/2e234c22c3bdeb23a7817af57a58865a39753bde52c74e2c661ee0cfc640/mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0", size = 11872963, upload-time = "2025-07-31T07:53:16.878Z" }, - { url = "https://files.pythonhosted.org/packages/ab/26/c13c130f35ca8caa5f2ceab68a247775648fdcd6c9a18f158825f2bc2410/mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a", size = 12710189, upload-time = "2025-07-31T07:54:01.962Z" }, - { url = "https://files.pythonhosted.org/packages/82/df/c7d79d09f6de8383fe800521d066d877e54d30b4fb94281c262be2df84ef/mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91", size = 12900322, upload-time = "2025-07-31T07:53:10.551Z" }, - { url = "https://files.pythonhosted.org/packages/b8/98/3d5a48978b4f708c55ae832619addc66d677f6dc59f3ebad71bae8285ca6/mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed", size = 9751879, upload-time = "2025-07-31T07:52:56.683Z" }, - { url = "https://files.pythonhosted.org/packages/29/cb/673e3d34e5d8de60b3a61f44f80150a738bff568cd6b7efb55742a605e98/mypy-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5d1092694f166a7e56c805caaf794e0585cabdbf1df36911c414e4e9abb62ae9", size = 10992466, upload-time = "2025-07-31T07:53:57.574Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d0/fe1895836eea3a33ab801561987a10569df92f2d3d4715abf2cfeaa29cb2/mypy-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79d44f9bfb004941ebb0abe8eff6504223a9c1ac51ef967d1263c6572bbebc99", size = 10117638, upload-time = "2025-07-31T07:53:34.256Z" }, - { url = "https://files.pythonhosted.org/packages/97/f3/514aa5532303aafb95b9ca400a31054a2bd9489de166558c2baaeea9c522/mypy-1.17.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b01586eed696ec905e61bd2568f48740f7ac4a45b3a468e6423a03d3788a51a8", size = 11915673, upload-time = "2025-07-31T07:52:59.361Z" }, - { url = "https://files.pythonhosted.org/packages/ab/c3/c0805f0edec96fe8e2c048b03769a6291523d509be8ee7f56ae922fa3882/mypy-1.17.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43808d9476c36b927fbcd0b0255ce75efe1b68a080154a38ae68a7e62de8f0f8", size = 12649022, upload-time = "2025-07-31T07:53:45.92Z" }, - { url = "https://files.pythonhosted.org/packages/45/3e/d646b5a298ada21a8512fa7e5531f664535a495efa672601702398cea2b4/mypy-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:feb8cc32d319edd5859da2cc084493b3e2ce5e49a946377663cc90f6c15fb259", size = 12895536, upload-time = "2025-07-31T07:53:06.17Z" }, - { url = "https://files.pythonhosted.org/packages/14/55/e13d0dcd276975927d1f4e9e2ec4fd409e199f01bdc671717e673cc63a22/mypy-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d7598cf74c3e16539d4e2f0b8d8c318e00041553d83d4861f87c7a72e95ac24d", size = 9512564, upload-time = "2025-07-31T07:53:12.346Z" }, - { url = "https://files.pythonhosted.org/packages/1d/f3/8fcd2af0f5b806f6cf463efaffd3c9548a28f84220493ecd38d127b6b66d/mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9", size = 2283411, upload-time = "2025-07-31T07:53:24.664Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/14/a3/931e09fc02d7ba96da65266884da4e4a8806adcdb8a57faaacc6edf1d538/mypy-1.18.1.tar.gz", hash = "sha256:9e988c64ad3ac5987f43f5154f884747faf62141b7f842e87465b45299eea5a9", size = 3448447, upload-time = "2025-09-11T23:00:47.067Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/06/29ea5a34c23938ae93bc0040eb2900eb3f0f2ef4448cc59af37ab3ddae73/mypy-1.18.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2761b6ae22a2b7d8e8607fb9b81ae90bc2e95ec033fd18fa35e807af6c657763", size = 12811535, upload-time = "2025-09-11T22:58:55.399Z" }, + { url = "https://files.pythonhosted.org/packages/a8/40/04c38cb04fa9f1dc224b3e9634021a92c47b1569f1c87dfe6e63168883bb/mypy-1.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5b10e3ea7f2eec23b4929a3fabf84505da21034a4f4b9613cda81217e92b74f3", size = 11897559, upload-time = "2025-09-11T22:59:48.041Z" }, + { url = "https://files.pythonhosted.org/packages/46/bf/4c535bd45ea86cebbc1a3b6a781d442f53a4883f322ebd2d442db6444d0b/mypy-1.18.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:261fbfced030228bc0f724d5d92f9ae69f46373bdfd0e04a533852677a11dbea", size = 12507430, upload-time = "2025-09-11T22:59:30.415Z" }, + { url = "https://files.pythonhosted.org/packages/e2/e1/cbefb16f2be078d09e28e0b9844e981afb41f6ffc85beb68b86c6976e641/mypy-1.18.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4dc6b34a1c6875e6286e27d836a35c0d04e8316beac4482d42cfea7ed2527df8", size = 13243717, upload-time = "2025-09-11T22:59:11.297Z" }, + { url = "https://files.pythonhosted.org/packages/65/e8/3e963da63176f16ca9caea7fa48f1bc8766de317cd961528c0391565fd47/mypy-1.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1cabb353194d2942522546501c0ff75c4043bf3b63069cb43274491b44b773c9", size = 13492052, upload-time = "2025-09-11T23:00:09.29Z" }, + { url = "https://files.pythonhosted.org/packages/4b/09/d5d70c252a3b5b7530662d145437bd1de15f39fa0b48a27ee4e57d254aa1/mypy-1.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:738b171690c8e47c93569635ee8ec633d2cdb06062f510b853b5f233020569a9", size = 9765846, upload-time = "2025-09-11T22:58:26.198Z" }, + { url = "https://files.pythonhosted.org/packages/32/28/47709d5d9e7068b26c0d5189c8137c8783e81065ad1102b505214a08b548/mypy-1.18.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c903857b3e28fc5489e54042684a9509039ea0aedb2a619469438b544ae1961", size = 12734635, upload-time = "2025-09-11T23:00:24.983Z" }, + { url = "https://files.pythonhosted.org/packages/7c/12/ee5c243e52497d0e59316854041cf3b3130131b92266d0764aca4dec3c00/mypy-1.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a0c8392c19934c2b6c65566d3a6abdc6b51d5da7f5d04e43f0eb627d6eeee65", size = 11817287, upload-time = "2025-09-11T22:59:07.38Z" }, + { url = "https://files.pythonhosted.org/packages/48/bd/2aeb950151005fe708ab59725afed7c4aeeb96daf844f86a05d4b8ac34f8/mypy-1.18.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f85eb7efa2ec73ef63fc23b8af89c2fe5bf2a4ad985ed2d3ff28c1bb3c317c92", size = 12430464, upload-time = "2025-09-11T22:58:48.084Z" }, + { url = "https://files.pythonhosted.org/packages/71/e8/7a20407aafb488acb5734ad7fb5e8c2ef78d292ca2674335350fa8ebef67/mypy-1.18.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:82ace21edf7ba8af31c3308a61dc72df30500f4dbb26f99ac36b4b80809d7e94", size = 13164555, upload-time = "2025-09-11T23:00:13.803Z" }, + { url = "https://files.pythonhosted.org/packages/e8/c9/5f39065252e033b60f397096f538fb57c1d9fd70a7a490f314df20dd9d64/mypy-1.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a2dfd53dfe632f1ef5d161150a4b1f2d0786746ae02950eb3ac108964ee2975a", size = 13359222, upload-time = "2025-09-11T23:00:33.469Z" }, + { url = "https://files.pythonhosted.org/packages/85/b6/d54111ef3c1e55992cd2ec9b8b6ce9c72a407423e93132cae209f7e7ba60/mypy-1.18.1-cp311-cp311-win_amd64.whl", hash = "sha256:320f0ad4205eefcb0e1a72428dde0ad10be73da9f92e793c36228e8ebf7298c0", size = 9760441, upload-time = "2025-09-11T23:00:44.826Z" }, + { url = "https://files.pythonhosted.org/packages/e7/14/1c3f54d606cb88a55d1567153ef3a8bc7b74702f2ff5eb64d0994f9e49cb/mypy-1.18.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:502cde8896be8e638588b90fdcb4c5d5b8c1b004dfc63fd5604a973547367bb9", size = 12911082, upload-time = "2025-09-11T23:00:41.465Z" }, + { url = "https://files.pythonhosted.org/packages/90/83/235606c8b6d50a8eba99773add907ce1d41c068edb523f81eb0d01603a83/mypy-1.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7509549b5e41be279afc1228242d0e397f1af2919a8f2877ad542b199dc4083e", size = 11919107, upload-time = "2025-09-11T22:58:40.903Z" }, + { url = "https://files.pythonhosted.org/packages/ca/25/4e2ce00f8d15b99d0c68a2536ad63e9eac033f723439ef80290ec32c1ff5/mypy-1.18.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5956ecaabb3a245e3f34100172abca1507be687377fe20e24d6a7557e07080e2", size = 12472551, upload-time = "2025-09-11T22:58:37.272Z" }, + { url = "https://files.pythonhosted.org/packages/32/bb/92642a9350fc339dd9dcefcf6862d171b52294af107d521dce075f32f298/mypy-1.18.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8750ceb014a96c9890421c83f0db53b0f3b8633e2864c6f9bc0a8e93951ed18d", size = 13340554, upload-time = "2025-09-11T22:59:38.756Z" }, + { url = "https://files.pythonhosted.org/packages/cd/ee/38d01db91c198fb6350025d28f9719ecf3c8f2c55a0094bfbf3ef478cc9a/mypy-1.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fb89ea08ff41adf59476b235293679a6eb53a7b9400f6256272fb6029bec3ce5", size = 13530933, upload-time = "2025-09-11T22:59:20.228Z" }, + { url = "https://files.pythonhosted.org/packages/da/8d/6d991ae631f80d58edbf9d7066e3f2a96e479dca955d9a968cd6e90850a3/mypy-1.18.1-cp312-cp312-win_amd64.whl", hash = "sha256:2657654d82fcd2a87e02a33e0d23001789a554059bbf34702d623dafe353eabf", size = 9828426, upload-time = "2025-09-11T23:00:21.007Z" }, + { url = "https://files.pythonhosted.org/packages/e4/ec/ef4a7260e1460a3071628a9277a7579e7da1b071bc134ebe909323f2fbc7/mypy-1.18.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d70d2b5baf9b9a20bc9c730015615ae3243ef47fb4a58ad7b31c3e0a59b5ef1f", size = 12918671, upload-time = "2025-09-11T22:58:29.814Z" }, + { url = "https://files.pythonhosted.org/packages/a1/82/0ea6c3953f16223f0b8eda40c1aeac6bd266d15f4902556ae6e91f6fca4c/mypy-1.18.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b8367e33506300f07a43012fc546402f283c3f8bcff1dc338636affb710154ce", size = 11913023, upload-time = "2025-09-11T23:00:29.049Z" }, + { url = "https://files.pythonhosted.org/packages/ae/ef/5e2057e692c2690fc27b3ed0a4dbde4388330c32e2576a23f0302bc8358d/mypy-1.18.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:913f668ec50c3337b89df22f973c1c8f0b29ee9e290a8b7fe01cc1ef7446d42e", size = 12473355, upload-time = "2025-09-11T23:00:04.544Z" }, + { url = "https://files.pythonhosted.org/packages/98/43/b7e429fc4be10e390a167b0cd1810d41cb4e4add4ae50bab96faff695a3b/mypy-1.18.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a0e70b87eb27b33209fa4792b051c6947976f6ab829daa83819df5f58330c71", size = 13346944, upload-time = "2025-09-11T22:58:23.024Z" }, + { url = "https://files.pythonhosted.org/packages/89/4e/899dba0bfe36bbd5b7c52e597de4cf47b5053d337b6d201a30e3798e77a6/mypy-1.18.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c378d946e8a60be6b6ede48c878d145546fb42aad61df998c056ec151bf6c746", size = 13512574, upload-time = "2025-09-11T22:59:52.152Z" }, + { url = "https://files.pythonhosted.org/packages/f5/f8/7661021a5b0e501b76440454d786b0f01bb05d5c4b125fcbda02023d0250/mypy-1.18.1-cp313-cp313-win_amd64.whl", hash = "sha256:2cd2c1e0f3a7465f22731987fff6fc427e3dcbb4ca5f7db5bbeaff2ff9a31f6d", size = 9837684, upload-time = "2025-09-11T22:58:44.454Z" }, + { url = "https://files.pythonhosted.org/packages/bf/87/7b173981466219eccc64c107cf8e5ab9eb39cc304b4c07df8e7881533e4f/mypy-1.18.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ba24603c58e34dd5b096dfad792d87b304fc6470cbb1c22fd64e7ebd17edcc61", size = 12900265, upload-time = "2025-09-11T22:59:03.4Z" }, + { url = "https://files.pythonhosted.org/packages/ae/cc/b10e65bae75b18a5ac8f81b1e8e5867677e418f0dd2c83b8e2de9ba96ebd/mypy-1.18.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ed36662fb92ae4cb3cacc682ec6656208f323bbc23d4b08d091eecfc0863d4b5", size = 11942890, upload-time = "2025-09-11T23:00:00.607Z" }, + { url = "https://files.pythonhosted.org/packages/39/d4/aeefa07c44d09f4c2102e525e2031bc066d12e5351f66b8a83719671004d/mypy-1.18.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:040ecc95e026f71a9ad7956fea2724466602b561e6a25c2e5584160d3833aaa8", size = 12472291, upload-time = "2025-09-11T22:59:43.425Z" }, + { url = "https://files.pythonhosted.org/packages/c6/07/711e78668ff8e365f8c19735594ea95938bff3639a4c46a905e3ed8ff2d6/mypy-1.18.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:937e3ed86cb731276706e46e03512547e43c391a13f363e08d0fee49a7c38a0d", size = 13318610, upload-time = "2025-09-11T23:00:17.604Z" }, + { url = "https://files.pythonhosted.org/packages/ca/85/df3b2d39339c31d360ce299b418c55e8194ef3205284739b64962f6074e7/mypy-1.18.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1f95cc4f01c0f1701ca3b0355792bccec13ecb2ec1c469e5b85a6ef398398b1d", size = 13513697, upload-time = "2025-09-11T22:58:59.534Z" }, + { url = "https://files.pythonhosted.org/packages/b1/df/462866163c99ea73bb28f0eb4d415c087e30de5d36ee0f5429d42e28689b/mypy-1.18.1-cp314-cp314-win_amd64.whl", hash = "sha256:e4f16c0019d48941220ac60b893615be2f63afedaba6a0801bdcd041b96991ce", size = 9985739, upload-time = "2025-09-11T22:58:51.644Z" }, + { url = "https://files.pythonhosted.org/packages/64/1a/9005d78ffedaac58b3ee3a44d53a65b09ac1d27c36a00ade849015b8e014/mypy-1.18.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e37763af63a8018308859bc83d9063c501a5820ec5bd4a19f0a2ac0d1c25c061", size = 12809347, upload-time = "2025-09-11T22:59:15.468Z" }, + { url = "https://files.pythonhosted.org/packages/46/b3/c932216b281f7c223a2c8b98b9c8e1eb5bea1650c11317ac778cfc3778e4/mypy-1.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:51531b6e94f34b8bd8b01dee52bbcee80daeac45e69ec5c36e25bce51cbc46e6", size = 11899906, upload-time = "2025-09-11T22:59:56.473Z" }, + { url = "https://files.pythonhosted.org/packages/30/6b/542daf553f97275677c35d183404d1d83b64cea315f452195c5a5782a225/mypy-1.18.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dbfdea20e90e9c5476cea80cfd264d8e197c6ef2c58483931db2eefb2f7adc14", size = 12504415, upload-time = "2025-09-11T23:00:37.332Z" }, + { url = "https://files.pythonhosted.org/packages/37/d3/061d0d861377ea3fdb03784d11260bfa2adbb4eeeb24b63bd1eea7b6080c/mypy-1.18.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:99f272c9b59f5826fffa439575716276d19cbf9654abc84a2ba2d77090a0ba14", size = 13243466, upload-time = "2025-09-11T22:58:18.562Z" }, + { url = "https://files.pythonhosted.org/packages/7d/5e/6e88a79bdfec8d01ba374c391150c94f6c74545bdc37bdc490a7f30c5095/mypy-1.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8c05a7f8c00300a52f3a4fcc95a185e99bf944d7e851ff141bae8dcf6dcfeac4", size = 13493539, upload-time = "2025-09-11T22:59:24.479Z" }, + { url = "https://files.pythonhosted.org/packages/92/5a/a14a82e44ed76998d73a070723b6584963fdb62f597d373c8b22c3a3da3d/mypy-1.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:2fbcecbe5cf213ba294aa8c0b8c104400bf7bb64db82fb34fe32a205da4b3531", size = 9764809, upload-time = "2025-09-11T22:58:33.133Z" }, + { url = "https://files.pythonhosted.org/packages/e0/1d/4b97d3089b48ef3d904c9ca69fab044475bd03245d878f5f0b3ea1daf7ce/mypy-1.18.1-py3-none-any.whl", hash = "sha256:b76a4de66a0ac01da1be14ecc8ae88ddea33b8380284a9e3eae39d57ebcbe26e", size = 2352212, upload-time = "2025-09-11T22:59:26.576Z" }, ] [[package]] @@ -1345,7 +1345,7 @@ perf = [{ name = "simplejson" }] pip = [{ name = "pip" }] pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] typing = [ - { name = "mypy", specifier = "==1.17.1" }, + { name = "mypy", specifier = "==1.18.1" }, { name = "pip" }, { name = "pyright", specifier = "==1.1.405" }, { name = "typing-extensions" }, @@ -1366,8 +1366,8 @@ wheels = [ [[package]] name = "pymongocrypt" -version = "1.16.0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#63d2591b84a9d4348cbe1c74556e266cd560ac5b" } +version = "1.17.0.dev0" +source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#36a6beafc99efdbe990ece675573ef581d151c92" } dependencies = [ { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*'" }, { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, From a7a645f85fc3d728032f57285231399c8c1d42fd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 17 Sep 2025 06:39:37 -0500 Subject: [PATCH 2064/2111] PYTHON-5555 Fix AWS Lambda build (#2540) --- pymongo/asynchronous/srv_resolver.py | 8 ++++---- pymongo/synchronous/srv_resolver.py | 8 ++++---- test/lambda/build_internal.sh | 2 +- test/lambda/template.yaml | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py index 9d1b8fe141..8d0d40c276 100644 --- a/pymongo/asynchronous/srv_resolver.py +++ b/pymongo/asynchronous/srv_resolver.py @@ -107,7 +107,7 @@ async def get_options(self) -> Optional[str]: # No TXT records return None except Exception as exc: - raise ConfigurationError(str(exc)) from None + raise ConfigurationError(str(exc)) from exc if len(results) > 1: raise ConfigurationError("Only one TXT record is supported") return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] @@ -122,7 +122,7 @@ async def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: # Raise the original error. raise # Else, raise all errors as ConfigurationError. - raise ConfigurationError(str(exc)) from None + raise ConfigurationError(str(exc)) from exc return results async def _get_srv_response_and_hosts( @@ -145,8 +145,8 @@ async def _get_srv_response_and_hosts( ) try: nlist = srv_host.split(".")[1:][-self.__slen :] - except Exception: - raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None + except Exception as exc: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from exc if self.__plist != nlist: raise ConfigurationError(f"Invalid SRV host: {node[0]}") if self.__srv_max_hosts: diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py index 0817c6dcd7..f6e99a3ea8 100644 --- a/pymongo/synchronous/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -107,7 +107,7 @@ def get_options(self) -> Optional[str]: # No TXT records return None except Exception as exc: - raise ConfigurationError(str(exc)) from None + raise ConfigurationError(str(exc)) from exc if len(results) > 1: raise ConfigurationError("Only one TXT record is supported") return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] @@ -122,7 +122,7 @@ def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: # Raise the original error. raise # Else, raise all errors as ConfigurationError. - raise ConfigurationError(str(exc)) from None + raise ConfigurationError(str(exc)) from exc return results def _get_srv_response_and_hosts( @@ -145,8 +145,8 @@ def _get_srv_response_and_hosts( ) try: nlist = srv_host.split(".")[1:][-self.__slen :] - except Exception: - raise ConfigurationError(f"Invalid SRV host: {node[0]}") from None + except Exception as exc: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from exc if self.__plist != nlist: raise ConfigurationError(f"Invalid SRV host: {node[0]}") if self.__srv_max_hosts: diff --git a/test/lambda/build_internal.sh b/test/lambda/build_internal.sh index fec488d32c..84423db4d1 100755 --- a/test/lambda/build_internal.sh +++ b/test/lambda/build_internal.sh @@ -1,5 +1,5 @@ #!/bin/bash -ex cd /src -PYTHON=/opt/python/cp39-cp39/bin/python +PYTHON=/opt/python/cp310-cp310/bin/python $PYTHON -m pip install -v -e . diff --git a/test/lambda/template.yaml b/test/lambda/template.yaml index 651ac4a8f8..11052f88dd 100644 --- a/test/lambda/template.yaml +++ b/test/lambda/template.yaml @@ -23,7 +23,7 @@ Resources: Variables: MONGODB_URI: !Ref MongoDbUri Handler: app.lambda_handler - Runtime: python3.9 + Runtime: python3.10 Architectures: - x86_64 Events: From dba0aa94adfc1ec5581cfb4edd5069b30662f5cc Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 17 Sep 2025 08:15:32 -0500 Subject: [PATCH 2065/2111] PYTHON-5472 Remove driver tests for Atlas Data Lake (#2542) --- .evergreen/generated_configs/variants.yml | 11 --- .evergreen/resync-specs.sh | 3 - .evergreen/scripts/generate_config.py | 8 -- .evergreen/scripts/resync-all-specs.py | 1 - .evergreen/scripts/setup_tests.py | 12 +-- .evergreen/scripts/teardown_tests.py | 4 - .evergreen/scripts/utils.py | 2 - CONTRIBUTING.md | 7 -- pyproject.toml | 1 - test/__init__.py | 34 ------- test/asynchronous/__init__.py | 34 ------- test/asynchronous/test_data_lake.py | 107 ---------------------- test/test_data_lake.py | 107 ---------------------- tools/synchro.py | 1 - 14 files changed, 1 insertion(+), 331 deletions(-) delete mode 100644 test/asynchronous/test_data_lake.py delete mode 100644 test/test_data_lake.py diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 33b8e0ba02..5e769173f9 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -79,17 +79,6 @@ buildvariants: TEST_NAME: atlas_connect tags: [pr] - # Atlas data lake tests - - name: atlas-data-lake-ubuntu-22 - tasks: - - name: .test-no-orchestration - display_name: Atlas Data Lake Ubuntu-22 - run_on: - - ubuntu2204-small - expansions: - TEST_NAME: data_lake - tags: [pr] - # Aws auth tests - name: auth-aws-ubuntu-20 tasks: diff --git a/.evergreen/resync-specs.sh b/.evergreen/resync-specs.sh index 765af2a562..d2bd89c781 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -76,9 +76,6 @@ do auth) cpjson auth/tests/ auth ;; - atlas-data-lake-testing|data_lake) - cpjson atlas-data-lake-testing/tests/ data_lake - ;; bson-binary-vector|bson_binary_vector) cpjson bson-binary-vector/tests/ bson_binary_vector ;; diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index f76c0d1e04..a04a64d30a 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -322,14 +322,6 @@ def create_no_c_ext_variants(): return [create_variant(tasks, display_name, host=host)] -def create_atlas_data_lake_variants(): - host = HOSTS["ubuntu22"] - tasks = [".test-no-orchestration"] - expansions = dict(TEST_NAME="data_lake") - display_name = get_variant_name("Atlas Data Lake", host) - return [create_variant(tasks, display_name, tags=["pr"], host=host, expansions=expansions)] - - def create_mod_wsgi_variants(): host = HOSTS["ubuntu22"] tasks = [".mod_wsgi"] diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py index 0817e2fc3b..778a38af88 100644 --- a/.evergreen/scripts/resync-all-specs.py +++ b/.evergreen/scripts/resync-all-specs.py @@ -56,7 +56,6 @@ def check_new_spec_directories(directory: pathlib.Path) -> list[str]: "client_side_operations_timeout": "csot", "mongodb_handshake": "handshake", "load_balancers": "load_balancer", - "atlas_data_lake_testing": "atlas", "connection_monitoring_and_pooling": "connection_monitoring", "command_logging_and_monitoring": "command_logging", "initial_dns_seedlist_discovery": "srv_seedlist", diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 9f383d9425..2ddede127d 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -214,18 +214,8 @@ def handle_test_env() -> None: if key in os.environ: write_env(key, os.environ[key]) - if test_name == "data_lake": - # Stop any running mongo-orchestration which might be using the port. - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh") - run_command(f"bash {DRIVERS_TOOLS}/.evergreen/atlas_data_lake/setup.sh") - AUTH = "auth" - if AUTH != "noauth": - if test_name == "data_lake": - config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/secrets-export.sh") - DB_USER = config["ADL_USERNAME"] - DB_PASSWORD = config["ADL_PASSWORD"] - elif test_name == "auth_oidc": + if test_name == "auth_oidc": DB_USER = config["OIDC_ADMIN_USER"] DB_PASSWORD = config["OIDC_ADMIN_PWD"] elif test_name == "search_index": diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py index d89f513f12..7da0b60815 100644 --- a/.evergreen/scripts/teardown_tests.py +++ b/.evergreen/scripts/teardown_tests.py @@ -57,10 +57,6 @@ teardown_mod_wsgi() -# Tear down data_lake if applicable. -elif TEST_NAME == "data_lake": - run_command(f"{DRIVERS_TOOLS}/.evergreen/atlas_data_lake/teardown.sh") - # Tear down coverage if applicable. if os.environ.get("COVERAGE"): shutil.rmtree(".pytest_cache", ignore_errors=True) diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 323ec2c567..8d1b466b6c 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -33,7 +33,6 @@ class Distro: "atlas_connect": "atlas_connect", "auth_aws": "auth_aws", "auth_oidc": "auth_oidc", - "data_lake": "data_lake", "default": "", "default_async": "default_async", "default_sync": "default", @@ -57,7 +56,6 @@ class Distro: "auth_oidc", "atlas_connect", "aws_lambda", - "data_lake", "mockupdb", "ocsp", ] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ed52727765..16907d4285 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -355,13 +355,6 @@ Note: these tests can only be run from an Evergreen Linux host that has the Pyth The `mode` can be `standalone` or `embedded`. For the `replica_set` version of the tests, use `TOPOLOGY=replica_set just run-server`. -### Atlas Data Lake tests. - -You must have `docker` or `podman` installed locally. - -- Run `just setup-tests data_lake`. -- Run `just run-tests`. - ### OCSP tests - Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. diff --git a/pyproject.toml b/pyproject.toml index fe277d8ed0..ad256a6c6f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -134,7 +134,6 @@ markers = [ "auth: tests that rely on authentication", "ocsp: tests that rely on ocsp", "atlas_connect: tests that rely on an atlas connection", - "data_lake: tests that rely on atlas data lake", "perf: benchmark tests", "search_index: search index helper tests", "kms: client-side field-level encryption tests using kms", diff --git a/test/__init__.py b/test/__init__.py index d583a72f0f..bef672b5f7 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -121,7 +121,6 @@ def __init__(self): self.sessions_enabled = False self.client = None # type: ignore self.conn_lock = threading.Lock() - self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER self._fips_enabled = None if self.load_balancer: @@ -199,16 +198,6 @@ def _init_client(self): self.mongoses = [] self.connection_attempts = [] self.client = self._connect(host, port) - if self.client is not None: - # Return early when connected to dataLake as mongohoused does not - # support the getCmdLineOpts command and is tested without TLS. - if os.environ.get("TEST_DATA_LAKE"): - self.is_data_lake = True - self.auth_enabled = True - self.client.close() - self.client = self._connect(host, port, username=db_user, password=db_pwd) - self.connected = True - return if HAVE_SSL and not self.client: # Is MongoDB configured for SSL? @@ -501,14 +490,6 @@ def require_connection(self, func): func=func, ) - def require_data_lake(self, func): - """Run a test only if we are connected to Atlas Data Lake.""" - return self._require( - lambda: self.is_data_lake, - "Not connected to Atlas Data Lake on self.pair", - func=func, - ) - def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) @@ -1230,21 +1211,6 @@ def teardown(): garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") if garbage: raise AssertionError("\n".join(garbage)) - c = client_context.client - if c: - if not client_context.is_data_lake: - try: - c.drop_database("pymongo-pooling-tests") - c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - c.drop_database("pymongo_test_mike") - c.drop_database("pymongo_test_bernie") - except AutoReconnect: - # PYTHON-4982 - if sys.implementation.name.lower() != "pypy": - raise - c.close() print_running_clients() diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 8ab7ff7219..5a65311808 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -121,7 +121,6 @@ def __init__(self): self.sessions_enabled = False self.client = None # type: ignore self.conn_lock = threading.Lock() - self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER self._fips_enabled = None if self.load_balancer: @@ -199,16 +198,6 @@ async def _init_client(self): self.mongoses = [] self.connection_attempts = [] self.client = await self._connect(host, port) - if self.client is not None: - # Return early when connected to dataLake as mongohoused does not - # support the getCmdLineOpts command and is tested without TLS. - if os.environ.get("TEST_DATA_LAKE"): - self.is_data_lake = True - self.auth_enabled = True - await self.client.close() - self.client = await self._connect(host, port, username=db_user, password=db_pwd) - self.connected = True - return if HAVE_SSL and not self.client: # Is MongoDB configured for SSL? @@ -501,14 +490,6 @@ def require_connection(self, func): func=func, ) - def require_data_lake(self, func): - """Run a test only if we are connected to Atlas Data Lake.""" - return self._require( - lambda: self.is_data_lake, - "Not connected to Atlas Data Lake on self.pair", - func=func, - ) - def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) @@ -1246,21 +1227,6 @@ async def async_teardown(): garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") if garbage: raise AssertionError("\n".join(garbage)) - c = async_client_context.client - if c: - if not async_client_context.is_data_lake: - try: - await c.drop_database("pymongo-pooling-tests") - await c.drop_database("pymongo_test") - await c.drop_database("pymongo_test1") - await c.drop_database("pymongo_test2") - await c.drop_database("pymongo_test_mike") - await c.drop_database("pymongo_test_bernie") - except AutoReconnect: - # PYTHON-4982 - if sys.implementation.name.lower() != "pypy": - raise - await c.close() print_running_clients() diff --git a/test/asynchronous/test_data_lake.py b/test/asynchronous/test_data_lake.py deleted file mode 100644 index 689bf38534..0000000000 --- a/test/asynchronous/test_data_lake.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test Atlas Data Lake.""" -from __future__ import annotations - -import os -import sys -from pathlib import Path - -import pytest - -sys.path[0:0] = [""] - -from test.asynchronous import AsyncIntegrationTest, AsyncUnitTest, async_client_context, unittest -from test.asynchronous.unified_format import generate_test_classes -from test.utils_shared import ( - OvertCommandListener, -) - -from pymongo.asynchronous.helpers import anext - -_IS_SYNC = False - -pytestmark = pytest.mark.data_lake - - -class TestDataLakeMustConnect(AsyncUnitTest): - async def test_connected_to_data_lake(self): - self.assertTrue( - async_client_context.is_data_lake and async_client_context.connected, - "client context must be connected to data lake when DATA_LAKE is set. Failed attempts:\n{}".format( - async_client_context.connection_attempt_info() - ), - ) - - -class TestDataLakeProse(AsyncIntegrationTest): - # Default test database and collection names. - TEST_DB = "test" - TEST_COLLECTION = "driverdata" - - @async_client_context.require_data_lake - async def asyncSetUp(self): - await super().asyncSetUp() - - # Test killCursors - async def test_1(self): - listener = OvertCommandListener() - client = await self.async_rs_or_single_client(event_listeners=[listener]) - cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) - await anext(cursor) - - # find command assertions - find_cmd = listener.succeeded_events[-1] - self.assertEqual(find_cmd.command_name, "find") - cursor_id = find_cmd.reply["cursor"]["id"] - cursor_ns = find_cmd.reply["cursor"]["ns"] - - # killCursors command assertions - await cursor.close() - started = listener.started_events[-1] - self.assertEqual(started.command_name, "killCursors") - succeeded = listener.succeeded_events[-1] - self.assertEqual(succeeded.command_name, "killCursors") - - self.assertIn(cursor_id, started.command["cursors"]) - target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) - self.assertEqual(cursor_ns, target_ns) - - self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) - - # Test no auth - async def test_2(self): - client = await self.async_rs_client_noauth() - await client.admin.command("ping") - - # Test with auth - async def test_3(self): - for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: - client = await self.async_rs_or_single_client(authMechanism=mechanism) - await client[self.TEST_DB][self.TEST_COLLECTION].find_one() - - -# Location of JSON test specifications. -if _IS_SYNC: - TEST_PATH = Path(__file__).parent / "data_lake/unified" -else: - TEST_PATH = Path(__file__).parent.parent / "data_lake/unified" - -# Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_data_lake.py b/test/test_data_lake.py deleted file mode 100644 index d6d2007007..0000000000 --- a/test/test_data_lake.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test Atlas Data Lake.""" -from __future__ import annotations - -import os -import sys -from pathlib import Path - -import pytest - -sys.path[0:0] = [""] - -from test import IntegrationTest, UnitTest, client_context, unittest -from test.unified_format import generate_test_classes -from test.utils_shared import ( - OvertCommandListener, -) - -from pymongo.synchronous.helpers import next - -_IS_SYNC = True - -pytestmark = pytest.mark.data_lake - - -class TestDataLakeMustConnect(UnitTest): - def test_connected_to_data_lake(self): - self.assertTrue( - client_context.is_data_lake and client_context.connected, - "client context must be connected to data lake when DATA_LAKE is set. Failed attempts:\n{}".format( - client_context.connection_attempt_info() - ), - ) - - -class TestDataLakeProse(IntegrationTest): - # Default test database and collection names. - TEST_DB = "test" - TEST_COLLECTION = "driverdata" - - @client_context.require_data_lake - def setUp(self): - super().setUp() - - # Test killCursors - def test_1(self): - listener = OvertCommandListener() - client = self.rs_or_single_client(event_listeners=[listener]) - cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) - next(cursor) - - # find command assertions - find_cmd = listener.succeeded_events[-1] - self.assertEqual(find_cmd.command_name, "find") - cursor_id = find_cmd.reply["cursor"]["id"] - cursor_ns = find_cmd.reply["cursor"]["ns"] - - # killCursors command assertions - cursor.close() - started = listener.started_events[-1] - self.assertEqual(started.command_name, "killCursors") - succeeded = listener.succeeded_events[-1] - self.assertEqual(succeeded.command_name, "killCursors") - - self.assertIn(cursor_id, started.command["cursors"]) - target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) - self.assertEqual(cursor_ns, target_ns) - - self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) - - # Test no auth - def test_2(self): - client = self.rs_client_noauth() - client.admin.command("ping") - - # Test with auth - def test_3(self): - for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: - client = self.rs_or_single_client(authMechanism=mechanism) - client[self.TEST_DB][self.TEST_COLLECTION].find_one() - - -# Location of JSON test specifications. -if _IS_SYNC: - TEST_PATH = Path(__file__).parent / "data_lake/unified" -else: - TEST_PATH = Path(__file__).parent.parent / "data_lake/unified" - -# Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) - - -if __name__ == "__main__": - unittest.main() diff --git a/tools/synchro.py b/tools/synchro.py index e502f96281..a4190529c4 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -230,7 +230,6 @@ def async_only_test(f: str) -> bool: "test_cursor.py", "test_custom_types.py", "test_database.py", - "test_data_lake.py", "test_discovery_and_monitoring.py", "test_dns.py", "test_encryption.py", From 4936fe90bfdc0e43e9bd6376b2622773e98b823e Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 17 Sep 2025 13:05:52 -0500 Subject: [PATCH 2066/2111] PYTHON-5539 Fix installation of pymongocrypt from source (#2541) --- .evergreen/run-tests.sh | 6 +----- .evergreen/scripts/generate_config_utils.py | 2 +- .evergreen/scripts/resync-all-specs.py | 8 +++---- .evergreen/scripts/run_tests.py | 24 +++++++++++++++++++++ .evergreen/scripts/setup_tests.py | 4 +++- pyproject.toml | 4 +--- requirements/test.txt | 1 + uv.lock | 19 ++++++++++------ 8 files changed, 47 insertions(+), 21 deletions(-) diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index a9f2ba2b5c..ec3746b29c 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -25,11 +25,7 @@ else exit 1 fi -# List the packages. -uv sync ${UV_ARGS} --reinstall --quiet -uv pip list - # Start the test runner. -uv run ${UV_ARGS} .evergreen/scripts/run_tests.py "$@" +uv run ${UV_ARGS} --reinstall .evergreen/scripts/run_tests.py "$@" popd diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 632d34ea6f..26fe753e8c 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -273,7 +273,7 @@ def generate_yaml(tasks=None, variants=None): out = ShrubService.generate_yaml(project) # Dedent by two spaces to match what we use in config.yml lines = [line[2:] for line in out.splitlines()] - print("\n".join(lines)) # noqa: T201 + print("\n".join(lines)) ################## diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py index 778a38af88..dc02545824 100644 --- a/.evergreen/scripts/resync-all-specs.py +++ b/.evergreen/scripts/resync-all-specs.py @@ -11,7 +11,7 @@ def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: """Actually sync the specs""" - print("Beginning to sync specs") # noqa: T201 + print("Beginning to sync specs") for spec in os.scandir(directory): if not spec.is_dir(): continue @@ -27,11 +27,11 @@ def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: ) except CalledProcessError as exc: errored[spec.name] = exc.stderr - print("Done syncing specs") # noqa: T201 + print("Done syncing specs") def apply_patches(): - print("Beginning to apply patches") # noqa: T201 + print("Beginning to apply patches") subprocess.run(["bash", "./.evergreen/remove-unimplemented-tests.sh"], check=True) # noqa: S603, S607 subprocess.run( ["git apply -R --allow-empty --whitespace=fix ./.evergreen/spec-patch/*"], # noqa: S607 @@ -95,7 +95,7 @@ def write_summary(errored: dict[str, str], new: list[str], filename: Optional[st pr_body += "\n" if pr_body != "": if filename is None: - print(f"\n{pr_body}") # noqa: T201 + print(f"\n{pr_body}") else: with open(filename, "w") as f: # replacements made for proper json diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 5c1ba25a97..3a1c15a41b 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -10,6 +10,12 @@ from pathlib import Path from shutil import which +try: + import importlib_metadata +except ImportError: + from importlib import metadata as importlib_metadata + + import pytest from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command @@ -23,6 +29,21 @@ SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") +def list_packages(): + packages = dict() + for distribution in importlib_metadata.distributions(): + packages[distribution.name] = distribution + print("Package Version URL") + print("------------------- ----------- ----------------------------------------------------") + for name in sorted(packages): + distribution = packages[name] + url = "" + if distribution.origin is not None: + url = distribution.origin.url + print(f"{name:20s}{distribution.version:12s}{url}") + print("------------------- ----------- ----------------------------------------------------\n") + + def handle_perf(start_time: datetime): end_time = datetime.now() elapsed_secs = (end_time - start_time).total_seconds() @@ -121,6 +142,9 @@ def handle_aws_lambda() -> None: def run() -> None: + # List the installed packages. + list_packages() + # Handle green framework first so they can patch modules. if GREEN_FRAMEWORK: handle_green_framework() diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 2ddede127d..02b8fae45b 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -346,7 +346,9 @@ def handle_test_env() -> None: setup_libmongocrypt() # TODO: Test with 'pip install pymongocrypt' - UV_ARGS.append("--group pymongocrypt_source") + UV_ARGS.append( + "--with pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" + ) # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE = ROOT / "libmongocrypt/nocrypto" diff --git a/pyproject.toml b/pyproject.toml index ad256a6c6f..53fbfc8c1d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,9 +60,6 @@ coverage = [ mockupdb = [ "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" ] -pymongocrypt_source = [ - "pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" -] perf = ["simplejson"] typing = [ "mypy==1.18.1", @@ -238,6 +235,7 @@ dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?)|dummy.*)$" [tool.ruff.lint.per-file-ignores] "pymongo/__init__.py" = ["E402"] +".evergreen/scripts/*.py" = ["T201"] "test/*.py" = ["PT", "E402", "PLW", "SIM", "E741", "PTH", "S", "B904", "E722", "T201", "RET", "ARG", "F405", "B028", "PGH001", "B018", "F403", "RUF015", "E731", "B007", "UP031", "F401", "B023", "F811"] diff --git a/requirements/test.txt b/requirements/test.txt index 135114feff..566cade7ec 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1,2 +1,3 @@ pytest>=8.2 pytest-asyncio>=0.24.0 +importlib_metadata>=7.0;python_version < "3.13" diff --git a/uv.lock b/uv.lock index ff7bd6fe24..ce0367e872 100644 --- a/uv.lock +++ b/uv.lock @@ -932,7 +932,7 @@ name = "importlib-metadata" version = "8.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "zipp", marker = "python_full_version < '3.10'" }, + { name = "zipp", marker = "python_full_version != '3.14.*'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } wheels = [ @@ -1261,6 +1261,7 @@ snappy = [ { name = "python-snappy" }, ] test = [ + { name = "importlib-metadata", marker = "python_full_version < '3.13'" }, { name = "pytest" }, { name = "pytest-asyncio" }, ] @@ -1292,9 +1293,6 @@ perf = [ pip = [ { name = "pip" }, ] -pymongocrypt-source = [ - { name = "pymongocrypt" }, -] typing = [ { name = "mypy" }, { name = "pip" }, @@ -1309,6 +1307,7 @@ requires-dist = [ { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, { name = "furo", marker = "extra == 'docs'", specifier = "==2025.7.19" }, + { name = "importlib-metadata", marker = "python_full_version < '3.13' and extra == 'test'", specifier = ">=7.0" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, @@ -1343,7 +1342,6 @@ gevent = [ mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] perf = [{ name = "simplejson" }] pip = [{ name = "pip" }] -pymongocrypt-source = [{ name = "pymongocrypt", git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master" }] typing = [ { name = "mypy", specifier = "==1.18.1" }, { name = "pip" }, @@ -1366,8 +1364,8 @@ wheels = [ [[package]] name = "pymongocrypt" -version = "1.17.0.dev0" -source = { git = "https://github.com/mongodb/libmongocrypt?subdirectory=bindings%2Fpython&rev=master#36a6beafc99efdbe990ece675573ef581d151c92" } +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*'" }, { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, @@ -1375,6 +1373,13 @@ dependencies = [ { name = "httpx" }, { name = "packaging" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/fb/8e/dd9ed710e8fd4eec127dac1db3b3e9156ffcf340a0463a82087a12ae924e/pymongocrypt-1.16.0.tar.gz", hash = "sha256:0db0812055d00e6f5562a8d66711c4cba4b75014c363306c9b298a9fd68fccdd", size = 65354, upload-time = "2025-09-09T18:54:25.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/8b/dda0f19ce16f7b257e4aa2a8831a1a1307c1ea124a00f571cda83a04adcb/pymongocrypt-1.16.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:fbd85534880ea8525956b96e583a7021c721abbf3b51a6dbe48a57d7eba8e74a", size = 4721169, upload-time = "2025-09-09T18:54:18.642Z" }, + { url = "https://files.pythonhosted.org/packages/99/48/512a5b597d71407f9b06a14cd8e5ac376e06b780d4d54a4e69726bd48703/pymongocrypt-1.16.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:85df0a78480e91bdd3a5a6da3e4cdc7d9700de8a871aa8168588981c041f1914", size = 4038242, upload-time = "2025-09-09T18:54:20.496Z" }, + { url = "https://files.pythonhosted.org/packages/3f/67/3bdeda347191d6c1ee257eb3da8c85f1278d86dfb493cc9bc26352a41d0a/pymongocrypt-1.16.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d2ebeb1b5e4f4554bf44f726e8009c59c4d7d0b412beebfece875991714676", size = 3775742, upload-time = "2025-09-09T18:54:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/dc/81/70f6947afbd1ac7be54482b44cb1b99e8e9b9cac41985e6250c4fc279e58/pymongocrypt-1.16.0-py3-none-win_amd64.whl", hash = "sha256:c20afcd89ec5fc53305e924c05c4a0321ddc73f1e4e7c8240ee2fd0123e23609", size = 1607917, upload-time = "2025-09-09T18:54:24.182Z" }, +] [[package]] name = "pyopenssl" From 668bd8232a9f0be50fd33e0c39f32e8df7d3f3b0 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 17 Sep 2025 16:52:00 -0400 Subject: [PATCH 2067/2111] PYTHON-2391 - Ensure retries do not use duplicate command payloads (#2545) --- pymongo/asynchronous/collection.py | 42 +++++++++++++++--------------- pymongo/synchronous/collection.py | 42 +++++++++++++++--------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 741c11e551..064231ccfc 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -2144,11 +2144,9 @@ async def count_documents( if comment is not None: kwargs["comment"] = comment pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) - cmd = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) async def _cmd( session: Optional[AsyncClientSession], @@ -2156,6 +2154,8 @@ async def _cmd( conn: AsyncConnection, read_preference: Optional[_ServerMode], ) -> int: + cmd: dict[str, Any] = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + cmd.update(kwargs) result = await self._aggregate_one_result( conn, read_preference, cmd, collation, session ) @@ -3194,19 +3194,14 @@ async def distinct( """ if not isinstance(key, str): raise TypeError(f"key must be an instance of str, not {type(key)}") - cmd = {"distinct": self._name, "key": key} if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment if hint is not None: if not isinstance(hint, str): hint = helpers_shared._index_document(hint) - cmd["hint"] = hint # type: ignore[assignment] async def _cmd( session: Optional[AsyncClientSession], @@ -3214,6 +3209,12 @@ async def _cmd( conn: AsyncConnection, read_preference: Optional[_ServerMode], ) -> list: # type: ignore[type-arg] + cmd = {"distinct": self._name, "key": key} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + if hint is not None: + cmd["hint"] = hint # type: ignore[assignment] return ( await self._command( conn, @@ -3248,27 +3249,26 @@ async def _find_and_modify( f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd = {"findAndModify": self._name, "query": filter, "new": return_document} - if let is not None: - common.validate_is_mapping("let", let) - cmd["let"] = let - cmd.update(kwargs) - if projection is not None: - cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") - if sort is not None: - cmd["sort"] = helpers_shared._index_document(sort) - if upsert is not None: - validate_boolean("upsert", upsert) - cmd["upsert"] = upsert if hint is not None: if not isinstance(hint, str): hint = helpers_shared._index_document(hint) - - write_concern = self._write_concern_for_cmd(cmd, session) + write_concern = self._write_concern_for_cmd(kwargs, session) async def _find_and_modify_helper( session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool ) -> Any: + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers_shared._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert acknowledged = write_concern.acknowledged if array_filters is not None: if not acknowledged: diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index 9f32deb765..e5cc816cd3 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -2143,11 +2143,9 @@ def count_documents( if comment is not None: kwargs["comment"] = comment pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) - cmd = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} if "hint" in kwargs and not isinstance(kwargs["hint"], str): kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) def _cmd( session: Optional[ClientSession], @@ -2155,6 +2153,8 @@ def _cmd( conn: Connection, read_preference: Optional[_ServerMode], ) -> int: + cmd: dict[str, Any] = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + cmd.update(kwargs) result = self._aggregate_one_result(conn, read_preference, cmd, collation, session) if not result: return 0 @@ -3187,19 +3187,14 @@ def distinct( """ if not isinstance(key, str): raise TypeError(f"key must be an instance of str, not {type(key)}") - cmd = {"distinct": self._name, "key": key} if filter is not None: if "query" in kwargs: raise ConfigurationError("can't pass both filter and query") kwargs["query"] = filter collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment if hint is not None: if not isinstance(hint, str): hint = helpers_shared._index_document(hint) - cmd["hint"] = hint # type: ignore[assignment] def _cmd( session: Optional[ClientSession], @@ -3207,6 +3202,12 @@ def _cmd( conn: Connection, read_preference: Optional[_ServerMode], ) -> list: # type: ignore[type-arg] + cmd = {"distinct": self._name, "key": key} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + if hint is not None: + cmd["hint"] = hint # type: ignore[assignment] return ( self._command( conn, @@ -3241,27 +3242,26 @@ def _find_and_modify( f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" ) collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd = {"findAndModify": self._name, "query": filter, "new": return_document} - if let is not None: - common.validate_is_mapping("let", let) - cmd["let"] = let - cmd.update(kwargs) - if projection is not None: - cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") - if sort is not None: - cmd["sort"] = helpers_shared._index_document(sort) - if upsert is not None: - validate_boolean("upsert", upsert) - cmd["upsert"] = upsert if hint is not None: if not isinstance(hint, str): hint = helpers_shared._index_document(hint) - - write_concern = self._write_concern_for_cmd(cmd, session) + write_concern = self._write_concern_for_cmd(kwargs, session) def _find_and_modify_helper( session: Optional[ClientSession], conn: Connection, retryable_write: bool ) -> Any: + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers_shared._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert acknowledged = write_concern.acknowledged if array_filters is not None: if not acknowledged: From ef59602e39382e2d0fdf03858cf8dca458ef6553 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 18 Sep 2025 20:08:22 -0500 Subject: [PATCH 2068/2111] PYTHON-5491 Update test for dropIndex behavior change (#2546) --- test/asynchronous/test_collection.py | 8 +++++--- test/test_collection.py | 8 +++++--- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 90a0518532..379ec9e8c8 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -40,6 +40,7 @@ OvertCommandListener, async_wait_until, ) +from test.version import Version from bson import encode from bson.codec_options import CodecOptions @@ -335,8 +336,6 @@ async def test_create_index(self): await db.test.create_index(["hello", ("world", DESCENDING)]) await db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] - # TODO: PYTHON-5491 - remove version max - @async_client_context.require_version_max(8, 0, -1) async def test_drop_index(self): db = self.db await db.test.drop_indexes() @@ -348,7 +347,10 @@ async def test_drop_index(self): await db.test.drop_index(name) # Drop it again. - with self.assertRaises(OperationFailure): + if async_client_context.version < Version(8, 3, -1): + with self.assertRaises(OperationFailure): + await db.test.drop_index(name) + else: await db.test.drop_index(name) self.assertEqual(len(await db.test.index_information()), 2) self.assertIn("hello_1", await db.test.index_information()) diff --git a/test/test_collection.py b/test/test_collection.py index b1947259ba..1bd3a80c5f 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -40,6 +40,7 @@ OvertCommandListener, wait_until, ) +from test.version import Version from bson import encode from bson.codec_options import CodecOptions @@ -333,8 +334,6 @@ def test_create_index(self): db.test.create_index(["hello", ("world", DESCENDING)]) db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] - # TODO: PYTHON-5491 - remove version max - @client_context.require_version_max(8, 0, -1) def test_drop_index(self): db = self.db db.test.drop_indexes() @@ -346,7 +345,10 @@ def test_drop_index(self): db.test.drop_index(name) # Drop it again. - with self.assertRaises(OperationFailure): + if client_context.version < Version(8, 3, -1): + with self.assertRaises(OperationFailure): + db.test.drop_index(name) + else: db.test.drop_index(name) self.assertEqual(len(db.test.index_information()), 2) self.assertIn("hello_1", db.test.index_information()) From 9603a85f214e6365659bd1d607dc7be8443c8b80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Sep 2025 13:05:14 -0500 Subject: [PATCH 2069/2111] Bump the actions group with 2 updates (#2550) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- .github/workflows/create-release-branch.yml | 6 +++--- .github/workflows/release-python.yml | 14 ++++++-------- .github/workflows/zizmor.yml | 2 +- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.github/workflows/create-release-branch.yml b/.github/workflows/create-release-branch.yml index 72345d4a44..95a5e65c88 100644 --- a/.github/workflows/create-release-branch.yml +++ b/.github/workflows/create-release-branch.yml @@ -33,11 +33,11 @@ jobs: outputs: version: ${{ steps.pre-publish.outputs.version }} steps: - - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 with: app_id: ${{ vars.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} - - uses: mongodb-labs/drivers-github-tools/setup@v2 + - uses: mongodb-labs/drivers-github-tools/setup@v3 with: aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} aws_region_name: ${{ vars.AWS_REGION_NAME }} @@ -45,7 +45,7 @@ jobs: artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} - name: Get hatch run: pip install hatch - - uses: mongodb-labs/drivers-github-tools/create-branch@v2 + - uses: mongodb-labs/drivers-github-tools/create-branch@v3 id: create-branch with: branch_name: ${{ inputs.branch_name }} diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index a30afbccd5..6abca9e528 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -38,17 +38,16 @@ jobs: outputs: version: ${{ steps.pre-publish.outputs.version }} steps: - - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 with: app_id: ${{ vars.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} - - uses: mongodb-labs/drivers-github-tools/setup@v2 + - uses: mongodb-labs/drivers-github-tools/setup@v3 with: aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} aws_region_name: ${{ vars.AWS_REGION_NAME }} aws_secret_id: ${{ secrets.AWS_SECRET_ID }} - artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} - - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v2 + - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v3 id: pre-publish with: dry_run: ${{ env.DRY_RUN }} @@ -100,17 +99,16 @@ jobs: attestations: write security-events: write steps: - - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 with: app_id: ${{ vars.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} - - uses: mongodb-labs/drivers-github-tools/setup@v2 + - uses: mongodb-labs/drivers-github-tools/setup@v3 with: aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} aws_region_name: ${{ vars.AWS_REGION_NAME }} aws_secret_id: ${{ secrets.AWS_SECRET_ID }} - artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} - - uses: mongodb-labs/drivers-github-tools/python/post-publish@v2 + - uses: mongodb-labs/drivers-github-tools/python/post-publish@v3 with: following_version: ${{ env.FOLLOWING_VERSION }} product_name: ${{ env.PRODUCT_NAME }} diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index 31d8c1eef3..a3eb5d5508 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@873539476a7f9b0da7504d0d9e9a6a5275094d98 + uses: zizmorcore/zizmor-action@0696496a48b64e0568faa46ddaf5f6fe48b83b04 From 6fe85436ae62de6e352b87c2526c796e05471559 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 22 Sep 2025 17:15:02 -0500 Subject: [PATCH 2070/2111] PYTHON-3414 Improve error message when using incompatible dependencies (#2549) --- pymongo/asynchronous/encryption.py | 9 ++- pymongo/asynchronous/srv_resolver.py | 12 +++- pymongo/common.py | 89 ++++++++++++++++++++++++++++ pymongo/encryption_options.py | 17 +++++- pymongo/synchronous/encryption.py | 9 ++- pymongo/synchronous/srv_resolver.py | 12 +++- test/version.py | 64 +------------------- 7 files changed, 144 insertions(+), 68 deletions(-) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py index d32a5b3204..4dfd36aa49 100644 --- a/pymongo/asynchronous/encryption.py +++ b/pymongo/asynchronous/encryption.py @@ -66,7 +66,12 @@ from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts, TextOpts +from pymongo.encryption_options import ( + AutoEncryptionOpts, + RangeOpts, + TextOpts, + check_min_pymongocrypt, +) from pymongo.errors import ( ConfigurationError, EncryptedCollectionError, @@ -675,6 +680,8 @@ def __init__( "python -m pip install --upgrade 'pymongo[encryption]'" ) + check_min_pymongocrypt() + if not isinstance(codec_options, CodecOptions): raise TypeError( f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}" diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py index 8d0d40c276..006abbb616 100644 --- a/pymongo/asynchronous/srv_resolver.py +++ b/pymongo/asynchronous/srv_resolver.py @@ -19,7 +19,7 @@ import random from typing import TYPE_CHECKING, Any, Optional, Union -from pymongo.common import CONNECT_TIMEOUT +from pymongo.common import CONNECT_TIMEOUT, check_for_min_version from pymongo.errors import ConfigurationError if TYPE_CHECKING: @@ -32,6 +32,14 @@ def _have_dnspython() -> bool: try: import dns # noqa: F401 + dns_version, required_version, is_valid = check_for_min_version("dnspython") + if not is_valid: + raise RuntimeError( + f"pymongo requires dnspython>={required_version}, " + f"found version {dns_version}. " + "Install a compatible version with pip" + ) + return True except ImportError: return False @@ -80,6 +88,8 @@ def __init__( srv_service_name: str, srv_max_hosts: int = 0, ): + # Ensure the version of dnspython is compatible. + _have_dnspython() self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT diff --git a/pymongo/common.py b/pymongo/common.py index 5210e72189..e23adac426 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -20,6 +20,7 @@ import warnings from collections import OrderedDict, abc from difflib import get_close_matches +from importlib.metadata import requires, version from typing import ( TYPE_CHECKING, Any, @@ -1092,3 +1093,91 @@ def has_c() -> bool: return True except ImportError: return False + + +class Version(tuple[int, ...]): + """A class that can be used to compare version strings.""" + + def __new__(cls, *version: int) -> Version: + padded_version = cls._padded(version, 4) + return super().__new__(cls, tuple(padded_version)) + + @classmethod + def _padded(cls, iter: Any, length: int, padding: int = 0) -> list[int]: + as_list = list(iter) + if len(as_list) < length: + for _ in range(length - len(as_list)): + as_list.append(padding) + return as_list + + @classmethod + def from_string(cls, version_string: str) -> Version: + mod = 0 + bump_patch_level = False + if version_string.endswith("+"): + version_string = version_string[0:-1] + mod = 1 + elif version_string.endswith("-pre-"): + version_string = version_string[0:-5] + mod = -1 + elif version_string.endswith("-"): + version_string = version_string[0:-1] + mod = -1 + # Deal with .devX substrings + if ".dev" in version_string: + version_string = version_string[0 : version_string.find(".dev")] + mod = -1 + # Deal with '-rcX' substrings + if "-rc" in version_string: + version_string = version_string[0 : version_string.find("-rc")] + mod = -1 + # Deal with git describe generated substrings + elif "-" in version_string: + version_string = version_string[0 : version_string.find("-")] + mod = -1 + bump_patch_level = True + + version = [int(part) for part in version_string.split(".")] + version = cls._padded(version, 3) + # Make from_string and from_version_array agree. For example: + # MongoDB Enterprise > db.runCommand('buildInfo').versionArray + # [ 3, 2, 1, -100 ] + # MongoDB Enterprise > db.runCommand('buildInfo').version + # 3.2.0-97-g1ef94fe + if bump_patch_level: + version[-1] += 1 + version.append(mod) + + return Version(*version) + + @classmethod + def from_version_array(cls, version_array: Any) -> Version: + version = list(version_array) + if version[-1] < 0: + version[-1] = -1 + version = cls._padded(version, 3) + return Version(*version) + + def at_least(self, *other_version: Any) -> bool: + return self >= Version(*other_version) + + def __str__(self) -> str: + return ".".join(map(str, self)) + + +def check_for_min_version(package_name: str) -> tuple[str, str, bool]: + """Test whether an installed package is of the desired version.""" + package_version_str = version(package_name) + package_version = Version.from_string(package_version_str) + # Dependency is expected to be in one of the forms: + # "pymongocrypt<2.0.0,>=1.13.0; extra == 'encryption'" + # 'dnspython<3.0.0,>=1.16.0' + # + requirements = requires("pymongo") + assert requirements is not None + requirement = [i for i in requirements if i.startswith(package_name)][0] # noqa: RUF015 + if ";" in requirement: + requirement = requirement.split(";")[0] + required_version = requirement[requirement.find(">=") + 2 :] + is_valid = package_version >= Version.from_string(required_version) + return package_version_str, required_version, is_valid diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index da34a3be52..b2037617b0 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -23,7 +23,7 @@ from pymongo.uri_parser_shared import _parse_kms_tls_options try: - import pymongocrypt # type:ignore[import-untyped] # noqa: F401 + import pymongocrypt # type:ignore[import-untyped] # noqa: F401 # Check for pymongocrypt>=1.10. from pymongocrypt import synchronous as _ # noqa: F401 @@ -32,7 +32,7 @@ except ImportError: _HAVE_PYMONGOCRYPT = False from bson import int64 -from pymongo.common import validate_is_mapping +from pymongo.common import check_for_min_version, validate_is_mapping from pymongo.errors import ConfigurationError if TYPE_CHECKING: @@ -40,6 +40,18 @@ from pymongo.typings import _AgnosticMongoClient +def check_min_pymongocrypt() -> None: + """Raise an appropriate error if the min pymongocrypt is not installed.""" + pymongocrypt_version, required_version, is_valid = check_for_min_version("pymongocrypt") + if not is_valid: + raise ConfigurationError( + f"client side encryption requires pymongocrypt>={required_version}, " + f"found version {pymongocrypt_version}. " + "Install a compatible version with: " + "python -m pip install 'pymongo[encryption]'" + ) + + class AutoEncryptionOpts: """Options to configure automatic client-side field level encryption.""" @@ -215,6 +227,7 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) + check_min_pymongocrypt() if encrypted_fields_map: validate_is_mapping("encrypted_fields_map", encrypted_fields_map) self._encrypted_fields_map = encrypted_fields_map diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py index f9d51a9eab..2d666b9763 100644 --- a/pymongo/synchronous/encryption.py +++ b/pymongo/synchronous/encryption.py @@ -61,7 +61,12 @@ from pymongo import _csot from pymongo.common import CONNECT_TIMEOUT from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts, TextOpts +from pymongo.encryption_options import ( + AutoEncryptionOpts, + RangeOpts, + TextOpts, + check_min_pymongocrypt, +) from pymongo.errors import ( ConfigurationError, EncryptedCollectionError, @@ -672,6 +677,8 @@ def __init__( "python -m pip install --upgrade 'pymongo[encryption]'" ) + check_min_pymongocrypt() + if not isinstance(codec_options, CodecOptions): raise TypeError( f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}" diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py index f6e99a3ea8..8e492061ae 100644 --- a/pymongo/synchronous/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -19,7 +19,7 @@ import random from typing import TYPE_CHECKING, Any, Optional, Union -from pymongo.common import CONNECT_TIMEOUT +from pymongo.common import CONNECT_TIMEOUT, check_for_min_version from pymongo.errors import ConfigurationError if TYPE_CHECKING: @@ -32,6 +32,14 @@ def _have_dnspython() -> bool: try: import dns # noqa: F401 + dns_version, required_version, is_valid = check_for_min_version("dnspython") + if not is_valid: + raise RuntimeError( + f"pymongo requires dnspython>={required_version}, " + f"found version {dns_version}. " + "Install a compatible version with pip" + ) + return True except ImportError: return False @@ -80,6 +88,8 @@ def __init__( srv_service_name: str, srv_max_hosts: int = 0, ): + # Ensure the version of dnspython is compatible. + _have_dnspython() self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT diff --git a/test/version.py b/test/version.py index 42d53cfcf4..ae6ecb331f 100644 --- a/test/version.py +++ b/test/version.py @@ -15,64 +15,10 @@ """Some tools for running tests based on MongoDB server version.""" from __future__ import annotations +from pymongo.common import Version as BaseVersion -class Version(tuple): - def __new__(cls, *version): - padded_version = cls._padded(version, 4) - return super().__new__(cls, tuple(padded_version)) - - @classmethod - def _padded(cls, iter, length, padding=0): - l = list(iter) - if len(l) < length: - for _ in range(length - len(l)): - l.append(padding) - return l - - @classmethod - def from_string(cls, version_string): - mod = 0 - bump_patch_level = False - if version_string.endswith("+"): - version_string = version_string[0:-1] - mod = 1 - elif version_string.endswith("-pre-"): - version_string = version_string[0:-5] - mod = -1 - elif version_string.endswith("-"): - version_string = version_string[0:-1] - mod = -1 - # Deal with '-rcX' substrings - if "-rc" in version_string: - version_string = version_string[0 : version_string.find("-rc")] - mod = -1 - # Deal with git describe generated substrings - elif "-" in version_string: - version_string = version_string[0 : version_string.find("-")] - mod = -1 - bump_patch_level = True - - version = [int(part) for part in version_string.split(".")] - version = cls._padded(version, 3) - # Make from_string and from_version_array agree. For example: - # MongoDB Enterprise > db.runCommand('buildInfo').versionArray - # [ 3, 2, 1, -100 ] - # MongoDB Enterprise > db.runCommand('buildInfo').version - # 3.2.0-97-g1ef94fe - if bump_patch_level: - version[-1] += 1 - version.append(mod) - - return Version(*version) - - @classmethod - def from_version_array(cls, version_array): - version = list(version_array) - if version[-1] < 0: - version[-1] = -1 - version = cls._padded(version, 3) - return Version(*version) +class Version(BaseVersion): @classmethod def from_client(cls, client): info = client.server_info() @@ -86,9 +32,3 @@ async def async_from_client(cls, client): if "versionArray" in info: return cls.from_version_array(info["versionArray"]) return cls.from_string(info["version"]) - - def at_least(self, *other_version): - return self >= Version(*other_version) - - def __str__(self): - return ".".join(map(str, self)) From 266caf02c4edb295eeafd1d7fcba0c8a52d2e611 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 23 Sep 2025 14:31:35 -0400 Subject: [PATCH 2071/2111] PYTHON-5449 - Do not attach invalid document in exception message (#2539) --- bson/__init__.py | 2 +- bson/_cbsonmodule.c | 26 +++++++++++--------------- bson/errors.py | 13 +++++++++++++ doc/changelog.rst | 9 +++++++++ test/test_bson.py | 15 +++++++++++---- 5 files changed, 45 insertions(+), 20 deletions(-) diff --git a/bson/__init__.py b/bson/__init__.py index e677d8cfdd..d260fb876f 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1009,7 +1009,7 @@ def _dict_to_bson( try: elements.append(_element_to_bson(key, value, check_keys, opts)) except InvalidDocument as err: - raise InvalidDocument(f"Invalid document {doc} | {err}") from err + raise InvalidDocument(f"Invalid document: {err}", doc) from err except AttributeError: raise TypeError(f"encoder expected a mapping type but got: {doc!r}") from None diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index be91e41734..bee7198567 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1645,11 +1645,11 @@ static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { } -/* Update Invalid Document error message to include doc. +/* Update Invalid Document error to include doc as a property. */ void handle_invalid_doc_error(PyObject* dict) { PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; - PyObject *msg = NULL, *dict_str = NULL, *new_msg = NULL; + PyObject *msg = NULL, *new_msg = NULL, *new_evalue = NULL; PyErr_Fetch(&etype, &evalue, &etrace); PyObject *InvalidDocument = _error("InvalidDocument"); if (InvalidDocument == NULL) { @@ -1659,26 +1659,22 @@ void handle_invalid_doc_error(PyObject* dict) { if (evalue && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { PyObject *msg = PyObject_Str(evalue); if (msg) { - // Prepend doc to the existing message - PyObject *dict_str = PyObject_Str(dict); - if (dict_str == NULL) { - goto cleanup; - } - const char * dict_str_utf8 = PyUnicode_AsUTF8(dict_str); - if (dict_str_utf8 == NULL) { - goto cleanup; - } const char * msg_utf8 = PyUnicode_AsUTF8(msg); if (msg_utf8 == NULL) { goto cleanup; } - PyObject *new_msg = PyUnicode_FromFormat("Invalid document %s | %s", dict_str_utf8, msg_utf8); + PyObject *new_msg = PyUnicode_FromFormat("Invalid document: %s", msg_utf8); + if (new_msg == NULL) { + goto cleanup; + } + // Add doc to the error instance as a property. + PyObject *new_evalue = PyObject_CallFunctionObjArgs(InvalidDocument, new_msg, dict, NULL); Py_DECREF(evalue); Py_DECREF(etype); etype = InvalidDocument; InvalidDocument = NULL; - if (new_msg) { - evalue = new_msg; + if (new_evalue) { + evalue = new_evalue; } else { evalue = msg; } @@ -1689,7 +1685,7 @@ void handle_invalid_doc_error(PyObject* dict) { PyErr_Restore(etype, evalue, etrace); Py_XDECREF(msg); Py_XDECREF(InvalidDocument); - Py_XDECREF(dict_str); + Py_XDECREF(new_evalue); Py_XDECREF(new_msg); } diff --git a/bson/errors.py b/bson/errors.py index a3699e704c..ffc117f7ac 100644 --- a/bson/errors.py +++ b/bson/errors.py @@ -15,6 +15,8 @@ """Exceptions raised by the BSON package.""" from __future__ import annotations +from typing import Any, Optional + class BSONError(Exception): """Base class for all BSON exceptions.""" @@ -31,6 +33,17 @@ class InvalidStringData(BSONError): class InvalidDocument(BSONError): """Raised when trying to create a BSON object from an invalid document.""" + def __init__(self, message: str, document: Optional[Any] = None) -> None: + super().__init__(message) + self._document = document + + @property + def document(self) -> Any: + """The invalid document that caused the error. + + ..versionadded:: 4.16""" + return self._document + class InvalidId(BSONError): """Raised when trying to create an ObjectId from invalid data.""" diff --git a/doc/changelog.rst b/doc/changelog.rst index 082c22fafc..7270043d41 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -1,6 +1,15 @@ Changelog ========= +Changes in Version 4.16.0 (XXXX/XX/XX) +-------------------------------------- + +PyMongo 4.16 brings a number of changes including: + +- Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages as + doing so may leak sensitive user data. + Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`. + Changes in Version 4.15.1 (2025/09/16) -------------------------------------- diff --git a/test/test_bson.py b/test/test_bson.py index e4cf85c46c..f792db1e89 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1163,7 +1163,7 @@ def __repr__(self): ): encode({"t": Wrapper(1)}) - def test_doc_in_invalid_document_error_message(self): + def test_doc_in_invalid_document_error_as_property(self): class Wrapper: def __init__(self, val): self.val = val @@ -1173,10 +1173,11 @@ def __repr__(self): self.assertEqual("1", repr(Wrapper(1))) doc = {"t": Wrapper(1)} - with self.assertRaisesRegex(InvalidDocument, f"Invalid document {doc}"): + with self.assertRaisesRegex(InvalidDocument, "Invalid document:") as cm: encode(doc) + self.assertEqual(cm.exception.document, doc) - def test_doc_in_invalid_document_error_message_mapping(self): + def test_doc_in_invalid_document_error_as_property_mapping(self): class MyMapping(abc.Mapping): def keys(self): return ["t"] @@ -1192,6 +1193,11 @@ def __len__(self): def __iter__(self): return iter(["t"]) + def __eq__(self, other): + if isinstance(other, MyMapping): + return True + return False + class Wrapper: def __init__(self, val): self.val = val @@ -1201,8 +1207,9 @@ def __repr__(self): self.assertEqual("1", repr(Wrapper(1))) doc = MyMapping() - with self.assertRaisesRegex(InvalidDocument, f"Invalid document {doc}"): + with self.assertRaisesRegex(InvalidDocument, "Invalid document:") as cm: encode(doc) + self.assertEqual(cm.exception.document, doc) class TestCodecOptions(unittest.TestCase): From 29c4c2cc0ff772ac3a5ef9907f4974b2cef50eca Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 23 Sep 2025 14:08:13 -0500 Subject: [PATCH 2072/2111] PYTHON-5570 Do not freeze the lockfile (#2555) --- .evergreen/scripts/setup-dev-env.sh | 4 ++-- .pre-commit-config.yaml | 17 +++++++++++------ justfile | 4 +--- uv.lock | 2 +- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 6e6b5965bd..38824f32b7 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -47,13 +47,13 @@ if [ -f $HOME/.visualStudioEnv.sh ]; then SSH_TTY=1 source $HOME/.visualStudioEnv.sh set -u fi -uv sync --frozen +uv sync echo "Setting up python environment... done." # Ensure there is a pre-commit hook if there is a git checkout. if [ -d .git ] && [ ! -f .git/hooks/pre-commit ]; then - uv run --frozen pre-commit install + uv run pre-commit install fi popd > /dev/null diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ac129f95f0..d2b9d9a17a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -105,12 +105,6 @@ repos: # - test/test_client.py:188: te ==> the, be, we, to args: ["-L", "fle,fo,infinit,isnt,nin,te,aks"] -- repo: https://github.com/astral-sh/uv-pre-commit - # uv version. - rev: 0.8.17 - hooks: - - id: uv-lock - - repo: local hooks: - id: executable-shell @@ -128,3 +122,14 @@ repos: language: python require_serial: true additional_dependencies: ["shrub.py>=3.10.0", "pyyaml>=6.0.2"] + + - id: uv-lock + name: uv-lock + entry: uv lock + language: python + require_serial: true + files: ^(uv\.lock|pyproject\.toml|requirements.txt|requirements/.*\.txt)$ + pass_filenames: false + fail_fast: true + additional_dependencies: + - "uv>=0.8.4" diff --git a/justfile b/justfile index c129b2c199..4645a4a47d 100644 --- a/justfile +++ b/justfile @@ -1,7 +1,5 @@ # See https://just.systems/man/en/ for instructions set shell := ["bash", "-c"] -# Do not modify the lock file when running justfile commands. -export UV_FROZEN := "1" # Commonly used command segments. typing_run := "uv run --group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" @@ -16,7 +14,7 @@ default: [private] resync: - @uv sync --quiet --frozen + @uv sync --quiet install: bash .evergreen/scripts/setup-dev-env.sh diff --git a/uv.lock b/uv.lock index ce0367e872..5a7279e1ef 100644 --- a/uv.lock +++ b/uv.lock @@ -1309,7 +1309,7 @@ requires-dist = [ { name = "furo", marker = "extra == 'docs'", specifier = "==2025.7.19" }, { name = "importlib-metadata", marker = "python_full_version < '3.13' and extra == 'test'", specifier = ">=7.0" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, - { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.1,<2.0.0" }, { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, { name = "pymongocrypt", marker = "extra == 'encryption'", specifier = ">=1.13.0,<2.0.0" }, { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, From 51f7b408f36beb9038098366f30774ed6dd00fa3 Mon Sep 17 00:00:00 2001 From: Jib Date: Wed, 24 Sep 2025 10:27:45 -0400 Subject: [PATCH 2073/2111] PYTHON-5572: Add team members to contributors.rst (#2554) --- doc/contributors.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/contributors.rst b/doc/contributors.rst index 4a7f5424b1..08296e9595 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -103,3 +103,7 @@ The following is a list of people who have contributed to - Terry Patterson - Romain Morotti - Navjot Singh (navjots18) +- Jib Adegunloye (Jibola) +- Jeffrey A. Clark (aclark4life) +- Steven Silvester (blink1073) +- Noah Stapp (NoahStapp) From 0049dc8896a152eae57e42d614560818a56a18a5 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 24 Sep 2025 13:23:28 -0400 Subject: [PATCH 2074/2111] PYTHON-2390 - Retryable reads use the same implicit session (#2544) --- pymongo/asynchronous/aggregation.py | 3 -- pymongo/asynchronous/change_stream.py | 9 ++--- pymongo/asynchronous/client_bulk.py | 3 +- pymongo/asynchronous/client_session.py | 6 +++- pymongo/asynchronous/collection.py | 13 ++----- pymongo/asynchronous/command_cursor.py | 20 +++++------ pymongo/asynchronous/cursor.py | 18 +++++----- pymongo/asynchronous/database.py | 13 +++---- pymongo/asynchronous/mongo_client.py | 36 +++++++++---------- pymongo/synchronous/aggregation.py | 3 -- pymongo/synchronous/change_stream.py | 9 ++--- pymongo/synchronous/client_bulk.py | 3 +- pymongo/synchronous/client_session.py | 6 +++- pymongo/synchronous/collection.py | 13 ++----- pymongo/synchronous/command_cursor.py | 20 +++++------ pymongo/synchronous/cursor.py | 18 +++++----- pymongo/synchronous/database.py | 13 +++---- pymongo/synchronous/mongo_client.py | 36 +++++++++---------- test/asynchronous/test_retryable_reads.py | 43 +++++++++++++++++++++++ test/test_retryable_reads.py | 43 +++++++++++++++++++++++ 20 files changed, 198 insertions(+), 130 deletions(-) diff --git a/pymongo/asynchronous/aggregation.py b/pymongo/asynchronous/aggregation.py index 059d698772..6ca60ad9c3 100644 --- a/pymongo/asynchronous/aggregation.py +++ b/pymongo/asynchronous/aggregation.py @@ -50,7 +50,6 @@ def __init__( cursor_class: type[AsyncCommandCursor[Any]], pipeline: _Pipeline, options: MutableMapping[str, Any], - explicit_session: bool, let: Optional[Mapping[str, Any]] = None, user_fields: Optional[MutableMapping[str, Any]] = None, result_processor: Optional[Callable[[Mapping[str, Any], AsyncConnection], None]] = None, @@ -92,7 +91,6 @@ def __init__( self._options["cursor"]["batchSize"] = self._batch_size self._cursor_class = cursor_class - self._explicit_session = explicit_session self._user_fields = user_fields self._result_processor = result_processor @@ -197,7 +195,6 @@ async def get_cursor( batch_size=self._batch_size or 0, max_await_time_ms=self._max_await_time_ms, session=session, - explicit_session=self._explicit_session, comment=self._options.get("comment"), ) await cmd_cursor._maybe_pin_connection(conn) diff --git a/pymongo/asynchronous/change_stream.py b/pymongo/asynchronous/change_stream.py index 3940111df2..b2b78b0660 100644 --- a/pymongo/asynchronous/change_stream.py +++ b/pymongo/asynchronous/change_stream.py @@ -236,7 +236,7 @@ def _process_result(self, result: Mapping[str, Any], conn: AsyncConnection) -> N ) async def _run_aggregation_cmd( - self, session: Optional[AsyncClientSession], explicit_session: bool + self, session: Optional[AsyncClientSession] ) -> AsyncCommandCursor: # type: ignore[type-arg] """Run the full aggregation pipeline for this AsyncChangeStream and return the corresponding AsyncCommandCursor. @@ -246,7 +246,6 @@ async def _run_aggregation_cmd( AsyncCommandCursor, self._aggregation_pipeline(), self._command_options(), - explicit_session, result_processor=self._process_result, comment=self._comment, ) @@ -258,10 +257,8 @@ async def _run_aggregation_cmd( ) async def _create_cursor(self) -> AsyncCommandCursor: # type: ignore[type-arg] - async with self._client._tmp_session(self._session, close=False) as s: - return await self._run_aggregation_cmd( - session=s, explicit_session=self._session is not None - ) + async with self._client._tmp_session(self._session) as s: + return await self._run_aggregation_cmd(session=s) async def _resume(self) -> None: """Reestablish this change stream after a resumable error.""" diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py index 45812b3400..151942c8a8 100644 --- a/pymongo/asynchronous/client_bulk.py +++ b/pymongo/asynchronous/client_bulk.py @@ -440,6 +440,8 @@ async def _process_results_cursor( ) -> None: """Internal helper for processing the server reply command cursor.""" if result.get("cursor"): + if session: + session._leave_alive = True coll = AsyncCollection( database=AsyncDatabase(self.client, "admin"), name="$cmd.bulkWrite", @@ -449,7 +451,6 @@ async def _process_results_cursor( result["cursor"], conn.address, session=session, - explicit_session=session is not None, comment=self.comment, ) await cmd_cursor._maybe_pin_connection(conn) diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index be02295cea..8674e98447 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -513,6 +513,10 @@ def __init__( # Is this an implicitly created session? self._implicit = implicit self._transaction = _Transaction(None, client) + # Is this session attached to a cursor? + self._attached_to_cursor = False + # Should we leave the session alive when the cursor is closed? + self._leave_alive = False async def end_session(self) -> None: """Finish this session. If a transaction has started, abort it. @@ -535,7 +539,7 @@ async def _end_session(self, lock: bool) -> None: def _end_implicit_session(self) -> None: # Implicit sessions can't be part of transactions or pinned connections - if self._server_session is not None: + if not self._leave_alive and self._server_session is not None: self._client._return_server_session(self._server_session) self._server_session = None diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 064231ccfc..6af1f4f782 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -2549,7 +2549,6 @@ async def _list_indexes( self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), ) read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - explicit_session = session is not None async def _cmd( session: Optional[AsyncClientSession], @@ -2576,13 +2575,12 @@ async def _cmd( cursor, conn.address, session=session, - explicit_session=explicit_session, comment=cmd.get("comment"), ) await cmd_cursor._maybe_pin_connection(conn) return cmd_cursor - async with self._database.client._tmp_session(session, False) as s: + async with self._database.client._tmp_session(session) as s: return await self._database.client._retryable_read( _cmd, read_pref, s, operation=_Op.LIST_INDEXES ) @@ -2678,7 +2676,6 @@ async def list_search_indexes( AsyncCommandCursor, pipeline, kwargs, - explicit_session=session is not None, comment=comment, user_fields={"cursor": {"firstBatch": 1}}, ) @@ -2900,7 +2897,6 @@ async def _aggregate( pipeline: _Pipeline, cursor_class: Type[AsyncCommandCursor], # type: ignore[type-arg] session: Optional[AsyncClientSession], - explicit_session: bool, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -2912,7 +2908,6 @@ async def _aggregate( cursor_class, pipeline, kwargs, - explicit_session, let, user_fields={"cursor": {"firstBatch": 1}}, ) @@ -3018,13 +3013,12 @@ async def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - async with self._database.client._tmp_session(session, close=False) as s: + async with self._database.client._tmp_session(session) as s: return await self._aggregate( _CollectionAggregationCommand, pipeline, AsyncCommandCursor, session=s, - explicit_session=session is not None, let=let, comment=comment, **kwargs, @@ -3065,7 +3059,7 @@ async def aggregate_raw_batches( raise InvalidOperation("aggregate_raw_batches does not support auto encryption") if comment is not None: kwargs["comment"] = comment - async with self._database.client._tmp_session(session, close=False) as s: + async with self._database.client._tmp_session(session) as s: return cast( AsyncRawBatchCursor[_DocumentType], await self._aggregate( @@ -3073,7 +3067,6 @@ async def aggregate_raw_batches( pipeline, AsyncRawBatchCommandCursor, session=s, - explicit_session=session is not None, **kwargs, ), ) diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py index db7c2b6638..e18b3a330e 100644 --- a/pymongo/asynchronous/command_cursor.py +++ b/pymongo/asynchronous/command_cursor.py @@ -64,7 +64,6 @@ def __init__( batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional[AsyncClientSession] = None, - explicit_session: bool = False, comment: Any = None, ) -> None: """Create a new command cursor.""" @@ -80,7 +79,8 @@ def __init__( self._max_await_time_ms = max_await_time_ms self._timeout = self._collection.database.client.options.timeout self._session = session - self._explicit_session = explicit_session + if self._session is not None: + self._session._attached_to_cursor = True self._killed = self._id == 0 self._comment = comment if self._killed: @@ -197,7 +197,7 @@ def session(self) -> Optional[AsyncClientSession]: .. versionadded:: 3.6 """ - if self._explicit_session: + if self._session and not self._session._implicit: return self._session return None @@ -218,9 +218,10 @@ def _die_no_lock(self) -> None: """Closes this cursor without acquiring a lock.""" cursor_id, address = self._prepare_to_die() self._collection.database.client._cleanup_cursor_no_lock( - cursor_id, address, self._sock_mgr, self._session, self._explicit_session + cursor_id, address, self._sock_mgr, self._session ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None @@ -232,14 +233,15 @@ async def _die_lock(self) -> None: address, self._sock_mgr, self._session, - self._explicit_session, ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None def _end_session(self) -> None: - if self._session and not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session._end_implicit_session() self._session = None @@ -430,7 +432,6 @@ def __init__( batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional[AsyncClientSession] = None, - explicit_session: bool = False, comment: Any = None, ) -> None: """Create a new cursor / iterator over raw batches of BSON data. @@ -449,7 +450,6 @@ def __init__( batch_size, max_await_time_ms, session, - explicit_session, comment, ) diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index d9fdd576f4..df060a4fa9 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -138,10 +138,9 @@ def __init__( if session: self._session = session - self._explicit_session = True + self._session._attached_to_cursor = True else: self._session = None - self._explicit_session = False spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) @@ -150,7 +149,7 @@ def __init__( if not isinstance(limit, int): raise TypeError(f"limit must be an instance of int, not {type(limit)}") validate_boolean("no_cursor_timeout", no_cursor_timeout) - if no_cursor_timeout and not self._explicit_session: + if no_cursor_timeout and self._session and self._session._implicit: warnings.warn( "use an explicit session with no_cursor_timeout=True " "otherwise the cursor may still timeout after " @@ -283,7 +282,7 @@ def clone(self) -> AsyncCursor[_DocumentType]: def _clone(self, deepcopy: bool = True, base: Optional[AsyncCursor] = None) -> AsyncCursor: # type: ignore[type-arg] """Internal clone helper.""" if not base: - if self._explicit_session: + if self._session and not self._session._implicit: base = self._clone_base(self._session) else: base = self._clone_base(None) @@ -945,7 +944,7 @@ def session(self) -> Optional[AsyncClientSession]: .. versionadded:: 3.6 """ - if self._explicit_session: + if self._session and not self._session._implicit: return self._session return None @@ -1034,9 +1033,10 @@ def _die_no_lock(self) -> None: cursor_id, address = self._prepare_to_die(already_killed) self._collection.database.client._cleanup_cursor_no_lock( - cursor_id, address, self._sock_mgr, self._session, self._explicit_session + cursor_id, address, self._sock_mgr, self._session ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None @@ -1054,9 +1054,9 @@ async def _die_lock(self) -> None: address, self._sock_mgr, self._session, - self._explicit_session, ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py index f70c2b403f..8e0afc9dc9 100644 --- a/pymongo/asynchronous/database.py +++ b/pymongo/asynchronous/database.py @@ -611,6 +611,8 @@ async def create_collection( common.validate_is_mapping("clusteredIndex", clustered_index) async with self._client._tmp_session(session) as s: + if s and not s.in_transaction: + s._leave_alive = True # Skip this check in a transaction where listCollections is not # supported. if ( @@ -619,6 +621,8 @@ async def create_collection( and name in await self._list_collection_names(filter={"name": name}, session=s) ): raise CollectionInvalid("collection %s already exists" % name) + if s: + s._leave_alive = False coll = AsyncCollection( self, name, @@ -699,13 +703,12 @@ async def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - async with self.client._tmp_session(session, close=False) as s: + async with self.client._tmp_session(session) as s: cmd = _DatabaseAggregationCommand( self, AsyncCommandCursor, pipeline, kwargs, - session is not None, user_fields={"cursor": {"firstBatch": 1}}, ) return await self.client._retryable_read( @@ -1011,7 +1014,7 @@ async def cursor_command( else: command_name = next(iter(command)) - async with self._client._tmp_session(session, close=False) as tmp_session: + async with self._client._tmp_session(session) as tmp_session: opts = codec_options or DEFAULT_CODEC_OPTIONS if read_preference is None: @@ -1043,7 +1046,6 @@ async def cursor_command( conn.address, max_await_time_ms=max_await_time_ms, session=tmp_session, - explicit_session=session is not None, comment=comment, ) await cmd_cursor._maybe_pin_connection(conn) @@ -1089,7 +1091,7 @@ async def _list_collections( ) cmd = {"listCollections": 1, "cursor": {}} cmd.update(kwargs) - async with self._client._tmp_session(session, close=False) as tmp_session: + async with self._client._tmp_session(session) as tmp_session: cursor = ( await self._command(conn, cmd, read_preference=read_preference, session=tmp_session) )["cursor"] @@ -1098,7 +1100,6 @@ async def _list_collections( cursor, conn.address, session=tmp_session, - explicit_session=session is not None, comment=cmd.get("comment"), ) await cmd_cursor._maybe_pin_connection(conn) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index b616647791..d9bf808d55 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -2048,17 +2048,18 @@ async def _retryable_read( retryable = bool( retryable and self.options.retry_reads and not (session and session.in_transaction) ) - return await self._retry_internal( - func, - session, - None, - operation, - is_read=True, - address=address, - read_pref=read_pref, - retryable=retryable, - operation_id=operation_id, - ) + async with self._tmp_session(session) as s: + return await self._retry_internal( + func, + s, + None, + operation, + is_read=True, + address=address, + read_pref=read_pref, + retryable=retryable, + operation_id=operation_id, + ) async def _retryable_write( self, @@ -2091,7 +2092,6 @@ def _cleanup_cursor_no_lock( address: Optional[_CursorAddress], conn_mgr: _ConnectionManager, session: Optional[AsyncClientSession], - explicit_session: bool, ) -> None: """Cleanup a cursor from __del__ without locking. @@ -2106,7 +2106,7 @@ def _cleanup_cursor_no_lock( # The cursor will be closed later in a different session. if cursor_id or conn_mgr: self._close_cursor_soon(cursor_id, address, conn_mgr) - if session and not explicit_session: + if session and session._implicit and not session._leave_alive: session._end_implicit_session() async def _cleanup_cursor_lock( @@ -2115,7 +2115,6 @@ async def _cleanup_cursor_lock( address: Optional[_CursorAddress], conn_mgr: _ConnectionManager, session: Optional[AsyncClientSession], - explicit_session: bool, ) -> None: """Cleanup a cursor from cursor.close() using a lock. @@ -2127,7 +2126,6 @@ async def _cleanup_cursor_lock( :param address: The _CursorAddress. :param conn_mgr: The _ConnectionManager for the pinned connection or None. :param session: The cursor's session. - :param explicit_session: True if the session was passed explicitly. """ if cursor_id: if conn_mgr and conn_mgr.more_to_come: @@ -2140,7 +2138,7 @@ async def _cleanup_cursor_lock( await self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) if conn_mgr: await conn_mgr.close() - if session and not explicit_session: + if session and session._implicit and not session._leave_alive: session._end_implicit_session() async def _close_cursor_now( @@ -2221,7 +2219,7 @@ async def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: - await self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) + await self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None) except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it @@ -2266,7 +2264,7 @@ def _return_server_session( @contextlib.asynccontextmanager async def _tmp_session( - self, session: Optional[client_session.AsyncClientSession], close: bool = True + self, session: Optional[client_session.AsyncClientSession] ) -> AsyncGenerator[Optional[client_session.AsyncClientSession], None]: """If provided session is None, lend a temporary session.""" if session is not None: @@ -2291,7 +2289,7 @@ async def _tmp_session( raise finally: # Call end_session when we exit this scope. - if close: + if not s._attached_to_cursor: await s.end_session() else: yield None diff --git a/pymongo/synchronous/aggregation.py b/pymongo/synchronous/aggregation.py index 9845f28b08..486768ab7d 100644 --- a/pymongo/synchronous/aggregation.py +++ b/pymongo/synchronous/aggregation.py @@ -50,7 +50,6 @@ def __init__( cursor_class: type[CommandCursor[Any]], pipeline: _Pipeline, options: MutableMapping[str, Any], - explicit_session: bool, let: Optional[Mapping[str, Any]] = None, user_fields: Optional[MutableMapping[str, Any]] = None, result_processor: Optional[Callable[[Mapping[str, Any], Connection], None]] = None, @@ -92,7 +91,6 @@ def __init__( self._options["cursor"]["batchSize"] = self._batch_size self._cursor_class = cursor_class - self._explicit_session = explicit_session self._user_fields = user_fields self._result_processor = result_processor @@ -197,7 +195,6 @@ def get_cursor( batch_size=self._batch_size or 0, max_await_time_ms=self._max_await_time_ms, session=session, - explicit_session=self._explicit_session, comment=self._options.get("comment"), ) cmd_cursor._maybe_pin_connection(conn) diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py index f5f6352186..7e34d7b848 100644 --- a/pymongo/synchronous/change_stream.py +++ b/pymongo/synchronous/change_stream.py @@ -235,9 +235,7 @@ def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: f"response : {result!r}" ) - def _run_aggregation_cmd( - self, session: Optional[ClientSession], explicit_session: bool - ) -> CommandCursor: # type: ignore[type-arg] + def _run_aggregation_cmd(self, session: Optional[ClientSession]) -> CommandCursor: # type: ignore[type-arg] """Run the full aggregation pipeline for this ChangeStream and return the corresponding CommandCursor. """ @@ -246,7 +244,6 @@ def _run_aggregation_cmd( CommandCursor, self._aggregation_pipeline(), self._command_options(), - explicit_session, result_processor=self._process_result, comment=self._comment, ) @@ -258,8 +255,8 @@ def _run_aggregation_cmd( ) def _create_cursor(self) -> CommandCursor: # type: ignore[type-arg] - with self._client._tmp_session(self._session, close=False) as s: - return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) + with self._client._tmp_session(self._session) as s: + return self._run_aggregation_cmd(session=s) def _resume(self) -> None: """Reestablish this change stream after a resumable error.""" diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py index 1076ceba99..a606d028e1 100644 --- a/pymongo/synchronous/client_bulk.py +++ b/pymongo/synchronous/client_bulk.py @@ -438,6 +438,8 @@ def _process_results_cursor( ) -> None: """Internal helper for processing the server reply command cursor.""" if result.get("cursor"): + if session: + session._leave_alive = True coll = Collection( database=Database(self.client, "admin"), name="$cmd.bulkWrite", @@ -447,7 +449,6 @@ def _process_results_cursor( result["cursor"], conn.address, session=session, - explicit_session=session is not None, comment=self.comment, ) cmd_cursor._maybe_pin_connection(conn) diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py index 72a5b8e885..9b547dc946 100644 --- a/pymongo/synchronous/client_session.py +++ b/pymongo/synchronous/client_session.py @@ -512,6 +512,10 @@ def __init__( # Is this an implicitly created session? self._implicit = implicit self._transaction = _Transaction(None, client) + # Is this session attached to a cursor? + self._attached_to_cursor = False + # Should we leave the session alive when the cursor is closed? + self._leave_alive = False def end_session(self) -> None: """Finish this session. If a transaction has started, abort it. @@ -534,7 +538,7 @@ def _end_session(self, lock: bool) -> None: def _end_implicit_session(self) -> None: # Implicit sessions can't be part of transactions or pinned connections - if self._server_session is not None: + if not self._leave_alive and self._server_session is not None: self._client._return_server_session(self._server_session) self._server_session = None diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index e5cc816cd3..b68e4befed 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -2546,7 +2546,6 @@ def _list_indexes( self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), ) read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - explicit_session = session is not None def _cmd( session: Optional[ClientSession], @@ -2573,13 +2572,12 @@ def _cmd( cursor, conn.address, session=session, - explicit_session=explicit_session, comment=cmd.get("comment"), ) cmd_cursor._maybe_pin_connection(conn) return cmd_cursor - with self._database.client._tmp_session(session, False) as s: + with self._database.client._tmp_session(session) as s: return self._database.client._retryable_read( _cmd, read_pref, s, operation=_Op.LIST_INDEXES ) @@ -2675,7 +2673,6 @@ def list_search_indexes( CommandCursor, pipeline, kwargs, - explicit_session=session is not None, comment=comment, user_fields={"cursor": {"firstBatch": 1}}, ) @@ -2893,7 +2890,6 @@ def _aggregate( pipeline: _Pipeline, cursor_class: Type[CommandCursor], # type: ignore[type-arg] session: Optional[ClientSession], - explicit_session: bool, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None, **kwargs: Any, @@ -2905,7 +2901,6 @@ def _aggregate( cursor_class, pipeline, kwargs, - explicit_session, let, user_fields={"cursor": {"firstBatch": 1}}, ) @@ -3011,13 +3006,12 @@ def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - with self._database.client._tmp_session(session, close=False) as s: + with self._database.client._tmp_session(session) as s: return self._aggregate( _CollectionAggregationCommand, pipeline, CommandCursor, session=s, - explicit_session=session is not None, let=let, comment=comment, **kwargs, @@ -3058,7 +3052,7 @@ def aggregate_raw_batches( raise InvalidOperation("aggregate_raw_batches does not support auto encryption") if comment is not None: kwargs["comment"] = comment - with self._database.client._tmp_session(session, close=False) as s: + with self._database.client._tmp_session(session) as s: return cast( RawBatchCursor[_DocumentType], self._aggregate( @@ -3066,7 +3060,6 @@ def aggregate_raw_batches( pipeline, RawBatchCommandCursor, session=s, - explicit_session=session is not None, **kwargs, ), ) diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py index bcdeed5f94..a09a67efc9 100644 --- a/pymongo/synchronous/command_cursor.py +++ b/pymongo/synchronous/command_cursor.py @@ -64,7 +64,6 @@ def __init__( batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional[ClientSession] = None, - explicit_session: bool = False, comment: Any = None, ) -> None: """Create a new command cursor.""" @@ -80,7 +79,8 @@ def __init__( self._max_await_time_ms = max_await_time_ms self._timeout = self._collection.database.client.options.timeout self._session = session - self._explicit_session = explicit_session + if self._session is not None: + self._session._attached_to_cursor = True self._killed = self._id == 0 self._comment = comment if self._killed: @@ -197,7 +197,7 @@ def session(self) -> Optional[ClientSession]: .. versionadded:: 3.6 """ - if self._explicit_session: + if self._session and not self._session._implicit: return self._session return None @@ -218,9 +218,10 @@ def _die_no_lock(self) -> None: """Closes this cursor without acquiring a lock.""" cursor_id, address = self._prepare_to_die() self._collection.database.client._cleanup_cursor_no_lock( - cursor_id, address, self._sock_mgr, self._session, self._explicit_session + cursor_id, address, self._sock_mgr, self._session ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None @@ -232,14 +233,15 @@ def _die_lock(self) -> None: address, self._sock_mgr, self._session, - self._explicit_session, ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None def _end_session(self) -> None: - if self._session and not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session._end_implicit_session() self._session = None @@ -430,7 +432,6 @@ def __init__( batch_size: int = 0, max_await_time_ms: Optional[int] = None, session: Optional[ClientSession] = None, - explicit_session: bool = False, comment: Any = None, ) -> None: """Create a new cursor / iterator over raw batches of BSON data. @@ -449,7 +450,6 @@ def __init__( batch_size, max_await_time_ms, session, - explicit_session, comment, ) diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 3dd550f4d5..2cecc5b38a 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -138,10 +138,9 @@ def __init__( if session: self._session = session - self._explicit_session = True + self._session._attached_to_cursor = True else: self._session = None - self._explicit_session = False spec: Mapping[str, Any] = filter or {} validate_is_mapping("filter", spec) @@ -150,7 +149,7 @@ def __init__( if not isinstance(limit, int): raise TypeError(f"limit must be an instance of int, not {type(limit)}") validate_boolean("no_cursor_timeout", no_cursor_timeout) - if no_cursor_timeout and not self._explicit_session: + if no_cursor_timeout and self._session and self._session._implicit: warnings.warn( "use an explicit session with no_cursor_timeout=True " "otherwise the cursor may still timeout after " @@ -283,7 +282,7 @@ def clone(self) -> Cursor[_DocumentType]: def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: # type: ignore[type-arg] """Internal clone helper.""" if not base: - if self._explicit_session: + if self._session and not self._session._implicit: base = self._clone_base(self._session) else: base = self._clone_base(None) @@ -943,7 +942,7 @@ def session(self) -> Optional[ClientSession]: .. versionadded:: 3.6 """ - if self._explicit_session: + if self._session and not self._session._implicit: return self._session return None @@ -1032,9 +1031,10 @@ def _die_no_lock(self) -> None: cursor_id, address = self._prepare_to_die(already_killed) self._collection.database.client._cleanup_cursor_no_lock( - cursor_id, address, self._sock_mgr, self._session, self._explicit_session + cursor_id, address, self._sock_mgr, self._session ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None @@ -1052,9 +1052,9 @@ def _die_lock(self) -> None: address, self._sock_mgr, self._session, - self._explicit_session, ) - if not self._explicit_session: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False self._session = None self._sock_mgr = None diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py index e30f97817c..0d129ba972 100644 --- a/pymongo/synchronous/database.py +++ b/pymongo/synchronous/database.py @@ -611,6 +611,8 @@ def create_collection( common.validate_is_mapping("clusteredIndex", clustered_index) with self._client._tmp_session(session) as s: + if s and not s.in_transaction: + s._leave_alive = True # Skip this check in a transaction where listCollections is not # supported. if ( @@ -619,6 +621,8 @@ def create_collection( and name in self._list_collection_names(filter={"name": name}, session=s) ): raise CollectionInvalid("collection %s already exists" % name) + if s: + s._leave_alive = False coll = Collection( self, name, @@ -699,13 +703,12 @@ def aggregate( .. _aggregate command: https://mongodb.com/docs/manual/reference/command/aggregate """ - with self.client._tmp_session(session, close=False) as s: + with self.client._tmp_session(session) as s: cmd = _DatabaseAggregationCommand( self, CommandCursor, pipeline, kwargs, - session is not None, user_fields={"cursor": {"firstBatch": 1}}, ) return self.client._retryable_read( @@ -1009,7 +1012,7 @@ def cursor_command( else: command_name = next(iter(command)) - with self._client._tmp_session(session, close=False) as tmp_session: + with self._client._tmp_session(session) as tmp_session: opts = codec_options or DEFAULT_CODEC_OPTIONS if read_preference is None: @@ -1039,7 +1042,6 @@ def cursor_command( conn.address, max_await_time_ms=max_await_time_ms, session=tmp_session, - explicit_session=session is not None, comment=comment, ) cmd_cursor._maybe_pin_connection(conn) @@ -1085,7 +1087,7 @@ def _list_collections( ) cmd = {"listCollections": 1, "cursor": {}} cmd.update(kwargs) - with self._client._tmp_session(session, close=False) as tmp_session: + with self._client._tmp_session(session) as tmp_session: cursor = ( self._command(conn, cmd, read_preference=read_preference, session=tmp_session) )["cursor"] @@ -1094,7 +1096,6 @@ def _list_collections( cursor, conn.address, session=tmp_session, - explicit_session=session is not None, comment=cmd.get("comment"), ) cmd_cursor._maybe_pin_connection(conn) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index ef0663584c..6e716402f4 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -2044,17 +2044,18 @@ def _retryable_read( retryable = bool( retryable and self.options.retry_reads and not (session and session.in_transaction) ) - return self._retry_internal( - func, - session, - None, - operation, - is_read=True, - address=address, - read_pref=read_pref, - retryable=retryable, - operation_id=operation_id, - ) + with self._tmp_session(session) as s: + return self._retry_internal( + func, + s, + None, + operation, + is_read=True, + address=address, + read_pref=read_pref, + retryable=retryable, + operation_id=operation_id, + ) def _retryable_write( self, @@ -2087,7 +2088,6 @@ def _cleanup_cursor_no_lock( address: Optional[_CursorAddress], conn_mgr: _ConnectionManager, session: Optional[ClientSession], - explicit_session: bool, ) -> None: """Cleanup a cursor from __del__ without locking. @@ -2102,7 +2102,7 @@ def _cleanup_cursor_no_lock( # The cursor will be closed later in a different session. if cursor_id or conn_mgr: self._close_cursor_soon(cursor_id, address, conn_mgr) - if session and not explicit_session: + if session and session._implicit and not session._leave_alive: session._end_implicit_session() def _cleanup_cursor_lock( @@ -2111,7 +2111,6 @@ def _cleanup_cursor_lock( address: Optional[_CursorAddress], conn_mgr: _ConnectionManager, session: Optional[ClientSession], - explicit_session: bool, ) -> None: """Cleanup a cursor from cursor.close() using a lock. @@ -2123,7 +2122,6 @@ def _cleanup_cursor_lock( :param address: The _CursorAddress. :param conn_mgr: The _ConnectionManager for the pinned connection or None. :param session: The cursor's session. - :param explicit_session: True if the session was passed explicitly. """ if cursor_id: if conn_mgr and conn_mgr.more_to_come: @@ -2136,7 +2134,7 @@ def _cleanup_cursor_lock( self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) if conn_mgr: conn_mgr.close() - if session and not explicit_session: + if session and session._implicit and not session._leave_alive: session._end_implicit_session() def _close_cursor_now( @@ -2217,7 +2215,7 @@ def _process_kill_cursors(self) -> None: for address, cursor_id, conn_mgr in pinned_cursors: try: - self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None, False) + self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None) except Exception as exc: if isinstance(exc, InvalidOperation) and self._topology._closed: # Raise the exception when client is closed so that it @@ -2262,7 +2260,7 @@ def _return_server_session( @contextlib.contextmanager def _tmp_session( - self, session: Optional[client_session.ClientSession], close: bool = True + self, session: Optional[client_session.ClientSession] ) -> Generator[Optional[client_session.ClientSession], None]: """If provided session is None, lend a temporary session.""" if session is not None: @@ -2287,7 +2285,7 @@ def _tmp_session( raise finally: # Call end_session when we exit this scope. - if close: + if not s._attached_to_cursor: s.end_session() else: yield None diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py index 26454b3823..47ac91b0f5 100644 --- a/test/asynchronous/test_retryable_reads.py +++ b/test/asynchronous/test_retryable_reads.py @@ -218,6 +218,49 @@ async def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are # Assert that both events occurred on the same mongos. assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_the_same_implicit_session(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + await client.t.t.insert_one({"x": 1}) + + commands = [ + ("aggregate", lambda: client.t.t.count_documents({})), + ("aggregate", lambda: client.t.t.aggregate([{"$match": {}}])), + ("count", lambda: client.t.t.estimated_document_count()), + ("distinct", lambda: client.t.t.distinct("x")), + ("find", lambda: client.t.t.find_one({})), + ("listDatabases", lambda: client.list_databases()), + ("listCollections", lambda: client.t.list_collections()), + ("listIndexes", lambda: client.t.t.list_indexes()), + ] + + for command_name, operation in commands: + listener.reset() + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": [command_name], "errorCode": 6}, + } + + async with self.fail_point(fail_command): + await operation() + + # Assert that both events occurred on the same session. + command_docs = [ + event.command + for event in listener.started_events + if event.command_name == command_name + ] + self.assertEqual(len(command_docs), 2) + self.assertEqual(command_docs[0]["lsid"], command_docs[1]["lsid"]) + self.assertIsNot(command_docs[0], command_docs[1]) + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index fb8a374dac..c9f72ae547 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -216,6 +216,49 @@ def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_avail # Assert that both events occurred on the same mongos. assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_the_same_implicit_session(self): + listener = OvertCommandListener() + client = self.rs_or_single_client( + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + client.t.t.insert_one({"x": 1}) + + commands = [ + ("aggregate", lambda: client.t.t.count_documents({})), + ("aggregate", lambda: client.t.t.aggregate([{"$match": {}}])), + ("count", lambda: client.t.t.estimated_document_count()), + ("distinct", lambda: client.t.t.distinct("x")), + ("find", lambda: client.t.t.find_one({})), + ("listDatabases", lambda: client.list_databases()), + ("listCollections", lambda: client.t.list_collections()), + ("listIndexes", lambda: client.t.t.list_indexes()), + ] + + for command_name, operation in commands: + listener.reset() + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": [command_name], "errorCode": 6}, + } + + with self.fail_point(fail_command): + operation() + + # Assert that both events occurred on the same session. + command_docs = [ + event.command + for event in listener.started_events + if event.command_name == command_name + ] + self.assertEqual(len(command_docs), 2) + self.assertEqual(command_docs[0]["lsid"], command_docs[1]["lsid"]) + self.assertIsNot(command_docs[0], command_docs[1]) + if __name__ == "__main__": unittest.main() From 9e64ed1bd84e2ab5c9cde6cd8ee2b6f432d320cf Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 24 Sep 2025 14:07:51 -0400 Subject: [PATCH 2075/2111] PYTHON-4755 - Stop supporting and testing against Eventlet (#2557) --- .evergreen/generated_configs/variants.yml | 8 -------- .evergreen/scripts/generate_config.py | 6 +----- .evergreen/scripts/run_tests.py | 8 +------- .evergreen/scripts/utils.py | 2 +- doc/changelog.rst | 2 ++ pymongo/asynchronous/pool.py | 2 +- pymongo/pool_shared.py | 12 +++++------- pymongo/synchronous/pool.py | 2 +- pyproject.toml | 4 ---- test/asynchronous/test_encryption.py | 2 +- test/test_encryption.py | 2 +- test/test_fork.py | 2 +- test/utils_shared.py | 9 +-------- uv.lock | 19 +------------------ 14 files changed, 17 insertions(+), 63 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 5e769173f9..dbfec82f93 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -319,14 +319,6 @@ buildvariants: tags: [] # Green framework tests - - name: green-eventlet-rhel8 - tasks: - - name: .test-standard .python-3.9 .sync - display_name: Green Eventlet RHEL8 - run_on: - - rhel87-small - expansions: - GREEN_FRAMEWORK: eventlet - name: green-gevent-rhel8 tasks: - name: .test-standard .sync diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index a04a64d30a..7b17b127f4 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -300,12 +300,8 @@ def create_stable_api_variants(): def create_green_framework_variants(): variants = [] host = DEFAULT_HOST - for framework in ["eventlet", "gevent"]: + for framework in ["gevent"]: tasks = [".test-standard .sync"] - if framework == "eventlet": - # Eventlet has issues with dnspython > 2.0 and newer versions of CPython - # https://jira.mongodb.org/browse/PYTHON-5284 - tasks = [".test-standard .python-3.9 .sync"] expansions = dict(GREEN_FRAMEWORK=framework) display_name = get_variant_name(f"Green {framework.capitalize()}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index 3a1c15a41b..fd4fd13e5b 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -67,13 +67,7 @@ def handle_perf(start_time: datetime): def handle_green_framework() -> None: - if GREEN_FRAMEWORK == "eventlet": - import eventlet - - # https://github.com/eventlet/eventlet/issues/401 - eventlet.sleep() - eventlet.monkey_patch() - elif GREEN_FRAMEWORK == "gevent": + if GREEN_FRAMEWORK == "gevent": from gevent import monkey monkey.patch_all() diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 8d1b466b6c..50894ff634 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -104,7 +104,7 @@ def get_test_options( parser.add_argument( "--green-framework", nargs=1, - choices=["eventlet", "gevent"], + choices=["gevent"], help="Optional green framework to test against.", ) parser.add_argument( diff --git a/doc/changelog.rst b/doc/changelog.rst index 7270043d41..6dcb80497b 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,8 @@ PyMongo 4.16 brings a number of changes including: - Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages as doing so may leak sensitive user data. Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`. +- Removed support for Eventlet. + Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. Changes in Version 4.15.1 (2025/09/16) -------------------------------------- diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py index 196ec9040f..f521091e3c 100644 --- a/pymongo/asynchronous/pool.py +++ b/pymongo/asynchronous/pool.py @@ -628,7 +628,7 @@ async def _raise_connection_failure(self, error: BaseException) -> NoReturn: # signals and throws KeyboardInterrupt into the current frame on the # main thread. # - # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, + # But in Gevent, the polling mechanism (epoll, kqueue, # ..) is called in Python code, which experiences the signal as a # KeyboardInterrupt from the start, rather than as an initial # socket.error, so we catch that, close the socket, and reraise it. diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py index ac562af542..8db26ccead 100644 --- a/pymongo/pool_shared.py +++ b/pymongo/pool_shared.py @@ -138,13 +138,11 @@ def _raise_connection_failure( msg = msg_prefix + msg if "configured timeouts" not in msg: msg += format_timeout_details(timeout_details) - if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) from error - elif isinstance(error, SSLErrors) and "timed out" in str(error): - # Eventlet does not distinguish TLS network timeouts from other - # SSLErrors (https://github.com/eventlet/eventlet/issues/692). - # Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised. + if ( + isinstance(error, socket.timeout) + or isinstance(error, SSLErrors) + and "timed out" in str(error) + ): raise NetworkTimeout(msg) from error else: raise AutoReconnect(msg) from error diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py index f7f6a26c68..66258fda18 100644 --- a/pymongo/synchronous/pool.py +++ b/pymongo/synchronous/pool.py @@ -626,7 +626,7 @@ def _raise_connection_failure(self, error: BaseException) -> NoReturn: # signals and throws KeyboardInterrupt into the current frame on the # main thread. # - # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, + # But in Gevent, the polling mechanism (epoll, kqueue, # ..) is called in Python code, which experiences the signal as a # KeyboardInterrupt from the start, rather than as an initial # socket.error, so we catch that, close the socket, and reraise it. diff --git a/pyproject.toml b/pyproject.toml index 53fbfc8c1d..2130c61a96 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,7 +52,6 @@ dev = [ pip = ["pip"] # TODO: PYTHON-5464 gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] -eventlet = ["eventlet"] coverage = [ "pytest-cov", "coverage>=5,<=7.10.6" @@ -113,15 +112,12 @@ filterwarnings = [ "module:.*WindowsSelectorEventLoopPolicy:DeprecationWarning", "module:.*et_event_loop_policy:DeprecationWarning", # TODO: Remove as part of PYTHON-3923. - "module:unclosed Date: Wed, 24 Sep 2025 11:42:14 -0700 Subject: [PATCH 2076/2111] PYTHON-5563: Change most tasks to run daily instead of weekly (#2556) --- .evergreen/generated_configs/variants.yml | 48 ++++++++++----------- .evergreen/scripts/generate_config.py | 21 +++++---- .evergreen/scripts/generate_config_utils.py | 1 + 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index dbfec82f93..82eef47ff3 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -6,7 +6,7 @@ buildvariants: display_name: OpenSSL 1.0.2 RHEL7 v5.0 Python3.9 run_on: - rhel79-small - batchtime: 10080 + batchtime: 1440 expansions: VERSION: "5.0" PYTHON_VERSION: "3.9" @@ -17,7 +17,7 @@ buildvariants: display_name: Other hosts RHEL9-FIPS latest run_on: - rhel92-fips - batchtime: 10080 + batchtime: 1440 expansions: VERSION: latest NO_EXT: "1" @@ -29,7 +29,7 @@ buildvariants: display_name: Other hosts RHEL8-zseries latest run_on: - rhel8-zseries-small - batchtime: 10080 + batchtime: 1440 expansions: VERSION: latest NO_EXT: "1" @@ -40,7 +40,7 @@ buildvariants: display_name: Other hosts RHEL8-POWER8 latest run_on: - rhel8-power-small - batchtime: 10080 + batchtime: 1440 expansions: VERSION: latest NO_EXT: "1" @@ -51,7 +51,7 @@ buildvariants: display_name: Other hosts RHEL8-arm64 latest run_on: - rhel82-arm64-small - batchtime: 10080 + batchtime: 1440 expansions: VERSION: latest NO_EXT: "1" @@ -62,7 +62,7 @@ buildvariants: display_name: Other hosts Amazon2023 latest run_on: - amazon2023-arm64-latest-large-m8g - batchtime: 10080 + batchtime: 1440 expansions: VERSION: latest NO_EXT: "1" @@ -182,7 +182,7 @@ buildvariants: display_name: Encryption RHEL8 run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption tags: [encryption_tag] @@ -192,7 +192,7 @@ buildvariants: display_name: Encryption macOS run_on: - macos-14 - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption tags: [encryption_tag] @@ -202,7 +202,7 @@ buildvariants: display_name: Encryption Win64 run_on: - windows-64-vsMulti-small - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption tags: [encryption_tag] @@ -212,7 +212,7 @@ buildvariants: display_name: Encryption crypt_shared RHEL8 run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" @@ -223,7 +223,7 @@ buildvariants: display_name: Encryption crypt_shared macOS run_on: - macos-14 - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" @@ -234,7 +234,7 @@ buildvariants: display_name: Encryption crypt_shared Win64 run_on: - windows-64-vsMulti-small - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption TEST_CRYPT_SHARED: "true" @@ -245,7 +245,7 @@ buildvariants: display_name: Encryption PyOpenSSL RHEL8 run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: encryption SUB_TEST_NAME: pyopenssl @@ -340,10 +340,10 @@ buildvariants: - name: kms tasks: - name: test-gcpkms - batchtime: 10080 + batchtime: 1440 - name: test-gcpkms-fail - name: test-azurekms - batchtime: 10080 + batchtime: 1440 - name: test-azurekms-fail display_name: KMS run_on: @@ -360,7 +360,7 @@ buildvariants: display_name: Load Balancer run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: TEST_NAME: load_balancer @@ -434,14 +434,14 @@ buildvariants: display_name: Auth OIDC Ubuntu-22 run_on: - ubuntu2204-small - batchtime: 10080 + batchtime: 1440 - name: auth-oidc-local-ubuntu-22 tasks: - name: "!.auth_oidc_remote .auth_oidc" display_name: Auth OIDC Local Ubuntu-22 run_on: - ubuntu2204-small - batchtime: 10080 + batchtime: 1440 tags: [pr] - name: auth-oidc-macos tasks: @@ -449,14 +449,14 @@ buildvariants: display_name: Auth OIDC macOS run_on: - macos-14 - batchtime: 10080 + batchtime: 1440 - name: auth-oidc-win64 tasks: - name: "!.auth_oidc_remote .auth_oidc" display_name: Auth OIDC Win64 run_on: - windows-64-vsMulti-small - batchtime: 10080 + batchtime: 1440 # Perf tests - name: performance-benchmarks @@ -465,7 +465,7 @@ buildvariants: display_name: Performance Benchmarks run_on: - rhel90-dbx-perf-large - batchtime: 10080 + batchtime: 1440 # Pyopenssl tests - name: pyopenssl-rhel8 @@ -475,7 +475,7 @@ buildvariants: display_name: PyOpenSSL RHEL8 run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: SUB_TEST_NAME: pyopenssl - name: pyopenssl-macos @@ -485,7 +485,7 @@ buildvariants: display_name: PyOpenSSL macOS run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: SUB_TEST_NAME: pyopenssl - name: pyopenssl-win64 @@ -495,7 +495,7 @@ buildvariants: display_name: PyOpenSSL Win64 run_on: - rhel87-small - batchtime: 10080 + batchtime: 1440 expansions: SUB_TEST_NAME: pyopenssl diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 7b17b127f4..4ad8c71b6e 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -7,6 +7,7 @@ from generate_config_utils import ( ALL_PYTHONS, ALL_VERSIONS, + BATCHTIME_DAY, BATCHTIME_WEEK, C_EXTS, CPYTHONS, @@ -126,7 +127,7 @@ def create_free_threaded_variants() -> list[BuildVariant]: def create_encryption_variants() -> list[BuildVariant]: variants = [] tags = ["encryption_tag"] - batchtime = BATCHTIME_WEEK + batchtime = BATCHTIME_DAY def get_encryption_expansions(encryption): expansions = dict(TEST_NAME="encryption") @@ -183,7 +184,7 @@ def create_load_balancer_variants(): tasks, "Load Balancer", host=DEFAULT_HOST, - batchtime=BATCHTIME_WEEK, + batchtime=BATCHTIME_DAY, expansions=expansions, ) ] @@ -226,7 +227,7 @@ def create_enterprise_auth_variants(): def create_pyopenssl_variants(): base_name = "PyOpenSSL" - batchtime = BATCHTIME_WEEK + batchtime = BATCHTIME_DAY expansions = dict(SUB_TEST_NAME="pyopenssl") variants = [] @@ -348,7 +349,7 @@ def create_oidc_auth_variants(): tasks, get_variant_name("Auth OIDC", host), host=host, - batchtime=BATCHTIME_WEEK, + batchtime=BATCHTIME_DAY, ) ) # Add a specific local test to run on PRs. @@ -360,7 +361,7 @@ def create_oidc_auth_variants(): get_variant_name("Auth OIDC Local", host), tags=["pr"], host=host, - batchtime=BATCHTIME_WEEK, + batchtime=BATCHTIME_DAY, ) ) return variants @@ -425,9 +426,9 @@ def create_coverage_report_variants(): def create_kms_variants(): tasks = [] - tasks.append(EvgTaskRef(name="test-gcpkms", batchtime=BATCHTIME_WEEK)) + tasks.append(EvgTaskRef(name="test-gcpkms", batchtime=BATCHTIME_DAY)) tasks.append("test-gcpkms-fail") - tasks.append(EvgTaskRef(name="test-azurekms", batchtime=BATCHTIME_WEEK)) + tasks.append(EvgTaskRef(name="test-azurekms", batchtime=BATCHTIME_DAY)) tasks.append("test-azurekms-fail") return [create_variant(tasks, "KMS", host=HOSTS["debian11"])] @@ -442,9 +443,7 @@ def create_backport_pr_variants(): def create_perf_variants(): host = HOSTS["perf"] - return [ - create_variant([".perf"], "Performance Benchmarks", host=host, batchtime=BATCHTIME_WEEK) - ] + return [create_variant([".perf"], "Performance Benchmarks", host=host, batchtime=BATCHTIME_DAY)] def create_aws_auth_variants(): @@ -478,7 +477,7 @@ def create_no_server_variants(): def create_alternative_hosts_variants(): - batchtime = BATCHTIME_WEEK + batchtime = BATCHTIME_DAY variants = [] host = HOSTS["rhel7"] diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 26fe753e8c..b7daea43a7 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -27,6 +27,7 @@ ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] BATCHTIME_WEEK = 10080 +BATCHTIME_DAY = 1440 AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] C_EXTS = ["without_ext", "with_ext"] From 448a4944ff9d23dcf24f2ad144f929c43deb60bf Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 24 Sep 2025 19:48:03 -0500 Subject: [PATCH 2077/2111] PYTHON-5574 Allow uv lockfile to update from justfile lint (#2558) --- .evergreen/scripts/setup-dev-env.sh | 5 -- justfile | 7 +- pyproject.toml | 4 +- uv.lock | 133 +--------------------------- 4 files changed, 6 insertions(+), 143 deletions(-) diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 38824f32b7..1204848e72 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -51,9 +51,4 @@ uv sync echo "Setting up python environment... done." -# Ensure there is a pre-commit hook if there is a git checkout. -if [ -d .git ] && [ ! -f .git/hooks/pre-commit ]; then - uv run pre-commit install -fi - popd > /dev/null diff --git a/justfile b/justfile index 4645a4a47d..9b6cce62c9 100644 --- a/justfile +++ b/justfile @@ -18,6 +18,7 @@ resync: install: bash .evergreen/scripts/setup-dev-env.sh + uvx pre-commit install [group('docs')] docs: && resync @@ -49,15 +50,15 @@ typing-pyright: && resync [group('lint')] lint *args="": && resync - uv run pre-commit run --all-files {{args}} + uvx pre-commit run --all-files {{args}} [group('lint')] lint-manual *args="": && resync - uv run pre-commit run --all-files --hook-stage manual {{args}} + uvx pre-commit run --all-files --hook-stage manual {{args}} [group('test')] test *args="-v --durations=5 --maxfail=10": && resync - uv run --extra test pytest {{args}} + uvx --extra test pytest {{args}} [group('test')] run-tests *args: && resync diff --git a/pyproject.toml b/pyproject.toml index 2130c61a96..3c81684b4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,9 +46,7 @@ Source = "https://github.com/mongodb/mongo-python-driver" Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" [dependency-groups] -dev = [ - "pre-commit>=4.0" -] +dev = [] pip = ["pip"] # TODO: PYTHON-5464 gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] diff --git a/uv.lock b/uv.lock index 52333d9dbe..01718858b9 100644 --- a/uv.lock +++ b/uv.lock @@ -310,15 +310,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/72/29/3c890ed3ef27a19cb696fa1032b8ef83e0aa586ec55d4feeb0970e28c673/cffi-2.0.0b1-cp39-cp39-win_amd64.whl", hash = "sha256:cb351fade24f7ba9ca481bee53d4257053b9fa9da55da276fe1187a990a49dde", size = 182827, upload-time = "2025-07-29T01:11:49.444Z" }, ] -[[package]] -name = "cfgv" -version = "3.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114, upload-time = "2023-08-12T20:38:17.776Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249, upload-time = "2023-08-12T20:38:16.269Z" }, -] - [[package]] name = "charset-normalizer" version = "3.4.2" @@ -670,15 +661,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] -[[package]] -name = "distlib" -version = "0.4.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" }, -] - [[package]] name = "dnspython" version = "2.7.0" @@ -709,15 +691,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, ] -[[package]] -name = "filelock" -version = "3.18.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" }, -] - [[package]] name = "furo" version = "2025.7.19" @@ -887,15 +860,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, ] -[[package]] -name = "identify" -version = "2.6.12" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/88/d193a27416618628a5eea64e3223acd800b40749a96ffb322a9b55a49ed1/identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6", size = 99254, upload-time = "2025-05-23T20:37:53.3Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/cd/18f8da995b658420625f7ef13f037be53ae04ec5ad33f9b718240dcfd48c/identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2", size = 99145, upload-time = "2025-05-23T20:37:51.495Z" }, -] - [[package]] name = "idna" version = "3.10" @@ -1128,15 +1092,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" }, ] -[[package]] -name = "platformdirs" -version = "4.3.8" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" }, -] - [[package]] name = "pluggy" version = "1.6.0" @@ -1146,22 +1101,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] -[[package]] -name = "pre-commit" -version = "4.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cfgv" }, - { name = "identify" }, - { name = "nodeenv" }, - { name = "pyyaml" }, - { name = "virtualenv" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424, upload-time = "2025-03-18T21:35:20.987Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707, upload-time = "2025-03-18T21:35:19.343Z" }, -] - [[package]] name = "pyasn1" version = "0.6.1" @@ -1261,9 +1200,6 @@ coverage = [ { name = "coverage" }, { name = "pytest-cov" }, ] -dev = [ - { name = "pre-commit" }, -] gevent = [ { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, { name = "gevent" }, @@ -1317,7 +1253,7 @@ coverage = [ { name = "coverage", specifier = ">=5,<=7.10.6" }, { name = "pytest-cov" }, ] -dev = [{ name = "pre-commit", specifier = ">=4.0" }] +dev = [] gevent = [ { name = "cffi", marker = "python_full_version == '3.14.*'", specifier = ">=2.0.0b1" }, { name = "gevent" }, @@ -1460,59 +1396,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155, upload-time = "2024-08-29T13:16:04.773Z" }, ] -[[package]] -name = "pyyaml" -version = "6.0.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" }, - { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" }, - { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" }, - { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" }, - { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" }, - { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" }, - { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" }, - { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" }, - { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" }, - { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" }, - { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" }, - { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" }, - { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" }, - { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" }, - { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" }, - { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, upload-time = "2024-08-06T20:32:18.555Z" }, - { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" }, - { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" }, - { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, - { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, - { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, - { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, - { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, - { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, - { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, - { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, - { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, - { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, - { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, - { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, - { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, - { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, - { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, - { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, - { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, - { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, - { url = "https://files.pythonhosted.org/packages/65/d8/b7a1db13636d7fb7d4ff431593c510c8b8fca920ade06ca8ef20015493c5/PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", size = 184777, upload-time = "2024-08-06T20:33:25.896Z" }, - { url = "https://files.pythonhosted.org/packages/0a/02/6ec546cd45143fdf9840b2c6be8d875116a64076218b61d68e12548e5839/PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", size = 172318, upload-time = "2024-08-06T20:33:27.212Z" }, - { url = "https://files.pythonhosted.org/packages/0e/9a/8cc68be846c972bda34f6c2a93abb644fb2476f4dcc924d52175786932c9/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", size = 720891, upload-time = "2024-08-06T20:33:28.974Z" }, - { url = "https://files.pythonhosted.org/packages/e9/6c/6e1b7f40181bc4805e2e07f4abc10a88ce4648e7e95ff1abe4ae4014a9b2/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", size = 722614, upload-time = "2024-08-06T20:33:34.157Z" }, - { url = "https://files.pythonhosted.org/packages/3d/32/e7bd8535d22ea2874cef6a81021ba019474ace0d13a4819c2a4bce79bd6a/PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", size = 737360, upload-time = "2024-08-06T20:33:35.84Z" }, - { url = "https://files.pythonhosted.org/packages/d7/12/7322c1e30b9be969670b672573d45479edef72c9a0deac3bb2868f5d7469/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", size = 699006, upload-time = "2024-08-06T20:33:37.501Z" }, - { url = "https://files.pythonhosted.org/packages/82/72/04fcad41ca56491995076630c3ec1e834be241664c0c09a64c9a2589b507/PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", size = 723577, upload-time = "2024-08-06T20:33:39.389Z" }, - { url = "https://files.pythonhosted.org/packages/ed/5e/46168b1f2757f1fcd442bc3029cd8767d88a98c9c05770d8b420948743bb/PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", size = 144593, upload-time = "2024-08-06T20:33:46.63Z" }, - { url = "https://files.pythonhosted.org/packages/19/87/5124b1c1f2412bb95c59ec481eaf936cd32f0fe2a7b16b97b81c4c017a6a/PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", size = 162312, upload-time = "2024-08-06T20:33:49.073Z" }, -] - [[package]] name = "readthedocs-sphinx-search" version = "0.3.2" @@ -2027,20 +1910,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, ] -[[package]] -name = "virtualenv" -version = "20.32.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "distlib" }, - { name = "filelock" }, - { name = "platformdirs" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a9/96/0834f30fa08dca3738614e6a9d42752b6420ee94e58971d702118f7cfd30/virtualenv-20.32.0.tar.gz", hash = "sha256:886bf75cadfdc964674e6e33eb74d787dff31ca314ceace03ca5810620f4ecf0", size = 6076970, upload-time = "2025-07-21T04:09:50.985Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5c/c6/f8f28009920a736d0df434b52e9feebfb4d702ba942f15338cb4a83eafc1/virtualenv-20.32.0-py3-none-any.whl", hash = "sha256:2c310aecb62e5aa1b06103ed7c2977b81e042695de2697d01017ff0f1034af56", size = 6057761, upload-time = "2025-07-21T04:09:48.059Z" }, -] - [[package]] name = "watchfiles" version = "1.1.0" From fad2ccb0e7f975e9b4b5c412a946696a4d59a3da Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 25 Sep 2025 09:28:39 -0500 Subject: [PATCH 2078/2111] PYTHON-5565 Add minimum version test for Encryption (#2547) --- .evergreen/generated_configs/functions.yml | 1 + .evergreen/generated_configs/tasks.yml | 1030 ++++++++++--------- .evergreen/generated_configs/variants.yml | 2 + .evergreen/run-tests.sh | 17 +- .evergreen/scripts/generate_config.py | 49 +- .evergreen/scripts/generate_config_utils.py | 3 +- .evergreen/scripts/run_tests.py | 10 +- .evergreen/scripts/setup_tests.py | 20 +- .evergreen/scripts/utils.py | 47 +- CONTRIBUTING.md | 5 + pyproject.toml | 5 +- requirements/encryption.txt | 2 +- requirements/ocsp.txt | 2 +- test/__init__.py | 13 + test/asynchronous/__init__.py | 13 + test/asynchronous/test_encryption.py | 1 + test/test_encryption.py | 1 + uv.lock | 12 +- 18 files changed, 691 insertions(+), 542 deletions(-) diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index 7f11bc5d06..ce95648849 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -151,6 +151,7 @@ functions: - VERSION - IS_WIN32 - REQUIRE_FIPS + - TEST_MIN_DEPS type: test - command: subprocess.exec params: diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 65813db1cf..e5deb5887b 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -208,6 +208,50 @@ tasks: SUB_TEST_NAME: azure-fail tags: [pr] + # Min deps tests + - name: test-min-deps-python3.9-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + TEST_MIN_DEPS: "1" + tags: [test-min-deps, standalone-noauth-nossl] + - name: test-min-deps-python3.9-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_MIN_DEPS: "1" + tags: [test-min-deps, replica_set-noauth-ssl] + - name: test-min-deps-python3.9-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_MIN_DEPS: "1" + tags: [test-min-deps, sharded_cluster-auth-ssl] + # Mod wsgi tests - name: mod-wsgi-replica-set-python3.9 commands: @@ -2366,343 +2410,323 @@ tasks: tags: [search_index] # Server version tests - - name: test-server-version-python3.9-sync-auth-ssl-standalone-cov - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: standalone - COVERAGE: "1" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: standalone - COVERAGE: "1" - PYTHON_VERSION: "3.9" - TEST_NAME: default_sync - tags: - - server-version - - python-3.9 - - standalone-auth-ssl - - sync - - name: test-server-version-python3.10-async-auth-ssl-standalone-cov + - name: test-server-version-python3.13-async-auth-nossl-replica-set-cov commands: - func: run server vars: AUTH: auth - SSL: ssl - TOPOLOGY: standalone + SSL: nossl + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl - TOPOLOGY: standalone + SSL: nossl + TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-3.10 - - standalone-auth-ssl + - python-3.13 + - replica_set-auth-nossl - async - - name: test-server-version-python3.11-sync-auth-nossl-standalone-cov + - name: test-server-version-python3.12-sync-auth-nossl-replica-set-cov commands: - func: run server vars: AUTH: auth SSL: nossl - TOPOLOGY: standalone + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: nossl - TOPOLOGY: standalone + TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-3.11 - - standalone-auth-nossl + - python-3.12 + - replica_set-auth-nossl - sync - - name: test-server-version-python3.12-async-auth-nossl-standalone-cov + - name: test-server-version-python3.11-async-auth-ssl-replica-set-cov commands: - func: run server vars: AUTH: auth - SSL: nossl - TOPOLOGY: standalone + SSL: ssl + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: nossl - TOPOLOGY: standalone + SSL: ssl + TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.12 - - standalone-auth-nossl + - python-3.11 + - replica_set-auth-ssl - async - - name: test-server-version-python3.13-sync-noauth-ssl-standalone-cov + - name: test-server-version-python3.10-sync-auth-ssl-replica-set-cov commands: - func: run server vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: standalone + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: standalone + TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.13 - - standalone-noauth-ssl + - python-3.10 + - replica_set-auth-ssl - sync - - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov + - name: test-server-version-python3.10-async-noauth-nossl-replica-set-cov commands: - func: run server vars: AUTH: noauth - SSL: ssl - TOPOLOGY: standalone + SSL: nossl + TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: AUTH: noauth - SSL: ssl - TOPOLOGY: standalone + SSL: nossl + TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.14 - - standalone-noauth-ssl + - python-3.10 + - replica_set-noauth-nossl - async - - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone + - pr + - name: test-server-version-python3.9-sync-noauth-nossl-replica-set-cov commands: - func: run server vars: AUTH: noauth SSL: nossl - TOPOLOGY: standalone + TOPOLOGY: replica_set + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl - TOPOLOGY: standalone - PYTHON_VERSION: pypy3.10 + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 - - standalone-noauth-nossl + - python-3.9 + - replica_set-noauth-nossl - sync - pr - - name: test-server-version-python3.9-async-noauth-nossl-standalone-cov + - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - COVERAGE: "1" + SSL: ssl + TOPOLOGY: replica_set - func: run tests vars: AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - COVERAGE: "1" - PYTHON_VERSION: "3.9" + SSL: ssl + TOPOLOGY: replica_set + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.9 - - standalone-noauth-nossl + - python-pypy3.10 + - replica_set-noauth-ssl - async - - pr - - name: test-server-version-python3.10-sync-auth-ssl-replica-set-cov + - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.10 - - replica_set-auth-ssl + - python-3.14 + - replica_set-noauth-ssl - sync - - name: test-server-version-python3.11-async-auth-ssl-replica-set-cov + - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov commands: - func: run server vars: AUTH: auth - SSL: ssl - TOPOLOGY: replica_set + SSL: nossl + TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl - TOPOLOGY: replica_set + SSL: nossl + TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-3.11 - - replica_set-auth-ssl + - python-3.14 + - sharded_cluster-auth-nossl - async - - name: test-server-version-python3.12-sync-auth-nossl-replica-set-cov + - name: test-server-version-python3.13-sync-auth-nossl-sharded-cluster-cov commands: - func: run server vars: AUTH: auth SSL: nossl - TOPOLOGY: replica_set + TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: nossl - TOPOLOGY: replica_set + TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - server-version - - python-3.12 - - replica_set-auth-nossl + - python-3.13 + - sharded_cluster-auth-nossl - sync - - name: test-server-version-python3.13-async-auth-nossl-replica-set-cov + - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: AUTH: auth - SSL: nossl - TOPOLOGY: replica_set + SSL: ssl + TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: nossl - TOPOLOGY: replica_set + SSL: ssl + TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.13 - - replica_set-auth-nossl + - python-3.10 + - sharded_cluster-auth-ssl - async - - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov + - pr + - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: replica_set + TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: replica_set + TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" - TEST_NAME: default_sync + PYTHON_VERSION: "3.11" + TEST_NAME: default_async tags: - server-version - - python-3.14 - - replica_set-noauth-ssl - - sync - - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set + - python-3.11 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: replica_set + TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: - AUTH: noauth + AUTH: auth SSL: ssl - TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.10 + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 - - replica_set-noauth-ssl + - python-3.12 + - sharded_cluster-auth-ssl - async - - name: test-server-version-python3.9-sync-noauth-nossl-replica-set-cov + - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.9" - TEST_NAME: default_sync + PYTHON_VERSION: "3.13" + TEST_NAME: default_async tags: - server-version - - python-3.9 - - replica_set-noauth-nossl - - sync - - pr - - name: test-server-version-python3.10-async-noauth-nossl-replica-set-cov + - python-3.13 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.14-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: replica_set + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-3.10 - - replica_set-noauth-nossl + - python-3.14 + - sharded_cluster-auth-ssl - async - - pr - - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.9-async-auth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2716,214 +2740,211 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync + PYTHON_VERSION: "3.9" + TEST_NAME: default_async tags: - server-version - - python-3.11 + - python-3.9 - sharded_cluster-auth-ssl - - sync - - pr - - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov + - async + - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.12 + - python-pypy3.10 - sharded_cluster-auth-ssl - async - - pr - - name: test-server-version-python3.13-sync-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.10-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: AUTH: auth - SSL: nossl + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: nossl + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.13 - - sharded_cluster-auth-nossl + - python-3.10 + - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov + - pr + - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: AUTH: auth - SSL: nossl + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: nossl + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" - TEST_NAME: default_async + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync tags: - server-version - - python-3.14 - - sharded_cluster-auth-nossl - - async - - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster + - python-3.11 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth + AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: - AUTH: noauth + AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 - - sharded_cluster-noauth-ssl + - python-3.12 + - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.9-async-noauth-ssl-sharded-cluster-cov + - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth + AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: noauth + AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.9" - TEST_NAME: default_async + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync tags: - server-version - - python-3.9 - - sharded_cluster-noauth-ssl - - async - - name: test-server-version-python3.10-sync-noauth-nossl-sharded-cluster-cov + - python-3.13 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.14-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth - SSL: nossl + AUTH: auth + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: noauth - SSL: nossl + AUTH: auth + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.10 - - sharded_cluster-noauth-nossl + - python-3.14 + - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.11-async-noauth-nossl-sharded-cluster-cov + - name: test-server-version-python3.9-sync-auth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: noauth - SSL: nossl + AUTH: auth + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: noauth - SSL: nossl + AUTH: auth + SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" - TEST_NAME: default_async + PYTHON_VERSION: "3.9" + TEST_NAME: default_sync tags: - server-version - - python-3.11 - - sharded_cluster-noauth-nossl - - async - - name: test-server-version-python3.9-sync-auth-ssl-sharded-cluster-cov + - python-3.9 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-pypy3.10 - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.9-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.11-async-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: - AUTH: auth - SSL: ssl + AUTH: noauth + SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: auth - SSL: ssl + AUTH: noauth + SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.9 - - sharded_cluster-auth-ssl + - python-3.11 + - sharded_cluster-noauth-nossl - async - - name: test-server-version-python3.10-sync-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.10-sync-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: - AUTH: auth - SSL: ssl + AUTH: noauth + SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: auth - SSL: ssl + AUTH: noauth + SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" PYTHON_VERSION: "3.10" @@ -2931,193 +2952,216 @@ tasks: tags: - server-version - python-3.10 - - sharded_cluster-auth-ssl + - sharded_cluster-noauth-nossl - sync - - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.9-async-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - server-version - - python-3.10 - - sharded_cluster-auth-ssl + - python-3.9 + - sharded_cluster-noauth-ssl - async - - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.11" - TEST_NAME: default_async + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync tags: - server-version - - python-3.11 - - sharded_cluster-auth-ssl - - async - - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov + - python-pypy3.10 + - sharded_cluster-noauth-ssl + - sync + - name: test-server-version-python3.12-async-auth-nossl-standalone-cov commands: - func: run server vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" PYTHON_VERSION: "3.12" - TEST_NAME: default_sync + TEST_NAME: default_async tags: - server-version - python-3.12 - - sharded_cluster-auth-ssl - - sync - - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov + - standalone-auth-nossl + - async + - name: test-server-version-python3.11-sync-auth-nossl-standalone-cov commands: - func: run server vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version - - python-3.13 - - sharded_cluster-auth-ssl + - python-3.11 + - standalone-auth-nossl - sync - - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.10-async-auth-ssl-standalone-cov commands: - func: run server vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.13 - - sharded_cluster-auth-ssl + - python-3.10 + - standalone-auth-ssl - async - - name: test-server-version-python3.14-sync-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.9-sync-auth-ssl-standalone-cov commands: - func: run server vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - server-version - - python-3.14 - - sharded_cluster-auth-ssl + - python-3.9 + - standalone-auth-ssl - sync - - name: test-server-version-python3.14-async-auth-ssl-sharded-cluster-cov + - name: test-server-version-python3.9-async-noauth-nossl-standalone-cov commands: - func: run server vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - func: run tests vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - server-version - - python-3.14 - - sharded_cluster-auth-ssl + - python-3.9 + - standalone-noauth-nossl - async - - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster + - pr + - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone - func: run tests vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - server-version - python-pypy3.10 - - sharded_cluster-auth-ssl + - standalone-noauth-nossl - sync - - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + - pr + - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov commands: - func: run server vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster + TOPOLOGY: standalone + COVERAGE: "1" - func: run tests vars: - AUTH: auth + AUTH: noauth SSL: ssl - TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 - - sharded_cluster-auth-ssl + - python-3.14 + - standalone-noauth-ssl - async + - name: test-server-version-python3.13-sync-noauth-ssl-standalone-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - standalone-noauth-ssl + - sync # Standard tests - name: test-standard-v4.2-python3.10-sync-noauth-ssl-replica-set @@ -4056,27 +4100,6 @@ tasks: - sync # Test non standard tests - - name: test-non-standard-v4.2-python3.9-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.2" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.2" - PYTHON_VERSION: "3.9" - tags: - - test-non-standard - - server-4.2 - - python-3.9 - - standalone-noauth-nossl - - noauth - name: test-non-standard-v4.2-python3.10-noauth-ssl-replica-set commands: - func: run server @@ -4119,27 +4142,49 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v4.4-python3.12-noauth-nossl-standalone + - name: test-non-standard-v4.2-python3.9-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.4" + VERSION: "4.2" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "4.4" - PYTHON_VERSION: "3.12" + VERSION: "4.2" + PYTHON_VERSION: "3.9" tags: - test-non-standard - - server-4.4 - - python-3.12 + - server-4.2 + - python-3.9 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.2 + - python-pypy3.10 - standalone-noauth-nossl - noauth + - pypy - name: test-non-standard-v4.4-python3.13-noauth-ssl-replica-set commands: - func: run server @@ -4161,6 +4206,28 @@ tasks: - python-3.13 - replica_set-noauth-ssl - noauth + - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.4 + - python-pypy3.10 + - replica_set-noauth-ssl + - noauth + - pypy - name: test-non-standard-v4.4-python3.14-auth-ssl-sharded-cluster commands: - func: run server @@ -4182,25 +4249,25 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v5.0-python3.9-noauth-nossl-standalone + - name: test-non-standard-v4.4-python3.12-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" + VERSION: "4.4" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "5.0" - PYTHON_VERSION: "3.9" + VERSION: "4.4" + PYTHON_VERSION: "3.12" tags: - test-non-standard - - server-5.0 - - python-3.9 + - server-4.4 + - python-3.12 - standalone-noauth-nossl - noauth - name: test-non-standard-v5.0-python3.10-noauth-ssl-replica-set @@ -4245,25 +4312,47 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v6.0-python3.12-noauth-nossl-standalone + - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-5.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-non-standard-v5.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" + VERSION: "5.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" - PYTHON_VERSION: "3.12" + VERSION: "5.0" + PYTHON_VERSION: "3.9" tags: - test-non-standard - - server-6.0 - - python-3.12 + - server-5.0 + - python-3.9 - standalone-noauth-nossl - noauth - name: test-non-standard-v6.0-python3.13-noauth-ssl-replica-set @@ -4297,38 +4386,60 @@ tasks: VERSION: "6.0" - func: run tests vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.14" + tags: + - test-non-standard + - server-6.0 + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v6.0-python3.12-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-6.0 - - python-3.14 - - sharded_cluster-auth-ssl - - auth - - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone + - python-3.12 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" + VERSION: "6.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "7.0" - PYTHON_VERSION: "3.9" + VERSION: "6.0" + PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-7.0 - - python-3.9 + - server-6.0 + - python-pypy3.10 - standalone-noauth-nossl - noauth + - pypy - name: test-non-standard-v7.0-python3.10-noauth-ssl-replica-set commands: - func: run server @@ -4350,6 +4461,28 @@ tasks: - python-3.10 - replica_set-noauth-ssl - noauth + - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-7.0 + - python-pypy3.10 + - replica_set-noauth-ssl + - noauth + - pypy - name: test-non-standard-v7.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server @@ -4371,25 +4504,25 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" + VERSION: "7.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "8.0" - PYTHON_VERSION: "3.12" + VERSION: "7.0" + PYTHON_VERSION: "3.9" tags: - test-non-standard - - server-8.0 - - python-3.12 + - server-7.0 + - python-3.9 - standalone-noauth-nossl - noauth - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set @@ -4434,91 +4567,49 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-rapid-python3.9-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: rapid - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: rapid - PYTHON_VERSION: "3.9" - tags: - - test-non-standard - - server-rapid - - python-3.9 - - standalone-noauth-nossl - - noauth - - name: test-non-standard-rapid-python3.10-noauth-ssl-replica-set - commands: - - func: run server - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - PYTHON_VERSION: "3.10" - tags: - - test-non-standard - - server-rapid - - python-3.10 - - replica_set-noauth-ssl - - noauth - - name: test-non-standard-rapid-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: rapid + VERSION: "8.0" - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: rapid - PYTHON_VERSION: "3.11" + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-rapid - - python-3.11 + - server-8.0 + - python-pypy3.10 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-latest-python3.12-noauth-nossl-standalone + - pypy + - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: latest + VERSION: "8.0" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: latest + VERSION: "8.0" PYTHON_VERSION: "3.12" tags: - test-non-standard - - server-latest + - server-8.0 - python-3.12 - standalone-noauth-nossl - noauth - - pr - name: test-non-standard-latest-python3.13-noauth-ssl-replica-set commands: - func: run server @@ -4541,161 +4632,115 @@ tasks: - replica_set-noauth-ssl - noauth - pr - - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: latest - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: latest - PYTHON_VERSION: "3.14" - tags: - - test-non-standard - - server-latest - - python-3.14 - - sharded_cluster-auth-ssl - - auth - - pr - - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.2" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "4.2" - PYTHON_VERSION: pypy3.10 - tags: - - test-non-standard - - server-4.2 - - python-pypy3.10 - - standalone-noauth-nossl - - noauth - - pypy - - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.4" + VERSION: latest - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "4.4" + VERSION: latest PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-4.4 + - server-latest - python-pypy3.10 - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "5.0" + VERSION: latest - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + VERSION: latest + PYTHON_VERSION: "3.14" tags: - test-non-standard - - server-5.0 - - python-pypy3.10 + - server-latest + - python-3.14 - sharded_cluster-auth-ssl - auth - - pypy - - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone + - pr + - name: test-non-standard-latest-python3.12-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" + VERSION: latest - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - VERSION: "6.0" - PYTHON_VERSION: pypy3.10 + VERSION: latest + PYTHON_VERSION: "3.12" tags: - test-non-standard - - server-6.0 - - python-pypy3.10 + - server-latest + - python-3.12 - standalone-noauth-nossl - noauth - - pypy - - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set + - pr + - name: test-non-standard-rapid-python3.10-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" + VERSION: rapid - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + VERSION: rapid + PYTHON_VERSION: "3.10" tags: - test-non-standard - - server-7.0 - - python-pypy3.10 + - server-rapid + - python-3.10 - replica_set-noauth-ssl - noauth - - pypy - - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-rapid-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" + VERSION: rapid - func: run tests vars: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + VERSION: rapid + PYTHON_VERSION: "3.11" tags: - test-non-standard - - server-8.0 - - python-pypy3.10 + - server-rapid + - python-3.11 - sharded_cluster-auth-ssl - auth - - pypy - - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.9-noauth-nossl-standalone commands: - func: run server vars: @@ -4709,33 +4754,32 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-rapid - - python-pypy3.10 + - python-3.9 - standalone-noauth-nossl - noauth - - pypy - - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: latest + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid - func: run tests vars: AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: latest + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid PYTHON_VERSION: pypy3.10 tags: - test-non-standard - - server-latest + - server-rapid - python-pypy3.10 - - replica_set-noauth-ssl + - standalone-noauth-nossl - noauth - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 82eef47ff3..b23dc7a147 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -179,6 +179,7 @@ buildvariants: - name: encryption-rhel8 tasks: - name: .test-non-standard + - name: .test-min-deps display_name: Encryption RHEL8 run_on: - rhel87-small @@ -209,6 +210,7 @@ buildvariants: - name: encryption-crypt_shared-rhel8 tasks: - name: .test-non-standard + - name: .test-min-deps display_name: Encryption crypt_shared RHEL8 run_on: - rhel87-small diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index ec3746b29c..c14215244e 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -6,7 +6,8 @@ SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )" ROOT_DIR="$(dirname $SCRIPT_DIR)" -pushd $ROOT_DIR +PREV_DIR=$(pwd) +cd $ROOT_DIR # Try to source the env file. if [ -f $SCRIPT_DIR/scripts/env.sh ]; then @@ -25,7 +26,17 @@ else exit 1 fi +cleanup_tests() { + # Avoid leaving the lock file in a changed state when we change the resolution type. + if [ -n "${TEST_MIN_DEPS:-}" ]; then + git checkout uv.lock || true + fi + cd $PREV_DIR +} + +trap "cleanup_tests" SIGINT ERR + # Start the test runner. -uv run ${UV_ARGS} --reinstall .evergreen/scripts/run_tests.py "$@" +uv run ${UV_ARGS} --reinstall-package pymongo .evergreen/scripts/run_tests.py "$@" -popd +cleanup_tests diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 4ad8c71b6e..e074a7be2e 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -143,7 +143,7 @@ def get_encryption_expansions(encryption): ): expansions = get_encryption_expansions(encryption) display_name = get_variant_name(encryption, host, **expansions) - tasks = [".test-non-standard"] + tasks = [".test-non-standard", ".test-min-deps"] if host != "rhel8": tasks = [".test-non-standard !.pypy"] variant = create_variant( @@ -528,22 +528,20 @@ def create_aws_lambda_variants(): def create_server_version_tasks(): tasks = [] - task_inputs = [] + task_combos = set() # All combinations of topology, auth, ssl, and sync should be tested. for (topology, auth, ssl, sync), python in zip_cycle( list(product(TOPOLOGIES, ["auth", "noauth"], ["ssl", "nossl"], SYNCS)), ALL_PYTHONS ): - task_inputs.append((topology, auth, ssl, sync, python)) + task_combos.add((topology, auth, ssl, sync, python)) # Every python should be tested with sharded cluster, auth, ssl, with sync and async. for python, sync in product(ALL_PYTHONS, SYNCS): - task_input = ("sharded_cluster", "auth", "ssl", sync, python) - if task_input not in task_inputs: - task_inputs.append(task_input) + task_combos.add(("sharded_cluster", "auth", "ssl", sync, python)) # Assemble the tasks. seen = set() - for topology, auth, ssl, sync, python in task_inputs: + for topology, auth, ssl, sync, python in sorted(task_combos): combo = f"{topology}-{auth}-{ssl}" tags = ["server-version", f"python-{python}", combo, sync] if combo in [ @@ -558,7 +556,12 @@ def create_server_version_tasks(): expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) if python not in PYPYS: expansions["COVERAGE"] = "1" - name = get_task_name("test-server-version", python=python, sync=sync, **expansions) + name = get_task_name( + "test-server-version", + python=python, + sync=sync, + **expansions, + ) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() test_vars["PYTHON_VERSION"] = python @@ -590,15 +593,15 @@ def create_no_toolchain_tasks(): def create_test_non_standard_tasks(): """For variants that set a TEST_NAME.""" tasks = [] - task_combos = [] + task_combos = set() # For each version and topology, rotate through the CPythons. for (version, topology), python in zip_cycle(list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS): pr = version == "latest" - task_combos.append((version, topology, python, pr)) + task_combos.add((version, topology, python, pr)) # For each PyPy and topology, rotate through the the versions. for (python, topology), version in zip_cycle(list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS): - task_combos.append((version, topology, python, False)) - for version, topology, python, pr in task_combos: + task_combos.add((version, topology, python, False)) + for version, topology, python, pr in sorted(task_combos): auth, ssl = get_standard_auth_ssl(topology) tags = [ "test-non-standard", @@ -621,6 +624,22 @@ def create_test_non_standard_tasks(): return tasks +def create_min_deps_tasks(): + """For variants that support testing with minimum dependencies.""" + tasks = [] + for topology in TOPOLOGIES: + auth, ssl = get_standard_auth_ssl(topology) + tags = ["test-min-deps", f"{topology}-{auth}-{ssl}"] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["TEST_MIN_DEPS"] = "1" + name = get_task_name("test-min-deps", python=CPYTHONS[0], sync="sync", **test_vars) + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_standard_tasks(): """For variants that do not set a TEST_NAME.""" tasks = [] @@ -794,9 +813,12 @@ def _create_ocsp_tasks(algo, variant, server_type, base_task_name): tags.append("pr") task_name = get_task_name( - f"test-ocsp-{algo}-{base_task_name}", python=python, version=version + f"test-ocsp-{algo}-{base_task_name}", + python=python, + version=version, ) tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + return tasks @@ -1075,6 +1097,7 @@ def create_run_tests_func(): "VERSION", "IS_WIN32", "REQUIRE_FIPS", + "TEST_MIN_DEPS", ] args = [".evergreen/just.sh", "setup-tests", "${TEST_NAME}", "${SUB_TEST_NAME}"] setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index b7daea43a7..83bdd58fdc 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -42,6 +42,7 @@ sync={"sync": "Sync", "async": "Async"}, coverage={"1": "cov"}, no_ext={"1": "No C"}, + test_min_deps={True: "Min Deps"}, ) HOSTS = dict() @@ -202,7 +203,7 @@ def get_common_name(base: str, sep: str, **kwargs) -> str: name = f"Python{value}" else: name = f"PyPy{value.replace('pypy', '')}" - elif key.lower() in DISPLAY_LOOKUP: + elif key.lower() in DISPLAY_LOOKUP and value in DISPLAY_LOOKUP[key.lower()]: name = DISPLAY_LOOKUP[key.lower()][value] else: continue diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index fd4fd13e5b..c1c29c58bc 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -30,13 +30,14 @@ def list_packages(): - packages = dict() + packages = set() for distribution in importlib_metadata.distributions(): - packages[distribution.name] = distribution + if distribution.name: + packages.add(distribution.name) print("Package Version URL") print("------------------- ----------- ----------------------------------------------------") for name in sorted(packages): - distribution = packages[name] + distribution = importlib_metadata.distribution(name) url = "" if distribution.origin is not None: url = distribution.origin.url @@ -136,6 +137,9 @@ def handle_aws_lambda() -> None: def run() -> None: + # Add diagnostic for python version. + print("Running with python", sys.version) + # List the installed packages. list_packages() diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 02b8fae45b..ea2fb5c5ef 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -160,7 +160,6 @@ def handle_test_env() -> None: write_env("PIP_QUIET") # Quiet by default. write_env("PIP_PREFER_BINARY") # Prefer binary dists by default. - write_env("UV_FROZEN") # Do not modify lock files. # Set an environment variable for the test name and sub test name. write_env(f"TEST_{test_name.upper()}") @@ -178,6 +177,9 @@ def handle_test_env() -> None: if group := GROUP_MAP.get(test_name, ""): UV_ARGS.append(f"--group {group}") + if opts.test_min_deps: + UV_ARGS.append("--resolution=lowest-direct") + if test_name == "auth_oidc": from oidc_tester import setup_oidc @@ -233,7 +235,7 @@ def handle_test_env() -> None: if is_set("MONGODB_URI"): write_env("PYMONGO_MUST_CONNECT", "true") - if is_set("DISABLE_TEST_COMMANDS") or opts.disable_test_commands: + if opts.disable_test_commands: write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") if test_name == "enterprise_auth": @@ -345,10 +347,10 @@ def handle_test_env() -> None: if not (ROOT / "libmongocrypt").exists(): setup_libmongocrypt() - # TODO: Test with 'pip install pymongocrypt' - UV_ARGS.append( - "--with pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" - ) + if not opts.test_min_deps: + UV_ARGS.append( + "--with pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" + ) # Use the nocrypto build to avoid dependency issues with older windows/python versions. BASE = ROOT / "libmongocrypt/nocrypto" @@ -377,7 +379,7 @@ def handle_test_env() -> None: if sub_test_name == "pyopenssl": UV_ARGS.append("--extra ocsp") - if is_set("TEST_CRYPT_SHARED") or opts.crypt_shared: + if opts.crypt_shared: config = read_env(f"{DRIVERS_TOOLS}/mo-expansion.sh") CRYPT_SHARED_DIR = Path(config["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) @@ -447,14 +449,14 @@ def handle_test_env() -> None: # Add coverage if requested. # Only cover CPython. PyPy reports suspiciously low coverage. - if (is_set("COVERAGE") or opts.cov) and platform.python_implementation() == "CPython": + if opts.cov and platform.python_implementation() == "CPython": # Keep in sync with combine-coverage.sh. # coverage >=5 is needed for relative_files=true. UV_ARGS.append("--group coverage") TEST_ARGS = f"{TEST_ARGS} --cov" write_env("COVERAGE") - if is_set("GREEN_FRAMEWORK") or opts.green_framework: + if opts.green_framework: framework = opts.green_framework or os.environ["GREEN_FRAMEWORK"] UV_ARGS.append(f"--group {framework}") diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py index 50894ff634..2bc9c720d2 100644 --- a/.evergreen/scripts/utils.py +++ b/.evergreen/scripts/utils.py @@ -60,6 +60,9 @@ class Distro: "ocsp", ] +# Mapping of env variables to options +OPTION_TO_ENV_VAR = {"cov": "COVERAGE", "crypt_shared": "TEST_CRYPT_SHARED"} + def get_test_options( description, require_sub_test_name=True, allow_extra_opts=False @@ -94,6 +97,9 @@ def get_test_options( ) parser.add_argument("--auth", action="store_true", help="Whether to add authentication.") parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration.") + parser.add_argument( + "--test-min-deps", action="store_true", help="Test against minimum dependency versions" + ) # Add the test modifiers. if require_sub_test_name: @@ -127,26 +133,53 @@ def get_test_options( opts, extra_opts = parser.parse_args(), [] else: opts, extra_opts = parser.parse_known_args() - if opts.verbose: - LOGGER.setLevel(logging.DEBUG) - elif opts.quiet: - LOGGER.setLevel(logging.WARNING) + + # Convert list inputs to strings. + for name in vars(opts): + value = getattr(opts, name) + if isinstance(value, list): + setattr(opts, name, value[0]) # Handle validation and environment variable overrides. test_name = opts.test_name sub_test_name = opts.sub_test_name if require_sub_test_name else "" if require_sub_test_name and test_name in SUB_TEST_REQUIRED and not sub_test_name: raise ValueError(f"Test '{test_name}' requires a sub_test_name") - if "auth" in test_name or os.environ.get("AUTH") == "auth": + handle_env_overrides(parser, opts) + if "auth" in test_name: opts.auth = True # 'auth_aws ecs' shouldn't have extra auth set. if test_name == "auth_aws" and sub_test_name == "ecs": opts.auth = False - if os.environ.get("SSL") == "ssl": - opts.ssl = True + if opts.verbose: + LOGGER.setLevel(logging.DEBUG) + elif opts.quiet: + LOGGER.setLevel(logging.WARNING) return opts, extra_opts +def handle_env_overrides(parser: argparse.ArgumentParser, opts: argparse.Namespace) -> None: + # Get the options, and then allow environment variable overrides. + for key in vars(opts): + if key in OPTION_TO_ENV_VAR: + env_var = OPTION_TO_ENV_VAR[key] + else: + env_var = key.upper() + if env_var in os.environ: + if parser.get_default(key) != getattr(opts, key): + LOGGER.info("Overriding env var '%s' with cli option", env_var) + elif env_var == "AUTH": + opts.auth = os.environ.get("AUTH") == "auth" + elif env_var == "SSL": + ssl_opt = os.environ.get("SSL", "") + opts.ssl = ssl_opt and ssl_opt.lower() != "nossl" + elif isinstance(getattr(opts, key), bool): + if os.environ[env_var]: + setattr(opts, key, True) + else: + setattr(opts, key, os.environ[env_var]) + + def read_env(path: Path | str) -> dict[str, str]: config = dict() with Path(path).open() as fid: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 16907d4285..a0f22044f6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -382,6 +382,11 @@ If you are running one of the `no-responder` tests, omit the `run-server` step. - Finally, you can use `just setup-tests --debug-log`. - For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for failed tests in the patch. +## Testing minimum dependencies + +To run any of the test suites with minimum supported dependencies, pass `--test-min-deps` to +`just setup-tests`. + ## Adding a new test suite - If adding new tests files that should only be run for that test suite, add a pytest marker to the file and add diff --git a/pyproject.toml b/pyproject.toml index 3c81684b4f..227865bc30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,8 +48,7 @@ Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" [dependency-groups] dev = [] pip = ["pip"] -# TODO: PYTHON-5464 -gevent = ["gevent", "cffi>=2.0.0b1;python_version=='3.14'"] +gevent = ["gevent>=20.6.0"] coverage = [ "pytest-cov", "coverage>=5,<=7.10.6" @@ -57,7 +56,7 @@ coverage = [ mockupdb = [ "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" ] -perf = ["simplejson"] +perf = ["simplejson>=3.17.0"] typing = [ "mypy==1.18.1", "pyright==1.1.405", diff --git a/requirements/encryption.txt b/requirements/encryption.txt index 321aba5bac..eec1c990f7 100644 --- a/requirements/encryption.txt +++ b/requirements/encryption.txt @@ -1,3 +1,3 @@ pymongo-auth-aws>=1.1.0,<2.0.0 pymongocrypt>=1.13.0,<2.0.0 -certifi;os.name=='nt' or sys_platform=='darwin' +certifi>=2023.7.22;os.name=='nt' or sys_platform=='darwin' diff --git a/requirements/ocsp.txt b/requirements/ocsp.txt index 6570b0905a..39dbddef14 100644 --- a/requirements/ocsp.txt +++ b/requirements/ocsp.txt @@ -5,7 +5,7 @@ # Fallback to certifi on Windows if we can't load CA certs from the system # store and just use certifi on macOS. # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths -certifi;os.name=='nt' or sys_platform=='darwin' +certifi>=2023.7.22;os.name=='nt' or sys_platform=='darwin' pyopenssl>=17.2.0 requests<3.0.0 cryptography>=2.5 diff --git a/test/__init__.py b/test/__init__.py index bef672b5f7..f3b66c20a9 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -519,6 +519,19 @@ def require_libmongocrypt_min(self, *ver): "Libmongocrypt version must be at least %s" % str(other_version), ) + def require_pymongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import __version__ as pymongocrypt_version + + version = Version.from_string(pymongocrypt_version) + return self._require( + lambda: version >= other_version, + "PyMongoCrypt version must be at least %s" % str(other_version), + ) + def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" return self._require( diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 5a65311808..7a6a23ed27 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -519,6 +519,19 @@ def require_libmongocrypt_min(self, *ver): "Libmongocrypt version must be at least %s" % str(other_version), ) + def require_pymongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import __version__ as pymongocrypt_version + + version = Version.from_string(pymongocrypt_version) + return self._require( + lambda: version >= other_version, + "PyMongoCrypt version must be at least %s" % str(other_version), + ) + def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" return self._require( diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 8d4a3d441d..5d9cf433ba 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -3448,6 +3448,7 @@ class TestExplicitTextEncryptionProse(AsyncEncryptionIntegrationTest): @async_client_context.require_no_standalone @async_client_context.require_version_min(8, 2, -1) @async_client_context.require_libmongocrypt_min(1, 15, 1) + @async_client_context.require_pymongocrypt_min(1, 16, 0) async def asyncSetUp(self): await super().asyncSetUp() # Load the file key1-document.json as key1Document. diff --git a/test/test_encryption.py b/test/test_encryption.py index b408e009b6..50d617dc43 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -3430,6 +3430,7 @@ class TestExplicitTextEncryptionProse(EncryptionIntegrationTest): @client_context.require_no_standalone @client_context.require_version_min(8, 2, -1) @client_context.require_libmongocrypt_min(1, 15, 1) + @client_context.require_pymongocrypt_min(1, 16, 0) def setUp(self): super().setUp() # Load the file key1-document.json as key1Document. diff --git a/uv.lock b/uv.lock index 01718858b9..c7a6ed91f1 100644 --- a/uv.lock +++ b/uv.lock @@ -1201,7 +1201,6 @@ coverage = [ { name = "pytest-cov" }, ] gevent = [ - { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, { name = "gevent" }, ] mockupdb = [ @@ -1222,8 +1221,8 @@ typing = [ [package.metadata] requires-dist = [ - { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')" }, - { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')" }, + { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')", specifier = ">=2023.7.22" }, + { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')", specifier = ">=2023.7.22" }, { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, { name = "furo", marker = "extra == 'docs'", specifier = "==2025.7.19" }, @@ -1254,12 +1253,9 @@ coverage = [ { name = "pytest-cov" }, ] dev = [] -gevent = [ - { name = "cffi", marker = "python_full_version == '3.14.*'", specifier = ">=2.0.0b1" }, - { name = "gevent" }, -] +gevent = [{ name = "gevent", specifier = ">=20.6.0" }] mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] -perf = [{ name = "simplejson" }] +perf = [{ name = "simplejson", specifier = ">=3.17.0" }] pip = [{ name = "pip" }] typing = [ { name = "mypy", specifier = "==1.18.1" }, From eb0cedd9691e3c4fe5e33f2393a7d911f1938cc4 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Thu, 25 Sep 2025 11:16:17 -0400 Subject: [PATCH 2079/2111] PYTHON-5577 - Drop support for OpenSSL 1.0.2 (#2561) --- .evergreen/generated_configs/variants.yml | 11 ----------- .evergreen/scripts/generate_config.py | 13 ------------- 2 files changed, 24 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index b23dc7a147..821fe10811 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -1,16 +1,5 @@ buildvariants: # Alternative hosts tests - - name: openssl-1.0.2-rhel7-v5.0-python3.9 - tasks: - - name: .test-no-toolchain - display_name: OpenSSL 1.0.2 RHEL7 v5.0 Python3.9 - run_on: - - rhel79-small - batchtime: 1440 - expansions: - VERSION: "5.0" - PYTHON_VERSION: "3.9" - PYTHON_BINARY: /opt/python/3.9/bin/python3 - name: other-hosts-rhel9-fips-latest tasks: - name: .test-no-toolchain diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index e074a7be2e..2193db32c1 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -480,19 +480,6 @@ def create_alternative_hosts_variants(): batchtime = BATCHTIME_DAY variants = [] - host = HOSTS["rhel7"] - version = "5.0" - variants.append( - create_variant( - [".test-no-toolchain"], - get_variant_name("OpenSSL 1.0.2", host, python=CPYTHONS[0], version=version), - host=host, - python=CPYTHONS[0], - batchtime=batchtime, - expansions=dict(VERSION=version, PYTHON_VERSION=CPYTHONS[0]), - ) - ) - version = "latest" for host_name in OTHER_HOSTS: expansions = dict(VERSION="latest") From 1f308c841f6096e4a0d40edbfe82fa01dc585e27 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 25 Sep 2025 12:52:30 -0500 Subject: [PATCH 2080/2111] PYTHON-5480 Update Python 3.9-specific tests to use Python 3.10 (#2560) --- .evergreen/generated_configs/tasks.yml | 852 +++++++++--------- .evergreen/generated_configs/variants.yml | 11 +- .evergreen/run-mongodb-aws-ecs-test.sh | 7 +- .evergreen/scripts/generate_config.py | 5 +- .evergreen/scripts/generate_config_utils.py | 2 +- .evergreen/scripts/kms_tester.py | 11 +- .evergreen/scripts/oidc_tester.py | 7 +- .evergreen/scripts/setup_tests.py | 9 +- test/asynchronous/test_client.py | 11 +- .../test_discovery_and_monitoring.py | 2 +- test/test_client.py | 11 +- test/test_discovery_and_monitoring.py | 2 +- 12 files changed, 479 insertions(+), 451 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index e5deb5887b..d6aa8966c4 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -9,7 +9,7 @@ tasks: tags: [aws_lambda] # Aws tests - - name: test-auth-aws-4.4-regular-python3.9 + - name: test-auth-aws-4.4-regular-python3.10 commands: - func: run server vars: @@ -20,9 +20,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: regular - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-5.0-assume-role-python3.10 + - name: test-auth-aws-5.0-assume-role-python3.9 commands: - func: run server vars: @@ -33,7 +33,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: assume-role - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-6.0-ec2-python3.11 commands: @@ -101,7 +101,7 @@ tasks: AWS_ROLE_SESSION_NAME: test PYTHON_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-latest-ecs-python3.9 + - name: test-auth-aws-latest-ecs-python3.10 commands: - func: run server vars: @@ -112,7 +112,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ecs - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-ecs] # Backport pr tests @@ -209,7 +209,7 @@ tasks: tags: [pr] # Min deps tests - - name: test-min-deps-python3.9-sync-noauth-nossl-standalone + - name: test-min-deps-python3.10-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -223,7 +223,7 @@ tasks: TOPOLOGY: standalone TEST_MIN_DEPS: "1" tags: [test-min-deps, standalone-noauth-nossl] - - name: test-min-deps-python3.9-sync-noauth-ssl-replica-set + - name: test-min-deps-python3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -237,7 +237,7 @@ tasks: TOPOLOGY: replica_set TEST_MIN_DEPS: "1" tags: [test-min-deps, replica_set-noauth-ssl] - - name: test-min-deps-python3.9-sync-auth-ssl-sharded-cluster + - name: test-min-deps-python3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -253,29 +253,29 @@ tasks: tags: [test-min-deps, sharded_cluster-auth-ssl] # Mod wsgi tests - - name: mod-wsgi-replica-set-python3.9 + - name: mod-wsgi-replica-set-python3.10 commands: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: [mod_wsgi, pr] - - name: mod-wsgi-embedded-mode-replica-set-python3.10 + - name: mod-wsgi-embedded-mode-replica-set-python3.9 commands: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: [mod_wsgi, pr] - name: mod-wsgi-replica-set-python3.11 commands: @@ -327,13 +327,13 @@ tasks: tags: [mod_wsgi, pr] # No orchestration tests - - name: test-no-orchestration-python3.9 + - name: test-no-orchestration-python3.10 commands: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: "3.9" - tags: [test-no-orchestration, python-3.9] + PYTHON_VERSION: "3.10" + tags: [test-no-orchestration, python-3.10] - name: test-no-orchestration-python3.14 commands: - func: assume ec2 role @@ -394,64 +394,64 @@ tasks: tags: [test-no-toolchain, sharded_cluster-auth-ssl] # Ocsp tests - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.14 @@ -464,64 +464,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.14 @@ -534,64 +534,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 @@ -604,64 +604,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 @@ -674,64 +674,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.9 + - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-soft-fail-v5.0-python3.9 + - name: test-ocsp-ecdsa-soft-fail-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-soft-fail-v6.0-python3.9 + - name: test-ocsp-ecdsa-soft-fail-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-soft-fail-v7.0-python3.9 + - name: test-ocsp-ecdsa-soft-fail-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-soft-fail-v8.0-python3.9 + - name: test-ocsp-ecdsa-soft-fail-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-soft-fail-rapid-python3.9 + - name: test-ocsp-ecdsa-soft-fail-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-soft-fail-latest-python3.14 @@ -744,84 +744,84 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-ecdsa - "4.4" - ocsp-staple - - name: test-ocsp-ecdsa-valid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-ecdsa - "5.0" - ocsp-staple - - name: test-ocsp-ecdsa-valid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-ecdsa - "6.0" - ocsp-staple - - name: test-ocsp-ecdsa-valid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-ecdsa - "7.0" - ocsp-staple - - name: test-ocsp-ecdsa-valid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-ecdsa - "8.0" - ocsp-staple - - name: test-ocsp-ecdsa-valid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-ecdsa-valid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -842,84 +842,84 @@ tasks: - ocsp-ecdsa - latest - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-ecdsa - "4.4" - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-ecdsa - "5.0" - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-ecdsa - "6.0" - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-ecdsa - "7.0" - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-ecdsa - "8.0" - ocsp-staple - - name: test-ocsp-ecdsa-invalid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-ecdsa-invalid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -940,84 +940,84 @@ tasks: - ocsp-ecdsa - latest - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-ecdsa - "4.4" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-ecdsa - "5.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-ecdsa - "6.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-ecdsa - "7.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-ecdsa - "8.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1038,84 +1038,84 @@ tasks: - ocsp-ecdsa - latest - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-ecdsa - "4.4" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-ecdsa - "5.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-ecdsa - "6.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-ecdsa - "7.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-ecdsa - "8.0" - ocsp-staple - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1136,64 +1136,64 @@ tasks: - ocsp-ecdsa - latest - ocsp-staple - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -1206,64 +1206,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -1276,64 +1276,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 @@ -1346,64 +1346,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-valid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.14 @@ -1416,64 +1416,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.14 @@ -1486,64 +1486,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 @@ -1556,64 +1556,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 @@ -1626,64 +1626,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-soft-fail-v4.4-python3.9 + - name: test-ocsp-rsa-soft-fail-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-soft-fail-v5.0-python3.9 + - name: test-ocsp-rsa-soft-fail-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-soft-fail-v6.0-python3.9 + - name: test-ocsp-rsa-soft-fail-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-soft-fail-v7.0-python3.9 + - name: test-ocsp-rsa-soft-fail-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-soft-fail-v8.0-python3.9 + - name: test-ocsp-rsa-soft-fail-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-soft-fail-rapid-python3.9 + - name: test-ocsp-rsa-soft-fail-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-soft-fail-latest-python3.14 @@ -1696,84 +1696,84 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-rsa - "4.4" - ocsp-staple - - name: test-ocsp-rsa-valid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-rsa - "5.0" - ocsp-staple - - name: test-ocsp-rsa-valid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-rsa - "6.0" - ocsp-staple - - name: test-ocsp-rsa-valid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-rsa - "7.0" - ocsp-staple - - name: test-ocsp-rsa-valid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-rsa-valid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-rsa - "8.0" - ocsp-staple - - name: test-ocsp-rsa-valid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-rsa-valid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1794,84 +1794,84 @@ tasks: - ocsp-rsa - latest - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-rsa - "4.4" - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-rsa - "5.0" - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-rsa - "6.0" - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-rsa - "7.0" - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-rsa - "8.0" - ocsp-staple - - name: test-ocsp-rsa-invalid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-rsa-invalid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1892,84 +1892,84 @@ tasks: - ocsp-rsa - latest - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-rsa - "4.4" - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-rsa - "5.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-rsa - "6.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-rsa - "7.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-rsa - "8.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-valid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1990,84 +1990,84 @@ tasks: - ocsp-rsa - latest - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v4.4-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: - ocsp - ocsp-rsa - "4.4" - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v5.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: - ocsp - ocsp-rsa - "5.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v6.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: - ocsp - ocsp-rsa - "6.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v7.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: - ocsp - ocsp-rsa - "7.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v8.0-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: - ocsp - ocsp-rsa - "8.0" - ocsp-staple - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-rapid-python3.9 + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -2088,64 +2088,64 @@ tasks: - ocsp-rsa - latest - ocsp-staple - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -2158,64 +2158,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -2228,64 +2228,64 @@ tasks: PYTHON_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.9 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.9 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.9 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.9 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.9 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.9 + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 commands: - func: run tests vars: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 @@ -2473,7 +2473,7 @@ tasks: - python-3.11 - replica_set-auth-ssl - async - - name: test-server-version-python3.10-sync-auth-ssl-replica-set-cov + - name: test-server-version-python3.9-sync-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2487,14 +2487,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - server-version - - python-3.10 + - python-3.9 - replica_set-auth-ssl - sync - - name: test-server-version-python3.10-async-noauth-nossl-replica-set-cov + - name: test-server-version-python3.9-async-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2508,15 +2508,15 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-3.9 - replica_set-noauth-nossl - async - pr - - name: test-server-version-python3.9-sync-noauth-nossl-replica-set-cov + - name: test-server-version-python3.10-sync-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2530,11 +2530,11 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-3.10 - replica_set-noauth-nossl - sync - pr @@ -2933,7 +2933,7 @@ tasks: - python-3.11 - sharded_cluster-noauth-nossl - async - - name: test-server-version-python3.10-sync-noauth-nossl-sharded-cluster-cov + - name: test-server-version-python3.9-sync-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2947,14 +2947,14 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - server-version - - python-3.10 + - python-3.9 - sharded_cluster-noauth-nossl - sync - - name: test-server-version-python3.9-async-noauth-ssl-sharded-cluster-cov + - name: test-server-version-python3.10-async-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2968,11 +2968,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.9 + - python-3.10 - sharded_cluster-noauth-ssl - async - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster @@ -3036,7 +3036,7 @@ tasks: - python-3.11 - standalone-auth-nossl - sync - - name: test-server-version-python3.10-async-auth-ssl-standalone-cov + - name: test-server-version-python3.9-async-auth-ssl-standalone-cov commands: - func: run server vars: @@ -3050,14 +3050,14 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-3.9 - standalone-auth-ssl - async - - name: test-server-version-python3.9-sync-auth-ssl-standalone-cov + - name: test-server-version-python3.10-sync-auth-ssl-standalone-cov commands: - func: run server vars: @@ -3071,14 +3071,14 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-3.10 - standalone-auth-ssl - sync - - name: test-server-version-python3.9-async-noauth-nossl-standalone-cov + - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov commands: - func: run server vars: @@ -3092,11 +3092,11 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.9 + - python-3.10 - standalone-noauth-nossl - async - pr @@ -3164,7 +3164,7 @@ tasks: - sync # Standard tests - - name: test-standard-v4.2-python3.10-sync-noauth-ssl-replica-set + - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3178,15 +3178,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.10 + - python-3.14 - replica_set-noauth-ssl - sync - - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set + - name: test-standard-v4.2-python3.9-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3200,12 +3200,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.14 + - python-3.9 - replica_set-noauth-ssl - sync - name: test-standard-v4.2-python3.11-sync-auth-ssl-sharded-cluster @@ -3253,7 +3253,7 @@ tasks: - sharded_cluster-auth-ssl - sync - pypy - - name: test-standard-v4.2-python3.13-sync-noauth-nossl-standalone + - name: test-standard-v4.2-python3.10-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3267,15 +3267,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.13 + - python-3.10 - standalone-noauth-nossl - sync - - name: test-standard-v4.2-python3.9-sync-noauth-nossl-standalone + - name: test-standard-v4.2-python3.13-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3289,15 +3289,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.9 + - python-3.13 - standalone-noauth-nossl - sync - - name: test-standard-v4.4-python3.10-async-noauth-ssl-replica-set + - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3311,15 +3311,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.10 + - python-3.14 - replica_set-noauth-ssl - async - - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set + - name: test-standard-v4.4-python3.9-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3333,12 +3333,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.14 + - python-3.9 - replica_set-noauth-ssl - async - name: test-standard-v4.4-python3.11-async-auth-ssl-sharded-cluster @@ -3386,7 +3386,7 @@ tasks: - sharded_cluster-auth-ssl - async - pypy - - name: test-standard-v4.4-python3.13-async-noauth-nossl-standalone + - name: test-standard-v4.4-python3.10-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3400,15 +3400,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.13 + - python-3.10 - standalone-noauth-nossl - async - - name: test-standard-v4.4-python3.9-async-noauth-nossl-standalone + - name: test-standard-v4.4-python3.13-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3422,15 +3422,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.9 + - python-3.13 - standalone-noauth-nossl - async - - name: test-standard-v5.0-python3.13-sync-noauth-ssl-replica-set + - name: test-standard-v5.0-python3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3444,15 +3444,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.13 + - python-3.10 - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.9-sync-noauth-ssl-replica-set + - name: test-standard-v5.0-python3.13-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3466,15 +3466,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.9 + - python-3.13 - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.10-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3488,15 +3488,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.10 + - python-3.14 - sharded_cluster-auth-ssl - sync - - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-python3.9-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3510,12 +3510,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.14 + - python-3.9 - sharded_cluster-auth-ssl - sync - name: test-standard-v5.0-python3.12-sync-noauth-nossl-standalone @@ -3540,7 +3540,7 @@ tasks: - python-3.12 - standalone-noauth-nossl - sync - - name: test-standard-v6.0-python3.13-async-noauth-ssl-replica-set + - name: test-standard-v6.0-python3.10-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3554,15 +3554,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.13 + - python-3.10 - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.9-async-noauth-ssl-replica-set + - name: test-standard-v6.0-python3.13-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3576,15 +3576,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.9 + - python-3.13 - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.10-async-auth-ssl-sharded-cluster + - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3598,15 +3598,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.10 + - python-3.14 - sharded_cluster-auth-ssl - async - - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster + - name: test-standard-v6.0-python3.9-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3620,12 +3620,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.14 + - python-3.9 - sharded_cluster-auth-ssl - async - name: test-standard-v6.0-python3.12-async-noauth-nossl-standalone @@ -3672,7 +3672,7 @@ tasks: - python-3.12 - replica_set-noauth-ssl - sync - - name: test-standard-v7.0-python3.13-sync-auth-ssl-sharded-cluster + - name: test-standard-v7.0-python3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3686,15 +3686,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-3.13 + - python-3.10 - sharded_cluster-auth-ssl - sync - - name: test-standard-v7.0-python3.9-sync-auth-ssl-sharded-cluster + - name: test-standard-v7.0-python3.13-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3708,12 +3708,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-3.9 + - python-3.13 - sharded_cluster-auth-ssl - sync - name: test-standard-v7.0-python3.11-sync-noauth-nossl-standalone @@ -3783,7 +3783,7 @@ tasks: - python-3.12 - replica_set-noauth-ssl - async - - name: test-standard-v8.0-python3.13-async-auth-ssl-sharded-cluster + - name: test-standard-v8.0-python3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3797,15 +3797,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.13 + - python-3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v8.0-python3.9-async-auth-ssl-sharded-cluster + - name: test-standard-v8.0-python3.13-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3819,12 +3819,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.9 + - python-3.13 - sharded_cluster-auth-ssl - async - name: test-standard-v8.0-python3.11-async-noauth-nossl-standalone @@ -3941,7 +3941,7 @@ tasks: - sharded_cluster-auth-ssl - async - pr - - name: test-standard-latest-python3.10-async-noauth-nossl-standalone + - name: test-standard-latest-python3.14-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3955,16 +3955,16 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.10 + - python-3.14 - standalone-noauth-nossl - async - pr - - name: test-standard-latest-python3.14-async-noauth-nossl-standalone + - name: test-standard-latest-python3.9-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3978,12 +3978,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.14 + - python-3.9 - standalone-noauth-nossl - async - pr @@ -4054,7 +4054,7 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - sync - - name: test-standard-rapid-python3.10-sync-noauth-nossl-standalone + - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -4068,15 +4068,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-3.10 + - python-3.14 - standalone-noauth-nossl - sync - - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone + - name: test-standard-rapid-python3.9-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -4090,17 +4090,17 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.9" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-3.14 + - python-3.9 - standalone-noauth-nossl - sync # Test non standard tests - - name: test-non-standard-v4.2-python3.10-noauth-ssl-replica-set + - name: test-non-standard-v4.2-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -4114,11 +4114,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-4.2 - - python-3.10 + - python-3.9 - replica_set-noauth-ssl - noauth - name: test-non-standard-v4.2-python3.11-auth-ssl-sharded-cluster @@ -4142,7 +4142,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v4.2-python3.9-noauth-nossl-standalone + - name: test-non-standard-v4.2-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4156,11 +4156,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-4.2 - - python-3.9 + - python-3.10 - standalone-noauth-nossl - noauth - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone @@ -4270,7 +4270,7 @@ tasks: - python-3.12 - standalone-noauth-nossl - noauth - - name: test-non-standard-v5.0-python3.10-noauth-ssl-replica-set + - name: test-non-standard-v5.0-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -4284,11 +4284,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-5.0 - - python-3.10 + - python-3.9 - replica_set-noauth-ssl - noauth - name: test-non-standard-v5.0-python3.11-auth-ssl-sharded-cluster @@ -4334,7 +4334,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v5.0-python3.9-noauth-nossl-standalone + - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4348,11 +4348,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-5.0 - - python-3.9 + - python-3.10 - standalone-noauth-nossl - noauth - name: test-non-standard-v6.0-python3.13-noauth-ssl-replica-set @@ -4440,7 +4440,7 @@ tasks: - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v7.0-python3.10-noauth-ssl-replica-set + - name: test-non-standard-v7.0-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -4454,11 +4454,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-7.0 - - python-3.10 + - python-3.9 - replica_set-noauth-ssl - noauth - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set @@ -4504,7 +4504,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v7.0-python3.9-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4518,11 +4518,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-7.0 - - python-3.9 + - python-3.10 - standalone-noauth-nossl - noauth - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set @@ -4698,7 +4698,7 @@ tasks: - standalone-noauth-nossl - noauth - pr - - name: test-non-standard-rapid-python3.10-noauth-ssl-replica-set + - name: test-non-standard-rapid-python3.9-noauth-ssl-replica-set commands: - func: run server vars: @@ -4712,11 +4712,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.9" tags: - test-non-standard - server-rapid - - python-3.10 + - python-3.9 - replica_set-noauth-ssl - noauth - name: test-non-standard-rapid-python3.11-auth-ssl-sharded-cluster @@ -4740,7 +4740,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-rapid-python3.9-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4754,11 +4754,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-rapid - - python-3.9 + - python-3.10 - standalone-noauth-nossl - noauth - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 821fe10811..676f9878b9 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -142,17 +142,16 @@ buildvariants: - rhel87-small # Disable test commands tests - - name: disable-test-commands-rhel8-python3.9 + - name: disable-test-commands-rhel8 tasks: - name: .test-standard .server-latest - display_name: Disable test commands RHEL8 Python3.9 + display_name: Disable test commands RHEL8 run_on: - rhel87-small expansions: AUTH: auth SSL: ssl DISABLE_TEST_COMMANDS: "1" - PYTHON_BINARY: /opt/python/3.9/bin/python3 # Doctests tests - name: doctests-rhel8 @@ -491,14 +490,14 @@ buildvariants: SUB_TEST_NAME: pyopenssl # Search index tests - - name: search-index-helpers-rhel8-python3.9 + - name: search-index-helpers-rhel8-python3.10 tasks: - name: .search_index - display_name: Search Index Helpers RHEL8 Python3.9 + display_name: Search Index Helpers RHEL8 Python3.10 run_on: - rhel87-small expansions: - PYTHON_BINARY: /opt/python/3.9/bin/python3 + PYTHON_BINARY: /opt/python/3.10/bin/python3 # Server version tests - name: mongodb-v4.2 diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index c55c423e49..b8330de511 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -20,8 +20,13 @@ fi set -o xtrace # Install python with pip. -PYTHON_VER="python3.9" +PYTHON_VER="python3.10" apt-get -qq update < /dev/null > /dev/null +apt-get -q install -y software-properties-common +# Use openpgp to avoid gpg key timeout. +mkdir -p $HOME/.gnupg +echo "keyserver keys.openpgp.org" >> $HOME/.gnupg/gpg.conf +add-apt-repository -y 'ppa:deadsnakes/ppa' apt-get -qq install $PYTHON_VER $PYTHON_VER-venv build-essential $PYTHON_VER-dev -y < /dev/null > /dev/null export PYTHON_BINARY=$PYTHON_VER diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 2193db32c1..0519f4930b 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -330,10 +330,9 @@ def create_mod_wsgi_variants(): def create_disable_test_commands_variants(): host = DEFAULT_HOST expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") - python = CPYTHONS[0] - display_name = get_variant_name("Disable test commands", host, python=python) + display_name = get_variant_name("Disable test commands", host) tasks = [".test-standard .server-latest"] - return [create_variant(tasks, display_name, host=host, python=python, expansions=expansions)] + return [create_variant(tasks, display_name, host=host, expansions=expansions)] def create_oidc_auth_variants(): diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 83bdd58fdc..aee4ed3bee 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -22,7 +22,7 @@ ############## ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] -CPYTHONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"] +CPYTHONS = ["3.10", "3.9", "3.11", "3.12", "3.13", "3.14"] PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py index 3579e77619..e3833ae63a 100644 --- a/.evergreen/scripts/kms_tester.py +++ b/.evergreen/scripts/kms_tester.py @@ -33,7 +33,7 @@ def _setup_azure_vm(base_env: dict[str, str]) -> None: env["AZUREKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential" run_command(f"{azure_dir}/run-command.sh", env=env) - env["AZUREKMS_CMD"] = "bash .evergreen/just.sh setup-tests kms azure-remote" + env["AZUREKMS_CMD"] = "NO_EXT=1 bash .evergreen/just.sh setup-tests kms azure-remote" run_command(f"{azure_dir}/run-command.sh", env=env) LOGGER.info("Setting up Azure VM... done.") @@ -53,7 +53,7 @@ def _setup_gcp_vm(base_env: dict[str, str]) -> None: env["GCPKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential" run_command(f"{gcp_dir}/run-command.sh", env=env) - env["GCPKMS_CMD"] = "bash ./.evergreen/just.sh setup-tests kms gcp-remote" + env["GCPKMS_CMD"] = "NO_EXT=1 bash ./.evergreen/just.sh setup-tests kms gcp-remote" run_command(f"{gcp_dir}/run-command.sh", env=env) LOGGER.info("Setting up GCP VM...") @@ -98,6 +98,13 @@ def setup_kms(sub_test_name: str) -> None: if sub_test_target == "azure": os.environ["AZUREKMS_VMNAME_PREFIX"] = "PYTHON_DRIVER" + # Found using "az vm image list --output table" + os.environ[ + "AZUREKMS_IMAGE" + ] = "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest" + else: + os.environ["GCPKMS_IMAGEFAMILY"] = "debian-12" + run_command("./setup.sh", cwd=kms_dir) base_env = _load_kms_config(sub_test_target) diff --git a/.evergreen/scripts/oidc_tester.py b/.evergreen/scripts/oidc_tester.py index d6f127bbd6..ac2960371e 100644 --- a/.evergreen/scripts/oidc_tester.py +++ b/.evergreen/scripts/oidc_tester.py @@ -42,6 +42,11 @@ def setup_oidc(sub_test_name: str) -> dict[str, str] | None: if sub_test_name == "azure": env["AZUREOIDC_VMNAME_PREFIX"] = "PYTHON_DRIVER" if "-remote" not in sub_test_name: + if sub_test_name == "azure": + # Found using "az vm image list --output table" + env["AZUREOIDC_IMAGE"] = "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest" + else: + env["GCPKMS_IMAGEFAMILY"] = "debian-12" run_command(f"bash {target_dir}/setup.sh", env=env) if sub_test_name in K8S_NAMES: run_command(f"bash {target_dir}/setup-pod.sh {sub_test_name}") @@ -84,7 +89,7 @@ def test_oidc_send_to_remote(sub_test_name: str) -> None: env[f"{upper_name}OIDC_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE env[ f"{upper_name}OIDC_TEST_CMD" - ] = f"OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh" + ] = f"NO_EXT=1 OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh" elif sub_test_name in K8S_NAMES: env["K8S_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE env["K8S_TEST_CMD"] = "OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh" diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index ea2fb5c5ef..3f0a8cc7f9 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -53,7 +53,7 @@ GROUP_MAP = dict(mockupdb="mockupdb", perf="perf") # The python version used for perf tests. -PERF_PYTHON_VERSION = "3.9.13" +PERF_PYTHON_VERSION = "3.10.11" def is_set(var: str) -> bool: @@ -90,6 +90,13 @@ def setup_libmongocrypt(): distro = get_distro() if distro.name.startswith("Debian"): target = f"debian{distro.version_id}" + elif distro.name.startswith("Ubuntu"): + if distro.version_id == "20.04": + target = "debian11" + elif distro.version_id == "22.04": + target = "debian12" + elif distro.version_id == "24.04": + target = "debian13" elif distro.name.startswith("Red Hat"): if distro.version_id.startswith("7"): target = "rhel-70-64-bit" diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index 52f16e0bcc..f375874916 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -2059,7 +2059,7 @@ async def _test_handshake(self, env_vars, expected_env): async def test_handshake_01_aws(self): await self._test_handshake( { - "AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "us-east-2", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", }, @@ -2097,7 +2097,7 @@ async def test_handshake_04_vercel(self): async def test_handshake_05_multiple(self): await self._test_handshake( - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "FUNCTIONS_WORKER_RUNTIME": "python"}, + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "FUNCTIONS_WORKER_RUNTIME": "python"}, None, ) # Extra cases for other combos. @@ -2109,13 +2109,16 @@ async def test_handshake_05_multiple(self): async def test_handshake_06_region_too_long(self): await self._test_handshake( - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_REGION": "a" * 512}, + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "a" * 512}, {"name": "aws.lambda"}, ) async def test_handshake_07_memory_invalid_int(self): await self._test_handshake( - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big"}, + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", + }, {"name": "aws.lambda"}, ) diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py index 2798afe7df..5820d00c48 100644 --- a/test/asynchronous/test_discovery_and_monitoring.py +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -485,7 +485,7 @@ async def test_rtt_connection_is_disabled_poll(self): async def test_rtt_connection_is_disabled_auto(self): envs = [ - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9"}, + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10"}, {"FUNCTIONS_WORKER_RUNTIME": "python"}, {"K_SERVICE": "gcpservicename"}, {"FUNCTION_NAME": "gcpfunctionname"}, diff --git a/test/test_client.py b/test/test_client.py index dd1bf94cf1..73ed3ac3d4 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -2016,7 +2016,7 @@ def _test_handshake(self, env_vars, expected_env): def test_handshake_01_aws(self): self._test_handshake( { - "AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "us-east-2", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", }, @@ -2054,7 +2054,7 @@ def test_handshake_04_vercel(self): def test_handshake_05_multiple(self): self._test_handshake( - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "FUNCTIONS_WORKER_RUNTIME": "python"}, + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "FUNCTIONS_WORKER_RUNTIME": "python"}, None, ) # Extra cases for other combos. @@ -2066,13 +2066,16 @@ def test_handshake_05_multiple(self): def test_handshake_06_region_too_long(self): self._test_handshake( - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_REGION": "a" * 512}, + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "a" * 512}, {"name": "aws.lambda"}, ) def test_handshake_07_memory_invalid_int(self): self._test_handshake( - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9", "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big"}, + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", + }, {"name": "aws.lambda"}, ) diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 4f8ee30d16..67a82996bd 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -483,7 +483,7 @@ def test_rtt_connection_is_disabled_poll(self): def test_rtt_connection_is_disabled_auto(self): envs = [ - {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.9"}, + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10"}, {"FUNCTIONS_WORKER_RUNTIME": "python"}, {"K_SERVICE": "gcpservicename"}, {"FUNCTION_NAME": "gcpfunctionname"}, From 0d93ec48a58291f1a3d8bf8968cc962f64312854 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 25 Sep 2025 13:09:33 -0500 Subject: [PATCH 2081/2111] PYTHON-5573 Require dnspython 2.6.1+ (#2559) --- .github/workflows/test-python.yml | 31 ++-------------------------- doc/changelog.rst | 2 ++ pymongo/asynchronous/srv_resolver.py | 13 ++---------- pymongo/synchronous/srv_resolver.py | 13 ++---------- requirements.txt | 2 +- uv.lock | 2 +- 6 files changed, 10 insertions(+), 53 deletions(-) diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 6499e8ba8d..336ada8d70 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -225,7 +225,7 @@ jobs: permissions: contents: read runs-on: ubuntu-latest - name: Test using minimum dependencies and supported Python + name: Test minimum dependencies and Python steps: - uses: actions/checkout@v5 with: @@ -238,37 +238,10 @@ jobs: uses: mongodb-labs/drivers-evergreen-tools@master with: version: "8.0" - # Async and our test_dns do not support dnspython 1.X, so we don't run async or dns tests here - name: Run tests shell: bash run: | uv venv source .venv/bin/activate - uv pip install -e ".[test]" --resolution=lowest-direct - pytest -v test/test_srv_polling.py - - test_minimum_for_async: - permissions: - contents: read - runs-on: ubuntu-latest - name: Test async's minimum dependencies and Python - steps: - - uses: actions/checkout@v5 - with: - persist-credentials: false - - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 - with: - python-version: '3.9' - - id: setup-mongodb - uses: mongodb-labs/drivers-evergreen-tools@master - with: - version: "8.0" - # The lifetime kwarg we use in srv resolution was added to the async resolver API in dnspython 2.1.0 - - name: Run tests - shell: bash - run: | - uv venv - source .venv/bin/activate - uv pip install -e ".[test]" --resolution=lowest-direct dnspython==2.1.0 --force-reinstall + uv pip install -e ".[test]" --resolution=lowest-direct --force-reinstall pytest -v test/test_srv_polling.py test/test_dns.py test/asynchronous/test_srv_polling.py test/asynchronous/test_dns.py diff --git a/doc/changelog.rst b/doc/changelog.rst index 6dcb80497b..a1cea177b9 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -9,6 +9,8 @@ PyMongo 4.16 brings a number of changes including: - Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages as doing so may leak sensitive user data. Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`. +- PyMongo now requires ``dnspython>=2.6.1``, since ``dnspython`` 1.0 is no longer maintained and is incompatible with + Python 3.10+. The minimum version is ``2.6.1`` to account for `CVE-2023-29483 `_. - Removed support for Eventlet. Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py index 006abbb616..0130f0e8b3 100644 --- a/pymongo/asynchronous/srv_resolver.py +++ b/pymongo/asynchronous/srv_resolver.py @@ -58,20 +58,11 @@ async def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: if _IS_SYNC: from dns import resolver - if hasattr(resolver, "resolve"): - # dnspython >= 2 - return resolver.resolve(*args, **kwargs) - # dnspython 1.X - return resolver.query(*args, **kwargs) + return resolver.resolve(*args, **kwargs) else: from dns import asyncresolver - if hasattr(asyncresolver, "resolve"): - # dnspython >= 2 - return await asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] - raise ConfigurationError( - "Upgrade to dnspython version >= 2.0 to use AsyncMongoClient with mongodb+srv:// connections." - ) + return await asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] _INVALID_HOST_MSG = ( diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py index 8e492061ae..e3e208e5c6 100644 --- a/pymongo/synchronous/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -58,20 +58,11 @@ def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: if _IS_SYNC: from dns import resolver - if hasattr(resolver, "resolve"): - # dnspython >= 2 - return resolver.resolve(*args, **kwargs) - # dnspython 1.X - return resolver.query(*args, **kwargs) + return resolver.resolve(*args, **kwargs) else: from dns import asyncresolver - if hasattr(asyncresolver, "resolve"): - # dnspython >= 2 - return asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] - raise ConfigurationError( - "Upgrade to dnspython version >= 2.0 to use MongoClient with mongodb+srv:// connections." - ) + return asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] _INVALID_HOST_MSG = ( diff --git a/requirements.txt b/requirements.txt index bdc0d7edc7..8b3d442182 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1 @@ -dnspython>=1.16.0,<3.0.0 +dnspython>=2.6.1,<3.0.0 diff --git a/uv.lock b/uv.lock index c7a6ed91f1..b574a4b2c0 100644 --- a/uv.lock +++ b/uv.lock @@ -1224,7 +1224,7 @@ requires-dist = [ { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')", specifier = ">=2023.7.22" }, { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')", specifier = ">=2023.7.22" }, { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, - { name = "dnspython", specifier = ">=1.16.0,<3.0.0" }, + { name = "dnspython", specifier = ">=2.6.1,<3.0.0" }, { name = "furo", marker = "extra == 'docs'", specifier = "==2025.7.19" }, { name = "importlib-metadata", marker = "python_full_version < '3.13' and extra == 'test'", specifier = ">=7.0" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, From e0767cf5a1ae47474889680685046e2866317900 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Fri, 26 Sep 2025 09:54:19 -0500 Subject: [PATCH 2082/2111] PYTHON-5479 Drop support for Python 3.9 (#2562) Co-authored-by: Noah Stapp --- .evergreen/generated_configs/tasks.yml | 699 +++++++------------- .evergreen/scripts/generate_config_utils.py | 2 +- .evergreen/utils.sh | 32 +- .github/workflows/dist.yml | 18 +- .github/workflows/test-python.yml | 18 +- CONTRIBUTING.md | 2 +- README.md | 2 +- doc/changelog.rst | 3 + gridfs/asynchronous/grid_file.py | 1 - gridfs/synchronous/grid_file.py | 1 - pymongo/asynchronous/cursor.py | 1 - pymongo/asynchronous/helpers.py | 16 - pymongo/synchronous/cursor.py | 1 - pymongo/synchronous/helpers.py | 16 - pyproject.toml | 3 +- test/asynchronous/test_change_stream.py | 1 - test/asynchronous/test_client.py | 1 - test/asynchronous/test_collation.py | 1 - test/asynchronous/test_collection.py | 1 - test/asynchronous/test_cursor.py | 1 - test/asynchronous/test_custom_types.py | 1 - test/asynchronous/test_database.py | 1 - test/asynchronous/test_encryption.py | 1 - test/asynchronous/test_examples.py | 1 - test/asynchronous/test_grid_file.py | 1 - test/asynchronous/test_load_balancer.py | 2 - test/asynchronous/test_monitoring.py | 1 - test/asynchronous/test_read_preferences.py | 1 - test/asynchronous/test_session.py | 1 - test/asynchronous/test_transactions.py | 1 - test/asynchronous/unified_format.py | 1 - test/test_change_stream.py | 1 - test/test_client.py | 1 - test/test_collation.py | 1 - test/test_collection.py | 1 - test/test_cursor.py | 1 - test/test_custom_types.py | 1 - test/test_database.py | 1 - test/test_encryption.py | 1 - test/test_examples.py | 1 - test/test_grid_file.py | 1 - test/test_load_balancer.py | 2 - test/test_monitoring.py | 1 - test/test_read_preferences.py | 1 - test/test_session.py | 1 - test/test_transactions.py | 1 - test/unified_format.py | 1 - 47 files changed, 293 insertions(+), 556 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index d6aa8966c4..dc65bfb557 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -22,7 +22,7 @@ tasks: SUB_TEST_NAME: regular PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-regular] - - name: test-auth-aws-5.0-assume-role-python3.9 + - name: test-auth-aws-5.0-assume-role-python3.11 commands: - func: run server vars: @@ -33,9 +33,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: assume-role - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: [auth-aws, auth-aws-assume-role] - - name: test-auth-aws-6.0-ec2-python3.11 + - name: test-auth-aws-6.0-ec2-python3.12 commands: - func: run server vars: @@ -46,9 +46,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ec2 - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.12" tags: [auth-aws, auth-aws-ec2] - - name: test-auth-aws-7.0-env-creds-python3.12 + - name: test-auth-aws-7.0-env-creds-python3.13 commands: - func: run server vars: @@ -59,9 +59,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: env-creds - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-8.0-session-creds-python3.13 + - name: test-auth-aws-8.0-session-creds-python3.14 commands: - func: run server vars: @@ -72,9 +72,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: session-creds - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-rapid-web-identity-python3.14 + - name: test-auth-aws-rapid-web-identity-python3.10 commands: - func: run server vars: @@ -85,9 +85,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-rapid-web-identity-session-name-python3.14 + - name: test-auth-aws-rapid-web-identity-session-name-python3.10 commands: - func: run server vars: @@ -99,9 +99,9 @@ tasks: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity AWS_ROLE_SESSION_NAME: test - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-latest-ecs-python3.10 + - name: test-auth-aws-latest-ecs-python3.11 commands: - func: run server vars: @@ -112,7 +112,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ecs - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.11" tags: [auth-aws, auth-aws-ecs] # Backport pr tests @@ -265,19 +265,7 @@ tasks: SUB_TEST_NAME: standalone PYTHON_VERSION: "3.10" tags: [mod_wsgi, pr] - - name: mod-wsgi-embedded-mode-replica-set-python3.9 - commands: - - func: run server - vars: - TOPOLOGY: replica_set - PYTHON_VERSION: "3.9" - - func: run tests - vars: - TEST_NAME: mod_wsgi - SUB_TEST_NAME: embedded - PYTHON_VERSION: "3.9" - tags: [mod_wsgi, pr] - - name: mod-wsgi-replica-set-python3.11 + - name: mod-wsgi-embedded-mode-replica-set-python3.11 commands: - func: run server vars: @@ -286,10 +274,10 @@ tasks: - func: run tests vars: TEST_NAME: mod_wsgi - SUB_TEST_NAME: standalone + SUB_TEST_NAME: embedded PYTHON_VERSION: "3.11" tags: [mod_wsgi, pr] - - name: mod-wsgi-embedded-mode-replica-set-python3.12 + - name: mod-wsgi-replica-set-python3.12 commands: - func: run server vars: @@ -298,10 +286,10 @@ tasks: - func: run tests vars: TEST_NAME: mod_wsgi - SUB_TEST_NAME: embedded + SUB_TEST_NAME: standalone PYTHON_VERSION: "3.12" tags: [mod_wsgi, pr] - - name: mod-wsgi-replica-set-python3.13 + - name: mod-wsgi-embedded-mode-replica-set-python3.13 commands: - func: run server vars: @@ -310,10 +298,10 @@ tasks: - func: run tests vars: TEST_NAME: mod_wsgi - SUB_TEST_NAME: standalone + SUB_TEST_NAME: embedded PYTHON_VERSION: "3.13" tags: [mod_wsgi, pr] - - name: mod-wsgi-embedded-mode-replica-set-python3.14 + - name: mod-wsgi-replica-set-python3.14 commands: - func: run server vars: @@ -322,7 +310,7 @@ tasks: - func: run tests vars: TEST_NAME: mod_wsgi - SUB_TEST_NAME: embedded + SUB_TEST_NAME: standalone PYTHON_VERSION: "3.14" tags: [mod_wsgi, pr] @@ -2410,28 +2398,26 @@ tasks: tags: [search_index] # Server version tests - - name: test-server-version-python3.13-async-auth-nossl-replica-set-cov + - name: test-server-version-pypy3.10-async-auth-nossl-replica-set commands: - func: run server vars: AUTH: auth SSL: nossl TOPOLOGY: replica_set - COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: nossl TOPOLOGY: replica_set - COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.13 + - python-pypy3.10 - replica_set-auth-nossl - async - - name: test-server-version-python3.12-sync-auth-nossl-replica-set-cov + - name: test-server-version-python3.14-sync-auth-nossl-replica-set-cov commands: - func: run server vars: @@ -2445,14 +2431,14 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.12 + - python-3.14 - replica_set-auth-nossl - sync - - name: test-server-version-python3.11-async-auth-ssl-replica-set-cov + - name: test-server-version-python3.13-async-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2466,14 +2452,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-3.11 + - python-3.13 - replica_set-auth-ssl - async - - name: test-server-version-python3.9-sync-auth-ssl-replica-set-cov + - name: test-server-version-python3.12-sync-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2487,14 +2473,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-3.12 - replica_set-auth-ssl - sync - - name: test-server-version-python3.9-async-noauth-nossl-replica-set-cov + - name: test-server-version-python3.13-async-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2508,15 +2494,15 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-3.9 + - python-3.13 - replica_set-noauth-nossl - async - pr - - name: test-server-version-python3.10-sync-noauth-nossl-replica-set-cov + - name: test-server-version-python3.12-sync-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2530,34 +2516,36 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-3.10 + - python-3.12 - replica_set-noauth-nossl - sync - pr - - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set + - name: test-server-version-python3.11-async-noauth-ssl-replica-set-cov commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.11 - replica_set-noauth-ssl - async - - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov + - name: test-server-version-python3.10-sync-noauth-ssl-replica-set-cov commands: - func: run server vars: @@ -2571,14 +2559,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.14 + - python-3.10 - replica_set-noauth-ssl - sync - - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.11-async-auth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2592,14 +2580,14 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.14 + - python-3.11 - sharded_cluster-auth-nossl - async - - name: test-server-version-python3.13-sync-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.10-sync-auth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2613,11 +2601,11 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.13 + - python-3.10 - sharded_cluster-auth-nossl - sync - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov @@ -2726,27 +2714,6 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - async - - name: test-server-version-python3.9-async-auth-ssl-sharded-cluster-cov - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - COVERAGE: "1" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.9" - TEST_NAME: default_async - tags: - - server-version - - python-3.9 - - sharded_cluster-auth-ssl - - async - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server @@ -2872,27 +2839,6 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.9-sync-auth-ssl-sharded-cluster-cov - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - COVERAGE: "1" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.9" - TEST_NAME: default_sync - tags: - - server-version - - python-3.9 - - sharded_cluster-auth-ssl - - sync - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -2912,28 +2858,26 @@ tasks: - python-pypy3.10 - sharded_cluster-auth-ssl - sync - - name: test-server-version-python3.11-async-noauth-nossl-sharded-cluster-cov + - name: test-server-version-pypy3.10-async-noauth-nossl-sharded-cluster commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.11 + - python-pypy3.10 - sharded_cluster-noauth-nossl - async - - name: test-server-version-python3.9-sync-noauth-nossl-sharded-cluster-cov + - name: test-server-version-python3.14-sync-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2947,14 +2891,14 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.9 + - python-3.14 - sharded_cluster-noauth-nossl - sync - - name: test-server-version-python3.10-async-noauth-ssl-sharded-cluster-cov + - name: test-server-version-python3.13-async-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2968,33 +2912,35 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-3.13 - sharded_cluster-noauth-ssl - async - - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster + - name: test-server-version-python3.12-sync-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-3.12 - sharded_cluster-noauth-ssl - sync - - name: test-server-version-python3.12-async-auth-nossl-standalone-cov + - name: test-server-version-python3.13-async-auth-nossl-standalone-cov commands: - func: run server vars: @@ -3008,14 +2954,14 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - server-version - - python-3.12 + - python-3.13 - standalone-auth-nossl - async - - name: test-server-version-python3.11-sync-auth-nossl-standalone-cov + - name: test-server-version-python3.12-sync-auth-nossl-standalone-cov commands: - func: run server vars: @@ -3029,14 +2975,14 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version - - python-3.11 + - python-3.12 - standalone-auth-nossl - sync - - name: test-server-version-python3.9-async-auth-ssl-standalone-cov + - name: test-server-version-python3.11-async-auth-ssl-standalone-cov commands: - func: run server vars: @@ -3050,11 +2996,11 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.9 + - python-3.11 - standalone-auth-ssl - async - name: test-server-version-python3.10-sync-auth-ssl-standalone-cov @@ -3078,7 +3024,7 @@ tasks: - python-3.10 - standalone-auth-ssl - sync - - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov + - name: test-server-version-python3.11-async-noauth-nossl-standalone-cov commands: - func: run server vars: @@ -3092,56 +3038,56 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.10 + - python-3.11 - standalone-noauth-nossl - async - pr - - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone + - name: test-server-version-python3.10-sync-noauth-nossl-standalone-cov commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-3.10 - standalone-noauth-nossl - sync - pr - - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov + - name: test-server-version-pypy3.10-async-noauth-ssl-standalone commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone - COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.14 + - python-pypy3.10 - standalone-noauth-ssl - async - - name: test-server-version-python3.13-sync-noauth-ssl-standalone-cov + - name: test-server-version-python3.14-sync-noauth-ssl-standalone-cov commands: - func: run server vars: @@ -3155,16 +3101,16 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.13 + - python-3.14 - standalone-noauth-ssl - sync # Standard tests - - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set + - name: test-standard-v4.2-python3.11-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3178,15 +3124,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.14 + - python-3.11 - replica_set-noauth-ssl - sync - - name: test-standard-v4.2-python3.9-sync-noauth-ssl-replica-set + - name: test-standard-v4.2-pypy3.10-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3200,37 +3146,16 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.9 + - python-pypy3.10 - replica_set-noauth-ssl - sync - - name: test-standard-v4.2-python3.11-sync-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "4.2" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "4.2" - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync - tags: - - test-standard - - server-4.2 - - python-3.11 - - sharded_cluster-auth-ssl - - sync - - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster + - pypy + - name: test-standard-v4.2-python3.12-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3244,15 +3169,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-pypy3.10 + - python-3.12 - sharded_cluster-auth-ssl - sync - - pypy - name: test-standard-v4.2-python3.10-sync-noauth-nossl-standalone commands: - func: run server @@ -3275,7 +3199,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - sync - - name: test-standard-v4.2-python3.13-sync-noauth-nossl-standalone + - name: test-standard-v4.2-python3.14-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3289,15 +3213,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.13 + - python-3.14 - standalone-noauth-nossl - sync - - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set + - name: test-standard-v4.4-python3.11-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3311,15 +3235,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.14 + - python-3.11 - replica_set-noauth-ssl - async - - name: test-standard-v4.4-python3.9-async-noauth-ssl-replica-set + - name: test-standard-v4.4-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3333,37 +3257,16 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.9 + - python-pypy3.10 - replica_set-noauth-ssl - async - - name: test-standard-v4.4-python3.11-async-auth-ssl-sharded-cluster - commands: - - func: run server - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "4.4" - - func: run tests - vars: - AUTH: auth - SSL: ssl - TOPOLOGY: sharded_cluster - VERSION: "4.4" - PYTHON_VERSION: "3.11" - TEST_NAME: default_async - tags: - - test-standard - - server-4.4 - - python-3.11 - - sharded_cluster-auth-ssl - - async - - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster + - pypy + - name: test-standard-v4.4-python3.12-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3377,15 +3280,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-pypy3.10 + - python-3.12 - sharded_cluster-auth-ssl - async - - pypy - name: test-standard-v4.4-python3.10-async-noauth-nossl-standalone commands: - func: run server @@ -3408,7 +3310,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - async - - name: test-standard-v4.4-python3.13-async-noauth-nossl-standalone + - name: test-standard-v4.4-python3.14-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3422,12 +3324,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.13 + - python-3.14 - standalone-noauth-nossl - async - name: test-standard-v5.0-python3.10-sync-noauth-ssl-replica-set @@ -3452,7 +3354,7 @@ tasks: - python-3.10 - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.13-sync-noauth-ssl-replica-set + - name: test-standard-v5.0-python3.14-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3466,15 +3368,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.13 + - python-3.14 - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-python3.11-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3488,15 +3390,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.14 + - python-3.11 - sharded_cluster-auth-ssl - sync - - name: test-standard-v5.0-python3.9-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3510,15 +3412,16 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.9 + - python-pypy3.10 - sharded_cluster-auth-ssl - sync - - name: test-standard-v5.0-python3.12-sync-noauth-nossl-standalone + - pypy + - name: test-standard-v5.0-python3.13-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3532,12 +3435,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.12 + - python-3.13 - standalone-noauth-nossl - sync - name: test-standard-v6.0-python3.10-async-noauth-ssl-replica-set @@ -3562,7 +3465,7 @@ tasks: - python-3.10 - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.13-async-noauth-ssl-replica-set + - name: test-standard-v6.0-python3.14-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3576,15 +3479,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.13 + - python-3.14 - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster + - name: test-standard-v6.0-python3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3598,15 +3501,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.14 + - python-3.11 - sharded_cluster-auth-ssl - async - - name: test-standard-v6.0-python3.9-async-auth-ssl-sharded-cluster + - name: test-standard-v6.0-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3620,15 +3523,16 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.9 + - python-pypy3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v6.0-python3.12-async-noauth-nossl-standalone + - pypy + - name: test-standard-v6.0-python3.13-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3642,15 +3546,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.12 + - python-3.13 - standalone-noauth-nossl - async - - name: test-standard-v7.0-python3.12-sync-noauth-ssl-replica-set + - name: test-standard-v7.0-python3.13-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3664,12 +3568,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-3.12 + - python-3.13 - replica_set-noauth-ssl - sync - name: test-standard-v7.0-python3.10-sync-auth-ssl-sharded-cluster @@ -3694,7 +3598,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - sync - - name: test-standard-v7.0-python3.13-sync-auth-ssl-sharded-cluster + - name: test-standard-v7.0-python3.14-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3708,15 +3612,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-3.13 + - python-3.14 - sharded_cluster-auth-ssl - sync - - name: test-standard-v7.0-python3.11-sync-noauth-nossl-standalone + - name: test-standard-v7.0-python3.12-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3730,38 +3634,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync - tags: - - test-standard - - server-7.0 - - python-3.11 - - standalone-noauth-nossl - - sync - - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "7.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-pypy3.10 + - python-3.12 - standalone-noauth-nossl - sync - - pypy - - name: test-standard-v8.0-python3.12-async-noauth-ssl-replica-set + - name: test-standard-v8.0-python3.13-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3775,12 +3656,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.12 + - python-3.13 - replica_set-noauth-ssl - async - name: test-standard-v8.0-python3.10-async-auth-ssl-sharded-cluster @@ -3805,7 +3686,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v8.0-python3.13-async-auth-ssl-sharded-cluster + - name: test-standard-v8.0-python3.14-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3819,15 +3700,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.13 + - python-3.14 - sharded_cluster-auth-ssl - async - - name: test-standard-v8.0-python3.11-async-noauth-nossl-standalone + - name: test-standard-v8.0-python3.12-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3841,38 +3722,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.11" - TEST_NAME: default_async - tags: - - test-standard - - server-8.0 - - python-3.11 - - standalone-noauth-nossl - - async - - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone - commands: - - func: run server - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "8.0" - - func: run tests - vars: - AUTH: noauth - SSL: nossl - TOPOLOGY: standalone - VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-pypy3.10 + - python-3.12 - standalone-noauth-nossl - async - - pypy - - name: test-standard-latest-python3.11-async-noauth-ssl-replica-set + - name: test-standard-latest-python3.12-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3886,39 +3744,16 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.11 + - python-3.12 - replica_set-noauth-ssl - async - pr - - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set - commands: - - func: run server - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: latest - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: latest - PYTHON_VERSION: pypy3.10 - TEST_NAME: default_async - tags: - - test-standard - - server-latest - - python-pypy3.10 - - replica_set-noauth-ssl - - async - - pypy - - name: test-standard-latest-python3.12-async-auth-ssl-sharded-cluster + - name: test-standard-latest-python3.13-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3932,16 +3767,16 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.12 + - python-3.13 - sharded_cluster-auth-ssl - async - pr - - name: test-standard-latest-python3.14-async-noauth-nossl-standalone + - name: test-standard-latest-python3.11-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3955,16 +3790,16 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.14 + - python-3.11 - standalone-noauth-nossl - async - pr - - name: test-standard-latest-python3.9-async-noauth-nossl-standalone + - name: test-standard-latest-pypy3.10-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3978,38 +3813,16 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - test-standard - server-latest - - python-3.9 + - python-pypy3.10 - standalone-noauth-nossl - async - - pr - - name: test-standard-rapid-python3.11-sync-noauth-ssl-replica-set - commands: - - func: run server - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - - func: run tests - vars: - AUTH: noauth - SSL: ssl - TOPOLOGY: replica_set - VERSION: rapid - PYTHON_VERSION: "3.11" - TEST_NAME: default_sync - tags: - - test-standard - - server-rapid - - python-3.11 - - replica_set-noauth-ssl - - sync - - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set + - pypy + - name: test-standard-rapid-python3.12-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -4023,16 +3836,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-pypy3.10 + - python-3.12 - replica_set-noauth-ssl - sync - - pypy - - name: test-standard-rapid-python3.12-sync-auth-ssl-sharded-cluster + - name: test-standard-rapid-python3.13-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4046,15 +3858,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-3.12 + - python-3.13 - sharded_cluster-auth-ssl - sync - - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone + - name: test-standard-rapid-python3.11-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -4068,15 +3880,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-3.14 + - python-3.11 - standalone-noauth-nossl - sync - - name: test-standard-rapid-python3.9-sync-noauth-nossl-standalone + - name: test-standard-rapid-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -4090,17 +3902,18 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.9" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-3.9 + - python-pypy3.10 - standalone-noauth-nossl - sync + - pypy # Test non standard tests - - name: test-non-standard-v4.2-python3.9-noauth-ssl-replica-set + - name: test-non-standard-v4.2-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4114,14 +3927,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-4.2 - - python-3.9 + - python-3.11 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v4.2-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-v4.2-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4135,11 +3948,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-4.2 - - python-3.11 + - python-3.12 - sharded_cluster-auth-ssl - auth - name: test-non-standard-v4.2-python3.10-noauth-nossl-standalone @@ -4185,7 +3998,7 @@ tasks: - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v4.4-python3.13-noauth-ssl-replica-set + - name: test-non-standard-v4.4-python3.14-noauth-ssl-replica-set commands: - func: run server vars: @@ -4199,11 +4012,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-4.4 - - python-3.13 + - python-3.14 - replica_set-noauth-ssl - noauth - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set @@ -4228,7 +4041,7 @@ tasks: - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v4.4-python3.14-auth-ssl-sharded-cluster + - name: test-non-standard-v4.4-python3.10-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4242,14 +4055,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-4.4 - - python-3.14 + - python-3.10 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v4.4-python3.12-noauth-nossl-standalone + - name: test-non-standard-v4.4-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -4263,14 +4076,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-4.4 - - python-3.12 + - python-3.13 - standalone-noauth-nossl - noauth - - name: test-non-standard-v5.0-python3.9-noauth-ssl-replica-set + - name: test-non-standard-v5.0-python3.12-noauth-ssl-replica-set commands: - func: run server vars: @@ -4284,14 +4097,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-5.0 - - python-3.9 + - python-3.12 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v5.0-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-python3.13-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4305,11 +4118,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-5.0 - - python-3.11 + - python-3.13 - sharded_cluster-auth-ssl - auth - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster @@ -4334,7 +4147,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone + - name: test-non-standard-v5.0-python3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4348,14 +4161,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-5.0 - - python-3.10 + - python-3.11 - standalone-noauth-nossl - noauth - - name: test-non-standard-v6.0-python3.13-noauth-ssl-replica-set + - name: test-non-standard-v6.0-python3.10-noauth-ssl-replica-set commands: - func: run server vars: @@ -4369,14 +4182,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-6.0 - - python-3.13 + - python-3.10 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v6.0-python3.14-auth-ssl-sharded-cluster + - name: test-non-standard-v6.0-python3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4390,14 +4203,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-6.0 - - python-3.14 + - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v6.0-python3.12-noauth-nossl-standalone + - name: test-non-standard-v6.0-python3.14-noauth-nossl-standalone commands: - func: run server vars: @@ -4411,11 +4224,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-6.0 - - python-3.12 + - python-3.14 - standalone-noauth-nossl - noauth - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone @@ -4440,7 +4253,7 @@ tasks: - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v7.0-python3.9-noauth-ssl-replica-set + - name: test-non-standard-v7.0-python3.13-noauth-ssl-replica-set commands: - func: run server vars: @@ -4454,11 +4267,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-7.0 - - python-3.9 + - python-3.13 - replica_set-noauth-ssl - noauth - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set @@ -4483,7 +4296,7 @@ tasks: - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v7.0-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-v7.0-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4497,14 +4310,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-7.0 - - python-3.11 + - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v7.0-python3.10-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.12-noauth-nossl-standalone commands: - func: run server vars: @@ -4518,14 +4331,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-7.0 - - python-3.10 + - python-3.12 - standalone-noauth-nossl - noauth - - name: test-non-standard-v8.0-python3.13-noauth-ssl-replica-set + - name: test-non-standard-v8.0-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4539,14 +4352,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-8.0 - - python-3.13 + - python-3.11 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v8.0-python3.14-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4560,11 +4373,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-8.0 - - python-3.14 + - python-3.12 - sharded_cluster-auth-ssl - auth - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster @@ -4589,7 +4402,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v8.0-python3.12-noauth-nossl-standalone + - name: test-non-standard-v8.0-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4603,14 +4416,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-8.0 - - python-3.12 + - python-3.10 - standalone-noauth-nossl - noauth - - name: test-non-standard-latest-python3.13-noauth-ssl-replica-set + - name: test-non-standard-latest-python3.12-noauth-ssl-replica-set commands: - func: run server vars: @@ -4624,11 +4437,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-latest - - python-3.13 + - python-3.12 - replica_set-noauth-ssl - noauth - pr @@ -4654,7 +4467,7 @@ tasks: - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster + - name: test-non-standard-latest-python3.13-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4668,15 +4481,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-latest - - python-3.14 + - python-3.13 - sharded_cluster-auth-ssl - auth - pr - - name: test-non-standard-latest-python3.12-noauth-nossl-standalone + - name: test-non-standard-latest-python3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4690,15 +4503,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-latest - - python-3.12 + - python-3.11 - standalone-noauth-nossl - noauth - pr - - name: test-non-standard-rapid-python3.9-noauth-ssl-replica-set + - name: test-non-standard-rapid-python3.14-noauth-ssl-replica-set commands: - func: run server vars: @@ -4712,14 +4525,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.9" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-rapid - - python-3.9 + - python-3.14 - replica_set-noauth-ssl - noauth - - name: test-non-standard-rapid-python3.11-auth-ssl-sharded-cluster + - name: test-non-standard-rapid-python3.10-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4733,14 +4546,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-rapid - - python-3.11 + - python-3.10 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -4754,11 +4567,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-rapid - - python-3.10 + - python-3.13 - standalone-noauth-nossl - noauth - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index aee4ed3bee..a76753ebe9 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -22,7 +22,7 @@ ############## ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] -CPYTHONS = ["3.10", "3.9", "3.11", "3.12", "3.13", "3.14"] +CPYTHONS = ["3.10", "3.11", "3.12", "3.13", "3.14"] PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 354d18dbf7..f9f36cc6cc 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -6,29 +6,29 @@ find_python3() { PYTHON="" # Find a suitable toolchain version, if available. if [ "$(uname -s)" = "Darwin" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/3.9/bin/python3" + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - PYTHON="C:/python/Python39/python.exe" + PYTHON="C:/python/Python310/python.exe" else - # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.9+. - if [ -f "/opt/python/3.9/bin/python3" ]; then + # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.10+. + if [ -f "/opt/python/3.10/bin/python3" ]; then PYTHON="/opt/python/Current/bin/python3" - elif is_python_39 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v5/bin/python3" - elif is_python_39 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v4/bin/python3" - elif is_python_39 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then PYTHON="/opt/mongodbtoolchain/v3/bin/python3" fi fi - # Add a fallback system python3 if it is available and Python 3.9+. + # Add a fallback system python3 if it is available and Python 3.10+. if [ -z "$PYTHON" ]; then - if is_python_39 "$(command -v python3)"; then + if is_python_310 "$(command -v python3)"; then PYTHON="$(command -v python3)" fi fi if [ -z "$PYTHON" ]; then - echo "Cannot test without python3.9+ installed!" + echo "Cannot test without python3.10+ installed!" exit 1 fi echo "$PYTHON" @@ -99,15 +99,15 @@ testinstall () { fi } -# Function that returns success if the provided Python binary is version 3.9 or later +# Function that returns success if the provided Python binary is version 3.10 or later # Usage: -# is_python_39 /path/to/python +# is_python_310 /path/to/python # * param1: Python binary -is_python_39() { +is_python_310() { if [ -z "$1" ]; then return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 9))"; then - # runs when sys.version_info[:2] >= (3, 9) + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 10))"; then + # runs when sys.version_info[:2] >= (3, 10) return 0 else return 1 @@ -131,7 +131,7 @@ get_python_binary() { else PYTHON="/opt/python/$version/bin/python3" fi - if is_python_39 "$(command -v $PYTHON)"; then + if is_python_310 "$(command -v $PYTHON)"; then echo "$PYTHON" else echo "Could not find suitable python binary for '$version'" >&2 diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 0110d8df41..acbfc6cfe5 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -70,34 +70,26 @@ jobs: platforms: all - name: Install cibuildwheel - # Note: the default manylinux is manylinux2014 + # Note: the default manylinux is manylinux_2_28 run: | python -m pip install -U pip - python -m pip install "cibuildwheel>=2.20,<3" + python -m pip install "cibuildwheel>=3.2.0,<4" - name: Build wheels env: CIBW_BUILD: ${{ matrix.buildplat[2] }} run: python -m cibuildwheel --output-dir wheelhouse - - name: Build manylinux1 wheels - if: ${{ matrix.buildplat[1] == 'manylinux_x86_64' || matrix.buildplat[1] == 'manylinux_i686' }} - env: - CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 - CIBW_MANYLINUX_I686_IMAGE: manylinux1 - CIBW_BUILD: "cp39-${{ matrix.buildplat[1] }} cp39-${{ matrix.buildplat[1] }}" - run: python -m cibuildwheel --output-dir wheelhouse - - name: Assert all versions in wheelhouse if: ${{ ! startsWith(matrix.buildplat[1], 'macos') }} run: | - ls wheelhouse/*cp39*.whl ls wheelhouse/*cp310*.whl ls wheelhouse/*cp311*.whl ls wheelhouse/*cp312*.whl ls wheelhouse/*cp313*.whl + ls wheelhouse/*cp314*.whl # Free-threading builds: - ls wheelhouse/*cp313t*.whl + ls wheelhouse/*cp314t*.whl - uses: actions/upload-artifact@v4 with: @@ -118,7 +110,7 @@ jobs: - uses: actions/setup-python@v6 with: # Build sdist on lowest supported Python - python-version: '3.9' + python-version: "3.10" - name: Build SDist run: | diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 336ada8d70..40a497480c 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -26,7 +26,7 @@ jobs: uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true - python-version: "3.9" + python-version: "3.10" - name: Install just run: uv tool install rust-just - name: Install Python dependencies @@ -56,7 +56,7 @@ jobs: matrix: # Tests currently only pass on ubuntu on GitHub Actions. os: [ubuntu-latest] - python-version: ["3.9", "pypy-3.10", "3.13t"] + python-version: ["3.10", "pypy-3.10", "3.13t"] mongodb-version: ["8.0"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} @@ -87,7 +87,7 @@ jobs: uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true - python-version: "3.9" + python-version: "3.10" - name: Install just run: uv tool install rust-just - id: setup-mongodb @@ -112,7 +112,7 @@ jobs: uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true - python-version: "3.9" + python-version: "3.10" - name: Install just run: uv tool install rust-just - name: Install dependencies @@ -131,7 +131,7 @@ jobs: uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: enable-cache: true - python-version: "3.9" + python-version: "3.10" - name: Install just run: uv tool install rust-just - name: Install dependencies @@ -144,7 +144,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ["3.9", "3.11"] + python: ["3.10", "3.11"] steps: - uses: actions/checkout@v5 with: @@ -175,7 +175,7 @@ jobs: cache: 'pip' cache-dependency-path: 'pyproject.toml' # Build sdist on lowest supported Python - python-version: '3.9' + python-version: "3.10" - name: Build SDist shell: bash run: | @@ -209,7 +209,7 @@ jobs: cache: 'pip' cache-dependency-path: 'sdist/test/pyproject.toml' # Test sdist on lowest supported Python - python-version: '3.9' + python-version: "3.10" - id: setup-mongodb uses: mongodb-labs/drivers-evergreen-tools@master - name: Run connect test from sdist @@ -233,7 +233,7 @@ jobs: - name: Install uv uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 with: - python-version: '3.9' + python-version: "3.10" - id: setup-mongodb uses: mongodb-labs/drivers-evergreen-tools@master with: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a0f22044f6..c4f7b55817 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,7 +16,7 @@ be of interest or that has already been addressed. ## Supported Interpreters -PyMongo supports CPython 3.9+ and PyPy3.10+. Language features not +PyMongo supports CPython 3.10+ and PyPy3.10+. Language features not supported by all interpreters can not be used. ## Style Guide diff --git a/README.md b/README.md index 695f00be0a..ba1688cb70 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ package that is incompatible with PyMongo. ## Dependencies -PyMongo supports CPython 3.9+ and PyPy3.10+. +PyMongo supports CPython 3.10+ and PyPy3.10+. Required dependencies: diff --git a/doc/changelog.rst b/doc/changelog.rst index a1cea177b9..b96b3082f8 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,6 +6,9 @@ Changes in Version 4.16.0 (XXXX/XX/XX) PyMongo 4.16 brings a number of changes including: +.. warning:: PyMongo 4.16 drops support for Python 3.9: Python 3.10+ is now required. + +- Dropped support for Python 3.9. - Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages as doing so may leak sensitive user data. Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`. diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py index e512f796a8..69a2200d3b 100644 --- a/gridfs/asynchronous/grid_file.py +++ b/gridfs/asynchronous/grid_file.py @@ -46,7 +46,6 @@ from pymongo.asynchronous.collection import AsyncCollection from pymongo.asynchronous.cursor import AsyncCursor from pymongo.asynchronous.database import AsyncDatabase -from pymongo.asynchronous.helpers import anext from pymongo.common import validate_string from pymongo.errors import ( BulkWriteError, diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py index 70a4f80774..7364aedda3 100644 --- a/gridfs/synchronous/grid_file.py +++ b/gridfs/synchronous/grid_file.py @@ -57,7 +57,6 @@ from pymongo.synchronous.collection import Collection from pymongo.synchronous.cursor import Cursor from pymongo.synchronous.database import Database -from pymongo.synchronous.helpers import next _IS_SYNC = True diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index df060a4fa9..cf3a5372b4 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -37,7 +37,6 @@ from bson.code import Code from bson.son import SON from pymongo import _csot, helpers_shared -from pymongo.asynchronous.helpers import anext from pymongo.collation import validate_collation_or_none from pymongo.common import ( validate_is_document_type, diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py index ccda16e28b..4a8c918133 100644 --- a/pymongo/asynchronous/helpers.py +++ b/pymongo/asynchronous/helpers.py @@ -16,9 +16,7 @@ from __future__ import annotations import asyncio -import builtins import socket -import sys from typing import ( Any, Callable, @@ -86,17 +84,3 @@ async def _getaddrinfo( return await loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] else: return socket.getaddrinfo(host, port, **kwargs) - - -if sys.version_info >= (3, 10): - anext = builtins.anext - aiter = builtins.aiter -else: - - async def anext(cls: Any) -> Any: - """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#anext.""" - return await cls.__anext__() - - def aiter(cls: Any) -> Any: - """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#anext.""" - return cls.__aiter__() diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 2cecc5b38a..12e2863bc6 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -55,7 +55,6 @@ _RawBatchQuery, ) from pymongo.response import PinnedResponse -from pymongo.synchronous.helpers import next from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType from pymongo.write_concern import validate_boolean diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py index 1fff9a0f23..c1b75a3c95 100644 --- a/pymongo/synchronous/helpers.py +++ b/pymongo/synchronous/helpers.py @@ -16,9 +16,7 @@ from __future__ import annotations import asyncio -import builtins import socket -import sys from typing import ( Any, Callable, @@ -86,17 +84,3 @@ def _getaddrinfo( return loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] else: return socket.getaddrinfo(host, port, **kwargs) - - -if sys.version_info >= (3, 10): - next = builtins.next - iter = builtins.iter -else: - - def next(cls: Any) -> Any: - """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#next.""" - return cls.__next__() - - def iter(cls: Any) -> Any: - """Compatibility function until we drop 3.9 support: https://docs.python.org/3/library/functions.html#next.""" - return cls.__iter__() diff --git a/pyproject.toml b/pyproject.toml index 227865bc30..0cf161baab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Topic :: Database", "Typing :: Typed", ] @@ -260,8 +261,6 @@ partial_branches = ["if (.*and +)*not _use_c( and.*)*:"] directory = "htmlcov" [tool.cibuildwheel] -# Enable free-threaded support -free-threaded-support = true skip = "pp* *-musllinux*" build-frontend = "build" test-command = "python {project}/tools/fail_if_no_c.py" diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py index 1be45bee3e..3fb8b517f3 100644 --- a/test/asynchronous/test_change_stream.py +++ b/test/asynchronous/test_change_stream.py @@ -48,7 +48,6 @@ from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import AsyncMongoClient from pymongo.asynchronous.command_cursor import AsyncCommandCursor -from pymongo.asynchronous.helpers import anext from pymongo.errors import ( InvalidOperation, OperationFailure, diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py index f375874916..6794605339 100644 --- a/test/asynchronous/test_client.py +++ b/test/asynchronous/test_client.py @@ -92,7 +92,6 @@ from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor, CursorType from pymongo.asynchronous.database import AsyncDatabase -from pymongo.asynchronous.helpers import anext from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.asynchronous.pool import ( AsyncConnection, diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py index 05e548c79e..da810a2a9f 100644 --- a/test/asynchronous/test_collation.py +++ b/test/asynchronous/test_collation.py @@ -21,7 +21,6 @@ from test.utils_shared import EventListener, OvertCommandListener from typing import Any -from pymongo.asynchronous.helpers import anext from pymongo.collation import ( Collation, CollationAlternate, diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py index 379ec9e8c8..498563fe83 100644 --- a/test/asynchronous/test_collection.py +++ b/test/asynchronous/test_collection.py @@ -51,7 +51,6 @@ from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT from pymongo.asynchronous.collection import AsyncCollection, ReturnDocument from pymongo.asynchronous.command_cursor import AsyncCommandCursor -from pymongo.asynchronous.helpers import anext from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.bulk_shared import BulkWriteError from pymongo.cursor_shared import CursorType diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py index 08da82762c..906f78cc97 100644 --- a/test/asynchronous/test_cursor.py +++ b/test/asynchronous/test_cursor.py @@ -46,7 +46,6 @@ from bson.raw_bson import RawBSONDocument from pymongo import ASCENDING, DESCENDING from pymongo.asynchronous.cursor import AsyncCursor, CursorType -from pymongo.asynchronous.helpers import anext from pymongo.collation import Collation from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError from pymongo.operations import _IndexList diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py index 82c54512cc..f8fa51ba76 100644 --- a/test/asynchronous/test_custom_types.py +++ b/test/asynchronous/test_custom_types.py @@ -53,7 +53,6 @@ from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument from pymongo.asynchronous.collection import ReturnDocument -from pymongo.asynchronous.helpers import anext from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py index 3b77330c0e..b49183a852 100644 --- a/test/asynchronous/test_database.py +++ b/test/asynchronous/test_database.py @@ -42,7 +42,6 @@ from pymongo.asynchronous import auth from pymongo.asynchronous.collection import AsyncCollection from pymongo.asynchronous.database import AsyncDatabase -from pymongo.asynchronous.helpers import anext from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.errors import ( CollectionInvalid, diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py index 5d9cf433ba..74c0136ad0 100644 --- a/test/asynchronous/test_encryption.py +++ b/test/asynchronous/test_encryption.py @@ -40,7 +40,6 @@ import pytest from pymongo.asynchronous.collection import AsyncCollection -from pymongo.asynchronous.helpers import anext from pymongo.daemon import _spawn_daemon from pymongo.uri_parser_shared import _parse_kms_tls_options diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py index dd27623654..21770f490c 100644 --- a/test/asynchronous/test_examples.py +++ b/test/asynchronous/test_examples.py @@ -29,7 +29,6 @@ from test.utils_shared import async_wait_until import pymongo -from pymongo.asynchronous.helpers import anext from pymongo.errors import ConnectionFailure, OperationFailure from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py index f3ca596142..2a7e9e1f9d 100644 --- a/test/asynchronous/test_grid_file.py +++ b/test/asynchronous/test_grid_file.py @@ -47,7 +47,6 @@ ) from gridfs.errors import NoFile from pymongo import AsyncMongoClient -from pymongo.asynchronous.helpers import aiter, anext from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress diff --git a/test/asynchronous/test_load_balancer.py b/test/asynchronous/test_load_balancer.py index db7ff9183f..17d85841f9 100644 --- a/test/asynchronous/test_load_balancer.py +++ b/test/asynchronous/test_load_balancer.py @@ -36,8 +36,6 @@ create_async_event, ) -from pymongo.asynchronous.helpers import anext - _IS_SYNC = False pytestmark = pytest.mark.load_balancer diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py index 9b2a3691eb..6a9a5b8da7 100644 --- a/test/asynchronous/test_monitoring.py +++ b/test/asynchronous/test_monitoring.py @@ -40,7 +40,6 @@ from bson.son import SON from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring from pymongo.asynchronous.command_cursor import AsyncCommandCursor -from pymongo.asynchronous.helpers import anext from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.write_concern import WriteConcern diff --git a/test/asynchronous/test_read_preferences.py b/test/asynchronous/test_read_preferences.py index 72dd809db0..d18887da40 100644 --- a/test/asynchronous/test_read_preferences.py +++ b/test/asynchronous/test_read_preferences.py @@ -42,7 +42,6 @@ from test.version import Version from bson.son import SON -from pymongo.asynchronous.helpers import anext from pymongo.asynchronous.mongo_client import AsyncMongoClient from pymongo.errors import ConfigurationError, OperationFailure from pymongo.message import _maybe_add_read_preference diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py index 19ce868c56..ff0feebafc 100644 --- a/test/asynchronous/test_session.py +++ b/test/asynchronous/test_session.py @@ -48,7 +48,6 @@ from pymongo import ASCENDING, AsyncMongoClient, _csot, monitoring from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor -from pymongo.asynchronous.helpers import anext from pymongo.common import _MAX_END_SESSIONS from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure from pymongo.operations import IndexModel, InsertOne, UpdateOne diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 5c2a4f6fae..478710362e 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -39,7 +39,6 @@ from pymongo.asynchronous.client_session import TransactionOptions from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.cursor import AsyncCursor -from pymongo.asynchronous.helpers import anext from pymongo.errors import ( AutoReconnect, CollectionInvalid, diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py index 64659a34d4..0c9e8c10c8 100644 --- a/test/asynchronous/unified_format.py +++ b/test/asynchronous/unified_format.py @@ -77,7 +77,6 @@ from pymongo.asynchronous.command_cursor import AsyncCommandCursor from pymongo.asynchronous.database import AsyncDatabase from pymongo.asynchronous.encryption import AsyncClientEncryption -from pymongo.asynchronous.helpers import anext from pymongo.driver_info import DriverInfo from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 59cad8925b..ad51f91873 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -55,7 +55,6 @@ from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern from pymongo.synchronous.command_cursor import CommandCursor -from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern _IS_SYNC = True diff --git a/test/test_client.py b/test/test_client.py index 73ed3ac3d4..9d201c663b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -114,7 +114,6 @@ from pymongo.synchronous.command_cursor import CommandCursor from pymongo.synchronous.cursor import Cursor, CursorType from pymongo.synchronous.database import Database -from pymongo.synchronous.helpers import next from pymongo.synchronous.mongo_client import MongoClient from pymongo.synchronous.pool import ( Connection, diff --git a/test/test_collation.py b/test/test_collation.py index 5425551dc6..903f24a228 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -37,7 +37,6 @@ UpdateMany, UpdateOne, ) -from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern _IS_SYNC = True diff --git a/test/test_collection.py b/test/test_collection.py index 1bd3a80c5f..18be309f22 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -74,7 +74,6 @@ ) from pymongo.synchronous.collection import Collection, ReturnDocument from pymongo.synchronous.command_cursor import CommandCursor -from pymongo.synchronous.helpers import next from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern diff --git a/test/test_cursor.py b/test/test_cursor.py index b63638bfab..219ca396c9 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -51,7 +51,6 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.synchronous.cursor import Cursor, CursorType -from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern _IS_SYNC = True diff --git a/test/test_custom_types.py b/test/test_custom_types.py index aba6b55119..02f3127165 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -55,7 +55,6 @@ from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress from pymongo.synchronous.collection import ReturnDocument -from pymongo.synchronous.helpers import next _IS_SYNC = True diff --git a/test/test_database.py b/test/test_database.py index c50e09b6e1..ebbf6e55c6 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -51,7 +51,6 @@ from pymongo.synchronous import auth from pymongo.synchronous.collection import Collection from pymongo.synchronous.database import Database -from pymongo.synchronous.helpers import next from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern diff --git a/test/test_encryption.py b/test/test_encryption.py index 50d617dc43..04e61b7bad 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -41,7 +41,6 @@ from pymongo.daemon import _spawn_daemon from pymongo.synchronous.collection import Collection -from pymongo.synchronous.helpers import next from pymongo.uri_parser_shared import _parse_kms_tls_options try: diff --git a/test/test_examples.py b/test/test_examples.py index 13f0c94c56..266e32e8d4 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -33,7 +33,6 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.server_api import ServerApi -from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern _IS_SYNC = True diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 6fe209f438..c7ccda44a4 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -49,7 +49,6 @@ from pymongo import MongoClient from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress -from pymongo.synchronous.helpers import iter, next _IS_SYNC = True diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py index 364a323627..472ef51da3 100644 --- a/test/test_load_balancer.py +++ b/test/test_load_balancer.py @@ -36,8 +36,6 @@ wait_until, ) -from pymongo.synchronous.helpers import next - _IS_SYNC = True pytestmark = pytest.mark.load_balancer diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 7cb93adf81..f5a18af9ed 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -42,7 +42,6 @@ from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference from pymongo.synchronous.command_cursor import CommandCursor -from pymongo.synchronous.helpers import next from pymongo.write_concern import WriteConcern _IS_SYNC = True diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index afde01723d..084abdf3e1 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -56,7 +56,6 @@ from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection, readable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.synchronous.helpers import next from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern diff --git a/test/test_session.py b/test/test_session.py index 40d0a53afb..9aa56a711e 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -52,7 +52,6 @@ from pymongo.read_concern import ReadConcern from pymongo.synchronous.command_cursor import CommandCursor from pymongo.synchronous.cursor import Cursor -from pymongo.synchronous.helpers import next _IS_SYNC = True diff --git a/test/test_transactions.py b/test/test_transactions.py index f4578deddb..813d6a688d 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -50,7 +50,6 @@ from pymongo.synchronous.client_session import TransactionOptions from pymongo.synchronous.command_cursor import CommandCursor from pymongo.synchronous.cursor import Cursor -from pymongo.synchronous.helpers import next _IS_SYNC = True diff --git a/test/unified_format.py b/test/unified_format.py index 580aed552b..0c5f68edd3 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -101,7 +101,6 @@ from pymongo.synchronous.command_cursor import CommandCursor from pymongo.synchronous.database import Database from pymongo.synchronous.encryption import ClientEncryption -from pymongo.synchronous.helpers import next from pymongo.topology_description import TopologyDescription from pymongo.typings import _Address from pymongo.write_concern import WriteConcern From 4839e523c823fc8de6b95449ffde63742aa3d5b7 Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Mon, 29 Sep 2025 10:29:08 -0700 Subject: [PATCH 2083/2111] PYTHON-5569: [Build Failure] Spec Resync job is failing silently (#2553) --- .evergreen/remove-unimplemented-tests.sh | 2 +- .evergreen/scripts/resync-all-specs.py | 20 +- .evergreen/scripts/resync-all-specs.sh | 1 + .evergreen/spec-patch/PYTHON-3712.patch | 10 +- .evergreen/spec-patch/PYTHON-5052.patch | 440 +++++++++++++ .evergreen/spec-patch/PYTHON-5493.patch | 87 +-- .evergreen/spec-patch/PYTHON-5529.patch | 587 ++++++++++++++++++ ...csfle-minLibmongocryptVersion-pattern.json | 17 + ...nt-csfle-minLibmongocryptVersion-type.json | 17 + 9 files changed, 1099 insertions(+), 82 deletions(-) create mode 100644 .evergreen/spec-patch/PYTHON-5052.patch create mode 100644 .evergreen/spec-patch/PYTHON-5529.patch create mode 100644 test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json create mode 100644 test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh index e5e1d02192..88ef137f86 100755 --- a/.evergreen/remove-unimplemented-tests.sh +++ b/.evergreen/remove-unimplemented-tests.sh @@ -41,7 +41,7 @@ rm $PYMONGO/test/index_management/index-rawdata.json rm $PYMONGO/test/collection_management/modifyCollection-*.json # PYTHON-5248 - Remove support for MongoDB 4.0 -rm $PYMONGO/test/**/pre-42-*.json +find /$PYMONGO /test -type f -name 'pre-42-*.json' -delete # PYTHON-3359 - Remove Database and Collection level timeout override rm $PYMONGO/test/csot/override-collection-timeoutMS.json diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py index dc02545824..8e58e56da2 100644 --- a/.evergreen/scripts/resync-all-specs.py +++ b/.evergreen/scripts/resync-all-specs.py @@ -30,14 +30,18 @@ def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: print("Done syncing specs") -def apply_patches(): +def apply_patches(errored): print("Beginning to apply patches") subprocess.run(["bash", "./.evergreen/remove-unimplemented-tests.sh"], check=True) # noqa: S603, S607 - subprocess.run( - ["git apply -R --allow-empty --whitespace=fix ./.evergreen/spec-patch/*"], # noqa: S607 - shell=True, # noqa: S602 - check=True, - ) + try: + subprocess.run( + ["git apply -R --allow-empty --whitespace=fix ./.evergreen/spec-patch/*"], # noqa: S607 + shell=True, # noqa: S602 + check=True, + stderr=subprocess.PIPE, + ) + except CalledProcessError as exc: + errored["applying patches"] = exc.stderr def check_new_spec_directories(directory: pathlib.Path) -> list[str]: @@ -85,7 +89,7 @@ def write_summary(errored: dict[str, str], new: list[str], filename: Optional[st pr_body += "\n -".join(succeeded) pr_body += "\n" if len(errored) > 0: - pr_body += "\n\nThe following spec syncs encountered errors:\n -" + pr_body += "\n\nThe following spec syncs encountered errors:" for k, v in errored.items(): pr_body += f"\n -{k}\n```{v}\n```" pr_body += "\n" @@ -106,7 +110,7 @@ def main(args: Namespace): directory = pathlib.Path("./test") errored: dict[str, str] = {} resync_specs(directory, errored) - apply_patches() + apply_patches(errored) new = check_new_spec_directories(directory) write_summary(errored, new, args.filename) diff --git a/.evergreen/scripts/resync-all-specs.sh b/.evergreen/scripts/resync-all-specs.sh index 4bcf2cd23b..41e4a2bc73 100755 --- a/.evergreen/scripts/resync-all-specs.sh +++ b/.evergreen/scripts/resync-all-specs.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash # Run spec syncing script and create PR +set -eu # SETUP SRC_URL="https://github.com/mongodb/specifications.git" diff --git a/.evergreen/spec-patch/PYTHON-3712.patch b/.evergreen/spec-patch/PYTHON-3712.patch index c746455cd9..b48c05124c 100644 --- a/.evergreen/spec-patch/PYTHON-3712.patch +++ b/.evergreen/spec-patch/PYTHON-3712.patch @@ -1,14 +1,14 @@ diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json -index 4b492f7d8..e44fad1bc 100644 +index e44fad1b..4b492f7d 100644 --- a/test/discovery_and_monitoring/unified/serverMonitoringMode.json +++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json -@@ -5,8 +5,7 @@ +@@ -5,7 +5,8 @@ { "topologies": [ "single", -+ "sharded" -- "sharded", -- "sharded-replicaset" +- "sharded" ++ "sharded", ++ "sharded-replicaset" ], "serverless": "forbid" } diff --git a/.evergreen/spec-patch/PYTHON-5052.patch b/.evergreen/spec-patch/PYTHON-5052.patch new file mode 100644 index 0000000000..01cbc00116 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5052.patch @@ -0,0 +1,440 @@ +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json +new file mode 100644 +index 00000000..aa8046d2 +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "entity-client-observeTracingMessages-additionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": { ++ "foo": "bar" ++ } ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages must not have additional properties'", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json +new file mode 100644 +index 00000000..0b3a65f5 +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "entity-client-observeTracingMessages-additionalPropertyType", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": { ++ "enableCommandPayload": 0 ++ } ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages enableCommandPayload must be boolean", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json +new file mode 100644 +index 00000000..de3ef39a +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json +@@ -0,0 +1,18 @@ ++{ ++ "description": "entity-client-observeTracingMessages-type", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": "foo" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages must be an object", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json b/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json +new file mode 100644 +index 00000000..5947a286 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json +@@ -0,0 +1,30 @@ ++{ ++ "description": "expectedTracingSpans-additionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "additional property foo not allowed in expectTracingMessages", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "ignoreExtraSpans": false, ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ], ++ "foo": 0 ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-clientType.json b/test/unified-test-format/invalid/expectedTracingSpans-clientType.json +new file mode 100644 +index 00000000..2fe7faea +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-clientType.json +@@ -0,0 +1,28 @@ ++{ ++ "description": "expectedTracingSpans-clientType", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "client type must be string", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": 0, ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json b/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json +new file mode 100644 +index 00000000..8a98d5ba +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json +@@ -0,0 +1,29 @@ ++{ ++ "description": "expectedTracingSpans-emptyNestedSpan", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested spans must not have fewer than 1 items'", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ }, ++ "nested": [] ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json b/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json +new file mode 100644 +index 00000000..79a86744 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json +@@ -0,0 +1,31 @@ ++{ ++ "description": "expectedTracingSpans-invalidNestedSpan", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested span must have required property name", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ }, ++ "nested": [ ++ {} ++ ] ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json +new file mode 100644 +index 00000000..2fb1cd5b +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-missingPropertyClient", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required property client", ++ "operations": [], ++ "expectTracingMessages": { ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json +new file mode 100644 +index 00000000..acd10307 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "expectedTracingSpans-missingPropertySpans", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required property spans", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0" ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json +new file mode 100644 +index 00000000..17299f86 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json +@@ -0,0 +1,28 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedAdditionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "Span must not have additional properties", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": {}, ++ "nested": [], ++ "foo": "bar" ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json +new file mode 100644 +index 00000000..0257cd9b +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedMissingName", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required span name", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json +new file mode 100644 +index 00000000..a09ca31c +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json +@@ -0,0 +1,25 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedMissingTags", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required span tags", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo" ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json +new file mode 100644 +index 00000000..ccff0410 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedNestedMustBeArray", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested spans must be an array", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": {}, ++ "nested": {} ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json +new file mode 100644 +index 00000000..72af1c29 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json +@@ -0,0 +1,26 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedNestedMustBeObject", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "span tags must be an object", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": [] ++ } ++ ] ++ } ++ } ++ ] ++} diff --git a/.evergreen/spec-patch/PYTHON-5493.patch b/.evergreen/spec-patch/PYTHON-5493.patch index cf1afbb271..99c105dcef 100644 --- a/.evergreen/spec-patch/PYTHON-5493.patch +++ b/.evergreen/spec-patch/PYTHON-5493.patch @@ -1,60 +1,35 @@ diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json -index d40cfbb7e..5799e834d 100644 +index 5799e834..72103b3c 100644 --- a/test/connection_logging/connection-logging.json +++ b/test/connection_logging/connection-logging.json -@@ -272,7 +272,13 @@ - "level": "debug", - "component": "connection", - "data": { -- "message": "Connection pool closed", -+ "message": "Connection closed", -+ "driverConnectionId": { +@@ -446,6 +446,22 @@ + } + } + }, ++ { ++ "level": "debug", ++ "component": "connection", ++ "data": { ++ "message": "Connection pool cleared", ++ "serverHost": { ++ "$$type": "string" ++ }, ++ "serverPort": { + "$$type": [ + "int", + "long" + ] -+ }, - "serverHost": { - "$$type": "string" - }, -@@ -281,20 +287,15 @@ - "int", - "long" - ] -- } -+ }, -+ "reason": "Connection pool was closed" - } - }, ++ } ++ } ++ }, { "level": "debug", "component": "connection", - "data": { -- "message": "Connection closed", -- "driverConnectionId": { -- "$$type": [ -- "int", -- "long" -- ] -- }, -+ "message": "Connection pool closed", - "serverHost": { - "$$type": "string" - }, -@@ -303,8 +304,7 @@ - "int", - "long" +@@ -498,22 +514,6 @@ ] -- }, -- "reason": "Connection pool was closed" -+ } - } - } - ] -@@ -446,22 +446,6 @@ } } - }, +- }, - { - "level": "debug", - "component": "connection", @@ -70,30 +45,6 @@ index d40cfbb7e..5799e834d 100644 - ] - } - } -- }, - { - "level": "debug", - "component": "connection", -@@ -514,6 +498,22 @@ - ] - } - } -+ }, -+ { -+ "level": "debug", -+ "component": "connection", -+ "data": { -+ "message": "Connection pool cleared", -+ "serverHost": { -+ "$$type": "string" -+ }, -+ "serverPort": { -+ "$$type": [ -+ "int", -+ "long" -+ ] -+ } -+ } } ] } diff --git a/.evergreen/spec-patch/PYTHON-5529.patch b/.evergreen/spec-patch/PYTHON-5529.patch new file mode 100644 index 0000000000..a97602e055 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5529.patch @@ -0,0 +1,587 @@ +diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json +index aa9c3eb2..212cd410 100644 +--- a/test/csot/command-execution.json ++++ b/test/csot/command-execution.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly during command execution", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", +@@ -69,8 +69,10 @@ + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, +- "heartbeatFrequencyMS": 500 ++ "heartbeatFrequencyMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +@@ -185,8 +187,10 @@ + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, +- "heartbeatFrequencyMS": 500 ++ "heartbeatFrequencyMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +@@ -316,8 +320,10 @@ + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, +- "heartbeatFrequencyMS": 100000 ++ "heartbeatFrequencyMS": 100000, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json +index 3868b302..f9d03429 100644 +--- a/test/csot/convenient-transactions.json ++++ b/test/csot/convenient-transactions.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for the withTransaction API", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -21,8 +21,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 500 ++ "timeoutMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json +index 4889e395..89be49f0 100644 +--- a/test/csot/error-transformations.json ++++ b/test/csot/error-transformations.json +@@ -1,6 +1,6 @@ + { + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", +@@ -26,8 +26,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json +index f1edbe68..9d8046d1 100644 +--- a/test/csot/global-timeoutMS.json ++++ b/test/csot/global-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS can be configured on a MongoClient", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -38,8 +38,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -217,8 +219,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -390,8 +394,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -569,8 +575,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -762,8 +770,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -941,8 +951,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1120,8 +1132,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1305,8 +1319,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1484,8 +1500,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1663,8 +1681,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1842,8 +1862,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2021,8 +2043,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2194,8 +2218,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2375,8 +2401,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2554,8 +2582,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2733,8 +2763,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2906,8 +2938,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3079,8 +3113,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3258,8 +3294,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3441,8 +3479,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3628,8 +3668,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3807,8 +3849,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3986,8 +4030,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4171,8 +4217,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4360,8 +4408,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4549,8 +4599,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4728,8 +4780,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4913,8 +4967,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5102,8 +5158,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5297,8 +5355,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5482,8 +5542,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5677,8 +5739,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json +index 291c6e72..58c59cb3 100644 +--- a/test/csot/non-tailable-cursors.json ++++ b/test/csot/non-tailable-cursors.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for non-tailable cursors", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4" +@@ -17,8 +17,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 200 ++ "timeoutMS": 200, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json +index 9daad260..5a0c9f36 100644 +--- a/test/csot/retryability-timeoutMS.json ++++ b/test/csot/retryability-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for retryable operations", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", +@@ -26,8 +26,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 100 ++ "timeoutMS": 100, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/runCursorCommand.json b/test/csot/runCursorCommand.json +index 36f774fb..e5182e33 100644 +--- a/test/csot/runCursorCommand.json ++++ b/test/csot/runCursorCommand.json +@@ -1,6 +1,6 @@ + { + "description": "runCursorCommand", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4" +@@ -16,6 +16,10 @@ + { + "client": { + "id": "commandClient", ++ "uriOptions": { ++ "minPoolSize": 1 ++ }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", +diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json +index 13ea91c7..dbf163e4 100644 +--- a/test/csot/sessions-inherit-timeoutMS.json ++++ b/test/csot/sessions-inherit-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "sessions inherit timeoutMS from their parent MongoClient", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -21,8 +21,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 500 ++ "timeoutMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json new file mode 100644 index 0000000000..1db023bf68 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-csfle-minLibmongocryptVersion-pattern", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "csfle": { + "minLibmongocryptVersion": "1.2.3.4" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json new file mode 100644 index 0000000000..8de7b293f1 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-csfle-minLibmongocryptVersion-type", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "csfle": { + "minLibmongocryptVersion": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} From 8d4518287c078e9a693761b5707eff654a20b631 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Sep 2025 11:29:57 -0700 Subject: [PATCH 2084/2111] Bump mypy from 1.18.1 to 1.18.2 (#2551) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester Co-authored-by: Iris Ho --- pyproject.toml | 2 +- uv.lock | 82 +++++++++++++++++++++++++------------------------- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0cf161baab..b06e6401a4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,7 +59,7 @@ mockupdb = [ ] perf = ["simplejson>=3.17.0"] typing = [ - "mypy==1.18.1", + "mypy==1.18.2", "pyright==1.1.405", "typing_extensions", "pip" diff --git a/uv.lock b/uv.lock index b574a4b2c0..82400618af 100644 --- a/uv.lock +++ b/uv.lock @@ -998,7 +998,7 @@ dependencies = [ [[package]] name = "mypy" -version = "1.18.1" +version = "1.18.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, @@ -1006,45 +1006,45 @@ dependencies = [ { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/14/a3/931e09fc02d7ba96da65266884da4e4a8806adcdb8a57faaacc6edf1d538/mypy-1.18.1.tar.gz", hash = "sha256:9e988c64ad3ac5987f43f5154f884747faf62141b7f842e87465b45299eea5a9", size = 3448447, upload-time = "2025-09-11T23:00:47.067Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/06/29ea5a34c23938ae93bc0040eb2900eb3f0f2ef4448cc59af37ab3ddae73/mypy-1.18.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2761b6ae22a2b7d8e8607fb9b81ae90bc2e95ec033fd18fa35e807af6c657763", size = 12811535, upload-time = "2025-09-11T22:58:55.399Z" }, - { url = "https://files.pythonhosted.org/packages/a8/40/04c38cb04fa9f1dc224b3e9634021a92c47b1569f1c87dfe6e63168883bb/mypy-1.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5b10e3ea7f2eec23b4929a3fabf84505da21034a4f4b9613cda81217e92b74f3", size = 11897559, upload-time = "2025-09-11T22:59:48.041Z" }, - { url = "https://files.pythonhosted.org/packages/46/bf/4c535bd45ea86cebbc1a3b6a781d442f53a4883f322ebd2d442db6444d0b/mypy-1.18.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:261fbfced030228bc0f724d5d92f9ae69f46373bdfd0e04a533852677a11dbea", size = 12507430, upload-time = "2025-09-11T22:59:30.415Z" }, - { url = "https://files.pythonhosted.org/packages/e2/e1/cbefb16f2be078d09e28e0b9844e981afb41f6ffc85beb68b86c6976e641/mypy-1.18.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4dc6b34a1c6875e6286e27d836a35c0d04e8316beac4482d42cfea7ed2527df8", size = 13243717, upload-time = "2025-09-11T22:59:11.297Z" }, - { url = "https://files.pythonhosted.org/packages/65/e8/3e963da63176f16ca9caea7fa48f1bc8766de317cd961528c0391565fd47/mypy-1.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1cabb353194d2942522546501c0ff75c4043bf3b63069cb43274491b44b773c9", size = 13492052, upload-time = "2025-09-11T23:00:09.29Z" }, - { url = "https://files.pythonhosted.org/packages/4b/09/d5d70c252a3b5b7530662d145437bd1de15f39fa0b48a27ee4e57d254aa1/mypy-1.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:738b171690c8e47c93569635ee8ec633d2cdb06062f510b853b5f233020569a9", size = 9765846, upload-time = "2025-09-11T22:58:26.198Z" }, - { url = "https://files.pythonhosted.org/packages/32/28/47709d5d9e7068b26c0d5189c8137c8783e81065ad1102b505214a08b548/mypy-1.18.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c903857b3e28fc5489e54042684a9509039ea0aedb2a619469438b544ae1961", size = 12734635, upload-time = "2025-09-11T23:00:24.983Z" }, - { url = "https://files.pythonhosted.org/packages/7c/12/ee5c243e52497d0e59316854041cf3b3130131b92266d0764aca4dec3c00/mypy-1.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a0c8392c19934c2b6c65566d3a6abdc6b51d5da7f5d04e43f0eb627d6eeee65", size = 11817287, upload-time = "2025-09-11T22:59:07.38Z" }, - { url = "https://files.pythonhosted.org/packages/48/bd/2aeb950151005fe708ab59725afed7c4aeeb96daf844f86a05d4b8ac34f8/mypy-1.18.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f85eb7efa2ec73ef63fc23b8af89c2fe5bf2a4ad985ed2d3ff28c1bb3c317c92", size = 12430464, upload-time = "2025-09-11T22:58:48.084Z" }, - { url = "https://files.pythonhosted.org/packages/71/e8/7a20407aafb488acb5734ad7fb5e8c2ef78d292ca2674335350fa8ebef67/mypy-1.18.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:82ace21edf7ba8af31c3308a61dc72df30500f4dbb26f99ac36b4b80809d7e94", size = 13164555, upload-time = "2025-09-11T23:00:13.803Z" }, - { url = "https://files.pythonhosted.org/packages/e8/c9/5f39065252e033b60f397096f538fb57c1d9fd70a7a490f314df20dd9d64/mypy-1.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a2dfd53dfe632f1ef5d161150a4b1f2d0786746ae02950eb3ac108964ee2975a", size = 13359222, upload-time = "2025-09-11T23:00:33.469Z" }, - { url = "https://files.pythonhosted.org/packages/85/b6/d54111ef3c1e55992cd2ec9b8b6ce9c72a407423e93132cae209f7e7ba60/mypy-1.18.1-cp311-cp311-win_amd64.whl", hash = "sha256:320f0ad4205eefcb0e1a72428dde0ad10be73da9f92e793c36228e8ebf7298c0", size = 9760441, upload-time = "2025-09-11T23:00:44.826Z" }, - { url = "https://files.pythonhosted.org/packages/e7/14/1c3f54d606cb88a55d1567153ef3a8bc7b74702f2ff5eb64d0994f9e49cb/mypy-1.18.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:502cde8896be8e638588b90fdcb4c5d5b8c1b004dfc63fd5604a973547367bb9", size = 12911082, upload-time = "2025-09-11T23:00:41.465Z" }, - { url = "https://files.pythonhosted.org/packages/90/83/235606c8b6d50a8eba99773add907ce1d41c068edb523f81eb0d01603a83/mypy-1.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7509549b5e41be279afc1228242d0e397f1af2919a8f2877ad542b199dc4083e", size = 11919107, upload-time = "2025-09-11T22:58:40.903Z" }, - { url = "https://files.pythonhosted.org/packages/ca/25/4e2ce00f8d15b99d0c68a2536ad63e9eac033f723439ef80290ec32c1ff5/mypy-1.18.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5956ecaabb3a245e3f34100172abca1507be687377fe20e24d6a7557e07080e2", size = 12472551, upload-time = "2025-09-11T22:58:37.272Z" }, - { url = "https://files.pythonhosted.org/packages/32/bb/92642a9350fc339dd9dcefcf6862d171b52294af107d521dce075f32f298/mypy-1.18.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8750ceb014a96c9890421c83f0db53b0f3b8633e2864c6f9bc0a8e93951ed18d", size = 13340554, upload-time = "2025-09-11T22:59:38.756Z" }, - { url = "https://files.pythonhosted.org/packages/cd/ee/38d01db91c198fb6350025d28f9719ecf3c8f2c55a0094bfbf3ef478cc9a/mypy-1.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fb89ea08ff41adf59476b235293679a6eb53a7b9400f6256272fb6029bec3ce5", size = 13530933, upload-time = "2025-09-11T22:59:20.228Z" }, - { url = "https://files.pythonhosted.org/packages/da/8d/6d991ae631f80d58edbf9d7066e3f2a96e479dca955d9a968cd6e90850a3/mypy-1.18.1-cp312-cp312-win_amd64.whl", hash = "sha256:2657654d82fcd2a87e02a33e0d23001789a554059bbf34702d623dafe353eabf", size = 9828426, upload-time = "2025-09-11T23:00:21.007Z" }, - { url = "https://files.pythonhosted.org/packages/e4/ec/ef4a7260e1460a3071628a9277a7579e7da1b071bc134ebe909323f2fbc7/mypy-1.18.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d70d2b5baf9b9a20bc9c730015615ae3243ef47fb4a58ad7b31c3e0a59b5ef1f", size = 12918671, upload-time = "2025-09-11T22:58:29.814Z" }, - { url = "https://files.pythonhosted.org/packages/a1/82/0ea6c3953f16223f0b8eda40c1aeac6bd266d15f4902556ae6e91f6fca4c/mypy-1.18.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b8367e33506300f07a43012fc546402f283c3f8bcff1dc338636affb710154ce", size = 11913023, upload-time = "2025-09-11T23:00:29.049Z" }, - { url = "https://files.pythonhosted.org/packages/ae/ef/5e2057e692c2690fc27b3ed0a4dbde4388330c32e2576a23f0302bc8358d/mypy-1.18.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:913f668ec50c3337b89df22f973c1c8f0b29ee9e290a8b7fe01cc1ef7446d42e", size = 12473355, upload-time = "2025-09-11T23:00:04.544Z" }, - { url = "https://files.pythonhosted.org/packages/98/43/b7e429fc4be10e390a167b0cd1810d41cb4e4add4ae50bab96faff695a3b/mypy-1.18.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a0e70b87eb27b33209fa4792b051c6947976f6ab829daa83819df5f58330c71", size = 13346944, upload-time = "2025-09-11T22:58:23.024Z" }, - { url = "https://files.pythonhosted.org/packages/89/4e/899dba0bfe36bbd5b7c52e597de4cf47b5053d337b6d201a30e3798e77a6/mypy-1.18.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c378d946e8a60be6b6ede48c878d145546fb42aad61df998c056ec151bf6c746", size = 13512574, upload-time = "2025-09-11T22:59:52.152Z" }, - { url = "https://files.pythonhosted.org/packages/f5/f8/7661021a5b0e501b76440454d786b0f01bb05d5c4b125fcbda02023d0250/mypy-1.18.1-cp313-cp313-win_amd64.whl", hash = "sha256:2cd2c1e0f3a7465f22731987fff6fc427e3dcbb4ca5f7db5bbeaff2ff9a31f6d", size = 9837684, upload-time = "2025-09-11T22:58:44.454Z" }, - { url = "https://files.pythonhosted.org/packages/bf/87/7b173981466219eccc64c107cf8e5ab9eb39cc304b4c07df8e7881533e4f/mypy-1.18.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ba24603c58e34dd5b096dfad792d87b304fc6470cbb1c22fd64e7ebd17edcc61", size = 12900265, upload-time = "2025-09-11T22:59:03.4Z" }, - { url = "https://files.pythonhosted.org/packages/ae/cc/b10e65bae75b18a5ac8f81b1e8e5867677e418f0dd2c83b8e2de9ba96ebd/mypy-1.18.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ed36662fb92ae4cb3cacc682ec6656208f323bbc23d4b08d091eecfc0863d4b5", size = 11942890, upload-time = "2025-09-11T23:00:00.607Z" }, - { url = "https://files.pythonhosted.org/packages/39/d4/aeefa07c44d09f4c2102e525e2031bc066d12e5351f66b8a83719671004d/mypy-1.18.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:040ecc95e026f71a9ad7956fea2724466602b561e6a25c2e5584160d3833aaa8", size = 12472291, upload-time = "2025-09-11T22:59:43.425Z" }, - { url = "https://files.pythonhosted.org/packages/c6/07/711e78668ff8e365f8c19735594ea95938bff3639a4c46a905e3ed8ff2d6/mypy-1.18.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:937e3ed86cb731276706e46e03512547e43c391a13f363e08d0fee49a7c38a0d", size = 13318610, upload-time = "2025-09-11T23:00:17.604Z" }, - { url = "https://files.pythonhosted.org/packages/ca/85/df3b2d39339c31d360ce299b418c55e8194ef3205284739b64962f6074e7/mypy-1.18.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1f95cc4f01c0f1701ca3b0355792bccec13ecb2ec1c469e5b85a6ef398398b1d", size = 13513697, upload-time = "2025-09-11T22:58:59.534Z" }, - { url = "https://files.pythonhosted.org/packages/b1/df/462866163c99ea73bb28f0eb4d415c087e30de5d36ee0f5429d42e28689b/mypy-1.18.1-cp314-cp314-win_amd64.whl", hash = "sha256:e4f16c0019d48941220ac60b893615be2f63afedaba6a0801bdcd041b96991ce", size = 9985739, upload-time = "2025-09-11T22:58:51.644Z" }, - { url = "https://files.pythonhosted.org/packages/64/1a/9005d78ffedaac58b3ee3a44d53a65b09ac1d27c36a00ade849015b8e014/mypy-1.18.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e37763af63a8018308859bc83d9063c501a5820ec5bd4a19f0a2ac0d1c25c061", size = 12809347, upload-time = "2025-09-11T22:59:15.468Z" }, - { url = "https://files.pythonhosted.org/packages/46/b3/c932216b281f7c223a2c8b98b9c8e1eb5bea1650c11317ac778cfc3778e4/mypy-1.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:51531b6e94f34b8bd8b01dee52bbcee80daeac45e69ec5c36e25bce51cbc46e6", size = 11899906, upload-time = "2025-09-11T22:59:56.473Z" }, - { url = "https://files.pythonhosted.org/packages/30/6b/542daf553f97275677c35d183404d1d83b64cea315f452195c5a5782a225/mypy-1.18.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dbfdea20e90e9c5476cea80cfd264d8e197c6ef2c58483931db2eefb2f7adc14", size = 12504415, upload-time = "2025-09-11T23:00:37.332Z" }, - { url = "https://files.pythonhosted.org/packages/37/d3/061d0d861377ea3fdb03784d11260bfa2adbb4eeeb24b63bd1eea7b6080c/mypy-1.18.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:99f272c9b59f5826fffa439575716276d19cbf9654abc84a2ba2d77090a0ba14", size = 13243466, upload-time = "2025-09-11T22:58:18.562Z" }, - { url = "https://files.pythonhosted.org/packages/7d/5e/6e88a79bdfec8d01ba374c391150c94f6c74545bdc37bdc490a7f30c5095/mypy-1.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8c05a7f8c00300a52f3a4fcc95a185e99bf944d7e851ff141bae8dcf6dcfeac4", size = 13493539, upload-time = "2025-09-11T22:59:24.479Z" }, - { url = "https://files.pythonhosted.org/packages/92/5a/a14a82e44ed76998d73a070723b6584963fdb62f597d373c8b22c3a3da3d/mypy-1.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:2fbcecbe5cf213ba294aa8c0b8c104400bf7bb64db82fb34fe32a205da4b3531", size = 9764809, upload-time = "2025-09-11T22:58:33.133Z" }, - { url = "https://files.pythonhosted.org/packages/e0/1d/4b97d3089b48ef3d904c9ca69fab044475bd03245d878f5f0b3ea1daf7ce/mypy-1.18.1-py3-none-any.whl", hash = "sha256:b76a4de66a0ac01da1be14ecc8ae88ddea33b8380284a9e3eae39d57ebcbe26e", size = 2352212, upload-time = "2025-09-11T22:59:26.576Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/c0/77/8f0d0001ffad290cef2f7f216f96c814866248a0b92a722365ed54648e7e/mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b", size = 3448846, upload-time = "2025-09-19T00:11:10.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/6f/657961a0743cff32e6c0611b63ff1c1970a0b482ace35b069203bf705187/mypy-1.18.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eab0cf6294dafe397c261a75f96dc2c31bffe3b944faa24db5def4e2b0f77c", size = 12807973, upload-time = "2025-09-19T00:10:35.282Z" }, + { url = "https://files.pythonhosted.org/packages/10/e9/420822d4f661f13ca8900f5fa239b40ee3be8b62b32f3357df9a3045a08b/mypy-1.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a780ca61fc239e4865968ebc5240bb3bf610ef59ac398de9a7421b54e4a207e", size = 11896527, upload-time = "2025-09-19T00:10:55.791Z" }, + { url = "https://files.pythonhosted.org/packages/aa/73/a05b2bbaa7005f4642fcfe40fb73f2b4fb6bb44229bd585b5878e9a87ef8/mypy-1.18.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448acd386266989ef11662ce3c8011fd2a7b632e0ec7d61a98edd8e27472225b", size = 12507004, upload-time = "2025-09-19T00:11:05.411Z" }, + { url = "https://files.pythonhosted.org/packages/4f/01/f6e4b9f0d031c11ccbd6f17da26564f3a0f3c4155af344006434b0a05a9d/mypy-1.18.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f9e171c465ad3901dc652643ee4bffa8e9fef4d7d0eece23b428908c77a76a66", size = 13245947, upload-time = "2025-09-19T00:10:46.923Z" }, + { url = "https://files.pythonhosted.org/packages/d7/97/19727e7499bfa1ae0773d06afd30ac66a58ed7437d940c70548634b24185/mypy-1.18.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:592ec214750bc00741af1f80cbf96b5013d81486b7bb24cb052382c19e40b428", size = 13499217, upload-time = "2025-09-19T00:09:39.472Z" }, + { url = "https://files.pythonhosted.org/packages/9f/4f/90dc8c15c1441bf31cf0f9918bb077e452618708199e530f4cbd5cede6ff/mypy-1.18.2-cp310-cp310-win_amd64.whl", hash = "sha256:7fb95f97199ea11769ebe3638c29b550b5221e997c63b14ef93d2e971606ebed", size = 9766753, upload-time = "2025-09-19T00:10:49.161Z" }, + { url = "https://files.pythonhosted.org/packages/88/87/cafd3ae563f88f94eec33f35ff722d043e09832ea8530ef149ec1efbaf08/mypy-1.18.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:807d9315ab9d464125aa9fcf6d84fde6e1dc67da0b6f80e7405506b8ac72bc7f", size = 12731198, upload-time = "2025-09-19T00:09:44.857Z" }, + { url = "https://files.pythonhosted.org/packages/0f/e0/1e96c3d4266a06d4b0197ace5356d67d937d8358e2ee3ffac71faa843724/mypy-1.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:776bb00de1778caf4db739c6e83919c1d85a448f71979b6a0edd774ea8399341", size = 11817879, upload-time = "2025-09-19T00:09:47.131Z" }, + { url = "https://files.pythonhosted.org/packages/72/ef/0c9ba89eb03453e76bdac5a78b08260a848c7bfc5d6603634774d9cd9525/mypy-1.18.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1379451880512ffce14505493bd9fe469e0697543717298242574882cf8cdb8d", size = 12427292, upload-time = "2025-09-19T00:10:22.472Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/ec4a061dd599eb8179d5411d99775bec2a20542505988f40fc2fee781068/mypy-1.18.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1331eb7fd110d60c24999893320967594ff84c38ac6d19e0a76c5fd809a84c86", size = 13163750, upload-time = "2025-09-19T00:09:51.472Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5f/2cf2ceb3b36372d51568f2208c021870fe7834cf3186b653ac6446511839/mypy-1.18.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ca30b50a51e7ba93b00422e486cbb124f1c56a535e20eff7b2d6ab72b3b2e37", size = 13351827, upload-time = "2025-09-19T00:09:58.311Z" }, + { url = "https://files.pythonhosted.org/packages/c8/7d/2697b930179e7277529eaaec1513f8de622818696857f689e4a5432e5e27/mypy-1.18.2-cp311-cp311-win_amd64.whl", hash = "sha256:664dc726e67fa54e14536f6e1224bcfce1d9e5ac02426d2326e2bb4e081d1ce8", size = 9757983, upload-time = "2025-09-19T00:10:09.071Z" }, + { url = "https://files.pythonhosted.org/packages/07/06/dfdd2bc60c66611dd8335f463818514733bc763e4760dee289dcc33df709/mypy-1.18.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:33eca32dd124b29400c31d7cf784e795b050ace0e1f91b8dc035672725617e34", size = 12908273, upload-time = "2025-09-19T00:10:58.321Z" }, + { url = "https://files.pythonhosted.org/packages/81/14/6a9de6d13a122d5608e1a04130724caf9170333ac5a924e10f670687d3eb/mypy-1.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3c47adf30d65e89b2dcd2fa32f3aeb5e94ca970d2c15fcb25e297871c8e4764", size = 11920910, upload-time = "2025-09-19T00:10:20.043Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a9/b29de53e42f18e8cc547e38daa9dfa132ffdc64f7250e353f5c8cdd44bee/mypy-1.18.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d6c838e831a062f5f29d11c9057c6009f60cb294fea33a98422688181fe2893", size = 12465585, upload-time = "2025-09-19T00:10:33.005Z" }, + { url = "https://files.pythonhosted.org/packages/77/ae/6c3d2c7c61ff21f2bee938c917616c92ebf852f015fb55917fd6e2811db2/mypy-1.18.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01199871b6110a2ce984bde85acd481232d17413868c9807e95c1b0739a58914", size = 13348562, upload-time = "2025-09-19T00:10:11.51Z" }, + { url = "https://files.pythonhosted.org/packages/4d/31/aec68ab3b4aebdf8f36d191b0685d99faa899ab990753ca0fee60fb99511/mypy-1.18.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a2afc0fa0b0e91b4599ddfe0f91e2c26c2b5a5ab263737e998d6817874c5f7c8", size = 13533296, upload-time = "2025-09-19T00:10:06.568Z" }, + { url = "https://files.pythonhosted.org/packages/9f/83/abcb3ad9478fca3ebeb6a5358bb0b22c95ea42b43b7789c7fb1297ca44f4/mypy-1.18.2-cp312-cp312-win_amd64.whl", hash = "sha256:d8068d0afe682c7c4897c0f7ce84ea77f6de953262b12d07038f4d296d547074", size = 9828828, upload-time = "2025-09-19T00:10:28.203Z" }, + { url = "https://files.pythonhosted.org/packages/5f/04/7f462e6fbba87a72bc8097b93f6842499c428a6ff0c81dd46948d175afe8/mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc", size = 12898728, upload-time = "2025-09-19T00:10:01.33Z" }, + { url = "https://files.pythonhosted.org/packages/99/5b/61ed4efb64f1871b41fd0b82d29a64640f3516078f6c7905b68ab1ad8b13/mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e", size = 11910758, upload-time = "2025-09-19T00:10:42.607Z" }, + { url = "https://files.pythonhosted.org/packages/3c/46/d297d4b683cc89a6e4108c4250a6a6b717f5fa96e1a30a7944a6da44da35/mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986", size = 12475342, upload-time = "2025-09-19T00:11:00.371Z" }, + { url = "https://files.pythonhosted.org/packages/83/45/4798f4d00df13eae3bfdf726c9244bcb495ab5bd588c0eed93a2f2dd67f3/mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d", size = 13338709, upload-time = "2025-09-19T00:11:03.358Z" }, + { url = "https://files.pythonhosted.org/packages/d7/09/479f7358d9625172521a87a9271ddd2441e1dab16a09708f056e97007207/mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba", size = 13529806, upload-time = "2025-09-19T00:10:26.073Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/ac0f2c7e9d0ea3c75cd99dff7aec1c9df4a1376537cb90e4c882267ee7e9/mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544", size = 9833262, upload-time = "2025-09-19T00:10:40.035Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0c/7d5300883da16f0063ae53996358758b2a2df2a09c72a5061fa79a1f5006/mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce", size = 12893775, upload-time = "2025-09-19T00:10:03.814Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/2cffbf25737bdb236f60c973edf62e3e7b4ee1c25b6878629e88e2cde967/mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d", size = 11936852, upload-time = "2025-09-19T00:10:51.631Z" }, + { url = "https://files.pythonhosted.org/packages/be/50/34059de13dd269227fb4a03be1faee6e2a4b04a2051c82ac0a0b5a773c9a/mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c", size = 12480242, upload-time = "2025-09-19T00:11:07.955Z" }, + { url = "https://files.pythonhosted.org/packages/5b/11/040983fad5132d85914c874a2836252bbc57832065548885b5bb5b0d4359/mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb", size = 13326683, upload-time = "2025-09-19T00:09:55.572Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ba/89b2901dd77414dd7a8c8729985832a5735053be15b744c18e4586e506ef/mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075", size = 13514749, upload-time = "2025-09-19T00:10:44.827Z" }, + { url = "https://files.pythonhosted.org/packages/25/bc/cc98767cffd6b2928ba680f3e5bc969c4152bf7c2d83f92f5a504b92b0eb/mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf", size = 9982959, upload-time = "2025-09-19T00:10:37.344Z" }, + { url = "https://files.pythonhosted.org/packages/3f/a6/490ff491d8ecddf8ab91762d4f67635040202f76a44171420bcbe38ceee5/mypy-1.18.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25a9c8fb67b00599f839cf472713f54249a62efd53a54b565eb61956a7e3296b", size = 12807230, upload-time = "2025-09-19T00:09:49.471Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2e/60076fc829645d167ece9e80db9e8375648d210dab44cc98beb5b322a826/mypy-1.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2b9c7e284ee20e7598d6f42e13ca40b4928e6957ed6813d1ab6348aa3f47133", size = 11895666, upload-time = "2025-09-19T00:10:53.678Z" }, + { url = "https://files.pythonhosted.org/packages/97/4a/1e2880a2a5dda4dc8d9ecd1a7e7606bc0b0e14813637eeda40c38624e037/mypy-1.18.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6985ed057513e344e43a26cc1cd815c7a94602fb6a3130a34798625bc2f07b6", size = 12499608, upload-time = "2025-09-19T00:09:36.204Z" }, + { url = "https://files.pythonhosted.org/packages/00/81/a117f1b73a3015b076b20246b1f341c34a578ebd9662848c6b80ad5c4138/mypy-1.18.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22f27105f1525ec024b5c630c0b9f36d5c1cc4d447d61fe51ff4bd60633f47ac", size = 13244551, upload-time = "2025-09-19T00:10:17.531Z" }, + { url = "https://files.pythonhosted.org/packages/9b/61/b9f48e1714ce87c7bf0358eb93f60663740ebb08f9ea886ffc670cea7933/mypy-1.18.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:030c52d0ea8144e721e49b1f68391e39553d7451f0c3f8a7565b59e19fcb608b", size = 13491552, upload-time = "2025-09-19T00:10:13.753Z" }, + { url = "https://files.pythonhosted.org/packages/c9/66/b2c0af3b684fa80d1b27501a8bdd3d2daa467ea3992a8aa612f5ca17c2db/mypy-1.18.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa5e07ac1a60a253445797e42b8b2963c9675563a94f11291ab40718b016a7a0", size = 9765635, upload-time = "2025-09-19T00:10:30.993Z" }, + { url = "https://files.pythonhosted.org/packages/87/e3/be76d87158ebafa0309946c4a73831974d4d6ab4f4ef40c3b53a385a66fd/mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e", size = 2352367, upload-time = "2025-09-19T00:10:15.489Z" }, ] [[package]] @@ -1258,7 +1258,7 @@ mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mo perf = [{ name = "simplejson", specifier = ">=3.17.0" }] pip = [{ name = "pip" }] typing = [ - { name = "mypy", specifier = "==1.18.1" }, + { name = "mypy", specifier = "==1.18.2" }, { name = "pip" }, { name = "pyright", specifier = "==1.1.405" }, { name = "typing-extensions" }, From b2918071066ace4405b23c19257c99b3fec6c1bf Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 30 Sep 2025 11:39:51 -0500 Subject: [PATCH 2085/2111] PYTHON-5587 Remove check for dnspython version (#2566) --- pymongo/asynchronous/srv_resolver.py | 12 +----------- pymongo/synchronous/srv_resolver.py | 12 +----------- 2 files changed, 2 insertions(+), 22 deletions(-) diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py index 0130f0e8b3..9c4d9a9d57 100644 --- a/pymongo/asynchronous/srv_resolver.py +++ b/pymongo/asynchronous/srv_resolver.py @@ -19,7 +19,7 @@ import random from typing import TYPE_CHECKING, Any, Optional, Union -from pymongo.common import CONNECT_TIMEOUT, check_for_min_version +from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError if TYPE_CHECKING: @@ -32,14 +32,6 @@ def _have_dnspython() -> bool: try: import dns # noqa: F401 - dns_version, required_version, is_valid = check_for_min_version("dnspython") - if not is_valid: - raise RuntimeError( - f"pymongo requires dnspython>={required_version}, " - f"found version {dns_version}. " - "Install a compatible version with pip" - ) - return True except ImportError: return False @@ -79,8 +71,6 @@ def __init__( srv_service_name: str, srv_max_hosts: int = 0, ): - # Ensure the version of dnspython is compatible. - _have_dnspython() self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py index e3e208e5c6..4802310698 100644 --- a/pymongo/synchronous/srv_resolver.py +++ b/pymongo/synchronous/srv_resolver.py @@ -19,7 +19,7 @@ import random from typing import TYPE_CHECKING, Any, Optional, Union -from pymongo.common import CONNECT_TIMEOUT, check_for_min_version +from pymongo.common import CONNECT_TIMEOUT from pymongo.errors import ConfigurationError if TYPE_CHECKING: @@ -32,14 +32,6 @@ def _have_dnspython() -> bool: try: import dns # noqa: F401 - dns_version, required_version, is_valid = check_for_min_version("dnspython") - if not is_valid: - raise RuntimeError( - f"pymongo requires dnspython>={required_version}, " - f"found version {dns_version}. " - "Install a compatible version with pip" - ) - return True except ImportError: return False @@ -79,8 +71,6 @@ def __init__( srv_service_name: str, srv_max_hosts: int = 0, ): - # Ensure the version of dnspython is compatible. - _have_dnspython() self.__fqdn = fqdn self.__srv = srv_service_name self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT From 67384f0f08ce9b05bf326a6750d04bc2a5bf8581 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 30 Sep 2025 12:30:00 -0500 Subject: [PATCH 2086/2111] PYTHON-5550 Add a test that uses uvloop as the event loop (#2543) --- .github/workflows/test-python.yml | 33 ++++++++++++++++++++++++ CONTRIBUTING.md | 8 ++++++ integration_tests/README.md | 42 +++++++++++++++++++++++++++++++ integration_tests/run.sh | 11 ++++++++ integration_tests/test_uv_loop.py | 27 ++++++++++++++++++++ justfile | 4 +++ test/__init__.py | 7 ++++++ test/asynchronous/__init__.py | 7 ++++++ tools/synchro.py | 1 + 9 files changed, 140 insertions(+) create mode 100644 integration_tests/README.md create mode 100755 integration_tests/run.sh create mode 100644 integration_tests/test_uv_loop.py diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 40a497480c..0ed23b9d83 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -14,6 +14,9 @@ defaults: run: shell: bash -eux {0} +permissions: + contents: read + jobs: static: @@ -163,6 +166,36 @@ jobs: run: | just typing + integration_tests: + runs-on: ubuntu-latest + name: Integration Tests + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: | + just install + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + - name: Run tests + run: | + just integration-tests + - id: setup-mongodb-ssl + uses: mongodb-labs/drivers-evergreen-tools@master + with: + ssl: true + - name: Run tests + run: | + just integration-tests + make_sdist: runs-on: ubuntu-latest name: "Make an sdist" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c4f7b55817..a8881db9cb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -411,6 +411,14 @@ a use the ticket number as the "reason" parameter to the decorator, e.g. `@flaky When running tests locally (not in CI), the `flaky` decorator will be disabled unless `ENABLE_FLAKY` is set. To disable the `flaky` decorator in CI, you can use `evergreen patch --param DISABLE_FLAKY=1`. +## Integration Tests + +The `integration_tests` directory has a set of scripts that verify the usage of PyMongo with downstream packages or frameworks. See the [README](./integration_tests/README.md) for more information. + +To run the tests, use `just integration_tests`. + +The tests should be able to run with and without SSL enabled. + ## Specification Tests The MongoDB [specifications repository](https://github.com/mongodb/specifications) diff --git a/integration_tests/README.md b/integration_tests/README.md new file mode 100644 index 0000000000..fb64a9066f --- /dev/null +++ b/integration_tests/README.md @@ -0,0 +1,42 @@ +# Integration Tests + +A set of tests that verify the usage of PyMongo with downstream packages or frameworks. + +Each test uses [PEP 723 inline metadata](https://packaging.python.org/en/latest/specifications/inline-script-metadata/) and can be run using `pipx` or `uv`. + +The `run.sh` convenience script can be used to run all of the files using `uv`. + +Here is an example header for the script with the inline dependencies: + +```python +# /// script +# dependencies = [ +# "uvloop>=0.18" +# ] +# requires-python = ">=3.10" +# /// +``` + +Here is an example of using the test helper function to create a configured client for the test: + + +```python +import asyncio +import sys +from pathlib import Path + +# Use pymongo from parent directory. +root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + +from test.asynchronous import async_simple_test_client # noqa: E402 + + +async def main(): + async with async_simple_test_client() as client: + result = await client.admin.command("ping") + assert result["ok"] + + +asyncio.run(main()) +``` diff --git a/integration_tests/run.sh b/integration_tests/run.sh new file mode 100755 index 0000000000..051e2b8a75 --- /dev/null +++ b/integration_tests/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Run all of the integration test files using `uv run`. +set -eu + +for file in integration_tests/test_*.py ; do + echo "-----------------" + echo "Running $file..." + uv run $file + echo "Running $file...done." + echo "-----------------" +done diff --git a/integration_tests/test_uv_loop.py b/integration_tests/test_uv_loop.py new file mode 100644 index 0000000000..88a3ad73ab --- /dev/null +++ b/integration_tests/test_uv_loop.py @@ -0,0 +1,27 @@ +# /// script +# dependencies = [ +# "uvloop>=0.18" +# ] +# requires-python = ">=3.10" +# /// +from __future__ import annotations + +import sys +from pathlib import Path + +import uvloop + +# Use pymongo from parent directory. +root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + +from test.asynchronous import async_simple_test_client # noqa: E402 + + +async def main(): + async with async_simple_test_client() as client: + result = await client.admin.command("ping") + assert result["ok"] + + +uvloop.run(main()) diff --git a/justfile b/justfile index 9b6cce62c9..f235346160 100644 --- a/justfile +++ b/justfile @@ -72,6 +72,10 @@ setup-tests *args="": teardown-tests: bash .evergreen/scripts/teardown-tests.sh +[group('test')] +integration-tests: + bash integration_tests/run.sh + [group('server')] run-server *args="": bash .evergreen/scripts/run-server.sh {{args}} diff --git a/test/__init__.py b/test/__init__.py index f3b66c20a9..1ee2c283d6 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -1227,6 +1227,13 @@ def teardown(): print_running_clients() +@contextmanager +def simple_test_client(): + client_context.init() + yield client_context.client + client_context.client.close() + + def test_cases(suite): """Iterator over all TestCases within a TestSuite.""" for suite_or_case in suite._tests: diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py index 7a6a23ed27..78d0576add 100644 --- a/test/asynchronous/__init__.py +++ b/test/asynchronous/__init__.py @@ -1243,6 +1243,13 @@ async def async_teardown(): print_running_clients() +@asynccontextmanager +async def async_simple_test_client(): + await async_client_context.init() + yield async_client_context.client + await async_client_context.client.close() + + def test_cases(suite): """Iterator over all TestCases within a TestSuite.""" for suite_or_case in suite._tests: diff --git a/tools/synchro.py b/tools/synchro.py index a4190529c4..e3d4835502 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -131,6 +131,7 @@ "async_create_barrier": "create_barrier", "async_barrier_wait": "barrier_wait", "async_joinall": "joinall", + "async_simple_test_client": "simple_test_client", "_async_create_connection": "_create_connection", "pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts": "pymongo.synchronous.srv_resolver._SrvResolver.get_hosts", "dns.asyncresolver.resolve": "dns.resolver.resolve", From 215b3b1938e1e716a03c4ee01407b0b83ad0f56a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Oct 2025 13:51:14 -0400 Subject: [PATCH 2087/2111] Bump github/codeql-action from 3.30.3 to 3.30.5 in the actions group (#2564) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 3ed1f0d9bb..0c1f2f6d47 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@192325c86100d080feab897ff886c34abd4c83a3 # v3 + uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@192325c86100d080feab897ff886c34abd4c83a3 # v3 + uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3 with: category: "/language:${{matrix.language}}" From e3910f868b30371c54a1be6b247718712c4658a0 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Thu, 2 Oct 2025 13:43:31 -0400 Subject: [PATCH 2088/2111] PYTHON-5593 Adds v4.15.2 notes to changelog (#2570) --- doc/changelog.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index b96b3082f8..82df4cdb07 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -17,6 +17,21 @@ PyMongo 4.16 brings a number of changes including: - Removed support for Eventlet. Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. +Changes in Version 4.15.2 (2025/10/01) +-------------------------------------- + +Version 4.15.2 is a bug fix release. + +- Add wheels for Python 3.14 and 3.14t that were missing from 4.15.0 release. Drop the 3.13t wheel. + +Issues Resolved +............... + +See the `PyMongo 4.15.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47186 + Changes in Version 4.15.1 (2025/09/16) -------------------------------------- From 6bdf07e7260203c0499417744e968df526d36f13 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Thu, 2 Oct 2025 17:48:22 -0400 Subject: [PATCH 2089/2111] PYTHON-5585 Add jira.mongodb.org/secure/ReleaseNote links to linkcheck_ignore (#2572) --- doc/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/conf.py b/doc/conf.py index 8a7f418609..063429cd98 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -84,12 +84,14 @@ # so those link results in a 404. # wiki.centos.org has been flaky. # sourceforge.net is giving a 403 error, but is still accessible from the browser. +# Links to release notes in jira give 401 error: unauthorized. PYTHON-5585 linkcheck_ignore = [ "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check", "https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", r"https://sourceforge.net/", + r"https://jira\.mongodb\.org/secure/ReleaseNote\.jspa.*", ] # Allow for flaky links. From d47bd9cf959e7e5e19921889ffa7c6967bc4ef32 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 3 Oct 2025 13:03:07 -0400 Subject: [PATCH 2090/2111] PYTHON-5024 - Add 3.14t as a standard Python matrix version (#2563) --- .evergreen/generated_configs/tasks.yml | 544 +++++++++++++------- .evergreen/generated_configs/variants.yml | 44 +- .evergreen/scripts/generate_config.py | 47 +- .evergreen/scripts/generate_config_utils.py | 2 +- .evergreen/utils.sh | 12 +- pymongo/network_layer.py | 2 +- uv.lock | 369 ++++++------- 7 files changed, 544 insertions(+), 476 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index dc65bfb557..8064ec85e6 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -61,7 +61,7 @@ tasks: SUB_TEST_NAME: env-creds PYTHON_VERSION: "3.13" tags: [auth-aws, auth-aws-env-creds] - - name: test-auth-aws-8.0-session-creds-python3.14 + - name: test-auth-aws-8.0-session-creds-python3.14t commands: - func: run server vars: @@ -72,9 +72,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: session-creds - PYTHON_VERSION: "3.14" - tags: [auth-aws, auth-aws-session-creds] - - name: test-auth-aws-rapid-web-identity-python3.10 + PYTHON_VERSION: 3.14t + tags: [auth-aws, auth-aws-session-creds, free-threaded] + - name: test-auth-aws-rapid-web-identity-python3.14 commands: - func: run server vars: @@ -85,9 +85,9 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-rapid-web-identity-session-name-python3.10 + - name: test-auth-aws-rapid-web-identity-session-name-python3.14 commands: - func: run server vars: @@ -99,9 +99,9 @@ tasks: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity AWS_ROLE_SESSION_NAME: test - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - - name: test-auth-aws-latest-ecs-python3.11 + - name: test-auth-aws-latest-ecs-python3.10 commands: - func: run server vars: @@ -112,7 +112,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ecs - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.10" tags: [auth-aws, auth-aws-ecs] # Backport pr tests @@ -141,16 +141,6 @@ tasks: depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] tags: [coverage, pr] - # Free threading tests - - name: test-free-threading - commands: - - func: run server - vars: - VERSION: "8.0" - TOPOLOGY: replica_set - - func: run tests - tags: [free-threading] - # Getdata tests - name: getdata commands: @@ -301,7 +291,7 @@ tasks: SUB_TEST_NAME: embedded PYTHON_VERSION: "3.13" tags: [mod_wsgi, pr] - - name: mod-wsgi-replica-set-python3.14 + - name: mod-wsgi-embedded-mode-replica-set-python3.14 commands: - func: run server vars: @@ -310,7 +300,7 @@ tasks: - func: run tests vars: TEST_NAME: mod_wsgi - SUB_TEST_NAME: standalone + SUB_TEST_NAME: embedded PYTHON_VERSION: "3.14" tags: [mod_wsgi, pr] @@ -2398,7 +2388,7 @@ tasks: tags: [search_index] # Server version tests - - name: test-server-version-pypy3.10-async-auth-nossl-replica-set + - name: test-server-version-python3.14t-async-auth-nossl-replica-set commands: - func: run server vars: @@ -2410,14 +2400,15 @@ tasks: AUTH: auth SSL: nossl TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: 3.14t TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.14t - replica_set-auth-nossl - async - - name: test-server-version-python3.14-sync-auth-nossl-replica-set-cov + - free-threaded + - name: test-server-version-python3.13-sync-auth-nossl-replica-set-cov commands: - func: run server vars: @@ -2431,14 +2422,14 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.13" TEST_NAME: default_sync tags: - server-version - - python-3.14 + - python-3.13 - replica_set-auth-nossl - sync - - name: test-server-version-python3.13-async-auth-ssl-replica-set-cov + - name: test-server-version-python3.12-async-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2452,14 +2443,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - server-version - - python-3.13 + - python-3.12 - replica_set-auth-ssl - async - - name: test-server-version-python3.12-sync-auth-ssl-replica-set-cov + - name: test-server-version-python3.11-sync-auth-ssl-replica-set-cov commands: - func: run server vars: @@ -2473,14 +2464,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version - - python-3.12 + - python-3.11 - replica_set-auth-ssl - sync - - name: test-server-version-python3.13-async-noauth-nossl-replica-set-cov + - name: test-server-version-python3.11-async-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2494,15 +2485,15 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.11" TEST_NAME: default_async tags: - server-version - - python-3.13 + - python-3.11 - replica_set-noauth-nossl - async - pr - - name: test-server-version-python3.12-sync-noauth-nossl-replica-set-cov + - name: test-server-version-python3.10-sync-noauth-nossl-replica-set-cov commands: - func: run server vars: @@ -2516,36 +2507,34 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version - - python-3.12 + - python-3.10 - replica_set-noauth-nossl - sync - pr - - name: test-server-version-python3.11-async-noauth-ssl-replica-set-cov + - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_async tags: - server-version - - python-3.11 + - python-pypy3.10 - replica_set-noauth-ssl - async - - name: test-server-version-python3.10-sync-noauth-ssl-replica-set-cov + - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov commands: - func: run server vars: @@ -2559,14 +2548,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version - - python-3.10 + - python-3.14 - replica_set-noauth-ssl - sync - - name: test-server-version-python3.11-async-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2580,34 +2569,33 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-3.11 + - python-3.14 - sharded_cluster-auth-nossl - async - - name: test-server-version-python3.10-sync-auth-nossl-sharded-cluster-cov + - name: test-server-version-python3.14t-sync-auth-nossl-sharded-cluster commands: - func: run server vars: AUTH: auth SSL: nossl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: AUTH: auth SSL: nossl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: 3.14t TEST_NAME: default_sync tags: - server-version - - python-3.10 + - python-3.14t - sharded_cluster-auth-nossl - sync + - free-threaded - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov commands: - func: run server @@ -2714,6 +2702,26 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - async + - name: test-server-version-python3.14t-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: 3.14t + TEST_NAME: default_async + tags: + - server-version + - python-3.14t + - sharded_cluster-auth-ssl + - async + - free-threaded - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster commands: - func: run server @@ -2839,6 +2847,26 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - sync + - name: test-server-version-python3.14t-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - server-version + - python-3.14t + - sharded_cluster-auth-ssl + - sync + - free-threaded - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -2858,26 +2886,28 @@ tasks: - python-pypy3.10 - sharded_cluster-auth-ssl - sync - - name: test-server-version-pypy3.10-async-noauth-nossl-sharded-cluster + - name: test-server-version-python3.12-async-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.12" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.12 - sharded_cluster-noauth-nossl - async - - name: test-server-version-python3.14-sync-noauth-nossl-sharded-cluster-cov + - name: test-server-version-python3.11-sync-noauth-nossl-sharded-cluster-cov commands: - func: run server vars: @@ -2891,14 +2921,14 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version - - python-3.14 + - python-3.11 - sharded_cluster-noauth-nossl - sync - - name: test-server-version-python3.13-async-noauth-ssl-sharded-cluster-cov + - name: test-server-version-python3.10-async-noauth-ssl-sharded-cluster-cov commands: - func: run server vars: @@ -2912,32 +2942,30 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.13 + - python-3.10 - sharded_cluster-noauth-ssl - async - - name: test-server-version-python3.12-sync-noauth-ssl-sharded-cluster-cov + - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - COVERAGE: "1" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - server-version - - python-3.12 + - python-pypy3.10 - sharded_cluster-noauth-ssl - sync - name: test-server-version-python3.13-async-auth-nossl-standalone-cov @@ -3024,7 +3052,7 @@ tasks: - python-3.10 - standalone-auth-ssl - sync - - name: test-server-version-python3.11-async-noauth-nossl-standalone-cov + - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov commands: - func: run server vars: @@ -3038,76 +3066,75 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.10" TEST_NAME: default_async tags: - server-version - - python-3.11 + - python-3.10 - standalone-noauth-nossl - async - pr - - name: test-server-version-python3.10-sync-noauth-nossl-standalone-cov + - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone commands: - func: run server vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: nossl TOPOLOGY: standalone - COVERAGE: "1" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: pypy3.10 TEST_NAME: default_sync tags: - server-version - - python-3.10 + - python-pypy3.10 - standalone-noauth-nossl - sync - pr - - name: test-server-version-pypy3.10-async-noauth-ssl-standalone + - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone + COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone - PYTHON_VERSION: pypy3.10 + COVERAGE: "1" + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-3.14 - standalone-noauth-ssl - async - - name: test-server-version-python3.14-sync-noauth-ssl-standalone-cov + - name: test-server-version-python3.14t-sync-noauth-ssl-standalone commands: - func: run server vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone - COVERAGE: "1" - func: run tests vars: AUTH: noauth SSL: ssl TOPOLOGY: standalone - COVERAGE: "1" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_sync tags: - server-version - - python-3.14 + - python-3.14t - standalone-noauth-ssl - sync + - free-threaded # Standard tests - name: test-standard-v4.2-python3.11-sync-noauth-ssl-replica-set @@ -3132,7 +3159,7 @@ tasks: - python-3.11 - replica_set-noauth-ssl - sync - - name: test-standard-v4.2-pypy3.10-sync-noauth-ssl-replica-set + - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3146,15 +3173,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-pypy3.10 + - python-3.14 - replica_set-noauth-ssl - sync - - pypy - name: test-standard-v4.2-python3.12-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -3177,6 +3203,29 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - sync + - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - sync + - pypy - name: test-standard-v4.2-python3.10-sync-noauth-nossl-standalone commands: - func: run server @@ -3199,7 +3248,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - sync - - name: test-standard-v4.2-python3.14-sync-noauth-nossl-standalone + - name: test-standard-v4.2-python3.14t-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3213,14 +3262,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-3.14 + - python-3.14t - standalone-noauth-nossl - sync + - free-threaded - name: test-standard-v4.4-python3.11-async-noauth-ssl-replica-set commands: - func: run server @@ -3243,7 +3293,7 @@ tasks: - python-3.11 - replica_set-noauth-ssl - async - - name: test-standard-v4.4-pypy3.10-async-noauth-ssl-replica-set + - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3257,15 +3307,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-pypy3.10 + - python-3.14 - replica_set-noauth-ssl - async - - pypy - name: test-standard-v4.4-python3.12-async-auth-ssl-sharded-cluster commands: - func: run server @@ -3288,6 +3337,29 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - async + - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - async + - pypy - name: test-standard-v4.4-python3.10-async-noauth-nossl-standalone commands: - func: run server @@ -3310,7 +3382,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - async - - name: test-standard-v4.4-python3.14-async-noauth-nossl-standalone + - name: test-standard-v4.4-python3.14t-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3324,14 +3396,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-3.14 + - python-3.14t - standalone-noauth-nossl - async + - free-threaded - name: test-standard-v5.0-python3.10-sync-noauth-ssl-replica-set commands: - func: run server @@ -3354,7 +3427,7 @@ tasks: - python-3.10 - replica_set-noauth-ssl - sync - - name: test-standard-v5.0-python3.14-sync-noauth-ssl-replica-set + - name: test-standard-v5.0-python3.14t-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -3368,14 +3441,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-3.14 + - python-3.14t - replica_set-noauth-ssl - sync + - free-threaded - name: test-standard-v5.0-python3.11-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -3398,7 +3472,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - sync - - name: test-standard-v5.0-pypy3.10-sync-auth-ssl-sharded-cluster + - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3412,15 +3486,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-5.0 - - python-pypy3.10 + - python-3.14 - sharded_cluster-auth-ssl - sync - - pypy - name: test-standard-v5.0-python3.13-sync-noauth-nossl-standalone commands: - func: run server @@ -3465,7 +3538,7 @@ tasks: - python-3.10 - replica_set-noauth-ssl - async - - name: test-standard-v6.0-python3.14-async-noauth-ssl-replica-set + - name: test-standard-v6.0-python3.14t-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3479,14 +3552,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-3.14 + - python-3.14t - replica_set-noauth-ssl - async + - free-threaded - name: test-standard-v6.0-python3.11-async-auth-ssl-sharded-cluster commands: - func: run server @@ -3509,7 +3583,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - async - - name: test-standard-v6.0-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3523,15 +3597,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-6.0 - - python-pypy3.10 + - python-3.14 - sharded_cluster-auth-ssl - async - - pypy - name: test-standard-v6.0-python3.13-async-noauth-nossl-standalone commands: - func: run server @@ -3598,7 +3671,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - sync - - name: test-standard-v7.0-python3.14-sync-auth-ssl-sharded-cluster + - name: test-standard-v7.0-python3.14t-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3612,14 +3685,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-3.14 + - python-3.14t - sharded_cluster-auth-ssl - sync + - free-threaded - name: test-standard-v7.0-python3.12-sync-noauth-nossl-standalone commands: - func: run server @@ -3642,6 +3716,29 @@ tasks: - python-3.12 - standalone-noauth-nossl - sync + - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-pypy3.10 + - standalone-noauth-nossl + - sync + - pypy - name: test-standard-v8.0-python3.13-async-noauth-ssl-replica-set commands: - func: run server @@ -3686,7 +3783,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - async - - name: test-standard-v8.0-python3.14-async-auth-ssl-sharded-cluster + - name: test-standard-v8.0-python3.14t-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3700,14 +3797,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-3.14 + - python-3.14t - sharded_cluster-auth-ssl - async + - free-threaded - name: test-standard-v8.0-python3.12-async-noauth-nossl-standalone commands: - func: run server @@ -3730,6 +3828,29 @@ tasks: - python-3.12 - standalone-noauth-nossl - async + - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-8.0 + - python-pypy3.10 + - standalone-noauth-nossl + - async + - pypy - name: test-standard-latest-python3.12-async-noauth-ssl-replica-set commands: - func: run server @@ -3753,6 +3874,29 @@ tasks: - replica_set-noauth-ssl - async - pr + - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-latest + - python-pypy3.10 + - replica_set-noauth-ssl + - async + - pypy - name: test-standard-latest-python3.13-async-auth-ssl-sharded-cluster commands: - func: run server @@ -3799,7 +3943,7 @@ tasks: - standalone-noauth-nossl - async - pr - - name: test-standard-latest-pypy3.10-async-noauth-nossl-standalone + - name: test-standard-latest-python3.14-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3813,15 +3957,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard - server-latest - - python-pypy3.10 + - python-3.14 - standalone-noauth-nossl - async - - pypy + - pr - name: test-standard-rapid-python3.12-sync-noauth-ssl-replica-set commands: - func: run server @@ -3844,6 +3988,29 @@ tasks: - python-3.12 - replica_set-noauth-ssl - sync + - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-rapid + - python-pypy3.10 + - replica_set-noauth-ssl + - sync + - pypy - name: test-standard-rapid-python3.13-sync-auth-ssl-sharded-cluster commands: - func: run server @@ -3888,7 +4055,7 @@ tasks: - python-3.11 - standalone-noauth-nossl - sync - - name: test-standard-rapid-pypy3.10-sync-noauth-nossl-standalone + - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3902,15 +4069,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-pypy3.10 + - python-3.14 - standalone-noauth-nossl - sync - - pypy # Test non standard tests - name: test-non-standard-v4.2-python3.11-noauth-ssl-replica-set @@ -3998,7 +4164,7 @@ tasks: - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v4.4-python3.14-noauth-ssl-replica-set + - name: test-non-standard-v4.4-python3.14t-noauth-ssl-replica-set commands: - func: run server vars: @@ -4012,13 +4178,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: 3.14t tags: - test-non-standard - server-4.4 - - python-3.14 + - python-3.14t - replica_set-noauth-ssl - noauth + - free-threaded - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set commands: - func: run server @@ -4041,7 +4208,7 @@ tasks: - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v4.4-python3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v4.4-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4055,11 +4222,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-4.4 - - python-3.10 + - python-3.14 - sharded_cluster-auth-ssl - auth - name: test-non-standard-v4.4-python3.13-noauth-nossl-standalone @@ -4083,7 +4250,7 @@ tasks: - python-3.13 - standalone-noauth-nossl - noauth - - name: test-non-standard-v5.0-python3.12-noauth-ssl-replica-set + - name: test-non-standard-v5.0-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4097,14 +4264,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-5.0 - - python-3.12 + - python-3.11 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v5.0-python3.13-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4118,11 +4285,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-5.0 - - python-3.13 + - python-3.12 - sharded_cluster-auth-ssl - auth - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster @@ -4147,7 +4314,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v5.0-python3.11-noauth-nossl-standalone + - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4161,14 +4328,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-5.0 - - python-3.11 + - python-3.10 - standalone-noauth-nossl - noauth - - name: test-non-standard-v6.0-python3.10-noauth-ssl-replica-set + - name: test-non-standard-v6.0-python3.14t-noauth-ssl-replica-set commands: - func: run server vars: @@ -4182,14 +4349,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: 3.14t tags: - test-non-standard - server-6.0 - - python-3.10 + - python-3.14t - replica_set-noauth-ssl - noauth - - name: test-non-standard-v6.0-python3.11-auth-ssl-sharded-cluster + - free-threaded + - name: test-non-standard-v6.0-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4203,14 +4371,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-6.0 - - python-3.11 + - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v6.0-python3.14-noauth-nossl-standalone + - name: test-non-standard-v6.0-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -4224,11 +4392,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-6.0 - - python-3.14 + - python-3.13 - standalone-noauth-nossl - noauth - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone @@ -4253,7 +4421,7 @@ tasks: - standalone-noauth-nossl - noauth - pypy - - name: test-non-standard-v7.0-python3.13-noauth-ssl-replica-set + - name: test-non-standard-v7.0-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4267,11 +4435,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-7.0 - - python-3.13 + - python-3.11 - replica_set-noauth-ssl - noauth - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set @@ -4296,7 +4464,7 @@ tasks: - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-v7.0-python3.14-auth-ssl-sharded-cluster + - name: test-non-standard-v7.0-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4310,14 +4478,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-7.0 - - python-3.14 + - python-3.12 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v7.0-python3.12-noauth-nossl-standalone + - name: test-non-standard-v7.0-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4331,14 +4499,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-7.0 - - python-3.12 + - python-3.10 - standalone-noauth-nossl - noauth - - name: test-non-standard-v8.0-python3.11-noauth-ssl-replica-set + - name: test-non-standard-v8.0-python3.14t-noauth-ssl-replica-set commands: - func: run server vars: @@ -4352,14 +4520,15 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.11" + PYTHON_VERSION: 3.14t tags: - test-non-standard - server-8.0 - - python-3.11 + - python-3.14t - replica_set-noauth-ssl - noauth - - name: test-non-standard-v8.0-python3.12-auth-ssl-sharded-cluster + - free-threaded + - name: test-non-standard-v8.0-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4373,11 +4542,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.12" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-8.0 - - python-3.12 + - python-3.14 - sharded_cluster-auth-ssl - auth - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster @@ -4402,7 +4571,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pypy - - name: test-non-standard-v8.0-python3.10-noauth-nossl-standalone + - name: test-non-standard-v8.0-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -4416,14 +4585,14 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-8.0 - - python-3.10 + - python-3.13 - standalone-noauth-nossl - noauth - - name: test-non-standard-latest-python3.12-noauth-ssl-replica-set + - name: test-non-standard-latest-python3.14t-noauth-ssl-replica-set commands: - func: run server vars: @@ -4437,13 +4606,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.12" + PYTHON_VERSION: 3.14t tags: - test-non-standard - server-latest - - python-3.12 + - python-3.14t - replica_set-noauth-ssl - noauth + - free-threaded - pr - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set commands: @@ -4467,7 +4637,7 @@ tasks: - replica_set-noauth-ssl - noauth - pypy - - name: test-non-standard-latest-python3.13-auth-ssl-sharded-cluster + - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4481,15 +4651,15 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.14" tags: - test-non-standard - server-latest - - python-3.13 + - python-3.14 - sharded_cluster-auth-ssl - auth - pr - - name: test-non-standard-latest-python3.11-noauth-nossl-standalone + - name: test-non-standard-latest-python3.13-noauth-nossl-standalone commands: - func: run server vars: @@ -4503,15 +4673,15 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.11" + PYTHON_VERSION: "3.13" tags: - test-non-standard - server-latest - - python-3.11 + - python-3.13 - standalone-noauth-nossl - noauth - pr - - name: test-non-standard-rapid-python3.14-noauth-ssl-replica-set + - name: test-non-standard-rapid-python3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4525,14 +4695,14 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.14" + PYTHON_VERSION: "3.11" tags: - test-non-standard - server-rapid - - python-3.14 + - python-3.11 - replica_set-noauth-ssl - noauth - - name: test-non-standard-rapid-python3.10-auth-ssl-sharded-cluster + - name: test-non-standard-rapid-python3.12-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4546,14 +4716,14 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.10" + PYTHON_VERSION: "3.12" tags: - test-non-standard - server-rapid - - python-3.10 + - python-3.12 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-rapid-python3.13-noauth-nossl-standalone + - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone commands: - func: run server vars: @@ -4567,11 +4737,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.13" + PYTHON_VERSION: "3.10" tags: - test-non-standard - server-rapid - - python-3.13 + - python-3.10 - standalone-noauth-nossl - noauth - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 676f9878b9..acdbd0dca3 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -244,7 +244,7 @@ buildvariants: # Enterprise auth tests - name: auth-enterprise-rhel8 tasks: - - name: .test-non-standard .auth + - name: .test-non-standard .auth !.free-threaded display_name: Auth Enterprise RHEL8 run_on: - rhel87-small @@ -253,7 +253,7 @@ buildvariants: AUTH: auth - name: auth-enterprise-macos tasks: - - name: .test-non-standard !.pypy .auth + - name: .test-non-standard !.pypy .auth !.free-threaded display_name: Auth Enterprise macOS run_on: - macos-14 @@ -270,48 +270,10 @@ buildvariants: TEST_NAME: enterprise_auth AUTH: auth - # Free threaded tests - - name: free-threaded-rhel8-python3.14t - tasks: - - name: .free-threading - display_name: Free-threaded RHEL8 Python3.14t - run_on: - - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.14t/bin/python3 - tags: [pr] - - name: free-threaded-macos-python3.14t - tasks: - - name: .free-threading - display_name: Free-threaded macOS Python3.14t - run_on: - - macos-14 - expansions: - PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.14/bin/python3t - tags: [] - - name: free-threaded-macos-arm64-python3.14t - tasks: - - name: .free-threading - display_name: Free-threaded macOS Arm64 Python3.14t - run_on: - - macos-14-arm64 - expansions: - PYTHON_BINARY: /Library/Frameworks/PythonT.Framework/Versions/3.14/bin/python3t - tags: [] - - name: free-threaded-win64-python3.14t - tasks: - - name: .free-threading - display_name: Free-threaded Win64 Python3.14t - run_on: - - windows-64-vsMulti-small - expansions: - PYTHON_BINARY: C:/python/Python314/python3.14t.exe - tags: [] - # Green framework tests - name: green-gevent-rhel8 tasks: - - name: .test-standard .sync + - name: .test-standard .sync !.free-threaded display_name: Green Gevent RHEL8 run_on: - rhel87-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 0519f4930b..203f288300 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -109,21 +109,6 @@ def create_standard_nonlinux_variants() -> list[BuildVariant]: return variants -def create_free_threaded_variants() -> list[BuildVariant]: - variants = [] - for host_name in ("rhel8", "macos", "macos-arm64", "win64"): - python = "3.14t" - tasks = [".free-threading"] - tags = [] - if host_name == "rhel8": - tags.append("pr") - host = HOSTS[host_name] - display_name = get_variant_name("Free-threaded", host, python=python) - variant = create_variant(tasks, display_name, tags=tags, python=python, host=host) - variants.append(variant) - return variants - - def create_encryption_variants() -> list[BuildVariant]: variants = [] tags = ["encryption_tag"] @@ -217,8 +202,11 @@ def create_enterprise_auth_variants(): for host in ["rhel8", "macos", "win64"]: expansions = dict(TEST_NAME="enterprise_auth", AUTH="auth") display_name = get_variant_name("Auth Enterprise", host) - tasks = [".test-non-standard .auth"] - if host != "rhel8": + tasks = [".test-non-standard .auth !.free-threaded"] + # https://jira.mongodb.org/browse/PYTHON-5586 + if host == "macos": + tasks = [".test-non-standard !.pypy .auth !.free-threaded"] + if host == "win64": tasks = [".test-non-standard !.pypy .auth"] variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) @@ -302,7 +290,7 @@ def create_green_framework_variants(): variants = [] host = DEFAULT_HOST for framework in ["gevent"]: - tasks = [".test-standard .sync"] + tasks = [".test-standard .sync !.free-threaded"] expansions = dict(GREEN_FRAMEWORK=framework) display_name = get_variant_name(f"Green {framework.capitalize()}", host) variant = create_variant(tasks, display_name, host=host, expansions=expansions) @@ -540,7 +528,9 @@ def create_server_version_tasks(): seen.add(combo) tags.append("pr") expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) - if python not in PYPYS: + if "t" in python: + tags.append("free-threaded") + if python not in PYPYS and "t" not in python: expansions["COVERAGE"] = "1" name = get_task_name( "test-server-version", @@ -596,6 +586,8 @@ def create_test_non_standard_tasks(): f"{topology}-{auth}-{ssl}", auth, ] + if "t" in python: + tags.append("free-threaded") if python in PYPYS: tags.append("pypy") if pr: @@ -646,6 +638,8 @@ def create_standard_tasks(): f"{topology}-{auth}-{ssl}", sync, ] + if "t" in python: + tags.append("free-threaded") if python in PYPYS: tags.append("pypy") if pr: @@ -716,6 +710,8 @@ def create_aws_tasks(): server_func = FunctionCall(func="run server", vars=server_vars) assume_func = FunctionCall(func="assume ec2 role") tags = [*base_tags, f"auth-aws-{test_type}"] + if "t" in python: + tags.append("free-threaded") name = get_task_name(f"{base_name}-{test_type}", python=python) test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type, PYTHON_VERSION=python) test_func = FunctionCall(func="run tests", vars=test_vars) @@ -731,6 +727,8 @@ def create_aws_tasks(): AWS_ROLE_SESSION_NAME="test", PYTHON_VERSION=python, ) + if "t" in python: + tags.append("free-threaded") test_func = FunctionCall(func="run tests", vars=test_vars) funcs = [server_func, assume_func, test_func] tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) @@ -757,6 +755,8 @@ def create_mod_wsgi_tasks(): for (test, topology), python in zip_cycle( product(["standalone", "embedded-mode"], ["standalone", "replica_set"]), CPYTHONS ): + if "t" in python: + continue if test == "standalone": task_name = "mod-wsgi-" else: @@ -930,15 +930,6 @@ def create_ocsp_tasks(): return tasks -def create_free_threading_tasks(): - vars = dict(VERSION="8.0", TOPOLOGY="replica_set") - server_func = FunctionCall(func="run server", vars=vars) - test_func = FunctionCall(func="run tests") - task_name = "test-free-threading" - tags = ["free-threading"] - return [EvgTask(name=task_name, tags=tags, commands=[server_func, test_func])] - - ############## # Functions ############## diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index a76753ebe9..4eb6bcb0dc 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -22,7 +22,7 @@ ############## ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] -CPYTHONS = ["3.10", "3.11", "3.12", "3.13", "3.14"] +CPYTHONS = ["3.10", "3.11", "3.12", "3.13", "3.14t", "3.14"] PYPYS = ["pypy3.10"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index f9f36cc6cc..dadb7db084 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -120,9 +120,17 @@ is_python_310() { get_python_binary() { version=$1 if [ "$(uname -s)" = "Darwin" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/$version/bin/python3" + if [[ "$version" == *"t"* ]]; then + binary_name="python3t" + framework_dir="PythonT" + else + binary_name="python3" + framework_dir="Python" + fi + version=$(echo "$version" | sed 's/t//g') + PYTHON="/Library/Frameworks/$framework_dir.Framework/Versions/$version/bin/$binary_name" elif [ "Windows_NT" = "${OS:-}" ]; then - version=$(echo $version | cut -d. -f1,2 | sed 's/\.//g') + version=$(echo $version | cut -d. -f1,2 | sed 's/\.//g; s/t//g') if [ -n "${IS_WIN32:-}" ]; then PYTHON="C:/python/32/Python$version/python.exe" else diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py index 2e5b61f8ae..7c62a251f8 100644 --- a/pymongo/network_layer.py +++ b/pymongo/network_layer.py @@ -103,7 +103,7 @@ def _is_ready(fut: Future[Any]) -> None: while sent < len(buf): try: - sent += sock.send(view[sent:]) # type:ignore[arg-type] + sent += sock.send(view[sent:]) except BLOCKING_IO_ERRORS as exc: fd = sock.fileno() # Check for closed socket. diff --git a/uv.lock b/uv.lock index 82400618af..273a7b6adf 100644 --- a/uv.lock +++ b/uv.lock @@ -141,173 +141,96 @@ wheels = [ [[package]] name = "cffi" -version = "1.17.1" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", - "python_full_version == '3.10.*'", - "python_full_version < '3.10'", -] -dependencies = [ - { name = "pycparser", marker = "python_full_version != '3.14.*'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191, upload-time = "2024-09-04T20:43:30.027Z" }, - { url = "https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592, upload-time = "2024-09-04T20:43:32.108Z" }, - { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024, upload-time = "2024-09-04T20:43:34.186Z" }, - { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188, upload-time = "2024-09-04T20:43:36.286Z" }, - { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571, upload-time = "2024-09-04T20:43:38.586Z" }, - { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687, upload-time = "2024-09-04T20:43:40.084Z" }, - { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211, upload-time = "2024-09-04T20:43:41.526Z" }, - { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325, upload-time = "2024-09-04T20:43:43.117Z" }, - { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784, upload-time = "2024-09-04T20:43:45.256Z" }, - { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564, upload-time = "2024-09-04T20:43:46.779Z" }, - { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804, upload-time = "2024-09-04T20:43:48.186Z" }, - { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299, upload-time = "2024-09-04T20:43:49.812Z" }, - { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" }, - { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" }, - { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" }, - { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" }, - { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" }, - { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" }, - { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" }, - { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" }, - { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" }, - { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" }, - { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" }, - { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" }, - { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, - { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, - { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, - { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, - { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, - { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, - { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, - { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, - { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, - { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, - { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, - { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, - { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, - { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, - { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, - { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, - { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, - { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, - { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, - { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, - { url = "https://files.pythonhosted.org/packages/b9/ea/8bb50596b8ffbc49ddd7a1ad305035daa770202a6b782fc164647c2673ad/cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", size = 182220, upload-time = "2024-09-04T20:45:01.577Z" }, - { url = "https://files.pythonhosted.org/packages/ae/11/e77c8cd24f58285a82c23af484cf5b124a376b32644e445960d1a4654c3a/cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", size = 178605, upload-time = "2024-09-04T20:45:03.837Z" }, - { url = "https://files.pythonhosted.org/packages/ed/65/25a8dc32c53bf5b7b6c2686b42ae2ad58743f7ff644844af7cdb29b49361/cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", size = 424910, upload-time = "2024-09-04T20:45:05.315Z" }, - { url = "https://files.pythonhosted.org/packages/42/7a/9d086fab7c66bd7c4d0f27c57a1b6b068ced810afc498cc8c49e0088661c/cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", size = 447200, upload-time = "2024-09-04T20:45:06.903Z" }, - { url = "https://files.pythonhosted.org/packages/da/63/1785ced118ce92a993b0ec9e0d0ac8dc3e5dbfbcaa81135be56c69cabbb6/cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", size = 454565, upload-time = "2024-09-04T20:45:08.975Z" }, - { url = "https://files.pythonhosted.org/packages/74/06/90b8a44abf3556599cdec107f7290277ae8901a58f75e6fe8f970cd72418/cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", size = 435635, upload-time = "2024-09-04T20:45:10.64Z" }, - { url = "https://files.pythonhosted.org/packages/bd/62/a1f468e5708a70b1d86ead5bab5520861d9c7eacce4a885ded9faa7729c3/cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", size = 445218, upload-time = "2024-09-04T20:45:12.366Z" }, - { url = "https://files.pythonhosted.org/packages/5b/95/b34462f3ccb09c2594aa782d90a90b045de4ff1f70148ee79c69d37a0a5a/cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", size = 460486, upload-time = "2024-09-04T20:45:13.935Z" }, - { url = "https://files.pythonhosted.org/packages/fc/fc/a1e4bebd8d680febd29cf6c8a40067182b64f00c7d105f8f26b5bc54317b/cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", size = 437911, upload-time = "2024-09-04T20:45:15.696Z" }, - { url = "https://files.pythonhosted.org/packages/e6/c3/21cab7a6154b6a5ea330ae80de386e7665254835b9e98ecc1340b3a7de9a/cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", size = 460632, upload-time = "2024-09-04T20:45:17.284Z" }, - { url = "https://files.pythonhosted.org/packages/cb/b5/fd9f8b5a84010ca169ee49f4e4ad6f8c05f4e3545b72ee041dbbcb159882/cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", size = 171820, upload-time = "2024-09-04T20:45:18.762Z" }, - { url = "https://files.pythonhosted.org/packages/8c/52/b08750ce0bce45c143e1b5d7357ee8c55341b52bdef4b0f081af1eb248c2/cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", size = 181290, upload-time = "2024-09-04T20:45:20.226Z" }, -] - -[[package]] -name = "cffi" -version = "2.0.0b1" +version = "2.0.0" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version == '3.14.*'", -] dependencies = [ - { name = "pycparser", marker = "python_full_version == '3.14.*' and implementation_name != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/75/84/7930c3586ca7c66a63b2d7a30d9df649ce8c3660f8da241b0661bba4e566/cffi-2.0.0b1.tar.gz", hash = "sha256:4440de58d19c0bebe6a2f3b721253d67b27aabb34e00ab35756d8699876191ea", size = 521625, upload-time = "2025-07-29T01:11:50.959Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/03/41/1baf86bc9ebd4a994990ef743d7f625c2e81fc57b3689a7c2f4f4ae32b39/cffi-2.0.0b1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:4b69c24a89c30a7821ecd25bcaff99075d95dd0c85c8845768c340a7736d84cf", size = 184335, upload-time = "2025-07-29T01:10:01.619Z" }, - { url = "https://files.pythonhosted.org/packages/b0/4a/93b0c2fde594dd0be91e78c577174b3380e977a1002710986403528ea0e6/cffi-2.0.0b1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ba9946f292f7ae3a6f1cc72af259c477c291eb10ad3ca74180862e39f46a521", size = 180531, upload-time = "2025-07-29T01:10:03.901Z" }, - { url = "https://files.pythonhosted.org/packages/db/23/d78944312174f6f12921cb27bee5d194664b1577a80ee910446355e24b8e/cffi-2.0.0b1-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1f4ca4ac8b9ee620ff5cb4307fae08691a0911bf0eeb488e8d6cf55bd77dfe43", size = 203099, upload-time = "2025-07-29T01:10:05.238Z" }, - { url = "https://files.pythonhosted.org/packages/2e/f7/f59dd3007400d362de620cf7955ed8bf5748fb0d0cddfcb28919b65af5b7/cffi-2.0.0b1-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0eb17b22e313c453c940931f5d063ba9e87e5db12d99473477ab1851e66fedb4", size = 203366, upload-time = "2025-07-29T01:10:06.596Z" }, - { url = "https://files.pythonhosted.org/packages/b5/81/52a261b2ca9a30c5f3c7f16b11142fcd827f345550cea51580463594400d/cffi-2.0.0b1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6a1faa47c7fbe0627f6b621dadebed9f532a789a1d3b519731304da1d3ec3d14", size = 217073, upload-time = "2025-07-29T01:10:07.972Z" }, - { url = "https://files.pythonhosted.org/packages/57/ce/ec093352e9e35491579fee73fb0c3600c82bd9fbea92a64fb291f5874c7d/cffi-2.0.0b1-cp310-cp310-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:230a97779cdd6734b6af3bfda4be31406bab58a078f25327b169975be9225a46", size = 208272, upload-time = "2025-07-29T01:10:09.034Z" }, - { url = "https://files.pythonhosted.org/packages/20/07/b01c9e2a8065aaec510fbe67837a7a3c4e05b347d9094e5db2179d084cce/cffi-2.0.0b1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c177aa1cdae420519665da22760f4a4a159551733d4686a4467f579bf7b75470", size = 216698, upload-time = "2025-07-29T01:10:10.439Z" }, - { url = "https://files.pythonhosted.org/packages/f7/30/eed081ff6faad34ee37beb69d0b269f0bd63743772f20412ea69d16e4aee/cffi-2.0.0b1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bdd3ce5e620ff6ee1e89fb7abb620756482fb3e337e5121e441cb0071c11cbd0", size = 218874, upload-time = "2025-07-29T01:10:11.924Z" }, - { url = "https://files.pythonhosted.org/packages/32/b5/e92bd27352dc749a1d286279fbe07de1850b9b674f8f6782294fd7ae8a93/cffi-2.0.0b1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0dbbe4a9bfcc058fccfee33ea5bebe50440767d219c2efa3a722a90ed59e8cfa", size = 211257, upload-time = "2025-07-29T01:10:13.227Z" }, - { url = "https://files.pythonhosted.org/packages/f5/02/c67687aa6b025166f43a2b915cf2e54cf1a32f0b3e849cbfb531f7719548/cffi-2.0.0b1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5f304ce328ecfb7bc36034374c20d0b4ae70423253f8a81c5e0b5efd90e29cd4", size = 218081, upload-time = "2025-07-29T01:10:14.294Z" }, - { url = "https://files.pythonhosted.org/packages/24/d5/926fc2526a452ebe33709fd59a28f4aa241edf3e6cbc7c05b9ed261df8e1/cffi-2.0.0b1-cp310-cp310-win32.whl", hash = "sha256:5acd1da34b96c8881b5df0e3d83cdbecc349b9ad5e9b8c0c589646c241448853", size = 172220, upload-time = "2025-07-29T01:10:15.331Z" }, - { url = "https://files.pythonhosted.org/packages/b8/cc/572111b18a4091a67d53aff91c7c00895cf93da7ed84f30ad304af4f6ff7/cffi-2.0.0b1-cp310-cp310-win_amd64.whl", hash = "sha256:ebb116751a49977c0b130493d3af13c567c4613946d293d4f61601237fabcd5f", size = 182827, upload-time = "2025-07-29T01:10:16.62Z" }, - { url = "https://files.pythonhosted.org/packages/67/90/14deaf13603dfff56bb872a4d53e1043486178ae7a2ce8cc17ea5677d97e/cffi-2.0.0b1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:5f373f9bdc3569acd8aaebb6b521080eeb5a298533a58715537caf74e9e27f6b", size = 184383, upload-time = "2025-07-29T01:10:17.675Z" }, - { url = "https://files.pythonhosted.org/packages/f7/36/0a125a1ab354a95aae2165ce4c2b8fcd057706a85380670e3991052dcfcd/cffi-2.0.0b1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a898f76bac81f9a371df6c8664228a85cdea6b283a721f2493f0df6f80afd208", size = 180599, upload-time = "2025-07-29T01:10:18.753Z" }, - { url = "https://files.pythonhosted.org/packages/bc/cb/27237bcd6c4e883104db737929f02838a7405caed422aeeb76ee5ffa14d9/cffi-2.0.0b1-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:314afab228f7b45de7bae55059b4e706296e7d3984d53e643cc0389757216221", size = 203212, upload-time = "2025-07-29T01:10:20.057Z" }, - { url = "https://files.pythonhosted.org/packages/12/94/bbeddca63090c5335ad597310bd6f2011f1c8733bc71e88f53c38ac4ff4c/cffi-2.0.0b1-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6de033c73dc89f80139c5a7d135fbd6c1d7b28ebb0d2df98cd1f4ef76991b15c", size = 202714, upload-time = "2025-07-29T01:10:21.401Z" }, - { url = "https://files.pythonhosted.org/packages/f4/9b/b7587a1f3f7f52795a7d125d6c6b844f7a8355cbb54ae8fdef2a03488914/cffi-2.0.0b1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffbbeedd6bac26c0373b71831d3c73181a1c100dc6fc7aadbfcca54cace417db", size = 217093, upload-time = "2025-07-29T01:10:22.481Z" }, - { url = "https://files.pythonhosted.org/packages/a3/b2/af4e0ed2c2aded25ed54107f96d424407839bdfa7e90858f8e0f6fed6ee9/cffi-2.0.0b1-cp311-cp311-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:c5713cac21b2351a53958c765d8e9eda45184bb757c3ccab139608e708788796", size = 209019, upload-time = "2025-07-29T01:10:23.584Z" }, - { url = "https://files.pythonhosted.org/packages/7b/6e/899c5473c3d7cc89815db894abcd81cd976a1f314c142e708aef3c0982a3/cffi-2.0.0b1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:71ab35c6cc375da1e2c06af65bf0b5049199ad9b264f9ed7c90c0fe9450900e3", size = 215662, upload-time = "2025-07-29T01:10:24.997Z" }, - { url = "https://files.pythonhosted.org/packages/1c/8e/953a07806f307bf1089239858013cc81c6d5cc8ca23593704b0530429302/cffi-2.0.0b1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53c780c2ec8ce0e5db9b74e9b0b55ff5d5f70071202740cef073a2771fa1d2ce", size = 219015, upload-time = "2025-07-29T01:10:27.077Z" }, - { url = "https://files.pythonhosted.org/packages/ea/0a/ffd99099d96a911236decff459cb330a1c046483008456b23554f62c81c6/cffi-2.0.0b1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:be957dd266facf8e4925643073159b05021a990b46620b06ca27eaf9d900dbc2", size = 212021, upload-time = "2025-07-29T01:10:28.527Z" }, - { url = "https://files.pythonhosted.org/packages/2f/00/c68c1a1665a28dfb8c848668f128d0f1919dc8e843f2e20ce90bce7b60d8/cffi-2.0.0b1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:16dc303af3630f54186b86aadf1121badf3cba6de17dfeacb84c5091e059a690", size = 217124, upload-time = "2025-07-29T01:10:29.877Z" }, - { url = "https://files.pythonhosted.org/packages/de/a7/194d80668bebc5a6a8d95ec5f3a1f186e8d87c864882c96a9ec2ecbd06a8/cffi-2.0.0b1-cp311-cp311-win32.whl", hash = "sha256:504d264944d0934d7b02164af5c62b175255ef0d39c5142d95968b710c58a8f6", size = 172111, upload-time = "2025-07-29T01:10:30.973Z" }, - { url = "https://files.pythonhosted.org/packages/e2/b6/0002211aab83b6bfbdba09dc8cd354e44c49216e6207999b9f0d1d0053cb/cffi-2.0.0b1-cp311-cp311-win_amd64.whl", hash = "sha256:e2920fa42cf0616c21ea6d3948ad207cf0e420d2d2ef449d86ccad6ef9c13393", size = 182858, upload-time = "2025-07-29T01:10:32.021Z" }, - { url = "https://files.pythonhosted.org/packages/52/9e/c6773b5b91b20c5642166c57503a9c67c6948ae4009aa4d2ce233a6b570f/cffi-2.0.0b1-cp311-cp311-win_arm64.whl", hash = "sha256:142c9c0c75fbc95ce23836e538681bd89e483de37b7cdf251dbdf0975995f8ac", size = 177421, upload-time = "2025-07-29T01:10:33.191Z" }, - { url = "https://files.pythonhosted.org/packages/50/20/432dc366952574ea190bce0a2970f92e676e972c78ef501d58406b459883/cffi-2.0.0b1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9d04b5fc06ba0ce45d7e51dfd8a14dc20708ef301fcf5a215c507f4e084b00c8", size = 185303, upload-time = "2025-07-29T01:10:34.291Z" }, - { url = "https://files.pythonhosted.org/packages/54/2d/e89016a2019212d54be2523756faa5b2c3ab8cb6f520a82e0d6bcacd527d/cffi-2.0.0b1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7b17e92900eb61bce62ea07ea8dd0dc33aa476ee8f977918050e52f90f5b645c", size = 181101, upload-time = "2025-07-29T01:10:35.641Z" }, - { url = "https://files.pythonhosted.org/packages/89/4f/6978a38ee0d8976f3087c09e779f9306ed51b9fb68ce5e3606244f6e2469/cffi-2.0.0b1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2155d2a0819c3fdcaa37832fb69e698d455627c23f83bc9c7adbef699fe4be19", size = 208122, upload-time = "2025-07-29T01:10:36.757Z" }, - { url = "https://files.pythonhosted.org/packages/20/2f/568d19b010aa304f6f55aaf160834e0db9677943b0c268462876c4e1c0ef/cffi-2.0.0b1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4210ddc2b41c20739c64dede1304fb81415220ea671885623063fab44066e376", size = 206747, upload-time = "2025-07-29T01:10:37.837Z" }, - { url = "https://files.pythonhosted.org/packages/bf/7b/171907beef5622bc6164ae9db94eaaa8e56bfb986f375742a9669ecc18f7/cffi-2.0.0b1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:31b8e3204cdef043e59a296383e6a43461d17c5c3d73fa9cebf4716a561291b0", size = 220804, upload-time = "2025-07-29T01:10:39.299Z" }, - { url = "https://files.pythonhosted.org/packages/49/2a/539d6021b1570308159745e775d0bd4164e43957e515bffd33cb6e57cf06/cffi-2.0.0b1-cp312-cp312-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:cbde39be02aa7d8fbcd6bf1a9241cb1d84f2e2f0614970c51a707a9a176b85c6", size = 211912, upload-time = "2025-07-29T01:10:40.767Z" }, - { url = "https://files.pythonhosted.org/packages/87/a9/2cddc8eeabd7b32d494de5bb9db95e3816b47ad00e05269b33e2bb8be9f3/cffi-2.0.0b1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ea57043b545f346b081877737cb0320960012107d0250fa5183a4306f9365d6", size = 219528, upload-time = "2025-07-29T01:10:42.419Z" }, - { url = "https://files.pythonhosted.org/packages/a8/18/49ff9cbe89eae3fff54a7af79474dd897bac44325073a6a7dc9b7ae4b64e/cffi-2.0.0b1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d31ba9f54739dcf98edb87e4881e326fad79e4866137c24afb0da531c1a965ca", size = 223011, upload-time = "2025-07-29T01:10:43.906Z" }, - { url = "https://files.pythonhosted.org/packages/a1/1e/4f10dd0fd9cb8d921620663beb497af0a6175c96cecd87e5baf613d0c947/cffi-2.0.0b1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:27309de8cebf48e056550db6607e2fb2c50109b54fc72c02b3b34811233483be", size = 221408, upload-time = "2025-07-29T01:10:45.385Z" }, - { url = "https://files.pythonhosted.org/packages/00/82/cbbb23951d9890475f151c1137d067a712e7f1e59509def619c5d9a645aa/cffi-2.0.0b1-cp312-cp312-win32.whl", hash = "sha256:f4b5acb4cddcaf0ebb82a226f9fa1d5063505e0c206031ee1f4d173750b592fd", size = 172972, upload-time = "2025-07-29T01:10:46.458Z" }, - { url = "https://files.pythonhosted.org/packages/ea/6b/e52b88ee438acd26fd84963f357a90ce8f4494cc7d94cbde1b26e199bd22/cffi-2.0.0b1-cp312-cp312-win_amd64.whl", hash = "sha256:cf1b2510f1a91c4d7e8f83df6a13404332421e6e4a067059174d455653ae5314", size = 183592, upload-time = "2025-07-29T01:10:47.916Z" }, - { url = "https://files.pythonhosted.org/packages/73/ac/3a5a182637b9a02c16335743b14485cb916ca984dcdc18737851732bff16/cffi-2.0.0b1-cp312-cp312-win_arm64.whl", hash = "sha256:bd7ce5d8224fb5a57bd7f1d9843aa4ecb870ec3f4a2101e1ba8314e91177e184", size = 177583, upload-time = "2025-07-29T01:10:49.091Z" }, - { url = "https://files.pythonhosted.org/packages/8e/5b/d5307bdfac914ec977af904947ead0f22013e066aff82a215a5ff7db5e20/cffi-2.0.0b1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a160995771c54b12dc5a1ef44d6fd59aeea4909e2d58c10169156e9d9a7e2960", size = 185280, upload-time = "2025-07-29T01:10:50.173Z" }, - { url = "https://files.pythonhosted.org/packages/b1/f5/b1fc8c8508e724b824713cd829cb5f0a39e182619ffc4d4bc1a8f142040d/cffi-2.0.0b1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c70c77ec47b96a593477386d7bf23243996c75f1cc7ce383ba35dcedca9bd14", size = 181098, upload-time = "2025-07-29T01:10:51.592Z" }, - { url = "https://files.pythonhosted.org/packages/1a/2e/2fdbdfb2783a103176c78fc9833aff80080b6567e90647e05e35160d4082/cffi-2.0.0b1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:47a91ab8d17ed7caed27e5b2eda3b3478f3d28cecb3939d708545804273e159b", size = 208101, upload-time = "2025-07-29T01:10:53.059Z" }, - { url = "https://files.pythonhosted.org/packages/1f/23/4eea412e3aa8173bad1ad77fc28905aa393bf4738221fc4dc99587157940/cffi-2.0.0b1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fd8f55419576289d7cd8c9349ea46a222379936136754ab4c2b041294b0b48d", size = 206671, upload-time = "2025-07-29T01:10:54.652Z" }, - { url = "https://files.pythonhosted.org/packages/c4/c1/3c334b249ae3faa1b5126c9db797561be3669d29f8096675b5d0e55754e3/cffi-2.0.0b1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:916141ca9ff05e9f67fe73c39a527d96a7101191673dee9985e71cd164b55915", size = 220797, upload-time = "2025-07-29T01:10:55.826Z" }, - { url = "https://files.pythonhosted.org/packages/ff/4a/67cf1060b419ea26ffb79dd645371246cffd3c7cf5fca5c7cd66769e7323/cffi-2.0.0b1-cp313-cp313-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:91fc109a1412dd29657f442a61bb571baaa1d074628145008ceb54dc9bb13941", size = 211900, upload-time = "2025-07-29T01:10:57.298Z" }, - { url = "https://files.pythonhosted.org/packages/de/df/d890a3638e86f9abe533d95bf08b5d5ec140c3a0befad9a3e9edc8546553/cffi-2.0.0b1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b08dd1a826b678d39aa78f30edc1b7d9bd1e5b7e5adc2d47e8f56ab25ac7c13", size = 219467, upload-time = "2025-07-29T01:10:58.819Z" }, - { url = "https://files.pythonhosted.org/packages/e8/2b/079e4e0535b72066029bd58438a3f6c538623742d31f80467d340cbaf8d9/cffi-2.0.0b1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76a19efb88a495bb7377fc542c7f97c9816dfc1d6bb4ad147acb99599a83e248", size = 222974, upload-time = "2025-07-29T01:11:00.179Z" }, - { url = "https://files.pythonhosted.org/packages/ff/e3/3428e9dbf24464bc04af09ad298b28c48a9481f0a89924f619388354734b/cffi-2.0.0b1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:87acb9e2221ed37c385c9cef866377fbaa13180de9ba1cdc4e6dc927b273c87f", size = 221343, upload-time = "2025-07-29T01:11:01.718Z" }, - { url = "https://files.pythonhosted.org/packages/bf/d8/9eba61d92eaf59ce97d85855895ed1961330c2e9a0ba9f922c920808b303/cffi-2.0.0b1-cp313-cp313-win32.whl", hash = "sha256:60c2c1d7adf558b932de9e4633f68e359063d1a748c92a4a3cba832085e9819b", size = 172947, upload-time = "2025-07-29T01:11:02.835Z" }, - { url = "https://files.pythonhosted.org/packages/fb/84/582fc182fe8994b495a0dde875c30ec9202154f13dfc1bbea96233b6ae1b/cffi-2.0.0b1-cp313-cp313-win_amd64.whl", hash = "sha256:6ff1ba153e0740c2ea47d74d015c1a03c3addab1681633be0838103c297b855c", size = 183441, upload-time = "2025-07-29T01:11:04.029Z" }, - { url = "https://files.pythonhosted.org/packages/0e/a5/85855a9ad255edf6be1fcd6e44384daa506a2276ef4f0e6164bc2dd03785/cffi-2.0.0b1-cp313-cp313-win_arm64.whl", hash = "sha256:adbed7d68bc8837eb2c73e01bc284b5af9898e82b6067a6cbffea4f1820626e4", size = 177621, upload-time = "2025-07-29T01:11:05.191Z" }, - { url = "https://files.pythonhosted.org/packages/7a/04/070592956f9818f6ef2c5219410209af08c3b81889da0b36185b535bdb2a/cffi-2.0.0b1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fe8cb43962af8e43facad740930fadc4cf8cdc1e073f59d0f13714711807979f", size = 185398, upload-time = "2025-07-29T01:11:06.337Z" }, - { url = "https://files.pythonhosted.org/packages/f7/68/704fba8db6ece9cb13f48e1c17311f70f49153671e056ae99ea29c549d39/cffi-2.0.0b1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a812e9ab7a0bfef3e89089c0359e631d8521d5efc8d21c7ede3f1568db689920", size = 181540, upload-time = "2025-07-29T01:11:07.4Z" }, - { url = "https://files.pythonhosted.org/packages/aa/f7/5a6f7913430f0e0e5e2ac5b06fd69bb532f1e420404d508936da6117a5b8/cffi-2.0.0b1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bce5ce4790b8347c2d7937312218d0282af344f8a589db163520a02fe8e42281", size = 207806, upload-time = "2025-07-29T01:11:08.543Z" }, - { url = "https://files.pythonhosted.org/packages/79/78/870845b72b8017717826bbfca874115e2dac88b8bf204298edc946691817/cffi-2.0.0b1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:39eedbed09879f6d1591ad155afcc162aa11ebf3271215339b4aef3df5631573", size = 206531, upload-time = "2025-07-29T01:11:09.803Z" }, - { url = "https://files.pythonhosted.org/packages/a7/f4/d65f9a303b97453f19588fd7d336c6e527b8ee9fc3b956296d63c6af5562/cffi-2.0.0b1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7dfd6f8f57e812f3175aa0d4d36ed797b6ff35f7cdfefea05417569b543ddc94", size = 220766, upload-time = "2025-07-29T01:11:10.978Z" }, - { url = "https://files.pythonhosted.org/packages/a1/09/85fa0b2841a16d2c3571661a9c4bb53441e195dda2413cfeab05b9726e56/cffi-2.0.0b1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:782f60714ea2935e5391a0f69ad4705624cdc86243b18dcfafd08565c28e89bd", size = 219317, upload-time = "2025-07-29T01:11:12.148Z" }, - { url = "https://files.pythonhosted.org/packages/75/87/91037b0c976babf124760cae2e0a0ca0ce18f02b5b34146421feecd6558d/cffi-2.0.0b1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f2ebc97ba03b26e9b6b048b6c3981165126905cb20564fbf6584f5e072a1c189", size = 222874, upload-time = "2025-07-29T01:11:13.346Z" }, - { url = "https://files.pythonhosted.org/packages/56/53/1c871477e707c001c30537e8f4807341f1d3b40bd6f094cf054864b41dc6/cffi-2.0.0b1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:fba9546b80f3b275f04915ffbca7b75aa22a353c4f6410469fb1d8c340ec1c31", size = 220973, upload-time = "2025-07-29T01:11:14.528Z" }, - { url = "https://files.pythonhosted.org/packages/81/c7/4cb50e2e7623a41d9416dc8d7d043ba3a69f2424209a1e04c28833216f90/cffi-2.0.0b1-cp314-cp314-win32.whl", hash = "sha256:339e853c75f69c726b1a85f2217db6880422f915770679c47150eea895e02b46", size = 175360, upload-time = "2025-07-29T01:11:31.19Z" }, - { url = "https://files.pythonhosted.org/packages/c1/ba/d0fb6fc597d2d11b77294626c51d3f01f9475c4ec3462687fef5244f09be/cffi-2.0.0b1-cp314-cp314-win_amd64.whl", hash = "sha256:856eb353a42b04d02b0633c71123276710a5390e92a27fbd2446864ca7d27923", size = 185681, upload-time = "2025-07-29T01:11:32.464Z" }, - { url = "https://files.pythonhosted.org/packages/24/0f/12390e59c1cb01a161d24f5ef73f15110c6c8f1e51ba8a42411d3faf5d58/cffi-2.0.0b1-cp314-cp314-win_arm64.whl", hash = "sha256:9e23ac717e8b3767c80198d483c743fe596b055a6e29ef34f9d8cdf61f941f2f", size = 180386, upload-time = "2025-07-29T01:11:33.648Z" }, - { url = "https://files.pythonhosted.org/packages/48/6a/87dfc25b45dcae6e05e342f29ac384b5847256c06b99b5e226d59549bf21/cffi-2.0.0b1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e227627762046204df31c589d7406540778d05622e395d41fc68b7895d40c174", size = 188831, upload-time = "2025-07-29T01:11:15.772Z" }, - { url = "https://files.pythonhosted.org/packages/9d/d9/4c6e38b9837e053f096007c37586be4dc6201664103db3a401618f37159e/cffi-2.0.0b1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2355cd38f375906da70a8bad548eb63f65bed43c1044ed075691fa36e8e8315a", size = 185064, upload-time = "2025-07-29T01:11:16.961Z" }, - { url = "https://files.pythonhosted.org/packages/d1/b4/e3797890685586d764c4bc20947e45cdddfa6dec8a635df84a947c7be8f8/cffi-2.0.0b1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:14c0ade7949f088615450abf884064b4ef11e8c9917b99d53f12e06cdfd2cd36", size = 209488, upload-time = "2025-07-29T01:11:18.258Z" }, - { url = "https://files.pythonhosted.org/packages/85/51/b91f5e8a30ea6b77a9ede74bab40482a86ec0d4c462ef4bc8f2c0775f969/cffi-2.0.0b1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:765c82d4a73ded03bfea961364f4c57dd6cfe7b0d57b7a2d9b95e2e7bd5de6f7", size = 208670, upload-time = "2025-07-29T01:11:19.753Z" }, - { url = "https://files.pythonhosted.org/packages/12/4c/ced2c206f38bd7cc1124aa8d9b4cbbd6db54a7a9220f889ba35a07b4f4b2/cffi-2.0.0b1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:265666e15da6974e6a74110873321e84c7c2288e379aca44a7df4713325b9be4", size = 222420, upload-time = "2025-07-29T01:11:21.043Z" }, - { url = "https://files.pythonhosted.org/packages/c1/8c/49feb0f27d072d7b4f5fe48407451a697015e6cf3197e144ebc5ed6c361f/cffi-2.0.0b1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d88f849d03c9aa2d7bbd710a0e20266f92bf524396c7fce881cd5a1971447812", size = 221747, upload-time = "2025-07-29T01:11:22.362Z" }, - { url = "https://files.pythonhosted.org/packages/0b/ea/f0b0c31e6445767441e8dad5a3fa267de7ffc5a87ebd13bc0fd2efa76f8f/cffi-2.0.0b1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:853e90e942246f9e098f16baa45896f80675f86ab6447823c4030a67c3cc112d", size = 224491, upload-time = "2025-07-29T01:11:23.95Z" }, - { url = "https://files.pythonhosted.org/packages/dc/6e/e5349ac9bf812e9a44914f699999c960c045bbd12b63358a4b583ab6ad85/cffi-2.0.0b1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3b8aee0176d80781a21855832c411cfd3126c34966650693ec1245f0b756498b", size = 223484, upload-time = "2025-07-29T01:11:25.266Z" }, - { url = "https://files.pythonhosted.org/packages/f5/11/b2a10765c287d368f87dd57e2840876609418d4bb2ea6cfc56d05c8cb8e0/cffi-2.0.0b1-cp314-cp314t-win32.whl", hash = "sha256:2da933859e1465a08f36d88e0452194da27b9ff0813e5ba49f02c544682d40e0", size = 180528, upload-time = "2025-07-29T01:11:26.968Z" }, - { url = "https://files.pythonhosted.org/packages/41/e8/b7a5be3b8c2d07627e6c007628cdd58c26b18b27ca110334c375d39c1665/cffi-2.0.0b1-cp314-cp314t-win_amd64.whl", hash = "sha256:53fbcfdb35760bc6fb68096632d29700bcf37fd0d71922dcc577eb6193fc6edc", size = 191764, upload-time = "2025-07-29T01:11:28.464Z" }, - { url = "https://files.pythonhosted.org/packages/1b/f5/5cec5a3462fe50687acf04f820b96f490a2c28acd7857472607839ba2712/cffi-2.0.0b1-cp314-cp314t-win_arm64.whl", hash = "sha256:505bec438236c623d7cfd8cc740598611a1d4883a629a0e33eb9e3c2dcd81b04", size = 183450, upload-time = "2025-07-29T01:11:29.941Z" }, - { url = "https://files.pythonhosted.org/packages/e3/c9/3a4777fe105edfdd6e21aa312213e4511c5265a917f2132b8ea73e01f048/cffi-2.0.0b1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:d2ede96d5de012d74b174082dec44c58a35b42e0ea9f197063ddb5e504ee0c7e", size = 184327, upload-time = "2025-07-29T01:11:34.906Z" }, - { url = "https://files.pythonhosted.org/packages/96/e9/a36e643af2d18aac1ecdf66bd6b384b99879ddd57a435f90d20514356558/cffi-2.0.0b1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:14505e4a82aa84abddab6e493946d3ed6bf6d268b58e4c2f5bcf8ec2dee2ca2d", size = 180553, upload-time = "2025-07-29T01:11:36.126Z" }, - { url = "https://files.pythonhosted.org/packages/26/33/36072caa8edb5abc416dc129cdcdf08577dcddf998238ab596eeac5fdae5/cffi-2.0.0b1-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:762dd8db1bd710f7b828b3c6cbb7101b5e190e722eb5633eb79b1a6b751e349a", size = 203083, upload-time = "2025-07-29T01:11:37.333Z" }, - { url = "https://files.pythonhosted.org/packages/cc/98/ff861689fb84c1cbeffa7a4c18148c943a88b6e0c13043d75d740b1d033a/cffi-2.0.0b1-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8af08fd246d2a544c8b68c25c171809d08eed9372f2026ae48dad17d26525578", size = 203433, upload-time = "2025-07-29T01:11:38.544Z" }, - { url = "https://files.pythonhosted.org/packages/4d/8c/130f35263b0be08946e06228c602a2012c5075ca838019f0ef2954407f16/cffi-2.0.0b1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e342223ada6b1d34f3719d3612991924cb68fa7f8fb2ec22f5bda254882828ab", size = 217086, upload-time = "2025-07-29T01:11:39.91Z" }, - { url = "https://files.pythonhosted.org/packages/1b/d4/e67a4dd21e34a716aaa71b300de43d654a36c5878678f5a343903d890fa1/cffi-2.0.0b1-cp39-cp39-manylinux_2_27_i686.manylinux_2_28_i686.whl", hash = "sha256:352e1949f7af33c37b060d2c2ea8a8fa1be6695ff94f8d5f7738bacacb9d6de4", size = 208221, upload-time = "2025-07-29T01:11:41.478Z" }, - { url = "https://files.pythonhosted.org/packages/6c/9c/2126fa7eb0131a6eaef5d13a93c2e9bbfff06271f55b7dd57835915cf460/cffi-2.0.0b1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3cc3245802b4950bc5459a2ef9a650d948972e44df120ecd2c6201814c8edb54", size = 216788, upload-time = "2025-07-29T01:11:42.685Z" }, - { url = "https://files.pythonhosted.org/packages/5b/94/b6646306de2a61c661110ebfb28b31f63d01f28f8ab6e6ec698112b5726a/cffi-2.0.0b1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ab4aea2f93ab6c408f0c6be8ddebe4d1086b4966148f542fe11cf82ca698dc07", size = 218944, upload-time = "2025-07-29T01:11:43.947Z" }, - { url = "https://files.pythonhosted.org/packages/12/6c/77bd877a1cae4234e47128c675478df1c5881b9e156569d9b408f83e9f5e/cffi-2.0.0b1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ecf72cb96106fbde29682db37569c7cee3ebf29ecf9ead46978679057c6df234", size = 211290, upload-time = "2025-07-29T01:11:45.23Z" }, - { url = "https://files.pythonhosted.org/packages/b5/f5/01670d1960b8f76f37e37be31d9e3f7e1473c3e89e9196e7d6c6d4f7688b/cffi-2.0.0b1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:aaec3f41cd6f0ffda5e23365822710d747b8613d3b8f54e12b5d7dcde688300d", size = 218084, upload-time = "2025-07-29T01:11:46.563Z" }, - { url = "https://files.pythonhosted.org/packages/94/03/f5ffb99d7ba1c0b5e48873829bed6349e4bb1e5fa108e0dffd94de23ea5a/cffi-2.0.0b1-cp39-cp39-win32.whl", hash = "sha256:601ddbaa51b1bd96a92a6a26e855060390023ab600377280a9bed7703ed2a088", size = 172173, upload-time = "2025-07-29T01:11:48.184Z" }, - { url = "https://files.pythonhosted.org/packages/72/29/3c890ed3ef27a19cb696fa1032b8ef83e0aa586ec55d4feeb0970e28c673/cffi-2.0.0b1-cp39-cp39-win_amd64.whl", hash = "sha256:cb351fade24f7ba9ca481bee53d4257053b9fa9da55da276fe1187a990a49dde", size = 182827, upload-time = "2025-07-29T01:11:49.444Z" }, + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, + { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, + { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, + { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, + { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, + { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, + { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, + { url = "https://files.pythonhosted.org/packages/c0/cc/08ed5a43f2996a16b462f64a7055c6e962803534924b9b2f1371d8c00b7b/cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf", size = 184288, upload-time = "2025-09-08T23:23:48.404Z" }, + { url = "https://files.pythonhosted.org/packages/3d/de/38d9726324e127f727b4ecc376bc85e505bfe61ef130eaf3f290c6847dd4/cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7", size = 180509, upload-time = "2025-09-08T23:23:49.73Z" }, + { url = "https://files.pythonhosted.org/packages/9b/13/c92e36358fbcc39cf0962e83223c9522154ee8630e1df7c0b3a39a8124e2/cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c", size = 208813, upload-time = "2025-09-08T23:23:51.263Z" }, + { url = "https://files.pythonhosted.org/packages/15/12/a7a79bd0df4c3bff744b2d7e52cc1b68d5e7e427b384252c42366dc1ecbc/cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165", size = 216498, upload-time = "2025-09-08T23:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ad/5c51c1c7600bdd7ed9a24a203ec255dccdd0ebf4527f7b922a0bde2fb6ed/cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534", size = 203243, upload-time = "2025-09-08T23:23:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/32/f2/81b63e288295928739d715d00952c8c6034cb6c6a516b17d37e0c8be5600/cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f", size = 203158, upload-time = "2025-09-08T23:23:55.169Z" }, + { url = "https://files.pythonhosted.org/packages/1f/74/cc4096ce66f5939042ae094e2e96f53426a979864aa1f96a621ad128be27/cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63", size = 216548, upload-time = "2025-09-08T23:23:56.506Z" }, + { url = "https://files.pythonhosted.org/packages/e8/be/f6424d1dc46b1091ffcc8964fa7c0ab0cd36839dd2761b49c90481a6ba1b/cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2", size = 218897, upload-time = "2025-09-08T23:23:57.825Z" }, + { url = "https://files.pythonhosted.org/packages/f7/e0/dda537c2309817edf60109e39265f24f24aa7f050767e22c98c53fe7f48b/cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65", size = 211249, upload-time = "2025-09-08T23:23:59.139Z" }, + { url = "https://files.pythonhosted.org/packages/2b/e7/7c769804eb75e4c4b35e658dba01de1640a351a9653c3d49ca89d16ccc91/cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322", size = 218041, upload-time = "2025-09-08T23:24:00.496Z" }, + { url = "https://files.pythonhosted.org/packages/aa/d9/6218d78f920dcd7507fc16a766b5ef8f3b913cc7aa938e7fc80b9978d089/cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a", size = 172138, upload-time = "2025-09-08T23:24:01.7Z" }, + { url = "https://files.pythonhosted.org/packages/54/8f/a1e836f82d8e32a97e6b29cc8f641779181ac7363734f12df27db803ebda/cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9", size = 182794, upload-time = "2025-09-08T23:24:02.943Z" }, ] [[package]] @@ -606,50 +529,67 @@ wheels = [ [[package]] name = "cryptography" -version = "45.0.5" +version = "46.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*' and platform_python_implementation != 'PyPy'" }, - { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*' and platform_python_implementation != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/95/1e/49527ac611af559665f71cbb8f92b332b5ec9c6fbc4e88b0f8e92f5e85df/cryptography-45.0.5.tar.gz", hash = "sha256:72e76caa004ab63accdf26023fccd1d087f6d90ec6048ff33ad0445abf7f605a", size = 744903, upload-time = "2025-07-02T13:06:25.941Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/fb/09e28bc0c46d2c547085e60897fea96310574c70fb21cd58a730a45f3403/cryptography-45.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:101ee65078f6dd3e5a028d4f19c07ffa4dd22cce6a20eaa160f8b5219911e7d8", size = 7043092, upload-time = "2025-07-02T13:05:01.514Z" }, - { url = "https://files.pythonhosted.org/packages/b1/05/2194432935e29b91fb649f6149c1a4f9e6d3d9fc880919f4ad1bcc22641e/cryptography-45.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a264aae5f7fbb089dbc01e0242d3b67dffe3e6292e1f5182122bdf58e65215d", size = 4205926, upload-time = "2025-07-02T13:05:04.741Z" }, - { url = "https://files.pythonhosted.org/packages/07/8b/9ef5da82350175e32de245646b1884fc01124f53eb31164c77f95a08d682/cryptography-45.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e74d30ec9c7cb2f404af331d5b4099a9b322a8a6b25c4632755c8757345baac5", size = 4429235, upload-time = "2025-07-02T13:05:07.084Z" }, - { url = "https://files.pythonhosted.org/packages/7c/e1/c809f398adde1994ee53438912192d92a1d0fc0f2d7582659d9ef4c28b0c/cryptography-45.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3af26738f2db354aafe492fb3869e955b12b2ef2e16908c8b9cb928128d42c57", size = 4209785, upload-time = "2025-07-02T13:05:09.321Z" }, - { url = "https://files.pythonhosted.org/packages/d0/8b/07eb6bd5acff58406c5e806eff34a124936f41a4fb52909ffa4d00815f8c/cryptography-45.0.5-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e6c00130ed423201c5bc5544c23359141660b07999ad82e34e7bb8f882bb78e0", size = 3893050, upload-time = "2025-07-02T13:05:11.069Z" }, - { url = "https://files.pythonhosted.org/packages/ec/ef/3333295ed58d900a13c92806b67e62f27876845a9a908c939f040887cca9/cryptography-45.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:dd420e577921c8c2d31289536c386aaa30140b473835e97f83bc71ea9d2baf2d", size = 4457379, upload-time = "2025-07-02T13:05:13.32Z" }, - { url = "https://files.pythonhosted.org/packages/d9/9d/44080674dee514dbb82b21d6fa5d1055368f208304e2ab1828d85c9de8f4/cryptography-45.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:d05a38884db2ba215218745f0781775806bde4f32e07b135348355fe8e4991d9", size = 4209355, upload-time = "2025-07-02T13:05:15.017Z" }, - { url = "https://files.pythonhosted.org/packages/c9/d8/0749f7d39f53f8258e5c18a93131919ac465ee1f9dccaf1b3f420235e0b5/cryptography-45.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ad0caded895a00261a5b4aa9af828baede54638754b51955a0ac75576b831b27", size = 4456087, upload-time = "2025-07-02T13:05:16.945Z" }, - { url = "https://files.pythonhosted.org/packages/09/d7/92acac187387bf08902b0bf0699816f08553927bdd6ba3654da0010289b4/cryptography-45.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9024beb59aca9d31d36fcdc1604dd9bbeed0a55bface9f1908df19178e2f116e", size = 4332873, upload-time = "2025-07-02T13:05:18.743Z" }, - { url = "https://files.pythonhosted.org/packages/03/c2/840e0710da5106a7c3d4153c7215b2736151bba60bf4491bdb421df5056d/cryptography-45.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:91098f02ca81579c85f66df8a588c78f331ca19089763d733e34ad359f474174", size = 4564651, upload-time = "2025-07-02T13:05:21.382Z" }, - { url = "https://files.pythonhosted.org/packages/2e/92/cc723dd6d71e9747a887b94eb3827825c6c24b9e6ce2bb33b847d31d5eaa/cryptography-45.0.5-cp311-abi3-win32.whl", hash = "sha256:926c3ea71a6043921050eaa639137e13dbe7b4ab25800932a8498364fc1abec9", size = 2929050, upload-time = "2025-07-02T13:05:23.39Z" }, - { url = "https://files.pythonhosted.org/packages/1f/10/197da38a5911a48dd5389c043de4aec4b3c94cb836299b01253940788d78/cryptography-45.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:b85980d1e345fe769cfc57c57db2b59cff5464ee0c045d52c0df087e926fbe63", size = 3403224, upload-time = "2025-07-02T13:05:25.202Z" }, - { url = "https://files.pythonhosted.org/packages/fe/2b/160ce8c2765e7a481ce57d55eba1546148583e7b6f85514472b1d151711d/cryptography-45.0.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f3562c2f23c612f2e4a6964a61d942f891d29ee320edb62ff48ffb99f3de9ae8", size = 7017143, upload-time = "2025-07-02T13:05:27.229Z" }, - { url = "https://files.pythonhosted.org/packages/c2/e7/2187be2f871c0221a81f55ee3105d3cf3e273c0a0853651d7011eada0d7e/cryptography-45.0.5-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3fcfbefc4a7f332dece7272a88e410f611e79458fab97b5efe14e54fe476f4fd", size = 4197780, upload-time = "2025-07-02T13:05:29.299Z" }, - { url = "https://files.pythonhosted.org/packages/b9/cf/84210c447c06104e6be9122661159ad4ce7a8190011669afceeaea150524/cryptography-45.0.5-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:460f8c39ba66af7db0545a8c6f2eabcbc5a5528fc1cf6c3fa9a1e44cec33385e", size = 4420091, upload-time = "2025-07-02T13:05:31.221Z" }, - { url = "https://files.pythonhosted.org/packages/3e/6a/cb8b5c8bb82fafffa23aeff8d3a39822593cee6e2f16c5ca5c2ecca344f7/cryptography-45.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9b4cf6318915dccfe218e69bbec417fdd7c7185aa7aab139a2c0beb7468c89f0", size = 4198711, upload-time = "2025-07-02T13:05:33.062Z" }, - { url = "https://files.pythonhosted.org/packages/04/f7/36d2d69df69c94cbb2473871926daf0f01ad8e00fe3986ac3c1e8c4ca4b3/cryptography-45.0.5-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2089cc8f70a6e454601525e5bf2779e665d7865af002a5dec8d14e561002e135", size = 3883299, upload-time = "2025-07-02T13:05:34.94Z" }, - { url = "https://files.pythonhosted.org/packages/82/c7/f0ea40f016de72f81288e9fe8d1f6748036cb5ba6118774317a3ffc6022d/cryptography-45.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0027d566d65a38497bc37e0dd7c2f8ceda73597d2ac9ba93810204f56f52ebc7", size = 4450558, upload-time = "2025-07-02T13:05:37.288Z" }, - { url = "https://files.pythonhosted.org/packages/06/ae/94b504dc1a3cdf642d710407c62e86296f7da9e66f27ab12a1ee6fdf005b/cryptography-45.0.5-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:be97d3a19c16a9be00edf79dca949c8fa7eff621763666a145f9f9535a5d7f42", size = 4198020, upload-time = "2025-07-02T13:05:39.102Z" }, - { url = "https://files.pythonhosted.org/packages/05/2b/aaf0adb845d5dabb43480f18f7ca72e94f92c280aa983ddbd0bcd6ecd037/cryptography-45.0.5-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:7760c1c2e1a7084153a0f68fab76e754083b126a47d0117c9ed15e69e2103492", size = 4449759, upload-time = "2025-07-02T13:05:41.398Z" }, - { url = "https://files.pythonhosted.org/packages/91/e4/f17e02066de63e0100a3a01b56f8f1016973a1d67551beaf585157a86b3f/cryptography-45.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6ff8728d8d890b3dda5765276d1bc6fb099252915a2cd3aff960c4c195745dd0", size = 4319991, upload-time = "2025-07-02T13:05:43.64Z" }, - { url = "https://files.pythonhosted.org/packages/f2/2e/e2dbd629481b499b14516eed933f3276eb3239f7cee2dcfa4ee6b44d4711/cryptography-45.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7259038202a47fdecee7e62e0fd0b0738b6daa335354396c6ddebdbe1206af2a", size = 4554189, upload-time = "2025-07-02T13:05:46.045Z" }, - { url = "https://files.pythonhosted.org/packages/f8/ea/a78a0c38f4c8736287b71c2ea3799d173d5ce778c7d6e3c163a95a05ad2a/cryptography-45.0.5-cp37-abi3-win32.whl", hash = "sha256:1e1da5accc0c750056c556a93c3e9cb828970206c68867712ca5805e46dc806f", size = 2911769, upload-time = "2025-07-02T13:05:48.329Z" }, - { url = "https://files.pythonhosted.org/packages/79/b3/28ac139109d9005ad3f6b6f8976ffede6706a6478e21c889ce36c840918e/cryptography-45.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:90cb0a7bb35959f37e23303b7eed0a32280510030daba3f7fdfbb65defde6a97", size = 3390016, upload-time = "2025-07-02T13:05:50.811Z" }, - { url = "https://files.pythonhosted.org/packages/f8/8b/34394337abe4566848a2bd49b26bcd4b07fd466afd3e8cce4cb79a390869/cryptography-45.0.5-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:206210d03c1193f4e1ff681d22885181d47efa1ab3018766a7b32a7b3d6e6afd", size = 3575762, upload-time = "2025-07-02T13:05:53.166Z" }, - { url = "https://files.pythonhosted.org/packages/8b/5d/a19441c1e89afb0f173ac13178606ca6fab0d3bd3ebc29e9ed1318b507fc/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c648025b6840fe62e57107e0a25f604db740e728bd67da4f6f060f03017d5097", size = 4140906, upload-time = "2025-07-02T13:05:55.914Z" }, - { url = "https://files.pythonhosted.org/packages/4b/db/daceb259982a3c2da4e619f45b5bfdec0e922a23de213b2636e78ef0919b/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b8fa8b0a35a9982a3c60ec79905ba5bb090fc0b9addcfd3dc2dd04267e45f25e", size = 4374411, upload-time = "2025-07-02T13:05:57.814Z" }, - { url = "https://files.pythonhosted.org/packages/6a/35/5d06ad06402fc522c8bf7eab73422d05e789b4e38fe3206a85e3d6966c11/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:14d96584701a887763384f3c47f0ca7c1cce322aa1c31172680eb596b890ec30", size = 4140942, upload-time = "2025-07-02T13:06:00.137Z" }, - { url = "https://files.pythonhosted.org/packages/65/79/020a5413347e44c382ef1f7f7e7a66817cd6273e3e6b5a72d18177b08b2f/cryptography-45.0.5-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57c816dfbd1659a367831baca4b775b2a5b43c003daf52e9d57e1d30bc2e1b0e", size = 4374079, upload-time = "2025-07-02T13:06:02.043Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c5/c0e07d84a9a2a8a0ed4f865e58f37c71af3eab7d5e094ff1b21f3f3af3bc/cryptography-45.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b9e38e0a83cd51e07f5a48ff9691cae95a79bea28fe4ded168a8e5c6c77e819d", size = 3321362, upload-time = "2025-07-02T13:06:04.463Z" }, - { url = "https://files.pythonhosted.org/packages/c0/71/9bdbcfd58d6ff5084687fe722c58ac718ebedbc98b9f8f93781354e6d286/cryptography-45.0.5-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8c4a6ff8a30e9e3d38ac0539e9a9e02540ab3f827a3394f8852432f6b0ea152e", size = 3587878, upload-time = "2025-07-02T13:06:06.339Z" }, - { url = "https://files.pythonhosted.org/packages/f0/63/83516cfb87f4a8756eaa4203f93b283fda23d210fc14e1e594bd5f20edb6/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bd4c45986472694e5121084c6ebbd112aa919a25e783b87eb95953c9573906d6", size = 4152447, upload-time = "2025-07-02T13:06:08.345Z" }, - { url = "https://files.pythonhosted.org/packages/22/11/d2823d2a5a0bd5802b3565437add16f5c8ce1f0778bf3822f89ad2740a38/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:982518cd64c54fcada9d7e5cf28eabd3ee76bd03ab18e08a48cad7e8b6f31b18", size = 4386778, upload-time = "2025-07-02T13:06:10.263Z" }, - { url = "https://files.pythonhosted.org/packages/5f/38/6bf177ca6bce4fe14704ab3e93627c5b0ca05242261a2e43ef3168472540/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:12e55281d993a793b0e883066f590c1ae1e802e3acb67f8b442e721e475e6463", size = 4151627, upload-time = "2025-07-02T13:06:13.097Z" }, - { url = "https://files.pythonhosted.org/packages/38/6a/69fc67e5266bff68a91bcb81dff8fb0aba4d79a78521a08812048913e16f/cryptography-45.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:5aa1e32983d4443e310f726ee4b071ab7569f58eedfdd65e9675484a4eb67bd1", size = 4385593, upload-time = "2025-07-02T13:06:15.689Z" }, - { url = "https://files.pythonhosted.org/packages/f6/34/31a1604c9a9ade0fdab61eb48570e09a796f4d9836121266447b0eaf7feb/cryptography-45.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:e357286c1b76403dd384d938f93c46b2b058ed4dfcdce64a770f0537ed3feb6f", size = 3331106, upload-time = "2025-07-02T13:06:18.058Z" }, + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/62/e3664e6ffd7743e1694b244dde70b43a394f6f7fbcacf7014a8ff5197c73/cryptography-46.0.1.tar.gz", hash = "sha256:ed570874e88f213437f5cf758f9ef26cbfc3f336d889b1e592ee11283bb8d1c7", size = 749198, upload-time = "2025-09-17T00:10:35.797Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/8c/44ee01267ec01e26e43ebfdae3f120ec2312aa72fa4c0507ebe41a26739f/cryptography-46.0.1-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:1cd6d50c1a8b79af1a6f703709d8973845f677c8e97b1268f5ff323d38ce8475", size = 7285044, upload-time = "2025-09-17T00:08:36.807Z" }, + { url = "https://files.pythonhosted.org/packages/22/59/9ae689a25047e0601adfcb159ec4f83c0b4149fdb5c3030cc94cd218141d/cryptography-46.0.1-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0ff483716be32690c14636e54a1f6e2e1b7bf8e22ca50b989f88fa1b2d287080", size = 4308182, upload-time = "2025-09-17T00:08:39.388Z" }, + { url = "https://files.pythonhosted.org/packages/c4/ee/ca6cc9df7118f2fcd142c76b1da0f14340d77518c05b1ebfbbabca6b9e7d/cryptography-46.0.1-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9873bf7c1f2a6330bdfe8621e7ce64b725784f9f0c3a6a55c3047af5849f920e", size = 4572393, upload-time = "2025-09-17T00:08:41.663Z" }, + { url = "https://files.pythonhosted.org/packages/7f/a3/0f5296f63815d8e985922b05c31f77ce44787b3127a67c0b7f70f115c45f/cryptography-46.0.1-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0dfb7c88d4462a0cfdd0d87a3c245a7bc3feb59de101f6ff88194f740f72eda6", size = 4308400, upload-time = "2025-09-17T00:08:43.559Z" }, + { url = "https://files.pythonhosted.org/packages/5d/8c/74fcda3e4e01be1d32775d5b4dd841acaac3c1b8fa4d0774c7ac8d52463d/cryptography-46.0.1-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e22801b61613ebdebf7deb18b507919e107547a1d39a3b57f5f855032dd7cfb8", size = 4015786, upload-time = "2025-09-17T00:08:45.758Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b8/85d23287baeef273b0834481a3dd55bbed3a53587e3b8d9f0898235b8f91/cryptography-46.0.1-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:757af4f6341ce7a1e47c326ca2a81f41d236070217e5fbbad61bbfe299d55d28", size = 4982606, upload-time = "2025-09-17T00:08:47.602Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d3/de61ad5b52433b389afca0bc70f02a7a1f074651221f599ce368da0fe437/cryptography-46.0.1-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f7a24ea78de345cfa7f6a8d3bde8b242c7fac27f2bd78fa23474ca38dfaeeab9", size = 4604234, upload-time = "2025-09-17T00:08:49.879Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1f/dbd4d6570d84748439237a7478d124ee0134bf166ad129267b7ed8ea6d22/cryptography-46.0.1-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e8776dac9e660c22241b6587fae51a67b4b0147daa4d176b172c3ff768ad736", size = 4307669, upload-time = "2025-09-17T00:08:52.321Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fd/ca0a14ce7f0bfe92fa727aacaf2217eb25eb7e4ed513b14d8e03b26e63ed/cryptography-46.0.1-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9f40642a140c0c8649987027867242b801486865277cbabc8c6059ddef16dc8b", size = 4947579, upload-time = "2025-09-17T00:08:54.697Z" }, + { url = "https://files.pythonhosted.org/packages/89/6b/09c30543bb93401f6f88fce556b3bdbb21e55ae14912c04b7bf355f5f96c/cryptography-46.0.1-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:449ef2b321bec7d97ef2c944173275ebdab78f3abdd005400cc409e27cd159ab", size = 4603669, upload-time = "2025-09-17T00:08:57.16Z" }, + { url = "https://files.pythonhosted.org/packages/23/9a/38cb01cb09ce0adceda9fc627c9cf98eb890fc8d50cacbe79b011df20f8a/cryptography-46.0.1-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2dd339ba3345b908fa3141ddba4025568fa6fd398eabce3ef72a29ac2d73ad75", size = 4435828, upload-time = "2025-09-17T00:08:59.606Z" }, + { url = "https://files.pythonhosted.org/packages/0f/53/435b5c36a78d06ae0bef96d666209b0ecd8f8181bfe4dda46536705df59e/cryptography-46.0.1-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7411c910fb2a412053cf33cfad0153ee20d27e256c6c3f14d7d7d1d9fec59fd5", size = 4709553, upload-time = "2025-09-17T00:09:01.832Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c4/0da6e55595d9b9cd3b6eb5dc22f3a07ded7f116a3ea72629cab595abb804/cryptography-46.0.1-cp311-abi3-win32.whl", hash = "sha256:cbb8e769d4cac884bb28e3ff620ef1001b75588a5c83c9c9f1fdc9afbe7f29b0", size = 3058327, upload-time = "2025-09-17T00:09:03.726Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/cd29a35e0d6e78a0ee61793564c8cff0929c38391cb0de27627bdc7525aa/cryptography-46.0.1-cp311-abi3-win_amd64.whl", hash = "sha256:92e8cfe8bd7dd86eac0a677499894862cd5cc2fd74de917daa881d00871ac8e7", size = 3523893, upload-time = "2025-09-17T00:09:06.272Z" }, + { url = "https://files.pythonhosted.org/packages/f2/dd/eea390f3e78432bc3d2f53952375f8b37cb4d37783e626faa6a51e751719/cryptography-46.0.1-cp311-abi3-win_arm64.whl", hash = "sha256:db5597a4c7353b2e5fb05a8e6cb74b56a4658a2b7bf3cb6b1821ae7e7fd6eaa0", size = 2932145, upload-time = "2025-09-17T00:09:08.568Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fb/c73588561afcd5e24b089952bd210b14676c0c5bf1213376350ae111945c/cryptography-46.0.1-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:4c49eda9a23019e11d32a0eb51a27b3e7ddedde91e099c0ac6373e3aacc0d2ee", size = 7193928, upload-time = "2025-09-17T00:09:10.595Z" }, + { url = "https://files.pythonhosted.org/packages/26/34/0ff0bb2d2c79f25a2a63109f3b76b9108a906dd2a2eb5c1d460b9938adbb/cryptography-46.0.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9babb7818fdd71394e576cf26c5452df77a355eac1a27ddfa24096665a27f8fd", size = 4293515, upload-time = "2025-09-17T00:09:12.861Z" }, + { url = "https://files.pythonhosted.org/packages/df/b7/d4f848aee24ecd1be01db6c42c4a270069a4f02a105d9c57e143daf6cf0f/cryptography-46.0.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9f2c4cc63be3ef43c0221861177cee5d14b505cd4d4599a89e2cd273c4d3542a", size = 4545619, upload-time = "2025-09-17T00:09:15.397Z" }, + { url = "https://files.pythonhosted.org/packages/44/a5/42fedefc754fd1901e2d95a69815ea4ec8a9eed31f4c4361fcab80288661/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:41c281a74df173876da1dc9a9b6953d387f06e3d3ed9284e3baae3ab3f40883a", size = 4299160, upload-time = "2025-09-17T00:09:17.155Z" }, + { url = "https://files.pythonhosted.org/packages/86/a1/cd21174f56e769c831fbbd6399a1b7519b0ff6280acec1b826d7b072640c/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0a17377fa52563d730248ba1f68185461fff36e8bc75d8787a7dd2e20a802b7a", size = 3994491, upload-time = "2025-09-17T00:09:18.971Z" }, + { url = "https://files.pythonhosted.org/packages/8d/2f/a8cbfa1c029987ddc746fd966711d4fa71efc891d37fbe9f030fe5ab4eec/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0d1922d9280e08cde90b518a10cd66831f632960a8d08cb3418922d83fce6f12", size = 4960157, upload-time = "2025-09-17T00:09:20.923Z" }, + { url = "https://files.pythonhosted.org/packages/67/ae/63a84e6789e0d5a2502edf06b552bcb0fa9ff16147265d5c44a211942abe/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:af84e8e99f1a82cea149e253014ea9dc89f75b82c87bb6c7242203186f465129", size = 4577263, upload-time = "2025-09-17T00:09:23.356Z" }, + { url = "https://files.pythonhosted.org/packages/ef/8f/1b9fa8e92bd9cbcb3b7e1e593a5232f2c1e6f9bd72b919c1a6b37d315f92/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:ef648d2c690703501714588b2ba640facd50fd16548133b11b2859e8655a69da", size = 4298703, upload-time = "2025-09-17T00:09:25.566Z" }, + { url = "https://files.pythonhosted.org/packages/c3/af/bb95db070e73fea3fae31d8a69ac1463d89d1c084220f549b00dd01094a8/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:e94eb5fa32a8a9f9bf991f424f002913e3dd7c699ef552db9b14ba6a76a6313b", size = 4926363, upload-time = "2025-09-17T00:09:27.451Z" }, + { url = "https://files.pythonhosted.org/packages/f5/3b/d8fb17ffeb3a83157a1cc0aa5c60691d062aceecba09c2e5e77ebfc1870c/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:534b96c0831855e29fc3b069b085fd185aa5353033631a585d5cd4dd5d40d657", size = 4576958, upload-time = "2025-09-17T00:09:29.924Z" }, + { url = "https://files.pythonhosted.org/packages/d9/46/86bc3a05c10c8aa88c8ae7e953a8b4e407c57823ed201dbcba55c4d655f4/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9b55038b5c6c47559aa33626d8ecd092f354e23de3c6975e4bb205df128a2a0", size = 4422507, upload-time = "2025-09-17T00:09:32.222Z" }, + { url = "https://files.pythonhosted.org/packages/a8/4e/387e5a21dfd2b4198e74968a541cfd6128f66f8ec94ed971776e15091ac3/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ec13b7105117dbc9afd023300fb9954d72ca855c274fe563e72428ece10191c0", size = 4683964, upload-time = "2025-09-17T00:09:34.118Z" }, + { url = "https://files.pythonhosted.org/packages/25/a3/f9f5907b166adb8f26762071474b38bbfcf89858a5282f032899075a38a1/cryptography-46.0.1-cp314-cp314t-win32.whl", hash = "sha256:504e464944f2c003a0785b81668fe23c06f3b037e9cb9f68a7c672246319f277", size = 3029705, upload-time = "2025-09-17T00:09:36.381Z" }, + { url = "https://files.pythonhosted.org/packages/12/66/4d3a4f1850db2e71c2b1628d14b70b5e4c1684a1bd462f7fffb93c041c38/cryptography-46.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c52fded6383f7e20eaf70a60aeddd796b3677c3ad2922c801be330db62778e05", size = 3502175, upload-time = "2025-09-17T00:09:38.261Z" }, + { url = "https://files.pythonhosted.org/packages/52/c7/9f10ad91435ef7d0d99a0b93c4360bea3df18050ff5b9038c489c31ac2f5/cryptography-46.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:9495d78f52c804b5ec8878b5b8c7873aa8e63db9cd9ee387ff2db3fffe4df784", size = 2912354, upload-time = "2025-09-17T00:09:40.078Z" }, + { url = "https://files.pythonhosted.org/packages/98/e5/fbd632385542a3311915976f88e0dfcf09e62a3fc0aff86fb6762162a24d/cryptography-46.0.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:d84c40bdb8674c29fa192373498b6cb1e84f882889d21a471b45d1f868d8d44b", size = 7255677, upload-time = "2025-09-17T00:09:42.407Z" }, + { url = "https://files.pythonhosted.org/packages/56/3e/13ce6eab9ad6eba1b15a7bd476f005a4c1b3f299f4c2f32b22408b0edccf/cryptography-46.0.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9ed64e5083fa806709e74fc5ea067dfef9090e5b7a2320a49be3c9df3583a2d8", size = 4301110, upload-time = "2025-09-17T00:09:45.614Z" }, + { url = "https://files.pythonhosted.org/packages/a2/67/65dc233c1ddd688073cf7b136b06ff4b84bf517ba5529607c9d79720fc67/cryptography-46.0.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:341fb7a26bc9d6093c1b124b9f13acc283d2d51da440b98b55ab3f79f2522ead", size = 4562369, upload-time = "2025-09-17T00:09:47.601Z" }, + { url = "https://files.pythonhosted.org/packages/17/db/d64ae4c6f4e98c3dac5bf35dd4d103f4c7c345703e43560113e5e8e31b2b/cryptography-46.0.1-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6ef1488967e729948d424d09c94753d0167ce59afba8d0f6c07a22b629c557b2", size = 4302126, upload-time = "2025-09-17T00:09:49.335Z" }, + { url = "https://files.pythonhosted.org/packages/3d/19/5f1eea17d4805ebdc2e685b7b02800c4f63f3dd46cfa8d4c18373fea46c8/cryptography-46.0.1-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7823bc7cdf0b747ecfb096d004cc41573c2f5c7e3a29861603a2871b43d3ef32", size = 4009431, upload-time = "2025-09-17T00:09:51.239Z" }, + { url = "https://files.pythonhosted.org/packages/81/b5/229ba6088fe7abccbfe4c5edb96c7a5ad547fac5fdd0d40aa6ea540b2985/cryptography-46.0.1-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:f736ab8036796f5a119ff8211deda416f8c15ce03776db704a7a4e17381cb2ef", size = 4980739, upload-time = "2025-09-17T00:09:54.181Z" }, + { url = "https://files.pythonhosted.org/packages/3a/9c/50aa38907b201e74bc43c572f9603fa82b58e831bd13c245613a23cff736/cryptography-46.0.1-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e46710a240a41d594953012213ea8ca398cd2448fbc5d0f1be8160b5511104a0", size = 4592289, upload-time = "2025-09-17T00:09:56.731Z" }, + { url = "https://files.pythonhosted.org/packages/5a/33/229858f8a5bb22f82468bb285e9f4c44a31978d5f5830bb4ea1cf8a4e454/cryptography-46.0.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:84ef1f145de5aee82ea2447224dc23f065ff4cc5791bb3b506615957a6ba8128", size = 4301815, upload-time = "2025-09-17T00:09:58.548Z" }, + { url = "https://files.pythonhosted.org/packages/52/cb/b76b2c87fbd6ed4a231884bea3ce073406ba8e2dae9defad910d33cbf408/cryptography-46.0.1-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9394c7d5a7565ac5f7d9ba38b2617448eba384d7b107b262d63890079fad77ca", size = 4943251, upload-time = "2025-09-17T00:10:00.475Z" }, + { url = "https://files.pythonhosted.org/packages/94/0f/f66125ecf88e4cb5b8017ff43f3a87ede2d064cb54a1c5893f9da9d65093/cryptography-46.0.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ed957044e368ed295257ae3d212b95456bd9756df490e1ac4538857f67531fcc", size = 4591247, upload-time = "2025-09-17T00:10:02.874Z" }, + { url = "https://files.pythonhosted.org/packages/f6/22/9f3134ae436b63b463cfdf0ff506a0570da6873adb4bf8c19b8a5b4bac64/cryptography-46.0.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f7de12fa0eee6234de9a9ce0ffcfa6ce97361db7a50b09b65c63ac58e5f22fc7", size = 4428534, upload-time = "2025-09-17T00:10:04.994Z" }, + { url = "https://files.pythonhosted.org/packages/89/39/e6042bcb2638650b0005c752c38ea830cbfbcbb1830e4d64d530000aa8dc/cryptography-46.0.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7fab1187b6c6b2f11a326f33b036f7168f5b996aedd0c059f9738915e4e8f53a", size = 4699541, upload-time = "2025-09-17T00:10:06.925Z" }, + { url = "https://files.pythonhosted.org/packages/68/46/753d457492d15458c7b5a653fc9a84a1c9c7a83af6ebdc94c3fc373ca6e8/cryptography-46.0.1-cp38-abi3-win32.whl", hash = "sha256:45f790934ac1018adeba46a0f7289b2b8fe76ba774a88c7f1922213a56c98bc1", size = 3043779, upload-time = "2025-09-17T00:10:08.951Z" }, + { url = "https://files.pythonhosted.org/packages/2f/50/b6f3b540c2f6ee712feeb5fa780bb11fad76634e71334718568e7695cb55/cryptography-46.0.1-cp38-abi3-win_amd64.whl", hash = "sha256:7176a5ab56fac98d706921f6416a05e5aff7df0e4b91516f450f8627cda22af3", size = 3517226, upload-time = "2025-09-17T00:10:10.769Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e8/77d17d00981cdd27cc493e81e1749a0b8bbfb843780dbd841e30d7f50743/cryptography-46.0.1-cp38-abi3-win_arm64.whl", hash = "sha256:efc9e51c3e595267ff84adf56e9b357db89ab2279d7e375ffcaf8f678606f3d9", size = 2923149, upload-time = "2025-09-17T00:10:13.236Z" }, + { url = "https://files.pythonhosted.org/packages/14/b9/b260180b31a66859648cfed5c980544ee22b15f8bd20ef82a23f58c0b83e/cryptography-46.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd4b5e2ee4e60425711ec65c33add4e7a626adef79d66f62ba0acfd493af282d", size = 3714683, upload-time = "2025-09-17T00:10:15.601Z" }, + { url = "https://files.pythonhosted.org/packages/c5/5a/1cd3ef86e5884edcbf8b27c3aa8f9544e9b9fcce5d3ed8b86959741f4f8e/cryptography-46.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48948940d0ae00483e85e9154bb42997d0b77c21e43a77b7773c8c80de532ac5", size = 3443784, upload-time = "2025-09-17T00:10:18.014Z" }, + { url = "https://files.pythonhosted.org/packages/27/27/077e09fd92075dd1338ea0ffaf5cfee641535545925768350ad90d8c36ca/cryptography-46.0.1-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b9c79af2c3058430d911ff1a5b2b96bbfe8da47d5ed961639ce4681886614e70", size = 3722319, upload-time = "2025-09-17T00:10:20.273Z" }, + { url = "https://files.pythonhosted.org/packages/db/32/6fc7250280920418651640d76cee34d91c1e0601d73acd44364570cf041f/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0ca4be2af48c24df689a150d9cd37404f689e2968e247b6b8ff09bff5bcd786f", size = 4249030, upload-time = "2025-09-17T00:10:22.396Z" }, + { url = "https://files.pythonhosted.org/packages/32/33/8d5398b2da15a15110b2478480ab512609f95b45ead3a105c9a9c76f9980/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:13e67c4d3fb8b6bc4ef778a7ccdd8df4cd15b4bcc18f4239c8440891a11245cc", size = 4528009, upload-time = "2025-09-17T00:10:24.418Z" }, + { url = "https://files.pythonhosted.org/packages/fd/1c/4012edad2a8977ab386c36b6e21f5065974d37afa3eade83a9968cba4855/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:15b5fd9358803b0d1cc42505a18d8bca81dabb35b5cfbfea1505092e13a9d96d", size = 4248902, upload-time = "2025-09-17T00:10:26.255Z" }, + { url = "https://files.pythonhosted.org/packages/58/a3/257cd5ae677302de8fa066fca9de37128f6729d1e63c04dd6a15555dd450/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:e34da95e29daf8a71cb2841fd55df0511539a6cdf33e6f77c1e95e44006b9b46", size = 4527150, upload-time = "2025-09-17T00:10:28.28Z" }, + { url = "https://files.pythonhosted.org/packages/6a/cd/fe6b65e1117ec7631f6be8951d3db076bac3e1b096e3e12710ed071ffc3c/cryptography-46.0.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:34f04b7311174469ab3ac2647469743720f8b6c8b046f238e5cb27905695eb2a", size = 3448210, upload-time = "2025-09-17T00:10:30.145Z" }, ] [[package]] @@ -714,8 +654,7 @@ name = "gevent" version = "25.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*' and platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, - { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*' and platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, { name = "zope-event" }, { name = "zope-interface" }, @@ -1282,8 +1221,7 @@ name = "pymongocrypt" version = "1.16.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*'" }, - { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*'" }, + { name = "cffi" }, { name = "cryptography" }, { name = "httpx" }, { name = "packaging" }, @@ -1298,15 +1236,15 @@ wheels = [ [[package]] name = "pyopenssl" -version = "25.1.0" +version = "25.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cryptography" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/04/8c/cd89ad05804f8e3c17dea8f178c3f40eeab5694c30e0c9f5bcd49f576fc3/pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b", size = 179937, upload-time = "2025-05-17T16:28:31.31Z" } +sdist = { url = "https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073, upload-time = "2025-09-17T00:32:21.037Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/80/28/2659c02301b9500751f8d42f9a6632e1508aa5120de5e43042b8b30f8d5d/pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab", size = 56771, upload-time = "2025-05-17T16:28:29.197Z" }, + { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268, upload-time = "2025-09-17T00:32:19.474Z" }, ] [[package]] @@ -2183,8 +2121,7 @@ name = "zstandard" version = "0.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version != '3.14.*' and platform_python_implementation == 'PyPy'" }, - { name = "cffi", version = "2.0.0b1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.14.*' and platform_python_implementation == 'PyPy'" }, + { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } wheels = [ From 52400e11a1ac69bcc89dfe5a034df00a4a7f690c Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 6 Oct 2025 09:25:57 -0400 Subject: [PATCH 2091/2111] PYTHON-5571 - Fix memory leak when raising InvalidDocument with C extensions (#2573) --- bson/_cbsonmodule.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index bee7198567..7d184641c5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -1657,26 +1657,28 @@ void handle_invalid_doc_error(PyObject* dict) { } if (evalue && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { - PyObject *msg = PyObject_Str(evalue); + msg = PyObject_Str(evalue); if (msg) { const char * msg_utf8 = PyUnicode_AsUTF8(msg); if (msg_utf8 == NULL) { goto cleanup; } - PyObject *new_msg = PyUnicode_FromFormat("Invalid document: %s", msg_utf8); + new_msg = PyUnicode_FromFormat("Invalid document: %s", msg_utf8); if (new_msg == NULL) { goto cleanup; } // Add doc to the error instance as a property. - PyObject *new_evalue = PyObject_CallFunctionObjArgs(InvalidDocument, new_msg, dict, NULL); + new_evalue = PyObject_CallFunctionObjArgs(InvalidDocument, new_msg, dict, NULL); Py_DECREF(evalue); Py_DECREF(etype); etype = InvalidDocument; InvalidDocument = NULL; if (new_evalue) { evalue = new_evalue; + new_evalue = NULL; } else { evalue = msg; + msg = NULL; } } PyErr_NormalizeException(&etype, &evalue, &etrace); From 16a2fea21928d47ad8c81912bd19ba2768fa9155 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 10:31:00 -0500 Subject: [PATCH 2092/2111] Bump the actions group with 3 updates (#2574) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/test-python.yml | 16 ++++++++-------- .github/workflows/zizmor.yml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 0c1f2f6d47..b138324bf4 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3 + uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@3599b3baa15b485a2e49ef411a7a4bb2452e7f93 # v3 + uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 0ed23b9d83..a057570f3f 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -26,7 +26,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: "3.10" @@ -68,7 +68,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -87,7 +87,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: "3.10" @@ -112,7 +112,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: "3.10" @@ -131,7 +131,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: "3.10" @@ -153,7 +153,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: "${{matrix.python}}" @@ -174,7 +174,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@557e51de59eb14aaaba2ed9621916900a91d50c6 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: enable-cache: true python-version: "3.10" @@ -264,7 +264,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@b75a909f75acd358c2196fb9a5f1299a9a8868a4 # v5 + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 with: python-version: "3.10" - id: setup-mongodb diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml index a3eb5d5508..c991de2e6d 100644 --- a/.github/workflows/zizmor.yml +++ b/.github/workflows/zizmor.yml @@ -18,4 +18,4 @@ jobs: with: persist-credentials: false - name: Run zizmor 🌈 - uses: zizmorcore/zizmor-action@0696496a48b64e0568faa46ddaf5f6fe48b83b04 + uses: zizmorcore/zizmor-action@da5ac40c5419dcf7f21630fb2f95e725ae8fb9d5 From 406bed041832a4371a8b933c57daf1e99a009b98 Mon Sep 17 00:00:00 2001 From: "Jeffrey A. Clark" Date: Mon, 6 Oct 2025 13:10:31 -0400 Subject: [PATCH 2093/2111] PYTHON-5597 Upgrade to macos-latest (#2578) --- .github/workflows/dist.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index acbfc6cfe5..84bf1ba893 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -99,7 +99,7 @@ jobs: make_sdist: name: Make SDist - runs-on: macos-13 + runs-on: macos-latest steps: - uses: actions/checkout@v5 with: From 46974363b4bd42405c7a8f3285997f9688094440 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 13:02:53 -0500 Subject: [PATCH 2094/2111] PYTHON-5538 Fix lock file handling and bump pyright from 1.1.405 to 1.1.406 (#2575) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- .evergreen/scripts/setup-dev-env.sh | 5 +++++ pyproject.toml | 2 +- uv.lock | 8 ++++---- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 1204848e72..209857d542 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -7,6 +7,11 @@ HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" ROOT=$(dirname "$(dirname $HERE)") pushd $ROOT > /dev/null +# Bail early if running on GitHub Actions. +if [ -n "${GITHUB_ACTION:-}" ]; then + exit 0 +fi + # Source the env files to pick up common variables. if [ -f $HERE/env.sh ]; then . $HERE/env.sh diff --git a/pyproject.toml b/pyproject.toml index b06e6401a4..623eb6c164 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,7 +60,7 @@ mockupdb = [ perf = ["simplejson>=3.17.0"] typing = [ "mypy==1.18.2", - "pyright==1.1.405", + "pyright==1.1.406", "typing_extensions", "pip" ] diff --git a/uv.lock b/uv.lock index 273a7b6adf..480f64c263 100644 --- a/uv.lock +++ b/uv.lock @@ -1199,7 +1199,7 @@ pip = [{ name = "pip" }] typing = [ { name = "mypy", specifier = "==1.18.2" }, { name = "pip" }, - { name = "pyright", specifier = "==1.1.405" }, + { name = "pyright", specifier = "==1.1.406" }, { name = "typing-extensions" }, ] @@ -1249,15 +1249,15 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.405" +version = "1.1.406" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/6c/ba4bbee22e76af700ea593a1d8701e3225080956753bee9750dcc25e2649/pyright-1.1.405.tar.gz", hash = "sha256:5c2a30e1037af27eb463a1cc0b9f6d65fec48478ccf092c1ac28385a15c55763", size = 4068319, upload-time = "2025-09-04T03:37:06.776Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f7/16/6b4fbdd1fef59a0292cbb99f790b44983e390321eccbc5921b4d161da5d1/pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c", size = 4113151, upload-time = "2025-10-02T01:04:45.488Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/1a/524f832e1ff1962a22a1accc775ca7b143ba2e9f5924bb6749dce566784a/pyright-1.1.405-py3-none-any.whl", hash = "sha256:a2cb13700b5508ce8e5d4546034cb7ea4aedb60215c6c33f56cec7f53996035a", size = 5905038, upload-time = "2025-09-04T03:37:04.913Z" }, + { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" }, ] [[package]] From a2e39ada00a099d7dff2e69eb3b63461a9498d2a Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Oct 2025 11:04:16 -0500 Subject: [PATCH 2095/2111] PYTHON-5596 Fix return type for distinct methods (#2576) --- justfile | 2 +- pymongo/asynchronous/collection.py | 2 +- pymongo/asynchronous/cursor.py | 2 +- pymongo/synchronous/collection.py | 2 +- pymongo/synchronous/cursor.py | 2 +- test/test_typing.py | 28 +++++++++++++++++++++++++++- 6 files changed, 32 insertions(+), 6 deletions(-) diff --git a/justfile b/justfile index f235346160..17b95e87b7 100644 --- a/justfile +++ b/justfile @@ -58,7 +58,7 @@ lint-manual *args="": && resync [group('test')] test *args="-v --durations=5 --maxfail=10": && resync - uvx --extra test pytest {{args}} + uv run --extra test pytest {{args}} [group('test')] run-tests *args: && resync diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py index 6af1f4f782..e7e2f58031 100644 --- a/pymongo/asynchronous/collection.py +++ b/pymongo/asynchronous/collection.py @@ -3143,7 +3143,7 @@ async def distinct( comment: Optional[Any] = None, hint: Optional[_IndexKeyHint] = None, **kwargs: Any, - ) -> list[str]: + ) -> list[Any]: """Get a list of distinct values for `key` among all documents in this collection. diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py index cf3a5372b4..f19d3f6cee 100644 --- a/pymongo/asynchronous/cursor.py +++ b/pymongo/asynchronous/cursor.py @@ -1063,7 +1063,7 @@ async def close(self) -> None: """Explicitly close / kill this cursor.""" await self._die_lock() - async def distinct(self, key: str) -> list[str]: + async def distinct(self, key: str) -> list[Any]: """Get a list of distinct values for `key` among all documents in the result set of this query. diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py index b68e4befed..4e5f7d08fb 100644 --- a/pymongo/synchronous/collection.py +++ b/pymongo/synchronous/collection.py @@ -3136,7 +3136,7 @@ def distinct( comment: Optional[Any] = None, hint: Optional[_IndexKeyHint] = None, **kwargs: Any, - ) -> list[str]: + ) -> list[Any]: """Get a list of distinct values for `key` among all documents in this collection. diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py index 12e2863bc6..fcd8ebeb1d 100644 --- a/pymongo/synchronous/cursor.py +++ b/pymongo/synchronous/cursor.py @@ -1061,7 +1061,7 @@ def close(self) -> None: """Explicitly close / kill this cursor.""" self._die_lock() - def distinct(self, key: str) -> list[str]: + def distinct(self, key: str) -> list[Any]: """Get a list of distinct values for `key` among all documents in the result set of this query. diff --git a/test/test_typing.py b/test/test_typing.py index 7240e59c06..17dc21b4e0 100644 --- a/test/test_typing.py +++ b/test/test_typing.py @@ -69,7 +69,7 @@ class ImplicitMovie(TypedDict): from test import IntegrationTest, PyMongoTestCase, client_context -from bson import CodecOptions, decode, decode_all, decode_file_iter, decode_iter, encode +from bson import CodecOptions, ObjectId, decode, decode_all, decode_file_iter, decode_iter, encode from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo import ASCENDING, MongoClient @@ -141,6 +141,32 @@ def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: docs = to_list(cursor) self.assertTrue(docs) + def test_distinct(self) -> None: + self.coll.delete_many({}) + self.coll.insert_many( + [ + {"_id": None}, + {"_id": 0}, + {"_id": ""}, + {"_id": ObjectId()}, + {"_id": True}, + ] + ) + + def collection_distinct( + collection: Collection, + ) -> list[None | int | str | ObjectId | bool]: + return collection.distinct("_id") + + def cursor_distinct( + collection: Collection, + ) -> list[None | int | str | ObjectId | bool]: + cursor = collection.find() + return cursor.distinct("_id") + + collection_distinct(self.coll) + cursor_distinct(self.coll) + @only_type_check def test_bulk_write(self) -> None: self.coll.insert_one({}) From 84772bd8a91a3278858be4fb7a2315a3df107bc2 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Oct 2025 11:07:44 -0500 Subject: [PATCH 2096/2111] PYTHON-5604 Skip ECS tests until we can test on Ubuntu 22 (#2582) --- .evergreen/generated_configs/variants.yml | 2 +- .evergreen/scripts/generate_config.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index acdbd0dca3..2fa66db214 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -71,7 +71,7 @@ buildvariants: # Aws auth tests - name: auth-aws-ubuntu-20 tasks: - - name: .auth-aws + - name: .auth-aws !.auth-aws-ecs display_name: Auth AWS Ubuntu-20 run_on: - ubuntu2004-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 203f288300..051c0c8660 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -438,7 +438,8 @@ def create_aws_auth_variants(): for host_name in ["ubuntu20", "win64", "macos"]: expansions = dict() - tasks = [".auth-aws"] + # PYTHON-5604 - we need to skip ECS tests for now. + tasks = [".auth-aws !.auth-aws-ecs"] tags = [] if host_name == "macos": tasks = [".auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2"] From 491f5ba77f7d280c3c5c39d313409de713b49daa Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 7 Oct 2025 12:30:06 -0500 Subject: [PATCH 2097/2111] PYTHON-5588 Fix python binary used in FIPS tests (#2581) --- .evergreen/generated_configs/variants.yml | 5 +++++ .evergreen/scripts/generate_config.py | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 2fa66db214..0c46898c62 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -9,6 +9,7 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest + PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" REQUIRE_FIPS: "1" tags: [] @@ -21,6 +22,7 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest + PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [] - name: other-hosts-rhel8-power8-latest @@ -32,6 +34,7 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest + PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [] - name: other-hosts-rhel8-arm64-latest @@ -43,6 +46,7 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest + PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [] - name: other-hosts-amazon2023-latest @@ -54,6 +58,7 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest + PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [pr] diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 051c0c8660..36cb02659a 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -470,7 +470,8 @@ def create_alternative_hosts_variants(): version = "latest" for host_name in OTHER_HOSTS: - expansions = dict(VERSION="latest") + # Use explicit Python 3.11 binary on the host since the default python3 is 3.9. + expansions = dict(VERSION="latest", PYTHON_BINARY="/usr/bin/python3.11") handle_c_ext(C_EXTS[0], expansions) host = HOSTS[host_name] tags = [] From 89a4eaa36cb2694bb34f3ba796b14eb14776b0fa Mon Sep 17 00:00:00 2001 From: Iris <58442094+sleepyStick@users.noreply.github.com> Date: Tue, 7 Oct 2025 12:34:56 -0700 Subject: [PATCH 2098/2111] PYTHON-5576: add PR template to mongo-python-driver (#2567) --- .github/pull_request_template.md | 38 ++++++++++++++++++++++ .github/workflows/pull_request_template.md | 23 ------------- 2 files changed, 38 insertions(+), 23 deletions(-) create mode 100644 .github/pull_request_template.md delete mode 100644 .github/workflows/pull_request_template.md diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..8185a38836 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,38 @@ + + +[Issue Key](https://jira.mongodb.org/browse/%7BISSUE_KEY%7D) +## Summary + + +## Changes in this PR + + +## Testing Plan + + +### Screenshots (optional) + + +## Checklist + +### Checklist for Author +- [ ] Did you update the changelog (if necessary)? +- [ ] Is the intention of the code captured in relevant tests? +- [ ] If there are new TODOs, has a related JIRA ticket been created? + +### Checklist for Reviewer {@primary_reviewer} +- [ ] Does the title of the PR reference a JIRA Ticket? +- [ ] Do you fully understand the implementation? (Would you be comfortable explaining how this code works to someone else?) +- [ ] Have you checked for spelling & grammar errors? +- [ ] Is all relevant documentation (README or docstring) updated? + +## Focus Areas for Reviewer (optional) + diff --git a/.github/workflows/pull_request_template.md b/.github/workflows/pull_request_template.md deleted file mode 100644 index 852066d4b2..0000000000 --- a/.github/workflows/pull_request_template.md +++ /dev/null @@ -1,23 +0,0 @@ -# [JIRA Ticket ID](Link to Ticket) - - -# Summary - - -# Changes in this PR - - -# Test Plan - - -# Screenshots (Optional) - - -# Callouts or Follow-up items (Optional) - From d59591311777f954470c77e8ea363d6695c03496 Mon Sep 17 00:00:00 2001 From: Casey Clements Date: Tue, 7 Oct 2025 15:43:07 -0400 Subject: [PATCH 2099/2111] PYTHON-5598 Add generate_config method to ensure auth is tested on free-threaded python 3.14t (#2580) --- .evergreen/generated_configs/tasks.yml | 348 ++++++++++++++++++++++ .evergreen/generated_configs/variants.yml | 6 +- .evergreen/scripts/generate_config.py | 48 ++- 3 files changed, 395 insertions(+), 7 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 8064ec85e6..855cbefef8 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -4766,3 +4766,351 @@ tasks: - standalone-noauth-nossl - noauth - pypy + + # Test standard auth tests + - name: test-standard-auth-v4.2-python3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: "3.10" + tags: + - test-standard-auth + - server-4.2 + - python-3.10 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v4.2-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-4.2 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v4.4-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.11" + tags: + - test-standard-auth + - server-4.4 + - python-3.11 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v4.4-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-4.4 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v5.0-python3.12-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.12" + tags: + - test-standard-auth + - server-5.0 + - python-3.12 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v5.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-5.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v6.0-python3.13-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.13" + tags: + - test-standard-auth + - server-6.0 + - python-3.13 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v6.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-6.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v7.0-python3.14t-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: 3.14t + tags: + - test-standard-auth + - server-7.0 + - python-3.14t + - sharded_cluster-auth-ssl + - auth + - free-threaded + - name: test-standard-auth-v7.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-7.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v8.0-python3.14-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.14" + tags: + - test-standard-auth + - server-8.0 + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v8.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-8.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-latest-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.11" + tags: + - test-standard-auth + - server-latest + - python-3.11 + - sharded_cluster-auth-ssl + - auth + - pr + - name: test-standard-auth-latest-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-latest + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-rapid-python3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: "3.10" + tags: + - test-standard-auth + - server-rapid + - python-3.10 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-rapid-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-rapid + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 0c46898c62..aae221adf5 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -249,7 +249,7 @@ buildvariants: # Enterprise auth tests - name: auth-enterprise-rhel8 tasks: - - name: .test-non-standard .auth !.free-threaded + - name: .test-standard-auth .auth !.free-threaded display_name: Auth Enterprise RHEL8 run_on: - rhel87-small @@ -258,7 +258,7 @@ buildvariants: AUTH: auth - name: auth-enterprise-macos tasks: - - name: .test-non-standard !.pypy .auth !.free-threaded + - name: .test-standard-auth !.pypy .auth !.free-threaded display_name: Auth Enterprise macOS run_on: - macos-14 @@ -267,7 +267,7 @@ buildvariants: AUTH: auth - name: auth-enterprise-win64 tasks: - - name: .test-non-standard !.pypy .auth + - name: .test-standard-auth !.pypy .auth display_name: Auth Enterprise Win64 run_on: - windows-64-vsMulti-small diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 36cb02659a..7f199d1483 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -202,12 +202,12 @@ def create_enterprise_auth_variants(): for host in ["rhel8", "macos", "win64"]: expansions = dict(TEST_NAME="enterprise_auth", AUTH="auth") display_name = get_variant_name("Auth Enterprise", host) - tasks = [".test-non-standard .auth !.free-threaded"] + tasks = [".test-standard-auth .auth !.free-threaded"] # https://jira.mongodb.org/browse/PYTHON-5586 if host == "macos": - tasks = [".test-non-standard !.pypy .auth !.free-threaded"] + tasks = [".test-standard-auth !.pypy .auth !.free-threaded"] if host == "win64": - tasks = [".test-non-standard !.pypy .auth"] + tasks = [".test-standard-auth !.pypy .auth"] variant = create_variant(tasks, display_name, host=host, expansions=expansions) variants.append(variant) return variants @@ -576,7 +576,7 @@ def create_test_non_standard_tasks(): for (version, topology), python in zip_cycle(list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS): pr = version == "latest" task_combos.add((version, topology, python, pr)) - # For each PyPy and topology, rotate through the the versions. + # For each PyPy and topology, rotate through the MongoDB versions. for (python, topology), version in zip_cycle(list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS): task_combos.add((version, topology, python, False)) for version, topology, python, pr in sorted(task_combos): @@ -604,6 +604,46 @@ def create_test_non_standard_tasks(): return tasks +def create_test_standard_auth_tasks(): + """We only use auth on sharded clusters""" + tasks = [] + task_combos = set() + # Rotate through the CPython and MongoDB versions + for (version, topology), python in zip_cycle( + list(product(ALL_VERSIONS, ["sharded_cluster"])), CPYTHONS + ): + pr = version == "latest" + task_combos.add((version, topology, python, pr)) + # Rotate through each PyPy and MongoDB versions. + for (python, topology), version in zip_cycle( + list(product(PYPYS, ["sharded_cluster"])), ALL_VERSIONS + ): + task_combos.add((version, topology, python, False)) + for version, topology, python, pr in sorted(task_combos): + auth, ssl = get_standard_auth_ssl(topology) + tags = [ + "test-standard-auth", + f"server-{version}", + f"python-{python}", + f"{topology}-{auth}-{ssl}", + auth, + ] + if "t" in python: + tags.append("free-threaded") + if python in PYPYS: + tags.append("pypy") + if pr: + tags.append("pr") + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) + name = get_task_name("test-standard-auth", python=python, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + def create_min_deps_tasks(): """For variants that support testing with minimum dependencies.""" tasks = [] From 5eb1edf3151711bf94733165bdd9372fd4a4b4c5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 8 Oct 2025 07:36:44 -0500 Subject: [PATCH 2100/2111] PYTHON-5609 Add 4.15.3 release to changelog (#2585) --- doc/changelog.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/doc/changelog.rst b/doc/changelog.rst index 82df4cdb07..f3eb4f6f23 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -17,6 +17,24 @@ PyMongo 4.16 brings a number of changes including: - Removed support for Eventlet. Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. +Changes in Version 4.15.3 (2025/10/07) +-------------------------------------- + +Version 4.15.3 is a bug fix release. + +- Fixed a memory leak when raising :class:`bson.errors.InvalidDocument` with C extensions. +- Fixed the return type of the :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct`, + :meth:`~pymongo.synchronous.collection.Collection.distinct`, :meth:`pymongo.asynchronous.cursor.AsyncCursor.distinct`, + and :meth:`pymongo.asynchronous.cursor.AsyncCursor.distinct` methods. + +Issues Resolved +............... + +See the `PyMongo 4.15.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47293 + Changes in Version 4.15.2 (2025/10/01) -------------------------------------- From 6d91859659f830b9804d02dc9c437e81c30fa5d7 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 8 Oct 2025 12:26:16 -0500 Subject: [PATCH 2101/2111] PYTHON-5611 Fix python binary usage for Other Hosts (#2586) --- .evergreen/generated_configs/variants.yml | 6 +----- .evergreen/scripts/generate_config.py | 5 +++-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index aae221adf5..9bae5f4680 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -9,9 +9,9 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest - PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" REQUIRE_FIPS: "1" + PYTHON_BINARY: /usr/bin/python3.11 tags: [] - name: other-hosts-rhel8-zseries-latest tasks: @@ -22,7 +22,6 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest - PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [] - name: other-hosts-rhel8-power8-latest @@ -34,7 +33,6 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest - PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [] - name: other-hosts-rhel8-arm64-latest @@ -46,7 +44,6 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest - PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [] - name: other-hosts-amazon2023-latest @@ -58,7 +55,6 @@ buildvariants: batchtime: 1440 expansions: VERSION: latest - PYTHON_BINARY: /usr/bin/python3.11 NO_EXT: "1" tags: [pr] diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 7f199d1483..daec0841d5 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -470,13 +470,14 @@ def create_alternative_hosts_variants(): version = "latest" for host_name in OTHER_HOSTS: - # Use explicit Python 3.11 binary on the host since the default python3 is 3.9. - expansions = dict(VERSION="latest", PYTHON_BINARY="/usr/bin/python3.11") + expansions = dict(VERSION="latest") handle_c_ext(C_EXTS[0], expansions) host = HOSTS[host_name] tags = [] if "fips" in host_name.lower(): expansions["REQUIRE_FIPS"] = "1" + # Use explicit Python 3.11 binary on the host since the default python3 is 3.9. + expansions["PYTHON_BINARY"] = "/usr/bin/python3.11" if "amazon" in host_name.lower(): tags.append("pr") variants.append( From 6a796c866813ff0308c3741e6e90d993ffd59a83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Oct 2025 11:27:01 -0500 Subject: [PATCH 2102/2111] Bump furo from 2025.7.19 to 2025.9.25 (#2565) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester --- requirements/docs.txt | 2 +- uv.lock | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements/docs.txt b/requirements/docs.txt index 5543a62695..54ebf3625d 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -3,4 +3,4 @@ sphinx_rtd_theme>=2,<4 readthedocs-sphinx-search~=0.3 sphinxcontrib-shellcheck>=1,<2 sphinx-autobuild>=2020.9.1 -furo==2025.7.19 +furo==2025.9.25 diff --git a/uv.lock b/uv.lock index 480f64c263..f9a389c896 100644 --- a/uv.lock +++ b/uv.lock @@ -633,7 +633,7 @@ wheels = [ [[package]] name = "furo" -version = "2025.7.19" +version = "2025.9.25" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "accessible-pygments" }, @@ -644,9 +644,9 @@ dependencies = [ { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "sphinx-basic-ng" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/69/312cd100fa45ddaea5a588334d2defa331ff427bcb61f5fe2ae61bdc3762/furo-2025.7.19.tar.gz", hash = "sha256:4164b2cafcf4023a59bb3c594e935e2516f6b9d35e9a5ea83d8f6b43808fe91f", size = 1662054, upload-time = "2025-07-19T10:52:09.754Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/29/ff3b83a1ffce74676043ab3e7540d398e0b1ce7660917a00d7c4958b93da/furo-2025.9.25.tar.gz", hash = "sha256:3eac05582768fdbbc2bdfa1cdbcdd5d33cfc8b4bd2051729ff4e026a1d7e0a98", size = 1662007, upload-time = "2025-09-25T21:37:19.221Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3a/34/2b07b72bee02a63241d654f5d8af87a2de977c59638eec41ca356ab915cd/furo-2025.7.19-py3-none-any.whl", hash = "sha256:bdea869822dfd2b494ea84c0973937e35d1575af088b6721a29c7f7878adc9e3", size = 342175, upload-time = "2025-07-19T10:52:02.399Z" }, + { url = "https://files.pythonhosted.org/packages/ba/69/964b55f389c289e16ba2a5dfe587c3c462aac09e24123f09ddf703889584/furo-2025.9.25-py3-none-any.whl", hash = "sha256:2937f68e823b8e37b410c972c371bc2b1d88026709534927158e0cb3fac95afe", size = 340409, upload-time = "2025-09-25T21:37:17.244Z" }, ] [[package]] @@ -1164,7 +1164,7 @@ requires-dist = [ { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')", specifier = ">=2023.7.22" }, { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, { name = "dnspython", specifier = ">=2.6.1,<3.0.0" }, - { name = "furo", marker = "extra == 'docs'", specifier = "==2025.7.19" }, + { name = "furo", marker = "extra == 'docs'", specifier = "==2025.9.25" }, { name = "importlib-metadata", marker = "python_full_version < '3.13' and extra == 'test'", specifier = ">=7.0" }, { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, From faa77eab43b16ba5ee70c10430948a1502f94e8a Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 21 Oct 2025 13:06:41 -0400 Subject: [PATCH 2103/2111] [Task] PYTHON-5561: Add support for PyPy 3.11 (#2596) --- .evergreen/generated_configs/tasks.yml | 168 ++++++++++---------- .evergreen/scripts/generate_config_utils.py | 2 +- .github/workflows/test-python.yml | 2 +- doc/changelog.rst | 4 +- 4 files changed, 88 insertions(+), 88 deletions(-) diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 855cbefef8..9a7f16f543 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -319,13 +319,13 @@ tasks: vars: PYTHON_VERSION: "3.14" tags: [test-no-orchestration, python-3.14] - - name: test-no-orchestration-pypy3.10 + - name: test-no-orchestration-pypy3.11 commands: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: pypy3.10 - tags: [test-no-orchestration, python-pypy3.10] + PYTHON_VERSION: pypy3.11 + tags: [test-no-orchestration, python-pypy3.11] # No toolchain tests - name: test-no-toolchain-sync-noauth-nossl-standalone @@ -2515,7 +2515,7 @@ tasks: - replica_set-noauth-nossl - sync - pr - - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set + - name: test-server-version-pypy3.11-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -2527,11 +2527,11 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - async - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov @@ -2722,7 +2722,7 @@ tasks: - sharded_cluster-auth-ssl - async - free-threaded - - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-server-version-pypy3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -2734,11 +2734,11 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - async - name: test-server-version-python3.10-sync-auth-ssl-sharded-cluster-cov @@ -2867,7 +2867,7 @@ tasks: - sharded_cluster-auth-ssl - sync - free-threaded - - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster + - name: test-server-version-pypy3.11-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -2879,11 +2879,11 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - sync - name: test-server-version-python3.12-async-noauth-nossl-sharded-cluster-cov @@ -2949,7 +2949,7 @@ tasks: - python-3.10 - sharded_cluster-noauth-ssl - async - - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster + - name: test-server-version-pypy3.11-sync-noauth-ssl-sharded-cluster commands: - func: run server vars: @@ -2961,11 +2961,11 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-noauth-ssl - sync - name: test-server-version-python3.13-async-auth-nossl-standalone-cov @@ -3074,7 +3074,7 @@ tasks: - standalone-noauth-nossl - async - pr - - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone + - name: test-server-version-pypy3.11-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3086,11 +3086,11 @@ tasks: AUTH: noauth SSL: nossl TOPOLOGY: standalone - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - sync - pr @@ -3203,7 +3203,7 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - sync - - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster + - name: test-standard-v4.2-pypy3.11-sync-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3217,12 +3217,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard - server-4.2 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - sync - pypy @@ -3337,7 +3337,7 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - async - - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster + - name: test-standard-v4.4-pypy3.11-async-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -3351,12 +3351,12 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard - server-4.4 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - async - pypy @@ -3716,7 +3716,7 @@ tasks: - python-3.12 - standalone-noauth-nossl - sync - - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone + - name: test-standard-v7.0-pypy3.11-sync-noauth-nossl-standalone commands: - func: run server vars: @@ -3730,12 +3730,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard - server-7.0 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - sync - pypy @@ -3828,7 +3828,7 @@ tasks: - python-3.12 - standalone-noauth-nossl - async - - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone + - name: test-standard-v8.0-pypy3.11-async-noauth-nossl-standalone commands: - func: run server vars: @@ -3842,12 +3842,12 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard - server-8.0 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - async - pypy @@ -3874,7 +3874,7 @@ tasks: - replica_set-noauth-ssl - async - pr - - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set + - name: test-standard-latest-pypy3.11-async-noauth-ssl-replica-set commands: - func: run server vars: @@ -3888,12 +3888,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard - server-latest - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - async - pypy @@ -3988,7 +3988,7 @@ tasks: - python-3.12 - replica_set-noauth-ssl - sync - - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set + - name: test-standard-rapid-pypy3.11-sync-noauth-ssl-replica-set commands: - func: run server vars: @@ -4002,12 +4002,12 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard - server-rapid - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - sync - pypy @@ -4142,7 +4142,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - noauth - - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-v4.2-pypy3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4156,11 +4156,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-4.2 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - noauth - pypy @@ -4186,7 +4186,7 @@ tasks: - replica_set-noauth-ssl - noauth - free-threaded - - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-v4.4-pypy3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4200,11 +4200,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-4.4 - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - noauth - pypy @@ -4292,7 +4292,7 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v5.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4306,11 +4306,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-5.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -4399,7 +4399,7 @@ tasks: - python-3.13 - standalone-noauth-nossl - noauth - - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-v6.0-pypy3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4413,11 +4413,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-6.0 - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - noauth - pypy @@ -4442,7 +4442,7 @@ tasks: - python-3.11 - replica_set-noauth-ssl - noauth - - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-v7.0-pypy3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4456,11 +4456,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-7.0 - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - noauth - pypy @@ -4549,7 +4549,7 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-non-standard-v8.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4563,11 +4563,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-8.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -4615,7 +4615,7 @@ tasks: - noauth - free-threaded - pr - - name: test-non-standard-latest-pypy3.10-noauth-ssl-replica-set + - name: test-non-standard-latest-pypy3.11-noauth-ssl-replica-set commands: - func: run server vars: @@ -4629,11 +4629,11 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-latest - - python-pypy3.10 + - python-pypy3.11 - replica_set-noauth-ssl - noauth - pypy @@ -4744,7 +4744,7 @@ tasks: - python-3.10 - standalone-noauth-nossl - noauth - - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone + - name: test-non-standard-rapid-pypy3.11-noauth-nossl-standalone commands: - func: run server vars: @@ -4758,11 +4758,11 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-non-standard - server-rapid - - python-pypy3.10 + - python-pypy3.11 - standalone-noauth-nossl - noauth - pypy @@ -4789,7 +4789,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - auth - - name: test-standard-auth-v4.2-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-v4.2-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4803,11 +4803,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-4.2 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -4832,7 +4832,7 @@ tasks: - python-3.11 - sharded_cluster-auth-ssl - auth - - name: test-standard-auth-v4.4-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-v4.4-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4846,11 +4846,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-4.4 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -4875,7 +4875,7 @@ tasks: - python-3.12 - sharded_cluster-auth-ssl - auth - - name: test-standard-auth-v5.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-v5.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4889,11 +4889,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-5.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -4918,7 +4918,7 @@ tasks: - python-3.13 - sharded_cluster-auth-ssl - auth - - name: test-standard-auth-v6.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-v6.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4932,11 +4932,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-6.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -4962,7 +4962,7 @@ tasks: - sharded_cluster-auth-ssl - auth - free-threaded - - name: test-standard-auth-v7.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-v7.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -4976,11 +4976,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-7.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -5005,7 +5005,7 @@ tasks: - python-3.14 - sharded_cluster-auth-ssl - auth - - name: test-standard-auth-v8.0-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-v8.0-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -5019,11 +5019,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-8.0 - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -5049,7 +5049,7 @@ tasks: - sharded_cluster-auth-ssl - auth - pr - - name: test-standard-auth-latest-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-latest-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -5063,11 +5063,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-latest - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy @@ -5092,7 +5092,7 @@ tasks: - python-3.10 - sharded_cluster-auth-ssl - auth - - name: test-standard-auth-rapid-pypy3.10-auth-ssl-sharded-cluster + - name: test-standard-auth-rapid-pypy3.11-auth-ssl-sharded-cluster commands: - func: run server vars: @@ -5106,11 +5106,11 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: pypy3.10 + PYTHON_VERSION: pypy3.11 tags: - test-standard-auth - server-rapid - - python-pypy3.10 + - python-pypy3.11 - sharded_cluster-auth-ssl - auth - pypy diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 4eb6bcb0dc..28ee45f9b7 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -23,7 +23,7 @@ ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] CPYTHONS = ["3.10", "3.11", "3.12", "3.13", "3.14t", "3.14"] -PYPYS = ["pypy3.10"] +PYPYS = ["pypy3.11"] ALL_PYTHONS = CPYTHONS + PYPYS MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] BATCHTIME_WEEK = 10080 diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index a057570f3f..4685ba2d92 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -59,7 +59,7 @@ jobs: matrix: # Tests currently only pass on ubuntu on GitHub Actions. os: [ubuntu-latest] - python-version: ["3.10", "pypy-3.10", "3.13t"] + python-version: ["3.10", "pypy-3.11", "3.13t"] mongodb-version: ["8.0"] name: CPython ${{ matrix.python-version }}-${{ matrix.os }} diff --git a/doc/changelog.rst b/doc/changelog.rst index f3eb4f6f23..47d3fafd66 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -6,9 +6,9 @@ Changes in Version 4.16.0 (XXXX/XX/XX) PyMongo 4.16 brings a number of changes including: -.. warning:: PyMongo 4.16 drops support for Python 3.9: Python 3.10+ is now required. +.. warning:: PyMongo 4.16 drops support for Python 3.9 and PyPy 3.10: Python 3.10+ or PyPy 3.11+ is now required. -- Dropped support for Python 3.9. +- Dropped support for Python 3.9 and PyPy 3.10. - Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages as doing so may leak sensitive user data. Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`. From ad1167d01ea03780bae1fdb66ddc88283ce9a983 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Tue, 21 Oct 2025 15:57:36 -0400 Subject: [PATCH 2104/2111] [Task]-PYTHON-5626: Remove project.license toml table (#2595) --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 623eb6c164..ef7140eddf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,8 @@ name = "pymongo" dynamic = ["version", "dependencies", "optional-dependencies"] description = "PyMongo - the Official MongoDB Python driver" readme = "README.md" -license = {file="LICENSE"} +license = "Apache-2.0" +license-files = ["LICENSE"] requires-python = ">=3.9" authors = [ { name = "The MongoDB Python Team" }, @@ -22,7 +23,6 @@ keywords = [ classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Operating System :: POSIX", From a5f6d638b9e2243cb03202c996f54b2d56022aca Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Wed, 22 Oct 2025 17:22:22 -0500 Subject: [PATCH 2105/2111] PYTHON-5615 Use uv python when python toolchain is not available (#2597) --- .evergreen/combine-coverage.sh | 19 +- .evergreen/generated_configs/functions.yml | 8 +- .evergreen/generated_configs/tasks.yml | 631 ++++++++++---------- .evergreen/generated_configs/variants.yml | 10 +- .evergreen/run-mongodb-aws-ecs-test.sh | 12 +- .evergreen/run-tests.sh | 2 + .evergreen/scripts/check-import-time.sh | 10 +- .evergreen/scripts/generate_config.py | 41 +- .evergreen/scripts/generate_config_utils.py | 26 - .evergreen/scripts/install-dependencies.sh | 57 +- .evergreen/scripts/kms_tester.py | 4 +- .evergreen/scripts/oidc_tester.py | 2 +- .evergreen/scripts/run_tests.py | 13 +- .evergreen/scripts/setup-dev-env.sh | 75 ++- .evergreen/scripts/setup-system.sh | 6 +- .evergreen/scripts/setup-uv-python.sh | 53 ++ .evergreen/scripts/setup_tests.py | 3 +- .evergreen/setup-spawn-host.sh | 3 +- .evergreen/utils.sh | 148 ----- CONTRIBUTING.md | 14 +- justfile | 1 - 21 files changed, 484 insertions(+), 654 deletions(-) create mode 100755 .evergreen/scripts/setup-uv-python.sh delete mode 100755 .evergreen/utils.sh diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh index 36266c1842..5f04f72adb 100755 --- a/.evergreen/combine-coverage.sh +++ b/.evergreen/combine-coverage.sh @@ -5,19 +5,12 @@ set -eu -. .evergreen/utils.sh +# Set up the virtual env. +. .evergreen/scripts/setup-dev-env.sh +uv sync --group coverage +source .venv/bin/activate -if [ -z "${PYTHON_BINARY:-}" ]; then - PYTHON_BINARY=$(find_python3) -fi - -createvirtualenv "$PYTHON_BINARY" covenv -# Keep in sync with run-tests.sh -# coverage >=5 is needed for relative_files=true. -pip install -q "coverage[toml]>=5,<=7.5" - -pip list ls -la coverage/ -python -m coverage combine coverage/coverage.* -python -m coverage html -d htmlcov +coverage combine coverage/coverage.* +coverage html -d htmlcov diff --git a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml index ce95648849..4b53ac6ac8 100644 --- a/.evergreen/generated_configs/functions.yml +++ b/.evergreen/generated_configs/functions.yml @@ -101,8 +101,8 @@ functions: - AUTH - SSL - ORCHESTRATION_FILE - - PYTHON_BINARY - - PYTHON_VERSION + - UV_PYTHON + - TOOLCHAIN_VERSION - STORAGE_ENGINE - REQUIRE_API_VERSION - DRIVERS_TOOLS @@ -134,10 +134,10 @@ functions: - AWS_SECRET_ACCESS_KEY - AWS_SESSION_TOKEN - COVERAGE - - PYTHON_BINARY + - UV_PYTHON - LIBMONGOCRYPT_URL - MONGODB_URI - - PYTHON_VERSION + - TOOLCHAIN_VERSION - DISABLE_TEST_COMMANDS - GREEN_FRAMEWORK - NO_EXT diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml index 9a7f16f543..cb7ae1e6c9 100644 --- a/.evergreen/generated_configs/tasks.yml +++ b/.evergreen/generated_configs/tasks.yml @@ -20,7 +20,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: regular - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: [auth-aws, auth-aws-regular] - name: test-auth-aws-5.0-assume-role-python3.11 commands: @@ -33,7 +33,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: assume-role - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: [auth-aws, auth-aws-assume-role] - name: test-auth-aws-6.0-ec2-python3.12 commands: @@ -46,7 +46,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ec2 - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: [auth-aws, auth-aws-ec2] - name: test-auth-aws-7.0-env-creds-python3.13 commands: @@ -59,7 +59,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: env-creds - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: [auth-aws, auth-aws-env-creds] - name: test-auth-aws-8.0-session-creds-python3.14t commands: @@ -72,7 +72,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: session-creds - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t tags: [auth-aws, auth-aws-session-creds, free-threaded] - name: test-auth-aws-rapid-web-identity-python3.14 commands: @@ -85,7 +85,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-rapid-web-identity-session-name-python3.14 commands: @@ -99,7 +99,7 @@ tasks: TEST_NAME: auth_aws SUB_TEST_NAME: web-identity AWS_ROLE_SESSION_NAME: test - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: [auth-aws, auth-aws-web-identity] - name: test-auth-aws-latest-ecs-python3.10 commands: @@ -112,7 +112,7 @@ tasks: vars: TEST_NAME: auth_aws SUB_TEST_NAME: ecs - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: [auth-aws, auth-aws-ecs] # Backport pr tests @@ -248,60 +248,60 @@ tasks: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: [mod_wsgi, pr] - name: mod-wsgi-embedded-mode-replica-set-python3.11 commands: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: [mod_wsgi, pr] - name: mod-wsgi-replica-set-python3.12 commands: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: standalone - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: [mod_wsgi, pr] - name: mod-wsgi-embedded-mode-replica-set-python3.13 commands: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: [mod_wsgi, pr] - name: mod-wsgi-embedded-mode-replica-set-python3.14 commands: - func: run server vars: TOPOLOGY: replica_set - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" - func: run tests vars: TEST_NAME: mod_wsgi SUB_TEST_NAME: embedded - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: [mod_wsgi, pr] # No orchestration tests @@ -310,21 +310,21 @@ tasks: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: [test-no-orchestration, python-3.10] - name: test-no-orchestration-python3.14 commands: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: [test-no-orchestration, python-3.14] - name: test-no-orchestration-pypy3.11 commands: - func: assume ec2 role - func: run tests vars: - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: [test-no-orchestration, python-pypy3.11] # No toolchain tests @@ -379,7 +379,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v5.0-python3.10 @@ -389,7 +389,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v6.0-python3.10 @@ -399,7 +399,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v7.0-python3.10 @@ -409,7 +409,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v8.0-python3.10 @@ -419,7 +419,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-rapid-python3.10 @@ -429,7 +429,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.14 @@ -439,7 +439,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.10 @@ -449,7 +449,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v5.0-python3.10 @@ -459,7 +459,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v6.0-python3.10 @@ -469,7 +469,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v7.0-python3.10 @@ -479,7 +479,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v8.0-python3.10 @@ -489,7 +489,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-rapid-python3.10 @@ -499,7 +499,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.14 @@ -509,7 +509,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 @@ -519,7 +519,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 @@ -529,7 +529,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 @@ -539,7 +539,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 @@ -549,7 +549,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 @@ -559,7 +559,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 @@ -569,7 +569,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 @@ -579,7 +579,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 @@ -589,7 +589,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 @@ -599,7 +599,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 @@ -609,7 +609,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 @@ -619,7 +619,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 @@ -629,7 +629,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 @@ -639,7 +639,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 @@ -649,7 +649,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.10 @@ -659,7 +659,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-soft-fail-v5.0-python3.10 @@ -669,7 +669,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-soft-fail-v6.0-python3.10 @@ -679,7 +679,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-soft-fail-v7.0-python3.10 @@ -689,7 +689,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-soft-fail-v8.0-python3.10 @@ -699,7 +699,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-soft-fail-rapid-python3.10 @@ -709,7 +709,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-soft-fail-latest-python3.14 @@ -719,7 +719,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.10 @@ -729,7 +729,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -743,7 +743,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -757,7 +757,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -771,7 +771,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -785,7 +785,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -799,7 +799,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -813,7 +813,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -827,7 +827,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -841,7 +841,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -855,7 +855,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -869,7 +869,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -883,7 +883,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -897,7 +897,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -911,7 +911,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -925,7 +925,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -939,7 +939,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -953,7 +953,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -967,7 +967,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -981,7 +981,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -995,7 +995,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1009,7 +1009,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1023,7 +1023,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -1037,7 +1037,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -1051,7 +1051,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -1065,7 +1065,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -1079,7 +1079,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -1093,7 +1093,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1107,7 +1107,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1121,7 +1121,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 @@ -1131,7 +1131,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 @@ -1141,7 +1141,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 @@ -1151,7 +1151,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 @@ -1161,7 +1161,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 @@ -1171,7 +1171,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -1181,7 +1181,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 @@ -1191,7 +1191,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 @@ -1201,7 +1201,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 @@ -1211,7 +1211,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 @@ -1221,7 +1221,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 @@ -1231,7 +1231,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 @@ -1241,7 +1241,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -1251,7 +1251,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 @@ -1261,7 +1261,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-ecdsa, "4.4"] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 @@ -1271,7 +1271,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-ecdsa, "5.0"] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 @@ -1281,7 +1281,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-ecdsa, "6.0"] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 @@ -1291,7 +1291,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-ecdsa, "7.0"] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 @@ -1301,7 +1301,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-ecdsa, "8.0"] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 @@ -1311,7 +1311,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-ecdsa, rapid] - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 @@ -1321,7 +1321,7 @@ tasks: ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-ecdsa, latest] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.10 @@ -1331,7 +1331,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v5.0-python3.10 @@ -1341,7 +1341,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v6.0-python3.10 @@ -1351,7 +1351,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v7.0-python3.10 @@ -1361,7 +1361,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v8.0-python3.10 @@ -1371,7 +1371,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-rapid-python3.10 @@ -1381,7 +1381,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.14 @@ -1391,7 +1391,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.10 @@ -1401,7 +1401,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v5.0-python3.10 @@ -1411,7 +1411,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v6.0-python3.10 @@ -1421,7 +1421,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v7.0-python3.10 @@ -1431,7 +1431,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v8.0-python3.10 @@ -1441,7 +1441,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-rapid-python3.10 @@ -1451,7 +1451,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.14 @@ -1461,7 +1461,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 @@ -1471,7 +1471,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 @@ -1481,7 +1481,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 @@ -1491,7 +1491,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 @@ -1501,7 +1501,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 @@ -1511,7 +1511,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 @@ -1521,7 +1521,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 @@ -1531,7 +1531,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 @@ -1541,7 +1541,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 @@ -1551,7 +1551,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 @@ -1561,7 +1561,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 @@ -1571,7 +1571,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 @@ -1581,7 +1581,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 @@ -1591,7 +1591,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 @@ -1601,7 +1601,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-soft-fail-v4.4-python3.10 @@ -1611,7 +1611,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-soft-fail-v5.0-python3.10 @@ -1621,7 +1621,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-soft-fail-v6.0-python3.10 @@ -1631,7 +1631,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-soft-fail-v7.0-python3.10 @@ -1641,7 +1641,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-soft-fail-v8.0-python3.10 @@ -1651,7 +1651,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-soft-fail-rapid-python3.10 @@ -1661,7 +1661,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-soft-fail-latest-python3.14 @@ -1671,7 +1671,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.10 @@ -1681,7 +1681,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -1695,7 +1695,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -1709,7 +1709,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -1723,7 +1723,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -1737,7 +1737,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -1751,7 +1751,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1765,7 +1765,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1779,7 +1779,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -1793,7 +1793,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -1807,7 +1807,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -1821,7 +1821,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -1835,7 +1835,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -1849,7 +1849,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1863,7 +1863,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1877,7 +1877,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -1891,7 +1891,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -1905,7 +1905,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -1919,7 +1919,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -1933,7 +1933,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -1947,7 +1947,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -1961,7 +1961,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: valid-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -1975,7 +1975,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: - ocsp @@ -1989,7 +1989,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: - ocsp @@ -2003,7 +2003,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: - ocsp @@ -2017,7 +2017,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: - ocsp @@ -2031,7 +2031,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: - ocsp @@ -2045,7 +2045,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: - ocsp @@ -2059,7 +2059,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: - ocsp @@ -2073,7 +2073,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 @@ -2083,7 +2083,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 @@ -2093,7 +2093,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 @@ -2103,7 +2103,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 @@ -2113,7 +2113,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 @@ -2123,7 +2123,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -2133,7 +2133,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 @@ -2143,7 +2143,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 @@ -2153,7 +2153,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 @@ -2163,7 +2163,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 @@ -2173,7 +2173,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 @@ -2183,7 +2183,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 @@ -2193,7 +2193,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 @@ -2203,7 +2203,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: revoked-delegate TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 @@ -2213,7 +2213,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "4.4" tags: [ocsp, ocsp-rsa, "4.4"] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 @@ -2223,7 +2223,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "5.0" tags: [ocsp, ocsp-rsa, "5.0"] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 @@ -2233,7 +2233,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "6.0" tags: [ocsp, ocsp-rsa, "6.0"] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 @@ -2243,7 +2243,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "7.0" tags: [ocsp, ocsp-rsa, "7.0"] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 @@ -2253,7 +2253,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: "8.0" tags: [ocsp, ocsp-rsa, "8.0"] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 @@ -2263,7 +2263,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" VERSION: rapid tags: [ocsp, ocsp-rsa, rapid] - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 @@ -2273,7 +2273,7 @@ tasks: ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json OCSP_SERVER_TYPE: no-responder TEST_NAME: ocsp - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" VERSION: latest tags: [ocsp, ocsp-rsa, latest] @@ -2385,6 +2385,7 @@ tasks: - func: run tests vars: TEST_NAME: search_index + TOOLCHAIN_VERSION: "3.10" tags: [search_index] # Server version tests @@ -2400,7 +2401,7 @@ tasks: AUTH: auth SSL: nossl TOPOLOGY: replica_set - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_async tags: - server-version @@ -2422,7 +2423,7 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_sync tags: - server-version @@ -2443,7 +2444,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_async tags: - server-version @@ -2464,7 +2465,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version @@ -2485,7 +2486,7 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_async tags: - server-version @@ -2507,7 +2508,7 @@ tasks: SSL: nossl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version @@ -2527,7 +2528,7 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: replica_set - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_async tags: - server-version @@ -2548,7 +2549,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set COVERAGE: "1" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version @@ -2569,7 +2570,7 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_async tags: - server-version @@ -2588,7 +2589,7 @@ tasks: AUTH: auth SSL: nossl TOPOLOGY: sharded_cluster - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_sync tags: - server-version @@ -2610,7 +2611,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_async tags: - server-version @@ -2632,7 +2633,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_async tags: - server-version @@ -2653,7 +2654,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_async tags: - server-version @@ -2674,7 +2675,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_async tags: - server-version @@ -2695,7 +2696,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_async tags: - server-version @@ -2714,7 +2715,7 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_async tags: - server-version @@ -2734,7 +2735,7 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_async tags: - server-version @@ -2755,7 +2756,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version @@ -2777,7 +2778,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version @@ -2798,7 +2799,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version @@ -2819,7 +2820,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_sync tags: - server-version @@ -2840,7 +2841,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_sync tags: - server-version @@ -2859,7 +2860,7 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_sync tags: - server-version @@ -2879,7 +2880,7 @@ tasks: AUTH: auth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version @@ -2900,7 +2901,7 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_async tags: - server-version @@ -2921,7 +2922,7 @@ tasks: SSL: nossl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_sync tags: - server-version @@ -2942,7 +2943,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster COVERAGE: "1" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_async tags: - server-version @@ -2961,7 +2962,7 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: sharded_cluster - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version @@ -2982,7 +2983,7 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_async tags: - server-version @@ -3003,7 +3004,7 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_sync tags: - server-version @@ -3024,7 +3025,7 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_async tags: - server-version @@ -3045,7 +3046,7 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_sync tags: - server-version @@ -3066,7 +3067,7 @@ tasks: SSL: nossl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_async tags: - server-version @@ -3086,7 +3087,7 @@ tasks: AUTH: noauth SSL: nossl TOPOLOGY: standalone - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_sync tags: - server-version @@ -3108,7 +3109,7 @@ tasks: SSL: ssl TOPOLOGY: standalone COVERAGE: "1" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_async tags: - server-version @@ -3127,7 +3128,7 @@ tasks: AUTH: noauth SSL: ssl TOPOLOGY: standalone - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_sync tags: - server-version @@ -3151,7 +3152,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard @@ -3173,7 +3174,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard @@ -3195,7 +3196,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard @@ -3217,7 +3218,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard @@ -3240,7 +3241,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard @@ -3262,7 +3263,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_sync tags: - test-standard @@ -3285,7 +3286,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard @@ -3307,7 +3308,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard @@ -3329,7 +3330,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard @@ -3351,7 +3352,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard @@ -3374,7 +3375,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard @@ -3396,7 +3397,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_async tags: - test-standard @@ -3419,7 +3420,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard @@ -3441,7 +3442,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_sync tags: - test-standard @@ -3464,7 +3465,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard @@ -3486,7 +3487,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard @@ -3508,7 +3509,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard @@ -3530,7 +3531,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard @@ -3552,7 +3553,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_async tags: - test-standard @@ -3575,7 +3576,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard @@ -3597,7 +3598,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard @@ -3619,7 +3620,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard @@ -3641,7 +3642,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard @@ -3663,7 +3664,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_sync tags: - test-standard @@ -3685,7 +3686,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_sync tags: - test-standard @@ -3708,7 +3709,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard @@ -3730,7 +3731,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard @@ -3753,7 +3754,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard @@ -3775,7 +3776,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" TEST_NAME: default_async tags: - test-standard @@ -3797,7 +3798,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t TEST_NAME: default_async tags: - test-standard @@ -3820,7 +3821,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard @@ -3842,7 +3843,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard @@ -3865,7 +3866,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_async tags: - test-standard @@ -3888,7 +3889,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_async tags: - test-standard @@ -3911,7 +3912,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_async tags: - test-standard @@ -3934,7 +3935,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_async tags: - test-standard @@ -3957,7 +3958,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_async tags: - test-standard @@ -3980,7 +3981,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" TEST_NAME: default_sync tags: - test-standard @@ -4002,7 +4003,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 TEST_NAME: default_sync tags: - test-standard @@ -4025,7 +4026,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" TEST_NAME: default_sync tags: - test-standard @@ -4047,7 +4048,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" TEST_NAME: default_sync tags: - test-standard @@ -4069,7 +4070,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" TEST_NAME: default_sync tags: - test-standard @@ -4093,7 +4094,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.2" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: - test-non-standard - server-4.2 @@ -4114,7 +4115,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: - test-non-standard - server-4.2 @@ -4135,7 +4136,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: - test-non-standard - server-4.2 @@ -4156,7 +4157,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.2" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-4.2 @@ -4178,7 +4179,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t tags: - test-non-standard - server-4.4 @@ -4200,7 +4201,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "4.4" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-4.4 @@ -4222,7 +4223,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: - test-non-standard - server-4.4 @@ -4243,7 +4244,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "4.4" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: - test-non-standard - server-4.4 @@ -4264,7 +4265,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "5.0" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: - test-non-standard - server-5.0 @@ -4285,7 +4286,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: - test-non-standard - server-5.0 @@ -4306,7 +4307,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-5.0 @@ -4328,7 +4329,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "5.0" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: - test-non-standard - server-5.0 @@ -4349,7 +4350,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "6.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t tags: - test-non-standard - server-6.0 @@ -4371,7 +4372,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: - test-non-standard - server-6.0 @@ -4392,7 +4393,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: - test-non-standard - server-6.0 @@ -4413,7 +4414,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "6.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-6.0 @@ -4435,7 +4436,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: - test-non-standard - server-7.0 @@ -4456,7 +4457,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "7.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-7.0 @@ -4478,7 +4479,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: - test-non-standard - server-7.0 @@ -4499,7 +4500,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "7.0" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: - test-non-standard - server-7.0 @@ -4520,7 +4521,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: "8.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t tags: - test-non-standard - server-8.0 @@ -4542,7 +4543,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: - test-non-standard - server-8.0 @@ -4563,7 +4564,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-8.0 @@ -4585,7 +4586,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: "8.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: - test-non-standard - server-8.0 @@ -4606,7 +4607,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t tags: - test-non-standard - server-latest @@ -4629,7 +4630,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: latest - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-latest @@ -4651,7 +4652,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: - test-non-standard - server-latest @@ -4673,7 +4674,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: latest - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: - test-non-standard - server-latest @@ -4695,7 +4696,7 @@ tasks: SSL: ssl TOPOLOGY: replica_set VERSION: rapid - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: - test-non-standard - server-rapid @@ -4716,7 +4717,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: - test-non-standard - server-rapid @@ -4737,7 +4738,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: - test-non-standard - server-rapid @@ -4758,7 +4759,7 @@ tasks: SSL: nossl TOPOLOGY: standalone VERSION: rapid - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-non-standard - server-rapid @@ -4782,7 +4783,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: - test-standard-auth - server-4.2 @@ -4803,7 +4804,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.2" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-4.2 @@ -4825,7 +4826,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: - test-standard-auth - server-4.4 @@ -4846,7 +4847,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "4.4" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-4.4 @@ -4868,7 +4869,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: "3.12" + TOOLCHAIN_VERSION: "3.12" tags: - test-standard-auth - server-5.0 @@ -4889,7 +4890,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "5.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-5.0 @@ -4911,7 +4912,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: "3.13" + TOOLCHAIN_VERSION: "3.13" tags: - test-standard-auth - server-6.0 @@ -4932,7 +4933,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "6.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-6.0 @@ -4954,7 +4955,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: 3.14t + TOOLCHAIN_VERSION: 3.14t tags: - test-standard-auth - server-7.0 @@ -4976,7 +4977,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "7.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-7.0 @@ -4998,7 +4999,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: "3.14" + TOOLCHAIN_VERSION: "3.14" tags: - test-standard-auth - server-8.0 @@ -5019,7 +5020,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: "8.0" - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-8.0 @@ -5041,7 +5042,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: "3.11" + TOOLCHAIN_VERSION: "3.11" tags: - test-standard-auth - server-latest @@ -5063,7 +5064,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: latest - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-latest @@ -5085,7 +5086,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: "3.10" + TOOLCHAIN_VERSION: "3.10" tags: - test-standard-auth - server-rapid @@ -5106,7 +5107,7 @@ tasks: SSL: ssl TOPOLOGY: sharded_cluster VERSION: rapid - PYTHON_VERSION: pypy3.11 + TOOLCHAIN_VERSION: pypy3.11 tags: - test-standard-auth - server-rapid diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 9bae5f4680..816ff8d188 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -11,7 +11,7 @@ buildvariants: VERSION: latest NO_EXT: "1" REQUIRE_FIPS: "1" - PYTHON_BINARY: /usr/bin/python3.11 + UV_PYTHON: /usr/bin/python3.11 tags: [] - name: other-hosts-rhel8-zseries-latest tasks: @@ -72,7 +72,7 @@ buildvariants: # Aws auth tests - name: auth-aws-ubuntu-20 tasks: - - name: .auth-aws !.auth-aws-ecs + - name: .auth-aws display_name: Auth AWS Ubuntu-20 run_on: - ubuntu2004-small @@ -453,14 +453,12 @@ buildvariants: SUB_TEST_NAME: pyopenssl # Search index tests - - name: search-index-helpers-rhel8-python3.10 + - name: search-index-helpers-rhel8 tasks: - name: .search_index - display_name: Search Index Helpers RHEL8 Python3.10 + display_name: Search Index Helpers RHEL8 run_on: - rhel87-small - expansions: - PYTHON_BINARY: /opt/python/3.10/bin/python3 # Server version tests - name: mongodb-v4.2 diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index b8330de511..414c7a0a25 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -19,20 +19,14 @@ fi # Now we can safely enable xtrace set -o xtrace -# Install python with pip. -PYTHON_VER="python3.10" +# Install a c compiler. apt-get -qq update < /dev/null > /dev/null -apt-get -q install -y software-properties-common -# Use openpgp to avoid gpg key timeout. -mkdir -p $HOME/.gnupg -echo "keyserver keys.openpgp.org" >> $HOME/.gnupg/gpg.conf -add-apt-repository -y 'ppa:deadsnakes/ppa' -apt-get -qq install $PYTHON_VER $PYTHON_VER-venv build-essential $PYTHON_VER-dev -y < /dev/null > /dev/null +apt-get -q install -y build-essential -export PYTHON_BINARY=$PYTHON_VER export SET_XTRACE_ON=1 cd src rm -rf .venv rm -f .evergreen/scripts/test-env.sh || true +rm -f .evergreen/scripts/env.sh || true bash ./.evergreen/just.sh setup-tests auth_aws ecs-remote bash .evergreen/just.sh run-tests diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index c14215244e..095b7938dc 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -37,6 +37,8 @@ cleanup_tests() { trap "cleanup_tests" SIGINT ERR # Start the test runner. +echo "Running tests with UV_PYTHON=${UV_PYTHON:-}..." uv run ${UV_ARGS} --reinstall-package pymongo .evergreen/scripts/run_tests.py "$@" +echo "Running tests with UV_PYTHON=${UV_PYTHON:-}... done." cleanup_tests diff --git a/.evergreen/scripts/check-import-time.sh b/.evergreen/scripts/check-import-time.sh index f7a1117b97..ce4e834708 100755 --- a/.evergreen/scripts/check-import-time.sh +++ b/.evergreen/scripts/check-import-time.sh @@ -11,11 +11,10 @@ pushd $HERE/../.. >/dev/null BASE_SHA="$1" HEAD_SHA="$2" -. .evergreen/utils.sh - -if [ -z "${PYTHON_BINARY:-}" ]; then - PYTHON_BINARY=$(find_python3) -fi +# Set up the virtual env. +. $HERE/setup-dev-env.sh +uv venv --seed +source .venv/bin/activate # Use the previous commit if this was not a PR run. if [ "$BASE_SHA" == "$HEAD_SHA" ]; then @@ -24,7 +23,6 @@ fi function get_import_time() { local log_file - createvirtualenv "$PYTHON_BINARY" import-venv python -m pip install -q ".[aws,encryption,gssapi,ocsp,snappy,zstd]" # Import once to cache modules python -c "import pymongo" diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index daec0841d5..310da42f90 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -356,12 +356,10 @@ def create_oidc_auth_variants(): def create_search_index_variants(): host = DEFAULT_HOST - python = CPYTHONS[0] return [ create_variant( [".search_index"], - get_variant_name("Search Index Helpers", host, python=python), - python=python, + get_variant_name("Search Index Helpers", host), host=host, ) ] @@ -438,8 +436,7 @@ def create_aws_auth_variants(): for host_name in ["ubuntu20", "win64", "macos"]: expansions = dict() - # PYTHON-5604 - we need to skip ECS tests for now. - tasks = [".auth-aws !.auth-aws-ecs"] + tasks = [".auth-aws"] tags = [] if host_name == "macos": tasks = [".auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2"] @@ -477,7 +474,7 @@ def create_alternative_hosts_variants(): if "fips" in host_name.lower(): expansions["REQUIRE_FIPS"] = "1" # Use explicit Python 3.11 binary on the host since the default python3 is 3.9. - expansions["PYTHON_BINARY"] = "/usr/bin/python3.11" + expansions["UV_PYTHON"] = "/usr/bin/python3.11" if "amazon" in host_name.lower(): tags.append("pr") variants.append( @@ -543,7 +540,7 @@ def create_server_version_tasks(): ) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() - test_vars["PYTHON_VERSION"] = python + test_vars["TOOLCHAIN_VERSION"] = python test_vars["TEST_NAME"] = f"default_{sync}" test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) @@ -599,7 +596,7 @@ def create_test_non_standard_tasks(): name = get_task_name("test-non-standard", python=python, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() - test_vars["PYTHON_VERSION"] = python + test_vars["TOOLCHAIN_VERSION"] = python test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) return tasks @@ -639,7 +636,7 @@ def create_test_standard_auth_tasks(): name = get_task_name("test-standard-auth", python=python, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() - test_vars["PYTHON_VERSION"] = python + test_vars["TOOLCHAIN_VERSION"] = python test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) return tasks @@ -691,7 +688,7 @@ def create_standard_tasks(): name = get_task_name("test-standard", python=python, sync=sync, **expansions) server_func = FunctionCall(func="run server", vars=expansions) test_vars = expansions.copy() - test_vars["PYTHON_VERSION"] = python + test_vars["TOOLCHAIN_VERSION"] = python test_vars["TEST_NAME"] = f"default_{sync}" test_func = FunctionCall(func="run tests", vars=test_vars) tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) @@ -707,7 +704,7 @@ def create_no_orchestration_tasks(): ] name = get_task_name("test-no-orchestration", python=python) assume_func = FunctionCall(func="assume ec2 role") - test_vars = dict(PYTHON_VERSION=python) + test_vars = dict(TOOLCHAIN_VERSION=python) test_func = FunctionCall(func="run tests", vars=test_vars) commands = [assume_func, test_func] tasks.append(EvgTask(name=name, tags=tags, commands=commands)) @@ -756,7 +753,7 @@ def create_aws_tasks(): if "t" in python: tags.append("free-threaded") name = get_task_name(f"{base_name}-{test_type}", python=python) - test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type, PYTHON_VERSION=python) + test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type, TOOLCHAIN_VERSION=python) test_func = FunctionCall(func="run tests", vars=test_vars) funcs = [server_func, assume_func, test_func] tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) @@ -768,7 +765,7 @@ def create_aws_tasks(): TEST_NAME="auth_aws", SUB_TEST_NAME="web-identity", AWS_ROLE_SESSION_NAME="test", - PYTHON_VERSION=python, + TOOLCHAIN_VERSION=python, ) if "t" in python: tags.append("free-threaded") @@ -806,9 +803,11 @@ def create_mod_wsgi_tasks(): task_name = "mod-wsgi-embedded-mode-" task_name += topology.replace("_", "-") task_name = get_task_name(task_name, python=python) - server_vars = dict(TOPOLOGY=topology, PYTHON_VERSION=python) + server_vars = dict(TOPOLOGY=topology, TOOLCHAIN_VERSION=python) server_func = FunctionCall(func="run server", vars=server_vars) - vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0], PYTHON_VERSION=python) + vars = dict( + TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0], TOOLCHAIN_VERSION=python + ) test_func = FunctionCall(func="run tests", vars=vars) tags = ["mod_wsgi", "pr"] commands = [server_func, test_func] @@ -830,7 +829,7 @@ def _create_ocsp_tasks(algo, variant, server_type, base_task_name): ORCHESTRATION_FILE=file_name, OCSP_SERVER_TYPE=server_type, TEST_NAME="ocsp", - PYTHON_VERSION=python, + TOOLCHAIN_VERSION=python, VERSION=version, ) test_func = FunctionCall(func="run tests", vars=vars) @@ -864,7 +863,7 @@ def create_aws_lambda_tasks(): def create_search_index_tasks(): assume_func = FunctionCall(func="assume ec2 role") server_func = FunctionCall(func="run server", vars=dict(TEST_NAME="search_index")) - vars = dict(TEST_NAME="search_index") + vars = dict(TEST_NAME="search_index", TOOLCHAIN_VERSION=CPYTHONS[0]) test_func = FunctionCall(func="run tests", vars=vars) task_name = "test-search-index-helpers" tags = ["search_index"] @@ -1075,8 +1074,8 @@ def create_run_server_func(): "AUTH", "SSL", "ORCHESTRATION_FILE", - "PYTHON_BINARY", - "PYTHON_VERSION", + "UV_PYTHON", + "TOOLCHAIN_VERSION", "STORAGE_ENGINE", "REQUIRE_API_VERSION", "DRIVERS_TOOLS", @@ -1100,10 +1099,10 @@ def create_run_tests_func(): "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "COVERAGE", - "PYTHON_BINARY", + "UV_PYTHON", "LIBMONGOCRYPT_URL", "MONGODB_URI", - "PYTHON_VERSION", + "TOOLCHAIN_VERSION", "DISABLE_TEST_COMMANDS", "GREEN_FRAMEWORK", "NO_EXT", diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py index 28ee45f9b7..a32092f5b2 100644 --- a/.evergreen/scripts/generate_config_utils.py +++ b/.evergreen/scripts/generate_config_utils.py @@ -133,43 +133,17 @@ def create_variant( *, version: str | None = None, host: Host | str | None = None, - python: str | None = None, expansions: dict | None = None, **kwargs: Any, ) -> BuildVariant: expansions = expansions and expansions.copy() or dict() if version: expansions["VERSION"] = version - if python: - expansions["PYTHON_BINARY"] = get_python_binary(python, host) return create_variant_generic( tasks, display_name, version=version, host=host, expansions=expansions, **kwargs ) -def get_python_binary(python: str, host: Host) -> str: - """Get the appropriate python binary given a python version and host.""" - name = host.name - if name in ["win64", "win32"]: - if name == "win32": - base = "C:/python/32" - else: - base = "C:/python" - python_dir = python.replace(".", "").replace("t", "") - return f"{base}/Python{python_dir}/python{python}.exe" - - if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]: - return f"/opt/python/{python}/bin/python3" - - if name in ["macos", "macos-arm64"]: - bin_name = "python3t" if "t" in python else "python3" - python_dir = python.replace("t", "") - framework_dir = "PythonT" if "t" in python else "Python" - return f"/Library/Frameworks/{framework_dir}.Framework/Versions/{python_dir}/bin/{bin_name}" - - raise ValueError(f"no match found for python {python} on {name}") - - def get_versions_from(min_version: str) -> list[str]: """Get all server versions starting from a minimum version.""" min_version_float = float(min_version) diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh index 23d865d0d8..8df2af79ca 100755 --- a/.evergreen/scripts/install-dependencies.sh +++ b/.evergreen/scripts/install-dependencies.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Install the dependencies needed for an evergreen run. +# Install the necessary dependencies. set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) @@ -13,50 +13,6 @@ fi # Set up the default bin directory. if [ -z "${PYMONGO_BIN_DIR:-}" ]; then PYMONGO_BIN_DIR="$HOME/.local/bin" - export PATH="$PYMONGO_BIN_DIR:$PATH" -fi - -# Helper function to pip install a dependency using a temporary python env. -function _pip_install() { - _HERE=$(dirname ${BASH_SOURCE:-$0}) - . $_HERE/../utils.sh - _VENV_PATH=$(mktemp -d) - if [ "Windows_NT" = "${OS:-}" ]; then - _VENV_PATH=$(cygpath -m $_VENV_PATH) - fi - echo "Installing $2 using pip..." - createvirtualenv "$(find_python3)" $_VENV_PATH - python -m pip install $1 - _suffix="" - if [ "Windows_NT" = "${OS:-}" ]; then - _suffix=".exe" - fi - ln -s "$(which $2)" $PYMONGO_BIN_DIR/${2}${_suffix} - # uv also comes with a uvx binary. - if [ $2 == "uv" ]; then - ln -s "$(which uvx)" $PYMONGO_BIN_DIR/uvx${_suffix} - fi - echo "Installed to ${PYMONGO_BIN_DIR}" - echo "Installing $2 using pip... done." -} - -# Ensure just is installed. -if ! command -v just &>/dev/null; then - # On most systems we can install directly. - _TARGET="" - if [ "Windows_NT" = "${OS:-}" ]; then - _TARGET="--target x86_64-pc-windows-msvc" - fi - _BIN_DIR=$PYMONGO_BIN_DIR - mkdir -p ${_BIN_DIR} - echo "Installing just..." - mkdir -p "$_BIN_DIR" 2>/dev/null || true - curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { - # Remove just file if it exists (can be created if there was an install error). - rm -f ${_BIN_DIR}/just - _pip_install rust-just just - } - echo "Installing just... done." fi # Ensure uv is installed. @@ -64,14 +20,17 @@ if ! command -v uv &>/dev/null; then _BIN_DIR=$PYMONGO_BIN_DIR mkdir -p ${_BIN_DIR} echo "Installing uv..." - # On most systems we can install directly. - curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { - _pip_install uv uv - } + curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh if [ "Windows_NT" = "${OS:-}" ]; then chmod +x "$(cygpath -u $_BIN_DIR)/uv.exe" fi + export PATH="$PYMONGO_BIN_DIR:$PATH" echo "Installing uv... done." fi +# Ensure just is installed. +if ! command -v just &>/dev/null; then + uv tool install rust-just +fi + popd > /dev/null diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py index e3833ae63a..9bafdc1f64 100644 --- a/.evergreen/scripts/kms_tester.py +++ b/.evergreen/scripts/kms_tester.py @@ -33,7 +33,7 @@ def _setup_azure_vm(base_env: dict[str, str]) -> None: env["AZUREKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential" run_command(f"{azure_dir}/run-command.sh", env=env) - env["AZUREKMS_CMD"] = "NO_EXT=1 bash .evergreen/just.sh setup-tests kms azure-remote" + env["AZUREKMS_CMD"] = "bash .evergreen/just.sh setup-tests kms azure-remote" run_command(f"{azure_dir}/run-command.sh", env=env) LOGGER.info("Setting up Azure VM... done.") @@ -53,7 +53,7 @@ def _setup_gcp_vm(base_env: dict[str, str]) -> None: env["GCPKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential" run_command(f"{gcp_dir}/run-command.sh", env=env) - env["GCPKMS_CMD"] = "NO_EXT=1 bash ./.evergreen/just.sh setup-tests kms gcp-remote" + env["GCPKMS_CMD"] = "bash ./.evergreen/just.sh setup-tests kms gcp-remote" run_command(f"{gcp_dir}/run-command.sh", env=env) LOGGER.info("Setting up GCP VM...") diff --git a/.evergreen/scripts/oidc_tester.py b/.evergreen/scripts/oidc_tester.py index ac2960371e..a8949a9381 100644 --- a/.evergreen/scripts/oidc_tester.py +++ b/.evergreen/scripts/oidc_tester.py @@ -89,7 +89,7 @@ def test_oidc_send_to_remote(sub_test_name: str) -> None: env[f"{upper_name}OIDC_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE env[ f"{upper_name}OIDC_TEST_CMD" - ] = f"NO_EXT=1 OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh" + ] = f"OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh" elif sub_test_name in K8S_NAMES: env["K8S_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE env["K8S_TEST_CMD"] = "OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh" diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py index c1c29c58bc..9c8101c5b1 100644 --- a/.evergreen/scripts/run_tests.py +++ b/.evergreen/scripts/run_tests.py @@ -106,10 +106,11 @@ def handle_aws_lambda() -> None: env["TEST_LAMBDA_DIRECTORY"] = str(target_dir) env.setdefault("AWS_REGION", "us-east-1") dirs = ["pymongo", "gridfs", "bson"] - # Store the original .so files. - before_sos = [] + # Remove the original .so files. for dname in dirs: - before_sos.extend(f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")) + so_paths = [f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")] + for so_path in list(so_paths): + Path(so_path).unlink() # Build the c extensions. docker = which("docker") or which("podman") if not docker: @@ -122,15 +123,11 @@ def handle_aws_lambda() -> None: target = ROOT / "test/lambda/mongodb" / dname shutil.rmtree(target, ignore_errors=True) shutil.copytree(ROOT / dname, target) - # Remove the original so files from the lambda directory. - for so_path in before_sos: - (ROOT / "test/lambda/mongodb" / so_path).unlink() # Remove the new so files from the ROOT directory. for dname in dirs: so_paths = [f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")] for so_path in list(so_paths): - if so_path not in before_sos: - Path(so_path).unlink() + Path(so_path).unlink() script_name = "run-deployed-lambda-aws-tests.sh" run_command(f"bash {DRIVERS_TOOLS}/.evergreen/aws_lambda/{script_name}", env=env) diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh index 209857d542..fa5f86d798 100755 --- a/.evergreen/scripts/setup-dev-env.sh +++ b/.evergreen/scripts/setup-dev-env.sh @@ -1,22 +1,17 @@ #!/bin/bash -# Set up a development environment on an evergreen host. +# Set up development environment. set -eu HERE=$(dirname ${BASH_SOURCE:-$0}) HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" ROOT=$(dirname "$(dirname $HERE)") -pushd $ROOT > /dev/null - -# Bail early if running on GitHub Actions. -if [ -n "${GITHUB_ACTION:-}" ]; then - exit 0 -fi # Source the env files to pick up common variables. if [ -f $HERE/env.sh ]; then . $HERE/env.sh fi -# PYTHON_BINARY or PYTHON_VERSION may be defined in test-env.sh. + +# Get variables defined in test-env.sh. if [ -f $HERE/test-env.sh ]; then . $HERE/test-env.sh fi @@ -24,36 +19,40 @@ fi # Ensure dependencies are installed. bash $HERE/install-dependencies.sh -# Get the appropriate UV_PYTHON. -. $ROOT/.evergreen/utils.sh - -if [ -z "${PYTHON_BINARY:-}" ]; then - if [ -n "${PYTHON_VERSION:-}" ]; then - PYTHON_BINARY=$(get_python_binary $PYTHON_VERSION) - else - PYTHON_BINARY=$(find_python3) +# Handle the value for UV_PYTHON. +. $HERE/setup-uv-python.sh + +# Only run the next part if not running on CI. +if [ -z "${CI:-}" ]; then + # Add the default install path to the path if needed. + if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + export PATH="$PATH:$HOME/.local/bin" + fi + + # Set up venv, making sure c extensions build unless disabled. + if [ -z "${NO_EXT:-}" ]; then + export PYMONGO_C_EXT_MUST_BUILD=1 + fi + + ( + cd $ROOT && uv sync + ) + + # Set up build utilities on Windows spawn hosts. + if [ -f $HOME/.visualStudioEnv.sh ]; then + set +u + SSH_TTY=1 source $HOME/.visualStudioEnv.sh + set -u + fi + + # Only set up pre-commit if we are in a git checkout. + if [ -f $HERE/.git ]; then + if ! command -v pre-commit &>/dev/null; then + uv tool install pre-commit fi -fi -export UV_PYTHON=${PYTHON_BINARY} -echo "Using python $UV_PYTHON" -# Add the default install path to the path if needed. -if [ -z "${PYMONGO_BIN_DIR:-}" ]; then - export PATH="$PATH:$HOME/.local/bin" -fi - -# Set up venv, making sure c extensions build unless disabled. -if [ -z "${NO_EXT:-}" ]; then - export PYMONGO_C_EXT_MUST_BUILD=1 -fi -# Set up visual studio env on Windows spawn hosts. -if [ -f $HOME/.visualStudioEnv.sh ]; then - set +u - SSH_TTY=1 source $HOME/.visualStudioEnv.sh - set -u + if [ ! -f .git/hooks/pre-commit ]; then + uvx pre-commit install + fi + fi fi -uv sync - -echo "Setting up python environment... done." - -popd > /dev/null diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh index 9158414cce..bd7e2dd4bc 100755 --- a/.evergreen/scripts/setup-system.sh +++ b/.evergreen/scripts/setup-system.sh @@ -8,9 +8,13 @@ echo "Setting up system..." bash .evergreen/scripts/configure-env.sh source .evergreen/scripts/env.sh bash $DRIVERS_TOOLS/.evergreen/setup.sh -bash .evergreen/scripts/install-dependencies.sh popd +# Run spawn host-specific tasks. +if [ -z "${CI:-}" ]; then + bash $HERE/setup-dev-env.sh +fi + # Enable core dumps if enabled on the machine # Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml if [ -f /proc/self/coredump_filter ]; then diff --git a/.evergreen/scripts/setup-uv-python.sh b/.evergreen/scripts/setup-uv-python.sh new file mode 100755 index 0000000000..a32e4a8f92 --- /dev/null +++ b/.evergreen/scripts/setup-uv-python.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Set up the UV_PYTHON variable. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )" + +# Use min supported version by default. +_python="3.10" + +# Source the env files to pick up common variables. +if [ -f $HERE/env.sh ]; then + . $HERE/env.sh +fi + +# Get variables defined in test-env.sh. +if [ -f $HERE/test-env.sh ]; then + . $HERE/test-env.sh +fi + +if [ -z "${UV_PYTHON:-}" ]; then + set -x + # Translate a TOOLCHAIN_VERSION to UV_PYTHON. + if [ -n "${TOOLCHAIN_VERSION:-}" ]; then + _python=$TOOLCHAIN_VERSION + if [ "$(uname -s)" = "Darwin" ]; then + if [[ "$_python" == *"t"* ]]; then + binary_name="python3t" + framework_dir="PythonT" + else + binary_name="python3" + framework_dir="Python" + fi + _python=$(echo "$_python" | sed 's/t//g') + _python="/Library/Frameworks/$framework_dir.Framework/Versions/$_python/bin/$binary_name" + elif [ "Windows_NT" = "${OS:-}" ]; then + _python=$(echo $_python | cut -d. -f1,2 | sed 's/\.//g; s/t//g') + if [[ "$TOOLCHAIN_VERSION" == *"t"* ]]; then + _exe="python${TOOLCHAIN_VERSION}.exe" + else + _exe="python.exe" + fi + if [ -n "${IS_WIN32:-}" ]; then + _python="C:/python/32/Python${_python}/${_exe}" + else + _python="C:/python/Python${_python}/${_exe}" + fi + elif [ -d "/opt/python/$_python/bin" ]; then + _python="/opt/python/$_python/bin/python3" + fi + fi + export UV_PYTHON="$_python" +fi diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py index 3f0a8cc7f9..7908836f50 100644 --- a/.evergreen/scripts/setup_tests.py +++ b/.evergreen/scripts/setup_tests.py @@ -31,8 +31,7 @@ "NO_EXT", "MONGODB_API_VERSION", "DEBUG_LOG", - "PYTHON_BINARY", - "PYTHON_VERSION", + "UV_PYTHON", "REQUIRE_FIPS", "IS_WIN32", ] diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh index bada61e568..3db18d15ab 100755 --- a/.evergreen/setup-spawn-host.sh +++ b/.evergreen/setup-spawn-host.sh @@ -15,5 +15,4 @@ echo "Copying files to $target..." rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir echo "Copying files to $target... done" -ssh $target $remote_dir/.evergreen/scripts/setup-system.sh -ssh $target "cd $remote_dir && PYTHON_BINARY=${PYTHON_BINARY:-} .evergreen/scripts/setup-dev-env.sh" +ssh $target "$remote_dir/.evergreen/scripts/setup-system.sh" diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh deleted file mode 100755 index dadb7db084..0000000000 --- a/.evergreen/utils.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -# Utility functions used by pymongo evergreen scripts. -set -eu - -find_python3() { - PYTHON="" - # Find a suitable toolchain version, if available. - if [ "$(uname -s)" = "Darwin" ]; then - PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" - elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin - PYTHON="C:/python/Python310/python.exe" - else - # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.10+. - if [ -f "/opt/python/3.10/bin/python3" ]; then - PYTHON="/opt/python/Current/bin/python3" - elif is_python_310 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then - PYTHON="/opt/mongodbtoolchain/v5/bin/python3" - elif is_python_310 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then - PYTHON="/opt/mongodbtoolchain/v4/bin/python3" - elif is_python_310 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then - PYTHON="/opt/mongodbtoolchain/v3/bin/python3" - fi - fi - # Add a fallback system python3 if it is available and Python 3.10+. - if [ -z "$PYTHON" ]; then - if is_python_310 "$(command -v python3)"; then - PYTHON="$(command -v python3)" - fi - fi - if [ -z "$PYTHON" ]; then - echo "Cannot test without python3.10+ installed!" - exit 1 - fi - echo "$PYTHON" -} - -# Usage: -# createvirtualenv /path/to/python /output/path/for/venv -# * param1: Python binary to use for the virtualenv -# * param2: Path to the virtualenv to create -createvirtualenv () { - PYTHON=$1 - VENVPATH=$2 - - # Prefer venv - VENV="$PYTHON -m venv" - if [ "$(uname -s)" = "Darwin" ]; then - VIRTUALENV="$PYTHON -m virtualenv" - else - VIRTUALENV=$(command -v virtualenv 2>/dev/null || echo "$PYTHON -m virtualenv") - VIRTUALENV="$VIRTUALENV -p $PYTHON" - fi - if ! $VENV $VENVPATH 2>/dev/null; then - # Workaround for bug in older versions of virtualenv. - $VIRTUALENV $VENVPATH 2>/dev/null || $VIRTUALENV $VENVPATH - fi - if [ "Windows_NT" = "${OS:-}" ]; then - # Workaround https://bugs.python.org/issue32451: - # mongovenv/Scripts/activate: line 3: $'\r': command not found - dos2unix $VENVPATH/Scripts/activate || true - . $VENVPATH/Scripts/activate - else - . $VENVPATH/bin/activate - fi - - export PIP_QUIET=1 - python -m pip install --upgrade pip -} - -# Usage: -# testinstall /path/to/python /path/to/.whl ["no-virtualenv"] -# * param1: Python binary to test -# * param2: Path to the wheel to install -# * param3 (optional): If set to a non-empty string, don't create a virtualenv. Used in manylinux containers. -testinstall () { - PYTHON=$1 - RELEASE=$2 - NO_VIRTUALENV=$3 - PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") - - if [ -z "$NO_VIRTUALENV" ]; then - createvirtualenv $PYTHON venvtestinstall - PYTHON=python - fi - - $PYTHON -m pip install --upgrade $RELEASE - cd tools - - if [ "$PYTHON_IMPL" = "CPython" ]; then - $PYTHON fail_if_no_c.py - fi - - $PYTHON -m pip uninstall -y pymongo - cd .. - - if [ -z "$NO_VIRTUALENV" ]; then - deactivate - rm -rf venvtestinstall - fi -} - -# Function that returns success if the provided Python binary is version 3.10 or later -# Usage: -# is_python_310 /path/to/python -# * param1: Python binary -is_python_310() { - if [ -z "$1" ]; then - return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 10))"; then - # runs when sys.version_info[:2] >= (3, 10) - return 0 - else - return 1 - fi -} - - -# Function that gets a python binary given a python version string. -# Versions can be of the form 3.xx or pypy3.xx. -get_python_binary() { - version=$1 - if [ "$(uname -s)" = "Darwin" ]; then - if [[ "$version" == *"t"* ]]; then - binary_name="python3t" - framework_dir="PythonT" - else - binary_name="python3" - framework_dir="Python" - fi - version=$(echo "$version" | sed 's/t//g') - PYTHON="/Library/Frameworks/$framework_dir.Framework/Versions/$version/bin/$binary_name" - elif [ "Windows_NT" = "${OS:-}" ]; then - version=$(echo $version | cut -d. -f1,2 | sed 's/\.//g; s/t//g') - if [ -n "${IS_WIN32:-}" ]; then - PYTHON="C:/python/32/Python$version/python.exe" - else - PYTHON="C:/python/Python$version/python.exe" - fi - else - PYTHON="/opt/python/$version/bin/python3" - fi - if is_python_310 "$(command -v $PYTHON)"; then - echo "$PYTHON" - else - echo "Could not find suitable python binary for '$version'" >&2 - return 1 - fi -} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a8881db9cb..2cf15a9838 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -194,7 +194,7 @@ the pages will re-render and the browser will automatically refresh. - Run `just install` to set a local virtual environment, or you can manually create a virtual environment and run `pytest` directly. If you want to use a specific - version of Python, remove the `.venv` folder and set `PYTHON_BINARY` before running `just install`. + version of Python, set `UV_PYTHON` before running `just install`. - Ensure you have started the appropriate Mongo Server(s). You can run `just run-server` with optional args to set up the server. All given options will be passed to [`run-orchestration.sh`](https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-orchestration.sh). Run `$DRIVERS_TOOLS/evergreen/run-orchestration.sh -h` @@ -335,7 +335,7 @@ Locally you can run: - Run `just run-server`. - Run `just setup-tests`. -- Run `UV_PYTHON=3.13t just run-tests`. +- Run `UV_PYTHON=3.14t just run-tests`. ### AWS Lambda tests @@ -399,6 +399,16 @@ To run any of the test suites with minimum supported dependencies, pass `--test- - If there are any special test considerations, including not running `pytest` at all, handle it in `.evergreen/scripts/run_tests.py`. - If there are any services or atlas clusters to teardown, handle them in `.evergreen/scripts/teardown_tests.py`. - Add functions to generate the test variant(s) and task(s) to the `.evergreen/scripts/generate_config.py`. +- There are some considerations about the Python version used in the test: + - If a specific version of Python is needed in a task that is running on variants with a toolchain, use +``TOOLCHAIN_VERSION`` (e.g. `TOOLCHAIN_VERSION=3.10`). The actual path lookup needs to be done on the host, since +tasks are host-agnostic. + - If a specific Python binary is needed (for example on the FIPS host), set `UV_PYTHON=/path/to/python`. + - If a specific Python version is needed and the toolchain will not be available, use `UV_PYTHON` (e.g. `UV_PYTHON=3.11`). + - The default if neither ``TOOLCHAIN_VERSION`` or ``UV_PYTHON`` is set is to use UV to install the minimum + supported version of Python and use that. This ensures a consistent behavior across host types that do not + have the Python toolchain (e.g. Azure VMs), by having a known version of Python with the build headers (`Python.h`) + needed to build the C extensions. - Regenerate the test variants and tasks using `pre-commit run --all-files generate-config`. - Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. diff --git a/justfile b/justfile index 17b95e87b7..92bdee5be3 100644 --- a/justfile +++ b/justfile @@ -18,7 +18,6 @@ resync: install: bash .evergreen/scripts/setup-dev-env.sh - uvx pre-commit install [group('docs')] docs: && resync From 0c8a22b87d40d2afcec65de08c5f76505fa03091 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Fri, 24 Oct 2025 15:26:46 -0400 Subject: [PATCH 2106/2111] PYTHON-5627 - Update feedback link (#2601) --- doc/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/index.rst b/doc/index.rst index 85812d1b14..9a2c3eb6b2 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -37,7 +37,7 @@ project. Feature Requests / Feedback --------------------------- -Use our `feedback engine `_ +Use our `feedback engine `_ to send us feature requests and general feedback about PyMongo. Contributing From fd025503491e2bc939d8272770623c47dbaa3fd7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Mon, 27 Oct 2025 11:41:14 -0400 Subject: [PATCH 2107/2111] PYTHON-5628 - Update the link for help in the documentation (#2602) --- doc/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/index.rst b/doc/index.rst index 9a2c3eb6b2..17b12e51a8 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -22,7 +22,7 @@ work with MongoDB from Python. Getting Help ------------ If you're having trouble or have questions about PyMongo, ask your question on -our `MongoDB Community Forum `_. +one of the platforms listed on `Technical Support `_. You may also want to consider a `commercial support subscription `_. Once you get an answer, it'd be great if you could work it back into this From b607ef144cf0c1e379f8c83677a614cd44670722 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 29 Oct 2025 14:30:18 -0400 Subject: [PATCH 2108/2111] PYTHON-5214 - Improve BSON decoding InvalidBSON error message (#2605) --- bson/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bson/__init__.py b/bson/__init__.py index d260fb876f..ebb1bd0ccc 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -1109,7 +1109,9 @@ def _decode_all(data: _ReadableBuffer, opts: CodecOptions[_DocumentType]) -> lis while position < end: obj_size = _UNPACK_INT_FROM(data, position)[0] if data_len - position < obj_size: - raise InvalidBSON("invalid object size") + raise InvalidBSON( + f"invalid object size: expected {obj_size}, got {data_len - position}" + ) obj_end = position + obj_size - 1 if data[obj_end] != 0: raise InvalidBSON("bad eoo") From 5f00966f9ce591cbff982bea56df888875b80fb7 Mon Sep 17 00:00:00 2001 From: Noah Stapp Date: Wed, 29 Oct 2025 14:31:25 -0400 Subject: [PATCH 2109/2111] [TASK]-[PYTHON-5623]: Change with_transaction callback return type to Awaitable (#2594) Co-authored-by: Logan Pulley --- doc/changelog.rst | 16 ++++++++++++++++ pymongo/asynchronous/client_session.py | 4 ++-- test/asynchronous/test_transactions.py | 12 ++++++++++++ test/test_transactions.py | 12 ++++++++++++ tools/synchro.py | 8 ++++++++ 5 files changed, 50 insertions(+), 2 deletions(-) diff --git a/doc/changelog.rst b/doc/changelog.rst index 47d3fafd66..52e282228f 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -17,6 +17,22 @@ PyMongo 4.16 brings a number of changes including: - Removed support for Eventlet. Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. +Changes in Version 4.15.4 (2025/10/21) +-------------------------------------- + +Version 4.15.4 is a bug fix release. + +- Relaxed the callback type of :meth:`~pymongo.asynchronous.client_session.AsyncClientSession.with_transaction` to allow the broader Awaitable type rather than only Coroutine objects. +- Added the missing Python 3.14 trove classifier to the package metadata. + +Issues Resolved +............... + +See the `PyMongo 4.15.4 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47237 + Changes in Version 4.15.3 (2025/10/07) -------------------------------------- diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py index 8674e98447..6ab3b39983 100644 --- a/pymongo/asynchronous/client_session.py +++ b/pymongo/asynchronous/client_session.py @@ -143,8 +143,8 @@ TYPE_CHECKING, Any, AsyncContextManager, + Awaitable, Callable, - Coroutine, Mapping, MutableMapping, NoReturn, @@ -604,7 +604,7 @@ def _inherit_option(self, name: str, val: _T) -> _T: async def with_transaction( self, - callback: Callable[[AsyncClientSession], Coroutine[Any, Any, _T]], + callback: Callable[[AsyncClientSession], Awaitable[_T]], read_concern: Optional[ReadConcern] = None, write_concern: Optional[WriteConcern] = None, read_preference: Optional[_ServerMode] = None, diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py index 478710362e..29c5d26423 100644 --- a/test/asynchronous/test_transactions.py +++ b/test/asynchronous/test_transactions.py @@ -15,6 +15,7 @@ """Execute Transactions Spec tests.""" from __future__ import annotations +import asyncio import sys from io import BytesIO from test.asynchronous.utils_spec_runner import AsyncSpecRunner @@ -468,6 +469,17 @@ async def callback2(session): async with self.client.start_session() as s: self.assertEqual(await s.with_transaction(callback2), "Foo") + @async_client_context.require_transactions + @async_client_context.require_async + async def test_callback_awaitable_no_coroutine(self): + def callback(_): + future = asyncio.Future() + future.set_result("Foo") + return future + + async with self.client.start_session() as s: + self.assertEqual(await s.with_transaction(callback), "Foo") + @async_client_context.require_transactions async def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() diff --git a/test/test_transactions.py b/test/test_transactions.py index 813d6a688d..37e1a249e0 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -15,6 +15,7 @@ """Execute Transactions Spec tests.""" from __future__ import annotations +import asyncio import sys from io import BytesIO from test.utils_spec_runner import SpecRunner @@ -460,6 +461,17 @@ def callback2(session): with self.client.start_session() as s: self.assertEqual(s.with_transaction(callback2), "Foo") + @client_context.require_transactions + @client_context.require_async + def test_callback_awaitable_no_coroutine(self): + def callback(_): + future = asyncio.Future() + future.set_result("Foo") + return future + + with self.client.start_session() as s: + self.assertEqual(s.with_transaction(callback), "Foo") + @client_context.require_transactions def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() diff --git a/tools/synchro.py b/tools/synchro.py index e3d4835502..1444b22994 100644 --- a/tools/synchro.py +++ b/tools/synchro.py @@ -322,6 +322,14 @@ def translate_coroutine_types(lines: list[str]) -> list[str]: index = lines.index(type) new = type.replace(old, res.group(3)) lines[index] = new + coroutine_types = [line for line in lines if "Awaitable[" in line] + for type in coroutine_types: + res = re.search(r"Awaitable\[([A-z]+)\]", type) + if res: + old = res[0] + index = lines.index(type) + new = type.replace(old, res.group(1)) + lines[index] = new return lines From f278e471d1637d297551477487b2d9ef25656374 Mon Sep 17 00:00:00 2001 From: Rogdham <3994389+Rogdham@users.noreply.github.com> Date: Fri, 31 Oct 2025 22:14:14 +0100 Subject: [PATCH 2110/2111] PYTHON-5522: Support std lib zstandard in 3.14 (#2592) --- .evergreen/generated_configs/variants.yml | 9 + .evergreen/scripts/generate_config.py | 16 ++ README.md | 3 +- doc/changelog.rst | 1 + pymongo/asynchronous/mongo_client.py | 4 +- pymongo/compression_support.py | 42 +++-- pymongo/synchronous/mongo_client.py | 4 +- requirements/zstd.txt | 2 +- uv.lock | 205 ++++++++++++---------- 9 files changed, 172 insertions(+), 114 deletions(-) diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml index 816ff8d188..4efa338991 100644 --- a/.evergreen/generated_configs/variants.yml +++ b/.evergreen/generated_configs/variants.yml @@ -133,6 +133,15 @@ buildvariants: - rhel87-small expansions: COMPRESSOR: zstd + - name: compression-zstd-ubuntu-22 + tasks: + - name: .test-standard !.server-4.2 !.server-4.4 !.server-5.0 .python-3.14 + - name: .test-standard !.server-4.2 !.server-4.4 !.server-5.0 .python-3.14t + display_name: Compression zstd Ubuntu-22 + run_on: + - ubuntu2204-small + expansions: + COMPRESSOR: ztsd # Coverage report tests - name: coverage-report diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py index 310da42f90..26580cfae1 100644 --- a/.evergreen/scripts/generate_config.py +++ b/.evergreen/scripts/generate_config.py @@ -194,6 +194,22 @@ def create_compression_variants(): expansions=expansions, ) ) + # Add explicit tests with compression.zstd support on linux. + host = HOSTS["ubuntu22"] + expansions = dict(COMPRESSOR="ztsd") + tasks = [ + ".test-standard !.server-4.2 !.server-4.4 !.server-5.0 .python-3.14", + ".test-standard !.server-4.2 !.server-4.4 !.server-5.0 .python-3.14t", + ] + display_name = get_variant_name(f"Compression {compressor}", host) + variants.append( + create_variant( + tasks, + display_name, + host=host, + expansions=expansions, + ) + ) return variants diff --git a/README.md b/README.md index ba1688cb70..b8bfa294a6 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,8 @@ python -m pip install "pymongo[snappy]" ``` Wire protocol compression with zstandard requires -[zstandard](https://pypi.org/project/zstandard): +[backports.zstd](https://pypi.org/project/backports.zstd) +when used with Python versions before 3.14: ```bash python -m pip install "pymongo[zstd]" diff --git a/doc/changelog.rst b/doc/changelog.rst index 52e282228f..dbf24aaaad 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -16,6 +16,7 @@ PyMongo 4.16 brings a number of changes including: Python 3.10+. The minimum version is ``2.6.1`` to account for `CVE-2023-29483 `_. - Removed support for Eventlet. Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. +- Use Zstandard support from the standard library for Python 3.14+, and use ``backports.zstd`` for older versions. Changes in Version 4.15.4 (2025/10/21) -------------------------------------- diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py index d9bf808d55..2a8ff43392 100644 --- a/pymongo/asynchronous/mongo_client.py +++ b/pymongo/asynchronous/mongo_client.py @@ -422,8 +422,8 @@ def __init__( with the server. Currently supported options are "snappy", "zlib" and "zstd". Support for snappy requires the `python-snappy `_ package. - zlib support requires the Python standard library zlib module. zstd - requires the `zstandard `_ + zlib support requires the Python standard library zlib module. For + Python before 3.14 zstd requires the `backports.zstd `_ package. By default no compression is used. Compression support must also be enabled on the server. MongoDB 3.6+ supports snappy and zlib compression. MongoDB 4.2+ adds support for zstd. diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index 64ffe052ec..f7ed1aadcc 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -13,6 +13,7 @@ # limitations under the License. from __future__ import annotations +import sys import warnings from typing import Any, Iterable, Optional, Union @@ -44,7 +45,10 @@ def _have_zlib() -> bool: def _have_zstd() -> bool: try: - import zstandard # noqa: F401 + if sys.version_info >= (3, 14): + from compression import zstd + else: + from backports import zstd # noqa: F401 return True except ImportError: @@ -79,11 +83,18 @@ def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> list[s ) elif compressor == "zstd" and not _have_zstd(): compressors.remove(compressor) - warnings.warn( - "Wire protocol compression with zstandard is not available. " - "You must install the zstandard module for zstandard support.", - stacklevel=2, - ) + if sys.version_info >= (3, 14): + warnings.warn( + "Wire protocol compression with zstandard is not available. " + "The compression.zstd module is not available.", + stacklevel=2, + ) + else: + warnings.warn( + "Wire protocol compression with zstandard is not available. " + "You must install the backports.zstd module for zstandard support.", + stacklevel=2, + ) return compressors @@ -144,12 +155,12 @@ class ZstdContext: @staticmethod def compress(data: bytes) -> bytes: - # ZstdCompressor is not thread safe. - # TODO: Use a pool? - - import zstandard + if sys.version_info >= (3, 14): + from compression import zstd + else: + from backports import zstd - return zstandard.ZstdCompressor().compress(data) + return zstd.compress(data) def decompress(data: bytes | memoryview, compressor_id: int) -> bytes: @@ -166,10 +177,11 @@ def decompress(data: bytes | memoryview, compressor_id: int) -> bytes: return zlib.decompress(data) elif compressor_id == ZstdContext.compressor_id: - # ZstdDecompressor is not thread safe. - # TODO: Use a pool? - import zstandard + if sys.version_info >= (3, 14): + from compression import zstd + else: + from backports import zstd - return zstandard.ZstdDecompressor().decompress(data) + return zstd.decompress(data) else: raise ValueError("Unknown compressorId %d" % (compressor_id,)) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py index 6e716402f4..fea2d6daef 100644 --- a/pymongo/synchronous/mongo_client.py +++ b/pymongo/synchronous/mongo_client.py @@ -422,8 +422,8 @@ def __init__( with the server. Currently supported options are "snappy", "zlib" and "zstd". Support for snappy requires the `python-snappy `_ package. - zlib support requires the Python standard library zlib module. zstd - requires the `zstandard `_ + zlib support requires the Python standard library zlib module. For + Python before 3.14 zstd requires the `backports.zstd `_ package. By default no compression is used. Compression support must also be enabled on the server. MongoDB 3.6+ supports snappy and zlib compression. MongoDB 4.2+ adds support for zstd. diff --git a/requirements/zstd.txt b/requirements/zstd.txt index 864700d2b3..26c899e04c 100644 --- a/requirements/zstd.txt +++ b/requirements/zstd.txt @@ -1 +1 @@ -zstandard +backports.zstd>=1.0.0;python_version<'3.14' diff --git a/uv.lock b/uv.lock index f9a389c896..c021943b07 100644 --- a/uv.lock +++ b/uv.lock @@ -88,6 +88,116 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, ] +[[package]] +name = "backports-zstd" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/12/8080a1b7bce609eb250813519f550b36ad5950b64f0af2738c0fb53e7fb3/backports_zstd-1.0.0.tar.gz", hash = "sha256:8e99702fd4092c26624b914bcd140d03911a16445ba6a74435b29a190469cce3", size = 995991, upload-time = "2025-10-10T07:06:18.481Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/79/10389134d3a7e3099798ca55fc82abe9d7f49239c69c8d9c4979b091338c/backports_zstd-1.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:34cd28f44f6b8f70ea5d86c2b3ba26d0d51f94606bd1f0c80bde1f820e8c82b2", size = 435682, upload-time = "2025-10-10T07:03:59.169Z" }, + { url = "https://files.pythonhosted.org/packages/3c/6e/40d033accd0d54ee0b696f8f2ae0840bd4fae7255b3463ff46b210520e4f/backports_zstd-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f7ccfd860da6b4c0c1f0f68dc917483977383e1207bdee262d40369fe616fc8", size = 362079, upload-time = "2025-10-10T07:04:00.784Z" }, + { url = "https://files.pythonhosted.org/packages/73/47/a1ed28ffd9b956aadbde6ad9a8d4adeab38b5cbfdd1d3f3d485a1bb18eba/backports_zstd-1.0.0-cp310-cp310-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:65de196c84b0d2c44fb76a729041de3a292f0128ded1b5ff7c1c4eab948aae0b", size = 505978, upload-time = "2025-10-10T07:04:02.903Z" }, + { url = "https://files.pythonhosted.org/packages/0e/14/6ea8a2567881ce0a46b7c8376c336f366e6e5dfe84766c45fed7473f0649/backports_zstd-1.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1096d4504557d2deb9e71e1aef9914c651b7aa05c35276d77854fee7a4bdd09a", size = 475592, upload-time = "2025-10-10T07:04:04.275Z" }, + { url = "https://files.pythonhosted.org/packages/62/27/5782d0bb36adbeec58687a2abf7e1a1659af30782129355456551486794f/backports_zstd-1.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1a0db63d2a112c217a347789aecd38bdd17bc1030fa9d3d256d5d011231852f2", size = 581221, upload-time = "2025-10-10T07:04:05.759Z" }, + { url = "https://files.pythonhosted.org/packages/69/0b/accbbdbd24940b7a93d8064224b4eb304bd51d68a5e03fd2af3e2b5af268/backports_zstd-1.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0dfab6714325c29d621593b1e9198e47064bb755e601c3e79fde5211a16527f7", size = 640865, upload-time = "2025-10-10T07:04:07.236Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c4/7c5c5e47565e959a5dd6d2eae4e34e2ca46b6ba123ee1dd548c0c0d316f2/backports_zstd-1.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63076e558ba75b2c016257bdbe6b83b5b9a41fe5b6601b567f1e07890370f2f1", size = 491083, upload-time = "2025-10-10T07:04:09.04Z" }, + { url = "https://files.pythonhosted.org/packages/47/46/4ef914cfaf8a91fecd01e3d342fd506f450a06109c749a034e3a48ce97b2/backports_zstd-1.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9cd445dcb1ec8a8aa38d3c4e9cda7991cacead193bb07cf002f60cfc002d8628", size = 481539, upload-time = "2025-10-10T07:04:10.532Z" }, + { url = "https://files.pythonhosted.org/packages/e6/f0/ae1dd6cf45d48b535fb6e5a77a107d6cc39db2ae8a9c060762d8fb6bcad2/backports_zstd-1.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d01aa1b91007edd00abc78b3cb7e10ad9394b54a75cbd76b1584524a3b238cfc", size = 509486, upload-time = "2025-10-10T07:04:11.763Z" }, + { url = "https://files.pythonhosted.org/packages/91/35/a4829b1715e965baa00ef52529f513c8c30de83d3d2f662cbd016ad8861a/backports_zstd-1.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d6bbb553640f7ed630223dae9ec93d6f56785d03db0c8385e0c4cfc88d54bdf4", size = 585584, upload-time = "2025-10-10T07:04:13.264Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b7/3d139188c2803b7e2944cc22ad7e2a974cc9773534c4dd736a136b671160/backports_zstd-1.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8d25adbe96f4a3fb6492d8d504e78c1641b06002ca2b578a0b24862ee9fd5a58", size = 631442, upload-time = "2025-10-10T07:04:14.655Z" }, + { url = "https://files.pythonhosted.org/packages/a5/72/442ada2be3fa510b0a93ca3f353c546c7626690029c3533dd348ea3c7730/backports_zstd-1.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dde864a9c6aaa94eafe4962f59916de3165e99b3fd4d2f6584cd116707aed8ff", size = 495142, upload-time = "2025-10-10T07:04:16.144Z" }, + { url = "https://files.pythonhosted.org/packages/4c/80/e26f98015801a790262f9637feaebf2028edebd915e196cc1507f4ee7b6f/backports_zstd-1.0.0-cp310-cp310-win32.whl", hash = "sha256:474847d1ac3ed2e4bfca2207bbfd5632110143ddd4fc6ea41ff5c5b05d2fda1d", size = 288590, upload-time = "2025-10-10T07:04:17.755Z" }, + { url = "https://files.pythonhosted.org/packages/db/39/f322cf4d8b3194353a5bc01db6f2829835d1df273e93ebd1607f130213b5/backports_zstd-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5d2fb3f4d53b2f92a26e7fc34b313ac5eebd7ff437f37f8f5c308d72c844fbd7", size = 313505, upload-time = "2025-10-10T07:04:18.895Z" }, + { url = "https://files.pythonhosted.org/packages/b6/c0/e6ce5b66c48dfe29ec149eee01901be136071dd1692d6f99e14dbd7ba7d1/backports_zstd-1.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:525a1ecb62edd97c6695812fef0e4dc7c2faa5edf3057aa3d8ec9d2dbd0f7799", size = 288707, upload-time = "2025-10-10T07:04:19.968Z" }, + { url = "https://files.pythonhosted.org/packages/01/0a/cbf3f9cb7ca865eca93744d1b859ed50d28be3f64d83cfd96ad114ed88d6/backports_zstd-1.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:782923d65aa6d0c8c465c32563df70dbdd3e255532de2a2d26e13598fc5f85ae", size = 435683, upload-time = "2025-10-10T07:04:21.097Z" }, + { url = "https://files.pythonhosted.org/packages/c7/70/65f975ac0e1780963c5bcfae40e822724d7e4bfe902eeef3637a14fb56b1/backports_zstd-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6825598589ef9b8c0c4e574170d29d84400be24c2f172b81403435b33c8d103a", size = 362075, upload-time = "2025-10-10T07:04:22.382Z" }, + { url = "https://files.pythonhosted.org/packages/0a/22/007acd1b0af3a78188c2b71fd4a3284f005826bd93e234e73412944d7b99/backports_zstd-1.0.0-cp311-cp311-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:15863463b7f28049d4511f9f123c3b96d66c3de543315c21ef3bc6b001b20d01", size = 505978, upload-time = "2025-10-10T07:04:23.504Z" }, + { url = "https://files.pythonhosted.org/packages/6e/d6/f0a148d3f0d0558ace2fc0e7d4f0cc648e88c212665cbf8df718037adde9/backports_zstd-1.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b39da619431f4782f3d8bb0d99a6067db571eab50579527ba168bcc12887d328", size = 475589, upload-time = "2025-10-10T07:04:24.791Z" }, + { url = "https://files.pythonhosted.org/packages/49/b5/32fcb6342cfa9ca5692b0344961aafd082887e4fad89248f890927522bad/backports_zstd-1.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:411da73bb3eadef58da781c55c6399fc6dba9b898ca05009410138fb1d7fef8d", size = 581218, upload-time = "2025-10-10T07:04:26.493Z" }, + { url = "https://files.pythonhosted.org/packages/21/00/757aa4952b8f3d955bb62b72360940639c781fc4f39249f5ea40e0b8125b/backports_zstd-1.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1f8b0bc92f5be153a4878188ab0aeab5b9bbff3dc3e9d3ad3b19e29fe4932741", size = 640908, upload-time = "2025-10-10T07:04:27.837Z" }, + { url = "https://files.pythonhosted.org/packages/37/5f/075c31cbe58fffd8144bc482fea73d2833562159684430b3f1d402fa9f8d/backports_zstd-1.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34cd5bdb76448f2259ea371d6cd62a7e339021e1429fe3c386acb3e58c1f6c61", size = 491121, upload-time = "2025-10-10T07:04:29.045Z" }, + { url = "https://files.pythonhosted.org/packages/2d/a0/4c4b9a85ff52fe90a3265aa9b5cb7b35bf1a2d48bd1ed4604d7fe1aabfc7/backports_zstd-1.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d251a49e80e1868e132e6edadfbef8dba7ded7751e59a41684cd6da38bbd3507", size = 481544, upload-time = "2025-10-10T07:04:30.174Z" }, + { url = "https://files.pythonhosted.org/packages/9c/0e/1bd54a04e9f236f5a8d426c00ce0a6d5af6d68735138e9887d5545311761/backports_zstd-1.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a54ea58ddeaab9a1385c368f84fca474b87b052087b62e56ac1ebd10cabac157", size = 509487, upload-time = "2025-10-10T07:04:31.386Z" }, + { url = "https://files.pythonhosted.org/packages/ef/eb/03a53be8a982e953acd8864d63ca1622ca309d9fbcf1f7ec5e2550b45057/backports_zstd-1.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d6272730803dc5b212615f50af7395f2b05155d9415e367492d6dac807edc949", size = 585574, upload-time = "2025-10-10T07:04:32.585Z" }, + { url = "https://files.pythonhosted.org/packages/5c/90/17810915587c2686e767a5cd2de014e902c76e0a242daf1c4a97544ba1f5/backports_zstd-1.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f6a27510ebb9e1cb877aaa26fc5e0303437bd2023e0a24976da854a3421e60e5", size = 631483, upload-time = "2025-10-10T07:04:34.107Z" }, + { url = "https://files.pythonhosted.org/packages/a4/22/d65a54a803061e475b66164c7d03d2ed889c32eaf32544c2e0d599c20628/backports_zstd-1.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c55f842917ac4405a9779476b1ec8219247f35d86673769cf2d3c140799d3e4a", size = 495147, upload-time = "2025-10-10T07:04:35.958Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/bdf4e76e148cfac7c324b74f76fbda83c5a587b8a85871bad09722729283/backports_zstd-1.0.0-cp311-cp311-win32.whl", hash = "sha256:c28cfbd6217ba4837d35cdd8cfd5dcf84ad54bffcb531734002e27dcc84c87ca", size = 288686, upload-time = "2025-10-10T07:04:37.132Z" }, + { url = "https://files.pythonhosted.org/packages/3b/b1/726a07d04b85a687776b04b53a02b7d2c4b666d51b18c44fa2ddaadfe383/backports_zstd-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:f009996762b887d1bf9330ac0ce1e83608db0b881f63644ae30f2b6a290cd36b", size = 313630, upload-time = "2025-10-10T07:04:38.358Z" }, + { url = "https://files.pythonhosted.org/packages/52/e6/727584a8794fa28164e0795441d8b86f89c75a2368dec0aaaa086f7ac58c/backports_zstd-1.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:62ab49843fc7761383aa7bea8105ca70941797c7145647f71fa4340bfd3b747a", size = 288829, upload-time = "2025-10-10T07:04:39.602Z" }, + { url = "https://files.pythonhosted.org/packages/ba/22/2a68534673efe608d7b2d0de03595d5d1de629616a2f4e394813376eed21/backports_zstd-1.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f9afba11403cf03849464e0f1214b035970d43546a7cdd9d8ee31dc154889e78", size = 435990, upload-time = "2025-10-10T07:04:41.075Z" }, + { url = "https://files.pythonhosted.org/packages/3a/44/c3f06c172f128bf1160f6122df2a942440e36b8450cf4ba44c69465c5f55/backports_zstd-1.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86498856adc6e8c6f50cbfb1d4afd4e0997d5837fb225245d3fb26008f3c9412", size = 362142, upload-time = "2025-10-10T07:04:42.344Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0d/f46bba0f0df4dcd3d47160003b956b19329c25f63fe9e910aa17ca9fa0e5/backports_zstd-1.0.0-cp312-cp312-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:de915a8ecd290c601be7b7436291397b0ac1f7841c97c3a13777bb1065881773", size = 506399, upload-time = "2025-10-10T07:04:43.846Z" }, + { url = "https://files.pythonhosted.org/packages/92/a1/681e03e50379d72e06c3de796fb8cc5880fca8b70b82562b2eb712abf6d1/backports_zstd-1.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9fffb08c5c1b629c96813108183c8b02d6b07ed6ec81cca8d094089e749db4b5", size = 476222, upload-time = "2025-10-10T07:04:44.975Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ca/8b0a8b959668668c50af6bfad6fea564d2b6becdcffd998e03dfc04c3954/backports_zstd-1.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:064d4dc840bcfd8c5c9b37dcacd4fb27eac473c75006120015a9f88b73368c9b", size = 581678, upload-time = "2025-10-10T07:04:46.459Z" }, + { url = "https://files.pythonhosted.org/packages/4f/9a/921ec253ad5a592da20bf8ab1a5be16b242722f193e02d7a3678702aeffc/backports_zstd-1.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0051911391c3f934bb48e8ca08f4319d94b08362a40d96a4b5534c60f00deca2", size = 640408, upload-time = "2025-10-10T07:04:48.178Z" }, + { url = "https://files.pythonhosted.org/packages/ca/8c/0826259b7076cdaaceda1d52f2859c771dc45efed155084a49f538f0ea2e/backports_zstd-1.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e5f3453f0ea32ccf262e11e711ef1a0a986903b8a3a3078bf93fafdd5cf311c", size = 494195, upload-time = "2025-10-10T07:04:49.326Z" }, + { url = "https://files.pythonhosted.org/packages/a9/a5/75b1c1e26e305f06a7cde591213d5b3c8591b06882ae635b8ffeb8df6f44/backports_zstd-1.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:33bce0351cd0ad7bd9f363740b894e65255eb93d16d05097ef1d60643ce1cc27", size = 482255, upload-time = "2025-10-10T07:04:50.722Z" }, + { url = "https://files.pythonhosted.org/packages/dc/24/7061610369a5dbadcddc6f340d5aa8304ae58aee07a6a851b8fa24638036/backports_zstd-1.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:820f3cd08c5c8672015b7e52bf04e3e707727e6576d988eadc1799c0c47b33d9", size = 509829, upload-time = "2025-10-10T07:04:52.014Z" }, + { url = "https://files.pythonhosted.org/packages/e6/28/afc0158ba3d5d5a03560348f9a79fb8a1e0d0ef98f1d176ab37aa887ed5e/backports_zstd-1.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4e327fe73bfc634e8b04b5e0f715c97680987d633f161fd4702027b34685be43", size = 586059, upload-time = "2025-10-10T07:04:53.255Z" }, + { url = "https://files.pythonhosted.org/packages/b4/0d/68f1fa86a79faee7f6533bced500ee622dde98c9b3b0ddab58a4fe6410d5/backports_zstd-1.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4055318ebb7f6ffad99dabd312706599c9e119c834d6c741a946c0d4b3e5be4e", size = 630869, upload-time = "2025-10-10T07:04:54.397Z" }, + { url = "https://files.pythonhosted.org/packages/83/e1/a529be674d179caf201e5e406dc70a2c4156e182fa777e43f43f6afa69c6/backports_zstd-1.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:79d3c879720ee4987782da55d728919f9294a8ea6fac76c9af84bc06f3b0f942", size = 498686, upload-time = "2025-10-10T07:04:55.593Z" }, + { url = "https://files.pythonhosted.org/packages/17/9a/075582e942841520c47535f9ff62b728a88565b737ae21dc99ebcc15ef61/backports_zstd-1.0.0-cp312-cp312-win32.whl", hash = "sha256:930ccc283fdf76d1acca9529acd6ccb6cd26cdaf684d69cc6f359683f90357be", size = 288822, upload-time = "2025-10-10T07:04:56.835Z" }, + { url = "https://files.pythonhosted.org/packages/7f/14/615cd31c0de23e330e13ba77a6aed9a1d27360ebdf5e68b078c54b8cdbdb/backports_zstd-1.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f1bb3e6d21ebfb22070288b7fb47bbb0baaae604890c4087edf5637debb6bd91", size = 313841, upload-time = "2025-10-10T07:04:58.003Z" }, + { url = "https://files.pythonhosted.org/packages/8e/b8/87b2467bf82eabb4acd4651f193363ec04973baa35141be441bf9e9e98c0/backports_zstd-1.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:f5fe75e5996b5f4712235f9c63cdb7e5932a9cdf3a41232989f8a3ef1667f784", size = 288950, upload-time = "2025-10-10T07:04:59.279Z" }, + { url = "https://files.pythonhosted.org/packages/19/36/0182161a23009d5439e125d4af7b13d2df0292663e7f87141d5cf76d3060/backports_zstd-1.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c522469a67fef66998fd9eff2195512ca54d78c6fecdf1c466d2b7752dd810b", size = 435481, upload-time = "2025-10-10T07:05:00.833Z" }, + { url = "https://files.pythonhosted.org/packages/79/ce/6c235828d54d0027838316d9ce284b52e7bc266154f5e57086a7c7796691/backports_zstd-1.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c28546bcffb88ee38a742e3364338c49672d776ea2c73decc05fbf79f045797e", size = 361757, upload-time = "2025-10-10T07:05:02.422Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/7cbc80512df9b89ae39ab3920afbaad733d4b64390b4439e52ef3673da7b/backports_zstd-1.0.0-cp313-cp313-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:9719c14984ca99f5567a5974210d04c75aa02b0124653ee1b1d9a39bf0764fc6", size = 505673, upload-time = "2025-10-10T07:05:03.596Z" }, + { url = "https://files.pythonhosted.org/packages/f8/bc/ea32d4698fac21fe6cc08a124ae21daa41be03f788f244791c47e31a4360/backports_zstd-1.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a31220e8544c2194c4a7c3bd9f7fb0eee3c0ce5f8306e55df762428159ff0512", size = 475879, upload-time = "2025-10-10T07:05:04.796Z" }, + { url = "https://files.pythonhosted.org/packages/bf/42/68344db3586455983bdcdffe51253fa4415908e700d50287249ad6589bc9/backports_zstd-1.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3571e35d6682119daf109678a68fa8a9e29f79487ee7ec2da63a7e97562acb8c", size = 581359, upload-time = "2025-10-10T07:05:05.977Z" }, + { url = "https://files.pythonhosted.org/packages/0f/d0/3d153d78a52a46ce4c363680da7fbc593eeb314150f005c4bf7c2bd5b51f/backports_zstd-1.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:26ccb82bbeb36fffeb3865abe7df9b9b82d6462a488cd2f3c10e91c41c3103cc", size = 642203, upload-time = "2025-10-10T07:05:07.236Z" }, + { url = "https://files.pythonhosted.org/packages/11/c3/e31b4e591daec3eab2446db971f275d349aad36041236d5f067ab20fa1a9/backports_zstd-1.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d90cfb475d6d08c596ae77a7009cdda7374ecd79354fd75185cf029bf2204620", size = 490828, upload-time = "2025-10-10T07:05:08.446Z" }, + { url = "https://files.pythonhosted.org/packages/2d/80/ef7d02d846f710fc95c6d7eb3298ef6504e51f8707f24e1624d139f791d5/backports_zstd-1.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0b00c9c22cae8c1f87e2f23f9aeda7fee82ff671672b9f5a161a7ba094d9904b", size = 481638, upload-time = "2025-10-10T07:05:10.18Z" }, + { url = "https://files.pythonhosted.org/packages/5e/9b/f32500bf26ef588ce4f6284f453532d08789e412a5ecd60c501c77c88f8f/backports_zstd-1.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a87c4491036954ae6d15edaf1e6d5b1941e13a9df14d6a9952899713fcfb0796", size = 509228, upload-time = "2025-10-10T07:05:11.313Z" }, + { url = "https://files.pythonhosted.org/packages/6d/67/f689055f90a2874578b2b3e7c84311c3007b2fa60c51454e8c432203f1c7/backports_zstd-1.0.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8ea8c5d283211bc21c9782db7a8504a275a5b97e883b0bf67f6903a3af48f3d3", size = 585789, upload-time = "2025-10-10T07:05:12.477Z" }, + { url = "https://files.pythonhosted.org/packages/86/53/dea52bd76a3ba519a4937e6cab6cbdcdc36b618090eabeac998f69d1bb97/backports_zstd-1.0.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e1d12f64d1bd535c782f30b33d1f60c060105d124f9ade22556fefbf36087776", size = 632571, upload-time = "2025-10-10T07:05:14.18Z" }, + { url = "https://files.pythonhosted.org/packages/43/c8/ce10a94132957f57860b9440fe726615a6a6e8c5fdfee565d8a1b3a573de/backports_zstd-1.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:df08eb2735363a11a9222203c3e9a478d7569511bdd9aa2cc64a39e0403cf09a", size = 495124, upload-time = "2025-10-10T07:05:15.398Z" }, + { url = "https://files.pythonhosted.org/packages/8a/c0/830ea473e3c6133758a9a421157c8d4d5c65408d565336a59403e6bb0b29/backports_zstd-1.0.0-cp313-cp313-win32.whl", hash = "sha256:0309f924ec026d2174297754aeb97fe5fa665cfe0f8bc70e7bb82808a7adcd08", size = 288467, upload-time = "2025-10-10T07:05:16.563Z" }, + { url = "https://files.pythonhosted.org/packages/75/5a/318d40e1589908a44532e2c850fedfaedbf4e7c75b6fa3cf4b532fcadc84/backports_zstd-1.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:d68f7f72579070bfef7890ba5316701c001e90b4455bb5c2591558b9d53a7f6e", size = 313680, upload-time = "2025-10-10T07:05:17.71Z" }, + { url = "https://files.pythonhosted.org/packages/fa/c8/bb0067165e9b1066104a88536eac04cfac388abb5d500b3405cf783c96e8/backports_zstd-1.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:911a099122ce7cebed9e1ec64c1fa54a6ab461d6c7cec8d460d8b3a09bbd439f", size = 288699, upload-time = "2025-10-10T07:05:18.904Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0e/83badde9b389c198a9a45bccd38a9dc5baa7db92e531d4951b1c0686e29a/backports_zstd-1.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:fad9af0c89048a50e67bfd9e3509d710b268d4ae0e47a2bc945dca273a17286d", size = 436173, upload-time = "2025-10-10T07:05:20.083Z" }, + { url = "https://files.pythonhosted.org/packages/a1/92/d1f5e9f7e1afbb730020e8c7060d6101cad4aa20eb13b7cb98dda9414726/backports_zstd-1.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:1d0459de16491399e6b6d151213964395ba092ba21b7739756f0507533c8e44f", size = 362456, upload-time = "2025-10-10T07:05:21.367Z" }, + { url = "https://files.pythonhosted.org/packages/fa/0c/165b04a4bd9b39455e5d051f504acab6c5af3583939336bd2c77a2dc6398/backports_zstd-1.0.0-cp313-cp313t-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:2dcf4c080c0fe8f4ca8f1ff560756ae78e6fada721813c1506f8fd3399996646", size = 507618, upload-time = "2025-10-10T07:05:23.083Z" }, + { url = "https://files.pythonhosted.org/packages/72/45/868e6b66852b64766feb3a3ce28cc74dd86141120ac6740855f90239fb85/backports_zstd-1.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e26b558e0f2413e9499949dd75985a03be008287598916eaa75a67efc52e4f1b", size = 475518, upload-time = "2025-10-10T07:05:24.297Z" }, + { url = "https://files.pythonhosted.org/packages/44/ff/71021dae5e024d7e12b5078719582b26eeae984f5718846c135134288330/backports_zstd-1.0.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f0a0c11aee04e0a10e9688ef8d9014af888763507bea85a0d7a7ba5220272996", size = 580942, upload-time = "2025-10-10T07:05:25.497Z" }, + { url = "https://files.pythonhosted.org/packages/7c/64/553009a1d449033fafba311d2e204b19ebb0dfdba069a639965fb6f0bc57/backports_zstd-1.0.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c8aa92bf9407ed1ba62234e085876b628ecd9d2636c0e1e23f2dacf3be21af2a", size = 639934, upload-time = "2025-10-10T07:05:27.147Z" }, + { url = "https://files.pythonhosted.org/packages/12/da/490a0b80144fb888ae9328f73d7bfa58fd5ccf8bdb81a6d20561ec5a0ff7/backports_zstd-1.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c78c1eaf3fdea00514afe9636e01f94890f1e4c6e8e1dfede48015364b950705", size = 494822, upload-time = "2025-10-10T07:05:28.325Z" }, + { url = "https://files.pythonhosted.org/packages/ad/d2/0f7702000bd08ff6aa71114b377141f2d30154597dcd9459a08554122fa5/backports_zstd-1.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4ff181018de5afb1b87edf9a88ec7e62b4b053e75b91ec8ac7819042126ca7cf", size = 482001, upload-time = "2025-10-10T07:05:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/20/78/2cc5dc095b93841eb251d91cf4b3b4c1e5efc15db40f97f003603acaba3f/backports_zstd-1.0.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:eed0753c698a21f0d38464c2a6d4d5e770d2ea2e9c3a308f1712d674598a049f", size = 511380, upload-time = "2025-10-10T07:05:30.874Z" }, + { url = "https://files.pythonhosted.org/packages/eb/b3/328c4835b661b3a9f2c6f2eb6350a9d4bc673e7e5c7d1149ecb235abe774/backports_zstd-1.0.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:18f5d3ed08afcd08b86b305bf167c0f2b582b906742e4bd3c7389050d5b59817", size = 585514, upload-time = "2025-10-10T07:05:32.523Z" }, + { url = "https://files.pythonhosted.org/packages/4f/31/3d347703f5d913d35edb58e9fbfbf8155dc63d1e6c0ed93eb5205e09d5f1/backports_zstd-1.0.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:7a8c950abe629e5d8ea606e6600dd1d6cd6bddd7a4566cf34201d31244d10ab3", size = 630541, upload-time = "2025-10-10T07:05:33.799Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ac/323abb5ba0e5da924dec83073464eb87223677c577e0969c90b279700c1f/backports_zstd-1.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:973e74f4e1f19f7879a6a7900e9a268522eb4297100a573ed69969df63f94674", size = 499450, upload-time = "2025-10-10T07:05:35.4Z" }, + { url = "https://files.pythonhosted.org/packages/81/cb/1d77d6cf3850e804f4994a8106db2830e58638ed0f2d0f92636adb38a38d/backports_zstd-1.0.0-cp313-cp313t-win32.whl", hash = "sha256:870effb06ffb7623af1c8dac35647a1c4b597d3bb0b3f9895c738bd5ad23666c", size = 289410, upload-time = "2025-10-10T07:05:36.776Z" }, + { url = "https://files.pythonhosted.org/packages/16/59/5ec914419b6db0516794f6f5214b1990e550971fe0867c60ea55262b5d68/backports_zstd-1.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8bb6470186301e84aaa704c8eb339c97dcdec67445e7e197d44665e933807e4e", size = 314778, upload-time = "2025-10-10T07:05:38.637Z" }, + { url = "https://files.pythonhosted.org/packages/75/88/198e1726f65229f219bb2a72849c9424ba41f6de989c3a8c9bf58118a4a7/backports_zstd-1.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b2d85810393b3be6e8e77d89a165fc67c2a08290a210dbd77e2fc148dbc4106f", size = 289333, upload-time = "2025-10-10T07:05:39.758Z" }, + { url = "https://files.pythonhosted.org/packages/c5/80/cad971088dd705adedce95e4ce77801cbad61ac9250b4e77fbbb2881c34f/backports_zstd-1.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1315107754808856ddcf187a19cc139cb4a2a65970bd1bafd71718cfd051d32e", size = 435835, upload-time = "2025-10-10T07:05:41.027Z" }, + { url = "https://files.pythonhosted.org/packages/8c/9f/8c13830b7d698bd270d9aaeebd685670e8955282a3e5f6967521bcb5b2d3/backports_zstd-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:96bf0a564af74951adfa6addd1c148ab467ba92172cd23b267dd150b0f47fd9e", size = 362191, upload-time = "2025-10-10T07:05:42.594Z" }, + { url = "https://files.pythonhosted.org/packages/db/b4/dd0d86d04b1dd4d08468e8d980d3ece48d86909b9635f1efebce309b98d4/backports_zstd-1.0.0-cp39-cp39-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:d7c1c6ebedf7bc70c1adca3f4624e1e04b2a0d7a389b065f0c5d6244f6be3dae", size = 506076, upload-time = "2025-10-10T07:05:43.842Z" }, + { url = "https://files.pythonhosted.org/packages/86/6e/b484e33d8eb13b9379741e9e88daa48c15c9038e9ee9926ebf1096bfed6f/backports_zstd-1.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2ea4ff5e162fb61f8421724021eac0a612af0aff2da9e585c96d27c2da924589", size = 475720, upload-time = "2025-10-10T07:05:45.094Z" }, + { url = "https://files.pythonhosted.org/packages/b4/e6/c49157bb8240ffd4c0abf93306276be4e80d2ef8c1b8465e06bcecece250/backports_zstd-1.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5a6047fb0bef5bbe519b1e46108847e01a48d002b3dfc69af1423a53d8144dda", size = 581396, upload-time = "2025-10-10T07:05:46.389Z" }, + { url = "https://files.pythonhosted.org/packages/67/24/a900cfdc4dd74306c6b53604ad51af5f38e2353b0d615a3c869051134b3b/backports_zstd-1.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2d510b422e7b2b6ca142082fa85ac360edf77b73108454335ecfd19071c819ff", size = 641053, upload-time = "2025-10-10T07:05:48.012Z" }, + { url = "https://files.pythonhosted.org/packages/3d/75/5ce7953c6306fc976abf7cf33f0071a10d58c71c94348844ae625dfdee22/backports_zstd-1.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e6349defa266342802d86343b7fc59ee12048bca5f77a9fcb1c1ab9bb894d09", size = 491186, upload-time = "2025-10-10T07:05:49.424Z" }, + { url = "https://files.pythonhosted.org/packages/f9/db/375410a26abf2ac972fec554122065d774fa037f9ffeedf4f7b05553b01d/backports_zstd-1.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:20b0a1be02b2ee18c74b68a89eec14be98d11f0415a79eb209dce4bc2d6f4e52", size = 481750, upload-time = "2025-10-10T07:05:50.678Z" }, + { url = "https://files.pythonhosted.org/packages/21/d1/fa7c2d7b7a1c433e4e79c027c54d17f2ffc489ab7e76496b149d9ae6f667/backports_zstd-1.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3595cbc2f4d8a5dc6bd791ba8d9fee2fdfcdfc07206e944c1b3ec3090fcbc99e", size = 509601, upload-time = "2025-10-10T07:05:51.952Z" }, + { url = "https://files.pythonhosted.org/packages/c4/35/befe5ee9bec078f7f4c9290cefc56d3336b4ee52d17a60293d9dda4589c0/backports_zstd-1.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d3eddb298db7a9a1b122c40bcb418a154b6c8f1b54ef7308644e0e67d42c159e", size = 585743, upload-time = "2025-10-10T07:05:53.609Z" }, + { url = "https://files.pythonhosted.org/packages/a3/0a/cfbf0ae24348be3c3f597717c639e9cbe29692a99ad650c232b8a97c74c1/backports_zstd-1.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ef31a9482727e6b335f673a8b8116be186b83ca72be4a07f60684b8220a213e9", size = 631591, upload-time = "2025-10-10T07:05:54.846Z" }, + { url = "https://files.pythonhosted.org/packages/e1/2d/7c996648c7a7b84a3e8b045fb494466475c1f599374da3c780198bde96c4/backports_zstd-1.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0a6a6d114058735d042116aa9199b0b436236fddcb5f805fb17310fcadddd441", size = 495294, upload-time = "2025-10-10T07:05:56.417Z" }, + { url = "https://files.pythonhosted.org/packages/be/c8/5a15a4a52506e2e2598d2667ae67404516ea4336535fdd7b7b1b2fffd623/backports_zstd-1.0.0-cp39-cp39-win32.whl", hash = "sha256:8aea1bdc89becb21d1df1cdcc6182b2aa9540addaa20569169e01b25b8996f41", size = 288646, upload-time = "2025-10-10T07:05:57.993Z" }, + { url = "https://files.pythonhosted.org/packages/67/4e/42409d11a9d324f68a079493c5806d593f54184962e5fff1dc88a1d5e3ba/backports_zstd-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:23a40a40fb56f4b47ece5e9cb7048c2e93d9eeb81ad5fb4e68adcaeb699d6b98", size = 313532, upload-time = "2025-10-10T07:05:59.212Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f8/932b05fd2f98f85c95674f09ae28ccc1638b8cc17d6f566d21ed499ee456/backports_zstd-1.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:2f07bd1c1b478bd8a0bbe413439c24ee08ceb6ebc957a97de3666e8f2e612463", size = 288756, upload-time = "2025-10-10T07:06:01.216Z" }, + { url = "https://files.pythonhosted.org/packages/5d/35/680ac0ad73676eb1f3bb71f6dd3bbaa2d28a9e4293d3ede4adcd78905b93/backports_zstd-1.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:efa53658c1e617986ed202e7aa8eb23c69fc8f33d01192cd1565e455ed9aa057", size = 409790, upload-time = "2025-10-10T07:06:02.405Z" }, + { url = "https://files.pythonhosted.org/packages/62/6c/6410c334890b4a43c893b9dcd3cbc8b10f17ea8dced483d9ba200b17ccab/backports_zstd-1.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:4386a17c99ce647877298c916f2afeacb238e56cb7cca2d665822a0ee743b5d5", size = 339308, upload-time = "2025-10-10T07:06:03.667Z" }, + { url = "https://files.pythonhosted.org/packages/0f/b2/ad3e651985b8a2a4876e5adc61100cef07a8caefb87180391f1f5b8c801c/backports_zstd-1.0.0-pp310-pypy310_pp73-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:cbbb0bda54bda18af99961d7d22d7bc7fedcc7d8ca3a04dcde9189494dbfc87a", size = 420356, upload-time = "2025-10-10T07:06:04.984Z" }, + { url = "https://files.pythonhosted.org/packages/e7/c4/361bde3f570804674b9033ac41cc26735ceb4e33ccce2645079eff62a26f/backports_zstd-1.0.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c49048ec50f81b196ab0f3c49c025912eba1c6e55259b99f11ee6e8c04226ab", size = 393900, upload-time = "2025-10-10T07:06:06.252Z" }, + { url = "https://files.pythonhosted.org/packages/7b/90/f7bc5c0d204c2312fbe4e62592c92200f19da8840ce8b4a1df56080b7537/backports_zstd-1.0.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6d1fe816c3c31241b0bdcc96364fae689f3e49a923469ad1ad7a9aeb0bbcd67", size = 413862, upload-time = "2025-10-10T07:06:07.506Z" }, + { url = "https://files.pythonhosted.org/packages/77/2b/9c1949456566228578d30013e81a593577e63e1cae9e72b058e37ae4c5e2/backports_zstd-1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:96a57f48d2d64c985393bb4ae15b61097d8fc0d56416e790f7cc09bf9212fb87", size = 299722, upload-time = "2025-10-10T07:06:08.661Z" }, + { url = "https://files.pythonhosted.org/packages/a0/51/f22627d208ab63e97f5441374110363f4b5e0c2ce0b4f2412e753eb12bf1/backports_zstd-1.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8191c019cefaf074c3f05ebec5ad19ec606b7ac1dc915b66a0820268e6f0e327", size = 409687, upload-time = "2025-10-10T07:06:09.844Z" }, + { url = "https://files.pythonhosted.org/packages/4f/93/50b2ebb2e8f388bb124c4a39974e29f841ef1452d603045e292e107227b9/backports_zstd-1.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:479270cd6385775dca98accaf304e5f011d94280ad4681d3e925a1b4dfd19aaf", size = 339221, upload-time = "2025-10-10T07:06:11.13Z" }, + { url = "https://files.pythonhosted.org/packages/25/f5/103645f44a92c4de2860b8d6cf6c5414b63956278764f8b7db359bdeae94/backports_zstd-1.0.0-pp311-pypy311_pp73-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:3beab43bfda8e453648b9cce5edcceb5add6c42c331873b41ab1d24232d9c2b0", size = 420355, upload-time = "2025-10-10T07:06:12.283Z" }, + { url = "https://files.pythonhosted.org/packages/d9/10/e185f05ec85bc05c82d7efdd75528e695c85181eb291cc4c19b2f26153f1/backports_zstd-1.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67469b247c99537b77f7d402580cbb7298fa15ebe3ce6984d89a5b65d4d5a6c2", size = 393900, upload-time = "2025-10-10T07:06:13.508Z" }, + { url = "https://files.pythonhosted.org/packages/fd/40/3f717216e21617e919d12d6520d0da5b22002e07f12638629acc9e5dcc2e/backports_zstd-1.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6910a9311e7a2987d353f396568f5e401cf4917e2112bf610e62385ad02d8cf4", size = 413863, upload-time = "2025-10-10T07:06:15.531Z" }, + { url = "https://files.pythonhosted.org/packages/23/f5/cb12f5dd6ac648e92d8cec8b69fd4064bd549c126fb0d3fe6d3dd237afbe/backports_zstd-1.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:85f08363b7ca504a8bceaa2d4333a1a307d2b2056f77a13036a81d7aa3c87b2a", size = 299719, upload-time = "2025-10-10T07:06:17.032Z" }, +] + [[package]] name = "beautifulsoup4" version = "4.13.4" @@ -1131,7 +1241,7 @@ test = [ { name = "pytest-asyncio" }, ] zstd = [ - { name = "zstandard" }, + { name = "backports-zstd", marker = "python_full_version < '3.14'" }, ] [package.dev-dependencies] @@ -1160,6 +1270,7 @@ typing = [ [package.metadata] requires-dist = [ + { name = "backports-zstd", marker = "python_full_version < '3.14' and extra == 'zstd'", specifier = ">=1.0.0" }, { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')", specifier = ">=2023.7.22" }, { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')", specifier = ">=2023.7.22" }, { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, @@ -1182,7 +1293,6 @@ requires-dist = [ { name = "sphinx-rtd-theme", marker = "extra == 'docs'", specifier = ">=2,<4" }, { name = "sphinxcontrib-shellcheck", marker = "extra == 'docs'", specifier = ">=1,<2" }, { name = "winkerberos", marker = "os_name == 'nt' and extra == 'gssapi'", specifier = ">=0.5.0" }, - { name = "zstandard", marker = "extra == 'zstd'" }, ] provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "test", "zstd"] @@ -2115,94 +2225,3 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800, upload-time = "2024-11-28T08:48:46.637Z" }, { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980, upload-time = "2024-11-28T08:50:35.681Z" }, ] - -[[package]] -name = "zstandard" -version = "0.23.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/55/bd0487e86679db1823fc9ee0d8c9c78ae2413d34c0b461193b5f4c31d22f/zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", size = 788701, upload-time = "2024-07-15T00:13:27.351Z" }, - { url = "https://files.pythonhosted.org/packages/e1/8a/ccb516b684f3ad987dfee27570d635822e3038645b1a950c5e8022df1145/zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", size = 633678, upload-time = "2024-07-15T00:13:30.24Z" }, - { url = "https://files.pythonhosted.org/packages/12/89/75e633d0611c028e0d9af6df199423bf43f54bea5007e6718ab7132e234c/zstandard-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", size = 4941098, upload-time = "2024-07-15T00:13:32.526Z" }, - { url = "https://files.pythonhosted.org/packages/4a/7a/bd7f6a21802de358b63f1ee636ab823711c25ce043a3e9f043b4fcb5ba32/zstandard-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", size = 5308798, upload-time = "2024-07-15T00:13:34.925Z" }, - { url = "https://files.pythonhosted.org/packages/79/3b/775f851a4a65013e88ca559c8ae42ac1352db6fcd96b028d0df4d7d1d7b4/zstandard-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", size = 5341840, upload-time = "2024-07-15T00:13:37.376Z" }, - { url = "https://files.pythonhosted.org/packages/09/4f/0cc49570141dd72d4d95dd6fcf09328d1b702c47a6ec12fbed3b8aed18a5/zstandard-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", size = 5440337, upload-time = "2024-07-15T00:13:39.772Z" }, - { url = "https://files.pythonhosted.org/packages/e7/7c/aaa7cd27148bae2dc095191529c0570d16058c54c4597a7d118de4b21676/zstandard-0.23.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", size = 4861182, upload-time = "2024-07-15T00:13:42.495Z" }, - { url = "https://files.pythonhosted.org/packages/ac/eb/4b58b5c071d177f7dc027129d20bd2a44161faca6592a67f8fcb0b88b3ae/zstandard-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", size = 4932936, upload-time = "2024-07-15T00:13:44.234Z" }, - { url = "https://files.pythonhosted.org/packages/44/f9/21a5fb9bb7c9a274b05ad700a82ad22ce82f7ef0f485980a1e98ed6e8c5f/zstandard-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", size = 5464705, upload-time = "2024-07-15T00:13:46.822Z" }, - { url = "https://files.pythonhosted.org/packages/49/74/b7b3e61db3f88632776b78b1db597af3f44c91ce17d533e14a25ce6a2816/zstandard-0.23.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", size = 4857882, upload-time = "2024-07-15T00:13:49.297Z" }, - { url = "https://files.pythonhosted.org/packages/4a/7f/d8eb1cb123d8e4c541d4465167080bec88481ab54cd0b31eb4013ba04b95/zstandard-0.23.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", size = 4697672, upload-time = "2024-07-15T00:13:51.447Z" }, - { url = "https://files.pythonhosted.org/packages/5e/05/f7dccdf3d121309b60342da454d3e706453a31073e2c4dac8e1581861e44/zstandard-0.23.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", size = 5206043, upload-time = "2024-07-15T00:13:53.587Z" }, - { url = "https://files.pythonhosted.org/packages/86/9d/3677a02e172dccd8dd3a941307621c0cbd7691d77cb435ac3c75ab6a3105/zstandard-0.23.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", size = 5667390, upload-time = "2024-07-15T00:13:56.137Z" }, - { url = "https://files.pythonhosted.org/packages/41/7e/0012a02458e74a7ba122cd9cafe491facc602c9a17f590367da369929498/zstandard-0.23.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", size = 5198901, upload-time = "2024-07-15T00:13:58.584Z" }, - { url = "https://files.pythonhosted.org/packages/65/3a/8f715b97bd7bcfc7342d8adcd99a026cb2fb550e44866a3b6c348e1b0f02/zstandard-0.23.0-cp310-cp310-win32.whl", hash = "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", size = 430596, upload-time = "2024-07-15T00:14:00.693Z" }, - { url = "https://files.pythonhosted.org/packages/19/b7/b2b9eca5e5a01111e4fe8a8ffb56bdcdf56b12448a24effe6cfe4a252034/zstandard-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", size = 495498, upload-time = "2024-07-15T00:14:02.741Z" }, - { url = "https://files.pythonhosted.org/packages/9e/40/f67e7d2c25a0e2dc1744dd781110b0b60306657f8696cafb7ad7579469bd/zstandard-0.23.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", size = 788699, upload-time = "2024-07-15T00:14:04.909Z" }, - { url = "https://files.pythonhosted.org/packages/e8/46/66d5b55f4d737dd6ab75851b224abf0afe5774976fe511a54d2eb9063a41/zstandard-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", size = 633681, upload-time = "2024-07-15T00:14:13.99Z" }, - { url = "https://files.pythonhosted.org/packages/63/b6/677e65c095d8e12b66b8f862b069bcf1f1d781b9c9c6f12eb55000d57583/zstandard-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", size = 4944328, upload-time = "2024-07-15T00:14:16.588Z" }, - { url = "https://files.pythonhosted.org/packages/59/cc/e76acb4c42afa05a9d20827116d1f9287e9c32b7ad58cc3af0721ce2b481/zstandard-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", size = 5311955, upload-time = "2024-07-15T00:14:19.389Z" }, - { url = "https://files.pythonhosted.org/packages/78/e4/644b8075f18fc7f632130c32e8f36f6dc1b93065bf2dd87f03223b187f26/zstandard-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", size = 5344944, upload-time = "2024-07-15T00:14:22.173Z" }, - { url = "https://files.pythonhosted.org/packages/76/3f/dbafccf19cfeca25bbabf6f2dd81796b7218f768ec400f043edc767015a6/zstandard-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", size = 5442927, upload-time = "2024-07-15T00:14:24.825Z" }, - { url = "https://files.pythonhosted.org/packages/0c/c3/d24a01a19b6733b9f218e94d1a87c477d523237e07f94899e1c10f6fd06c/zstandard-0.23.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", size = 4864910, upload-time = "2024-07-15T00:14:26.982Z" }, - { url = "https://files.pythonhosted.org/packages/1c/a9/cf8f78ead4597264f7618d0875be01f9bc23c9d1d11afb6d225b867cb423/zstandard-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", size = 4935544, upload-time = "2024-07-15T00:14:29.582Z" }, - { url = "https://files.pythonhosted.org/packages/2c/96/8af1e3731b67965fb995a940c04a2c20997a7b3b14826b9d1301cf160879/zstandard-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", size = 5467094, upload-time = "2024-07-15T00:14:40.126Z" }, - { url = "https://files.pythonhosted.org/packages/ff/57/43ea9df642c636cb79f88a13ab07d92d88d3bfe3e550b55a25a07a26d878/zstandard-0.23.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", size = 4860440, upload-time = "2024-07-15T00:14:42.786Z" }, - { url = "https://files.pythonhosted.org/packages/46/37/edb78f33c7f44f806525f27baa300341918fd4c4af9472fbc2c3094be2e8/zstandard-0.23.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", size = 4700091, upload-time = "2024-07-15T00:14:45.184Z" }, - { url = "https://files.pythonhosted.org/packages/c1/f1/454ac3962671a754f3cb49242472df5c2cced4eb959ae203a377b45b1a3c/zstandard-0.23.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", size = 5208682, upload-time = "2024-07-15T00:14:47.407Z" }, - { url = "https://files.pythonhosted.org/packages/85/b2/1734b0fff1634390b1b887202d557d2dd542de84a4c155c258cf75da4773/zstandard-0.23.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", size = 5669707, upload-time = "2024-07-15T00:15:03.529Z" }, - { url = "https://files.pythonhosted.org/packages/52/5a/87d6971f0997c4b9b09c495bf92189fb63de86a83cadc4977dc19735f652/zstandard-0.23.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", size = 5201792, upload-time = "2024-07-15T00:15:28.372Z" }, - { url = "https://files.pythonhosted.org/packages/79/02/6f6a42cc84459d399bd1a4e1adfc78d4dfe45e56d05b072008d10040e13b/zstandard-0.23.0-cp311-cp311-win32.whl", hash = "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", size = 430586, upload-time = "2024-07-15T00:15:32.26Z" }, - { url = "https://files.pythonhosted.org/packages/be/a2/4272175d47c623ff78196f3c10e9dc7045c1b9caf3735bf041e65271eca4/zstandard-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", size = 495420, upload-time = "2024-07-15T00:15:34.004Z" }, - { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713, upload-time = "2024-07-15T00:15:35.815Z" }, - { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459, upload-time = "2024-07-15T00:15:37.995Z" }, - { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707, upload-time = "2024-07-15T00:15:39.872Z" }, - { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545, upload-time = "2024-07-15T00:15:41.75Z" }, - { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533, upload-time = "2024-07-15T00:15:44.114Z" }, - { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510, upload-time = "2024-07-15T00:15:46.509Z" }, - { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973, upload-time = "2024-07-15T00:15:49.939Z" }, - { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968, upload-time = "2024-07-15T00:15:52.025Z" }, - { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179, upload-time = "2024-07-15T00:15:54.971Z" }, - { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577, upload-time = "2024-07-15T00:15:57.634Z" }, - { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899, upload-time = "2024-07-15T00:16:00.811Z" }, - { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964, upload-time = "2024-07-15T00:16:03.669Z" }, - { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398, upload-time = "2024-07-15T00:16:06.694Z" }, - { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313, upload-time = "2024-07-15T00:16:09.758Z" }, - { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877, upload-time = "2024-07-15T00:16:11.758Z" }, - { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595, upload-time = "2024-07-15T00:16:13.731Z" }, - { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975, upload-time = "2024-07-15T00:16:16.005Z" }, - { url = "https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448, upload-time = "2024-07-15T00:16:17.897Z" }, - { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269, upload-time = "2024-07-15T00:16:20.136Z" }, - { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228, upload-time = "2024-07-15T00:16:23.398Z" }, - { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891, upload-time = "2024-07-15T00:16:26.391Z" }, - { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310, upload-time = "2024-07-15T00:16:29.018Z" }, - { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912, upload-time = "2024-07-15T00:16:31.871Z" }, - { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946, upload-time = "2024-07-15T00:16:34.593Z" }, - { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994, upload-time = "2024-07-15T00:16:36.887Z" }, - { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681, upload-time = "2024-07-15T00:16:39.709Z" }, - { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239, upload-time = "2024-07-15T00:16:41.83Z" }, - { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 5200149, upload-time = "2024-07-15T00:16:44.287Z" }, - { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392, upload-time = "2024-07-15T00:16:46.423Z" }, - { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299, upload-time = "2024-07-15T00:16:49.053Z" }, - { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862, upload-time = "2024-07-15T00:16:51.003Z" }, - { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578, upload-time = "2024-07-15T00:16:53.135Z" }, - { url = "https://files.pythonhosted.org/packages/fb/96/4fcafeb7e013a2386d22f974b5b97a0b9a65004ed58c87ae001599bfbd48/zstandard-0.23.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", size = 788697, upload-time = "2024-07-15T00:17:31.236Z" }, - { url = "https://files.pythonhosted.org/packages/83/ff/a52ce725be69b86a2967ecba0497a8184540cc284c0991125515449e54e2/zstandard-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", size = 633679, upload-time = "2024-07-15T00:17:32.911Z" }, - { url = "https://files.pythonhosted.org/packages/34/0f/3dc62db122f6a9c481c335fff6fc9f4e88d8f6e2d47321ee3937328addb4/zstandard-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", size = 4940416, upload-time = "2024-07-15T00:17:34.849Z" }, - { url = "https://files.pythonhosted.org/packages/1d/e5/9fe0dd8c85fdc2f635e6660d07872a5dc4b366db566630161e39f9f804e1/zstandard-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", size = 5307693, upload-time = "2024-07-15T00:17:37.355Z" }, - { url = "https://files.pythonhosted.org/packages/73/bf/fe62c0cd865c171ee8ed5bc83174b5382a2cb729c8d6162edfb99a83158b/zstandard-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", size = 5341236, upload-time = "2024-07-15T00:17:40.213Z" }, - { url = "https://files.pythonhosted.org/packages/39/86/4fe79b30c794286110802a6cd44a73b6a314ac8196b9338c0fbd78c2407d/zstandard-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", size = 5439101, upload-time = "2024-07-15T00:17:42.284Z" }, - { url = "https://files.pythonhosted.org/packages/72/ed/cacec235c581ebf8c608c7fb3d4b6b70d1b490d0e5128ea6996f809ecaef/zstandard-0.23.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", size = 4860320, upload-time = "2024-07-15T00:17:44.21Z" }, - { url = "https://files.pythonhosted.org/packages/f6/1e/2c589a2930f93946b132fc852c574a19d5edc23fad2b9e566f431050c7ec/zstandard-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", size = 4931933, upload-time = "2024-07-15T00:17:46.455Z" }, - { url = "https://files.pythonhosted.org/packages/8e/f5/30eadde3686d902b5d4692bb5f286977cbc4adc082145eb3f49d834b2eae/zstandard-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", size = 5463878, upload-time = "2024-07-15T00:17:48.866Z" }, - { url = "https://files.pythonhosted.org/packages/e0/c8/8aed1f0ab9854ef48e5ad4431367fcb23ce73f0304f7b72335a8edc66556/zstandard-0.23.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", size = 4857192, upload-time = "2024-07-15T00:17:51.558Z" }, - { url = "https://files.pythonhosted.org/packages/a8/c6/55e666cfbcd032b9e271865e8578fec56e5594d4faeac379d371526514f5/zstandard-0.23.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69", size = 4696513, upload-time = "2024-07-15T00:17:53.924Z" }, - { url = "https://files.pythonhosted.org/packages/dc/bd/720b65bea63ec9de0ac7414c33b9baf271c8de8996e5ff324dc93fc90ff1/zstandard-0.23.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", size = 5204823, upload-time = "2024-07-15T00:17:55.948Z" }, - { url = "https://files.pythonhosted.org/packages/d8/40/d678db1556e3941d330cd4e95623a63ef235b18547da98fa184cbc028ecf/zstandard-0.23.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", size = 5666490, upload-time = "2024-07-15T00:17:58.327Z" }, - { url = "https://files.pythonhosted.org/packages/ed/cc/c89329723d7515898a1fc7ef5d251264078548c505719d13e9511800a103/zstandard-0.23.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", size = 5196622, upload-time = "2024-07-15T00:18:00.404Z" }, - { url = "https://files.pythonhosted.org/packages/78/4c/634289d41e094327a94500dfc919e58841b10ea3a9efdfafbac614797ec2/zstandard-0.23.0-cp39-cp39-win32.whl", hash = "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", size = 430620, upload-time = "2024-07-15T00:18:02.613Z" }, - { url = "https://files.pythonhosted.org/packages/a2/e2/0b0c5a0f4f7699fecd92c1ba6278ef9b01f2b0b0dd46f62bfc6729c05659/zstandard-0.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", size = 495528, upload-time = "2024-07-15T00:18:04.452Z" }, -] From eb25ce420e04aac8b939cc44a52ab11a268cf0fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Nov 2025 12:20:45 -0600 Subject: [PATCH 2111/2111] Bump the actions group across 1 directory with 4 updates (#2604) --- .github/workflows/codeql.yml | 4 ++-- .github/workflows/dist.yml | 8 ++++---- .github/workflows/release-python.yml | 2 +- .github/workflows/test-python.yml | 20 ++++++++++---------- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index b138324bf4..5820c86b97 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -46,7 +46,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 + uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v4 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -63,6 +63,6 @@ jobs: pip install -e . - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 + uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml index 84bf1ba893..b4d4a2e78b 100644 --- a/.github/workflows/dist.yml +++ b/.github/workflows/dist.yml @@ -91,7 +91,7 @@ jobs: # Free-threading builds: ls wheelhouse/*cp314t*.whl - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: wheel-${{ matrix.buildplat[1] }} path: ./wheelhouse/*.whl @@ -124,7 +124,7 @@ jobs: cd .. python -c "from pymongo import has_c; assert has_c()" - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: "sdist" path: ./dist/*.tar.gz @@ -135,13 +135,13 @@ jobs: name: Download Wheels steps: - name: Download all workflow run artifacts - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 - name: Flatten directory working-directory: . run: | find . -mindepth 2 -type f -exec mv {} . \; find . -type d -empty -delete - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: all-dist-${{ github.run_id }} path: "./*" diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 6abca9e528..43e500337e 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -75,7 +75,7 @@ jobs: id-token: write steps: - name: Download all the dists - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: name: all-dist-${{ github.run_id }} path: dist/ diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 4685ba2d92..20323dd925 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -26,7 +26,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: "3.10" @@ -68,7 +68,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -87,7 +87,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: "3.10" @@ -112,7 +112,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: "3.10" @@ -131,7 +131,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: "3.10" @@ -153,7 +153,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: "${{matrix.python}}" @@ -174,7 +174,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: enable-cache: true python-version: "3.10" @@ -214,7 +214,7 @@ jobs: run: | pip install build python -m build --sdist - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: "sdist" path: dist/*.tar.gz @@ -226,7 +226,7 @@ jobs: timeout-minutes: 20 steps: - name: Download sdist - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v6 with: path: sdist/ - name: Unpack SDist @@ -264,7 +264,7 @@ jobs: with: persist-credentials: false - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7 with: python-version: "3.10" - id: setup-mongodb